author    Linus Torvalds <torvalds@linux-foundation.org>  2018-12-28 23:54:57 -0500
committer Linus Torvalds <torvalds@linux-foundation.org>  2018-12-28 23:54:57 -0500
commit    457fa3469a65a524be04412f5cd497fa3b11c9fd
tree      e826786d7838668595dfac115ced53b32e5c97b9
parent    b07039b79c9ea64c1eacda1e01d645082e4a0d5d
parent    fbc4904c287778ddb74bf6060ac9dec51992fc53
Merge tag 'char-misc-4.21-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/char-misc
Pull char/misc driver updates from Greg KH:
 "Here is the big set of char and misc driver patches for 4.21-rc1.

  Lots of different types of driver things in here, as this tree seems
  to be the "collection of various driver subsystems not big enough to
  have their own git tree" lately.

  Anyway, some highlights of the changes in here:

   - binderfs: is it a rule that all driver subsystems will eventually
     grow to have their own filesystem? Binder now has one to handle
     the use of it in containerized systems. This was discussed at the
     Plumbers conference a few months ago and knocked into mergeable
     shape very fast by Christian Brauner. Who also has signed up to be
     another binder maintainer, showing a distinct lack of good
     judgement :)

   - binder updates and fixes

   - mei driver updates

   - fpga driver updates and additions

   - thunderbolt driver updates

   - soundwire driver updates

   - extcon driver updates

   - nvmem driver updates

   - hyper-v driver updates

   - coresight driver updates

   - pvpanic driver additions and reworking for more device support

   - lp driver updates. Yes really, it's _finally_ moved to the proper
     parallel port driver model, something I never thought I would see
     happen. Good stuff.

   - other tiny driver updates and fixes.

  All of these have been in linux-next for a while with no reported
  issues"

* tag 'char-misc-4.21-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/char-misc: (116 commits)
  MAINTAINERS: add another Android binder maintainer
  intel_th: msu: Fix an off-by-one in attribute store
  stm class: Add a reference to the SyS-T document
  stm class: Fix a module refcount leak in policy creation error path
  char: lp: use new parport device model
  char: lp: properly count the lp devices
  char: lp: use first unused lp number while registering
  char: lp: detach the device when parallel port is removed
  char: lp: introduce list to save port number
  bus: qcom: remove duplicated include from qcom-ebi2.c
  VMCI: Use memdup_user() rather than duplicating its implementation
  char/rtc: Use of_node_name_eq for node name comparisons
  misc: mic: fix a DMA pool free failure
  ptp: fix an IS_ERR() vs NULL check
  genwqe: Fix size check
  binder: implement binderfs
  binder: fix use-after-free due to ksys_close() during fdget()
  bus: fsl-mc: remove duplicated include files
  bus: fsl-mc: explicitly define the fsl_mc_command endianness
  misc: ti-st: make array read_ver_cmd static, shrinks object size
  ...
-rw-r--r--  Documentation/ABI/testing/sysfs-bus-thunderbolt | 9
-rw-r--r--  Documentation/admin-guide/thunderbolt.rst | 20
-rw-r--r--  Documentation/devicetree/bindings/firmware/intel,stratix10-svc.txt | 57
-rw-r--r--  Documentation/devicetree/bindings/fpga/intel-stratix10-soc-fpga-mgr.txt | 17
-rw-r--r--  Documentation/devicetree/bindings/misc/pvpanic-mmio.txt | 29
-rw-r--r--  Documentation/devicetree/bindings/nvmem/amlogic-efuse.txt | 3
-rw-r--r--  Documentation/driver-api/firmware/other_interfaces.rst | 30
-rw-r--r--  Documentation/trace/index.rst | 1
-rw-r--r--  MAINTAINERS | 4
-rw-r--r--  arch/arm64/boot/dts/altera/socfpga_stratix10.dtsi | 33
-rw-r--r--  drivers/acpi/property.c | 11
-rw-r--r--  drivers/android/Kconfig | 12
-rw-r--r--  drivers/android/Makefile | 1
-rw-r--r--  drivers/android/binder.c | 182
-rw-r--r--  drivers/android/binder_alloc.c | 1
-rw-r--r--  drivers/android/binder_alloc.h | 20
-rw-r--r--  drivers/android/binder_internal.h | 49
-rw-r--r--  drivers/android/binderfs.c | 544
-rw-r--r--  drivers/bus/fsl-mc/dpbp.c | 1
-rw-r--r--  drivers/bus/fsl-mc/dpcon.c | 1
-rw-r--r--  drivers/bus/qcom-ebi2.c | 1
-rw-r--r--  drivers/char/lp.c | 278
-rw-r--r--  drivers/char/rtc.c | 4
-rw-r--r--  drivers/char/tlclk.c | 87
-rw-r--r--  drivers/char/virtio_console.c | 17
-rw-r--r--  drivers/extcon/extcon-max14577.c | 15
-rw-r--r--  drivers/extcon/extcon-max77693.c | 16
-rw-r--r--  drivers/extcon/extcon-max77843.c | 18
-rw-r--r--  drivers/extcon/extcon-max8997.c | 25
-rw-r--r--  drivers/firmware/Kconfig | 12
-rw-r--r--  drivers/firmware/Makefile | 1
-rw-r--r--  drivers/firmware/stratix10-svc.c | 1041
-rw-r--r--  drivers/fpga/Kconfig | 6
-rw-r--r--  drivers/fpga/Makefile | 1
-rw-r--r--  drivers/fpga/altera-cvp.c | 49
-rw-r--r--  drivers/fpga/altera-ps-spi.c | 40
-rw-r--r--  drivers/fpga/dfl-fme-pr.c | 2
-rw-r--r--  drivers/fpga/dfl-fme-region.c | 2
-rw-r--r--  drivers/fpga/of-fpga-region.c | 2
-rw-r--r--  drivers/fpga/stratix10-soc.c | 535
-rw-r--r--  drivers/fpga/zynq-fpga.c | 4
-rw-r--r--  drivers/hv/channel.c | 1
-rw-r--r--  drivers/hv/channel_mgmt.c | 44
-rw-r--r--  drivers/hv/hv.c | 10
-rw-r--r--  drivers/hv/hv_kvp.c | 2
-rw-r--r--  drivers/hv/hv_util.c | 2
-rw-r--r--  drivers/hv/hyperv_vmbus.h | 2
-rw-r--r--  drivers/hwtracing/coresight/coresight-etb10.c | 23
-rw-r--r--  drivers/hwtracing/coresight/coresight-etm3x.c | 12
-rw-r--r--  drivers/hwtracing/coresight/coresight-stm.c | 2
-rw-r--r--  drivers/hwtracing/coresight/coresight-tmc-etf.c | 2
-rw-r--r--  drivers/hwtracing/intel_th/msu.c | 3
-rw-r--r--  drivers/hwtracing/stm/policy.c | 12
-rw-r--r--  drivers/iommu/dmar.c | 25
-rw-r--r--  drivers/iommu/intel-iommu.c | 56
-rw-r--r--  drivers/misc/Kconfig | 8
-rw-r--r--  drivers/misc/Makefile | 3
-rw-r--r--  drivers/misc/altera-stapl/altera.c | 3
-rw-r--r--  drivers/misc/genwqe/card_debugfs.c | 85
-rw-r--r--  drivers/misc/genwqe/card_utils.c | 2
-rw-r--r--  drivers/misc/mei/Makefile | 1
-rw-r--r--  drivers/misc/mei/client.c | 91
-rw-r--r--  drivers/misc/mei/dma-ring.c | 269
-rw-r--r--  drivers/misc/mei/hbm.c | 92
-rw-r--r--  drivers/misc/mei/hbm.h | 2
-rw-r--r--  drivers/misc/mei/hw-me.c | 6
-rw-r--r--  drivers/misc/mei/hw.h | 29
-rw-r--r--  drivers/misc/mei/init.c | 2
-rw-r--r--  drivers/misc/mei/interrupt.c | 41
-rw-r--r--  drivers/misc/mei/mei_dev.h | 26
-rw-r--r--  drivers/misc/mei/pci-me.c | 4
-rw-r--r--  drivers/misc/mic/card/mic_debugfs.c | 24
-rw-r--r--  drivers/misc/mic/cosm/cosm_debugfs.c | 39
-rw-r--r--  drivers/misc/mic/host/mic_debugfs.c | 62
-rw-r--r--  drivers/misc/mic/scif/scif_debugfs.c | 44
-rw-r--r--  drivers/misc/mic/scif/scif_fence.c | 22
-rw-r--r--  drivers/misc/mic/scif/scif_rma.h | 13
-rw-r--r--  drivers/misc/mic/vop/vop_debugfs.c | 40
-rw-r--r--  drivers/misc/pvpanic.c | 192
-rw-r--r--  drivers/misc/ti-st/st_kim.c | 36
-rw-r--r--  drivers/misc/vexpress-syscfg.c | 2
-rw-r--r--  drivers/misc/vmw_balloon.c | 13
-rw-r--r--  drivers/misc/vmw_vmci/vmci_host.c | 18
-rw-r--r--  drivers/mtd/Kconfig | 1
-rw-r--r--  drivers/mtd/mtdcore.c | 56
-rw-r--r--  drivers/nvmem/core.c | 31
-rw-r--r--  drivers/nvmem/meson-efuse.c | 29
-rw-r--r--  drivers/parport/parport_pc.c | 2
-rw-r--r--  drivers/pci/pci-acpi.c | 19
-rw-r--r--  drivers/pci/probe.c | 15
-rw-r--r--  drivers/platform/x86/Kconfig | 8
-rw-r--r--  drivers/platform/x86/Makefile | 1
-rw-r--r--  drivers/platform/x86/pvpanic.c | 124
-rw-r--r--  drivers/pps/clients/pps-gpio.c | 4
-rw-r--r--  drivers/pps/clients/pps-ktimer.c | 4
-rw-r--r--  drivers/pps/clients/pps-ldisc.c | 4
-rw-r--r--  drivers/pps/clients/pps_parport.c | 2
-rw-r--r--  drivers/pps/kapi.c | 5
-rw-r--r--  drivers/ptp/ptp_clock.c | 4
-rw-r--r--  drivers/slimbus/Kconfig | 5
-rw-r--r--  drivers/slimbus/qcom-ctrl.c | 6
-rw-r--r--  drivers/slimbus/qcom-ngd-ctrl.c | 7
-rw-r--r--  drivers/soundwire/intel.c | 4
-rw-r--r--  drivers/thunderbolt/domain.c | 17
-rw-r--r--  drivers/uio/uio.c | 19
-rw-r--r--  drivers/uio/uio_fsl_elbc_gpcm.c | 6
-rw-r--r--  drivers/virt/vboxguest/vboxguest_core.c | 2
-rw-r--r--  fs/file.c | 29
-rw-r--r--  include/linux/dmar.h | 8
-rw-r--r--  include/linux/fdtable.h | 1
-rw-r--r--  include/linux/firmware/intel/stratix10-smc.h | 312
-rw-r--r--  include/linux/firmware/intel/stratix10-svc-client.h | 217
-rw-r--r--  include/linux/fsl/mc.h | 12
-rw-r--r--  include/linux/hyperv.h | 17
-rw-r--r--  include/linux/mtd/mtd.h | 2
-rw-r--r--  include/linux/nvmem-provider.h | 11
-rw-r--r--  include/linux/pci.h | 8
-rw-r--r--  include/uapi/linux/android/binder_ctl.h | 35
-rw-r--r--  include/uapi/linux/magic.h | 1
-rw-r--r--  tools/Makefile | 7
-rw-r--r--  tools/firmware/Makefile | 13
-rw-r--r--  tools/firmware/ihex2fw.c | 281
-rw-r--r--  tools/hv/hv_kvp_daemon.c | 15
123 files changed, 4973 insertions, 917 deletions
diff --git a/Documentation/ABI/testing/sysfs-bus-thunderbolt b/Documentation/ABI/testing/sysfs-bus-thunderbolt
index 151584a1f950..b21fba14689b 100644
--- a/Documentation/ABI/testing/sysfs-bus-thunderbolt
+++ b/Documentation/ABI/testing/sysfs-bus-thunderbolt
@@ -21,6 +21,15 @@ Description: Holds a comma separated list of device unique_ids that
21 If a device is authorized automatically during boot its 21 If a device is authorized automatically during boot its
22 boot attribute is set to 1. 22 boot attribute is set to 1.
23 23
24What: /sys/bus/thunderbolt/devices/.../domainX/iommu_dma_protection
25Date: Mar 2019
26KernelVersion: 4.21
27Contact: thunderbolt-software@lists.01.org
28Description: This attribute tells whether the system uses IOMMU
29 for DMA protection. Value of 1 means IOMMU is used 0 means
30 it is not (DMA protection is solely based on Thunderbolt
31 security levels).
32
24What: /sys/bus/thunderbolt/devices/.../domainX/security 33What: /sys/bus/thunderbolt/devices/.../domainX/security
25Date: Sep 2017 34Date: Sep 2017
26KernelVersion: 4.13 35KernelVersion: 4.13
diff --git a/Documentation/admin-guide/thunderbolt.rst b/Documentation/admin-guide/thunderbolt.rst
index 35fccba6a9a6..898ad78f3cc7 100644
--- a/Documentation/admin-guide/thunderbolt.rst
+++ b/Documentation/admin-guide/thunderbolt.rst
@@ -133,6 +133,26 @@ If the user still wants to connect the device they can either approve
133the device without a key or write a new key and write 1 to the 133the device without a key or write a new key and write 1 to the
134``authorized`` file to get the new key stored on the device NVM. 134``authorized`` file to get the new key stored on the device NVM.
135 135
136DMA protection utilizing IOMMU
137------------------------------
138Recent systems from 2018 and forward with Thunderbolt ports may natively
139support IOMMU. This means that Thunderbolt security is handled by an IOMMU
140so connected devices cannot access memory regions outside of what is
141allocated for them by drivers. When Linux is running on such a system it
142automatically enables the IOMMU if the user has not already enabled it. These
143systems can be identified by reading ``1`` from
144``/sys/bus/thunderbolt/devices/domainX/iommu_dma_protection`` attribute.
145
146The driver does not do anything special in this case but because DMA
147protection is handled by the IOMMU, security levels (if set) are
148redundant. For this reason some systems ship with security level set to
149``none``. Other systems have security level set to ``user`` in order to
150support downgrade to older OS, so users who want to automatically
151authorize devices when IOMMU DMA protection is enabled can use the
152following ``udev`` rule::
153
154 ACTION=="add", SUBSYSTEM=="thunderbolt", ATTRS{iommu_dma_protection}=="1", ATTR{authorized}=="0", ATTR{authorized}="1"
155
136Upgrading NVM on Thunderbolt device or host 156Upgrading NVM on Thunderbolt device or host
137------------------------------------------- 157-------------------------------------------
138Since most of the functionality is handled in firmware running on a 158Since most of the functionality is handled in firmware running on a
diff --git a/Documentation/devicetree/bindings/firmware/intel,stratix10-svc.txt b/Documentation/devicetree/bindings/firmware/intel,stratix10-svc.txt
new file mode 100644
index 000000000000..1fa66065acc6
--- /dev/null
+++ b/Documentation/devicetree/bindings/firmware/intel,stratix10-svc.txt
@@ -0,0 +1,57 @@
1Intel Service Layer Driver for Stratix10 SoC
2============================================
3Intel Stratix10 SoC is composed of a 64 bit quad-core ARM Cortex A53 hard
4processor system (HPS) and Secure Device Manager (SDM). When the FPGA is
5configured from HPS, there needs to be a way for HPS to notify SDM the
6location and size of the configuration data. Then SDM will get the
7configuration data from that location and perform the FPGA configuration.
8
9To meet the security needs of the whole system and to support virtual machines
10requesting communication with SDM, only the secure world of software (EL3,
11Exception Level 3) can interface with SDM. All software entities running at
12other exception levels must channel through the EL3 software whenever they
13need service from SDM.
14
15The Intel Stratix10 service layer driver, running at privileged exception
16level (EL1, Exception Level 1), interfaces with the service providers and
17provides the services for FPGA configuration, QSPI, Crypto and warm reset. The
18service layer driver also manages secure monitor calls (SMC) to communicate
19with secure monitor code running in EL3.
20
21Required properties:
22-------------------
23The svc node has the following mandatory properties and must be located under
24the firmware node.
25
26- compatible: "intel,stratix10-svc"
27- method: smc or hvc
28 smc - Secure Monitor Call
29 hvc - Hypervisor Call
30- memory-region:
31 phandle to the reserved memory node. See
32 Documentation/devicetree/bindings/reserved-memory/reserved-memory.txt
33 for details
34
35Example:
36-------
37
38 reserved-memory {
39 #address-cells = <2>;
40 #size-cells = <2>;
41 ranges;
42
43 service_reserved: svcbuffer@0 {
44 compatible = "shared-dma-pool";
45 reg = <0x0 0x0 0x0 0x1000000>;
46 alignment = <0x1000>;
47 no-map;
48 };
49 };
50
51 firmware {
52 svc {
53 compatible = "intel,stratix10-svc";
54 method = "smc";
55 memory-region = <&service_reserved>;
56 };
57 };
diff --git a/Documentation/devicetree/bindings/fpga/intel-stratix10-soc-fpga-mgr.txt b/Documentation/devicetree/bindings/fpga/intel-stratix10-soc-fpga-mgr.txt
new file mode 100644
index 000000000000..6e03f79287fb
--- /dev/null
+++ b/Documentation/devicetree/bindings/fpga/intel-stratix10-soc-fpga-mgr.txt
@@ -0,0 +1,17 @@
1Intel Stratix10 SoC FPGA Manager
2
3Required properties:
4The fpga_mgr node has the following mandatory property and must be located
5under the firmware/svc node.
6
7- compatible : should contain "intel,stratix10-soc-fpga-mgr"
8
9Example:
10
11 firmware {
12 svc {
13 fpga_mgr: fpga-mgr {
14 compatible = "intel,stratix10-soc-fpga-mgr";
15 };
16 };
17 };
diff --git a/Documentation/devicetree/bindings/misc/pvpanic-mmio.txt b/Documentation/devicetree/bindings/misc/pvpanic-mmio.txt
new file mode 100644
index 000000000000..985e90736780
--- /dev/null
+++ b/Documentation/devicetree/bindings/misc/pvpanic-mmio.txt
@@ -0,0 +1,29 @@
1* QEMU PVPANIC MMIO Configuration bindings
2
3QEMU's emulation / virtualization targets provide the following PVPANIC
4MMIO Configuration interface on the "virt" machine.
5type:
6
7- a read-write, 16-bit wide data register.
8
9QEMU exposes the data register to guests as memory mapped registers.
10
11Required properties:
12
13- compatible: "qemu,pvpanic-mmio".
14- reg: the MMIO region used by the device.
15 * Bytes 0x0 Write panic event to the reg when guest OS panics.
16 * Bytes 0x1 Reserved.
17
18Example:
19
20/ {
21 #size-cells = <0x2>;
22 #address-cells = <0x2>;
23
24 pvpanic-mmio@9060000 {
25 compatible = "qemu,pvpanic-mmio";
26 reg = <0x0 0x9060000 0x0 0x2>;
27 };
28};
29
diff --git a/Documentation/devicetree/bindings/nvmem/amlogic-efuse.txt b/Documentation/devicetree/bindings/nvmem/amlogic-efuse.txt
index e3298e18de26..2e0723ab3384 100644
--- a/Documentation/devicetree/bindings/nvmem/amlogic-efuse.txt
+++ b/Documentation/devicetree/bindings/nvmem/amlogic-efuse.txt
@@ -2,6 +2,8 @@
2 2
3Required properties: 3Required properties:
4- compatible: should be "amlogic,meson-gxbb-efuse" 4- compatible: should be "amlogic,meson-gxbb-efuse"
5- clocks: phandle to the efuse peripheral clock provided by the
6 clock controller.
5 7
6= Data cells = 8= Data cells =
7Are child nodes of eFuse, bindings of which as described in 9Are child nodes of eFuse, bindings of which as described in
@@ -11,6 +13,7 @@ Example:
11 13
12 efuse: efuse { 14 efuse: efuse {
13 compatible = "amlogic,meson-gxbb-efuse"; 15 compatible = "amlogic,meson-gxbb-efuse";
16 clocks = <&clkc CLKID_EFUSE>;
14 #address-cells = <1>; 17 #address-cells = <1>;
15 #size-cells = <1>; 18 #size-cells = <1>;
16 19
diff --git a/Documentation/driver-api/firmware/other_interfaces.rst b/Documentation/driver-api/firmware/other_interfaces.rst
index 36c47b1e9824..a4ac54b5fd79 100644
--- a/Documentation/driver-api/firmware/other_interfaces.rst
+++ b/Documentation/driver-api/firmware/other_interfaces.rst
@@ -13,3 +13,33 @@ EDD Interfaces
13.. kernel-doc:: drivers/firmware/edd.c 13.. kernel-doc:: drivers/firmware/edd.c
14 :internal: 14 :internal:
15 15
16Intel Stratix10 SoC Service Layer
17---------------------------------
18Some features of the Intel Stratix10 SoC require a level of privilege
19higher than the kernel is granted. Such secure features include
20FPGA programming. In terms of the ARMv8 architecture, the kernel runs
21at Exception Level 1 (EL1), while access to these features requires
22Exception Level 3 (EL3).
23
24The Intel Stratix10 SoC service layer provides an in-kernel API for
25drivers to request access to the secure features. The requests are queued
26and processed one by one. ARM’s SMCCC is used to pass the execution
27of the requests on to a secure monitor (EL3).
28
29.. kernel-doc:: include/linux/firmware/intel/stratix10-svc-client.h
30 :functions: stratix10_svc_command_code
31
32.. kernel-doc:: include/linux/firmware/intel/stratix10-svc-client.h
33 :functions: stratix10_svc_client_msg
34
35.. kernel-doc:: include/linux/firmware/intel/stratix10-svc-client.h
36 :functions: stratix10_svc_command_reconfig_payload
37
38.. kernel-doc:: include/linux/firmware/intel/stratix10-svc-client.h
39 :functions: stratix10_svc_cb_data
40
41.. kernel-doc:: include/linux/firmware/intel/stratix10-svc-client.h
42 :functions: stratix10_svc_client
43
44.. kernel-doc:: drivers/firmware/stratix10-svc.c
45 :export:
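
For orientation, the sketch below shows how a kernel driver might use this service layer API. It is not part of the patch: the helpers from drivers/firmware/stratix10-svc.c and the client types from include/linux/firmware/intel/stratix10-svc-client.h referenced above are assumed, and COMMAND_RECONFIG_STATUS is used only as an example command.

/*
 * Illustrative only -- a minimal service-layer client sketch, assuming the
 * exported helpers from drivers/firmware/stratix10-svc.c and the client
 * types from include/linux/firmware/intel/stratix10-svc-client.h.
 */
#include <linux/completion.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/firmware/intel/stratix10-svc-client.h>

struct demo_priv {
	struct stratix10_svc_client client;
	struct stratix10_svc_chan *chan;
	struct completion done;
};

/* Callback invoked by the service layer once EL3 has answered the request. */
static void demo_receive_cb(struct stratix10_svc_client *client,
			    struct stratix10_svc_cb_data *data)
{
	struct demo_priv *priv = client->priv;

	dev_info(client->dev, "service returned status 0x%x\n", data->status);
	complete(&priv->done);
}

/* Queue a single request on the FPGA service channel and wait for the reply. */
static int demo_query_status(struct device *dev, struct demo_priv *priv)
{
	struct stratix10_svc_client_msg msg = {
		.command = COMMAND_RECONFIG_STATUS,	/* example command */
	};
	int ret;

	priv->client.dev = dev;
	priv->client.receive_cb = demo_receive_cb;
	priv->client.priv = priv;
	init_completion(&priv->done);

	priv->chan = stratix10_svc_request_channel_byname(&priv->client,
							  SVC_CLIENT_FPGA);
	if (IS_ERR(priv->chan))
		return PTR_ERR(priv->chan);

	ret = stratix10_svc_send(priv->chan, &msg);
	if (!ret)
		wait_for_completion(&priv->done);

	stratix10_svc_done(priv->chan);
	stratix10_svc_free_channel(priv->chan);
	return ret;
}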
diff --git a/Documentation/trace/index.rst b/Documentation/trace/index.rst
index 306997941ba1..6b4107cf4b98 100644
--- a/Documentation/trace/index.rst
+++ b/Documentation/trace/index.rst
@@ -22,3 +22,4 @@ Linux Tracing Technologies
22 hwlat_detector 22 hwlat_detector
23 intel_th 23 intel_th
24 stm 24 stm
25 sys-t
diff --git a/MAINTAINERS b/MAINTAINERS
index 87ae13cd288c..7d37f8a4743c 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -958,6 +958,7 @@ M: Arve Hjønnevåg <arve@android.com>
958M: Todd Kjos <tkjos@android.com> 958M: Todd Kjos <tkjos@android.com>
959M: Martijn Coenen <maco@android.com> 959M: Martijn Coenen <maco@android.com>
960M: Joel Fernandes <joel@joelfernandes.org> 960M: Joel Fernandes <joel@joelfernandes.org>
961M: Christian Brauner <christian@brauner.io>
961T: git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/staging.git 962T: git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/staging.git
962L: devel@driverdev.osuosl.org 963L: devel@driverdev.osuosl.org
963S: Supported 964S: Supported
@@ -1442,6 +1443,7 @@ F: arch/arm/mach-ep93xx/micro9.c
1442 1443
1443ARM/CORESIGHT FRAMEWORK AND DRIVERS 1444ARM/CORESIGHT FRAMEWORK AND DRIVERS
1444M: Mathieu Poirier <mathieu.poirier@linaro.org> 1445M: Mathieu Poirier <mathieu.poirier@linaro.org>
1446R: Suzuki K Poulose <suzuki.poulose@arm.com>
1445L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) 1447L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
1446S: Maintained 1448S: Maintained
1447F: drivers/hwtracing/coresight/* 1449F: drivers/hwtracing/coresight/*
@@ -16242,7 +16244,7 @@ F: drivers/vme/
16242F: include/linux/vme* 16244F: include/linux/vme*
16243 16245
16244VMWARE BALLOON DRIVER 16246VMWARE BALLOON DRIVER
16245M: Xavier Deguillard <xdeguillard@vmware.com> 16247M: Julien Freche <jfreche@vmware.com>
16246M: Nadav Amit <namit@vmware.com> 16248M: Nadav Amit <namit@vmware.com>
16247M: "VMware, Inc." <pv-drivers@vmware.com> 16249M: "VMware, Inc." <pv-drivers@vmware.com>
16248L: linux-kernel@vger.kernel.org 16250L: linux-kernel@vger.kernel.org
diff --git a/arch/arm64/boot/dts/altera/socfpga_stratix10.dtsi b/arch/arm64/boot/dts/altera/socfpga_stratix10.dtsi
index fef7351e9f67..a20df0d9c96d 100644
--- a/arch/arm64/boot/dts/altera/socfpga_stratix10.dtsi
+++ b/arch/arm64/boot/dts/altera/socfpga_stratix10.dtsi
@@ -24,6 +24,19 @@
24 #address-cells = <2>; 24 #address-cells = <2>;
25 #size-cells = <2>; 25 #size-cells = <2>;
26 26
27 reserved-memory {
28 #address-cells = <2>;
29 #size-cells = <2>;
30 ranges;
31
32 service_reserved: svcbuffer@0 {
33 compatible = "shared-dma-pool";
34 reg = <0x0 0x0 0x0 0x1000000>;
35 alignment = <0x1000>;
36 no-map;
37 };
38 };
39
27 cpus { 40 cpus {
28 #address-cells = <1>; 41 #address-cells = <1>;
29 #size-cells = <0>; 42 #size-cells = <0>;
@@ -93,6 +106,14 @@
93 interrupt-parent = <&intc>; 106 interrupt-parent = <&intc>;
94 ranges = <0 0 0 0xffffffff>; 107 ranges = <0 0 0 0xffffffff>;
95 108
109 base_fpga_region {
110 #address-cells = <0x1>;
111 #size-cells = <0x1>;
112
113 compatible = "fpga-region";
114 fpga-mgr = <&fpga_mgr>;
115 };
116
96 clkmgr: clock-controller@ffd10000 { 117 clkmgr: clock-controller@ffd10000 {
97 compatible = "intel,stratix10-clkmgr"; 118 compatible = "intel,stratix10-clkmgr";
98 reg = <0xffd10000 0x1000>; 119 reg = <0xffd10000 0x1000>;
@@ -537,5 +558,17 @@
537 558
538 status = "disabled"; 559 status = "disabled";
539 }; 560 };
561
562 firmware {
563 svc {
564 compatible = "intel,stratix10-svc";
565 method = "smc";
566 memory-region = <&service_reserved>;
567
568 fpga_mgr: fpga-mgr {
569 compatible = "intel,stratix10-soc-fpga-mgr";
570 };
571 };
572 };
540 }; 573 };
541}; 574};
diff --git a/drivers/acpi/property.c b/drivers/acpi/property.c
index 8c7c4583b52d..77abe0ec4043 100644
--- a/drivers/acpi/property.c
+++ b/drivers/acpi/property.c
@@ -24,6 +24,14 @@ static int acpi_data_get_property_array(const struct acpi_device_data *data,
24 acpi_object_type type, 24 acpi_object_type type,
25 const union acpi_object **obj); 25 const union acpi_object **obj);
26 26
27/*
28 * The GUIDs here are made equivalent to each other in order to avoid extra
29 * complexity in the properties handling code, with the caveat that the
30 * kernel will accept certain combinations of GUID and properties that are
31 * not defined without a warning. For instance if any of the properties
32 * from different GUID appear in a property list of another, it will be
33 * accepted by the kernel. Firmware validation tools should catch these.
34 */
27static const guid_t prp_guids[] = { 35static const guid_t prp_guids[] = {
28 /* ACPI _DSD device properties GUID: daffd814-6eba-4d8c-8a91-bc9bbf4aa301 */ 36 /* ACPI _DSD device properties GUID: daffd814-6eba-4d8c-8a91-bc9bbf4aa301 */
29 GUID_INIT(0xdaffd814, 0x6eba, 0x4d8c, 37 GUID_INIT(0xdaffd814, 0x6eba, 0x4d8c,
@@ -31,6 +39,9 @@ static const guid_t prp_guids[] = {
31 /* Hotplug in D3 GUID: 6211e2c0-58a3-4af3-90e1-927a4e0c55a4 */ 39 /* Hotplug in D3 GUID: 6211e2c0-58a3-4af3-90e1-927a4e0c55a4 */
32 GUID_INIT(0x6211e2c0, 0x58a3, 0x4af3, 40 GUID_INIT(0x6211e2c0, 0x58a3, 0x4af3,
33 0x90, 0xe1, 0x92, 0x7a, 0x4e, 0x0c, 0x55, 0xa4), 41 0x90, 0xe1, 0x92, 0x7a, 0x4e, 0x0c, 0x55, 0xa4),
42 /* External facing port GUID: efcc06cc-73ac-4bc3-bff0-76143807c389 */
43 GUID_INIT(0xefcc06cc, 0x73ac, 0x4bc3,
44 0xbf, 0xf0, 0x76, 0x14, 0x38, 0x07, 0xc3, 0x89),
34}; 45};
35 46
36static const guid_t ads_guid = 47static const guid_t ads_guid =
diff --git a/drivers/android/Kconfig b/drivers/android/Kconfig
index 51e8250d113f..4c190f8d1f4c 100644
--- a/drivers/android/Kconfig
+++ b/drivers/android/Kconfig
@@ -20,6 +20,18 @@ config ANDROID_BINDER_IPC
20 Android process, using Binder to identify, invoke and pass arguments 20 Android process, using Binder to identify, invoke and pass arguments
21 between said processes. 21 between said processes.
22 22
23config ANDROID_BINDERFS
24 bool "Android Binderfs filesystem"
25 depends on ANDROID_BINDER_IPC
26 default n
27 ---help---
28 Binderfs is a pseudo-filesystem for the Android Binder IPC driver
29 which can be mounted per IPC namespace, allowing multiple
30 instances of Android to run.
31 Each binderfs mount initially only contains a binder-control device.
32 It can be used to dynamically allocate new binder IPC devices via
33 ioctls.
34
23config ANDROID_BINDER_DEVICES 35config ANDROID_BINDER_DEVICES
24 string "Android Binder devices" 36 string "Android Binder devices"
25 depends on ANDROID_BINDER_IPC 37 depends on ANDROID_BINDER_IPC
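
As the new Kconfig help above notes, binder devices are allocated dynamically through ioctls on the binder-control node of a binderfs mount. A hypothetical userspace sketch follows; it assumes a binderfs instance already mounted at /dev/binderfs and uses BINDER_CTL_ADD and struct binderfs_device from the new include/uapi/linux/android/binder_ctl.h added by this series:

/*
 * Hypothetical userspace example -- not part of this patch. Assumes a
 * binderfs instance is already mounted at /dev/binderfs, e.g.:
 *   mount -t binder binder /dev/binderfs
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/android/binder_ctl.h>

int main(void)
{
	struct binderfs_device device;
	int fd, ret;

	memset(&device, 0, sizeof(device));
	/* Name of the binder device we want the kernel to create. */
	strncpy(device.name, "my-binder", sizeof(device.name) - 1);

	/* binder-control is created automatically in every binderfs mount. */
	fd = open("/dev/binderfs/binder-control", O_RDONLY | O_CLOEXEC);
	if (fd < 0) {
		perror("open binder-control");
		return 1;
	}

	ret = ioctl(fd, BINDER_CTL_ADD, &device);
	close(fd);
	if (ret < 0) {
		perror("BINDER_CTL_ADD");
		return 1;
	}

	printf("allocated %s with device number %u:%u\n", device.name,
	       (unsigned int)device.major, (unsigned int)device.minor);
	return 0;
}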
diff --git a/drivers/android/Makefile b/drivers/android/Makefile
index a01254c43ee3..c7856e3200da 100644
--- a/drivers/android/Makefile
+++ b/drivers/android/Makefile
@@ -1,4 +1,5 @@
1ccflags-y += -I$(src) # needed for trace events 1ccflags-y += -I$(src) # needed for trace events
2 2
3obj-$(CONFIG_ANDROID_BINDERFS) += binderfs.o
3obj-$(CONFIG_ANDROID_BINDER_IPC) += binder.o binder_alloc.o 4obj-$(CONFIG_ANDROID_BINDER_IPC) += binder.o binder_alloc.o
4obj-$(CONFIG_ANDROID_BINDER_IPC_SELFTEST) += binder_alloc_selftest.o 5obj-$(CONFIG_ANDROID_BINDER_IPC_SELFTEST) += binder_alloc_selftest.o
diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index 9f1000d2a40c..cdfc87629efb 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -72,12 +72,14 @@
72#include <linux/spinlock.h> 72#include <linux/spinlock.h>
73#include <linux/ratelimit.h> 73#include <linux/ratelimit.h>
74#include <linux/syscalls.h> 74#include <linux/syscalls.h>
75#include <linux/task_work.h>
75 76
76#include <uapi/linux/android/binder.h> 77#include <uapi/linux/android/binder.h>
77 78
78#include <asm/cacheflush.h> 79#include <asm/cacheflush.h>
79 80
80#include "binder_alloc.h" 81#include "binder_alloc.h"
82#include "binder_internal.h"
81#include "binder_trace.h" 83#include "binder_trace.h"
82 84
83static HLIST_HEAD(binder_deferred_list); 85static HLIST_HEAD(binder_deferred_list);
@@ -94,22 +96,8 @@ static struct dentry *binder_debugfs_dir_entry_root;
94static struct dentry *binder_debugfs_dir_entry_proc; 96static struct dentry *binder_debugfs_dir_entry_proc;
95static atomic_t binder_last_id; 97static atomic_t binder_last_id;
96 98
97#define BINDER_DEBUG_ENTRY(name) \ 99static int proc_show(struct seq_file *m, void *unused);
98static int binder_##name##_open(struct inode *inode, struct file *file) \ 100DEFINE_SHOW_ATTRIBUTE(proc);
99{ \
100 return single_open(file, binder_##name##_show, inode->i_private); \
101} \
102\
103static const struct file_operations binder_##name##_fops = { \
104 .owner = THIS_MODULE, \
105 .open = binder_##name##_open, \
106 .read = seq_read, \
107 .llseek = seq_lseek, \
108 .release = single_release, \
109}
110
111static int binder_proc_show(struct seq_file *m, void *unused);
112BINDER_DEBUG_ENTRY(proc);
113 101
114/* This is only defined in include/asm-arm/sizes.h */ 102/* This is only defined in include/asm-arm/sizes.h */
115#ifndef SZ_1K 103#ifndef SZ_1K
@@ -262,20 +250,6 @@ static struct binder_transaction_log_entry *binder_transaction_log_add(
262 return e; 250 return e;
263} 251}
264 252
265struct binder_context {
266 struct binder_node *binder_context_mgr_node;
267 struct mutex context_mgr_node_lock;
268
269 kuid_t binder_context_mgr_uid;
270 const char *name;
271};
272
273struct binder_device {
274 struct hlist_node hlist;
275 struct miscdevice miscdev;
276 struct binder_context context;
277};
278
279/** 253/**
280 * struct binder_work - work enqueued on a worklist 254 * struct binder_work - work enqueued on a worklist
281 * @entry: node enqueued on list 255 * @entry: node enqueued on list
@@ -660,6 +634,7 @@ struct binder_transaction {
660#define binder_proc_lock(proc) _binder_proc_lock(proc, __LINE__) 634#define binder_proc_lock(proc) _binder_proc_lock(proc, __LINE__)
661static void 635static void
662_binder_proc_lock(struct binder_proc *proc, int line) 636_binder_proc_lock(struct binder_proc *proc, int line)
637 __acquires(&proc->outer_lock)
663{ 638{
664 binder_debug(BINDER_DEBUG_SPINLOCKS, 639 binder_debug(BINDER_DEBUG_SPINLOCKS,
665 "%s: line=%d\n", __func__, line); 640 "%s: line=%d\n", __func__, line);
@@ -675,6 +650,7 @@ _binder_proc_lock(struct binder_proc *proc, int line)
675#define binder_proc_unlock(_proc) _binder_proc_unlock(_proc, __LINE__) 650#define binder_proc_unlock(_proc) _binder_proc_unlock(_proc, __LINE__)
676static void 651static void
677_binder_proc_unlock(struct binder_proc *proc, int line) 652_binder_proc_unlock(struct binder_proc *proc, int line)
653 __releases(&proc->outer_lock)
678{ 654{
679 binder_debug(BINDER_DEBUG_SPINLOCKS, 655 binder_debug(BINDER_DEBUG_SPINLOCKS,
680 "%s: line=%d\n", __func__, line); 656 "%s: line=%d\n", __func__, line);
@@ -690,6 +666,7 @@ _binder_proc_unlock(struct binder_proc *proc, int line)
690#define binder_inner_proc_lock(proc) _binder_inner_proc_lock(proc, __LINE__) 666#define binder_inner_proc_lock(proc) _binder_inner_proc_lock(proc, __LINE__)
691static void 667static void
692_binder_inner_proc_lock(struct binder_proc *proc, int line) 668_binder_inner_proc_lock(struct binder_proc *proc, int line)
669 __acquires(&proc->inner_lock)
693{ 670{
694 binder_debug(BINDER_DEBUG_SPINLOCKS, 671 binder_debug(BINDER_DEBUG_SPINLOCKS,
695 "%s: line=%d\n", __func__, line); 672 "%s: line=%d\n", __func__, line);
@@ -705,6 +682,7 @@ _binder_inner_proc_lock(struct binder_proc *proc, int line)
705#define binder_inner_proc_unlock(proc) _binder_inner_proc_unlock(proc, __LINE__) 682#define binder_inner_proc_unlock(proc) _binder_inner_proc_unlock(proc, __LINE__)
706static void 683static void
707_binder_inner_proc_unlock(struct binder_proc *proc, int line) 684_binder_inner_proc_unlock(struct binder_proc *proc, int line)
685 __releases(&proc->inner_lock)
708{ 686{
709 binder_debug(BINDER_DEBUG_SPINLOCKS, 687 binder_debug(BINDER_DEBUG_SPINLOCKS,
710 "%s: line=%d\n", __func__, line); 688 "%s: line=%d\n", __func__, line);
@@ -720,6 +698,7 @@ _binder_inner_proc_unlock(struct binder_proc *proc, int line)
720#define binder_node_lock(node) _binder_node_lock(node, __LINE__) 698#define binder_node_lock(node) _binder_node_lock(node, __LINE__)
721static void 699static void
722_binder_node_lock(struct binder_node *node, int line) 700_binder_node_lock(struct binder_node *node, int line)
701 __acquires(&node->lock)
723{ 702{
724 binder_debug(BINDER_DEBUG_SPINLOCKS, 703 binder_debug(BINDER_DEBUG_SPINLOCKS,
725 "%s: line=%d\n", __func__, line); 704 "%s: line=%d\n", __func__, line);
@@ -735,6 +714,7 @@ _binder_node_lock(struct binder_node *node, int line)
735#define binder_node_unlock(node) _binder_node_unlock(node, __LINE__) 714#define binder_node_unlock(node) _binder_node_unlock(node, __LINE__)
736static void 715static void
737_binder_node_unlock(struct binder_node *node, int line) 716_binder_node_unlock(struct binder_node *node, int line)
717 __releases(&node->lock)
738{ 718{
739 binder_debug(BINDER_DEBUG_SPINLOCKS, 719 binder_debug(BINDER_DEBUG_SPINLOCKS,
740 "%s: line=%d\n", __func__, line); 720 "%s: line=%d\n", __func__, line);
@@ -751,12 +731,16 @@ _binder_node_unlock(struct binder_node *node, int line)
751#define binder_node_inner_lock(node) _binder_node_inner_lock(node, __LINE__) 731#define binder_node_inner_lock(node) _binder_node_inner_lock(node, __LINE__)
752static void 732static void
753_binder_node_inner_lock(struct binder_node *node, int line) 733_binder_node_inner_lock(struct binder_node *node, int line)
734 __acquires(&node->lock) __acquires(&node->proc->inner_lock)
754{ 735{
755 binder_debug(BINDER_DEBUG_SPINLOCKS, 736 binder_debug(BINDER_DEBUG_SPINLOCKS,
756 "%s: line=%d\n", __func__, line); 737 "%s: line=%d\n", __func__, line);
757 spin_lock(&node->lock); 738 spin_lock(&node->lock);
758 if (node->proc) 739 if (node->proc)
759 binder_inner_proc_lock(node->proc); 740 binder_inner_proc_lock(node->proc);
741 else
742 /* annotation for sparse */
743 __acquire(&node->proc->inner_lock);
760} 744}
761 745
762/** 746/**
@@ -768,6 +752,7 @@ _binder_node_inner_lock(struct binder_node *node, int line)
768#define binder_node_inner_unlock(node) _binder_node_inner_unlock(node, __LINE__) 752#define binder_node_inner_unlock(node) _binder_node_inner_unlock(node, __LINE__)
769static void 753static void
770_binder_node_inner_unlock(struct binder_node *node, int line) 754_binder_node_inner_unlock(struct binder_node *node, int line)
755 __releases(&node->lock) __releases(&node->proc->inner_lock)
771{ 756{
772 struct binder_proc *proc = node->proc; 757 struct binder_proc *proc = node->proc;
773 758
@@ -775,6 +760,9 @@ _binder_node_inner_unlock(struct binder_node *node, int line)
775 "%s: line=%d\n", __func__, line); 760 "%s: line=%d\n", __func__, line);
776 if (proc) 761 if (proc)
777 binder_inner_proc_unlock(proc); 762 binder_inner_proc_unlock(proc);
763 else
764 /* annotation for sparse */
765 __release(&node->proc->inner_lock);
778 spin_unlock(&node->lock); 766 spin_unlock(&node->lock);
779} 767}
780 768
@@ -1384,10 +1372,14 @@ static void binder_dec_node_tmpref(struct binder_node *node)
1384 binder_node_inner_lock(node); 1372 binder_node_inner_lock(node);
1385 if (!node->proc) 1373 if (!node->proc)
1386 spin_lock(&binder_dead_nodes_lock); 1374 spin_lock(&binder_dead_nodes_lock);
1375 else
1376 __acquire(&binder_dead_nodes_lock);
1387 node->tmp_refs--; 1377 node->tmp_refs--;
1388 BUG_ON(node->tmp_refs < 0); 1378 BUG_ON(node->tmp_refs < 0);
1389 if (!node->proc) 1379 if (!node->proc)
1390 spin_unlock(&binder_dead_nodes_lock); 1380 spin_unlock(&binder_dead_nodes_lock);
1381 else
1382 __release(&binder_dead_nodes_lock);
1391 /* 1383 /*
1392 * Call binder_dec_node() to check if all refcounts are 0 1384 * Call binder_dec_node() to check if all refcounts are 0
1393 * and cleanup is needed. Calling with strong=0 and internal=1 1385 * and cleanup is needed. Calling with strong=0 and internal=1
@@ -1890,18 +1882,22 @@ static struct binder_thread *binder_get_txn_from(
1890 */ 1882 */
1891static struct binder_thread *binder_get_txn_from_and_acq_inner( 1883static struct binder_thread *binder_get_txn_from_and_acq_inner(
1892 struct binder_transaction *t) 1884 struct binder_transaction *t)
1885 __acquires(&t->from->proc->inner_lock)
1893{ 1886{
1894 struct binder_thread *from; 1887 struct binder_thread *from;
1895 1888
1896 from = binder_get_txn_from(t); 1889 from = binder_get_txn_from(t);
1897 if (!from) 1890 if (!from) {
1891 __acquire(&from->proc->inner_lock);
1898 return NULL; 1892 return NULL;
1893 }
1899 binder_inner_proc_lock(from->proc); 1894 binder_inner_proc_lock(from->proc);
1900 if (t->from) { 1895 if (t->from) {
1901 BUG_ON(from != t->from); 1896 BUG_ON(from != t->from);
1902 return from; 1897 return from;
1903 } 1898 }
1904 binder_inner_proc_unlock(from->proc); 1899 binder_inner_proc_unlock(from->proc);
1900 __acquire(&from->proc->inner_lock);
1905 binder_thread_dec_tmpref(from); 1901 binder_thread_dec_tmpref(from);
1906 return NULL; 1902 return NULL;
1907} 1903}
@@ -1973,6 +1969,8 @@ static void binder_send_failed_reply(struct binder_transaction *t,
1973 binder_thread_dec_tmpref(target_thread); 1969 binder_thread_dec_tmpref(target_thread);
1974 binder_free_transaction(t); 1970 binder_free_transaction(t);
1975 return; 1971 return;
1972 } else {
1973 __release(&target_thread->proc->inner_lock);
1976 } 1974 }
1977 next = t->from_parent; 1975 next = t->from_parent;
1978 1976
@@ -2160,6 +2158,64 @@ static bool binder_validate_fixup(struct binder_buffer *b,
2160 return (fixup_offset >= last_min_offset); 2158 return (fixup_offset >= last_min_offset);
2161} 2159}
2162 2160
2161/**
2162 * struct binder_task_work_cb - for deferred close
2163 *
2164 * @twork: callback_head for task work
2165 * @fd: fd to close
2166 *
2167 * Structure to pass task work to be handled after
2168 * returning from binder_ioctl() via task_work_add().
2169 */
2170struct binder_task_work_cb {
2171 struct callback_head twork;
2172 struct file *file;
2173};
2174
2175/**
2176 * binder_do_fd_close() - close list of file descriptors
2177 * @twork: callback head for task work
2178 *
2179 * It is not safe to call ksys_close() during the binder_ioctl()
2180 * function if there is a chance that binder's own file descriptor
2181 * might be closed. This is to meet the requirements for using
2182 * fdget() (see comments for __fget_light()). Therefore use
2183 * task_work_add() to schedule the close operation once we have
2184 * returned from binder_ioctl(). This function is a callback
2185 * for that mechanism and does the actual ksys_close() on the
2186 * given file descriptor.
2187 */
2188static void binder_do_fd_close(struct callback_head *twork)
2189{
2190 struct binder_task_work_cb *twcb = container_of(twork,
2191 struct binder_task_work_cb, twork);
2192
2193 fput(twcb->file);
2194 kfree(twcb);
2195}
2196
2197/**
2198 * binder_deferred_fd_close() - schedule a close for the given file-descriptor
2199 * @fd: file-descriptor to close
2200 *
2201 * See comments in binder_do_fd_close(). This function is used to schedule
2202 * a file-descriptor to be closed after returning from binder_ioctl().
2203 */
2204static void binder_deferred_fd_close(int fd)
2205{
2206 struct binder_task_work_cb *twcb;
2207
2208 twcb = kzalloc(sizeof(*twcb), GFP_KERNEL);
2209 if (!twcb)
2210 return;
2211 init_task_work(&twcb->twork, binder_do_fd_close);
2212 __close_fd_get_file(fd, &twcb->file);
2213 if (twcb->file)
2214 task_work_add(current, &twcb->twork, true);
2215 else
2216 kfree(twcb);
2217}
2218
2163static void binder_transaction_buffer_release(struct binder_proc *proc, 2219static void binder_transaction_buffer_release(struct binder_proc *proc,
2164 struct binder_buffer *buffer, 2220 struct binder_buffer *buffer,
2165 binder_size_t *failed_at) 2221 binder_size_t *failed_at)
@@ -2299,7 +2355,7 @@ static void binder_transaction_buffer_release(struct binder_proc *proc,
2299 } 2355 }
2300 fd_array = (u32 *)(parent_buffer + (uintptr_t)fda->parent_offset); 2356 fd_array = (u32 *)(parent_buffer + (uintptr_t)fda->parent_offset);
2301 for (fd_index = 0; fd_index < fda->num_fds; fd_index++) 2357 for (fd_index = 0; fd_index < fda->num_fds; fd_index++)
2302 ksys_close(fd_array[fd_index]); 2358 binder_deferred_fd_close(fd_array[fd_index]);
2303 } break; 2359 } break;
2304 default: 2360 default:
2305 pr_err("transaction release %d bad object type %x\n", 2361 pr_err("transaction release %d bad object type %x\n",
@@ -2394,11 +2450,15 @@ static int binder_translate_handle(struct flat_binder_object *fp,
2394 fp->cookie = node->cookie; 2450 fp->cookie = node->cookie;
2395 if (node->proc) 2451 if (node->proc)
2396 binder_inner_proc_lock(node->proc); 2452 binder_inner_proc_lock(node->proc);
2453 else
2454 __acquire(&node->proc->inner_lock);
2397 binder_inc_node_nilocked(node, 2455 binder_inc_node_nilocked(node,
2398 fp->hdr.type == BINDER_TYPE_BINDER, 2456 fp->hdr.type == BINDER_TYPE_BINDER,
2399 0, NULL); 2457 0, NULL);
2400 if (node->proc) 2458 if (node->proc)
2401 binder_inner_proc_unlock(node->proc); 2459 binder_inner_proc_unlock(node->proc);
2460 else
2461 __release(&node->proc->inner_lock);
2402 trace_binder_transaction_ref_to_node(t, node, &src_rdata); 2462 trace_binder_transaction_ref_to_node(t, node, &src_rdata);
2403 binder_debug(BINDER_DEBUG_TRANSACTION, 2463 binder_debug(BINDER_DEBUG_TRANSACTION,
2404 " ref %d desc %d -> node %d u%016llx\n", 2464 " ref %d desc %d -> node %d u%016llx\n",
@@ -2762,6 +2822,8 @@ static void binder_transaction(struct binder_proc *proc,
2762 binder_set_nice(in_reply_to->saved_priority); 2822 binder_set_nice(in_reply_to->saved_priority);
2763 target_thread = binder_get_txn_from_and_acq_inner(in_reply_to); 2823 target_thread = binder_get_txn_from_and_acq_inner(in_reply_to);
2764 if (target_thread == NULL) { 2824 if (target_thread == NULL) {
2825 /* annotation for sparse */
2826 __release(&target_thread->proc->inner_lock);
2765 return_error = BR_DEAD_REPLY; 2827 return_error = BR_DEAD_REPLY;
2766 return_error_line = __LINE__; 2828 return_error_line = __LINE__;
2767 goto err_dead_binder; 2829 goto err_dead_binder;
@@ -3912,7 +3974,7 @@ static int binder_apply_fd_fixups(struct binder_transaction *t)
3912 } else if (ret) { 3974 } else if (ret) {
3913 u32 *fdp = (u32 *)(t->buffer->data + fixup->offset); 3975 u32 *fdp = (u32 *)(t->buffer->data + fixup->offset);
3914 3976
3915 ksys_close(*fdp); 3977 binder_deferred_fd_close(*fdp);
3916 } 3978 }
3917 list_del(&fixup->fixup_entry); 3979 list_del(&fixup->fixup_entry);
3918 kfree(fixup); 3980 kfree(fixup);
@@ -4164,6 +4226,11 @@ retry:
4164 if (cmd == BR_DEAD_BINDER) 4226 if (cmd == BR_DEAD_BINDER)
4165 goto done; /* DEAD_BINDER notifications can cause transactions */ 4227 goto done; /* DEAD_BINDER notifications can cause transactions */
4166 } break; 4228 } break;
4229 default:
4230 binder_inner_proc_unlock(proc);
4231 pr_err("%d:%d: bad work type %d\n",
4232 proc->pid, thread->pid, w->type);
4233 break;
4167 } 4234 }
4168 4235
4169 if (!t) 4236 if (!t)
@@ -4467,6 +4534,8 @@ static int binder_thread_release(struct binder_proc *proc,
4467 spin_lock(&t->lock); 4534 spin_lock(&t->lock);
4468 if (t->to_thread == thread) 4535 if (t->to_thread == thread)
4469 send_reply = t; 4536 send_reply = t;
4537 } else {
4538 __acquire(&t->lock);
4470 } 4539 }
4471 thread->is_dead = true; 4540 thread->is_dead = true;
4472 4541
@@ -4495,7 +4564,11 @@ static int binder_thread_release(struct binder_proc *proc,
4495 spin_unlock(&last_t->lock); 4564 spin_unlock(&last_t->lock);
4496 if (t) 4565 if (t)
4497 spin_lock(&t->lock); 4566 spin_lock(&t->lock);
4567 else
4568 __acquire(&t->lock);
4498 } 4569 }
4570 /* annotation for sparse, lock not acquired in last iteration above */
4571 __release(&t->lock);
4499 4572
4500 /* 4573 /*
4501 * If this thread used poll, make sure we remove the waitqueue 4574 * If this thread used poll, make sure we remove the waitqueue
@@ -4938,8 +5011,12 @@ static int binder_open(struct inode *nodp, struct file *filp)
4938 proc->tsk = current->group_leader; 5011 proc->tsk = current->group_leader;
4939 INIT_LIST_HEAD(&proc->todo); 5012 INIT_LIST_HEAD(&proc->todo);
4940 proc->default_priority = task_nice(current); 5013 proc->default_priority = task_nice(current);
4941 binder_dev = container_of(filp->private_data, struct binder_device, 5014 /* binderfs stashes devices in i_private */
4942 miscdev); 5015 if (is_binderfs_device(nodp))
5016 binder_dev = nodp->i_private;
5017 else
5018 binder_dev = container_of(filp->private_data,
5019 struct binder_device, miscdev);
4943 proc->context = &binder_dev->context; 5020 proc->context = &binder_dev->context;
4944 binder_alloc_init(&proc->alloc); 5021 binder_alloc_init(&proc->alloc);
4945 5022
@@ -4967,7 +5044,7 @@ static int binder_open(struct inode *nodp, struct file *filp)
4967 proc->debugfs_entry = debugfs_create_file(strbuf, 0444, 5044 proc->debugfs_entry = debugfs_create_file(strbuf, 0444,
4968 binder_debugfs_dir_entry_proc, 5045 binder_debugfs_dir_entry_proc,
4969 (void *)(unsigned long)proc->pid, 5046 (void *)(unsigned long)proc->pid,
4970 &binder_proc_fops); 5047 &proc_fops);
4971 } 5048 }
4972 5049
4973 return 0; 5050 return 0;
@@ -5391,6 +5468,9 @@ static void print_binder_proc(struct seq_file *m,
5391 for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) { 5468 for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
5392 struct binder_node *node = rb_entry(n, struct binder_node, 5469 struct binder_node *node = rb_entry(n, struct binder_node,
5393 rb_node); 5470 rb_node);
5471 if (!print_all && !node->has_async_transaction)
5472 continue;
5473
5394 /* 5474 /*
5395 * take a temporary reference on the node so it 5475 * take a temporary reference on the node so it
5396 * survives and isn't removed from the tree 5476 * survives and isn't removed from the tree
@@ -5595,7 +5675,7 @@ static void print_binder_proc_stats(struct seq_file *m,
5595} 5675}
5596 5676
5597 5677
5598static int binder_state_show(struct seq_file *m, void *unused) 5678static int state_show(struct seq_file *m, void *unused)
5599{ 5679{
5600 struct binder_proc *proc; 5680 struct binder_proc *proc;
5601 struct binder_node *node; 5681 struct binder_node *node;
@@ -5634,7 +5714,7 @@ static int binder_state_show(struct seq_file *m, void *unused)
5634 return 0; 5714 return 0;
5635} 5715}
5636 5716
5637static int binder_stats_show(struct seq_file *m, void *unused) 5717static int stats_show(struct seq_file *m, void *unused)
5638{ 5718{
5639 struct binder_proc *proc; 5719 struct binder_proc *proc;
5640 5720
@@ -5650,7 +5730,7 @@ static int binder_stats_show(struct seq_file *m, void *unused)
5650 return 0; 5730 return 0;
5651} 5731}
5652 5732
5653static int binder_transactions_show(struct seq_file *m, void *unused) 5733static int transactions_show(struct seq_file *m, void *unused)
5654{ 5734{
5655 struct binder_proc *proc; 5735 struct binder_proc *proc;
5656 5736
@@ -5663,7 +5743,7 @@ static int binder_transactions_show(struct seq_file *m, void *unused)
5663 return 0; 5743 return 0;
5664} 5744}
5665 5745
5666static int binder_proc_show(struct seq_file *m, void *unused) 5746static int proc_show(struct seq_file *m, void *unused)
5667{ 5747{
5668 struct binder_proc *itr; 5748 struct binder_proc *itr;
5669 int pid = (unsigned long)m->private; 5749 int pid = (unsigned long)m->private;
@@ -5706,7 +5786,7 @@ static void print_binder_transaction_log_entry(struct seq_file *m,
5706 "\n" : " (incomplete)\n"); 5786 "\n" : " (incomplete)\n");
5707} 5787}
5708 5788
5709static int binder_transaction_log_show(struct seq_file *m, void *unused) 5789static int transaction_log_show(struct seq_file *m, void *unused)
5710{ 5790{
5711 struct binder_transaction_log *log = m->private; 5791 struct binder_transaction_log *log = m->private;
5712 unsigned int log_cur = atomic_read(&log->cur); 5792 unsigned int log_cur = atomic_read(&log->cur);
@@ -5727,7 +5807,7 @@ static int binder_transaction_log_show(struct seq_file *m, void *unused)
5727 return 0; 5807 return 0;
5728} 5808}
5729 5809
5730static const struct file_operations binder_fops = { 5810const struct file_operations binder_fops = {
5731 .owner = THIS_MODULE, 5811 .owner = THIS_MODULE,
5732 .poll = binder_poll, 5812 .poll = binder_poll,
5733 .unlocked_ioctl = binder_ioctl, 5813 .unlocked_ioctl = binder_ioctl,
@@ -5738,10 +5818,10 @@ static const struct file_operations binder_fops = {
5738 .release = binder_release, 5818 .release = binder_release,
5739}; 5819};
5740 5820
5741BINDER_DEBUG_ENTRY(state); 5821DEFINE_SHOW_ATTRIBUTE(state);
5742BINDER_DEBUG_ENTRY(stats); 5822DEFINE_SHOW_ATTRIBUTE(stats);
5743BINDER_DEBUG_ENTRY(transactions); 5823DEFINE_SHOW_ATTRIBUTE(transactions);
5744BINDER_DEBUG_ENTRY(transaction_log); 5824DEFINE_SHOW_ATTRIBUTE(transaction_log);
5745 5825
5746static int __init init_binder_device(const char *name) 5826static int __init init_binder_device(const char *name)
5747{ 5827{
@@ -5795,27 +5875,27 @@ static int __init binder_init(void)
5795 0444, 5875 0444,
5796 binder_debugfs_dir_entry_root, 5876 binder_debugfs_dir_entry_root,
5797 NULL, 5877 NULL,
5798 &binder_state_fops); 5878 &state_fops);
5799 debugfs_create_file("stats", 5879 debugfs_create_file("stats",
5800 0444, 5880 0444,
5801 binder_debugfs_dir_entry_root, 5881 binder_debugfs_dir_entry_root,
5802 NULL, 5882 NULL,
5803 &binder_stats_fops); 5883 &stats_fops);
5804 debugfs_create_file("transactions", 5884 debugfs_create_file("transactions",
5805 0444, 5885 0444,
5806 binder_debugfs_dir_entry_root, 5886 binder_debugfs_dir_entry_root,
5807 NULL, 5887 NULL,
5808 &binder_transactions_fops); 5888 &transactions_fops);
5809 debugfs_create_file("transaction_log", 5889 debugfs_create_file("transaction_log",
5810 0444, 5890 0444,
5811 binder_debugfs_dir_entry_root, 5891 binder_debugfs_dir_entry_root,
5812 &binder_transaction_log, 5892 &binder_transaction_log,
5813 &binder_transaction_log_fops); 5893 &transaction_log_fops);
5814 debugfs_create_file("failed_transaction_log", 5894 debugfs_create_file("failed_transaction_log",
5815 0444, 5895 0444,
5816 binder_debugfs_dir_entry_root, 5896 binder_debugfs_dir_entry_root,
5817 &binder_transaction_log_failed, 5897 &binder_transaction_log_failed,
5818 &binder_transaction_log_fops); 5898 &transaction_log_fops);
5819 } 5899 }
5820 5900
5821 /* 5901 /*
diff --git a/drivers/android/binder_alloc.c b/drivers/android/binder_alloc.c
index 030c98f35cca..022cd80e80cc 100644
--- a/drivers/android/binder_alloc.c
+++ b/drivers/android/binder_alloc.c
@@ -939,6 +939,7 @@ enum lru_status binder_alloc_free_page(struct list_head *item,
939 struct list_lru_one *lru, 939 struct list_lru_one *lru,
940 spinlock_t *lock, 940 spinlock_t *lock,
941 void *cb_arg) 941 void *cb_arg)
942 __must_hold(lock)
942{ 943{
943 struct mm_struct *mm = NULL; 944 struct mm_struct *mm = NULL;
944 struct binder_lru_page *page = container_of(item, 945 struct binder_lru_page *page = container_of(item,
diff --git a/drivers/android/binder_alloc.h b/drivers/android/binder_alloc.h
index fb3238c74c8a..c0aadbbf7f19 100644
--- a/drivers/android/binder_alloc.h
+++ b/drivers/android/binder_alloc.h
@@ -30,16 +30,16 @@ struct binder_transaction;
30 * struct binder_buffer - buffer used for binder transactions 30 * struct binder_buffer - buffer used for binder transactions
31 * @entry: entry alloc->buffers 31 * @entry: entry alloc->buffers
32 * @rb_node: node for allocated_buffers/free_buffers rb trees 32 * @rb_node: node for allocated_buffers/free_buffers rb trees
33 * @free: true if buffer is free 33 * @free: %true if buffer is free
34 * @allow_user_free: describe the second member of struct blah, 34 * @allow_user_free: %true if user is allowed to free buffer
35 * @async_transaction: describe the second member of struct blah, 35 * @async_transaction: %true if buffer is in use for an async txn
36 * @debug_id: describe the second member of struct blah, 36 * @debug_id: unique ID for debugging
37 * @transaction: describe the second member of struct blah, 37 * @transaction: pointer to associated struct binder_transaction
38 * @target_node: describe the second member of struct blah, 38 * @target_node: struct binder_node associated with this buffer
39 * @data_size: describe the second member of struct blah, 39 * @data_size: size of @transaction data
40 * @offsets_size: describe the second member of struct blah, 40 * @offsets_size: size of array of offsets
41 * @extra_buffers_size: describe the second member of struct blah, 41 * @extra_buffers_size: size of space for other objects (like sg lists)
42 * @data:i describe the second member of struct blah, 42 * @data: pointer to base of buffer space
43 * 43 *
44 * Bookkeeping structure for binder transaction buffers 44 * Bookkeeping structure for binder transaction buffers
45 */ 45 */
diff --git a/drivers/android/binder_internal.h b/drivers/android/binder_internal.h
new file mode 100644
index 000000000000..7fb97f503ef2
--- /dev/null
+++ b/drivers/android/binder_internal.h
@@ -0,0 +1,49 @@
1/* SPDX-License-Identifier: GPL-2.0 */
2
3#ifndef _LINUX_BINDER_INTERNAL_H
4#define _LINUX_BINDER_INTERNAL_H
5
6#include <linux/export.h>
7#include <linux/fs.h>
8#include <linux/list.h>
9#include <linux/miscdevice.h>
10#include <linux/mutex.h>
11#include <linux/stddef.h>
12#include <linux/types.h>
13#include <linux/uidgid.h>
14
15struct binder_context {
16 struct binder_node *binder_context_mgr_node;
17 struct mutex context_mgr_node_lock;
18 kuid_t binder_context_mgr_uid;
19 const char *name;
20};
21
22/**
23 * struct binder_device - information about a binder device node
24 * @hlist: list of binder devices (only used for devices requested via
25 * CONFIG_ANDROID_BINDER_DEVICES)
26 * @miscdev: information about a binder character device node
27 * @context: binder context information
28 * @binderfs_inode: This is the inode of the root dentry of the super block
29 * belonging to a binderfs mount.
30 */
31struct binder_device {
32 struct hlist_node hlist;
33 struct miscdevice miscdev;
34 struct binder_context context;
35 struct inode *binderfs_inode;
36};
37
38extern const struct file_operations binder_fops;
39
40#ifdef CONFIG_ANDROID_BINDERFS
41extern bool is_binderfs_device(const struct inode *inode);
42#else
43static inline bool is_binderfs_device(const struct inode *inode)
44{
45 return false;
46}
47#endif
48
49#endif /* _LINUX_BINDER_INTERNAL_H */
diff --git a/drivers/android/binderfs.c b/drivers/android/binderfs.c
new file mode 100644
index 000000000000..7496b10532aa
--- /dev/null
+++ b/drivers/android/binderfs.c
@@ -0,0 +1,544 @@
1/* SPDX-License-Identifier: GPL-2.0 */
2
3#include <linux/compiler_types.h>
4#include <linux/errno.h>
5#include <linux/fs.h>
6#include <linux/fsnotify.h>
7#include <linux/gfp.h>
8#include <linux/idr.h>
9#include <linux/init.h>
10#include <linux/ipc_namespace.h>
11#include <linux/kdev_t.h>
12#include <linux/kernel.h>
13#include <linux/list.h>
14#include <linux/magic.h>
15#include <linux/major.h>
16#include <linux/miscdevice.h>
17#include <linux/module.h>
18#include <linux/mutex.h>
19#include <linux/mount.h>
20#include <linux/parser.h>
21#include <linux/radix-tree.h>
22#include <linux/sched.h>
23#include <linux/slab.h>
24#include <linux/spinlock_types.h>
25#include <linux/stddef.h>
26#include <linux/string.h>
27#include <linux/types.h>
28#include <linux/uaccess.h>
29#include <linux/user_namespace.h>
30#include <linux/xarray.h>
31#include <uapi/asm-generic/errno-base.h>
32#include <uapi/linux/android/binder.h>
33#include <uapi/linux/android/binder_ctl.h>
34
35#include "binder_internal.h"
36
37#define FIRST_INODE 1
38#define SECOND_INODE 2
39#define INODE_OFFSET 3
40#define INTSTRLEN 21
41#define BINDERFS_MAX_MINOR (1U << MINORBITS)
42
43static struct vfsmount *binderfs_mnt;
44
45static dev_t binderfs_dev;
46static DEFINE_MUTEX(binderfs_minors_mutex);
47static DEFINE_IDA(binderfs_minors);
48
49/**
50 * binderfs_info - information about a binderfs mount
51 * @ipc_ns: The ipc namespace the binderfs mount belongs to.
52 * @control_dentry: This records the dentry of this binderfs mount
53 * binder-control device.
54 * @root_uid: uid that needs to be used when a new binder device is
55 * created.
56 * @root_gid: gid that needs to be used when a new binder device is
57 * created.
58 */
59struct binderfs_info {
60 struct ipc_namespace *ipc_ns;
61 struct dentry *control_dentry;
62 kuid_t root_uid;
63 kgid_t root_gid;
64
65};
66
67static inline struct binderfs_info *BINDERFS_I(const struct inode *inode)
68{
69 return inode->i_sb->s_fs_info;
70}
71
72bool is_binderfs_device(const struct inode *inode)
73{
74 if (inode->i_sb->s_magic == BINDERFS_SUPER_MAGIC)
75 return true;
76
77 return false;
78}
79
80/**
81 * binderfs_binder_device_create - allocate inode from super block of a
82 * binderfs mount
83 * @ref_inode: inode from which the super block will be taken
84 * @userp: buffer to copy information about new device for userspace to
85 * @req: struct binderfs_device as copied from userspace
86 *
87 * This function allocates a new binder_device and reserves a new minor
88 * number for it.
89 * Minor numbers are limited and tracked globally in binderfs_minors. The
90 * function will stash a struct binder_device for the specific binder
91 * device in i_private of the inode.
92 * It will go on to allocate a new inode from the super block of the
93 * filesystem mount, stash a struct binder_device in its i_private field
94 * and attach a dentry to that inode.
95 *
96 * Return: 0 on success, negative errno on failure
97 */
98static int binderfs_binder_device_create(struct inode *ref_inode,
99 struct binderfs_device __user *userp,
100 struct binderfs_device *req)
101{
102 int minor, ret;
103 struct dentry *dentry, *dup, *root;
104 struct binder_device *device;
105 size_t name_len = BINDERFS_MAX_NAME + 1;
106 char *name = NULL;
107 struct inode *inode = NULL;
108 struct super_block *sb = ref_inode->i_sb;
109 struct binderfs_info *info = sb->s_fs_info;
110
111 /* Reserve new minor number for the new device. */
112 mutex_lock(&binderfs_minors_mutex);
113 minor = ida_alloc_max(&binderfs_minors, BINDERFS_MAX_MINOR, GFP_KERNEL);
114 mutex_unlock(&binderfs_minors_mutex);
115 if (minor < 0)
116 return minor;
117
118 ret = -ENOMEM;
119 device = kzalloc(sizeof(*device), GFP_KERNEL);
120 if (!device)
121 goto err;
122
123 inode = new_inode(sb);
124 if (!inode)
125 goto err;
126
127 inode->i_ino = minor + INODE_OFFSET;
128 inode->i_mtime = inode->i_atime = inode->i_ctime = current_time(inode);
129 init_special_inode(inode, S_IFCHR | 0600,
130 MKDEV(MAJOR(binderfs_dev), minor));
131 inode->i_fop = &binder_fops;
132 inode->i_uid = info->root_uid;
133 inode->i_gid = info->root_gid;
134
135 name = kmalloc(name_len, GFP_KERNEL);
136 if (!name)
137 goto err;
138
139 strscpy(name, req->name, name_len);
140
141 device->binderfs_inode = inode;
142 device->context.binder_context_mgr_uid = INVALID_UID;
143 device->context.name = name;
144 device->miscdev.name = name;
145 device->miscdev.minor = minor;
146 mutex_init(&device->context.context_mgr_node_lock);
147
148 req->major = MAJOR(binderfs_dev);
149 req->minor = minor;
150
151 ret = copy_to_user(userp, req, sizeof(*req));
152 if (ret) {
153 ret = -EFAULT;
154 goto err;
155 }
156
157 root = sb->s_root;
158 inode_lock(d_inode(root));
159 dentry = d_alloc_name(root, name);
160 if (!dentry) {
161 inode_unlock(d_inode(root));
162 ret = -ENOMEM;
163 goto err;
164 }
165
166 /* Verify that the name userspace gave us is not already in use. */
167 dup = d_lookup(root, &dentry->d_name);
168 if (dup) {
169 if (d_really_is_positive(dup)) {
170 dput(dup);
171 dput(dentry);
172 inode_unlock(d_inode(root));
173 ret = -EEXIST;
174 goto err;
175 }
176 dput(dup);
177 }
178
179 inode->i_private = device;
180 d_add(dentry, inode);
181 fsnotify_create(root->d_inode, dentry);
182 inode_unlock(d_inode(root));
183
184 return 0;
185
186err:
187 kfree(name);
188 kfree(device);
189 mutex_lock(&binderfs_minors_mutex);
190 ida_free(&binderfs_minors, minor);
191 mutex_unlock(&binderfs_minors_mutex);
192 iput(inode);
193
194 return ret;
195}
196
197/**
198 * binderfs_ctl_ioctl - handle binder device node allocation requests
199 *
200 * The request handler for the binder-control device. All requests operate on
201 * the binderfs mount the binder-control device resides in:
202 * - BINDER_CTL_ADD
203 * Allocate a new binder device.
204 *
205 * Return: 0 on success, negative errno on failure
206 */
207static long binder_ctl_ioctl(struct file *file, unsigned int cmd,
208 unsigned long arg)
209{
210 int ret = -EINVAL;
211 struct inode *inode = file_inode(file);
212 struct binderfs_device __user *device = (struct binderfs_device __user *)arg;
213 struct binderfs_device device_req;
214
215 switch (cmd) {
216 case BINDER_CTL_ADD:
217 ret = copy_from_user(&device_req, device, sizeof(device_req));
218 if (ret) {
219 ret = -EFAULT;
220 break;
221 }
222
223 ret = binderfs_binder_device_create(inode, device, &device_req);
224 break;
225 default:
226 break;
227 }
228
229 return ret;
230}
231
232static void binderfs_evict_inode(struct inode *inode)
233{
234 struct binder_device *device = inode->i_private;
235
236 clear_inode(inode);
237
238 if (!device)
239 return;
240
241 mutex_lock(&binderfs_minors_mutex);
242 ida_free(&binderfs_minors, device->miscdev.minor);
243 mutex_unlock(&binderfs_minors_mutex);
244
245 kfree(device->context.name);
246 kfree(device);
247}
248
249static const struct super_operations binderfs_super_ops = {
250 .statfs = simple_statfs,
251 .evict_inode = binderfs_evict_inode,
252};
253
254static int binderfs_rename(struct inode *old_dir, struct dentry *old_dentry,
255 struct inode *new_dir, struct dentry *new_dentry,
256 unsigned int flags)
257{
258 struct inode *inode = d_inode(old_dentry);
259
260 /* binderfs doesn't support directories. */
261 if (d_is_dir(old_dentry))
262 return -EPERM;
263
264 if (flags & ~RENAME_NOREPLACE)
265 return -EINVAL;
266
267 if (!simple_empty(new_dentry))
268 return -ENOTEMPTY;
269
270 if (d_really_is_positive(new_dentry))
271 simple_unlink(new_dir, new_dentry);
272
273 old_dir->i_ctime = old_dir->i_mtime = new_dir->i_ctime =
274 new_dir->i_mtime = inode->i_ctime = current_time(old_dir);
275
276 return 0;
277}
278
279static int binderfs_unlink(struct inode *dir, struct dentry *dentry)
280{
281 /*
282 * The control dentry is only ever touched during mount so checking it
283 * here should not require us to take the lock.
284 */
285 if (BINDERFS_I(dir)->control_dentry == dentry)
286 return -EPERM;
287
288 return simple_unlink(dir, dentry);
289}
290
291static const struct file_operations binder_ctl_fops = {
292 .owner = THIS_MODULE,
293 .open = nonseekable_open,
294 .unlocked_ioctl = binder_ctl_ioctl,
295 .compat_ioctl = binder_ctl_ioctl,
296 .llseek = noop_llseek,
297};
298
299/**
300 * binderfs_binder_ctl_create - create a new binder-control device
301 * @sb: super block of the binderfs mount
302 *
303 * This function creates a new binder-control device node in the binderfs mount
304 * referred to by @sb.
305 *
306 * Return: 0 on success, negative errno on failure
307 */
308static int binderfs_binder_ctl_create(struct super_block *sb)
309{
310 int minor, ret;
311 struct dentry *dentry;
312 struct binder_device *device;
313 struct inode *inode = NULL;
314 struct dentry *root = sb->s_root;
315 struct binderfs_info *info = sb->s_fs_info;
316
317 device = kzalloc(sizeof(*device), GFP_KERNEL);
318 if (!device)
319 return -ENOMEM;
320
321 inode_lock(d_inode(root));
322
323 /* If we have already created a binder-control node, return. */
324 if (info->control_dentry) {
325 ret = 0;
326 goto out;
327 }
328
329 ret = -ENOMEM;
330 inode = new_inode(sb);
331 if (!inode)
332 goto out;
333
334 /* Reserve a new minor number for the new device. */
335 mutex_lock(&binderfs_minors_mutex);
336 minor = ida_alloc_max(&binderfs_minors, BINDERFS_MAX_MINOR, GFP_KERNEL);
337 mutex_unlock(&binderfs_minors_mutex);
338 if (minor < 0) {
339 ret = minor;
340 goto out;
341 }
342
343 inode->i_ino = SECOND_INODE;
344 inode->i_mtime = inode->i_atime = inode->i_ctime = current_time(inode);
345 init_special_inode(inode, S_IFCHR | 0600,
346 MKDEV(MAJOR(binderfs_dev), minor));
347 inode->i_fop = &binder_ctl_fops;
348 inode->i_uid = info->root_uid;
349 inode->i_gid = info->root_gid;
350
351 device->binderfs_inode = inode;
352 device->miscdev.minor = minor;
353
354 dentry = d_alloc_name(root, "binder-control");
355 if (!dentry)
356 goto out;
357
358 inode->i_private = device;
359 info->control_dentry = dentry;
360 d_add(dentry, inode);
361 inode_unlock(d_inode(root));
362
363 return 0;
364
365out:
366 inode_unlock(d_inode(root));
367 kfree(device);
368 iput(inode);
369
370 return ret;
371}
372
373static const struct inode_operations binderfs_dir_inode_operations = {
374 .lookup = simple_lookup,
375 .rename = binderfs_rename,
376 .unlink = binderfs_unlink,
377};
378
379static int binderfs_fill_super(struct super_block *sb, void *data, int silent)
380{
381 struct binderfs_info *info;
382 int ret = -ENOMEM;
383 struct inode *inode = NULL;
384 struct ipc_namespace *ipc_ns = sb->s_fs_info;
385
386 get_ipc_ns(ipc_ns);
387
388 sb->s_blocksize = PAGE_SIZE;
389 sb->s_blocksize_bits = PAGE_SHIFT;
390
391 /*
392 * The binderfs filesystem can be mounted by userns root in a
393 * non-initial userns. By default such mounts have the SB_I_NODEV flag
394 * set in s_iflags to prevent security issues where userns root can
395 * just create random device nodes via mknod() since it owns the
396 * filesystem mount. But binderfs does not allow creating any files,
397 * including device nodes. The only way to create binder device nodes
398 * is through the binder-control device which userns root is explicitly
399 * allowed to do. So removing the SB_I_NODEV flag from s_iflags is both
400 * necessary and safe.
401 */
402 sb->s_iflags &= ~SB_I_NODEV;
403 sb->s_iflags |= SB_I_NOEXEC;
404 sb->s_magic = BINDERFS_SUPER_MAGIC;
405 sb->s_op = &binderfs_super_ops;
406 sb->s_time_gran = 1;
407
408 info = kzalloc(sizeof(struct binderfs_info), GFP_KERNEL);
409 if (!info)
410 goto err_without_dentry;
411
412 info->ipc_ns = ipc_ns;
413 info->root_gid = make_kgid(sb->s_user_ns, 0);
414 if (!gid_valid(info->root_gid))
415 info->root_gid = GLOBAL_ROOT_GID;
416 info->root_uid = make_kuid(sb->s_user_ns, 0);
417 if (!uid_valid(info->root_uid))
418 info->root_uid = GLOBAL_ROOT_UID;
419
420 sb->s_fs_info = info;
421
422 inode = new_inode(sb);
423 if (!inode)
424 goto err_without_dentry;
425
426 inode->i_ino = FIRST_INODE;
427 inode->i_fop = &simple_dir_operations;
428 inode->i_mode = S_IFDIR | 0755;
429 inode->i_mtime = inode->i_atime = inode->i_ctime = current_time(inode);
430 inode->i_op = &binderfs_dir_inode_operations;
431 set_nlink(inode, 2);
432
433 sb->s_root = d_make_root(inode);
434 if (!sb->s_root)
435 goto err_without_dentry;
436
437 ret = binderfs_binder_ctl_create(sb);
438 if (ret)
439 goto err_with_dentry;
440
441 return 0;
442
443err_with_dentry:
444 dput(sb->s_root);
445 sb->s_root = NULL;
446
447err_without_dentry:
448 put_ipc_ns(ipc_ns);
449 iput(inode);
450 kfree(info);
451
452 return ret;
453}
454
455static int binderfs_test_super(struct super_block *sb, void *data)
456{
457 struct binderfs_info *info = sb->s_fs_info;
458
459 if (info)
460 return info->ipc_ns == data;
461
462 return 0;
463}
464
465static int binderfs_set_super(struct super_block *sb, void *data)
466{
467 sb->s_fs_info = data;
468 return set_anon_super(sb, NULL);
469}
470
471static struct dentry *binderfs_mount(struct file_system_type *fs_type,
472 int flags, const char *dev_name,
473 void *data)
474{
475 struct super_block *sb;
476 struct ipc_namespace *ipc_ns = current->nsproxy->ipc_ns;
477
478 if (!ns_capable(ipc_ns->user_ns, CAP_SYS_ADMIN))
479 return ERR_PTR(-EPERM);
480
481 sb = sget_userns(fs_type, binderfs_test_super, binderfs_set_super,
482 flags, ipc_ns->user_ns, ipc_ns);
483 if (IS_ERR(sb))
484 return ERR_CAST(sb);
485
486 if (!sb->s_root) {
487 int ret = binderfs_fill_super(sb, data, flags & SB_SILENT ? 1 : 0);
488 if (ret) {
489 deactivate_locked_super(sb);
490 return ERR_PTR(ret);
491 }
492
493 sb->s_flags |= SB_ACTIVE;
494 }
495
496 return dget(sb->s_root);
497}
498
499static void binderfs_kill_super(struct super_block *sb)
500{
501 struct binderfs_info *info = sb->s_fs_info;
502
503 if (info && info->ipc_ns)
504 put_ipc_ns(info->ipc_ns);
505
506 kfree(info);
507 kill_litter_super(sb);
508}
509
510static struct file_system_type binder_fs_type = {
511 .name = "binder",
512 .mount = binderfs_mount,
513 .kill_sb = binderfs_kill_super,
514 .fs_flags = FS_USERNS_MOUNT,
515};
516
517static int __init init_binderfs(void)
518{
519 int ret;
520
521 /* Allocate new major number for binderfs. */
522 ret = alloc_chrdev_region(&binderfs_dev, 0, BINDERFS_MAX_MINOR,
523 "binder");
524 if (ret)
525 return ret;
526
527 ret = register_filesystem(&binder_fs_type);
528 if (ret) {
529 unregister_chrdev_region(binderfs_dev, BINDERFS_MAX_MINOR);
530 return ret;
531 }
532
533 binderfs_mnt = kern_mount(&binder_fs_type);
534 if (IS_ERR(binderfs_mnt)) {
535 ret = PTR_ERR(binderfs_mnt);
536 binderfs_mnt = NULL;
537 unregister_filesystem(&binder_fs_type);
538 unregister_chrdev_region(binderfs_dev, BINDERFS_MAX_MINOR);
539 }
540
541 return ret;
542}
543
544device_initcall(init_binderfs);
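The binderfs code above registers a "binder" filesystem type whose binder-control node accepts BINDER_CTL_ADD requests to allocate new binder devices. A minimal userspace sketch of that flow follows; the struct layout, ioctl number, and mount path are assumptions inferred from the driver code above rather than copied from the UAPI header.

/* Hypothetical userspace sketch: mount a binderfs instance and create a
 * binder device through binder-control. The UAPI definitions below are
 * assumed from the driver code, not taken from a kernel header. */
#include <fcntl.h>
#include <linux/types.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mount.h>
#include <unistd.h>

#define BINDERFS_MAX_NAME 255                          /* assumed */

struct binderfs_device {                                /* assumed layout */
	char name[BINDERFS_MAX_NAME + 1];
	__u32 major;
	__u32 minor;
};

#define BINDER_CTL_ADD _IOWR('b', 1, struct binderfs_device)   /* assumed */

int main(void)
{
	struct binderfs_device req = { .name = "my-binder" };
	int fd;

	/* Assumes the /dev/binderfs directory already exists. */
	if (mount("binder", "/dev/binderfs", "binder", 0, NULL) < 0)
		perror("mount");

	fd = open("/dev/binderfs/binder-control", O_RDONLY | O_CLOEXEC);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* On success the kernel fills in the allocated major and minor. */
	if (ioctl(fd, BINDER_CTL_ADD, &req) < 0) {
		perror("ioctl");
		close(fd);
		return 1;
	}

	printf("created %s as device %u:%u\n", req.name, req.major, req.minor);
	close(fd);
	return 0;
}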
diff --git a/drivers/bus/fsl-mc/dpbp.c b/drivers/bus/fsl-mc/dpbp.c
index 17e3c5d2f22e..9003cd3698a5 100644
--- a/drivers/bus/fsl-mc/dpbp.c
+++ b/drivers/bus/fsl-mc/dpbp.c
@@ -5,7 +5,6 @@
5 */ 5 */
6#include <linux/kernel.h> 6#include <linux/kernel.h>
7#include <linux/fsl/mc.h> 7#include <linux/fsl/mc.h>
8#include <linux/fsl/mc.h>
9 8
10#include "fsl-mc-private.h" 9#include "fsl-mc-private.h"
11 10
diff --git a/drivers/bus/fsl-mc/dpcon.c b/drivers/bus/fsl-mc/dpcon.c
index 760555d7946e..97b6fa605e62 100644
--- a/drivers/bus/fsl-mc/dpcon.c
+++ b/drivers/bus/fsl-mc/dpcon.c
@@ -5,7 +5,6 @@
5 */ 5 */
6#include <linux/kernel.h> 6#include <linux/kernel.h>
7#include <linux/fsl/mc.h> 7#include <linux/fsl/mc.h>
8#include <linux/fsl/mc.h>
9 8
10#include "fsl-mc-private.h" 9#include "fsl-mc-private.h"
11 10
diff --git a/drivers/bus/qcom-ebi2.c b/drivers/bus/qcom-ebi2.c
index a6444244c411..56b01e4344d3 100644
--- a/drivers/bus/qcom-ebi2.c
+++ b/drivers/bus/qcom-ebi2.c
@@ -21,7 +21,6 @@
21#include <linux/of.h> 21#include <linux/of.h>
22#include <linux/of_platform.h> 22#include <linux/of_platform.h>
23#include <linux/init.h> 23#include <linux/init.h>
24#include <linux/io.h>
25#include <linux/slab.h> 24#include <linux/slab.h>
26#include <linux/platform_device.h> 25#include <linux/platform_device.h>
27#include <linux/bitops.h> 26#include <linux/bitops.h>
diff --git a/drivers/char/lp.c b/drivers/char/lp.c
index 8c4dd1a3bb6a..5c8d780637bd 100644
--- a/drivers/char/lp.c
+++ b/drivers/char/lp.c
@@ -46,8 +46,8 @@
46 * lp=auto (assign lp devices to all ports that 46 * lp=auto (assign lp devices to all ports that
47 * have printers attached, as determined 47 * have printers attached, as determined
48 * by the IEEE-1284 autoprobe) 48 * by the IEEE-1284 autoprobe)
49 * 49 *
50 * lp=reset (reset the printer during 50 * lp=reset (reset the printer during
51 * initialisation) 51 * initialisation)
52 * 52 *
53 * lp=off (disable the printer driver entirely) 53 * lp=off (disable the printer driver entirely)
@@ -141,6 +141,7 @@
141 141
142static DEFINE_MUTEX(lp_mutex); 142static DEFINE_MUTEX(lp_mutex);
143static struct lp_struct lp_table[LP_NO]; 143static struct lp_struct lp_table[LP_NO];
144static int port_num[LP_NO];
144 145
145static unsigned int lp_count = 0; 146static unsigned int lp_count = 0;
146static struct class *lp_class; 147static struct class *lp_class;
@@ -166,7 +167,7 @@ static struct parport *console_registered;
166static void lp_claim_parport_or_block(struct lp_struct *this_lp) 167static void lp_claim_parport_or_block(struct lp_struct *this_lp)
167{ 168{
168 if (!test_and_set_bit(LP_PARPORT_CLAIMED, &this_lp->bits)) { 169 if (!test_and_set_bit(LP_PARPORT_CLAIMED, &this_lp->bits)) {
169 parport_claim_or_block (this_lp->dev); 170 parport_claim_or_block(this_lp->dev);
170 } 171 }
171} 172}
172 173
@@ -174,7 +175,7 @@ static void lp_claim_parport_or_block(struct lp_struct *this_lp)
174static void lp_release_parport(struct lp_struct *this_lp) 175static void lp_release_parport(struct lp_struct *this_lp)
175{ 176{
176 if (test_and_clear_bit(LP_PARPORT_CLAIMED, &this_lp->bits)) { 177 if (test_and_clear_bit(LP_PARPORT_CLAIMED, &this_lp->bits)) {
177 parport_release (this_lp->dev); 178 parport_release(this_lp->dev);
178 } 179 }
179} 180}
180 181
@@ -184,37 +185,37 @@ static int lp_preempt(void *handle)
184{ 185{
185 struct lp_struct *this_lp = (struct lp_struct *)handle; 186 struct lp_struct *this_lp = (struct lp_struct *)handle;
186 set_bit(LP_PREEMPT_REQUEST, &this_lp->bits); 187 set_bit(LP_PREEMPT_REQUEST, &this_lp->bits);
187 return (1); 188 return 1;
188} 189}
189 190
190 191
191/* 192/*
192 * Try to negotiate to a new mode; if unsuccessful negotiate to 193 * Try to negotiate to a new mode; if unsuccessful negotiate to
193 * compatibility mode. Return the mode we ended up in. 194 * compatibility mode. Return the mode we ended up in.
194 */ 195 */
195static int lp_negotiate(struct parport * port, int mode) 196static int lp_negotiate(struct parport *port, int mode)
196{ 197{
197 if (parport_negotiate (port, mode) != 0) { 198 if (parport_negotiate(port, mode) != 0) {
198 mode = IEEE1284_MODE_COMPAT; 199 mode = IEEE1284_MODE_COMPAT;
199 parport_negotiate (port, mode); 200 parport_negotiate(port, mode);
200 } 201 }
201 202
202 return (mode); 203 return mode;
203} 204}
204 205
205static int lp_reset(int minor) 206static int lp_reset(int minor)
206{ 207{
207 int retval; 208 int retval;
208 lp_claim_parport_or_block (&lp_table[minor]); 209 lp_claim_parport_or_block(&lp_table[minor]);
209 w_ctr(minor, LP_PSELECP); 210 w_ctr(minor, LP_PSELECP);
210 udelay (LP_DELAY); 211 udelay(LP_DELAY);
211 w_ctr(minor, LP_PSELECP | LP_PINITP); 212 w_ctr(minor, LP_PSELECP | LP_PINITP);
212 retval = r_str(minor); 213 retval = r_str(minor);
213 lp_release_parport (&lp_table[minor]); 214 lp_release_parport(&lp_table[minor]);
214 return retval; 215 return retval;
215} 216}
216 217
217static void lp_error (int minor) 218static void lp_error(int minor)
218{ 219{
219 DEFINE_WAIT(wait); 220 DEFINE_WAIT(wait);
220 int polling; 221 int polling;
@@ -223,12 +224,15 @@ static void lp_error (int minor)
223 return; 224 return;
224 225
225 polling = lp_table[minor].dev->port->irq == PARPORT_IRQ_NONE; 226 polling = lp_table[minor].dev->port->irq == PARPORT_IRQ_NONE;
226 if (polling) lp_release_parport (&lp_table[minor]); 227 if (polling)
228 lp_release_parport(&lp_table[minor]);
227 prepare_to_wait(&lp_table[minor].waitq, &wait, TASK_INTERRUPTIBLE); 229 prepare_to_wait(&lp_table[minor].waitq, &wait, TASK_INTERRUPTIBLE);
228 schedule_timeout(LP_TIMEOUT_POLLED); 230 schedule_timeout(LP_TIMEOUT_POLLED);
229 finish_wait(&lp_table[minor].waitq, &wait); 231 finish_wait(&lp_table[minor].waitq, &wait);
230 if (polling) lp_claim_parport_or_block (&lp_table[minor]); 232 if (polling)
231 else parport_yield_blocking (lp_table[minor].dev); 233 lp_claim_parport_or_block(&lp_table[minor]);
234 else
235 parport_yield_blocking(lp_table[minor].dev);
232} 236}
233 237
234static int lp_check_status(int minor) 238static int lp_check_status(int minor)
@@ -259,7 +263,7 @@ static int lp_check_status(int minor)
259 error = -EIO; 263 error = -EIO;
260 } else { 264 } else {
261 last = 0; /* Come here if LP_CAREFUL is set and no 265 last = 0; /* Come here if LP_CAREFUL is set and no
262 errors are reported. */ 266 errors are reported. */
263 } 267 }
264 268
265 lp_table[minor].last_error = last; 269 lp_table[minor].last_error = last;
@@ -276,14 +280,14 @@ static int lp_wait_ready(int minor, int nonblock)
276 280
277 /* If we're not in compatibility mode, we're ready now! */ 281 /* If we're not in compatibility mode, we're ready now! */
278 if (lp_table[minor].current_mode != IEEE1284_MODE_COMPAT) { 282 if (lp_table[minor].current_mode != IEEE1284_MODE_COMPAT) {
279 return (0); 283 return 0;
280 } 284 }
281 285
282 do { 286 do {
283 error = lp_check_status (minor); 287 error = lp_check_status(minor);
284 if (error && (nonblock || (LP_F(minor) & LP_ABORT))) 288 if (error && (nonblock || (LP_F(minor) & LP_ABORT)))
285 break; 289 break;
286 if (signal_pending (current)) { 290 if (signal_pending(current)) {
287 error = -EINTR; 291 error = -EINTR;
288 break; 292 break;
289 } 293 }
@@ -291,8 +295,8 @@ static int lp_wait_ready(int minor, int nonblock)
291 return error; 295 return error;
292} 296}
293 297
294static ssize_t lp_write(struct file * file, const char __user * buf, 298static ssize_t lp_write(struct file *file, const char __user *buf,
295 size_t count, loff_t *ppos) 299 size_t count, loff_t *ppos)
296{ 300{
297 unsigned int minor = iminor(file_inode(file)); 301 unsigned int minor = iminor(file_inode(file));
298 struct parport *port = lp_table[minor].dev->port; 302 struct parport *port = lp_table[minor].dev->port;
@@ -317,26 +321,26 @@ static ssize_t lp_write(struct file * file, const char __user * buf,
317 if (mutex_lock_interruptible(&lp_table[minor].port_mutex)) 321 if (mutex_lock_interruptible(&lp_table[minor].port_mutex))
318 return -EINTR; 322 return -EINTR;
319 323
320 if (copy_from_user (kbuf, buf, copy_size)) { 324 if (copy_from_user(kbuf, buf, copy_size)) {
321 retv = -EFAULT; 325 retv = -EFAULT;
322 goto out_unlock; 326 goto out_unlock;
323 } 327 }
324 328
325 /* Claim Parport or sleep until it becomes available 329 /* Claim Parport or sleep until it becomes available
326 */ 330 */
327 lp_claim_parport_or_block (&lp_table[minor]); 331 lp_claim_parport_or_block(&lp_table[minor]);
328 /* Go to the proper mode. */ 332 /* Go to the proper mode. */
329 lp_table[minor].current_mode = lp_negotiate (port, 333 lp_table[minor].current_mode = lp_negotiate(port,
330 lp_table[minor].best_mode); 334 lp_table[minor].best_mode);
331 335
332 parport_set_timeout (lp_table[minor].dev, 336 parport_set_timeout(lp_table[minor].dev,
333 (nonblock ? PARPORT_INACTIVITY_O_NONBLOCK 337 (nonblock ? PARPORT_INACTIVITY_O_NONBLOCK
334 : lp_table[minor].timeout)); 338 : lp_table[minor].timeout));
335 339
336 if ((retv = lp_wait_ready (minor, nonblock)) == 0) 340 if ((retv = lp_wait_ready(minor, nonblock)) == 0)
337 do { 341 do {
338 /* Write the data. */ 342 /* Write the data. */
339 written = parport_write (port, kbuf, copy_size); 343 written = parport_write(port, kbuf, copy_size);
340 if (written > 0) { 344 if (written > 0) {
341 copy_size -= written; 345 copy_size -= written;
342 count -= written; 346 count -= written;
@@ -344,7 +348,7 @@ static ssize_t lp_write(struct file * file, const char __user * buf,
344 retv += written; 348 retv += written;
345 } 349 }
346 350
347 if (signal_pending (current)) { 351 if (signal_pending(current)) {
348 if (retv == 0) 352 if (retv == 0)
349 retv = -EINTR; 353 retv = -EINTR;
350 354
@@ -355,11 +359,11 @@ static ssize_t lp_write(struct file * file, const char __user * buf,
355 /* incomplete write -> check error ! */ 359 /* incomplete write -> check error ! */
356 int error; 360 int error;
357 361
358 parport_negotiate (lp_table[minor].dev->port, 362 parport_negotiate(lp_table[minor].dev->port,
359 IEEE1284_MODE_COMPAT); 363 IEEE1284_MODE_COMPAT);
360 lp_table[minor].current_mode = IEEE1284_MODE_COMPAT; 364 lp_table[minor].current_mode = IEEE1284_MODE_COMPAT;
361 365
362 error = lp_wait_ready (minor, nonblock); 366 error = lp_wait_ready(minor, nonblock);
363 367
364 if (error) { 368 if (error) {
365 if (retv == 0) 369 if (retv == 0)
@@ -371,13 +375,13 @@ static ssize_t lp_write(struct file * file, const char __user * buf,
371 break; 375 break;
372 } 376 }
373 377
374 parport_yield_blocking (lp_table[minor].dev); 378 parport_yield_blocking(lp_table[minor].dev);
375 lp_table[minor].current_mode 379 lp_table[minor].current_mode
376 = lp_negotiate (port, 380 = lp_negotiate(port,
377 lp_table[minor].best_mode); 381 lp_table[minor].best_mode);
378 382
379 } else if (need_resched()) 383 } else if (need_resched())
380 schedule (); 384 schedule();
381 385
382 if (count) { 386 if (count) {
383 copy_size = count; 387 copy_size = count;
@@ -389,27 +393,27 @@ static ssize_t lp_write(struct file * file, const char __user * buf,
389 retv = -EFAULT; 393 retv = -EFAULT;
390 break; 394 break;
391 } 395 }
392 } 396 }
393 } while (count > 0); 397 } while (count > 0);
394 398
395 if (test_and_clear_bit(LP_PREEMPT_REQUEST, 399 if (test_and_clear_bit(LP_PREEMPT_REQUEST,
396 &lp_table[minor].bits)) { 400 &lp_table[minor].bits)) {
397 printk(KERN_INFO "lp%d releasing parport\n", minor); 401 printk(KERN_INFO "lp%d releasing parport\n", minor);
398 parport_negotiate (lp_table[minor].dev->port, 402 parport_negotiate(lp_table[minor].dev->port,
399 IEEE1284_MODE_COMPAT); 403 IEEE1284_MODE_COMPAT);
400 lp_table[minor].current_mode = IEEE1284_MODE_COMPAT; 404 lp_table[minor].current_mode = IEEE1284_MODE_COMPAT;
401 lp_release_parport (&lp_table[minor]); 405 lp_release_parport(&lp_table[minor]);
402 } 406 }
403out_unlock: 407out_unlock:
404 mutex_unlock(&lp_table[minor].port_mutex); 408 mutex_unlock(&lp_table[minor].port_mutex);
405 409
406 return retv; 410 return retv;
407} 411}
408 412
409#ifdef CONFIG_PARPORT_1284 413#ifdef CONFIG_PARPORT_1284
410 414
411/* Status readback conforming to ieee1284 */ 415/* Status readback conforming to ieee1284 */
412static ssize_t lp_read(struct file * file, char __user * buf, 416static ssize_t lp_read(struct file *file, char __user *buf,
413 size_t count, loff_t *ppos) 417 size_t count, loff_t *ppos)
414{ 418{
415 DEFINE_WAIT(wait); 419 DEFINE_WAIT(wait);
@@ -426,21 +430,21 @@ static ssize_t lp_read(struct file * file, char __user * buf,
426 if (mutex_lock_interruptible(&lp_table[minor].port_mutex)) 430 if (mutex_lock_interruptible(&lp_table[minor].port_mutex))
427 return -EINTR; 431 return -EINTR;
428 432
429 lp_claim_parport_or_block (&lp_table[minor]); 433 lp_claim_parport_or_block(&lp_table[minor]);
430 434
431 parport_set_timeout (lp_table[minor].dev, 435 parport_set_timeout(lp_table[minor].dev,
432 (nonblock ? PARPORT_INACTIVITY_O_NONBLOCK 436 (nonblock ? PARPORT_INACTIVITY_O_NONBLOCK
433 : lp_table[minor].timeout)); 437 : lp_table[minor].timeout));
434 438
435 parport_negotiate (lp_table[minor].dev->port, IEEE1284_MODE_COMPAT); 439 parport_negotiate(lp_table[minor].dev->port, IEEE1284_MODE_COMPAT);
436 if (parport_negotiate (lp_table[minor].dev->port, 440 if (parport_negotiate(lp_table[minor].dev->port,
437 IEEE1284_MODE_NIBBLE)) { 441 IEEE1284_MODE_NIBBLE)) {
438 retval = -EIO; 442 retval = -EIO;
439 goto out; 443 goto out;
440 } 444 }
441 445
442 while (retval == 0) { 446 while (retval == 0) {
443 retval = parport_read (port, kbuf, count); 447 retval = parport_read(port, kbuf, count);
444 448
445 if (retval > 0) 449 if (retval > 0)
446 break; 450 break;
@@ -453,11 +457,11 @@ static ssize_t lp_read(struct file * file, char __user * buf,
453 /* Wait for data. */ 457 /* Wait for data. */
454 458
455 if (lp_table[minor].dev->port->irq == PARPORT_IRQ_NONE) { 459 if (lp_table[minor].dev->port->irq == PARPORT_IRQ_NONE) {
456 parport_negotiate (lp_table[minor].dev->port, 460 parport_negotiate(lp_table[minor].dev->port,
457 IEEE1284_MODE_COMPAT); 461 IEEE1284_MODE_COMPAT);
458 lp_error (minor); 462 lp_error(minor);
459 if (parport_negotiate (lp_table[minor].dev->port, 463 if (parport_negotiate(lp_table[minor].dev->port,
460 IEEE1284_MODE_NIBBLE)) { 464 IEEE1284_MODE_NIBBLE)) {
461 retval = -EIO; 465 retval = -EIO;
462 goto out; 466 goto out;
463 } 467 }
@@ -467,18 +471,18 @@ static ssize_t lp_read(struct file * file, char __user * buf,
467 finish_wait(&lp_table[minor].waitq, &wait); 471 finish_wait(&lp_table[minor].waitq, &wait);
468 } 472 }
469 473
470 if (signal_pending (current)) { 474 if (signal_pending(current)) {
471 retval = -ERESTARTSYS; 475 retval = -ERESTARTSYS;
472 break; 476 break;
473 } 477 }
474 478
475 cond_resched (); 479 cond_resched();
476 } 480 }
477 parport_negotiate (lp_table[minor].dev->port, IEEE1284_MODE_COMPAT); 481 parport_negotiate(lp_table[minor].dev->port, IEEE1284_MODE_COMPAT);
478 out: 482 out:
479 lp_release_parport (&lp_table[minor]); 483 lp_release_parport(&lp_table[minor]);
480 484
481 if (retval > 0 && copy_to_user (buf, kbuf, retval)) 485 if (retval > 0 && copy_to_user(buf, kbuf, retval))
482 retval = -EFAULT; 486 retval = -EFAULT;
483 487
484 mutex_unlock(&lp_table[minor].port_mutex); 488 mutex_unlock(&lp_table[minor].port_mutex);
@@ -488,7 +492,7 @@ static ssize_t lp_read(struct file * file, char __user * buf,
488 492
489#endif /* IEEE 1284 support */ 493#endif /* IEEE 1284 support */
490 494
491static int lp_open(struct inode * inode, struct file * file) 495static int lp_open(struct inode *inode, struct file *file)
492{ 496{
493 unsigned int minor = iminor(inode); 497 unsigned int minor = iminor(inode);
494 int ret = 0; 498 int ret = 0;
@@ -513,9 +517,9 @@ static int lp_open(struct inode * inode, struct file * file)
513 should most likely only ever be used by the tunelp application. */ 517 should most likely only ever be used by the tunelp application. */
514 if ((LP_F(minor) & LP_ABORTOPEN) && !(file->f_flags & O_NONBLOCK)) { 518 if ((LP_F(minor) & LP_ABORTOPEN) && !(file->f_flags & O_NONBLOCK)) {
515 int status; 519 int status;
516 lp_claim_parport_or_block (&lp_table[minor]); 520 lp_claim_parport_or_block(&lp_table[minor]);
517 status = r_str(minor); 521 status = r_str(minor);
518 lp_release_parport (&lp_table[minor]); 522 lp_release_parport(&lp_table[minor]);
519 if (status & LP_POUTPA) { 523 if (status & LP_POUTPA) {
520 printk(KERN_INFO "lp%d out of paper\n", minor); 524 printk(KERN_INFO "lp%d out of paper\n", minor);
521 LP_F(minor) &= ~LP_BUSY; 525 LP_F(minor) &= ~LP_BUSY;
@@ -540,32 +544,32 @@ static int lp_open(struct inode * inode, struct file * file)
540 goto out; 544 goto out;
541 } 545 }
542 /* Determine if the peripheral supports ECP mode */ 546 /* Determine if the peripheral supports ECP mode */
543 lp_claim_parport_or_block (&lp_table[minor]); 547 lp_claim_parport_or_block(&lp_table[minor]);
544 if ( (lp_table[minor].dev->port->modes & PARPORT_MODE_ECP) && 548 if ( (lp_table[minor].dev->port->modes & PARPORT_MODE_ECP) &&
545 !parport_negotiate (lp_table[minor].dev->port, 549 !parport_negotiate(lp_table[minor].dev->port,
546 IEEE1284_MODE_ECP)) { 550 IEEE1284_MODE_ECP)) {
547 printk (KERN_INFO "lp%d: ECP mode\n", minor); 551 printk(KERN_INFO "lp%d: ECP mode\n", minor);
548 lp_table[minor].best_mode = IEEE1284_MODE_ECP; 552 lp_table[minor].best_mode = IEEE1284_MODE_ECP;
549 } else { 553 } else {
550 lp_table[minor].best_mode = IEEE1284_MODE_COMPAT; 554 lp_table[minor].best_mode = IEEE1284_MODE_COMPAT;
551 } 555 }
552 /* Leave peripheral in compatibility mode */ 556 /* Leave peripheral in compatibility mode */
553 parport_negotiate (lp_table[minor].dev->port, IEEE1284_MODE_COMPAT); 557 parport_negotiate(lp_table[minor].dev->port, IEEE1284_MODE_COMPAT);
554 lp_release_parport (&lp_table[minor]); 558 lp_release_parport(&lp_table[minor]);
555 lp_table[minor].current_mode = IEEE1284_MODE_COMPAT; 559 lp_table[minor].current_mode = IEEE1284_MODE_COMPAT;
556out: 560out:
557 mutex_unlock(&lp_mutex); 561 mutex_unlock(&lp_mutex);
558 return ret; 562 return ret;
559} 563}
560 564
561static int lp_release(struct inode * inode, struct file * file) 565static int lp_release(struct inode *inode, struct file *file)
562{ 566{
563 unsigned int minor = iminor(inode); 567 unsigned int minor = iminor(inode);
564 568
565 lp_claim_parport_or_block (&lp_table[minor]); 569 lp_claim_parport_or_block(&lp_table[minor]);
566 parport_negotiate (lp_table[minor].dev->port, IEEE1284_MODE_COMPAT); 570 parport_negotiate(lp_table[minor].dev->port, IEEE1284_MODE_COMPAT);
567 lp_table[minor].current_mode = IEEE1284_MODE_COMPAT; 571 lp_table[minor].current_mode = IEEE1284_MODE_COMPAT;
568 lp_release_parport (&lp_table[minor]); 572 lp_release_parport(&lp_table[minor]);
569 kfree(lp_table[minor].lp_buffer); 573 kfree(lp_table[minor].lp_buffer);
570 lp_table[minor].lp_buffer = NULL; 574 lp_table[minor].lp_buffer = NULL;
571 LP_F(minor) &= ~LP_BUSY; 575 LP_F(minor) &= ~LP_BUSY;
@@ -615,7 +619,7 @@ static int lp_do_ioctl(unsigned int minor, unsigned int cmd,
615 case LPWAIT: 619 case LPWAIT:
616 LP_WAIT(minor) = arg; 620 LP_WAIT(minor) = arg;
617 break; 621 break;
618 case LPSETIRQ: 622 case LPSETIRQ:
619 return -EINVAL; 623 return -EINVAL;
620 break; 624 break;
621 case LPGETIRQ: 625 case LPGETIRQ:
@@ -626,9 +630,9 @@ static int lp_do_ioctl(unsigned int minor, unsigned int cmd,
626 case LPGETSTATUS: 630 case LPGETSTATUS:
627 if (mutex_lock_interruptible(&lp_table[minor].port_mutex)) 631 if (mutex_lock_interruptible(&lp_table[minor].port_mutex))
628 return -EINTR; 632 return -EINTR;
629 lp_claim_parport_or_block (&lp_table[minor]); 633 lp_claim_parport_or_block(&lp_table[minor]);
630 status = r_str(minor); 634 status = r_str(minor);
631 lp_release_parport (&lp_table[minor]); 635 lp_release_parport(&lp_table[minor]);
632 mutex_unlock(&lp_table[minor].port_mutex); 636 mutex_unlock(&lp_table[minor].port_mutex);
633 637
634 if (copy_to_user(argp, &status, sizeof(int))) 638 if (copy_to_user(argp, &status, sizeof(int)))
@@ -647,8 +651,8 @@ static int lp_do_ioctl(unsigned int minor, unsigned int cmd,
647 sizeof(struct lp_stats)); 651 sizeof(struct lp_stats));
648 break; 652 break;
649#endif 653#endif
650 case LPGETFLAGS: 654 case LPGETFLAGS:
651 status = LP_F(minor); 655 status = LP_F(minor);
652 if (copy_to_user(argp, &status, sizeof(int))) 656 if (copy_to_user(argp, &status, sizeof(int)))
653 return -EFAULT; 657 return -EFAULT;
654 break; 658 break;
@@ -801,31 +805,31 @@ static const struct file_operations lp_fops = {
801 805
802/* The console must be locked when we get here. */ 806/* The console must be locked when we get here. */
803 807
804static void lp_console_write (struct console *co, const char *s, 808static void lp_console_write(struct console *co, const char *s,
805 unsigned count) 809 unsigned count)
806{ 810{
807 struct pardevice *dev = lp_table[CONSOLE_LP].dev; 811 struct pardevice *dev = lp_table[CONSOLE_LP].dev;
808 struct parport *port = dev->port; 812 struct parport *port = dev->port;
809 ssize_t written; 813 ssize_t written;
810 814
811 if (parport_claim (dev)) 815 if (parport_claim(dev))
812 /* Nothing we can do. */ 816 /* Nothing we can do. */
813 return; 817 return;
814 818
815 parport_set_timeout (dev, 0); 819 parport_set_timeout(dev, 0);
816 820
817 /* Go to compatibility mode. */ 821 /* Go to compatibility mode. */
818 parport_negotiate (port, IEEE1284_MODE_COMPAT); 822 parport_negotiate(port, IEEE1284_MODE_COMPAT);
819 823
820 do { 824 do {
821 /* Write the data, converting LF->CRLF as we go. */ 825 /* Write the data, converting LF->CRLF as we go. */
822 ssize_t canwrite = count; 826 ssize_t canwrite = count;
823 char *lf = memchr (s, '\n', count); 827 char *lf = memchr(s, '\n', count);
824 if (lf) 828 if (lf)
825 canwrite = lf - s; 829 canwrite = lf - s;
826 830
827 if (canwrite > 0) { 831 if (canwrite > 0) {
828 written = parport_write (port, s, canwrite); 832 written = parport_write(port, s, canwrite);
829 833
830 if (written <= 0) 834 if (written <= 0)
831 continue; 835 continue;
@@ -843,14 +847,14 @@ static void lp_console_write (struct console *co, const char *s,
843 s++; 847 s++;
844 count--; 848 count--;
845 do { 849 do {
846 written = parport_write (port, crlf, i); 850 written = parport_write(port, crlf, i);
847 if (written > 0) 851 if (written > 0)
848 i -= written, crlf += written; 852 i -= written, crlf += written;
849 } while (i > 0 && (CONSOLE_LP_STRICT || written > 0)); 853 } while (i > 0 && (CONSOLE_LP_STRICT || written > 0));
850 } 854 }
851 } while (count > 0 && (CONSOLE_LP_STRICT || written > 0)); 855 } while (count > 0 && (CONSOLE_LP_STRICT || written > 0));
852 856
853 parport_release (dev); 857 parport_release(dev);
854} 858}
855 859
856static struct console lpcons = { 860static struct console lpcons = {
@@ -871,7 +875,7 @@ module_param_array(parport, charp, NULL, 0);
871module_param(reset, bool, 0); 875module_param(reset, bool, 0);
872 876
873#ifndef MODULE 877#ifndef MODULE
874static int __init lp_setup (char *str) 878static int __init lp_setup(char *str)
875{ 879{
876 static int parport_ptr; 880 static int parport_ptr;
877 int x; 881 int x;
@@ -908,9 +912,13 @@ static int __init lp_setup (char *str)
908 912
909static int lp_register(int nr, struct parport *port) 913static int lp_register(int nr, struct parport *port)
910{ 914{
911 lp_table[nr].dev = parport_register_device(port, "lp", 915 struct pardev_cb ppdev_cb;
912 lp_preempt, NULL, NULL, 0, 916
913 (void *) &lp_table[nr]); 917 memset(&ppdev_cb, 0, sizeof(ppdev_cb));
918 ppdev_cb.preempt = lp_preempt;
919 ppdev_cb.private = &lp_table[nr];
920 lp_table[nr].dev = parport_register_dev_model(port, "lp",
921 &ppdev_cb, nr);
914 if (lp_table[nr].dev == NULL) 922 if (lp_table[nr].dev == NULL)
915 return 1; 923 return 1;
916 lp_table[nr].flags |= LP_EXIST; 924 lp_table[nr].flags |= LP_EXIST;
@@ -921,7 +929,7 @@ static int lp_register(int nr, struct parport *port)
921 device_create(lp_class, port->dev, MKDEV(LP_MAJOR, nr), NULL, 929 device_create(lp_class, port->dev, MKDEV(LP_MAJOR, nr), NULL,
922 "lp%d", nr); 930 "lp%d", nr);
923 931
924 printk(KERN_INFO "lp%d: using %s (%s).\n", nr, port->name, 932 printk(KERN_INFO "lp%d: using %s (%s).\n", nr, port->name,
925 (port->irq == PARPORT_IRQ_NONE)?"polling":"interrupt-driven"); 933 (port->irq == PARPORT_IRQ_NONE)?"polling":"interrupt-driven");
926 934
927#ifdef CONFIG_LP_CONSOLE 935#ifdef CONFIG_LP_CONSOLE
@@ -929,17 +937,18 @@ static int lp_register(int nr, struct parport *port)
929 if (port->modes & PARPORT_MODE_SAFEININT) { 937 if (port->modes & PARPORT_MODE_SAFEININT) {
930 register_console(&lpcons); 938 register_console(&lpcons);
931 console_registered = port; 939 console_registered = port;
932 printk (KERN_INFO "lp%d: console ready\n", CONSOLE_LP); 940 printk(KERN_INFO "lp%d: console ready\n", CONSOLE_LP);
933 } else 941 } else
934 printk (KERN_ERR "lp%d: cannot run console on %s\n", 942 printk(KERN_ERR "lp%d: cannot run console on %s\n",
935 CONSOLE_LP, port->name); 943 CONSOLE_LP, port->name);
936 } 944 }
937#endif 945#endif
946 port_num[nr] = port->number;
938 947
939 return 0; 948 return 0;
940} 949}
941 950
942static void lp_attach (struct parport *port) 951static void lp_attach(struct parport *port)
943{ 952{
944 unsigned int i; 953 unsigned int i;
945 954
@@ -953,7 +962,11 @@ static void lp_attach (struct parport *port)
953 printk(KERN_INFO "lp: ignoring parallel port (max. %d)\n",LP_NO); 962 printk(KERN_INFO "lp: ignoring parallel port (max. %d)\n",LP_NO);
954 return; 963 return;
955 } 964 }
956 if (!lp_register(lp_count, port)) 965 for (i = 0; i < LP_NO; i++)
966 if (port_num[i] == -1)
967 break;
968
969 if (!lp_register(i, port))
957 lp_count++; 970 lp_count++;
958 break; 971 break;
959 972
@@ -969,8 +982,10 @@ static void lp_attach (struct parport *port)
969 } 982 }
970} 983}
971 984
972static void lp_detach (struct parport *port) 985static void lp_detach(struct parport *port)
973{ 986{
987 int n;
988
974 /* Write this some day. */ 989 /* Write this some day. */
975#ifdef CONFIG_LP_CONSOLE 990#ifdef CONFIG_LP_CONSOLE
976 if (console_registered == port) { 991 if (console_registered == port) {
@@ -978,15 +993,25 @@ static void lp_detach (struct parport *port)
978 console_registered = NULL; 993 console_registered = NULL;
979 } 994 }
980#endif /* CONFIG_LP_CONSOLE */ 995#endif /* CONFIG_LP_CONSOLE */
996
997 for (n = 0; n < LP_NO; n++) {
998 if (port_num[n] == port->number) {
999 port_num[n] = -1;
1000 lp_count--;
1001 device_destroy(lp_class, MKDEV(LP_MAJOR, n));
1002 parport_unregister_device(lp_table[n].dev);
1003 }
1004 }
981} 1005}
982 1006
983static struct parport_driver lp_driver = { 1007static struct parport_driver lp_driver = {
984 .name = "lp", 1008 .name = "lp",
985 .attach = lp_attach, 1009 .match_port = lp_attach,
986 .detach = lp_detach, 1010 .detach = lp_detach,
1011 .devmodel = true,
987}; 1012};
988 1013
989static int __init lp_init (void) 1014static int __init lp_init(void)
990{ 1015{
991 int i, err = 0; 1016 int i, err = 0;
992 1017
@@ -1003,17 +1028,18 @@ static int __init lp_init (void)
1003#ifdef LP_STATS 1028#ifdef LP_STATS
1004 lp_table[i].lastcall = 0; 1029 lp_table[i].lastcall = 0;
1005 lp_table[i].runchars = 0; 1030 lp_table[i].runchars = 0;
1006 memset (&lp_table[i].stats, 0, sizeof (struct lp_stats)); 1031 memset(&lp_table[i].stats, 0, sizeof(struct lp_stats));
1007#endif 1032#endif
1008 lp_table[i].last_error = 0; 1033 lp_table[i].last_error = 0;
1009 init_waitqueue_head (&lp_table[i].waitq); 1034 init_waitqueue_head(&lp_table[i].waitq);
1010 init_waitqueue_head (&lp_table[i].dataq); 1035 init_waitqueue_head(&lp_table[i].dataq);
1011 mutex_init(&lp_table[i].port_mutex); 1036 mutex_init(&lp_table[i].port_mutex);
1012 lp_table[i].timeout = 10 * HZ; 1037 lp_table[i].timeout = 10 * HZ;
1038 port_num[i] = -1;
1013 } 1039 }
1014 1040
1015 if (register_chrdev (LP_MAJOR, "lp", &lp_fops)) { 1041 if (register_chrdev(LP_MAJOR, "lp", &lp_fops)) {
1016 printk (KERN_ERR "lp: unable to get major %d\n", LP_MAJOR); 1042 printk(KERN_ERR "lp: unable to get major %d\n", LP_MAJOR);
1017 return -EIO; 1043 return -EIO;
1018 } 1044 }
1019 1045
@@ -1023,17 +1049,17 @@ static int __init lp_init (void)
1023 goto out_reg; 1049 goto out_reg;
1024 } 1050 }
1025 1051
1026 if (parport_register_driver (&lp_driver)) { 1052 if (parport_register_driver(&lp_driver)) {
1027 printk (KERN_ERR "lp: unable to register with parport\n"); 1053 printk(KERN_ERR "lp: unable to register with parport\n");
1028 err = -EIO; 1054 err = -EIO;
1029 goto out_class; 1055 goto out_class;
1030 } 1056 }
1031 1057
1032 if (!lp_count) { 1058 if (!lp_count) {
1033 printk (KERN_INFO "lp: driver loaded but no devices found\n"); 1059 printk(KERN_INFO "lp: driver loaded but no devices found\n");
1034#ifndef CONFIG_PARPORT_1284 1060#ifndef CONFIG_PARPORT_1284
1035 if (parport_nr[0] == LP_PARPORT_AUTO) 1061 if (parport_nr[0] == LP_PARPORT_AUTO)
1036 printk (KERN_INFO "lp: (is IEEE 1284 support enabled?)\n"); 1062 printk(KERN_INFO "lp: (is IEEE 1284 support enabled?)\n");
1037#endif 1063#endif
1038 } 1064 }
1039 1065
@@ -1046,7 +1072,7 @@ out_reg:
1046 return err; 1072 return err;
1047} 1073}
1048 1074
1049static int __init lp_init_module (void) 1075static int __init lp_init_module(void)
1050{ 1076{
1051 if (parport[0]) { 1077 if (parport[0]) {
1052 /* The user gave some parameters. Let's see what they were. */ 1078 /* The user gave some parameters. Let's see what they were. */
@@ -1060,7 +1086,7 @@ static int __init lp_init_module (void)
1060 else { 1086 else {
1061 char *ep; 1087 char *ep;
1062 unsigned long r = simple_strtoul(parport[n], &ep, 0); 1088 unsigned long r = simple_strtoul(parport[n], &ep, 0);
1063 if (ep != parport[n]) 1089 if (ep != parport[n])
1064 parport_nr[n] = r; 1090 parport_nr[n] = r;
1065 else { 1091 else {
1066 printk(KERN_ERR "lp: bad port specifier `%s'\n", parport[n]); 1092 printk(KERN_ERR "lp: bad port specifier `%s'\n", parport[n]);
@@ -1074,23 +1100,15 @@ static int __init lp_init_module (void)
1074 return lp_init(); 1100 return lp_init();
1075} 1101}
1076 1102
1077static void lp_cleanup_module (void) 1103static void lp_cleanup_module(void)
1078{ 1104{
1079 unsigned int offset; 1105 parport_unregister_driver(&lp_driver);
1080
1081 parport_unregister_driver (&lp_driver);
1082 1106
1083#ifdef CONFIG_LP_CONSOLE 1107#ifdef CONFIG_LP_CONSOLE
1084 unregister_console (&lpcons); 1108 unregister_console(&lpcons);
1085#endif 1109#endif
1086 1110
1087 unregister_chrdev(LP_MAJOR, "lp"); 1111 unregister_chrdev(LP_MAJOR, "lp");
1088 for (offset = 0; offset < LP_NO; offset++) {
1089 if (lp_table[offset].dev == NULL)
1090 continue;
1091 parport_unregister_device(lp_table[offset].dev);
1092 device_destroy(lp_class, MKDEV(LP_MAJOR, offset));
1093 }
1094 class_destroy(lp_class); 1112 class_destroy(lp_class);
1095} 1113}
1096 1114
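The lp changes above move the driver to the parport device model: a struct pardev_cb is filled in and passed to parport_register_dev_model() instead of calling parport_register_device(), and the struct parport_driver now provides .match_port with .devmodel set to true. A stripped-down sketch of that registration pattern, using hypothetical "demo" names:

/* Minimal sketch of the parport device-model registration pattern used by
 * the lp changes above; the "demo" driver and its names are hypothetical. */
#include <linux/module.h>
#include <linux/parport.h>
#include <linux/string.h>

static struct pardevice *demo_pardev;

static void demo_match_port(struct parport *port)
{
	struct pardev_cb cb;

	/* Only the callbacks the driver actually needs are filled in. */
	memset(&cb, 0, sizeof(cb));
	cb.private = port;

	demo_pardev = parport_register_dev_model(port, "demo", &cb, 0);
	if (!demo_pardev)
		pr_err("demo: could not register on %s\n", port->name);
}

static void demo_detach(struct parport *port)
{
	if (demo_pardev && demo_pardev->port == port) {
		parport_unregister_device(demo_pardev);
		demo_pardev = NULL;
	}
}

static struct parport_driver demo_driver = {
	.name		= "demo",
	.match_port	= demo_match_port,
	.detach		= demo_detach,
	.devmodel	= true,
};

static int __init demo_init(void)
{
	return parport_register_driver(&demo_driver);
}

static void __exit demo_exit(void)
{
	parport_unregister_driver(&demo_driver);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");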
diff --git a/drivers/char/rtc.c b/drivers/char/rtc.c
index 4948c8bda6b1..62b7c721c732 100644
--- a/drivers/char/rtc.c
+++ b/drivers/char/rtc.c
@@ -866,8 +866,8 @@ static int __init rtc_init(void)
866#ifdef CONFIG_SPARC32 866#ifdef CONFIG_SPARC32
867 for_each_node_by_name(ebus_dp, "ebus") { 867 for_each_node_by_name(ebus_dp, "ebus") {
868 struct device_node *dp; 868 struct device_node *dp;
869 for (dp = ebus_dp; dp; dp = dp->sibling) { 869 for_each_child_of_node(ebus_dp, dp) {
870 if (!strcmp(dp->name, "rtc")) { 870 if (of_node_name_eq(dp, "rtc")) {
871 op = of_find_device_by_node(dp); 871 op = of_find_device_by_node(dp);
872 if (op) { 872 if (op) {
873 rtc_port = op->resource[0].start; 873 rtc_port = op->resource[0].start;
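The rtc.c change above replaces a hand-rolled sibling walk and strcmp() on node names with the for_each_child_of_node() iterator and of_node_name_eq(). A small sketch of that lookup pattern, with a hypothetical helper name:

#include <linux/of.h>

/* Return the first child of @parent named "rtc", or NULL. The iterator
 * keeps a reference on the returned node; the caller must of_node_put() it. */
static struct device_node *demo_find_rtc_child(struct device_node *parent)
{
	struct device_node *child;

	for_each_child_of_node(parent, child)
		if (of_node_name_eq(child, "rtc"))
			return child;

	return NULL;
}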
diff --git a/drivers/char/tlclk.c b/drivers/char/tlclk.c
index 8eeb4190207d..6d81bb3bb503 100644
--- a/drivers/char/tlclk.c
+++ b/drivers/char/tlclk.c
@@ -506,28 +506,28 @@ static ssize_t store_select_amcb2_transmit_clock(struct device *d,
506 506
507 val = (unsigned char)tmp; 507 val = (unsigned char)tmp;
508 spin_lock_irqsave(&event_lock, flags); 508 spin_lock_irqsave(&event_lock, flags);
509 if ((val == CLK_8kHz) || (val == CLK_16_384MHz)) { 509 if ((val == CLK_8kHz) || (val == CLK_16_384MHz)) {
510 SET_PORT_BITS(TLCLK_REG3, 0xc7, 0x28); 510 SET_PORT_BITS(TLCLK_REG3, 0xc7, 0x28);
511 SET_PORT_BITS(TLCLK_REG1, 0xfb, ~val); 511 SET_PORT_BITS(TLCLK_REG1, 0xfb, ~val);
512 } else if (val >= CLK_8_592MHz) { 512 } else if (val >= CLK_8_592MHz) {
513 SET_PORT_BITS(TLCLK_REG3, 0xc7, 0x38); 513 SET_PORT_BITS(TLCLK_REG3, 0xc7, 0x38);
514 switch (val) { 514 switch (val) {
515 case CLK_8_592MHz: 515 case CLK_8_592MHz:
516 SET_PORT_BITS(TLCLK_REG0, 0xfc, 2); 516 SET_PORT_BITS(TLCLK_REG0, 0xfc, 2);
517 break; 517 break;
518 case CLK_11_184MHz: 518 case CLK_11_184MHz:
519 SET_PORT_BITS(TLCLK_REG0, 0xfc, 0); 519 SET_PORT_BITS(TLCLK_REG0, 0xfc, 0);
520 break; 520 break;
521 case CLK_34_368MHz: 521 case CLK_34_368MHz:
522 SET_PORT_BITS(TLCLK_REG0, 0xfc, 3); 522 SET_PORT_BITS(TLCLK_REG0, 0xfc, 3);
523 break; 523 break;
524 case CLK_44_736MHz: 524 case CLK_44_736MHz:
525 SET_PORT_BITS(TLCLK_REG0, 0xfc, 1); 525 SET_PORT_BITS(TLCLK_REG0, 0xfc, 1);
526 break; 526 break;
527 } 527 }
528 } else 528 } else {
529 SET_PORT_BITS(TLCLK_REG3, 0xc7, val << 3); 529 SET_PORT_BITS(TLCLK_REG3, 0xc7, val << 3);
530 530 }
531 spin_unlock_irqrestore(&event_lock, flags); 531 spin_unlock_irqrestore(&event_lock, flags);
532 532
533 return strnlen(buf, count); 533 return strnlen(buf, count);
@@ -548,27 +548,28 @@ static ssize_t store_select_amcb1_transmit_clock(struct device *d,
548 548
549 val = (unsigned char)tmp; 549 val = (unsigned char)tmp;
550 spin_lock_irqsave(&event_lock, flags); 550 spin_lock_irqsave(&event_lock, flags);
551 if ((val == CLK_8kHz) || (val == CLK_16_384MHz)) { 551 if ((val == CLK_8kHz) || (val == CLK_16_384MHz)) {
552 SET_PORT_BITS(TLCLK_REG3, 0xf8, 0x5); 552 SET_PORT_BITS(TLCLK_REG3, 0xf8, 0x5);
553 SET_PORT_BITS(TLCLK_REG1, 0xfb, ~val); 553 SET_PORT_BITS(TLCLK_REG1, 0xfb, ~val);
554 } else if (val >= CLK_8_592MHz) { 554 } else if (val >= CLK_8_592MHz) {
555 SET_PORT_BITS(TLCLK_REG3, 0xf8, 0x7); 555 SET_PORT_BITS(TLCLK_REG3, 0xf8, 0x7);
556 switch (val) { 556 switch (val) {
557 case CLK_8_592MHz: 557 case CLK_8_592MHz:
558 SET_PORT_BITS(TLCLK_REG0, 0xfc, 2); 558 SET_PORT_BITS(TLCLK_REG0, 0xfc, 2);
559 break; 559 break;
560 case CLK_11_184MHz: 560 case CLK_11_184MHz:
561 SET_PORT_BITS(TLCLK_REG0, 0xfc, 0); 561 SET_PORT_BITS(TLCLK_REG0, 0xfc, 0);
562 break; 562 break;
563 case CLK_34_368MHz: 563 case CLK_34_368MHz:
564 SET_PORT_BITS(TLCLK_REG0, 0xfc, 3); 564 SET_PORT_BITS(TLCLK_REG0, 0xfc, 3);
565 break; 565 break;
566 case CLK_44_736MHz: 566 case CLK_44_736MHz:
567 SET_PORT_BITS(TLCLK_REG0, 0xfc, 1); 567 SET_PORT_BITS(TLCLK_REG0, 0xfc, 1);
568 break; 568 break;
569 } 569 }
570 } else 570 } else {
571 SET_PORT_BITS(TLCLK_REG3, 0xf8, val); 571 SET_PORT_BITS(TLCLK_REG3, 0xf8, val);
572 }
572 spin_unlock_irqrestore(&event_lock, flags); 573 spin_unlock_irqrestore(&event_lock, flags);
573 574
574 return strnlen(buf, count); 575 return strnlen(buf, count);
diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
index 5b5b5d72eab7..fbeb71953526 100644
--- a/drivers/char/virtio_console.c
+++ b/drivers/char/virtio_console.c
@@ -1309,7 +1309,7 @@ static const struct attribute_group port_attribute_group = {
1309 .attrs = port_sysfs_entries, 1309 .attrs = port_sysfs_entries,
1310}; 1310};
1311 1311
1312static int debugfs_show(struct seq_file *s, void *data) 1312static int port_debugfs_show(struct seq_file *s, void *data)
1313{ 1313{
1314 struct port *port = s->private; 1314 struct port *port = s->private;
1315 1315
@@ -1327,18 +1327,7 @@ static int debugfs_show(struct seq_file *s, void *data)
1327 return 0; 1327 return 0;
1328} 1328}
1329 1329
1330static int debugfs_open(struct inode *inode, struct file *file) 1330DEFINE_SHOW_ATTRIBUTE(port_debugfs);
1331{
1332 return single_open(file, debugfs_show, inode->i_private);
1333}
1334
1335static const struct file_operations port_debugfs_ops = {
1336 .owner = THIS_MODULE,
1337 .open = debugfs_open,
1338 .read = seq_read,
1339 .llseek = seq_lseek,
1340 .release = single_release,
1341};
1342 1331
1343static void set_console_size(struct port *port, u16 rows, u16 cols) 1332static void set_console_size(struct port *port, u16 rows, u16 cols)
1344{ 1333{
@@ -1490,7 +1479,7 @@ static int add_port(struct ports_device *portdev, u32 id)
1490 port->debugfs_file = debugfs_create_file(debugfs_name, 0444, 1479 port->debugfs_file = debugfs_create_file(debugfs_name, 0444,
1491 pdrvdata.debugfs_dir, 1480 pdrvdata.debugfs_dir,
1492 port, 1481 port,
1493 &port_debugfs_ops); 1482 &port_debugfs_fops);
1494 } 1483 }
1495 return 0; 1484 return 0;
1496 1485
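The virtio_console change above drops the open-coded single_open() wrapper and file_operations in favour of DEFINE_SHOW_ATTRIBUTE(): given a <name>_show() function, the macro generates the matching <name>_fops. A minimal sketch with hypothetical "demo" names:

#include <linux/debugfs.h>
#include <linux/seq_file.h>

static int demo_debugfs_show(struct seq_file *s, void *data)
{
	/* s->private carries the pointer handed to debugfs_create_file(). */
	seq_puts(s, "demo state\n");
	return 0;
}
DEFINE_SHOW_ATTRIBUTE(demo_debugfs);	/* emits demo_debugfs_fops */

static void demo_debugfs_init(void)
{
	debugfs_create_file("demo", 0444, NULL, NULL, &demo_debugfs_fops);
}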
diff --git a/drivers/extcon/extcon-max14577.c b/drivers/extcon/extcon-max14577.c
index 22d2feb1f8bc..32f663436e6e 100644
--- a/drivers/extcon/extcon-max14577.c
+++ b/drivers/extcon/extcon-max14577.c
@@ -657,6 +657,8 @@ static int max14577_muic_probe(struct platform_device *pdev)
657 struct max14577 *max14577 = dev_get_drvdata(pdev->dev.parent); 657 struct max14577 *max14577 = dev_get_drvdata(pdev->dev.parent);
658 struct max14577_muic_info *info; 658 struct max14577_muic_info *info;
659 int delay_jiffies; 659 int delay_jiffies;
660 int cable_type;
661 bool attached;
660 int ret; 662 int ret;
661 int i; 663 int i;
662 u8 id; 664 u8 id;
@@ -725,8 +727,17 @@ static int max14577_muic_probe(struct platform_device *pdev)
725 info->path_uart = CTRL1_SW_UART; 727 info->path_uart = CTRL1_SW_UART;
726 delay_jiffies = msecs_to_jiffies(DELAY_MS_DEFAULT); 728 delay_jiffies = msecs_to_jiffies(DELAY_MS_DEFAULT);
727 729
728 /* Set initial path for UART */ 730 /* Set initial path for UART when JIG is connected to get serial logs */
729 max14577_muic_set_path(info, info->path_uart, true); 731 ret = max14577_bulk_read(info->max14577->regmap,
732 MAX14577_MUIC_REG_STATUS1, info->status, 2);
733 if (ret) {
734 dev_err(info->dev, "Cannot read STATUS registers\n");
735 return ret;
736 }
737 cable_type = max14577_muic_get_cable_type(info, MAX14577_CABLE_GROUP_ADC,
738 &attached);
739 if (attached && cable_type == MAX14577_MUIC_ADC_FACTORY_MODE_UART_OFF)
740 max14577_muic_set_path(info, info->path_uart, true);
730 741
731 /* Check revision number of MUIC device*/ 742 /* Check revision number of MUIC device*/
732 ret = max14577_read_reg(info->max14577->regmap, 743 ret = max14577_read_reg(info->max14577->regmap,
diff --git a/drivers/extcon/extcon-max77693.c b/drivers/extcon/extcon-max77693.c
index a79537ebb671..32fc5a66ffa9 100644
--- a/drivers/extcon/extcon-max77693.c
+++ b/drivers/extcon/extcon-max77693.c
@@ -1072,6 +1072,8 @@ static int max77693_muic_probe(struct platform_device *pdev)
1072 struct max77693_reg_data *init_data; 1072 struct max77693_reg_data *init_data;
1073 int num_init_data; 1073 int num_init_data;
1074 int delay_jiffies; 1074 int delay_jiffies;
1075 int cable_type;
1076 bool attached;
1075 int ret; 1077 int ret;
1076 int i; 1078 int i;
1077 unsigned int id; 1079 unsigned int id;
@@ -1212,8 +1214,18 @@ static int max77693_muic_probe(struct platform_device *pdev)
1212 delay_jiffies = msecs_to_jiffies(DELAY_MS_DEFAULT); 1214 delay_jiffies = msecs_to_jiffies(DELAY_MS_DEFAULT);
1213 } 1215 }
1214 1216
1215 /* Set initial path for UART */ 1217 /* Set initial path for UART when JIG is connected to get serial logs */
1216 max77693_muic_set_path(info, info->path_uart, true); 1218 ret = regmap_bulk_read(info->max77693->regmap_muic,
1219 MAX77693_MUIC_REG_STATUS1, info->status, 2);
1220 if (ret) {
1221 dev_err(info->dev, "failed to read MUIC register\n");
1222 return ret;
1223 }
1224 cable_type = max77693_muic_get_cable_type(info,
1225 MAX77693_CABLE_GROUP_ADC, &attached);
1226 if (attached && (cable_type == MAX77693_MUIC_ADC_FACTORY_MODE_UART_ON ||
1227 cable_type == MAX77693_MUIC_ADC_FACTORY_MODE_UART_OFF))
1228 max77693_muic_set_path(info, info->path_uart, true);
1217 1229
1218 /* Check revision number of MUIC device*/ 1230 /* Check revision number of MUIC device*/
1219 ret = regmap_read(info->max77693->regmap_muic, 1231 ret = regmap_read(info->max77693->regmap_muic,
diff --git a/drivers/extcon/extcon-max77843.c b/drivers/extcon/extcon-max77843.c
index b98cbd0362f5..a343a6ef3506 100644
--- a/drivers/extcon/extcon-max77843.c
+++ b/drivers/extcon/extcon-max77843.c
@@ -812,6 +812,8 @@ static int max77843_muic_probe(struct platform_device *pdev)
812 struct max77693_dev *max77843 = dev_get_drvdata(pdev->dev.parent); 812 struct max77693_dev *max77843 = dev_get_drvdata(pdev->dev.parent);
813 struct max77843_muic_info *info; 813 struct max77843_muic_info *info;
814 unsigned int id; 814 unsigned int id;
815 int cable_type;
816 bool attached;
815 int i, ret; 817 int i, ret;
816 818
817 info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL); 819 info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL);
@@ -856,9 +858,19 @@ static int max77843_muic_probe(struct platform_device *pdev)
856 /* Set ADC debounce time */ 858 /* Set ADC debounce time */
857 max77843_muic_set_debounce_time(info, MAX77843_DEBOUNCE_TIME_25MS); 859 max77843_muic_set_debounce_time(info, MAX77843_DEBOUNCE_TIME_25MS);
858 860
859 /* Set initial path for UART */ 861 /* Set initial path for UART when JIG is connected to get serial logs */
860 max77843_muic_set_path(info, MAX77843_MUIC_CONTROL1_SW_UART, true, 862 ret = regmap_bulk_read(max77843->regmap_muic,
861 false); 863 MAX77843_MUIC_REG_STATUS1, info->status,
864 MAX77843_MUIC_STATUS_NUM);
865 if (ret) {
866 dev_err(info->dev, "Cannot read STATUS registers\n");
867 goto err_muic_irq;
868 }
869 cable_type = max77843_muic_get_cable_type(info, MAX77843_CABLE_GROUP_ADC,
870 &attached);
871 if (attached && cable_type == MAX77843_MUIC_ADC_FACTORY_MODE_UART_OFF)
872 max77843_muic_set_path(info, MAX77843_MUIC_CONTROL1_SW_UART,
873 true, false);
862 874
863 /* Check revision number of MUIC device */ 875 /* Check revision number of MUIC device */
864 ret = regmap_read(max77843->regmap_muic, MAX77843_MUIC_REG_ID, &id); 876 ret = regmap_read(max77843->regmap_muic, MAX77843_MUIC_REG_ID, &id);
diff --git a/drivers/extcon/extcon-max8997.c b/drivers/extcon/extcon-max8997.c
index bdabb2479e0d..172e116ac1ce 100644
--- a/drivers/extcon/extcon-max8997.c
+++ b/drivers/extcon/extcon-max8997.c
@@ -311,12 +311,10 @@ static int max8997_muic_handle_usb(struct max8997_muic_info *info,
311{ 311{
312 int ret = 0; 312 int ret = 0;
313 313
314 if (usb_type == MAX8997_USB_HOST) { 314 ret = max8997_muic_set_path(info, info->path_usb, attached);
315 ret = max8997_muic_set_path(info, info->path_usb, attached); 315 if (ret < 0) {
316 if (ret < 0) { 316 dev_err(info->dev, "failed to update muic register\n");
317 dev_err(info->dev, "failed to update muic register\n"); 317 return ret;
318 return ret;
319 }
320 } 318 }
321 319
322 switch (usb_type) { 320 switch (usb_type) {
@@ -632,6 +630,8 @@ static int max8997_muic_probe(struct platform_device *pdev)
632 struct max8997_platform_data *pdata = dev_get_platdata(max8997->dev); 630 struct max8997_platform_data *pdata = dev_get_platdata(max8997->dev);
633 struct max8997_muic_info *info; 631 struct max8997_muic_info *info;
634 int delay_jiffies; 632 int delay_jiffies;
633 int cable_type;
634 bool attached;
635 int ret, i; 635 int ret, i;
636 636
637 info = devm_kzalloc(&pdev->dev, sizeof(struct max8997_muic_info), 637 info = devm_kzalloc(&pdev->dev, sizeof(struct max8997_muic_info),
@@ -724,8 +724,17 @@ static int max8997_muic_probe(struct platform_device *pdev)
724 delay_jiffies = msecs_to_jiffies(DELAY_MS_DEFAULT); 724 delay_jiffies = msecs_to_jiffies(DELAY_MS_DEFAULT);
725 } 725 }
726 726
727 /* Set initial path for UART */ 727 /* Set initial path for UART when JIG is connected to get serial logs */
728 max8997_muic_set_path(info, info->path_uart, true); 728 ret = max8997_bulk_read(info->muic, MAX8997_MUIC_REG_STATUS1,
729 2, info->status);
730 if (ret) {
731 dev_err(info->dev, "failed to read MUIC register\n");
732 return ret;
733 }
734 cable_type = max8997_muic_get_cable_type(info,
735 MAX8997_CABLE_GROUP_ADC, &attached);
736 if (attached && cable_type == MAX8997_MUIC_ADC_FACTORY_MODE_UART_OFF)
737 max8997_muic_set_path(info, info->path_uart, true);
729 738
730 /* Set ADC debounce time */ 739 /* Set ADC debounce time */
731 max8997_muic_set_debounce_time(info, ADC_DEBOUNCE_TIME_25MS); 740 max8997_muic_set_debounce_time(info, ADC_DEBOUNCE_TIME_25MS);
diff --git a/drivers/firmware/Kconfig b/drivers/firmware/Kconfig
index 7273e5082b41..f754578414f0 100644
--- a/drivers/firmware/Kconfig
+++ b/drivers/firmware/Kconfig
@@ -216,6 +216,18 @@ config FW_CFG_SYSFS_CMDLINE
216 WARNING: Using incorrect parameters (base address in particular) 216 WARNING: Using incorrect parameters (base address in particular)
217 may crash your system. 217 may crash your system.
218 218
219config INTEL_STRATIX10_SERVICE
220 tristate "Intel Stratix10 Service Layer"
221 depends on HAVE_ARM_SMCCC
222 default n
223 help
224 Intel Stratix10 service layer runs at privileged exception level,
225 interfaces with the service providers (FPGA manager is one of them)
226 and manages secure monitor call to communicate with secure monitor
227 software at secure monitor exception level.
228
229 Say Y here if you want Stratix10 service layer support.
230
219config QCOM_SCM 231config QCOM_SCM
220 bool 232 bool
221 depends on ARM || ARM64 233 depends on ARM || ARM64
diff --git a/drivers/firmware/Makefile b/drivers/firmware/Makefile
index 3158dffd9914..80feb635120f 100644
--- a/drivers/firmware/Makefile
+++ b/drivers/firmware/Makefile
@@ -12,6 +12,7 @@ obj-$(CONFIG_DMI_SYSFS) += dmi-sysfs.o
12obj-$(CONFIG_EDD) += edd.o 12obj-$(CONFIG_EDD) += edd.o
13obj-$(CONFIG_EFI_PCDP) += pcdp.o 13obj-$(CONFIG_EFI_PCDP) += pcdp.o
14obj-$(CONFIG_DMIID) += dmi-id.o 14obj-$(CONFIG_DMIID) += dmi-id.o
15obj-$(CONFIG_INTEL_STRATIX10_SERVICE) += stratix10-svc.o
15obj-$(CONFIG_ISCSI_IBFT_FIND) += iscsi_ibft_find.o 16obj-$(CONFIG_ISCSI_IBFT_FIND) += iscsi_ibft_find.o
16obj-$(CONFIG_ISCSI_IBFT) += iscsi_ibft.o 17obj-$(CONFIG_ISCSI_IBFT) += iscsi_ibft.o
17obj-$(CONFIG_FIRMWARE_MEMMAP) += memmap.o 18obj-$(CONFIG_FIRMWARE_MEMMAP) += memmap.o
diff --git a/drivers/firmware/stratix10-svc.c b/drivers/firmware/stratix10-svc.c
new file mode 100644
index 000000000000..6e6514825ad0
--- /dev/null
+++ b/drivers/firmware/stratix10-svc.c
@@ -0,0 +1,1041 @@
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Copyright (C) 2017-2018, Intel Corporation
4 */
5
6#include <linux/completion.h>
7#include <linux/delay.h>
8#include <linux/genalloc.h>
9#include <linux/io.h>
10#include <linux/kfifo.h>
11#include <linux/kthread.h>
12#include <linux/module.h>
13#include <linux/mutex.h>
14#include <linux/of.h>
15#include <linux/of_platform.h>
16#include <linux/platform_device.h>
17#include <linux/slab.h>
18#include <linux/spinlock.h>
19#include <linux/firmware/intel/stratix10-smc.h>
20#include <linux/firmware/intel/stratix10-svc-client.h>
21#include <linux/types.h>
22
23/**
24 * SVC_NUM_DATA_IN_FIFO - number of struct stratix10_svc_data in the FIFO
25 *
 26 * SVC_NUM_CHANNEL - number of channels supported by the service layer driver
27 *
 28 * FPGA_CONFIG_DATA_CLAIM_TIMEOUT_MS - time allowed to claim back the submitted
 29 * buffer(s) from the secure world so the FPGA manager can reuse them, or to free
 30 * the buffer(s) once all bit-stream data has been sent.
31 *
 32 * FPGA_CONFIG_STATUS_TIMEOUT_SEC - time allowed to poll the FPGA configuration
 33 * status; the service layer returns an error to the FPGA manager when the
 34 * timeout occurs. The timeout is 30 seconds (30 * 1000 ms) on the Intel Stratix10 SoC.
35 */
36#define SVC_NUM_DATA_IN_FIFO 32
37#define SVC_NUM_CHANNEL 2
38#define FPGA_CONFIG_DATA_CLAIM_TIMEOUT_MS 200
39#define FPGA_CONFIG_STATUS_TIMEOUT_SEC 30
40
41typedef void (svc_invoke_fn)(unsigned long, unsigned long, unsigned long,
42 unsigned long, unsigned long, unsigned long,
43 unsigned long, unsigned long,
44 struct arm_smccc_res *);
45struct stratix10_svc_chan;
46
47/**
48 * struct stratix10_svc_sh_memory - service shared memory structure
49 * @sync_complete: state for a completion
50 * @addr: physical address of shared memory block
51 * @size: size of shared memory block
52 * @invoke_fn: function to issue secure monitor or hypervisor call
53 *
54 * This struct is used to save physical address and size of shared memory
 55 * block. The shared memory block is allocated by secure monitor software
56 * at secure world.
57 *
58 * Service layer driver uses the physical address and size to create a memory
59 * pool, then allocates data buffer from that memory pool for service client.
60 */
61struct stratix10_svc_sh_memory {
62 struct completion sync_complete;
63 unsigned long addr;
64 unsigned long size;
65 svc_invoke_fn *invoke_fn;
66};
67
68/**
69 * struct stratix10_svc_data_mem - service memory structure
70 * @vaddr: virtual address
71 * @paddr: physical address
72 * @size: size of memory
73 * @node: link list head node
74 *
75 * This struct is used in a list that keeps track of buffers which have
76 * been allocated or freed from the memory pool. Service layer driver also
 77 * uses this struct to translate a physical address to a virtual address.
78 */
79struct stratix10_svc_data_mem {
80 void *vaddr;
81 phys_addr_t paddr;
82 size_t size;
83 struct list_head node;
84};
85
86/**
87 * struct stratix10_svc_data - service data structure
88 * @chan: service channel
 89 * @paddr: payload physical address
 90 * @size: payload size
91 * @command: service command requested by client
92 * @flag: configuration type (full or partial)
93 * @arg: args to be passed via registers and not physically mapped buffers
94 *
95 * This struct is used in service FIFO for inter-process communication.
96 */
97struct stratix10_svc_data {
98 struct stratix10_svc_chan *chan;
99 phys_addr_t paddr;
100 size_t size;
101 u32 command;
102 u32 flag;
103 u64 arg[3];
104};
105
106/**
107 * struct stratix10_svc_controller - service controller
108 * @dev: device
109 * @chans: array of service channels
110 * @num_chans: number of channels in 'chans' array
 111 * @num_active_client: number of active service clients
112 * @node: list management
113 * @genpool: memory pool pointing to the memory region
114 * @task: pointer to the thread task which handles SMC or HVC call
115 * @svc_fifo: a queue for storing service message data
116 * @complete_status: state for completion
117 * @svc_fifo_lock: protect access to service message data queue
118 * @invoke_fn: function to issue secure monitor call or hypervisor call
119 *
 120 * This struct is used to create communication channels for service clients and
 121 * to handle secure monitor or hypervisor calls.
122 */
123struct stratix10_svc_controller {
124 struct device *dev;
125 struct stratix10_svc_chan *chans;
126 int num_chans;
127 int num_active_client;
128 struct list_head node;
129 struct gen_pool *genpool;
130 struct task_struct *task;
131 struct kfifo svc_fifo;
132 struct completion complete_status;
133 spinlock_t svc_fifo_lock;
134 svc_invoke_fn *invoke_fn;
135};
136
137/**
138 * struct stratix10_svc_chan - service communication channel
139 * @ctrl: pointer to service controller which is the provider of this channel
140 * @scl: pointer to service client which owns the channel
141 * @name: service client name associated with the channel
142 * @lock: protect access to the channel
143 *
 144 * This struct is used by a service client to communicate with the service layer;
 145 * each service client has its own channel created by the service controller.
146 */
147struct stratix10_svc_chan {
148 struct stratix10_svc_controller *ctrl;
149 struct stratix10_svc_client *scl;
150 char *name;
151 spinlock_t lock;
152};
153
154static LIST_HEAD(svc_ctrl);
155static LIST_HEAD(svc_data_mem);
156
157/**
158 * svc_pa_to_va() - translate physical address to virtual address
 159 * @addr: physical address to be translated
160 *
161 * Return: valid virtual address or NULL if the provided physical
162 * address doesn't exist.
163 */
164static void *svc_pa_to_va(unsigned long addr)
165{
166 struct stratix10_svc_data_mem *pmem;
167
168 pr_debug("claim back P-addr=0x%016x\n", (unsigned int)addr);
169 list_for_each_entry(pmem, &svc_data_mem, node)
170 if (pmem->paddr == addr)
171 return pmem->vaddr;
172
173 /* physical address is not found */
174 return NULL;
175}
176
177/**
178 * svc_thread_cmd_data_claim() - claim back buffer from the secure world
179 * @ctrl: pointer to service layer controller
180 * @p_data: pointer to service data structure
181 * @cb_data: pointer to callback data structure to service client
182 *
 183 * Claim back the submitted buffers from the secure world and pass them
 184 * back to the service client (FPGA manager, etc.) for reuse.
185 */
186static void svc_thread_cmd_data_claim(struct stratix10_svc_controller *ctrl,
187 struct stratix10_svc_data *p_data,
188 struct stratix10_svc_cb_data *cb_data)
189{
190 struct arm_smccc_res res;
191 unsigned long timeout;
192
193 reinit_completion(&ctrl->complete_status);
194 timeout = msecs_to_jiffies(FPGA_CONFIG_DATA_CLAIM_TIMEOUT_MS);
195
196 pr_debug("%s: claim back the submitted buffer\n", __func__);
197 do {
198 ctrl->invoke_fn(INTEL_SIP_SMC_FPGA_CONFIG_COMPLETED_WRITE,
199 0, 0, 0, 0, 0, 0, 0, &res);
200
201 if (res.a0 == INTEL_SIP_SMC_STATUS_OK) {
202 if (!res.a1) {
203 complete(&ctrl->complete_status);
204 break;
205 }
206 cb_data->status = BIT(SVC_STATUS_RECONFIG_BUFFER_DONE);
207 cb_data->kaddr1 = svc_pa_to_va(res.a1);
208 cb_data->kaddr2 = (res.a2) ?
209 svc_pa_to_va(res.a2) : NULL;
210 cb_data->kaddr3 = (res.a3) ?
211 svc_pa_to_va(res.a3) : NULL;
212 p_data->chan->scl->receive_cb(p_data->chan->scl,
213 cb_data);
214 } else {
215 pr_debug("%s: secure world busy, polling again\n",
216 __func__);
217 }
218 } while (res.a0 == INTEL_SIP_SMC_STATUS_OK ||
219 res.a0 == INTEL_SIP_SMC_FPGA_CONFIG_STATUS_BUSY ||
220 wait_for_completion_timeout(&ctrl->complete_status, timeout));
221}
222
223/**
224 * svc_thread_cmd_config_status() - check configuration status
225 * @ctrl: pointer to service layer controller
226 * @p_data: pointer to service data structure
227 * @cb_data: pointer to callback data structure to service client
228 *
 229 * Check whether the secure firmware in the secure world has finished the FPGA
 230 * configuration, and then inform the FPGA manager of the configuration status.
231 */
232static void svc_thread_cmd_config_status(struct stratix10_svc_controller *ctrl,
233 struct stratix10_svc_data *p_data,
234 struct stratix10_svc_cb_data *cb_data)
235{
236 struct arm_smccc_res res;
237 int count_in_sec;
238
239 cb_data->kaddr1 = NULL;
240 cb_data->kaddr2 = NULL;
241 cb_data->kaddr3 = NULL;
242 cb_data->status = BIT(SVC_STATUS_RECONFIG_ERROR);
243
244 pr_debug("%s: polling config status\n", __func__);
245
246 count_in_sec = FPGA_CONFIG_STATUS_TIMEOUT_SEC;
247 while (count_in_sec) {
248 ctrl->invoke_fn(INTEL_SIP_SMC_FPGA_CONFIG_ISDONE,
249 0, 0, 0, 0, 0, 0, 0, &res);
250 if ((res.a0 == INTEL_SIP_SMC_STATUS_OK) ||
251 (res.a0 == INTEL_SIP_SMC_FPGA_CONFIG_STATUS_ERROR))
252 break;
253
254 /*
255 * configuration is still in progress, wait one second then
256 * poll again
257 */
258 msleep(1000);
259 count_in_sec--;
260 };
261
262 if (res.a0 == INTEL_SIP_SMC_STATUS_OK && count_in_sec)
263 cb_data->status = BIT(SVC_STATUS_RECONFIG_COMPLETED);
264
265 p_data->chan->scl->receive_cb(p_data->chan->scl, cb_data);
266}
267
268/**
269 * svc_thread_recv_status_ok() - handle the successful status
270 * @p_data: pointer to service data structure
271 * @cb_data: pointer to callback data structure to service client
272 * @res: result from SMC or HVC call
273 *
 274 * Send back the corresponding status to the service client.
275 */
276static void svc_thread_recv_status_ok(struct stratix10_svc_data *p_data,
277 struct stratix10_svc_cb_data *cb_data,
278 struct arm_smccc_res res)
279{
280 cb_data->kaddr1 = NULL;
281 cb_data->kaddr2 = NULL;
282 cb_data->kaddr3 = NULL;
283
284 switch (p_data->command) {
285 case COMMAND_RECONFIG:
286 cb_data->status = BIT(SVC_STATUS_RECONFIG_REQUEST_OK);
287 break;
288 case COMMAND_RECONFIG_DATA_SUBMIT:
289 cb_data->status = BIT(SVC_STATUS_RECONFIG_BUFFER_SUBMITTED);
290 break;
291 case COMMAND_NOOP:
292 cb_data->status = BIT(SVC_STATUS_RECONFIG_BUFFER_SUBMITTED);
293 cb_data->kaddr1 = svc_pa_to_va(res.a1);
294 break;
295 case COMMAND_RECONFIG_STATUS:
296 cb_data->status = BIT(SVC_STATUS_RECONFIG_COMPLETED);
297 break;
298 case COMMAND_RSU_UPDATE:
299 cb_data->status = BIT(SVC_STATUS_RSU_OK);
300 break;
301 default:
302 pr_warn("it shouldn't happen\n");
303 break;
304 }
305
306 pr_debug("%s: call receive_cb\n", __func__);
307 p_data->chan->scl->receive_cb(p_data->chan->scl, cb_data);
308}
309
310/**
311 * svc_normal_to_secure_thread() - the function to run in the kthread
312 * @data: data pointer for kthread function
313 *
 314 * The service layer driver creates the svc_smc_hvc_thread kthread on CPU
 315 * node 0; this thread function handles SMC or HVC calls between the kernel
 316 * driver and the secure monitor software.
317 *
318 * Return: 0 for success or -ENOMEM on error.
319 */
320static int svc_normal_to_secure_thread(void *data)
321{
322 struct stratix10_svc_controller
323 *ctrl = (struct stratix10_svc_controller *)data;
324 struct stratix10_svc_data *pdata;
325 struct stratix10_svc_cb_data *cbdata;
326 struct arm_smccc_res res;
327 unsigned long a0, a1, a2;
328 int ret_fifo = 0;
329
330 pdata = kmalloc(sizeof(*pdata), GFP_KERNEL);
331 if (!pdata)
332 return -ENOMEM;
333
334 cbdata = kmalloc(sizeof(*cbdata), GFP_KERNEL);
335 if (!cbdata) {
336 kfree(pdata);
337 return -ENOMEM;
338 }
339
340 /* default set, to remove build warning */
341 a0 = INTEL_SIP_SMC_FPGA_CONFIG_LOOPBACK;
342 a1 = 0;
343 a2 = 0;
344
345 pr_debug("smc_hvc_shm_thread is running\n");
346
347 while (!kthread_should_stop()) {
348 ret_fifo = kfifo_out_spinlocked(&ctrl->svc_fifo,
349 pdata, sizeof(*pdata),
350 &ctrl->svc_fifo_lock);
351
352 if (!ret_fifo)
353 continue;
354
355 pr_debug("get from FIFO pa=0x%016x, command=%u, size=%u\n",
356 (unsigned int)pdata->paddr, pdata->command,
357 (unsigned int)pdata->size);
358
359 switch (pdata->command) {
360 case COMMAND_RECONFIG_DATA_CLAIM:
361 svc_thread_cmd_data_claim(ctrl, pdata, cbdata);
362 continue;
363 case COMMAND_RECONFIG:
364 a0 = INTEL_SIP_SMC_FPGA_CONFIG_START;
365 pr_debug("conf_type=%u\n", (unsigned int)pdata->flag);
366 a1 = pdata->flag;
367 a2 = 0;
368 break;
369 case COMMAND_RECONFIG_DATA_SUBMIT:
370 a0 = INTEL_SIP_SMC_FPGA_CONFIG_WRITE;
371 a1 = (unsigned long)pdata->paddr;
372 a2 = (unsigned long)pdata->size;
373 break;
374 case COMMAND_RECONFIG_STATUS:
375 a0 = INTEL_SIP_SMC_FPGA_CONFIG_ISDONE;
376 a1 = 0;
377 a2 = 0;
378 break;
379 case COMMAND_RSU_STATUS:
380 a0 = INTEL_SIP_SMC_RSU_STATUS;
381 a1 = 0;
382 a2 = 0;
383 break;
384 case COMMAND_RSU_UPDATE:
385 a0 = INTEL_SIP_SMC_RSU_UPDATE;
386 a1 = pdata->arg[0];
387 a2 = 0;
388 break;
389 default:
390 pr_warn("it shouldn't happen\n");
391 break;
392 }
393 pr_debug("%s: before SMC call -- a0=0x%016x a1=0x%016x",
394 __func__, (unsigned int)a0, (unsigned int)a1);
395 pr_debug(" a2=0x%016x\n", (unsigned int)a2);
396
397 ctrl->invoke_fn(a0, a1, a2, 0, 0, 0, 0, 0, &res);
398
399 pr_debug("%s: after SMC call -- res.a0=0x%016x",
400 __func__, (unsigned int)res.a0);
401 pr_debug(" res.a1=0x%016x, res.a2=0x%016x",
402 (unsigned int)res.a1, (unsigned int)res.a2);
403 pr_debug(" res.a3=0x%016x\n", (unsigned int)res.a3);
404
405 if (pdata->command == COMMAND_RSU_STATUS) {
406 if (res.a0 == INTEL_SIP_SMC_RSU_ERROR)
407 cbdata->status = BIT(SVC_STATUS_RSU_ERROR);
408 else
409 cbdata->status = BIT(SVC_STATUS_RSU_OK);
410
411 cbdata->kaddr1 = &res;
412 cbdata->kaddr2 = NULL;
413 cbdata->kaddr3 = NULL;
414 pdata->chan->scl->receive_cb(pdata->chan->scl, cbdata);
415 continue;
416 }
417
418 switch (res.a0) {
419 case INTEL_SIP_SMC_STATUS_OK:
420 svc_thread_recv_status_ok(pdata, cbdata, res);
421 break;
422 case INTEL_SIP_SMC_FPGA_CONFIG_STATUS_BUSY:
423 switch (pdata->command) {
424 case COMMAND_RECONFIG_DATA_SUBMIT:
425 svc_thread_cmd_data_claim(ctrl,
426 pdata, cbdata);
427 break;
428 case COMMAND_RECONFIG_STATUS:
429 svc_thread_cmd_config_status(ctrl,
430 pdata, cbdata);
431 break;
432 default:
433 pr_warn("it shouldn't happen\n");
434 break;
435 }
436 break;
437 case INTEL_SIP_SMC_FPGA_CONFIG_STATUS_REJECTED:
438 pr_debug("%s: STATUS_REJECTED\n", __func__);
439 break;
440 case INTEL_SIP_SMC_FPGA_CONFIG_STATUS_ERROR:
441 pr_err("%s: STATUS_ERROR\n", __func__);
442 cbdata->status = BIT(SVC_STATUS_RECONFIG_ERROR);
443 cbdata->kaddr1 = NULL;
444 cbdata->kaddr2 = NULL;
445 cbdata->kaddr3 = NULL;
446 pdata->chan->scl->receive_cb(pdata->chan->scl, cbdata);
447 break;
448 default:
449 pr_warn("it shouldn't happen\n");
450 break;
451 }
452 };
453
454 kfree(cbdata);
455 kfree(pdata);
456
457 return 0;
458}
459
460/**
461 * svc_normal_to_secure_shm_thread() - the function to run in the kthread
462 * @data: data pointer for kthread function
463 *
 464 * The service layer driver creates the svc_smc_hvc_shm_thread kthread on
 465 * CPU node 0; this thread function queries the physical address of the
 466 * memory block reserved by the secure monitor software in the secure
 467 * world.
468 *
 469 * svc_normal_to_secure_shm_thread() calls do_exit() directly since it is a
 470 * standalone thread on which no one will call kthread_stop(), so it cannot
 471 * simply return when 'kthread_should_stop()' is true.
472 */
473static int svc_normal_to_secure_shm_thread(void *data)
474{
475 struct stratix10_svc_sh_memory
476 *sh_mem = (struct stratix10_svc_sh_memory *)data;
477 struct arm_smccc_res res;
478
479 /* SMC or HVC call to get shared memory info from secure world */
480 sh_mem->invoke_fn(INTEL_SIP_SMC_FPGA_CONFIG_GET_MEM,
481 0, 0, 0, 0, 0, 0, 0, &res);
482 if (res.a0 == INTEL_SIP_SMC_STATUS_OK) {
483 sh_mem->addr = res.a1;
484 sh_mem->size = res.a2;
485 } else {
486 pr_err("%s: after SMC call -- res.a0=0x%016x", __func__,
487 (unsigned int)res.a0);
488 sh_mem->addr = 0;
489 sh_mem->size = 0;
490 }
491
492 complete(&sh_mem->sync_complete);
493 do_exit(0);
494}
495
496/**
497 * svc_get_sh_memory() - get memory block reserved by secure monitor SW
498 * @pdev: pointer to service layer device
499 * @sh_memory: pointer to service shared memory structure
500 *
 501 * Return: zero on successfully getting the physical address of the memory block
 502 * reserved by the secure monitor software, or a negative value on error.
503 */
504static int svc_get_sh_memory(struct platform_device *pdev,
505 struct stratix10_svc_sh_memory *sh_memory)
506{
507 struct device *dev = &pdev->dev;
508 struct task_struct *sh_memory_task;
509 unsigned int cpu = 0;
510
511 init_completion(&sh_memory->sync_complete);
512
513 /* smc or hvc call happens on cpu 0 bound kthread */
514 sh_memory_task = kthread_create_on_node(svc_normal_to_secure_shm_thread,
515 (void *)sh_memory,
516 cpu_to_node(cpu),
517 "svc_smc_hvc_shm_thread");
518 if (IS_ERR(sh_memory_task)) {
519 dev_err(dev, "fail to create stratix10_svc_smc_shm_thread\n");
520 return -EINVAL;
521 }
522
523 wake_up_process(sh_memory_task);
524
525 if (!wait_for_completion_timeout(&sh_memory->sync_complete, 10 * HZ)) {
526 dev_err(dev,
527 "timeout to get sh-memory paras from secure world\n");
528 return -ETIMEDOUT;
529 }
530
531 if (!sh_memory->addr || !sh_memory->size) {
532 dev_err(dev,
533 "fails to get shared memory info from secure world\n");
534 return -ENOMEM;
535 }
536
537 dev_dbg(dev, "SM software provides paddr: 0x%016x, size: 0x%08x\n",
538 (unsigned int)sh_memory->addr,
539 (unsigned int)sh_memory->size);
540
541 return 0;
542}
543
544/**
545 * svc_create_memory_pool() - create a memory pool from reserved memory block
546 * @pdev: pointer to service layer device
547 * @sh_memory: pointer to service shared memory structure
548 *
549 * Return: pool allocated from reserved memory block or ERR_PTR() on error.
550 */
551static struct gen_pool *
552svc_create_memory_pool(struct platform_device *pdev,
553 struct stratix10_svc_sh_memory *sh_memory)
554{
555 struct device *dev = &pdev->dev;
556 struct gen_pool *genpool;
557 unsigned long vaddr;
558 phys_addr_t paddr;
559 size_t size;
560 phys_addr_t begin;
561 phys_addr_t end;
562 void *va;
563 size_t page_mask = PAGE_SIZE - 1;
564 int min_alloc_order = 3;
565 int ret;
566
567 begin = roundup(sh_memory->addr, PAGE_SIZE);
568 end = rounddown(sh_memory->addr + sh_memory->size, PAGE_SIZE);
569 paddr = begin;
570 size = end - begin;
571 va = memremap(paddr, size, MEMREMAP_WC);
572 if (!va) {
573 dev_err(dev, "fail to remap shared memory\n");
574 return ERR_PTR(-EINVAL);
575 }
576 vaddr = (unsigned long)va;
577 dev_dbg(dev,
578 "reserved memory vaddr: %p, paddr: 0x%16x size: 0x%8x\n",
579 va, (unsigned int)paddr, (unsigned int)size);
580 if ((vaddr & page_mask) || (paddr & page_mask) ||
581 (size & page_mask)) {
582 dev_err(dev, "page is not aligned\n");
583 return ERR_PTR(-EINVAL);
584 }
585 genpool = gen_pool_create(min_alloc_order, -1);
586 if (!genpool) {
587 dev_err(dev, "fail to create genpool\n");
588 return ERR_PTR(-ENOMEM);
589 }
590 gen_pool_set_algo(genpool, gen_pool_best_fit, NULL);
591 ret = gen_pool_add_virt(genpool, vaddr, paddr, size, -1);
592 if (ret) {
593 dev_err(dev, "fail to add memory chunk to the pool\n");
594 gen_pool_destroy(genpool);
595 return ERR_PTR(ret);
596 }
597
598 return genpool;
599}
600
601/**
602 * svc_smccc_smc() - secure monitor call between normal and secure world
 603 * @a0: argument passed in register 0
 604 * @a1: argument passed in register 1
 605 * @a2: argument passed in register 2
 606 * @a3: argument passed in register 3
 607 * @a4: argument passed in register 4
 608 * @a5: argument passed in register 5
 609 * @a6: argument passed in register 6
 610 * @a7: argument passed in register 7
 611 * @res: result values from registers 0 to 3
612 */
613static void svc_smccc_smc(unsigned long a0, unsigned long a1,
614 unsigned long a2, unsigned long a3,
615 unsigned long a4, unsigned long a5,
616 unsigned long a6, unsigned long a7,
617 struct arm_smccc_res *res)
618{
619 arm_smccc_smc(a0, a1, a2, a3, a4, a5, a6, a7, res);
620}
621
622/**
623 * svc_smccc_hvc() - hypervisor call between normal and secure world
 624 * @a0: argument passed in register 0
 625 * @a1: argument passed in register 1
 626 * @a2: argument passed in register 2
 627 * @a3: argument passed in register 3
 628 * @a4: argument passed in register 4
 629 * @a5: argument passed in register 5
 630 * @a6: argument passed in register 6
 631 * @a7: argument passed in register 7
 632 * @res: result values from registers 0 to 3
633 */
634static void svc_smccc_hvc(unsigned long a0, unsigned long a1,
635 unsigned long a2, unsigned long a3,
636 unsigned long a4, unsigned long a5,
637 unsigned long a6, unsigned long a7,
638 struct arm_smccc_res *res)
639{
640 arm_smccc_hvc(a0, a1, a2, a3, a4, a5, a6, a7, res);
641}
642
643/**
 644 * get_invoke_func() - get the SMC or HVC invocation function
645 * @dev: pointer to device
646 *
647 * Return: function pointer to svc_smccc_smc or svc_smccc_hvc.
648 */
649static svc_invoke_fn *get_invoke_func(struct device *dev)
650{
651 const char *method;
652
653 if (of_property_read_string(dev->of_node, "method", &method)) {
654 dev_warn(dev, "missing \"method\" property\n");
655 return ERR_PTR(-ENXIO);
656 }
657
658 if (!strcmp(method, "smc"))
659 return svc_smccc_smc;
660 if (!strcmp(method, "hvc"))
661 return svc_smccc_hvc;
662
663 dev_warn(dev, "invalid \"method\" property: %s\n", method);
664
665 return ERR_PTR(-EINVAL);
666}
667
668/**
669 * stratix10_svc_request_channel_byname() - request a service channel
670 * @client: pointer to service client
671 * @name: service client name
672 *
673 * This function is used by service client to request a service channel.
674 *
675 * Return: a pointer to channel assigned to the client on success,
676 * or ERR_PTR() on error.
677 */
678struct stratix10_svc_chan *stratix10_svc_request_channel_byname(
679 struct stratix10_svc_client *client, const char *name)
680{
681 struct device *dev = client->dev;
682 struct stratix10_svc_controller *controller;
683 struct stratix10_svc_chan *chan = NULL;
684 unsigned long flag;
685 int i;
686
687 /* if probe was called after client's, or error on probe */
688 if (list_empty(&svc_ctrl))
689 return ERR_PTR(-EPROBE_DEFER);
690
691 controller = list_first_entry(&svc_ctrl,
692 struct stratix10_svc_controller, node);
693 for (i = 0; i < SVC_NUM_CHANNEL; i++) {
694 if (!strcmp(controller->chans[i].name, name)) {
695 chan = &controller->chans[i];
696 break;
697 }
698 }
699
700 /* if there was no channel match */
701 if (i == SVC_NUM_CHANNEL) {
702 dev_err(dev, "%s: channel not allocated\n", __func__);
703 return ERR_PTR(-EINVAL);
704 }
705
706 if (chan->scl || !try_module_get(controller->dev->driver->owner)) {
707 dev_dbg(dev, "%s: svc not free\n", __func__);
708 return ERR_PTR(-EBUSY);
709 }
710
711 spin_lock_irqsave(&chan->lock, flag);
712 chan->scl = client;
713 chan->ctrl->num_active_client++;
714 spin_unlock_irqrestore(&chan->lock, flag);
715
716 return chan;
717}
718EXPORT_SYMBOL_GPL(stratix10_svc_request_channel_byname);
719
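/*
 * Editor's note: a minimal, hypothetical client-side sketch (not part of this
 * patch) showing how a driver might obtain and release a channel with the API
 * above. SVC_CLIENT_FPGA and the callback prototype come from
 * stratix10-svc-client.h; my_receive_cb() and my_client_attach() are
 * illustrative names only.
 */
static void my_receive_cb(struct stratix10_svc_client *client,
			  struct stratix10_svc_cb_data *data)
{
	/* inspect data->status and data->kaddr1..kaddr3 here */
}

static int my_client_attach(struct device *dev)
{
	static struct stratix10_svc_client client;
	struct stratix10_svc_chan *chan;

	client.dev = dev;
	client.receive_cb = my_receive_cb;
	client.priv = NULL;

	chan = stratix10_svc_request_channel_byname(&client, SVC_CLIENT_FPGA);
	if (IS_ERR(chan))
		return PTR_ERR(chan);	/* may be -EPROBE_DEFER */

	/* ... send messages on the channel, then release it ... */
	stratix10_svc_free_channel(chan);
	return 0;
}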
720/**
721 * stratix10_svc_free_channel() - free service channel
722 * @chan: service channel to be freed
723 *
724 * This function is used by service client to free a service channel.
725 */
726void stratix10_svc_free_channel(struct stratix10_svc_chan *chan)
727{
728 unsigned long flag;
729
730 spin_lock_irqsave(&chan->lock, flag);
731 chan->scl = NULL;
732 chan->ctrl->num_active_client--;
733 module_put(chan->ctrl->dev->driver->owner);
734 spin_unlock_irqrestore(&chan->lock, flag);
735}
736EXPORT_SYMBOL_GPL(stratix10_svc_free_channel);
737
738/**
739 * stratix10_svc_send() - send a message data to the remote
740 * @chan: service channel assigned to the client
741 * @msg: message data to be sent, in the format of
742 * "struct stratix10_svc_client_msg"
743 *
 744 * This function is used by a service client to add a message to the service
 745 * layer driver's queue to be sent to the secure world.
746 *
747 * Return: 0 for success, -ENOMEM or -ENOBUFS on error.
748 */
749int stratix10_svc_send(struct stratix10_svc_chan *chan, void *msg)
750{
751 struct stratix10_svc_client_msg
752 *p_msg = (struct stratix10_svc_client_msg *)msg;
753 struct stratix10_svc_data_mem *p_mem;
754 struct stratix10_svc_data *p_data;
755 int ret = 0;
756 unsigned int cpu = 0;
757
758 p_data = kzalloc(sizeof(*p_data), GFP_KERNEL);
759 if (!p_data)
760 return -ENOMEM;
761
762 /* first client will create kernel thread */
763 if (!chan->ctrl->task) {
764 chan->ctrl->task =
765 kthread_create_on_node(svc_normal_to_secure_thread,
766 (void *)chan->ctrl,
767 cpu_to_node(cpu),
768 "svc_smc_hvc_thread");
769 if (IS_ERR(chan->ctrl->task)) {
770 dev_err(chan->ctrl->dev,
771 "fails to create svc_smc_hvc_thread\n");
772 kfree(p_data);
773 return -EINVAL;
774 }
775 kthread_bind(chan->ctrl->task, cpu);
776 wake_up_process(chan->ctrl->task);
777 }
778
779 pr_debug("%s: sent P-va=%p, P-com=%x, P-size=%u\n", __func__,
780 p_msg->payload, p_msg->command,
781 (unsigned int)p_msg->payload_length);
782
783 if (list_empty(&svc_data_mem)) {
784 if (p_msg->command == COMMAND_RECONFIG) {
785 struct stratix10_svc_command_config_type *ct =
786 (struct stratix10_svc_command_config_type *)
787 p_msg->payload;
788 p_data->flag = ct->flags;
789 }
790 } else {
791 list_for_each_entry(p_mem, &svc_data_mem, node)
792 if (p_mem->vaddr == p_msg->payload) {
793 p_data->paddr = p_mem->paddr;
794 break;
795 }
796 }
797
798 p_data->command = p_msg->command;
799 p_data->arg[0] = p_msg->arg[0];
800 p_data->arg[1] = p_msg->arg[1];
801 p_data->arg[2] = p_msg->arg[2];
802 p_data->size = p_msg->payload_length;
803 p_data->chan = chan;
804 pr_debug("%s: put to FIFO pa=0x%016x, cmd=%x, size=%u\n", __func__,
805 (unsigned int)p_data->paddr, p_data->command,
806 (unsigned int)p_data->size);
807 ret = kfifo_in_spinlocked(&chan->ctrl->svc_fifo, p_data,
808 sizeof(*p_data),
809 &chan->ctrl->svc_fifo_lock);
810
811 kfree(p_data);
812
813 if (!ret)
814 return -ENOBUFS;
815
816 return 0;
817}
818EXPORT_SYMBOL_GPL(stratix10_svc_send);
819
820/**
821 * stratix10_svc_done() - complete service request transactions
822 * @chan: service channel assigned to the client
823 *
 824 * This function should be called when the client has finished its request
 825 * or there is an error in the request process. It allows the service layer
 826 * to stop the running thread to maximize savings in kernel resources.
827 */
828void stratix10_svc_done(struct stratix10_svc_chan *chan)
829{
830 /* stop thread when thread is running AND only one active client */
831 if (chan->ctrl->task && chan->ctrl->num_active_client <= 1) {
832 pr_debug("svc_smc_hvc_shm_thread is stopped\n");
833 kthread_stop(chan->ctrl->task);
834 chan->ctrl->task = NULL;
835 }
836}
837EXPORT_SYMBOL_GPL(stratix10_svc_done);
838
839/**
840 * stratix10_svc_allocate_memory() - allocate memory
841 * @chan: service channel assigned to the client
842 * @size: memory size requested by a specific service client
843 *
 844 * The service layer allocates a buffer of the requested number of bytes from
 845 * the memory pool; a service client uses this function to get allocated buffers.
846 *
847 * Return: address of allocated memory on success, or ERR_PTR() on error.
848 */
849void *stratix10_svc_allocate_memory(struct stratix10_svc_chan *chan,
850 size_t size)
851{
852 struct stratix10_svc_data_mem *pmem;
853 unsigned long va;
854 phys_addr_t pa;
855 struct gen_pool *genpool = chan->ctrl->genpool;
856 size_t s = roundup(size, 1 << genpool->min_alloc_order);
857
858 pmem = devm_kzalloc(chan->ctrl->dev, sizeof(*pmem), GFP_KERNEL);
859 if (!pmem)
860 return ERR_PTR(-ENOMEM);
861
862 va = gen_pool_alloc(genpool, s);
863 if (!va)
864 return ERR_PTR(-ENOMEM);
865
866 memset((void *)va, 0, s);
867 pa = gen_pool_virt_to_phys(genpool, va);
868
869 pmem->vaddr = (void *)va;
870 pmem->paddr = pa;
871 pmem->size = s;
872 list_add_tail(&pmem->node, &svc_data_mem);
873 pr_debug("%s: va=%p, pa=0x%016x\n", __func__,
874 pmem->vaddr, (unsigned int)pmem->paddr);
875
876 return (void *)va;
877}
878EXPORT_SYMBOL_GPL(stratix10_svc_allocate_memory);
879
880/**
881 * stratix10_svc_free_memory() - free allocated memory
882 * @chan: service channel assigned to the client
883 * @kaddr: memory to be freed
884 *
885 * This function is used by service client to free allocated buffers.
886 */
887void stratix10_svc_free_memory(struct stratix10_svc_chan *chan, void *kaddr)
888{
889 struct stratix10_svc_data_mem *pmem;
890 size_t size = 0;
891
892 list_for_each_entry(pmem, &svc_data_mem, node)
893 if (pmem->vaddr == kaddr) {
894 size = pmem->size;
895 break;
896 }
897
898 gen_pool_free(chan->ctrl->genpool, (unsigned long)kaddr, size);
899 pmem->vaddr = NULL;
900 list_del(&pmem->node);
901}
902EXPORT_SYMBOL_GPL(stratix10_svc_free_memory);
903
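/*
 * Editor's note: a hedged sketch (not part of this patch) tying the exported
 * helpers together: allocate a buffer from the service layer's pool, submit
 * it to the secure world, then free it on failure. The source buffer (src,
 * len) and the function name are assumptions for illustration; on success the
 * buffer is handed back later through the client's receive callback.
 */
static int my_submit_chunk(struct stratix10_svc_chan *chan,
			   const char *src, size_t len)
{
	struct stratix10_svc_client_msg msg;
	void *kbuf;
	int ret;

	kbuf = stratix10_svc_allocate_memory(chan, len);
	if (IS_ERR(kbuf))
		return PTR_ERR(kbuf);

	memcpy(kbuf, src, len);

	msg.command = COMMAND_RECONFIG_DATA_SUBMIT;
	msg.payload = kbuf;
	msg.payload_length = len;

	ret = stratix10_svc_send(chan, &msg);
	if (ret)
		stratix10_svc_free_memory(chan, kbuf);

	return ret;
}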
904static const struct of_device_id stratix10_svc_drv_match[] = {
905 {.compatible = "intel,stratix10-svc"},
906 {},
907};
908
909static int stratix10_svc_drv_probe(struct platform_device *pdev)
910{
911 struct device *dev = &pdev->dev;
912 struct stratix10_svc_controller *controller;
913 struct stratix10_svc_chan *chans;
914 struct gen_pool *genpool;
915 struct stratix10_svc_sh_memory *sh_memory;
916 svc_invoke_fn *invoke_fn;
917 size_t fifo_size;
918 int ret;
919
920 /* get SMC or HVC function */
921 invoke_fn = get_invoke_func(dev);
922 if (IS_ERR(invoke_fn))
923 return -EINVAL;
924
925 sh_memory = devm_kzalloc(dev, sizeof(*sh_memory), GFP_KERNEL);
926 if (!sh_memory)
927 return -ENOMEM;
928
929 sh_memory->invoke_fn = invoke_fn;
930 ret = svc_get_sh_memory(pdev, sh_memory);
931 if (ret)
932 return ret;
933
934 genpool = svc_create_memory_pool(pdev, sh_memory);
935 if (!genpool)
936 return -ENOMEM;
937
938 /* allocate service controller and supporting channel */
939 controller = devm_kzalloc(dev, sizeof(*controller), GFP_KERNEL);
940 if (!controller)
941 return -ENOMEM;
942
943 chans = devm_kmalloc_array(dev, SVC_NUM_CHANNEL,
944 sizeof(*chans), GFP_KERNEL | __GFP_ZERO);
945 if (!chans)
946 return -ENOMEM;
947
948 controller->dev = dev;
949 controller->num_chans = SVC_NUM_CHANNEL;
950 controller->num_active_client = 0;
951 controller->chans = chans;
952 controller->genpool = genpool;
953 controller->task = NULL;
954 controller->invoke_fn = invoke_fn;
955 init_completion(&controller->complete_status);
956
957 fifo_size = sizeof(struct stratix10_svc_data) * SVC_NUM_DATA_IN_FIFO;
958 ret = kfifo_alloc(&controller->svc_fifo, fifo_size, GFP_KERNEL);
959 if (ret) {
960 dev_err(dev, "fails to allocate FIFO\n");
961 return ret;
962 }
963 spin_lock_init(&controller->svc_fifo_lock);
964
965 chans[0].scl = NULL;
966 chans[0].ctrl = controller;
967 chans[0].name = SVC_CLIENT_FPGA;
968 spin_lock_init(&chans[0].lock);
969
970 chans[1].scl = NULL;
971 chans[1].ctrl = controller;
972 chans[1].name = SVC_CLIENT_RSU;
973 spin_lock_init(&chans[1].lock);
974
975 list_add_tail(&controller->node, &svc_ctrl);
976 platform_set_drvdata(pdev, controller);
977
978 pr_info("Intel Service Layer Driver Initialized\n");
979
980 return ret;
981}
982
983static int stratix10_svc_drv_remove(struct platform_device *pdev)
984{
985 struct stratix10_svc_controller *ctrl = platform_get_drvdata(pdev);
986
987 kfifo_free(&ctrl->svc_fifo);
988 if (ctrl->task) {
989 kthread_stop(ctrl->task);
990 ctrl->task = NULL;
991 }
992 if (ctrl->genpool)
993 gen_pool_destroy(ctrl->genpool);
994 list_del(&ctrl->node);
995
996 return 0;
997}
998
999static struct platform_driver stratix10_svc_driver = {
1000 .probe = stratix10_svc_drv_probe,
1001 .remove = stratix10_svc_drv_remove,
1002 .driver = {
1003 .name = "stratix10-svc",
1004 .of_match_table = stratix10_svc_drv_match,
1005 },
1006};
1007
1008static int __init stratix10_svc_init(void)
1009{
1010 struct device_node *fw_np;
1011 struct device_node *np;
1012 int ret;
1013
1014 fw_np = of_find_node_by_name(NULL, "firmware");
1015 if (!fw_np)
1016 return -ENODEV;
1017
1018 np = of_find_matching_node(fw_np, stratix10_svc_drv_match);
1019 if (!np)
1020 return -ENODEV;
1021
1022 of_node_put(np);
1023 ret = of_platform_populate(fw_np, stratix10_svc_drv_match, NULL, NULL);
1024 if (ret)
1025 return ret;
1026
1027 return platform_driver_register(&stratix10_svc_driver);
1028}
1029
1030static void __exit stratix10_svc_exit(void)
1031{
1032 return platform_driver_unregister(&stratix10_svc_driver);
1033}
1034
1035subsys_initcall(stratix10_svc_init);
1036module_exit(stratix10_svc_exit);
1037
1038MODULE_LICENSE("GPL v2");
1039MODULE_DESCRIPTION("Intel Stratix10 Service Layer Driver");
1040MODULE_AUTHOR("Richard Gong <richard.gong@intel.com>");
1041MODULE_ALIAS("platform:stratix10-svc");
diff --git a/drivers/fpga/Kconfig b/drivers/fpga/Kconfig
index 1ebcef4bab5b..0bb7b5cd6cdc 100644
--- a/drivers/fpga/Kconfig
+++ b/drivers/fpga/Kconfig
@@ -56,6 +56,12 @@ config FPGA_MGR_ZYNQ_FPGA
56 help 56 help
57 FPGA manager driver support for Xilinx Zynq FPGAs. 57 FPGA manager driver support for Xilinx Zynq FPGAs.
58 58
59config FPGA_MGR_STRATIX10_SOC
60 tristate "Intel Stratix10 SoC FPGA Manager"
61 depends on (ARCH_STRATIX10 && INTEL_STRATIX10_SERVICE)
62 help
63 FPGA manager driver support for the Intel Stratix10 SoC.
64
59config FPGA_MGR_XILINX_SPI 65config FPGA_MGR_XILINX_SPI
60 tristate "Xilinx Configuration over Slave Serial (SPI)" 66 tristate "Xilinx Configuration over Slave Serial (SPI)"
61 depends on SPI 67 depends on SPI
diff --git a/drivers/fpga/Makefile b/drivers/fpga/Makefile
index 7a2d73ba7122..c0dd4c82fbdb 100644
--- a/drivers/fpga/Makefile
+++ b/drivers/fpga/Makefile
@@ -13,6 +13,7 @@ obj-$(CONFIG_FPGA_MGR_ICE40_SPI) += ice40-spi.o
13obj-$(CONFIG_FPGA_MGR_MACHXO2_SPI) += machxo2-spi.o 13obj-$(CONFIG_FPGA_MGR_MACHXO2_SPI) += machxo2-spi.o
14obj-$(CONFIG_FPGA_MGR_SOCFPGA) += socfpga.o 14obj-$(CONFIG_FPGA_MGR_SOCFPGA) += socfpga.o
15obj-$(CONFIG_FPGA_MGR_SOCFPGA_A10) += socfpga-a10.o 15obj-$(CONFIG_FPGA_MGR_SOCFPGA_A10) += socfpga-a10.o
16obj-$(CONFIG_FPGA_MGR_STRATIX10_SOC) += stratix10-soc.o
16obj-$(CONFIG_FPGA_MGR_TS73XX) += ts73xx-fpga.o 17obj-$(CONFIG_FPGA_MGR_TS73XX) += ts73xx-fpga.o
17obj-$(CONFIG_FPGA_MGR_XILINX_SPI) += xilinx-spi.o 18obj-$(CONFIG_FPGA_MGR_XILINX_SPI) += xilinx-spi.o
18obj-$(CONFIG_FPGA_MGR_ZYNQ_FPGA) += zynq-fpga.o 19obj-$(CONFIG_FPGA_MGR_ZYNQ_FPGA) += zynq-fpga.o
diff --git a/drivers/fpga/altera-cvp.c b/drivers/fpga/altera-cvp.c
index 610a1558e0ed..35c3aa5792e2 100644
--- a/drivers/fpga/altera-cvp.c
+++ b/drivers/fpga/altera-cvp.c
@@ -403,6 +403,7 @@ static int altera_cvp_probe(struct pci_dev *pdev,
403 struct altera_cvp_conf *conf; 403 struct altera_cvp_conf *conf;
404 struct fpga_manager *mgr; 404 struct fpga_manager *mgr;
405 u16 cmd, val; 405 u16 cmd, val;
406 u32 regval;
406 int ret; 407 int ret;
407 408
408 /* 409 /*
@@ -416,6 +417,14 @@ static int altera_cvp_probe(struct pci_dev *pdev,
416 return -ENODEV; 417 return -ENODEV;
417 } 418 }
418 419
420 pci_read_config_dword(pdev, VSE_CVP_STATUS, &regval);
421 if (!(regval & VSE_CVP_STATUS_CVP_EN)) {
422 dev_err(&pdev->dev,
423 "CVP is disabled for this device: CVP_STATUS Reg 0x%x\n",
424 regval);
425 return -ENODEV;
426 }
427
419 conf = devm_kzalloc(&pdev->dev, sizeof(*conf), GFP_KERNEL); 428 conf = devm_kzalloc(&pdev->dev, sizeof(*conf), GFP_KERNEL);
420 if (!conf) 429 if (!conf)
421 return -ENOMEM; 430 return -ENOMEM;
@@ -466,18 +475,11 @@ static int altera_cvp_probe(struct pci_dev *pdev,
466 if (ret) 475 if (ret)
467 goto err_unmap; 476 goto err_unmap;
468 477
469 ret = driver_create_file(&altera_cvp_driver.driver,
470 &driver_attr_chkcfg);
471 if (ret) {
472 dev_err(&pdev->dev, "Can't create sysfs chkcfg file\n");
473 fpga_mgr_unregister(mgr);
474 goto err_unmap;
475 }
476
477 return 0; 478 return 0;
478 479
479err_unmap: 480err_unmap:
480 pci_iounmap(pdev, conf->map); 481 if (conf->map)
482 pci_iounmap(pdev, conf->map);
481 pci_release_region(pdev, CVP_BAR); 483 pci_release_region(pdev, CVP_BAR);
482err_disable: 484err_disable:
483 cmd &= ~PCI_COMMAND_MEMORY; 485 cmd &= ~PCI_COMMAND_MEMORY;
@@ -491,16 +493,39 @@ static void altera_cvp_remove(struct pci_dev *pdev)
491 struct altera_cvp_conf *conf = mgr->priv; 493 struct altera_cvp_conf *conf = mgr->priv;
492 u16 cmd; 494 u16 cmd;
493 495
494 driver_remove_file(&altera_cvp_driver.driver, &driver_attr_chkcfg);
495 fpga_mgr_unregister(mgr); 496 fpga_mgr_unregister(mgr);
496 pci_iounmap(pdev, conf->map); 497 if (conf->map)
498 pci_iounmap(pdev, conf->map);
497 pci_release_region(pdev, CVP_BAR); 499 pci_release_region(pdev, CVP_BAR);
498 pci_read_config_word(pdev, PCI_COMMAND, &cmd); 500 pci_read_config_word(pdev, PCI_COMMAND, &cmd);
499 cmd &= ~PCI_COMMAND_MEMORY; 501 cmd &= ~PCI_COMMAND_MEMORY;
500 pci_write_config_word(pdev, PCI_COMMAND, cmd); 502 pci_write_config_word(pdev, PCI_COMMAND, cmd);
501} 503}
502 504
503module_pci_driver(altera_cvp_driver); 505static int __init altera_cvp_init(void)
506{
507 int ret;
508
509 ret = pci_register_driver(&altera_cvp_driver);
510 if (ret)
511 return ret;
512
513 ret = driver_create_file(&altera_cvp_driver.driver,
514 &driver_attr_chkcfg);
515 if (ret)
516 pr_warn("Can't create sysfs chkcfg file\n");
517
518 return 0;
519}
520
521static void __exit altera_cvp_exit(void)
522{
523 driver_remove_file(&altera_cvp_driver.driver, &driver_attr_chkcfg);
524 pci_unregister_driver(&altera_cvp_driver);
525}
526
527module_init(altera_cvp_init);
528module_exit(altera_cvp_exit);
504 529
505MODULE_LICENSE("GPL v2"); 530MODULE_LICENSE("GPL v2");
506MODULE_AUTHOR("Anatolij Gustschin <agust@denx.de>"); 531MODULE_AUTHOR("Anatolij Gustschin <agust@denx.de>");
diff --git a/drivers/fpga/altera-ps-spi.c b/drivers/fpga/altera-ps-spi.c
index 33aafda50af5..8c18beec6b57 100644
--- a/drivers/fpga/altera-ps-spi.c
+++ b/drivers/fpga/altera-ps-spi.c
@@ -75,6 +75,12 @@ static struct altera_ps_data a10_data = {
75 .t_st2ck_us = 10, /* min(t_ST2CK) */ 75 .t_st2ck_us = 10, /* min(t_ST2CK) */
76}; 76};
77 77
78/* Array index is enum altera_ps_devtype */
79static const struct altera_ps_data *altera_ps_data_map[] = {
80 &c5_data,
81 &a10_data,
82};
83
78static const struct of_device_id of_ef_match[] = { 84static const struct of_device_id of_ef_match[] = {
79 { .compatible = "altr,fpga-passive-serial", .data = &c5_data }, 85 { .compatible = "altr,fpga-passive-serial", .data = &c5_data },
80 { .compatible = "altr,fpga-arria10-passive-serial", .data = &a10_data }, 86 { .compatible = "altr,fpga-arria10-passive-serial", .data = &a10_data },
@@ -234,6 +240,22 @@ static const struct fpga_manager_ops altera_ps_ops = {
234 .write_complete = altera_ps_write_complete, 240 .write_complete = altera_ps_write_complete,
235}; 241};
236 242
243static const struct altera_ps_data *id_to_data(const struct spi_device_id *id)
244{
245 kernel_ulong_t devtype = id->driver_data;
246 const struct altera_ps_data *data;
247
 248 /* someone added an altera_ps_devtype without adding it to the map array */
249 if (devtype >= ARRAY_SIZE(altera_ps_data_map))
250 return NULL;
251
252 data = altera_ps_data_map[devtype];
253 if (!data || data->devtype != devtype)
254 return NULL;
255
256 return data;
257}
258
237static int altera_ps_probe(struct spi_device *spi) 259static int altera_ps_probe(struct spi_device *spi)
238{ 260{
239 struct altera_ps_conf *conf; 261 struct altera_ps_conf *conf;
@@ -244,11 +266,17 @@ static int altera_ps_probe(struct spi_device *spi)
244 if (!conf) 266 if (!conf)
245 return -ENOMEM; 267 return -ENOMEM;
246 268
247 of_id = of_match_device(of_ef_match, &spi->dev); 269 if (spi->dev.of_node) {
248 if (!of_id) 270 of_id = of_match_device(of_ef_match, &spi->dev);
249 return -ENODEV; 271 if (!of_id)
272 return -ENODEV;
273 conf->data = of_id->data;
274 } else {
275 conf->data = id_to_data(spi_get_device_id(spi));
276 if (!conf->data)
277 return -ENODEV;
278 }
250 279
251 conf->data = of_id->data;
252 conf->spi = spi; 280 conf->spi = spi;
253 conf->config = devm_gpiod_get(&spi->dev, "nconfig", GPIOD_OUT_LOW); 281 conf->config = devm_gpiod_get(&spi->dev, "nconfig", GPIOD_OUT_LOW);
254 if (IS_ERR(conf->config)) { 282 if (IS_ERR(conf->config)) {
@@ -294,7 +322,9 @@ static int altera_ps_remove(struct spi_device *spi)
294} 322}
295 323
296static const struct spi_device_id altera_ps_spi_ids[] = { 324static const struct spi_device_id altera_ps_spi_ids[] = {
297 {"cyclone-ps-spi", 0}, 325 { "cyclone-ps-spi", CYCLONE5 },
326 { "fpga-passive-serial", CYCLONE5 },
327 { "fpga-arria10-passive-serial", ARRIA10 },
298 {} 328 {}
299}; 329};
300MODULE_DEVICE_TABLE(spi, altera_ps_spi_ids); 330MODULE_DEVICE_TABLE(spi, altera_ps_spi_ids);
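The new spi_device_id entries above let the driver bind without device tree matching. A hedged sketch (not part of the patch) of a legacy board-file registration that would exercise that path; the bus number, chip select, and speed are placeholder assumptions, and the nconfig/nstat GPIO lookup tables a real board would also need are omitted:

static struct spi_board_info my_altera_ps_info __initdata = {
	.modalias	= "fpga-passive-serial",	/* matches altera_ps_spi_ids */
	.max_speed_hz	= 1000000,
	.bus_num	= 0,
	.chip_select	= 1,
};

static int __init my_board_fpga_init(void)
{
	/* registers the device; altera_ps_probe() then uses id_to_data() */
	return spi_register_board_info(&my_altera_ps_info, 1);
}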
diff --git a/drivers/fpga/dfl-fme-pr.c b/drivers/fpga/dfl-fme-pr.c
index 0b840531ef33..fe5a5578fbf7 100644
--- a/drivers/fpga/dfl-fme-pr.c
+++ b/drivers/fpga/dfl-fme-pr.c
@@ -444,10 +444,8 @@ static void pr_mgmt_uinit(struct platform_device *pdev,
444 struct dfl_feature *feature) 444 struct dfl_feature *feature)
445{ 445{
446 struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev); 446 struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
447 struct dfl_fme *priv;
448 447
449 mutex_lock(&pdata->lock); 448 mutex_lock(&pdata->lock);
450 priv = dfl_fpga_pdata_get_private(pdata);
451 449
452 dfl_fme_destroy_regions(pdata); 450 dfl_fme_destroy_regions(pdata);
453 dfl_fme_destroy_bridges(pdata); 451 dfl_fme_destroy_bridges(pdata);
diff --git a/drivers/fpga/dfl-fme-region.c b/drivers/fpga/dfl-fme-region.c
index ec134ec93f08..1eeb42af1012 100644
--- a/drivers/fpga/dfl-fme-region.c
+++ b/drivers/fpga/dfl-fme-region.c
@@ -64,7 +64,7 @@ eprobe_mgr_put:
64 64
65static int fme_region_remove(struct platform_device *pdev) 65static int fme_region_remove(struct platform_device *pdev)
66{ 66{
67 struct fpga_region *region = dev_get_drvdata(&pdev->dev); 67 struct fpga_region *region = platform_get_drvdata(pdev);
68 struct fpga_manager *mgr = region->mgr; 68 struct fpga_manager *mgr = region->mgr;
69 69
70 fpga_region_unregister(region); 70 fpga_region_unregister(region);
diff --git a/drivers/fpga/of-fpga-region.c b/drivers/fpga/of-fpga-region.c
index 122286fd255a..75f64abf9c81 100644
--- a/drivers/fpga/of-fpga-region.c
+++ b/drivers/fpga/of-fpga-region.c
@@ -421,7 +421,7 @@ static int of_fpga_region_probe(struct platform_device *pdev)
421 goto eprobe_mgr_put; 421 goto eprobe_mgr_put;
422 422
423 of_platform_populate(np, fpga_region_of_match, NULL, &region->dev); 423 of_platform_populate(np, fpga_region_of_match, NULL, &region->dev);
424 dev_set_drvdata(dev, region); 424 platform_set_drvdata(pdev, region);
425 425
426 dev_info(dev, "FPGA Region probed\n"); 426 dev_info(dev, "FPGA Region probed\n");
427 427
diff --git a/drivers/fpga/stratix10-soc.c b/drivers/fpga/stratix10-soc.c
new file mode 100644
index 000000000000..a1a09e04fab8
--- /dev/null
+++ b/drivers/fpga/stratix10-soc.c
@@ -0,0 +1,535 @@
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * FPGA Manager Driver for Intel Stratix10 SoC
4 *
5 * Copyright (C) 2018 Intel Corporation
6 */
7#include <linux/completion.h>
8#include <linux/fpga/fpga-mgr.h>
9#include <linux/firmware/intel/stratix10-svc-client.h>
10#include <linux/module.h>
11#include <linux/of.h>
12#include <linux/of_platform.h>
13
14/*
15 * FPGA programming requires a higher level of privilege (EL3), per the SoC
16 * design.
17 */
18#define NUM_SVC_BUFS 4
19#define SVC_BUF_SIZE SZ_512K
20
21/* Indicates buffer is in use if set */
22#define SVC_BUF_LOCK 0
23
24#define S10_BUFFER_TIMEOUT (msecs_to_jiffies(SVC_RECONFIG_BUFFER_TIMEOUT_MS))
25#define S10_RECONFIG_TIMEOUT (msecs_to_jiffies(SVC_RECONFIG_REQUEST_TIMEOUT_MS))
26
27/*
28 * struct s10_svc_buf
29 * buf: virtual address of buf provided by service layer
30 * lock: locked if buffer is in use
31 */
32struct s10_svc_buf {
33 char *buf;
34 unsigned long lock;
35};
36
37struct s10_priv {
38 struct stratix10_svc_chan *chan;
39 struct stratix10_svc_client client;
40 struct completion status_return_completion;
41 struct s10_svc_buf svc_bufs[NUM_SVC_BUFS];
42 unsigned long status;
43};
44
45static int s10_svc_send_msg(struct s10_priv *priv,
46 enum stratix10_svc_command_code command,
47 void *payload, u32 payload_length)
48{
49 struct stratix10_svc_chan *chan = priv->chan;
50 struct device *dev = priv->client.dev;
51 struct stratix10_svc_client_msg msg;
52 int ret;
53
54 dev_dbg(dev, "%s cmd=%d payload=%p length=%d\n",
55 __func__, command, payload, payload_length);
56
57 msg.command = command;
58 msg.payload = payload;
59 msg.payload_length = payload_length;
60
61 ret = stratix10_svc_send(chan, &msg);
62 dev_dbg(dev, "stratix10_svc_send returned status %d\n", ret);
63
64 return ret;
65}
66
67/*
68 * Free buffers allocated from the service layer's pool that are not in use.
69 * Return true when all buffers are freed.
70 */
71static bool s10_free_buffers(struct fpga_manager *mgr)
72{
73 struct s10_priv *priv = mgr->priv;
74 uint num_free = 0;
75 uint i;
76
77 for (i = 0; i < NUM_SVC_BUFS; i++) {
78 if (!priv->svc_bufs[i].buf) {
79 num_free++;
80 continue;
81 }
82
83 if (!test_and_set_bit_lock(SVC_BUF_LOCK,
84 &priv->svc_bufs[i].lock)) {
85 stratix10_svc_free_memory(priv->chan,
86 priv->svc_bufs[i].buf);
87 priv->svc_bufs[i].buf = NULL;
88 num_free++;
89 }
90 }
91
92 return num_free == NUM_SVC_BUFS;
93}
94
95/*
96 * Returns count of how many buffers are not in use.
97 */
98static uint s10_free_buffer_count(struct fpga_manager *mgr)
99{
100 struct s10_priv *priv = mgr->priv;
101 uint num_free = 0;
102 uint i;
103
104 for (i = 0; i < NUM_SVC_BUFS; i++)
105 if (!priv->svc_bufs[i].buf)
106 num_free++;
107
108 return num_free;
109}
110
111/*
112 * s10_unlock_bufs
113 * Given the returned buffer address, match that address to our buffer struct
114 * and unlock that buffer. This marks it as available to be refilled and sent
115 * (or freed).
116 * priv: private data
117 * kaddr: kernel address of buffer that was returned from service layer
118 */
119static void s10_unlock_bufs(struct s10_priv *priv, void *kaddr)
120{
121 uint i;
122
123 if (!kaddr)
124 return;
125
126 for (i = 0; i < NUM_SVC_BUFS; i++)
127 if (priv->svc_bufs[i].buf == kaddr) {
128 clear_bit_unlock(SVC_BUF_LOCK,
129 &priv->svc_bufs[i].lock);
130 return;
131 }
132
133 WARN(1, "Unknown buffer returned from service layer %p\n", kaddr);
134}
135
136/*
137 * s10_receive_callback - callback for service layer to use to provide client
138 * (this driver) messages received through the mailbox.
139 * client: service layer client struct
140 * data: message from service layer
141 */
142static void s10_receive_callback(struct stratix10_svc_client *client,
143 struct stratix10_svc_cb_data *data)
144{
145 struct s10_priv *priv = client->priv;
146 u32 status;
147 int i;
148
149 WARN_ONCE(!data, "%s: stratix10_svc_rc_data = NULL", __func__);
150
151 status = data->status;
152
153 /*
154 * Here we set status bits as we receive them. Elsewhere, we always use
155 * test_and_clear_bit() to check status in priv->status
156 */
157 for (i = 0; i <= SVC_STATUS_RECONFIG_ERROR; i++)
158 if (status & (1 << i))
159 set_bit(i, &priv->status);
160
161 if (status & BIT(SVC_STATUS_RECONFIG_BUFFER_DONE)) {
162 s10_unlock_bufs(priv, data->kaddr1);
163 s10_unlock_bufs(priv, data->kaddr2);
164 s10_unlock_bufs(priv, data->kaddr3);
165 }
166
167 complete(&priv->status_return_completion);
168}
169
170/*
171 * s10_ops_write_init - prepare for FPGA reconfiguration by requesting
172 * partial reconfig and allocating buffers from the service layer.
173 */
174static int s10_ops_write_init(struct fpga_manager *mgr,
175 struct fpga_image_info *info,
176 const char *buf, size_t count)
177{
178 struct s10_priv *priv = mgr->priv;
179 struct device *dev = priv->client.dev;
180 struct stratix10_svc_command_config_type ctype;
181 char *kbuf;
182 uint i;
183 int ret;
184
185 ctype.flags = 0;
186 if (info->flags & FPGA_MGR_PARTIAL_RECONFIG) {
187 dev_dbg(dev, "Requesting partial reconfiguration.\n");
188 ctype.flags |= BIT(COMMAND_RECONFIG_FLAG_PARTIAL);
189 } else {
190 dev_dbg(dev, "Requesting full reconfiguration.\n");
191 }
192
193 reinit_completion(&priv->status_return_completion);
194 ret = s10_svc_send_msg(priv, COMMAND_RECONFIG,
195 &ctype, sizeof(ctype));
196 if (ret < 0)
197 goto init_done;
198
199 ret = wait_for_completion_interruptible_timeout(
200 &priv->status_return_completion, S10_RECONFIG_TIMEOUT);
201 if (!ret) {
202 dev_err(dev, "timeout waiting for RECONFIG_REQUEST\n");
203 ret = -ETIMEDOUT;
204 goto init_done;
205 }
206 if (ret < 0) {
207 dev_err(dev, "error (%d) waiting for RECONFIG_REQUEST\n", ret);
208 goto init_done;
209 }
210
211 ret = 0;
212 if (!test_and_clear_bit(SVC_STATUS_RECONFIG_REQUEST_OK,
213 &priv->status)) {
214 ret = -ETIMEDOUT;
215 goto init_done;
216 }
217
218 /* Allocate buffers from the service layer's pool. */
219 for (i = 0; i < NUM_SVC_BUFS; i++) {
220 kbuf = stratix10_svc_allocate_memory(priv->chan, SVC_BUF_SIZE);
221 if (!kbuf) {
222 s10_free_buffers(mgr);
223 ret = -ENOMEM;
224 goto init_done;
225 }
226
227 priv->svc_bufs[i].buf = kbuf;
228 priv->svc_bufs[i].lock = 0;
229 }
230
231init_done:
232 stratix10_svc_done(priv->chan);
233 return ret;
234}
235
236/*
237 * s10_send_buf - send a buffer to the service layer queue
238 * mgr: fpga manager struct
239 * buf: fpga image buffer
240 * count: size of buf in bytes
 241 * Returns # of bytes transferred or -ENOBUFS if all the buffers are in use
242 * or if the service queue is full. Never returns 0.
243 */
244static int s10_send_buf(struct fpga_manager *mgr, const char *buf, size_t count)
245{
246 struct s10_priv *priv = mgr->priv;
247 struct device *dev = priv->client.dev;
248 void *svc_buf;
249 size_t xfer_sz;
250 int ret;
251 uint i;
252
 253 /* get/lock a buffer that's not being used */
254 for (i = 0; i < NUM_SVC_BUFS; i++)
255 if (!test_and_set_bit_lock(SVC_BUF_LOCK,
256 &priv->svc_bufs[i].lock))
257 break;
258
259 if (i == NUM_SVC_BUFS)
260 return -ENOBUFS;
261
262 xfer_sz = count < SVC_BUF_SIZE ? count : SVC_BUF_SIZE;
263
264 svc_buf = priv->svc_bufs[i].buf;
265 memcpy(svc_buf, buf, xfer_sz);
266 ret = s10_svc_send_msg(priv, COMMAND_RECONFIG_DATA_SUBMIT,
267 svc_buf, xfer_sz);
268 if (ret < 0) {
269 dev_err(dev,
270 "Error while sending data to service layer (%d)", ret);
271 clear_bit_unlock(SVC_BUF_LOCK, &priv->svc_bufs[i].lock);
272 return ret;
273 }
274
275 return xfer_sz;
276}
277
278/*
 279 * Send an FPGA image to privileged layers to write to the FPGA. When done
280 * sending, free all service layer buffers we allocated in write_init.
281 */
282static int s10_ops_write(struct fpga_manager *mgr, const char *buf,
283 size_t count)
284{
285 struct s10_priv *priv = mgr->priv;
286 struct device *dev = priv->client.dev;
287 long wait_status;
288 int sent = 0;
289 int ret = 0;
290
291 /*
292 * Loop waiting for buffers to be returned. When a buffer is returned,
 293 * reuse it to send more data or free it if all data has been sent.
294 */
295 while (count > 0 || s10_free_buffer_count(mgr) != NUM_SVC_BUFS) {
296 reinit_completion(&priv->status_return_completion);
297
298 if (count > 0) {
299 sent = s10_send_buf(mgr, buf, count);
300 if (sent < 0)
301 continue;
302
303 count -= sent;
304 buf += sent;
305 } else {
306 if (s10_free_buffers(mgr))
307 return 0;
308
309 ret = s10_svc_send_msg(
310 priv, COMMAND_RECONFIG_DATA_CLAIM,
311 NULL, 0);
312 if (ret < 0)
313 break;
314 }
315
316 /*
317 * If callback hasn't already happened, wait for buffers to be
318 * returned from service layer
319 */
320 wait_status = 1; /* not timed out */
321 if (!priv->status)
322 wait_status = wait_for_completion_interruptible_timeout(
323 &priv->status_return_completion,
324 S10_BUFFER_TIMEOUT);
325
326 if (test_and_clear_bit(SVC_STATUS_RECONFIG_BUFFER_DONE,
327 &priv->status) ||
328 test_and_clear_bit(SVC_STATUS_RECONFIG_BUFFER_SUBMITTED,
329 &priv->status)) {
330 ret = 0;
331 continue;
332 }
333
334 if (test_and_clear_bit(SVC_STATUS_RECONFIG_ERROR,
335 &priv->status)) {
336 dev_err(dev, "ERROR - giving up - SVC_STATUS_RECONFIG_ERROR\n");
337 ret = -EFAULT;
338 break;
339 }
340
341 if (!wait_status) {
342 dev_err(dev, "timeout waiting for svc layer buffers\n");
343 ret = -ETIMEDOUT;
344 break;
345 }
346 if (wait_status < 0) {
347 ret = wait_status;
348 dev_err(dev,
349 "error (%d) waiting for svc layer buffers\n",
350 ret);
351 break;
352 }
353 }
354
355 if (!s10_free_buffers(mgr))
356 dev_err(dev, "%s not all buffers were freed\n", __func__);
357
358 return ret;
359}
360
361static int s10_ops_write_complete(struct fpga_manager *mgr,
362 struct fpga_image_info *info)
363{
364 struct s10_priv *priv = mgr->priv;
365 struct device *dev = priv->client.dev;
366 unsigned long timeout;
367 int ret;
368
369 timeout = usecs_to_jiffies(info->config_complete_timeout_us);
370
371 do {
372 reinit_completion(&priv->status_return_completion);
373
374 ret = s10_svc_send_msg(priv, COMMAND_RECONFIG_STATUS, NULL, 0);
375 if (ret < 0)
376 break;
377
378 ret = wait_for_completion_interruptible_timeout(
379 &priv->status_return_completion, timeout);
380 if (!ret) {
381 dev_err(dev,
382 "timeout waiting for RECONFIG_COMPLETED\n");
383 ret = -ETIMEDOUT;
384 break;
385 }
386 if (ret < 0) {
387 dev_err(dev,
388 "error (%d) waiting for RECONFIG_COMPLETED\n",
389 ret);
390 break;
391 }
392 /* Not error or timeout, so ret is # of jiffies until timeout */
393 timeout = ret;
394 ret = 0;
395
396 if (test_and_clear_bit(SVC_STATUS_RECONFIG_COMPLETED,
397 &priv->status))
398 break;
399
400 if (test_and_clear_bit(SVC_STATUS_RECONFIG_ERROR,
401 &priv->status)) {
402 dev_err(dev, "ERROR - giving up - SVC_STATUS_RECONFIG_ERROR\n");
403 ret = -EFAULT;
404 break;
405 }
406 } while (1);
407
408 stratix10_svc_done(priv->chan);
409
410 return ret;
411}
412
413static enum fpga_mgr_states s10_ops_state(struct fpga_manager *mgr)
414{
415 return FPGA_MGR_STATE_UNKNOWN;
416}
417
418static const struct fpga_manager_ops s10_ops = {
419 .state = s10_ops_state,
420 .write_init = s10_ops_write_init,
421 .write = s10_ops_write,
422 .write_complete = s10_ops_write_complete,
423};
424
425static int s10_probe(struct platform_device *pdev)
426{
427 struct device *dev = &pdev->dev;
428 struct s10_priv *priv;
429 struct fpga_manager *mgr;
430 int ret;
431
432 priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
433 if (!priv)
434 return -ENOMEM;
435
436 priv->client.dev = dev;
437 priv->client.receive_cb = s10_receive_callback;
438 priv->client.priv = priv;
439
440 priv->chan = stratix10_svc_request_channel_byname(&priv->client,
441 SVC_CLIENT_FPGA);
442 if (IS_ERR(priv->chan)) {
443 dev_err(dev, "couldn't get service channel (%s)\n",
444 SVC_CLIENT_FPGA);
445 return PTR_ERR(priv->chan);
446 }
447
448 init_completion(&priv->status_return_completion);
449
450 mgr = fpga_mgr_create(dev, "Stratix10 SOC FPGA Manager",
451 &s10_ops, priv);
452 if (!mgr) {
453 dev_err(dev, "unable to create FPGA manager\n");
454 ret = -ENOMEM;
455 goto probe_err;
456 }
457
458 ret = fpga_mgr_register(mgr);
459 if (ret) {
460 dev_err(dev, "unable to register FPGA manager\n");
461 fpga_mgr_free(mgr);
462 goto probe_err;
463 }
464
465 platform_set_drvdata(pdev, mgr);
466 return ret;
467
468probe_err:
469 stratix10_svc_free_channel(priv->chan);
470 return ret;
471}
472
473static int s10_remove(struct platform_device *pdev)
474{
475 struct fpga_manager *mgr = platform_get_drvdata(pdev);
476 struct s10_priv *priv = mgr->priv;
477
478 fpga_mgr_unregister(mgr);
479 stratix10_svc_free_channel(priv->chan);
480
481 return 0;
482}
483
484static const struct of_device_id s10_of_match[] = {
485 { .compatible = "intel,stratix10-soc-fpga-mgr", },
486 {},
487};
488
489MODULE_DEVICE_TABLE(of, s10_of_match);
490
491static struct platform_driver s10_driver = {
492 .probe = s10_probe,
493 .remove = s10_remove,
494 .driver = {
495 .name = "Stratix10 SoC FPGA manager",
496 .of_match_table = of_match_ptr(s10_of_match),
497 },
498};
499
500static int __init s10_init(void)
501{
502 struct device_node *fw_np;
503 struct device_node *np;
504 int ret;
505
506 fw_np = of_find_node_by_name(NULL, "svc");
507 if (!fw_np)
508 return -ENODEV;
509
510 np = of_find_matching_node(fw_np, s10_of_match);
511 if (!np) {
512 of_node_put(fw_np);
513 return -ENODEV;
514 }
515
516 of_node_put(np);
517 ret = of_platform_populate(fw_np, s10_of_match, NULL, NULL);
518 of_node_put(fw_np);
519 if (ret)
520 return ret;
521
522 return platform_driver_register(&s10_driver);
523}
524
525static void __exit s10_exit(void)
526{
527 return platform_driver_unregister(&s10_driver);
528}
529
530module_init(s10_init);
531module_exit(s10_exit);
532
533MODULE_AUTHOR("Alan Tull <atull@kernel.org>");
534MODULE_DESCRIPTION("Intel Stratix 10 SOC FPGA Manager");
535MODULE_LICENSE("GPL v2");
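For context on how this new manager is driven, a hedged sketch (not part of the patch) of a caller going through the FPGA manager core API; the firmware name and the way the mgr pointer is obtained are assumptions, and error handling is trimmed:

static int my_program_fpga(struct device *dev, struct fpga_manager *mgr)
{
	struct fpga_image_info *info;
	int ret;

	info = fpga_image_info_alloc(dev);
	if (!info)
		return -ENOMEM;

	/* full reconfiguration; set FPGA_MGR_PARTIAL_RECONFIG in info->flags for PR */
	info->firmware_name = devm_kstrdup(dev, "my_bitstream.rbf", GFP_KERNEL);

	ret = fpga_mgr_lock(mgr);
	if (ret)
		goto out_free;

	/* ends up in s10_ops_write_init()/s10_ops_write() above */
	ret = fpga_mgr_load(mgr, info);
	fpga_mgr_unlock(mgr);

out_free:
	fpga_image_info_free(info);
	return ret;
}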
diff --git a/drivers/fpga/zynq-fpga.c b/drivers/fpga/zynq-fpga.c
index bb82efeebb9d..57b0e6775958 100644
--- a/drivers/fpga/zynq-fpga.c
+++ b/drivers/fpga/zynq-fpga.c
@@ -501,6 +501,10 @@ static int zynq_fpga_ops_write_complete(struct fpga_manager *mgr,
501 if (err) 501 if (err)
502 return err; 502 return err;
503 503
504 /* Release 'PR' control back to the ICAP */
505 zynq_fpga_write(priv, CTRL_OFFSET,
506 zynq_fpga_read(priv, CTRL_OFFSET) & ~CTRL_PCAP_PR_MASK);
507
504 err = zynq_fpga_poll_timeout(priv, INT_STS_OFFSET, intr_status, 508 err = zynq_fpga_poll_timeout(priv, INT_STS_OFFSET, intr_status,
505 intr_status & IXR_PCFG_DONE_MASK, 509 intr_status & IXR_PCFG_DONE_MASK,
506 INIT_POLL_DELAY, 510 INIT_POLL_DELAY,
diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
index fe00b12e4417..ce0ba2062723 100644
--- a/drivers/hv/channel.c
+++ b/drivers/hv/channel.c
@@ -711,7 +711,6 @@ int vmbus_disconnect_ring(struct vmbus_channel *channel)
711 /* Snapshot the list of subchannels */ 711 /* Snapshot the list of subchannels */
712 spin_lock_irqsave(&channel->lock, flags); 712 spin_lock_irqsave(&channel->lock, flags);
713 list_splice_init(&channel->sc_list, &list); 713 list_splice_init(&channel->sc_list, &list);
714 channel->num_sc = 0;
715 spin_unlock_irqrestore(&channel->lock, flags); 714 spin_unlock_irqrestore(&channel->lock, flags);
716 715
717 list_for_each_entry_safe(cur_channel, tmp, &list, sc_list) { 716 list_for_each_entry_safe(cur_channel, tmp, &list, sc_list) {
diff --git a/drivers/hv/channel_mgmt.c b/drivers/hv/channel_mgmt.c
index edd34c167a9b..d01689079e9b 100644
--- a/drivers/hv/channel_mgmt.c
+++ b/drivers/hv/channel_mgmt.c
@@ -405,7 +405,6 @@ void hv_process_channel_removal(struct vmbus_channel *channel)
405 primary_channel = channel->primary_channel; 405 primary_channel = channel->primary_channel;
406 spin_lock_irqsave(&primary_channel->lock, flags); 406 spin_lock_irqsave(&primary_channel->lock, flags);
407 list_del(&channel->sc_list); 407 list_del(&channel->sc_list);
408 primary_channel->num_sc--;
409 spin_unlock_irqrestore(&primary_channel->lock, flags); 408 spin_unlock_irqrestore(&primary_channel->lock, flags);
410 } 409 }
411 410
@@ -1302,49 +1301,6 @@ cleanup:
1302 return ret; 1301 return ret;
1303} 1302}
1304 1303
1305/*
1306 * Retrieve the (sub) channel on which to send an outgoing request.
1307 * When a primary channel has multiple sub-channels, we try to
1308 * distribute the load equally amongst all available channels.
1309 */
1310struct vmbus_channel *vmbus_get_outgoing_channel(struct vmbus_channel *primary)
1311{
1312 struct list_head *cur, *tmp;
1313 int cur_cpu;
1314 struct vmbus_channel *cur_channel;
1315 struct vmbus_channel *outgoing_channel = primary;
1316 int next_channel;
1317 int i = 1;
1318
1319 if (list_empty(&primary->sc_list))
1320 return outgoing_channel;
1321
1322 next_channel = primary->next_oc++;
1323
1324 if (next_channel > (primary->num_sc)) {
1325 primary->next_oc = 0;
1326 return outgoing_channel;
1327 }
1328
1329 cur_cpu = hv_cpu_number_to_vp_number(smp_processor_id());
1330 list_for_each_safe(cur, tmp, &primary->sc_list) {
1331 cur_channel = list_entry(cur, struct vmbus_channel, sc_list);
1332 if (cur_channel->state != CHANNEL_OPENED_STATE)
1333 continue;
1334
1335 if (cur_channel->target_vp == cur_cpu)
1336 return cur_channel;
1337
1338 if (i == next_channel)
1339 return cur_channel;
1340
1341 i++;
1342 }
1343
1344 return outgoing_channel;
1345}
1346EXPORT_SYMBOL_GPL(vmbus_get_outgoing_channel);
1347
1348static void invoke_sc_cb(struct vmbus_channel *primary_channel) 1304static void invoke_sc_cb(struct vmbus_channel *primary_channel)
1349{ 1305{
1350 struct list_head *cur, *tmp; 1306 struct list_head *cur, *tmp;
diff --git a/drivers/hv/hv.c b/drivers/hv/hv.c
index 11273cd384d6..632d25674e7f 100644
--- a/drivers/hv/hv.c
+++ b/drivers/hv/hv.c
@@ -33,9 +33,7 @@
33#include "hyperv_vmbus.h" 33#include "hyperv_vmbus.h"
34 34
35/* The one and only */ 35/* The one and only */
36struct hv_context hv_context = { 36struct hv_context hv_context;
37 .synic_initialized = false,
38};
39 37
40/* 38/*
41 * If false, we're using the old mechanism for stimer0 interrupts 39 * If false, we're using the old mechanism for stimer0 interrupts
@@ -326,8 +324,6 @@ int hv_synic_init(unsigned int cpu)
326 324
327 hv_set_synic_state(sctrl.as_uint64); 325 hv_set_synic_state(sctrl.as_uint64);
328 326
329 hv_context.synic_initialized = true;
330
331 /* 327 /*
332 * Register the per-cpu clockevent source. 328 * Register the per-cpu clockevent source.
333 */ 329 */
@@ -373,7 +369,8 @@ int hv_synic_cleanup(unsigned int cpu)
373 bool channel_found = false; 369 bool channel_found = false;
374 unsigned long flags; 370 unsigned long flags;
375 371
376 if (!hv_context.synic_initialized) 372 hv_get_synic_state(sctrl.as_uint64);
373 if (sctrl.enable != 1)
377 return -EFAULT; 374 return -EFAULT;
378 375
379 /* 376 /*
@@ -435,7 +432,6 @@ int hv_synic_cleanup(unsigned int cpu)
435 hv_set_siefp(siefp.as_uint64); 432 hv_set_siefp(siefp.as_uint64);
436 433
437 /* Disable the global synic bit */ 434 /* Disable the global synic bit */
438 hv_get_synic_state(sctrl.as_uint64);
439 sctrl.enable = 0; 435 sctrl.enable = 0;
440 hv_set_synic_state(sctrl.as_uint64); 436 hv_set_synic_state(sctrl.as_uint64);
441 437
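The hv.c hunks above drop the cached synic_initialized flag and instead derive the state from the SCONTROL register itself. A minimal sketch of that check factored into a helper (the helper name is hypothetical; the union and accessor are the ones used in the hunk):

#include <asm/mshyperv.h>

static bool example_synic_enabled(void)
{
	union hv_synic_scontrol sctrl;

	/* Query the hardware rather than trusting a cached software flag. */
	hv_get_synic_state(sctrl.as_uint64);

	return sctrl.enable == 1;
}

Deriving the answer from the register avoids the flag going stale across the per-CPU init/cleanup paths.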
diff --git a/drivers/hv/hv_kvp.c b/drivers/hv/hv_kvp.c
index d6106e1a0d4a..5054d1105236 100644
--- a/drivers/hv/hv_kvp.c
+++ b/drivers/hv/hv_kvp.c
@@ -437,7 +437,7 @@ kvp_send_key(struct work_struct *dummy)
437 val32 = in_msg->body.kvp_set.data.value_u32; 437 val32 = in_msg->body.kvp_set.data.value_u32;
438 message->body.kvp_set.data.value_size = 438 message->body.kvp_set.data.value_size =
439 sprintf(message->body.kvp_set.data.value, 439 sprintf(message->body.kvp_set.data.value,
440 "%d", val32) + 1; 440 "%u", val32) + 1;
441 break; 441 break;
442 442
443 case REG_U64: 443 case REG_U64:
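The hv_kvp.c change swaps "%d" for "%u" because val32 is unsigned; with "%d", any value above INT_MAX is reinterpreted as negative before it is handed to userspace. A small standalone illustration (plain userspace C, not driver code):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t val32 = 3000000000u;	/* above INT_MAX */
	char buf[32];

	sprintf(buf, "%d", val32);	/* old format: prints "-1294967296" */
	printf("signed  : %s\n", buf);

	sprintf(buf, "%u", val32);	/* fixed format: prints "3000000000" */
	printf("unsigned: %s\n", buf);

	return 0;
}

gcc flags the first sprintf() with -Wformat, which is exactly the mismatch the patch removes.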
diff --git a/drivers/hv/hv_util.c b/drivers/hv/hv_util.c
index 423205077bf6..f10eeb120c8b 100644
--- a/drivers/hv/hv_util.c
+++ b/drivers/hv/hv_util.c
@@ -483,7 +483,7 @@ MODULE_DEVICE_TABLE(vmbus, id_table);
483 483
484/* The one and only one */ 484/* The one and only one */
485static struct hv_driver util_drv = { 485static struct hv_driver util_drv = {
486 .name = "hv_util", 486 .name = "hv_utils",
487 .id_table = id_table, 487 .id_table = id_table,
488 .probe = util_probe, 488 .probe = util_probe,
489 .remove = util_remove, 489 .remove = util_remove,
diff --git a/drivers/hv/hyperv_vmbus.h b/drivers/hv/hyperv_vmbus.h
index ea201034b248..a1f6ce6e5974 100644
--- a/drivers/hv/hyperv_vmbus.h
+++ b/drivers/hv/hyperv_vmbus.h
@@ -162,8 +162,6 @@ struct hv_context {
162 162
163 void *tsc_page; 163 void *tsc_page;
164 164
165 bool synic_initialized;
166
167 struct hv_per_cpu_context __percpu *cpu_context; 165 struct hv_per_cpu_context __percpu *cpu_context;
168 166
169 /* 167 /*
diff --git a/drivers/hwtracing/coresight/coresight-etb10.c b/drivers/hwtracing/coresight/coresight-etb10.c
index 824be0c5f592..105782ea64c7 100644
--- a/drivers/hwtracing/coresight/coresight-etb10.c
+++ b/drivers/hwtracing/coresight/coresight-etb10.c
@@ -136,6 +136,11 @@ static void __etb_enable_hw(struct etb_drvdata *drvdata)
136 136
137static int etb_enable_hw(struct etb_drvdata *drvdata) 137static int etb_enable_hw(struct etb_drvdata *drvdata)
138{ 138{
139 int rc = coresight_claim_device(drvdata->base);
140
141 if (rc)
142 return rc;
143
139 __etb_enable_hw(drvdata); 144 __etb_enable_hw(drvdata);
140 return 0; 145 return 0;
141} 146}
@@ -223,7 +228,7 @@ static int etb_enable(struct coresight_device *csdev, u32 mode, void *data)
223 return 0; 228 return 0;
224} 229}
225 230
226static void etb_disable_hw(struct etb_drvdata *drvdata) 231static void __etb_disable_hw(struct etb_drvdata *drvdata)
227{ 232{
228 u32 ffcr; 233 u32 ffcr;
229 234
@@ -313,6 +318,13 @@ static void etb_dump_hw(struct etb_drvdata *drvdata)
313 CS_LOCK(drvdata->base); 318 CS_LOCK(drvdata->base);
314} 319}
315 320
321static void etb_disable_hw(struct etb_drvdata *drvdata)
322{
323 __etb_disable_hw(drvdata);
324 etb_dump_hw(drvdata);
325 coresight_disclaim_device(drvdata->base);
326}
327
316static void etb_disable(struct coresight_device *csdev) 328static void etb_disable(struct coresight_device *csdev)
317{ 329{
318 struct etb_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); 330 struct etb_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
@@ -323,7 +335,6 @@ static void etb_disable(struct coresight_device *csdev)
323 /* Disable the ETB only if it needs to */ 335 /* Disable the ETB only if it needs to */
324 if (drvdata->mode != CS_MODE_DISABLED) { 336 if (drvdata->mode != CS_MODE_DISABLED) {
325 etb_disable_hw(drvdata); 337 etb_disable_hw(drvdata);
326 etb_dump_hw(drvdata);
327 drvdata->mode = CS_MODE_DISABLED; 338 drvdata->mode = CS_MODE_DISABLED;
328 } 339 }
329 spin_unlock_irqrestore(&drvdata->spinlock, flags); 340 spin_unlock_irqrestore(&drvdata->spinlock, flags);
@@ -402,7 +413,7 @@ static unsigned long etb_update_buffer(struct coresight_device *csdev,
402 413
403 capacity = drvdata->buffer_depth * ETB_FRAME_SIZE_WORDS; 414 capacity = drvdata->buffer_depth * ETB_FRAME_SIZE_WORDS;
404 415
405 etb_disable_hw(drvdata); 416 __etb_disable_hw(drvdata);
406 CS_UNLOCK(drvdata->base); 417 CS_UNLOCK(drvdata->base);
407 418
408 /* unit is in words, not bytes */ 419 /* unit is in words, not bytes */
@@ -510,7 +521,7 @@ static unsigned long etb_update_buffer(struct coresight_device *csdev,
510 handle->head = (cur * PAGE_SIZE) + offset; 521 handle->head = (cur * PAGE_SIZE) + offset;
511 to_read = buf->nr_pages << PAGE_SHIFT; 522 to_read = buf->nr_pages << PAGE_SHIFT;
512 } 523 }
513 etb_enable_hw(drvdata); 524 __etb_enable_hw(drvdata);
514 CS_LOCK(drvdata->base); 525 CS_LOCK(drvdata->base);
515 526
516 return to_read; 527 return to_read;
@@ -534,9 +545,9 @@ static void etb_dump(struct etb_drvdata *drvdata)
534 545
535 spin_lock_irqsave(&drvdata->spinlock, flags); 546 spin_lock_irqsave(&drvdata->spinlock, flags);
536 if (drvdata->mode == CS_MODE_SYSFS) { 547 if (drvdata->mode == CS_MODE_SYSFS) {
537 etb_disable_hw(drvdata); 548 __etb_disable_hw(drvdata);
538 etb_dump_hw(drvdata); 549 etb_dump_hw(drvdata);
539 etb_enable_hw(drvdata); 550 __etb_enable_hw(drvdata);
540 } 551 }
541 spin_unlock_irqrestore(&drvdata->spinlock, flags); 552 spin_unlock_irqrestore(&drvdata->spinlock, flags);
542 553
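The etb10 hunks split the register programming (__etb_enable_hw()/__etb_disable_hw()) from ownership handling so that the CLAIM tag protocol can arbitrate between the kernel and external agents. A minimal sketch of the wrapper pattern, assuming only the coresight_claim_device()/coresight_disclaim_device() helpers used in the hunk (the example_* names are placeholders):

#include <linux/coresight.h>

static void __example_enable_hw(void __iomem *base)
{
	/* pure register programming, no ownership handling */
}

static void __example_disable_hw(void __iomem *base)
{
	/* flush and stop the trace sink */
}

static int example_enable_hw(void __iomem *base)
{
	/* Take the CLAIM tag first; bail out if an external agent owns it. */
	int rc = coresight_claim_device(base);

	if (rc)
		return rc;

	__example_enable_hw(base);
	return 0;
}

static void example_disable_hw(void __iomem *base)
{
	__example_disable_hw(base);
	/* Release the CLAIM tag only once the hardware is quiesced. */
	coresight_disclaim_device(base);
}

Internal paths such as etb_update_buffer() and etb_dump() call the __ variants directly because the claim is already held at that point.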
diff --git a/drivers/hwtracing/coresight/coresight-etm3x.c b/drivers/hwtracing/coresight/coresight-etm3x.c
index fd5c4cca7db5..9a63e87ea5f3 100644
--- a/drivers/hwtracing/coresight/coresight-etm3x.c
+++ b/drivers/hwtracing/coresight/coresight-etm3x.c
@@ -363,15 +363,16 @@ static int etm_enable_hw(struct etm_drvdata *drvdata)
363 363
364 CS_UNLOCK(drvdata->base); 364 CS_UNLOCK(drvdata->base);
365 365
366 rc = coresight_claim_device_unlocked(drvdata->base);
367 if (rc)
368 goto done;
369
366 /* Turn engine on */ 370 /* Turn engine on */
367 etm_clr_pwrdwn(drvdata); 371 etm_clr_pwrdwn(drvdata);
368 /* Apply power to trace registers */ 372 /* Apply power to trace registers */
369 etm_set_pwrup(drvdata); 373 etm_set_pwrup(drvdata);
370 /* Make sure all registers are accessible */ 374 /* Make sure all registers are accessible */
371 etm_os_unlock(drvdata); 375 etm_os_unlock(drvdata);
372 rc = coresight_claim_device_unlocked(drvdata->base);
373 if (rc)
374 goto done;
375 376
376 etm_set_prog(drvdata); 377 etm_set_prog(drvdata);
377 378
@@ -422,8 +423,6 @@ static int etm_enable_hw(struct etm_drvdata *drvdata)
422 etm_clr_prog(drvdata); 423 etm_clr_prog(drvdata);
423 424
424done: 425done:
425 if (rc)
426 etm_set_pwrdwn(drvdata);
427 CS_LOCK(drvdata->base); 426 CS_LOCK(drvdata->base);
428 427
429 dev_dbg(drvdata->dev, "cpu: %d enable smp call done: %d\n", 428 dev_dbg(drvdata->dev, "cpu: %d enable smp call done: %d\n",
@@ -577,9 +576,9 @@ static void etm_disable_hw(void *info)
577 for (i = 0; i < drvdata->nr_cntr; i++) 576 for (i = 0; i < drvdata->nr_cntr; i++)
578 config->cntr_val[i] = etm_readl(drvdata, ETMCNTVRn(i)); 577 config->cntr_val[i] = etm_readl(drvdata, ETMCNTVRn(i));
579 578
579 etm_set_pwrdwn(drvdata);
580 coresight_disclaim_device_unlocked(drvdata->base); 580 coresight_disclaim_device_unlocked(drvdata->base);
581 581
582 etm_set_pwrdwn(drvdata);
583 CS_LOCK(drvdata->base); 582 CS_LOCK(drvdata->base);
584 583
585 dev_dbg(drvdata->dev, "cpu: %d disable smp call done\n", drvdata->cpu); 584 dev_dbg(drvdata->dev, "cpu: %d disable smp call done\n", drvdata->cpu);
@@ -602,6 +601,7 @@ static void etm_disable_perf(struct coresight_device *csdev)
602 * power down the tracer. 601 * power down the tracer.
603 */ 602 */
604 etm_set_pwrdwn(drvdata); 603 etm_set_pwrdwn(drvdata);
604 coresight_disclaim_device_unlocked(drvdata->base);
605 605
606 CS_LOCK(drvdata->base); 606 CS_LOCK(drvdata->base);
607} 607}
diff --git a/drivers/hwtracing/coresight/coresight-stm.c b/drivers/hwtracing/coresight/coresight-stm.c
index 35d6f9709274..ef339ff22090 100644
--- a/drivers/hwtracing/coresight/coresight-stm.c
+++ b/drivers/hwtracing/coresight/coresight-stm.c
@@ -856,7 +856,7 @@ static int stm_probe(struct amba_device *adev, const struct amba_id *id)
856 856
857 if (stm_register_device(dev, &drvdata->stm, THIS_MODULE)) { 857 if (stm_register_device(dev, &drvdata->stm, THIS_MODULE)) {
858 dev_info(dev, 858 dev_info(dev,
859 "stm_register_device failed, probing deffered\n"); 859 "stm_register_device failed, probing deferred\n");
860 return -EPROBE_DEFER; 860 return -EPROBE_DEFER;
861 } 861 }
862 862
diff --git a/drivers/hwtracing/coresight/coresight-tmc-etf.c b/drivers/hwtracing/coresight/coresight-tmc-etf.c
index 53fc83b72a49..a5f053f2db2c 100644
--- a/drivers/hwtracing/coresight/coresight-tmc-etf.c
+++ b/drivers/hwtracing/coresight/coresight-tmc-etf.c
@@ -86,8 +86,8 @@ static void __tmc_etb_disable_hw(struct tmc_drvdata *drvdata)
86 86
87static void tmc_etb_disable_hw(struct tmc_drvdata *drvdata) 87static void tmc_etb_disable_hw(struct tmc_drvdata *drvdata)
88{ 88{
89 coresight_disclaim_device(drvdata);
90 __tmc_etb_disable_hw(drvdata); 89 __tmc_etb_disable_hw(drvdata);
90 coresight_disclaim_device(drvdata->base);
91} 91}
92 92
93static void __tmc_etf_enable_hw(struct tmc_drvdata *drvdata) 93static void __tmc_etf_enable_hw(struct tmc_drvdata *drvdata)
diff --git a/drivers/hwtracing/intel_th/msu.c b/drivers/hwtracing/intel_th/msu.c
index d293e55553bd..ba7aaf421f36 100644
--- a/drivers/hwtracing/intel_th/msu.c
+++ b/drivers/hwtracing/intel_th/msu.c
@@ -1423,7 +1423,8 @@ nr_pages_store(struct device *dev, struct device_attribute *attr,
1423 if (!end) 1423 if (!end)
1424 break; 1424 break;
1425 1425
1426 len -= end - p; 1426 /* consume the number and the following comma, hence +1 */
1427 len -= end - p + 1;
1427 p = end + 1; 1428 p = end + 1;
1428 } while (len); 1429 } while (len);
1429 1430
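The msu.c fix accounts for the separating comma when walking a comma-separated list of window sizes; without the "+ 1" the remaining length is overestimated by one byte per entry. A standalone sketch of the corrected accounting (plain userspace C, strtoul() instead of the driver's parser):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
	const char *p = "8,32,64";
	size_t len = strlen(p);

	while (len) {
		char *end;
		unsigned long val = strtoul(p, &end, 10);

		printf("window: %lu\n", val);
		if (!*end)			/* no trailing comma: done */
			break;

		len -= end - p + 1;		/* the number plus the ',' */
		p = end + 1;
	}
	return 0;
}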
diff --git a/drivers/hwtracing/stm/policy.c b/drivers/hwtracing/stm/policy.c
index 0910ec807187..4b9e44b227d8 100644
--- a/drivers/hwtracing/stm/policy.c
+++ b/drivers/hwtracing/stm/policy.c
@@ -440,10 +440,8 @@ stp_policy_make(struct config_group *group, const char *name)
440 440
441 stm->policy = kzalloc(sizeof(*stm->policy), GFP_KERNEL); 441 stm->policy = kzalloc(sizeof(*stm->policy), GFP_KERNEL);
442 if (!stm->policy) { 442 if (!stm->policy) {
443 mutex_unlock(&stm->policy_mutex); 443 ret = ERR_PTR(-ENOMEM);
444 stm_put_protocol(pdrv); 444 goto unlock_policy;
445 stm_put_device(stm);
446 return ERR_PTR(-ENOMEM);
447 } 445 }
448 446
449 config_group_init_type_name(&stm->policy->group, name, 447 config_group_init_type_name(&stm->policy->group, name,
@@ -458,7 +456,11 @@ unlock_policy:
458 mutex_unlock(&stm->policy_mutex); 456 mutex_unlock(&stm->policy_mutex);
459 457
460 if (IS_ERR(ret)) { 458 if (IS_ERR(ret)) {
461 stm_put_protocol(stm->pdrv); 459 /*
460 * pdrv and stm->pdrv at this point can be quite different,
461 * and only one of them needs to be 'put'
462 */
463 stm_put_protocol(pdrv);
462 stm_put_device(stm); 464 stm_put_device(stm);
463 } 465 }
464 466
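The policy.c hunks consolidate the allocation failure onto the common unlock_policy label and, on the error path, put the reference that this call itself took (pdrv) rather than whatever stm->pdrv currently points at. A minimal sketch of that rule with hypothetical stand-ins (example_get_protocol()/example_put_protocol() are not real stm helpers):

#include <linux/err.h>

struct example_pdrv;					/* stands in for the protocol driver */
struct example_stm { struct example_pdrv *pdrv; };	/* stands in for struct stm_device   */

struct example_pdrv *example_get_protocol(const char *name);
void example_put_protocol(struct example_pdrv *pdrv);

static void *example_policy_make(struct example_stm *stm, const char *proto)
{
	struct example_pdrv *pdrv = example_get_protocol(proto); /* reference taken here */
	void *ret = NULL;

	if (!pdrv)
		return ERR_PTR(-ENODEV);

	/* ... locking, allocation and setup that may set ret to an ERR_PTR ... */

	if (IS_ERR(ret)) {
		/*
		 * Drop the reference *this* call took; stm->pdrv may already
		 * point at a different, still-referenced protocol driver.
		 */
		example_put_protocol(pdrv);
	} else {
		stm->pdrv = pdrv;	/* ownership handed over on success */
	}

	return ret;
}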
diff --git a/drivers/iommu/dmar.c b/drivers/iommu/dmar.c
index d9c748b6f9e4..1edf2a251336 100644
--- a/drivers/iommu/dmar.c
+++ b/drivers/iommu/dmar.c
@@ -2042,3 +2042,28 @@ int dmar_device_remove(acpi_handle handle)
2042{ 2042{
2043 return dmar_device_hotplug(handle, false); 2043 return dmar_device_hotplug(handle, false);
2044} 2044}
2045
2046/*
2047 * dmar_platform_optin - Is %DMA_CTRL_PLATFORM_OPT_IN_FLAG set in DMAR table
2048 *
2049 * Returns true if the platform has %DMA_CTRL_PLATFORM_OPT_IN_FLAG set in
2050 * the ACPI DMAR table. This means that the platform boot firmware has made
2051 * sure no device can issue DMA outside of RMRR regions.
2052 */
2053bool dmar_platform_optin(void)
2054{
2055 struct acpi_table_dmar *dmar;
2056 acpi_status status;
2057 bool ret;
2058
2059 status = acpi_get_table(ACPI_SIG_DMAR, 0,
2060 (struct acpi_table_header **)&dmar);
2061 if (ACPI_FAILURE(status))
2062 return false;
2063
2064 ret = !!(dmar->flags & DMAR_PLATFORM_OPT_IN);
2065 acpi_put_table((struct acpi_table_header *)dmar);
2066
2067 return ret;
2068}
2069EXPORT_SYMBOL_GPL(dmar_platform_optin);
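dmar_platform_optin() above uses the usual acpi_get_table()/acpi_put_table() bracket: map the table, read the field of interest, drop the mapping before returning. The same pattern, generalized to an arbitrary DMAR flag mask as a sketch (only ACPI_SIG_DMAR and the accessors are taken from the hunk):

#include <linux/acpi.h>

static bool example_dmar_flag_set(u32 mask)
{
	struct acpi_table_dmar *dmar;
	acpi_status status;
	bool ret;

	status = acpi_get_table(ACPI_SIG_DMAR, 0,
				(struct acpi_table_header **)&dmar);
	if (ACPI_FAILURE(status))
		return false;		/* no DMAR table: treat as not set */

	ret = !!(dmar->flags & mask);
	acpi_put_table((struct acpi_table_header *)dmar);

	return ret;
}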
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index 0ad67d65bbce..63b6ce78492a 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -184,6 +184,7 @@ static int rwbf_quirk;
184 */ 184 */
185static int force_on = 0; 185static int force_on = 0;
186int intel_iommu_tboot_noforce; 186int intel_iommu_tboot_noforce;
187static int no_platform_optin;
187 188
188#define ROOT_ENTRY_NR (VTD_PAGE_SIZE/sizeof(struct root_entry)) 189#define ROOT_ENTRY_NR (VTD_PAGE_SIZE/sizeof(struct root_entry))
189 190
@@ -503,6 +504,7 @@ static int __init intel_iommu_setup(char *str)
503 pr_info("IOMMU enabled\n"); 504 pr_info("IOMMU enabled\n");
504 } else if (!strncmp(str, "off", 3)) { 505 } else if (!strncmp(str, "off", 3)) {
505 dmar_disabled = 1; 506 dmar_disabled = 1;
507 no_platform_optin = 1;
506 pr_info("IOMMU disabled\n"); 508 pr_info("IOMMU disabled\n");
507 } else if (!strncmp(str, "igfx_off", 8)) { 509 } else if (!strncmp(str, "igfx_off", 8)) {
508 dmar_map_gfx = 0; 510 dmar_map_gfx = 0;
@@ -1471,7 +1473,8 @@ static void iommu_enable_dev_iotlb(struct device_domain_info *info)
1471 if (info->pri_supported && !pci_reset_pri(pdev) && !pci_enable_pri(pdev, 32)) 1473 if (info->pri_supported && !pci_reset_pri(pdev) && !pci_enable_pri(pdev, 32))
1472 info->pri_enabled = 1; 1474 info->pri_enabled = 1;
1473#endif 1475#endif
1474 if (info->ats_supported && !pci_enable_ats(pdev, VTD_PAGE_SHIFT)) { 1476 if (!pdev->untrusted && info->ats_supported &&
1477 !pci_enable_ats(pdev, VTD_PAGE_SHIFT)) {
1475 info->ats_enabled = 1; 1478 info->ats_enabled = 1;
1476 domain_update_iotlb(info->domain); 1479 domain_update_iotlb(info->domain);
1477 info->ats_qdep = pci_ats_queue_depth(pdev); 1480 info->ats_qdep = pci_ats_queue_depth(pdev);
@@ -2895,6 +2898,13 @@ static int iommu_should_identity_map(struct device *dev, int startup)
2895 if (device_is_rmrr_locked(dev)) 2898 if (device_is_rmrr_locked(dev))
2896 return 0; 2899 return 0;
2897 2900
2901 /*
2902 * Prevent any device marked as untrusted from getting
2903 * placed into the statically identity mapping domain.
2904 */
2905 if (pdev->untrusted)
2906 return 0;
2907
2898 if ((iommu_identity_mapping & IDENTMAP_AZALIA) && IS_AZALIA(pdev)) 2908 if ((iommu_identity_mapping & IDENTMAP_AZALIA) && IS_AZALIA(pdev))
2899 return 1; 2909 return 1;
2900 2910
@@ -4722,14 +4732,54 @@ const struct attribute_group *intel_iommu_groups[] = {
4722 NULL, 4732 NULL,
4723}; 4733};
4724 4734
4735static int __init platform_optin_force_iommu(void)
4736{
4737 struct pci_dev *pdev = NULL;
4738 bool has_untrusted_dev = false;
4739
4740 if (!dmar_platform_optin() || no_platform_optin)
4741 return 0;
4742
4743 for_each_pci_dev(pdev) {
4744 if (pdev->untrusted) {
4745 has_untrusted_dev = true;
4746 break;
4747 }
4748 }
4749
4750 if (!has_untrusted_dev)
4751 return 0;
4752
4753 if (no_iommu || dmar_disabled)
4754 pr_info("Intel-IOMMU force enabled due to platform opt in\n");
4755
4756 /*
4757 * If Intel-IOMMU is disabled by default, we will apply identity
4758 * map for all devices except those marked as being untrusted.
4759 */
4760 if (dmar_disabled)
4761 iommu_identity_mapping |= IDENTMAP_ALL;
4762
4763 dmar_disabled = 0;
4764#if defined(CONFIG_X86) && defined(CONFIG_SWIOTLB)
4765 swiotlb = 0;
4766#endif
4767 no_iommu = 0;
4768
4769 return 1;
4770}
4771
4725int __init intel_iommu_init(void) 4772int __init intel_iommu_init(void)
4726{ 4773{
4727 int ret = -ENODEV; 4774 int ret = -ENODEV;
4728 struct dmar_drhd_unit *drhd; 4775 struct dmar_drhd_unit *drhd;
4729 struct intel_iommu *iommu; 4776 struct intel_iommu *iommu;
4730 4777
4731 /* VT-d is required for a TXT/tboot launch, so enforce that */ 4778 /*
4732 force_on = tboot_force_iommu(); 4779 * Intel IOMMU is required for a TXT/tboot launch or platform
4780 * opt in, so enforce that.
4781 */
4782 force_on = tboot_force_iommu() || platform_optin_force_iommu();
4733 4783
4734 if (iommu_init_mempool()) { 4784 if (iommu_init_mempool()) {
4735 if (force_on) 4785 if (force_on)
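platform_optin_force_iommu() above only forces the IOMMU on when at least one PCI device is flagged as untrusted. A minimal sketch of that scan on its own (the helper name is hypothetical; pdev->untrusted is the flag introduced alongside this series, and the sketch lets the iterator run to completion rather than breaking early):

#include <linux/pci.h>

static bool example_any_untrusted_pci_dev(void)
{
	struct pci_dev *pdev = NULL;
	bool found = false;

	for_each_pci_dev(pdev) {
		if (pdev->untrusted)
			found = true;	/* e.g. behind an externally exposed port */
	}

	return found;
}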
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index 3726eacdf65d..f417b06e11c5 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -513,6 +513,14 @@ config MISC_RTSX
513 tristate 513 tristate
514 default MISC_RTSX_PCI || MISC_RTSX_USB 514 default MISC_RTSX_PCI || MISC_RTSX_USB
515 515
516config PVPANIC
517 tristate "pvpanic device support"
518 depends on HAS_IOMEM && (ACPI || OF)
519 help
520 This driver provides support for the pvpanic device. pvpanic is
521 a paravirtualized device provided by QEMU; it lets a virtual machine
522 (guest) communicate panic events to the host.
523
516source "drivers/misc/c2port/Kconfig" 524source "drivers/misc/c2port/Kconfig"
517source "drivers/misc/eeprom/Kconfig" 525source "drivers/misc/eeprom/Kconfig"
518source "drivers/misc/cb710/Kconfig" 526source "drivers/misc/cb710/Kconfig"
diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile
index fe3134cf3008..e39ccbbc1b3a 100644
--- a/drivers/misc/Makefile
+++ b/drivers/misc/Makefile
@@ -57,4 +57,5 @@ obj-$(CONFIG_ASPEED_LPC_CTRL) += aspeed-lpc-ctrl.o
57obj-$(CONFIG_ASPEED_LPC_SNOOP) += aspeed-lpc-snoop.o 57obj-$(CONFIG_ASPEED_LPC_SNOOP) += aspeed-lpc-snoop.o
58obj-$(CONFIG_PCI_ENDPOINT_TEST) += pci_endpoint_test.o 58obj-$(CONFIG_PCI_ENDPOINT_TEST) += pci_endpoint_test.o
59obj-$(CONFIG_OCXL) += ocxl/ 59obj-$(CONFIG_OCXL) += ocxl/
60obj-y += cardreader/ 60obj-y += cardreader/
61obj-$(CONFIG_PVPANIC) += pvpanic.o
diff --git a/drivers/misc/altera-stapl/altera.c b/drivers/misc/altera-stapl/altera.c
index ef83a9078646..d2ed3b9728b7 100644
--- a/drivers/misc/altera-stapl/altera.c
+++ b/drivers/misc/altera-stapl/altera.c
@@ -2176,8 +2176,7 @@ static int altera_get_note(u8 *p, s32 program_size,
2176 key_ptr = &p[note_strings + 2176 key_ptr = &p[note_strings +
2177 get_unaligned_be32( 2177 get_unaligned_be32(
2178 &p[note_table + (8 * i)])]; 2178 &p[note_table + (8 * i)])];
2179 if ((strncasecmp(key, key_ptr, strlen(key_ptr)) == 0) && 2179 if (key && !strncasecmp(key, key_ptr, strlen(key_ptr))) {
2180 (key != NULL)) {
2181 status = 0; 2180 status = 0;
2182 2181
2183 value_ptr = &p[note_strings + 2182 value_ptr = &p[note_strings +
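The altera.c fix moves the key != NULL test in front of the strncasecmp() call so the && short-circuits before the pointer is dereferenced. A tiny standalone illustration (plain userspace C):

#include <stdio.h>
#include <string.h>
#include <strings.h>

int main(void)
{
	const char *key = NULL;		/* caller passed no key */
	const char *key_ptr = "NOTE";

	/* Old order dereferenced key first:
	 *	if (!strncasecmp(key, key_ptr, strlen(key_ptr)) && key) ...
	 * which crashes before the NULL test is ever reached. */

	/* Fixed order: && short-circuits, strncasecmp() never sees NULL. */
	if (key && !strncasecmp(key, key_ptr, strlen(key_ptr)))
		printf("matched\n");
	else
		printf("no key\n");

	return 0;
}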
diff --git a/drivers/misc/genwqe/card_debugfs.c b/drivers/misc/genwqe/card_debugfs.c
index c6b82f09b3ba..7c713e01d198 100644
--- a/drivers/misc/genwqe/card_debugfs.c
+++ b/drivers/misc/genwqe/card_debugfs.c
@@ -33,19 +33,6 @@
33#include "card_base.h" 33#include "card_base.h"
34#include "card_ddcb.h" 34#include "card_ddcb.h"
35 35
36#define GENWQE_DEBUGFS_RO(_name, _showfn) \
37 static int genwqe_debugfs_##_name##_open(struct inode *inode, \
38 struct file *file) \
39 { \
40 return single_open(file, _showfn, inode->i_private); \
41 } \
42 static const struct file_operations genwqe_##_name##_fops = { \
43 .open = genwqe_debugfs_##_name##_open, \
44 .read = seq_read, \
45 .llseek = seq_lseek, \
46 .release = single_release, \
47 }
48
49static void dbg_uidn_show(struct seq_file *s, struct genwqe_reg *regs, 36static void dbg_uidn_show(struct seq_file *s, struct genwqe_reg *regs,
50 int entries) 37 int entries)
51{ 38{
@@ -87,26 +74,26 @@ static int curr_dbg_uidn_show(struct seq_file *s, void *unused, int uid)
87 return 0; 74 return 0;
88} 75}
89 76
90static int genwqe_curr_dbg_uid0_show(struct seq_file *s, void *unused) 77static int curr_dbg_uid0_show(struct seq_file *s, void *unused)
91{ 78{
92 return curr_dbg_uidn_show(s, unused, 0); 79 return curr_dbg_uidn_show(s, unused, 0);
93} 80}
94 81
95GENWQE_DEBUGFS_RO(curr_dbg_uid0, genwqe_curr_dbg_uid0_show); 82DEFINE_SHOW_ATTRIBUTE(curr_dbg_uid0);
96 83
97static int genwqe_curr_dbg_uid1_show(struct seq_file *s, void *unused) 84static int curr_dbg_uid1_show(struct seq_file *s, void *unused)
98{ 85{
99 return curr_dbg_uidn_show(s, unused, 1); 86 return curr_dbg_uidn_show(s, unused, 1);
100} 87}
101 88
102GENWQE_DEBUGFS_RO(curr_dbg_uid1, genwqe_curr_dbg_uid1_show); 89DEFINE_SHOW_ATTRIBUTE(curr_dbg_uid1);
103 90
104static int genwqe_curr_dbg_uid2_show(struct seq_file *s, void *unused) 91static int curr_dbg_uid2_show(struct seq_file *s, void *unused)
105{ 92{
106 return curr_dbg_uidn_show(s, unused, 2); 93 return curr_dbg_uidn_show(s, unused, 2);
107} 94}
108 95
109GENWQE_DEBUGFS_RO(curr_dbg_uid2, genwqe_curr_dbg_uid2_show); 96DEFINE_SHOW_ATTRIBUTE(curr_dbg_uid2);
110 97
111static int prev_dbg_uidn_show(struct seq_file *s, void *unused, int uid) 98static int prev_dbg_uidn_show(struct seq_file *s, void *unused, int uid)
112{ 99{
@@ -116,28 +103,28 @@ static int prev_dbg_uidn_show(struct seq_file *s, void *unused, int uid)
116 return 0; 103 return 0;
117} 104}
118 105
119static int genwqe_prev_dbg_uid0_show(struct seq_file *s, void *unused) 106static int prev_dbg_uid0_show(struct seq_file *s, void *unused)
120{ 107{
121 return prev_dbg_uidn_show(s, unused, 0); 108 return prev_dbg_uidn_show(s, unused, 0);
122} 109}
123 110
124GENWQE_DEBUGFS_RO(prev_dbg_uid0, genwqe_prev_dbg_uid0_show); 111DEFINE_SHOW_ATTRIBUTE(prev_dbg_uid0);
125 112
126static int genwqe_prev_dbg_uid1_show(struct seq_file *s, void *unused) 113static int prev_dbg_uid1_show(struct seq_file *s, void *unused)
127{ 114{
128 return prev_dbg_uidn_show(s, unused, 1); 115 return prev_dbg_uidn_show(s, unused, 1);
129} 116}
130 117
131GENWQE_DEBUGFS_RO(prev_dbg_uid1, genwqe_prev_dbg_uid1_show); 118DEFINE_SHOW_ATTRIBUTE(prev_dbg_uid1);
132 119
133static int genwqe_prev_dbg_uid2_show(struct seq_file *s, void *unused) 120static int prev_dbg_uid2_show(struct seq_file *s, void *unused)
134{ 121{
135 return prev_dbg_uidn_show(s, unused, 2); 122 return prev_dbg_uidn_show(s, unused, 2);
136} 123}
137 124
138GENWQE_DEBUGFS_RO(prev_dbg_uid2, genwqe_prev_dbg_uid2_show); 125DEFINE_SHOW_ATTRIBUTE(prev_dbg_uid2);
139 126
140static int genwqe_curr_regs_show(struct seq_file *s, void *unused) 127static int curr_regs_show(struct seq_file *s, void *unused)
141{ 128{
142 struct genwqe_dev *cd = s->private; 129 struct genwqe_dev *cd = s->private;
143 unsigned int i; 130 unsigned int i;
@@ -164,9 +151,9 @@ static int genwqe_curr_regs_show(struct seq_file *s, void *unused)
164 return 0; 151 return 0;
165} 152}
166 153
167GENWQE_DEBUGFS_RO(curr_regs, genwqe_curr_regs_show); 154DEFINE_SHOW_ATTRIBUTE(curr_regs);
168 155
169static int genwqe_prev_regs_show(struct seq_file *s, void *unused) 156static int prev_regs_show(struct seq_file *s, void *unused)
170{ 157{
171 struct genwqe_dev *cd = s->private; 158 struct genwqe_dev *cd = s->private;
172 unsigned int i; 159 unsigned int i;
@@ -188,9 +175,9 @@ static int genwqe_prev_regs_show(struct seq_file *s, void *unused)
188 return 0; 175 return 0;
189} 176}
190 177
191GENWQE_DEBUGFS_RO(prev_regs, genwqe_prev_regs_show); 178DEFINE_SHOW_ATTRIBUTE(prev_regs);
192 179
193static int genwqe_jtimer_show(struct seq_file *s, void *unused) 180static int jtimer_show(struct seq_file *s, void *unused)
194{ 181{
195 struct genwqe_dev *cd = s->private; 182 struct genwqe_dev *cd = s->private;
196 unsigned int vf_num; 183 unsigned int vf_num;
@@ -209,9 +196,9 @@ static int genwqe_jtimer_show(struct seq_file *s, void *unused)
209 return 0; 196 return 0;
210} 197}
211 198
212GENWQE_DEBUGFS_RO(jtimer, genwqe_jtimer_show); 199DEFINE_SHOW_ATTRIBUTE(jtimer);
213 200
214static int genwqe_queue_working_time_show(struct seq_file *s, void *unused) 201static int queue_working_time_show(struct seq_file *s, void *unused)
215{ 202{
216 struct genwqe_dev *cd = s->private; 203 struct genwqe_dev *cd = s->private;
217 unsigned int vf_num; 204 unsigned int vf_num;
@@ -227,9 +214,9 @@ static int genwqe_queue_working_time_show(struct seq_file *s, void *unused)
227 return 0; 214 return 0;
228} 215}
229 216
230GENWQE_DEBUGFS_RO(queue_working_time, genwqe_queue_working_time_show); 217DEFINE_SHOW_ATTRIBUTE(queue_working_time);
231 218
232static int genwqe_ddcb_info_show(struct seq_file *s, void *unused) 219static int ddcb_info_show(struct seq_file *s, void *unused)
233{ 220{
234 struct genwqe_dev *cd = s->private; 221 struct genwqe_dev *cd = s->private;
235 unsigned int i; 222 unsigned int i;
@@ -300,9 +287,9 @@ static int genwqe_ddcb_info_show(struct seq_file *s, void *unused)
300 return 0; 287 return 0;
301} 288}
302 289
303GENWQE_DEBUGFS_RO(ddcb_info, genwqe_ddcb_info_show); 290DEFINE_SHOW_ATTRIBUTE(ddcb_info);
304 291
305static int genwqe_info_show(struct seq_file *s, void *unused) 292static int info_show(struct seq_file *s, void *unused)
306{ 293{
307 struct genwqe_dev *cd = s->private; 294 struct genwqe_dev *cd = s->private;
308 u64 app_id, slu_id, bitstream = -1; 295 u64 app_id, slu_id, bitstream = -1;
@@ -335,7 +322,7 @@ static int genwqe_info_show(struct seq_file *s, void *unused)
335 return 0; 322 return 0;
336} 323}
337 324
338GENWQE_DEBUGFS_RO(info, genwqe_info_show); 325DEFINE_SHOW_ATTRIBUTE(info);
339 326
340int genwqe_init_debugfs(struct genwqe_dev *cd) 327int genwqe_init_debugfs(struct genwqe_dev *cd)
341{ 328{
@@ -356,14 +343,14 @@ int genwqe_init_debugfs(struct genwqe_dev *cd)
356 343
357 /* non privileged interfaces are done here */ 344 /* non privileged interfaces are done here */
358 file = debugfs_create_file("ddcb_info", S_IRUGO, root, cd, 345 file = debugfs_create_file("ddcb_info", S_IRUGO, root, cd,
359 &genwqe_ddcb_info_fops); 346 &ddcb_info_fops);
360 if (!file) { 347 if (!file) {
361 ret = -ENOMEM; 348 ret = -ENOMEM;
362 goto err1; 349 goto err1;
363 } 350 }
364 351
365 file = debugfs_create_file("info", S_IRUGO, root, cd, 352 file = debugfs_create_file("info", S_IRUGO, root, cd,
366 &genwqe_info_fops); 353 &info_fops);
367 if (!file) { 354 if (!file) {
368 ret = -ENOMEM; 355 ret = -ENOMEM;
369 goto err1; 356 goto err1;
@@ -396,56 +383,56 @@ int genwqe_init_debugfs(struct genwqe_dev *cd)
396 } 383 }
397 384
398 file = debugfs_create_file("curr_regs", S_IRUGO, root, cd, 385 file = debugfs_create_file("curr_regs", S_IRUGO, root, cd,
399 &genwqe_curr_regs_fops); 386 &curr_regs_fops);
400 if (!file) { 387 if (!file) {
401 ret = -ENOMEM; 388 ret = -ENOMEM;
402 goto err1; 389 goto err1;
403 } 390 }
404 391
405 file = debugfs_create_file("curr_dbg_uid0", S_IRUGO, root, cd, 392 file = debugfs_create_file("curr_dbg_uid0", S_IRUGO, root, cd,
406 &genwqe_curr_dbg_uid0_fops); 393 &curr_dbg_uid0_fops);
407 if (!file) { 394 if (!file) {
408 ret = -ENOMEM; 395 ret = -ENOMEM;
409 goto err1; 396 goto err1;
410 } 397 }
411 398
412 file = debugfs_create_file("curr_dbg_uid1", S_IRUGO, root, cd, 399 file = debugfs_create_file("curr_dbg_uid1", S_IRUGO, root, cd,
413 &genwqe_curr_dbg_uid1_fops); 400 &curr_dbg_uid1_fops);
414 if (!file) { 401 if (!file) {
415 ret = -ENOMEM; 402 ret = -ENOMEM;
416 goto err1; 403 goto err1;
417 } 404 }
418 405
419 file = debugfs_create_file("curr_dbg_uid2", S_IRUGO, root, cd, 406 file = debugfs_create_file("curr_dbg_uid2", S_IRUGO, root, cd,
420 &genwqe_curr_dbg_uid2_fops); 407 &curr_dbg_uid2_fops);
421 if (!file) { 408 if (!file) {
422 ret = -ENOMEM; 409 ret = -ENOMEM;
423 goto err1; 410 goto err1;
424 } 411 }
425 412
426 file = debugfs_create_file("prev_regs", S_IRUGO, root, cd, 413 file = debugfs_create_file("prev_regs", S_IRUGO, root, cd,
427 &genwqe_prev_regs_fops); 414 &prev_regs_fops);
428 if (!file) { 415 if (!file) {
429 ret = -ENOMEM; 416 ret = -ENOMEM;
430 goto err1; 417 goto err1;
431 } 418 }
432 419
433 file = debugfs_create_file("prev_dbg_uid0", S_IRUGO, root, cd, 420 file = debugfs_create_file("prev_dbg_uid0", S_IRUGO, root, cd,
434 &genwqe_prev_dbg_uid0_fops); 421 &prev_dbg_uid0_fops);
435 if (!file) { 422 if (!file) {
436 ret = -ENOMEM; 423 ret = -ENOMEM;
437 goto err1; 424 goto err1;
438 } 425 }
439 426
440 file = debugfs_create_file("prev_dbg_uid1", S_IRUGO, root, cd, 427 file = debugfs_create_file("prev_dbg_uid1", S_IRUGO, root, cd,
441 &genwqe_prev_dbg_uid1_fops); 428 &prev_dbg_uid1_fops);
442 if (!file) { 429 if (!file) {
443 ret = -ENOMEM; 430 ret = -ENOMEM;
444 goto err1; 431 goto err1;
445 } 432 }
446 433
447 file = debugfs_create_file("prev_dbg_uid2", S_IRUGO, root, cd, 434 file = debugfs_create_file("prev_dbg_uid2", S_IRUGO, root, cd,
448 &genwqe_prev_dbg_uid2_fops); 435 &prev_dbg_uid2_fops);
449 if (!file) { 436 if (!file) {
450 ret = -ENOMEM; 437 ret = -ENOMEM;
451 goto err1; 438 goto err1;
@@ -463,14 +450,14 @@ int genwqe_init_debugfs(struct genwqe_dev *cd)
463 } 450 }
464 451
465 file = debugfs_create_file("jobtimer", S_IRUGO, root, cd, 452 file = debugfs_create_file("jobtimer", S_IRUGO, root, cd,
466 &genwqe_jtimer_fops); 453 &jtimer_fops);
467 if (!file) { 454 if (!file) {
468 ret = -ENOMEM; 455 ret = -ENOMEM;
469 goto err1; 456 goto err1;
470 } 457 }
471 458
472 file = debugfs_create_file("queue_working_time", S_IRUGO, root, cd, 459 file = debugfs_create_file("queue_working_time", S_IRUGO, root, cd,
473 &genwqe_queue_working_time_fops); 460 &queue_working_time_fops);
474 if (!file) { 461 if (!file) {
475 ret = -ENOMEM; 462 ret = -ENOMEM;
476 goto err1; 463 goto err1;
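The card_debugfs.c conversion drops the private GENWQE_DEBUGFS_RO() wrapper in favour of DEFINE_SHOW_ATTRIBUTE(), which generates the same single_open() boilerplate but derives everything from the function name: a <name>_show() routine yields a <name>_fops structure, hence the renames above. A minimal sketch of the convention (the example_stats name is a placeholder):

#include <linux/debugfs.h>
#include <linux/seq_file.h>

static int example_stats_show(struct seq_file *s, void *unused)
{
	/* s->private is whatever was passed to debugfs_create_file() */
	seq_puts(s, "example debugfs payload\n");
	return 0;
}

DEFINE_SHOW_ATTRIBUTE(example_stats);	/* generates example_stats_fops */

/* registration then references the generated fops:
 *	debugfs_create_file("stats", 0444, root, private, &example_stats_fops);
 */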
diff --git a/drivers/misc/genwqe/card_utils.c b/drivers/misc/genwqe/card_utils.c
index 3fcb9a2fe1c9..efe2fb72d54b 100644
--- a/drivers/misc/genwqe/card_utils.c
+++ b/drivers/misc/genwqe/card_utils.c
@@ -215,7 +215,7 @@ u32 genwqe_crc32(u8 *buff, size_t len, u32 init)
215void *__genwqe_alloc_consistent(struct genwqe_dev *cd, size_t size, 215void *__genwqe_alloc_consistent(struct genwqe_dev *cd, size_t size,
216 dma_addr_t *dma_handle) 216 dma_addr_t *dma_handle)
217{ 217{
218 if (get_order(size) > MAX_ORDER) 218 if (get_order(size) >= MAX_ORDER)
219 return NULL; 219 return NULL;
220 220
221 return dma_zalloc_coherent(&cd->pci_dev->dev, size, dma_handle, 221 return dma_zalloc_coherent(&cd->pci_dev->dev, size, dma_handle,
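The card_utils.c fix tightens the bound: MAX_ORDER is exclusive, the page allocator only serves orders 0 .. MAX_ORDER-1, so a size whose order equals MAX_ORDER must be rejected too. A small sketch of the corrected check with the arithmetic spelled out (assuming 4 KiB pages and the default MAX_ORDER of 11):

#include <linux/mm.h>

/*
 * With 4 KiB pages and MAX_ORDER == 11 the largest contiguous
 * allocation is order 10, i.e. 2^10 pages = 4 MiB.
 * get_order(4 MiB) == 10 is still fine; get_order(4 MiB + 1) == 11
 * equals MAX_ORDER and must be refused.
 */
static bool example_size_allocatable(size_t size)
{
	return get_order(size) < MAX_ORDER;
}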
diff --git a/drivers/misc/mei/Makefile b/drivers/misc/mei/Makefile
index cd6825afa8e1..d9215fc4e499 100644
--- a/drivers/misc/mei/Makefile
+++ b/drivers/misc/mei/Makefile
@@ -9,6 +9,7 @@ mei-objs += hbm.o
9mei-objs += interrupt.o 9mei-objs += interrupt.o
10mei-objs += client.o 10mei-objs += client.o
11mei-objs += main.o 11mei-objs += main.o
12mei-objs += dma-ring.o
12mei-objs += bus.o 13mei-objs += bus.o
13mei-objs += bus-fixup.o 14mei-objs += bus-fixup.o
14mei-$(CONFIG_DEBUG_FS) += debugfs.o 15mei-$(CONFIG_DEBUG_FS) += debugfs.o
diff --git a/drivers/misc/mei/client.c b/drivers/misc/mei/client.c
index ebdcf0b450e2..1fc8ea0f519b 100644
--- a/drivers/misc/mei/client.c
+++ b/drivers/misc/mei/client.c
@@ -318,23 +318,6 @@ void mei_me_cl_rm_all(struct mei_device *dev)
318} 318}
319 319
320/** 320/**
321 * mei_cl_cmp_id - tells if the clients are the same
322 *
323 * @cl1: host client 1
324 * @cl2: host client 2
325 *
326 * Return: true - if the clients has same host and me ids
327 * false - otherwise
328 */
329static inline bool mei_cl_cmp_id(const struct mei_cl *cl1,
330 const struct mei_cl *cl2)
331{
332 return cl1 && cl2 &&
333 (cl1->host_client_id == cl2->host_client_id) &&
334 (mei_cl_me_id(cl1) == mei_cl_me_id(cl2));
335}
336
337/**
338 * mei_io_cb_free - free mei_cb_private related memory 321 * mei_io_cb_free - free mei_cb_private related memory
339 * 322 *
340 * @cb: mei callback struct 323 * @cb: mei callback struct
@@ -418,7 +401,7 @@ static void mei_io_list_flush_cl(struct list_head *head,
418 struct mei_cl_cb *cb, *next; 401 struct mei_cl_cb *cb, *next;
419 402
420 list_for_each_entry_safe(cb, next, head, list) { 403 list_for_each_entry_safe(cb, next, head, list) {
421 if (mei_cl_cmp_id(cl, cb->cl)) 404 if (cl == cb->cl)
422 list_del_init(&cb->list); 405 list_del_init(&cb->list);
423 } 406 }
424} 407}
@@ -435,7 +418,7 @@ static void mei_io_tx_list_free_cl(struct list_head *head,
435 struct mei_cl_cb *cb, *next; 418 struct mei_cl_cb *cb, *next;
436 419
437 list_for_each_entry_safe(cb, next, head, list) { 420 list_for_each_entry_safe(cb, next, head, list) {
438 if (mei_cl_cmp_id(cl, cb->cl)) 421 if (cl == cb->cl)
439 mei_tx_cb_dequeue(cb); 422 mei_tx_cb_dequeue(cb);
440 } 423 }
441} 424}
@@ -478,7 +461,7 @@ struct mei_cl_cb *mei_cl_alloc_cb(struct mei_cl *cl, size_t length,
478 if (length == 0) 461 if (length == 0)
479 return cb; 462 return cb;
480 463
481 cb->buf.data = kmalloc(length, GFP_KERNEL); 464 cb->buf.data = kmalloc(roundup(length, MEI_SLOT_SIZE), GFP_KERNEL);
482 if (!cb->buf.data) { 465 if (!cb->buf.data) {
483 mei_io_cb_free(cb); 466 mei_io_cb_free(cb);
484 return NULL; 467 return NULL;
@@ -1374,7 +1357,9 @@ int mei_cl_notify_request(struct mei_cl *cl,
1374 1357
1375 mutex_unlock(&dev->device_lock); 1358 mutex_unlock(&dev->device_lock);
1376 wait_event_timeout(cl->wait, 1359 wait_event_timeout(cl->wait,
1377 cl->notify_en == request || !mei_cl_is_connected(cl), 1360 cl->notify_en == request ||
1361 cl->status ||
1362 !mei_cl_is_connected(cl),
1378 mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT)); 1363 mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT));
1379 mutex_lock(&dev->device_lock); 1364 mutex_lock(&dev->device_lock);
1380 1365
@@ -1573,10 +1558,13 @@ int mei_cl_irq_write(struct mei_cl *cl, struct mei_cl_cb *cb,
1573 struct mei_msg_hdr mei_hdr; 1558 struct mei_msg_hdr mei_hdr;
1574 size_t hdr_len = sizeof(mei_hdr); 1559 size_t hdr_len = sizeof(mei_hdr);
1575 size_t len; 1560 size_t len;
1576 size_t hbuf_len; 1561 size_t hbuf_len, dr_len;
1577 int hbuf_slots; 1562 int hbuf_slots;
1563 u32 dr_slots;
1564 u32 dma_len;
1578 int rets; 1565 int rets;
1579 bool first_chunk; 1566 bool first_chunk;
1567 const void *data;
1580 1568
1581 if (WARN_ON(!cl || !cl->dev)) 1569 if (WARN_ON(!cl || !cl->dev))
1582 return -ENODEV; 1570 return -ENODEV;
@@ -1597,6 +1585,7 @@ int mei_cl_irq_write(struct mei_cl *cl, struct mei_cl_cb *cb,
1597 } 1585 }
1598 1586
1599 len = buf->size - cb->buf_idx; 1587 len = buf->size - cb->buf_idx;
1588 data = buf->data + cb->buf_idx;
1600 hbuf_slots = mei_hbuf_empty_slots(dev); 1589 hbuf_slots = mei_hbuf_empty_slots(dev);
1601 if (hbuf_slots < 0) { 1590 if (hbuf_slots < 0) {
1602 rets = -EOVERFLOW; 1591 rets = -EOVERFLOW;
@@ -1604,6 +1593,8 @@ int mei_cl_irq_write(struct mei_cl *cl, struct mei_cl_cb *cb,
1604 } 1593 }
1605 1594
1606 hbuf_len = mei_slots2data(hbuf_slots); 1595 hbuf_len = mei_slots2data(hbuf_slots);
1596 dr_slots = mei_dma_ring_empty_slots(dev);
1597 dr_len = mei_slots2data(dr_slots);
1607 1598
1608 mei_msg_hdr_init(&mei_hdr, cb); 1599 mei_msg_hdr_init(&mei_hdr, cb);
1609 1600
@@ -1614,23 +1605,33 @@ int mei_cl_irq_write(struct mei_cl *cl, struct mei_cl_cb *cb,
1614 if (len + hdr_len <= hbuf_len) { 1605 if (len + hdr_len <= hbuf_len) {
1615 mei_hdr.length = len; 1606 mei_hdr.length = len;
1616 mei_hdr.msg_complete = 1; 1607 mei_hdr.msg_complete = 1;
1608 } else if (dr_slots && hbuf_len >= hdr_len + sizeof(dma_len)) {
1609 mei_hdr.dma_ring = 1;
1610 if (len > dr_len)
1611 len = dr_len;
1612 else
1613 mei_hdr.msg_complete = 1;
1614
1615 mei_hdr.length = sizeof(dma_len);
1616 dma_len = len;
1617 data = &dma_len;
1617 } else if ((u32)hbuf_slots == mei_hbuf_depth(dev)) { 1618 } else if ((u32)hbuf_slots == mei_hbuf_depth(dev)) {
1618 mei_hdr.length = hbuf_len - hdr_len; 1619 len = hbuf_len - hdr_len;
1620 mei_hdr.length = len;
1619 } else { 1621 } else {
1620 return 0; 1622 return 0;
1621 } 1623 }
1622 1624
1623 cl_dbg(dev, cl, "buf: size = %zu idx = %zu\n", 1625 if (mei_hdr.dma_ring)
1624 cb->buf.size, cb->buf_idx); 1626 mei_dma_ring_write(dev, buf->data + cb->buf_idx, len);
1625 1627
1626 rets = mei_write_message(dev, &mei_hdr, hdr_len, 1628 rets = mei_write_message(dev, &mei_hdr, hdr_len, data, mei_hdr.length);
1627 buf->data + cb->buf_idx, mei_hdr.length);
1628 if (rets) 1629 if (rets)
1629 goto err; 1630 goto err;
1630 1631
1631 cl->status = 0; 1632 cl->status = 0;
1632 cl->writing_state = MEI_WRITING; 1633 cl->writing_state = MEI_WRITING;
1633 cb->buf_idx += mei_hdr.length; 1634 cb->buf_idx += len;
1634 1635
1635 if (first_chunk) { 1636 if (first_chunk) {
1636 if (mei_cl_tx_flow_ctrl_creds_reduce(cl)) { 1637 if (mei_cl_tx_flow_ctrl_creds_reduce(cl)) {
@@ -1665,11 +1666,13 @@ ssize_t mei_cl_write(struct mei_cl *cl, struct mei_cl_cb *cb)
1665 struct mei_msg_data *buf; 1666 struct mei_msg_data *buf;
1666 struct mei_msg_hdr mei_hdr; 1667 struct mei_msg_hdr mei_hdr;
1667 size_t hdr_len = sizeof(mei_hdr); 1668 size_t hdr_len = sizeof(mei_hdr);
1668 size_t len; 1669 size_t len, hbuf_len, dr_len;
1669 size_t hbuf_len;
1670 int hbuf_slots; 1670 int hbuf_slots;
1671 u32 dr_slots;
1672 u32 dma_len;
1671 ssize_t rets; 1673 ssize_t rets;
1672 bool blocking; 1674 bool blocking;
1675 const void *data;
1673 1676
1674 if (WARN_ON(!cl || !cl->dev)) 1677 if (WARN_ON(!cl || !cl->dev))
1675 return -ENODEV; 1678 return -ENODEV;
@@ -1681,10 +1684,12 @@ ssize_t mei_cl_write(struct mei_cl *cl, struct mei_cl_cb *cb)
1681 1684
1682 buf = &cb->buf; 1685 buf = &cb->buf;
1683 len = buf->size; 1686 len = buf->size;
1684 blocking = cb->blocking;
1685 1687
1686 cl_dbg(dev, cl, "len=%zd\n", len); 1688 cl_dbg(dev, cl, "len=%zd\n", len);
1687 1689
1690 blocking = cb->blocking;
1691 data = buf->data;
1692
1688 rets = pm_runtime_get(dev->dev); 1693 rets = pm_runtime_get(dev->dev);
1689 if (rets < 0 && rets != -EINPROGRESS) { 1694 if (rets < 0 && rets != -EINPROGRESS) {
1690 pm_runtime_put_noidle(dev->dev); 1695 pm_runtime_put_noidle(dev->dev);
@@ -1721,16 +1726,32 @@ ssize_t mei_cl_write(struct mei_cl *cl, struct mei_cl_cb *cb)
1721 } 1726 }
1722 1727
1723 hbuf_len = mei_slots2data(hbuf_slots); 1728 hbuf_len = mei_slots2data(hbuf_slots);
1729 dr_slots = mei_dma_ring_empty_slots(dev);
1730 dr_len = mei_slots2data(dr_slots);
1724 1731
1725 if (len + hdr_len <= hbuf_len) { 1732 if (len + hdr_len <= hbuf_len) {
1726 mei_hdr.length = len; 1733 mei_hdr.length = len;
1727 mei_hdr.msg_complete = 1; 1734 mei_hdr.msg_complete = 1;
1735 } else if (dr_slots && hbuf_len >= hdr_len + sizeof(dma_len)) {
1736 mei_hdr.dma_ring = 1;
1737 if (len > dr_len)
1738 len = dr_len;
1739 else
1740 mei_hdr.msg_complete = 1;
1741
1742 mei_hdr.length = sizeof(dma_len);
1743 dma_len = len;
1744 data = &dma_len;
1728 } else { 1745 } else {
1729 mei_hdr.length = hbuf_len - hdr_len; 1746 len = hbuf_len - hdr_len;
1747 mei_hdr.length = len;
1730 } 1748 }
1731 1749
1750 if (mei_hdr.dma_ring)
1751 mei_dma_ring_write(dev, buf->data, len);
1752
1732 rets = mei_write_message(dev, &mei_hdr, hdr_len, 1753 rets = mei_write_message(dev, &mei_hdr, hdr_len,
1733 buf->data, mei_hdr.length); 1754 data, mei_hdr.length);
1734 if (rets) 1755 if (rets)
1735 goto err; 1756 goto err;
1736 1757
@@ -1739,7 +1760,9 @@ ssize_t mei_cl_write(struct mei_cl *cl, struct mei_cl_cb *cb)
1739 goto err; 1760 goto err;
1740 1761
1741 cl->writing_state = MEI_WRITING; 1762 cl->writing_state = MEI_WRITING;
1742 cb->buf_idx = mei_hdr.length; 1763 cb->buf_idx = len;
1764 /* restore return value */
1765 len = buf->size;
1743 1766
1744out: 1767out:
1745 if (mei_hdr.msg_complete) 1768 if (mei_hdr.msg_complete)
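The client.c hunks teach both write paths to choose between three cases: the whole message fits into the hardware host buffer, the DMA ring has room (only a 4-byte length word then travels through the hardware buffer while the payload rides the ring), or only part of the message can be sent now. A simplified, self-contained restatement of that decision (the enum and helper are illustrative, not driver API; the real code also distinguishes an empty-but-too-small host buffer and defers in that case):

#include <linux/types.h>

enum example_tx_path {
	TX_HBUF_COMPLETE,	/* whole message fits in the hardware buffer */
	TX_DMA_RING,		/* payload via DMA ring, header carries its size */
	TX_HBUF_PARTIAL,	/* send what fits now, continue later */
};

static enum example_tx_path example_pick_tx_path(size_t len, size_t hdr_len,
						 size_t hbuf_len, u32 dr_slots)
{
	if (len + hdr_len <= hbuf_len)
		return TX_HBUF_COMPLETE;

	if (dr_slots && hbuf_len >= hdr_len + sizeof(u32))
		return TX_DMA_RING;

	return TX_HBUF_PARTIAL;
}

In the TX_DMA_RING case mei_hdr.length is set to sizeof(dma_len), and msg_complete is only set when the remaining data fits into the free ring slots (dr_len).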
diff --git a/drivers/misc/mei/dma-ring.c b/drivers/misc/mei/dma-ring.c
new file mode 100644
index 000000000000..795641b82181
--- /dev/null
+++ b/drivers/misc/mei/dma-ring.c
@@ -0,0 +1,269 @@
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Copyright(c) 2016 - 2018 Intel Corporation. All rights reserved.
4 */
5#include <linux/dma-mapping.h>
6#include <linux/mei.h>
7
8#include "mei_dev.h"
9
10/**
11 * mei_dmam_dscr_alloc() - allocate a managed coherent buffer
12 * for the dma descriptor
13 * @dev: mei_device
14 * @dscr: dma descriptor
15 *
16 * Return:
17 * * 0 - on success or zero allocation request
18 * * -EINVAL - if size is not power of 2
19 * * -ENOMEM - if allocation has failed
20 */
21static int mei_dmam_dscr_alloc(struct mei_device *dev,
22 struct mei_dma_dscr *dscr)
23{
24 if (!dscr->size)
25 return 0;
26
27 if (WARN_ON(!is_power_of_2(dscr->size)))
28 return -EINVAL;
29
30 if (dscr->vaddr)
31 return 0;
32
33 dscr->vaddr = dmam_alloc_coherent(dev->dev, dscr->size, &dscr->daddr,
34 GFP_KERNEL);
35 if (!dscr->vaddr)
36 return -ENOMEM;
37
38 return 0;
39}
40
41/**
42 * mei_dmam_dscr_free() - free a managed coherent buffer
43 * from the dma descriptor
44 * @dev: mei_device
45 * @dscr: dma descriptor
46 */
47static void mei_dmam_dscr_free(struct mei_device *dev,
48 struct mei_dma_dscr *dscr)
49{
50 if (!dscr->vaddr)
51 return;
52
53 dmam_free_coherent(dev->dev, dscr->size, dscr->vaddr, dscr->daddr);
54 dscr->vaddr = NULL;
55}
56
57/**
58 * mei_dmam_ring_free() - free dma ring buffers
59 * @dev: mei device
60 */
61void mei_dmam_ring_free(struct mei_device *dev)
62{
63 int i;
64
65 for (i = 0; i < DMA_DSCR_NUM; i++)
66 mei_dmam_dscr_free(dev, &dev->dr_dscr[i]);
67}
68
69/**
70 * mei_dmam_ring_alloc() - allocate dma ring buffers
71 * @dev: mei device
72 *
73 * Return: -ENOMEM on allocation failure, 0 otherwise
74 */
75int mei_dmam_ring_alloc(struct mei_device *dev)
76{
77 int i;
78
79 for (i = 0; i < DMA_DSCR_NUM; i++)
80 if (mei_dmam_dscr_alloc(dev, &dev->dr_dscr[i]))
81 goto err;
82
83 return 0;
84
85err:
86 mei_dmam_ring_free(dev);
87 return -ENOMEM;
88}
89
90/**
91 * mei_dma_ring_is_allocated() - check if dma ring is allocated
92 * @dev: mei device
93 *
94 * Return: true if dma ring is allocated
95 */
96bool mei_dma_ring_is_allocated(struct mei_device *dev)
97{
98 return !!dev->dr_dscr[DMA_DSCR_HOST].vaddr;
99}
100
101static inline
102struct hbm_dma_ring_ctrl *mei_dma_ring_ctrl(struct mei_device *dev)
103{
104 return (struct hbm_dma_ring_ctrl *)dev->dr_dscr[DMA_DSCR_CTRL].vaddr;
105}
106
107/**
108 * mei_dma_ring_reset() - reset the dma control block
109 * @dev: mei device
110 */
111void mei_dma_ring_reset(struct mei_device *dev)
112{
113 struct hbm_dma_ring_ctrl *ctrl = mei_dma_ring_ctrl(dev);
114
115 if (!ctrl)
116 return;
117
118 memset(ctrl, 0, sizeof(*ctrl));
119}
120
121/**
122 * mei_dma_copy_from() - copy from dma ring into buffer
123 * @dev: mei device
124 * @buf: data buffer
125 * @offset: offset in slots.
126 * @n: number of slots to copy.
127 */
128static size_t mei_dma_copy_from(struct mei_device *dev, unsigned char *buf,
129 u32 offset, u32 n)
130{
131 unsigned char *dbuf = dev->dr_dscr[DMA_DSCR_DEVICE].vaddr;
132
133 size_t b_offset = offset << 2;
134 size_t b_n = n << 2;
135
136 memcpy(buf, dbuf + b_offset, b_n);
137
138 return b_n;
139}
140
141/**
142 * mei_dma_copy_to() - copy a buffer to the dma ring
143 * @dev: mei device
144 * @buf: data buffer
145 * @offset: offset in slots.
146 * @n: number of slots to copy.
147 */
148static size_t mei_dma_copy_to(struct mei_device *dev, unsigned char *buf,
149 u32 offset, u32 n)
150{
151 unsigned char *hbuf = dev->dr_dscr[DMA_DSCR_HOST].vaddr;
152
153 size_t b_offset = offset << 2;
154 size_t b_n = n << 2;
155
156 memcpy(hbuf + b_offset, buf, b_n);
157
158 return b_n;
159}
160
161/**
162 * mei_dma_ring_read() - read data from the ring
163 * @dev: mei device
164 * @buf: buffer to read into: may be NULL in case of dropping the data.
165 * @len: length to read.
166 */
167void mei_dma_ring_read(struct mei_device *dev, unsigned char *buf, u32 len)
168{
169 struct hbm_dma_ring_ctrl *ctrl = mei_dma_ring_ctrl(dev);
170 u32 dbuf_depth;
171 u32 rd_idx, rem, slots;
172
173 if (WARN_ON(!ctrl))
174 return;
175
176 dev_dbg(dev->dev, "reading from dma %u bytes\n", len);
177
178 if (!len)
179 return;
180
181 dbuf_depth = dev->dr_dscr[DMA_DSCR_DEVICE].size >> 2;
182 rd_idx = READ_ONCE(ctrl->dbuf_rd_idx) & (dbuf_depth - 1);
183 slots = mei_data2slots(len);
184
185 /* if buf is NULL we drop the packet by advancing the pointer.*/
186 if (!buf)
187 goto out;
188
189 if (rd_idx + slots > dbuf_depth) {
190 buf += mei_dma_copy_from(dev, buf, rd_idx, dbuf_depth - rd_idx);
191 rem = slots - (dbuf_depth - rd_idx);
192 rd_idx = 0;
193 } else {
194 rem = slots;
195 }
196
197 mei_dma_copy_from(dev, buf, rd_idx, rem);
198out:
199 WRITE_ONCE(ctrl->dbuf_rd_idx, ctrl->dbuf_rd_idx + slots);
200}
201
202static inline u32 mei_dma_ring_hbuf_depth(struct mei_device *dev)
203{
204 return dev->dr_dscr[DMA_DSCR_HOST].size >> 2;
205}
206
207/**
208 * mei_dma_ring_empty_slots() - calculate number of empty slots in dma ring
209 * @dev: mei_device
210 *
211 * Return: number of empty slots
212 */
213u32 mei_dma_ring_empty_slots(struct mei_device *dev)
214{
215 struct hbm_dma_ring_ctrl *ctrl = mei_dma_ring_ctrl(dev);
216 u32 wr_idx, rd_idx, hbuf_depth, empty;
217
218 if (!mei_dma_ring_is_allocated(dev))
219 return 0;
220
221 if (WARN_ON(!ctrl))
222 return 0;
223
224 /* easier to work in slots */
225 hbuf_depth = mei_dma_ring_hbuf_depth(dev);
226 rd_idx = READ_ONCE(ctrl->hbuf_rd_idx);
227 wr_idx = READ_ONCE(ctrl->hbuf_wr_idx);
228
229 if (rd_idx > wr_idx)
230 empty = rd_idx - wr_idx;
231 else
232 empty = hbuf_depth - (wr_idx - rd_idx);
233
234 return empty;
235}
236
237/**
238 * mei_dma_ring_write - write data to dma ring host buffer
239 *
240 * @dev: mei_device
241 * @buf: data to be written
242 * @len: data length
243 */
244void mei_dma_ring_write(struct mei_device *dev, unsigned char *buf, u32 len)
245{
246 struct hbm_dma_ring_ctrl *ctrl = mei_dma_ring_ctrl(dev);
247 u32 hbuf_depth;
248 u32 wr_idx, rem, slots;
249
250 if (WARN_ON(!ctrl))
251 return;
252
253 dev_dbg(dev->dev, "writing to dma %u bytes\n", len);
254 hbuf_depth = mei_dma_ring_hbuf_depth(dev);
255 wr_idx = READ_ONCE(ctrl->hbuf_wr_idx) & (hbuf_depth - 1);
256 slots = mei_data2slots(len);
257
258 if (wr_idx + slots > hbuf_depth) {
259 buf += mei_dma_copy_to(dev, buf, wr_idx, hbuf_depth - wr_idx);
260 rem = slots - (hbuf_depth - wr_idx);
261 wr_idx = 0;
262 } else {
263 rem = slots;
264 }
265
266 mei_dma_copy_to(dev, buf, wr_idx, rem);
267
268 WRITE_ONCE(ctrl->hbuf_wr_idx, ctrl->hbuf_wr_idx + slots);
269}
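dma-ring.c above keeps free-running 32-bit read/write indices counted in 4-byte slots; they are reduced modulo the power-of-two ring depth only when used to address the buffer, which keeps the used/empty arithmetic simple across wrap-around. A standalone illustration of that arithmetic (plain userspace C, numbers chosen for clarity):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const uint32_t depth = 8;		/* slots; must be a power of two */
	uint32_t rd_idx = 10, wr_idx = 13;	/* free-running counters */

	uint32_t used  = wr_idx - rd_idx;	/* 3 slots occupied */
	uint32_t empty = depth - used;		/* 5 slots free */
	uint32_t pos   = wr_idx & (depth - 1);	/* 13 & 7 == 5: next write lands here */

	printf("used=%u empty=%u next write at slot %u\n", used, empty, pos);
	return 0;
}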
diff --git a/drivers/misc/mei/hbm.c b/drivers/misc/mei/hbm.c
index e56f3e72d57a..78c26cebf5d4 100644
--- a/drivers/misc/mei/hbm.c
+++ b/drivers/misc/mei/hbm.c
@@ -65,6 +65,7 @@ const char *mei_hbm_state_str(enum mei_hbm_state state)
65 MEI_HBM_STATE(IDLE); 65 MEI_HBM_STATE(IDLE);
66 MEI_HBM_STATE(STARTING); 66 MEI_HBM_STATE(STARTING);
67 MEI_HBM_STATE(STARTED); 67 MEI_HBM_STATE(STARTED);
68 MEI_HBM_STATE(DR_SETUP);
68 MEI_HBM_STATE(ENUM_CLIENTS); 69 MEI_HBM_STATE(ENUM_CLIENTS);
69 MEI_HBM_STATE(CLIENT_PROPERTIES); 70 MEI_HBM_STATE(CLIENT_PROPERTIES);
70 MEI_HBM_STATE(STOPPED); 71 MEI_HBM_STATE(STOPPED);
@@ -296,6 +297,48 @@ int mei_hbm_start_req(struct mei_device *dev)
296} 297}
297 298
298/** 299/**
300 * mei_hbm_dma_setup_req() - setup DMA request
301 * @dev: the device structure
302 *
303 * Return: 0 on success and < 0 on failure
304 */
305static int mei_hbm_dma_setup_req(struct mei_device *dev)
306{
307 struct mei_msg_hdr mei_hdr;
308 struct hbm_dma_setup_request req;
309 const size_t len = sizeof(struct hbm_dma_setup_request);
310 unsigned int i;
311 int ret;
312
313 mei_hbm_hdr(&mei_hdr, len);
314
315 memset(&req, 0, len);
316 req.hbm_cmd = MEI_HBM_DMA_SETUP_REQ_CMD;
317 for (i = 0; i < DMA_DSCR_NUM; i++) {
318 phys_addr_t paddr;
319
320 paddr = dev->dr_dscr[i].daddr;
321 req.dma_dscr[i].addr_hi = upper_32_bits(paddr);
322 req.dma_dscr[i].addr_lo = lower_32_bits(paddr);
323 req.dma_dscr[i].size = dev->dr_dscr[i].size;
324 }
325
326 mei_dma_ring_reset(dev);
327
328 ret = mei_hbm_write_message(dev, &mei_hdr, &req);
329 if (ret) {
330 dev_err(dev->dev, "dma setup request write failed: ret = %d.\n",
331 ret);
332 return ret;
333 }
334
335 dev->hbm_state = MEI_HBM_DR_SETUP;
336 dev->init_clients_timer = MEI_CLIENTS_INIT_TIMEOUT;
337 mei_schedule_stall_timer(dev);
338 return 0;
339}
340
341/**
299 * mei_hbm_enum_clients_req - sends enumeration client request message. 342 * mei_hbm_enum_clients_req - sends enumeration client request message.
300 * 343 *
301 * @dev: the device structure 344 * @dev: the device structure
@@ -1044,6 +1087,7 @@ int mei_hbm_dispatch(struct mei_device *dev, struct mei_msg_hdr *hdr)
1044 struct hbm_host_version_response *version_res; 1087 struct hbm_host_version_response *version_res;
1045 struct hbm_props_response *props_res; 1088 struct hbm_props_response *props_res;
1046 struct hbm_host_enum_response *enum_res; 1089 struct hbm_host_enum_response *enum_res;
1090 struct hbm_dma_setup_response *dma_setup_res;
1047 struct hbm_add_client_request *add_cl_req; 1091 struct hbm_add_client_request *add_cl_req;
1048 int ret; 1092 int ret;
1049 1093
@@ -1108,14 +1152,52 @@ int mei_hbm_dispatch(struct mei_device *dev, struct mei_msg_hdr *hdr)
1108 return -EPROTO; 1152 return -EPROTO;
1109 } 1153 }
1110 1154
1111 if (mei_hbm_enum_clients_req(dev)) { 1155 if (dev->hbm_f_dr_supported) {
1112 dev_err(dev->dev, "hbm: start: failed to send enumeration request\n"); 1156 if (mei_dmam_ring_alloc(dev))
1113 return -EIO; 1157 dev_info(dev->dev, "running w/o dma ring\n");
1158 if (mei_dma_ring_is_allocated(dev)) {
1159 if (mei_hbm_dma_setup_req(dev))
1160 return -EIO;
1161
1162 wake_up(&dev->wait_hbm_start);
1163 break;
1164 }
1114 } 1165 }
1115 1166
1167 dev->hbm_f_dr_supported = 0;
1168 mei_dmam_ring_free(dev);
1169
1170 if (mei_hbm_enum_clients_req(dev))
1171 return -EIO;
1172
1116 wake_up(&dev->wait_hbm_start); 1173 wake_up(&dev->wait_hbm_start);
1117 break; 1174 break;
1118 1175
1176 case MEI_HBM_DMA_SETUP_RES_CMD:
1177 dev_dbg(dev->dev, "hbm: dma setup response: message received.\n");
1178
1179 dev->init_clients_timer = 0;
1180
1181 if (dev->hbm_state != MEI_HBM_DR_SETUP) {
1182 dev_err(dev->dev, "hbm: dma setup response: state mismatch, [%d, %d]\n",
1183 dev->dev_state, dev->hbm_state);
1184 return -EPROTO;
1185 }
1186
1187 dma_setup_res = (struct hbm_dma_setup_response *)mei_msg;
1188
1189 if (dma_setup_res->status) {
1190 dev_info(dev->dev, "hbm: dma setup response: failure = %d %s\n",
1191 dma_setup_res->status,
1192 mei_hbm_status_str(dma_setup_res->status));
1193 dev->hbm_f_dr_supported = 0;
1194 mei_dmam_ring_free(dev);
1195 }
1196
1197 if (mei_hbm_enum_clients_req(dev))
1198 return -EIO;
1199 break;
1200
1119 case CLIENT_CONNECT_RES_CMD: 1201 case CLIENT_CONNECT_RES_CMD:
1120 dev_dbg(dev->dev, "hbm: client connect response: message received.\n"); 1202 dev_dbg(dev->dev, "hbm: client connect response: message received.\n");
1121 mei_hbm_cl_res(dev, cl_cmd, MEI_FOP_CONNECT); 1203 mei_hbm_cl_res(dev, cl_cmd, MEI_FOP_CONNECT);
@@ -1271,8 +1353,8 @@ int mei_hbm_dispatch(struct mei_device *dev, struct mei_msg_hdr *hdr)
1271 break; 1353 break;
1272 1354
1273 default: 1355 default:
1274 BUG(); 1356 WARN(1, "hbm: wrong command %d\n", mei_msg->hbm_cmd);
1275 break; 1357 return -EPROTO;
1276 1358
1277 } 1359 }
1278 return 0; 1360 return 0;
diff --git a/drivers/misc/mei/hbm.h b/drivers/misc/mei/hbm.h
index a2025a5083a3..0171a7e79bab 100644
--- a/drivers/misc/mei/hbm.h
+++ b/drivers/misc/mei/hbm.h
@@ -26,6 +26,7 @@ struct mei_cl;
26 * 26 *
27 * @MEI_HBM_IDLE : protocol not started 27 * @MEI_HBM_IDLE : protocol not started
28 * @MEI_HBM_STARTING : start request message was sent 28 * @MEI_HBM_STARTING : start request message was sent
29 * @MEI_HBM_DR_SETUP : dma ring setup request message was sent
29 * @MEI_HBM_ENUM_CLIENTS : enumeration request was sent 30 * @MEI_HBM_ENUM_CLIENTS : enumeration request was sent
30 * @MEI_HBM_CLIENT_PROPERTIES : acquiring clients properties 31 * @MEI_HBM_CLIENT_PROPERTIES : acquiring clients properties
31 * @MEI_HBM_STARTED : enumeration was completed 32 * @MEI_HBM_STARTED : enumeration was completed
@@ -34,6 +35,7 @@ struct mei_cl;
34enum mei_hbm_state { 35enum mei_hbm_state {
35 MEI_HBM_IDLE = 0, 36 MEI_HBM_IDLE = 0,
36 MEI_HBM_STARTING, 37 MEI_HBM_STARTING,
38 MEI_HBM_DR_SETUP,
37 MEI_HBM_ENUM_CLIENTS, 39 MEI_HBM_ENUM_CLIENTS,
38 MEI_HBM_CLIENT_PROPERTIES, 40 MEI_HBM_CLIENT_PROPERTIES,
39 MEI_HBM_STARTED, 41 MEI_HBM_STARTED,
diff --git a/drivers/misc/mei/hw-me.c b/drivers/misc/mei/hw-me.c
index 0759c3a668de..3fbbadfa2ae1 100644
--- a/drivers/misc/mei/hw-me.c
+++ b/drivers/misc/mei/hw-me.c
@@ -1471,15 +1471,21 @@ struct mei_device *mei_me_dev_init(struct pci_dev *pdev,
1471{ 1471{
1472 struct mei_device *dev; 1472 struct mei_device *dev;
1473 struct mei_me_hw *hw; 1473 struct mei_me_hw *hw;
1474 int i;
1474 1475
1475 dev = devm_kzalloc(&pdev->dev, sizeof(struct mei_device) + 1476 dev = devm_kzalloc(&pdev->dev, sizeof(struct mei_device) +
1476 sizeof(struct mei_me_hw), GFP_KERNEL); 1477 sizeof(struct mei_me_hw), GFP_KERNEL);
1477 if (!dev) 1478 if (!dev)
1478 return NULL; 1479 return NULL;
1480
1479 hw = to_me_hw(dev); 1481 hw = to_me_hw(dev);
1480 1482
1483 for (i = 0; i < DMA_DSCR_NUM; i++)
1484 dev->dr_dscr[i].size = cfg->dma_size[i];
1485
1481 mei_device_init(dev, &pdev->dev, &mei_me_hw_ops); 1486 mei_device_init(dev, &pdev->dev, &mei_me_hw_ops);
1482 hw->cfg = cfg; 1487 hw->cfg = cfg;
1488
1483 return dev; 1489 return dev;
1484} 1490}
1485 1491
diff --git a/drivers/misc/mei/hw.h b/drivers/misc/mei/hw.h
index 65655925791a..2b7f7677f8cc 100644
--- a/drivers/misc/mei/hw.h
+++ b/drivers/misc/mei/hw.h
@@ -35,7 +35,7 @@
35/* 35/*
36 * MEI Version 36 * MEI Version
37 */ 37 */
38#define HBM_MINOR_VERSION 0 38#define HBM_MINOR_VERSION 1
39#define HBM_MAJOR_VERSION 2 39#define HBM_MAJOR_VERSION 2
40 40
41/* 41/*
@@ -206,6 +206,7 @@ enum mei_cl_disconnect_status {
206 * @dma_ring: message is on dma ring 206 * @dma_ring: message is on dma ring
207 * @internal: message is internal 207 * @internal: message is internal
208 * @msg_complete: last packet of the message 208 * @msg_complete: last packet of the message
209 * @extension: extension of the header
209 */ 210 */
210struct mei_msg_hdr { 211struct mei_msg_hdr {
211 u32 me_addr:8; 212 u32 me_addr:8;
@@ -215,8 +216,11 @@ struct mei_msg_hdr {
215 u32 dma_ring:1; 216 u32 dma_ring:1;
216 u32 internal:1; 217 u32 internal:1;
217 u32 msg_complete:1; 218 u32 msg_complete:1;
219 u32 extension[0];
218} __packed; 220} __packed;
219 221
222#define MEI_MSG_HDR_MAX 2
223
220struct mei_bus_message { 224struct mei_bus_message {
221 u8 hbm_cmd; 225 u8 hbm_cmd;
222 u8 data[0]; 226 u8 data[0];
@@ -512,4 +516,27 @@ struct hbm_dma_setup_response {
512 u8 reserved[2]; 516 u8 reserved[2];
513} __packed; 517} __packed;
514 518
519/**
520 * struct mei_dma_ring_ctrl - dma ring control block
521 *
522 * @hbuf_wr_idx: host circular buffer write index in slots
523 * @reserved1: reserved for alignment
524 * @hbuf_rd_idx: host circular buffer read index in slots
525 * @reserved2: reserved for alignment
526 * @dbuf_wr_idx: device circular buffer write index in slots
527 * @reserved3: reserved for alignment
528 * @dbuf_rd_idx: device circular buffer read index in slots
529 * @reserved4: reserved for alignment
530 */
531struct hbm_dma_ring_ctrl {
532 u32 hbuf_wr_idx;
533 u32 reserved1;
534 u32 hbuf_rd_idx;
535 u32 reserved2;
536 u32 dbuf_wr_idx;
537 u32 reserved3;
538 u32 dbuf_rd_idx;
539 u32 reserved4;
540} __packed;
541
515#endif 542#endif
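
The hbm_dma_ring_ctrl block above is essentially a write/read index pair for each of the host and device buffers, counted in slots and kept in a packed layout the firmware can see. As a rough, self-contained illustration of how such free-running indices are commonly masked against a power-of-two depth (generic C only; this is not the driver's exact arithmetic, and the depth value is hypothetical):

#include <stdint.h>
#include <stdio.h>

/* Free-running write/read indices: the masked difference is the number of
 * filled slots, the remainder of the depth is free space. */
static uint32_t ring_filled(uint32_t wr_idx, uint32_t rd_idx, uint32_t depth)
{
	return (wr_idx - rd_idx) & (depth - 1);
}

int main(void)
{
	uint32_t depth = 128;            /* hypothetical ring depth in slots */
	uint32_t wr = 130, rd = 5;
	uint32_t filled = ring_filled(wr, rd, depth);

	printf("filled=%u empty=%u\n", (unsigned)filled, (unsigned)(depth - filled));
	return 0;
}
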
diff --git a/drivers/misc/mei/init.c b/drivers/misc/mei/init.c
index 4888ebc076b7..eb026e2a0537 100644
--- a/drivers/misc/mei/init.c
+++ b/drivers/misc/mei/init.c
@@ -151,7 +151,7 @@ int mei_reset(struct mei_device *dev)
151 151
152 mei_hbm_reset(dev); 152 mei_hbm_reset(dev);
153 153
154 dev->rd_msg_hdr = 0; 154 memset(dev->rd_msg_hdr, 0, sizeof(dev->rd_msg_hdr));
155 155
156 if (ret) { 156 if (ret) {
157 dev_err(dev->dev, "hw_reset failed ret = %d\n", ret); 157 dev_err(dev->dev, "hw_reset failed ret = %d\n", ret);
diff --git a/drivers/misc/mei/interrupt.c b/drivers/misc/mei/interrupt.c
index 5a661cbdf2ae..055c2d89b310 100644
--- a/drivers/misc/mei/interrupt.c
+++ b/drivers/misc/mei/interrupt.c
@@ -75,6 +75,8 @@ static inline int mei_cl_hbm_equal(struct mei_cl *cl,
75 */ 75 */
76static void mei_irq_discard_msg(struct mei_device *dev, struct mei_msg_hdr *hdr) 76static void mei_irq_discard_msg(struct mei_device *dev, struct mei_msg_hdr *hdr)
77{ 77{
78 if (hdr->dma_ring)
79 mei_dma_ring_read(dev, NULL, hdr->extension[0]);
78 /* 80 /*
79 * no need to check for size as it is guarantied 81 * no need to check for size as it is guarantied
80 * that length fits into rd_msg_buf 82 * that length fits into rd_msg_buf
@@ -100,6 +102,7 @@ static int mei_cl_irq_read_msg(struct mei_cl *cl,
100 struct mei_device *dev = cl->dev; 102 struct mei_device *dev = cl->dev;
101 struct mei_cl_cb *cb; 103 struct mei_cl_cb *cb;
102 size_t buf_sz; 104 size_t buf_sz;
105 u32 length;
103 106
104 cb = list_first_entry_or_null(&cl->rd_pending, struct mei_cl_cb, list); 107 cb = list_first_entry_or_null(&cl->rd_pending, struct mei_cl_cb, list);
105 if (!cb) { 108 if (!cb) {
@@ -119,25 +122,31 @@ static int mei_cl_irq_read_msg(struct mei_cl *cl,
119 goto discard; 122 goto discard;
120 } 123 }
121 124
122 buf_sz = mei_hdr->length + cb->buf_idx; 125 length = mei_hdr->dma_ring ? mei_hdr->extension[0] : mei_hdr->length;
126
127 buf_sz = length + cb->buf_idx;
123 /* catch for integer overflow */ 128 /* catch for integer overflow */
124 if (buf_sz < cb->buf_idx) { 129 if (buf_sz < cb->buf_idx) {
125 cl_err(dev, cl, "message is too big len %d idx %zu\n", 130 cl_err(dev, cl, "message is too big len %d idx %zu\n",
126 mei_hdr->length, cb->buf_idx); 131 length, cb->buf_idx);
127 cb->status = -EMSGSIZE; 132 cb->status = -EMSGSIZE;
128 goto discard; 133 goto discard;
129 } 134 }
130 135
131 if (cb->buf.size < buf_sz) { 136 if (cb->buf.size < buf_sz) {
132 cl_dbg(dev, cl, "message overflow. size %zu len %d idx %zu\n", 137 cl_dbg(dev, cl, "message overflow. size %zu len %d idx %zu\n",
133 cb->buf.size, mei_hdr->length, cb->buf_idx); 138 cb->buf.size, length, cb->buf_idx);
134 cb->status = -EMSGSIZE; 139 cb->status = -EMSGSIZE;
135 goto discard; 140 goto discard;
136 } 141 }
137 142
143 if (mei_hdr->dma_ring)
144 mei_dma_ring_read(dev, cb->buf.data + cb->buf_idx, length);
145
146 /* for DMA read 0 length to generate an interrupt to the device */
138 mei_read_slots(dev, cb->buf.data + cb->buf_idx, mei_hdr->length); 147 mei_read_slots(dev, cb->buf.data + cb->buf_idx, mei_hdr->length);
139 148
140 cb->buf_idx += mei_hdr->length; 149 cb->buf_idx += length;
141 150
142 if (mei_hdr->msg_complete) { 151 if (mei_hdr->msg_complete) {
143 cl_dbg(dev, cl, "completed read length = %zu\n", cb->buf_idx); 152 cl_dbg(dev, cl, "completed read length = %zu\n", cb->buf_idx);
@@ -247,6 +256,9 @@ static inline int hdr_is_valid(u32 msg_hdr)
247 if (!msg_hdr || mei_hdr->reserved) 256 if (!msg_hdr || mei_hdr->reserved)
248 return -EBADMSG; 257 return -EBADMSG;
249 258
259 if (mei_hdr->dma_ring && mei_hdr->length != MEI_SLOT_SIZE)
260 return -EBADMSG;
261
250 return 0; 262 return 0;
251} 263}
252 264
@@ -267,20 +279,20 @@ int mei_irq_read_handler(struct mei_device *dev,
267 struct mei_cl *cl; 279 struct mei_cl *cl;
268 int ret; 280 int ret;
269 281
270 if (!dev->rd_msg_hdr) { 282 if (!dev->rd_msg_hdr[0]) {
271 dev->rd_msg_hdr = mei_read_hdr(dev); 283 dev->rd_msg_hdr[0] = mei_read_hdr(dev);
272 (*slots)--; 284 (*slots)--;
273 dev_dbg(dev->dev, "slots =%08x.\n", *slots); 285 dev_dbg(dev->dev, "slots =%08x.\n", *slots);
274 286
275 ret = hdr_is_valid(dev->rd_msg_hdr); 287 ret = hdr_is_valid(dev->rd_msg_hdr[0]);
276 if (ret) { 288 if (ret) {
277 dev_err(dev->dev, "corrupted message header 0x%08X\n", 289 dev_err(dev->dev, "corrupted message header 0x%08X\n",
278 dev->rd_msg_hdr); 290 dev->rd_msg_hdr[0]);
279 goto end; 291 goto end;
280 } 292 }
281 } 293 }
282 294
283 mei_hdr = (struct mei_msg_hdr *)&dev->rd_msg_hdr; 295 mei_hdr = (struct mei_msg_hdr *)dev->rd_msg_hdr;
284 dev_dbg(dev->dev, MEI_HDR_FMT, MEI_HDR_PRM(mei_hdr)); 296 dev_dbg(dev->dev, MEI_HDR_FMT, MEI_HDR_PRM(mei_hdr));
285 297
286 if (mei_slots2data(*slots) < mei_hdr->length) { 298 if (mei_slots2data(*slots) < mei_hdr->length) {
@@ -291,6 +303,12 @@ int mei_irq_read_handler(struct mei_device *dev,
291 goto end; 303 goto end;
292 } 304 }
293 305
306 if (mei_hdr->dma_ring) {
307 dev->rd_msg_hdr[1] = mei_read_hdr(dev);
308 (*slots)--;
309 mei_hdr->length = 0;
310 }
311
294 /* HBM message */ 312 /* HBM message */
295 if (hdr_is_hbm(mei_hdr)) { 313 if (hdr_is_hbm(mei_hdr)) {
296 ret = mei_hbm_dispatch(dev, mei_hdr); 314 ret = mei_hbm_dispatch(dev, mei_hdr);
@@ -324,7 +342,7 @@ int mei_irq_read_handler(struct mei_device *dev,
324 goto reset_slots; 342 goto reset_slots;
325 } 343 }
326 dev_err(dev->dev, "no destination client found 0x%08X\n", 344 dev_err(dev->dev, "no destination client found 0x%08X\n",
327 dev->rd_msg_hdr); 345 dev->rd_msg_hdr[0]);
328 ret = -EBADMSG; 346 ret = -EBADMSG;
329 goto end; 347 goto end;
330 } 348 }
@@ -334,9 +352,8 @@ int mei_irq_read_handler(struct mei_device *dev,
334 352
335reset_slots: 353reset_slots:
336 /* reset the number of slots and header */ 354 /* reset the number of slots and header */
355 memset(dev->rd_msg_hdr, 0, sizeof(dev->rd_msg_hdr));
337 *slots = mei_count_full_read_slots(dev); 356 *slots = mei_count_full_read_slots(dev);
338 dev->rd_msg_hdr = 0;
339
340 if (*slots == -EOVERFLOW) { 357 if (*slots == -EOVERFLOW) {
341 /* overflow - reset */ 358 /* overflow - reset */
342 dev_err(dev->dev, "resetting due to slots overflow.\n"); 359 dev_err(dev->dev, "resetting due to slots overflow.\n");
diff --git a/drivers/misc/mei/mei_dev.h b/drivers/misc/mei/mei_dev.h
index 377397e1b5a5..685b78ce30a5 100644
--- a/drivers/misc/mei/mei_dev.h
+++ b/drivers/misc/mei/mei_dev.h
@@ -122,6 +122,19 @@ struct mei_msg_data {
122 unsigned char *data; 122 unsigned char *data;
123}; 123};
124 124
125/**
126 * struct mei_dma_dscr - dma address descriptor
127 *
128 * @vaddr: dma buffer virtual address
129 * @daddr: dma buffer physical address
130 * @size : dma buffer size
131 */
132struct mei_dma_dscr {
133 void *vaddr;
134 dma_addr_t daddr;
135 size_t size;
136};
137
125/* Maximum number of processed FW status registers */ 138/* Maximum number of processed FW status registers */
126#define MEI_FW_STATUS_MAX 6 139#define MEI_FW_STATUS_MAX 6
127/* Minimal buffer for FW status string (8 bytes in dw + space or '\0') */ 140/* Minimal buffer for FW status string (8 bytes in dw + space or '\0') */
@@ -409,6 +422,7 @@ struct mei_fw_version {
409 * @rd_msg_hdr : read message header storage 422 * @rd_msg_hdr : read message header storage
410 * 423 *
411 * @hbuf_is_ready : query if the host host/write buffer is ready 424 * @hbuf_is_ready : query if the host host/write buffer is ready
425 * @dr_dscr: DMA ring descriptors: TX, RX, and CTRL
412 * 426 *
413 * @version : HBM protocol version in use 427 * @version : HBM protocol version in use
414 * @hbm_f_pg_supported : hbm feature pgi protocol 428 * @hbm_f_pg_supported : hbm feature pgi protocol
@@ -483,11 +497,13 @@ struct mei_device {
483#endif /* CONFIG_PM */ 497#endif /* CONFIG_PM */
484 498
485 unsigned char rd_msg_buf[MEI_RD_MSG_BUF_SIZE]; 499 unsigned char rd_msg_buf[MEI_RD_MSG_BUF_SIZE];
486 u32 rd_msg_hdr; 500 u32 rd_msg_hdr[MEI_MSG_HDR_MAX];
487 501
488 /* write buffer */ 502 /* write buffer */
489 bool hbuf_is_ready; 503 bool hbuf_is_ready;
490 504
505 struct mei_dma_dscr dr_dscr[DMA_DSCR_NUM];
506
491 struct hbm_version version; 507 struct hbm_version version;
492 unsigned int hbm_f_pg_supported:1; 508 unsigned int hbm_f_pg_supported:1;
493 unsigned int hbm_f_dc_supported:1; 509 unsigned int hbm_f_dc_supported:1;
@@ -578,6 +594,14 @@ int mei_restart(struct mei_device *dev);
578void mei_stop(struct mei_device *dev); 594void mei_stop(struct mei_device *dev);
579void mei_cancel_work(struct mei_device *dev); 595void mei_cancel_work(struct mei_device *dev);
580 596
597int mei_dmam_ring_alloc(struct mei_device *dev);
598void mei_dmam_ring_free(struct mei_device *dev);
599bool mei_dma_ring_is_allocated(struct mei_device *dev);
600void mei_dma_ring_reset(struct mei_device *dev);
601void mei_dma_ring_read(struct mei_device *dev, unsigned char *buf, u32 len);
602void mei_dma_ring_write(struct mei_device *dev, unsigned char *buf, u32 len);
603u32 mei_dma_ring_empty_slots(struct mei_device *dev);
604
581/* 605/*
582 * MEI interrupt functions prototype 606 * MEI interrupt functions prototype
583 */ 607 */
diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c
index ea4e152270a3..73ace2d59dea 100644
--- a/drivers/misc/mei/pci-me.c
+++ b/drivers/misc/mei/pci-me.c
@@ -98,9 +98,9 @@ static const struct pci_device_id mei_me_pci_tbl[] = {
98 {MEI_PCI_DEVICE(MEI_DEV_ID_KBP, MEI_ME_PCH8_CFG)}, 98 {MEI_PCI_DEVICE(MEI_DEV_ID_KBP, MEI_ME_PCH8_CFG)},
99 {MEI_PCI_DEVICE(MEI_DEV_ID_KBP_2, MEI_ME_PCH8_CFG)}, 99 {MEI_PCI_DEVICE(MEI_DEV_ID_KBP_2, MEI_ME_PCH8_CFG)},
100 100
101 {MEI_PCI_DEVICE(MEI_DEV_ID_CNP_LP, MEI_ME_PCH8_CFG)}, 101 {MEI_PCI_DEVICE(MEI_DEV_ID_CNP_LP, MEI_ME_PCH12_CFG)},
102 {MEI_PCI_DEVICE(MEI_DEV_ID_CNP_LP_4, MEI_ME_PCH8_CFG)}, 102 {MEI_PCI_DEVICE(MEI_DEV_ID_CNP_LP_4, MEI_ME_PCH8_CFG)},
103 {MEI_PCI_DEVICE(MEI_DEV_ID_CNP_H, MEI_ME_PCH8_CFG)}, 103 {MEI_PCI_DEVICE(MEI_DEV_ID_CNP_H, MEI_ME_PCH12_CFG)},
104 {MEI_PCI_DEVICE(MEI_DEV_ID_CNP_H_4, MEI_ME_PCH8_CFG)}, 104 {MEI_PCI_DEVICE(MEI_DEV_ID_CNP_H_4, MEI_ME_PCH8_CFG)},
105 105
106 /* required last entry */ 106 /* required last entry */
diff --git a/drivers/misc/mic/card/mic_debugfs.c b/drivers/misc/mic/card/mic_debugfs.c
index 421b3d7911df..7a4140874888 100644
--- a/drivers/misc/mic/card/mic_debugfs.c
+++ b/drivers/misc/mic/card/mic_debugfs.c
@@ -37,9 +37,9 @@
37static struct dentry *mic_dbg; 37static struct dentry *mic_dbg;
38 38
39/** 39/**
40 * mic_intr_test - Send interrupts to host. 40 * mic_intr_show - Send interrupts to host.
41 */ 41 */
42static int mic_intr_test(struct seq_file *s, void *unused) 42static int mic_intr_show(struct seq_file *s, void *unused)
43{ 43{
44 struct mic_driver *mdrv = s->private; 44 struct mic_driver *mdrv = s->private;
45 struct mic_device *mdev = &mdrv->mdev; 45 struct mic_device *mdev = &mdrv->mdev;
@@ -56,23 +56,7 @@ static int mic_intr_test(struct seq_file *s, void *unused)
56 return 0; 56 return 0;
57} 57}
58 58
59static int mic_intr_test_open(struct inode *inode, struct file *file) 59DEFINE_SHOW_ATTRIBUTE(mic_intr);
60{
61 return single_open(file, mic_intr_test, inode->i_private);
62}
63
64static int mic_intr_test_release(struct inode *inode, struct file *file)
65{
66 return single_release(inode, file);
67}
68
69static const struct file_operations intr_test_ops = {
70 .owner = THIS_MODULE,
71 .open = mic_intr_test_open,
72 .read = seq_read,
73 .llseek = seq_lseek,
74 .release = mic_intr_test_release
75};
76 60
77/** 61/**
78 * mic_create_card_debug_dir - Initialize MIC debugfs entries. 62 * mic_create_card_debug_dir - Initialize MIC debugfs entries.
@@ -91,7 +75,7 @@ void __init mic_create_card_debug_dir(struct mic_driver *mdrv)
91 } 75 }
92 76
93 d = debugfs_create_file("intr_test", 0444, mdrv->dbg_dir, 77 d = debugfs_create_file("intr_test", 0444, mdrv->dbg_dir,
94 mdrv, &intr_test_ops); 78 mdrv, &mic_intr_fops);
95 79
96 if (!d) { 80 if (!d) {
97 dev_err(mdrv->dev, 81 dev_err(mdrv->dev,
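
This and the following debugfs conversions all use the same recipe: rename the show routine to <name>_show() and let DEFINE_SHOW_ATTRIBUTE(<name>) from <linux/seq_file.h> generate <name>_open() and <name>_fops, replacing the hand-rolled single_open()/single_release() boilerplate. A minimal sketch of the pattern outside this patch (the foo_stats name is made up):

#include <linux/debugfs.h>
#include <linux/seq_file.h>

static int foo_stats_show(struct seq_file *s, void *unused)
{
	seq_puts(s, "example\n");
	return 0;
}
DEFINE_SHOW_ATTRIBUTE(foo_stats);	/* emits foo_stats_open() and foo_stats_fops */

/* wired up at init time, e.g.:
 *	debugfs_create_file("stats", 0444, parent_dir, priv, &foo_stats_fops);
 */
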
diff --git a/drivers/misc/mic/cosm/cosm_debugfs.c b/drivers/misc/mic/cosm/cosm_debugfs.c
index 216cb3cd2fe3..71c216d0504d 100644
--- a/drivers/misc/mic/cosm/cosm_debugfs.c
+++ b/drivers/misc/mic/cosm/cosm_debugfs.c
@@ -28,12 +28,12 @@
28static struct dentry *cosm_dbg; 28static struct dentry *cosm_dbg;
29 29
30/** 30/**
31 * cosm_log_buf_show - Display MIC kernel log buffer 31 * log_buf_show - Display MIC kernel log buffer
32 * 32 *
33 * log_buf addr/len is read from System.map by user space 33 * log_buf addr/len is read from System.map by user space
34 * and populated in sysfs entries. 34 * and populated in sysfs entries.
35 */ 35 */
36static int cosm_log_buf_show(struct seq_file *s, void *unused) 36static int log_buf_show(struct seq_file *s, void *unused)
37{ 37{
38 void __iomem *log_buf_va; 38 void __iomem *log_buf_va;
39 int __iomem *log_buf_len_va; 39 int __iomem *log_buf_len_va;
@@ -78,26 +78,15 @@ done:
78 return 0; 78 return 0;
79} 79}
80 80
81static int cosm_log_buf_open(struct inode *inode, struct file *file) 81DEFINE_SHOW_ATTRIBUTE(log_buf);
82{
83 return single_open(file, cosm_log_buf_show, inode->i_private);
84}
85
86static const struct file_operations log_buf_ops = {
87 .owner = THIS_MODULE,
88 .open = cosm_log_buf_open,
89 .read = seq_read,
90 .llseek = seq_lseek,
91 .release = single_release
92};
93 82
94/** 83/**
95 * cosm_force_reset_show - Force MIC reset 84 * force_reset_show - Force MIC reset
96 * 85 *
97 * Invokes the force_reset COSM bus op instead of the standard reset 86 * Invokes the force_reset COSM bus op instead of the standard reset
98 * op in case a force reset of the MIC device is required 87 * op in case a force reset of the MIC device is required
99 */ 88 */
100static int cosm_force_reset_show(struct seq_file *s, void *pos) 89static int force_reset_show(struct seq_file *s, void *pos)
101{ 90{
102 struct cosm_device *cdev = s->private; 91 struct cosm_device *cdev = s->private;
103 92
@@ -105,18 +94,7 @@ static int cosm_force_reset_show(struct seq_file *s, void *pos)
105 return 0; 94 return 0;
106} 95}
107 96
108static int cosm_force_reset_debug_open(struct inode *inode, struct file *file) 97DEFINE_SHOW_ATTRIBUTE(force_reset);
109{
110 return single_open(file, cosm_force_reset_show, inode->i_private);
111}
112
113static const struct file_operations force_reset_ops = {
114 .owner = THIS_MODULE,
115 .open = cosm_force_reset_debug_open,
116 .read = seq_read,
117 .llseek = seq_lseek,
118 .release = single_release
119};
120 98
121void cosm_create_debug_dir(struct cosm_device *cdev) 99void cosm_create_debug_dir(struct cosm_device *cdev)
122{ 100{
@@ -130,9 +108,10 @@ void cosm_create_debug_dir(struct cosm_device *cdev)
130 if (!cdev->dbg_dir) 108 if (!cdev->dbg_dir)
131 return; 109 return;
132 110
133 debugfs_create_file("log_buf", 0444, cdev->dbg_dir, cdev, &log_buf_ops); 111 debugfs_create_file("log_buf", 0444, cdev->dbg_dir, cdev,
112 &log_buf_fops);
134 debugfs_create_file("force_reset", 0444, cdev->dbg_dir, cdev, 113 debugfs_create_file("force_reset", 0444, cdev->dbg_dir, cdev,
135 &force_reset_ops); 114 &force_reset_fops);
136} 115}
137 116
138void cosm_delete_debug_dir(struct cosm_device *cdev) 117void cosm_delete_debug_dir(struct cosm_device *cdev)
diff --git a/drivers/misc/mic/host/mic_debugfs.c b/drivers/misc/mic/host/mic_debugfs.c
index 0a9daba8bb5d..c6e3c764699f 100644
--- a/drivers/misc/mic/host/mic_debugfs.c
+++ b/drivers/misc/mic/host/mic_debugfs.c
@@ -54,23 +54,7 @@ static int mic_smpt_show(struct seq_file *s, void *pos)
54 return 0; 54 return 0;
55} 55}
56 56
57static int mic_smpt_debug_open(struct inode *inode, struct file *file) 57DEFINE_SHOW_ATTRIBUTE(mic_smpt);
58{
59 return single_open(file, mic_smpt_show, inode->i_private);
60}
61
62static int mic_smpt_debug_release(struct inode *inode, struct file *file)
63{
64 return single_release(inode, file);
65}
66
67static const struct file_operations smpt_file_ops = {
68 .owner = THIS_MODULE,
69 .open = mic_smpt_debug_open,
70 .read = seq_read,
71 .llseek = seq_lseek,
72 .release = mic_smpt_debug_release
73};
74 58
75static int mic_post_code_show(struct seq_file *s, void *pos) 59static int mic_post_code_show(struct seq_file *s, void *pos)
76{ 60{
@@ -81,23 +65,7 @@ static int mic_post_code_show(struct seq_file *s, void *pos)
81 return 0; 65 return 0;
82} 66}
83 67
84static int mic_post_code_debug_open(struct inode *inode, struct file *file) 68DEFINE_SHOW_ATTRIBUTE(mic_post_code);
85{
86 return single_open(file, mic_post_code_show, inode->i_private);
87}
88
89static int mic_post_code_debug_release(struct inode *inode, struct file *file)
90{
91 return single_release(inode, file);
92}
93
94static const struct file_operations post_code_ops = {
95 .owner = THIS_MODULE,
96 .open = mic_post_code_debug_open,
97 .read = seq_read,
98 .llseek = seq_lseek,
99 .release = mic_post_code_debug_release
100};
101 69
102static int mic_msi_irq_info_show(struct seq_file *s, void *pos) 70static int mic_msi_irq_info_show(struct seq_file *s, void *pos)
103{ 71{
@@ -143,24 +111,7 @@ static int mic_msi_irq_info_show(struct seq_file *s, void *pos)
143 return 0; 111 return 0;
144} 112}
145 113
146static int mic_msi_irq_info_debug_open(struct inode *inode, struct file *file) 114DEFINE_SHOW_ATTRIBUTE(mic_msi_irq_info);
147{
148 return single_open(file, mic_msi_irq_info_show, inode->i_private);
149}
150
151static int
152mic_msi_irq_info_debug_release(struct inode *inode, struct file *file)
153{
154 return single_release(inode, file);
155}
156
157static const struct file_operations msi_irq_info_ops = {
158 .owner = THIS_MODULE,
159 .open = mic_msi_irq_info_debug_open,
160 .read = seq_read,
161 .llseek = seq_lseek,
162 .release = mic_msi_irq_info_debug_release
163};
164 115
165/** 116/**
166 * mic_create_debug_dir - Initialize MIC debugfs entries. 117 * mic_create_debug_dir - Initialize MIC debugfs entries.
@@ -177,13 +128,14 @@ void mic_create_debug_dir(struct mic_device *mdev)
177 if (!mdev->dbg_dir) 128 if (!mdev->dbg_dir)
178 return; 129 return;
179 130
180 debugfs_create_file("smpt", 0444, mdev->dbg_dir, mdev, &smpt_file_ops); 131 debugfs_create_file("smpt", 0444, mdev->dbg_dir, mdev,
132 &mic_smpt_fops);
181 133
182 debugfs_create_file("post_code", 0444, mdev->dbg_dir, mdev, 134 debugfs_create_file("post_code", 0444, mdev->dbg_dir, mdev,
183 &post_code_ops); 135 &mic_post_code_fops);
184 136
185 debugfs_create_file("msi_irq_info", 0444, mdev->dbg_dir, mdev, 137 debugfs_create_file("msi_irq_info", 0444, mdev->dbg_dir, mdev,
186 &msi_irq_info_ops); 138 &mic_msi_irq_info_fops);
187} 139}
188 140
189/** 141/**
diff --git a/drivers/misc/mic/scif/scif_debugfs.c b/drivers/misc/mic/scif/scif_debugfs.c
index 6884dad97e17..cca5e980c710 100644
--- a/drivers/misc/mic/scif/scif_debugfs.c
+++ b/drivers/misc/mic/scif/scif_debugfs.c
@@ -24,7 +24,7 @@
24/* Debugfs parent dir */ 24/* Debugfs parent dir */
25static struct dentry *scif_dbg; 25static struct dentry *scif_dbg;
26 26
27static int scif_dev_test(struct seq_file *s, void *unused) 27static int scif_dev_show(struct seq_file *s, void *unused)
28{ 28{
29 int node; 29 int node;
30 30
@@ -44,23 +44,7 @@ static int scif_dev_test(struct seq_file *s, void *unused)
44 return 0; 44 return 0;
45} 45}
46 46
47static int scif_dev_test_open(struct inode *inode, struct file *file) 47DEFINE_SHOW_ATTRIBUTE(scif_dev);
48{
49 return single_open(file, scif_dev_test, inode->i_private);
50}
51
52static int scif_dev_test_release(struct inode *inode, struct file *file)
53{
54 return single_release(inode, file);
55}
56
57static const struct file_operations scif_dev_ops = {
58 .owner = THIS_MODULE,
59 .open = scif_dev_test_open,
60 .read = seq_read,
61 .llseek = seq_lseek,
62 .release = scif_dev_test_release
63};
64 48
65static void scif_display_window(struct scif_window *window, struct seq_file *s) 49static void scif_display_window(struct scif_window *window, struct seq_file *s)
66{ 50{
@@ -104,7 +88,7 @@ static void scif_display_all_windows(struct list_head *head, struct seq_file *s)
104 } 88 }
105} 89}
106 90
107static int scif_rma_test(struct seq_file *s, void *unused) 91static int scif_rma_show(struct seq_file *s, void *unused)
108{ 92{
109 struct scif_endpt *ep; 93 struct scif_endpt *ep;
110 struct list_head *pos; 94 struct list_head *pos;
@@ -123,23 +107,7 @@ static int scif_rma_test(struct seq_file *s, void *unused)
123 return 0; 107 return 0;
124} 108}
125 109
126static int scif_rma_test_open(struct inode *inode, struct file *file) 110DEFINE_SHOW_ATTRIBUTE(scif_rma);
127{
128 return single_open(file, scif_rma_test, inode->i_private);
129}
130
131static int scif_rma_test_release(struct inode *inode, struct file *file)
132{
133 return single_release(inode, file);
134}
135
136static const struct file_operations scif_rma_ops = {
137 .owner = THIS_MODULE,
138 .open = scif_rma_test_open,
139 .read = seq_read,
140 .llseek = seq_lseek,
141 .release = scif_rma_test_release
142};
143 111
144void __init scif_init_debugfs(void) 112void __init scif_init_debugfs(void)
145{ 113{
@@ -150,8 +118,8 @@ void __init scif_init_debugfs(void)
150 return; 118 return;
151 } 119 }
152 120
153 debugfs_create_file("scif_dev", 0444, scif_dbg, NULL, &scif_dev_ops); 121 debugfs_create_file("scif_dev", 0444, scif_dbg, NULL, &scif_dev_fops);
154 debugfs_create_file("scif_rma", 0444, scif_dbg, NULL, &scif_rma_ops); 122 debugfs_create_file("scif_rma", 0444, scif_dbg, NULL, &scif_rma_fops);
155 debugfs_create_u8("en_msg_log", 0666, scif_dbg, &scif_info.en_msg_log); 123 debugfs_create_u8("en_msg_log", 0666, scif_dbg, &scif_info.en_msg_log);
156 debugfs_create_u8("p2p_enable", 0666, scif_dbg, &scif_info.p2p_enable); 124 debugfs_create_u8("p2p_enable", 0666, scif_dbg, &scif_info.p2p_enable);
157} 125}
diff --git a/drivers/misc/mic/scif/scif_fence.c b/drivers/misc/mic/scif/scif_fence.c
index 7bb929f05d85..2e7ce6ae9dd2 100644
--- a/drivers/misc/mic/scif/scif_fence.c
+++ b/drivers/misc/mic/scif/scif_fence.c
@@ -195,10 +195,11 @@ static inline void *scif_get_local_va(off_t off, struct scif_window *window)
195 195
196static void scif_prog_signal_cb(void *arg) 196static void scif_prog_signal_cb(void *arg)
197{ 197{
198 struct scif_status *status = arg; 198 struct scif_cb_arg *cb_arg = arg;
199 199
200 dma_pool_free(status->ep->remote_dev->signal_pool, status, 200 dma_pool_free(cb_arg->ep->remote_dev->signal_pool, cb_arg->status,
201 status->src_dma_addr); 201 cb_arg->src_dma_addr);
202 kfree(cb_arg);
202} 203}
203 204
204static int _scif_prog_signal(scif_epd_t epd, dma_addr_t dst, u64 val) 205static int _scif_prog_signal(scif_epd_t epd, dma_addr_t dst, u64 val)
@@ -209,6 +210,7 @@ static int _scif_prog_signal(scif_epd_t epd, dma_addr_t dst, u64 val)
209 bool x100 = !is_dma_copy_aligned(chan->device, 1, 1, 1); 210 bool x100 = !is_dma_copy_aligned(chan->device, 1, 1, 1);
210 struct dma_async_tx_descriptor *tx; 211 struct dma_async_tx_descriptor *tx;
211 struct scif_status *status = NULL; 212 struct scif_status *status = NULL;
213 struct scif_cb_arg *cb_arg = NULL;
212 dma_addr_t src; 214 dma_addr_t src;
213 dma_cookie_t cookie; 215 dma_cookie_t cookie;
214 int err; 216 int err;
@@ -257,8 +259,16 @@ static int _scif_prog_signal(scif_epd_t epd, dma_addr_t dst, u64 val)
257 goto dma_fail; 259 goto dma_fail;
258 } 260 }
259 if (!x100) { 261 if (!x100) {
262 cb_arg = kmalloc(sizeof(*cb_arg), GFP_KERNEL);
263 if (!cb_arg) {
264 err = -ENOMEM;
265 goto dma_fail;
266 }
267 cb_arg->src_dma_addr = src;
268 cb_arg->status = status;
269 cb_arg->ep = ep;
260 tx->callback = scif_prog_signal_cb; 270 tx->callback = scif_prog_signal_cb;
261 tx->callback_param = status; 271 tx->callback_param = cb_arg;
262 } 272 }
263 cookie = tx->tx_submit(tx); 273 cookie = tx->tx_submit(tx);
264 if (dma_submit_error(cookie)) { 274 if (dma_submit_error(cookie)) {
@@ -270,9 +280,11 @@ static int _scif_prog_signal(scif_epd_t epd, dma_addr_t dst, u64 val)
270 dma_async_issue_pending(chan); 280 dma_async_issue_pending(chan);
271 return 0; 281 return 0;
272dma_fail: 282dma_fail:
273 if (!x100) 283 if (!x100) {
274 dma_pool_free(ep->remote_dev->signal_pool, status, 284 dma_pool_free(ep->remote_dev->signal_pool, status,
275 src - offsetof(struct scif_status, val)); 285 src - offsetof(struct scif_status, val));
286 kfree(cb_arg);
287 }
276alloc_fail: 288alloc_fail:
277 return err; 289 return err;
278} 290}
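
The fix bundles everything the completion callback needs (the endpoint, the status block, and the DMA address used to free it) into one separately allocated scif_cb_arg handed over via tx->callback_param, and frees that bundle in the callback. The same generic shape, sketched on its own with illustrative names that are not the driver's:

#include <linux/dmapool.h>
#include <linux/slab.h>

/* Carry every value the completion handler needs in one allocation,
 * release it in the handler itself. */
struct example_cb_arg {
	struct dma_pool *pool;
	void *vaddr;
	dma_addr_t daddr;
};

static void example_dma_done(void *arg)
{
	struct example_cb_arg *a = arg;

	dma_pool_free(a->pool, a->vaddr, a->daddr);
	kfree(a);
}

/* submit side (fragment):
 *	tx->callback = example_dma_done;
 *	tx->callback_param = a;
 */
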
diff --git a/drivers/misc/mic/scif/scif_rma.h b/drivers/misc/mic/scif/scif_rma.h
index fa6722279196..84af3033a473 100644
--- a/drivers/misc/mic/scif/scif_rma.h
+++ b/drivers/misc/mic/scif/scif_rma.h
@@ -206,6 +206,19 @@ struct scif_status {
206}; 206};
207 207
208/* 208/*
209 * struct scif_cb_arg - Stores the argument of the callback func
210 *
211 * @src_dma_addr: Source buffer DMA address
212 * @status: DMA status
213 * @ep: SCIF endpoint
214 */
215struct scif_cb_arg {
216 dma_addr_t src_dma_addr;
217 struct scif_status *status;
218 struct scif_endpt *ep;
219};
220
221/*
209 * struct scif_window - Registration Window for Self and Remote 222 * struct scif_window - Registration Window for Self and Remote
210 * 223 *
211 * @nr_pages: Number of pages which is defined as a s64 instead of an int 224 * @nr_pages: Number of pages which is defined as a s64 instead of an int
diff --git a/drivers/misc/mic/vop/vop_debugfs.c b/drivers/misc/mic/vop/vop_debugfs.c
index ab43884e5cd7..2ccef52aca23 100644
--- a/drivers/misc/mic/vop/vop_debugfs.c
+++ b/drivers/misc/mic/vop/vop_debugfs.c
@@ -101,23 +101,7 @@ static int vop_dp_show(struct seq_file *s, void *pos)
101 return 0; 101 return 0;
102} 102}
103 103
104static int vop_dp_debug_open(struct inode *inode, struct file *file) 104DEFINE_SHOW_ATTRIBUTE(vop_dp);
105{
106 return single_open(file, vop_dp_show, inode->i_private);
107}
108
109static int vop_dp_debug_release(struct inode *inode, struct file *file)
110{
111 return single_release(inode, file);
112}
113
114static const struct file_operations dp_ops = {
115 .owner = THIS_MODULE,
116 .open = vop_dp_debug_open,
117 .read = seq_read,
118 .llseek = seq_lseek,
119 .release = vop_dp_debug_release
120};
121 105
122static int vop_vdev_info_show(struct seq_file *s, void *unused) 106static int vop_vdev_info_show(struct seq_file *s, void *unused)
123{ 107{
@@ -194,23 +178,7 @@ static int vop_vdev_info_show(struct seq_file *s, void *unused)
194 return 0; 178 return 0;
195} 179}
196 180
197static int vop_vdev_info_debug_open(struct inode *inode, struct file *file) 181DEFINE_SHOW_ATTRIBUTE(vop_vdev_info);
198{
199 return single_open(file, vop_vdev_info_show, inode->i_private);
200}
201
202static int vop_vdev_info_debug_release(struct inode *inode, struct file *file)
203{
204 return single_release(inode, file);
205}
206
207static const struct file_operations vdev_info_ops = {
208 .owner = THIS_MODULE,
209 .open = vop_vdev_info_debug_open,
210 .read = seq_read,
211 .llseek = seq_lseek,
212 .release = vop_vdev_info_debug_release
213};
214 182
215void vop_init_debugfs(struct vop_info *vi) 183void vop_init_debugfs(struct vop_info *vi)
216{ 184{
@@ -222,8 +190,8 @@ void vop_init_debugfs(struct vop_info *vi)
222 pr_err("can't create debugfs dir vop\n"); 190 pr_err("can't create debugfs dir vop\n");
223 return; 191 return;
224 } 192 }
225 debugfs_create_file("dp", 0444, vi->dbg, vi, &dp_ops); 193 debugfs_create_file("dp", 0444, vi->dbg, vi, &vop_dp_fops);
226 debugfs_create_file("vdev_info", 0444, vi->dbg, vi, &vdev_info_ops); 194 debugfs_create_file("vdev_info", 0444, vi->dbg, vi, &vop_vdev_info_fops);
227} 195}
228 196
229void vop_exit_debugfs(struct vop_info *vi) 197void vop_exit_debugfs(struct vop_info *vi)
diff --git a/drivers/misc/pvpanic.c b/drivers/misc/pvpanic.c
new file mode 100644
index 000000000000..595ac065b401
--- /dev/null
+++ b/drivers/misc/pvpanic.c
@@ -0,0 +1,192 @@
1// SPDX-License-Identifier: GPL-2.0+
2/*
3 * Pvpanic Device Support
4 *
5 * Copyright (C) 2013 Fujitsu.
6 * Copyright (C) 2018 ZTE.
7 */
8
9#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
10
11#include <linux/acpi.h>
12#include <linux/kernel.h>
13#include <linux/module.h>
14#include <linux/of.h>
15#include <linux/of_address.h>
16#include <linux/platform_device.h>
17#include <linux/types.h>
18
19static void __iomem *base;
20
21#define PVPANIC_PANICKED (1 << 0)
22
23MODULE_AUTHOR("Hu Tao <hutao@cn.fujitsu.com>");
24MODULE_DESCRIPTION("pvpanic device driver");
25MODULE_LICENSE("GPL");
26
27static void
28pvpanic_send_event(unsigned int event)
29{
30 iowrite8(event, base);
31}
32
33static int
34pvpanic_panic_notify(struct notifier_block *nb, unsigned long code,
35 void *unused)
36{
37 pvpanic_send_event(PVPANIC_PANICKED);
38 return NOTIFY_DONE;
39}
40
41static struct notifier_block pvpanic_panic_nb = {
42 .notifier_call = pvpanic_panic_notify,
43 .priority = 1, /* let this called before broken drm_fb_helper */
44};
45
46#ifdef CONFIG_ACPI
47static int pvpanic_add(struct acpi_device *device);
48static int pvpanic_remove(struct acpi_device *device);
49
50static const struct acpi_device_id pvpanic_device_ids[] = {
51 { "QEMU0001", 0 },
52 { "", 0 }
53};
54MODULE_DEVICE_TABLE(acpi, pvpanic_device_ids);
55
56static struct acpi_driver pvpanic_driver = {
57 .name = "pvpanic",
58 .class = "QEMU",
59 .ids = pvpanic_device_ids,
60 .ops = {
61 .add = pvpanic_add,
62 .remove = pvpanic_remove,
63 },
64 .owner = THIS_MODULE,
65};
66
67static acpi_status
68pvpanic_walk_resources(struct acpi_resource *res, void *context)
69{
70 struct resource r;
71
72 if (acpi_dev_resource_io(res, &r)) {
73 base = ioport_map(r.start, resource_size(&r));
74 return AE_OK;
75 } else if (acpi_dev_resource_memory(res, &r)) {
76 base = ioremap(r.start, resource_size(&r));
77 return AE_OK;
78 }
79
80 return AE_ERROR;
81}
82
83static int pvpanic_add(struct acpi_device *device)
84{
85 int ret;
86
87 ret = acpi_bus_get_status(device);
88 if (ret < 0)
89 return ret;
90
91 if (!device->status.enabled || !device->status.functional)
92 return -ENODEV;
93
94 acpi_walk_resources(device->handle, METHOD_NAME__CRS,
95 pvpanic_walk_resources, NULL);
96
97 if (!base)
98 return -ENODEV;
99
100 atomic_notifier_chain_register(&panic_notifier_list,
101 &pvpanic_panic_nb);
102
103 return 0;
104}
105
106static int pvpanic_remove(struct acpi_device *device)
107{
108
109 atomic_notifier_chain_unregister(&panic_notifier_list,
110 &pvpanic_panic_nb);
111 iounmap(base);
112
113 return 0;
114}
115
116static int pvpanic_register_acpi_driver(void)
117{
118 return acpi_bus_register_driver(&pvpanic_driver);
119}
120
121static void pvpanic_unregister_acpi_driver(void)
122{
123 acpi_bus_unregister_driver(&pvpanic_driver);
124}
125#else
126static int pvpanic_register_acpi_driver(void)
127{
128 return -ENODEV;
129}
130
131static void pvpanic_unregister_acpi_driver(void) {}
132#endif
133
134static int pvpanic_mmio_probe(struct platform_device *pdev)
135{
136 struct resource *mem;
137
138 mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
139 if (!mem)
140 return -EINVAL;
141
142 base = devm_ioremap_resource(&pdev->dev, mem);
143 if (IS_ERR(base))
144 return PTR_ERR(base);
145
146 atomic_notifier_chain_register(&panic_notifier_list,
147 &pvpanic_panic_nb);
148
149 return 0;
150}
151
152static int pvpanic_mmio_remove(struct platform_device *pdev)
153{
154
155 atomic_notifier_chain_unregister(&panic_notifier_list,
156 &pvpanic_panic_nb);
157
158 return 0;
159}
160
161static const struct of_device_id pvpanic_mmio_match[] = {
162 { .compatible = "qemu,pvpanic-mmio", },
163 {}
164};
165
166static struct platform_driver pvpanic_mmio_driver = {
167 .driver = {
168 .name = "pvpanic-mmio",
169 .of_match_table = pvpanic_mmio_match,
170 },
171 .probe = pvpanic_mmio_probe,
172 .remove = pvpanic_mmio_remove,
173};
174
175static int __init pvpanic_mmio_init(void)
176{
177 if (acpi_disabled)
178 return platform_driver_register(&pvpanic_mmio_driver);
179 else
180 return pvpanic_register_acpi_driver();
181}
182
183static void __exit pvpanic_mmio_exit(void)
184{
185 if (acpi_disabled)
186 platform_driver_unregister(&pvpanic_mmio_driver);
187 else
188 pvpanic_unregister_acpi_driver();
189}
190
191module_init(pvpanic_mmio_init);
192module_exit(pvpanic_mmio_exit);
diff --git a/drivers/misc/ti-st/st_kim.c b/drivers/misc/ti-st/st_kim.c
index 1874ac922166..e7cfdbd1f66d 100644
--- a/drivers/misc/ti-st/st_kim.c
+++ b/drivers/misc/ti-st/st_kim.c
@@ -211,7 +211,7 @@ static void kim_int_recv(struct kim_data_s *kim_gdata,
211static long read_local_version(struct kim_data_s *kim_gdata, char *bts_scr_name) 211static long read_local_version(struct kim_data_s *kim_gdata, char *bts_scr_name)
212{ 212{
213 unsigned short version = 0, chip = 0, min_ver = 0, maj_ver = 0; 213 unsigned short version = 0, chip = 0, min_ver = 0, maj_ver = 0;
214 const char read_ver_cmd[] = { 0x01, 0x01, 0x10, 0x00 }; 214 static const char read_ver_cmd[] = { 0x01, 0x01, 0x10, 0x00 };
215 long timeout; 215 long timeout;
216 216
217 pr_debug("%s", __func__); 217 pr_debug("%s", __func__);
@@ -564,7 +564,7 @@ long st_kim_stop(void *kim_data)
564/* functions called from subsystems */ 564/* functions called from subsystems */
565/* called when debugfs entry is read from */ 565/* called when debugfs entry is read from */
566 566
567static int show_version(struct seq_file *s, void *unused) 567static int version_show(struct seq_file *s, void *unused)
568{ 568{
569 struct kim_data_s *kim_gdata = (struct kim_data_s *)s->private; 569 struct kim_data_s *kim_gdata = (struct kim_data_s *)s->private;
570 seq_printf(s, "%04X %d.%d.%d\n", kim_gdata->version.full, 570 seq_printf(s, "%04X %d.%d.%d\n", kim_gdata->version.full,
@@ -573,7 +573,7 @@ static int show_version(struct seq_file *s, void *unused)
573 return 0; 573 return 0;
574} 574}
575 575
576static int show_list(struct seq_file *s, void *unused) 576static int list_show(struct seq_file *s, void *unused)
577{ 577{
578 struct kim_data_s *kim_gdata = (struct kim_data_s *)s->private; 578 struct kim_data_s *kim_gdata = (struct kim_data_s *)s->private;
579 kim_st_list_protocols(kim_gdata->core_data, s); 579 kim_st_list_protocols(kim_gdata->core_data, s);
@@ -688,30 +688,8 @@ err:
688 *core_data = NULL; 688 *core_data = NULL;
689} 689}
690 690
691static int kim_version_open(struct inode *i, struct file *f) 691DEFINE_SHOW_ATTRIBUTE(version);
692{ 692DEFINE_SHOW_ATTRIBUTE(list);
693 return single_open(f, show_version, i->i_private);
694}
695
696static int kim_list_open(struct inode *i, struct file *f)
697{
698 return single_open(f, show_list, i->i_private);
699}
700
701static const struct file_operations version_debugfs_fops = {
702 /* version info */
703 .open = kim_version_open,
704 .read = seq_read,
705 .llseek = seq_lseek,
706 .release = single_release,
707};
708static const struct file_operations list_debugfs_fops = {
709 /* protocols info */
710 .open = kim_list_open,
711 .read = seq_read,
712 .llseek = seq_lseek,
713 .release = single_release,
714};
715 693
716/**********************************************************************/ 694/**********************************************************************/
717/* functions called from platform device driver subsystem 695/* functions called from platform device driver subsystem
@@ -789,9 +767,9 @@ static int kim_probe(struct platform_device *pdev)
789 } 767 }
790 768
791 debugfs_create_file("version", S_IRUGO, kim_debugfs_dir, 769 debugfs_create_file("version", S_IRUGO, kim_debugfs_dir,
792 kim_gdata, &version_debugfs_fops); 770 kim_gdata, &version_fops);
793 debugfs_create_file("protocols", S_IRUGO, kim_debugfs_dir, 771 debugfs_create_file("protocols", S_IRUGO, kim_debugfs_dir,
794 kim_gdata, &list_debugfs_fops); 772 kim_gdata, &list_fops);
795 return 0; 773 return 0;
796 774
797err_sysfs_group: 775err_sysfs_group:
diff --git a/drivers/misc/vexpress-syscfg.c b/drivers/misc/vexpress-syscfg.c
index 6c3591cdf855..a3c6c773d9dc 100644
--- a/drivers/misc/vexpress-syscfg.c
+++ b/drivers/misc/vexpress-syscfg.c
@@ -61,7 +61,7 @@ static int vexpress_syscfg_exec(struct vexpress_syscfg_func *func,
61 int tries; 61 int tries;
62 long timeout; 62 long timeout;
63 63
64 if (WARN_ON(index > func->num_templates)) 64 if (WARN_ON(index >= func->num_templates))
65 return -EINVAL; 65 return -EINVAL;
66 66
67 command = readl(syscfg->base + SYS_CFGCTRL); 67 command = readl(syscfg->base + SYS_CFGCTRL);
diff --git a/drivers/misc/vmw_balloon.c b/drivers/misc/vmw_balloon.c
index e6126a4b95d3..f8240b87df22 100644
--- a/drivers/misc/vmw_balloon.c
+++ b/drivers/misc/vmw_balloon.c
@@ -1470,18 +1470,7 @@ static int vmballoon_debug_show(struct seq_file *f, void *offset)
1470 return 0; 1470 return 0;
1471} 1471}
1472 1472
1473static int vmballoon_debug_open(struct inode *inode, struct file *file) 1473DEFINE_SHOW_ATTRIBUTE(vmballoon_debug);
1474{
1475 return single_open(file, vmballoon_debug_show, inode->i_private);
1476}
1477
1478static const struct file_operations vmballoon_debug_fops = {
1479 .owner = THIS_MODULE,
1480 .open = vmballoon_debug_open,
1481 .read = seq_read,
1482 .llseek = seq_lseek,
1483 .release = single_release,
1484};
1485 1474
1486static int __init vmballoon_debugfs_init(struct vmballoon *b) 1475static int __init vmballoon_debugfs_init(struct vmballoon *b)
1487{ 1476{
diff --git a/drivers/misc/vmw_vmci/vmci_host.c b/drivers/misc/vmw_vmci/vmci_host.c
index edfffc9699ba..5da1f3e3f997 100644
--- a/drivers/misc/vmw_vmci/vmci_host.c
+++ b/drivers/misc/vmw_vmci/vmci_host.c
@@ -750,19 +750,10 @@ static int vmci_host_do_ctx_set_cpt_state(struct vmci_host_dev *vmci_host_dev,
750 if (copy_from_user(&set_info, uptr, sizeof(set_info))) 750 if (copy_from_user(&set_info, uptr, sizeof(set_info)))
751 return -EFAULT; 751 return -EFAULT;
752 752
753 cpt_buf = kmalloc(set_info.buf_size, GFP_KERNEL); 753 cpt_buf = memdup_user((void __user *)(uintptr_t)set_info.cpt_buf,
754 if (!cpt_buf) { 754 set_info.buf_size);
755 vmci_ioctl_err( 755 if (IS_ERR(cpt_buf))
756 "cannot allocate memory to set cpt state (type=%d)\n", 756 return PTR_ERR(cpt_buf);
757 set_info.cpt_type);
758 return -ENOMEM;
759 }
760
761 if (copy_from_user(cpt_buf, (void __user *)(uintptr_t)set_info.cpt_buf,
762 set_info.buf_size)) {
763 retval = -EFAULT;
764 goto out;
765 }
766 757
767 cid = vmci_ctx_get_id(vmci_host_dev->context); 758 cid = vmci_ctx_get_id(vmci_host_dev->context);
768 set_info.result = vmci_ctx_set_chkpt_state(cid, set_info.cpt_type, 759 set_info.result = vmci_ctx_set_chkpt_state(cid, set_info.cpt_type,
@@ -770,7 +761,6 @@ static int vmci_host_do_ctx_set_cpt_state(struct vmci_host_dev *vmci_host_dev,
770 761
771 retval = copy_to_user(uptr, &set_info, sizeof(set_info)) ? -EFAULT : 0; 762 retval = copy_to_user(uptr, &set_info, sizeof(set_info)) ? -EFAULT : 0;
772 763
773out:
774 kfree(cpt_buf); 764 kfree(cpt_buf);
775 return retval; 765 return retval;
776} 766}
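
memdup_user() folds the kmalloc()-plus-copy_from_user() sequence into one call and reports failure through ERR_PTR(), which is why the error handling above collapses into a single IS_ERR() check. The general caller pattern, as a standalone sketch (the function name is invented):

#include <linux/err.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/uaccess.h>

static int example_take_blob(const void __user *ubuf, size_t len)
{
	void *kbuf;

	kbuf = memdup_user(ubuf, len);	/* allocates and copies in one step */
	if (IS_ERR(kbuf))
		return PTR_ERR(kbuf);	/* -ENOMEM or -EFAULT, as appropriate */

	/* ... consume kbuf ... */

	kfree(kbuf);
	return 0;
}
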
diff --git a/drivers/mtd/Kconfig b/drivers/mtd/Kconfig
index 1e18c9639c3e..79a8ff542883 100644
--- a/drivers/mtd/Kconfig
+++ b/drivers/mtd/Kconfig
@@ -1,5 +1,6 @@
1menuconfig MTD 1menuconfig MTD
2 tristate "Memory Technology Device (MTD) support" 2 tristate "Memory Technology Device (MTD) support"
3 imply NVMEM
3 help 4 help
4 Memory Technology Devices are flash, RAM and similar chips, often 5 Memory Technology Devices are flash, RAM and similar chips, often
5 used for solid state file systems on embedded devices. This option 6 used for solid state file systems on embedded devices. This option
diff --git a/drivers/mtd/mtdcore.c b/drivers/mtd/mtdcore.c
index b6b93291aba9..21e3cdc04036 100644
--- a/drivers/mtd/mtdcore.c
+++ b/drivers/mtd/mtdcore.c
@@ -41,6 +41,7 @@
41#include <linux/reboot.h> 41#include <linux/reboot.h>
42#include <linux/leds.h> 42#include <linux/leds.h>
43#include <linux/debugfs.h> 43#include <linux/debugfs.h>
44#include <linux/nvmem-provider.h>
44 45
45#include <linux/mtd/mtd.h> 46#include <linux/mtd/mtd.h>
46#include <linux/mtd/partitions.h> 47#include <linux/mtd/partitions.h>
@@ -488,6 +489,50 @@ int mtd_pairing_groups(struct mtd_info *mtd)
488} 489}
489EXPORT_SYMBOL_GPL(mtd_pairing_groups); 490EXPORT_SYMBOL_GPL(mtd_pairing_groups);
490 491
492static int mtd_nvmem_reg_read(void *priv, unsigned int offset,
493 void *val, size_t bytes)
494{
495 struct mtd_info *mtd = priv;
496 size_t retlen;
497 int err;
498
499 err = mtd_read(mtd, offset, bytes, &retlen, val);
500 if (err && err != -EUCLEAN)
501 return err;
502
503 return retlen == bytes ? 0 : -EIO;
504}
505
506static int mtd_nvmem_add(struct mtd_info *mtd)
507{
508 struct nvmem_config config = {};
509
510 config.dev = &mtd->dev;
511 config.name = mtd->name;
512 config.owner = THIS_MODULE;
513 config.reg_read = mtd_nvmem_reg_read;
514 config.size = mtd->size;
515 config.word_size = 1;
516 config.stride = 1;
517 config.read_only = true;
518 config.root_only = true;
519 config.no_of_node = true;
520 config.priv = mtd;
521
522 mtd->nvmem = nvmem_register(&config);
523 if (IS_ERR(mtd->nvmem)) {
524 /* Just ignore if there is no NVMEM support in the kernel */
525 if (PTR_ERR(mtd->nvmem) == -ENOSYS) {
526 mtd->nvmem = NULL;
527 } else {
528 dev_err(&mtd->dev, "Failed to register NVMEM device\n");
529 return PTR_ERR(mtd->nvmem);
530 }
531 }
532
533 return 0;
534}
535
491static struct dentry *dfs_dir_mtd; 536static struct dentry *dfs_dir_mtd;
492 537
493/** 538/**
@@ -570,6 +615,11 @@ int add_mtd_device(struct mtd_info *mtd)
570 if (error) 615 if (error)
571 goto fail_added; 616 goto fail_added;
572 617
618 /* Add the nvmem provider */
619 error = mtd_nvmem_add(mtd);
620 if (error)
621 goto fail_nvmem_add;
622
573 if (!IS_ERR_OR_NULL(dfs_dir_mtd)) { 623 if (!IS_ERR_OR_NULL(dfs_dir_mtd)) {
574 mtd->dbg.dfs_dir = debugfs_create_dir(dev_name(&mtd->dev), dfs_dir_mtd); 624 mtd->dbg.dfs_dir = debugfs_create_dir(dev_name(&mtd->dev), dfs_dir_mtd);
575 if (IS_ERR_OR_NULL(mtd->dbg.dfs_dir)) { 625 if (IS_ERR_OR_NULL(mtd->dbg.dfs_dir)) {
@@ -595,6 +645,8 @@ int add_mtd_device(struct mtd_info *mtd)
595 __module_get(THIS_MODULE); 645 __module_get(THIS_MODULE);
596 return 0; 646 return 0;
597 647
648fail_nvmem_add:
649 device_unregister(&mtd->dev);
598fail_added: 650fail_added:
599 of_node_put(mtd_get_of_node(mtd)); 651 of_node_put(mtd_get_of_node(mtd));
600 idr_remove(&mtd_idr, i); 652 idr_remove(&mtd_idr, i);
@@ -637,6 +689,10 @@ int del_mtd_device(struct mtd_info *mtd)
637 mtd->index, mtd->name, mtd->usecount); 689 mtd->index, mtd->name, mtd->usecount);
638 ret = -EBUSY; 690 ret = -EBUSY;
639 } else { 691 } else {
692 /* Try to remove the NVMEM provider */
693 if (mtd->nvmem)
694 nvmem_unregister(mtd->nvmem);
695
640 device_unregister(&mtd->dev); 696 device_unregister(&mtd->dev);
641 697
642 idr_remove(&mtd_idr, mtd->index); 698 idr_remove(&mtd_idr, mtd->index);
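
With this hook every registered MTD also appears as a read-only, root-only NVMEM provider, so generic NVMEM consumers can pull factory or calibration data straight from flash, and the raw contents are exposed through the provider's sysfs binary attribute as well. A rough consumer-side sketch using the API from <linux/nvmem-consumer.h> (the lookup name and offset are hypothetical):

#include <linux/err.h>
#include <linux/nvmem-consumer.h>

static int example_read_factory_block(struct device *dev, u8 *buf, size_t len)
{
	struct nvmem_device *nv;
	int ret;

	nv = nvmem_device_get(dev, "factory");	/* hypothetical provider name */
	if (IS_ERR(nv))
		return PTR_ERR(nv);

	ret = nvmem_device_read(nv, 0, len, buf);	/* bytes read or -errno */
	nvmem_device_put(nv);

	return ret < 0 ? ret : 0;
}
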
diff --git a/drivers/nvmem/core.c b/drivers/nvmem/core.c
index 27f67dfa649d..f7301bb4ef3b 100644
--- a/drivers/nvmem/core.c
+++ b/drivers/nvmem/core.c
@@ -28,6 +28,7 @@ struct nvmem_device {
28 size_t size; 28 size_t size;
29 bool read_only; 29 bool read_only;
30 int flags; 30 int flags;
31 enum nvmem_type type;
31 struct bin_attribute eeprom; 32 struct bin_attribute eeprom;
32 struct device *base_dev; 33 struct device *base_dev;
33 struct list_head cells; 34 struct list_head cells;
@@ -60,6 +61,13 @@ static LIST_HEAD(nvmem_lookup_list);
60 61
61static BLOCKING_NOTIFIER_HEAD(nvmem_notifier); 62static BLOCKING_NOTIFIER_HEAD(nvmem_notifier);
62 63
64static const char * const nvmem_type_str[] = {
65 [NVMEM_TYPE_UNKNOWN] = "Unknown",
66 [NVMEM_TYPE_EEPROM] = "EEPROM",
67 [NVMEM_TYPE_OTP] = "OTP",
68 [NVMEM_TYPE_BATTERY_BACKED] = "Battery backed",
69};
70
63#ifdef CONFIG_DEBUG_LOCK_ALLOC 71#ifdef CONFIG_DEBUG_LOCK_ALLOC
64static struct lock_class_key eeprom_lock_key; 72static struct lock_class_key eeprom_lock_key;
65#endif 73#endif
@@ -83,6 +91,21 @@ static int nvmem_reg_write(struct nvmem_device *nvmem, unsigned int offset,
83 return -EINVAL; 91 return -EINVAL;
84} 92}
85 93
94static ssize_t type_show(struct device *dev,
95 struct device_attribute *attr, char *buf)
96{
97 struct nvmem_device *nvmem = to_nvmem_device(dev);
98
99 return sprintf(buf, "%s\n", nvmem_type_str[nvmem->type]);
100}
101
102static DEVICE_ATTR_RO(type);
103
104static struct attribute *nvmem_attrs[] = {
105 &dev_attr_type.attr,
106 NULL,
107};
108
86static ssize_t bin_attr_nvmem_read(struct file *filp, struct kobject *kobj, 109static ssize_t bin_attr_nvmem_read(struct file *filp, struct kobject *kobj,
87 struct bin_attribute *attr, 110 struct bin_attribute *attr,
88 char *buf, loff_t pos, size_t count) 111 char *buf, loff_t pos, size_t count)
@@ -168,6 +191,7 @@ static struct bin_attribute *nvmem_bin_rw_attributes[] = {
168 191
169static const struct attribute_group nvmem_bin_rw_group = { 192static const struct attribute_group nvmem_bin_rw_group = {
170 .bin_attrs = nvmem_bin_rw_attributes, 193 .bin_attrs = nvmem_bin_rw_attributes,
194 .attrs = nvmem_attrs,
171}; 195};
172 196
173static const struct attribute_group *nvmem_rw_dev_groups[] = { 197static const struct attribute_group *nvmem_rw_dev_groups[] = {
@@ -191,6 +215,7 @@ static struct bin_attribute *nvmem_bin_ro_attributes[] = {
191 215
192static const struct attribute_group nvmem_bin_ro_group = { 216static const struct attribute_group nvmem_bin_ro_group = {
193 .bin_attrs = nvmem_bin_ro_attributes, 217 .bin_attrs = nvmem_bin_ro_attributes,
218 .attrs = nvmem_attrs,
194}; 219};
195 220
196static const struct attribute_group *nvmem_ro_dev_groups[] = { 221static const struct attribute_group *nvmem_ro_dev_groups[] = {
@@ -215,6 +240,7 @@ static struct bin_attribute *nvmem_bin_rw_root_attributes[] = {
215 240
216static const struct attribute_group nvmem_bin_rw_root_group = { 241static const struct attribute_group nvmem_bin_rw_root_group = {
217 .bin_attrs = nvmem_bin_rw_root_attributes, 242 .bin_attrs = nvmem_bin_rw_root_attributes,
243 .attrs = nvmem_attrs,
218}; 244};
219 245
220static const struct attribute_group *nvmem_rw_root_dev_groups[] = { 246static const struct attribute_group *nvmem_rw_root_dev_groups[] = {
@@ -238,6 +264,7 @@ static struct bin_attribute *nvmem_bin_ro_root_attributes[] = {
238 264
239static const struct attribute_group nvmem_bin_ro_root_group = { 265static const struct attribute_group nvmem_bin_ro_root_group = {
240 .bin_attrs = nvmem_bin_ro_root_attributes, 266 .bin_attrs = nvmem_bin_ro_root_attributes,
267 .attrs = nvmem_attrs,
241}; 268};
242 269
243static const struct attribute_group *nvmem_ro_root_dev_groups[] = { 270static const struct attribute_group *nvmem_ro_root_dev_groups[] = {
@@ -605,9 +632,11 @@ struct nvmem_device *nvmem_register(const struct nvmem_config *config)
605 nvmem->dev.bus = &nvmem_bus_type; 632 nvmem->dev.bus = &nvmem_bus_type;
606 nvmem->dev.parent = config->dev; 633 nvmem->dev.parent = config->dev;
607 nvmem->priv = config->priv; 634 nvmem->priv = config->priv;
635 nvmem->type = config->type;
608 nvmem->reg_read = config->reg_read; 636 nvmem->reg_read = config->reg_read;
609 nvmem->reg_write = config->reg_write; 637 nvmem->reg_write = config->reg_write;
610 nvmem->dev.of_node = config->dev->of_node; 638 if (!config->no_of_node)
639 nvmem->dev.of_node = config->dev->of_node;
611 640
612 if (config->id == -1 && config->name) { 641 if (config->id == -1 && config->name) {
613 dev_set_name(&nvmem->dev, "%s", config->name); 642 dev_set_name(&nvmem->dev, "%s", config->name);
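
Providers opt into the new sysfs type attribute simply by filling nvmem_config.type; anything left at the default still reads back as "Unknown". A minimal provider-side sketch using the new field (the driver name, size, and dummy read callback are made up):

#include <linux/err.h>
#include <linux/nvmem-provider.h>
#include <linux/platform_device.h>
#include <linux/string.h>

static int example_reg_read(void *priv, unsigned int off, void *val, size_t bytes)
{
	memset(val, 0xff, bytes);	/* dummy contents for the sketch */
	return 0;
}

static int example_nvmem_probe(struct platform_device *pdev)
{
	struct nvmem_config cfg = {
		.dev = &pdev->dev,
		.name = "example-eeprom",
		.type = NVMEM_TYPE_EEPROM,	/* reads back as "EEPROM" in .../type */
		.size = 256,
		.word_size = 1,
		.stride = 1,
		.read_only = true,
		.reg_read = example_reg_read,
	};

	return PTR_ERR_OR_ZERO(devm_nvmem_register(&pdev->dev, &cfg));
}
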
diff --git a/drivers/nvmem/meson-efuse.c b/drivers/nvmem/meson-efuse.c
index d769840d1e18..99372768446b 100644
--- a/drivers/nvmem/meson-efuse.c
+++ b/drivers/nvmem/meson-efuse.c
@@ -14,6 +14,7 @@
14 * more details. 14 * more details.
15 */ 15 */
16 16
17#include <linux/clk.h>
17#include <linux/module.h> 18#include <linux/module.h>
18#include <linux/nvmem-provider.h> 19#include <linux/nvmem-provider.h>
19#include <linux/of.h> 20#include <linux/of.h>
@@ -46,10 +47,36 @@ static int meson_efuse_probe(struct platform_device *pdev)
46 struct device *dev = &pdev->dev; 47 struct device *dev = &pdev->dev;
47 struct nvmem_device *nvmem; 48 struct nvmem_device *nvmem;
48 struct nvmem_config *econfig; 49 struct nvmem_config *econfig;
50 struct clk *clk;
49 unsigned int size; 51 unsigned int size;
52 int ret;
50 53
51 if (meson_sm_call(SM_EFUSE_USER_MAX, &size, 0, 0, 0, 0, 0) < 0) 54 clk = devm_clk_get(dev, NULL);
55 if (IS_ERR(clk)) {
56 ret = PTR_ERR(clk);
57 if (ret != -EPROBE_DEFER)
58 dev_err(dev, "failed to get efuse gate");
59 return ret;
60 }
61
62 ret = clk_prepare_enable(clk);
63 if (ret) {
64 dev_err(dev, "failed to enable gate");
65 return ret;
66 }
67
68 ret = devm_add_action_or_reset(dev,
69 (void(*)(void *))clk_disable_unprepare,
70 clk);
71 if (ret) {
72 dev_err(dev, "failed to add disable callback");
73 return ret;
74 }
75
76 if (meson_sm_call(SM_EFUSE_USER_MAX, &size, 0, 0, 0, 0, 0) < 0) {
77 dev_err(dev, "failed to get max user");
52 return -EINVAL; 78 return -EINVAL;
79 }
53 80
54 econfig = devm_kzalloc(dev, sizeof(*econfig), GFP_KERNEL); 81 econfig = devm_kzalloc(dev, sizeof(*econfig), GFP_KERNEL);
55 if (!econfig) 82 if (!econfig)
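
The devm_add_action_or_reset() call above leans on a function-pointer cast so clk_disable_unprepare() can serve directly as the cleanup action; an equivalent, cast-free shape of the same hook would look like this (sketch only, not part of the patch):

#include <linux/clk.h>
#include <linux/device.h>

static void example_clk_disable(void *data)
{
	clk_disable_unprepare(data);
}

static int example_enable_clk(struct device *dev, struct clk *clk)
{
	int ret = clk_prepare_enable(clk);

	if (ret)
		return ret;

	/* devres undoes the enable automatically on probe failure or unbind */
	return devm_add_action_or_reset(dev, example_clk_disable, clk);
}
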
diff --git a/drivers/parport/parport_pc.c b/drivers/parport/parport_pc.c
index 380916bff9e0..9c8249f74479 100644
--- a/drivers/parport/parport_pc.c
+++ b/drivers/parport/parport_pc.c
@@ -1667,7 +1667,7 @@ static int parport_ECP_supported(struct parport *pb)
1667 default: 1667 default:
1668 printk(KERN_WARNING "0x%lx: Unknown implementation ID\n", 1668 printk(KERN_WARNING "0x%lx: Unknown implementation ID\n",
1669 pb->base); 1669 pb->base);
1670 /* Assume 1 */ 1670 /* Fall through - Assume 1 */
1671 case 1: 1671 case 1:
1672 pword = 1; 1672 pword = 1;
1673 } 1673 }
diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c
index 921db6f80340..e1949f7efd9c 100644
--- a/drivers/pci/pci-acpi.c
+++ b/drivers/pci/pci-acpi.c
@@ -789,6 +789,24 @@ static void pci_acpi_optimize_delay(struct pci_dev *pdev,
789 ACPI_FREE(obj); 789 ACPI_FREE(obj);
790} 790}
791 791
792static void pci_acpi_set_untrusted(struct pci_dev *dev)
793{
794 u8 val;
795
796 if (pci_pcie_type(dev) != PCI_EXP_TYPE_ROOT_PORT)
797 return;
798 if (device_property_read_u8(&dev->dev, "ExternalFacingPort", &val))
799 return;
800
801 /*
802 * These root ports expose PCIe (including DMA) outside of the
803 * system so make sure we treat them and everything behind as
804 * untrusted.
805 */
806 if (val)
807 dev->untrusted = 1;
808}
809
792static void pci_acpi_setup(struct device *dev) 810static void pci_acpi_setup(struct device *dev)
793{ 811{
794 struct pci_dev *pci_dev = to_pci_dev(dev); 812 struct pci_dev *pci_dev = to_pci_dev(dev);
@@ -798,6 +816,7 @@ static void pci_acpi_setup(struct device *dev)
798 return; 816 return;
799 817
800 pci_acpi_optimize_delay(pci_dev, adev->handle); 818 pci_acpi_optimize_delay(pci_dev, adev->handle);
819 pci_acpi_set_untrusted(pci_dev);
801 820
802 pci_acpi_add_pm_notifier(adev, pci_dev); 821 pci_acpi_add_pm_notifier(adev, pci_dev);
803 if (!adev->wakeup.flags.valid) 822 if (!adev->wakeup.flags.valid)
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index b1c05b5054a0..257b9f6f2ebb 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -1378,6 +1378,19 @@ static void set_pcie_thunderbolt(struct pci_dev *dev)
1378 } 1378 }
1379} 1379}
1380 1380
1381static void set_pcie_untrusted(struct pci_dev *dev)
1382{
1383 struct pci_dev *parent;
1384
1385 /*
1386 * If the upstream bridge is untrusted we treat this device
1387 * untrusted as well.
1388 */
1389 parent = pci_upstream_bridge(dev);
1390 if (parent && parent->untrusted)
1391 dev->untrusted = true;
1392}
1393
1381/** 1394/**
1382 * pci_ext_cfg_is_aliased - Is ext config space just an alias of std config? 1395 * pci_ext_cfg_is_aliased - Is ext config space just an alias of std config?
1383 * @dev: PCI device 1396 * @dev: PCI device
@@ -1638,6 +1651,8 @@ int pci_setup_device(struct pci_dev *dev)
1638 /* Need to have dev->cfg_size ready */ 1651 /* Need to have dev->cfg_size ready */
1639 set_pcie_thunderbolt(dev); 1652 set_pcie_thunderbolt(dev);
1640 1653
1654 set_pcie_untrusted(dev);
1655
1641 /* "Unknown power state" */ 1656 /* "Unknown power state" */
1642 dev->current_state = PCI_UNKNOWN; 1657 dev->current_state = PCI_UNKNOWN;
1643 1658
diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig
index 45ef4d22f14c..e3b62c2ee8d1 100644
--- a/drivers/platform/x86/Kconfig
+++ b/drivers/platform/x86/Kconfig
@@ -1172,14 +1172,6 @@ config INTEL_SMARTCONNECT
1172 This driver checks to determine whether the device has Intel Smart 1172 This driver checks to determine whether the device has Intel Smart
1173 Connect enabled, and if so disables it. 1173 Connect enabled, and if so disables it.
1174 1174
1175config PVPANIC
1176 tristate "pvpanic device support"
1177 depends on ACPI
1178 ---help---
1179 This driver provides support for the pvpanic device. pvpanic is
1180 a paravirtualized device provided by QEMU; it lets a virtual machine
1181 (guest) communicate panic events to the host.
1182
1183config INTEL_PMC_IPC 1175config INTEL_PMC_IPC
1184 tristate "Intel PMC IPC Driver" 1176 tristate "Intel PMC IPC Driver"
1185 depends on ACPI 1177 depends on ACPI
diff --git a/drivers/platform/x86/Makefile b/drivers/platform/x86/Makefile
index d841c550e3cc..ce8da260c223 100644
--- a/drivers/platform/x86/Makefile
+++ b/drivers/platform/x86/Makefile
@@ -79,7 +79,6 @@ obj-$(CONFIG_APPLE_GMUX) += apple-gmux.o
79obj-$(CONFIG_INTEL_RST) += intel-rst.o 79obj-$(CONFIG_INTEL_RST) += intel-rst.o
80obj-$(CONFIG_INTEL_SMARTCONNECT) += intel-smartconnect.o 80obj-$(CONFIG_INTEL_SMARTCONNECT) += intel-smartconnect.o
81 81
82obj-$(CONFIG_PVPANIC) += pvpanic.o
83obj-$(CONFIG_ALIENWARE_WMI) += alienware-wmi.o 82obj-$(CONFIG_ALIENWARE_WMI) += alienware-wmi.o
84obj-$(CONFIG_INTEL_PMC_IPC) += intel_pmc_ipc.o 83obj-$(CONFIG_INTEL_PMC_IPC) += intel_pmc_ipc.o
85obj-$(CONFIG_TOUCHSCREEN_DMI) += touchscreen_dmi.o 84obj-$(CONFIG_TOUCHSCREEN_DMI) += touchscreen_dmi.o
diff --git a/drivers/platform/x86/pvpanic.c b/drivers/platform/x86/pvpanic.c
deleted file mode 100644
index fd86daba7ffd..000000000000
--- a/drivers/platform/x86/pvpanic.c
+++ /dev/null
@@ -1,124 +0,0 @@
1/*
2 * pvpanic.c - pvpanic Device Support
3 *
4 * Copyright (C) 2013 Fujitsu.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19 */
20
21#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
22
23#include <linux/kernel.h>
24#include <linux/module.h>
25#include <linux/init.h>
26#include <linux/types.h>
27#include <linux/acpi.h>
28
29MODULE_AUTHOR("Hu Tao <hutao@cn.fujitsu.com>");
30MODULE_DESCRIPTION("pvpanic device driver");
31MODULE_LICENSE("GPL");
32
33static int pvpanic_add(struct acpi_device *device);
34static int pvpanic_remove(struct acpi_device *device);
35
36static const struct acpi_device_id pvpanic_device_ids[] = {
37 { "QEMU0001", 0 },
38 { "", 0 },
39};
40MODULE_DEVICE_TABLE(acpi, pvpanic_device_ids);
41
42#define PVPANIC_PANICKED (1 << 0)
43
44static u16 port;
45
46static struct acpi_driver pvpanic_driver = {
47 .name = "pvpanic",
48 .class = "QEMU",
49 .ids = pvpanic_device_ids,
50 .ops = {
51 .add = pvpanic_add,
52 .remove = pvpanic_remove,
53 },
54 .owner = THIS_MODULE,
55};
56
57static void
58pvpanic_send_event(unsigned int event)
59{
60 outb(event, port);
61}
62
63static int
64pvpanic_panic_notify(struct notifier_block *nb, unsigned long code,
65 void *unused)
66{
67 pvpanic_send_event(PVPANIC_PANICKED);
68 return NOTIFY_DONE;
69}
70
71static struct notifier_block pvpanic_panic_nb = {
72 .notifier_call = pvpanic_panic_notify,
73 .priority = 1, /* let this called before broken drm_fb_helper */
74};
75
76
77static acpi_status
78pvpanic_walk_resources(struct acpi_resource *res, void *context)
79{
80 switch (res->type) {
81 case ACPI_RESOURCE_TYPE_END_TAG:
82 return AE_OK;
83
84 case ACPI_RESOURCE_TYPE_IO:
85 port = res->data.io.minimum;
86 return AE_OK;
87
88 default:
89 return AE_ERROR;
90 }
91}
92
93static int pvpanic_add(struct acpi_device *device)
94{
95 int ret;
96
97 ret = acpi_bus_get_status(device);
98 if (ret < 0)
99 return ret;
100
101 if (!device->status.enabled || !device->status.functional)
102 return -ENODEV;
103
104 acpi_walk_resources(device->handle, METHOD_NAME__CRS,
105 pvpanic_walk_resources, NULL);
106
107 if (!port)
108 return -ENODEV;
109
110 atomic_notifier_chain_register(&panic_notifier_list,
111 &pvpanic_panic_nb);
112
113 return 0;
114}
115
116static int pvpanic_remove(struct acpi_device *device)
117{
118
119 atomic_notifier_chain_unregister(&panic_notifier_list,
120 &pvpanic_panic_nb);
121 return 0;
122}
123
124module_acpi_driver(pvpanic_driver);
diff --git a/drivers/pps/clients/pps-gpio.c b/drivers/pps/clients/pps-gpio.c
index 333ad7d5b45b..dd5d1103e02b 100644
--- a/drivers/pps/clients/pps-gpio.c
+++ b/drivers/pps/clients/pps-gpio.c
@@ -158,10 +158,10 @@ static int pps_gpio_probe(struct platform_device *pdev)
158 if (data->capture_clear) 158 if (data->capture_clear)
159 pps_default_params |= PPS_CAPTURECLEAR | PPS_OFFSETCLEAR; 159 pps_default_params |= PPS_CAPTURECLEAR | PPS_OFFSETCLEAR;
160 data->pps = pps_register_source(&data->info, pps_default_params); 160 data->pps = pps_register_source(&data->info, pps_default_params);
161 if (data->pps == NULL) { 161 if (IS_ERR(data->pps)) {
162 dev_err(&pdev->dev, "failed to register IRQ %d as PPS source\n", 162 dev_err(&pdev->dev, "failed to register IRQ %d as PPS source\n",
163 data->irq); 163 data->irq);
164 return -EINVAL; 164 return PTR_ERR(data->pps);
165 } 165 }
166 166
167 /* register IRQ interrupt handler */ 167 /* register IRQ interrupt handler */
diff --git a/drivers/pps/clients/pps-ktimer.c b/drivers/pps/clients/pps-ktimer.c
index 04735649052a..728818b87af3 100644
--- a/drivers/pps/clients/pps-ktimer.c
+++ b/drivers/pps/clients/pps-ktimer.c
@@ -80,9 +80,9 @@ static int __init pps_ktimer_init(void)
80{ 80{
81 pps = pps_register_source(&pps_ktimer_info, 81 pps = pps_register_source(&pps_ktimer_info,
82 PPS_CAPTUREASSERT | PPS_OFFSETASSERT); 82 PPS_CAPTUREASSERT | PPS_OFFSETASSERT);
83 if (pps == NULL) { 83 if (IS_ERR(pps)) {
84 pr_err("cannot register PPS source\n"); 84 pr_err("cannot register PPS source\n");
85 return -ENOMEM; 85 return PTR_ERR(pps);
86 } 86 }
87 87
88 timer_setup(&ktimer, pps_ktimer_event, 0); 88 timer_setup(&ktimer, pps_ktimer_event, 0);
diff --git a/drivers/pps/clients/pps-ldisc.c b/drivers/pps/clients/pps-ldisc.c
index 73bd3bb4d93b..00f6c460e493 100644
--- a/drivers/pps/clients/pps-ldisc.c
+++ b/drivers/pps/clients/pps-ldisc.c
@@ -72,9 +72,9 @@ static int pps_tty_open(struct tty_struct *tty)
72 72
73 pps = pps_register_source(&info, PPS_CAPTUREBOTH | \ 73 pps = pps_register_source(&info, PPS_CAPTUREBOTH | \
74 PPS_OFFSETASSERT | PPS_OFFSETCLEAR); 74 PPS_OFFSETASSERT | PPS_OFFSETCLEAR);
75 if (pps == NULL) { 75 if (IS_ERR(pps)) {
76 pr_err("cannot register PPS source \"%s\"\n", info.path); 76 pr_err("cannot register PPS source \"%s\"\n", info.path);
77 return -ENOMEM; 77 return PTR_ERR(pps);
78 } 78 }
79 pps->lookup_cookie = tty; 79 pps->lookup_cookie = tty;
80 80
diff --git a/drivers/pps/clients/pps_parport.c b/drivers/pps/clients/pps_parport.c
index 4db824f88d00..7226e39aae83 100644
--- a/drivers/pps/clients/pps_parport.c
+++ b/drivers/pps/clients/pps_parport.c
@@ -179,7 +179,7 @@ static void parport_attach(struct parport *port)
179 179
180 device->pps = pps_register_source(&info, 180 device->pps = pps_register_source(&info,
181 PPS_CAPTUREBOTH | PPS_OFFSETASSERT | PPS_OFFSETCLEAR); 181 PPS_CAPTUREBOTH | PPS_OFFSETASSERT | PPS_OFFSETCLEAR);
182 if (device->pps == NULL) { 182 if (IS_ERR(device->pps)) {
183 pr_err("couldn't register PPS source\n"); 183 pr_err("couldn't register PPS source\n");
184 goto err_release_dev; 184 goto err_release_dev;
185 } 185 }
diff --git a/drivers/pps/kapi.c b/drivers/pps/kapi.c
index 805c749ac1ad..a1c3cd38754f 100644
--- a/drivers/pps/kapi.c
+++ b/drivers/pps/kapi.c
@@ -72,7 +72,8 @@ static void pps_echo_client_default(struct pps_device *pps, int event,
72 * source is described by info's fields and it will have, as default PPS 72 * source is described by info's fields and it will have, as default PPS
73 * parameters, the ones specified into default_params. 73 * parameters, the ones specified into default_params.
74 * 74 *
75 * The function returns, in case of success, the PPS device. Otherwise NULL. 75 * The function returns, in case of success, the PPS device. Otherwise
76 * ERR_PTR(errno).
76 */ 77 */
77 78
78struct pps_device *pps_register_source(struct pps_source_info *info, 79struct pps_device *pps_register_source(struct pps_source_info *info,
@@ -135,7 +136,7 @@ kfree_pps:
135pps_register_source_exit: 136pps_register_source_exit:
136 pr_err("%s: unable to register source\n", info->name); 137 pr_err("%s: unable to register source\n", info->name);
137 138
138 return NULL; 139 return ERR_PTR(err);
139} 140}
140EXPORT_SYMBOL(pps_register_source); 141EXPORT_SYMBOL(pps_register_source);
141 142
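With pps_register_source() now returning ERR_PTR(err) instead of NULL, callers move from a NULL check to the IS_ERR()/PTR_ERR() pattern used in the client updates above. A minimal caller sketch (not taken from this patch; my_info and my_params are placeholder names, and <linux/err.h> provides the helpers):

        struct pps_device *pps;

        pps = pps_register_source(&my_info, my_params);
        if (IS_ERR(pps)) {
                pr_err("cannot register PPS source\n");
                return PTR_ERR(pps);    /* propagate the encoded errno */
        }
        /* pps is valid from here on */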
diff --git a/drivers/ptp/ptp_clock.c b/drivers/ptp/ptp_clock.c
index 8a81eecc0ecd..48f3594a7458 100644
--- a/drivers/ptp/ptp_clock.c
+++ b/drivers/ptp/ptp_clock.c
@@ -265,8 +265,8 @@ struct ptp_clock *ptp_clock_register(struct ptp_clock_info *info,
265 pps.mode = PTP_PPS_MODE; 265 pps.mode = PTP_PPS_MODE;
266 pps.owner = info->owner; 266 pps.owner = info->owner;
267 ptp->pps_source = pps_register_source(&pps, PTP_PPS_DEFAULTS); 267 ptp->pps_source = pps_register_source(&pps, PTP_PPS_DEFAULTS);
268 if (!ptp->pps_source) { 268 if (IS_ERR(ptp->pps_source)) {
269 err = -EINVAL; 269 err = PTR_ERR(ptp->pps_source);
270 pr_err("failed to register pps source\n"); 270 pr_err("failed to register pps source\n");
271 goto no_pps; 271 goto no_pps;
272 } 272 }
diff --git a/drivers/slimbus/Kconfig b/drivers/slimbus/Kconfig
index 9d73ad806698..8cd595148d17 100644
--- a/drivers/slimbus/Kconfig
+++ b/drivers/slimbus/Kconfig
@@ -22,8 +22,9 @@ config SLIM_QCOM_CTRL
22 22
23config SLIM_QCOM_NGD_CTRL 23config SLIM_QCOM_NGD_CTRL
24 tristate "Qualcomm SLIMbus Satellite Non-Generic Device Component" 24 tristate "Qualcomm SLIMbus Satellite Non-Generic Device Component"
25 depends on QCOM_QMI_HELPERS 25 depends on HAS_IOMEM && DMA_ENGINE && NET
26 depends on HAS_IOMEM && DMA_ENGINE 26 depends on ARCH_QCOM || COMPILE_TEST
27 select QCOM_QMI_HELPERS
27 help 28 help
28 Select driver if Qualcomm's SLIMbus Satellite Non-Generic Device 29 Select driver if Qualcomm's SLIMbus Satellite Non-Generic Device
29 Component is programmed using Linux kernel. 30 Component is programmed using Linux kernel.
diff --git a/drivers/slimbus/qcom-ctrl.c b/drivers/slimbus/qcom-ctrl.c
index db1f5135846a..ad3e2e23f56e 100644
--- a/drivers/slimbus/qcom-ctrl.c
+++ b/drivers/slimbus/qcom-ctrl.c
@@ -654,8 +654,7 @@ static int qcom_slim_remove(struct platform_device *pdev)
654#ifdef CONFIG_PM 654#ifdef CONFIG_PM
655static int qcom_slim_runtime_suspend(struct device *device) 655static int qcom_slim_runtime_suspend(struct device *device)
656{ 656{
657 struct platform_device *pdev = to_platform_device(device); 657 struct qcom_slim_ctrl *ctrl = dev_get_drvdata(device);
658 struct qcom_slim_ctrl *ctrl = platform_get_drvdata(pdev);
659 int ret; 658 int ret;
660 659
661 dev_dbg(device, "pm_runtime: suspending...\n"); 660 dev_dbg(device, "pm_runtime: suspending...\n");
@@ -672,8 +671,7 @@ static int qcom_slim_runtime_suspend(struct device *device)
672 671
673static int qcom_slim_runtime_resume(struct device *device) 672static int qcom_slim_runtime_resume(struct device *device)
674{ 673{
675 struct platform_device *pdev = to_platform_device(device); 674 struct qcom_slim_ctrl *ctrl = dev_get_drvdata(device);
676 struct qcom_slim_ctrl *ctrl = platform_get_drvdata(pdev);
677 int ret = 0; 675 int ret = 0;
678 676
679 dev_dbg(device, "pm_runtime: resuming...\n"); 677 dev_dbg(device, "pm_runtime: resuming...\n");
diff --git a/drivers/slimbus/qcom-ngd-ctrl.c b/drivers/slimbus/qcom-ngd-ctrl.c
index 1382a8df6c75..71f094c9ec68 100644
--- a/drivers/slimbus/qcom-ngd-ctrl.c
+++ b/drivers/slimbus/qcom-ngd-ctrl.c
@@ -787,7 +787,7 @@ static int qcom_slim_ngd_xfer_msg(struct slim_controller *sctrl,
787 787
788 if (txn->msg->num_bytes > SLIM_MSGQ_BUF_LEN || 788 if (txn->msg->num_bytes > SLIM_MSGQ_BUF_LEN ||
789 txn->rl > SLIM_MSGQ_BUF_LEN) { 789 txn->rl > SLIM_MSGQ_BUF_LEN) {
790 dev_err(ctrl->dev, "msg exeeds HW limit\n"); 790 dev_err(ctrl->dev, "msg exceeds HW limit\n");
791 return -EINVAL; 791 return -EINVAL;
792 } 792 }
793 793
@@ -1327,11 +1327,12 @@ static int of_qcom_slim_ngd_register(struct device *parent,
1327{ 1327{
1328 const struct ngd_reg_offset_data *data; 1328 const struct ngd_reg_offset_data *data;
1329 struct qcom_slim_ngd *ngd; 1329 struct qcom_slim_ngd *ngd;
1330 const struct of_device_id *match;
1330 struct device_node *node; 1331 struct device_node *node;
1331 u32 id; 1332 u32 id;
1332 1333
1333 data = of_match_node(qcom_slim_ngd_dt_match, parent->of_node)->data; 1334 match = of_match_node(qcom_slim_ngd_dt_match, parent->of_node);
1334 1335 data = match->data;
1335 for_each_available_child_of_node(parent->of_node, node) { 1336 for_each_available_child_of_node(parent->of_node, node) {
1336 if (of_property_read_u32(node, "reg", &id)) 1337 if (of_property_read_u32(node, "reg", &id))
1337 continue; 1338 continue;
diff --git a/drivers/soundwire/intel.c b/drivers/soundwire/intel.c
index c5ee97ee7886..fd8d034cfec1 100644
--- a/drivers/soundwire/intel.c
+++ b/drivers/soundwire/intel.c
@@ -654,14 +654,14 @@ static int intel_pdm_set_sdw_stream(struct snd_soc_dai *dai,
654 return cdns_set_sdw_stream(dai, stream, false, direction); 654 return cdns_set_sdw_stream(dai, stream, false, direction);
655} 655}
656 656
657static struct snd_soc_dai_ops intel_pcm_dai_ops = { 657static const struct snd_soc_dai_ops intel_pcm_dai_ops = {
658 .hw_params = intel_hw_params, 658 .hw_params = intel_hw_params,
659 .hw_free = intel_hw_free, 659 .hw_free = intel_hw_free,
660 .shutdown = sdw_cdns_shutdown, 660 .shutdown = sdw_cdns_shutdown,
661 .set_sdw_stream = intel_pcm_set_sdw_stream, 661 .set_sdw_stream = intel_pcm_set_sdw_stream,
662}; 662};
663 663
664static struct snd_soc_dai_ops intel_pdm_dai_ops = { 664static const struct snd_soc_dai_ops intel_pdm_dai_ops = {
665 .hw_params = intel_hw_params, 665 .hw_params = intel_hw_params,
666 .hw_free = intel_hw_free, 666 .hw_free = intel_hw_free,
667 .shutdown = sdw_cdns_shutdown, 667 .shutdown = sdw_cdns_shutdown,
diff --git a/drivers/thunderbolt/domain.c b/drivers/thunderbolt/domain.c
index 93e562f18d40..7416bdbd8576 100644
--- a/drivers/thunderbolt/domain.c
+++ b/drivers/thunderbolt/domain.c
@@ -7,7 +7,9 @@
7 */ 7 */
8 8
9#include <linux/device.h> 9#include <linux/device.h>
10#include <linux/dmar.h>
10#include <linux/idr.h> 11#include <linux/idr.h>
12#include <linux/iommu.h>
11#include <linux/module.h> 13#include <linux/module.h>
12#include <linux/pm_runtime.h> 14#include <linux/pm_runtime.h>
13#include <linux/slab.h> 15#include <linux/slab.h>
@@ -236,6 +238,20 @@ err_free_str:
236} 238}
237static DEVICE_ATTR_RW(boot_acl); 239static DEVICE_ATTR_RW(boot_acl);
238 240
241static ssize_t iommu_dma_protection_show(struct device *dev,
242 struct device_attribute *attr,
243 char *buf)
244{
245 /*
246 * Kernel DMA protection is a feature where Thunderbolt security is
247 * handled natively using IOMMU. It is enabled when IOMMU is
248 * enabled and ACPI DMAR table has DMAR_PLATFORM_OPT_IN set.
249 */
250 return sprintf(buf, "%d\n",
251 iommu_present(&pci_bus_type) && dmar_platform_optin());
252}
253static DEVICE_ATTR_RO(iommu_dma_protection);
254
239static ssize_t security_show(struct device *dev, struct device_attribute *attr, 255static ssize_t security_show(struct device *dev, struct device_attribute *attr,
240 char *buf) 256 char *buf)
241{ 257{
@@ -251,6 +267,7 @@ static DEVICE_ATTR_RO(security);
251 267
252static struct attribute *domain_attrs[] = { 268static struct attribute *domain_attrs[] = {
253 &dev_attr_boot_acl.attr, 269 &dev_attr_boot_acl.attr,
270 &dev_attr_iommu_dma_protection.attr,
254 &dev_attr_security.attr, 271 &dev_attr_security.attr,
255 NULL, 272 NULL,
256}; 273};
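The new attribute is exposed as a read-only sysfs file under the Thunderbolt domain. A small userspace sketch for reading it; the path below assumes the first domain (domain0), which may differ on a given system:

#include <stdio.h>

int main(void)
{
        FILE *f = fopen("/sys/bus/thunderbolt/devices/domain0/iommu_dma_protection", "r");
        int enabled = 0;

        if (!f)
                return 1;               /* no Thunderbolt domain or attribute not present */
        if (fscanf(f, "%d", &enabled) != 1)
                enabled = 0;
        fclose(f);
        printf("kernel DMA protection: %s\n", enabled ? "enabled" : "disabled");
        return 0;
}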
diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c
index 0a357db4b31b..131342280b46 100644
--- a/drivers/uio/uio.c
+++ b/drivers/uio/uio.c
@@ -569,20 +569,20 @@ static ssize_t uio_read(struct file *filep, char __user *buf,
569 ssize_t retval = 0; 569 ssize_t retval = 0;
570 s32 event_count; 570 s32 event_count;
571 571
572 mutex_lock(&idev->info_lock);
573 if (!idev->info || !idev->info->irq)
574 retval = -EIO;
575 mutex_unlock(&idev->info_lock);
576
577 if (retval)
578 return retval;
579
580 if (count != sizeof(s32)) 572 if (count != sizeof(s32))
581 return -EINVAL; 573 return -EINVAL;
582 574
583 add_wait_queue(&idev->wait, &wait); 575 add_wait_queue(&idev->wait, &wait);
584 576
585 do { 577 do {
578 mutex_lock(&idev->info_lock);
579 if (!idev->info || !idev->info->irq) {
580 retval = -EIO;
581 mutex_unlock(&idev->info_lock);
582 break;
583 }
584 mutex_unlock(&idev->info_lock);
585
586 set_current_state(TASK_INTERRUPTIBLE); 586 set_current_state(TASK_INTERRUPTIBLE);
587 587
588 event_count = atomic_read(&idev->event); 588 event_count = atomic_read(&idev->event);
@@ -1017,6 +1017,9 @@ void uio_unregister_device(struct uio_info *info)
1017 idev->info = NULL; 1017 idev->info = NULL;
1018 mutex_unlock(&idev->info_lock); 1018 mutex_unlock(&idev->info_lock);
1019 1019
1020 wake_up_interruptible(&idev->wait);
1021 kill_fasync(&idev->async_queue, SIGIO, POLL_HUP);
1022
1020 device_unregister(&idev->dev); 1023 device_unregister(&idev->dev);
1021 1024
1022 return; 1025 return;
diff --git a/drivers/uio/uio_fsl_elbc_gpcm.c b/drivers/uio/uio_fsl_elbc_gpcm.c
index 9cc37fe07d35..0ee3cd3c25ee 100644
--- a/drivers/uio/uio_fsl_elbc_gpcm.c
+++ b/drivers/uio/uio_fsl_elbc_gpcm.c
@@ -74,8 +74,7 @@ DEVICE_ATTR(reg_or, S_IRUGO|S_IWUSR|S_IWGRP, reg_show, reg_store);
74static ssize_t reg_show(struct device *dev, struct device_attribute *attr, 74static ssize_t reg_show(struct device *dev, struct device_attribute *attr,
75 char *buf) 75 char *buf)
76{ 76{
77 struct platform_device *pdev = to_platform_device(dev); 77 struct uio_info *info = dev_get_drvdata(dev);
78 struct uio_info *info = platform_get_drvdata(pdev);
79 struct fsl_elbc_gpcm *priv = info->priv; 78 struct fsl_elbc_gpcm *priv = info->priv;
80 struct fsl_lbc_bank *bank = &priv->lbc->bank[priv->bank]; 79 struct fsl_lbc_bank *bank = &priv->lbc->bank[priv->bank];
81 80
@@ -94,8 +93,7 @@ static ssize_t reg_show(struct device *dev, struct device_attribute *attr,
94static ssize_t reg_store(struct device *dev, struct device_attribute *attr, 93static ssize_t reg_store(struct device *dev, struct device_attribute *attr,
95 const char *buf, size_t count) 94 const char *buf, size_t count)
96{ 95{
97 struct platform_device *pdev = to_platform_device(dev); 96 struct uio_info *info = dev_get_drvdata(dev);
98 struct uio_info *info = platform_get_drvdata(pdev);
99 struct fsl_elbc_gpcm *priv = info->priv; 97 struct fsl_elbc_gpcm *priv = info->priv;
100 struct fsl_lbc_bank *bank = &priv->lbc->bank[priv->bank]; 98 struct fsl_lbc_bank *bank = &priv->lbc->bank[priv->bank];
101 unsigned long val; 99 unsigned long val;
diff --git a/drivers/virt/vboxguest/vboxguest_core.c b/drivers/virt/vboxguest/vboxguest_core.c
index 3093655c7b92..1475ed5ffcde 100644
--- a/drivers/virt/vboxguest/vboxguest_core.c
+++ b/drivers/virt/vboxguest/vboxguest_core.c
@@ -1312,7 +1312,7 @@ static int vbg_ioctl_hgcm_call(struct vbg_dev *gdev,
1312 return -EINVAL; 1312 return -EINVAL;
1313 } 1313 }
1314 1314
1315 if (f32bit) 1315 if (IS_ENABLED(CONFIG_COMPAT) && f32bit)
1316 ret = vbg_hgcm_call32(gdev, client_id, 1316 ret = vbg_hgcm_call32(gdev, client_id,
1317 call->function, call->timeout_ms, 1317 call->function, call->timeout_ms,
1318 VBG_IOCTL_HGCM_CALL_PARMS32(call), 1318 VBG_IOCTL_HGCM_CALL_PARMS32(call),
diff --git a/fs/file.c b/fs/file.c
index 50304c7525ea..3209ee271c41 100644
--- a/fs/file.c
+++ b/fs/file.c
@@ -640,6 +640,35 @@ out_unlock:
640} 640}
641EXPORT_SYMBOL(__close_fd); /* for ksys_close() */ 641EXPORT_SYMBOL(__close_fd); /* for ksys_close() */
642 642
643/*
644 * variant of __close_fd that gets a ref on the file for later fput
645 */
646int __close_fd_get_file(unsigned int fd, struct file **res)
647{
648 struct files_struct *files = current->files;
649 struct file *file;
650 struct fdtable *fdt;
651
652 spin_lock(&files->file_lock);
653 fdt = files_fdtable(files);
654 if (fd >= fdt->max_fds)
655 goto out_unlock;
656 file = fdt->fd[fd];
657 if (!file)
658 goto out_unlock;
659 rcu_assign_pointer(fdt->fd[fd], NULL);
660 __put_unused_fd(files, fd);
661 spin_unlock(&files->file_lock);
662 get_file(file);
663 *res = file;
664 return filp_close(file, files);
665
666out_unlock:
667 spin_unlock(&files->file_lock);
668 *res = NULL;
669 return -ENOENT;
670}
671
643void do_close_on_exec(struct files_struct *files) 672void do_close_on_exec(struct files_struct *files)
644{ 673{
645 unsigned i; 674 unsigned i;
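A sketch of the intended calling pattern for the new helper (not taken from this patch): the fd is removed from the table while the caller keeps a reference to the struct file, so the final fput() can happen later.

        struct file *file;
        int ret;

        ret = __close_fd_get_file(fd, &file);
        if (!file)
                return ret;     /* -ENOENT: fd was not open */

        /*
         * The fd is gone from the table, but the extra reference taken by the
         * helper keeps *file alive; use it (or defer its release) and then
         * drop the reference.
         */
        fput(file);
        return ret;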
diff --git a/include/linux/dmar.h b/include/linux/dmar.h
index 843a41ba7e28..f8af1d770520 100644
--- a/include/linux/dmar.h
+++ b/include/linux/dmar.h
@@ -39,6 +39,7 @@ struct acpi_dmar_header;
39/* DMAR Flags */ 39/* DMAR Flags */
40#define DMAR_INTR_REMAP 0x1 40#define DMAR_INTR_REMAP 0x1
41#define DMAR_X2APIC_OPT_OUT 0x2 41#define DMAR_X2APIC_OPT_OUT 0x2
42#define DMAR_PLATFORM_OPT_IN 0x4
42 43
43struct intel_iommu; 44struct intel_iommu;
44 45
@@ -170,6 +171,8 @@ static inline int dmar_ir_hotplug(struct dmar_drhd_unit *dmaru, bool insert)
170{ return 0; } 171{ return 0; }
171#endif /* CONFIG_IRQ_REMAP */ 172#endif /* CONFIG_IRQ_REMAP */
172 173
174extern bool dmar_platform_optin(void);
175
173#else /* CONFIG_DMAR_TABLE */ 176#else /* CONFIG_DMAR_TABLE */
174 177
175static inline int dmar_device_add(void *handle) 178static inline int dmar_device_add(void *handle)
@@ -182,6 +185,11 @@ static inline int dmar_device_remove(void *handle)
182 return 0; 185 return 0;
183} 186}
184 187
188static inline bool dmar_platform_optin(void)
189{
190 return false;
191}
192
185#endif /* CONFIG_DMAR_TABLE */ 193#endif /* CONFIG_DMAR_TABLE */
186 194
187struct irte { 195struct irte {
diff --git a/include/linux/fdtable.h b/include/linux/fdtable.h
index 41615f38bcff..f07c55ea0c22 100644
--- a/include/linux/fdtable.h
+++ b/include/linux/fdtable.h
@@ -121,6 +121,7 @@ extern void __fd_install(struct files_struct *files,
121 unsigned int fd, struct file *file); 121 unsigned int fd, struct file *file);
122extern int __close_fd(struct files_struct *files, 122extern int __close_fd(struct files_struct *files,
123 unsigned int fd); 123 unsigned int fd);
124extern int __close_fd_get_file(unsigned int fd, struct file **res);
124 125
125extern struct kmem_cache *files_cachep; 126extern struct kmem_cache *files_cachep;
126 127
diff --git a/include/linux/firmware/intel/stratix10-smc.h b/include/linux/firmware/intel/stratix10-smc.h
new file mode 100644
index 000000000000..5be5dab50b13
--- /dev/null
+++ b/include/linux/firmware/intel/stratix10-smc.h
@@ -0,0 +1,312 @@
1/* SPDX-License-Identifier: GPL-2.0 */
2/*
3 * Copyright (C) 2017-2018, Intel Corporation
4 */
5
6#ifndef __STRATIX10_SMC_H
7#define __STRATIX10_SMC_H
8
9#include <linux/arm-smccc.h>
10#include <linux/bitops.h>
11
12/**
13 * This file defines the Secure Monitor Call (SMC) message protocol used for
14 * service layer driver in normal world (EL1) to communicate with secure
15 * monitor software in Secure Monitor Exception Level 3 (EL3).
16 *
17 * This file is shared with secure firmware (FW) which is out of kernel tree.
18 *
19 * An ARM SMC instruction takes a function identifier and up to 6 64-bit
20 * register values as arguments, and can return up to 4 64-bit register
21 * values. The operation of the secure monitor is determined by the parameter
22 * values passed in through registers.
23 *
24 * EL1 and EL3 communicate pointers as physical addresses rather than
25 * virtual addresses.
26 *
27 * Functions specified by ARM SMC Calling convention:
28 *
29 * FAST call executes atomic operations, returns when the requested operation
30 * has completed.
31 * STD call starts an operation which can be preempted by a non-secure
32 * interrupt. The call can return before the requested operation has
33 * completed.
34 *
35 * a0..a7 is used as register names in the descriptions below, on arm32
36 * that translates to r0..r7 and on arm64 to w0..w7.
37 */
38
39/**
40 * @func_num: function ID
41 */
42#define INTEL_SIP_SMC_STD_CALL_VAL(func_num) \
43 ARM_SMCCC_CALL_VAL(ARM_SMCCC_STD_CALL, ARM_SMCCC_SMC_64, \
44 ARM_SMCCC_OWNER_SIP, (func_num))
45
46#define INTEL_SIP_SMC_FAST_CALL_VAL(func_num) \
47 ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, ARM_SMCCC_SMC_64, \
48 ARM_SMCCC_OWNER_SIP, (func_num))
49
50/**
51 * Return values in INTEL_SIP_SMC_* call
52 *
53 * INTEL_SIP_SMC_RETURN_UNKNOWN_FUNCTION:
54 * Secure monitor software doesn't recognize the request.
55 *
56 * INTEL_SIP_SMC_STATUS_OK:
57 * FPGA configuration completed successfully,
58 * In case of FPGA configuration write operation, it means secure monitor
59 * software can accept the next chunk of FPGA configuration data.
60 *
61 * INTEL_SIP_SMC_FPGA_CONFIG_STATUS_BUSY:
62 * In case of FPGA configuration write operation, it means secure monitor
63 * software is still processing previous data & can't accept the next chunk
64 * of data. Service driver needs to issue
65 * INTEL_SIP_SMC_FPGA_CONFIG_COMPLETED_WRITE call to query the
66 * completed block(s).
67 *
68 * INTEL_SIP_SMC_FPGA_CONFIG_STATUS_ERROR:
69 * There is error during the FPGA configuration process.
70 *
71 * INTEL_SIP_SMC_REG_ERROR:
72 * There is error during a read or write operation of the protected registers.
73 *
74 * INTEL_SIP_SMC_RSU_ERROR:
75 * There is error during a remote status update.
76 */
77#define INTEL_SIP_SMC_RETURN_UNKNOWN_FUNCTION 0xFFFFFFFF
78#define INTEL_SIP_SMC_STATUS_OK 0x0
79#define INTEL_SIP_SMC_FPGA_CONFIG_STATUS_BUSY 0x1
80#define INTEL_SIP_SMC_FPGA_CONFIG_STATUS_REJECTED 0x2
81#define INTEL_SIP_SMC_FPGA_CONFIG_STATUS_ERROR 0x4
82#define INTEL_SIP_SMC_REG_ERROR 0x5
83#define INTEL_SIP_SMC_RSU_ERROR 0x7
84
85/**
86 * Request INTEL_SIP_SMC_FPGA_CONFIG_START
87 *
88 * Sync call used by service driver at EL1 to request the FPGA in EL3 to
89 * be prepare to receive a new configuration.
90 *
91 * Call register usage:
92 * a0: INTEL_SIP_SMC_FPGA_CONFIG_START.
93 * a1: flag for full or partial configuration. 0 for full and 1 for partial
94 * configuration.
95 * a2-7: not used.
96 *
97 * Return status:
98 * a0: INTEL_SIP_SMC_STATUS_OK, or INTEL_SIP_SMC_FPGA_CONFIG_STATUS_ERROR.
99 * a1-3: not used.
100 */
101#define INTEL_SIP_SMC_FUNCID_FPGA_CONFIG_START 1
102#define INTEL_SIP_SMC_FPGA_CONFIG_START \
103 INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_FUNCID_FPGA_CONFIG_START)
104
105/**
106 * Request INTEL_SIP_SMC_FPGA_CONFIG_WRITE
107 *
108 * Async call used by service driver at EL1 to provide FPGA configuration data
109 * to secure world.
110 *
111 * Call register usage:
112 * a0: INTEL_SIP_SMC_FPGA_CONFIG_WRITE.
113 * a1: 64bit physical address of the configuration data memory block
114 * a2: Size of configuration data block.
115 * a3-7: not used.
116 *
117 * Return status:
118 * a0: INTEL_SIP_SMC_STATUS_OK, INTEL_SIP_SMC_FPGA_CONFIG_STATUS_BUSY or
119 * INTEL_SIP_SMC_FPGA_CONFIG_STATUS_ERROR.
120 * a1: 64bit physical address of 1st completed memory block if any completed
121 * block, otherwise zero value.
122 * a2: 64bit physical address of 2nd completed memory block if any completed
123 * block, otherwise zero value.
124 * a3: 64bit physical address of 3rd completed memory block if any completed
125 * block, otherwise zero value.
126 */
127#define INTEL_SIP_SMC_FUNCID_FPGA_CONFIG_WRITE 2
128#define INTEL_SIP_SMC_FPGA_CONFIG_WRITE \
129 INTEL_SIP_SMC_STD_CALL_VAL(INTEL_SIP_SMC_FUNCID_FPGA_CONFIG_WRITE)
130
131/**
132 * Request INTEL_SIP_SMC_FPGA_CONFIG_COMPLETED_WRITE
133 *
134 * Sync call used by service driver at EL1 to track the completed write
135 * transactions. This request is called after INTEL_SIP_SMC_FPGA_CONFIG_WRITE
136 * call returns INTEL_SIP_SMC_FPGA_CONFIG_STATUS_BUSY.
137 *
138 * Call register usage:
139 * a0: INTEL_SIP_SMC_FPGA_CONFIG_COMPLETED_WRITE.
140 * a1-7: not used.
141 *
142 * Return status:
143 * a0: INTEL_SIP_SMC_STATUS_OK, INTEL_SIP_SMC_FPGA_CONFIG_STATUS_BUSY or
144 * INTEL_SIP_SMC_FPGA_CONFIG_STATUS_ERROR.
145 * a1: 64bit physical address of 1st completed memory block.
146 * a2: 64bit physical address of 2nd completed memory block if
147 * any completed block, otherwise zero value.
148 * a3: 64bit physical address of 3rd completed memory block if
149 * any completed block, otherwise zero value.
150 */
151#define INTEL_SIP_SMC_FUNCID_FPGA_CONFIG_COMPLETED_WRITE 3
152#define INTEL_SIP_SMC_FPGA_CONFIG_COMPLETED_WRITE \
153INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_FUNCID_FPGA_CONFIG_COMPLETED_WRITE)
154
155/**
156 * Request INTEL_SIP_SMC_FPGA_CONFIG_ISDONE
157 *
158 * Sync call used by service driver at EL1 to inform secure world that all
159 * data are sent, to check whether or not the secure world had completed
160 * the FPGA configuration process.
161 *
162 * Call register usage:
163 * a0: INTEL_SIP_SMC_FPGA_CONFIG_ISDONE.
164 * a1-7: not used.
165 *
166 * Return status:
167 * a0: INTEL_SIP_SMC_STATUS_OK, INTEL_SIP_SMC_FPGA_CONFIG_STATUS_BUSY or
168 * INTEL_SIP_SMC_FPGA_CONFIG_STATUS_ERROR.
169 * a1-3: not used.
170 */
171#define INTEL_SIP_SMC_FUNCID_FPGA_CONFIG_ISDONE 4
172#define INTEL_SIP_SMC_FPGA_CONFIG_ISDONE \
173 INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_FUNCID_FPGA_CONFIG_ISDONE)
174
175/**
176 * Request INTEL_SIP_SMC_FPGA_CONFIG_GET_MEM
177 *
178 * Sync call used by service driver at EL1 to query the physical address of
179 * memory block reserved by secure monitor software.
180 *
181 * Call register usage:
182 * a0:INTEL_SIP_SMC_FPGA_CONFIG_GET_MEM.
183 * a1-7: not used.
184 *
185 * Return status:
186 * a0: INTEL_SIP_SMC_STATUS_OK or INTEL_SIP_SMC_FPGA_CONFIG_STATUS_ERROR.
187 * a1: starting physical address of the reserved memory block.
188 * a2: size of reserved memory block.
189 * a3: not used.
190 */
191#define INTEL_SIP_SMC_FUNCID_FPGA_CONFIG_GET_MEM 5
192#define INTEL_SIP_SMC_FPGA_CONFIG_GET_MEM \
193 INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_FUNCID_FPGA_CONFIG_GET_MEM)
194
195/**
196 * Request INTEL_SIP_SMC_FPGA_CONFIG_LOOPBACK
197 *
198 * For SMC loop-back mode only, used for internal integration, debugging
199 * or troubleshooting.
200 *
201 * Call register usage:
202 * a0: INTEL_SIP_SMC_FPGA_CONFIG_LOOPBACK.
203 * a1-7: not used.
204 *
205 * Return status:
206 * a0: INTEL_SIP_SMC_STATUS_OK or INTEL_SIP_SMC_FPGA_CONFIG_STATUS_ERROR.
207 * a1-3: not used.
208 */
209#define INTEL_SIP_SMC_FUNCID_FPGA_CONFIG_LOOPBACK 6
210#define INTEL_SIP_SMC_FPGA_CONFIG_LOOPBACK \
211 INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_FUNCID_FPGA_CONFIG_LOOPBACK)
212
213/*
214 * Request INTEL_SIP_SMC_REG_READ
215 *
216 * Read a protected register at EL3
217 *
218 * Call register usage:
219 * a0: INTEL_SIP_SMC_REG_READ.
220 * a1: register address.
221 * a2-7: not used.
222 *
223 * Return status:
224 * a0: INTEL_SIP_SMC_STATUS_OK or INTEL_SIP_SMC_REG_ERROR.
225 * a1: value in the register
226 * a2-3: not used.
227 */
228#define INTEL_SIP_SMC_FUNCID_REG_READ 7
229#define INTEL_SIP_SMC_REG_READ \
230 INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_FUNCID_REG_READ)
231
232/*
233 * Request INTEL_SIP_SMC_REG_WRITE
234 *
235 * Write a protected register at EL3
236 *
237 * Call register usage:
238 * a0: INTEL_SIP_SMC_REG_WRITE.
239 * a1: register address
240 * a2: value to program into register.
241 * a3-7: not used.
242 *
243 * Return status:
244 * a0: INTEL_SIP_SMC_STATUS_OK or INTEL_SIP_SMC_REG_ERROR.
245 * a1-3: not used.
246 */
247#define INTEL_SIP_SMC_FUNCID_REG_WRITE 8
248#define INTEL_SIP_SMC_REG_WRITE \
249 INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_FUNCID_REG_WRITE)
250
251/*
252 * Request INTEL_SIP_SMC_FUNCID_REG_UPDATE
253 *
254 * Update one or more bits in a protected register at EL3 using a
255 * read-modify-write operation.
256 *
257 * Call register usage:
258 * a0: INTEL_SIP_SMC_REG_UPDATE.
259 * a1: register address
260 * a2: write Mask.
261 * a3: value to write.
262 * a4-7: not used.
263 *
264 * Return status:
265 * a0: INTEL_SIP_SMC_STATUS_OK or INTEL_SIP_SMC_REG_ERROR.
266 * a1-3: Not used.
267 */
268#define INTEL_SIP_SMC_FUNCID_REG_UPDATE 9
269#define INTEL_SIP_SMC_REG_UPDATE \
270 INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_FUNCID_REG_UPDATE)
271
272/*
273 * Request INTEL_SIP_SMC_RSU_STATUS
274 *
275 * Request remote status update boot log, call is synchronous.
276 *
277 * Call register usage:
278 * a0 INTEL_SIP_SMC_RSU_STATUS
279 * a1-7 not used
280 *
281 * Return status
282 * a0: Current Image
283 * a1: Last Failing Image
284 * a2: Version | State
285 * a3: Error details | Error location
286 *
287 * Or
288 *
289 * a0: INTEL_SIP_SMC_RSU_ERROR
290 */
291#define INTEL_SIP_SMC_FUNCID_RSU_STATUS 11
292#define INTEL_SIP_SMC_RSU_STATUS \
293 INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_FUNCID_RSU_STATUS)
294
295/*
296 * Request INTEL_SIP_SMC_RSU_UPDATE
297 *
298 * Request to set the offset of the bitstream to boot after reboot, call
299 * is synchronous.
300 *
301 * Call register usage:
302 * a0 INTEL_SIP_SMC_RSU_UPDATE
303 * a1 64bit physical address of the configuration data memory in flash
304 * a2-7 not used
305 *
306 * Return status
307 * a0 INTEL_SIP_SMC_STATUS_OK
308 */
309#define INTEL_SIP_SMC_FUNCID_RSU_UPDATE 12
310#define INTEL_SIP_SMC_RSU_UPDATE \
311 INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_FUNCID_RSU_UPDATE)
312#endif
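An illustrative EL1 call sequence for the START request, assuming the generic arm_smccc_smc() helper from <linux/arm-smccc.h>; this is a sketch, not code from this series.

#include <linux/arm-smccc.h>
#include <linux/errno.h>

static int fpga_config_start_sketch(bool partial)
{
        struct arm_smccc_res res;

        /* a1 selects full (0) or partial (1) configuration, per the description above. */
        arm_smccc_smc(INTEL_SIP_SMC_FPGA_CONFIG_START,
                      partial ? 1 : 0, 0, 0, 0, 0, 0, 0, &res);

        return res.a0 == INTEL_SIP_SMC_STATUS_OK ? 0 : -EIO;
}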
diff --git a/include/linux/firmware/intel/stratix10-svc-client.h b/include/linux/firmware/intel/stratix10-svc-client.h
new file mode 100644
index 000000000000..e521f172a47a
--- /dev/null
+++ b/include/linux/firmware/intel/stratix10-svc-client.h
@@ -0,0 +1,217 @@
1/* SPDX-License-Identifier: GPL-2.0 */
2/*
3 * Copyright (C) 2017-2018, Intel Corporation
4 */
5
6#ifndef __STRATIX10_SVC_CLIENT_H
7#define __STRATIX10_SVC_CLIENT_H
8
9/**
10 * Client names supported by the service layer driver
11 *
12 * fpga: for FPGA configuration
13 * rsu: for remote status update
14 */
15#define SVC_CLIENT_FPGA "fpga"
16#define SVC_CLIENT_RSU "rsu"
17
18/**
19 * Status of the sent command, in bit number
20 *
21 * SVC_STATUS_RECONFIG_REQUEST_OK:
22 * Secure firmware accepts the request of FPGA reconfiguration.
23 *
24 * SVC_STATUS_RECONFIG_BUFFER_SUBMITTED:
25 * Service client successfully submits FPGA configuration
26 * data buffer to secure firmware.
27 *
28 * SVC_STATUS_RECONFIG_BUFFER_DONE:
29 * Secure firmware completes data process, ready to accept the
30 * next WRITE transaction.
31 *
32 * SVC_STATUS_RECONFIG_COMPLETED:
33 * Secure firmware completes FPGA configuration successfully, FPGA should
34 * be in user mode.
35 *
36 * SVC_STATUS_RECONFIG_BUSY:
37 * FPGA configuration is still in process.
38 *
39 * SVC_STATUS_RECONFIG_ERROR:
40 * Error encountered during FPGA configuration.
41 *
42 * SVC_STATUS_RSU_OK:
43 * Secure firmware accepts the request of remote status update (RSU).
44 */
45#define SVC_STATUS_RECONFIG_REQUEST_OK 0
46#define SVC_STATUS_RECONFIG_BUFFER_SUBMITTED 1
47#define SVC_STATUS_RECONFIG_BUFFER_DONE 2
48#define SVC_STATUS_RECONFIG_COMPLETED 3
49#define SVC_STATUS_RECONFIG_BUSY 4
50#define SVC_STATUS_RECONFIG_ERROR 5
51#define SVC_STATUS_RSU_OK 6
52#define SVC_STATUS_RSU_ERROR 7
53/**
54 * Flag bit for COMMAND_RECONFIG
55 *
56 * COMMAND_RECONFIG_FLAG_PARTIAL:
57 * Set to FPGA configuration type (full or partial), the default
58 * is full reconfig.
59 */
60#define COMMAND_RECONFIG_FLAG_PARTIAL 0
61
62/**
63 * Timeout settings for service clients:
64 * timeout value used in Stratix10 FPGA manager driver.
65 * timeout value used in RSU driver
66 */
67#define SVC_RECONFIG_REQUEST_TIMEOUT_MS 100
68#define SVC_RECONFIG_BUFFER_TIMEOUT_MS 240
69#define SVC_RSU_REQUEST_TIMEOUT_MS 300
70
71struct stratix10_svc_chan;
72
73/**
74 * enum stratix10_svc_command_code - supported service commands
75 *
76 * @COMMAND_NOOP: do 'dummy' request for integration/debug/trouble-shooting
77 *
78 * @COMMAND_RECONFIG: ask for FPGA configuration preparation, return status
79 * is SVC_STATUS_RECONFIG_REQUEST_OK
80 *
81 * @COMMAND_RECONFIG_DATA_SUBMIT: submit buffer(s) of bit-stream data for the
82 * FPGA configuration, return status is SVC_STATUS_RECONFIG_BUFFER_SUBMITTED,
83 * or SVC_STATUS_RECONFIG_ERROR
84 *
85 * @COMMAND_RECONFIG_DATA_CLAIM: check the status of the configuration, return
86 * status is SVC_STATUS_RECONFIG_COMPLETED, or SVC_STATUS_RECONFIG_BUSY, or
87 * SVC_STATUS_RECONFIG_ERROR
88 *
89 * @COMMAND_RECONFIG_STATUS: check the status of the configuration, return
90 * status is SVC_STATUS_RECONFIG_COMPLETED, or SVC_STATUS_RECONFIG_BUSY, or
91 * SVC_STATUS_RECONFIG_ERROR
92 *
93 * @COMMAND_RSU_STATUS: request remote system update boot log, return status
94 * is log data or SVC_STATUS_RSU_ERROR
95 *
96 * @COMMAND_RSU_UPDATE: set the offset of the bitstream to boot after reboot,
97 * return status is SVC_STATUS_RSU_OK or SVC_STATUS_RSU_ERROR
98 */
99enum stratix10_svc_command_code {
100 COMMAND_NOOP = 0,
101 COMMAND_RECONFIG,
102 COMMAND_RECONFIG_DATA_SUBMIT,
103 COMMAND_RECONFIG_DATA_CLAIM,
104 COMMAND_RECONFIG_STATUS,
105 COMMAND_RSU_STATUS,
106 COMMAND_RSU_UPDATE
107};
108
109/**
110 * struct stratix10_svc_client_msg - message sent by client to service
111 * @payload: starting address of the data to be processed
112 * @payload_length: data size in bytes
113 * @command: service command
114 * @arg: args to be passed via registers and not physically mapped buffers
115 */
116struct stratix10_svc_client_msg {
117 void *payload;
118 size_t payload_length;
119 enum stratix10_svc_command_code command;
120 u64 arg[3];
121};
122
123/**
124 * struct stratix10_svc_command_config_type - config type
125 * @flags: flag bit for the type of FPGA configuration
126 */
127struct stratix10_svc_command_config_type {
128 u32 flags;
129};
130
131/**
132 * struct stratix10_svc_cb_data - callback data structure from service layer
133 * @status: the status of sent command
134 * @kaddr1: address of 1st completed data block
135 * @kaddr2: address of 2nd completed data block
136 * @kaddr3: address of 3rd completed data block
137 */
138struct stratix10_svc_cb_data {
139 u32 status;
140 void *kaddr1;
141 void *kaddr2;
142 void *kaddr3;
143};
144
145/**
146 * struct stratix10_svc_client - service client structure
147 * @dev: the client device
148 * @receive_cb: callback to provide service client the received data
149 * @priv: client private data
150 */
151struct stratix10_svc_client {
152 struct device *dev;
153 void (*receive_cb)(struct stratix10_svc_client *client,
154 struct stratix10_svc_cb_data *cb_data);
155 void *priv;
156};
157
158/**
159 * stratix10_svc_request_channel_byname() - request service channel
160 * @client: identity of the client requesting the channel
161 * @name: one of the client names defined above
162 *
163 * Return: a pointer to channel assigned to the client on success,
164 * or ERR_PTR() on error.
165 */
166struct stratix10_svc_chan
167*stratix10_svc_request_channel_byname(struct stratix10_svc_client *client,
168 const char *name);
169
170/**
171 * stratix10_svc_free_channel() - free service channel.
172 * @chan: service channel to be freed
173 */
174void stratix10_svc_free_channel(struct stratix10_svc_chan *chan);
175
176/**
177 * stratix10_svc_allocate_memory() - allocate memory
178 * @chan: service channel assigned to the client
179 * @size: number of bytes client requests
180 *
181 * Service layer allocates the requested number of bytes from the memory
182 * pool for the client.
183 *
184 * Return: the starting address of allocated memory on success, or
185 * ERR_PTR() on error.
186 */
187void *stratix10_svc_allocate_memory(struct stratix10_svc_chan *chan,
188 size_t size);
189
190/**
191 * stratix10_svc_free_memory() - free allocated memory
192 * @chan: service channel assigned to the client
193 * @kaddr: starting address of the memory to be freed back to the pool
194 */
195void stratix10_svc_free_memory(struct stratix10_svc_chan *chan, void *kaddr);
196
197/**
198 * stratix10_svc_send() - send a message to the remote
199 * @chan: service channel assigned to the client
200 * @msg: message data to be sent, in the format of
201 * struct stratix10_svc_client_msg
202 *
203 * Return: 0 for success, -ENOMEM or -ENOBUFS on error.
204 */
205int stratix10_svc_send(struct stratix10_svc_chan *chan, void *msg);
206
207/**
208 * stratix10_svc_done() - complete service request
209 * @chan: service channel assigned to the client
210 *
211 * This function is used by a service client to inform the service layer
212 * that the client's service requests are completed, or that an error
213 * occurred during the request process.
214 */
215void stratix10_svc_done(struct stratix10_svc_chan *chan);
216#endif
217
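A minimal client sketch built only from the declarations above; the callback body is a placeholder and error handling is reduced to the essentials.

static void sketch_receive_cb(struct stratix10_svc_client *client,
                              struct stratix10_svc_cb_data *cb_data)
{
        /* Placeholder: inspect cb_data->status, e.g. SVC_STATUS_RECONFIG_COMPLETED. */
}

static int sketch_request_reconfig(struct device *dev)
{
        struct stratix10_svc_client client = {
                .dev = dev,
                .receive_cb = sketch_receive_cb,
        };
        struct stratix10_svc_client_msg msg = {
                .command = COMMAND_RECONFIG,
        };
        struct stratix10_svc_chan *chan;
        int ret;

        chan = stratix10_svc_request_channel_byname(&client, SVC_CLIENT_FPGA);
        if (IS_ERR(chan))
                return PTR_ERR(chan);

        ret = stratix10_svc_send(chan, &msg);
        stratix10_svc_free_channel(chan);
        return ret;
}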
diff --git a/include/linux/fsl/mc.h b/include/linux/fsl/mc.h
index 9d3f668df7df..741f567253ef 100644
--- a/include/linux/fsl/mc.h
+++ b/include/linux/fsl/mc.h
@@ -210,8 +210,8 @@ struct mc_cmd_header {
210}; 210};
211 211
212struct fsl_mc_command { 212struct fsl_mc_command {
213 u64 header; 213 __le64 header;
214 u64 params[MC_CMD_NUM_OF_PARAMS]; 214 __le64 params[MC_CMD_NUM_OF_PARAMS];
215}; 215};
216 216
217enum mc_cmd_status { 217enum mc_cmd_status {
@@ -238,11 +238,11 @@ enum mc_cmd_status {
238/* Command completion flag */ 238/* Command completion flag */
239#define MC_CMD_FLAG_INTR_DIS 0x01 239#define MC_CMD_FLAG_INTR_DIS 0x01
240 240
241static inline u64 mc_encode_cmd_header(u16 cmd_id, 241static inline __le64 mc_encode_cmd_header(u16 cmd_id,
242 u32 cmd_flags, 242 u32 cmd_flags,
243 u16 token) 243 u16 token)
244{ 244{
245 u64 header = 0; 245 __le64 header = 0;
246 struct mc_cmd_header *hdr = (struct mc_cmd_header *)&header; 246 struct mc_cmd_header *hdr = (struct mc_cmd_header *)&header;
247 247
248 hdr->cmd_id = cpu_to_le16(cmd_id); 248 hdr->cmd_id = cpu_to_le16(cmd_id);
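With the command fields declared __le64, the expectation is that the header comes pre-converted from mc_encode_cmd_header() and payload parameters are converted explicitly; a short sketch with placeholder cmd_id, token and value:

        struct fsl_mc_command cmd = { 0 };

        cmd.header = mc_encode_cmd_header(cmd_id, 0, token);    /* already little-endian */
        cmd.params[0] = cpu_to_le64(value);                     /* explicit conversion for the payload */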
diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h
index 14131b6fae68..f0885cc01db6 100644
--- a/include/linux/hyperv.h
+++ b/include/linux/hyperv.h
@@ -831,15 +831,6 @@ struct vmbus_channel {
831 */ 831 */
832 struct list_head sc_list; 832 struct list_head sc_list;
833 /* 833 /*
834 * Current number of sub-channels.
835 */
836 int num_sc;
837 /*
838 * Number of a sub-channel (position within sc_list) which is supposed
839 * to be used as the next outgoing channel.
840 */
841 int next_oc;
842 /*
843 * The primary channel this sub-channel belongs to. 834 * The primary channel this sub-channel belongs to.
844 * This will be NULL for the primary channel. 835 * This will be NULL for the primary channel.
845 */ 836 */
@@ -973,14 +964,6 @@ void vmbus_set_chn_rescind_callback(struct vmbus_channel *channel,
973 void (*chn_rescind_cb)(struct vmbus_channel *)); 964 void (*chn_rescind_cb)(struct vmbus_channel *));
974 965
975/* 966/*
976 * Retrieve the (sub) channel on which to send an outgoing request.
977 * When a primary channel has multiple sub-channels, we choose a
978 * channel whose VCPU binding is closest to the VCPU on which
979 * this call is being made.
980 */
981struct vmbus_channel *vmbus_get_outgoing_channel(struct vmbus_channel *primary);
982
983/*
984 * Check if sub-channels have already been offerred. This API will be useful 967 * Check if sub-channels have already been offerred. This API will be useful
985 * when the driver is unloaded after establishing sub-channels. In this case, 968 * when the driver is unloaded after establishing sub-channels. In this case,
986 * when the driver is re-loaded, the driver would have to check if the 969 * when the driver is re-loaded, the driver would have to check if the
diff --git a/include/linux/mtd/mtd.h b/include/linux/mtd/mtd.h
index ba8fa9072aca..677768b21a1d 100644
--- a/include/linux/mtd/mtd.h
+++ b/include/linux/mtd/mtd.h
@@ -25,6 +25,7 @@
25#include <linux/notifier.h> 25#include <linux/notifier.h>
26#include <linux/device.h> 26#include <linux/device.h>
27#include <linux/of.h> 27#include <linux/of.h>
28#include <linux/nvmem-provider.h>
28 29
29#include <mtd/mtd-abi.h> 30#include <mtd/mtd-abi.h>
30 31
@@ -342,6 +343,7 @@ struct mtd_info {
342 struct device dev; 343 struct device dev;
343 int usecount; 344 int usecount;
344 struct mtd_debug_info dbg; 345 struct mtd_debug_info dbg;
346 struct nvmem_device *nvmem;
345}; 347};
346 348
347int mtd_ooblayout_ecc(struct mtd_info *mtd, int section, 349int mtd_ooblayout_ecc(struct mtd_info *mtd, int section,
diff --git a/include/linux/nvmem-provider.h b/include/linux/nvmem-provider.h
index 1e3283c2af77..fe051323be0a 100644
--- a/include/linux/nvmem-provider.h
+++ b/include/linux/nvmem-provider.h
@@ -19,6 +19,13 @@ typedef int (*nvmem_reg_read_t)(void *priv, unsigned int offset,
19typedef int (*nvmem_reg_write_t)(void *priv, unsigned int offset, 19typedef int (*nvmem_reg_write_t)(void *priv, unsigned int offset,
20 void *val, size_t bytes); 20 void *val, size_t bytes);
21 21
22enum nvmem_type {
23 NVMEM_TYPE_UNKNOWN = 0,
24 NVMEM_TYPE_EEPROM,
25 NVMEM_TYPE_OTP,
26 NVMEM_TYPE_BATTERY_BACKED,
27};
28
22/** 29/**
23 * struct nvmem_config - NVMEM device configuration 30 * struct nvmem_config - NVMEM device configuration
24 * 31 *
@@ -28,8 +35,10 @@ typedef int (*nvmem_reg_write_t)(void *priv, unsigned int offset,
28 * @owner: Pointer to exporter module. Used for refcounting. 35 * @owner: Pointer to exporter module. Used for refcounting.
29 * @cells: Optional array of pre-defined NVMEM cells. 36 * @cells: Optional array of pre-defined NVMEM cells.
30 * @ncells: Number of elements in cells. 37 * @ncells: Number of elements in cells.
38 * @type: Type of the nvmem storage
31 * @read_only: Device is read-only. 39 * @read_only: Device is read-only.
32 * @root_only: Device is accessibly to root only. 40 * @root_only: Device is accessibly to root only.
41 * @no_of_node: Device should not use the parent's of_node even if it's !NULL.
33 * @reg_read: Callback to read data. 42 * @reg_read: Callback to read data.
34 * @reg_write: Callback to write data. 43 * @reg_write: Callback to write data.
35 * @size: Device size. 44 * @size: Device size.
@@ -51,8 +60,10 @@ struct nvmem_config {
51 struct module *owner; 60 struct module *owner;
52 const struct nvmem_cell_info *cells; 61 const struct nvmem_cell_info *cells;
53 int ncells; 62 int ncells;
63 enum nvmem_type type;
54 bool read_only; 64 bool read_only;
55 bool root_only; 65 bool root_only;
66 bool no_of_node;
56 nvmem_reg_read_t reg_read; 67 nvmem_reg_read_t reg_read;
57 nvmem_reg_write_t reg_write; 68 nvmem_reg_write_t reg_write;
58 int size; 69 int size;
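A hypothetical provider configuration exercising the new fields; the name, size and read callback are placeholders, and registration goes through the existing devm_nvmem_register() entry point.

static int sketch_reg_read(void *priv, unsigned int offset, void *val, size_t bytes)
{
        memset(val, 0xff, bytes);       /* placeholder: report erased content */
        return 0;
}

static struct nvmem_config sketch_config = {
        .name = "example-eeprom",
        .type = NVMEM_TYPE_EEPROM,      /* new field */
        .no_of_node = true,             /* new field: do not inherit the parent's of_node */
        .read_only = true,
        .size = 256,
        .reg_read = sketch_reg_read,
};

/* In probe(): sketch_config.dev = dev; nvmem = devm_nvmem_register(dev, &sketch_config); */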
diff --git a/include/linux/pci.h b/include/linux/pci.h
index 51a5a5217667..1ab78a23ae08 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -396,6 +396,14 @@ struct pci_dev {
396 unsigned int is_hotplug_bridge:1; 396 unsigned int is_hotplug_bridge:1;
397 unsigned int shpc_managed:1; /* SHPC owned by shpchp */ 397 unsigned int shpc_managed:1; /* SHPC owned by shpchp */
398 unsigned int is_thunderbolt:1; /* Thunderbolt controller */ 398 unsigned int is_thunderbolt:1; /* Thunderbolt controller */
399 /*
400 * Devices marked as untrusted are the ones that can potentially
401 * execute DMA attacks and similar. They are typically connected
402 * through external ports such as Thunderbolt but not limited to
403 * that. When an IOMMU is enabled they should be getting full
404 * mappings to make sure they cannot access arbitrary memory.
405 */
406 unsigned int untrusted:1;
399 unsigned int __aer_firmware_first_valid:1; 407 unsigned int __aer_firmware_first_valid:1;
400 unsigned int __aer_firmware_first:1; 408 unsigned int __aer_firmware_first:1;
401 unsigned int broken_intx_masking:1; /* INTx masking can't be used */ 409 unsigned int broken_intx_masking:1; /* INTx masking can't be used */
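A sketch, not taken from this series, of how IOMMU code might consult the new flag when deciding on mapping policy:

static bool sketch_needs_full_iommu_mapping(struct device *dev)
{
        return dev_is_pci(dev) && to_pci_dev(dev)->untrusted;
}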
diff --git a/include/uapi/linux/android/binder_ctl.h b/include/uapi/linux/android/binder_ctl.h
new file mode 100644
index 000000000000..65b2efd1a0a5
--- /dev/null
+++ b/include/uapi/linux/android/binder_ctl.h
@@ -0,0 +1,35 @@
1/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
2/*
3 * Copyright (C) 2018 Canonical Ltd.
4 *
5 */
6
7#ifndef _UAPI_LINUX_BINDER_CTL_H
8#define _UAPI_LINUX_BINDER_CTL_H
9
10#include <linux/android/binder.h>
11#include <linux/types.h>
12#include <linux/ioctl.h>
13
14#define BINDERFS_MAX_NAME 255
15
16/**
17 * struct binderfs_device - retrieve information about a new binder device
18 * @name: the name to use for the new binderfs binder device
19 * @major: major number allocated for binderfs binder devices
20 * @minor: minor number allocated for the new binderfs binder device
21 *
22 */
23struct binderfs_device {
24 char name[BINDERFS_MAX_NAME + 1];
25 __u8 major;
26 __u8 minor;
27};
28
29/**
30 * Allocate a new binder device.
31 */
32#define BINDER_CTL_ADD _IOWR('b', 1, struct binderfs_device)
33
34#endif /* _UAPI_LINUX_BINDER_CTL_H */
35
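A userspace sketch of allocating a binder device through the new ioctl; the binder-control path below is an assumption and depends on where binderfs is mounted.

#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/android/binder_ctl.h>

int add_binder_device(const char *name)
{
        struct binderfs_device device = { 0 };
        int fd, ret;

        strncpy(device.name, name, BINDERFS_MAX_NAME);
        fd = open("/dev/binderfs/binder-control", O_RDONLY | O_CLOEXEC);  /* assumed mount point */
        if (fd < 0)
                return -1;
        ret = ioctl(fd, BINDER_CTL_ADD, &device);       /* on success, major/minor are filled in */
        close(fd);
        return ret;
}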
diff --git a/include/uapi/linux/magic.h b/include/uapi/linux/magic.h
index 96c24478d8ce..f8c00045d537 100644
--- a/include/uapi/linux/magic.h
+++ b/include/uapi/linux/magic.h
@@ -73,6 +73,7 @@
73#define DAXFS_MAGIC 0x64646178 73#define DAXFS_MAGIC 0x64646178
74#define BINFMTFS_MAGIC 0x42494e4d 74#define BINFMTFS_MAGIC 0x42494e4d
75#define DEVPTS_SUPER_MAGIC 0x1cd1 75#define DEVPTS_SUPER_MAGIC 0x1cd1
76#define BINDERFS_SUPER_MAGIC 0x6c6f6f70
76#define FUTEXFS_SUPER_MAGIC 0xBAD1DEA 77#define FUTEXFS_SUPER_MAGIC 0xBAD1DEA
77#define PIPEFS_MAGIC 0x50495045 78#define PIPEFS_MAGIC 0x50495045
78#define PROC_SUPER_MAGIC 0x9fa0 79#define PROC_SUPER_MAGIC 0x9fa0
diff --git a/tools/Makefile b/tools/Makefile
index abb358a70ad0..77f1aee8ea01 100644
--- a/tools/Makefile
+++ b/tools/Makefile
@@ -13,6 +13,7 @@ help:
13 @echo ' cgroup - cgroup tools' 13 @echo ' cgroup - cgroup tools'
14 @echo ' cpupower - a tool for all things x86 CPU power' 14 @echo ' cpupower - a tool for all things x86 CPU power'
15 @echo ' firewire - the userspace part of nosy, an IEEE-1394 traffic sniffer' 15 @echo ' firewire - the userspace part of nosy, an IEEE-1394 traffic sniffer'
16 @echo ' firmware - Firmware tools'
16 @echo ' freefall - laptop accelerometer program for disk protection' 17 @echo ' freefall - laptop accelerometer program for disk protection'
17 @echo ' gpio - GPIO tools' 18 @echo ' gpio - GPIO tools'
18 @echo ' hv - tools used when in Hyper-V clients' 19 @echo ' hv - tools used when in Hyper-V clients'
@@ -60,7 +61,7 @@ acpi: FORCE
60cpupower: FORCE 61cpupower: FORCE
61 $(call descend,power/$@) 62 $(call descend,power/$@)
62 63
63cgroup firewire hv guest spi usb virtio vm bpf iio gpio objtool leds wmi pci: FORCE 64cgroup firewire hv guest spi usb virtio vm bpf iio gpio objtool leds wmi pci firmware: FORCE
64 $(call descend,$@) 65 $(call descend,$@)
65 66
66liblockdep: FORCE 67liblockdep: FORCE
@@ -137,7 +138,7 @@ acpi_clean:
137cpupower_clean: 138cpupower_clean:
138 $(call descend,power/cpupower,clean) 139 $(call descend,power/cpupower,clean)
139 140
140cgroup_clean hv_clean firewire_clean spi_clean usb_clean virtio_clean vm_clean wmi_clean bpf_clean iio_clean gpio_clean objtool_clean leds_clean pci_clean: 141cgroup_clean hv_clean firewire_clean spi_clean usb_clean virtio_clean vm_clean wmi_clean bpf_clean iio_clean gpio_clean objtool_clean leds_clean pci_clean firmware_clean:
141 $(call descend,$(@:_clean=),clean) 142 $(call descend,$(@:_clean=),clean)
142 143
143liblockdep_clean: 144liblockdep_clean:
@@ -175,6 +176,6 @@ clean: acpi_clean cgroup_clean cpupower_clean hv_clean firewire_clean \
175 perf_clean selftests_clean turbostat_clean spi_clean usb_clean virtio_clean \ 176 perf_clean selftests_clean turbostat_clean spi_clean usb_clean virtio_clean \
176 vm_clean bpf_clean iio_clean x86_energy_perf_policy_clean tmon_clean \ 177 vm_clean bpf_clean iio_clean x86_energy_perf_policy_clean tmon_clean \
177 freefall_clean build_clean libbpf_clean libsubcmd_clean liblockdep_clean \ 178 freefall_clean build_clean libbpf_clean libsubcmd_clean liblockdep_clean \
178 gpio_clean objtool_clean leds_clean wmi_clean pci_clean 179 gpio_clean objtool_clean leds_clean wmi_clean pci_clean firmware_clean
179 180
180.PHONY: FORCE 181.PHONY: FORCE
diff --git a/tools/firmware/Makefile b/tools/firmware/Makefile
new file mode 100644
index 000000000000..d329825aa31b
--- /dev/null
+++ b/tools/firmware/Makefile
@@ -0,0 +1,13 @@
1# SPDX-License-Identifier: GPL-2.0
2# Makefile for firmware tools
3
4CFLAGS = -Wall -Wextra -g
5
6all: ihex2fw
7%: %.c
8 $(CC) $(CFLAGS) -o $@ $^
9
10clean:
11 $(RM) ihex2fw
12
13.PHONY: all clean \ No newline at end of file
diff --git a/tools/firmware/ihex2fw.c b/tools/firmware/ihex2fw.c
new file mode 100644
index 000000000000..b58dd061e978
--- /dev/null
+++ b/tools/firmware/ihex2fw.c
@@ -0,0 +1,281 @@
1/*
2 * Parser/loader for IHEX formatted data.
3 *
4 * Copyright © 2008 David Woodhouse <dwmw2@infradead.org>
5 * Copyright © 2005 Jan Harkes <jaharkes@cs.cmu.edu>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11
12#include <stdint.h>
13#include <arpa/inet.h>
14#include <stdio.h>
15#include <errno.h>
16#include <sys/types.h>
17#include <sys/stat.h>
18#include <sys/mman.h>
19#include <fcntl.h>
20#include <string.h>
21#include <unistd.h>
22#include <stdlib.h>
23#define _GNU_SOURCE
24#include <getopt.h>
25
26
27struct ihex_binrec {
28 struct ihex_binrec *next; /* not part of the real data structure */
29 uint32_t addr;
30 uint16_t len;
31 uint8_t data[];
32};
33
34/**
35 * nybble/hex are little helpers to parse hexadecimal numbers to a byte value
36 **/
37static uint8_t nybble(const uint8_t n)
38{
39 if (n >= '0' && n <= '9') return n - '0';
40 else if (n >= 'A' && n <= 'F') return n - ('A' - 10);
41 else if (n >= 'a' && n <= 'f') return n - ('a' - 10);
42 return 0;
43}
44
45static uint8_t hex(const uint8_t *data, uint8_t *crc)
46{
47 uint8_t val = (nybble(data[0]) << 4) | nybble(data[1]);
48 *crc += val;
49 return val;
50}
51
52static int process_ihex(uint8_t *data, ssize_t size);
53static void file_record(struct ihex_binrec *record);
54static int output_records(int outfd);
55
56static int sort_records = 0;
57static int wide_records = 0;
58static int include_jump = 0;
59
60static int usage(void)
61{
62 fprintf(stderr, "ihex2fw: Convert ihex files into binary "
63 "representation for use by Linux kernel\n");
64 fprintf(stderr, "usage: ihex2fw [<options>] <src.HEX> <dst.fw>\n");
65 fprintf(stderr, " -w: wide records (16-bit length)\n");
66 fprintf(stderr, " -s: sort records by address\n");
67 fprintf(stderr, " -j: include records for CS:IP/EIP address\n");
68 return 1;
69}
70
71int main(int argc, char **argv)
72{
73 int infd, outfd;
74 struct stat st;
75 uint8_t *data;
76 int opt;
77
78 while ((opt = getopt(argc, argv, "wsj")) != -1) {
79 switch (opt) {
80 case 'w':
81 wide_records = 1;
82 break;
83 case 's':
84 sort_records = 1;
85 break;
86 case 'j':
87 include_jump = 1;
88 break;
89 default:
90 return usage();
91 }
92 }
93
94 if (optind + 2 != argc)
95 return usage();
96
97 if (!strcmp(argv[optind], "-"))
98 infd = 0;
99 else
100 infd = open(argv[optind], O_RDONLY);
101 if (infd == -1) {
102 fprintf(stderr, "Failed to open source file: %s",
103 strerror(errno));
104 return usage();
105 }
106 if (fstat(infd, &st)) {
107 perror("stat");
108 return 1;
109 }
110 data = mmap(NULL, st.st_size, PROT_READ, MAP_SHARED, infd, 0);
111 if (data == MAP_FAILED) {
112 perror("mmap");
113 return 1;
114 }
115
116 if (!strcmp(argv[optind+1], "-"))
117 outfd = 1;
118 else
119 outfd = open(argv[optind+1], O_TRUNC|O_CREAT|O_WRONLY, 0644);
120 if (outfd == -1) {
121 fprintf(stderr, "Failed to open destination file: %s",
122 strerror(errno));
123 return usage();
124 }
125 if (process_ihex(data, st.st_size))
126 return 1;
127
128 return output_records(outfd);
129}
130
131static int process_ihex(uint8_t *data, ssize_t size)
132{
133 struct ihex_binrec *record;
134 uint32_t offset = 0;
135 uint32_t data32;
136 uint8_t type, crc = 0, crcbyte = 0;
137 int i, j;
138 int line = 1;
139 int len;
140
141 i = 0;
142next_record:
143 /* search for the start of record character */
144 while (i < size) {
145 if (data[i] == '\n') line++;
146 if (data[i++] == ':') break;
147 }
148
149 /* Minimum record length would be about 10 characters */
150 if (i + 10 > size) {
151 fprintf(stderr, "Can't find valid record at line %d\n", line);
152 return -EINVAL;
153 }
154
155 len = hex(data + i, &crc); i += 2;
156 if (wide_records) {
157 len <<= 8;
158 len += hex(data + i, &crc); i += 2;
159 }
160 record = malloc((sizeof (*record) + len + 3) & ~3);
161 if (!record) {
162 fprintf(stderr, "out of memory for records\n");
163 return -ENOMEM;
164 }
165 memset(record, 0, (sizeof(*record) + len + 3) & ~3);
166 record->len = len;
167
168 /* now check if we have enough data to read everything */
169 if (i + 8 + (record->len * 2) > size) {
170 fprintf(stderr, "Not enough data to read complete record at line %d\n",
171 line);
172 return -EINVAL;
173 }
174
175 record->addr = hex(data + i, &crc) << 8; i += 2;
176 record->addr |= hex(data + i, &crc); i += 2;
177 type = hex(data + i, &crc); i += 2;
178
179 for (j = 0; j < record->len; j++, i += 2)
180 record->data[j] = hex(data + i, &crc);
181
182 /* check CRC */
183 crcbyte = hex(data + i, &crc); i += 2;
184 if (crc != 0) {
185 fprintf(stderr, "CRC failure at line %d: got 0x%X, expected 0x%X\n",
186 line, crcbyte, (unsigned char)(crcbyte-crc));
187 return -EINVAL;
188 }
189
190 /* Done reading the record */
191 switch (type) {
192 case 0:
193 /* old style EOF record? */
194 if (!record->len)
195 break;
196
197 record->addr += offset;
198 file_record(record);
199 goto next_record;
200
201 case 1: /* End-Of-File Record */
202 if (record->addr || record->len) {
203 fprintf(stderr, "Bad EOF record (type 01) format at line %d",
204 line);
205 return -EINVAL;
206 }
207 break;
208
209 case 2: /* Extended Segment Address Record (HEX86) */
210 case 4: /* Extended Linear Address Record (HEX386) */
211 if (record->addr || record->len != 2) {
212 fprintf(stderr, "Bad HEX86/HEX386 record (type %02X) at line %d\n",
213 type, line);
214 return -EINVAL;
215 }
216
217 /* We shouldn't really be using the offset for HEX86 because
218 * the wraparound case is specified quite differently. */
219 offset = record->data[0] << 8 | record->data[1];
220 offset <<= (type == 2 ? 4 : 16);
221 goto next_record;
222
223 case 3: /* Start Segment Address Record */
224 case 5: /* Start Linear Address Record */
225 if (record->addr || record->len != 4) {
226 fprintf(stderr, "Bad Start Address record (type %02X) at line %d\n",
227 type, line);
228 return -EINVAL;
229 }
230
231 memcpy(&data32, &record->data[0], sizeof(data32));
232 data32 = htonl(data32);
233 memcpy(&record->data[0], &data32, sizeof(data32));
234
235 /* These records contain the CS/IP or EIP where execution
236 * starts. If requested output this as a record. */
237 if (include_jump)
238 file_record(record);
239 goto next_record;
240
241 default:
242 fprintf(stderr, "Unknown record (type %02X)\n", type);
243 return -EINVAL;
244 }
245
246 return 0;
247}
248
249static struct ihex_binrec *records;
250
251static void file_record(struct ihex_binrec *record)
252{
253 struct ihex_binrec **p = &records;
254
255 while ((*p) && (!sort_records || (*p)->addr < record->addr))
256 p = &((*p)->next);
257
258 record->next = *p;
259 *p = record;
260}
261
262static int output_records(int outfd)
263{
264 unsigned char zeroes[6] = {0, 0, 0, 0, 0, 0};
265 struct ihex_binrec *p = records;
266
267 while (p) {
268 uint16_t writelen = (p->len + 9) & ~3;
269
270 p->addr = htonl(p->addr);
271 p->len = htons(p->len);
272 if (write(outfd, &p->addr, writelen) != writelen)
273 return 1;
274 p = p->next;
275 }
276 /* EOF record is zero length, since we don't bother to represent
277 the type field in the binary version */
278 if (write(outfd, zeroes, 6) != 6)
279 return 1;
280 return 0;
281}
diff --git a/tools/hv/hv_kvp_daemon.c b/tools/hv/hv_kvp_daemon.c
index bbb2a8ef367c..d7e06fe0270e 100644
--- a/tools/hv/hv_kvp_daemon.c
+++ b/tools/hv/hv_kvp_daemon.c
@@ -1178,6 +1178,7 @@ static int kvp_set_ip_info(char *if_name, struct hv_kvp_ipaddr_value *new_val)
1178 FILE *file; 1178 FILE *file;
1179 char cmd[PATH_MAX]; 1179 char cmd[PATH_MAX];
1180 char *mac_addr; 1180 char *mac_addr;
1181 int str_len;
1181 1182
1182 /* 1183 /*
1183 * Set the configuration for the specified interface with 1184 * Set the configuration for the specified interface with
@@ -1301,8 +1302,18 @@ static int kvp_set_ip_info(char *if_name, struct hv_kvp_ipaddr_value *new_val)
1301 * invoke the external script to do its magic. 1302 * invoke the external script to do its magic.
1302 */ 1303 */
1303 1304
1304 snprintf(cmd, sizeof(cmd), KVP_SCRIPTS_PATH "%s %s", 1305 str_len = snprintf(cmd, sizeof(cmd), KVP_SCRIPTS_PATH "%s %s",
1305 "hv_set_ifconfig", if_file); 1306 "hv_set_ifconfig", if_file);
1307 /*
1308 * This is a little overcautious, but it's necessary to suppress some
1309 * false warnings from gcc 8.0.1.
1310 */
1311 if (str_len <= 0 || (unsigned int)str_len >= sizeof(cmd)) {
1312 syslog(LOG_ERR, "Cmd '%s' (len=%d) may be too long",
1313 cmd, str_len);
1314 return HV_E_FAIL;
1315 }
1316
1306 if (system(cmd)) { 1317 if (system(cmd)) {
1307 syslog(LOG_ERR, "Failed to execute cmd '%s'; error: %d %s", 1318 syslog(LOG_ERR, "Failed to execute cmd '%s'; error: %d %s",
1308 cmd, errno, strerror(errno)); 1319 cmd, errno, strerror(errno));