 Documentation/ABI/testing/sysfs-class-backlight | 26
 Documentation/admin-guide/kernel-parameters.txt | 5
 Documentation/devicetree/bindings/mfd/mt6397.txt | 20
 Documentation/devicetree/bindings/mfd/rn5t618.txt | 5
 Documentation/devicetree/bindings/pci/designware-pcie.txt | 6
 Documentation/devicetree/bindings/pci/fsl,imx6q-pcie.txt | 2
 Documentation/devicetree/bindings/pci/mediatek-pcie.txt | 1
 Documentation/devicetree/bindings/pci/nvidia,tegra194-pcie.txt | 171
 Documentation/devicetree/bindings/pci/pci-armada8k.txt | 2
 Documentation/devicetree/bindings/pci/pci.txt | 5
 Documentation/devicetree/bindings/pci/pcie-al.txt | 46
 Documentation/devicetree/bindings/phy/phy-tegra194-p2u.txt | 28
 Documentation/devicetree/bindings/power/reset/mt6323-poweroff.txt | 20
 MAINTAINERS | 14
 arch/arm/boot/dts/ls1021a.dtsi | 2
 arch/arm64/boot/dts/freescale/fsl-ls1012a.dtsi | 1
 arch/arm64/boot/dts/freescale/fsl-ls1043a.dtsi | 3
 arch/arm64/boot/dts/freescale/fsl-ls1046a.dtsi | 6
 arch/arm64/boot/dts/freescale/fsl-ls1088a.dtsi | 3
 arch/arm64/boot/dts/freescale/fsl-ls208xa.dtsi | 4
 arch/arm64/boot/dts/nvidia/tegra194-p2888.dtsi | 24
 arch/arm64/boot/dts/nvidia/tegra194-p2972-0000.dts | 4
 arch/arm64/boot/dts/nvidia/tegra194.dtsi | 38
 arch/ia64/kernel/irq_ia64.c | 1
 arch/ia64/mm/contig.c | 1
 arch/ia64/mm/discontig.c | 2
 arch/microblaze/Kconfig | 3
 arch/microblaze/boot/dts/system.dts | 16
 arch/microblaze/configs/mmu_defconfig | 22
 arch/microblaze/configs/nommu_defconfig | 25
 arch/microblaze/include/asm/io.h | 1
 arch/microblaze/include/asm/pci.h | 2
 arch/microblaze/include/asm/uaccess.h | 42
 arch/microblaze/kernel/reset.c | 87
 arch/microblaze/mm/consistent.c | 221
 arch/mips/include/asm/pci.h | 1
 arch/powerpc/include/asm/pci.h | 2
 arch/powerpc/platforms/cell/spufs/inode.c | 207
 arch/s390/hypfs/inode.c | 137
 arch/sparc/include/asm/pci.h | 2
 drivers/acpi/pci_root.c | 1
 drivers/char/hw_random/core.c | 2
 drivers/char/xillybus/xillybus_pcie.c | 1
 drivers/crypto/hisilicon/sec/sec_algs.c | 43
 drivers/crypto/hisilicon/zip/zip_crypto.c | 4
 drivers/crypto/hisilicon/zip/zip_main.c | 7
 drivers/crypto/inside-secure/safexcel.c | 40
 drivers/crypto/talitos.c | 1
 drivers/hid/Kconfig | 9
 drivers/hid/Makefile | 1
 drivers/hid/hid-apple.c | 49
 drivers/hid/hid-core.c | 4
 drivers/hid/hid-cougar.c | 6
 drivers/hid/hid-creative-sb0540.c | 268
 drivers/hid/hid-gfrm.c | 7
 drivers/hid/hid-hyperv.c | 4
 drivers/hid/hid-ids.h | 5
 drivers/hid/hid-lenovo.c | 2
 drivers/hid/hid-lg.c | 10
 drivers/hid/hid-lg4ff.c | 1
 drivers/hid/hid-logitech-dj.c | 32
 drivers/hid/hid-multitouch.c | 37
 drivers/hid/hid-picolcd_core.c | 7
 drivers/hid/hid-prodikeys.c | 12
 drivers/hid/hid-quirks.c | 1
 drivers/hid/hid-sensor-hub.c | 1
 drivers/hid/hid-sony.c | 2
 drivers/hid/hidraw.c | 4
 drivers/hid/i2c-hid/i2c-hid-core.c | 4
 drivers/hid/intel-ish-hid/ipc/hw-ish.h | 1
 drivers/hid/intel-ish-hid/ipc/ipc.c | 2
 drivers/hid/intel-ish-hid/ipc/pci-ish.c | 95
 drivers/hid/usbhid/hiddev.c | 2
 drivers/hid/wacom_sys.c | 25
 drivers/hid/wacom_wac.c | 76
 drivers/hv/channel_mgmt.c | 161
 drivers/hv/connection.c | 8
 drivers/hv/hv.c | 66
 drivers/hv/hv_balloon.c | 143
 drivers/hv/hyperv_vmbus.h | 30
 drivers/hv/vmbus_drv.c | 265
 drivers/infiniband/core/rw.c | 6
 drivers/mfd/88pm800.c | 12
 drivers/mfd/88pm860x-core.c | 6
 drivers/mfd/Kconfig | 20
 drivers/mfd/Makefile | 5
 drivers/mfd/ab3100-core.c | 6
 drivers/mfd/ab8500-debugfs.c | 8
 drivers/mfd/asic3.c | 2
 drivers/mfd/bcm590xx.c | 6
 drivers/mfd/da9150-core.c | 6
 drivers/mfd/davinci_voicecodec.c | 9
 drivers/mfd/db8500-prcmu.c | 44
 drivers/mfd/ezx-pcap.c | 53
 drivers/mfd/fsl-imx25-tsadc.c | 4
 drivers/mfd/htc-i2cpld.c | 3
 drivers/mfd/intel-lpss-acpi.c | 26
 drivers/mfd/intel-lpss-pci.c | 25
 drivers/mfd/intel-lpss.c | 39
 drivers/mfd/intel_soc_pmic_bxtwc.c | 4
 drivers/mfd/intel_soc_pmic_mrfld.c | 157
 drivers/mfd/jz4740-adc.c | 324
 drivers/mfd/max14577.c | 6
 drivers/mfd/max77620.c | 4
 drivers/mfd/max77693.c | 12
 drivers/mfd/max77843.c | 6
 drivers/mfd/max8907.c | 6
 drivers/mfd/max8925-i2c.c | 12
 drivers/mfd/max8997.c | 18
 drivers/mfd/max8998.c | 6
 drivers/mfd/mt6397-core.c | 192
 drivers/mfd/mt6397-irq.c | 181
 drivers/mfd/palmas.c | 6
 drivers/mfd/qcom_rpm.c | 12
 drivers/mfd/sm501.c | 5
 drivers/mfd/timberdale.c | 3
 drivers/mfd/tps80031.c | 23
 drivers/mfd/twl-core.c | 6
 drivers/net/ethernet/intel/e1000e/e1000.h | 1
 drivers/net/ethernet/jme.c | 1
 drivers/net/ethernet/realtek/r8169_main.c | 1
 drivers/net/wireless/ath/ath5k/pci.c | 1
 drivers/net/wireless/intel/iwlegacy/3945-mac.c | 1
 drivers/net/wireless/intel/iwlegacy/4965-mac.c | 1
 drivers/net/wireless/intel/iwlwifi/pcie/trans.c | 1
 drivers/nvme/host/pci.c | 10
 drivers/pci/Kconfig | 6
 drivers/pci/access.c | 9
 drivers/pci/bus.c | 2
 drivers/pci/controller/dwc/Kconfig | 42
 drivers/pci/controller/dwc/Makefile | 4
 drivers/pci/controller/dwc/pci-exynos.c | 2
 drivers/pci/controller/dwc/pci-imx6.c | 37
 drivers/pci/controller/dwc/pci-layerscape-ep.c | 1
 drivers/pci/controller/dwc/pcie-al.c | 365
 drivers/pci/controller/dwc/pcie-armada8k.c | 7
 drivers/pci/controller/dwc/pcie-designware-ep.c | 45
 drivers/pci/controller/dwc/pcie-designware-host.c | 30
 drivers/pci/controller/dwc/pcie-designware.c | 96
 drivers/pci/controller/dwc/pcie-designware.h | 12
 drivers/pci/controller/dwc/pcie-histb.c | 4
 drivers/pci/controller/dwc/pcie-kirin.c | 2
 drivers/pci/controller/dwc/pcie-tegra194.c | 1732
 drivers/pci/controller/pci-host-common.c | 3
 drivers/pci/controller/pci-hyperv.c | 94
 drivers/pci/controller/pci-tegra.c | 22
 drivers/pci/controller/pcie-iproc-platform.c | 9
 drivers/pci/controller/pcie-mediatek.c | 20
 drivers/pci/controller/pcie-mobiveil.c | 10
 drivers/pci/controller/pcie-rockchip-host.c | 16
 drivers/pci/controller/vmd.c | 25
 drivers/pci/hotplug/cpci_hotplug_core.c | 1
 drivers/pci/hotplug/cpqphp_core.c | 1
 drivers/pci/hotplug/cpqphp_ctrl.c | 4
 drivers/pci/hotplug/cpqphp_nvram.h | 5
 drivers/pci/hotplug/ibmphp_res.c | 1
 drivers/pci/hotplug/pciehp.h | 11
 drivers/pci/hotplug/pciehp_core.c | 9
 drivers/pci/hotplug/pciehp_ctrl.c | 39
 drivers/pci/hotplug/pciehp_hpc.c | 87
 drivers/pci/hotplug/rpadlpar_core.c | 1
 drivers/pci/hotplug/rpaphp_core.c | 1
 drivers/pci/iov.c | 171
 drivers/pci/of.c | 2
 drivers/pci/p2pdma.c | 374
 drivers/pci/pci-acpi.c | 410
 drivers/pci/pci-bridge-emul.c | 4
 drivers/pci/pci-sysfs.c | 223
 drivers/pci/pci.c | 87
 drivers/pci/pci.h | 68
 drivers/pci/pcie/aspm.c | 9
 drivers/pci/pcie/err.c | 2
 drivers/pci/probe.c | 326
 drivers/pci/quirks.c | 106
 drivers/pci/search.c | 1
 drivers/pci/setup-bus.c | 4
 drivers/pci/vc.c | 5
 drivers/pci/vpd.c | 6
 drivers/phy/tegra/Kconfig | 7
 drivers/phy/tegra/Makefile | 1
 drivers/phy/tegra/phy-tegra194-p2u.c | 120
 drivers/platform/x86/Kconfig | 1
 drivers/platform/x86/i2c-multi-instantiate.c | 2
 drivers/platform/x86/pmc_atom.c | 7
 drivers/scsi/aacraid/linit.c | 1
 drivers/scsi/hpsa.c | 1
 drivers/scsi/mpt3sas/mpt3sas_scsih.c | 1
 drivers/usb/gadget/function/f_fs.c | 233
 drivers/video/backlight/Kconfig | 2
 drivers/video/backlight/backlight.c | 19
 drivers/video/backlight/gpio_backlight.c | 24
 drivers/video/backlight/lm3630a_bl.c | 3
 drivers/video/backlight/lms283gf05.c | 2
 drivers/video/backlight/pwm_bl.c | 35
 drivers/video/backlight/rave-sp-backlight.c | 10
 drivers/video/backlight/tosa_lcd.c | 3
 fs/gfs2/incore.h | 8
 fs/gfs2/ops_fstype.c | 495
 fs/gfs2/super.c | 333
 fs/gfs2/super.h | 3
 fs/notify/dnotify/dnotify.c | 15
 fs/notify/fanotify/fanotify_user.c | 19
 fs/notify/inotify/inotify_user.c | 14
 include/Kbuild | 1
 include/linux/backlight.h | 8
 include/linux/cred.h | 1
 include/linux/hid.h | 43
 include/linux/hyperv.h | 16
 include/linux/lsm_hooks.h | 9
 include/linux/memremap.h | 1
 include/linux/mfd/da9063/pdata.h | 60
 include/linux/mfd/intel_soc_pmic_mrfld.h | 81
 include/linux/mfd/mt6397/core.h | 11
 include/linux/pci-aspm.h | 36
 include/linux/pci-p2pdma.h | 28
 include/linux/pci.h | 133
 include/linux/pci_hotplug.h | 100
 include/linux/pci_ids.h | 3
 include/linux/platform_data/cros_ec_commands.h | 12
 include/linux/security.h | 10
 include/uapi/linux/pci_regs.h | 15
 kernel/bpf/inode.c | 92
 kernel/livepatch/core.c | 1
 security/safesetid/securityfs.c | 3
 security/security.c | 6
 security/selinux/hooks.c | 49
 security/selinux/include/classmap.h | 5
 security/selinux/include/objsec.h | 20
 security/selinux/netif.c | 31
 security/selinux/netnode.c | 30
 security/selinux/netport.c | 24
 security/selinux/ss/policydb.c | 402
 security/selinux/ss/policydb.h | 2
 security/selinux/ss/services.c | 6
 security/selinux/ss/sidtab.c | 48
 security/selinux/ss/sidtab.h | 19
 security/smack/smack_access.c | 6
 security/smack/smack_lsm.c | 40
 tools/hv/Build | 3
 tools/hv/Makefile | 51
 tools/power/x86/intel-speed-select/isst-config.c | 122
 tools/power/x86/intel-speed-select/isst-core.c | 25
 tools/power/x86/intel-speed-select/isst-display.c | 71
 tools/power/x86/intel-speed-select/isst.h | 10
244 files changed, 7785 insertions, 3688 deletions
diff --git a/Documentation/ABI/testing/sysfs-class-backlight b/Documentation/ABI/testing/sysfs-class-backlight
new file mode 100644
index 000000000000..3ab175a3f5cb
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-class-backlight
@@ -0,0 +1,26 @@
1What: /sys/class/backlight/<backlight>/scale
2Date: July 2019
3KernelVersion: 5.4
4Contact: Daniel Thompson <daniel.thompson@linaro.org>
5Description:
6 Description of the scale of the brightness curve.
7
8 The human eye senses brightness approximately logarithmically,
9 hence linear changes in brightness are perceived as being
10 non-linear. To achieve a linear perception of brightness changes
11 controls like sliders need to apply a logarithmic mapping for
12 backlights with a linear brightness curve.
13
14 Possible values of the attribute are:
15
16 unknown
17 The scale of the brightness curve is unknown.
18
19 linear
20 The brightness changes linearly with each step. Brightness
21 controls should apply a logarithmic mapping for a linear
22 perception.
23
24 non-linear
25 The brightness changes non-linearly with each step. Brightness
26 controls should use a linear mapping for a linear perception.
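
[Editor's note, not part of the patch] A minimal userspace sketch of the logarithmic mapping this attribute recommends for backlights that report "linear"; the slider_to_brightness() helper and its parameters are illustrative assumptions only:

	#include <math.h>

	/*
	 * Sketch only: map a slider position (0..slider_max) onto a backlight
	 * brightness (0..max_brightness) so that equal slider steps give
	 * roughly equal perceived steps on a linear-scale backlight.
	 */
	static int slider_to_brightness(int pos, int slider_max, int max_brightness)
	{
		double fraction;

		if (pos <= 0)
			return 0;

		fraction = (double)pos / slider_max;
		/* Exponential curve, i.e. logarithmic in perceived brightness. */
		return (int)(pow(max_brightness, fraction) + 0.5);
	}
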
diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
index 8a54ed862049..944e03e29f65 100644
--- a/Documentation/admin-guide/kernel-parameters.txt
+++ b/Documentation/admin-guide/kernel-parameters.txt
@@ -3467,12 +3467,13 @@
3467 specify the device is described above. 3467 specify the device is described above.
3468 If <order of align> is not specified, 3468 If <order of align> is not specified,
3469 PAGE_SIZE is used as alignment. 3469 PAGE_SIZE is used as alignment.
3470 PCI-PCI bridge can be specified, if resource 3470 A PCI-PCI bridge can be specified if resource
3471 windows need to be expanded. 3471 windows need to be expanded.
3472 To specify the alignment for several 3472 To specify the alignment for several
3473 instances of a device, the PCI vendor, 3473 instances of a device, the PCI vendor,
3474 device, subvendor, and subdevice may be 3474 device, subvendor, and subdevice may be
3475 specified, e.g., 4096@pci:8086:9c22:103c:198f 3475 specified, e.g., 12@pci:8086:9c22:103c:198f
3476 for 4096-byte alignment.
3476 ecrc= Enable/disable PCIe ECRC (transaction layer 3477 ecrc= Enable/disable PCIe ECRC (transaction layer
3477 end-to-end CRC checking). 3478 end-to-end CRC checking).
3478 bios: Use BIOS/firmware settings. This is the 3479 bios: Use BIOS/firmware settings. This is the
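
[Editor's note, not part of the patch] In the resource_alignment example above, the value before the "@" is an order of alignment, i.e. a power of two; a one-line sketch of that relationship, with an illustrative helper name:

	/* Illustrative only: how the "order of align" maps to a byte alignment. */
	static unsigned long align_bytes_from_order(unsigned int order)
	{
		return 1UL << order;	/* e.g. order 12 -> 4096 bytes */
	}
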
diff --git a/Documentation/devicetree/bindings/mfd/mt6397.txt b/Documentation/devicetree/bindings/mfd/mt6397.txt
index 0ebd08af777d..a9b105ac00a8 100644
--- a/Documentation/devicetree/bindings/mfd/mt6397.txt
+++ b/Documentation/devicetree/bindings/mfd/mt6397.txt
@@ -8,11 +8,12 @@ MT6397/MT6323 is a multifunction device with the following sub modules:
8- Clock 8- Clock
9- LED 9- LED
10- Keys 10- Keys
11- Power controller
11 12
12It is interfaced to host controller using SPI interface by a proprietary hardware 13It is interfaced to host controller using SPI interface by a proprietary hardware
13called PMIC wrapper or pwrap. MT6397/MT6323 MFD is a child device of pwrap. 14called PMIC wrapper or pwrap. MT6397/MT6323 MFD is a child device of pwrap.
14See the following for pwarp node definitions: 15See the following for pwarp node definitions:
15Documentation/devicetree/bindings/soc/mediatek/pwrap.txt 16../soc/mediatek/pwrap.txt
16 17
17This document describes the binding for MFD device and its sub module. 18This document describes the binding for MFD device and its sub module.
18 19
@@ -22,14 +23,16 @@ compatible: "mediatek,mt6397" or "mediatek,mt6323"
22Optional subnodes: 23Optional subnodes:
23 24
24- rtc 25- rtc
25 Required properties: 26 Required properties: Should be one of follows
27 - compatible: "mediatek,mt6323-rtc"
26 - compatible: "mediatek,mt6397-rtc" 28 - compatible: "mediatek,mt6397-rtc"
29 For details, see ../rtc/rtc-mt6397.txt
27- regulators 30- regulators
28 Required properties: 31 Required properties:
29 - compatible: "mediatek,mt6397-regulator" 32 - compatible: "mediatek,mt6397-regulator"
30 see Documentation/devicetree/bindings/regulator/mt6397-regulator.txt 33 see ../regulator/mt6397-regulator.txt
31 - compatible: "mediatek,mt6323-regulator" 34 - compatible: "mediatek,mt6323-regulator"
32 see Documentation/devicetree/bindings/regulator/mt6323-regulator.txt 35 see ../regulator/mt6323-regulator.txt
33- codec 36- codec
34 Required properties: 37 Required properties:
35 - compatible: "mediatek,mt6397-codec" 38 - compatible: "mediatek,mt6397-codec"
@@ -39,12 +42,17 @@ Optional subnodes:
39- led 42- led
40 Required properties: 43 Required properties:
41 - compatible: "mediatek,mt6323-led" 44 - compatible: "mediatek,mt6323-led"
42 see Documentation/devicetree/bindings/leds/leds-mt6323.txt 45 see ../leds/leds-mt6323.txt
43 46
44- keys 47- keys
45 Required properties: 48 Required properties:
46 - compatible: "mediatek,mt6397-keys" or "mediatek,mt6323-keys" 49 - compatible: "mediatek,mt6397-keys" or "mediatek,mt6323-keys"
47 see Documentation/devicetree/bindings/input/mtk-pmic-keys.txt 50 see ../input/mtk-pmic-keys.txt
51
52- power-controller
53 Required properties:
54 - compatible: "mediatek,mt6323-pwrc"
55 For details, see ../power/reset/mt6323-poweroff.txt
48 56
49Example: 57Example:
50 pwrap: pwrap@1000f000 { 58 pwrap: pwrap@1000f000 {
diff --git a/Documentation/devicetree/bindings/mfd/rn5t618.txt b/Documentation/devicetree/bindings/mfd/rn5t618.txt
index 65c23263cc54..b74e5e94d1cb 100644
--- a/Documentation/devicetree/bindings/mfd/rn5t618.txt
+++ b/Documentation/devicetree/bindings/mfd/rn5t618.txt
@@ -14,6 +14,10 @@ Required properties:
14 "ricoh,rc5t619" 14 "ricoh,rc5t619"
15 - reg: the I2C slave address of the device 15 - reg: the I2C slave address of the device
16 16
17Optional properties:
18 - system-power-controller:
19 See Documentation/devicetree/bindings/power/power-controller.txt
20
17Sub-nodes: 21Sub-nodes:
18 - regulators: the node is required if the regulator functionality is 22 - regulators: the node is required if the regulator functionality is
19 needed. The valid regulator names are: DCDC1, DCDC2, DCDC3, DCDC4 23 needed. The valid regulator names are: DCDC1, DCDC2, DCDC3, DCDC4
@@ -28,6 +32,7 @@ Example:
28 pmic@32 { 32 pmic@32 {
29 compatible = "ricoh,rn5t618"; 33 compatible = "ricoh,rn5t618";
30 reg = <0x32>; 34 reg = <0x32>;
35 system-power-controller;
31 36
32 regulators { 37 regulators {
33 DCDC1 { 38 DCDC1 {
diff --git a/Documentation/devicetree/bindings/pci/designware-pcie.txt b/Documentation/devicetree/bindings/pci/designware-pcie.txt
index 5561a1c060d0..78494c4050f7 100644
--- a/Documentation/devicetree/bindings/pci/designware-pcie.txt
+++ b/Documentation/devicetree/bindings/pci/designware-pcie.txt
@@ -11,7 +11,6 @@ Required properties:
11 the ATU address space. 11 the ATU address space.
12 (The old way of getting the configuration address space from "ranges" 12 (The old way of getting the configuration address space from "ranges"
13 is deprecated and should be avoided.) 13 is deprecated and should be avoided.)
14- num-lanes: number of lanes to use
15RC mode: 14RC mode:
16- #address-cells: set to <3> 15- #address-cells: set to <3>
17- #size-cells: set to <2> 16- #size-cells: set to <2>
@@ -34,6 +33,11 @@ Optional properties:
34- clock-names: Must include the following entries: 33- clock-names: Must include the following entries:
35 - "pcie" 34 - "pcie"
36 - "pcie_bus" 35 - "pcie_bus"
36- snps,enable-cdm-check: This is a boolean property and if present enables
37 automatic checking of CDM (Configuration Dependent Module) registers
38 for data corruption. CDM registers include standard PCIe configuration
39 space registers, Port Logic registers, DMA and iATU (internal Address
40 Translation Unit) registers.
37RC mode: 41RC mode:
38- num-viewport: number of view ports configured in hardware. If a platform 42- num-viewport: number of view ports configured in hardware. If a platform
39 does not specify it, the driver assumes 2. 43 does not specify it, the driver assumes 2.
diff --git a/Documentation/devicetree/bindings/pci/fsl,imx6q-pcie.txt b/Documentation/devicetree/bindings/pci/fsl,imx6q-pcie.txt
index a7f5f5afa0e6..de4b2baf91e8 100644
--- a/Documentation/devicetree/bindings/pci/fsl,imx6q-pcie.txt
+++ b/Documentation/devicetree/bindings/pci/fsl,imx6q-pcie.txt
@@ -50,7 +50,7 @@ Additional required properties for imx7d-pcie and imx8mq-pcie:
50- power-domains: Must be set to a phandle pointing to PCIE_PHY power domain 50- power-domains: Must be set to a phandle pointing to PCIE_PHY power domain
51- resets: Must contain phandles to PCIe-related reset lines exposed by SRC 51- resets: Must contain phandles to PCIe-related reset lines exposed by SRC
52 IP block 52 IP block
53- reset-names: Must contain the following entires: 53- reset-names: Must contain the following entries:
54 - "pciephy" 54 - "pciephy"
55 - "apps" 55 - "apps"
56 - "turnoff" 56 - "turnoff"
diff --git a/Documentation/devicetree/bindings/pci/mediatek-pcie.txt b/Documentation/devicetree/bindings/pci/mediatek-pcie.txt
index 92437a366e5f..7468d666763a 100644
--- a/Documentation/devicetree/bindings/pci/mediatek-pcie.txt
+++ b/Documentation/devicetree/bindings/pci/mediatek-pcie.txt
@@ -6,6 +6,7 @@ Required properties:
6 "mediatek,mt2712-pcie" 6 "mediatek,mt2712-pcie"
7 "mediatek,mt7622-pcie" 7 "mediatek,mt7622-pcie"
8 "mediatek,mt7623-pcie" 8 "mediatek,mt7623-pcie"
9 "mediatek,mt7629-pcie"
9- device_type: Must be "pci" 10- device_type: Must be "pci"
10- reg: Base addresses and lengths of the PCIe subsys and root ports. 11- reg: Base addresses and lengths of the PCIe subsys and root ports.
11- reg-names: Names of the above areas to use during resource lookup. 12- reg-names: Names of the above areas to use during resource lookup.
diff --git a/Documentation/devicetree/bindings/pci/nvidia,tegra194-pcie.txt b/Documentation/devicetree/bindings/pci/nvidia,tegra194-pcie.txt
new file mode 100644
index 000000000000..b739f92da58e
--- /dev/null
+++ b/Documentation/devicetree/bindings/pci/nvidia,tegra194-pcie.txt
@@ -0,0 +1,171 @@
1NVIDIA Tegra PCIe controller (Synopsys DesignWare Core based)
2
3This PCIe host controller is based on the Synopsis Designware PCIe IP
4and thus inherits all the common properties defined in designware-pcie.txt.
5
6Required properties:
7- compatible: For Tegra19x, must contain "nvidia,tegra194-pcie".
8- device_type: Must be "pci"
9- power-domains: A phandle to the node that controls power to the respective
10 PCIe controller and a specifier name for the PCIe controller. Following are
11 the specifiers for the different PCIe controllers
12 TEGRA194_POWER_DOMAIN_PCIEX8B: C0
13 TEGRA194_POWER_DOMAIN_PCIEX1A: C1
14 TEGRA194_POWER_DOMAIN_PCIEX1A: C2
15 TEGRA194_POWER_DOMAIN_PCIEX1A: C3
16 TEGRA194_POWER_DOMAIN_PCIEX4A: C4
17 TEGRA194_POWER_DOMAIN_PCIEX8A: C5
18 these specifiers are defined in
19 "include/dt-bindings/power/tegra194-powergate.h" file.
20- reg: A list of physical base address and length pairs for each set of
21 controller registers. Must contain an entry for each entry in the reg-names
22 property.
23- reg-names: Must include the following entries:
24 "appl": Controller's application logic registers
25 "config": As per the definition in designware-pcie.txt
26 "atu_dma": iATU and DMA registers. This is where the iATU (internal Address
27 Translation Unit) registers of the PCIe core are made available
28 for SW access.
29 "dbi": The aperture where root port's own configuration registers are
30 available
31- interrupts: A list of interrupt outputs of the controller. Must contain an
32 entry for each entry in the interrupt-names property.
33- interrupt-names: Must include the following entries:
34 "intr": The Tegra interrupt that is asserted for controller interrupts
35 "msi": The Tegra interrupt that is asserted when an MSI is received
36- bus-range: Range of bus numbers associated with this controller
37- #address-cells: Address representation for root ports (must be 3)
38 - cell 0 specifies the bus and device numbers of the root port:
39 [23:16]: bus number
40 [15:11]: device number
41 - cell 1 denotes the upper 32 address bits and should be 0
42 - cell 2 contains the lower 32 address bits and is used to translate to the
43 CPU address space
44- #size-cells: Size representation for root ports (must be 2)
45- ranges: Describes the translation of addresses for root ports and standard
46 PCI regions. The entries must be 7 cells each, where the first three cells
47 correspond to the address as described for the #address-cells property
48 above, the fourth and fifth cells are for the physical CPU address to
49 translate to and the sixth and seventh cells are as described for the
50 #size-cells property above.
51 - Entries setup the mapping for the standard I/O, memory and
52 prefetchable PCI regions. The first cell determines the type of region
53 that is setup:
54 - 0x81000000: I/O memory region
55 - 0x82000000: non-prefetchable memory region
56 - 0xc2000000: prefetchable memory region
57 Please refer to the standard PCI bus binding document for a more detailed
58 explanation.
59- #interrupt-cells: Size representation for interrupts (must be 1)
60- interrupt-map-mask and interrupt-map: Standard PCI IRQ mapping properties
61 Please refer to the standard PCI bus binding document for a more detailed
62 explanation.
63- clocks: Must contain an entry for each entry in clock-names.
64 See ../clocks/clock-bindings.txt for details.
65- clock-names: Must include the following entries:
66 - core
67- resets: Must contain an entry for each entry in reset-names.
68 See ../reset/reset.txt for details.
69- reset-names: Must include the following entries:
70 - apb
71 - core
72- phys: Must contain a phandle to P2U PHY for each entry in phy-names.
73- phy-names: Must include an entry for each active lane.
74 "p2u-N": where N ranges from 0 to one less than the total number of lanes
75- nvidia,bpmp: Must contain a pair of phandle to BPMP controller node followed
76 by controller-id. Following are the controller ids for each controller.
77 0: C0
78 1: C1
79 2: C2
80 3: C3
81 4: C4
82 5: C5
83- vddio-pex-ctl-supply: Regulator supply for PCIe side band signals
84
85Optional properties:
86- pinctrl-names: A list of pinctrl state names.
87 It is mandatory for C5 controller and optional for other controllers.
88 - "default": Configures PCIe I/O for proper operation.
89- pinctrl-0: phandle for the 'default' state of pin configuration.
90 It is mandatory for C5 controller and optional for other controllers.
91- supports-clkreq: Refer to Documentation/devicetree/bindings/pci/pci.txt
92- nvidia,update-fc-fixup: This is a boolean property and needs to be present to
93 improve performance when a platform is designed in such a way that it
94 satisfies at least one of the following conditions thereby enabling root
95 port to exchange optimum number of FC (Flow Control) credits with
96 downstream devices
97 1. If C0/C4/C5 run at x1/x2 link widths (irrespective of speed and MPS)
98 2. If C0/C1/C2/C3/C4/C5 operate at their respective max link widths and
99 a) speed is Gen-2 and MPS is 256B
100 b) speed is >= Gen-3 with any MPS
101- nvidia,aspm-cmrt-us: Common Mode Restore Time for proper operation of ASPM
102 to be specified in microseconds
103- nvidia,aspm-pwr-on-t-us: Power On time for proper operation of ASPM to be
104 specified in microseconds
105- nvidia,aspm-l0s-entrance-latency-us: ASPM L0s entrance latency to be
106 specified in microseconds
107- vpcie3v3-supply: A phandle to the regulator node that supplies 3.3V to the slot
108 if the platform has one such slot. (Ex:- x16 slot owned by C5 controller
109 in p2972-0000 platform).
110- vpcie12v-supply: A phandle to the regulator node that supplies 12V to the slot
111 if the platform has one such slot. (Ex:- x16 slot owned by C5 controller
112 in p2972-0000 platform).
113
114Examples:
115=========
116
117Tegra194:
118--------
119
120 pcie@14180000 {
121 compatible = "nvidia,tegra194-pcie", "snps,dw-pcie";
122 power-domains = <&bpmp TEGRA194_POWER_DOMAIN_PCIEX8B>;
123 reg = <0x00 0x14180000 0x0 0x00020000 /* appl registers (128K) */
124 0x00 0x38000000 0x0 0x00040000 /* configuration space (256K) */
125 0x00 0x38040000 0x0 0x00040000>; /* iATU_DMA reg space (256K) */
126 reg-names = "appl", "config", "atu_dma";
127
128 #address-cells = <3>;
129 #size-cells = <2>;
130 device_type = "pci";
131 num-lanes = <8>;
132 linux,pci-domain = <0>;
133
134 pinctrl-names = "default";
135 pinctrl-0 = <&pex_rst_c5_out_state>, <&clkreq_c5_bi_dir_state>;
136
137 clocks = <&bpmp TEGRA194_CLK_PEX0_CORE_0>;
138 clock-names = "core";
139
140 resets = <&bpmp TEGRA194_RESET_PEX0_CORE_0_APB>,
141 <&bpmp TEGRA194_RESET_PEX0_CORE_0>;
142 reset-names = "apb", "core";
143
144 interrupts = <GIC_SPI 72 IRQ_TYPE_LEVEL_HIGH>, /* controller interrupt */
145 <GIC_SPI 73 IRQ_TYPE_LEVEL_HIGH>; /* MSI interrupt */
146 interrupt-names = "intr", "msi";
147
148 #interrupt-cells = <1>;
149 interrupt-map-mask = <0 0 0 0>;
150 interrupt-map = <0 0 0 0 &gic GIC_SPI 72 IRQ_TYPE_LEVEL_HIGH>;
151
152 nvidia,bpmp = <&bpmp 0>;
153
154 supports-clkreq;
155 nvidia,aspm-cmrt-us = <60>;
156 nvidia,aspm-pwr-on-t-us = <20>;
157 nvidia,aspm-l0s-entrance-latency-us = <3>;
158
159 bus-range = <0x0 0xff>;
160 ranges = <0x81000000 0x0 0x38100000 0x0 0x38100000 0x0 0x00100000 /* downstream I/O (1MB) */
161 0x82000000 0x0 0x38200000 0x0 0x38200000 0x0 0x01E00000 /* non-prefetchable memory (30MB) */
162 0xc2000000 0x18 0x00000000 0x18 0x00000000 0x4 0x00000000>; /* prefetchable memory (16GB) */
163
164 vddio-pex-ctl-supply = <&vdd_1v8ao>;
165 vpcie3v3-supply = <&vdd_3v3_pcie>;
166 vpcie12v-supply = <&vdd_12v_pcie>;
167
168 phys = <&p2u_hsio_2>, <&p2u_hsio_3>, <&p2u_hsio_4>,
169 <&p2u_hsio_5>;
170 phy-names = "p2u-0", "p2u-1", "p2u-2", "p2u-3";
171 };
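
[Editor's note, not part of the patch] As an aside on the #address-cells encoding described earlier in this binding, a hedged C sketch of how the first address cell of a root port is composed (bus number in bits [23:16], device number in bits [15:11]); the helper name is made up for illustration:

	/* Illustrative only: compose cell 0 of a root-port address. */
	static unsigned int rp_addr_cell0(unsigned int bus, unsigned int dev)
	{
		return (bus << 16) | (dev << 11);
	}
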
diff --git a/Documentation/devicetree/bindings/pci/pci-armada8k.txt b/Documentation/devicetree/bindings/pci/pci-armada8k.txt
index 8324a4ee6f06..7a813d0e6d63 100644
--- a/Documentation/devicetree/bindings/pci/pci-armada8k.txt
+++ b/Documentation/devicetree/bindings/pci/pci-armada8k.txt
@@ -11,7 +11,7 @@ Required properties:
11- reg-names: 11- reg-names:
12 - "ctrl" for the control register region 12 - "ctrl" for the control register region
13 - "config" for the config space region 13 - "config" for the config space region
14- interrupts: Interrupt specifier for the PCIe controler 14- interrupts: Interrupt specifier for the PCIe controller
15- clocks: reference to the PCIe controller clocks 15- clocks: reference to the PCIe controller clocks
16- clock-names: mandatory if there is a second clock, in this case the 16- clock-names: mandatory if there is a second clock, in this case the
17 name must be "core" for the first clock and "reg" for the second 17 name must be "core" for the first clock and "reg" for the second
diff --git a/Documentation/devicetree/bindings/pci/pci.txt b/Documentation/devicetree/bindings/pci/pci.txt
index 2a5d91024059..29bcbd88f457 100644
--- a/Documentation/devicetree/bindings/pci/pci.txt
+++ b/Documentation/devicetree/bindings/pci/pci.txt
@@ -27,6 +27,11 @@ driver implementation may support the following properties:
27- reset-gpios: 27- reset-gpios:
28 If present this property specifies PERST# GPIO. Host drivers can parse the 28 If present this property specifies PERST# GPIO. Host drivers can parse the
29 GPIO and apply fundamental reset to endpoints. 29 GPIO and apply fundamental reset to endpoints.
30- supports-clkreq:
31 If present this property specifies that CLKREQ signal routing exists from
32 root port to downstream device and host bridge drivers can do programming
33 which depends on CLKREQ signal existence. For example, programming root port
34 not to advertise ASPM L1 Sub-States support if there is no CLKREQ signal.
30 35
31PCI-PCI Bridge properties 36PCI-PCI Bridge properties
32------------------------- 37-------------------------
diff --git a/Documentation/devicetree/bindings/pci/pcie-al.txt b/Documentation/devicetree/bindings/pci/pcie-al.txt
new file mode 100644
index 000000000000..557a5089229d
--- /dev/null
+++ b/Documentation/devicetree/bindings/pci/pcie-al.txt
@@ -0,0 +1,46 @@
1* Amazon Annapurna Labs PCIe host bridge
2
3Amazon's Annapurna Labs PCIe Host Controller is based on the Synopsys DesignWare
4PCI core. It inherits common properties defined in
5Documentation/devicetree/bindings/pci/designware-pcie.txt.
6
7Properties of the host controller node that differ from it are:
8
9- compatible:
10 Usage: required
11 Value type: <stringlist>
12 Definition: Value should contain
13 - "amazon,al-alpine-v2-pcie" for alpine_v2
14 - "amazon,al-alpine-v3-pcie" for alpine_v3
15
16- reg:
17 Usage: required
18 Value type: <prop-encoded-array>
19 Definition: Register ranges as listed in the reg-names property
20
21- reg-names:
22 Usage: required
23 Value type: <stringlist>
24 Definition: Must include the following entries
25 - "config" PCIe ECAM space
26 - "controller" AL proprietary registers
27 - "dbi" Designware PCIe registers
28
29Example:
30
31 pcie-external0: pcie@fb600000 {
32 compatible = "amazon,al-alpine-v3-pcie";
33 reg = <0x0 0xfb600000 0x0 0x00100000
34 0x0 0xfd800000 0x0 0x00010000
35 0x0 0xfd810000 0x0 0x00001000>;
36 reg-names = "config", "controller", "dbi";
37 bus-range = <0 255>;
38 device_type = "pci";
39 #address-cells = <3>;
40 #size-cells = <2>;
41 #interrupt-cells = <1>;
42 interrupts = <GIC_SPI 49 IRQ_TYPE_LEVEL_HIGH>;
43 interrupt-map-mask = <0x00 0 0 7>;
44 interrupt-map = <0x0000 0 0 1 &gic GIC_SPI 41 IRQ_TYPE_LEVEL_HIGH>; /* INTa */
45 ranges = <0x02000000 0x0 0xc0010000 0x0 0xc0010000 0x0 0x07ff0000>;
46 };
diff --git a/Documentation/devicetree/bindings/phy/phy-tegra194-p2u.txt b/Documentation/devicetree/bindings/phy/phy-tegra194-p2u.txt
new file mode 100644
index 000000000000..d23ff90baad5
--- /dev/null
+++ b/Documentation/devicetree/bindings/phy/phy-tegra194-p2u.txt
@@ -0,0 +1,28 @@
1NVIDIA Tegra194 P2U binding
2
3Tegra194 has two PHY bricks namely HSIO (High Speed IO) and NVHS (NVIDIA High
4Speed) each interfacing with 12 and 8 P2U instances respectively.
5A P2U instance is a glue logic between Synopsys DesignWare Core PCIe IP's PIPE
6interface and PHY of HSIO/NVHS bricks. Each P2U instance represents one PCIe
7lane.
8
9Required properties:
10- compatible: For Tegra19x, must contain "nvidia,tegra194-p2u".
11- reg: Should be the physical address space and length of respective each P2U
12 instance.
13- reg-names: Must include the entry "ctl".
14
15Required properties for PHY port node:
16- #phy-cells: Defined by generic PHY bindings. Must be 0.
17
18Refer to phy/phy-bindings.txt for the generic PHY binding properties.
19
20Example:
21
22p2u_hsio_0: phy@3e10000 {
23 compatible = "nvidia,tegra194-p2u";
24 reg = <0x03e10000 0x10000>;
25 reg-names = "ctl";
26
27 #phy-cells = <0>;
28};
diff --git a/Documentation/devicetree/bindings/power/reset/mt6323-poweroff.txt b/Documentation/devicetree/bindings/power/reset/mt6323-poweroff.txt
new file mode 100644
index 000000000000..933f0c48e887
--- /dev/null
+++ b/Documentation/devicetree/bindings/power/reset/mt6323-poweroff.txt
@@ -0,0 +1,20 @@
1Device Tree Bindings for Power Controller on MediaTek PMIC
2
3The power controller which could be found on PMIC is responsible for externally
4powering off or on the remote MediaTek SoC through the circuit BBPU.
5
6Required properties:
7- compatible: Should be one of follows
8 "mediatek,mt6323-pwrc": for MT6323 PMIC
9
10Example:
11
12 pmic {
13 compatible = "mediatek,mt6323";
14
15 ...
16
17 power-controller {
18 compatible = "mediatek,mt6323-pwrc";
19 };
20 }
diff --git a/MAINTAINERS b/MAINTAINERS
index a400af0501c9..54f1286087e9 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -728,7 +728,7 @@ ALTERA SYSTEM MANAGER DRIVER
728M: Thor Thayer <thor.thayer@linux.intel.com> 728M: Thor Thayer <thor.thayer@linux.intel.com>
729S: Maintained 729S: Maintained
730F: drivers/mfd/altera-sysmgr.c 730F: drivers/mfd/altera-sysmgr.c
731F: include/linux/mfd/altera-sysgmr.h 731F: include/linux/mfd/altera-sysmgr.h
732 732
733ALTERA SYSTEM RESOURCE DRIVER FOR ARRIA10 DEVKIT 733ALTERA SYSTEM RESOURCE DRIVER FOR ARRIA10 DEVKIT
734M: Thor Thayer <thor.thayer@linux.intel.com> 734M: Thor Thayer <thor.thayer@linux.intel.com>
@@ -2921,6 +2921,8 @@ F: drivers/video/backlight/
2921F: include/linux/backlight.h 2921F: include/linux/backlight.h
2922F: include/linux/pwm_backlight.h 2922F: include/linux/pwm_backlight.h
2923F: Documentation/devicetree/bindings/leds/backlight 2923F: Documentation/devicetree/bindings/leds/backlight
2924F: Documentation/ABI/stable/sysfs-class-backlight
2925F: Documentation/ABI/testing/sysfs-class-backlight
2924 2926
2925BATMAN ADVANCED 2927BATMAN ADVANCED
2926M: Marek Lindner <mareklindner@neomailbox.ch> 2928M: Marek Lindner <mareklindner@neomailbox.ch>
@@ -4338,6 +4340,12 @@ S: Maintained
4338F: Documentation/filesystems/cramfs.txt 4340F: Documentation/filesystems/cramfs.txt
4339F: fs/cramfs/ 4341F: fs/cramfs/
4340 4342
4343CREATIVE SB0540
4344M: Bastien Nocera <hadess@hadess.net>
4345L: linux-input@vger.kernel.org
4346S: Maintained
4347F: drivers/hid/hid-creative-sb0540.c
4348
4341CRYPTO API 4349CRYPTO API
4342M: Herbert Xu <herbert@gondor.apana.org.au> 4350M: Herbert Xu <herbert@gondor.apana.org.au>
4343M: "David S. Miller" <davem@davemloft.net> 4351M: "David S. Miller" <davem@davemloft.net>
@@ -12574,16 +12582,18 @@ F: arch/x86/kernel/early-quirks.c
12574 12582
12575PCI NATIVE HOST BRIDGE AND ENDPOINT DRIVERS 12583PCI NATIVE HOST BRIDGE AND ENDPOINT DRIVERS
12576M: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com> 12584M: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
12585R: Andrew Murray <andrew.murray@arm.com>
12577L: linux-pci@vger.kernel.org 12586L: linux-pci@vger.kernel.org
12578Q: http://patchwork.ozlabs.org/project/linux-pci/list/ 12587Q: http://patchwork.ozlabs.org/project/linux-pci/list/
12579T: git git://git.kernel.org/pub/scm/linux/kernel/git/lpieralisi/pci.git/ 12588T: git git://git.kernel.org/pub/scm/linux/kernel/git/lpieralisi/pci.git/
12580S: Supported 12589S: Supported
12581F: drivers/pci/controller/ 12590F: drivers/pci/controller/
12582 12591
12583PCIE DRIVER FOR ANNAPURNA LABS 12592PCIE DRIVER FOR AMAZON ANNAPURNA LABS
12584M: Jonathan Chocron <jonnyc@amazon.com> 12593M: Jonathan Chocron <jonnyc@amazon.com>
12585L: linux-pci@vger.kernel.org 12594L: linux-pci@vger.kernel.org
12586S: Maintained 12595S: Maintained
12596F: Documentation/devicetree/bindings/pci/pcie-al.txt
12587F: drivers/pci/controller/dwc/pcie-al.c 12597F: drivers/pci/controller/dwc/pcie-al.c
12588 12598
12589PCIE DRIVER FOR AMLOGIC MESON 12599PCIE DRIVER FOR AMLOGIC MESON
diff --git a/arch/arm/boot/dts/ls1021a.dtsi b/arch/arm/boot/dts/ls1021a.dtsi
index 464df4290ffc..2f6977ada447 100644
--- a/arch/arm/boot/dts/ls1021a.dtsi
+++ b/arch/arm/boot/dts/ls1021a.dtsi
@@ -874,7 +874,6 @@
874 #address-cells = <3>; 874 #address-cells = <3>;
875 #size-cells = <2>; 875 #size-cells = <2>;
876 device_type = "pci"; 876 device_type = "pci";
877 num-lanes = <4>;
878 num-viewport = <6>; 877 num-viewport = <6>;
879 bus-range = <0x0 0xff>; 878 bus-range = <0x0 0xff>;
880 ranges = <0x81000000 0x0 0x00000000 0x40 0x00010000 0x0 0x00010000 /* downstream I/O */ 879 ranges = <0x81000000 0x0 0x00000000 0x40 0x00010000 0x0 0x00010000 /* downstream I/O */
@@ -899,7 +898,6 @@
899 #address-cells = <3>; 898 #address-cells = <3>;
900 #size-cells = <2>; 899 #size-cells = <2>;
901 device_type = "pci"; 900 device_type = "pci";
902 num-lanes = <4>;
903 num-viewport = <6>; 901 num-viewport = <6>;
904 bus-range = <0x0 0xff>; 902 bus-range = <0x0 0xff>;
905 ranges = <0x81000000 0x0 0x00000000 0x48 0x00010000 0x0 0x00010000 /* downstream I/O */ 903 ranges = <0x81000000 0x0 0x00000000 0x48 0x00010000 0x0 0x00010000 /* downstream I/O */
diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1012a.dtsi b/arch/arm64/boot/dts/freescale/fsl-ls1012a.dtsi
index 124a7e2d8442..337919366dc8 100644
--- a/arch/arm64/boot/dts/freescale/fsl-ls1012a.dtsi
+++ b/arch/arm64/boot/dts/freescale/fsl-ls1012a.dtsi
@@ -486,7 +486,6 @@
486 #address-cells = <3>; 486 #address-cells = <3>;
487 #size-cells = <2>; 487 #size-cells = <2>;
488 device_type = "pci"; 488 device_type = "pci";
489 num-lanes = <4>;
490 num-viewport = <2>; 489 num-viewport = <2>;
491 bus-range = <0x0 0xff>; 490 bus-range = <0x0 0xff>;
492 ranges = <0x81000000 0x0 0x00000000 0x40 0x00010000 0x0 0x00010000 /* downstream I/O */ 491 ranges = <0x81000000 0x0 0x00000000 0x40 0x00010000 0x0 0x00010000 /* downstream I/O */
diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1043a.dtsi b/arch/arm64/boot/dts/freescale/fsl-ls1043a.dtsi
index 71d9ed9ff985..c084c7a4b6a6 100644
--- a/arch/arm64/boot/dts/freescale/fsl-ls1043a.dtsi
+++ b/arch/arm64/boot/dts/freescale/fsl-ls1043a.dtsi
@@ -677,7 +677,6 @@
677 #size-cells = <2>; 677 #size-cells = <2>;
678 device_type = "pci"; 678 device_type = "pci";
679 dma-coherent; 679 dma-coherent;
680 num-lanes = <4>;
681 num-viewport = <6>; 680 num-viewport = <6>;
682 bus-range = <0x0 0xff>; 681 bus-range = <0x0 0xff>;
683 ranges = <0x81000000 0x0 0x00000000 0x40 0x00010000 0x0 0x00010000 /* downstream I/O */ 682 ranges = <0x81000000 0x0 0x00000000 0x40 0x00010000 0x0 0x00010000 /* downstream I/O */
@@ -704,7 +703,6 @@
704 #size-cells = <2>; 703 #size-cells = <2>;
705 device_type = "pci"; 704 device_type = "pci";
706 dma-coherent; 705 dma-coherent;
707 num-lanes = <2>;
708 num-viewport = <6>; 706 num-viewport = <6>;
709 bus-range = <0x0 0xff>; 707 bus-range = <0x0 0xff>;
710 ranges = <0x81000000 0x0 0x00000000 0x48 0x00010000 0x0 0x00010000 /* downstream I/O */ 708 ranges = <0x81000000 0x0 0x00000000 0x48 0x00010000 0x0 0x00010000 /* downstream I/O */
@@ -731,7 +729,6 @@
731 #size-cells = <2>; 729 #size-cells = <2>;
732 device_type = "pci"; 730 device_type = "pci";
733 dma-coherent; 731 dma-coherent;
734 num-lanes = <2>;
735 num-viewport = <6>; 732 num-viewport = <6>;
736 bus-range = <0x0 0xff>; 733 bus-range = <0x0 0xff>;
737 ranges = <0x81000000 0x0 0x00000000 0x50 0x00010000 0x0 0x00010000 /* downstream I/O */ 734 ranges = <0x81000000 0x0 0x00000000 0x50 0x00010000 0x0 0x00010000 /* downstream I/O */
diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1046a.dtsi b/arch/arm64/boot/dts/freescale/fsl-ls1046a.dtsi
index b0ef08b090dd..d4c1da3d4bde 100644
--- a/arch/arm64/boot/dts/freescale/fsl-ls1046a.dtsi
+++ b/arch/arm64/boot/dts/freescale/fsl-ls1046a.dtsi
@@ -649,7 +649,6 @@
649 #size-cells = <2>; 649 #size-cells = <2>;
650 device_type = "pci"; 650 device_type = "pci";
651 dma-coherent; 651 dma-coherent;
652 num-lanes = <4>;
653 num-viewport = <8>; 652 num-viewport = <8>;
654 bus-range = <0x0 0xff>; 653 bus-range = <0x0 0xff>;
655 ranges = <0x81000000 0x0 0x00000000 0x40 0x00010000 0x0 0x00010000 /* downstream I/O */ 654 ranges = <0x81000000 0x0 0x00000000 0x40 0x00010000 0x0 0x00010000 /* downstream I/O */
@@ -671,7 +670,6 @@
671 reg-names = "regs", "addr_space"; 670 reg-names = "regs", "addr_space";
672 num-ib-windows = <6>; 671 num-ib-windows = <6>;
673 num-ob-windows = <8>; 672 num-ob-windows = <8>;
674 num-lanes = <2>;
675 status = "disabled"; 673 status = "disabled";
676 }; 674 };
677 675
@@ -687,7 +685,6 @@
687 #size-cells = <2>; 685 #size-cells = <2>;
688 device_type = "pci"; 686 device_type = "pci";
689 dma-coherent; 687 dma-coherent;
690 num-lanes = <2>;
691 num-viewport = <8>; 688 num-viewport = <8>;
692 bus-range = <0x0 0xff>; 689 bus-range = <0x0 0xff>;
693 ranges = <0x81000000 0x0 0x00000000 0x48 0x00010000 0x0 0x00010000 /* downstream I/O */ 690 ranges = <0x81000000 0x0 0x00000000 0x48 0x00010000 0x0 0x00010000 /* downstream I/O */
@@ -709,7 +706,6 @@
709 reg-names = "regs", "addr_space"; 706 reg-names = "regs", "addr_space";
710 num-ib-windows = <6>; 707 num-ib-windows = <6>;
711 num-ob-windows = <8>; 708 num-ob-windows = <8>;
712 num-lanes = <2>;
713 status = "disabled"; 709 status = "disabled";
714 }; 710 };
715 711
@@ -725,7 +721,6 @@
725 #size-cells = <2>; 721 #size-cells = <2>;
726 device_type = "pci"; 722 device_type = "pci";
727 dma-coherent; 723 dma-coherent;
728 num-lanes = <2>;
729 num-viewport = <8>; 724 num-viewport = <8>;
730 bus-range = <0x0 0xff>; 725 bus-range = <0x0 0xff>;
731 ranges = <0x81000000 0x0 0x00000000 0x50 0x00010000 0x0 0x00010000 /* downstream I/O */ 726 ranges = <0x81000000 0x0 0x00000000 0x50 0x00010000 0x0 0x00010000 /* downstream I/O */
@@ -747,7 +742,6 @@
747 reg-names = "regs", "addr_space"; 742 reg-names = "regs", "addr_space";
748 num-ib-windows = <6>; 743 num-ib-windows = <6>;
749 num-ob-windows = <8>; 744 num-ob-windows = <8>;
750 num-lanes = <2>;
751 status = "disabled"; 745 status = "disabled";
752 }; 746 };
753 747
diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1088a.dtsi b/arch/arm64/boot/dts/freescale/fsl-ls1088a.dtsi
index d1469b0747c7..c676d0771762 100644
--- a/arch/arm64/boot/dts/freescale/fsl-ls1088a.dtsi
+++ b/arch/arm64/boot/dts/freescale/fsl-ls1088a.dtsi
@@ -469,7 +469,6 @@
469 #size-cells = <2>; 469 #size-cells = <2>;
470 device_type = "pci"; 470 device_type = "pci";
471 dma-coherent; 471 dma-coherent;
472 num-lanes = <4>;
473 num-viewport = <256>; 472 num-viewport = <256>;
474 bus-range = <0x0 0xff>; 473 bus-range = <0x0 0xff>;
475 ranges = <0x81000000 0x0 0x00000000 0x20 0x00010000 0x0 0x00010000 /* downstream I/O */ 474 ranges = <0x81000000 0x0 0x00000000 0x20 0x00010000 0x0 0x00010000 /* downstream I/O */
@@ -495,7 +494,6 @@
495 #size-cells = <2>; 494 #size-cells = <2>;
496 device_type = "pci"; 495 device_type = "pci";
497 dma-coherent; 496 dma-coherent;
498 num-lanes = <4>;
499 num-viewport = <6>; 497 num-viewport = <6>;
500 bus-range = <0x0 0xff>; 498 bus-range = <0x0 0xff>;
501 ranges = <0x81000000 0x0 0x00000000 0x28 0x00010000 0x0 0x00010000 /* downstream I/O */ 499 ranges = <0x81000000 0x0 0x00000000 0x28 0x00010000 0x0 0x00010000 /* downstream I/O */
@@ -521,7 +519,6 @@
521 #size-cells = <2>; 519 #size-cells = <2>;
522 device_type = "pci"; 520 device_type = "pci";
523 dma-coherent; 521 dma-coherent;
524 num-lanes = <8>;
525 num-viewport = <6>; 522 num-viewport = <6>;
526 bus-range = <0x0 0xff>; 523 bus-range = <0x0 0xff>;
527 ranges = <0x81000000 0x0 0x00000000 0x30 0x00010000 0x0 0x00010000 /* downstream I/O */ 524 ranges = <0x81000000 0x0 0x00000000 0x30 0x00010000 0x0 0x00010000 /* downstream I/O */
diff --git a/arch/arm64/boot/dts/freescale/fsl-ls208xa.dtsi b/arch/arm64/boot/dts/freescale/fsl-ls208xa.dtsi
index 64101c9962ce..7a0be8eaa84a 100644
--- a/arch/arm64/boot/dts/freescale/fsl-ls208xa.dtsi
+++ b/arch/arm64/boot/dts/freescale/fsl-ls208xa.dtsi
@@ -639,7 +639,6 @@
639 #size-cells = <2>; 639 #size-cells = <2>;
640 device_type = "pci"; 640 device_type = "pci";
641 dma-coherent; 641 dma-coherent;
642 num-lanes = <4>;
643 num-viewport = <6>; 642 num-viewport = <6>;
644 bus-range = <0x0 0xff>; 643 bus-range = <0x0 0xff>;
645 msi-parent = <&its>; 644 msi-parent = <&its>;
@@ -661,7 +660,6 @@
661 #size-cells = <2>; 660 #size-cells = <2>;
662 device_type = "pci"; 661 device_type = "pci";
663 dma-coherent; 662 dma-coherent;
664 num-lanes = <4>;
665 num-viewport = <6>; 663 num-viewport = <6>;
666 bus-range = <0x0 0xff>; 664 bus-range = <0x0 0xff>;
667 msi-parent = <&its>; 665 msi-parent = <&its>;
@@ -683,7 +681,6 @@
683 #size-cells = <2>; 681 #size-cells = <2>;
684 device_type = "pci"; 682 device_type = "pci";
685 dma-coherent; 683 dma-coherent;
686 num-lanes = <8>;
687 num-viewport = <256>; 684 num-viewport = <256>;
688 bus-range = <0x0 0xff>; 685 bus-range = <0x0 0xff>;
689 msi-parent = <&its>; 686 msi-parent = <&its>;
@@ -705,7 +702,6 @@
705 #size-cells = <2>; 702 #size-cells = <2>;
706 device_type = "pci"; 703 device_type = "pci";
707 dma-coherent; 704 dma-coherent;
708 num-lanes = <4>;
709 num-viewport = <6>; 705 num-viewport = <6>;
710 bus-range = <0x0 0xff>; 706 bus-range = <0x0 0xff>;
711 msi-parent = <&its>; 707 msi-parent = <&its>;
diff --git a/arch/arm64/boot/dts/nvidia/tegra194-p2888.dtsi b/arch/arm64/boot/dts/nvidia/tegra194-p2888.dtsi
index 62e07e1197cc..4c38426a6969 100644
--- a/arch/arm64/boot/dts/nvidia/tegra194-p2888.dtsi
+++ b/arch/arm64/boot/dts/nvidia/tegra194-p2888.dtsi
@@ -289,5 +289,29 @@
289 gpio = <&gpio TEGRA194_MAIN_GPIO(A, 3) GPIO_ACTIVE_HIGH>; 289 gpio = <&gpio TEGRA194_MAIN_GPIO(A, 3) GPIO_ACTIVE_HIGH>;
290 enable-active-high; 290 enable-active-high;
291 }; 291 };
292
293 vdd_3v3_pcie: regulator@2 {
294 compatible = "regulator-fixed";
295 reg = <2>;
296
297 regulator-name = "PEX_3V3";
298 regulator-min-microvolt = <3300000>;
299 regulator-max-microvolt = <3300000>;
300 gpio = <&gpio TEGRA194_MAIN_GPIO(Z, 2) GPIO_ACTIVE_HIGH>;
301 regulator-boot-on;
302 enable-active-high;
303 };
304
305 vdd_12v_pcie: regulator@3 {
306 compatible = "regulator-fixed";
307 reg = <3>;
308
309 regulator-name = "VDD_12V";
310 regulator-min-microvolt = <1200000>;
311 regulator-max-microvolt = <1200000>;
312 gpio = <&gpio TEGRA194_MAIN_GPIO(A, 1) GPIO_ACTIVE_LOW>;
313 regulator-boot-on;
314 enable-active-low;
315 };
292 }; 316 };
293}; 317};
diff --git a/arch/arm64/boot/dts/nvidia/tegra194-p2972-0000.dts b/arch/arm64/boot/dts/nvidia/tegra194-p2972-0000.dts
index 23597d53c9c9..d47cd8c4dd24 100644
--- a/arch/arm64/boot/dts/nvidia/tegra194-p2972-0000.dts
+++ b/arch/arm64/boot/dts/nvidia/tegra194-p2972-0000.dts
@@ -93,9 +93,11 @@
93 }; 93 };
94 94
95 pcie@141a0000 { 95 pcie@141a0000 {
96 status = "disabled"; 96 status = "okay";
97 97
98 vddio-pex-ctl-supply = <&vdd_1v8ao>; 98 vddio-pex-ctl-supply = <&vdd_1v8ao>;
99 vpcie3v3-supply = <&vdd_3v3_pcie>;
100 vpcie12v-supply = <&vdd_12v_pcie>;
99 101
100 phys = <&p2u_nvhs_0>, <&p2u_nvhs_1>, <&p2u_nvhs_2>, 102 phys = <&p2u_nvhs_0>, <&p2u_nvhs_1>, <&p2u_nvhs_2>,
101 <&p2u_nvhs_3>, <&p2u_nvhs_4>, <&p2u_nvhs_5>, 103 <&p2u_nvhs_3>, <&p2u_nvhs_4>, <&p2u_nvhs_5>,
diff --git a/arch/arm64/boot/dts/nvidia/tegra194.dtsi b/arch/arm64/boot/dts/nvidia/tegra194.dtsi
index adebbbf36bd0..3c0cf54f0aab 100644
--- a/arch/arm64/boot/dts/nvidia/tegra194.dtsi
+++ b/arch/arm64/boot/dts/nvidia/tegra194.dtsi
@@ -3,8 +3,9 @@
3#include <dt-bindings/gpio/tegra194-gpio.h> 3#include <dt-bindings/gpio/tegra194-gpio.h>
4#include <dt-bindings/interrupt-controller/arm-gic.h> 4#include <dt-bindings/interrupt-controller/arm-gic.h>
5#include <dt-bindings/mailbox/tegra186-hsp.h> 5#include <dt-bindings/mailbox/tegra186-hsp.h>
6#include <dt-bindings/reset/tegra194-reset.h> 6#include <dt-bindings/pinctrl/pinctrl-tegra.h>
7#include <dt-bindings/power/tegra194-powergate.h> 7#include <dt-bindings/power/tegra194-powergate.h>
8#include <dt-bindings/reset/tegra194-reset.h>
8#include <dt-bindings/thermal/tegra194-bpmp-thermal.h> 9#include <dt-bindings/thermal/tegra194-bpmp-thermal.h>
9 10
10/ { 11/ {
@@ -130,6 +131,38 @@
130 }; 131 };
131 }; 132 };
132 133
134 pinmux: pinmux@2430000 {
135 compatible = "nvidia,tegra194-pinmux";
136 reg = <0x2430000 0x17000
137 0xc300000 0x4000>;
138
139 status = "okay";
140
141 pex_rst_c5_out_state: pex_rst_c5_out {
142 pex_rst {
143 nvidia,pins = "pex_l5_rst_n_pgg1";
144 nvidia,schmitt = <TEGRA_PIN_DISABLE>;
145 nvidia,lpdr = <TEGRA_PIN_ENABLE>;
146 nvidia,enable-input = <TEGRA_PIN_DISABLE>;
147 nvidia,io-high-voltage = <TEGRA_PIN_ENABLE>;
148 nvidia,tristate = <TEGRA_PIN_DISABLE>;
149 nvidia,pull = <TEGRA_PIN_PULL_NONE>;
150 };
151 };
152
153 clkreq_c5_bi_dir_state: clkreq_c5_bi_dir {
154 clkreq {
155 nvidia,pins = "pex_l5_clkreq_n_pgg0";
156 nvidia,schmitt = <TEGRA_PIN_DISABLE>;
157 nvidia,lpdr = <TEGRA_PIN_ENABLE>;
158 nvidia,enable-input = <TEGRA_PIN_ENABLE>;
159 nvidia,io-high-voltage = <TEGRA_PIN_ENABLE>;
160 nvidia,tristate = <TEGRA_PIN_DISABLE>;
161 nvidia,pull = <TEGRA_PIN_PULL_NONE>;
162 };
163 };
164 };
165
133 uarta: serial@3100000 { 166 uarta: serial@3100000 {
134 compatible = "nvidia,tegra194-uart", "nvidia,tegra20-uart"; 167 compatible = "nvidia,tegra194-uart", "nvidia,tegra20-uart";
135 reg = <0x03100000 0x40>; 168 reg = <0x03100000 0x40>;
@@ -1365,6 +1398,9 @@
1365 num-viewport = <8>; 1398 num-viewport = <8>;
1366 linux,pci-domain = <5>; 1399 linux,pci-domain = <5>;
1367 1400
1401 pinctrl-names = "default";
1402 pinctrl-0 = <&pex_rst_c5_out_state>, <&clkreq_c5_bi_dir_state>;
1403
1368 clocks = <&bpmp TEGRA194_CLK_PEX1_CORE_5>, 1404 clocks = <&bpmp TEGRA194_CLK_PEX1_CORE_5>,
1369 <&bpmp TEGRA194_CLK_PEX1_CORE_5M>; 1405 <&bpmp TEGRA194_CLK_PEX1_CORE_5M>;
1370 clock-names = "core", "core_m"; 1406 clock-names = "core", "core_m";
diff --git a/arch/ia64/kernel/irq_ia64.c b/arch/ia64/kernel/irq_ia64.c
index f10208478131..8e91c86e8072 100644
--- a/arch/ia64/kernel/irq_ia64.c
+++ b/arch/ia64/kernel/irq_ia64.c
@@ -583,6 +583,7 @@ void ia64_process_pending_intr(void)
583static irqreturn_t dummy_handler (int irq, void *dev_id) 583static irqreturn_t dummy_handler (int irq, void *dev_id)
584{ 584{
585 BUG(); 585 BUG();
586 return IRQ_NONE;
586} 587}
587 588
588static struct irqaction ipi_irqaction = { 589static struct irqaction ipi_irqaction = {
diff --git a/arch/ia64/mm/contig.c b/arch/ia64/mm/contig.c
index db09a693f094..5b00dc3898e1 100644
--- a/arch/ia64/mm/contig.c
+++ b/arch/ia64/mm/contig.c
@@ -108,7 +108,6 @@ setup_per_cpu_areas(void)
108 struct pcpu_group_info *gi; 108 struct pcpu_group_info *gi;
109 unsigned int cpu; 109 unsigned int cpu;
110 ssize_t static_size, reserved_size, dyn_size; 110 ssize_t static_size, reserved_size, dyn_size;
111 int rc;
112 111
113 ai = pcpu_alloc_alloc_info(1, num_possible_cpus()); 112 ai = pcpu_alloc_alloc_info(1, num_possible_cpus());
114 if (!ai) 113 if (!ai)
diff --git a/arch/ia64/mm/discontig.c b/arch/ia64/mm/discontig.c
index 219fc640414b..4f33f6e7e206 100644
--- a/arch/ia64/mm/discontig.c
+++ b/arch/ia64/mm/discontig.c
@@ -186,7 +186,7 @@ void __init setup_per_cpu_areas(void)
186 unsigned long base_offset; 186 unsigned long base_offset;
187 unsigned int cpu; 187 unsigned int cpu;
188 ssize_t static_size, reserved_size, dyn_size; 188 ssize_t static_size, reserved_size, dyn_size;
189 int node, prev_node, unit, nr_units, rc; 189 int node, prev_node, unit, nr_units;
190 190
191 ai = pcpu_alloc_alloc_info(MAX_NUMNODES, nr_cpu_ids); 191 ai = pcpu_alloc_alloc_info(MAX_NUMNODES, nr_cpu_ids);
192 if (!ai) 192 if (!ai)
diff --git a/arch/microblaze/Kconfig b/arch/microblaze/Kconfig
index 632c9477a0f6..c9c4be822456 100644
--- a/arch/microblaze/Kconfig
+++ b/arch/microblaze/Kconfig
@@ -5,15 +5,18 @@ config MICROBLAZE
5 select ARCH_NO_SWAP 5 select ARCH_NO_SWAP
6 select ARCH_HAS_BINFMT_FLAT if !MMU 6 select ARCH_HAS_BINFMT_FLAT if !MMU
7 select ARCH_HAS_DMA_COHERENT_TO_PFN if MMU 7 select ARCH_HAS_DMA_COHERENT_TO_PFN if MMU
8 select ARCH_HAS_DMA_PREP_COHERENT
8 select ARCH_HAS_GCOV_PROFILE_ALL 9 select ARCH_HAS_GCOV_PROFILE_ALL
9 select ARCH_HAS_SYNC_DMA_FOR_CPU 10 select ARCH_HAS_SYNC_DMA_FOR_CPU
10 select ARCH_HAS_SYNC_DMA_FOR_DEVICE 11 select ARCH_HAS_SYNC_DMA_FOR_DEVICE
12 select ARCH_HAS_UNCACHED_SEGMENT if !MMU
11 select ARCH_MIGHT_HAVE_PC_PARPORT 13 select ARCH_MIGHT_HAVE_PC_PARPORT
12 select ARCH_WANT_IPC_PARSE_VERSION 14 select ARCH_WANT_IPC_PARSE_VERSION
13 select BUILDTIME_EXTABLE_SORT 15 select BUILDTIME_EXTABLE_SORT
14 select TIMER_OF 16 select TIMER_OF
15 select CLONE_BACKWARDS3 17 select CLONE_BACKWARDS3
16 select COMMON_CLK 18 select COMMON_CLK
19 select DMA_DIRECT_REMAP if MMU
17 select GENERIC_ATOMIC64 20 select GENERIC_ATOMIC64
18 select GENERIC_CLOCKEVENTS 21 select GENERIC_CLOCKEVENTS
19 select GENERIC_CPU_DEVICES 22 select GENERIC_CPU_DEVICES
diff --git a/arch/microblaze/boot/dts/system.dts b/arch/microblaze/boot/dts/system.dts
index 5a8a9d090c37..5b236527176e 100644
--- a/arch/microblaze/boot/dts/system.dts
+++ b/arch/microblaze/boot/dts/system.dts
@@ -18,7 +18,6 @@
18 #address-cells = <1>; 18 #address-cells = <1>;
19 #size-cells = <1>; 19 #size-cells = <1>;
20 compatible = "xlnx,microblaze"; 20 compatible = "xlnx,microblaze";
21 hard-reset-gpios = <&LEDs_8Bit 2 1>;
22 model = "testing"; 21 model = "testing";
23 DDR2_SDRAM: memory@90000000 { 22 DDR2_SDRAM: memory@90000000 {
24 device_type = "memory"; 23 device_type = "memory";
@@ -281,6 +280,21 @@
281 gpios = <&LEDs_8Bit 7 1>; 280 gpios = <&LEDs_8Bit 7 1>;
282 }; 281 };
283 } ; 282 } ;
283
284 gpio-restart {
285 compatible = "gpio-restart";
286 /*
287 * FIXME: is this active low or active high?
288 * the current flag (1) indicates active low.
289 * delay measures are templates, should be adjusted
290 * to datasheet or trial-and-error with real hardware.
291 */
292 gpios = <&LEDs_8Bit 2 1>;
293 active-delay = <100>;
294 inactive-delay = <10>;
295 wait-delay = <100>;
296 };
297
284 RS232_Uart_1: serial@84000000 { 298 RS232_Uart_1: serial@84000000 {
285 clock-frequency = <125000000>; 299 clock-frequency = <125000000>;
286 compatible = "xlnx,xps-uartlite-1.00.a"; 300 compatible = "xlnx,xps-uartlite-1.00.a";
diff --git a/arch/microblaze/configs/mmu_defconfig b/arch/microblaze/configs/mmu_defconfig
index 92fd4e95b488..654edfdc7867 100644
--- a/arch/microblaze/configs/mmu_defconfig
+++ b/arch/microblaze/configs/mmu_defconfig
@@ -5,15 +5,10 @@ CONFIG_IKCONFIG=y
5CONFIG_IKCONFIG_PROC=y 5CONFIG_IKCONFIG_PROC=y
6CONFIG_SYSFS_DEPRECATED=y 6CONFIG_SYSFS_DEPRECATED=y
7CONFIG_SYSFS_DEPRECATED_V2=y 7CONFIG_SYSFS_DEPRECATED_V2=y
8CONFIG_KALLSYMS_ALL=y
9# CONFIG_BASE_FULL is not set 8# CONFIG_BASE_FULL is not set
9CONFIG_KALLSYMS_ALL=y
10CONFIG_EMBEDDED=y 10CONFIG_EMBEDDED=y
11CONFIG_SLAB=y 11CONFIG_SLAB=y
12CONFIG_MODULES=y
13CONFIG_MODULE_UNLOAD=y
14# CONFIG_BLK_DEV_BSG is not set
15CONFIG_PARTITION_ADVANCED=y
16# CONFIG_EFI_PARTITION is not set
17CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR=1 12CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR=1
18CONFIG_XILINX_MICROBLAZE0_USE_PCMP_INSTR=1 13CONFIG_XILINX_MICROBLAZE0_USE_PCMP_INSTR=1
19CONFIG_XILINX_MICROBLAZE0_USE_BARREL=1 14CONFIG_XILINX_MICROBLAZE0_USE_BARREL=1
@@ -25,14 +20,19 @@ CONFIG_MMU=y
25CONFIG_CMDLINE_BOOL=y 20CONFIG_CMDLINE_BOOL=y
26CONFIG_CMDLINE_FORCE=y 21CONFIG_CMDLINE_FORCE=y
27CONFIG_HIGHMEM=y 22CONFIG_HIGHMEM=y
28CONFIG_PCI=y
29CONFIG_PCI_XILINX=y 23CONFIG_PCI_XILINX=y
24CONFIG_MODULES=y
25CONFIG_MODULE_UNLOAD=y
26# CONFIG_BLK_DEV_BSG is not set
27CONFIG_PARTITION_ADVANCED=y
28# CONFIG_EFI_PARTITION is not set
30CONFIG_NET=y 29CONFIG_NET=y
31CONFIG_PACKET=y 30CONFIG_PACKET=y
32CONFIG_UNIX=y 31CONFIG_UNIX=y
33CONFIG_INET=y 32CONFIG_INET=y
34# CONFIG_IPV6 is not set 33# CONFIG_IPV6 is not set
35CONFIG_BRIDGE=m 34CONFIG_BRIDGE=m
35CONFIG_PCI=y
36CONFIG_MTD=y 36CONFIG_MTD=y
37CONFIG_MTD_CFI=y 37CONFIG_MTD_CFI=y
38CONFIG_MTD_CFI_INTELEXT=y 38CONFIG_MTD_CFI_INTELEXT=y
@@ -41,6 +41,7 @@ CONFIG_BLK_DEV_RAM=y
41CONFIG_BLK_DEV_RAM_SIZE=8192 41CONFIG_BLK_DEV_RAM_SIZE=8192
42CONFIG_NETDEVICES=y 42CONFIG_NETDEVICES=y
43CONFIG_XILINX_EMACLITE=y 43CONFIG_XILINX_EMACLITE=y
44CONFIG_XILINX_AXI_EMAC=y
44CONFIG_XILINX_LL_TEMAC=y 45CONFIG_XILINX_LL_TEMAC=y
45# CONFIG_INPUT is not set 46# CONFIG_INPUT is not set
46# CONFIG_SERIO is not set 47# CONFIG_SERIO is not set
@@ -59,6 +60,8 @@ CONFIG_SPI_XILINX=y
59CONFIG_GPIOLIB=y 60CONFIG_GPIOLIB=y
60CONFIG_GPIO_SYSFS=y 61CONFIG_GPIO_SYSFS=y
61CONFIG_GPIO_XILINX=y 62CONFIG_GPIO_XILINX=y
63CONFIG_POWER_RESET=y
64CONFIG_POWER_RESET_GPIO_RESTART=y
62# CONFIG_HWMON is not set 65# CONFIG_HWMON is not set
63CONFIG_WATCHDOG=y 66CONFIG_WATCHDOG=y
64CONFIG_XILINX_WATCHDOG=y 67CONFIG_XILINX_WATCHDOG=y
@@ -74,8 +77,8 @@ CONFIG_CRAMFS=y
74CONFIG_ROMFS_FS=y 77CONFIG_ROMFS_FS=y
75CONFIG_NFS_FS=y 78CONFIG_NFS_FS=y
76CONFIG_CIFS=y 79CONFIG_CIFS=y
77CONFIG_CIFS_STATS=y
78CONFIG_CIFS_STATS2=y 80CONFIG_CIFS_STATS2=y
81CONFIG_ENCRYPTED_KEYS=y
79CONFIG_DEBUG_INFO=y 82CONFIG_DEBUG_INFO=y
80CONFIG_DEBUG_SLAB=y 83CONFIG_DEBUG_SLAB=y
81CONFIG_DETECT_HUNG_TASK=y 84CONFIG_DETECT_HUNG_TASK=y
@@ -83,6 +86,3 @@ CONFIG_DEBUG_SPINLOCK=y
83CONFIG_KGDB=y 86CONFIG_KGDB=y
84CONFIG_KGDB_TESTS=y 87CONFIG_KGDB_TESTS=y
85CONFIG_KGDB_KDB=y 88CONFIG_KGDB_KDB=y
86CONFIG_EARLY_PRINTK=y
87CONFIG_KEYS=y
88CONFIG_ENCRYPTED_KEYS=y
diff --git a/arch/microblaze/configs/nommu_defconfig b/arch/microblaze/configs/nommu_defconfig
index 06d69a6e192d..377de39ccb8c 100644
--- a/arch/microblaze/configs/nommu_defconfig
+++ b/arch/microblaze/configs/nommu_defconfig
@@ -7,15 +7,10 @@ CONFIG_IKCONFIG=y
7CONFIG_IKCONFIG_PROC=y 7CONFIG_IKCONFIG_PROC=y
8CONFIG_SYSFS_DEPRECATED=y 8CONFIG_SYSFS_DEPRECATED=y
9CONFIG_SYSFS_DEPRECATED_V2=y 9CONFIG_SYSFS_DEPRECATED_V2=y
10CONFIG_KALLSYMS_ALL=y
11# CONFIG_BASE_FULL is not set 10# CONFIG_BASE_FULL is not set
11CONFIG_KALLSYMS_ALL=y
12CONFIG_EMBEDDED=y 12CONFIG_EMBEDDED=y
13CONFIG_SLAB=y 13CONFIG_SLAB=y
14CONFIG_MODULES=y
15CONFIG_MODULE_UNLOAD=y
16# CONFIG_BLK_DEV_BSG is not set
17CONFIG_PARTITION_ADVANCED=y
18# CONFIG_EFI_PARTITION is not set
19CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR=1 14CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR=1
20CONFIG_XILINX_MICROBLAZE0_USE_PCMP_INSTR=1 15CONFIG_XILINX_MICROBLAZE0_USE_PCMP_INSTR=1
21CONFIG_XILINX_MICROBLAZE0_USE_BARREL=1 16CONFIG_XILINX_MICROBLAZE0_USE_BARREL=1
@@ -25,13 +20,18 @@ CONFIG_XILINX_MICROBLAZE0_USE_FPU=2
25CONFIG_HZ_100=y 20CONFIG_HZ_100=y
26CONFIG_CMDLINE_BOOL=y 21CONFIG_CMDLINE_BOOL=y
27CONFIG_CMDLINE_FORCE=y 22CONFIG_CMDLINE_FORCE=y
28CONFIG_PCI=y
29CONFIG_PCI_XILINX=y 23CONFIG_PCI_XILINX=y
24CONFIG_MODULES=y
25CONFIG_MODULE_UNLOAD=y
26# CONFIG_BLK_DEV_BSG is not set
27CONFIG_PARTITION_ADVANCED=y
28# CONFIG_EFI_PARTITION is not set
30CONFIG_NET=y 29CONFIG_NET=y
31CONFIG_PACKET=y 30CONFIG_PACKET=y
32CONFIG_UNIX=y 31CONFIG_UNIX=y
33CONFIG_INET=y 32CONFIG_INET=y
34# CONFIG_IPV6 is not set 33# CONFIG_IPV6 is not set
34CONFIG_PCI=y
35CONFIG_MTD=y 35CONFIG_MTD=y
36CONFIG_MTD_CMDLINE_PARTS=y 36CONFIG_MTD_CMDLINE_PARTS=y
37CONFIG_MTD_BLOCK=y 37CONFIG_MTD_BLOCK=y
@@ -62,6 +62,8 @@ CONFIG_SPI_XILINX=y
62CONFIG_GPIOLIB=y 62CONFIG_GPIOLIB=y
63CONFIG_GPIO_SYSFS=y 63CONFIG_GPIO_SYSFS=y
64CONFIG_GPIO_XILINX=y 64CONFIG_GPIO_XILINX=y
65CONFIG_POWER_RESET=y
66CONFIG_POWER_RESET_GPIO_RESTART=y
65# CONFIG_HWMON is not set 67# CONFIG_HWMON is not set
66CONFIG_WATCHDOG=y 68CONFIG_WATCHDOG=y
67CONFIG_XILINX_WATCHDOG=y 69CONFIG_XILINX_WATCHDOG=y
@@ -75,11 +77,6 @@ CONFIG_ROMFS_FS=y
75CONFIG_NFS_FS=y 77CONFIG_NFS_FS=y
76CONFIG_NFS_V3_ACL=y 78CONFIG_NFS_V3_ACL=y
77CONFIG_NLS=y 79CONFIG_NLS=y
78CONFIG_DEBUG_INFO=y
79CONFIG_DEBUG_SLAB=y
80CONFIG_DETECT_HUNG_TASK=y
81CONFIG_DEBUG_SPINLOCK=y
82CONFIG_EARLY_PRINTK=y
83CONFIG_KEYS=y 80CONFIG_KEYS=y
84CONFIG_ENCRYPTED_KEYS=y 81CONFIG_ENCRYPTED_KEYS=y
85CONFIG_CRYPTO_ECB=y 82CONFIG_CRYPTO_ECB=y
@@ -87,3 +84,7 @@ CONFIG_CRYPTO_MD4=y
87CONFIG_CRYPTO_MD5=y 84CONFIG_CRYPTO_MD5=y
88CONFIG_CRYPTO_ARC4=y 85CONFIG_CRYPTO_ARC4=y
89CONFIG_CRYPTO_DES=y 86CONFIG_CRYPTO_DES=y
87CONFIG_DEBUG_INFO=y
88CONFIG_DEBUG_SLAB=y
89CONFIG_DETECT_HUNG_TASK=y
90CONFIG_DEBUG_SPINLOCK=y
diff --git a/arch/microblaze/include/asm/io.h b/arch/microblaze/include/asm/io.h
index c7968139486f..86c95b2a1ce1 100644
--- a/arch/microblaze/include/asm/io.h
+++ b/arch/microblaze/include/asm/io.h
@@ -40,7 +40,6 @@ extern void iounmap(volatile void __iomem *addr);
40 40
41extern void __iomem *ioremap(phys_addr_t address, unsigned long size); 41extern void __iomem *ioremap(phys_addr_t address, unsigned long size);
42#define ioremap_nocache(addr, size) ioremap((addr), (size)) 42#define ioremap_nocache(addr, size) ioremap((addr), (size))
43#define ioremap_fullcache(addr, size) ioremap((addr), (size))
44#define ioremap_wc(addr, size) ioremap((addr), (size)) 43#define ioremap_wc(addr, size) ioremap((addr), (size))
45#define ioremap_wt(addr, size) ioremap((addr), (size)) 44#define ioremap_wt(addr, size) ioremap((addr), (size))
46 45
diff --git a/arch/microblaze/include/asm/pci.h b/arch/microblaze/include/asm/pci.h
index 21ddba9188b2..7c4dc5d85f53 100644
--- a/arch/microblaze/include/asm/pci.h
+++ b/arch/microblaze/include/asm/pci.h
@@ -66,8 +66,6 @@ extern pgprot_t pci_phys_mem_access_prot(struct file *file,
66 unsigned long size, 66 unsigned long size,
67 pgprot_t prot); 67 pgprot_t prot);
68 68
69#define HAVE_ARCH_PCI_RESOURCE_TO_USER
70
71/* This part of code was originally in xilinx-pci.h */ 69/* This part of code was originally in xilinx-pci.h */
72#ifdef CONFIG_PCI_XILINX 70#ifdef CONFIG_PCI_XILINX
73extern void __init xilinx_pci_init(void); 71extern void __init xilinx_pci_init(void);
diff --git a/arch/microblaze/include/asm/uaccess.h b/arch/microblaze/include/asm/uaccess.h
index bff2a71c828a..a1f206b90753 100644
--- a/arch/microblaze/include/asm/uaccess.h
+++ b/arch/microblaze/include/asm/uaccess.h
@@ -163,44 +163,15 @@ extern long __user_bad(void);
163 * Returns zero on success, or -EFAULT on error. 163 * Returns zero on success, or -EFAULT on error.
164 * On error, the variable @x is set to zero. 164 * On error, the variable @x is set to zero.
165 */ 165 */
166#define get_user(x, ptr) \ 166#define get_user(x, ptr) ({ \
167 __get_user_check((x), (ptr), sizeof(*(ptr))) 167 const typeof(*(ptr)) __user *__gu_ptr = (ptr); \
168 168 access_ok(__gu_ptr, sizeof(*__gu_ptr)) ? \
169#define __get_user_check(x, ptr, size) \ 169 __get_user(x, __gu_ptr) : -EFAULT; \
170({ \
171 unsigned long __gu_val = 0; \
172 const typeof(*(ptr)) __user *__gu_addr = (ptr); \
173 int __gu_err = 0; \
174 \
175 if (access_ok(__gu_addr, size)) { \
176 switch (size) { \
177 case 1: \
178 __get_user_asm("lbu", __gu_addr, __gu_val, \
179 __gu_err); \
180 break; \
181 case 2: \
182 __get_user_asm("lhu", __gu_addr, __gu_val, \
183 __gu_err); \
184 break; \
185 case 4: \
186 __get_user_asm("lw", __gu_addr, __gu_val, \
187 __gu_err); \
188 break; \
189 default: \
190 __gu_err = __user_bad(); \
191 break; \
192 } \
193 } else { \
194 __gu_err = -EFAULT; \
195 } \
196 x = (__force typeof(*(ptr)))__gu_val; \
197 __gu_err; \
198}) 170})
199 171
200#define __get_user(x, ptr) \ 172#define __get_user(x, ptr) \
201({ \ 173({ \
202 unsigned long __gu_val = 0; \ 174 unsigned long __gu_val = 0; \
203 /*unsigned long __gu_ptr = (unsigned long)(ptr);*/ \
204 long __gu_err; \ 175 long __gu_err; \
205 switch (sizeof(*(ptr))) { \ 176 switch (sizeof(*(ptr))) { \
206 case 1: \ 177 case 1: \
@@ -212,6 +183,11 @@ extern long __user_bad(void);
212 case 4: \ 183 case 4: \
213 __get_user_asm("lw", (ptr), __gu_val, __gu_err); \ 184 __get_user_asm("lw", (ptr), __gu_val, __gu_err); \
214 break; \ 185 break; \
186 case 8: \
187 __gu_err = __copy_from_user(&__gu_val, ptr, 8); \
188 if (__gu_err) \
189 __gu_err = -EFAULT; \
190 break; \
215 default: \ 191 default: \
216 /* __gu_val = 0; __gu_err = -EINVAL;*/ __gu_err = __user_bad();\ 192 /* __gu_val = 0; __gu_err = -EINVAL;*/ __gu_err = __user_bad();\
217 } \ 193 } \
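
The reworked get_user() above is now just an access_ok() range check in front of the unchecked __get_user() path, with the old per-size switch folded away. A minimal sketch of the equivalent flow for a 4-byte read (the example_ function name is ours, not from the patch):

#include <linux/uaccess.h>

static inline long example_get_user_u32(u32 *val, const u32 __user *ptr)
{
	if (!access_ok(ptr, sizeof(*ptr)))
		return -EFAULT;		/* pointer outside the user range */

	return __get_user(*val, ptr);	/* 0 on success, -EFAULT on fault */
}
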
diff --git a/arch/microblaze/kernel/reset.c b/arch/microblaze/kernel/reset.c
index fcbe1daf6316..5f4722908164 100644
--- a/arch/microblaze/kernel/reset.c
+++ b/arch/microblaze/kernel/reset.c
@@ -8,83 +8,9 @@
8 */ 8 */
9 9
10#include <linux/init.h> 10#include <linux/init.h>
11#include <linux/delay.h>
11#include <linux/of_platform.h> 12#include <linux/of_platform.h>
12 13#include <linux/reboot.h>
13/* Trigger specific functions */
14#ifdef CONFIG_GPIOLIB
15
16#include <linux/of_gpio.h>
17
18static int handle; /* reset pin handle */
19static unsigned int reset_val;
20
21static int of_platform_reset_gpio_probe(void)
22{
23 int ret;
24 handle = of_get_named_gpio(of_find_node_by_path("/"),
25 "hard-reset-gpios", 0);
26
27 if (!gpio_is_valid(handle)) {
28 pr_info("Skipping unavailable RESET gpio %d (%s)\n",
29 handle, "reset");
30 return -ENODEV;
31 }
32
33 ret = gpio_request(handle, "reset");
34 if (ret < 0) {
35 pr_info("GPIO pin is already allocated\n");
36 return ret;
37 }
38
39 /* get current setup value */
40 reset_val = gpio_get_value(handle);
41 /* FIXME maybe worth to perform any action */
42 pr_debug("Reset: Gpio output state: 0x%x\n", reset_val);
43
44 /* Setup GPIO as output */
45 ret = gpio_direction_output(handle, 0);
46 if (ret < 0)
47 goto err;
48
49 /* Setup output direction */
50 gpio_set_value(handle, 0);
51
52 pr_info("RESET: Registered gpio device: %d, current val: %d\n",
53 handle, reset_val);
54 return 0;
55err:
56 gpio_free(handle);
57 return ret;
58}
59device_initcall(of_platform_reset_gpio_probe);
60
61
62static void gpio_system_reset(void)
63{
64 if (gpio_is_valid(handle))
65 gpio_set_value(handle, 1 - reset_val);
66 else
67 pr_notice("Reset GPIO unavailable - halting!\n");
68}
69#else
70static void gpio_system_reset(void)
71{
72 pr_notice("No reset GPIO present - halting!\n");
73}
74
75void of_platform_reset_gpio_probe(void)
76{
77 return;
78}
79#endif
80
81void machine_restart(char *cmd)
82{
83 pr_notice("Machine restart...\n");
84 gpio_system_reset();
85 while (1)
86 ;
87}
88 14
89void machine_shutdown(void) 15void machine_shutdown(void)
90{ 16{
@@ -106,3 +32,12 @@ void machine_power_off(void)
106 while (1) 32 while (1)
107 ; 33 ;
108} 34}
35
36void machine_restart(char *cmd)
37{
38 do_kernel_restart(cmd);
39 /* Give the restart hook 1 s to take us down */
40 mdelay(1000);
41 pr_emerg("Reboot failed -- System halted\n");
42 while (1);
43}
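
With the GPIO code gone, machine_restart() above relies entirely on the restart notifier chain run by do_kernel_restart(); on this platform the actual reset is expected to come from the gpio-restart driver enabled in the defconfigs. A minimal sketch of how any driver plugs into that chain (handler body and priority are illustrative):

#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/reboot.h>

/* Invoked from do_kernel_restart() via the restart notifier chain. */
static int example_restart_handler(struct notifier_block *nb,
				   unsigned long action, void *data)
{
	/* board-specific reset poke goes here, e.g. toggling a GPIO */
	return NOTIFY_DONE;
}

static struct notifier_block example_restart_nb = {
	.notifier_call	= example_restart_handler,
	.priority	= 128,		/* default restart handler priority */
};

static int __init example_restart_init(void)
{
	return register_restart_handler(&example_restart_nb);
}
device_initcall(example_restart_init);
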
diff --git a/arch/microblaze/mm/consistent.c b/arch/microblaze/mm/consistent.c
index bc7042209c57..8c5f0c332d8b 100644
--- a/arch/microblaze/mm/consistent.c
+++ b/arch/microblaze/mm/consistent.c
@@ -4,217 +4,56 @@
4 * Copyright (C) 2010 Michal Simek <monstr@monstr.eu> 4 * Copyright (C) 2010 Michal Simek <monstr@monstr.eu>
5 * Copyright (C) 2010 PetaLogix 5 * Copyright (C) 2010 PetaLogix
6 * Copyright (C) 2005 John Williams <jwilliams@itee.uq.edu.au> 6 * Copyright (C) 2005 John Williams <jwilliams@itee.uq.edu.au>
7 *
8 * Based on PowerPC version derived from arch/arm/mm/consistent.c
9 * Copyright (C) 2001 Dan Malek (dmalek@jlc.net)
10 * Copyright (C) 2000 Russell King
11 */ 7 */
12 8
13#include <linux/export.h>
14#include <linux/signal.h>
15#include <linux/sched.h>
16#include <linux/kernel.h> 9#include <linux/kernel.h>
17#include <linux/errno.h>
18#include <linux/string.h> 10#include <linux/string.h>
19#include <linux/types.h> 11#include <linux/types.h>
20#include <linux/ptrace.h>
21#include <linux/mman.h>
22#include <linux/mm.h> 12#include <linux/mm.h>
23#include <linux/swap.h>
24#include <linux/stddef.h>
25#include <linux/vmalloc.h>
26#include <linux/init.h> 13#include <linux/init.h>
27#include <linux/delay.h>
28#include <linux/memblock.h>
29#include <linux/highmem.h>
30#include <linux/pci.h>
31#include <linux/interrupt.h>
32#include <linux/gfp.h>
33#include <linux/dma-noncoherent.h> 14#include <linux/dma-noncoherent.h>
34
35#include <asm/pgalloc.h>
36#include <linux/io.h>
37#include <linux/hardirq.h>
38#include <linux/mmu_context.h>
39#include <asm/mmu.h>
40#include <linux/uaccess.h>
41#include <asm/pgtable.h>
42#include <asm/cpuinfo.h> 15#include <asm/cpuinfo.h>
43#include <asm/tlbflush.h> 16#include <asm/cacheflush.h>
44 17
45#ifndef CONFIG_MMU 18void arch_dma_prep_coherent(struct page *page, size_t size)
46/* I have to use dcache values because I can't relate on ram size */
47# define UNCACHED_SHADOW_MASK (cpuinfo.dcache_high - cpuinfo.dcache_base + 1)
48#endif
49
50/*
51 * Consistent memory allocators. Used for DMA devices that want to
52 * share uncached memory with the processor core.
53 * My crufty no-MMU approach is simple. In the HW platform we can optionally
54 * mirror the DDR up above the processor cacheable region. So, memory accessed
55 * in this mirror region will not be cached. It's alloced from the same
56 * pool as normal memory, but the handle we return is shifted up into the
57 * uncached region. This will no doubt cause big problems if memory allocated
58 * here is not also freed properly. -- JW
59 */
60void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
61 gfp_t gfp, unsigned long attrs)
62{ 19{
63 unsigned long order, vaddr; 20 phys_addr_t paddr = page_to_phys(page);
64 void *ret;
65 unsigned int i, err = 0;
66 struct page *page, *end;
67
68#ifdef CONFIG_MMU
69 phys_addr_t pa;
70 struct vm_struct *area;
71 unsigned long va;
72#endif
73
74 if (in_interrupt())
75 BUG();
76
77 /* Only allocate page size areas. */
78 size = PAGE_ALIGN(size);
79 order = get_order(size);
80
81 vaddr = __get_free_pages(gfp | __GFP_ZERO, order);
82 if (!vaddr)
83 return NULL;
84 21
85 /* 22 flush_dcache_range(paddr, paddr + size);
86 * we need to ensure that there are no cachelines in use, 23}
87 * or worse dirty in this area.
88 */
89 flush_dcache_range(virt_to_phys((void *)vaddr),
90 virt_to_phys((void *)vaddr) + size);
91 24
92#ifndef CONFIG_MMU 25#ifndef CONFIG_MMU
93 ret = (void *)vaddr; 26/*
94 /* 27 * Consistent memory allocators. Used for DMA devices that want to share
95 * Here's the magic! Note if the uncached shadow is not implemented, 28 * uncached memory with the processor core. My crufty no-MMU approach is
96 * it's up to the calling code to also test that condition and make 29 * simple. In the HW platform we can optionally mirror the DDR up above the
97 * other arranegments, such as manually flushing the cache and so on. 30 * processor cacheable region. So, memory accessed in this mirror region will
98 */ 31 * not be cached. It's alloced from the same pool as normal memory, but the
99# ifdef CONFIG_XILINX_UNCACHED_SHADOW 32 * handle we return is shifted up into the uncached region. This will no doubt
100 ret = (void *)((unsigned) ret | UNCACHED_SHADOW_MASK); 33 * cause big problems if memory allocated here is not also freed properly. -- JW
101# endif 34 *
 102	if ((unsigned int)ret > cpuinfo.dcache_base &&			 35 * I have to use dcache values because I can't rely on the RAM size:
103 (unsigned int)ret < cpuinfo.dcache_high) 36 */
104 pr_warn("ERROR: Your cache coherent area is CACHED!!!\n"); 37#ifdef CONFIG_XILINX_UNCACHED_SHADOW
105 38#define UNCACHED_SHADOW_MASK (cpuinfo.dcache_high - cpuinfo.dcache_base + 1)
106 /* dma_handle is same as physical (shadowed) address */
107 *dma_handle = (dma_addr_t)ret;
108#else 39#else
109 /* Allocate some common virtual space to map the new pages. */ 40#define UNCACHED_SHADOW_MASK 0
110 area = get_vm_area(size, VM_ALLOC); 41#endif /* CONFIG_XILINX_UNCACHED_SHADOW */
111 if (!area) {
112 free_pages(vaddr, order);
113 return NULL;
114 }
115 va = (unsigned long) area->addr;
116 ret = (void *)va;
117
118 /* This gives us the real physical address of the first page. */
119 *dma_handle = pa = __virt_to_phys(vaddr);
120#endif
121
122 /*
123 * free wasted pages. We skip the first page since we know
124 * that it will have count = 1 and won't require freeing.
125 * We also mark the pages in use as reserved so that
126 * remap_page_range works.
127 */
128 page = virt_to_page(vaddr);
129 end = page + (1 << order);
130
131 split_page(page, order);
132
133 for (i = 0; i < size && err == 0; i += PAGE_SIZE) {
134#ifdef CONFIG_MMU
135 /* MS: This is the whole magic - use cache inhibit pages */
136 err = map_page(va + i, pa + i, _PAGE_KERNEL | _PAGE_NO_CACHE);
137#endif
138 42
139 SetPageReserved(page); 43void *uncached_kernel_address(void *ptr)
140 page++;
141 }
142
143 /* Free the otherwise unused pages. */
144 while (page < end) {
145 __free_page(page);
146 page++;
147 }
148
149 if (err) {
150 free_pages(vaddr, order);
151 return NULL;
152 }
153
154 return ret;
155}
156
157#ifdef CONFIG_MMU
158static pte_t *consistent_virt_to_pte(void *vaddr)
159{ 44{
160 unsigned long addr = (unsigned long)vaddr; 45 unsigned long addr = (unsigned long)ptr;
161
162 return pte_offset_kernel(pmd_offset(pgd_offset_k(addr), addr), addr);
163}
164
165long arch_dma_coherent_to_pfn(struct device *dev, void *vaddr,
166 dma_addr_t dma_addr)
167{
168 pte_t *ptep = consistent_virt_to_pte(vaddr);
169
170 if (pte_none(*ptep) || !pte_present(*ptep))
171 return 0;
172 46
173 return pte_pfn(*ptep); 47 addr |= UNCACHED_SHADOW_MASK;
48 if (addr > cpuinfo.dcache_base && addr < cpuinfo.dcache_high)
49 pr_warn("ERROR: Your cache coherent area is CACHED!!!\n");
50 return (void *)addr;
174} 51}
175#endif
176 52
177/* 53void *cached_kernel_address(void *ptr)
178 * free page(s) as defined by the above mapping.
179 */
180void arch_dma_free(struct device *dev, size_t size, void *vaddr,
181 dma_addr_t dma_addr, unsigned long attrs)
182{ 54{
183 struct page *page; 55 unsigned long addr = (unsigned long)ptr;
184
185 if (in_interrupt())
186 BUG();
187
188 size = PAGE_ALIGN(size);
189
190#ifndef CONFIG_MMU
191 /* Clear SHADOW_MASK bit in address, and free as per usual */
192# ifdef CONFIG_XILINX_UNCACHED_SHADOW
193 vaddr = (void *)((unsigned)vaddr & ~UNCACHED_SHADOW_MASK);
194# endif
195 page = virt_to_page(vaddr);
196
197 do {
198 __free_reserved_page(page);
199 page++;
200 } while (size -= PAGE_SIZE);
201#else
202 do {
203 pte_t *ptep = consistent_virt_to_pte(vaddr);
204 unsigned long pfn;
205
206 if (!pte_none(*ptep) && pte_present(*ptep)) {
207 pfn = pte_pfn(*ptep);
208 pte_clear(&init_mm, (unsigned int)vaddr, ptep);
209 if (pfn_valid(pfn)) {
210 page = pfn_to_page(pfn);
211 __free_reserved_page(page);
212 }
213 }
214 vaddr += PAGE_SIZE;
215 } while (size -= PAGE_SIZE);
216 56
217 /* flush tlb */ 57 return (void *)(addr & ~UNCACHED_SHADOW_MASK);
218 flush_tlb_all();
219#endif
220} 58}
59#endif /* CONFIG_MMU */
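
On !MMU the consistent-memory support now reduces to pure address aliasing between the cached mapping and its uncached shadow. A worked example with made-up cpuinfo values, just to show the arithmetic used by uncached_kernel_address() and cached_kernel_address():

/*
 * Assume cpuinfo.dcache_base = 0x80000000 and dcache_high = 0xBFFFFFFF
 * (illustrative values), so UNCACHED_SHADOW_MASK = 0x40000000.
 *
 *   cached pointer : 0x80100000
 *   uncached alias : 0x80100000 | 0x40000000 = 0xC0100000
 *   back to cached : 0xC0100000 & ~0x40000000 = 0x80100000
 *
 * The aliased address falls outside [dcache_base, dcache_high], so the
 * "cache coherent area is CACHED" warning does not fire.  Without
 * CONFIG_XILINX_UNCACHED_SHADOW the mask is 0, the address stays inside
 * the cached window, and the warning triggers.
 */
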
diff --git a/arch/mips/include/asm/pci.h b/arch/mips/include/asm/pci.h
index 436099883022..6f48649201c5 100644
--- a/arch/mips/include/asm/pci.h
+++ b/arch/mips/include/asm/pci.h
@@ -108,7 +108,6 @@ extern unsigned long PCIBIOS_MIN_MEM;
108 108
109#define HAVE_PCI_MMAP 109#define HAVE_PCI_MMAP
110#define ARCH_GENERIC_PCI_MMAP_RESOURCE 110#define ARCH_GENERIC_PCI_MMAP_RESOURCE
111#define HAVE_ARCH_PCI_RESOURCE_TO_USER
112 111
113/* 112/*
114 * Dynamic DMA mapping stuff. 113 * Dynamic DMA mapping stuff.
diff --git a/arch/powerpc/include/asm/pci.h b/arch/powerpc/include/asm/pci.h
index 2372d35533ad..327567b8f7d6 100644
--- a/arch/powerpc/include/asm/pci.h
+++ b/arch/powerpc/include/asm/pci.h
@@ -112,8 +112,6 @@ extern pgprot_t pci_phys_mem_access_prot(struct file *file,
112 unsigned long size, 112 unsigned long size,
113 pgprot_t prot); 113 pgprot_t prot);
114 114
115#define HAVE_ARCH_PCI_RESOURCE_TO_USER
116
117extern resource_size_t pcibios_io_space_offset(struct pci_controller *hose); 115extern resource_size_t pcibios_io_space_offset(struct pci_controller *hose);
118extern void pcibios_setup_bus_devices(struct pci_bus *bus); 116extern void pcibios_setup_bus_devices(struct pci_bus *bus);
119extern void pcibios_setup_bus_self(struct pci_bus *bus); 117extern void pcibios_setup_bus_self(struct pci_bus *bus);
diff --git a/arch/powerpc/platforms/cell/spufs/inode.c b/arch/powerpc/platforms/cell/spufs/inode.c
index 065ff14b76e1..1d93e55a2de1 100644
--- a/arch/powerpc/platforms/cell/spufs/inode.c
+++ b/arch/powerpc/platforms/cell/spufs/inode.c
@@ -10,6 +10,8 @@
10 10
11#include <linux/file.h> 11#include <linux/file.h>
12#include <linux/fs.h> 12#include <linux/fs.h>
13#include <linux/fs_context.h>
14#include <linux/fs_parser.h>
13#include <linux/fsnotify.h> 15#include <linux/fsnotify.h>
14#include <linux/backing-dev.h> 16#include <linux/backing-dev.h>
15#include <linux/init.h> 17#include <linux/init.h>
@@ -20,7 +22,6 @@
20#include <linux/pagemap.h> 22#include <linux/pagemap.h>
21#include <linux/poll.h> 23#include <linux/poll.h>
22#include <linux/slab.h> 24#include <linux/slab.h>
23#include <linux/parser.h>
24 25
25#include <asm/prom.h> 26#include <asm/prom.h>
26#include <asm/spu.h> 27#include <asm/spu.h>
@@ -30,7 +31,7 @@
30#include "spufs.h" 31#include "spufs.h"
31 32
32struct spufs_sb_info { 33struct spufs_sb_info {
33 int debug; 34 bool debug;
34}; 35};
35 36
36static struct kmem_cache *spufs_inode_cache; 37static struct kmem_cache *spufs_inode_cache;
@@ -574,16 +575,27 @@ long spufs_create(struct path *path, struct dentry *dentry,
574} 575}
575 576
576/* File system initialization */ 577/* File system initialization */
578struct spufs_fs_context {
579 kuid_t uid;
580 kgid_t gid;
581 umode_t mode;
582};
583
577enum { 584enum {
578 Opt_uid, Opt_gid, Opt_mode, Opt_debug, Opt_err, 585 Opt_uid, Opt_gid, Opt_mode, Opt_debug,
586};
587
588static const struct fs_parameter_spec spufs_param_specs[] = {
589 fsparam_u32 ("gid", Opt_gid),
590 fsparam_u32oct ("mode", Opt_mode),
591 fsparam_u32 ("uid", Opt_uid),
592 fsparam_flag ("debug", Opt_debug),
593 {}
579}; 594};
580 595
581static const match_table_t spufs_tokens = { 596static const struct fs_parameter_description spufs_fs_parameters = {
582 { Opt_uid, "uid=%d" }, 597 .name = "spufs",
583 { Opt_gid, "gid=%d" }, 598 .specs = spufs_param_specs,
584 { Opt_mode, "mode=%o" },
585 { Opt_debug, "debug" },
586 { Opt_err, NULL },
587}; 599};
588 600
589static int spufs_show_options(struct seq_file *m, struct dentry *root) 601static int spufs_show_options(struct seq_file *m, struct dentry *root)
@@ -604,47 +616,41 @@ static int spufs_show_options(struct seq_file *m, struct dentry *root)
604 return 0; 616 return 0;
605} 617}
606 618
607static int 619static int spufs_parse_param(struct fs_context *fc, struct fs_parameter *param)
608spufs_parse_options(struct super_block *sb, char *options, struct inode *root) 620{
609{ 621 struct spufs_fs_context *ctx = fc->fs_private;
610 char *p; 622 struct spufs_sb_info *sbi = fc->s_fs_info;
611 substring_t args[MAX_OPT_ARGS]; 623 struct fs_parse_result result;
612 624 kuid_t uid;
613 while ((p = strsep(&options, ",")) != NULL) { 625 kgid_t gid;
614 int token, option; 626 int opt;
615 627
616 if (!*p) 628 opt = fs_parse(fc, &spufs_fs_parameters, param, &result);
617 continue; 629 if (opt < 0)
618 630 return opt;
619 token = match_token(p, spufs_tokens, args); 631
620 switch (token) { 632 switch (opt) {
621 case Opt_uid: 633 case Opt_uid:
622 if (match_int(&args[0], &option)) 634 uid = make_kuid(current_user_ns(), result.uint_32);
623 return 0; 635 if (!uid_valid(uid))
624 root->i_uid = make_kuid(current_user_ns(), option); 636 return invalf(fc, "Unknown uid");
625 if (!uid_valid(root->i_uid)) 637 ctx->uid = uid;
626 return 0; 638 break;
627 break; 639 case Opt_gid:
628 case Opt_gid: 640 gid = make_kgid(current_user_ns(), result.uint_32);
629 if (match_int(&args[0], &option)) 641 if (!gid_valid(gid))
630 return 0; 642 return invalf(fc, "Unknown gid");
631 root->i_gid = make_kgid(current_user_ns(), option); 643 ctx->gid = gid;
632 if (!gid_valid(root->i_gid)) 644 break;
633 return 0; 645 case Opt_mode:
634 break; 646 ctx->mode = result.uint_32 & S_IALLUGO;
635 case Opt_mode: 647 break;
636 if (match_octal(&args[0], &option)) 648 case Opt_debug:
637 return 0; 649 sbi->debug = true;
638 root->i_mode = option | S_IFDIR; 650 break;
639 break;
640 case Opt_debug:
641 spufs_get_sb_info(sb)->debug = 1;
642 break;
643 default:
644 return 0;
645 }
646 } 651 }
647 return 1; 652
653 return 0;
648} 654}
649 655
650static void spufs_exit_isolated_loader(void) 656static void spufs_exit_isolated_loader(void)
@@ -678,79 +684,98 @@ spufs_init_isolated_loader(void)
678 printk(KERN_INFO "spufs: SPU isolation mode enabled\n"); 684 printk(KERN_INFO "spufs: SPU isolation mode enabled\n");
679} 685}
680 686
681static int 687static int spufs_create_root(struct super_block *sb, struct fs_context *fc)
682spufs_create_root(struct super_block *sb, void *data)
683{ 688{
689 struct spufs_fs_context *ctx = fc->fs_private;
684 struct inode *inode; 690 struct inode *inode;
685 int ret;
686 691
687 ret = -ENODEV;
688 if (!spu_management_ops) 692 if (!spu_management_ops)
689 goto out; 693 return -ENODEV;
690 694
691 ret = -ENOMEM; 695 inode = spufs_new_inode(sb, S_IFDIR | ctx->mode);
692 inode = spufs_new_inode(sb, S_IFDIR | 0775);
693 if (!inode) 696 if (!inode)
694 goto out; 697 return -ENOMEM;
695 698
699 inode->i_uid = ctx->uid;
700 inode->i_gid = ctx->gid;
696 inode->i_op = &simple_dir_inode_operations; 701 inode->i_op = &simple_dir_inode_operations;
697 inode->i_fop = &simple_dir_operations; 702 inode->i_fop = &simple_dir_operations;
698 SPUFS_I(inode)->i_ctx = NULL; 703 SPUFS_I(inode)->i_ctx = NULL;
699 inc_nlink(inode); 704 inc_nlink(inode);
700 705
701 ret = -EINVAL;
702 if (!spufs_parse_options(sb, data, inode))
703 goto out_iput;
704
705 ret = -ENOMEM;
706 sb->s_root = d_make_root(inode); 706 sb->s_root = d_make_root(inode);
707 if (!sb->s_root) 707 if (!sb->s_root)
708 goto out; 708 return -ENOMEM;
709
710 return 0; 709 return 0;
711out_iput:
712 iput(inode);
713out:
714 return ret;
715} 710}
716 711
717static int 712static const struct super_operations spufs_ops = {
718spufs_fill_super(struct super_block *sb, void *data, int silent) 713 .alloc_inode = spufs_alloc_inode,
719{ 714 .free_inode = spufs_free_inode,
720 struct spufs_sb_info *info; 715 .statfs = simple_statfs,
721 static const struct super_operations s_ops = { 716 .evict_inode = spufs_evict_inode,
722 .alloc_inode = spufs_alloc_inode, 717 .show_options = spufs_show_options,
723 .free_inode = spufs_free_inode, 718};
724 .statfs = simple_statfs,
725 .evict_inode = spufs_evict_inode,
726 .show_options = spufs_show_options,
727 };
728
729 info = kzalloc(sizeof(*info), GFP_KERNEL);
730 if (!info)
731 return -ENOMEM;
732 719
720static int spufs_fill_super(struct super_block *sb, struct fs_context *fc)
721{
733 sb->s_maxbytes = MAX_LFS_FILESIZE; 722 sb->s_maxbytes = MAX_LFS_FILESIZE;
734 sb->s_blocksize = PAGE_SIZE; 723 sb->s_blocksize = PAGE_SIZE;
735 sb->s_blocksize_bits = PAGE_SHIFT; 724 sb->s_blocksize_bits = PAGE_SHIFT;
736 sb->s_magic = SPUFS_MAGIC; 725 sb->s_magic = SPUFS_MAGIC;
737 sb->s_op = &s_ops; 726 sb->s_op = &spufs_ops;
738 sb->s_fs_info = info;
739 727
740 return spufs_create_root(sb, data); 728 return spufs_create_root(sb, fc);
729}
730
731static int spufs_get_tree(struct fs_context *fc)
732{
733 return get_tree_single(fc, spufs_fill_super);
741} 734}
742 735
743static struct dentry * 736static void spufs_free_fc(struct fs_context *fc)
744spufs_mount(struct file_system_type *fstype, int flags,
745 const char *name, void *data)
746{ 737{
747 return mount_single(fstype, flags, data, spufs_fill_super); 738 kfree(fc->s_fs_info);
739}
740
741static const struct fs_context_operations spufs_context_ops = {
742 .free = spufs_free_fc,
743 .parse_param = spufs_parse_param,
744 .get_tree = spufs_get_tree,
745};
746
747static int spufs_init_fs_context(struct fs_context *fc)
748{
749 struct spufs_fs_context *ctx;
750 struct spufs_sb_info *sbi;
751
752 ctx = kzalloc(sizeof(struct spufs_fs_context), GFP_KERNEL);
753 if (!ctx)
754 goto nomem;
755
756 sbi = kzalloc(sizeof(struct spufs_sb_info), GFP_KERNEL);
757 if (!sbi)
758 goto nomem_ctx;
759
760 ctx->uid = current_uid();
761 ctx->gid = current_gid();
762 ctx->mode = 0755;
763
764 fc->s_fs_info = sbi;
765 fc->ops = &spufs_context_ops;
766 return 0;
767
768nomem_ctx:
769 kfree(ctx);
770nomem:
771 return -ENOMEM;
748} 772}
749 773
750static struct file_system_type spufs_type = { 774static struct file_system_type spufs_type = {
751 .owner = THIS_MODULE, 775 .owner = THIS_MODULE,
752 .name = "spufs", 776 .name = "spufs",
753 .mount = spufs_mount, 777 .init_fs_context = spufs_init_fs_context,
778 .parameters = &spufs_fs_parameters,
754 .kill_sb = kill_litter_super, 779 .kill_sb = kill_litter_super,
755}; 780};
756MODULE_ALIAS_FS("spufs"); 781MODULE_ALIAS_FS("spufs");
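
The spufs conversion above and the hypfs one below follow the same new mount-API shape: a parameter table, a parse_param callback handed one option at a time, and an init_fs_context hook that sets up per-mount state before get_tree. A stripped-down skeleton of that pattern, with hypothetical examplefs names, for side-by-side comparison:

#include <linux/fs.h>
#include <linux/fs_context.h>
#include <linux/fs_parser.h>
#include <linux/slab.h>

struct examplefs_ctx {			/* per-mount options, cf. spufs_fs_context */
	unsigned int mode;
};

enum { Opt_mode, };

static const struct fs_parameter_spec examplefs_param_specs[] = {
	fsparam_u32oct("mode", Opt_mode),
	{}
};

static const struct fs_parameter_description examplefs_parameters = {
	.name	= "examplefs",
	.specs	= examplefs_param_specs,
};

static int examplefs_parse_param(struct fs_context *fc, struct fs_parameter *param)
{
	struct examplefs_ctx *ctx = fc->fs_private;
	struct fs_parse_result result;
	int opt;

	opt = fs_parse(fc, &examplefs_parameters, param, &result);
	if (opt < 0)
		return opt;
	if (opt == Opt_mode)
		ctx->mode = result.uint_32 & 0777;
	return 0;
}

static int examplefs_fill_super(struct super_block *sb, struct fs_context *fc)
{
	return -ENOSYS;			/* a real fill_super builds the root inode */
}

static int examplefs_get_tree(struct fs_context *fc)
{
	return get_tree_single(fc, examplefs_fill_super);
}

static void examplefs_free_fc(struct fs_context *fc)
{
	kfree(fc->fs_private);
}

static const struct fs_context_operations examplefs_context_ops = {
	.free		= examplefs_free_fc,
	.parse_param	= examplefs_parse_param,
	.get_tree	= examplefs_get_tree,
};

static int examplefs_init_fs_context(struct fs_context *fc)
{
	struct examplefs_ctx *ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);

	if (!ctx)
		return -ENOMEM;
	fc->fs_private = ctx;
	fc->ops = &examplefs_context_ops;
	return 0;
}

The file_system_type then points .init_fs_context at this hook and .parameters at examplefs_parameters, exactly as the two real conversions do.
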
diff --git a/arch/s390/hypfs/inode.c b/arch/s390/hypfs/inode.c
index a4418fc425b8..70139d0791b6 100644
--- a/arch/s390/hypfs/inode.c
+++ b/arch/s390/hypfs/inode.c
@@ -12,17 +12,17 @@
12#include <linux/types.h> 12#include <linux/types.h>
13#include <linux/errno.h> 13#include <linux/errno.h>
14#include <linux/fs.h> 14#include <linux/fs.h>
15#include <linux/fs_context.h>
16#include <linux/fs_parser.h>
15#include <linux/namei.h> 17#include <linux/namei.h>
16#include <linux/vfs.h> 18#include <linux/vfs.h>
17#include <linux/slab.h> 19#include <linux/slab.h>
18#include <linux/pagemap.h> 20#include <linux/pagemap.h>
19#include <linux/time.h> 21#include <linux/time.h>
20#include <linux/parser.h>
21#include <linux/sysfs.h> 22#include <linux/sysfs.h>
22#include <linux/init.h> 23#include <linux/init.h>
23#include <linux/kobject.h> 24#include <linux/kobject.h>
24#include <linux/seq_file.h> 25#include <linux/seq_file.h>
25#include <linux/mount.h>
26#include <linux/uio.h> 26#include <linux/uio.h>
27#include <asm/ebcdic.h> 27#include <asm/ebcdic.h>
28#include "hypfs.h" 28#include "hypfs.h"
@@ -207,52 +207,44 @@ static int hypfs_release(struct inode *inode, struct file *filp)
207 return 0; 207 return 0;
208} 208}
209 209
210enum { opt_uid, opt_gid, opt_err }; 210enum { Opt_uid, Opt_gid, };
211 211
212static const match_table_t hypfs_tokens = { 212static const struct fs_parameter_spec hypfs_param_specs[] = {
213 {opt_uid, "uid=%u"}, 213 fsparam_u32("gid", Opt_gid),
214 {opt_gid, "gid=%u"}, 214 fsparam_u32("uid", Opt_uid),
215 {opt_err, NULL} 215 {}
216}; 216};
217 217
218static int hypfs_parse_options(char *options, struct super_block *sb) 218static const struct fs_parameter_description hypfs_fs_parameters = {
219 .name = "hypfs",
220 .specs = hypfs_param_specs,
221};
222
223static int hypfs_parse_param(struct fs_context *fc, struct fs_parameter *param)
219{ 224{
220 char *str; 225 struct hypfs_sb_info *hypfs_info = fc->s_fs_info;
221 substring_t args[MAX_OPT_ARGS]; 226 struct fs_parse_result result;
222 kuid_t uid; 227 kuid_t uid;
223 kgid_t gid; 228 kgid_t gid;
224 229 int opt;
225 if (!options) 230
226 return 0; 231 opt = fs_parse(fc, &hypfs_fs_parameters, param, &result);
227 while ((str = strsep(&options, ",")) != NULL) { 232 if (opt < 0)
228 int token, option; 233 return opt;
229 struct hypfs_sb_info *hypfs_info = sb->s_fs_info; 234
230 235 switch (opt) {
231 if (!*str) 236 case Opt_uid:
232 continue; 237 uid = make_kuid(current_user_ns(), result.uint_32);
233 token = match_token(str, hypfs_tokens, args); 238 if (!uid_valid(uid))
234 switch (token) { 239 return invalf(fc, "Unknown uid");
235 case opt_uid: 240 hypfs_info->uid = uid;
236 if (match_int(&args[0], &option)) 241 break;
237 return -EINVAL; 242 case Opt_gid:
238 uid = make_kuid(current_user_ns(), option); 243 gid = make_kgid(current_user_ns(), result.uint_32);
239 if (!uid_valid(uid)) 244 if (!gid_valid(gid))
240 return -EINVAL; 245 return invalf(fc, "Unknown gid");
241 hypfs_info->uid = uid; 246 hypfs_info->gid = gid;
242 break; 247 break;
243 case opt_gid:
244 if (match_int(&args[0], &option))
245 return -EINVAL;
246 gid = make_kgid(current_user_ns(), option);
247 if (!gid_valid(gid))
248 return -EINVAL;
249 hypfs_info->gid = gid;
250 break;
251 case opt_err:
252 default:
253 pr_err("%s is not a valid mount option\n", str);
254 return -EINVAL;
255 }
256 } 248 }
257 return 0; 249 return 0;
258} 250}
@@ -266,26 +258,18 @@ static int hypfs_show_options(struct seq_file *s, struct dentry *root)
266 return 0; 258 return 0;
267} 259}
268 260
269static int hypfs_fill_super(struct super_block *sb, void *data, int silent) 261static int hypfs_fill_super(struct super_block *sb, struct fs_context *fc)
270{ 262{
263 struct hypfs_sb_info *sbi = sb->s_fs_info;
271 struct inode *root_inode; 264 struct inode *root_inode;
272 struct dentry *root_dentry; 265 struct dentry *root_dentry, *update_file;
273 int rc = 0; 266 int rc;
274 struct hypfs_sb_info *sbi;
275 267
276 sbi = kzalloc(sizeof(struct hypfs_sb_info), GFP_KERNEL);
277 if (!sbi)
278 return -ENOMEM;
279 mutex_init(&sbi->lock);
280 sbi->uid = current_uid();
281 sbi->gid = current_gid();
282 sb->s_fs_info = sbi;
283 sb->s_blocksize = PAGE_SIZE; 268 sb->s_blocksize = PAGE_SIZE;
284 sb->s_blocksize_bits = PAGE_SHIFT; 269 sb->s_blocksize_bits = PAGE_SHIFT;
285 sb->s_magic = HYPFS_MAGIC; 270 sb->s_magic = HYPFS_MAGIC;
286 sb->s_op = &hypfs_s_ops; 271 sb->s_op = &hypfs_s_ops;
287 if (hypfs_parse_options(data, sb)) 272
288 return -EINVAL;
289 root_inode = hypfs_make_inode(sb, S_IFDIR | 0755); 273 root_inode = hypfs_make_inode(sb, S_IFDIR | 0755);
290 if (!root_inode) 274 if (!root_inode)
291 return -ENOMEM; 275 return -ENOMEM;
@@ -300,18 +284,46 @@ static int hypfs_fill_super(struct super_block *sb, void *data, int silent)
300 rc = hypfs_diag_create_files(root_dentry); 284 rc = hypfs_diag_create_files(root_dentry);
301 if (rc) 285 if (rc)
302 return rc; 286 return rc;
303 sbi->update_file = hypfs_create_update_file(root_dentry); 287 update_file = hypfs_create_update_file(root_dentry);
304 if (IS_ERR(sbi->update_file)) 288 if (IS_ERR(update_file))
305 return PTR_ERR(sbi->update_file); 289 return PTR_ERR(update_file);
290 sbi->update_file = update_file;
306 hypfs_update_update(sb); 291 hypfs_update_update(sb);
307 pr_info("Hypervisor filesystem mounted\n"); 292 pr_info("Hypervisor filesystem mounted\n");
308 return 0; 293 return 0;
309} 294}
310 295
311static struct dentry *hypfs_mount(struct file_system_type *fst, int flags, 296static int hypfs_get_tree(struct fs_context *fc)
312 const char *devname, void *data) 297{
298 return get_tree_single(fc, hypfs_fill_super);
299}
300
301static void hypfs_free_fc(struct fs_context *fc)
313{ 302{
314 return mount_single(fst, flags, data, hypfs_fill_super); 303 kfree(fc->s_fs_info);
304}
305
306static const struct fs_context_operations hypfs_context_ops = {
307 .free = hypfs_free_fc,
308 .parse_param = hypfs_parse_param,
309 .get_tree = hypfs_get_tree,
310};
311
312static int hypfs_init_fs_context(struct fs_context *fc)
313{
314 struct hypfs_sb_info *sbi;
315
316 sbi = kzalloc(sizeof(struct hypfs_sb_info), GFP_KERNEL);
317 if (!sbi)
318 return -ENOMEM;
319
320 mutex_init(&sbi->lock);
321 sbi->uid = current_uid();
322 sbi->gid = current_gid();
323
324 fc->s_fs_info = sbi;
325 fc->ops = &hypfs_context_ops;
326 return 0;
315} 327}
316 328
317static void hypfs_kill_super(struct super_block *sb) 329static void hypfs_kill_super(struct super_block *sb)
@@ -442,7 +454,8 @@ static const struct file_operations hypfs_file_ops = {
442static struct file_system_type hypfs_type = { 454static struct file_system_type hypfs_type = {
443 .owner = THIS_MODULE, 455 .owner = THIS_MODULE,
444 .name = "s390_hypfs", 456 .name = "s390_hypfs",
445 .mount = hypfs_mount, 457 .init_fs_context = hypfs_init_fs_context,
458 .parameters = &hypfs_fs_parameters,
446 .kill_sb = hypfs_kill_super 459 .kill_sb = hypfs_kill_super
447}; 460};
448 461
diff --git a/arch/sparc/include/asm/pci.h b/arch/sparc/include/asm/pci.h
index cfec79bb1831..4deddf430e5d 100644
--- a/arch/sparc/include/asm/pci.h
+++ b/arch/sparc/include/asm/pci.h
@@ -38,8 +38,6 @@ static inline int pci_proc_domain(struct pci_bus *bus)
38#define arch_can_pci_mmap_io() 1 38#define arch_can_pci_mmap_io() 1
39#define HAVE_ARCH_PCI_GET_UNMAPPED_AREA 39#define HAVE_ARCH_PCI_GET_UNMAPPED_AREA
40#define get_pci_unmapped_area get_fb_unmapped_area 40#define get_pci_unmapped_area get_fb_unmapped_area
41
42#define HAVE_ARCH_PCI_RESOURCE_TO_USER
43#endif /* CONFIG_SPARC64 */ 41#endif /* CONFIG_SPARC64 */
44 42
45#if defined(CONFIG_SPARC64) || defined(CONFIG_LEON_PCI) 43#if defined(CONFIG_SPARC64) || defined(CONFIG_LEON_PCI)
diff --git a/drivers/acpi/pci_root.c b/drivers/acpi/pci_root.c
index 314a187ed572..d1e666ef3fcc 100644
--- a/drivers/acpi/pci_root.c
+++ b/drivers/acpi/pci_root.c
@@ -15,7 +15,6 @@
15#include <linux/pm_runtime.h> 15#include <linux/pm_runtime.h>
16#include <linux/pci.h> 16#include <linux/pci.h>
17#include <linux/pci-acpi.h> 17#include <linux/pci-acpi.h>
18#include <linux/pci-aspm.h>
19#include <linux/dmar.h> 18#include <linux/dmar.h>
20#include <linux/acpi.h> 19#include <linux/acpi.h>
21#include <linux/slab.h> 20#include <linux/slab.h>
diff --git a/drivers/char/hw_random/core.c b/drivers/char/hw_random/core.c
index bdab5d9af8d2..80b850ef1bf6 100644
--- a/drivers/char/hw_random/core.c
+++ b/drivers/char/hw_random/core.c
@@ -68,7 +68,7 @@ static void add_early_randomness(struct hwrng *rng)
68 size_t size = min_t(size_t, 16, rng_buffer_size()); 68 size_t size = min_t(size_t, 16, rng_buffer_size());
69 69
70 mutex_lock(&reading_mutex); 70 mutex_lock(&reading_mutex);
71 bytes_read = rng_get_data(rng, rng_buffer, size, 1); 71 bytes_read = rng_get_data(rng, rng_buffer, size, 0);
72 mutex_unlock(&reading_mutex); 72 mutex_unlock(&reading_mutex);
73 if (bytes_read > 0) 73 if (bytes_read > 0)
74 add_device_randomness(rng_buffer, bytes_read); 74 add_device_randomness(rng_buffer, bytes_read);
diff --git a/drivers/char/xillybus/xillybus_pcie.c b/drivers/char/xillybus/xillybus_pcie.c
index 02c15952b103..18b0c392bc93 100644
--- a/drivers/char/xillybus/xillybus_pcie.c
+++ b/drivers/char/xillybus/xillybus_pcie.c
@@ -9,7 +9,6 @@
9 9
10#include <linux/module.h> 10#include <linux/module.h>
11#include <linux/pci.h> 11#include <linux/pci.h>
12#include <linux/pci-aspm.h>
13#include <linux/slab.h> 12#include <linux/slab.h>
14#include "xillybus.h" 13#include "xillybus.h"
15 14
diff --git a/drivers/crypto/hisilicon/sec/sec_algs.c b/drivers/crypto/hisilicon/sec/sec_algs.c
index e0508ea160f1..c27e7160d2df 100644
--- a/drivers/crypto/hisilicon/sec/sec_algs.c
+++ b/drivers/crypto/hisilicon/sec/sec_algs.c
@@ -153,6 +153,24 @@ static void sec_alg_skcipher_init_context(struct crypto_skcipher *atfm,
153 ctx->cipher_alg); 153 ctx->cipher_alg);
154} 154}
155 155
156static void sec_free_hw_sgl(struct sec_hw_sgl *hw_sgl,
157 dma_addr_t psec_sgl, struct sec_dev_info *info)
158{
159 struct sec_hw_sgl *sgl_current, *sgl_next;
160 dma_addr_t sgl_next_dma;
161
162 sgl_current = hw_sgl;
163 while (sgl_current) {
164 sgl_next = sgl_current->next;
165 sgl_next_dma = sgl_current->next_sgl;
166
167 dma_pool_free(info->hw_sgl_pool, sgl_current, psec_sgl);
168
169 sgl_current = sgl_next;
170 psec_sgl = sgl_next_dma;
171 }
172}
173
156static int sec_alloc_and_fill_hw_sgl(struct sec_hw_sgl **sec_sgl, 174static int sec_alloc_and_fill_hw_sgl(struct sec_hw_sgl **sec_sgl,
157 dma_addr_t *psec_sgl, 175 dma_addr_t *psec_sgl,
158 struct scatterlist *sgl, 176 struct scatterlist *sgl,
@@ -199,35 +217,12 @@ static int sec_alloc_and_fill_hw_sgl(struct sec_hw_sgl **sec_sgl,
199 return 0; 217 return 0;
200 218
201err_free_hw_sgls: 219err_free_hw_sgls:
202 sgl_current = *sec_sgl; 220 sec_free_hw_sgl(*sec_sgl, *psec_sgl, info);
203 while (sgl_current) {
204 sgl_next = sgl_current->next;
205 dma_pool_free(info->hw_sgl_pool, sgl_current,
206 sgl_current->next_sgl);
207 sgl_current = sgl_next;
208 }
209 *psec_sgl = 0; 221 *psec_sgl = 0;
210 222
211 return ret; 223 return ret;
212} 224}
213 225
214static void sec_free_hw_sgl(struct sec_hw_sgl *hw_sgl,
215 dma_addr_t psec_sgl, struct sec_dev_info *info)
216{
217 struct sec_hw_sgl *sgl_current, *sgl_next;
218
219 if (!hw_sgl)
220 return;
221 sgl_current = hw_sgl;
222 while (sgl_current->next) {
223 sgl_next = sgl_current->next;
224 dma_pool_free(info->hw_sgl_pool, sgl_current,
225 sgl_current->next_sgl);
226 sgl_current = sgl_next;
227 }
228 dma_pool_free(info->hw_sgl_pool, hw_sgl, psec_sgl);
229}
230
231static int sec_alg_skcipher_setkey(struct crypto_skcipher *tfm, 226static int sec_alg_skcipher_setkey(struct crypto_skcipher *tfm,
232 const u8 *key, unsigned int keylen, 227 const u8 *key, unsigned int keylen,
233 enum sec_cipher_alg alg) 228 enum sec_cipher_alg alg)
diff --git a/drivers/crypto/hisilicon/zip/zip_crypto.c b/drivers/crypto/hisilicon/zip/zip_crypto.c
index 5a3f84dcdcde..59023545a1c4 100644
--- a/drivers/crypto/hisilicon/zip/zip_crypto.c
+++ b/drivers/crypto/hisilicon/zip/zip_crypto.c
@@ -559,7 +559,7 @@ static int hisi_zip_acompress(struct acomp_req *acomp_req)
559 struct hisi_zip_ctx *ctx = crypto_tfm_ctx(acomp_req->base.tfm); 559 struct hisi_zip_ctx *ctx = crypto_tfm_ctx(acomp_req->base.tfm);
560 struct hisi_zip_qp_ctx *qp_ctx = &ctx->qp_ctx[QPC_COMP]; 560 struct hisi_zip_qp_ctx *qp_ctx = &ctx->qp_ctx[QPC_COMP];
561 struct hisi_zip_req *req; 561 struct hisi_zip_req *req;
562 size_t head_size; 562 int head_size;
563 int ret; 563 int ret;
564 564
565 /* let's output compression head now */ 565 /* let's output compression head now */
@@ -567,7 +567,7 @@ static int hisi_zip_acompress(struct acomp_req *acomp_req)
567 if (head_size < 0) 567 if (head_size < 0)
568 return -ENOMEM; 568 return -ENOMEM;
569 569
570 req = hisi_zip_create_req(acomp_req, qp_ctx, head_size, true); 570 req = hisi_zip_create_req(acomp_req, qp_ctx, (size_t)head_size, true);
571 if (IS_ERR(req)) 571 if (IS_ERR(req))
572 return PTR_ERR(req); 572 return PTR_ERR(req);
573 573
diff --git a/drivers/crypto/hisilicon/zip/zip_main.c b/drivers/crypto/hisilicon/zip/zip_main.c
index 6e0ca75585d4..1b2ee96c888d 100644
--- a/drivers/crypto/hisilicon/zip/zip_main.c
+++ b/drivers/crypto/hisilicon/zip/zip_main.c
@@ -785,7 +785,6 @@ static int hisi_zip_clear_vft_config(struct hisi_zip *hisi_zip)
785 785
786static int hisi_zip_sriov_enable(struct pci_dev *pdev, int max_vfs) 786static int hisi_zip_sriov_enable(struct pci_dev *pdev, int max_vfs)
787{ 787{
788#ifdef CONFIG_PCI_IOV
789 struct hisi_zip *hisi_zip = pci_get_drvdata(pdev); 788 struct hisi_zip *hisi_zip = pci_get_drvdata(pdev);
790 int pre_existing_vfs, num_vfs, ret; 789 int pre_existing_vfs, num_vfs, ret;
791 790
@@ -815,9 +814,6 @@ static int hisi_zip_sriov_enable(struct pci_dev *pdev, int max_vfs)
815 } 814 }
816 815
817 return num_vfs; 816 return num_vfs;
818#else
819 return 0;
820#endif
821} 817}
822 818
823static int hisi_zip_sriov_disable(struct pci_dev *pdev) 819static int hisi_zip_sriov_disable(struct pci_dev *pdev)
@@ -948,7 +944,8 @@ static struct pci_driver hisi_zip_pci_driver = {
948 .id_table = hisi_zip_dev_ids, 944 .id_table = hisi_zip_dev_ids,
949 .probe = hisi_zip_probe, 945 .probe = hisi_zip_probe,
950 .remove = hisi_zip_remove, 946 .remove = hisi_zip_remove,
951 .sriov_configure = hisi_zip_sriov_configure, 947 .sriov_configure = IS_ENABLED(CONFIG_PCI_IOV) ?
948 hisi_zip_sriov_configure : 0,
952 .err_handler = &hisi_zip_err_handler, 949 .err_handler = &hisi_zip_err_handler,
953}; 950};
954 951
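
The sriov hook above now leans on IS_ENABLED() being a compile-time 0 or 1, so the conditional wiring happens in the initializer instead of behind #ifdef while the functions always compile. A tiny, hypothetical miniature of the same idiom:

#include <linux/kconfig.h>
#include <linux/pci.h>

static int example_sriov_configure(struct pci_dev *pdev, int num_vfs)
{
	return 0;			/* would enable or disable VFs here */
}

static struct pci_driver example_pci_driver = {
	.name		 = "example",
	.sriov_configure = IS_ENABLED(CONFIG_PCI_IOV) ?
				example_sriov_configure : NULL,
};
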
diff --git a/drivers/crypto/inside-secure/safexcel.c b/drivers/crypto/inside-secure/safexcel.c
index b456b85f46d3..4ab1bde8dd9b 100644
--- a/drivers/crypto/inside-secure/safexcel.c
+++ b/drivers/crypto/inside-secure/safexcel.c
@@ -1789,32 +1789,50 @@ static struct pci_driver safexcel_pci_driver = {
1789}; 1789};
1790#endif 1790#endif
1791 1791
1792static int __init safexcel_init(void) 1792/* Unfortunately, we have to resort to global variables here */
1793{ 1793#if IS_ENABLED(CONFIG_PCI)
1794 int rc; 1794int pcireg_rc = -EINVAL; /* Default safe value */
1795 1795#endif
1796#if IS_ENABLED(CONFIG_OF) 1796#if IS_ENABLED(CONFIG_OF)
1797 /* Register platform driver */ 1797int ofreg_rc = -EINVAL; /* Default safe value */
1798 platform_driver_register(&crypto_safexcel);
1799#endif 1798#endif
1800 1799
1800static int __init safexcel_init(void)
1801{
1801#if IS_ENABLED(CONFIG_PCI) 1802#if IS_ENABLED(CONFIG_PCI)
1802 /* Register PCI driver */ 1803 /* Register PCI driver */
1803 rc = pci_register_driver(&safexcel_pci_driver); 1804 pcireg_rc = pci_register_driver(&safexcel_pci_driver);
1804#endif 1805#endif
1805 1806
1806 return 0; 1807#if IS_ENABLED(CONFIG_OF)
1808 /* Register platform driver */
1809 ofreg_rc = platform_driver_register(&crypto_safexcel);
1810 #if IS_ENABLED(CONFIG_PCI)
1811 /* Return success if either PCI or OF registered OK */
1812 return pcireg_rc ? ofreg_rc : 0;
1813 #else
1814 return ofreg_rc;
1815 #endif
1816#else
1817 #if IS_ENABLED(CONFIG_PCI)
1818 return pcireg_rc;
1819 #else
1820 return -EINVAL;
1821 #endif
1822#endif
1807} 1823}
1808 1824
1809static void __exit safexcel_exit(void) 1825static void __exit safexcel_exit(void)
1810{ 1826{
1811#if IS_ENABLED(CONFIG_OF) 1827#if IS_ENABLED(CONFIG_OF)
1812 /* Unregister platform driver */ 1828 /* Unregister platform driver */
1829 if (!ofreg_rc)
1813 platform_driver_unregister(&crypto_safexcel); 1830 platform_driver_unregister(&crypto_safexcel);
1814#endif 1831#endif
1815 1832
1816#if IS_ENABLED(CONFIG_PCI) 1833#if IS_ENABLED(CONFIG_PCI)
1817 /* Unregister PCI driver if successfully registered before */ 1834 /* Unregister PCI driver if successfully registered before */
1835 if (!pcireg_rc)
1818 pci_unregister_driver(&safexcel_pci_driver); 1836 pci_unregister_driver(&safexcel_pci_driver);
1819#endif 1837#endif
1820} 1838}
diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c
index cb6c10b1bf36..56e3068c9947 100644
--- a/drivers/crypto/talitos.c
+++ b/drivers/crypto/talitos.c
@@ -3116,6 +3116,7 @@ static int talitos_remove(struct platform_device *ofdev)
3116 break; 3116 break;
3117 case CRYPTO_ALG_TYPE_AEAD: 3117 case CRYPTO_ALG_TYPE_AEAD:
3118 crypto_unregister_aead(&t_alg->algt.alg.aead); 3118 crypto_unregister_aead(&t_alg->algt.alg.aead);
3119 break;
3119 case CRYPTO_ALG_TYPE_AHASH: 3120 case CRYPTO_ALG_TYPE_AHASH:
3120 crypto_unregister_ahash(&t_alg->algt.alg.hash); 3121 crypto_unregister_ahash(&t_alg->algt.alg.hash);
3121 break; 3122 break;
diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig
index a958b9625bba..1ecb5124421c 100644
--- a/drivers/hid/Kconfig
+++ b/drivers/hid/Kconfig
@@ -273,6 +273,15 @@ config HID_CP2112
273 and gpiochip to expose these functions of the CP2112. The 273 and gpiochip to expose these functions of the CP2112. The
274 customizable USB descriptor fields are exposed as sysfs attributes. 274 customizable USB descriptor fields are exposed as sysfs attributes.
275 275
276config HID_CREATIVE_SB0540
277 tristate "Creative SB0540 infrared receiver"
278 depends on USB_HID
279 help
280 Support for Creative infrared SB0540-compatible remote controls, such
281 as the RM-1500 and RM-1800 remotes.
282
 283	  Say Y here if you want support for the Creative SB0540 infrared receiver.
284
276config HID_CYPRESS 285config HID_CYPRESS
277 tristate "Cypress mouse and barcode readers" 286 tristate "Cypress mouse and barcode readers"
278 depends on HID 287 depends on HID
diff --git a/drivers/hid/Makefile b/drivers/hid/Makefile
index cc5d827c9164..0c03308cfb08 100644
--- a/drivers/hid/Makefile
+++ b/drivers/hid/Makefile
@@ -27,6 +27,7 @@ obj-$(CONFIG_HID_ALPS) += hid-alps.o
27obj-$(CONFIG_HID_ACRUX) += hid-axff.o 27obj-$(CONFIG_HID_ACRUX) += hid-axff.o
28obj-$(CONFIG_HID_APPLE) += hid-apple.o 28obj-$(CONFIG_HID_APPLE) += hid-apple.o
29obj-$(CONFIG_HID_APPLEIR) += hid-appleir.o 29obj-$(CONFIG_HID_APPLEIR) += hid-appleir.o
30obj-$(CONFIG_HID_CREATIVE_SB0540) += hid-creative-sb0540.o
30obj-$(CONFIG_HID_ASUS) += hid-asus.o 31obj-$(CONFIG_HID_ASUS) += hid-asus.o
31obj-$(CONFIG_HID_AUREAL) += hid-aureal.o 32obj-$(CONFIG_HID_AUREAL) += hid-aureal.o
32obj-$(CONFIG_HID_BELKIN) += hid-belkin.o 33obj-$(CONFIG_HID_BELKIN) += hid-belkin.o
diff --git a/drivers/hid/hid-apple.c b/drivers/hid/hid-apple.c
index 81df62f48c4c..6ac8becc2372 100644
--- a/drivers/hid/hid-apple.c
+++ b/drivers/hid/hid-apple.c
@@ -54,7 +54,6 @@ MODULE_PARM_DESC(swap_opt_cmd, "Swap the Option (\"Alt\") and Command (\"Flag\")
54struct apple_sc { 54struct apple_sc {
55 unsigned long quirks; 55 unsigned long quirks;
56 unsigned int fn_on; 56 unsigned int fn_on;
57 DECLARE_BITMAP(pressed_fn, KEY_CNT);
58 DECLARE_BITMAP(pressed_numlock, KEY_CNT); 57 DECLARE_BITMAP(pressed_numlock, KEY_CNT);
59}; 58};
60 59
@@ -181,6 +180,8 @@ static int hidinput_apple_event(struct hid_device *hid, struct input_dev *input,
181{ 180{
182 struct apple_sc *asc = hid_get_drvdata(hid); 181 struct apple_sc *asc = hid_get_drvdata(hid);
183 const struct apple_key_translation *trans, *table; 182 const struct apple_key_translation *trans, *table;
183 bool do_translate;
184 u16 code = 0;
184 185
185 if (usage->code == KEY_FN) { 186 if (usage->code == KEY_FN) {
186 asc->fn_on = !!value; 187 asc->fn_on = !!value;
@@ -189,8 +190,6 @@ static int hidinput_apple_event(struct hid_device *hid, struct input_dev *input,
189 } 190 }
190 191
191 if (fnmode) { 192 if (fnmode) {
192 int do_translate;
193
194 if (hid->product >= USB_DEVICE_ID_APPLE_WELLSPRING4_ANSI && 193 if (hid->product >= USB_DEVICE_ID_APPLE_WELLSPRING4_ANSI &&
195 hid->product <= USB_DEVICE_ID_APPLE_WELLSPRING4A_JIS) 194 hid->product <= USB_DEVICE_ID_APPLE_WELLSPRING4A_JIS)
196 table = macbookair_fn_keys; 195 table = macbookair_fn_keys;
@@ -202,25 +201,33 @@ static int hidinput_apple_event(struct hid_device *hid, struct input_dev *input,
202 trans = apple_find_translation (table, usage->code); 201 trans = apple_find_translation (table, usage->code);
203 202
204 if (trans) { 203 if (trans) {
205 if (test_bit(usage->code, asc->pressed_fn)) 204 if (test_bit(trans->from, input->key))
206 do_translate = 1; 205 code = trans->from;
207 else if (trans->flags & APPLE_FLAG_FKEY) 206 else if (test_bit(trans->to, input->key))
208 do_translate = (fnmode == 2 && asc->fn_on) || 207 code = trans->to;
209 (fnmode == 1 && !asc->fn_on); 208
210 else 209 if (!code) {
211 do_translate = asc->fn_on; 210 if (trans->flags & APPLE_FLAG_FKEY) {
212 211 switch (fnmode) {
213 if (do_translate) { 212 case 1:
214 if (value) 213 do_translate = !asc->fn_on;
215 set_bit(usage->code, asc->pressed_fn); 214 break;
216 else 215 case 2:
217 clear_bit(usage->code, asc->pressed_fn); 216 do_translate = asc->fn_on;
218 217 break;
219 input_event(input, usage->type, trans->to, 218 default:
220 value); 219 /* should never happen */
221 220 do_translate = false;
222 return 1; 221 }
222 } else {
223 do_translate = asc->fn_on;
224 }
225
226 code = do_translate ? trans->to : trans->from;
223 } 227 }
228
229 input_event(input, usage->type, code, value);
230 return 1;
224 } 231 }
225 232
226 if (asc->quirks & APPLE_NUMLOCK_EMULATION && 233 if (asc->quirks & APPLE_NUMLOCK_EMULATION &&
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index 210b81a56e1a..3eaee2c37931 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -1311,8 +1311,8 @@ u32 hid_field_extract(const struct hid_device *hid, u8 *report,
1311 unsigned offset, unsigned n) 1311 unsigned offset, unsigned n)
1312{ 1312{
1313 if (n > 32) { 1313 if (n > 32) {
1314 hid_warn(hid, "hid_field_extract() called with n (%d) > 32! (%s)\n", 1314 hid_warn_once(hid, "%s() called with n (%d) > 32! (%s)\n",
1315 n, current->comm); 1315 __func__, n, current->comm);
1316 n = 32; 1316 n = 32;
1317 } 1317 }
1318 1318
diff --git a/drivers/hid/hid-cougar.c b/drivers/hid/hid-cougar.c
index e0bb7b34f3a4..4ff3bc1d25e2 100644
--- a/drivers/hid/hid-cougar.c
+++ b/drivers/hid/hid-cougar.c
@@ -207,7 +207,7 @@ static int cougar_probe(struct hid_device *hdev,
207 error = hid_parse(hdev); 207 error = hid_parse(hdev);
208 if (error) { 208 if (error) {
209 hid_err(hdev, "parse failed\n"); 209 hid_err(hdev, "parse failed\n");
210 goto fail; 210 return error;
211 } 211 }
212 212
213 if (hdev->collection->usage == COUGAR_VENDOR_USAGE) { 213 if (hdev->collection->usage == COUGAR_VENDOR_USAGE) {
@@ -219,7 +219,7 @@ static int cougar_probe(struct hid_device *hdev,
219 error = hid_hw_start(hdev, connect_mask); 219 error = hid_hw_start(hdev, connect_mask);
220 if (error) { 220 if (error) {
221 hid_err(hdev, "hw start failed\n"); 221 hid_err(hdev, "hw start failed\n");
222 goto fail; 222 return error;
223 } 223 }
224 224
225 error = cougar_bind_shared_data(hdev, cougar); 225 error = cougar_bind_shared_data(hdev, cougar);
@@ -249,8 +249,6 @@ static int cougar_probe(struct hid_device *hdev,
249 249
250fail_stop_and_cleanup: 250fail_stop_and_cleanup:
251 hid_hw_stop(hdev); 251 hid_hw_stop(hdev);
252fail:
253 hid_set_drvdata(hdev, NULL);
254 return error; 252 return error;
255} 253}
256 254
diff --git a/drivers/hid/hid-creative-sb0540.c b/drivers/hid/hid-creative-sb0540.c
new file mode 100644
index 000000000000..b4c8e7a5d3e0
--- /dev/null
+++ b/drivers/hid/hid-creative-sb0540.c
@@ -0,0 +1,268 @@
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * HID driver for the Creative SB0540 receiver
4 *
5 * Copyright (C) 2019 Red Hat Inc. All Rights Reserved
6 *
7 */
8
9#include <linux/device.h>
10#include <linux/hid.h>
11#include <linux/module.h>
12#include "hid-ids.h"
13
14MODULE_AUTHOR("Bastien Nocera <hadess@hadess.net>");
15MODULE_DESCRIPTION("HID Creative SB0540 receiver");
16MODULE_LICENSE("GPL");
17
18static const unsigned short creative_sb0540_key_table[] = {
19 KEY_POWER,
20 KEY_RESERVED, /* text: 24bit */
21 KEY_RESERVED, /* 24bit wheel up */
22 KEY_RESERVED, /* 24bit wheel down */
23 KEY_RESERVED, /* text: CMSS */
24 KEY_RESERVED, /* CMSS wheel Up */
25 KEY_RESERVED, /* CMSS wheel Down */
26 KEY_RESERVED, /* text: EAX */
27 KEY_RESERVED, /* EAX wheel up */
28 KEY_RESERVED, /* EAX wheel down */
29 KEY_RESERVED, /* text: 3D Midi */
30 KEY_RESERVED, /* 3D Midi wheel up */
31 KEY_RESERVED, /* 3D Midi wheel down */
32 KEY_MUTE,
33 KEY_VOLUMEUP,
34 KEY_VOLUMEDOWN,
35 KEY_UP,
36 KEY_LEFT,
37 KEY_RIGHT,
38 KEY_REWIND,
39 KEY_OK,
40 KEY_FASTFORWARD,
41 KEY_DOWN,
42 KEY_AGAIN, /* text: Return, symbol: Jump to */
43 KEY_PLAY, /* text: Start */
44 KEY_ESC, /* text: Cancel */
45 KEY_RECORD,
46 KEY_OPTION,
47 KEY_MENU, /* text: Display */
48 KEY_PREVIOUS,
49 KEY_PLAYPAUSE,
50 KEY_NEXT,
51 KEY_SLOW,
52 KEY_STOP,
53 KEY_NUMERIC_1,
54 KEY_NUMERIC_2,
55 KEY_NUMERIC_3,
56 KEY_NUMERIC_4,
57 KEY_NUMERIC_5,
58 KEY_NUMERIC_6,
59 KEY_NUMERIC_7,
60 KEY_NUMERIC_8,
61 KEY_NUMERIC_9,
62 KEY_NUMERIC_0
63};
64
65/*
66 * Codes and keys from lirc's
67 * remotes/creative/lircd.conf.alsa_usb
68 * order and size must match creative_sb0540_key_table[] above
69 */
70static const unsigned short creative_sb0540_codes[] = {
71 0x619E,
72 0x916E,
73 0x926D,
74 0x936C,
75 0x718E,
76 0x946B,
77 0x956A,
78 0x8C73,
79 0x9669,
80 0x9768,
81 0x9867,
82 0x9966,
83 0x9A65,
84 0x6E91,
85 0x629D,
86 0x639C,
87 0x7B84,
88 0x6B94,
89 0x728D,
90 0x8778,
91 0x817E,
92 0x758A,
93 0x8D72,
94 0x8E71,
95 0x8877,
96 0x7C83,
97 0x738C,
98 0x827D,
99 0x7689,
100 0x7F80,
101 0x7986,
102 0x7A85,
103 0x7D82,
104 0x857A,
105 0x8B74,
106 0x8F70,
107 0x906F,
108 0x8A75,
109 0x847B,
110 0x7887,
111 0x8976,
112 0x837C,
113 0x7788,
114 0x807F
115};
116
117struct creative_sb0540 {
118 struct input_dev *input_dev;
119 struct hid_device *hid;
120 unsigned short keymap[ARRAY_SIZE(creative_sb0540_key_table)];
121};
122
123static inline u64 reverse(u64 data, int bits)
124{
125 int i;
126 u64 c;
127
128 c = 0;
129 for (i = 0; i < bits; i++) {
130 c |= (u64) (((data & (((u64) 1) << i)) ? 1 : 0))
131 << (bits - 1 - i);
132 }
133 return (c);
134}
135
136static int get_key(struct creative_sb0540 *creative_sb0540, u64 keycode)
137{
138 int i;
139
140 for (i = 0; i < ARRAY_SIZE(creative_sb0540_codes); i++) {
141 if (creative_sb0540_codes[i] == keycode)
142 return creative_sb0540->keymap[i];
143 }
144
145 return 0;
146
147}
148
149static int creative_sb0540_raw_event(struct hid_device *hid,
150 struct hid_report *report, u8 *data, int len)
151{
152 struct creative_sb0540 *creative_sb0540 = hid_get_drvdata(hid);
153 u64 code, main_code;
154 int key;
155
156 if (len != 6)
157 return 0;
158
159 /* From daemons/hw_hiddev.c sb0540_rec() in lirc */
160 code = reverse(data[5], 8);
161 main_code = (code << 8) + ((~code) & 0xff);
162
163 /*
164 * Flip to get values in the same format as
165 * remotes/creative/lircd.conf.alsa_usb in lirc
166 */
167 main_code = ((main_code & 0xff) << 8) +
168 ((main_code & 0xff00) >> 8);
169
170 key = get_key(creative_sb0540, main_code);
171 if (key == 0 || key == KEY_RESERVED) {
172 hid_err(hid, "Could not get a key for main_code %llX\n",
173 main_code);
174 return 0;
175 }
176
177 input_report_key(creative_sb0540->input_dev, key, 1);
178 input_report_key(creative_sb0540->input_dev, key, 0);
179 input_sync(creative_sb0540->input_dev);
180
181 /* let hidraw and hiddev handle the report */
182 return 0;
183}
184
185static int creative_sb0540_input_configured(struct hid_device *hid,
186 struct hid_input *hidinput)
187{
188 struct input_dev *input_dev = hidinput->input;
189 struct creative_sb0540 *creative_sb0540 = hid_get_drvdata(hid);
190 int i;
191
192 creative_sb0540->input_dev = input_dev;
193
194 input_dev->keycode = creative_sb0540->keymap;
195 input_dev->keycodesize = sizeof(unsigned short);
196 input_dev->keycodemax = ARRAY_SIZE(creative_sb0540->keymap);
197
198 input_dev->evbit[0] = BIT(EV_KEY) | BIT(EV_REP);
199
200 memcpy(creative_sb0540->keymap, creative_sb0540_key_table,
201 sizeof(creative_sb0540->keymap));
202 for (i = 0; i < ARRAY_SIZE(creative_sb0540_key_table); i++)
203 set_bit(creative_sb0540->keymap[i], input_dev->keybit);
204 clear_bit(KEY_RESERVED, input_dev->keybit);
205
206 return 0;
207}
208
209static int creative_sb0540_input_mapping(struct hid_device *hid,
210 struct hid_input *hi, struct hid_field *field,
211 struct hid_usage *usage, unsigned long **bit, int *max)
212{
213 /*
214 * We are remapping the keys ourselves, so ignore the hid-input
215 * keymap processing.
216 */
217 return -1;
218}
219
220static int creative_sb0540_probe(struct hid_device *hid,
221 const struct hid_device_id *id)
222{
223 int ret;
224 struct creative_sb0540 *creative_sb0540;
225
226 creative_sb0540 = devm_kzalloc(&hid->dev,
227 sizeof(struct creative_sb0540), GFP_KERNEL);
228
229 if (!creative_sb0540)
230 return -ENOMEM;
231
232 creative_sb0540->hid = hid;
233
234 /* force input as some remotes bypass the input registration */
235 hid->quirks |= HID_QUIRK_HIDINPUT_FORCE;
236
237 hid_set_drvdata(hid, creative_sb0540);
238
239 ret = hid_parse(hid);
240 if (ret) {
241 hid_err(hid, "parse failed\n");
242 return ret;
243 }
244
245 ret = hid_hw_start(hid, HID_CONNECT_DEFAULT);
246 if (ret) {
247 hid_err(hid, "hw start failed\n");
248 return ret;
249 }
250
251 return ret;
252}
253
254static const struct hid_device_id creative_sb0540_devices[] = {
255 { HID_USB_DEVICE(USB_VENDOR_ID_CREATIVELABS, USB_DEVICE_ID_CREATIVE_SB0540) },
256 { }
257};
258MODULE_DEVICE_TABLE(hid, creative_sb0540_devices);
259
260static struct hid_driver creative_sb0540_driver = {
261 .name = "creative-sb0540",
262 .id_table = creative_sb0540_devices,
263 .raw_event = creative_sb0540_raw_event,
264 .input_configured = creative_sb0540_input_configured,
265 .probe = creative_sb0540_probe,
266 .input_mapping = creative_sb0540_input_mapping,
267};
268module_hid_driver(creative_sb0540_driver);
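
As a quick illustration of the decode path in creative_sb0540_raw_event() above, here is a minimal userspace sketch (not kernel code); the report byte 0x79 is an assumed sample value. Reversing its bits gives 0x9E, appending the bitwise complement gives 0x9E61, and the final byte swap yields 0x619E, the first entry of creative_sb0540_codes[], which maps to KEY_POWER.

#include <stdint.h>
#include <stdio.h>

/* Same bit-reversal as the driver's reverse() helper. */
static uint64_t reverse_bits(uint64_t data, int bits)
{
	uint64_t c = 0;
	int i;

	for (i = 0; i < bits; i++)
		c |= ((data >> i) & 1) << (bits - 1 - i);
	return c;
}

int main(void)
{
	uint64_t code = reverse_bits(0x79, 8);                /* 0x9E */
	uint64_t main_code = (code << 8) + ((~code) & 0xff);  /* 0x9E61 */

	/* Byte swap to match remotes/creative/lircd.conf.alsa_usb. */
	main_code = ((main_code & 0xff) << 8) + ((main_code & 0xff00) >> 8);

	printf("main_code = 0x%llX\n", (unsigned long long)main_code); /* 0x619E */
	return 0;
}
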
diff --git a/drivers/hid/hid-gfrm.c b/drivers/hid/hid-gfrm.c
index 86c317320bf2..699186ff2349 100644
--- a/drivers/hid/hid-gfrm.c
+++ b/drivers/hid/hid-gfrm.c
@@ -123,12 +123,6 @@ done:
123 return ret; 123 return ret;
124} 124}
125 125
126static void gfrm_remove(struct hid_device *hdev)
127{
128 hid_hw_stop(hdev);
129 hid_set_drvdata(hdev, NULL);
130}
131
132static const struct hid_device_id gfrm_devices[] = { 126static const struct hid_device_id gfrm_devices[] = {
133 { HID_BLUETOOTH_DEVICE(0x58, 0x2000), 127 { HID_BLUETOOTH_DEVICE(0x58, 0x2000),
134 .driver_data = GFRM100 }, 128 .driver_data = GFRM100 },
@@ -142,7 +136,6 @@ static struct hid_driver gfrm_driver = {
142 .name = "gfrm", 136 .name = "gfrm",
143 .id_table = gfrm_devices, 137 .id_table = gfrm_devices,
144 .probe = gfrm_probe, 138 .probe = gfrm_probe,
145 .remove = gfrm_remove,
146 .input_mapping = gfrm_input_mapping, 139 .input_mapping = gfrm_input_mapping,
147 .raw_event = gfrm_raw_event, 140 .raw_event = gfrm_raw_event,
148 .input_configured = gfrm_input_configured, 141 .input_configured = gfrm_input_configured,
diff --git a/drivers/hid/hid-hyperv.c b/drivers/hid/hid-hyperv.c
index 7795831d37c2..cc5b09b87ab0 100644
--- a/drivers/hid/hid-hyperv.c
+++ b/drivers/hid/hid-hyperv.c
@@ -104,8 +104,8 @@ struct synthhid_input_report {
104 104
105#pragma pack(pop) 105#pragma pack(pop)
106 106
107#define INPUTVSC_SEND_RING_BUFFER_SIZE (10*PAGE_SIZE) 107#define INPUTVSC_SEND_RING_BUFFER_SIZE (40 * 1024)
108#define INPUTVSC_RECV_RING_BUFFER_SIZE (10*PAGE_SIZE) 108#define INPUTVSC_RECV_RING_BUFFER_SIZE (40 * 1024)
109 109
110 110
111enum pipe_prot_msg_type { 111enum pipe_prot_msg_type {
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index 0a00be19f7a0..76969a22b0f2 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -314,6 +314,7 @@
314#define USB_VENDOR_ID_CREATIVELABS 0x041e 314#define USB_VENDOR_ID_CREATIVELABS 0x041e
315#define USB_DEVICE_ID_CREATIVE_SB_OMNI_SURROUND_51 0x322c 315#define USB_DEVICE_ID_CREATIVE_SB_OMNI_SURROUND_51 0x322c
316#define USB_DEVICE_ID_PRODIKEYS_PCMIDI 0x2801 316#define USB_DEVICE_ID_PRODIKEYS_PCMIDI 0x2801
317#define USB_DEVICE_ID_CREATIVE_SB0540 0x3100
317 318
318#define USB_VENDOR_ID_CVTOUCH 0x1ff7 319#define USB_VENDOR_ID_CVTOUCH 0x1ff7
319#define USB_DEVICE_ID_CVTOUCH_SCREEN 0x0013 320#define USB_DEVICE_ID_CVTOUCH_SCREEN 0x0013
@@ -568,6 +569,7 @@
568#define USB_PRODUCT_ID_HP_LOGITECH_OEM_USB_OPTICAL_MOUSE_0B4A 0x0b4a 569#define USB_PRODUCT_ID_HP_LOGITECH_OEM_USB_OPTICAL_MOUSE_0B4A 0x0b4a
569#define USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE 0x134a 570#define USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE 0x134a
570#define USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE_094A 0x094a 571#define USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE_094A 0x094a
572#define USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE_0941 0x0941
571#define USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE_0641 0x0641 573#define USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE_0641 0x0641
572 574
573#define USB_VENDOR_ID_HUION 0x256c 575#define USB_VENDOR_ID_HUION 0x256c
@@ -769,7 +771,8 @@
769#define USB_DEVICE_ID_LOGITECH_NANO_RECEIVER 0xc52f 771#define USB_DEVICE_ID_LOGITECH_NANO_RECEIVER 0xc52f
770#define USB_DEVICE_ID_LOGITECH_UNIFYING_RECEIVER_2 0xc532 772#define USB_DEVICE_ID_LOGITECH_UNIFYING_RECEIVER_2 0xc532
771#define USB_DEVICE_ID_LOGITECH_NANO_RECEIVER_2 0xc534 773#define USB_DEVICE_ID_LOGITECH_NANO_RECEIVER_2 0xc534
772#define USB_DEVICE_ID_LOGITECH_NANO_RECEIVER_LIGHTSPEED 0xc539 774#define USB_DEVICE_ID_LOGITECH_NANO_RECEIVER_LIGHTSPEED_1 0xc539
775#define USB_DEVICE_ID_LOGITECH_NANO_RECEIVER_LIGHTSPEED_1_1 0xc53f
773#define USB_DEVICE_ID_LOGITECH_NANO_RECEIVER_POWERPLAY 0xc53a 776#define USB_DEVICE_ID_LOGITECH_NANO_RECEIVER_POWERPLAY 0xc53a
774#define USB_DEVICE_ID_SPACETRAVELLER 0xc623 777#define USB_DEVICE_ID_SPACETRAVELLER 0xc623
775#define USB_DEVICE_ID_SPACENAVIGATOR 0xc626 778#define USB_DEVICE_ID_SPACENAVIGATOR 0xc626
diff --git a/drivers/hid/hid-lenovo.c b/drivers/hid/hid-lenovo.c
index 364bc7f11d9d..96fa2a2c2cd3 100644
--- a/drivers/hid/hid-lenovo.c
+++ b/drivers/hid/hid-lenovo.c
@@ -866,8 +866,6 @@ static void lenovo_remove_tpkbd(struct hid_device *hdev)
866 866
867 led_classdev_unregister(&data_pointer->led_micmute); 867 led_classdev_unregister(&data_pointer->led_micmute);
868 led_classdev_unregister(&data_pointer->led_mute); 868 led_classdev_unregister(&data_pointer->led_mute);
869
870 hid_set_drvdata(hdev, NULL);
871} 869}
872 870
873static void lenovo_remove_cptkbd(struct hid_device *hdev) 871static void lenovo_remove_cptkbd(struct hid_device *hdev)
diff --git a/drivers/hid/hid-lg.c b/drivers/hid/hid-lg.c
index 5008a3dc28f4..0dc7cdfc56f7 100644
--- a/drivers/hid/hid-lg.c
+++ b/drivers/hid/hid-lg.c
@@ -818,7 +818,7 @@ static int lg_probe(struct hid_device *hdev, const struct hid_device_id *id)
818 818
819 if (!buf) { 819 if (!buf) {
820 ret = -ENOMEM; 820 ret = -ENOMEM;
821 goto err_free; 821 goto err_stop;
822 } 822 }
823 823
824 ret = hid_hw_raw_request(hdev, buf[0], buf, sizeof(cbuf), 824 ret = hid_hw_raw_request(hdev, buf[0], buf, sizeof(cbuf),
@@ -850,9 +850,12 @@ static int lg_probe(struct hid_device *hdev, const struct hid_device_id *id)
850 ret = lg4ff_init(hdev); 850 ret = lg4ff_init(hdev);
851 851
852 if (ret) 852 if (ret)
853 goto err_free; 853 goto err_stop;
854 854
855 return 0; 855 return 0;
856
857err_stop:
858 hid_hw_stop(hdev);
856err_free: 859err_free:
857 kfree(drv_data); 860 kfree(drv_data);
858 return ret; 861 return ret;
@@ -863,8 +866,7 @@ static void lg_remove(struct hid_device *hdev)
863 struct lg_drv_data *drv_data = hid_get_drvdata(hdev); 866 struct lg_drv_data *drv_data = hid_get_drvdata(hdev);
864 if (drv_data->quirks & LG_FF4) 867 if (drv_data->quirks & LG_FF4)
865 lg4ff_deinit(hdev); 868 lg4ff_deinit(hdev);
866 else 869 hid_hw_stop(hdev);
867 hid_hw_stop(hdev);
868 kfree(drv_data); 870 kfree(drv_data);
869} 871}
870 872
diff --git a/drivers/hid/hid-lg4ff.c b/drivers/hid/hid-lg4ff.c
index cefba038520c..03f0220062ca 100644
--- a/drivers/hid/hid-lg4ff.c
+++ b/drivers/hid/hid-lg4ff.c
@@ -1477,7 +1477,6 @@ int lg4ff_deinit(struct hid_device *hid)
1477 } 1477 }
1478 } 1478 }
1479#endif 1479#endif
1480 hid_hw_stop(hid);
1481 drv_data->device_props = NULL; 1480 drv_data->device_props = NULL;
1482 1481
1483 kfree(entry); 1482 kfree(entry);
diff --git a/drivers/hid/hid-logitech-dj.c b/drivers/hid/hid-logitech-dj.c
index cc47f948c1d0..bb50d6e7745b 100644
--- a/drivers/hid/hid-logitech-dj.c
+++ b/drivers/hid/hid-logitech-dj.c
@@ -380,9 +380,9 @@ static const char consumer_descriptor[] = {
380 0x75, 0x10, /* REPORT_SIZE (16) */ 380 0x75, 0x10, /* REPORT_SIZE (16) */
381 0x95, 0x02, /* REPORT_COUNT (2) */ 381 0x95, 0x02, /* REPORT_COUNT (2) */
382 0x15, 0x01, /* LOGICAL_MIN (1) */ 382 0x15, 0x01, /* LOGICAL_MIN (1) */
383 0x26, 0x8C, 0x02, /* LOGICAL_MAX (652) */ 383 0x26, 0xFF, 0x02, /* LOGICAL_MAX (767) */
384 0x19, 0x01, /* USAGE_MIN (1) */ 384 0x19, 0x01, /* USAGE_MIN (1) */
385 0x2A, 0x8C, 0x02, /* USAGE_MAX (652) */ 385 0x2A, 0xFF, 0x02, /* USAGE_MAX (767) */
386 0x81, 0x00, /* INPUT (Data Ary Abs) */ 386 0x81, 0x00, /* INPUT (Data Ary Abs) */
387 0xC0, /* END_COLLECTION */ 387 0xC0, /* END_COLLECTION */
388}; /* */ 388}; /* */
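
The 0x26 (Logical Maximum) and 0x2A (Usage Maximum) items in the hunk above take a two-byte little-endian operand, so replacing 0x8C, 0x02 with 0xFF, 0x02 raises the range from 0x028C = 652 to 0x02FF = 767. A one-line check, purely for illustration:

#include <stdio.h>

int main(void)
{
	unsigned char lo = 0xFF, hi = 0x02;

	printf("%d\n", lo | (hi << 8)); /* prints 767 */
	return 0;
}
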
@@ -959,6 +959,7 @@ static void logi_hidpp_recv_queue_notif(struct hid_device *hdev,
959 break; 959 break;
960 case 0x07: 960 case 0x07:
961 device_type = "eQUAD step 4 Gaming"; 961 device_type = "eQUAD step 4 Gaming";
962 logi_hidpp_dev_conn_notif_equad(hdev, hidpp_report, &workitem);
962 break; 963 break;
963 case 0x08: 964 case 0x08:
964 device_type = "eQUAD step 4 for gamepads"; 965 device_type = "eQUAD step 4 for gamepads";
@@ -968,7 +969,12 @@ static void logi_hidpp_recv_queue_notif(struct hid_device *hdev,
968 logi_hidpp_dev_conn_notif_equad(hdev, hidpp_report, &workitem); 969 logi_hidpp_dev_conn_notif_equad(hdev, hidpp_report, &workitem);
969 break; 970 break;
970 case 0x0c: 971 case 0x0c:
971 device_type = "eQUAD Lightspeed"; 972 device_type = "eQUAD Lightspeed 1";
973 logi_hidpp_dev_conn_notif_equad(hdev, hidpp_report, &workitem);
974 workitem.reports_supported |= STD_KEYBOARD;
975 break;
976 case 0x0d:
977 device_type = "eQUAD Lightspeed 1_1";
972 logi_hidpp_dev_conn_notif_equad(hdev, hidpp_report, &workitem); 978 logi_hidpp_dev_conn_notif_equad(hdev, hidpp_report, &workitem);
973 workitem.reports_supported |= STD_KEYBOARD; 979 workitem.reports_supported |= STD_KEYBOARD;
974 break; 980 break;
@@ -1734,14 +1740,14 @@ static int logi_dj_probe(struct hid_device *hdev,
1734 if (retval < 0) { 1740 if (retval < 0) {
1735 hid_err(hdev, "%s: logi_dj_recv_query_paired_devices error:%d\n", 1741 hid_err(hdev, "%s: logi_dj_recv_query_paired_devices error:%d\n",
1736 __func__, retval); 1742 __func__, retval);
1737 goto logi_dj_recv_query_paired_devices_failed; 1743 /*
1744 * This can happen with a KVM, let the probe succeed,
1745 * logi_dj_recv_queue_unknown_work will retry later.
1746 */
1738 } 1747 }
1739 } 1748 }
1740 1749
1741 return retval; 1750 return 0;
1742
1743logi_dj_recv_query_paired_devices_failed:
1744 hid_hw_close(hdev);
1745 1751
1746llopen_failed: 1752llopen_failed:
1747switch_to_dj_mode_fail: 1753switch_to_dj_mode_fail:
@@ -1832,9 +1838,17 @@ static const struct hid_device_id logi_dj_receivers[] = {
1832 HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 1838 HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH,
1833 USB_DEVICE_ID_LOGITECH_NANO_RECEIVER_2), 1839 USB_DEVICE_ID_LOGITECH_NANO_RECEIVER_2),
1834 .driver_data = recvr_type_hidpp}, 1840 .driver_data = recvr_type_hidpp},
1841 { /* Logitech G700(s) receiver (0xc531) */
1842 HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH,
1843 0xc531),
1844 .driver_data = recvr_type_gaming_hidpp},
1835 { /* Logitech lightspeed receiver (0xc539) */ 1845 { /* Logitech lightspeed receiver (0xc539) */
1836 HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 1846 HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH,
1837 USB_DEVICE_ID_LOGITECH_NANO_RECEIVER_LIGHTSPEED), 1847 USB_DEVICE_ID_LOGITECH_NANO_RECEIVER_LIGHTSPEED_1),
1848 .driver_data = recvr_type_gaming_hidpp},
1849 { /* Logitech lightspeed receiver (0xc53f) */
1850 HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH,
1851 USB_DEVICE_ID_LOGITECH_NANO_RECEIVER_LIGHTSPEED_1_1),
1838 .driver_data = recvr_type_gaming_hidpp}, 1852 .driver_data = recvr_type_gaming_hidpp},
1839 { /* Logitech 27 MHz HID++ 1.0 receiver (0xc513) */ 1853 { /* Logitech 27 MHz HID++ 1.0 receiver (0xc513) */
1840 HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_MX3000_RECEIVER), 1854 HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_MX3000_RECEIVER),
diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
index b603c14d043b..3cfeb1629f79 100644
--- a/drivers/hid/hid-multitouch.c
+++ b/drivers/hid/hid-multitouch.c
@@ -68,6 +68,7 @@ MODULE_LICENSE("GPL");
68#define MT_QUIRK_STICKY_FINGERS BIT(16) 68#define MT_QUIRK_STICKY_FINGERS BIT(16)
69#define MT_QUIRK_ASUS_CUSTOM_UP BIT(17) 69#define MT_QUIRK_ASUS_CUSTOM_UP BIT(17)
70#define MT_QUIRK_WIN8_PTP_BUTTONS BIT(18) 70#define MT_QUIRK_WIN8_PTP_BUTTONS BIT(18)
71#define MT_QUIRK_SEPARATE_APP_REPORT BIT(19)
71 72
72#define MT_INPUTMODE_TOUCHSCREEN 0x02 73#define MT_INPUTMODE_TOUCHSCREEN 0x02
73#define MT_INPUTMODE_TOUCHPAD 0x03 74#define MT_INPUTMODE_TOUCHPAD 0x03
@@ -103,6 +104,7 @@ struct mt_usages {
103struct mt_application { 104struct mt_application {
104 struct list_head list; 105 struct list_head list;
105 unsigned int application; 106 unsigned int application;
107 unsigned int report_id;
106 struct list_head mt_usages; /* mt usages list */ 108 struct list_head mt_usages; /* mt usages list */
107 109
108 __s32 quirks; 110 __s32 quirks;
@@ -203,6 +205,7 @@ static void mt_post_parse(struct mt_device *td, struct mt_application *app);
203#define MT_CLS_VTL 0x0110 205#define MT_CLS_VTL 0x0110
204#define MT_CLS_GOOGLE 0x0111 206#define MT_CLS_GOOGLE 0x0111
205#define MT_CLS_RAZER_BLADE_STEALTH 0x0112 207#define MT_CLS_RAZER_BLADE_STEALTH 0x0112
208#define MT_CLS_SMART_TECH 0x0113
206 209
207#define MT_DEFAULT_MAXCONTACT 10 210#define MT_DEFAULT_MAXCONTACT 10
208#define MT_MAX_MAXCONTACT 250 211#define MT_MAX_MAXCONTACT 250
@@ -263,7 +266,8 @@ static const struct mt_class mt_classes[] = {
263 MT_QUIRK_HOVERING | 266 MT_QUIRK_HOVERING |
264 MT_QUIRK_CONTACT_CNT_ACCURATE | 267 MT_QUIRK_CONTACT_CNT_ACCURATE |
265 MT_QUIRK_STICKY_FINGERS | 268 MT_QUIRK_STICKY_FINGERS |
266 MT_QUIRK_WIN8_PTP_BUTTONS }, 269 MT_QUIRK_WIN8_PTP_BUTTONS,
270 .export_all_inputs = true },
267 { .name = MT_CLS_EXPORT_ALL_INPUTS, 271 { .name = MT_CLS_EXPORT_ALL_INPUTS,
268 .quirks = MT_QUIRK_ALWAYS_VALID | 272 .quirks = MT_QUIRK_ALWAYS_VALID |
269 MT_QUIRK_CONTACT_CNT_ACCURATE, 273 MT_QUIRK_CONTACT_CNT_ACCURATE,
@@ -353,6 +357,12 @@ static const struct mt_class mt_classes[] = {
353 MT_QUIRK_CONTACT_CNT_ACCURATE | 357 MT_QUIRK_CONTACT_CNT_ACCURATE |
354 MT_QUIRK_WIN8_PTP_BUTTONS, 358 MT_QUIRK_WIN8_PTP_BUTTONS,
355 }, 359 },
360 { .name = MT_CLS_SMART_TECH,
361 .quirks = MT_QUIRK_ALWAYS_VALID |
362 MT_QUIRK_IGNORE_DUPLICATES |
363 MT_QUIRK_CONTACT_CNT_ACCURATE |
364 MT_QUIRK_SEPARATE_APP_REPORT,
365 },
356 { } 366 { }
357}; 367};
358 368
@@ -509,8 +519,9 @@ static struct mt_usages *mt_allocate_usage(struct hid_device *hdev,
509} 519}
510 520
511static struct mt_application *mt_allocate_application(struct mt_device *td, 521static struct mt_application *mt_allocate_application(struct mt_device *td,
512 unsigned int application) 522 struct hid_report *report)
513{ 523{
524 unsigned int application = report->application;
514 struct mt_application *mt_application; 525 struct mt_application *mt_application;
515 526
516 mt_application = devm_kzalloc(&td->hdev->dev, sizeof(*mt_application), 527 mt_application = devm_kzalloc(&td->hdev->dev, sizeof(*mt_application),
@@ -535,6 +546,7 @@ static struct mt_application *mt_allocate_application(struct mt_device *td,
535 mt_application->scantime = DEFAULT_ZERO; 546 mt_application->scantime = DEFAULT_ZERO;
536 mt_application->raw_cc = DEFAULT_ZERO; 547 mt_application->raw_cc = DEFAULT_ZERO;
537 mt_application->quirks = td->mtclass.quirks; 548 mt_application->quirks = td->mtclass.quirks;
549 mt_application->report_id = report->id;
538 550
539 list_add_tail(&mt_application->list, &td->applications); 551 list_add_tail(&mt_application->list, &td->applications);
540 552
@@ -542,19 +554,23 @@ static struct mt_application *mt_allocate_application(struct mt_device *td,
542} 554}
543 555
544static struct mt_application *mt_find_application(struct mt_device *td, 556static struct mt_application *mt_find_application(struct mt_device *td,
545 unsigned int application) 557 struct hid_report *report)
546{ 558{
559 unsigned int application = report->application;
547 struct mt_application *tmp, *mt_application = NULL; 560 struct mt_application *tmp, *mt_application = NULL;
548 561
549 list_for_each_entry(tmp, &td->applications, list) { 562 list_for_each_entry(tmp, &td->applications, list) {
550 if (application == tmp->application) { 563 if (application == tmp->application) {
551 mt_application = tmp; 564 if (!(td->mtclass.quirks & MT_QUIRK_SEPARATE_APP_REPORT) ||
552 break; 565 tmp->report_id == report->id) {
566 mt_application = tmp;
567 break;
568 }
553 } 569 }
554 } 570 }
555 571
556 if (!mt_application) 572 if (!mt_application)
557 mt_application = mt_allocate_application(td, application); 573 mt_application = mt_allocate_application(td, report);
558 574
559 return mt_application; 575 return mt_application;
560} 576}
@@ -571,7 +587,7 @@ static struct mt_report_data *mt_allocate_report_data(struct mt_device *td,
571 return NULL; 587 return NULL;
572 588
573 rdata->report = report; 589 rdata->report = report;
574 rdata->application = mt_find_application(td, report->application); 590 rdata->application = mt_find_application(td, report);
575 591
576 if (!rdata->application) { 592 if (!rdata->application) {
577 devm_kfree(&td->hdev->dev, rdata); 593 devm_kfree(&td->hdev->dev, rdata);
@@ -1561,6 +1577,9 @@ static int mt_input_configured(struct hid_device *hdev, struct hid_input *hi)
1561 case HID_VD_ASUS_CUSTOM_MEDIA_KEYS: 1577 case HID_VD_ASUS_CUSTOM_MEDIA_KEYS:
1562 suffix = "Custom Media Keys"; 1578 suffix = "Custom Media Keys";
1563 break; 1579 break;
1580 case HID_DG_PEN:
1581 suffix = "Stylus";
1582 break;
1564 default: 1583 default:
1565 suffix = "UNKNOWN"; 1584 suffix = "UNKNOWN";
1566 break; 1585 break;
@@ -2022,6 +2041,10 @@ static const struct hid_device_id mt_devices[] = {
2022 HID_DEVICE(BUS_I2C, HID_GROUP_MULTITOUCH_WIN_8, 2041 HID_DEVICE(BUS_I2C, HID_GROUP_MULTITOUCH_WIN_8,
2023 USB_VENDOR_ID_SYNAPTICS, 0x8323) }, 2042 USB_VENDOR_ID_SYNAPTICS, 0x8323) },
2024 2043
2044 /* Smart Tech panels */
2045 { .driver_data = MT_CLS_SMART_TECH,
2046 MT_USB_DEVICE(0x0b8c, 0x0092)},
2047
2025 /* Stantum panels */ 2048 /* Stantum panels */
2026 { .driver_data = MT_CLS_CONFIDENCE, 2049 { .driver_data = MT_CLS_CONFIDENCE,
2027 MT_USB_DEVICE(USB_VENDOR_ID_STANTUM_STM, 2050 MT_USB_DEVICE(USB_VENDOR_ID_STANTUM_STM,
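
The MT_QUIRK_SEPARATE_APP_REPORT changes above make mt_find_application() key its lookup on the report id in addition to the HID application. A standalone sketch of that matching rule (the usage value 0x000d0004 is only illustrative, not taken from the driver):

#include <stdbool.h>
#include <stdio.h>

struct app_entry {
	unsigned int application;
	unsigned int report_id;
};

static bool app_matches(const struct app_entry *e, unsigned int application,
			unsigned int report_id, bool separate_app_report)
{
	if (e->application != application)
		return false;
	/*
	 * Without the quirk, one entry is shared by every report of the
	 * same application; with it, the report id must match too.
	 */
	return !separate_app_report || e->report_id == report_id;
}

int main(void)
{
	struct app_entry touch = { .application = 0x000d0004, .report_id = 1 };

	printf("shared slot:   %d\n", app_matches(&touch, 0x000d0004, 2, false)); /* 1 */
	printf("separate slot: %d\n", app_matches(&touch, 0x000d0004, 2, true));  /* 0 */
	return 0;
}
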
diff --git a/drivers/hid/hid-picolcd_core.c b/drivers/hid/hid-picolcd_core.c
index 5f7a39a5d4af..1b5c63241af0 100644
--- a/drivers/hid/hid-picolcd_core.c
+++ b/drivers/hid/hid-picolcd_core.c
@@ -534,8 +534,7 @@ static int picolcd_probe(struct hid_device *hdev,
534 data = kzalloc(sizeof(struct picolcd_data), GFP_KERNEL); 534 data = kzalloc(sizeof(struct picolcd_data), GFP_KERNEL);
535 if (data == NULL) { 535 if (data == NULL) {
536 hid_err(hdev, "can't allocate space for Minibox PicoLCD device data\n"); 536 hid_err(hdev, "can't allocate space for Minibox PicoLCD device data\n");
537 error = -ENOMEM; 537 return -ENOMEM;
538 goto err_no_cleanup;
539 } 538 }
540 539
541 spin_lock_init(&data->lock); 540 spin_lock_init(&data->lock);
@@ -597,9 +596,6 @@ err_cleanup_hid_hw:
597 hid_hw_stop(hdev); 596 hid_hw_stop(hdev);
598err_cleanup_data: 597err_cleanup_data:
599 kfree(data); 598 kfree(data);
600err_no_cleanup:
601 hid_set_drvdata(hdev, NULL);
602
603 return error; 599 return error;
604} 600}
605 601
@@ -635,7 +631,6 @@ static void picolcd_remove(struct hid_device *hdev)
635 picolcd_exit_cir(data); 631 picolcd_exit_cir(data);
636 picolcd_exit_keys(data); 632 picolcd_exit_keys(data);
637 633
638 hid_set_drvdata(hdev, NULL);
639 mutex_destroy(&data->mutex); 634 mutex_destroy(&data->mutex);
640 /* Finally, clean up the picolcd data itself */ 635 /* Finally, clean up the picolcd data itself */
641 kfree(data); 636 kfree(data);
diff --git a/drivers/hid/hid-prodikeys.c b/drivers/hid/hid-prodikeys.c
index 21544ebff855..5a3b3d974d84 100644
--- a/drivers/hid/hid-prodikeys.c
+++ b/drivers/hid/hid-prodikeys.c
@@ -551,10 +551,14 @@ static void pcmidi_setup_extra_keys(
551 551
552static int pcmidi_set_operational(struct pcmidi_snd *pm) 552static int pcmidi_set_operational(struct pcmidi_snd *pm)
553{ 553{
554 int rc;
555
554 if (pm->ifnum != 1) 556 if (pm->ifnum != 1)
555 return 0; /* only set up ONCE for interface 1 */ 557 return 0; /* only set up ONCE for interface 1 */
556 558
557 pcmidi_get_output_report(pm); 559 rc = pcmidi_get_output_report(pm);
560 if (rc < 0)
561 return rc;
558 pcmidi_submit_output_report(pm, 0xc1); 562 pcmidi_submit_output_report(pm, 0xc1);
559 return 0; 563 return 0;
560} 564}
@@ -683,7 +687,11 @@ static int pcmidi_snd_initialise(struct pcmidi_snd *pm)
683 spin_lock_init(&pm->rawmidi_in_lock); 687 spin_lock_init(&pm->rawmidi_in_lock);
684 688
685 init_sustain_timers(pm); 689 init_sustain_timers(pm);
686 pcmidi_set_operational(pm); 690 err = pcmidi_set_operational(pm);
691 if (err < 0) {
692 pk_error("failed to find output report\n");
693 goto fail_register;
694 }
687 695
688 /* register it */ 696 /* register it */
689 err = snd_card_register(card); 697 err = snd_card_register(card);
diff --git a/drivers/hid/hid-quirks.c b/drivers/hid/hid-quirks.c
index 166f41f3173b..c50bcd967d99 100644
--- a/drivers/hid/hid-quirks.c
+++ b/drivers/hid/hid-quirks.c
@@ -92,6 +92,7 @@ static const struct hid_device_id hid_quirks[] = {
92 { HID_USB_DEVICE(USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_LOGITECH_OEM_USB_OPTICAL_MOUSE_0B4A), HID_QUIRK_ALWAYS_POLL }, 92 { HID_USB_DEVICE(USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_LOGITECH_OEM_USB_OPTICAL_MOUSE_0B4A), HID_QUIRK_ALWAYS_POLL },
93 { HID_USB_DEVICE(USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE), HID_QUIRK_ALWAYS_POLL }, 93 { HID_USB_DEVICE(USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE), HID_QUIRK_ALWAYS_POLL },
94 { HID_USB_DEVICE(USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE_094A), HID_QUIRK_ALWAYS_POLL }, 94 { HID_USB_DEVICE(USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE_094A), HID_QUIRK_ALWAYS_POLL },
95 { HID_USB_DEVICE(USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE_0941), HID_QUIRK_ALWAYS_POLL },
95 { HID_USB_DEVICE(USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE_0641), HID_QUIRK_ALWAYS_POLL }, 96 { HID_USB_DEVICE(USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE_0641), HID_QUIRK_ALWAYS_POLL },
96 { HID_USB_DEVICE(USB_VENDOR_ID_IDEACOM, USB_DEVICE_ID_IDEACOM_IDC6680), HID_QUIRK_MULTI_INPUT }, 97 { HID_USB_DEVICE(USB_VENDOR_ID_IDEACOM, USB_DEVICE_ID_IDEACOM_IDC6680), HID_QUIRK_MULTI_INPUT },
97 { HID_USB_DEVICE(USB_VENDOR_ID_INNOMEDIA, USB_DEVICE_ID_INNEX_GENESIS_ATARI), HID_QUIRK_MULTI_INPUT }, 98 { HID_USB_DEVICE(USB_VENDOR_ID_INNOMEDIA, USB_DEVICE_ID_INNEX_GENESIS_ATARI), HID_QUIRK_MULTI_INPUT },
diff --git a/drivers/hid/hid-sensor-hub.c b/drivers/hid/hid-sensor-hub.c
index be92a6f79687..94c7398b5c27 100644
--- a/drivers/hid/hid-sensor-hub.c
+++ b/drivers/hid/hid-sensor-hub.c
@@ -742,7 +742,6 @@ static void sensor_hub_remove(struct hid_device *hdev)
742 } 742 }
743 spin_unlock_irqrestore(&data->lock, flags); 743 spin_unlock_irqrestore(&data->lock, flags);
744 mfd_remove_devices(&hdev->dev); 744 mfd_remove_devices(&hdev->dev);
745 hid_set_drvdata(hdev, NULL);
746 mutex_destroy(&data->mutex); 745 mutex_destroy(&data->mutex);
747} 746}
748 747
diff --git a/drivers/hid/hid-sony.c b/drivers/hid/hid-sony.c
index 49dd2d905c7f..73c0f7a95e2d 100644
--- a/drivers/hid/hid-sony.c
+++ b/drivers/hid/hid-sony.c
@@ -2811,7 +2811,6 @@ err_stop:
2811 sony_cancel_work_sync(sc); 2811 sony_cancel_work_sync(sc);
2812 sony_remove_dev_list(sc); 2812 sony_remove_dev_list(sc);
2813 sony_release_device_id(sc); 2813 sony_release_device_id(sc);
2814 hid_hw_stop(hdev);
2815 return ret; 2814 return ret;
2816} 2815}
2817 2816
@@ -2876,6 +2875,7 @@ static int sony_probe(struct hid_device *hdev, const struct hid_device_id *id)
2876 */ 2875 */
2877 if (!(hdev->claimed & HID_CLAIMED_INPUT)) { 2876 if (!(hdev->claimed & HID_CLAIMED_INPUT)) {
2878 hid_err(hdev, "failed to claim input\n"); 2877 hid_err(hdev, "failed to claim input\n");
2878 hid_hw_stop(hdev);
2879 return -ENODEV; 2879 return -ENODEV;
2880 } 2880 }
2881 2881
diff --git a/drivers/hid/hidraw.c b/drivers/hid/hidraw.c
index 006bd6f4f653..bbc6ec1aa5cb 100644
--- a/drivers/hid/hidraw.c
+++ b/drivers/hid/hidraw.c
@@ -252,7 +252,7 @@ static __poll_t hidraw_poll(struct file *file, poll_table *wait)
252 252
253 poll_wait(file, &list->hidraw->wait, wait); 253 poll_wait(file, &list->hidraw->wait, wait);
254 if (list->head != list->tail) 254 if (list->head != list->tail)
255 return EPOLLIN | EPOLLRDNORM; 255 return EPOLLIN | EPOLLRDNORM | EPOLLOUT;
256 if (!list->hidraw->exist) 256 if (!list->hidraw->exist)
257 return EPOLLERR | EPOLLHUP; 257 return EPOLLERR | EPOLLHUP;
258 return 0; 258 return 0;
@@ -370,7 +370,7 @@ static long hidraw_ioctl(struct file *file, unsigned int cmd,
370 370
371 mutex_lock(&minors_lock); 371 mutex_lock(&minors_lock);
372 dev = hidraw_table[minor]; 372 dev = hidraw_table[minor];
373 if (!dev) { 373 if (!dev || !dev->exist) {
374 ret = -ENODEV; 374 ret = -ENODEV;
375 goto out; 375 goto out;
376 } 376 }
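
For callers of the hidraw character device, the poll change above means EPOLLOUT is now reported alongside EPOLLIN whenever input is queued. A small userspace sketch (the /dev/hidraw0 path is an assumption):

#include <fcntl.h>
#include <poll.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	struct pollfd pfd;
	int ret;

	pfd.fd = open("/dev/hidraw0", O_RDWR);
	if (pfd.fd < 0) {
		perror("open");
		return 1;
	}
	pfd.events = POLLIN | POLLOUT;

	ret = poll(&pfd, 1, 1000); /* wait up to one second */
	if (ret > 0)
		printf("POLLIN=%d POLLOUT=%d\n",
		       !!(pfd.revents & POLLIN), !!(pfd.revents & POLLOUT));

	close(pfd.fd);
	return 0;
}
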
diff --git a/drivers/hid/i2c-hid/i2c-hid-core.c b/drivers/hid/i2c-hid/i2c-hid-core.c
index 90164fed08d3..2a7c6e33bb1c 100644
--- a/drivers/hid/i2c-hid/i2c-hid-core.c
+++ b/drivers/hid/i2c-hid/i2c-hid-core.c
@@ -169,9 +169,7 @@ static const struct i2c_hid_quirks {
169 __u16 idProduct; 169 __u16 idProduct;
170 __u32 quirks; 170 __u32 quirks;
171} i2c_hid_quirks[] = { 171} i2c_hid_quirks[] = {
172 { USB_VENDOR_ID_WEIDA, USB_DEVICE_ID_WEIDA_8752, 172 { USB_VENDOR_ID_WEIDA, HID_ANY_ID,
173 I2C_HID_QUIRK_SET_PWR_WAKEUP_DEV },
174 { USB_VENDOR_ID_WEIDA, USB_DEVICE_ID_WEIDA_8755,
175 I2C_HID_QUIRK_SET_PWR_WAKEUP_DEV }, 173 I2C_HID_QUIRK_SET_PWR_WAKEUP_DEV },
176 { I2C_VENDOR_ID_HANTICK, I2C_PRODUCT_ID_HANTICK_5288, 174 { I2C_VENDOR_ID_HANTICK, I2C_PRODUCT_ID_HANTICK_5288,
177 I2C_HID_QUIRK_NO_IRQ_AFTER_RESET | 175 I2C_HID_QUIRK_NO_IRQ_AFTER_RESET |
diff --git a/drivers/hid/intel-ish-hid/ipc/hw-ish.h b/drivers/hid/intel-ish-hid/ipc/hw-ish.h
index 5792a104000a..6c1e6110867f 100644
--- a/drivers/hid/intel-ish-hid/ipc/hw-ish.h
+++ b/drivers/hid/intel-ish-hid/ipc/hw-ish.h
@@ -78,5 +78,6 @@ irqreturn_t ish_irq_handler(int irq, void *dev_id);
78struct ishtp_device *ish_dev_init(struct pci_dev *pdev); 78struct ishtp_device *ish_dev_init(struct pci_dev *pdev);
79int ish_hw_start(struct ishtp_device *dev); 79int ish_hw_start(struct ishtp_device *dev);
80void ish_device_disable(struct ishtp_device *dev); 80void ish_device_disable(struct ishtp_device *dev);
81int ish_disable_dma(struct ishtp_device *dev);
81 82
82#endif /* _ISHTP_HW_ISH_H_ */ 83#endif /* _ISHTP_HW_ISH_H_ */
diff --git a/drivers/hid/intel-ish-hid/ipc/ipc.c b/drivers/hid/intel-ish-hid/ipc/ipc.c
index 18fe8af89aad..8f8dfdf64833 100644
--- a/drivers/hid/intel-ish-hid/ipc/ipc.c
+++ b/drivers/hid/intel-ish-hid/ipc/ipc.c
@@ -672,7 +672,7 @@ eoi:
672 * 672 *
673 * Return: 0 for success else error code. 673 * Return: 0 for success else error code.
674 */ 674 */
675static int ish_disable_dma(struct ishtp_device *dev) 675int ish_disable_dma(struct ishtp_device *dev)
676{ 676{
677 unsigned int dma_delay; 677 unsigned int dma_delay;
678 678
diff --git a/drivers/hid/intel-ish-hid/ipc/pci-ish.c b/drivers/hid/intel-ish-hid/ipc/pci-ish.c
index 279567baca3d..784dcc8c7022 100644
--- a/drivers/hid/intel-ish-hid/ipc/pci-ish.c
+++ b/drivers/hid/intel-ish-hid/ipc/pci-ish.c
@@ -14,6 +14,7 @@
14#include <linux/types.h> 14#include <linux/types.h>
15#include <linux/pci.h> 15#include <linux/pci.h>
16#include <linux/sched.h> 16#include <linux/sched.h>
17#include <linux/suspend.h>
17#include <linux/interrupt.h> 18#include <linux/interrupt.h>
18#include <linux/workqueue.h> 19#include <linux/workqueue.h>
19#define CREATE_TRACE_POINTS 20#define CREATE_TRACE_POINTS
@@ -98,6 +99,11 @@ static const struct pci_device_id ish_invalid_pci_ids[] = {
98 {} 99 {}
99}; 100};
100 101
102static inline bool ish_should_enter_d0i3(struct pci_dev *pdev)
103{
104 return !pm_suspend_via_firmware() || pdev->device == CHV_DEVICE_ID;
105}
106
101/** 107/**
102 * ish_probe() - PCI driver probe callback 108 * ish_probe() - PCI driver probe callback
103 * @pdev: pci device 109 * @pdev: pci device
@@ -148,7 +154,6 @@ static int ish_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
148 /* mapping IO device memory */ 154 /* mapping IO device memory */
149 hw->mem_addr = pcim_iomap_table(pdev)[0]; 155 hw->mem_addr = pcim_iomap_table(pdev)[0];
150 ishtp->pdev = pdev; 156 ishtp->pdev = pdev;
151 pdev->dev_flags |= PCI_DEV_FLAGS_NO_D3;
152 157
153 /* request and enable interrupt */ 158 /* request and enable interrupt */
154 ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_ALL_TYPES); 159 ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_ALL_TYPES);
@@ -185,7 +190,6 @@ static void ish_remove(struct pci_dev *pdev)
185 struct ishtp_device *ishtp_dev = pci_get_drvdata(pdev); 190 struct ishtp_device *ishtp_dev = pci_get_drvdata(pdev);
186 191
187 ishtp_bus_remove_all_clients(ishtp_dev, false); 192 ishtp_bus_remove_all_clients(ishtp_dev, false);
188 pdev->dev_flags &= ~PCI_DEV_FLAGS_NO_D3;
189 ish_device_disable(ishtp_dev); 193 ish_device_disable(ishtp_dev);
190} 194}
191 195
@@ -207,17 +211,13 @@ static void __maybe_unused ish_resume_handler(struct work_struct *work)
207{ 211{
208 struct pci_dev *pdev = to_pci_dev(ish_resume_device); 212 struct pci_dev *pdev = to_pci_dev(ish_resume_device);
209 struct ishtp_device *dev = pci_get_drvdata(pdev); 213 struct ishtp_device *dev = pci_get_drvdata(pdev);
210 uint32_t fwsts;
211 int ret; 214 int ret;
212 215
213 /* Get ISH FW status */ 216 /* Check the NO_D3 flag to distinguish the resume paths */
214 fwsts = IPC_GET_ISH_FWSTS(dev->ops->get_fw_status(dev)); 217 if (pdev->dev_flags & PCI_DEV_FLAGS_NO_D3) {
218 pdev->dev_flags &= ~PCI_DEV_FLAGS_NO_D3;
219 disable_irq_wake(pdev->irq);
215 220
216 /*
217 * If currently, in ISH FW, sensor app is loaded or beyond that,
218 * it means ISH isn't powered off, in this case, send a resume message.
219 */
220 if (fwsts >= FWSTS_SENSOR_APP_LOADED) {
221 ishtp_send_resume(dev); 221 ishtp_send_resume(dev);
222 222
223 /* Waiting to get resume response */ 223 /* Waiting to get resume response */
@@ -225,16 +225,20 @@ static void __maybe_unused ish_resume_handler(struct work_struct *work)
225 ret = wait_event_interruptible_timeout(dev->resume_wait, 225 ret = wait_event_interruptible_timeout(dev->resume_wait,
226 !dev->resume_flag, 226 !dev->resume_flag,
227 msecs_to_jiffies(WAIT_FOR_RESUME_ACK_MS)); 227 msecs_to_jiffies(WAIT_FOR_RESUME_ACK_MS));
228 }
229 228
230 /* 229 /*
231 * If in ISH FW, sensor app isn't loaded yet, or no resume response. 230 * If the flag is not cleared, something is wrong with ISH FW.
232 * That means this platform is not S0ix compatible, or something is 231 * So on resume, need to go through init sequence again.
233 * wrong with ISH FW. So on resume, full reboot of ISH processor will 232 */
234 * happen, so need to go through init sequence again. 233 if (dev->resume_flag)
235 */ 234 ish_init(dev);
236 if (dev->resume_flag) 235 } else {
236 /*
237 * Resume from the D3, full reboot of ISH processor will happen,
238 * so need to go through init sequence again.
239 */
237 ish_init(dev); 240 ish_init(dev);
241 }
238} 242}
239 243
240/** 244/**
@@ -250,23 +254,43 @@ static int __maybe_unused ish_suspend(struct device *device)
250 struct pci_dev *pdev = to_pci_dev(device); 254 struct pci_dev *pdev = to_pci_dev(device);
251 struct ishtp_device *dev = pci_get_drvdata(pdev); 255 struct ishtp_device *dev = pci_get_drvdata(pdev);
252 256
253 enable_irq_wake(pdev->irq); 257 if (ish_should_enter_d0i3(pdev)) {
254 /* 258 /*
255 * If previous suspend hasn't been answered then ISH is likely 259 * If previous suspend hasn't been answered then ISH is likely
256 * don't attempt nested notification 260 * dead, don't attempt nested notification
257 */ 261 */
258 if (dev->suspend_flag) 262 if (dev->suspend_flag)
259 return 0; 263 return 0;
260 264
261 dev->resume_flag = 0; 265 dev->resume_flag = 0;
262 dev->suspend_flag = 1; 266 dev->suspend_flag = 1;
263 ishtp_send_suspend(dev); 267 ishtp_send_suspend(dev);
264 268
265 /* 25 ms should be enough for live ISH to flush all IPC buf */ 269 /* 25 ms should be enough for live ISH to flush all IPC buf */
266 if (dev->suspend_flag) 270 if (dev->suspend_flag)
267 wait_event_interruptible_timeout(dev->suspend_wait, 271 wait_event_interruptible_timeout(dev->suspend_wait,
268 !dev->suspend_flag, 272 !dev->suspend_flag,
269 msecs_to_jiffies(25)); 273 msecs_to_jiffies(25));
274
275 if (dev->suspend_flag) {
276 /*
277 * It looks like the FW has halted; clear the DMA bit and put
278 * ISH into D3, so the FW will reset on resume.
279 */
280 ish_disable_dma(dev);
281 } else {
282 /* Set the NO_D3 flag, the ISH would enter D0i3 */
283 pdev->dev_flags |= PCI_DEV_FLAGS_NO_D3;
284
285 enable_irq_wake(pdev->irq);
286 }
287 } else {
288 /*
289 * Clear the DMA bit before putting ISH into D3,
290 * or ISH FW would reset automatically.
291 */
292 ish_disable_dma(dev);
293 }
270 294
271 return 0; 295 return 0;
272} 296}
@@ -288,7 +312,6 @@ static int __maybe_unused ish_resume(struct device *device)
288 ish_resume_device = device; 312 ish_resume_device = device;
289 dev->resume_flag = 1; 313 dev->resume_flag = 1;
290 314
291 disable_irq_wake(pdev->irq);
292 schedule_work(&resume_work); 315 schedule_work(&resume_work);
293 316
294 return 0; 317 return 0;
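
Reading the suspend hunk above, the intent appears to be: on suspend-to-idle (no firmware involvement) or on Cherry Trail, keep the ISH in D0i3 and mark the device with PCI_DEV_FLAGS_NO_D3; otherwise disable DMA and let the ISH go to D3, which forces a full ish_init() on resume. A decision-table sketch with stand-in booleans for pm_suspend_via_firmware() and the CHV_DEVICE_ID check:

#include <stdbool.h>
#include <stdio.h>

static const char *ish_suspend_target(bool firmware_suspend, bool is_cherrytrail)
{
	if (!firmware_suspend || is_cherrytrail)
		return "D0i3: send suspend msg, set PCI_DEV_FLAGS_NO_D3, enable IRQ wake";
	return "D3: disable DMA; firmware resets, resume does full ish_init()";
}

int main(void)
{
	printf("s2idle:  %s\n", ish_suspend_target(false, false));
	printf("S3:      %s\n", ish_suspend_target(true, false));
	printf("CHV, S3: %s\n", ish_suspend_target(true, true));
	return 0;
}
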
diff --git a/drivers/hid/usbhid/hiddev.c b/drivers/hid/usbhid/hiddev.c
index 4e11cc6fc34b..1f9bc4483465 100644
--- a/drivers/hid/usbhid/hiddev.c
+++ b/drivers/hid/usbhid/hiddev.c
@@ -428,7 +428,7 @@ static __poll_t hiddev_poll(struct file *file, poll_table *wait)
428 428
429 poll_wait(file, &list->hiddev->wait, wait); 429 poll_wait(file, &list->hiddev->wait, wait);
430 if (list->head != list->tail) 430 if (list->head != list->tail)
431 return EPOLLIN | EPOLLRDNORM; 431 return EPOLLIN | EPOLLRDNORM | EPOLLOUT;
432 if (!list->hiddev->exist) 432 if (!list->hiddev->exist)
433 return EPOLLERR | EPOLLHUP; 433 return EPOLLERR | EPOLLHUP;
434 return 0; 434 return 0;
diff --git a/drivers/hid/wacom_sys.c b/drivers/hid/wacom_sys.c
index 53bddb50aeba..5ded94b7bf68 100644
--- a/drivers/hid/wacom_sys.c
+++ b/drivers/hid/wacom_sys.c
@@ -88,7 +88,7 @@ static void wacom_wac_queue_flush(struct hid_device *hdev,
88} 88}
89 89
90static int wacom_wac_pen_serial_enforce(struct hid_device *hdev, 90static int wacom_wac_pen_serial_enforce(struct hid_device *hdev,
91 struct hid_report *report, u8 *raw_data, int size) 91 struct hid_report *report, u8 *raw_data, int report_size)
92{ 92{
93 struct wacom *wacom = hid_get_drvdata(hdev); 93 struct wacom *wacom = hid_get_drvdata(hdev);
94 struct wacom_wac *wacom_wac = &wacom->wacom_wac; 94 struct wacom_wac *wacom_wac = &wacom->wacom_wac;
@@ -149,7 +149,8 @@ static int wacom_wac_pen_serial_enforce(struct hid_device *hdev,
149 if (flush) 149 if (flush)
150 wacom_wac_queue_flush(hdev, &wacom_wac->pen_fifo); 150 wacom_wac_queue_flush(hdev, &wacom_wac->pen_fifo);
151 else if (insert) 151 else if (insert)
152 wacom_wac_queue_insert(hdev, &wacom_wac->pen_fifo, raw_data, size); 152 wacom_wac_queue_insert(hdev, &wacom_wac->pen_fifo,
153 raw_data, report_size);
153 154
154 return insert && !flush; 155 return insert && !flush;
155} 156}
@@ -2176,7 +2177,7 @@ static void wacom_update_name(struct wacom *wacom, const char *suffix)
2176{ 2177{
2177 struct wacom_wac *wacom_wac = &wacom->wacom_wac; 2178 struct wacom_wac *wacom_wac = &wacom->wacom_wac;
2178 struct wacom_features *features = &wacom_wac->features; 2179 struct wacom_features *features = &wacom_wac->features;
2179 char name[WACOM_NAME_MAX]; 2180 char name[WACOM_NAME_MAX - 20]; /* Leave some room for suffixes */
2180 2181
2181 /* Generic devices name unspecified */ 2182 /* Generic devices name unspecified */
2182 if ((features->type == HID_GENERIC) && !strcmp("Wacom HID", features->name)) { 2183 if ((features->type == HID_GENERIC) && !strcmp("Wacom HID", features->name)) {
@@ -2718,14 +2719,12 @@ static int wacom_probe(struct hid_device *hdev,
2718 wacom_wac->features = *((struct wacom_features *)id->driver_data); 2719 wacom_wac->features = *((struct wacom_features *)id->driver_data);
2719 features = &wacom_wac->features; 2720 features = &wacom_wac->features;
2720 2721
2721 if (features->check_for_hid_type && features->hid_type != hdev->type) { 2722 if (features->check_for_hid_type && features->hid_type != hdev->type)
2722 error = -ENODEV; 2723 return -ENODEV;
2723 goto fail;
2724 }
2725 2724
2726 error = kfifo_alloc(&wacom_wac->pen_fifo, WACOM_PKGLEN_MAX, GFP_KERNEL); 2725 error = kfifo_alloc(&wacom_wac->pen_fifo, WACOM_PKGLEN_MAX, GFP_KERNEL);
2727 if (error) 2726 if (error)
2728 goto fail; 2727 return error;
2729 2728
2730 wacom_wac->hid_data.inputmode = -1; 2729 wacom_wac->hid_data.inputmode = -1;
2731 wacom_wac->mode_report = -1; 2730 wacom_wac->mode_report = -1;
@@ -2743,12 +2742,12 @@ static int wacom_probe(struct hid_device *hdev,
2743 error = hid_parse(hdev); 2742 error = hid_parse(hdev);
2744 if (error) { 2743 if (error) {
2745 hid_err(hdev, "parse failed\n"); 2744 hid_err(hdev, "parse failed\n");
2746 goto fail; 2745 return error;
2747 } 2746 }
2748 2747
2749 error = wacom_parse_and_register(wacom, false); 2748 error = wacom_parse_and_register(wacom, false);
2750 if (error) 2749 if (error)
2751 goto fail; 2750 return error;
2752 2751
2753 if (hdev->bus == BUS_BLUETOOTH) { 2752 if (hdev->bus == BUS_BLUETOOTH) {
2754 error = device_create_file(&hdev->dev, &dev_attr_speed); 2753 error = device_create_file(&hdev->dev, &dev_attr_speed);
@@ -2759,10 +2758,6 @@ static int wacom_probe(struct hid_device *hdev,
2759 } 2758 }
2760 2759
2761 return 0; 2760 return 0;
2762
2763fail:
2764 hid_set_drvdata(hdev, NULL);
2765 return error;
2766} 2761}
2767 2762
2768static void wacom_remove(struct hid_device *hdev) 2763static void wacom_remove(struct hid_device *hdev)
@@ -2791,8 +2786,6 @@ static void wacom_remove(struct hid_device *hdev)
2791 wacom_release_resources(wacom); 2786 wacom_release_resources(wacom);
2792 2787
2793 kfifo_free(&wacom_wac->pen_fifo); 2788 kfifo_free(&wacom_wac->pen_fifo);
2794
2795 hid_set_drvdata(hdev, NULL);
2796} 2789}
2797 2790
2798#ifdef CONFIG_PM 2791#ifdef CONFIG_PM
diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c
index 1713235d28cb..2b0a5b8ca6e6 100644
--- a/drivers/hid/wacom_wac.c
+++ b/drivers/hid/wacom_wac.c
@@ -251,7 +251,7 @@ static int wacom_dtu_irq(struct wacom_wac *wacom)
251 251
252static int wacom_dtus_irq(struct wacom_wac *wacom) 252static int wacom_dtus_irq(struct wacom_wac *wacom)
253{ 253{
254 char *data = wacom->data; 254 unsigned char *data = wacom->data;
255 struct input_dev *input = wacom->pen_input; 255 struct input_dev *input = wacom->pen_input;
256 unsigned short prox, pressure = 0; 256 unsigned short prox, pressure = 0;
257 257
@@ -483,6 +483,8 @@ static int wacom_intuos_pad(struct wacom_wac *wacom)
483 int ring1 = 0, ring2 = 0; 483 int ring1 = 0, ring2 = 0;
484 int strip1 = 0, strip2 = 0; 484 int strip1 = 0, strip2 = 0;
485 bool prox = false; 485 bool prox = false;
486 bool wrench = false, keyboard = false, mute_touch = false, menu = false,
487 info = false;
486 488
487 /* pad packets. Works as a second tool and is always in prox */ 489 /* pad packets. Works as a second tool and is always in prox */
488 if (!(data[0] == WACOM_REPORT_INTUOSPAD || data[0] == WACOM_REPORT_INTUOS5PAD || 490 if (!(data[0] == WACOM_REPORT_INTUOSPAD || data[0] == WACOM_REPORT_INTUOS5PAD ||
@@ -512,10 +514,32 @@ static int wacom_intuos_pad(struct wacom_wac *wacom)
512 keys = ((data[3] & 0x1C) ? 1<<2 : 0) | 514 keys = ((data[3] & 0x1C) ? 1<<2 : 0) |
513 ((data[4] & 0xE0) ? 1<<1 : 0) | 515 ((data[4] & 0xE0) ? 1<<1 : 0) |
514 ((data[4] & 0x07) ? 1<<0 : 0); 516 ((data[4] & 0x07) ? 1<<0 : 0);
517 keyboard = !!(data[4] & 0xE0);
518 info = !!(data[3] & 0x1C);
519
520 if (features->oPid) {
521 mute_touch = !!(data[4] & 0x07);
522 if (mute_touch)
523 wacom->shared->is_touch_on =
524 !wacom->shared->is_touch_on;
525 } else {
526 wrench = !!(data[4] & 0x07);
527 }
515 } else if (features->type == WACOM_27QHD) { 528 } else if (features->type == WACOM_27QHD) {
516 nkeys = 3; 529 nkeys = 3;
517 keys = data[2] & 0x07; 530 keys = data[2] & 0x07;
518 531
532 wrench = !!(data[2] & 0x01);
533 keyboard = !!(data[2] & 0x02);
534
535 if (features->oPid) {
536 mute_touch = !!(data[2] & 0x04);
537 if (mute_touch)
538 wacom->shared->is_touch_on =
539 !wacom->shared->is_touch_on;
540 } else {
541 menu = !!(data[2] & 0x04);
542 }
519 input_report_abs(input, ABS_X, be16_to_cpup((__be16 *)&data[4])); 543 input_report_abs(input, ABS_X, be16_to_cpup((__be16 *)&data[4]));
520 input_report_abs(input, ABS_Y, be16_to_cpup((__be16 *)&data[6])); 544 input_report_abs(input, ABS_Y, be16_to_cpup((__be16 *)&data[6]));
521 input_report_abs(input, ABS_Z, be16_to_cpup((__be16 *)&data[8])); 545 input_report_abs(input, ABS_Z, be16_to_cpup((__be16 *)&data[8]));
@@ -561,6 +585,9 @@ static int wacom_intuos_pad(struct wacom_wac *wacom)
561 if (features->type == WACOM_22HD) { 585 if (features->type == WACOM_22HD) {
562 nkeys = 3; 586 nkeys = 3;
563 keys = data[9] & 0x07; 587 keys = data[9] & 0x07;
588
589 info = !!(data[9] & 0x01);
590 wrench = !!(data[9] & 0x02);
564 } 591 }
565 } else { 592 } else {
566 buttons = ((data[6] & 0x10) << 5) | 593 buttons = ((data[6] & 0x10) << 5) |
@@ -572,7 +599,7 @@ static int wacom_intuos_pad(struct wacom_wac *wacom)
572 strip2 = ((data[3] & 0x1f) << 8) | data[4]; 599 strip2 = ((data[3] & 0x1f) << 8) | data[4];
573 } 600 }
574 601
575 prox = (buttons & ~(~0 << nbuttons)) | (keys & ~(~0 << nkeys)) | 602 prox = (buttons & ~(~0U << nbuttons)) | (keys & ~(~0U << nkeys)) |
576 (ring1 & 0x80) | (ring2 & 0x80) | strip1 | strip2; 603 (ring1 & 0x80) | (ring2 & 0x80) | strip1 | strip2;
577 604
578 wacom_report_numbered_buttons(input, nbuttons, buttons); 605 wacom_report_numbered_buttons(input, nbuttons, buttons);
@@ -580,6 +607,18 @@ static int wacom_intuos_pad(struct wacom_wac *wacom)
580 for (i = 0; i < nkeys; i++) 607 for (i = 0; i < nkeys; i++)
581 input_report_key(input, KEY_PROG1 + i, keys & (1 << i)); 608 input_report_key(input, KEY_PROG1 + i, keys & (1 << i));
582 609
610 input_report_key(input, KEY_BUTTONCONFIG, wrench);
611 input_report_key(input, KEY_ONSCREEN_KEYBOARD, keyboard);
612 input_report_key(input, KEY_CONTROLPANEL, menu);
613 input_report_key(input, KEY_INFO, info);
614
615 if (wacom->shared && wacom->shared->touch_input) {
616 input_report_switch(wacom->shared->touch_input,
617 SW_MUTE_DEVICE,
618 !wacom->shared->is_touch_on);
619 input_sync(wacom->shared->touch_input);
620 }
621
583 input_report_abs(input, ABS_RX, strip1); 622 input_report_abs(input, ABS_RX, strip1);
584 input_report_abs(input, ABS_RY, strip2); 623 input_report_abs(input, ABS_RY, strip2);
585 624
@@ -1483,6 +1522,12 @@ static int wacom_24hdt_irq(struct wacom_wac *wacom)
1483 int byte_per_packet = WACOM_BYTES_PER_24HDT_PACKET; 1522 int byte_per_packet = WACOM_BYTES_PER_24HDT_PACKET;
1484 int y_offset = 2; 1523 int y_offset = 2;
1485 1524
1525 if (wacom->shared->has_mute_touch_switch &&
1526 !wacom->shared->is_touch_on) {
1527 if (!wacom->shared->touch_down)
1528 return 0;
1529 }
1530
1486 if (wacom->features.type == WACOM_27QHDT) { 1531 if (wacom->features.type == WACOM_27QHDT) {
1487 current_num_contacts = data[63]; 1532 current_num_contacts = data[63];
1488 num_contacts_left = 10; 1533 num_contacts_left = 10;
@@ -2051,14 +2096,14 @@ static void wacom_wac_pad_event(struct hid_device *hdev, struct hid_field *field
2051 (hdev->product == 0x34d || hdev->product == 0x34e || /* MobileStudio Pro */ 2096 (hdev->product == 0x34d || hdev->product == 0x34e || /* MobileStudio Pro */
2052 hdev->product == 0x357 || hdev->product == 0x358 || /* Intuos Pro 2 */ 2097 hdev->product == 0x357 || hdev->product == 0x358 || /* Intuos Pro 2 */
2053 hdev->product == 0x392 || /* Intuos Pro 2 */ 2098 hdev->product == 0x392 || /* Intuos Pro 2 */
2054 hdev->product == 0x399)) { /* MobileStudio Pro */ 2099 hdev->product == 0x398 || hdev->product == 0x399)) { /* MobileStudio Pro */
2055 value = (field->logical_maximum - value); 2100 value = (field->logical_maximum - value);
2056 2101
2057 if (hdev->product == 0x357 || hdev->product == 0x358 || 2102 if (hdev->product == 0x357 || hdev->product == 0x358 ||
2058 hdev->product == 0x392) 2103 hdev->product == 0x392)
2059 value = wacom_offset_rotation(input, usage, value, 3, 16); 2104 value = wacom_offset_rotation(input, usage, value, 3, 16);
2060 else if (hdev->product == 0x34d || hdev->product == 0x34e || 2105 else if (hdev->product == 0x34d || hdev->product == 0x34e ||
2061 hdev->product == 0x399) 2106 hdev->product == 0x398 || hdev->product == 0x399)
2062 value = wacom_offset_rotation(input, usage, value, 1, 2); 2107 value = wacom_offset_rotation(input, usage, value, 1, 2);
2063 } 2108 }
2064 else { 2109 else {
@@ -3815,6 +3860,14 @@ int wacom_setup_touch_input_capabilities(struct input_dev *input_dev,
3815 /* fall through */ 3860 /* fall through */
3816 3861
3817 case WACOM_27QHDT: 3862 case WACOM_27QHDT:
3863 if (wacom_wac->shared->touch->product == 0x32C ||
3864 wacom_wac->shared->touch->product == 0xF6) {
3865 input_dev->evbit[0] |= BIT_MASK(EV_SW);
3866 __set_bit(SW_MUTE_DEVICE, input_dev->swbit);
3867 wacom_wac->shared->has_mute_touch_switch = true;
3868 }
3869 /* fall through */
3870
3818 case MTSCREEN: 3871 case MTSCREEN:
3819 case MTTPC: 3872 case MTTPC:
3820 case MTTPC_B: 3873 case MTTPC_B:
@@ -4050,6 +4103,12 @@ int wacom_setup_pad_input_capabilities(struct input_dev *input_dev,
4050 __set_bit(KEY_PROG2, input_dev->keybit); 4103 __set_bit(KEY_PROG2, input_dev->keybit);
4051 __set_bit(KEY_PROG3, input_dev->keybit); 4104 __set_bit(KEY_PROG3, input_dev->keybit);
4052 4105
4106 __set_bit(KEY_ONSCREEN_KEYBOARD, input_dev->keybit);
4107 __set_bit(KEY_INFO, input_dev->keybit);
4108
4109 if (!features->oPid)
4110 __set_bit(KEY_BUTTONCONFIG, input_dev->keybit);
4111
4053 input_set_abs_params(input_dev, ABS_WHEEL, 0, 71, 0, 0); 4112 input_set_abs_params(input_dev, ABS_WHEEL, 0, 71, 0, 0);
4054 input_set_abs_params(input_dev, ABS_THROTTLE, 0, 71, 0, 0); 4113 input_set_abs_params(input_dev, ABS_THROTTLE, 0, 71, 0, 0);
4055 break; 4114 break;
@@ -4058,6 +4117,12 @@ int wacom_setup_pad_input_capabilities(struct input_dev *input_dev,
4058 __set_bit(KEY_PROG1, input_dev->keybit); 4117 __set_bit(KEY_PROG1, input_dev->keybit);
4059 __set_bit(KEY_PROG2, input_dev->keybit); 4118 __set_bit(KEY_PROG2, input_dev->keybit);
4060 __set_bit(KEY_PROG3, input_dev->keybit); 4119 __set_bit(KEY_PROG3, input_dev->keybit);
4120
4121 __set_bit(KEY_ONSCREEN_KEYBOARD, input_dev->keybit);
4122 __set_bit(KEY_BUTTONCONFIG, input_dev->keybit);
4123
4124 if (!features->oPid)
4125 __set_bit(KEY_CONTROLPANEL, input_dev->keybit);
4061 input_set_abs_params(input_dev, ABS_X, -2048, 2048, 0, 0); 4126 input_set_abs_params(input_dev, ABS_X, -2048, 2048, 0, 0);
4062 input_abs_set_res(input_dev, ABS_X, 1024); /* points/g */ 4127 input_abs_set_res(input_dev, ABS_X, 1024); /* points/g */
4063 input_set_abs_params(input_dev, ABS_Y, -2048, 2048, 0, 0); 4128 input_set_abs_params(input_dev, ABS_Y, -2048, 2048, 0, 0);
@@ -4071,6 +4136,9 @@ int wacom_setup_pad_input_capabilities(struct input_dev *input_dev,
4071 __set_bit(KEY_PROG1, input_dev->keybit); 4136 __set_bit(KEY_PROG1, input_dev->keybit);
4072 __set_bit(KEY_PROG2, input_dev->keybit); 4137 __set_bit(KEY_PROG2, input_dev->keybit);
4073 __set_bit(KEY_PROG3, input_dev->keybit); 4138 __set_bit(KEY_PROG3, input_dev->keybit);
4139
4140 __set_bit(KEY_BUTTONCONFIG, input_dev->keybit);
4141 __set_bit(KEY_INFO, input_dev->keybit);
4074 /* fall through */ 4142 /* fall through */
4075 4143
4076 case WACOM_21UX2: 4144 case WACOM_21UX2:
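
One detail in the wacom_intuos_pad() hunk above: the prox expression now uses ~0U, so ~(~0U << nbuttons) builds an nbuttons-wide mask without left-shifting a negative value (undefined behavior in C), which is presumably the motivation for the change. A tiny check:

#include <stdio.h>

int main(void)
{
	unsigned int nbuttons = 9;
	unsigned int mask = ~(~0U << nbuttons);

	printf("mask for %u buttons: 0x%X\n", nbuttons, mask); /* 0x1FF */
	return 0;
}
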
diff --git a/drivers/hv/channel_mgmt.c b/drivers/hv/channel_mgmt.c
index addcef50df7a..8eb167540b4f 100644
--- a/drivers/hv/channel_mgmt.c
+++ b/drivers/hv/channel_mgmt.c
@@ -407,7 +407,15 @@ void hv_process_channel_removal(struct vmbus_channel *channel)
407 cpumask_clear_cpu(channel->target_cpu, 407 cpumask_clear_cpu(channel->target_cpu,
408 &primary_channel->alloced_cpus_in_node); 408 &primary_channel->alloced_cpus_in_node);
409 409
410 vmbus_release_relid(channel->offermsg.child_relid); 410 /*
411 * Upon suspend, an in-use hv_sock channel is marked as "rescinded" and
412 * the relid is invalidated; after hibernation, when the user-space app
413 * destroys the channel, the relid is INVALID_RELID, and in this case
414 * it's unnecessary and unsafe to release the old relid, since the same
415 * relid can refer to a completely different channel now.
416 */
417 if (channel->offermsg.child_relid != INVALID_RELID)
418 vmbus_release_relid(channel->offermsg.child_relid);
411 419
412 free_channel(channel); 420 free_channel(channel);
413} 421}
@@ -545,6 +553,10 @@ static void vmbus_process_offer(struct vmbus_channel *newchannel)
545 553
546 mutex_lock(&vmbus_connection.channel_mutex); 554 mutex_lock(&vmbus_connection.channel_mutex);
547 555
556 /* Remember the channels that should be cleaned up upon suspend. */
557 if (is_hvsock_channel(newchannel) || is_sub_channel(newchannel))
558 atomic_inc(&vmbus_connection.nr_chan_close_on_suspend);
559
548 /* 560 /*
549 * Now that we have acquired the channel_mutex, 561 * Now that we have acquired the channel_mutex,
550 * we can release the potentially racing rescind thread. 562 * we can release the potentially racing rescind thread.
@@ -847,6 +859,67 @@ void vmbus_initiate_unload(bool crash)
847 vmbus_wait_for_unload(); 859 vmbus_wait_for_unload();
848} 860}
849 861
862static void check_ready_for_resume_event(void)
863{
864 /*
865 * If all the old primary channels have been fixed up, then it's safe
866 * to resume.
867 */
868 if (atomic_dec_and_test(&vmbus_connection.nr_chan_fixup_on_resume))
869 complete(&vmbus_connection.ready_for_resume_event);
870}
871
872static void vmbus_setup_channel_state(struct vmbus_channel *channel,
873 struct vmbus_channel_offer_channel *offer)
874{
875 /*
876 * Setup state for signalling the host.
877 */
878 channel->sig_event = VMBUS_EVENT_CONNECTION_ID;
879
880 if (vmbus_proto_version != VERSION_WS2008) {
881 channel->is_dedicated_interrupt =
882 (offer->is_dedicated_interrupt != 0);
883 channel->sig_event = offer->connection_id;
884 }
885
886 memcpy(&channel->offermsg, offer,
887 sizeof(struct vmbus_channel_offer_channel));
888 channel->monitor_grp = (u8)offer->monitorid / 32;
889 channel->monitor_bit = (u8)offer->monitorid % 32;
890}
891
892/*
893 * find_primary_channel_by_offer - Get the channel object given the new offer.
894 * This is only used in the resume path of hibernation.
895 */
896static struct vmbus_channel *
897find_primary_channel_by_offer(const struct vmbus_channel_offer_channel *offer)
898{
899 struct vmbus_channel *channel = NULL, *iter;
900 const guid_t *inst1, *inst2;
901
902 /* Ignore sub-channel offers. */
903 if (offer->offer.sub_channel_index != 0)
904 return NULL;
905
906 mutex_lock(&vmbus_connection.channel_mutex);
907
908 list_for_each_entry(iter, &vmbus_connection.chn_list, listentry) {
909 inst1 = &iter->offermsg.offer.if_instance;
910 inst2 = &offer->offer.if_instance;
911
912 if (guid_equal(inst1, inst2)) {
913 channel = iter;
914 break;
915 }
916 }
917
918 mutex_unlock(&vmbus_connection.channel_mutex);
919
920 return channel;
921}
922
850/* 923/*
851 * vmbus_onoffer - Handler for channel offers from vmbus in parent partition. 924 * vmbus_onoffer - Handler for channel offers from vmbus in parent partition.
852 * 925 *
@@ -854,12 +927,58 @@ void vmbus_initiate_unload(bool crash)
854static void vmbus_onoffer(struct vmbus_channel_message_header *hdr) 927static void vmbus_onoffer(struct vmbus_channel_message_header *hdr)
855{ 928{
856 struct vmbus_channel_offer_channel *offer; 929 struct vmbus_channel_offer_channel *offer;
857 struct vmbus_channel *newchannel; 930 struct vmbus_channel *oldchannel, *newchannel;
931 size_t offer_sz;
858 932
859 offer = (struct vmbus_channel_offer_channel *)hdr; 933 offer = (struct vmbus_channel_offer_channel *)hdr;
860 934
861 trace_vmbus_onoffer(offer); 935 trace_vmbus_onoffer(offer);
862 936
937 oldchannel = find_primary_channel_by_offer(offer);
938
939 if (oldchannel != NULL) {
940 atomic_dec(&vmbus_connection.offer_in_progress);
941
942 /*
943 * We're resuming from hibernation: all the sub-channel and
944 * hv_sock channels we had before the hibernation should have
945 * been cleaned up, and now we must be seeing a re-offered
946 * primary channel that we had before the hibernation.
947 */
948
949 WARN_ON(oldchannel->offermsg.child_relid != INVALID_RELID);
950 /* Fix up the relid. */
951 oldchannel->offermsg.child_relid = offer->child_relid;
952
953 offer_sz = sizeof(*offer);
954 if (memcmp(offer, &oldchannel->offermsg, offer_sz) == 0) {
955 check_ready_for_resume_event();
956 return;
957 }
958
959 /*
960 * This is not an error, since the host can also change the
961 * other field(s) of the offer, e.g. on WS RS5 (Build 17763),
962 * the offer->connection_id of the Mellanox VF vmbus device
963 * can change when the host reoffers the device upon resume.
964 */
965 pr_debug("vmbus offer changed: relid=%d\n",
966 offer->child_relid);
967
968 print_hex_dump_debug("Old vmbus offer: ", DUMP_PREFIX_OFFSET,
969 16, 4, &oldchannel->offermsg, offer_sz,
970 false);
971 print_hex_dump_debug("New vmbus offer: ", DUMP_PREFIX_OFFSET,
972 16, 4, offer, offer_sz, false);
973
974 /* Fix up the old channel. */
975 vmbus_setup_channel_state(oldchannel, offer);
976
977 check_ready_for_resume_event();
978
979 return;
980 }
981
863 /* Allocate the channel object and save this offer. */ 982 /* Allocate the channel object and save this offer. */
864 newchannel = alloc_channel(); 983 newchannel = alloc_channel();
865 if (!newchannel) { 984 if (!newchannel) {
@@ -869,25 +988,21 @@ static void vmbus_onoffer(struct vmbus_channel_message_header *hdr)
869 return; 988 return;
870 } 989 }
871 990
872 /* 991 vmbus_setup_channel_state(newchannel, offer);
873 * Setup state for signalling the host.
874 */
875 newchannel->sig_event = VMBUS_EVENT_CONNECTION_ID;
876
877 if (vmbus_proto_version != VERSION_WS2008) {
878 newchannel->is_dedicated_interrupt =
879 (offer->is_dedicated_interrupt != 0);
880 newchannel->sig_event = offer->connection_id;
881 }
882
883 memcpy(&newchannel->offermsg, offer,
884 sizeof(struct vmbus_channel_offer_channel));
885 newchannel->monitor_grp = (u8)offer->monitorid / 32;
886 newchannel->monitor_bit = (u8)offer->monitorid % 32;
887 992
888 vmbus_process_offer(newchannel); 993 vmbus_process_offer(newchannel);
889} 994}
890 995
996static void check_ready_for_suspend_event(void)
997{
998 /*
999 * If all the sub-channels or hv_sock channels have been cleaned up,
1000 * then it's safe to suspend.
1001 */
1002 if (atomic_dec_and_test(&vmbus_connection.nr_chan_close_on_suspend))
1003 complete(&vmbus_connection.ready_for_suspend_event);
1004}
1005
891/* 1006/*
892 * vmbus_onoffer_rescind - Rescind offer handler. 1007 * vmbus_onoffer_rescind - Rescind offer handler.
893 * 1008 *
@@ -898,6 +1013,7 @@ static void vmbus_onoffer_rescind(struct vmbus_channel_message_header *hdr)
898 struct vmbus_channel_rescind_offer *rescind; 1013 struct vmbus_channel_rescind_offer *rescind;
899 struct vmbus_channel *channel; 1014 struct vmbus_channel *channel;
900 struct device *dev; 1015 struct device *dev;
1016 bool clean_up_chan_for_suspend;
901 1017
902 rescind = (struct vmbus_channel_rescind_offer *)hdr; 1018 rescind = (struct vmbus_channel_rescind_offer *)hdr;
903 1019
@@ -937,6 +1053,8 @@ static void vmbus_onoffer_rescind(struct vmbus_channel_message_header *hdr)
937 return; 1053 return;
938 } 1054 }
939 1055
1056 clean_up_chan_for_suspend = is_hvsock_channel(channel) ||
1057 is_sub_channel(channel);
940 /* 1058 /*
941 * Before setting channel->rescind in vmbus_rescind_cleanup(), we 1059 * Before setting channel->rescind in vmbus_rescind_cleanup(), we
942 * should make sure the channel callback is not running any more. 1060 * should make sure the channel callback is not running any more.
@@ -962,6 +1080,10 @@ static void vmbus_onoffer_rescind(struct vmbus_channel_message_header *hdr)
962 if (channel->device_obj) { 1080 if (channel->device_obj) {
963 if (channel->chn_rescind_callback) { 1081 if (channel->chn_rescind_callback) {
964 channel->chn_rescind_callback(channel); 1082 channel->chn_rescind_callback(channel);
1083
1084 if (clean_up_chan_for_suspend)
1085 check_ready_for_suspend_event();
1086
965 return; 1087 return;
966 } 1088 }
967 /* 1089 /*
@@ -994,6 +1116,11 @@ static void vmbus_onoffer_rescind(struct vmbus_channel_message_header *hdr)
994 } 1116 }
995 mutex_unlock(&vmbus_connection.channel_mutex); 1117 mutex_unlock(&vmbus_connection.channel_mutex);
996 } 1118 }
1119
1120 /* The "channel" may have been freed. Do not access it any longer. */
1121
1122 if (clean_up_chan_for_suspend)
1123 check_ready_for_suspend_event();
997} 1124}
998 1125
999void vmbus_hvsock_device_unregister(struct vmbus_channel *channel) 1126void vmbus_hvsock_device_unregister(struct vmbus_channel *channel)
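The rescind path above decrements vmbus_connection.nr_chan_close_on_suspend and, via check_ready_for_suspend_event(), signals ready_for_suspend_event once the last sub-channel or hv_sock channel is gone. Below is a minimal, self-contained sketch of that "count down to a completion" pattern using the same kernel primitives (atomic_dec_and_test(), complete(), wait_for_completion()); the demo_* names are hypothetical and not part of the patch.

#include <linux/atomic.h>
#include <linux/completion.h>

static atomic_t demo_pending = ATOMIC_INIT(0);   /* items still to be cleaned up */
static DECLARE_COMPLETION(demo_done);            /* signalled when the count hits zero */

/* Called once per item that will later need cleanup. */
void demo_track_item(void)
{
        atomic_inc(&demo_pending);
}

/* Called from each item's cleanup path; the last one wakes the waiter. */
void demo_item_cleaned_up(void)
{
        if (atomic_dec_and_test(&demo_pending))
                complete(&demo_done);
}

/* Suspend-style waiter: only block if something is actually pending. */
void demo_wait_for_cleanup(void)
{
        if (atomic_read(&demo_pending) > 0)
                wait_for_completion(&demo_done);
        reinit_completion(&demo_done);           /* arm it again for the next cycle */
}

As in the patch, the waiter guards the wait with atomic_read() because the counter can legitimately stay at zero (for example, a VM with no sub-channels and no hv_sock connections).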
diff --git a/drivers/hv/connection.c b/drivers/hv/connection.c
index 09829e15d4a0..6e4c015783ff 100644
--- a/drivers/hv/connection.c
+++ b/drivers/hv/connection.c
@@ -26,6 +26,11 @@
26struct vmbus_connection vmbus_connection = { 26struct vmbus_connection vmbus_connection = {
27 .conn_state = DISCONNECTED, 27 .conn_state = DISCONNECTED,
28 .next_gpadl_handle = ATOMIC_INIT(0xE1E10), 28 .next_gpadl_handle = ATOMIC_INIT(0xE1E10),
29
30 .ready_for_suspend_event= COMPLETION_INITIALIZER(
31 vmbus_connection.ready_for_suspend_event),
32 .ready_for_resume_event = COMPLETION_INITIALIZER(
33 vmbus_connection.ready_for_resume_event),
29}; 34};
30EXPORT_SYMBOL_GPL(vmbus_connection); 35EXPORT_SYMBOL_GPL(vmbus_connection);
31 36
@@ -59,8 +64,7 @@ static __u32 vmbus_get_next_version(__u32 current_version)
59 } 64 }
60} 65}
61 66
62static int vmbus_negotiate_version(struct vmbus_channel_msginfo *msginfo, 67int vmbus_negotiate_version(struct vmbus_channel_msginfo *msginfo, u32 version)
63 __u32 version)
64{ 68{
65 int ret = 0; 69 int ret = 0;
66 unsigned int cur_cpu; 70 unsigned int cur_cpu;
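The connection.c hunk above initializes the two new completions statically with COMPLETION_INITIALIZER() inside the vmbus_connection initializer instead of calling init_completion() at runtime. A small sketch of that idiom follows; the struct and field names here are hypothetical.

#include <linux/completion.h>

struct demo_state {
        int conn_state;
        struct completion ready;
};

static struct demo_state demo = {
        .conn_state = 0,
        /* Static init names the containing object itself, hence the self-reference. */
        .ready = COMPLETION_INITIALIZER(demo.ready),
};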
diff --git a/drivers/hv/hv.c b/drivers/hv/hv.c
index 6188fb7dda42..fcc52797c169 100644
--- a/drivers/hv/hv.c
+++ b/drivers/hv/hv.c
@@ -154,7 +154,7 @@ void hv_synic_free(void)
154 * retrieve the initialized message and event pages. Otherwise, we create and 154 * retrieve the initialized message and event pages. Otherwise, we create and
155 * initialize the message and event pages. 155 * initialize the message and event pages.
156 */ 156 */
157int hv_synic_init(unsigned int cpu) 157void hv_synic_enable_regs(unsigned int cpu)
158{ 158{
159 struct hv_per_cpu_context *hv_cpu 159 struct hv_per_cpu_context *hv_cpu
160 = per_cpu_ptr(hv_context.cpu_context, cpu); 160 = per_cpu_ptr(hv_context.cpu_context, cpu);
@@ -196,6 +196,11 @@ int hv_synic_init(unsigned int cpu)
196 sctrl.enable = 1; 196 sctrl.enable = 1;
197 197
198 hv_set_synic_state(sctrl.as_uint64); 198 hv_set_synic_state(sctrl.as_uint64);
199}
200
201int hv_synic_init(unsigned int cpu)
202{
203 hv_synic_enable_regs(cpu);
199 204
200 hv_stimer_init(cpu); 205 hv_stimer_init(cpu);
201 206
@@ -205,20 +210,45 @@ int hv_synic_init(unsigned int cpu)
205/* 210/*
206 * hv_synic_cleanup - Cleanup routine for hv_synic_init(). 211 * hv_synic_cleanup - Cleanup routine for hv_synic_init().
207 */ 212 */
208int hv_synic_cleanup(unsigned int cpu) 213void hv_synic_disable_regs(unsigned int cpu)
209{ 214{
210 union hv_synic_sint shared_sint; 215 union hv_synic_sint shared_sint;
211 union hv_synic_simp simp; 216 union hv_synic_simp simp;
212 union hv_synic_siefp siefp; 217 union hv_synic_siefp siefp;
213 union hv_synic_scontrol sctrl; 218 union hv_synic_scontrol sctrl;
219
220 hv_get_synint_state(VMBUS_MESSAGE_SINT, shared_sint.as_uint64);
221
222 shared_sint.masked = 1;
223
224 /* Need to correctly cleanup in the case of SMP!!! */
225 /* Disable the interrupt */
226 hv_set_synint_state(VMBUS_MESSAGE_SINT, shared_sint.as_uint64);
227
228 hv_get_simp(simp.as_uint64);
229 simp.simp_enabled = 0;
230 simp.base_simp_gpa = 0;
231
232 hv_set_simp(simp.as_uint64);
233
234 hv_get_siefp(siefp.as_uint64);
235 siefp.siefp_enabled = 0;
236 siefp.base_siefp_gpa = 0;
237
238 hv_set_siefp(siefp.as_uint64);
239
240 /* Disable the global synic bit */
241 hv_get_synic_state(sctrl.as_uint64);
242 sctrl.enable = 0;
243 hv_set_synic_state(sctrl.as_uint64);
244}
245
246int hv_synic_cleanup(unsigned int cpu)
247{
214 struct vmbus_channel *channel, *sc; 248 struct vmbus_channel *channel, *sc;
215 bool channel_found = false; 249 bool channel_found = false;
216 unsigned long flags; 250 unsigned long flags;
217 251
218 hv_get_synic_state(sctrl.as_uint64);
219 if (sctrl.enable != 1)
220 return -EFAULT;
221
222 /* 252 /*
223 * Search for channels which are bound to the CPU we're about to 253 * Search for channels which are bound to the CPU we're about to
224 * cleanup. In case we find one and vmbus is still connected we need to 254 * cleanup. In case we find one and vmbus is still connected we need to
@@ -249,29 +279,7 @@ int hv_synic_cleanup(unsigned int cpu)
249 279
250 hv_stimer_cleanup(cpu); 280 hv_stimer_cleanup(cpu);
251 281
252 hv_get_synint_state(VMBUS_MESSAGE_SINT, shared_sint.as_uint64); 282 hv_synic_disable_regs(cpu);
253
254 shared_sint.masked = 1;
255
256 /* Need to correctly cleanup in the case of SMP!!! */
257 /* Disable the interrupt */
258 hv_set_synint_state(VMBUS_MESSAGE_SINT, shared_sint.as_uint64);
259
260 hv_get_simp(simp.as_uint64);
261 simp.simp_enabled = 0;
262 simp.base_simp_gpa = 0;
263
264 hv_set_simp(simp.as_uint64);
265
266 hv_get_siefp(siefp.as_uint64);
267 siefp.siefp_enabled = 0;
268 siefp.base_siefp_gpa = 0;
269
270 hv_set_siefp(siefp.as_uint64);
271
272 /* Disable the global synic bit */
273 sctrl.enable = 0;
274 hv_set_synic_state(sctrl.as_uint64);
275 283
276 return 0; 284 return 0;
277} 285}
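The hv.c change above splits hv_synic_init()/hv_synic_cleanup() so that the pure register programming lives in hv_synic_enable_regs()/hv_synic_disable_regs(), which the syscore suspend/resume path (added later in vmbus_drv.c) can call for CPU0 without dragging in the hotplug-only work such as the stimer setup and the bound-channel check. A hedged sketch of that refactor shape, with hypothetical names:

/* Pure hardware (re)programming: callable from both hotplug and syscore paths. */
void demo_enable_regs(unsigned int cpu)
{
        /* ... program per-CPU message/event pages and unmask the interrupt ... */
}

void demo_disable_regs(unsigned int cpu)
{
        /* ... mask the interrupt, clear page bases, clear the global enable ... */
}

/* CPU hotplug callbacks keep the parts that only make sense there. */
int demo_cpu_online(unsigned int cpu)
{
        demo_enable_regs(cpu);
        /* ... per-CPU timer setup ... */
        return 0;
}

int demo_cpu_offline(unsigned int cpu)
{
        /* ... refuse if channels are still bound to this CPU, tear down the timer ... */
        demo_disable_regs(cpu);
        return 0;
}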
diff --git a/drivers/hv/hv_balloon.c b/drivers/hv/hv_balloon.c
index 6fb4ea5f0304..34bd73526afd 100644
--- a/drivers/hv/hv_balloon.c
+++ b/drivers/hv/hv_balloon.c
@@ -494,7 +494,7 @@ enum hv_dm_state {
494 494
495 495
496static __u8 recv_buffer[PAGE_SIZE]; 496static __u8 recv_buffer[PAGE_SIZE];
497static __u8 *send_buffer; 497static __u8 balloon_up_send_buffer[PAGE_SIZE];
498#define PAGES_IN_2M 512 498#define PAGES_IN_2M 512
499#define HA_CHUNK (32 * 1024) 499#define HA_CHUNK (32 * 1024)
500 500
@@ -1292,8 +1292,8 @@ static void balloon_up(struct work_struct *dummy)
1292 } 1292 }
1293 1293
1294 while (!done) { 1294 while (!done) {
1295 bl_resp = (struct dm_balloon_response *)send_buffer; 1295 memset(balloon_up_send_buffer, 0, PAGE_SIZE);
1296 memset(send_buffer, 0, PAGE_SIZE); 1296 bl_resp = (struct dm_balloon_response *)balloon_up_send_buffer;
1297 bl_resp->hdr.type = DM_BALLOON_RESPONSE; 1297 bl_resp->hdr.type = DM_BALLOON_RESPONSE;
1298 bl_resp->hdr.size = sizeof(struct dm_balloon_response); 1298 bl_resp->hdr.size = sizeof(struct dm_balloon_response);
1299 bl_resp->more_pages = 1; 1299 bl_resp->more_pages = 1;
@@ -1564,58 +1564,18 @@ static void balloon_onchannelcallback(void *context)
1564 1564
1565} 1565}
1566 1566
1567static int balloon_probe(struct hv_device *dev, 1567static int balloon_connect_vsp(struct hv_device *dev)
1568 const struct hv_vmbus_device_id *dev_id)
1569{ 1568{
1570 int ret;
1571 unsigned long t;
1572 struct dm_version_request version_req; 1569 struct dm_version_request version_req;
1573 struct dm_capabilities cap_msg; 1570 struct dm_capabilities cap_msg;
1574 1571 unsigned long t;
1575#ifdef CONFIG_MEMORY_HOTPLUG 1572 int ret;
1576 do_hot_add = hot_add;
1577#else
1578 do_hot_add = false;
1579#endif
1580
1581 /*
1582 * First allocate a send buffer.
1583 */
1584
1585 send_buffer = kmalloc(PAGE_SIZE, GFP_KERNEL);
1586 if (!send_buffer)
1587 return -ENOMEM;
1588 1573
1589 ret = vmbus_open(dev->channel, dm_ring_size, dm_ring_size, NULL, 0, 1574 ret = vmbus_open(dev->channel, dm_ring_size, dm_ring_size, NULL, 0,
1590 balloon_onchannelcallback, dev); 1575 balloon_onchannelcallback, dev);
1591
1592 if (ret) 1576 if (ret)
1593 goto probe_error0; 1577 return ret;
1594 1578
1595 dm_device.dev = dev;
1596 dm_device.state = DM_INITIALIZING;
1597 dm_device.next_version = DYNMEM_PROTOCOL_VERSION_WIN8;
1598 init_completion(&dm_device.host_event);
1599 init_completion(&dm_device.config_event);
1600 INIT_LIST_HEAD(&dm_device.ha_region_list);
1601 spin_lock_init(&dm_device.ha_lock);
1602 INIT_WORK(&dm_device.balloon_wrk.wrk, balloon_up);
1603 INIT_WORK(&dm_device.ha_wrk.wrk, hot_add_req);
1604 dm_device.host_specified_ha_region = false;
1605
1606 dm_device.thread =
1607 kthread_run(dm_thread_func, &dm_device, "hv_balloon");
1608 if (IS_ERR(dm_device.thread)) {
1609 ret = PTR_ERR(dm_device.thread);
1610 goto probe_error1;
1611 }
1612
1613#ifdef CONFIG_MEMORY_HOTPLUG
1614 set_online_page_callback(&hv_online_page);
1615 register_memory_notifier(&hv_memory_nb);
1616#endif
1617
1618 hv_set_drvdata(dev, &dm_device);
1619 /* 1579 /*
1620 * Initiate the hand shake with the host and negotiate 1580 * Initiate the hand shake with the host and negotiate
1621 * a version that the host can support. We start with the 1581 * a version that the host can support. We start with the
@@ -1631,16 +1591,15 @@ static int balloon_probe(struct hv_device *dev,
1631 dm_device.version = version_req.version.version; 1591 dm_device.version = version_req.version.version;
1632 1592
1633 ret = vmbus_sendpacket(dev->channel, &version_req, 1593 ret = vmbus_sendpacket(dev->channel, &version_req,
1634 sizeof(struct dm_version_request), 1594 sizeof(struct dm_version_request),
1635 (unsigned long)NULL, 1595 (unsigned long)NULL, VM_PKT_DATA_INBAND, 0);
1636 VM_PKT_DATA_INBAND, 0);
1637 if (ret) 1596 if (ret)
1638 goto probe_error2; 1597 goto out;
1639 1598
1640 t = wait_for_completion_timeout(&dm_device.host_event, 5*HZ); 1599 t = wait_for_completion_timeout(&dm_device.host_event, 5*HZ);
1641 if (t == 0) { 1600 if (t == 0) {
1642 ret = -ETIMEDOUT; 1601 ret = -ETIMEDOUT;
1643 goto probe_error2; 1602 goto out;
1644 } 1603 }
1645 1604
1646 /* 1605 /*
@@ -1648,8 +1607,8 @@ static int balloon_probe(struct hv_device *dev,
1648 * fail the probe function. 1607 * fail the probe function.
1649 */ 1608 */
1650 if (dm_device.state == DM_INIT_ERROR) { 1609 if (dm_device.state == DM_INIT_ERROR) {
1651 ret = -ETIMEDOUT; 1610 ret = -EPROTO;
1652 goto probe_error2; 1611 goto out;
1653 } 1612 }
1654 1613
1655 pr_info("Using Dynamic Memory protocol version %u.%u\n", 1614 pr_info("Using Dynamic Memory protocol version %u.%u\n",
@@ -1682,16 +1641,15 @@ static int balloon_probe(struct hv_device *dev,
1682 cap_msg.max_page_number = -1; 1641 cap_msg.max_page_number = -1;
1683 1642
1684 ret = vmbus_sendpacket(dev->channel, &cap_msg, 1643 ret = vmbus_sendpacket(dev->channel, &cap_msg,
1685 sizeof(struct dm_capabilities), 1644 sizeof(struct dm_capabilities),
1686 (unsigned long)NULL, 1645 (unsigned long)NULL, VM_PKT_DATA_INBAND, 0);
1687 VM_PKT_DATA_INBAND, 0);
1688 if (ret) 1646 if (ret)
1689 goto probe_error2; 1647 goto out;
1690 1648
1691 t = wait_for_completion_timeout(&dm_device.host_event, 5*HZ); 1649 t = wait_for_completion_timeout(&dm_device.host_event, 5*HZ);
1692 if (t == 0) { 1650 if (t == 0) {
1693 ret = -ETIMEDOUT; 1651 ret = -ETIMEDOUT;
1694 goto probe_error2; 1652 goto out;
1695 } 1653 }
1696 1654
1697 /* 1655 /*
@@ -1699,25 +1657,65 @@ static int balloon_probe(struct hv_device *dev,
1699 * fail the probe function. 1657 * fail the probe function.
1700 */ 1658 */
1701 if (dm_device.state == DM_INIT_ERROR) { 1659 if (dm_device.state == DM_INIT_ERROR) {
1702 ret = -ETIMEDOUT; 1660 ret = -EPROTO;
1703 goto probe_error2; 1661 goto out;
1704 } 1662 }
1705 1663
1664 return 0;
1665out:
1666 vmbus_close(dev->channel);
1667 return ret;
1668}
1669
1670static int balloon_probe(struct hv_device *dev,
1671 const struct hv_vmbus_device_id *dev_id)
1672{
1673 int ret;
1674
1675#ifdef CONFIG_MEMORY_HOTPLUG
1676 do_hot_add = hot_add;
1677#else
1678 do_hot_add = false;
1679#endif
1680 dm_device.dev = dev;
1681 dm_device.state = DM_INITIALIZING;
1682 dm_device.next_version = DYNMEM_PROTOCOL_VERSION_WIN8;
1683 init_completion(&dm_device.host_event);
1684 init_completion(&dm_device.config_event);
1685 INIT_LIST_HEAD(&dm_device.ha_region_list);
1686 spin_lock_init(&dm_device.ha_lock);
1687 INIT_WORK(&dm_device.balloon_wrk.wrk, balloon_up);
1688 INIT_WORK(&dm_device.ha_wrk.wrk, hot_add_req);
1689 dm_device.host_specified_ha_region = false;
1690
1691#ifdef CONFIG_MEMORY_HOTPLUG
1692 set_online_page_callback(&hv_online_page);
1693 register_memory_notifier(&hv_memory_nb);
1694#endif
1695
1696 hv_set_drvdata(dev, &dm_device);
1697
1698 ret = balloon_connect_vsp(dev);
1699 if (ret != 0)
1700 return ret;
1701
1706 dm_device.state = DM_INITIALIZED; 1702 dm_device.state = DM_INITIALIZED;
1707 last_post_time = jiffies; 1703
1704 dm_device.thread =
1705 kthread_run(dm_thread_func, &dm_device, "hv_balloon");
1706 if (IS_ERR(dm_device.thread)) {
1707 ret = PTR_ERR(dm_device.thread);
1708 goto probe_error;
1709 }
1708 1710
1709 return 0; 1711 return 0;
1710 1712
1711probe_error2: 1713probe_error:
1714 vmbus_close(dev->channel);
1712#ifdef CONFIG_MEMORY_HOTPLUG 1715#ifdef CONFIG_MEMORY_HOTPLUG
1716 unregister_memory_notifier(&hv_memory_nb);
1713 restore_online_page_callback(&hv_online_page); 1717 restore_online_page_callback(&hv_online_page);
1714#endif 1718#endif
1715 kthread_stop(dm_device.thread);
1716
1717probe_error1:
1718 vmbus_close(dev->channel);
1719probe_error0:
1720 kfree(send_buffer);
1721 return ret; 1719 return ret;
1722} 1720}
1723 1721
@@ -1734,12 +1732,11 @@ static int balloon_remove(struct hv_device *dev)
1734 cancel_work_sync(&dm->balloon_wrk.wrk); 1732 cancel_work_sync(&dm->balloon_wrk.wrk);
1735 cancel_work_sync(&dm->ha_wrk.wrk); 1733 cancel_work_sync(&dm->ha_wrk.wrk);
1736 1734
1737 vmbus_close(dev->channel);
1738 kthread_stop(dm->thread); 1735 kthread_stop(dm->thread);
1739 kfree(send_buffer); 1736 vmbus_close(dev->channel);
1740#ifdef CONFIG_MEMORY_HOTPLUG 1737#ifdef CONFIG_MEMORY_HOTPLUG
1741 restore_online_page_callback(&hv_online_page);
1742 unregister_memory_notifier(&hv_memory_nb); 1738 unregister_memory_notifier(&hv_memory_nb);
1739 restore_online_page_callback(&hv_online_page);
1743#endif 1740#endif
1744 spin_lock_irqsave(&dm_device.ha_lock, flags); 1741 spin_lock_irqsave(&dm_device.ha_lock, flags);
1745 list_for_each_entry_safe(has, tmp, &dm->ha_region_list, list) { 1742 list_for_each_entry_safe(has, tmp, &dm->ha_region_list, list) {
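The hv_balloon.c change above moves all host hand-shaking into balloon_connect_vsp(): the channel is opened first, and any later negotiation failure closes it again, so balloon_probe() is left with a single error label. A minimal sketch of that "open, negotiate, close on error" shape, where demo_open()/demo_negotiate_*/demo_close() are hypothetical stand-ins for vmbus_open(), the version and capabilities exchanges, and vmbus_close():

struct demo_dev;                               /* opaque device handle (hypothetical) */

int demo_open(struct demo_dev *dev);
int demo_negotiate_version(struct demo_dev *dev);
int demo_negotiate_caps(struct demo_dev *dev);
void demo_close(struct demo_dev *dev);

int demo_connect(struct demo_dev *dev)
{
        int ret;

        ret = demo_open(dev);
        if (ret)
                return ret;                    /* nothing to undo yet */

        ret = demo_negotiate_version(dev);
        if (ret)
                goto out;

        ret = demo_negotiate_caps(dev);
        if (ret)
                goto out;

        return 0;
out:
        demo_close(dev);                       /* undo the only resource held so far */
        return ret;
}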
diff --git a/drivers/hv/hyperv_vmbus.h b/drivers/hv/hyperv_vmbus.h
index 50eaa1fd6e45..af9379a3bf89 100644
--- a/drivers/hv/hyperv_vmbus.h
+++ b/drivers/hv/hyperv_vmbus.h
@@ -169,8 +169,10 @@ extern int hv_synic_alloc(void);
169 169
170extern void hv_synic_free(void); 170extern void hv_synic_free(void);
171 171
172extern void hv_synic_enable_regs(unsigned int cpu);
172extern int hv_synic_init(unsigned int cpu); 173extern int hv_synic_init(unsigned int cpu);
173 174
175extern void hv_synic_disable_regs(unsigned int cpu);
174extern int hv_synic_cleanup(unsigned int cpu); 176extern int hv_synic_cleanup(unsigned int cpu);
175 177
176/* Interface */ 178/* Interface */
@@ -256,6 +258,32 @@ struct vmbus_connection {
256 struct workqueue_struct *work_queue; 258 struct workqueue_struct *work_queue;
257 struct workqueue_struct *handle_primary_chan_wq; 259 struct workqueue_struct *handle_primary_chan_wq;
258 struct workqueue_struct *handle_sub_chan_wq; 260 struct workqueue_struct *handle_sub_chan_wq;
261
262 /*
263 * The number of sub-channels and hv_sock channels that should be
264 * cleaned up upon suspend: sub-channels will be re-created upon
265 * resume, and hv_sock channels should not survive suspend.
266 */
267 atomic_t nr_chan_close_on_suspend;
268 /*
269 * vmbus_bus_suspend() waits for "nr_chan_close_on_suspend" to
270 * drop to zero.
271 */
272 struct completion ready_for_suspend_event;
273
274 /*
275 * The number of primary channels that should be "fixed up"
276 * upon resume: these channels are re-offered upon resume, and some
277 * fields of the channel offers (i.e. child_relid and connection_id)
278 * can change, so the old offermsg must be fixed up, before the resume
279 * callbacks of the VSC drivers start to further touch the channels.
280 */
281 atomic_t nr_chan_fixup_on_resume;
282 /*
283 * vmbus_bus_resume() waits for "nr_chan_fixup_on_resume" to
284 * drop to zero.
285 */
286 struct completion ready_for_resume_event;
259}; 287};
260 288
261 289
@@ -270,6 +298,8 @@ struct vmbus_msginfo {
270 298
271extern struct vmbus_connection vmbus_connection; 299extern struct vmbus_connection vmbus_connection;
272 300
301int vmbus_negotiate_version(struct vmbus_channel_msginfo *msginfo, u32 version);
302
273static inline void vmbus_send_interrupt(u32 relid) 303static inline void vmbus_send_interrupt(u32 relid)
274{ 304{
275 sync_set_bit(relid, vmbus_connection.send_int_page); 305 sync_set_bit(relid, vmbus_connection.send_int_page);
diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
index ebd35fc35290..391f0b225c9a 100644
--- a/drivers/hv/vmbus_drv.c
+++ b/drivers/hv/vmbus_drv.c
@@ -24,12 +24,14 @@
24#include <linux/sched/task_stack.h> 24#include <linux/sched/task_stack.h>
25 25
26#include <asm/mshyperv.h> 26#include <asm/mshyperv.h>
27#include <linux/delay.h>
27#include <linux/notifier.h> 28#include <linux/notifier.h>
28#include <linux/ptrace.h> 29#include <linux/ptrace.h>
29#include <linux/screen_info.h> 30#include <linux/screen_info.h>
30#include <linux/kdebug.h> 31#include <linux/kdebug.h>
31#include <linux/efi.h> 32#include <linux/efi.h>
32#include <linux/random.h> 33#include <linux/random.h>
34#include <linux/syscore_ops.h>
33#include <clocksource/hyperv_timer.h> 35#include <clocksource/hyperv_timer.h>
34#include "hyperv_vmbus.h" 36#include "hyperv_vmbus.h"
35 37
@@ -910,6 +912,43 @@ static void vmbus_shutdown(struct device *child_device)
910 drv->shutdown(dev); 912 drv->shutdown(dev);
911} 913}
912 914
915/*
916 * vmbus_suspend - Suspend a vmbus device
917 */
918static int vmbus_suspend(struct device *child_device)
919{
920 struct hv_driver *drv;
921 struct hv_device *dev = device_to_hv_device(child_device);
922
923 /* The device may not be attached yet */
924 if (!child_device->driver)
925 return 0;
926
927 drv = drv_to_hv_drv(child_device->driver);
928 if (!drv->suspend)
929 return -EOPNOTSUPP;
930
931 return drv->suspend(dev);
932}
933
934/*
935 * vmbus_resume - Resume a vmbus device
936 */
937static int vmbus_resume(struct device *child_device)
938{
939 struct hv_driver *drv;
940 struct hv_device *dev = device_to_hv_device(child_device);
941
942 /* The device may not be attached yet */
943 if (!child_device->driver)
944 return 0;
945
946 drv = drv_to_hv_drv(child_device->driver);
947 if (!drv->resume)
948 return -EOPNOTSUPP;
949
950 return drv->resume(dev);
951}
913 952
914/* 953/*
915 * vmbus_device_release - Final callback release of the vmbus child device 954 * vmbus_device_release - Final callback release of the vmbus child device
@@ -925,6 +964,14 @@ static void vmbus_device_release(struct device *device)
925 kfree(hv_dev); 964 kfree(hv_dev);
926} 965}
927 966
967/*
968 * Note: we must use SET_NOIRQ_SYSTEM_SLEEP_PM_OPS rather than
969 * SET_SYSTEM_SLEEP_PM_OPS: see the comment before vmbus_bus_pm.
970 */
971static const struct dev_pm_ops vmbus_pm = {
972 SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(vmbus_suspend, vmbus_resume)
973};
974
928/* The one and only one */ 975/* The one and only one */
929static struct bus_type hv_bus = { 976static struct bus_type hv_bus = {
930 .name = "vmbus", 977 .name = "vmbus",
@@ -935,6 +982,7 @@ static struct bus_type hv_bus = {
935 .uevent = vmbus_uevent, 982 .uevent = vmbus_uevent,
936 .dev_groups = vmbus_dev_groups, 983 .dev_groups = vmbus_dev_groups,
937 .drv_groups = vmbus_drv_groups, 984 .drv_groups = vmbus_drv_groups,
985 .pm = &vmbus_pm,
938}; 986};
939 987
940struct onmessage_work_context { 988struct onmessage_work_context {
@@ -1022,6 +1070,41 @@ msg_handled:
1022 vmbus_signal_eom(msg, message_type); 1070 vmbus_signal_eom(msg, message_type);
1023} 1071}
1024 1072
1073/*
1074 * Fake RESCIND_CHANNEL messages to clean up hv_sock channels by force for
1075 * hibernation, because hv_sock connections can not persist across hibernation.
1076 */
1077static void vmbus_force_channel_rescinded(struct vmbus_channel *channel)
1078{
1079 struct onmessage_work_context *ctx;
1080 struct vmbus_channel_rescind_offer *rescind;
1081
1082 WARN_ON(!is_hvsock_channel(channel));
1083
1084 /*
1085 * sizeof(*ctx) is small and the allocation should really not fail,
1086 * otherwise the state of the hv_sock connections ends up in limbo.
1087 */
1088 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL | __GFP_NOFAIL);
1089
1090 /*
1091 * So far, these are not really used by Linux. Just set them to the
1092 * reasonable values conforming to the definitions of the fields.
1093 */
1094 ctx->msg.header.message_type = 1;
1095 ctx->msg.header.payload_size = sizeof(*rescind);
1096
1097 /* These values are actually used by Linux. */
1098 rescind = (struct vmbus_channel_rescind_offer *)ctx->msg.u.payload;
1099 rescind->header.msgtype = CHANNELMSG_RESCIND_CHANNELOFFER;
1100 rescind->child_relid = channel->offermsg.child_relid;
1101
1102 INIT_WORK(&ctx->work, vmbus_onmessage_work);
1103
1104 queue_work_on(vmbus_connection.connect_cpu,
1105 vmbus_connection.work_queue,
1106 &ctx->work);
1107}
1025 1108
1026/* 1109/*
1027 * Direct callback for channels using other deferred processing 1110 * Direct callback for channels using other deferred processing
@@ -2042,6 +2125,129 @@ acpi_walk_err:
2042 return ret_val; 2125 return ret_val;
2043} 2126}
2044 2127
2128static int vmbus_bus_suspend(struct device *dev)
2129{
2130 struct vmbus_channel *channel, *sc;
2131 unsigned long flags;
2132
2133 while (atomic_read(&vmbus_connection.offer_in_progress) != 0) {
2134 /*
2135 * We wait here until the completion of any channel
2136 * offers that are currently in progress.
2137 */
2138 msleep(1);
2139 }
2140
2141 mutex_lock(&vmbus_connection.channel_mutex);
2142 list_for_each_entry(channel, &vmbus_connection.chn_list, listentry) {
2143 if (!is_hvsock_channel(channel))
2144 continue;
2145
2146 vmbus_force_channel_rescinded(channel);
2147 }
2148 mutex_unlock(&vmbus_connection.channel_mutex);
2149
2150 /*
2151 * Wait until all the sub-channels and hv_sock channels have been
2152 * cleaned up. Sub-channels should be destroyed upon suspend, otherwise
2153 * they would conflict with the new sub-channels that will be created
2154 * in the resume path. hv_sock channels should also be destroyed, but
2155 * a hv_sock channel of an established hv_sock connection can not be
2156 * really destroyed since it may still be referenced by the userspace
2157 * application, so we just force the hv_sock channel to be rescinded
2158 * by vmbus_force_channel_rescinded(), and the userspace application
2159 * will thoroughly destroy the channel after hibernation.
2160 *
2161 * Note: the counter nr_chan_close_on_suspend may never go above 0 if
2162 * the VM has no sub-channel and hv_sock channel, e.g. a 1-vCPU VM.
2163 */
2164 if (atomic_read(&vmbus_connection.nr_chan_close_on_suspend) > 0)
2165 wait_for_completion(&vmbus_connection.ready_for_suspend_event);
2166
2167 WARN_ON(atomic_read(&vmbus_connection.nr_chan_fixup_on_resume) != 0);
2168
2169 mutex_lock(&vmbus_connection.channel_mutex);
2170
2171 list_for_each_entry(channel, &vmbus_connection.chn_list, listentry) {
2172 /*
2173 * Invalidate the field. Upon resume, vmbus_onoffer() will fix
2174 * up the field, and the other fields (if necessary).
2175 */
2176 channel->offermsg.child_relid = INVALID_RELID;
2177
2178 if (is_hvsock_channel(channel)) {
2179 if (!channel->rescind) {
2180 pr_err("hv_sock channel not rescinded!\n");
2181 WARN_ON_ONCE(1);
2182 }
2183 continue;
2184 }
2185
2186 spin_lock_irqsave(&channel->lock, flags);
2187 list_for_each_entry(sc, &channel->sc_list, sc_list) {
2188 pr_err("Sub-channel not deleted!\n");
2189 WARN_ON_ONCE(1);
2190 }
2191 spin_unlock_irqrestore(&channel->lock, flags);
2192
2193 atomic_inc(&vmbus_connection.nr_chan_fixup_on_resume);
2194 }
2195
2196 mutex_unlock(&vmbus_connection.channel_mutex);
2197
2198 vmbus_initiate_unload(false);
2199
2200 vmbus_connection.conn_state = DISCONNECTED;
2201
2202 /* Reset the event for the next resume. */
2203 reinit_completion(&vmbus_connection.ready_for_resume_event);
2204
2205 return 0;
2206}
2207
2208static int vmbus_bus_resume(struct device *dev)
2209{
2210 struct vmbus_channel_msginfo *msginfo;
2211 size_t msgsize;
2212 int ret;
2213
2214 /*
2215 * We only use the 'vmbus_proto_version', which was in use before
2216 * hibernation, to re-negotiate with the host.
2217 */
2218 if (vmbus_proto_version == VERSION_INVAL ||
2219 vmbus_proto_version == 0) {
2220 pr_err("Invalid proto version = 0x%x\n", vmbus_proto_version);
2221 return -EINVAL;
2222 }
2223
2224 msgsize = sizeof(*msginfo) +
2225 sizeof(struct vmbus_channel_initiate_contact);
2226
2227 msginfo = kzalloc(msgsize, GFP_KERNEL);
2228
2229 if (msginfo == NULL)
2230 return -ENOMEM;
2231
2232 ret = vmbus_negotiate_version(msginfo, vmbus_proto_version);
2233
2234 kfree(msginfo);
2235
2236 if (ret != 0)
2237 return ret;
2238
2239 WARN_ON(atomic_read(&vmbus_connection.nr_chan_fixup_on_resume) == 0);
2240
2241 vmbus_request_offers();
2242
2243 wait_for_completion(&vmbus_connection.ready_for_resume_event);
2244
2245 /* Reset the event for the next suspend. */
2246 reinit_completion(&vmbus_connection.ready_for_suspend_event);
2247
2248 return 0;
2249}
2250
2045static const struct acpi_device_id vmbus_acpi_device_ids[] = { 2251static const struct acpi_device_id vmbus_acpi_device_ids[] = {
2046 {"VMBUS", 0}, 2252 {"VMBUS", 0},
2047 {"VMBus", 0}, 2253 {"VMBus", 0},
@@ -2049,6 +2255,19 @@ static const struct acpi_device_id vmbus_acpi_device_ids[] = {
2049}; 2255};
2050MODULE_DEVICE_TABLE(acpi, vmbus_acpi_device_ids); 2256MODULE_DEVICE_TABLE(acpi, vmbus_acpi_device_ids);
2051 2257
2258/*
2259 * Note: we must use SET_NOIRQ_SYSTEM_SLEEP_PM_OPS rather than
2260 * SET_SYSTEM_SLEEP_PM_OPS, otherwise NIC SR-IOV can not work, because the
2261 * "pci_dev_pm_ops" uses the "noirq" callbacks: in the resume path, the
2262 * pci "noirq" restore callback runs before "non-noirq" callbacks (see
2263 * resume_target_kernel() -> dpm_resume_start(), and hibernation_restore() ->
2264 * dpm_resume_end()). This means vmbus_bus_resume() and the pci-hyperv's
2265 * resume callback must also run via the "noirq" callbacks.
2266 */
2267static const struct dev_pm_ops vmbus_bus_pm = {
2268 SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(vmbus_bus_suspend, vmbus_bus_resume)
2269};
2270
2052static struct acpi_driver vmbus_acpi_driver = { 2271static struct acpi_driver vmbus_acpi_driver = {
2053 .name = "vmbus", 2272 .name = "vmbus",
2054 .ids = vmbus_acpi_device_ids, 2273 .ids = vmbus_acpi_device_ids,
@@ -2056,6 +2275,7 @@ static struct acpi_driver vmbus_acpi_driver = {
2056 .add = vmbus_acpi_add, 2275 .add = vmbus_acpi_add,
2057 .remove = vmbus_acpi_remove, 2276 .remove = vmbus_acpi_remove,
2058 }, 2277 },
2278 .drv.pm = &vmbus_bus_pm,
2059}; 2279};
2060 2280
2061static void hv_kexec_handler(void) 2281static void hv_kexec_handler(void)
@@ -2086,6 +2306,47 @@ static void hv_crash_handler(struct pt_regs *regs)
2086 hyperv_cleanup(); 2306 hyperv_cleanup();
2087}; 2307};
2088 2308
2309static int hv_synic_suspend(void)
2310{
2311 /*
2312 * When we reach here, all the non-boot CPUs have been offlined, and
2313 * the stimers on them have been unbound in hv_synic_cleanup() ->
2314 * hv_stimer_cleanup() -> clockevents_unbind_device().
2315 *
2316 * hv_synic_suspend() only runs on CPU0 with interrupts disabled. Here
2317 * we do not unbind the stimer on CPU0 because: 1) it's unnecessary
2318 * because the interrupts remain disabled between syscore_suspend()
2319 * and syscore_resume(): see create_image() and resume_target_kernel();
2320 * 2) the stimer on CPU0 is automatically disabled later by
2321 * syscore_suspend() -> timekeeping_suspend() -> tick_suspend() -> ...
2322 * -> clockevents_shutdown() -> ... -> hv_ce_shutdown(); 3) a warning
2323 * would be triggered if we call clockevents_unbind_device(), which
2324 * may sleep, in an interrupts-disabled context. So, we intentionally
2325 * don't call hv_stimer_cleanup(0) here.
2326 */
2327
2328 hv_synic_disable_regs(0);
2329
2330 return 0;
2331}
2332
2333static void hv_synic_resume(void)
2334{
2335 hv_synic_enable_regs(0);
2336
2337 /*
2338 * Note: we don't need to call hv_stimer_init(0), because the timer
2339 * on CPU0 is not unbound in hv_synic_suspend(), and the timer is
2340 * automatically re-enabled in timekeeping_resume().
2341 */
2342}
2343
2344/* The callbacks run only on CPU0, with irqs_disabled. */
2345static struct syscore_ops hv_synic_syscore_ops = {
2346 .suspend = hv_synic_suspend,
2347 .resume = hv_synic_resume,
2348};
2349
2089static int __init hv_acpi_init(void) 2350static int __init hv_acpi_init(void)
2090{ 2351{
2091 int ret, t; 2352 int ret, t;
@@ -2116,6 +2377,8 @@ static int __init hv_acpi_init(void)
2116 hv_setup_kexec_handler(hv_kexec_handler); 2377 hv_setup_kexec_handler(hv_kexec_handler);
2117 hv_setup_crash_handler(hv_crash_handler); 2378 hv_setup_crash_handler(hv_crash_handler);
2118 2379
2380 register_syscore_ops(&hv_synic_syscore_ops);
2381
2119 return 0; 2382 return 0;
2120 2383
2121cleanup: 2384cleanup:
@@ -2128,6 +2391,8 @@ static void __exit vmbus_exit(void)
2128{ 2391{
2129 int cpu; 2392 int cpu;
2130 2393
2394 unregister_syscore_ops(&hv_synic_syscore_ops);
2395
2131 hv_remove_kexec_handler(); 2396 hv_remove_kexec_handler();
2132 hv_remove_crash_handler(); 2397 hv_remove_crash_handler();
2133 vmbus_connection.conn_state = DISCONNECTED; 2398 vmbus_connection.conn_state = DISCONNECTED;
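The vmbus_drv.c changes above wire suspend/resume at three levels: per-device callbacks through hv_bus's pm ops, bus-level work through the ACPI driver's drv.pm, and the CPU0 SynIC registers through syscore_ops (which run with interrupts disabled after all device callbacks). All the sleep ops use the "noirq" phase for the PCI ordering reason spelled out in the in-tree comment. The sketch below shows only the registration boilerplate those layers rely on; all demo_* names are hypothetical, and the pm ops struct would be hooked up via a bus_type's or driver's .pm field.

#include <linux/device.h>
#include <linux/init.h>
#include <linux/pm.h>
#include <linux/syscore_ops.h>

int demo_dev_suspend(struct device *dev) { return 0; }
int demo_dev_resume(struct device *dev)  { return 0; }

/*
 * "noirq" sleep ops run after the normal suspend callbacks and before the
 * normal resume callbacks, alongside the PCI noirq phase.
 */
static const struct dev_pm_ops demo_pm = {
        SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(demo_dev_suspend, demo_dev_resume)
};

int demo_core_suspend(void) { return 0; }      /* runs on CPU0 only, IRQs off */
void demo_core_resume(void) { }

static struct syscore_ops demo_syscore_ops = {
        .suspend = demo_core_suspend,
        .resume  = demo_core_resume,
};

static int __init demo_init(void)
{
        register_syscore_ops(&demo_syscore_ops);
        return 0;
}

static void __exit demo_exit(void)
{
        unregister_syscore_ops(&demo_syscore_ops);
}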
diff --git a/drivers/infiniband/core/rw.c b/drivers/infiniband/core/rw.c
index dce06108c8c3..5337393d4dfe 100644
--- a/drivers/infiniband/core/rw.c
+++ b/drivers/infiniband/core/rw.c
@@ -583,8 +583,10 @@ void rdma_rw_ctx_destroy(struct rdma_rw_ctx *ctx, struct ib_qp *qp, u8 port_num,
583 break; 583 break;
584 } 584 }
585 585
586 /* P2PDMA contexts do not need to be unmapped */ 586 if (is_pci_p2pdma_page(sg_page(sg)))
587 if (!is_pci_p2pdma_page(sg_page(sg))) 587 pci_p2pdma_unmap_sg(qp->pd->device->dma_device, sg,
588 sg_cnt, dir);
589 else
588 ib_dma_unmap_sg(qp->pd->device, sg, sg_cnt, dir); 590 ib_dma_unmap_sg(qp->pd->device, sg, sg_cnt, dir);
589} 591}
590EXPORT_SYMBOL(rdma_rw_ctx_destroy); 592EXPORT_SYMBOL(rdma_rw_ctx_destroy);
diff --git a/drivers/mfd/88pm800.c b/drivers/mfd/88pm800.c
index f2d9fb4c4e8e..4e8d0d6b9b5c 100644
--- a/drivers/mfd/88pm800.c
+++ b/drivers/mfd/88pm800.c
@@ -425,10 +425,10 @@ static int pm800_pages_init(struct pm80x_chip *chip)
425 return -ENODEV; 425 return -ENODEV;
426 426
427 /* PM800 block power page */ 427 /* PM800 block power page */
428 subchip->power_page = i2c_new_dummy(client->adapter, 428 subchip->power_page = i2c_new_dummy_device(client->adapter,
429 subchip->power_page_addr); 429 subchip->power_page_addr);
430 if (subchip->power_page == NULL) { 430 if (IS_ERR(subchip->power_page)) {
431 ret = -ENODEV; 431 ret = PTR_ERR(subchip->power_page);
432 goto out; 432 goto out;
433 } 433 }
434 434
@@ -444,10 +444,10 @@ static int pm800_pages_init(struct pm80x_chip *chip)
444 i2c_set_clientdata(subchip->power_page, chip); 444 i2c_set_clientdata(subchip->power_page, chip);
445 445
446 /* PM800 block GPADC */ 446 /* PM800 block GPADC */
447 subchip->gpadc_page = i2c_new_dummy(client->adapter, 447 subchip->gpadc_page = i2c_new_dummy_device(client->adapter,
448 subchip->gpadc_page_addr); 448 subchip->gpadc_page_addr);
449 if (subchip->gpadc_page == NULL) { 449 if (IS_ERR(subchip->gpadc_page)) {
450 ret = -ENODEV; 450 ret = PTR_ERR(subchip->gpadc_page);
451 goto out; 451 goto out;
452 } 452 }
453 453
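The 88pm800.c hunks above, like several later MFD hunks in this diff (88pm860x, ab3100, bcm590xx, da9150), follow the same conversion: i2c_new_dummy() returned NULL on failure, while i2c_new_dummy_device() returns an ERR_PTR, so the checks switch from NULL tests to IS_ERR()/PTR_ERR() and the real error code is propagated instead of a blanket -ENODEV. A minimal sketch of the new calling convention; the function name and the second address are placeholders.

#include <linux/err.h>
#include <linux/i2c.h>

int demo_attach_second_address(struct i2c_client *client, u16 second_addr)
{
        struct i2c_client *companion;

        companion = i2c_new_dummy_device(client->adapter, second_addr);
        if (IS_ERR(companion))
                return PTR_ERR(companion);     /* propagate the real error */

        i2c_set_clientdata(companion, client);
        return 0;
}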
diff --git a/drivers/mfd/88pm860x-core.c b/drivers/mfd/88pm860x-core.c
index 9e0bd135730f..c9bae71f643a 100644
--- a/drivers/mfd/88pm860x-core.c
+++ b/drivers/mfd/88pm860x-core.c
@@ -1178,12 +1178,12 @@ static int pm860x_probe(struct i2c_client *client)
1178 */ 1178 */
1179 if (pdata->companion_addr && (pdata->companion_addr != client->addr)) { 1179 if (pdata->companion_addr && (pdata->companion_addr != client->addr)) {
1180 chip->companion_addr = pdata->companion_addr; 1180 chip->companion_addr = pdata->companion_addr;
1181 chip->companion = i2c_new_dummy(chip->client->adapter, 1181 chip->companion = i2c_new_dummy_device(chip->client->adapter,
1182 chip->companion_addr); 1182 chip->companion_addr);
1183 if (!chip->companion) { 1183 if (IS_ERR(chip->companion)) {
1184 dev_err(&client->dev, 1184 dev_err(&client->dev,
1185 "Failed to allocate I2C companion device\n"); 1185 "Failed to allocate I2C companion device\n");
1186 return -ENODEV; 1186 return PTR_ERR(chip->companion);
1187 } 1187 }
1188 chip->regmap_companion = regmap_init_i2c(chip->companion, 1188 chip->regmap_companion = regmap_init_i2c(chip->companion,
1189 &pm860x_regmap_config); 1189 &pm860x_regmap_config);
diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig
index c9c49da42446..ae24d3ea68ea 100644
--- a/drivers/mfd/Kconfig
+++ b/drivers/mfd/Kconfig
@@ -589,6 +589,17 @@ config INTEL_SOC_PMIC_CHTDC_TI
589 Select this option for supporting Dollar Cove (TI version) PMIC 589 Select this option for supporting Dollar Cove (TI version) PMIC
590 device that is found on some Intel Cherry Trail systems. 590 device that is found on some Intel Cherry Trail systems.
591 591
592config INTEL_SOC_PMIC_MRFLD
593 tristate "Support for Intel Merrifield Basin Cove PMIC"
594 depends on GPIOLIB
595 depends on ACPI
596 depends on INTEL_SCU_IPC
597 select MFD_CORE
598 select REGMAP_IRQ
599 help
600 Select this option for supporting Basin Cove PMIC device
601 that is found on Intel Merrifield systems.
602
592config MFD_INTEL_LPSS 603config MFD_INTEL_LPSS
593 tristate 604 tristate
594 select COMMON_CLK 605 select COMMON_CLK
@@ -641,15 +652,6 @@ config MFD_JANZ_CMODIO
641 host many different types of MODULbus daughterboards, including 652 host many different types of MODULbus daughterboards, including
642 CAN and GPIO controllers. 653 CAN and GPIO controllers.
643 654
644config MFD_JZ4740_ADC
645 bool "Janz JZ4740 ADC core"
646 select MFD_CORE
647 select GENERIC_IRQ_CHIP
648 depends on MACH_JZ4740
649 help
650 Say yes here if you want support for the ADC unit in the JZ4740 SoC.
651 This driver is necessary for jz4740-battery and jz4740-hwmon driver.
652
653config MFD_KEMPLD 655config MFD_KEMPLD
654 tristate "Kontron module PLD device" 656 tristate "Kontron module PLD device"
655 select MFD_CORE 657 select MFD_CORE
diff --git a/drivers/mfd/Makefile b/drivers/mfd/Makefile
index 0c0a848e62df..c1067ea46204 100644
--- a/drivers/mfd/Makefile
+++ b/drivers/mfd/Makefile
@@ -189,7 +189,6 @@ obj-$(CONFIG_LPC_SCH) += lpc_sch.o
189obj-$(CONFIG_LPC_ICH) += lpc_ich.o 189obj-$(CONFIG_LPC_ICH) += lpc_ich.o
190obj-$(CONFIG_MFD_RDC321X) += rdc321x-southbridge.o 190obj-$(CONFIG_MFD_RDC321X) += rdc321x-southbridge.o
191obj-$(CONFIG_MFD_JANZ_CMODIO) += janz-cmodio.o 191obj-$(CONFIG_MFD_JANZ_CMODIO) += janz-cmodio.o
192obj-$(CONFIG_MFD_JZ4740_ADC) += jz4740-adc.o
193obj-$(CONFIG_MFD_TPS6586X) += tps6586x.o 192obj-$(CONFIG_MFD_TPS6586X) += tps6586x.o
194obj-$(CONFIG_MFD_VX855) += vx855.o 193obj-$(CONFIG_MFD_VX855) += vx855.o
195obj-$(CONFIG_MFD_WL1273_CORE) += wl1273-core.o 194obj-$(CONFIG_MFD_WL1273_CORE) += wl1273-core.o
@@ -239,7 +238,9 @@ obj-$(CONFIG_INTEL_SOC_PMIC) += intel-soc-pmic.o
239obj-$(CONFIG_INTEL_SOC_PMIC_BXTWC) += intel_soc_pmic_bxtwc.o 238obj-$(CONFIG_INTEL_SOC_PMIC_BXTWC) += intel_soc_pmic_bxtwc.o
240obj-$(CONFIG_INTEL_SOC_PMIC_CHTWC) += intel_soc_pmic_chtwc.o 239obj-$(CONFIG_INTEL_SOC_PMIC_CHTWC) += intel_soc_pmic_chtwc.o
241obj-$(CONFIG_INTEL_SOC_PMIC_CHTDC_TI) += intel_soc_pmic_chtdc_ti.o 240obj-$(CONFIG_INTEL_SOC_PMIC_CHTDC_TI) += intel_soc_pmic_chtdc_ti.o
242obj-$(CONFIG_MFD_MT6397) += mt6397-core.o 241mt6397-objs := mt6397-core.o mt6397-irq.o
242obj-$(CONFIG_MFD_MT6397) += mt6397.o
243obj-$(CONFIG_INTEL_SOC_PMIC_MRFLD) += intel_soc_pmic_mrfld.o
243 244
244obj-$(CONFIG_MFD_ALTERA_A10SR) += altera-a10sr.o 245obj-$(CONFIG_MFD_ALTERA_A10SR) += altera-a10sr.o
245obj-$(CONFIG_MFD_ALTERA_SYSMGR) += altera-sysmgr.o 246obj-$(CONFIG_MFD_ALTERA_SYSMGR) += altera-sysmgr.o
diff --git a/drivers/mfd/ab3100-core.c b/drivers/mfd/ab3100-core.c
index 9f3dbc31d3e9..57723f116bb5 100644
--- a/drivers/mfd/ab3100-core.c
+++ b/drivers/mfd/ab3100-core.c
@@ -865,10 +865,10 @@ static int ab3100_probe(struct i2c_client *client,
865 &ab3100->chip_name[0]); 865 &ab3100->chip_name[0]);
866 866
867 /* Attach a second dummy i2c_client to the test register address */ 867 /* Attach a second dummy i2c_client to the test register address */
868 ab3100->testreg_client = i2c_new_dummy(client->adapter, 868 ab3100->testreg_client = i2c_new_dummy_device(client->adapter,
869 client->addr + 1); 869 client->addr + 1);
870 if (!ab3100->testreg_client) { 870 if (IS_ERR(ab3100->testreg_client)) {
871 err = -ENOMEM; 871 err = PTR_ERR(ab3100->testreg_client);
872 goto exit_no_testreg_client; 872 goto exit_no_testreg_client;
873 } 873 }
874 874
diff --git a/drivers/mfd/ab8500-debugfs.c b/drivers/mfd/ab8500-debugfs.c
index 567a34b073dd..f4e26b6e5362 100644
--- a/drivers/mfd/ab8500-debugfs.c
+++ b/drivers/mfd/ab8500-debugfs.c
@@ -2680,16 +2680,12 @@ static int ab8500_debug_probe(struct platform_device *plf)
2680 irq_ab8500 = res->start; 2680 irq_ab8500 = res->start;
2681 2681
2682 irq_first = platform_get_irq_byname(plf, "IRQ_FIRST"); 2682 irq_first = platform_get_irq_byname(plf, "IRQ_FIRST");
2683 if (irq_first < 0) { 2683 if (irq_first < 0)
2684 dev_err(&plf->dev, "First irq not found, err %d\n", irq_first);
2685 return irq_first; 2684 return irq_first;
2686 }
2687 2685
2688 irq_last = platform_get_irq_byname(plf, "IRQ_LAST"); 2686 irq_last = platform_get_irq_byname(plf, "IRQ_LAST");
2689 if (irq_last < 0) { 2687 if (irq_last < 0)
2690 dev_err(&plf->dev, "Last irq not found, err %d\n", irq_last);
2691 return irq_last; 2688 return irq_last;
2692 }
2693 2689
2694 ab8500_dir = debugfs_create_dir(AB8500_NAME_STRING, NULL); 2690 ab8500_dir = debugfs_create_dir(AB8500_NAME_STRING, NULL);
2695 2691
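The ab8500-debugfs.c hunk above, and the db8500-prcmu and fsl-imx25-tsadc hunks further down, drop the dev_err() wrappers around failed platform_get_irq*() calls: the platform core already logs the lookup failure, so callers just propagate the negative return value. A sketch of the resulting idiom, with a hypothetical probe function:

#include <linux/platform_device.h>

int demo_probe(struct platform_device *pdev)
{
        int irq;

        irq = platform_get_irq(pdev, 0);
        if (irq < 0)
                return irq;            /* the core already reported why */

        /* ... request the irq and continue probing ... */
        return 0;
}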
diff --git a/drivers/mfd/asic3.c b/drivers/mfd/asic3.c
index 83b18c998d6f..a6bd2134cea2 100644
--- a/drivers/mfd/asic3.c
+++ b/drivers/mfd/asic3.c
@@ -15,7 +15,7 @@
15#include <linux/kernel.h> 15#include <linux/kernel.h>
16#include <linux/delay.h> 16#include <linux/delay.h>
17#include <linux/irq.h> 17#include <linux/irq.h>
18#include <linux/gpio.h> 18#include <linux/gpio/driver.h>
19#include <linux/export.h> 19#include <linux/export.h>
20#include <linux/io.h> 20#include <linux/io.h>
21#include <linux/slab.h> 21#include <linux/slab.h>
diff --git a/drivers/mfd/bcm590xx.c b/drivers/mfd/bcm590xx.c
index 1aeb5e498d91..bfac5dc091ca 100644
--- a/drivers/mfd/bcm590xx.c
+++ b/drivers/mfd/bcm590xx.c
@@ -61,11 +61,11 @@ static int bcm590xx_i2c_probe(struct i2c_client *i2c_pri,
61 } 61 }
62 62
63 /* Secondary I2C slave address is the base address with A(2) asserted */ 63 /* Secondary I2C slave address is the base address with A(2) asserted */
64 bcm590xx->i2c_sec = i2c_new_dummy(i2c_pri->adapter, 64 bcm590xx->i2c_sec = i2c_new_dummy_device(i2c_pri->adapter,
65 i2c_pri->addr | BIT(2)); 65 i2c_pri->addr | BIT(2));
66 if (!bcm590xx->i2c_sec) { 66 if (IS_ERR(bcm590xx->i2c_sec)) {
67 dev_err(&i2c_pri->dev, "failed to add secondary I2C device\n"); 67 dev_err(&i2c_pri->dev, "failed to add secondary I2C device\n");
68 return -ENODEV; 68 return PTR_ERR(bcm590xx->i2c_sec);
69 } 69 }
70 i2c_set_clientdata(bcm590xx->i2c_sec, bcm590xx); 70 i2c_set_clientdata(bcm590xx->i2c_sec, bcm590xx);
71 71
diff --git a/drivers/mfd/da9150-core.c b/drivers/mfd/da9150-core.c
index 13033068721a..7f0aa1e8db96 100644
--- a/drivers/mfd/da9150-core.c
+++ b/drivers/mfd/da9150-core.c
@@ -420,10 +420,10 @@ static int da9150_probe(struct i2c_client *client,
420 qif_addr = da9150_reg_read(da9150, DA9150_CORE2WIRE_CTRL_A); 420 qif_addr = da9150_reg_read(da9150, DA9150_CORE2WIRE_CTRL_A);
421 qif_addr = (qif_addr & DA9150_CORE_BASE_ADDR_MASK) >> 1; 421 qif_addr = (qif_addr & DA9150_CORE_BASE_ADDR_MASK) >> 1;
422 qif_addr |= DA9150_QIF_I2C_ADDR_LSB; 422 qif_addr |= DA9150_QIF_I2C_ADDR_LSB;
423 da9150->core_qif = i2c_new_dummy(client->adapter, qif_addr); 423 da9150->core_qif = i2c_new_dummy_device(client->adapter, qif_addr);
424 if (!da9150->core_qif) { 424 if (IS_ERR(da9150->core_qif)) {
425 dev_err(da9150->dev, "Failed to attach QIF client\n"); 425 dev_err(da9150->dev, "Failed to attach QIF client\n");
426 return -ENODEV; 426 return PTR_ERR(da9150->core_qif);
427 } 427 }
428 428
429 i2c_set_clientdata(da9150->core_qif, da9150); 429 i2c_set_clientdata(da9150->core_qif, da9150);
diff --git a/drivers/mfd/davinci_voicecodec.c b/drivers/mfd/davinci_voicecodec.c
index 13ca7203e193..e5c8bc998eb4 100644
--- a/drivers/mfd/davinci_voicecodec.c
+++ b/drivers/mfd/davinci_voicecodec.c
@@ -19,7 +19,6 @@
19#include <sound/pcm.h> 19#include <sound/pcm.h>
20 20
21#include <linux/mfd/davinci_voicecodec.h> 21#include <linux/mfd/davinci_voicecodec.h>
22#include <mach/hardware.h>
23 22
24static const struct regmap_config davinci_vc_regmap = { 23static const struct regmap_config davinci_vc_regmap = {
25 .reg_bits = 32, 24 .reg_bits = 32,
@@ -31,6 +30,7 @@ static int __init davinci_vc_probe(struct platform_device *pdev)
31 struct davinci_vc *davinci_vc; 30 struct davinci_vc *davinci_vc;
32 struct resource *res; 31 struct resource *res;
33 struct mfd_cell *cell = NULL; 32 struct mfd_cell *cell = NULL;
33 dma_addr_t fifo_base;
34 int ret; 34 int ret;
35 35
36 davinci_vc = devm_kzalloc(&pdev->dev, 36 davinci_vc = devm_kzalloc(&pdev->dev,
@@ -48,6 +48,7 @@ static int __init davinci_vc_probe(struct platform_device *pdev)
48 48
49 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 49 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
50 50
51 fifo_base = (dma_addr_t)res->start;
51 davinci_vc->base = devm_ioremap_resource(&pdev->dev, res); 52 davinci_vc->base = devm_ioremap_resource(&pdev->dev, res);
52 if (IS_ERR(davinci_vc->base)) { 53 if (IS_ERR(davinci_vc->base)) {
53 ret = PTR_ERR(davinci_vc->base); 54 ret = PTR_ERR(davinci_vc->base);
@@ -70,8 +71,7 @@ static int __init davinci_vc_probe(struct platform_device *pdev)
70 } 71 }
71 72
72 davinci_vc->davinci_vcif.dma_tx_channel = res->start; 73 davinci_vc->davinci_vcif.dma_tx_channel = res->start;
73 davinci_vc->davinci_vcif.dma_tx_addr = 74 davinci_vc->davinci_vcif.dma_tx_addr = fifo_base + DAVINCI_VC_WFIFO;
74 (dma_addr_t)(io_v2p(davinci_vc->base) + DAVINCI_VC_WFIFO);
75 75
76 res = platform_get_resource(pdev, IORESOURCE_DMA, 1); 76 res = platform_get_resource(pdev, IORESOURCE_DMA, 1);
77 if (!res) { 77 if (!res) {
@@ -81,8 +81,7 @@ static int __init davinci_vc_probe(struct platform_device *pdev)
81 } 81 }
82 82
83 davinci_vc->davinci_vcif.dma_rx_channel = res->start; 83 davinci_vc->davinci_vcif.dma_rx_channel = res->start;
84 davinci_vc->davinci_vcif.dma_rx_addr = 84 davinci_vc->davinci_vcif.dma_rx_addr = fifo_base + DAVINCI_VC_RFIFO;
85 (dma_addr_t)(io_v2p(davinci_vc->base) + DAVINCI_VC_RFIFO);
86 85
87 davinci_vc->dev = &pdev->dev; 86 davinci_vc->dev = &pdev->dev;
88 davinci_vc->pdev = pdev; 87 davinci_vc->pdev = pdev;
diff --git a/drivers/mfd/db8500-prcmu.c b/drivers/mfd/db8500-prcmu.c
index 90e0f21bc49c..0e019cc5da42 100644
--- a/drivers/mfd/db8500-prcmu.c
+++ b/drivers/mfd/db8500-prcmu.c
@@ -1695,21 +1695,41 @@ static long round_clock_rate(u8 clock, unsigned long rate)
1695 return rounded_rate; 1695 return rounded_rate;
1696} 1696}
1697 1697
1698static const unsigned long armss_freqs[] = { 1698static const unsigned long db8500_armss_freqs[] = {
1699 200000000, 1699 200000000,
1700 400000000, 1700 400000000,
1701 800000000, 1701 800000000,
1702 998400000 1702 998400000
1703}; 1703};
1704 1704
1705/* The DB8520 has slightly higher ARMSS max frequency */
1706static const unsigned long db8520_armss_freqs[] = {
1707 200000000,
1708 400000000,
1709 800000000,
1710 1152000000
1711};
1712
1713
1714
1705static long round_armss_rate(unsigned long rate) 1715static long round_armss_rate(unsigned long rate)
1706{ 1716{
1707 unsigned long freq = 0; 1717 unsigned long freq = 0;
1718 const unsigned long *freqs;
1719 int nfreqs;
1708 int i; 1720 int i;
1709 1721
1722 if (fw_info.version.project == PRCMU_FW_PROJECT_U8520) {
1723 freqs = db8520_armss_freqs;
1724 nfreqs = ARRAY_SIZE(db8520_armss_freqs);
1725 } else {
1726 freqs = db8500_armss_freqs;
1727 nfreqs = ARRAY_SIZE(db8500_armss_freqs);
1728 }
1729
1710 /* Find the corresponding arm opp from the cpufreq table. */ 1730 /* Find the corresponding arm opp from the cpufreq table. */
1711 for (i = 0; i < ARRAY_SIZE(armss_freqs); i++) { 1731 for (i = 0; i < nfreqs; i++) {
1712 freq = armss_freqs[i]; 1732 freq = freqs[i];
1713 if (rate <= freq) 1733 if (rate <= freq)
1714 break; 1734 break;
1715 } 1735 }
@@ -1854,11 +1874,21 @@ static int set_armss_rate(unsigned long rate)
1854{ 1874{
1855 unsigned long freq; 1875 unsigned long freq;
1856 u8 opps[] = { ARM_EXTCLK, ARM_50_OPP, ARM_100_OPP, ARM_MAX_OPP }; 1876 u8 opps[] = { ARM_EXTCLK, ARM_50_OPP, ARM_100_OPP, ARM_MAX_OPP };
1877 const unsigned long *freqs;
1878 int nfreqs;
1857 int i; 1879 int i;
1858 1880
1881 if (fw_info.version.project == PRCMU_FW_PROJECT_U8520) {
1882 freqs = db8520_armss_freqs;
1883 nfreqs = ARRAY_SIZE(db8520_armss_freqs);
1884 } else {
1885 freqs = db8500_armss_freqs;
1886 nfreqs = ARRAY_SIZE(db8500_armss_freqs);
1887 }
1888
1859 /* Find the corresponding arm opp from the cpufreq table. */ 1889 /* Find the corresponding arm opp from the cpufreq table. */
1860 for (i = 0; i < ARRAY_SIZE(armss_freqs); i++) { 1890 for (i = 0; i < nfreqs; i++) {
1861 freq = armss_freqs[i]; 1891 freq = freqs[i];
1862 if (rate == freq) 1892 if (rate == freq)
1863 break; 1893 break;
1864 } 1894 }
@@ -3130,10 +3160,8 @@ static int db8500_prcmu_probe(struct platform_device *pdev)
3130 writel(ALL_MBOX_BITS, PRCM_ARM_IT1_CLR); 3160 writel(ALL_MBOX_BITS, PRCM_ARM_IT1_CLR);
3131 3161
3132 irq = platform_get_irq(pdev, 0); 3162 irq = platform_get_irq(pdev, 0);
3133 if (irq <= 0) { 3163 if (irq <= 0)
3134 dev_err(&pdev->dev, "no prcmu irq provided\n");
3135 return irq; 3164 return irq;
3136 }
3137 3165
3138 err = request_threaded_irq(irq, prcmu_irq_handler, 3166 err = request_threaded_irq(irq, prcmu_irq_handler,
3139 prcmu_irq_thread_fn, IRQF_NO_SUSPEND, "prcmu", NULL); 3167 prcmu_irq_thread_fn, IRQF_NO_SUSPEND, "prcmu", NULL);
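round_armss_rate() and set_armss_rate() above now select one of two frequency tables based on the detected PRCMU firmware project and then scan the chosen table. The sketch below is a self-contained illustration of that "pick table, then look up" pattern with hypothetical data; it rounds a request up to the nearest supported operating point, falling back to the table maximum, as the original loop does.

#include <stddef.h>

static const unsigned long demo_std_freqs[]  = { 200000000, 400000000, 800000000,  998400000 };
static const unsigned long demo_fast_freqs[] = { 200000000, 400000000, 800000000, 1152000000 };

#define DEMO_ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

unsigned long demo_round_rate(int is_fast_variant, unsigned long rate)
{
        const unsigned long *freqs = is_fast_variant ? demo_fast_freqs : demo_std_freqs;
        size_t nfreqs = is_fast_variant ? DEMO_ARRAY_SIZE(demo_fast_freqs)
                                        : DEMO_ARRAY_SIZE(demo_std_freqs);
        unsigned long freq = 0;
        size_t i;

        for (i = 0; i < nfreqs; i++) {
                freq = freqs[i];
                if (rate <= freq)
                        break;
        }
        return freq;
}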
diff --git a/drivers/mfd/ezx-pcap.c b/drivers/mfd/ezx-pcap.c
index f505e3e1274b..70fa18b04ad2 100644
--- a/drivers/mfd/ezx-pcap.c
+++ b/drivers/mfd/ezx-pcap.c
@@ -35,7 +35,7 @@ struct pcap_chip {
35 35
36 /* IO */ 36 /* IO */
37 u32 buf; 37 u32 buf;
38 struct mutex io_mutex; 38 spinlock_t io_lock;
39 39
40 /* IRQ */ 40 /* IRQ */
41 unsigned int irq_base; 41 unsigned int irq_base;
@@ -48,7 +48,7 @@ struct pcap_chip {
48 struct pcap_adc_request *adc_queue[PCAP_ADC_MAXQ]; 48 struct pcap_adc_request *adc_queue[PCAP_ADC_MAXQ];
49 u8 adc_head; 49 u8 adc_head;
50 u8 adc_tail; 50 u8 adc_tail;
51 struct mutex adc_mutex; 51 spinlock_t adc_lock;
52}; 52};
53 53
54/* IO */ 54/* IO */
@@ -76,14 +76,15 @@ static int ezx_pcap_putget(struct pcap_chip *pcap, u32 *data)
76 76
77int ezx_pcap_write(struct pcap_chip *pcap, u8 reg_num, u32 value) 77int ezx_pcap_write(struct pcap_chip *pcap, u8 reg_num, u32 value)
78{ 78{
79 unsigned long flags;
79 int ret; 80 int ret;
80 81
81 mutex_lock(&pcap->io_mutex); 82 spin_lock_irqsave(&pcap->io_lock, flags);
82 value &= PCAP_REGISTER_VALUE_MASK; 83 value &= PCAP_REGISTER_VALUE_MASK;
83 value |= PCAP_REGISTER_WRITE_OP_BIT 84 value |= PCAP_REGISTER_WRITE_OP_BIT
84 | (reg_num << PCAP_REGISTER_ADDRESS_SHIFT); 85 | (reg_num << PCAP_REGISTER_ADDRESS_SHIFT);
85 ret = ezx_pcap_putget(pcap, &value); 86 ret = ezx_pcap_putget(pcap, &value);
86 mutex_unlock(&pcap->io_mutex); 87 spin_unlock_irqrestore(&pcap->io_lock, flags);
87 88
88 return ret; 89 return ret;
89} 90}
@@ -91,14 +92,15 @@ EXPORT_SYMBOL_GPL(ezx_pcap_write);
91 92
92int ezx_pcap_read(struct pcap_chip *pcap, u8 reg_num, u32 *value) 93int ezx_pcap_read(struct pcap_chip *pcap, u8 reg_num, u32 *value)
93{ 94{
95 unsigned long flags;
94 int ret; 96 int ret;
95 97
96 mutex_lock(&pcap->io_mutex); 98 spin_lock_irqsave(&pcap->io_lock, flags);
97 *value = PCAP_REGISTER_READ_OP_BIT 99 *value = PCAP_REGISTER_READ_OP_BIT
98 | (reg_num << PCAP_REGISTER_ADDRESS_SHIFT); 100 | (reg_num << PCAP_REGISTER_ADDRESS_SHIFT);
99 101
100 ret = ezx_pcap_putget(pcap, value); 102 ret = ezx_pcap_putget(pcap, value);
101 mutex_unlock(&pcap->io_mutex); 103 spin_unlock_irqrestore(&pcap->io_lock, flags);
102 104
103 return ret; 105 return ret;
104} 106}
@@ -106,11 +108,12 @@ EXPORT_SYMBOL_GPL(ezx_pcap_read);
106 108
107int ezx_pcap_set_bits(struct pcap_chip *pcap, u8 reg_num, u32 mask, u32 val) 109int ezx_pcap_set_bits(struct pcap_chip *pcap, u8 reg_num, u32 mask, u32 val)
108{ 110{
111 unsigned long flags;
109 int ret; 112 int ret;
110 u32 tmp = PCAP_REGISTER_READ_OP_BIT | 113 u32 tmp = PCAP_REGISTER_READ_OP_BIT |
111 (reg_num << PCAP_REGISTER_ADDRESS_SHIFT); 114 (reg_num << PCAP_REGISTER_ADDRESS_SHIFT);
112 115
113 mutex_lock(&pcap->io_mutex); 116 spin_lock_irqsave(&pcap->io_lock, flags);
114 ret = ezx_pcap_putget(pcap, &tmp); 117 ret = ezx_pcap_putget(pcap, &tmp);
115 if (ret) 118 if (ret)
116 goto out_unlock; 119 goto out_unlock;
@@ -121,7 +124,7 @@ int ezx_pcap_set_bits(struct pcap_chip *pcap, u8 reg_num, u32 mask, u32 val)
121 124
122 ret = ezx_pcap_putget(pcap, &tmp); 125 ret = ezx_pcap_putget(pcap, &tmp);
123out_unlock: 126out_unlock:
124 mutex_unlock(&pcap->io_mutex); 127 spin_unlock_irqrestore(&pcap->io_lock, flags);
125 128
126 return ret; 129 return ret;
127} 130}
@@ -212,14 +215,15 @@ static void pcap_irq_handler(struct irq_desc *desc)
212/* ADC */ 215/* ADC */
213void pcap_set_ts_bits(struct pcap_chip *pcap, u32 bits) 216void pcap_set_ts_bits(struct pcap_chip *pcap, u32 bits)
214{ 217{
218 unsigned long flags;
215 u32 tmp; 219 u32 tmp;
216 220
217 mutex_lock(&pcap->adc_mutex); 221 spin_lock_irqsave(&pcap->adc_lock, flags);
218 ezx_pcap_read(pcap, PCAP_REG_ADC, &tmp); 222 ezx_pcap_read(pcap, PCAP_REG_ADC, &tmp);
219 tmp &= ~(PCAP_ADC_TS_M_MASK | PCAP_ADC_TS_REF_LOWPWR); 223 tmp &= ~(PCAP_ADC_TS_M_MASK | PCAP_ADC_TS_REF_LOWPWR);
220 tmp |= bits & (PCAP_ADC_TS_M_MASK | PCAP_ADC_TS_REF_LOWPWR); 224 tmp |= bits & (PCAP_ADC_TS_M_MASK | PCAP_ADC_TS_REF_LOWPWR);
221 ezx_pcap_write(pcap, PCAP_REG_ADC, tmp); 225 ezx_pcap_write(pcap, PCAP_REG_ADC, tmp);
222 mutex_unlock(&pcap->adc_mutex); 226 spin_unlock_irqrestore(&pcap->adc_lock, flags);
223} 227}
224EXPORT_SYMBOL_GPL(pcap_set_ts_bits); 228EXPORT_SYMBOL_GPL(pcap_set_ts_bits);
225 229
@@ -234,15 +238,16 @@ static void pcap_disable_adc(struct pcap_chip *pcap)
234 238
235static void pcap_adc_trigger(struct pcap_chip *pcap) 239static void pcap_adc_trigger(struct pcap_chip *pcap)
236{ 240{
241 unsigned long flags;
237 u32 tmp; 242 u32 tmp;
238 u8 head; 243 u8 head;
239 244
240 mutex_lock(&pcap->adc_mutex); 245 spin_lock_irqsave(&pcap->adc_lock, flags);
241 head = pcap->adc_head; 246 head = pcap->adc_head;
242 if (!pcap->adc_queue[head]) { 247 if (!pcap->adc_queue[head]) {
243 /* queue is empty, save power */ 248 /* queue is empty, save power */
244 pcap_disable_adc(pcap); 249 pcap_disable_adc(pcap);
245 mutex_unlock(&pcap->adc_mutex); 250 spin_unlock_irqrestore(&pcap->adc_lock, flags);
246 return; 251 return;
247 } 252 }
248 /* start conversion on requested bank, save TS_M bits */ 253 /* start conversion on requested bank, save TS_M bits */
@@ -254,7 +259,7 @@ static void pcap_adc_trigger(struct pcap_chip *pcap)
254 tmp |= PCAP_ADC_AD_SEL1; 259 tmp |= PCAP_ADC_AD_SEL1;
255 260
256 ezx_pcap_write(pcap, PCAP_REG_ADC, tmp); 261 ezx_pcap_write(pcap, PCAP_REG_ADC, tmp);
257 mutex_unlock(&pcap->adc_mutex); 262 spin_unlock_irqrestore(&pcap->adc_lock, flags);
258 ezx_pcap_write(pcap, PCAP_REG_ADR, PCAP_ADR_ASC); 263 ezx_pcap_write(pcap, PCAP_REG_ADR, PCAP_ADR_ASC);
259} 264}
260 265
@@ -265,11 +270,11 @@ static irqreturn_t pcap_adc_irq(int irq, void *_pcap)
265 u16 res[2]; 270 u16 res[2];
266 u32 tmp; 271 u32 tmp;
267 272
268 mutex_lock(&pcap->adc_mutex); 273 spin_lock(&pcap->adc_lock);
269 req = pcap->adc_queue[pcap->adc_head]; 274 req = pcap->adc_queue[pcap->adc_head];
270 275
271 if (WARN(!req, "adc irq without pending request\n")) { 276 if (WARN(!req, "adc irq without pending request\n")) {
272 mutex_unlock(&pcap->adc_mutex); 277 spin_unlock(&pcap->adc_lock);
273 return IRQ_HANDLED; 278 return IRQ_HANDLED;
274 } 279 }
275 280
@@ -285,7 +290,7 @@ static irqreturn_t pcap_adc_irq(int irq, void *_pcap)
285 290
286 pcap->adc_queue[pcap->adc_head] = NULL; 291 pcap->adc_queue[pcap->adc_head] = NULL;
287 pcap->adc_head = (pcap->adc_head + 1) & (PCAP_ADC_MAXQ - 1); 292 pcap->adc_head = (pcap->adc_head + 1) & (PCAP_ADC_MAXQ - 1);
288 mutex_unlock(&pcap->adc_mutex); 293 spin_unlock(&pcap->adc_lock);
289 294
290 /* pass the results and release memory */ 295 /* pass the results and release memory */
291 req->callback(req->data, res); 296 req->callback(req->data, res);
@@ -301,6 +306,7 @@ int pcap_adc_async(struct pcap_chip *pcap, u8 bank, u32 flags, u8 ch[],
301 void *callback, void *data) 306 void *callback, void *data)
302{ 307{
303 struct pcap_adc_request *req; 308 struct pcap_adc_request *req;
309 unsigned long irq_flags;
304 310
305 /* This will be freed after we have a result */ 311 /* This will be freed after we have a result */
306 req = kmalloc(sizeof(struct pcap_adc_request), GFP_KERNEL); 312 req = kmalloc(sizeof(struct pcap_adc_request), GFP_KERNEL);
@@ -314,15 +320,15 @@ int pcap_adc_async(struct pcap_chip *pcap, u8 bank, u32 flags, u8 ch[],
314 req->callback = callback; 320 req->callback = callback;
315 req->data = data; 321 req->data = data;
316 322
317 mutex_lock(&pcap->adc_mutex); 323 spin_lock_irqsave(&pcap->adc_lock, irq_flags);
318 if (pcap->adc_queue[pcap->adc_tail]) { 324 if (pcap->adc_queue[pcap->adc_tail]) {
319 mutex_unlock(&pcap->adc_mutex); 325 spin_unlock_irqrestore(&pcap->adc_lock, irq_flags);
320 kfree(req); 326 kfree(req);
321 return -EBUSY; 327 return -EBUSY;
322 } 328 }
323 pcap->adc_queue[pcap->adc_tail] = req; 329 pcap->adc_queue[pcap->adc_tail] = req;
324 pcap->adc_tail = (pcap->adc_tail + 1) & (PCAP_ADC_MAXQ - 1); 330 pcap->adc_tail = (pcap->adc_tail + 1) & (PCAP_ADC_MAXQ - 1);
325 mutex_unlock(&pcap->adc_mutex); 331 spin_unlock_irqrestore(&pcap->adc_lock, irq_flags);
326 332
327 /* start conversion */ 333 /* start conversion */
328 pcap_adc_trigger(pcap); 334 pcap_adc_trigger(pcap);
@@ -389,16 +395,17 @@ static int pcap_add_subdev(struct pcap_chip *pcap,
389static int ezx_pcap_remove(struct spi_device *spi) 395static int ezx_pcap_remove(struct spi_device *spi)
390{ 396{
391 struct pcap_chip *pcap = spi_get_drvdata(spi); 397 struct pcap_chip *pcap = spi_get_drvdata(spi);
398 unsigned long flags;
392 int i; 399 int i;
393 400
394 /* remove all registered subdevs */ 401 /* remove all registered subdevs */
395 device_for_each_child(&spi->dev, NULL, pcap_remove_subdev); 402 device_for_each_child(&spi->dev, NULL, pcap_remove_subdev);
396 403
397 /* cleanup ADC */ 404 /* cleanup ADC */
398 mutex_lock(&pcap->adc_mutex); 405 spin_lock_irqsave(&pcap->adc_lock, flags);
399 for (i = 0; i < PCAP_ADC_MAXQ; i++) 406 for (i = 0; i < PCAP_ADC_MAXQ; i++)
400 kfree(pcap->adc_queue[i]); 407 kfree(pcap->adc_queue[i]);
401 mutex_unlock(&pcap->adc_mutex); 408 spin_unlock_irqrestore(&pcap->adc_lock, flags);
402 409
403 /* cleanup irqchip */ 410 /* cleanup irqchip */
404 for (i = pcap->irq_base; i < (pcap->irq_base + PCAP_NIRQS); i++) 411 for (i = pcap->irq_base; i < (pcap->irq_base + PCAP_NIRQS); i++)
@@ -426,8 +433,8 @@ static int ezx_pcap_probe(struct spi_device *spi)
426 goto ret; 433 goto ret;
427 } 434 }
428 435
429 mutex_init(&pcap->io_mutex); 436 spin_lock_init(&pcap->io_lock);
430 mutex_init(&pcap->adc_mutex); 437 spin_lock_init(&pcap->adc_lock);
431 INIT_WORK(&pcap->isr_work, pcap_isr_work); 438 INIT_WORK(&pcap->isr_work, pcap_isr_work);
432 INIT_WORK(&pcap->msr_work, pcap_msr_work); 439 INIT_WORK(&pcap->msr_work, pcap_msr_work);
433 spi_set_drvdata(spi, pcap); 440 spi_set_drvdata(spi, pcap);
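
The ezx-pcap hunks above swap the ADC and I/O mutexes for spinlocks so the queue can also be manipulated from interrupt context. A minimal sketch of that locking pattern follows; the lock and helpers are hypothetical stand-ins for the driver's fields, not part of the patch:

/* Process context disables local interrupts while holding a lock that an
 * interrupt handler also takes; the handler itself can use plain
 * spin_lock() because interrupts are already off there. */
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(example_lock);		/* hypothetical lock */

static void example_enqueue(void)		/* process context */
{
	unsigned long flags;

	spin_lock_irqsave(&example_lock, flags);
	/* ... add a request to the queue ... */
	spin_unlock_irqrestore(&example_lock, flags);
}

static void example_irq_dequeue(void)		/* called from the IRQ handler */
{
	spin_lock(&example_lock);
	/* ... take a request off the queue ... */
	spin_unlock(&example_lock);
}
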
diff --git a/drivers/mfd/fsl-imx25-tsadc.c b/drivers/mfd/fsl-imx25-tsadc.c
index 20791cab7263..a016b39fe9b0 100644
--- a/drivers/mfd/fsl-imx25-tsadc.c
+++ b/drivers/mfd/fsl-imx25-tsadc.c
@@ -69,10 +69,8 @@ static int mx25_tsadc_setup_irq(struct platform_device *pdev,
69 int irq; 69 int irq;
70 70
71 irq = platform_get_irq(pdev, 0); 71 irq = platform_get_irq(pdev, 0);
72 if (irq <= 0) { 72 if (irq <= 0)
73 dev_err(dev, "Failed to get irq\n");
74 return irq; 73 return irq;
75 }
76 74
77 tsadc->domain = irq_domain_add_simple(np, 2, 0, &mx25_tsadc_domain_ops, 75 tsadc->domain = irq_domain_add_simple(np, 2, 0, &mx25_tsadc_domain_ops,
78 tsadc); 76 tsadc);
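
Several hunks in this series (fsl-imx25-tsadc above, intel_soc_pmic_bxtwc, qcom_rpm and sm501 below) drop the dev_err() after a failed platform_get_irq(). This relies on platform_get_irq() printing its own error message on failure, so the caller only needs to propagate the code; a sketch of the simplified pattern, with the variable names assumed for illustration:

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;	/* platform_get_irq() already logged the failure */
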
diff --git a/drivers/mfd/htc-i2cpld.c b/drivers/mfd/htc-i2cpld.c
index 370519af5d0b..8ad6768bd7a2 100644
--- a/drivers/mfd/htc-i2cpld.c
+++ b/drivers/mfd/htc-i2cpld.c
@@ -385,8 +385,7 @@ static void htcpld_unregister_chip_i2c(
385 htcpld = platform_get_drvdata(pdev); 385 htcpld = platform_get_drvdata(pdev);
386 chip = &htcpld->chip[chip_index]; 386 chip = &htcpld->chip[chip_index];
387 387
388 if (chip->client) 388 i2c_unregister_device(chip->client);
389 i2c_unregister_device(chip->client);
390} 389}
391 390
392static int htcpld_register_chip_gpio( 391static int htcpld_register_chip_gpio(
diff --git a/drivers/mfd/intel-lpss-acpi.c b/drivers/mfd/intel-lpss-acpi.c
index 61ffb8b393e4..c8fe334b5fe8 100644
--- a/drivers/mfd/intel-lpss-acpi.c
+++ b/drivers/mfd/intel-lpss-acpi.c
@@ -18,6 +18,10 @@
18 18
19#include "intel-lpss.h" 19#include "intel-lpss.h"
20 20
21static const struct intel_lpss_platform_info spt_info = {
22 .clk_rate = 120000000,
23};
24
21static struct property_entry spt_i2c_properties[] = { 25static struct property_entry spt_i2c_properties[] = {
22 PROPERTY_ENTRY_U32("i2c-sda-hold-time-ns", 230), 26 PROPERTY_ENTRY_U32("i2c-sda-hold-time-ns", 230),
23 { }, 27 { },
@@ -28,6 +32,19 @@ static const struct intel_lpss_platform_info spt_i2c_info = {
28 .properties = spt_i2c_properties, 32 .properties = spt_i2c_properties,
29}; 33};
30 34
35static struct property_entry uart_properties[] = {
36 PROPERTY_ENTRY_U32("reg-io-width", 4),
37 PROPERTY_ENTRY_U32("reg-shift", 2),
38 PROPERTY_ENTRY_BOOL("snps,uart-16550-compatible"),
39 { },
40};
41
42static const struct intel_lpss_platform_info spt_uart_info = {
43 .clk_rate = 120000000,
44 .clk_con_id = "baudclk",
45 .properties = uart_properties,
46};
47
31static const struct intel_lpss_platform_info bxt_info = { 48static const struct intel_lpss_platform_info bxt_info = {
32 .clk_rate = 100000000, 49 .clk_rate = 100000000,
33}; 50};
@@ -58,8 +75,17 @@ static const struct intel_lpss_platform_info apl_i2c_info = {
58 75
59static const struct acpi_device_id intel_lpss_acpi_ids[] = { 76static const struct acpi_device_id intel_lpss_acpi_ids[] = {
60 /* SPT */ 77 /* SPT */
78 { "INT3440", (kernel_ulong_t)&spt_info },
79 { "INT3441", (kernel_ulong_t)&spt_info },
80 { "INT3442", (kernel_ulong_t)&spt_i2c_info },
81 { "INT3443", (kernel_ulong_t)&spt_i2c_info },
82 { "INT3444", (kernel_ulong_t)&spt_i2c_info },
83 { "INT3445", (kernel_ulong_t)&spt_i2c_info },
61 { "INT3446", (kernel_ulong_t)&spt_i2c_info }, 84 { "INT3446", (kernel_ulong_t)&spt_i2c_info },
62 { "INT3447", (kernel_ulong_t)&spt_i2c_info }, 85 { "INT3447", (kernel_ulong_t)&spt_i2c_info },
86 { "INT3448", (kernel_ulong_t)&spt_uart_info },
87 { "INT3449", (kernel_ulong_t)&spt_uart_info },
88 { "INT344A", (kernel_ulong_t)&spt_uart_info },
63 /* BXT */ 89 /* BXT */
64 { "80860AAC", (kernel_ulong_t)&bxt_i2c_info }, 90 { "80860AAC", (kernel_ulong_t)&bxt_i2c_info },
65 { "80860ABC", (kernel_ulong_t)&bxt_info }, 91 { "80860ABC", (kernel_ulong_t)&bxt_info },
diff --git a/drivers/mfd/intel-lpss-pci.c b/drivers/mfd/intel-lpss-pci.c
index ade6e1ce5a98..9355db29d2f9 100644
--- a/drivers/mfd/intel-lpss-pci.c
+++ b/drivers/mfd/intel-lpss-pci.c
@@ -35,6 +35,8 @@ static int intel_lpss_pci_probe(struct pci_dev *pdev,
35 info->mem = &pdev->resource[0]; 35 info->mem = &pdev->resource[0];
36 info->irq = pdev->irq; 36 info->irq = pdev->irq;
37 37
38 pdev->d3cold_delay = 0;
39
38 /* Probably it is enough to set this for iDMA capable devices only */ 40 /* Probably it is enough to set this for iDMA capable devices only */
39 pci_set_master(pdev); 41 pci_set_master(pdev);
40 pci_try_set_mwi(pdev); 42 pci_try_set_mwi(pdev);
@@ -256,6 +258,29 @@ static const struct pci_device_id intel_lpss_pci_ids[] = {
256 { PCI_VDEVICE(INTEL, 0x9dea), (kernel_ulong_t)&cnl_i2c_info }, 258 { PCI_VDEVICE(INTEL, 0x9dea), (kernel_ulong_t)&cnl_i2c_info },
257 { PCI_VDEVICE(INTEL, 0x9deb), (kernel_ulong_t)&cnl_i2c_info }, 259 { PCI_VDEVICE(INTEL, 0x9deb), (kernel_ulong_t)&cnl_i2c_info },
258 { PCI_VDEVICE(INTEL, 0x9dfb), (kernel_ulong_t)&spt_info }, 260 { PCI_VDEVICE(INTEL, 0x9dfb), (kernel_ulong_t)&spt_info },
261 /* TGL-LP */
262 { PCI_VDEVICE(INTEL, 0xa0a8), (kernel_ulong_t)&bxt_uart_info },
263 { PCI_VDEVICE(INTEL, 0xa0a9), (kernel_ulong_t)&bxt_uart_info },
264 { PCI_VDEVICE(INTEL, 0xa0aa), (kernel_ulong_t)&spt_info },
265 { PCI_VDEVICE(INTEL, 0xa0ab), (kernel_ulong_t)&spt_info },
266 { PCI_VDEVICE(INTEL, 0xa0c5), (kernel_ulong_t)&spt_i2c_info },
267 { PCI_VDEVICE(INTEL, 0xa0c6), (kernel_ulong_t)&spt_i2c_info },
268 { PCI_VDEVICE(INTEL, 0xa0c7), (kernel_ulong_t)&bxt_uart_info },
269 { PCI_VDEVICE(INTEL, 0xa0d8), (kernel_ulong_t)&spt_i2c_info },
270 { PCI_VDEVICE(INTEL, 0xa0d9), (kernel_ulong_t)&spt_i2c_info },
271 { PCI_VDEVICE(INTEL, 0xa0da), (kernel_ulong_t)&bxt_uart_info },
272 { PCI_VDEVICE(INTEL, 0xa0db), (kernel_ulong_t)&bxt_uart_info },
273 { PCI_VDEVICE(INTEL, 0xa0dc), (kernel_ulong_t)&bxt_uart_info },
274 { PCI_VDEVICE(INTEL, 0xa0dd), (kernel_ulong_t)&bxt_uart_info },
275 { PCI_VDEVICE(INTEL, 0xa0de), (kernel_ulong_t)&spt_info },
276 { PCI_VDEVICE(INTEL, 0xa0df), (kernel_ulong_t)&spt_info },
277 { PCI_VDEVICE(INTEL, 0xa0e8), (kernel_ulong_t)&spt_i2c_info },
278 { PCI_VDEVICE(INTEL, 0xa0e9), (kernel_ulong_t)&spt_i2c_info },
279 { PCI_VDEVICE(INTEL, 0xa0ea), (kernel_ulong_t)&spt_i2c_info },
280 { PCI_VDEVICE(INTEL, 0xa0eb), (kernel_ulong_t)&spt_i2c_info },
281 { PCI_VDEVICE(INTEL, 0xa0fb), (kernel_ulong_t)&spt_info },
282 { PCI_VDEVICE(INTEL, 0xa0fd), (kernel_ulong_t)&spt_info },
283 { PCI_VDEVICE(INTEL, 0xa0fe), (kernel_ulong_t)&spt_info },
259 /* SPT-H */ 284 /* SPT-H */
260 { PCI_VDEVICE(INTEL, 0xa127), (kernel_ulong_t)&spt_uart_info }, 285 { PCI_VDEVICE(INTEL, 0xa127), (kernel_ulong_t)&spt_uart_info },
261 { PCI_VDEVICE(INTEL, 0xa128), (kernel_ulong_t)&spt_uart_info }, 286 { PCI_VDEVICE(INTEL, 0xa128), (kernel_ulong_t)&spt_uart_info },
diff --git a/drivers/mfd/intel-lpss.c b/drivers/mfd/intel-lpss.c
index 277f48f1cc1c..bfe4ff337581 100644
--- a/drivers/mfd/intel-lpss.c
+++ b/drivers/mfd/intel-lpss.c
@@ -47,10 +47,10 @@
47#define LPSS_PRIV_IDLELTR 0x14 47#define LPSS_PRIV_IDLELTR 0x14
48 48
49#define LPSS_PRIV_LTR_REQ BIT(15) 49#define LPSS_PRIV_LTR_REQ BIT(15)
50#define LPSS_PRIV_LTR_SCALE_MASK 0xc00 50#define LPSS_PRIV_LTR_SCALE_MASK GENMASK(11, 10)
51#define LPSS_PRIV_LTR_SCALE_1US 0x800 51#define LPSS_PRIV_LTR_SCALE_1US (2 << 10)
52#define LPSS_PRIV_LTR_SCALE_32US 0xc00 52#define LPSS_PRIV_LTR_SCALE_32US (3 << 10)
53#define LPSS_PRIV_LTR_VALUE_MASK 0x3ff 53#define LPSS_PRIV_LTR_VALUE_MASK GENMASK(9, 0)
54 54
55#define LPSS_PRIV_SSP_REG 0x20 55#define LPSS_PRIV_SSP_REG 0x20
56#define LPSS_PRIV_SSP_REG_DIS_DMA_FIN BIT(0) 56#define LPSS_PRIV_SSP_REG_DIS_DMA_FIN BIT(0)
@@ -59,8 +59,8 @@
59 59
60#define LPSS_PRIV_CAPS 0xfc 60#define LPSS_PRIV_CAPS 0xfc
61#define LPSS_PRIV_CAPS_NO_IDMA BIT(8) 61#define LPSS_PRIV_CAPS_NO_IDMA BIT(8)
62#define LPSS_PRIV_CAPS_TYPE_MASK GENMASK(7, 4)
62#define LPSS_PRIV_CAPS_TYPE_SHIFT 4 63#define LPSS_PRIV_CAPS_TYPE_SHIFT 4
63#define LPSS_PRIV_CAPS_TYPE_MASK (0xf << LPSS_PRIV_CAPS_TYPE_SHIFT)
64 64
65/* This matches the type field in CAPS register */ 65/* This matches the type field in CAPS register */
66enum intel_lpss_dev_type { 66enum intel_lpss_dev_type {
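
The register-mask hunks above only change notation: GENMASK(h, l) builds a mask with bits l through h set, so the new definitions keep the previous numeric values. The EXAMPLE_ names below are illustrative, not part of the driver:

#include <linux/bits.h>

#define EXAMPLE_LTR_SCALE_MASK	GENMASK(11, 10)	/* == 0x0c00 */
#define EXAMPLE_LTR_VALUE_MASK	GENMASK(9, 0)	/* == 0x03ff */
#define EXAMPLE_CAPS_TYPE_MASK	GENMASK(7, 4)	/* == 0x00f0 */
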
@@ -128,17 +128,6 @@ static const struct mfd_cell intel_lpss_spi_cell = {
128static DEFINE_IDA(intel_lpss_devid_ida); 128static DEFINE_IDA(intel_lpss_devid_ida);
129static struct dentry *intel_lpss_debugfs; 129static struct dentry *intel_lpss_debugfs;
130 130
131static int intel_lpss_request_dma_module(const char *name)
132{
133 static bool intel_lpss_dma_requested;
134
135 if (intel_lpss_dma_requested)
136 return 0;
137
138 intel_lpss_dma_requested = true;
139 return request_module("%s", name);
140}
141
142static void intel_lpss_cache_ltr(struct intel_lpss *lpss) 131static void intel_lpss_cache_ltr(struct intel_lpss *lpss)
143{ 132{
144 lpss->active_ltr = readl(lpss->priv + LPSS_PRIV_ACTIVELTR); 133 lpss->active_ltr = readl(lpss->priv + LPSS_PRIV_ACTIVELTR);
@@ -429,16 +418,6 @@ int intel_lpss_probe(struct device *dev,
429 dev_warn(dev, "Failed to create debugfs entries\n"); 418 dev_warn(dev, "Failed to create debugfs entries\n");
430 419
431 if (intel_lpss_has_idma(lpss)) { 420 if (intel_lpss_has_idma(lpss)) {
432 /*
433 * Ensure the DMA driver is loaded before the host
434 * controller device appears, so that the host controller
435 * driver can request its DMA channels as early as
436 * possible.
437 *
438 * If the DMA module is not there that's OK as well.
439 */
440 intel_lpss_request_dma_module(LPSS_IDMA64_DRIVER_NAME);
441
442 ret = mfd_add_devices(dev, lpss->devid, &intel_lpss_idma64_cell, 421 ret = mfd_add_devices(dev, lpss->devid, &intel_lpss_idma64_cell,
443 1, info->mem, info->irq, NULL); 422 1, info->mem, info->irq, NULL);
444 if (ret) 423 if (ret)
@@ -554,3 +533,11 @@ MODULE_AUTHOR("Heikki Krogerus <heikki.krogerus@linux.intel.com>");
554MODULE_AUTHOR("Jarkko Nikula <jarkko.nikula@linux.intel.com>"); 533MODULE_AUTHOR("Jarkko Nikula <jarkko.nikula@linux.intel.com>");
555MODULE_DESCRIPTION("Intel LPSS core driver"); 534MODULE_DESCRIPTION("Intel LPSS core driver");
556MODULE_LICENSE("GPL v2"); 535MODULE_LICENSE("GPL v2");
536/*
537 * Ensure the DMA driver is loaded before the host controller device appears,
538 * so that the host controller driver can request its DMA channels as early
539 * as possible.
540 *
541 * If the DMA module is not there that's OK as well.
542 */
543MODULE_SOFTDEP("pre: platform:" LPSS_IDMA64_DRIVER_NAME);
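
The intel-lpss change above replaces an explicit request_module() call with a module soft dependency. A minimal sketch of a module declaring such a dependency; the "idma64" name is assumed here for illustration, and a missing module is not treated as an error, matching the removed helper's behaviour:

// SPDX-License-Identifier: GPL-2.0
#include <linux/module.h>

static int __init softdep_demo_init(void)
{
	return 0;
}
module_init(softdep_demo_init);

static void __exit softdep_demo_exit(void)
{
}
module_exit(softdep_demo_exit);

/* Ask modprobe to load the DMA driver first, if it is available. */
MODULE_SOFTDEP("pre: platform:idma64");
MODULE_LICENSE("GPL v2");
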
diff --git a/drivers/mfd/intel_soc_pmic_bxtwc.c b/drivers/mfd/intel_soc_pmic_bxtwc.c
index 6310c3bdb991..739cfb5b69fe 100644
--- a/drivers/mfd/intel_soc_pmic_bxtwc.c
+++ b/drivers/mfd/intel_soc_pmic_bxtwc.c
@@ -450,10 +450,8 @@ static int bxtwc_probe(struct platform_device *pdev)
450 return -ENOMEM; 450 return -ENOMEM;
451 451
452 ret = platform_get_irq(pdev, 0); 452 ret = platform_get_irq(pdev, 0);
453 if (ret < 0) { 453 if (ret < 0)
454 dev_err(&pdev->dev, "Invalid IRQ\n");
455 return ret; 454 return ret;
456 }
457 pmic->irq = ret; 455 pmic->irq = ret;
458 456
459 dev_set_drvdata(&pdev->dev, pmic); 457 dev_set_drvdata(&pdev->dev, pmic);
diff --git a/drivers/mfd/intel_soc_pmic_mrfld.c b/drivers/mfd/intel_soc_pmic_mrfld.c
new file mode 100644
index 000000000000..26a1551c5faf
--- /dev/null
+++ b/drivers/mfd/intel_soc_pmic_mrfld.c
@@ -0,0 +1,157 @@
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Device access for Basin Cove PMIC
4 *
5 * Copyright (c) 2019, Intel Corporation.
6 * Author: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
7 */
8
9#include <linux/acpi.h>
10#include <linux/interrupt.h>
11#include <linux/mfd/core.h>
12#include <linux/mfd/intel_soc_pmic.h>
13#include <linux/mfd/intel_soc_pmic_mrfld.h>
14#include <linux/module.h>
15#include <linux/platform_device.h>
16#include <linux/regmap.h>
17
18#include <asm/intel_scu_ipc.h>
19
20/*
21 * Level 2 IRQs
22 *
 23 * Firmware on systems with the Basin Cove PMIC services Level 1 IRQs
 24 * without assistance. Thus, each Level 1 IRQ is represented
25 * as a separate RTE in IOAPIC.
26 */
27static struct resource irq_level2_resources[] = {
28 DEFINE_RES_IRQ(0), /* power button */
29 DEFINE_RES_IRQ(0), /* TMU */
30 DEFINE_RES_IRQ(0), /* thermal */
31 DEFINE_RES_IRQ(0), /* BCU */
32 DEFINE_RES_IRQ(0), /* ADC */
33 DEFINE_RES_IRQ(0), /* charger */
34 DEFINE_RES_IRQ(0), /* GPIO */
35};
36
37static const struct mfd_cell bcove_dev[] = {
38 {
39 .name = "mrfld_bcove_pwrbtn",
40 .num_resources = 1,
41 .resources = &irq_level2_resources[0],
42 }, {
43 .name = "mrfld_bcove_tmu",
44 .num_resources = 1,
45 .resources = &irq_level2_resources[1],
46 }, {
47 .name = "mrfld_bcove_thermal",
48 .num_resources = 1,
49 .resources = &irq_level2_resources[2],
50 }, {
51 .name = "mrfld_bcove_bcu",
52 .num_resources = 1,
53 .resources = &irq_level2_resources[3],
54 }, {
55 .name = "mrfld_bcove_adc",
56 .num_resources = 1,
57 .resources = &irq_level2_resources[4],
58 }, {
59 .name = "mrfld_bcove_charger",
60 .num_resources = 1,
61 .resources = &irq_level2_resources[5],
62 }, {
63 .name = "mrfld_bcove_pwrsrc",
64 .num_resources = 1,
65 .resources = &irq_level2_resources[5],
66 }, {
67 .name = "mrfld_bcove_gpio",
68 .num_resources = 1,
69 .resources = &irq_level2_resources[6],
70 },
71 { .name = "mrfld_bcove_region", },
72};
73
74static int bcove_ipc_byte_reg_read(void *context, unsigned int reg,
75 unsigned int *val)
76{
77 u8 ipc_out;
78 int ret;
79
80 ret = intel_scu_ipc_ioread8(reg, &ipc_out);
81 if (ret)
82 return ret;
83
84 *val = ipc_out;
85 return 0;
86}
87
88static int bcove_ipc_byte_reg_write(void *context, unsigned int reg,
89 unsigned int val)
90{
91 u8 ipc_in = val;
92 int ret;
93
94 ret = intel_scu_ipc_iowrite8(reg, ipc_in);
95 if (ret)
96 return ret;
97
98 return 0;
99}
100
101static const struct regmap_config bcove_regmap_config = {
102 .reg_bits = 16,
103 .val_bits = 8,
104 .max_register = 0xff,
105 .reg_write = bcove_ipc_byte_reg_write,
106 .reg_read = bcove_ipc_byte_reg_read,
107};
108
109static int bcove_probe(struct platform_device *pdev)
110{
111 struct device *dev = &pdev->dev;
112 struct intel_soc_pmic *pmic;
113 unsigned int i;
114 int ret;
115
116 pmic = devm_kzalloc(dev, sizeof(*pmic), GFP_KERNEL);
117 if (!pmic)
118 return -ENOMEM;
119
120 platform_set_drvdata(pdev, pmic);
121 pmic->dev = &pdev->dev;
122
123 pmic->regmap = devm_regmap_init(dev, NULL, pmic, &bcove_regmap_config);
124 if (IS_ERR(pmic->regmap))
125 return PTR_ERR(pmic->regmap);
126
127 for (i = 0; i < ARRAY_SIZE(irq_level2_resources); i++) {
128 ret = platform_get_irq(pdev, i);
129 if (ret < 0)
130 return ret;
131
132 irq_level2_resources[i].start = ret;
133 irq_level2_resources[i].end = ret;
134 }
135
136 return devm_mfd_add_devices(dev, PLATFORM_DEVID_NONE,
137 bcove_dev, ARRAY_SIZE(bcove_dev),
138 NULL, 0, NULL);
139}
140
141static const struct acpi_device_id bcove_acpi_ids[] = {
142 { "INTC100E" },
143 {}
144};
145MODULE_DEVICE_TABLE(acpi, bcove_acpi_ids);
146
147static struct platform_driver bcove_driver = {
148 .driver = {
149 .name = "intel_soc_pmic_mrfld",
150 .acpi_match_table = bcove_acpi_ids,
151 },
152 .probe = bcove_probe,
153};
154module_platform_driver(bcove_driver);
155
156MODULE_DESCRIPTION("IPC driver for Intel SoC Basin Cove PMIC");
157MODULE_LICENSE("GPL v2");
diff --git a/drivers/mfd/jz4740-adc.c b/drivers/mfd/jz4740-adc.c
deleted file mode 100644
index 082f16917519..000000000000
--- a/drivers/mfd/jz4740-adc.c
+++ /dev/null
@@ -1,324 +0,0 @@
1// SPDX-License-Identifier: GPL-2.0-or-later
2/*
3 * Copyright (C) 2009-2010, Lars-Peter Clausen <lars@metafoo.de>
4 * JZ4740 SoC ADC driver
5 *
6 * This driver synchronizes access to the JZ4740 ADC core between the
7 * JZ4740 battery and hwmon drivers.
8 */
9
10#include <linux/err.h>
11#include <linux/io.h>
12#include <linux/irq.h>
13#include <linux/interrupt.h>
14#include <linux/kernel.h>
15#include <linux/module.h>
16#include <linux/platform_device.h>
17#include <linux/slab.h>
18#include <linux/spinlock.h>
19
20#include <linux/clk.h>
21#include <linux/mfd/core.h>
22
23#include <linux/jz4740-adc.h>
24
25
26#define JZ_REG_ADC_ENABLE 0x00
27#define JZ_REG_ADC_CFG 0x04
28#define JZ_REG_ADC_CTRL 0x08
29#define JZ_REG_ADC_STATUS 0x0c
30
31#define JZ_REG_ADC_TOUCHSCREEN_BASE 0x10
32#define JZ_REG_ADC_BATTERY_BASE 0x1c
33#define JZ_REG_ADC_HWMON_BASE 0x20
34
35#define JZ_ADC_ENABLE_TOUCH BIT(2)
36#define JZ_ADC_ENABLE_BATTERY BIT(1)
37#define JZ_ADC_ENABLE_ADCIN BIT(0)
38
39enum {
40 JZ_ADC_IRQ_ADCIN = 0,
41 JZ_ADC_IRQ_BATTERY,
42 JZ_ADC_IRQ_TOUCH,
43 JZ_ADC_IRQ_PENUP,
44 JZ_ADC_IRQ_PENDOWN,
45};
46
47struct jz4740_adc {
48 struct resource *mem;
49 void __iomem *base;
50
51 int irq;
52 struct irq_chip_generic *gc;
53
54 struct clk *clk;
55 atomic_t clk_ref;
56
57 spinlock_t lock;
58};
59
60static void jz4740_adc_irq_demux(struct irq_desc *desc)
61{
62 struct irq_chip_generic *gc = irq_desc_get_handler_data(desc);
63 uint8_t status;
64 unsigned int i;
65
66 status = readb(gc->reg_base + JZ_REG_ADC_STATUS);
67
68 for (i = 0; i < 5; ++i) {
69 if (status & BIT(i))
70 generic_handle_irq(gc->irq_base + i);
71 }
72}
73
74
75/* Refcounting for the ADC clock is done in here instead of in the clock
76 * framework, because it is the only clock which is shared between multiple
77 * devices and thus is the only clock which needs refcounting */
78static inline void jz4740_adc_clk_enable(struct jz4740_adc *adc)
79{
80 if (atomic_inc_return(&adc->clk_ref) == 1)
81 clk_prepare_enable(adc->clk);
82}
83
84static inline void jz4740_adc_clk_disable(struct jz4740_adc *adc)
85{
86 if (atomic_dec_return(&adc->clk_ref) == 0)
87 clk_disable_unprepare(adc->clk);
88}
89
90static inline void jz4740_adc_set_enabled(struct jz4740_adc *adc, int engine,
91 bool enabled)
92{
93 unsigned long flags;
94 uint8_t val;
95
96 spin_lock_irqsave(&adc->lock, flags);
97
98 val = readb(adc->base + JZ_REG_ADC_ENABLE);
99 if (enabled)
100 val |= BIT(engine);
101 else
102 val &= ~BIT(engine);
103 writeb(val, adc->base + JZ_REG_ADC_ENABLE);
104
105 spin_unlock_irqrestore(&adc->lock, flags);
106}
107
108static int jz4740_adc_cell_enable(struct platform_device *pdev)
109{
110 struct jz4740_adc *adc = dev_get_drvdata(pdev->dev.parent);
111
112 jz4740_adc_clk_enable(adc);
113 jz4740_adc_set_enabled(adc, pdev->id, true);
114
115 return 0;
116}
117
118static int jz4740_adc_cell_disable(struct platform_device *pdev)
119{
120 struct jz4740_adc *adc = dev_get_drvdata(pdev->dev.parent);
121
122 jz4740_adc_set_enabled(adc, pdev->id, false);
123 jz4740_adc_clk_disable(adc);
124
125 return 0;
126}
127
128int jz4740_adc_set_config(struct device *dev, uint32_t mask, uint32_t val)
129{
130 struct jz4740_adc *adc = dev_get_drvdata(dev);
131 unsigned long flags;
132 uint32_t cfg;
133
134 if (!adc)
135 return -ENODEV;
136
137 spin_lock_irqsave(&adc->lock, flags);
138
139 cfg = readl(adc->base + JZ_REG_ADC_CFG);
140
141 cfg &= ~mask;
142 cfg |= val;
143
144 writel(cfg, adc->base + JZ_REG_ADC_CFG);
145
146 spin_unlock_irqrestore(&adc->lock, flags);
147
148 return 0;
149}
150EXPORT_SYMBOL_GPL(jz4740_adc_set_config);
151
152static struct resource jz4740_hwmon_resources[] = {
153 {
154 .start = JZ_ADC_IRQ_ADCIN,
155 .flags = IORESOURCE_IRQ,
156 },
157 {
158 .start = JZ_REG_ADC_HWMON_BASE,
159 .end = JZ_REG_ADC_HWMON_BASE + 3,
160 .flags = IORESOURCE_MEM,
161 },
162};
163
164static struct resource jz4740_battery_resources[] = {
165 {
166 .start = JZ_ADC_IRQ_BATTERY,
167 .flags = IORESOURCE_IRQ,
168 },
169 {
170 .start = JZ_REG_ADC_BATTERY_BASE,
171 .end = JZ_REG_ADC_BATTERY_BASE + 3,
172 .flags = IORESOURCE_MEM,
173 },
174};
175
176static const struct mfd_cell jz4740_adc_cells[] = {
177 {
178 .id = 0,
179 .name = "jz4740-hwmon",
180 .num_resources = ARRAY_SIZE(jz4740_hwmon_resources),
181 .resources = jz4740_hwmon_resources,
182
183 .enable = jz4740_adc_cell_enable,
184 .disable = jz4740_adc_cell_disable,
185 },
186 {
187 .id = 1,
188 .name = "jz4740-battery",
189 .num_resources = ARRAY_SIZE(jz4740_battery_resources),
190 .resources = jz4740_battery_resources,
191
192 .enable = jz4740_adc_cell_enable,
193 .disable = jz4740_adc_cell_disable,
194 },
195};
196
197static int jz4740_adc_probe(struct platform_device *pdev)
198{
199 struct irq_chip_generic *gc;
200 struct irq_chip_type *ct;
201 struct jz4740_adc *adc;
202 struct resource *mem_base;
203 int ret;
204 int irq_base;
205
206 adc = devm_kzalloc(&pdev->dev, sizeof(*adc), GFP_KERNEL);
207 if (!adc)
208 return -ENOMEM;
209
210 adc->irq = platform_get_irq(pdev, 0);
211 if (adc->irq < 0) {
212 ret = adc->irq;
213 dev_err(&pdev->dev, "Failed to get platform irq: %d\n", ret);
214 return ret;
215 }
216
217 irq_base = platform_get_irq(pdev, 1);
218 if (irq_base < 0) {
219 dev_err(&pdev->dev, "Failed to get irq base: %d\n", irq_base);
220 return irq_base;
221 }
222
223 mem_base = platform_get_resource(pdev, IORESOURCE_MEM, 0);
224 if (!mem_base) {
225 dev_err(&pdev->dev, "Failed to get platform mmio resource\n");
226 return -ENOENT;
227 }
228
229 /* Only request the shared registers for the MFD driver */
230 adc->mem = request_mem_region(mem_base->start, JZ_REG_ADC_STATUS,
231 pdev->name);
232 if (!adc->mem) {
233 dev_err(&pdev->dev, "Failed to request mmio memory region\n");
234 return -EBUSY;
235 }
236
237 adc->base = ioremap_nocache(adc->mem->start, resource_size(adc->mem));
238 if (!adc->base) {
239 ret = -EBUSY;
240 dev_err(&pdev->dev, "Failed to ioremap mmio memory\n");
241 goto err_release_mem_region;
242 }
243
244 adc->clk = clk_get(&pdev->dev, "adc");
245 if (IS_ERR(adc->clk)) {
246 ret = PTR_ERR(adc->clk);
247 dev_err(&pdev->dev, "Failed to get clock: %d\n", ret);
248 goto err_iounmap;
249 }
250
251 spin_lock_init(&adc->lock);
252 atomic_set(&adc->clk_ref, 0);
253
254 platform_set_drvdata(pdev, adc);
255
256 gc = irq_alloc_generic_chip("INTC", 1, irq_base, adc->base,
257 handle_level_irq);
258
259 ct = gc->chip_types;
260 ct->regs.mask = JZ_REG_ADC_CTRL;
261 ct->regs.ack = JZ_REG_ADC_STATUS;
262 ct->chip.irq_mask = irq_gc_mask_set_bit;
263 ct->chip.irq_unmask = irq_gc_mask_clr_bit;
264 ct->chip.irq_ack = irq_gc_ack_set_bit;
265
266 irq_setup_generic_chip(gc, IRQ_MSK(5), IRQ_GC_INIT_MASK_CACHE, 0,
267 IRQ_NOPROBE | IRQ_LEVEL);
268
269 adc->gc = gc;
270
271 irq_set_chained_handler_and_data(adc->irq, jz4740_adc_irq_demux, gc);
272
273 writeb(0x00, adc->base + JZ_REG_ADC_ENABLE);
274 writeb(0xff, adc->base + JZ_REG_ADC_CTRL);
275
276 ret = mfd_add_devices(&pdev->dev, 0, jz4740_adc_cells,
277 ARRAY_SIZE(jz4740_adc_cells), mem_base,
278 irq_base, NULL);
279 if (ret < 0)
280 goto err_clk_put;
281
282 return 0;
283
284err_clk_put:
285 clk_put(adc->clk);
286err_iounmap:
287 iounmap(adc->base);
288err_release_mem_region:
289 release_mem_region(adc->mem->start, resource_size(adc->mem));
290 return ret;
291}
292
293static int jz4740_adc_remove(struct platform_device *pdev)
294{
295 struct jz4740_adc *adc = platform_get_drvdata(pdev);
296
297 mfd_remove_devices(&pdev->dev);
298
299 irq_remove_generic_chip(adc->gc, IRQ_MSK(5), IRQ_NOPROBE | IRQ_LEVEL, 0);
300 kfree(adc->gc);
301 irq_set_chained_handler_and_data(adc->irq, NULL, NULL);
302
303 iounmap(adc->base);
304 release_mem_region(adc->mem->start, resource_size(adc->mem));
305
306 clk_put(adc->clk);
307
308 return 0;
309}
310
311static struct platform_driver jz4740_adc_driver = {
312 .probe = jz4740_adc_probe,
313 .remove = jz4740_adc_remove,
314 .driver = {
315 .name = "jz4740-adc",
316 },
317};
318
319module_platform_driver(jz4740_adc_driver);
320
321MODULE_DESCRIPTION("JZ4740 SoC ADC driver");
322MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
323MODULE_LICENSE("GPL");
324MODULE_ALIAS("platform:jz4740-adc");
diff --git a/drivers/mfd/max14577.c b/drivers/mfd/max14577.c
index ebb13d5de530..fd8864cafd25 100644
--- a/drivers/mfd/max14577.c
+++ b/drivers/mfd/max14577.c
@@ -297,11 +297,11 @@ static int max77836_init(struct max14577 *max14577)
297 int ret; 297 int ret;
298 u8 intsrc_mask; 298 u8 intsrc_mask;
299 299
300 max14577->i2c_pmic = i2c_new_dummy(max14577->i2c->adapter, 300 max14577->i2c_pmic = i2c_new_dummy_device(max14577->i2c->adapter,
301 I2C_ADDR_PMIC); 301 I2C_ADDR_PMIC);
302 if (!max14577->i2c_pmic) { 302 if (IS_ERR(max14577->i2c_pmic)) {
303 dev_err(max14577->dev, "Failed to register PMIC I2C device\n"); 303 dev_err(max14577->dev, "Failed to register PMIC I2C device\n");
304 return -ENODEV; 304 return PTR_ERR(max14577->i2c_pmic);
305 } 305 }
306 i2c_set_clientdata(max14577->i2c_pmic, max14577); 306 i2c_set_clientdata(max14577->i2c_pmic, max14577);
307 307
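
The max14577 hunk above is the first of a series of i2c_new_dummy() to i2c_new_dummy_device() conversions repeated in the drivers below. The _device() variant returns an ERR_PTR() instead of NULL on failure, so callers switch from a NULL check to IS_ERR()/PTR_ERR() and can propagate the real error code. A sketch of the pattern, with the address and data pointer assumed for illustration:

	struct i2c_client *dummy;

	dummy = i2c_new_dummy_device(client->adapter, 0x48);	/* example address */
	if (IS_ERR(dummy))
		return PTR_ERR(dummy);		/* e.g. -EBUSY or -ENOMEM */
	i2c_set_clientdata(dummy, chip);
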
diff --git a/drivers/mfd/max77620.c b/drivers/mfd/max77620.c
index 0c28965fcc6a..a851ff473a44 100644
--- a/drivers/mfd/max77620.c
+++ b/drivers/mfd/max77620.c
@@ -416,8 +416,10 @@ static int max77620_initialise_fps(struct max77620_chip *chip)
416 416
417 for_each_child_of_node(fps_np, fps_child) { 417 for_each_child_of_node(fps_np, fps_child) {
418 ret = max77620_config_fps(chip, fps_child); 418 ret = max77620_config_fps(chip, fps_child);
419 if (ret < 0) 419 if (ret < 0) {
420 of_node_put(fps_child);
420 return ret; 421 return ret;
422 }
421 } 423 }
422 424
423 config = chip->enable_global_lpm ? MAX77620_ONOFFCNFG2_SLP_LPM_MSK : 0; 425 config = chip->enable_global_lpm ? MAX77620_ONOFFCNFG2_SLP_LPM_MSK : 0;
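
The max77620 fix above follows the reference-counting rule of the child-node iterator: for_each_child_of_node() takes a reference on each child and drops it when advancing, so any early return must drop the current child's reference itself. A sketch, with do_something() standing in for the per-child work:

	struct device_node *child;
	int ret;

	for_each_child_of_node(parent, child) {
		ret = do_something(child);	/* hypothetical per-child helper */
		if (ret) {
			of_node_put(child);	/* balance the iterator's of_node_get() */
			return ret;
		}
	}
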
diff --git a/drivers/mfd/max77693.c b/drivers/mfd/max77693.c
index 901d99d65924..596ed85cab3b 100644
--- a/drivers/mfd/max77693.c
+++ b/drivers/mfd/max77693.c
@@ -183,17 +183,17 @@ static int max77693_i2c_probe(struct i2c_client *i2c,
183 } else 183 } else
184 dev_info(max77693->dev, "device ID: 0x%x\n", reg_data); 184 dev_info(max77693->dev, "device ID: 0x%x\n", reg_data);
185 185
186 max77693->i2c_muic = i2c_new_dummy(i2c->adapter, I2C_ADDR_MUIC); 186 max77693->i2c_muic = i2c_new_dummy_device(i2c->adapter, I2C_ADDR_MUIC);
187 if (!max77693->i2c_muic) { 187 if (IS_ERR(max77693->i2c_muic)) {
188 dev_err(max77693->dev, "Failed to allocate I2C device for MUIC\n"); 188 dev_err(max77693->dev, "Failed to allocate I2C device for MUIC\n");
189 return -ENODEV; 189 return PTR_ERR(max77693->i2c_muic);
190 } 190 }
191 i2c_set_clientdata(max77693->i2c_muic, max77693); 191 i2c_set_clientdata(max77693->i2c_muic, max77693);
192 192
193 max77693->i2c_haptic = i2c_new_dummy(i2c->adapter, I2C_ADDR_HAPTIC); 193 max77693->i2c_haptic = i2c_new_dummy_device(i2c->adapter, I2C_ADDR_HAPTIC);
194 if (!max77693->i2c_haptic) { 194 if (IS_ERR(max77693->i2c_haptic)) {
195 dev_err(max77693->dev, "Failed to allocate I2C device for Haptic\n"); 195 dev_err(max77693->dev, "Failed to allocate I2C device for Haptic\n");
196 ret = -ENODEV; 196 ret = PTR_ERR(max77693->i2c_haptic);
197 goto err_i2c_haptic; 197 goto err_i2c_haptic;
198 } 198 }
199 i2c_set_clientdata(max77693->i2c_haptic, max77693); 199 i2c_set_clientdata(max77693->i2c_haptic, max77693);
diff --git a/drivers/mfd/max77843.c b/drivers/mfd/max77843.c
index 25cbb2242b26..209ee24d9ce1 100644
--- a/drivers/mfd/max77843.c
+++ b/drivers/mfd/max77843.c
@@ -70,11 +70,11 @@ static int max77843_chg_init(struct max77693_dev *max77843)
70{ 70{
71 int ret; 71 int ret;
72 72
73 max77843->i2c_chg = i2c_new_dummy(max77843->i2c->adapter, I2C_ADDR_CHG); 73 max77843->i2c_chg = i2c_new_dummy_device(max77843->i2c->adapter, I2C_ADDR_CHG);
74 if (!max77843->i2c_chg) { 74 if (IS_ERR(max77843->i2c_chg)) {
75 dev_err(&max77843->i2c->dev, 75 dev_err(&max77843->i2c->dev,
76 "Cannot allocate I2C device for Charger\n"); 76 "Cannot allocate I2C device for Charger\n");
77 return -ENODEV; 77 return PTR_ERR(max77843->i2c_chg);
78 } 78 }
79 i2c_set_clientdata(max77843->i2c_chg, max77843); 79 i2c_set_clientdata(max77843->i2c_chg, max77843);
80 80
diff --git a/drivers/mfd/max8907.c b/drivers/mfd/max8907.c
index cc01f706cb32..d44baafd9d14 100644
--- a/drivers/mfd/max8907.c
+++ b/drivers/mfd/max8907.c
@@ -214,9 +214,9 @@ static int max8907_i2c_probe(struct i2c_client *i2c,
214 goto err_regmap_gen; 214 goto err_regmap_gen;
215 } 215 }
216 216
217 max8907->i2c_rtc = i2c_new_dummy(i2c->adapter, MAX8907_RTC_I2C_ADDR); 217 max8907->i2c_rtc = i2c_new_dummy_device(i2c->adapter, MAX8907_RTC_I2C_ADDR);
218 if (!max8907->i2c_rtc) { 218 if (IS_ERR(max8907->i2c_rtc)) {
219 ret = -ENOMEM; 219 ret = PTR_ERR(max8907->i2c_rtc);
220 goto err_dummy_rtc; 220 goto err_dummy_rtc;
221 } 221 }
222 i2c_set_clientdata(max8907->i2c_rtc, max8907); 222 i2c_set_clientdata(max8907->i2c_rtc, max8907);
diff --git a/drivers/mfd/max8925-i2c.c b/drivers/mfd/max8925-i2c.c
index 20bb19b71109..114e905bef25 100644
--- a/drivers/mfd/max8925-i2c.c
+++ b/drivers/mfd/max8925-i2c.c
@@ -176,18 +176,18 @@ static int max8925_probe(struct i2c_client *client,
176 dev_set_drvdata(chip->dev, chip); 176 dev_set_drvdata(chip->dev, chip);
177 mutex_init(&chip->io_lock); 177 mutex_init(&chip->io_lock);
178 178
179 chip->rtc = i2c_new_dummy(chip->i2c->adapter, RTC_I2C_ADDR); 179 chip->rtc = i2c_new_dummy_device(chip->i2c->adapter, RTC_I2C_ADDR);
180 if (!chip->rtc) { 180 if (IS_ERR(chip->rtc)) {
181 dev_err(chip->dev, "Failed to allocate I2C device for RTC\n"); 181 dev_err(chip->dev, "Failed to allocate I2C device for RTC\n");
182 return -ENODEV; 182 return PTR_ERR(chip->rtc);
183 } 183 }
184 i2c_set_clientdata(chip->rtc, chip); 184 i2c_set_clientdata(chip->rtc, chip);
185 185
186 chip->adc = i2c_new_dummy(chip->i2c->adapter, ADC_I2C_ADDR); 186 chip->adc = i2c_new_dummy_device(chip->i2c->adapter, ADC_I2C_ADDR);
187 if (!chip->adc) { 187 if (IS_ERR(chip->adc)) {
188 dev_err(chip->dev, "Failed to allocate I2C device for ADC\n"); 188 dev_err(chip->dev, "Failed to allocate I2C device for ADC\n");
189 i2c_unregister_device(chip->rtc); 189 i2c_unregister_device(chip->rtc);
190 return -ENODEV; 190 return PTR_ERR(chip->adc);
191 } 191 }
192 i2c_set_clientdata(chip->adc, chip); 192 i2c_set_clientdata(chip->adc, chip);
193 193
diff --git a/drivers/mfd/max8997.c b/drivers/mfd/max8997.c
index 8c06c09e36d1..68d8f2b95287 100644
--- a/drivers/mfd/max8997.c
+++ b/drivers/mfd/max8997.c
@@ -185,25 +185,25 @@ static int max8997_i2c_probe(struct i2c_client *i2c,
185 185
186 mutex_init(&max8997->iolock); 186 mutex_init(&max8997->iolock);
187 187
188 max8997->rtc = i2c_new_dummy(i2c->adapter, I2C_ADDR_RTC); 188 max8997->rtc = i2c_new_dummy_device(i2c->adapter, I2C_ADDR_RTC);
189 if (!max8997->rtc) { 189 if (IS_ERR(max8997->rtc)) {
190 dev_err(max8997->dev, "Failed to allocate I2C device for RTC\n"); 190 dev_err(max8997->dev, "Failed to allocate I2C device for RTC\n");
191 return -ENODEV; 191 return PTR_ERR(max8997->rtc);
192 } 192 }
193 i2c_set_clientdata(max8997->rtc, max8997); 193 i2c_set_clientdata(max8997->rtc, max8997);
194 194
195 max8997->haptic = i2c_new_dummy(i2c->adapter, I2C_ADDR_HAPTIC); 195 max8997->haptic = i2c_new_dummy_device(i2c->adapter, I2C_ADDR_HAPTIC);
196 if (!max8997->haptic) { 196 if (IS_ERR(max8997->haptic)) {
197 dev_err(max8997->dev, "Failed to allocate I2C device for Haptic\n"); 197 dev_err(max8997->dev, "Failed to allocate I2C device for Haptic\n");
198 ret = -ENODEV; 198 ret = PTR_ERR(max8997->haptic);
199 goto err_i2c_haptic; 199 goto err_i2c_haptic;
200 } 200 }
201 i2c_set_clientdata(max8997->haptic, max8997); 201 i2c_set_clientdata(max8997->haptic, max8997);
202 202
203 max8997->muic = i2c_new_dummy(i2c->adapter, I2C_ADDR_MUIC); 203 max8997->muic = i2c_new_dummy_device(i2c->adapter, I2C_ADDR_MUIC);
204 if (!max8997->muic) { 204 if (IS_ERR(max8997->muic)) {
205 dev_err(max8997->dev, "Failed to allocate I2C device for MUIC\n"); 205 dev_err(max8997->dev, "Failed to allocate I2C device for MUIC\n");
206 ret = -ENODEV; 206 ret = PTR_ERR(max8997->muic);
207 goto err_i2c_muic; 207 goto err_i2c_muic;
208 } 208 }
209 i2c_set_clientdata(max8997->muic, max8997); 209 i2c_set_clientdata(max8997->muic, max8997);
diff --git a/drivers/mfd/max8998.c b/drivers/mfd/max8998.c
index 56409df120f8..785f8e9841b7 100644
--- a/drivers/mfd/max8998.c
+++ b/drivers/mfd/max8998.c
@@ -195,10 +195,10 @@ static int max8998_i2c_probe(struct i2c_client *i2c,
195 } 195 }
196 mutex_init(&max8998->iolock); 196 mutex_init(&max8998->iolock);
197 197
198 max8998->rtc = i2c_new_dummy(i2c->adapter, RTC_I2C_ADDR); 198 max8998->rtc = i2c_new_dummy_device(i2c->adapter, RTC_I2C_ADDR);
199 if (!max8998->rtc) { 199 if (IS_ERR(max8998->rtc)) {
200 dev_err(&i2c->dev, "Failed to allocate I2C device for RTC\n"); 200 dev_err(&i2c->dev, "Failed to allocate I2C device for RTC\n");
201 return -ENODEV; 201 return PTR_ERR(max8998->rtc);
202 } 202 }
203 i2c_set_clientdata(max8998->rtc, max8998); 203 i2c_set_clientdata(max8998->rtc, max8998);
204 204
diff --git a/drivers/mfd/mt6397-core.c b/drivers/mfd/mt6397-core.c
index 337bcccdb914..310dae26ddff 100644
--- a/drivers/mfd/mt6397-core.c
+++ b/drivers/mfd/mt6397-core.c
@@ -5,34 +5,34 @@
5 */ 5 */
6 6
7#include <linux/interrupt.h> 7#include <linux/interrupt.h>
8#include <linux/ioport.h>
8#include <linux/module.h> 9#include <linux/module.h>
9#include <linux/of_device.h> 10#include <linux/of_device.h>
10#include <linux/of_irq.h> 11#include <linux/of_irq.h>
11#include <linux/regmap.h> 12#include <linux/regmap.h>
12#include <linux/mfd/core.h> 13#include <linux/mfd/core.h>
13#include <linux/mfd/mt6397/core.h>
14#include <linux/mfd/mt6323/core.h> 14#include <linux/mfd/mt6323/core.h>
15#include <linux/mfd/mt6397/registers.h> 15#include <linux/mfd/mt6397/core.h>
16#include <linux/mfd/mt6323/registers.h> 16#include <linux/mfd/mt6323/registers.h>
17#include <linux/mfd/mt6397/registers.h>
18
19#define MT6323_RTC_BASE 0x8000
20#define MT6323_RTC_SIZE 0x40
17 21
18#define MT6397_RTC_BASE 0xe000 22#define MT6397_RTC_BASE 0xe000
19#define MT6397_RTC_SIZE 0x3e 23#define MT6397_RTC_SIZE 0x3e
20 24
21#define MT6323_CID_CODE 0x23 25#define MT6323_PWRC_BASE 0x8000
22#define MT6391_CID_CODE 0x91 26#define MT6323_PWRC_SIZE 0x40
23#define MT6397_CID_CODE 0x97 27
28static const struct resource mt6323_rtc_resources[] = {
29 DEFINE_RES_MEM(MT6323_RTC_BASE, MT6323_RTC_SIZE),
30 DEFINE_RES_IRQ(MT6323_IRQ_STATUS_RTC),
31};
24 32
25static const struct resource mt6397_rtc_resources[] = { 33static const struct resource mt6397_rtc_resources[] = {
26 { 34 DEFINE_RES_MEM(MT6397_RTC_BASE, MT6397_RTC_SIZE),
27 .start = MT6397_RTC_BASE, 35 DEFINE_RES_IRQ(MT6397_IRQ_RTC),
28 .end = MT6397_RTC_BASE + MT6397_RTC_SIZE,
29 .flags = IORESOURCE_MEM,
30 },
31 {
32 .start = MT6397_IRQ_RTC,
33 .end = MT6397_IRQ_RTC,
34 .flags = IORESOURCE_IRQ,
35 },
36}; 36};
37 37
38static const struct resource mt6323_keys_resources[] = { 38static const struct resource mt6323_keys_resources[] = {
@@ -45,8 +45,17 @@ static const struct resource mt6397_keys_resources[] = {
45 DEFINE_RES_IRQ(MT6397_IRQ_HOMEKEY), 45 DEFINE_RES_IRQ(MT6397_IRQ_HOMEKEY),
46}; 46};
47 47
48static const struct resource mt6323_pwrc_resources[] = {
49 DEFINE_RES_MEM(MT6323_PWRC_BASE, MT6323_PWRC_SIZE),
50};
51
48static const struct mfd_cell mt6323_devs[] = { 52static const struct mfd_cell mt6323_devs[] = {
49 { 53 {
54 .name = "mt6323-rtc",
55 .num_resources = ARRAY_SIZE(mt6323_rtc_resources),
56 .resources = mt6323_rtc_resources,
57 .of_compatible = "mediatek,mt6323-rtc",
58 }, {
50 .name = "mt6323-regulator", 59 .name = "mt6323-regulator",
51 .of_compatible = "mediatek,mt6323-regulator" 60 .of_compatible = "mediatek,mt6323-regulator"
52 }, { 61 }, {
@@ -57,6 +66,11 @@ static const struct mfd_cell mt6323_devs[] = {
57 .num_resources = ARRAY_SIZE(mt6323_keys_resources), 66 .num_resources = ARRAY_SIZE(mt6323_keys_resources),
58 .resources = mt6323_keys_resources, 67 .resources = mt6323_keys_resources,
59 .of_compatible = "mediatek,mt6323-keys" 68 .of_compatible = "mediatek,mt6323-keys"
69 }, {
70 .name = "mt6323-pwrc",
71 .num_resources = ARRAY_SIZE(mt6323_pwrc_resources),
72 .resources = mt6323_pwrc_resources,
73 .of_compatible = "mediatek,mt6323-pwrc"
60 }, 74 },
61}; 75};
62 76
@@ -86,148 +100,6 @@ static const struct mfd_cell mt6397_devs[] = {
86 } 100 }
87}; 101};
88 102
89static void mt6397_irq_lock(struct irq_data *data)
90{
91 struct mt6397_chip *mt6397 = irq_data_get_irq_chip_data(data);
92
93 mutex_lock(&mt6397->irqlock);
94}
95
96static void mt6397_irq_sync_unlock(struct irq_data *data)
97{
98 struct mt6397_chip *mt6397 = irq_data_get_irq_chip_data(data);
99
100 regmap_write(mt6397->regmap, mt6397->int_con[0],
101 mt6397->irq_masks_cur[0]);
102 regmap_write(mt6397->regmap, mt6397->int_con[1],
103 mt6397->irq_masks_cur[1]);
104
105 mutex_unlock(&mt6397->irqlock);
106}
107
108static void mt6397_irq_disable(struct irq_data *data)
109{
110 struct mt6397_chip *mt6397 = irq_data_get_irq_chip_data(data);
111 int shift = data->hwirq & 0xf;
112 int reg = data->hwirq >> 4;
113
114 mt6397->irq_masks_cur[reg] &= ~BIT(shift);
115}
116
117static void mt6397_irq_enable(struct irq_data *data)
118{
119 struct mt6397_chip *mt6397 = irq_data_get_irq_chip_data(data);
120 int shift = data->hwirq & 0xf;
121 int reg = data->hwirq >> 4;
122
123 mt6397->irq_masks_cur[reg] |= BIT(shift);
124}
125
126#ifdef CONFIG_PM_SLEEP
127static int mt6397_irq_set_wake(struct irq_data *irq_data, unsigned int on)
128{
129 struct mt6397_chip *mt6397 = irq_data_get_irq_chip_data(irq_data);
130 int shift = irq_data->hwirq & 0xf;
131 int reg = irq_data->hwirq >> 4;
132
133 if (on)
134 mt6397->wake_mask[reg] |= BIT(shift);
135 else
136 mt6397->wake_mask[reg] &= ~BIT(shift);
137
138 return 0;
139}
140#else
141#define mt6397_irq_set_wake NULL
142#endif
143
144static struct irq_chip mt6397_irq_chip = {
145 .name = "mt6397-irq",
146 .irq_bus_lock = mt6397_irq_lock,
147 .irq_bus_sync_unlock = mt6397_irq_sync_unlock,
148 .irq_enable = mt6397_irq_enable,
149 .irq_disable = mt6397_irq_disable,
150 .irq_set_wake = mt6397_irq_set_wake,
151};
152
153static void mt6397_irq_handle_reg(struct mt6397_chip *mt6397, int reg,
154 int irqbase)
155{
156 unsigned int status;
157 int i, irq, ret;
158
159 ret = regmap_read(mt6397->regmap, reg, &status);
160 if (ret) {
161 dev_err(mt6397->dev, "Failed to read irq status: %d\n", ret);
162 return;
163 }
164
165 for (i = 0; i < 16; i++) {
166 if (status & BIT(i)) {
167 irq = irq_find_mapping(mt6397->irq_domain, irqbase + i);
168 if (irq)
169 handle_nested_irq(irq);
170 }
171 }
172
173 regmap_write(mt6397->regmap, reg, status);
174}
175
176static irqreturn_t mt6397_irq_thread(int irq, void *data)
177{
178 struct mt6397_chip *mt6397 = data;
179
180 mt6397_irq_handle_reg(mt6397, mt6397->int_status[0], 0);
181 mt6397_irq_handle_reg(mt6397, mt6397->int_status[1], 16);
182
183 return IRQ_HANDLED;
184}
185
186static int mt6397_irq_domain_map(struct irq_domain *d, unsigned int irq,
187 irq_hw_number_t hw)
188{
189 struct mt6397_chip *mt6397 = d->host_data;
190
191 irq_set_chip_data(irq, mt6397);
192 irq_set_chip_and_handler(irq, &mt6397_irq_chip, handle_level_irq);
193 irq_set_nested_thread(irq, 1);
194 irq_set_noprobe(irq);
195
196 return 0;
197}
198
199static const struct irq_domain_ops mt6397_irq_domain_ops = {
200 .map = mt6397_irq_domain_map,
201};
202
203static int mt6397_irq_init(struct mt6397_chip *mt6397)
204{
205 int ret;
206
207 mutex_init(&mt6397->irqlock);
208
209 /* Mask all interrupt sources */
210 regmap_write(mt6397->regmap, mt6397->int_con[0], 0x0);
211 regmap_write(mt6397->regmap, mt6397->int_con[1], 0x0);
212
213 mt6397->irq_domain = irq_domain_add_linear(mt6397->dev->of_node,
214 MT6397_IRQ_NR, &mt6397_irq_domain_ops, mt6397);
215 if (!mt6397->irq_domain) {
216 dev_err(mt6397->dev, "could not create irq domain\n");
217 return -ENOMEM;
218 }
219
220 ret = devm_request_threaded_irq(mt6397->dev, mt6397->irq, NULL,
221 mt6397_irq_thread, IRQF_ONESHOT, "mt6397-pmic", mt6397);
222 if (ret) {
223 dev_err(mt6397->dev, "failed to register irq=%d; err: %d\n",
224 mt6397->irq, ret);
225 return ret;
226 }
227
228 return 0;
229}
230
231#ifdef CONFIG_PM_SLEEP 103#ifdef CONFIG_PM_SLEEP
232static int mt6397_irq_suspend(struct device *dev) 104static int mt6397_irq_suspend(struct device *dev)
233{ 105{
@@ -290,7 +162,7 @@ static int mt6397_probe(struct platform_device *pdev)
290 return pmic->irq; 162 return pmic->irq;
291 163
292 switch (id & 0xff) { 164 switch (id & 0xff) {
293 case MT6323_CID_CODE: 165 case MT6323_CHIP_ID:
294 pmic->int_con[0] = MT6323_INT_CON0; 166 pmic->int_con[0] = MT6323_INT_CON0;
295 pmic->int_con[1] = MT6323_INT_CON1; 167 pmic->int_con[1] = MT6323_INT_CON1;
296 pmic->int_status[0] = MT6323_INT_STATUS0; 168 pmic->int_status[0] = MT6323_INT_STATUS0;
@@ -304,8 +176,8 @@ static int mt6397_probe(struct platform_device *pdev)
304 0, pmic->irq_domain); 176 0, pmic->irq_domain);
305 break; 177 break;
306 178
307 case MT6397_CID_CODE: 179 case MT6391_CHIP_ID:
308 case MT6391_CID_CODE: 180 case MT6397_CHIP_ID:
309 pmic->int_con[0] = MT6397_INT_CON0; 181 pmic->int_con[0] = MT6397_INT_CON0;
310 pmic->int_con[1] = MT6397_INT_CON1; 182 pmic->int_con[1] = MT6397_INT_CON1;
311 pmic->int_status[0] = MT6397_INT_STATUS0; 183 pmic->int_status[0] = MT6397_INT_STATUS0;
diff --git a/drivers/mfd/mt6397-irq.c b/drivers/mfd/mt6397-irq.c
new file mode 100644
index 000000000000..b2d3ce1f3115
--- /dev/null
+++ b/drivers/mfd/mt6397-irq.c
@@ -0,0 +1,181 @@
1// SPDX-License-Identifier: GPL-2.0
2//
3// Copyright (c) 2019 MediaTek Inc.
4
5#include <linux/interrupt.h>
6#include <linux/module.h>
7#include <linux/of.h>
8#include <linux/of_device.h>
9#include <linux/of_irq.h>
10#include <linux/platform_device.h>
11#include <linux/regmap.h>
12#include <linux/mfd/mt6323/core.h>
13#include <linux/mfd/mt6323/registers.h>
14#include <linux/mfd/mt6397/core.h>
15#include <linux/mfd/mt6397/registers.h>
16
17static void mt6397_irq_lock(struct irq_data *data)
18{
19 struct mt6397_chip *mt6397 = irq_data_get_irq_chip_data(data);
20
21 mutex_lock(&mt6397->irqlock);
22}
23
24static void mt6397_irq_sync_unlock(struct irq_data *data)
25{
26 struct mt6397_chip *mt6397 = irq_data_get_irq_chip_data(data);
27
28 regmap_write(mt6397->regmap, mt6397->int_con[0],
29 mt6397->irq_masks_cur[0]);
30 regmap_write(mt6397->regmap, mt6397->int_con[1],
31 mt6397->irq_masks_cur[1]);
32
33 mutex_unlock(&mt6397->irqlock);
34}
35
36static void mt6397_irq_disable(struct irq_data *data)
37{
38 struct mt6397_chip *mt6397 = irq_data_get_irq_chip_data(data);
39 int shift = data->hwirq & 0xf;
40 int reg = data->hwirq >> 4;
41
42 mt6397->irq_masks_cur[reg] &= ~BIT(shift);
43}
44
45static void mt6397_irq_enable(struct irq_data *data)
46{
47 struct mt6397_chip *mt6397 = irq_data_get_irq_chip_data(data);
48 int shift = data->hwirq & 0xf;
49 int reg = data->hwirq >> 4;
50
51 mt6397->irq_masks_cur[reg] |= BIT(shift);
52}
53
54#ifdef CONFIG_PM_SLEEP
55static int mt6397_irq_set_wake(struct irq_data *irq_data, unsigned int on)
56{
57 struct mt6397_chip *mt6397 = irq_data_get_irq_chip_data(irq_data);
58 int shift = irq_data->hwirq & 0xf;
59 int reg = irq_data->hwirq >> 4;
60
61 if (on)
62 mt6397->wake_mask[reg] |= BIT(shift);
63 else
64 mt6397->wake_mask[reg] &= ~BIT(shift);
65
66 return 0;
67}
68#else
69#define mt6397_irq_set_wake NULL
70#endif
71
72static struct irq_chip mt6397_irq_chip = {
73 .name = "mt6397-irq",
74 .irq_bus_lock = mt6397_irq_lock,
75 .irq_bus_sync_unlock = mt6397_irq_sync_unlock,
76 .irq_enable = mt6397_irq_enable,
77 .irq_disable = mt6397_irq_disable,
78 .irq_set_wake = mt6397_irq_set_wake,
79};
80
81static void mt6397_irq_handle_reg(struct mt6397_chip *mt6397, int reg,
82 int irqbase)
83{
84 unsigned int status;
85 int i, irq, ret;
86
87 ret = regmap_read(mt6397->regmap, reg, &status);
88 if (ret) {
89 dev_err(mt6397->dev, "Failed to read irq status: %d\n", ret);
90 return;
91 }
92
93 for (i = 0; i < 16; i++) {
94 if (status & BIT(i)) {
95 irq = irq_find_mapping(mt6397->irq_domain, irqbase + i);
96 if (irq)
97 handle_nested_irq(irq);
98 }
99 }
100
101 regmap_write(mt6397->regmap, reg, status);
102}
103
104static irqreturn_t mt6397_irq_thread(int irq, void *data)
105{
106 struct mt6397_chip *mt6397 = data;
107
108 mt6397_irq_handle_reg(mt6397, mt6397->int_status[0], 0);
109 mt6397_irq_handle_reg(mt6397, mt6397->int_status[1], 16);
110
111 return IRQ_HANDLED;
112}
113
114static int mt6397_irq_domain_map(struct irq_domain *d, unsigned int irq,
115 irq_hw_number_t hw)
116{
117 struct mt6397_chip *mt6397 = d->host_data;
118
119 irq_set_chip_data(irq, mt6397);
120 irq_set_chip_and_handler(irq, &mt6397_irq_chip, handle_level_irq);
121 irq_set_nested_thread(irq, 1);
122 irq_set_noprobe(irq);
123
124 return 0;
125}
126
127static const struct irq_domain_ops mt6397_irq_domain_ops = {
128 .map = mt6397_irq_domain_map,
129};
130
131int mt6397_irq_init(struct mt6397_chip *chip)
132{
133 int ret;
134
135 mutex_init(&chip->irqlock);
136
137 switch (chip->chip_id) {
138 case MT6323_CHIP_ID:
139 chip->int_con[0] = MT6323_INT_CON0;
140 chip->int_con[1] = MT6323_INT_CON1;
141 chip->int_status[0] = MT6323_INT_STATUS0;
142 chip->int_status[1] = MT6323_INT_STATUS1;
143 break;
144
145 case MT6391_CHIP_ID:
146 case MT6397_CHIP_ID:
147 chip->int_con[0] = MT6397_INT_CON0;
148 chip->int_con[1] = MT6397_INT_CON1;
149 chip->int_status[0] = MT6397_INT_STATUS0;
150 chip->int_status[1] = MT6397_INT_STATUS1;
151 break;
152
153 default:
154 dev_err(chip->dev, "unsupported chip: 0x%x\n", chip->chip_id);
155 return -ENODEV;
156 }
157
158 /* Mask all interrupt sources */
159 regmap_write(chip->regmap, chip->int_con[0], 0x0);
160 regmap_write(chip->regmap, chip->int_con[1], 0x0);
161
162 chip->irq_domain = irq_domain_add_linear(chip->dev->of_node,
163 MT6397_IRQ_NR,
164 &mt6397_irq_domain_ops,
165 chip);
166 if (!chip->irq_domain) {
167 dev_err(chip->dev, "could not create irq domain\n");
168 return -ENOMEM;
169 }
170
171 ret = devm_request_threaded_irq(chip->dev, chip->irq, NULL,
172 mt6397_irq_thread, IRQF_ONESHOT,
173 "mt6397-pmic", chip);
174 if (ret) {
175 dev_err(chip->dev, "failed to register irq=%d; err: %d\n",
176 chip->irq, ret);
177 return ret;
178 }
179
180 return 0;
181}
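
With the interrupt handling split out into mt6397-irq.c, the core driver is expected to fill in the chip structure and then call mt6397_irq_init(). A hedged sketch of such a caller, using only the fields the new helper itself touches (dev, regmap, irq, chip_id); everything else is illustrative:

static int example_pmic_probe(struct platform_device *pdev)
{
	struct mt6397_chip *chip;

	chip = devm_kzalloc(&pdev->dev, sizeof(*chip), GFP_KERNEL);
	if (!chip)
		return -ENOMEM;

	chip->dev = &pdev->dev;
	chip->regmap = dev_get_regmap(pdev->dev.parent, NULL);
	chip->irq = platform_get_irq(pdev, 0);
	if (chip->irq < 0)
		return chip->irq;
	chip->chip_id = MT6397_CHIP_ID;		/* read from the PMIC in practice */

	return mt6397_irq_init(chip);
}
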
diff --git a/drivers/mfd/palmas.c b/drivers/mfd/palmas.c
index 6818ff34837c..f5b3fa973b13 100644
--- a/drivers/mfd/palmas.c
+++ b/drivers/mfd/palmas.c
@@ -549,12 +549,12 @@ static int palmas_i2c_probe(struct i2c_client *i2c,
549 palmas->i2c_clients[i] = i2c; 549 palmas->i2c_clients[i] = i2c;
550 else { 550 else {
551 palmas->i2c_clients[i] = 551 palmas->i2c_clients[i] =
552 i2c_new_dummy(i2c->adapter, 552 i2c_new_dummy_device(i2c->adapter,
553 i2c->addr + i); 553 i2c->addr + i);
554 if (!palmas->i2c_clients[i]) { 554 if (IS_ERR(palmas->i2c_clients[i])) {
555 dev_err(palmas->dev, 555 dev_err(palmas->dev,
556 "can't attach client %d\n", i); 556 "can't attach client %d\n", i);
557 ret = -ENOMEM; 557 ret = PTR_ERR(palmas->i2c_clients[i]);
558 goto err_i2c; 558 goto err_i2c;
559 } 559 }
560 palmas->i2c_clients[i]->dev.of_node = of_node_get(node); 560 palmas->i2c_clients[i]->dev.of_node = of_node_get(node);
diff --git a/drivers/mfd/qcom_rpm.c b/drivers/mfd/qcom_rpm.c
index 4d7e9008628c..71bc34b74bc9 100644
--- a/drivers/mfd/qcom_rpm.c
+++ b/drivers/mfd/qcom_rpm.c
@@ -561,22 +561,16 @@ static int qcom_rpm_probe(struct platform_device *pdev)
561 clk_prepare_enable(rpm->ramclk); /* Accepts NULL */ 561 clk_prepare_enable(rpm->ramclk); /* Accepts NULL */
562 562
563 irq_ack = platform_get_irq_byname(pdev, "ack"); 563 irq_ack = platform_get_irq_byname(pdev, "ack");
564 if (irq_ack < 0) { 564 if (irq_ack < 0)
565 dev_err(&pdev->dev, "required ack interrupt missing\n");
566 return irq_ack; 565 return irq_ack;
567 }
568 566
569 irq_err = platform_get_irq_byname(pdev, "err"); 567 irq_err = platform_get_irq_byname(pdev, "err");
570 if (irq_err < 0) { 568 if (irq_err < 0)
571 dev_err(&pdev->dev, "required err interrupt missing\n");
572 return irq_err; 569 return irq_err;
573 }
574 570
575 irq_wakeup = platform_get_irq_byname(pdev, "wakeup"); 571 irq_wakeup = platform_get_irq_byname(pdev, "wakeup");
576 if (irq_wakeup < 0) { 572 if (irq_wakeup < 0)
577 dev_err(&pdev->dev, "required wakeup interrupt missing\n");
578 return irq_wakeup; 573 return irq_wakeup;
579 }
580 574
581 match = of_match_device(qcom_rpm_of_match, &pdev->dev); 575 match = of_match_device(qcom_rpm_of_match, &pdev->dev);
582 if (!match) 576 if (!match)
diff --git a/drivers/mfd/sm501.c b/drivers/mfd/sm501.c
index 9b9b06d36cb1..154270f8d8d7 100644
--- a/drivers/mfd/sm501.c
+++ b/drivers/mfd/sm501.c
@@ -17,6 +17,7 @@
17#include <linux/platform_device.h> 17#include <linux/platform_device.h>
18#include <linux/pci.h> 18#include <linux/pci.h>
19#include <linux/platform_data/i2c-gpio.h> 19#include <linux/platform_data/i2c-gpio.h>
20#include <linux/gpio/driver.h>
20#include <linux/gpio/machine.h> 21#include <linux/gpio/machine.h>
21#include <linux/slab.h> 22#include <linux/slab.h>
22 23
@@ -1394,10 +1395,8 @@ static int sm501_plat_probe(struct platform_device *dev)
1394 sm->platdata = dev_get_platdata(&dev->dev); 1395 sm->platdata = dev_get_platdata(&dev->dev);
1395 1396
1396 ret = platform_get_irq(dev, 0); 1397 ret = platform_get_irq(dev, 0);
1397 if (ret < 0) { 1398 if (ret < 0)
1398 dev_err(&dev->dev, "failed to get irq resource\n");
1399 goto err_res; 1399 goto err_res;
1400 }
1401 sm->irq = ret; 1400 sm->irq = ret;
1402 1401
1403 sm->io_res = platform_get_resource(dev, IORESOURCE_MEM, 1); 1402 sm->io_res = platform_get_resource(dev, IORESOURCE_MEM, 1);
diff --git a/drivers/mfd/timberdale.c b/drivers/mfd/timberdale.c
index 60c122e9b39f..faecbca6dba3 100644
--- a/drivers/mfd/timberdale.c
+++ b/drivers/mfd/timberdale.c
@@ -626,8 +626,7 @@ static const struct mfd_cell timberdale_cells_bar2[] = {
626static ssize_t show_fw_ver(struct device *dev, struct device_attribute *attr, 626static ssize_t show_fw_ver(struct device *dev, struct device_attribute *attr,
627 char *buf) 627 char *buf)
628{ 628{
629 struct pci_dev *pdev = to_pci_dev(dev); 629 struct timberdale_device *priv = dev_get_drvdata(dev);
630 struct timberdale_device *priv = pci_get_drvdata(pdev);
631 630
632 return sprintf(buf, "%d.%d.%d\n", priv->fw.major, priv->fw.minor, 631 return sprintf(buf, "%d.%d.%d\n", priv->fw.major, priv->fw.minor,
633 priv->fw.config); 632 priv->fw.config);
diff --git a/drivers/mfd/tps80031.c b/drivers/mfd/tps80031.c
index 865257ade8ac..907452b86e32 100644
--- a/drivers/mfd/tps80031.c
+++ b/drivers/mfd/tps80031.c
@@ -437,12 +437,11 @@ static int tps80031_probe(struct i2c_client *client,
437 if (tps80031_slave_address[i] == client->addr) 437 if (tps80031_slave_address[i] == client->addr)
438 tps80031->clients[i] = client; 438 tps80031->clients[i] = client;
439 else 439 else
440 tps80031->clients[i] = i2c_new_dummy(client->adapter, 440 tps80031->clients[i] = devm_i2c_new_dummy_device(&client->dev,
441 tps80031_slave_address[i]); 441 client->adapter, tps80031_slave_address[i]);
442 if (!tps80031->clients[i]) { 442 if (IS_ERR(tps80031->clients[i])) {
443 dev_err(&client->dev, "can't attach client %d\n", i); 443 dev_err(&client->dev, "can't attach client %d\n", i);
444 ret = -ENOMEM; 444 return PTR_ERR(tps80031->clients[i]);
445 goto fail_client_reg;
446 } 445 }
447 446
448 i2c_set_clientdata(tps80031->clients[i], tps80031); 447 i2c_set_clientdata(tps80031->clients[i], tps80031);
@@ -452,7 +451,7 @@ static int tps80031_probe(struct i2c_client *client,
452 ret = PTR_ERR(tps80031->regmap[i]); 451 ret = PTR_ERR(tps80031->regmap[i]);
453 dev_err(&client->dev, 452 dev_err(&client->dev,
454 "regmap %d init failed, err %d\n", i, ret); 453 "regmap %d init failed, err %d\n", i, ret);
455 goto fail_client_reg; 454 return ret;
456 } 455 }
457 } 456 }
458 457
@@ -461,7 +460,7 @@ static int tps80031_probe(struct i2c_client *client,
461 if (ret < 0) { 460 if (ret < 0) {
462 dev_err(&client->dev, 461 dev_err(&client->dev,
463 "Silicon version number read failed: %d\n", ret); 462 "Silicon version number read failed: %d\n", ret);
464 goto fail_client_reg; 463 return ret;
465 } 464 }
466 465
467 ret = tps80031_read(&client->dev, TPS80031_SLAVE_ID3, 466 ret = tps80031_read(&client->dev, TPS80031_SLAVE_ID3,
@@ -469,7 +468,7 @@ static int tps80031_probe(struct i2c_client *client,
469 if (ret < 0) { 468 if (ret < 0) {
470 dev_err(&client->dev, 469 dev_err(&client->dev,
471 "Silicon eeprom version read failed: %d\n", ret); 470 "Silicon eeprom version read failed: %d\n", ret);
472 goto fail_client_reg; 471 return ret;
473 } 472 }
474 473
475 dev_info(&client->dev, "ES version 0x%02x and EPROM version 0x%02x\n", 474 dev_info(&client->dev, "ES version 0x%02x and EPROM version 0x%02x\n",
@@ -482,7 +481,7 @@ static int tps80031_probe(struct i2c_client *client,
482 ret = tps80031_irq_init(tps80031, client->irq, pdata->irq_base); 481 ret = tps80031_irq_init(tps80031, client->irq, pdata->irq_base);
483 if (ret) { 482 if (ret) {
484 dev_err(&client->dev, "IRQ init failed: %d\n", ret); 483 dev_err(&client->dev, "IRQ init failed: %d\n", ret);
485 goto fail_client_reg; 484 return ret;
486 } 485 }
487 486
488 tps80031_pupd_init(tps80031, pdata); 487 tps80031_pupd_init(tps80031, pdata);
@@ -506,12 +505,6 @@ static int tps80031_probe(struct i2c_client *client,
506 505
507fail_mfd_add: 506fail_mfd_add:
508 regmap_del_irq_chip(client->irq, tps80031->irq_data); 507 regmap_del_irq_chip(client->irq, tps80031->irq_data);
509
510fail_client_reg:
511 for (i = 0; i < TPS80031_NUM_SLAVES; i++) {
512 if (tps80031->clients[i] && (tps80031->clients[i] != client))
513 i2c_unregister_device(tps80031->clients[i]);
514 }
515 return ret; 508 return ret;
516} 509}
517 510
diff --git a/drivers/mfd/twl-core.c b/drivers/mfd/twl-core.c
index 448d9397ff04..20cf8cfe4f3b 100644
--- a/drivers/mfd/twl-core.c
+++ b/drivers/mfd/twl-core.c
@@ -1141,12 +1141,12 @@ twl_probe(struct i2c_client *client, const struct i2c_device_id *id)
1141 if (i == 0) { 1141 if (i == 0) {
1142 twl->client = client; 1142 twl->client = client;
1143 } else { 1143 } else {
1144 twl->client = i2c_new_dummy(client->adapter, 1144 twl->client = i2c_new_dummy_device(client->adapter,
1145 client->addr + i); 1145 client->addr + i);
1146 if (!twl->client) { 1146 if (IS_ERR(twl->client)) {
1147 dev_err(&client->dev, 1147 dev_err(&client->dev,
1148 "can't attach client %d\n", i); 1148 "can't attach client %d\n", i);
1149 status = -ENOMEM; 1149 status = PTR_ERR(twl->client);
1150 goto fail; 1150 goto fail;
1151 } 1151 }
1152 } 1152 }
diff --git a/drivers/net/ethernet/intel/e1000e/e1000.h b/drivers/net/ethernet/intel/e1000e/e1000.h
index 34cd67951aec..6c51b1bad8c4 100644
--- a/drivers/net/ethernet/intel/e1000e/e1000.h
+++ b/drivers/net/ethernet/intel/e1000e/e1000.h
@@ -13,7 +13,6 @@
13#include <linux/io.h> 13#include <linux/io.h>
14#include <linux/netdevice.h> 14#include <linux/netdevice.h>
15#include <linux/pci.h> 15#include <linux/pci.h>
16#include <linux/pci-aspm.h>
17#include <linux/crc32.h> 16#include <linux/crc32.h>
18#include <linux/if_vlan.h> 17#include <linux/if_vlan.h>
19#include <linux/timecounter.h> 18#include <linux/timecounter.h>
diff --git a/drivers/net/ethernet/jme.c b/drivers/net/ethernet/jme.c
index 6d52cf5ce20e..25aa400e2e3c 100644
--- a/drivers/net/ethernet/jme.c
+++ b/drivers/net/ethernet/jme.c
@@ -14,7 +14,6 @@
14#include <linux/module.h> 14#include <linux/module.h>
15#include <linux/kernel.h> 15#include <linux/kernel.h>
16#include <linux/pci.h> 16#include <linux/pci.h>
17#include <linux/pci-aspm.h>
18#include <linux/netdevice.h> 17#include <linux/netdevice.h>
19#include <linux/etherdevice.h> 18#include <linux/etherdevice.h>
20#include <linux/ethtool.h> 19#include <linux/ethtool.h>
diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c
index 0ef01db1f8b8..74f81fe03810 100644
--- a/drivers/net/ethernet/realtek/r8169_main.c
+++ b/drivers/net/ethernet/realtek/r8169_main.c
@@ -28,7 +28,6 @@
28#include <linux/dma-mapping.h> 28#include <linux/dma-mapping.h>
29#include <linux/pm_runtime.h> 29#include <linux/pm_runtime.h>
30#include <linux/prefetch.h> 30#include <linux/prefetch.h>
31#include <linux/pci-aspm.h>
32#include <linux/ipv6.h> 31#include <linux/ipv6.h>
33#include <net/ip6_checksum.h> 32#include <net/ip6_checksum.h>
34 33
diff --git a/drivers/net/wireless/ath/ath5k/pci.c b/drivers/net/wireless/ath/ath5k/pci.c
index c6156cc38940..d5ee32ce9eb3 100644
--- a/drivers/net/wireless/ath/ath5k/pci.c
+++ b/drivers/net/wireless/ath/ath5k/pci.c
@@ -18,7 +18,6 @@
18 18
19#include <linux/nl80211.h> 19#include <linux/nl80211.h>
20#include <linux/pci.h> 20#include <linux/pci.h>
21#include <linux/pci-aspm.h>
22#include <linux/etherdevice.h> 21#include <linux/etherdevice.h>
23#include <linux/module.h> 22#include <linux/module.h>
24#include "../ath.h" 23#include "../ath.h"
diff --git a/drivers/net/wireless/intel/iwlegacy/3945-mac.c b/drivers/net/wireless/intel/iwlegacy/3945-mac.c
index b82da75a9ae3..4fbcc7fba3cc 100644
--- a/drivers/net/wireless/intel/iwlegacy/3945-mac.c
+++ b/drivers/net/wireless/intel/iwlegacy/3945-mac.c
@@ -18,7 +18,6 @@
18#include <linux/module.h> 18#include <linux/module.h>
19#include <linux/init.h> 19#include <linux/init.h>
20#include <linux/pci.h> 20#include <linux/pci.h>
21#include <linux/pci-aspm.h>
22#include <linux/slab.h> 21#include <linux/slab.h>
23#include <linux/dma-mapping.h> 22#include <linux/dma-mapping.h>
24#include <linux/delay.h> 23#include <linux/delay.h>
diff --git a/drivers/net/wireless/intel/iwlegacy/4965-mac.c b/drivers/net/wireless/intel/iwlegacy/4965-mac.c
index fa2c02881939..ffb705b18fb1 100644
--- a/drivers/net/wireless/intel/iwlegacy/4965-mac.c
+++ b/drivers/net/wireless/intel/iwlegacy/4965-mac.c
@@ -18,7 +18,6 @@
18#include <linux/module.h> 18#include <linux/module.h>
19#include <linux/init.h> 19#include <linux/init.h>
20#include <linux/pci.h> 20#include <linux/pci.h>
21#include <linux/pci-aspm.h>
22#include <linux/slab.h> 21#include <linux/slab.h>
23#include <linux/dma-mapping.h> 22#include <linux/dma-mapping.h>
24#include <linux/delay.h> 23#include <linux/delay.h>
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
index 5ab87a8dc907..f8a1f985a1d8 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
@@ -62,7 +62,6 @@
62 * 62 *
63 *****************************************************************************/ 63 *****************************************************************************/
64#include <linux/pci.h> 64#include <linux/pci.h>
65#include <linux/pci-aspm.h>
66#include <linux/interrupt.h> 65#include <linux/interrupt.h>
67#include <linux/debugfs.h> 66#include <linux/debugfs.h>
68#include <linux/sched.h> 67#include <linux/sched.h>
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 6b4d7b064b38..c0808f9eb8ab 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -549,8 +549,10 @@ static void nvme_unmap_data(struct nvme_dev *dev, struct request *req)
549 549
550 WARN_ON_ONCE(!iod->nents); 550 WARN_ON_ONCE(!iod->nents);
551 551
552 /* P2PDMA requests do not need to be unmapped */ 552 if (is_pci_p2pdma_page(sg_page(iod->sg)))
553 if (!is_pci_p2pdma_page(sg_page(iod->sg))) 553 pci_p2pdma_unmap_sg(dev->dev, iod->sg, iod->nents,
554 rq_dma_dir(req));
555 else
554 dma_unmap_sg(dev->dev, iod->sg, iod->nents, rq_dma_dir(req)); 556 dma_unmap_sg(dev->dev, iod->sg, iod->nents, rq_dma_dir(req));
555 557
556 558
@@ -834,8 +836,8 @@ static blk_status_t nvme_map_data(struct nvme_dev *dev, struct request *req,
834 goto out; 836 goto out;
835 837
836 if (is_pci_p2pdma_page(sg_page(iod->sg))) 838 if (is_pci_p2pdma_page(sg_page(iod->sg)))
837 nr_mapped = pci_p2pdma_map_sg(dev->dev, iod->sg, iod->nents, 839 nr_mapped = pci_p2pdma_map_sg_attrs(dev->dev, iod->sg,
838 rq_dma_dir(req)); 840 iod->nents, rq_dma_dir(req), DMA_ATTR_NO_WARN);
839 else 841 else
840 nr_mapped = dma_map_sg_attrs(dev->dev, iod->sg, iod->nents, 842 nr_mapped = dma_map_sg_attrs(dev->dev, iod->sg, iod->nents,
841 rq_dma_dir(req), DMA_ATTR_NO_WARN); 843 rq_dma_dir(req), DMA_ATTR_NO_WARN);
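The nvme hunks above pair pci_p2pdma_map_sg_attrs() on the mapping side with pci_p2pdma_unmap_sg() on the unmap side, so peer-to-peer mappings are now torn down explicitly instead of being skipped. A condensed sketch of that pairing, using only the calls visible in the diff; struct my_req and the helper names are illustrative placeholders.

	#include <linux/pci-p2pdma.h>
	#include <linux/dma-mapping.h>
	#include <linux/scatterlist.h>
	#include <linux/mm.h>

	struct my_req {				/* placeholder request wrapper */
		struct scatterlist *sg;
		int nents;
	};

	static int my_map(struct device *dev, struct my_req *r,
			  enum dma_data_direction dir)
	{
		/* P2PDMA pages take the pci_p2pdma_* path, others the dma_* path */
		if (is_pci_p2pdma_page(sg_page(r->sg)))
			return pci_p2pdma_map_sg_attrs(dev, r->sg, r->nents, dir,
						       DMA_ATTR_NO_WARN);
		return dma_map_sg_attrs(dev, r->sg, r->nents, dir, DMA_ATTR_NO_WARN);
	}

	static void my_unmap(struct device *dev, struct my_req *r,
			     enum dma_data_direction dir)
	{
		/* Mirror the mapping decision so both paths get unmapped */
		if (is_pci_p2pdma_page(sg_page(r->sg)))
			pci_p2pdma_unmap_sg(dev, r->sg, r->nents, dir);
		else
			dma_unmap_sg(dev, r->sg, r->nents, dir);
	}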
diff --git a/drivers/pci/Kconfig b/drivers/pci/Kconfig
index c313de96a357..a304f5ea11b9 100644
--- a/drivers/pci/Kconfig
+++ b/drivers/pci/Kconfig
@@ -52,7 +52,7 @@ config PCI_MSI
52 If you don't know what to do here, say Y. 52 If you don't know what to do here, say Y.
53 53
54config PCI_MSI_IRQ_DOMAIN 54config PCI_MSI_IRQ_DOMAIN
55 def_bool ARC || ARM || ARM64 || X86 55 def_bool ARC || ARM || ARM64 || X86 || RISCV
56 depends on PCI_MSI 56 depends on PCI_MSI
57 select GENERIC_MSI_IRQ_DOMAIN 57 select GENERIC_MSI_IRQ_DOMAIN
58 58
@@ -170,7 +170,7 @@ config PCI_P2PDMA
170 170
171 Many PCIe root complexes do not support P2P transactions and 171 Many PCIe root complexes do not support P2P transactions and
172 it's hard to tell which support it at all, so at this time, 172 it's hard to tell which support it at all, so at this time,
173 P2P DMA transations must be between devices behind the same root 173 P2P DMA transactions must be between devices behind the same root
174 port. 174 port.
175 175
176 If unsure, say N. 176 If unsure, say N.
@@ -181,7 +181,7 @@ config PCI_LABEL
181 181
182config PCI_HYPERV 182config PCI_HYPERV
183 tristate "Hyper-V PCI Frontend" 183 tristate "Hyper-V PCI Frontend"
184 depends on X86 && HYPERV && PCI_MSI && PCI_MSI_IRQ_DOMAIN && X86_64 184 depends on X86_64 && HYPERV && PCI_MSI && PCI_MSI_IRQ_DOMAIN && SYSFS
185 select PCI_HYPERV_INTERFACE 185 select PCI_HYPERV_INTERFACE
186 help 186 help
187 The PCI device frontend driver allows the kernel to import arbitrary 187 The PCI device frontend driver allows the kernel to import arbitrary
diff --git a/drivers/pci/access.c b/drivers/pci/access.c
index 544922f097c0..2fccb5762c76 100644
--- a/drivers/pci/access.c
+++ b/drivers/pci/access.c
@@ -336,15 +336,6 @@ static inline int pcie_cap_version(const struct pci_dev *dev)
336 return pcie_caps_reg(dev) & PCI_EXP_FLAGS_VERS; 336 return pcie_caps_reg(dev) & PCI_EXP_FLAGS_VERS;
337} 337}
338 338
339static bool pcie_downstream_port(const struct pci_dev *dev)
340{
341 int type = pci_pcie_type(dev);
342
343 return type == PCI_EXP_TYPE_ROOT_PORT ||
344 type == PCI_EXP_TYPE_DOWNSTREAM ||
345 type == PCI_EXP_TYPE_PCIE_BRIDGE;
346}
347
348bool pcie_cap_has_lnkctl(const struct pci_dev *dev) 339bool pcie_cap_has_lnkctl(const struct pci_dev *dev)
349{ 340{
350 int type = pci_pcie_type(dev); 341 int type = pci_pcie_type(dev);
diff --git a/drivers/pci/bus.c b/drivers/pci/bus.c
index 495059d923f7..8e40b3e6da77 100644
--- a/drivers/pci/bus.c
+++ b/drivers/pci/bus.c
@@ -417,11 +417,9 @@ struct pci_bus *pci_bus_get(struct pci_bus *bus)
417 get_device(&bus->dev); 417 get_device(&bus->dev);
418 return bus; 418 return bus;
419} 419}
420EXPORT_SYMBOL(pci_bus_get);
421 420
422void pci_bus_put(struct pci_bus *bus) 421void pci_bus_put(struct pci_bus *bus)
423{ 422{
424 if (bus) 423 if (bus)
425 put_device(&bus->dev); 424 put_device(&bus->dev);
426} 425}
427EXPORT_SYMBOL(pci_bus_put);
diff --git a/drivers/pci/controller/dwc/Kconfig b/drivers/pci/controller/dwc/Kconfig
index 6ea778ae4877..0ba988b5b5bc 100644
--- a/drivers/pci/controller/dwc/Kconfig
+++ b/drivers/pci/controller/dwc/Kconfig
@@ -131,13 +131,29 @@ config PCI_KEYSTONE_EP
131 DesignWare core functions to implement the driver. 131 DesignWare core functions to implement the driver.
132 132
133config PCI_LAYERSCAPE 133config PCI_LAYERSCAPE
134 bool "Freescale Layerscape PCIe controller" 134 bool "Freescale Layerscape PCIe controller - Host mode"
135 depends on OF && (ARM || ARCH_LAYERSCAPE || COMPILE_TEST) 135 depends on OF && (ARM || ARCH_LAYERSCAPE || COMPILE_TEST)
136 depends on PCI_MSI_IRQ_DOMAIN 136 depends on PCI_MSI_IRQ_DOMAIN
137 select MFD_SYSCON 137 select MFD_SYSCON
138 select PCIE_DW_HOST 138 select PCIE_DW_HOST
139 help 139 help
140 Say Y here if you want PCIe controller support on Layerscape SoCs. 140 Say Y here if you want to enable PCIe controller support on Layerscape
141 SoCs to work in Host mode.
142 This controller can work either as EP or RC. The RCW[HOST_AGT_PEX]
143 determines which PCIe controller works in EP mode and which PCIe
144 controller works in RC mode.
145
146config PCI_LAYERSCAPE_EP
147 bool "Freescale Layerscape PCIe controller - Endpoint mode"
148 depends on OF && (ARM || ARCH_LAYERSCAPE || COMPILE_TEST)
149 depends on PCI_ENDPOINT
150 select PCIE_DW_EP
151 help
152 Say Y here if you want to enable PCIe controller support on Layerscape
153 SoCs to work in Endpoint mode.
154 This controller can work either as EP or RC. The RCW[HOST_AGT_PEX]
155 determines which PCIe controller works in EP mode and which PCIe
156 controller works in RC mode.
141 157
142config PCI_HISI 158config PCI_HISI
143 depends on OF && (ARM64 || COMPILE_TEST) 159 depends on OF && (ARM64 || COMPILE_TEST)
@@ -220,6 +236,16 @@ config PCI_MESON
220 and therefore the driver re-uses the DesignWare core functions to 236 and therefore the driver re-uses the DesignWare core functions to
221 implement the driver. 237 implement the driver.
222 238
239config PCIE_TEGRA194
240 tristate "NVIDIA Tegra194 (and later) PCIe controller"
241 depends on ARCH_TEGRA_194_SOC || COMPILE_TEST
242 depends on PCI_MSI_IRQ_DOMAIN
243 select PCIE_DW_HOST
244 select PHY_TEGRA194_P2U
245 help
246 Say Y here if you want support for DesignWare core based PCIe host
247 controller found in NVIDIA Tegra194 SoC.
248
223config PCIE_UNIPHIER 249config PCIE_UNIPHIER
224 bool "Socionext UniPhier PCIe controllers" 250 bool "Socionext UniPhier PCIe controllers"
225 depends on ARCH_UNIPHIER || COMPILE_TEST 251 depends on ARCH_UNIPHIER || COMPILE_TEST
@@ -230,4 +256,16 @@ config PCIE_UNIPHIER
230 Say Y here if you want PCIe controller support on UniPhier SoCs. 256 Say Y here if you want PCIe controller support on UniPhier SoCs.
231 This driver supports LD20 and PXs3 SoCs. 257 This driver supports LD20 and PXs3 SoCs.
232 258
259config PCIE_AL
260 bool "Amazon Annapurna Labs PCIe controller"
261 depends on OF && (ARM64 || COMPILE_TEST)
262 depends on PCI_MSI_IRQ_DOMAIN
263 select PCIE_DW_HOST
264 help
265 Say Y here to enable support for Amazon's Annapurna Labs PCIe
266 controller IP on Amazon SoCs. The PCIe controller uses the DesignWare
267 core plus Annapurna Labs proprietary hardware wrappers. This is
268 required only for DT-based platforms. ACPI platforms with the
269 Annapurna Labs PCIe controller don't need to enable this.
270
233endmenu 271endmenu
diff --git a/drivers/pci/controller/dwc/Makefile b/drivers/pci/controller/dwc/Makefile
index b085dfd4fab7..69faff371f11 100644
--- a/drivers/pci/controller/dwc/Makefile
+++ b/drivers/pci/controller/dwc/Makefile
@@ -8,13 +8,15 @@ obj-$(CONFIG_PCI_EXYNOS) += pci-exynos.o
8obj-$(CONFIG_PCI_IMX6) += pci-imx6.o 8obj-$(CONFIG_PCI_IMX6) += pci-imx6.o
9obj-$(CONFIG_PCIE_SPEAR13XX) += pcie-spear13xx.o 9obj-$(CONFIG_PCIE_SPEAR13XX) += pcie-spear13xx.o
10obj-$(CONFIG_PCI_KEYSTONE) += pci-keystone.o 10obj-$(CONFIG_PCI_KEYSTONE) += pci-keystone.o
11obj-$(CONFIG_PCI_LAYERSCAPE) += pci-layerscape.o pci-layerscape-ep.o 11obj-$(CONFIG_PCI_LAYERSCAPE) += pci-layerscape.o
12obj-$(CONFIG_PCI_LAYERSCAPE_EP) += pci-layerscape-ep.o
12obj-$(CONFIG_PCIE_QCOM) += pcie-qcom.o 13obj-$(CONFIG_PCIE_QCOM) += pcie-qcom.o
13obj-$(CONFIG_PCIE_ARMADA_8K) += pcie-armada8k.o 14obj-$(CONFIG_PCIE_ARMADA_8K) += pcie-armada8k.o
14obj-$(CONFIG_PCIE_ARTPEC6) += pcie-artpec6.o 15obj-$(CONFIG_PCIE_ARTPEC6) += pcie-artpec6.o
15obj-$(CONFIG_PCIE_KIRIN) += pcie-kirin.o 16obj-$(CONFIG_PCIE_KIRIN) += pcie-kirin.o
16obj-$(CONFIG_PCIE_HISI_STB) += pcie-histb.o 17obj-$(CONFIG_PCIE_HISI_STB) += pcie-histb.o
17obj-$(CONFIG_PCI_MESON) += pci-meson.o 18obj-$(CONFIG_PCI_MESON) += pci-meson.o
19obj-$(CONFIG_PCIE_TEGRA194) += pcie-tegra194.o
18obj-$(CONFIG_PCIE_UNIPHIER) += pcie-uniphier.o 20obj-$(CONFIG_PCIE_UNIPHIER) += pcie-uniphier.o
19 21
20# The following drivers are for devices that use the generic ACPI 22# The following drivers are for devices that use the generic ACPI
diff --git a/drivers/pci/controller/dwc/pci-exynos.c b/drivers/pci/controller/dwc/pci-exynos.c
index cee5f2f590e2..14a6ba4067fb 100644
--- a/drivers/pci/controller/dwc/pci-exynos.c
+++ b/drivers/pci/controller/dwc/pci-exynos.c
@@ -465,7 +465,7 @@ static int __init exynos_pcie_probe(struct platform_device *pdev)
465 465
466 ep->phy = devm_of_phy_get(dev, np, NULL); 466 ep->phy = devm_of_phy_get(dev, np, NULL);
467 if (IS_ERR(ep->phy)) { 467 if (IS_ERR(ep->phy)) {
468 if (PTR_ERR(ep->phy) == -EPROBE_DEFER) 468 if (PTR_ERR(ep->phy) != -ENODEV)
469 return PTR_ERR(ep->phy); 469 return PTR_ERR(ep->phy);
470 470
471 ep->phy = NULL; 471 ep->phy = NULL;
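This hunk, like the matching armada8k, histb, and imx6 hunks further down, inverts the optional-resource test: instead of special-casing only -EPROBE_DEFER, every error other than -ENODEV is now propagated, and only -ENODEV (resource not described) falls back to NULL. A sketch of the resulting pattern; my_dev and my_get_optional_phy() are placeholder names.

	#include <linux/phy/phy.h>
	#include <linux/err.h>

	struct my_dev {				/* placeholder driver state */
		struct phy *phy;
	};

	static int my_get_optional_phy(struct device *dev, struct my_dev *md)
	{
		md->phy = devm_of_phy_get(dev, dev->of_node, NULL);
		if (IS_ERR(md->phy)) {
			/*
			 * -ENODEV means no PHY is described; anything else,
			 * including -EPROBE_DEFER, is returned unchanged.
			 */
			if (PTR_ERR(md->phy) != -ENODEV)
				return PTR_ERR(md->phy);
			md->phy = NULL;		/* the PHY is genuinely optional */
		}
		return 0;
	}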
diff --git a/drivers/pci/controller/dwc/pci-imx6.c b/drivers/pci/controller/dwc/pci-imx6.c
index 9b5cb5b70389..acfbd34032a8 100644
--- a/drivers/pci/controller/dwc/pci-imx6.c
+++ b/drivers/pci/controller/dwc/pci-imx6.c
@@ -57,6 +57,7 @@ enum imx6_pcie_variants {
57struct imx6_pcie_drvdata { 57struct imx6_pcie_drvdata {
58 enum imx6_pcie_variants variant; 58 enum imx6_pcie_variants variant;
59 u32 flags; 59 u32 flags;
60 int dbi_length;
60}; 61};
61 62
62struct imx6_pcie { 63struct imx6_pcie {
@@ -1173,8 +1174,8 @@ static int imx6_pcie_probe(struct platform_device *pdev)
1173 1174
1174 imx6_pcie->vpcie = devm_regulator_get_optional(&pdev->dev, "vpcie"); 1175 imx6_pcie->vpcie = devm_regulator_get_optional(&pdev->dev, "vpcie");
1175 if (IS_ERR(imx6_pcie->vpcie)) { 1176 if (IS_ERR(imx6_pcie->vpcie)) {
1176 if (PTR_ERR(imx6_pcie->vpcie) == -EPROBE_DEFER) 1177 if (PTR_ERR(imx6_pcie->vpcie) != -ENODEV)
1177 return -EPROBE_DEFER; 1178 return PTR_ERR(imx6_pcie->vpcie);
1178 imx6_pcie->vpcie = NULL; 1179 imx6_pcie->vpcie = NULL;
1179 } 1180 }
1180 1181
@@ -1212,6 +1213,7 @@ static const struct imx6_pcie_drvdata drvdata[] = {
1212 .variant = IMX6Q, 1213 .variant = IMX6Q,
1213 .flags = IMX6_PCIE_FLAG_IMX6_PHY | 1214 .flags = IMX6_PCIE_FLAG_IMX6_PHY |
1214 IMX6_PCIE_FLAG_IMX6_SPEED_CHANGE, 1215 IMX6_PCIE_FLAG_IMX6_SPEED_CHANGE,
1216 .dbi_length = 0x200,
1215 }, 1217 },
1216 [IMX6SX] = { 1218 [IMX6SX] = {
1217 .variant = IMX6SX, 1219 .variant = IMX6SX,
@@ -1254,6 +1256,37 @@ static struct platform_driver imx6_pcie_driver = {
1254 .shutdown = imx6_pcie_shutdown, 1256 .shutdown = imx6_pcie_shutdown,
1255}; 1257};
1256 1258
1259static void imx6_pcie_quirk(struct pci_dev *dev)
1260{
1261 struct pci_bus *bus = dev->bus;
1262 struct pcie_port *pp = bus->sysdata;
1263
1264 /* Bus parent is the PCI bridge, its parent is this platform driver */
1265 if (!bus->dev.parent || !bus->dev.parent->parent)
1266 return;
1267
1268 /* Make sure we only quirk devices associated with this driver */
1269 if (bus->dev.parent->parent->driver != &imx6_pcie_driver.driver)
1270 return;
1271
1272 if (bus->number == pp->root_bus_nr) {
1273 struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
1274 struct imx6_pcie *imx6_pcie = to_imx6_pcie(pci);
1275
1276 /*
1277 * Limit config length to avoid the kernel reading beyond
1278 * the register set and causing an abort on i.MX 6Quad
1279 */
1280 if (imx6_pcie->drvdata->dbi_length) {
1281 dev->cfg_size = imx6_pcie->drvdata->dbi_length;
1282 dev_info(&dev->dev, "Limiting cfg_size to %d\n",
1283 dev->cfg_size);
1284 }
1285 }
1286}
1287DECLARE_PCI_FIXUP_CLASS_HEADER(PCI_VENDOR_ID_SYNOPSYS, 0xabcd,
1288 PCI_CLASS_BRIDGE_PCI, 8, imx6_pcie_quirk);
1289
1257static int __init imx6_pcie_init(void) 1290static int __init imx6_pcie_init(void)
1258{ 1291{
1259#ifdef CONFIG_ARM 1292#ifdef CONFIG_ARM
diff --git a/drivers/pci/controller/dwc/pci-layerscape-ep.c b/drivers/pci/controller/dwc/pci-layerscape-ep.c
index be61d96cc95e..ca9aa4501e7e 100644
--- a/drivers/pci/controller/dwc/pci-layerscape-ep.c
+++ b/drivers/pci/controller/dwc/pci-layerscape-ep.c
@@ -44,6 +44,7 @@ static const struct pci_epc_features ls_pcie_epc_features = {
44 .linkup_notifier = false, 44 .linkup_notifier = false,
45 .msi_capable = true, 45 .msi_capable = true,
46 .msix_capable = false, 46 .msix_capable = false,
47 .bar_fixed_64bit = (1 << BAR_2) | (1 << BAR_4),
47}; 48};
48 49
49static const struct pci_epc_features* 50static const struct pci_epc_features*
diff --git a/drivers/pci/controller/dwc/pcie-al.c b/drivers/pci/controller/dwc/pcie-al.c
index 3ab58f0584a8..1eeda2f6371f 100644
--- a/drivers/pci/controller/dwc/pcie-al.c
+++ b/drivers/pci/controller/dwc/pcie-al.c
@@ -91,3 +91,368 @@ struct pci_ecam_ops al_pcie_ops = {
91}; 91};
92 92
93#endif /* defined(CONFIG_ACPI) && defined(CONFIG_PCI_QUIRKS) */ 93#endif /* defined(CONFIG_ACPI) && defined(CONFIG_PCI_QUIRKS) */
94
95#ifdef CONFIG_PCIE_AL
96
97#include <linux/of_pci.h>
98#include "pcie-designware.h"
99
100#define AL_PCIE_REV_ID_2 2
101#define AL_PCIE_REV_ID_3 3
102#define AL_PCIE_REV_ID_4 4
103
104#define AXI_BASE_OFFSET 0x0
105
106#define DEVICE_ID_OFFSET 0x16c
107
108#define DEVICE_REV_ID 0x0
109#define DEVICE_REV_ID_DEV_ID_MASK GENMASK(31, 16)
110
111#define DEVICE_REV_ID_DEV_ID_X4 0
112#define DEVICE_REV_ID_DEV_ID_X8 2
113#define DEVICE_REV_ID_DEV_ID_X16 4
114
115#define OB_CTRL_REV1_2_OFFSET 0x0040
116#define OB_CTRL_REV3_5_OFFSET 0x0030
117
118#define CFG_TARGET_BUS 0x0
119#define CFG_TARGET_BUS_MASK_MASK GENMASK(7, 0)
120#define CFG_TARGET_BUS_BUSNUM_MASK GENMASK(15, 8)
121
122#define CFG_CONTROL 0x4
123#define CFG_CONTROL_SUBBUS_MASK GENMASK(15, 8)
124#define CFG_CONTROL_SEC_BUS_MASK GENMASK(23, 16)
125
126struct al_pcie_reg_offsets {
127 unsigned int ob_ctrl;
128};
129
130struct al_pcie_target_bus_cfg {
131 u8 reg_val;
132 u8 reg_mask;
133 u8 ecam_mask;
134};
135
136struct al_pcie {
137 struct dw_pcie *pci;
138 void __iomem *controller_base; /* base of PCIe unit (not DW core) */
139 struct device *dev;
140 resource_size_t ecam_size;
141 unsigned int controller_rev_id;
142 struct al_pcie_reg_offsets reg_offsets;
143 struct al_pcie_target_bus_cfg target_bus_cfg;
144};
145
146#define PCIE_ECAM_DEVFN(x) (((x) & 0xff) << 12)
147
148#define to_al_pcie(x) dev_get_drvdata((x)->dev)
149
150static inline u32 al_pcie_controller_readl(struct al_pcie *pcie, u32 offset)
151{
152 return readl_relaxed(pcie->controller_base + offset);
153}
154
155static inline void al_pcie_controller_writel(struct al_pcie *pcie, u32 offset,
156 u32 val)
157{
158 writel_relaxed(val, pcie->controller_base + offset);
159}
160
161static int al_pcie_rev_id_get(struct al_pcie *pcie, unsigned int *rev_id)
162{
163 u32 dev_rev_id_val;
164 u32 dev_id_val;
165
166 dev_rev_id_val = al_pcie_controller_readl(pcie, AXI_BASE_OFFSET +
167 DEVICE_ID_OFFSET +
168 DEVICE_REV_ID);
169 dev_id_val = FIELD_GET(DEVICE_REV_ID_DEV_ID_MASK, dev_rev_id_val);
170
171 switch (dev_id_val) {
172 case DEVICE_REV_ID_DEV_ID_X4:
173 *rev_id = AL_PCIE_REV_ID_2;
174 break;
175 case DEVICE_REV_ID_DEV_ID_X8:
176 *rev_id = AL_PCIE_REV_ID_3;
177 break;
178 case DEVICE_REV_ID_DEV_ID_X16:
179 *rev_id = AL_PCIE_REV_ID_4;
180 break;
181 default:
182 dev_err(pcie->dev, "Unsupported dev_id_val (0x%x)\n",
183 dev_id_val);
184 return -EINVAL;
185 }
186
187 dev_dbg(pcie->dev, "dev_id_val: 0x%x\n", dev_id_val);
188
189 return 0;
190}
191
192static int al_pcie_reg_offsets_set(struct al_pcie *pcie)
193{
194 switch (pcie->controller_rev_id) {
195 case AL_PCIE_REV_ID_2:
196 pcie->reg_offsets.ob_ctrl = OB_CTRL_REV1_2_OFFSET;
197 break;
198 case AL_PCIE_REV_ID_3:
199 case AL_PCIE_REV_ID_4:
200 pcie->reg_offsets.ob_ctrl = OB_CTRL_REV3_5_OFFSET;
201 break;
202 default:
203 dev_err(pcie->dev, "Unsupported controller rev_id: 0x%x\n",
204 pcie->controller_rev_id);
205 return -EINVAL;
206 }
207
208 return 0;
209}
210
211static inline void al_pcie_target_bus_set(struct al_pcie *pcie,
212 u8 target_bus,
213 u8 mask_target_bus)
214{
215 u32 reg;
216
217 reg = FIELD_PREP(CFG_TARGET_BUS_MASK_MASK, mask_target_bus) |
218 FIELD_PREP(CFG_TARGET_BUS_BUSNUM_MASK, target_bus);
219
220 al_pcie_controller_writel(pcie, AXI_BASE_OFFSET +
221 pcie->reg_offsets.ob_ctrl + CFG_TARGET_BUS,
222 reg);
223}
224
225static void __iomem *al_pcie_conf_addr_map(struct al_pcie *pcie,
226 unsigned int busnr,
227 unsigned int devfn)
228{
229 struct al_pcie_target_bus_cfg *target_bus_cfg = &pcie->target_bus_cfg;
230 unsigned int busnr_ecam = busnr & target_bus_cfg->ecam_mask;
231 unsigned int busnr_reg = busnr & target_bus_cfg->reg_mask;
232 struct pcie_port *pp = &pcie->pci->pp;
233 void __iomem *pci_base_addr;
234
235 pci_base_addr = (void __iomem *)((uintptr_t)pp->va_cfg0_base +
236 (busnr_ecam << 20) +
237 PCIE_ECAM_DEVFN(devfn));
238
239 if (busnr_reg != target_bus_cfg->reg_val) {
240 dev_dbg(pcie->pci->dev, "Changing target bus busnum val from 0x%x to 0x%x\n",
241 target_bus_cfg->reg_val, busnr_reg);
242 target_bus_cfg->reg_val = busnr_reg;
243 al_pcie_target_bus_set(pcie,
244 target_bus_cfg->reg_val,
245 target_bus_cfg->reg_mask);
246 }
247
248 return pci_base_addr;
249}
250
251static int al_pcie_rd_other_conf(struct pcie_port *pp, struct pci_bus *bus,
252 unsigned int devfn, int where, int size,
253 u32 *val)
254{
255 struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
256 struct al_pcie *pcie = to_al_pcie(pci);
257 unsigned int busnr = bus->number;
258 void __iomem *pci_addr;
259 int rc;
260
261 pci_addr = al_pcie_conf_addr_map(pcie, busnr, devfn);
262
263 rc = dw_pcie_read(pci_addr + where, size, val);
264
265 dev_dbg(pci->dev, "%d-byte config read from %04x:%02x:%02x.%d offset 0x%x (pci_addr: 0x%px) - val:0x%x\n",
266 size, pci_domain_nr(bus), bus->number,
267 PCI_SLOT(devfn), PCI_FUNC(devfn), where,
268 (pci_addr + where), *val);
269
270 return rc;
271}
272
273static int al_pcie_wr_other_conf(struct pcie_port *pp, struct pci_bus *bus,
274 unsigned int devfn, int where, int size,
275 u32 val)
276{
277 struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
278 struct al_pcie *pcie = to_al_pcie(pci);
279 unsigned int busnr = bus->number;
280 void __iomem *pci_addr;
281 int rc;
282
283 pci_addr = al_pcie_conf_addr_map(pcie, busnr, devfn);
284
285 rc = dw_pcie_write(pci_addr + where, size, val);
286
287 dev_dbg(pci->dev, "%d-byte config write to %04x:%02x:%02x.%d offset 0x%x (pci_addr: 0x%px) - val:0x%x\n",
288 size, pci_domain_nr(bus), bus->number,
289 PCI_SLOT(devfn), PCI_FUNC(devfn), where,
290 (pci_addr + where), val);
291
292 return rc;
293}
294
295static void al_pcie_config_prepare(struct al_pcie *pcie)
296{
297 struct al_pcie_target_bus_cfg *target_bus_cfg;
298 struct pcie_port *pp = &pcie->pci->pp;
299 unsigned int ecam_bus_mask;
300 u32 cfg_control_offset;
301 u8 subordinate_bus;
302 u8 secondary_bus;
303 u32 cfg_control;
304 u32 reg;
305
306 target_bus_cfg = &pcie->target_bus_cfg;
307
308 ecam_bus_mask = (pcie->ecam_size >> 20) - 1;
309 if (ecam_bus_mask > 255) {
310 dev_warn(pcie->dev, "ECAM window size is larger than 256MB. Cutting off at 256\n");
311 ecam_bus_mask = 255;
312 }
313
314 /* This portion is taken from the transaction address */
315 target_bus_cfg->ecam_mask = ecam_bus_mask;
316 /* This portion is taken from the cfg_target_bus reg */
317 target_bus_cfg->reg_mask = ~target_bus_cfg->ecam_mask;
318 target_bus_cfg->reg_val = pp->busn->start & target_bus_cfg->reg_mask;
319
320 al_pcie_target_bus_set(pcie, target_bus_cfg->reg_val,
321 target_bus_cfg->reg_mask);
322
323 secondary_bus = pp->busn->start + 1;
324 subordinate_bus = pp->busn->end;
325
326 /* Set the valid values of secondary and subordinate buses */
327 cfg_control_offset = AXI_BASE_OFFSET + pcie->reg_offsets.ob_ctrl +
328 CFG_CONTROL;
329
330 cfg_control = al_pcie_controller_readl(pcie, cfg_control_offset);
331
332 reg = cfg_control &
333 ~(CFG_CONTROL_SEC_BUS_MASK | CFG_CONTROL_SUBBUS_MASK);
334
335 reg |= FIELD_PREP(CFG_CONTROL_SUBBUS_MASK, subordinate_bus) |
336 FIELD_PREP(CFG_CONTROL_SEC_BUS_MASK, secondary_bus);
337
338 al_pcie_controller_writel(pcie, cfg_control_offset, reg);
339}
340
341static int al_pcie_host_init(struct pcie_port *pp)
342{
343 struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
344 struct al_pcie *pcie = to_al_pcie(pci);
345 int rc;
346
347 rc = al_pcie_rev_id_get(pcie, &pcie->controller_rev_id);
348 if (rc)
349 return rc;
350
351 rc = al_pcie_reg_offsets_set(pcie);
352 if (rc)
353 return rc;
354
355 al_pcie_config_prepare(pcie);
356
357 return 0;
358}
359
360static const struct dw_pcie_host_ops al_pcie_host_ops = {
361 .rd_other_conf = al_pcie_rd_other_conf,
362 .wr_other_conf = al_pcie_wr_other_conf,
363 .host_init = al_pcie_host_init,
364};
365
366static int al_add_pcie_port(struct pcie_port *pp,
367 struct platform_device *pdev)
368{
369 struct device *dev = &pdev->dev;
370 int ret;
371
372 pp->ops = &al_pcie_host_ops;
373
374 ret = dw_pcie_host_init(pp);
375 if (ret) {
376 dev_err(dev, "failed to initialize host\n");
377 return ret;
378 }
379
380 return 0;
381}
382
383static const struct dw_pcie_ops dw_pcie_ops = {
384};
385
386static int al_pcie_probe(struct platform_device *pdev)
387{
388 struct device *dev = &pdev->dev;
389 struct resource *controller_res;
390 struct resource *ecam_res;
391 struct resource *dbi_res;
392 struct al_pcie *al_pcie;
393 struct dw_pcie *pci;
394
395 al_pcie = devm_kzalloc(dev, sizeof(*al_pcie), GFP_KERNEL);
396 if (!al_pcie)
397 return -ENOMEM;
398
399 pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL);
400 if (!pci)
401 return -ENOMEM;
402
403 pci->dev = dev;
404 pci->ops = &dw_pcie_ops;
405
406 al_pcie->pci = pci;
407 al_pcie->dev = dev;
408
409 dbi_res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi");
410 pci->dbi_base = devm_pci_remap_cfg_resource(dev, dbi_res);
411 if (IS_ERR(pci->dbi_base)) {
412 dev_err(dev, "couldn't remap dbi base %pR\n", dbi_res);
413 return PTR_ERR(pci->dbi_base);
414 }
415
416 ecam_res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "config");
417 if (!ecam_res) {
418 dev_err(dev, "couldn't find 'config' reg in DT\n");
419 return -ENOENT;
420 }
421 al_pcie->ecam_size = resource_size(ecam_res);
422
423 controller_res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
424 "controller");
425 al_pcie->controller_base = devm_ioremap_resource(dev, controller_res);
426 if (IS_ERR(al_pcie->controller_base)) {
427 dev_err(dev, "couldn't remap controller base %pR\n",
428 controller_res);
429 return PTR_ERR(al_pcie->controller_base);
430 }
431
432 dev_dbg(dev, "From DT: dbi_base: %pR, controller_base: %pR\n",
433 dbi_res, controller_res);
434
435 platform_set_drvdata(pdev, al_pcie);
436
437 return al_add_pcie_port(&pci->pp, pdev);
438}
439
440static const struct of_device_id al_pcie_of_match[] = {
441 { .compatible = "amazon,al-alpine-v2-pcie",
442 },
443 { .compatible = "amazon,al-alpine-v3-pcie",
444 },
445 {},
446};
447
448static struct platform_driver al_pcie_driver = {
449 .driver = {
450 .name = "al-pcie",
451 .of_match_table = al_pcie_of_match,
452 .suppress_bind_attrs = true,
453 },
454 .probe = al_pcie_probe,
455};
456builtin_platform_driver(al_pcie_driver);
457
458#endif /* CONFIG_PCIE_AL*/
diff --git a/drivers/pci/controller/dwc/pcie-armada8k.c b/drivers/pci/controller/dwc/pcie-armada8k.c
index 3d55dc78d999..49596547e8c2 100644
--- a/drivers/pci/controller/dwc/pcie-armada8k.c
+++ b/drivers/pci/controller/dwc/pcie-armada8k.c
@@ -118,11 +118,10 @@ static int armada8k_pcie_setup_phys(struct armada8k_pcie *pcie)
118 118
119 for (i = 0; i < ARMADA8K_PCIE_MAX_LANES; i++) { 119 for (i = 0; i < ARMADA8K_PCIE_MAX_LANES; i++) {
120 pcie->phy[i] = devm_of_phy_get_by_index(dev, node, i); 120 pcie->phy[i] = devm_of_phy_get_by_index(dev, node, i);
121 if (IS_ERR(pcie->phy[i]) &&
122 (PTR_ERR(pcie->phy[i]) == -EPROBE_DEFER))
123 return PTR_ERR(pcie->phy[i]);
124
125 if (IS_ERR(pcie->phy[i])) { 121 if (IS_ERR(pcie->phy[i])) {
122 if (PTR_ERR(pcie->phy[i]) != -ENODEV)
123 return PTR_ERR(pcie->phy[i]);
124
126 pcie->phy[i] = NULL; 125 pcie->phy[i] = NULL;
127 continue; 126 continue;
128 } 127 }
diff --git a/drivers/pci/controller/dwc/pcie-designware-ep.c b/drivers/pci/controller/dwc/pcie-designware-ep.c
index 2bf5a35c0570..3dd2e2697294 100644
--- a/drivers/pci/controller/dwc/pcie-designware-ep.c
+++ b/drivers/pci/controller/dwc/pcie-designware-ep.c
@@ -40,39 +40,6 @@ void dw_pcie_ep_reset_bar(struct dw_pcie *pci, enum pci_barno bar)
40 __dw_pcie_ep_reset_bar(pci, bar, 0); 40 __dw_pcie_ep_reset_bar(pci, bar, 0);
41} 41}
42 42
43static u8 __dw_pcie_ep_find_next_cap(struct dw_pcie *pci, u8 cap_ptr,
44 u8 cap)
45{
46 u8 cap_id, next_cap_ptr;
47 u16 reg;
48
49 if (!cap_ptr)
50 return 0;
51
52 reg = dw_pcie_readw_dbi(pci, cap_ptr);
53 cap_id = (reg & 0x00ff);
54
55 if (cap_id > PCI_CAP_ID_MAX)
56 return 0;
57
58 if (cap_id == cap)
59 return cap_ptr;
60
61 next_cap_ptr = (reg & 0xff00) >> 8;
62 return __dw_pcie_ep_find_next_cap(pci, next_cap_ptr, cap);
63}
64
65static u8 dw_pcie_ep_find_capability(struct dw_pcie *pci, u8 cap)
66{
67 u8 next_cap_ptr;
68 u16 reg;
69
70 reg = dw_pcie_readw_dbi(pci, PCI_CAPABILITY_LIST);
71 next_cap_ptr = (reg & 0x00ff);
72
73 return __dw_pcie_ep_find_next_cap(pci, next_cap_ptr, cap);
74}
75
76static int dw_pcie_ep_write_header(struct pci_epc *epc, u8 func_no, 43static int dw_pcie_ep_write_header(struct pci_epc *epc, u8 func_no,
77 struct pci_epf_header *hdr) 44 struct pci_epf_header *hdr)
78{ 45{
@@ -531,6 +498,7 @@ int dw_pcie_ep_init(struct dw_pcie_ep *ep)
531 int ret; 498 int ret;
532 u32 reg; 499 u32 reg;
533 void *addr; 500 void *addr;
501 u8 hdr_type;
534 unsigned int nbars; 502 unsigned int nbars;
535 unsigned int offset; 503 unsigned int offset;
536 struct pci_epc *epc; 504 struct pci_epc *epc;
@@ -595,6 +563,13 @@ int dw_pcie_ep_init(struct dw_pcie_ep *ep)
595 if (ep->ops->ep_init) 563 if (ep->ops->ep_init)
596 ep->ops->ep_init(ep); 564 ep->ops->ep_init(ep);
597 565
566 hdr_type = dw_pcie_readb_dbi(pci, PCI_HEADER_TYPE);
567 if (hdr_type != PCI_HEADER_TYPE_NORMAL) {
568 dev_err(pci->dev, "PCIe controller is not set to EP mode (hdr_type:0x%x)!\n",
569 hdr_type);
570 return -EIO;
571 }
572
598 ret = of_property_read_u8(np, "max-functions", &epc->max_functions); 573 ret = of_property_read_u8(np, "max-functions", &epc->max_functions);
599 if (ret < 0) 574 if (ret < 0)
600 epc->max_functions = 1; 575 epc->max_functions = 1;
@@ -612,9 +587,9 @@ int dw_pcie_ep_init(struct dw_pcie_ep *ep)
612 dev_err(dev, "Failed to reserve memory for MSI/MSI-X\n"); 587 dev_err(dev, "Failed to reserve memory for MSI/MSI-X\n");
613 return -ENOMEM; 588 return -ENOMEM;
614 } 589 }
615 ep->msi_cap = dw_pcie_ep_find_capability(pci, PCI_CAP_ID_MSI); 590 ep->msi_cap = dw_pcie_find_capability(pci, PCI_CAP_ID_MSI);
616 591
617 ep->msix_cap = dw_pcie_ep_find_capability(pci, PCI_CAP_ID_MSIX); 592 ep->msix_cap = dw_pcie_find_capability(pci, PCI_CAP_ID_MSIX);
618 593
619 offset = dw_pcie_ep_find_ext_capability(pci, PCI_EXT_CAP_ID_REBAR); 594 offset = dw_pcie_ep_find_ext_capability(pci, PCI_EXT_CAP_ID_REBAR);
620 if (offset) { 595 if (offset) {
diff --git a/drivers/pci/controller/dwc/pcie-designware-host.c b/drivers/pci/controller/dwc/pcie-designware-host.c
index f93252d0da5b..0f36a926059a 100644
--- a/drivers/pci/controller/dwc/pcie-designware-host.c
+++ b/drivers/pci/controller/dwc/pcie-designware-host.c
@@ -323,6 +323,7 @@ int dw_pcie_host_init(struct pcie_port *pp)
323 struct pci_bus *child; 323 struct pci_bus *child;
324 struct pci_host_bridge *bridge; 324 struct pci_host_bridge *bridge;
325 struct resource *cfg_res; 325 struct resource *cfg_res;
326 u32 hdr_type;
326 int ret; 327 int ret;
327 328
328 raw_spin_lock_init(&pci->pp.lock); 329 raw_spin_lock_init(&pci->pp.lock);
@@ -464,6 +465,21 @@ int dw_pcie_host_init(struct pcie_port *pp)
464 goto err_free_msi; 465 goto err_free_msi;
465 } 466 }
466 467
468 ret = dw_pcie_rd_own_conf(pp, PCI_HEADER_TYPE, 1, &hdr_type);
469 if (ret != PCIBIOS_SUCCESSFUL) {
470 dev_err(pci->dev, "Failed reading PCI_HEADER_TYPE cfg space reg (ret: 0x%x)\n",
471 ret);
472 ret = pcibios_err_to_errno(ret);
473 goto err_free_msi;
474 }
475 if (hdr_type != PCI_HEADER_TYPE_BRIDGE) {
476 dev_err(pci->dev,
477 "PCIe controller is not set to bridge type (hdr_type: 0x%x)!\n",
478 hdr_type);
479 ret = -EIO;
480 goto err_free_msi;
481 }
482
467 pp->root_bus_nr = pp->busn->start; 483 pp->root_bus_nr = pp->busn->start;
468 484
469 bridge->dev.parent = dev; 485 bridge->dev.parent = dev;
@@ -628,6 +644,12 @@ void dw_pcie_setup_rc(struct pcie_port *pp)
628 u32 val, ctrl, num_ctrls; 644 u32 val, ctrl, num_ctrls;
629 struct dw_pcie *pci = to_dw_pcie_from_pp(pp); 645 struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
630 646
647 /*
648 * Enable DBI read-only registers for writing/updating configuration.
649 * Write permission gets disabled towards the end of this function.
650 */
651 dw_pcie_dbi_ro_wr_en(pci);
652
631 dw_pcie_setup(pci); 653 dw_pcie_setup(pci);
632 654
633 if (!pp->ops->msi_host_init) { 655 if (!pp->ops->msi_host_init) {
@@ -650,12 +672,10 @@ void dw_pcie_setup_rc(struct pcie_port *pp)
650 dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_1, 0x00000000); 672 dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_1, 0x00000000);
651 673
652 /* Setup interrupt pins */ 674 /* Setup interrupt pins */
653 dw_pcie_dbi_ro_wr_en(pci);
654 val = dw_pcie_readl_dbi(pci, PCI_INTERRUPT_LINE); 675 val = dw_pcie_readl_dbi(pci, PCI_INTERRUPT_LINE);
655 val &= 0xffff00ff; 676 val &= 0xffff00ff;
656 val |= 0x00000100; 677 val |= 0x00000100;
657 dw_pcie_writel_dbi(pci, PCI_INTERRUPT_LINE, val); 678 dw_pcie_writel_dbi(pci, PCI_INTERRUPT_LINE, val);
658 dw_pcie_dbi_ro_wr_dis(pci);
659 679
660 /* Setup bus numbers */ 680 /* Setup bus numbers */
661 val = dw_pcie_readl_dbi(pci, PCI_PRIMARY_BUS); 681 val = dw_pcie_readl_dbi(pci, PCI_PRIMARY_BUS);
@@ -687,15 +707,13 @@ void dw_pcie_setup_rc(struct pcie_port *pp)
687 707
688 dw_pcie_wr_own_conf(pp, PCI_BASE_ADDRESS_0, 4, 0); 708 dw_pcie_wr_own_conf(pp, PCI_BASE_ADDRESS_0, 4, 0);
689 709
690 /* Enable write permission for the DBI read-only register */
691 dw_pcie_dbi_ro_wr_en(pci);
692 /* Program correct class for RC */ 710 /* Program correct class for RC */
693 dw_pcie_wr_own_conf(pp, PCI_CLASS_DEVICE, 2, PCI_CLASS_BRIDGE_PCI); 711 dw_pcie_wr_own_conf(pp, PCI_CLASS_DEVICE, 2, PCI_CLASS_BRIDGE_PCI);
694 /* Better disable write permission right after the update */
695 dw_pcie_dbi_ro_wr_dis(pci);
696 712
697 dw_pcie_rd_own_conf(pp, PCIE_LINK_WIDTH_SPEED_CONTROL, 4, &val); 713 dw_pcie_rd_own_conf(pp, PCIE_LINK_WIDTH_SPEED_CONTROL, 4, &val);
698 val |= PORT_LOGIC_SPEED_CHANGE; 714 val |= PORT_LOGIC_SPEED_CHANGE;
699 dw_pcie_wr_own_conf(pp, PCIE_LINK_WIDTH_SPEED_CONTROL, 4, val); 715 dw_pcie_wr_own_conf(pp, PCIE_LINK_WIDTH_SPEED_CONTROL, 4, val);
716
717 dw_pcie_dbi_ro_wr_dis(pci);
700} 718}
701EXPORT_SYMBOL_GPL(dw_pcie_setup_rc); 719EXPORT_SYMBOL_GPL(dw_pcie_setup_rc);
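dw_pcie_setup_rc() now opens a single dw_pcie_dbi_ro_wr_en()/dw_pcie_dbi_ro_wr_dis() bracket around the whole function instead of toggling write permission around each individual register update. Reduced to its essentials, the pattern looks like the sketch below (only calls already present in the hunk; surrounding code omitted).

	/* Unlock the DBI read-only registers once for the whole update ... */
	dw_pcie_dbi_ro_wr_en(pci);

	dw_pcie_writel_dbi(pci, PCI_INTERRUPT_LINE, val);
	dw_pcie_wr_own_conf(pp, PCI_CLASS_DEVICE, 2, PCI_CLASS_BRIDGE_PCI);

	/* ... and lock them again at the very end of dw_pcie_setup_rc(). */
	dw_pcie_dbi_ro_wr_dis(pci);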
diff --git a/drivers/pci/controller/dwc/pcie-designware.c b/drivers/pci/controller/dwc/pcie-designware.c
index 7d25102c304c..820488dfeaed 100644
--- a/drivers/pci/controller/dwc/pcie-designware.c
+++ b/drivers/pci/controller/dwc/pcie-designware.c
@@ -14,6 +14,86 @@
14 14
15#include "pcie-designware.h" 15#include "pcie-designware.h"
16 16
17/*
18 * These interfaces resemble the pci_find_*capability() interfaces, but these
19 * are for configuring host controllers, which are bridges *to* PCI devices but
20 * are not PCI devices themselves.
21 */
22static u8 __dw_pcie_find_next_cap(struct dw_pcie *pci, u8 cap_ptr,
23 u8 cap)
24{
25 u8 cap_id, next_cap_ptr;
26 u16 reg;
27
28 if (!cap_ptr)
29 return 0;
30
31 reg = dw_pcie_readw_dbi(pci, cap_ptr);
32 cap_id = (reg & 0x00ff);
33
34 if (cap_id > PCI_CAP_ID_MAX)
35 return 0;
36
37 if (cap_id == cap)
38 return cap_ptr;
39
40 next_cap_ptr = (reg & 0xff00) >> 8;
41 return __dw_pcie_find_next_cap(pci, next_cap_ptr, cap);
42}
43
44u8 dw_pcie_find_capability(struct dw_pcie *pci, u8 cap)
45{
46 u8 next_cap_ptr;
47 u16 reg;
48
49 reg = dw_pcie_readw_dbi(pci, PCI_CAPABILITY_LIST);
50 next_cap_ptr = (reg & 0x00ff);
51
52 return __dw_pcie_find_next_cap(pci, next_cap_ptr, cap);
53}
54EXPORT_SYMBOL_GPL(dw_pcie_find_capability);
55
56static u16 dw_pcie_find_next_ext_capability(struct dw_pcie *pci, u16 start,
57 u8 cap)
58{
59 u32 header;
60 int ttl;
61 int pos = PCI_CFG_SPACE_SIZE;
62
63 /* minimum 8 bytes per capability */
64 ttl = (PCI_CFG_SPACE_EXP_SIZE - PCI_CFG_SPACE_SIZE) / 8;
65
66 if (start)
67 pos = start;
68
69 header = dw_pcie_readl_dbi(pci, pos);
70 /*
71 * If we have no capabilities, this is indicated by cap ID,
72 * cap version and next pointer all being 0.
73 */
74 if (header == 0)
75 return 0;
76
77 while (ttl-- > 0) {
78 if (PCI_EXT_CAP_ID(header) == cap && pos != start)
79 return pos;
80
81 pos = PCI_EXT_CAP_NEXT(header);
82 if (pos < PCI_CFG_SPACE_SIZE)
83 break;
84
85 header = dw_pcie_readl_dbi(pci, pos);
86 }
87
88 return 0;
89}
90
91u16 dw_pcie_find_ext_capability(struct dw_pcie *pci, u8 cap)
92{
93 return dw_pcie_find_next_ext_capability(pci, 0, cap);
94}
95EXPORT_SYMBOL_GPL(dw_pcie_find_ext_capability);
96
17int dw_pcie_read(void __iomem *addr, int size, u32 *val) 97int dw_pcie_read(void __iomem *addr, int size, u32 *val)
18{ 98{
19 if (!IS_ALIGNED((uintptr_t)addr, size)) { 99 if (!IS_ALIGNED((uintptr_t)addr, size)) {
@@ -376,10 +456,11 @@ int dw_pcie_wait_for_link(struct dw_pcie *pci)
376 usleep_range(LINK_WAIT_USLEEP_MIN, LINK_WAIT_USLEEP_MAX); 456 usleep_range(LINK_WAIT_USLEEP_MIN, LINK_WAIT_USLEEP_MAX);
377 } 457 }
378 458
379 dev_err(pci->dev, "Phy link never came up\n"); 459 dev_info(pci->dev, "Phy link never came up\n");
380 460
381 return -ETIMEDOUT; 461 return -ETIMEDOUT;
382} 462}
463EXPORT_SYMBOL_GPL(dw_pcie_wait_for_link);
383 464
384int dw_pcie_link_up(struct dw_pcie *pci) 465int dw_pcie_link_up(struct dw_pcie *pci)
385{ 466{
@@ -423,8 +504,10 @@ void dw_pcie_setup(struct dw_pcie *pci)
423 504
424 505
425 ret = of_property_read_u32(np, "num-lanes", &lanes); 506 ret = of_property_read_u32(np, "num-lanes", &lanes);
426 if (ret) 507 if (ret) {
427 lanes = 0; 508 dev_dbg(pci->dev, "property num-lanes isn't found\n");
509 return;
510 }
428 511
429 /* Set the number of lanes */ 512 /* Set the number of lanes */
430 val = dw_pcie_readl_dbi(pci, PCIE_PORT_LINK_CONTROL); 513 val = dw_pcie_readl_dbi(pci, PCIE_PORT_LINK_CONTROL);
@@ -466,4 +549,11 @@ void dw_pcie_setup(struct dw_pcie *pci)
466 break; 549 break;
467 } 550 }
468 dw_pcie_writel_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL, val); 551 dw_pcie_writel_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL, val);
552
553 if (of_property_read_bool(np, "snps,enable-cdm-check")) {
554 val = dw_pcie_readl_dbi(pci, PCIE_PL_CHK_REG_CONTROL_STATUS);
555 val |= PCIE_PL_CHK_REG_CHK_REG_CONTINUOUS |
556 PCIE_PL_CHK_REG_CHK_REG_START;
557 dw_pcie_writel_dbi(pci, PCIE_PL_CHK_REG_CONTROL_STATUS, val);
558 }
469} 559}
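The capability walkers moved into the shared DWC core above are consumed both by the endpoint core (MSI/MSI-X capabilities) and by the new Tegra194 driver (the L1SS extended capability). A short usage sketch, limited to calls that appear elsewhere in this series; the local variables are placeholders.

	u8 msi_cap, msix_cap;
	u16 l1ss;
	u32 val;

	msi_cap  = dw_pcie_find_capability(pci, PCI_CAP_ID_MSI);
	msix_cap = dw_pcie_find_capability(pci, PCI_CAP_ID_MSIX);
	l1ss     = dw_pcie_find_ext_capability(pci, PCI_EXT_CAP_ID_L1SS);

	/*
	 * A return of 0 means "capability not present"; otherwise the value
	 * is a DBI offset usable with dw_pcie_read*_dbi()/dw_pcie_write*_dbi().
	 */
	if (l1ss)
		val = dw_pcie_readl_dbi(pci, l1ss + PCI_L1SS_CAP);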
diff --git a/drivers/pci/controller/dwc/pcie-designware.h b/drivers/pci/controller/dwc/pcie-designware.h
index ffed084a0b4f..5a18e94e52c8 100644
--- a/drivers/pci/controller/dwc/pcie-designware.h
+++ b/drivers/pci/controller/dwc/pcie-designware.h
@@ -86,6 +86,15 @@
86#define PCIE_MISC_CONTROL_1_OFF 0x8BC 86#define PCIE_MISC_CONTROL_1_OFF 0x8BC
87#define PCIE_DBI_RO_WR_EN BIT(0) 87#define PCIE_DBI_RO_WR_EN BIT(0)
88 88
89#define PCIE_PL_CHK_REG_CONTROL_STATUS 0xB20
90#define PCIE_PL_CHK_REG_CHK_REG_START BIT(0)
91#define PCIE_PL_CHK_REG_CHK_REG_CONTINUOUS BIT(1)
92#define PCIE_PL_CHK_REG_CHK_REG_COMPARISON_ERROR BIT(16)
93#define PCIE_PL_CHK_REG_CHK_REG_LOGIC_ERROR BIT(17)
94#define PCIE_PL_CHK_REG_CHK_REG_COMPLETE BIT(18)
95
96#define PCIE_PL_CHK_REG_ERR_ADDR 0xB28
97
89/* 98/*
90 * iATU Unroll-specific register definitions 99 * iATU Unroll-specific register definitions
91 * From 4.80 core version the address translation will be made by unroll 100 * From 4.80 core version the address translation will be made by unroll
@@ -251,6 +260,9 @@ struct dw_pcie {
251#define to_dw_pcie_from_ep(endpoint) \ 260#define to_dw_pcie_from_ep(endpoint) \
252 container_of((endpoint), struct dw_pcie, ep) 261 container_of((endpoint), struct dw_pcie, ep)
253 262
263u8 dw_pcie_find_capability(struct dw_pcie *pci, u8 cap);
264u16 dw_pcie_find_ext_capability(struct dw_pcie *pci, u8 cap);
265
254int dw_pcie_read(void __iomem *addr, int size, u32 *val); 266int dw_pcie_read(void __iomem *addr, int size, u32 *val);
255int dw_pcie_write(void __iomem *addr, int size, u32 val); 267int dw_pcie_write(void __iomem *addr, int size, u32 val);
256 268
diff --git a/drivers/pci/controller/dwc/pcie-histb.c b/drivers/pci/controller/dwc/pcie-histb.c
index 954bc2b74bbc..811b5c6d62ea 100644
--- a/drivers/pci/controller/dwc/pcie-histb.c
+++ b/drivers/pci/controller/dwc/pcie-histb.c
@@ -340,8 +340,8 @@ static int histb_pcie_probe(struct platform_device *pdev)
340 340
341 hipcie->vpcie = devm_regulator_get_optional(dev, "vpcie"); 341 hipcie->vpcie = devm_regulator_get_optional(dev, "vpcie");
342 if (IS_ERR(hipcie->vpcie)) { 342 if (IS_ERR(hipcie->vpcie)) {
343 if (PTR_ERR(hipcie->vpcie) == -EPROBE_DEFER) 343 if (PTR_ERR(hipcie->vpcie) != -ENODEV)
344 return -EPROBE_DEFER; 344 return PTR_ERR(hipcie->vpcie);
345 hipcie->vpcie = NULL; 345 hipcie->vpcie = NULL;
346 } 346 }
347 347
diff --git a/drivers/pci/controller/dwc/pcie-kirin.c b/drivers/pci/controller/dwc/pcie-kirin.c
index 8df1914226be..c19617a912bd 100644
--- a/drivers/pci/controller/dwc/pcie-kirin.c
+++ b/drivers/pci/controller/dwc/pcie-kirin.c
@@ -436,7 +436,7 @@ static int kirin_pcie_host_init(struct pcie_port *pp)
436 return 0; 436 return 0;
437} 437}
438 438
439static struct dw_pcie_ops kirin_dw_pcie_ops = { 439static const struct dw_pcie_ops kirin_dw_pcie_ops = {
440 .read_dbi = kirin_pcie_read_dbi, 440 .read_dbi = kirin_pcie_read_dbi,
441 .write_dbi = kirin_pcie_write_dbi, 441 .write_dbi = kirin_pcie_write_dbi,
442 .link_up = kirin_pcie_link_up, 442 .link_up = kirin_pcie_link_up,
diff --git a/drivers/pci/controller/dwc/pcie-tegra194.c b/drivers/pci/controller/dwc/pcie-tegra194.c
new file mode 100644
index 000000000000..f89f5acee72d
--- /dev/null
+++ b/drivers/pci/controller/dwc/pcie-tegra194.c
@@ -0,0 +1,1732 @@
1// SPDX-License-Identifier: GPL-2.0+
2/*
3 * PCIe host controller driver for Tegra194 SoC
4 *
5 * Copyright (C) 2019 NVIDIA Corporation.
6 *
7 * Author: Vidya Sagar <vidyas@nvidia.com>
8 */
9
10#include <linux/clk.h>
11#include <linux/debugfs.h>
12#include <linux/delay.h>
13#include <linux/gpio.h>
14#include <linux/interrupt.h>
15#include <linux/iopoll.h>
16#include <linux/kernel.h>
17#include <linux/module.h>
18#include <linux/of.h>
19#include <linux/of_device.h>
20#include <linux/of_gpio.h>
21#include <linux/of_irq.h>
22#include <linux/of_pci.h>
23#include <linux/pci.h>
24#include <linux/phy/phy.h>
25#include <linux/pinctrl/consumer.h>
26#include <linux/platform_device.h>
27#include <linux/pm_runtime.h>
28#include <linux/random.h>
29#include <linux/reset.h>
30#include <linux/resource.h>
31#include <linux/types.h>
32#include "pcie-designware.h"
33#include <soc/tegra/bpmp.h>
34#include <soc/tegra/bpmp-abi.h>
35#include "../../pci.h"
36
37#define APPL_PINMUX 0x0
38#define APPL_PINMUX_PEX_RST BIT(0)
39#define APPL_PINMUX_CLKREQ_OVERRIDE_EN BIT(2)
40#define APPL_PINMUX_CLKREQ_OVERRIDE BIT(3)
41#define APPL_PINMUX_CLK_OUTPUT_IN_OVERRIDE_EN BIT(4)
42#define APPL_PINMUX_CLK_OUTPUT_IN_OVERRIDE BIT(5)
43#define APPL_PINMUX_CLKREQ_OUT_OVRD_EN BIT(9)
44#define APPL_PINMUX_CLKREQ_OUT_OVRD BIT(10)
45
46#define APPL_CTRL 0x4
47#define APPL_CTRL_SYS_PRE_DET_STATE BIT(6)
48#define APPL_CTRL_LTSSM_EN BIT(7)
49#define APPL_CTRL_HW_HOT_RST_EN BIT(20)
50#define APPL_CTRL_HW_HOT_RST_MODE_MASK GENMASK(1, 0)
51#define APPL_CTRL_HW_HOT_RST_MODE_SHIFT 22
52#define APPL_CTRL_HW_HOT_RST_MODE_IMDT_RST 0x1
53
54#define APPL_INTR_EN_L0_0 0x8
55#define APPL_INTR_EN_L0_0_LINK_STATE_INT_EN BIT(0)
56#define APPL_INTR_EN_L0_0_MSI_RCV_INT_EN BIT(4)
57#define APPL_INTR_EN_L0_0_INT_INT_EN BIT(8)
58#define APPL_INTR_EN_L0_0_CDM_REG_CHK_INT_EN BIT(19)
59#define APPL_INTR_EN_L0_0_SYS_INTR_EN BIT(30)
60#define APPL_INTR_EN_L0_0_SYS_MSI_INTR_EN BIT(31)
61
62#define APPL_INTR_STATUS_L0 0xC
63#define APPL_INTR_STATUS_L0_LINK_STATE_INT BIT(0)
64#define APPL_INTR_STATUS_L0_INT_INT BIT(8)
65#define APPL_INTR_STATUS_L0_CDM_REG_CHK_INT BIT(18)
66
67#define APPL_INTR_EN_L1_0_0 0x1C
68#define APPL_INTR_EN_L1_0_0_LINK_REQ_RST_NOT_INT_EN BIT(1)
69
70#define APPL_INTR_STATUS_L1_0_0 0x20
71#define APPL_INTR_STATUS_L1_0_0_LINK_REQ_RST_NOT_CHGED BIT(1)
72
73#define APPL_INTR_STATUS_L1_1 0x2C
74#define APPL_INTR_STATUS_L1_2 0x30
75#define APPL_INTR_STATUS_L1_3 0x34
76#define APPL_INTR_STATUS_L1_6 0x3C
77#define APPL_INTR_STATUS_L1_7 0x40
78
79#define APPL_INTR_EN_L1_8_0 0x44
80#define APPL_INTR_EN_L1_8_BW_MGT_INT_EN BIT(2)
81#define APPL_INTR_EN_L1_8_AUTO_BW_INT_EN BIT(3)
82#define APPL_INTR_EN_L1_8_INTX_EN BIT(11)
83#define APPL_INTR_EN_L1_8_AER_INT_EN BIT(15)
84
85#define APPL_INTR_STATUS_L1_8_0 0x4C
86#define APPL_INTR_STATUS_L1_8_0_EDMA_INT_MASK GENMASK(11, 6)
87#define APPL_INTR_STATUS_L1_8_0_BW_MGT_INT_STS BIT(2)
88#define APPL_INTR_STATUS_L1_8_0_AUTO_BW_INT_STS BIT(3)
89
90#define APPL_INTR_STATUS_L1_9 0x54
91#define APPL_INTR_STATUS_L1_10 0x58
92#define APPL_INTR_STATUS_L1_11 0x64
93#define APPL_INTR_STATUS_L1_13 0x74
94#define APPL_INTR_STATUS_L1_14 0x78
95#define APPL_INTR_STATUS_L1_15 0x7C
96#define APPL_INTR_STATUS_L1_17 0x88
97
98#define APPL_INTR_EN_L1_18 0x90
99#define APPL_INTR_EN_L1_18_CDM_REG_CHK_CMPLT BIT(2)
100#define APPL_INTR_EN_L1_18_CDM_REG_CHK_CMP_ERR BIT(1)
101#define APPL_INTR_EN_L1_18_CDM_REG_CHK_LOGIC_ERR BIT(0)
102
103#define APPL_INTR_STATUS_L1_18 0x94
104#define APPL_INTR_STATUS_L1_18_CDM_REG_CHK_CMPLT BIT(2)
105#define APPL_INTR_STATUS_L1_18_CDM_REG_CHK_CMP_ERR BIT(1)
106#define APPL_INTR_STATUS_L1_18_CDM_REG_CHK_LOGIC_ERR BIT(0)
107
108#define APPL_MSI_CTRL_2 0xB0
109
110#define APPL_LTR_MSG_1 0xC4
111#define LTR_MSG_REQ BIT(15)
112#define LTR_MST_NO_SNOOP_SHIFT 16
113
114#define APPL_LTR_MSG_2 0xC8
115#define APPL_LTR_MSG_2_LTR_MSG_REQ_STATE BIT(3)
116
117#define APPL_LINK_STATUS 0xCC
118#define APPL_LINK_STATUS_RDLH_LINK_UP BIT(0)
119
120#define APPL_DEBUG 0xD0
121#define APPL_DEBUG_PM_LINKST_IN_L2_LAT BIT(21)
122#define APPL_DEBUG_PM_LINKST_IN_L0 0x11
123#define APPL_DEBUG_LTSSM_STATE_MASK GENMASK(8, 3)
124#define APPL_DEBUG_LTSSM_STATE_SHIFT 3
125#define LTSSM_STATE_PRE_DETECT 5
126
127#define APPL_RADM_STATUS 0xE4
128#define APPL_PM_XMT_TURNOFF_STATE BIT(0)
129
130#define APPL_DM_TYPE 0x100
131#define APPL_DM_TYPE_MASK GENMASK(3, 0)
132#define APPL_DM_TYPE_RP 0x4
133#define APPL_DM_TYPE_EP 0x0
134
135#define APPL_CFG_BASE_ADDR 0x104
136#define APPL_CFG_BASE_ADDR_MASK GENMASK(31, 12)
137
138#define APPL_CFG_IATU_DMA_BASE_ADDR 0x108
139#define APPL_CFG_IATU_DMA_BASE_ADDR_MASK GENMASK(31, 18)
140
141#define APPL_CFG_MISC 0x110
142#define APPL_CFG_MISC_SLV_EP_MODE BIT(14)
143#define APPL_CFG_MISC_ARCACHE_MASK GENMASK(13, 10)
144#define APPL_CFG_MISC_ARCACHE_SHIFT 10
145#define APPL_CFG_MISC_ARCACHE_VAL 3
146
147#define APPL_CFG_SLCG_OVERRIDE 0x114
148#define APPL_CFG_SLCG_OVERRIDE_SLCG_EN_MASTER BIT(0)
149
150#define APPL_CAR_RESET_OVRD 0x12C
151#define APPL_CAR_RESET_OVRD_CYA_OVERRIDE_CORE_RST_N BIT(0)
152
153#define IO_BASE_IO_DECODE BIT(0)
154#define IO_BASE_IO_DECODE_BIT8 BIT(8)
155
156#define CFG_PREF_MEM_LIMIT_BASE_MEM_DECODE BIT(0)
157#define CFG_PREF_MEM_LIMIT_BASE_MEM_LIMIT_DECODE BIT(16)
158
159#define CFG_TIMER_CTRL_MAX_FUNC_NUM_OFF 0x718
160#define CFG_TIMER_CTRL_ACK_NAK_SHIFT (19)
161
162#define EVENT_COUNTER_ALL_CLEAR 0x3
163#define EVENT_COUNTER_ENABLE_ALL 0x7
164#define EVENT_COUNTER_ENABLE_SHIFT 2
165#define EVENT_COUNTER_EVENT_SEL_MASK GENMASK(7, 0)
166#define EVENT_COUNTER_EVENT_SEL_SHIFT 16
167#define EVENT_COUNTER_EVENT_Tx_L0S 0x2
168#define EVENT_COUNTER_EVENT_Rx_L0S 0x3
169#define EVENT_COUNTER_EVENT_L1 0x5
170#define EVENT_COUNTER_EVENT_L1_1 0x7
171#define EVENT_COUNTER_EVENT_L1_2 0x8
172#define EVENT_COUNTER_GROUP_SEL_SHIFT 24
173#define EVENT_COUNTER_GROUP_5 0x5
174
175#define PORT_LOGIC_ACK_F_ASPM_CTRL 0x70C
176#define ENTER_ASPM BIT(30)
177#define L0S_ENTRANCE_LAT_SHIFT 24
178#define L0S_ENTRANCE_LAT_MASK GENMASK(26, 24)
179#define L1_ENTRANCE_LAT_SHIFT 27
180#define L1_ENTRANCE_LAT_MASK GENMASK(29, 27)
181#define N_FTS_SHIFT 8
182#define N_FTS_MASK GENMASK(7, 0)
183#define N_FTS_VAL 52
184
185#define PORT_LOGIC_GEN2_CTRL 0x80C
186#define PORT_LOGIC_GEN2_CTRL_DIRECT_SPEED_CHANGE BIT(17)
187#define FTS_MASK GENMASK(7, 0)
188#define FTS_VAL 52
189
190#define PORT_LOGIC_MSI_CTRL_INT_0_EN 0x828
191
192#define GEN3_EQ_CONTROL_OFF 0x8a8
193#define GEN3_EQ_CONTROL_OFF_PSET_REQ_VEC_SHIFT 8
194#define GEN3_EQ_CONTROL_OFF_PSET_REQ_VEC_MASK GENMASK(23, 8)
195#define GEN3_EQ_CONTROL_OFF_FB_MODE_MASK GENMASK(3, 0)
196
197#define GEN3_RELATED_OFF 0x890
198#define GEN3_RELATED_OFF_GEN3_ZRXDC_NONCOMPL BIT(0)
199#define GEN3_RELATED_OFF_GEN3_EQ_DISABLE BIT(16)
200#define GEN3_RELATED_OFF_RATE_SHADOW_SEL_SHIFT 24
201#define GEN3_RELATED_OFF_RATE_SHADOW_SEL_MASK GENMASK(25, 24)
202
203#define PORT_LOGIC_AMBA_ERROR_RESPONSE_DEFAULT 0x8D0
204#define AMBA_ERROR_RESPONSE_CRS_SHIFT 3
205#define AMBA_ERROR_RESPONSE_CRS_MASK GENMASK(1, 0)
206#define AMBA_ERROR_RESPONSE_CRS_OKAY 0
207#define AMBA_ERROR_RESPONSE_CRS_OKAY_FFFFFFFF 1
208#define AMBA_ERROR_RESPONSE_CRS_OKAY_FFFF0001 2
209
210#define PORT_LOGIC_MSIX_DOORBELL 0x948
211
212#define CAP_SPCIE_CAP_OFF 0x154
213#define CAP_SPCIE_CAP_OFF_DSP_TX_PRESET0_MASK GENMASK(3, 0)
214#define CAP_SPCIE_CAP_OFF_USP_TX_PRESET0_MASK GENMASK(11, 8)
215#define CAP_SPCIE_CAP_OFF_USP_TX_PRESET0_SHIFT 8
216
217#define PME_ACK_TIMEOUT 10000
218
219#define LTSSM_TIMEOUT 50000 /* 50ms */
220
221#define GEN3_GEN4_EQ_PRESET_INIT 5
222
223#define GEN1_CORE_CLK_FREQ 62500000
224#define GEN2_CORE_CLK_FREQ 125000000
225#define GEN3_CORE_CLK_FREQ 250000000
226#define GEN4_CORE_CLK_FREQ 500000000
227
228static const unsigned int pcie_gen_freq[] = {
229 GEN1_CORE_CLK_FREQ,
230 GEN2_CORE_CLK_FREQ,
231 GEN3_CORE_CLK_FREQ,
232 GEN4_CORE_CLK_FREQ
233};
234
235static const u32 event_cntr_ctrl_offset[] = {
236 0x1d8,
237 0x1a8,
238 0x1a8,
239 0x1a8,
240 0x1c4,
241 0x1d8
242};
243
244static const u32 event_cntr_data_offset[] = {
245 0x1dc,
246 0x1ac,
247 0x1ac,
248 0x1ac,
249 0x1c8,
250 0x1dc
251};
252
253struct tegra_pcie_dw {
254 struct device *dev;
255 struct resource *appl_res;
256 struct resource *dbi_res;
257 struct resource *atu_dma_res;
258 void __iomem *appl_base;
259 struct clk *core_clk;
260 struct reset_control *core_apb_rst;
261 struct reset_control *core_rst;
262 struct dw_pcie pci;
263 struct tegra_bpmp *bpmp;
264
265 bool supports_clkreq;
266 bool enable_cdm_check;
267 bool link_state;
268 bool update_fc_fixup;
269 u8 init_link_width;
270 u32 msi_ctrl_int;
271 u32 num_lanes;
272 u32 max_speed;
273 u32 cid;
274 u32 cfg_link_cap_l1sub;
275 u32 pcie_cap_base;
276 u32 aspm_cmrt;
277 u32 aspm_pwr_on_t;
278 u32 aspm_l0s_enter_lat;
279
280 struct regulator *pex_ctl_supply;
281 struct regulator *slot_ctl_3v3;
282 struct regulator *slot_ctl_12v;
283
284 unsigned int phy_count;
285 struct phy **phys;
286
287 struct dentry *debugfs;
288};
289
290static inline struct tegra_pcie_dw *to_tegra_pcie(struct dw_pcie *pci)
291{
292 return container_of(pci, struct tegra_pcie_dw, pci);
293}
294
295static inline void appl_writel(struct tegra_pcie_dw *pcie, const u32 value,
296 const u32 reg)
297{
298 writel_relaxed(value, pcie->appl_base + reg);
299}
300
301static inline u32 appl_readl(struct tegra_pcie_dw *pcie, const u32 reg)
302{
303 return readl_relaxed(pcie->appl_base + reg);
304}
305
306struct tegra_pcie_soc {
307 enum dw_pcie_device_mode mode;
308};
309
310static void apply_bad_link_workaround(struct pcie_port *pp)
311{
312 struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
313 struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);
314 u32 current_link_width;
315 u16 val;
316
317 /*
318 * NOTE:- Since this scenario is uncommon and link as such is not
319 * stable anyway, not waiting to confirm if link is really
320 * transitioning to Gen-2 speed
321 */
322 val = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base + PCI_EXP_LNKSTA);
323 if (val & PCI_EXP_LNKSTA_LBMS) {
324 current_link_width = (val & PCI_EXP_LNKSTA_NLW) >>
325 PCI_EXP_LNKSTA_NLW_SHIFT;
326 if (pcie->init_link_width > current_link_width) {
327 dev_warn(pci->dev, "PCIe link is bad, width reduced\n");
328 val = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base +
329 PCI_EXP_LNKCTL2);
330 val &= ~PCI_EXP_LNKCTL2_TLS;
331 val |= PCI_EXP_LNKCTL2_TLS_2_5GT;
332 dw_pcie_writew_dbi(pci, pcie->pcie_cap_base +
333 PCI_EXP_LNKCTL2, val);
334
335 val = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base +
336 PCI_EXP_LNKCTL);
337 val |= PCI_EXP_LNKCTL_RL;
338 dw_pcie_writew_dbi(pci, pcie->pcie_cap_base +
339 PCI_EXP_LNKCTL, val);
340 }
341 }
342}
343
344static irqreturn_t tegra_pcie_rp_irq_handler(struct tegra_pcie_dw *pcie)
345{
346 struct dw_pcie *pci = &pcie->pci;
347 struct pcie_port *pp = &pci->pp;
348 u32 val, tmp;
349 u16 val_w;
350
351 val = appl_readl(pcie, APPL_INTR_STATUS_L0);
352 if (val & APPL_INTR_STATUS_L0_LINK_STATE_INT) {
353 val = appl_readl(pcie, APPL_INTR_STATUS_L1_0_0);
354 if (val & APPL_INTR_STATUS_L1_0_0_LINK_REQ_RST_NOT_CHGED) {
355 appl_writel(pcie, val, APPL_INTR_STATUS_L1_0_0);
356
357 /* SBR & Surprise Link Down WAR */
358 val = appl_readl(pcie, APPL_CAR_RESET_OVRD);
359 val &= ~APPL_CAR_RESET_OVRD_CYA_OVERRIDE_CORE_RST_N;
360 appl_writel(pcie, val, APPL_CAR_RESET_OVRD);
361 udelay(1);
362 val = appl_readl(pcie, APPL_CAR_RESET_OVRD);
363 val |= APPL_CAR_RESET_OVRD_CYA_OVERRIDE_CORE_RST_N;
364 appl_writel(pcie, val, APPL_CAR_RESET_OVRD);
365
366 val = dw_pcie_readl_dbi(pci, PORT_LOGIC_GEN2_CTRL);
367 val |= PORT_LOGIC_GEN2_CTRL_DIRECT_SPEED_CHANGE;
368 dw_pcie_writel_dbi(pci, PORT_LOGIC_GEN2_CTRL, val);
369 }
370 }
371
372 if (val & APPL_INTR_STATUS_L0_INT_INT) {
373 val = appl_readl(pcie, APPL_INTR_STATUS_L1_8_0);
374 if (val & APPL_INTR_STATUS_L1_8_0_AUTO_BW_INT_STS) {
375 appl_writel(pcie,
376 APPL_INTR_STATUS_L1_8_0_AUTO_BW_INT_STS,
377 APPL_INTR_STATUS_L1_8_0);
378 apply_bad_link_workaround(pp);
379 }
380 if (val & APPL_INTR_STATUS_L1_8_0_BW_MGT_INT_STS) {
381 appl_writel(pcie,
382 APPL_INTR_STATUS_L1_8_0_BW_MGT_INT_STS,
383 APPL_INTR_STATUS_L1_8_0);
384
385 val_w = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base +
386 PCI_EXP_LNKSTA);
387 dev_dbg(pci->dev, "Link Speed : Gen-%u\n", val_w &
388 PCI_EXP_LNKSTA_CLS);
389 }
390 }
391
392 val = appl_readl(pcie, APPL_INTR_STATUS_L0);
393 if (val & APPL_INTR_STATUS_L0_CDM_REG_CHK_INT) {
394 val = appl_readl(pcie, APPL_INTR_STATUS_L1_18);
395 tmp = dw_pcie_readl_dbi(pci, PCIE_PL_CHK_REG_CONTROL_STATUS);
396 if (val & APPL_INTR_STATUS_L1_18_CDM_REG_CHK_CMPLT) {
397 dev_info(pci->dev, "CDM check complete\n");
398 tmp |= PCIE_PL_CHK_REG_CHK_REG_COMPLETE;
399 }
400 if (val & APPL_INTR_STATUS_L1_18_CDM_REG_CHK_CMP_ERR) {
401 dev_err(pci->dev, "CDM comparison mismatch\n");
402 tmp |= PCIE_PL_CHK_REG_CHK_REG_COMPARISON_ERROR;
403 }
404 if (val & APPL_INTR_STATUS_L1_18_CDM_REG_CHK_LOGIC_ERR) {
405 dev_err(pci->dev, "CDM Logic error\n");
406 tmp |= PCIE_PL_CHK_REG_CHK_REG_LOGIC_ERROR;
407 }
408 dw_pcie_writel_dbi(pci, PCIE_PL_CHK_REG_CONTROL_STATUS, tmp);
409 tmp = dw_pcie_readl_dbi(pci, PCIE_PL_CHK_REG_ERR_ADDR);
410 dev_err(pci->dev, "CDM Error Address Offset = 0x%08X\n", tmp);
411 }
412
413 return IRQ_HANDLED;
414}
415
416static irqreturn_t tegra_pcie_irq_handler(int irq, void *arg)
417{
418 struct tegra_pcie_dw *pcie = arg;
419
420 return tegra_pcie_rp_irq_handler(pcie);
421}
422
423static int tegra_pcie_dw_rd_own_conf(struct pcie_port *pp, int where, int size,
424 u32 *val)
425{
426 struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
427
428 /*
429 * This is an endpoint-mode-specific register that happens to appear even
430 * when the controller is operating in root port mode, and the system hangs
431 * when it is accessed while the link is in ASPM-L1 state.
432 * So skip accessing it altogether
433 */
434 if (where == PORT_LOGIC_MSIX_DOORBELL) {
435 *val = 0x00000000;
436 return PCIBIOS_SUCCESSFUL;
437 }
438
439 return dw_pcie_read(pci->dbi_base + where, size, val);
440}
441
442static int tegra_pcie_dw_wr_own_conf(struct pcie_port *pp, int where, int size,
443 u32 val)
444{
445 struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
446
447 /*
448 * This is an endpoint-mode-specific register that happens to appear even
449 * when the controller is operating in root port mode, and the system hangs
450 * when it is accessed while the link is in ASPM-L1 state.
451 * So skip accessing it altogether
452 */
453 if (where == PORT_LOGIC_MSIX_DOORBELL)
454 return PCIBIOS_SUCCESSFUL;
455
456 return dw_pcie_write(pci->dbi_base + where, size, val);
457}
458
459#if defined(CONFIG_PCIEASPM)
460static void disable_aspm_l11(struct tegra_pcie_dw *pcie)
461{
462 u32 val;
463
464 val = dw_pcie_readl_dbi(&pcie->pci, pcie->cfg_link_cap_l1sub);
465 val &= ~PCI_L1SS_CAP_ASPM_L1_1;
466 dw_pcie_writel_dbi(&pcie->pci, pcie->cfg_link_cap_l1sub, val);
467}
468
469static void disable_aspm_l12(struct tegra_pcie_dw *pcie)
470{
471 u32 val;
472
473 val = dw_pcie_readl_dbi(&pcie->pci, pcie->cfg_link_cap_l1sub);
474 val &= ~PCI_L1SS_CAP_ASPM_L1_2;
475 dw_pcie_writel_dbi(&pcie->pci, pcie->cfg_link_cap_l1sub, val);
476}
477
478static inline u32 event_counter_prog(struct tegra_pcie_dw *pcie, u32 event)
479{
480 u32 val;
481
482 val = dw_pcie_readl_dbi(&pcie->pci, event_cntr_ctrl_offset[pcie->cid]);
483 val &= ~(EVENT_COUNTER_EVENT_SEL_MASK << EVENT_COUNTER_EVENT_SEL_SHIFT);
484 val |= EVENT_COUNTER_GROUP_5 << EVENT_COUNTER_GROUP_SEL_SHIFT;
485 val |= event << EVENT_COUNTER_EVENT_SEL_SHIFT;
486 val |= EVENT_COUNTER_ENABLE_ALL << EVENT_COUNTER_ENABLE_SHIFT;
487 dw_pcie_writel_dbi(&pcie->pci, event_cntr_ctrl_offset[pcie->cid], val);
488 val = dw_pcie_readl_dbi(&pcie->pci, event_cntr_data_offset[pcie->cid]);
489
490 return val;
491}
492
493static int aspm_state_cnt(struct seq_file *s, void *data)
494{
495 struct tegra_pcie_dw *pcie = (struct tegra_pcie_dw *)
496 dev_get_drvdata(s->private);
497 u32 val;
498
499 seq_printf(s, "Tx L0s entry count : %u\n",
500 event_counter_prog(pcie, EVENT_COUNTER_EVENT_Tx_L0S));
501
502 seq_printf(s, "Rx L0s entry count : %u\n",
503 event_counter_prog(pcie, EVENT_COUNTER_EVENT_Rx_L0S));
504
505 seq_printf(s, "Link L1 entry count : %u\n",
506 event_counter_prog(pcie, EVENT_COUNTER_EVENT_L1));
507
508 seq_printf(s, "Link L1.1 entry count : %u\n",
509 event_counter_prog(pcie, EVENT_COUNTER_EVENT_L1_1));
510
511 seq_printf(s, "Link L1.2 entry count : %u\n",
512 event_counter_prog(pcie, EVENT_COUNTER_EVENT_L1_2));
513
514 /* Clear all counters */
515 dw_pcie_writel_dbi(&pcie->pci, event_cntr_ctrl_offset[pcie->cid],
516 EVENT_COUNTER_ALL_CLEAR);
517
518 /* Re-enable counting */
519 val = EVENT_COUNTER_ENABLE_ALL << EVENT_COUNTER_ENABLE_SHIFT;
520 val |= EVENT_COUNTER_GROUP_5 << EVENT_COUNTER_GROUP_SEL_SHIFT;
521 dw_pcie_writel_dbi(&pcie->pci, event_cntr_ctrl_offset[pcie->cid], val);
522
523 return 0;
524}
525
526static void init_host_aspm(struct tegra_pcie_dw *pcie)
527{
528 struct dw_pcie *pci = &pcie->pci;
529 u32 val;
530
531 val = dw_pcie_find_ext_capability(pci, PCI_EXT_CAP_ID_L1SS);
532 pcie->cfg_link_cap_l1sub = val + PCI_L1SS_CAP;
533
534 /* Enable ASPM counters */
535 val = EVENT_COUNTER_ENABLE_ALL << EVENT_COUNTER_ENABLE_SHIFT;
536 val |= EVENT_COUNTER_GROUP_5 << EVENT_COUNTER_GROUP_SEL_SHIFT;
537 dw_pcie_writel_dbi(pci, event_cntr_ctrl_offset[pcie->cid], val);
538
539 /* Program T_cmrt and T_pwr_on values */
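	/*
	 * aspm_cmrt and aspm_pwr_on_t are parsed from the nvidia,aspm-*-us
	 * DT properties; the <<8 and <<19 shifts place them in the
	 * Common_Mode_Restore_Time and T_POWER_ON value fields of the
	 * L1 PM Substates Capabilities register.
	 */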
540 val = dw_pcie_readl_dbi(pci, pcie->cfg_link_cap_l1sub);
541 val &= ~(PCI_L1SS_CAP_CM_RESTORE_TIME | PCI_L1SS_CAP_P_PWR_ON_VALUE);
542 val |= (pcie->aspm_cmrt << 8);
543 val |= (pcie->aspm_pwr_on_t << 19);
544 dw_pcie_writel_dbi(pci, pcie->cfg_link_cap_l1sub, val);
545
546 /* Program L0s and L1 entrance latencies */
547 val = dw_pcie_readl_dbi(pci, PORT_LOGIC_ACK_F_ASPM_CTRL);
548 val &= ~L0S_ENTRANCE_LAT_MASK;
549 val |= (pcie->aspm_l0s_enter_lat << L0S_ENTRANCE_LAT_SHIFT);
550 val |= ENTER_ASPM;
551 dw_pcie_writel_dbi(pci, PORT_LOGIC_ACK_F_ASPM_CTRL, val);
552}
553
554static int init_debugfs(struct tegra_pcie_dw *pcie)
555{
556 struct dentry *d;
557
558 d = debugfs_create_devm_seqfile(pcie->dev, "aspm_state_cnt",
559 pcie->debugfs, aspm_state_cnt);
560 if (IS_ERR_OR_NULL(d))
561 dev_err(pcie->dev,
562 "Failed to create debugfs file \"aspm_state_cnt\"\n");
563
564 return 0;
565}
566#else
567static inline void disable_aspm_l12(struct tegra_pcie_dw *pcie) { return; }
568static inline void disable_aspm_l11(struct tegra_pcie_dw *pcie) { return; }
569static inline void init_host_aspm(struct tegra_pcie_dw *pcie) { return; }
570static inline int init_debugfs(struct tegra_pcie_dw *pcie) { return 0; }
571#endif
572
573static void tegra_pcie_enable_system_interrupts(struct pcie_port *pp)
574{
575 struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
576 struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);
577 u32 val;
578 u16 val_w;
579
580 val = appl_readl(pcie, APPL_INTR_EN_L0_0);
581 val |= APPL_INTR_EN_L0_0_LINK_STATE_INT_EN;
582 appl_writel(pcie, val, APPL_INTR_EN_L0_0);
583
584 val = appl_readl(pcie, APPL_INTR_EN_L1_0_0);
585 val |= APPL_INTR_EN_L1_0_0_LINK_REQ_RST_NOT_INT_EN;
586 appl_writel(pcie, val, APPL_INTR_EN_L1_0_0);
587
588 if (pcie->enable_cdm_check) {
589 val = appl_readl(pcie, APPL_INTR_EN_L0_0);
590 val |= APPL_INTR_EN_L0_0_CDM_REG_CHK_INT_EN;
591 appl_writel(pcie, val, APPL_INTR_EN_L0_0);
592
593 val = appl_readl(pcie, APPL_INTR_EN_L1_18);
594 val |= APPL_INTR_EN_L1_18_CDM_REG_CHK_CMP_ERR;
595 val |= APPL_INTR_EN_L1_18_CDM_REG_CHK_LOGIC_ERR;
596 appl_writel(pcie, val, APPL_INTR_EN_L1_18);
597 }
598
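	/*
	 * Cache the initially negotiated link width and enable the Link
	 * Bandwidth Management interrupt so that later width changes can
	 * be detected.
	 */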
599 val_w = dw_pcie_readw_dbi(&pcie->pci, pcie->pcie_cap_base +
600 PCI_EXP_LNKSTA);
601 pcie->init_link_width = (val_w & PCI_EXP_LNKSTA_NLW) >>
602 PCI_EXP_LNKSTA_NLW_SHIFT;
603
604 val_w = dw_pcie_readw_dbi(&pcie->pci, pcie->pcie_cap_base +
605 PCI_EXP_LNKCTL);
606 val_w |= PCI_EXP_LNKCTL_LBMIE;
607 dw_pcie_writew_dbi(&pcie->pci, pcie->pcie_cap_base + PCI_EXP_LNKCTL,
608 val_w);
609}
610
611static void tegra_pcie_enable_legacy_interrupts(struct pcie_port *pp)
612{
613 struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
614 struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);
615 u32 val;
616
617 /* Enable legacy interrupt generation */
618 val = appl_readl(pcie, APPL_INTR_EN_L0_0);
619 val |= APPL_INTR_EN_L0_0_SYS_INTR_EN;
620 val |= APPL_INTR_EN_L0_0_INT_INT_EN;
621 appl_writel(pcie, val, APPL_INTR_EN_L0_0);
622
623 val = appl_readl(pcie, APPL_INTR_EN_L1_8_0);
624 val |= APPL_INTR_EN_L1_8_INTX_EN;
625 val |= APPL_INTR_EN_L1_8_AUTO_BW_INT_EN;
626 val |= APPL_INTR_EN_L1_8_BW_MGT_INT_EN;
627 if (IS_ENABLED(CONFIG_PCIEAER))
628 val |= APPL_INTR_EN_L1_8_AER_INT_EN;
629 appl_writel(pcie, val, APPL_INTR_EN_L1_8_0);
630}
631
632static void tegra_pcie_enable_msi_interrupts(struct pcie_port *pp)
633{
634 struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
635 struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);
636 u32 val;
637
638 dw_pcie_msi_init(pp);
639
640 /* Enable MSI interrupt generation */
641 val = appl_readl(pcie, APPL_INTR_EN_L0_0);
642 val |= APPL_INTR_EN_L0_0_SYS_MSI_INTR_EN;
643 val |= APPL_INTR_EN_L0_0_MSI_RCV_INT_EN;
644 appl_writel(pcie, val, APPL_INTR_EN_L0_0);
645}
646
647static void tegra_pcie_enable_interrupts(struct pcie_port *pp)
648{
649 struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
650 struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);
651
652 /* Clear interrupt statuses before enabling interrupts */
653 appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L0);
654 appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_0_0);
655 appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_1);
656 appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_2);
657 appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_3);
658 appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_6);
659 appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_7);
660 appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_8_0);
661 appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_9);
662 appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_10);
663 appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_11);
664 appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_13);
665 appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_14);
666 appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_15);
667 appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_17);
668
669 tegra_pcie_enable_system_interrupts(pp);
670 tegra_pcie_enable_legacy_interrupts(pp);
671 if (IS_ENABLED(CONFIG_PCI_MSI))
672 tegra_pcie_enable_msi_interrupts(pp);
673}
674
675static void config_gen3_gen4_eq_presets(struct tegra_pcie_dw *pcie)
676{
677 struct dw_pcie *pci = &pcie->pci;
678 u32 val, offset, i;
679
680 /* Program init preset */
681 for (i = 0; i < pcie->num_lanes; i++) {
682 dw_pcie_read(pci->dbi_base + CAP_SPCIE_CAP_OFF
683 + (i * 2), 2, &val);
684 val &= ~CAP_SPCIE_CAP_OFF_DSP_TX_PRESET0_MASK;
685 val |= GEN3_GEN4_EQ_PRESET_INIT;
686 val &= ~CAP_SPCIE_CAP_OFF_USP_TX_PRESET0_MASK;
687 val |= (GEN3_GEN4_EQ_PRESET_INIT <<
688 CAP_SPCIE_CAP_OFF_USP_TX_PRESET0_SHIFT);
689 dw_pcie_write(pci->dbi_base + CAP_SPCIE_CAP_OFF
690 + (i * 2), 2, val);
691
692 offset = dw_pcie_find_ext_capability(pci,
693 PCI_EXT_CAP_ID_PL_16GT) +
694 PCI_PL_16GT_LE_CTRL;
695 dw_pcie_read(pci->dbi_base + offset + i, 1, &val);
696 val &= ~PCI_PL_16GT_LE_CTRL_DSP_TX_PRESET_MASK;
697 val |= GEN3_GEN4_EQ_PRESET_INIT;
698 val &= ~PCI_PL_16GT_LE_CTRL_USP_TX_PRESET_MASK;
699 val |= (GEN3_GEN4_EQ_PRESET_INIT <<
700 PCI_PL_16GT_LE_CTRL_USP_TX_PRESET_SHIFT);
701 dw_pcie_write(pci->dbi_base + offset + i, 1, val);
702 }
703
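	/*
	 * The GEN3_EQ preset request vector is programmed once per data
	 * rate: with RATE_SHADOW_SEL cleared the 8 GT/s settings get
	 * vector 0x3ff, and with RATE_SHADOW_SEL set to 1 the 16 GT/s
	 * settings get vector 0x360; the shadow select is cleared again
	 * afterwards.
	 */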
704 val = dw_pcie_readl_dbi(pci, GEN3_RELATED_OFF);
705 val &= ~GEN3_RELATED_OFF_RATE_SHADOW_SEL_MASK;
706 dw_pcie_writel_dbi(pci, GEN3_RELATED_OFF, val);
707
708 val = dw_pcie_readl_dbi(pci, GEN3_EQ_CONTROL_OFF);
709 val &= ~GEN3_EQ_CONTROL_OFF_PSET_REQ_VEC_MASK;
710 val |= (0x3ff << GEN3_EQ_CONTROL_OFF_PSET_REQ_VEC_SHIFT);
711 val &= ~GEN3_EQ_CONTROL_OFF_FB_MODE_MASK;
712 dw_pcie_writel_dbi(pci, GEN3_EQ_CONTROL_OFF, val);
713
714 val = dw_pcie_readl_dbi(pci, GEN3_RELATED_OFF);
715 val &= ~GEN3_RELATED_OFF_RATE_SHADOW_SEL_MASK;
716 val |= (0x1 << GEN3_RELATED_OFF_RATE_SHADOW_SEL_SHIFT);
717 dw_pcie_writel_dbi(pci, GEN3_RELATED_OFF, val);
718
719 val = dw_pcie_readl_dbi(pci, GEN3_EQ_CONTROL_OFF);
720 val &= ~GEN3_EQ_CONTROL_OFF_PSET_REQ_VEC_MASK;
721 val |= (0x360 << GEN3_EQ_CONTROL_OFF_PSET_REQ_VEC_SHIFT);
722 val &= ~GEN3_EQ_CONTROL_OFF_FB_MODE_MASK;
723 dw_pcie_writel_dbi(pci, GEN3_EQ_CONTROL_OFF, val);
724
725 val = dw_pcie_readl_dbi(pci, GEN3_RELATED_OFF);
726 val &= ~GEN3_RELATED_OFF_RATE_SHADOW_SEL_MASK;
727 dw_pcie_writel_dbi(pci, GEN3_RELATED_OFF, val);
728}
729
730static void tegra_pcie_prepare_host(struct pcie_port *pp)
731{
732 struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
733 struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);
734 u32 val;
735
736 val = dw_pcie_readl_dbi(pci, PCI_IO_BASE);
737 val &= ~(IO_BASE_IO_DECODE | IO_BASE_IO_DECODE_BIT8);
738 dw_pcie_writel_dbi(pci, PCI_IO_BASE, val);
739
740 val = dw_pcie_readl_dbi(pci, PCI_PREF_MEMORY_BASE);
741 val |= CFG_PREF_MEM_LIMIT_BASE_MEM_DECODE;
742 val |= CFG_PREF_MEM_LIMIT_BASE_MEM_LIMIT_DECODE;
743 dw_pcie_writel_dbi(pci, PCI_PREF_MEMORY_BASE, val);
744
745 dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, 0);
746
747 /* Configure FTS */
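	/*
	 * N_FTS is the number of Fast Training Sequence ordered sets
	 * advertised to the link partner for L0s exit; it is programmed
	 * both in the common field and in the Gen2+ (PORT_LOGIC_GEN2_CTRL)
	 * field below.
	 */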
748 val = dw_pcie_readl_dbi(pci, PORT_LOGIC_ACK_F_ASPM_CTRL);
749 val &= ~(N_FTS_MASK << N_FTS_SHIFT);
750 val |= N_FTS_VAL << N_FTS_SHIFT;
751 dw_pcie_writel_dbi(pci, PORT_LOGIC_ACK_F_ASPM_CTRL, val);
752
753 val = dw_pcie_readl_dbi(pci, PORT_LOGIC_GEN2_CTRL);
754 val &= ~FTS_MASK;
755 val |= FTS_VAL;
756 dw_pcie_writel_dbi(pci, PORT_LOGIC_GEN2_CTRL, val);
757
758	/* Return 0xFFFF0001 in response to CRS (Configuration Request Retry Status) */
759 val = dw_pcie_readl_dbi(pci, PORT_LOGIC_AMBA_ERROR_RESPONSE_DEFAULT);
760 val &= ~(AMBA_ERROR_RESPONSE_CRS_MASK << AMBA_ERROR_RESPONSE_CRS_SHIFT);
761 val |= (AMBA_ERROR_RESPONSE_CRS_OKAY_FFFF0001 <<
762 AMBA_ERROR_RESPONSE_CRS_SHIFT);
763 dw_pcie_writel_dbi(pci, PORT_LOGIC_AMBA_ERROR_RESPONSE_DEFAULT, val);
764
765 /* Configure Max Speed from DT */
766 if (pcie->max_speed && pcie->max_speed != -EINVAL) {
767 val = dw_pcie_readl_dbi(pci, pcie->pcie_cap_base +
768 PCI_EXP_LNKCAP);
769 val &= ~PCI_EXP_LNKCAP_SLS;
770 val |= pcie->max_speed;
771 dw_pcie_writel_dbi(pci, pcie->pcie_cap_base + PCI_EXP_LNKCAP,
772 val);
773 }
774
775 /* Configure Max lane width from DT */
776 val = dw_pcie_readl_dbi(pci, pcie->pcie_cap_base + PCI_EXP_LNKCAP);
777 val &= ~PCI_EXP_LNKCAP_MLW;
778 val |= (pcie->num_lanes << PCI_EXP_LNKSTA_NLW_SHIFT);
779 dw_pcie_writel_dbi(pci, pcie->pcie_cap_base + PCI_EXP_LNKCAP, val);
780
781 config_gen3_gen4_eq_presets(pcie);
782
783 init_host_aspm(pcie);
784
785 val = dw_pcie_readl_dbi(pci, GEN3_RELATED_OFF);
786 val &= ~GEN3_RELATED_OFF_GEN3_ZRXDC_NONCOMPL;
787 dw_pcie_writel_dbi(pci, GEN3_RELATED_OFF, val);
788
789 if (pcie->update_fc_fixup) {
790 val = dw_pcie_readl_dbi(pci, CFG_TIMER_CTRL_MAX_FUNC_NUM_OFF);
791 val |= 0x1 << CFG_TIMER_CTRL_ACK_NAK_SHIFT;
792 dw_pcie_writel_dbi(pci, CFG_TIMER_CTRL_MAX_FUNC_NUM_OFF, val);
793 }
794
795 dw_pcie_setup_rc(pp);
796
797 clk_set_rate(pcie->core_clk, GEN4_CORE_CLK_FREQ);
798
799 /* Assert RST */
800 val = appl_readl(pcie, APPL_PINMUX);
801 val &= ~APPL_PINMUX_PEX_RST;
802 appl_writel(pcie, val, APPL_PINMUX);
803
804 usleep_range(100, 200);
805
806 /* Enable LTSSM */
807 val = appl_readl(pcie, APPL_CTRL);
808 val |= APPL_CTRL_LTSSM_EN;
809 appl_writel(pcie, val, APPL_CTRL);
810
811 /* De-assert RST */
812 val = appl_readl(pcie, APPL_PINMUX);
813 val |= APPL_PINMUX_PEX_RST;
814 appl_writel(pcie, val, APPL_PINMUX);
815
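	/* Allow the link time to train before the caller polls it */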
816 msleep(100);
817}
818
819static int tegra_pcie_dw_host_init(struct pcie_port *pp)
820{
821 struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
822 struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);
823 u32 val, tmp, offset, speed;
824
825 tegra_pcie_prepare_host(pp);
826
827 if (dw_pcie_wait_for_link(pci)) {
828		/*
829		 * Some endpoints can't bring the link up if the root port
830		 * has the Data Link Feature (DLF) enabled. Refer to PCIe
831		 * spec r4.0 v1.0, sec 3.4.2 & 7.7.4, for more information
832		 * on Scaled Flow Control and DLF. So confirm that this is
833		 * indeed the case here and attempt link-up once again with
834		 * DLF disabled.
835		 */
836 val = appl_readl(pcie, APPL_DEBUG);
837 val &= APPL_DEBUG_LTSSM_STATE_MASK;
838 val >>= APPL_DEBUG_LTSSM_STATE_SHIFT;
839 tmp = appl_readl(pcie, APPL_LINK_STATUS);
840 tmp &= APPL_LINK_STATUS_RDLH_LINK_UP;
841 if (!(val == 0x11 && !tmp)) {
842			/* Link is down for a reason other than the DLF quirk */
843 return 0;
844 }
845
846		dev_info(pci->dev, "Link is down in DLL\n");
847		dev_info(pci->dev, "Trying again with DLF exchange disabled\n");
848 /* Disable LTSSM */
849 val = appl_readl(pcie, APPL_CTRL);
850 val &= ~APPL_CTRL_LTSSM_EN;
851 appl_writel(pcie, val, APPL_CTRL);
852
853 reset_control_assert(pcie->core_rst);
854 reset_control_deassert(pcie->core_rst);
855
856 offset = dw_pcie_find_ext_capability(pci, PCI_EXT_CAP_ID_DLF);
857 val = dw_pcie_readl_dbi(pci, offset + PCI_DLF_CAP);
858 val &= ~PCI_DLF_EXCHANGE_ENABLE;
859 dw_pcie_writel_dbi(pci, offset, val);
860
861 tegra_pcie_prepare_host(pp);
862
863 if (dw_pcie_wait_for_link(pci))
864 return 0;
865 }
866
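	/*
	 * PCI_EXP_LNKSTA_CLS is 1-based (1 = 2.5 GT/s ... 4 = 16 GT/s),
	 * hence the speed - 1 index into pcie_gen_freq[] when picking the
	 * core clock rate for the negotiated link speed.
	 */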
867 speed = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base + PCI_EXP_LNKSTA) &
868 PCI_EXP_LNKSTA_CLS;
869 clk_set_rate(pcie->core_clk, pcie_gen_freq[speed - 1]);
870
871 tegra_pcie_enable_interrupts(pp);
872
873 return 0;
874}
875
876static int tegra_pcie_dw_link_up(struct dw_pcie *pci)
877{
878 struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);
879 u32 val = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base + PCI_EXP_LNKSTA);
880
881 return !!(val & PCI_EXP_LNKSTA_DLLLA);
882}
883
884static void tegra_pcie_set_msi_vec_num(struct pcie_port *pp)
885{
886 pp->num_vectors = MAX_MSI_IRQS;
887}
888
889static const struct dw_pcie_ops tegra_dw_pcie_ops = {
890 .link_up = tegra_pcie_dw_link_up,
891};
892
893static struct dw_pcie_host_ops tegra_pcie_dw_host_ops = {
894 .rd_own_conf = tegra_pcie_dw_rd_own_conf,
895 .wr_own_conf = tegra_pcie_dw_wr_own_conf,
896 .host_init = tegra_pcie_dw_host_init,
897 .set_num_vectors = tegra_pcie_set_msi_vec_num,
898};
899
900static void tegra_pcie_disable_phy(struct tegra_pcie_dw *pcie)
901{
902 unsigned int phy_count = pcie->phy_count;
903
904 while (phy_count--) {
905 phy_power_off(pcie->phys[phy_count]);
906 phy_exit(pcie->phys[phy_count]);
907 }
908}
909
910static int tegra_pcie_enable_phy(struct tegra_pcie_dw *pcie)
911{
912 unsigned int i;
913 int ret;
914
915 for (i = 0; i < pcie->phy_count; i++) {
916 ret = phy_init(pcie->phys[i]);
917 if (ret < 0)
918 goto phy_power_off;
919
920 ret = phy_power_on(pcie->phys[i]);
921 if (ret < 0)
922 goto phy_exit;
923 }
924
925 return 0;
926
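	/*
	 * The phy_exit label sits inside the unwind loop so that the PHY
	 * whose phy_power_on() failed is only phy_exit()'d, while every PHY
	 * initialized before it gets both phy_power_off() and phy_exit().
	 */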
927phy_power_off:
928 while (i--) {
929 phy_power_off(pcie->phys[i]);
930phy_exit:
931 phy_exit(pcie->phys[i]);
932 }
933
934 return ret;
935}
936
937static int tegra_pcie_dw_parse_dt(struct tegra_pcie_dw *pcie)
938{
939 struct device_node *np = pcie->dev->of_node;
940 int ret;
941
942 ret = of_property_read_u32(np, "nvidia,aspm-cmrt-us", &pcie->aspm_cmrt);
943 if (ret < 0) {
944 dev_info(pcie->dev, "Failed to read ASPM T_cmrt: %d\n", ret);
945 return ret;
946 }
947
948 ret = of_property_read_u32(np, "nvidia,aspm-pwr-on-t-us",
949 &pcie->aspm_pwr_on_t);
950 if (ret < 0)
951 dev_info(pcie->dev, "Failed to read ASPM Power On time: %d\n",
952 ret);
953
954 ret = of_property_read_u32(np, "nvidia,aspm-l0s-entrance-latency-us",
955 &pcie->aspm_l0s_enter_lat);
956 if (ret < 0)
957 dev_info(pcie->dev,
958 "Failed to read ASPM L0s Entrance latency: %d\n", ret);
959
960 ret = of_property_read_u32(np, "num-lanes", &pcie->num_lanes);
961 if (ret < 0) {
962 dev_err(pcie->dev, "Failed to read num-lanes: %d\n", ret);
963 return ret;
964 }
965
966 pcie->max_speed = of_pci_get_max_link_speed(np);
967
968 ret = of_property_read_u32_index(np, "nvidia,bpmp", 1, &pcie->cid);
969 if (ret) {
970 dev_err(pcie->dev, "Failed to read Controller-ID: %d\n", ret);
971 return ret;
972 }
973
974 ret = of_property_count_strings(np, "phy-names");
975 if (ret < 0) {
976 dev_err(pcie->dev, "Failed to find PHY entries: %d\n",
977 ret);
978 return ret;
979 }
980 pcie->phy_count = ret;
981
982 if (of_property_read_bool(np, "nvidia,update-fc-fixup"))
983 pcie->update_fc_fixup = true;
984
985 pcie->supports_clkreq =
986 of_property_read_bool(pcie->dev->of_node, "supports-clkreq");
987
988 pcie->enable_cdm_check =
989 of_property_read_bool(np, "snps,enable-cdm-check");
990
991 return 0;
992}
993
994static int tegra_pcie_bpmp_set_ctrl_state(struct tegra_pcie_dw *pcie,
995 bool enable)
996{
997 struct mrq_uphy_response resp;
998 struct tegra_bpmp_message msg;
999 struct mrq_uphy_request req;
1000
1001 /* Controller-5 doesn't need to have its state set by BPMP-FW */
1002 if (pcie->cid == 5)
1003 return 0;
1004
1005 memset(&req, 0, sizeof(req));
1006 memset(&resp, 0, sizeof(resp));
1007
1008 req.cmd = CMD_UPHY_PCIE_CONTROLLER_STATE;
1009 req.controller_state.pcie_controller = pcie->cid;
1010 req.controller_state.enable = enable;
1011
1012 memset(&msg, 0, sizeof(msg));
1013 msg.mrq = MRQ_UPHY;
1014 msg.tx.data = &req;
1015 msg.tx.size = sizeof(req);
1016 msg.rx.data = &resp;
1017 msg.rx.size = sizeof(resp);
1018
1019 return tegra_bpmp_transfer(pcie->bpmp, &msg);
1020}
1021
1022static void tegra_pcie_downstream_dev_to_D0(struct tegra_pcie_dw *pcie)
1023{
1024 struct pcie_port *pp = &pcie->pci.pp;
1025 struct pci_bus *child, *root_bus = NULL;
1026 struct pci_dev *pdev;
1027
1028	/*
1029	 * With some endpoints, the link doesn't go into the L2 state on Tegra
1030	 * if they are not in the D0 state. So make sure that the immediate
1031	 * downstream devices are in D0 before sending PME_TurnOff to put the
1032	 * link into L2.
1033	 * This is per PCI Express Base r4.0 v1.0, September 27 2017,
1034	 * sec 5.2 Link State Power Management (page 428).
1035	 */
1036
1037 list_for_each_entry(child, &pp->root_bus->children, node) {
1038		/* Find the bus immediately downstream of the Root Port */
1039 if (child->parent == pp->root_bus) {
1040 root_bus = child;
1041 break;
1042 }
1043 }
1044
1045 if (!root_bus) {
1046 dev_err(pcie->dev, "Failed to find downstream devices\n");
1047 return;
1048 }
1049
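	/* Bring the devices in slot 0 of that bus to D0 if they are not already */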
1050 list_for_each_entry(pdev, &root_bus->devices, bus_list) {
1051 if (PCI_SLOT(pdev->devfn) == 0) {
1052 if (pci_set_power_state(pdev, PCI_D0))
1053 dev_err(pcie->dev,
1054 "Failed to transition %s to D0 state\n",
1055 dev_name(&pdev->dev));
1056 }
1057 }
1058}
1059
1060static int tegra_pcie_get_slot_regulators(struct tegra_pcie_dw *pcie)
1061{
1062 pcie->slot_ctl_3v3 = devm_regulator_get_optional(pcie->dev, "vpcie3v3");
1063 if (IS_ERR(pcie->slot_ctl_3v3)) {
1064 if (PTR_ERR(pcie->slot_ctl_3v3) != -ENODEV)
1065 return PTR_ERR(pcie->slot_ctl_3v3);
1066
1067 pcie->slot_ctl_3v3 = NULL;
1068 }
1069
1070 pcie->slot_ctl_12v = devm_regulator_get_optional(pcie->dev, "vpcie12v");
1071 if (IS_ERR(pcie->slot_ctl_12v)) {
1072 if (PTR_ERR(pcie->slot_ctl_12v) != -ENODEV)
1073 return PTR_ERR(pcie->slot_ctl_12v);
1074
1075 pcie->slot_ctl_12v = NULL;
1076 }
1077
1078 return 0;
1079}
1080
1081static int tegra_pcie_enable_slot_regulators(struct tegra_pcie_dw *pcie)
1082{
1083 int ret;
1084
1085 if (pcie->slot_ctl_3v3) {
1086 ret = regulator_enable(pcie->slot_ctl_3v3);
1087 if (ret < 0) {
1088 dev_err(pcie->dev,
1089 "Failed to enable 3.3V slot supply: %d\n", ret);
1090 return ret;
1091 }
1092 }
1093
1094 if (pcie->slot_ctl_12v) {
1095 ret = regulator_enable(pcie->slot_ctl_12v);
1096 if (ret < 0) {
1097 dev_err(pcie->dev,
1098 "Failed to enable 12V slot supply: %d\n", ret);
1099 goto fail_12v_enable;
1100 }
1101 }
1102
1103 /*
1104 * According to PCI Express Card Electromechanical Specification
1105 * Revision 1.1, Table-2.4, T_PVPERL (Power stable to PERST# inactive)
1106 * should be a minimum of 100ms.
1107 */
1108 if (pcie->slot_ctl_3v3 || pcie->slot_ctl_12v)
1109 msleep(100);
1110
1111 return 0;
1112
1113fail_12v_enable:
1114 if (pcie->slot_ctl_3v3)
1115 regulator_disable(pcie->slot_ctl_3v3);
1116 return ret;
1117}
1118
1119static void tegra_pcie_disable_slot_regulators(struct tegra_pcie_dw *pcie)
1120{
1121 if (pcie->slot_ctl_12v)
1122 regulator_disable(pcie->slot_ctl_12v);
1123 if (pcie->slot_ctl_3v3)
1124 regulator_disable(pcie->slot_ctl_3v3);
1125}
1126
1127static int tegra_pcie_config_controller(struct tegra_pcie_dw *pcie,
1128 bool en_hw_hot_rst)
1129{
1130 int ret;
1131 u32 val;
1132
1133 ret = tegra_pcie_bpmp_set_ctrl_state(pcie, true);
1134 if (ret) {
1135 dev_err(pcie->dev,
1136 "Failed to enable controller %u: %d\n", pcie->cid, ret);
1137 return ret;
1138 }
1139
1140 ret = tegra_pcie_enable_slot_regulators(pcie);
1141 if (ret < 0)
1142 goto fail_slot_reg_en;
1143
1144 ret = regulator_enable(pcie->pex_ctl_supply);
1145 if (ret < 0) {
1146 dev_err(pcie->dev, "Failed to enable regulator: %d\n", ret);
1147 goto fail_reg_en;
1148 }
1149
1150 ret = clk_prepare_enable(pcie->core_clk);
1151 if (ret) {
1152 dev_err(pcie->dev, "Failed to enable core clock: %d\n", ret);
1153 goto fail_core_clk;
1154 }
1155
1156 ret = reset_control_deassert(pcie->core_apb_rst);
1157 if (ret) {
1158 dev_err(pcie->dev, "Failed to deassert core APB reset: %d\n",
1159 ret);
1160 goto fail_core_apb_rst;
1161 }
1162
1163 if (en_hw_hot_rst) {
1164 /* Enable HW_HOT_RST mode */
1165 val = appl_readl(pcie, APPL_CTRL);
1166 val &= ~(APPL_CTRL_HW_HOT_RST_MODE_MASK <<
1167 APPL_CTRL_HW_HOT_RST_MODE_SHIFT);
1168 val |= APPL_CTRL_HW_HOT_RST_EN;
1169 appl_writel(pcie, val, APPL_CTRL);
1170 }
1171
1172 ret = tegra_pcie_enable_phy(pcie);
1173 if (ret) {
1174 dev_err(pcie->dev, "Failed to enable PHY: %d\n", ret);
1175 goto fail_phy;
1176 }
1177
1178 /* Update CFG base address */
1179 appl_writel(pcie, pcie->dbi_res->start & APPL_CFG_BASE_ADDR_MASK,
1180 APPL_CFG_BASE_ADDR);
1181
1182 /* Configure this core for RP mode operation */
1183 appl_writel(pcie, APPL_DM_TYPE_RP, APPL_DM_TYPE);
1184
1185 appl_writel(pcie, 0x0, APPL_CFG_SLCG_OVERRIDE);
1186
1187 val = appl_readl(pcie, APPL_CTRL);
1188 appl_writel(pcie, val | APPL_CTRL_SYS_PRE_DET_STATE, APPL_CTRL);
1189
1190 val = appl_readl(pcie, APPL_CFG_MISC);
1191 val |= (APPL_CFG_MISC_ARCACHE_VAL << APPL_CFG_MISC_ARCACHE_SHIFT);
1192 appl_writel(pcie, val, APPL_CFG_MISC);
1193
1194 if (!pcie->supports_clkreq) {
1195 val = appl_readl(pcie, APPL_PINMUX);
1196 val |= APPL_PINMUX_CLKREQ_OUT_OVRD_EN;
1197 val |= APPL_PINMUX_CLKREQ_OUT_OVRD;
1198 appl_writel(pcie, val, APPL_PINMUX);
1199 }
1200
1201 /* Update iATU_DMA base address */
1202 appl_writel(pcie,
1203 pcie->atu_dma_res->start & APPL_CFG_IATU_DMA_BASE_ADDR_MASK,
1204 APPL_CFG_IATU_DMA_BASE_ADDR);
1205
1206 reset_control_deassert(pcie->core_rst);
1207
1208 pcie->pcie_cap_base = dw_pcie_find_capability(&pcie->pci,
1209 PCI_CAP_ID_EXP);
1210
1211 /* Disable ASPM-L1SS advertisement as there is no CLKREQ routing */
1212 if (!pcie->supports_clkreq) {
1213 disable_aspm_l11(pcie);
1214 disable_aspm_l12(pcie);
1215 }
1216
1217 return ret;
1218
1219fail_phy:
1220 reset_control_assert(pcie->core_apb_rst);
1221fail_core_apb_rst:
1222 clk_disable_unprepare(pcie->core_clk);
1223fail_core_clk:
1224 regulator_disable(pcie->pex_ctl_supply);
1225fail_reg_en:
1226 tegra_pcie_disable_slot_regulators(pcie);
1227fail_slot_reg_en:
1228 tegra_pcie_bpmp_set_ctrl_state(pcie, false);
1229
1230 return ret;
1231}
1232
1233static int __deinit_controller(struct tegra_pcie_dw *pcie)
1234{
1235 int ret;
1236
1237 ret = reset_control_assert(pcie->core_rst);
1238 if (ret) {
1239 dev_err(pcie->dev, "Failed to assert \"core\" reset: %d\n",
1240 ret);
1241 return ret;
1242 }
1243
1244 tegra_pcie_disable_phy(pcie);
1245
1246 ret = reset_control_assert(pcie->core_apb_rst);
1247 if (ret) {
1248 dev_err(pcie->dev, "Failed to assert APB reset: %d\n", ret);
1249 return ret;
1250 }
1251
1252 clk_disable_unprepare(pcie->core_clk);
1253
1254 ret = regulator_disable(pcie->pex_ctl_supply);
1255 if (ret) {
1256 dev_err(pcie->dev, "Failed to disable regulator: %d\n", ret);
1257 return ret;
1258 }
1259
1260 tegra_pcie_disable_slot_regulators(pcie);
1261
1262 ret = tegra_pcie_bpmp_set_ctrl_state(pcie, false);
1263 if (ret) {
1264 dev_err(pcie->dev, "Failed to disable controller %d: %d\n",
1265 pcie->cid, ret);
1266 return ret;
1267 }
1268
1269 return ret;
1270}
1271
1272static int tegra_pcie_init_controller(struct tegra_pcie_dw *pcie)
1273{
1274 struct dw_pcie *pci = &pcie->pci;
1275 struct pcie_port *pp = &pci->pp;
1276 int ret;
1277
1278 ret = tegra_pcie_config_controller(pcie, false);
1279 if (ret < 0)
1280 return ret;
1281
1282 pp->ops = &tegra_pcie_dw_host_ops;
1283
1284 ret = dw_pcie_host_init(pp);
1285 if (ret < 0) {
1286 dev_err(pcie->dev, "Failed to add PCIe port: %d\n", ret);
1287 goto fail_host_init;
1288 }
1289
1290 return 0;
1291
1292fail_host_init:
1293 return __deinit_controller(pcie);
1294}
1295
1296static int tegra_pcie_try_link_l2(struct tegra_pcie_dw *pcie)
1297{
1298 u32 val;
1299
1300 if (!tegra_pcie_dw_link_up(&pcie->pci))
1301 return 0;
1302
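	/*
	 * Ask the controller to transmit PME_Turn_Off, then poll APPL_DEBUG
	 * until the link settles in L2, giving up after PME_ACK_TIMEOUT.
	 */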
1303 val = appl_readl(pcie, APPL_RADM_STATUS);
1304 val |= APPL_PM_XMT_TURNOFF_STATE;
1305 appl_writel(pcie, val, APPL_RADM_STATUS);
1306
1307 return readl_poll_timeout_atomic(pcie->appl_base + APPL_DEBUG, val,
1308 val & APPL_DEBUG_PM_LINKST_IN_L2_LAT,
1309 1, PME_ACK_TIMEOUT);
1310}
1311
1312static void tegra_pcie_dw_pme_turnoff(struct tegra_pcie_dw *pcie)
1313{
1314 u32 data;
1315 int err;
1316
1317 if (!tegra_pcie_dw_link_up(&pcie->pci)) {
1318		dev_dbg(pcie->dev, "PCIe link is not up\n");
1319 return;
1320 }
1321
1322 if (tegra_pcie_try_link_l2(pcie)) {
1323 dev_info(pcie->dev, "Link didn't transition to L2 state\n");
1324		/*
1325		 * The TX lane clock frequency resets to Gen1 only if the link
1326		 * is in the L2 or detect state.
1327		 * So assert PEX_RST to the endpoint to force the RP into the
1328		 * detect state.
1329		 */
1330 data = appl_readl(pcie, APPL_PINMUX);
1331 data &= ~APPL_PINMUX_PEX_RST;
1332 appl_writel(pcie, data, APPL_PINMUX);
1333
1334 err = readl_poll_timeout_atomic(pcie->appl_base + APPL_DEBUG,
1335 data,
1336 ((data &
1337 APPL_DEBUG_LTSSM_STATE_MASK) >>
1338 APPL_DEBUG_LTSSM_STATE_SHIFT) ==
1339 LTSSM_STATE_PRE_DETECT,
1340 1, LTSSM_TIMEOUT);
1341 if (err) {
1342 dev_info(pcie->dev, "Link didn't go to detect state\n");
1343 } else {
1344 /* Disable LTSSM after link is in detect state */
1345 data = appl_readl(pcie, APPL_CTRL);
1346 data &= ~APPL_CTRL_LTSSM_EN;
1347 appl_writel(pcie, data, APPL_CTRL);
1348 }
1349 }
1350	/*
1351	 * DBI registers may not be accessible after this, as PLL-E may be
1352	 * down depending on how CLKREQ is pulled by the endpoint.
1353	 */
1354 data = appl_readl(pcie, APPL_PINMUX);
1355 data |= (APPL_PINMUX_CLKREQ_OVERRIDE_EN | APPL_PINMUX_CLKREQ_OVERRIDE);
1356 /* Cut REFCLK to slot */
1357 data |= APPL_PINMUX_CLK_OUTPUT_IN_OVERRIDE_EN;
1358 data &= ~APPL_PINMUX_CLK_OUTPUT_IN_OVERRIDE;
1359 appl_writel(pcie, data, APPL_PINMUX);
1360}
1361
1362static int tegra_pcie_deinit_controller(struct tegra_pcie_dw *pcie)
1363{
1364 tegra_pcie_downstream_dev_to_D0(pcie);
1365 dw_pcie_host_deinit(&pcie->pci.pp);
1366 tegra_pcie_dw_pme_turnoff(pcie);
1367
1368 return __deinit_controller(pcie);
1369}
1370
1371static int tegra_pcie_config_rp(struct tegra_pcie_dw *pcie)
1372{
1373 struct pcie_port *pp = &pcie->pci.pp;
1374 struct device *dev = pcie->dev;
1375 char *name;
1376 int ret;
1377
1378 if (IS_ENABLED(CONFIG_PCI_MSI)) {
1379 pp->msi_irq = of_irq_get_byname(dev->of_node, "msi");
1380 if (!pp->msi_irq) {
1381 dev_err(dev, "Failed to get MSI interrupt\n");
1382 return -ENODEV;
1383 }
1384 }
1385
1386 pm_runtime_enable(dev);
1387
1388 ret = pm_runtime_get_sync(dev);
1389 if (ret < 0) {
1390 dev_err(dev, "Failed to get runtime sync for PCIe dev: %d\n",
1391 ret);
1392 goto fail_pm_get_sync;
1393 }
1394
1395 ret = pinctrl_pm_select_default_state(dev);
1396 if (ret < 0) {
1397 dev_err(dev, "Failed to configure sideband pins: %d\n", ret);
1398 goto fail_pinctrl;
1399 }
1400
1401 tegra_pcie_init_controller(pcie);
1402
1403 pcie->link_state = tegra_pcie_dw_link_up(&pcie->pci);
1404 if (!pcie->link_state) {
1405 ret = -ENOMEDIUM;
1406 goto fail_host_init;
1407 }
1408
1409 name = devm_kasprintf(dev, GFP_KERNEL, "%pOFP", dev->of_node);
1410 if (!name) {
1411 ret = -ENOMEM;
1412 goto fail_host_init;
1413 }
1414
1415 pcie->debugfs = debugfs_create_dir(name, NULL);
1416 if (!pcie->debugfs)
1417 dev_err(dev, "Failed to create debugfs\n");
1418 else
1419 init_debugfs(pcie);
1420
1421 return ret;
1422
1423fail_host_init:
1424 tegra_pcie_deinit_controller(pcie);
1425fail_pinctrl:
1426 pm_runtime_put_sync(dev);
1427fail_pm_get_sync:
1428 pm_runtime_disable(dev);
1429 return ret;
1430}
1431
1432static int tegra_pcie_dw_probe(struct platform_device *pdev)
1433{
1434 struct device *dev = &pdev->dev;
1435 struct resource *atu_dma_res;
1436 struct tegra_pcie_dw *pcie;
1437 struct resource *dbi_res;
1438 struct pcie_port *pp;
1439 struct dw_pcie *pci;
1440 struct phy **phys;
1441 char *name;
1442 int ret;
1443 u32 i;
1444
1445 pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
1446 if (!pcie)
1447 return -ENOMEM;
1448
1449 pci = &pcie->pci;
1450 pci->dev = &pdev->dev;
1451 pci->ops = &tegra_dw_pcie_ops;
1452 pp = &pci->pp;
1453 pcie->dev = &pdev->dev;
1454
1455 ret = tegra_pcie_dw_parse_dt(pcie);
1456 if (ret < 0) {
1457 dev_err(dev, "Failed to parse device tree: %d\n", ret);
1458 return ret;
1459 }
1460
1461 ret = tegra_pcie_get_slot_regulators(pcie);
1462 if (ret < 0) {
1463 dev_err(dev, "Failed to get slot regulators: %d\n", ret);
1464 return ret;
1465 }
1466
1467 pcie->pex_ctl_supply = devm_regulator_get(dev, "vddio-pex-ctl");
1468 if (IS_ERR(pcie->pex_ctl_supply)) {
1469 ret = PTR_ERR(pcie->pex_ctl_supply);
1470 if (ret != -EPROBE_DEFER)
1471 dev_err(dev, "Failed to get regulator: %ld\n",
1472 PTR_ERR(pcie->pex_ctl_supply));
1473 return ret;
1474 }
1475
1476 pcie->core_clk = devm_clk_get(dev, "core");
1477 if (IS_ERR(pcie->core_clk)) {
1478 dev_err(dev, "Failed to get core clock: %ld\n",
1479 PTR_ERR(pcie->core_clk));
1480 return PTR_ERR(pcie->core_clk);
1481 }
1482
1483 pcie->appl_res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
1484 "appl");
1485 if (!pcie->appl_res) {
1486 dev_err(dev, "Failed to find \"appl\" region\n");
1487 return -ENODEV;
1488 }
1489
1490 pcie->appl_base = devm_ioremap_resource(dev, pcie->appl_res);
1491 if (IS_ERR(pcie->appl_base))
1492 return PTR_ERR(pcie->appl_base);
1493
1494 pcie->core_apb_rst = devm_reset_control_get(dev, "apb");
1495 if (IS_ERR(pcie->core_apb_rst)) {
1496 dev_err(dev, "Failed to get APB reset: %ld\n",
1497 PTR_ERR(pcie->core_apb_rst));
1498 return PTR_ERR(pcie->core_apb_rst);
1499 }
1500
1501 phys = devm_kcalloc(dev, pcie->phy_count, sizeof(*phys), GFP_KERNEL);
1502 if (!phys)
1503 return -ENOMEM;
1504
1505 for (i = 0; i < pcie->phy_count; i++) {
1506 name = kasprintf(GFP_KERNEL, "p2u-%u", i);
1507 if (!name) {
1508 dev_err(dev, "Failed to create P2U string\n");
1509 return -ENOMEM;
1510 }
1511 phys[i] = devm_phy_get(dev, name);
1512 kfree(name);
1513 if (IS_ERR(phys[i])) {
1514 ret = PTR_ERR(phys[i]);
1515 if (ret != -EPROBE_DEFER)
1516 dev_err(dev, "Failed to get PHY: %d\n", ret);
1517 return ret;
1518 }
1519 }
1520
1521 pcie->phys = phys;
1522
1523 dbi_res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi");
1524 if (!dbi_res) {
1525 dev_err(dev, "Failed to find \"dbi\" region\n");
1526 return -ENODEV;
1527 }
1528 pcie->dbi_res = dbi_res;
1529
1530 pci->dbi_base = devm_ioremap_resource(dev, dbi_res);
1531 if (IS_ERR(pci->dbi_base))
1532 return PTR_ERR(pci->dbi_base);
1533
1534 /* Tegra HW locates DBI2 at a fixed offset from DBI */
1535 pci->dbi_base2 = pci->dbi_base + 0x1000;
1536
1537 atu_dma_res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
1538 "atu_dma");
1539 if (!atu_dma_res) {
1540 dev_err(dev, "Failed to find \"atu_dma\" region\n");
1541 return -ENODEV;
1542 }
1543 pcie->atu_dma_res = atu_dma_res;
1544
1545 pci->atu_base = devm_ioremap_resource(dev, atu_dma_res);
1546 if (IS_ERR(pci->atu_base))
1547 return PTR_ERR(pci->atu_base);
1548
1549 pcie->core_rst = devm_reset_control_get(dev, "core");
1550 if (IS_ERR(pcie->core_rst)) {
1551 dev_err(dev, "Failed to get core reset: %ld\n",
1552 PTR_ERR(pcie->core_rst));
1553 return PTR_ERR(pcie->core_rst);
1554 }
1555
1556 pp->irq = platform_get_irq_byname(pdev, "intr");
1557 if (!pp->irq) {
1558 dev_err(dev, "Failed to get \"intr\" interrupt\n");
1559 return -ENODEV;
1560 }
1561
1562 ret = devm_request_irq(dev, pp->irq, tegra_pcie_irq_handler,
1563 IRQF_SHARED, "tegra-pcie-intr", pcie);
1564 if (ret) {
1565 dev_err(dev, "Failed to request IRQ %d: %d\n", pp->irq, ret);
1566 return ret;
1567 }
1568
1569 pcie->bpmp = tegra_bpmp_get(dev);
1570 if (IS_ERR(pcie->bpmp))
1571 return PTR_ERR(pcie->bpmp);
1572
1573 platform_set_drvdata(pdev, pcie);
1574
1575 ret = tegra_pcie_config_rp(pcie);
1576 if (ret && ret != -ENOMEDIUM)
1577 goto fail;
1578 else
1579 return 0;
1580
1581fail:
1582 tegra_bpmp_put(pcie->bpmp);
1583 return ret;
1584}
1585
1586static int tegra_pcie_dw_remove(struct platform_device *pdev)
1587{
1588 struct tegra_pcie_dw *pcie = platform_get_drvdata(pdev);
1589
1590 if (!pcie->link_state)
1591 return 0;
1592
1593 debugfs_remove_recursive(pcie->debugfs);
1594 tegra_pcie_deinit_controller(pcie);
1595 pm_runtime_put_sync(pcie->dev);
1596 pm_runtime_disable(pcie->dev);
1597 tegra_bpmp_put(pcie->bpmp);
1598
1599 return 0;
1600}
1601
1602static int tegra_pcie_dw_suspend_late(struct device *dev)
1603{
1604 struct tegra_pcie_dw *pcie = dev_get_drvdata(dev);
1605 u32 val;
1606
1607 if (!pcie->link_state)
1608 return 0;
1609
1610 /* Enable HW_HOT_RST mode */
1611 val = appl_readl(pcie, APPL_CTRL);
1612 val &= ~(APPL_CTRL_HW_HOT_RST_MODE_MASK <<
1613 APPL_CTRL_HW_HOT_RST_MODE_SHIFT);
1614 val |= APPL_CTRL_HW_HOT_RST_EN;
1615 appl_writel(pcie, val, APPL_CTRL);
1616
1617 return 0;
1618}
1619
1620static int tegra_pcie_dw_suspend_noirq(struct device *dev)
1621{
1622 struct tegra_pcie_dw *pcie = dev_get_drvdata(dev);
1623
1624 if (!pcie->link_state)
1625 return 0;
1626
1627 /* Save MSI interrupt vector */
1628 pcie->msi_ctrl_int = dw_pcie_readl_dbi(&pcie->pci,
1629 PORT_LOGIC_MSI_CTRL_INT_0_EN);
1630 tegra_pcie_downstream_dev_to_D0(pcie);
1631 tegra_pcie_dw_pme_turnoff(pcie);
1632
1633 return __deinit_controller(pcie);
1634}
1635
1636static int tegra_pcie_dw_resume_noirq(struct device *dev)
1637{
1638 struct tegra_pcie_dw *pcie = dev_get_drvdata(dev);
1639 int ret;
1640
1641 if (!pcie->link_state)
1642 return 0;
1643
1644 ret = tegra_pcie_config_controller(pcie, true);
1645 if (ret < 0)
1646 return ret;
1647
1648 ret = tegra_pcie_dw_host_init(&pcie->pci.pp);
1649 if (ret < 0) {
1650 dev_err(dev, "Failed to init host: %d\n", ret);
1651 goto fail_host_init;
1652 }
1653
1654 /* Restore MSI interrupt vector */
1655 dw_pcie_writel_dbi(&pcie->pci, PORT_LOGIC_MSI_CTRL_INT_0_EN,
1656 pcie->msi_ctrl_int);
1657
1658 return 0;
1659
1660fail_host_init:
1661 return __deinit_controller(pcie);
1662}
1663
1664static int tegra_pcie_dw_resume_early(struct device *dev)
1665{
1666 struct tegra_pcie_dw *pcie = dev_get_drvdata(dev);
1667 u32 val;
1668
1669 if (!pcie->link_state)
1670 return 0;
1671
1672 /* Disable HW_HOT_RST mode */
1673 val = appl_readl(pcie, APPL_CTRL);
1674 val &= ~(APPL_CTRL_HW_HOT_RST_MODE_MASK <<
1675 APPL_CTRL_HW_HOT_RST_MODE_SHIFT);
1676 val |= APPL_CTRL_HW_HOT_RST_MODE_IMDT_RST <<
1677 APPL_CTRL_HW_HOT_RST_MODE_SHIFT;
1678 val &= ~APPL_CTRL_HW_HOT_RST_EN;
1679 appl_writel(pcie, val, APPL_CTRL);
1680
1681 return 0;
1682}
1683
1684static void tegra_pcie_dw_shutdown(struct platform_device *pdev)
1685{
1686 struct tegra_pcie_dw *pcie = platform_get_drvdata(pdev);
1687
1688 if (!pcie->link_state)
1689 return;
1690
1691 debugfs_remove_recursive(pcie->debugfs);
1692 tegra_pcie_downstream_dev_to_D0(pcie);
1693
1694 disable_irq(pcie->pci.pp.irq);
1695 if (IS_ENABLED(CONFIG_PCI_MSI))
1696 disable_irq(pcie->pci.pp.msi_irq);
1697
1698 tegra_pcie_dw_pme_turnoff(pcie);
1699 __deinit_controller(pcie);
1700}
1701
1702static const struct of_device_id tegra_pcie_dw_of_match[] = {
1703 {
1704 .compatible = "nvidia,tegra194-pcie",
1705 },
1706 {},
1707};
1708
1709static const struct dev_pm_ops tegra_pcie_dw_pm_ops = {
1710 .suspend_late = tegra_pcie_dw_suspend_late,
1711 .suspend_noirq = tegra_pcie_dw_suspend_noirq,
1712 .resume_noirq = tegra_pcie_dw_resume_noirq,
1713 .resume_early = tegra_pcie_dw_resume_early,
1714};
1715
1716static struct platform_driver tegra_pcie_dw_driver = {
1717 .probe = tegra_pcie_dw_probe,
1718 .remove = tegra_pcie_dw_remove,
1719 .shutdown = tegra_pcie_dw_shutdown,
1720 .driver = {
1721 .name = "tegra194-pcie",
1722 .pm = &tegra_pcie_dw_pm_ops,
1723 .of_match_table = tegra_pcie_dw_of_match,
1724 },
1725};
1726module_platform_driver(tegra_pcie_dw_driver);
1727
1728MODULE_DEVICE_TABLE(of, tegra_pcie_dw_of_match);
1729
1730MODULE_AUTHOR("Vidya Sagar <vidyas@nvidia.com>");
1731MODULE_DESCRIPTION("NVIDIA PCIe host controller driver");
1732MODULE_LICENSE("GPL v2");
diff --git a/drivers/pci/controller/pci-host-common.c b/drivers/pci/controller/pci-host-common.c
index c742881b5061..c8cb9c5188a4 100644
--- a/drivers/pci/controller/pci-host-common.c
+++ b/drivers/pci/controller/pci-host-common.c
@@ -43,9 +43,8 @@ static struct pci_config_window *gen_pci_init(struct device *dev,
43 goto err_out; 43 goto err_out;
44 } 44 }
45 45
46 err = devm_add_action(dev, gen_pci_unmap_cfg, cfg); 46 err = devm_add_action_or_reset(dev, gen_pci_unmap_cfg, cfg);
47 if (err) { 47 if (err) {
48 gen_pci_unmap_cfg(cfg);
49 goto err_out; 48 goto err_out;
50 } 49 }
51 return cfg; 50 return cfg;
diff --git a/drivers/pci/controller/pci-hyperv.c b/drivers/pci/controller/pci-hyperv.c
index 0ca73c851e0f..f1f300218fab 100644
--- a/drivers/pci/controller/pci-hyperv.c
+++ b/drivers/pci/controller/pci-hyperv.c
@@ -2809,6 +2809,48 @@ static void put_hvpcibus(struct hv_pcibus_device *hbus)
2809 complete(&hbus->remove_event); 2809 complete(&hbus->remove_event);
2810} 2810}
2811 2811
2812#define HVPCI_DOM_MAP_SIZE (64 * 1024)
2813static DECLARE_BITMAP(hvpci_dom_map, HVPCI_DOM_MAP_SIZE);
2814
2815/*
2816 * PCI domain number 0 is used by emulated devices on Gen1 VMs, so define 0
2817 * as invalid for passthrough PCI devices of this driver.
2818 */
2819#define HVPCI_DOM_INVALID 0
2820
2821/**
2822 * hv_get_dom_num() - Get a valid PCI domain number
2823 * Check if the PCI domain number is in use, and return another number if
2824 * it is in use.
2825 *
2826 * @dom: Requested domain number
2827 *
2828 * return: domain number on success, HVPCI_DOM_INVALID on failure
2829 */
2830static u16 hv_get_dom_num(u16 dom)
2831{
2832 unsigned int i;
2833
2834 if (test_and_set_bit(dom, hvpci_dom_map) == 0)
2835 return dom;
2836
2837 for_each_clear_bit(i, hvpci_dom_map, HVPCI_DOM_MAP_SIZE) {
2838 if (test_and_set_bit(i, hvpci_dom_map) == 0)
2839 return i;
2840 }
2841
2842 return HVPCI_DOM_INVALID;
2843}
2844
2845/**
2846 * hv_put_dom_num() - Mark the PCI domain number as free
2847 * @dom: Domain number to be freed
2848 */
2849static void hv_put_dom_num(u16 dom)
2850{
2851 clear_bit(dom, hvpci_dom_map);
2852}
2853
2812/** 2854/**
2813 * hv_pci_probe() - New VMBus channel probe, for a root PCI bus 2855 * hv_pci_probe() - New VMBus channel probe, for a root PCI bus
2814 * @hdev: VMBus's tracking struct for this root PCI bus 2856 * @hdev: VMBus's tracking struct for this root PCI bus
@@ -2820,6 +2862,7 @@ static int hv_pci_probe(struct hv_device *hdev,
2820 const struct hv_vmbus_device_id *dev_id) 2862 const struct hv_vmbus_device_id *dev_id)
2821{ 2863{
2822 struct hv_pcibus_device *hbus; 2864 struct hv_pcibus_device *hbus;
2865 u16 dom_req, dom;
2823 char *name; 2866 char *name;
2824 int ret; 2867 int ret;
2825 2868
@@ -2835,19 +2878,34 @@ static int hv_pci_probe(struct hv_device *hdev,
2835 hbus->state = hv_pcibus_init; 2878 hbus->state = hv_pcibus_init;
2836 2879
2837 /* 2880 /*
2838 * The PCI bus "domain" is what is called "segment" in ACPI and 2881 * The PCI bus "domain" is what is called "segment" in ACPI and other
2839 * other specs. Pull it from the instance ID, to get something 2882 * specs. Pull it from the instance ID, to get something usually
2840 * unique. Bytes 8 and 9 are what is used in Windows guests, so 2883 * unique. In rare cases of collision, we will find out another number
2841 * do the same thing for consistency. Note that, since this code 2884 * not in use.
2842 * only runs in a Hyper-V VM, Hyper-V can (and does) guarantee 2885 *
2843 * that (1) the only domain in use for something that looks like 2886 * Note that, since this code only runs in a Hyper-V VM, Hyper-V
2844 * a physical PCI bus (which is actually emulated by the 2887 * together with this guest driver can guarantee that (1) The only
2845 * hypervisor) is domain 0 and (2) there will be no overlap 2888 * domain used by Gen1 VMs for something that looks like a physical
2846 * between domains derived from these instance IDs in the same 2889 * PCI bus (which is actually emulated by the hypervisor) is domain 0.
2847 * VM. 2890 * (2) There will be no overlap between domains (after fixing possible
2891 * collisions) in the same VM.
2848 */ 2892 */
2849 hbus->sysdata.domain = hdev->dev_instance.b[9] | 2893 dom_req = hdev->dev_instance.b[5] << 8 | hdev->dev_instance.b[4];
2850 hdev->dev_instance.b[8] << 8; 2894 dom = hv_get_dom_num(dom_req);
2895
2896 if (dom == HVPCI_DOM_INVALID) {
2897 dev_err(&hdev->device,
2898 "Unable to use dom# 0x%hx or other numbers", dom_req);
2899 ret = -EINVAL;
2900 goto free_bus;
2901 }
2902
2903 if (dom != dom_req)
2904 dev_info(&hdev->device,
2905 "PCI dom# 0x%hx has collision, using 0x%hx",
2906 dom_req, dom);
2907
2908 hbus->sysdata.domain = dom;
2851 2909
2852 hbus->hdev = hdev; 2910 hbus->hdev = hdev;
2853 refcount_set(&hbus->remove_lock, 1); 2911 refcount_set(&hbus->remove_lock, 1);
@@ -2862,7 +2920,7 @@ static int hv_pci_probe(struct hv_device *hdev,
2862 hbus->sysdata.domain); 2920 hbus->sysdata.domain);
2863 if (!hbus->wq) { 2921 if (!hbus->wq) {
2864 ret = -ENOMEM; 2922 ret = -ENOMEM;
2865 goto free_bus; 2923 goto free_dom;
2866 } 2924 }
2867 2925
2868 ret = vmbus_open(hdev->channel, pci_ring_size, pci_ring_size, NULL, 0, 2926 ret = vmbus_open(hdev->channel, pci_ring_size, pci_ring_size, NULL, 0,
@@ -2946,6 +3004,8 @@ close:
2946 vmbus_close(hdev->channel); 3004 vmbus_close(hdev->channel);
2947destroy_wq: 3005destroy_wq:
2948 destroy_workqueue(hbus->wq); 3006 destroy_workqueue(hbus->wq);
3007free_dom:
3008 hv_put_dom_num(hbus->sysdata.domain);
2949free_bus: 3009free_bus:
2950 free_page((unsigned long)hbus); 3010 free_page((unsigned long)hbus);
2951 return ret; 3011 return ret;
@@ -3008,8 +3068,8 @@ static int hv_pci_remove(struct hv_device *hdev)
3008 /* Remove the bus from PCI's point of view. */ 3068 /* Remove the bus from PCI's point of view. */
3009 pci_lock_rescan_remove(); 3069 pci_lock_rescan_remove();
3010 pci_stop_root_bus(hbus->pci_bus); 3070 pci_stop_root_bus(hbus->pci_bus);
3011 pci_remove_root_bus(hbus->pci_bus);
3012 hv_pci_remove_slots(hbus); 3071 hv_pci_remove_slots(hbus);
3072 pci_remove_root_bus(hbus->pci_bus);
3013 pci_unlock_rescan_remove(); 3073 pci_unlock_rescan_remove();
3014 hbus->state = hv_pcibus_removed; 3074 hbus->state = hv_pcibus_removed;
3015 } 3075 }
@@ -3027,6 +3087,9 @@ static int hv_pci_remove(struct hv_device *hdev)
3027 put_hvpcibus(hbus); 3087 put_hvpcibus(hbus);
3028 wait_for_completion(&hbus->remove_event); 3088 wait_for_completion(&hbus->remove_event);
3029 destroy_workqueue(hbus->wq); 3089 destroy_workqueue(hbus->wq);
3090
3091 hv_put_dom_num(hbus->sysdata.domain);
3092
3030 free_page((unsigned long)hbus); 3093 free_page((unsigned long)hbus);
3031 return 0; 3094 return 0;
3032} 3095}
@@ -3058,6 +3121,9 @@ static void __exit exit_hv_pci_drv(void)
3058 3121
3059static int __init init_hv_pci_drv(void) 3122static int __init init_hv_pci_drv(void)
3060{ 3123{
3124 /* Set the invalid domain number's bit, so it will not be used */
3125 set_bit(HVPCI_DOM_INVALID, hvpci_dom_map);
3126
3061 /* Initialize PCI block r/w interface */ 3127 /* Initialize PCI block r/w interface */
3062 hvpci_block_ops.read_block = hv_read_config_block; 3128 hvpci_block_ops.read_block = hv_read_config_block;
3063 hvpci_block_ops.write_block = hv_write_config_block; 3129 hvpci_block_ops.write_block = hv_write_config_block;
diff --git a/drivers/pci/controller/pci-tegra.c b/drivers/pci/controller/pci-tegra.c
index 9a917b2456f6..673a1725ef38 100644
--- a/drivers/pci/controller/pci-tegra.c
+++ b/drivers/pci/controller/pci-tegra.c
@@ -2237,14 +2237,15 @@ static int tegra_pcie_parse_dt(struct tegra_pcie *pcie)
2237 err = of_pci_get_devfn(port); 2237 err = of_pci_get_devfn(port);
2238 if (err < 0) { 2238 if (err < 0) {
2239 dev_err(dev, "failed to parse address: %d\n", err); 2239 dev_err(dev, "failed to parse address: %d\n", err);
2240 return err; 2240 goto err_node_put;
2241 } 2241 }
2242 2242
2243 index = PCI_SLOT(err); 2243 index = PCI_SLOT(err);
2244 2244
2245 if (index < 1 || index > soc->num_ports) { 2245 if (index < 1 || index > soc->num_ports) {
2246 dev_err(dev, "invalid port number: %d\n", index); 2246 dev_err(dev, "invalid port number: %d\n", index);
2247 return -EINVAL; 2247 err = -EINVAL;
2248 goto err_node_put;
2248 } 2249 }
2249 2250
2250 index--; 2251 index--;
@@ -2253,12 +2254,13 @@ static int tegra_pcie_parse_dt(struct tegra_pcie *pcie)
2253 if (err < 0) { 2254 if (err < 0) {
2254 dev_err(dev, "failed to parse # of lanes: %d\n", 2255 dev_err(dev, "failed to parse # of lanes: %d\n",
2255 err); 2256 err);
2256 return err; 2257 goto err_node_put;
2257 } 2258 }
2258 2259
2259 if (value > 16) { 2260 if (value > 16) {
2260 dev_err(dev, "invalid # of lanes: %u\n", value); 2261 dev_err(dev, "invalid # of lanes: %u\n", value);
2261 return -EINVAL; 2262 err = -EINVAL;
2263 goto err_node_put;
2262 } 2264 }
2263 2265
2264 lanes |= value << (index << 3); 2266 lanes |= value << (index << 3);
@@ -2272,13 +2274,15 @@ static int tegra_pcie_parse_dt(struct tegra_pcie *pcie)
2272 lane += value; 2274 lane += value;
2273 2275
2274 rp = devm_kzalloc(dev, sizeof(*rp), GFP_KERNEL); 2276 rp = devm_kzalloc(dev, sizeof(*rp), GFP_KERNEL);
2275 if (!rp) 2277 if (!rp) {
2276 return -ENOMEM; 2278 err = -ENOMEM;
2279 goto err_node_put;
2280 }
2277 2281
2278 err = of_address_to_resource(port, 0, &rp->regs); 2282 err = of_address_to_resource(port, 0, &rp->regs);
2279 if (err < 0) { 2283 if (err < 0) {
2280 dev_err(dev, "failed to parse address: %d\n", err); 2284 dev_err(dev, "failed to parse address: %d\n", err);
2281 return err; 2285 goto err_node_put;
2282 } 2286 }
2283 2287
2284 INIT_LIST_HEAD(&rp->list); 2288 INIT_LIST_HEAD(&rp->list);
@@ -2330,6 +2334,10 @@ static int tegra_pcie_parse_dt(struct tegra_pcie *pcie)
2330 return err; 2334 return err;
2331 2335
2332 return 0; 2336 return 0;
2337
2338err_node_put:
2339 of_node_put(port);
2340 return err;
2333} 2341}
2334 2342
2335/* 2343/*
diff --git a/drivers/pci/controller/pcie-iproc-platform.c b/drivers/pci/controller/pcie-iproc-platform.c
index 5a3550b6bb29..9ee6200a66f4 100644
--- a/drivers/pci/controller/pcie-iproc-platform.c
+++ b/drivers/pci/controller/pcie-iproc-platform.c
@@ -93,12 +93,9 @@ static int iproc_pcie_pltfm_probe(struct platform_device *pdev)
93 pcie->need_ib_cfg = of_property_read_bool(np, "dma-ranges"); 93 pcie->need_ib_cfg = of_property_read_bool(np, "dma-ranges");
94 94
95 /* PHY use is optional */ 95 /* PHY use is optional */
96 pcie->phy = devm_phy_get(dev, "pcie-phy"); 96 pcie->phy = devm_phy_optional_get(dev, "pcie-phy");
97 if (IS_ERR(pcie->phy)) { 97 if (IS_ERR(pcie->phy))
98 if (PTR_ERR(pcie->phy) == -EPROBE_DEFER) 98 return PTR_ERR(pcie->phy);
99 return -EPROBE_DEFER;
100 pcie->phy = NULL;
101 }
102 99
103 ret = devm_of_pci_get_host_bridge_resources(dev, 0, 0xff, &resources, 100 ret = devm_of_pci_get_host_bridge_resources(dev, 0, 0xff, &resources,
104 &iobase); 101 &iobase);
diff --git a/drivers/pci/controller/pcie-mediatek.c b/drivers/pci/controller/pcie-mediatek.c
index 80601e1b939e..626a7c352dfd 100644
--- a/drivers/pci/controller/pcie-mediatek.c
+++ b/drivers/pci/controller/pcie-mediatek.c
@@ -73,6 +73,7 @@
73#define PCIE_MSI_VECTOR 0x0c0 73#define PCIE_MSI_VECTOR 0x0c0
74 74
75#define PCIE_CONF_VEND_ID 0x100 75#define PCIE_CONF_VEND_ID 0x100
76#define PCIE_CONF_DEVICE_ID 0x102
76#define PCIE_CONF_CLASS_ID 0x106 77#define PCIE_CONF_CLASS_ID 0x106
77 78
78#define PCIE_INT_MASK 0x420 79#define PCIE_INT_MASK 0x420
@@ -141,12 +142,16 @@ struct mtk_pcie_port;
141/** 142/**
142 * struct mtk_pcie_soc - differentiate between host generations 143 * struct mtk_pcie_soc - differentiate between host generations
143 * @need_fix_class_id: whether this host's class ID needed to be fixed or not 144 * @need_fix_class_id: whether this host's class ID needed to be fixed or not
145 * @need_fix_device_id: whether this host's device ID needed to be fixed or not
146 * @device_id: device ID which this host need to be fixed
144 * @ops: pointer to configuration access functions 147 * @ops: pointer to configuration access functions
145 * @startup: pointer to controller setting functions 148 * @startup: pointer to controller setting functions
146 * @setup_irq: pointer to initialize IRQ functions 149 * @setup_irq: pointer to initialize IRQ functions
147 */ 150 */
148struct mtk_pcie_soc { 151struct mtk_pcie_soc {
149 bool need_fix_class_id; 152 bool need_fix_class_id;
153 bool need_fix_device_id;
154 unsigned int device_id;
150 struct pci_ops *ops; 155 struct pci_ops *ops;
151 int (*startup)(struct mtk_pcie_port *port); 156 int (*startup)(struct mtk_pcie_port *port);
152 int (*setup_irq)(struct mtk_pcie_port *port, struct device_node *node); 157 int (*setup_irq)(struct mtk_pcie_port *port, struct device_node *node);
@@ -630,8 +635,6 @@ static void mtk_pcie_intr_handler(struct irq_desc *desc)
630 } 635 }
631 636
632 chained_irq_exit(irqchip, desc); 637 chained_irq_exit(irqchip, desc);
633
634 return;
635} 638}
636 639
637static int mtk_pcie_setup_irq(struct mtk_pcie_port *port, 640static int mtk_pcie_setup_irq(struct mtk_pcie_port *port,
@@ -696,6 +699,9 @@ static int mtk_pcie_startup_port_v2(struct mtk_pcie_port *port)
696 writew(val, port->base + PCIE_CONF_CLASS_ID); 699 writew(val, port->base + PCIE_CONF_CLASS_ID);
697 } 700 }
698 701
702 if (soc->need_fix_device_id)
703 writew(soc->device_id, port->base + PCIE_CONF_DEVICE_ID);
704
699 /* 100ms timeout value should be enough for Gen1/2 training */ 705 /* 100ms timeout value should be enough for Gen1/2 training */
700 err = readl_poll_timeout(port->base + PCIE_LINK_STATUS_V2, val, 706 err = readl_poll_timeout(port->base + PCIE_LINK_STATUS_V2, val,
701 !!(val & PCIE_PORT_LINKUP_V2), 20, 707 !!(val & PCIE_PORT_LINKUP_V2), 20,
@@ -1216,11 +1222,21 @@ static const struct mtk_pcie_soc mtk_pcie_soc_mt7622 = {
1216 .setup_irq = mtk_pcie_setup_irq, 1222 .setup_irq = mtk_pcie_setup_irq,
1217}; 1223};
1218 1224
1225static const struct mtk_pcie_soc mtk_pcie_soc_mt7629 = {
1226 .need_fix_class_id = true,
1227 .need_fix_device_id = true,
1228 .device_id = PCI_DEVICE_ID_MEDIATEK_7629,
1229 .ops = &mtk_pcie_ops_v2,
1230 .startup = mtk_pcie_startup_port_v2,
1231 .setup_irq = mtk_pcie_setup_irq,
1232};
1233
1219static const struct of_device_id mtk_pcie_ids[] = { 1234static const struct of_device_id mtk_pcie_ids[] = {
1220 { .compatible = "mediatek,mt2701-pcie", .data = &mtk_pcie_soc_v1 }, 1235 { .compatible = "mediatek,mt2701-pcie", .data = &mtk_pcie_soc_v1 },
1221 { .compatible = "mediatek,mt7623-pcie", .data = &mtk_pcie_soc_v1 }, 1236 { .compatible = "mediatek,mt7623-pcie", .data = &mtk_pcie_soc_v1 },
1222 { .compatible = "mediatek,mt2712-pcie", .data = &mtk_pcie_soc_mt2712 }, 1237 { .compatible = "mediatek,mt2712-pcie", .data = &mtk_pcie_soc_mt2712 },
1223 { .compatible = "mediatek,mt7622-pcie", .data = &mtk_pcie_soc_mt7622 }, 1238 { .compatible = "mediatek,mt7622-pcie", .data = &mtk_pcie_soc_mt7622 },
1239 { .compatible = "mediatek,mt7629-pcie", .data = &mtk_pcie_soc_mt7629 },
1224 {}, 1240 {},
1225}; 1241};
1226 1242
diff --git a/drivers/pci/controller/pcie-mobiveil.c b/drivers/pci/controller/pcie-mobiveil.c
index 672e633601c7..a45a6447b01d 100644
--- a/drivers/pci/controller/pcie-mobiveil.c
+++ b/drivers/pci/controller/pcie-mobiveil.c
@@ -88,6 +88,7 @@
88#define AMAP_CTRL_TYPE_MASK 3 88#define AMAP_CTRL_TYPE_MASK 3
89 89
90#define PAB_EXT_PEX_AMAP_SIZEN(win) PAB_EXT_REG_ADDR(0xbef0, win) 90#define PAB_EXT_PEX_AMAP_SIZEN(win) PAB_EXT_REG_ADDR(0xbef0, win)
91#define PAB_EXT_PEX_AMAP_AXI_WIN(win) PAB_EXT_REG_ADDR(0xb4a0, win)
91#define PAB_PEX_AMAP_AXI_WIN(win) PAB_REG_ADDR(0x4ba4, win) 92#define PAB_PEX_AMAP_AXI_WIN(win) PAB_REG_ADDR(0x4ba4, win)
92#define PAB_PEX_AMAP_PEX_WIN_L(win) PAB_REG_ADDR(0x4ba8, win) 93#define PAB_PEX_AMAP_PEX_WIN_L(win) PAB_REG_ADDR(0x4ba8, win)
93#define PAB_PEX_AMAP_PEX_WIN_H(win) PAB_REG_ADDR(0x4bac, win) 94#define PAB_PEX_AMAP_PEX_WIN_H(win) PAB_REG_ADDR(0x4bac, win)
@@ -462,7 +463,7 @@ static int mobiveil_pcie_parse_dt(struct mobiveil_pcie *pcie)
462} 463}
463 464
464static void program_ib_windows(struct mobiveil_pcie *pcie, int win_num, 465static void program_ib_windows(struct mobiveil_pcie *pcie, int win_num,
465 u64 pci_addr, u32 type, u64 size) 466 u64 cpu_addr, u64 pci_addr, u32 type, u64 size)
466{ 467{
467 u32 value; 468 u32 value;
468 u64 size64 = ~(size - 1); 469 u64 size64 = ~(size - 1);
@@ -482,7 +483,10 @@ static void program_ib_windows(struct mobiveil_pcie *pcie, int win_num,
482 csr_writel(pcie, upper_32_bits(size64), 483 csr_writel(pcie, upper_32_bits(size64),
483 PAB_EXT_PEX_AMAP_SIZEN(win_num)); 484 PAB_EXT_PEX_AMAP_SIZEN(win_num));
484 485
485 csr_writel(pcie, pci_addr, PAB_PEX_AMAP_AXI_WIN(win_num)); 486 csr_writel(pcie, lower_32_bits(cpu_addr),
487 PAB_PEX_AMAP_AXI_WIN(win_num));
488 csr_writel(pcie, upper_32_bits(cpu_addr),
489 PAB_EXT_PEX_AMAP_AXI_WIN(win_num));
486 490
487 csr_writel(pcie, lower_32_bits(pci_addr), 491 csr_writel(pcie, lower_32_bits(pci_addr),
488 PAB_PEX_AMAP_PEX_WIN_L(win_num)); 492 PAB_PEX_AMAP_PEX_WIN_L(win_num));
@@ -624,7 +628,7 @@ static int mobiveil_host_init(struct mobiveil_pcie *pcie)
624 CFG_WINDOW_TYPE, resource_size(pcie->ob_io_res)); 628 CFG_WINDOW_TYPE, resource_size(pcie->ob_io_res));
625 629
626 /* memory inbound translation window */ 630 /* memory inbound translation window */
627 program_ib_windows(pcie, WIN_NUM_0, 0, MEM_WINDOW_TYPE, IB_WIN_SIZE); 631 program_ib_windows(pcie, WIN_NUM_0, 0, 0, MEM_WINDOW_TYPE, IB_WIN_SIZE);
628 632
629 /* Get the I/O and memory ranges from DT */ 633 /* Get the I/O and memory ranges from DT */
630 resource_list_for_each_entry(win, &pcie->resources) { 634 resource_list_for_each_entry(win, &pcie->resources) {
diff --git a/drivers/pci/controller/pcie-rockchip-host.c b/drivers/pci/controller/pcie-rockchip-host.c
index 8d20f1793a61..ef8e677ce9d1 100644
--- a/drivers/pci/controller/pcie-rockchip-host.c
+++ b/drivers/pci/controller/pcie-rockchip-host.c
@@ -608,29 +608,29 @@ static int rockchip_pcie_parse_host_dt(struct rockchip_pcie *rockchip)
608 608
609 rockchip->vpcie12v = devm_regulator_get_optional(dev, "vpcie12v"); 609 rockchip->vpcie12v = devm_regulator_get_optional(dev, "vpcie12v");
610 if (IS_ERR(rockchip->vpcie12v)) { 610 if (IS_ERR(rockchip->vpcie12v)) {
611 if (PTR_ERR(rockchip->vpcie12v) == -EPROBE_DEFER) 611 if (PTR_ERR(rockchip->vpcie12v) != -ENODEV)
612 return -EPROBE_DEFER; 612 return PTR_ERR(rockchip->vpcie12v);
613 dev_info(dev, "no vpcie12v regulator found\n"); 613 dev_info(dev, "no vpcie12v regulator found\n");
614 } 614 }
615 615
616 rockchip->vpcie3v3 = devm_regulator_get_optional(dev, "vpcie3v3"); 616 rockchip->vpcie3v3 = devm_regulator_get_optional(dev, "vpcie3v3");
617 if (IS_ERR(rockchip->vpcie3v3)) { 617 if (IS_ERR(rockchip->vpcie3v3)) {
618 if (PTR_ERR(rockchip->vpcie3v3) == -EPROBE_DEFER) 618 if (PTR_ERR(rockchip->vpcie3v3) != -ENODEV)
619 return -EPROBE_DEFER; 619 return PTR_ERR(rockchip->vpcie3v3);
620 dev_info(dev, "no vpcie3v3 regulator found\n"); 620 dev_info(dev, "no vpcie3v3 regulator found\n");
621 } 621 }
622 622
623 rockchip->vpcie1v8 = devm_regulator_get_optional(dev, "vpcie1v8"); 623 rockchip->vpcie1v8 = devm_regulator_get_optional(dev, "vpcie1v8");
624 if (IS_ERR(rockchip->vpcie1v8)) { 624 if (IS_ERR(rockchip->vpcie1v8)) {
625 if (PTR_ERR(rockchip->vpcie1v8) == -EPROBE_DEFER) 625 if (PTR_ERR(rockchip->vpcie1v8) != -ENODEV)
626 return -EPROBE_DEFER; 626 return PTR_ERR(rockchip->vpcie1v8);
627 dev_info(dev, "no vpcie1v8 regulator found\n"); 627 dev_info(dev, "no vpcie1v8 regulator found\n");
628 } 628 }
629 629
630 rockchip->vpcie0v9 = devm_regulator_get_optional(dev, "vpcie0v9"); 630 rockchip->vpcie0v9 = devm_regulator_get_optional(dev, "vpcie0v9");
631 if (IS_ERR(rockchip->vpcie0v9)) { 631 if (IS_ERR(rockchip->vpcie0v9)) {
632 if (PTR_ERR(rockchip->vpcie0v9) == -EPROBE_DEFER) 632 if (PTR_ERR(rockchip->vpcie0v9) != -ENODEV)
633 return -EPROBE_DEFER; 633 return PTR_ERR(rockchip->vpcie0v9);
634 dev_info(dev, "no vpcie0v9 regulator found\n"); 634 dev_info(dev, "no vpcie0v9 regulator found\n");
635 } 635 }
636 636
diff --git a/drivers/pci/controller/vmd.c b/drivers/pci/controller/vmd.c
index 4575e0c6dc4b..a35d3f3996d7 100644
--- a/drivers/pci/controller/vmd.c
+++ b/drivers/pci/controller/vmd.c
@@ -31,6 +31,9 @@
31#define PCI_REG_VMLOCK 0x70 31#define PCI_REG_VMLOCK 0x70
32#define MB2_SHADOW_EN(vmlock) (vmlock & 0x2) 32#define MB2_SHADOW_EN(vmlock) (vmlock & 0x2)
33 33
34#define MB2_SHADOW_OFFSET 0x2000
35#define MB2_SHADOW_SIZE 16
36
34enum vmd_features { 37enum vmd_features {
35 /* 38 /*
36 * Device may contain registers which hint the physical location of the 39 * Device may contain registers which hint the physical location of the
@@ -94,6 +97,7 @@ struct vmd_dev {
94 struct resource resources[3]; 97 struct resource resources[3];
95 struct irq_domain *irq_domain; 98 struct irq_domain *irq_domain;
96 struct pci_bus *bus; 99 struct pci_bus *bus;
100 u8 busn_start;
97 101
98 struct dma_map_ops dma_ops; 102 struct dma_map_ops dma_ops;
99 struct dma_domain dma_domain; 103 struct dma_domain dma_domain;
@@ -440,7 +444,8 @@ static char __iomem *vmd_cfg_addr(struct vmd_dev *vmd, struct pci_bus *bus,
440 unsigned int devfn, int reg, int len) 444 unsigned int devfn, int reg, int len)
441{ 445{
442 char __iomem *addr = vmd->cfgbar + 446 char __iomem *addr = vmd->cfgbar +
443 (bus->number << 20) + (devfn << 12) + reg; 447 ((bus->number - vmd->busn_start) << 20) +
448 (devfn << 12) + reg;
444 449
445 if ((addr - vmd->cfgbar) + len >= 450 if ((addr - vmd->cfgbar) + len >=
446 resource_size(&vmd->dev->resource[VMD_CFGBAR])) 451 resource_size(&vmd->dev->resource[VMD_CFGBAR]))
@@ -563,7 +568,7 @@ static int vmd_enable_domain(struct vmd_dev *vmd, unsigned long features)
563 unsigned long flags; 568 unsigned long flags;
564 LIST_HEAD(resources); 569 LIST_HEAD(resources);
565 resource_size_t offset[2] = {0}; 570 resource_size_t offset[2] = {0};
566 resource_size_t membar2_offset = 0x2000, busn_start = 0; 571 resource_size_t membar2_offset = 0x2000;
567 struct pci_bus *child; 572 struct pci_bus *child;
568 573
569 /* 574 /*
@@ -576,7 +581,7 @@ static int vmd_enable_domain(struct vmd_dev *vmd, unsigned long features)
576 u32 vmlock; 581 u32 vmlock;
577 int ret; 582 int ret;
578 583
579 membar2_offset = 0x2018; 584 membar2_offset = MB2_SHADOW_OFFSET + MB2_SHADOW_SIZE;
580 ret = pci_read_config_dword(vmd->dev, PCI_REG_VMLOCK, &vmlock); 585 ret = pci_read_config_dword(vmd->dev, PCI_REG_VMLOCK, &vmlock);
581 if (ret || vmlock == ~0) 586 if (ret || vmlock == ~0)
582 return -ENODEV; 587 return -ENODEV;
@@ -588,9 +593,9 @@ static int vmd_enable_domain(struct vmd_dev *vmd, unsigned long features)
588 if (!membar2) 593 if (!membar2)
589 return -ENOMEM; 594 return -ENOMEM;
590 offset[0] = vmd->dev->resource[VMD_MEMBAR1].start - 595 offset[0] = vmd->dev->resource[VMD_MEMBAR1].start -
591 readq(membar2 + 0x2008); 596 readq(membar2 + MB2_SHADOW_OFFSET);
592 offset[1] = vmd->dev->resource[VMD_MEMBAR2].start - 597 offset[1] = vmd->dev->resource[VMD_MEMBAR2].start -
593 readq(membar2 + 0x2010); 598 readq(membar2 + MB2_SHADOW_OFFSET + 8);
594 pci_iounmap(vmd->dev, membar2); 599 pci_iounmap(vmd->dev, membar2);
595 } 600 }
596 } 601 }
@@ -606,14 +611,14 @@ static int vmd_enable_domain(struct vmd_dev *vmd, unsigned long features)
606 pci_read_config_dword(vmd->dev, PCI_REG_VMCONFIG, &vmconfig); 611 pci_read_config_dword(vmd->dev, PCI_REG_VMCONFIG, &vmconfig);
607 if (BUS_RESTRICT_CAP(vmcap) && 612 if (BUS_RESTRICT_CAP(vmcap) &&
608 (BUS_RESTRICT_CFG(vmconfig) == 0x1)) 613 (BUS_RESTRICT_CFG(vmconfig) == 0x1))
609 busn_start = 128; 614 vmd->busn_start = 128;
610 } 615 }
611 616
612 res = &vmd->dev->resource[VMD_CFGBAR]; 617 res = &vmd->dev->resource[VMD_CFGBAR];
613 vmd->resources[0] = (struct resource) { 618 vmd->resources[0] = (struct resource) {
614 .name = "VMD CFGBAR", 619 .name = "VMD CFGBAR",
615 .start = busn_start, 620 .start = vmd->busn_start,
616 .end = busn_start + (resource_size(res) >> 20) - 1, 621 .end = vmd->busn_start + (resource_size(res) >> 20) - 1,
617 .flags = IORESOURCE_BUS | IORESOURCE_PCI_FIXED, 622 .flags = IORESOURCE_BUS | IORESOURCE_PCI_FIXED,
618 }; 623 };
619 624
@@ -681,8 +686,8 @@ static int vmd_enable_domain(struct vmd_dev *vmd, unsigned long features)
681 pci_add_resource_offset(&resources, &vmd->resources[1], offset[0]); 686 pci_add_resource_offset(&resources, &vmd->resources[1], offset[0]);
682 pci_add_resource_offset(&resources, &vmd->resources[2], offset[1]); 687 pci_add_resource_offset(&resources, &vmd->resources[2], offset[1]);
683 688
684 vmd->bus = pci_create_root_bus(&vmd->dev->dev, busn_start, &vmd_ops, 689 vmd->bus = pci_create_root_bus(&vmd->dev->dev, vmd->busn_start,
685 sd, &resources); 690 &vmd_ops, sd, &resources);
686 if (!vmd->bus) { 691 if (!vmd->bus) {
687 pci_free_resource_list(&resources); 692 pci_free_resource_list(&resources);
688 irq_domain_remove(vmd->irq_domain); 693 irq_domain_remove(vmd->irq_domain);
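The VMD change stores the starting bus number in struct vmd_dev and makes config accesses relative to it, so that when the bus-restriction capability limits VMD to buses 128-255 the first owned bus still maps to offset 0 of CFGBAR. A sketch of the address math used by vmd_cfg_addr() (1 MB of config space per bus, 4 KB per function), with the helper name hypothetical:

static void __iomem *example_cfg_addr(struct vmd_dev *vmd, struct pci_bus *bus,
				      unsigned int devfn, int reg)
{
	unsigned int bus_off = bus->number - vmd->busn_start;

	return vmd->cfgbar + (bus_off << 20) + (devfn << 12) + reg;
}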
diff --git a/drivers/pci/hotplug/cpci_hotplug_core.c b/drivers/pci/hotplug/cpci_hotplug_core.c
index 603eadf3d965..d0559d2faf50 100644
--- a/drivers/pci/hotplug/cpci_hotplug_core.c
+++ b/drivers/pci/hotplug/cpci_hotplug_core.c
@@ -563,7 +563,6 @@ cleanup_slots(void)
563 } 563 }
564cleanup_null: 564cleanup_null:
565 up_write(&list_rwsem); 565 up_write(&list_rwsem);
566 return;
567} 566}
568 567
569int 568int
diff --git a/drivers/pci/hotplug/cpqphp_core.c b/drivers/pci/hotplug/cpqphp_core.c
index 16bbb183695a..b8aacb41a83c 100644
--- a/drivers/pci/hotplug/cpqphp_core.c
+++ b/drivers/pci/hotplug/cpqphp_core.c
@@ -173,7 +173,6 @@ static void pci_print_IRQ_route(void)
173 dbg("%d %d %d %d\n", tbus, tdevice >> 3, tdevice & 0x7, tslot); 173 dbg("%d %d %d %d\n", tbus, tdevice >> 3, tdevice & 0x7, tslot);
174 174
175 } 175 }
176 return;
177} 176}
178 177
179 178
diff --git a/drivers/pci/hotplug/cpqphp_ctrl.c b/drivers/pci/hotplug/cpqphp_ctrl.c
index b7f4e1f099d9..68de958a9be8 100644
--- a/drivers/pci/hotplug/cpqphp_ctrl.c
+++ b/drivers/pci/hotplug/cpqphp_ctrl.c
@@ -1872,8 +1872,6 @@ static void interrupt_event_handler(struct controller *ctrl)
1872 } 1872 }
1873 } /* End of FOR loop */ 1873 } /* End of FOR loop */
1874 } 1874 }
1875
1876 return;
1877} 1875}
1878 1876
1879 1877
@@ -1943,8 +1941,6 @@ void cpqhp_pushbutton_thread(struct timer_list *t)
1943 1941
1944 p_slot->state = STATIC_STATE; 1942 p_slot->state = STATIC_STATE;
1945 } 1943 }
1946
1947 return;
1948} 1944}
1949 1945
1950 1946
diff --git a/drivers/pci/hotplug/cpqphp_nvram.h b/drivers/pci/hotplug/cpqphp_nvram.h
index 918ff8dbfe62..70e879b6a23f 100644
--- a/drivers/pci/hotplug/cpqphp_nvram.h
+++ b/drivers/pci/hotplug/cpqphp_nvram.h
@@ -16,10 +16,7 @@
16 16
17#ifndef CONFIG_HOTPLUG_PCI_COMPAQ_NVRAM 17#ifndef CONFIG_HOTPLUG_PCI_COMPAQ_NVRAM
18 18
19static inline void compaq_nvram_init(void __iomem *rom_start) 19static inline void compaq_nvram_init(void __iomem *rom_start) { }
20{
21 return;
22}
23 20
24static inline int compaq_nvram_load(void __iomem *rom_start, struct controller *ctrl) 21static inline int compaq_nvram_load(void __iomem *rom_start, struct controller *ctrl)
25{ 22{
diff --git a/drivers/pci/hotplug/ibmphp_res.c b/drivers/pci/hotplug/ibmphp_res.c
index 5e8caf7a4452..5c93aa14f0de 100644
--- a/drivers/pci/hotplug/ibmphp_res.c
+++ b/drivers/pci/hotplug/ibmphp_res.c
@@ -1941,6 +1941,7 @@ static int __init update_bridge_ranges(struct bus_node **bus)
1941 break; 1941 break;
1942 case PCI_HEADER_TYPE_BRIDGE: 1942 case PCI_HEADER_TYPE_BRIDGE:
1943 function = 0x8; 1943 function = 0x8;
1944 /* fall through */
1944 case PCI_HEADER_TYPE_MULTIBRIDGE: 1945 case PCI_HEADER_TYPE_MULTIBRIDGE:
1945 /* We assume here that only 1 bus behind the bridge 1946 /* We assume here that only 1 bus behind the bridge
1946 TO DO: add functionality for several: 1947 TO DO: add functionality for several:
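The ibmphp change only adds a fall-through annotation: PCI_HEADER_TYPE_BRIDGE intentionally continues into the PCI_HEADER_TYPE_MULTIBRIDGE case, and the comment keeps -Wimplicit-fallthrough builds quiet. The idiom, sketched out of context (kernels of this era use the comment form; later ones use the fallthrough pseudo-keyword):

switch (header_type) {
case PCI_HEADER_TYPE_BRIDGE:
	function = 0x8;
	/* fall through */
case PCI_HEADER_TYPE_MULTIBRIDGE:
	/* both bridge header types share the handling below */
	break;
}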
diff --git a/drivers/pci/hotplug/pciehp.h b/drivers/pci/hotplug/pciehp.h
index 8c51a04b8083..654c972b8ea0 100644
--- a/drivers/pci/hotplug/pciehp.h
+++ b/drivers/pci/hotplug/pciehp.h
@@ -110,9 +110,9 @@ struct controller {
110 * 110 *
111 * @OFF_STATE: slot is powered off, no subordinate devices are enumerated 111 * @OFF_STATE: slot is powered off, no subordinate devices are enumerated
112 * @BLINKINGON_STATE: slot will be powered on after the 5 second delay, 112 * @BLINKINGON_STATE: slot will be powered on after the 5 second delay,
113 * green led is blinking 113 * Power Indicator is blinking
114 * @BLINKINGOFF_STATE: slot will be powered off after the 5 second delay, 114 * @BLINKINGOFF_STATE: slot will be powered off after the 5 second delay,
115 * green led is blinking 115 * Power Indicator is blinking
116 * @POWERON_STATE: slot is currently powering on 116 * @POWERON_STATE: slot is currently powering on
117 * @POWEROFF_STATE: slot is currently powering off 117 * @POWEROFF_STATE: slot is currently powering off
118 * @ON_STATE: slot is powered on, subordinate devices have been enumerated 118 * @ON_STATE: slot is powered on, subordinate devices have been enumerated
@@ -167,12 +167,11 @@ int pciehp_power_on_slot(struct controller *ctrl);
167void pciehp_power_off_slot(struct controller *ctrl); 167void pciehp_power_off_slot(struct controller *ctrl);
168void pciehp_get_power_status(struct controller *ctrl, u8 *status); 168void pciehp_get_power_status(struct controller *ctrl, u8 *status);
169 169
170void pciehp_set_attention_status(struct controller *ctrl, u8 status); 170#define INDICATOR_NOOP -1 /* Leave indicator unchanged */
171void pciehp_set_indicators(struct controller *ctrl, int pwr, int attn);
172
171void pciehp_get_latch_status(struct controller *ctrl, u8 *status); 173void pciehp_get_latch_status(struct controller *ctrl, u8 *status);
172int pciehp_query_power_fault(struct controller *ctrl); 174int pciehp_query_power_fault(struct controller *ctrl);
173void pciehp_green_led_on(struct controller *ctrl);
174void pciehp_green_led_off(struct controller *ctrl);
175void pciehp_green_led_blink(struct controller *ctrl);
176bool pciehp_card_present(struct controller *ctrl); 175bool pciehp_card_present(struct controller *ctrl);
177bool pciehp_card_present_or_link_active(struct controller *ctrl); 176bool pciehp_card_present_or_link_active(struct controller *ctrl);
178int pciehp_check_link_status(struct controller *ctrl); 177int pciehp_check_link_status(struct controller *ctrl);
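The pciehp.h hunk replaces the per-LED helpers (pciehp_green_led_on/off/blink and pciehp_set_attention_status) with a single pciehp_set_indicators() that takes a Power Indicator state and an Attention Indicator state; INDICATOR_NOOP leaves the corresponding indicator untouched. A usage sketch matching the call sites converted later in this series:

/* power-on complete: Power Indicator on, Attention Indicator off */
pciehp_set_indicators(ctrl, PCI_EXP_SLTCTL_PWR_IND_ON,
		      PCI_EXP_SLTCTL_ATTN_IND_OFF);

/* blink the Power Indicator, leave the Attention Indicator alone */
pciehp_set_indicators(ctrl, PCI_EXP_SLTCTL_PWR_IND_BLINK, INDICATOR_NOOP);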
diff --git a/drivers/pci/hotplug/pciehp_core.c b/drivers/pci/hotplug/pciehp_core.c
index 6ad0d86762cb..b3122c151b80 100644
--- a/drivers/pci/hotplug/pciehp_core.c
+++ b/drivers/pci/hotplug/pciehp_core.c
@@ -95,15 +95,20 @@ static void cleanup_slot(struct controller *ctrl)
95} 95}
96 96
97/* 97/*
98 * set_attention_status - Turns the Amber LED for a slot on, off or blink 98 * set_attention_status - Turns the Attention Indicator on, off or blinking
99 */ 99 */
100static int set_attention_status(struct hotplug_slot *hotplug_slot, u8 status) 100static int set_attention_status(struct hotplug_slot *hotplug_slot, u8 status)
101{ 101{
102 struct controller *ctrl = to_ctrl(hotplug_slot); 102 struct controller *ctrl = to_ctrl(hotplug_slot);
103 struct pci_dev *pdev = ctrl->pcie->port; 103 struct pci_dev *pdev = ctrl->pcie->port;
104 104
105 if (status)
106 status <<= PCI_EXP_SLTCTL_ATTN_IND_SHIFT;
107 else
108 status = PCI_EXP_SLTCTL_ATTN_IND_OFF;
109
105 pci_config_pm_runtime_get(pdev); 110 pci_config_pm_runtime_get(pdev);
106 pciehp_set_attention_status(ctrl, status); 111 pciehp_set_indicators(ctrl, INDICATOR_NOOP, status);
107 pci_config_pm_runtime_put(pdev); 112 pci_config_pm_runtime_put(pdev);
108 return 0; 113 return 0;
109} 114}
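set_attention_status() is called by the hotplug core with 0 (off), 1 (on) or 2 (blink), while pciehp_set_indicators() now expects the Slot Control register encodings. Shifting the non-zero values into the Attention Indicator Control field produces those encodings directly; assuming the usual pci_regs.h values (field at bits 7:6, so a shift of 6), the mapping works out as:

/*
 * status 1 << 6 == 0x0040 == PCI_EXP_SLTCTL_ATTN_IND_ON
 * status 2 << 6 == 0x0080 == PCI_EXP_SLTCTL_ATTN_IND_BLINK
 * status 0 is mapped explicitly to 0x00C0 == PCI_EXP_SLTCTL_ATTN_IND_OFF
 */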
diff --git a/drivers/pci/hotplug/pciehp_ctrl.c b/drivers/pci/hotplug/pciehp_ctrl.c
index 631ced0ab28a..21af7b16d7a4 100644
--- a/drivers/pci/hotplug/pciehp_ctrl.c
+++ b/drivers/pci/hotplug/pciehp_ctrl.c
@@ -30,7 +30,10 @@
30 30
31static void set_slot_off(struct controller *ctrl) 31static void set_slot_off(struct controller *ctrl)
32{ 32{
33 /* turn off slot, turn on Amber LED, turn off Green LED if supported*/ 33 /*
34 * Turn off slot, turn on attention indicator, turn off power
35 * indicator
36 */
34 if (POWER_CTRL(ctrl)) { 37 if (POWER_CTRL(ctrl)) {
35 pciehp_power_off_slot(ctrl); 38 pciehp_power_off_slot(ctrl);
36 39
@@ -42,8 +45,8 @@ static void set_slot_off(struct controller *ctrl)
42 msleep(1000); 45 msleep(1000);
43 } 46 }
44 47
45 pciehp_green_led_off(ctrl); 48 pciehp_set_indicators(ctrl, PCI_EXP_SLTCTL_PWR_IND_OFF,
46 pciehp_set_attention_status(ctrl, 1); 49 PCI_EXP_SLTCTL_ATTN_IND_ON);
47} 50}
48 51
49/** 52/**
@@ -65,7 +68,8 @@ static int board_added(struct controller *ctrl)
65 return retval; 68 return retval;
66 } 69 }
67 70
68 pciehp_green_led_blink(ctrl); 71 pciehp_set_indicators(ctrl, PCI_EXP_SLTCTL_PWR_IND_BLINK,
72 INDICATOR_NOOP);
69 73
70 /* Check link training status */ 74 /* Check link training status */
71 retval = pciehp_check_link_status(ctrl); 75 retval = pciehp_check_link_status(ctrl);
@@ -90,8 +94,8 @@ static int board_added(struct controller *ctrl)
90 } 94 }
91 } 95 }
92 96
93 pciehp_green_led_on(ctrl); 97 pciehp_set_indicators(ctrl, PCI_EXP_SLTCTL_PWR_IND_ON,
94 pciehp_set_attention_status(ctrl, 0); 98 PCI_EXP_SLTCTL_ATTN_IND_OFF);
95 return 0; 99 return 0;
96 100
97err_exit: 101err_exit:
@@ -100,7 +104,7 @@ err_exit:
100} 104}
101 105
102/** 106/**
103 * remove_board - Turns off slot and LEDs 107 * remove_board - Turn off slot and Power Indicator
104 * @ctrl: PCIe hotplug controller where board is being removed 108 * @ctrl: PCIe hotplug controller where board is being removed
105 * @safe_removal: whether the board is safely removed (versus surprise removed) 109 * @safe_removal: whether the board is safely removed (versus surprise removed)
106 */ 110 */
@@ -123,8 +127,8 @@ static void remove_board(struct controller *ctrl, bool safe_removal)
123 &ctrl->pending_events); 127 &ctrl->pending_events);
124 } 128 }
125 129
126 /* turn off Green LED */ 130 pciehp_set_indicators(ctrl, PCI_EXP_SLTCTL_PWR_IND_OFF,
127 pciehp_green_led_off(ctrl); 131 INDICATOR_NOOP);
128} 132}
129 133
130static int pciehp_enable_slot(struct controller *ctrl); 134static int pciehp_enable_slot(struct controller *ctrl);
@@ -171,9 +175,9 @@ void pciehp_handle_button_press(struct controller *ctrl)
171 ctrl_info(ctrl, "Slot(%s) Powering on due to button press\n", 175 ctrl_info(ctrl, "Slot(%s) Powering on due to button press\n",
172 slot_name(ctrl)); 176 slot_name(ctrl));
173 } 177 }
174 /* blink green LED and turn off amber */ 178 /* blink power indicator and turn off attention */
175 pciehp_green_led_blink(ctrl); 179 pciehp_set_indicators(ctrl, PCI_EXP_SLTCTL_PWR_IND_BLINK,
176 pciehp_set_attention_status(ctrl, 0); 180 PCI_EXP_SLTCTL_ATTN_IND_OFF);
177 schedule_delayed_work(&ctrl->button_work, 5 * HZ); 181 schedule_delayed_work(&ctrl->button_work, 5 * HZ);
178 break; 182 break;
179 case BLINKINGOFF_STATE: 183 case BLINKINGOFF_STATE:
@@ -187,12 +191,13 @@ void pciehp_handle_button_press(struct controller *ctrl)
187 cancel_delayed_work(&ctrl->button_work); 191 cancel_delayed_work(&ctrl->button_work);
188 if (ctrl->state == BLINKINGOFF_STATE) { 192 if (ctrl->state == BLINKINGOFF_STATE) {
189 ctrl->state = ON_STATE; 193 ctrl->state = ON_STATE;
190 pciehp_green_led_on(ctrl); 194 pciehp_set_indicators(ctrl, PCI_EXP_SLTCTL_PWR_IND_ON,
195 PCI_EXP_SLTCTL_ATTN_IND_OFF);
191 } else { 196 } else {
192 ctrl->state = OFF_STATE; 197 ctrl->state = OFF_STATE;
193 pciehp_green_led_off(ctrl); 198 pciehp_set_indicators(ctrl, PCI_EXP_SLTCTL_PWR_IND_OFF,
199 PCI_EXP_SLTCTL_ATTN_IND_OFF);
194 } 200 }
195 pciehp_set_attention_status(ctrl, 0);
196 ctrl_info(ctrl, "Slot(%s): Action canceled due to button press\n", 201 ctrl_info(ctrl, "Slot(%s): Action canceled due to button press\n",
197 slot_name(ctrl)); 202 slot_name(ctrl));
198 break; 203 break;
@@ -310,7 +315,9 @@ static int pciehp_enable_slot(struct controller *ctrl)
310 pm_runtime_get_sync(&ctrl->pcie->port->dev); 315 pm_runtime_get_sync(&ctrl->pcie->port->dev);
311 ret = __pciehp_enable_slot(ctrl); 316 ret = __pciehp_enable_slot(ctrl);
312 if (ret && ATTN_BUTTN(ctrl)) 317 if (ret && ATTN_BUTTN(ctrl))
313 pciehp_green_led_off(ctrl); /* may be blinking */ 318 /* may be blinking */
319 pciehp_set_indicators(ctrl, PCI_EXP_SLTCTL_PWR_IND_OFF,
320 INDICATOR_NOOP);
314 pm_runtime_put(&ctrl->pcie->port->dev); 321 pm_runtime_put(&ctrl->pcie->port->dev);
315 322
316 mutex_lock(&ctrl->state_lock); 323 mutex_lock(&ctrl->state_lock);
diff --git a/drivers/pci/hotplug/pciehp_hpc.c b/drivers/pci/hotplug/pciehp_hpc.c
index bd990e3371e3..1a522c1c4177 100644
--- a/drivers/pci/hotplug/pciehp_hpc.c
+++ b/drivers/pci/hotplug/pciehp_hpc.c
@@ -418,65 +418,40 @@ int pciehp_set_raw_indicator_status(struct hotplug_slot *hotplug_slot,
418 return 0; 418 return 0;
419} 419}
420 420
421void pciehp_set_attention_status(struct controller *ctrl, u8 value) 421/**
422 * pciehp_set_indicators() - set attention indicator, power indicator, or both
423 * @ctrl: PCIe hotplug controller
424 * @pwr: one of:
425 * PCI_EXP_SLTCTL_PWR_IND_ON
426 * PCI_EXP_SLTCTL_PWR_IND_BLINK
427 * PCI_EXP_SLTCTL_PWR_IND_OFF
428 * @attn: one of:
429 * PCI_EXP_SLTCTL_ATTN_IND_ON
430 * PCI_EXP_SLTCTL_ATTN_IND_BLINK
431 * PCI_EXP_SLTCTL_ATTN_IND_OFF
432 *
433 * Either @pwr or @attn can also be INDICATOR_NOOP to leave that indicator
434 * unchanged.
435 */
436void pciehp_set_indicators(struct controller *ctrl, int pwr, int attn)
422{ 437{
423 u16 slot_cmd; 438 u16 cmd = 0, mask = 0;
424 439
425 if (!ATTN_LED(ctrl)) 440 if (PWR_LED(ctrl) && pwr != INDICATOR_NOOP) {
426 return; 441 cmd |= (pwr & PCI_EXP_SLTCTL_PIC);
427 442 mask |= PCI_EXP_SLTCTL_PIC;
428 switch (value) {
429 case 0: /* turn off */
430 slot_cmd = PCI_EXP_SLTCTL_ATTN_IND_OFF;
431 break;
432 case 1: /* turn on */
433 slot_cmd = PCI_EXP_SLTCTL_ATTN_IND_ON;
434 break;
435 case 2: /* turn blink */
436 slot_cmd = PCI_EXP_SLTCTL_ATTN_IND_BLINK;
437 break;
438 default:
439 return;
440 } 443 }
441 pcie_write_cmd_nowait(ctrl, slot_cmd, PCI_EXP_SLTCTL_AIC);
442 ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__,
443 pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, slot_cmd);
444}
445 444
446void pciehp_green_led_on(struct controller *ctrl) 445 if (ATTN_LED(ctrl) && attn != INDICATOR_NOOP) {
447{ 446 cmd |= (attn & PCI_EXP_SLTCTL_AIC);
448 if (!PWR_LED(ctrl)) 447 mask |= PCI_EXP_SLTCTL_AIC;
449 return; 448 }
450
451 pcie_write_cmd_nowait(ctrl, PCI_EXP_SLTCTL_PWR_IND_ON,
452 PCI_EXP_SLTCTL_PIC);
453 ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__,
454 pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL,
455 PCI_EXP_SLTCTL_PWR_IND_ON);
456}
457
458void pciehp_green_led_off(struct controller *ctrl)
459{
460 if (!PWR_LED(ctrl))
461 return;
462
463 pcie_write_cmd_nowait(ctrl, PCI_EXP_SLTCTL_PWR_IND_OFF,
464 PCI_EXP_SLTCTL_PIC);
465 ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__,
466 pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL,
467 PCI_EXP_SLTCTL_PWR_IND_OFF);
468}
469
470void pciehp_green_led_blink(struct controller *ctrl)
471{
472 if (!PWR_LED(ctrl))
473 return;
474 449
475 pcie_write_cmd_nowait(ctrl, PCI_EXP_SLTCTL_PWR_IND_BLINK, 450 if (cmd) {
476 PCI_EXP_SLTCTL_PIC); 451 pcie_write_cmd_nowait(ctrl, cmd, mask);
477 ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__, 452 ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__,
478 pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, 453 pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, cmd);
479 PCI_EXP_SLTCTL_PWR_IND_BLINK); 454 }
480} 455}
481 456
482int pciehp_power_on_slot(struct controller *ctrl) 457int pciehp_power_on_slot(struct controller *ctrl)
@@ -638,8 +613,8 @@ static irqreturn_t pciehp_ist(int irq, void *dev_id)
638 if ((events & PCI_EXP_SLTSTA_PFD) && !ctrl->power_fault_detected) { 613 if ((events & PCI_EXP_SLTSTA_PFD) && !ctrl->power_fault_detected) {
639 ctrl->power_fault_detected = 1; 614 ctrl->power_fault_detected = 1;
640 ctrl_err(ctrl, "Slot(%s): Power fault\n", slot_name(ctrl)); 615 ctrl_err(ctrl, "Slot(%s): Power fault\n", slot_name(ctrl));
641 pciehp_set_attention_status(ctrl, 1); 616 pciehp_set_indicators(ctrl, PCI_EXP_SLTCTL_PWR_IND_OFF,
642 pciehp_green_led_off(ctrl); 617 PCI_EXP_SLTCTL_ATTN_IND_ON);
643 } 618 }
644 619
645 /* 620 /*
diff --git a/drivers/pci/hotplug/rpadlpar_core.c b/drivers/pci/hotplug/rpadlpar_core.c
index 182f9e3443ee..977946e4e613 100644
--- a/drivers/pci/hotplug/rpadlpar_core.c
+++ b/drivers/pci/hotplug/rpadlpar_core.c
@@ -473,7 +473,6 @@ int __init rpadlpar_io_init(void)
473void rpadlpar_io_exit(void) 473void rpadlpar_io_exit(void)
474{ 474{
475 dlpar_sysfs_exit(); 475 dlpar_sysfs_exit();
476 return;
477} 476}
478 477
479module_init(rpadlpar_io_init); 478module_init(rpadlpar_io_init);
diff --git a/drivers/pci/hotplug/rpaphp_core.c b/drivers/pci/hotplug/rpaphp_core.c
index c3899ee1db99..18627bb21e9e 100644
--- a/drivers/pci/hotplug/rpaphp_core.c
+++ b/drivers/pci/hotplug/rpaphp_core.c
@@ -408,7 +408,6 @@ static void __exit cleanup_slots(void)
408 pci_hp_deregister(&slot->hotplug_slot); 408 pci_hp_deregister(&slot->hotplug_slot);
409 dealloc_slot_struct(slot); 409 dealloc_slot_struct(slot);
410 } 410 }
411 return;
412} 411}
413 412
414static int __init rpaphp_init(void) 413static int __init rpaphp_init(void)
diff --git a/drivers/pci/iov.c b/drivers/pci/iov.c
index 525fd3f272b3..b3f972e8cfed 100644
--- a/drivers/pci/iov.c
+++ b/drivers/pci/iov.c
@@ -240,6 +240,173 @@ void pci_iov_remove_virtfn(struct pci_dev *dev, int id)
240 pci_dev_put(dev); 240 pci_dev_put(dev);
241} 241}
242 242
243static ssize_t sriov_totalvfs_show(struct device *dev,
244 struct device_attribute *attr,
245 char *buf)
246{
247 struct pci_dev *pdev = to_pci_dev(dev);
248
249 return sprintf(buf, "%u\n", pci_sriov_get_totalvfs(pdev));
250}
251
252static ssize_t sriov_numvfs_show(struct device *dev,
253 struct device_attribute *attr,
254 char *buf)
255{
256 struct pci_dev *pdev = to_pci_dev(dev);
257
258 return sprintf(buf, "%u\n", pdev->sriov->num_VFs);
259}
260
261/*
262 * num_vfs > 0; number of VFs to enable
263 * num_vfs = 0; disable all VFs
264 *
265 * Note: SRIOV spec does not allow partial VF
266 * disable, so it's all or none.
267 */
268static ssize_t sriov_numvfs_store(struct device *dev,
269 struct device_attribute *attr,
270 const char *buf, size_t count)
271{
272 struct pci_dev *pdev = to_pci_dev(dev);
273 int ret;
274 u16 num_vfs;
275
276 ret = kstrtou16(buf, 0, &num_vfs);
277 if (ret < 0)
278 return ret;
279
280 if (num_vfs > pci_sriov_get_totalvfs(pdev))
281 return -ERANGE;
282
283 device_lock(&pdev->dev);
284
285 if (num_vfs == pdev->sriov->num_VFs)
286 goto exit;
287
288 /* is PF driver loaded w/callback */
289 if (!pdev->driver || !pdev->driver->sriov_configure) {
290 pci_info(pdev, "Driver does not support SRIOV configuration via sysfs\n");
291 ret = -ENOENT;
292 goto exit;
293 }
294
295 if (num_vfs == 0) {
296 /* disable VFs */
297 ret = pdev->driver->sriov_configure(pdev, 0);
298 goto exit;
299 }
300
301 /* enable VFs */
302 if (pdev->sriov->num_VFs) {
303 pci_warn(pdev, "%d VFs already enabled. Disable before enabling %d VFs\n",
304 pdev->sriov->num_VFs, num_vfs);
305 ret = -EBUSY;
306 goto exit;
307 }
308
309 ret = pdev->driver->sriov_configure(pdev, num_vfs);
310 if (ret < 0)
311 goto exit;
312
313 if (ret != num_vfs)
314 pci_warn(pdev, "%d VFs requested; only %d enabled\n",
315 num_vfs, ret);
316
317exit:
318 device_unlock(&pdev->dev);
319
320 if (ret < 0)
321 return ret;
322
323 return count;
324}
325
326static ssize_t sriov_offset_show(struct device *dev,
327 struct device_attribute *attr,
328 char *buf)
329{
330 struct pci_dev *pdev = to_pci_dev(dev);
331
332 return sprintf(buf, "%u\n", pdev->sriov->offset);
333}
334
335static ssize_t sriov_stride_show(struct device *dev,
336 struct device_attribute *attr,
337 char *buf)
338{
339 struct pci_dev *pdev = to_pci_dev(dev);
340
341 return sprintf(buf, "%u\n", pdev->sriov->stride);
342}
343
344static ssize_t sriov_vf_device_show(struct device *dev,
345 struct device_attribute *attr,
346 char *buf)
347{
348 struct pci_dev *pdev = to_pci_dev(dev);
349
350 return sprintf(buf, "%x\n", pdev->sriov->vf_device);
351}
352
353static ssize_t sriov_drivers_autoprobe_show(struct device *dev,
354 struct device_attribute *attr,
355 char *buf)
356{
357 struct pci_dev *pdev = to_pci_dev(dev);
358
359 return sprintf(buf, "%u\n", pdev->sriov->drivers_autoprobe);
360}
361
362static ssize_t sriov_drivers_autoprobe_store(struct device *dev,
363 struct device_attribute *attr,
364 const char *buf, size_t count)
365{
366 struct pci_dev *pdev = to_pci_dev(dev);
367 bool drivers_autoprobe;
368
369 if (kstrtobool(buf, &drivers_autoprobe) < 0)
370 return -EINVAL;
371
372 pdev->sriov->drivers_autoprobe = drivers_autoprobe;
373
374 return count;
375}
376
377static DEVICE_ATTR_RO(sriov_totalvfs);
378static DEVICE_ATTR_RW(sriov_numvfs);
379static DEVICE_ATTR_RO(sriov_offset);
380static DEVICE_ATTR_RO(sriov_stride);
381static DEVICE_ATTR_RO(sriov_vf_device);
382static DEVICE_ATTR_RW(sriov_drivers_autoprobe);
383
384static struct attribute *sriov_dev_attrs[] = {
385 &dev_attr_sriov_totalvfs.attr,
386 &dev_attr_sriov_numvfs.attr,
387 &dev_attr_sriov_offset.attr,
388 &dev_attr_sriov_stride.attr,
389 &dev_attr_sriov_vf_device.attr,
390 &dev_attr_sriov_drivers_autoprobe.attr,
391 NULL,
392};
393
394static umode_t sriov_attrs_are_visible(struct kobject *kobj,
395 struct attribute *a, int n)
396{
397 struct device *dev = kobj_to_dev(kobj);
398
399 if (!dev_is_pf(dev))
400 return 0;
401
402 return a->mode;
403}
404
405const struct attribute_group sriov_dev_attr_group = {
406 .attrs = sriov_dev_attrs,
407 .is_visible = sriov_attrs_are_visible,
408};
409
243int __weak pcibios_sriov_enable(struct pci_dev *pdev, u16 num_vfs) 410int __weak pcibios_sriov_enable(struct pci_dev *pdev, u16 num_vfs)
244{ 411{
245 return 0; 412 return 0;
@@ -557,8 +724,8 @@ static void sriov_restore_state(struct pci_dev *dev)
557 ctrl |= iov->ctrl & PCI_SRIOV_CTRL_ARI; 724 ctrl |= iov->ctrl & PCI_SRIOV_CTRL_ARI;
558 pci_write_config_word(dev, iov->pos + PCI_SRIOV_CTRL, ctrl); 725 pci_write_config_word(dev, iov->pos + PCI_SRIOV_CTRL, ctrl);
559 726
560 for (i = PCI_IOV_RESOURCES; i <= PCI_IOV_RESOURCE_END; i++) 727 for (i = 0; i < PCI_SRIOV_NUM_BARS; i++)
561 pci_update_resource(dev, i); 728 pci_update_resource(dev, i + PCI_IOV_RESOURCES);
562 729
563 pci_write_config_dword(dev, iov->pos + PCI_SRIOV_SYS_PGSIZE, iov->pgsz); 730 pci_write_config_dword(dev, iov->pos + PCI_SRIOV_SYS_PGSIZE, iov->pgsz);
564 pci_iov_set_numvfs(dev, iov->num_VFs); 731 pci_iov_set_numvfs(dev, iov->num_VFs);
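The SR-IOV sysfs attributes added to iov.c above form the VF control interface: sriov_numvfs_store() validates the request and then hands it to the bound PF driver's ->sriov_configure() callback from struct pci_driver, where 0 disables all VFs and a positive count enables that many (the spec allows no partial disable). A hedged sketch of such a callback, with all foo_* names hypothetical:

static int foo_sriov_configure(struct pci_dev *pdev, int num_vfs)
{
	int ret;

	if (num_vfs == 0) {
		pci_disable_sriov(pdev);
		return 0;
	}

	ret = pci_enable_sriov(pdev, num_vfs);
	if (ret)
		return ret;

	/* returning the enabled count lets the core warn if it differs */
	return num_vfs;
}

static struct pci_driver foo_driver = {
	.name		 = "foo",
	/* .id_table, .probe, .remove omitted for brevity */
	.sriov_configure = foo_sriov_configure,
};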
diff --git a/drivers/pci/of.c b/drivers/pci/of.c
index bc7b27a28795..36891e7deee3 100644
--- a/drivers/pci/of.c
+++ b/drivers/pci/of.c
@@ -353,7 +353,7 @@ EXPORT_SYMBOL_GPL(devm_of_pci_get_host_bridge_resources);
353/** 353/**
354 * of_irq_parse_pci - Resolve the interrupt for a PCI device 354 * of_irq_parse_pci - Resolve the interrupt for a PCI device
355 * @pdev: the device whose interrupt is to be resolved 355 * @pdev: the device whose interrupt is to be resolved
356 * @out_irq: structure of_irq filled by this function 356 * @out_irq: structure of_phandle_args filled by this function
357 * 357 *
358 * This function resolves the PCI interrupt for a given PCI device. If a 358 * This function resolves the PCI interrupt for a given PCI device. If a
359 * device-node exists for a given pci_dev, it will use normal OF tree 359 * device-node exists for a given pci_dev, it will use normal OF tree
diff --git a/drivers/pci/p2pdma.c b/drivers/pci/p2pdma.c
index 234476226529..0608aae72ccc 100644
--- a/drivers/pci/p2pdma.c
+++ b/drivers/pci/p2pdma.c
@@ -18,13 +18,32 @@
18#include <linux/percpu-refcount.h> 18#include <linux/percpu-refcount.h>
19#include <linux/random.h> 19#include <linux/random.h>
20#include <linux/seq_buf.h> 20#include <linux/seq_buf.h>
21#include <linux/iommu.h> 21#include <linux/xarray.h>
22
23enum pci_p2pdma_map_type {
24 PCI_P2PDMA_MAP_UNKNOWN = 0,
25 PCI_P2PDMA_MAP_NOT_SUPPORTED,
26 PCI_P2PDMA_MAP_BUS_ADDR,
27 PCI_P2PDMA_MAP_THRU_HOST_BRIDGE,
28};
22 29
23struct pci_p2pdma { 30struct pci_p2pdma {
24 struct gen_pool *pool; 31 struct gen_pool *pool;
25 bool p2pmem_published; 32 bool p2pmem_published;
33 struct xarray map_types;
26}; 34};
27 35
36struct pci_p2pdma_pagemap {
37 struct dev_pagemap pgmap;
38 struct pci_dev *provider;
39 u64 bus_offset;
40};
41
42static struct pci_p2pdma_pagemap *to_p2p_pgmap(struct dev_pagemap *pgmap)
43{
44 return container_of(pgmap, struct pci_p2pdma_pagemap, pgmap);
45}
46
28static ssize_t size_show(struct device *dev, struct device_attribute *attr, 47static ssize_t size_show(struct device *dev, struct device_attribute *attr,
29 char *buf) 48 char *buf)
30{ 49{
@@ -87,6 +106,7 @@ static void pci_p2pdma_release(void *data)
87 106
88 gen_pool_destroy(p2pdma->pool); 107 gen_pool_destroy(p2pdma->pool);
89 sysfs_remove_group(&pdev->dev.kobj, &p2pmem_group); 108 sysfs_remove_group(&pdev->dev.kobj, &p2pmem_group);
109 xa_destroy(&p2pdma->map_types);
90} 110}
91 111
92static int pci_p2pdma_setup(struct pci_dev *pdev) 112static int pci_p2pdma_setup(struct pci_dev *pdev)
@@ -98,6 +118,8 @@ static int pci_p2pdma_setup(struct pci_dev *pdev)
98 if (!p2p) 118 if (!p2p)
99 return -ENOMEM; 119 return -ENOMEM;
100 120
121 xa_init(&p2p->map_types);
122
101 p2p->pool = gen_pool_create(PAGE_SHIFT, dev_to_node(&pdev->dev)); 123 p2p->pool = gen_pool_create(PAGE_SHIFT, dev_to_node(&pdev->dev));
102 if (!p2p->pool) 124 if (!p2p->pool)
103 goto out; 125 goto out;
@@ -135,6 +157,7 @@ out:
135int pci_p2pdma_add_resource(struct pci_dev *pdev, int bar, size_t size, 157int pci_p2pdma_add_resource(struct pci_dev *pdev, int bar, size_t size,
136 u64 offset) 158 u64 offset)
137{ 159{
160 struct pci_p2pdma_pagemap *p2p_pgmap;
138 struct dev_pagemap *pgmap; 161 struct dev_pagemap *pgmap;
139 void *addr; 162 void *addr;
140 int error; 163 int error;
@@ -157,14 +180,18 @@ int pci_p2pdma_add_resource(struct pci_dev *pdev, int bar, size_t size,
157 return error; 180 return error;
158 } 181 }
159 182
160 pgmap = devm_kzalloc(&pdev->dev, sizeof(*pgmap), GFP_KERNEL); 183 p2p_pgmap = devm_kzalloc(&pdev->dev, sizeof(*p2p_pgmap), GFP_KERNEL);
161 if (!pgmap) 184 if (!p2p_pgmap)
162 return -ENOMEM; 185 return -ENOMEM;
186
187 pgmap = &p2p_pgmap->pgmap;
163 pgmap->res.start = pci_resource_start(pdev, bar) + offset; 188 pgmap->res.start = pci_resource_start(pdev, bar) + offset;
164 pgmap->res.end = pgmap->res.start + size - 1; 189 pgmap->res.end = pgmap->res.start + size - 1;
165 pgmap->res.flags = pci_resource_flags(pdev, bar); 190 pgmap->res.flags = pci_resource_flags(pdev, bar);
166 pgmap->type = MEMORY_DEVICE_PCI_P2PDMA; 191 pgmap->type = MEMORY_DEVICE_PCI_P2PDMA;
167 pgmap->pci_p2pdma_bus_offset = pci_bus_address(pdev, bar) - 192
193 p2p_pgmap->provider = pdev;
194 p2p_pgmap->bus_offset = pci_bus_address(pdev, bar) -
168 pci_resource_start(pdev, bar); 195 pci_resource_start(pdev, bar);
169 196
170 addr = devm_memremap_pages(&pdev->dev, pgmap); 197 addr = devm_memremap_pages(&pdev->dev, pgmap);
@@ -246,19 +273,32 @@ static void seq_buf_print_bus_devfn(struct seq_buf *buf, struct pci_dev *pdev)
246 seq_buf_printf(buf, "%s;", pci_name(pdev)); 273 seq_buf_printf(buf, "%s;", pci_name(pdev));
247} 274}
248 275
249/* 276static const struct pci_p2pdma_whitelist_entry {
250 * If we can't find a common upstream bridge take a look at the root 277 unsigned short vendor;
251 * complex and compare it to a whitelist of known good hardware. 278 unsigned short device;
252 */ 279 enum {
253static bool root_complex_whitelist(struct pci_dev *dev) 280 REQ_SAME_HOST_BRIDGE = 1 << 0,
281 } flags;
282} pci_p2pdma_whitelist[] = {
283 /* AMD ZEN */
284 {PCI_VENDOR_ID_AMD, 0x1450, 0},
285
286 /* Intel Xeon E5/Core i7 */
287 {PCI_VENDOR_ID_INTEL, 0x3c00, REQ_SAME_HOST_BRIDGE},
288 {PCI_VENDOR_ID_INTEL, 0x3c01, REQ_SAME_HOST_BRIDGE},
289 /* Intel Xeon E7 v3/Xeon E5 v3/Core i7 */
290 {PCI_VENDOR_ID_INTEL, 0x2f00, REQ_SAME_HOST_BRIDGE},
291 {PCI_VENDOR_ID_INTEL, 0x2f01, REQ_SAME_HOST_BRIDGE},
292 {}
293};
294
295static bool __host_bridge_whitelist(struct pci_host_bridge *host,
296 bool same_host_bridge)
254{ 297{
255 struct pci_host_bridge *host = pci_find_host_bridge(dev->bus);
256 struct pci_dev *root = pci_get_slot(host->bus, PCI_DEVFN(0, 0)); 298 struct pci_dev *root = pci_get_slot(host->bus, PCI_DEVFN(0, 0));
299 const struct pci_p2pdma_whitelist_entry *entry;
257 unsigned short vendor, device; 300 unsigned short vendor, device;
258 301
259 if (iommu_present(dev->dev.bus))
260 return false;
261
262 if (!root) 302 if (!root)
263 return false; 303 return false;
264 304
@@ -266,65 +306,49 @@ static bool root_complex_whitelist(struct pci_dev *dev)
266 device = root->device; 306 device = root->device;
267 pci_dev_put(root); 307 pci_dev_put(root);
268 308
269 /* AMD ZEN host bridges can do peer to peer */ 309 for (entry = pci_p2pdma_whitelist; entry->vendor; entry++) {
270 if (vendor == PCI_VENDOR_ID_AMD && device == 0x1450) 310 if (vendor != entry->vendor || device != entry->device)
311 continue;
312 if (entry->flags & REQ_SAME_HOST_BRIDGE && !same_host_bridge)
313 return false;
314
271 return true; 315 return true;
316 }
272 317
273 return false; 318 return false;
274} 319}
275 320
276/* 321/*
277 * Find the distance through the nearest common upstream bridge between 322 * If we can't find a common upstream bridge take a look at the root
278 * two PCI devices. 323 * complex and compare it to a whitelist of known good hardware.
279 *
280 * If the two devices are the same device then 0 will be returned.
281 *
282 * If there are two virtual functions of the same device behind the same
283 * bridge port then 2 will be returned (one step down to the PCIe switch,
284 * then one step back to the same device).
285 *
286 * In the case where two devices are connected to the same PCIe switch, the
287 * value 4 will be returned. This corresponds to the following PCI tree:
288 *
289 * -+ Root Port
290 * \+ Switch Upstream Port
291 * +-+ Switch Downstream Port
292 * + \- Device A
293 * \-+ Switch Downstream Port
294 * \- Device B
295 *
296 * The distance is 4 because we traverse from Device A through the downstream
297 * port of the switch, to the common upstream port, back up to the second
298 * downstream port and then to Device B.
299 *
300 * Any two devices that don't have a common upstream bridge will return -1.
301 * In this way devices on separate PCIe root ports will be rejected, which
302 * is what we want for peer-to-peer seeing each PCIe root port defines a
303 * separate hierarchy domain and there's no way to determine whether the root
304 * complex supports forwarding between them.
305 *
306 * In the case where two devices are connected to different PCIe switches,
307 * this function will still return a positive distance as long as both
308 * switches eventually have a common upstream bridge. Note this covers
309 * the case of using multiple PCIe switches to achieve a desired level of
310 * fan-out from a root port. The exact distance will be a function of the
311 * number of switches between Device A and Device B.
312 *
313 * If a bridge which has any ACS redirection bits set is in the path
314 * then this functions will return -2. This is so we reject any
315 * cases where the TLPs are forwarded up into the root complex.
316 * In this case, a list of all infringing bridge addresses will be
317 * populated in acs_list (assuming it's non-null) for printk purposes.
318 */ 324 */
319static int upstream_bridge_distance(struct pci_dev *provider, 325static bool host_bridge_whitelist(struct pci_dev *a, struct pci_dev *b)
320 struct pci_dev *client, 326{
321 struct seq_buf *acs_list) 327 struct pci_host_bridge *host_a = pci_find_host_bridge(a->bus);
328 struct pci_host_bridge *host_b = pci_find_host_bridge(b->bus);
329
330 if (host_a == host_b)
331 return __host_bridge_whitelist(host_a, true);
332
333 if (__host_bridge_whitelist(host_a, false) &&
334 __host_bridge_whitelist(host_b, false))
335 return true;
336
337 return false;
338}
339
340static enum pci_p2pdma_map_type
341__upstream_bridge_distance(struct pci_dev *provider, struct pci_dev *client,
342 int *dist, bool *acs_redirects, struct seq_buf *acs_list)
322{ 343{
323 struct pci_dev *a = provider, *b = client, *bb; 344 struct pci_dev *a = provider, *b = client, *bb;
324 int dist_a = 0; 345 int dist_a = 0;
325 int dist_b = 0; 346 int dist_b = 0;
326 int acs_cnt = 0; 347 int acs_cnt = 0;
327 348
349 if (acs_redirects)
350 *acs_redirects = false;
351
328 /* 352 /*
329 * Note, we don't need to take references to devices returned by 353 * Note, we don't need to take references to devices returned by
330 * pci_upstream_bridge() seeing we hold a reference to a child 354 * pci_upstream_bridge() seeing we hold a reference to a child
@@ -353,15 +377,10 @@ static int upstream_bridge_distance(struct pci_dev *provider,
353 dist_a++; 377 dist_a++;
354 } 378 }
355 379
356 /* 380 if (dist)
357 * Allow the connection if both devices are on a whitelisted root 381 *dist = dist_a + dist_b;
358 * complex, but add an arbitrary large value to the distance.
359 */
360 if (root_complex_whitelist(provider) &&
361 root_complex_whitelist(client))
362 return 0x1000 + dist_a + dist_b;
363 382
364 return -1; 383 return PCI_P2PDMA_MAP_THRU_HOST_BRIDGE;
365 384
366check_b_path_acs: 385check_b_path_acs:
367 bb = b; 386 bb = b;
@@ -378,33 +397,110 @@ check_b_path_acs:
378 bb = pci_upstream_bridge(bb); 397 bb = pci_upstream_bridge(bb);
379 } 398 }
380 399
381 if (acs_cnt) 400 if (dist)
382 return -2; 401 *dist = dist_a + dist_b;
402
403 if (acs_cnt) {
404 if (acs_redirects)
405 *acs_redirects = true;
406
407 return PCI_P2PDMA_MAP_THRU_HOST_BRIDGE;
408 }
409
410 return PCI_P2PDMA_MAP_BUS_ADDR;
411}
412
413static unsigned long map_types_idx(struct pci_dev *client)
414{
415 return (pci_domain_nr(client->bus) << 16) |
416 (client->bus->number << 8) | client->devfn;
417}
418
419/*
420 * Find the distance through the nearest common upstream bridge between
421 * two PCI devices.
422 *
423 * If the two devices are the same device then 0 will be returned.
424 *
425 * If there are two virtual functions of the same device behind the same
426 * bridge port then 2 will be returned (one step down to the PCIe switch,
427 * then one step back to the same device).
428 *
429 * In the case where two devices are connected to the same PCIe switch, the
430 * value 4 will be returned. This corresponds to the following PCI tree:
431 *
432 * -+ Root Port
433 * \+ Switch Upstream Port
434 * +-+ Switch Downstream Port
435 * + \- Device A
436 * \-+ Switch Downstream Port
437 * \- Device B
438 *
439 * The distance is 4 because we traverse from Device A through the downstream
440 * port of the switch, to the common upstream port, back up to the second
441 * downstream port and then to Device B.
442 *
443 * Any two devices that cannot communicate using p2pdma will return
444 * PCI_P2PDMA_MAP_NOT_SUPPORTED.
445 *
446 * Any two devices that have a data path that goes through the host bridge
447 * will consult a whitelist. If the host bridges are on the whitelist,
448 * this function will return PCI_P2PDMA_MAP_THRU_HOST_BRIDGE.
449 *
450 * If either bridge is not on the whitelist this function returns
451 * PCI_P2PDMA_MAP_NOT_SUPPORTED.
452 *
453 * If a bridge which has any ACS redirection bits set is in the path,
454 * acs_redirects will be set to true. In this case, a list of all infringing
455 * bridge addresses will be populated in acs_list (assuming it's non-null)
456 * for printk purposes.
457 */
458static enum pci_p2pdma_map_type
459upstream_bridge_distance(struct pci_dev *provider, struct pci_dev *client,
460 int *dist, bool *acs_redirects, struct seq_buf *acs_list)
461{
462 enum pci_p2pdma_map_type map_type;
463
464 map_type = __upstream_bridge_distance(provider, client, dist,
465 acs_redirects, acs_list);
466
467 if (map_type == PCI_P2PDMA_MAP_THRU_HOST_BRIDGE) {
468 if (!host_bridge_whitelist(provider, client))
469 map_type = PCI_P2PDMA_MAP_NOT_SUPPORTED;
470 }
471
472 if (provider->p2pdma)
473 xa_store(&provider->p2pdma->map_types, map_types_idx(client),
474 xa_mk_value(map_type), GFP_KERNEL);
383 475
384 return dist_a + dist_b; 476 return map_type;
385} 477}
386 478
387static int upstream_bridge_distance_warn(struct pci_dev *provider, 479static enum pci_p2pdma_map_type
388 struct pci_dev *client) 480upstream_bridge_distance_warn(struct pci_dev *provider, struct pci_dev *client,
481 int *dist)
389{ 482{
390 struct seq_buf acs_list; 483 struct seq_buf acs_list;
484 bool acs_redirects;
391 int ret; 485 int ret;
392 486
393 seq_buf_init(&acs_list, kmalloc(PAGE_SIZE, GFP_KERNEL), PAGE_SIZE); 487 seq_buf_init(&acs_list, kmalloc(PAGE_SIZE, GFP_KERNEL), PAGE_SIZE);
394 if (!acs_list.buffer) 488 if (!acs_list.buffer)
395 return -ENOMEM; 489 return -ENOMEM;
396 490
397 ret = upstream_bridge_distance(provider, client, &acs_list); 491 ret = upstream_bridge_distance(provider, client, dist, &acs_redirects,
398 if (ret == -2) { 492 &acs_list);
399 pci_warn(client, "cannot be used for peer-to-peer DMA as ACS redirect is set between the client and provider (%s)\n", 493 if (acs_redirects) {
494 pci_warn(client, "ACS redirect is set between the client and provider (%s)\n",
400 pci_name(provider)); 495 pci_name(provider));
401 /* Drop final semicolon */ 496 /* Drop final semicolon */
402 acs_list.buffer[acs_list.len-1] = 0; 497 acs_list.buffer[acs_list.len-1] = 0;
403 pci_warn(client, "to disable ACS redirect for this path, add the kernel parameter: pci=disable_acs_redir=%s\n", 498 pci_warn(client, "to disable ACS redirect for this path, add the kernel parameter: pci=disable_acs_redir=%s\n",
404 acs_list.buffer); 499 acs_list.buffer);
500 }
405 501
406 } else if (ret < 0) { 502 if (ret == PCI_P2PDMA_MAP_NOT_SUPPORTED) {
407 pci_warn(client, "cannot be used for peer-to-peer DMA as the client and provider (%s) do not share an upstream bridge\n", 503 pci_warn(client, "cannot be used for peer-to-peer DMA as the client and provider (%s) do not share an upstream bridge or whitelisted host bridge\n",
408 pci_name(provider)); 504 pci_name(provider));
409 } 505 }
410 506
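The mapping type computed above is cached in the provider's xarray, keyed by map_types_idx(), which packs the client's domain, bus and device/function numbers into a single index. A worked example of the packing:

/*
 * map_types_idx() returns (domain << 16) | (bus << 8) | devfn.
 * For a client at 0000:03:00.1 (domain 0, bus 0x03, devfn 0x01):
 *     (0 << 16) | (0x03 << 8) | 0x01 == 0x0301
 */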
@@ -421,22 +517,22 @@ static int upstream_bridge_distance_warn(struct pci_dev *provider,
421 * @num_clients: number of clients in the array 517 * @num_clients: number of clients in the array
422 * @verbose: if true, print warnings for devices when we return -1 518 * @verbose: if true, print warnings for devices when we return -1
423 * 519 *
424 * Returns -1 if any of the clients are not compatible (behind the same 520 * Returns -1 if any of the clients are not compatible, otherwise returns a
425 * root port as the provider), otherwise returns a positive number where 521 * positive number where a lower number is the preferable choice. (If there's
426 * a lower number is the preferable choice. (If there's one client 522 * one client that's the same as the provider it will return 0, which is best
427 * that's the same as the provider it will return 0, which is best choice). 523 * choice).
428 * 524 *
429 * For now, "compatible" means the provider and the clients are all behind 525 * "compatible" means the provider and the clients are either all behind
430 * the same PCI root port. This cuts out cases that may work but is safest 526 * the same PCI root port or the host bridges connected to each of the devices
431 * for the user. Future work can expand this to white-list root complexes that 527 * are listed in the 'pci_p2pdma_whitelist'.
432 * can safely forward between each ports.
433 */ 528 */
434int pci_p2pdma_distance_many(struct pci_dev *provider, struct device **clients, 529int pci_p2pdma_distance_many(struct pci_dev *provider, struct device **clients,
435 int num_clients, bool verbose) 530 int num_clients, bool verbose)
436{ 531{
437 bool not_supported = false; 532 bool not_supported = false;
438 struct pci_dev *pci_client; 533 struct pci_dev *pci_client;
439 int distance = 0; 534 int total_dist = 0;
535 int distance;
440 int i, ret; 536 int i, ret;
441 537
442 if (num_clients == 0) 538 if (num_clients == 0)
@@ -461,26 +557,26 @@ int pci_p2pdma_distance_many(struct pci_dev *provider, struct device **clients,
461 557
462 if (verbose) 558 if (verbose)
463 ret = upstream_bridge_distance_warn(provider, 559 ret = upstream_bridge_distance_warn(provider,
464 pci_client); 560 pci_client, &distance);
465 else 561 else
466 ret = upstream_bridge_distance(provider, pci_client, 562 ret = upstream_bridge_distance(provider, pci_client,
467 NULL); 563 &distance, NULL, NULL);
468 564
469 pci_dev_put(pci_client); 565 pci_dev_put(pci_client);
470 566
471 if (ret < 0) 567 if (ret == PCI_P2PDMA_MAP_NOT_SUPPORTED)
472 not_supported = true; 568 not_supported = true;
473 569
474 if (not_supported && !verbose) 570 if (not_supported && !verbose)
475 break; 571 break;
476 572
477 distance += ret; 573 total_dist += distance;
478 } 574 }
479 575
480 if (not_supported) 576 if (not_supported)
481 return -1; 577 return -1;
482 578
483 return distance; 579 return total_dist;
484} 580}
485EXPORT_SYMBOL_GPL(pci_p2pdma_distance_many); 581EXPORT_SYMBOL_GPL(pci_p2pdma_distance_many);
486 582
@@ -706,21 +802,19 @@ void pci_p2pmem_publish(struct pci_dev *pdev, bool publish)
706} 802}
707EXPORT_SYMBOL_GPL(pci_p2pmem_publish); 803EXPORT_SYMBOL_GPL(pci_p2pmem_publish);
708 804
709/** 805static enum pci_p2pdma_map_type pci_p2pdma_map_type(struct pci_dev *provider,
710 * pci_p2pdma_map_sg - map a PCI peer-to-peer scatterlist for DMA 806 struct pci_dev *client)
711 * @dev: device doing the DMA request 807{
712 * @sg: scatter list to map 808 if (!provider->p2pdma)
713 * @nents: elements in the scatterlist 809 return PCI_P2PDMA_MAP_NOT_SUPPORTED;
714 * @dir: DMA direction 810
715 * 811 return xa_to_value(xa_load(&provider->p2pdma->map_types,
716 * Scatterlists mapped with this function should not be unmapped in any way. 812 map_types_idx(client)));
717 * 813}
718 * Returns the number of SG entries mapped or 0 on error. 814
719 */ 815static int __pci_p2pdma_map_sg(struct pci_p2pdma_pagemap *p2p_pgmap,
720int pci_p2pdma_map_sg(struct device *dev, struct scatterlist *sg, int nents, 816 struct device *dev, struct scatterlist *sg, int nents)
721 enum dma_data_direction dir)
722{ 817{
723 struct dev_pagemap *pgmap;
724 struct scatterlist *s; 818 struct scatterlist *s;
725 phys_addr_t paddr; 819 phys_addr_t paddr;
726 int i; 820 int i;
@@ -736,16 +830,80 @@ int pci_p2pdma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
736 return 0; 830 return 0;
737 831
738 for_each_sg(sg, s, nents, i) { 832 for_each_sg(sg, s, nents, i) {
739 pgmap = sg_page(s)->pgmap;
740 paddr = sg_phys(s); 833 paddr = sg_phys(s);
741 834
742 s->dma_address = paddr - pgmap->pci_p2pdma_bus_offset; 835 s->dma_address = paddr - p2p_pgmap->bus_offset;
743 sg_dma_len(s) = s->length; 836 sg_dma_len(s) = s->length;
744 } 837 }
745 838
746 return nents; 839 return nents;
747} 840}
748EXPORT_SYMBOL_GPL(pci_p2pdma_map_sg); 841
842/**
843 * pci_p2pdma_map_sg - map a PCI peer-to-peer scatterlist for DMA
844 * @dev: device doing the DMA request
845 * @sg: scatter list to map
846 * @nents: elements in the scatterlist
847 * @dir: DMA direction
848 * @attrs: DMA attributes passed to dma_map_sg() (if called)
849 *
850 * Scatterlists mapped with this function should be unmapped using
851 * pci_p2pdma_unmap_sg_attrs().
852 *
853 * Returns the number of SG entries mapped or 0 on error.
854 */
855int pci_p2pdma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
856 int nents, enum dma_data_direction dir, unsigned long attrs)
857{
858 struct pci_p2pdma_pagemap *p2p_pgmap =
859 to_p2p_pgmap(sg_page(sg)->pgmap);
860 struct pci_dev *client;
861
862 if (WARN_ON_ONCE(!dev_is_pci(dev)))
863 return 0;
864
865 client = to_pci_dev(dev);
866
867 switch (pci_p2pdma_map_type(p2p_pgmap->provider, client)) {
868 case PCI_P2PDMA_MAP_THRU_HOST_BRIDGE:
869 return dma_map_sg_attrs(dev, sg, nents, dir, attrs);
870 case PCI_P2PDMA_MAP_BUS_ADDR:
871 return __pci_p2pdma_map_sg(p2p_pgmap, dev, sg, nents);
872 default:
873 WARN_ON_ONCE(1);
874 return 0;
875 }
876}
877EXPORT_SYMBOL_GPL(pci_p2pdma_map_sg_attrs);
878
879/**
880 * pci_p2pdma_unmap_sg - unmap a PCI peer-to-peer scatterlist that was
881 * mapped with pci_p2pdma_map_sg()
882 * @dev: device doing the DMA request
883 * @sg: scatter list to map
884 * @nents: number of elements returned by pci_p2pdma_map_sg()
885 * @dir: DMA direction
886 * @attrs: DMA attributes passed to dma_unmap_sg() (if called)
887 */
888void pci_p2pdma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg,
889 int nents, enum dma_data_direction dir, unsigned long attrs)
890{
891 struct pci_p2pdma_pagemap *p2p_pgmap =
892 to_p2p_pgmap(sg_page(sg)->pgmap);
893 enum pci_p2pdma_map_type map_type;
894 struct pci_dev *client;
895
896 if (WARN_ON_ONCE(!dev_is_pci(dev)))
897 return;
898
899 client = to_pci_dev(dev);
900
901 map_type = pci_p2pdma_map_type(p2p_pgmap->provider, client);
902
903 if (map_type == PCI_P2PDMA_MAP_THRU_HOST_BRIDGE)
904 dma_unmap_sg_attrs(dev, sg, nents, dir, attrs);
905}
906EXPORT_SYMBOL_GPL(pci_p2pdma_unmap_sg_attrs);
749 907
750/** 908/**
751 * pci_p2pdma_enable_store - parse a configfs/sysfs attribute store 909 * pci_p2pdma_enable_store - parse a configfs/sysfs attribute store
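With the per-client mapping type cached at setup time, the map path either falls back to the normal DMA API (for transactions routed through a whitelisted host bridge) or rewrites each scatterlist entry to a PCI bus address, and the new unmap helper mirrors that decision. A hedged usage sketch, where client_dev, p2p_sgl and p2p_nents are placeholders supplied by the caller:

int nents;

nents = pci_p2pdma_map_sg_attrs(client_dev, p2p_sgl, p2p_nents,
				DMA_BIDIRECTIONAL, 0);
if (!nents)
	return -EIO;

/* ... run the peer-to-peer transfer ... */

pci_p2pdma_unmap_sg_attrs(client_dev, p2p_sgl, nents,
			  DMA_BIDIRECTIONAL, 0);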
diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c
index 45049f558860..0c02d500158f 100644
--- a/drivers/pci/pci-acpi.c
+++ b/drivers/pci/pci-acpi.c
@@ -14,7 +14,6 @@
14#include <linux/msi.h> 14#include <linux/msi.h>
15#include <linux/pci_hotplug.h> 15#include <linux/pci_hotplug.h>
16#include <linux/module.h> 16#include <linux/module.h>
17#include <linux/pci-aspm.h>
18#include <linux/pci-acpi.h> 17#include <linux/pci-acpi.h>
19#include <linux/pm_runtime.h> 18#include <linux/pm_runtime.h>
20#include <linux/pm_qos.h> 19#include <linux/pm_qos.h>
@@ -118,8 +117,58 @@ phys_addr_t acpi_pci_root_get_mcfg_addr(acpi_handle handle)
118 return (phys_addr_t)mcfg_addr; 117 return (phys_addr_t)mcfg_addr;
119} 118}
120 119
120/* _HPX PCI Setting Record (Type 0); same as _HPP */
121struct hpx_type0 {
122 u32 revision; /* Not present in _HPP */
123 u8 cache_line_size; /* Not applicable to PCIe */
124 u8 latency_timer; /* Not applicable to PCIe */
125 u8 enable_serr;
126 u8 enable_perr;
127};
128
129static struct hpx_type0 pci_default_type0 = {
130 .revision = 1,
131 .cache_line_size = 8,
132 .latency_timer = 0x40,
133 .enable_serr = 0,
134 .enable_perr = 0,
135};
136
137static void program_hpx_type0(struct pci_dev *dev, struct hpx_type0 *hpx)
138{
139 u16 pci_cmd, pci_bctl;
140
141 if (!hpx)
142 hpx = &pci_default_type0;
143
144 if (hpx->revision > 1) {
145 pci_warn(dev, "PCI settings rev %d not supported; using defaults\n",
146 hpx->revision);
147 hpx = &pci_default_type0;
148 }
149
150 pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, hpx->cache_line_size);
151 pci_write_config_byte(dev, PCI_LATENCY_TIMER, hpx->latency_timer);
152 pci_read_config_word(dev, PCI_COMMAND, &pci_cmd);
153 if (hpx->enable_serr)
154 pci_cmd |= PCI_COMMAND_SERR;
155 if (hpx->enable_perr)
156 pci_cmd |= PCI_COMMAND_PARITY;
157 pci_write_config_word(dev, PCI_COMMAND, pci_cmd);
158
159 /* Program bridge control value */
160 if ((dev->class >> 8) == PCI_CLASS_BRIDGE_PCI) {
161 pci_write_config_byte(dev, PCI_SEC_LATENCY_TIMER,
162 hpx->latency_timer);
163 pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &pci_bctl);
164 if (hpx->enable_perr)
165 pci_bctl |= PCI_BRIDGE_CTL_PARITY;
166 pci_write_config_word(dev, PCI_BRIDGE_CONTROL, pci_bctl);
167 }
168}
169
121static acpi_status decode_type0_hpx_record(union acpi_object *record, 170static acpi_status decode_type0_hpx_record(union acpi_object *record,
122 struct hpp_type0 *hpx0) 171 struct hpx_type0 *hpx0)
123{ 172{
124 int i; 173 int i;
125 union acpi_object *fields = record->package.elements; 174 union acpi_object *fields = record->package.elements;
@@ -146,8 +195,30 @@ static acpi_status decode_type0_hpx_record(union acpi_object *record,
146 return AE_OK; 195 return AE_OK;
147} 196}
148 197
198/* _HPX PCI-X Setting Record (Type 1) */
199struct hpx_type1 {
200 u32 revision;
201 u8 max_mem_read;
202 u8 avg_max_split;
203 u16 tot_max_split;
204};
205
206static void program_hpx_type1(struct pci_dev *dev, struct hpx_type1 *hpx)
207{
208 int pos;
209
210 if (!hpx)
211 return;
212
213 pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
214 if (!pos)
215 return;
216
217 pci_warn(dev, "PCI-X settings not supported\n");
218}
219
149static acpi_status decode_type1_hpx_record(union acpi_object *record, 220static acpi_status decode_type1_hpx_record(union acpi_object *record,
150 struct hpp_type1 *hpx1) 221 struct hpx_type1 *hpx1)
151{ 222{
152 int i; 223 int i;
153 union acpi_object *fields = record->package.elements; 224 union acpi_object *fields = record->package.elements;
@@ -173,8 +244,130 @@ static acpi_status decode_type1_hpx_record(union acpi_object *record,
173 return AE_OK; 244 return AE_OK;
174} 245}
175 246
247static bool pcie_root_rcb_set(struct pci_dev *dev)
248{
249 struct pci_dev *rp = pcie_find_root_port(dev);
250 u16 lnkctl;
251
252 if (!rp)
253 return false;
254
255 pcie_capability_read_word(rp, PCI_EXP_LNKCTL, &lnkctl);
256 if (lnkctl & PCI_EXP_LNKCTL_RCB)
257 return true;
258
259 return false;
260}
261
262/* _HPX PCI Express Setting Record (Type 2) */
263struct hpx_type2 {
264 u32 revision;
265 u32 unc_err_mask_and;
266 u32 unc_err_mask_or;
267 u32 unc_err_sever_and;
268 u32 unc_err_sever_or;
269 u32 cor_err_mask_and;
270 u32 cor_err_mask_or;
271 u32 adv_err_cap_and;
272 u32 adv_err_cap_or;
273 u16 pci_exp_devctl_and;
274 u16 pci_exp_devctl_or;
275 u16 pci_exp_lnkctl_and;
276 u16 pci_exp_lnkctl_or;
277 u32 sec_unc_err_sever_and;
278 u32 sec_unc_err_sever_or;
279 u32 sec_unc_err_mask_and;
280 u32 sec_unc_err_mask_or;
281};
282
283static void program_hpx_type2(struct pci_dev *dev, struct hpx_type2 *hpx)
284{
285 int pos;
286 u32 reg32;
287
288 if (!hpx)
289 return;
290
291 if (!pci_is_pcie(dev))
292 return;
293
294 if (hpx->revision > 1) {
295 pci_warn(dev, "PCIe settings rev %d not supported\n",
296 hpx->revision);
297 return;
298 }
299
300 /*
301 * Don't allow _HPX to change MPS or MRRS settings. We manage
302 * those to make sure they're consistent with the rest of the
303 * platform.
304 */
305 hpx->pci_exp_devctl_and |= PCI_EXP_DEVCTL_PAYLOAD |
306 PCI_EXP_DEVCTL_READRQ;
307 hpx->pci_exp_devctl_or &= ~(PCI_EXP_DEVCTL_PAYLOAD |
308 PCI_EXP_DEVCTL_READRQ);
309
310 /* Initialize Device Control Register */
311 pcie_capability_clear_and_set_word(dev, PCI_EXP_DEVCTL,
312 ~hpx->pci_exp_devctl_and, hpx->pci_exp_devctl_or);
313
314 /* Initialize Link Control Register */
315 if (pcie_cap_has_lnkctl(dev)) {
316
317 /*
318 * If the Root Port supports Read Completion Boundary of
319 * 128, set RCB to 128. Otherwise, clear it.
320 */
321 hpx->pci_exp_lnkctl_and |= PCI_EXP_LNKCTL_RCB;
322 hpx->pci_exp_lnkctl_or &= ~PCI_EXP_LNKCTL_RCB;
323 if (pcie_root_rcb_set(dev))
324 hpx->pci_exp_lnkctl_or |= PCI_EXP_LNKCTL_RCB;
325
326 pcie_capability_clear_and_set_word(dev, PCI_EXP_LNKCTL,
327 ~hpx->pci_exp_lnkctl_and, hpx->pci_exp_lnkctl_or);
328 }
329
330 /* Find Advanced Error Reporting Enhanced Capability */
331 pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
332 if (!pos)
333 return;
334
335 /* Initialize Uncorrectable Error Mask Register */
336 pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_MASK, &reg32);
337 reg32 = (reg32 & hpx->unc_err_mask_and) | hpx->unc_err_mask_or;
338 pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_MASK, reg32);
339
340 /* Initialize Uncorrectable Error Severity Register */
341 pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, &reg32);
342 reg32 = (reg32 & hpx->unc_err_sever_and) | hpx->unc_err_sever_or;
343 pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, reg32);
344
345 /* Initialize Correctable Error Mask Register */
346 pci_read_config_dword(dev, pos + PCI_ERR_COR_MASK, &reg32);
347 reg32 = (reg32 & hpx->cor_err_mask_and) | hpx->cor_err_mask_or;
348 pci_write_config_dword(dev, pos + PCI_ERR_COR_MASK, reg32);
349
350 /* Initialize Advanced Error Capabilities and Control Register */
351 pci_read_config_dword(dev, pos + PCI_ERR_CAP, &reg32);
352 reg32 = (reg32 & hpx->adv_err_cap_and) | hpx->adv_err_cap_or;
353
354 /* Don't enable ECRC generation or checking if unsupported */
355 if (!(reg32 & PCI_ERR_CAP_ECRC_GENC))
356 reg32 &= ~PCI_ERR_CAP_ECRC_GENE;
357 if (!(reg32 & PCI_ERR_CAP_ECRC_CHKC))
358 reg32 &= ~PCI_ERR_CAP_ECRC_CHKE;
359 pci_write_config_dword(dev, pos + PCI_ERR_CAP, reg32);
360
361 /*
362 * FIXME: The following two registers are not supported yet.
363 *
364 * o Secondary Uncorrectable Error Severity Register
365 * o Secondary Uncorrectable Error Mask Register
366 */
367}
368
176static acpi_status decode_type2_hpx_record(union acpi_object *record, 369static acpi_status decode_type2_hpx_record(union acpi_object *record,
177 struct hpp_type2 *hpx2) 370 struct hpx_type2 *hpx2)
178{ 371{
179 int i; 372 int i;
180 union acpi_object *fields = record->package.elements; 373 union acpi_object *fields = record->package.elements;
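Each _HPX type 2 field comes as an AND mask plus an OR value applied as new = (current & and_mask) | or_mask, which is what the pcie_capability_clear_and_set_word(..., ~and, or) calls above implement; forcing the payload and read-request bits into the AND mask and out of the OR value keeps firmware from overriding the kernel's MPS/MRRS choices. A worked example with purely hypothetical register values:

/*
 * current DEVCTL    = 0x2810
 * devctl_and (mask) = 0xfffe, devctl_or = 0x0001
 * new DEVCTL        = (0x2810 & 0xfffe) | 0x0001 = 0x2811
 */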
@@ -213,6 +406,164 @@ static acpi_status decode_type2_hpx_record(union acpi_object *record,
213 return AE_OK; 406 return AE_OK;
214} 407}
215 408
409/* _HPX PCI Express Setting Record (Type 3) */
410struct hpx_type3 {
411 u16 device_type;
412 u16 function_type;
413 u16 config_space_location;
414 u16 pci_exp_cap_id;
415 u16 pci_exp_cap_ver;
416 u16 pci_exp_vendor_id;
417 u16 dvsec_id;
418 u16 dvsec_rev;
419 u16 match_offset;
420 u32 match_mask_and;
421 u32 match_value;
422 u16 reg_offset;
423 u32 reg_mask_and;
424 u32 reg_mask_or;
425};
426
427enum hpx_type3_dev_type {
428 HPX_TYPE_ENDPOINT = BIT(0),
429 HPX_TYPE_LEG_END = BIT(1),
430 HPX_TYPE_RC_END = BIT(2),
431 HPX_TYPE_RC_EC = BIT(3),
432 HPX_TYPE_ROOT_PORT = BIT(4),
433 HPX_TYPE_UPSTREAM = BIT(5),
434 HPX_TYPE_DOWNSTREAM = BIT(6),
435 HPX_TYPE_PCI_BRIDGE = BIT(7),
436 HPX_TYPE_PCIE_BRIDGE = BIT(8),
437};
438
439static u16 hpx3_device_type(struct pci_dev *dev)
440{
441 u16 pcie_type = pci_pcie_type(dev);
442 const int pcie_to_hpx3_type[] = {
443 [PCI_EXP_TYPE_ENDPOINT] = HPX_TYPE_ENDPOINT,
444 [PCI_EXP_TYPE_LEG_END] = HPX_TYPE_LEG_END,
445 [PCI_EXP_TYPE_RC_END] = HPX_TYPE_RC_END,
446 [PCI_EXP_TYPE_RC_EC] = HPX_TYPE_RC_EC,
447 [PCI_EXP_TYPE_ROOT_PORT] = HPX_TYPE_ROOT_PORT,
448 [PCI_EXP_TYPE_UPSTREAM] = HPX_TYPE_UPSTREAM,
449 [PCI_EXP_TYPE_DOWNSTREAM] = HPX_TYPE_DOWNSTREAM,
450 [PCI_EXP_TYPE_PCI_BRIDGE] = HPX_TYPE_PCI_BRIDGE,
451 [PCI_EXP_TYPE_PCIE_BRIDGE] = HPX_TYPE_PCIE_BRIDGE,
452 };
453
454 if (pcie_type >= ARRAY_SIZE(pcie_to_hpx3_type))
455 return 0;
456
457 return pcie_to_hpx3_type[pcie_type];
458}
459
460enum hpx_type3_fn_type {
461 HPX_FN_NORMAL = BIT(0),
462 HPX_FN_SRIOV_PHYS = BIT(1),
463 HPX_FN_SRIOV_VIRT = BIT(2),
464};
465
466static u8 hpx3_function_type(struct pci_dev *dev)
467{
468 if (dev->is_virtfn)
469 return HPX_FN_SRIOV_VIRT;
470 else if (pci_find_ext_capability(dev, PCI_EXT_CAP_ID_SRIOV) > 0)
471 return HPX_FN_SRIOV_PHYS;
472 else
473 return HPX_FN_NORMAL;
474}
475
476static bool hpx3_cap_ver_matches(u8 pcie_cap_id, u8 hpx3_cap_id)
477{
478 u8 cap_ver = hpx3_cap_id & 0xf;
479
480 if ((hpx3_cap_id & BIT(4)) && cap_ver >= pcie_cap_id)
481 return true;
482 else if (cap_ver == pcie_cap_id)
483 return true;
484
485 return false;
486}
487
488enum hpx_type3_cfg_loc {
489 HPX_CFG_PCICFG = 0,
490 HPX_CFG_PCIE_CAP = 1,
491 HPX_CFG_PCIE_CAP_EXT = 2,
492 HPX_CFG_VEND_CAP = 3,
493 HPX_CFG_DVSEC = 4,
494 HPX_CFG_MAX,
495};
496
497static void program_hpx_type3_register(struct pci_dev *dev,
498 const struct hpx_type3 *reg)
499{
500 u32 match_reg, write_reg, header, orig_value;
501 u16 pos;
502
503 if (!(hpx3_device_type(dev) & reg->device_type))
504 return;
505
506 if (!(hpx3_function_type(dev) & reg->function_type))
507 return;
508
509 switch (reg->config_space_location) {
510 case HPX_CFG_PCICFG:
511 pos = 0;
512 break;
513 case HPX_CFG_PCIE_CAP:
514 pos = pci_find_capability(dev, reg->pci_exp_cap_id);
515 if (pos == 0)
516 return;
517
518 break;
519 case HPX_CFG_PCIE_CAP_EXT:
520 pos = pci_find_ext_capability(dev, reg->pci_exp_cap_id);
521 if (pos == 0)
522 return;
523
524 pci_read_config_dword(dev, pos, &header);
525 if (!hpx3_cap_ver_matches(PCI_EXT_CAP_VER(header),
526 reg->pci_exp_cap_ver))
527 return;
528
529 break;
530 case HPX_CFG_VEND_CAP: /* Fall through */
531 case HPX_CFG_DVSEC: /* Fall through */
532 default:
533 pci_warn(dev, "Encountered _HPX type 3 with unsupported config space location");
534 return;
535 }
536
537 pci_read_config_dword(dev, pos + reg->match_offset, &match_reg);
538
539 if ((match_reg & reg->match_mask_and) != reg->match_value)
540 return;
541
542 pci_read_config_dword(dev, pos + reg->reg_offset, &write_reg);
543 orig_value = write_reg;
544 write_reg &= reg->reg_mask_and;
545 write_reg |= reg->reg_mask_or;
546
547 if (orig_value == write_reg)
548 return;
549
550 pci_write_config_dword(dev, pos + reg->reg_offset, write_reg);
551
552 pci_dbg(dev, "Applied _HPX3 at [0x%x]: 0x%08x -> 0x%08x",
553 pos, orig_value, write_reg);
554}
555
556static void program_hpx_type3(struct pci_dev *dev, struct hpx_type3 *hpx)
557{
558 if (!hpx)
559 return;
560
561 if (!pci_is_pcie(dev))
562 return;
563
564 program_hpx_type3_register(dev, hpx);
565}
566
216static void parse_hpx3_register(struct hpx_type3 *hpx3_reg, 567static void parse_hpx3_register(struct hpx_type3 *hpx3_reg,
217 union acpi_object *reg_fields) 568 union acpi_object *reg_fields)
218{ 569{
@@ -233,8 +584,7 @@ static void parse_hpx3_register(struct hpx_type3 *hpx3_reg,
233} 584}
234 585
235static acpi_status program_type3_hpx_record(struct pci_dev *dev, 586static acpi_status program_type3_hpx_record(struct pci_dev *dev,
236 union acpi_object *record, 587 union acpi_object *record)
237 const struct hotplug_program_ops *hp_ops)
238{ 588{
239 union acpi_object *fields = record->package.elements; 589 union acpi_object *fields = record->package.elements;
240 u32 desc_count, expected_length, revision; 590 u32 desc_count, expected_length, revision;
@@ -258,7 +608,7 @@ static acpi_status program_type3_hpx_record(struct pci_dev *dev,
258 for (i = 0; i < desc_count; i++) { 608 for (i = 0; i < desc_count; i++) {
259 reg_fields = fields + 3 + i * 14; 609 reg_fields = fields + 3 + i * 14;
260 parse_hpx3_register(&hpx3, reg_fields); 610 parse_hpx3_register(&hpx3, reg_fields);
261 hp_ops->program_type3(dev, &hpx3); 611 program_hpx_type3(dev, &hpx3);
262 } 612 }
263 613
264 break; 614 break;
@@ -271,15 +621,14 @@ static acpi_status program_type3_hpx_record(struct pci_dev *dev,
271 return AE_OK; 621 return AE_OK;
272} 622}
273 623
274static acpi_status acpi_run_hpx(struct pci_dev *dev, acpi_handle handle, 624static acpi_status acpi_run_hpx(struct pci_dev *dev, acpi_handle handle)
275 const struct hotplug_program_ops *hp_ops)
276{ 625{
277 acpi_status status; 626 acpi_status status;
278 struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL}; 627 struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL};
279 union acpi_object *package, *record, *fields; 628 union acpi_object *package, *record, *fields;
280 struct hpp_type0 hpx0; 629 struct hpx_type0 hpx0;
281 struct hpp_type1 hpx1; 630 struct hpx_type1 hpx1;
282 struct hpp_type2 hpx2; 631 struct hpx_type2 hpx2;
283 u32 type; 632 u32 type;
284 int i; 633 int i;
285 634
@@ -314,24 +663,24 @@ static acpi_status acpi_run_hpx(struct pci_dev *dev, acpi_handle handle,
314 status = decode_type0_hpx_record(record, &hpx0); 663 status = decode_type0_hpx_record(record, &hpx0);
315 if (ACPI_FAILURE(status)) 664 if (ACPI_FAILURE(status))
316 goto exit; 665 goto exit;
317 hp_ops->program_type0(dev, &hpx0); 666 program_hpx_type0(dev, &hpx0);
318 break; 667 break;
319 case 1: 668 case 1:
320 memset(&hpx1, 0, sizeof(hpx1)); 669 memset(&hpx1, 0, sizeof(hpx1));
321 status = decode_type1_hpx_record(record, &hpx1); 670 status = decode_type1_hpx_record(record, &hpx1);
322 if (ACPI_FAILURE(status)) 671 if (ACPI_FAILURE(status))
323 goto exit; 672 goto exit;
324 hp_ops->program_type1(dev, &hpx1); 673 program_hpx_type1(dev, &hpx1);
325 break; 674 break;
326 case 2: 675 case 2:
327 memset(&hpx2, 0, sizeof(hpx2)); 676 memset(&hpx2, 0, sizeof(hpx2));
328 status = decode_type2_hpx_record(record, &hpx2); 677 status = decode_type2_hpx_record(record, &hpx2);
329 if (ACPI_FAILURE(status)) 678 if (ACPI_FAILURE(status))
330 goto exit; 679 goto exit;
331 hp_ops->program_type2(dev, &hpx2); 680 program_hpx_type2(dev, &hpx2);
332 break; 681 break;
333 case 3: 682 case 3:
334 status = program_type3_hpx_record(dev, record, hp_ops); 683 status = program_type3_hpx_record(dev, record);
335 if (ACPI_FAILURE(status)) 684 if (ACPI_FAILURE(status))
336 goto exit; 685 goto exit;
337 break; 686 break;
@@ -347,16 +696,15 @@ static acpi_status acpi_run_hpx(struct pci_dev *dev, acpi_handle handle,
347 return status; 696 return status;
348} 697}
349 698
350static acpi_status acpi_run_hpp(struct pci_dev *dev, acpi_handle handle, 699static acpi_status acpi_run_hpp(struct pci_dev *dev, acpi_handle handle)
351 const struct hotplug_program_ops *hp_ops)
352{ 700{
353 acpi_status status; 701 acpi_status status;
354 struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; 702 struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
355 union acpi_object *package, *fields; 703 union acpi_object *package, *fields;
356 struct hpp_type0 hpp0; 704 struct hpx_type0 hpx0;
357 int i; 705 int i;
358 706
359 memset(&hpp0, 0, sizeof(hpp0)); 707 memset(&hpx0, 0, sizeof(hpx0));
360 708
361 status = acpi_evaluate_object(handle, "_HPP", NULL, &buffer); 709 status = acpi_evaluate_object(handle, "_HPP", NULL, &buffer);
362 if (ACPI_FAILURE(status)) 710 if (ACPI_FAILURE(status))
@@ -377,26 +725,24 @@ static acpi_status acpi_run_hpp(struct pci_dev *dev, acpi_handle handle,
377 } 725 }
378 } 726 }
379 727
380 hpp0.revision = 1; 728 hpx0.revision = 1;
381 hpp0.cache_line_size = fields[0].integer.value; 729 hpx0.cache_line_size = fields[0].integer.value;
382 hpp0.latency_timer = fields[1].integer.value; 730 hpx0.latency_timer = fields[1].integer.value;
383 hpp0.enable_serr = fields[2].integer.value; 731 hpx0.enable_serr = fields[2].integer.value;
384 hpp0.enable_perr = fields[3].integer.value; 732 hpx0.enable_perr = fields[3].integer.value;
385 733
386 hp_ops->program_type0(dev, &hpp0); 734 program_hpx_type0(dev, &hpx0);
387 735
388exit: 736exit:
389 kfree(buffer.pointer); 737 kfree(buffer.pointer);
390 return status; 738 return status;
391} 739}
392 740
393/* pci_get_hp_params 741/* pci_acpi_program_hp_params
394 * 742 *
395 * @dev - the pci_dev for which we want parameters 743 * @dev - the pci_dev for which we want parameters
396 * @hpp - allocated by the caller
397 */ 744 */
398int pci_acpi_program_hp_params(struct pci_dev *dev, 745int pci_acpi_program_hp_params(struct pci_dev *dev)
399 const struct hotplug_program_ops *hp_ops)
400{ 746{
401 acpi_status status; 747 acpi_status status;
402 acpi_handle handle, phandle; 748 acpi_handle handle, phandle;
@@ -419,10 +765,10 @@ int pci_acpi_program_hp_params(struct pci_dev *dev,
419 * this pci dev. 765 * this pci dev.
420 */ 766 */
421 while (handle) { 767 while (handle) {
422 status = acpi_run_hpx(dev, handle, hp_ops); 768 status = acpi_run_hpx(dev, handle);
423 if (ACPI_SUCCESS(status)) 769 if (ACPI_SUCCESS(status))
424 return 0; 770 return 0;
425 status = acpi_run_hpp(dev, handle, hp_ops); 771 status = acpi_run_hpp(dev, handle);
426 if (ACPI_SUCCESS(status)) 772 if (ACPI_SUCCESS(status))
427 return 0; 773 return 0;
428 if (acpi_is_root_bridge(handle)) 774 if (acpi_is_root_bridge(handle))
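For reference, a minimal user-space sketch of the match-and-program pattern that program_hpx_type3_register() above implements: a target register is rewritten only when a masked "match" register equals an expected value, and the write itself is a masked read-modify-write that skips no-op updates. This is not kernel code; the fake config-space array, the offsets and the mask values are invented for illustration, and cfg_read()/cfg_write() merely stand in for pci_read_config_dword()/pci_write_config_dword().

#include <stdint.h>
#include <stdio.h>

struct hpx3_reg {				/* fields mirror struct hpx_type3 above */
	uint16_t match_offset, reg_offset;
	uint32_t match_mask_and, match_value;
	uint32_t reg_mask_and, reg_mask_or;
};

static uint32_t cfg[64];			/* fake config space, dword indexed */

static uint32_t cfg_read(uint16_t off)          { return cfg[off / 4]; }
static void cfg_write(uint16_t off, uint32_t v) { cfg[off / 4] = v; }

static void apply_hpx3(const struct hpx3_reg *r)
{
	uint32_t match = cfg_read(r->match_offset);

	if ((match & r->match_mask_and) != r->match_value)
		return;				/* record does not apply to this device */

	uint32_t orig = cfg_read(r->reg_offset);
	uint32_t val = (orig & r->reg_mask_and) | r->reg_mask_or;

	if (val != orig)			/* skip writes that would change nothing */
		cfg_write(r->reg_offset, val);
}

int main(void)
{
	struct hpx3_reg r = {
		.match_offset = 0x08, .match_mask_and = 0xff, .match_value = 0x42,
		.reg_offset = 0x0c, .reg_mask_and = ~0x7u, .reg_mask_or = 0x3,
	};

	cfg[0x08 / 4] = 0x42;			/* make the match succeed */
	cfg[0x0c / 4] = 0x10;
	apply_hpx3(&r);
	printf("0x%08x\n", (unsigned)cfg[0x0c / 4]);	/* prints 0x00000013 */
	return 0;
}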
diff --git a/drivers/pci/pci-bridge-emul.c b/drivers/pci/pci-bridge-emul.c
index 06083b86d4f4..5fd90105510d 100644
--- a/drivers/pci/pci-bridge-emul.c
+++ b/drivers/pci/pci-bridge-emul.c
@@ -38,7 +38,7 @@ struct pci_bridge_reg_behavior {
38 u32 rsvd; 38 u32 rsvd;
39}; 39};
40 40
41const static struct pci_bridge_reg_behavior pci_regs_behavior[] = { 41static const struct pci_bridge_reg_behavior pci_regs_behavior[] = {
42 [PCI_VENDOR_ID / 4] = { .ro = ~0 }, 42 [PCI_VENDOR_ID / 4] = { .ro = ~0 },
43 [PCI_COMMAND / 4] = { 43 [PCI_COMMAND / 4] = {
44 .rw = (PCI_COMMAND_IO | PCI_COMMAND_MEMORY | 44 .rw = (PCI_COMMAND_IO | PCI_COMMAND_MEMORY |
@@ -173,7 +173,7 @@ const static struct pci_bridge_reg_behavior pci_regs_behavior[] = {
173 }, 173 },
174}; 174};
175 175
176const static struct pci_bridge_reg_behavior pcie_cap_regs_behavior[] = { 176static const struct pci_bridge_reg_behavior pcie_cap_regs_behavior[] = {
177 [PCI_CAP_LIST_ID / 4] = { 177 [PCI_CAP_LIST_ID / 4] = {
178 /* 178 /*
179 * Capability ID, Next Capability Pointer and 179 * Capability ID, Next Capability Pointer and
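A brief aside on the two one-line changes above, with a small illustrative snippet: both orderings are accepted by C compilers, but placing the storage-class specifier first is the conventional kernel style (and what checkpatch.pl flags); the C standard even lists non-initial placement of storage-class specifiers as an obsolescent feature (C99/C11 sec 6.11.5). The identifiers below are made up for the example.

/*
 * Same meaning either way; the kernel prefers the storage-class
 * specifier first, which is all these hunks change.
 */
static const int preferred_order = 1;	/* what the hunks above switch to */
const  static int legacy_order   = 1;	/* accepted, but flagged by checkpatch */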
diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
index 965c72104150..868e35109284 100644
--- a/drivers/pci/pci-sysfs.c
+++ b/drivers/pci/pci-sysfs.c
@@ -464,9 +464,7 @@ static ssize_t dev_rescan_store(struct device *dev,
464 } 464 }
465 return count; 465 return count;
466} 466}
467static struct device_attribute dev_rescan_attr = __ATTR(rescan, 467static DEVICE_ATTR_WO(dev_rescan);
468 (S_IWUSR|S_IWGRP),
469 NULL, dev_rescan_store);
470 468
471static ssize_t remove_store(struct device *dev, struct device_attribute *attr, 469static ssize_t remove_store(struct device *dev, struct device_attribute *attr,
472 const char *buf, size_t count) 470 const char *buf, size_t count)
@@ -480,13 +478,12 @@ static ssize_t remove_store(struct device *dev, struct device_attribute *attr,
480 pci_stop_and_remove_bus_device_locked(to_pci_dev(dev)); 478 pci_stop_and_remove_bus_device_locked(to_pci_dev(dev));
481 return count; 479 return count;
482} 480}
483static struct device_attribute dev_remove_attr = __ATTR_IGNORE_LOCKDEP(remove, 481static DEVICE_ATTR_IGNORE_LOCKDEP(remove, 0220, NULL,
484 (S_IWUSR|S_IWGRP), 482 remove_store);
485 NULL, remove_store);
486 483
487static ssize_t dev_bus_rescan_store(struct device *dev, 484static ssize_t bus_rescan_store(struct device *dev,
488 struct device_attribute *attr, 485 struct device_attribute *attr,
489 const char *buf, size_t count) 486 const char *buf, size_t count)
490{ 487{
491 unsigned long val; 488 unsigned long val;
492 struct pci_bus *bus = to_pci_bus(dev); 489 struct pci_bus *bus = to_pci_bus(dev);
@@ -504,7 +501,7 @@ static ssize_t dev_bus_rescan_store(struct device *dev,
504 } 501 }
505 return count; 502 return count;
506} 503}
507static DEVICE_ATTR(rescan, (S_IWUSR|S_IWGRP), NULL, dev_bus_rescan_store); 504static DEVICE_ATTR_WO(bus_rescan);
508 505
509#if defined(CONFIG_PM) && defined(CONFIG_ACPI) 506#if defined(CONFIG_PM) && defined(CONFIG_ACPI)
510static ssize_t d3cold_allowed_store(struct device *dev, 507static ssize_t d3cold_allowed_store(struct device *dev,
@@ -551,154 +548,6 @@ static ssize_t devspec_show(struct device *dev,
551static DEVICE_ATTR_RO(devspec); 548static DEVICE_ATTR_RO(devspec);
552#endif 549#endif
553 550
554#ifdef CONFIG_PCI_IOV
555static ssize_t sriov_totalvfs_show(struct device *dev,
556 struct device_attribute *attr,
557 char *buf)
558{
559 struct pci_dev *pdev = to_pci_dev(dev);
560
561 return sprintf(buf, "%u\n", pci_sriov_get_totalvfs(pdev));
562}
563
564
565static ssize_t sriov_numvfs_show(struct device *dev,
566 struct device_attribute *attr,
567 char *buf)
568{
569 struct pci_dev *pdev = to_pci_dev(dev);
570
571 return sprintf(buf, "%u\n", pdev->sriov->num_VFs);
572}
573
574/*
575 * num_vfs > 0; number of VFs to enable
576 * num_vfs = 0; disable all VFs
577 *
578 * Note: SRIOV spec doesn't allow partial VF
579 * disable, so it's all or none.
580 */
581static ssize_t sriov_numvfs_store(struct device *dev,
582 struct device_attribute *attr,
583 const char *buf, size_t count)
584{
585 struct pci_dev *pdev = to_pci_dev(dev);
586 int ret;
587 u16 num_vfs;
588
589 ret = kstrtou16(buf, 0, &num_vfs);
590 if (ret < 0)
591 return ret;
592
593 if (num_vfs > pci_sriov_get_totalvfs(pdev))
594 return -ERANGE;
595
596 device_lock(&pdev->dev);
597
598 if (num_vfs == pdev->sriov->num_VFs)
599 goto exit;
600
601 /* is PF driver loaded w/callback */
602 if (!pdev->driver || !pdev->driver->sriov_configure) {
603 pci_info(pdev, "Driver doesn't support SRIOV configuration via sysfs\n");
604 ret = -ENOENT;
605 goto exit;
606 }
607
608 if (num_vfs == 0) {
609 /* disable VFs */
610 ret = pdev->driver->sriov_configure(pdev, 0);
611 goto exit;
612 }
613
614 /* enable VFs */
615 if (pdev->sriov->num_VFs) {
616 pci_warn(pdev, "%d VFs already enabled. Disable before enabling %d VFs\n",
617 pdev->sriov->num_VFs, num_vfs);
618 ret = -EBUSY;
619 goto exit;
620 }
621
622 ret = pdev->driver->sriov_configure(pdev, num_vfs);
623 if (ret < 0)
624 goto exit;
625
626 if (ret != num_vfs)
627 pci_warn(pdev, "%d VFs requested; only %d enabled\n",
628 num_vfs, ret);
629
630exit:
631 device_unlock(&pdev->dev);
632
633 if (ret < 0)
634 return ret;
635
636 return count;
637}
638
639static ssize_t sriov_offset_show(struct device *dev,
640 struct device_attribute *attr,
641 char *buf)
642{
643 struct pci_dev *pdev = to_pci_dev(dev);
644
645 return sprintf(buf, "%u\n", pdev->sriov->offset);
646}
647
648static ssize_t sriov_stride_show(struct device *dev,
649 struct device_attribute *attr,
650 char *buf)
651{
652 struct pci_dev *pdev = to_pci_dev(dev);
653
654 return sprintf(buf, "%u\n", pdev->sriov->stride);
655}
656
657static ssize_t sriov_vf_device_show(struct device *dev,
658 struct device_attribute *attr,
659 char *buf)
660{
661 struct pci_dev *pdev = to_pci_dev(dev);
662
663 return sprintf(buf, "%x\n", pdev->sriov->vf_device);
664}
665
666static ssize_t sriov_drivers_autoprobe_show(struct device *dev,
667 struct device_attribute *attr,
668 char *buf)
669{
670 struct pci_dev *pdev = to_pci_dev(dev);
671
672 return sprintf(buf, "%u\n", pdev->sriov->drivers_autoprobe);
673}
674
675static ssize_t sriov_drivers_autoprobe_store(struct device *dev,
676 struct device_attribute *attr,
677 const char *buf, size_t count)
678{
679 struct pci_dev *pdev = to_pci_dev(dev);
680 bool drivers_autoprobe;
681
682 if (kstrtobool(buf, &drivers_autoprobe) < 0)
683 return -EINVAL;
684
685 pdev->sriov->drivers_autoprobe = drivers_autoprobe;
686
687 return count;
688}
689
690static struct device_attribute sriov_totalvfs_attr = __ATTR_RO(sriov_totalvfs);
691static struct device_attribute sriov_numvfs_attr =
692 __ATTR(sriov_numvfs, (S_IRUGO|S_IWUSR|S_IWGRP),
693 sriov_numvfs_show, sriov_numvfs_store);
694static struct device_attribute sriov_offset_attr = __ATTR_RO(sriov_offset);
695static struct device_attribute sriov_stride_attr = __ATTR_RO(sriov_stride);
696static struct device_attribute sriov_vf_device_attr = __ATTR_RO(sriov_vf_device);
697static struct device_attribute sriov_drivers_autoprobe_attr =
698 __ATTR(sriov_drivers_autoprobe, (S_IRUGO|S_IWUSR|S_IWGRP),
699 sriov_drivers_autoprobe_show, sriov_drivers_autoprobe_store);
700#endif /* CONFIG_PCI_IOV */
701
702static ssize_t driver_override_store(struct device *dev, 551static ssize_t driver_override_store(struct device *dev,
703 struct device_attribute *attr, 552 struct device_attribute *attr,
704 const char *buf, size_t count) 553 const char *buf, size_t count)
@@ -792,7 +641,7 @@ static struct attribute *pcie_dev_attrs[] = {
792}; 641};
793 642
794static struct attribute *pcibus_attrs[] = { 643static struct attribute *pcibus_attrs[] = {
795 &dev_attr_rescan.attr, 644 &dev_attr_bus_rescan.attr,
796 &dev_attr_cpuaffinity.attr, 645 &dev_attr_cpuaffinity.attr,
797 &dev_attr_cpulistaffinity.attr, 646 &dev_attr_cpulistaffinity.attr,
798 NULL, 647 NULL,
@@ -820,7 +669,7 @@ static ssize_t boot_vga_show(struct device *dev, struct device_attribute *attr,
820 !!(pdev->resource[PCI_ROM_RESOURCE].flags & 669 !!(pdev->resource[PCI_ROM_RESOURCE].flags &
821 IORESOURCE_ROM_SHADOW)); 670 IORESOURCE_ROM_SHADOW));
822} 671}
823static struct device_attribute vga_attr = __ATTR_RO(boot_vga); 672static DEVICE_ATTR_RO(boot_vga);
824 673
825static ssize_t pci_read_config(struct file *filp, struct kobject *kobj, 674static ssize_t pci_read_config(struct file *filp, struct kobject *kobj,
826 struct bin_attribute *bin_attr, char *buf, 675 struct bin_attribute *bin_attr, char *buf,
@@ -1085,7 +934,7 @@ void pci_create_legacy_files(struct pci_bus *b)
1085 sysfs_bin_attr_init(b->legacy_io); 934 sysfs_bin_attr_init(b->legacy_io);
1086 b->legacy_io->attr.name = "legacy_io"; 935 b->legacy_io->attr.name = "legacy_io";
1087 b->legacy_io->size = 0xffff; 936 b->legacy_io->size = 0xffff;
1088 b->legacy_io->attr.mode = S_IRUSR | S_IWUSR; 937 b->legacy_io->attr.mode = 0600;
1089 b->legacy_io->read = pci_read_legacy_io; 938 b->legacy_io->read = pci_read_legacy_io;
1090 b->legacy_io->write = pci_write_legacy_io; 939 b->legacy_io->write = pci_write_legacy_io;
1091 b->legacy_io->mmap = pci_mmap_legacy_io; 940 b->legacy_io->mmap = pci_mmap_legacy_io;
@@ -1099,7 +948,7 @@ void pci_create_legacy_files(struct pci_bus *b)
1099 sysfs_bin_attr_init(b->legacy_mem); 948 sysfs_bin_attr_init(b->legacy_mem);
1100 b->legacy_mem->attr.name = "legacy_mem"; 949 b->legacy_mem->attr.name = "legacy_mem";
1101 b->legacy_mem->size = 1024*1024; 950 b->legacy_mem->size = 1024*1024;
1102 b->legacy_mem->attr.mode = S_IRUSR | S_IWUSR; 951 b->legacy_mem->attr.mode = 0600;
1103 b->legacy_mem->mmap = pci_mmap_legacy_mem; 952 b->legacy_mem->mmap = pci_mmap_legacy_mem;
1104 pci_adjust_legacy_attr(b, pci_mmap_mem); 953 pci_adjust_legacy_attr(b, pci_mmap_mem);
1105 error = device_create_bin_file(&b->dev, b->legacy_mem); 954 error = device_create_bin_file(&b->dev, b->legacy_mem);
@@ -1306,7 +1155,7 @@ static int pci_create_attr(struct pci_dev *pdev, int num, int write_combine)
1306 } 1155 }
1307 } 1156 }
1308 res_attr->attr.name = res_attr_name; 1157 res_attr->attr.name = res_attr_name;
1309 res_attr->attr.mode = S_IRUSR | S_IWUSR; 1158 res_attr->attr.mode = 0600;
1310 res_attr->size = pci_resource_len(pdev, num); 1159 res_attr->size = pci_resource_len(pdev, num);
1311 res_attr->private = (void *)(unsigned long)num; 1160 res_attr->private = (void *)(unsigned long)num;
1312 retval = sysfs_create_bin_file(&pdev->dev.kobj, res_attr); 1161 retval = sysfs_create_bin_file(&pdev->dev.kobj, res_attr);
@@ -1419,7 +1268,7 @@ static ssize_t pci_read_rom(struct file *filp, struct kobject *kobj,
1419static const struct bin_attribute pci_config_attr = { 1268static const struct bin_attribute pci_config_attr = {
1420 .attr = { 1269 .attr = {
1421 .name = "config", 1270 .name = "config",
1422 .mode = S_IRUGO | S_IWUSR, 1271 .mode = 0644,
1423 }, 1272 },
1424 .size = PCI_CFG_SPACE_SIZE, 1273 .size = PCI_CFG_SPACE_SIZE,
1425 .read = pci_read_config, 1274 .read = pci_read_config,
@@ -1429,7 +1278,7 @@ static const struct bin_attribute pci_config_attr = {
1429static const struct bin_attribute pcie_config_attr = { 1278static const struct bin_attribute pcie_config_attr = {
1430 .attr = { 1279 .attr = {
1431 .name = "config", 1280 .name = "config",
1432 .mode = S_IRUGO | S_IWUSR, 1281 .mode = 0644,
1433 }, 1282 },
1434 .size = PCI_CFG_SPACE_EXP_SIZE, 1283 .size = PCI_CFG_SPACE_EXP_SIZE,
1435 .read = pci_read_config, 1284 .read = pci_read_config,
@@ -1458,7 +1307,7 @@ static ssize_t reset_store(struct device *dev, struct device_attribute *attr,
1458 return count; 1307 return count;
1459} 1308}
1460 1309
1461static struct device_attribute reset_attr = __ATTR(reset, 0200, NULL, reset_store); 1310static DEVICE_ATTR(reset, 0200, NULL, reset_store);
1462 1311
1463static int pci_create_capabilities_sysfs(struct pci_dev *dev) 1312static int pci_create_capabilities_sysfs(struct pci_dev *dev)
1464{ 1313{
@@ -1468,7 +1317,7 @@ static int pci_create_capabilities_sysfs(struct pci_dev *dev)
1468 pcie_aspm_create_sysfs_dev_files(dev); 1317 pcie_aspm_create_sysfs_dev_files(dev);
1469 1318
1470 if (dev->reset_fn) { 1319 if (dev->reset_fn) {
1471 retval = device_create_file(&dev->dev, &reset_attr); 1320 retval = device_create_file(&dev->dev, &dev_attr_reset);
1472 if (retval) 1321 if (retval)
1473 goto error; 1322 goto error;
1474 } 1323 }
@@ -1511,7 +1360,7 @@ int __must_check pci_create_sysfs_dev_files(struct pci_dev *pdev)
1511 sysfs_bin_attr_init(attr); 1360 sysfs_bin_attr_init(attr);
1512 attr->size = rom_size; 1361 attr->size = rom_size;
1513 attr->attr.name = "rom"; 1362 attr->attr.name = "rom";
1514 attr->attr.mode = S_IRUSR | S_IWUSR; 1363 attr->attr.mode = 0600;
1515 attr->read = pci_read_rom; 1364 attr->read = pci_read_rom;
1516 attr->write = pci_write_rom; 1365 attr->write = pci_write_rom;
1517 retval = sysfs_create_bin_file(&pdev->dev.kobj, attr); 1366 retval = sysfs_create_bin_file(&pdev->dev.kobj, attr);
@@ -1553,7 +1402,7 @@ static void pci_remove_capabilities_sysfs(struct pci_dev *dev)
1553 pcie_vpd_remove_sysfs_dev_files(dev); 1402 pcie_vpd_remove_sysfs_dev_files(dev);
1554 pcie_aspm_remove_sysfs_dev_files(dev); 1403 pcie_aspm_remove_sysfs_dev_files(dev);
1555 if (dev->reset_fn) { 1404 if (dev->reset_fn) {
1556 device_remove_file(&dev->dev, &reset_attr); 1405 device_remove_file(&dev->dev, &dev_attr_reset);
1557 dev->reset_fn = 0; 1406 dev->reset_fn = 0;
1558 } 1407 }
1559} 1408}
@@ -1606,7 +1455,7 @@ static int __init pci_sysfs_init(void)
1606late_initcall(pci_sysfs_init); 1455late_initcall(pci_sysfs_init);
1607 1456
1608static struct attribute *pci_dev_dev_attrs[] = { 1457static struct attribute *pci_dev_dev_attrs[] = {
1609 &vga_attr.attr, 1458 &dev_attr_boot_vga.attr,
1610 NULL, 1459 NULL,
1611}; 1460};
1612 1461
@@ -1616,7 +1465,7 @@ static umode_t pci_dev_attrs_are_visible(struct kobject *kobj,
1616 struct device *dev = kobj_to_dev(kobj); 1465 struct device *dev = kobj_to_dev(kobj);
1617 struct pci_dev *pdev = to_pci_dev(dev); 1466 struct pci_dev *pdev = to_pci_dev(dev);
1618 1467
1619 if (a == &vga_attr.attr) 1468 if (a == &dev_attr_boot_vga.attr)
1620 if ((pdev->class >> 8) != PCI_CLASS_DISPLAY_VGA) 1469 if ((pdev->class >> 8) != PCI_CLASS_DISPLAY_VGA)
1621 return 0; 1470 return 0;
1622 1471
@@ -1624,8 +1473,8 @@ static umode_t pci_dev_attrs_are_visible(struct kobject *kobj,
1624} 1473}
1625 1474
1626static struct attribute *pci_dev_hp_attrs[] = { 1475static struct attribute *pci_dev_hp_attrs[] = {
1627 &dev_remove_attr.attr, 1476 &dev_attr_remove.attr,
1628 &dev_rescan_attr.attr, 1477 &dev_attr_dev_rescan.attr,
1629 NULL, 1478 NULL,
1630}; 1479};
1631 1480
@@ -1697,34 +1546,6 @@ static const struct attribute_group pci_dev_hp_attr_group = {
1697 .is_visible = pci_dev_hp_attrs_are_visible, 1546 .is_visible = pci_dev_hp_attrs_are_visible,
1698}; 1547};
1699 1548
1700#ifdef CONFIG_PCI_IOV
1701static struct attribute *sriov_dev_attrs[] = {
1702 &sriov_totalvfs_attr.attr,
1703 &sriov_numvfs_attr.attr,
1704 &sriov_offset_attr.attr,
1705 &sriov_stride_attr.attr,
1706 &sriov_vf_device_attr.attr,
1707 &sriov_drivers_autoprobe_attr.attr,
1708 NULL,
1709};
1710
1711static umode_t sriov_attrs_are_visible(struct kobject *kobj,
1712 struct attribute *a, int n)
1713{
1714 struct device *dev = kobj_to_dev(kobj);
1715
1716 if (!dev_is_pf(dev))
1717 return 0;
1718
1719 return a->mode;
1720}
1721
1722static const struct attribute_group sriov_dev_attr_group = {
1723 .attrs = sriov_dev_attrs,
1724 .is_visible = sriov_attrs_are_visible,
1725};
1726#endif /* CONFIG_PCI_IOV */
1727
1728static const struct attribute_group pci_dev_attr_group = { 1549static const struct attribute_group pci_dev_attr_group = {
1729 .attrs = pci_dev_dev_attrs, 1550 .attrs = pci_dev_dev_attrs,
1730 .is_visible = pci_dev_attrs_are_visible, 1551 .is_visible = pci_dev_attrs_are_visible,
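The sysfs conversions above all follow the DEVICE_ATTR_*() naming convention, sketched below (declaration-only, simplified; see include/linux/device.h for the real macro definitions). DEVICE_ATTR_WO(bus_rescan) declares a write-only attribute whose variable is dev_attr_bus_rescan, whose sysfs file is named "bus_rescan", and whose .store callback must be named bus_rescan_store(). That naming rule is why dev_bus_rescan_store() is renamed above and why the attribute arrays now reference &dev_attr_bus_rescan.attr, &dev_attr_remove.attr, &dev_attr_boot_vga.attr and friends.

static ssize_t bus_rescan_store(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t count);

/* roughly: __ATTR(bus_rescan, 0200, NULL, bus_rescan_store) */
static DEVICE_ATTR_WO(bus_rescan);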
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index 1b27b5af3d55..e7982af9a5d8 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -890,8 +890,8 @@ static int pci_raw_set_power_state(struct pci_dev *dev, pci_power_t state)
890 890
891 pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr); 891 pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
892 dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK); 892 dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK);
893 if (dev->current_state != state && printk_ratelimit()) 893 if (dev->current_state != state)
894 pci_info(dev, "Refused to change power state, currently in D%d\n", 894 pci_info_ratelimited(dev, "Refused to change power state, currently in D%d\n",
895 dev->current_state); 895 dev->current_state);
896 896
897 /* 897 /*
@@ -1443,7 +1443,7 @@ static void pci_restore_rebar_state(struct pci_dev *pdev)
1443 pci_read_config_dword(pdev, pos + PCI_REBAR_CTRL, &ctrl); 1443 pci_read_config_dword(pdev, pos + PCI_REBAR_CTRL, &ctrl);
1444 bar_idx = ctrl & PCI_REBAR_CTRL_BAR_IDX; 1444 bar_idx = ctrl & PCI_REBAR_CTRL_BAR_IDX;
1445 res = pdev->resource + bar_idx; 1445 res = pdev->resource + bar_idx;
1446 size = order_base_2((resource_size(res) >> 20) | 1) - 1; 1446 size = ilog2(resource_size(res)) - 20;
1447 ctrl &= ~PCI_REBAR_CTRL_BAR_SIZE; 1447 ctrl &= ~PCI_REBAR_CTRL_BAR_SIZE;
1448 ctrl |= size << PCI_REBAR_CTRL_BAR_SHIFT; 1448 ctrl |= size << PCI_REBAR_CTRL_BAR_SHIFT;
1449 pci_write_config_dword(pdev, pos + PCI_REBAR_CTRL, ctrl); 1449 pci_write_config_dword(pdev, pos + PCI_REBAR_CTRL, ctrl);
@@ -3581,7 +3581,7 @@ int pci_enable_atomic_ops_to_root(struct pci_dev *dev, u32 cap_mask)
3581 } 3581 }
3582 3582
3583 /* Ensure upstream ports don't block AtomicOps on egress */ 3583 /* Ensure upstream ports don't block AtomicOps on egress */
3584 if (!bridge->has_secondary_link) { 3584 if (pci_pcie_type(bridge) == PCI_EXP_TYPE_UPSTREAM) {
3585 pcie_capability_read_dword(bridge, PCI_EXP_DEVCTL2, 3585 pcie_capability_read_dword(bridge, PCI_EXP_DEVCTL2,
3586 &ctl2); 3586 &ctl2);
3587 if (ctl2 & PCI_EXP_DEVCTL2_ATOMIC_EGRESS_BLOCK) 3587 if (ctl2 & PCI_EXP_DEVCTL2_ATOMIC_EGRESS_BLOCK)
@@ -5923,8 +5923,19 @@ resource_size_t __weak pcibios_default_alignment(void)
5923 return 0; 5923 return 0;
5924} 5924}
5925 5925
5926#define RESOURCE_ALIGNMENT_PARAM_SIZE COMMAND_LINE_SIZE 5926/*
5927static char resource_alignment_param[RESOURCE_ALIGNMENT_PARAM_SIZE] = {0}; 5927 * Arches that don't want to expose struct resource to userland as-is in
5928 * sysfs and /proc can implement their own pci_resource_to_user().
5929 */
5930void __weak pci_resource_to_user(const struct pci_dev *dev, int bar,
5931 const struct resource *rsrc,
5932 resource_size_t *start, resource_size_t *end)
5933{
5934 *start = rsrc->start;
5935 *end = rsrc->end;
5936}
5937
5938static char *resource_alignment_param;
5928static DEFINE_SPINLOCK(resource_alignment_lock); 5939static DEFINE_SPINLOCK(resource_alignment_lock);
5929 5940
5930/** 5941/**
@@ -5945,7 +5956,7 @@ static resource_size_t pci_specified_resource_alignment(struct pci_dev *dev,
5945 5956
5946 spin_lock(&resource_alignment_lock); 5957 spin_lock(&resource_alignment_lock);
5947 p = resource_alignment_param; 5958 p = resource_alignment_param;
5948 if (!*p && !align) 5959 if (!p || !*p)
5949 goto out; 5960 goto out;
5950 if (pci_has_flag(PCI_PROBE_ONLY)) { 5961 if (pci_has_flag(PCI_PROBE_ONLY)) {
5951 align = 0; 5962 align = 0;
@@ -6109,35 +6120,41 @@ void pci_reassigndev_resource_alignment(struct pci_dev *dev)
6109 } 6120 }
6110} 6121}
6111 6122
6112static ssize_t pci_set_resource_alignment_param(const char *buf, size_t count) 6123static ssize_t resource_alignment_show(struct bus_type *bus, char *buf)
6113{ 6124{
6114 if (count > RESOURCE_ALIGNMENT_PARAM_SIZE - 1) 6125 size_t count = 0;
6115 count = RESOURCE_ALIGNMENT_PARAM_SIZE - 1;
6116 spin_lock(&resource_alignment_lock);
6117 strncpy(resource_alignment_param, buf, count);
6118 resource_alignment_param[count] = '\0';
6119 spin_unlock(&resource_alignment_lock);
6120 return count;
6121}
6122 6126
6123static ssize_t pci_get_resource_alignment_param(char *buf, size_t size)
6124{
6125 size_t count;
6126 spin_lock(&resource_alignment_lock); 6127 spin_lock(&resource_alignment_lock);
6127 count = snprintf(buf, size, "%s", resource_alignment_param); 6128 if (resource_alignment_param)
6129 count = snprintf(buf, PAGE_SIZE, "%s", resource_alignment_param);
6128 spin_unlock(&resource_alignment_lock); 6130 spin_unlock(&resource_alignment_lock);
6129 return count;
6130}
6131 6131
6132static ssize_t resource_alignment_show(struct bus_type *bus, char *buf) 6132 /*
6133{ 6133 * When set by the command line, resource_alignment_param will not
6134 return pci_get_resource_alignment_param(buf, PAGE_SIZE); 6134 * have a trailing line feed, which is ugly. So conditionally add
6135 * it here.
6136 */
6137 if (count >= 2 && buf[count - 2] != '\n' && count < PAGE_SIZE - 1) {
6138 buf[count - 1] = '\n';
6139 buf[count++] = 0;
6140 }
6141
6142 return count;
6135} 6143}
6136 6144
6137static ssize_t resource_alignment_store(struct bus_type *bus, 6145static ssize_t resource_alignment_store(struct bus_type *bus,
6138 const char *buf, size_t count) 6146 const char *buf, size_t count)
6139{ 6147{
6140 return pci_set_resource_alignment_param(buf, count); 6148 char *param = kstrndup(buf, count, GFP_KERNEL);
6149
6150 if (!param)
6151 return -ENOMEM;
6152
6153 spin_lock(&resource_alignment_lock);
6154 kfree(resource_alignment_param);
6155 resource_alignment_param = param;
6156 spin_unlock(&resource_alignment_lock);
6157 return count;
6141} 6158}
6142 6159
6143static BUS_ATTR_RW(resource_alignment); 6160static BUS_ATTR_RW(resource_alignment);
@@ -6266,8 +6283,7 @@ static int __init pci_setup(char *str)
6266 } else if (!strncmp(str, "cbmemsize=", 10)) { 6283 } else if (!strncmp(str, "cbmemsize=", 10)) {
6267 pci_cardbus_mem_size = memparse(str + 10, &str); 6284 pci_cardbus_mem_size = memparse(str + 10, &str);
6268 } else if (!strncmp(str, "resource_alignment=", 19)) { 6285 } else if (!strncmp(str, "resource_alignment=", 19)) {
6269 pci_set_resource_alignment_param(str + 19, 6286 resource_alignment_param = str + 19;
6270 strlen(str + 19));
6271 } else if (!strncmp(str, "ecrc=", 5)) { 6287 } else if (!strncmp(str, "ecrc=", 5)) {
6272 pcie_ecrc_get_policy(str + 5); 6288 pcie_ecrc_get_policy(str + 5);
6273 } else if (!strncmp(str, "hpiosize=", 9)) { 6289 } else if (!strncmp(str, "hpiosize=", 9)) {
@@ -6302,15 +6318,18 @@ static int __init pci_setup(char *str)
6302early_param("pci", pci_setup); 6318early_param("pci", pci_setup);
6303 6319
6304/* 6320/*
6305 * 'disable_acs_redir_param' is initialized in pci_setup(), above, to point 6321 * 'resource_alignment_param' and 'disable_acs_redir_param' are initialized
6306 * to data in the __initdata section which will be freed after the init 6322 * in pci_setup(), above, to point to data in the __initdata section which
6307 * sequence is complete. We can't allocate memory in pci_setup() because some 6323 * will be freed after the init sequence is complete. We can't allocate memory
6308 * architectures do not have any memory allocation service available during 6324 * in pci_setup() because some architectures do not have any memory allocation
6309 * an early_param() call. So we allocate memory and copy the variable here 6325 * service available during an early_param() call. So we allocate memory and
6310 * before the init section is freed. 6326 * copy the variable here before the init section is freed.
6327 *
6311 */ 6328 */
6312static int __init pci_realloc_setup_params(void) 6329static int __init pci_realloc_setup_params(void)
6313{ 6330{
6331 resource_alignment_param = kstrdup(resource_alignment_param,
6332 GFP_KERNEL);
6314 disable_acs_redir_param = kstrdup(disable_acs_redir_param, GFP_KERNEL); 6333 disable_acs_redir_param = kstrdup(disable_acs_redir_param, GFP_KERNEL);
6315 6334
6316 return 0; 6335 return 0;
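A user-space sketch (not kernel code) of the allocate-then-swap pattern that resource_alignment_store() adopts above: duplicate the incoming buffer outside the lock, then exchange the pointer and free the old string while holding the lock, so concurrent readers of the parameter never observe a partially written value. strndup()/free() stand in for kstrndup()/kfree(), a pthread spinlock stands in for resource_alignment_lock, and the example string is illustrative only (see Documentation/admin-guide/kernel-parameters.txt for the real resource_alignment syntax).

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/types.h>

static char *param;
static pthread_spinlock_t lock;

static ssize_t param_store(const char *buf, size_t count)
{
	char *new = strndup(buf, count);	/* kstrndup() stand-in */

	if (!new)
		return -1;			/* -ENOMEM in the kernel */

	pthread_spin_lock(&lock);
	free(param);				/* free(NULL) is a no-op, like kfree(NULL) */
	param = new;
	pthread_spin_unlock(&lock);
	return (ssize_t)count;
}

int main(void)
{
	const char *val = "20@0000:01:00.0";	/* illustrative value only */

	pthread_spin_init(&lock, PTHREAD_PROCESS_PRIVATE);
	param_store(val, strlen(val));
	printf("%s\n", param);
	return 0;
}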
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
index d22d1b807701..3f6947ee3324 100644
--- a/drivers/pci/pci.h
+++ b/drivers/pci/pci.h
@@ -39,6 +39,11 @@ int pci_probe_reset_function(struct pci_dev *dev);
39int pci_bridge_secondary_bus_reset(struct pci_dev *dev); 39int pci_bridge_secondary_bus_reset(struct pci_dev *dev);
40int pci_bus_error_reset(struct pci_dev *dev); 40int pci_bus_error_reset(struct pci_dev *dev);
41 41
42#define PCI_PM_D2_DELAY 200
43#define PCI_PM_D3_WAIT 10
44#define PCI_PM_D3COLD_WAIT 100
45#define PCI_PM_BUS_WAIT 50
46
42/** 47/**
43 * struct pci_platform_pm_ops - Firmware PM callbacks 48 * struct pci_platform_pm_ops - Firmware PM callbacks
44 * 49 *
@@ -84,6 +89,8 @@ void pci_power_up(struct pci_dev *dev);
84void pci_disable_enabled_device(struct pci_dev *dev); 89void pci_disable_enabled_device(struct pci_dev *dev);
85int pci_finish_runtime_suspend(struct pci_dev *dev); 90int pci_finish_runtime_suspend(struct pci_dev *dev);
86void pcie_clear_root_pme_status(struct pci_dev *dev); 91void pcie_clear_root_pme_status(struct pci_dev *dev);
92bool pci_check_pme_status(struct pci_dev *dev);
93void pci_pme_wakeup_bus(struct pci_bus *bus);
87int __pci_pme_wakeup(struct pci_dev *dev, void *ign); 94int __pci_pme_wakeup(struct pci_dev *dev, void *ign);
88void pci_pme_restore(struct pci_dev *dev); 95void pci_pme_restore(struct pci_dev *dev);
89bool pci_dev_need_resume(struct pci_dev *dev); 96bool pci_dev_need_resume(struct pci_dev *dev);
@@ -118,11 +125,25 @@ static inline bool pci_power_manageable(struct pci_dev *pci_dev)
118 return !pci_has_subordinate(pci_dev) || pci_dev->bridge_d3; 125 return !pci_has_subordinate(pci_dev) || pci_dev->bridge_d3;
119} 126}
120 127
128static inline bool pcie_downstream_port(const struct pci_dev *dev)
129{
130 int type = pci_pcie_type(dev);
131
132 return type == PCI_EXP_TYPE_ROOT_PORT ||
133 type == PCI_EXP_TYPE_DOWNSTREAM ||
134 type == PCI_EXP_TYPE_PCIE_BRIDGE;
135}
136
121int pci_vpd_init(struct pci_dev *dev); 137int pci_vpd_init(struct pci_dev *dev);
122void pci_vpd_release(struct pci_dev *dev); 138void pci_vpd_release(struct pci_dev *dev);
123void pcie_vpd_create_sysfs_dev_files(struct pci_dev *dev); 139void pcie_vpd_create_sysfs_dev_files(struct pci_dev *dev);
124void pcie_vpd_remove_sysfs_dev_files(struct pci_dev *dev); 140void pcie_vpd_remove_sysfs_dev_files(struct pci_dev *dev);
125 141
142/* PCI Virtual Channel */
143int pci_save_vc_state(struct pci_dev *dev);
144void pci_restore_vc_state(struct pci_dev *dev);
145void pci_allocate_vc_save_buffers(struct pci_dev *dev);
146
126/* PCI /proc functions */ 147/* PCI /proc functions */
127#ifdef CONFIG_PROC_FS 148#ifdef CONFIG_PROC_FS
128int pci_proc_attach_device(struct pci_dev *dev); 149int pci_proc_attach_device(struct pci_dev *dev);
@@ -196,6 +217,9 @@ extern const struct attribute_group *pcibus_groups[];
196extern const struct device_type pci_dev_type; 217extern const struct device_type pci_dev_type;
197extern const struct attribute_group *pci_bus_groups[]; 218extern const struct attribute_group *pci_bus_groups[];
198 219
220extern unsigned long pci_hotplug_io_size;
221extern unsigned long pci_hotplug_mem_size;
222extern unsigned long pci_hotplug_bus_size;
199 223
200/** 224/**
201 * pci_match_one_device - Tell if a PCI device structure has a matching 225 * pci_match_one_device - Tell if a PCI device structure has a matching
@@ -236,6 +260,9 @@ enum pci_bar_type {
236 pci_bar_mem64, /* A 64-bit memory BAR */ 260 pci_bar_mem64, /* A 64-bit memory BAR */
237}; 261};
238 262
263struct device *pci_get_host_bridge_device(struct pci_dev *dev);
264void pci_put_host_bridge_device(struct device *dev);
265
239int pci_configure_extended_tags(struct pci_dev *dev, void *ign); 266int pci_configure_extended_tags(struct pci_dev *dev, void *ign);
240bool pci_bus_read_dev_vendor_id(struct pci_bus *bus, int devfn, u32 *pl, 267bool pci_bus_read_dev_vendor_id(struct pci_bus *bus, int devfn, u32 *pl,
241 int crs_timeout); 268 int crs_timeout);
@@ -256,6 +283,8 @@ bool pci_bus_clip_resource(struct pci_dev *dev, int idx);
256 283
257void pci_reassigndev_resource_alignment(struct pci_dev *dev); 284void pci_reassigndev_resource_alignment(struct pci_dev *dev);
258void pci_disable_bridge_window(struct pci_dev *dev); 285void pci_disable_bridge_window(struct pci_dev *dev);
286struct pci_bus *pci_bus_get(struct pci_bus *bus);
287void pci_bus_put(struct pci_bus *bus);
259 288
260/* PCIe link information */ 289/* PCIe link information */
261#define PCIE_SPEED2STR(speed) \ 290#define PCIE_SPEED2STR(speed) \
@@ -279,6 +308,7 @@ u32 pcie_bandwidth_capable(struct pci_dev *dev, enum pci_bus_speed *speed,
279 enum pcie_link_width *width); 308 enum pcie_link_width *width);
280void __pcie_print_link_status(struct pci_dev *dev, bool verbose); 309void __pcie_print_link_status(struct pci_dev *dev, bool verbose);
281void pcie_report_downtraining(struct pci_dev *dev); 310void pcie_report_downtraining(struct pci_dev *dev);
311void pcie_update_link_speed(struct pci_bus *bus, u16 link_status);
282 312
283/* Single Root I/O Virtualization */ 313/* Single Root I/O Virtualization */
284struct pci_sriov { 314struct pci_sriov {
@@ -418,11 +448,12 @@ static inline void pci_restore_dpc_state(struct pci_dev *dev) {}
418#endif 448#endif
419 449
420#ifdef CONFIG_PCI_ATS 450#ifdef CONFIG_PCI_ATS
451/* Address Translation Service */
452void pci_ats_init(struct pci_dev *dev);
421void pci_restore_ats_state(struct pci_dev *dev); 453void pci_restore_ats_state(struct pci_dev *dev);
422#else 454#else
423static inline void pci_restore_ats_state(struct pci_dev *dev) 455static inline void pci_ats_init(struct pci_dev *d) { }
424{ 456static inline void pci_restore_ats_state(struct pci_dev *dev) { }
425}
426#endif /* CONFIG_PCI_ATS */ 457#endif /* CONFIG_PCI_ATS */
427 458
428#ifdef CONFIG_PCI_IOV 459#ifdef CONFIG_PCI_IOV
@@ -433,7 +464,7 @@ void pci_iov_update_resource(struct pci_dev *dev, int resno);
433resource_size_t pci_sriov_resource_alignment(struct pci_dev *dev, int resno); 464resource_size_t pci_sriov_resource_alignment(struct pci_dev *dev, int resno);
434void pci_restore_iov_state(struct pci_dev *dev); 465void pci_restore_iov_state(struct pci_dev *dev);
435int pci_iov_bus_range(struct pci_bus *bus); 466int pci_iov_bus_range(struct pci_bus *bus);
436 467extern const struct attribute_group sriov_dev_attr_group;
437#else 468#else
438static inline int pci_iov_init(struct pci_dev *dev) 469static inline int pci_iov_init(struct pci_dev *dev)
439{ 470{
@@ -518,10 +549,21 @@ static inline void pcie_aspm_create_sysfs_dev_files(struct pci_dev *pdev) { }
518static inline void pcie_aspm_remove_sysfs_dev_files(struct pci_dev *pdev) { } 549static inline void pcie_aspm_remove_sysfs_dev_files(struct pci_dev *pdev) { }
519#endif 550#endif
520 551
552#ifdef CONFIG_PCIE_ECRC
553void pcie_set_ecrc_checking(struct pci_dev *dev);
554void pcie_ecrc_get_policy(char *str);
555#else
556static inline void pcie_set_ecrc_checking(struct pci_dev *dev) { }
557static inline void pcie_ecrc_get_policy(char *str) { }
558#endif
559
521#ifdef CONFIG_PCIE_PTM 560#ifdef CONFIG_PCIE_PTM
522void pci_ptm_init(struct pci_dev *dev); 561void pci_ptm_init(struct pci_dev *dev);
562int pci_enable_ptm(struct pci_dev *dev, u8 *granularity);
523#else 563#else
524static inline void pci_ptm_init(struct pci_dev *dev) { } 564static inline void pci_ptm_init(struct pci_dev *dev) { }
565static inline int pci_enable_ptm(struct pci_dev *dev, u8 *granularity)
566{ return -EINVAL; }
525#endif 567#endif
526 568
527struct pci_dev_reset_methods { 569struct pci_dev_reset_methods {
@@ -558,6 +600,10 @@ struct device_node;
558int of_pci_parse_bus_range(struct device_node *node, struct resource *res); 600int of_pci_parse_bus_range(struct device_node *node, struct resource *res);
559int of_get_pci_domain_nr(struct device_node *node); 601int of_get_pci_domain_nr(struct device_node *node);
560int of_pci_get_max_link_speed(struct device_node *node); 602int of_pci_get_max_link_speed(struct device_node *node);
603void pci_set_of_node(struct pci_dev *dev);
604void pci_release_of_node(struct pci_dev *dev);
605void pci_set_bus_of_node(struct pci_bus *bus);
606void pci_release_bus_of_node(struct pci_bus *bus);
561 607
562#else 608#else
563static inline int 609static inline int
@@ -577,6 +623,11 @@ of_pci_get_max_link_speed(struct device_node *node)
577{ 623{
578 return -EINVAL; 624 return -EINVAL;
579} 625}
626
627static inline void pci_set_of_node(struct pci_dev *dev) { }
628static inline void pci_release_of_node(struct pci_dev *dev) { }
629static inline void pci_set_bus_of_node(struct pci_bus *bus) { }
630static inline void pci_release_bus_of_node(struct pci_bus *bus) { }
580#endif /* CONFIG_OF */ 631#endif /* CONFIG_OF */
581 632
582#if defined(CONFIG_OF_ADDRESS) 633#if defined(CONFIG_OF_ADDRESS)
@@ -607,4 +658,13 @@ static inline void pci_aer_clear_fatal_status(struct pci_dev *dev) { }
607static inline void pci_aer_clear_device_status(struct pci_dev *dev) { } 658static inline void pci_aer_clear_device_status(struct pci_dev *dev) { }
608#endif 659#endif
609 660
661#ifdef CONFIG_ACPI
662int pci_acpi_program_hp_params(struct pci_dev *dev);
663#else
664static inline int pci_acpi_program_hp_params(struct pci_dev *dev)
665{
666 return -ENODEV;
667}
668#endif
669
610#endif /* DRIVERS_PCI_H */ 670#endif /* DRIVERS_PCI_H */
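The helper added to pci.h above encodes the classification that the following hunks in aspm.c, err.c and probe.c rely on in place of the removed dev->has_secondary_link flag: the PCIe port types whose link sits on their secondary (downstream) side. A small user-space sketch of that classification, using the uapi <linux/pci_regs.h> constants, is shown below; it simply mirrors pcie_downstream_port().

#include <linux/pci_regs.h>
#include <stdbool.h>
#include <stdio.h>

static bool has_downstream_link(int pcie_type)
{
	return pcie_type == PCI_EXP_TYPE_ROOT_PORT ||	/* Root Port of a Root Complex */
	       pcie_type == PCI_EXP_TYPE_DOWNSTREAM ||	/* Switch Downstream Port */
	       pcie_type == PCI_EXP_TYPE_PCIE_BRIDGE;	/* PCI/PCI-X to PCIe bridge */
}

int main(void)
{
	printf("%d %d\n", has_downstream_link(PCI_EXP_TYPE_ROOT_PORT),
	       has_downstream_link(PCI_EXP_TYPE_UPSTREAM));	/* prints "1 0" */
	return 0;
}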
diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
index 464f8f92653f..652ef23bba35 100644
--- a/drivers/pci/pcie/aspm.c
+++ b/drivers/pci/pcie/aspm.c
@@ -18,7 +18,6 @@
18#include <linux/slab.h> 18#include <linux/slab.h>
19#include <linux/jiffies.h> 19#include <linux/jiffies.h>
20#include <linux/delay.h> 20#include <linux/delay.h>
21#include <linux/pci-aspm.h>
22#include "../pci.h" 21#include "../pci.h"
23 22
24#ifdef MODULE_PARAM_PREFIX 23#ifdef MODULE_PARAM_PREFIX
@@ -913,10 +912,10 @@ void pcie_aspm_init_link_state(struct pci_dev *pdev)
913 912
914 /* 913 /*
915 * We allocate pcie_link_state for the component on the upstream 914 * We allocate pcie_link_state for the component on the upstream
916 * end of a Link, so there's nothing to do unless this device has a 915 * end of a Link, so there's nothing to do unless this device is
 917 * Link on its secondary side. 916 * a downstream port.
918 */ 917 */
919 if (!pdev->has_secondary_link) 918 if (!pcie_downstream_port(pdev))
920 return; 919 return;
921 920
922 /* VIA has a strange chipset, root port is under a bridge */ 921 /* VIA has a strange chipset, root port is under a bridge */
@@ -1070,7 +1069,7 @@ static int __pci_disable_link_state(struct pci_dev *pdev, int state, bool sem)
1070 if (!pci_is_pcie(pdev)) 1069 if (!pci_is_pcie(pdev))
1071 return 0; 1070 return 0;
1072 1071
1073 if (pdev->has_secondary_link) 1072 if (pcie_downstream_port(pdev))
1074 parent = pdev; 1073 parent = pdev;
1075 if (!parent || !parent->link_state) 1074 if (!parent || !parent->link_state)
1076 return -EINVAL; 1075 return -EINVAL;
diff --git a/drivers/pci/pcie/err.c b/drivers/pci/pcie/err.c
index 773197a12568..b0e6048a9208 100644
--- a/drivers/pci/pcie/err.c
+++ b/drivers/pci/pcie/err.c
@@ -166,7 +166,7 @@ static pci_ers_result_t reset_link(struct pci_dev *dev, u32 service)
166 driver = pcie_port_find_service(dev, service); 166 driver = pcie_port_find_service(dev, service);
167 if (driver && driver->reset_link) { 167 if (driver && driver->reset_link) {
168 status = driver->reset_link(dev); 168 status = driver->reset_link(dev);
169 } else if (dev->has_secondary_link) { 169 } else if (pcie_downstream_port(dev)) {
170 status = default_reset_link(dev); 170 status = default_reset_link(dev);
171 } else { 171 } else {
172 pci_printk(KERN_DEBUG, dev, "no link-reset support at upstream device %s\n", 172 pci_printk(KERN_DEBUG, dev, "no link-reset support at upstream device %s\n",
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index dbeeb385fb9f..3d5271a7a849 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -1426,26 +1426,38 @@ void set_pcie_port_type(struct pci_dev *pdev)
1426 pci_read_config_word(pdev, pos + PCI_EXP_DEVCAP, &reg16); 1426 pci_read_config_word(pdev, pos + PCI_EXP_DEVCAP, &reg16);
1427 pdev->pcie_mpss = reg16 & PCI_EXP_DEVCAP_PAYLOAD; 1427 pdev->pcie_mpss = reg16 & PCI_EXP_DEVCAP_PAYLOAD;
1428 1428
1429 parent = pci_upstream_bridge(pdev);
1430 if (!parent)
1431 return;
1432
1429 /* 1433 /*
1430 * A Root Port or a PCI-to-PCIe bridge is always the upstream end 1434 * Some systems do not identify their upstream/downstream ports
 1431 * of a Link. No PCIe component has two Links. Two Links are 1435 * correctly, so detect impossible configurations here and correct
1432 * connected by a Switch that has a Port on each Link and internal 1436 * the port type accordingly.
1433 * logic to connect the two Ports.
1434 */ 1437 */
1435 type = pci_pcie_type(pdev); 1438 type = pci_pcie_type(pdev);
1436 if (type == PCI_EXP_TYPE_ROOT_PORT || 1439 if (type == PCI_EXP_TYPE_DOWNSTREAM) {
1437 type == PCI_EXP_TYPE_PCIE_BRIDGE)
1438 pdev->has_secondary_link = 1;
1439 else if (type == PCI_EXP_TYPE_UPSTREAM ||
1440 type == PCI_EXP_TYPE_DOWNSTREAM) {
1441 parent = pci_upstream_bridge(pdev);
1442
1443 /* 1440 /*
1444 * Usually there's an upstream device (Root Port or Switch 1441 * If pdev claims to be downstream port but the parent
1445 * Downstream Port), but we can't assume one exists. 1442 * device is also downstream port assume pdev is actually
1443 * upstream port.
1446 */ 1444 */
1447 if (parent && !parent->has_secondary_link) 1445 if (pcie_downstream_port(parent)) {
1448 pdev->has_secondary_link = 1; 1446 pci_info(pdev, "claims to be downstream port but is acting as upstream port, correcting type\n");
1447 pdev->pcie_flags_reg &= ~PCI_EXP_FLAGS_TYPE;
1448 pdev->pcie_flags_reg |= PCI_EXP_TYPE_UPSTREAM;
1449 }
1450 } else if (type == PCI_EXP_TYPE_UPSTREAM) {
1451 /*
 1452 * If pdev claims to be an upstream port but the parent
 1453 * device is also an upstream port, assume pdev is actually
 1454 * a downstream port.
1455 */
1456 if (pci_pcie_type(parent) == PCI_EXP_TYPE_UPSTREAM) {
1457 pci_info(pdev, "claims to be upstream port but is acting as downstream port, correcting type\n");
1458 pdev->pcie_flags_reg &= ~PCI_EXP_FLAGS_TYPE;
1459 pdev->pcie_flags_reg |= PCI_EXP_TYPE_DOWNSTREAM;
1460 }
1449 } 1461 }
1450} 1462}
1451 1463
@@ -1915,275 +1927,6 @@ static void pci_configure_mps(struct pci_dev *dev)
1915 p_mps, mps, mpss); 1927 p_mps, mps, mpss);
1916} 1928}
1917 1929
1918static struct hpp_type0 pci_default_type0 = {
1919 .revision = 1,
1920 .cache_line_size = 8,
1921 .latency_timer = 0x40,
1922 .enable_serr = 0,
1923 .enable_perr = 0,
1924};
1925
1926static void program_hpp_type0(struct pci_dev *dev, struct hpp_type0 *hpp)
1927{
1928 u16 pci_cmd, pci_bctl;
1929
1930 if (!hpp)
1931 hpp = &pci_default_type0;
1932
1933 if (hpp->revision > 1) {
1934 pci_warn(dev, "PCI settings rev %d not supported; using defaults\n",
1935 hpp->revision);
1936 hpp = &pci_default_type0;
1937 }
1938
1939 pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, hpp->cache_line_size);
1940 pci_write_config_byte(dev, PCI_LATENCY_TIMER, hpp->latency_timer);
1941 pci_read_config_word(dev, PCI_COMMAND, &pci_cmd);
1942 if (hpp->enable_serr)
1943 pci_cmd |= PCI_COMMAND_SERR;
1944 if (hpp->enable_perr)
1945 pci_cmd |= PCI_COMMAND_PARITY;
1946 pci_write_config_word(dev, PCI_COMMAND, pci_cmd);
1947
1948 /* Program bridge control value */
1949 if ((dev->class >> 8) == PCI_CLASS_BRIDGE_PCI) {
1950 pci_write_config_byte(dev, PCI_SEC_LATENCY_TIMER,
1951 hpp->latency_timer);
1952 pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &pci_bctl);
1953 if (hpp->enable_perr)
1954 pci_bctl |= PCI_BRIDGE_CTL_PARITY;
1955 pci_write_config_word(dev, PCI_BRIDGE_CONTROL, pci_bctl);
1956 }
1957}
1958
1959static void program_hpp_type1(struct pci_dev *dev, struct hpp_type1 *hpp)
1960{
1961 int pos;
1962
1963 if (!hpp)
1964 return;
1965
1966 pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
1967 if (!pos)
1968 return;
1969
1970 pci_warn(dev, "PCI-X settings not supported\n");
1971}
1972
1973static bool pcie_root_rcb_set(struct pci_dev *dev)
1974{
1975 struct pci_dev *rp = pcie_find_root_port(dev);
1976 u16 lnkctl;
1977
1978 if (!rp)
1979 return false;
1980
1981 pcie_capability_read_word(rp, PCI_EXP_LNKCTL, &lnkctl);
1982 if (lnkctl & PCI_EXP_LNKCTL_RCB)
1983 return true;
1984
1985 return false;
1986}
1987
1988static void program_hpp_type2(struct pci_dev *dev, struct hpp_type2 *hpp)
1989{
1990 int pos;
1991 u32 reg32;
1992
1993 if (!hpp)
1994 return;
1995
1996 if (!pci_is_pcie(dev))
1997 return;
1998
1999 if (hpp->revision > 1) {
2000 pci_warn(dev, "PCIe settings rev %d not supported\n",
2001 hpp->revision);
2002 return;
2003 }
2004
2005 /*
2006 * Don't allow _HPX to change MPS or MRRS settings. We manage
2007 * those to make sure they're consistent with the rest of the
2008 * platform.
2009 */
2010 hpp->pci_exp_devctl_and |= PCI_EXP_DEVCTL_PAYLOAD |
2011 PCI_EXP_DEVCTL_READRQ;
2012 hpp->pci_exp_devctl_or &= ~(PCI_EXP_DEVCTL_PAYLOAD |
2013 PCI_EXP_DEVCTL_READRQ);
2014
2015 /* Initialize Device Control Register */
2016 pcie_capability_clear_and_set_word(dev, PCI_EXP_DEVCTL,
2017 ~hpp->pci_exp_devctl_and, hpp->pci_exp_devctl_or);
2018
2019 /* Initialize Link Control Register */
2020 if (pcie_cap_has_lnkctl(dev)) {
2021
2022 /*
2023 * If the Root Port supports Read Completion Boundary of
2024 * 128, set RCB to 128. Otherwise, clear it.
2025 */
2026 hpp->pci_exp_lnkctl_and |= PCI_EXP_LNKCTL_RCB;
2027 hpp->pci_exp_lnkctl_or &= ~PCI_EXP_LNKCTL_RCB;
2028 if (pcie_root_rcb_set(dev))
2029 hpp->pci_exp_lnkctl_or |= PCI_EXP_LNKCTL_RCB;
2030
2031 pcie_capability_clear_and_set_word(dev, PCI_EXP_LNKCTL,
2032 ~hpp->pci_exp_lnkctl_and, hpp->pci_exp_lnkctl_or);
2033 }
2034
2035 /* Find Advanced Error Reporting Enhanced Capability */
2036 pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
2037 if (!pos)
2038 return;
2039
2040 /* Initialize Uncorrectable Error Mask Register */
2041 pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_MASK, &reg32);
2042 reg32 = (reg32 & hpp->unc_err_mask_and) | hpp->unc_err_mask_or;
2043 pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_MASK, reg32);
2044
2045 /* Initialize Uncorrectable Error Severity Register */
2046 pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, &reg32);
2047 reg32 = (reg32 & hpp->unc_err_sever_and) | hpp->unc_err_sever_or;
2048 pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, reg32);
2049
2050 /* Initialize Correctable Error Mask Register */
2051 pci_read_config_dword(dev, pos + PCI_ERR_COR_MASK, &reg32);
2052 reg32 = (reg32 & hpp->cor_err_mask_and) | hpp->cor_err_mask_or;
2053 pci_write_config_dword(dev, pos + PCI_ERR_COR_MASK, reg32);
2054
2055 /* Initialize Advanced Error Capabilities and Control Register */
2056 pci_read_config_dword(dev, pos + PCI_ERR_CAP, &reg32);
2057 reg32 = (reg32 & hpp->adv_err_cap_and) | hpp->adv_err_cap_or;
2058
2059 /* Don't enable ECRC generation or checking if unsupported */
2060 if (!(reg32 & PCI_ERR_CAP_ECRC_GENC))
2061 reg32 &= ~PCI_ERR_CAP_ECRC_GENE;
2062 if (!(reg32 & PCI_ERR_CAP_ECRC_CHKC))
2063 reg32 &= ~PCI_ERR_CAP_ECRC_CHKE;
2064 pci_write_config_dword(dev, pos + PCI_ERR_CAP, reg32);
2065
2066 /*
2067 * FIXME: The following two registers are not supported yet.
2068 *
2069 * o Secondary Uncorrectable Error Severity Register
2070 * o Secondary Uncorrectable Error Mask Register
2071 */
2072}
2073
2074static u16 hpx3_device_type(struct pci_dev *dev)
2075{
2076 u16 pcie_type = pci_pcie_type(dev);
2077 const int pcie_to_hpx3_type[] = {
2078 [PCI_EXP_TYPE_ENDPOINT] = HPX_TYPE_ENDPOINT,
2079 [PCI_EXP_TYPE_LEG_END] = HPX_TYPE_LEG_END,
2080 [PCI_EXP_TYPE_RC_END] = HPX_TYPE_RC_END,
2081 [PCI_EXP_TYPE_RC_EC] = HPX_TYPE_RC_EC,
2082 [PCI_EXP_TYPE_ROOT_PORT] = HPX_TYPE_ROOT_PORT,
2083 [PCI_EXP_TYPE_UPSTREAM] = HPX_TYPE_UPSTREAM,
2084 [PCI_EXP_TYPE_DOWNSTREAM] = HPX_TYPE_DOWNSTREAM,
2085 [PCI_EXP_TYPE_PCI_BRIDGE] = HPX_TYPE_PCI_BRIDGE,
2086 [PCI_EXP_TYPE_PCIE_BRIDGE] = HPX_TYPE_PCIE_BRIDGE,
2087 };
2088
2089 if (pcie_type >= ARRAY_SIZE(pcie_to_hpx3_type))
2090 return 0;
2091
2092 return pcie_to_hpx3_type[pcie_type];
2093}
2094
2095static u8 hpx3_function_type(struct pci_dev *dev)
2096{
2097 if (dev->is_virtfn)
2098 return HPX_FN_SRIOV_VIRT;
2099 else if (pci_find_ext_capability(dev, PCI_EXT_CAP_ID_SRIOV) > 0)
2100 return HPX_FN_SRIOV_PHYS;
2101 else
2102 return HPX_FN_NORMAL;
2103}
2104
2105static bool hpx3_cap_ver_matches(u8 pcie_cap_id, u8 hpx3_cap_id)
2106{
2107 u8 cap_ver = hpx3_cap_id & 0xf;
2108
2109 if ((hpx3_cap_id & BIT(4)) && cap_ver >= pcie_cap_id)
2110 return true;
2111 else if (cap_ver == pcie_cap_id)
2112 return true;
2113
2114 return false;
2115}
2116
2117static void program_hpx_type3_register(struct pci_dev *dev,
2118 const struct hpx_type3 *reg)
2119{
2120 u32 match_reg, write_reg, header, orig_value;
2121 u16 pos;
2122
2123 if (!(hpx3_device_type(dev) & reg->device_type))
2124 return;
2125
2126 if (!(hpx3_function_type(dev) & reg->function_type))
2127 return;
2128
2129 switch (reg->config_space_location) {
2130 case HPX_CFG_PCICFG:
2131 pos = 0;
2132 break;
2133 case HPX_CFG_PCIE_CAP:
2134 pos = pci_find_capability(dev, reg->pci_exp_cap_id);
2135 if (pos == 0)
2136 return;
2137
2138 break;
2139 case HPX_CFG_PCIE_CAP_EXT:
2140 pos = pci_find_ext_capability(dev, reg->pci_exp_cap_id);
2141 if (pos == 0)
2142 return;
2143
2144 pci_read_config_dword(dev, pos, &header);
2145 if (!hpx3_cap_ver_matches(PCI_EXT_CAP_VER(header),
2146 reg->pci_exp_cap_ver))
2147 return;
2148
2149 break;
2150 case HPX_CFG_VEND_CAP: /* Fall through */
2151 case HPX_CFG_DVSEC: /* Fall through */
2152 default:
2153 pci_warn(dev, "Encountered _HPX type 3 with unsupported config space location");
2154 return;
2155 }
2156
2157 pci_read_config_dword(dev, pos + reg->match_offset, &match_reg);
2158
2159 if ((match_reg & reg->match_mask_and) != reg->match_value)
2160 return;
2161
2162 pci_read_config_dword(dev, pos + reg->reg_offset, &write_reg);
2163 orig_value = write_reg;
2164 write_reg &= reg->reg_mask_and;
2165 write_reg |= reg->reg_mask_or;
2166
2167 if (orig_value == write_reg)
2168 return;
2169
2170 pci_write_config_dword(dev, pos + reg->reg_offset, write_reg);
2171
2172 pci_dbg(dev, "Applied _HPX3 at [0x%x]: 0x%08x -> 0x%08x",
2173 pos, orig_value, write_reg);
2174}
2175
2176static void program_hpx_type3(struct pci_dev *dev, struct hpx_type3 *hpx3)
2177{
2178 if (!hpx3)
2179 return;
2180
2181 if (!pci_is_pcie(dev))
2182 return;
2183
2184 program_hpx_type3_register(dev, hpx3);
2185}
2186
2187int pci_configure_extended_tags(struct pci_dev *dev, void *ign) 1930int pci_configure_extended_tags(struct pci_dev *dev, void *ign)
2188{ 1931{
2189 struct pci_host_bridge *host; 1932 struct pci_host_bridge *host;
@@ -2364,13 +2107,6 @@ static void pci_configure_serr(struct pci_dev *dev)
2364 2107
2365static void pci_configure_device(struct pci_dev *dev) 2108static void pci_configure_device(struct pci_dev *dev)
2366{ 2109{
2367 static const struct hotplug_program_ops hp_ops = {
2368 .program_type0 = program_hpp_type0,
2369 .program_type1 = program_hpp_type1,
2370 .program_type2 = program_hpp_type2,
2371 .program_type3 = program_hpx_type3,
2372 };
2373
2374 pci_configure_mps(dev); 2110 pci_configure_mps(dev);
2375 pci_configure_extended_tags(dev, NULL); 2111 pci_configure_extended_tags(dev, NULL);
2376 pci_configure_relaxed_ordering(dev); 2112 pci_configure_relaxed_ordering(dev);
@@ -2378,7 +2114,7 @@ static void pci_configure_device(struct pci_dev *dev)
2378 pci_configure_eetlp_prefix(dev); 2114 pci_configure_eetlp_prefix(dev);
2379 pci_configure_serr(dev); 2115 pci_configure_serr(dev);
2380 2116
2381 pci_acpi_program_hp_params(dev, &hp_ops); 2117 pci_acpi_program_hp_params(dev);
2382} 2118}
2383 2119
2384static void pci_release_capabilities(struct pci_dev *dev) 2120static void pci_release_capabilities(struct pci_dev *dev)
@@ -2759,12 +2495,8 @@ static int only_one_child(struct pci_bus *bus)
2759 * A PCIe Downstream Port normally leads to a Link with only Device 2495 * A PCIe Downstream Port normally leads to a Link with only Device
2760 * 0 on it (PCIe spec r3.1, sec 7.3.1). As an optimization, scan 2496 * 0 on it (PCIe spec r3.1, sec 7.3.1). As an optimization, scan
2761 * only for Device 0 in that situation. 2497 * only for Device 0 in that situation.
2762 *
2763 * Checking has_secondary_link is a hack to identify Downstream
2764 * Ports because sometimes Switches are configured such that the
2765 * PCIe Port Type labels are backwards.
2766 */ 2498 */
2767 if (bridge && pci_is_pcie(bridge) && bridge->has_secondary_link) 2499 if (bridge && pci_is_pcie(bridge) && pcie_downstream_port(bridge))
2768 return 1; 2500 return 1;
2769 2501
2770 return 0; 2502 return 0;
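
The has_secondary_link hack is replaced here by pcie_downstream_port(), a helper introduced elsewhere in this series. Roughly, and only as a paraphrase of its intent rather than the exact upstream body, it reports whether the device's PCIe port type has a link on its downstream side:

	/* Approximation of the helper's intent; see include/linux/pci.h for the real one. */
	static inline bool example_is_downstream_port(const struct pci_dev *dev)
	{
		int type = pci_pcie_type(dev);

		return type == PCI_EXP_TYPE_ROOT_PORT ||
		       type == PCI_EXP_TYPE_DOWNSTREAM ||
		       type == PCI_EXP_TYPE_PCIE_BRIDGE;	/* PCI/PCI-X to PCIe bridge */
	}
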
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index 44c4ae1abd00..320255e5e8f8 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -20,7 +20,6 @@
20#include <linux/delay.h> 20#include <linux/delay.h>
21#include <linux/acpi.h> 21#include <linux/acpi.h>
22#include <linux/dmi.h> 22#include <linux/dmi.h>
23#include <linux/pci-aspm.h>
24#include <linux/ioport.h> 23#include <linux/ioport.h>
25#include <linux/sched.h> 24#include <linux/sched.h>
26#include <linux/ktime.h> 25#include <linux/ktime.h>
@@ -2593,6 +2592,59 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA,
2593 nvenet_msi_disable); 2592 nvenet_msi_disable);
2594 2593
2595/* 2594/*
2595 * PCIe spec r4.0 sec 7.7.1.2 and sec 7.7.2.2 say that if MSI/MSI-X is enabled,
2596 * then the device can't use INTx interrupts. Tegra's PCIe root ports don't
2597 * generate MSI interrupts for PME and AER events; instead, only INTx
2598 * interrupts are generated for those. Although Tegra's PCIe root ports can
2599 * generate MSI interrupts for other events, the PCIe specification doesn't
2600 * allow mixing INTx and MSI/MSI-X, so MSI must be disabled to avoid port
2601 * service drivers registering their respective ISRs for MSIs.
2602 */
2603static void pci_quirk_nvidia_tegra_disable_rp_msi(struct pci_dev *dev)
2604{
2605 dev->no_msi = 1;
2606}
2607DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_NVIDIA, 0x1ad0,
2608 PCI_CLASS_BRIDGE_PCI, 8,
2609 pci_quirk_nvidia_tegra_disable_rp_msi);
2610DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_NVIDIA, 0x1ad1,
2611 PCI_CLASS_BRIDGE_PCI, 8,
2612 pci_quirk_nvidia_tegra_disable_rp_msi);
2613DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_NVIDIA, 0x1ad2,
2614 PCI_CLASS_BRIDGE_PCI, 8,
2615 pci_quirk_nvidia_tegra_disable_rp_msi);
2616DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0bf0,
2617 PCI_CLASS_BRIDGE_PCI, 8,
2618 pci_quirk_nvidia_tegra_disable_rp_msi);
2619DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0bf1,
2620 PCI_CLASS_BRIDGE_PCI, 8,
2621 pci_quirk_nvidia_tegra_disable_rp_msi);
2622DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0e1c,
2623 PCI_CLASS_BRIDGE_PCI, 8,
2624 pci_quirk_nvidia_tegra_disable_rp_msi);
2625DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0e1d,
2626 PCI_CLASS_BRIDGE_PCI, 8,
2627 pci_quirk_nvidia_tegra_disable_rp_msi);
2628DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0e12,
2629 PCI_CLASS_BRIDGE_PCI, 8,
2630 pci_quirk_nvidia_tegra_disable_rp_msi);
2631DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0e13,
2632 PCI_CLASS_BRIDGE_PCI, 8,
2633 pci_quirk_nvidia_tegra_disable_rp_msi);
2634DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0fae,
2635 PCI_CLASS_BRIDGE_PCI, 8,
2636 pci_quirk_nvidia_tegra_disable_rp_msi);
2637DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0faf,
2638 PCI_CLASS_BRIDGE_PCI, 8,
2639 pci_quirk_nvidia_tegra_disable_rp_msi);
2640DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_NVIDIA, 0x10e5,
2641 PCI_CLASS_BRIDGE_PCI, 8,
2642 pci_quirk_nvidia_tegra_disable_rp_msi);
2643DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_NVIDIA, 0x10e6,
2644 PCI_CLASS_BRIDGE_PCI, 8,
2645 pci_quirk_nvidia_tegra_disable_rp_msi);
2646
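
Setting dev->no_msi makes the MSI core refuse MSI/MSI-X on these root ports, so any later vector allocation falls back to INTx. A hypothetical consumer, for illustration only (this is not actual portdrv code, and the function name is made up):

	#include <linux/pci.h>

	static int example_setup_irq(struct pci_dev *pdev)
	{
		int nvec;

		/* With dev->no_msi set by the quirk above, the MSI/MSI-X part of
		 * this request fails and the allocation falls back to the legacy
		 * INTx vector. */
		nvec = pci_alloc_irq_vectors(pdev, 1, 1,
					     PCI_IRQ_MSI | PCI_IRQ_MSIX | PCI_IRQ_LEGACY);
		if (nvec < 0)
			return nvec;

		return pci_irq_vector(pdev, 0);	/* Linux IRQ number for request_irq() */
	}
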
2647/*
2596 * Some versions of the MCP55 bridge from Nvidia have a legacy IRQ routing 2648 * Some versions of the MCP55 bridge from Nvidia have a legacy IRQ routing
2597 * config register. This register controls the routing of legacy 2649 * config register. This register controls the routing of legacy
2598 * interrupts from devices that route through the MCP55. If this register 2650 * interrupts from devices that route through the MCP55. If this register
@@ -2925,6 +2977,24 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATTANSIC, 0x10a1,
2925 quirk_msi_intx_disable_qca_bug); 2977 quirk_msi_intx_disable_qca_bug);
2926DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATTANSIC, 0xe091, 2978DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATTANSIC, 0xe091,
2927 quirk_msi_intx_disable_qca_bug); 2979 quirk_msi_intx_disable_qca_bug);
2980
2981/*
2982 * Amazon's Annapurna Labs 1c36:0031 Root Ports don't support MSI-X, so MSI-X
2983 * should be disabled on platforms where the device (mistakenly) advertises it.
2984 *
2985 * Notice that this quirk also disables MSI (which may work, but hasn't been
2986 * tested), since currently there is no standard way to disable only MSI-X.
2987 *
2988 * The 0031 device ID is reused for other non-Root-Port device types, so
2989 * the quirk is registered for the PCI_CLASS_BRIDGE_PCI class.
2990 */
2991static void quirk_al_msi_disable(struct pci_dev *dev)
2992{
2993 dev->no_msi = 1;
2994 pci_warn(dev, "Disabling MSI/MSI-X\n");
2995}
2996DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_AMAZON_ANNAPURNA_LABS, 0x0031,
2997 PCI_CLASS_BRIDGE_PCI, 8, quirk_al_msi_disable);
2928#endif /* CONFIG_PCI_MSI */ 2998#endif /* CONFIG_PCI_MSI */
2929 2999
2930/* 3000/*
@@ -4366,6 +4436,24 @@ static int pci_quirk_qcom_rp_acs(struct pci_dev *dev, u16 acs_flags)
4366 return ret; 4436 return ret;
4367} 4437}
4368 4438
4439static int pci_quirk_al_acs(struct pci_dev *dev, u16 acs_flags)
4440{
4441 if (pci_pcie_type(dev) != PCI_EXP_TYPE_ROOT_PORT)
4442 return -ENOTTY;
4443
4444 /*
4445 * Amazon's Annapurna Labs root ports don't include an ACS capability,
4446 * but do include ACS-like functionality. The hardware doesn't support
4447 * peer-to-peer transactions via the root port and each has a unique
4448 * segment number.
4449 *
4450 * Additionally, the root ports cannot send traffic to each other.
4451 */
4452 acs_flags &= ~(PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF);
4453
4454 return acs_flags ? 0 : 1;
4455}
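
ACS quirks such as this one follow a common convention: the quirk clears from acs_flags every protection the hardware effectively provides, then returns 1 if nothing is left (all requested protections covered), 0 if some requested protection is missing, and -ENOTTY if the quirk does not apply to the device. A sketch of how a caller interprets that, loosely modeled on pci_dev_specific_acs_enabled() and for illustration only:

	static bool example_acs_covered(struct pci_dev *dev, u16 wanted)
	{
		int ret = pci_quirk_al_acs(dev, wanted);

		if (ret < 0)
			return false;	/* quirk doesn't apply; fall back to the real ACS capability */

		return ret == 1;	/* 1: every requested ACS protection is effectively present */
	}
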
4456
4369/* 4457/*
4370 * Sunrise Point PCH root ports implement ACS, but unfortunately as shown in 4458 * Sunrise Point PCH root ports implement ACS, but unfortunately as shown in
4371 * the datasheet (Intel 100 Series Chipset Family PCH Datasheet, Vol. 2, 4459 * the datasheet (Intel 100 Series Chipset Family PCH Datasheet, Vol. 2,
@@ -4466,6 +4554,19 @@ static int pci_quirk_mf_endpoint_acs(struct pci_dev *dev, u16 acs_flags)
4466 return acs_flags ? 0 : 1; 4554 return acs_flags ? 0 : 1;
4467} 4555}
4468 4556
4557static int pci_quirk_brcm_acs(struct pci_dev *dev, u16 acs_flags)
4558{
4559 /*
4560 * iProc PAXB Root Ports don't advertise an ACS capability, but
 4561 * they also do not allow peer-to-peer transactions between Root Ports.
4562 * Allow each Root Port to be in a separate IOMMU group by masking
4563 * SV/RR/CR/UF bits.
4564 */
4565 acs_flags &= ~(PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF);
4566
4567 return acs_flags ? 0 : 1;
4568}
4569
4469static const struct pci_dev_acs_enabled { 4570static const struct pci_dev_acs_enabled {
4470 u16 vendor; 4571 u16 vendor;
4471 u16 device; 4572 u16 device;
@@ -4559,6 +4660,9 @@ static const struct pci_dev_acs_enabled {
4559 { PCI_VENDOR_ID_AMPERE, 0xE00A, pci_quirk_xgene_acs }, 4660 { PCI_VENDOR_ID_AMPERE, 0xE00A, pci_quirk_xgene_acs },
4560 { PCI_VENDOR_ID_AMPERE, 0xE00B, pci_quirk_xgene_acs }, 4661 { PCI_VENDOR_ID_AMPERE, 0xE00B, pci_quirk_xgene_acs },
4561 { PCI_VENDOR_ID_AMPERE, 0xE00C, pci_quirk_xgene_acs }, 4662 { PCI_VENDOR_ID_AMPERE, 0xE00C, pci_quirk_xgene_acs },
4663 { PCI_VENDOR_ID_BROADCOM, 0xD714, pci_quirk_brcm_acs },
4664 /* Amazon Annapurna Labs */
4665 { PCI_VENDOR_ID_AMAZON_ANNAPURNA_LABS, 0x0031, pci_quirk_al_acs },
4562 { 0 } 4666 { 0 }
4563}; 4667};
4564 4668
diff --git a/drivers/pci/search.c b/drivers/pci/search.c
index 7f4e65872b8d..bade14002fd8 100644
--- a/drivers/pci/search.c
+++ b/drivers/pci/search.c
@@ -15,7 +15,6 @@
15#include "pci.h" 15#include "pci.h"
16 16
17DECLARE_RWSEM(pci_bus_sem); 17DECLARE_RWSEM(pci_bus_sem);
18EXPORT_SYMBOL_GPL(pci_bus_sem);
19 18
20/* 19/*
21 * pci_for_each_dma_alias - Iterate over DMA aliases for a device 20 * pci_for_each_dma_alias - Iterate over DMA aliases for a device
diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c
index 79b1fa6519be..e7dbe21705ba 100644
--- a/drivers/pci/setup-bus.c
+++ b/drivers/pci/setup-bus.c
@@ -1662,8 +1662,8 @@ static int iov_resources_unassigned(struct pci_dev *dev, void *data)
1662 int i; 1662 int i;
1663 bool *unassigned = data; 1663 bool *unassigned = data;
1664 1664
1665 for (i = PCI_IOV_RESOURCES; i <= PCI_IOV_RESOURCE_END; i++) { 1665 for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {
1666 struct resource *r = &dev->resource[i]; 1666 struct resource *r = &dev->resource[i + PCI_IOV_RESOURCES];
1667 struct pci_bus_region region; 1667 struct pci_bus_region region;
1668 1668
1669 /* Not assigned or rejected by kernel? */ 1669 /* Not assigned or rejected by kernel? */
diff --git a/drivers/pci/vc.c b/drivers/pci/vc.c
index 5acd9c02683a..5486f8768c86 100644
--- a/drivers/pci/vc.c
+++ b/drivers/pci/vc.c
@@ -13,6 +13,8 @@
13#include <linux/pci_regs.h> 13#include <linux/pci_regs.h>
14#include <linux/types.h> 14#include <linux/types.h>
15 15
16#include "pci.h"
17
16/** 18/**
17 * pci_vc_save_restore_dwords - Save or restore a series of dwords 19 * pci_vc_save_restore_dwords - Save or restore a series of dwords
18 * @dev: device 20 * @dev: device
@@ -105,7 +107,7 @@ static void pci_vc_enable(struct pci_dev *dev, int pos, int res)
105 struct pci_dev *link = NULL; 107 struct pci_dev *link = NULL;
106 108
107 /* Enable VCs from the downstream device */ 109 /* Enable VCs from the downstream device */
108 if (!dev->has_secondary_link) 110 if (!pci_is_pcie(dev) || !pcie_downstream_port(dev))
109 return; 111 return;
110 112
111 ctrl_pos = pos + PCI_VC_RES_CTRL + (res * PCI_CAP_VC_PER_VC_SIZEOF); 113 ctrl_pos = pos + PCI_VC_RES_CTRL + (res * PCI_CAP_VC_PER_VC_SIZEOF);
@@ -409,7 +411,6 @@ void pci_restore_vc_state(struct pci_dev *dev)
409 * For each type of VC capability, VC/VC9/MFVC, find the capability, size 411 * For each type of VC capability, VC/VC9/MFVC, find the capability, size
410 * it, and allocate a buffer for save/restore. 412 * it, and allocate a buffer for save/restore.
411 */ 413 */
412
413void pci_allocate_vc_save_buffers(struct pci_dev *dev) 414void pci_allocate_vc_save_buffers(struct pci_dev *dev)
414{ 415{
415 int i; 416 int i;
diff --git a/drivers/pci/vpd.c b/drivers/pci/vpd.c
index 4963c2e2bd4c..7915d10f9aa1 100644
--- a/drivers/pci/vpd.c
+++ b/drivers/pci/vpd.c
@@ -571,6 +571,12 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LSI_LOGIC, 0x005f, quirk_blacklist_vpd);
571DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATTANSIC, PCI_ANY_ID, 571DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATTANSIC, PCI_ANY_ID,
572 quirk_blacklist_vpd); 572 quirk_blacklist_vpd);
573DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_QLOGIC, 0x2261, quirk_blacklist_vpd); 573DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_QLOGIC, 0x2261, quirk_blacklist_vpd);
574/*
 575 * The Amazon Annapurna Labs 0x0031 device ID is reused for other non-Root-Port
576 * device types, so the quirk is registered for the PCI_CLASS_BRIDGE_PCI class.
577 */
578DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_AMAZON_ANNAPURNA_LABS, 0x0031,
579 PCI_CLASS_BRIDGE_PCI, 8, quirk_blacklist_vpd);
574 580
575/* 581/*
576 * For Broadcom 5706, 5708, 5709 rev. A nics, any read beyond the 582 * For Broadcom 5706, 5708, 5709 rev. A nics, any read beyond the
diff --git a/drivers/phy/tegra/Kconfig b/drivers/phy/tegra/Kconfig
index e516967d695b..f9817c3ae85f 100644
--- a/drivers/phy/tegra/Kconfig
+++ b/drivers/phy/tegra/Kconfig
@@ -7,3 +7,10 @@ config PHY_TEGRA_XUSB
7 7
8 To compile this driver as a module, choose M here: the module will 8 To compile this driver as a module, choose M here: the module will
9 be called phy-tegra-xusb. 9 be called phy-tegra-xusb.
10
11config PHY_TEGRA194_P2U
12 tristate "NVIDIA Tegra194 PIPE2UPHY PHY driver"
13 depends on ARCH_TEGRA_194_SOC || COMPILE_TEST
14 select GENERIC_PHY
15 help
 16 Enable this to support the P2U (PIPE to UPHY) block that is part of Tegra 19x SoCs.
diff --git a/drivers/phy/tegra/Makefile b/drivers/phy/tegra/Makefile
index 64ccaeacb631..320dd389f34d 100644
--- a/drivers/phy/tegra/Makefile
+++ b/drivers/phy/tegra/Makefile
@@ -6,3 +6,4 @@ phy-tegra-xusb-$(CONFIG_ARCH_TEGRA_124_SOC) += xusb-tegra124.o
6phy-tegra-xusb-$(CONFIG_ARCH_TEGRA_132_SOC) += xusb-tegra124.o 6phy-tegra-xusb-$(CONFIG_ARCH_TEGRA_132_SOC) += xusb-tegra124.o
7phy-tegra-xusb-$(CONFIG_ARCH_TEGRA_210_SOC) += xusb-tegra210.o 7phy-tegra-xusb-$(CONFIG_ARCH_TEGRA_210_SOC) += xusb-tegra210.o
8phy-tegra-xusb-$(CONFIG_ARCH_TEGRA_186_SOC) += xusb-tegra186.o 8phy-tegra-xusb-$(CONFIG_ARCH_TEGRA_186_SOC) += xusb-tegra186.o
9obj-$(CONFIG_PHY_TEGRA194_P2U) += phy-tegra194-p2u.o
diff --git a/drivers/phy/tegra/phy-tegra194-p2u.c b/drivers/phy/tegra/phy-tegra194-p2u.c
new file mode 100644
index 000000000000..7042bed9feaa
--- /dev/null
+++ b/drivers/phy/tegra/phy-tegra194-p2u.c
@@ -0,0 +1,120 @@
1// SPDX-License-Identifier: GPL-2.0+
2/*
3 * P2U (PIPE to UPHY) driver for Tegra T194 SoC
4 *
5 * Copyright (C) 2019 NVIDIA Corporation.
6 *
7 * Author: Vidya Sagar <vidyas@nvidia.com>
8 */
9
10#include <linux/err.h>
11#include <linux/io.h>
12#include <linux/module.h>
13#include <linux/of.h>
14#include <linux/of_platform.h>
15#include <linux/phy/phy.h>
16
17#define P2U_PERIODIC_EQ_CTRL_GEN3 0xc0
18#define P2U_PERIODIC_EQ_CTRL_GEN3_PERIODIC_EQ_EN BIT(0)
19#define P2U_PERIODIC_EQ_CTRL_GEN3_INIT_PRESET_EQ_TRAIN_EN BIT(1)
20#define P2U_PERIODIC_EQ_CTRL_GEN4 0xc4
21#define P2U_PERIODIC_EQ_CTRL_GEN4_INIT_PRESET_EQ_TRAIN_EN BIT(1)
22
23#define P2U_RX_DEBOUNCE_TIME 0xa4
24#define P2U_RX_DEBOUNCE_TIME_DEBOUNCE_TIMER_MASK 0xffff
25#define P2U_RX_DEBOUNCE_TIME_DEBOUNCE_TIMER_VAL 160
26
27struct tegra_p2u {
28 void __iomem *base;
29};
30
31static inline void p2u_writel(struct tegra_p2u *phy, const u32 value,
32 const u32 reg)
33{
34 writel_relaxed(value, phy->base + reg);
35}
36
37static inline u32 p2u_readl(struct tegra_p2u *phy, const u32 reg)
38{
39 return readl_relaxed(phy->base + reg);
40}
41
42static int tegra_p2u_power_on(struct phy *x)
43{
44 struct tegra_p2u *phy = phy_get_drvdata(x);
45 u32 val;
46
47 val = p2u_readl(phy, P2U_PERIODIC_EQ_CTRL_GEN3);
48 val &= ~P2U_PERIODIC_EQ_CTRL_GEN3_PERIODIC_EQ_EN;
49 val |= P2U_PERIODIC_EQ_CTRL_GEN3_INIT_PRESET_EQ_TRAIN_EN;
50 p2u_writel(phy, val, P2U_PERIODIC_EQ_CTRL_GEN3);
51
52 val = p2u_readl(phy, P2U_PERIODIC_EQ_CTRL_GEN4);
53 val |= P2U_PERIODIC_EQ_CTRL_GEN4_INIT_PRESET_EQ_TRAIN_EN;
54 p2u_writel(phy, val, P2U_PERIODIC_EQ_CTRL_GEN4);
55
56 val = p2u_readl(phy, P2U_RX_DEBOUNCE_TIME);
57 val &= ~P2U_RX_DEBOUNCE_TIME_DEBOUNCE_TIMER_MASK;
58 val |= P2U_RX_DEBOUNCE_TIME_DEBOUNCE_TIMER_VAL;
59 p2u_writel(phy, val, P2U_RX_DEBOUNCE_TIME);
60
61 return 0;
62}
63
64static const struct phy_ops ops = {
65 .power_on = tegra_p2u_power_on,
66 .owner = THIS_MODULE,
67};
68
69static int tegra_p2u_probe(struct platform_device *pdev)
70{
71 struct phy_provider *phy_provider;
72 struct device *dev = &pdev->dev;
73 struct phy *generic_phy;
74 struct tegra_p2u *phy;
75 struct resource *res;
76
77 phy = devm_kzalloc(dev, sizeof(*phy), GFP_KERNEL);
78 if (!phy)
79 return -ENOMEM;
80
81 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ctl");
82 phy->base = devm_ioremap_resource(dev, res);
83 if (IS_ERR(phy->base))
84 return PTR_ERR(phy->base);
85
86 platform_set_drvdata(pdev, phy);
87
88 generic_phy = devm_phy_create(dev, NULL, &ops);
89 if (IS_ERR(generic_phy))
90 return PTR_ERR(generic_phy);
91
92 phy_set_drvdata(generic_phy, phy);
93
94 phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
95 if (IS_ERR(phy_provider))
96 return PTR_ERR(phy_provider);
97
98 return 0;
99}
100
101static const struct of_device_id tegra_p2u_id_table[] = {
102 {
103 .compatible = "nvidia,tegra194-p2u",
104 },
105 {}
106};
107MODULE_DEVICE_TABLE(of, tegra_p2u_id_table);
108
109static struct platform_driver tegra_p2u_driver = {
110 .probe = tegra_p2u_probe,
111 .driver = {
112 .name = "tegra194-p2u",
113 .of_match_table = tegra_p2u_id_table,
114 },
115};
116module_platform_driver(tegra_p2u_driver);
117
118MODULE_AUTHOR("Vidya Sagar <vidyas@nvidia.com>");
119MODULE_DESCRIPTION("NVIDIA Tegra194 PIPE2UPHY PHY driver");
120MODULE_LICENSE("GPL v2");
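
Each P2U instance is exposed through the generic PHY framework, so the PCIe controller driver consumes it with the usual phy_*() calls. A hypothetical consumer sketch; the lookup name "p2u-0" and the function name are illustrative only and come from the consumer's own binding, not from this driver:

	#include <linux/phy/phy.h>

	static int example_enable_lane_phy(struct device *dev)
	{
		struct phy *p2u;
		int ret;

		p2u = devm_phy_get(dev, "p2u-0");	/* per-lane PHY handle from DT */
		if (IS_ERR(p2u))
			return PTR_ERR(p2u);

		ret = phy_init(p2u);
		if (ret)
			return ret;

		/* phy_power_on() is what ends up calling tegra_p2u_power_on() above. */
		return phy_power_on(p2u);
	}
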
diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig
index 1b67bb578f9f..ae21d08c65e8 100644
--- a/drivers/platform/x86/Kconfig
+++ b/drivers/platform/x86/Kconfig
@@ -674,6 +674,7 @@ config EEEPC_LAPTOP
674config ASUS_WMI 674config ASUS_WMI
675 tristate "ASUS WMI Driver" 675 tristate "ASUS WMI Driver"
676 depends on ACPI_WMI 676 depends on ACPI_WMI
677 depends on ACPI_BATTERY
677 depends on INPUT 678 depends on INPUT
678 depends on HWMON 679 depends on HWMON
679 depends on BACKLIGHT_CLASS_DEVICE 680 depends on BACKLIGHT_CLASS_DEVICE
diff --git a/drivers/platform/x86/i2c-multi-instantiate.c b/drivers/platform/x86/i2c-multi-instantiate.c
index 61fe341a85aa..ea68f6ed66ae 100644
--- a/drivers/platform/x86/i2c-multi-instantiate.c
+++ b/drivers/platform/x86/i2c-multi-instantiate.c
@@ -90,7 +90,7 @@ static int i2c_multi_inst_probe(struct platform_device *pdev)
90 for (i = 0; i < multi->num_clients && inst_data[i].type; i++) { 90 for (i = 0; i < multi->num_clients && inst_data[i].type; i++) {
91 memset(&board_info, 0, sizeof(board_info)); 91 memset(&board_info, 0, sizeof(board_info));
92 strlcpy(board_info.type, inst_data[i].type, I2C_NAME_SIZE); 92 strlcpy(board_info.type, inst_data[i].type, I2C_NAME_SIZE);
93 snprintf(name, sizeof(name), "%s-%s.%d", match->id, 93 snprintf(name, sizeof(name), "%s-%s.%d", dev_name(dev),
94 inst_data[i].type, i); 94 inst_data[i].type, i);
95 board_info.dev_name = name; 95 board_info.dev_name = name;
96 switch (inst_data[i].flags & IRQ_RESOURCE_TYPE) { 96 switch (inst_data[i].flags & IRQ_RESOURCE_TYPE) {
diff --git a/drivers/platform/x86/pmc_atom.c b/drivers/platform/x86/pmc_atom.c
index 9aca5e7ce6d0..07d1b911e72f 100644
--- a/drivers/platform/x86/pmc_atom.c
+++ b/drivers/platform/x86/pmc_atom.c
@@ -422,6 +422,13 @@ static const struct dmi_system_id critclk_systems[] = {
422 DMI_MATCH(DMI_PRODUCT_VERSION, "6ES7647-8B"), 422 DMI_MATCH(DMI_PRODUCT_VERSION, "6ES7647-8B"),
423 }, 423 },
424 }, 424 },
425 {
426 .ident = "SIMATIC IPC277E",
427 .matches = {
428 DMI_MATCH(DMI_SYS_VENDOR, "SIEMENS AG"),
429 DMI_MATCH(DMI_PRODUCT_VERSION, "6AV7882-0"),
430 },
431 },
425 { /*sentinel*/ } 432 { /*sentinel*/ }
426}; 433};
427 434
diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
index 644f7f5c61a2..4a858789e6c5 100644
--- a/drivers/scsi/aacraid/linit.c
+++ b/drivers/scsi/aacraid/linit.c
@@ -27,7 +27,6 @@
27#include <linux/moduleparam.h> 27#include <linux/moduleparam.h>
28#include <linux/pci.h> 28#include <linux/pci.h>
29#include <linux/aer.h> 29#include <linux/aer.h>
30#include <linux/pci-aspm.h>
31#include <linux/slab.h> 30#include <linux/slab.h>
32#include <linux/mutex.h> 31#include <linux/mutex.h>
33#include <linux/spinlock.h> 32#include <linux/spinlock.h>
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
index 1bb6aada93fa..ac39ed79ccaa 100644
--- a/drivers/scsi/hpsa.c
+++ b/drivers/scsi/hpsa.c
@@ -21,7 +21,6 @@
21#include <linux/interrupt.h> 21#include <linux/interrupt.h>
22#include <linux/types.h> 22#include <linux/types.h>
23#include <linux/pci.h> 23#include <linux/pci.h>
24#include <linux/pci-aspm.h>
25#include <linux/kernel.h> 24#include <linux/kernel.h>
26#include <linux/slab.h> 25#include <linux/slab.h>
27#include <linux/delay.h> 26#include <linux/delay.h>
diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
index d0c2f8d6f2a2..c8e512ba6d39 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
@@ -51,7 +51,6 @@
51#include <linux/workqueue.h> 51#include <linux/workqueue.h>
52#include <linux/delay.h> 52#include <linux/delay.h>
53#include <linux/pci.h> 53#include <linux/pci.h>
54#include <linux/pci-aspm.h>
55#include <linux/interrupt.h> 54#include <linux/interrupt.h>
56#include <linux/aer.h> 55#include <linux/aer.h>
57#include <linux/raid_class.h> 56#include <linux/raid_class.h>
diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
index 213ff03c8a9f..59d9d512dcda 100644
--- a/drivers/usb/gadget/function/f_fs.c
+++ b/drivers/usb/gadget/function/f_fs.c
@@ -17,6 +17,7 @@
17#include <linux/blkdev.h> 17#include <linux/blkdev.h>
18#include <linux/pagemap.h> 18#include <linux/pagemap.h>
19#include <linux/export.h> 19#include <linux/export.h>
20#include <linux/fs_parser.h>
20#include <linux/hid.h> 21#include <linux/hid.h>
21#include <linux/mm.h> 22#include <linux/mm.h>
22#include <linux/module.h> 23#include <linux/module.h>
@@ -1451,9 +1452,9 @@ struct ffs_sb_fill_data {
1451 struct ffs_data *ffs_data; 1452 struct ffs_data *ffs_data;
1452}; 1453};
1453 1454
1454static int ffs_sb_fill(struct super_block *sb, void *_data, int silent) 1455static int ffs_sb_fill(struct super_block *sb, struct fs_context *fc)
1455{ 1456{
1456 struct ffs_sb_fill_data *data = _data; 1457 struct ffs_sb_fill_data *data = fc->fs_private;
1457 struct inode *inode; 1458 struct inode *inode;
1458 struct ffs_data *ffs = data->ffs_data; 1459 struct ffs_data *ffs = data->ffs_data;
1459 1460
@@ -1486,147 +1487,152 @@ static int ffs_sb_fill(struct super_block *sb, void *_data, int silent)
1486 return 0; 1487 return 0;
1487} 1488}
1488 1489
1489static int ffs_fs_parse_opts(struct ffs_sb_fill_data *data, char *opts) 1490enum {
1490{ 1491 Opt_no_disconnect,
1491 ENTER(); 1492 Opt_rmode,
1493 Opt_fmode,
1494 Opt_mode,
1495 Opt_uid,
1496 Opt_gid,
1497};
1492 1498
1493 if (!opts || !*opts) 1499static const struct fs_parameter_spec ffs_fs_param_specs[] = {
1494 return 0; 1500 fsparam_bool ("no_disconnect", Opt_no_disconnect),
1501 fsparam_u32 ("rmode", Opt_rmode),
1502 fsparam_u32 ("fmode", Opt_fmode),
1503 fsparam_u32 ("mode", Opt_mode),
1504 fsparam_u32 ("uid", Opt_uid),
1505 fsparam_u32 ("gid", Opt_gid),
1506 {}
1507};
1495 1508
1496 for (;;) { 1509static const struct fs_parameter_description ffs_fs_fs_parameters = {
1497 unsigned long value; 1510 .name = "functionfs",
1498 char *eq, *comma; 1511 .specs = ffs_fs_param_specs,
1499 1512};
1500 /* Option limit */
1501 comma = strchr(opts, ',');
1502 if (comma)
1503 *comma = 0;
1504
1505 /* Value limit */
1506 eq = strchr(opts, '=');
1507 if (unlikely(!eq)) {
1508 pr_err("'=' missing in %s\n", opts);
1509 return -EINVAL;
1510 }
1511 *eq = 0;
1512 1513
1513 /* Parse value */ 1514static int ffs_fs_parse_param(struct fs_context *fc, struct fs_parameter *param)
1514 if (kstrtoul(eq + 1, 0, &value)) { 1515{
1515 pr_err("%s: invalid value: %s\n", opts, eq + 1); 1516 struct ffs_sb_fill_data *data = fc->fs_private;
1516 return -EINVAL; 1517 struct fs_parse_result result;
1517 } 1518 int opt;
1518 1519
1519 /* Interpret option */ 1520 ENTER();
1520 switch (eq - opts) {
1521 case 13:
1522 if (!memcmp(opts, "no_disconnect", 13))
1523 data->no_disconnect = !!value;
1524 else
1525 goto invalid;
1526 break;
1527 case 5:
1528 if (!memcmp(opts, "rmode", 5))
1529 data->root_mode = (value & 0555) | S_IFDIR;
1530 else if (!memcmp(opts, "fmode", 5))
1531 data->perms.mode = (value & 0666) | S_IFREG;
1532 else
1533 goto invalid;
1534 break;
1535 1521
1536 case 4: 1522 opt = fs_parse(fc, &ffs_fs_fs_parameters, param, &result);
1537 if (!memcmp(opts, "mode", 4)) { 1523 if (opt < 0)
1538 data->root_mode = (value & 0555) | S_IFDIR; 1524 return opt;
1539 data->perms.mode = (value & 0666) | S_IFREG;
1540 } else {
1541 goto invalid;
1542 }
1543 break;
1544 1525
1545 case 3: 1526 switch (opt) {
1546 if (!memcmp(opts, "uid", 3)) { 1527 case Opt_no_disconnect:
1547 data->perms.uid = make_kuid(current_user_ns(), value); 1528 data->no_disconnect = result.boolean;
1548 if (!uid_valid(data->perms.uid)) { 1529 break;
1549 pr_err("%s: unmapped value: %lu\n", opts, value); 1530 case Opt_rmode:
1550 return -EINVAL; 1531 data->root_mode = (result.uint_32 & 0555) | S_IFDIR;
1551 } 1532 break;
1552 } else if (!memcmp(opts, "gid", 3)) { 1533 case Opt_fmode:
1553 data->perms.gid = make_kgid(current_user_ns(), value); 1534 data->perms.mode = (result.uint_32 & 0666) | S_IFREG;
1554 if (!gid_valid(data->perms.gid)) { 1535 break;
1555 pr_err("%s: unmapped value: %lu\n", opts, value); 1536 case Opt_mode:
1556 return -EINVAL; 1537 data->root_mode = (result.uint_32 & 0555) | S_IFDIR;
1557 } 1538 data->perms.mode = (result.uint_32 & 0666) | S_IFREG;
1558 } else { 1539 break;
1559 goto invalid;
1560 }
1561 break;
1562 1540
1563 default: 1541 case Opt_uid:
1564invalid: 1542 data->perms.uid = make_kuid(current_user_ns(), result.uint_32);
1565 pr_err("%s: invalid option\n", opts); 1543 if (!uid_valid(data->perms.uid))
1566 return -EINVAL; 1544 goto unmapped_value;
1567 } 1545 break;
1546 case Opt_gid:
1547 data->perms.gid = make_kgid(current_user_ns(), result.uint_32);
1548 if (!gid_valid(data->perms.gid))
1549 goto unmapped_value;
1550 break;
1568 1551
1569 /* Next iteration */ 1552 default:
1570 if (!comma) 1553 return -ENOPARAM;
1571 break;
1572 opts = comma + 1;
1573 } 1554 }
1574 1555
1575 return 0; 1556 return 0;
1576}
1577 1557
1578/* "mount -t functionfs dev_name /dev/function" ends up here */ 1558unmapped_value:
1559 return invalf(fc, "%s: unmapped value: %u", param->key, result.uint_32);
1560}
1579 1561
1580static struct dentry * 1562/*
1581ffs_fs_mount(struct file_system_type *t, int flags, 1563 * Set up the superblock for a mount.
1582 const char *dev_name, void *opts) 1564 */
1583{ 1565static int ffs_fs_get_tree(struct fs_context *fc)
1584 struct ffs_sb_fill_data data = { 1566{
1585 .perms = { 1567 struct ffs_sb_fill_data *ctx = fc->fs_private;
1586 .mode = S_IFREG | 0600,
1587 .uid = GLOBAL_ROOT_UID,
1588 .gid = GLOBAL_ROOT_GID,
1589 },
1590 .root_mode = S_IFDIR | 0500,
1591 .no_disconnect = false,
1592 };
1593 struct dentry *rv;
1594 int ret;
1595 void *ffs_dev; 1568 void *ffs_dev;
1596 struct ffs_data *ffs; 1569 struct ffs_data *ffs;
1597 1570
1598 ENTER(); 1571 ENTER();
1599 1572
1600 ret = ffs_fs_parse_opts(&data, opts); 1573 if (!fc->source)
1601 if (unlikely(ret < 0)) 1574 return invalf(fc, "No source specified");
1602 return ERR_PTR(ret);
1603 1575
1604 ffs = ffs_data_new(dev_name); 1576 ffs = ffs_data_new(fc->source);
1605 if (unlikely(!ffs)) 1577 if (unlikely(!ffs))
1606 return ERR_PTR(-ENOMEM); 1578 return -ENOMEM;
1607 ffs->file_perms = data.perms; 1579 ffs->file_perms = ctx->perms;
1608 ffs->no_disconnect = data.no_disconnect; 1580 ffs->no_disconnect = ctx->no_disconnect;
1609 1581
1610 ffs->dev_name = kstrdup(dev_name, GFP_KERNEL); 1582 ffs->dev_name = kstrdup(fc->source, GFP_KERNEL);
1611 if (unlikely(!ffs->dev_name)) { 1583 if (unlikely(!ffs->dev_name)) {
1612 ffs_data_put(ffs); 1584 ffs_data_put(ffs);
1613 return ERR_PTR(-ENOMEM); 1585 return -ENOMEM;
1614 } 1586 }
1615 1587
1616 ffs_dev = ffs_acquire_dev(dev_name); 1588 ffs_dev = ffs_acquire_dev(ffs->dev_name);
1617 if (IS_ERR(ffs_dev)) { 1589 if (IS_ERR(ffs_dev)) {
1618 ffs_data_put(ffs); 1590 ffs_data_put(ffs);
1619 return ERR_CAST(ffs_dev); 1591 return PTR_ERR(ffs_dev);
1620 } 1592 }
1593
1621 ffs->private_data = ffs_dev; 1594 ffs->private_data = ffs_dev;
1622 data.ffs_data = ffs; 1595 ctx->ffs_data = ffs;
1596 return get_tree_nodev(fc, ffs_sb_fill);
1597}
1598
1599static void ffs_fs_free_fc(struct fs_context *fc)
1600{
1601 struct ffs_sb_fill_data *ctx = fc->fs_private;
1602
1603 if (ctx) {
1604 if (ctx->ffs_data) {
1605 ffs_release_dev(ctx->ffs_data);
1606 ffs_data_put(ctx->ffs_data);
1607 }
1623 1608
1624 rv = mount_nodev(t, flags, &data, ffs_sb_fill); 1609 kfree(ctx);
1625 if (IS_ERR(rv) && data.ffs_data) {
1626 ffs_release_dev(data.ffs_data);
1627 ffs_data_put(data.ffs_data);
1628 } 1610 }
1629 return rv; 1611}
1612
1613static const struct fs_context_operations ffs_fs_context_ops = {
1614 .free = ffs_fs_free_fc,
1615 .parse_param = ffs_fs_parse_param,
1616 .get_tree = ffs_fs_get_tree,
1617};
1618
1619static int ffs_fs_init_fs_context(struct fs_context *fc)
1620{
1621 struct ffs_sb_fill_data *ctx;
1622
1623 ctx = kzalloc(sizeof(struct ffs_sb_fill_data), GFP_KERNEL);
1624 if (!ctx)
1625 return -ENOMEM;
1626
1627 ctx->perms.mode = S_IFREG | 0600;
1628 ctx->perms.uid = GLOBAL_ROOT_UID;
1629 ctx->perms.gid = GLOBAL_ROOT_GID;
1630 ctx->root_mode = S_IFDIR | 0500;
1631 ctx->no_disconnect = false;
1632
1633 fc->fs_private = ctx;
1634 fc->ops = &ffs_fs_context_ops;
1635 return 0;
1630} 1636}
1631 1637
1632static void 1638static void
@@ -1644,7 +1650,8 @@ ffs_fs_kill_sb(struct super_block *sb)
1644static struct file_system_type ffs_fs_type = { 1650static struct file_system_type ffs_fs_type = {
1645 .owner = THIS_MODULE, 1651 .owner = THIS_MODULE,
1646 .name = "functionfs", 1652 .name = "functionfs",
1647 .mount = ffs_fs_mount, 1653 .init_fs_context = ffs_fs_init_fs_context,
1654 .parameters = &ffs_fs_fs_parameters,
1648 .kill_sb = ffs_fs_kill_sb, 1655 .kill_sb = ffs_fs_kill_sb,
1649}; 1656};
1650MODULE_ALIAS_FS("functionfs"); 1657MODULE_ALIAS_FS("functionfs");
diff --git a/drivers/video/backlight/Kconfig b/drivers/video/backlight/Kconfig
index 8b081d61773e..40676be2e46a 100644
--- a/drivers/video/backlight/Kconfig
+++ b/drivers/video/backlight/Kconfig
@@ -10,7 +10,6 @@ menu "Backlight & LCD device support"
10# 10#
11config LCD_CLASS_DEVICE 11config LCD_CLASS_DEVICE
12 tristate "Lowlevel LCD controls" 12 tristate "Lowlevel LCD controls"
13 default m
14 help 13 help
15 This framework adds support for low-level control of LCD. 14 This framework adds support for low-level control of LCD.
16 Some framebuffer devices connect to platform-specific LCD modules 15 Some framebuffer devices connect to platform-specific LCD modules
@@ -143,7 +142,6 @@ endif # LCD_CLASS_DEVICE
143# 142#
144config BACKLIGHT_CLASS_DEVICE 143config BACKLIGHT_CLASS_DEVICE
145 tristate "Lowlevel Backlight controls" 144 tristate "Lowlevel Backlight controls"
146 default m
147 help 145 help
148 This framework adds support for low-level control of the LCD 146 This framework adds support for low-level control of the LCD
149 backlight. This includes support for brightness and power. 147 backlight. This includes support for brightness and power.
diff --git a/drivers/video/backlight/backlight.c b/drivers/video/backlight/backlight.c
index 5dc07106a59e..cac3e35d7630 100644
--- a/drivers/video/backlight/backlight.c
+++ b/drivers/video/backlight/backlight.c
@@ -32,6 +32,12 @@ static const char *const backlight_types[] = {
32 [BACKLIGHT_FIRMWARE] = "firmware", 32 [BACKLIGHT_FIRMWARE] = "firmware",
33}; 33};
34 34
35static const char *const backlight_scale_types[] = {
36 [BACKLIGHT_SCALE_UNKNOWN] = "unknown",
37 [BACKLIGHT_SCALE_LINEAR] = "linear",
38 [BACKLIGHT_SCALE_NON_LINEAR] = "non-linear",
39};
40
35#if defined(CONFIG_FB) || (defined(CONFIG_FB_MODULE) && \ 41#if defined(CONFIG_FB) || (defined(CONFIG_FB_MODULE) && \
36 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)) 42 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE))
37/* This callback gets called when something important happens inside a 43/* This callback gets called when something important happens inside a
@@ -246,6 +252,18 @@ static ssize_t actual_brightness_show(struct device *dev,
246} 252}
247static DEVICE_ATTR_RO(actual_brightness); 253static DEVICE_ATTR_RO(actual_brightness);
248 254
255static ssize_t scale_show(struct device *dev,
256 struct device_attribute *attr, char *buf)
257{
258 struct backlight_device *bd = to_backlight_device(dev);
259
260 if (WARN_ON(bd->props.scale > BACKLIGHT_SCALE_NON_LINEAR))
261 return sprintf(buf, "unknown\n");
262
263 return sprintf(buf, "%s\n", backlight_scale_types[bd->props.scale]);
264}
265static DEVICE_ATTR_RO(scale);
266
249static struct class *backlight_class; 267static struct class *backlight_class;
250 268
251#ifdef CONFIG_PM_SLEEP 269#ifdef CONFIG_PM_SLEEP
@@ -292,6 +310,7 @@ static struct attribute *bl_device_attrs[] = {
292 &dev_attr_brightness.attr, 310 &dev_attr_brightness.attr,
293 &dev_attr_actual_brightness.attr, 311 &dev_attr_actual_brightness.attr,
294 &dev_attr_max_brightness.attr, 312 &dev_attr_max_brightness.attr,
313 &dev_attr_scale.attr,
295 &dev_attr_type.attr, 314 &dev_attr_type.attr,
296 NULL, 315 NULL,
297}; 316};
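
The new attribute appears as a read-only "scale" file next to brightness and max_brightness. A minimal userspace read, with a hypothetical device name ("backlight0" is only an example; the actual name is platform-specific):

	#include <stdio.h>

	int main(void)
	{
		char buf[32];
		FILE *f = fopen("/sys/class/backlight/backlight0/scale", "r");

		if (!f)
			return 1;
		if (fgets(buf, sizeof(buf), f))
			printf("brightness scale: %s", buf);	/* "linear", "non-linear" or "unknown" */
		fclose(f);
		return 0;
	}
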
diff --git a/drivers/video/backlight/gpio_backlight.c b/drivers/video/backlight/gpio_backlight.c
index e84f3087e29f..18e053e4716c 100644
--- a/drivers/video/backlight/gpio_backlight.c
+++ b/drivers/video/backlight/gpio_backlight.c
@@ -59,13 +59,11 @@ static int gpio_backlight_probe_dt(struct platform_device *pdev,
59 struct gpio_backlight *gbl) 59 struct gpio_backlight *gbl)
60{ 60{
61 struct device *dev = &pdev->dev; 61 struct device *dev = &pdev->dev;
62 enum gpiod_flags flags;
63 int ret; 62 int ret;
64 63
65 gbl->def_value = device_property_read_bool(dev, "default-on"); 64 gbl->def_value = device_property_read_bool(dev, "default-on");
66 flags = gbl->def_value ? GPIOD_OUT_HIGH : GPIOD_OUT_LOW;
67 65
68 gbl->gpiod = devm_gpiod_get(dev, NULL, flags); 66 gbl->gpiod = devm_gpiod_get(dev, NULL, GPIOD_ASIS);
69 if (IS_ERR(gbl->gpiod)) { 67 if (IS_ERR(gbl->gpiod)) {
70 ret = PTR_ERR(gbl->gpiod); 68 ret = PTR_ERR(gbl->gpiod);
71 69
@@ -79,6 +77,22 @@ static int gpio_backlight_probe_dt(struct platform_device *pdev,
79 return 0; 77 return 0;
80} 78}
81 79
80static int gpio_backlight_initial_power_state(struct gpio_backlight *gbl)
81{
82 struct device_node *node = gbl->dev->of_node;
83
84 /* Not booted with device tree or no phandle link to the node */
85 if (!node || !node->phandle)
86 return gbl->def_value ? FB_BLANK_UNBLANK : FB_BLANK_POWERDOWN;
87
88 /* if the enable GPIO is disabled, do not enable the backlight */
89 if (gpiod_get_value_cansleep(gbl->gpiod) == 0)
90 return FB_BLANK_POWERDOWN;
91
92 return FB_BLANK_UNBLANK;
93}
94
95
82static int gpio_backlight_probe(struct platform_device *pdev) 96static int gpio_backlight_probe(struct platform_device *pdev)
83{ 97{
84 struct gpio_backlight_platform_data *pdata = 98 struct gpio_backlight_platform_data *pdata =
@@ -136,7 +150,9 @@ static int gpio_backlight_probe(struct platform_device *pdev)
136 return PTR_ERR(bl); 150 return PTR_ERR(bl);
137 } 151 }
138 152
139 bl->props.brightness = gbl->def_value; 153 bl->props.power = gpio_backlight_initial_power_state(gbl);
154 bl->props.brightness = 1;
155
140 backlight_update_status(bl); 156 backlight_update_status(bl);
141 157
142 platform_set_drvdata(pdev, bl); 158 platform_set_drvdata(pdev, bl);
diff --git a/drivers/video/backlight/lm3630a_bl.c b/drivers/video/backlight/lm3630a_bl.c
index b04b35d007a2..2d8e8192e4e2 100644
--- a/drivers/video/backlight/lm3630a_bl.c
+++ b/drivers/video/backlight/lm3630a_bl.c
@@ -377,8 +377,7 @@ static int lm3630a_parse_led_sources(struct fwnode_handle *node,
377 u32 sources[LM3630A_NUM_SINKS]; 377 u32 sources[LM3630A_NUM_SINKS];
378 int ret, num_sources, i; 378 int ret, num_sources, i;
379 379
380 num_sources = fwnode_property_read_u32_array(node, "led-sources", NULL, 380 num_sources = fwnode_property_count_u32(node, "led-sources");
381 0);
382 if (num_sources < 0) 381 if (num_sources < 0)
383 return default_led_sources; 382 return default_led_sources;
384 else if (num_sources > ARRAY_SIZE(sources)) 383 else if (num_sources > ARRAY_SIZE(sources))
diff --git a/drivers/video/backlight/lms283gf05.c b/drivers/video/backlight/lms283gf05.c
index 35bc012b22cc..0e45685bcc1c 100644
--- a/drivers/video/backlight/lms283gf05.c
+++ b/drivers/video/backlight/lms283gf05.c
@@ -158,7 +158,7 @@ static int lms283gf05_probe(struct spi_device *spi)
158 ret = devm_gpio_request_one(&spi->dev, pdata->reset_gpio, 158 ret = devm_gpio_request_one(&spi->dev, pdata->reset_gpio,
159 GPIOF_DIR_OUT | (!pdata->reset_inverted ? 159 GPIOF_DIR_OUT | (!pdata->reset_inverted ?
160 GPIOF_INIT_HIGH : GPIOF_INIT_LOW), 160 GPIOF_INIT_HIGH : GPIOF_INIT_LOW),
161 "LMS285GF05 RESET"); 161 "LMS283GF05 RESET");
162 if (ret) 162 if (ret)
163 return ret; 163 return ret;
164 } 164 }
diff --git a/drivers/video/backlight/pwm_bl.c b/drivers/video/backlight/pwm_bl.c
index 2201b8c78641..746eebc411df 100644
--- a/drivers/video/backlight/pwm_bl.c
+++ b/drivers/video/backlight/pwm_bl.c
@@ -387,6 +387,31 @@ int pwm_backlight_brightness_default(struct device *dev,
387} 387}
388#endif 388#endif
389 389
390static bool pwm_backlight_is_linear(struct platform_pwm_backlight_data *data)
391{
392 unsigned int nlevels = data->max_brightness + 1;
393 unsigned int min_val = data->levels[0];
394 unsigned int max_val = data->levels[nlevels - 1];
395 /*
396 * Multiplying by 128 means that even in pathological cases such
397 * as (max_val - min_val) == nlevels the error at max_val is less
398 * than 1%.
399 */
400 unsigned int slope = (128 * (max_val - min_val)) / nlevels;
401 unsigned int margin = (max_val - min_val) / 20; /* 5% */
402 int i;
403
404 for (i = 1; i < nlevels; i++) {
405 unsigned int linear_value = min_val + ((i * slope) / 128);
406 unsigned int delta = abs(linear_value - data->levels[i]);
407
408 if (delta > margin)
409 return false;
410 }
411
412 return true;
413}
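
The heuristic above compares each level against an integer-math straight line (the slope is scaled by 128 to keep precision) and tolerates a 5% deviation. A standalone toy version, run on a made-up identity table rather than any real platform data, shows how a plain linear table is classified:

	#include <stdio.h>
	#include <stdlib.h>

	int main(void)
	{
		enum { NLEVELS = 256 };
		unsigned int levels[NLEVELS];
		unsigned int i;

		for (i = 0; i < NLEVELS; i++)
			levels[i] = i;			/* a perfectly linear 8-bit table */

		unsigned int min_val = levels[0], max_val = levels[NLEVELS - 1];
		unsigned int slope = (128 * (max_val - min_val)) / NLEVELS;
		unsigned int margin = (max_val - min_val) / 20;	/* 5% */

		for (i = 1; i < NLEVELS; i++) {
			unsigned int linear_value = min_val + ((i * slope) / 128);

			if (abs((int)(linear_value - levels[i])) > margin) {
				printf("scale = non-linear (first outlier at index %u)\n", i);
				return 0;
			}
		}
		printf("scale = linear\n");
		return 0;
	}
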
414
390static int pwm_backlight_initial_power_state(const struct pwm_bl_data *pb) 415static int pwm_backlight_initial_power_state(const struct pwm_bl_data *pb)
391{ 416{
392 struct device_node *node = pb->dev->of_node; 417 struct device_node *node = pb->dev->of_node;
@@ -536,6 +561,8 @@ static int pwm_backlight_probe(struct platform_device *pdev)
536 goto err_alloc; 561 goto err_alloc;
537 } 562 }
538 563
564 memset(&props, 0, sizeof(struct backlight_properties));
565
539 if (data->levels) { 566 if (data->levels) {
540 /* 567 /*
541 * For the DT case, only when brightness levels is defined 568 * For the DT case, only when brightness levels is defined
@@ -548,6 +575,11 @@ static int pwm_backlight_probe(struct platform_device *pdev)
548 575
549 pb->levels = data->levels; 576 pb->levels = data->levels;
550 } 577 }
578
579 if (pwm_backlight_is_linear(data))
580 props.scale = BACKLIGHT_SCALE_LINEAR;
581 else
582 props.scale = BACKLIGHT_SCALE_NON_LINEAR;
551 } else if (!data->max_brightness) { 583 } else if (!data->max_brightness) {
552 /* 584 /*
553 * If no brightness levels are provided and max_brightness is 585 * If no brightness levels are provided and max_brightness is
@@ -574,6 +606,8 @@ static int pwm_backlight_probe(struct platform_device *pdev)
574 606
575 pb->levels = data->levels; 607 pb->levels = data->levels;
576 } 608 }
609
610 props.scale = BACKLIGHT_SCALE_NON_LINEAR;
577 } else { 611 } else {
578 /* 612 /*
579 * That only happens for the non-DT case, where platform data 613 * That only happens for the non-DT case, where platform data
@@ -584,7 +618,6 @@ static int pwm_backlight_probe(struct platform_device *pdev)
584 618
585 pb->lth_brightness = data->lth_brightness * (state.period / pb->scale); 619 pb->lth_brightness = data->lth_brightness * (state.period / pb->scale);
586 620
587 memset(&props, 0, sizeof(struct backlight_properties));
588 props.type = BACKLIGHT_RAW; 621 props.type = BACKLIGHT_RAW;
589 props.max_brightness = data->max_brightness; 622 props.max_brightness = data->max_brightness;
590 bl = backlight_device_register(dev_name(&pdev->dev), &pdev->dev, pb, 623 bl = backlight_device_register(dev_name(&pdev->dev), &pdev->dev, pb,
diff --git a/drivers/video/backlight/rave-sp-backlight.c b/drivers/video/backlight/rave-sp-backlight.c
index 462f14a1b19d..05b5f003a3d1 100644
--- a/drivers/video/backlight/rave-sp-backlight.c
+++ b/drivers/video/backlight/rave-sp-backlight.c
@@ -48,14 +48,20 @@ static int rave_sp_backlight_probe(struct platform_device *pdev)
48 struct device *dev = &pdev->dev; 48 struct device *dev = &pdev->dev;
49 struct backlight_device *bd; 49 struct backlight_device *bd;
50 50
51 bd = devm_backlight_device_register(dev, pdev->name, dev->parent, 51 bd = devm_backlight_device_register(dev, pdev->name, dev,
52 dev_get_drvdata(dev->parent), 52 dev_get_drvdata(dev->parent),
53 &rave_sp_backlight_ops, 53 &rave_sp_backlight_ops,
54 &rave_sp_backlight_props); 54 &rave_sp_backlight_props);
55 if (IS_ERR(bd)) 55 if (IS_ERR(bd))
56 return PTR_ERR(bd); 56 return PTR_ERR(bd);
57 57
58 backlight_update_status(bd); 58 /*
59 * If there is a phandle pointing to the device node we can
60 * assume that another device will manage the status changes.
61 * If not we make sure the backlight is in a consistent state.
62 */
63 if (!dev->of_node->phandle)
64 backlight_update_status(bd);
59 65
60 return 0; 66 return 0;
61} 67}
diff --git a/drivers/video/backlight/tosa_lcd.c b/drivers/video/backlight/tosa_lcd.c
index 65cb7578776f..29af8e27b6e5 100644
--- a/drivers/video/backlight/tosa_lcd.c
+++ b/drivers/video/backlight/tosa_lcd.c
@@ -222,8 +222,7 @@ static int tosa_lcd_remove(struct spi_device *spi)
222{ 222{
223 struct tosa_lcd_data *data = spi_get_drvdata(spi); 223 struct tosa_lcd_data *data = spi_get_drvdata(spi);
224 224
225 if (data->i2c) 225 i2c_unregister_device(data->i2c);
226 i2c_unregister_device(data->i2c);
227 226
228 tosa_lcd_tg_off(data); 227 tosa_lcd_tg_off(data);
229 228
diff --git a/fs/gfs2/incore.h b/fs/gfs2/incore.h
index 6b450065b9d5..5f89c515f5bb 100644
--- a/fs/gfs2/incore.h
+++ b/fs/gfs2/incore.h
@@ -584,10 +584,10 @@ struct gfs2_args {
584 unsigned int ar_rgrplvb:1; /* use lvbs for rgrp info */ 584 unsigned int ar_rgrplvb:1; /* use lvbs for rgrp info */
585 unsigned int ar_loccookie:1; /* use location based readdir 585 unsigned int ar_loccookie:1; /* use location based readdir
586 cookies */ 586 cookies */
587 int ar_commit; /* Commit interval */ 587 s32 ar_commit; /* Commit interval */
588 int ar_statfs_quantum; /* The fast statfs interval */ 588 s32 ar_statfs_quantum; /* The fast statfs interval */
589 int ar_quota_quantum; /* The quota interval */ 589 s32 ar_quota_quantum; /* The quota interval */
590 int ar_statfs_percent; /* The % change to force sync */ 590 s32 ar_statfs_percent; /* The % change to force sync */
591}; 591};
592 592
593struct gfs2_tune { 593struct gfs2_tune {
diff --git a/fs/gfs2/ops_fstype.c b/fs/gfs2/ops_fstype.c
index f3fd5cd9d43f..681b44682b0d 100644
--- a/fs/gfs2/ops_fstype.c
+++ b/fs/gfs2/ops_fstype.c
@@ -21,6 +21,7 @@
21#include <linux/lockdep.h> 21#include <linux/lockdep.h>
22#include <linux/module.h> 22#include <linux/module.h>
23#include <linux/backing-dev.h> 23#include <linux/backing-dev.h>
24#include <linux/fs_parser.h>
24 25
25#include "gfs2.h" 26#include "gfs2.h"
26#include "incore.h" 27#include "incore.h"
@@ -1031,16 +1032,17 @@ void gfs2_online_uevent(struct gfs2_sbd *sdp)
1031} 1032}
1032 1033
1033/** 1034/**
1034 * fill_super - Read in superblock 1035 * gfs2_fill_super - Read in superblock
1035 * @sb: The VFS superblock 1036 * @sb: The VFS superblock
1036 * @data: Mount options 1037 * @args: Mount options
1037 * @silent: Don't complain if it's not a GFS2 filesystem 1038 * @silent: Don't complain if it's not a GFS2 filesystem
1038 * 1039 *
1039 * Returns: errno 1040 * Returns: -errno
1040 */ 1041 */
1041 1042static int gfs2_fill_super(struct super_block *sb, struct fs_context *fc)
1042static int fill_super(struct super_block *sb, struct gfs2_args *args, int silent)
1043{ 1043{
1044 struct gfs2_args *args = fc->fs_private;
1045 int silent = fc->sb_flags & SB_SILENT;
1044 struct gfs2_sbd *sdp; 1046 struct gfs2_sbd *sdp;
1045 struct gfs2_holder mount_gh; 1047 struct gfs2_holder mount_gh;
1046 int error; 1048 int error;
@@ -1205,161 +1207,411 @@ fail_debug:
1205 return error; 1207 return error;
1206} 1208}
1207 1209
1208static int set_gfs2_super(struct super_block *s, void *data) 1210/**
1211 * gfs2_get_tree - Get the GFS2 superblock and root directory
1212 * @fc: The filesystem context
1213 *
1214 * Returns: 0 or -errno on error
1215 */
1216static int gfs2_get_tree(struct fs_context *fc)
1209{ 1217{
1210 s->s_bdev = data; 1218 struct gfs2_args *args = fc->fs_private;
1211 s->s_dev = s->s_bdev->bd_dev; 1219 struct gfs2_sbd *sdp;
1212 s->s_bdi = bdi_get(s->s_bdev->bd_bdi); 1220 int error;
1221
1222 error = get_tree_bdev(fc, gfs2_fill_super);
1223 if (error)
1224 return error;
1225
1226 sdp = fc->root->d_sb->s_fs_info;
1227 dput(fc->root);
1228 if (args->ar_meta)
1229 fc->root = dget(sdp->sd_master_dir);
1230 else
1231 fc->root = dget(sdp->sd_root_dir);
1213 return 0; 1232 return 0;
1214} 1233}
1215 1234
1216static int test_gfs2_super(struct super_block *s, void *ptr) 1235static void gfs2_fc_free(struct fs_context *fc)
1217{ 1236{
1218 struct block_device *bdev = ptr; 1237 struct gfs2_args *args = fc->fs_private;
1219 return (bdev == s->s_bdev); 1238
1239 kfree(args);
1220} 1240}
1221 1241
1222/** 1242enum gfs2_param {
1223 * gfs2_mount - Get the GFS2 superblock 1243 Opt_lockproto,
1224 * @fs_type: The GFS2 filesystem type 1244 Opt_locktable,
1225 * @flags: Mount flags 1245 Opt_hostdata,
1226 * @dev_name: The name of the device 1246 Opt_spectator,
1227 * @data: The mount arguments 1247 Opt_ignore_local_fs,
1228 * 1248 Opt_localflocks,
1229 * Q. Why not use get_sb_bdev() ? 1249 Opt_localcaching,
1230 * A. We need to select one of two root directories to mount, independent 1250 Opt_debug,
1231 * of whether this is the initial, or subsequent, mount of this sb 1251 Opt_upgrade,
1232 * 1252 Opt_acl,
1233 * Returns: 0 or -ve on error 1253 Opt_quota,
1234 */ 1254 Opt_suiddir,
1255 Opt_data,
1256 Opt_meta,
1257 Opt_discard,
1258 Opt_commit,
1259 Opt_errors,
1260 Opt_statfs_quantum,
1261 Opt_statfs_percent,
1262 Opt_quota_quantum,
1263 Opt_barrier,
1264 Opt_rgrplvb,
1265 Opt_loccookie,
1266};
1235 1267
1236static struct dentry *gfs2_mount(struct file_system_type *fs_type, int flags, 1268enum opt_quota {
1237 const char *dev_name, void *data) 1269 Opt_quota_unset = 0,
1238{ 1270 Opt_quota_off,
1239 struct block_device *bdev; 1271 Opt_quota_account,
1240 struct super_block *s; 1272 Opt_quota_on,
1241 fmode_t mode = FMODE_READ | FMODE_EXCL; 1273};
1242 int error; 1274
1243 struct gfs2_args args; 1275static const unsigned int opt_quota_values[] = {
1244 struct gfs2_sbd *sdp; 1276 [Opt_quota_off] = GFS2_QUOTA_OFF,
1277 [Opt_quota_account] = GFS2_QUOTA_ACCOUNT,
1278 [Opt_quota_on] = GFS2_QUOTA_ON,
1279};
1245 1280
1246 if (!(flags & SB_RDONLY)) 1281enum opt_data {
1247 mode |= FMODE_WRITE; 1282 Opt_data_writeback = GFS2_DATA_WRITEBACK,
1283 Opt_data_ordered = GFS2_DATA_ORDERED,
1284};
1248 1285
1249 bdev = blkdev_get_by_path(dev_name, mode, fs_type); 1286enum opt_errors {
1250 if (IS_ERR(bdev)) 1287 Opt_errors_withdraw = GFS2_ERRORS_WITHDRAW,
1251 return ERR_CAST(bdev); 1288 Opt_errors_panic = GFS2_ERRORS_PANIC,
1289};
1252 1290
1253 /* 1291static const struct fs_parameter_spec gfs2_param_specs[] = {
1254 * once the super is inserted into the list by sget, s_umount 1292 fsparam_string ("lockproto", Opt_lockproto),
1255 * will protect the lockfs code from trying to start a snapshot 1293 fsparam_string ("locktable", Opt_locktable),
1256 * while we are mounting 1294 fsparam_string ("hostdata", Opt_hostdata),
1257 */ 1295 fsparam_flag ("spectator", Opt_spectator),
1258 mutex_lock(&bdev->bd_fsfreeze_mutex); 1296 fsparam_flag ("norecovery", Opt_spectator),
1259 if (bdev->bd_fsfreeze_count > 0) { 1297 fsparam_flag ("ignore_local_fs", Opt_ignore_local_fs),
1260 mutex_unlock(&bdev->bd_fsfreeze_mutex); 1298 fsparam_flag ("localflocks", Opt_localflocks),
1261 error = -EBUSY; 1299 fsparam_flag ("localcaching", Opt_localcaching),
1262 goto error_bdev; 1300 fsparam_flag_no("debug", Opt_debug),
1263 } 1301 fsparam_flag ("upgrade", Opt_upgrade),
1264 s = sget(fs_type, test_gfs2_super, set_gfs2_super, flags, bdev); 1302 fsparam_flag_no("acl", Opt_acl),
1265 mutex_unlock(&bdev->bd_fsfreeze_mutex); 1303 fsparam_flag_no("suiddir", Opt_suiddir),
1266 error = PTR_ERR(s); 1304 fsparam_enum ("data", Opt_data),
1267 if (IS_ERR(s)) 1305 fsparam_flag ("meta", Opt_meta),
1268 goto error_bdev; 1306 fsparam_flag_no("discard", Opt_discard),
1269 1307 fsparam_s32 ("commit", Opt_commit),
1270 if (s->s_root) { 1308 fsparam_enum ("errors", Opt_errors),
1271 /* 1309 fsparam_s32 ("statfs_quantum", Opt_statfs_quantum),
1272 * s_umount nests inside bd_mutex during 1310 fsparam_s32 ("statfs_percent", Opt_statfs_percent),
1273 * __invalidate_device(). blkdev_put() acquires 1311 fsparam_s32 ("quota_quantum", Opt_quota_quantum),
1274 * bd_mutex and can't be called under s_umount. Drop 1312 fsparam_flag_no("barrier", Opt_barrier),
1275 * s_umount temporarily. This is safe as we're 1313 fsparam_flag_no("rgrplvb", Opt_rgrplvb),
1276 * holding an active reference. 1314 fsparam_flag_no("loccookie", Opt_loccookie),
1277 */ 1315 /* quota can be a flag or an enum so it gets special treatment */
1278 up_write(&s->s_umount); 1316 __fsparam(fs_param_is_enum, "quota", Opt_quota, fs_param_neg_with_no|fs_param_v_optional),
1279 blkdev_put(bdev, mode); 1317 {}
1280 down_write(&s->s_umount); 1318};
1281 } else {
1282 /* s_mode must be set before deactivate_locked_super calls */
1283 s->s_mode = mode;
1284 }
1285 1319
1286 memset(&args, 0, sizeof(args)); 1320static const struct fs_parameter_enum gfs2_param_enums[] = {
1287 args.ar_quota = GFS2_QUOTA_DEFAULT; 1321 { Opt_quota, "off", Opt_quota_off },
1288 args.ar_data = GFS2_DATA_DEFAULT; 1322 { Opt_quota, "account", Opt_quota_account },
1289 args.ar_commit = 30; 1323 { Opt_quota, "on", Opt_quota_on },
1290 args.ar_statfs_quantum = 30; 1324 { Opt_data, "writeback", Opt_data_writeback },
1291 args.ar_quota_quantum = 60; 1325 { Opt_data, "ordered", Opt_data_ordered },
1292 args.ar_errors = GFS2_ERRORS_DEFAULT; 1326 { Opt_errors, "withdraw", Opt_errors_withdraw },
1327 { Opt_errors, "panic", Opt_errors_panic },
1328 {}
1329};
1293 1330
1294 error = gfs2_mount_args(&args, data); 1331const struct fs_parameter_description gfs2_fs_parameters = {
1295 if (error) { 1332 .name = "gfs2",
1296 pr_warn("can't parse mount arguments\n"); 1333 .specs = gfs2_param_specs,
1297 goto error_super; 1334 .enums = gfs2_param_enums,
1335};
1336
1337/* Parse a single mount parameter */
1338static int gfs2_parse_param(struct fs_context *fc, struct fs_parameter *param)
1339{
1340 struct gfs2_args *args = fc->fs_private;
1341 struct fs_parse_result result;
1342 int o;
1343
1344 o = fs_parse(fc, &gfs2_fs_parameters, param, &result);
1345 if (o < 0)
1346 return o;
1347
1348 switch (o) {
1349 case Opt_lockproto:
1350 strlcpy(args->ar_lockproto, param->string, GFS2_LOCKNAME_LEN);
1351 break;
1352 case Opt_locktable:
1353 strlcpy(args->ar_locktable, param->string, GFS2_LOCKNAME_LEN);
1354 break;
1355 case Opt_hostdata:
1356 strlcpy(args->ar_hostdata, param->string, GFS2_LOCKNAME_LEN);
1357 break;
1358 case Opt_spectator:
1359 args->ar_spectator = 1;
1360 break;
1361 case Opt_ignore_local_fs:
1362 /* Retained for backwards compat only */
1363 break;
1364 case Opt_localflocks:
1365 args->ar_localflocks = 1;
1366 break;
1367 case Opt_localcaching:
1368 /* Retained for backwards compat only */
1369 break;
1370 case Opt_debug:
1371 if (result.boolean && args->ar_errors == GFS2_ERRORS_PANIC)
1372 return invalf(fc, "gfs2: -o debug and -o errors=panic are mutually exclusive");
1373 args->ar_debug = result.boolean;
1374 break;
1375 case Opt_upgrade:
1376 /* Retained for backwards compat only */
1377 break;
1378 case Opt_acl:
1379 args->ar_posix_acl = result.boolean;
1380 break;
1381 case Opt_quota:
1382 /* The quota option can be a flag or an enum. A non-zero int_32
1383 result means that we have an enum index. Otherwise we have
1384 to rely on the 'negated' flag to tell us whether 'quota' or
1385 'noquota' was specified. */
1386 if (result.negated)
1387 args->ar_quota = GFS2_QUOTA_OFF;
1388 else if (result.int_32 > 0)
1389 args->ar_quota = opt_quota_values[result.int_32];
1390 else
1391 args->ar_quota = GFS2_QUOTA_ON;
1392 break;
1393 case Opt_suiddir:
1394 args->ar_suiddir = result.boolean;
1395 break;
1396 case Opt_data:
1397 /* The uint_32 result maps directly to GFS2_DATA_* */
1398 args->ar_data = result.uint_32;
1399 break;
1400 case Opt_meta:
1401 args->ar_meta = 1;
1402 break;
1403 case Opt_discard:
1404 args->ar_discard = result.boolean;
1405 break;
1406 case Opt_commit:
1407 if (result.int_32 <= 0)
1408 return invalf(fc, "gfs2: commit mount option requires a positive numeric argument");
1409 args->ar_commit = result.int_32;
1410 break;
1411 case Opt_statfs_quantum:
1412 if (result.int_32 < 0)
1413 return invalf(fc, "gfs2: statfs_quantum mount option requires a non-negative numeric argument");
1414 args->ar_statfs_quantum = result.int_32;
1415 break;
1416 case Opt_quota_quantum:
1417 if (result.int_32 <= 0)
1418 return invalf(fc, "gfs2: quota_quantum mount option requires a positive numeric argument");
1419 args->ar_quota_quantum = result.int_32;
1420 break;
1421 case Opt_statfs_percent:
1422 if (result.int_32 < 0 || result.int_32 > 100)
1423 return invalf(fc, "gfs2: statfs_percent mount option requires a numeric argument between 0 and 100");
1424 args->ar_statfs_percent = result.int_32;
1425 break;
1426 case Opt_errors:
1427 if (args->ar_debug && result.uint_32 == GFS2_ERRORS_PANIC)
1428 return invalf(fc, "gfs2: -o debug and -o errors=panic are mutually exclusive");
1429 args->ar_errors = result.uint_32;
1430 break;
1431 case Opt_barrier:
1432 args->ar_nobarrier = result.boolean;
1433 break;
1434 case Opt_rgrplvb:
1435 args->ar_rgrplvb = result.boolean;
1436 break;
1437 case Opt_loccookie:
1438 args->ar_loccookie = result.boolean;
1439 break;
1440 default:
1441 return invalf(fc, "gfs2: invalid mount option: %s", param->key);
1298 } 1442 }
1443 return 0;
1444}
1299 1445
1300 if (s->s_root) { 1446static int gfs2_reconfigure(struct fs_context *fc)
1301 error = -EBUSY; 1447{
1302 if ((flags ^ s->s_flags) & SB_RDONLY) 1448 struct super_block *sb = fc->root->d_sb;
1303 goto error_super; 1449 struct gfs2_sbd *sdp = sb->s_fs_info;
1304 } else { 1450 struct gfs2_args *oldargs = &sdp->sd_args;
1305 snprintf(s->s_id, sizeof(s->s_id), "%pg", bdev); 1451 struct gfs2_args *newargs = fc->fs_private;
1306 sb_set_blocksize(s, block_size(bdev)); 1452 struct gfs2_tune *gt = &sdp->sd_tune;
1307 error = fill_super(s, &args, flags & SB_SILENT ? 1 : 0); 1453 int error = 0;
1308 if (error) 1454
1309 goto error_super; 1455 sync_filesystem(sb);
1310 s->s_flags |= SB_ACTIVE; 1456
1311 bdev->bd_super = s; 1457 spin_lock(&gt->gt_spin);
1458 oldargs->ar_commit = gt->gt_logd_secs;
1459 oldargs->ar_quota_quantum = gt->gt_quota_quantum;
1460 if (gt->gt_statfs_slow)
1461 oldargs->ar_statfs_quantum = 0;
1462 else
1463 oldargs->ar_statfs_quantum = gt->gt_statfs_quantum;
1464 spin_unlock(&gt->gt_spin);
1465
1466 if (strcmp(newargs->ar_lockproto, oldargs->ar_lockproto)) {
1467 errorf(fc, "gfs2: reconfiguration of locking protocol not allowed");
1468 return -EINVAL;
1469 }
1470 if (strcmp(newargs->ar_locktable, oldargs->ar_locktable)) {
1471 errorf(fc, "gfs2: reconfiguration of lock table not allowed");
1472 return -EINVAL;
1473 }
1474 if (strcmp(newargs->ar_hostdata, oldargs->ar_hostdata)) {
1475 errorf(fc, "gfs2: reconfiguration of host data not allowed");
1476 return -EINVAL;
1477 }
1478 if (newargs->ar_spectator != oldargs->ar_spectator) {
1479 errorf(fc, "gfs2: reconfiguration of spectator mode not allowed");
1480 return -EINVAL;
1481 }
1482 if (newargs->ar_localflocks != oldargs->ar_localflocks) {
1483 errorf(fc, "gfs2: reconfiguration of localflocks not allowed");
1484 return -EINVAL;
1485 }
1486 if (newargs->ar_meta != oldargs->ar_meta) {
1487 errorf(fc, "gfs2: switching between gfs2 and gfs2meta not allowed");
1488 return -EINVAL;
1489 }
1490 if (oldargs->ar_spectator)
1491 fc->sb_flags |= SB_RDONLY;
1492
1493 if ((sb->s_flags ^ fc->sb_flags) & SB_RDONLY) {
1494 if (fc->sb_flags & SB_RDONLY) {
1495 error = gfs2_make_fs_ro(sdp);
1496 if (error)
1497 errorf(fc, "gfs2: unable to remount read-only");
1498 } else {
1499 error = gfs2_make_fs_rw(sdp);
1500 if (error)
1501 errorf(fc, "gfs2: unable to remount read-write");
1502 }
1312 } 1503 }
1504 sdp->sd_args = *newargs;
1313 1505
1314 sdp = s->s_fs_info; 1506 if (sdp->sd_args.ar_posix_acl)
1315 if (args.ar_meta) 1507 sb->s_flags |= SB_POSIXACL;
1316 return dget(sdp->sd_master_dir); 1508 else
1509 sb->s_flags &= ~SB_POSIXACL;
1510 if (sdp->sd_args.ar_nobarrier)
1511 set_bit(SDF_NOBARRIERS, &sdp->sd_flags);
1317 else 1512 else
1318 return dget(sdp->sd_root_dir); 1513 clear_bit(SDF_NOBARRIERS, &sdp->sd_flags);
1319 1514 spin_lock(&gt->gt_spin);
1320error_super: 1515 gt->gt_logd_secs = newargs->ar_commit;
1321 deactivate_locked_super(s); 1516 gt->gt_quota_quantum = newargs->ar_quota_quantum;
1322 return ERR_PTR(error); 1517 if (newargs->ar_statfs_quantum) {
1323error_bdev: 1518 gt->gt_statfs_slow = 0;
1324 blkdev_put(bdev, mode); 1519 gt->gt_statfs_quantum = newargs->ar_statfs_quantum;
1325 return ERR_PTR(error); 1520 }
1521 else {
1522 gt->gt_statfs_slow = 1;
1523 gt->gt_statfs_quantum = 30;
1524 }
1525 spin_unlock(&gt->gt_spin);
1526
1527 gfs2_online_uevent(sdp);
1528 return error;
1529}
1530
1531static const struct fs_context_operations gfs2_context_ops = {
1532 .free = gfs2_fc_free,
1533 .parse_param = gfs2_parse_param,
1534 .get_tree = gfs2_get_tree,
1535 .reconfigure = gfs2_reconfigure,
1536};
1537
1538/* Set up the filesystem mount context */
1539static int gfs2_init_fs_context(struct fs_context *fc)
1540{
1541 struct gfs2_args *args;
1542
1543 args = kzalloc(sizeof(*args), GFP_KERNEL);
1544 if (args == NULL)
1545 return -ENOMEM;
1546
1547 args->ar_quota = GFS2_QUOTA_DEFAULT;
1548 args->ar_data = GFS2_DATA_DEFAULT;
1549 args->ar_commit = 30;
1550 args->ar_statfs_quantum = 30;
1551 args->ar_quota_quantum = 60;
1552 args->ar_errors = GFS2_ERRORS_DEFAULT;
1553
1554 fc->fs_private = args;
1555 fc->ops = &gfs2_context_ops;
1556 return 0;
1326} 1557}
1327 1558
1328static int set_meta_super(struct super_block *s, void *ptr) 1559static int set_meta_super(struct super_block *s, struct fs_context *fc)
1329{ 1560{
1330 return -EINVAL; 1561 return -EINVAL;
1331} 1562}
1332 1563
1333static struct dentry *gfs2_mount_meta(struct file_system_type *fs_type, 1564static int test_meta_super(struct super_block *s, struct fs_context *fc)
1334 int flags, const char *dev_name, void *data) 1565{
1566 return (fc->sget_key == s->s_bdev);
1567}
1568
1569static int gfs2_meta_get_tree(struct fs_context *fc)
1335{ 1570{
1336 struct super_block *s; 1571 struct super_block *s;
1337 struct gfs2_sbd *sdp; 1572 struct gfs2_sbd *sdp;
1338 struct path path; 1573 struct path path;
1339 int error; 1574 int error;
1340 1575
1341 if (!dev_name || !*dev_name) 1576 if (!fc->source || !*fc->source)
1342 return ERR_PTR(-EINVAL); 1577 return -EINVAL;
1343 1578
1344 error = kern_path(dev_name, LOOKUP_FOLLOW, &path); 1579 error = kern_path(fc->source, LOOKUP_FOLLOW, &path);
1345 if (error) { 1580 if (error) {
1346 pr_warn("path_lookup on %s returned error %d\n", 1581 pr_warn("path_lookup on %s returned error %d\n",
1347 dev_name, error); 1582 fc->source, error);
1348 return ERR_PTR(error); 1583 return error;
1349 } 1584 }
1350 s = sget(&gfs2_fs_type, test_gfs2_super, set_meta_super, flags, 1585 fc->fs_type = &gfs2_fs_type;
1351 path.dentry->d_sb->s_bdev); 1586 fc->sget_key = path.dentry->d_sb->s_bdev;
1587 s = sget_fc(fc, test_meta_super, set_meta_super);
1352 path_put(&path); 1588 path_put(&path);
1353 if (IS_ERR(s)) { 1589 if (IS_ERR(s)) {
1354 pr_warn("gfs2 mount does not exist\n"); 1590 pr_warn("gfs2 mount does not exist\n");
1355 return ERR_CAST(s); 1591 return PTR_ERR(s);
1356 } 1592 }
1357 if ((flags ^ s->s_flags) & SB_RDONLY) { 1593 if ((fc->sb_flags ^ s->s_flags) & SB_RDONLY) {
1358 deactivate_locked_super(s); 1594 deactivate_locked_super(s);
1359 return ERR_PTR(-EBUSY); 1595 return -EBUSY;
1360 } 1596 }
1361 sdp = s->s_fs_info; 1597 sdp = s->s_fs_info;
1362 return dget(sdp->sd_master_dir); 1598 fc->root = dget(sdp->sd_master_dir);
1599 return 0;
1600}
1601
1602static const struct fs_context_operations gfs2_meta_context_ops = {
1603 .get_tree = gfs2_meta_get_tree,
1604};
1605
1606static int gfs2_meta_init_fs_context(struct fs_context *fc)
1607{
1608 int ret = gfs2_init_fs_context(fc);
1609
1610 if (ret)
1611 return ret;
1612
1613 fc->ops = &gfs2_meta_context_ops;
1614 return 0;
1363} 1615}
1364 1616
1365static void gfs2_kill_sb(struct super_block *sb) 1617static void gfs2_kill_sb(struct super_block *sb)
@@ -1383,7 +1635,8 @@ static void gfs2_kill_sb(struct super_block *sb)
1383struct file_system_type gfs2_fs_type = { 1635struct file_system_type gfs2_fs_type = {
1384 .name = "gfs2", 1636 .name = "gfs2",
1385 .fs_flags = FS_REQUIRES_DEV, 1637 .fs_flags = FS_REQUIRES_DEV,
1386 .mount = gfs2_mount, 1638 .init_fs_context = gfs2_init_fs_context,
1639 .parameters = &gfs2_fs_parameters,
1387 .kill_sb = gfs2_kill_sb, 1640 .kill_sb = gfs2_kill_sb,
1388 .owner = THIS_MODULE, 1641 .owner = THIS_MODULE,
1389}; 1642};
@@ -1392,7 +1645,7 @@ MODULE_ALIAS_FS("gfs2");
1392struct file_system_type gfs2meta_fs_type = { 1645struct file_system_type gfs2meta_fs_type = {
1393 .name = "gfs2meta", 1646 .name = "gfs2meta",
1394 .fs_flags = FS_REQUIRES_DEV, 1647 .fs_flags = FS_REQUIRES_DEV,
1395 .mount = gfs2_mount_meta, 1648 .init_fs_context = gfs2_meta_init_fs_context,
1396 .owner = THIS_MODULE, 1649 .owner = THIS_MODULE,
1397}; 1650};
1398MODULE_ALIAS_FS("gfs2meta"); 1651MODULE_ALIAS_FS("gfs2meta");
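
The conversion above replaces gfs2's hand-rolled option-string parser with the new mount API: the VFS splits the option string and calls ->parse_param() once per key, and remount now goes through ->reconfigure() instead of ->remount_fs(). A minimal, hypothetical sketch of that pattern (the "example" names are illustrative and not taken from gfs2), assuming the fs_parameter_spec/fsparam_*() helpers and fs_parse() available at the time of this conversion:

/* Hypothetical sketch of new-mount-API option parsing; all "example"
 * names are illustrative and not part of gfs2. */
enum { Opt_example_commit, Opt_example_acl };

static const struct fs_parameter_spec example_specs[] = {
	fsparam_s32("commit", Opt_example_commit),
	fsparam_flag_no("acl", Opt_example_acl),
	{}
};

static const struct fs_parameter_description example_parameters = {
	.name	= "example",
	.specs	= example_specs,
};

static int example_parse_param(struct fs_context *fc,
			       struct fs_parameter *param)
{
	struct fs_parse_result result;
	int opt;

	opt = fs_parse(fc, &example_parameters, param, &result);
	if (opt < 0)
		return opt;

	switch (opt) {
	case Opt_example_commit:
		if (result.int_32 <= 0)
			return invalf(fc, "example: commit must be positive");
		/* store result.int_32 in fc->fs_private */
		break;
	case Opt_example_acl:
		/* result.boolean distinguishes "acl" from "noacl" */
		break;
	}
	return 0;
}

fs_parse() fills result.int_32 or result.boolean depending on the parameter type, which is what the gfs2_parse_param() switch above consumes.
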
diff --git a/fs/gfs2/super.c b/fs/gfs2/super.c
index 644c70ae09f7..5fa1eec4fb4f 100644
--- a/fs/gfs2/super.c
+++ b/fs/gfs2/super.c
@@ -44,258 +44,6 @@
44#include "xattr.h" 44#include "xattr.h"
45#include "lops.h" 45#include "lops.h"
46 46
47#define args_neq(a1, a2, x) ((a1)->ar_##x != (a2)->ar_##x)
48
49enum {
50 Opt_lockproto,
51 Opt_locktable,
52 Opt_hostdata,
53 Opt_spectator,
54 Opt_ignore_local_fs,
55 Opt_localflocks,
56 Opt_localcaching,
57 Opt_debug,
58 Opt_nodebug,
59 Opt_upgrade,
60 Opt_acl,
61 Opt_noacl,
62 Opt_quota_off,
63 Opt_quota_account,
64 Opt_quota_on,
65 Opt_quota,
66 Opt_noquota,
67 Opt_suiddir,
68 Opt_nosuiddir,
69 Opt_data_writeback,
70 Opt_data_ordered,
71 Opt_meta,
72 Opt_discard,
73 Opt_nodiscard,
74 Opt_commit,
75 Opt_err_withdraw,
76 Opt_err_panic,
77 Opt_statfs_quantum,
78 Opt_statfs_percent,
79 Opt_quota_quantum,
80 Opt_barrier,
81 Opt_nobarrier,
82 Opt_rgrplvb,
83 Opt_norgrplvb,
84 Opt_loccookie,
85 Opt_noloccookie,
86 Opt_error,
87};
88
89static const match_table_t tokens = {
90 {Opt_lockproto, "lockproto=%s"},
91 {Opt_locktable, "locktable=%s"},
92 {Opt_hostdata, "hostdata=%s"},
93 {Opt_spectator, "spectator"},
94 {Opt_spectator, "norecovery"},
95 {Opt_ignore_local_fs, "ignore_local_fs"},
96 {Opt_localflocks, "localflocks"},
97 {Opt_localcaching, "localcaching"},
98 {Opt_debug, "debug"},
99 {Opt_nodebug, "nodebug"},
100 {Opt_upgrade, "upgrade"},
101 {Opt_acl, "acl"},
102 {Opt_noacl, "noacl"},
103 {Opt_quota_off, "quota=off"},
104 {Opt_quota_account, "quota=account"},
105 {Opt_quota_on, "quota=on"},
106 {Opt_quota, "quota"},
107 {Opt_noquota, "noquota"},
108 {Opt_suiddir, "suiddir"},
109 {Opt_nosuiddir, "nosuiddir"},
110 {Opt_data_writeback, "data=writeback"},
111 {Opt_data_ordered, "data=ordered"},
112 {Opt_meta, "meta"},
113 {Opt_discard, "discard"},
114 {Opt_nodiscard, "nodiscard"},
115 {Opt_commit, "commit=%d"},
116 {Opt_err_withdraw, "errors=withdraw"},
117 {Opt_err_panic, "errors=panic"},
118 {Opt_statfs_quantum, "statfs_quantum=%d"},
119 {Opt_statfs_percent, "statfs_percent=%d"},
120 {Opt_quota_quantum, "quota_quantum=%d"},
121 {Opt_barrier, "barrier"},
122 {Opt_nobarrier, "nobarrier"},
123 {Opt_rgrplvb, "rgrplvb"},
124 {Opt_norgrplvb, "norgrplvb"},
125 {Opt_loccookie, "loccookie"},
126 {Opt_noloccookie, "noloccookie"},
127 {Opt_error, NULL}
128};
129
130/**
131 * gfs2_mount_args - Parse mount options
132 * @args: The structure into which the parsed options will be written
133 * @options: The options to parse
134 *
135 * Return: errno
136 */
137
138int gfs2_mount_args(struct gfs2_args *args, char *options)
139{
140 char *o;
141 int token;
142 substring_t tmp[MAX_OPT_ARGS];
143 int rv;
144
145 /* Split the options into tokens with the "," character and
146 process them */
147
148 while (1) {
149 o = strsep(&options, ",");
150 if (o == NULL)
151 break;
152 if (*o == '\0')
153 continue;
154
155 token = match_token(o, tokens, tmp);
156 switch (token) {
157 case Opt_lockproto:
158 match_strlcpy(args->ar_lockproto, &tmp[0],
159 GFS2_LOCKNAME_LEN);
160 break;
161 case Opt_locktable:
162 match_strlcpy(args->ar_locktable, &tmp[0],
163 GFS2_LOCKNAME_LEN);
164 break;
165 case Opt_hostdata:
166 match_strlcpy(args->ar_hostdata, &tmp[0],
167 GFS2_LOCKNAME_LEN);
168 break;
169 case Opt_spectator:
170 args->ar_spectator = 1;
171 break;
172 case Opt_ignore_local_fs:
173 /* Retained for backwards compat only */
174 break;
175 case Opt_localflocks:
176 args->ar_localflocks = 1;
177 break;
178 case Opt_localcaching:
179 /* Retained for backwards compat only */
180 break;
181 case Opt_debug:
182 if (args->ar_errors == GFS2_ERRORS_PANIC) {
183 pr_warn("-o debug and -o errors=panic are mutually exclusive\n");
184 return -EINVAL;
185 }
186 args->ar_debug = 1;
187 break;
188 case Opt_nodebug:
189 args->ar_debug = 0;
190 break;
191 case Opt_upgrade:
192 /* Retained for backwards compat only */
193 break;
194 case Opt_acl:
195 args->ar_posix_acl = 1;
196 break;
197 case Opt_noacl:
198 args->ar_posix_acl = 0;
199 break;
200 case Opt_quota_off:
201 case Opt_noquota:
202 args->ar_quota = GFS2_QUOTA_OFF;
203 break;
204 case Opt_quota_account:
205 args->ar_quota = GFS2_QUOTA_ACCOUNT;
206 break;
207 case Opt_quota_on:
208 case Opt_quota:
209 args->ar_quota = GFS2_QUOTA_ON;
210 break;
211 case Opt_suiddir:
212 args->ar_suiddir = 1;
213 break;
214 case Opt_nosuiddir:
215 args->ar_suiddir = 0;
216 break;
217 case Opt_data_writeback:
218 args->ar_data = GFS2_DATA_WRITEBACK;
219 break;
220 case Opt_data_ordered:
221 args->ar_data = GFS2_DATA_ORDERED;
222 break;
223 case Opt_meta:
224 args->ar_meta = 1;
225 break;
226 case Opt_discard:
227 args->ar_discard = 1;
228 break;
229 case Opt_nodiscard:
230 args->ar_discard = 0;
231 break;
232 case Opt_commit:
233 rv = match_int(&tmp[0], &args->ar_commit);
234 if (rv || args->ar_commit <= 0) {
235 pr_warn("commit mount option requires a positive numeric argument\n");
236 return rv ? rv : -EINVAL;
237 }
238 break;
239 case Opt_statfs_quantum:
240 rv = match_int(&tmp[0], &args->ar_statfs_quantum);
241 if (rv || args->ar_statfs_quantum < 0) {
242 pr_warn("statfs_quantum mount option requires a non-negative numeric argument\n");
243 return rv ? rv : -EINVAL;
244 }
245 break;
246 case Opt_quota_quantum:
247 rv = match_int(&tmp[0], &args->ar_quota_quantum);
248 if (rv || args->ar_quota_quantum <= 0) {
249 pr_warn("quota_quantum mount option requires a positive numeric argument\n");
250 return rv ? rv : -EINVAL;
251 }
252 break;
253 case Opt_statfs_percent:
254 rv = match_int(&tmp[0], &args->ar_statfs_percent);
255 if (rv || args->ar_statfs_percent < 0 ||
256 args->ar_statfs_percent > 100) {
257 pr_warn("statfs_percent mount option requires a numeric argument between 0 and 100\n");
258 return rv ? rv : -EINVAL;
259 }
260 break;
261 case Opt_err_withdraw:
262 args->ar_errors = GFS2_ERRORS_WITHDRAW;
263 break;
264 case Opt_err_panic:
265 if (args->ar_debug) {
266 pr_warn("-o debug and -o errors=panic are mutually exclusive\n");
267 return -EINVAL;
268 }
269 args->ar_errors = GFS2_ERRORS_PANIC;
270 break;
271 case Opt_barrier:
272 args->ar_nobarrier = 0;
273 break;
274 case Opt_nobarrier:
275 args->ar_nobarrier = 1;
276 break;
277 case Opt_rgrplvb:
278 args->ar_rgrplvb = 1;
279 break;
280 case Opt_norgrplvb:
281 args->ar_rgrplvb = 0;
282 break;
283 case Opt_loccookie:
284 args->ar_loccookie = 1;
285 break;
286 case Opt_noloccookie:
287 args->ar_loccookie = 0;
288 break;
289 case Opt_error:
290 default:
291 pr_warn("invalid mount option: %s\n", o);
292 return -EINVAL;
293 }
294 }
295
296 return 0;
297}
298
299/** 47/**
300 * gfs2_jindex_free - Clear all the journal index information 48 * gfs2_jindex_free - Clear all the journal index information
301 * @sdp: The GFS2 superblock 49 * @sdp: The GFS2 superblock
@@ -847,7 +595,7 @@ out:
847 * Returns: errno 595 * Returns: errno
848 */ 596 */
849 597
850static int gfs2_make_fs_ro(struct gfs2_sbd *sdp) 598int gfs2_make_fs_ro(struct gfs2_sbd *sdp)
851{ 599{
852 struct gfs2_holder freeze_gh; 600 struct gfs2_holder freeze_gh;
853 int error; 601 int error;
@@ -1227,84 +975,6 @@ static int gfs2_statfs(struct dentry *dentry, struct kstatfs *buf)
1227} 975}
1228 976
1229/** 977/**
1230 * gfs2_remount_fs - called when the FS is remounted
1231 * @sb: the filesystem
1232 * @flags: the remount flags
1233 * @data: extra data passed in (not used right now)
1234 *
1235 * Returns: errno
1236 */
1237
1238static int gfs2_remount_fs(struct super_block *sb, int *flags, char *data)
1239{
1240 struct gfs2_sbd *sdp = sb->s_fs_info;
1241 struct gfs2_args args = sdp->sd_args; /* Default to current settings */
1242 struct gfs2_tune *gt = &sdp->sd_tune;
1243 int error;
1244
1245 sync_filesystem(sb);
1246
1247 spin_lock(&gt->gt_spin);
1248 args.ar_commit = gt->gt_logd_secs;
1249 args.ar_quota_quantum = gt->gt_quota_quantum;
1250 if (gt->gt_statfs_slow)
1251 args.ar_statfs_quantum = 0;
1252 else
1253 args.ar_statfs_quantum = gt->gt_statfs_quantum;
1254 spin_unlock(&gt->gt_spin);
1255 error = gfs2_mount_args(&args, data);
1256 if (error)
1257 return error;
1258
1259 /* Not allowed to change locking details */
1260 if (strcmp(args.ar_lockproto, sdp->sd_args.ar_lockproto) ||
1261 strcmp(args.ar_locktable, sdp->sd_args.ar_locktable) ||
1262 strcmp(args.ar_hostdata, sdp->sd_args.ar_hostdata))
1263 return -EINVAL;
1264
1265 /* Some flags must not be changed */
1266 if (args_neq(&args, &sdp->sd_args, spectator) ||
1267 args_neq(&args, &sdp->sd_args, localflocks) ||
1268 args_neq(&args, &sdp->sd_args, meta))
1269 return -EINVAL;
1270
1271 if (sdp->sd_args.ar_spectator)
1272 *flags |= SB_RDONLY;
1273
1274 if ((sb->s_flags ^ *flags) & SB_RDONLY) {
1275 if (*flags & SB_RDONLY)
1276 error = gfs2_make_fs_ro(sdp);
1277 else
1278 error = gfs2_make_fs_rw(sdp);
1279 }
1280
1281 sdp->sd_args = args;
1282 if (sdp->sd_args.ar_posix_acl)
1283 sb->s_flags |= SB_POSIXACL;
1284 else
1285 sb->s_flags &= ~SB_POSIXACL;
1286 if (sdp->sd_args.ar_nobarrier)
1287 set_bit(SDF_NOBARRIERS, &sdp->sd_flags);
1288 else
1289 clear_bit(SDF_NOBARRIERS, &sdp->sd_flags);
1290 spin_lock(&gt->gt_spin);
1291 gt->gt_logd_secs = args.ar_commit;
1292 gt->gt_quota_quantum = args.ar_quota_quantum;
1293 if (args.ar_statfs_quantum) {
1294 gt->gt_statfs_slow = 0;
1295 gt->gt_statfs_quantum = args.ar_statfs_quantum;
1296 }
1297 else {
1298 gt->gt_statfs_slow = 1;
1299 gt->gt_statfs_quantum = 30;
1300 }
1301 spin_unlock(&gt->gt_spin);
1302
1303 gfs2_online_uevent(sdp);
1304 return error;
1305}
1306
1307/**
1308 * gfs2_drop_inode - Drop an inode (test for remote unlink) 978 * gfs2_drop_inode - Drop an inode (test for remote unlink)
1309 * @inode: The inode to drop 979 * @inode: The inode to drop
1310 * 980 *
@@ -1748,7 +1418,6 @@ const struct super_operations gfs2_super_ops = {
1748 .freeze_super = gfs2_freeze, 1418 .freeze_super = gfs2_freeze,
1749 .thaw_super = gfs2_unfreeze, 1419 .thaw_super = gfs2_unfreeze,
1750 .statfs = gfs2_statfs, 1420 .statfs = gfs2_statfs,
1751 .remount_fs = gfs2_remount_fs,
1752 .drop_inode = gfs2_drop_inode, 1421 .drop_inode = gfs2_drop_inode,
1753 .show_options = gfs2_show_options, 1422 .show_options = gfs2_show_options,
1754}; 1423};
diff --git a/fs/gfs2/super.h b/fs/gfs2/super.h
index 9d49eaadb9d9..b8bf811a1305 100644
--- a/fs/gfs2/super.h
+++ b/fs/gfs2/super.h
@@ -24,8 +24,6 @@ static inline unsigned int gfs2_jindex_size(struct gfs2_sbd *sdp)
24 24
25extern void gfs2_jindex_free(struct gfs2_sbd *sdp); 25extern void gfs2_jindex_free(struct gfs2_sbd *sdp);
26 26
27extern int gfs2_mount_args(struct gfs2_args *args, char *data);
28
29extern struct gfs2_jdesc *gfs2_jdesc_find(struct gfs2_sbd *sdp, unsigned int jid); 27extern struct gfs2_jdesc *gfs2_jdesc_find(struct gfs2_sbd *sdp, unsigned int jid);
30extern int gfs2_jdesc_check(struct gfs2_jdesc *jd); 28extern int gfs2_jdesc_check(struct gfs2_jdesc *jd);
31 29
@@ -33,6 +31,7 @@ extern int gfs2_lookup_in_master_dir(struct gfs2_sbd *sdp, char *filename,
33 struct gfs2_inode **ipp); 31 struct gfs2_inode **ipp);
34 32
35extern int gfs2_make_fs_rw(struct gfs2_sbd *sdp); 33extern int gfs2_make_fs_rw(struct gfs2_sbd *sdp);
34extern int gfs2_make_fs_ro(struct gfs2_sbd *sdp);
36extern void gfs2_online_uevent(struct gfs2_sbd *sdp); 35extern void gfs2_online_uevent(struct gfs2_sbd *sdp);
37extern int gfs2_statfs_init(struct gfs2_sbd *sdp); 36extern int gfs2_statfs_init(struct gfs2_sbd *sdp);
38extern void gfs2_statfs_change(struct gfs2_sbd *sdp, s64 total, s64 free, 37extern void gfs2_statfs_change(struct gfs2_sbd *sdp, s64 total, s64 free,
diff --git a/fs/notify/dnotify/dnotify.c b/fs/notify/dnotify/dnotify.c
index c03758c91481..7a42c2ebe28d 100644
--- a/fs/notify/dnotify/dnotify.c
+++ b/fs/notify/dnotify/dnotify.c
@@ -13,6 +13,7 @@
13#include <linux/sched/signal.h> 13#include <linux/sched/signal.h>
14#include <linux/dnotify.h> 14#include <linux/dnotify.h>
15#include <linux/init.h> 15#include <linux/init.h>
16#include <linux/security.h>
16#include <linux/spinlock.h> 17#include <linux/spinlock.h>
17#include <linux/slab.h> 18#include <linux/slab.h>
18#include <linux/fdtable.h> 19#include <linux/fdtable.h>
@@ -279,6 +280,17 @@ int fcntl_dirnotify(int fd, struct file *filp, unsigned long arg)
279 goto out_err; 280 goto out_err;
280 } 281 }
281 282
283 /*
284 * convert the userspace DN_* "arg" to the internal FS_*
285 * defined in fsnotify
286 */
287 mask = convert_arg(arg);
288
289 error = security_path_notify(&filp->f_path, mask,
290 FSNOTIFY_OBJ_TYPE_INODE);
291 if (error)
292 goto out_err;
293
282 /* expect most fcntl to add new rather than augment old */ 294 /* expect most fcntl to add new rather than augment old */
283 dn = kmem_cache_alloc(dnotify_struct_cache, GFP_KERNEL); 295 dn = kmem_cache_alloc(dnotify_struct_cache, GFP_KERNEL);
284 if (!dn) { 296 if (!dn) {
@@ -293,9 +305,6 @@ int fcntl_dirnotify(int fd, struct file *filp, unsigned long arg)
293 goto out_err; 305 goto out_err;
294 } 306 }
295 307
296 /* convert the userspace DN_* "arg" to the internal FS_* defines in fsnotify */
297 mask = convert_arg(arg);
298
299 /* set up the new_fsn_mark and new_dn_mark */ 308 /* set up the new_fsn_mark and new_dn_mark */
300 new_fsn_mark = &new_dn_mark->fsn_mark; 309 new_fsn_mark = &new_dn_mark->fsn_mark;
301 fsnotify_init_mark(new_fsn_mark, dnotify_group); 310 fsnotify_init_mark(new_fsn_mark, dnotify_group);
diff --git a/fs/notify/fanotify/fanotify_user.c b/fs/notify/fanotify/fanotify_user.c
index 91006f47e420..8508ab575017 100644
--- a/fs/notify/fanotify/fanotify_user.c
+++ b/fs/notify/fanotify/fanotify_user.c
@@ -528,7 +528,8 @@ static const struct file_operations fanotify_fops = {
528}; 528};
529 529
530static int fanotify_find_path(int dfd, const char __user *filename, 530static int fanotify_find_path(int dfd, const char __user *filename,
531 struct path *path, unsigned int flags) 531 struct path *path, unsigned int flags, __u64 mask,
532 unsigned int obj_type)
532{ 533{
533 int ret; 534 int ret;
534 535
@@ -567,8 +568,15 @@ static int fanotify_find_path(int dfd, const char __user *filename,
567 568
568 /* you can only watch an inode if you have read permissions on it */ 569 /* you can only watch an inode if you have read permissions on it */
569 ret = inode_permission(path->dentry->d_inode, MAY_READ); 570 ret = inode_permission(path->dentry->d_inode, MAY_READ);
571 if (ret) {
572 path_put(path);
573 goto out;
574 }
575
576 ret = security_path_notify(path, mask, obj_type);
570 if (ret) 577 if (ret)
571 path_put(path); 578 path_put(path);
579
572out: 580out:
573 return ret; 581 return ret;
574} 582}
@@ -947,6 +955,7 @@ static int do_fanotify_mark(int fanotify_fd, unsigned int flags, __u64 mask,
947 __kernel_fsid_t __fsid, *fsid = NULL; 955 __kernel_fsid_t __fsid, *fsid = NULL;
948 u32 valid_mask = FANOTIFY_EVENTS | FANOTIFY_EVENT_FLAGS; 956 u32 valid_mask = FANOTIFY_EVENTS | FANOTIFY_EVENT_FLAGS;
949 unsigned int mark_type = flags & FANOTIFY_MARK_TYPE_BITS; 957 unsigned int mark_type = flags & FANOTIFY_MARK_TYPE_BITS;
958 unsigned int obj_type;
950 int ret; 959 int ret;
951 960
952 pr_debug("%s: fanotify_fd=%d flags=%x dfd=%d pathname=%p mask=%llx\n", 961 pr_debug("%s: fanotify_fd=%d flags=%x dfd=%d pathname=%p mask=%llx\n",
@@ -961,8 +970,13 @@ static int do_fanotify_mark(int fanotify_fd, unsigned int flags, __u64 mask,
961 970
962 switch (mark_type) { 971 switch (mark_type) {
963 case FAN_MARK_INODE: 972 case FAN_MARK_INODE:
973 obj_type = FSNOTIFY_OBJ_TYPE_INODE;
974 break;
964 case FAN_MARK_MOUNT: 975 case FAN_MARK_MOUNT:
976 obj_type = FSNOTIFY_OBJ_TYPE_VFSMOUNT;
977 break;
965 case FAN_MARK_FILESYSTEM: 978 case FAN_MARK_FILESYSTEM:
979 obj_type = FSNOTIFY_OBJ_TYPE_SB;
966 break; 980 break;
967 default: 981 default:
968 return -EINVAL; 982 return -EINVAL;
@@ -1030,7 +1044,8 @@ static int do_fanotify_mark(int fanotify_fd, unsigned int flags, __u64 mask,
1030 goto fput_and_out; 1044 goto fput_and_out;
1031 } 1045 }
1032 1046
1033 ret = fanotify_find_path(dfd, pathname, &path, flags); 1047 ret = fanotify_find_path(dfd, pathname, &path, flags,
1048 (mask & ALL_FSNOTIFY_EVENTS), obj_type);
1034 if (ret) 1049 if (ret)
1035 goto fput_and_out; 1050 goto fput_and_out;
1036 1051
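
With the extra mask and obj_type arguments, fanotify_find_path() can now consult the LSM before the mark is created; which obj_type is passed depends on the FAN_MARK_* type chosen by userspace. A small, hypothetical userspace caller that exercises the FAN_MARK_FILESYSTEM case (and therefore the FSNOTIFY_OBJ_TYPE_SB check) looks like:

/* Hypothetical userspace example; error handling kept minimal. */
#include <sys/fanotify.h>
#include <fcntl.h>
#include <stdlib.h>

int main(void)
{
	int fd = fanotify_init(FAN_CLASS_NOTIF, O_RDONLY);

	if (fd < 0)
		exit(1);
	/* Watch opens across the whole filesystem containing "/"; this is
	 * the request that now reaches security_path_notify() with
	 * FSNOTIFY_OBJ_TYPE_SB. */
	if (fanotify_mark(fd, FAN_MARK_ADD | FAN_MARK_FILESYSTEM,
			  FAN_OPEN, AT_FDCWD, "/") < 0)
		exit(1);
	return 0;
}
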
diff --git a/fs/notify/inotify/inotify_user.c b/fs/notify/inotify/inotify_user.c
index 0b815178126e..107537a543fd 100644
--- a/fs/notify/inotify/inotify_user.c
+++ b/fs/notify/inotify/inotify_user.c
@@ -30,6 +30,7 @@
30#include <linux/poll.h> 30#include <linux/poll.h>
31#include <linux/wait.h> 31#include <linux/wait.h>
32#include <linux/memcontrol.h> 32#include <linux/memcontrol.h>
33#include <linux/security.h>
33 34
34#include "inotify.h" 35#include "inotify.h"
35#include "../fdinfo.h" 36#include "../fdinfo.h"
@@ -331,7 +332,8 @@ static const struct file_operations inotify_fops = {
331/* 332/*
332 * find_inode - resolve a user-given path to a specific inode 333 * find_inode - resolve a user-given path to a specific inode
333 */ 334 */
334static int inotify_find_inode(const char __user *dirname, struct path *path, unsigned flags) 335static int inotify_find_inode(const char __user *dirname, struct path *path,
336 unsigned int flags, __u64 mask)
335{ 337{
336 int error; 338 int error;
337 339
@@ -340,8 +342,15 @@ static int inotify_find_inode(const char __user *dirname, struct path *path, uns
340 return error; 342 return error;
341 /* you can only watch an inode if you have read permissions on it */ 343 /* you can only watch an inode if you have read permissions on it */
342 error = inode_permission(path->dentry->d_inode, MAY_READ); 344 error = inode_permission(path->dentry->d_inode, MAY_READ);
345 if (error) {
346 path_put(path);
347 return error;
348 }
349 error = security_path_notify(path, mask,
350 FSNOTIFY_OBJ_TYPE_INODE);
343 if (error) 351 if (error)
344 path_put(path); 352 path_put(path);
353
345 return error; 354 return error;
346} 355}
347 356
@@ -733,7 +742,8 @@ SYSCALL_DEFINE3(inotify_add_watch, int, fd, const char __user *, pathname,
733 if (mask & IN_ONLYDIR) 742 if (mask & IN_ONLYDIR)
734 flags |= LOOKUP_DIRECTORY; 743 flags |= LOOKUP_DIRECTORY;
735 744
736 ret = inotify_find_inode(pathname, &path, flags); 745 ret = inotify_find_inode(pathname, &path, flags,
746 (mask & IN_ALL_EVENTS));
737 if (ret) 747 if (ret)
738 goto fput_and_out; 748 goto fput_and_out;
739 749
diff --git a/include/Kbuild b/include/Kbuild
index 4ae65e13c3f0..ffba79483cc5 100644
--- a/include/Kbuild
+++ b/include/Kbuild
@@ -312,7 +312,6 @@ header-test- += linux/mfd/as3711.h
312header-test- += linux/mfd/as3722.h 312header-test- += linux/mfd/as3722.h
313header-test- += linux/mfd/da903x.h 313header-test- += linux/mfd/da903x.h
314header-test- += linux/mfd/da9055/pdata.h 314header-test- += linux/mfd/da9055/pdata.h
315header-test- += linux/mfd/da9063/pdata.h
316header-test- += linux/mfd/db8500-prcmu.h 315header-test- += linux/mfd/db8500-prcmu.h
317header-test- += linux/mfd/dbx500-prcmu.h 316header-test- += linux/mfd/dbx500-prcmu.h
318header-test- += linux/mfd/dln2.h 317header-test- += linux/mfd/dln2.h
diff --git a/include/linux/backlight.h b/include/linux/backlight.h
index 0b5897446dca..c7d6b2e8c3b5 100644
--- a/include/linux/backlight.h
+++ b/include/linux/backlight.h
@@ -46,6 +46,12 @@ enum backlight_notification {
46 BACKLIGHT_UNREGISTERED, 46 BACKLIGHT_UNREGISTERED,
47}; 47};
48 48
49enum backlight_scale {
50 BACKLIGHT_SCALE_UNKNOWN = 0,
51 BACKLIGHT_SCALE_LINEAR,
52 BACKLIGHT_SCALE_NON_LINEAR,
53};
54
49struct backlight_device; 55struct backlight_device;
50struct fb_info; 56struct fb_info;
51 57
@@ -80,6 +86,8 @@ struct backlight_properties {
80 enum backlight_type type; 86 enum backlight_type type;
81 /* Flags used to signal drivers of state changes */ 87 /* Flags used to signal drivers of state changes */
82 unsigned int state; 88 unsigned int state;
89 /* Type of the brightness scale (linear, non-linear, ...) */
90 enum backlight_scale scale;
83 91
84#define BL_CORE_SUSPENDED (1 << 0) /* backlight is suspended */ 92#define BL_CORE_SUSPENDED (1 << 0) /* backlight is suspended */
85#define BL_CORE_FBBLANK (1 << 1) /* backlight is under an fb blank event */ 93#define BL_CORE_FBBLANK (1 << 1) /* backlight is under an fb blank event */
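
Drivers that know the shape of their brightness curve can advertise it through the new scale field when registering; a hypothetical fragment (the "foo" names are placeholders), assuming the usual devm_backlight_device_register() helper:

	/* Hypothetical driver fragment. */
	struct backlight_properties props;
	struct backlight_device *bl;

	memset(&props, 0, sizeof(props));
	props.type = BACKLIGHT_RAW;
	props.max_brightness = 255;
	props.scale = BACKLIGHT_SCALE_LINEAR;	/* the field added above */

	bl = devm_backlight_device_register(dev, "foo-backlight", dev, foo,
					    &foo_backlight_ops, &props);
	if (IS_ERR(bl))
		return PTR_ERR(bl);
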
diff --git a/include/linux/cred.h b/include/linux/cred.h
index f7a30e0099be..18639c069263 100644
--- a/include/linux/cred.h
+++ b/include/linux/cred.h
@@ -386,7 +386,6 @@ static inline void put_cred(const struct cred *_cred)
386#define current_fsgid() (current_cred_xxx(fsgid)) 386#define current_fsgid() (current_cred_xxx(fsgid))
387#define current_cap() (current_cred_xxx(cap_effective)) 387#define current_cap() (current_cred_xxx(cap_effective))
388#define current_user() (current_cred_xxx(user)) 388#define current_user() (current_cred_xxx(user))
389#define current_security() (current_cred_xxx(security))
390 389
391extern struct user_namespace init_user_ns; 390extern struct user_namespace init_user_ns;
392#ifdef CONFIG_USER_NS 391#ifdef CONFIG_USER_NS
diff --git a/include/linux/hid.h b/include/linux/hid.h
index d770ab1a0479..cd41f209043f 100644
--- a/include/linux/hid.h
+++ b/include/linux/hid.h
@@ -1154,29 +1154,32 @@ int hid_pidff_init(struct hid_device *hid);
1154#define hid_pidff_init NULL 1154#define hid_pidff_init NULL
1155#endif 1155#endif
1156 1156
1157#define dbg_hid(format, arg...) \ 1157#define dbg_hid(fmt, ...) \
1158do { \ 1158do { \
1159 if (hid_debug) \ 1159 if (hid_debug) \
1160 printk(KERN_DEBUG "%s: " format, __FILE__, ##arg); \ 1160 printk(KERN_DEBUG "%s: " fmt, __FILE__, ##__VA_ARGS__); \
1161} while (0) 1161} while (0)
1162 1162
1163#define hid_printk(level, hid, fmt, arg...) \ 1163#define hid_err(hid, fmt, ...) \
1164 dev_printk(level, &(hid)->dev, fmt, ##arg) 1164 dev_err(&(hid)->dev, fmt, ##__VA_ARGS__)
1165#define hid_emerg(hid, fmt, arg...) \ 1165#define hid_notice(hid, fmt, ...) \
1166 dev_emerg(&(hid)->dev, fmt, ##arg) 1166 dev_notice(&(hid)->dev, fmt, ##__VA_ARGS__)
1167#define hid_crit(hid, fmt, arg...) \ 1167#define hid_warn(hid, fmt, ...) \
1168 dev_crit(&(hid)->dev, fmt, ##arg) 1168 dev_warn(&(hid)->dev, fmt, ##__VA_ARGS__)
1169#define hid_alert(hid, fmt, arg...) \ 1169#define hid_info(hid, fmt, ...) \
1170 dev_alert(&(hid)->dev, fmt, ##arg) 1170 dev_info(&(hid)->dev, fmt, ##__VA_ARGS__)
1171#define hid_err(hid, fmt, arg...) \ 1171#define hid_dbg(hid, fmt, ...) \
1172 dev_err(&(hid)->dev, fmt, ##arg) 1172 dev_dbg(&(hid)->dev, fmt, ##__VA_ARGS__)
1173#define hid_notice(hid, fmt, arg...) \ 1173
1174 dev_notice(&(hid)->dev, fmt, ##arg) 1174#define hid_err_once(hid, fmt, ...) \
1175#define hid_warn(hid, fmt, arg...) \ 1175 dev_err_once(&(hid)->dev, fmt, ##__VA_ARGS__)
1176 dev_warn(&(hid)->dev, fmt, ##arg) 1176#define hid_notice_once(hid, fmt, ...) \
1177#define hid_info(hid, fmt, arg...) \ 1177 dev_notice_once(&(hid)->dev, fmt, ##__VA_ARGS__)
1178 dev_info(&(hid)->dev, fmt, ##arg) 1178#define hid_warn_once(hid, fmt, ...) \
1179#define hid_dbg(hid, fmt, arg...) \ 1179 dev_warn_once(&(hid)->dev, fmt, ##__VA_ARGS__)
1180 dev_dbg(&(hid)->dev, fmt, ##arg) 1180#define hid_info_once(hid, fmt, ...) \
1181 dev_info_once(&(hid)->dev, fmt, ##__VA_ARGS__)
1182#define hid_dbg_once(hid, fmt, ...) \
1183 dev_dbg_once(&(hid)->dev, fmt, ##__VA_ARGS__)
1181 1184
1182#endif 1185#endif
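
The rewritten macros behave exactly like the dev_*() helpers they wrap; the new *_once variants suit warnings that should be printed only the first time they trigger. A hypothetical raw_event handler using them:

/* Hypothetical driver fragment. */
static int foo_raw_event(struct hid_device *hdev, struct hid_report *report,
			 u8 *data, int size)
{
	if (size < 4) {
		hid_warn_once(hdev, "short report (%d bytes), ignoring\n", size);
		return 0;
	}
	hid_dbg(hdev, "report id %d, %d bytes\n", report->id, size);
	return 0;
}
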
diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h
index 2afe6fdc1dda..b4a017093b69 100644
--- a/include/linux/hyperv.h
+++ b/include/linux/hyperv.h
@@ -245,7 +245,10 @@ struct vmbus_channel_offer {
245 } pipe; 245 } pipe;
246 } u; 246 } u;
247 /* 247 /*
248 * The sub_channel_index is defined in win8. 248 * The sub_channel_index is defined in Win8: a value of zero means a
249 * primary channel and a value of non-zero means a sub-channel.
250 *
251 * Before Win8, the field is reserved, meaning it's always zero.
249 */ 252 */
250 u16 sub_channel_index; 253 u16 sub_channel_index;
251 u16 reserved3; 254 u16 reserved3;
@@ -423,6 +426,9 @@ enum vmbus_channel_message_type {
423 CHANNELMSG_COUNT 426 CHANNELMSG_COUNT
424}; 427};
425 428
429/* Hyper-V supports about 2048 channels, and the RELIDs start with 1. */
430#define INVALID_RELID U32_MAX
431
426struct vmbus_channel_message_header { 432struct vmbus_channel_message_header {
427 enum vmbus_channel_message_type msgtype; 433 enum vmbus_channel_message_type msgtype;
428 u32 padding; 434 u32 padding;
@@ -934,6 +940,11 @@ static inline bool is_hvsock_channel(const struct vmbus_channel *c)
934 VMBUS_CHANNEL_TLNPI_PROVIDER_OFFER); 940 VMBUS_CHANNEL_TLNPI_PROVIDER_OFFER);
935} 941}
936 942
943static inline bool is_sub_channel(const struct vmbus_channel *c)
944{
945 return c->offermsg.offer.sub_channel_index != 0;
946}
947
937static inline void set_channel_affinity_state(struct vmbus_channel *c, 948static inline void set_channel_affinity_state(struct vmbus_channel *c,
938 enum hv_numa_policy policy) 949 enum hv_numa_policy policy)
939{ 950{
@@ -1149,6 +1160,9 @@ struct hv_driver {
1149 int (*remove)(struct hv_device *); 1160 int (*remove)(struct hv_device *);
1150 void (*shutdown)(struct hv_device *); 1161 void (*shutdown)(struct hv_device *);
1151 1162
1163 int (*suspend)(struct hv_device *);
1164 int (*resume)(struct hv_device *);
1165
1152}; 1166};
1153 1167
1154/* Base device object */ 1168/* Base device object */
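
The new suspend/resume callbacks are what VMBus drivers implement to take part in hibernation, and is_sub_channel() lets code treat primary and sub-channels differently. A hypothetical driver skeleton wiring up the callbacks (details omitted):

/* Hypothetical VMBus driver fragment. */
static int foo_suspend(struct hv_device *dev)
{
	/* Quiesce the device and close its channels as appropriate. */
	return 0;
}

static int foo_resume(struct hv_device *dev)
{
	/* Reopen channels and restore device state. */
	return 0;
}

static struct hv_driver foo_drv = {
	.name		= "foo",
	.probe		= foo_probe,
	.remove		= foo_remove,
	.suspend	= foo_suspend,
	.resume		= foo_resume,
};
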
diff --git a/include/linux/lsm_hooks.h b/include/linux/lsm_hooks.h
index df1318d85f7d..3fced5824aee 100644
--- a/include/linux/lsm_hooks.h
+++ b/include/linux/lsm_hooks.h
@@ -339,6 +339,9 @@
339 * Check for permission to change root directory. 339 * Check for permission to change root directory.
340 * @path contains the path structure. 340 * @path contains the path structure.
341 * Return 0 if permission is granted. 341 * Return 0 if permission is granted.
342 * @path_notify:
343 * Check permissions before setting a watch on events as defined by @mask,
344 * on an object at @path, whose type is defined by @obj_type.
342 * @inode_readlink: 345 * @inode_readlink:
343 * Check the permission to read the symbolic link. 346 * Check the permission to read the symbolic link.
344 * @dentry contains the dentry structure for the file link. 347 * @dentry contains the dentry structure for the file link.
@@ -1535,7 +1538,9 @@ union security_list_options {
1535 int (*path_chown)(const struct path *path, kuid_t uid, kgid_t gid); 1538 int (*path_chown)(const struct path *path, kuid_t uid, kgid_t gid);
1536 int (*path_chroot)(const struct path *path); 1539 int (*path_chroot)(const struct path *path);
1537#endif 1540#endif
1538 1541 /* Needed for inode based security check */
1542 int (*path_notify)(const struct path *path, u64 mask,
1543 unsigned int obj_type);
1539 int (*inode_alloc_security)(struct inode *inode); 1544 int (*inode_alloc_security)(struct inode *inode);
1540 void (*inode_free_security)(struct inode *inode); 1545 void (*inode_free_security)(struct inode *inode);
1541 int (*inode_init_security)(struct inode *inode, struct inode *dir, 1546 int (*inode_init_security)(struct inode *inode, struct inode *dir,
@@ -1860,6 +1865,8 @@ struct security_hook_heads {
1860 struct hlist_head path_chown; 1865 struct hlist_head path_chown;
1861 struct hlist_head path_chroot; 1866 struct hlist_head path_chroot;
1862#endif 1867#endif
1868 /* Needed for inode based modules as well */
1869 struct hlist_head path_notify;
1863 struct hlist_head inode_alloc_security; 1870 struct hlist_head inode_alloc_security;
1864 struct hlist_head inode_free_security; 1871 struct hlist_head inode_free_security;
1865 struct hlist_head inode_init_security; 1872 struct hlist_head inode_init_security;
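
An LSM wires the new hook up like any other path hook; a hypothetical module that only mediates watches placed directly on inodes might look like this (not an existing implementation):

/* Hypothetical LSM fragment showing the shape of a path_notify hook. */
static int example_path_notify(const struct path *path, u64 mask,
			       unsigned int obj_type)
{
	/* Let mount- and superblock-wide watches through; apply policy
	 * only to watches on individual inodes. */
	if (obj_type != FSNOTIFY_OBJ_TYPE_INODE)
		return 0;

	return 0;	/* or -EPERM, based on policy and mask */
}

static struct security_hook_list example_hooks[] __lsm_ro_after_init = {
	LSM_HOOK_INIT(path_notify, example_path_notify),
};
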
diff --git a/include/linux/memremap.h b/include/linux/memremap.h
index fb2a0bd826b9..bef51e35d8d2 100644
--- a/include/linux/memremap.h
+++ b/include/linux/memremap.h
@@ -111,7 +111,6 @@ struct dev_pagemap {
111 struct completion done; 111 struct completion done;
112 enum memory_type type; 112 enum memory_type type;
113 unsigned int flags; 113 unsigned int flags;
114 u64 pci_p2pdma_bus_offset;
115 const struct dev_pagemap_ops *ops; 114 const struct dev_pagemap_ops *ops;
116}; 115};
117 116
diff --git a/include/linux/mfd/da9063/pdata.h b/include/linux/mfd/da9063/pdata.h
deleted file mode 100644
index 085edbf7601b..000000000000
--- a/include/linux/mfd/da9063/pdata.h
+++ /dev/null
@@ -1,60 +0,0 @@
1/* SPDX-License-Identifier: GPL-2.0-or-later */
2/*
3 * Platform configuration options for DA9063
4 *
5 * Copyright 2012 Dialog Semiconductor Ltd.
6 *
7 * Author: Michal Hajduk, Dialog Semiconductor
8 * Author: Krystian Garbaciak, Dialog Semiconductor
9 */
10
11#ifndef __MFD_DA9063_PDATA_H__
12#define __MFD_DA9063_PDATA_H__
13
14/*
15 * RGB LED configuration
16 */
17/* LED IDs for flags in struct led_info. */
18enum {
19 DA9063_GPIO11_LED,
20 DA9063_GPIO14_LED,
21 DA9063_GPIO15_LED,
22
23 DA9063_LED_NUM
24};
25#define DA9063_LED_ID_MASK 0x3
26
27/* LED polarity for flags in struct led_info. */
28#define DA9063_LED_HIGH_LEVEL_ACTIVE 0x0
29#define DA9063_LED_LOW_LEVEL_ACTIVE 0x4
30
31
32/*
33 * General PMIC configuration
34 */
35/* HWMON ADC channels configuration */
36#define DA9063_FLG_FORCE_IN0_MANUAL_MODE 0x0010
37#define DA9063_FLG_FORCE_IN0_AUTO_MODE 0x0020
38#define DA9063_FLG_FORCE_IN1_MANUAL_MODE 0x0040
39#define DA9063_FLG_FORCE_IN1_AUTO_MODE 0x0080
40#define DA9063_FLG_FORCE_IN2_MANUAL_MODE 0x0100
41#define DA9063_FLG_FORCE_IN2_AUTO_MODE 0x0200
42#define DA9063_FLG_FORCE_IN3_MANUAL_MODE 0x0400
43#define DA9063_FLG_FORCE_IN3_AUTO_MODE 0x0800
44
45/* Disable register caching. */
46#define DA9063_FLG_NO_CACHE 0x0008
47
48struct da9063;
49
50/* DA9063 platform data */
51struct da9063_pdata {
52 int (*init)(struct da9063 *da9063);
53 int irq_base;
54 bool key_power;
55 unsigned flags;
56 struct da9063_regulators_pdata *regulators_pdata;
57 struct led_platform_data *leds_pdata;
58};
59
60#endif /* __MFD_DA9063_PDATA_H__ */
diff --git a/include/linux/mfd/intel_soc_pmic_mrfld.h b/include/linux/mfd/intel_soc_pmic_mrfld.h
new file mode 100644
index 000000000000..4daecd682275
--- /dev/null
+++ b/include/linux/mfd/intel_soc_pmic_mrfld.h
@@ -0,0 +1,81 @@
1/* SPDX-License-Identifier: GPL-2.0 */
2/*
3 * Header file for Intel Merrifield Basin Cove PMIC
4 *
5 * Copyright (C) 2019 Intel Corporation. All rights reserved.
6 */
7
8#ifndef __INTEL_SOC_PMIC_MRFLD_H__
9#define __INTEL_SOC_PMIC_MRFLD_H__
10
11#include <linux/bits.h>
12
13#define BCOVE_ID 0x00
14
15#define BCOVE_ID_MINREV0 GENMASK(2, 0)
16#define BCOVE_ID_MAJREV0 GENMASK(5, 3)
17#define BCOVE_ID_VENDID0 GENMASK(7, 6)
18
19#define BCOVE_MINOR(x) (unsigned int)(((x) & BCOVE_ID_MINREV0) >> 0)
20#define BCOVE_MAJOR(x) (unsigned int)(((x) & BCOVE_ID_MAJREV0) >> 3)
21#define BCOVE_VENDOR(x) (unsigned int)(((x) & BCOVE_ID_VENDID0) >> 6)
22
23#define BCOVE_IRQLVL1 0x01
24
25#define BCOVE_PBIRQ 0x02
26#define BCOVE_TMUIRQ 0x03
27#define BCOVE_THRMIRQ 0x04
28#define BCOVE_BCUIRQ 0x05
29#define BCOVE_ADCIRQ 0x06
30#define BCOVE_CHGRIRQ0 0x07
31#define BCOVE_CHGRIRQ1 0x08
32#define BCOVE_GPIOIRQ 0x09
33#define BCOVE_CRITIRQ 0x0B
34
35#define BCOVE_MIRQLVL1 0x0C
36
37#define BCOVE_MPBIRQ 0x0D
38#define BCOVE_MTMUIRQ 0x0E
39#define BCOVE_MTHRMIRQ 0x0F
40#define BCOVE_MBCUIRQ 0x10
41#define BCOVE_MADCIRQ 0x11
42#define BCOVE_MCHGRIRQ0 0x12
43#define BCOVE_MCHGRIRQ1 0x13
44#define BCOVE_MGPIOIRQ 0x14
45#define BCOVE_MCRITIRQ 0x16
46
47#define BCOVE_SCHGRIRQ0 0x4E
48#define BCOVE_SCHGRIRQ1 0x4F
49
50/* Level 1 IRQs */
51#define BCOVE_LVL1_PWRBTN BIT(0) /* power button */
52#define BCOVE_LVL1_TMU BIT(1) /* time management unit */
53#define BCOVE_LVL1_THRM BIT(2) /* thermal */
54#define BCOVE_LVL1_BCU BIT(3) /* burst control unit */
55#define BCOVE_LVL1_ADC BIT(4) /* ADC */
56#define BCOVE_LVL1_CHGR BIT(5) /* charger */
57#define BCOVE_LVL1_GPIO BIT(6) /* GPIO */
58#define BCOVE_LVL1_CRIT BIT(7) /* critical event */
59
60/* Level 2 IRQs: power button */
61#define BCOVE_PBIRQ_PBTN BIT(0)
62#define BCOVE_PBIRQ_UBTN BIT(1)
63
64/* Level 2 IRQs: ADC */
65#define BCOVE_ADCIRQ_BATTEMP BIT(2)
66#define BCOVE_ADCIRQ_SYSTEMP BIT(3)
67#define BCOVE_ADCIRQ_BATTID BIT(4)
68#define BCOVE_ADCIRQ_VIBATT BIT(5)
69#define BCOVE_ADCIRQ_CCTICK BIT(7)
70
71/* Level 2 IRQs: charger */
72#define BCOVE_CHGRIRQ_BAT0ALRT BIT(4)
73#define BCOVE_CHGRIRQ_BAT1ALRT BIT(5)
74#define BCOVE_CHGRIRQ_BATCRIT BIT(6)
75
76#define BCOVE_CHGRIRQ_VBUSDET BIT(0)
77#define BCOVE_CHGRIRQ_DCDET BIT(1)
78#define BCOVE_CHGRIRQ_BATTDET BIT(2)
79#define BCOVE_CHGRIRQ_USBIDDET BIT(3)
80
81#endif /* __INTEL_SOC_PMIC_MRFLD_H__ */
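
The ID register packs vendor and major/minor revision into a single byte, which the BCOVE_VENDOR/MAJOR/MINOR() macros decode; a hypothetical probe fragment (pmic->regmap is an assumed field name):

	/* Hypothetical probe fragment. */
	unsigned int id;
	int ret;

	ret = regmap_read(pmic->regmap, BCOVE_ID, &id);
	if (ret)
		return ret;

	dev_info(dev, "Basin Cove PMIC vendor %u, revision %u.%u\n",
		 BCOVE_VENDOR(id), BCOVE_MAJOR(id), BCOVE_MINOR(id));
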
diff --git a/include/linux/mfd/mt6397/core.h b/include/linux/mfd/mt6397/core.h
index 25a95e72179b..fc88d315bdde 100644
--- a/include/linux/mfd/mt6397/core.h
+++ b/include/linux/mfd/mt6397/core.h
@@ -7,6 +7,14 @@
7#ifndef __MFD_MT6397_CORE_H__ 7#ifndef __MFD_MT6397_CORE_H__
8#define __MFD_MT6397_CORE_H__ 8#define __MFD_MT6397_CORE_H__
9 9
10#include <linux/mutex.h>
11
12enum chip_id {
13 MT6323_CHIP_ID = 0x23,
14 MT6391_CHIP_ID = 0x91,
15 MT6397_CHIP_ID = 0x97,
16};
17
10enum mt6397_irq_numbers { 18enum mt6397_irq_numbers {
11 MT6397_IRQ_SPKL_AB = 0, 19 MT6397_IRQ_SPKL_AB = 0,
12 MT6397_IRQ_SPKR_AB, 20 MT6397_IRQ_SPKR_AB,
@@ -54,6 +62,9 @@ struct mt6397_chip {
54 u16 irq_masks_cache[2]; 62 u16 irq_masks_cache[2];
55 u16 int_con[2]; 63 u16 int_con[2];
56 u16 int_status[2]; 64 u16 int_status[2];
65 u16 chip_id;
57}; 66};
58 67
68int mt6397_irq_init(struct mt6397_chip *chip);
69
59#endif /* __MFD_MT6397_CORE_H__ */ 70#endif /* __MFD_MT6397_CORE_H__ */
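
Sub-drivers can branch on the cached chip_id when the MT6323, MT6391 and MT6397 variants need different handling; a hypothetical fragment:

	/* Hypothetical fragment branching on the cached chip ID. */
	switch (chip->chip_id) {
	case MT6323_CHIP_ID:
	case MT6391_CHIP_ID:
		/* MT6323/MT6391-specific setup */
		break;
	case MT6397_CHIP_ID:
		/* MT6397-specific setup */
		break;
	default:
		dev_err(chip->dev, "unsupported chip id 0x%x\n", chip->chip_id);
		return -ENODEV;
	}
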
diff --git a/include/linux/pci-aspm.h b/include/linux/pci-aspm.h
deleted file mode 100644
index 67064145d76e..000000000000
--- a/include/linux/pci-aspm.h
+++ /dev/null
@@ -1,36 +0,0 @@
1/* SPDX-License-Identifier: GPL-2.0 */
2/*
3 * aspm.h
4 *
5 * PCI Express ASPM defines and function prototypes
6 *
7 * Copyright (C) 2007 Intel Corp.
8 * Zhang Yanmin (yanmin.zhang@intel.com)
9 * Shaohua Li (shaohua.li@intel.com)
10 *
11 * For more information, please consult the following manuals (look at
12 * http://www.pcisig.com/ for how to get them):
13 *
14 * PCI Express Specification
15 */
16
17#ifndef LINUX_ASPM_H
18#define LINUX_ASPM_H
19
20#include <linux/pci.h>
21
22#define PCIE_LINK_STATE_L0S 1
23#define PCIE_LINK_STATE_L1 2
24#define PCIE_LINK_STATE_CLKPM 4
25
26#ifdef CONFIG_PCIEASPM
27int pci_disable_link_state(struct pci_dev *pdev, int state);
28int pci_disable_link_state_locked(struct pci_dev *pdev, int state);
29void pcie_no_aspm(void);
30#else
31static inline int pci_disable_link_state(struct pci_dev *pdev, int state)
32{ return 0; }
33static inline void pcie_no_aspm(void) { }
34#endif
35
36#endif /* LINUX_ASPM_H */
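
The ASPM link-state constants and prototypes now live in <linux/pci.h> (see the pci.h hunk further below), so drivers no longer include a separate header; a typical, hypothetical caller:

/* Hypothetical probe fragment; <linux/pci.h> is the only header needed now. */
static int foo_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	/* The device misbehaves in L0s/L1, so keep the link out of them. */
	pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1);
	return 0;
}
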
diff --git a/include/linux/pci-p2pdma.h b/include/linux/pci-p2pdma.h
index bca9bc3e5be7..8318a97c9c61 100644
--- a/include/linux/pci-p2pdma.h
+++ b/include/linux/pci-p2pdma.h
@@ -30,8 +30,10 @@ struct scatterlist *pci_p2pmem_alloc_sgl(struct pci_dev *pdev,
30 unsigned int *nents, u32 length); 30 unsigned int *nents, u32 length);
31void pci_p2pmem_free_sgl(struct pci_dev *pdev, struct scatterlist *sgl); 31void pci_p2pmem_free_sgl(struct pci_dev *pdev, struct scatterlist *sgl);
32void pci_p2pmem_publish(struct pci_dev *pdev, bool publish); 32void pci_p2pmem_publish(struct pci_dev *pdev, bool publish);
33int pci_p2pdma_map_sg(struct device *dev, struct scatterlist *sg, int nents, 33int pci_p2pdma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
34 enum dma_data_direction dir); 34 int nents, enum dma_data_direction dir, unsigned long attrs);
35void pci_p2pdma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg,
36 int nents, enum dma_data_direction dir, unsigned long attrs);
35int pci_p2pdma_enable_store(const char *page, struct pci_dev **p2p_dev, 37int pci_p2pdma_enable_store(const char *page, struct pci_dev **p2p_dev,
36 bool *use_p2pdma); 38 bool *use_p2pdma);
37ssize_t pci_p2pdma_enable_show(char *page, struct pci_dev *p2p_dev, 39ssize_t pci_p2pdma_enable_show(char *page, struct pci_dev *p2p_dev,
@@ -81,11 +83,17 @@ static inline void pci_p2pmem_free_sgl(struct pci_dev *pdev,
81static inline void pci_p2pmem_publish(struct pci_dev *pdev, bool publish) 83static inline void pci_p2pmem_publish(struct pci_dev *pdev, bool publish)
82{ 84{
83} 85}
84static inline int pci_p2pdma_map_sg(struct device *dev, 86static inline int pci_p2pdma_map_sg_attrs(struct device *dev,
85 struct scatterlist *sg, int nents, enum dma_data_direction dir) 87 struct scatterlist *sg, int nents, enum dma_data_direction dir,
88 unsigned long attrs)
86{ 89{
87 return 0; 90 return 0;
88} 91}
92static inline void pci_p2pdma_unmap_sg_attrs(struct device *dev,
93 struct scatterlist *sg, int nents, enum dma_data_direction dir,
94 unsigned long attrs)
95{
96}
89static inline int pci_p2pdma_enable_store(const char *page, 97static inline int pci_p2pdma_enable_store(const char *page,
90 struct pci_dev **p2p_dev, bool *use_p2pdma) 98 struct pci_dev **p2p_dev, bool *use_p2pdma)
91{ 99{
@@ -111,4 +119,16 @@ static inline struct pci_dev *pci_p2pmem_find(struct device *client)
111 return pci_p2pmem_find_many(&client, 1); 119 return pci_p2pmem_find_many(&client, 1);
112} 120}
113 121
122static inline int pci_p2pdma_map_sg(struct device *dev, struct scatterlist *sg,
123 int nents, enum dma_data_direction dir)
124{
125 return pci_p2pdma_map_sg_attrs(dev, sg, nents, dir, 0);
126}
127
128static inline void pci_p2pdma_unmap_sg(struct device *dev,
129 struct scatterlist *sg, int nents, enum dma_data_direction dir)
130{
131 pci_p2pdma_unmap_sg_attrs(dev, sg, nents, dir, 0);
132}
133
114#endif /* _LINUX_PCI_P2P_H */ 134#endif /* _LINUX_PCI_P2P_H */
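
Existing callers keep the short pci_p2pdma_map_sg()/unmap_sg() names, which are now thin wrappers passing attrs = 0; callers that need DMA attributes use the *_attrs entry points directly, for example (hypothetical fragment):

	/* Hypothetical fragment: mapping a P2PDMA scatterlist with attributes. */
	int nents_mapped;

	nents_mapped = pci_p2pdma_map_sg_attrs(dev, sgl, nents, DMA_TO_DEVICE,
					       DMA_ATTR_NO_WARN);
	if (!nents_mapped)
		return -EIO;
	/* ... issue the transfer ... */
	pci_p2pdma_unmap_sg_attrs(dev, sgl, nents, DMA_TO_DEVICE,
				  DMA_ATTR_NO_WARN);
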
diff --git a/include/linux/pci.h b/include/linux/pci.h
index 82e4cd1b7ac3..f9088c89a534 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -6,12 +6,18 @@
6 * Copyright 1994, Drew Eckhardt 6 * Copyright 1994, Drew Eckhardt
7 * Copyright 1997--1999 Martin Mares <mj@ucw.cz> 7 * Copyright 1997--1999 Martin Mares <mj@ucw.cz>
8 * 8 *
9 * PCI Express ASPM defines and function prototypes
10 * Copyright (c) 2007 Intel Corp.
11 * Zhang Yanmin (yanmin.zhang@intel.com)
12 * Shaohua Li (shaohua.li@intel.com)
13 *
9 * For more information, please consult the following manuals (look at 14 * For more information, please consult the following manuals (look at
10 * http://www.pcisig.com/ for how to get them): 15 * http://www.pcisig.com/ for how to get them):
11 * 16 *
12 * PCI BIOS Specification 17 * PCI BIOS Specification
13 * PCI Local Bus Specification 18 * PCI Local Bus Specification
14 * PCI to PCI Bridge Specification 19 * PCI to PCI Bridge Specification
20 * PCI Express Specification
15 * PCI System Design Guide 21 * PCI System Design Guide
16 */ 22 */
17#ifndef LINUX_PCI_H 23#ifndef LINUX_PCI_H
@@ -145,11 +151,6 @@ static inline const char *pci_power_name(pci_power_t state)
145 return pci_power_names[1 + (__force int) state]; 151 return pci_power_names[1 + (__force int) state];
146} 152}
147 153
148#define PCI_PM_D2_DELAY 200
149#define PCI_PM_D3_WAIT 10
150#define PCI_PM_D3COLD_WAIT 100
151#define PCI_PM_BUS_WAIT 50
152
153/** 154/**
154 * typedef pci_channel_state_t 155 * typedef pci_channel_state_t
155 * 156 *
@@ -418,7 +419,6 @@ struct pci_dev {
418 unsigned int broken_intx_masking:1; /* INTx masking can't be used */ 419 unsigned int broken_intx_masking:1; /* INTx masking can't be used */
419 unsigned int io_window_1k:1; /* Intel bridge 1K I/O windows */ 420 unsigned int io_window_1k:1; /* Intel bridge 1K I/O windows */
420 unsigned int irq_managed:1; 421 unsigned int irq_managed:1;
421 unsigned int has_secondary_link:1;
422 unsigned int non_compliant_bars:1; /* Broken BARs; ignore them */ 422 unsigned int non_compliant_bars:1; /* Broken BARs; ignore them */
423 unsigned int is_probed:1; /* Device probing in progress */ 423 unsigned int is_probed:1; /* Device probing in progress */
424 unsigned int link_active_reporting:1;/* Device capable of reporting link active */ 424 unsigned int link_active_reporting:1;/* Device capable of reporting link active */
@@ -649,9 +649,6 @@ static inline struct pci_dev *pci_upstream_bridge(struct pci_dev *dev)
649 return dev->bus->self; 649 return dev->bus->self;
650} 650}
651 651
652struct device *pci_get_host_bridge_device(struct pci_dev *dev);
653void pci_put_host_bridge_device(struct device *dev);
654
655#ifdef CONFIG_PCI_MSI 652#ifdef CONFIG_PCI_MSI
656static inline bool pci_dev_msi_enabled(struct pci_dev *pci_dev) 653static inline bool pci_dev_msi_enabled(struct pci_dev *pci_dev)
657{ 654{
@@ -925,6 +922,11 @@ enum {
925 PCI_SCAN_ALL_PCIE_DEVS = 0x00000040, /* Scan all, not just dev 0 */ 922 PCI_SCAN_ALL_PCIE_DEVS = 0x00000040, /* Scan all, not just dev 0 */
926}; 923};
927 924
925#define PCI_IRQ_LEGACY (1 << 0) /* Allow legacy interrupts */
926#define PCI_IRQ_MSI (1 << 1) /* Allow MSI interrupts */
927#define PCI_IRQ_MSIX (1 << 2) /* Allow MSI-X interrupts */
928#define PCI_IRQ_AFFINITY (1 << 3) /* Auto-assign affinity */
929
928/* These external functions are only available when PCI support is enabled */ 930/* These external functions are only available when PCI support is enabled */
929#ifdef CONFIG_PCI 931#ifdef CONFIG_PCI
930 932
@@ -969,7 +971,7 @@ resource_size_t pcibios_align_resource(void *, const struct resource *,
969 resource_size_t, 971 resource_size_t,
970 resource_size_t); 972 resource_size_t);
971 973
972/* Weak but can be overriden by arch */ 974/* Weak but can be overridden by arch */
973void pci_fixup_cardbus(struct pci_bus *); 975void pci_fixup_cardbus(struct pci_bus *);
974 976
975/* Generic PCI functions used internally */ 977/* Generic PCI functions used internally */
@@ -995,7 +997,6 @@ struct pci_bus *pci_scan_root_bus(struct device *parent, int bus,
995int pci_scan_root_bus_bridge(struct pci_host_bridge *bridge); 997int pci_scan_root_bus_bridge(struct pci_host_bridge *bridge);
996struct pci_bus *pci_add_new_bus(struct pci_bus *parent, struct pci_dev *dev, 998struct pci_bus *pci_add_new_bus(struct pci_bus *parent, struct pci_dev *dev,
997 int busnr); 999 int busnr);
998void pcie_update_link_speed(struct pci_bus *bus, u16 link_status);
999struct pci_slot *pci_create_slot(struct pci_bus *parent, int slot_nr, 1000struct pci_slot *pci_create_slot(struct pci_bus *parent, int slot_nr,
1000 const char *name, 1001 const char *name,
1001 struct hotplug_slot *hotplug); 1002 struct hotplug_slot *hotplug);
@@ -1241,19 +1242,12 @@ int pci_wake_from_d3(struct pci_dev *dev, bool enable);
1241int pci_prepare_to_sleep(struct pci_dev *dev); 1242int pci_prepare_to_sleep(struct pci_dev *dev);
1242int pci_back_from_sleep(struct pci_dev *dev); 1243int pci_back_from_sleep(struct pci_dev *dev);
1243bool pci_dev_run_wake(struct pci_dev *dev); 1244bool pci_dev_run_wake(struct pci_dev *dev);
1244bool pci_check_pme_status(struct pci_dev *dev);
1245void pci_pme_wakeup_bus(struct pci_bus *bus);
1246void pci_d3cold_enable(struct pci_dev *dev); 1245void pci_d3cold_enable(struct pci_dev *dev);
1247void pci_d3cold_disable(struct pci_dev *dev); 1246void pci_d3cold_disable(struct pci_dev *dev);
1248bool pcie_relaxed_ordering_enabled(struct pci_dev *dev); 1247bool pcie_relaxed_ordering_enabled(struct pci_dev *dev);
1249void pci_wakeup_bus(struct pci_bus *bus); 1248void pci_wakeup_bus(struct pci_bus *bus);
1250void pci_bus_set_current_state(struct pci_bus *bus, pci_power_t state); 1249void pci_bus_set_current_state(struct pci_bus *bus, pci_power_t state);
1251 1250
1252/* PCI Virtual Channel */
1253int pci_save_vc_state(struct pci_dev *dev);
1254void pci_restore_vc_state(struct pci_dev *dev);
1255void pci_allocate_vc_save_buffers(struct pci_dev *dev);
1256
1257/* For use by arch with custom probe code */ 1251/* For use by arch with custom probe code */
1258void set_pcie_port_type(struct pci_dev *pdev); 1252void set_pcie_port_type(struct pci_dev *pdev);
1259void set_pcie_hotplug_bridge(struct pci_dev *pdev); 1253void set_pcie_hotplug_bridge(struct pci_dev *pdev);
@@ -1297,8 +1291,6 @@ int pci_request_selected_regions_exclusive(struct pci_dev *, int, const char *);
1297void pci_release_selected_regions(struct pci_dev *, int); 1291void pci_release_selected_regions(struct pci_dev *, int);
1298 1292
1299/* drivers/pci/bus.c */ 1293/* drivers/pci/bus.c */
1300struct pci_bus *pci_bus_get(struct pci_bus *bus);
1301void pci_bus_put(struct pci_bus *bus);
1302void pci_add_resource(struct list_head *resources, struct resource *res); 1294void pci_add_resource(struct list_head *resources, struct resource *res);
1303void pci_add_resource_offset(struct list_head *resources, struct resource *res, 1295void pci_add_resource_offset(struct list_head *resources, struct resource *res,
1304 resource_size_t offset); 1296 resource_size_t offset);
@@ -1408,11 +1400,6 @@ resource_size_t pcibios_window_alignment(struct pci_bus *bus,
1408int pci_set_vga_state(struct pci_dev *pdev, bool decode, 1400int pci_set_vga_state(struct pci_dev *pdev, bool decode,
1409 unsigned int command_bits, u32 flags); 1401 unsigned int command_bits, u32 flags);
1410 1402
1411#define PCI_IRQ_LEGACY (1 << 0) /* Allow legacy interrupts */
1412#define PCI_IRQ_MSI (1 << 1) /* Allow MSI interrupts */
1413#define PCI_IRQ_MSIX (1 << 2) /* Allow MSI-X interrupts */
1414#define PCI_IRQ_AFFINITY (1 << 3) /* Auto-assign affinity */
1415
1416/* 1403/*
1417 * Virtual interrupts allow for more interrupts to be allocated 1404 * Virtual interrupts allow for more interrupts to be allocated
1418 * than the device has interrupts for. These are not programmed 1405 * than the device has interrupts for. These are not programmed
@@ -1517,14 +1504,6 @@ static inline int pci_irq_get_node(struct pci_dev *pdev, int vec)
1517} 1504}
1518#endif 1505#endif
1519 1506
1520static inline int
1521pci_alloc_irq_vectors(struct pci_dev *dev, unsigned int min_vecs,
1522 unsigned int max_vecs, unsigned int flags)
1523{
1524 return pci_alloc_irq_vectors_affinity(dev, min_vecs, max_vecs, flags,
1525 NULL);
1526}
1527
1528/** 1507/**
1529 * pci_irqd_intx_xlate() - Translate PCI INTx value to an IRQ domain hwirq 1508 * pci_irqd_intx_xlate() - Translate PCI INTx value to an IRQ domain hwirq
1530 * @d: the INTx IRQ domain 1509 * @d: the INTx IRQ domain
@@ -1565,10 +1544,22 @@ extern bool pcie_ports_native;
1565#define pcie_ports_native false 1544#define pcie_ports_native false
1566#endif 1545#endif
1567 1546
1547#define PCIE_LINK_STATE_L0S 1
1548#define PCIE_LINK_STATE_L1 2
1549#define PCIE_LINK_STATE_CLKPM 4
1550
1568#ifdef CONFIG_PCIEASPM 1551#ifdef CONFIG_PCIEASPM
1552int pci_disable_link_state(struct pci_dev *pdev, int state);
1553int pci_disable_link_state_locked(struct pci_dev *pdev, int state);
1554void pcie_no_aspm(void);
1569bool pcie_aspm_support_enabled(void); 1555bool pcie_aspm_support_enabled(void);
1570bool pcie_aspm_enabled(struct pci_dev *pdev); 1556bool pcie_aspm_enabled(struct pci_dev *pdev);
1571#else 1557#else
1558static inline int pci_disable_link_state(struct pci_dev *pdev, int state)
1559{ return 0; }
1560static inline int pci_disable_link_state_locked(struct pci_dev *pdev, int state)
1561{ return 0; }
1562static inline void pcie_no_aspm(void) { }
1572static inline bool pcie_aspm_support_enabled(void) { return false; } 1563static inline bool pcie_aspm_support_enabled(void) { return false; }
1573static inline bool pcie_aspm_enabled(struct pci_dev *pdev) { return false; } 1564static inline bool pcie_aspm_enabled(struct pci_dev *pdev) { return false; }
1574#endif 1565#endif
@@ -1579,23 +1570,8 @@ bool pci_aer_available(void);
1579static inline bool pci_aer_available(void) { return false; } 1570static inline bool pci_aer_available(void) { return false; }
1580#endif 1571#endif
1581 1572
1582#ifdef CONFIG_PCIE_ECRC
1583void pcie_set_ecrc_checking(struct pci_dev *dev);
1584void pcie_ecrc_get_policy(char *str);
1585#else
1586static inline void pcie_set_ecrc_checking(struct pci_dev *dev) { }
1587static inline void pcie_ecrc_get_policy(char *str) { }
1588#endif
1589
1590bool pci_ats_disabled(void); 1573bool pci_ats_disabled(void);
1591 1574
1592#ifdef CONFIG_PCIE_PTM
1593int pci_enable_ptm(struct pci_dev *dev, u8 *granularity);
1594#else
1595static inline int pci_enable_ptm(struct pci_dev *dev, u8 *granularity)
1596{ return -EINVAL; }
1597#endif
1598
1599void pci_cfg_access_lock(struct pci_dev *dev); 1575void pci_cfg_access_lock(struct pci_dev *dev);
1600bool pci_cfg_access_trylock(struct pci_dev *dev); 1576bool pci_cfg_access_trylock(struct pci_dev *dev);
1601void pci_cfg_access_unlock(struct pci_dev *dev); 1577void pci_cfg_access_unlock(struct pci_dev *dev);
@@ -1749,11 +1725,6 @@ static inline void pci_release_regions(struct pci_dev *dev) { }
1749 1725
1750static inline unsigned long pci_address_to_pio(phys_addr_t addr) { return -1; } 1726static inline unsigned long pci_address_to_pio(phys_addr_t addr) { return -1; }
1751 1727
1752static inline void pci_block_cfg_access(struct pci_dev *dev) { }
1753static inline int pci_block_cfg_access_in_atomic(struct pci_dev *dev)
1754{ return 0; }
1755static inline void pci_unblock_cfg_access(struct pci_dev *dev) { }
1756
1757static inline struct pci_bus *pci_find_next_bus(const struct pci_bus *from) 1728static inline struct pci_bus *pci_find_next_bus(const struct pci_bus *from)
1758{ return NULL; } 1729{ return NULL; }
1759static inline struct pci_dev *pci_get_slot(struct pci_bus *bus, 1730static inline struct pci_dev *pci_get_slot(struct pci_bus *bus,
@@ -1782,17 +1753,36 @@ static inline const struct pci_device_id *pci_match_id(const struct pci_device_i
1782 struct pci_dev *dev) 1753 struct pci_dev *dev)
1783{ return NULL; } 1754{ return NULL; }
1784static inline bool pci_ats_disabled(void) { return true; } 1755static inline bool pci_ats_disabled(void) { return true; }
1756
1757static inline int pci_irq_vector(struct pci_dev *dev, unsigned int nr)
1758{
1759 return -EINVAL;
1760}
1761
1762static inline int
1763pci_alloc_irq_vectors_affinity(struct pci_dev *dev, unsigned int min_vecs,
1764 unsigned int max_vecs, unsigned int flags,
1765 struct irq_affinity *aff_desc)
1766{
1767 return -ENOSPC;
1768}
1785#endif /* CONFIG_PCI */ 1769#endif /* CONFIG_PCI */
1786 1770
1771static inline int
1772pci_alloc_irq_vectors(struct pci_dev *dev, unsigned int min_vecs,
1773 unsigned int max_vecs, unsigned int flags)
1774{
1775 return pci_alloc_irq_vectors_affinity(dev, min_vecs, max_vecs, flags,
1776 NULL);
1777}
1778
1787#ifdef CONFIG_PCI_ATS 1779#ifdef CONFIG_PCI_ATS
1788/* Address Translation Service */ 1780/* Address Translation Service */
1789void pci_ats_init(struct pci_dev *dev);
1790int pci_enable_ats(struct pci_dev *dev, int ps); 1781int pci_enable_ats(struct pci_dev *dev, int ps);
1791void pci_disable_ats(struct pci_dev *dev); 1782void pci_disable_ats(struct pci_dev *dev);
1792int pci_ats_queue_depth(struct pci_dev *dev); 1783int pci_ats_queue_depth(struct pci_dev *dev);
1793int pci_ats_page_aligned(struct pci_dev *dev); 1784int pci_ats_page_aligned(struct pci_dev *dev);
1794#else 1785#else
1795static inline void pci_ats_init(struct pci_dev *d) { }
1796static inline int pci_enable_ats(struct pci_dev *d, int ps) { return -ENODEV; } 1786static inline int pci_enable_ats(struct pci_dev *d, int ps) { return -ENODEV; }
1797static inline void pci_disable_ats(struct pci_dev *d) { } 1787static inline void pci_disable_ats(struct pci_dev *d) { }
1798static inline int pci_ats_queue_depth(struct pci_dev *d) { return -ENODEV; } 1788static inline int pci_ats_queue_depth(struct pci_dev *d) { return -ENODEV; }
@@ -1803,7 +1793,7 @@ static inline int pci_ats_page_aligned(struct pci_dev *dev) { return 0; }
1803 1793
1804#include <asm/pci.h> 1794#include <asm/pci.h>
1805 1795
1806/* These two functions provide almost identical functionality. Depennding 1796/* These two functions provide almost identical functionality. Depending
1807 * on the architecture, one will be implemented as a wrapper around the 1797 * on the architecture, one will be implemented as a wrapper around the
1808 * other (in drivers/pci/mmap.c). 1798 * other (in drivers/pci/mmap.c).
1809 * 1799 *
@@ -1872,25 +1862,9 @@ static inline const char *pci_name(const struct pci_dev *pdev)
1872 return dev_name(&pdev->dev); 1862 return dev_name(&pdev->dev);
1873} 1863}
1874 1864
1875
1876/*
1877 * Some archs don't want to expose struct resource to userland as-is
1878 * in sysfs and /proc
1879 */
1880#ifdef HAVE_ARCH_PCI_RESOURCE_TO_USER
1881void pci_resource_to_user(const struct pci_dev *dev, int bar, 1865void pci_resource_to_user(const struct pci_dev *dev, int bar,
1882 const struct resource *rsrc, 1866 const struct resource *rsrc,
1883 resource_size_t *start, resource_size_t *end); 1867 resource_size_t *start, resource_size_t *end);
1884#else
1885static inline void pci_resource_to_user(const struct pci_dev *dev, int bar,
1886 const struct resource *rsrc, resource_size_t *start,
1887 resource_size_t *end)
1888{
1889 *start = rsrc->start;
1890 *end = rsrc->end;
1891}
1892#endif /* HAVE_ARCH_PCI_RESOURCE_TO_USER */
1893
1894 1868
1895/* 1869/*
1896 * The world is not perfect and supplies us with broken PCI devices. 1870 * The world is not perfect and supplies us with broken PCI devices.
@@ -2032,10 +2006,6 @@ extern unsigned long pci_cardbus_mem_size;
2032extern u8 pci_dfl_cache_line_size; 2006extern u8 pci_dfl_cache_line_size;
2033extern u8 pci_cache_line_size; 2007extern u8 pci_cache_line_size;
2034 2008
2035extern unsigned long pci_hotplug_io_size;
2036extern unsigned long pci_hotplug_mem_size;
2037extern unsigned long pci_hotplug_bus_size;
2038
2039/* Architecture-specific versions may override these (weak) */ 2009/* Architecture-specific versions may override these (weak) */
2040void pcibios_disable_device(struct pci_dev *dev); 2010void pcibios_disable_device(struct pci_dev *dev);
2041void pcibios_set_master(struct pci_dev *dev); 2011void pcibios_set_master(struct pci_dev *dev);
@@ -2305,10 +2275,6 @@ int pci_vpd_find_info_keyword(const u8 *buf, unsigned int off,
2305#ifdef CONFIG_OF 2275#ifdef CONFIG_OF
2306struct device_node; 2276struct device_node;
2307struct irq_domain; 2277struct irq_domain;
2308void pci_set_of_node(struct pci_dev *dev);
2309void pci_release_of_node(struct pci_dev *dev);
2310void pci_set_bus_of_node(struct pci_bus *bus);
2311void pci_release_bus_of_node(struct pci_bus *bus);
2312struct irq_domain *pci_host_bridge_of_msi_domain(struct pci_bus *bus); 2278struct irq_domain *pci_host_bridge_of_msi_domain(struct pci_bus *bus);
2313int pci_parse_request_of_pci_ranges(struct device *dev, 2279int pci_parse_request_of_pci_ranges(struct device *dev,
2314 struct list_head *resources, 2280 struct list_head *resources,
@@ -2318,10 +2284,6 @@ int pci_parse_request_of_pci_ranges(struct device *dev,
2318struct device_node *pcibios_get_phb_of_node(struct pci_bus *bus); 2284struct device_node *pcibios_get_phb_of_node(struct pci_bus *bus);
2319 2285
2320#else /* CONFIG_OF */ 2286#else /* CONFIG_OF */
2321static inline void pci_set_of_node(struct pci_dev *dev) { }
2322static inline void pci_release_of_node(struct pci_dev *dev) { }
2323static inline void pci_set_bus_of_node(struct pci_bus *bus) { }
2324static inline void pci_release_bus_of_node(struct pci_bus *bus) { }
2325static inline struct irq_domain * 2287static inline struct irq_domain *
2326pci_host_bridge_of_msi_domain(struct pci_bus *bus) { return NULL; } 2288pci_host_bridge_of_msi_domain(struct pci_bus *bus) { return NULL; }
2327static inline int pci_parse_request_of_pci_ranges(struct device *dev, 2289static inline int pci_parse_request_of_pci_ranges(struct device *dev,
@@ -2435,4 +2397,7 @@ void pci_uevent_ers(struct pci_dev *pdev, enum pci_ers_result err_type);
2435#define pci_notice_ratelimited(pdev, fmt, arg...) \ 2397#define pci_notice_ratelimited(pdev, fmt, arg...) \
2436 dev_notice_ratelimited(&(pdev)->dev, fmt, ##arg) 2398 dev_notice_ratelimited(&(pdev)->dev, fmt, ##arg)
2437 2399
2400#define pci_info_ratelimited(pdev, fmt, arg...) \
2401 dev_info_ratelimited(&(pdev)->dev, fmt, ##arg)
2402
2438#endif /* LINUX_PCI_H */ 2403#endif /* LINUX_PCI_H */
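The pci.h hunk above adds a pci_info_ratelimited() wrapper next to the existing pci_notice_ratelimited(), and moves the inline pci_alloc_irq_vectors() outside the CONFIG_PCI-only block so it resolves to the -ENOSPC stub of the affinity variant when PCI is disabled. A minimal usage sketch; demo_probe() is a hypothetical driver probe, everything else comes from the header as patched:

/* Hedged sketch: hypothetical probe path exercising the new helpers. */
static int demo_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	int nvecs;

	/* Falls back to the -ENOSPC stub when CONFIG_PCI is disabled. */
	nvecs = pci_alloc_irq_vectors(pdev, 1, 4,
				      PCI_IRQ_MSIX | PCI_IRQ_MSI | PCI_IRQ_LEGACY);
	if (nvecs < 0)
		return nvecs;

	/* New ratelimited info-level variant added by this hunk. */
	pci_info_ratelimited(pdev, "allocated %d interrupt vectors\n", nvecs);
	return 0;
}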
diff --git a/include/linux/pci_hotplug.h b/include/linux/pci_hotplug.h
index f694eb2ca978..b482e42d7153 100644
--- a/include/linux/pci_hotplug.h
+++ b/include/linux/pci_hotplug.h
@@ -86,114 +86,14 @@ void pci_hp_deregister(struct hotplug_slot *slot);
86#define pci_hp_initialize(slot, bus, nr, name) \ 86#define pci_hp_initialize(slot, bus, nr, name) \
87 __pci_hp_initialize(slot, bus, nr, name, THIS_MODULE, KBUILD_MODNAME) 87 __pci_hp_initialize(slot, bus, nr, name, THIS_MODULE, KBUILD_MODNAME)
88 88
89/* PCI Setting Record (Type 0) */
90struct hpp_type0 {
91 u32 revision;
92 u8 cache_line_size;
93 u8 latency_timer;
94 u8 enable_serr;
95 u8 enable_perr;
96};
97
98/* PCI-X Setting Record (Type 1) */
99struct hpp_type1 {
100 u32 revision;
101 u8 max_mem_read;
102 u8 avg_max_split;
103 u16 tot_max_split;
104};
105
106/* PCI Express Setting Record (Type 2) */
107struct hpp_type2 {
108 u32 revision;
109 u32 unc_err_mask_and;
110 u32 unc_err_mask_or;
111 u32 unc_err_sever_and;
112 u32 unc_err_sever_or;
113 u32 cor_err_mask_and;
114 u32 cor_err_mask_or;
115 u32 adv_err_cap_and;
116 u32 adv_err_cap_or;
117 u16 pci_exp_devctl_and;
118 u16 pci_exp_devctl_or;
119 u16 pci_exp_lnkctl_and;
120 u16 pci_exp_lnkctl_or;
121 u32 sec_unc_err_sever_and;
122 u32 sec_unc_err_sever_or;
123 u32 sec_unc_err_mask_and;
124 u32 sec_unc_err_mask_or;
125};
126
127/*
128 * _HPX PCI Express Setting Record (Type 3)
129 */
130struct hpx_type3 {
131 u16 device_type;
132 u16 function_type;
133 u16 config_space_location;
134 u16 pci_exp_cap_id;
135 u16 pci_exp_cap_ver;
136 u16 pci_exp_vendor_id;
137 u16 dvsec_id;
138 u16 dvsec_rev;
139 u16 match_offset;
140 u32 match_mask_and;
141 u32 match_value;
142 u16 reg_offset;
143 u32 reg_mask_and;
144 u32 reg_mask_or;
145};
146
147struct hotplug_program_ops {
148 void (*program_type0)(struct pci_dev *dev, struct hpp_type0 *hpp);
149 void (*program_type1)(struct pci_dev *dev, struct hpp_type1 *hpp);
150 void (*program_type2)(struct pci_dev *dev, struct hpp_type2 *hpp);
151 void (*program_type3)(struct pci_dev *dev, struct hpx_type3 *hpp);
152};
153
154enum hpx_type3_dev_type {
155 HPX_TYPE_ENDPOINT = BIT(0),
156 HPX_TYPE_LEG_END = BIT(1),
157 HPX_TYPE_RC_END = BIT(2),
158 HPX_TYPE_RC_EC = BIT(3),
159 HPX_TYPE_ROOT_PORT = BIT(4),
160 HPX_TYPE_UPSTREAM = BIT(5),
161 HPX_TYPE_DOWNSTREAM = BIT(6),
162 HPX_TYPE_PCI_BRIDGE = BIT(7),
163 HPX_TYPE_PCIE_BRIDGE = BIT(8),
164};
165
166enum hpx_type3_fn_type {
167 HPX_FN_NORMAL = BIT(0),
168 HPX_FN_SRIOV_PHYS = BIT(1),
169 HPX_FN_SRIOV_VIRT = BIT(2),
170};
171
172enum hpx_type3_cfg_loc {
173 HPX_CFG_PCICFG = 0,
174 HPX_CFG_PCIE_CAP = 1,
175 HPX_CFG_PCIE_CAP_EXT = 2,
176 HPX_CFG_VEND_CAP = 3,
177 HPX_CFG_DVSEC = 4,
178 HPX_CFG_MAX,
179};
180
181#ifdef CONFIG_ACPI 89#ifdef CONFIG_ACPI
182#include <linux/acpi.h> 90#include <linux/acpi.h>
183int pci_acpi_program_hp_params(struct pci_dev *dev,
184 const struct hotplug_program_ops *hp_ops);
185bool pciehp_is_native(struct pci_dev *bridge); 91bool pciehp_is_native(struct pci_dev *bridge);
186int acpi_get_hp_hw_control_from_firmware(struct pci_dev *bridge); 92int acpi_get_hp_hw_control_from_firmware(struct pci_dev *bridge);
187bool shpchp_is_native(struct pci_dev *bridge); 93bool shpchp_is_native(struct pci_dev *bridge);
188int acpi_pci_check_ejectable(struct pci_bus *pbus, acpi_handle handle); 94int acpi_pci_check_ejectable(struct pci_bus *pbus, acpi_handle handle);
189int acpi_pci_detect_ejectable(acpi_handle handle); 95int acpi_pci_detect_ejectable(acpi_handle handle);
190#else 96#else
191static inline int pci_acpi_program_hp_params(struct pci_dev *dev,
192 const struct hotplug_program_ops *hp_ops)
193{
194 return -ENODEV;
195}
196
197static inline int acpi_get_hp_hw_control_from_firmware(struct pci_dev *bridge) 97static inline int acpi_get_hp_hw_control_from_firmware(struct pci_dev *bridge)
198{ 98{
199 return 0; 99 return 0;
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index de1b75e963ef..21a572469a4e 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -2134,6 +2134,7 @@
2134#define PCI_VENDOR_ID_MYRICOM 0x14c1 2134#define PCI_VENDOR_ID_MYRICOM 0x14c1
2135 2135
2136#define PCI_VENDOR_ID_MEDIATEK 0x14c3 2136#define PCI_VENDOR_ID_MEDIATEK 0x14c3
2137#define PCI_DEVICE_ID_MEDIATEK_7629 0x7629
2137 2138
2138#define PCI_VENDOR_ID_TITAN 0x14D2 2139#define PCI_VENDOR_ID_TITAN 0x14D2
2139#define PCI_DEVICE_ID_TITAN_010L 0x8001 2140#define PCI_DEVICE_ID_TITAN_010L 0x8001
@@ -2574,6 +2575,8 @@
2574 2575
2575#define PCI_VENDOR_ID_ASMEDIA 0x1b21 2576#define PCI_VENDOR_ID_ASMEDIA 0x1b21
2576 2577
2578#define PCI_VENDOR_ID_AMAZON_ANNAPURNA_LABS 0x1c36
2579
2577#define PCI_VENDOR_ID_CIRCUITCO 0x1cc8 2580#define PCI_VENDOR_ID_CIRCUITCO 0x1cc8
2578#define PCI_SUBSYSTEM_ID_CIRCUITCO_MINNOWBOARD 0x0001 2581#define PCI_SUBSYSTEM_ID_CIRCUITCO_MINNOWBOARD 0x0001
2579 2582
diff --git a/include/linux/platform_data/cros_ec_commands.h b/include/linux/platform_data/cros_ec_commands.h
index 7ccb8757b79d..98415686cbfa 100644
--- a/include/linux/platform_data/cros_ec_commands.h
+++ b/include/linux/platform_data/cros_ec_commands.h
@@ -5513,6 +5513,18 @@ struct ec_params_fp_seed {
5513 uint8_t seed[FP_CONTEXT_TPM_BYTES]; 5513 uint8_t seed[FP_CONTEXT_TPM_BYTES];
5514} __ec_align4; 5514} __ec_align4;
5515 5515
5516#define EC_CMD_FP_ENC_STATUS 0x0409
5517
5518/* FP TPM seed has been set or not */
5519#define FP_ENC_STATUS_SEED_SET BIT(0)
5520
5521struct ec_response_fp_encryption_status {
5522 /* Used bits in encryption engine status */
5523 uint32_t valid_flags;
5524 /* Encryption engine status */
5525 uint32_t status;
5526} __ec_align4;
5527
5516/*****************************************************************************/ 5528/*****************************************************************************/
5517/* Touchpad MCU commands: range 0x0500-0x05FF */ 5529/* Touchpad MCU commands: range 0x0500-0x05FF */
5518 5530
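The cros_ec_commands.h hunk defines a new EC_CMD_FP_ENC_STATUS host command, its FP_ENC_STATUS_SEED_SET flag and the matching response layout. A minimal sketch of decoding an already-received reply; the host-command transfer itself is omitted and assumed to have been done by whichever helper the calling driver uses:

/* Hedged sketch: decoding an EC_CMD_FP_ENC_STATUS reply. */
static bool fp_tpm_seed_is_set(const struct ec_response_fp_encryption_status *resp)
{
	/* Only trust bits the EC reports as valid. */
	if (!(resp->valid_flags & FP_ENC_STATUS_SEED_SET))
		return false;

	return resp->status & FP_ENC_STATUS_SEED_SET;
}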
diff --git a/include/linux/security.h b/include/linux/security.h
index 5f7441abbf42..ace6fdb604f9 100644
--- a/include/linux/security.h
+++ b/include/linux/security.h
@@ -259,7 +259,8 @@ int security_dentry_create_files_as(struct dentry *dentry, int mode,
259 struct qstr *name, 259 struct qstr *name,
260 const struct cred *old, 260 const struct cred *old,
261 struct cred *new); 261 struct cred *new);
262 262int security_path_notify(const struct path *path, u64 mask,
263 unsigned int obj_type);
263int security_inode_alloc(struct inode *inode); 264int security_inode_alloc(struct inode *inode);
264void security_inode_free(struct inode *inode); 265void security_inode_free(struct inode *inode);
265int security_inode_init_security(struct inode *inode, struct inode *dir, 266int security_inode_init_security(struct inode *inode, struct inode *dir,
@@ -387,7 +388,6 @@ int security_ismaclabel(const char *name);
387int security_secid_to_secctx(u32 secid, char **secdata, u32 *seclen); 388int security_secid_to_secctx(u32 secid, char **secdata, u32 *seclen);
388int security_secctx_to_secid(const char *secdata, u32 seclen, u32 *secid); 389int security_secctx_to_secid(const char *secdata, u32 seclen, u32 *secid);
389void security_release_secctx(char *secdata, u32 seclen); 390void security_release_secctx(char *secdata, u32 seclen);
390
391void security_inode_invalidate_secctx(struct inode *inode); 391void security_inode_invalidate_secctx(struct inode *inode);
392int security_inode_notifysecctx(struct inode *inode, void *ctx, u32 ctxlen); 392int security_inode_notifysecctx(struct inode *inode, void *ctx, u32 ctxlen);
393int security_inode_setsecctx(struct dentry *dentry, void *ctx, u32 ctxlen); 393int security_inode_setsecctx(struct dentry *dentry, void *ctx, u32 ctxlen);
@@ -621,6 +621,12 @@ static inline int security_move_mount(const struct path *from_path,
621 return 0; 621 return 0;
622} 622}
623 623
624static inline int security_path_notify(const struct path *path, u64 mask,
625 unsigned int obj_type)
626{
627 return 0;
628}
629
624static inline int security_inode_alloc(struct inode *inode) 630static inline int security_inode_alloc(struct inode *inode)
625{ 631{
626 return 0; 632 return 0;
diff --git a/include/uapi/linux/pci_regs.h b/include/uapi/linux/pci_regs.h
index f28e562d7ca8..29d6e93fd15e 100644
--- a/include/uapi/linux/pci_regs.h
+++ b/include/uapi/linux/pci_regs.h
@@ -591,6 +591,7 @@
591#define PCI_EXP_SLTCTL_CCIE 0x0010 /* Command Completed Interrupt Enable */ 591#define PCI_EXP_SLTCTL_CCIE 0x0010 /* Command Completed Interrupt Enable */
592#define PCI_EXP_SLTCTL_HPIE 0x0020 /* Hot-Plug Interrupt Enable */ 592#define PCI_EXP_SLTCTL_HPIE 0x0020 /* Hot-Plug Interrupt Enable */
593#define PCI_EXP_SLTCTL_AIC 0x00c0 /* Attention Indicator Control */ 593#define PCI_EXP_SLTCTL_AIC 0x00c0 /* Attention Indicator Control */
594#define PCI_EXP_SLTCTL_ATTN_IND_SHIFT 6 /* Attention Indicator shift */
594#define PCI_EXP_SLTCTL_ATTN_IND_ON 0x0040 /* Attention Indicator on */ 595#define PCI_EXP_SLTCTL_ATTN_IND_ON 0x0040 /* Attention Indicator on */
595#define PCI_EXP_SLTCTL_ATTN_IND_BLINK 0x0080 /* Attention Indicator blinking */ 596#define PCI_EXP_SLTCTL_ATTN_IND_BLINK 0x0080 /* Attention Indicator blinking */
596#define PCI_EXP_SLTCTL_ATTN_IND_OFF 0x00c0 /* Attention Indicator off */ 597#define PCI_EXP_SLTCTL_ATTN_IND_OFF 0x00c0 /* Attention Indicator off */
@@ -713,7 +714,9 @@
713#define PCI_EXT_CAP_ID_DPC 0x1D /* Downstream Port Containment */ 714#define PCI_EXT_CAP_ID_DPC 0x1D /* Downstream Port Containment */
714#define PCI_EXT_CAP_ID_L1SS 0x1E /* L1 PM Substates */ 715#define PCI_EXT_CAP_ID_L1SS 0x1E /* L1 PM Substates */
715#define PCI_EXT_CAP_ID_PTM 0x1F /* Precision Time Measurement */ 716#define PCI_EXT_CAP_ID_PTM 0x1F /* Precision Time Measurement */
716#define PCI_EXT_CAP_ID_MAX PCI_EXT_CAP_ID_PTM 717#define PCI_EXT_CAP_ID_DLF 0x25 /* Data Link Feature */
718#define PCI_EXT_CAP_ID_PL_16GT 0x26 /* Physical Layer 16.0 GT/s */
719#define PCI_EXT_CAP_ID_MAX PCI_EXT_CAP_ID_PL_16GT
717 720
718#define PCI_EXT_CAP_DSN_SIZEOF 12 721#define PCI_EXT_CAP_DSN_SIZEOF 12
719#define PCI_EXT_CAP_MCAST_ENDPOINT_SIZEOF 40 722#define PCI_EXT_CAP_MCAST_ENDPOINT_SIZEOF 40
@@ -1053,4 +1056,14 @@
1053#define PCI_L1SS_CTL1_LTR_L12_TH_SCALE 0xe0000000 /* LTR_L1.2_THRESHOLD_Scale */ 1056#define PCI_L1SS_CTL1_LTR_L12_TH_SCALE 0xe0000000 /* LTR_L1.2_THRESHOLD_Scale */
1054#define PCI_L1SS_CTL2 0x0c /* Control 2 Register */ 1057#define PCI_L1SS_CTL2 0x0c /* Control 2 Register */
1055 1058
1059/* Data Link Feature */
1060#define PCI_DLF_CAP 0x04 /* Capabilities Register */
1061#define PCI_DLF_EXCHANGE_ENABLE 0x80000000 /* Data Link Feature Exchange Enable */
1062
1063/* Physical Layer 16.0 GT/s */
1064#define PCI_PL_16GT_LE_CTRL 0x20 /* Lane Equalization Control Register */
1065#define PCI_PL_16GT_LE_CTRL_DSP_TX_PRESET_MASK 0x0000000F
1066#define PCI_PL_16GT_LE_CTRL_USP_TX_PRESET_MASK 0x000000F0
1067#define PCI_PL_16GT_LE_CTRL_USP_TX_PRESET_SHIFT 4
1068
1056#endif /* LINUX_PCI_REGS_H */ 1069#endif /* LINUX_PCI_REGS_H */
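The pci_regs.h hunk adds a shift for the Slot Control attention-indicator field and registers the Data Link Feature (0x25) and Physical Layer 16.0 GT/s (0x26) extended capabilities together with their first registers. A minimal sketch of reading the new fields; demo_read_new_regs() is a hypothetical helper, the accessors are the standard PCI config/capability ones:

/* Hedged sketch: using the new attention-indicator shift and DLF capability. */
static void demo_read_new_regs(struct pci_dev *pdev)
{
	unsigned int ind;
	u32 dlf_cap;
	u16 sltctl;
	int pos;

	/* Attention Indicator Control, now extractable via the new shift. */
	pcie_capability_read_word(pdev, PCI_EXP_SLTCTL, &sltctl);
	ind = (sltctl & PCI_EXP_SLTCTL_AIC) >> PCI_EXP_SLTCTL_ATTN_IND_SHIFT;
	pci_info(pdev, "attention indicator field: %u\n", ind);

	/* Data Link Feature extended capability added by this hunk. */
	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_DLF);
	if (pos) {
		pci_read_config_dword(pdev, pos + PCI_DLF_CAP, &dlf_cap);
		if (dlf_cap & PCI_DLF_EXCHANGE_ENABLE)
			pci_info(pdev, "Data Link Feature exchange enabled\n");
	}
}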
diff --git a/kernel/bpf/inode.c b/kernel/bpf/inode.c
index cc0d0cf114e3..a70f7209cda3 100644
--- a/kernel/bpf/inode.c
+++ b/kernel/bpf/inode.c
@@ -14,8 +14,9 @@
14#include <linux/mount.h> 14#include <linux/mount.h>
15#include <linux/namei.h> 15#include <linux/namei.h>
16#include <linux/fs.h> 16#include <linux/fs.h>
17#include <linux/fs_context.h>
18#include <linux/fs_parser.h>
17#include <linux/kdev_t.h> 19#include <linux/kdev_t.h>
18#include <linux/parser.h>
19#include <linux/filter.h> 20#include <linux/filter.h>
20#include <linux/bpf.h> 21#include <linux/bpf.h>
21#include <linux/bpf_trace.h> 22#include <linux/bpf_trace.h>
@@ -583,58 +584,52 @@ static const struct super_operations bpf_super_ops = {
583 584
584enum { 585enum {
585 OPT_MODE, 586 OPT_MODE,
586 OPT_ERR,
587}; 587};
588 588
589static const match_table_t bpf_mount_tokens = { 589static const struct fs_parameter_spec bpf_param_specs[] = {
590 { OPT_MODE, "mode=%o" }, 590 fsparam_u32oct ("mode", OPT_MODE),
591 { OPT_ERR, NULL }, 591 {}
592};
593
594static const struct fs_parameter_description bpf_fs_parameters = {
595 .name = "bpf",
596 .specs = bpf_param_specs,
592}; 597};
593 598
594struct bpf_mount_opts { 599struct bpf_mount_opts {
595 umode_t mode; 600 umode_t mode;
596}; 601};
597 602
598static int bpf_parse_options(char *data, struct bpf_mount_opts *opts) 603static int bpf_parse_param(struct fs_context *fc, struct fs_parameter *param)
599{ 604{
600 substring_t args[MAX_OPT_ARGS]; 605 struct bpf_mount_opts *opts = fc->fs_private;
601 int option, token; 606 struct fs_parse_result result;
602 char *ptr; 607 int opt;
603 608
604 opts->mode = S_IRWXUGO; 609 opt = fs_parse(fc, &bpf_fs_parameters, param, &result);
605 610 if (opt < 0)
606 while ((ptr = strsep(&data, ",")) != NULL) {
607 if (!*ptr)
608 continue;
609
610 token = match_token(ptr, bpf_mount_tokens, args);
611 switch (token) {
612 case OPT_MODE:
613 if (match_octal(&args[0], &option))
614 return -EINVAL;
615 opts->mode = option & S_IALLUGO;
616 break;
617 /* We might like to report bad mount options here, but 611 /* We might like to report bad mount options here, but
618 * traditionally we've ignored all mount options, so we'd 612 * traditionally we've ignored all mount options, so we'd
619 * better continue to ignore non-existing options for bpf. 613 * better continue to ignore non-existing options for bpf.
620 */ 614 */
621 } 615 return opt == -ENOPARAM ? 0 : opt;
616
617 switch (opt) {
618 case OPT_MODE:
619 opts->mode = result.uint_32 & S_IALLUGO;
620 break;
622 } 621 }
623 622
624 return 0; 623 return 0;
625} 624}
626 625
627static int bpf_fill_super(struct super_block *sb, void *data, int silent) 626static int bpf_fill_super(struct super_block *sb, struct fs_context *fc)
628{ 627{
629 static const struct tree_descr bpf_rfiles[] = { { "" } }; 628 static const struct tree_descr bpf_rfiles[] = { { "" } };
630 struct bpf_mount_opts opts; 629 struct bpf_mount_opts *opts = fc->fs_private;
631 struct inode *inode; 630 struct inode *inode;
632 int ret; 631 int ret;
633 632
634 ret = bpf_parse_options(data, &opts);
635 if (ret)
636 return ret;
637
638 ret = simple_fill_super(sb, BPF_FS_MAGIC, bpf_rfiles); 633 ret = simple_fill_super(sb, BPF_FS_MAGIC, bpf_rfiles);
639 if (ret) 634 if (ret)
640 return ret; 635 return ret;
@@ -644,21 +639,50 @@ static int bpf_fill_super(struct super_block *sb, void *data, int silent)
644 inode = sb->s_root->d_inode; 639 inode = sb->s_root->d_inode;
645 inode->i_op = &bpf_dir_iops; 640 inode->i_op = &bpf_dir_iops;
646 inode->i_mode &= ~S_IALLUGO; 641 inode->i_mode &= ~S_IALLUGO;
647 inode->i_mode |= S_ISVTX | opts.mode; 642 inode->i_mode |= S_ISVTX | opts->mode;
648 643
649 return 0; 644 return 0;
650} 645}
651 646
652static struct dentry *bpf_mount(struct file_system_type *type, int flags, 647static int bpf_get_tree(struct fs_context *fc)
653 const char *dev_name, void *data) 648{
649 return get_tree_nodev(fc, bpf_fill_super);
650}
651
652static void bpf_free_fc(struct fs_context *fc)
654{ 653{
655 return mount_nodev(type, flags, data, bpf_fill_super); 654 kfree(fc->fs_private);
655}
656
657static const struct fs_context_operations bpf_context_ops = {
658 .free = bpf_free_fc,
659 .parse_param = bpf_parse_param,
660 .get_tree = bpf_get_tree,
661};
662
663/*
664 * Set up the filesystem mount context.
665 */
666static int bpf_init_fs_context(struct fs_context *fc)
667{
668 struct bpf_mount_opts *opts;
669
670 opts = kzalloc(sizeof(struct bpf_mount_opts), GFP_KERNEL);
671 if (!opts)
672 return -ENOMEM;
673
674 opts->mode = S_IRWXUGO;
675
676 fc->fs_private = opts;
677 fc->ops = &bpf_context_ops;
678 return 0;
656} 679}
657 680
658static struct file_system_type bpf_fs_type = { 681static struct file_system_type bpf_fs_type = {
659 .owner = THIS_MODULE, 682 .owner = THIS_MODULE,
660 .name = "bpf", 683 .name = "bpf",
661 .mount = bpf_mount, 684 .init_fs_context = bpf_init_fs_context,
685 .parameters = &bpf_fs_parameters,
662 .kill_sb = kill_litter_super, 686 .kill_sb = kill_litter_super,
663}; 687};
664 688
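The bpf inode.c hunk converts bpffs from the legacy match_token/bpf_parse_options mount path to the fs_context API: each option is parsed by bpf_parse_param(), stored in fc->fs_private by bpf_init_fs_context(), and applied in bpf_fill_super(). Mount-time behaviour is unchanged, including the silent acceptance of unknown options. A minimal userspace sketch of the equivalent mount(2) call with the octal "mode" parameter the new parser accepts; the mountpoint path is the conventional one and is an assumption here:

/* Hedged sketch: mounting bpffs with the "mode" option from userspace. */
#include <sys/mount.h>
#include <stdio.h>

int main(void)
{
	/* Unknown options are still ignored, as the retained comment notes. */
	if (mount("bpf", "/sys/fs/bpf", "bpf", 0, "mode=0770"))
		perror("mount bpffs");
	return 0;
}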
diff --git a/kernel/livepatch/core.c b/kernel/livepatch/core.c
index c4ce08f43bd6..ab4a4606d19b 100644
--- a/kernel/livepatch/core.c
+++ b/kernel/livepatch/core.c
@@ -1175,6 +1175,7 @@ err:
1175 pr_warn("patch '%s' failed for module '%s', refusing to load module '%s'\n", 1175 pr_warn("patch '%s' failed for module '%s', refusing to load module '%s'\n",
1176 patch->mod->name, obj->mod->name, obj->mod->name); 1176 patch->mod->name, obj->mod->name, obj->mod->name);
1177 mod->klp_alive = false; 1177 mod->klp_alive = false;
1178 obj->mod = NULL;
1178 klp_cleanup_module_patches_limited(mod, patch); 1179 klp_cleanup_module_patches_limited(mod, patch);
1179 mutex_unlock(&klp_mutex); 1180 mutex_unlock(&klp_mutex);
1180 1181
diff --git a/security/safesetid/securityfs.c b/security/safesetid/securityfs.c
index d568e17dd773..74a13d432ed8 100644
--- a/security/safesetid/securityfs.c
+++ b/security/safesetid/securityfs.c
@@ -187,7 +187,8 @@ out_free_rule:
187out_free_buf: 187out_free_buf:
188 kfree(buf); 188 kfree(buf);
189out_free_pol: 189out_free_pol:
190 release_ruleset(pol); 190 if (pol)
191 release_ruleset(pol);
191 return err; 192 return err;
192} 193}
193 194
diff --git a/security/security.c b/security/security.c
index 250ee2d76406..25ee5c75551f 100644
--- a/security/security.c
+++ b/security/security.c
@@ -870,6 +870,12 @@ int security_move_mount(const struct path *from_path, const struct path *to_path
870 return call_int_hook(move_mount, 0, from_path, to_path); 870 return call_int_hook(move_mount, 0, from_path, to_path);
871} 871}
872 872
873int security_path_notify(const struct path *path, u64 mask,
874 unsigned int obj_type)
875{
876 return call_int_hook(path_notify, 0, path, mask, obj_type);
877}
878
873int security_inode_alloc(struct inode *inode) 879int security_inode_alloc(struct inode *inode)
874{ 880{
875 int rc = lsm_inode_alloc(inode); 881 int rc = lsm_inode_alloc(inode);
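The security.h/security.c hunks introduce a security_path_notify() hook so LSMs can vet the attachment of filesystem watches; with no hook registered (or CONFIG_SECURITY off) it still returns 0. A minimal sketch of the intended call pattern from a notification backend; demo_add_inode_watch() is hypothetical, the real call sites live in the fsnotify code and are an assumption here, and the object-type constant is the one used by the SELinux hunk below:

/* Hedged sketch: a mark-add path consulting the new LSM hook first. */
static int demo_add_inode_watch(const struct path *path, u64 event_mask)
{
	int ret;

	ret = security_path_notify(path, event_mask, FSNOTIFY_OBJ_TYPE_INODE);
	if (ret)
		return ret;	/* LSM vetoed the watch */

	/* ... proceed to allocate and attach the fsnotify mark ... */
	return 0;
}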
diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
index 74dd46de01b6..9625b99e677f 100644
--- a/security/selinux/hooks.c
+++ b/security/selinux/hooks.c
@@ -89,6 +89,8 @@
89#include <linux/kernfs.h> 89#include <linux/kernfs.h>
90#include <linux/stringhash.h> /* for hashlen_string() */ 90#include <linux/stringhash.h> /* for hashlen_string() */
91#include <uapi/linux/mount.h> 91#include <uapi/linux/mount.h>
92#include <linux/fsnotify.h>
93#include <linux/fanotify.h>
92 94
93#include "avc.h" 95#include "avc.h"
94#include "objsec.h" 96#include "objsec.h"
@@ -3275,6 +3277,50 @@ static int selinux_inode_removexattr(struct dentry *dentry, const char *name)
3275 return -EACCES; 3277 return -EACCES;
3276} 3278}
3277 3279
3280static int selinux_path_notify(const struct path *path, u64 mask,
3281 unsigned int obj_type)
3282{
3283 int ret;
3284 u32 perm;
3285
3286 struct common_audit_data ad;
3287
3288 ad.type = LSM_AUDIT_DATA_PATH;
3289 ad.u.path = *path;
3290
3291 /*
3292 * Set permission needed based on the type of mark being set.
3293 * Performs an additional check for sb watches.
3294 */
3295 switch (obj_type) {
3296 case FSNOTIFY_OBJ_TYPE_VFSMOUNT:
3297 perm = FILE__WATCH_MOUNT;
3298 break;
3299 case FSNOTIFY_OBJ_TYPE_SB:
3300 perm = FILE__WATCH_SB;
3301 ret = superblock_has_perm(current_cred(), path->dentry->d_sb,
3302 FILESYSTEM__WATCH, &ad);
3303 if (ret)
3304 return ret;
3305 break;
3306 case FSNOTIFY_OBJ_TYPE_INODE:
3307 perm = FILE__WATCH;
3308 break;
3309 default:
3310 return -EINVAL;
3311 }
3312
3313 /* blocking watches require the file:watch_with_perm permission */
3314 if (mask & (ALL_FSNOTIFY_PERM_EVENTS))
3315 perm |= FILE__WATCH_WITH_PERM;
3316
3317 /* watches on read-like events need the file:watch_reads permission */
3318 if (mask & (FS_ACCESS | FS_ACCESS_PERM | FS_CLOSE_NOWRITE))
3319 perm |= FILE__WATCH_READS;
3320
3321 return path_has_perm(current_cred(), path, perm);
3322}
3323
3278/* 3324/*
3279 * Copy the inode security context value to the user. 3325 * Copy the inode security context value to the user.
3280 * 3326 *
@@ -3403,7 +3449,7 @@ static int selinux_inode_copy_up_xattr(const char *name)
3403static int selinux_kernfs_init_security(struct kernfs_node *kn_dir, 3449static int selinux_kernfs_init_security(struct kernfs_node *kn_dir,
3404 struct kernfs_node *kn) 3450 struct kernfs_node *kn)
3405{ 3451{
3406 const struct task_security_struct *tsec = current_security(); 3452 const struct task_security_struct *tsec = selinux_cred(current_cred());
3407 u32 parent_sid, newsid, clen; 3453 u32 parent_sid, newsid, clen;
3408 int rc; 3454 int rc;
3409 char *context; 3455 char *context;
@@ -6818,6 +6864,7 @@ static struct security_hook_list selinux_hooks[] __lsm_ro_after_init = {
6818 LSM_HOOK_INIT(inode_getsecid, selinux_inode_getsecid), 6864 LSM_HOOK_INIT(inode_getsecid, selinux_inode_getsecid),
6819 LSM_HOOK_INIT(inode_copy_up, selinux_inode_copy_up), 6865 LSM_HOOK_INIT(inode_copy_up, selinux_inode_copy_up),
6820 LSM_HOOK_INIT(inode_copy_up_xattr, selinux_inode_copy_up_xattr), 6866 LSM_HOOK_INIT(inode_copy_up_xattr, selinux_inode_copy_up_xattr),
6867 LSM_HOOK_INIT(path_notify, selinux_path_notify),
6821 6868
6822 LSM_HOOK_INIT(kernfs_init_security, selinux_kernfs_init_security), 6869 LSM_HOOK_INIT(kernfs_init_security, selinux_kernfs_init_security),
6823 6870
diff --git a/security/selinux/include/classmap.h b/security/selinux/include/classmap.h
index 201f7e588a29..32e9b03be3dd 100644
--- a/security/selinux/include/classmap.h
+++ b/security/selinux/include/classmap.h
@@ -7,7 +7,8 @@
7 7
8#define COMMON_FILE_PERMS COMMON_FILE_SOCK_PERMS, "unlink", "link", \ 8#define COMMON_FILE_PERMS COMMON_FILE_SOCK_PERMS, "unlink", "link", \
9 "rename", "execute", "quotaon", "mounton", "audit_access", \ 9 "rename", "execute", "quotaon", "mounton", "audit_access", \
10 "open", "execmod" 10 "open", "execmod", "watch", "watch_mount", "watch_sb", \
11 "watch_with_perm", "watch_reads"
11 12
12#define COMMON_SOCK_PERMS COMMON_FILE_SOCK_PERMS, "bind", "connect", \ 13#define COMMON_SOCK_PERMS COMMON_FILE_SOCK_PERMS, "bind", "connect", \
13 "listen", "accept", "getopt", "setopt", "shutdown", "recvfrom", \ 14 "listen", "accept", "getopt", "setopt", "shutdown", "recvfrom", \
@@ -60,7 +61,7 @@ struct security_class_mapping secclass_map[] = {
60 { "filesystem", 61 { "filesystem",
61 { "mount", "remount", "unmount", "getattr", 62 { "mount", "remount", "unmount", "getattr",
62 "relabelfrom", "relabelto", "associate", "quotamod", 63 "relabelfrom", "relabelto", "associate", "quotamod",
63 "quotaget", NULL } }, 64 "quotaget", "watch", NULL } },
64 { "file", 65 { "file",
65 { COMMON_FILE_PERMS, 66 { COMMON_FILE_PERMS,
66 "execute_no_trans", "entrypoint", NULL } }, 67 "execute_no_trans", "entrypoint", NULL } },
diff --git a/security/selinux/include/objsec.h b/security/selinux/include/objsec.h
index 91c5395dd20c..586b7abd0aa7 100644
--- a/security/selinux/include/objsec.h
+++ b/security/selinux/include/objsec.h
@@ -37,16 +37,6 @@ struct task_security_struct {
37 u32 sockcreate_sid; /* fscreate SID */ 37 u32 sockcreate_sid; /* fscreate SID */
38}; 38};
39 39
40/*
41 * get the subjective security ID of the current task
42 */
43static inline u32 current_sid(void)
44{
45 const struct task_security_struct *tsec = current_security();
46
47 return tsec->sid;
48}
49
50enum label_initialized { 40enum label_initialized {
51 LABEL_INVALID, /* invalid or not initialized */ 41 LABEL_INVALID, /* invalid or not initialized */
52 LABEL_INITIALIZED, /* initialized */ 42 LABEL_INITIALIZED, /* initialized */
@@ -185,4 +175,14 @@ static inline struct ipc_security_struct *selinux_ipc(
185 return ipc->security + selinux_blob_sizes.lbs_ipc; 175 return ipc->security + selinux_blob_sizes.lbs_ipc;
186} 176}
187 177
178/*
179 * get the subjective security ID of the current task
180 */
181static inline u32 current_sid(void)
182{
183 const struct task_security_struct *tsec = selinux_cred(current_cred());
184
185 return tsec->sid;
186}
187
188#endif /* _SELINUX_OBJSEC_H_ */ 188#endif /* _SELINUX_OBJSEC_H_ */
diff --git a/security/selinux/netif.c b/security/selinux/netif.c
index 9cb83eeee1d9..e40fecd73752 100644
--- a/security/selinux/netif.c
+++ b/security/selinux/netif.c
@@ -132,9 +132,9 @@ static void sel_netif_destroy(struct sel_netif *netif)
132 */ 132 */
133static int sel_netif_sid_slow(struct net *ns, int ifindex, u32 *sid) 133static int sel_netif_sid_slow(struct net *ns, int ifindex, u32 *sid)
134{ 134{
135 int ret; 135 int ret = 0;
136 struct sel_netif *netif; 136 struct sel_netif *netif;
137 struct sel_netif *new = NULL; 137 struct sel_netif *new;
138 struct net_device *dev; 138 struct net_device *dev;
139 139
140 /* NOTE: we always use init's network namespace since we don't 140 /* NOTE: we always use init's network namespace since we don't
@@ -151,32 +151,27 @@ static int sel_netif_sid_slow(struct net *ns, int ifindex, u32 *sid)
151 netif = sel_netif_find(ns, ifindex); 151 netif = sel_netif_find(ns, ifindex);
152 if (netif != NULL) { 152 if (netif != NULL) {
153 *sid = netif->nsec.sid; 153 *sid = netif->nsec.sid;
154 ret = 0;
155 goto out; 154 goto out;
156 } 155 }
157 new = kzalloc(sizeof(*new), GFP_ATOMIC); 156
158 if (new == NULL) { 157 ret = security_netif_sid(&selinux_state, dev->name, sid);
159 ret = -ENOMEM;
160 goto out;
161 }
162 ret = security_netif_sid(&selinux_state, dev->name, &new->nsec.sid);
163 if (ret != 0)
164 goto out;
165 new->nsec.ns = ns;
166 new->nsec.ifindex = ifindex;
167 ret = sel_netif_insert(new);
168 if (ret != 0) 158 if (ret != 0)
169 goto out; 159 goto out;
170 *sid = new->nsec.sid; 160 new = kzalloc(sizeof(*new), GFP_ATOMIC);
161 if (new) {
162 new->nsec.ns = ns;
163 new->nsec.ifindex = ifindex;
164 new->nsec.sid = *sid;
165 if (sel_netif_insert(new))
166 kfree(new);
167 }
171 168
172out: 169out:
173 spin_unlock_bh(&sel_netif_lock); 170 spin_unlock_bh(&sel_netif_lock);
174 dev_put(dev); 171 dev_put(dev);
175 if (unlikely(ret)) { 172 if (unlikely(ret))
176 pr_warn("SELinux: failure in %s(), unable to determine network interface label (%d)\n", 173 pr_warn("SELinux: failure in %s(), unable to determine network interface label (%d)\n",
177 __func__, ifindex); 174 __func__, ifindex);
178 kfree(new);
179 }
180 return ret; 175 return ret;
181} 176}
182 177
diff --git a/security/selinux/netnode.c b/security/selinux/netnode.c
index cae1fcaffd1a..9ab84efa46c7 100644
--- a/security/selinux/netnode.c
+++ b/security/selinux/netnode.c
@@ -189,9 +189,9 @@ static void sel_netnode_insert(struct sel_netnode *node)
189 */ 189 */
190static int sel_netnode_sid_slow(void *addr, u16 family, u32 *sid) 190static int sel_netnode_sid_slow(void *addr, u16 family, u32 *sid)
191{ 191{
192 int ret = -ENOMEM; 192 int ret;
193 struct sel_netnode *node; 193 struct sel_netnode *node;
194 struct sel_netnode *new = NULL; 194 struct sel_netnode *new;
195 195
196 spin_lock_bh(&sel_netnode_lock); 196 spin_lock_bh(&sel_netnode_lock);
197 node = sel_netnode_find(addr, family); 197 node = sel_netnode_find(addr, family);
@@ -200,38 +200,36 @@ static int sel_netnode_sid_slow(void *addr, u16 family, u32 *sid)
200 spin_unlock_bh(&sel_netnode_lock); 200 spin_unlock_bh(&sel_netnode_lock);
201 return 0; 201 return 0;
202 } 202 }
203
203 new = kzalloc(sizeof(*new), GFP_ATOMIC); 204 new = kzalloc(sizeof(*new), GFP_ATOMIC);
204 if (new == NULL)
205 goto out;
206 switch (family) { 205 switch (family) {
207 case PF_INET: 206 case PF_INET:
208 ret = security_node_sid(&selinux_state, PF_INET, 207 ret = security_node_sid(&selinux_state, PF_INET,
209 addr, sizeof(struct in_addr), sid); 208 addr, sizeof(struct in_addr), sid);
210 new->nsec.addr.ipv4 = *(__be32 *)addr; 209 if (new)
210 new->nsec.addr.ipv4 = *(__be32 *)addr;
211 break; 211 break;
212 case PF_INET6: 212 case PF_INET6:
213 ret = security_node_sid(&selinux_state, PF_INET6, 213 ret = security_node_sid(&selinux_state, PF_INET6,
214 addr, sizeof(struct in6_addr), sid); 214 addr, sizeof(struct in6_addr), sid);
215 new->nsec.addr.ipv6 = *(struct in6_addr *)addr; 215 if (new)
216 new->nsec.addr.ipv6 = *(struct in6_addr *)addr;
216 break; 217 break;
217 default: 218 default:
218 BUG(); 219 BUG();
219 ret = -EINVAL; 220 ret = -EINVAL;
220 } 221 }
221 if (ret != 0) 222 if (ret == 0 && new) {
222 goto out; 223 new->nsec.family = family;
223 224 new->nsec.sid = *sid;
224 new->nsec.family = family; 225 sel_netnode_insert(new);
225 new->nsec.sid = *sid; 226 } else
226 sel_netnode_insert(new); 227 kfree(new);
227 228
228out:
229 spin_unlock_bh(&sel_netnode_lock); 229 spin_unlock_bh(&sel_netnode_lock);
230 if (unlikely(ret)) { 230 if (unlikely(ret))
231 pr_warn("SELinux: failure in %s(), unable to determine network node label\n", 231 pr_warn("SELinux: failure in %s(), unable to determine network node label\n",
232 __func__); 232 __func__);
233 kfree(new);
234 }
235 return ret; 233 return ret;
236} 234}
237 235
diff --git a/security/selinux/netport.c b/security/selinux/netport.c
index 364b6d5b8968..3f8b2c0458c8 100644
--- a/security/selinux/netport.c
+++ b/security/selinux/netport.c
@@ -137,9 +137,9 @@ static void sel_netport_insert(struct sel_netport *port)
137 */ 137 */
138static int sel_netport_sid_slow(u8 protocol, u16 pnum, u32 *sid) 138static int sel_netport_sid_slow(u8 protocol, u16 pnum, u32 *sid)
139{ 139{
140 int ret = -ENOMEM; 140 int ret;
141 struct sel_netport *port; 141 struct sel_netport *port;
142 struct sel_netport *new = NULL; 142 struct sel_netport *new;
143 143
144 spin_lock_bh(&sel_netport_lock); 144 spin_lock_bh(&sel_netport_lock);
145 port = sel_netport_find(protocol, pnum); 145 port = sel_netport_find(protocol, pnum);
@@ -148,25 +148,23 @@ static int sel_netport_sid_slow(u8 protocol, u16 pnum, u32 *sid)
148 spin_unlock_bh(&sel_netport_lock); 148 spin_unlock_bh(&sel_netport_lock);
149 return 0; 149 return 0;
150 } 150 }
151 new = kzalloc(sizeof(*new), GFP_ATOMIC); 151
152 if (new == NULL)
153 goto out;
154 ret = security_port_sid(&selinux_state, protocol, pnum, sid); 152 ret = security_port_sid(&selinux_state, protocol, pnum, sid);
155 if (ret != 0) 153 if (ret != 0)
156 goto out; 154 goto out;
157 155 new = kzalloc(sizeof(*new), GFP_ATOMIC);
158 new->psec.port = pnum; 156 if (new) {
159 new->psec.protocol = protocol; 157 new->psec.port = pnum;
160 new->psec.sid = *sid; 158 new->psec.protocol = protocol;
161 sel_netport_insert(new); 159 new->psec.sid = *sid;
160 sel_netport_insert(new);
161 }
162 162
163out: 163out:
164 spin_unlock_bh(&sel_netport_lock); 164 spin_unlock_bh(&sel_netport_lock);
165 if (unlikely(ret)) { 165 if (unlikely(ret))
166 pr_warn("SELinux: failure in %s(), unable to determine network port label\n", 166 pr_warn("SELinux: failure in %s(), unable to determine network port label\n",
167 __func__); 167 __func__);
168 kfree(new);
169 }
170 return ret; 168 return ret;
171} 169}
172 170
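The netif.c, netnode.c and netport.c hunks all apply the same restructuring: the SID is computed first and returned unconditionally, while insertion into the cache becomes best-effort, so a failed GFP_ATOMIC allocation no longer turns into a lookup failure. A condensed sketch of the shared pattern; demo_entry, demo_security_sid() and demo_insert() are hypothetical stand-ins for the per-cache types and helpers:

/* Hedged sketch of the shared "SID first, cache best-effort" pattern. */
struct demo_entry { u32 key; u32 sid; };

static int demo_security_sid(u32 key, u32 *sid);	/* authoritative lookup */
static int demo_insert(struct demo_entry *new);		/* may fail; caller frees */

static int demo_sid_slow(u32 key, u32 *sid)
{
	struct demo_entry *new;
	int ret;

	ret = demo_security_sid(key, sid);
	if (ret != 0)
		return ret;

	new = kzalloc(sizeof(*new), GFP_ATOMIC);
	if (new) {			/* cache miss is tolerated */
		new->key = key;
		new->sid = *sid;
		if (demo_insert(new))
			kfree(new);
	}
	return 0;
}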
diff --git a/security/selinux/ss/policydb.c b/security/selinux/ss/policydb.c
index f8efaa9f647c..1260f5fb766e 100644
--- a/security/selinux/ss/policydb.c
+++ b/security/selinux/ss/policydb.c
@@ -177,6 +177,195 @@ static struct policydb_compat_info *policydb_lookup_compat(int version)
177} 177}
178 178
179/* 179/*
180 * The following *_destroy functions are used to
181 * free any memory allocated for each kind of
182 * symbol data in the policy database.
183 */
184
185static int perm_destroy(void *key, void *datum, void *p)
186{
187 kfree(key);
188 kfree(datum);
189 return 0;
190}
191
192static int common_destroy(void *key, void *datum, void *p)
193{
194 struct common_datum *comdatum;
195
196 kfree(key);
197 if (datum) {
198 comdatum = datum;
199 hashtab_map(comdatum->permissions.table, perm_destroy, NULL);
200 hashtab_destroy(comdatum->permissions.table);
201 }
202 kfree(datum);
203 return 0;
204}
205
206static void constraint_expr_destroy(struct constraint_expr *expr)
207{
208 if (expr) {
209 ebitmap_destroy(&expr->names);
210 if (expr->type_names) {
211 ebitmap_destroy(&expr->type_names->types);
212 ebitmap_destroy(&expr->type_names->negset);
213 kfree(expr->type_names);
214 }
215 kfree(expr);
216 }
217}
218
219static int cls_destroy(void *key, void *datum, void *p)
220{
221 struct class_datum *cladatum;
222 struct constraint_node *constraint, *ctemp;
223 struct constraint_expr *e, *etmp;
224
225 kfree(key);
226 if (datum) {
227 cladatum = datum;
228 hashtab_map(cladatum->permissions.table, perm_destroy, NULL);
229 hashtab_destroy(cladatum->permissions.table);
230 constraint = cladatum->constraints;
231 while (constraint) {
232 e = constraint->expr;
233 while (e) {
234 etmp = e;
235 e = e->next;
236 constraint_expr_destroy(etmp);
237 }
238 ctemp = constraint;
239 constraint = constraint->next;
240 kfree(ctemp);
241 }
242
243 constraint = cladatum->validatetrans;
244 while (constraint) {
245 e = constraint->expr;
246 while (e) {
247 etmp = e;
248 e = e->next;
249 constraint_expr_destroy(etmp);
250 }
251 ctemp = constraint;
252 constraint = constraint->next;
253 kfree(ctemp);
254 }
255 kfree(cladatum->comkey);
256 }
257 kfree(datum);
258 return 0;
259}
260
261static int role_destroy(void *key, void *datum, void *p)
262{
263 struct role_datum *role;
264
265 kfree(key);
266 if (datum) {
267 role = datum;
268 ebitmap_destroy(&role->dominates);
269 ebitmap_destroy(&role->types);
270 }
271 kfree(datum);
272 return 0;
273}
274
275static int type_destroy(void *key, void *datum, void *p)
276{
277 kfree(key);
278 kfree(datum);
279 return 0;
280}
281
282static int user_destroy(void *key, void *datum, void *p)
283{
284 struct user_datum *usrdatum;
285
286 kfree(key);
287 if (datum) {
288 usrdatum = datum;
289 ebitmap_destroy(&usrdatum->roles);
290 ebitmap_destroy(&usrdatum->range.level[0].cat);
291 ebitmap_destroy(&usrdatum->range.level[1].cat);
292 ebitmap_destroy(&usrdatum->dfltlevel.cat);
293 }
294 kfree(datum);
295 return 0;
296}
297
298static int sens_destroy(void *key, void *datum, void *p)
299{
300 struct level_datum *levdatum;
301
302 kfree(key);
303 if (datum) {
304 levdatum = datum;
305 if (levdatum->level)
306 ebitmap_destroy(&levdatum->level->cat);
307 kfree(levdatum->level);
308 }
309 kfree(datum);
310 return 0;
311}
312
313static int cat_destroy(void *key, void *datum, void *p)
314{
315 kfree(key);
316 kfree(datum);
317 return 0;
318}
319
320static int (*destroy_f[SYM_NUM]) (void *key, void *datum, void *datap) =
321{
322 common_destroy,
323 cls_destroy,
324 role_destroy,
325 type_destroy,
326 user_destroy,
327 cond_destroy_bool,
328 sens_destroy,
329 cat_destroy,
330};
331
332static int filenametr_destroy(void *key, void *datum, void *p)
333{
334 struct filename_trans *ft = key;
335
336 kfree(ft->name);
337 kfree(key);
338 kfree(datum);
339 cond_resched();
340 return 0;
341}
342
343static int range_tr_destroy(void *key, void *datum, void *p)
344{
345 struct mls_range *rt = datum;
346
347 kfree(key);
348 ebitmap_destroy(&rt->level[0].cat);
349 ebitmap_destroy(&rt->level[1].cat);
350 kfree(datum);
351 cond_resched();
352 return 0;
353}
354
355static void ocontext_destroy(struct ocontext *c, int i)
356{
357 if (!c)
358 return;
359
360 context_destroy(&c->context[0]);
361 context_destroy(&c->context[1]);
362 if (i == OCON_ISID || i == OCON_FS ||
363 i == OCON_NETIF || i == OCON_FSUSE)
364 kfree(c->u.name);
365 kfree(c);
366}
367
368/*
180 * Initialize the role table. 369 * Initialize the role table.
181 */ 370 */
182static int roles_init(struct policydb *p) 371static int roles_init(struct policydb *p)
@@ -250,6 +439,7 @@ static int filenametr_cmp(struct hashtab *h, const void *k1, const void *k2)
250static u32 rangetr_hash(struct hashtab *h, const void *k) 439static u32 rangetr_hash(struct hashtab *h, const void *k)
251{ 440{
252 const struct range_trans *key = k; 441 const struct range_trans *key = k;
442
253 return (key->source_type + (key->target_type << 3) + 443 return (key->source_type + (key->target_type << 3) +
254 (key->target_class << 5)) & (h->size - 1); 444 (key->target_class << 5)) & (h->size - 1);
255} 445}
@@ -272,8 +462,6 @@ static int rangetr_cmp(struct hashtab *h, const void *k1, const void *k2)
272 return v; 462 return v;
273} 463}
274 464
275static int (*destroy_f[SYM_NUM]) (void *key, void *datum, void *datap);
276
277/* 465/*
278 * Initialize a policy database structure. 466 * Initialize a policy database structure.
279 */ 467 */
@@ -301,7 +489,8 @@ static int policydb_init(struct policydb *p)
301 if (rc) 489 if (rc)
302 goto out; 490 goto out;
303 491
304 p->filename_trans = hashtab_create(filenametr_hash, filenametr_cmp, (1 << 10)); 492 p->filename_trans = hashtab_create(filenametr_hash, filenametr_cmp,
493 (1 << 10));
305 if (!p->filename_trans) { 494 if (!p->filename_trans) {
306 rc = -ENOMEM; 495 rc = -ENOMEM;
307 goto out; 496 goto out;
@@ -399,7 +588,7 @@ static int type_index(void *key, void *datum, void *datap)
399 || typdatum->bounds > p->p_types.nprim) 588 || typdatum->bounds > p->p_types.nprim)
400 return -EINVAL; 589 return -EINVAL;
401 p->sym_val_to_name[SYM_TYPES][typdatum->value - 1] = key; 590 p->sym_val_to_name[SYM_TYPES][typdatum->value - 1] = key;
402 p->type_val_to_struct_array[typdatum->value - 1] = typdatum; 591 p->type_val_to_struct[typdatum->value - 1] = typdatum;
403 } 592 }
404 593
405 return 0; 594 return 0;
@@ -477,9 +666,9 @@ static void hash_eval(struct hashtab *h, const char *hash_name)
477 struct hashtab_info info; 666 struct hashtab_info info;
478 667
479 hashtab_stat(h, &info); 668 hashtab_stat(h, &info);
480 pr_debug("SELinux: %s: %d entries and %d/%d buckets used, " 669 pr_debug("SELinux: %s: %d entries and %d/%d buckets used, longest chain length %d\n",
481 "longest chain length %d\n", hash_name, h->nel, 670 hash_name, h->nel, info.slots_used, h->size,
482 info.slots_used, h->size, info.max_chain_len); 671 info.max_chain_len);
483} 672}
484 673
485static void symtab_hash_eval(struct symtab *s) 674static void symtab_hash_eval(struct symtab *s)
@@ -541,10 +730,10 @@ static int policydb_index(struct policydb *p)
541 if (!p->user_val_to_struct) 730 if (!p->user_val_to_struct)
542 return -ENOMEM; 731 return -ENOMEM;
543 732
544 p->type_val_to_struct_array = kvcalloc(p->p_types.nprim, 733 p->type_val_to_struct = kvcalloc(p->p_types.nprim,
545 sizeof(*p->type_val_to_struct_array), 734 sizeof(*p->type_val_to_struct),
546 GFP_KERNEL); 735 GFP_KERNEL);
547 if (!p->type_val_to_struct_array) 736 if (!p->type_val_to_struct)
548 return -ENOMEM; 737 return -ENOMEM;
549 738
550 rc = cond_init_bool_indexes(p); 739 rc = cond_init_bool_indexes(p);
@@ -568,193 +757,6 @@ out:
568} 757}
569 758
570/* 759/*
571 * The following *_destroy functions are used to
572 * free any memory allocated for each kind of
573 * symbol data in the policy database.
574 */
575
576static int perm_destroy(void *key, void *datum, void *p)
577{
578 kfree(key);
579 kfree(datum);
580 return 0;
581}
582
583static int common_destroy(void *key, void *datum, void *p)
584{
585 struct common_datum *comdatum;
586
587 kfree(key);
588 if (datum) {
589 comdatum = datum;
590 hashtab_map(comdatum->permissions.table, perm_destroy, NULL);
591 hashtab_destroy(comdatum->permissions.table);
592 }
593 kfree(datum);
594 return 0;
595}
596
597static void constraint_expr_destroy(struct constraint_expr *expr)
598{
599 if (expr) {
600 ebitmap_destroy(&expr->names);
601 if (expr->type_names) {
602 ebitmap_destroy(&expr->type_names->types);
603 ebitmap_destroy(&expr->type_names->negset);
604 kfree(expr->type_names);
605 }
606 kfree(expr);
607 }
608}
609
610static int cls_destroy(void *key, void *datum, void *p)
611{
612 struct class_datum *cladatum;
613 struct constraint_node *constraint, *ctemp;
614 struct constraint_expr *e, *etmp;
615
616 kfree(key);
617 if (datum) {
618 cladatum = datum;
619 hashtab_map(cladatum->permissions.table, perm_destroy, NULL);
620 hashtab_destroy(cladatum->permissions.table);
621 constraint = cladatum->constraints;
622 while (constraint) {
623 e = constraint->expr;
624 while (e) {
625 etmp = e;
626 e = e->next;
627 constraint_expr_destroy(etmp);
628 }
629 ctemp = constraint;
630 constraint = constraint->next;
631 kfree(ctemp);
632 }
633
634 constraint = cladatum->validatetrans;
635 while (constraint) {
636 e = constraint->expr;
637 while (e) {
638 etmp = e;
639 e = e->next;
640 constraint_expr_destroy(etmp);
641 }
642 ctemp = constraint;
643 constraint = constraint->next;
644 kfree(ctemp);
645 }
646 kfree(cladatum->comkey);
647 }
648 kfree(datum);
649 return 0;
650}
651
652static int role_destroy(void *key, void *datum, void *p)
653{
654 struct role_datum *role;
655
656 kfree(key);
657 if (datum) {
658 role = datum;
659 ebitmap_destroy(&role->dominates);
660 ebitmap_destroy(&role->types);
661 }
662 kfree(datum);
663 return 0;
664}
665
666static int type_destroy(void *key, void *datum, void *p)
667{
668 kfree(key);
669 kfree(datum);
670 return 0;
671}
672
673static int user_destroy(void *key, void *datum, void *p)
674{
675 struct user_datum *usrdatum;
676
677 kfree(key);
678 if (datum) {
679 usrdatum = datum;
680 ebitmap_destroy(&usrdatum->roles);
681 ebitmap_destroy(&usrdatum->range.level[0].cat);
682 ebitmap_destroy(&usrdatum->range.level[1].cat);
683 ebitmap_destroy(&usrdatum->dfltlevel.cat);
684 }
685 kfree(datum);
686 return 0;
687}
688
689static int sens_destroy(void *key, void *datum, void *p)
690{
691 struct level_datum *levdatum;
692
693 kfree(key);
694 if (datum) {
695 levdatum = datum;
696 if (levdatum->level)
697 ebitmap_destroy(&levdatum->level->cat);
698 kfree(levdatum->level);
699 }
700 kfree(datum);
701 return 0;
702}
703
704static int cat_destroy(void *key, void *datum, void *p)
705{
706 kfree(key);
707 kfree(datum);
708 return 0;
709}
710
711static int (*destroy_f[SYM_NUM]) (void *key, void *datum, void *datap) =
712{
713 common_destroy,
714 cls_destroy,
715 role_destroy,
716 type_destroy,
717 user_destroy,
718 cond_destroy_bool,
719 sens_destroy,
720 cat_destroy,
721};
722
723static int filenametr_destroy(void *key, void *datum, void *p)
724{
725 struct filename_trans *ft = key;
726 kfree(ft->name);
727 kfree(key);
728 kfree(datum);
729 cond_resched();
730 return 0;
731}
732
733static int range_tr_destroy(void *key, void *datum, void *p)
734{
735 struct mls_range *rt = datum;
736 kfree(key);
737 ebitmap_destroy(&rt->level[0].cat);
738 ebitmap_destroy(&rt->level[1].cat);
739 kfree(datum);
740 cond_resched();
741 return 0;
742}
743
744static void ocontext_destroy(struct ocontext *c, int i)
745{
746 if (!c)
747 return;
748
749 context_destroy(&c->context[0]);
750 context_destroy(&c->context[1]);
751 if (i == OCON_ISID || i == OCON_FS ||
752 i == OCON_NETIF || i == OCON_FSUSE)
753 kfree(c->u.name);
754 kfree(c);
755}
756
757/*
758 * Free any memory allocated by a policy database structure. 760 * Free any memory allocated by a policy database structure.
759 */ 761 */
760void policydb_destroy(struct policydb *p) 762void policydb_destroy(struct policydb *p)
@@ -777,7 +779,7 @@ void policydb_destroy(struct policydb *p)
777 kfree(p->class_val_to_struct); 779 kfree(p->class_val_to_struct);
778 kfree(p->role_val_to_struct); 780 kfree(p->role_val_to_struct);
779 kfree(p->user_val_to_struct); 781 kfree(p->user_val_to_struct);
780 kvfree(p->type_val_to_struct_array); 782 kvfree(p->type_val_to_struct);
781 783
782 avtab_destroy(&p->te_avtab); 784 avtab_destroy(&p->te_avtab);
783 785
@@ -1722,7 +1724,7 @@ static int type_bounds_sanity_check(void *key, void *datum, void *datap)
1722 return -EINVAL; 1724 return -EINVAL;
1723 } 1725 }
1724 1726
1725 upper = p->type_val_to_struct_array[upper->bounds - 1]; 1727 upper = p->type_val_to_struct[upper->bounds - 1];
1726 BUG_ON(!upper); 1728 BUG_ON(!upper);
1727 1729
1728 if (upper->attribute) { 1730 if (upper->attribute) {
diff --git a/security/selinux/ss/policydb.h b/security/selinux/ss/policydb.h
index fcc6366b447f..162d0e79b85b 100644
--- a/security/selinux/ss/policydb.h
+++ b/security/selinux/ss/policydb.h
@@ -253,7 +253,7 @@ struct policydb {
253 struct class_datum **class_val_to_struct; 253 struct class_datum **class_val_to_struct;
254 struct role_datum **role_val_to_struct; 254 struct role_datum **role_val_to_struct;
255 struct user_datum **user_val_to_struct; 255 struct user_datum **user_val_to_struct;
256 struct type_datum **type_val_to_struct_array; 256 struct type_datum **type_val_to_struct;
257 257
258 /* type enforcement access vectors and transitions */ 258 /* type enforcement access vectors and transitions */
259 struct avtab te_avtab; 259 struct avtab te_avtab;
diff --git a/security/selinux/ss/services.c b/security/selinux/ss/services.c
index d61563a3695e..3a29e7c24ba9 100644
--- a/security/selinux/ss/services.c
+++ b/security/selinux/ss/services.c
@@ -542,13 +542,13 @@ static void type_attribute_bounds_av(struct policydb *policydb,
542 struct type_datum *target; 542 struct type_datum *target;
543 u32 masked = 0; 543 u32 masked = 0;
544 544
545 source = policydb->type_val_to_struct_array[scontext->type - 1]; 545 source = policydb->type_val_to_struct[scontext->type - 1];
546 BUG_ON(!source); 546 BUG_ON(!source);
547 547
548 if (!source->bounds) 548 if (!source->bounds)
549 return; 549 return;
550 550
551 target = policydb->type_val_to_struct_array[tcontext->type - 1]; 551 target = policydb->type_val_to_struct[tcontext->type - 1];
552 BUG_ON(!target); 552 BUG_ON(!target);
553 553
554 memset(&lo_avd, 0, sizeof(lo_avd)); 554 memset(&lo_avd, 0, sizeof(lo_avd));
@@ -891,7 +891,7 @@ int security_bounded_transition(struct selinux_state *state,
891 891
892 index = new_context->type; 892 index = new_context->type;
893 while (true) { 893 while (true) {
894 type = policydb->type_val_to_struct_array[index - 1]; 894 type = policydb->type_val_to_struct[index - 1];
895 BUG_ON(!type); 895 BUG_ON(!type);
896 896
897 /* not bounded anymore */ 897 /* not bounded anymore */
diff --git a/security/selinux/ss/sidtab.c b/security/selinux/ss/sidtab.c
index 1f0a6eaa2d6a..7d49994e8d5f 100644
--- a/security/selinux/ss/sidtab.c
+++ b/security/selinux/ss/sidtab.c
@@ -12,7 +12,7 @@
12#include <linux/slab.h> 12#include <linux/slab.h>
13#include <linux/sched.h> 13#include <linux/sched.h>
14#include <linux/spinlock.h> 14#include <linux/spinlock.h>
15#include <linux/atomic.h> 15#include <asm/barrier.h>
16#include "flask.h" 16#include "flask.h"
17#include "security.h" 17#include "security.h"
18#include "sidtab.h" 18#include "sidtab.h"
@@ -23,14 +23,14 @@ int sidtab_init(struct sidtab *s)
23 23
24 memset(s->roots, 0, sizeof(s->roots)); 24 memset(s->roots, 0, sizeof(s->roots));
25 25
26 /* max count is SIDTAB_MAX so valid index is always < SIDTAB_MAX */
26 for (i = 0; i < SIDTAB_RCACHE_SIZE; i++) 27 for (i = 0; i < SIDTAB_RCACHE_SIZE; i++)
27 atomic_set(&s->rcache[i], -1); 28 s->rcache[i] = SIDTAB_MAX;
28 29
29 for (i = 0; i < SECINITSID_NUM; i++) 30 for (i = 0; i < SECINITSID_NUM; i++)
30 s->isids[i].set = 0; 31 s->isids[i].set = 0;
31 32
32 atomic_set(&s->count, 0); 33 s->count = 0;
33
34 s->convert = NULL; 34 s->convert = NULL;
35 35
36 spin_lock_init(&s->lock); 36 spin_lock_init(&s->lock);
@@ -130,14 +130,12 @@ static struct context *sidtab_do_lookup(struct sidtab *s, u32 index, int alloc)
130 130
131static struct context *sidtab_lookup(struct sidtab *s, u32 index) 131static struct context *sidtab_lookup(struct sidtab *s, u32 index)
132{ 132{
133 u32 count = (u32)atomic_read(&s->count); 133 /* read entries only after reading count */
134 u32 count = smp_load_acquire(&s->count);
134 135
135 if (index >= count) 136 if (index >= count)
136 return NULL; 137 return NULL;
137 138
138 /* read entries after reading count */
139 smp_rmb();
140
141 return sidtab_do_lookup(s, index, 0); 139 return sidtab_do_lookup(s, index, 0);
142} 140}
143 141
@@ -210,10 +208,10 @@ static int sidtab_find_context(union sidtab_entry_inner entry,
210static void sidtab_rcache_update(struct sidtab *s, u32 index, u32 pos) 208static void sidtab_rcache_update(struct sidtab *s, u32 index, u32 pos)
211{ 209{
212 while (pos > 0) { 210 while (pos > 0) {
213 atomic_set(&s->rcache[pos], atomic_read(&s->rcache[pos - 1])); 211 WRITE_ONCE(s->rcache[pos], READ_ONCE(s->rcache[pos - 1]));
214 --pos; 212 --pos;
215 } 213 }
216 atomic_set(&s->rcache[0], (int)index); 214 WRITE_ONCE(s->rcache[0], index);
217} 215}
218 216
219static void sidtab_rcache_push(struct sidtab *s, u32 index) 217static void sidtab_rcache_push(struct sidtab *s, u32 index)
@@ -227,14 +225,14 @@ static int sidtab_rcache_search(struct sidtab *s, struct context *context,
227 u32 i; 225 u32 i;
228 226
229 for (i = 0; i < SIDTAB_RCACHE_SIZE; i++) { 227 for (i = 0; i < SIDTAB_RCACHE_SIZE; i++) {
230 int v = atomic_read(&s->rcache[i]); 228 u32 v = READ_ONCE(s->rcache[i]);
231 229
232 if (v < 0) 230 if (v >= SIDTAB_MAX)
233 continue; 231 continue;
234 232
235 if (context_cmp(sidtab_do_lookup(s, (u32)v, 0), context)) { 233 if (context_cmp(sidtab_do_lookup(s, v, 0), context)) {
236 sidtab_rcache_update(s, (u32)v, i); 234 sidtab_rcache_update(s, v, i);
237 *index = (u32)v; 235 *index = v;
238 return 0; 236 return 0;
239 } 237 }
240 } 238 }
@@ -245,8 +243,7 @@ static int sidtab_reverse_lookup(struct sidtab *s, struct context *context,
245 u32 *index) 243 u32 *index)
246{ 244{
247 unsigned long flags; 245 unsigned long flags;
248 u32 count = (u32)atomic_read(&s->count); 246 u32 count, count_locked, level, pos;
249 u32 count_locked, level, pos;
250 struct sidtab_convert_params *convert; 247 struct sidtab_convert_params *convert;
251 struct context *dst, *dst_convert; 248 struct context *dst, *dst_convert;
252 int rc; 249 int rc;
@@ -255,11 +252,10 @@ static int sidtab_reverse_lookup(struct sidtab *s, struct context *context,
255 if (rc == 0) 252 if (rc == 0)
256 return 0; 253 return 0;
257 254
255 /* read entries only after reading count */
256 count = smp_load_acquire(&s->count);
258 level = sidtab_level_from_count(count); 257 level = sidtab_level_from_count(count);
259 258
260 /* read entries after reading count */
261 smp_rmb();
262
263 pos = 0; 259 pos = 0;
264 rc = sidtab_find_context(s->roots[level], &pos, count, level, 260 rc = sidtab_find_context(s->roots[level], &pos, count, level,
265 context, index); 261 context, index);
@@ -272,7 +268,7 @@ static int sidtab_reverse_lookup(struct sidtab *s, struct context *context,
272 spin_lock_irqsave(&s->lock, flags); 268 spin_lock_irqsave(&s->lock, flags);
273 269
274 convert = s->convert; 270 convert = s->convert;
275 count_locked = (u32)atomic_read(&s->count); 271 count_locked = s->count;
276 level = sidtab_level_from_count(count_locked); 272 level = sidtab_level_from_count(count_locked);
277 273
278 /* if count has changed before we acquired the lock, then catch up */ 274 /* if count has changed before we acquired the lock, then catch up */
@@ -320,7 +316,7 @@ static int sidtab_reverse_lookup(struct sidtab *s, struct context *context,
320 } 316 }
321 317
322 /* at this point we know the insert won't fail */ 318 /* at this point we know the insert won't fail */
323 atomic_set(&convert->target->count, count + 1); 319 convert->target->count = count + 1;
324 } 320 }
325 321
326 if (context->len) 322 if (context->len)
@@ -331,9 +327,7 @@ static int sidtab_reverse_lookup(struct sidtab *s, struct context *context,
331 *index = count; 327 *index = count;
332 328
333 /* write entries before writing new count */ 329 /* write entries before writing new count */
334 smp_wmb(); 330 smp_store_release(&s->count, count + 1);
335
336 atomic_set(&s->count, count + 1);
337 331
338 rc = 0; 332 rc = 0;
339out_unlock: 333out_unlock:
@@ -423,7 +417,7 @@ int sidtab_convert(struct sidtab *s, struct sidtab_convert_params *params)
423 return -EBUSY; 417 return -EBUSY;
424 } 418 }
425 419
426 count = (u32)atomic_read(&s->count); 420 count = s->count;
427 level = sidtab_level_from_count(count); 421 level = sidtab_level_from_count(count);
428 422
429 /* allocate last leaf in the new sidtab (to avoid race with 423 /* allocate last leaf in the new sidtab (to avoid race with
@@ -436,7 +430,7 @@ int sidtab_convert(struct sidtab *s, struct sidtab_convert_params *params)
436 } 430 }
437 431
438 /* set count in case no new entries are added during conversion */ 432 /* set count in case no new entries are added during conversion */
439 atomic_set(&params->target->count, count); 433 params->target->count = count;
440 434
441 /* enable live convert of new entries */ 435 /* enable live convert of new entries */
442 s->convert = params; 436 s->convert = params;
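The sidtab.c hunks above drop the atomic_t count and the explicit smp_rmb()/smp_wmb() pairs in favour of a plain u32 that is published with smp_store_release() and read with smp_load_acquire(). As a minimal sketch of that pairing, using hypothetical 'xtab' names rather than the sidtab code itself:

/*
 * Writers (under a spinlock) initialise a slot first and only then
 * publish the new count with a release store; lock-free readers bound
 * themselves by an acquire load of the same count, so every slot below
 * the value they read is guaranteed to be fully initialised.
 */
#define XTAB_SLOTS	128

struct xtab {
	void *slots[XTAB_SLOTS];	/* filled before count is raised */
	u32 count;			/* published via smp_store_release() */
	spinlock_t lock;		/* serialises writers only */
};

/* writer side, called with xt->lock held */
static void xtab_publish(struct xtab *xt, void *entry)
{
	if (xt->count >= XTAB_SLOTS)
		return;
	xt->slots[xt->count] = entry;			/* 1: init the slot  */
	smp_store_release(&xt->count, xt->count + 1);	/* 2: then expose it */
}

/* reader side, no lock taken */
static void *xtab_lookup(struct xtab *xt, u32 idx)
{
	u32 count = smp_load_acquire(&xt->count);	/* pairs with the release */

	return idx < count ? xt->slots[idx] : NULL;
}

The release/acquire pair gives the same ordering the old smp_wmb()/smp_rmb() calls provided, but ties it to the one variable being published, which is why the separate barriers disappear from sidtab_reverse_lookup().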
diff --git a/security/selinux/ss/sidtab.h b/security/selinux/ss/sidtab.h
index bbd5c0d1f3bd..1f4763141aa1 100644
--- a/security/selinux/ss/sidtab.h
+++ b/security/selinux/ss/sidtab.h
@@ -40,8 +40,8 @@ union sidtab_entry_inner {
40#define SIDTAB_LEAF_ENTRIES \ 40#define SIDTAB_LEAF_ENTRIES \
41 (SIDTAB_NODE_ALLOC_SIZE / sizeof(struct sidtab_entry_leaf)) 41 (SIDTAB_NODE_ALLOC_SIZE / sizeof(struct sidtab_entry_leaf))
42 42
43#define SIDTAB_MAX_BITS 31 /* limited to INT_MAX due to atomic_t range */ 43#define SIDTAB_MAX_BITS 32
44#define SIDTAB_MAX (((u32)1 << SIDTAB_MAX_BITS) - 1) 44#define SIDTAB_MAX U32_MAX
45/* ensure enough tree levels for SIDTAB_MAX entries */ 45/* ensure enough tree levels for SIDTAB_MAX entries */
46#define SIDTAB_MAX_LEVEL \ 46#define SIDTAB_MAX_LEVEL \
47 DIV_ROUND_UP(SIDTAB_MAX_BITS - size_to_shift(SIDTAB_LEAF_ENTRIES), \ 47 DIV_ROUND_UP(SIDTAB_MAX_BITS - size_to_shift(SIDTAB_LEAF_ENTRIES), \
@@ -69,13 +69,22 @@ struct sidtab_convert_params {
69#define SIDTAB_RCACHE_SIZE 3 69#define SIDTAB_RCACHE_SIZE 3
70 70
71struct sidtab { 71struct sidtab {
72 /*
73 * lock-free read access only for as many items as a prior read of
74 * 'count'
75 */
72 union sidtab_entry_inner roots[SIDTAB_MAX_LEVEL + 1]; 76 union sidtab_entry_inner roots[SIDTAB_MAX_LEVEL + 1];
73 atomic_t count; 77 /*
78 * access atomically via {READ|WRITE}_ONCE(); only increment under
79 * spinlock
80 */
81 u32 count;
82 /* access only under spinlock */
74 struct sidtab_convert_params *convert; 83 struct sidtab_convert_params *convert;
75 spinlock_t lock; 84 spinlock_t lock;
76 85
77 /* reverse lookup cache */ 86 /* reverse lookup cache - access atomically via {READ|WRITE}_ONCE() */
78 atomic_t rcache[SIDTAB_RCACHE_SIZE]; 87 u32 rcache[SIDTAB_RCACHE_SIZE];
79 88
80 /* index == SID - 1 (no entry for SECSID_NULL) */ 89 /* index == SID - 1 (no entry for SECSID_NULL) */
81 struct sidtab_isid_entry isids[SECINITSID_NUM]; 90 struct sidtab_isid_entry isids[SECINITSID_NUM];
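The sidtab.h hunk widens SIDTAB_MAX_BITS from 31 to 32 and turns rcache[] into plain u32 slots accessed with READ_ONCE()/WRITE_ONCE(). The old atomic_t slots needed a negative value as the "empty" marker, which capped valid indices at INT_MAX; with u32 slots a sentinel at the top of the range does the same job, as in this rough sketch (hypothetical names, not the sidtab code):

#define SLOT_EMPTY	U32_MAX		/* marks an unused cache slot */

static u32 cache_slot = SLOT_EMPTY;

/* single, tear-free store; racing readers see either old or new value */
static void cache_store(u32 index)
{
	WRITE_ONCE(cache_slot, index);
}

/* single, tear-free load; no lock needed for a lone u32 */
static bool cache_load(u32 *index)
{
	u32 v = READ_ONCE(cache_slot);

	if (v == SLOT_EMPTY)
		return false;
	*index = v;
	return true;
}

This is also why the lookup loop in sidtab.c now skips entries with 'v >= SIDTAB_MAX' instead of 'v < 0'.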
diff --git a/security/smack/smack_access.c b/security/smack/smack_access.c
index f1c93a7be9ec..38ac3da4e791 100644
--- a/security/smack/smack_access.c
+++ b/security/smack/smack_access.c
@@ -465,7 +465,7 @@ char *smk_parse_smack(const char *string, int len)
465 if (i == 0 || i >= SMK_LONGLABEL) 465 if (i == 0 || i >= SMK_LONGLABEL)
466 return ERR_PTR(-EINVAL); 466 return ERR_PTR(-EINVAL);
467 467
468 smack = kzalloc(i + 1, GFP_KERNEL); 468 smack = kzalloc(i + 1, GFP_NOFS);
469 if (smack == NULL) 469 if (smack == NULL)
470 return ERR_PTR(-ENOMEM); 470 return ERR_PTR(-ENOMEM);
471 471
@@ -500,7 +500,7 @@ int smk_netlbl_mls(int level, char *catset, struct netlbl_lsm_secattr *sap,
500 if ((m & *cp) == 0) 500 if ((m & *cp) == 0)
501 continue; 501 continue;
502 rc = netlbl_catmap_setbit(&sap->attr.mls.cat, 502 rc = netlbl_catmap_setbit(&sap->attr.mls.cat,
503 cat, GFP_KERNEL); 503 cat, GFP_NOFS);
504 if (rc < 0) { 504 if (rc < 0) {
505 netlbl_catmap_free(sap->attr.mls.cat); 505 netlbl_catmap_free(sap->attr.mls.cat);
506 return rc; 506 return rc;
@@ -536,7 +536,7 @@ struct smack_known *smk_import_entry(const char *string, int len)
536 if (skp != NULL) 536 if (skp != NULL)
537 goto freeout; 537 goto freeout;
538 538
539 skp = kzalloc(sizeof(*skp), GFP_KERNEL); 539 skp = kzalloc(sizeof(*skp), GFP_NOFS);
540 if (skp == NULL) { 540 if (skp == NULL) {
541 skp = ERR_PTR(-ENOMEM); 541 skp = ERR_PTR(-ENOMEM);
542 goto freeout; 542 goto freeout;
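The smack_access.c hunks switch several allocations from GFP_KERNEL to GFP_NOFS. The usual reasoning for this kind of change is that the callers can be reached with filesystem locks held, and a GFP_KERNEL allocation may recurse into filesystem reclaim and deadlock; GFP_NOFS forbids that recursion. A minimal sketch of the pattern (hypothetical helper, not the Smack code):

#include <linux/err.h>
#include <linux/slab.h>
#include <linux/string.h>

/* duplicate a label from a path that may hold fs locks (e.g. xattr set) */
static char *copy_label_nofs(const char *label, size_t len)
{
	char *buf;

	buf = kzalloc(len + 1, GFP_NOFS);	/* not GFP_KERNEL here */
	if (!buf)
		return ERR_PTR(-ENOMEM);

	memcpy(buf, label, len);
	return buf;
}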
diff --git a/security/smack/smack_lsm.c b/security/smack/smack_lsm.c
index 4c5e5a438f8b..abeb09c30633 100644
--- a/security/smack/smack_lsm.c
+++ b/security/smack/smack_lsm.c
@@ -288,7 +288,7 @@ static struct smack_known *smk_fetch(const char *name, struct inode *ip,
288 if (!(ip->i_opflags & IOP_XATTR)) 288 if (!(ip->i_opflags & IOP_XATTR))
289 return ERR_PTR(-EOPNOTSUPP); 289 return ERR_PTR(-EOPNOTSUPP);
290 290
291 buffer = kzalloc(SMK_LONGLABEL, GFP_KERNEL); 291 buffer = kzalloc(SMK_LONGLABEL, GFP_NOFS);
292 if (buffer == NULL) 292 if (buffer == NULL)
293 return ERR_PTR(-ENOMEM); 293 return ERR_PTR(-ENOMEM);
294 294
@@ -307,7 +307,7 @@ static struct smack_known *smk_fetch(const char *name, struct inode *ip,
307 307
308/** 308/**
309 * init_inode_smack - initialize an inode security blob 309 * init_inode_smack - initialize an inode security blob
310 * @isp: the blob to initialize 310 * @inode: inode to extract the info from
311 * @skp: a pointer to the Smack label entry to use in the blob 311 * @skp: a pointer to the Smack label entry to use in the blob
312 * 312 *
313 */ 313 */
@@ -509,7 +509,7 @@ static int smack_ptrace_traceme(struct task_struct *ptp)
509 509
510/** 510/**
511 * smack_syslog - Smack approval on syslog 511 * smack_syslog - Smack approval on syslog
512 * @type: message type 512 * @typefrom_file: unused
513 * 513 *
514 * Returns 0 on success, error code otherwise. 514 * Returns 0 on success, error code otherwise.
515 */ 515 */
@@ -765,7 +765,7 @@ static int smack_sb_eat_lsm_opts(char *options, void **mnt_opts)
765/** 765/**
766 * smack_set_mnt_opts - set Smack specific mount options 766 * smack_set_mnt_opts - set Smack specific mount options
767 * @sb: the file system superblock 767 * @sb: the file system superblock
768 * @opts: Smack mount options 768 * @mnt_opts: Smack mount options
769 * @kern_flags: mount option from kernel space or user space 769 * @kern_flags: mount option from kernel space or user space
770 * @set_kern_flags: where to store converted mount opts 770 * @set_kern_flags: where to store converted mount opts
771 * 771 *
@@ -937,7 +937,8 @@ static int smack_bprm_set_creds(struct linux_binprm *bprm)
937 937
938 if (rc != 0) 938 if (rc != 0)
939 return rc; 939 return rc;
940 } else if (bprm->unsafe) 940 }
941 if (bprm->unsafe & ~LSM_UNSAFE_PTRACE)
941 return -EPERM; 942 return -EPERM;
942 943
943 bsp->smk_task = isp->smk_task; 944 bsp->smk_task = isp->smk_task;
@@ -958,7 +959,7 @@ static int smack_bprm_set_creds(struct linux_binprm *bprm)
958 * smack_inode_alloc_security - allocate an inode blob 959 * smack_inode_alloc_security - allocate an inode blob
959 * @inode: the inode in need of a blob 960 * @inode: the inode in need of a blob
960 * 961 *
961 * Returns 0 if it gets a blob, -ENOMEM otherwise 962 * Returns 0
962 */ 963 */
963static int smack_inode_alloc_security(struct inode *inode) 964static int smack_inode_alloc_security(struct inode *inode)
964{ 965{
@@ -1164,7 +1165,7 @@ static int smack_inode_rename(struct inode *old_inode,
1164 * 1165 *
1165 * This is the important Smack hook. 1166 * This is the important Smack hook.
1166 * 1167 *
1167 * Returns 0 if access is permitted, -EACCES otherwise 1168 * Returns 0 if access is permitted, an error code otherwise
1168 */ 1169 */
1169static int smack_inode_permission(struct inode *inode, int mask) 1170static int smack_inode_permission(struct inode *inode, int mask)
1170{ 1171{
@@ -1222,8 +1223,7 @@ static int smack_inode_setattr(struct dentry *dentry, struct iattr *iattr)
1222 1223
1223/** 1224/**
1224 * smack_inode_getattr - Smack check for getting attributes 1225 * smack_inode_getattr - Smack check for getting attributes
1225 * @mnt: vfsmount of the object 1226 * @path: path to extract the info from
1226 * @dentry: the object
1227 * 1227 *
1228 * Returns 0 if access is permitted, an error code otherwise 1228 * Returns 0 if access is permitted, an error code otherwise
1229 */ 1229 */
@@ -1870,14 +1870,13 @@ static int smack_file_receive(struct file *file)
1870/** 1870/**
1871 * smack_file_open - Smack dentry open processing 1871 * smack_file_open - Smack dentry open processing
1872 * @file: the object 1872 * @file: the object
1873 * @cred: task credential
1874 * 1873 *
1875 * Set the security blob in the file structure. 1874 * Set the security blob in the file structure.
1876 * Allow the open only if the task has read access. There are 1875 * Allow the open only if the task has read access. There are
1877 * many read operations (e.g. fstat) that you can do with an 1876 * many read operations (e.g. fstat) that you can do with an
1878 * fd even if you have the file open write-only. 1877 * fd even if you have the file open write-only.
1879 * 1878 *
1880 * Returns 0 1879 * Returns 0 if current has access, error code otherwise
1881 */ 1880 */
1882static int smack_file_open(struct file *file) 1881static int smack_file_open(struct file *file)
1883{ 1882{
@@ -1900,7 +1899,7 @@ static int smack_file_open(struct file *file)
1900 1899
1901/** 1900/**
1902 * smack_cred_alloc_blank - "allocate" blank task-level security credentials 1901 * smack_cred_alloc_blank - "allocate" blank task-level security credentials
1903 * @new: the new credentials 1902 * @cred: the new credentials
1904 * @gfp: the atomicity of any memory allocations 1903 * @gfp: the atomicity of any memory allocations
1905 * 1904 *
1906 * Prepare a blank set of credentials for modification. This must allocate all 1905 * Prepare a blank set of credentials for modification. This must allocate all
@@ -1983,7 +1982,7 @@ static void smack_cred_transfer(struct cred *new, const struct cred *old)
1983 1982
1984/** 1983/**
1985 * smack_cred_getsecid - get the secid corresponding to a creds structure 1984 * smack_cred_getsecid - get the secid corresponding to a creds structure
1986 * @c: the object creds 1985 * @cred: the object creds
1987 * @secid: where to put the result 1986 * @secid: where to put the result
1988 * 1987 *
1989 * Sets the secid to contain a u32 version of the smack label. 1988 * Sets the secid to contain a u32 version of the smack label.
@@ -2140,8 +2139,6 @@ static int smack_task_getioprio(struct task_struct *p)
2140/** 2139/**
2141 * smack_task_setscheduler - Smack check on setting scheduler 2140 * smack_task_setscheduler - Smack check on setting scheduler
2142 * @p: the task object 2141 * @p: the task object
2143 * @policy: unused
2144 * @lp: unused
2145 * 2142 *
2146 * Return 0 if read access is permitted 2143 * Return 0 if read access is permitted
2147 */ 2144 */
@@ -2611,8 +2608,9 @@ static void smk_ipv6_port_label(struct socket *sock, struct sockaddr *address)
2611 2608
2612/** 2609/**
2613 * smk_ipv6_port_check - check Smack port access 2610 * smk_ipv6_port_check - check Smack port access
2614 * @sock: socket 2611 * @sk: socket
2615 * @address: address 2612 * @address: address
2613 * @act: the action being taken
2616 * 2614 *
2617 * Create or update the port list entry 2615 * Create or update the port list entry
2618 */ 2616 */
@@ -2782,7 +2780,7 @@ static int smack_socket_post_create(struct socket *sock, int family,
2782 * 2780 *
2783 * Cross reference the peer labels for SO_PEERSEC 2781 * Cross reference the peer labels for SO_PEERSEC
2784 * 2782 *
2785 * Returns 0 on success, and error code otherwise 2783 * Returns 0
2786 */ 2784 */
2787static int smack_socket_socketpair(struct socket *socka, 2785static int smack_socket_socketpair(struct socket *socka,
2788 struct socket *sockb) 2786 struct socket *sockb)
@@ -3014,13 +3012,13 @@ static int smack_shm_shmctl(struct kern_ipc_perm *isp, int cmd)
3014 * 3012 *
3015 * Returns 0 if current has the requested access, error code otherwise 3013 * Returns 0 if current has the requested access, error code otherwise
3016 */ 3014 */
3017static int smack_shm_shmat(struct kern_ipc_perm *ipc, char __user *shmaddr, 3015static int smack_shm_shmat(struct kern_ipc_perm *isp, char __user *shmaddr,
3018 int shmflg) 3016 int shmflg)
3019{ 3017{
3020 int may; 3018 int may;
3021 3019
3022 may = smack_flags_to_may(shmflg); 3020 may = smack_flags_to_may(shmflg);
3023 return smk_curacc_shm(ipc, may); 3021 return smk_curacc_shm(isp, may);
3024} 3022}
3025 3023
3026/** 3024/**
@@ -3925,6 +3923,8 @@ access_check:
3925 skp = smack_ipv6host_label(&sadd); 3923 skp = smack_ipv6host_label(&sadd);
3926 if (skp == NULL) 3924 if (skp == NULL)
3927 skp = smack_net_ambient; 3925 skp = smack_net_ambient;
3926 if (skb == NULL)
3927 break;
3928#ifdef CONFIG_AUDIT 3928#ifdef CONFIG_AUDIT
3929 smk_ad_init_net(&ad, __func__, LSM_AUDIT_DATA_NET, &net); 3929 smk_ad_init_net(&ad, __func__, LSM_AUDIT_DATA_NET, &net);
3930 ad.a.u.net->family = family; 3930 ad.a.u.net->family = family;
@@ -4762,7 +4762,7 @@ static __init void init_smack_known_list(void)
4762/** 4762/**
4763 * smack_init - initialize the smack system 4763 * smack_init - initialize the smack system
4764 * 4764 *
4765 * Returns 0 4765 * Returns 0 on success, -ENOMEM if there's no memory
4766 */ 4766 */
4767static __init int smack_init(void) 4767static __init int smack_init(void)
4768{ 4768{
diff --git a/tools/hv/Build b/tools/hv/Build
new file mode 100644
index 000000000000..6cf51fa4b306
--- /dev/null
+++ b/tools/hv/Build
@@ -0,0 +1,3 @@
1hv_kvp_daemon-y += hv_kvp_daemon.o
2hv_vss_daemon-y += hv_vss_daemon.o
3hv_fcopy_daemon-y += hv_fcopy_daemon.o
diff --git a/tools/hv/Makefile b/tools/hv/Makefile
index 5db5e62cebda..b57143d9459c 100644
--- a/tools/hv/Makefile
+++ b/tools/hv/Makefile
@@ -1,28 +1,55 @@
1# SPDX-License-Identifier: GPL-2.0 1# SPDX-License-Identifier: GPL-2.0
2# Makefile for Hyper-V tools 2# Makefile for Hyper-V tools
3 3include ../scripts/Makefile.include
4WARNINGS = -Wall -Wextra
5CFLAGS = $(WARNINGS) -g $(shell getconf LFS_CFLAGS)
6
7CFLAGS += -D__EXPORTED_HEADERS__ -I../../include/uapi -I../../include
8 4
9sbindir ?= /usr/sbin 5sbindir ?= /usr/sbin
10libexecdir ?= /usr/libexec 6libexecdir ?= /usr/libexec
11sharedstatedir ?= /var/lib 7sharedstatedir ?= /var/lib
12 8
13ALL_PROGRAMS := hv_kvp_daemon hv_vss_daemon hv_fcopy_daemon 9ifeq ($(srctree),)
10srctree := $(patsubst %/,%,$(dir $(CURDIR)))
11srctree := $(patsubst %/,%,$(dir $(srctree)))
12endif
13
14# Do not use make's built-in rules
15# (this improves performance and avoids hard-to-debug behaviour);
16MAKEFLAGS += -r
17
18override CFLAGS += -O2 -Wall -g -D_GNU_SOURCE -I$(OUTPUT)include
19
20ALL_TARGETS := hv_kvp_daemon hv_vss_daemon hv_fcopy_daemon
21ALL_PROGRAMS := $(patsubst %,$(OUTPUT)%,$(ALL_TARGETS))
14 22
15ALL_SCRIPTS := hv_get_dhcp_info.sh hv_get_dns_info.sh hv_set_ifconfig.sh 23ALL_SCRIPTS := hv_get_dhcp_info.sh hv_get_dns_info.sh hv_set_ifconfig.sh
16 24
17all: $(ALL_PROGRAMS) 25all: $(ALL_PROGRAMS)
18 26
19%: %.c 27export srctree OUTPUT CC LD CFLAGS
20 $(CC) $(CFLAGS) -o $@ $^ 28include $(srctree)/tools/build/Makefile.include
29
30HV_KVP_DAEMON_IN := $(OUTPUT)hv_kvp_daemon-in.o
31$(HV_KVP_DAEMON_IN): FORCE
32 $(Q)$(MAKE) $(build)=hv_kvp_daemon
33$(OUTPUT)hv_kvp_daemon: $(HV_KVP_DAEMON_IN)
34 $(QUIET_LINK)$(CC) $(CFLAGS) $(LDFLAGS) $< -o $@
35
36HV_VSS_DAEMON_IN := $(OUTPUT)hv_vss_daemon-in.o
37$(HV_VSS_DAEMON_IN): FORCE
38 $(Q)$(MAKE) $(build)=hv_vss_daemon
39$(OUTPUT)hv_vss_daemon: $(HV_VSS_DAEMON_IN)
40 $(QUIET_LINK)$(CC) $(CFLAGS) $(LDFLAGS) $< -o $@
41
42HV_FCOPY_DAEMON_IN := $(OUTPUT)hv_fcopy_daemon-in.o
43$(HV_FCOPY_DAEMON_IN): FORCE
44 $(Q)$(MAKE) $(build)=hv_fcopy_daemon
45$(OUTPUT)hv_fcopy_daemon: $(HV_FCOPY_DAEMON_IN)
46 $(QUIET_LINK)$(CC) $(CFLAGS) $(LDFLAGS) $< -o $@
21 47
22clean: 48clean:
23 $(RM) hv_kvp_daemon hv_vss_daemon hv_fcopy_daemon 49 rm -f $(ALL_PROGRAMS)
50 find $(if $(OUTPUT),$(OUTPUT),.) -name '*.o' -delete -o -name '\.*.d' -delete
24 51
25install: all 52install: $(ALL_PROGRAMS)
26 install -d -m 755 $(DESTDIR)$(sbindir); \ 53 install -d -m 755 $(DESTDIR)$(sbindir); \
27 install -d -m 755 $(DESTDIR)$(libexecdir)/hypervkvpd; \ 54 install -d -m 755 $(DESTDIR)$(libexecdir)/hypervkvpd; \
28 install -d -m 755 $(DESTDIR)$(sharedstatedir); \ 55 install -d -m 755 $(DESTDIR)$(sharedstatedir); \
@@ -33,3 +60,7 @@ install: all
33 for script in $(ALL_SCRIPTS); do \ 60 for script in $(ALL_SCRIPTS); do \
34 install $$script -m 755 $(DESTDIR)$(libexecdir)/hypervkvpd/$${script%.sh}; \ 61 install $$script -m 755 $(DESTDIR)$(libexecdir)/hypervkvpd/$${script%.sh}; \
35 done 62 done
63
64FORCE:
65
66.PHONY: all install clean FORCE prepare
diff --git a/tools/power/x86/intel-speed-select/isst-config.c b/tools/power/x86/intel-speed-select/isst-config.c
index 59753b3917bb..2a9890c8395a 100644
--- a/tools/power/x86/intel-speed-select/isst-config.c
+++ b/tools/power/x86/intel-speed-select/isst-config.c
@@ -38,6 +38,7 @@ static int fact_avx = 0xFF;
38static unsigned long long fact_trl; 38static unsigned long long fact_trl;
39static int out_format_json; 39static int out_format_json;
40static int cmd_help; 40static int cmd_help;
41static int force_online_offline;
41 42
42/* clos related */ 43/* clos related */
43static int current_clos = -1; 44static int current_clos = -1;
@@ -138,14 +139,14 @@ int out_format_is_json(void)
138int get_physical_package_id(int cpu) 139int get_physical_package_id(int cpu)
139{ 140{
140 return parse_int_file( 141 return parse_int_file(
141 1, "/sys/devices/system/cpu/cpu%d/topology/physical_package_id", 142 0, "/sys/devices/system/cpu/cpu%d/topology/physical_package_id",
142 cpu); 143 cpu);
143} 144}
144 145
145int get_physical_core_id(int cpu) 146int get_physical_core_id(int cpu)
146{ 147{
147 return parse_int_file( 148 return parse_int_file(
148 1, "/sys/devices/system/cpu/cpu%d/topology/core_id", cpu); 149 0, "/sys/devices/system/cpu/cpu%d/topology/core_id", cpu);
149} 150}
150 151
151int get_physical_die_id(int cpu) 152int get_physical_die_id(int cpu)
@@ -165,6 +166,26 @@ int get_topo_max_cpus(void)
165 return topo_max_cpus; 166 return topo_max_cpus;
166} 167}
167 168
169static void set_cpu_online_offline(int cpu, int state)
170{
171 char buffer[128];
172 int fd;
173
174 snprintf(buffer, sizeof(buffer),
175 "/sys/devices/system/cpu/cpu%d/online", cpu);
176
177 fd = open(buffer, O_WRONLY);
178 if (fd < 0)
179 err(-1, "%s open failed", buffer);
180
181 if (state)
182 write(fd, "1\n", 2);
183 else
184 write(fd, "0\n", 2);
185
186 close(fd);
187}
188
168#define MAX_PACKAGE_COUNT 8 189#define MAX_PACKAGE_COUNT 8
169#define MAX_DIE_PER_PACKAGE 2 190#define MAX_DIE_PER_PACKAGE 2
170static void for_each_online_package_in_set(void (*callback)(int, void *, void *, 191static void for_each_online_package_in_set(void (*callback)(int, void *, void *,
@@ -402,6 +423,9 @@ void set_cpu_mask_from_punit_coremask(int cpu, unsigned long long core_mask,
402 int j; 423 int j;
403 424
404 for (j = 0; j < topo_max_cpus; ++j) { 425 for (j = 0; j < topo_max_cpus; ++j) {
426 if (!CPU_ISSET_S(j, present_cpumask_size, present_cpumask))
427 continue;
428
405 if (cpu_map[j].pkg_id == pkg_id && 429 if (cpu_map[j].pkg_id == pkg_id &&
406 cpu_map[j].die_id == die_id && 430 cpu_map[j].die_id == die_id &&
407 cpu_map[j].punit_cpu_core == i) { 431 cpu_map[j].punit_cpu_core == i) {
@@ -484,7 +508,7 @@ int isst_send_mbox_command(unsigned int cpu, unsigned char command,
484 int write = 0; 508 int write = 0;
485 int clos_id, core_id, ret = 0; 509 int clos_id, core_id, ret = 0;
486 510
487 debug_printf("CLOS %d\n", cpu); 511 debug_printf("CPU %d\n", cpu);
488 512
489 if (parameter & BIT(MBOX_CMD_WRITE_BIT)) { 513 if (parameter & BIT(MBOX_CMD_WRITE_BIT)) {
490 value = req_data; 514 value = req_data;
@@ -649,8 +673,8 @@ static void exec_on_get_ctdp_cpu(int cpu, void *arg1, void *arg2, void *arg3,
649 if (ret) 673 if (ret)
650 perror("get_tdp_*"); 674 perror("get_tdp_*");
651 else 675 else
652 isst_display_result(cpu, outf, "perf-profile", (char *)arg3, 676 isst_ctdp_display_core_info(cpu, outf, arg3,
653 *(unsigned int *)arg4); 677 *(unsigned int *)arg4);
654} 678}
655 679
656#define _get_tdp_level(desc, suffix, object, help) \ 680#define _get_tdp_level(desc, suffix, object, help) \
@@ -733,9 +757,34 @@ static void set_tdp_level_for_cpu(int cpu, void *arg1, void *arg2, void *arg3,
733 ret = isst_set_tdp_level(cpu, tdp_level); 757 ret = isst_set_tdp_level(cpu, tdp_level);
734 if (ret) 758 if (ret)
735 perror("set_tdp_level_for_cpu"); 759 perror("set_tdp_level_for_cpu");
736 else 760 else {
737 isst_display_result(cpu, outf, "perf-profile", "set_tdp_level", 761 isst_display_result(cpu, outf, "perf-profile", "set_tdp_level",
738 ret); 762 ret);
763 if (force_online_offline) {
764 struct isst_pkg_ctdp_level_info ctdp_level;
765 int pkg_id = get_physical_package_id(cpu);
766 int die_id = get_physical_die_id(cpu);
767
768 fprintf(stderr, "Option is set to online/offline\n");
769 ctdp_level.core_cpumask_size =
770 alloc_cpu_set(&ctdp_level.core_cpumask);
771 isst_get_coremask_info(cpu, tdp_level, &ctdp_level);
772 if (ctdp_level.cpu_count) {
773 int i, max_cpus = get_topo_max_cpus();
774 for (i = 0; i < max_cpus; ++i) {
775 if (pkg_id != get_physical_package_id(i) || die_id != get_physical_die_id(i))
776 continue;
777 if (CPU_ISSET_S(i, ctdp_level.core_cpumask_size, ctdp_level.core_cpumask)) {
778 fprintf(stderr, "online cpu %d\n", i);
779 set_cpu_online_offline(i, 1);
780 } else {
781 fprintf(stderr, "offline cpu %d\n", i);
782 set_cpu_online_offline(i, 0);
783 }
784 }
785 }
786 }
787 }
739} 788}
740 789
741static void set_tdp_level(void) 790static void set_tdp_level(void)
@@ -744,6 +793,8 @@ static void set_tdp_level(void)
744 fprintf(stderr, "Set Config TDP level\n"); 793 fprintf(stderr, "Set Config TDP level\n");
745 fprintf(stderr, 794 fprintf(stderr,
746 "\t Arguments: -l|--level : Specify tdp level\n"); 795 "\t Arguments: -l|--level : Specify tdp level\n");
796 fprintf(stderr,
797 "\t Optional Arguments: -o | online : online/offline for the tdp level\n");
747 exit(0); 798 exit(0);
748 } 799 }
749 800
@@ -1082,6 +1133,40 @@ static void dump_clos_config(void)
1082 isst_ctdp_display_information_end(outf); 1133 isst_ctdp_display_information_end(outf);
1083} 1134}
1084 1135
1136static void get_clos_info_for_cpu(int cpu, void *arg1, void *arg2, void *arg3,
1137 void *arg4)
1138{
1139 int enable, ret, prio_type;
1140
1141 ret = isst_clos_get_clos_information(cpu, &enable, &prio_type);
1142 if (ret)
1143 perror("isst_clos_get_info");
1144 else
1145 isst_clos_display_clos_information(cpu, outf, enable, prio_type);
1146}
1147
1148static void dump_clos_info(void)
1149{
1150 if (cmd_help) {
1151 fprintf(stderr,
1152 "Print Intel Speed Select Technology core power information\n");
1153 fprintf(stderr, "\tSpecify targeted cpu id with [--cpu|-c]\n");
1154 exit(0);
1155 }
1156
1157 if (!max_target_cpus) {
1158 fprintf(stderr,
1159 "Invalid target cpu. Specify with [-c|--cpu]\n");
1160 exit(0);
1161 }
1162
1163 isst_ctdp_display_information_start(outf);
1164 for_each_online_target_cpu_in_set(get_clos_info_for_cpu, NULL,
1165 NULL, NULL, NULL);
1166 isst_ctdp_display_information_end(outf);
1167
1168}
1169
1085static void set_clos_config_for_cpu(int cpu, void *arg1, void *arg2, void *arg3, 1170static void set_clos_config_for_cpu(int cpu, void *arg1, void *arg2, void *arg3,
1086 void *arg4) 1171 void *arg4)
1087{ 1172{
@@ -1198,7 +1283,7 @@ static void get_clos_assoc_for_cpu(int cpu, void *arg1, void *arg2, void *arg3,
1198 if (ret) 1283 if (ret)
1199 perror("isst_clos_get_assoc_status"); 1284 perror("isst_clos_get_assoc_status");
1200 else 1285 else
1201 isst_display_result(cpu, outf, "core-power", "get-assoc", clos); 1286 isst_clos_display_assoc_information(cpu, outf, clos);
1202} 1287}
1203 1288
1204static void get_clos_assoc(void) 1289static void get_clos_assoc(void)
@@ -1208,13 +1293,17 @@ static void get_clos_assoc(void)
1208 fprintf(stderr, "\tSpecify targeted cpu id with [--cpu|-c]\n"); 1293 fprintf(stderr, "\tSpecify targeted cpu id with [--cpu|-c]\n");
1209 exit(0); 1294 exit(0);
1210 } 1295 }
1211 if (max_target_cpus) 1296
1212 for_each_online_target_cpu_in_set(get_clos_assoc_for_cpu, NULL, 1297 if (!max_target_cpus) {
1213 NULL, NULL, NULL);
1214 else {
1215 fprintf(stderr, 1298 fprintf(stderr,
1216 "Invalid target cpu. Specify with [-c|--cpu]\n"); 1299 "Invalid target cpu. Specify with [-c|--cpu]\n");
1300 exit(0);
1217 } 1301 }
1302
1303 isst_ctdp_display_information_start(outf);
1304 for_each_online_target_cpu_in_set(get_clos_assoc_for_cpu, NULL,
1305 NULL, NULL, NULL);
1306 isst_ctdp_display_information_end(outf);
1218} 1307}
1219 1308
1220static struct process_cmd_struct isst_cmds[] = { 1309static struct process_cmd_struct isst_cmds[] = {
@@ -1231,10 +1320,11 @@ static struct process_cmd_struct isst_cmds[] = {
1231 { "turbo-freq", "info", dump_fact_config }, 1320 { "turbo-freq", "info", dump_fact_config },
1232 { "turbo-freq", "enable", set_fact_enable }, 1321 { "turbo-freq", "enable", set_fact_enable },
1233 { "turbo-freq", "disable", set_fact_disable }, 1322 { "turbo-freq", "disable", set_fact_disable },
1234 { "core-power", "info", dump_clos_config }, 1323 { "core-power", "info", dump_clos_info },
1235 { "core-power", "enable", set_clos_enable }, 1324 { "core-power", "enable", set_clos_enable },
1236 { "core-power", "disable", set_clos_disable }, 1325 { "core-power", "disable", set_clos_disable },
1237 { "core-power", "config", set_clos_config }, 1326 { "core-power", "config", set_clos_config },
1327 { "core-power", "get-config", dump_clos_config },
1238 { "core-power", "assoc", set_clos_assoc }, 1328 { "core-power", "assoc", set_clos_assoc },
1239 { "core-power", "get-assoc", get_clos_assoc }, 1329 { "core-power", "get-assoc", get_clos_assoc },
1240 { NULL, NULL, NULL } 1330 { NULL, NULL, NULL }
@@ -1316,6 +1406,7 @@ static void parse_cmd_args(int argc, int start, char **argv)
1316 static struct option long_options[] = { 1406 static struct option long_options[] = {
1317 { "bucket", required_argument, 0, 'b' }, 1407 { "bucket", required_argument, 0, 'b' },
1318 { "level", required_argument, 0, 'l' }, 1408 { "level", required_argument, 0, 'l' },
1409 { "online", required_argument, 0, 'o' },
1319 { "trl-type", required_argument, 0, 'r' }, 1410 { "trl-type", required_argument, 0, 'r' },
1320 { "trl", required_argument, 0, 't' }, 1411 { "trl", required_argument, 0, 't' },
1321 { "help", no_argument, 0, 'h' }, 1412 { "help", no_argument, 0, 'h' },
@@ -1332,7 +1423,7 @@ static void parse_cmd_args(int argc, int start, char **argv)
1332 option_index = start; 1423 option_index = start;
1333 1424
1334 optind = start + 1; 1425 optind = start + 1;
1335 while ((opt = getopt_long(argc, argv, "b:l:t:c:d:e:n:m:p:w:h", 1426 while ((opt = getopt_long(argc, argv, "b:l:t:c:d:e:n:m:p:w:ho",
1336 long_options, &option_index)) != -1) { 1427 long_options, &option_index)) != -1) {
1337 switch (opt) { 1428 switch (opt) {
1338 case 'b': 1429 case 'b':
@@ -1344,6 +1435,9 @@ static void parse_cmd_args(int argc, int start, char **argv)
1344 case 'l': 1435 case 'l':
1345 tdp_level = atoi(optarg); 1436 tdp_level = atoi(optarg);
1346 break; 1437 break;
1438 case 'o':
1439 force_online_offline = 1;
1440 break;
1347 case 't': 1441 case 't':
1348 sscanf(optarg, "0x%llx", &fact_trl); 1442 sscanf(optarg, "0x%llx", &fact_trl);
1349 break; 1443 break;
@@ -1362,7 +1456,6 @@ static void parse_cmd_args(int argc, int start, char **argv)
1362 /* CLOS related */ 1456 /* CLOS related */
1363 case 'c': 1457 case 'c':
1364 current_clos = atoi(optarg); 1458 current_clos = atoi(optarg);
1365 printf("clos %d\n", current_clos);
1366 break; 1459 break;
1367 case 'd': 1460 case 'd':
1368 clos_desired = atoi(optarg); 1461 clos_desired = atoi(optarg);
@@ -1433,6 +1526,7 @@ static void core_power_help(void)
1433 printf("\tenable\n"); 1526 printf("\tenable\n");
1434 printf("\tdisable\n"); 1527 printf("\tdisable\n");
1435 printf("\tconfig\n"); 1528 printf("\tconfig\n");
1529 printf("\tget-config\n");
1436 printf("\tassoc\n"); 1530 printf("\tassoc\n");
1437 printf("\tget-assoc\n"); 1531 printf("\tget-assoc\n");
1438} 1532}
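The isst-config.c changes add an -o/--online option that, after a TDP level switch, walks the package's CPUs and writes each one's sysfs 'online' file to match the level's core mask (see set_cpu_online_offline() and the loop in set_tdp_level_for_cpu() above). A standalone userspace sketch of that sysfs toggle, with the write result checked (hypothetical helper, not the patch code):

#include <err.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

static void cpu_set_online(int cpu, int online)
{
	char path[128];
	int fd;

	snprintf(path, sizeof(path),
		 "/sys/devices/system/cpu/cpu%d/online", cpu);

	fd = open(path, O_WRONLY);
	if (fd < 0)
		err(1, "open %s", path);

	/* "1\n" brings the CPU online, "0\n" takes it offline */
	if (write(fd, online ? "1\n" : "0\n", 2) != 2)
		err(1, "write %s", path);

	close(fd);
}

Note that on many systems cpu0 has no 'online' file, so callers usually skip it.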
diff --git a/tools/power/x86/intel-speed-select/isst-core.c b/tools/power/x86/intel-speed-select/isst-core.c
index 0bf341ad9697..6dee5332c9d3 100644
--- a/tools/power/x86/intel-speed-select/isst-core.c
+++ b/tools/power/x86/intel-speed-select/isst-core.c
@@ -619,6 +619,31 @@ int isst_get_process_ctdp(int cpu, int tdp_level, struct isst_pkg_ctdp *pkg_dev)
619 return 0; 619 return 0;
620} 620}
621 621
622int isst_clos_get_clos_information(int cpu, int *enable, int *type)
623{
624 unsigned int resp;
625 int ret;
626
627 ret = isst_send_mbox_command(cpu, CONFIG_CLOS, CLOS_PM_QOS_CONFIG, 0, 0,
628 &resp);
629 if (ret)
630 return ret;
631
632 debug_printf("cpu:%d CLOS_PM_QOS_CONFIG resp:%x\n", cpu, resp);
633
634 if (resp & BIT(1))
635 *enable = 1;
636 else
637 *enable = 0;
638
639 if (resp & BIT(2))
640 *type = 1;
641 else
642 *type = 0;
643
644 return 0;
645}
646
622int isst_pm_qos_config(int cpu, int enable_clos, int priority_type) 647int isst_pm_qos_config(int cpu, int enable_clos, int priority_type)
623{ 648{
624 unsigned int req, resp; 649 unsigned int req, resp;
diff --git a/tools/power/x86/intel-speed-select/isst-display.c b/tools/power/x86/intel-speed-select/isst-display.c
index df4aa99c4e92..40346d534f78 100644
--- a/tools/power/x86/intel-speed-select/isst-display.c
+++ b/tools/power/x86/intel-speed-select/isst-display.c
@@ -287,6 +287,26 @@ static void _isst_fact_display_information(int cpu, FILE *outf, int level,
287 format_and_print(outf, base_level + 2, header, value); 287 format_and_print(outf, base_level + 2, header, value);
288} 288}
289 289
290void isst_ctdp_display_core_info(int cpu, FILE *outf, char *prefix,
291 unsigned int val)
292{
293 char header[256];
294 char value[256];
295
296 snprintf(header, sizeof(header), "package-%d",
297 get_physical_package_id(cpu));
298 format_and_print(outf, 1, header, NULL);
299 snprintf(header, sizeof(header), "die-%d", get_physical_die_id(cpu));
300 format_and_print(outf, 2, header, NULL);
301 snprintf(header, sizeof(header), "cpu-%d", cpu);
302 format_and_print(outf, 3, header, NULL);
303
304 snprintf(value, sizeof(value), "%u", val);
305 format_and_print(outf, 4, prefix, value);
306
307 format_and_print(outf, 1, NULL, NULL);
308}
309
290void isst_ctdp_display_information(int cpu, FILE *outf, int tdp_level, 310void isst_ctdp_display_information(int cpu, FILE *outf, int tdp_level,
291 struct isst_pkg_ctdp *pkg_dev) 311 struct isst_pkg_ctdp *pkg_dev)
292{ 312{
@@ -503,6 +523,57 @@ void isst_clos_display_information(int cpu, FILE *outf, int clos,
503 format_and_print(outf, 1, NULL, NULL); 523 format_and_print(outf, 1, NULL, NULL);
504} 524}
505 525
526void isst_clos_display_clos_information(int cpu, FILE *outf,
527 int clos_enable, int type)
528{
529 char header[256];
530 char value[256];
531
532 snprintf(header, sizeof(header), "package-%d",
533 get_physical_package_id(cpu));
534 format_and_print(outf, 1, header, NULL);
535 snprintf(header, sizeof(header), "die-%d", get_physical_die_id(cpu));
536 format_and_print(outf, 2, header, NULL);
537 snprintf(header, sizeof(header), "cpu-%d", cpu);
538 format_and_print(outf, 3, header, NULL);
539
540 snprintf(header, sizeof(header), "core-power");
541 format_and_print(outf, 4, header, NULL);
542
543 snprintf(header, sizeof(header), "enable-status");
544 snprintf(value, sizeof(value), "%d", clos_enable);
545 format_and_print(outf, 5, header, value);
546
547 snprintf(header, sizeof(header), "priority-type");
548 snprintf(value, sizeof(value), "%d", type);
549 format_and_print(outf, 5, header, value);
550
551 format_and_print(outf, 1, NULL, NULL);
552}
553
554void isst_clos_display_assoc_information(int cpu, FILE *outf, int clos)
555{
556 char header[256];
557 char value[256];
558
559 snprintf(header, sizeof(header), "package-%d",
560 get_physical_package_id(cpu));
561 format_and_print(outf, 1, header, NULL);
562 snprintf(header, sizeof(header), "die-%d", get_physical_die_id(cpu));
563 format_and_print(outf, 2, header, NULL);
564 snprintf(header, sizeof(header), "cpu-%d", cpu);
565 format_and_print(outf, 3, header, NULL);
566
567 snprintf(header, sizeof(header), "get-assoc");
568 format_and_print(outf, 4, header, NULL);
569
570 snprintf(header, sizeof(header), "clos");
571 snprintf(value, sizeof(value), "%d", clos);
572 format_and_print(outf, 5, header, value);
573
574 format_and_print(outf, 1, NULL, NULL);
575}
576
506void isst_display_result(int cpu, FILE *outf, char *feature, char *cmd, 577void isst_display_result(int cpu, FILE *outf, char *feature, char *cmd,
507 int result) 578 int result)
508{ 579{
diff --git a/tools/power/x86/intel-speed-select/isst.h b/tools/power/x86/intel-speed-select/isst.h
index 2f7f62765eb6..d280b27d600d 100644
--- a/tools/power/x86/intel-speed-select/isst.h
+++ b/tools/power/x86/intel-speed-select/isst.h
@@ -187,12 +187,16 @@ extern int isst_send_msr_command(unsigned int cpu, unsigned int command,
187 int write, unsigned long long *req_resp); 187 int write, unsigned long long *req_resp);
188 188
189extern int isst_get_ctdp_levels(int cpu, struct isst_pkg_ctdp *pkg_dev); 189extern int isst_get_ctdp_levels(int cpu, struct isst_pkg_ctdp *pkg_dev);
190extern int isst_get_coremask_info(int cpu, int config_index,
191 struct isst_pkg_ctdp_level_info *ctdp_level);
190extern int isst_get_process_ctdp(int cpu, int tdp_level, 192extern int isst_get_process_ctdp(int cpu, int tdp_level,
191 struct isst_pkg_ctdp *pkg_dev); 193 struct isst_pkg_ctdp *pkg_dev);
192extern void isst_get_process_ctdp_complete(int cpu, 194extern void isst_get_process_ctdp_complete(int cpu,
193 struct isst_pkg_ctdp *pkg_dev); 195 struct isst_pkg_ctdp *pkg_dev);
194extern void isst_ctdp_display_information(int cpu, FILE *outf, int tdp_level, 196extern void isst_ctdp_display_information(int cpu, FILE *outf, int tdp_level,
195 struct isst_pkg_ctdp *pkg_dev); 197 struct isst_pkg_ctdp *pkg_dev);
198extern void isst_ctdp_display_core_info(int cpu, FILE *outf, char *prefix,
199 unsigned int val);
196extern void isst_ctdp_display_information_start(FILE *outf); 200extern void isst_ctdp_display_information_start(FILE *outf);
197extern void isst_ctdp_display_information_end(FILE *outf); 201extern void isst_ctdp_display_information_end(FILE *outf);
198extern void isst_pbf_display_information(int cpu, FILE *outf, int level, 202extern void isst_pbf_display_information(int cpu, FILE *outf, int level,
@@ -223,10 +227,14 @@ extern int isst_clos_associate(int cpu, int clos);
223extern int isst_clos_get_assoc_status(int cpu, int *clos_id); 227extern int isst_clos_get_assoc_status(int cpu, int *clos_id);
224extern void isst_clos_display_information(int cpu, FILE *outf, int clos, 228extern void isst_clos_display_information(int cpu, FILE *outf, int clos,
225 struct isst_clos_config *clos_config); 229 struct isst_clos_config *clos_config);
226 230extern void isst_clos_display_assoc_information(int cpu, FILE *outf, int clos);
227extern int isst_read_reg(unsigned short reg, unsigned int *val); 231extern int isst_read_reg(unsigned short reg, unsigned int *val);
228extern int isst_write_reg(int reg, unsigned int val); 232extern int isst_write_reg(int reg, unsigned int val);
229 233
230extern void isst_display_result(int cpu, FILE *outf, char *feature, char *cmd, 234extern void isst_display_result(int cpu, FILE *outf, char *feature, char *cmd,
231 int result); 235 int result);
236
237extern int isst_clos_get_clos_information(int cpu, int *enable, int *type);
238extern void isst_clos_display_clos_information(int cpu, FILE *outf,
239 int clos_enable, int type);
232#endif 240#endif