465 files changed, 4363 insertions, 2614 deletions
diff --git a/Documentation/ABI/testing/sysfs-devices-system-ibm-rtl b/Documentation/ABI/testing/sysfs-devices-system-ibm-rtl
index b82deeaec314..470def06ab0a 100644
--- a/Documentation/ABI/testing/sysfs-devices-system-ibm-rtl
+++ b/Documentation/ABI/testing/sysfs-devices-system-ibm-rtl
@@ -1,4 +1,4 @@
-What:		state
+What:		/sys/devices/system/ibm_rtl/state
 Date:		Sep 2010
 KernelVersion:	2.6.37
 Contact:	Vernon Mauery <vernux@us.ibm.com>
@@ -10,7 +10,7 @@ Description:	The state file allows a means by which to change in and
 Users:		The ibm-prtm userspace daemon uses this interface.
 
 
-What:		version
+What:		/sys/devices/system/ibm_rtl/version
 Date:		Sep 2010
 KernelVersion:	2.6.37
 Contact:	Vernon Mauery <vernux@us.ibm.com>
diff --git a/Documentation/devicetree/bindings/mmc/synopsys-dw-mshc.txt b/Documentation/devicetree/bindings/mmc/synopsys-dw-mshc.txt
index 4e00e859e885..bfa461aaac99 100644
--- a/Documentation/devicetree/bindings/mmc/synopsys-dw-mshc.txt
+++ b/Documentation/devicetree/bindings/mmc/synopsys-dw-mshc.txt
@@ -43,6 +43,9 @@ Optional properties:
   reset signal present internally in some host controller IC designs.
   See Documentation/devicetree/bindings/reset/reset.txt for details.
 
+* reset-names: request name for using "resets" property. Must be "reset".
+  (It will be used together with "resets" property.)
+
 * clocks: from common clock binding: handle to biu and ciu clocks for the
   bus interface unit clock and the card interface unit clock.
 
@@ -103,6 +106,8 @@ board specific portions as listed below.
 		interrupts = <0 75 0>;
 		#address-cells = <1>;
 		#size-cells = <0>;
+		resets = <&rst 20>;
+		reset-names = "reset";
 	};
 
 [board specific internal DMA resources]
diff --git a/Documentation/devicetree/bindings/pci/rockchip-pcie.txt b/Documentation/devicetree/bindings/pci/rockchip-pcie.txt
index ba67b39939c1..71aeda1ca055 100644
--- a/Documentation/devicetree/bindings/pci/rockchip-pcie.txt
+++ b/Documentation/devicetree/bindings/pci/rockchip-pcie.txt
@@ -26,13 +26,16 @@ Required properties:
 	- "sys"
 	- "legacy"
 	- "client"
-- resets: Must contain five entries for each entry in reset-names.
+- resets: Must contain seven entries for each entry in reset-names.
   See ../reset/reset.txt for details.
 - reset-names: Must include the following names
 	- "core"
 	- "mgmt"
 	- "mgmt-sticky"
 	- "pipe"
+	- "pm"
+	- "aclk"
+	- "pclk"
 - pinctrl-names : The pin control state names
 - pinctrl-0: The "default" pinctrl state
 - #interrupt-cells: specifies the number of cells needed to encode an
@@ -86,8 +89,10 @@ pcie0: pcie@f8000000 {
 	reg = <0x0 0xf8000000 0x0 0x2000000>, <0x0 0xfd000000 0x0 0x1000000>;
 	reg-names = "axi-base", "apb-base";
 	resets = <&cru SRST_PCIE_CORE>, <&cru SRST_PCIE_MGMT>,
-		 <&cru SRST_PCIE_MGMT_STICKY>, <&cru SRST_PCIE_PIPE>;
-	reset-names = "core", "mgmt", "mgmt-sticky", "pipe";
+		 <&cru SRST_PCIE_MGMT_STICKY>, <&cru SRST_PCIE_PIPE> ,
+		 <&cru SRST_PCIE_PM>, <&cru SRST_P_PCIE>, <&cru SRST_A_PCIE>;
+	reset-names = "core", "mgmt", "mgmt-sticky", "pipe",
+		      "pm", "pclk", "aclk";
 	phys = <&pcie_phy>;
 	phy-names = "pcie-phy";
 	pinctrl-names = "default";
diff --git a/Documentation/devicetree/bindings/pinctrl/st,stm32-pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/st,stm32-pinctrl.txt
index f9753c416974..b24583aa34c3 100644
--- a/Documentation/devicetree/bindings/pinctrl/st,stm32-pinctrl.txt
+++ b/Documentation/devicetree/bindings/pinctrl/st,stm32-pinctrl.txt
@@ -14,11 +14,6 @@ Required properies:
  - #size-cells	: The value of this property must be 1
  - ranges	: defines mapping between pin controller node (parent) to
    gpio-bank node (children).
- - interrupt-parent: phandle of the interrupt parent to which the external
-   GPIO interrupts are forwarded to.
- - st,syscfg: Should be phandle/offset pair. The phandle to the syscon node
-   which includes IRQ mux selection register, and the offset of the IRQ mux
-   selection register.
 - pins-are-numbered: Specify the subnodes are using numbered pinmux to
   specify pins.
 
@@ -37,6 +32,11 @@ Required properties:
 
 Optional properties:
 - reset:	  : Reference to the reset controller
+ - interrupt-parent: phandle of the interrupt parent to which the external
+   GPIO interrupts are forwarded to.
+ - st,syscfg: Should be phandle/offset pair. The phandle to the syscon node
+   which includes IRQ mux selection register, and the offset of the IRQ mux
+   selection register.
 
 Example:
 #include <dt-bindings/pinctrl/stm32f429-pinfunc.h>
diff --git a/Documentation/filesystems/Locking b/Documentation/filesystems/Locking
index 14cdc101d165..1b5f15653b1b 100644
--- a/Documentation/filesystems/Locking
+++ b/Documentation/filesystems/Locking
@@ -447,7 +447,6 @@ prototypes:
 	int (*flush) (struct file *);
 	int (*release) (struct inode *, struct file *);
 	int (*fsync) (struct file *, loff_t start, loff_t end, int datasync);
-	int (*aio_fsync) (struct kiocb *, int datasync);
 	int (*fasync) (int, struct file *, int);
 	int (*lock) (struct file *, int, struct file_lock *);
 	ssize_t (*readv) (struct file *, const struct iovec *, unsigned long,
diff --git a/Documentation/filesystems/vfs.txt b/Documentation/filesystems/vfs.txt
index d619c8d71966..b5039a00caaf 100644
--- a/Documentation/filesystems/vfs.txt
+++ b/Documentation/filesystems/vfs.txt
@@ -828,7 +828,6 @@ struct file_operations {
 	int (*flush) (struct file *, fl_owner_t id);
 	int (*release) (struct inode *, struct file *);
 	int (*fsync) (struct file *, loff_t, loff_t, int datasync);
-	int (*aio_fsync) (struct kiocb *, int datasync);
 	int (*fasync) (int, struct file *, int);
 	int (*lock) (struct file *, int, struct file_lock *);
 	ssize_t (*sendpage) (struct file *, struct page *, int, size_t, loff_t *, int);
diff --git a/Documentation/networking/dsa/dsa.txt b/Documentation/networking/dsa/dsa.txt
index 6d6c07cf1a9a..63912ef34606 100644
--- a/Documentation/networking/dsa/dsa.txt
+++ b/Documentation/networking/dsa/dsa.txt
@@ -67,13 +67,14 @@ Note that DSA does not currently create network interfaces for the "cpu" and
 Switch tagging protocols
 ------------------------
 
-DSA currently supports 4 different tagging protocols, and a tag-less mode as
+DSA currently supports 5 different tagging protocols, and a tag-less mode as
 well. The different protocols are implemented in:
 
 net/dsa/tag_trailer.c: Marvell's 4 trailer tag mode (legacy)
 net/dsa/tag_dsa.c: Marvell's original DSA tag
 net/dsa/tag_edsa.c: Marvell's enhanced DSA tag
 net/dsa/tag_brcm.c: Broadcom's 4 bytes tag
+net/dsa/tag_qca.c: Qualcomm's 2 bytes tag
 
 The exact format of the tag protocol is vendor specific, but in general, they
 all contain something which:
diff --git a/MAINTAINERS b/MAINTAINERS
index 411e3b87b8c2..2a58eeac9452 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -8057,6 +8057,7 @@ F:	drivers/infiniband/hw/mlx4/
 F:	include/linux/mlx4/
 
 MELLANOX MLX5 core VPI driver
+M:	Saeed Mahameed <saeedm@mellanox.com>
 M:	Matan Barak <matanb@mellanox.com>
 M:	Leon Romanovsky <leonro@mellanox.com>
 L:	netdev@vger.kernel.org
@@ -9335,7 +9336,7 @@ PCI DRIVER FOR INTEL VOLUME MANAGEMENT DEVICE (VMD)
 M:	Keith Busch <keith.busch@intel.com>
 L:	linux-pci@vger.kernel.org
 S:	Supported
-F:	arch/x86/pci/vmd.c
+F:	drivers/pci/host/vmd.c
 
 PCIE DRIVER FOR ST SPEAR13XX
 M:	Pratyush Anand <pratyush.anand@gmail.com>
diff --git a/Makefile b/Makefile
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 4
 PATCHLEVEL = 9
 SUBLEVEL = 0
-EXTRAVERSION = -rc4
+EXTRAVERSION = -rc5
 NAME = Psychotic Stoned Sheep
 
 # *DOCUMENTATION*
@@ -370,7 +370,7 @@ LDFLAGS_MODULE =
 CFLAGS_KERNEL	=
 AFLAGS_KERNEL	=
 LDFLAGS_vmlinux =
-CFLAGS_GCOV	= -fprofile-arcs -ftest-coverage -fno-tree-loop-im
+CFLAGS_GCOV	= -fprofile-arcs -ftest-coverage -fno-tree-loop-im -Wno-maybe-uninitialized
 CFLAGS_KCOV	:= $(call cc-option,-fsanitize-coverage=trace-pc,)
 
 
@@ -620,7 +620,6 @@ ARCH_CFLAGS :=
 include arch/$(SRCARCH)/Makefile
 
 KBUILD_CFLAGS	+= $(call cc-option,-fno-delete-null-pointer-checks,)
-KBUILD_CFLAGS	+= $(call cc-disable-warning,maybe-uninitialized,)
 KBUILD_CFLAGS	+= $(call cc-disable-warning,frame-address,)
 
 ifdef CONFIG_LD_DEAD_CODE_DATA_ELIMINATION
@@ -629,15 +628,18 @@ KBUILD_CFLAGS	+= $(call cc-option,-fdata-sections,)
 endif
 
 ifdef CONFIG_CC_OPTIMIZE_FOR_SIZE
-KBUILD_CFLAGS	+= -Os
+KBUILD_CFLAGS	+= -Os $(call cc-disable-warning,maybe-uninitialized,)
 else
 ifdef CONFIG_PROFILE_ALL_BRANCHES
-KBUILD_CFLAGS	+= -O2
+KBUILD_CFLAGS	+= -O2 $(call cc-disable-warning,maybe-uninitialized,)
 else
 KBUILD_CFLAGS	+= -O2
 endif
 endif
 
+KBUILD_CFLAGS += $(call cc-ifversion, -lt, 0409, \
+			$(call cc-disable-warning,maybe-uninitialized,))
+
 # Tell gcc to never replace conditional load with a non-conditional one
 KBUILD_CFLAGS	+= $(call cc-option,--param=allow-store-data-races=0)
 
diff --git a/arch/arc/Makefile b/arch/arc/Makefile
index 864adad52280..19cce226d1a8 100644
--- a/arch/arc/Makefile
+++ b/arch/arc/Makefile
@@ -50,6 +50,9 @@ atleast_gcc44 := $(call cc-ifversion, -ge, 0404, y)
 
 cflags-$(atleast_gcc44)			+= -fsection-anchors
 
+cflags-$(CONFIG_ARC_HAS_LLSC)		+= -mlock
+cflags-$(CONFIG_ARC_HAS_SWAPE)		+= -mswape
+
 ifdef CONFIG_ISA_ARCV2
 
 ifndef CONFIG_ARC_HAS_LL64
@@ -68,7 +71,9 @@ cflags-$(CONFIG_ARC_DW2_UNWIND) += -fasynchronous-unwind-tables $(cfi)
 ifndef CONFIG_CC_OPTIMIZE_FOR_SIZE
 # Generic build system uses -O2, we want -O3
 # Note: No need to add to cflags-y as that happens anyways
-ARCH_CFLAGS += -O3
+#
+# Disable the false maybe-uninitialized warings gcc spits out at -O3
+ARCH_CFLAGS += -O3 $(call cc-disable-warning,maybe-uninitialized,)
 endif
 
 # small data is default for elf32 tool-chain. If not usable, disable it
diff --git a/arch/arc/boot/dts/axc001.dtsi b/arch/arc/boot/dts/axc001.dtsi
index 6ae2c476ad82..53ce226f77a5 100644
--- a/arch/arc/boot/dts/axc001.dtsi
+++ b/arch/arc/boot/dts/axc001.dtsi
@@ -71,7 +71,7 @@
 			reg-io-width = <4>;
 		};
 
-		arcpmu0: pmu {
+		arcpct0: pct {
 			compatible = "snps,arc700-pct";
 		};
 	};
diff --git a/arch/arc/boot/dts/nsim_700.dts b/arch/arc/boot/dts/nsim_700.dts
index ce0ccd20b5bf..5ee96b067c08 100644
--- a/arch/arc/boot/dts/nsim_700.dts
+++ b/arch/arc/boot/dts/nsim_700.dts
@@ -69,7 +69,7 @@
 			};
 		};
 
-		arcpmu0: pmu {
+		arcpct0: pct {
 			compatible = "snps,arc700-pct";
 		};
 	};
diff --git a/arch/arc/boot/dts/nsimosci.dts b/arch/arc/boot/dts/nsimosci.dts
index bcf603142a33..3c391ba565ed 100644
--- a/arch/arc/boot/dts/nsimosci.dts
+++ b/arch/arc/boot/dts/nsimosci.dts
@@ -83,5 +83,9 @@
 			reg = <0xf0003000 0x44>;
 			interrupts = <7>;
 		};
+
+		arcpct0: pct {
+			compatible = "snps,arc700-pct";
+		};
 	};
 };
diff --git a/arch/arc/configs/nsim_700_defconfig b/arch/arc/configs/nsim_700_defconfig
index 7314f538847b..b0066a749d4c 100644
--- a/arch/arc/configs/nsim_700_defconfig
+++ b/arch/arc/configs/nsim_700_defconfig
@@ -14,6 +14,7 @@ CONFIG_BLK_DEV_INITRD=y
 CONFIG_INITRAMFS_SOURCE="../arc_initramfs/"
 CONFIG_KALLSYMS_ALL=y
 CONFIG_EMBEDDED=y
+CONFIG_PERF_EVENTS=y
 # CONFIG_SLUB_DEBUG is not set
 # CONFIG_COMPAT_BRK is not set
 CONFIG_KPROBES=y
diff --git a/arch/arc/configs/nsim_hs_defconfig b/arch/arc/configs/nsim_hs_defconfig
index 65ab9fbf83f2..ebe9ebb92933 100644
--- a/arch/arc/configs/nsim_hs_defconfig
+++ b/arch/arc/configs/nsim_hs_defconfig
@@ -14,6 +14,7 @@ CONFIG_BLK_DEV_INITRD=y
 CONFIG_INITRAMFS_SOURCE="../../arc_initramfs_hs/"
 CONFIG_KALLSYMS_ALL=y
 CONFIG_EMBEDDED=y
+CONFIG_PERF_EVENTS=y
 # CONFIG_SLUB_DEBUG is not set
 # CONFIG_COMPAT_BRK is not set
 CONFIG_KPROBES=y
diff --git a/arch/arc/configs/nsim_hs_smp_defconfig b/arch/arc/configs/nsim_hs_smp_defconfig
index 3b3990cddbe1..4bde43278be6 100644
--- a/arch/arc/configs/nsim_hs_smp_defconfig
+++ b/arch/arc/configs/nsim_hs_smp_defconfig
@@ -12,6 +12,7 @@ CONFIG_BLK_DEV_INITRD=y
 CONFIG_INITRAMFS_SOURCE="../arc_initramfs_hs/"
 CONFIG_KALLSYMS_ALL=y
 CONFIG_EMBEDDED=y
+CONFIG_PERF_EVENTS=y
 # CONFIG_SLUB_DEBUG is not set
 # CONFIG_COMPAT_BRK is not set
 CONFIG_KPROBES=y
diff --git a/arch/arc/configs/nsimosci_defconfig b/arch/arc/configs/nsimosci_defconfig
index 98cf20933bbb..f6fb3d26557e 100644
--- a/arch/arc/configs/nsimosci_defconfig
+++ b/arch/arc/configs/nsimosci_defconfig
@@ -14,6 +14,7 @@ CONFIG_BLK_DEV_INITRD=y
 CONFIG_INITRAMFS_SOURCE="../arc_initramfs/"
 CONFIG_KALLSYMS_ALL=y
 CONFIG_EMBEDDED=y
+CONFIG_PERF_EVENTS=y
 # CONFIG_SLUB_DEBUG is not set
 # CONFIG_COMPAT_BRK is not set
 CONFIG_KPROBES=y
diff --git a/arch/arc/configs/nsimosci_hs_defconfig b/arch/arc/configs/nsimosci_hs_defconfig
index ddf8b96d494e..b9f0fe00044b 100644
--- a/arch/arc/configs/nsimosci_hs_defconfig
+++ b/arch/arc/configs/nsimosci_hs_defconfig
@@ -14,6 +14,7 @@ CONFIG_BLK_DEV_INITRD=y
 CONFIG_INITRAMFS_SOURCE="../arc_initramfs_hs/"
 CONFIG_KALLSYMS_ALL=y
 CONFIG_EMBEDDED=y
+CONFIG_PERF_EVENTS=y
 # CONFIG_SLUB_DEBUG is not set
 # CONFIG_COMPAT_BRK is not set
 CONFIG_KPROBES=y
diff --git a/arch/arc/configs/nsimosci_hs_smp_defconfig b/arch/arc/configs/nsimosci_hs_smp_defconfig
index ceb90745326e..6da71ba253a9 100644
--- a/arch/arc/configs/nsimosci_hs_smp_defconfig
+++ b/arch/arc/configs/nsimosci_hs_smp_defconfig
@@ -10,6 +10,7 @@ CONFIG_IKCONFIG_PROC=y
 # CONFIG_PID_NS is not set
 CONFIG_BLK_DEV_INITRD=y
 CONFIG_INITRAMFS_SOURCE="../arc_initramfs_hs/"
+CONFIG_PERF_EVENTS=y
 # CONFIG_COMPAT_BRK is not set
 CONFIG_KPROBES=y
 CONFIG_MODULES=y
@@ -34,7 +35,6 @@ CONFIG_INET=y
 # CONFIG_INET_XFRM_MODE_TRANSPORT is not set
 # CONFIG_INET_XFRM_MODE_TUNNEL is not set
 # CONFIG_INET_XFRM_MODE_BEET is not set
-# CONFIG_INET_LRO is not set
 # CONFIG_IPV6 is not set
 # CONFIG_WIRELESS is not set
 CONFIG_DEVTMPFS=y
@@ -72,7 +72,6 @@ CONFIG_SERIAL_OF_PLATFORM=y
 # CONFIG_HWMON is not set
 CONFIG_DRM=y
 CONFIG_DRM_ARCPGU=y
-CONFIG_FRAMEBUFFER_CONSOLE=y
 CONFIG_LOGO=y
 # CONFIG_HID is not set
 # CONFIG_USB_SUPPORT is not set
diff --git a/arch/arc/include/asm/arcregs.h b/arch/arc/include/asm/arcregs.h
index 7f3f9f63708c..1bd24ec3e350 100644
--- a/arch/arc/include/asm/arcregs.h
+++ b/arch/arc/include/asm/arcregs.h
@@ -43,12 +43,14 @@
 #define STATUS_AE_BIT		5	/* Exception active */
 #define STATUS_DE_BIT		6	/* PC is in delay slot */
 #define STATUS_U_BIT		7	/* User/Kernel mode */
+#define STATUS_Z_BIT		11
 #define STATUS_L_BIT		12	/* Loop inhibit */
 
 /* These masks correspond to the status word(STATUS_32) bits */
 #define STATUS_AE_MASK		(1<<STATUS_AE_BIT)
 #define STATUS_DE_MASK		(1<<STATUS_DE_BIT)
 #define STATUS_U_MASK		(1<<STATUS_U_BIT)
+#define STATUS_Z_MASK		(1<<STATUS_Z_BIT)
 #define STATUS_L_MASK		(1<<STATUS_L_BIT)
 
 /*
diff --git a/arch/arc/include/asm/smp.h b/arch/arc/include/asm/smp.h
index 89fdd1b0a76e..0861007d9ef3 100644
--- a/arch/arc/include/asm/smp.h
+++ b/arch/arc/include/asm/smp.h
@@ -37,9 +37,9 @@ extern const char *arc_platform_smp_cpuinfo(void);
  * API expected BY platform smp code (FROM arch smp code)
  *
  * smp_ipi_irq_setup:
- *	Takes @cpu and @irq to which the arch-common ISR is hooked up
+ *	Takes @cpu and @hwirq to which the arch-common ISR is hooked up
  */
-extern int smp_ipi_irq_setup(int cpu, int irq);
+extern int smp_ipi_irq_setup(int cpu, irq_hw_number_t hwirq);
 
 /*
  * struct plat_smp_ops	- SMP callbacks provided by platform to ARC SMP
diff --git a/arch/arc/kernel/devtree.c b/arch/arc/kernel/devtree.c
index f1e07c2344f8..3b67f538f142 100644
--- a/arch/arc/kernel/devtree.c
+++ b/arch/arc/kernel/devtree.c
@@ -31,6 +31,8 @@ static void __init arc_set_early_base_baud(unsigned long dt_root)
 		arc_base_baud = 166666666;	/* Fixed 166.6MHz clk (TB10x) */
 	else if (of_flat_dt_is_compatible(dt_root, "snps,arc-sdp"))
 		arc_base_baud = 33333333;	/* Fixed 33MHz clk (AXS10x) */
+	else if (of_flat_dt_is_compatible(dt_root, "ezchip,arc-nps"))
+		arc_base_baud = 800000000;	/* Fixed 800MHz clk (NPS) */
 	else
 		arc_base_baud = 50000000;	/* Fixed default 50MHz */
 }
diff --git a/arch/arc/kernel/mcip.c b/arch/arc/kernel/mcip.c
index c424d5abc318..f39142acc89e 100644
--- a/arch/arc/kernel/mcip.c
+++ b/arch/arc/kernel/mcip.c
@@ -181,6 +181,8 @@ idu_irq_set_affinity(struct irq_data *data, const struct cpumask *cpumask,
 {
 	unsigned long flags;
 	cpumask_t online;
+	unsigned int destination_bits;
+	unsigned int distribution_mode;
 
 	/* errout if no online cpu per @cpumask */
 	if (!cpumask_and(&online, cpumask, cpu_online_mask))
@@ -188,8 +190,15 @@ idu_irq_set_affinity(struct irq_data *data, const struct cpumask *cpumask,
 
 	raw_spin_lock_irqsave(&mcip_lock, flags);
 
-	idu_set_dest(data->hwirq, cpumask_bits(&online)[0]);
-	idu_set_mode(data->hwirq, IDU_M_TRIG_LEVEL, IDU_M_DISTRI_RR);
+	destination_bits = cpumask_bits(&online)[0];
+	idu_set_dest(data->hwirq, destination_bits);
+
+	if (ffs(destination_bits) == fls(destination_bits))
+		distribution_mode = IDU_M_DISTRI_DEST;
+	else
+		distribution_mode = IDU_M_DISTRI_RR;
+
+	idu_set_mode(data->hwirq, IDU_M_TRIG_LEVEL, distribution_mode);
 
 	raw_spin_unlock_irqrestore(&mcip_lock, flags);
 
@@ -207,16 +216,15 @@ static struct irq_chip idu_irq_chip = {
 
 };
 
-static int idu_first_irq;
+static irq_hw_number_t idu_first_hwirq;
 
 static void idu_cascade_isr(struct irq_desc *desc)
 {
-	struct irq_domain *domain = irq_desc_get_handler_data(desc);
-	unsigned int core_irq = irq_desc_get_irq(desc);
-	unsigned int idu_irq;
+	struct irq_domain *idu_domain = irq_desc_get_handler_data(desc);
+	irq_hw_number_t core_hwirq = irqd_to_hwirq(irq_desc_get_irq_data(desc));
+	irq_hw_number_t idu_hwirq = core_hwirq - idu_first_hwirq;
 
-	idu_irq = core_irq - idu_first_irq;
-	generic_handle_irq(irq_find_mapping(domain, idu_irq));
+	generic_handle_irq(irq_find_mapping(idu_domain, idu_hwirq));
 }
 
 static int idu_irq_map(struct irq_domain *d, unsigned int virq, irq_hw_number_t hwirq)
@@ -282,7 +290,7 @@ idu_of_init(struct device_node *intc, struct device_node *parent)
 	struct irq_domain *domain;
 	/* Read IDU BCR to confirm nr_irqs */
 	int nr_irqs = of_irq_count(intc);
-	int i, irq;
+	int i, virq;
 	struct mcip_bcr mp;
 
 	READ_BCR(ARC_REG_MCIP_BCR, mp);
@@ -303,11 +311,11 @@ idu_of_init(struct device_node *intc, struct device_node *parent)
 	 * however we need it to get the parent virq and set IDU handler
 	 * as first level isr
 	 */
-		irq = irq_of_parse_and_map(intc, i);
+		virq = irq_of_parse_and_map(intc, i);
 		if (!i)
-			idu_first_irq = irq;
+			idu_first_hwirq = irqd_to_hwirq(irq_get_irq_data(virq));
 
-		irq_set_chained_handler_and_data(irq, idu_cascade_isr, domain);
+		irq_set_chained_handler_and_data(virq, idu_cascade_isr, domain);
 	}
 
 	__mcip_cmd(CMD_IDU_ENABLE, 0);
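
The affinity hunk above picks the IDU distribution mode from the online mask: ffs(x) == fls(x) holds exactly when the lowest and highest set bits coincide, i.e. exactly one CPU is targeted, so direct-destination delivery can replace round-robin. A standalone sketch of that predicate (ordinary userspace C, not part of the patch; the ffs32/fls32 helpers are made up here to mimic the kernel's ffs()/fls()):

#include <stdio.h>

/* Mimic kernel ffs()/fls() for a non-zero 32-bit mask. */
static int ffs32(unsigned int x) { return __builtin_ffs((int)x); }
static int fls32(unsigned int x) { return 32 - __builtin_clz(x); }

int main(void)
{
	unsigned int masks[] = { 0x1, 0x8, 0x3, 0xf0 };

	for (unsigned int i = 0; i < sizeof(masks) / sizeof(masks[0]); i++) {
		unsigned int m = masks[i];

		/* one bit set -> IDU_M_DISTRI_DEST, several -> IDU_M_DISTRI_RR */
		printf("mask 0x%02x -> %s\n", m,
		       ffs32(m) == fls32(m) ? "single CPU (direct)"
					    : "multiple CPUs (round-robin)");
	}
	return 0;
}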
diff --git a/arch/arc/kernel/process.c b/arch/arc/kernel/process.c
index 59aa43cb146e..a41a79a4f4fe 100644
--- a/arch/arc/kernel/process.c
+++ b/arch/arc/kernel/process.c
@@ -43,8 +43,8 @@ SYSCALL_DEFINE0(arc_gettls)
 
 SYSCALL_DEFINE3(arc_usr_cmpxchg, int *, uaddr, int, expected, int, new)
 {
-	int uval;
-	int ret;
+	struct pt_regs *regs = current_pt_regs();
+	int uval = -EFAULT;
 
 	/*
 	 * This is only for old cores lacking LLOCK/SCOND, which by defintion
@@ -54,24 +54,26 @@ SYSCALL_DEFINE3(arc_usr_cmpxchg, int *, uaddr, int, expected, int, new)
 	 */
 	WARN_ON_ONCE(IS_ENABLED(CONFIG_SMP));
 
+	/* Z indicates to userspace if operation succeded */
+	regs->status32 &= ~STATUS_Z_MASK;
+
 	if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
 		return -EFAULT;
 
 	preempt_disable();
 
-	ret = __get_user(uval, uaddr);
-	if (ret)
+	if (__get_user(uval, uaddr))
 		goto done;
 
-	if (uval != expected)
-		ret = -EAGAIN;
-	else
-		ret = __put_user(new, uaddr);
+	if (uval == expected) {
+		if (!__put_user(new, uaddr))
+			regs->status32 |= STATUS_Z_MASK;
+	}
 
 done:
 	preempt_enable();
 
-	return ret;
+	return uval;
 }
 
 void arch_cpu_idle(void)
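
With the change above, arc_usr_cmpxchg no longer multiplexes success and the observed value into a single return code: it always returns the value read from *uaddr and signals success through the Z bit of the user-visible STATUS32 register. A rough model of the new contract (illustrative C only, not the real syscall ABI; names are invented for the sketch):

#include <stdbool.h>

struct usr_cmpxchg_result {
	int old;	/* value observed at *uaddr (what the syscall returns) */
	bool z;		/* models the Z flag: set only when 'new' was stored */
};

static struct usr_cmpxchg_result usr_cmpxchg_model(int *uaddr, int expected,
						   int new_val)
{
	struct usr_cmpxchg_result r = { .old = *uaddr, .z = false };

	if (r.old == expected) {
		*uaddr = new_val;	/* in the kernel this is __put_user() */
		r.z = true;
	}
	return r;
}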
diff --git a/arch/arc/kernel/smp.c b/arch/arc/kernel/smp.c
index f183cc648851..88674d972c9d 100644
--- a/arch/arc/kernel/smp.c
+++ b/arch/arc/kernel/smp.c
@@ -22,6 +22,7 @@
 #include <linux/atomic.h>
 #include <linux/cpumask.h>
 #include <linux/reboot.h>
+#include <linux/irqdomain.h>
 #include <asm/processor.h>
 #include <asm/setup.h>
 #include <asm/mach_desc.h>
@@ -67,11 +68,13 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
 	int i;
 
 	/*
-	 * Initialise the present map, which describes the set of CPUs
-	 * actually populated at the present time.
+	 * if platform didn't set the present map already, do it now
+	 * boot cpu is set to present already by init/main.c
 	 */
-	for (i = 0; i < max_cpus; i++)
-		set_cpu_present(i, true);
+	if (num_present_cpus() <= 1) {
+		for (i = 0; i < max_cpus; i++)
+			set_cpu_present(i, true);
+	}
 }
 
 void __init smp_cpus_done(unsigned int max_cpus)
@@ -351,20 +354,24 @@ irqreturn_t do_IPI(int irq, void *dev_id)
  */
 static DEFINE_PER_CPU(int, ipi_dev);
 
-int smp_ipi_irq_setup(int cpu, int irq)
+int smp_ipi_irq_setup(int cpu, irq_hw_number_t hwirq)
 {
 	int *dev = per_cpu_ptr(&ipi_dev, cpu);
+	unsigned int virq = irq_find_mapping(NULL, hwirq);
+
+	if (!virq)
+		panic("Cannot find virq for root domain and hwirq=%lu", hwirq);
 
 	/* Boot cpu calls request, all call enable */
 	if (!cpu) {
 		int rc;
 
-		rc = request_percpu_irq(irq, do_IPI, "IPI Interrupt", dev);
+		rc = request_percpu_irq(virq, do_IPI, "IPI Interrupt", dev);
 		if (rc)
-			panic("Percpu IRQ request failed for %d\n", irq);
+			panic("Percpu IRQ request failed for %u\n", virq);
 	}
 
-	enable_percpu_irq(irq, 0);
+	enable_percpu_irq(virq, 0);
 
 	return 0;
 }
diff --git a/arch/arc/kernel/time.c b/arch/arc/kernel/time.c
index f927b8dc6edd..c10390d1ddb6 100644
--- a/arch/arc/kernel/time.c
+++ b/arch/arc/kernel/time.c
@@ -152,14 +152,17 @@ static cycle_t arc_read_rtc(struct clocksource *cs)
 		cycle_t full;
 	} stamp;
 
-
-	__asm__ __volatile(
-	"1:				\n"
-	"	lr		%0, [AUX_RTC_LOW]	\n"
-	"	lr		%1, [AUX_RTC_HIGH]	\n"
-	"	lr		%2, [AUX_RTC_CTRL]	\n"
-	"	bbit0.nt	%2, 31, 1b	\n"
-	: "=r" (stamp.low), "=r" (stamp.high), "=r" (status));
+	/*
+	 * hardware has an internal state machine which tracks readout of
+	 * low/high and updates the CTRL.status if
+	 *  - interrupt/exception taken between the two reads
+	 *  - high increments after low has been read
+	 */
+	do {
+		stamp.low = read_aux_reg(AUX_RTC_LOW);
+		stamp.high = read_aux_reg(AUX_RTC_HIGH);
+		status = read_aux_reg(AUX_RTC_CTRL);
+	} while (!(status & _BITUL(31)));
 
 	return stamp.full;
 }
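
The rewritten arc_read_rtc above is the usual retry loop for sampling a 64-bit counter through two 32-bit halves: read low, read high, then re-check a hardware status bit that says whether the pair is still consistent. A generic sketch of the same pattern (assumed reader callbacks, not the ARC register accessors):

#include <stdint.h>

/* Assumed hardware accessors for the example; on ARC these would be
 * read_aux_reg(AUX_RTC_LOW/HIGH/CTRL). */
extern uint32_t rtc_read_low(void);
extern uint32_t rtc_read_high(void);
extern uint32_t rtc_read_ctrl(void);	/* bit 31 set => low/high pair is consistent */

static uint64_t rtc_read64(void)
{
	uint32_t lo, hi, status;

	do {
		lo = rtc_read_low();
		hi = rtc_read_high();
		status = rtc_read_ctrl();
	} while (!(status & (1u << 31)));

	return ((uint64_t)hi << 32) | lo;
}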
diff --git a/arch/arc/mm/dma.c b/arch/arc/mm/dma.c
index 60aab5a7522b..cd8aad8226dd 100644
--- a/arch/arc/mm/dma.c
+++ b/arch/arc/mm/dma.c
@@ -105,6 +105,31 @@ static void arc_dma_free(struct device *dev, size_t size, void *vaddr,
 	__free_pages(page, get_order(size));
 }
 
+static int arc_dma_mmap(struct device *dev, struct vm_area_struct *vma,
+			void *cpu_addr, dma_addr_t dma_addr, size_t size,
+			unsigned long attrs)
+{
+	unsigned long user_count = vma_pages(vma);
+	unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
+	unsigned long pfn = __phys_to_pfn(plat_dma_to_phys(dev, dma_addr));
+	unsigned long off = vma->vm_pgoff;
+	int ret = -ENXIO;
+
+	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+
+	if (dma_mmap_from_coherent(dev, vma, cpu_addr, size, &ret))
+		return ret;
+
+	if (off < count && user_count <= (count - off)) {
+		ret = remap_pfn_range(vma, vma->vm_start,
+				      pfn + off,
+				      user_count << PAGE_SHIFT,
+				      vma->vm_page_prot);
+	}
+
+	return ret;
+}
+
 /*
  * streaming DMA Mapping API...
  * CPU accesses page via normal paddr, thus needs to explicitly made
@@ -193,6 +218,7 @@ static int arc_dma_supported(struct device *dev, u64 dma_mask)
 struct dma_map_ops arc_dma_ops = {
 	.alloc			= arc_dma_alloc,
 	.free			= arc_dma_free,
+	.mmap			= arc_dma_mmap,
 	.map_page		= arc_dma_map_page,
 	.map_sg			= arc_dma_map_sg,
 	.sync_single_for_device	= arc_dma_sync_single_for_device,
diff --git a/arch/arc/plat-eznps/smp.c b/arch/arc/plat-eznps/smp.c
index 5e901f86e4bd..56a4c8522f11 100644
--- a/arch/arc/plat-eznps/smp.c
+++ b/arch/arc/plat-eznps/smp.c
@@ -140,16 +140,10 @@ static void eznps_init_per_cpu(int cpu)
 	mtm_enable_core(cpu);
 }
 
-static void eznps_ipi_clear(int irq)
-{
-	write_aux_reg(CTOP_AUX_IACK, 1 << irq);
-}
-
 struct plat_smp_ops plat_smp_ops = {
 	.info		= smp_cpuinfo_buf,
 	.init_early_smp	= eznps_init_cpumasks,
 	.cpu_kick	= eznps_smp_wakeup_cpu,
 	.ipi_send	= eznps_ipi_send,
 	.init_per_cpu	= eznps_init_per_cpu,
-	.ipi_clear	= eznps_ipi_clear,
 };
diff --git a/arch/arm/include/asm/kvm_asm.h b/arch/arm/include/asm/kvm_asm.h
index d7ea6bcb29bf..8ef05381984b 100644
--- a/arch/arm/include/asm/kvm_asm.h
+++ b/arch/arm/include/asm/kvm_asm.h
@@ -66,6 +66,7 @@ extern char __kvm_hyp_vector[];
 extern void __kvm_flush_vm_context(void);
 extern void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa);
 extern void __kvm_tlb_flush_vmid(struct kvm *kvm);
+extern void __kvm_tlb_flush_local_vmid(struct kvm_vcpu *vcpu);
 
 extern int __kvm_vcpu_run(struct kvm_vcpu *vcpu);
 
diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h
index 2d19e02d03fd..d5423ab15ed5 100644
--- a/arch/arm/include/asm/kvm_host.h
+++ b/arch/arm/include/asm/kvm_host.h
@@ -57,6 +57,9 @@ struct kvm_arch {
 	/* VTTBR value associated with below pgd and vmid */
 	u64    vttbr;
 
+	/* The last vcpu id that ran on each physical CPU */
+	int __percpu *last_vcpu_ran;
+
 	/* Timer */
 	struct arch_timer_kvm	timer;
 
diff --git a/arch/arm/include/asm/kvm_hyp.h b/arch/arm/include/asm/kvm_hyp.h
index 343135ede5fa..58508900c4bb 100644
--- a/arch/arm/include/asm/kvm_hyp.h
+++ b/arch/arm/include/asm/kvm_hyp.h
@@ -71,6 +71,7 @@
 #define ICIALLUIS	__ACCESS_CP15(c7, 0, c1, 0)
 #define ATS1CPR		__ACCESS_CP15(c7, 0, c8, 0)
 #define TLBIALLIS	__ACCESS_CP15(c8, 0, c3, 0)
+#define TLBIALL		__ACCESS_CP15(c8, 0, c7, 0)
 #define TLBIALLNSNHIS	__ACCESS_CP15(c8, 4, c3, 4)
 #define PRRR		__ACCESS_CP15(c10, 0, c2, 0)
 #define NMRR		__ACCESS_CP15(c10, 0, c2, 1)
diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c
index 08bb84f2ad58..19b5f5c1c0ff 100644
--- a/arch/arm/kvm/arm.c
+++ b/arch/arm/kvm/arm.c
@@ -114,11 +114,18 @@ void kvm_arch_check_processor_compat(void *rtn)
  */
 int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
 {
-	int ret = 0;
+	int ret, cpu;
 
 	if (type)
 		return -EINVAL;
 
+	kvm->arch.last_vcpu_ran = alloc_percpu(typeof(*kvm->arch.last_vcpu_ran));
+	if (!kvm->arch.last_vcpu_ran)
+		return -ENOMEM;
+
+	for_each_possible_cpu(cpu)
+		*per_cpu_ptr(kvm->arch.last_vcpu_ran, cpu) = -1;
+
 	ret = kvm_alloc_stage2_pgd(kvm);
 	if (ret)
 		goto out_fail_alloc;
@@ -141,6 +148,8 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
 out_free_stage2_pgd:
 	kvm_free_stage2_pgd(kvm);
 out_fail_alloc:
+	free_percpu(kvm->arch.last_vcpu_ran);
+	kvm->arch.last_vcpu_ran = NULL;
 	return ret;
 }
 
@@ -168,6 +177,9 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
 {
 	int i;
 
+	free_percpu(kvm->arch.last_vcpu_ran);
+	kvm->arch.last_vcpu_ran = NULL;
+
 	for (i = 0; i < KVM_MAX_VCPUS; ++i) {
 		if (kvm->vcpus[i]) {
 			kvm_arch_vcpu_free(kvm->vcpus[i]);
@@ -312,6 +324,19 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
 
 void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 {
+	int *last_ran;
+
+	last_ran = this_cpu_ptr(vcpu->kvm->arch.last_vcpu_ran);
+
+	/*
+	 * We might get preempted before the vCPU actually runs, but
+	 * over-invalidation doesn't affect correctness.
+	 */
+	if (*last_ran != vcpu->vcpu_id) {
+		kvm_call_hyp(__kvm_tlb_flush_local_vmid, vcpu);
+		*last_ran = vcpu->vcpu_id;
+	}
+
 	vcpu->cpu = cpu;
 	vcpu->arch.host_cpu_context = this_cpu_ptr(kvm_host_cpu_state);
 
diff --git a/arch/arm/kvm/hyp/tlb.c b/arch/arm/kvm/hyp/tlb.c
index 729652854f90..6d810af2d9fd 100644
--- a/arch/arm/kvm/hyp/tlb.c
+++ b/arch/arm/kvm/hyp/tlb.c
@@ -55,6 +55,21 @@ void __hyp_text __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa)
 	__kvm_tlb_flush_vmid(kvm);
 }
 
+void __hyp_text __kvm_tlb_flush_local_vmid(struct kvm_vcpu *vcpu)
+{
+	struct kvm *kvm = kern_hyp_va(kern_hyp_va(vcpu)->kvm);
+
+	/* Switch to requested VMID */
+	write_sysreg(kvm->arch.vttbr, VTTBR);
+	isb();
+
+	write_sysreg(0, TLBIALL);
+	dsb(nsh);
+	isb();
+
+	write_sysreg(0, VTTBR);
+}
+
 void __hyp_text __kvm_flush_vm_context(void)
 {
 	write_sysreg(0, TLBIALLNSNHIS);
diff --git a/arch/arm64/boot/dts/rockchip/rk3399.dtsi b/arch/arm64/boot/dts/rockchip/rk3399.dtsi
index b65c193dc64e..7afbfb0f96a3 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3399.dtsi
@@ -300,8 +300,11 @@
 		ranges = <0x83000000 0x0 0xfa000000 0x0 0xfa000000 0x0 0x600000
 			  0x81000000 0x0 0xfa600000 0x0 0xfa600000 0x0 0x100000>;
 		resets = <&cru SRST_PCIE_CORE>, <&cru SRST_PCIE_MGMT>,
-			 <&cru SRST_PCIE_MGMT_STICKY>, <&cru SRST_PCIE_PIPE>;
-		reset-names = "core", "mgmt", "mgmt-sticky", "pipe";
+			 <&cru SRST_PCIE_MGMT_STICKY>, <&cru SRST_PCIE_PIPE>,
+			 <&cru SRST_PCIE_PM>, <&cru SRST_P_PCIE>,
+			 <&cru SRST_A_PCIE>;
+		reset-names = "core", "mgmt", "mgmt-sticky", "pipe",
+			      "pm", "pclk", "aclk";
 		status = "disabled";
 
 		pcie0_intc: interrupt-controller {
diff --git a/arch/arm64/include/asm/alternative.h b/arch/arm64/include/asm/alternative.h
index 39feb85a6931..6e1cb8c5af4d 100644
--- a/arch/arm64/include/asm/alternative.h
+++ b/arch/arm64/include/asm/alternative.h
@@ -1,7 +1,7 @@
 #ifndef __ASM_ALTERNATIVE_H
 #define __ASM_ALTERNATIVE_H
 
-#include <asm/cpufeature.h>
+#include <asm/cpucaps.h>
 #include <asm/insn.h>
 
 #ifndef __ASSEMBLY__
diff --git a/arch/arm64/include/asm/cpucaps.h b/arch/arm64/include/asm/cpucaps.h
new file mode 100644
index 000000000000..87b446535185
--- /dev/null
+++ b/arch/arm64/include/asm/cpucaps.h
@@ -0,0 +1,40 @@
+/*
+ * arch/arm64/include/asm/cpucaps.h
+ *
+ * Copyright (C) 2016 ARM Ltd.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+#ifndef __ASM_CPUCAPS_H
+#define __ASM_CPUCAPS_H
+
+#define ARM64_WORKAROUND_CLEAN_CACHE		0
+#define ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE	1
+#define ARM64_WORKAROUND_845719			2
+#define ARM64_HAS_SYSREG_GIC_CPUIF		3
+#define ARM64_HAS_PAN				4
+#define ARM64_HAS_LSE_ATOMICS			5
+#define ARM64_WORKAROUND_CAVIUM_23154		6
+#define ARM64_WORKAROUND_834220			7
+#define ARM64_HAS_NO_HW_PREFETCH		8
+#define ARM64_HAS_UAO				9
+#define ARM64_ALT_PAN_NOT_UAO			10
+#define ARM64_HAS_VIRT_HOST_EXTN		11
+#define ARM64_WORKAROUND_CAVIUM_27456		12
+#define ARM64_HAS_32BIT_EL0			13
+#define ARM64_HYP_OFFSET_LOW			14
+#define ARM64_MISMATCHED_CACHE_LINE_SIZE	15
+
+#define ARM64_NCAPS				16
+
+#endif /* __ASM_CPUCAPS_H */
diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h
index a27c3245ba21..0bc0b1de90c4 100644
--- a/arch/arm64/include/asm/cpufeature.h
+++ b/arch/arm64/include/asm/cpufeature.h
@@ -11,6 +11,7 @@
 
 #include <linux/jump_label.h>
 
+#include <asm/cpucaps.h>
 #include <asm/hwcap.h>
 #include <asm/sysreg.h>
 
@@ -24,25 +25,6 @@
 #define MAX_CPU_FEATURES	(8 * sizeof(elf_hwcap))
 #define cpu_feature(x)		ilog2(HWCAP_ ## x)
 
-#define ARM64_WORKAROUND_CLEAN_CACHE		0
-#define ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE	1
-#define ARM64_WORKAROUND_845719			2
-#define ARM64_HAS_SYSREG_GIC_CPUIF		3
-#define ARM64_HAS_PAN				4
-#define ARM64_HAS_LSE_ATOMICS			5
-#define ARM64_WORKAROUND_CAVIUM_23154		6
-#define ARM64_WORKAROUND_834220			7
-#define ARM64_HAS_NO_HW_PREFETCH		8
-#define ARM64_HAS_UAO				9
-#define ARM64_ALT_PAN_NOT_UAO			10
-#define ARM64_HAS_VIRT_HOST_EXTN		11
-#define ARM64_WORKAROUND_CAVIUM_27456		12
-#define ARM64_HAS_32BIT_EL0			13
-#define ARM64_HYP_OFFSET_LOW			14
-#define ARM64_MISMATCHED_CACHE_LINE_SIZE	15
-
-#define ARM64_NCAPS				16
-
 #ifndef __ASSEMBLY__
 
 #include <linux/kernel.h>
diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h
index 18f746551bf6..ec3553eb9349 100644
--- a/arch/arm64/include/asm/kvm_asm.h
+++ b/arch/arm64/include/asm/kvm_asm.h
@@ -54,6 +54,7 @@ extern char __kvm_hyp_vector[];
 extern void __kvm_flush_vm_context(void);
 extern void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa);
 extern void __kvm_tlb_flush_vmid(struct kvm *kvm);
+extern void __kvm_tlb_flush_local_vmid(struct kvm_vcpu *vcpu);
 
 extern int __kvm_vcpu_run(struct kvm_vcpu *vcpu);
 
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index bd94e6766759..e5050388e062 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -62,6 +62,9 @@ struct kvm_arch {
 	/* VTTBR value associated with above pgd and vmid */
 	u64    vttbr;
 
+	/* The last vcpu id that ran on each physical CPU */
+	int __percpu *last_vcpu_ran;
+
 	/* The maximum number of vCPUs depends on the used GIC model */
 	int max_vcpus;
 
diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h
index a79b969c26fc..6f72fe8b0e3e 100644
--- a/arch/arm64/include/asm/kvm_mmu.h
+++ b/arch/arm64/include/asm/kvm_mmu.h
@@ -128,7 +128,7 @@ static inline unsigned long __kern_hyp_va(unsigned long v)
 	return v;
 }
 
-#define kern_hyp_va(v) 	(typeof(v))(__kern_hyp_va((unsigned long)(v)))
+#define kern_hyp_va(v) 	((typeof(v))(__kern_hyp_va((unsigned long)(v))))
 
 /*
  * We currently only support a 40bit IPA.
diff --git a/arch/arm64/include/asm/lse.h b/arch/arm64/include/asm/lse.h
index 23acc00be32d..fc756e22c84c 100644
--- a/arch/arm64/include/asm/lse.h
+++ b/arch/arm64/include/asm/lse.h
@@ -5,7 +5,6 @@
 
 #include <linux/stringify.h>
 #include <asm/alternative.h>
-#include <asm/cpufeature.h>
 
 #ifdef __ASSEMBLER__
 
diff --git a/arch/arm64/kvm/hyp/tlb.c b/arch/arm64/kvm/hyp/tlb.c
index 9cc0ea784ae6..88e2f2b938f0 100644
--- a/arch/arm64/kvm/hyp/tlb.c
+++ b/arch/arm64/kvm/hyp/tlb.c
@@ -64,6 +64,21 @@ void __hyp_text __kvm_tlb_flush_vmid(struct kvm *kvm)
 	write_sysreg(0, vttbr_el2);
 }
 
+void __hyp_text __kvm_tlb_flush_local_vmid(struct kvm_vcpu *vcpu)
+{
+	struct kvm *kvm = kern_hyp_va(kern_hyp_va(vcpu)->kvm);
+
+	/* Switch to requested VMID */
+	write_sysreg(kvm->arch.vttbr, vttbr_el2);
+	isb();
+
+	asm volatile("tlbi vmalle1" : : );
+	dsb(nsh);
+	isb();
+
+	write_sysreg(0, vttbr_el2);
+}
+
 void __hyp_text __kvm_flush_vm_context(void)
 {
 	dsb(ishst);
diff --git a/arch/nios2/kernel/time.c b/arch/nios2/kernel/time.c
index d9563ddb337e..746bf5caaffc 100644
--- a/arch/nios2/kernel/time.c
+++ b/arch/nios2/kernel/time.c
@@ -324,6 +324,7 @@ static int __init nios2_time_init(struct device_node *timer)
 		ret = nios2_clocksource_init(timer);
 		break;
 	default:
+		ret = 0;
 		break;
 	}
 
diff --git a/arch/openrisc/include/asm/cache.h b/arch/openrisc/include/asm/cache.h
index 4ce7a01a252d..5f55da9cbfd5 100644
--- a/arch/openrisc/include/asm/cache.h
+++ b/arch/openrisc/include/asm/cache.h
@@ -23,6 +23,8 @@
  * they shouldn't be hard-coded!
  */
 
+#define __ro_after_init __read_mostly
+
 #define L1_CACHE_BYTES 16
 #define L1_CACHE_SHIFT 4
 
diff --git a/arch/s390/hypfs/hypfs_diag.c b/arch/s390/hypfs/hypfs_diag.c
index 28f03ca60100..794bebb43d23 100644
--- a/arch/s390/hypfs/hypfs_diag.c
+++ b/arch/s390/hypfs/hypfs_diag.c
@@ -363,11 +363,11 @@ out:
 static int diag224_get_name_table(void)
 {
 	/* memory must be below 2GB */
-	diag224_cpu_names = kmalloc(PAGE_SIZE, GFP_KERNEL | GFP_DMA);
+	diag224_cpu_names = (char *) __get_free_page(GFP_KERNEL | GFP_DMA);
 	if (!diag224_cpu_names)
 		return -ENOMEM;
 	if (diag224(diag224_cpu_names)) {
-		kfree(diag224_cpu_names);
+		free_page((unsigned long) diag224_cpu_names);
 		return -EOPNOTSUPP;
 	}
 	EBCASC(diag224_cpu_names + 16, (*diag224_cpu_names + 1) * 16);
@@ -376,7 +376,7 @@ static int diag224_get_name_table(void)
 
 static void diag224_delete_name_table(void)
 {
-	kfree(diag224_cpu_names);
+	free_page((unsigned long) diag224_cpu_names);
 }
 
 static int diag224_idx2name(int index, char *name)
diff --git a/arch/s390/kernel/vmlinux.lds.S b/arch/s390/kernel/vmlinux.lds.S
index 000e6e91f6a0..3667d20e997f 100644
--- a/arch/s390/kernel/vmlinux.lds.S
+++ b/arch/s390/kernel/vmlinux.lds.S
@@ -62,9 +62,11 @@ SECTIONS
 
 	. = ALIGN(PAGE_SIZE);
 	__start_ro_after_init = .;
+	__start_data_ro_after_init = .;
 	.data..ro_after_init : {
 		*(.data..ro_after_init)
 	}
+	__end_data_ro_after_init = .;
 	EXCEPTION_TABLE(16)
 	. = ALIGN(PAGE_SIZE);
 	__end_ro_after_init = .;
diff --git a/arch/s390/pci/pci_dma.c b/arch/s390/pci/pci_dma.c
index 7350c8bc13a2..6b2f72f523b9 100644
--- a/arch/s390/pci/pci_dma.c
+++ b/arch/s390/pci/pci_dma.c
@@ -423,7 +423,7 @@ static int __s390_dma_map_sg(struct device *dev, struct scatterlist *sg,
 	dma_addr_t dma_addr_base, dma_addr;
 	int flags = ZPCI_PTE_VALID;
 	struct scatterlist *s;
-	unsigned long pa;
+	unsigned long pa = 0;
 	int ret;
 
 	size = PAGE_ALIGN(size);
diff --git a/arch/tile/include/asm/cache.h b/arch/tile/include/asm/cache.h
index 6160761d5f61..4810e48dbbbf 100644
--- a/arch/tile/include/asm/cache.h
+++ b/arch/tile/include/asm/cache.h
@@ -61,4 +61,7 @@
  */
 #define __write_once __read_mostly
 
+/* __ro_after_init is the generic name for the tile arch __write_once. */
+#define __ro_after_init __read_mostly
+
 #endif /* _ASM_TILE_CACHE_H */
diff --git a/arch/x86/crypto/aesni-intel_glue.c b/arch/x86/crypto/aesni-intel_glue.c index 0ab5ee1c26af..aa8b0672f87a 100644 --- a/arch/x86/crypto/aesni-intel_glue.c +++ b/arch/x86/crypto/aesni-intel_glue.c | |||
@@ -888,7 +888,7 @@ static int helper_rfc4106_encrypt(struct aead_request *req) | |||
888 | unsigned long auth_tag_len = crypto_aead_authsize(tfm); | 888 | unsigned long auth_tag_len = crypto_aead_authsize(tfm); |
889 | u8 iv[16] __attribute__ ((__aligned__(AESNI_ALIGN))); | 889 | u8 iv[16] __attribute__ ((__aligned__(AESNI_ALIGN))); |
890 | struct scatter_walk src_sg_walk; | 890 | struct scatter_walk src_sg_walk; |
891 | struct scatter_walk dst_sg_walk; | 891 | struct scatter_walk dst_sg_walk = {}; |
892 | unsigned int i; | 892 | unsigned int i; |
893 | 893 | ||
894 | /* Assuming we are supporting rfc4106 64-bit extended */ | 894 | /* Assuming we are supporting rfc4106 64-bit extended */ |
@@ -968,7 +968,7 @@ static int helper_rfc4106_decrypt(struct aead_request *req) | |||
968 | u8 iv[16] __attribute__ ((__aligned__(AESNI_ALIGN))); | 968 | u8 iv[16] __attribute__ ((__aligned__(AESNI_ALIGN))); |
969 | u8 authTag[16]; | 969 | u8 authTag[16]; |
970 | struct scatter_walk src_sg_walk; | 970 | struct scatter_walk src_sg_walk; |
971 | struct scatter_walk dst_sg_walk; | 971 | struct scatter_walk dst_sg_walk = {}; |
972 | unsigned int i; | 972 | unsigned int i; |
973 | 973 | ||
974 | if (unlikely(req->assoclen != 16 && req->assoclen != 20)) | 974 | if (unlikely(req->assoclen != 16 && req->assoclen != 20)) |
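Both aesni hunks give dst_sg_walk an empty-brace initializer; on the paths where the destination walk is never started, zero-initializing it keeps the cleanup code from reading indeterminate stack memory (and silences the corresponding compiler warning). A stand-alone sketch of the idiom, with an invented struct standing in for scatter_walk:

    #include <stdio.h>

    struct walk_state {
        void *page;
        unsigned int offset;
    };

    int main(void)
    {
        struct walk_state src;        /* indeterminate until explicitly assigned */
        struct walk_state dst = {};   /* all members zeroed (GNU C / C23 "{}"; plain C would use {0}) */

        src.page = NULL;
        src.offset = 0;

        /* dst is safe to read even on paths that never fill it in */
        printf("src.offset=%u dst.offset=%u\n", src.offset, dst.offset);
        return 0;
    }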
diff --git a/arch/x86/events/intel/uncore_snb.c b/arch/x86/events/intel/uncore_snb.c index 5f845eef9a4d..81195cca7eae 100644 --- a/arch/x86/events/intel/uncore_snb.c +++ b/arch/x86/events/intel/uncore_snb.c | |||
@@ -8,8 +8,12 @@ | |||
8 | #define PCI_DEVICE_ID_INTEL_HSW_IMC 0x0c00 | 8 | #define PCI_DEVICE_ID_INTEL_HSW_IMC 0x0c00 |
9 | #define PCI_DEVICE_ID_INTEL_HSW_U_IMC 0x0a04 | 9 | #define PCI_DEVICE_ID_INTEL_HSW_U_IMC 0x0a04 |
10 | #define PCI_DEVICE_ID_INTEL_BDW_IMC 0x1604 | 10 | #define PCI_DEVICE_ID_INTEL_BDW_IMC 0x1604 |
11 | #define PCI_DEVICE_ID_INTEL_SKL_IMC 0x191f | 11 | #define PCI_DEVICE_ID_INTEL_SKL_U_IMC 0x1904 |
12 | #define PCI_DEVICE_ID_INTEL_SKL_U_IMC 0x190c | 12 | #define PCI_DEVICE_ID_INTEL_SKL_Y_IMC 0x190c |
13 | #define PCI_DEVICE_ID_INTEL_SKL_HD_IMC 0x1900 | ||
14 | #define PCI_DEVICE_ID_INTEL_SKL_HQ_IMC 0x1910 | ||
15 | #define PCI_DEVICE_ID_INTEL_SKL_SD_IMC 0x190f | ||
16 | #define PCI_DEVICE_ID_INTEL_SKL_SQ_IMC 0x191f | ||
13 | 17 | ||
14 | /* SNB event control */ | 18 | /* SNB event control */ |
15 | #define SNB_UNC_CTL_EV_SEL_MASK 0x000000ff | 19 | #define SNB_UNC_CTL_EV_SEL_MASK 0x000000ff |
@@ -616,13 +620,29 @@ static const struct pci_device_id bdw_uncore_pci_ids[] = { | |||
616 | 620 | ||
617 | static const struct pci_device_id skl_uncore_pci_ids[] = { | 621 | static const struct pci_device_id skl_uncore_pci_ids[] = { |
618 | { /* IMC */ | 622 | { /* IMC */ |
619 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SKL_IMC), | 623 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SKL_Y_IMC), |
620 | .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0), | 624 | .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0), |
621 | }, | 625 | }, |
622 | { /* IMC */ | 626 | { /* IMC */ |
623 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SKL_U_IMC), | 627 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SKL_U_IMC), |
624 | .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0), | 628 | .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0), |
625 | }, | 629 | }, |
630 | { /* IMC */ | ||
631 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SKL_HD_IMC), | ||
632 | .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0), | ||
633 | }, | ||
634 | { /* IMC */ | ||
635 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SKL_HQ_IMC), | ||
636 | .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0), | ||
637 | }, | ||
638 | { /* IMC */ | ||
639 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SKL_SD_IMC), | ||
640 | .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0), | ||
641 | }, | ||
642 | { /* IMC */ | ||
643 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SKL_SQ_IMC), | ||
644 | .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0), | ||
645 | }, | ||
626 | 646 | ||
627 | { /* end: all zeroes */ }, | 647 | { /* end: all zeroes */ }, |
628 | }; | 648 | }; |
@@ -666,8 +686,12 @@ static const struct imc_uncore_pci_dev desktop_imc_pci_ids[] = { | |||
666 | IMC_DEV(HSW_IMC, &hsw_uncore_pci_driver), /* 4th Gen Core Processor */ | 686 | IMC_DEV(HSW_IMC, &hsw_uncore_pci_driver), /* 4th Gen Core Processor */ |
667 | IMC_DEV(HSW_U_IMC, &hsw_uncore_pci_driver), /* 4th Gen Core ULT Mobile Processor */ | 687 | IMC_DEV(HSW_U_IMC, &hsw_uncore_pci_driver), /* 4th Gen Core ULT Mobile Processor */ |
668 | IMC_DEV(BDW_IMC, &bdw_uncore_pci_driver), /* 5th Gen Core U */ | 688 | IMC_DEV(BDW_IMC, &bdw_uncore_pci_driver), /* 5th Gen Core U */ |
669 | IMC_DEV(SKL_IMC, &skl_uncore_pci_driver), /* 6th Gen Core */ | 689 | IMC_DEV(SKL_Y_IMC, &skl_uncore_pci_driver), /* 6th Gen Core Y */ |
670 | IMC_DEV(SKL_U_IMC, &skl_uncore_pci_driver), /* 6th Gen Core U */ | 690 | IMC_DEV(SKL_U_IMC, &skl_uncore_pci_driver), /* 6th Gen Core U */ |
691 | IMC_DEV(SKL_HD_IMC, &skl_uncore_pci_driver), /* 6th Gen Core H Dual Core */ | ||
692 | IMC_DEV(SKL_HQ_IMC, &skl_uncore_pci_driver), /* 6th Gen Core H Quad Core */ | ||
693 | IMC_DEV(SKL_SD_IMC, &skl_uncore_pci_driver), /* 6th Gen Core S Dual Core */ | ||
694 | IMC_DEV(SKL_SQ_IMC, &skl_uncore_pci_driver), /* 6th Gen Core S Quad Core */ | ||
671 | { /* end marker */ } | 695 | { /* end marker */ } |
672 | }; | 696 | }; |
673 | 697 | ||
diff --git a/arch/x86/include/asm/intel-mid.h b/arch/x86/include/asm/intel-mid.h index 5b6753d1f7f4..49da9f497b90 100644 --- a/arch/x86/include/asm/intel-mid.h +++ b/arch/x86/include/asm/intel-mid.h | |||
@@ -17,6 +17,7 @@ | |||
17 | 17 | ||
18 | extern int intel_mid_pci_init(void); | 18 | extern int intel_mid_pci_init(void); |
19 | extern int intel_mid_pci_set_power_state(struct pci_dev *pdev, pci_power_t state); | 19 | extern int intel_mid_pci_set_power_state(struct pci_dev *pdev, pci_power_t state); |
20 | extern pci_power_t intel_mid_pci_get_power_state(struct pci_dev *pdev); | ||
20 | 21 | ||
21 | extern void intel_mid_pwr_power_off(void); | 22 | extern void intel_mid_pwr_power_off(void); |
22 | 23 | ||
diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c index c7364bd633e1..51287cd90bf6 100644 --- a/arch/x86/kernel/apm_32.c +++ b/arch/x86/kernel/apm_32.c | |||
@@ -1042,8 +1042,11 @@ static int apm_get_power_status(u_short *status, u_short *bat, u_short *life) | |||
1042 | 1042 | ||
1043 | if (apm_info.get_power_status_broken) | 1043 | if (apm_info.get_power_status_broken) |
1044 | return APM_32_UNSUPPORTED; | 1044 | return APM_32_UNSUPPORTED; |
1045 | if (apm_bios_call(&call)) | 1045 | if (apm_bios_call(&call)) { |
1046 | if (!call.err) | ||
1047 | return APM_NO_ERROR; | ||
1046 | return call.err; | 1048 | return call.err; |
1049 | } | ||
1047 | *status = call.ebx; | 1050 | *status = call.ebx; |
1048 | *bat = call.ecx; | 1051 | *bat = call.ecx; |
1049 | if (apm_info.get_power_status_swabinminutes) { | 1052 | if (apm_info.get_power_status_swabinminutes) { |
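The apm_32.c hunk separates "the BIOS call failed" from "the call reported nothing": when apm_bios_call() returns non-zero but call.err is 0, the function now returns APM_NO_ERROR instead of handing a zero error code back as if it were a status. A small stand-alone sketch of that error-mapping shape (the constant's value and the helper are stand-ins, not the real APM interface):

    #include <stdio.h>

    #define APM_NO_ERROR 0x53   /* stand-in value, not the kernel's definition */

    struct bios_call { int err; };

    /* pretend BIOS helper: a non-zero return means "something to report" */
    static int bios_call(struct bios_call *c, int fail, int err)
    {
        c->err = err;
        return fail;
    }

    static int get_power_status(int fail, int err)
    {
        struct bios_call call;

        if (bios_call(&call, fail, err)) {
            if (!call.err)
                return APM_NO_ERROR;   /* call "failed" but carried no error code */
            return call.err;
        }
        return 0;                      /* success: caller may read out the results */
    }

    int main(void)
    {
        printf("%d %d %d\n",
               get_power_status(0, 0),   /* clean success */
               get_power_status(1, 0),   /* non-zero return, err == 0 -> APM_NO_ERROR */
               get_power_status(1, 9));  /* real error code propagated */
        return 0;
    }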
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c index b81fe2d63e15..1e81a37c034e 100644 --- a/arch/x86/kernel/cpu/amd.c +++ b/arch/x86/kernel/cpu/amd.c | |||
@@ -347,7 +347,6 @@ static void amd_detect_cmp(struct cpuinfo_x86 *c) | |||
347 | #ifdef CONFIG_SMP | 347 | #ifdef CONFIG_SMP |
348 | unsigned bits; | 348 | unsigned bits; |
349 | int cpu = smp_processor_id(); | 349 | int cpu = smp_processor_id(); |
350 | unsigned int socket_id, core_complex_id; | ||
351 | 350 | ||
352 | bits = c->x86_coreid_bits; | 351 | bits = c->x86_coreid_bits; |
353 | /* Low order bits define the core id (index of core in socket) */ | 352 | /* Low order bits define the core id (index of core in socket) */ |
@@ -365,10 +364,7 @@ static void amd_detect_cmp(struct cpuinfo_x86 *c) | |||
365 | if (c->x86 != 0x17 || !cpuid_edx(0x80000006)) | 364 | if (c->x86 != 0x17 || !cpuid_edx(0x80000006)) |
366 | return; | 365 | return; |
367 | 366 | ||
368 | socket_id = (c->apicid >> bits) - 1; | 367 | per_cpu(cpu_llc_id, cpu) = c->apicid >> 3; |
369 | core_complex_id = (c->apicid & ((1 << bits) - 1)) >> 3; | ||
370 | |||
371 | per_cpu(cpu_llc_id, cpu) = (socket_id << 3) | core_complex_id; | ||
372 | #endif | 368 | #endif |
373 | } | 369 | } |
374 | 370 | ||
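For family 0x17 the hunk computes the last-level-cache id directly as apicid >> 3 rather than recombining socket and core-complex ids, so the eight APIC ids of one core complex map to a single llc_id. A quick stand-alone check of that arithmetic (the APIC id values are invented):

    #include <stdio.h>

    int main(void)
    {
        /* APIC ids 0..7 share llc_id 0, ids 8..15 share llc_id 1, and so on */
        for (unsigned int apicid = 0; apicid < 24; apicid++)
            printf("apicid %2u -> llc_id %u\n", apicid, apicid >> 3);
        return 0;
    }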
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c index 9bd910a7dd0a..cc9e980c68ec 100644 --- a/arch/x86/kernel/cpu/common.c +++ b/arch/x86/kernel/cpu/common.c | |||
@@ -979,6 +979,35 @@ static void x86_init_cache_qos(struct cpuinfo_x86 *c) | |||
979 | } | 979 | } |
980 | 980 | ||
981 | /* | 981 | /* |
982 | * The physical to logical package id mapping is initialized from the | ||
983 | * acpi/mptables information. Make sure that CPUID actually agrees with | ||
984 | * that. | ||
985 | */ | ||
986 | static void sanitize_package_id(struct cpuinfo_x86 *c) | ||
987 | { | ||
988 | #ifdef CONFIG_SMP | ||
989 | unsigned int pkg, apicid, cpu = smp_processor_id(); | ||
990 | |||
991 | apicid = apic->cpu_present_to_apicid(cpu); | ||
992 | pkg = apicid >> boot_cpu_data.x86_coreid_bits; | ||
993 | |||
994 | if (apicid != c->initial_apicid) { | ||
995 | pr_err(FW_BUG "CPU%u: APIC id mismatch. Firmware: %x CPUID: %x\n", | ||
996 | cpu, apicid, c->initial_apicid); | ||
997 | c->initial_apicid = apicid; | ||
998 | } | ||
999 | if (pkg != c->phys_proc_id) { | ||
1000 | pr_err(FW_BUG "CPU%u: Using firmware package id %u instead of %u\n", | ||
1001 | cpu, pkg, c->phys_proc_id); | ||
1002 | c->phys_proc_id = pkg; | ||
1003 | } | ||
1004 | c->logical_proc_id = topology_phys_to_logical_pkg(pkg); | ||
1005 | #else | ||
1006 | c->logical_proc_id = 0; | ||
1007 | #endif | ||
1008 | } | ||
1009 | |||
1010 | /* | ||
982 | * This does the hard work of actually picking apart the CPU stuff... | 1011 | * This does the hard work of actually picking apart the CPU stuff... |
983 | */ | 1012 | */ |
984 | static void identify_cpu(struct cpuinfo_x86 *c) | 1013 | static void identify_cpu(struct cpuinfo_x86 *c) |
@@ -1103,8 +1132,7 @@ static void identify_cpu(struct cpuinfo_x86 *c) | |||
1103 | #ifdef CONFIG_NUMA | 1132 | #ifdef CONFIG_NUMA |
1104 | numa_add_cpu(smp_processor_id()); | 1133 | numa_add_cpu(smp_processor_id()); |
1105 | #endif | 1134 | #endif |
1106 | /* The boot/hotplug time assigment got cleared, restore it */ | 1135 | sanitize_package_id(c); |
1107 | c->logical_proc_id = topology_phys_to_logical_pkg(c->phys_proc_id); | ||
1108 | } | 1136 | } |
1109 | 1137 | ||
1110 | /* | 1138 | /* |
diff --git a/arch/x86/platform/efi/efi.c b/arch/x86/platform/efi/efi.c index bf99aa7005eb..936a488d6cf6 100644 --- a/arch/x86/platform/efi/efi.c +++ b/arch/x86/platform/efi/efi.c | |||
@@ -861,7 +861,7 @@ static void __init __efi_enter_virtual_mode(void) | |||
861 | int count = 0, pg_shift = 0; | 861 | int count = 0, pg_shift = 0; |
862 | void *new_memmap = NULL; | 862 | void *new_memmap = NULL; |
863 | efi_status_t status; | 863 | efi_status_t status; |
864 | phys_addr_t pa; | 864 | unsigned long pa; |
865 | 865 | ||
866 | efi.systab = NULL; | 866 | efi.systab = NULL; |
867 | 867 | ||
diff --git a/arch/x86/platform/efi/efi_64.c b/arch/x86/platform/efi/efi_64.c index 58b0f801f66f..319148bd4b05 100644 --- a/arch/x86/platform/efi/efi_64.c +++ b/arch/x86/platform/efi/efi_64.c | |||
@@ -31,6 +31,7 @@ | |||
31 | #include <linux/io.h> | 31 | #include <linux/io.h> |
32 | #include <linux/reboot.h> | 32 | #include <linux/reboot.h> |
33 | #include <linux/slab.h> | 33 | #include <linux/slab.h> |
34 | #include <linux/ucs2_string.h> | ||
34 | 35 | ||
35 | #include <asm/setup.h> | 36 | #include <asm/setup.h> |
36 | #include <asm/page.h> | 37 | #include <asm/page.h> |
@@ -211,6 +212,35 @@ void efi_sync_low_kernel_mappings(void) | |||
211 | memcpy(pud_efi, pud_k, sizeof(pud_t) * num_entries); | 212 | memcpy(pud_efi, pud_k, sizeof(pud_t) * num_entries); |
212 | } | 213 | } |
213 | 214 | ||
215 | /* | ||
216 | * Wrapper for slow_virt_to_phys() that handles NULL addresses. | ||
217 | */ | ||
218 | static inline phys_addr_t | ||
219 | virt_to_phys_or_null_size(void *va, unsigned long size) | ||
220 | { | ||
221 | bool bad_size; | ||
222 | |||
223 | if (!va) | ||
224 | return 0; | ||
225 | |||
226 | if (virt_addr_valid(va)) | ||
227 | return virt_to_phys(va); | ||
228 | |||
229 | /* | ||
230 | * A fully aligned variable on the stack is guaranteed not to | ||
231 | * cross a page boundary. Try to catch strings on the stack by | ||
232 | * checking that 'size' is a power of two. | ||
233 | */ | ||
234 | bad_size = size > PAGE_SIZE || !is_power_of_2(size); | ||
235 | |||
236 | WARN_ON(!IS_ALIGNED((unsigned long)va, size) || bad_size); | ||
237 | |||
238 | return slow_virt_to_phys(va); | ||
239 | } | ||
240 | |||
241 | #define virt_to_phys_or_null(addr) \ | ||
242 | virt_to_phys_or_null_size((addr), sizeof(*(addr))) | ||
243 | |||
214 | int __init efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages) | 244 | int __init efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages) |
215 | { | 245 | { |
216 | unsigned long pfn, text; | 246 | unsigned long pfn, text; |
@@ -494,8 +524,8 @@ static efi_status_t efi_thunk_get_time(efi_time_t *tm, efi_time_cap_t *tc) | |||
494 | 524 | ||
495 | spin_lock(&rtc_lock); | 525 | spin_lock(&rtc_lock); |
496 | 526 | ||
497 | phys_tm = virt_to_phys(tm); | 527 | phys_tm = virt_to_phys_or_null(tm); |
498 | phys_tc = virt_to_phys(tc); | 528 | phys_tc = virt_to_phys_or_null(tc); |
499 | 529 | ||
500 | status = efi_thunk(get_time, phys_tm, phys_tc); | 530 | status = efi_thunk(get_time, phys_tm, phys_tc); |
501 | 531 | ||
@@ -511,7 +541,7 @@ static efi_status_t efi_thunk_set_time(efi_time_t *tm) | |||
511 | 541 | ||
512 | spin_lock(&rtc_lock); | 542 | spin_lock(&rtc_lock); |
513 | 543 | ||
514 | phys_tm = virt_to_phys(tm); | 544 | phys_tm = virt_to_phys_or_null(tm); |
515 | 545 | ||
516 | status = efi_thunk(set_time, phys_tm); | 546 | status = efi_thunk(set_time, phys_tm); |
517 | 547 | ||
@@ -529,9 +559,9 @@ efi_thunk_get_wakeup_time(efi_bool_t *enabled, efi_bool_t *pending, | |||
529 | 559 | ||
530 | spin_lock(&rtc_lock); | 560 | spin_lock(&rtc_lock); |
531 | 561 | ||
532 | phys_enabled = virt_to_phys(enabled); | 562 | phys_enabled = virt_to_phys_or_null(enabled); |
533 | phys_pending = virt_to_phys(pending); | 563 | phys_pending = virt_to_phys_or_null(pending); |
534 | phys_tm = virt_to_phys(tm); | 564 | phys_tm = virt_to_phys_or_null(tm); |
535 | 565 | ||
536 | status = efi_thunk(get_wakeup_time, phys_enabled, | 566 | status = efi_thunk(get_wakeup_time, phys_enabled, |
537 | phys_pending, phys_tm); | 567 | phys_pending, phys_tm); |
@@ -549,7 +579,7 @@ efi_thunk_set_wakeup_time(efi_bool_t enabled, efi_time_t *tm) | |||
549 | 579 | ||
550 | spin_lock(&rtc_lock); | 580 | spin_lock(&rtc_lock); |
551 | 581 | ||
552 | phys_tm = virt_to_phys(tm); | 582 | phys_tm = virt_to_phys_or_null(tm); |
553 | 583 | ||
554 | status = efi_thunk(set_wakeup_time, enabled, phys_tm); | 584 | status = efi_thunk(set_wakeup_time, enabled, phys_tm); |
555 | 585 | ||
@@ -558,6 +588,10 @@ efi_thunk_set_wakeup_time(efi_bool_t enabled, efi_time_t *tm) | |||
558 | return status; | 588 | return status; |
559 | } | 589 | } |
560 | 590 | ||
591 | static unsigned long efi_name_size(efi_char16_t *name) | ||
592 | { | ||
593 | return ucs2_strsize(name, EFI_VAR_NAME_LEN) + 1; | ||
594 | } | ||
561 | 595 | ||
562 | static efi_status_t | 596 | static efi_status_t |
563 | efi_thunk_get_variable(efi_char16_t *name, efi_guid_t *vendor, | 597 | efi_thunk_get_variable(efi_char16_t *name, efi_guid_t *vendor, |
@@ -567,11 +601,11 @@ efi_thunk_get_variable(efi_char16_t *name, efi_guid_t *vendor, | |||
567 | u32 phys_name, phys_vendor, phys_attr; | 601 | u32 phys_name, phys_vendor, phys_attr; |
568 | u32 phys_data_size, phys_data; | 602 | u32 phys_data_size, phys_data; |
569 | 603 | ||
570 | phys_data_size = virt_to_phys(data_size); | 604 | phys_data_size = virt_to_phys_or_null(data_size); |
571 | phys_vendor = virt_to_phys(vendor); | 605 | phys_vendor = virt_to_phys_or_null(vendor); |
572 | phys_name = virt_to_phys(name); | 606 | phys_name = virt_to_phys_or_null_size(name, efi_name_size(name)); |
573 | phys_attr = virt_to_phys(attr); | 607 | phys_attr = virt_to_phys_or_null(attr); |
574 | phys_data = virt_to_phys(data); | 608 | phys_data = virt_to_phys_or_null_size(data, *data_size); |
575 | 609 | ||
576 | status = efi_thunk(get_variable, phys_name, phys_vendor, | 610 | status = efi_thunk(get_variable, phys_name, phys_vendor, |
577 | phys_attr, phys_data_size, phys_data); | 611 | phys_attr, phys_data_size, phys_data); |
@@ -586,9 +620,9 @@ efi_thunk_set_variable(efi_char16_t *name, efi_guid_t *vendor, | |||
586 | u32 phys_name, phys_vendor, phys_data; | 620 | u32 phys_name, phys_vendor, phys_data; |
587 | efi_status_t status; | 621 | efi_status_t status; |
588 | 622 | ||
589 | phys_name = virt_to_phys(name); | 623 | phys_name = virt_to_phys_or_null_size(name, efi_name_size(name)); |
590 | phys_vendor = virt_to_phys(vendor); | 624 | phys_vendor = virt_to_phys_or_null(vendor); |
591 | phys_data = virt_to_phys(data); | 625 | phys_data = virt_to_phys_or_null_size(data, data_size); |
592 | 626 | ||
593 | /* If data_size is > sizeof(u32) we've got problems */ | 627 | /* If data_size is > sizeof(u32) we've got problems */ |
594 | status = efi_thunk(set_variable, phys_name, phys_vendor, | 628 | status = efi_thunk(set_variable, phys_name, phys_vendor, |
@@ -605,9 +639,9 @@ efi_thunk_get_next_variable(unsigned long *name_size, | |||
605 | efi_status_t status; | 639 | efi_status_t status; |
606 | u32 phys_name_size, phys_name, phys_vendor; | 640 | u32 phys_name_size, phys_name, phys_vendor; |
607 | 641 | ||
608 | phys_name_size = virt_to_phys(name_size); | 642 | phys_name_size = virt_to_phys_or_null(name_size); |
609 | phys_vendor = virt_to_phys(vendor); | 643 | phys_vendor = virt_to_phys_or_null(vendor); |
610 | phys_name = virt_to_phys(name); | 644 | phys_name = virt_to_phys_or_null_size(name, *name_size); |
611 | 645 | ||
612 | status = efi_thunk(get_next_variable, phys_name_size, | 646 | status = efi_thunk(get_next_variable, phys_name_size, |
613 | phys_name, phys_vendor); | 647 | phys_name, phys_vendor); |
@@ -621,7 +655,7 @@ efi_thunk_get_next_high_mono_count(u32 *count) | |||
621 | efi_status_t status; | 655 | efi_status_t status; |
622 | u32 phys_count; | 656 | u32 phys_count; |
623 | 657 | ||
624 | phys_count = virt_to_phys(count); | 658 | phys_count = virt_to_phys_or_null(count); |
625 | status = efi_thunk(get_next_high_mono_count, phys_count); | 659 | status = efi_thunk(get_next_high_mono_count, phys_count); |
626 | 660 | ||
627 | return status; | 661 | return status; |
@@ -633,7 +667,7 @@ efi_thunk_reset_system(int reset_type, efi_status_t status, | |||
633 | { | 667 | { |
634 | u32 phys_data; | 668 | u32 phys_data; |
635 | 669 | ||
636 | phys_data = virt_to_phys(data); | 670 | phys_data = virt_to_phys_or_null_size(data, data_size); |
637 | 671 | ||
638 | efi_thunk(reset_system, reset_type, status, data_size, phys_data); | 672 | efi_thunk(reset_system, reset_type, status, data_size, phys_data); |
639 | } | 673 | } |
@@ -661,9 +695,9 @@ efi_thunk_query_variable_info(u32 attr, u64 *storage_space, | |||
661 | if (efi.runtime_version < EFI_2_00_SYSTEM_TABLE_REVISION) | 695 | if (efi.runtime_version < EFI_2_00_SYSTEM_TABLE_REVISION) |
662 | return EFI_UNSUPPORTED; | 696 | return EFI_UNSUPPORTED; |
663 | 697 | ||
664 | phys_storage = virt_to_phys(storage_space); | 698 | phys_storage = virt_to_phys_or_null(storage_space); |
665 | phys_remaining = virt_to_phys(remaining_space); | 699 | phys_remaining = virt_to_phys_or_null(remaining_space); |
666 | phys_max = virt_to_phys(max_variable_size); | 700 | phys_max = virt_to_phys_or_null(max_variable_size); |
667 | 701 | ||
668 | status = efi_thunk(query_variable_info, attr, phys_storage, | 702 | status = efi_thunk(query_variable_info, attr, phys_storage, |
669 | phys_remaining, phys_max); | 703 | phys_remaining, phys_max); |
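virt_to_phys_or_null_size() above only warns when it cannot show that the object sits inside a single page: a naturally aligned object whose size is a power of two no larger than PAGE_SIZE can never straddle a page boundary, so translating its start address covers the whole thing. A stand-alone check of that claim (PAGE_SIZE is assumed to be 4096 here purely for the illustration):

    #include <stdio.h>
    #include <stdint.h>
    #include <stdbool.h>

    #define PAGE_SIZE 4096u   /* assumed page size for the illustration */

    /* true if the object [va, va + size) stays inside a single page */
    static bool fits_in_one_page(uintptr_t va, unsigned long size)
    {
        return (va / PAGE_SIZE) == ((va + size - 1) / PAGE_SIZE);
    }

    int main(void)
    {
        /* exhaustively test every aligned placement of every power-of-two size */
        for (unsigned long size = 1; size <= PAGE_SIZE; size <<= 1)
            for (uintptr_t va = 0; va < 8 * PAGE_SIZE; va += size)
                if (!fits_in_one_page(va, size)) {
                    printf("counterexample: va=%#lx size=%lu\n",
                           (unsigned long)va, size);
                    return 1;
                }
        printf("no aligned power-of-two object up to PAGE_SIZE crosses a page\n");
        return 0;
    }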
diff --git a/arch/x86/platform/intel-mid/pwr.c b/arch/x86/platform/intel-mid/pwr.c index 5d3b45ad1c03..67375dda451c 100644 --- a/arch/x86/platform/intel-mid/pwr.c +++ b/arch/x86/platform/intel-mid/pwr.c | |||
@@ -272,6 +272,25 @@ int intel_mid_pci_set_power_state(struct pci_dev *pdev, pci_power_t state) | |||
272 | } | 272 | } |
273 | EXPORT_SYMBOL_GPL(intel_mid_pci_set_power_state); | 273 | EXPORT_SYMBOL_GPL(intel_mid_pci_set_power_state); |
274 | 274 | ||
275 | pci_power_t intel_mid_pci_get_power_state(struct pci_dev *pdev) | ||
276 | { | ||
277 | struct mid_pwr *pwr = midpwr; | ||
278 | int id, reg, bit; | ||
279 | u32 power; | ||
280 | |||
281 | if (!pwr || !pwr->available) | ||
282 | return PCI_UNKNOWN; | ||
283 | |||
284 | id = intel_mid_pwr_get_lss_id(pdev); | ||
285 | if (id < 0) | ||
286 | return PCI_UNKNOWN; | ||
287 | |||
288 | reg = (id * LSS_PWS_BITS) / 32; | ||
289 | bit = (id * LSS_PWS_BITS) % 32; | ||
290 | power = mid_pwr_get_state(pwr, reg); | ||
291 | return (__force pci_power_t)((power >> bit) & 3); | ||
292 | } | ||
293 | |||
275 | void intel_mid_pwr_power_off(void) | 294 | void intel_mid_pwr_power_off(void) |
276 | { | 295 | { |
277 | struct mid_pwr *pwr = midpwr; | 296 | struct mid_pwr *pwr = midpwr; |
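intel_mid_pci_get_power_state() above treats the power-state registers as an array of 32-bit words packed with LSS_PWS_BITS-wide fields, locating the word and bit offset from the LSS id and masking out two bits. A stand-alone sketch of the same packed-field extraction (the register contents and the field width are illustrative assumptions):

    #include <stdio.h>
    #include <stdint.h>

    #define LSS_PWS_BITS 2   /* assumed field width, as implied by the "& 3" in the hunk */

    static uint32_t regs[2] = { 0xE4E4E4E4u, 0x1B1B1B1Bu };  /* made-up register contents */

    static unsigned int lss_power_state(unsigned int id)
    {
        unsigned int reg = (id * LSS_PWS_BITS) / 32;   /* which 32-bit word */
        unsigned int bit = (id * LSS_PWS_BITS) % 32;   /* where the field starts */

        return (regs[reg] >> bit) & 3;                 /* 2-bit state value */
    }

    int main(void)
    {
        for (unsigned int id = 0; id < 20; id++)
            printf("LSS %2u -> state %u\n", id, lss_power_state(id));
        return 0;
    }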
diff --git a/arch/xtensa/include/uapi/asm/unistd.h b/arch/xtensa/include/uapi/asm/unistd.h index de9b14b2d348..cd400af4a6b2 100644 --- a/arch/xtensa/include/uapi/asm/unistd.h +++ b/arch/xtensa/include/uapi/asm/unistd.h | |||
@@ -767,7 +767,14 @@ __SYSCALL(346, sys_preadv2, 6) | |||
767 | #define __NR_pwritev2 347 | 767 | #define __NR_pwritev2 347 |
768 | __SYSCALL(347, sys_pwritev2, 6) | 768 | __SYSCALL(347, sys_pwritev2, 6) |
769 | 769 | ||
770 | #define __NR_syscall_count 348 | 770 | #define __NR_pkey_mprotect 348 |
771 | __SYSCALL(348, sys_pkey_mprotect, 4) | ||
772 | #define __NR_pkey_alloc 349 | ||
773 | __SYSCALL(349, sys_pkey_alloc, 2) | ||
774 | #define __NR_pkey_free 350 | ||
775 | __SYSCALL(350, sys_pkey_free, 1) | ||
776 | |||
777 | #define __NR_syscall_count 351 | ||
771 | 778 | ||
772 | /* | 779 | /* |
773 | * sysxtensa syscall handler | 780 | * sysxtensa syscall handler |
diff --git a/arch/xtensa/kernel/time.c b/arch/xtensa/kernel/time.c index 9a5bcd0381a7..be81e69b25bc 100644 --- a/arch/xtensa/kernel/time.c +++ b/arch/xtensa/kernel/time.c | |||
@@ -172,10 +172,11 @@ void __init time_init(void) | |||
172 | { | 172 | { |
173 | of_clk_init(NULL); | 173 | of_clk_init(NULL); |
174 | #ifdef CONFIG_XTENSA_CALIBRATE_CCOUNT | 174 | #ifdef CONFIG_XTENSA_CALIBRATE_CCOUNT |
175 | printk("Calibrating CPU frequency "); | 175 | pr_info("Calibrating CPU frequency "); |
176 | calibrate_ccount(); | 176 | calibrate_ccount(); |
177 | printk("%d.%02d MHz\n", (int)ccount_freq/1000000, | 177 | pr_cont("%d.%02d MHz\n", |
178 | (int)(ccount_freq/10000)%100); | 178 | (int)ccount_freq / 1000000, |
179 | (int)(ccount_freq / 10000) % 100); | ||
179 | #else | 180 | #else |
180 | ccount_freq = CONFIG_XTENSA_CPU_CLOCK*1000000UL; | 181 | ccount_freq = CONFIG_XTENSA_CPU_CLOCK*1000000UL; |
181 | #endif | 182 | #endif |
@@ -210,9 +211,8 @@ irqreturn_t timer_interrupt(int irq, void *dev_id) | |||
210 | void calibrate_delay(void) | 211 | void calibrate_delay(void) |
211 | { | 212 | { |
212 | loops_per_jiffy = ccount_freq / HZ; | 213 | loops_per_jiffy = ccount_freq / HZ; |
213 | printk("Calibrating delay loop (skipped)... " | 214 | pr_info("Calibrating delay loop (skipped)... %lu.%02lu BogoMIPS preset\n", |
214 | "%lu.%02lu BogoMIPS preset\n", | 215 | loops_per_jiffy / (1000000 / HZ), |
215 | loops_per_jiffy/(1000000/HZ), | 216 | (loops_per_jiffy / (10000 / HZ)) % 100); |
216 | (loops_per_jiffy/(10000/HZ)) % 100); | ||
217 | } | 217 | } |
218 | #endif | 218 | #endif |
diff --git a/arch/xtensa/kernel/traps.c b/arch/xtensa/kernel/traps.c index d02fc304b31c..ce37d5b899fe 100644 --- a/arch/xtensa/kernel/traps.c +++ b/arch/xtensa/kernel/traps.c | |||
@@ -465,26 +465,25 @@ void show_regs(struct pt_regs * regs) | |||
465 | 465 | ||
466 | for (i = 0; i < 16; i++) { | 466 | for (i = 0; i < 16; i++) { |
467 | if ((i % 8) == 0) | 467 | if ((i % 8) == 0) |
468 | printk(KERN_INFO "a%02d:", i); | 468 | pr_info("a%02d:", i); |
469 | printk(KERN_CONT " %08lx", regs->areg[i]); | 469 | pr_cont(" %08lx", regs->areg[i]); |
470 | } | 470 | } |
471 | printk(KERN_CONT "\n"); | 471 | pr_cont("\n"); |
472 | 472 | pr_info("pc: %08lx, ps: %08lx, depc: %08lx, excvaddr: %08lx\n", | |
473 | printk("pc: %08lx, ps: %08lx, depc: %08lx, excvaddr: %08lx\n", | 473 | regs->pc, regs->ps, regs->depc, regs->excvaddr); |
474 | regs->pc, regs->ps, regs->depc, regs->excvaddr); | 474 | pr_info("lbeg: %08lx, lend: %08lx lcount: %08lx, sar: %08lx\n", |
475 | printk("lbeg: %08lx, lend: %08lx lcount: %08lx, sar: %08lx\n", | 475 | regs->lbeg, regs->lend, regs->lcount, regs->sar); |
476 | regs->lbeg, regs->lend, regs->lcount, regs->sar); | ||
477 | if (user_mode(regs)) | 476 | if (user_mode(regs)) |
478 | printk("wb: %08lx, ws: %08lx, wmask: %08lx, syscall: %ld\n", | 477 | pr_cont("wb: %08lx, ws: %08lx, wmask: %08lx, syscall: %ld\n", |
479 | regs->windowbase, regs->windowstart, regs->wmask, | 478 | regs->windowbase, regs->windowstart, regs->wmask, |
480 | regs->syscall); | 479 | regs->syscall); |
481 | } | 480 | } |
482 | 481 | ||
483 | static int show_trace_cb(struct stackframe *frame, void *data) | 482 | static int show_trace_cb(struct stackframe *frame, void *data) |
484 | { | 483 | { |
485 | if (kernel_text_address(frame->pc)) { | 484 | if (kernel_text_address(frame->pc)) { |
486 | printk(" [<%08lx>] ", frame->pc); | 485 | pr_cont(" [<%08lx>]", frame->pc); |
487 | print_symbol("%s\n", frame->pc); | 486 | print_symbol(" %s\n", frame->pc); |
488 | } | 487 | } |
489 | return 0; | 488 | return 0; |
490 | } | 489 | } |
@@ -494,19 +493,13 @@ void show_trace(struct task_struct *task, unsigned long *sp) | |||
494 | if (!sp) | 493 | if (!sp) |
495 | sp = stack_pointer(task); | 494 | sp = stack_pointer(task); |
496 | 495 | ||
497 | printk("Call Trace:"); | 496 | pr_info("Call Trace:\n"); |
498 | #ifdef CONFIG_KALLSYMS | ||
499 | printk("\n"); | ||
500 | #endif | ||
501 | walk_stackframe(sp, show_trace_cb, NULL); | 497 | walk_stackframe(sp, show_trace_cb, NULL); |
502 | printk("\n"); | 498 | #ifndef CONFIG_KALLSYMS |
499 | pr_cont("\n"); | ||
500 | #endif | ||
503 | } | 501 | } |
504 | 502 | ||
505 | /* | ||
506 | * This routine abuses get_user()/put_user() to reference pointers | ||
507 | * with at least a bit of error checking ... | ||
508 | */ | ||
509 | |||
510 | static int kstack_depth_to_print = 24; | 503 | static int kstack_depth_to_print = 24; |
511 | 504 | ||
512 | void show_stack(struct task_struct *task, unsigned long *sp) | 505 | void show_stack(struct task_struct *task, unsigned long *sp) |
@@ -518,52 +511,29 @@ void show_stack(struct task_struct *task, unsigned long *sp) | |||
518 | sp = stack_pointer(task); | 511 | sp = stack_pointer(task); |
519 | stack = sp; | 512 | stack = sp; |
520 | 513 | ||
521 | printk("\nStack: "); | 514 | pr_info("Stack:\n"); |
522 | 515 | ||
523 | for (i = 0; i < kstack_depth_to_print; i++) { | 516 | for (i = 0; i < kstack_depth_to_print; i++) { |
524 | if (kstack_end(sp)) | 517 | if (kstack_end(sp)) |
525 | break; | 518 | break; |
526 | if (i && ((i % 8) == 0)) | 519 | pr_cont(" %08lx", *sp++); |
527 | printk("\n "); | 520 | if (i % 8 == 7) |
528 | printk("%08lx ", *sp++); | 521 | pr_cont("\n"); |
529 | } | 522 | } |
530 | printk("\n"); | ||
531 | show_trace(task, stack); | 523 | show_trace(task, stack); |
532 | } | 524 | } |
533 | 525 | ||
534 | void show_code(unsigned int *pc) | ||
535 | { | ||
536 | long i; | ||
537 | |||
538 | printk("\nCode:"); | ||
539 | |||
540 | for(i = -3 ; i < 6 ; i++) { | ||
541 | unsigned long insn; | ||
542 | if (__get_user(insn, pc + i)) { | ||
543 | printk(" (Bad address in pc)\n"); | ||
544 | break; | ||
545 | } | ||
546 | printk("%c%08lx%c",(i?' ':'<'),insn,(i?' ':'>')); | ||
547 | } | ||
548 | } | ||
549 | |||
550 | DEFINE_SPINLOCK(die_lock); | 526 | DEFINE_SPINLOCK(die_lock); |
551 | 527 | ||
552 | void die(const char * str, struct pt_regs * regs, long err) | 528 | void die(const char * str, struct pt_regs * regs, long err) |
553 | { | 529 | { |
554 | static int die_counter; | 530 | static int die_counter; |
555 | int nl = 0; | ||
556 | 531 | ||
557 | console_verbose(); | 532 | console_verbose(); |
558 | spin_lock_irq(&die_lock); | 533 | spin_lock_irq(&die_lock); |
559 | 534 | ||
560 | printk("%s: sig: %ld [#%d]\n", str, err, ++die_counter); | 535 | pr_info("%s: sig: %ld [#%d]%s\n", str, err, ++die_counter, |
561 | #ifdef CONFIG_PREEMPT | 536 | IS_ENABLED(CONFIG_PREEMPT) ? " PREEMPT" : ""); |
562 | printk("PREEMPT "); | ||
563 | nl = 1; | ||
564 | #endif | ||
565 | if (nl) | ||
566 | printk("\n"); | ||
567 | show_regs(regs); | 537 | show_regs(regs); |
568 | if (!user_mode(regs)) | 538 | if (!user_mode(regs)) |
569 | show_stack(NULL, (unsigned long*)regs->areg[1]); | 539 | show_stack(NULL, (unsigned long*)regs->areg[1]); |
diff --git a/drivers/acpi/acpi_apd.c b/drivers/acpi/acpi_apd.c index d58fbf7f04e6..7dd70927991e 100644 --- a/drivers/acpi/acpi_apd.c +++ b/drivers/acpi/acpi_apd.c | |||
@@ -122,7 +122,7 @@ static int acpi_apd_create_device(struct acpi_device *adev, | |||
122 | int ret; | 122 | int ret; |
123 | 123 | ||
124 | if (!dev_desc) { | 124 | if (!dev_desc) { |
125 | pdev = acpi_create_platform_device(adev); | 125 | pdev = acpi_create_platform_device(adev, NULL); |
126 | return IS_ERR_OR_NULL(pdev) ? PTR_ERR(pdev) : 1; | 126 | return IS_ERR_OR_NULL(pdev) ? PTR_ERR(pdev) : 1; |
127 | } | 127 | } |
128 | 128 | ||
@@ -139,14 +139,8 @@ static int acpi_apd_create_device(struct acpi_device *adev, | |||
139 | goto err_out; | 139 | goto err_out; |
140 | } | 140 | } |
141 | 141 | ||
142 | if (dev_desc->properties) { | ||
143 | ret = device_add_properties(&adev->dev, dev_desc->properties); | ||
144 | if (ret) | ||
145 | goto err_out; | ||
146 | } | ||
147 | |||
148 | adev->driver_data = pdata; | 142 | adev->driver_data = pdata; |
149 | pdev = acpi_create_platform_device(adev); | 143 | pdev = acpi_create_platform_device(adev, dev_desc->properties); |
150 | if (!IS_ERR_OR_NULL(pdev)) | 144 | if (!IS_ERR_OR_NULL(pdev)) |
151 | return 1; | 145 | return 1; |
152 | 146 | ||
diff --git a/drivers/acpi/acpi_lpss.c b/drivers/acpi/acpi_lpss.c index 552010288135..373657f7e35a 100644 --- a/drivers/acpi/acpi_lpss.c +++ b/drivers/acpi/acpi_lpss.c | |||
@@ -395,7 +395,7 @@ static int acpi_lpss_create_device(struct acpi_device *adev, | |||
395 | 395 | ||
396 | dev_desc = (const struct lpss_device_desc *)id->driver_data; | 396 | dev_desc = (const struct lpss_device_desc *)id->driver_data; |
397 | if (!dev_desc) { | 397 | if (!dev_desc) { |
398 | pdev = acpi_create_platform_device(adev); | 398 | pdev = acpi_create_platform_device(adev, NULL); |
399 | return IS_ERR_OR_NULL(pdev) ? PTR_ERR(pdev) : 1; | 399 | return IS_ERR_OR_NULL(pdev) ? PTR_ERR(pdev) : 1; |
400 | } | 400 | } |
401 | pdata = kzalloc(sizeof(*pdata), GFP_KERNEL); | 401 | pdata = kzalloc(sizeof(*pdata), GFP_KERNEL); |
@@ -451,14 +451,8 @@ static int acpi_lpss_create_device(struct acpi_device *adev, | |||
451 | goto err_out; | 451 | goto err_out; |
452 | } | 452 | } |
453 | 453 | ||
454 | if (dev_desc->properties) { | ||
455 | ret = device_add_properties(&adev->dev, dev_desc->properties); | ||
456 | if (ret) | ||
457 | goto err_out; | ||
458 | } | ||
459 | |||
460 | adev->driver_data = pdata; | 454 | adev->driver_data = pdata; |
461 | pdev = acpi_create_platform_device(adev); | 455 | pdev = acpi_create_platform_device(adev, dev_desc->properties); |
462 | if (!IS_ERR_OR_NULL(pdev)) { | 456 | if (!IS_ERR_OR_NULL(pdev)) { |
463 | return 1; | 457 | return 1; |
464 | } | 458 | } |
diff --git a/drivers/acpi/acpi_platform.c b/drivers/acpi/acpi_platform.c index b200ae1f3c6f..b4c1a6a51da4 100644 --- a/drivers/acpi/acpi_platform.c +++ b/drivers/acpi/acpi_platform.c | |||
@@ -50,6 +50,7 @@ static void acpi_platform_fill_resource(struct acpi_device *adev, | |||
50 | /** | 50 | /** |
51 | * acpi_create_platform_device - Create platform device for ACPI device node | 51 | * acpi_create_platform_device - Create platform device for ACPI device node |
52 | * @adev: ACPI device node to create a platform device for. | 52 | * @adev: ACPI device node to create a platform device for. |
53 | * @properties: Optional collection of built-in properties. | ||
53 | * | 54 | * |
54 | * Check if the given @adev can be represented as a platform device and, if | 55 | * Check if the given @adev can be represented as a platform device and, if |
55 | * that's the case, create and register a platform device, populate its common | 56 | * that's the case, create and register a platform device, populate its common |
@@ -57,7 +58,8 @@ static void acpi_platform_fill_resource(struct acpi_device *adev, | |||
57 | * | 58 | * |
58 | * Name of the platform device will be the same as @adev's. | 59 | * Name of the platform device will be the same as @adev's. |
59 | */ | 60 | */ |
60 | struct platform_device *acpi_create_platform_device(struct acpi_device *adev) | 61 | struct platform_device *acpi_create_platform_device(struct acpi_device *adev, |
62 | struct property_entry *properties) | ||
61 | { | 63 | { |
62 | struct platform_device *pdev = NULL; | 64 | struct platform_device *pdev = NULL; |
63 | struct platform_device_info pdevinfo; | 65 | struct platform_device_info pdevinfo; |
@@ -106,6 +108,7 @@ struct platform_device *acpi_create_platform_device(struct acpi_device *adev) | |||
106 | pdevinfo.res = resources; | 108 | pdevinfo.res = resources; |
107 | pdevinfo.num_res = count; | 109 | pdevinfo.num_res = count; |
108 | pdevinfo.fwnode = acpi_fwnode_handle(adev); | 110 | pdevinfo.fwnode = acpi_fwnode_handle(adev); |
111 | pdevinfo.properties = properties; | ||
109 | 112 | ||
110 | if (acpi_dma_supported(adev)) | 113 | if (acpi_dma_supported(adev)) |
111 | pdevinfo.dma_mask = DMA_BIT_MASK(32); | 114 | pdevinfo.dma_mask = DMA_BIT_MASK(32); |
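With the new second argument, a caller hands its built-in property table straight to acpi_create_platform_device() and the core forwards it through platform_device_info, instead of registering the properties on the ACPI device beforehand (which is what the acpi_apd and acpi_lpss hunks above stop doing). A hedged kernel-style sketch of a caller; the table contents are invented, only the call shape follows the diff:

    /* Sketch: built-in properties handed to the helper instead of device_add_properties(). */
    static struct property_entry example_properties[] = {
        PROPERTY_ENTRY_U32("clock-frequency", 48000000),  /* invented example property */
        { }                                               /* zero terminator */
    };

    static int example_create_device(struct acpi_device *adev)
    {
        struct platform_device *pdev;

        pdev = acpi_create_platform_device(adev, example_properties);
        return IS_ERR_OR_NULL(pdev) ? PTR_ERR(pdev) : 1;
    }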
diff --git a/drivers/acpi/dptf/int340x_thermal.c b/drivers/acpi/dptf/int340x_thermal.c index 33505c651f62..86364097e236 100644 --- a/drivers/acpi/dptf/int340x_thermal.c +++ b/drivers/acpi/dptf/int340x_thermal.c | |||
@@ -34,11 +34,11 @@ static int int340x_thermal_handler_attach(struct acpi_device *adev, | |||
34 | const struct acpi_device_id *id) | 34 | const struct acpi_device_id *id) |
35 | { | 35 | { |
36 | if (IS_ENABLED(CONFIG_INT340X_THERMAL)) | 36 | if (IS_ENABLED(CONFIG_INT340X_THERMAL)) |
37 | acpi_create_platform_device(adev); | 37 | acpi_create_platform_device(adev, NULL); |
38 | /* Intel SoC DTS thermal driver needs INT3401 to set IRQ descriptor */ | 38 | /* Intel SoC DTS thermal driver needs INT3401 to set IRQ descriptor */ |
39 | else if (IS_ENABLED(CONFIG_INTEL_SOC_DTS_THERMAL) && | 39 | else if (IS_ENABLED(CONFIG_INTEL_SOC_DTS_THERMAL) && |
40 | id->driver_data == INT3401_DEVICE) | 40 | id->driver_data == INT3401_DEVICE) |
41 | acpi_create_platform_device(adev); | 41 | acpi_create_platform_device(adev, NULL); |
42 | return 1; | 42 | return 1; |
43 | } | 43 | } |
44 | 44 | ||
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c index 035ac646d8db..3d1856f1f4d0 100644 --- a/drivers/acpi/scan.c +++ b/drivers/acpi/scan.c | |||
@@ -1734,7 +1734,7 @@ static void acpi_default_enumeration(struct acpi_device *device) | |||
1734 | &is_spi_i2c_slave); | 1734 | &is_spi_i2c_slave); |
1735 | acpi_dev_free_resource_list(&resource_list); | 1735 | acpi_dev_free_resource_list(&resource_list); |
1736 | if (!is_spi_i2c_slave) { | 1736 | if (!is_spi_i2c_slave) { |
1737 | acpi_create_platform_device(device); | 1737 | acpi_create_platform_device(device, NULL); |
1738 | acpi_device_set_enumerated(device); | 1738 | acpi_device_set_enumerated(device); |
1739 | } else { | 1739 | } else { |
1740 | blocking_notifier_call_chain(&acpi_reconfig_chain, | 1740 | blocking_notifier_call_chain(&acpi_reconfig_chain, |
diff --git a/drivers/base/dd.c b/drivers/base/dd.c index d22a7260f42b..d76cd97a98b6 100644 --- a/drivers/base/dd.c +++ b/drivers/base/dd.c | |||
@@ -324,7 +324,8 @@ static int really_probe(struct device *dev, struct device_driver *drv) | |||
324 | { | 324 | { |
325 | int ret = -EPROBE_DEFER; | 325 | int ret = -EPROBE_DEFER; |
326 | int local_trigger_count = atomic_read(&deferred_trigger_count); | 326 | int local_trigger_count = atomic_read(&deferred_trigger_count); |
327 | bool test_remove = IS_ENABLED(CONFIG_DEBUG_TEST_DRIVER_REMOVE); | 327 | bool test_remove = IS_ENABLED(CONFIG_DEBUG_TEST_DRIVER_REMOVE) && |
328 | !drv->suppress_bind_attrs; | ||
328 | 329 | ||
329 | if (defer_all_probes) { | 330 | if (defer_all_probes) { |
330 | /* | 331 | /* |
@@ -383,7 +384,7 @@ re_probe: | |||
383 | if (test_remove) { | 384 | if (test_remove) { |
384 | test_remove = false; | 385 | test_remove = false; |
385 | 386 | ||
386 | if (dev->bus && dev->bus->remove) | 387 | if (dev->bus->remove) |
387 | dev->bus->remove(dev); | 388 | dev->bus->remove(dev); |
388 | else if (drv->remove) | 389 | else if (drv->remove) |
389 | drv->remove(dev); | 390 | drv->remove(dev); |
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c index e44944f4be77..2932a5bd892f 100644 --- a/drivers/base/power/main.c +++ b/drivers/base/power/main.c | |||
@@ -1027,6 +1027,8 @@ static int __device_suspend_noirq(struct device *dev, pm_message_t state, bool a | |||
1027 | TRACE_DEVICE(dev); | 1027 | TRACE_DEVICE(dev); |
1028 | TRACE_SUSPEND(0); | 1028 | TRACE_SUSPEND(0); |
1029 | 1029 | ||
1030 | dpm_wait_for_children(dev, async); | ||
1031 | |||
1030 | if (async_error) | 1032 | if (async_error) |
1031 | goto Complete; | 1033 | goto Complete; |
1032 | 1034 | ||
@@ -1038,8 +1040,6 @@ static int __device_suspend_noirq(struct device *dev, pm_message_t state, bool a | |||
1038 | if (dev->power.syscore || dev->power.direct_complete) | 1040 | if (dev->power.syscore || dev->power.direct_complete) |
1039 | goto Complete; | 1041 | goto Complete; |
1040 | 1042 | ||
1041 | dpm_wait_for_children(dev, async); | ||
1042 | |||
1043 | if (dev->pm_domain) { | 1043 | if (dev->pm_domain) { |
1044 | info = "noirq power domain "; | 1044 | info = "noirq power domain "; |
1045 | callback = pm_noirq_op(&dev->pm_domain->ops, state); | 1045 | callback = pm_noirq_op(&dev->pm_domain->ops, state); |
@@ -1174,6 +1174,8 @@ static int __device_suspend_late(struct device *dev, pm_message_t state, bool as | |||
1174 | 1174 | ||
1175 | __pm_runtime_disable(dev, false); | 1175 | __pm_runtime_disable(dev, false); |
1176 | 1176 | ||
1177 | dpm_wait_for_children(dev, async); | ||
1178 | |||
1177 | if (async_error) | 1179 | if (async_error) |
1178 | goto Complete; | 1180 | goto Complete; |
1179 | 1181 | ||
@@ -1185,8 +1187,6 @@ static int __device_suspend_late(struct device *dev, pm_message_t state, bool as | |||
1185 | if (dev->power.syscore || dev->power.direct_complete) | 1187 | if (dev->power.syscore || dev->power.direct_complete) |
1186 | goto Complete; | 1188 | goto Complete; |
1187 | 1189 | ||
1188 | dpm_wait_for_children(dev, async); | ||
1189 | |||
1190 | if (dev->pm_domain) { | 1190 | if (dev->pm_domain) { |
1191 | info = "late power domain "; | 1191 | info = "late power domain "; |
1192 | callback = pm_late_early_op(&dev->pm_domain->ops, state); | 1192 | callback = pm_late_early_op(&dev->pm_domain->ops, state); |
diff --git a/drivers/block/aoe/aoecmd.c b/drivers/block/aoe/aoecmd.c index ab19adb07a12..3c606c09fd5a 100644 --- a/drivers/block/aoe/aoecmd.c +++ b/drivers/block/aoe/aoecmd.c | |||
@@ -853,45 +853,6 @@ rqbiocnt(struct request *r) | |||
853 | return n; | 853 | return n; |
854 | } | 854 | } |
855 | 855 | ||
856 | /* This can be removed if we are certain that no users of the block | ||
857 | * layer will ever use zero-count pages in bios. Otherwise we have to | ||
858 | * protect against the put_page sometimes done by the network layer. | ||
859 | * | ||
860 | * See http://oss.sgi.com/archives/xfs/2007-01/msg00594.html for | ||
861 | * discussion. | ||
862 | * | ||
863 | * We cannot use get_page in the workaround, because it insists on a | ||
864 | * positive page count as a precondition. So we use _refcount directly. | ||
865 | */ | ||
866 | static void | ||
867 | bio_pageinc(struct bio *bio) | ||
868 | { | ||
869 | struct bio_vec bv; | ||
870 | struct page *page; | ||
871 | struct bvec_iter iter; | ||
872 | |||
873 | bio_for_each_segment(bv, bio, iter) { | ||
874 | /* Non-zero page count for non-head members of | ||
875 | * compound pages is no longer allowed by the kernel. | ||
876 | */ | ||
877 | page = compound_head(bv.bv_page); | ||
878 | page_ref_inc(page); | ||
879 | } | ||
880 | } | ||
881 | |||
882 | static void | ||
883 | bio_pagedec(struct bio *bio) | ||
884 | { | ||
885 | struct page *page; | ||
886 | struct bio_vec bv; | ||
887 | struct bvec_iter iter; | ||
888 | |||
889 | bio_for_each_segment(bv, bio, iter) { | ||
890 | page = compound_head(bv.bv_page); | ||
891 | page_ref_dec(page); | ||
892 | } | ||
893 | } | ||
894 | |||
895 | static void | 856 | static void |
896 | bufinit(struct buf *buf, struct request *rq, struct bio *bio) | 857 | bufinit(struct buf *buf, struct request *rq, struct bio *bio) |
897 | { | 858 | { |
@@ -899,7 +860,6 @@ bufinit(struct buf *buf, struct request *rq, struct bio *bio) | |||
899 | buf->rq = rq; | 860 | buf->rq = rq; |
900 | buf->bio = bio; | 861 | buf->bio = bio; |
901 | buf->iter = bio->bi_iter; | 862 | buf->iter = bio->bi_iter; |
902 | bio_pageinc(bio); | ||
903 | } | 863 | } |
904 | 864 | ||
905 | static struct buf * | 865 | static struct buf * |
@@ -1127,7 +1087,6 @@ aoe_end_buf(struct aoedev *d, struct buf *buf) | |||
1127 | if (buf == d->ip.buf) | 1087 | if (buf == d->ip.buf) |
1128 | d->ip.buf = NULL; | 1088 | d->ip.buf = NULL; |
1129 | rq = buf->rq; | 1089 | rq = buf->rq; |
1130 | bio_pagedec(buf->bio); | ||
1131 | mempool_free(buf, d->bufpool); | 1090 | mempool_free(buf, d->bufpool); |
1132 | n = (unsigned long) rq->special; | 1091 | n = (unsigned long) rq->special; |
1133 | rq->special = (void *) --n; | 1092 | rq->special = (void *) --n; |
diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c index 100be556e613..83482721bc01 100644 --- a/drivers/block/drbd/drbd_main.c +++ b/drivers/block/drbd/drbd_main.c | |||
@@ -1871,7 +1871,7 @@ int drbd_send(struct drbd_connection *connection, struct socket *sock, | |||
1871 | drbd_update_congested(connection); | 1871 | drbd_update_congested(connection); |
1872 | } | 1872 | } |
1873 | do { | 1873 | do { |
1874 | rv = kernel_sendmsg(sock, &msg, &iov, 1, size); | 1874 | rv = kernel_sendmsg(sock, &msg, &iov, 1, iov.iov_len); |
1875 | if (rv == -EAGAIN) { | 1875 | if (rv == -EAGAIN) { |
1876 | if (we_should_drop_the_connection(connection, sock)) | 1876 | if (we_should_drop_the_connection(connection, sock)) |
1877 | break; | 1877 | break; |
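The drbd fix passes iov.iov_len instead of the original size, so after a short send the next kernel_sendmsg() only covers the bytes still outstanding (the surrounding loop advances iov_base and shrinks iov_len). The same resume-after-short-write shape in stand-alone user-space form, using write() on a file descriptor in place of kernel_sendmsg() (the helper name is invented):

    #include <errno.h>
    #include <stddef.h>
    #include <unistd.h>

    /* Write the whole buffer, resuming after short writes. */
    static int write_all(int fd, const char *buf, size_t len)
    {
        size_t sent = 0;

        while (sent < len) {
            ssize_t rv = write(fd, buf + sent, len - sent);  /* only the remainder */

            if (rv < 0) {
                if (errno == EINTR)
                    continue;
                return -1;
            }
            sent += (size_t)rv;
        }
        return 0;
    }

    int main(void)
    {
        static const char msg[] = "partial writes handled\n";
        return write_all(1, msg, sizeof(msg) - 1);
    }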
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c index 19a16b2dbb91..7a1048755914 100644 --- a/drivers/block/nbd.c +++ b/drivers/block/nbd.c | |||
@@ -599,7 +599,7 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd, | |||
599 | return -EINVAL; | 599 | return -EINVAL; |
600 | 600 | ||
601 | sreq = blk_mq_alloc_request(bdev_get_queue(bdev), WRITE, 0); | 601 | sreq = blk_mq_alloc_request(bdev_get_queue(bdev), WRITE, 0); |
602 | if (!sreq) | 602 | if (IS_ERR(sreq)) |
603 | return -ENOMEM; | 603 | return -ENOMEM; |
604 | 604 | ||
605 | mutex_unlock(&nbd->tx_lock); | 605 | mutex_unlock(&nbd->tx_lock); |
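blk_mq_alloc_request() reports failure with an ERR_PTR() value rather than NULL, so the nbd ioctl has to test IS_ERR(); the old !sreq check would let an error pointer through to be used as a request. A short kernel-style reminder of the convention (the surrounding error handling is illustrative only):

    /* Sketch: ERR_PTR-returning allocators are checked with IS_ERR(), never with !ptr. */
    struct request *sreq;

    sreq = blk_mq_alloc_request(bdev_get_queue(bdev), WRITE, 0);
    if (IS_ERR(sreq))
        return -ENOMEM;   /* or PTR_ERR(sreq) to propagate the precise error */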
diff --git a/drivers/char/ppdev.c b/drivers/char/ppdev.c index d23368874710..6af1ce04b3da 100644 --- a/drivers/char/ppdev.c +++ b/drivers/char/ppdev.c | |||
@@ -748,10 +748,7 @@ static int pp_release(struct inode *inode, struct file *file) | |||
748 | } | 748 | } |
749 | 749 | ||
750 | if (pp->pdev) { | 750 | if (pp->pdev) { |
751 | const char *name = pp->pdev->name; | ||
752 | |||
753 | parport_unregister_device(pp->pdev); | 751 | parport_unregister_device(pp->pdev); |
754 | kfree(name); | ||
755 | pp->pdev = NULL; | 752 | pp->pdev = NULL; |
756 | pr_debug(CHRDEV "%x: unregistered pardevice\n", minor); | 753 | pr_debug(CHRDEV "%x: unregistered pardevice\n", minor); |
757 | } | 754 | } |
diff --git a/drivers/clk/clk-qoriq.c b/drivers/clk/clk-qoriq.c index 20b105584f82..80ae2a51452d 100644 --- a/drivers/clk/clk-qoriq.c +++ b/drivers/clk/clk-qoriq.c | |||
@@ -700,6 +700,7 @@ static struct clk * __init create_mux_common(struct clockgen *cg, | |||
700 | struct mux_hwclock *hwc, | 700 | struct mux_hwclock *hwc, |
701 | const struct clk_ops *ops, | 701 | const struct clk_ops *ops, |
702 | unsigned long min_rate, | 702 | unsigned long min_rate, |
703 | unsigned long max_rate, | ||
703 | unsigned long pct80_rate, | 704 | unsigned long pct80_rate, |
704 | const char *fmt, int idx) | 705 | const char *fmt, int idx) |
705 | { | 706 | { |
@@ -728,6 +729,8 @@ static struct clk * __init create_mux_common(struct clockgen *cg, | |||
728 | continue; | 729 | continue; |
729 | if (rate < min_rate) | 730 | if (rate < min_rate) |
730 | continue; | 731 | continue; |
732 | if (rate > max_rate) | ||
733 | continue; | ||
731 | 734 | ||
732 | parent_names[j] = div->name; | 735 | parent_names[j] = div->name; |
733 | hwc->parent_to_clksel[j] = i; | 736 | hwc->parent_to_clksel[j] = i; |
@@ -759,7 +762,7 @@ static struct clk * __init create_one_cmux(struct clockgen *cg, int idx) | |||
759 | struct mux_hwclock *hwc; | 762 | struct mux_hwclock *hwc; |
760 | const struct clockgen_pll_div *div; | 763 | const struct clockgen_pll_div *div; |
761 | unsigned long plat_rate, min_rate; | 764 | unsigned long plat_rate, min_rate; |
762 | u64 pct80_rate; | 765 | u64 max_rate, pct80_rate; |
763 | u32 clksel; | 766 | u32 clksel; |
764 | 767 | ||
765 | hwc = kzalloc(sizeof(*hwc), GFP_KERNEL); | 768 | hwc = kzalloc(sizeof(*hwc), GFP_KERNEL); |
@@ -787,8 +790,8 @@ static struct clk * __init create_one_cmux(struct clockgen *cg, int idx) | |||
787 | return NULL; | 790 | return NULL; |
788 | } | 791 | } |
789 | 792 | ||
790 | pct80_rate = clk_get_rate(div->clk); | 793 | max_rate = clk_get_rate(div->clk); |
791 | pct80_rate *= 8; | 794 | pct80_rate = max_rate * 8; |
792 | do_div(pct80_rate, 10); | 795 | do_div(pct80_rate, 10); |
793 | 796 | ||
794 | plat_rate = clk_get_rate(cg->pll[PLATFORM_PLL].div[PLL_DIV1].clk); | 797 | plat_rate = clk_get_rate(cg->pll[PLATFORM_PLL].div[PLL_DIV1].clk); |
@@ -798,7 +801,7 @@ static struct clk * __init create_one_cmux(struct clockgen *cg, int idx) | |||
798 | else | 801 | else |
799 | min_rate = plat_rate / 2; | 802 | min_rate = plat_rate / 2; |
800 | 803 | ||
801 | return create_mux_common(cg, hwc, &cmux_ops, min_rate, | 804 | return create_mux_common(cg, hwc, &cmux_ops, min_rate, max_rate, |
802 | pct80_rate, "cg-cmux%d", idx); | 805 | pct80_rate, "cg-cmux%d", idx); |
803 | } | 806 | } |
804 | 807 | ||
@@ -813,7 +816,7 @@ static struct clk * __init create_one_hwaccel(struct clockgen *cg, int idx) | |||
813 | hwc->reg = cg->regs + 0x20 * idx + 0x10; | 816 | hwc->reg = cg->regs + 0x20 * idx + 0x10; |
814 | hwc->info = cg->info.hwaccel[idx]; | 817 | hwc->info = cg->info.hwaccel[idx]; |
815 | 818 | ||
816 | return create_mux_common(cg, hwc, &hwaccel_ops, 0, 0, | 819 | return create_mux_common(cg, hwc, &hwaccel_ops, 0, ULONG_MAX, 0, |
817 | "cg-hwaccel%d", idx); | 820 | "cg-hwaccel%d", idx); |
818 | } | 821 | } |
819 | 822 | ||
diff --git a/drivers/clk/clk-xgene.c b/drivers/clk/clk-xgene.c index 5daddf5ecc4b..bc37030e38ba 100644 --- a/drivers/clk/clk-xgene.c +++ b/drivers/clk/clk-xgene.c | |||
@@ -463,22 +463,20 @@ static int xgene_clk_enable(struct clk_hw *hw) | |||
463 | struct xgene_clk *pclk = to_xgene_clk(hw); | 463 | struct xgene_clk *pclk = to_xgene_clk(hw); |
464 | unsigned long flags = 0; | 464 | unsigned long flags = 0; |
465 | u32 data; | 465 | u32 data; |
466 | phys_addr_t reg; | ||
467 | 466 | ||
468 | if (pclk->lock) | 467 | if (pclk->lock) |
469 | spin_lock_irqsave(pclk->lock, flags); | 468 | spin_lock_irqsave(pclk->lock, flags); |
470 | 469 | ||
471 | if (pclk->param.csr_reg != NULL) { | 470 | if (pclk->param.csr_reg != NULL) { |
472 | pr_debug("%s clock enabled\n", clk_hw_get_name(hw)); | 471 | pr_debug("%s clock enabled\n", clk_hw_get_name(hw)); |
473 | reg = __pa(pclk->param.csr_reg); | ||
474 | /* First enable the clock */ | 472 | /* First enable the clock */ |
475 | data = xgene_clk_read(pclk->param.csr_reg + | 473 | data = xgene_clk_read(pclk->param.csr_reg + |
476 | pclk->param.reg_clk_offset); | 474 | pclk->param.reg_clk_offset); |
477 | data |= pclk->param.reg_clk_mask; | 475 | data |= pclk->param.reg_clk_mask; |
478 | xgene_clk_write(data, pclk->param.csr_reg + | 476 | xgene_clk_write(data, pclk->param.csr_reg + |
479 | pclk->param.reg_clk_offset); | 477 | pclk->param.reg_clk_offset); |
480 | pr_debug("%s clock PADDR base %pa clk offset 0x%08X mask 0x%08X value 0x%08X\n", | 478 | pr_debug("%s clk offset 0x%08X mask 0x%08X value 0x%08X\n", |
481 | clk_hw_get_name(hw), ®, | 479 | clk_hw_get_name(hw), |
482 | pclk->param.reg_clk_offset, pclk->param.reg_clk_mask, | 480 | pclk->param.reg_clk_offset, pclk->param.reg_clk_mask, |
483 | data); | 481 | data); |
484 | 482 | ||
@@ -488,8 +486,8 @@ static int xgene_clk_enable(struct clk_hw *hw) | |||
488 | data &= ~pclk->param.reg_csr_mask; | 486 | data &= ~pclk->param.reg_csr_mask; |
489 | xgene_clk_write(data, pclk->param.csr_reg + | 487 | xgene_clk_write(data, pclk->param.csr_reg + |
490 | pclk->param.reg_csr_offset); | 488 | pclk->param.reg_csr_offset); |
491 | pr_debug("%s CSR RESET PADDR base %pa csr offset 0x%08X mask 0x%08X value 0x%08X\n", | 489 | pr_debug("%s csr offset 0x%08X mask 0x%08X value 0x%08X\n", |
492 | clk_hw_get_name(hw), ®, | 490 | clk_hw_get_name(hw), |
493 | pclk->param.reg_csr_offset, pclk->param.reg_csr_mask, | 491 | pclk->param.reg_csr_offset, pclk->param.reg_csr_mask, |
494 | data); | 492 | data); |
495 | } | 493 | } |
diff --git a/drivers/clk/imx/clk-pllv3.c b/drivers/clk/imx/clk-pllv3.c index 19f9b622981a..7a6acc3e4a92 100644 --- a/drivers/clk/imx/clk-pllv3.c +++ b/drivers/clk/imx/clk-pllv3.c | |||
@@ -223,7 +223,7 @@ static unsigned long clk_pllv3_av_recalc_rate(struct clk_hw *hw, | |||
223 | temp64 *= mfn; | 223 | temp64 *= mfn; |
224 | do_div(temp64, mfd); | 224 | do_div(temp64, mfd); |
225 | 225 | ||
226 | return (parent_rate * div) + (u32)temp64; | 226 | return parent_rate * div + (unsigned long)temp64; |
227 | } | 227 | } |
228 | 228 | ||
229 | static long clk_pllv3_av_round_rate(struct clk_hw *hw, unsigned long rate, | 229 | static long clk_pllv3_av_round_rate(struct clk_hw *hw, unsigned long rate, |
@@ -247,7 +247,11 @@ static long clk_pllv3_av_round_rate(struct clk_hw *hw, unsigned long rate, | |||
247 | do_div(temp64, parent_rate); | 247 | do_div(temp64, parent_rate); |
248 | mfn = temp64; | 248 | mfn = temp64; |
249 | 249 | ||
250 | return parent_rate * div + parent_rate * mfn / mfd; | 250 | temp64 = (u64)parent_rate; |
251 | temp64 *= mfn; | ||
252 | do_div(temp64, mfd); | ||
253 | |||
254 | return parent_rate * div + (unsigned long)temp64; | ||
251 | } | 255 | } |
252 | 256 | ||
253 | static int clk_pllv3_av_set_rate(struct clk_hw *hw, unsigned long rate, | 257 | static int clk_pllv3_av_set_rate(struct clk_hw *hw, unsigned long rate, |
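Both pllv3 hunks push the fractional term through a 64-bit intermediate before dividing, because parent_rate * mfn can overflow 32-bit arithmetic even though the final contribution fits. A stand-alone demonstration with invented mfn/mfd values (the kernel code reaches the same result with do_div() on a u64):

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uint32_t parent_rate = 24000000;   /* 24 MHz, a typical reference clock */
        uint32_t mfn = 1000000, mfd = 3000000;

        /* 32-bit product wraps: 24e6 * 1e6 does not fit in 32 bits */
        uint32_t wrong = parent_rate * mfn / mfd;

        /* widen first, divide after, as the patched code does */
        uint64_t tmp = (uint64_t)parent_rate * mfn;
        uint32_t right = (uint32_t)(tmp / mfd);

        printf("wrong = %u, right = %u\n", wrong, right);   /* right == 8000000 */
        return 0;
    }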
diff --git a/drivers/clk/mmp/clk-of-mmp2.c b/drivers/clk/mmp/clk-of-mmp2.c index 3a51fff1b0e7..9adaf48aea23 100644 --- a/drivers/clk/mmp/clk-of-mmp2.c +++ b/drivers/clk/mmp/clk-of-mmp2.c | |||
@@ -313,7 +313,7 @@ static void __init mmp2_clk_init(struct device_node *np) | |||
313 | } | 313 | } |
314 | 314 | ||
315 | pxa_unit->apmu_base = of_iomap(np, 1); | 315 | pxa_unit->apmu_base = of_iomap(np, 1); |
316 | if (!pxa_unit->mpmu_base) { | 316 | if (!pxa_unit->apmu_base) { |
317 | pr_err("failed to map apmu registers\n"); | 317 | pr_err("failed to map apmu registers\n"); |
318 | return; | 318 | return; |
319 | } | 319 | } |
diff --git a/drivers/clk/mmp/clk-of-pxa168.c b/drivers/clk/mmp/clk-of-pxa168.c index 87f2317b2a00..f110c02e83cb 100644 --- a/drivers/clk/mmp/clk-of-pxa168.c +++ b/drivers/clk/mmp/clk-of-pxa168.c | |||
@@ -262,7 +262,7 @@ static void __init pxa168_clk_init(struct device_node *np) | |||
262 | } | 262 | } |
263 | 263 | ||
264 | pxa_unit->apmu_base = of_iomap(np, 1); | 264 | pxa_unit->apmu_base = of_iomap(np, 1); |
265 | if (!pxa_unit->mpmu_base) { | 265 | if (!pxa_unit->apmu_base) { |
266 | pr_err("failed to map apmu registers\n"); | 266 | pr_err("failed to map apmu registers\n"); |
267 | return; | 267 | return; |
268 | } | 268 | } |
diff --git a/drivers/clk/mmp/clk-of-pxa910.c b/drivers/clk/mmp/clk-of-pxa910.c index e22a67f76d93..64d1ef49caeb 100644 --- a/drivers/clk/mmp/clk-of-pxa910.c +++ b/drivers/clk/mmp/clk-of-pxa910.c | |||
@@ -282,7 +282,7 @@ static void __init pxa910_clk_init(struct device_node *np) | |||
282 | } | 282 | } |
283 | 283 | ||
284 | pxa_unit->apmu_base = of_iomap(np, 1); | 284 | pxa_unit->apmu_base = of_iomap(np, 1); |
285 | if (!pxa_unit->mpmu_base) { | 285 | if (!pxa_unit->apmu_base) { |
286 | pr_err("failed to map apmu registers\n"); | 286 | pr_err("failed to map apmu registers\n"); |
287 | return; | 287 | return; |
288 | } | 288 | } |
@@ -294,7 +294,7 @@ static void __init pxa910_clk_init(struct device_node *np) | |||
294 | } | 294 | } |
295 | 295 | ||
296 | pxa_unit->apbcp_base = of_iomap(np, 3); | 296 | pxa_unit->apbcp_base = of_iomap(np, 3); |
297 | if (!pxa_unit->mpmu_base) { | 297 | if (!pxa_unit->apbcp_base) { |
298 | pr_err("failed to map apbcp registers\n"); | 298 | pr_err("failed to map apbcp registers\n"); |
299 | return; | 299 | return; |
300 | } | 300 | } |
diff --git a/drivers/clk/rockchip/clk-ddr.c b/drivers/clk/rockchip/clk-ddr.c index 8feba93672c5..e8075359366b 100644 --- a/drivers/clk/rockchip/clk-ddr.c +++ b/drivers/clk/rockchip/clk-ddr.c | |||
@@ -144,11 +144,8 @@ struct clk *rockchip_clk_register_ddrclk(const char *name, int flags, | |||
144 | ddrclk->ddr_flag = ddr_flag; | 144 | ddrclk->ddr_flag = ddr_flag; |
145 | 145 | ||
146 | clk = clk_register(NULL, &ddrclk->hw); | 146 | clk = clk_register(NULL, &ddrclk->hw); |
147 | if (IS_ERR(clk)) { | 147 | if (IS_ERR(clk)) |
148 | pr_err("%s: could not register ddrclk %s\n", __func__, name); | ||
149 | kfree(ddrclk); | 148 | kfree(ddrclk); |
150 | return NULL; | ||
151 | } | ||
152 | 149 | ||
153 | return clk; | 150 | return clk; |
154 | } | 151 | } |
diff --git a/drivers/clk/samsung/clk-exynos-clkout.c b/drivers/clk/samsung/clk-exynos-clkout.c index 96fab6cfb202..6c6afb87b4ce 100644 --- a/drivers/clk/samsung/clk-exynos-clkout.c +++ b/drivers/clk/samsung/clk-exynos-clkout.c | |||
@@ -132,28 +132,34 @@ free_clkout: | |||
132 | pr_err("%s: failed to register clkout clock\n", __func__); | 132 | pr_err("%s: failed to register clkout clock\n", __func__); |
133 | } | 133 | } |
134 | 134 | ||
135 | /* | ||
136 | * We use CLK_OF_DECLARE_DRIVER initialization method to avoid setting | ||
137 | * the OF_POPULATED flag on the pmu device tree node, so later the | ||
138 | * Exynos PMU platform device can be properly probed with PMU driver. | ||
139 | */ | ||
140 | |||
135 | static void __init exynos4_clkout_init(struct device_node *node) | 141 | static void __init exynos4_clkout_init(struct device_node *node) |
136 | { | 142 | { |
137 | exynos_clkout_init(node, EXYNOS4_CLKOUT_MUX_MASK); | 143 | exynos_clkout_init(node, EXYNOS4_CLKOUT_MUX_MASK); |
138 | } | 144 | } |
139 | CLK_OF_DECLARE(exynos4210_clkout, "samsung,exynos4210-pmu", | 145 | CLK_OF_DECLARE_DRIVER(exynos4210_clkout, "samsung,exynos4210-pmu", |
140 | exynos4_clkout_init); | 146 | exynos4_clkout_init); |
141 | CLK_OF_DECLARE(exynos4212_clkout, "samsung,exynos4212-pmu", | 147 | CLK_OF_DECLARE_DRIVER(exynos4212_clkout, "samsung,exynos4212-pmu", |
142 | exynos4_clkout_init); | 148 | exynos4_clkout_init); |
143 | CLK_OF_DECLARE(exynos4412_clkout, "samsung,exynos4412-pmu", | 149 | CLK_OF_DECLARE_DRIVER(exynos4412_clkout, "samsung,exynos4412-pmu", |
144 | exynos4_clkout_init); | 150 | exynos4_clkout_init); |
145 | CLK_OF_DECLARE(exynos3250_clkout, "samsung,exynos3250-pmu", | 151 | CLK_OF_DECLARE_DRIVER(exynos3250_clkout, "samsung,exynos3250-pmu", |
146 | exynos4_clkout_init); | 152 | exynos4_clkout_init); |
147 | 153 | ||
148 | static void __init exynos5_clkout_init(struct device_node *node) | 154 | static void __init exynos5_clkout_init(struct device_node *node) |
149 | { | 155 | { |
150 | exynos_clkout_init(node, EXYNOS5_CLKOUT_MUX_MASK); | 156 | exynos_clkout_init(node, EXYNOS5_CLKOUT_MUX_MASK); |
151 | } | 157 | } |
152 | CLK_OF_DECLARE(exynos5250_clkout, "samsung,exynos5250-pmu", | 158 | CLK_OF_DECLARE_DRIVER(exynos5250_clkout, "samsung,exynos5250-pmu", |
153 | exynos5_clkout_init); | 159 | exynos5_clkout_init); |
154 | CLK_OF_DECLARE(exynos5410_clkout, "samsung,exynos5410-pmu", | 160 | CLK_OF_DECLARE_DRIVER(exynos5410_clkout, "samsung,exynos5410-pmu", |
155 | exynos5_clkout_init); | 161 | exynos5_clkout_init); |
156 | CLK_OF_DECLARE(exynos5420_clkout, "samsung,exynos5420-pmu", | 162 | CLK_OF_DECLARE_DRIVER(exynos5420_clkout, "samsung,exynos5420-pmu", |
157 | exynos5_clkout_init); | 163 | exynos5_clkout_init); |
158 | CLK_OF_DECLARE(exynos5433_clkout, "samsung,exynos5433-pmu", | 164 | CLK_OF_DECLARE_DRIVER(exynos5433_clkout, "samsung,exynos5433-pmu", |
159 | exynos5_clkout_init); | 165 | exynos5_clkout_init); |
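
The added comment is the whole point of the clkout change: CLK_OF_DECLARE() marks the matching device-tree node OF_POPULATED, which would stop the Exynos PMU platform device from being created later, while CLK_OF_DECLARE_DRIVER() registers the same early init hook without setting that flag, so the PMU driver can still bind to the node afterwards.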
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index 039b57e4644c..496f72b134eb 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h | |||
@@ -459,6 +459,7 @@ struct amdgpu_bo { | |||
459 | u64 metadata_flags; | 459 | u64 metadata_flags; |
460 | void *metadata; | 460 | void *metadata; |
461 | u32 metadata_size; | 461 | u32 metadata_size; |
462 | unsigned prime_shared_count; | ||
462 | /* list of all virtual address to which this bo | 463 | /* list of all virtual address to which this bo |
463 | * is associated to | 464 | * is associated to |
464 | */ | 465 | */ |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c index 892d60fb225b..2057683f7b59 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c | |||
@@ -395,9 +395,12 @@ static int acp_hw_fini(void *handle) | |||
395 | { | 395 | { |
396 | int i, ret; | 396 | int i, ret; |
397 | struct device *dev; | 397 | struct device *dev; |
398 | |||
399 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; | 398 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; |
400 | 399 | ||
400 | /* return early if no ACP */ | ||
401 | if (!adev->acp.acp_genpd) | ||
402 | return 0; | ||
403 | |||
401 | for (i = 0; i < ACP_DEVS ; i++) { | 404 | for (i = 0; i < ACP_DEVS ; i++) { |
402 | dev = get_mfd_cell_dev(adev->acp.acp_cell[i].name, i); | 405 | dev = get_mfd_cell_dev(adev->acp.acp_cell[i].name, i); |
403 | ret = pm_genpd_remove_device(&adev->acp.acp_genpd->gpd, dev); | 406 | ret = pm_genpd_remove_device(&adev->acp.acp_genpd->gpd, dev); |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c index 651115dcce12..c02db01f6583 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c | |||
@@ -132,7 +132,7 @@ static int amdgpu_bo_list_set(struct amdgpu_device *adev, | |||
132 | entry->priority = min(info[i].bo_priority, | 132 | entry->priority = min(info[i].bo_priority, |
133 | AMDGPU_BO_LIST_MAX_PRIORITY); | 133 | AMDGPU_BO_LIST_MAX_PRIORITY); |
134 | entry->tv.bo = &entry->robj->tbo; | 134 | entry->tv.bo = &entry->robj->tbo; |
135 | entry->tv.shared = true; | 135 | entry->tv.shared = !entry->robj->prime_shared_count; |
136 | 136 | ||
137 | if (entry->robj->prefered_domains == AMDGPU_GEM_DOMAIN_GDS) | 137 | if (entry->robj->prefered_domains == AMDGPU_GEM_DOMAIN_GDS) |
138 | gds_obj = entry->robj; | 138 | gds_obj = entry->robj; |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c index 7a8bfa34682f..662976292535 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c | |||
@@ -795,10 +795,19 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device, | |||
795 | if (!adev->pm.fw) { | 795 | if (!adev->pm.fw) { |
796 | switch (adev->asic_type) { | 796 | switch (adev->asic_type) { |
797 | case CHIP_TOPAZ: | 797 | case CHIP_TOPAZ: |
798 | strcpy(fw_name, "amdgpu/topaz_smc.bin"); | 798 | if (((adev->pdev->device == 0x6900) && (adev->pdev->revision == 0x81)) || |
799 | ((adev->pdev->device == 0x6900) && (adev->pdev->revision == 0x83)) || | ||
800 | ((adev->pdev->device == 0x6907) && (adev->pdev->revision == 0x87))) | ||
801 | strcpy(fw_name, "amdgpu/topaz_k_smc.bin"); | ||
802 | else | ||
803 | strcpy(fw_name, "amdgpu/topaz_smc.bin"); | ||
799 | break; | 804 | break; |
800 | case CHIP_TONGA: | 805 | case CHIP_TONGA: |
801 | strcpy(fw_name, "amdgpu/tonga_smc.bin"); | 806 | if (((adev->pdev->device == 0x6939) && (adev->pdev->revision == 0xf1)) || |
807 | ((adev->pdev->device == 0x6938) && (adev->pdev->revision == 0xf1))) | ||
808 | strcpy(fw_name, "amdgpu/tonga_k_smc.bin"); | ||
809 | else | ||
810 | strcpy(fw_name, "amdgpu/tonga_smc.bin"); | ||
802 | break; | 811 | break; |
803 | case CHIP_FIJI: | 812 | case CHIP_FIJI: |
804 | strcpy(fw_name, "amdgpu/fiji_smc.bin"); | 813 | strcpy(fw_name, "amdgpu/fiji_smc.bin"); |
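
The cgs change selects the "_k_" SMC firmware variants for specific Topaz and Tonga PCI device/revision combinations instead of always loading the default image. A small standalone sketch of the same selection is shown below; the device/revision pairs and filenames are copied from the hunk, everything else (the table layout, topaz_smc_fw_name()) is invented for illustration.

#include <stddef.h>
#include <stdio.h>

struct fw_quirk {
	unsigned short device;
	unsigned char revision;
};

/* device/revision pairs that need topaz_k_smc.bin, from the hunk above */
static const struct fw_quirk topaz_k_quirks[] = {
	{ 0x6900, 0x81 },
	{ 0x6900, 0x83 },
	{ 0x6907, 0x87 },
};

static const char *topaz_smc_fw_name(unsigned short device,
				     unsigned char revision)
{
	size_t i;

	for (i = 0; i < sizeof(topaz_k_quirks) / sizeof(topaz_k_quirks[0]); i++) {
		if (topaz_k_quirks[i].device == device &&
		    topaz_k_quirks[i].revision == revision)
			return "amdgpu/topaz_k_smc.bin";
	}
	return "amdgpu/topaz_smc.bin";
}

int main(void)
{
	printf("%s\n", topaz_smc_fw_name(0x6900, 0x81));	/* _k_ variant */
	printf("%s\n", topaz_smc_fw_name(0x6902, 0x00));	/* default image */
	return 0;
}

The matching MODULE_FIRMWARE() declarations for the new filenames are added in vi.c further down.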
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c index e3281d4e3e41..086aa5c9c634 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c | |||
@@ -769,7 +769,7 @@ static void amdgpu_connector_unregister(struct drm_connector *connector) | |||
769 | { | 769 | { |
770 | struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector); | 770 | struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector); |
771 | 771 | ||
772 | if (amdgpu_connector->ddc_bus->has_aux) { | 772 | if (amdgpu_connector->ddc_bus && amdgpu_connector->ddc_bus->has_aux) { |
773 | drm_dp_aux_unregister(&amdgpu_connector->ddc_bus->aux); | 773 | drm_dp_aux_unregister(&amdgpu_connector->ddc_bus->aux); |
774 | amdgpu_connector->ddc_bus->has_aux = false; | 774 | amdgpu_connector->ddc_bus->has_aux = false; |
775 | } | 775 | } |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 7ca07e7b25c1..3161d77bf299 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | |||
@@ -658,12 +658,10 @@ static bool amdgpu_vpost_needed(struct amdgpu_device *adev) | |||
658 | return false; | 658 | return false; |
659 | 659 | ||
660 | if (amdgpu_passthrough(adev)) { | 660 | if (amdgpu_passthrough(adev)) { |
661 | /* for FIJI: In whole GPU pass-through virtualization case | 661 | /* for FIJI: in the whole-GPU pass-through virtualization case, after a VM reboot |
662 | * old smc fw won't clear some registers (e.g. MEM_SIZE, BIOS_SCRATCH) | 662 | * some old SMC firmware still needs the driver to do vPost or the GPU hangs, while |
663 | * so amdgpu_card_posted return false and driver will incorrectly skip vPost. | 663 | * SMC firmware versions above 22.15 do not have this flaw, so we force |
664 | * but if we force vPost do in pass-through case, the driver reload will hang. | 664 | * vPost to be executed for SMC versions below 22.15 |
665 | * whether doing vPost depends on amdgpu_card_posted if smc version is above | ||
666 | * 00160e00 for FIJI. | ||
667 | */ | 665 | */ |
668 | if (adev->asic_type == CHIP_FIJI) { | 666 | if (adev->asic_type == CHIP_FIJI) { |
669 | int err; | 667 | int err; |
@@ -674,22 +672,11 @@ static bool amdgpu_vpost_needed(struct amdgpu_device *adev) | |||
674 | return true; | 672 | return true; |
675 | 673 | ||
676 | fw_ver = *((uint32_t *)adev->pm.fw->data + 69); | 674 | fw_ver = *((uint32_t *)adev->pm.fw->data + 69); |
677 | if (fw_ver >= 0x00160e00) | 675 | if (fw_ver < 0x00160e00) |
678 | return !amdgpu_card_posted(adev); | 676 | return true; |
679 | } | 677 | } |
680 | } else { | ||
681 | /* in bare-metal case, amdgpu_card_posted return false | ||
682 | * after system reboot/boot, and return true if driver | ||
683 | * reloaded. | ||
684 | * we shouldn't do vPost after driver reload otherwise GPU | ||
685 | * could hang. | ||
686 | */ | ||
687 | if (amdgpu_card_posted(adev)) | ||
688 | return false; | ||
689 | } | 678 | } |
690 | 679 | return !amdgpu_card_posted(adev); | |
691 | /* we assume vPost is neede for all other cases */ | ||
692 | return true; | ||
693 | } | 680 | } |
694 | 681 | ||
695 | /** | 682 | /** |
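
The amdgpu_device hunk collapses the vPost decision into two rules: in pass-through, FIJI with SMC firmware older than 22.15 (0x00160e00) always needs vPost after a VM reboot; in every other case posting is needed exactly when the card has not already been posted. A hedged standalone restatement of that logic follows; vpost_needed() and its parameters are invented for the sketch and do not reproduce the early-exit checks at the top of amdgpu_vpost_needed().

#include <stdbool.h>
#include <stdio.h>

enum asic_type { CHIP_FIJI, CHIP_OTHER };

/* card_posted would come from amdgpu_card_posted() in the driver. */
static bool vpost_needed(bool passthrough, enum asic_type asic,
			 unsigned int smc_fw_ver, bool card_posted)
{
	if (passthrough && asic == CHIP_FIJI && smc_fw_ver < 0x00160e00)
		return true;	/* old FIJI SMC fw: always post after VM reboot */

	return !card_posted;	/* otherwise post only if not posted yet */
}

int main(void)
{
	printf("%d\n", vpost_needed(true, CHIP_FIJI, 0x00160d00, true));	/* 1 */
	printf("%d\n", vpost_needed(true, CHIP_FIJI, 0x00160e00, true));	/* 0 */
	printf("%d\n", vpost_needed(false, CHIP_OTHER, 0, false));		/* 1 */
	return 0;
}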
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c index 71ed27eb3dde..02ff0747197c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | |||
@@ -735,8 +735,20 @@ static struct pci_driver amdgpu_kms_pci_driver = { | |||
735 | 735 | ||
736 | static int __init amdgpu_init(void) | 736 | static int __init amdgpu_init(void) |
737 | { | 737 | { |
738 | amdgpu_sync_init(); | 738 | int r; |
739 | amdgpu_fence_slab_init(); | 739 | |
740 | r = amdgpu_sync_init(); | ||
741 | if (r) | ||
742 | goto error_sync; | ||
743 | |||
744 | r = amdgpu_fence_slab_init(); | ||
745 | if (r) | ||
746 | goto error_fence; | ||
747 | |||
748 | r = amd_sched_fence_slab_init(); | ||
749 | if (r) | ||
750 | goto error_sched; | ||
751 | |||
740 | if (vgacon_text_force()) { | 752 | if (vgacon_text_force()) { |
741 | DRM_ERROR("VGACON disables amdgpu kernel modesetting.\n"); | 753 | DRM_ERROR("VGACON disables amdgpu kernel modesetting.\n"); |
742 | return -EINVAL; | 754 | return -EINVAL; |
@@ -748,6 +760,15 @@ static int __init amdgpu_init(void) | |||
748 | amdgpu_register_atpx_handler(); | 760 | amdgpu_register_atpx_handler(); |
749 | /* let modprobe override vga console setting */ | 761 | /* let modprobe override vga console setting */ |
750 | return drm_pci_init(driver, pdriver); | 762 | return drm_pci_init(driver, pdriver); |
763 | |||
764 | error_sched: | ||
765 | amdgpu_fence_slab_fini(); | ||
766 | |||
767 | error_fence: | ||
768 | amdgpu_sync_fini(); | ||
769 | |||
770 | error_sync: | ||
771 | return r; | ||
751 | } | 772 | } |
752 | 773 | ||
753 | static void __exit amdgpu_exit(void) | 774 | static void __exit amdgpu_exit(void) |
@@ -756,6 +777,7 @@ static void __exit amdgpu_exit(void) | |||
756 | drm_pci_exit(driver, pdriver); | 777 | drm_pci_exit(driver, pdriver); |
757 | amdgpu_unregister_atpx_handler(); | 778 | amdgpu_unregister_atpx_handler(); |
758 | amdgpu_sync_fini(); | 779 | amdgpu_sync_fini(); |
780 | amd_sched_fence_slab_fini(); | ||
759 | amdgpu_fence_slab_fini(); | 781 | amdgpu_fence_slab_fini(); |
760 | } | 782 | } |
761 | 783 | ||
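
amdgpu_init() now checks the return value of each initializer and, on failure, unwinds the ones that already succeeded in reverse order through goto labels. This is the standard kernel error-unwinding idiom; the userspace analog below uses invented init_a()/init_b()/init_c() helpers only to make the control flow runnable on its own.

#include <stdio.h>

static int init_a(void) { puts("init a"); return 0; }
static int init_b(void) { puts("init b"); return 0; }
static int init_c(void) { puts("init c"); return -1; }	/* fails */

static void fini_a(void) { puts("fini a"); }
static void fini_b(void) { puts("fini b"); }

/* Mirrors the goto-unwind shape of amdgpu_init() above. */
static int module_init_like(void)
{
	int r;

	r = init_a();
	if (r)
		goto error_a;

	r = init_b();
	if (r)
		goto error_b;

	r = init_c();
	if (r)
		goto error_c;

	return 0;

error_c:
	fini_b();	/* undo init_b */
error_b:
	fini_a();	/* undo init_a */
error_a:
	return r;
}

int main(void)
{
	return module_init_like() ? 1 : 0;
}

The matching amd_sched_fence_slab_fini() call added to amdgpu_exit() keeps the teardown order the mirror image of the init order.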
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c index 203d98b00555..3938fca1ea8e 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c | |||
@@ -99,6 +99,8 @@ int amdgpu_driver_load_kms(struct drm_device *dev, unsigned long flags) | |||
99 | 99 | ||
100 | if ((amdgpu_runtime_pm != 0) && | 100 | if ((amdgpu_runtime_pm != 0) && |
101 | amdgpu_has_atpx() && | 101 | amdgpu_has_atpx() && |
102 | (amdgpu_is_atpx_hybrid() || | ||
103 | amdgpu_has_atpx_dgpu_power_cntl()) && | ||
102 | ((flags & AMD_IS_APU) == 0)) | 104 | ((flags & AMD_IS_APU) == 0)) |
103 | flags |= AMD_IS_PX; | 105 | flags |= AMD_IS_PX; |
104 | 106 | ||
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c index 7700dc22f243..3826d5aea0a6 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c | |||
@@ -74,20 +74,36 @@ amdgpu_gem_prime_import_sg_table(struct drm_device *dev, | |||
74 | if (ret) | 74 | if (ret) |
75 | return ERR_PTR(ret); | 75 | return ERR_PTR(ret); |
76 | 76 | ||
77 | bo->prime_shared_count = 1; | ||
77 | return &bo->gem_base; | 78 | return &bo->gem_base; |
78 | } | 79 | } |
79 | 80 | ||
80 | int amdgpu_gem_prime_pin(struct drm_gem_object *obj) | 81 | int amdgpu_gem_prime_pin(struct drm_gem_object *obj) |
81 | { | 82 | { |
82 | struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj); | 83 | struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj); |
83 | int ret = 0; | 84 | long ret = 0; |
84 | 85 | ||
85 | ret = amdgpu_bo_reserve(bo, false); | 86 | ret = amdgpu_bo_reserve(bo, false); |
86 | if (unlikely(ret != 0)) | 87 | if (unlikely(ret != 0)) |
87 | return ret; | 88 | return ret; |
88 | 89 | ||
90 | /* | ||
91 | * Wait for all shared fences to complete before we switch to future | ||
92 | * use of exclusive fence on this prime shared bo. | ||
93 | */ | ||
94 | ret = reservation_object_wait_timeout_rcu(bo->tbo.resv, true, false, | ||
95 | MAX_SCHEDULE_TIMEOUT); | ||
96 | if (unlikely(ret < 0)) { | ||
97 | DRM_DEBUG_PRIME("Fence wait failed: %li\n", ret); | ||
98 | amdgpu_bo_unreserve(bo); | ||
99 | return ret; | ||
100 | } | ||
101 | |||
89 | /* pin buffer into GTT */ | 102 | /* pin buffer into GTT */ |
90 | ret = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT, NULL); | 103 | ret = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT, NULL); |
104 | if (likely(ret == 0)) | ||
105 | bo->prime_shared_count++; | ||
106 | |||
91 | amdgpu_bo_unreserve(bo); | 107 | amdgpu_bo_unreserve(bo); |
92 | return ret; | 108 | return ret; |
93 | } | 109 | } |
@@ -102,6 +118,8 @@ void amdgpu_gem_prime_unpin(struct drm_gem_object *obj) | |||
102 | return; | 118 | return; |
103 | 119 | ||
104 | amdgpu_bo_unpin(bo); | 120 | amdgpu_bo_unpin(bo); |
121 | if (bo->prime_shared_count) | ||
122 | bo->prime_shared_count--; | ||
105 | amdgpu_bo_unreserve(bo); | 123 | amdgpu_bo_unreserve(bo); |
106 | } | 124 | } |
107 | 125 | ||
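
Taken together, the prime hunks track how many times a BO is shared via dma-buf (prime_shared_count), wait for all outstanding shared fences before pinning an imported buffer, and let the bo_list code request an exclusive fence slot for such BOs (tv.shared = !prime_shared_count). The counter-only sketch below illustrates the bookkeeping; struct fake_bo and the helper names are invented, and the actual fence waiting (reservation_object_wait_timeout_rcu) is omitted.

#include <stdbool.h>
#include <stdio.h>

struct fake_bo {
	unsigned int prime_shared_count;	/* mirrors amdgpu_bo */
};

static void prime_import(struct fake_bo *bo)
{
	bo->prime_shared_count = 1;	/* imported buffers start shared */
}

static void prime_pin(struct fake_bo *bo)
{
	/* the driver waits for shared fences here before pinning */
	bo->prime_shared_count++;
}

static void prime_unpin(struct fake_bo *bo)
{
	if (bo->prime_shared_count)
		bo->prime_shared_count--;
}

/* What amdgpu_bo_list_set() now computes for tv.shared */
static bool reservation_is_shared(const struct fake_bo *bo)
{
	return !bo->prime_shared_count;
}

int main(void)
{
	struct fake_bo bo = { 0 };

	printf("local bo shared slot: %d\n", reservation_is_shared(&bo));	/* 1 */
	prime_import(&bo);
	prime_pin(&bo);
	printf("prime bo shared slot: %d\n", reservation_is_shared(&bo));	/* 0 */
	prime_unpin(&bo);
	return 0;
}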
diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c index 7c13090df7c0..f62f1a74f890 100644 --- a/drivers/gpu/drm/amd/amdgpu/vi.c +++ b/drivers/gpu/drm/amd/amdgpu/vi.c | |||
@@ -80,7 +80,9 @@ | |||
80 | #include "dce_virtual.h" | 80 | #include "dce_virtual.h" |
81 | 81 | ||
82 | MODULE_FIRMWARE("amdgpu/topaz_smc.bin"); | 82 | MODULE_FIRMWARE("amdgpu/topaz_smc.bin"); |
83 | MODULE_FIRMWARE("amdgpu/topaz_k_smc.bin"); | ||
83 | MODULE_FIRMWARE("amdgpu/tonga_smc.bin"); | 84 | MODULE_FIRMWARE("amdgpu/tonga_smc.bin"); |
85 | MODULE_FIRMWARE("amdgpu/tonga_k_smc.bin"); | ||
84 | MODULE_FIRMWARE("amdgpu/fiji_smc.bin"); | 86 | MODULE_FIRMWARE("amdgpu/fiji_smc.bin"); |
85 | MODULE_FIRMWARE("amdgpu/polaris10_smc.bin"); | 87 | MODULE_FIRMWARE("amdgpu/polaris10_smc.bin"); |
86 | MODULE_FIRMWARE("amdgpu/polaris10_smc_sk.bin"); | 88 | MODULE_FIRMWARE("amdgpu/polaris10_smc_sk.bin"); |
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c b/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c index 14f8c1f4da3d..0723758ed065 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c | |||
@@ -272,7 +272,7 @@ bool phm_check_smc_update_required_for_display_configuration(struct pp_hwmgr *hw | |||
272 | PHM_FUNC_CHECK(hwmgr); | 272 | PHM_FUNC_CHECK(hwmgr); |
273 | 273 | ||
274 | if (hwmgr->hwmgr_func->check_smc_update_required_for_display_configuration == NULL) | 274 | if (hwmgr->hwmgr_func->check_smc_update_required_for_display_configuration == NULL) |
275 | return -EINVAL; | 275 | return false; |
276 | 276 | ||
277 | return hwmgr->hwmgr_func->check_smc_update_required_for_display_configuration(hwmgr); | 277 | return hwmgr->hwmgr_func->check_smc_update_required_for_display_configuration(hwmgr); |
278 | } | 278 | } |
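
phm_check_smc_update_required_for_display_configuration() returns bool, so the old "return -EINVAL" silently evaluated as true (any non-zero value converts to true), reporting that an SMC update was required even when the hook was simply not implemented. Returning false gives the intended answer. A two-function demonstration of that conversion, with invented names:

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

static bool buggy(void) { return -EINVAL; }	/* non-zero, so this is true */
static bool fixed(void) { return false; }

int main(void)
{
	printf("buggy() = %d, fixed() = %d\n", buggy(), fixed());	/* 1, 0 */
	return 0;
}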
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c index 2ba7937d2545..e03dcb6ea9c1 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c | |||
@@ -710,8 +710,10 @@ int phm_get_voltage_evv_on_sclk(struct pp_hwmgr *hwmgr, uint8_t voltage_type, | |||
710 | uint32_t vol; | 710 | uint32_t vol; |
711 | int ret = 0; | 711 | int ret = 0; |
712 | 712 | ||
713 | if (hwmgr->chip_id < CHIP_POLARIS10) { | 713 | if (hwmgr->chip_id < CHIP_TONGA) { |
714 | atomctrl_get_voltage_evv_on_sclk(hwmgr, voltage_type, sclk, id, voltage); | 714 | ret = atomctrl_get_voltage_evv(hwmgr, id, voltage); |
715 | } else if (hwmgr->chip_id < CHIP_POLARIS10) { | ||
716 | ret = atomctrl_get_voltage_evv_on_sclk(hwmgr, voltage_type, sclk, id, voltage); | ||
715 | if (*voltage >= 2000 || *voltage == 0) | 717 | if (*voltage >= 2000 || *voltage == 0) |
716 | *voltage = 1150; | 718 | *voltage = 1150; |
717 | } else { | 719 | } else { |
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c index 75854021f403..13f2b705ea49 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c | |||
@@ -1460,19 +1460,17 @@ static int smu7_get_evv_voltages(struct pp_hwmgr *hwmgr) | |||
1460 | struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table = NULL; | 1460 | struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table = NULL; |
1461 | 1461 | ||
1462 | 1462 | ||
1463 | if (table_info == NULL) | ||
1464 | return -EINVAL; | ||
1465 | |||
1466 | sclk_table = table_info->vdd_dep_on_sclk; | ||
1467 | |||
1468 | for (i = 0; i < SMU7_MAX_LEAKAGE_COUNT; i++) { | 1463 | for (i = 0; i < SMU7_MAX_LEAKAGE_COUNT; i++) { |
1469 | vv_id = ATOM_VIRTUAL_VOLTAGE_ID0 + i; | 1464 | vv_id = ATOM_VIRTUAL_VOLTAGE_ID0 + i; |
1470 | 1465 | ||
1471 | if (data->vdd_gfx_control == SMU7_VOLTAGE_CONTROL_BY_SVID2) { | 1466 | if (data->vdd_gfx_control == SMU7_VOLTAGE_CONTROL_BY_SVID2) { |
1472 | if (0 == phm_get_sclk_for_voltage_evv(hwmgr, | 1467 | if ((hwmgr->pp_table_version == PP_TABLE_V1) |
1468 | && !phm_get_sclk_for_voltage_evv(hwmgr, | ||
1473 | table_info->vddgfx_lookup_table, vv_id, &sclk)) { | 1469 | table_info->vddgfx_lookup_table, vv_id, &sclk)) { |
1474 | if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, | 1470 | if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, |
1475 | PHM_PlatformCaps_ClockStretcher)) { | 1471 | PHM_PlatformCaps_ClockStretcher)) { |
1472 | sclk_table = table_info->vdd_dep_on_sclk; | ||
1473 | |||
1476 | for (j = 1; j < sclk_table->count; j++) { | 1474 | for (j = 1; j < sclk_table->count; j++) { |
1477 | if (sclk_table->entries[j].clk == sclk && | 1475 | if (sclk_table->entries[j].clk == sclk && |
1478 | sclk_table->entries[j].cks_enable == 0) { | 1476 | sclk_table->entries[j].cks_enable == 0) { |
@@ -1498,12 +1496,15 @@ static int smu7_get_evv_voltages(struct pp_hwmgr *hwmgr) | |||
1498 | } | 1496 | } |
1499 | } | 1497 | } |
1500 | } else { | 1498 | } else { |
1501 | |||
1502 | if ((hwmgr->pp_table_version == PP_TABLE_V0) | 1499 | if ((hwmgr->pp_table_version == PP_TABLE_V0) |
1503 | || !phm_get_sclk_for_voltage_evv(hwmgr, | 1500 | || !phm_get_sclk_for_voltage_evv(hwmgr, |
1504 | table_info->vddc_lookup_table, vv_id, &sclk)) { | 1501 | table_info->vddc_lookup_table, vv_id, &sclk)) { |
1505 | if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, | 1502 | if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, |
1506 | PHM_PlatformCaps_ClockStretcher)) { | 1503 | PHM_PlatformCaps_ClockStretcher)) { |
1504 | if (table_info == NULL) | ||
1505 | return -EINVAL; | ||
1506 | sclk_table = table_info->vdd_dep_on_sclk; | ||
1507 | |||
1507 | for (j = 1; j < sclk_table->count; j++) { | 1508 | for (j = 1; j < sclk_table->count; j++) { |
1508 | if (sclk_table->entries[j].clk == sclk && | 1509 | if (sclk_table->entries[j].clk == sclk && |
1509 | sclk_table->entries[j].cks_enable == 0) { | 1510 | sclk_table->entries[j].cks_enable == 0) { |
@@ -2133,9 +2134,11 @@ static int smu7_patch_limits_vddc(struct pp_hwmgr *hwmgr, | |||
2133 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); | 2134 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); |
2134 | 2135 | ||
2135 | if (tab) { | 2136 | if (tab) { |
2137 | vddc = tab->vddc; | ||
2136 | smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &vddc, | 2138 | smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &vddc, |
2137 | &data->vddc_leakage); | 2139 | &data->vddc_leakage); |
2138 | tab->vddc = vddc; | 2140 | tab->vddc = vddc; |
2141 | vddci = tab->vddci; | ||
2139 | smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &vddci, | 2142 | smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &vddci, |
2140 | &data->vddci_leakage); | 2143 | &data->vddci_leakage); |
2141 | tab->vddci = vddci; | 2144 | tab->vddci = vddci; |
@@ -4228,18 +4231,26 @@ static int smu7_get_sclks(struct pp_hwmgr *hwmgr, struct amd_pp_clocks *clocks) | |||
4228 | { | 4231 | { |
4229 | struct phm_ppt_v1_information *table_info = | 4232 | struct phm_ppt_v1_information *table_info = |
4230 | (struct phm_ppt_v1_information *)hwmgr->pptable; | 4233 | (struct phm_ppt_v1_information *)hwmgr->pptable; |
4231 | struct phm_ppt_v1_clock_voltage_dependency_table *dep_sclk_table; | 4234 | struct phm_ppt_v1_clock_voltage_dependency_table *dep_sclk_table = NULL; |
4235 | struct phm_clock_voltage_dependency_table *sclk_table; | ||
4232 | int i; | 4236 | int i; |
4233 | 4237 | ||
4234 | if (table_info == NULL) | 4238 | if (hwmgr->pp_table_version == PP_TABLE_V1) { |
4235 | return -EINVAL; | 4239 | if (table_info == NULL || table_info->vdd_dep_on_sclk == NULL) |
4236 | 4240 | return -EINVAL; | |
4237 | dep_sclk_table = table_info->vdd_dep_on_sclk; | 4241 | dep_sclk_table = table_info->vdd_dep_on_sclk; |
4238 | 4242 | for (i = 0; i < dep_sclk_table->count; i++) { | |
4239 | for (i = 0; i < dep_sclk_table->count; i++) { | 4243 | clocks->clock[i] = dep_sclk_table->entries[i].clk; |
4240 | clocks->clock[i] = dep_sclk_table->entries[i].clk; | 4244 | clocks->count++; |
4241 | clocks->count++; | 4245 | } |
4246 | } else if (hwmgr->pp_table_version == PP_TABLE_V0) { | ||
4247 | sclk_table = hwmgr->dyn_state.vddc_dependency_on_sclk; | ||
4248 | for (i = 0; i < sclk_table->count; i++) { | ||
4249 | clocks->clock[i] = sclk_table->entries[i].clk; | ||
4250 | clocks->count++; | ||
4251 | } | ||
4242 | } | 4252 | } |
4253 | |||
4243 | return 0; | 4254 | return 0; |
4244 | } | 4255 | } |
4245 | 4256 | ||
@@ -4261,17 +4272,24 @@ static int smu7_get_mclks(struct pp_hwmgr *hwmgr, struct amd_pp_clocks *clocks) | |||
4261 | (struct phm_ppt_v1_information *)hwmgr->pptable; | 4272 | (struct phm_ppt_v1_information *)hwmgr->pptable; |
4262 | struct phm_ppt_v1_clock_voltage_dependency_table *dep_mclk_table; | 4273 | struct phm_ppt_v1_clock_voltage_dependency_table *dep_mclk_table; |
4263 | int i; | 4274 | int i; |
4275 | struct phm_clock_voltage_dependency_table *mclk_table; | ||
4264 | 4276 | ||
4265 | if (table_info == NULL) | 4277 | if (hwmgr->pp_table_version == PP_TABLE_V1) { |
4266 | return -EINVAL; | 4278 | if (table_info == NULL) |
4267 | 4279 | return -EINVAL; | |
4268 | dep_mclk_table = table_info->vdd_dep_on_mclk; | 4280 | dep_mclk_table = table_info->vdd_dep_on_mclk; |
4269 | 4281 | for (i = 0; i < dep_mclk_table->count; i++) { | |
4270 | for (i = 0; i < dep_mclk_table->count; i++) { | 4282 | clocks->clock[i] = dep_mclk_table->entries[i].clk; |
4271 | clocks->clock[i] = dep_mclk_table->entries[i].clk; | 4283 | clocks->latency[i] = smu7_get_mem_latency(hwmgr, |
4272 | clocks->latency[i] = smu7_get_mem_latency(hwmgr, | ||
4273 | dep_mclk_table->entries[i].clk); | 4284 | dep_mclk_table->entries[i].clk); |
4274 | clocks->count++; | 4285 | clocks->count++; |
4286 | } | ||
4287 | } else if (hwmgr->pp_table_version == PP_TABLE_V0) { | ||
4288 | mclk_table = hwmgr->dyn_state.vddc_dependency_on_mclk; | ||
4289 | for (i = 0; i < mclk_table->count; i++) { | ||
4290 | clocks->clock[i] = mclk_table->entries[i].clk; | ||
4291 | clocks->count++; | ||
4292 | } | ||
4275 | } | 4293 | } |
4276 | return 0; | 4294 | return 0; |
4277 | } | 4295 | } |
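
smu7_get_sclks()/smu7_get_mclks() now handle both powerplay table layouts: PP_TABLE_V1 reads the vdd_dep_on_* tables while PP_TABLE_V0 falls back to the legacy dyn_state dependency tables. The sketch below shows the same version dispatch with invented minimal table types; it is a shape-only illustration, not the driver's data structures.

#include <stdio.h>

enum pp_table_version { PP_TABLE_V0, PP_TABLE_V1 };

/* Invented minimal stand-ins for the two dependency-table layouts. */
struct dep_entry { unsigned int clk; };
struct dep_table { unsigned int count; struct dep_entry entries[4]; };

struct clocks_out { unsigned int count; unsigned int clock[4]; };

static int get_sclks(enum pp_table_version ver,
		     const struct dep_table *v1_table,
		     const struct dep_table *v0_table,
		     struct clocks_out *clocks)
{
	const struct dep_table *tab;
	unsigned int i;

	if (ver == PP_TABLE_V1) {
		if (!v1_table)
			return -1;	/* -EINVAL in the driver */
		tab = v1_table;
	} else {
		tab = v0_table;		/* legacy dyn_state table */
	}

	for (i = 0; i < tab->count; i++)
		clocks->clock[clocks->count++] = tab->entries[i].clk;

	return 0;
}

int main(void)
{
	struct dep_table v1 = { 2, { { 300000 }, { 600000 } } };
	struct clocks_out out = { 0 };

	if (!get_sclks(PP_TABLE_V1, &v1, NULL, &out))
		printf("%u clocks, first %u\n", out.count, out.clock[0]);
	return 0;
}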
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_thermal.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_thermal.c index fb6c6f6106d5..29d0319b22e6 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_thermal.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_thermal.c | |||
@@ -30,7 +30,7 @@ int smu7_fan_ctrl_get_fan_speed_info(struct pp_hwmgr *hwmgr, | |||
30 | struct phm_fan_speed_info *fan_speed_info) | 30 | struct phm_fan_speed_info *fan_speed_info) |
31 | { | 31 | { |
32 | if (hwmgr->thermal_controller.fanInfo.bNoFan) | 32 | if (hwmgr->thermal_controller.fanInfo.bNoFan) |
33 | return 0; | 33 | return -ENODEV; |
34 | 34 | ||
35 | fan_speed_info->supports_percent_read = true; | 35 | fan_speed_info->supports_percent_read = true; |
36 | fan_speed_info->supports_percent_write = true; | 36 | fan_speed_info->supports_percent_write = true; |
@@ -60,7 +60,7 @@ int smu7_fan_ctrl_get_fan_speed_percent(struct pp_hwmgr *hwmgr, | |||
60 | uint64_t tmp64; | 60 | uint64_t tmp64; |
61 | 61 | ||
62 | if (hwmgr->thermal_controller.fanInfo.bNoFan) | 62 | if (hwmgr->thermal_controller.fanInfo.bNoFan) |
63 | return 0; | 63 | return -ENODEV; |
64 | 64 | ||
65 | duty100 = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, | 65 | duty100 = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, |
66 | CG_FDO_CTRL1, FMAX_DUTY100); | 66 | CG_FDO_CTRL1, FMAX_DUTY100); |
@@ -89,7 +89,7 @@ int smu7_fan_ctrl_get_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t *speed) | |||
89 | if (hwmgr->thermal_controller.fanInfo.bNoFan || | 89 | if (hwmgr->thermal_controller.fanInfo.bNoFan || |
90 | (hwmgr->thermal_controller.fanInfo. | 90 | (hwmgr->thermal_controller.fanInfo. |
91 | ucTachometerPulsesPerRevolution == 0)) | 91 | ucTachometerPulsesPerRevolution == 0)) |
92 | return 0; | 92 | return -ENODEV; |
93 | 93 | ||
94 | tach_period = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, | 94 | tach_period = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, |
95 | CG_TACH_STATUS, TACH_PERIOD); | 95 | CG_TACH_STATUS, TACH_PERIOD); |
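
The thermal helpers now return -ENODEV when no fan is present (or the tachometer reports zero pulses per revolution) instead of silently returning 0, so callers can distinguish "no fan hardware" from "fan speed read successfully as 0". The short caller-side illustration below uses an invented get_fan_speed_rpm() with the same contract.

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

/* Sketch of smu7_fan_ctrl_get_fan_speed_rpm() after the change. */
static int get_fan_speed_rpm(bool no_fan, unsigned int *speed)
{
	if (no_fan)
		return -ENODEV;	/* was: return 0 with *speed untouched */

	*speed = 1200;		/* pretend we read the tachometer */
	return 0;
}

int main(void)
{
	unsigned int rpm = 0;
	int ret = get_fan_speed_rpm(true, &rpm);

	if (ret == -ENODEV)
		puts("no fan: skip exposing the fan speed attribute");
	else
		printf("fan speed: %u rpm\n", rpm);
	return 0;
}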
diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c index 910b8d5b21c5..ffe1f85ce300 100644 --- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c +++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c | |||
@@ -34,9 +34,6 @@ static bool amd_sched_entity_is_ready(struct amd_sched_entity *entity); | |||
34 | static void amd_sched_wakeup(struct amd_gpu_scheduler *sched); | 34 | static void amd_sched_wakeup(struct amd_gpu_scheduler *sched); |
35 | static void amd_sched_process_job(struct fence *f, struct fence_cb *cb); | 35 | static void amd_sched_process_job(struct fence *f, struct fence_cb *cb); |
36 | 36 | ||
37 | struct kmem_cache *sched_fence_slab; | ||
38 | atomic_t sched_fence_slab_ref = ATOMIC_INIT(0); | ||
39 | |||
40 | /* Initialize a given run queue struct */ | 37 | /* Initialize a given run queue struct */ |
41 | static void amd_sched_rq_init(struct amd_sched_rq *rq) | 38 | static void amd_sched_rq_init(struct amd_sched_rq *rq) |
42 | { | 39 | { |
@@ -618,13 +615,6 @@ int amd_sched_init(struct amd_gpu_scheduler *sched, | |||
618 | INIT_LIST_HEAD(&sched->ring_mirror_list); | 615 | INIT_LIST_HEAD(&sched->ring_mirror_list); |
619 | spin_lock_init(&sched->job_list_lock); | 616 | spin_lock_init(&sched->job_list_lock); |
620 | atomic_set(&sched->hw_rq_count, 0); | 617 | atomic_set(&sched->hw_rq_count, 0); |
621 | if (atomic_inc_return(&sched_fence_slab_ref) == 1) { | ||
622 | sched_fence_slab = kmem_cache_create( | ||
623 | "amd_sched_fence", sizeof(struct amd_sched_fence), 0, | ||
624 | SLAB_HWCACHE_ALIGN, NULL); | ||
625 | if (!sched_fence_slab) | ||
626 | return -ENOMEM; | ||
627 | } | ||
628 | 618 | ||
629 | /* Each scheduler will run on a separate kernel thread */ | 619 | /* Each scheduler will run on a separate kernel thread */ |
630 | sched->thread = kthread_run(amd_sched_main, sched, sched->name); | 620 | sched->thread = kthread_run(amd_sched_main, sched, sched->name); |
@@ -645,7 +635,4 @@ void amd_sched_fini(struct amd_gpu_scheduler *sched) | |||
645 | { | 635 | { |
646 | if (sched->thread) | 636 | if (sched->thread) |
647 | kthread_stop(sched->thread); | 637 | kthread_stop(sched->thread); |
648 | rcu_barrier(); | ||
649 | if (atomic_dec_and_test(&sched_fence_slab_ref)) | ||
650 | kmem_cache_destroy(sched_fence_slab); | ||
651 | } | 638 | } |
diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h index 7cbbbfb502ef..51068e6c3d9a 100644 --- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h +++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h | |||
@@ -30,9 +30,6 @@ | |||
30 | struct amd_gpu_scheduler; | 30 | struct amd_gpu_scheduler; |
31 | struct amd_sched_rq; | 31 | struct amd_sched_rq; |
32 | 32 | ||
33 | extern struct kmem_cache *sched_fence_slab; | ||
34 | extern atomic_t sched_fence_slab_ref; | ||
35 | |||
36 | /** | 33 | /** |
37 | * A scheduler entity is a wrapper around a job queue or a group | 34 | * A scheduler entity is a wrapper around a job queue or a group |
38 | * of other entities. Entities take turns emitting jobs from their | 35 | * of other entities. Entities take turns emitting jobs from their |
@@ -145,6 +142,9 @@ void amd_sched_entity_fini(struct amd_gpu_scheduler *sched, | |||
145 | struct amd_sched_entity *entity); | 142 | struct amd_sched_entity *entity); |
146 | void amd_sched_entity_push_job(struct amd_sched_job *sched_job); | 143 | void amd_sched_entity_push_job(struct amd_sched_job *sched_job); |
147 | 144 | ||
145 | int amd_sched_fence_slab_init(void); | ||
146 | void amd_sched_fence_slab_fini(void); | ||
147 | |||
148 | struct amd_sched_fence *amd_sched_fence_create( | 148 | struct amd_sched_fence *amd_sched_fence_create( |
149 | struct amd_sched_entity *s_entity, void *owner); | 149 | struct amd_sched_entity *s_entity, void *owner); |
150 | void amd_sched_fence_scheduled(struct amd_sched_fence *fence); | 150 | void amd_sched_fence_scheduled(struct amd_sched_fence *fence); |
diff --git a/drivers/gpu/drm/amd/scheduler/sched_fence.c b/drivers/gpu/drm/amd/scheduler/sched_fence.c index 3653b5a40494..88fc2d662579 100644 --- a/drivers/gpu/drm/amd/scheduler/sched_fence.c +++ b/drivers/gpu/drm/amd/scheduler/sched_fence.c | |||
@@ -27,6 +27,25 @@ | |||
27 | #include <drm/drmP.h> | 27 | #include <drm/drmP.h> |
28 | #include "gpu_scheduler.h" | 28 | #include "gpu_scheduler.h" |
29 | 29 | ||
30 | static struct kmem_cache *sched_fence_slab; | ||
31 | |||
32 | int amd_sched_fence_slab_init(void) | ||
33 | { | ||
34 | sched_fence_slab = kmem_cache_create( | ||
35 | "amd_sched_fence", sizeof(struct amd_sched_fence), 0, | ||
36 | SLAB_HWCACHE_ALIGN, NULL); | ||
37 | if (!sched_fence_slab) | ||
38 | return -ENOMEM; | ||
39 | |||
40 | return 0; | ||
41 | } | ||
42 | |||
43 | void amd_sched_fence_slab_fini(void) | ||
44 | { | ||
45 | rcu_barrier(); | ||
46 | kmem_cache_destroy(sched_fence_slab); | ||
47 | } | ||
48 | |||
30 | struct amd_sched_fence *amd_sched_fence_create(struct amd_sched_entity *entity, | 49 | struct amd_sched_fence *amd_sched_fence_create(struct amd_sched_entity *entity, |
31 | void *owner) | 50 | void *owner) |
32 | { | 51 | { |
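
The scheduler hunks replace the lazily created, refcounted fence slab inside amd_sched_init() with a dedicated amd_sched_fence_slab_init()/_fini() pair that the amdgpu module init/exit paths (shown earlier in amdgpu_drv.c) call exactly once. The userspace analog below models the same move from per-instance lazy setup to one module-scope cache; the "cache" is just a malloc'd pool standing in for kmem_cache_create()/kmem_cache_destroy().

#include <stdio.h>
#include <stdlib.h>

/* Module-scope cache, created exactly once at load time. */
static void *fence_cache;

static int fence_cache_init(void)
{
	fence_cache = malloc(4096);	/* stands in for kmem_cache_create() */
	return fence_cache ? 0 : -1;
}

static void fence_cache_fini(void)
{
	free(fence_cache);		/* stands in for kmem_cache_destroy() */
	fence_cache = NULL;
}

/* Per-scheduler init no longer touches the cache at all. */
static int scheduler_init(const char *name)
{
	printf("scheduler %s ready (cache %p)\n", name, fence_cache);
	return 0;
}

int main(void)
{
	if (fence_cache_init())		/* module_init path */
		return 1;

	scheduler_init("gfx");		/* any number of schedulers */
	scheduler_init("compute");

	fence_cache_fini();		/* module_exit path */
	return 0;
}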
diff --git a/drivers/gpu/drm/arc/arcpgu_hdmi.c b/drivers/gpu/drm/arc/arcpgu_hdmi.c index b7a8b2ac4055..b69c66b4897e 100644 --- a/drivers/gpu/drm/arc/arcpgu_hdmi.c +++ b/drivers/gpu/drm/arc/arcpgu_hdmi.c | |||
@@ -14,170 +14,45 @@ | |||
14 | * | 14 | * |
15 | */ | 15 | */ |
16 | 16 | ||
17 | #include <drm/drm_crtc_helper.h> | 17 | #include <drm/drm_crtc.h> |
18 | #include <drm/drm_encoder_slave.h> | 18 | #include <drm/drm_encoder_slave.h> |
19 | #include <drm/drm_atomic_helper.h> | ||
20 | 19 | ||
21 | #include "arcpgu.h" | 20 | #include "arcpgu.h" |
22 | 21 | ||
23 | struct arcpgu_drm_connector { | ||
24 | struct drm_connector connector; | ||
25 | struct drm_encoder_slave *encoder_slave; | ||
26 | }; | ||
27 | |||
28 | static int arcpgu_drm_connector_get_modes(struct drm_connector *connector) | ||
29 | { | ||
30 | const struct drm_encoder_slave_funcs *sfuncs; | ||
31 | struct drm_encoder_slave *slave; | ||
32 | struct arcpgu_drm_connector *con = | ||
33 | container_of(connector, struct arcpgu_drm_connector, connector); | ||
34 | |||
35 | slave = con->encoder_slave; | ||
36 | if (slave == NULL) { | ||
37 | dev_err(connector->dev->dev, | ||
38 | "connector_get_modes: cannot find slave encoder for connector\n"); | ||
39 | return 0; | ||
40 | } | ||
41 | |||
42 | sfuncs = slave->slave_funcs; | ||
43 | if (sfuncs->get_modes == NULL) | ||
44 | return 0; | ||
45 | |||
46 | return sfuncs->get_modes(&slave->base, connector); | ||
47 | } | ||
48 | |||
49 | static enum drm_connector_status | ||
50 | arcpgu_drm_connector_detect(struct drm_connector *connector, bool force) | ||
51 | { | ||
52 | enum drm_connector_status status = connector_status_unknown; | ||
53 | const struct drm_encoder_slave_funcs *sfuncs; | ||
54 | struct drm_encoder_slave *slave; | ||
55 | |||
56 | struct arcpgu_drm_connector *con = | ||
57 | container_of(connector, struct arcpgu_drm_connector, connector); | ||
58 | |||
59 | slave = con->encoder_slave; | ||
60 | if (slave == NULL) { | ||
61 | dev_err(connector->dev->dev, | ||
62 | "connector_detect: cannot find slave encoder for connector\n"); | ||
63 | return status; | ||
64 | } | ||
65 | |||
66 | sfuncs = slave->slave_funcs; | ||
67 | if (sfuncs && sfuncs->detect) | ||
68 | return sfuncs->detect(&slave->base, connector); | ||
69 | |||
70 | dev_err(connector->dev->dev, "connector_detect: could not detect slave funcs\n"); | ||
71 | return status; | ||
72 | } | ||
73 | |||
74 | static void arcpgu_drm_connector_destroy(struct drm_connector *connector) | ||
75 | { | ||
76 | drm_connector_unregister(connector); | ||
77 | drm_connector_cleanup(connector); | ||
78 | } | ||
79 | |||
80 | static const struct drm_connector_helper_funcs | ||
81 | arcpgu_drm_connector_helper_funcs = { | ||
82 | .get_modes = arcpgu_drm_connector_get_modes, | ||
83 | }; | ||
84 | |||
85 | static const struct drm_connector_funcs arcpgu_drm_connector_funcs = { | ||
86 | .dpms = drm_helper_connector_dpms, | ||
87 | .reset = drm_atomic_helper_connector_reset, | ||
88 | .detect = arcpgu_drm_connector_detect, | ||
89 | .fill_modes = drm_helper_probe_single_connector_modes, | ||
90 | .destroy = arcpgu_drm_connector_destroy, | ||
91 | .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, | ||
92 | .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, | ||
93 | }; | ||
94 | |||
95 | static struct drm_encoder_helper_funcs arcpgu_drm_encoder_helper_funcs = { | ||
96 | .dpms = drm_i2c_encoder_dpms, | ||
97 | .mode_fixup = drm_i2c_encoder_mode_fixup, | ||
98 | .mode_set = drm_i2c_encoder_mode_set, | ||
99 | .prepare = drm_i2c_encoder_prepare, | ||
100 | .commit = drm_i2c_encoder_commit, | ||
101 | .detect = drm_i2c_encoder_detect, | ||
102 | }; | ||
103 | |||
104 | static struct drm_encoder_funcs arcpgu_drm_encoder_funcs = { | 22 | static struct drm_encoder_funcs arcpgu_drm_encoder_funcs = { |
105 | .destroy = drm_encoder_cleanup, | 23 | .destroy = drm_encoder_cleanup, |
106 | }; | 24 | }; |
107 | 25 | ||
108 | int arcpgu_drm_hdmi_init(struct drm_device *drm, struct device_node *np) | 26 | int arcpgu_drm_hdmi_init(struct drm_device *drm, struct device_node *np) |
109 | { | 27 | { |
110 | struct arcpgu_drm_connector *arcpgu_connector; | 28 | struct drm_encoder *encoder; |
111 | struct drm_i2c_encoder_driver *driver; | 29 | struct drm_bridge *bridge; |
112 | struct drm_encoder_slave *encoder; | 30 | |
113 | struct drm_connector *connector; | 31 | int ret = 0; |
114 | struct i2c_client *i2c_slave; | ||
115 | int ret; | ||
116 | 32 | ||
117 | encoder = devm_kzalloc(drm->dev, sizeof(*encoder), GFP_KERNEL); | 33 | encoder = devm_kzalloc(drm->dev, sizeof(*encoder), GFP_KERNEL); |
118 | if (encoder == NULL) | 34 | if (encoder == NULL) |
119 | return -ENOMEM; | 35 | return -ENOMEM; |
120 | 36 | ||
121 | i2c_slave = of_find_i2c_device_by_node(np); | 37 | /* Locate drm bridge from the hdmi encoder DT node */ |
122 | if (!i2c_slave || !i2c_get_clientdata(i2c_slave)) { | 38 | bridge = of_drm_find_bridge(np); |
123 | dev_err(drm->dev, "failed to find i2c slave encoder\n"); | 39 | if (!bridge) |
124 | return -EPROBE_DEFER; | ||
125 | } | ||
126 | |||
127 | if (i2c_slave->dev.driver == NULL) { | ||
128 | dev_err(drm->dev, "failed to find i2c slave driver\n"); | ||
129 | return -EPROBE_DEFER; | 40 | return -EPROBE_DEFER; |
130 | } | ||
131 | 41 | ||
132 | driver = | 42 | encoder->possible_crtcs = 1; |
133 | to_drm_i2c_encoder_driver(to_i2c_driver(i2c_slave->dev.driver)); | 43 | encoder->possible_clones = 0; |
134 | ret = driver->encoder_init(i2c_slave, drm, encoder); | 44 | ret = drm_encoder_init(drm, encoder, &arcpgu_drm_encoder_funcs, |
135 | if (ret) { | ||
136 | dev_err(drm->dev, "failed to initialize i2c encoder slave\n"); | ||
137 | return ret; | ||
138 | } | ||
139 | |||
140 | encoder->base.possible_crtcs = 1; | ||
141 | encoder->base.possible_clones = 0; | ||
142 | ret = drm_encoder_init(drm, &encoder->base, &arcpgu_drm_encoder_funcs, | ||
143 | DRM_MODE_ENCODER_TMDS, NULL); | 45 | DRM_MODE_ENCODER_TMDS, NULL); |
144 | if (ret) | 46 | if (ret) |
145 | return ret; | 47 | return ret; |
146 | 48 | ||
147 | drm_encoder_helper_add(&encoder->base, | 49 | /* Link drm_bridge to encoder */ |
148 | &arcpgu_drm_encoder_helper_funcs); | 50 | bridge->encoder = encoder; |
149 | 51 | encoder->bridge = bridge; | |
150 | arcpgu_connector = devm_kzalloc(drm->dev, sizeof(*arcpgu_connector), | ||
151 | GFP_KERNEL); | ||
152 | if (!arcpgu_connector) { | ||
153 | ret = -ENOMEM; | ||
154 | goto error_encoder_cleanup; | ||
155 | } | ||
156 | |||
157 | connector = &arcpgu_connector->connector; | ||
158 | drm_connector_helper_add(connector, &arcpgu_drm_connector_helper_funcs); | ||
159 | ret = drm_connector_init(drm, connector, &arcpgu_drm_connector_funcs, | ||
160 | DRM_MODE_CONNECTOR_HDMIA); | ||
161 | if (ret < 0) { | ||
162 | dev_err(drm->dev, "failed to initialize drm connector\n"); | ||
163 | goto error_encoder_cleanup; | ||
164 | } | ||
165 | 52 | ||
166 | ret = drm_mode_connector_attach_encoder(connector, &encoder->base); | 53 | ret = drm_bridge_attach(drm, bridge); |
167 | if (ret < 0) { | 54 | if (ret) |
168 | dev_err(drm->dev, "could not attach connector to encoder\n"); | 55 | drm_encoder_cleanup(encoder); |
169 | drm_connector_unregister(connector); | ||
170 | goto error_connector_cleanup; | ||
171 | } | ||
172 | |||
173 | arcpgu_connector->encoder_slave = encoder; | ||
174 | |||
175 | return 0; | ||
176 | |||
177 | error_connector_cleanup: | ||
178 | drm_connector_cleanup(connector); | ||
179 | 56 | ||
180 | error_encoder_cleanup: | ||
181 | drm_encoder_cleanup(&encoder->base); | ||
182 | return ret; | 57 | return ret; |
183 | } | 58 | } |
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.c index b2d5e188b1b8..deb57435cc89 100644 --- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.c +++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.c | |||
@@ -25,8 +25,13 @@ | |||
25 | static void fsl_dcu_drm_crtc_atomic_flush(struct drm_crtc *crtc, | 25 | static void fsl_dcu_drm_crtc_atomic_flush(struct drm_crtc *crtc, |
26 | struct drm_crtc_state *old_crtc_state) | 26 | struct drm_crtc_state *old_crtc_state) |
27 | { | 27 | { |
28 | struct drm_device *dev = crtc->dev; | ||
29 | struct fsl_dcu_drm_device *fsl_dev = dev->dev_private; | ||
28 | struct drm_pending_vblank_event *event = crtc->state->event; | 30 | struct drm_pending_vblank_event *event = crtc->state->event; |
29 | 31 | ||
32 | regmap_write(fsl_dev->regmap, | ||
33 | DCU_UPDATE_MODE, DCU_UPDATE_MODE_READREG); | ||
34 | |||
30 | if (event) { | 35 | if (event) { |
31 | crtc->state->event = NULL; | 36 | crtc->state->event = NULL; |
32 | 37 | ||
@@ -39,11 +44,15 @@ static void fsl_dcu_drm_crtc_atomic_flush(struct drm_crtc *crtc, | |||
39 | } | 44 | } |
40 | } | 45 | } |
41 | 46 | ||
42 | static void fsl_dcu_drm_disable_crtc(struct drm_crtc *crtc) | 47 | static void fsl_dcu_drm_crtc_atomic_disable(struct drm_crtc *crtc, |
48 | struct drm_crtc_state *old_crtc_state) | ||
43 | { | 49 | { |
44 | struct drm_device *dev = crtc->dev; | 50 | struct drm_device *dev = crtc->dev; |
45 | struct fsl_dcu_drm_device *fsl_dev = dev->dev_private; | 51 | struct fsl_dcu_drm_device *fsl_dev = dev->dev_private; |
46 | 52 | ||
53 | /* always disable planes on the CRTC */ | ||
54 | drm_atomic_helper_disable_planes_on_crtc(old_crtc_state, true); | ||
55 | |||
47 | drm_crtc_vblank_off(crtc); | 56 | drm_crtc_vblank_off(crtc); |
48 | 57 | ||
49 | regmap_update_bits(fsl_dev->regmap, DCU_DCU_MODE, | 58 | regmap_update_bits(fsl_dev->regmap, DCU_DCU_MODE, |
@@ -122,8 +131,8 @@ static void fsl_dcu_drm_crtc_mode_set_nofb(struct drm_crtc *crtc) | |||
122 | } | 131 | } |
123 | 132 | ||
124 | static const struct drm_crtc_helper_funcs fsl_dcu_drm_crtc_helper_funcs = { | 133 | static const struct drm_crtc_helper_funcs fsl_dcu_drm_crtc_helper_funcs = { |
134 | .atomic_disable = fsl_dcu_drm_crtc_atomic_disable, | ||
125 | .atomic_flush = fsl_dcu_drm_crtc_atomic_flush, | 135 | .atomic_flush = fsl_dcu_drm_crtc_atomic_flush, |
126 | .disable = fsl_dcu_drm_disable_crtc, | ||
127 | .enable = fsl_dcu_drm_crtc_enable, | 136 | .enable = fsl_dcu_drm_crtc_enable, |
128 | .mode_set_nofb = fsl_dcu_drm_crtc_mode_set_nofb, | 137 | .mode_set_nofb = fsl_dcu_drm_crtc_mode_set_nofb, |
129 | }; | 138 | }; |
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c index e04efbed1a54..cc2fde2ae5ef 100644 --- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c +++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c | |||
@@ -59,8 +59,6 @@ static int fsl_dcu_drm_irq_init(struct drm_device *dev) | |||
59 | 59 | ||
60 | regmap_write(fsl_dev->regmap, DCU_INT_STATUS, 0); | 60 | regmap_write(fsl_dev->regmap, DCU_INT_STATUS, 0); |
61 | regmap_write(fsl_dev->regmap, DCU_INT_MASK, ~0); | 61 | regmap_write(fsl_dev->regmap, DCU_INT_MASK, ~0); |
62 | regmap_write(fsl_dev->regmap, DCU_UPDATE_MODE, | ||
63 | DCU_UPDATE_MODE_READREG); | ||
64 | 62 | ||
65 | return ret; | 63 | return ret; |
66 | } | 64 | } |
@@ -139,8 +137,6 @@ static irqreturn_t fsl_dcu_drm_irq(int irq, void *arg) | |||
139 | drm_handle_vblank(dev, 0); | 137 | drm_handle_vblank(dev, 0); |
140 | 138 | ||
141 | regmap_write(fsl_dev->regmap, DCU_INT_STATUS, int_status); | 139 | regmap_write(fsl_dev->regmap, DCU_INT_STATUS, int_status); |
142 | regmap_write(fsl_dev->regmap, DCU_UPDATE_MODE, | ||
143 | DCU_UPDATE_MODE_READREG); | ||
144 | 140 | ||
145 | return IRQ_HANDLED; | 141 | return IRQ_HANDLED; |
146 | } | 142 | } |
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c index 9e6f7d8112b3..a99f48847420 100644 --- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c +++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c | |||
@@ -160,11 +160,6 @@ static void fsl_dcu_drm_plane_atomic_update(struct drm_plane *plane, | |||
160 | DCU_LAYER_POST_SKIP(0) | | 160 | DCU_LAYER_POST_SKIP(0) | |
161 | DCU_LAYER_PRE_SKIP(0)); | 161 | DCU_LAYER_PRE_SKIP(0)); |
162 | } | 162 | } |
163 | regmap_update_bits(fsl_dev->regmap, DCU_DCU_MODE, | ||
164 | DCU_MODE_DCU_MODE_MASK, | ||
165 | DCU_MODE_DCU_MODE(DCU_MODE_NORMAL)); | ||
166 | regmap_write(fsl_dev->regmap, | ||
167 | DCU_UPDATE_MODE, DCU_UPDATE_MODE_READREG); | ||
168 | 163 | ||
169 | return; | 164 | return; |
170 | } | 165 | } |
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 23960de81b57..91ab7e9d6d2e 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c | |||
@@ -1806,7 +1806,7 @@ int i915_gem_fault(struct vm_area_struct *area, struct vm_fault *vmf) | |||
1806 | /* Use a partial view if it is bigger than available space */ | 1806 | /* Use a partial view if it is bigger than available space */ |
1807 | chunk_size = MIN_CHUNK_PAGES; | 1807 | chunk_size = MIN_CHUNK_PAGES; |
1808 | if (i915_gem_object_is_tiled(obj)) | 1808 | if (i915_gem_object_is_tiled(obj)) |
1809 | chunk_size = max(chunk_size, tile_row_pages(obj)); | 1809 | chunk_size = roundup(chunk_size, tile_row_pages(obj)); |
1810 | 1810 | ||
1811 | memset(&view, 0, sizeof(view)); | 1811 | memset(&view, 0, sizeof(view)); |
1812 | view.type = I915_GGTT_VIEW_PARTIAL; | 1812 | view.type = I915_GGTT_VIEW_PARTIAL; |
@@ -3543,8 +3543,22 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj, | |||
3543 | if (view->type == I915_GGTT_VIEW_NORMAL) | 3543 | if (view->type == I915_GGTT_VIEW_NORMAL) |
3544 | vma = i915_gem_object_ggtt_pin(obj, view, 0, alignment, | 3544 | vma = i915_gem_object_ggtt_pin(obj, view, 0, alignment, |
3545 | PIN_MAPPABLE | PIN_NONBLOCK); | 3545 | PIN_MAPPABLE | PIN_NONBLOCK); |
3546 | if (IS_ERR(vma)) | 3546 | if (IS_ERR(vma)) { |
3547 | vma = i915_gem_object_ggtt_pin(obj, view, 0, alignment, 0); | 3547 | struct drm_i915_private *i915 = to_i915(obj->base.dev); |
3548 | unsigned int flags; | ||
3549 | |||
3550 | /* Valleyview is definitely limited to scanning out the first | ||
3551 | * 512MiB. Lets presume this behaviour was inherited from the | ||
3552 | * g4x display engine and that all earlier gen are similarly | ||
3553 | * limited. Testing suggests that it is a little more | ||
3554 | * complicated than this. For example, Cherryview appears quite | ||
3555 | * happy to scanout from anywhere within its global aperture. | ||
3556 | */ | ||
3557 | flags = 0; | ||
3558 | if (HAS_GMCH_DISPLAY(i915)) | ||
3559 | flags = PIN_MAPPABLE; | ||
3560 | vma = i915_gem_object_ggtt_pin(obj, view, 0, alignment, flags); | ||
3561 | } | ||
3548 | if (IS_ERR(vma)) | 3562 | if (IS_ERR(vma)) |
3549 | goto err_unpin_display; | 3563 | goto err_unpin_display; |
3550 | 3564 | ||
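
The first i915_gem.c hunk changes the partial-view chunk size for tiled objects from max() to roundup(): max() only guarantees the chunk is at least one tile row, while roundup() makes it a whole number of rows so the partial GGTT view stays row-aligned. A few lines of arithmetic showing the difference, with a locally restated roundup() macro and hypothetical page counts:

#include <stdio.h>

/* Same shape as the kernel's roundup() for positive integers. */
#define ROUNDUP(x, y)	((((x) + (y) - 1) / (y)) * (y))
#define MAX(a, b)	((a) > (b) ? (a) : (b))

int main(void)
{
	unsigned int min_chunk_pages = 64;	/* MIN_CHUNK_PAGES-style value */
	unsigned int tile_row_pages = 48;	/* hypothetical tile row size */

	/* max() keeps 64, which is not a whole number of 48-page rows */
	printf("max:     %u\n", MAX(min_chunk_pages, tile_row_pages));
	/* roundup() yields 96 = 2 rows, so the view is row-aligned */
	printf("roundup: %u\n", ROUNDUP(min_chunk_pages, tile_row_pages));
	return 0;
}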
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c index 7adb4c77cc7f..a218c2e395e7 100644 --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c | |||
@@ -1281,6 +1281,12 @@ i915_gem_validate_context(struct drm_device *dev, struct drm_file *file, | |||
1281 | return ctx; | 1281 | return ctx; |
1282 | } | 1282 | } |
1283 | 1283 | ||
1284 | static bool gpu_write_needs_clflush(struct drm_i915_gem_object *obj) | ||
1285 | { | ||
1286 | return !(obj->cache_level == I915_CACHE_NONE || | ||
1287 | obj->cache_level == I915_CACHE_WT); | ||
1288 | } | ||
1289 | |||
1284 | void i915_vma_move_to_active(struct i915_vma *vma, | 1290 | void i915_vma_move_to_active(struct i915_vma *vma, |
1285 | struct drm_i915_gem_request *req, | 1291 | struct drm_i915_gem_request *req, |
1286 | unsigned int flags) | 1292 | unsigned int flags) |
@@ -1311,6 +1317,8 @@ void i915_vma_move_to_active(struct i915_vma *vma, | |||
1311 | 1317 | ||
1312 | /* update for the implicit flush after a batch */ | 1318 | /* update for the implicit flush after a batch */ |
1313 | obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS; | 1319 | obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS; |
1320 | if (!obj->cache_dirty && gpu_write_needs_clflush(obj)) | ||
1321 | obj->cache_dirty = true; | ||
1314 | } | 1322 | } |
1315 | 1323 | ||
1316 | if (flags & EXEC_OBJECT_NEEDS_FENCE) | 1324 | if (flags & EXEC_OBJECT_NEEDS_FENCE) |
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c index 1f8af87c6294..cf2560708e03 100644 --- a/drivers/gpu/drm/i915/intel_bios.c +++ b/drivers/gpu/drm/i915/intel_bios.c | |||
@@ -1143,7 +1143,7 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port, | |||
1143 | if (!child) | 1143 | if (!child) |
1144 | return; | 1144 | return; |
1145 | 1145 | ||
1146 | aux_channel = child->raw[25]; | 1146 | aux_channel = child->common.aux_channel; |
1147 | ddc_pin = child->common.ddc_pin; | 1147 | ddc_pin = child->common.ddc_pin; |
1148 | 1148 | ||
1149 | is_dvi = child->common.device_type & DEVICE_TYPE_TMDS_DVI_SIGNALING; | 1149 | is_dvi = child->common.device_type & DEVICE_TYPE_TMDS_DVI_SIGNALING; |
@@ -1673,7 +1673,8 @@ bool intel_bios_is_port_edp(struct drm_i915_private *dev_priv, enum port port) | |||
1673 | return false; | 1673 | return false; |
1674 | } | 1674 | } |
1675 | 1675 | ||
1676 | bool intel_bios_is_port_dp_dual_mode(struct drm_i915_private *dev_priv, enum port port) | 1676 | static bool child_dev_is_dp_dual_mode(const union child_device_config *p_child, |
1677 | enum port port) | ||
1677 | { | 1678 | { |
1678 | static const struct { | 1679 | static const struct { |
1679 | u16 dp, hdmi; | 1680 | u16 dp, hdmi; |
@@ -1687,22 +1688,35 @@ bool intel_bios_is_port_dp_dual_mode(struct drm_i915_private *dev_priv, enum por | |||
1687 | [PORT_D] = { DVO_PORT_DPD, DVO_PORT_HDMID, }, | 1688 | [PORT_D] = { DVO_PORT_DPD, DVO_PORT_HDMID, }, |
1688 | [PORT_E] = { DVO_PORT_DPE, DVO_PORT_HDMIE, }, | 1689 | [PORT_E] = { DVO_PORT_DPE, DVO_PORT_HDMIE, }, |
1689 | }; | 1690 | }; |
1690 | int i; | ||
1691 | 1691 | ||
1692 | if (port == PORT_A || port >= ARRAY_SIZE(port_mapping)) | 1692 | if (port == PORT_A || port >= ARRAY_SIZE(port_mapping)) |
1693 | return false; | 1693 | return false; |
1694 | 1694 | ||
1695 | if (!dev_priv->vbt.child_dev_num) | 1695 | if ((p_child->common.device_type & DEVICE_TYPE_DP_DUAL_MODE_BITS) != |
1696 | (DEVICE_TYPE_DP_DUAL_MODE & DEVICE_TYPE_DP_DUAL_MODE_BITS)) | ||
1696 | return false; | 1697 | return false; |
1697 | 1698 | ||
1699 | if (p_child->common.dvo_port == port_mapping[port].dp) | ||
1700 | return true; | ||
1701 | |||
1702 | /* Only accept a HDMI dvo_port as DP++ if it has an AUX channel */ | ||
1703 | if (p_child->common.dvo_port == port_mapping[port].hdmi && | ||
1704 | p_child->common.aux_channel != 0) | ||
1705 | return true; | ||
1706 | |||
1707 | return false; | ||
1708 | } | ||
1709 | |||
1710 | bool intel_bios_is_port_dp_dual_mode(struct drm_i915_private *dev_priv, | ||
1711 | enum port port) | ||
1712 | { | ||
1713 | int i; | ||
1714 | |||
1698 | for (i = 0; i < dev_priv->vbt.child_dev_num; i++) { | 1715 | for (i = 0; i < dev_priv->vbt.child_dev_num; i++) { |
1699 | const union child_device_config *p_child = | 1716 | const union child_device_config *p_child = |
1700 | &dev_priv->vbt.child_dev[i]; | 1717 | &dev_priv->vbt.child_dev[i]; |
1701 | 1718 | ||
1702 | if ((p_child->common.dvo_port == port_mapping[port].dp || | 1719 | if (child_dev_is_dp_dual_mode(p_child, port)) |
1703 | p_child->common.dvo_port == port_mapping[port].hdmi) && | ||
1704 | (p_child->common.device_type & DEVICE_TYPE_DP_DUAL_MODE_BITS) == | ||
1705 | (DEVICE_TYPE_DP_DUAL_MODE & DEVICE_TYPE_DP_DUAL_MODE_BITS)) | ||
1706 | return true; | 1720 | return true; |
1707 | } | 1721 | } |
1708 | 1722 | ||
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 0ad1879bfd9d..81c11499bcf0 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c | |||
@@ -10243,6 +10243,29 @@ static void bxt_modeset_commit_cdclk(struct drm_atomic_state *old_state) | |||
10243 | bxt_set_cdclk(to_i915(dev), req_cdclk); | 10243 | bxt_set_cdclk(to_i915(dev), req_cdclk); |
10244 | } | 10244 | } |
10245 | 10245 | ||
10246 | static int bdw_adjust_min_pipe_pixel_rate(struct intel_crtc_state *crtc_state, | ||
10247 | int pixel_rate) | ||
10248 | { | ||
10249 | struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev); | ||
10250 | |||
10251 | /* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */ | ||
10252 | if (IS_BROADWELL(dev_priv) && crtc_state->ips_enabled) | ||
10253 | pixel_rate = DIV_ROUND_UP(pixel_rate * 100, 95); | ||
10254 | |||
10255 | /* BSpec says "Do not use DisplayPort with CDCLK less than | ||
10256 | * 432 MHz, audio enabled, port width x4, and link rate | ||
10257 | * HBR2 (5.4 GHz), or else there may be audio corruption or | ||
10258 | * screen corruption." | ||
10259 | */ | ||
10260 | if (intel_crtc_has_dp_encoder(crtc_state) && | ||
10261 | crtc_state->has_audio && | ||
10262 | crtc_state->port_clock >= 540000 && | ||
10263 | crtc_state->lane_count == 4) | ||
10264 | pixel_rate = max(432000, pixel_rate); | ||
10265 | |||
10266 | return pixel_rate; | ||
10267 | } | ||
10268 | |||
10246 | /* compute the max rate for new configuration */ | 10269 | /* compute the max rate for new configuration */ |
10247 | static int ilk_max_pixel_rate(struct drm_atomic_state *state) | 10270 | static int ilk_max_pixel_rate(struct drm_atomic_state *state) |
10248 | { | 10271 | { |
@@ -10268,9 +10291,9 @@ static int ilk_max_pixel_rate(struct drm_atomic_state *state) | |||
10268 | 10291 | ||
10269 | pixel_rate = ilk_pipe_pixel_rate(crtc_state); | 10292 | pixel_rate = ilk_pipe_pixel_rate(crtc_state); |
10270 | 10293 | ||
10271 | /* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */ | 10294 | if (IS_BROADWELL(dev_priv) || IS_GEN9(dev_priv)) |
10272 | if (IS_BROADWELL(dev_priv) && crtc_state->ips_enabled) | 10295 | pixel_rate = bdw_adjust_min_pipe_pixel_rate(crtc_state, |
10273 | pixel_rate = DIV_ROUND_UP(pixel_rate * 100, 95); | 10296 | pixel_rate); |
10274 | 10297 | ||
10275 | intel_state->min_pixclk[i] = pixel_rate; | 10298 | intel_state->min_pixclk[i] = pixel_rate; |
10276 | } | 10299 | } |
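
bdw_adjust_min_pipe_pixel_rate() folds two constraints into the per-pipe minimum pixel rate used for cdclk selection: with IPS enabled on Broadwell the pixel rate may use at most 95% of cdclk, and a 4-lane HBR2 DP link with audio must not run with cdclk below 432 MHz. The standalone restatement below uses an invented adjust_min_pixel_rate() with boolean inputs in place of the crtc_state fields; rates are in kHz as in i915.

#include <stdbool.h>
#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

static int adjust_min_pixel_rate(int pixel_rate, bool is_broadwell,
				 bool ips_enabled, bool dp_audio_hbr2_x4)
{
	/* IPS on BDW: pixel rate must stay within 95% of cdclk */
	if (is_broadwell && ips_enabled)
		pixel_rate = DIV_ROUND_UP(pixel_rate * 100, 95);

	/* DP + audio + x4 lanes + HBR2: keep cdclk at 432 MHz or above */
	if (dp_audio_hbr2_x4 && pixel_rate < 432000)
		pixel_rate = 432000;

	return pixel_rate;
}

int main(void)
{
	printf("%d\n", adjust_min_pixel_rate(400000, true, true, false));	/* 421053 */
	printf("%d\n", adjust_min_pixel_rate(400000, false, false, true));	/* 432000 */
	return 0;
}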
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index 3581b5a7f716..bf344d08356a 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c | |||
@@ -4463,21 +4463,11 @@ static enum drm_connector_status | |||
4463 | intel_dp_detect(struct drm_connector *connector, bool force) | 4463 | intel_dp_detect(struct drm_connector *connector, bool force) |
4464 | { | 4464 | { |
4465 | struct intel_dp *intel_dp = intel_attached_dp(connector); | 4465 | struct intel_dp *intel_dp = intel_attached_dp(connector); |
4466 | struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); | ||
4467 | struct intel_encoder *intel_encoder = &intel_dig_port->base; | ||
4468 | enum drm_connector_status status = connector->status; | 4466 | enum drm_connector_status status = connector->status; |
4469 | 4467 | ||
4470 | DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", | 4468 | DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", |
4471 | connector->base.id, connector->name); | 4469 | connector->base.id, connector->name); |
4472 | 4470 | ||
4473 | if (intel_dp->is_mst) { | ||
4474 | /* MST devices are disconnected from a monitor POV */ | ||
4475 | intel_dp_unset_edid(intel_dp); | ||
4476 | if (intel_encoder->type != INTEL_OUTPUT_EDP) | ||
4477 | intel_encoder->type = INTEL_OUTPUT_DP; | ||
4478 | return connector_status_disconnected; | ||
4479 | } | ||
4480 | |||
4481 | /* If full detect is not performed yet, do a full detect */ | 4471 | /* If full detect is not performed yet, do a full detect */ |
4482 | if (!intel_dp->detect_done) | 4472 | if (!intel_dp->detect_done) |
4483 | status = intel_dp_long_pulse(intel_dp->attached_connector); | 4473 | status = intel_dp_long_pulse(intel_dp->attached_connector); |
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c index f40a35f2913a..13c306173f27 100644 --- a/drivers/gpu/drm/i915/intel_hdmi.c +++ b/drivers/gpu/drm/i915/intel_hdmi.c | |||
@@ -1799,6 +1799,50 @@ intel_hdmi_add_properties(struct intel_hdmi *intel_hdmi, struct drm_connector *c | |||
1799 | intel_hdmi->aspect_ratio = HDMI_PICTURE_ASPECT_NONE; | 1799 | intel_hdmi->aspect_ratio = HDMI_PICTURE_ASPECT_NONE; |
1800 | } | 1800 | } |
1801 | 1801 | ||
1802 | static u8 intel_hdmi_ddc_pin(struct drm_i915_private *dev_priv, | ||
1803 | enum port port) | ||
1804 | { | ||
1805 | const struct ddi_vbt_port_info *info = | ||
1806 | &dev_priv->vbt.ddi_port_info[port]; | ||
1807 | u8 ddc_pin; | ||
1808 | |||
1809 | if (info->alternate_ddc_pin) { | ||
1810 | DRM_DEBUG_KMS("Using DDC pin 0x%x for port %c (VBT)\n", | ||
1811 | info->alternate_ddc_pin, port_name(port)); | ||
1812 | return info->alternate_ddc_pin; | ||
1813 | } | ||
1814 | |||
1815 | switch (port) { | ||
1816 | case PORT_B: | ||
1817 | if (IS_BROXTON(dev_priv)) | ||
1818 | ddc_pin = GMBUS_PIN_1_BXT; | ||
1819 | else | ||
1820 | ddc_pin = GMBUS_PIN_DPB; | ||
1821 | break; | ||
1822 | case PORT_C: | ||
1823 | if (IS_BROXTON(dev_priv)) | ||
1824 | ddc_pin = GMBUS_PIN_2_BXT; | ||
1825 | else | ||
1826 | ddc_pin = GMBUS_PIN_DPC; | ||
1827 | break; | ||
1828 | case PORT_D: | ||
1829 | if (IS_CHERRYVIEW(dev_priv)) | ||
1830 | ddc_pin = GMBUS_PIN_DPD_CHV; | ||
1831 | else | ||
1832 | ddc_pin = GMBUS_PIN_DPD; | ||
1833 | break; | ||
1834 | default: | ||
1835 | MISSING_CASE(port); | ||
1836 | ddc_pin = GMBUS_PIN_DPB; | ||
1837 | break; | ||
1838 | } | ||
1839 | |||
1840 | DRM_DEBUG_KMS("Using DDC pin 0x%x for port %c (platform default)\n", | ||
1841 | ddc_pin, port_name(port)); | ||
1842 | |||
1843 | return ddc_pin; | ||
1844 | } | ||
1845 | |||
1802 | void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port, | 1846 | void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port, |
1803 | struct intel_connector *intel_connector) | 1847 | struct intel_connector *intel_connector) |
1804 | { | 1848 | { |
@@ -1808,7 +1852,6 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port, | |||
1808 | struct drm_device *dev = intel_encoder->base.dev; | 1852 | struct drm_device *dev = intel_encoder->base.dev; |
1809 | struct drm_i915_private *dev_priv = to_i915(dev); | 1853 | struct drm_i915_private *dev_priv = to_i915(dev); |
1810 | enum port port = intel_dig_port->port; | 1854 | enum port port = intel_dig_port->port; |
1811 | uint8_t alternate_ddc_pin; | ||
1812 | 1855 | ||
1813 | DRM_DEBUG_KMS("Adding HDMI connector on port %c\n", | 1856 | DRM_DEBUG_KMS("Adding HDMI connector on port %c\n", |
1814 | port_name(port)); | 1857 | port_name(port)); |
@@ -1826,12 +1869,10 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port, | |||
1826 | connector->doublescan_allowed = 0; | 1869 | connector->doublescan_allowed = 0; |
1827 | connector->stereo_allowed = 1; | 1870 | connector->stereo_allowed = 1; |
1828 | 1871 | ||
1872 | intel_hdmi->ddc_bus = intel_hdmi_ddc_pin(dev_priv, port); | ||
1873 | |||
1829 | switch (port) { | 1874 | switch (port) { |
1830 | case PORT_B: | 1875 | case PORT_B: |
1831 | if (IS_BROXTON(dev_priv)) | ||
1832 | intel_hdmi->ddc_bus = GMBUS_PIN_1_BXT; | ||
1833 | else | ||
1834 | intel_hdmi->ddc_bus = GMBUS_PIN_DPB; | ||
1835 | /* | 1876 | /* |
1836 | * On BXT A0/A1, sw needs to activate DDIA HPD logic and | 1877 | * On BXT A0/A1, sw needs to activate DDIA HPD logic and |
1837 | * interrupts to check the external panel connection. | 1878 | * interrupts to check the external panel connection. |
@@ -1842,46 +1883,17 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port, | |||
1842 | intel_encoder->hpd_pin = HPD_PORT_B; | 1883 | intel_encoder->hpd_pin = HPD_PORT_B; |
1843 | break; | 1884 | break; |
1844 | case PORT_C: | 1885 | case PORT_C: |
1845 | if (IS_BROXTON(dev_priv)) | ||
1846 | intel_hdmi->ddc_bus = GMBUS_PIN_2_BXT; | ||
1847 | else | ||
1848 | intel_hdmi->ddc_bus = GMBUS_PIN_DPC; | ||
1849 | intel_encoder->hpd_pin = HPD_PORT_C; | 1886 | intel_encoder->hpd_pin = HPD_PORT_C; |
1850 | break; | 1887 | break; |
1851 | case PORT_D: | 1888 | case PORT_D: |
1852 | if (WARN_ON(IS_BROXTON(dev_priv))) | ||
1853 | intel_hdmi->ddc_bus = GMBUS_PIN_DISABLED; | ||
1854 | else if (IS_CHERRYVIEW(dev_priv)) | ||
1855 | intel_hdmi->ddc_bus = GMBUS_PIN_DPD_CHV; | ||
1856 | else | ||
1857 | intel_hdmi->ddc_bus = GMBUS_PIN_DPD; | ||
1858 | intel_encoder->hpd_pin = HPD_PORT_D; | 1889 | intel_encoder->hpd_pin = HPD_PORT_D; |
1859 | break; | 1890 | break; |
1860 | case PORT_E: | 1891 | case PORT_E: |
1861 | /* On SKL PORT E doesn't have separate GMBUS pin | ||
1862 | * We rely on VBT to set a proper alternate GMBUS pin. */ | ||
1863 | alternate_ddc_pin = | ||
1864 | dev_priv->vbt.ddi_port_info[PORT_E].alternate_ddc_pin; | ||
1865 | switch (alternate_ddc_pin) { | ||
1866 | case DDC_PIN_B: | ||
1867 | intel_hdmi->ddc_bus = GMBUS_PIN_DPB; | ||
1868 | break; | ||
1869 | case DDC_PIN_C: | ||
1870 | intel_hdmi->ddc_bus = GMBUS_PIN_DPC; | ||
1871 | break; | ||
1872 | case DDC_PIN_D: | ||
1873 | intel_hdmi->ddc_bus = GMBUS_PIN_DPD; | ||
1874 | break; | ||
1875 | default: | ||
1876 | MISSING_CASE(alternate_ddc_pin); | ||
1877 | } | ||
1878 | intel_encoder->hpd_pin = HPD_PORT_E; | 1892 | intel_encoder->hpd_pin = HPD_PORT_E; |
1879 | break; | 1893 | break; |
1880 | case PORT_A: | ||
1881 | intel_encoder->hpd_pin = HPD_PORT_A; | ||
1882 | /* Internal port only for eDP. */ | ||
1883 | default: | 1894 | default: |
1884 | BUG(); | 1895 | MISSING_CASE(port); |
1896 | return; | ||
1885 | } | 1897 | } |
1886 | 1898 | ||
1887 | if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) { | 1899 | if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) { |
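
The intel_hdmi.c hunk above pulls the DDC pin selection out into intel_hdmi_ddc_pin(), which honours a VBT-provided alternate pin first and only then falls back to a per-platform default. A minimal standalone sketch of that lookup shape, assuming nothing about the real i915 API: the pin numbers, struct and helper name below are purely illustrative.

    #include <stdio.h>

    enum port { PORT_B, PORT_C, PORT_D };

    struct port_info {
        unsigned char alternate_ddc_pin;   /* 0 means "no VBT override" */
    };

    /* Pick the DDC pin: a VBT override wins, otherwise a platform default. */
    static unsigned int ddc_pin_for_port(const struct port_info *info,
                                         enum port port, int is_bxt)
    {
        static const unsigned char bxt_default[]   = { 0x1, 0x2, 0x0 };
        static const unsigned char other_default[] = { 0x5, 0x4, 0x6 };

        if (info->alternate_ddc_pin)
            return info->alternate_ddc_pin;      /* explicit override */

        return is_bxt ? bxt_default[port] : other_default[port];
    }

    int main(void)
    {
        struct port_info no_override = { 0 };
        struct port_info vbt_override = { 0x9 };

        printf("port B default: 0x%x\n",
               ddc_pin_for_port(&no_override, PORT_B, 0));
        printf("port B via VBT: 0x%x\n",
               ddc_pin_for_port(&vbt_override, PORT_B, 0));
        return 0;
    }

The practical gain of the refactor is that the VBT override, previously handled only for PORT_E, now applies uniformly to every port.
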
diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c index 6c11168facd6..a38c2fefe85a 100644 --- a/drivers/gpu/drm/i915/intel_runtime_pm.c +++ b/drivers/gpu/drm/i915/intel_runtime_pm.c | |||
@@ -1139,7 +1139,9 @@ static void vlv_display_power_well_deinit(struct drm_i915_private *dev_priv) | |||
1139 | 1139 | ||
1140 | intel_power_sequencer_reset(dev_priv); | 1140 | intel_power_sequencer_reset(dev_priv); |
1141 | 1141 | ||
1142 | intel_hpd_poll_init(dev_priv); | 1142 | /* Prevent us from re-enabling polling by accident in late suspend */ |
1143 | if (!dev_priv->drm.dev->power.is_suspended) | ||
1144 | intel_hpd_poll_init(dev_priv); | ||
1143 | } | 1145 | } |
1144 | 1146 | ||
1145 | static void vlv_display_power_well_enable(struct drm_i915_private *dev_priv, | 1147 | static void vlv_display_power_well_enable(struct drm_i915_private *dev_priv, |
diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c index 73a521fdf1bd..dbed12c484c9 100644 --- a/drivers/gpu/drm/i915/intel_sprite.c +++ b/drivers/gpu/drm/i915/intel_sprite.c | |||
@@ -358,7 +358,7 @@ vlv_update_plane(struct drm_plane *dplane, | |||
358 | int plane = intel_plane->plane; | 358 | int plane = intel_plane->plane; |
359 | u32 sprctl; | 359 | u32 sprctl; |
360 | u32 sprsurf_offset, linear_offset; | 360 | u32 sprsurf_offset, linear_offset; |
361 | unsigned int rotation = dplane->state->rotation; | 361 | unsigned int rotation = plane_state->base.rotation; |
362 | const struct drm_intel_sprite_colorkey *key = &plane_state->ckey; | 362 | const struct drm_intel_sprite_colorkey *key = &plane_state->ckey; |
363 | int crtc_x = plane_state->base.dst.x1; | 363 | int crtc_x = plane_state->base.dst.x1; |
364 | int crtc_y = plane_state->base.dst.y1; | 364 | int crtc_y = plane_state->base.dst.y1; |
diff --git a/drivers/gpu/drm/i915/intel_vbt_defs.h b/drivers/gpu/drm/i915/intel_vbt_defs.h index 68db9621f1f0..8886cab19f98 100644 --- a/drivers/gpu/drm/i915/intel_vbt_defs.h +++ b/drivers/gpu/drm/i915/intel_vbt_defs.h | |||
@@ -280,7 +280,8 @@ struct common_child_dev_config { | |||
280 | u8 dp_support:1; | 280 | u8 dp_support:1; |
281 | u8 tmds_support:1; | 281 | u8 tmds_support:1; |
282 | u8 support_reserved:5; | 282 | u8 support_reserved:5; |
283 | u8 not_common3[12]; | 283 | u8 aux_channel; |
284 | u8 not_common3[11]; | ||
284 | u8 iboost_level; | 285 | u8 iboost_level; |
285 | } __packed; | 286 | } __packed; |
286 | 287 | ||
diff --git a/drivers/gpu/drm/imx/ipuv3-crtc.c b/drivers/gpu/drm/imx/ipuv3-crtc.c index 4e1ae3fc462d..6be515a9fb69 100644 --- a/drivers/gpu/drm/imx/ipuv3-crtc.c +++ b/drivers/gpu/drm/imx/ipuv3-crtc.c | |||
@@ -68,6 +68,12 @@ static void ipu_crtc_atomic_disable(struct drm_crtc *crtc, | |||
68 | 68 | ||
69 | ipu_dc_disable_channel(ipu_crtc->dc); | 69 | ipu_dc_disable_channel(ipu_crtc->dc); |
70 | ipu_di_disable(ipu_crtc->di); | 70 | ipu_di_disable(ipu_crtc->di); |
71 | /* | ||
72 | * Planes must be disabled before DC clock is removed, as otherwise the | ||
73 | * attached IDMACs will be left in an undefined state, possibly hanging | ||
74 | * the IPU or even the system. | ||
75 | */ | ||
76 | drm_atomic_helper_disable_planes_on_crtc(old_crtc_state, false); | ||
71 | ipu_dc_disable(ipu); | 77 | ipu_dc_disable(ipu); |
72 | 78 | ||
73 | spin_lock_irq(&crtc->dev->event_lock); | 79 | spin_lock_irq(&crtc->dev->event_lock); |
@@ -77,9 +83,6 @@ static void ipu_crtc_atomic_disable(struct drm_crtc *crtc, | |||
77 | } | 83 | } |
78 | spin_unlock_irq(&crtc->dev->event_lock); | 84 | spin_unlock_irq(&crtc->dev->event_lock); |
79 | 85 | ||
80 | /* always disable planes on the CRTC */ | ||
81 | drm_atomic_helper_disable_planes_on_crtc(old_crtc_state, true); | ||
82 | |||
83 | drm_crtc_vblank_off(crtc); | 86 | drm_crtc_vblank_off(crtc); |
84 | } | 87 | } |
85 | 88 | ||
diff --git a/drivers/gpu/drm/mediatek/mtk_disp_ovl.c b/drivers/gpu/drm/mediatek/mtk_disp_ovl.c index 019b7ca392d7..f75c5b5a536c 100644 --- a/drivers/gpu/drm/mediatek/mtk_disp_ovl.c +++ b/drivers/gpu/drm/mediatek/mtk_disp_ovl.c | |||
@@ -80,6 +80,7 @@ static void mtk_ovl_enable_vblank(struct mtk_ddp_comp *comp, | |||
80 | ddp_comp); | 80 | ddp_comp); |
81 | 81 | ||
82 | priv->crtc = crtc; | 82 | priv->crtc = crtc; |
83 | writel(0x0, comp->regs + DISP_REG_OVL_INTSTA); | ||
83 | writel_relaxed(OVL_FME_CPL_INT, comp->regs + DISP_REG_OVL_INTEN); | 84 | writel_relaxed(OVL_FME_CPL_INT, comp->regs + DISP_REG_OVL_INTEN); |
84 | } | 85 | } |
85 | 86 | ||
diff --git a/drivers/gpu/drm/mediatek/mtk_dpi.c b/drivers/gpu/drm/mediatek/mtk_dpi.c index 0186e500d2a5..90fb831ef031 100644 --- a/drivers/gpu/drm/mediatek/mtk_dpi.c +++ b/drivers/gpu/drm/mediatek/mtk_dpi.c | |||
@@ -432,11 +432,16 @@ static int mtk_dpi_set_display_mode(struct mtk_dpi *dpi, | |||
432 | unsigned long pll_rate; | 432 | unsigned long pll_rate; |
433 | unsigned int factor; | 433 | unsigned int factor; |
434 | 434 | ||
435 | /* keep pll_rate within the valid range of tvdpll (1 GHz ~ 2 GHz) */ | ||
435 | pix_rate = 1000UL * mode->clock; | 436 | pix_rate = 1000UL * mode->clock; |
436 | if (mode->clock <= 74000) | 437 | if (mode->clock <= 27000) |
438 | factor = 16 * 3; | ||
439 | else if (mode->clock <= 84000) | ||
437 | factor = 8 * 3; | 440 | factor = 8 * 3; |
438 | else | 441 | else if (mode->clock <= 167000) |
439 | factor = 4 * 3; | 442 | factor = 4 * 3; |
443 | else | ||
444 | factor = 2 * 3; | ||
440 | pll_rate = pix_rate * factor; | 445 | pll_rate = pix_rate * factor; |
441 | 446 | ||
442 | dev_dbg(dpi->dev, "Want PLL %lu Hz, pixel clock %lu Hz\n", | 447 | dev_dbg(dpi->dev, "Want PLL %lu Hz, pixel clock %lu Hz\n", |
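
The mtk_dpi change above replaces the two-entry factor choice with four ranges so that, for common CEA pixel clocks, pixel clock times factor lands inside the TVDPLL range the added comment mentions (roughly 1 GHz to 2 GHz). A standalone sketch of the selection using the thresholds from the diff; the helper name and the sample clock list are illustrative.

    #include <stdio.h>

    /* mode->clock is in kHz in DRM; the thresholds mirror the diff above. */
    static unsigned int tvdpll_factor(unsigned long clock_khz)
    {
        if (clock_khz <= 27000)
            return 16 * 3;
        else if (clock_khz <= 84000)
            return 8 * 3;
        else if (clock_khz <= 167000)
            return 4 * 3;
        else
            return 2 * 3;
    }

    int main(void)
    {
        /* Typical CEA pixel clocks, in kHz. */
        const unsigned long clocks[] = { 25175, 27000, 74250, 148500, 297000 };

        for (unsigned int i = 0; i < sizeof(clocks) / sizeof(clocks[0]); i++) {
            unsigned long pix_hz = 1000UL * clocks[i];
            unsigned long pll_hz = pix_hz * tvdpll_factor(clocks[i]);
            printf("pixel %9lu Hz -> PLL %10lu Hz\n", pix_hz, pll_hz);
        }
        return 0;
    }
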
diff --git a/drivers/gpu/drm/mediatek/mtk_hdmi.c b/drivers/gpu/drm/mediatek/mtk_hdmi.c index 71227deef21b..0e8c4d9af340 100644 --- a/drivers/gpu/drm/mediatek/mtk_hdmi.c +++ b/drivers/gpu/drm/mediatek/mtk_hdmi.c | |||
@@ -1133,12 +1133,6 @@ static int mtk_hdmi_output_set_display_mode(struct mtk_hdmi *hdmi, | |||
1133 | phy_power_on(hdmi->phy); | 1133 | phy_power_on(hdmi->phy); |
1134 | mtk_hdmi_aud_output_config(hdmi, mode); | 1134 | mtk_hdmi_aud_output_config(hdmi, mode); |
1135 | 1135 | ||
1136 | mtk_hdmi_setup_audio_infoframe(hdmi); | ||
1137 | mtk_hdmi_setup_avi_infoframe(hdmi, mode); | ||
1138 | mtk_hdmi_setup_spd_infoframe(hdmi, "mediatek", "On-chip HDMI"); | ||
1139 | if (mode->flags & DRM_MODE_FLAG_3D_MASK) | ||
1140 | mtk_hdmi_setup_vendor_specific_infoframe(hdmi, mode); | ||
1141 | |||
1142 | mtk_hdmi_hw_vid_black(hdmi, false); | 1136 | mtk_hdmi_hw_vid_black(hdmi, false); |
1143 | mtk_hdmi_hw_aud_unmute(hdmi); | 1137 | mtk_hdmi_hw_aud_unmute(hdmi); |
1144 | mtk_hdmi_hw_send_av_unmute(hdmi); | 1138 | mtk_hdmi_hw_send_av_unmute(hdmi); |
@@ -1401,6 +1395,16 @@ static void mtk_hdmi_bridge_pre_enable(struct drm_bridge *bridge) | |||
1401 | hdmi->powered = true; | 1395 | hdmi->powered = true; |
1402 | } | 1396 | } |
1403 | 1397 | ||
1398 | static void mtk_hdmi_send_infoframe(struct mtk_hdmi *hdmi, | ||
1399 | struct drm_display_mode *mode) | ||
1400 | { | ||
1401 | mtk_hdmi_setup_audio_infoframe(hdmi); | ||
1402 | mtk_hdmi_setup_avi_infoframe(hdmi, mode); | ||
1403 | mtk_hdmi_setup_spd_infoframe(hdmi, "mediatek", "On-chip HDMI"); | ||
1404 | if (mode->flags & DRM_MODE_FLAG_3D_MASK) | ||
1405 | mtk_hdmi_setup_vendor_specific_infoframe(hdmi, mode); | ||
1406 | } | ||
1407 | |||
1404 | static void mtk_hdmi_bridge_enable(struct drm_bridge *bridge) | 1408 | static void mtk_hdmi_bridge_enable(struct drm_bridge *bridge) |
1405 | { | 1409 | { |
1406 | struct mtk_hdmi *hdmi = hdmi_ctx_from_bridge(bridge); | 1410 | struct mtk_hdmi *hdmi = hdmi_ctx_from_bridge(bridge); |
@@ -1409,6 +1413,7 @@ static void mtk_hdmi_bridge_enable(struct drm_bridge *bridge) | |||
1409 | clk_prepare_enable(hdmi->clk[MTK_HDMI_CLK_HDMI_PLL]); | 1413 | clk_prepare_enable(hdmi->clk[MTK_HDMI_CLK_HDMI_PLL]); |
1410 | clk_prepare_enable(hdmi->clk[MTK_HDMI_CLK_HDMI_PIXEL]); | 1414 | clk_prepare_enable(hdmi->clk[MTK_HDMI_CLK_HDMI_PIXEL]); |
1411 | phy_power_on(hdmi->phy); | 1415 | phy_power_on(hdmi->phy); |
1416 | mtk_hdmi_send_infoframe(hdmi, &hdmi->mode); | ||
1412 | 1417 | ||
1413 | hdmi->enabled = true; | 1418 | hdmi->enabled = true; |
1414 | } | 1419 | } |
diff --git a/drivers/gpu/drm/mediatek/mtk_mt8173_hdmi_phy.c b/drivers/gpu/drm/mediatek/mtk_mt8173_hdmi_phy.c index 8a24754b440f..51cb9cfb6646 100644 --- a/drivers/gpu/drm/mediatek/mtk_mt8173_hdmi_phy.c +++ b/drivers/gpu/drm/mediatek/mtk_mt8173_hdmi_phy.c | |||
@@ -265,6 +265,9 @@ static int mtk_hdmi_pll_set_rate(struct clk_hw *hw, unsigned long rate, | |||
265 | struct mtk_hdmi_phy *hdmi_phy = to_mtk_hdmi_phy(hw); | 265 | struct mtk_hdmi_phy *hdmi_phy = to_mtk_hdmi_phy(hw); |
266 | unsigned int pre_div; | 266 | unsigned int pre_div; |
267 | unsigned int div; | 267 | unsigned int div; |
268 | unsigned int pre_ibias; | ||
269 | unsigned int hdmi_ibias; | ||
270 | unsigned int imp_en; | ||
268 | 271 | ||
269 | dev_dbg(hdmi_phy->dev, "%s: %lu Hz, parent: %lu Hz\n", __func__, | 272 | dev_dbg(hdmi_phy->dev, "%s: %lu Hz, parent: %lu Hz\n", __func__, |
270 | rate, parent_rate); | 273 | rate, parent_rate); |
@@ -298,18 +301,31 @@ static int mtk_hdmi_pll_set_rate(struct clk_hw *hw, unsigned long rate, | |||
298 | (0x1 << PLL_BR_SHIFT), | 301 | (0x1 << PLL_BR_SHIFT), |
299 | RG_HDMITX_PLL_BP | RG_HDMITX_PLL_BC | | 302 | RG_HDMITX_PLL_BP | RG_HDMITX_PLL_BC | |
300 | RG_HDMITX_PLL_BR); | 303 | RG_HDMITX_PLL_BR); |
301 | mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON3, RG_HDMITX_PRD_IMP_EN); | 304 | if (rate < 165000000) { |
305 | mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON3, | ||
306 | RG_HDMITX_PRD_IMP_EN); | ||
307 | pre_ibias = 0x3; | ||
308 | imp_en = 0x0; | ||
309 | hdmi_ibias = hdmi_phy->ibias; | ||
310 | } else { | ||
311 | mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON3, | ||
312 | RG_HDMITX_PRD_IMP_EN); | ||
313 | pre_ibias = 0x6; | ||
314 | imp_en = 0xf; | ||
315 | hdmi_ibias = hdmi_phy->ibias_up; | ||
316 | } | ||
302 | mtk_hdmi_phy_mask(hdmi_phy, HDMI_CON4, | 317 | mtk_hdmi_phy_mask(hdmi_phy, HDMI_CON4, |
303 | (0x3 << PRD_IBIAS_CLK_SHIFT) | | 318 | (pre_ibias << PRD_IBIAS_CLK_SHIFT) | |
304 | (0x3 << PRD_IBIAS_D2_SHIFT) | | 319 | (pre_ibias << PRD_IBIAS_D2_SHIFT) | |
305 | (0x3 << PRD_IBIAS_D1_SHIFT) | | 320 | (pre_ibias << PRD_IBIAS_D1_SHIFT) | |
306 | (0x3 << PRD_IBIAS_D0_SHIFT), | 321 | (pre_ibias << PRD_IBIAS_D0_SHIFT), |
307 | RG_HDMITX_PRD_IBIAS_CLK | | 322 | RG_HDMITX_PRD_IBIAS_CLK | |
308 | RG_HDMITX_PRD_IBIAS_D2 | | 323 | RG_HDMITX_PRD_IBIAS_D2 | |
309 | RG_HDMITX_PRD_IBIAS_D1 | | 324 | RG_HDMITX_PRD_IBIAS_D1 | |
310 | RG_HDMITX_PRD_IBIAS_D0); | 325 | RG_HDMITX_PRD_IBIAS_D0); |
311 | mtk_hdmi_phy_mask(hdmi_phy, HDMI_CON3, | 326 | mtk_hdmi_phy_mask(hdmi_phy, HDMI_CON3, |
312 | (0x0 << DRV_IMP_EN_SHIFT), RG_HDMITX_DRV_IMP_EN); | 327 | (imp_en << DRV_IMP_EN_SHIFT), |
328 | RG_HDMITX_DRV_IMP_EN); | ||
313 | mtk_hdmi_phy_mask(hdmi_phy, HDMI_CON6, | 329 | mtk_hdmi_phy_mask(hdmi_phy, HDMI_CON6, |
314 | (hdmi_phy->drv_imp_clk << DRV_IMP_CLK_SHIFT) | | 330 | (hdmi_phy->drv_imp_clk << DRV_IMP_CLK_SHIFT) | |
315 | (hdmi_phy->drv_imp_d2 << DRV_IMP_D2_SHIFT) | | 331 | (hdmi_phy->drv_imp_d2 << DRV_IMP_D2_SHIFT) | |
@@ -318,12 +334,14 @@ static int mtk_hdmi_pll_set_rate(struct clk_hw *hw, unsigned long rate, | |||
318 | RG_HDMITX_DRV_IMP_CLK | RG_HDMITX_DRV_IMP_D2 | | 334 | RG_HDMITX_DRV_IMP_CLK | RG_HDMITX_DRV_IMP_D2 | |
319 | RG_HDMITX_DRV_IMP_D1 | RG_HDMITX_DRV_IMP_D0); | 335 | RG_HDMITX_DRV_IMP_D1 | RG_HDMITX_DRV_IMP_D0); |
320 | mtk_hdmi_phy_mask(hdmi_phy, HDMI_CON5, | 336 | mtk_hdmi_phy_mask(hdmi_phy, HDMI_CON5, |
321 | (hdmi_phy->ibias << DRV_IBIAS_CLK_SHIFT) | | 337 | (hdmi_ibias << DRV_IBIAS_CLK_SHIFT) | |
322 | (hdmi_phy->ibias << DRV_IBIAS_D2_SHIFT) | | 338 | (hdmi_ibias << DRV_IBIAS_D2_SHIFT) | |
323 | (hdmi_phy->ibias << DRV_IBIAS_D1_SHIFT) | | 339 | (hdmi_ibias << DRV_IBIAS_D1_SHIFT) | |
324 | (hdmi_phy->ibias << DRV_IBIAS_D0_SHIFT), | 340 | (hdmi_ibias << DRV_IBIAS_D0_SHIFT), |
325 | RG_HDMITX_DRV_IBIAS_CLK | RG_HDMITX_DRV_IBIAS_D2 | | 341 | RG_HDMITX_DRV_IBIAS_CLK | |
326 | RG_HDMITX_DRV_IBIAS_D1 | RG_HDMITX_DRV_IBIAS_D0); | 342 | RG_HDMITX_DRV_IBIAS_D2 | |
343 | RG_HDMITX_DRV_IBIAS_D1 | | ||
344 | RG_HDMITX_DRV_IBIAS_D0); | ||
327 | return 0; | 345 | return 0; |
328 | } | 346 | } |
329 | 347 | ||
diff --git a/drivers/gpu/drm/msm/dsi/dsi_host.c b/drivers/gpu/drm/msm/dsi/dsi_host.c index f05ed0e1f3d6..6f240021705b 100644 --- a/drivers/gpu/drm/msm/dsi/dsi_host.c +++ b/drivers/gpu/drm/msm/dsi/dsi_host.c | |||
@@ -139,6 +139,7 @@ struct msm_dsi_host { | |||
139 | 139 | ||
140 | u32 err_work_state; | 140 | u32 err_work_state; |
141 | struct work_struct err_work; | 141 | struct work_struct err_work; |
142 | struct work_struct hpd_work; | ||
142 | struct workqueue_struct *workqueue; | 143 | struct workqueue_struct *workqueue; |
143 | 144 | ||
144 | /* DSI 6G TX buffer*/ | 145 | /* DSI 6G TX buffer*/ |
@@ -1294,6 +1295,14 @@ static void dsi_sw_reset_restore(struct msm_dsi_host *msm_host) | |||
1294 | wmb(); /* make sure dsi controller enabled again */ | 1295 | wmb(); /* make sure dsi controller enabled again */ |
1295 | } | 1296 | } |
1296 | 1297 | ||
1298 | static void dsi_hpd_worker(struct work_struct *work) | ||
1299 | { | ||
1300 | struct msm_dsi_host *msm_host = | ||
1301 | container_of(work, struct msm_dsi_host, hpd_work); | ||
1302 | |||
1303 | drm_helper_hpd_irq_event(msm_host->dev); | ||
1304 | } | ||
1305 | |||
1297 | static void dsi_err_worker(struct work_struct *work) | 1306 | static void dsi_err_worker(struct work_struct *work) |
1298 | { | 1307 | { |
1299 | struct msm_dsi_host *msm_host = | 1308 | struct msm_dsi_host *msm_host = |
@@ -1480,7 +1489,7 @@ static int dsi_host_attach(struct mipi_dsi_host *host, | |||
1480 | 1489 | ||
1481 | DBG("id=%d", msm_host->id); | 1490 | DBG("id=%d", msm_host->id); |
1482 | if (msm_host->dev) | 1491 | if (msm_host->dev) |
1483 | drm_helper_hpd_irq_event(msm_host->dev); | 1492 | queue_work(msm_host->workqueue, &msm_host->hpd_work); |
1484 | 1493 | ||
1485 | return 0; | 1494 | return 0; |
1486 | } | 1495 | } |
@@ -1494,7 +1503,7 @@ static int dsi_host_detach(struct mipi_dsi_host *host, | |||
1494 | 1503 | ||
1495 | DBG("id=%d", msm_host->id); | 1504 | DBG("id=%d", msm_host->id); |
1496 | if (msm_host->dev) | 1505 | if (msm_host->dev) |
1497 | drm_helper_hpd_irq_event(msm_host->dev); | 1506 | queue_work(msm_host->workqueue, &msm_host->hpd_work); |
1498 | 1507 | ||
1499 | return 0; | 1508 | return 0; |
1500 | } | 1509 | } |
@@ -1748,6 +1757,7 @@ int msm_dsi_host_init(struct msm_dsi *msm_dsi) | |||
1748 | /* setup workqueue */ | 1757 | /* setup workqueue */ |
1749 | msm_host->workqueue = alloc_ordered_workqueue("dsi_drm_work", 0); | 1758 | msm_host->workqueue = alloc_ordered_workqueue("dsi_drm_work", 0); |
1750 | INIT_WORK(&msm_host->err_work, dsi_err_worker); | 1759 | INIT_WORK(&msm_host->err_work, dsi_err_worker); |
1760 | INIT_WORK(&msm_host->hpd_work, dsi_hpd_worker); | ||
1751 | 1761 | ||
1752 | msm_dsi->host = &msm_host->base; | 1762 | msm_dsi->host = &msm_host->base; |
1753 | msm_dsi->id = msm_host->id; | 1763 | msm_dsi->id = msm_host->id; |
diff --git a/drivers/gpu/drm/msm/dsi/pll/dsi_pll_28nm.c b/drivers/gpu/drm/msm/dsi/pll/dsi_pll_28nm.c index 598fdaff0a41..26e3a01a99c2 100644 --- a/drivers/gpu/drm/msm/dsi/pll/dsi_pll_28nm.c +++ b/drivers/gpu/drm/msm/dsi/pll/dsi_pll_28nm.c | |||
@@ -521,6 +521,7 @@ static int pll_28nm_register(struct dsi_pll_28nm *pll_28nm) | |||
521 | .parent_names = (const char *[]){ "xo" }, | 521 | .parent_names = (const char *[]){ "xo" }, |
522 | .num_parents = 1, | 522 | .num_parents = 1, |
523 | .name = vco_name, | 523 | .name = vco_name, |
524 | .flags = CLK_IGNORE_UNUSED, | ||
524 | .ops = &clk_ops_dsi_pll_28nm_vco, | 525 | .ops = &clk_ops_dsi_pll_28nm_vco, |
525 | }; | 526 | }; |
526 | struct device *dev = &pll_28nm->pdev->dev; | 527 | struct device *dev = &pll_28nm->pdev->dev; |
diff --git a/drivers/gpu/drm/msm/dsi/pll/dsi_pll_28nm_8960.c b/drivers/gpu/drm/msm/dsi/pll/dsi_pll_28nm_8960.c index 38c90e1eb002..49008451085b 100644 --- a/drivers/gpu/drm/msm/dsi/pll/dsi_pll_28nm_8960.c +++ b/drivers/gpu/drm/msm/dsi/pll/dsi_pll_28nm_8960.c | |||
@@ -412,6 +412,7 @@ static int pll_28nm_register(struct dsi_pll_28nm *pll_28nm) | |||
412 | struct clk_init_data vco_init = { | 412 | struct clk_init_data vco_init = { |
413 | .parent_names = (const char *[]){ "pxo" }, | 413 | .parent_names = (const char *[]){ "pxo" }, |
414 | .num_parents = 1, | 414 | .num_parents = 1, |
415 | .flags = CLK_IGNORE_UNUSED, | ||
415 | .ops = &clk_ops_dsi_pll_28nm_vco, | 416 | .ops = &clk_ops_dsi_pll_28nm_vco, |
416 | }; | 417 | }; |
417 | struct device *dev = &pll_28nm->pdev->dev; | 418 | struct device *dev = &pll_28nm->pdev->dev; |
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_phy_8996.c b/drivers/gpu/drm/msm/hdmi/hdmi_phy_8996.c index aa94a553794f..143eab46ba68 100644 --- a/drivers/gpu/drm/msm/hdmi/hdmi_phy_8996.c +++ b/drivers/gpu/drm/msm/hdmi/hdmi_phy_8996.c | |||
@@ -702,6 +702,7 @@ static struct clk_init_data pll_init = { | |||
702 | .ops = &hdmi_8996_pll_ops, | 702 | .ops = &hdmi_8996_pll_ops, |
703 | .parent_names = hdmi_pll_parents, | 703 | .parent_names = hdmi_pll_parents, |
704 | .num_parents = ARRAY_SIZE(hdmi_pll_parents), | 704 | .num_parents = ARRAY_SIZE(hdmi_pll_parents), |
705 | .flags = CLK_IGNORE_UNUSED, | ||
705 | }; | 706 | }; |
706 | 707 | ||
707 | int msm_hdmi_pll_8996_init(struct platform_device *pdev) | 708 | int msm_hdmi_pll_8996_init(struct platform_device *pdev) |
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_pll_8960.c b/drivers/gpu/drm/msm/hdmi/hdmi_pll_8960.c index 92da69aa6187..99590758c68b 100644 --- a/drivers/gpu/drm/msm/hdmi/hdmi_pll_8960.c +++ b/drivers/gpu/drm/msm/hdmi/hdmi_pll_8960.c | |||
@@ -424,6 +424,7 @@ static struct clk_init_data pll_init = { | |||
424 | .ops = &hdmi_pll_ops, | 424 | .ops = &hdmi_pll_ops, |
425 | .parent_names = hdmi_pll_parents, | 425 | .parent_names = hdmi_pll_parents, |
426 | .num_parents = ARRAY_SIZE(hdmi_pll_parents), | 426 | .num_parents = ARRAY_SIZE(hdmi_pll_parents), |
427 | .flags = CLK_IGNORE_UNUSED, | ||
427 | }; | 428 | }; |
428 | 429 | ||
429 | int msm_hdmi_pll_8960_init(struct platform_device *pdev) | 430 | int msm_hdmi_pll_8960_init(struct platform_device *pdev) |
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c index ac9e4cde1380..8b4e3004f451 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c | |||
@@ -272,7 +272,7 @@ const struct mdp5_cfg_hw msm8x16_config = { | |||
272 | .count = 2, | 272 | .count = 2, |
273 | .base = { 0x14000, 0x16000 }, | 273 | .base = { 0x14000, 0x16000 }, |
274 | .caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP | | 274 | .caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP | |
275 | MDP_PIPE_CAP_SCALE | MDP_PIPE_CAP_DECIMATION, | 275 | MDP_PIPE_CAP_DECIMATION, |
276 | }, | 276 | }, |
277 | .pipe_dma = { | 277 | .pipe_dma = { |
278 | .count = 1, | 278 | .count = 1, |
@@ -282,7 +282,7 @@ const struct mdp5_cfg_hw msm8x16_config = { | |||
282 | .lm = { | 282 | .lm = { |
283 | .count = 2, /* LM0 and LM3 */ | 283 | .count = 2, /* LM0 and LM3 */ |
284 | .base = { 0x44000, 0x47000 }, | 284 | .base = { 0x44000, 0x47000 }, |
285 | .nb_stages = 5, | 285 | .nb_stages = 8, |
286 | .max_width = 2048, | 286 | .max_width = 2048, |
287 | .max_height = 0xFFFF, | 287 | .max_height = 0xFFFF, |
288 | }, | 288 | }, |
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c index fa2be7ce9468..c205c360e16d 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c | |||
@@ -223,12 +223,7 @@ static void blend_setup(struct drm_crtc *crtc) | |||
223 | plane_cnt++; | 223 | plane_cnt++; |
224 | } | 224 | } |
225 | 225 | ||
226 | /* | 226 | if (!pstates[STAGE_BASE]) { |
227 | * If there is no base layer, enable border color. | ||
228 | * Although it's not possible in current blend logic, | ||
229 | * put it here as a reminder. | ||
230 | */ | ||
231 | if (!pstates[STAGE_BASE] && plane_cnt) { | ||
232 | ctl_blend_flags |= MDP5_CTL_BLEND_OP_FLAG_BORDER_OUT; | 227 | ctl_blend_flags |= MDP5_CTL_BLEND_OP_FLAG_BORDER_OUT; |
233 | DBG("Border Color is enabled"); | 228 | DBG("Border Color is enabled"); |
234 | } | 229 | } |
@@ -365,6 +360,15 @@ static int pstate_cmp(const void *a, const void *b) | |||
365 | return pa->state->zpos - pb->state->zpos; | 360 | return pa->state->zpos - pb->state->zpos; |
366 | } | 361 | } |
367 | 362 | ||
363 | /* is there a helper for this? */ | ||
364 | static bool is_fullscreen(struct drm_crtc_state *cstate, | ||
365 | struct drm_plane_state *pstate) | ||
366 | { | ||
367 | return (pstate->crtc_x <= 0) && (pstate->crtc_y <= 0) && | ||
368 | ((pstate->crtc_x + pstate->crtc_w) >= cstate->mode.hdisplay) && | ||
369 | ((pstate->crtc_y + pstate->crtc_h) >= cstate->mode.vdisplay); | ||
370 | } | ||
371 | |||
368 | static int mdp5_crtc_atomic_check(struct drm_crtc *crtc, | 372 | static int mdp5_crtc_atomic_check(struct drm_crtc *crtc, |
369 | struct drm_crtc_state *state) | 373 | struct drm_crtc_state *state) |
370 | { | 374 | { |
@@ -375,21 +379,11 @@ static int mdp5_crtc_atomic_check(struct drm_crtc *crtc, | |||
375 | struct plane_state pstates[STAGE_MAX + 1]; | 379 | struct plane_state pstates[STAGE_MAX + 1]; |
376 | const struct mdp5_cfg_hw *hw_cfg; | 380 | const struct mdp5_cfg_hw *hw_cfg; |
377 | const struct drm_plane_state *pstate; | 381 | const struct drm_plane_state *pstate; |
378 | int cnt = 0, i; | 382 | int cnt = 0, base = 0, i; |
379 | 383 | ||
380 | DBG("%s: check", mdp5_crtc->name); | 384 | DBG("%s: check", mdp5_crtc->name); |
381 | 385 | ||
382 | /* verify that there are not too many planes attached to crtc | ||
383 | * and that we don't have conflicting mixer stages: | ||
384 | */ | ||
385 | hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg); | ||
386 | drm_atomic_crtc_state_for_each_plane_state(plane, pstate, state) { | 386 | drm_atomic_crtc_state_for_each_plane_state(plane, pstate, state) { |
387 | if (cnt >= (hw_cfg->lm.nb_stages)) { | ||
388 | dev_err(dev->dev, "too many planes!\n"); | ||
389 | return -EINVAL; | ||
390 | } | ||
391 | |||
392 | |||
393 | pstates[cnt].plane = plane; | 387 | pstates[cnt].plane = plane; |
394 | pstates[cnt].state = to_mdp5_plane_state(pstate); | 388 | pstates[cnt].state = to_mdp5_plane_state(pstate); |
395 | 389 | ||
@@ -399,8 +393,24 @@ static int mdp5_crtc_atomic_check(struct drm_crtc *crtc, | |||
399 | /* assign a stage based on sorted zpos property */ | 393 | /* assign a stage based on sorted zpos property */ |
400 | sort(pstates, cnt, sizeof(pstates[0]), pstate_cmp, NULL); | 394 | sort(pstates, cnt, sizeof(pstates[0]), pstate_cmp, NULL); |
401 | 395 | ||
396 | /* if the bottom-most layer is not fullscreen, we need to use | ||
397 | * it for solid-color: | ||
398 | */ | ||
399 | if ((cnt > 0) && !is_fullscreen(state, &pstates[0].state->base)) | ||
400 | base++; | ||
401 | |||
402 | /* verify that there are not too many planes attached to crtc | ||
403 | * and that we don't have conflicting mixer stages: | ||
404 | */ | ||
405 | hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg); | ||
406 | |||
407 | if ((cnt + base) >= hw_cfg->lm.nb_stages) { | ||
408 | dev_err(dev->dev, "too many planes!\n"); | ||
409 | return -EINVAL; | ||
410 | } | ||
411 | |||
402 | for (i = 0; i < cnt; i++) { | 412 | for (i = 0; i < cnt; i++) { |
403 | pstates[i].state->stage = STAGE_BASE + i; | 413 | pstates[i].state->stage = STAGE_BASE + i + base; |
404 | DBG("%s: assign pipe %s on stage=%d", mdp5_crtc->name, | 414 | DBG("%s: assign pipe %s on stage=%d", mdp5_crtc->name, |
405 | pipe2name(mdp5_plane_pipe(pstates[i].plane)), | 415 | pipe2name(mdp5_plane_pipe(pstates[i].plane)), |
406 | pstates[i].state->stage); | 416 | pstates[i].state->stage); |
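
The mdp5_crtc.c change above reserves the bottom mixer stage for border/solid color whenever the lowest plane does not cover the whole mode, and only then checks the total against nb_stages. A standalone model of that decision with simplified structs; the field names loosely follow the diff, but this is not the msm driver API.

    #include <stdio.h>

    struct mode  { int hdisplay, vdisplay; };
    struct plane { int crtc_x, crtc_y, crtc_w, crtc_h; };

    /* Does the plane cover the whole active area? */
    static int is_fullscreen(const struct mode *m, const struct plane *p)
    {
        return p->crtc_x <= 0 && p->crtc_y <= 0 &&
               p->crtc_x + p->crtc_w >= m->hdisplay &&
               p->crtc_y + p->crtc_h >= m->vdisplay;
    }

    int main(void)
    {
        struct mode m = { 1920, 1080 };
        struct plane planes[] = {            /* already sorted by zpos */
            { 100, 100, 640, 480 },          /* bottom plane, windowed */
            {   0,   0, 1920, 1080 },
        };
        int cnt = 2, nb_stages = 8, base = 0;

        /* Reserve the base stage for border/solid color if needed. */
        if (cnt > 0 && !is_fullscreen(&m, &planes[0]))
            base++;

        if (cnt + base >= nb_stages) {       /* same check shape as the diff */
            printf("too many planes\n");
            return 1;
        }
        for (int i = 0; i < cnt; i++)
            printf("plane %d -> stage %d\n", i, base + i);
        return 0;
    }
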
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c index 951c002b05df..83bf997dda03 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c | |||
@@ -292,8 +292,7 @@ static int mdp5_plane_atomic_check(struct drm_plane *plane, | |||
292 | format = to_mdp_format(msm_framebuffer_format(state->fb)); | 292 | format = to_mdp_format(msm_framebuffer_format(state->fb)); |
293 | if (MDP_FORMAT_IS_YUV(format) && | 293 | if (MDP_FORMAT_IS_YUV(format) && |
294 | !pipe_supports_yuv(mdp5_plane->caps)) { | 294 | !pipe_supports_yuv(mdp5_plane->caps)) { |
295 | dev_err(plane->dev->dev, | 295 | DBG("Pipe doesn't support YUV\n"); |
296 | "Pipe doesn't support YUV\n"); | ||
297 | 296 | ||
298 | return -EINVAL; | 297 | return -EINVAL; |
299 | } | 298 | } |
@@ -301,8 +300,7 @@ static int mdp5_plane_atomic_check(struct drm_plane *plane, | |||
301 | if (!(mdp5_plane->caps & MDP_PIPE_CAP_SCALE) && | 300 | if (!(mdp5_plane->caps & MDP_PIPE_CAP_SCALE) && |
302 | (((state->src_w >> 16) != state->crtc_w) || | 301 | (((state->src_w >> 16) != state->crtc_w) || |
303 | ((state->src_h >> 16) != state->crtc_h))) { | 302 | ((state->src_h >> 16) != state->crtc_h))) { |
304 | dev_err(plane->dev->dev, | 303 | DBG("Pipe doesn't support scaling (%dx%d -> %dx%d)\n", |
305 | "Pipe doesn't support scaling (%dx%d -> %dx%d)\n", | ||
306 | state->src_w >> 16, state->src_h >> 16, | 304 | state->src_w >> 16, state->src_h >> 16, |
307 | state->crtc_w, state->crtc_h); | 305 | state->crtc_w, state->crtc_h); |
308 | 306 | ||
@@ -313,8 +311,7 @@ static int mdp5_plane_atomic_check(struct drm_plane *plane, | |||
313 | vflip = !!(state->rotation & DRM_REFLECT_Y); | 311 | vflip = !!(state->rotation & DRM_REFLECT_Y); |
314 | if ((vflip && !(mdp5_plane->caps & MDP_PIPE_CAP_VFLIP)) || | 312 | if ((vflip && !(mdp5_plane->caps & MDP_PIPE_CAP_VFLIP)) || |
315 | (hflip && !(mdp5_plane->caps & MDP_PIPE_CAP_HFLIP))) { | 313 | (hflip && !(mdp5_plane->caps & MDP_PIPE_CAP_HFLIP))) { |
316 | dev_err(plane->dev->dev, | 314 | DBG("Pipe doesn't support flip\n"); |
317 | "Pipe doesn't support flip\n"); | ||
318 | 315 | ||
319 | return -EINVAL; | 316 | return -EINVAL; |
320 | } | 317 | } |
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c index fb5c0b0a7594..46568fc80848 100644 --- a/drivers/gpu/drm/msm/msm_drv.c +++ b/drivers/gpu/drm/msm/msm_drv.c | |||
@@ -228,7 +228,7 @@ static int msm_drm_uninit(struct device *dev) | |||
228 | flush_workqueue(priv->atomic_wq); | 228 | flush_workqueue(priv->atomic_wq); |
229 | destroy_workqueue(priv->atomic_wq); | 229 | destroy_workqueue(priv->atomic_wq); |
230 | 230 | ||
231 | if (kms) | 231 | if (kms && kms->funcs) |
232 | kms->funcs->destroy(kms); | 232 | kms->funcs->destroy(kms); |
233 | 233 | ||
234 | if (gpu) { | 234 | if (gpu) { |
diff --git a/drivers/gpu/drm/msm/msm_gem_shrinker.c b/drivers/gpu/drm/msm/msm_gem_shrinker.c index 283d2841ba58..192b2d3a79cb 100644 --- a/drivers/gpu/drm/msm/msm_gem_shrinker.c +++ b/drivers/gpu/drm/msm/msm_gem_shrinker.c | |||
@@ -163,6 +163,9 @@ void msm_gem_shrinker_init(struct drm_device *dev) | |||
163 | void msm_gem_shrinker_cleanup(struct drm_device *dev) | 163 | void msm_gem_shrinker_cleanup(struct drm_device *dev) |
164 | { | 164 | { |
165 | struct msm_drm_private *priv = dev->dev_private; | 165 | struct msm_drm_private *priv = dev->dev_private; |
166 | WARN_ON(unregister_vmap_purge_notifier(&priv->vmap_notifier)); | 166 | |
167 | unregister_shrinker(&priv->shrinker); | 167 | if (priv->shrinker.nr_deferred) { |
168 | WARN_ON(unregister_vmap_purge_notifier(&priv->vmap_notifier)); | ||
169 | unregister_shrinker(&priv->shrinker); | ||
170 | } | ||
168 | } | 171 | } |
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c index e18839d52e3e..27affbde058c 100644 --- a/drivers/gpu/drm/radeon/radeon_connectors.c +++ b/drivers/gpu/drm/radeon/radeon_connectors.c | |||
@@ -931,7 +931,7 @@ static void radeon_connector_unregister(struct drm_connector *connector) | |||
931 | { | 931 | { |
932 | struct radeon_connector *radeon_connector = to_radeon_connector(connector); | 932 | struct radeon_connector *radeon_connector = to_radeon_connector(connector); |
933 | 933 | ||
934 | if (radeon_connector->ddc_bus->has_aux) { | 934 | if (radeon_connector->ddc_bus && radeon_connector->ddc_bus->has_aux) { |
935 | drm_dp_aux_unregister(&radeon_connector->ddc_bus->aux); | 935 | drm_dp_aux_unregister(&radeon_connector->ddc_bus->aux); |
936 | radeon_connector->ddc_bus->has_aux = false; | 936 | radeon_connector->ddc_bus->has_aux = false; |
937 | } | 937 | } |
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c index eb92aef46e3c..621af069a3d2 100644 --- a/drivers/gpu/drm/radeon/radeon_device.c +++ b/drivers/gpu/drm/radeon/radeon_device.c | |||
@@ -104,6 +104,14 @@ static const char radeon_family_name[][16] = { | |||
104 | "LAST", | 104 | "LAST", |
105 | }; | 105 | }; |
106 | 106 | ||
107 | #if defined(CONFIG_VGA_SWITCHEROO) | ||
108 | bool radeon_has_atpx_dgpu_power_cntl(void); | ||
109 | bool radeon_is_atpx_hybrid(void); | ||
110 | #else | ||
111 | static inline bool radeon_has_atpx_dgpu_power_cntl(void) { return false; } | ||
112 | static inline bool radeon_is_atpx_hybrid(void) { return false; } | ||
113 | #endif | ||
114 | |||
107 | #define RADEON_PX_QUIRK_DISABLE_PX (1 << 0) | 115 | #define RADEON_PX_QUIRK_DISABLE_PX (1 << 0) |
108 | #define RADEON_PX_QUIRK_LONG_WAKEUP (1 << 1) | 116 | #define RADEON_PX_QUIRK_LONG_WAKEUP (1 << 1) |
109 | 117 | ||
@@ -160,6 +168,11 @@ static void radeon_device_handle_px_quirks(struct radeon_device *rdev) | |||
160 | 168 | ||
161 | if (rdev->px_quirk_flags & RADEON_PX_QUIRK_DISABLE_PX) | 169 | if (rdev->px_quirk_flags & RADEON_PX_QUIRK_DISABLE_PX) |
162 | rdev->flags &= ~RADEON_IS_PX; | 170 | rdev->flags &= ~RADEON_IS_PX; |
171 | |||
172 | /* disable PX if the system doesn't support dGPU power control or hybrid gfx */ | ||
173 | if (!radeon_is_atpx_hybrid() && | ||
174 | !radeon_has_atpx_dgpu_power_cntl()) | ||
175 | rdev->flags &= ~RADEON_IS_PX; | ||
163 | } | 176 | } |
164 | 177 | ||
165 | /** | 178 | /** |
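
The radeon_device.c hunk uses the usual kernel idiom for optional facilities: declare the real helpers only when the config option is set, and provide static inline stubs that return false otherwise, so the caller needs no #ifdefs. A tiny standalone illustration of the same idiom with a made-up HAVE_FEATURE switch standing in for CONFIG_VGA_SWITCHEROO.

    #include <stdio.h>
    #include <stdbool.h>

    /* Flip to 1 to emulate the real implementation being built in. */
    #define HAVE_FEATURE 0

    #if HAVE_FEATURE
    bool feature_supported(void);            /* provided elsewhere when built in */
    #else
    static inline bool feature_supported(void) { return false; }
    #endif

    int main(void)
    {
        /* The caller stays free of #ifdefs either way. */
        if (!feature_supported())
            printf("feature disabled, falling back\n");
        return 0;
    }
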
diff --git a/drivers/gpu/drm/sun4i/sun4i_drv.c b/drivers/gpu/drm/sun4i/sun4i_drv.c index 0da9862ad8ed..70e9fd59c5a2 100644 --- a/drivers/gpu/drm/sun4i/sun4i_drv.c +++ b/drivers/gpu/drm/sun4i/sun4i_drv.c | |||
@@ -142,9 +142,9 @@ static int sun4i_drv_bind(struct device *dev) | |||
142 | 142 | ||
143 | /* Create our layers */ | 143 | /* Create our layers */ |
144 | drv->layers = sun4i_layers_init(drm); | 144 | drv->layers = sun4i_layers_init(drm); |
145 | if (!drv->layers) { | 145 | if (IS_ERR(drv->layers)) { |
146 | dev_err(drm->dev, "Couldn't create the planes\n"); | 146 | dev_err(drm->dev, "Couldn't create the planes\n"); |
147 | ret = -EINVAL; | 147 | ret = PTR_ERR(drv->layers); |
148 | goto free_drm; | 148 | goto free_drm; |
149 | } | 149 | } |
150 | 150 | ||
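
The sun4i_drv.c fix works because the layer-init path reports failure as an encoded error pointer, not NULL, so the caller has to test with IS_ERR() and recover the errno via PTR_ERR(). A simplified userspace model of that encoding is sketched below; the kernel's real helpers live in <linux/err.h>, and the reserved-top-page trick shown here is only an approximation of them.

    #include <stdio.h>
    #include <stdint.h>

    #define MAX_ERRNO   4095
    #define EINVAL_CODE 22               /* stand-in for EINVAL */

    static void    *err_ptr(intptr_t err)     { return (void *)err; }
    static intptr_t ptr_err(const void *ptr)  { return (intptr_t)ptr; }
    static int      is_err(const void *ptr)
    {
        /* Error pointers live in the topmost MAX_ERRNO addresses. */
        return (uintptr_t)ptr >= (uintptr_t)-MAX_ERRNO;
    }

    static void *layers_init(int fail)
    {
        static int dummy_layer;
        return fail ? err_ptr(-EINVAL_CODE) : &dummy_layer;
    }

    int main(void)
    {
        void *layers = layers_init(1);

        if (is_err(layers)) {
            printf("init failed: %ld\n", (long)ptr_err(layers));
            return 1;
        }
        return 0;
    }
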
diff --git a/drivers/gpu/drm/sun4i/sun4i_rgb.c b/drivers/gpu/drm/sun4i/sun4i_rgb.c index c3ff10f559cc..d198ad7e5323 100644 --- a/drivers/gpu/drm/sun4i/sun4i_rgb.c +++ b/drivers/gpu/drm/sun4i/sun4i_rgb.c | |||
@@ -152,15 +152,13 @@ static void sun4i_rgb_encoder_enable(struct drm_encoder *encoder) | |||
152 | 152 | ||
153 | DRM_DEBUG_DRIVER("Enabling RGB output\n"); | 153 | DRM_DEBUG_DRIVER("Enabling RGB output\n"); |
154 | 154 | ||
155 | if (!IS_ERR(tcon->panel)) { | 155 | if (!IS_ERR(tcon->panel)) |
156 | drm_panel_prepare(tcon->panel); | 156 | drm_panel_prepare(tcon->panel); |
157 | drm_panel_enable(tcon->panel); | ||
158 | } | ||
159 | |||
160 | /* encoder->bridge can be NULL; drm_bridge_enable checks for it */ | ||
161 | drm_bridge_enable(encoder->bridge); | ||
162 | 157 | ||
163 | sun4i_tcon_channel_enable(tcon, 0); | 158 | sun4i_tcon_channel_enable(tcon, 0); |
159 | |||
160 | if (!IS_ERR(tcon->panel)) | ||
161 | drm_panel_enable(tcon->panel); | ||
164 | } | 162 | } |
165 | 163 | ||
166 | static void sun4i_rgb_encoder_disable(struct drm_encoder *encoder) | 164 | static void sun4i_rgb_encoder_disable(struct drm_encoder *encoder) |
@@ -171,15 +169,13 @@ static void sun4i_rgb_encoder_disable(struct drm_encoder *encoder) | |||
171 | 169 | ||
172 | DRM_DEBUG_DRIVER("Disabling RGB output\n"); | 170 | DRM_DEBUG_DRIVER("Disabling RGB output\n"); |
173 | 171 | ||
174 | sun4i_tcon_channel_disable(tcon, 0); | 172 | if (!IS_ERR(tcon->panel)) |
173 | drm_panel_disable(tcon->panel); | ||
175 | 174 | ||
176 | /* encoder->bridge can be NULL; drm_bridge_disable checks for it */ | 175 | sun4i_tcon_channel_disable(tcon, 0); |
177 | drm_bridge_disable(encoder->bridge); | ||
178 | 176 | ||
179 | if (!IS_ERR(tcon->panel)) { | 177 | if (!IS_ERR(tcon->panel)) |
180 | drm_panel_disable(tcon->panel); | ||
181 | drm_panel_unprepare(tcon->panel); | 178 | drm_panel_unprepare(tcon->panel); |
182 | } | ||
183 | } | 179 | } |
184 | 180 | ||
185 | static void sun4i_rgb_encoder_mode_set(struct drm_encoder *encoder, | 181 | static void sun4i_rgb_encoder_mode_set(struct drm_encoder *encoder, |
diff --git a/drivers/gpu/drm/udl/udl_main.c b/drivers/gpu/drm/udl/udl_main.c index 29f0207fa677..873f010d9616 100644 --- a/drivers/gpu/drm/udl/udl_main.c +++ b/drivers/gpu/drm/udl/udl_main.c | |||
@@ -98,17 +98,23 @@ success: | |||
98 | static int udl_select_std_channel(struct udl_device *udl) | 98 | static int udl_select_std_channel(struct udl_device *udl) |
99 | { | 99 | { |
100 | int ret; | 100 | int ret; |
101 | u8 set_def_chn[] = {0x57, 0xCD, 0xDC, 0xA7, | 101 | static const u8 set_def_chn[] = {0x57, 0xCD, 0xDC, 0xA7, |
102 | 0x1C, 0x88, 0x5E, 0x15, | 102 | 0x1C, 0x88, 0x5E, 0x15, |
103 | 0x60, 0xFE, 0xC6, 0x97, | 103 | 0x60, 0xFE, 0xC6, 0x97, |
104 | 0x16, 0x3D, 0x47, 0xF2}; | 104 | 0x16, 0x3D, 0x47, 0xF2}; |
105 | void *sendbuf; | ||
106 | |||
107 | sendbuf = kmemdup(set_def_chn, sizeof(set_def_chn), GFP_KERNEL); | ||
108 | if (!sendbuf) | ||
109 | return -ENOMEM; | ||
105 | 110 | ||
106 | ret = usb_control_msg(udl->udev, | 111 | ret = usb_control_msg(udl->udev, |
107 | usb_sndctrlpipe(udl->udev, 0), | 112 | usb_sndctrlpipe(udl->udev, 0), |
108 | NR_USB_REQUEST_CHANNEL, | 113 | NR_USB_REQUEST_CHANNEL, |
109 | (USB_DIR_OUT | USB_TYPE_VENDOR), 0, 0, | 114 | (USB_DIR_OUT | USB_TYPE_VENDOR), 0, 0, |
110 | set_def_chn, sizeof(set_def_chn), | 115 | sendbuf, sizeof(set_def_chn), |
111 | USB_CTRL_SET_TIMEOUT); | 116 | USB_CTRL_SET_TIMEOUT); |
117 | kfree(sendbuf); | ||
112 | return ret < 0 ? ret : 0; | 118 | return ret < 0 ? ret : 0; |
113 | } | 119 | } |
114 | 120 | ||
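
The udl_main.c change duplicates the constant channel-selection blob into heap memory before the USB control transfer, since the transfer buffer must be a separately allocated, DMA-able buffer rather than rodata or stack memory. A userspace sketch of the same copy-then-send shape, with malloc/memcpy standing in for kmemdup() and a dummy send function standing in for usb_control_msg().

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* Stand-in for an API that must not be handed rodata/stack memory. */
    static int send_to_device(const void *buf, size_t len)
    {
        printf("sending %zu bytes, first byte 0x%02x\n",
               len, ((const unsigned char *)buf)[0]);
        return 0;
    }

    static int select_std_channel(void)
    {
        static const unsigned char set_def_chn[] = {
            0x57, 0xCD, 0xDC, 0xA7, 0x1C, 0x88, 0x5E, 0x15,
            0x60, 0xFE, 0xC6, 0x97, 0x16, 0x3D, 0x47, 0xF2
        };
        void *sendbuf = malloc(sizeof(set_def_chn));   /* kmemdup() equivalent */
        int ret;

        if (!sendbuf)
            return -1;
        memcpy(sendbuf, set_def_chn, sizeof(set_def_chn));

        ret = send_to_device(sendbuf, sizeof(set_def_chn));
        free(sendbuf);
        return ret;
    }

    int main(void)
    {
        return select_std_channel();
    }
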
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h index 6cfb5cacc253..575aa65436d1 100644 --- a/drivers/hid/hid-ids.h +++ b/drivers/hid/hid-ids.h | |||
@@ -179,6 +179,7 @@ | |||
179 | #define USB_DEVICE_ID_ATEN_4PORTKVM 0x2205 | 179 | #define USB_DEVICE_ID_ATEN_4PORTKVM 0x2205 |
180 | #define USB_DEVICE_ID_ATEN_4PORTKVMC 0x2208 | 180 | #define USB_DEVICE_ID_ATEN_4PORTKVMC 0x2208 |
181 | #define USB_DEVICE_ID_ATEN_CS682 0x2213 | 181 | #define USB_DEVICE_ID_ATEN_CS682 0x2213 |
182 | #define USB_DEVICE_ID_ATEN_CS692 0x8021 | ||
182 | 183 | ||
183 | #define USB_VENDOR_ID_ATMEL 0x03eb | 184 | #define USB_VENDOR_ID_ATMEL 0x03eb |
184 | #define USB_DEVICE_ID_ATMEL_MULTITOUCH 0x211c | 185 | #define USB_DEVICE_ID_ATMEL_MULTITOUCH 0x211c |
diff --git a/drivers/hid/hid-sensor-custom.c b/drivers/hid/hid-sensor-custom.c index 5614fee82347..3a84aaf1418b 100644 --- a/drivers/hid/hid-sensor-custom.c +++ b/drivers/hid/hid-sensor-custom.c | |||
@@ -292,11 +292,11 @@ static ssize_t show_value(struct device *dev, struct device_attribute *attr, | |||
292 | bool input = false; | 292 | bool input = false; |
293 | int value = 0; | 293 | int value = 0; |
294 | 294 | ||
295 | if (sscanf(attr->attr.name, "feature-%d-%x-%s", &index, &usage, | 295 | if (sscanf(attr->attr.name, "feature-%x-%x-%s", &index, &usage, |
296 | name) == 3) { | 296 | name) == 3) { |
297 | feature = true; | 297 | feature = true; |
298 | field_index = index + sensor_inst->input_field_count; | 298 | field_index = index + sensor_inst->input_field_count; |
299 | } else if (sscanf(attr->attr.name, "input-%d-%x-%s", &index, &usage, | 299 | } else if (sscanf(attr->attr.name, "input-%x-%x-%s", &index, &usage, |
300 | name) == 3) { | 300 | name) == 3) { |
301 | input = true; | 301 | input = true; |
302 | field_index = index; | 302 | field_index = index; |
@@ -398,7 +398,7 @@ static ssize_t store_value(struct device *dev, struct device_attribute *attr, | |||
398 | char name[HID_CUSTOM_NAME_LENGTH]; | 398 | char name[HID_CUSTOM_NAME_LENGTH]; |
399 | int value; | 399 | int value; |
400 | 400 | ||
401 | if (sscanf(attr->attr.name, "feature-%d-%x-%s", &index, &usage, | 401 | if (sscanf(attr->attr.name, "feature-%x-%x-%s", &index, &usage, |
402 | name) == 3) { | 402 | name) == 3) { |
403 | field_index = index + sensor_inst->input_field_count; | 403 | field_index = index + sensor_inst->input_field_count; |
404 | } else | 404 | } else |
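
The %d to %x switch above matters if the attribute names encode the field index in hex, which the matching format used elsewhere in the driver suggests; with %d the parse stops as soon as the index contains a hex digit. A standalone demonstration with an invented attribute name:

    #include <stdio.h>

    int main(void)
    {
        const char *name = "feature-1a-200341-value";
        int dec_index;
        unsigned int index, usage;
        char suffix[32];

        /* Decimal parse stops inside "1a": only one field matches. */
        int dec = sscanf(name, "feature-%d-%x-%31s", &dec_index, &usage, suffix);

        /* Hex parse matches all three fields. */
        int hex = sscanf(name, "feature-%x-%x-%31s", &index, &usage, suffix);

        printf("%%d matched %d field(s)\n", dec);
        printf("%%x matched %d field(s): index=0x%x usage=0x%x suffix=%s\n",
               hex, index, usage, suffix);
        return 0;
    }
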
diff --git a/drivers/hid/hid-sensor-hub.c b/drivers/hid/hid-sensor-hub.c index 658a607dc6d9..c5c3d6111729 100644 --- a/drivers/hid/hid-sensor-hub.c +++ b/drivers/hid/hid-sensor-hub.c | |||
@@ -251,6 +251,9 @@ int sensor_hub_get_feature(struct hid_sensor_hub_device *hsdev, u32 report_id, | |||
251 | struct sensor_hub_data *data = hid_get_drvdata(hsdev->hdev); | 251 | struct sensor_hub_data *data = hid_get_drvdata(hsdev->hdev); |
252 | int report_size; | 252 | int report_size; |
253 | int ret = 0; | 253 | int ret = 0; |
254 | u8 *val_ptr; | ||
255 | int buffer_index = 0; | ||
256 | int i; | ||
254 | 257 | ||
255 | mutex_lock(&data->mutex); | 258 | mutex_lock(&data->mutex); |
256 | report = sensor_hub_report(report_id, hsdev->hdev, HID_FEATURE_REPORT); | 259 | report = sensor_hub_report(report_id, hsdev->hdev, HID_FEATURE_REPORT); |
@@ -271,7 +274,17 @@ int sensor_hub_get_feature(struct hid_sensor_hub_device *hsdev, u32 report_id, | |||
271 | goto done_proc; | 274 | goto done_proc; |
272 | } | 275 | } |
273 | ret = min(report_size, buffer_size); | 276 | ret = min(report_size, buffer_size); |
274 | memcpy(buffer, report->field[field_index]->value, ret); | 277 | |
278 | val_ptr = (u8 *)report->field[field_index]->value; | ||
279 | for (i = 0; i < report->field[field_index]->report_count; ++i) { | ||
280 | if (buffer_index >= ret) | ||
281 | break; | ||
282 | |||
283 | memcpy(&((u8 *)buffer)[buffer_index], val_ptr, | ||
284 | report->field[field_index]->report_size / 8); | ||
285 | val_ptr += sizeof(__s32); | ||
286 | buffer_index += (report->field[field_index]->report_size / 8); | ||
287 | } | ||
275 | 288 | ||
276 | done_proc: | 289 | done_proc: |
277 | mutex_unlock(&data->mutex); | 290 | mutex_unlock(&data->mutex); |
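
The sensor-hub change stops memcpy'ing the raw __s32 value array wholesale and instead packs report_size/8 bytes per field, stepping the source by one 32-bit slot and the destination by the field width. A standalone sketch of that packing loop; the field count and width are invented, and the "first bytes of each slot" behaviour matches the little-endian assumption the driver's memcpy makes.

    #include <stdio.h>
    #include <stdint.h>
    #include <string.h>

    int main(void)
    {
        /* Parsed field values, one 32-bit slot per report field. */
        const int32_t values[] = { 0x11, 0x22, 0x33, 0x44 };
        const int report_count = 4;
        const int report_size_bits = 16;          /* each field is 16 bits wide */
        const int field_bytes = report_size_bits / 8;

        uint8_t buffer[16] = { 0 };
        int buffer_index = 0;
        const uint8_t *val_ptr = (const uint8_t *)values;

        for (int i = 0; i < report_count; i++) {
            if (buffer_index + field_bytes > (int)sizeof(buffer))
                break;
            /* Copy the first field_bytes of each 32-bit slot
             * (the low bytes on little-endian, as the driver assumes). */
            memcpy(&buffer[buffer_index], val_ptr, field_bytes);
            val_ptr += sizeof(int32_t);
            buffer_index += field_bytes;
        }

        for (int i = 0; i < buffer_index; i++)
            printf("%02x ", buffer[i]);
        printf("\n");
        return 0;
    }
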
diff --git a/drivers/hid/intel-ish-hid/ipc/ipc.c b/drivers/hid/intel-ish-hid/ipc/ipc.c index e2517c11e0ee..0c9ac4d5d850 100644 --- a/drivers/hid/intel-ish-hid/ipc/ipc.c +++ b/drivers/hid/intel-ish-hid/ipc/ipc.c | |||
@@ -638,6 +638,58 @@ eoi: | |||
638 | } | 638 | } |
639 | 639 | ||
640 | /** | 640 | /** |
641 | * ish_disable_dma() - disable dma communication between host and ISHFW | ||
642 | * @dev: ishtp device pointer | ||
643 | * | ||
644 | * Clear the DMA enable bit and wait until DMA is inactive. | ||
645 | * | ||
646 | * Return: 0 for success else error code. | ||
647 | */ | ||
648 | static int ish_disable_dma(struct ishtp_device *dev) | ||
649 | { | ||
650 | unsigned int dma_delay; | ||
651 | |||
652 | /* Clear the dma enable bit */ | ||
653 | ish_reg_write(dev, IPC_REG_ISH_RMP2, 0); | ||
654 | |||
655 | /* wait for dma inactive */ | ||
656 | for (dma_delay = 0; dma_delay < MAX_DMA_DELAY && | ||
657 | _ish_read_fw_sts_reg(dev) & (IPC_ISH_IN_DMA); | ||
658 | dma_delay += 5) | ||
659 | mdelay(5); | ||
660 | |||
661 | if (dma_delay >= MAX_DMA_DELAY) { | ||
662 | dev_err(dev->devc, | ||
663 | "Wait for DMA inactive timeout\n"); | ||
664 | return -EBUSY; | ||
665 | } | ||
666 | |||
667 | return 0; | ||
668 | } | ||
669 | |||
670 | /** | ||
671 | * ish_wakeup() - wake up ishfw from waiting-for-host state | ||
672 | * @dev: ishtp device pointer | ||
673 | * | ||
674 | * Set the dma enable bit and send a void message to FW, | ||
675 | * it will wake up FW from the waiting-for-host state. | ||
676 | */ | ||
677 | static void ish_wakeup(struct ishtp_device *dev) | ||
678 | { | ||
679 | /* Set dma enable bit */ | ||
680 | ish_reg_write(dev, IPC_REG_ISH_RMP2, IPC_RMP2_DMA_ENABLED); | ||
681 | |||
682 | /* | ||
683 | * Send 0 IPC message so that ISH FW wakes up if it was already | ||
684 | * asleep. | ||
685 | */ | ||
686 | ish_reg_write(dev, IPC_REG_HOST2ISH_DRBL, IPC_DRBL_BUSY_BIT); | ||
687 | |||
688 | /* Flush writes to doorbell and REMAP2 */ | ||
689 | ish_reg_read(dev, IPC_REG_ISH_HOST_FWSTS); | ||
690 | } | ||
691 | |||
692 | /** | ||
641 | * _ish_hw_reset() - HW reset | 693 | * _ish_hw_reset() - HW reset |
642 | * @dev: ishtp device pointer | 694 | * @dev: ishtp device pointer |
643 | * | 695 | * |
@@ -649,7 +701,6 @@ static int _ish_hw_reset(struct ishtp_device *dev) | |||
649 | { | 701 | { |
650 | struct pci_dev *pdev = dev->pdev; | 702 | struct pci_dev *pdev = dev->pdev; |
651 | int rv; | 703 | int rv; |
652 | unsigned int dma_delay; | ||
653 | uint16_t csr; | 704 | uint16_t csr; |
654 | 705 | ||
655 | if (!pdev) | 706 | if (!pdev) |
@@ -664,15 +715,8 @@ static int _ish_hw_reset(struct ishtp_device *dev) | |||
664 | return -EINVAL; | 715 | return -EINVAL; |
665 | } | 716 | } |
666 | 717 | ||
667 | /* Now trigger reset to FW */ | 718 | /* Disable dma communication between FW and host */ |
668 | ish_reg_write(dev, IPC_REG_ISH_RMP2, 0); | 719 | if (ish_disable_dma(dev)) { |
669 | |||
670 | for (dma_delay = 0; dma_delay < MAX_DMA_DELAY && | ||
671 | _ish_read_fw_sts_reg(dev) & (IPC_ISH_IN_DMA); | ||
672 | dma_delay += 5) | ||
673 | mdelay(5); | ||
674 | |||
675 | if (dma_delay >= MAX_DMA_DELAY) { | ||
676 | dev_err(&pdev->dev, | 720 | dev_err(&pdev->dev, |
677 | "Can't reset - stuck with DMA in-progress\n"); | 721 | "Can't reset - stuck with DMA in-progress\n"); |
678 | return -EBUSY; | 722 | return -EBUSY; |
@@ -690,16 +734,8 @@ static int _ish_hw_reset(struct ishtp_device *dev) | |||
690 | csr |= PCI_D0; | 734 | csr |= PCI_D0; |
691 | pci_write_config_word(pdev, pdev->pm_cap + PCI_PM_CTRL, csr); | 735 | pci_write_config_word(pdev, pdev->pm_cap + PCI_PM_CTRL, csr); |
692 | 736 | ||
693 | ish_reg_write(dev, IPC_REG_ISH_RMP2, IPC_RMP2_DMA_ENABLED); | 737 | /* Now we can enable ISH DMA operation and wakeup ISHFW */ |
694 | 738 | ish_wakeup(dev); | |
695 | /* | ||
696 | * Send 0 IPC message so that ISH FW wakes up if it was already | ||
697 | * asleep | ||
698 | */ | ||
699 | ish_reg_write(dev, IPC_REG_HOST2ISH_DRBL, IPC_DRBL_BUSY_BIT); | ||
700 | |||
701 | /* Flush writes to doorbell and REMAP2 */ | ||
702 | ish_reg_read(dev, IPC_REG_ISH_HOST_FWSTS); | ||
703 | 739 | ||
704 | return 0; | 740 | return 0; |
705 | } | 741 | } |
@@ -758,16 +794,9 @@ static int _ish_ipc_reset(struct ishtp_device *dev) | |||
758 | int ish_hw_start(struct ishtp_device *dev) | 794 | int ish_hw_start(struct ishtp_device *dev) |
759 | { | 795 | { |
760 | ish_set_host_rdy(dev); | 796 | ish_set_host_rdy(dev); |
761 | /* After that we can enable ISH DMA operation */ | ||
762 | ish_reg_write(dev, IPC_REG_ISH_RMP2, IPC_RMP2_DMA_ENABLED); | ||
763 | 797 | ||
764 | /* | 798 | /* After that we can enable ISH DMA operation and wakeup ISHFW */ |
765 | * Send 0 IPC message so that ISH FW wakes up if it was already | 799 | ish_wakeup(dev); |
766 | * asleep | ||
767 | */ | ||
768 | ish_reg_write(dev, IPC_REG_HOST2ISH_DRBL, IPC_DRBL_BUSY_BIT); | ||
769 | /* Flush write to doorbell */ | ||
770 | ish_reg_read(dev, IPC_REG_ISH_HOST_FWSTS); | ||
771 | 800 | ||
772 | set_host_ready(dev); | 801 | set_host_ready(dev); |
773 | 802 | ||
@@ -876,6 +905,21 @@ struct ishtp_device *ish_dev_init(struct pci_dev *pdev) | |||
876 | */ | 905 | */ |
877 | void ish_device_disable(struct ishtp_device *dev) | 906 | void ish_device_disable(struct ishtp_device *dev) |
878 | { | 907 | { |
908 | struct pci_dev *pdev = dev->pdev; | ||
909 | |||
910 | if (!pdev) | ||
911 | return; | ||
912 | |||
913 | /* Disable dma communication between FW and host */ | ||
914 | if (ish_disable_dma(dev)) { | ||
915 | dev_err(&pdev->dev, | ||
916 | "Can't reset - stuck with DMA in-progress\n"); | ||
917 | return; | ||
918 | } | ||
919 | |||
920 | /* Put ISH to D3hot state for power saving */ | ||
921 | pci_set_power_state(pdev, PCI_D3hot); | ||
922 | |||
879 | dev->dev_state = ISHTP_DEV_DISABLED; | 923 | dev->dev_state = ISHTP_DEV_DISABLED; |
880 | ish_clr_host_rdy(dev); | 924 | ish_clr_host_rdy(dev); |
881 | } | 925 | } |
diff --git a/drivers/hid/intel-ish-hid/ipc/pci-ish.c b/drivers/hid/intel-ish-hid/ipc/pci-ish.c index 42f0beeb09fd..20d647d2dd2c 100644 --- a/drivers/hid/intel-ish-hid/ipc/pci-ish.c +++ b/drivers/hid/intel-ish-hid/ipc/pci-ish.c | |||
@@ -146,7 +146,7 @@ static int ish_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
146 | pdev->dev_flags |= PCI_DEV_FLAGS_NO_D3; | 146 | pdev->dev_flags |= PCI_DEV_FLAGS_NO_D3; |
147 | 147 | ||
148 | /* request and enable interrupt */ | 148 | /* request and enable interrupt */ |
149 | ret = request_irq(pdev->irq, ish_irq_handler, IRQF_NO_SUSPEND, | 149 | ret = request_irq(pdev->irq, ish_irq_handler, IRQF_SHARED, |
150 | KBUILD_MODNAME, dev); | 150 | KBUILD_MODNAME, dev); |
151 | if (ret) { | 151 | if (ret) { |
152 | dev_err(&pdev->dev, "ISH: request IRQ failure (%d)\n", | 152 | dev_err(&pdev->dev, "ISH: request IRQ failure (%d)\n", |
@@ -202,6 +202,7 @@ static void ish_remove(struct pci_dev *pdev) | |||
202 | kfree(ishtp_dev); | 202 | kfree(ishtp_dev); |
203 | } | 203 | } |
204 | 204 | ||
205 | #ifdef CONFIG_PM | ||
205 | static struct device *ish_resume_device; | 206 | static struct device *ish_resume_device; |
206 | 207 | ||
207 | /** | 208 | /** |
@@ -293,7 +294,6 @@ static int ish_resume(struct device *device) | |||
293 | return 0; | 294 | return 0; |
294 | } | 295 | } |
295 | 296 | ||
296 | #ifdef CONFIG_PM | ||
297 | static const struct dev_pm_ops ish_pm_ops = { | 297 | static const struct dev_pm_ops ish_pm_ops = { |
298 | .suspend = ish_suspend, | 298 | .suspend = ish_suspend, |
299 | .resume = ish_resume, | 299 | .resume = ish_resume, |
@@ -301,7 +301,7 @@ static const struct dev_pm_ops ish_pm_ops = { | |||
301 | #define ISHTP_ISH_PM_OPS (&ish_pm_ops) | 301 | #define ISHTP_ISH_PM_OPS (&ish_pm_ops) |
302 | #else | 302 | #else |
303 | #define ISHTP_ISH_PM_OPS NULL | 303 | #define ISHTP_ISH_PM_OPS NULL |
304 | #endif | 304 | #endif /* CONFIG_PM */ |
305 | 305 | ||
306 | static struct pci_driver ish_driver = { | 306 | static struct pci_driver ish_driver = { |
307 | .name = KBUILD_MODNAME, | 307 | .name = KBUILD_MODNAME, |
diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c index 354d49ea36dd..e6cfd323babc 100644 --- a/drivers/hid/usbhid/hid-quirks.c +++ b/drivers/hid/usbhid/hid-quirks.c | |||
@@ -63,6 +63,7 @@ static const struct hid_blacklist { | |||
63 | { USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_4PORTKVM, HID_QUIRK_NOGET }, | 63 | { USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_4PORTKVM, HID_QUIRK_NOGET }, |
64 | { USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_4PORTKVMC, HID_QUIRK_NOGET }, | 64 | { USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_4PORTKVMC, HID_QUIRK_NOGET }, |
65 | { USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_CS682, HID_QUIRK_NOGET }, | 65 | { USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_CS682, HID_QUIRK_NOGET }, |
66 | { USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_CS692, HID_QUIRK_NOGET }, | ||
66 | { USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_FIGHTERSTICK, HID_QUIRK_NOGET }, | 67 | { USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_FIGHTERSTICK, HID_QUIRK_NOGET }, |
67 | { USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_COMBATSTICK, HID_QUIRK_NOGET }, | 68 | { USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_COMBATSTICK, HID_QUIRK_NOGET }, |
68 | { USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_FLIGHT_SIM_ECLIPSE_YOKE, HID_QUIRK_NOGET }, | 69 | { USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_FLIGHT_SIM_ECLIPSE_YOKE, HID_QUIRK_NOGET }, |
diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c index a259e18d22d5..0276d2ef06ee 100644 --- a/drivers/hv/vmbus_drv.c +++ b/drivers/hv/vmbus_drv.c | |||
@@ -961,7 +961,7 @@ int vmbus_device_register(struct hv_device *child_device_obj) | |||
961 | { | 961 | { |
962 | int ret = 0; | 962 | int ret = 0; |
963 | 963 | ||
964 | dev_set_name(&child_device_obj->device, "vmbus-%pUl", | 964 | dev_set_name(&child_device_obj->device, "%pUl", |
965 | child_device_obj->channel->offermsg.offer.if_instance.b); | 965 | child_device_obj->channel->offermsg.offer.if_instance.b); |
966 | 966 | ||
967 | child_device_obj->device.bus = &hv_bus; | 967 | child_device_obj->device.bus = &hv_bus; |
diff --git a/drivers/hwmon/hwmon.c b/drivers/hwmon/hwmon.c index adae6848ffb2..a74c075a30ec 100644 --- a/drivers/hwmon/hwmon.c +++ b/drivers/hwmon/hwmon.c | |||
@@ -536,8 +536,10 @@ __hwmon_device_register(struct device *dev, const char *name, void *drvdata, | |||
536 | 536 | ||
537 | hwdev->groups = devm_kcalloc(dev, ngroups, sizeof(*groups), | 537 | hwdev->groups = devm_kcalloc(dev, ngroups, sizeof(*groups), |
538 | GFP_KERNEL); | 538 | GFP_KERNEL); |
539 | if (!hwdev->groups) | 539 | if (!hwdev->groups) { |
540 | return ERR_PTR(-ENOMEM); | 540 | err = -ENOMEM; |
541 | goto free_hwmon; | ||
542 | } | ||
541 | 543 | ||
542 | attrs = __hwmon_create_attrs(dev, drvdata, chip); | 544 | attrs = __hwmon_create_attrs(dev, drvdata, chip); |
543 | if (IS_ERR(attrs)) { | 545 | if (IS_ERR(attrs)) { |
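
The hwmon fix matters because returning straight out after the failed groups allocation would leak the hwdev allocated earlier; jumping to the existing free_hwmon label unwinds it. A minimal userspace sketch of that allocate-then-unwind pattern; the names are placeholders, not the hwmon API.

    #include <stdio.h>
    #include <stdlib.h>

    struct device { int *groups; };

    static struct device *device_register(void)
    {
        int err;
        struct device *hwdev = malloc(sizeof(*hwdev));

        if (!hwdev)
            return NULL;

        hwdev->groups = calloc(4, sizeof(*hwdev->groups));
        if (!hwdev->groups) {
            err = -1;
            goto free_hwmon;             /* don't leak hwdev */
        }

        return hwdev;

    free_hwmon:
        free(hwdev);
        fprintf(stderr, "register failed: %d\n", err);
        return NULL;
    }

    int main(void)
    {
        struct device *dev = device_register();

        if (dev) {
            free(dev->groups);
            free(dev);
        }
        return 0;
    }
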
diff --git a/drivers/iio/accel/st_accel_core.c b/drivers/iio/accel/st_accel_core.c index da3fb069ec5c..ce69048c88e9 100644 --- a/drivers/iio/accel/st_accel_core.c +++ b/drivers/iio/accel/st_accel_core.c | |||
@@ -743,8 +743,8 @@ static int st_accel_read_raw(struct iio_dev *indio_dev, | |||
743 | 743 | ||
744 | return IIO_VAL_INT; | 744 | return IIO_VAL_INT; |
745 | case IIO_CHAN_INFO_SCALE: | 745 | case IIO_CHAN_INFO_SCALE: |
746 | *val = 0; | 746 | *val = adata->current_fullscale->gain / 1000000; |
747 | *val2 = adata->current_fullscale->gain; | 747 | *val2 = adata->current_fullscale->gain % 1000000; |
748 | return IIO_VAL_INT_PLUS_MICRO; | 748 | return IIO_VAL_INT_PLUS_MICRO; |
749 | case IIO_CHAN_INFO_SAMP_FREQ: | 749 | case IIO_CHAN_INFO_SAMP_FREQ: |
750 | *val = adata->odr; | 750 | *val = adata->odr; |
@@ -763,9 +763,13 @@ static int st_accel_write_raw(struct iio_dev *indio_dev, | |||
763 | int err; | 763 | int err; |
764 | 764 | ||
765 | switch (mask) { | 765 | switch (mask) { |
766 | case IIO_CHAN_INFO_SCALE: | 766 | case IIO_CHAN_INFO_SCALE: { |
767 | err = st_sensors_set_fullscale_by_gain(indio_dev, val2); | 767 | int gain; |
768 | |||
769 | gain = val * 1000000 + val2; | ||
770 | err = st_sensors_set_fullscale_by_gain(indio_dev, gain); | ||
768 | break; | 771 | break; |
772 | } | ||
769 | case IIO_CHAN_INFO_SAMP_FREQ: | 773 | case IIO_CHAN_INFO_SAMP_FREQ: |
770 | if (val2) | 774 | if (val2) |
771 | return -EINVAL; | 775 | return -EINVAL; |
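
The st_accel change treats the stored gain as a micro-unit fixed-point value: the read path splits it into the (val, val2) pair that IIO_VAL_INT_PLUS_MICRO expects, and the write path recombines val * 1000000 + val2 before looking up the full scale. The arithmetic on its own, with an illustrative gain value:

    #include <stdio.h>

    int main(void)
    {
        /* Gain stored in micro-units, e.g. 0.009806 units per LSB -> 9806. */
        int gain = 9806;

        /* Read path: split into integer and micro parts. */
        int val  = gain / 1000000;
        int val2 = gain % 1000000;
        printf("scale = %d.%06d\n", val, val2);

        /* Write path: recombine what userspace handed back. */
        int recombined = val * 1000000 + val2;
        printf("recombined gain = %d (matches: %s)\n",
               recombined, recombined == gain ? "yes" : "no");
        return 0;
    }
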
diff --git a/drivers/iio/common/hid-sensors/hid-sensor-attributes.c b/drivers/iio/common/hid-sensors/hid-sensor-attributes.c index dc33c1dd5191..b5beea53d6f6 100644 --- a/drivers/iio/common/hid-sensors/hid-sensor-attributes.c +++ b/drivers/iio/common/hid-sensors/hid-sensor-attributes.c | |||
@@ -30,26 +30,26 @@ static struct { | |||
30 | u32 usage_id; | 30 | u32 usage_id; |
31 | int unit; /* 0 for default others from HID sensor spec */ | 31 | int unit; /* 0 for default others from HID sensor spec */ |
32 | int scale_val0; /* scale, whole number */ | 32 | int scale_val0; /* scale, whole number */ |
33 | int scale_val1; /* scale, fraction in micros */ | 33 | int scale_val1; /* scale, fraction in nanos */ |
34 | } unit_conversion[] = { | 34 | } unit_conversion[] = { |
35 | {HID_USAGE_SENSOR_ACCEL_3D, 0, 9, 806650}, | 35 | {HID_USAGE_SENSOR_ACCEL_3D, 0, 9, 806650000}, |
36 | {HID_USAGE_SENSOR_ACCEL_3D, | 36 | {HID_USAGE_SENSOR_ACCEL_3D, |
37 | HID_USAGE_SENSOR_UNITS_METERS_PER_SEC_SQRD, 1, 0}, | 37 | HID_USAGE_SENSOR_UNITS_METERS_PER_SEC_SQRD, 1, 0}, |
38 | {HID_USAGE_SENSOR_ACCEL_3D, | 38 | {HID_USAGE_SENSOR_ACCEL_3D, |
39 | HID_USAGE_SENSOR_UNITS_G, 9, 806650}, | 39 | HID_USAGE_SENSOR_UNITS_G, 9, 806650000}, |
40 | 40 | ||
41 | {HID_USAGE_SENSOR_GYRO_3D, 0, 0, 17453}, | 41 | {HID_USAGE_SENSOR_GYRO_3D, 0, 0, 17453293}, |
42 | {HID_USAGE_SENSOR_GYRO_3D, | 42 | {HID_USAGE_SENSOR_GYRO_3D, |
43 | HID_USAGE_SENSOR_UNITS_RADIANS_PER_SECOND, 1, 0}, | 43 | HID_USAGE_SENSOR_UNITS_RADIANS_PER_SECOND, 1, 0}, |
44 | {HID_USAGE_SENSOR_GYRO_3D, | 44 | {HID_USAGE_SENSOR_GYRO_3D, |
45 | HID_USAGE_SENSOR_UNITS_DEGREES_PER_SECOND, 0, 17453}, | 45 | HID_USAGE_SENSOR_UNITS_DEGREES_PER_SECOND, 0, 17453293}, |
46 | 46 | ||
47 | {HID_USAGE_SENSOR_COMPASS_3D, 0, 0, 1000}, | 47 | {HID_USAGE_SENSOR_COMPASS_3D, 0, 0, 1000000}, |
48 | {HID_USAGE_SENSOR_COMPASS_3D, HID_USAGE_SENSOR_UNITS_GAUSS, 1, 0}, | 48 | {HID_USAGE_SENSOR_COMPASS_3D, HID_USAGE_SENSOR_UNITS_GAUSS, 1, 0}, |
49 | 49 | ||
50 | {HID_USAGE_SENSOR_INCLINOMETER_3D, 0, 0, 17453}, | 50 | {HID_USAGE_SENSOR_INCLINOMETER_3D, 0, 0, 17453293}, |
51 | {HID_USAGE_SENSOR_INCLINOMETER_3D, | 51 | {HID_USAGE_SENSOR_INCLINOMETER_3D, |
52 | HID_USAGE_SENSOR_UNITS_DEGREES, 0, 17453}, | 52 | HID_USAGE_SENSOR_UNITS_DEGREES, 0, 17453293}, |
53 | {HID_USAGE_SENSOR_INCLINOMETER_3D, | 53 | {HID_USAGE_SENSOR_INCLINOMETER_3D, |
54 | HID_USAGE_SENSOR_UNITS_RADIANS, 1, 0}, | 54 | HID_USAGE_SENSOR_UNITS_RADIANS, 1, 0}, |
55 | 55 | ||
@@ -57,7 +57,7 @@ static struct { | |||
57 | {HID_USAGE_SENSOR_ALS, HID_USAGE_SENSOR_UNITS_LUX, 1, 0}, | 57 | {HID_USAGE_SENSOR_ALS, HID_USAGE_SENSOR_UNITS_LUX, 1, 0}, |
58 | 58 | ||
59 | {HID_USAGE_SENSOR_PRESSURE, 0, 100, 0}, | 59 | {HID_USAGE_SENSOR_PRESSURE, 0, 100, 0}, |
60 | {HID_USAGE_SENSOR_PRESSURE, HID_USAGE_SENSOR_UNITS_PASCAL, 0, 1000}, | 60 | {HID_USAGE_SENSOR_PRESSURE, HID_USAGE_SENSOR_UNITS_PASCAL, 0, 1000000}, |
61 | }; | 61 | }; |
62 | 62 | ||
63 | static int pow_10(unsigned power) | 63 | static int pow_10(unsigned power) |
@@ -266,15 +266,15 @@ EXPORT_SYMBOL(hid_sensor_write_raw_hyst_value); | |||
266 | /* | 266 | /* |
267 | * This function applies the unit exponent to the scale. | 267 | * This function applies the unit exponent to the scale. |
268 | * For example: | 268 | * For example: |
269 | * 9.806650 ->exp:2-> val0[980]val1[665000] | 269 | * 9.806650000 ->exp:2-> val0[980]val1[665000000] |
270 | * 9.000806 ->exp:2-> val0[900]val1[80600] | 270 | * 9.000806000 ->exp:2-> val0[900]val1[80600000] |
271 | * 0.174535 ->exp:2-> val0[17]val1[453500] | 271 | * 0.174535293 ->exp:2-> val0[17]val1[453529300] |
272 | * 1.001745 ->exp:0-> val0[1]val1[1745] | 272 | * 1.001745329 ->exp:0-> val0[1]val1[1745329] |
273 | * 1.001745 ->exp:2-> val0[100]val1[174500] | 273 | * 1.001745329 ->exp:2-> val0[100]val1[174532900] |
274 | * 1.001745 ->exp:4-> val0[10017]val1[450000] | 274 | * 1.001745329 ->exp:4-> val0[10017]val1[453290000] |
275 | * 9.806650 ->exp:-2-> val0[0]val1[98066] | 275 | * 9.806650000 ->exp:-2-> val0[0]val1[98066500] |
276 | */ | 276 | */ |
277 | static void adjust_exponent_micro(int *val0, int *val1, int scale0, | 277 | static void adjust_exponent_nano(int *val0, int *val1, int scale0, |
278 | int scale1, int exp) | 278 | int scale1, int exp) |
279 | { | 279 | { |
280 | int i; | 280 | int i; |
@@ -285,32 +285,32 @@ static void adjust_exponent_micro(int *val0, int *val1, int scale0, | |||
285 | if (exp > 0) { | 285 | if (exp > 0) { |
286 | *val0 = scale0 * pow_10(exp); | 286 | *val0 = scale0 * pow_10(exp); |
287 | res = 0; | 287 | res = 0; |
288 | if (exp > 6) { | 288 | if (exp > 9) { |
289 | *val1 = 0; | 289 | *val1 = 0; |
290 | return; | 290 | return; |
291 | } | 291 | } |
292 | for (i = 0; i < exp; ++i) { | 292 | for (i = 0; i < exp; ++i) { |
293 | x = scale1 / pow_10(5 - i); | 293 | x = scale1 / pow_10(8 - i); |
294 | res += (pow_10(exp - 1 - i) * x); | 294 | res += (pow_10(exp - 1 - i) * x); |
295 | scale1 = scale1 % pow_10(5 - i); | 295 | scale1 = scale1 % pow_10(8 - i); |
296 | } | 296 | } |
297 | *val0 += res; | 297 | *val0 += res; |
298 | *val1 = scale1 * pow_10(exp); | 298 | *val1 = scale1 * pow_10(exp); |
299 | } else if (exp < 0) { | 299 | } else if (exp < 0) { |
300 | exp = abs(exp); | 300 | exp = abs(exp); |
301 | if (exp > 6) { | 301 | if (exp > 9) { |
302 | *val0 = *val1 = 0; | 302 | *val0 = *val1 = 0; |
303 | return; | 303 | return; |
304 | } | 304 | } |
305 | *val0 = scale0 / pow_10(exp); | 305 | *val0 = scale0 / pow_10(exp); |
306 | rem = scale0 % pow_10(exp); | 306 | rem = scale0 % pow_10(exp); |
307 | res = 0; | 307 | res = 0; |
308 | for (i = 0; i < (6 - exp); ++i) { | 308 | for (i = 0; i < (9 - exp); ++i) { |
309 | x = scale1 / pow_10(5 - i); | 309 | x = scale1 / pow_10(8 - i); |
310 | res += (pow_10(5 - exp - i) * x); | 310 | res += (pow_10(8 - exp - i) * x); |
311 | scale1 = scale1 % pow_10(5 - i); | 311 | scale1 = scale1 % pow_10(8 - i); |
312 | } | 312 | } |
313 | *val1 = rem * pow_10(6 - exp) + res; | 313 | *val1 = rem * pow_10(9 - exp) + res; |
314 | } else { | 314 | } else { |
315 | *val0 = scale0; | 315 | *val0 = scale0; |
316 | *val1 = scale1; | 316 | *val1 = scale1; |
@@ -332,14 +332,14 @@ int hid_sensor_format_scale(u32 usage_id, | |||
332 | unit_conversion[i].unit == attr_info->units) { | 332 | unit_conversion[i].unit == attr_info->units) { |
333 | exp = hid_sensor_convert_exponent( | 333 | exp = hid_sensor_convert_exponent( |
334 | attr_info->unit_expo); | 334 | attr_info->unit_expo); |
335 | adjust_exponent_micro(val0, val1, | 335 | adjust_exponent_nano(val0, val1, |
336 | unit_conversion[i].scale_val0, | 336 | unit_conversion[i].scale_val0, |
337 | unit_conversion[i].scale_val1, exp); | 337 | unit_conversion[i].scale_val1, exp); |
338 | break; | 338 | break; |
339 | } | 339 | } |
340 | } | 340 | } |
341 | 341 | ||
342 | return IIO_VAL_INT_PLUS_MICRO; | 342 | return IIO_VAL_INT_PLUS_NANO; |
343 | } | 343 | } |
344 | EXPORT_SYMBOL(hid_sensor_format_scale); | 344 | EXPORT_SYMBOL(hid_sensor_format_scale); |
345 | 345 | ||
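
The hid-sensor change above widens the stored scale fractions from micro to nano units and reports IIO_VAL_INT_PLUS_NANO, so constants such as 9.80665 m/s^2 per g and pi/180 rad per degree keep three more digits. A standalone comparison of the two fixed-point representations (values taken from the table in the hunk):

#include <stdio.h>

int main(void)
{
	long micro = 17453;	/* old table entry: 0.017453 rad per degree */
	long nano  = 17453293;	/* new table entry: 0.017453293 rad per degree */

	printf("micro: 0.%06ld\n", micro);	/* digits past 1e-6 were lost */
	printf("nano : 0.%09ld\n", nano);	/* much closer to pi/180 */
	return 0;
}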
diff --git a/drivers/iio/common/st_sensors/st_sensors_core.c b/drivers/iio/common/st_sensors/st_sensors_core.c index 285a64a589d7..975a1f19f747 100644 --- a/drivers/iio/common/st_sensors/st_sensors_core.c +++ b/drivers/iio/common/st_sensors/st_sensors_core.c | |||
@@ -612,7 +612,7 @@ EXPORT_SYMBOL(st_sensors_sysfs_sampling_frequency_avail); | |||
612 | ssize_t st_sensors_sysfs_scale_avail(struct device *dev, | 612 | ssize_t st_sensors_sysfs_scale_avail(struct device *dev, |
613 | struct device_attribute *attr, char *buf) | 613 | struct device_attribute *attr, char *buf) |
614 | { | 614 | { |
615 | int i, len = 0; | 615 | int i, len = 0, q, r; |
616 | struct iio_dev *indio_dev = dev_get_drvdata(dev); | 616 | struct iio_dev *indio_dev = dev_get_drvdata(dev); |
617 | struct st_sensor_data *sdata = iio_priv(indio_dev); | 617 | struct st_sensor_data *sdata = iio_priv(indio_dev); |
618 | 618 | ||
@@ -621,8 +621,10 @@ ssize_t st_sensors_sysfs_scale_avail(struct device *dev, | |||
621 | if (sdata->sensor_settings->fs.fs_avl[i].num == 0) | 621 | if (sdata->sensor_settings->fs.fs_avl[i].num == 0) |
622 | break; | 622 | break; |
623 | 623 | ||
624 | len += scnprintf(buf + len, PAGE_SIZE - len, "0.%06u ", | 624 | q = sdata->sensor_settings->fs.fs_avl[i].gain / 1000000; |
625 | sdata->sensor_settings->fs.fs_avl[i].gain); | 625 | r = sdata->sensor_settings->fs.fs_avl[i].gain % 1000000; |
626 | |||
627 | len += scnprintf(buf + len, PAGE_SIZE - len, "%u.%06u ", q, r); | ||
626 | } | 628 | } |
627 | mutex_unlock(&indio_dev->mlock); | 629 | mutex_unlock(&indio_dev->mlock); |
628 | buf[len - 1] = '\n'; | 630 | buf[len - 1] = '\n'; |
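
The st_sensors sysfs change above matters for gains of 1.0 and above: the old "0.%06u" format could only render values below one. A small sketch of the difference (the gain value here is a made-up example):

#include <stdio.h>

int main(void)
{
	unsigned int gain = 2928000;	/* hypothetical 2.928 scale in micro-units */

	printf("old: 0.%06u\n", gain);				  /* prints "0.2928000" */
	printf("new: %u.%06u\n", gain / 1000000, gain % 1000000); /* prints "2.928000" */
	return 0;
}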
diff --git a/drivers/iio/orientation/hid-sensor-rotation.c b/drivers/iio/orientation/hid-sensor-rotation.c index b98b9d94d184..a97e802ca523 100644 --- a/drivers/iio/orientation/hid-sensor-rotation.c +++ b/drivers/iio/orientation/hid-sensor-rotation.c | |||
@@ -335,6 +335,7 @@ static struct platform_driver hid_dev_rot_platform_driver = { | |||
335 | .id_table = hid_dev_rot_ids, | 335 | .id_table = hid_dev_rot_ids, |
336 | .driver = { | 336 | .driver = { |
337 | .name = KBUILD_MODNAME, | 337 | .name = KBUILD_MODNAME, |
338 | .pm = &hid_sensor_pm_ops, | ||
338 | }, | 339 | }, |
339 | .probe = hid_dev_rot_probe, | 340 | .probe = hid_dev_rot_probe, |
340 | .remove = hid_dev_rot_remove, | 341 | .remove = hid_dev_rot_remove, |
diff --git a/drivers/iio/temperature/maxim_thermocouple.c b/drivers/iio/temperature/maxim_thermocouple.c index 066161a4bccd..f962f31a5eb2 100644 --- a/drivers/iio/temperature/maxim_thermocouple.c +++ b/drivers/iio/temperature/maxim_thermocouple.c | |||
@@ -136,6 +136,8 @@ static int maxim_thermocouple_read(struct maxim_thermocouple_data *data, | |||
136 | ret = spi_read(data->spi, (void *)&buf32, storage_bytes); | 136 | ret = spi_read(data->spi, (void *)&buf32, storage_bytes); |
137 | *val = be32_to_cpu(buf32); | 137 | *val = be32_to_cpu(buf32); |
138 | break; | 138 | break; |
139 | default: | ||
140 | ret = -EINVAL; | ||
139 | } | 141 | } |
140 | 142 | ||
141 | if (ret) | 143 | if (ret) |
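
The maxim_thermocouple hunk above adds a default arm so an unexpected storage size yields -EINVAL instead of leaving ret unset. In miniature (a sketch, not the driver code):

#include <errno.h>
#include <stdio.h>

static int read_sample(int storage_bytes, int *val)
{
	int ret;

	switch (storage_bytes) {
	case 2:
		*val = 0x1234;		/* pretend a 16-bit SPI read */
		ret = 0;
		break;
	case 4:
		*val = 0x12345678;	/* pretend a 32-bit SPI read */
		ret = 0;
		break;
	default:
		ret = -EINVAL;		/* without this, ret would be used uninitialised */
	}
	return ret;
}

int main(void)
{
	int val = 0;

	return read_sample(3, &val) ? 0 : 1;
}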
diff --git a/drivers/infiniband/core/addr.c b/drivers/infiniband/core/addr.c index b136d3acc5bd..0f58f46dbad7 100644 --- a/drivers/infiniband/core/addr.c +++ b/drivers/infiniband/core/addr.c | |||
@@ -699,13 +699,16 @@ EXPORT_SYMBOL(rdma_addr_cancel); | |||
699 | struct resolve_cb_context { | 699 | struct resolve_cb_context { |
700 | struct rdma_dev_addr *addr; | 700 | struct rdma_dev_addr *addr; |
701 | struct completion comp; | 701 | struct completion comp; |
702 | int status; | ||
702 | }; | 703 | }; |
703 | 704 | ||
704 | static void resolve_cb(int status, struct sockaddr *src_addr, | 705 | static void resolve_cb(int status, struct sockaddr *src_addr, |
705 | struct rdma_dev_addr *addr, void *context) | 706 | struct rdma_dev_addr *addr, void *context) |
706 | { | 707 | { |
707 | memcpy(((struct resolve_cb_context *)context)->addr, addr, sizeof(struct | 708 | if (!status) |
708 | rdma_dev_addr)); | 709 | memcpy(((struct resolve_cb_context *)context)->addr, |
710 | addr, sizeof(struct rdma_dev_addr)); | ||
711 | ((struct resolve_cb_context *)context)->status = status; | ||
709 | complete(&((struct resolve_cb_context *)context)->comp); | 712 | complete(&((struct resolve_cb_context *)context)->comp); |
710 | } | 713 | } |
711 | 714 | ||
@@ -743,6 +746,10 @@ int rdma_addr_find_l2_eth_by_grh(const union ib_gid *sgid, | |||
743 | 746 | ||
744 | wait_for_completion(&ctx.comp); | 747 | wait_for_completion(&ctx.comp); |
745 | 748 | ||
749 | ret = ctx.status; | ||
750 | if (ret) | ||
751 | return ret; | ||
752 | |||
746 | memcpy(dmac, dev_addr.dst_dev_addr, ETH_ALEN); | 753 | memcpy(dmac, dev_addr.dst_dev_addr, ETH_ALEN); |
747 | dev = dev_get_by_index(&init_net, dev_addr.bound_dev_if); | 754 | dev = dev_get_by_index(&init_net, dev_addr.bound_dev_if); |
748 | if (!dev) | 755 | if (!dev) |
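
The addr.c fix above has the resolve callback record its status in the shared context before signalling completion, so the waiter can bail out instead of consuming an address that was never filled in. A userspace analogue of that wait-then-check-status pattern (pthreads stand in for the kernel completion; all names are invented):

#include <pthread.h>
#include <stdio.h>
#include <string.h>

struct ctx {
	pthread_mutex_t lock;
	pthread_cond_t  done_cv;
	int             done;
	int             status;		/* propagated from the callback */
	char            addr[16];	/* only valid when status == 0 */
};

static struct ctx g = {
	.lock = PTHREAD_MUTEX_INITIALIZER,
	.done_cv = PTHREAD_COND_INITIALIZER,
};

static void *resolver(void *arg)
{
	int status = 0;			/* pretend the lookup succeeded */

	(void)arg;
	pthread_mutex_lock(&g.lock);
	if (!status)			/* copy the result only on success */
		strcpy(g.addr, "aa:bb:cc");
	g.status = status;
	g.done = 1;
	pthread_cond_signal(&g.done_cv);
	pthread_mutex_unlock(&g.lock);
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, resolver, NULL);

	pthread_mutex_lock(&g.lock);	/* wait_for_completion() equivalent */
	while (!g.done)
		pthread_cond_wait(&g.done_cv, &g.lock);
	pthread_mutex_unlock(&g.lock);
	pthread_join(t, NULL);

	if (g.status)			/* check status before touching addr */
		return 1;
	printf("resolved: %s\n", g.addr);
	return 0;
}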
diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c index c99525512b34..71c7c4c328ef 100644 --- a/drivers/infiniband/core/cm.c +++ b/drivers/infiniband/core/cm.c | |||
@@ -80,6 +80,8 @@ static struct ib_cm { | |||
80 | __be32 random_id_operand; | 80 | __be32 random_id_operand; |
81 | struct list_head timewait_list; | 81 | struct list_head timewait_list; |
82 | struct workqueue_struct *wq; | 82 | struct workqueue_struct *wq; |
83 | /* Sync on cm port state changes */ | ||
84 | spinlock_t state_lock; | ||
83 | } cm; | 85 | } cm; |
84 | 86 | ||
85 | /* Counter indexes ordered by attribute ID */ | 87 | /* Counter indexes ordered by attribute ID */ |
@@ -161,6 +163,8 @@ struct cm_port { | |||
161 | struct ib_mad_agent *mad_agent; | 163 | struct ib_mad_agent *mad_agent; |
162 | struct kobject port_obj; | 164 | struct kobject port_obj; |
163 | u8 port_num; | 165 | u8 port_num; |
166 | struct list_head cm_priv_prim_list; | ||
167 | struct list_head cm_priv_altr_list; | ||
164 | struct cm_counter_group counter_group[CM_COUNTER_GROUPS]; | 168 | struct cm_counter_group counter_group[CM_COUNTER_GROUPS]; |
165 | }; | 169 | }; |
166 | 170 | ||
@@ -241,6 +245,12 @@ struct cm_id_private { | |||
241 | u8 service_timeout; | 245 | u8 service_timeout; |
242 | u8 target_ack_delay; | 246 | u8 target_ack_delay; |
243 | 247 | ||
248 | struct list_head prim_list; | ||
249 | struct list_head altr_list; | ||
250 | /* Indicates that the send port mad is registered and av is set */ | ||
251 | int prim_send_port_not_ready; | ||
252 | int altr_send_port_not_ready; | ||
253 | |||
244 | struct list_head work_list; | 254 | struct list_head work_list; |
245 | atomic_t work_count; | 255 | atomic_t work_count; |
246 | }; | 256 | }; |
@@ -259,20 +269,47 @@ static int cm_alloc_msg(struct cm_id_private *cm_id_priv, | |||
259 | struct ib_mad_agent *mad_agent; | 269 | struct ib_mad_agent *mad_agent; |
260 | struct ib_mad_send_buf *m; | 270 | struct ib_mad_send_buf *m; |
261 | struct ib_ah *ah; | 271 | struct ib_ah *ah; |
272 | struct cm_av *av; | ||
273 | unsigned long flags, flags2; | ||
274 | int ret = 0; | ||
262 | 275 | ||
276 | /* don't let the port be released till the agent is down */ | ||
277 | spin_lock_irqsave(&cm.state_lock, flags2); | ||
278 | spin_lock_irqsave(&cm.lock, flags); | ||
279 | if (!cm_id_priv->prim_send_port_not_ready) | ||
280 | av = &cm_id_priv->av; | ||
281 | else if (!cm_id_priv->altr_send_port_not_ready && | ||
282 | (cm_id_priv->alt_av.port)) | ||
283 | av = &cm_id_priv->alt_av; | ||
284 | else { | ||
285 | pr_info("%s: not valid CM id\n", __func__); | ||
286 | ret = -ENODEV; | ||
287 | spin_unlock_irqrestore(&cm.lock, flags); | ||
288 | goto out; | ||
289 | } | ||
290 | spin_unlock_irqrestore(&cm.lock, flags); | ||
291 | /* Make sure the port hasn't released the mad yet */ | ||
263 | mad_agent = cm_id_priv->av.port->mad_agent; | 292 | mad_agent = cm_id_priv->av.port->mad_agent; |
264 | ah = ib_create_ah(mad_agent->qp->pd, &cm_id_priv->av.ah_attr); | 293 | if (!mad_agent) { |
265 | if (IS_ERR(ah)) | 294 | pr_info("%s: not a valid MAD agent\n", __func__); |
266 | return PTR_ERR(ah); | 295 | ret = -ENODEV; |
296 | goto out; | ||
297 | } | ||
298 | ah = ib_create_ah(mad_agent->qp->pd, &av->ah_attr); | ||
299 | if (IS_ERR(ah)) { | ||
300 | ret = PTR_ERR(ah); | ||
301 | goto out; | ||
302 | } | ||
267 | 303 | ||
268 | m = ib_create_send_mad(mad_agent, cm_id_priv->id.remote_cm_qpn, | 304 | m = ib_create_send_mad(mad_agent, cm_id_priv->id.remote_cm_qpn, |
269 | cm_id_priv->av.pkey_index, | 305 | av->pkey_index, |
270 | 0, IB_MGMT_MAD_HDR, IB_MGMT_MAD_DATA, | 306 | 0, IB_MGMT_MAD_HDR, IB_MGMT_MAD_DATA, |
271 | GFP_ATOMIC, | 307 | GFP_ATOMIC, |
272 | IB_MGMT_BASE_VERSION); | 308 | IB_MGMT_BASE_VERSION); |
273 | if (IS_ERR(m)) { | 309 | if (IS_ERR(m)) { |
274 | ib_destroy_ah(ah); | 310 | ib_destroy_ah(ah); |
275 | return PTR_ERR(m); | 311 | ret = PTR_ERR(m); |
312 | goto out; | ||
276 | } | 313 | } |
277 | 314 | ||
278 | /* Timeout set by caller if response is expected. */ | 315 | /* Timeout set by caller if response is expected. */ |
@@ -282,7 +319,10 @@ static int cm_alloc_msg(struct cm_id_private *cm_id_priv, | |||
282 | atomic_inc(&cm_id_priv->refcount); | 319 | atomic_inc(&cm_id_priv->refcount); |
283 | m->context[0] = cm_id_priv; | 320 | m->context[0] = cm_id_priv; |
284 | *msg = m; | 321 | *msg = m; |
285 | return 0; | 322 | |
323 | out: | ||
324 | spin_unlock_irqrestore(&cm.state_lock, flags2); | ||
325 | return ret; | ||
286 | } | 326 | } |
287 | 327 | ||
288 | static int cm_alloc_response_msg(struct cm_port *port, | 328 | static int cm_alloc_response_msg(struct cm_port *port, |
@@ -352,7 +392,8 @@ static void cm_init_av_for_response(struct cm_port *port, struct ib_wc *wc, | |||
352 | grh, &av->ah_attr); | 392 | grh, &av->ah_attr); |
353 | } | 393 | } |
354 | 394 | ||
355 | static int cm_init_av_by_path(struct ib_sa_path_rec *path, struct cm_av *av) | 395 | static int cm_init_av_by_path(struct ib_sa_path_rec *path, struct cm_av *av, |
396 | struct cm_id_private *cm_id_priv) | ||
356 | { | 397 | { |
357 | struct cm_device *cm_dev; | 398 | struct cm_device *cm_dev; |
358 | struct cm_port *port = NULL; | 399 | struct cm_port *port = NULL; |
@@ -387,7 +428,17 @@ static int cm_init_av_by_path(struct ib_sa_path_rec *path, struct cm_av *av) | |||
387 | &av->ah_attr); | 428 | &av->ah_attr); |
388 | av->timeout = path->packet_life_time + 1; | 429 | av->timeout = path->packet_life_time + 1; |
389 | 430 | ||
390 | return 0; | 431 | spin_lock_irqsave(&cm.lock, flags); |
432 | if (&cm_id_priv->av == av) | ||
433 | list_add_tail(&cm_id_priv->prim_list, &port->cm_priv_prim_list); | ||
434 | else if (&cm_id_priv->alt_av == av) | ||
435 | list_add_tail(&cm_id_priv->altr_list, &port->cm_priv_altr_list); | ||
436 | else | ||
437 | ret = -EINVAL; | ||
438 | |||
439 | spin_unlock_irqrestore(&cm.lock, flags); | ||
440 | |||
441 | return ret; | ||
391 | } | 442 | } |
392 | 443 | ||
393 | static int cm_alloc_id(struct cm_id_private *cm_id_priv) | 444 | static int cm_alloc_id(struct cm_id_private *cm_id_priv) |
@@ -677,6 +728,8 @@ struct ib_cm_id *ib_create_cm_id(struct ib_device *device, | |||
677 | spin_lock_init(&cm_id_priv->lock); | 728 | spin_lock_init(&cm_id_priv->lock); |
678 | init_completion(&cm_id_priv->comp); | 729 | init_completion(&cm_id_priv->comp); |
679 | INIT_LIST_HEAD(&cm_id_priv->work_list); | 730 | INIT_LIST_HEAD(&cm_id_priv->work_list); |
731 | INIT_LIST_HEAD(&cm_id_priv->prim_list); | ||
732 | INIT_LIST_HEAD(&cm_id_priv->altr_list); | ||
680 | atomic_set(&cm_id_priv->work_count, -1); | 733 | atomic_set(&cm_id_priv->work_count, -1); |
681 | atomic_set(&cm_id_priv->refcount, 1); | 734 | atomic_set(&cm_id_priv->refcount, 1); |
682 | return &cm_id_priv->id; | 735 | return &cm_id_priv->id; |
@@ -892,6 +945,15 @@ retest: | |||
892 | break; | 945 | break; |
893 | } | 946 | } |
894 | 947 | ||
948 | spin_lock_irq(&cm.lock); | ||
949 | if (!list_empty(&cm_id_priv->altr_list) && | ||
950 | (!cm_id_priv->altr_send_port_not_ready)) | ||
951 | list_del(&cm_id_priv->altr_list); | ||
952 | if (!list_empty(&cm_id_priv->prim_list) && | ||
953 | (!cm_id_priv->prim_send_port_not_ready)) | ||
954 | list_del(&cm_id_priv->prim_list); | ||
955 | spin_unlock_irq(&cm.lock); | ||
956 | |||
895 | cm_free_id(cm_id->local_id); | 957 | cm_free_id(cm_id->local_id); |
896 | cm_deref_id(cm_id_priv); | 958 | cm_deref_id(cm_id_priv); |
897 | wait_for_completion(&cm_id_priv->comp); | 959 | wait_for_completion(&cm_id_priv->comp); |
@@ -1192,12 +1254,13 @@ int ib_send_cm_req(struct ib_cm_id *cm_id, | |||
1192 | goto out; | 1254 | goto out; |
1193 | } | 1255 | } |
1194 | 1256 | ||
1195 | ret = cm_init_av_by_path(param->primary_path, &cm_id_priv->av); | 1257 | ret = cm_init_av_by_path(param->primary_path, &cm_id_priv->av, |
1258 | cm_id_priv); | ||
1196 | if (ret) | 1259 | if (ret) |
1197 | goto error1; | 1260 | goto error1; |
1198 | if (param->alternate_path) { | 1261 | if (param->alternate_path) { |
1199 | ret = cm_init_av_by_path(param->alternate_path, | 1262 | ret = cm_init_av_by_path(param->alternate_path, |
1200 | &cm_id_priv->alt_av); | 1263 | &cm_id_priv->alt_av, cm_id_priv); |
1201 | if (ret) | 1264 | if (ret) |
1202 | goto error1; | 1265 | goto error1; |
1203 | } | 1266 | } |
@@ -1653,7 +1716,8 @@ static int cm_req_handler(struct cm_work *work) | |||
1653 | dev_put(gid_attr.ndev); | 1716 | dev_put(gid_attr.ndev); |
1654 | } | 1717 | } |
1655 | work->path[0].gid_type = gid_attr.gid_type; | 1718 | work->path[0].gid_type = gid_attr.gid_type; |
1656 | ret = cm_init_av_by_path(&work->path[0], &cm_id_priv->av); | 1719 | ret = cm_init_av_by_path(&work->path[0], &cm_id_priv->av, |
1720 | cm_id_priv); | ||
1657 | } | 1721 | } |
1658 | if (ret) { | 1722 | if (ret) { |
1659 | int err = ib_get_cached_gid(work->port->cm_dev->ib_device, | 1723 | int err = ib_get_cached_gid(work->port->cm_dev->ib_device, |
@@ -1672,7 +1736,8 @@ static int cm_req_handler(struct cm_work *work) | |||
1672 | goto rejected; | 1736 | goto rejected; |
1673 | } | 1737 | } |
1674 | if (req_msg->alt_local_lid) { | 1738 | if (req_msg->alt_local_lid) { |
1675 | ret = cm_init_av_by_path(&work->path[1], &cm_id_priv->alt_av); | 1739 | ret = cm_init_av_by_path(&work->path[1], &cm_id_priv->alt_av, |
1740 | cm_id_priv); | ||
1676 | if (ret) { | 1741 | if (ret) { |
1677 | ib_send_cm_rej(cm_id, IB_CM_REJ_INVALID_ALT_GID, | 1742 | ib_send_cm_rej(cm_id, IB_CM_REJ_INVALID_ALT_GID, |
1678 | &work->path[0].sgid, | 1743 | &work->path[0].sgid, |
@@ -2727,7 +2792,8 @@ int ib_send_cm_lap(struct ib_cm_id *cm_id, | |||
2727 | goto out; | 2792 | goto out; |
2728 | } | 2793 | } |
2729 | 2794 | ||
2730 | ret = cm_init_av_by_path(alternate_path, &cm_id_priv->alt_av); | 2795 | ret = cm_init_av_by_path(alternate_path, &cm_id_priv->alt_av, |
2796 | cm_id_priv); | ||
2731 | if (ret) | 2797 | if (ret) |
2732 | goto out; | 2798 | goto out; |
2733 | cm_id_priv->alt_av.timeout = | 2799 | cm_id_priv->alt_av.timeout = |
@@ -2839,7 +2905,8 @@ static int cm_lap_handler(struct cm_work *work) | |||
2839 | cm_init_av_for_response(work->port, work->mad_recv_wc->wc, | 2905 | cm_init_av_for_response(work->port, work->mad_recv_wc->wc, |
2840 | work->mad_recv_wc->recv_buf.grh, | 2906 | work->mad_recv_wc->recv_buf.grh, |
2841 | &cm_id_priv->av); | 2907 | &cm_id_priv->av); |
2842 | cm_init_av_by_path(param->alternate_path, &cm_id_priv->alt_av); | 2908 | cm_init_av_by_path(param->alternate_path, &cm_id_priv->alt_av, |
2909 | cm_id_priv); | ||
2843 | ret = atomic_inc_and_test(&cm_id_priv->work_count); | 2910 | ret = atomic_inc_and_test(&cm_id_priv->work_count); |
2844 | if (!ret) | 2911 | if (!ret) |
2845 | list_add_tail(&work->list, &cm_id_priv->work_list); | 2912 | list_add_tail(&work->list, &cm_id_priv->work_list); |
@@ -3031,7 +3098,7 @@ int ib_send_cm_sidr_req(struct ib_cm_id *cm_id, | |||
3031 | return -EINVAL; | 3098 | return -EINVAL; |
3032 | 3099 | ||
3033 | cm_id_priv = container_of(cm_id, struct cm_id_private, id); | 3100 | cm_id_priv = container_of(cm_id, struct cm_id_private, id); |
3034 | ret = cm_init_av_by_path(param->path, &cm_id_priv->av); | 3101 | ret = cm_init_av_by_path(param->path, &cm_id_priv->av, cm_id_priv); |
3035 | if (ret) | 3102 | if (ret) |
3036 | goto out; | 3103 | goto out; |
3037 | 3104 | ||
@@ -3468,7 +3535,9 @@ out: | |||
3468 | static int cm_migrate(struct ib_cm_id *cm_id) | 3535 | static int cm_migrate(struct ib_cm_id *cm_id) |
3469 | { | 3536 | { |
3470 | struct cm_id_private *cm_id_priv; | 3537 | struct cm_id_private *cm_id_priv; |
3538 | struct cm_av tmp_av; | ||
3471 | unsigned long flags; | 3539 | unsigned long flags; |
3540 | int tmp_send_port_not_ready; | ||
3472 | int ret = 0; | 3541 | int ret = 0; |
3473 | 3542 | ||
3474 | cm_id_priv = container_of(cm_id, struct cm_id_private, id); | 3543 | cm_id_priv = container_of(cm_id, struct cm_id_private, id); |
@@ -3477,7 +3546,14 @@ static int cm_migrate(struct ib_cm_id *cm_id) | |||
3477 | (cm_id->lap_state == IB_CM_LAP_UNINIT || | 3546 | (cm_id->lap_state == IB_CM_LAP_UNINIT || |
3478 | cm_id->lap_state == IB_CM_LAP_IDLE)) { | 3547 | cm_id->lap_state == IB_CM_LAP_IDLE)) { |
3479 | cm_id->lap_state = IB_CM_LAP_IDLE; | 3548 | cm_id->lap_state = IB_CM_LAP_IDLE; |
3549 | /* Swap address vector */ | ||
3550 | tmp_av = cm_id_priv->av; | ||
3480 | cm_id_priv->av = cm_id_priv->alt_av; | 3551 | cm_id_priv->av = cm_id_priv->alt_av; |
3552 | cm_id_priv->alt_av = tmp_av; | ||
3553 | /* Swap port send ready state */ | ||
3554 | tmp_send_port_not_ready = cm_id_priv->prim_send_port_not_ready; | ||
3555 | cm_id_priv->prim_send_port_not_ready = cm_id_priv->altr_send_port_not_ready; | ||
3556 | cm_id_priv->altr_send_port_not_ready = tmp_send_port_not_ready; | ||
3481 | } else | 3557 | } else |
3482 | ret = -EINVAL; | 3558 | ret = -EINVAL; |
3483 | spin_unlock_irqrestore(&cm_id_priv->lock, flags); | 3559 | spin_unlock_irqrestore(&cm_id_priv->lock, flags); |
@@ -3888,6 +3964,9 @@ static void cm_add_one(struct ib_device *ib_device) | |||
3888 | port->cm_dev = cm_dev; | 3964 | port->cm_dev = cm_dev; |
3889 | port->port_num = i; | 3965 | port->port_num = i; |
3890 | 3966 | ||
3967 | INIT_LIST_HEAD(&port->cm_priv_prim_list); | ||
3968 | INIT_LIST_HEAD(&port->cm_priv_altr_list); | ||
3969 | |||
3891 | ret = cm_create_port_fs(port); | 3970 | ret = cm_create_port_fs(port); |
3892 | if (ret) | 3971 | if (ret) |
3893 | goto error1; | 3972 | goto error1; |
@@ -3945,6 +4024,8 @@ static void cm_remove_one(struct ib_device *ib_device, void *client_data) | |||
3945 | { | 4024 | { |
3946 | struct cm_device *cm_dev = client_data; | 4025 | struct cm_device *cm_dev = client_data; |
3947 | struct cm_port *port; | 4026 | struct cm_port *port; |
4027 | struct cm_id_private *cm_id_priv; | ||
4028 | struct ib_mad_agent *cur_mad_agent; | ||
3948 | struct ib_port_modify port_modify = { | 4029 | struct ib_port_modify port_modify = { |
3949 | .clr_port_cap_mask = IB_PORT_CM_SUP | 4030 | .clr_port_cap_mask = IB_PORT_CM_SUP |
3950 | }; | 4031 | }; |
@@ -3968,15 +4049,27 @@ static void cm_remove_one(struct ib_device *ib_device, void *client_data) | |||
3968 | 4049 | ||
3969 | port = cm_dev->port[i-1]; | 4050 | port = cm_dev->port[i-1]; |
3970 | ib_modify_port(ib_device, port->port_num, 0, &port_modify); | 4051 | ib_modify_port(ib_device, port->port_num, 0, &port_modify); |
4052 | /* Mark all the cm_id's as not valid */ | ||
4053 | spin_lock_irq(&cm.lock); | ||
4054 | list_for_each_entry(cm_id_priv, &port->cm_priv_altr_list, altr_list) | ||
4055 | cm_id_priv->altr_send_port_not_ready = 1; | ||
4056 | list_for_each_entry(cm_id_priv, &port->cm_priv_prim_list, prim_list) | ||
4057 | cm_id_priv->prim_send_port_not_ready = 1; | ||
4058 | spin_unlock_irq(&cm.lock); | ||
3971 | /* | 4059 | /* |
3972 | * We flush the queue here after going_down is set; this | 4060 | * We flush the queue here after going_down is set; this |
3973 | * verifies that no new work will be queued in the recv handler, | 4061 | * verifies that no new work will be queued in the recv handler, |
3974 | * after which we can call unregister_mad_agent. | 4062 | * after which we can call unregister_mad_agent. |
3975 | */ | 4063 | */ |
3976 | flush_workqueue(cm.wq); | 4064 | flush_workqueue(cm.wq); |
3977 | ib_unregister_mad_agent(port->mad_agent); | 4065 | spin_lock_irq(&cm.state_lock); |
4066 | cur_mad_agent = port->mad_agent; | ||
4067 | port->mad_agent = NULL; | ||
4068 | spin_unlock_irq(&cm.state_lock); | ||
4069 | ib_unregister_mad_agent(cur_mad_agent); | ||
3978 | cm_remove_port_fs(port); | 4070 | cm_remove_port_fs(port); |
3979 | } | 4071 | } |
4072 | |||
3980 | device_unregister(cm_dev->device); | 4073 | device_unregister(cm_dev->device); |
3981 | kfree(cm_dev); | 4074 | kfree(cm_dev); |
3982 | } | 4075 | } |
@@ -3989,6 +4082,7 @@ static int __init ib_cm_init(void) | |||
3989 | INIT_LIST_HEAD(&cm.device_list); | 4082 | INIT_LIST_HEAD(&cm.device_list); |
3990 | rwlock_init(&cm.device_lock); | 4083 | rwlock_init(&cm.device_lock); |
3991 | spin_lock_init(&cm.lock); | 4084 | spin_lock_init(&cm.lock); |
4085 | spin_lock_init(&cm.state_lock); | ||
3992 | cm.listen_service_table = RB_ROOT; | 4086 | cm.listen_service_table = RB_ROOT; |
3993 | cm.listen_service_id = be64_to_cpu(IB_CM_ASSIGN_SERVICE_ID); | 4087 | cm.listen_service_id = be64_to_cpu(IB_CM_ASSIGN_SERVICE_ID); |
3994 | cm.remote_id_table = RB_ROOT; | 4088 | cm.remote_id_table = RB_ROOT; |
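
The cm.c changes above guard port teardown: senders only look at the MAD agent under cm.state_lock and keep that lock held while building the send, while cm_remove_one clears the pointer under the same lock before unregistering. A userspace analogue of the pattern (all names invented, pthreads in place of spinlocks):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t state_lock = PTHREAD_MUTEX_INITIALIZER;
static const char *mad_agent = "agent";	/* stands in for port->mad_agent */

/* sender path: check and use the agent without dropping the lock in between */
static int send_one(void)
{
	int ret = 0;

	pthread_mutex_lock(&state_lock);
	if (!mad_agent) {
		ret = -1;			/* port already going away */
		goto out;
	}
	printf("sending via %s\n", mad_agent);	/* agent cannot vanish here */
out:
	pthread_mutex_unlock(&state_lock);
	return ret;
}

/* removal path: retire the pointer under the same lock, then tear it down */
static void remove_port(void)
{
	pthread_mutex_lock(&state_lock);
	mad_agent = NULL;
	pthread_mutex_unlock(&state_lock);
	/* ...unregister the real agent here, outside the lock... */
}

int main(void)
{
	send_one();
	remove_port();
	return send_one() ? 0 : 1;	/* second send must now refuse */
}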
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c index 36bf50ebb187..2a6fc47a1dfb 100644 --- a/drivers/infiniband/core/cma.c +++ b/drivers/infiniband/core/cma.c | |||
@@ -1094,47 +1094,47 @@ static void cma_save_ib_info(struct sockaddr *src_addr, | |||
1094 | } | 1094 | } |
1095 | } | 1095 | } |
1096 | 1096 | ||
1097 | static void cma_save_ip4_info(struct sockaddr *src_addr, | 1097 | static void cma_save_ip4_info(struct sockaddr_in *src_addr, |
1098 | struct sockaddr *dst_addr, | 1098 | struct sockaddr_in *dst_addr, |
1099 | struct cma_hdr *hdr, | 1099 | struct cma_hdr *hdr, |
1100 | __be16 local_port) | 1100 | __be16 local_port) |
1101 | { | 1101 | { |
1102 | struct sockaddr_in *ip4; | ||
1103 | |||
1104 | if (src_addr) { | 1102 | if (src_addr) { |
1105 | ip4 = (struct sockaddr_in *)src_addr; | 1103 | *src_addr = (struct sockaddr_in) { |
1106 | ip4->sin_family = AF_INET; | 1104 | .sin_family = AF_INET, |
1107 | ip4->sin_addr.s_addr = hdr->dst_addr.ip4.addr; | 1105 | .sin_addr.s_addr = hdr->dst_addr.ip4.addr, |
1108 | ip4->sin_port = local_port; | 1106 | .sin_port = local_port, |
1107 | }; | ||
1109 | } | 1108 | } |
1110 | 1109 | ||
1111 | if (dst_addr) { | 1110 | if (dst_addr) { |
1112 | ip4 = (struct sockaddr_in *)dst_addr; | 1111 | *dst_addr = (struct sockaddr_in) { |
1113 | ip4->sin_family = AF_INET; | 1112 | .sin_family = AF_INET, |
1114 | ip4->sin_addr.s_addr = hdr->src_addr.ip4.addr; | 1113 | .sin_addr.s_addr = hdr->src_addr.ip4.addr, |
1115 | ip4->sin_port = hdr->port; | 1114 | .sin_port = hdr->port, |
1115 | }; | ||
1116 | } | 1116 | } |
1117 | } | 1117 | } |
1118 | 1118 | ||
1119 | static void cma_save_ip6_info(struct sockaddr *src_addr, | 1119 | static void cma_save_ip6_info(struct sockaddr_in6 *src_addr, |
1120 | struct sockaddr *dst_addr, | 1120 | struct sockaddr_in6 *dst_addr, |
1121 | struct cma_hdr *hdr, | 1121 | struct cma_hdr *hdr, |
1122 | __be16 local_port) | 1122 | __be16 local_port) |
1123 | { | 1123 | { |
1124 | struct sockaddr_in6 *ip6; | ||
1125 | |||
1126 | if (src_addr) { | 1124 | if (src_addr) { |
1127 | ip6 = (struct sockaddr_in6 *)src_addr; | 1125 | *src_addr = (struct sockaddr_in6) { |
1128 | ip6->sin6_family = AF_INET6; | 1126 | .sin6_family = AF_INET6, |
1129 | ip6->sin6_addr = hdr->dst_addr.ip6; | 1127 | .sin6_addr = hdr->dst_addr.ip6, |
1130 | ip6->sin6_port = local_port; | 1128 | .sin6_port = local_port, |
1129 | }; | ||
1131 | } | 1130 | } |
1132 | 1131 | ||
1133 | if (dst_addr) { | 1132 | if (dst_addr) { |
1134 | ip6 = (struct sockaddr_in6 *)dst_addr; | 1133 | *dst_addr = (struct sockaddr_in6) { |
1135 | ip6->sin6_family = AF_INET6; | 1134 | .sin6_family = AF_INET6, |
1136 | ip6->sin6_addr = hdr->src_addr.ip6; | 1135 | .sin6_addr = hdr->src_addr.ip6, |
1137 | ip6->sin6_port = hdr->port; | 1136 | .sin6_port = hdr->port, |
1137 | }; | ||
1138 | } | 1138 | } |
1139 | } | 1139 | } |
1140 | 1140 | ||
@@ -1159,10 +1159,12 @@ static int cma_save_ip_info(struct sockaddr *src_addr, | |||
1159 | 1159 | ||
1160 | switch (cma_get_ip_ver(hdr)) { | 1160 | switch (cma_get_ip_ver(hdr)) { |
1161 | case 4: | 1161 | case 4: |
1162 | cma_save_ip4_info(src_addr, dst_addr, hdr, port); | 1162 | cma_save_ip4_info((struct sockaddr_in *)src_addr, |
1163 | (struct sockaddr_in *)dst_addr, hdr, port); | ||
1163 | break; | 1164 | break; |
1164 | case 6: | 1165 | case 6: |
1165 | cma_save_ip6_info(src_addr, dst_addr, hdr, port); | 1166 | cma_save_ip6_info((struct sockaddr_in6 *)src_addr, |
1167 | (struct sockaddr_in6 *)dst_addr, hdr, port); | ||
1166 | break; | 1168 | break; |
1167 | default: | 1169 | default: |
1168 | return -EAFNOSUPPORT; | 1170 | return -EAFNOSUPPORT; |
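
The cma.c rework above fills the sockaddr with a compound-literal assignment instead of writing fields one by one; every member not named in the literal, including padding such as sin_zero, is zero-initialised. A standalone illustration (ordinary socket headers, not the RDMA code):

#include <arpa/inet.h>
#include <netinet/in.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	struct sockaddr_in ip4;

	memset(&ip4, 0xff, sizeof(ip4));	/* pretend the struct held stale data */

	ip4 = (struct sockaddr_in) {		/* unnamed members are zeroed */
		.sin_family = AF_INET,
		.sin_addr.s_addr = htonl(INADDR_LOOPBACK),
		.sin_port = htons(1234),
	};

	printf("family=%d port=%d zero[0]=%d\n",
	       ip4.sin_family, ntohs(ip4.sin_port), ip4.sin_zero[0]);
	return 0;
}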
@@ -2436,6 +2438,18 @@ static int iboe_tos_to_sl(struct net_device *ndev, int tos) | |||
2436 | return 0; | 2438 | return 0; |
2437 | } | 2439 | } |
2438 | 2440 | ||
2441 | static enum ib_gid_type cma_route_gid_type(enum rdma_network_type network_type, | ||
2442 | unsigned long supported_gids, | ||
2443 | enum ib_gid_type default_gid) | ||
2444 | { | ||
2445 | if ((network_type == RDMA_NETWORK_IPV4 || | ||
2446 | network_type == RDMA_NETWORK_IPV6) && | ||
2447 | test_bit(IB_GID_TYPE_ROCE_UDP_ENCAP, &supported_gids)) | ||
2448 | return IB_GID_TYPE_ROCE_UDP_ENCAP; | ||
2449 | |||
2450 | return default_gid; | ||
2451 | } | ||
2452 | |||
2439 | static int cma_resolve_iboe_route(struct rdma_id_private *id_priv) | 2453 | static int cma_resolve_iboe_route(struct rdma_id_private *id_priv) |
2440 | { | 2454 | { |
2441 | struct rdma_route *route = &id_priv->id.route; | 2455 | struct rdma_route *route = &id_priv->id.route; |
@@ -2461,6 +2475,8 @@ static int cma_resolve_iboe_route(struct rdma_id_private *id_priv) | |||
2461 | route->num_paths = 1; | 2475 | route->num_paths = 1; |
2462 | 2476 | ||
2463 | if (addr->dev_addr.bound_dev_if) { | 2477 | if (addr->dev_addr.bound_dev_if) { |
2478 | unsigned long supported_gids; | ||
2479 | |||
2464 | ndev = dev_get_by_index(&init_net, addr->dev_addr.bound_dev_if); | 2480 | ndev = dev_get_by_index(&init_net, addr->dev_addr.bound_dev_if); |
2465 | if (!ndev) { | 2481 | if (!ndev) { |
2466 | ret = -ENODEV; | 2482 | ret = -ENODEV; |
@@ -2484,7 +2500,12 @@ static int cma_resolve_iboe_route(struct rdma_id_private *id_priv) | |||
2484 | 2500 | ||
2485 | route->path_rec->net = &init_net; | 2501 | route->path_rec->net = &init_net; |
2486 | route->path_rec->ifindex = ndev->ifindex; | 2502 | route->path_rec->ifindex = ndev->ifindex; |
2487 | route->path_rec->gid_type = id_priv->gid_type; | 2503 | supported_gids = roce_gid_type_mask_support(id_priv->id.device, |
2504 | id_priv->id.port_num); | ||
2505 | route->path_rec->gid_type = | ||
2506 | cma_route_gid_type(addr->dev_addr.network, | ||
2507 | supported_gids, | ||
2508 | id_priv->gid_type); | ||
2488 | } | 2509 | } |
2489 | if (!ndev) { | 2510 | if (!ndev) { |
2490 | ret = -ENODEV; | 2511 | ret = -ENODEV; |
diff --git a/drivers/infiniband/core/umem.c b/drivers/infiniband/core/umem.c index 224ad274ea0b..84b4eff90395 100644 --- a/drivers/infiniband/core/umem.c +++ b/drivers/infiniband/core/umem.c | |||
@@ -175,7 +175,7 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr, | |||
175 | 175 | ||
176 | cur_base = addr & PAGE_MASK; | 176 | cur_base = addr & PAGE_MASK; |
177 | 177 | ||
178 | if (npages == 0) { | 178 | if (npages == 0 || npages > UINT_MAX) { |
179 | ret = -EINVAL; | 179 | ret = -EINVAL; |
180 | goto out; | 180 | goto out; |
181 | } | 181 | } |
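
The ib_umem_get check above also rejects page counts above UINT_MAX, guarding against truncation when the count is later handled as a 32-bit quantity. A tiny sketch of that style of bounds check (the oversized value is hypothetical):

#include <limits.h>
#include <stdio.h>

int main(void)
{
	unsigned long long npages = 0x100000001ULL;	/* hypothetical oversized request */

	if (npages == 0 || npages > UINT_MAX) {
		fprintf(stderr, "rejecting: would truncate to %u pages\n",
			(unsigned int)npages);
		return 1;
	}
	return 0;
}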
diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c index 0012fa58c105..44b1104eb168 100644 --- a/drivers/infiniband/core/uverbs_main.c +++ b/drivers/infiniband/core/uverbs_main.c | |||
@@ -262,12 +262,9 @@ static int ib_uverbs_cleanup_ucontext(struct ib_uverbs_file *file, | |||
262 | container_of(uobj, struct ib_uqp_object, uevent.uobject); | 262 | container_of(uobj, struct ib_uqp_object, uevent.uobject); |
263 | 263 | ||
264 | idr_remove_uobj(&ib_uverbs_qp_idr, uobj); | 264 | idr_remove_uobj(&ib_uverbs_qp_idr, uobj); |
265 | if (qp != qp->real_qp) { | 265 | if (qp == qp->real_qp) |
266 | ib_close_qp(qp); | ||
267 | } else { | ||
268 | ib_uverbs_detach_umcast(qp, uqp); | 266 | ib_uverbs_detach_umcast(qp, uqp); |
269 | ib_destroy_qp(qp); | 267 | ib_destroy_qp(qp); |
270 | } | ||
271 | ib_uverbs_release_uevent(file, &uqp->uevent); | 268 | ib_uverbs_release_uevent(file, &uqp->uevent); |
272 | kfree(uqp); | 269 | kfree(uqp); |
273 | } | 270 | } |
diff --git a/drivers/infiniband/hw/cxgb4/cq.c b/drivers/infiniband/hw/cxgb4/cq.c index 867b8cf82be8..19c6477af19f 100644 --- a/drivers/infiniband/hw/cxgb4/cq.c +++ b/drivers/infiniband/hw/cxgb4/cq.c | |||
@@ -666,18 +666,6 @@ skip_cqe: | |||
666 | return ret; | 666 | return ret; |
667 | } | 667 | } |
668 | 668 | ||
669 | static void invalidate_mr(struct c4iw_dev *rhp, u32 rkey) | ||
670 | { | ||
671 | struct c4iw_mr *mhp; | ||
672 | unsigned long flags; | ||
673 | |||
674 | spin_lock_irqsave(&rhp->lock, flags); | ||
675 | mhp = get_mhp(rhp, rkey >> 8); | ||
676 | if (mhp) | ||
677 | mhp->attr.state = 0; | ||
678 | spin_unlock_irqrestore(&rhp->lock, flags); | ||
679 | } | ||
680 | |||
681 | /* | 669 | /* |
682 | * Get one cq entry from c4iw and map it to openib. | 670 | * Get one cq entry from c4iw and map it to openib. |
683 | * | 671 | * |
@@ -733,7 +721,7 @@ static int c4iw_poll_cq_one(struct c4iw_cq *chp, struct ib_wc *wc) | |||
733 | CQE_OPCODE(&cqe) == FW_RI_SEND_WITH_SE_INV) { | 721 | CQE_OPCODE(&cqe) == FW_RI_SEND_WITH_SE_INV) { |
734 | wc->ex.invalidate_rkey = CQE_WRID_STAG(&cqe); | 722 | wc->ex.invalidate_rkey = CQE_WRID_STAG(&cqe); |
735 | wc->wc_flags |= IB_WC_WITH_INVALIDATE; | 723 | wc->wc_flags |= IB_WC_WITH_INVALIDATE; |
736 | invalidate_mr(qhp->rhp, wc->ex.invalidate_rkey); | 724 | c4iw_invalidate_mr(qhp->rhp, wc->ex.invalidate_rkey); |
737 | } | 725 | } |
738 | } else { | 726 | } else { |
739 | switch (CQE_OPCODE(&cqe)) { | 727 | switch (CQE_OPCODE(&cqe)) { |
@@ -762,7 +750,8 @@ static int c4iw_poll_cq_one(struct c4iw_cq *chp, struct ib_wc *wc) | |||
762 | 750 | ||
763 | /* Invalidate the MR if the fastreg failed */ | 751 | /* Invalidate the MR if the fastreg failed */ |
764 | if (CQE_STATUS(&cqe) != T4_ERR_SUCCESS) | 752 | if (CQE_STATUS(&cqe) != T4_ERR_SUCCESS) |
765 | invalidate_mr(qhp->rhp, CQE_WRID_FR_STAG(&cqe)); | 753 | c4iw_invalidate_mr(qhp->rhp, |
754 | CQE_WRID_FR_STAG(&cqe)); | ||
766 | break; | 755 | break; |
767 | default: | 756 | default: |
768 | printk(KERN_ERR MOD "Unexpected opcode %d " | 757 | printk(KERN_ERR MOD "Unexpected opcode %d " |
diff --git a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h index 7e7f79e55006..4788e1a46fde 100644 --- a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h +++ b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h | |||
@@ -999,6 +999,6 @@ extern int db_coalescing_threshold; | |||
999 | extern int use_dsgl; | 999 | extern int use_dsgl; |
1000 | void c4iw_drain_rq(struct ib_qp *qp); | 1000 | void c4iw_drain_rq(struct ib_qp *qp); |
1001 | void c4iw_drain_sq(struct ib_qp *qp); | 1001 | void c4iw_drain_sq(struct ib_qp *qp); |
1002 | 1002 | void c4iw_invalidate_mr(struct c4iw_dev *rhp, u32 rkey); | |
1003 | 1003 | ||
1004 | #endif | 1004 | #endif |
diff --git a/drivers/infiniband/hw/cxgb4/mem.c b/drivers/infiniband/hw/cxgb4/mem.c index 80e27749420a..410408f886c1 100644 --- a/drivers/infiniband/hw/cxgb4/mem.c +++ b/drivers/infiniband/hw/cxgb4/mem.c | |||
@@ -770,3 +770,15 @@ int c4iw_dereg_mr(struct ib_mr *ib_mr) | |||
770 | kfree(mhp); | 770 | kfree(mhp); |
771 | return 0; | 771 | return 0; |
772 | } | 772 | } |
773 | |||
774 | void c4iw_invalidate_mr(struct c4iw_dev *rhp, u32 rkey) | ||
775 | { | ||
776 | struct c4iw_mr *mhp; | ||
777 | unsigned long flags; | ||
778 | |||
779 | spin_lock_irqsave(&rhp->lock, flags); | ||
780 | mhp = get_mhp(rhp, rkey >> 8); | ||
781 | if (mhp) | ||
782 | mhp->attr.state = 0; | ||
783 | spin_unlock_irqrestore(&rhp->lock, flags); | ||
784 | } | ||
diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c index f57deba6717c..b7ac97b27c88 100644 --- a/drivers/infiniband/hw/cxgb4/qp.c +++ b/drivers/infiniband/hw/cxgb4/qp.c | |||
@@ -706,12 +706,8 @@ static int build_memreg(struct t4_sq *sq, union t4_wr *wqe, | |||
706 | return 0; | 706 | return 0; |
707 | } | 707 | } |
708 | 708 | ||
709 | static int build_inv_stag(struct c4iw_dev *dev, union t4_wr *wqe, | 709 | static int build_inv_stag(union t4_wr *wqe, struct ib_send_wr *wr, u8 *len16) |
710 | struct ib_send_wr *wr, u8 *len16) | ||
711 | { | 710 | { |
712 | struct c4iw_mr *mhp = get_mhp(dev, wr->ex.invalidate_rkey >> 8); | ||
713 | |||
714 | mhp->attr.state = 0; | ||
715 | wqe->inv.stag_inv = cpu_to_be32(wr->ex.invalidate_rkey); | 711 | wqe->inv.stag_inv = cpu_to_be32(wr->ex.invalidate_rkey); |
716 | wqe->inv.r2 = 0; | 712 | wqe->inv.r2 = 0; |
717 | *len16 = DIV_ROUND_UP(sizeof wqe->inv, 16); | 713 | *len16 = DIV_ROUND_UP(sizeof wqe->inv, 16); |
@@ -797,11 +793,13 @@ int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, | |||
797 | spin_lock_irqsave(&qhp->lock, flag); | 793 | spin_lock_irqsave(&qhp->lock, flag); |
798 | if (t4_wq_in_error(&qhp->wq)) { | 794 | if (t4_wq_in_error(&qhp->wq)) { |
799 | spin_unlock_irqrestore(&qhp->lock, flag); | 795 | spin_unlock_irqrestore(&qhp->lock, flag); |
796 | *bad_wr = wr; | ||
800 | return -EINVAL; | 797 | return -EINVAL; |
801 | } | 798 | } |
802 | num_wrs = t4_sq_avail(&qhp->wq); | 799 | num_wrs = t4_sq_avail(&qhp->wq); |
803 | if (num_wrs == 0) { | 800 | if (num_wrs == 0) { |
804 | spin_unlock_irqrestore(&qhp->lock, flag); | 801 | spin_unlock_irqrestore(&qhp->lock, flag); |
802 | *bad_wr = wr; | ||
805 | return -ENOMEM; | 803 | return -ENOMEM; |
806 | } | 804 | } |
807 | while (wr) { | 805 | while (wr) { |
@@ -840,10 +838,13 @@ int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, | |||
840 | case IB_WR_RDMA_READ_WITH_INV: | 838 | case IB_WR_RDMA_READ_WITH_INV: |
841 | fw_opcode = FW_RI_RDMA_READ_WR; | 839 | fw_opcode = FW_RI_RDMA_READ_WR; |
842 | swsqe->opcode = FW_RI_READ_REQ; | 840 | swsqe->opcode = FW_RI_READ_REQ; |
843 | if (wr->opcode == IB_WR_RDMA_READ_WITH_INV) | 841 | if (wr->opcode == IB_WR_RDMA_READ_WITH_INV) { |
842 | c4iw_invalidate_mr(qhp->rhp, | ||
843 | wr->sg_list[0].lkey); | ||
844 | fw_flags = FW_RI_RDMA_READ_INVALIDATE; | 844 | fw_flags = FW_RI_RDMA_READ_INVALIDATE; |
845 | else | 845 | } else { |
846 | fw_flags = 0; | 846 | fw_flags = 0; |
847 | } | ||
847 | err = build_rdma_read(wqe, wr, &len16); | 848 | err = build_rdma_read(wqe, wr, &len16); |
848 | if (err) | 849 | if (err) |
849 | break; | 850 | break; |
@@ -876,7 +877,8 @@ int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, | |||
876 | fw_flags |= FW_RI_LOCAL_FENCE_FLAG; | 877 | fw_flags |= FW_RI_LOCAL_FENCE_FLAG; |
877 | fw_opcode = FW_RI_INV_LSTAG_WR; | 878 | fw_opcode = FW_RI_INV_LSTAG_WR; |
878 | swsqe->opcode = FW_RI_LOCAL_INV; | 879 | swsqe->opcode = FW_RI_LOCAL_INV; |
879 | err = build_inv_stag(qhp->rhp, wqe, wr, &len16); | 880 | err = build_inv_stag(wqe, wr, &len16); |
881 | c4iw_invalidate_mr(qhp->rhp, wr->ex.invalidate_rkey); | ||
880 | break; | 882 | break; |
881 | default: | 883 | default: |
882 | PDBG("%s post of type=%d TBD!\n", __func__, | 884 | PDBG("%s post of type=%d TBD!\n", __func__, |
@@ -934,11 +936,13 @@ int c4iw_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr, | |||
934 | spin_lock_irqsave(&qhp->lock, flag); | 936 | spin_lock_irqsave(&qhp->lock, flag); |
935 | if (t4_wq_in_error(&qhp->wq)) { | 937 | if (t4_wq_in_error(&qhp->wq)) { |
936 | spin_unlock_irqrestore(&qhp->lock, flag); | 938 | spin_unlock_irqrestore(&qhp->lock, flag); |
939 | *bad_wr = wr; | ||
937 | return -EINVAL; | 940 | return -EINVAL; |
938 | } | 941 | } |
939 | num_wrs = t4_rq_avail(&qhp->wq); | 942 | num_wrs = t4_rq_avail(&qhp->wq); |
940 | if (num_wrs == 0) { | 943 | if (num_wrs == 0) { |
941 | spin_unlock_irqrestore(&qhp->lock, flag); | 944 | spin_unlock_irqrestore(&qhp->lock, flag); |
945 | *bad_wr = wr; | ||
942 | return -ENOMEM; | 946 | return -ENOMEM; |
943 | } | 947 | } |
944 | while (wr) { | 948 | while (wr) { |
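
The cxgb4 post_send/post_receive fixes above set *bad_wr before the early error returns, keeping the usual verbs convention that the caller learns which work request failed. A simplified sketch of that convention (not the driver code; the types are invented):

#include <errno.h>
#include <stdio.h>

struct wr { int id; struct wr *next; };

static int post_list(struct wr *wr, struct wr **bad_wr, int room)
{
	while (wr) {
		if (room-- == 0) {
			*bad_wr = wr;	/* caller learns where posting stopped */
			return -ENOMEM;
		}
		wr = wr->next;
	}
	return 0;
}

int main(void)
{
	struct wr c = { 3, NULL }, b = { 2, &c }, a = { 1, &b };
	struct wr *bad = NULL;

	if (post_list(&a, &bad, 2))
		printf("failed at wr %d\n", bad->id);
	return 0;
}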
diff --git a/drivers/infiniband/hw/hfi1/affinity.c b/drivers/infiniband/hw/hfi1/affinity.c index a26a9a0bfc41..67ea85a56945 100644 --- a/drivers/infiniband/hw/hfi1/affinity.c +++ b/drivers/infiniband/hw/hfi1/affinity.c | |||
@@ -775,75 +775,3 @@ void hfi1_put_proc_affinity(int cpu) | |||
775 | } | 775 | } |
776 | mutex_unlock(&affinity->lock); | 776 | mutex_unlock(&affinity->lock); |
777 | } | 777 | } |
778 | |||
779 | int hfi1_set_sdma_affinity(struct hfi1_devdata *dd, const char *buf, | ||
780 | size_t count) | ||
781 | { | ||
782 | struct hfi1_affinity_node *entry; | ||
783 | cpumask_var_t mask; | ||
784 | int ret, i; | ||
785 | |||
786 | mutex_lock(&node_affinity.lock); | ||
787 | entry = node_affinity_lookup(dd->node); | ||
788 | |||
789 | if (!entry) { | ||
790 | ret = -EINVAL; | ||
791 | goto unlock; | ||
792 | } | ||
793 | |||
794 | ret = zalloc_cpumask_var(&mask, GFP_KERNEL); | ||
795 | if (!ret) { | ||
796 | ret = -ENOMEM; | ||
797 | goto unlock; | ||
798 | } | ||
799 | |||
800 | ret = cpulist_parse(buf, mask); | ||
801 | if (ret) | ||
802 | goto out; | ||
803 | |||
804 | if (!cpumask_subset(mask, cpu_online_mask) || cpumask_empty(mask)) { | ||
805 | dd_dev_warn(dd, "Invalid CPU mask\n"); | ||
806 | ret = -EINVAL; | ||
807 | goto out; | ||
808 | } | ||
809 | |||
810 | /* reset the SDMA interrupt affinity details */ | ||
811 | init_cpu_mask_set(&entry->def_intr); | ||
812 | cpumask_copy(&entry->def_intr.mask, mask); | ||
813 | |||
814 | /* Reassign the affinity for each SDMA interrupt. */ | ||
815 | for (i = 0; i < dd->num_msix_entries; i++) { | ||
816 | struct hfi1_msix_entry *msix; | ||
817 | |||
818 | msix = &dd->msix_entries[i]; | ||
819 | if (msix->type != IRQ_SDMA) | ||
820 | continue; | ||
821 | |||
822 | ret = get_irq_affinity(dd, msix); | ||
823 | |||
824 | if (ret) | ||
825 | break; | ||
826 | } | ||
827 | out: | ||
828 | free_cpumask_var(mask); | ||
829 | unlock: | ||
830 | mutex_unlock(&node_affinity.lock); | ||
831 | return ret ? ret : strnlen(buf, PAGE_SIZE); | ||
832 | } | ||
833 | |||
834 | int hfi1_get_sdma_affinity(struct hfi1_devdata *dd, char *buf) | ||
835 | { | ||
836 | struct hfi1_affinity_node *entry; | ||
837 | |||
838 | mutex_lock(&node_affinity.lock); | ||
839 | entry = node_affinity_lookup(dd->node); | ||
840 | |||
841 | if (!entry) { | ||
842 | mutex_unlock(&node_affinity.lock); | ||
843 | return -EINVAL; | ||
844 | } | ||
845 | |||
846 | cpumap_print_to_pagebuf(true, buf, &entry->def_intr.mask); | ||
847 | mutex_unlock(&node_affinity.lock); | ||
848 | return strnlen(buf, PAGE_SIZE); | ||
849 | } | ||
diff --git a/drivers/infiniband/hw/hfi1/affinity.h b/drivers/infiniband/hw/hfi1/affinity.h index b89ea3c0ee1a..42e63316afd1 100644 --- a/drivers/infiniband/hw/hfi1/affinity.h +++ b/drivers/infiniband/hw/hfi1/affinity.h | |||
@@ -102,10 +102,6 @@ int hfi1_get_proc_affinity(int); | |||
102 | /* Release a CPU used by a user process. */ | 102 | /* Release a CPU used by a user process. */ |
103 | void hfi1_put_proc_affinity(int); | 103 | void hfi1_put_proc_affinity(int); |
104 | 104 | ||
105 | int hfi1_get_sdma_affinity(struct hfi1_devdata *dd, char *buf); | ||
106 | int hfi1_set_sdma_affinity(struct hfi1_devdata *dd, const char *buf, | ||
107 | size_t count); | ||
108 | |||
109 | struct hfi1_affinity_node { | 105 | struct hfi1_affinity_node { |
110 | int node; | 106 | int node; |
111 | struct cpu_mask_set def_intr; | 107 | struct cpu_mask_set def_intr; |
diff --git a/drivers/infiniband/hw/hfi1/chip.c b/drivers/infiniband/hw/hfi1/chip.c index 9bf5f23544d4..24d0820873cf 100644 --- a/drivers/infiniband/hw/hfi1/chip.c +++ b/drivers/infiniband/hw/hfi1/chip.c | |||
@@ -6301,19 +6301,8 @@ void set_up_vl15(struct hfi1_devdata *dd, u8 vau, u16 vl15buf) | |||
6301 | /* leave shared count at zero for both global and VL15 */ | 6301 | /* leave shared count at zero for both global and VL15 */ |
6302 | write_global_credit(dd, vau, vl15buf, 0); | 6302 | write_global_credit(dd, vau, vl15buf, 0); |
6303 | 6303 | ||
6304 | /* We may need some credits for another VL when sending packets | 6304 | write_csr(dd, SEND_CM_CREDIT_VL15, (u64)vl15buf |
6305 | * with the snoop interface. Dividing it down the middle for VL15 | 6305 | << SEND_CM_CREDIT_VL15_DEDICATED_LIMIT_VL_SHIFT); |
6306 | * and VL0 should suffice. | ||
6307 | */ | ||
6308 | if (unlikely(dd->hfi1_snoop.mode_flag == HFI1_PORT_SNOOP_MODE)) { | ||
6309 | write_csr(dd, SEND_CM_CREDIT_VL15, (u64)(vl15buf >> 1) | ||
6310 | << SEND_CM_CREDIT_VL15_DEDICATED_LIMIT_VL_SHIFT); | ||
6311 | write_csr(dd, SEND_CM_CREDIT_VL, (u64)(vl15buf >> 1) | ||
6312 | << SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_SHIFT); | ||
6313 | } else { | ||
6314 | write_csr(dd, SEND_CM_CREDIT_VL15, (u64)vl15buf | ||
6315 | << SEND_CM_CREDIT_VL15_DEDICATED_LIMIT_VL_SHIFT); | ||
6316 | } | ||
6317 | } | 6306 | } |
6318 | 6307 | ||
6319 | /* | 6308 | /* |
@@ -9915,9 +9904,6 @@ static void set_lidlmc(struct hfi1_pportdata *ppd) | |||
9915 | u32 mask = ~((1U << ppd->lmc) - 1); | 9904 | u32 mask = ~((1U << ppd->lmc) - 1); |
9916 | u64 c1 = read_csr(ppd->dd, DCC_CFG_PORT_CONFIG1); | 9905 | u64 c1 = read_csr(ppd->dd, DCC_CFG_PORT_CONFIG1); |
9917 | 9906 | ||
9918 | if (dd->hfi1_snoop.mode_flag) | ||
9919 | dd_dev_info(dd, "Set lid/lmc while snooping"); | ||
9920 | |||
9921 | c1 &= ~(DCC_CFG_PORT_CONFIG1_TARGET_DLID_SMASK | 9907 | c1 &= ~(DCC_CFG_PORT_CONFIG1_TARGET_DLID_SMASK |
9922 | | DCC_CFG_PORT_CONFIG1_DLID_MASK_SMASK); | 9908 | | DCC_CFG_PORT_CONFIG1_DLID_MASK_SMASK); |
9923 | c1 |= ((ppd->lid & DCC_CFG_PORT_CONFIG1_TARGET_DLID_MASK) | 9909 | c1 |= ((ppd->lid & DCC_CFG_PORT_CONFIG1_TARGET_DLID_MASK) |
@@ -12112,7 +12098,7 @@ static void update_synth_timer(unsigned long opaque) | |||
12112 | mod_timer(&dd->synth_stats_timer, jiffies + HZ * SYNTH_CNT_TIME); | 12098 | mod_timer(&dd->synth_stats_timer, jiffies + HZ * SYNTH_CNT_TIME); |
12113 | } | 12099 | } |
12114 | 12100 | ||
12115 | #define C_MAX_NAME 13 /* 12 chars + one for /0 */ | 12101 | #define C_MAX_NAME 16 /* 15 chars + one for /0 */ |
12116 | static int init_cntrs(struct hfi1_devdata *dd) | 12102 | static int init_cntrs(struct hfi1_devdata *dd) |
12117 | { | 12103 | { |
12118 | int i, rcv_ctxts, j; | 12104 | int i, rcv_ctxts, j; |
@@ -14463,7 +14449,7 @@ struct hfi1_devdata *hfi1_init_dd(struct pci_dev *pdev, | |||
14463 | * Any error printing is already done by the init code. | 14449 | * Any error printing is already done by the init code. |
14464 | * On return, we have the chip mapped. | 14450 | * On return, we have the chip mapped. |
14465 | */ | 14451 | */ |
14466 | ret = hfi1_pcie_ddinit(dd, pdev, ent); | 14452 | ret = hfi1_pcie_ddinit(dd, pdev); |
14467 | if (ret < 0) | 14453 | if (ret < 0) |
14468 | goto bail_free; | 14454 | goto bail_free; |
14469 | 14455 | ||
@@ -14691,6 +14677,11 @@ struct hfi1_devdata *hfi1_init_dd(struct pci_dev *pdev, | |||
14691 | if (ret) | 14677 | if (ret) |
14692 | goto bail_free_cntrs; | 14678 | goto bail_free_cntrs; |
14693 | 14679 | ||
14680 | init_completion(&dd->user_comp); | ||
14681 | |||
14682 | /* The user refcount starts with one to indicate an active device */ | ||
14683 | atomic_set(&dd->user_refcount, 1); | ||
14684 | |||
14694 | goto bail; | 14685 | goto bail; |
14695 | 14686 | ||
14696 | bail_free_rcverr: | 14687 | bail_free_rcverr: |
diff --git a/drivers/infiniband/hw/hfi1/chip.h b/drivers/infiniband/hw/hfi1/chip.h index 92345259a8f4..043fd21dc5f3 100644 --- a/drivers/infiniband/hw/hfi1/chip.h +++ b/drivers/infiniband/hw/hfi1/chip.h | |||
@@ -320,6 +320,9 @@ | |||
320 | /* DC_DC8051_CFG_MODE.GENERAL bits */ | 320 | /* DC_DC8051_CFG_MODE.GENERAL bits */ |
321 | #define DISABLE_SELF_GUID_CHECK 0x2 | 321 | #define DISABLE_SELF_GUID_CHECK 0x2 |
322 | 322 | ||
323 | /* Bad L2 frame error code */ | ||
324 | #define BAD_L2_ERR 0x6 | ||
325 | |||
323 | /* | 326 | /* |
324 | * Eager buffer minimum and maximum sizes supported by the hardware. | 327 | * Eager buffer minimum and maximum sizes supported by the hardware. |
325 | * All power-of-two sizes in between are supported as well. | 328 | * All power-of-two sizes in between are supported as well. |
diff --git a/drivers/infiniband/hw/hfi1/driver.c b/drivers/infiniband/hw/hfi1/driver.c index 6563e4d38b80..c5efff29c147 100644 --- a/drivers/infiniband/hw/hfi1/driver.c +++ b/drivers/infiniband/hw/hfi1/driver.c | |||
@@ -599,7 +599,6 @@ static void __prescan_rxq(struct hfi1_packet *packet) | |||
599 | dd->rhf_offset; | 599 | dd->rhf_offset; |
600 | struct rvt_qp *qp; | 600 | struct rvt_qp *qp; |
601 | struct ib_header *hdr; | 601 | struct ib_header *hdr; |
602 | struct ib_other_headers *ohdr; | ||
603 | struct rvt_dev_info *rdi = &dd->verbs_dev.rdi; | 602 | struct rvt_dev_info *rdi = &dd->verbs_dev.rdi; |
604 | u64 rhf = rhf_to_cpu(rhf_addr); | 603 | u64 rhf = rhf_to_cpu(rhf_addr); |
605 | u32 etype = rhf_rcv_type(rhf), qpn, bth1; | 604 | u32 etype = rhf_rcv_type(rhf), qpn, bth1; |
@@ -615,18 +614,21 @@ static void __prescan_rxq(struct hfi1_packet *packet) | |||
615 | if (etype != RHF_RCV_TYPE_IB) | 614 | if (etype != RHF_RCV_TYPE_IB) |
616 | goto next; | 615 | goto next; |
617 | 616 | ||
618 | hdr = hfi1_get_msgheader(dd, rhf_addr); | 617 | packet->hdr = hfi1_get_msgheader(dd, rhf_addr); |
618 | hdr = packet->hdr; | ||
619 | 619 | ||
620 | lnh = be16_to_cpu(hdr->lrh[0]) & 3; | 620 | lnh = be16_to_cpu(hdr->lrh[0]) & 3; |
621 | 621 | ||
622 | if (lnh == HFI1_LRH_BTH) | 622 | if (lnh == HFI1_LRH_BTH) { |
623 | ohdr = &hdr->u.oth; | 623 | packet->ohdr = &hdr->u.oth; |
624 | else if (lnh == HFI1_LRH_GRH) | 624 | } else if (lnh == HFI1_LRH_GRH) { |
625 | ohdr = &hdr->u.l.oth; | 625 | packet->ohdr = &hdr->u.l.oth; |
626 | else | 626 | packet->rcv_flags |= HFI1_HAS_GRH; |
627 | } else { | ||
627 | goto next; /* just in case */ | 628 | goto next; /* just in case */ |
629 | } | ||
628 | 630 | ||
629 | bth1 = be32_to_cpu(ohdr->bth[1]); | 631 | bth1 = be32_to_cpu(packet->ohdr->bth[1]); |
630 | is_ecn = !!(bth1 & (HFI1_FECN_SMASK | HFI1_BECN_SMASK)); | 632 | is_ecn = !!(bth1 & (HFI1_FECN_SMASK | HFI1_BECN_SMASK)); |
631 | 633 | ||
632 | if (!is_ecn) | 634 | if (!is_ecn) |
@@ -646,7 +648,7 @@ static void __prescan_rxq(struct hfi1_packet *packet) | |||
646 | 648 | ||
647 | /* turn off BECN, FECN */ | 649 | /* turn off BECN, FECN */ |
648 | bth1 &= ~(HFI1_FECN_SMASK | HFI1_BECN_SMASK); | 650 | bth1 &= ~(HFI1_FECN_SMASK | HFI1_BECN_SMASK); |
649 | ohdr->bth[1] = cpu_to_be32(bth1); | 651 | packet->ohdr->bth[1] = cpu_to_be32(bth1); |
650 | next: | 652 | next: |
651 | update_ps_mdata(&mdata, rcd); | 653 | update_ps_mdata(&mdata, rcd); |
652 | } | 654 | } |
@@ -1360,12 +1362,25 @@ int process_receive_ib(struct hfi1_packet *packet) | |||
1360 | 1362 | ||
1361 | int process_receive_bypass(struct hfi1_packet *packet) | 1363 | int process_receive_bypass(struct hfi1_packet *packet) |
1362 | { | 1364 | { |
1365 | struct hfi1_devdata *dd = packet->rcd->dd; | ||
1366 | |||
1363 | if (unlikely(rhf_err_flags(packet->rhf))) | 1367 | if (unlikely(rhf_err_flags(packet->rhf))) |
1364 | handle_eflags(packet); | 1368 | handle_eflags(packet); |
1365 | 1369 | ||
1366 | dd_dev_err(packet->rcd->dd, | 1370 | dd_dev_err(dd, |
1367 | "Bypass packets are not supported in normal operation. Dropping\n"); | 1371 | "Bypass packets are not supported in normal operation. Dropping\n"); |
1368 | incr_cntr64(&packet->rcd->dd->sw_rcv_bypass_packet_errors); | 1372 | incr_cntr64(&dd->sw_rcv_bypass_packet_errors); |
1373 | if (!(dd->err_info_rcvport.status_and_code & OPA_EI_STATUS_SMASK)) { | ||
1374 | u64 *flits = packet->ebuf; | ||
1375 | |||
1376 | if (flits && !(packet->rhf & RHF_LEN_ERR)) { | ||
1377 | dd->err_info_rcvport.packet_flit1 = flits[0]; | ||
1378 | dd->err_info_rcvport.packet_flit2 = | ||
1379 | packet->tlen > sizeof(flits[0]) ? flits[1] : 0; | ||
1380 | } | ||
1381 | dd->err_info_rcvport.status_and_code |= | ||
1382 | (OPA_EI_STATUS_SMASK | BAD_L2_ERR); | ||
1383 | } | ||
1369 | return RHF_RCV_CONTINUE; | 1384 | return RHF_RCV_CONTINUE; |
1370 | } | 1385 | } |
1371 | 1386 | ||
diff --git a/drivers/infiniband/hw/hfi1/file_ops.c b/drivers/infiniband/hw/hfi1/file_ops.c index 677efa0e8cd6..bd786b7bd30b 100644 --- a/drivers/infiniband/hw/hfi1/file_ops.c +++ b/drivers/infiniband/hw/hfi1/file_ops.c | |||
@@ -172,6 +172,9 @@ static int hfi1_file_open(struct inode *inode, struct file *fp) | |||
172 | struct hfi1_devdata, | 172 | struct hfi1_devdata, |
173 | user_cdev); | 173 | user_cdev); |
174 | 174 | ||
175 | if (!atomic_inc_not_zero(&dd->user_refcount)) | ||
176 | return -ENXIO; | ||
177 | |||
175 | /* Just take a ref now. Not all opens result in a context assign */ | 178 | /* Just take a ref now. Not all opens result in a context assign */ |
176 | kobject_get(&dd->kobj); | 179 | kobject_get(&dd->kobj); |
177 | 180 | ||
@@ -183,11 +186,17 @@ static int hfi1_file_open(struct inode *inode, struct file *fp) | |||
183 | fd->rec_cpu_num = -1; /* no cpu affinity by default */ | 186 | fd->rec_cpu_num = -1; /* no cpu affinity by default */ |
184 | fd->mm = current->mm; | 187 | fd->mm = current->mm; |
185 | atomic_inc(&fd->mm->mm_count); | 188 | atomic_inc(&fd->mm->mm_count); |
186 | } | 189 | fp->private_data = fd; |
190 | } else { | ||
191 | fp->private_data = NULL; | ||
192 | |||
193 | if (atomic_dec_and_test(&dd->user_refcount)) | ||
194 | complete(&dd->user_comp); | ||
187 | 195 | ||
188 | fp->private_data = fd; | 196 | return -ENOMEM; |
197 | } | ||
189 | 198 | ||
190 | return fd ? 0 : -ENOMEM; | 199 | return 0; |
191 | } | 200 | } |
192 | 201 | ||
193 | static long hfi1_file_ioctl(struct file *fp, unsigned int cmd, | 202 | static long hfi1_file_ioctl(struct file *fp, unsigned int cmd, |
@@ -798,6 +807,10 @@ static int hfi1_file_close(struct inode *inode, struct file *fp) | |||
798 | done: | 807 | done: |
799 | mmdrop(fdata->mm); | 808 | mmdrop(fdata->mm); |
800 | kobject_put(&dd->kobj); | 809 | kobject_put(&dd->kobj); |
810 | |||
811 | if (atomic_dec_and_test(&dd->user_refcount)) | ||
812 | complete(&dd->user_comp); | ||
813 | |||
801 | kfree(fdata); | 814 | kfree(fdata); |
802 | return 0; | 815 | return 0; |
803 | } | 816 | } |
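
The hfi1 file_ops change above pairs an inc-not-zero on open with a dec-and-test on close, so device removal can wait on a completion that fires once the last user and the driver's own initial reference are gone. A C11 sketch of the counting part (names invented; the actual wait is omitted):

#include <stdatomic.h>
#include <stdio.h>

static atomic_int user_refcount = 1;	/* the "device active" reference */

static int open_dev(void)
{
	int old = atomic_load(&user_refcount);

	do {				/* inc_not_zero: refuse once teardown hit zero */
		if (old == 0)
			return -1;
	} while (!atomic_compare_exchange_weak(&user_refcount, &old, old + 1));
	return 0;
}

static void close_dev(void)
{
	if (atomic_fetch_sub(&user_refcount, 1) == 1)
		printf("last reference dropped, wake the remover\n");
}

int main(void)
{
	if (!open_dev())
		close_dev();		/* drops the open's reference */
	close_dev();			/* remover drops the initial reference */
	return 0;
}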
diff --git a/drivers/infiniband/hw/hfi1/hfi.h b/drivers/infiniband/hw/hfi1/hfi.h index 7eef11b316ff..cc87fd4e534b 100644 --- a/drivers/infiniband/hw/hfi1/hfi.h +++ b/drivers/infiniband/hw/hfi1/hfi.h | |||
@@ -367,26 +367,6 @@ struct hfi1_packet { | |||
367 | u8 etype; | 367 | u8 etype; |
368 | }; | 368 | }; |
369 | 369 | ||
370 | /* | ||
371 | * Private data for snoop/capture support. | ||
372 | */ | ||
373 | struct hfi1_snoop_data { | ||
374 | int mode_flag; | ||
375 | struct cdev cdev; | ||
376 | struct device *class_dev; | ||
377 | /* protect snoop data */ | ||
378 | spinlock_t snoop_lock; | ||
379 | struct list_head queue; | ||
380 | wait_queue_head_t waitq; | ||
381 | void *filter_value; | ||
382 | int (*filter_callback)(void *hdr, void *data, void *value); | ||
383 | u64 dcc_cfg; /* saved value of DCC Cfg register */ | ||
384 | }; | ||
385 | |||
386 | /* snoop mode_flag values */ | ||
387 | #define HFI1_PORT_SNOOP_MODE 1U | ||
388 | #define HFI1_PORT_CAPTURE_MODE 2U | ||
389 | |||
390 | struct rvt_sge_state; | 370 | struct rvt_sge_state; |
391 | 371 | ||
392 | /* | 372 | /* |
@@ -613,8 +593,6 @@ struct hfi1_pportdata { | |||
613 | struct mutex hls_lock; | 593 | struct mutex hls_lock; |
614 | u32 host_link_state; | 594 | u32 host_link_state; |
615 | 595 | ||
616 | spinlock_t sdma_alllock ____cacheline_aligned_in_smp; | ||
617 | |||
618 | u32 lstate; /* logical link state */ | 596 | u32 lstate; /* logical link state */ |
619 | 597 | ||
620 | /* these are the "32 bit" regs */ | 598 | /* these are the "32 bit" regs */ |
@@ -1104,8 +1082,6 @@ struct hfi1_devdata { | |||
1104 | char *portcntrnames; | 1082 | char *portcntrnames; |
1105 | size_t portcntrnameslen; | 1083 | size_t portcntrnameslen; |
1106 | 1084 | ||
1107 | struct hfi1_snoop_data hfi1_snoop; | ||
1108 | |||
1109 | struct err_info_rcvport err_info_rcvport; | 1085 | struct err_info_rcvport err_info_rcvport; |
1110 | struct err_info_constraint err_info_rcv_constraint; | 1086 | struct err_info_constraint err_info_rcv_constraint; |
1111 | struct err_info_constraint err_info_xmit_constraint; | 1087 | struct err_info_constraint err_info_xmit_constraint; |
@@ -1141,8 +1117,8 @@ struct hfi1_devdata { | |||
1141 | rhf_rcv_function_ptr normal_rhf_rcv_functions[8]; | 1117 | rhf_rcv_function_ptr normal_rhf_rcv_functions[8]; |
1142 | 1118 | ||
1143 | /* | 1119 | /* |
1144 | * Handlers for outgoing data so that snoop/capture does not | 1120 | * Capability to have different send engines simply by changing a |
1145 | * have to have its hooks in the send path | 1121 | * pointer value. |
1146 | */ | 1122 | */ |
1147 | send_routine process_pio_send; | 1123 | send_routine process_pio_send; |
1148 | send_routine process_dma_send; | 1124 | send_routine process_dma_send; |
@@ -1174,6 +1150,10 @@ struct hfi1_devdata { | |||
1174 | spinlock_t aspm_lock; | 1150 | spinlock_t aspm_lock; |
1175 | /* Number of verbs contexts which have disabled ASPM */ | 1151 | /* Number of verbs contexts which have disabled ASPM */ |
1176 | atomic_t aspm_disabled_cnt; | 1152 | atomic_t aspm_disabled_cnt; |
1153 | /* Keeps track of user space clients */ | ||
1154 | atomic_t user_refcount; | ||
1155 | /* Used to wait for outstanding user space clients before dev removal */ | ||
1156 | struct completion user_comp; | ||
1177 | 1157 | ||
1178 | struct hfi1_affinity *affinity; | 1158 | struct hfi1_affinity *affinity; |
1179 | struct rhashtable sdma_rht; | 1159 | struct rhashtable sdma_rht; |
@@ -1221,8 +1201,6 @@ struct hfi1_devdata *hfi1_lookup(int unit); | |||
1221 | extern u32 hfi1_cpulist_count; | 1201 | extern u32 hfi1_cpulist_count; |
1222 | extern unsigned long *hfi1_cpulist; | 1202 | extern unsigned long *hfi1_cpulist; |
1223 | 1203 | ||
1224 | extern unsigned int snoop_drop_send; | ||
1225 | extern unsigned int snoop_force_capture; | ||
1226 | int hfi1_init(struct hfi1_devdata *, int); | 1204 | int hfi1_init(struct hfi1_devdata *, int); |
1227 | int hfi1_count_units(int *npresentp, int *nupp); | 1205 | int hfi1_count_units(int *npresentp, int *nupp); |
1228 | int hfi1_count_active_units(void); | 1206 | int hfi1_count_active_units(void); |
@@ -1557,13 +1535,6 @@ void set_up_vl15(struct hfi1_devdata *dd, u8 vau, u16 vl15buf); | |||
1557 | void reset_link_credits(struct hfi1_devdata *dd); | 1535 | void reset_link_credits(struct hfi1_devdata *dd); |
1558 | void assign_remote_cm_au_table(struct hfi1_devdata *dd, u8 vcu); | 1536 | void assign_remote_cm_au_table(struct hfi1_devdata *dd, u8 vcu); |
1559 | 1537 | ||
1560 | int snoop_recv_handler(struct hfi1_packet *packet); | ||
1561 | int snoop_send_dma_handler(struct rvt_qp *qp, struct hfi1_pkt_state *ps, | ||
1562 | u64 pbc); | ||
1563 | int snoop_send_pio_handler(struct rvt_qp *qp, struct hfi1_pkt_state *ps, | ||
1564 | u64 pbc); | ||
1565 | void snoop_inline_pio_send(struct hfi1_devdata *dd, struct pio_buf *pbuf, | ||
1566 | u64 pbc, const void *from, size_t count); | ||
1567 | int set_buffer_control(struct hfi1_pportdata *ppd, struct buffer_control *bc); | 1538 | int set_buffer_control(struct hfi1_pportdata *ppd, struct buffer_control *bc); |
1568 | 1539 | ||
1569 | static inline struct hfi1_devdata *dd_from_ppd(struct hfi1_pportdata *ppd) | 1540 | static inline struct hfi1_devdata *dd_from_ppd(struct hfi1_pportdata *ppd) |
@@ -1763,8 +1734,7 @@ int qsfp_dump(struct hfi1_pportdata *ppd, char *buf, int len); | |||
1763 | 1734 | ||
1764 | int hfi1_pcie_init(struct pci_dev *, const struct pci_device_id *); | 1735 | int hfi1_pcie_init(struct pci_dev *, const struct pci_device_id *); |
1765 | void hfi1_pcie_cleanup(struct pci_dev *); | 1736 | void hfi1_pcie_cleanup(struct pci_dev *); |
1766 | int hfi1_pcie_ddinit(struct hfi1_devdata *, struct pci_dev *, | 1737 | int hfi1_pcie_ddinit(struct hfi1_devdata *, struct pci_dev *); |
1767 | const struct pci_device_id *); | ||
1768 | void hfi1_pcie_ddcleanup(struct hfi1_devdata *); | 1738 | void hfi1_pcie_ddcleanup(struct hfi1_devdata *); |
1769 | void hfi1_pcie_flr(struct hfi1_devdata *); | 1739 | void hfi1_pcie_flr(struct hfi1_devdata *); |
1770 | int pcie_speeds(struct hfi1_devdata *); | 1740 | int pcie_speeds(struct hfi1_devdata *); |
@@ -1799,8 +1769,6 @@ int kdeth_process_expected(struct hfi1_packet *packet); | |||
1799 | int kdeth_process_eager(struct hfi1_packet *packet); | 1769 | int kdeth_process_eager(struct hfi1_packet *packet); |
1800 | int process_receive_invalid(struct hfi1_packet *packet); | 1770 | int process_receive_invalid(struct hfi1_packet *packet); |
1801 | 1771 | ||
1802 | extern rhf_rcv_function_ptr snoop_rhf_rcv_functions[8]; | ||
1803 | |||
1804 | void update_sge(struct rvt_sge_state *ss, u32 length); | 1772 | void update_sge(struct rvt_sge_state *ss, u32 length); |
1805 | 1773 | ||
1806 | /* global module parameter variables */ | 1774 | /* global module parameter variables */ |
@@ -1827,9 +1795,6 @@ extern struct mutex hfi1_mutex; | |||
1827 | #define DRIVER_NAME "hfi1" | 1795 | #define DRIVER_NAME "hfi1" |
1828 | #define HFI1_USER_MINOR_BASE 0 | 1796 | #define HFI1_USER_MINOR_BASE 0 |
1829 | #define HFI1_TRACE_MINOR 127 | 1797 | #define HFI1_TRACE_MINOR 127 |
1830 | #define HFI1_DIAGPKT_MINOR 128 | ||
1831 | #define HFI1_DIAG_MINOR_BASE 129 | ||
1832 | #define HFI1_SNOOP_CAPTURE_BASE 200 | ||
1833 | #define HFI1_NMINORS 255 | 1798 | #define HFI1_NMINORS 255 |
1834 | 1799 | ||
1835 | #define PCI_VENDOR_ID_INTEL 0x8086 | 1800 | #define PCI_VENDOR_ID_INTEL 0x8086 |
@@ -1848,7 +1813,13 @@ extern struct mutex hfi1_mutex; | |||
1848 | static inline u64 hfi1_pkt_default_send_ctxt_mask(struct hfi1_devdata *dd, | 1813 | static inline u64 hfi1_pkt_default_send_ctxt_mask(struct hfi1_devdata *dd, |
1849 | u16 ctxt_type) | 1814 | u16 ctxt_type) |
1850 | { | 1815 | { |
1851 | u64 base_sc_integrity = | 1816 | u64 base_sc_integrity; |
1817 | |||
1818 | /* No integrity checks if HFI1_CAP_NO_INTEGRITY is set */ | ||
1819 | if (HFI1_CAP_IS_KSET(NO_INTEGRITY)) | ||
1820 | return 0; | ||
1821 | |||
1822 | base_sc_integrity = | ||
1852 | SEND_CTXT_CHECK_ENABLE_DISALLOW_BYPASS_BAD_PKT_LEN_SMASK | 1823 | SEND_CTXT_CHECK_ENABLE_DISALLOW_BYPASS_BAD_PKT_LEN_SMASK |
1853 | | SEND_CTXT_CHECK_ENABLE_DISALLOW_PBC_STATIC_RATE_CONTROL_SMASK | 1824 | | SEND_CTXT_CHECK_ENABLE_DISALLOW_PBC_STATIC_RATE_CONTROL_SMASK |
1854 | | SEND_CTXT_CHECK_ENABLE_DISALLOW_TOO_LONG_BYPASS_PACKETS_SMASK | 1825 | | SEND_CTXT_CHECK_ENABLE_DISALLOW_TOO_LONG_BYPASS_PACKETS_SMASK |
@@ -1863,7 +1834,6 @@ static inline u64 hfi1_pkt_default_send_ctxt_mask(struct hfi1_devdata *dd, | |||
1863 | | SEND_CTXT_CHECK_ENABLE_CHECK_VL_MAPPING_SMASK | 1834 | | SEND_CTXT_CHECK_ENABLE_CHECK_VL_MAPPING_SMASK |
1864 | | SEND_CTXT_CHECK_ENABLE_CHECK_OPCODE_SMASK | 1835 | | SEND_CTXT_CHECK_ENABLE_CHECK_OPCODE_SMASK |
1865 | | SEND_CTXT_CHECK_ENABLE_CHECK_SLID_SMASK | 1836 | | SEND_CTXT_CHECK_ENABLE_CHECK_SLID_SMASK |
1866 | | SEND_CTXT_CHECK_ENABLE_CHECK_JOB_KEY_SMASK | ||
1867 | | SEND_CTXT_CHECK_ENABLE_CHECK_VL_SMASK | 1837 | | SEND_CTXT_CHECK_ENABLE_CHECK_VL_SMASK |
1868 | | SEND_CTXT_CHECK_ENABLE_CHECK_ENABLE_SMASK; | 1838 | | SEND_CTXT_CHECK_ENABLE_CHECK_ENABLE_SMASK; |
1869 | 1839 | ||
@@ -1872,18 +1842,23 @@ static inline u64 hfi1_pkt_default_send_ctxt_mask(struct hfi1_devdata *dd, | |||
1872 | else | 1842 | else |
1873 | base_sc_integrity |= HFI1_PKT_KERNEL_SC_INTEGRITY; | 1843 | base_sc_integrity |= HFI1_PKT_KERNEL_SC_INTEGRITY; |
1874 | 1844 | ||
1875 | if (is_ax(dd)) | 1845 | /* turn on send-side job key checks if !A0 */ |
1876 | /* turn off send-side job key checks - A0 */ | 1846 | if (!is_ax(dd)) |
1877 | return base_sc_integrity & | 1847 | base_sc_integrity |= SEND_CTXT_CHECK_ENABLE_CHECK_JOB_KEY_SMASK; |
1878 | ~SEND_CTXT_CHECK_ENABLE_CHECK_JOB_KEY_SMASK; | 1848 | |
1879 | return base_sc_integrity; | 1849 | return base_sc_integrity; |
1880 | } | 1850 | } |
1881 | 1851 | ||
1882 | static inline u64 hfi1_pkt_base_sdma_integrity(struct hfi1_devdata *dd) | 1852 | static inline u64 hfi1_pkt_base_sdma_integrity(struct hfi1_devdata *dd) |
1883 | { | 1853 | { |
1884 | u64 base_sdma_integrity = | 1854 | u64 base_sdma_integrity; |
1855 | |||
1856 | /* No integrity checks if HFI1_CAP_NO_INTEGRITY is set */ | ||
1857 | if (HFI1_CAP_IS_KSET(NO_INTEGRITY)) | ||
1858 | return 0; | ||
1859 | |||
1860 | base_sdma_integrity = | ||
1885 | SEND_DMA_CHECK_ENABLE_DISALLOW_BYPASS_BAD_PKT_LEN_SMASK | 1861 | SEND_DMA_CHECK_ENABLE_DISALLOW_BYPASS_BAD_PKT_LEN_SMASK |
1886 | | SEND_DMA_CHECK_ENABLE_DISALLOW_PBC_STATIC_RATE_CONTROL_SMASK | ||
1887 | | SEND_DMA_CHECK_ENABLE_DISALLOW_TOO_LONG_BYPASS_PACKETS_SMASK | 1862 | | SEND_DMA_CHECK_ENABLE_DISALLOW_TOO_LONG_BYPASS_PACKETS_SMASK |
1888 | | SEND_DMA_CHECK_ENABLE_DISALLOW_TOO_LONG_IB_PACKETS_SMASK | 1863 | | SEND_DMA_CHECK_ENABLE_DISALLOW_TOO_LONG_IB_PACKETS_SMASK |
1889 | | SEND_DMA_CHECK_ENABLE_DISALLOW_BAD_PKT_LEN_SMASK | 1864 | | SEND_DMA_CHECK_ENABLE_DISALLOW_BAD_PKT_LEN_SMASK |
@@ -1895,14 +1870,18 @@ static inline u64 hfi1_pkt_base_sdma_integrity(struct hfi1_devdata *dd) | |||
1895 | | SEND_DMA_CHECK_ENABLE_CHECK_VL_MAPPING_SMASK | 1870 | | SEND_DMA_CHECK_ENABLE_CHECK_VL_MAPPING_SMASK |
1896 | | SEND_DMA_CHECK_ENABLE_CHECK_OPCODE_SMASK | 1871 | | SEND_DMA_CHECK_ENABLE_CHECK_OPCODE_SMASK |
1897 | | SEND_DMA_CHECK_ENABLE_CHECK_SLID_SMASK | 1872 | | SEND_DMA_CHECK_ENABLE_CHECK_SLID_SMASK |
1898 | | SEND_DMA_CHECK_ENABLE_CHECK_JOB_KEY_SMASK | ||
1899 | | SEND_DMA_CHECK_ENABLE_CHECK_VL_SMASK | 1873 | | SEND_DMA_CHECK_ENABLE_CHECK_VL_SMASK |
1900 | | SEND_DMA_CHECK_ENABLE_CHECK_ENABLE_SMASK; | 1874 | | SEND_DMA_CHECK_ENABLE_CHECK_ENABLE_SMASK; |
1901 | 1875 | ||
1902 | if (is_ax(dd)) | 1876 | if (!HFI1_CAP_IS_KSET(STATIC_RATE_CTRL)) |
1903 | /* turn off send-side job key checks - A0 */ | 1877 | base_sdma_integrity |= |
1904 | return base_sdma_integrity & | 1878 | SEND_DMA_CHECK_ENABLE_DISALLOW_PBC_STATIC_RATE_CONTROL_SMASK; |
1905 | ~SEND_DMA_CHECK_ENABLE_CHECK_JOB_KEY_SMASK; | 1879 | |
1880 | /* turn on send-side job key checks if !A0 */ | ||
1881 | if (!is_ax(dd)) | ||
1882 | base_sdma_integrity |= | ||
1883 | SEND_DMA_CHECK_ENABLE_CHECK_JOB_KEY_SMASK; | ||
1884 | |||
1906 | return base_sdma_integrity; | 1885 | return base_sdma_integrity; |
1907 | } | 1886 | } |
1908 | 1887 | ||
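Both integrity-mask helpers above now return 0 up front when the NO_INTEGRITY capability is set, so callers no longer special-case it, and the job-key check bit is OR-ed in only on non-A0 silicon instead of being masked out afterwards. A small sketch of that "early out, then add optional bits" shape, using made-up flag values rather than the real SEND_CTXT/SEND_DMA masks:

    #include <linux/types.h>

    #define CHK_BASE_BITS    0x0fULL   /* placeholder for the always-on checks */
    #define CHK_JOB_KEY_BIT  0x10ULL   /* placeholder for an optional check */

    static u64 build_check_mask(bool no_integrity, bool is_a0_silicon)
    {
        u64 mask;

        if (no_integrity)
            return 0;                  /* nothing to check at all */

        mask = CHK_BASE_BITS;
        if (!is_a0_silicon)
            mask |= CHK_JOB_KEY_BIT;   /* enable rather than strip afterwards */
        return mask;
    }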
diff --git a/drivers/infiniband/hw/hfi1/init.c b/drivers/infiniband/hw/hfi1/init.c index 60db61536fed..e3b5bc93bc70 100644 --- a/drivers/infiniband/hw/hfi1/init.c +++ b/drivers/infiniband/hw/hfi1/init.c | |||
@@ -144,6 +144,8 @@ int hfi1_create_ctxts(struct hfi1_devdata *dd) | |||
144 | struct hfi1_ctxtdata *rcd; | 144 | struct hfi1_ctxtdata *rcd; |
145 | 145 | ||
146 | ppd = dd->pport + (i % dd->num_pports); | 146 | ppd = dd->pport + (i % dd->num_pports); |
147 | |||
148 | /* dd->rcd[i] gets assigned inside the callee */ | ||
147 | rcd = hfi1_create_ctxtdata(ppd, i, dd->node); | 149 | rcd = hfi1_create_ctxtdata(ppd, i, dd->node); |
148 | if (!rcd) { | 150 | if (!rcd) { |
149 | dd_dev_err(dd, | 151 | dd_dev_err(dd, |
@@ -169,8 +171,6 @@ int hfi1_create_ctxts(struct hfi1_devdata *dd) | |||
169 | if (!rcd->sc) { | 171 | if (!rcd->sc) { |
170 | dd_dev_err(dd, | 172 | dd_dev_err(dd, |
171 | "Unable to allocate kernel send context, failing\n"); | 173 | "Unable to allocate kernel send context, failing\n"); |
172 | dd->rcd[rcd->ctxt] = NULL; | ||
173 | hfi1_free_ctxtdata(dd, rcd); | ||
174 | goto nomem; | 174 | goto nomem; |
175 | } | 175 | } |
176 | 176 | ||
@@ -178,9 +178,6 @@ int hfi1_create_ctxts(struct hfi1_devdata *dd) | |||
178 | if (ret < 0) { | 178 | if (ret < 0) { |
179 | dd_dev_err(dd, | 179 | dd_dev_err(dd, |
180 | "Failed to setup kernel receive context, failing\n"); | 180 | "Failed to setup kernel receive context, failing\n"); |
181 | sc_free(rcd->sc); | ||
182 | dd->rcd[rcd->ctxt] = NULL; | ||
183 | hfi1_free_ctxtdata(dd, rcd); | ||
184 | ret = -EFAULT; | 181 | ret = -EFAULT; |
185 | goto bail; | 182 | goto bail; |
186 | } | 183 | } |
@@ -196,6 +193,10 @@ int hfi1_create_ctxts(struct hfi1_devdata *dd) | |||
196 | nomem: | 193 | nomem: |
197 | ret = -ENOMEM; | 194 | ret = -ENOMEM; |
198 | bail: | 195 | bail: |
196 | if (dd->rcd) { | ||
197 | for (i = 0; i < dd->num_rcv_contexts; ++i) | ||
198 | hfi1_free_ctxtdata(dd, dd->rcd[i]); | ||
199 | } | ||
199 | kfree(dd->rcd); | 200 | kfree(dd->rcd); |
200 | dd->rcd = NULL; | 201 | dd->rcd = NULL; |
201 | return ret; | 202 | return ret; |
@@ -216,7 +217,7 @@ struct hfi1_ctxtdata *hfi1_create_ctxtdata(struct hfi1_pportdata *ppd, u32 ctxt, | |||
216 | dd->num_rcv_contexts - dd->first_user_ctxt) | 217 | dd->num_rcv_contexts - dd->first_user_ctxt) |
217 | kctxt_ngroups = (dd->rcv_entries.nctxt_extra - | 218 | kctxt_ngroups = (dd->rcv_entries.nctxt_extra - |
218 | (dd->num_rcv_contexts - dd->first_user_ctxt)); | 219 | (dd->num_rcv_contexts - dd->first_user_ctxt)); |
219 | rcd = kzalloc(sizeof(*rcd), GFP_KERNEL); | 220 | rcd = kzalloc_node(sizeof(*rcd), GFP_KERNEL, numa); |
220 | if (rcd) { | 221 | if (rcd) { |
221 | u32 rcvtids, max_entries; | 222 | u32 rcvtids, max_entries; |
222 | 223 | ||
@@ -261,13 +262,6 @@ struct hfi1_ctxtdata *hfi1_create_ctxtdata(struct hfi1_pportdata *ppd, u32 ctxt, | |||
261 | } | 262 | } |
262 | rcd->eager_base = base * dd->rcv_entries.group_size; | 263 | rcd->eager_base = base * dd->rcv_entries.group_size; |
263 | 264 | ||
264 | /* Validate and initialize Rcv Hdr Q variables */ | ||
265 | if (rcvhdrcnt % HDRQ_INCREMENT) { | ||
266 | dd_dev_err(dd, | ||
267 | "ctxt%u: header queue count %d must be divisible by %lu\n", | ||
268 | rcd->ctxt, rcvhdrcnt, HDRQ_INCREMENT); | ||
269 | goto bail; | ||
270 | } | ||
271 | rcd->rcvhdrq_cnt = rcvhdrcnt; | 265 | rcd->rcvhdrq_cnt = rcvhdrcnt; |
272 | rcd->rcvhdrqentsize = hfi1_hdrq_entsize; | 266 | rcd->rcvhdrqentsize = hfi1_hdrq_entsize; |
273 | /* | 267 | /* |
@@ -506,7 +500,6 @@ void hfi1_init_pportdata(struct pci_dev *pdev, struct hfi1_pportdata *ppd, | |||
506 | INIT_WORK(&ppd->qsfp_info.qsfp_work, qsfp_event); | 500 | INIT_WORK(&ppd->qsfp_info.qsfp_work, qsfp_event); |
507 | 501 | ||
508 | mutex_init(&ppd->hls_lock); | 502 | mutex_init(&ppd->hls_lock); |
509 | spin_lock_init(&ppd->sdma_alllock); | ||
510 | spin_lock_init(&ppd->qsfp_info.qsfp_lock); | 503 | spin_lock_init(&ppd->qsfp_info.qsfp_lock); |
511 | 504 | ||
512 | ppd->qsfp_info.ppd = ppd; | 505 | ppd->qsfp_info.ppd = ppd; |
@@ -1399,28 +1392,43 @@ static void postinit_cleanup(struct hfi1_devdata *dd) | |||
1399 | hfi1_free_devdata(dd); | 1392 | hfi1_free_devdata(dd); |
1400 | } | 1393 | } |
1401 | 1394 | ||
1395 | static int init_validate_rcvhdrcnt(struct device *dev, uint thecnt) | ||
1396 | { | ||
1397 | if (thecnt <= HFI1_MIN_HDRQ_EGRBUF_CNT) { | ||
1398 | hfi1_early_err(dev, "Receive header queue count too small\n"); | ||
1399 | return -EINVAL; | ||
1400 | } | ||
1401 | |||
1402 | if (thecnt > HFI1_MAX_HDRQ_EGRBUF_CNT) { | ||
1403 | hfi1_early_err(dev, | ||
1404 | "Receive header queue count cannot be greater than %u\n", | ||
1405 | HFI1_MAX_HDRQ_EGRBUF_CNT); | ||
1406 | return -EINVAL; | ||
1407 | } | ||
1408 | |||
1409 | if (thecnt % HDRQ_INCREMENT) { | ||
1410 | hfi1_early_err(dev, "Receive header queue count %d must be divisible by %lu\n", | ||
1411 | thecnt, HDRQ_INCREMENT); | ||
1412 | return -EINVAL; | ||
1413 | } | ||
1414 | |||
1415 | return 0; | ||
1416 | } | ||
1417 | |||
1402 | static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent) | 1418 | static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent) |
1403 | { | 1419 | { |
1404 | int ret = 0, j, pidx, initfail; | 1420 | int ret = 0, j, pidx, initfail; |
1405 | struct hfi1_devdata *dd = ERR_PTR(-EINVAL); | 1421 | struct hfi1_devdata *dd; |
1406 | struct hfi1_pportdata *ppd; | 1422 | struct hfi1_pportdata *ppd; |
1407 | 1423 | ||
1408 | /* First, lock the non-writable module parameters */ | 1424 | /* First, lock the non-writable module parameters */ |
1409 | HFI1_CAP_LOCK(); | 1425 | HFI1_CAP_LOCK(); |
1410 | 1426 | ||
1411 | /* Validate some global module parameters */ | 1427 | /* Validate some global module parameters */ |
1412 | if (rcvhdrcnt <= HFI1_MIN_HDRQ_EGRBUF_CNT) { | 1428 | ret = init_validate_rcvhdrcnt(&pdev->dev, rcvhdrcnt); |
1413 | hfi1_early_err(&pdev->dev, "Header queue count too small\n"); | 1429 | if (ret) |
1414 | ret = -EINVAL; | ||
1415 | goto bail; | ||
1416 | } | ||
1417 | if (rcvhdrcnt > HFI1_MAX_HDRQ_EGRBUF_CNT) { | ||
1418 | hfi1_early_err(&pdev->dev, | ||
1419 | "Receive header queue count cannot be greater than %u\n", | ||
1420 | HFI1_MAX_HDRQ_EGRBUF_CNT); | ||
1421 | ret = -EINVAL; | ||
1422 | goto bail; | 1430 | goto bail; |
1423 | } | 1431 | |
1424 | /* use the encoding function as a sanitization check */ | 1432 | /* use the encoding function as a sanitization check */ |
1425 | if (!encode_rcv_header_entry_size(hfi1_hdrq_entsize)) { | 1433 | if (!encode_rcv_header_entry_size(hfi1_hdrq_entsize)) { |
1426 | hfi1_early_err(&pdev->dev, "Invalid HdrQ Entry size %u\n", | 1434 | hfi1_early_err(&pdev->dev, "Invalid HdrQ Entry size %u\n", |
@@ -1461,26 +1469,25 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
1461 | if (ret) | 1469 | if (ret) |
1462 | goto bail; | 1470 | goto bail; |
1463 | 1471 | ||
1464 | /* | 1472 | if (!(ent->device == PCI_DEVICE_ID_INTEL0 || |
1465 | * Do device-specific initialization, function table setup, dd | 1473 | ent->device == PCI_DEVICE_ID_INTEL1)) { |
1466 | * allocation, etc. | ||
1467 | */ | ||
1468 | switch (ent->device) { | ||
1469 | case PCI_DEVICE_ID_INTEL0: | ||
1470 | case PCI_DEVICE_ID_INTEL1: | ||
1471 | dd = hfi1_init_dd(pdev, ent); | ||
1472 | break; | ||
1473 | default: | ||
1474 | hfi1_early_err(&pdev->dev, | 1474 | hfi1_early_err(&pdev->dev, |
1475 | "Failing on unknown Intel deviceid 0x%x\n", | 1475 | "Failing on unknown Intel deviceid 0x%x\n", |
1476 | ent->device); | 1476 | ent->device); |
1477 | ret = -ENODEV; | 1477 | ret = -ENODEV; |
1478 | goto clean_bail; | ||
1478 | } | 1479 | } |
1479 | 1480 | ||
1480 | if (IS_ERR(dd)) | 1481 | /* |
1482 | * Do device-specific initialization, function table setup, dd | ||
1483 | * allocation, etc. | ||
1484 | */ | ||
1485 | dd = hfi1_init_dd(pdev, ent); | ||
1486 | |||
1487 | if (IS_ERR(dd)) { | ||
1481 | ret = PTR_ERR(dd); | 1488 | ret = PTR_ERR(dd); |
1482 | if (ret) | ||
1483 | goto clean_bail; /* error already printed */ | 1489 | goto clean_bail; /* error already printed */ |
1490 | } | ||
1484 | 1491 | ||
1485 | ret = create_workqueues(dd); | 1492 | ret = create_workqueues(dd); |
1486 | if (ret) | 1493 | if (ret) |
@@ -1538,12 +1545,31 @@ bail: | |||
1538 | return ret; | 1545 | return ret; |
1539 | } | 1546 | } |
1540 | 1547 | ||
1548 | static void wait_for_clients(struct hfi1_devdata *dd) | ||
1549 | { | ||
1550 | /* | ||
1551 | * Remove the device init value and complete the device if there are | ||
1552 | * no clients, or wait for active clients to finish. | ||
1551 | * Remove the device init value and complete the device if there are | ||
1553 | */ | ||
1554 | if (atomic_dec_and_test(&dd->user_refcount)) | ||
1555 | complete(&dd->user_comp); | ||
1556 | |||
1557 | wait_for_completion(&dd->user_comp); | ||
1558 | } | ||
1559 | |||
1541 | static void remove_one(struct pci_dev *pdev) | 1560 | static void remove_one(struct pci_dev *pdev) |
1542 | { | 1561 | { |
1543 | struct hfi1_devdata *dd = pci_get_drvdata(pdev); | 1562 | struct hfi1_devdata *dd = pci_get_drvdata(pdev); |
1544 | 1563 | ||
1545 | /* close debugfs files before ib unregister */ | 1564 | /* close debugfs files before ib unregister */ |
1546 | hfi1_dbg_ibdev_exit(&dd->verbs_dev); | 1565 | hfi1_dbg_ibdev_exit(&dd->verbs_dev); |
1566 | |||
1567 | /* remove the /dev hfi1 interface */ | ||
1568 | hfi1_device_remove(dd); | ||
1569 | |||
1570 | /* wait for existing user space clients to finish */ | ||
1571 | wait_for_clients(dd); | ||
1572 | |||
1547 | /* unregister from IB core */ | 1573 | /* unregister from IB core */ |
1548 | hfi1_unregister_ib_device(dd); | 1574 | hfi1_unregister_ib_device(dd); |
1549 | 1575 | ||
@@ -1558,8 +1584,6 @@ static void remove_one(struct pci_dev *pdev) | |||
1558 | /* wait until all of our (qsfp) queue_work() calls complete */ | 1584 | /* wait until all of our (qsfp) queue_work() calls complete */ |
1559 | flush_workqueue(ib_wq); | 1585 | flush_workqueue(ib_wq); |
1560 | 1586 | ||
1561 | hfi1_device_remove(dd); | ||
1562 | |||
1563 | postinit_cleanup(dd); | 1587 | postinit_cleanup(dd); |
1564 | } | 1588 | } |
1565 | 1589 | ||
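init_one() above now delegates the rcvhdrcnt checks to init_validate_rcvhdrcnt(), which also absorbs the divisibility test that previously lived in hfi1_create_ctxtdata(). The shape of such a validator is just bounds plus alignment; here is a standalone sketch with placeholder limits, not the real HFI1 constants:

    #include <errno.h>
    #include <stdio.h>

    #define MIN_CNT   64u      /* placeholder minimum */
    #define MAX_CNT   16352u   /* placeholder maximum */
    #define INCREMENT 32u      /* placeholder granularity */

    static int validate_hdrq_count(unsigned int cnt)
    {
        if (cnt <= MIN_CNT) {
            fprintf(stderr, "receive header queue count %u too small\n", cnt);
            return -EINVAL;
        }
        if (cnt > MAX_CNT) {
            fprintf(stderr, "receive header queue count %u exceeds %u\n",
                    cnt, MAX_CNT);
            return -EINVAL;
        }
        if (cnt % INCREMENT) {
            fprintf(stderr, "receive header queue count %u must be divisible by %u\n",
                    cnt, INCREMENT);
            return -EINVAL;
        }
        return 0;
    }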
diff --git a/drivers/infiniband/hw/hfi1/pcie.c b/drivers/infiniband/hw/hfi1/pcie.c index 89c68da1c273..4ac8f330c5cb 100644 --- a/drivers/infiniband/hw/hfi1/pcie.c +++ b/drivers/infiniband/hw/hfi1/pcie.c | |||
@@ -157,8 +157,7 @@ void hfi1_pcie_cleanup(struct pci_dev *pdev) | |||
157 | * fields required to re-initialize after a chip reset, or for | 157 | * fields required to re-initialize after a chip reset, or for |
158 | * various other purposes | 158 | * various other purposes |
159 | */ | 159 | */ |
160 | int hfi1_pcie_ddinit(struct hfi1_devdata *dd, struct pci_dev *pdev, | 160 | int hfi1_pcie_ddinit(struct hfi1_devdata *dd, struct pci_dev *pdev) |
161 | const struct pci_device_id *ent) | ||
162 | { | 161 | { |
163 | unsigned long len; | 162 | unsigned long len; |
164 | resource_size_t addr; | 163 | resource_size_t addr; |
diff --git a/drivers/infiniband/hw/hfi1/pio.c b/drivers/infiniband/hw/hfi1/pio.c index 50a3a36d9363..d89b8745d4c1 100644 --- a/drivers/infiniband/hw/hfi1/pio.c +++ b/drivers/infiniband/hw/hfi1/pio.c | |||
@@ -668,19 +668,12 @@ void sc_set_cr_threshold(struct send_context *sc, u32 new_threshold) | |||
668 | void set_pio_integrity(struct send_context *sc) | 668 | void set_pio_integrity(struct send_context *sc) |
669 | { | 669 | { |
670 | struct hfi1_devdata *dd = sc->dd; | 670 | struct hfi1_devdata *dd = sc->dd; |
671 | u64 reg = 0; | ||
672 | u32 hw_context = sc->hw_context; | 671 | u32 hw_context = sc->hw_context; |
673 | int type = sc->type; | 672 | int type = sc->type; |
674 | 673 | ||
675 | /* | 674 | write_kctxt_csr(dd, hw_context, |
676 | * No integrity checks if HFI1_CAP_NO_INTEGRITY is set, or if | 675 | SC(CHECK_ENABLE), |
677 | * we're snooping. | 676 | hfi1_pkt_default_send_ctxt_mask(dd, type)); |
678 | */ | ||
679 | if (likely(!HFI1_CAP_IS_KSET(NO_INTEGRITY)) && | ||
680 | dd->hfi1_snoop.mode_flag != HFI1_PORT_SNOOP_MODE) | ||
681 | reg = hfi1_pkt_default_send_ctxt_mask(dd, type); | ||
682 | |||
683 | write_kctxt_csr(dd, hw_context, SC(CHECK_ENABLE), reg); | ||
684 | } | 677 | } |
685 | 678 | ||
686 | static u32 get_buffers_allocated(struct send_context *sc) | 679 | static u32 get_buffers_allocated(struct send_context *sc) |
diff --git a/drivers/infiniband/hw/hfi1/rc.c b/drivers/infiniband/hw/hfi1/rc.c index 8bc5013f39a1..83198a8a8797 100644 --- a/drivers/infiniband/hw/hfi1/rc.c +++ b/drivers/infiniband/hw/hfi1/rc.c | |||
@@ -89,7 +89,7 @@ void hfi1_add_rnr_timer(struct rvt_qp *qp, u32 to) | |||
89 | 89 | ||
90 | lockdep_assert_held(&qp->s_lock); | 90 | lockdep_assert_held(&qp->s_lock); |
91 | qp->s_flags |= RVT_S_WAIT_RNR; | 91 | qp->s_flags |= RVT_S_WAIT_RNR; |
92 | qp->s_timer.expires = jiffies + usecs_to_jiffies(to); | 92 | priv->s_rnr_timer.expires = jiffies + usecs_to_jiffies(to); |
93 | add_timer(&priv->s_rnr_timer); | 93 | add_timer(&priv->s_rnr_timer); |
94 | } | 94 | } |
95 | 95 | ||
diff --git a/drivers/infiniband/hw/hfi1/sdma.c b/drivers/infiniband/hw/hfi1/sdma.c index fd39bcaa062d..9cbe52d21077 100644 --- a/drivers/infiniband/hw/hfi1/sdma.c +++ b/drivers/infiniband/hw/hfi1/sdma.c | |||
@@ -2009,11 +2009,6 @@ static void sdma_hw_start_up(struct sdma_engine *sde) | |||
2009 | write_sde_csr(sde, SD(ENG_ERR_CLEAR), reg); | 2009 | write_sde_csr(sde, SD(ENG_ERR_CLEAR), reg); |
2010 | } | 2010 | } |
2011 | 2011 | ||
2012 | #define CLEAR_STATIC_RATE_CONTROL_SMASK(r) \ | ||
2013 | (r &= ~SEND_DMA_CHECK_ENABLE_DISALLOW_PBC_STATIC_RATE_CONTROL_SMASK) | ||
2014 | |||
2015 | #define SET_STATIC_RATE_CONTROL_SMASK(r) \ | ||
2016 | (r |= SEND_DMA_CHECK_ENABLE_DISALLOW_PBC_STATIC_RATE_CONTROL_SMASK) | ||
2017 | /* | 2012 | /* |
2018 | * set_sdma_integrity | 2013 | * set_sdma_integrity |
2019 | * | 2014 | * |
@@ -2022,19 +2017,9 @@ static void sdma_hw_start_up(struct sdma_engine *sde) | |||
2022 | static void set_sdma_integrity(struct sdma_engine *sde) | 2017 | static void set_sdma_integrity(struct sdma_engine *sde) |
2023 | { | 2018 | { |
2024 | struct hfi1_devdata *dd = sde->dd; | 2019 | struct hfi1_devdata *dd = sde->dd; |
2025 | u64 reg; | ||
2026 | |||
2027 | if (unlikely(HFI1_CAP_IS_KSET(NO_INTEGRITY))) | ||
2028 | return; | ||
2029 | |||
2030 | reg = hfi1_pkt_base_sdma_integrity(dd); | ||
2031 | |||
2032 | if (HFI1_CAP_IS_KSET(STATIC_RATE_CTRL)) | ||
2033 | CLEAR_STATIC_RATE_CONTROL_SMASK(reg); | ||
2034 | else | ||
2035 | SET_STATIC_RATE_CONTROL_SMASK(reg); | ||
2036 | 2020 | ||
2037 | write_sde_csr(sde, SD(CHECK_ENABLE), reg); | 2021 | write_sde_csr(sde, SD(CHECK_ENABLE), |
2022 | hfi1_pkt_base_sdma_integrity(dd)); | ||
2038 | } | 2023 | } |
2039 | 2024 | ||
2040 | static void init_sdma_regs( | 2025 | static void init_sdma_regs( |
diff --git a/drivers/infiniband/hw/hfi1/sysfs.c b/drivers/infiniband/hw/hfi1/sysfs.c index edba22461a9c..919a5474e651 100644 --- a/drivers/infiniband/hw/hfi1/sysfs.c +++ b/drivers/infiniband/hw/hfi1/sysfs.c | |||
@@ -49,7 +49,6 @@ | |||
49 | #include "hfi.h" | 49 | #include "hfi.h" |
50 | #include "mad.h" | 50 | #include "mad.h" |
51 | #include "trace.h" | 51 | #include "trace.h" |
52 | #include "affinity.h" | ||
53 | 52 | ||
54 | /* | 53 | /* |
55 | * Start of per-port congestion control structures and support code | 54 | * Start of per-port congestion control structures and support code |
@@ -623,27 +622,6 @@ static ssize_t show_tempsense(struct device *device, | |||
623 | return ret; | 622 | return ret; |
624 | } | 623 | } |
625 | 624 | ||
626 | static ssize_t show_sdma_affinity(struct device *device, | ||
627 | struct device_attribute *attr, char *buf) | ||
628 | { | ||
629 | struct hfi1_ibdev *dev = | ||
630 | container_of(device, struct hfi1_ibdev, rdi.ibdev.dev); | ||
631 | struct hfi1_devdata *dd = dd_from_dev(dev); | ||
632 | |||
633 | return hfi1_get_sdma_affinity(dd, buf); | ||
634 | } | ||
635 | |||
636 | static ssize_t store_sdma_affinity(struct device *device, | ||
637 | struct device_attribute *attr, | ||
638 | const char *buf, size_t count) | ||
639 | { | ||
640 | struct hfi1_ibdev *dev = | ||
641 | container_of(device, struct hfi1_ibdev, rdi.ibdev.dev); | ||
642 | struct hfi1_devdata *dd = dd_from_dev(dev); | ||
643 | |||
644 | return hfi1_set_sdma_affinity(dd, buf, count); | ||
645 | } | ||
646 | |||
647 | /* | 625 | /* |
648 | * end of per-unit (or driver, in some cases, but replicated | 626 | * end of per-unit (or driver, in some cases, but replicated |
649 | * per unit) functions | 627 | * per unit) functions |
@@ -658,8 +636,6 @@ static DEVICE_ATTR(serial, S_IRUGO, show_serial, NULL); | |||
658 | static DEVICE_ATTR(boardversion, S_IRUGO, show_boardversion, NULL); | 636 | static DEVICE_ATTR(boardversion, S_IRUGO, show_boardversion, NULL); |
659 | static DEVICE_ATTR(tempsense, S_IRUGO, show_tempsense, NULL); | 637 | static DEVICE_ATTR(tempsense, S_IRUGO, show_tempsense, NULL); |
660 | static DEVICE_ATTR(chip_reset, S_IWUSR, NULL, store_chip_reset); | 638 | static DEVICE_ATTR(chip_reset, S_IWUSR, NULL, store_chip_reset); |
661 | static DEVICE_ATTR(sdma_affinity, S_IWUSR | S_IRUGO, show_sdma_affinity, | ||
662 | store_sdma_affinity); | ||
663 | 639 | ||
664 | static struct device_attribute *hfi1_attributes[] = { | 640 | static struct device_attribute *hfi1_attributes[] = { |
665 | &dev_attr_hw_rev, | 641 | &dev_attr_hw_rev, |
@@ -670,7 +646,6 @@ static struct device_attribute *hfi1_attributes[] = { | |||
670 | &dev_attr_boardversion, | 646 | &dev_attr_boardversion, |
671 | &dev_attr_tempsense, | 647 | &dev_attr_tempsense, |
672 | &dev_attr_chip_reset, | 648 | &dev_attr_chip_reset, |
673 | &dev_attr_sdma_affinity, | ||
674 | }; | 649 | }; |
675 | 650 | ||
676 | int hfi1_create_port_files(struct ib_device *ibdev, u8 port_num, | 651 | int hfi1_create_port_files(struct ib_device *ibdev, u8 port_num, |
diff --git a/drivers/infiniband/hw/hfi1/trace_rx.h b/drivers/infiniband/hw/hfi1/trace_rx.h index 11e02b228922..f77e59fb43fe 100644 --- a/drivers/infiniband/hw/hfi1/trace_rx.h +++ b/drivers/infiniband/hw/hfi1/trace_rx.h | |||
@@ -253,66 +253,6 @@ TRACE_EVENT(hfi1_mmu_invalidate, | |||
253 | ) | 253 | ) |
254 | ); | 254 | ); |
255 | 255 | ||
256 | #define SNOOP_PRN \ | ||
257 | "slid %.4x dlid %.4x qpn 0x%.6x opcode 0x%.2x,%s " \ | ||
258 | "svc lvl %d pkey 0x%.4x [header = %d bytes] [data = %d bytes]" | ||
259 | |||
260 | TRACE_EVENT(snoop_capture, | ||
261 | TP_PROTO(struct hfi1_devdata *dd, | ||
262 | int hdr_len, | ||
263 | struct ib_header *hdr, | ||
264 | int data_len, | ||
265 | void *data), | ||
266 | TP_ARGS(dd, hdr_len, hdr, data_len, data), | ||
267 | TP_STRUCT__entry( | ||
268 | DD_DEV_ENTRY(dd) | ||
269 | __field(u16, slid) | ||
270 | __field(u16, dlid) | ||
271 | __field(u32, qpn) | ||
272 | __field(u8, opcode) | ||
273 | __field(u8, sl) | ||
274 | __field(u16, pkey) | ||
275 | __field(u32, hdr_len) | ||
276 | __field(u32, data_len) | ||
277 | __field(u8, lnh) | ||
278 | __dynamic_array(u8, raw_hdr, hdr_len) | ||
279 | __dynamic_array(u8, raw_pkt, data_len) | ||
280 | ), | ||
281 | TP_fast_assign( | ||
282 | struct ib_other_headers *ohdr; | ||
283 | |||
284 | __entry->lnh = (u8)(be16_to_cpu(hdr->lrh[0]) & 3); | ||
285 | if (__entry->lnh == HFI1_LRH_BTH) | ||
286 | ohdr = &hdr->u.oth; | ||
287 | else | ||
288 | ohdr = &hdr->u.l.oth; | ||
289 | DD_DEV_ASSIGN(dd); | ||
290 | __entry->slid = be16_to_cpu(hdr->lrh[3]); | ||
291 | __entry->dlid = be16_to_cpu(hdr->lrh[1]); | ||
292 | __entry->qpn = be32_to_cpu(ohdr->bth[1]) & RVT_QPN_MASK; | ||
293 | __entry->opcode = (be32_to_cpu(ohdr->bth[0]) >> 24) & 0xff; | ||
294 | __entry->sl = (u8)(be16_to_cpu(hdr->lrh[0]) >> 4) & 0xf; | ||
295 | __entry->pkey = be32_to_cpu(ohdr->bth[0]) & 0xffff; | ||
296 | __entry->hdr_len = hdr_len; | ||
297 | __entry->data_len = data_len; | ||
298 | memcpy(__get_dynamic_array(raw_hdr), hdr, hdr_len); | ||
299 | memcpy(__get_dynamic_array(raw_pkt), data, data_len); | ||
300 | ), | ||
301 | TP_printk( | ||
302 | "[%s] " SNOOP_PRN, | ||
303 | __get_str(dev), | ||
304 | __entry->slid, | ||
305 | __entry->dlid, | ||
306 | __entry->qpn, | ||
307 | __entry->opcode, | ||
308 | show_ib_opcode(__entry->opcode), | ||
309 | __entry->sl, | ||
310 | __entry->pkey, | ||
311 | __entry->hdr_len, | ||
312 | __entry->data_len | ||
313 | ) | ||
314 | ); | ||
315 | |||
316 | #endif /* __HFI1_TRACE_RX_H */ | 256 | #endif /* __HFI1_TRACE_RX_H */ |
317 | 257 | ||
318 | #undef TRACE_INCLUDE_PATH | 258 | #undef TRACE_INCLUDE_PATH |
diff --git a/drivers/infiniband/hw/hfi1/user_sdma.c b/drivers/infiniband/hw/hfi1/user_sdma.c index a761f804111e..77697d690f3e 100644 --- a/drivers/infiniband/hw/hfi1/user_sdma.c +++ b/drivers/infiniband/hw/hfi1/user_sdma.c | |||
@@ -1144,7 +1144,7 @@ static int pin_vector_pages(struct user_sdma_request *req, | |||
1144 | rb_node = hfi1_mmu_rb_extract(pq->handler, | 1144 | rb_node = hfi1_mmu_rb_extract(pq->handler, |
1145 | (unsigned long)iovec->iov.iov_base, | 1145 | (unsigned long)iovec->iov.iov_base, |
1146 | iovec->iov.iov_len); | 1146 | iovec->iov.iov_len); |
1147 | if (rb_node && !IS_ERR(rb_node)) | 1147 | if (rb_node) |
1148 | node = container_of(rb_node, struct sdma_mmu_node, rb); | 1148 | node = container_of(rb_node, struct sdma_mmu_node, rb); |
1149 | else | 1149 | else |
1150 | rb_node = NULL; | 1150 | rb_node = NULL; |
diff --git a/drivers/infiniband/hw/mlx4/ah.c b/drivers/infiniband/hw/mlx4/ah.c index 5fc623362731..b9bf0759f10a 100644 --- a/drivers/infiniband/hw/mlx4/ah.c +++ b/drivers/infiniband/hw/mlx4/ah.c | |||
@@ -102,7 +102,10 @@ static struct ib_ah *create_iboe_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr | |||
102 | if (vlan_tag < 0x1000) | 102 | if (vlan_tag < 0x1000) |
103 | vlan_tag |= (ah_attr->sl & 7) << 13; | 103 | vlan_tag |= (ah_attr->sl & 7) << 13; |
104 | ah->av.eth.port_pd = cpu_to_be32(to_mpd(pd)->pdn | (ah_attr->port_num << 24)); | 104 | ah->av.eth.port_pd = cpu_to_be32(to_mpd(pd)->pdn | (ah_attr->port_num << 24)); |
105 | ah->av.eth.gid_index = mlx4_ib_gid_index_to_real_index(ibdev, ah_attr->port_num, ah_attr->grh.sgid_index); | 105 | ret = mlx4_ib_gid_index_to_real_index(ibdev, ah_attr->port_num, ah_attr->grh.sgid_index); |
106 | if (ret < 0) | ||
107 | return ERR_PTR(ret); | ||
108 | ah->av.eth.gid_index = ret; | ||
106 | ah->av.eth.vlan = cpu_to_be16(vlan_tag); | 109 | ah->av.eth.vlan = cpu_to_be16(vlan_tag); |
107 | ah->av.eth.hop_limit = ah_attr->grh.hop_limit; | 110 | ah->av.eth.hop_limit = ah_attr->grh.hop_limit; |
108 | if (ah_attr->static_rate) { | 111 | if (ah_attr->static_rate) { |
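The mlx4 ah.c fix above stops writing the return value of the GID index lookup straight into an unsigned field, where a negative error code would have been silently truncated; the result is now checked first and propagated via ERR_PTR(). A generic sketch of that pattern follows; the lookup below is a stand-in, not the mlx4 helper:

    #include <linux/err.h>
    #include <linux/errno.h>
    #include <linux/types.h>

    struct ah_sketch { u8 gid_index; };

    static int lookup_gid_index(u8 port, u8 sgid_index)
    {
        /* stand-in for a table lookup that can fail */
        return sgid_index < 16 ? sgid_index : -EINVAL;
    }

    static struct ah_sketch *fill_ah(struct ah_sketch *ah, u8 port, u8 sgid_index)
    {
        int ret = lookup_gid_index(port, sgid_index);

        if (ret < 0)
            return ERR_PTR(ret);   /* propagate instead of truncating into a u8 */
        ah->gid_index = ret;
        return ah;
    }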
diff --git a/drivers/infiniband/hw/mlx4/cq.c b/drivers/infiniband/hw/mlx4/cq.c index 1ea686b9e0f9..6a0fec357dae 100644 --- a/drivers/infiniband/hw/mlx4/cq.c +++ b/drivers/infiniband/hw/mlx4/cq.c | |||
@@ -253,11 +253,14 @@ struct ib_cq *mlx4_ib_create_cq(struct ib_device *ibdev, | |||
253 | if (context) | 253 | if (context) |
254 | if (ib_copy_to_udata(udata, &cq->mcq.cqn, sizeof (__u32))) { | 254 | if (ib_copy_to_udata(udata, &cq->mcq.cqn, sizeof (__u32))) { |
255 | err = -EFAULT; | 255 | err = -EFAULT; |
256 | goto err_dbmap; | 256 | goto err_cq_free; |
257 | } | 257 | } |
258 | 258 | ||
259 | return &cq->ibcq; | 259 | return &cq->ibcq; |
260 | 260 | ||
261 | err_cq_free: | ||
262 | mlx4_cq_free(dev->dev, &cq->mcq); | ||
263 | |||
261 | err_dbmap: | 264 | err_dbmap: |
262 | if (context) | 265 | if (context) |
263 | mlx4_ib_db_unmap_user(to_mucontext(context), &cq->db); | 266 | mlx4_ib_db_unmap_user(to_mucontext(context), &cq->db); |
diff --git a/drivers/infiniband/hw/mlx5/cq.c b/drivers/infiniband/hw/mlx5/cq.c index 79d017baf6f4..fcd04b881ec1 100644 --- a/drivers/infiniband/hw/mlx5/cq.c +++ b/drivers/infiniband/hw/mlx5/cq.c | |||
@@ -932,8 +932,7 @@ struct ib_cq *mlx5_ib_create_cq(struct ib_device *ibdev, | |||
932 | if (err) | 932 | if (err) |
933 | goto err_create; | 933 | goto err_create; |
934 | } else { | 934 | } else { |
935 | /* for now choose 64 bytes till we have a proper interface */ | 935 | cqe_size = cache_line_size() == 128 ? 128 : 64; |
936 | cqe_size = 64; | ||
937 | err = create_cq_kernel(dev, cq, entries, cqe_size, &cqb, | 936 | err = create_cq_kernel(dev, cq, entries, cqe_size, &cqb, |
938 | &index, &inlen); | 937 | &index, &inlen); |
939 | if (err) | 938 | if (err) |
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c index 63036c731626..32b09f059c84 100644 --- a/drivers/infiniband/hw/mlx5/main.c +++ b/drivers/infiniband/hw/mlx5/main.c | |||
@@ -2311,14 +2311,14 @@ static void mlx5_ib_event(struct mlx5_core_dev *dev, void *context, | |||
2311 | { | 2311 | { |
2312 | struct mlx5_ib_dev *ibdev = (struct mlx5_ib_dev *)context; | 2312 | struct mlx5_ib_dev *ibdev = (struct mlx5_ib_dev *)context; |
2313 | struct ib_event ibev; | 2313 | struct ib_event ibev; |
2314 | 2314 | bool fatal = false; | |
2315 | u8 port = 0; | 2315 | u8 port = 0; |
2316 | 2316 | ||
2317 | switch (event) { | 2317 | switch (event) { |
2318 | case MLX5_DEV_EVENT_SYS_ERROR: | 2318 | case MLX5_DEV_EVENT_SYS_ERROR: |
2319 | ibdev->ib_active = false; | ||
2320 | ibev.event = IB_EVENT_DEVICE_FATAL; | 2319 | ibev.event = IB_EVENT_DEVICE_FATAL; |
2321 | mlx5_ib_handle_internal_error(ibdev); | 2320 | mlx5_ib_handle_internal_error(ibdev); |
2321 | fatal = true; | ||
2322 | break; | 2322 | break; |
2323 | 2323 | ||
2324 | case MLX5_DEV_EVENT_PORT_UP: | 2324 | case MLX5_DEV_EVENT_PORT_UP: |
@@ -2370,6 +2370,9 @@ static void mlx5_ib_event(struct mlx5_core_dev *dev, void *context, | |||
2370 | 2370 | ||
2371 | if (ibdev->ib_active) | 2371 | if (ibdev->ib_active) |
2372 | ib_dispatch_event(&ibev); | 2372 | ib_dispatch_event(&ibev); |
2373 | |||
2374 | if (fatal) | ||
2375 | ibdev->ib_active = false; | ||
2373 | } | 2376 | } |
2374 | 2377 | ||
2375 | static void get_ext_port_caps(struct mlx5_ib_dev *dev) | 2378 | static void get_ext_port_caps(struct mlx5_ib_dev *dev) |
@@ -3115,7 +3118,7 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev) | |||
3115 | } | 3118 | } |
3116 | err = init_node_data(dev); | 3119 | err = init_node_data(dev); |
3117 | if (err) | 3120 | if (err) |
3118 | goto err_dealloc; | 3121 | goto err_free_port; |
3119 | 3122 | ||
3120 | mutex_init(&dev->flow_db.lock); | 3123 | mutex_init(&dev->flow_db.lock); |
3121 | mutex_init(&dev->cap_mask_mutex); | 3124 | mutex_init(&dev->cap_mask_mutex); |
@@ -3125,7 +3128,7 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev) | |||
3125 | if (ll == IB_LINK_LAYER_ETHERNET) { | 3128 | if (ll == IB_LINK_LAYER_ETHERNET) { |
3126 | err = mlx5_enable_roce(dev); | 3129 | err = mlx5_enable_roce(dev); |
3127 | if (err) | 3130 | if (err) |
3128 | goto err_dealloc; | 3131 | goto err_free_port; |
3129 | } | 3132 | } |
3130 | 3133 | ||
3131 | err = create_dev_resources(&dev->devr); | 3134 | err = create_dev_resources(&dev->devr); |
diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h index dcdcd195fe53..7d689903c87c 100644 --- a/drivers/infiniband/hw/mlx5/mlx5_ib.h +++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h | |||
@@ -626,6 +626,8 @@ struct mlx5_ib_dev { | |||
626 | struct mlx5_ib_resources devr; | 626 | struct mlx5_ib_resources devr; |
627 | struct mlx5_mr_cache cache; | 627 | struct mlx5_mr_cache cache; |
628 | struct timer_list delay_timer; | 628 | struct timer_list delay_timer; |
629 | /* Prevents soft lock on massive reg MRs */ | ||
630 | struct mutex slow_path_mutex; | ||
629 | int fill_delay; | 631 | int fill_delay; |
630 | #ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING | 632 | #ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING |
631 | struct ib_odp_caps odp_caps; | 633 | struct ib_odp_caps odp_caps; |
diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c index d4ad672b905b..4e9012463c37 100644 --- a/drivers/infiniband/hw/mlx5/mr.c +++ b/drivers/infiniband/hw/mlx5/mr.c | |||
@@ -610,6 +610,7 @@ int mlx5_mr_cache_init(struct mlx5_ib_dev *dev) | |||
610 | int err; | 610 | int err; |
611 | int i; | 611 | int i; |
612 | 612 | ||
613 | mutex_init(&dev->slow_path_mutex); | ||
613 | cache->wq = alloc_ordered_workqueue("mkey_cache", WQ_MEM_RECLAIM); | 614 | cache->wq = alloc_ordered_workqueue("mkey_cache", WQ_MEM_RECLAIM); |
614 | if (!cache->wq) { | 615 | if (!cache->wq) { |
615 | mlx5_ib_warn(dev, "failed to create work queue\n"); | 616 | mlx5_ib_warn(dev, "failed to create work queue\n"); |
@@ -1182,9 +1183,12 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, | |||
1182 | goto error; | 1183 | goto error; |
1183 | } | 1184 | } |
1184 | 1185 | ||
1185 | if (!mr) | 1186 | if (!mr) { |
1187 | mutex_lock(&dev->slow_path_mutex); | ||
1186 | mr = reg_create(NULL, pd, virt_addr, length, umem, ncont, | 1188 | mr = reg_create(NULL, pd, virt_addr, length, umem, ncont, |
1187 | page_shift, access_flags); | 1189 | page_shift, access_flags); |
1190 | mutex_unlock(&dev->slow_path_mutex); | ||
1191 | } | ||
1188 | 1192 | ||
1189 | if (IS_ERR(mr)) { | 1193 | if (IS_ERR(mr)) { |
1190 | err = PTR_ERR(mr); | 1194 | err = PTR_ERR(mr); |
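The mlx5 mr.c change serializes only the fallback path: registrations that miss the MR cache take the new slow_path_mutex around reg_create(), while cache hits stay unserialized. A minimal sketch of "lock the slow branch only", with the fast and slow steps passed in as stand-ins for the cache lookup and the firmware-backed creation:

    #include <linux/mutex.h>

    struct dev_sketch {
        struct mutex slow_path_mutex;
    };

    static void *register_obj(struct dev_sketch *dev,
                              void *(*fast_alloc)(struct dev_sketch *),
                              void *(*slow_create)(struct dev_sketch *))
    {
        void *obj = fast_alloc(dev);     /* e.g. grab from a pre-filled cache */

        if (!obj) {
            mutex_lock(&dev->slow_path_mutex);   /* one slow creation at a time */
            obj = slow_create(dev);
            mutex_unlock(&dev->slow_path_mutex);
        }
        return obj;
    }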
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c index 7ce97daf26c6..d1e921816bfe 100644 --- a/drivers/infiniband/hw/mlx5/qp.c +++ b/drivers/infiniband/hw/mlx5/qp.c | |||
@@ -2051,8 +2051,8 @@ struct ib_qp *mlx5_ib_create_qp(struct ib_pd *pd, | |||
2051 | 2051 | ||
2052 | mlx5_ib_dbg(dev, "ib qpnum 0x%x, mlx qpn 0x%x, rcqn 0x%x, scqn 0x%x\n", | 2052 | mlx5_ib_dbg(dev, "ib qpnum 0x%x, mlx qpn 0x%x, rcqn 0x%x, scqn 0x%x\n", |
2053 | qp->ibqp.qp_num, qp->trans_qp.base.mqp.qpn, | 2053 | qp->ibqp.qp_num, qp->trans_qp.base.mqp.qpn, |
2054 | to_mcq(init_attr->recv_cq)->mcq.cqn, | 2054 | init_attr->recv_cq ? to_mcq(init_attr->recv_cq)->mcq.cqn : -1, |
2055 | to_mcq(init_attr->send_cq)->mcq.cqn); | 2055 | init_attr->send_cq ? to_mcq(init_attr->send_cq)->mcq.cqn : -1); |
2056 | 2056 | ||
2057 | qp->trans_qp.xrcdn = xrcdn; | 2057 | qp->trans_qp.xrcdn = xrcdn; |
2058 | 2058 | ||
@@ -4814,6 +4814,14 @@ struct ib_rwq_ind_table *mlx5_ib_create_rwq_ind_table(struct ib_device *device, | |||
4814 | udata->inlen)) | 4814 | udata->inlen)) |
4815 | return ERR_PTR(-EOPNOTSUPP); | 4815 | return ERR_PTR(-EOPNOTSUPP); |
4816 | 4816 | ||
4817 | if (init_attr->log_ind_tbl_size > | ||
4818 | MLX5_CAP_GEN(dev->mdev, log_max_rqt_size)) { | ||
4819 | mlx5_ib_dbg(dev, "log_ind_tbl_size = %d is bigger than supported = %d\n", | ||
4820 | init_attr->log_ind_tbl_size, | ||
4821 | MLX5_CAP_GEN(dev->mdev, log_max_rqt_size)); | ||
4822 | return ERR_PTR(-EINVAL); | ||
4823 | } | ||
4824 | |||
4817 | min_resp_len = offsetof(typeof(resp), reserved) + sizeof(resp.reserved); | 4825 | min_resp_len = offsetof(typeof(resp), reserved) + sizeof(resp.reserved); |
4818 | if (udata->outlen && udata->outlen < min_resp_len) | 4826 | if (udata->outlen && udata->outlen < min_resp_len) |
4819 | return ERR_PTR(-EINVAL); | 4827 | return ERR_PTR(-EINVAL); |
diff --git a/drivers/infiniband/sw/rdmavt/dma.c b/drivers/infiniband/sw/rdmavt/dma.c index 01f71caa3ac4..f2cefb0d9180 100644 --- a/drivers/infiniband/sw/rdmavt/dma.c +++ b/drivers/infiniband/sw/rdmavt/dma.c | |||
@@ -90,9 +90,6 @@ static u64 rvt_dma_map_page(struct ib_device *dev, struct page *page, | |||
90 | if (WARN_ON(!valid_dma_direction(direction))) | 90 | if (WARN_ON(!valid_dma_direction(direction))) |
91 | return BAD_DMA_ADDRESS; | 91 | return BAD_DMA_ADDRESS; |
92 | 92 | ||
93 | if (offset + size > PAGE_SIZE) | ||
94 | return BAD_DMA_ADDRESS; | ||
95 | |||
96 | addr = (u64)page_address(page); | 93 | addr = (u64)page_address(page); |
97 | if (addr) | 94 | if (addr) |
98 | addr += offset; | 95 | addr += offset; |
diff --git a/drivers/infiniband/sw/rxe/rxe_net.c b/drivers/infiniband/sw/rxe/rxe_net.c index b8258e4f0aea..ffff5a54cb34 100644 --- a/drivers/infiniband/sw/rxe/rxe_net.c +++ b/drivers/infiniband/sw/rxe/rxe_net.c | |||
@@ -243,10 +243,8 @@ static struct socket *rxe_setup_udp_tunnel(struct net *net, __be16 port, | |||
243 | { | 243 | { |
244 | int err; | 244 | int err; |
245 | struct socket *sock; | 245 | struct socket *sock; |
246 | struct udp_port_cfg udp_cfg; | 246 | struct udp_port_cfg udp_cfg = {0}; |
247 | struct udp_tunnel_sock_cfg tnl_cfg; | 247 | struct udp_tunnel_sock_cfg tnl_cfg = {0}; |
248 | |||
249 | memset(&udp_cfg, 0, sizeof(udp_cfg)); | ||
250 | 248 | ||
251 | if (ipv6) { | 249 | if (ipv6) { |
252 | udp_cfg.family = AF_INET6; | 250 | udp_cfg.family = AF_INET6; |
@@ -264,10 +262,8 @@ static struct socket *rxe_setup_udp_tunnel(struct net *net, __be16 port, | |||
264 | return ERR_PTR(err); | 262 | return ERR_PTR(err); |
265 | } | 263 | } |
266 | 264 | ||
267 | tnl_cfg.sk_user_data = NULL; | ||
268 | tnl_cfg.encap_type = 1; | 265 | tnl_cfg.encap_type = 1; |
269 | tnl_cfg.encap_rcv = rxe_udp_encap_recv; | 266 | tnl_cfg.encap_rcv = rxe_udp_encap_recv; |
270 | tnl_cfg.encap_destroy = NULL; | ||
271 | 267 | ||
272 | /* Setup UDP tunnel */ | 268 | /* Setup UDP tunnel */ |
273 | setup_udp_tunnel_sock(net, sock, &tnl_cfg); | 269 | setup_udp_tunnel_sock(net, sock, &tnl_cfg); |
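The rxe_net.c cleanup replaces memset() calls with `= {0}` initializers; every member not named in the initializer is zeroed, which is why the explicit NULL assignments for sk_user_data and encap_destroy could be dropped. A tiny standalone illustration with a made-up struct:

    #include <string.h>

    struct tunnel_cfg_sketch {
        void *sk_user_data;
        int   encap_type;
        void (*encap_destroy)(void *);
    };

    static void zero_init_demo(void)
    {
        struct tunnel_cfg_sketch a = {0};     /* all members zero/NULL */
        struct tunnel_cfg_sketch b;

        memset(&b, 0, sizeof(b));             /* same effect, extra statement */
        (void)a;
        (void)b;
    }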
diff --git a/drivers/infiniband/sw/rxe/rxe_qp.c b/drivers/infiniband/sw/rxe/rxe_qp.c index b8036cfbce04..c3e60e4bde6e 100644 --- a/drivers/infiniband/sw/rxe/rxe_qp.c +++ b/drivers/infiniband/sw/rxe/rxe_qp.c | |||
@@ -522,6 +522,7 @@ static void rxe_qp_reset(struct rxe_qp *qp) | |||
522 | if (qp->sq.queue) { | 522 | if (qp->sq.queue) { |
523 | __rxe_do_task(&qp->comp.task); | 523 | __rxe_do_task(&qp->comp.task); |
524 | __rxe_do_task(&qp->req.task); | 524 | __rxe_do_task(&qp->req.task); |
525 | rxe_queue_reset(qp->sq.queue); | ||
525 | } | 526 | } |
526 | 527 | ||
527 | /* cleanup attributes */ | 528 | /* cleanup attributes */ |
@@ -573,6 +574,7 @@ void rxe_qp_error(struct rxe_qp *qp) | |||
573 | { | 574 | { |
574 | qp->req.state = QP_STATE_ERROR; | 575 | qp->req.state = QP_STATE_ERROR; |
575 | qp->resp.state = QP_STATE_ERROR; | 576 | qp->resp.state = QP_STATE_ERROR; |
577 | qp->attr.qp_state = IB_QPS_ERR; | ||
576 | 578 | ||
577 | /* drain work and packet queues */ | 579 | /* drain work and packet queues */ |
578 | rxe_run_task(&qp->resp.task, 1); | 580 | rxe_run_task(&qp->resp.task, 1); |
diff --git a/drivers/infiniband/sw/rxe/rxe_queue.c b/drivers/infiniband/sw/rxe/rxe_queue.c index 08274254eb88..d14bf496d62d 100644 --- a/drivers/infiniband/sw/rxe/rxe_queue.c +++ b/drivers/infiniband/sw/rxe/rxe_queue.c | |||
@@ -84,6 +84,15 @@ err1: | |||
84 | return -EINVAL; | 84 | return -EINVAL; |
85 | } | 85 | } |
86 | 86 | ||
87 | inline void rxe_queue_reset(struct rxe_queue *q) | ||
88 | { | ||
89 | /* the queue is composed of a header and the memory | ||
90 | * of the actual queue. See "struct rxe_queue_buf" in rxe_queue.h. | ||
91 | * Reset only the queue itself, not the management header. | ||
92 | */ | ||
93 | memset(q->buf->data, 0, q->buf_size - sizeof(struct rxe_queue_buf)); | ||
94 | } | ||
95 | |||
87 | struct rxe_queue *rxe_queue_init(struct rxe_dev *rxe, | 96 | struct rxe_queue *rxe_queue_init(struct rxe_dev *rxe, |
88 | int *num_elem, | 97 | int *num_elem, |
89 | unsigned int elem_size) | 98 | unsigned int elem_size) |
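rxe_queue_reset() above clears only the element storage that follows the shared rxe_queue_buf header, so the management fields survive the reset. With a flexible array member, "everything after the header" is simply the buffer size minus the header size, as in this sketch (types are illustrative):

    #include <stddef.h>
    #include <string.h>

    struct queue_buf_sketch {
        unsigned int producer_index;
        unsigned int consumer_index;
        char data[];                   /* element storage follows the header */
    };

    static void queue_reset(struct queue_buf_sketch *buf, size_t buf_size)
    {
        /* keep the header intact, wipe only the element area */
        memset(buf->data, 0, buf_size - sizeof(*buf));
    }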
diff --git a/drivers/infiniband/sw/rxe/rxe_queue.h b/drivers/infiniband/sw/rxe/rxe_queue.h index 239fd609c31e..8c8641c87817 100644 --- a/drivers/infiniband/sw/rxe/rxe_queue.h +++ b/drivers/infiniband/sw/rxe/rxe_queue.h | |||
@@ -84,6 +84,8 @@ int do_mmap_info(struct rxe_dev *rxe, | |||
84 | size_t buf_size, | 84 | size_t buf_size, |
85 | struct rxe_mmap_info **ip_p); | 85 | struct rxe_mmap_info **ip_p); |
86 | 86 | ||
87 | void rxe_queue_reset(struct rxe_queue *q); | ||
88 | |||
87 | struct rxe_queue *rxe_queue_init(struct rxe_dev *rxe, | 89 | struct rxe_queue *rxe_queue_init(struct rxe_dev *rxe, |
88 | int *num_elem, | 90 | int *num_elem, |
89 | unsigned int elem_size); | 91 | unsigned int elem_size); |
diff --git a/drivers/infiniband/sw/rxe/rxe_req.c b/drivers/infiniband/sw/rxe/rxe_req.c index 832846b73ea0..22bd9630dcd9 100644 --- a/drivers/infiniband/sw/rxe/rxe_req.c +++ b/drivers/infiniband/sw/rxe/rxe_req.c | |||
@@ -696,7 +696,8 @@ next_wqe: | |||
696 | qp->req.wqe_index); | 696 | qp->req.wqe_index); |
697 | wqe->state = wqe_state_done; | 697 | wqe->state = wqe_state_done; |
698 | wqe->status = IB_WC_SUCCESS; | 698 | wqe->status = IB_WC_SUCCESS; |
699 | goto complete; | 699 | __rxe_do_task(&qp->comp.task); |
700 | return 0; | ||
700 | } | 701 | } |
701 | payload = mtu; | 702 | payload = mtu; |
702 | } | 703 | } |
@@ -745,13 +746,17 @@ err: | |||
745 | wqe->status = IB_WC_LOC_PROT_ERR; | 746 | wqe->status = IB_WC_LOC_PROT_ERR; |
746 | wqe->state = wqe_state_error; | 747 | wqe->state = wqe_state_error; |
747 | 748 | ||
748 | complete: | 749 | /* |
749 | if (qp_type(qp) != IB_QPT_RC) { | 750 | * IBA Spec. Section 10.7.3.1 SIGNALED COMPLETIONS |
750 | while (rxe_completer(qp) == 0) | 751 | * ---------8<---------8<------------- |
751 | ; | 752 | * ...Note that if a completion error occurs, a Work Completion |
752 | } | 753 | * will always be generated, even if the signaling |
753 | 754 | * indicator requests an Unsignaled Completion. | |
754 | return 0; | 755 | * ---------8<---------8<------------- |
756 | */ | ||
757 | wqe->wr.send_flags |= IB_SEND_SIGNALED; | ||
758 | __rxe_do_task(&qp->comp.task); | ||
759 | return -EAGAIN; | ||
755 | 760 | ||
756 | exit: | 761 | exit: |
757 | return -EAGAIN; | 762 | return -EAGAIN; |
diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c index 15c01c3cd540..e6f9b2d745ca 100644 --- a/drivers/iommu/arm-smmu-v3.c +++ b/drivers/iommu/arm-smmu-v3.c | |||
@@ -2636,17 +2636,26 @@ static int arm_smmu_device_dt_probe(struct platform_device *pdev) | |||
2636 | /* And we're up. Go go go! */ | 2636 | /* And we're up. Go go go! */ |
2637 | of_iommu_set_ops(dev->of_node, &arm_smmu_ops); | 2637 | of_iommu_set_ops(dev->of_node, &arm_smmu_ops); |
2638 | #ifdef CONFIG_PCI | 2638 | #ifdef CONFIG_PCI |
2639 | pci_request_acs(); | 2639 | if (pci_bus_type.iommu_ops != &arm_smmu_ops) { |
2640 | ret = bus_set_iommu(&pci_bus_type, &arm_smmu_ops); | 2640 | pci_request_acs(); |
2641 | if (ret) | 2641 | ret = bus_set_iommu(&pci_bus_type, &arm_smmu_ops); |
2642 | return ret; | 2642 | if (ret) |
2643 | return ret; | ||
2644 | } | ||
2643 | #endif | 2645 | #endif |
2644 | #ifdef CONFIG_ARM_AMBA | 2646 | #ifdef CONFIG_ARM_AMBA |
2645 | ret = bus_set_iommu(&amba_bustype, &arm_smmu_ops); | 2647 | if (amba_bustype.iommu_ops != &arm_smmu_ops) { |
2646 | if (ret) | 2648 | ret = bus_set_iommu(&amba_bustype, &arm_smmu_ops); |
2647 | return ret; | 2649 | if (ret) |
2650 | return ret; | ||
2651 | } | ||
2648 | #endif | 2652 | #endif |
2649 | return bus_set_iommu(&platform_bus_type, &arm_smmu_ops); | 2653 | if (platform_bus_type.iommu_ops != &arm_smmu_ops) { |
2654 | ret = bus_set_iommu(&platform_bus_type, &arm_smmu_ops); | ||
2655 | if (ret) | ||
2656 | return ret; | ||
2657 | } | ||
2658 | return 0; | ||
2650 | } | 2659 | } |
2651 | 2660 | ||
2652 | static int arm_smmu_device_remove(struct platform_device *pdev) | 2661 | static int arm_smmu_device_remove(struct platform_device *pdev) |
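The arm-smmu-v3 probe change makes bus registration idempotent: with several SMMU instances probing, bus_set_iommu() is attempted only if the bus does not already carry these ops. Reduced to its essence, with stand-in types and a stand-in setter rather than the real IOMMU core API:

    struct iommu_ops_sketch { int dummy; };

    struct bus_sketch {
        const struct iommu_ops_sketch *iommu_ops;
    };

    static int set_bus_ops(struct bus_sketch *bus,
                           const struct iommu_ops_sketch *ops)
    {
        bus->iommu_ops = ops;          /* stand-in for bus_set_iommu() */
        return 0;
    }

    static int register_once(struct bus_sketch *bus,
                             const struct iommu_ops_sketch *ops)
    {
        if (bus->iommu_ops == ops)
            return 0;                  /* an earlier instance already registered */
        return set_bus_ops(bus, ops);
    }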
diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c index c841eb7a1a74..8f7281444551 100644 --- a/drivers/iommu/arm-smmu.c +++ b/drivers/iommu/arm-smmu.c | |||
@@ -324,8 +324,10 @@ struct arm_smmu_master_cfg { | |||
324 | #define INVALID_SMENDX -1 | 324 | #define INVALID_SMENDX -1 |
325 | #define __fwspec_cfg(fw) ((struct arm_smmu_master_cfg *)fw->iommu_priv) | 325 | #define __fwspec_cfg(fw) ((struct arm_smmu_master_cfg *)fw->iommu_priv) |
326 | #define fwspec_smmu(fw) (__fwspec_cfg(fw)->smmu) | 326 | #define fwspec_smmu(fw) (__fwspec_cfg(fw)->smmu) |
327 | #define fwspec_smendx(fw, i) \ | ||
328 | (i >= fw->num_ids ? INVALID_SMENDX : __fwspec_cfg(fw)->smendx[i]) | ||
327 | #define for_each_cfg_sme(fw, i, idx) \ | 329 | #define for_each_cfg_sme(fw, i, idx) \ |
328 | for (i = 0; idx = __fwspec_cfg(fw)->smendx[i], i < fw->num_ids; ++i) | 330 | for (i = 0; idx = fwspec_smendx(fw, i), i < fw->num_ids; ++i) |
329 | 331 | ||
330 | struct arm_smmu_device { | 332 | struct arm_smmu_device { |
331 | struct device *dev; | 333 | struct device *dev; |
@@ -1228,6 +1230,16 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev) | |||
1228 | return -ENXIO; | 1230 | return -ENXIO; |
1229 | } | 1231 | } |
1230 | 1232 | ||
1233 | /* | ||
1234 | * FIXME: The arch/arm DMA API code tries to attach devices to its own | ||
1235 | * domains between of_xlate() and add_device() - we have no way to cope | ||
1236 | * with that, so until ARM gets converted to rely on groups and default | ||
1237 | * domains, just say no (but more politely than by dereferencing NULL). | ||
1238 | * This should be at least a WARN_ON once that's sorted. | ||
1239 | */ | ||
1240 | if (!fwspec->iommu_priv) | ||
1241 | return -ENODEV; | ||
1242 | |||
1231 | smmu = fwspec_smmu(fwspec); | 1243 | smmu = fwspec_smmu(fwspec); |
1232 | /* Ensure that the domain is finalised */ | 1244 | /* Ensure that the domain is finalised */ |
1233 | ret = arm_smmu_init_domain_context(domain, smmu); | 1245 | ret = arm_smmu_init_domain_context(domain, smmu); |
@@ -1390,7 +1402,7 @@ static int arm_smmu_add_device(struct device *dev) | |||
1390 | fwspec = dev->iommu_fwspec; | 1402 | fwspec = dev->iommu_fwspec; |
1391 | if (ret) | 1403 | if (ret) |
1392 | goto out_free; | 1404 | goto out_free; |
1393 | } else if (fwspec) { | 1405 | } else if (fwspec && fwspec->ops == &arm_smmu_ops) { |
1394 | smmu = arm_smmu_get_by_node(to_of_node(fwspec->iommu_fwnode)); | 1406 | smmu = arm_smmu_get_by_node(to_of_node(fwspec->iommu_fwnode)); |
1395 | } else { | 1407 | } else { |
1396 | return -ENODEV; | 1408 | return -ENODEV; |
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c index a4407eabf0e6..3965e73db51c 100644 --- a/drivers/iommu/intel-iommu.c +++ b/drivers/iommu/intel-iommu.c | |||
@@ -1711,6 +1711,7 @@ static void disable_dmar_iommu(struct intel_iommu *iommu) | |||
1711 | if (!iommu->domains || !iommu->domain_ids) | 1711 | if (!iommu->domains || !iommu->domain_ids) |
1712 | return; | 1712 | return; |
1713 | 1713 | ||
1714 | again: | ||
1714 | spin_lock_irqsave(&device_domain_lock, flags); | 1715 | spin_lock_irqsave(&device_domain_lock, flags); |
1715 | list_for_each_entry_safe(info, tmp, &device_domain_list, global) { | 1716 | list_for_each_entry_safe(info, tmp, &device_domain_list, global) { |
1716 | struct dmar_domain *domain; | 1717 | struct dmar_domain *domain; |
@@ -1723,10 +1724,19 @@ static void disable_dmar_iommu(struct intel_iommu *iommu) | |||
1723 | 1724 | ||
1724 | domain = info->domain; | 1725 | domain = info->domain; |
1725 | 1726 | ||
1726 | dmar_remove_one_dev_info(domain, info->dev); | 1727 | __dmar_remove_one_dev_info(info); |
1727 | 1728 | ||
1728 | if (!domain_type_is_vm_or_si(domain)) | 1729 | if (!domain_type_is_vm_or_si(domain)) { |
1730 | /* | ||
1731 | * The domain_exit() function can't be called under | ||
1732 | * device_domain_lock, as it takes this lock itself. | ||
1733 | * So release the lock here and re-run the loop | ||
1734 | * afterwards. | ||
1735 | */ | ||
1736 | spin_unlock_irqrestore(&device_domain_lock, flags); | ||
1729 | domain_exit(domain); | 1737 | domain_exit(domain); |
1738 | goto again; | ||
1739 | } | ||
1730 | } | 1740 | } |
1731 | spin_unlock_irqrestore(&device_domain_lock, flags); | 1741 | spin_unlock_irqrestore(&device_domain_lock, flags); |
1732 | 1742 | ||
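disable_dmar_iommu() above cannot call domain_exit() while holding device_domain_lock, because domain_exit() takes that lock itself; the fix releases the spinlock, performs the teardown, and restarts the scan since the list may have changed while the lock was dropped. A compact sketch of that drop-lock-and-rescan pattern; the teardown stub stands in for work that reacquires the lock:

    #include <linux/list.h>
    #include <linux/spinlock.h>
    #include <linux/types.h>

    static DEFINE_SPINLOCK(item_lock);
    static LIST_HEAD(item_list);

    struct item {
        struct list_head node;
        bool needs_teardown;
    };

    static void teardown(struct item *it)
    {
        (void)it;                      /* stand-in for work that takes item_lock */
    }

    static void drain_items(void)
    {
        struct item *it, *tmp;
        unsigned long flags;

    again:
        spin_lock_irqsave(&item_lock, flags);
        list_for_each_entry_safe(it, tmp, &item_list, node) {
            if (!it->needs_teardown)
                continue;
            list_del(&it->node);
            spin_unlock_irqrestore(&item_lock, flags);
            teardown(it);              /* safe: the lock is no longer held */
            goto again;                /* the list may have changed meanwhile */
        }
        spin_unlock_irqrestore(&item_lock, flags);
    }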
diff --git a/drivers/media/dvb-frontends/Kconfig b/drivers/media/dvb-frontends/Kconfig index 012225587c25..b71b747ee0ba 100644 --- a/drivers/media/dvb-frontends/Kconfig +++ b/drivers/media/dvb-frontends/Kconfig | |||
@@ -513,6 +513,11 @@ config DVB_AS102_FE | |||
513 | depends on DVB_CORE | 513 | depends on DVB_CORE |
514 | default DVB_AS102 | 514 | default DVB_AS102 |
515 | 515 | ||
516 | config DVB_GP8PSK_FE | ||
517 | tristate | ||
518 | depends on DVB_CORE | ||
519 | default DVB_USB_GP8PSK | ||
520 | |||
516 | comment "DVB-C (cable) frontends" | 521 | comment "DVB-C (cable) frontends" |
517 | depends on DVB_CORE | 522 | depends on DVB_CORE |
518 | 523 | ||
diff --git a/drivers/media/dvb-frontends/Makefile b/drivers/media/dvb-frontends/Makefile index e90165ad361b..93921a4eaa27 100644 --- a/drivers/media/dvb-frontends/Makefile +++ b/drivers/media/dvb-frontends/Makefile | |||
@@ -121,6 +121,7 @@ obj-$(CONFIG_DVB_RTL2832_SDR) += rtl2832_sdr.o | |||
121 | obj-$(CONFIG_DVB_M88RS2000) += m88rs2000.o | 121 | obj-$(CONFIG_DVB_M88RS2000) += m88rs2000.o |
122 | obj-$(CONFIG_DVB_AF9033) += af9033.o | 122 | obj-$(CONFIG_DVB_AF9033) += af9033.o |
123 | obj-$(CONFIG_DVB_AS102_FE) += as102_fe.o | 123 | obj-$(CONFIG_DVB_AS102_FE) += as102_fe.o |
124 | obj-$(CONFIG_DVB_GP8PSK_FE) += gp8psk-fe.o | ||
124 | obj-$(CONFIG_DVB_TC90522) += tc90522.o | 125 | obj-$(CONFIG_DVB_TC90522) += tc90522.o |
125 | obj-$(CONFIG_DVB_HORUS3A) += horus3a.o | 126 | obj-$(CONFIG_DVB_HORUS3A) += horus3a.o |
126 | obj-$(CONFIG_DVB_ASCOT2E) += ascot2e.o | 127 | obj-$(CONFIG_DVB_ASCOT2E) += ascot2e.o |
diff --git a/drivers/media/usb/dvb-usb/gp8psk-fe.c b/drivers/media/dvb-frontends/gp8psk-fe.c index db6eb79cde07..93f59bfea092 100644 --- a/drivers/media/usb/dvb-usb/gp8psk-fe.c +++ b/drivers/media/dvb-frontends/gp8psk-fe.c | |||
@@ -1,5 +1,5 @@ | |||
1 | /* DVB USB compliant Linux driver for the | 1 | /* |
2 | * - GENPIX 8pks/qpsk/DCII USB2.0 DVB-S module | 2 | * Frontend driver for the GENPIX 8pks/qpsk/DCII USB2.0 DVB-S module |
3 | * | 3 | * |
4 | * Copyright (C) 2006,2007 Alan Nisota (alannisota@gmail.com) | 4 | * Copyright (C) 2006,2007 Alan Nisota (alannisota@gmail.com) |
5 | * Copyright (C) 2006,2007 Genpix Electronics (genpix@genpix-electronics.com) | 5 | * Copyright (C) 2006,2007 Genpix Electronics (genpix@genpix-electronics.com) |
@@ -8,17 +8,31 @@ | |||
8 | * | 8 | * |
9 | * This module is based off the vp7045 and vp702x modules | 9 | * This module is based off the vp7045 and vp702x modules |
10 | * | 10 | * |
11 | * This program is free software; you can redistribute it and/or modify it | 11 | * This program is free software; you can redistribute it and/or modify it |
12 | * under the terms of the GNU General Public License as published by the Free | 12 | * under the terms of the GNU General Public License as published by the Free |
13 | * Software Foundation, version 2. | 13 | * Software Foundation, version 2. |
14 | * | ||
15 | * see Documentation/dvb/README.dvb-usb for more information | ||
16 | */ | 14 | */ |
17 | #include "gp8psk.h" | 15 | |
16 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | ||
17 | |||
18 | #include "gp8psk-fe.h" | ||
19 | #include "dvb_frontend.h" | ||
20 | |||
21 | static int debug; | ||
22 | module_param(debug, int, 0644); | ||
23 | MODULE_PARM_DESC(debug, "Turn on/off debugging (default:off)."); | ||
24 | |||
25 | #define dprintk(fmt, arg...) do { \ | ||
26 | if (debug) \ | ||
27 | printk(KERN_DEBUG pr_fmt("%s: " fmt), \ | ||
28 | __func__, ##arg); \ | ||
29 | } while (0) | ||
18 | 30 | ||
19 | struct gp8psk_fe_state { | 31 | struct gp8psk_fe_state { |
20 | struct dvb_frontend fe; | 32 | struct dvb_frontend fe; |
21 | struct dvb_usb_device *d; | 33 | void *priv; |
34 | const struct gp8psk_fe_ops *ops; | ||
35 | bool is_rev1; | ||
22 | u8 lock; | 36 | u8 lock; |
23 | u16 snr; | 37 | u16 snr; |
24 | unsigned long next_status_check; | 38 | unsigned long next_status_check; |
@@ -29,22 +43,24 @@ static int gp8psk_tuned_to_DCII(struct dvb_frontend *fe) | |||
29 | { | 43 | { |
30 | struct gp8psk_fe_state *st = fe->demodulator_priv; | 44 | struct gp8psk_fe_state *st = fe->demodulator_priv; |
31 | u8 status; | 45 | u8 status; |
32 | gp8psk_usb_in_op(st->d, GET_8PSK_CONFIG, 0, 0, &status, 1); | 46 | |
47 | st->ops->in(st->priv, GET_8PSK_CONFIG, 0, 0, &status, 1); | ||
33 | return status & bmDCtuned; | 48 | return status & bmDCtuned; |
34 | } | 49 | } |
35 | 50 | ||
36 | static int gp8psk_set_tuner_mode(struct dvb_frontend *fe, int mode) | 51 | static int gp8psk_set_tuner_mode(struct dvb_frontend *fe, int mode) |
37 | { | 52 | { |
38 | struct gp8psk_fe_state *state = fe->demodulator_priv; | 53 | struct gp8psk_fe_state *st = fe->demodulator_priv; |
39 | return gp8psk_usb_out_op(state->d, SET_8PSK_CONFIG, mode, 0, NULL, 0); | 54 | |
55 | return st->ops->out(st->priv, SET_8PSK_CONFIG, mode, 0, NULL, 0); | ||
40 | } | 56 | } |
41 | 57 | ||
42 | static int gp8psk_fe_update_status(struct gp8psk_fe_state *st) | 58 | static int gp8psk_fe_update_status(struct gp8psk_fe_state *st) |
43 | { | 59 | { |
44 | u8 buf[6]; | 60 | u8 buf[6]; |
45 | if (time_after(jiffies,st->next_status_check)) { | 61 | if (time_after(jiffies,st->next_status_check)) { |
46 | gp8psk_usb_in_op(st->d, GET_SIGNAL_LOCK, 0,0,&st->lock,1); | 62 | st->ops->in(st->priv, GET_SIGNAL_LOCK, 0, 0, &st->lock, 1); |
47 | gp8psk_usb_in_op(st->d, GET_SIGNAL_STRENGTH, 0,0,buf,6); | 63 | st->ops->in(st->priv, GET_SIGNAL_STRENGTH, 0, 0, buf, 6); |
48 | st->snr = (buf[1]) << 8 | buf[0]; | 64 | st->snr = (buf[1]) << 8 | buf[0]; |
49 | st->next_status_check = jiffies + (st->status_check_interval*HZ)/1000; | 65 | st->next_status_check = jiffies + (st->status_check_interval*HZ)/1000; |
50 | } | 66 | } |
@@ -116,13 +132,12 @@ static int gp8psk_fe_get_tune_settings(struct dvb_frontend* fe, struct dvb_front | |||
116 | 132 | ||
117 | static int gp8psk_fe_set_frontend(struct dvb_frontend *fe) | 133 | static int gp8psk_fe_set_frontend(struct dvb_frontend *fe) |
118 | { | 134 | { |
119 | struct gp8psk_fe_state *state = fe->demodulator_priv; | 135 | struct gp8psk_fe_state *st = fe->demodulator_priv; |
120 | struct dtv_frontend_properties *c = &fe->dtv_property_cache; | 136 | struct dtv_frontend_properties *c = &fe->dtv_property_cache; |
121 | u8 cmd[10]; | 137 | u8 cmd[10]; |
122 | u32 freq = c->frequency * 1000; | 138 | u32 freq = c->frequency * 1000; |
123 | int gp_product_id = le16_to_cpu(state->d->udev->descriptor.idProduct); | ||
124 | 139 | ||
125 | deb_fe("%s()\n", __func__); | 140 | dprintk("%s()\n", __func__); |
126 | 141 | ||
127 | cmd[4] = freq & 0xff; | 142 | cmd[4] = freq & 0xff; |
128 | cmd[5] = (freq >> 8) & 0xff; | 143 | cmd[5] = (freq >> 8) & 0xff; |
@@ -136,21 +151,21 @@ static int gp8psk_fe_set_frontend(struct dvb_frontend *fe) | |||
136 | switch (c->delivery_system) { | 151 | switch (c->delivery_system) { |
137 | case SYS_DVBS: | 152 | case SYS_DVBS: |
138 | if (c->modulation != QPSK) { | 153 | if (c->modulation != QPSK) { |
139 | deb_fe("%s: unsupported modulation selected (%d)\n", | 154 | dprintk("%s: unsupported modulation selected (%d)\n", |
140 | __func__, c->modulation); | 155 | __func__, c->modulation); |
141 | return -EOPNOTSUPP; | 156 | return -EOPNOTSUPP; |
142 | } | 157 | } |
143 | c->fec_inner = FEC_AUTO; | 158 | c->fec_inner = FEC_AUTO; |
144 | break; | 159 | break; |
145 | case SYS_DVBS2: /* kept for backwards compatibility */ | 160 | case SYS_DVBS2: /* kept for backwards compatibility */ |
146 | deb_fe("%s: DVB-S2 delivery system selected\n", __func__); | 161 | dprintk("%s: DVB-S2 delivery system selected\n", __func__); |
147 | break; | 162 | break; |
148 | case SYS_TURBO: | 163 | case SYS_TURBO: |
149 | deb_fe("%s: Turbo-FEC delivery system selected\n", __func__); | 164 | dprintk("%s: Turbo-FEC delivery system selected\n", __func__); |
150 | break; | 165 | break; |
151 | 166 | ||
152 | default: | 167 | default: |
153 | deb_fe("%s: unsupported delivery system selected (%d)\n", | 168 | dprintk("%s: unsupported delivery system selected (%d)\n", |
154 | __func__, c->delivery_system); | 169 | __func__, c->delivery_system); |
155 | return -EOPNOTSUPP; | 170 | return -EOPNOTSUPP; |
156 | } | 171 | } |
@@ -161,9 +176,9 @@ static int gp8psk_fe_set_frontend(struct dvb_frontend *fe) | |||
161 | cmd[3] = (c->symbol_rate >> 24) & 0xff; | 176 | cmd[3] = (c->symbol_rate >> 24) & 0xff; |
162 | switch (c->modulation) { | 177 | switch (c->modulation) { |
163 | case QPSK: | 178 | case QPSK: |
164 | if (gp_product_id == USB_PID_GENPIX_8PSK_REV_1_WARM) | 179 | if (st->is_rev1) |
165 | if (gp8psk_tuned_to_DCII(fe)) | 180 | if (gp8psk_tuned_to_DCII(fe)) |
166 | gp8psk_bcm4500_reload(state->d); | 181 | st->ops->reload(st->priv); |
167 | switch (c->fec_inner) { | 182 | switch (c->fec_inner) { |
168 | case FEC_1_2: | 183 | case FEC_1_2: |
169 | cmd[9] = 0; break; | 184 | cmd[9] = 0; break; |
@@ -207,18 +222,18 @@ static int gp8psk_fe_set_frontend(struct dvb_frontend *fe) | |||
207 | cmd[9] = 0; | 222 | cmd[9] = 0; |
208 | break; | 223 | break; |
209 | default: /* Unknown modulation */ | 224 | default: /* Unknown modulation */ |
210 | deb_fe("%s: unsupported modulation selected (%d)\n", | 225 | dprintk("%s: unsupported modulation selected (%d)\n", |
211 | __func__, c->modulation); | 226 | __func__, c->modulation); |
212 | return -EOPNOTSUPP; | 227 | return -EOPNOTSUPP; |
213 | } | 228 | } |
214 | 229 | ||
215 | if (gp_product_id == USB_PID_GENPIX_8PSK_REV_1_WARM) | 230 | if (st->is_rev1) |
216 | gp8psk_set_tuner_mode(fe, 0); | 231 | gp8psk_set_tuner_mode(fe, 0); |
217 | gp8psk_usb_out_op(state->d, TUNE_8PSK, 0, 0, cmd, 10); | 232 | st->ops->out(st->priv, TUNE_8PSK, 0, 0, cmd, 10); |
218 | 233 | ||
219 | state->lock = 0; | 234 | st->lock = 0; |
220 | state->next_status_check = jiffies; | 235 | st->next_status_check = jiffies; |
221 | state->status_check_interval = 200; | 236 | st->status_check_interval = 200; |
222 | 237 | ||
223 | return 0; | 238 | return 0; |
224 | } | 239 | } |
@@ -228,9 +243,9 @@ static int gp8psk_fe_send_diseqc_msg (struct dvb_frontend* fe, | |||
228 | { | 243 | { |
229 | struct gp8psk_fe_state *st = fe->demodulator_priv; | 244 | struct gp8psk_fe_state *st = fe->demodulator_priv; |
230 | 245 | ||
231 | deb_fe("%s\n",__func__); | 246 | dprintk("%s\n", __func__); |
232 | 247 | ||
233 | if (gp8psk_usb_out_op(st->d,SEND_DISEQC_COMMAND, m->msg[0], 0, | 248 | if (st->ops->out(st->priv, SEND_DISEQC_COMMAND, m->msg[0], 0, |
234 | m->msg, m->msg_len)) { | 249 | m->msg, m->msg_len)) { |
235 | return -EINVAL; | 250 | return -EINVAL; |
236 | } | 251 | } |
@@ -243,12 +258,12 @@ static int gp8psk_fe_send_diseqc_burst(struct dvb_frontend *fe, | |||
243 | struct gp8psk_fe_state *st = fe->demodulator_priv; | 258 | struct gp8psk_fe_state *st = fe->demodulator_priv; |
244 | u8 cmd; | 259 | u8 cmd; |
245 | 260 | ||
246 | deb_fe("%s\n",__func__); | 261 | dprintk("%s\n", __func__); |
247 | 262 | ||
248 | /* These commands are certainly wrong */ | 263 | /* These commands are certainly wrong */ |
249 | cmd = (burst == SEC_MINI_A) ? 0x00 : 0x01; | 264 | cmd = (burst == SEC_MINI_A) ? 0x00 : 0x01; |
250 | 265 | ||
251 | if (gp8psk_usb_out_op(st->d,SEND_DISEQC_COMMAND, cmd, 0, | 266 | if (st->ops->out(st->priv, SEND_DISEQC_COMMAND, cmd, 0, |
252 | &cmd, 0)) { | 267 | &cmd, 0)) { |
253 | return -EINVAL; | 268 | return -EINVAL; |
254 | } | 269 | } |
@@ -258,10 +273,10 @@ static int gp8psk_fe_send_diseqc_burst(struct dvb_frontend *fe, | |||
258 | static int gp8psk_fe_set_tone(struct dvb_frontend *fe, | 273 | static int gp8psk_fe_set_tone(struct dvb_frontend *fe, |
259 | enum fe_sec_tone_mode tone) | 274 | enum fe_sec_tone_mode tone) |
260 | { | 275 | { |
261 | struct gp8psk_fe_state* state = fe->demodulator_priv; | 276 | struct gp8psk_fe_state *st = fe->demodulator_priv; |
262 | 277 | ||
263 | if (gp8psk_usb_out_op(state->d,SET_22KHZ_TONE, | 278 | if (st->ops->out(st->priv, SET_22KHZ_TONE, |
264 | (tone == SEC_TONE_ON), 0, NULL, 0)) { | 279 | (tone == SEC_TONE_ON), 0, NULL, 0)) { |
265 | return -EINVAL; | 280 | return -EINVAL; |
266 | } | 281 | } |
267 | return 0; | 282 | return 0; |
@@ -270,9 +285,9 @@ static int gp8psk_fe_set_tone(struct dvb_frontend *fe, | |||
270 | static int gp8psk_fe_set_voltage(struct dvb_frontend *fe, | 285 | static int gp8psk_fe_set_voltage(struct dvb_frontend *fe, |
271 | enum fe_sec_voltage voltage) | 286 | enum fe_sec_voltage voltage) |
272 | { | 287 | { |
273 | struct gp8psk_fe_state* state = fe->demodulator_priv; | 288 | struct gp8psk_fe_state *st = fe->demodulator_priv; |
274 | 289 | ||
275 | if (gp8psk_usb_out_op(state->d,SET_LNB_VOLTAGE, | 290 | if (st->ops->out(st->priv, SET_LNB_VOLTAGE, |
276 | voltage == SEC_VOLTAGE_18, 0, NULL, 0)) { | 291 | voltage == SEC_VOLTAGE_18, 0, NULL, 0)) { |
277 | return -EINVAL; | 292 | return -EINVAL; |
278 | } | 293 | } |
@@ -281,52 +296,60 @@ static int gp8psk_fe_set_voltage(struct dvb_frontend *fe, | |||
281 | 296 | ||
282 | static int gp8psk_fe_enable_high_lnb_voltage(struct dvb_frontend* fe, long onoff) | 297 | static int gp8psk_fe_enable_high_lnb_voltage(struct dvb_frontend* fe, long onoff) |
283 | { | 298 | { |
284 | struct gp8psk_fe_state* state = fe->demodulator_priv; | 299 | struct gp8psk_fe_state *st = fe->demodulator_priv; |
285 | return gp8psk_usb_out_op(state->d, USE_EXTRA_VOLT, onoff, 0,NULL,0); | 300 | |
301 | return st->ops->out(st->priv, USE_EXTRA_VOLT, onoff, 0, NULL, 0); | ||
286 | } | 302 | } |
287 | 303 | ||
288 | static int gp8psk_fe_send_legacy_dish_cmd (struct dvb_frontend* fe, unsigned long sw_cmd) | 304 | static int gp8psk_fe_send_legacy_dish_cmd (struct dvb_frontend* fe, unsigned long sw_cmd) |
289 | { | 305 | { |
290 | struct gp8psk_fe_state* state = fe->demodulator_priv; | 306 | struct gp8psk_fe_state *st = fe->demodulator_priv; |
291 | u8 cmd = sw_cmd & 0x7f; | 307 | u8 cmd = sw_cmd & 0x7f; |
292 | 308 | ||
293 | if (gp8psk_usb_out_op(state->d,SET_DN_SWITCH, cmd, 0, | 309 | if (st->ops->out(st->priv, SET_DN_SWITCH, cmd, 0, NULL, 0)) |
294 | NULL, 0)) { | ||
295 | return -EINVAL; | 310 | return -EINVAL; |
296 | } | 311 | |
297 | if (gp8psk_usb_out_op(state->d,SET_LNB_VOLTAGE, !!(sw_cmd & 0x80), | 312 | if (st->ops->out(st->priv, SET_LNB_VOLTAGE, !!(sw_cmd & 0x80), |
298 | 0, NULL, 0)) { | 313 | 0, NULL, 0)) |
299 | return -EINVAL; | 314 | return -EINVAL; |
300 | } | ||
301 | 315 | ||
302 | return 0; | 316 | return 0; |
303 | } | 317 | } |
304 | 318 | ||
305 | static void gp8psk_fe_release(struct dvb_frontend* fe) | 319 | static void gp8psk_fe_release(struct dvb_frontend* fe) |
306 | { | 320 | { |
307 | struct gp8psk_fe_state *state = fe->demodulator_priv; | 321 | struct gp8psk_fe_state *st = fe->demodulator_priv; |
308 | kfree(state); | 322 | |
323 | kfree(st); | ||
309 | } | 324 | } |
310 | 325 | ||
311 | static struct dvb_frontend_ops gp8psk_fe_ops; | 326 | static struct dvb_frontend_ops gp8psk_fe_ops; |
312 | 327 | ||
313 | struct dvb_frontend * gp8psk_fe_attach(struct dvb_usb_device *d) | 328 | struct dvb_frontend *gp8psk_fe_attach(const struct gp8psk_fe_ops *ops, |
329 | void *priv, bool is_rev1) | ||
314 | { | 330 | { |
315 | struct gp8psk_fe_state *s = kzalloc(sizeof(struct gp8psk_fe_state), GFP_KERNEL); | 331 | struct gp8psk_fe_state *st; |
316 | if (s == NULL) | ||
317 | goto error; | ||
318 | |||
319 | s->d = d; | ||
320 | memcpy(&s->fe.ops, &gp8psk_fe_ops, sizeof(struct dvb_frontend_ops)); | ||
321 | s->fe.demodulator_priv = s; | ||
322 | |||
323 | goto success; | ||
324 | error: | ||
325 | return NULL; | ||
326 | success: | ||
327 | return &s->fe; | ||
328 | } | ||
329 | 332 | ||
333 | if (!ops || !ops->in || !ops->out || !ops->reload) { | ||
334 | pr_err("Error! gp8psk-fe ops not defined.\n"); | ||
335 | return NULL; | ||
336 | } | ||
337 | |||
338 | st = kzalloc(sizeof(struct gp8psk_fe_state), GFP_KERNEL); | ||
339 | if (!st) | ||
340 | return NULL; | ||
341 | |||
342 | memcpy(&st->fe.ops, &gp8psk_fe_ops, sizeof(struct dvb_frontend_ops)); | ||
343 | st->fe.demodulator_priv = st; | ||
344 | st->ops = ops; | ||
345 | st->priv = priv; | ||
346 | st->is_rev1 = is_rev1; | ||
347 | |||
348 | pr_info("Frontend %sattached\n", is_rev1 ? "revision 1 " : ""); | ||
349 | |||
350 | return &st->fe; | ||
351 | } | ||
352 | EXPORT_SYMBOL_GPL(gp8psk_fe_attach); | ||
330 | 353 | ||
331 | static struct dvb_frontend_ops gp8psk_fe_ops = { | 354 | static struct dvb_frontend_ops gp8psk_fe_ops = { |
332 | .delsys = { SYS_DVBS }, | 355 | .delsys = { SYS_DVBS }, |
@@ -370,3 +393,8 @@ static struct dvb_frontend_ops gp8psk_fe_ops = { | |||
370 | .dishnetwork_send_legacy_command = gp8psk_fe_send_legacy_dish_cmd, | 393 | .dishnetwork_send_legacy_command = gp8psk_fe_send_legacy_dish_cmd, |
371 | .enable_high_lnb_voltage = gp8psk_fe_enable_high_lnb_voltage | 394 | .enable_high_lnb_voltage = gp8psk_fe_enable_high_lnb_voltage |
372 | }; | 395 | }; |
396 | |||
397 | MODULE_AUTHOR("Alan Nisota <alannisota@gmail.com>"); | ||
398 | MODULE_DESCRIPTION("Frontend Driver for Genpix DVB-S"); | ||
399 | MODULE_VERSION("1.1"); | ||
400 | MODULE_LICENSE("GPL"); | ||
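With the frontend split out as its own module (see the Kconfig and Makefile hunks above), the new debug module parameter controls the dprintk() output added here. Assuming the module keeps the name implied by the Makefile rule, it can be enabled at load time with, for example, `modprobe gp8psk-fe debug=1`, or toggled afterwards through /sys/module/gp8psk_fe/parameters/debug.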
diff --git a/drivers/media/dvb-frontends/gp8psk-fe.h b/drivers/media/dvb-frontends/gp8psk-fe.h new file mode 100644 index 000000000000..6c7944b1ecd6 --- /dev/null +++ b/drivers/media/dvb-frontends/gp8psk-fe.h | |||
@@ -0,0 +1,82 @@ | |||
1 | /* | ||
2 | * gp8psk_fe driver | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or modify | ||
5 | * it under the terms of the GNU General Public License as published by | ||
6 | * the Free Software Foundation; either version 2, or (at your option) | ||
7 | * any later version. | ||
8 | * | ||
9 | * This program is distributed in the hope that it will be useful, | ||
10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
12 | * GNU General Public License for more details. | ||
13 | */ | ||
14 | |||
15 | #ifndef GP8PSK_FE_H | ||
16 | #define GP8PSK_FE_H | ||
17 | |||
18 | #include <linux/types.h> | ||
19 | |||
20 | /* gp8psk commands */ | ||
21 | |||
22 | #define GET_8PSK_CONFIG 0x80 /* in */ | ||
23 | #define SET_8PSK_CONFIG 0x81 | ||
24 | #define I2C_WRITE 0x83 | ||
25 | #define I2C_READ 0x84 | ||
26 | #define ARM_TRANSFER 0x85 | ||
27 | #define TUNE_8PSK 0x86 | ||
28 | #define GET_SIGNAL_STRENGTH 0x87 /* in */ | ||
29 | #define LOAD_BCM4500 0x88 | ||
30 | #define BOOT_8PSK 0x89 /* in */ | ||
31 | #define START_INTERSIL 0x8A /* in */ | ||
32 | #define SET_LNB_VOLTAGE 0x8B | ||
33 | #define SET_22KHZ_TONE 0x8C | ||
34 | #define SEND_DISEQC_COMMAND 0x8D | ||
35 | #define SET_DVB_MODE 0x8E | ||
36 | #define SET_DN_SWITCH 0x8F | ||
37 | #define GET_SIGNAL_LOCK 0x90 /* in */ | ||
38 | #define GET_FW_VERS 0x92 | ||
39 | #define GET_SERIAL_NUMBER 0x93 /* in */ | ||
40 | #define USE_EXTRA_VOLT 0x94 | ||
41 | #define GET_FPGA_VERS 0x95 | ||
42 | #define CW3K_INIT 0x9d | ||
43 | |||
44 | /* PSK_configuration bits */ | ||
45 | #define bm8pskStarted 0x01 | ||
46 | #define bm8pskFW_Loaded 0x02 | ||
47 | #define bmIntersilOn 0x04 | ||
48 | #define bmDVBmode 0x08 | ||
49 | #define bm22kHz 0x10 | ||
50 | #define bmSEL18V 0x20 | ||
51 | #define bmDCtuned 0x40 | ||
52 | #define bmArmed 0x80 | ||
53 | |||
54 | /* Satellite modulation modes */ | ||
55 | #define ADV_MOD_DVB_QPSK 0 /* DVB-S QPSK */ | ||
56 | #define ADV_MOD_TURBO_QPSK 1 /* Turbo QPSK */ | ||
57 | #define ADV_MOD_TURBO_8PSK 2 /* Turbo 8PSK (also used for Trellis 8PSK) */ | ||
58 | #define ADV_MOD_TURBO_16QAM 3 /* Turbo 16QAM (also used for Trellis 8PSK) */ | ||
59 | |||
60 | #define ADV_MOD_DCII_C_QPSK 4 /* Digicipher II Combo */ | ||
61 | #define ADV_MOD_DCII_I_QPSK 5 /* Digicipher II I-stream */ | ||
62 | #define ADV_MOD_DCII_Q_QPSK 6 /* Digicipher II Q-stream */ | ||
63 | #define ADV_MOD_DCII_C_OQPSK 7 /* Digicipher II offset QPSK */ | ||
64 | #define ADV_MOD_DSS_QPSK 8 /* DSS (DIRECTV) QPSK */ | ||
65 | #define ADV_MOD_DVB_BPSK 9 /* DVB-S BPSK */ | ||
66 | |||
67 | /* firmware revision id's */ | ||
68 | #define GP8PSK_FW_REV1 0x020604 | ||
69 | #define GP8PSK_FW_REV2 0x020704 | ||
70 | #define GP8PSK_FW_VERS(_fw_vers) \ | ||
71 | ((_fw_vers)[2]<<0x10 | (_fw_vers)[1]<<0x08 | (_fw_vers)[0]) | ||
72 | |||
73 | struct gp8psk_fe_ops { | ||
74 | int (*in)(void *priv, u8 req, u16 value, u16 index, u8 *b, int blen); | ||
75 | int (*out)(void *priv, u8 req, u16 value, u16 index, u8 *b, int blen); | ||
76 | int (*reload)(void *priv); | ||
77 | }; | ||
78 | |||
79 | struct dvb_frontend *gp8psk_fe_attach(const struct gp8psk_fe_ops *ops, | ||
80 | void *priv, bool is_rev1); | ||
81 | |||
82 | #endif | ||
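A hedged sketch (not part of the patch) of how the GET_FW_VERS reply bytes relate to GP8PSK_FW_VERS() and to the log line printed by gp8psk_info() later in this series; the byte values are invented for illustration:

#include "gp8psk-fe.h"	/* GP8PSK_FW_VERS(), GP8PSK_FW_REV1, u8/u32 types */

static u32 fw_vers_example(void)
{
	u8 fw_vers[6] = { 0x04, 0x06, 0x02, 0x0f, 0x03, 0x07 };

	/*
	 * fw_vers[2] << 16 | fw_vers[1] << 8 | fw_vers[0]
	 *   = 0x02 << 16 | 0x06 << 8 | 0x04 = 0x020604, i.e. GP8PSK_FW_REV1.
	 * For these sample bytes gp8psk_info() would report
	 * "FW Version = 2.06.4 (0x20604) Build 2007/03/15".
	 */
	return GP8PSK_FW_VERS(fw_vers);
}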
diff --git a/drivers/media/i2c/ir-kbd-i2c.c b/drivers/media/i2c/ir-kbd-i2c.c index f95a6bc839d5..cede3975d04b 100644 --- a/drivers/media/i2c/ir-kbd-i2c.c +++ b/drivers/media/i2c/ir-kbd-i2c.c | |||
@@ -118,7 +118,7 @@ static int get_key_haup_common(struct IR_i2c *ir, enum rc_type *protocol, | |||
118 | *protocol = RC_TYPE_RC6_MCE; | 118 | *protocol = RC_TYPE_RC6_MCE; |
119 | dev &= 0x7f; | 119 | dev &= 0x7f; |
120 | dprintk(1, "ir hauppauge (rc6-mce): t%d vendor=%d dev=%d code=%d\n", | 120 | dprintk(1, "ir hauppauge (rc6-mce): t%d vendor=%d dev=%d code=%d\n", |
121 | toggle, vendor, dev, code); | 121 | *ptoggle, vendor, dev, code); |
122 | } else { | 122 | } else { |
123 | *ptoggle = 0; | 123 | *ptoggle = 0; |
124 | *protocol = RC_TYPE_RC6_6A_32; | 124 | *protocol = RC_TYPE_RC6_6A_32; |
diff --git a/drivers/media/usb/dvb-usb/Makefile b/drivers/media/usb/dvb-usb/Makefile index 2a7b5a963acf..3b3f32b426d1 100644 --- a/drivers/media/usb/dvb-usb/Makefile +++ b/drivers/media/usb/dvb-usb/Makefile | |||
@@ -8,7 +8,7 @@ obj-$(CONFIG_DVB_USB_VP7045) += dvb-usb-vp7045.o | |||
8 | dvb-usb-vp702x-objs := vp702x.o vp702x-fe.o | 8 | dvb-usb-vp702x-objs := vp702x.o vp702x-fe.o |
9 | obj-$(CONFIG_DVB_USB_VP702X) += dvb-usb-vp702x.o | 9 | obj-$(CONFIG_DVB_USB_VP702X) += dvb-usb-vp702x.o |
10 | 10 | ||
11 | dvb-usb-gp8psk-objs := gp8psk.o gp8psk-fe.o | 11 | dvb-usb-gp8psk-objs := gp8psk.o |
12 | obj-$(CONFIG_DVB_USB_GP8PSK) += dvb-usb-gp8psk.o | 12 | obj-$(CONFIG_DVB_USB_GP8PSK) += dvb-usb-gp8psk.o |
13 | 13 | ||
14 | dvb-usb-dtt200u-objs := dtt200u.o dtt200u-fe.o | 14 | dvb-usb-dtt200u-objs := dtt200u.o dtt200u-fe.o |
diff --git a/drivers/media/usb/dvb-usb/af9005.c b/drivers/media/usb/dvb-usb/af9005.c index b257780fb380..7853261906b1 100644 --- a/drivers/media/usb/dvb-usb/af9005.c +++ b/drivers/media/usb/dvb-usb/af9005.c | |||
@@ -53,7 +53,6 @@ struct af9005_device_state { | |||
53 | u8 sequence; | 53 | u8 sequence; |
54 | int led_state; | 54 | int led_state; |
55 | unsigned char data[256]; | 55 | unsigned char data[256]; |
56 | struct mutex data_mutex; | ||
57 | }; | 56 | }; |
58 | 57 | ||
59 | static int af9005_generic_read_write(struct dvb_usb_device *d, u16 reg, | 58 | static int af9005_generic_read_write(struct dvb_usb_device *d, u16 reg, |
@@ -72,7 +71,7 @@ static int af9005_generic_read_write(struct dvb_usb_device *d, u16 reg, | |||
72 | return -EINVAL; | 71 | return -EINVAL; |
73 | } | 72 | } |
74 | 73 | ||
75 | mutex_lock(&st->data_mutex); | 74 | mutex_lock(&d->data_mutex); |
76 | st->data[0] = 14; /* rest of buffer length low */ | 75 | st->data[0] = 14; /* rest of buffer length low */ |
77 | st->data[1] = 0; /* rest of buffer length high */ | 76 | st->data[1] = 0; /* rest of buffer length high */ |
78 | 77 | ||
@@ -140,7 +139,7 @@ static int af9005_generic_read_write(struct dvb_usb_device *d, u16 reg, | |||
140 | values[i] = st->data[8 + i]; | 139 | values[i] = st->data[8 + i]; |
141 | 140 | ||
142 | ret: | 141 | ret: |
143 | mutex_unlock(&st->data_mutex); | 142 | mutex_unlock(&d->data_mutex); |
144 | return ret; | 143 | return ret; |
145 | 144 | ||
146 | } | 145 | } |
@@ -481,7 +480,7 @@ int af9005_send_command(struct dvb_usb_device *d, u8 command, u8 * wbuf, | |||
481 | } | 480 | } |
482 | packet_len = wlen + 5; | 481 | packet_len = wlen + 5; |
483 | 482 | ||
484 | mutex_lock(&st->data_mutex); | 483 | mutex_lock(&d->data_mutex); |
485 | 484 | ||
486 | st->data[0] = (u8) (packet_len & 0xff); | 485 | st->data[0] = (u8) (packet_len & 0xff); |
487 | st->data[1] = (u8) ((packet_len & 0xff00) >> 8); | 486 | st->data[1] = (u8) ((packet_len & 0xff00) >> 8); |
@@ -512,7 +511,7 @@ int af9005_send_command(struct dvb_usb_device *d, u8 command, u8 * wbuf, | |||
512 | rbuf[i] = st->data[i + 7]; | 511 | rbuf[i] = st->data[i + 7]; |
513 | } | 512 | } |
514 | 513 | ||
515 | mutex_unlock(&st->data_mutex); | 514 | mutex_unlock(&d->data_mutex); |
516 | return ret; | 515 | return ret; |
517 | } | 516 | } |
518 | 517 | ||
@@ -523,7 +522,7 @@ int af9005_read_eeprom(struct dvb_usb_device *d, u8 address, u8 * values, | |||
523 | u8 seq; | 522 | u8 seq; |
524 | int ret, i; | 523 | int ret, i; |
525 | 524 | ||
526 | mutex_lock(&st->data_mutex); | 525 | mutex_lock(&d->data_mutex); |
527 | 526 | ||
528 | memset(st->data, 0, sizeof(st->data)); | 527 | memset(st->data, 0, sizeof(st->data)); |
529 | 528 | ||
@@ -559,7 +558,7 @@ int af9005_read_eeprom(struct dvb_usb_device *d, u8 address, u8 * values, | |||
559 | for (i = 0; i < len; i++) | 558 | for (i = 0; i < len; i++) |
560 | values[i] = st->data[6 + i]; | 559 | values[i] = st->data[6 + i]; |
561 | } | 560 | } |
562 | mutex_unlock(&st->data_mutex); | 561 | mutex_unlock(&d->data_mutex); |
563 | 562 | ||
564 | return ret; | 563 | return ret; |
565 | } | 564 | } |
@@ -847,7 +846,7 @@ static int af9005_rc_query(struct dvb_usb_device *d, u32 * event, int *state) | |||
847 | return 0; | 846 | return 0; |
848 | } | 847 | } |
849 | 848 | ||
850 | mutex_lock(&st->data_mutex); | 849 | mutex_lock(&d->data_mutex); |
851 | 850 | ||
852 | /* deb_info("rc_query\n"); */ | 851 | /* deb_info("rc_query\n"); */ |
853 | st->data[0] = 3; /* rest of packet length low */ | 852 | st->data[0] = 3; /* rest of packet length low */ |
@@ -890,7 +889,7 @@ static int af9005_rc_query(struct dvb_usb_device *d, u32 * event, int *state) | |||
890 | } | 889 | } |
891 | 890 | ||
892 | ret: | 891 | ret: |
893 | mutex_unlock(&st->data_mutex); | 892 | mutex_unlock(&d->data_mutex); |
894 | return ret; | 893 | return ret; |
895 | } | 894 | } |
896 | 895 | ||
@@ -1004,20 +1003,8 @@ static struct dvb_usb_device_properties af9005_properties; | |||
1004 | static int af9005_usb_probe(struct usb_interface *intf, | 1003 | static int af9005_usb_probe(struct usb_interface *intf, |
1005 | const struct usb_device_id *id) | 1004 | const struct usb_device_id *id) |
1006 | { | 1005 | { |
1007 | struct dvb_usb_device *d; | 1006 | return dvb_usb_device_init(intf, &af9005_properties, |
1008 | struct af9005_device_state *st; | 1007 | THIS_MODULE, NULL, adapter_nr); |
1009 | int ret; | ||
1010 | |||
1011 | ret = dvb_usb_device_init(intf, &af9005_properties, | ||
1012 | THIS_MODULE, &d, adapter_nr); | ||
1013 | |||
1014 | if (ret < 0) | ||
1015 | return ret; | ||
1016 | |||
1017 | st = d->priv; | ||
1018 | mutex_init(&st->data_mutex); | ||
1019 | |||
1020 | return 0; | ||
1021 | } | 1008 | } |
1022 | 1009 | ||
1023 | enum af9005_usb_table_entry { | 1010 | enum af9005_usb_table_entry { |
diff --git a/drivers/media/usb/dvb-usb/cinergyT2-core.c b/drivers/media/usb/dvb-usb/cinergyT2-core.c index 8ac825413d5a..290275bc7fde 100644 --- a/drivers/media/usb/dvb-usb/cinergyT2-core.c +++ b/drivers/media/usb/dvb-usb/cinergyT2-core.c | |||
@@ -42,7 +42,6 @@ DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr); | |||
42 | struct cinergyt2_state { | 42 | struct cinergyt2_state { |
43 | u8 rc_counter; | 43 | u8 rc_counter; |
44 | unsigned char data[64]; | 44 | unsigned char data[64]; |
45 | struct mutex data_mutex; | ||
46 | }; | 45 | }; |
47 | 46 | ||
48 | /* We are missing a release hook with usb_device data */ | 47 | /* We are missing a release hook with usb_device data */ |
@@ -56,12 +55,12 @@ static int cinergyt2_streaming_ctrl(struct dvb_usb_adapter *adap, int enable) | |||
56 | struct cinergyt2_state *st = d->priv; | 55 | struct cinergyt2_state *st = d->priv; |
57 | int ret; | 56 | int ret; |
58 | 57 | ||
59 | mutex_lock(&st->data_mutex); | 58 | mutex_lock(&d->data_mutex); |
60 | st->data[0] = CINERGYT2_EP1_CONTROL_STREAM_TRANSFER; | 59 | st->data[0] = CINERGYT2_EP1_CONTROL_STREAM_TRANSFER; |
61 | st->data[1] = enable ? 1 : 0; | 60 | st->data[1] = enable ? 1 : 0; |
62 | 61 | ||
63 | ret = dvb_usb_generic_rw(d, st->data, 2, st->data, 64, 0); | 62 | ret = dvb_usb_generic_rw(d, st->data, 2, st->data, 64, 0); |
64 | mutex_unlock(&st->data_mutex); | 63 | mutex_unlock(&d->data_mutex); |
65 | 64 | ||
66 | return ret; | 65 | return ret; |
67 | } | 66 | } |
@@ -71,12 +70,12 @@ static int cinergyt2_power_ctrl(struct dvb_usb_device *d, int enable) | |||
71 | struct cinergyt2_state *st = d->priv; | 70 | struct cinergyt2_state *st = d->priv; |
72 | int ret; | 71 | int ret; |
73 | 72 | ||
74 | mutex_lock(&st->data_mutex); | 73 | mutex_lock(&d->data_mutex); |
75 | st->data[0] = CINERGYT2_EP1_SLEEP_MODE; | 74 | st->data[0] = CINERGYT2_EP1_SLEEP_MODE; |
76 | st->data[1] = enable ? 0 : 1; | 75 | st->data[1] = enable ? 0 : 1; |
77 | 76 | ||
78 | ret = dvb_usb_generic_rw(d, st->data, 2, st->data, 3, 0); | 77 | ret = dvb_usb_generic_rw(d, st->data, 2, st->data, 3, 0); |
79 | mutex_unlock(&st->data_mutex); | 78 | mutex_unlock(&d->data_mutex); |
80 | 79 | ||
81 | return ret; | 80 | return ret; |
82 | } | 81 | } |
@@ -89,7 +88,7 @@ static int cinergyt2_frontend_attach(struct dvb_usb_adapter *adap) | |||
89 | 88 | ||
90 | adap->fe_adap[0].fe = cinergyt2_fe_attach(adap->dev); | 89 | adap->fe_adap[0].fe = cinergyt2_fe_attach(adap->dev); |
91 | 90 | ||
92 | mutex_lock(&st->data_mutex); | 91 | mutex_lock(&d->data_mutex); |
93 | st->data[0] = CINERGYT2_EP1_GET_FIRMWARE_VERSION; | 92 | st->data[0] = CINERGYT2_EP1_GET_FIRMWARE_VERSION; |
94 | 93 | ||
95 | ret = dvb_usb_generic_rw(d, st->data, 1, st->data, 3, 0); | 94 | ret = dvb_usb_generic_rw(d, st->data, 1, st->data, 3, 0); |
@@ -97,7 +96,7 @@ static int cinergyt2_frontend_attach(struct dvb_usb_adapter *adap) | |||
97 | deb_rc("cinergyt2_power_ctrl() Failed to retrieve sleep " | 96 | deb_rc("cinergyt2_power_ctrl() Failed to retrieve sleep " |
98 | "state info\n"); | 97 | "state info\n"); |
99 | } | 98 | } |
100 | mutex_unlock(&st->data_mutex); | 99 | mutex_unlock(&d->data_mutex); |
101 | 100 | ||
102 | /* Copy this pointer as we are gonna need it in the release phase */ | 101 | /* Copy this pointer as we are gonna need it in the release phase */ |
103 | cinergyt2_usb_device = adap->dev; | 102 | cinergyt2_usb_device = adap->dev; |
@@ -166,7 +165,7 @@ static int cinergyt2_rc_query(struct dvb_usb_device *d, u32 *event, int *state) | |||
166 | 165 | ||
167 | *state = REMOTE_NO_KEY_PRESSED; | 166 | *state = REMOTE_NO_KEY_PRESSED; |
168 | 167 | ||
169 | mutex_lock(&st->data_mutex); | 168 | mutex_lock(&d->data_mutex); |
170 | st->data[0] = CINERGYT2_EP1_GET_RC_EVENTS; | 169 | st->data[0] = CINERGYT2_EP1_GET_RC_EVENTS; |
171 | 170 | ||
172 | ret = dvb_usb_generic_rw(d, st->data, 1, st->data, 5, 0); | 171 | ret = dvb_usb_generic_rw(d, st->data, 1, st->data, 5, 0); |
@@ -202,29 +201,17 @@ static int cinergyt2_rc_query(struct dvb_usb_device *d, u32 *event, int *state) | |||
202 | } | 201 | } |
203 | 202 | ||
204 | ret: | 203 | ret: |
205 | mutex_unlock(&st->data_mutex); | 204 | mutex_unlock(&d->data_mutex); |
206 | return ret; | 205 | return ret; |
207 | } | 206 | } |
208 | 207 | ||
209 | static int cinergyt2_usb_probe(struct usb_interface *intf, | 208 | static int cinergyt2_usb_probe(struct usb_interface *intf, |
210 | const struct usb_device_id *id) | 209 | const struct usb_device_id *id) |
211 | { | 210 | { |
212 | struct dvb_usb_device *d; | 211 | return dvb_usb_device_init(intf, &cinergyt2_properties, |
213 | struct cinergyt2_state *st; | 212 | THIS_MODULE, NULL, adapter_nr); |
214 | int ret; | ||
215 | |||
216 | ret = dvb_usb_device_init(intf, &cinergyt2_properties, | ||
217 | THIS_MODULE, &d, adapter_nr); | ||
218 | if (ret < 0) | ||
219 | return ret; | ||
220 | |||
221 | st = d->priv; | ||
222 | mutex_init(&st->data_mutex); | ||
223 | |||
224 | return 0; | ||
225 | } | 213 | } |
226 | 214 | ||
227 | |||
228 | static struct usb_device_id cinergyt2_usb_table[] = { | 215 | static struct usb_device_id cinergyt2_usb_table[] = { |
229 | { USB_DEVICE(USB_VID_TERRATEC, 0x0038) }, | 216 | { USB_DEVICE(USB_VID_TERRATEC, 0x0038) }, |
230 | { 0 } | 217 | { 0 } |
diff --git a/drivers/media/usb/dvb-usb/cxusb.c b/drivers/media/usb/dvb-usb/cxusb.c index 39772812269d..243403081fa5 100644 --- a/drivers/media/usb/dvb-usb/cxusb.c +++ b/drivers/media/usb/dvb-usb/cxusb.c | |||
@@ -68,7 +68,7 @@ static int cxusb_ctrl_msg(struct dvb_usb_device *d, | |||
68 | 68 | ||
69 | wo = (rbuf == NULL || rlen == 0); /* write-only */ | 69 | wo = (rbuf == NULL || rlen == 0); /* write-only */ |
70 | 70 | ||
71 | mutex_lock(&st->data_mutex); | 71 | mutex_lock(&d->data_mutex); |
72 | st->data[0] = cmd; | 72 | st->data[0] = cmd; |
73 | memcpy(&st->data[1], wbuf, wlen); | 73 | memcpy(&st->data[1], wbuf, wlen); |
74 | if (wo) | 74 | if (wo) |
@@ -77,7 +77,7 @@ static int cxusb_ctrl_msg(struct dvb_usb_device *d, | |||
77 | ret = dvb_usb_generic_rw(d, st->data, 1 + wlen, | 77 | ret = dvb_usb_generic_rw(d, st->data, 1 + wlen, |
78 | rbuf, rlen, 0); | 78 | rbuf, rlen, 0); |
79 | 79 | ||
80 | mutex_unlock(&st->data_mutex); | 80 | mutex_unlock(&d->data_mutex); |
81 | return ret; | 81 | return ret; |
82 | } | 82 | } |
83 | 83 | ||
@@ -1461,43 +1461,36 @@ static struct dvb_usb_device_properties cxusb_mygica_t230_properties; | |||
1461 | static int cxusb_probe(struct usb_interface *intf, | 1461 | static int cxusb_probe(struct usb_interface *intf, |
1462 | const struct usb_device_id *id) | 1462 | const struct usb_device_id *id) |
1463 | { | 1463 | { |
1464 | struct dvb_usb_device *d; | ||
1465 | struct cxusb_state *st; | ||
1466 | |||
1467 | if (0 == dvb_usb_device_init(intf, &cxusb_medion_properties, | 1464 | if (0 == dvb_usb_device_init(intf, &cxusb_medion_properties, |
1468 | THIS_MODULE, &d, adapter_nr) || | 1465 | THIS_MODULE, NULL, adapter_nr) || |
1469 | 0 == dvb_usb_device_init(intf, &cxusb_bluebird_lgh064f_properties, | 1466 | 0 == dvb_usb_device_init(intf, &cxusb_bluebird_lgh064f_properties, |
1470 | THIS_MODULE, &d, adapter_nr) || | 1467 | THIS_MODULE, NULL, adapter_nr) || |
1471 | 0 == dvb_usb_device_init(intf, &cxusb_bluebird_dee1601_properties, | 1468 | 0 == dvb_usb_device_init(intf, &cxusb_bluebird_dee1601_properties, |
1472 | THIS_MODULE, &d, adapter_nr) || | 1469 | THIS_MODULE, NULL, adapter_nr) || |
1473 | 0 == dvb_usb_device_init(intf, &cxusb_bluebird_lgz201_properties, | 1470 | 0 == dvb_usb_device_init(intf, &cxusb_bluebird_lgz201_properties, |
1474 | THIS_MODULE, &d, adapter_nr) || | 1471 | THIS_MODULE, NULL, adapter_nr) || |
1475 | 0 == dvb_usb_device_init(intf, &cxusb_bluebird_dtt7579_properties, | 1472 | 0 == dvb_usb_device_init(intf, &cxusb_bluebird_dtt7579_properties, |
1476 | THIS_MODULE, &d, adapter_nr) || | 1473 | THIS_MODULE, NULL, adapter_nr) || |
1477 | 0 == dvb_usb_device_init(intf, &cxusb_bluebird_dualdig4_properties, | 1474 | 0 == dvb_usb_device_init(intf, &cxusb_bluebird_dualdig4_properties, |
1478 | THIS_MODULE, &d, adapter_nr) || | 1475 | THIS_MODULE, NULL, adapter_nr) || |
1479 | 0 == dvb_usb_device_init(intf, &cxusb_bluebird_nano2_properties, | 1476 | 0 == dvb_usb_device_init(intf, &cxusb_bluebird_nano2_properties, |
1480 | THIS_MODULE, &d, adapter_nr) || | 1477 | THIS_MODULE, NULL, adapter_nr) || |
1481 | 0 == dvb_usb_device_init(intf, | 1478 | 0 == dvb_usb_device_init(intf, |
1482 | &cxusb_bluebird_nano2_needsfirmware_properties, | 1479 | &cxusb_bluebird_nano2_needsfirmware_properties, |
1483 | THIS_MODULE, &d, adapter_nr) || | 1480 | THIS_MODULE, NULL, adapter_nr) || |
1484 | 0 == dvb_usb_device_init(intf, &cxusb_aver_a868r_properties, | 1481 | 0 == dvb_usb_device_init(intf, &cxusb_aver_a868r_properties, |
1485 | THIS_MODULE, &d, adapter_nr) || | 1482 | THIS_MODULE, NULL, adapter_nr) || |
1486 | 0 == dvb_usb_device_init(intf, | 1483 | 0 == dvb_usb_device_init(intf, |
1487 | &cxusb_bluebird_dualdig4_rev2_properties, | 1484 | &cxusb_bluebird_dualdig4_rev2_properties, |
1488 | THIS_MODULE, &d, adapter_nr) || | 1485 | THIS_MODULE, NULL, adapter_nr) || |
1489 | 0 == dvb_usb_device_init(intf, &cxusb_d680_dmb_properties, | 1486 | 0 == dvb_usb_device_init(intf, &cxusb_d680_dmb_properties, |
1490 | THIS_MODULE, &d, adapter_nr) || | 1487 | THIS_MODULE, NULL, adapter_nr) || |
1491 | 0 == dvb_usb_device_init(intf, &cxusb_mygica_d689_properties, | 1488 | 0 == dvb_usb_device_init(intf, &cxusb_mygica_d689_properties, |
1492 | THIS_MODULE, &d, adapter_nr) || | 1489 | THIS_MODULE, NULL, adapter_nr) || |
1493 | 0 == dvb_usb_device_init(intf, &cxusb_mygica_t230_properties, | 1490 | 0 == dvb_usb_device_init(intf, &cxusb_mygica_t230_properties, |
1494 | THIS_MODULE, &d, adapter_nr) || | 1491 | THIS_MODULE, NULL, adapter_nr) || |
1495 | 0) { | 1492 | 0) |
1496 | st = d->priv; | ||
1497 | mutex_init(&st->data_mutex); | ||
1498 | |||
1499 | return 0; | 1493 | return 0; |
1500 | } | ||
1501 | 1494 | ||
1502 | return -EINVAL; | 1495 | return -EINVAL; |
1503 | } | 1496 | } |
diff --git a/drivers/media/usb/dvb-usb/cxusb.h b/drivers/media/usb/dvb-usb/cxusb.h index 9f3ee0e47d5c..18acda19527a 100644 --- a/drivers/media/usb/dvb-usb/cxusb.h +++ b/drivers/media/usb/dvb-usb/cxusb.h | |||
@@ -37,7 +37,6 @@ struct cxusb_state { | |||
37 | struct i2c_client *i2c_client_tuner; | 37 | struct i2c_client *i2c_client_tuner; |
38 | 38 | ||
39 | unsigned char data[MAX_XFER_SIZE]; | 39 | unsigned char data[MAX_XFER_SIZE]; |
40 | struct mutex data_mutex; | ||
41 | }; | 40 | }; |
42 | 41 | ||
43 | #endif | 42 | #endif |
diff --git a/drivers/media/usb/dvb-usb/dib0700_core.c b/drivers/media/usb/dvb-usb/dib0700_core.c index 92d5408684ac..47ce9d5de4c6 100644 --- a/drivers/media/usb/dvb-usb/dib0700_core.c +++ b/drivers/media/usb/dvb-usb/dib0700_core.c | |||
@@ -704,7 +704,7 @@ static void dib0700_rc_urb_completion(struct urb *purb) | |||
704 | struct dvb_usb_device *d = purb->context; | 704 | struct dvb_usb_device *d = purb->context; |
705 | struct dib0700_rc_response *poll_reply; | 705 | struct dib0700_rc_response *poll_reply; |
706 | enum rc_type protocol; | 706 | enum rc_type protocol; |
707 | u32 uninitialized_var(keycode); | 707 | u32 keycode; |
708 | u8 toggle; | 708 | u8 toggle; |
709 | 709 | ||
710 | deb_info("%s()\n", __func__); | 710 | deb_info("%s()\n", __func__); |
@@ -745,7 +745,8 @@ static void dib0700_rc_urb_completion(struct urb *purb) | |||
745 | poll_reply->nec.data == 0x00 && | 745 | poll_reply->nec.data == 0x00 && |
746 | poll_reply->nec.not_data == 0xff) { | 746 | poll_reply->nec.not_data == 0xff) { |
747 | poll_reply->data_state = 2; | 747 | poll_reply->data_state = 2; |
748 | break; | 748 | rc_repeat(d->rc_dev); |
749 | goto resubmit; | ||
749 | } | 750 | } |
750 | 751 | ||
751 | if ((poll_reply->nec.data ^ poll_reply->nec.not_data) != 0xff) { | 752 | if ((poll_reply->nec.data ^ poll_reply->nec.not_data) != 0xff) { |
diff --git a/drivers/media/usb/dvb-usb/dtt200u.c b/drivers/media/usb/dvb-usb/dtt200u.c index f88572c7ae7c..fcbff7fb0c4e 100644 --- a/drivers/media/usb/dvb-usb/dtt200u.c +++ b/drivers/media/usb/dvb-usb/dtt200u.c | |||
@@ -22,7 +22,6 @@ DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr); | |||
22 | 22 | ||
23 | struct dtt200u_state { | 23 | struct dtt200u_state { |
24 | unsigned char data[80]; | 24 | unsigned char data[80]; |
25 | struct mutex data_mutex; | ||
26 | }; | 25 | }; |
27 | 26 | ||
28 | static int dtt200u_power_ctrl(struct dvb_usb_device *d, int onoff) | 27 | static int dtt200u_power_ctrl(struct dvb_usb_device *d, int onoff) |
@@ -30,23 +29,24 @@ static int dtt200u_power_ctrl(struct dvb_usb_device *d, int onoff) | |||
30 | struct dtt200u_state *st = d->priv; | 29 | struct dtt200u_state *st = d->priv; |
31 | int ret = 0; | 30 | int ret = 0; |
32 | 31 | ||
33 | mutex_lock(&st->data_mutex); | 32 | mutex_lock(&d->data_mutex); |
34 | 33 | ||
35 | st->data[0] = SET_INIT; | 34 | st->data[0] = SET_INIT; |
36 | 35 | ||
37 | if (onoff) | 36 | if (onoff) |
38 | ret = dvb_usb_generic_write(d, st->data, 2); | 37 | ret = dvb_usb_generic_write(d, st->data, 2); |
39 | 38 | ||
40 | mutex_unlock(&st->data_mutex); | 39 | mutex_unlock(&d->data_mutex); |
41 | return ret; | 40 | return ret; |
42 | } | 41 | } |
43 | 42 | ||
44 | static int dtt200u_streaming_ctrl(struct dvb_usb_adapter *adap, int onoff) | 43 | static int dtt200u_streaming_ctrl(struct dvb_usb_adapter *adap, int onoff) |
45 | { | 44 | { |
46 | struct dtt200u_state *st = adap->dev->priv; | 45 | struct dvb_usb_device *d = adap->dev; |
46 | struct dtt200u_state *st = d->priv; | ||
47 | int ret; | 47 | int ret; |
48 | 48 | ||
49 | mutex_lock(&st->data_mutex); | 49 | mutex_lock(&d->data_mutex); |
50 | st->data[0] = SET_STREAMING; | 50 | st->data[0] = SET_STREAMING; |
51 | st->data[1] = onoff; | 51 | st->data[1] = onoff; |
52 | 52 | ||
@@ -61,26 +61,27 @@ static int dtt200u_streaming_ctrl(struct dvb_usb_adapter *adap, int onoff) | |||
61 | ret = dvb_usb_generic_write(adap->dev, st->data, 1); | 61 | ret = dvb_usb_generic_write(adap->dev, st->data, 1); |
62 | 62 | ||
63 | ret: | 63 | ret: |
64 | mutex_unlock(&st->data_mutex); | 64 | mutex_unlock(&d->data_mutex); |
65 | 65 | ||
66 | return ret; | 66 | return ret; |
67 | } | 67 | } |
68 | 68 | ||
69 | static int dtt200u_pid_filter(struct dvb_usb_adapter *adap, int index, u16 pid, int onoff) | 69 | static int dtt200u_pid_filter(struct dvb_usb_adapter *adap, int index, u16 pid, int onoff) |
70 | { | 70 | { |
71 | struct dtt200u_state *st = adap->dev->priv; | 71 | struct dvb_usb_device *d = adap->dev; |
72 | struct dtt200u_state *st = d->priv; | ||
72 | int ret; | 73 | int ret; |
73 | 74 | ||
74 | pid = onoff ? pid : 0; | 75 | pid = onoff ? pid : 0; |
75 | 76 | ||
76 | mutex_lock(&st->data_mutex); | 77 | mutex_lock(&d->data_mutex); |
77 | st->data[0] = SET_PID_FILTER; | 78 | st->data[0] = SET_PID_FILTER; |
78 | st->data[1] = index; | 79 | st->data[1] = index; |
79 | st->data[2] = pid & 0xff; | 80 | st->data[2] = pid & 0xff; |
80 | st->data[3] = (pid >> 8) & 0x1f; | 81 | st->data[3] = (pid >> 8) & 0x1f; |
81 | 82 | ||
82 | ret = dvb_usb_generic_write(adap->dev, st->data, 4); | 83 | ret = dvb_usb_generic_write(adap->dev, st->data, 4); |
83 | mutex_unlock(&st->data_mutex); | 84 | mutex_unlock(&d->data_mutex); |
84 | 85 | ||
85 | return ret; | 86 | return ret; |
86 | } | 87 | } |
@@ -91,7 +92,7 @@ static int dtt200u_rc_query(struct dvb_usb_device *d) | |||
91 | u32 scancode; | 92 | u32 scancode; |
92 | int ret; | 93 | int ret; |
93 | 94 | ||
94 | mutex_lock(&st->data_mutex); | 95 | mutex_lock(&d->data_mutex); |
95 | st->data[0] = GET_RC_CODE; | 96 | st->data[0] = GET_RC_CODE; |
96 | 97 | ||
97 | ret = dvb_usb_generic_rw(d, st->data, 1, st->data, 5, 0); | 98 | ret = dvb_usb_generic_rw(d, st->data, 1, st->data, 5, 0); |
@@ -126,7 +127,7 @@ static int dtt200u_rc_query(struct dvb_usb_device *d) | |||
126 | deb_info("st->data: %*ph\n", 5, st->data); | 127 | deb_info("st->data: %*ph\n", 5, st->data); |
127 | 128 | ||
128 | ret: | 129 | ret: |
129 | mutex_unlock(&st->data_mutex); | 130 | mutex_unlock(&d->data_mutex); |
130 | return ret; | 131 | return ret; |
131 | } | 132 | } |
132 | 133 | ||
@@ -145,24 +146,17 @@ static struct dvb_usb_device_properties wt220u_miglia_properties; | |||
145 | static int dtt200u_usb_probe(struct usb_interface *intf, | 146 | static int dtt200u_usb_probe(struct usb_interface *intf, |
146 | const struct usb_device_id *id) | 147 | const struct usb_device_id *id) |
147 | { | 148 | { |
148 | struct dvb_usb_device *d; | ||
149 | struct dtt200u_state *st; | ||
150 | |||
151 | if (0 == dvb_usb_device_init(intf, &dtt200u_properties, | 149 | if (0 == dvb_usb_device_init(intf, &dtt200u_properties, |
152 | THIS_MODULE, &d, adapter_nr) || | 150 | THIS_MODULE, NULL, adapter_nr) || |
153 | 0 == dvb_usb_device_init(intf, &wt220u_properties, | 151 | 0 == dvb_usb_device_init(intf, &wt220u_properties, |
154 | THIS_MODULE, &d, adapter_nr) || | 152 | THIS_MODULE, NULL, adapter_nr) || |
155 | 0 == dvb_usb_device_init(intf, &wt220u_fc_properties, | 153 | 0 == dvb_usb_device_init(intf, &wt220u_fc_properties, |
156 | THIS_MODULE, &d, adapter_nr) || | 154 | THIS_MODULE, NULL, adapter_nr) || |
157 | 0 == dvb_usb_device_init(intf, &wt220u_zl0353_properties, | 155 | 0 == dvb_usb_device_init(intf, &wt220u_zl0353_properties, |
158 | THIS_MODULE, &d, adapter_nr) || | 156 | THIS_MODULE, NULL, adapter_nr) || |
159 | 0 == dvb_usb_device_init(intf, &wt220u_miglia_properties, | 157 | 0 == dvb_usb_device_init(intf, &wt220u_miglia_properties, |
160 | THIS_MODULE, &d, adapter_nr)) { | 158 | THIS_MODULE, NULL, adapter_nr)) |
161 | st = d->priv; | ||
162 | mutex_init(&st->data_mutex); | ||
163 | |||
164 | return 0; | 159 | return 0; |
165 | } | ||
166 | 160 | ||
167 | return -ENODEV; | 161 | return -ENODEV; |
168 | } | 162 | } |
diff --git a/drivers/media/usb/dvb-usb/dvb-usb-init.c b/drivers/media/usb/dvb-usb/dvb-usb-init.c index 3896ba9a4179..84308569e7dc 100644 --- a/drivers/media/usb/dvb-usb/dvb-usb-init.c +++ b/drivers/media/usb/dvb-usb/dvb-usb-init.c | |||
@@ -142,6 +142,7 @@ static int dvb_usb_init(struct dvb_usb_device *d, short *adapter_nums) | |||
142 | { | 142 | { |
143 | int ret = 0; | 143 | int ret = 0; |
144 | 144 | ||
145 | mutex_init(&d->data_mutex); | ||
145 | mutex_init(&d->usb_mutex); | 146 | mutex_init(&d->usb_mutex); |
146 | mutex_init(&d->i2c_mutex); | 147 | mutex_init(&d->i2c_mutex); |
147 | 148 | ||
diff --git a/drivers/media/usb/dvb-usb/dvb-usb.h b/drivers/media/usb/dvb-usb/dvb-usb.h index 639c4678c65b..107255b08b2b 100644 --- a/drivers/media/usb/dvb-usb/dvb-usb.h +++ b/drivers/media/usb/dvb-usb/dvb-usb.h | |||
@@ -404,8 +404,12 @@ struct dvb_usb_adapter { | |||
404 | * Powered is in/decremented for each call to modify the state. | 404 | * Powered is in/decremented for each call to modify the state. |
405 | * @udev: pointer to the device's struct usb_device. | 405 | * @udev: pointer to the device's struct usb_device. |
406 | * | 406 | * |
407 | * @usb_mutex: semaphore of USB control messages (reading needs two messages) | 407 | * @data_mutex: mutex to protect the data structure used to store URB data |
408 | * @i2c_mutex: semaphore for i2c-transfers | 408 | * @usb_mutex: mutex of USB control messages (reading needs two messages). |
409 | * Note that this mutex is taken internally by the generic | ||
410 | * URB control functions, so drivers using dvb_usb_generic_rw() and | ||
411 | * derived functions should not take it themselves. | ||
412 | * @i2c_mutex: mutex for i2c-transfers | ||
409 | * | 413 | * |
410 | * @i2c_adap: device's i2c_adapter if it uses I2CoverUSB | 414 | * @i2c_adap: device's i2c_adapter if it uses I2CoverUSB |
411 | * | 415 | * |
@@ -433,6 +437,7 @@ struct dvb_usb_device { | |||
433 | int powered; | 437 | int powered; |
434 | 438 | ||
435 | /* locking */ | 439 | /* locking */ |
440 | struct mutex data_mutex; | ||
436 | struct mutex usb_mutex; | 441 | struct mutex usb_mutex; |
437 | 442 | ||
438 | /* i2c */ | 443 | /* i2c */ |
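The new @data_mutex documentation describes the contract that the converted drivers in this series (af9005, cinergyt2, cxusb, dtt200u) now follow: serialize the driver's private URB data buffer with d->data_mutex and leave d->usb_mutex to the generic helpers. A minimal sketch of that pattern, with the state struct and command byte invented for illustration:

#include "dvb-usb.h"	/* struct dvb_usb_device, dvb_usb_generic_rw() */

struct my_state {
	unsigned char data[64];	/* driver-private URB buffer, like the drivers above */
};

static int my_cmd(struct dvb_usb_device *d, u8 cmd)
{
	struct my_state *st = d->priv;
	int ret;

	mutex_lock(&d->data_mutex);	/* guards st->data, not the USB pipe */
	st->data[0] = cmd;
	ret = dvb_usb_generic_rw(d, st->data, 1, st->data, sizeof(st->data), 0);
	mutex_unlock(&d->data_mutex);	/* usb_mutex was taken inside the helper */

	return ret;
}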
diff --git a/drivers/media/usb/dvb-usb/gp8psk.c b/drivers/media/usb/dvb-usb/gp8psk.c index adfd76491451..993bb7a72985 100644 --- a/drivers/media/usb/dvb-usb/gp8psk.c +++ b/drivers/media/usb/dvb-usb/gp8psk.c | |||
@@ -15,6 +15,7 @@ | |||
15 | * see Documentation/dvb/README.dvb-usb for more information | 15 | * see Documentation/dvb/README.dvb-usb for more information |
16 | */ | 16 | */ |
17 | #include "gp8psk.h" | 17 | #include "gp8psk.h" |
18 | #include "gp8psk-fe.h" | ||
18 | 19 | ||
19 | /* debug */ | 20 | /* debug */ |
20 | static char bcm4500_firmware[] = "dvb-usb-gp8psk-02.fw"; | 21 | static char bcm4500_firmware[] = "dvb-usb-gp8psk-02.fw"; |
@@ -28,34 +29,8 @@ struct gp8psk_state { | |||
28 | unsigned char data[80]; | 29 | unsigned char data[80]; |
29 | }; | 30 | }; |
30 | 31 | ||
31 | static int gp8psk_get_fw_version(struct dvb_usb_device *d, u8 *fw_vers) | 32 | static int gp8psk_usb_in_op(struct dvb_usb_device *d, u8 req, u16 value, |
32 | { | 33 | u16 index, u8 *b, int blen) |
33 | return (gp8psk_usb_in_op(d, GET_FW_VERS, 0, 0, fw_vers, 6)); | ||
34 | } | ||
35 | |||
36 | static int gp8psk_get_fpga_version(struct dvb_usb_device *d, u8 *fpga_vers) | ||
37 | { | ||
38 | return (gp8psk_usb_in_op(d, GET_FPGA_VERS, 0, 0, fpga_vers, 1)); | ||
39 | } | ||
40 | |||
41 | static void gp8psk_info(struct dvb_usb_device *d) | ||
42 | { | ||
43 | u8 fpga_vers, fw_vers[6]; | ||
44 | |||
45 | if (!gp8psk_get_fw_version(d, fw_vers)) | ||
46 | info("FW Version = %i.%02i.%i (0x%x) Build %4i/%02i/%02i", | ||
47 | fw_vers[2], fw_vers[1], fw_vers[0], GP8PSK_FW_VERS(fw_vers), | ||
48 | 2000 + fw_vers[5], fw_vers[4], fw_vers[3]); | ||
49 | else | ||
50 | info("failed to get FW version"); | ||
51 | |||
52 | if (!gp8psk_get_fpga_version(d, &fpga_vers)) | ||
53 | info("FPGA Version = %i", fpga_vers); | ||
54 | else | ||
55 | info("failed to get FPGA version"); | ||
56 | } | ||
57 | |||
58 | int gp8psk_usb_in_op(struct dvb_usb_device *d, u8 req, u16 value, u16 index, u8 *b, int blen) | ||
59 | { | 34 | { |
60 | struct gp8psk_state *st = d->priv; | 35 | struct gp8psk_state *st = d->priv; |
61 | int ret = 0,try = 0; | 36 | int ret = 0,try = 0; |
@@ -67,7 +42,6 @@ int gp8psk_usb_in_op(struct dvb_usb_device *d, u8 req, u16 value, u16 index, u8 | |||
67 | return ret; | 42 | return ret; |
68 | 43 | ||
69 | while (ret >= 0 && ret != blen && try < 3) { | 44 | while (ret >= 0 && ret != blen && try < 3) { |
70 | memcpy(st->data, b, blen); | ||
71 | ret = usb_control_msg(d->udev, | 45 | ret = usb_control_msg(d->udev, |
72 | usb_rcvctrlpipe(d->udev,0), | 46 | usb_rcvctrlpipe(d->udev,0), |
73 | req, | 47 | req, |
@@ -81,8 +55,10 @@ int gp8psk_usb_in_op(struct dvb_usb_device *d, u8 req, u16 value, u16 index, u8 | |||
81 | if (ret < 0 || ret != blen) { | 55 | if (ret < 0 || ret != blen) { |
82 | warn("usb in %d operation failed.", req); | 56 | warn("usb in %d operation failed.", req); |
83 | ret = -EIO; | 57 | ret = -EIO; |
84 | } else | 58 | } else { |
85 | ret = 0; | 59 | ret = 0; |
60 | memcpy(b, st->data, blen); | ||
61 | } | ||
86 | 62 | ||
87 | deb_xfer("in: req. %x, val: %x, ind: %x, buffer: ",req,value,index); | 63 | deb_xfer("in: req. %x, val: %x, ind: %x, buffer: ",req,value,index); |
88 | debug_dump(b,blen,deb_xfer); | 64 | debug_dump(b,blen,deb_xfer); |
@@ -92,7 +68,7 @@ int gp8psk_usb_in_op(struct dvb_usb_device *d, u8 req, u16 value, u16 index, u8 | |||
92 | return ret; | 68 | return ret; |
93 | } | 69 | } |
94 | 70 | ||
95 | int gp8psk_usb_out_op(struct dvb_usb_device *d, u8 req, u16 value, | 71 | static int gp8psk_usb_out_op(struct dvb_usb_device *d, u8 req, u16 value, |
96 | u16 index, u8 *b, int blen) | 72 | u16 index, u8 *b, int blen) |
97 | { | 73 | { |
98 | struct gp8psk_state *st = d->priv; | 74 | struct gp8psk_state *st = d->priv; |
@@ -123,6 +99,34 @@ int gp8psk_usb_out_op(struct dvb_usb_device *d, u8 req, u16 value, | |||
123 | return ret; | 99 | return ret; |
124 | } | 100 | } |
125 | 101 | ||
102 | |||
103 | static int gp8psk_get_fw_version(struct dvb_usb_device *d, u8 *fw_vers) | ||
104 | { | ||
105 | return gp8psk_usb_in_op(d, GET_FW_VERS, 0, 0, fw_vers, 6); | ||
106 | } | ||
107 | |||
108 | static int gp8psk_get_fpga_version(struct dvb_usb_device *d, u8 *fpga_vers) | ||
109 | { | ||
110 | return gp8psk_usb_in_op(d, GET_FPGA_VERS, 0, 0, fpga_vers, 1); | ||
111 | } | ||
112 | |||
113 | static void gp8psk_info(struct dvb_usb_device *d) | ||
114 | { | ||
115 | u8 fpga_vers, fw_vers[6]; | ||
116 | |||
117 | if (!gp8psk_get_fw_version(d, fw_vers)) | ||
118 | info("FW Version = %i.%02i.%i (0x%x) Build %4i/%02i/%02i", | ||
119 | fw_vers[2], fw_vers[1], fw_vers[0], GP8PSK_FW_VERS(fw_vers), | ||
120 | 2000 + fw_vers[5], fw_vers[4], fw_vers[3]); | ||
121 | else | ||
122 | info("failed to get FW version"); | ||
123 | |||
124 | if (!gp8psk_get_fpga_version(d, &fpga_vers)) | ||
125 | info("FPGA Version = %i", fpga_vers); | ||
126 | else | ||
127 | info("failed to get FPGA version"); | ||
128 | } | ||
129 | |||
126 | static int gp8psk_load_bcm4500fw(struct dvb_usb_device *d) | 130 | static int gp8psk_load_bcm4500fw(struct dvb_usb_device *d) |
127 | { | 131 | { |
128 | int ret; | 132 | int ret; |
@@ -225,10 +229,13 @@ static int gp8psk_power_ctrl(struct dvb_usb_device *d, int onoff) | |||
225 | return 0; | 229 | return 0; |
226 | } | 230 | } |
227 | 231 | ||
228 | int gp8psk_bcm4500_reload(struct dvb_usb_device *d) | 232 | static int gp8psk_bcm4500_reload(struct dvb_usb_device *d) |
229 | { | 233 | { |
230 | u8 buf; | 234 | u8 buf; |
231 | int gp_product_id = le16_to_cpu(d->udev->descriptor.idProduct); | 235 | int gp_product_id = le16_to_cpu(d->udev->descriptor.idProduct); |
236 | |||
237 | deb_xfer("reloading firmware\n"); | ||
238 | |||
232 | /* Turn off 8psk power */ | 239 | /* Turn off 8psk power */ |
233 | if (gp8psk_usb_in_op(d, BOOT_8PSK, 0, 0, &buf, 1)) | 240 | if (gp8psk_usb_in_op(d, BOOT_8PSK, 0, 0, &buf, 1)) |
234 | return -EINVAL; | 241 | return -EINVAL; |
@@ -247,9 +254,47 @@ static int gp8psk_streaming_ctrl(struct dvb_usb_adapter *adap, int onoff) | |||
247 | return gp8psk_usb_out_op(adap->dev, ARM_TRANSFER, onoff, 0 , NULL, 0); | 254 | return gp8psk_usb_out_op(adap->dev, ARM_TRANSFER, onoff, 0 , NULL, 0); |
248 | } | 255 | } |
249 | 256 | ||
257 | /* Callbacks for gp8psk-fe.c */ | ||
258 | |||
259 | static int gp8psk_fe_in(void *priv, u8 req, u16 value, | ||
260 | u16 index, u8 *b, int blen) | ||
261 | { | ||
262 | struct dvb_usb_device *d = priv; | ||
263 | |||
264 | return gp8psk_usb_in_op(d, req, value, index, b, blen); | ||
265 | } | ||
266 | |||
267 | static int gp8psk_fe_out(void *priv, u8 req, u16 value, | ||
268 | u16 index, u8 *b, int blen) | ||
269 | { | ||
270 | struct dvb_usb_device *d = priv; | ||
271 | |||
272 | return gp8psk_usb_out_op(d, req, value, index, b, blen); | ||
273 | } | ||
274 | |||
275 | static int gp8psk_fe_reload(void *priv) | ||
276 | { | ||
277 | struct dvb_usb_device *d = priv; | ||
278 | |||
279 | return gp8psk_bcm4500_reload(d); | ||
280 | } | ||
281 | |||
282 | const struct gp8psk_fe_ops gp8psk_fe_ops = { | ||
283 | .in = gp8psk_fe_in, | ||
284 | .out = gp8psk_fe_out, | ||
285 | .reload = gp8psk_fe_reload, | ||
286 | }; | ||
287 | |||
250 | static int gp8psk_frontend_attach(struct dvb_usb_adapter *adap) | 288 | static int gp8psk_frontend_attach(struct dvb_usb_adapter *adap) |
251 | { | 289 | { |
252 | adap->fe_adap[0].fe = gp8psk_fe_attach(adap->dev); | 290 | struct dvb_usb_device *d = adap->dev; |
291 | int id = le16_to_cpu(d->udev->descriptor.idProduct); | ||
292 | int is_rev1; | ||
293 | |||
294 | is_rev1 = (id == USB_PID_GENPIX_8PSK_REV_1_WARM) ? true : false; | ||
295 | |||
296 | adap->fe_adap[0].fe = dvb_attach(gp8psk_fe_attach, | ||
297 | &gp8psk_fe_ops, d, is_rev1); | ||
253 | return 0; | 298 | return 0; |
254 | } | 299 | } |
255 | 300 | ||
diff --git a/drivers/media/usb/dvb-usb/gp8psk.h b/drivers/media/usb/dvb-usb/gp8psk.h index ed32b9da4843..d8975b866dee 100644 --- a/drivers/media/usb/dvb-usb/gp8psk.h +++ b/drivers/media/usb/dvb-usb/gp8psk.h | |||
@@ -24,58 +24,6 @@ extern int dvb_usb_gp8psk_debug; | |||
24 | #define deb_info(args...) dprintk(dvb_usb_gp8psk_debug,0x01,args) | 24 | #define deb_info(args...) dprintk(dvb_usb_gp8psk_debug,0x01,args) |
25 | #define deb_xfer(args...) dprintk(dvb_usb_gp8psk_debug,0x02,args) | 25 | #define deb_xfer(args...) dprintk(dvb_usb_gp8psk_debug,0x02,args) |
26 | #define deb_rc(args...) dprintk(dvb_usb_gp8psk_debug,0x04,args) | 26 | #define deb_rc(args...) dprintk(dvb_usb_gp8psk_debug,0x04,args) |
27 | #define deb_fe(args...) dprintk(dvb_usb_gp8psk_debug,0x08,args) | ||
28 | |||
29 | /* Twinhan Vendor requests */ | ||
30 | #define TH_COMMAND_IN 0xC0 | ||
31 | #define TH_COMMAND_OUT 0xC1 | ||
32 | |||
33 | /* gp8psk commands */ | ||
34 | |||
35 | #define GET_8PSK_CONFIG 0x80 /* in */ | ||
36 | #define SET_8PSK_CONFIG 0x81 | ||
37 | #define I2C_WRITE 0x83 | ||
38 | #define I2C_READ 0x84 | ||
39 | #define ARM_TRANSFER 0x85 | ||
40 | #define TUNE_8PSK 0x86 | ||
41 | #define GET_SIGNAL_STRENGTH 0x87 /* in */ | ||
42 | #define LOAD_BCM4500 0x88 | ||
43 | #define BOOT_8PSK 0x89 /* in */ | ||
44 | #define START_INTERSIL 0x8A /* in */ | ||
45 | #define SET_LNB_VOLTAGE 0x8B | ||
46 | #define SET_22KHZ_TONE 0x8C | ||
47 | #define SEND_DISEQC_COMMAND 0x8D | ||
48 | #define SET_DVB_MODE 0x8E | ||
49 | #define SET_DN_SWITCH 0x8F | ||
50 | #define GET_SIGNAL_LOCK 0x90 /* in */ | ||
51 | #define GET_FW_VERS 0x92 | ||
52 | #define GET_SERIAL_NUMBER 0x93 /* in */ | ||
53 | #define USE_EXTRA_VOLT 0x94 | ||
54 | #define GET_FPGA_VERS 0x95 | ||
55 | #define CW3K_INIT 0x9d | ||
56 | |||
57 | /* PSK_configuration bits */ | ||
58 | #define bm8pskStarted 0x01 | ||
59 | #define bm8pskFW_Loaded 0x02 | ||
60 | #define bmIntersilOn 0x04 | ||
61 | #define bmDVBmode 0x08 | ||
62 | #define bm22kHz 0x10 | ||
63 | #define bmSEL18V 0x20 | ||
64 | #define bmDCtuned 0x40 | ||
65 | #define bmArmed 0x80 | ||
66 | |||
67 | /* Satellite modulation modes */ | ||
68 | #define ADV_MOD_DVB_QPSK 0 /* DVB-S QPSK */ | ||
69 | #define ADV_MOD_TURBO_QPSK 1 /* Turbo QPSK */ | ||
70 | #define ADV_MOD_TURBO_8PSK 2 /* Turbo 8PSK (also used for Trellis 8PSK) */ | ||
71 | #define ADV_MOD_TURBO_16QAM 3 /* Turbo 16QAM (also used for Trellis 8PSK) */ | ||
72 | |||
73 | #define ADV_MOD_DCII_C_QPSK 4 /* Digicipher II Combo */ | ||
74 | #define ADV_MOD_DCII_I_QPSK 5 /* Digicipher II I-stream */ | ||
75 | #define ADV_MOD_DCII_Q_QPSK 6 /* Digicipher II Q-stream */ | ||
76 | #define ADV_MOD_DCII_C_OQPSK 7 /* Digicipher II offset QPSK */ | ||
77 | #define ADV_MOD_DSS_QPSK 8 /* DSS (DIRECTV) QPSK */ | ||
78 | #define ADV_MOD_DVB_BPSK 9 /* DVB-S BPSK */ | ||
79 | 27 | ||
80 | #define GET_USB_SPEED 0x07 | 28 | #define GET_USB_SPEED 0x07 |
81 | 29 | ||
@@ -86,15 +34,4 @@ extern int dvb_usb_gp8psk_debug; | |||
86 | #define PRODUCT_STRING_READ 0x0D | 34 | #define PRODUCT_STRING_READ 0x0D |
87 | #define FW_BCD_VERSION_READ 0x14 | 35 | #define FW_BCD_VERSION_READ 0x14 |
88 | 36 | ||
89 | /* firmware revision id's */ | ||
90 | #define GP8PSK_FW_REV1 0x020604 | ||
91 | #define GP8PSK_FW_REV2 0x020704 | ||
92 | #define GP8PSK_FW_VERS(_fw_vers) ((_fw_vers)[2]<<0x10 | (_fw_vers)[1]<<0x08 | (_fw_vers)[0]) | ||
93 | |||
94 | extern struct dvb_frontend * gp8psk_fe_attach(struct dvb_usb_device *d); | ||
95 | extern int gp8psk_usb_in_op(struct dvb_usb_device *d, u8 req, u16 value, u16 index, u8 *b, int blen); | ||
96 | extern int gp8psk_usb_out_op(struct dvb_usb_device *d, u8 req, u16 value, | ||
97 | u16 index, u8 *b, int blen); | ||
98 | extern int gp8psk_bcm4500_reload(struct dvb_usb_device *d); | ||
99 | |||
100 | #endif | 37 | #endif |
diff --git a/drivers/mfd/intel-lpss-pci.c b/drivers/mfd/intel-lpss-pci.c index 3228fd182a99..9ff243970e93 100644 --- a/drivers/mfd/intel-lpss-pci.c +++ b/drivers/mfd/intel-lpss-pci.c | |||
@@ -123,19 +123,6 @@ static const struct intel_lpss_platform_info apl_i2c_info = { | |||
123 | .properties = apl_i2c_properties, | 123 | .properties = apl_i2c_properties, |
124 | }; | 124 | }; |
125 | 125 | ||
126 | static const struct intel_lpss_platform_info kbl_info = { | ||
127 | .clk_rate = 120000000, | ||
128 | }; | ||
129 | |||
130 | static const struct intel_lpss_platform_info kbl_uart_info = { | ||
131 | .clk_rate = 120000000, | ||
132 | .clk_con_id = "baudclk", | ||
133 | }; | ||
134 | |||
135 | static const struct intel_lpss_platform_info kbl_i2c_info = { | ||
136 | .clk_rate = 133000000, | ||
137 | }; | ||
138 | |||
139 | static const struct pci_device_id intel_lpss_pci_ids[] = { | 126 | static const struct pci_device_id intel_lpss_pci_ids[] = { |
140 | /* BXT A-Step */ | 127 | /* BXT A-Step */ |
141 | { PCI_VDEVICE(INTEL, 0x0aac), (kernel_ulong_t)&bxt_i2c_info }, | 128 | { PCI_VDEVICE(INTEL, 0x0aac), (kernel_ulong_t)&bxt_i2c_info }, |
@@ -207,15 +194,15 @@ static const struct pci_device_id intel_lpss_pci_ids[] = { | |||
207 | { PCI_VDEVICE(INTEL, 0xa161), (kernel_ulong_t)&spt_i2c_info }, | 194 | { PCI_VDEVICE(INTEL, 0xa161), (kernel_ulong_t)&spt_i2c_info }, |
208 | { PCI_VDEVICE(INTEL, 0xa166), (kernel_ulong_t)&spt_uart_info }, | 195 | { PCI_VDEVICE(INTEL, 0xa166), (kernel_ulong_t)&spt_uart_info }, |
209 | /* KBL-H */ | 196 | /* KBL-H */ |
210 | { PCI_VDEVICE(INTEL, 0xa2a7), (kernel_ulong_t)&kbl_uart_info }, | 197 | { PCI_VDEVICE(INTEL, 0xa2a7), (kernel_ulong_t)&spt_uart_info }, |
211 | { PCI_VDEVICE(INTEL, 0xa2a8), (kernel_ulong_t)&kbl_uart_info }, | 198 | { PCI_VDEVICE(INTEL, 0xa2a8), (kernel_ulong_t)&spt_uart_info }, |
212 | { PCI_VDEVICE(INTEL, 0xa2a9), (kernel_ulong_t)&kbl_info }, | 199 | { PCI_VDEVICE(INTEL, 0xa2a9), (kernel_ulong_t)&spt_info }, |
213 | { PCI_VDEVICE(INTEL, 0xa2aa), (kernel_ulong_t)&kbl_info }, | 200 | { PCI_VDEVICE(INTEL, 0xa2aa), (kernel_ulong_t)&spt_info }, |
214 | { PCI_VDEVICE(INTEL, 0xa2e0), (kernel_ulong_t)&kbl_i2c_info }, | 201 | { PCI_VDEVICE(INTEL, 0xa2e0), (kernel_ulong_t)&spt_i2c_info }, |
215 | { PCI_VDEVICE(INTEL, 0xa2e1), (kernel_ulong_t)&kbl_i2c_info }, | 202 | { PCI_VDEVICE(INTEL, 0xa2e1), (kernel_ulong_t)&spt_i2c_info }, |
216 | { PCI_VDEVICE(INTEL, 0xa2e2), (kernel_ulong_t)&kbl_i2c_info }, | 203 | { PCI_VDEVICE(INTEL, 0xa2e2), (kernel_ulong_t)&spt_i2c_info }, |
217 | { PCI_VDEVICE(INTEL, 0xa2e3), (kernel_ulong_t)&kbl_i2c_info }, | 204 | { PCI_VDEVICE(INTEL, 0xa2e3), (kernel_ulong_t)&spt_i2c_info }, |
218 | { PCI_VDEVICE(INTEL, 0xa2e6), (kernel_ulong_t)&kbl_uart_info }, | 205 | { PCI_VDEVICE(INTEL, 0xa2e6), (kernel_ulong_t)&spt_uart_info }, |
219 | { } | 206 | { } |
220 | }; | 207 | }; |
221 | MODULE_DEVICE_TABLE(pci, intel_lpss_pci_ids); | 208 | MODULE_DEVICE_TABLE(pci, intel_lpss_pci_ids); |
diff --git a/drivers/mfd/intel-lpss.c b/drivers/mfd/intel-lpss.c index 41b113875d64..70c646b0097d 100644 --- a/drivers/mfd/intel-lpss.c +++ b/drivers/mfd/intel-lpss.c | |||
@@ -502,9 +502,6 @@ int intel_lpss_suspend(struct device *dev) | |||
502 | for (i = 0; i < LPSS_PRIV_REG_COUNT; i++) | 502 | for (i = 0; i < LPSS_PRIV_REG_COUNT; i++) |
503 | lpss->priv_ctx[i] = readl(lpss->priv + i * 4); | 503 | lpss->priv_ctx[i] = readl(lpss->priv + i * 4); |
504 | 504 | ||
505 | /* Put the device into reset state */ | ||
506 | writel(0, lpss->priv + LPSS_PRIV_RESETS); | ||
507 | |||
508 | return 0; | 505 | return 0; |
509 | } | 506 | } |
510 | EXPORT_SYMBOL_GPL(intel_lpss_suspend); | 507 | EXPORT_SYMBOL_GPL(intel_lpss_suspend); |
diff --git a/drivers/mfd/intel_soc_pmic_bxtwc.c b/drivers/mfd/intel_soc_pmic_bxtwc.c index 43e54b7e908f..f9a8c5203873 100644 --- a/drivers/mfd/intel_soc_pmic_bxtwc.c +++ b/drivers/mfd/intel_soc_pmic_bxtwc.c | |||
@@ -86,6 +86,7 @@ enum bxtwc_irqs_level2 { | |||
86 | BXTWC_THRM2_IRQ, | 86 | BXTWC_THRM2_IRQ, |
87 | BXTWC_BCU_IRQ, | 87 | BXTWC_BCU_IRQ, |
88 | BXTWC_ADC_IRQ, | 88 | BXTWC_ADC_IRQ, |
89 | BXTWC_USBC_IRQ, | ||
89 | BXTWC_CHGR0_IRQ, | 90 | BXTWC_CHGR0_IRQ, |
90 | BXTWC_CHGR1_IRQ, | 91 | BXTWC_CHGR1_IRQ, |
91 | BXTWC_GPIO0_IRQ, | 92 | BXTWC_GPIO0_IRQ, |
@@ -111,7 +112,8 @@ static const struct regmap_irq bxtwc_regmap_irqs_level2[] = { | |||
111 | REGMAP_IRQ_REG(BXTWC_THRM2_IRQ, 2, 0xff), | 112 | REGMAP_IRQ_REG(BXTWC_THRM2_IRQ, 2, 0xff), |
112 | REGMAP_IRQ_REG(BXTWC_BCU_IRQ, 3, 0x1f), | 113 | REGMAP_IRQ_REG(BXTWC_BCU_IRQ, 3, 0x1f), |
113 | REGMAP_IRQ_REG(BXTWC_ADC_IRQ, 4, 0xff), | 114 | REGMAP_IRQ_REG(BXTWC_ADC_IRQ, 4, 0xff), |
114 | REGMAP_IRQ_REG(BXTWC_CHGR0_IRQ, 5, 0x3f), | 115 | REGMAP_IRQ_REG(BXTWC_USBC_IRQ, 5, BIT(5)), |
116 | REGMAP_IRQ_REG(BXTWC_CHGR0_IRQ, 5, 0x1f), | ||
115 | REGMAP_IRQ_REG(BXTWC_CHGR1_IRQ, 6, 0x1f), | 117 | REGMAP_IRQ_REG(BXTWC_CHGR1_IRQ, 6, 0x1f), |
116 | REGMAP_IRQ_REG(BXTWC_GPIO0_IRQ, 7, 0xff), | 118 | REGMAP_IRQ_REG(BXTWC_GPIO0_IRQ, 7, 0xff), |
117 | REGMAP_IRQ_REG(BXTWC_GPIO1_IRQ, 8, 0x3f), | 119 | REGMAP_IRQ_REG(BXTWC_GPIO1_IRQ, 8, 0x3f), |
@@ -146,7 +148,7 @@ static struct resource adc_resources[] = { | |||
146 | }; | 148 | }; |
147 | 149 | ||
148 | static struct resource usbc_resources[] = { | 150 | static struct resource usbc_resources[] = { |
149 | DEFINE_RES_IRQ_NAMED(BXTWC_CHGR0_IRQ, "USBC"), | 151 | DEFINE_RES_IRQ(BXTWC_USBC_IRQ), |
150 | }; | 152 | }; |
151 | 153 | ||
152 | static struct resource charger_resources[] = { | 154 | static struct resource charger_resources[] = { |
diff --git a/drivers/mfd/mfd-core.c b/drivers/mfd/mfd-core.c index 3ac486a597f3..c57e407020f1 100644 --- a/drivers/mfd/mfd-core.c +++ b/drivers/mfd/mfd-core.c | |||
@@ -399,6 +399,8 @@ int mfd_clone_cell(const char *cell, const char **clones, size_t n_clones) | |||
399 | clones[i]); | 399 | clones[i]); |
400 | } | 400 | } |
401 | 401 | ||
402 | put_device(dev); | ||
403 | |||
402 | return 0; | 404 | return 0; |
403 | } | 405 | } |
404 | EXPORT_SYMBOL(mfd_clone_cell); | 406 | EXPORT_SYMBOL(mfd_clone_cell); |
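
The mfd-core hunk above adds a put_device() before the success return, dropping the device reference presumably taken earlier in the function by a bus lookup helper (the lookup itself is outside this hunk). The underlying discipline is plain get/put balancing on every return path. A minimal standalone sketch of that pattern, using a toy refcount rather than the real driver model:

#include <stdio.h>
#include <stdlib.h>

struct toy_device {
	char name[16];
	int refcount;
};

static struct toy_device *toy_get(struct toy_device *d)
{
	d->refcount++;
	return d;
}

static void toy_put(struct toy_device *d)
{
	if (--d->refcount == 0) {
		printf("freeing %s\n", d->name);
		free(d);
	}
}

/* A lookup helper that returns a referenced object: every successful call
 * must be paired with exactly one toy_put(), on all return paths. */
static struct toy_device *toy_find(struct toy_device *d)
{
	return toy_get(d);
}

int main(void)
{
	struct toy_device *d = calloc(1, sizeof(*d));

	snprintf(d->name, sizeof(d->name), "cell0");
	d->refcount = 1;                 /* creator's reference */

	struct toy_device *found = toy_find(d);
	/* ... use "found" ... */
	toy_put(found);                  /* counterpart of the lookup */

	toy_put(d);                      /* drop the creator's reference */
	return 0;
}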
diff --git a/drivers/mfd/stmpe.c b/drivers/mfd/stmpe.c index cfdae8a3d779..b0c7bcdaf5df 100644 --- a/drivers/mfd/stmpe.c +++ b/drivers/mfd/stmpe.c | |||
@@ -851,6 +851,8 @@ static int stmpe_reset(struct stmpe *stmpe) | |||
851 | if (ret < 0) | 851 | if (ret < 0) |
852 | return ret; | 852 | return ret; |
853 | 853 | ||
854 | msleep(10); | ||
855 | |||
854 | timeout = jiffies + msecs_to_jiffies(100); | 856 | timeout = jiffies + msecs_to_jiffies(100); |
855 | while (time_before(jiffies, timeout)) { | 857 | while (time_before(jiffies, timeout)) { |
856 | ret = __stmpe_reg_read(stmpe, stmpe->regs[STMPE_IDX_SYS_CTRL]); | 858 | ret = __stmpe_reg_read(stmpe, stmpe->regs[STMPE_IDX_SYS_CTRL]); |
diff --git a/drivers/misc/mei/bus-fixup.c b/drivers/misc/mei/bus-fixup.c index e9e6ea3ab73c..75b9d4ac8b1e 100644 --- a/drivers/misc/mei/bus-fixup.c +++ b/drivers/misc/mei/bus-fixup.c | |||
@@ -178,7 +178,7 @@ static int mei_nfc_if_version(struct mei_cl *cl, | |||
178 | 178 | ||
179 | ret = 0; | 179 | ret = 0; |
180 | bytes_recv = __mei_cl_recv(cl, (u8 *)reply, if_version_length); | 180 | bytes_recv = __mei_cl_recv(cl, (u8 *)reply, if_version_length); |
181 | if (bytes_recv < 0 || bytes_recv < sizeof(struct mei_nfc_reply)) { | 181 | if (bytes_recv < if_version_length) { |
182 | dev_err(bus->dev, "Could not read IF version\n"); | 182 | dev_err(bus->dev, "Could not read IF version\n"); |
183 | ret = -EIO; | 183 | ret = -EIO; |
184 | goto err; | 184 | goto err; |
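
The mei fix above replaces the two-part check with a single comparison against the full expected length, if_version_length, which also rejects short reads. A common C pitfall when writing this kind of length check is comparing a signed byte count against an unsigned sizeof() operand without first handling negative values: the signed value is converted to size_t and an error return silently passes the test. The sketch below only demonstrates that conversion rule; it is not the driver's code.

/* Standalone demo of the signed-vs-size_t comparison pitfall; compile with
 * `cc -Wall demo.c` (the compiler will flag the first comparison). */
#include <stdio.h>

struct reply { int version; char payload[8]; };

int main(void)
{
	int bytes_recv = -5;            /* e.g. an error code from a read helper */
	size_t expected = sizeof(struct reply) + 4;

	/* Pitfall: bytes_recv is implicitly converted to size_t, so -5 turns
	 * into a huge positive number and the "too short" test never fires. */
	if (bytes_recv < sizeof(struct reply))
		printf("naive check rejects %d\n", bytes_recv);
	else
		printf("naive check wrongly accepts %d\n", bytes_recv);

	/* Robust shape: handle the sign first, then compare the full length. */
	if (bytes_recv < 0 || (size_t)bytes_recv < expected)
		printf("strict check rejects %d\n", bytes_recv);

	return 0;
}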
diff --git a/drivers/mmc/card/mmc_test.c b/drivers/mmc/card/mmc_test.c index 5a8dc5a76e0d..3678220964fe 100644 --- a/drivers/mmc/card/mmc_test.c +++ b/drivers/mmc/card/mmc_test.c | |||
@@ -2347,7 +2347,7 @@ static int mmc_test_ongoing_transfer(struct mmc_test_card *test, | |||
2347 | struct mmc_test_req *rq = mmc_test_req_alloc(); | 2347 | struct mmc_test_req *rq = mmc_test_req_alloc(); |
2348 | struct mmc_host *host = test->card->host; | 2348 | struct mmc_host *host = test->card->host; |
2349 | struct mmc_test_area *t = &test->area; | 2349 | struct mmc_test_area *t = &test->area; |
2350 | struct mmc_async_req areq; | 2350 | struct mmc_test_async_req test_areq = { .test = test }; |
2351 | struct mmc_request *mrq; | 2351 | struct mmc_request *mrq; |
2352 | unsigned long timeout; | 2352 | unsigned long timeout; |
2353 | bool expired = false; | 2353 | bool expired = false; |
@@ -2363,8 +2363,8 @@ static int mmc_test_ongoing_transfer(struct mmc_test_card *test, | |||
2363 | mrq->sbc = &rq->sbc; | 2363 | mrq->sbc = &rq->sbc; |
2364 | mrq->cap_cmd_during_tfr = true; | 2364 | mrq->cap_cmd_during_tfr = true; |
2365 | 2365 | ||
2366 | areq.mrq = mrq; | 2366 | test_areq.areq.mrq = mrq; |
2367 | areq.err_check = mmc_test_check_result_async; | 2367 | test_areq.areq.err_check = mmc_test_check_result_async; |
2368 | 2368 | ||
2369 | mmc_test_prepare_mrq(test, mrq, t->sg, t->sg_len, dev_addr, t->blocks, | 2369 | mmc_test_prepare_mrq(test, mrq, t->sg, t->sg_len, dev_addr, t->blocks, |
2370 | 512, write); | 2370 | 512, write); |
@@ -2378,7 +2378,7 @@ static int mmc_test_ongoing_transfer(struct mmc_test_card *test, | |||
2378 | 2378 | ||
2379 | /* Start ongoing data request */ | 2379 | /* Start ongoing data request */ |
2380 | if (use_areq) { | 2380 | if (use_areq) { |
2381 | mmc_start_req(host, &areq, &ret); | 2381 | mmc_start_req(host, &test_areq.areq, &ret); |
2382 | if (ret) | 2382 | if (ret) |
2383 | goto out_free; | 2383 | goto out_free; |
2384 | } else { | 2384 | } else { |
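
In the mmc_test change above, the bare struct mmc_async_req on the stack is replaced by a struct mmc_test_async_req that embeds it, so a completion callback that only sees the embedded member can get back to the surrounding test context (in the kernel this recovery is normally done with container_of()). A self-contained sketch of the embed-and-recover pattern, with invented structure names:

#include <stddef.h>
#include <stdio.h>

/* Recover the outer structure from a pointer to an embedded member. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct async_req {
	int id;
};

struct test_async_req {
	struct async_req areq;   /* embedded member handed to the "core" */
	const char *label;       /* extra per-test context */
};

/* A callback that only receives the embedded member, as a core layer would. */
static void err_check(struct async_req *areq)
{
	struct test_async_req *t = container_of(areq, struct test_async_req, areq);

	printf("request %d belongs to test '%s'\n", areq->id, t->label);
}

int main(void)
{
	struct test_async_req test_areq = { .areq = { .id = 7 }, .label = "ongoing-xfer" };

	err_check(&test_areq.areq);
	return 0;
}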
diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c index 39fc5b2b96c5..df19777068a6 100644 --- a/drivers/mmc/core/mmc.c +++ b/drivers/mmc/core/mmc.c | |||
@@ -26,6 +26,8 @@ | |||
26 | #include "mmc_ops.h" | 26 | #include "mmc_ops.h" |
27 | #include "sd_ops.h" | 27 | #include "sd_ops.h" |
28 | 28 | ||
29 | #define DEFAULT_CMD6_TIMEOUT_MS 500 | ||
30 | |||
29 | static const unsigned int tran_exp[] = { | 31 | static const unsigned int tran_exp[] = { |
30 | 10000, 100000, 1000000, 10000000, | 32 | 10000, 100000, 1000000, 10000000, |
31 | 0, 0, 0, 0 | 33 | 0, 0, 0, 0 |
@@ -571,6 +573,7 @@ static int mmc_decode_ext_csd(struct mmc_card *card, u8 *ext_csd) | |||
571 | card->erased_byte = 0x0; | 573 | card->erased_byte = 0x0; |
572 | 574 | ||
573 | /* eMMC v4.5 or later */ | 575 | /* eMMC v4.5 or later */ |
576 | card->ext_csd.generic_cmd6_time = DEFAULT_CMD6_TIMEOUT_MS; | ||
574 | if (card->ext_csd.rev >= 6) { | 577 | if (card->ext_csd.rev >= 6) { |
575 | card->ext_csd.feature_support |= MMC_DISCARD_FEATURE; | 578 | card->ext_csd.feature_support |= MMC_DISCARD_FEATURE; |
576 | 579 | ||
diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c index 4fcbc4012ed0..50a674be6655 100644 --- a/drivers/mmc/host/dw_mmc.c +++ b/drivers/mmc/host/dw_mmc.c | |||
@@ -2940,7 +2940,7 @@ static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host) | |||
2940 | return ERR_PTR(-ENOMEM); | 2940 | return ERR_PTR(-ENOMEM); |
2941 | 2941 | ||
2942 | /* find the reset controller if one exists */ | 2942 | /* find the reset controller if one exists */ |
2943 | pdata->rstc = devm_reset_control_get_optional(dev, NULL); | 2943 | pdata->rstc = devm_reset_control_get_optional(dev, "reset"); |
2944 | if (IS_ERR(pdata->rstc)) { | 2944 | if (IS_ERR(pdata->rstc)) { |
2945 | if (PTR_ERR(pdata->rstc) == -EPROBE_DEFER) | 2945 | if (PTR_ERR(pdata->rstc) == -EPROBE_DEFER) |
2946 | return ERR_PTR(-EPROBE_DEFER); | 2946 | return ERR_PTR(-EPROBE_DEFER); |
diff --git a/drivers/mmc/host/mxs-mmc.c b/drivers/mmc/host/mxs-mmc.c index d839147e591d..44ecebd1ea8c 100644 --- a/drivers/mmc/host/mxs-mmc.c +++ b/drivers/mmc/host/mxs-mmc.c | |||
@@ -661,13 +661,13 @@ static int mxs_mmc_probe(struct platform_device *pdev) | |||
661 | 661 | ||
662 | platform_set_drvdata(pdev, mmc); | 662 | platform_set_drvdata(pdev, mmc); |
663 | 663 | ||
664 | spin_lock_init(&host->lock); | ||
665 | |||
664 | ret = devm_request_irq(&pdev->dev, irq_err, mxs_mmc_irq_handler, 0, | 666 | ret = devm_request_irq(&pdev->dev, irq_err, mxs_mmc_irq_handler, 0, |
665 | dev_name(&pdev->dev), host); | 667 | dev_name(&pdev->dev), host); |
666 | if (ret) | 668 | if (ret) |
667 | goto out_free_dma; | 669 | goto out_free_dma; |
668 | 670 | ||
669 | spin_lock_init(&host->lock); | ||
670 | |||
671 | ret = mmc_add_host(mmc); | 671 | ret = mmc_add_host(mmc); |
672 | if (ret) | 672 | if (ret) |
673 | goto out_free_dma; | 673 | goto out_free_dma; |
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c index 71654b90227f..42ef3ebb1d8c 100644 --- a/drivers/mmc/host/sdhci.c +++ b/drivers/mmc/host/sdhci.c | |||
@@ -2086,6 +2086,10 @@ static int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode) | |||
2086 | 2086 | ||
2087 | if (!host->tuning_done) { | 2087 | if (!host->tuning_done) { |
2088 | pr_info(DRIVER_NAME ": Timeout waiting for Buffer Read Ready interrupt during tuning procedure, falling back to fixed sampling clock\n"); | 2088 | pr_info(DRIVER_NAME ": Timeout waiting for Buffer Read Ready interrupt during tuning procedure, falling back to fixed sampling clock\n"); |
2089 | |||
2090 | sdhci_do_reset(host, SDHCI_RESET_CMD); | ||
2091 | sdhci_do_reset(host, SDHCI_RESET_DATA); | ||
2092 | |||
2089 | ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2); | 2093 | ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2); |
2090 | ctrl &= ~SDHCI_CTRL_TUNED_CLK; | 2094 | ctrl &= ~SDHCI_CTRL_TUNED_CLK; |
2091 | ctrl &= ~SDHCI_CTRL_EXEC_TUNING; | 2095 | ctrl &= ~SDHCI_CTRL_EXEC_TUNING; |
@@ -2286,10 +2290,8 @@ static bool sdhci_request_done(struct sdhci_host *host) | |||
2286 | 2290 | ||
2287 | for (i = 0; i < SDHCI_MAX_MRQS; i++) { | 2291 | for (i = 0; i < SDHCI_MAX_MRQS; i++) { |
2288 | mrq = host->mrqs_done[i]; | 2292 | mrq = host->mrqs_done[i]; |
2289 | if (mrq) { | 2293 | if (mrq) |
2290 | host->mrqs_done[i] = NULL; | ||
2291 | break; | 2294 | break; |
2292 | } | ||
2293 | } | 2295 | } |
2294 | 2296 | ||
2295 | if (!mrq) { | 2297 | if (!mrq) { |
@@ -2320,6 +2322,17 @@ static bool sdhci_request_done(struct sdhci_host *host) | |||
2320 | * upon error conditions. | 2322 | * upon error conditions. |
2321 | */ | 2323 | */ |
2322 | if (sdhci_needs_reset(host, mrq)) { | 2324 | if (sdhci_needs_reset(host, mrq)) { |
2325 | /* | ||
2326 | * Do not finish until command and data lines are available for | ||
2327 | * reset. Note there can only be one other mrq, so it cannot | ||
2328 | * also be in mrqs_done, otherwise host->cmd and host->data_cmd | ||
2329 | * would both be null. | ||
2330 | */ | ||
2331 | if (host->cmd || host->data_cmd) { | ||
2332 | spin_unlock_irqrestore(&host->lock, flags); | ||
2333 | return true; | ||
2334 | } | ||
2335 | |||
2323 | /* Some controllers need this kick or reset won't work here */ | 2336 | /* Some controllers need this kick or reset won't work here */ |
2324 | if (host->quirks & SDHCI_QUIRK_CLOCK_BEFORE_RESET) | 2337 | if (host->quirks & SDHCI_QUIRK_CLOCK_BEFORE_RESET) |
2325 | /* This is to force an update */ | 2338 | /* This is to force an update */ |
@@ -2327,10 +2340,8 @@ static bool sdhci_request_done(struct sdhci_host *host) | |||
2327 | 2340 | ||
2328 | /* Spec says we should do both at the same time, but Ricoh | 2341 | /* Spec says we should do both at the same time, but Ricoh |
2329 | controllers do not like that. */ | 2342 | controllers do not like that. */ |
2330 | if (!host->cmd) | 2343 | sdhci_do_reset(host, SDHCI_RESET_CMD); |
2331 | sdhci_do_reset(host, SDHCI_RESET_CMD); | 2344 | sdhci_do_reset(host, SDHCI_RESET_DATA); |
2332 | if (!host->data_cmd) | ||
2333 | sdhci_do_reset(host, SDHCI_RESET_DATA); | ||
2334 | 2345 | ||
2335 | host->pending_reset = false; | 2346 | host->pending_reset = false; |
2336 | } | 2347 | } |
@@ -2338,6 +2349,8 @@ static bool sdhci_request_done(struct sdhci_host *host) | |||
2338 | if (!sdhci_has_requests(host)) | 2349 | if (!sdhci_has_requests(host)) |
2339 | sdhci_led_deactivate(host); | 2350 | sdhci_led_deactivate(host); |
2340 | 2351 | ||
2352 | host->mrqs_done[i] = NULL; | ||
2353 | |||
2341 | mmiowb(); | 2354 | mmiowb(); |
2342 | spin_unlock_irqrestore(&host->lock, flags); | 2355 | spin_unlock_irqrestore(&host->lock, flags); |
2343 | 2356 | ||
@@ -2512,9 +2525,6 @@ static void sdhci_data_irq(struct sdhci_host *host, u32 intmask) | |||
2512 | if (!host->data) { | 2525 | if (!host->data) { |
2513 | struct mmc_command *data_cmd = host->data_cmd; | 2526 | struct mmc_command *data_cmd = host->data_cmd; |
2514 | 2527 | ||
2515 | if (data_cmd) | ||
2516 | host->data_cmd = NULL; | ||
2517 | |||
2518 | /* | 2528 | /* |
2519 | * The "data complete" interrupt is also used to | 2529 | * The "data complete" interrupt is also used to |
2520 | * indicate that a busy state has ended. See comment | 2530 | * indicate that a busy state has ended. See comment |
@@ -2522,11 +2532,13 @@ static void sdhci_data_irq(struct sdhci_host *host, u32 intmask) | |||
2522 | */ | 2532 | */ |
2523 | if (data_cmd && (data_cmd->flags & MMC_RSP_BUSY)) { | 2533 | if (data_cmd && (data_cmd->flags & MMC_RSP_BUSY)) { |
2524 | if (intmask & SDHCI_INT_DATA_TIMEOUT) { | 2534 | if (intmask & SDHCI_INT_DATA_TIMEOUT) { |
2535 | host->data_cmd = NULL; | ||
2525 | data_cmd->error = -ETIMEDOUT; | 2536 | data_cmd->error = -ETIMEDOUT; |
2526 | sdhci_finish_mrq(host, data_cmd->mrq); | 2537 | sdhci_finish_mrq(host, data_cmd->mrq); |
2527 | return; | 2538 | return; |
2528 | } | 2539 | } |
2529 | if (intmask & SDHCI_INT_DATA_END) { | 2540 | if (intmask & SDHCI_INT_DATA_END) { |
2541 | host->data_cmd = NULL; | ||
2530 | /* | 2542 | /* |
2531 | * Some cards handle busy-end interrupt | 2543 | * Some cards handle busy-end interrupt |
2532 | * before the command completed, so make | 2544 | * before the command completed, so make |
@@ -2912,6 +2924,10 @@ int sdhci_runtime_resume_host(struct sdhci_host *host) | |||
2912 | spin_unlock_irqrestore(&host->lock, flags); | 2924 | spin_unlock_irqrestore(&host->lock, flags); |
2913 | } | 2925 | } |
2914 | 2926 | ||
2927 | if ((mmc->caps2 & MMC_CAP2_HS400_ES) && | ||
2928 | mmc->ops->hs400_enhanced_strobe) | ||
2929 | mmc->ops->hs400_enhanced_strobe(mmc, &mmc->ios); | ||
2930 | |||
2915 | spin_lock_irqsave(&host->lock, flags); | 2931 | spin_lock_irqsave(&host->lock, flags); |
2916 | 2932 | ||
2917 | host->runtime_suspended = false; | 2933 | host->runtime_suspended = false; |
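
Several of the sdhci hunks above reorder sdhci_request_done() so the finished-request slot is cleared only once the function has committed to completing it; if a reset is needed but the command or data line is still owned by another request, it now returns early and leaves the slot for a later pass. A hedged, hardware-free sketch of that control-flow shape, with invented names:

#include <stdio.h>
#include <stdbool.h>

#define MAX_DONE 2

struct request { int id; bool needs_reset; };

static struct request *done_slots[MAX_DONE];
static bool line_busy;   /* stands in for host->cmd / host->data_cmd */

/* Process at most one entry from the done slots; a deferred entry stays put. */
static bool request_done(void)
{
	int i;
	struct request *rq = NULL;

	for (i = 0; i < MAX_DONE; i++) {
		if (done_slots[i]) {
			rq = done_slots[i];
			break;
		}
	}
	if (!rq)
		return false;

	/* Defer: keep the slot occupied until the shared line is free,
	 * so a later pass can retry the reset-and-complete step. */
	if (rq->needs_reset && line_busy) {
		printf("request %d deferred, line busy\n", rq->id);
		return true;
	}

	if (rq->needs_reset)
		printf("request %d: controller reset\n", rq->id);

	done_slots[i] = NULL;   /* clear only once we are committed */
	printf("request %d completed\n", rq->id);
	return true;
}

int main(void)
{
	struct request rq = { .id = 1, .needs_reset = true };

	done_slots[0] = &rq;
	line_busy = true;
	request_done();          /* deferred */
	line_busy = false;
	request_done();          /* reset + completed */
	return 0;
}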
diff --git a/drivers/net/can/sja1000/plx_pci.c b/drivers/net/can/sja1000/plx_pci.c index 3eb7430dffbf..f8ff25c8ee2e 100644 --- a/drivers/net/can/sja1000/plx_pci.c +++ b/drivers/net/can/sja1000/plx_pci.c | |||
@@ -142,6 +142,9 @@ struct plx_pci_card { | |||
142 | #define CTI_PCI_VENDOR_ID 0x12c4 | 142 | #define CTI_PCI_VENDOR_ID 0x12c4 |
143 | #define CTI_PCI_DEVICE_ID_CRG001 0x0900 | 143 | #define CTI_PCI_DEVICE_ID_CRG001 0x0900 |
144 | 144 | ||
145 | #define MOXA_PCI_VENDOR_ID 0x1393 | ||
146 | #define MOXA_PCI_DEVICE_ID 0x0100 | ||
147 | |||
145 | static void plx_pci_reset_common(struct pci_dev *pdev); | 148 | static void plx_pci_reset_common(struct pci_dev *pdev); |
146 | static void plx9056_pci_reset_common(struct pci_dev *pdev); | 149 | static void plx9056_pci_reset_common(struct pci_dev *pdev); |
147 | static void plx_pci_reset_marathon_pci(struct pci_dev *pdev); | 150 | static void plx_pci_reset_marathon_pci(struct pci_dev *pdev); |
@@ -258,6 +261,14 @@ static struct plx_pci_card_info plx_pci_card_info_elcus = { | |||
258 | /* based on PLX9030 */ | 261 | /* based on PLX9030 */ |
259 | }; | 262 | }; |
260 | 263 | ||
264 | static struct plx_pci_card_info plx_pci_card_info_moxa = { | ||
265 | "MOXA", 2, | ||
266 | PLX_PCI_CAN_CLOCK, PLX_PCI_OCR, PLX_PCI_CDR, | ||
267 | {0, 0x00, 0x00}, { {0, 0x00, 0x80}, {1, 0x00, 0x80} }, | ||
268 | &plx_pci_reset_common | ||
269 | /* based on PLX9052 */ | ||
270 | }; | ||
271 | |||
261 | static const struct pci_device_id plx_pci_tbl[] = { | 272 | static const struct pci_device_id plx_pci_tbl[] = { |
262 | { | 273 | { |
263 | /* Adlink PCI-7841/cPCI-7841 */ | 274 | /* Adlink PCI-7841/cPCI-7841 */ |
@@ -357,6 +368,13 @@ static const struct pci_device_id plx_pci_tbl[] = { | |||
357 | 0, 0, | 368 | 0, 0, |
358 | (kernel_ulong_t)&plx_pci_card_info_elcus | 369 | (kernel_ulong_t)&plx_pci_card_info_elcus |
359 | }, | 370 | }, |
371 | { | ||
372 | /* moxa */ | ||
373 | MOXA_PCI_VENDOR_ID, MOXA_PCI_DEVICE_ID, | ||
374 | PCI_ANY_ID, PCI_ANY_ID, | ||
375 | 0, 0, | ||
376 | (kernel_ulong_t)&plx_pci_card_info_moxa | ||
377 | }, | ||
360 | { 0,} | 378 | { 0,} |
361 | }; | 379 | }; |
362 | MODULE_DEVICE_TABLE(pci, plx_pci_tbl); | 380 | MODULE_DEVICE_TABLE(pci, plx_pci_tbl); |
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c index c481f104a8fe..5390ae89136c 100644 --- a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c +++ b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c | |||
@@ -204,17 +204,6 @@ static u32 xgene_enet_ring_len(struct xgene_enet_desc_ring *ring) | |||
204 | return num_msgs; | 204 | return num_msgs; |
205 | } | 205 | } |
206 | 206 | ||
207 | static void xgene_enet_setup_coalescing(struct xgene_enet_desc_ring *ring) | ||
208 | { | ||
209 | u32 data = 0x7777; | ||
210 | |||
211 | xgene_enet_ring_wr32(ring, CSR_PBM_COAL, 0x8e); | ||
212 | xgene_enet_ring_wr32(ring, CSR_PBM_CTICK1, data); | ||
213 | xgene_enet_ring_wr32(ring, CSR_PBM_CTICK2, data << 16); | ||
214 | xgene_enet_ring_wr32(ring, CSR_THRESHOLD0_SET1, 0x40); | ||
215 | xgene_enet_ring_wr32(ring, CSR_THRESHOLD1_SET1, 0x80); | ||
216 | } | ||
217 | |||
218 | void xgene_enet_parse_error(struct xgene_enet_desc_ring *ring, | 207 | void xgene_enet_parse_error(struct xgene_enet_desc_ring *ring, |
219 | struct xgene_enet_pdata *pdata, | 208 | struct xgene_enet_pdata *pdata, |
220 | enum xgene_enet_err_code status) | 209 | enum xgene_enet_err_code status) |
@@ -929,5 +918,4 @@ struct xgene_ring_ops xgene_ring1_ops = { | |||
929 | .clear = xgene_enet_clear_ring, | 918 | .clear = xgene_enet_clear_ring, |
930 | .wr_cmd = xgene_enet_wr_cmd, | 919 | .wr_cmd = xgene_enet_wr_cmd, |
931 | .len = xgene_enet_ring_len, | 920 | .len = xgene_enet_ring_len, |
932 | .coalesce = xgene_enet_setup_coalescing, | ||
933 | }; | 921 | }; |
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h index 8456337a237d..06e598c8bc16 100644 --- a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h +++ b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h | |||
@@ -55,8 +55,10 @@ enum xgene_enet_rm { | |||
55 | #define PREFETCH_BUF_EN BIT(21) | 55 | #define PREFETCH_BUF_EN BIT(21) |
56 | #define CSR_RING_ID_BUF 0x000c | 56 | #define CSR_RING_ID_BUF 0x000c |
57 | #define CSR_PBM_COAL 0x0014 | 57 | #define CSR_PBM_COAL 0x0014 |
58 | #define CSR_PBM_CTICK0 0x0018 | ||
58 | #define CSR_PBM_CTICK1 0x001c | 59 | #define CSR_PBM_CTICK1 0x001c |
59 | #define CSR_PBM_CTICK2 0x0020 | 60 | #define CSR_PBM_CTICK2 0x0020 |
61 | #define CSR_PBM_CTICK3 0x0024 | ||
60 | #define CSR_THRESHOLD0_SET1 0x0030 | 62 | #define CSR_THRESHOLD0_SET1 0x0030 |
61 | #define CSR_THRESHOLD1_SET1 0x0034 | 63 | #define CSR_THRESHOLD1_SET1 0x0034 |
62 | #define CSR_RING_NE_INT_MODE 0x017c | 64 | #define CSR_RING_NE_INT_MODE 0x017c |
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c index 429f18fc5503..8158d4698734 100644 --- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c +++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c | |||
@@ -1188,7 +1188,8 @@ static int xgene_enet_create_desc_rings(struct net_device *ndev) | |||
1188 | tx_ring->dst_ring_num = xgene_enet_dst_ring_num(cp_ring); | 1188 | tx_ring->dst_ring_num = xgene_enet_dst_ring_num(cp_ring); |
1189 | } | 1189 | } |
1190 | 1190 | ||
1191 | pdata->ring_ops->coalesce(pdata->tx_ring[0]); | 1191 | if (pdata->ring_ops->coalesce) |
1192 | pdata->ring_ops->coalesce(pdata->tx_ring[0]); | ||
1192 | pdata->tx_qcnt_hi = pdata->tx_ring[0]->slots - 128; | 1193 | pdata->tx_qcnt_hi = pdata->tx_ring[0]->slots - 128; |
1193 | 1194 | ||
1194 | return 0; | 1195 | return 0; |
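
The xgene change above guards the coalesce callback with a NULL check, since the first-generation ring ops no longer provide it. Checking an optional ops hook before calling it is the standard pattern; a minimal sketch with invented ops and ring names:

#include <stdio.h>

struct ring { int id; };

struct ring_ops {
	void (*setup)(struct ring *r);
	void (*coalesce)(struct ring *r);   /* optional: may be NULL */
};

static void ring2_coalesce(struct ring *r)
{
	printf("ring %d: interrupt coalescing configured\n", r->id);
}

static const struct ring_ops ring1_ops = { .setup = NULL, .coalesce = NULL };
static const struct ring_ops ring2_ops = { .coalesce = ring2_coalesce };

static void create_rings(const struct ring_ops *ops, struct ring *r)
{
	/* Only generations that implement the hook get it called. */
	if (ops->coalesce)
		ops->coalesce(r);
	else
		printf("ring %d: coalescing not supported, skipping\n", r->id);
}

int main(void)
{
	struct ring a = { .id = 1 }, b = { .id = 2 };

	create_rings(&ring1_ops, &a);
	create_rings(&ring2_ops, &b);
	return 0;
}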
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_ring2.c b/drivers/net/ethernet/apm/xgene/xgene_enet_ring2.c index 2b76732add5d..af51dd5844ce 100644 --- a/drivers/net/ethernet/apm/xgene/xgene_enet_ring2.c +++ b/drivers/net/ethernet/apm/xgene/xgene_enet_ring2.c | |||
@@ -30,7 +30,7 @@ static void xgene_enet_ring_init(struct xgene_enet_desc_ring *ring) | |||
30 | ring_cfg[0] |= SET_VAL(X2_INTLINE, ring->id & RING_BUFNUM_MASK); | 30 | ring_cfg[0] |= SET_VAL(X2_INTLINE, ring->id & RING_BUFNUM_MASK); |
31 | ring_cfg[3] |= SET_BIT(X2_DEQINTEN); | 31 | ring_cfg[3] |= SET_BIT(X2_DEQINTEN); |
32 | } | 32 | } |
33 | ring_cfg[0] |= SET_VAL(X2_CFGCRID, 1); | 33 | ring_cfg[0] |= SET_VAL(X2_CFGCRID, 2); |
34 | 34 | ||
35 | addr >>= 8; | 35 | addr >>= 8; |
36 | ring_cfg[2] |= QCOHERENT | SET_VAL(RINGADDRL, addr); | 36 | ring_cfg[2] |= QCOHERENT | SET_VAL(RINGADDRL, addr); |
@@ -192,13 +192,15 @@ static u32 xgene_enet_ring_len(struct xgene_enet_desc_ring *ring) | |||
192 | 192 | ||
193 | static void xgene_enet_setup_coalescing(struct xgene_enet_desc_ring *ring) | 193 | static void xgene_enet_setup_coalescing(struct xgene_enet_desc_ring *ring) |
194 | { | 194 | { |
195 | u32 data = 0x7777; | 195 | u32 data = 0x77777777; |
196 | 196 | ||
197 | xgene_enet_ring_wr32(ring, CSR_PBM_COAL, 0x8e); | 197 | xgene_enet_ring_wr32(ring, CSR_PBM_COAL, 0x8e); |
198 | xgene_enet_ring_wr32(ring, CSR_PBM_CTICK0, data); | ||
198 | xgene_enet_ring_wr32(ring, CSR_PBM_CTICK1, data); | 199 | xgene_enet_ring_wr32(ring, CSR_PBM_CTICK1, data); |
199 | xgene_enet_ring_wr32(ring, CSR_PBM_CTICK2, data << 16); | 200 | xgene_enet_ring_wr32(ring, CSR_PBM_CTICK2, data); |
200 | xgene_enet_ring_wr32(ring, CSR_THRESHOLD0_SET1, 0x40); | 201 | xgene_enet_ring_wr32(ring, CSR_PBM_CTICK3, data); |
201 | xgene_enet_ring_wr32(ring, CSR_THRESHOLD1_SET1, 0x80); | 202 | xgene_enet_ring_wr32(ring, CSR_THRESHOLD0_SET1, 0x08); |
203 | xgene_enet_ring_wr32(ring, CSR_THRESHOLD1_SET1, 0x10); | ||
202 | } | 204 | } |
203 | 205 | ||
204 | struct xgene_ring_ops xgene_ring2_ops = { | 206 | struct xgene_ring_ops xgene_ring2_ops = { |
diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c index 31ca204b38d2..49f4cafe5438 100644 --- a/drivers/net/ethernet/broadcom/bgmac.c +++ b/drivers/net/ethernet/broadcom/bgmac.c | |||
@@ -307,6 +307,10 @@ static void bgmac_dma_rx_enable(struct bgmac *bgmac, | |||
307 | u32 ctl; | 307 | u32 ctl; |
308 | 308 | ||
309 | ctl = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_RX_CTL); | 309 | ctl = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_RX_CTL); |
310 | |||
311 | /* preserve ONLY bits 16-17 from current hardware value */ | ||
312 | ctl &= BGMAC_DMA_RX_ADDREXT_MASK; | ||
313 | |||
310 | if (bgmac->feature_flags & BGMAC_FEAT_RX_MASK_SETUP) { | 314 | if (bgmac->feature_flags & BGMAC_FEAT_RX_MASK_SETUP) { |
311 | ctl &= ~BGMAC_DMA_RX_BL_MASK; | 315 | ctl &= ~BGMAC_DMA_RX_BL_MASK; |
312 | ctl |= BGMAC_DMA_RX_BL_128 << BGMAC_DMA_RX_BL_SHIFT; | 316 | ctl |= BGMAC_DMA_RX_BL_128 << BGMAC_DMA_RX_BL_SHIFT; |
@@ -317,7 +321,6 @@ static void bgmac_dma_rx_enable(struct bgmac *bgmac, | |||
317 | ctl &= ~BGMAC_DMA_RX_PT_MASK; | 321 | ctl &= ~BGMAC_DMA_RX_PT_MASK; |
318 | ctl |= BGMAC_DMA_RX_PT_1 << BGMAC_DMA_RX_PT_SHIFT; | 322 | ctl |= BGMAC_DMA_RX_PT_1 << BGMAC_DMA_RX_PT_SHIFT; |
319 | } | 323 | } |
320 | ctl &= BGMAC_DMA_RX_ADDREXT_MASK; | ||
321 | ctl |= BGMAC_DMA_RX_ENABLE; | 324 | ctl |= BGMAC_DMA_RX_ENABLE; |
322 | ctl |= BGMAC_DMA_RX_PARITY_DISABLE; | 325 | ctl |= BGMAC_DMA_RX_PARITY_DISABLE; |
323 | ctl |= BGMAC_DMA_RX_OVERFLOW_CONT; | 326 | ctl |= BGMAC_DMA_RX_OVERFLOW_CONT; |
@@ -1046,9 +1049,9 @@ static void bgmac_enable(struct bgmac *bgmac) | |||
1046 | 1049 | ||
1047 | mode = (bgmac_read(bgmac, BGMAC_DEV_STATUS) & BGMAC_DS_MM_MASK) >> | 1050 | mode = (bgmac_read(bgmac, BGMAC_DEV_STATUS) & BGMAC_DS_MM_MASK) >> |
1048 | BGMAC_DS_MM_SHIFT; | 1051 | BGMAC_DS_MM_SHIFT; |
1049 | if (!(bgmac->feature_flags & BGMAC_FEAT_CLKCTLST) || mode != 0) | 1052 | if (bgmac->feature_flags & BGMAC_FEAT_CLKCTLST || mode != 0) |
1050 | bgmac_set(bgmac, BCMA_CLKCTLST, BCMA_CLKCTLST_FORCEHT); | 1053 | bgmac_set(bgmac, BCMA_CLKCTLST, BCMA_CLKCTLST_FORCEHT); |
1051 | if (bgmac->feature_flags & BGMAC_FEAT_CLKCTLST && mode == 2) | 1054 | if (!(bgmac->feature_flags & BGMAC_FEAT_CLKCTLST) && mode == 2) |
1052 | bgmac_cco_ctl_maskset(bgmac, 1, ~0, | 1055 | bgmac_cco_ctl_maskset(bgmac, 1, ~0, |
1053 | BGMAC_CHIPCTL_1_RXC_DLL_BYPASS); | 1056 | BGMAC_CHIPCTL_1_RXC_DLL_BYPASS); |
1054 | 1057 | ||
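
The bgmac hunk above moves the ADDREXT masking to the top of the function so that only the address-extension field (bits 16-17, per the new comment) survives from the hardware readback before the burst-length and enable bits are OR-ed in. A generic read-modify-write sketch of that ordering, with an invented register layout:

#include <stdio.h>
#include <stdint.h>

#define ADDREXT_MASK   0x00030000u   /* bits 16-17: field to preserve */
#define BURST_LEN_128  0x00000020u   /* invented control bits */
#define RX_ENABLE      0x00000001u

static uint32_t hw_reg = 0xdeadbeefu;   /* pretend hardware readback */

int main(void)
{
	uint32_t ctl = hw_reg;

	/* Preserve ONLY the address-extension field from the current value,
	 * then build the rest of the register from scratch. */
	ctl &= ADDREXT_MASK;
	ctl |= BURST_LEN_128;
	ctl |= RX_ENABLE;

	printf("old 0x%08x -> new 0x%08x (kept bits 0x%08x)\n",
	       hw_reg, ctl, hw_reg & ADDREXT_MASK);
	return 0;
}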
diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c index b3791b394715..1f7034d739b0 100644 --- a/drivers/net/ethernet/broadcom/bnx2.c +++ b/drivers/net/ethernet/broadcom/bnx2.c | |||
@@ -49,6 +49,7 @@ | |||
49 | #include <linux/firmware.h> | 49 | #include <linux/firmware.h> |
50 | #include <linux/log2.h> | 50 | #include <linux/log2.h> |
51 | #include <linux/aer.h> | 51 | #include <linux/aer.h> |
52 | #include <linux/crash_dump.h> | ||
52 | 53 | ||
53 | #if IS_ENABLED(CONFIG_CNIC) | 54 | #if IS_ENABLED(CONFIG_CNIC) |
54 | #define BCM_CNIC 1 | 55 | #define BCM_CNIC 1 |
@@ -4764,15 +4765,16 @@ bnx2_setup_msix_tbl(struct bnx2 *bp) | |||
4764 | BNX2_WR(bp, BNX2_PCI_GRC_WINDOW3_ADDR, BNX2_MSIX_PBA_ADDR); | 4765 | BNX2_WR(bp, BNX2_PCI_GRC_WINDOW3_ADDR, BNX2_MSIX_PBA_ADDR); |
4765 | } | 4766 | } |
4766 | 4767 | ||
4767 | static int | 4768 | static void |
4768 | bnx2_reset_chip(struct bnx2 *bp, u32 reset_code) | 4769 | bnx2_wait_dma_complete(struct bnx2 *bp) |
4769 | { | 4770 | { |
4770 | u32 val; | 4771 | u32 val; |
4771 | int i, rc = 0; | 4772 | int i; |
4772 | u8 old_port; | ||
4773 | 4773 | ||
4774 | /* Wait for the current PCI transaction to complete before | 4774 | /* |
4775 | * issuing a reset. */ | 4775 | * Wait for the current PCI transaction to complete before |
4776 | * issuing a reset. | ||
4777 | */ | ||
4776 | if ((BNX2_CHIP(bp) == BNX2_CHIP_5706) || | 4778 | if ((BNX2_CHIP(bp) == BNX2_CHIP_5706) || |
4777 | (BNX2_CHIP(bp) == BNX2_CHIP_5708)) { | 4779 | (BNX2_CHIP(bp) == BNX2_CHIP_5708)) { |
4778 | BNX2_WR(bp, BNX2_MISC_ENABLE_CLR_BITS, | 4780 | BNX2_WR(bp, BNX2_MISC_ENABLE_CLR_BITS, |
@@ -4796,6 +4798,21 @@ bnx2_reset_chip(struct bnx2 *bp, u32 reset_code) | |||
4796 | } | 4798 | } |
4797 | } | 4799 | } |
4798 | 4800 | ||
4801 | return; | ||
4802 | } | ||
4803 | |||
4804 | |||
4805 | static int | ||
4806 | bnx2_reset_chip(struct bnx2 *bp, u32 reset_code) | ||
4807 | { | ||
4808 | u32 val; | ||
4809 | int i, rc = 0; | ||
4810 | u8 old_port; | ||
4811 | |||
4812 | /* Wait for the current PCI transaction to complete before | ||
4813 | * issuing a reset. */ | ||
4814 | bnx2_wait_dma_complete(bp); | ||
4815 | |||
4799 | /* Wait for the firmware to tell us it is ok to issue a reset. */ | 4816 | /* Wait for the firmware to tell us it is ok to issue a reset. */ |
4800 | bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1, 1); | 4817 | bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1, 1); |
4801 | 4818 | ||
@@ -6361,6 +6378,10 @@ bnx2_open(struct net_device *dev) | |||
6361 | struct bnx2 *bp = netdev_priv(dev); | 6378 | struct bnx2 *bp = netdev_priv(dev); |
6362 | int rc; | 6379 | int rc; |
6363 | 6380 | ||
6381 | rc = bnx2_request_firmware(bp); | ||
6382 | if (rc < 0) | ||
6383 | goto out; | ||
6384 | |||
6364 | netif_carrier_off(dev); | 6385 | netif_carrier_off(dev); |
6365 | 6386 | ||
6366 | bnx2_disable_int(bp); | 6387 | bnx2_disable_int(bp); |
@@ -6429,6 +6450,7 @@ open_err: | |||
6429 | bnx2_free_irq(bp); | 6450 | bnx2_free_irq(bp); |
6430 | bnx2_free_mem(bp); | 6451 | bnx2_free_mem(bp); |
6431 | bnx2_del_napi(bp); | 6452 | bnx2_del_napi(bp); |
6453 | bnx2_release_firmware(bp); | ||
6432 | goto out; | 6454 | goto out; |
6433 | } | 6455 | } |
6434 | 6456 | ||
@@ -8575,12 +8597,15 @@ bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
8575 | 8597 | ||
8576 | pci_set_drvdata(pdev, dev); | 8598 | pci_set_drvdata(pdev, dev); |
8577 | 8599 | ||
8578 | rc = bnx2_request_firmware(bp); | 8600 | /* |
8579 | if (rc < 0) | 8601 | * In-flight DMA from 1st kernel could continue going in kdump kernel. |
8580 | goto error; | 8602 | * New io-page table has been created before bnx2 does reset at open stage. |
8581 | 8603 | * We have to wait for the in-flight DMA to complete to avoid lookups | |

8604 | * into the newly created io-page table. | ||
8605 | */ | ||
8606 | if (is_kdump_kernel()) | ||
8607 | bnx2_wait_dma_complete(bp); | ||
8582 | 8608 | ||
8583 | bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET); | ||
8584 | memcpy(dev->dev_addr, bp->mac_addr, ETH_ALEN); | 8609 | memcpy(dev->dev_addr, bp->mac_addr, ETH_ALEN); |
8585 | 8610 | ||
8586 | dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG | | 8611 | dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG | |
@@ -8613,7 +8638,6 @@ bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
8613 | return 0; | 8638 | return 0; |
8614 | 8639 | ||
8615 | error: | 8640 | error: |
8616 | bnx2_release_firmware(bp); | ||
8617 | pci_iounmap(pdev, bp->regview); | 8641 | pci_iounmap(pdev, bp->regview); |
8618 | pci_release_regions(pdev); | 8642 | pci_release_regions(pdev); |
8619 | pci_disable_device(pdev); | 8643 | pci_disable_device(pdev); |
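
The bnx2 rework above factors the "wait for in-flight PCI/DMA to complete" logic into bnx2_wait_dma_complete() and calls it from probe when is_kdump_kernel() is true, so stale DMA started by the crashed kernel cannot hit freshly created IO page tables. The underlying idiom is a bounded poll-until-idle loop; below is a userspace sketch of that idiom only, with the "hardware" simulated by a counter:

#include <stdio.h>
#include <unistd.h>
#include <stdbool.h>

/* Simulated "DMA busy" status: drains one step per poll. */
static int fake_dma_outstanding = 3;

static bool dma_idle(void)
{
	if (fake_dma_outstanding > 0)
		fake_dma_outstanding--;
	return fake_dma_outstanding == 0;
}

/* Poll until the engine drains or the attempt budget runs out. */
static int wait_dma_complete(int max_polls, useconds_t delay_us)
{
	for (int i = 0; i < max_polls; i++) {
		if (dma_idle()) {
			printf("idle after %d poll(s)\n", i + 1);
			return 0;
		}
		usleep(delay_us);
	}
	return -1;   /* timed out; caller decides whether to reset anyway */
}

int main(void)
{
	if (wait_dma_complete(10, 1000) != 0)
		printf("timed out waiting for DMA to drain\n");
	return 0;
}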
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index a9f9f3738022..c6909660e097 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c | |||
@@ -6309,6 +6309,7 @@ static int bnxt_setup_tc(struct net_device *dev, u32 handle, __be16 proto, | |||
6309 | struct tc_to_netdev *ntc) | 6309 | struct tc_to_netdev *ntc) |
6310 | { | 6310 | { |
6311 | struct bnxt *bp = netdev_priv(dev); | 6311 | struct bnxt *bp = netdev_priv(dev); |
6312 | bool sh = false; | ||
6312 | u8 tc; | 6313 | u8 tc; |
6313 | 6314 | ||
6314 | if (ntc->type != TC_SETUP_MQPRIO) | 6315 | if (ntc->type != TC_SETUP_MQPRIO) |
@@ -6325,12 +6326,11 @@ static int bnxt_setup_tc(struct net_device *dev, u32 handle, __be16 proto, | |||
6325 | if (netdev_get_num_tc(dev) == tc) | 6326 | if (netdev_get_num_tc(dev) == tc) |
6326 | return 0; | 6327 | return 0; |
6327 | 6328 | ||
6329 | if (bp->flags & BNXT_FLAG_SHARED_RINGS) | ||
6330 | sh = true; | ||
6331 | |||
6328 | if (tc) { | 6332 | if (tc) { |
6329 | int max_rx_rings, max_tx_rings, rc; | 6333 | int max_rx_rings, max_tx_rings, rc; |
6330 | bool sh = false; | ||
6331 | |||
6332 | if (bp->flags & BNXT_FLAG_SHARED_RINGS) | ||
6333 | sh = true; | ||
6334 | 6334 | ||
6335 | rc = bnxt_get_max_rings(bp, &max_rx_rings, &max_tx_rings, sh); | 6335 | rc = bnxt_get_max_rings(bp, &max_rx_rings, &max_tx_rings, sh); |
6336 | if (rc || bp->tx_nr_rings_per_tc * tc > max_tx_rings) | 6336 | if (rc || bp->tx_nr_rings_per_tc * tc > max_tx_rings) |
@@ -6348,7 +6348,8 @@ static int bnxt_setup_tc(struct net_device *dev, u32 handle, __be16 proto, | |||
6348 | bp->tx_nr_rings = bp->tx_nr_rings_per_tc; | 6348 | bp->tx_nr_rings = bp->tx_nr_rings_per_tc; |
6349 | netdev_reset_tc(dev); | 6349 | netdev_reset_tc(dev); |
6350 | } | 6350 | } |
6351 | bp->cp_nr_rings = max_t(int, bp->tx_nr_rings, bp->rx_nr_rings); | 6351 | bp->cp_nr_rings = sh ? max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) : |
6352 | bp->tx_nr_rings + bp->rx_nr_rings; | ||
6352 | bp->num_stat_ctxs = bp->cp_nr_rings; | 6353 | bp->num_stat_ctxs = bp->cp_nr_rings; |
6353 | 6354 | ||
6354 | if (netif_running(bp->dev)) | 6355 | if (netif_running(bp->dev)) |
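
The bnxt_setup_tc change above hoists the shared-ring check out of the if (tc) branch so the completion-ring count is computed the same way on both the TC and non-TC paths: with shared rings one completion ring serves an RX/TX pair, otherwise each ring gets its own. A tiny sketch of that arithmetic, with invented function and variable names:

#include <stdio.h>

/* Completion rings needed: shared mode pairs one CP ring with an RX/TX pair,
 * separate mode gives every RX and TX ring its own CP ring. */
static int cp_rings_needed(int tx_rings, int rx_rings, int shared)
{
	if (shared)
		return tx_rings > rx_rings ? tx_rings : rx_rings;
	return tx_rings + rx_rings;
}

int main(void)
{
	printf("shared:   tx=8 rx=8 -> cp=%d\n", cp_rings_needed(8, 8, 1));
	printf("separate: tx=8 rx=8 -> cp=%d\n", cp_rings_needed(8, 8, 0));
	return 0;
}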
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c index ec6cd18842c3..60e2af8678bd 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c | |||
@@ -774,8 +774,8 @@ static int bnxt_vf_set_link(struct bnxt *bp, struct bnxt_vf_info *vf) | |||
774 | 774 | ||
775 | if (vf->flags & BNXT_VF_LINK_UP) { | 775 | if (vf->flags & BNXT_VF_LINK_UP) { |
776 | /* if physical link is down, force link up on VF */ | 776 | /* if physical link is down, force link up on VF */ |
777 | if (phy_qcfg_resp.link == | 777 | if (phy_qcfg_resp.link != |
778 | PORT_PHY_QCFG_RESP_LINK_NO_LINK) { | 778 | PORT_PHY_QCFG_RESP_LINK_LINK) { |
779 | phy_qcfg_resp.link = | 779 | phy_qcfg_resp.link = |
780 | PORT_PHY_QCFG_RESP_LINK_LINK; | 780 | PORT_PHY_QCFG_RESP_LINK_LINK; |
781 | phy_qcfg_resp.link_speed = cpu_to_le16( | 781 | phy_qcfg_resp.link_speed = cpu_to_le16( |
diff --git a/drivers/net/ethernet/brocade/bna/bnad.c b/drivers/net/ethernet/brocade/bna/bnad.c index f9df4b5ae90e..f42f672b0e7e 100644 --- a/drivers/net/ethernet/brocade/bna/bnad.c +++ b/drivers/net/ethernet/brocade/bna/bnad.c | |||
@@ -177,6 +177,7 @@ bnad_txcmpl_process(struct bnad *bnad, struct bna_tcb *tcb) | |||
177 | return 0; | 177 | return 0; |
178 | 178 | ||
179 | hw_cons = *(tcb->hw_consumer_index); | 179 | hw_cons = *(tcb->hw_consumer_index); |
180 | rmb(); | ||
180 | cons = tcb->consumer_index; | 181 | cons = tcb->consumer_index; |
181 | q_depth = tcb->q_depth; | 182 | q_depth = tcb->q_depth; |
182 | 183 | ||
@@ -3094,7 +3095,7 @@ bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev) | |||
3094 | BNA_QE_INDX_INC(prod, q_depth); | 3095 | BNA_QE_INDX_INC(prod, q_depth); |
3095 | tcb->producer_index = prod; | 3096 | tcb->producer_index = prod; |
3096 | 3097 | ||
3097 | smp_mb(); | 3098 | wmb(); |
3098 | 3099 | ||
3099 | if (unlikely(!test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))) | 3100 | if (unlikely(!test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))) |
3100 | return NETDEV_TX_OK; | 3101 | return NETDEV_TX_OK; |
@@ -3102,7 +3103,6 @@ bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev) | |||
3102 | skb_tx_timestamp(skb); | 3103 | skb_tx_timestamp(skb); |
3103 | 3104 | ||
3104 | bna_txq_prod_indx_doorbell(tcb); | 3105 | bna_txq_prod_indx_doorbell(tcb); |
3105 | smp_mb(); | ||
3106 | 3106 | ||
3107 | return NETDEV_TX_OK; | 3107 | return NETDEV_TX_OK; |
3108 | } | 3108 | } |
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h b/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h index 50812a1d67bd..df1573c4a659 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h | |||
@@ -178,9 +178,9 @@ CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN | |||
178 | CH_PCI_ID_TABLE_FENTRY(0x6005), | 178 | CH_PCI_ID_TABLE_FENTRY(0x6005), |
179 | CH_PCI_ID_TABLE_FENTRY(0x6006), | 179 | CH_PCI_ID_TABLE_FENTRY(0x6006), |
180 | CH_PCI_ID_TABLE_FENTRY(0x6007), | 180 | CH_PCI_ID_TABLE_FENTRY(0x6007), |
181 | CH_PCI_ID_TABLE_FENTRY(0x6008), | ||
181 | CH_PCI_ID_TABLE_FENTRY(0x6009), | 182 | CH_PCI_ID_TABLE_FENTRY(0x6009), |
182 | CH_PCI_ID_TABLE_FENTRY(0x600d), | 183 | CH_PCI_ID_TABLE_FENTRY(0x600d), |
183 | CH_PCI_ID_TABLE_FENTRY(0x6010), | ||
184 | CH_PCI_ID_TABLE_FENTRY(0x6011), | 184 | CH_PCI_ID_TABLE_FENTRY(0x6011), |
185 | CH_PCI_ID_TABLE_FENTRY(0x6014), | 185 | CH_PCI_ID_TABLE_FENTRY(0x6014), |
186 | CH_PCI_ID_TABLE_FENTRY(0x6015), | 186 | CH_PCI_ID_TABLE_FENTRY(0x6015), |
diff --git a/drivers/net/ethernet/hisilicon/hns/hnae.c b/drivers/net/ethernet/hisilicon/hns/hnae.c index c54c6fac0d1d..b6ed818f78ff 100644 --- a/drivers/net/ethernet/hisilicon/hns/hnae.c +++ b/drivers/net/ethernet/hisilicon/hns/hnae.c | |||
@@ -332,8 +332,10 @@ struct hnae_handle *hnae_get_handle(struct device *owner_dev, | |||
332 | return ERR_PTR(-ENODEV); | 332 | return ERR_PTR(-ENODEV); |
333 | 333 | ||
334 | handle = dev->ops->get_handle(dev, port_id); | 334 | handle = dev->ops->get_handle(dev, port_id); |
335 | if (IS_ERR(handle)) | 335 | if (IS_ERR(handle)) { |
336 | put_device(&dev->cls_dev); | ||
336 | return handle; | 337 | return handle; |
338 | } | ||
337 | 339 | ||
338 | handle->dev = dev; | 340 | handle->dev = dev; |
339 | handle->owner_dev = owner_dev; | 341 | handle->owner_dev = owner_dev; |
@@ -356,6 +358,8 @@ out_when_init_queue: | |||
356 | for (j = i - 1; j >= 0; j--) | 358 | for (j = i - 1; j >= 0; j--) |
357 | hnae_fini_queue(handle->qs[j]); | 359 | hnae_fini_queue(handle->qs[j]); |
358 | 360 | ||
361 | put_device(&dev->cls_dev); | ||
362 | |||
359 | return ERR_PTR(-ENOMEM); | 363 | return ERR_PTR(-ENOMEM); |
360 | } | 364 | } |
361 | EXPORT_SYMBOL(hnae_get_handle); | 365 | EXPORT_SYMBOL(hnae_get_handle); |
@@ -377,6 +381,8 @@ void hnae_put_handle(struct hnae_handle *h) | |||
377 | dev->ops->put_handle(h); | 381 | dev->ops->put_handle(h); |
378 | 382 | ||
379 | module_put(dev->owner); | 383 | module_put(dev->owner); |
384 | |||
385 | put_device(&dev->cls_dev); | ||
380 | } | 386 | } |
381 | EXPORT_SYMBOL(hnae_put_handle); | 387 | EXPORT_SYMBOL(hnae_put_handle); |
382 | 388 | ||
diff --git a/drivers/net/ethernet/ibm/ehea/ehea_main.c b/drivers/net/ethernet/ibm/ehea/ehea_main.c index 54efa9a5167b..bd719e25dd76 100644 --- a/drivers/net/ethernet/ibm/ehea/ehea_main.c +++ b/drivers/net/ethernet/ibm/ehea/ehea_main.c | |||
@@ -2446,6 +2446,8 @@ static int ehea_open(struct net_device *dev) | |||
2446 | 2446 | ||
2447 | netif_info(port, ifup, dev, "enabling port\n"); | 2447 | netif_info(port, ifup, dev, "enabling port\n"); |
2448 | 2448 | ||
2449 | netif_carrier_off(dev); | ||
2450 | |||
2449 | ret = ehea_up(dev); | 2451 | ret = ehea_up(dev); |
2450 | if (!ret) { | 2452 | if (!ret) { |
2451 | port_napi_enable(port); | 2453 | port_napi_enable(port); |
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c index 5f44c5520fbc..4f3281a03e7e 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.c +++ b/drivers/net/ethernet/ibm/ibmvnic.c | |||
@@ -1505,9 +1505,8 @@ static void init_sub_crqs(struct ibmvnic_adapter *adapter, int retry) | |||
1505 | adapter->max_rx_add_entries_per_subcrq > entries_page ? | 1505 | adapter->max_rx_add_entries_per_subcrq > entries_page ? |
1506 | entries_page : adapter->max_rx_add_entries_per_subcrq; | 1506 | entries_page : adapter->max_rx_add_entries_per_subcrq; |
1507 | 1507 | ||
1508 | /* Choosing the maximum number of queues supported by firmware*/ | 1508 | adapter->req_tx_queues = adapter->opt_tx_comp_sub_queues; |
1509 | adapter->req_tx_queues = adapter->max_tx_queues; | 1509 | adapter->req_rx_queues = adapter->opt_rx_comp_queues; |
1510 | adapter->req_rx_queues = adapter->max_rx_queues; | ||
1511 | adapter->req_rx_add_queues = adapter->max_rx_add_queues; | 1510 | adapter->req_rx_add_queues = adapter->max_rx_add_queues; |
1512 | 1511 | ||
1513 | adapter->req_mtu = adapter->max_mtu; | 1512 | adapter->req_mtu = adapter->max_mtu; |
@@ -3706,7 +3705,7 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id) | |||
3706 | struct net_device *netdev; | 3705 | struct net_device *netdev; |
3707 | unsigned char *mac_addr_p; | 3706 | unsigned char *mac_addr_p; |
3708 | struct dentry *ent; | 3707 | struct dentry *ent; |
3709 | char buf[16]; /* debugfs name buf */ | 3708 | char buf[17]; /* debugfs name buf */ |
3710 | int rc; | 3709 | int rc; |
3711 | 3710 | ||
3712 | dev_dbg(&dev->dev, "entering ibmvnic_probe for UA 0x%x\n", | 3711 | dev_dbg(&dev->dev, "entering ibmvnic_probe for UA 0x%x\n", |
@@ -3845,6 +3844,9 @@ static int ibmvnic_remove(struct vio_dev *dev) | |||
3845 | if (adapter->debugfs_dir && !IS_ERR(adapter->debugfs_dir)) | 3844 | if (adapter->debugfs_dir && !IS_ERR(adapter->debugfs_dir)) |
3846 | debugfs_remove_recursive(adapter->debugfs_dir); | 3845 | debugfs_remove_recursive(adapter->debugfs_dir); |
3847 | 3846 | ||
3847 | dma_unmap_single(&dev->dev, adapter->stats_token, | ||
3848 | sizeof(struct ibmvnic_statistics), DMA_FROM_DEVICE); | ||
3849 | |||
3848 | if (adapter->ras_comps) | 3850 | if (adapter->ras_comps) |
3849 | dma_free_coherent(&dev->dev, | 3851 | dma_free_coherent(&dev->dev, |
3850 | adapter->ras_comp_num * | 3852 | adapter->ras_comp_num * |
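
The ibmvnic probe change above grows the debugfs name buffer from 16 to 17 bytes. The general rule it illustrates is to size a buffer for the worst-case formatted length plus the terminating NUL, or to check what snprintf() reports. The sketch below uses a hypothetical "ibmvnic_%x" format string for illustration; the driver's real format string is not shown in this hunk.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t unit_address = 0xdeadbeef;     /* worst case: 8 hex digits */
	char small[16];                         /* 8-char prefix + 8 digits needs 17 */
	char right[17];

	/* snprintf() returns the length it wanted to write, so a return value
	 * >= the buffer size means the output was truncated. */
	int n = snprintf(small, sizeof(small), "ibmvnic_%x", unit_address);
	if (n >= (int)sizeof(small))
		printf("truncated: wanted %d bytes, got \"%s\"\n", n + 1, small);

	n = snprintf(right, sizeof(right), "ibmvnic_%x", unit_address);
	printf("fits: \"%s\" (%d chars + NUL)\n", right, n);
	return 0;
}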
diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c index bf5cc55ba24c..5b12022adf1f 100644 --- a/drivers/net/ethernet/marvell/mv643xx_eth.c +++ b/drivers/net/ethernet/marvell/mv643xx_eth.c | |||
@@ -1381,6 +1381,7 @@ static unsigned int get_rx_coal(struct mv643xx_eth_private *mp) | |||
1381 | temp = (val & 0x003fff00) >> 8; | 1381 | temp = (val & 0x003fff00) >> 8; |
1382 | 1382 | ||
1383 | temp *= 64000000; | 1383 | temp *= 64000000; |
1384 | temp += mp->t_clk / 2; | ||
1384 | do_div(temp, mp->t_clk); | 1385 | do_div(temp, mp->t_clk); |
1385 | 1386 | ||
1386 | return (unsigned int)temp; | 1387 | return (unsigned int)temp; |
@@ -1417,6 +1418,7 @@ static unsigned int get_tx_coal(struct mv643xx_eth_private *mp) | |||
1417 | 1418 | ||
1418 | temp = (rdlp(mp, TX_FIFO_URGENT_THRESHOLD) & 0x3fff0) >> 4; | 1419 | temp = (rdlp(mp, TX_FIFO_URGENT_THRESHOLD) & 0x3fff0) >> 4; |
1419 | temp *= 64000000; | 1420 | temp *= 64000000; |
1421 | temp += mp->t_clk / 2; | ||
1420 | do_div(temp, mp->t_clk); | 1422 | do_div(temp, mp->t_clk); |
1421 | 1423 | ||
1422 | return (unsigned int)temp; | 1424 | return (unsigned int)temp; |
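
The mv643xx_eth coalescing readback above adds half the divisor before do_div(), turning a truncating integer division into round-to-nearest so repeated get/set conversions do not drift downward. In plain C the same trick looks like this (the clock and tick values are invented for illustration):

#include <stdio.h>
#include <stdint.h>

/* Round-to-nearest unsigned division: add half the divisor before dividing. */
static uint64_t div_round_nearest(uint64_t num, uint64_t den)
{
	return (num + den / 2) / den;
}

int main(void)
{
	uint64_t ticks = 7;                /* e.g. a hardware timer field */
	uint64_t t_clk = 125000000;        /* invented core clock, Hz */
	uint64_t scaled = ticks * 64000000;

	printf("truncating: %llu\n", (unsigned long long)(scaled / t_clk));
	printf("rounded:    %llu\n",
	       (unsigned long long)div_round_nearest(scaled, t_clk));
	return 0;
}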
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c index 12c99a2655f2..3a47e83d3e07 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c | |||
@@ -2202,7 +2202,6 @@ void mlx4_en_destroy_netdev(struct net_device *dev) | |||
2202 | 2202 | ||
2203 | if (!shutdown) | 2203 | if (!shutdown) |
2204 | free_netdev(dev); | 2204 | free_netdev(dev); |
2205 | dev->ethtool_ops = NULL; | ||
2206 | } | 2205 | } |
2207 | 2206 | ||
2208 | static int mlx4_en_change_mtu(struct net_device *dev, int new_mtu) | 2207 | static int mlx4_en_change_mtu(struct net_device *dev, int new_mtu) |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index f4c687ce4c59..84e8b250e2af 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c | |||
@@ -1445,6 +1445,7 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix, | |||
1445 | c->netdev = priv->netdev; | 1445 | c->netdev = priv->netdev; |
1446 | c->mkey_be = cpu_to_be32(priv->mdev->mlx5e_res.mkey.key); | 1446 | c->mkey_be = cpu_to_be32(priv->mdev->mlx5e_res.mkey.key); |
1447 | c->num_tc = priv->params.num_tc; | 1447 | c->num_tc = priv->params.num_tc; |
1448 | c->xdp = !!priv->xdp_prog; | ||
1448 | 1449 | ||
1449 | if (priv->params.rx_am_enabled) | 1450 | if (priv->params.rx_am_enabled) |
1450 | rx_cq_profile = mlx5e_am_get_def_profile(priv->params.rx_cq_period_mode); | 1451 | rx_cq_profile = mlx5e_am_get_def_profile(priv->params.rx_cq_period_mode); |
@@ -1468,6 +1469,12 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix, | |||
1468 | if (err) | 1469 | if (err) |
1469 | goto err_close_tx_cqs; | 1470 | goto err_close_tx_cqs; |
1470 | 1471 | ||
1472 | /* XDP SQ CQ params are the same as the normal TXQ SQ CQ params */ | ||
1473 | err = c->xdp ? mlx5e_open_cq(c, &cparam->tx_cq, &c->xdp_sq.cq, | ||
1474 | priv->params.tx_cq_moderation) : 0; | ||
1475 | if (err) | ||
1476 | goto err_close_rx_cq; | ||
1477 | |||
1471 | napi_enable(&c->napi); | 1478 | napi_enable(&c->napi); |
1472 | 1479 | ||
1473 | err = mlx5e_open_sq(c, 0, &cparam->icosq, &c->icosq); | 1480 | err = mlx5e_open_sq(c, 0, &cparam->icosq, &c->icosq); |
@@ -1488,21 +1495,10 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix, | |||
1488 | } | 1495 | } |
1489 | } | 1496 | } |
1490 | 1497 | ||
1491 | if (priv->xdp_prog) { | 1498 | err = c->xdp ? mlx5e_open_sq(c, 0, &cparam->xdp_sq, &c->xdp_sq) : 0; |
1492 | /* XDP SQ CQ params are same as normal TXQ sq CQ params */ | 1499 | if (err) |
1493 | err = mlx5e_open_cq(c, &cparam->tx_cq, &c->xdp_sq.cq, | 1500 | goto err_close_sqs; |
1494 | priv->params.tx_cq_moderation); | ||
1495 | if (err) | ||
1496 | goto err_close_sqs; | ||
1497 | |||
1498 | err = mlx5e_open_sq(c, 0, &cparam->xdp_sq, &c->xdp_sq); | ||
1499 | if (err) { | ||
1500 | mlx5e_close_cq(&c->xdp_sq.cq); | ||
1501 | goto err_close_sqs; | ||
1502 | } | ||
1503 | } | ||
1504 | 1501 | ||
1505 | c->xdp = !!priv->xdp_prog; | ||
1506 | err = mlx5e_open_rq(c, &cparam->rq, &c->rq); | 1502 | err = mlx5e_open_rq(c, &cparam->rq, &c->rq); |
1507 | if (err) | 1503 | if (err) |
1508 | goto err_close_xdp_sq; | 1504 | goto err_close_xdp_sq; |
@@ -1512,7 +1508,8 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix, | |||
1512 | 1508 | ||
1513 | return 0; | 1509 | return 0; |
1514 | err_close_xdp_sq: | 1510 | err_close_xdp_sq: |
1515 | mlx5e_close_sq(&c->xdp_sq); | 1511 | if (c->xdp) |
1512 | mlx5e_close_sq(&c->xdp_sq); | ||
1516 | 1513 | ||
1517 | err_close_sqs: | 1514 | err_close_sqs: |
1518 | mlx5e_close_sqs(c); | 1515 | mlx5e_close_sqs(c); |
@@ -1522,6 +1519,10 @@ err_close_icosq: | |||
1522 | 1519 | ||
1523 | err_disable_napi: | 1520 | err_disable_napi: |
1524 | napi_disable(&c->napi); | 1521 | napi_disable(&c->napi); |
1522 | if (c->xdp) | ||
1523 | mlx5e_close_cq(&c->xdp_sq.cq); | ||
1524 | |||
1525 | err_close_rx_cq: | ||
1525 | mlx5e_close_cq(&c->rq.cq); | 1526 | mlx5e_close_cq(&c->rq.cq); |
1526 | 1527 | ||
1527 | err_close_tx_cqs: | 1528 | err_close_tx_cqs: |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c index 7fe6559e4ab3..bf1c09ca73c0 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c | |||
@@ -308,7 +308,7 @@ static void mlx5e_build_rep_netdev(struct net_device *netdev) | |||
308 | netdev->switchdev_ops = &mlx5e_rep_switchdev_ops; | 308 | netdev->switchdev_ops = &mlx5e_rep_switchdev_ops; |
309 | #endif | 309 | #endif |
310 | 310 | ||
311 | netdev->features |= NETIF_F_VLAN_CHALLENGED | NETIF_F_HW_TC; | 311 | netdev->features |= NETIF_F_VLAN_CHALLENGED | NETIF_F_HW_TC | NETIF_F_NETNS_LOCAL; |
312 | netdev->hw_features |= NETIF_F_HW_TC; | 312 | netdev->hw_features |= NETIF_F_HW_TC; |
313 | 313 | ||
314 | eth_hw_addr_random(netdev); | 314 | eth_hw_addr_random(netdev); |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c index ce8c54d18906..6bb21b31cfeb 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | |||
@@ -237,12 +237,15 @@ static int parse_cls_flower(struct mlx5e_priv *priv, struct mlx5_flow_spec *spec | |||
237 | skb_flow_dissector_target(f->dissector, | 237 | skb_flow_dissector_target(f->dissector, |
238 | FLOW_DISSECTOR_KEY_VLAN, | 238 | FLOW_DISSECTOR_KEY_VLAN, |
239 | f->mask); | 239 | f->mask); |
240 | if (mask->vlan_id) { | 240 | if (mask->vlan_id || mask->vlan_priority) { |
241 | MLX5_SET(fte_match_set_lyr_2_4, headers_c, vlan_tag, 1); | 241 | MLX5_SET(fte_match_set_lyr_2_4, headers_c, vlan_tag, 1); |
242 | MLX5_SET(fte_match_set_lyr_2_4, headers_v, vlan_tag, 1); | 242 | MLX5_SET(fte_match_set_lyr_2_4, headers_v, vlan_tag, 1); |
243 | 243 | ||
244 | MLX5_SET(fte_match_set_lyr_2_4, headers_c, first_vid, mask->vlan_id); | 244 | MLX5_SET(fte_match_set_lyr_2_4, headers_c, first_vid, mask->vlan_id); |
245 | MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_vid, key->vlan_id); | 245 | MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_vid, key->vlan_id); |
246 | |||
247 | MLX5_SET(fte_match_set_lyr_2_4, headers_c, first_prio, mask->vlan_priority); | ||
248 | MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_prio, key->vlan_priority); | ||
246 | } | 249 | } |
247 | } | 250 | } |
248 | 251 | ||
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c index c55ad8d00c05..d239f5d0ea36 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c | |||
@@ -57,7 +57,8 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw, | |||
57 | if (esw->mode != SRIOV_OFFLOADS) | 57 | if (esw->mode != SRIOV_OFFLOADS) |
58 | return ERR_PTR(-EOPNOTSUPP); | 58 | return ERR_PTR(-EOPNOTSUPP); |
59 | 59 | ||
60 | action = attr->action; | 60 | /* per-flow vlan pop/push is emulated, so don't pass those actions to the firmware */ |
61 | action = attr->action & ~(MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH | MLX5_FLOW_CONTEXT_ACTION_VLAN_POP); | ||
61 | 62 | ||
62 | if (action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) { | 63 | if (action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) { |
63 | dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT; | 64 | dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT; |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c index 89696048b045..914e5466f729 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c | |||
@@ -1690,7 +1690,7 @@ static int init_root_ns(struct mlx5_flow_steering *steering) | |||
1690 | { | 1690 | { |
1691 | 1691 | ||
1692 | steering->root_ns = create_root_ns(steering, FS_FT_NIC_RX); | 1692 | steering->root_ns = create_root_ns(steering, FS_FT_NIC_RX); |
1693 | if (IS_ERR_OR_NULL(steering->root_ns)) | 1693 | if (!steering->root_ns) |
1694 | goto cleanup; | 1694 | goto cleanup; |
1695 | 1695 | ||
1696 | if (init_root_tree(steering, &root_fs, &steering->root_ns->ns.node)) | 1696 | if (init_root_tree(steering, &root_fs, &steering->root_ns->ns.node)) |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c index d5433c49b2b0..3eb931585b3e 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c | |||
@@ -1226,6 +1226,9 @@ static int init_one(struct pci_dev *pdev, | |||
1226 | 1226 | ||
1227 | pci_set_drvdata(pdev, dev); | 1227 | pci_set_drvdata(pdev, dev); |
1228 | 1228 | ||
1229 | dev->pdev = pdev; | ||
1230 | dev->event = mlx5_core_event; | ||
1231 | |||
1229 | if (prof_sel < 0 || prof_sel >= ARRAY_SIZE(profile)) { | 1232 | if (prof_sel < 0 || prof_sel >= ARRAY_SIZE(profile)) { |
1230 | mlx5_core_warn(dev, | 1233 | mlx5_core_warn(dev, |
1231 | "selected profile out of range, selecting default (%d)\n", | 1234 | "selected profile out of range, selecting default (%d)\n", |
@@ -1233,8 +1236,6 @@ static int init_one(struct pci_dev *pdev, | |||
1233 | prof_sel = MLX5_DEFAULT_PROF; | 1236 | prof_sel = MLX5_DEFAULT_PROF; |
1234 | } | 1237 | } |
1235 | dev->profile = &profile[prof_sel]; | 1238 | dev->profile = &profile[prof_sel]; |
1236 | dev->pdev = pdev; | ||
1237 | dev->event = mlx5_core_event; | ||
1238 | 1239 | ||
1239 | INIT_LIST_HEAD(&priv->ctx_list); | 1240 | INIT_LIST_HEAD(&priv->ctx_list); |
1240 | spin_lock_init(&priv->ctx_lock); | 1241 | spin_lock_init(&priv->ctx_lock); |
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c index 1ec0a4ce3c46..dda5761e91bc 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c | |||
@@ -231,7 +231,7 @@ mlxsw_sp_span_entry_create(struct mlxsw_sp_port *port) | |||
231 | 231 | ||
232 | span_entry->used = true; | 232 | span_entry->used = true; |
233 | span_entry->id = index; | 233 | span_entry->id = index; |
234 | span_entry->ref_count = 0; | 234 | span_entry->ref_count = 1; |
235 | span_entry->local_port = local_port; | 235 | span_entry->local_port = local_port; |
236 | return span_entry; | 236 | return span_entry; |
237 | } | 237 | } |
@@ -270,6 +270,7 @@ static struct mlxsw_sp_span_entry | |||
270 | 270 | ||
271 | span_entry = mlxsw_sp_span_entry_find(port); | 271 | span_entry = mlxsw_sp_span_entry_find(port); |
272 | if (span_entry) { | 272 | if (span_entry) { |
273 | /* Already exists, just take a reference */ | ||
273 | span_entry->ref_count++; | 274 | span_entry->ref_count++; |
274 | return span_entry; | 275 | return span_entry; |
275 | } | 276 | } |
@@ -280,6 +281,7 @@ static struct mlxsw_sp_span_entry | |||
280 | static int mlxsw_sp_span_entry_put(struct mlxsw_sp *mlxsw_sp, | 281 | static int mlxsw_sp_span_entry_put(struct mlxsw_sp *mlxsw_sp, |
281 | struct mlxsw_sp_span_entry *span_entry) | 282 | struct mlxsw_sp_span_entry *span_entry) |
282 | { | 283 | { |
284 | WARN_ON(!span_entry->ref_count); | ||
283 | if (--span_entry->ref_count == 0) | 285 | if (--span_entry->ref_count == 0) |
284 | mlxsw_sp_span_entry_destroy(mlxsw_sp, span_entry); | 286 | mlxsw_sp_span_entry_destroy(mlxsw_sp, span_entry); |
285 | return 0; | 287 | return 0; |
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h index 9b22863a924b..97bbc1d21df8 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h | |||
@@ -115,7 +115,7 @@ struct mlxsw_sp_rif { | |||
115 | struct mlxsw_sp_mid { | 115 | struct mlxsw_sp_mid { |
116 | struct list_head list; | 116 | struct list_head list; |
117 | unsigned char addr[ETH_ALEN]; | 117 | unsigned char addr[ETH_ALEN]; |
118 | u16 vid; | 118 | u16 fid; |
119 | u16 mid; | 119 | u16 mid; |
120 | unsigned int ref_count; | 120 | unsigned int ref_count; |
121 | }; | 121 | }; |
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c index 4573da2c5560..e83072da6272 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c | |||
@@ -594,21 +594,22 @@ static int mlxsw_sp_vrs_init(struct mlxsw_sp *mlxsw_sp) | |||
594 | return 0; | 594 | return 0; |
595 | } | 595 | } |
596 | 596 | ||
597 | static void mlxsw_sp_router_fib_flush(struct mlxsw_sp *mlxsw_sp); | ||
598 | |||
597 | static void mlxsw_sp_vrs_fini(struct mlxsw_sp *mlxsw_sp) | 599 | static void mlxsw_sp_vrs_fini(struct mlxsw_sp *mlxsw_sp) |
598 | { | 600 | { |
601 | mlxsw_sp_router_fib_flush(mlxsw_sp); | ||
599 | kfree(mlxsw_sp->router.vrs); | 602 | kfree(mlxsw_sp->router.vrs); |
600 | } | 603 | } |
601 | 604 | ||
602 | struct mlxsw_sp_neigh_key { | 605 | struct mlxsw_sp_neigh_key { |
603 | unsigned char addr[sizeof(struct in6_addr)]; | 606 | struct neighbour *n; |
604 | struct net_device *dev; | ||
605 | }; | 607 | }; |
606 | 608 | ||
607 | struct mlxsw_sp_neigh_entry { | 609 | struct mlxsw_sp_neigh_entry { |
608 | struct rhash_head ht_node; | 610 | struct rhash_head ht_node; |
609 | struct mlxsw_sp_neigh_key key; | 611 | struct mlxsw_sp_neigh_key key; |
610 | u16 rif; | 612 | u16 rif; |
611 | struct neighbour *n; | ||
612 | bool offloaded; | 613 | bool offloaded; |
613 | struct delayed_work dw; | 614 | struct delayed_work dw; |
614 | struct mlxsw_sp_port *mlxsw_sp_port; | 615 | struct mlxsw_sp_port *mlxsw_sp_port; |
@@ -646,19 +647,15 @@ mlxsw_sp_neigh_entry_remove(struct mlxsw_sp *mlxsw_sp, | |||
646 | static void mlxsw_sp_router_neigh_update_hw(struct work_struct *work); | 647 | static void mlxsw_sp_router_neigh_update_hw(struct work_struct *work); |
647 | 648 | ||
648 | static struct mlxsw_sp_neigh_entry * | 649 | static struct mlxsw_sp_neigh_entry * |
649 | mlxsw_sp_neigh_entry_create(const void *addr, size_t addr_len, | 650 | mlxsw_sp_neigh_entry_create(struct neighbour *n, u16 rif) |
650 | struct net_device *dev, u16 rif, | ||
651 | struct neighbour *n) | ||
652 | { | 651 | { |
653 | struct mlxsw_sp_neigh_entry *neigh_entry; | 652 | struct mlxsw_sp_neigh_entry *neigh_entry; |
654 | 653 | ||
655 | neigh_entry = kzalloc(sizeof(*neigh_entry), GFP_ATOMIC); | 654 | neigh_entry = kzalloc(sizeof(*neigh_entry), GFP_ATOMIC); |
656 | if (!neigh_entry) | 655 | if (!neigh_entry) |
657 | return NULL; | 656 | return NULL; |
658 | memcpy(neigh_entry->key.addr, addr, addr_len); | 657 | neigh_entry->key.n = n; |
659 | neigh_entry->key.dev = dev; | ||
660 | neigh_entry->rif = rif; | 658 | neigh_entry->rif = rif; |
661 | neigh_entry->n = n; | ||
662 | INIT_DELAYED_WORK(&neigh_entry->dw, mlxsw_sp_router_neigh_update_hw); | 659 | INIT_DELAYED_WORK(&neigh_entry->dw, mlxsw_sp_router_neigh_update_hw); |
663 | INIT_LIST_HEAD(&neigh_entry->nexthop_list); | 660 | INIT_LIST_HEAD(&neigh_entry->nexthop_list); |
664 | return neigh_entry; | 661 | return neigh_entry; |
@@ -671,13 +668,11 @@ mlxsw_sp_neigh_entry_destroy(struct mlxsw_sp_neigh_entry *neigh_entry) | |||
671 | } | 668 | } |
672 | 669 | ||
673 | static struct mlxsw_sp_neigh_entry * | 670 | static struct mlxsw_sp_neigh_entry * |
674 | mlxsw_sp_neigh_entry_lookup(struct mlxsw_sp *mlxsw_sp, const void *addr, | 671 | mlxsw_sp_neigh_entry_lookup(struct mlxsw_sp *mlxsw_sp, struct neighbour *n) |
675 | size_t addr_len, struct net_device *dev) | ||
676 | { | 672 | { |
677 | struct mlxsw_sp_neigh_key key = {{ 0 } }; | 673 | struct mlxsw_sp_neigh_key key; |
678 | 674 | ||
679 | memcpy(key.addr, addr, addr_len); | 675 | key.n = n; |
680 | key.dev = dev; | ||
681 | return rhashtable_lookup_fast(&mlxsw_sp->router.neigh_ht, | 676 | return rhashtable_lookup_fast(&mlxsw_sp->router.neigh_ht, |
682 | &key, mlxsw_sp_neigh_ht_params); | 677 | &key, mlxsw_sp_neigh_ht_params); |
683 | } | 678 | } |
@@ -689,26 +684,20 @@ int mlxsw_sp_router_neigh_construct(struct net_device *dev, | |||
689 | struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; | 684 | struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; |
690 | struct mlxsw_sp_neigh_entry *neigh_entry; | 685 | struct mlxsw_sp_neigh_entry *neigh_entry; |
691 | struct mlxsw_sp_rif *r; | 686 | struct mlxsw_sp_rif *r; |
692 | u32 dip; | ||
693 | int err; | 687 | int err; |
694 | 688 | ||
695 | if (n->tbl != &arp_tbl) | 689 | if (n->tbl != &arp_tbl) |
696 | return 0; | 690 | return 0; |
697 | 691 | ||
698 | dip = ntohl(*((__be32 *) n->primary_key)); | 692 | neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, n); |
699 | neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, &dip, sizeof(dip), | 693 | if (neigh_entry) |
700 | n->dev); | ||
701 | if (neigh_entry) { | ||
702 | WARN_ON(neigh_entry->n != n); | ||
703 | return 0; | 694 | return 0; |
704 | } | ||
705 | 695 | ||
706 | r = mlxsw_sp_rif_find_by_dev(mlxsw_sp, n->dev); | 696 | r = mlxsw_sp_rif_find_by_dev(mlxsw_sp, n->dev); |
707 | if (WARN_ON(!r)) | 697 | if (WARN_ON(!r)) |
708 | return -EINVAL; | 698 | return -EINVAL; |
709 | 699 | ||
710 | neigh_entry = mlxsw_sp_neigh_entry_create(&dip, sizeof(dip), n->dev, | 700 | neigh_entry = mlxsw_sp_neigh_entry_create(n, r->rif); |
711 | r->rif, n); | ||
712 | if (!neigh_entry) | 701 | if (!neigh_entry) |
713 | return -ENOMEM; | 702 | return -ENOMEM; |
714 | err = mlxsw_sp_neigh_entry_insert(mlxsw_sp, neigh_entry); | 703 | err = mlxsw_sp_neigh_entry_insert(mlxsw_sp, neigh_entry); |
@@ -727,14 +716,11 @@ void mlxsw_sp_router_neigh_destroy(struct net_device *dev, | |||
727 | struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); | 716 | struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); |
728 | struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; | 717 | struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; |
729 | struct mlxsw_sp_neigh_entry *neigh_entry; | 718 | struct mlxsw_sp_neigh_entry *neigh_entry; |
730 | u32 dip; | ||
731 | 719 | ||
732 | if (n->tbl != &arp_tbl) | 720 | if (n->tbl != &arp_tbl) |
733 | return; | 721 | return; |
734 | 722 | ||
735 | dip = ntohl(*((__be32 *) n->primary_key)); | 723 | neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, n); |
736 | neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, &dip, sizeof(dip), | ||
737 | n->dev); | ||
738 | if (!neigh_entry) | 724 | if (!neigh_entry) |
739 | return; | 725 | return; |
740 | mlxsw_sp_neigh_entry_remove(mlxsw_sp, neigh_entry); | 726 | mlxsw_sp_neigh_entry_remove(mlxsw_sp, neigh_entry); |
@@ -817,6 +803,26 @@ static void mlxsw_sp_router_neigh_rec_process(struct mlxsw_sp *mlxsw_sp, | |||
817 | } | 803 | } |
818 | } | 804 | } |
819 | 805 | ||
806 | static bool mlxsw_sp_router_rauhtd_is_full(char *rauhtd_pl) | ||
807 | { | ||
808 | u8 num_rec, last_rec_index, num_entries; | ||
809 | |||
810 | num_rec = mlxsw_reg_rauhtd_num_rec_get(rauhtd_pl); | ||
811 | last_rec_index = num_rec - 1; | ||
812 | |||
813 | if (num_rec < MLXSW_REG_RAUHTD_REC_MAX_NUM) | ||
814 | return false; | ||
815 | if (mlxsw_reg_rauhtd_rec_type_get(rauhtd_pl, last_rec_index) == | ||
816 | MLXSW_REG_RAUHTD_TYPE_IPV6) | ||
817 | return true; | ||
818 | |||
819 | num_entries = mlxsw_reg_rauhtd_ipv4_rec_num_entries_get(rauhtd_pl, | ||
820 | last_rec_index); | ||
821 | if (++num_entries == MLXSW_REG_RAUHTD_IPV4_ENT_PER_REC) | ||
822 | return true; | ||
823 | return false; | ||
824 | } | ||
825 | |||
820 | static int mlxsw_sp_router_neighs_update_rauhtd(struct mlxsw_sp *mlxsw_sp) | 826 | static int mlxsw_sp_router_neighs_update_rauhtd(struct mlxsw_sp *mlxsw_sp) |
821 | { | 827 | { |
822 | char *rauhtd_pl; | 828 | char *rauhtd_pl; |
@@ -843,7 +849,7 @@ static int mlxsw_sp_router_neighs_update_rauhtd(struct mlxsw_sp *mlxsw_sp) | |||
843 | for (i = 0; i < num_rec; i++) | 849 | for (i = 0; i < num_rec; i++) |
844 | mlxsw_sp_router_neigh_rec_process(mlxsw_sp, rauhtd_pl, | 850 | mlxsw_sp_router_neigh_rec_process(mlxsw_sp, rauhtd_pl, |
845 | i); | 851 | i); |
846 | } while (num_rec); | 852 | } while (mlxsw_sp_router_rauhtd_is_full(rauhtd_pl)); |
847 | rtnl_unlock(); | 853 | rtnl_unlock(); |
848 | 854 | ||
849 | kfree(rauhtd_pl); | 855 | kfree(rauhtd_pl); |
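The new mlxsw_sp_router_rauhtd_is_full() helper lets the neighbour-activity dump loop stop as soon as a response comes back less than full, instead of looping for as long as any records were returned; the real helper also inspects the last record's type and IPv4 entry count, since a full record count alone does not prove the dump is exhausted. The same "page until a short response" shape as a small userspace sketch (fetch_records() is a hypothetical stand-in for one RAUHTD query):

    #include <stdio.h>

    #define REC_MAX_NUM 32      /* plays the role of MLXSW_REG_RAUHTD_REC_MAX_NUM */

    /* hypothetical stand-in for one RAUHTD query against a 100-entry table */
    static int fetch_records(int *cursor)
    {
        int remaining = 100 - *cursor;
        int got = remaining > REC_MAX_NUM ? REC_MAX_NUM : remaining;

        *cursor += got;
        return got;
    }

    int main(void)
    {
        int cursor = 0;
        int num_rec;

        do {
            num_rec = fetch_records(&cursor);
            printf("processed %d records\n", num_rec);
            /* a short (non-full) response means the dump is drained */
        } while (num_rec == REC_MAX_NUM);

        return 0;
    }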
@@ -862,7 +868,7 @@ static void mlxsw_sp_router_neighs_update_nh(struct mlxsw_sp *mlxsw_sp) | |||
862 | * is active regardless of the traffic. | 868 | * is active regardless of the traffic. |
863 | */ | 869 | */ |
864 | if (!list_empty(&neigh_entry->nexthop_list)) | 870 | if (!list_empty(&neigh_entry->nexthop_list)) |
865 | neigh_event_send(neigh_entry->n, NULL); | 871 | neigh_event_send(neigh_entry->key.n, NULL); |
866 | } | 872 | } |
867 | rtnl_unlock(); | 873 | rtnl_unlock(); |
868 | } | 874 | } |
@@ -908,9 +914,9 @@ static void mlxsw_sp_router_probe_unresolved_nexthops(struct work_struct *work) | |||
908 | rtnl_lock(); | 914 | rtnl_lock(); |
909 | list_for_each_entry(neigh_entry, &mlxsw_sp->router.nexthop_neighs_list, | 915 | list_for_each_entry(neigh_entry, &mlxsw_sp->router.nexthop_neighs_list, |
910 | nexthop_neighs_list_node) { | 916 | nexthop_neighs_list_node) { |
911 | if (!(neigh_entry->n->nud_state & NUD_VALID) && | 917 | if (!(neigh_entry->key.n->nud_state & NUD_VALID) && |
912 | !list_empty(&neigh_entry->nexthop_list)) | 918 | !list_empty(&neigh_entry->nexthop_list)) |
913 | neigh_event_send(neigh_entry->n, NULL); | 919 | neigh_event_send(neigh_entry->key.n, NULL); |
914 | } | 920 | } |
915 | rtnl_unlock(); | 921 | rtnl_unlock(); |
916 | 922 | ||
@@ -927,7 +933,7 @@ static void mlxsw_sp_router_neigh_update_hw(struct work_struct *work) | |||
927 | { | 933 | { |
928 | struct mlxsw_sp_neigh_entry *neigh_entry = | 934 | struct mlxsw_sp_neigh_entry *neigh_entry = |
929 | container_of(work, struct mlxsw_sp_neigh_entry, dw.work); | 935 | container_of(work, struct mlxsw_sp_neigh_entry, dw.work); |
930 | struct neighbour *n = neigh_entry->n; | 936 | struct neighbour *n = neigh_entry->key.n; |
931 | struct mlxsw_sp_port *mlxsw_sp_port = neigh_entry->mlxsw_sp_port; | 937 | struct mlxsw_sp_port *mlxsw_sp_port = neigh_entry->mlxsw_sp_port; |
932 | struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; | 938 | struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; |
933 | char rauht_pl[MLXSW_REG_RAUHT_LEN]; | 939 | char rauht_pl[MLXSW_REG_RAUHT_LEN]; |
@@ -1030,11 +1036,8 @@ int mlxsw_sp_router_netevent_event(struct notifier_block *unused, | |||
1030 | 1036 | ||
1031 | mlxsw_sp = mlxsw_sp_port->mlxsw_sp; | 1037 | mlxsw_sp = mlxsw_sp_port->mlxsw_sp; |
1032 | dip = ntohl(*((__be32 *) n->primary_key)); | 1038 | dip = ntohl(*((__be32 *) n->primary_key)); |
1033 | neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, | 1039 | neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, n); |
1034 | &dip, | 1040 | if (WARN_ON(!neigh_entry)) { |
1035 | sizeof(__be32), | ||
1036 | dev); | ||
1037 | if (WARN_ON(!neigh_entry) || WARN_ON(neigh_entry->n != n)) { | ||
1038 | mlxsw_sp_port_dev_put(mlxsw_sp_port); | 1041 | mlxsw_sp_port_dev_put(mlxsw_sp_port); |
1039 | return NOTIFY_DONE; | 1042 | return NOTIFY_DONE; |
1040 | } | 1043 | } |
@@ -1343,33 +1346,26 @@ static int mlxsw_sp_nexthop_init(struct mlxsw_sp *mlxsw_sp, | |||
1343 | struct fib_nh *fib_nh) | 1346 | struct fib_nh *fib_nh) |
1344 | { | 1347 | { |
1345 | struct mlxsw_sp_neigh_entry *neigh_entry; | 1348 | struct mlxsw_sp_neigh_entry *neigh_entry; |
1346 | u32 gwip = ntohl(fib_nh->nh_gw); | ||
1347 | struct net_device *dev = fib_nh->nh_dev; | 1349 | struct net_device *dev = fib_nh->nh_dev; |
1348 | struct neighbour *n; | 1350 | struct neighbour *n; |
1349 | u8 nud_state; | 1351 | u8 nud_state; |
1350 | 1352 | ||
1351 | neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, &gwip, | 1353 | /* Take a reference of neigh here ensuring that neigh would |
1352 | sizeof(gwip), dev); | 1354 | * not be destroyed before the nexthop entry is finished. |
1353 | if (!neigh_entry) { | 1355 | * The reference is taken either in neigh_lookup() or |
1354 | __be32 gwipn = htonl(gwip); | 1356 | * in neigh_create() in case n is not found. |
1355 | 1357 | */ | |
1356 | n = neigh_create(&arp_tbl, &gwipn, dev); | 1358 | n = neigh_lookup(&arp_tbl, &fib_nh->nh_gw, dev); |
1359 | if (!n) { | ||
1360 | n = neigh_create(&arp_tbl, &fib_nh->nh_gw, dev); | ||
1357 | if (IS_ERR(n)) | 1361 | if (IS_ERR(n)) |
1358 | return PTR_ERR(n); | 1362 | return PTR_ERR(n); |
1359 | neigh_event_send(n, NULL); | 1363 | neigh_event_send(n, NULL); |
1360 | neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, &gwip, | 1364 | } |
1361 | sizeof(gwip), dev); | 1365 | neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, n); |
1362 | if (!neigh_entry) { | 1366 | if (!neigh_entry) { |
1363 | neigh_release(n); | 1367 | neigh_release(n); |
1364 | return -EINVAL; | 1368 | return -EINVAL; |
1365 | } | ||
1366 | } else { | ||
1367 | /* Take a reference of neigh here ensuring that neigh would | ||
1368 | * not be detructed before the nexthop entry is finished. | ||
1369 | * The second branch takes the reference in neith_create() | ||
1370 | */ | ||
1371 | n = neigh_entry->n; | ||
1372 | neigh_clone(n); | ||
1373 | } | 1369 | } |
1374 | 1370 | ||
1375 | /* If that is the first nexthop connected to that neigh, add to | 1371 | /* If that is the first nexthop connected to that neigh, add to |
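The rewritten nexthop path pins the neighbour for the nexthop's lifetime: neigh_lookup() returns an existing entry with a reference held, neigh_create() does the same for a newly created one, and the failure path drops that reference with neigh_release(). A userspace sketch of the lookup-or-create-with-reference idiom (simplified table and helpers, not the kernel neighbour API):

    #include <stdio.h>
    #include <stdlib.h>

    struct neigh {
        unsigned int refcnt;
        unsigned int key;
    };

    static struct neigh *table[16];

    static struct neigh *neigh_lookup_sketch(unsigned int key)
    {
        struct neigh *n = table[key % 16];

        if (n && n->key == key) {
            n->refcnt++;                /* lookup hands back a held reference */
            return n;
        }
        return NULL;
    }

    static struct neigh *neigh_create_sketch(unsigned int key)
    {
        struct neigh *n = calloc(1, sizeof(*n));

        if (!n)
            return NULL;
        n->key = key;
        n->refcnt = 1;                  /* creation also hands back a reference */
        table[key % 16] = n;
        return n;
    }

    static void neigh_release_sketch(struct neigh *n)
    {
        if (--n->refcnt == 0) {
            table[n->key % 16] = NULL;
            free(n);
        }
    }

    int main(void)
    {
        struct neigh *n;

        n = neigh_lookup_sketch(42);
        if (!n)
            n = neigh_create_sketch(42);    /* not found: create and hold */
        if (!n)
            return 1;

        /* ... the nexthop uses n for its whole lifetime ... */

        neigh_release_sketch(n);            /* drop the pinning reference */
        return 0;
    }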
@@ -1403,7 +1399,7 @@ static void mlxsw_sp_nexthop_fini(struct mlxsw_sp *mlxsw_sp, | |||
1403 | if (list_empty(&nh->neigh_entry->nexthop_list)) | 1399 | if (list_empty(&nh->neigh_entry->nexthop_list)) |
1404 | list_del(&nh->neigh_entry->nexthop_neighs_list_node); | 1400 | list_del(&nh->neigh_entry->nexthop_neighs_list_node); |
1405 | 1401 | ||
1406 | neigh_release(neigh_entry->n); | 1402 | neigh_release(neigh_entry->key.n); |
1407 | } | 1403 | } |
1408 | 1404 | ||
1409 | static struct mlxsw_sp_nexthop_group * | 1405 | static struct mlxsw_sp_nexthop_group * |
@@ -1463,11 +1459,11 @@ static bool mlxsw_sp_nexthop_match(struct mlxsw_sp_nexthop *nh, | |||
1463 | 1459 | ||
1464 | for (i = 0; i < fi->fib_nhs; i++) { | 1460 | for (i = 0; i < fi->fib_nhs; i++) { |
1465 | struct fib_nh *fib_nh = &fi->fib_nh[i]; | 1461 | struct fib_nh *fib_nh = &fi->fib_nh[i]; |
1466 | u32 gwip = ntohl(fib_nh->nh_gw); | 1462 | struct neighbour *n = nh->neigh_entry->key.n; |
1467 | 1463 | ||
1468 | if (memcmp(nh->neigh_entry->key.addr, | 1464 | if (memcmp(n->primary_key, &fib_nh->nh_gw, |
1469 | &gwip, sizeof(u32)) == 0 && | 1465 | sizeof(fib_nh->nh_gw)) == 0 && |
1470 | nh->neigh_entry->key.dev == fib_nh->nh_dev) | 1466 | n->dev == fib_nh->nh_dev) |
1471 | return true; | 1467 | return true; |
1472 | } | 1468 | } |
1473 | return false; | 1469 | return false; |
@@ -1874,18 +1870,18 @@ static int mlxsw_sp_router_set_abort_trap(struct mlxsw_sp *mlxsw_sp) | |||
1874 | return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl); | 1870 | return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl); |
1875 | } | 1871 | } |
1876 | 1872 | ||
1877 | static void mlxsw_sp_router_fib4_abort(struct mlxsw_sp *mlxsw_sp) | 1873 | static void mlxsw_sp_router_fib_flush(struct mlxsw_sp *mlxsw_sp) |
1878 | { | 1874 | { |
1879 | struct mlxsw_resources *resources; | 1875 | struct mlxsw_resources *resources; |
1880 | struct mlxsw_sp_fib_entry *fib_entry; | 1876 | struct mlxsw_sp_fib_entry *fib_entry; |
1881 | struct mlxsw_sp_fib_entry *tmp; | 1877 | struct mlxsw_sp_fib_entry *tmp; |
1882 | struct mlxsw_sp_vr *vr; | 1878 | struct mlxsw_sp_vr *vr; |
1883 | int i; | 1879 | int i; |
1884 | int err; | ||
1885 | 1880 | ||
1886 | resources = mlxsw_core_resources_get(mlxsw_sp->core); | 1881 | resources = mlxsw_core_resources_get(mlxsw_sp->core); |
1887 | for (i = 0; i < resources->max_virtual_routers; i++) { | 1882 | for (i = 0; i < resources->max_virtual_routers; i++) { |
1888 | vr = &mlxsw_sp->router.vrs[i]; | 1883 | vr = &mlxsw_sp->router.vrs[i]; |
1884 | |||
1889 | if (!vr->used) | 1885 | if (!vr->used) |
1890 | continue; | 1886 | continue; |
1891 | 1887 | ||
@@ -1901,6 +1897,13 @@ static void mlxsw_sp_router_fib4_abort(struct mlxsw_sp *mlxsw_sp) | |||
1901 | break; | 1897 | break; |
1902 | } | 1898 | } |
1903 | } | 1899 | } |
1900 | } | ||
1901 | |||
1902 | static void mlxsw_sp_router_fib4_abort(struct mlxsw_sp *mlxsw_sp) | ||
1903 | { | ||
1904 | int err; | ||
1905 | |||
1906 | mlxsw_sp_router_fib_flush(mlxsw_sp); | ||
1904 | mlxsw_sp->router.aborted = true; | 1907 | mlxsw_sp->router.aborted = true; |
1905 | err = mlxsw_sp_router_set_abort_trap(mlxsw_sp); | 1908 | err = mlxsw_sp_router_set_abort_trap(mlxsw_sp); |
1906 | if (err) | 1909 | if (err) |
@@ -1958,6 +1961,9 @@ static int mlxsw_sp_router_fib_event(struct notifier_block *nb, | |||
1958 | struct fib_entry_notifier_info *fen_info = ptr; | 1961 | struct fib_entry_notifier_info *fen_info = ptr; |
1959 | int err; | 1962 | int err; |
1960 | 1963 | ||
1964 | if (!net_eq(fen_info->info.net, &init_net)) | ||
1965 | return NOTIFY_DONE; | ||
1966 | |||
1961 | switch (event) { | 1967 | switch (event) { |
1962 | case FIB_EVENT_ENTRY_ADD: | 1968 | case FIB_EVENT_ENTRY_ADD: |
1963 | err = mlxsw_sp_router_fib4_add(mlxsw_sp, fen_info); | 1969 | err = mlxsw_sp_router_fib4_add(mlxsw_sp, fen_info); |
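The added net_eq() check makes the FIB notifier return early for events raised in any network namespace other than init_net, which is the only namespace this driver offloads routes for at this point. A generic early-filter sketch of such a notifier callback (types and return codes are illustrative):

    #include <stdio.h>

    #define NOTIFY_DONE 0
    #define NOTIFY_OK   1

    struct net { int id; };

    static struct net init_net = { .id = 0 };

    struct fib_event { struct net *net; int dst; };

    /* only handle events from the namespace we offload for */
    static int fib_event_cb(struct fib_event *ev)
    {
        if (ev->net != &init_net)
            return NOTIFY_DONE;         /* ignore foreign namespaces */

        printf("offloading route %d\n", ev->dst);
        return NOTIFY_OK;
    }

    int main(void)
    {
        struct net other = { .id = 1 };
        struct fib_event a = { .net = &init_net, .dst = 10 };
        struct fib_event b = { .net = &other, .dst = 20 };

        fib_event_cb(&a);
        fib_event_cb(&b);               /* silently ignored */
        return 0;
    }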
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c index 5e00c79e8133..1e2c8eca3af1 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c | |||
@@ -929,12 +929,12 @@ static int mlxsw_sp_port_smid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 mid, | |||
929 | 929 | ||
930 | static struct mlxsw_sp_mid *__mlxsw_sp_mc_get(struct mlxsw_sp *mlxsw_sp, | 930 | static struct mlxsw_sp_mid *__mlxsw_sp_mc_get(struct mlxsw_sp *mlxsw_sp, |
931 | const unsigned char *addr, | 931 | const unsigned char *addr, |
932 | u16 vid) | 932 | u16 fid) |
933 | { | 933 | { |
934 | struct mlxsw_sp_mid *mid; | 934 | struct mlxsw_sp_mid *mid; |
935 | 935 | ||
936 | list_for_each_entry(mid, &mlxsw_sp->br_mids.list, list) { | 936 | list_for_each_entry(mid, &mlxsw_sp->br_mids.list, list) { |
937 | if (ether_addr_equal(mid->addr, addr) && mid->vid == vid) | 937 | if (ether_addr_equal(mid->addr, addr) && mid->fid == fid) |
938 | return mid; | 938 | return mid; |
939 | } | 939 | } |
940 | return NULL; | 940 | return NULL; |
@@ -942,7 +942,7 @@ static struct mlxsw_sp_mid *__mlxsw_sp_mc_get(struct mlxsw_sp *mlxsw_sp, | |||
942 | 942 | ||
943 | static struct mlxsw_sp_mid *__mlxsw_sp_mc_alloc(struct mlxsw_sp *mlxsw_sp, | 943 | static struct mlxsw_sp_mid *__mlxsw_sp_mc_alloc(struct mlxsw_sp *mlxsw_sp, |
944 | const unsigned char *addr, | 944 | const unsigned char *addr, |
945 | u16 vid) | 945 | u16 fid) |
946 | { | 946 | { |
947 | struct mlxsw_sp_mid *mid; | 947 | struct mlxsw_sp_mid *mid; |
948 | u16 mid_idx; | 948 | u16 mid_idx; |
@@ -958,7 +958,7 @@ static struct mlxsw_sp_mid *__mlxsw_sp_mc_alloc(struct mlxsw_sp *mlxsw_sp, | |||
958 | 958 | ||
959 | set_bit(mid_idx, mlxsw_sp->br_mids.mapped); | 959 | set_bit(mid_idx, mlxsw_sp->br_mids.mapped); |
960 | ether_addr_copy(mid->addr, addr); | 960 | ether_addr_copy(mid->addr, addr); |
961 | mid->vid = vid; | 961 | mid->fid = fid; |
962 | mid->mid = mid_idx; | 962 | mid->mid = mid_idx; |
963 | mid->ref_count = 0; | 963 | mid->ref_count = 0; |
964 | list_add_tail(&mid->list, &mlxsw_sp->br_mids.list); | 964 | list_add_tail(&mid->list, &mlxsw_sp->br_mids.list); |
@@ -991,9 +991,9 @@ static int mlxsw_sp_port_mdb_add(struct mlxsw_sp_port *mlxsw_sp_port, | |||
991 | if (switchdev_trans_ph_prepare(trans)) | 991 | if (switchdev_trans_ph_prepare(trans)) |
992 | return 0; | 992 | return 0; |
993 | 993 | ||
994 | mid = __mlxsw_sp_mc_get(mlxsw_sp, mdb->addr, mdb->vid); | 994 | mid = __mlxsw_sp_mc_get(mlxsw_sp, mdb->addr, fid); |
995 | if (!mid) { | 995 | if (!mid) { |
996 | mid = __mlxsw_sp_mc_alloc(mlxsw_sp, mdb->addr, mdb->vid); | 996 | mid = __mlxsw_sp_mc_alloc(mlxsw_sp, mdb->addr, fid); |
997 | if (!mid) { | 997 | if (!mid) { |
998 | netdev_err(dev, "Unable to allocate MC group\n"); | 998 | netdev_err(dev, "Unable to allocate MC group\n"); |
999 | return -ENOMEM; | 999 | return -ENOMEM; |
@@ -1137,7 +1137,7 @@ static int mlxsw_sp_port_mdb_del(struct mlxsw_sp_port *mlxsw_sp_port, | |||
1137 | u16 mid_idx; | 1137 | u16 mid_idx; |
1138 | int err = 0; | 1138 | int err = 0; |
1139 | 1139 | ||
1140 | mid = __mlxsw_sp_mc_get(mlxsw_sp, mdb->addr, mdb->vid); | 1140 | mid = __mlxsw_sp_mc_get(mlxsw_sp, mdb->addr, fid); |
1141 | if (!mid) { | 1141 | if (!mid) { |
1142 | netdev_err(dev, "Unable to remove port from MC DB\n"); | 1142 | netdev_err(dev, "Unable to remove port from MC DB\n"); |
1143 | return -EINVAL; | 1143 | return -EINVAL; |
diff --git a/drivers/net/ethernet/qlogic/qed/qed_hsi.h b/drivers/net/ethernet/qlogic/qed/qed_hsi.h index 72eee29c677f..2777d5bb4380 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_hsi.h +++ b/drivers/net/ethernet/qlogic/qed/qed_hsi.h | |||
@@ -727,9 +727,6 @@ struct core_tx_bd_flags { | |||
727 | #define CORE_TX_BD_FLAGS_L4_PROTOCOL_SHIFT 6 | 727 | #define CORE_TX_BD_FLAGS_L4_PROTOCOL_SHIFT 6 |
728 | #define CORE_TX_BD_FLAGS_L4_PSEUDO_CSUM_MODE_MASK 0x1 | 728 | #define CORE_TX_BD_FLAGS_L4_PSEUDO_CSUM_MODE_MASK 0x1 |
729 | #define CORE_TX_BD_FLAGS_L4_PSEUDO_CSUM_MODE_SHIFT 7 | 729 | #define CORE_TX_BD_FLAGS_L4_PSEUDO_CSUM_MODE_SHIFT 7 |
730 | #define CORE_TX_BD_FLAGS_ROCE_FLAV_MASK 0x1 | ||
731 | #define CORE_TX_BD_FLAGS_ROCE_FLAV_SHIFT 12 | ||
732 | |||
733 | }; | 730 | }; |
734 | 731 | ||
735 | struct core_tx_bd { | 732 | struct core_tx_bd { |
diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.c b/drivers/net/ethernet/qlogic/qed/qed_ll2.c index 63e1a1b0ef8e..f95385cbbd40 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_ll2.c +++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.c | |||
@@ -1119,6 +1119,7 @@ static void qed_ll2_prepare_tx_packet_set_bd(struct qed_hwfn *p_hwfn, | |||
1119 | start_bd->bd_flags.as_bitfield |= CORE_TX_BD_FLAGS_START_BD_MASK << | 1119 | start_bd->bd_flags.as_bitfield |= CORE_TX_BD_FLAGS_START_BD_MASK << |
1120 | CORE_TX_BD_FLAGS_START_BD_SHIFT; | 1120 | CORE_TX_BD_FLAGS_START_BD_SHIFT; |
1121 | SET_FIELD(start_bd->bitfield0, CORE_TX_BD_NBDS, num_of_bds); | 1121 | SET_FIELD(start_bd->bitfield0, CORE_TX_BD_NBDS, num_of_bds); |
1122 | SET_FIELD(start_bd->bitfield0, CORE_TX_BD_ROCE_FLAV, type); | ||
1122 | DMA_REGPAIR_LE(start_bd->addr, first_frag); | 1123 | DMA_REGPAIR_LE(start_bd->addr, first_frag); |
1123 | start_bd->nbytes = cpu_to_le16(first_frag_len); | 1124 | start_bd->nbytes = cpu_to_le16(first_frag_len); |
1124 | 1125 | ||
diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c index c418360ba02a..333c7442e48a 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_main.c +++ b/drivers/net/ethernet/qlogic/qed/qed_main.c | |||
@@ -839,20 +839,19 @@ static void qed_update_pf_params(struct qed_dev *cdev, | |||
839 | { | 839 | { |
840 | int i; | 840 | int i; |
841 | 841 | ||
842 | if (IS_ENABLED(CONFIG_QED_RDMA)) { | ||
843 | params->rdma_pf_params.num_qps = QED_ROCE_QPS; | ||
844 | params->rdma_pf_params.min_dpis = QED_ROCE_DPIS; | ||
845 | /* divide by 3 the MRs to avoid MF ILT overflow */ | ||
846 | params->rdma_pf_params.num_mrs = RDMA_MAX_TIDS; | ||
847 | params->rdma_pf_params.gl_pi = QED_ROCE_PROTOCOL_INDEX; | ||
848 | } | ||
849 | |||
842 | for (i = 0; i < cdev->num_hwfns; i++) { | 850 | for (i = 0; i < cdev->num_hwfns; i++) { |
843 | struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; | 851 | struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; |
844 | 852 | ||
845 | p_hwfn->pf_params = *params; | 853 | p_hwfn->pf_params = *params; |
846 | } | 854 | } |
847 | |||
848 | if (!IS_ENABLED(CONFIG_QED_RDMA)) | ||
849 | return; | ||
850 | |||
851 | params->rdma_pf_params.num_qps = QED_ROCE_QPS; | ||
852 | params->rdma_pf_params.min_dpis = QED_ROCE_DPIS; | ||
853 | /* divide by 3 the MRs to avoid MF ILT overflow */ | ||
854 | params->rdma_pf_params.num_mrs = RDMA_MAX_TIDS; | ||
855 | params->rdma_pf_params.gl_pi = QED_ROCE_PROTOCOL_INDEX; | ||
856 | } | 855 | } |
857 | 856 | ||
858 | static int qed_slowpath_start(struct qed_dev *cdev, | 857 | static int qed_slowpath_start(struct qed_dev *cdev, |
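The reordering in qed_update_pf_params() matters because the loop copies *params into each hwfn by value; with the old ordering the RDMA fields were filled in only after that copy, so the hwfns never saw them. A tiny sketch of the copy-then-modify pitfall:

    #include <stdio.h>

    struct pf_params { int num_qps; };

    int main(void)
    {
        struct pf_params params = { 0 };
        struct pf_params hwfn_params;

        /* old order: copy first, then fill in -> the copy misses the update */
        hwfn_params = params;
        params.num_qps = 8;
        printf("hwfn sees %d qps (lost update)\n", hwfn_params.num_qps);

        /* fixed order: fill in, then copy */
        params.num_qps = 8;
        hwfn_params = params;
        printf("hwfn sees %d qps\n", hwfn_params.num_qps);
        return 0;
    }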
diff --git a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c index 12251a1032d1..7567cc464b88 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c +++ b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c | |||
@@ -175,16 +175,23 @@ static void qede_get_strings_stats(struct qede_dev *edev, u8 *buf) | |||
175 | for (i = 0, k = 0; i < QEDE_QUEUE_CNT(edev); i++) { | 175 | for (i = 0, k = 0; i < QEDE_QUEUE_CNT(edev); i++) { |
176 | int tc; | 176 | int tc; |
177 | 177 | ||
178 | for (j = 0; j < QEDE_NUM_RQSTATS; j++) | 178 | if (edev->fp_array[i].type & QEDE_FASTPATH_RX) { |
179 | sprintf(buf + (k + j) * ETH_GSTRING_LEN, | 179 | for (j = 0; j < QEDE_NUM_RQSTATS; j++) |
180 | "%d: %s", i, qede_rqstats_arr[j].string); | ||
181 | k += QEDE_NUM_RQSTATS; | ||
182 | for (tc = 0; tc < edev->num_tc; tc++) { | ||
183 | for (j = 0; j < QEDE_NUM_TQSTATS; j++) | ||
184 | sprintf(buf + (k + j) * ETH_GSTRING_LEN, | 180 | sprintf(buf + (k + j) * ETH_GSTRING_LEN, |
185 | "%d.%d: %s", i, tc, | 181 | "%d: %s", i, |
186 | qede_tqstats_arr[j].string); | 182 | qede_rqstats_arr[j].string); |
187 | k += QEDE_NUM_TQSTATS; | 183 | k += QEDE_NUM_RQSTATS; |
184 | } | ||
185 | |||
186 | if (edev->fp_array[i].type & QEDE_FASTPATH_TX) { | ||
187 | for (tc = 0; tc < edev->num_tc; tc++) { | ||
188 | for (j = 0; j < QEDE_NUM_TQSTATS; j++) | ||
189 | sprintf(buf + (k + j) * | ||
190 | ETH_GSTRING_LEN, | ||
191 | "%d.%d: %s", i, tc, | ||
192 | qede_tqstats_arr[j].string); | ||
193 | k += QEDE_NUM_TQSTATS; | ||
194 | } | ||
188 | } | 195 | } |
189 | } | 196 | } |
190 | 197 | ||
diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c index 7def29aaf65c..85f46dbecd5b 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_main.c +++ b/drivers/net/ethernet/qlogic/qede/qede_main.c | |||
@@ -2839,7 +2839,7 @@ static int qede_alloc_sge_mem(struct qede_dev *edev, struct qede_rx_queue *rxq) | |||
2839 | } | 2839 | } |
2840 | 2840 | ||
2841 | mapping = dma_map_page(&edev->pdev->dev, replace_buf->data, 0, | 2841 | mapping = dma_map_page(&edev->pdev->dev, replace_buf->data, 0, |
2842 | rxq->rx_buf_size, DMA_FROM_DEVICE); | 2842 | PAGE_SIZE, DMA_FROM_DEVICE); |
2843 | if (unlikely(dma_mapping_error(&edev->pdev->dev, mapping))) { | 2843 | if (unlikely(dma_mapping_error(&edev->pdev->dev, mapping))) { |
2844 | DP_NOTICE(edev, | 2844 | DP_NOTICE(edev, |
2845 | "Failed to map TPA replacement buffer\n"); | 2845 | "Failed to map TPA replacement buffer\n"); |
diff --git a/drivers/net/ethernet/qualcomm/emac/emac-mac.c b/drivers/net/ethernet/qualcomm/emac/emac-mac.c index 6fb3bee904d3..0b4deb31e742 100644 --- a/drivers/net/ethernet/qualcomm/emac/emac-mac.c +++ b/drivers/net/ethernet/qualcomm/emac/emac-mac.c | |||
@@ -575,10 +575,11 @@ void emac_mac_start(struct emac_adapter *adpt) | |||
575 | 575 | ||
576 | mac |= TXEN | RXEN; /* enable RX/TX */ | 576 | mac |= TXEN | RXEN; /* enable RX/TX */ |
577 | 577 | ||
578 | /* We don't have ethtool support yet, so force flow-control mode | 578 | /* Configure MAC flow control to match the PHY's settings. */ |
579 | * to 'full' always. | 579 | if (phydev->pause) |
580 | */ | 580 | mac |= RXFC; |
581 | mac |= TXFC | RXFC; | 581 | if (phydev->pause != phydev->asym_pause) |
582 | mac |= TXFC; | ||
582 | 583 | ||
583 | /* setup link speed */ | 584 | /* setup link speed */ |
584 | mac &= ~SPEED_MASK; | 585 | mac &= ~SPEED_MASK; |
@@ -1003,6 +1004,12 @@ int emac_mac_up(struct emac_adapter *adpt) | |||
1003 | writel((u32)~DIS_INT, adpt->base + EMAC_INT_STATUS); | 1004 | writel((u32)~DIS_INT, adpt->base + EMAC_INT_STATUS); |
1004 | writel(adpt->irq.mask, adpt->base + EMAC_INT_MASK); | 1005 | writel(adpt->irq.mask, adpt->base + EMAC_INT_MASK); |
1005 | 1006 | ||
1007 | /* Enable pause frames. Without this feature, the EMAC has been shown | ||
1008 | * to receive (and drop) frames with FCS errors at gigabit connections. | ||
1009 | */ | ||
1010 | adpt->phydev->supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause; | ||
1011 | adpt->phydev->advertising |= SUPPORTED_Pause | SUPPORTED_Asym_Pause; | ||
1012 | |||
1006 | adpt->phydev->irq = PHY_IGNORE_INTERRUPT; | 1013 | adpt->phydev->irq = PHY_IGNORE_INTERRUPT; |
1007 | phy_start(adpt->phydev); | 1014 | phy_start(adpt->phydev); |
1008 | 1015 | ||
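Rather than forcing both flow-control directions on, the MAC now derives them from the PHY's resolved pause bits, and the emac_mac_up() hunk advertises Pause/Asym_Pause so the link partner can negotiate them at all. The mapping used by the emac_mac_start() hunk, as a small truth-table sketch (the RXFC/TXFC values are illustrative, not the EMAC register layout):

    #include <stdbool.h>
    #include <stdio.h>

    #define RXFC 0x1
    #define TXFC 0x2

    /* pause/asym_pause stand for the PHY's resolved pause bits */
    static unsigned int flow_ctrl_bits(bool pause, bool asym_pause)
    {
        unsigned int mac = 0;

        if (pause)
            mac |= RXFC;
        if (pause != asym_pause)
            mac |= TXFC;
        return mac;
    }

    int main(void)
    {
        printf("pause=1 asym=0 -> %#x (rx+tx)\n", flow_ctrl_bits(true, false));
        printf("pause=1 asym=1 -> %#x (rx only)\n", flow_ctrl_bits(true, true));
        printf("pause=0 asym=1 -> %#x (tx only)\n", flow_ctrl_bits(false, true));
        printf("pause=0 asym=0 -> %#x (off)\n", flow_ctrl_bits(false, false));
        return 0;
    }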
diff --git a/drivers/net/ethernet/qualcomm/emac/emac-sgmii.c b/drivers/net/ethernet/qualcomm/emac/emac-sgmii.c index 75c1b530e39e..72fe343c7a36 100644 --- a/drivers/net/ethernet/qualcomm/emac/emac-sgmii.c +++ b/drivers/net/ethernet/qualcomm/emac/emac-sgmii.c | |||
@@ -421,7 +421,7 @@ static const struct emac_reg_write sgmii_v2_laned[] = { | |||
421 | /* CDR Settings */ | 421 | /* CDR Settings */ |
422 | {EMAC_SGMII_LN_UCDR_FO_GAIN_MODE0, | 422 | {EMAC_SGMII_LN_UCDR_FO_GAIN_MODE0, |
423 | UCDR_STEP_BY_TWO_MODE0 | UCDR_xO_GAIN_MODE(10)}, | 423 | UCDR_STEP_BY_TWO_MODE0 | UCDR_xO_GAIN_MODE(10)}, |
424 | {EMAC_SGMII_LN_UCDR_SO_GAIN_MODE0, UCDR_xO_GAIN_MODE(6)}, | 424 | {EMAC_SGMII_LN_UCDR_SO_GAIN_MODE0, UCDR_xO_GAIN_MODE(0)}, |
425 | {EMAC_SGMII_LN_UCDR_SO_CONFIG, UCDR_ENABLE | UCDR_SO_SATURATION(12)}, | 425 | {EMAC_SGMII_LN_UCDR_SO_CONFIG, UCDR_ENABLE | UCDR_SO_SATURATION(12)}, |
426 | 426 | ||
427 | /* TX/RX Settings */ | 427 | /* TX/RX Settings */ |
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c index 3cf3557106c2..6b89e4a7b164 100644 --- a/drivers/net/ethernet/sfc/efx.c +++ b/drivers/net/ethernet/sfc/efx.c | |||
@@ -485,6 +485,9 @@ efx_copy_channel(const struct efx_channel *old_channel) | |||
485 | *channel = *old_channel; | 485 | *channel = *old_channel; |
486 | 486 | ||
487 | channel->napi_dev = NULL; | 487 | channel->napi_dev = NULL; |
488 | INIT_HLIST_NODE(&channel->napi_str.napi_hash_node); | ||
489 | channel->napi_str.napi_id = 0; | ||
490 | channel->napi_str.state = 0; | ||
488 | memset(&channel->eventq, 0, sizeof(channel->eventq)); | 491 | memset(&channel->eventq, 0, sizeof(channel->eventq)); |
489 | 492 | ||
490 | for (j = 0; j < EFX_TXQ_TYPES; j++) { | 493 | for (j = 0; j < EFX_TXQ_TYPES; j++) { |
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index 48e71fad4210..e2c94ec4edd0 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | |||
@@ -880,6 +880,13 @@ static int stmmac_init_phy(struct net_device *dev) | |||
880 | return -ENODEV; | 880 | return -ENODEV; |
881 | } | 881 | } |
882 | 882 | ||
883 | /* stmmac_adjust_link will change this to PHY_IGNORE_INTERRUPT to avoid | ||
884 | * subsequent PHY polling; make sure we force a link transition if |
885 | * we have an UP/DOWN/UP transition |
886 | */ | ||
887 | if (phydev->is_pseudo_fixed_link) | ||
888 | phydev->irq = PHY_POLL; | ||
889 | |||
883 | pr_debug("stmmac_init_phy: %s: attached to PHY (UID 0x%x)" | 890 | pr_debug("stmmac_init_phy: %s: attached to PHY (UID 0x%x)" |
884 | " Link = %d\n", dev->name, phydev->phy_id, phydev->link); | 891 | " Link = %d\n", dev->name, phydev->phy_id, phydev->link); |
885 | 892 | ||
diff --git a/drivers/net/ethernet/ti/cpsw-phy-sel.c b/drivers/net/ethernet/ti/cpsw-phy-sel.c index 054a8dd23dae..ba1e45ff6aae 100644 --- a/drivers/net/ethernet/ti/cpsw-phy-sel.c +++ b/drivers/net/ethernet/ti/cpsw-phy-sel.c | |||
@@ -176,9 +176,12 @@ void cpsw_phy_sel(struct device *dev, phy_interface_t phy_mode, int slave) | |||
176 | } | 176 | } |
177 | 177 | ||
178 | dev = bus_find_device(&platform_bus_type, NULL, node, match); | 178 | dev = bus_find_device(&platform_bus_type, NULL, node, match); |
179 | of_node_put(node); | ||
179 | priv = dev_get_drvdata(dev); | 180 | priv = dev_get_drvdata(dev); |
180 | 181 | ||
181 | priv->cpsw_phy_sel(priv, phy_mode, slave); | 182 | priv->cpsw_phy_sel(priv, phy_mode, slave); |
183 | |||
184 | put_device(dev); | ||
182 | } | 185 | } |
183 | EXPORT_SYMBOL_GPL(cpsw_phy_sel); | 186 | EXPORT_SYMBOL_GPL(cpsw_phy_sel); |
184 | 187 | ||
diff --git a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c index 2fd94a5bc1f3..84fbe5714f8b 100644 --- a/drivers/net/ethernet/ti/davinci_emac.c +++ b/drivers/net/ethernet/ti/davinci_emac.c | |||
@@ -1410,6 +1410,7 @@ static int emac_dev_open(struct net_device *ndev) | |||
1410 | int i = 0; | 1410 | int i = 0; |
1411 | struct emac_priv *priv = netdev_priv(ndev); | 1411 | struct emac_priv *priv = netdev_priv(ndev); |
1412 | struct phy_device *phydev = NULL; | 1412 | struct phy_device *phydev = NULL; |
1413 | struct device *phy = NULL; | ||
1413 | 1414 | ||
1414 | ret = pm_runtime_get_sync(&priv->pdev->dev); | 1415 | ret = pm_runtime_get_sync(&priv->pdev->dev); |
1415 | if (ret < 0) { | 1416 | if (ret < 0) { |
@@ -1488,19 +1489,20 @@ static int emac_dev_open(struct net_device *ndev) | |||
1488 | 1489 | ||
1489 | /* use the first phy on the bus if pdata did not give us a phy id */ | 1490 | /* use the first phy on the bus if pdata did not give us a phy id */ |
1490 | if (!phydev && !priv->phy_id) { | 1491 | if (!phydev && !priv->phy_id) { |
1491 | struct device *phy; | ||
1492 | |||
1493 | phy = bus_find_device(&mdio_bus_type, NULL, NULL, | 1492 | phy = bus_find_device(&mdio_bus_type, NULL, NULL, |
1494 | match_first_device); | 1493 | match_first_device); |
1495 | if (phy) | 1494 | if (phy) { |
1496 | priv->phy_id = dev_name(phy); | 1495 | priv->phy_id = dev_name(phy); |
1496 | if (!priv->phy_id || !*priv->phy_id) | ||
1497 | put_device(phy); | ||
1498 | } | ||
1497 | } | 1499 | } |
1498 | 1500 | ||
1499 | if (!phydev && priv->phy_id && *priv->phy_id) { | 1501 | if (!phydev && priv->phy_id && *priv->phy_id) { |
1500 | phydev = phy_connect(ndev, priv->phy_id, | 1502 | phydev = phy_connect(ndev, priv->phy_id, |
1501 | &emac_adjust_link, | 1503 | &emac_adjust_link, |
1502 | PHY_INTERFACE_MODE_MII); | 1504 | PHY_INTERFACE_MODE_MII); |
1503 | 1505 | put_device(phy); /* reference taken by bus_find_device */ | |
1504 | if (IS_ERR(phydev)) { | 1506 | if (IS_ERR(phydev)) { |
1505 | dev_err(emac_dev, "could not connect to phy %s\n", | 1507 | dev_err(emac_dev, "could not connect to phy %s\n", |
1506 | priv->phy_id); | 1508 | priv->phy_id); |
diff --git a/drivers/net/ethernet/toshiba/ps3_gelic_wireless.c b/drivers/net/ethernet/toshiba/ps3_gelic_wireless.c index 446ea580ad42..928c1dca2673 100644 --- a/drivers/net/ethernet/toshiba/ps3_gelic_wireless.c +++ b/drivers/net/ethernet/toshiba/ps3_gelic_wireless.c | |||
@@ -1694,7 +1694,7 @@ struct gelic_wl_scan_info *gelic_wl_find_best_bss(struct gelic_wl_info *wl) | |||
1694 | pr_debug("%s: bssid matched\n", __func__); | 1694 | pr_debug("%s: bssid matched\n", __func__); |
1695 | break; | 1695 | break; |
1696 | } else { | 1696 | } else { |
1697 | pr_debug("%s: bssid unmached\n", __func__); | 1697 | pr_debug("%s: bssid unmatched\n", __func__); |
1698 | continue; | 1698 | continue; |
1699 | } | 1699 | } |
1700 | } | 1700 | } |
diff --git a/drivers/net/ethernet/xscale/ixp4xx_eth.c b/drivers/net/ethernet/xscale/ixp4xx_eth.c index 7f127dc1b7ba..fa32391720fe 100644 --- a/drivers/net/ethernet/xscale/ixp4xx_eth.c +++ b/drivers/net/ethernet/xscale/ixp4xx_eth.c | |||
@@ -708,8 +708,7 @@ static int eth_poll(struct napi_struct *napi, int budget) | |||
708 | if (!qmgr_stat_below_low_watermark(rxq) && | 708 | if (!qmgr_stat_below_low_watermark(rxq) && |
709 | napi_reschedule(napi)) { /* not empty again */ | 709 | napi_reschedule(napi)) { /* not empty again */ |
710 | #if DEBUG_RX | 710 | #if DEBUG_RX |
711 | printk(KERN_DEBUG "%s: eth_poll" | 711 | printk(KERN_DEBUG "%s: eth_poll napi_reschedule succeeded\n", |
712 | " napi_reschedule successed\n", | ||
713 | dev->name); | 712 | dev->name); |
714 | #endif | 713 | #endif |
715 | qmgr_disable_irq(rxq); | 714 | qmgr_disable_irq(rxq); |
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c index 3234fcdea317..d2d6f12a112f 100644 --- a/drivers/net/macvlan.c +++ b/drivers/net/macvlan.c | |||
@@ -1278,6 +1278,7 @@ int macvlan_common_newlink(struct net *src_net, struct net_device *dev, | |||
1278 | struct net_device *lowerdev; | 1278 | struct net_device *lowerdev; |
1279 | int err; | 1279 | int err; |
1280 | int macmode; | 1280 | int macmode; |
1281 | bool create = false; | ||
1281 | 1282 | ||
1282 | if (!tb[IFLA_LINK]) | 1283 | if (!tb[IFLA_LINK]) |
1283 | return -EINVAL; | 1284 | return -EINVAL; |
@@ -1304,12 +1305,18 @@ int macvlan_common_newlink(struct net *src_net, struct net_device *dev, | |||
1304 | err = macvlan_port_create(lowerdev); | 1305 | err = macvlan_port_create(lowerdev); |
1305 | if (err < 0) | 1306 | if (err < 0) |
1306 | return err; | 1307 | return err; |
1308 | create = true; | ||
1307 | } | 1309 | } |
1308 | port = macvlan_port_get_rtnl(lowerdev); | 1310 | port = macvlan_port_get_rtnl(lowerdev); |
1309 | 1311 | ||
1310 | /* Only 1 macvlan device can be created in passthru mode */ | 1312 | /* Only 1 macvlan device can be created in passthru mode */ |
1311 | if (port->passthru) | 1313 | if (port->passthru) { |
1312 | return -EINVAL; | 1314 | /* The macvlan port cannot have been created this time; |
1315 | * still goto destroy_macvlan_port for readability. |
1316 | */ | ||
1317 | err = -EINVAL; | ||
1318 | goto destroy_macvlan_port; | ||
1319 | } | ||
1313 | 1320 | ||
1314 | vlan->lowerdev = lowerdev; | 1321 | vlan->lowerdev = lowerdev; |
1315 | vlan->dev = dev; | 1322 | vlan->dev = dev; |
@@ -1325,24 +1332,28 @@ int macvlan_common_newlink(struct net *src_net, struct net_device *dev, | |||
1325 | vlan->flags = nla_get_u16(data[IFLA_MACVLAN_FLAGS]); | 1332 | vlan->flags = nla_get_u16(data[IFLA_MACVLAN_FLAGS]); |
1326 | 1333 | ||
1327 | if (vlan->mode == MACVLAN_MODE_PASSTHRU) { | 1334 | if (vlan->mode == MACVLAN_MODE_PASSTHRU) { |
1328 | if (port->count) | 1335 | if (port->count) { |
1329 | return -EINVAL; | 1336 | err = -EINVAL; |
1337 | goto destroy_macvlan_port; | ||
1338 | } | ||
1330 | port->passthru = true; | 1339 | port->passthru = true; |
1331 | eth_hw_addr_inherit(dev, lowerdev); | 1340 | eth_hw_addr_inherit(dev, lowerdev); |
1332 | } | 1341 | } |
1333 | 1342 | ||
1334 | if (data && data[IFLA_MACVLAN_MACADDR_MODE]) { | 1343 | if (data && data[IFLA_MACVLAN_MACADDR_MODE]) { |
1335 | if (vlan->mode != MACVLAN_MODE_SOURCE) | 1344 | if (vlan->mode != MACVLAN_MODE_SOURCE) { |
1336 | return -EINVAL; | 1345 | err = -EINVAL; |
1346 | goto destroy_macvlan_port; | ||
1347 | } | ||
1337 | macmode = nla_get_u32(data[IFLA_MACVLAN_MACADDR_MODE]); | 1348 | macmode = nla_get_u32(data[IFLA_MACVLAN_MACADDR_MODE]); |
1338 | err = macvlan_changelink_sources(vlan, macmode, data); | 1349 | err = macvlan_changelink_sources(vlan, macmode, data); |
1339 | if (err) | 1350 | if (err) |
1340 | return err; | 1351 | goto destroy_macvlan_port; |
1341 | } | 1352 | } |
1342 | 1353 | ||
1343 | err = register_netdevice(dev); | 1354 | err = register_netdevice(dev); |
1344 | if (err < 0) | 1355 | if (err < 0) |
1345 | return err; | 1356 | goto destroy_macvlan_port; |
1346 | 1357 | ||
1347 | dev->priv_flags |= IFF_MACVLAN; | 1358 | dev->priv_flags |= IFF_MACVLAN; |
1348 | err = netdev_upper_dev_link(lowerdev, dev); | 1359 | err = netdev_upper_dev_link(lowerdev, dev); |
@@ -1357,7 +1368,9 @@ int macvlan_common_newlink(struct net *src_net, struct net_device *dev, | |||
1357 | 1368 | ||
1358 | unregister_netdev: | 1369 | unregister_netdev: |
1359 | unregister_netdevice(dev); | 1370 | unregister_netdevice(dev); |
1360 | 1371 | destroy_macvlan_port: | |
1372 | if (create) | ||
1373 | macvlan_port_destroy(port->dev); | ||
1361 | return err; | 1374 | return err; |
1362 | } | 1375 | } |
1363 | EXPORT_SYMBOL_GPL(macvlan_common_newlink); | 1376 | EXPORT_SYMBOL_GPL(macvlan_common_newlink); |
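In macvlan_common_newlink() every early "return err" taken after the port may have been created is now funnelled through a single destroy_macvlan_port label, and the new "create" flag makes sure only a port created by this call is torn down; the r8152 open-path hunk further down layers out_unlock/out_free labels the same way. A compact userspace sketch of goto-based error unwinding (the resources are illustrative):

    #include <errno.h>
    #include <stdbool.h>
    #include <stdio.h>
    #include <stdlib.h>

    static char *port;                  /* persists across calls once created */

    static int do_newlink(bool conflict)
    {
        bool create = false;
        int err;

        if (!port) {
            port = malloc(64);          /* stands in for macvlan_port_create() */
            if (!port)
                return -ENOMEM;
            create = true;
        }

        if (conflict) {                 /* any later failure funnels here */
            err = -EINVAL;
            goto destroy_port;
        }

        printf("device registered\n");
        return 0;

    destroy_port:
        if (create) {                   /* only undo what this call created */
            free(port);
            port = NULL;
        }
        return err;
    }

    int main(void)
    {
        printf("first (conflict): %d\n", do_newlink(true));
        printf("second (ok):      %d\n", do_newlink(false));
        free(port);
        return 0;
    }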
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c index e977ba931878..1a4bf8acad78 100644 --- a/drivers/net/phy/phy_device.c +++ b/drivers/net/phy/phy_device.c | |||
@@ -723,6 +723,7 @@ struct phy_device *phy_connect(struct net_device *dev, const char *bus_id, | |||
723 | phydev = to_phy_device(d); | 723 | phydev = to_phy_device(d); |
724 | 724 | ||
725 | rc = phy_connect_direct(dev, phydev, handler, interface); | 725 | rc = phy_connect_direct(dev, phydev, handler, interface); |
726 | put_device(d); | ||
726 | if (rc) | 727 | if (rc) |
727 | return ERR_PTR(rc); | 728 | return ERR_PTR(rc); |
728 | 729 | ||
@@ -953,6 +954,7 @@ struct phy_device *phy_attach(struct net_device *dev, const char *bus_id, | |||
953 | phydev = to_phy_device(d); | 954 | phydev = to_phy_device(d); |
954 | 955 | ||
955 | rc = phy_attach_direct(dev, phydev, phydev->dev_flags, interface); | 956 | rc = phy_attach_direct(dev, phydev, phydev->dev_flags, interface); |
957 | put_device(d); | ||
956 | if (rc) | 958 | if (rc) |
957 | return ERR_PTR(rc); | 959 | return ERR_PTR(rc); |
958 | 960 | ||
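These two phy_device.c hunks, like the cpsw-phy-sel and davinci_emac ones above, close the same leak: the struct device returned by bus_find_device() comes back with a reference taken via get_device(), so the caller has to balance it with put_device() once it is done with the device. A minimal userspace sketch of the "find returns a held reference" convention (simplified helpers, not the driver core API):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    struct device {
        int refcnt;
        const char *name;
    };

    static struct device *get_device(struct device *d)
    {
        if (d)
            d->refcnt++;
        return d;
    }

    static void put_device(struct device *d)
    {
        if (d && --d->refcnt == 0) {
            printf("releasing %s\n", d->name);
            free(d);
        }
    }

    /* like bus_find_device(): the match comes back with an extra reference */
    static struct device *find_device(struct device *candidate, const char *name)
    {
        if (candidate && !strcmp(candidate->name, name))
            return get_device(candidate);
        return NULL;
    }

    int main(void)
    {
        struct device *phy = calloc(1, sizeof(*phy));
        struct device *d;

        if (!phy)
            return 1;
        phy->refcnt = 1;
        phy->name = "phy0";

        d = find_device(phy, "phy0");
        if (d) {
            printf("attached to %s\n", d->name);
            put_device(d);              /* balance the find's reference */
        }

        put_device(phy);                /* drop the original reference */
        return 0;
    }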
diff --git a/drivers/net/usb/ax88179_178a.c b/drivers/net/usb/ax88179_178a.c index e6338c16081a..8a6675d92b98 100644 --- a/drivers/net/usb/ax88179_178a.c +++ b/drivers/net/usb/ax88179_178a.c | |||
@@ -1656,6 +1656,19 @@ static const struct driver_info ax88178a_info = { | |||
1656 | .tx_fixup = ax88179_tx_fixup, | 1656 | .tx_fixup = ax88179_tx_fixup, |
1657 | }; | 1657 | }; |
1658 | 1658 | ||
1659 | static const struct driver_info cypress_GX3_info = { | ||
1660 | .description = "Cypress GX3 SuperSpeed to Gigabit Ethernet Controller", | ||
1661 | .bind = ax88179_bind, | ||
1662 | .unbind = ax88179_unbind, | ||
1663 | .status = ax88179_status, | ||
1664 | .link_reset = ax88179_link_reset, | ||
1665 | .reset = ax88179_reset, | ||
1666 | .stop = ax88179_stop, | ||
1667 | .flags = FLAG_ETHER | FLAG_FRAMING_AX, | ||
1668 | .rx_fixup = ax88179_rx_fixup, | ||
1669 | .tx_fixup = ax88179_tx_fixup, | ||
1670 | }; | ||
1671 | |||
1659 | static const struct driver_info dlink_dub1312_info = { | 1672 | static const struct driver_info dlink_dub1312_info = { |
1660 | .description = "D-Link DUB-1312 USB 3.0 to Gigabit Ethernet Adapter", | 1673 | .description = "D-Link DUB-1312 USB 3.0 to Gigabit Ethernet Adapter", |
1661 | .bind = ax88179_bind, | 1674 | .bind = ax88179_bind, |
@@ -1718,6 +1731,10 @@ static const struct usb_device_id products[] = { | |||
1718 | USB_DEVICE(0x0b95, 0x178a), | 1731 | USB_DEVICE(0x0b95, 0x178a), |
1719 | .driver_info = (unsigned long)&ax88178a_info, | 1732 | .driver_info = (unsigned long)&ax88178a_info, |
1720 | }, { | 1733 | }, { |
1734 | /* Cypress GX3 SuperSpeed to Gigabit Ethernet Bridge Controller */ | ||
1735 | USB_DEVICE(0x04b4, 0x3610), | ||
1736 | .driver_info = (unsigned long)&cypress_GX3_info, | ||
1737 | }, { | ||
1721 | /* D-Link DUB-1312 USB 3.0 to Gigabit Ethernet Adapter */ | 1738 | /* D-Link DUB-1312 USB 3.0 to Gigabit Ethernet Adapter */ |
1722 | USB_DEVICE(0x2001, 0x4a00), | 1739 | USB_DEVICE(0x2001, 0x4a00), |
1723 | .driver_info = (unsigned long)&dlink_dub1312_info, | 1740 | .driver_info = (unsigned long)&dlink_dub1312_info, |
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c index 44d439f50961..efb84f092492 100644 --- a/drivers/net/usb/r8152.c +++ b/drivers/net/usb/r8152.c | |||
@@ -1730,7 +1730,7 @@ static u8 r8152_rx_csum(struct r8152 *tp, struct rx_desc *rx_desc) | |||
1730 | u8 checksum = CHECKSUM_NONE; | 1730 | u8 checksum = CHECKSUM_NONE; |
1731 | u32 opts2, opts3; | 1731 | u32 opts2, opts3; |
1732 | 1732 | ||
1733 | if (tp->version == RTL_VER_01) | 1733 | if (tp->version == RTL_VER_01 || tp->version == RTL_VER_02) |
1734 | goto return_result; | 1734 | goto return_result; |
1735 | 1735 | ||
1736 | opts2 = le32_to_cpu(rx_desc->opts2); | 1736 | opts2 = le32_to_cpu(rx_desc->opts2); |
@@ -1745,7 +1745,7 @@ static u8 r8152_rx_csum(struct r8152 *tp, struct rx_desc *rx_desc) | |||
1745 | checksum = CHECKSUM_NONE; | 1745 | checksum = CHECKSUM_NONE; |
1746 | else | 1746 | else |
1747 | checksum = CHECKSUM_UNNECESSARY; | 1747 | checksum = CHECKSUM_UNNECESSARY; |
1748 | } else if (RD_IPV6_CS) { | 1748 | } else if (opts2 & RD_IPV6_CS) { |
1749 | if ((opts2 & RD_UDP_CS) && !(opts3 & UDPF)) | 1749 | if ((opts2 & RD_UDP_CS) && !(opts3 & UDPF)) |
1750 | checksum = CHECKSUM_UNNECESSARY; | 1750 | checksum = CHECKSUM_UNNECESSARY; |
1751 | else if ((opts2 & RD_TCP_CS) && !(opts3 & TCPF)) | 1751 | else if ((opts2 & RD_TCP_CS) && !(opts3 & TCPF)) |
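The second r8152 checksum change fixes a classic flag-test slip: RD_IPV6_CS is a nonzero bit mask, so "else if (RD_IPV6_CS)" was always true and the branch never looked at the descriptor; the condition has to mask the descriptor word, opts2 & RD_IPV6_CS. The difference in a two-line sketch (the bit value is made up for illustration):

    #include <stdio.h>

    #define RD_IPV6_CS (1u << 7)        /* illustrative bit position */

    int main(void)
    {
        unsigned int opts2 = 0;         /* descriptor without the IPv6 bit */

        if (RD_IPV6_CS)                 /* bug: constant, always true */
            printf("buggy test fires\n");
        if (opts2 & RD_IPV6_CS)         /* fixed: test the descriptor */
            printf("fixed test fires\n");
        else
            printf("fixed test correctly skips\n");
        return 0;
    }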
@@ -3266,10 +3266,8 @@ static int rtl8152_open(struct net_device *netdev) | |||
3266 | goto out; | 3266 | goto out; |
3267 | 3267 | ||
3268 | res = usb_autopm_get_interface(tp->intf); | 3268 | res = usb_autopm_get_interface(tp->intf); |
3269 | if (res < 0) { | 3269 | if (res < 0) |
3270 | free_all_mem(tp); | 3270 | goto out_free; |
3271 | goto out; | ||
3272 | } | ||
3273 | 3271 | ||
3274 | mutex_lock(&tp->control); | 3272 | mutex_lock(&tp->control); |
3275 | 3273 | ||
@@ -3285,10 +3283,9 @@ static int rtl8152_open(struct net_device *netdev) | |||
3285 | netif_device_detach(tp->netdev); | 3283 | netif_device_detach(tp->netdev); |
3286 | netif_warn(tp, ifup, netdev, "intr_urb submit failed: %d\n", | 3284 | netif_warn(tp, ifup, netdev, "intr_urb submit failed: %d\n", |
3287 | res); | 3285 | res); |
3288 | free_all_mem(tp); | 3286 | goto out_unlock; |
3289 | } else { | ||
3290 | napi_enable(&tp->napi); | ||
3291 | } | 3287 | } |
3288 | napi_enable(&tp->napi); | ||
3292 | 3289 | ||
3293 | mutex_unlock(&tp->control); | 3290 | mutex_unlock(&tp->control); |
3294 | 3291 | ||
@@ -3297,7 +3294,13 @@ static int rtl8152_open(struct net_device *netdev) | |||
3297 | tp->pm_notifier.notifier_call = rtl_notifier; | 3294 | tp->pm_notifier.notifier_call = rtl_notifier; |
3298 | register_pm_notifier(&tp->pm_notifier); | 3295 | register_pm_notifier(&tp->pm_notifier); |
3299 | #endif | 3296 | #endif |
3297 | return 0; | ||
3300 | 3298 | ||
3299 | out_unlock: | ||
3300 | mutex_unlock(&tp->control); | ||
3301 | usb_autopm_put_interface(tp->intf); | ||
3302 | out_free: | ||
3303 | free_all_mem(tp); | ||
3301 | out: | 3304 | out: |
3302 | return res; | 3305 | return res; |
3303 | } | 3306 | } |
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index fad84f3f4109..fd8b1e62301f 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c | |||
@@ -2038,23 +2038,33 @@ static struct virtio_device_id id_table[] = { | |||
2038 | { 0 }, | 2038 | { 0 }, |
2039 | }; | 2039 | }; |
2040 | 2040 | ||
2041 | #define VIRTNET_FEATURES \ | ||
2042 | VIRTIO_NET_F_CSUM, VIRTIO_NET_F_GUEST_CSUM, \ | ||
2043 | VIRTIO_NET_F_MAC, \ | ||
2044 | VIRTIO_NET_F_HOST_TSO4, VIRTIO_NET_F_HOST_UFO, VIRTIO_NET_F_HOST_TSO6, \ | ||
2045 | VIRTIO_NET_F_HOST_ECN, VIRTIO_NET_F_GUEST_TSO4, VIRTIO_NET_F_GUEST_TSO6, \ | ||
2046 | VIRTIO_NET_F_GUEST_ECN, VIRTIO_NET_F_GUEST_UFO, \ | ||
2047 | VIRTIO_NET_F_MRG_RXBUF, VIRTIO_NET_F_STATUS, VIRTIO_NET_F_CTRL_VQ, \ | ||
2048 | VIRTIO_NET_F_CTRL_RX, VIRTIO_NET_F_CTRL_VLAN, \ | ||
2049 | VIRTIO_NET_F_GUEST_ANNOUNCE, VIRTIO_NET_F_MQ, \ | ||
2050 | VIRTIO_NET_F_CTRL_MAC_ADDR, \ | ||
2051 | VIRTIO_NET_F_MTU | ||
2052 | |||
2041 | static unsigned int features[] = { | 2053 | static unsigned int features[] = { |
2042 | VIRTIO_NET_F_CSUM, VIRTIO_NET_F_GUEST_CSUM, | 2054 | VIRTNET_FEATURES, |
2043 | VIRTIO_NET_F_GSO, VIRTIO_NET_F_MAC, | 2055 | }; |
2044 | VIRTIO_NET_F_HOST_TSO4, VIRTIO_NET_F_HOST_UFO, VIRTIO_NET_F_HOST_TSO6, | 2056 | |
2045 | VIRTIO_NET_F_HOST_ECN, VIRTIO_NET_F_GUEST_TSO4, VIRTIO_NET_F_GUEST_TSO6, | 2057 | static unsigned int features_legacy[] = { |
2046 | VIRTIO_NET_F_GUEST_ECN, VIRTIO_NET_F_GUEST_UFO, | 2058 | VIRTNET_FEATURES, |
2047 | VIRTIO_NET_F_MRG_RXBUF, VIRTIO_NET_F_STATUS, VIRTIO_NET_F_CTRL_VQ, | 2059 | VIRTIO_NET_F_GSO, |
2048 | VIRTIO_NET_F_CTRL_RX, VIRTIO_NET_F_CTRL_VLAN, | ||
2049 | VIRTIO_NET_F_GUEST_ANNOUNCE, VIRTIO_NET_F_MQ, | ||
2050 | VIRTIO_NET_F_CTRL_MAC_ADDR, | ||
2051 | VIRTIO_F_ANY_LAYOUT, | 2060 | VIRTIO_F_ANY_LAYOUT, |
2052 | VIRTIO_NET_F_MTU, | ||
2053 | }; | 2061 | }; |
2054 | 2062 | ||
2055 | static struct virtio_driver virtio_net_driver = { | 2063 | static struct virtio_driver virtio_net_driver = { |
2056 | .feature_table = features, | 2064 | .feature_table = features, |
2057 | .feature_table_size = ARRAY_SIZE(features), | 2065 | .feature_table_size = ARRAY_SIZE(features), |
2066 | .feature_table_legacy = features_legacy, | ||
2067 | .feature_table_size_legacy = ARRAY_SIZE(features_legacy), | ||
2058 | .driver.name = KBUILD_MODNAME, | 2068 | .driver.name = KBUILD_MODNAME, |
2059 | .driver.owner = THIS_MODULE, | 2069 | .driver.owner = THIS_MODULE, |
2060 | .id_table = id_table, | 2070 | .id_table = id_table, |
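Factoring the common bits into a VIRTNET_FEATURES macro lets the modern and legacy feature tables share one list, with the legacy array appending the two legacy-only bits (VIRTIO_NET_F_GSO and VIRTIO_F_ANY_LAYOUT). The shared-initializer-macro idiom in a minimal sketch (the feature values are placeholders):

    #include <stdio.h>

    #define COMMON_FEATURES 1, 2, 3     /* placeholder feature bits */

    static unsigned int features[] = { COMMON_FEATURES };
    static unsigned int features_legacy[] = { COMMON_FEATURES, 7, 8 };

    #define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

    int main(void)
    {
        printf("modern: %zu entries, legacy: %zu entries\n",
               ARRAY_SIZE(features), ARRAY_SIZE(features_legacy));
        return 0;
    }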
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index f3c2fa3ab0d5..24532cdebb00 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c | |||
@@ -944,7 +944,9 @@ static bool vxlan_group_used(struct vxlan_net *vn, struct vxlan_dev *dev) | |||
944 | { | 944 | { |
945 | struct vxlan_dev *vxlan; | 945 | struct vxlan_dev *vxlan; |
946 | struct vxlan_sock *sock4; | 946 | struct vxlan_sock *sock4; |
947 | struct vxlan_sock *sock6 = NULL; | 947 | #if IS_ENABLED(CONFIG_IPV6) |
948 | struct vxlan_sock *sock6; | ||
949 | #endif | ||
948 | unsigned short family = dev->default_dst.remote_ip.sa.sa_family; | 950 | unsigned short family = dev->default_dst.remote_ip.sa.sa_family; |
949 | 951 | ||
950 | sock4 = rtnl_dereference(dev->vn4_sock); | 952 | sock4 = rtnl_dereference(dev->vn4_sock); |
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c index b777e1b2f87a..78d9966a3957 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c | |||
@@ -4516,7 +4516,7 @@ brcmf_cfg80211_start_ap(struct wiphy *wiphy, struct net_device *ndev, | |||
4516 | /* store current 11d setting */ | 4516 | /* store current 11d setting */ |
4517 | if (brcmf_fil_cmd_int_get(ifp, BRCMF_C_GET_REGULATORY, | 4517 | if (brcmf_fil_cmd_int_get(ifp, BRCMF_C_GET_REGULATORY, |
4518 | &ifp->vif->is_11d)) { | 4518 | &ifp->vif->is_11d)) { |
4519 | supports_11d = false; | 4519 | is_11d = supports_11d = false; |
4520 | } else { | 4520 | } else { |
4521 | country_ie = brcmf_parse_tlvs((u8 *)settings->beacon.tail, | 4521 | country_ie = brcmf_parse_tlvs((u8 *)settings->beacon.tail, |
4522 | settings->beacon.tail_len, | 4522 | settings->beacon.tail_len, |
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c index 4fdc3dad3e85..b88e2048ae0b 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c | |||
@@ -1087,6 +1087,15 @@ iwl_mvm_netdetect_config(struct iwl_mvm *mvm, | |||
1087 | ret = iwl_mvm_switch_to_d3(mvm); | 1087 | ret = iwl_mvm_switch_to_d3(mvm); |
1088 | if (ret) | 1088 | if (ret) |
1089 | return ret; | 1089 | return ret; |
1090 | } else { | ||
1091 | /* In theory, we wouldn't have to stop a running sched | ||
1092 | * scan in order to start another one (for | ||
1093 | * net-detect). But in practice this doesn't seem to | ||
1094 | * work properly, so stop any running sched_scan now. | ||
1095 | */ | ||
1096 | ret = iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_SCHED, true); | ||
1097 | if (ret) | ||
1098 | return ret; | ||
1090 | } | 1099 | } |
1091 | 1100 | ||
1092 | /* rfkill release can be either for wowlan or netdetect */ | 1101 | /* rfkill release can be either for wowlan or netdetect */ |
@@ -1254,7 +1263,10 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw, | |||
1254 | out: | 1263 | out: |
1255 | if (ret < 0) { | 1264 | if (ret < 0) { |
1256 | iwl_mvm_ref(mvm, IWL_MVM_REF_UCODE_DOWN); | 1265 | iwl_mvm_ref(mvm, IWL_MVM_REF_UCODE_DOWN); |
1257 | ieee80211_restart_hw(mvm->hw); | 1266 | if (mvm->restart_fw > 0) { |
1267 | mvm->restart_fw--; | ||
1268 | ieee80211_restart_hw(mvm->hw); | ||
1269 | } | ||
1258 | iwl_mvm_free_nd(mvm); | 1270 | iwl_mvm_free_nd(mvm); |
1259 | } | 1271 | } |
1260 | out_noreset: | 1272 | out_noreset: |
@@ -2088,6 +2100,16 @@ static int __iwl_mvm_resume(struct iwl_mvm *mvm, bool test) | |||
2088 | iwl_mvm_update_changed_regdom(mvm); | 2100 | iwl_mvm_update_changed_regdom(mvm); |
2089 | 2101 | ||
2090 | if (mvm->net_detect) { | 2102 | if (mvm->net_detect) { |
2103 | /* If this is a non-unified image, we restart the FW, | ||
2104 | * so no need to stop the netdetect scan. If that | ||
2105 | * fails, continue and try to get the wake-up reasons, | ||
2106 | * but trigger a HW restart by keeping a failure code | ||
2107 | * in ret. | ||
2108 | */ | ||
2109 | if (unified_image) | ||
2110 | ret = iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_NETDETECT, | ||
2111 | false); | ||
2112 | |||
2091 | iwl_mvm_query_netdetect_reasons(mvm, vif); | 2113 | iwl_mvm_query_netdetect_reasons(mvm, vif); |
2092 | /* has unlocked the mutex, so skip that */ | 2114 | /* has unlocked the mutex, so skip that */ |
2093 | goto out; | 2115 | goto out; |
@@ -2271,7 +2293,8 @@ static void iwl_mvm_d3_test_disconn_work_iter(void *_data, u8 *mac, | |||
2271 | static int iwl_mvm_d3_test_release(struct inode *inode, struct file *file) | 2293 | static int iwl_mvm_d3_test_release(struct inode *inode, struct file *file) |
2272 | { | 2294 | { |
2273 | struct iwl_mvm *mvm = inode->i_private; | 2295 | struct iwl_mvm *mvm = inode->i_private; |
2274 | int remaining_time = 10; | 2296 | bool unified_image = fw_has_capa(&mvm->fw->ucode_capa, |
2297 | IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG); | ||
2275 | 2298 | ||
2276 | mvm->d3_test_active = false; | 2299 | mvm->d3_test_active = false; |
2277 | 2300 | ||
@@ -2282,17 +2305,21 @@ static int iwl_mvm_d3_test_release(struct inode *inode, struct file *file) | |||
2282 | mvm->trans->system_pm_mode = IWL_PLAT_PM_MODE_DISABLED; | 2305 | mvm->trans->system_pm_mode = IWL_PLAT_PM_MODE_DISABLED; |
2283 | 2306 | ||
2284 | iwl_abort_notification_waits(&mvm->notif_wait); | 2307 | iwl_abort_notification_waits(&mvm->notif_wait); |
2285 | ieee80211_restart_hw(mvm->hw); | 2308 | if (!unified_image) { |
2309 | int remaining_time = 10; | ||
2286 | 2310 | ||
2287 | /* wait for restart and disconnect all interfaces */ | 2311 | ieee80211_restart_hw(mvm->hw); |
2288 | while (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) && | 2312 | |
2289 | remaining_time > 0) { | 2313 | /* wait for restart and disconnect all interfaces */ |
2290 | remaining_time--; | 2314 | while (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) && |
2291 | msleep(1000); | 2315 | remaining_time > 0) { |
2292 | } | 2316 | remaining_time--; |
2317 | msleep(1000); | ||
2318 | } | ||
2293 | 2319 | ||
2294 | if (remaining_time == 0) | 2320 | if (remaining_time == 0) |
2295 | IWL_ERR(mvm, "Timed out waiting for HW restart to finish!\n"); | 2321 | IWL_ERR(mvm, "Timed out waiting for HW restart!\n"); |
2322 | } | ||
2296 | 2323 | ||
2297 | ieee80211_iterate_active_interfaces_atomic( | 2324 | ieee80211_iterate_active_interfaces_atomic( |
2298 | mvm->hw, IEEE80211_IFACE_ITER_NORMAL, | 2325 | mvm->hw, IEEE80211_IFACE_ITER_NORMAL, |
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c index 07da4efe8458..7b7d2a146e30 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c | |||
@@ -1529,8 +1529,8 @@ static ssize_t iwl_dbgfs_mem_read(struct file *file, char __user *user_buf, | |||
1529 | .data = { &cmd, }, | 1529 | .data = { &cmd, }, |
1530 | .len = { sizeof(cmd) }, | 1530 | .len = { sizeof(cmd) }, |
1531 | }; | 1531 | }; |
1532 | size_t delta, len; | 1532 | size_t delta; |
1533 | ssize_t ret; | 1533 | ssize_t ret, len; |
1534 | 1534 | ||
1535 | hcmd.id = iwl_cmd_id(*ppos >> 24 ? UMAC_RD_WR : LMAC_RD_WR, | 1535 | hcmd.id = iwl_cmd_id(*ppos >> 24 ? UMAC_RD_WR : LMAC_RD_WR, |
1536 | DEBUG_GROUP, 0); | 1536 | DEBUG_GROUP, 0); |
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c index 318efd814037..1db1dc13e988 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c | |||
@@ -4121,7 +4121,6 @@ void iwl_mvm_sync_rx_queues_internal(struct iwl_mvm *mvm, | |||
4121 | struct iwl_mvm_internal_rxq_notif *notif, | 4121 | struct iwl_mvm_internal_rxq_notif *notif, |
4122 | u32 size) | 4122 | u32 size) |
4123 | { | 4123 | { |
4124 | DECLARE_WAIT_QUEUE_HEAD_ONSTACK(notif_waitq); | ||
4125 | u32 qmask = BIT(mvm->trans->num_rx_queues) - 1; | 4124 | u32 qmask = BIT(mvm->trans->num_rx_queues) - 1; |
4126 | int ret; | 4125 | int ret; |
4127 | 4126 | ||
@@ -4143,7 +4142,7 @@ void iwl_mvm_sync_rx_queues_internal(struct iwl_mvm *mvm, | |||
4143 | } | 4142 | } |
4144 | 4143 | ||
4145 | if (notif->sync) | 4144 | if (notif->sync) |
4146 | ret = wait_event_timeout(notif_waitq, | 4145 | ret = wait_event_timeout(mvm->rx_sync_waitq, |
4147 | atomic_read(&mvm->queue_sync_counter) == 0, | 4146 | atomic_read(&mvm->queue_sync_counter) == 0, |
4148 | HZ); | 4147 | HZ); |
4149 | WARN_ON_ONCE(!ret); | 4148 | WARN_ON_ONCE(!ret); |
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h index d17cbf603f7c..c60703e0c246 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h | |||
@@ -937,6 +937,7 @@ struct iwl_mvm { | |||
937 | /* sync d0i3_tx queue and IWL_MVM_STATUS_IN_D0I3 status flag */ | 937 | /* sync d0i3_tx queue and IWL_MVM_STATUS_IN_D0I3 status flag */ |
938 | spinlock_t d0i3_tx_lock; | 938 | spinlock_t d0i3_tx_lock; |
939 | wait_queue_head_t d0i3_exit_waitq; | 939 | wait_queue_head_t d0i3_exit_waitq; |
940 | wait_queue_head_t rx_sync_waitq; | ||
940 | 941 | ||
941 | /* BT-Coex */ | 942 | /* BT-Coex */ |
942 | struct iwl_bt_coex_profile_notif last_bt_notif; | 943 | struct iwl_bt_coex_profile_notif last_bt_notif; |
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c index 05fe6dd1a2c8..4d35deb628bc 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c | |||
@@ -619,6 +619,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg, | |||
619 | spin_lock_init(&mvm->refs_lock); | 619 | spin_lock_init(&mvm->refs_lock); |
620 | skb_queue_head_init(&mvm->d0i3_tx); | 620 | skb_queue_head_init(&mvm->d0i3_tx); |
621 | init_waitqueue_head(&mvm->d0i3_exit_waitq); | 621 | init_waitqueue_head(&mvm->d0i3_exit_waitq); |
622 | init_waitqueue_head(&mvm->rx_sync_waitq); | ||
622 | 623 | ||
623 | atomic_set(&mvm->queue_sync_counter, 0); | 624 | atomic_set(&mvm->queue_sync_counter, 0); |
624 | 625 | ||
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c index a57c6ef5bc14..6c802cee900c 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c | |||
@@ -547,7 +547,8 @@ void iwl_mvm_rx_queue_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb, | |||
547 | "Received expired RX queue sync message\n"); | 547 | "Received expired RX queue sync message\n"); |
548 | return; | 548 | return; |
549 | } | 549 | } |
550 | atomic_dec(&mvm->queue_sync_counter); | 550 | if (!atomic_dec_return(&mvm->queue_sync_counter)) |
551 | wake_up(&mvm->rx_sync_waitq); | ||
551 | } | 552 | } |
552 | 553 | ||
553 | switch (internal_notif->type) { | 554 | switch (internal_notif->type) { |
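The four iwlwifi hunks above (mvm.h, ops.c, mac80211.c and this one) replace an on-stack wait queue with rx_sync_waitq embedded in the long-lived mvm structure, and have the last RX queue to answer do the wake-up via atomic_dec_return(). The waker can then never touch a waitqueue whose stack frame is already gone, and the waiter is woken as soon as the counter reaches zero instead of only noticing when the timeout expires. A stripped-down sketch of the same counter-plus-waitqueue handshake, using invented names (my_dev, my_queue_done):

#include <linux/atomic.h>
#include <linux/wait.h>
#include <linux/jiffies.h>

/* Hypothetical device; names do not come from the iwlwifi driver. */
struct my_dev {
        atomic_t pending;
        wait_queue_head_t waitq;
};

static void my_dev_init(struct my_dev *d)
{
        atomic_set(&d->pending, 0);
        init_waitqueue_head(&d->waitq);  /* lives as long as the device */
}

/* completion side, e.g. a per-queue notification handler */
static void my_queue_done(struct my_dev *d)
{
        if (!atomic_dec_return(&d->pending))
                wake_up(&d->waitq);      /* last responder wakes the waiter */
}

/* requesting side: returns 0 on timeout, remaining jiffies otherwise */
static long my_sync(struct my_dev *d, int nqueues)
{
        atomic_set(&d->pending, nqueues);
        /* ... broadcast the sync request to all queues here ... */
        return wait_event_timeout(d->waitq,
                                  atomic_read(&d->pending) == 0, HZ);
}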
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c index f279fdd6eb44..fa9743205491 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c | |||
@@ -1199,6 +1199,9 @@ static int iwl_mvm_num_scans(struct iwl_mvm *mvm) | |||
1199 | 1199 | ||
1200 | static int iwl_mvm_check_running_scans(struct iwl_mvm *mvm, int type) | 1200 | static int iwl_mvm_check_running_scans(struct iwl_mvm *mvm, int type) |
1201 | { | 1201 | { |
1202 | bool unified_image = fw_has_capa(&mvm->fw->ucode_capa, | ||
1203 | IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG); | ||
1204 | |||
1202 | /* This looks a bit arbitrary, but the idea is that if we run | 1205 | /* This looks a bit arbitrary, but the idea is that if we run |
1203 | * out of possible simultaneous scans and the userspace is | 1206 | * out of possible simultaneous scans and the userspace is |
1204 | * trying to run a scan type that is already running, we | 1207 | * trying to run a scan type that is already running, we |
@@ -1225,12 +1228,30 @@ static int iwl_mvm_check_running_scans(struct iwl_mvm *mvm, int type) | |||
1225 | return -EBUSY; | 1228 | return -EBUSY; |
1226 | return iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_REGULAR, true); | 1229 | return iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_REGULAR, true); |
1227 | case IWL_MVM_SCAN_NETDETECT: | 1230 | case IWL_MVM_SCAN_NETDETECT: |
1228 | /* No need to stop anything for net-detect since the | 1231 | /* For non-unified images, there's no need to stop |
1229 | * firmware is restarted anyway. This way, any sched | 1232 | * anything for net-detect since the firmware is |
1230 | * scans that were running will be restarted when we | 1233 | * restarted anyway. This way, any sched scans that |
1231 | * resume. | 1234 | * were running will be restarted when we resume. |
1232 | */ | 1235 | */ |
1233 | return 0; | 1236 | if (!unified_image) |
1237 | return 0; | ||
1238 | |||
1239 | /* If this is a unified image and we ran out of scans, | ||
1240 | * we need to stop something. Prefer stopping regular | ||
1241 | * scans, because the results are useless at this | ||
1242 | * point, and we should be able to keep running | ||
1243 | * another scheduled scan while suspended. | ||
1244 | */ | ||
1245 | if (mvm->scan_status & IWL_MVM_SCAN_REGULAR_MASK) | ||
1246 | return iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_REGULAR, | ||
1247 | true); | ||
1248 | if (mvm->scan_status & IWL_MVM_SCAN_SCHED_MASK) | ||
1249 | return iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_SCHED, | ||
1250 | true); | ||
1251 | |||
1252 | /* fall through, something is wrong if no scan was | ||
1253 | * running but we ran out of scans. | ||
1254 | */ | ||
1234 | default: | 1255 | default: |
1235 | WARN_ON(1); | 1256 | WARN_ON(1); |
1236 | break; | 1257 | break; |
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c index 001be406a3d3..2f8134b2a504 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c | |||
@@ -541,48 +541,64 @@ static const struct pci_device_id iwl_hw_card_ids[] = { | |||
541 | MODULE_DEVICE_TABLE(pci, iwl_hw_card_ids); | 541 | MODULE_DEVICE_TABLE(pci, iwl_hw_card_ids); |
542 | 542 | ||
543 | #ifdef CONFIG_ACPI | 543 | #ifdef CONFIG_ACPI |
544 | #define SPL_METHOD "SPLC" | 544 | #define ACPI_SPLC_METHOD "SPLC" |
545 | #define SPL_DOMAINTYPE_MODULE BIT(0) | 545 | #define ACPI_SPLC_DOMAIN_WIFI (0x07) |
546 | #define SPL_DOMAINTYPE_WIFI BIT(1) | ||
547 | #define SPL_DOMAINTYPE_WIGIG BIT(2) | ||
548 | #define SPL_DOMAINTYPE_RFEM BIT(3) | ||
549 | 546 | ||
550 | static u64 splx_get_pwr_limit(struct iwl_trans *trans, union acpi_object *splx) | 547 | static u64 splc_get_pwr_limit(struct iwl_trans *trans, union acpi_object *splc) |
551 | { | 548 | { |
552 | union acpi_object *limits, *domain_type, *power_limit; | 549 | union acpi_object *data_pkg, *dflt_pwr_limit; |
553 | 550 | int i; | |
554 | if (splx->type != ACPI_TYPE_PACKAGE || | 551 | |
555 | splx->package.count != 2 || | 552 | /* We need at least two elements, one for the revision and one |
556 | splx->package.elements[0].type != ACPI_TYPE_INTEGER || | 553 | * for the data itself. Also check that the revision is |
557 | splx->package.elements[0].integer.value != 0) { | 554 | * supported (currently only revision 0). |
558 | IWL_ERR(trans, "Unsupported splx structure\n"); | 555 | */ |
556 | if (splc->type != ACPI_TYPE_PACKAGE || | ||
557 | splc->package.count < 2 || | ||
558 | splc->package.elements[0].type != ACPI_TYPE_INTEGER || | ||
559 | splc->package.elements[0].integer.value != 0) { | ||
560 | IWL_DEBUG_INFO(trans, | ||
561 | "Unsupported structure returned by the SPLC method. Ignoring.\n"); | ||
559 | return 0; | 562 | return 0; |
560 | } | 563 | } |
561 | 564 | ||
562 | limits = &splx->package.elements[1]; | 565 | /* loop through all the packages to find the one for WiFi */ |
563 | if (limits->type != ACPI_TYPE_PACKAGE || | 566 | for (i = 1; i < splc->package.count; i++) { |
564 | limits->package.count < 2 || | 567 | union acpi_object *domain; |
565 | limits->package.elements[0].type != ACPI_TYPE_INTEGER || | 568 | |
566 | limits->package.elements[1].type != ACPI_TYPE_INTEGER) { | 569 | data_pkg = &splc->package.elements[i]; |
567 | IWL_ERR(trans, "Invalid limits element\n"); | 570 | |
568 | return 0; | 571 | /* Skip anything that is not a package with the right |
572 | * amount of elements (i.e. at least 2 integers). | ||
573 | */ | ||
574 | if (data_pkg->type != ACPI_TYPE_PACKAGE || | ||
575 | data_pkg->package.count < 2 || | ||
576 | data_pkg->package.elements[0].type != ACPI_TYPE_INTEGER || | ||
577 | data_pkg->package.elements[1].type != ACPI_TYPE_INTEGER) | ||
578 | continue; | ||
579 | |||
580 | domain = &data_pkg->package.elements[0]; | ||
581 | if (domain->integer.value == ACPI_SPLC_DOMAIN_WIFI) | ||
582 | break; | ||
583 | |||
584 | data_pkg = NULL; | ||
569 | } | 585 | } |
570 | 586 | ||
571 | domain_type = &limits->package.elements[0]; | 587 | if (!data_pkg) { |
572 | power_limit = &limits->package.elements[1]; | 588 | IWL_DEBUG_INFO(trans, |
573 | if (!(domain_type->integer.value & SPL_DOMAINTYPE_WIFI)) { | 589 | "No element for the WiFi domain returned by the SPLC method.\n"); |
574 | IWL_DEBUG_INFO(trans, "WiFi power is not limited\n"); | ||
575 | return 0; | 590 | return 0; |
576 | } | 591 | } |
577 | 592 | ||
578 | return power_limit->integer.value; | 593 | dflt_pwr_limit = &data_pkg->package.elements[1]; |
594 | return dflt_pwr_limit->integer.value; | ||
579 | } | 595 | } |
580 | 596 | ||
581 | static void set_dflt_pwr_limit(struct iwl_trans *trans, struct pci_dev *pdev) | 597 | static void set_dflt_pwr_limit(struct iwl_trans *trans, struct pci_dev *pdev) |
582 | { | 598 | { |
583 | acpi_handle pxsx_handle; | 599 | acpi_handle pxsx_handle; |
584 | acpi_handle handle; | 600 | acpi_handle handle; |
585 | struct acpi_buffer splx = {ACPI_ALLOCATE_BUFFER, NULL}; | 601 | struct acpi_buffer splc = {ACPI_ALLOCATE_BUFFER, NULL}; |
586 | acpi_status status; | 602 | acpi_status status; |
587 | 603 | ||
588 | pxsx_handle = ACPI_HANDLE(&pdev->dev); | 604 | pxsx_handle = ACPI_HANDLE(&pdev->dev); |
@@ -593,23 +609,24 @@ static void set_dflt_pwr_limit(struct iwl_trans *trans, struct pci_dev *pdev) | |||
593 | } | 609 | } |
594 | 610 | ||
595 | /* Get the method's handle */ | 611 | /* Get the method's handle */ |
596 | status = acpi_get_handle(pxsx_handle, (acpi_string)SPL_METHOD, &handle); | 612 | status = acpi_get_handle(pxsx_handle, (acpi_string)ACPI_SPLC_METHOD, |
613 | &handle); | ||
597 | if (ACPI_FAILURE(status)) { | 614 | if (ACPI_FAILURE(status)) { |
598 | IWL_DEBUG_INFO(trans, "SPL method not found\n"); | 615 | IWL_DEBUG_INFO(trans, "SPLC method not found\n"); |
599 | return; | 616 | return; |
600 | } | 617 | } |
601 | 618 | ||
602 | /* Call SPLC with no arguments */ | 619 | /* Call SPLC with no arguments */ |
603 | status = acpi_evaluate_object(handle, NULL, NULL, &splx); | 620 | status = acpi_evaluate_object(handle, NULL, NULL, &splc); |
604 | if (ACPI_FAILURE(status)) { | 621 | if (ACPI_FAILURE(status)) { |
605 | IWL_ERR(trans, "SPLC invocation failed (0x%x)\n", status); | 622 | IWL_ERR(trans, "SPLC invocation failed (0x%x)\n", status); |
606 | return; | 623 | return; |
607 | } | 624 | } |
608 | 625 | ||
609 | trans->dflt_pwr_limit = splx_get_pwr_limit(trans, splx.pointer); | 626 | trans->dflt_pwr_limit = splc_get_pwr_limit(trans, splc.pointer); |
610 | IWL_DEBUG_INFO(trans, "Default power limit set to %lld\n", | 627 | IWL_DEBUG_INFO(trans, "Default power limit set to %lld\n", |
611 | trans->dflt_pwr_limit); | 628 | trans->dflt_pwr_limit); |
612 | kfree(splx.pointer); | 629 | kfree(splc.pointer); |
613 | } | 630 | } |
614 | 631 | ||
615 | #else /* CONFIG_ACPI */ | 632 | #else /* CONFIG_ACPI */ |
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c index e9a278b60dfd..5f840f16f40b 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c | |||
@@ -592,6 +592,7 @@ error: | |||
592 | static int iwl_pcie_txq_init(struct iwl_trans *trans, struct iwl_txq *txq, | 592 | static int iwl_pcie_txq_init(struct iwl_trans *trans, struct iwl_txq *txq, |
593 | int slots_num, u32 txq_id) | 593 | int slots_num, u32 txq_id) |
594 | { | 594 | { |
595 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); | ||
595 | int ret; | 596 | int ret; |
596 | 597 | ||
597 | txq->need_update = false; | 598 | txq->need_update = false; |
@@ -606,6 +607,13 @@ static int iwl_pcie_txq_init(struct iwl_trans *trans, struct iwl_txq *txq, | |||
606 | return ret; | 607 | return ret; |
607 | 608 | ||
608 | spin_lock_init(&txq->lock); | 609 | spin_lock_init(&txq->lock); |
610 | |||
611 | if (txq_id == trans_pcie->cmd_queue) { | ||
612 | static struct lock_class_key iwl_pcie_cmd_queue_lock_class; | ||
613 | |||
614 | lockdep_set_class(&txq->lock, &iwl_pcie_cmd_queue_lock_class); | ||
615 | } | ||
616 | |||
609 | __skb_queue_head_init(&txq->overflow_q); | 617 | __skb_queue_head_init(&txq->overflow_q); |
610 | 618 | ||
611 | /* | 619 | /* |
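The tx.c hunk gives the command queue's spinlock its own lockdep class. All TX queue locks are initialised at the same spin_lock_init() call site, so lockdep would otherwise put them in one class and could report false-positive deadlocks when the command-queue lock legitimately nests inside a data-queue lock. A generic sketch of the idiom, with a hypothetical struct foo:

#include <linux/spinlock.h>
#include <linux/lockdep.h>

struct foo {
        spinlock_t lock;
};

static void foo_init(struct foo *f, bool is_special)
{
        spin_lock_init(&f->lock);

        if (is_special) {
                /* one static key per distinct lock class */
                static struct lock_class_key foo_special_lock_class;

                lockdep_set_class(&f->lock, &foo_special_lock_class);
        }
}

The key must have static storage duration because lockdep keeps a pointer to it for the lifetime of the lock.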
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c index e17879dd5d5a..bf2744e1e3db 100644 --- a/drivers/net/xen-netfront.c +++ b/drivers/net/xen-netfront.c | |||
@@ -304,7 +304,7 @@ static void xennet_alloc_rx_buffers(struct netfront_queue *queue) | |||
304 | queue->rx_skbs[id] = skb; | 304 | queue->rx_skbs[id] = skb; |
305 | 305 | ||
306 | ref = gnttab_claim_grant_reference(&queue->gref_rx_head); | 306 | ref = gnttab_claim_grant_reference(&queue->gref_rx_head); |
307 | BUG_ON((signed short)ref < 0); | 307 | WARN_ON_ONCE(IS_ERR_VALUE((unsigned long)(int)ref)); |
308 | queue->grant_rx_ref[id] = ref; | 308 | queue->grant_rx_ref[id] = ref; |
309 | 309 | ||
310 | page = skb_frag_page(&skb_shinfo(skb)->frags[0]); | 310 | page = skb_frag_page(&skb_shinfo(skb)->frags[0]); |
@@ -428,7 +428,7 @@ static void xennet_tx_setup_grant(unsigned long gfn, unsigned int offset, | |||
428 | id = get_id_from_freelist(&queue->tx_skb_freelist, queue->tx_skbs); | 428 | id = get_id_from_freelist(&queue->tx_skb_freelist, queue->tx_skbs); |
429 | tx = RING_GET_REQUEST(&queue->tx, queue->tx.req_prod_pvt++); | 429 | tx = RING_GET_REQUEST(&queue->tx, queue->tx.req_prod_pvt++); |
430 | ref = gnttab_claim_grant_reference(&queue->gref_tx_head); | 430 | ref = gnttab_claim_grant_reference(&queue->gref_tx_head); |
431 | BUG_ON((signed short)ref < 0); | 431 | WARN_ON_ONCE(IS_ERR_VALUE((unsigned long)(int)ref)); |
432 | 432 | ||
433 | gnttab_grant_foreign_access_ref(ref, queue->info->xbdev->otherend_id, | 433 | gnttab_grant_foreign_access_ref(ref, queue->info->xbdev->otherend_id, |
434 | gfn, GNTMAP_readonly); | 434 | gfn, GNTMAP_readonly); |
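The two xen-netfront hunks demote a grant-reference sanity check from BUG_ON() to WARN_ON_ONCE(): a failed claim merits one stack trace, not a panic. The IS_ERR_VALUE() form also drops the old (signed short) cast, which would misfire once grant references exceed 16 bits. A hedged sketch of the same check; claim_ref() and the -ENOSPC fallback are invented for illustration:

#include <linux/types.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/bug.h>

/* claim_ref() is a hypothetical allocator that returns an unsigned
 * handle, or a negative errno stuffed into the unsigned type.
 */
static int add_buffer(u32 (*claim_ref)(void))
{
        u32 ref = claim_ref();

        if (WARN_ON_ONCE(IS_ERR_VALUE((unsigned long)(int)ref)))
                return -ENOSPC;          /* warn once, keep running */

        return 0;
}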
diff --git a/drivers/nfc/mei_phy.c b/drivers/nfc/mei_phy.c index 83deda4bb4d6..6f9563a96488 100644 --- a/drivers/nfc/mei_phy.c +++ b/drivers/nfc/mei_phy.c | |||
@@ -133,7 +133,7 @@ static int mei_nfc_if_version(struct nfc_mei_phy *phy) | |||
133 | return -ENOMEM; | 133 | return -ENOMEM; |
134 | 134 | ||
135 | bytes_recv = mei_cldev_recv(phy->cldev, (u8 *)reply, if_version_length); | 135 | bytes_recv = mei_cldev_recv(phy->cldev, (u8 *)reply, if_version_length); |
136 | if (bytes_recv < 0 || bytes_recv < sizeof(struct mei_nfc_reply)) { | 136 | if (bytes_recv < 0 || bytes_recv < if_version_length) { |
137 | pr_err("Could not read IF version\n"); | 137 | pr_err("Could not read IF version\n"); |
138 | r = -EIO; | 138 | r = -EIO; |
139 | goto err; | 139 | goto err; |
diff --git a/drivers/ntb/hw/intel/ntb_hw_intel.c b/drivers/ntb/hw/intel/ntb_hw_intel.c index 0d5c29ae51de..7310a261c858 100644 --- a/drivers/ntb/hw/intel/ntb_hw_intel.c +++ b/drivers/ntb/hw/intel/ntb_hw_intel.c | |||
@@ -112,17 +112,17 @@ MODULE_PARM_DESC(xeon_b2b_usd_bar2_addr64, | |||
112 | 112 | ||
113 | module_param_named(xeon_b2b_usd_bar4_addr64, | 113 | module_param_named(xeon_b2b_usd_bar4_addr64, |
114 | xeon_b2b_usd_addr.bar4_addr64, ullong, 0644); | 114 | xeon_b2b_usd_addr.bar4_addr64, ullong, 0644); |
115 | MODULE_PARM_DESC(xeon_b2b_usd_bar2_addr64, | 115 | MODULE_PARM_DESC(xeon_b2b_usd_bar4_addr64, |
116 | "XEON B2B USD BAR 4 64-bit address"); | 116 | "XEON B2B USD BAR 4 64-bit address"); |
117 | 117 | ||
118 | module_param_named(xeon_b2b_usd_bar4_addr32, | 118 | module_param_named(xeon_b2b_usd_bar4_addr32, |
119 | xeon_b2b_usd_addr.bar4_addr32, ullong, 0644); | 119 | xeon_b2b_usd_addr.bar4_addr32, ullong, 0644); |
120 | MODULE_PARM_DESC(xeon_b2b_usd_bar2_addr64, | 120 | MODULE_PARM_DESC(xeon_b2b_usd_bar4_addr32, |
121 | "XEON B2B USD split-BAR 4 32-bit address"); | 121 | "XEON B2B USD split-BAR 4 32-bit address"); |
122 | 122 | ||
123 | module_param_named(xeon_b2b_usd_bar5_addr32, | 123 | module_param_named(xeon_b2b_usd_bar5_addr32, |
124 | xeon_b2b_usd_addr.bar5_addr32, ullong, 0644); | 124 | xeon_b2b_usd_addr.bar5_addr32, ullong, 0644); |
125 | MODULE_PARM_DESC(xeon_b2b_usd_bar2_addr64, | 125 | MODULE_PARM_DESC(xeon_b2b_usd_bar5_addr32, |
126 | "XEON B2B USD split-BAR 5 32-bit address"); | 126 | "XEON B2B USD split-BAR 5 32-bit address"); |
127 | 127 | ||
128 | module_param_named(xeon_b2b_dsd_bar2_addr64, | 128 | module_param_named(xeon_b2b_dsd_bar2_addr64, |
@@ -132,17 +132,17 @@ MODULE_PARM_DESC(xeon_b2b_dsd_bar2_addr64, | |||
132 | 132 | ||
133 | module_param_named(xeon_b2b_dsd_bar4_addr64, | 133 | module_param_named(xeon_b2b_dsd_bar4_addr64, |
134 | xeon_b2b_dsd_addr.bar4_addr64, ullong, 0644); | 134 | xeon_b2b_dsd_addr.bar4_addr64, ullong, 0644); |
135 | MODULE_PARM_DESC(xeon_b2b_dsd_bar2_addr64, | 135 | MODULE_PARM_DESC(xeon_b2b_dsd_bar4_addr64, |
136 | "XEON B2B DSD BAR 4 64-bit address"); | 136 | "XEON B2B DSD BAR 4 64-bit address"); |
137 | 137 | ||
138 | module_param_named(xeon_b2b_dsd_bar4_addr32, | 138 | module_param_named(xeon_b2b_dsd_bar4_addr32, |
139 | xeon_b2b_dsd_addr.bar4_addr32, ullong, 0644); | 139 | xeon_b2b_dsd_addr.bar4_addr32, ullong, 0644); |
140 | MODULE_PARM_DESC(xeon_b2b_dsd_bar2_addr64, | 140 | MODULE_PARM_DESC(xeon_b2b_dsd_bar4_addr32, |
141 | "XEON B2B DSD split-BAR 4 32-bit address"); | 141 | "XEON B2B DSD split-BAR 4 32-bit address"); |
142 | 142 | ||
143 | module_param_named(xeon_b2b_dsd_bar5_addr32, | 143 | module_param_named(xeon_b2b_dsd_bar5_addr32, |
144 | xeon_b2b_dsd_addr.bar5_addr32, ullong, 0644); | 144 | xeon_b2b_dsd_addr.bar5_addr32, ullong, 0644); |
145 | MODULE_PARM_DESC(xeon_b2b_dsd_bar2_addr64, | 145 | MODULE_PARM_DESC(xeon_b2b_dsd_bar5_addr32, |
146 | "XEON B2B DSD split-BAR 5 32-bit address"); | 146 | "XEON B2B DSD split-BAR 5 32-bit address"); |
147 | 147 | ||
148 | #ifndef ioread64 | 148 | #ifndef ioread64 |
@@ -1755,6 +1755,8 @@ static int xeon_setup_b2b_mw(struct intel_ntb_dev *ndev, | |||
1755 | XEON_B2B_MIN_SIZE); | 1755 | XEON_B2B_MIN_SIZE); |
1756 | if (!ndev->peer_mmio) | 1756 | if (!ndev->peer_mmio) |
1757 | return -EIO; | 1757 | return -EIO; |
1758 | |||
1759 | ndev->peer_addr = pci_resource_start(pdev, b2b_bar); | ||
1758 | } | 1760 | } |
1759 | 1761 | ||
1760 | return 0; | 1762 | return 0; |
@@ -2019,6 +2021,7 @@ static int intel_ntb_init_pci(struct intel_ntb_dev *ndev, struct pci_dev *pdev) | |||
2019 | goto err_mmio; | 2021 | goto err_mmio; |
2020 | } | 2022 | } |
2021 | ndev->peer_mmio = ndev->self_mmio; | 2023 | ndev->peer_mmio = ndev->self_mmio; |
2024 | ndev->peer_addr = pci_resource_start(pdev, 0); | ||
2022 | 2025 | ||
2023 | return 0; | 2026 | return 0; |
2024 | 2027 | ||
diff --git a/drivers/ntb/ntb_transport.c b/drivers/ntb/ntb_transport.c index 8601c10acf74..4eb8adb34508 100644 --- a/drivers/ntb/ntb_transport.c +++ b/drivers/ntb/ntb_transport.c | |||
@@ -257,7 +257,7 @@ enum { | |||
257 | #define NTB_QP_DEF_NUM_ENTRIES 100 | 257 | #define NTB_QP_DEF_NUM_ENTRIES 100 |
258 | #define NTB_LINK_DOWN_TIMEOUT 10 | 258 | #define NTB_LINK_DOWN_TIMEOUT 10 |
259 | #define DMA_RETRIES 20 | 259 | #define DMA_RETRIES 20 |
260 | #define DMA_OUT_RESOURCE_TO 50 | 260 | #define DMA_OUT_RESOURCE_TO msecs_to_jiffies(50) |
261 | 261 | ||
262 | static void ntb_transport_rxc_db(unsigned long data); | 262 | static void ntb_transport_rxc_db(unsigned long data); |
263 | static const struct ntb_ctx_ops ntb_transport_ops; | 263 | static const struct ntb_ctx_ops ntb_transport_ops; |
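This hunk and the matching one in ntb_perf.c below express the DMA back-off timeout as msecs_to_jiffies(50) instead of a bare 50. The value is eventually handed to an API that counts in jiffies, so a raw constant really means "50 ticks" and its wall-clock duration changes with CONFIG_HZ. A small sketch, assuming the timeout feeds one of the *_timeout() helpers (the completion here is illustrative):

#include <linux/jiffies.h>
#include <linux/completion.h>

#define MY_DMA_TIMEOUT  msecs_to_jiffies(50)    /* HZ-independent */

/* Illustrative wait: the timeout argument of the *_timeout() helpers is
 * in jiffies, so a raw '50' would mean anywhere from 50 ms (HZ=1000) to
 * 500 ms (HZ=100).
 */
static bool wait_for_dma(struct completion *done)
{
        return wait_for_completion_timeout(done, MY_DMA_TIMEOUT) != 0;
}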
diff --git a/drivers/ntb/test/ntb_perf.c b/drivers/ntb/test/ntb_perf.c index 6a50f20bf1cd..e75d4fdc0866 100644 --- a/drivers/ntb/test/ntb_perf.c +++ b/drivers/ntb/test/ntb_perf.c | |||
@@ -72,7 +72,7 @@ | |||
72 | #define MAX_THREADS 32 | 72 | #define MAX_THREADS 32 |
73 | #define MAX_TEST_SIZE SZ_1M | 73 | #define MAX_TEST_SIZE SZ_1M |
74 | #define MAX_SRCS 32 | 74 | #define MAX_SRCS 32 |
75 | #define DMA_OUT_RESOURCE_TO 50 | 75 | #define DMA_OUT_RESOURCE_TO msecs_to_jiffies(50) |
76 | #define DMA_RETRIES 20 | 76 | #define DMA_RETRIES 20 |
77 | #define SZ_4G (1ULL << 32) | 77 | #define SZ_4G (1ULL << 32) |
78 | #define MAX_SEG_ORDER 20 /* no larger than 1M for kmalloc buffer */ | 78 | #define MAX_SEG_ORDER 20 /* no larger than 1M for kmalloc buffer */ |
@@ -589,7 +589,7 @@ static ssize_t debugfs_run_read(struct file *filp, char __user *ubuf, | |||
589 | return -ENOMEM; | 589 | return -ENOMEM; |
590 | 590 | ||
591 | if (mutex_is_locked(&perf->run_mutex)) { | 591 | if (mutex_is_locked(&perf->run_mutex)) { |
592 | out_off = snprintf(buf, 64, "running\n"); | 592 | out_off = scnprintf(buf, 64, "running\n"); |
593 | goto read_from_buf; | 593 | goto read_from_buf; |
594 | } | 594 | } |
595 | 595 | ||
@@ -600,14 +600,14 @@ static ssize_t debugfs_run_read(struct file *filp, char __user *ubuf, | |||
600 | break; | 600 | break; |
601 | 601 | ||
602 | if (pctx->status) { | 602 | if (pctx->status) { |
603 | out_off += snprintf(buf + out_off, 1024 - out_off, | 603 | out_off += scnprintf(buf + out_off, 1024 - out_off, |
604 | "%d: error %d\n", i, | 604 | "%d: error %d\n", i, |
605 | pctx->status); | 605 | pctx->status); |
606 | continue; | 606 | continue; |
607 | } | 607 | } |
608 | 608 | ||
609 | rate = div64_u64(pctx->copied, pctx->diff_us); | 609 | rate = div64_u64(pctx->copied, pctx->diff_us); |
610 | out_off += snprintf(buf + out_off, 1024 - out_off, | 610 | out_off += scnprintf(buf + out_off, 1024 - out_off, |
611 | "%d: copied %llu bytes in %llu usecs, %llu MBytes/s\n", | 611 | "%d: copied %llu bytes in %llu usecs, %llu MBytes/s\n", |
612 | i, pctx->copied, pctx->diff_us, rate); | 612 | i, pctx->copied, pctx->diff_us, rate); |
613 | } | 613 | } |
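The debugfs read path above builds several lines into one fixed buffer while accumulating an offset. snprintf() returns the length that would have been written, so after a truncated line the offset can run past the buffer and the next call writes out of bounds; scnprintf() returns what was actually stored, which keeps the running offset honest. A condensed sketch of the accumulation loop, with invented names:

#include <linux/kernel.h>

/* Build a multi-line report into a fixed buffer without overrunning it. */
static size_t report(char *buf, size_t size, const int *vals, int n)
{
        size_t off = 0;
        int i;

        for (i = 0; i < n; i++)
                off += scnprintf(buf + off, size - off,
                                 "%d: value %d\n", i, vals[i]);

        return off;     /* never exceeds size - 1 */
}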
diff --git a/drivers/ntb/test/ntb_pingpong.c b/drivers/ntb/test/ntb_pingpong.c index 7d311799fca1..435861189d97 100644 --- a/drivers/ntb/test/ntb_pingpong.c +++ b/drivers/ntb/test/ntb_pingpong.c | |||
@@ -88,7 +88,7 @@ MODULE_PARM_DESC(delay_ms, "Milliseconds to delay the response to peer"); | |||
88 | 88 | ||
89 | static unsigned long db_init = 0x7; | 89 | static unsigned long db_init = 0x7; |
90 | module_param(db_init, ulong, 0644); | 90 | module_param(db_init, ulong, 0644); |
91 | MODULE_PARM_DESC(delay_ms, "Initial doorbell bits to ring on the peer"); | 91 | MODULE_PARM_DESC(db_init, "Initial doorbell bits to ring on the peer"); |
92 | 92 | ||
93 | struct pp_ctx { | 93 | struct pp_ctx { |
94 | struct ntb_dev *ntb; | 94 | struct ntb_dev *ntb; |
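Like the ntb_hw_intel.c hunks further up, this is a copy-paste fix: MODULE_PARM_DESC() takes the parameter name as its first argument, and a stale name attaches the description to the wrong (or a non-existent) parameter in modinfo output. The expected pairing, for a hypothetical parameter:

#include <linux/module.h>
#include <linux/moduleparam.h>

static unsigned long db_bits = 0x7;
module_param(db_bits, ulong, 0644);
/* the first argument must match the parameter name above */
MODULE_PARM_DESC(db_bits, "Doorbell bits to ring on the peer");

Running modinfo on the built module is the quickest way to spot such mismatches.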
diff --git a/drivers/nvme/host/lightnvm.c b/drivers/nvme/host/lightnvm.c index f5e3011e31fc..5daf2f4be0cd 100644 --- a/drivers/nvme/host/lightnvm.c +++ b/drivers/nvme/host/lightnvm.c | |||
@@ -612,7 +612,7 @@ int nvme_nvm_register(struct nvme_ns *ns, char *disk_name, int node, | |||
612 | 612 | ||
613 | ret = nvm_register(dev); | 613 | ret = nvm_register(dev); |
614 | 614 | ||
615 | ns->lba_shift = ilog2(dev->sec_size) - 9; | 615 | ns->lba_shift = ilog2(dev->sec_size); |
616 | 616 | ||
617 | if (sysfs_create_group(&dev->dev.kobj, attrs)) | 617 | if (sysfs_create_group(&dev->dev.kobj, attrs)) |
618 | pr_warn("%s: failed to create sysfs group for identification\n", | 618 | pr_warn("%s: failed to create sysfs group for identification\n", |
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c index 0248d0e21fee..5e52034ab010 100644 --- a/drivers/nvme/host/pci.c +++ b/drivers/nvme/host/pci.c | |||
@@ -1242,20 +1242,16 @@ static int nvme_configure_admin_queue(struct nvme_dev *dev) | |||
1242 | 1242 | ||
1243 | result = nvme_enable_ctrl(&dev->ctrl, cap); | 1243 | result = nvme_enable_ctrl(&dev->ctrl, cap); |
1244 | if (result) | 1244 | if (result) |
1245 | goto free_nvmeq; | 1245 | return result; |
1246 | 1246 | ||
1247 | nvmeq->cq_vector = 0; | 1247 | nvmeq->cq_vector = 0; |
1248 | result = queue_request_irq(nvmeq); | 1248 | result = queue_request_irq(nvmeq); |
1249 | if (result) { | 1249 | if (result) { |
1250 | nvmeq->cq_vector = -1; | 1250 | nvmeq->cq_vector = -1; |
1251 | goto free_nvmeq; | 1251 | return result; |
1252 | } | 1252 | } |
1253 | 1253 | ||
1254 | return result; | 1254 | return result; |
1255 | |||
1256 | free_nvmeq: | ||
1257 | nvme_free_queues(dev, 0); | ||
1258 | return result; | ||
1259 | } | 1255 | } |
1260 | 1256 | ||
1261 | static bool nvme_should_reset(struct nvme_dev *dev, u32 csts) | 1257 | static bool nvme_should_reset(struct nvme_dev *dev, u32 csts) |
@@ -1317,10 +1313,8 @@ static int nvme_create_io_queues(struct nvme_dev *dev) | |||
1317 | max = min(dev->max_qid, dev->queue_count - 1); | 1313 | max = min(dev->max_qid, dev->queue_count - 1); |
1318 | for (i = dev->online_queues; i <= max; i++) { | 1314 | for (i = dev->online_queues; i <= max; i++) { |
1319 | ret = nvme_create_queue(dev->queues[i], i); | 1315 | ret = nvme_create_queue(dev->queues[i], i); |
1320 | if (ret) { | 1316 | if (ret) |
1321 | nvme_free_queues(dev, i); | ||
1322 | break; | 1317 | break; |
1323 | } | ||
1324 | } | 1318 | } |
1325 | 1319 | ||
1326 | /* | 1320 | /* |
@@ -1460,13 +1454,9 @@ static int nvme_setup_io_queues(struct nvme_dev *dev) | |||
1460 | result = queue_request_irq(adminq); | 1454 | result = queue_request_irq(adminq); |
1461 | if (result) { | 1455 | if (result) { |
1462 | adminq->cq_vector = -1; | 1456 | adminq->cq_vector = -1; |
1463 | goto free_queues; | 1457 | return result; |
1464 | } | 1458 | } |
1465 | return nvme_create_io_queues(dev); | 1459 | return nvme_create_io_queues(dev); |
1466 | |||
1467 | free_queues: | ||
1468 | nvme_free_queues(dev, 1); | ||
1469 | return result; | ||
1470 | } | 1460 | } |
1471 | 1461 | ||
1472 | static void nvme_del_queue_end(struct request *req, int error) | 1462 | static void nvme_del_queue_end(struct request *req, int error) |
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c index 5a8388177959..3d25add36d91 100644 --- a/drivers/nvme/host/rdma.c +++ b/drivers/nvme/host/rdma.c | |||
@@ -83,6 +83,7 @@ enum nvme_rdma_queue_flags { | |||
83 | NVME_RDMA_Q_CONNECTED = (1 << 0), | 83 | NVME_RDMA_Q_CONNECTED = (1 << 0), |
84 | NVME_RDMA_IB_QUEUE_ALLOCATED = (1 << 1), | 84 | NVME_RDMA_IB_QUEUE_ALLOCATED = (1 << 1), |
85 | NVME_RDMA_Q_DELETING = (1 << 2), | 85 | NVME_RDMA_Q_DELETING = (1 << 2), |
86 | NVME_RDMA_Q_LIVE = (1 << 3), | ||
86 | }; | 87 | }; |
87 | 88 | ||
88 | struct nvme_rdma_queue { | 89 | struct nvme_rdma_queue { |
@@ -624,10 +625,18 @@ static int nvme_rdma_connect_io_queues(struct nvme_rdma_ctrl *ctrl) | |||
624 | 625 | ||
625 | for (i = 1; i < ctrl->queue_count; i++) { | 626 | for (i = 1; i < ctrl->queue_count; i++) { |
626 | ret = nvmf_connect_io_queue(&ctrl->ctrl, i); | 627 | ret = nvmf_connect_io_queue(&ctrl->ctrl, i); |
627 | if (ret) | 628 | if (ret) { |
628 | break; | 629 | dev_info(ctrl->ctrl.device, |
630 | "failed to connect i/o queue: %d\n", ret); | ||
631 | goto out_free_queues; | ||
632 | } | ||
633 | set_bit(NVME_RDMA_Q_LIVE, &ctrl->queues[i].flags); | ||
629 | } | 634 | } |
630 | 635 | ||
636 | return 0; | ||
637 | |||
638 | out_free_queues: | ||
639 | nvme_rdma_free_io_queues(ctrl); | ||
631 | return ret; | 640 | return ret; |
632 | } | 641 | } |
633 | 642 | ||
@@ -712,6 +721,8 @@ static void nvme_rdma_reconnect_ctrl_work(struct work_struct *work) | |||
712 | if (ret) | 721 | if (ret) |
713 | goto stop_admin_q; | 722 | goto stop_admin_q; |
714 | 723 | ||
724 | set_bit(NVME_RDMA_Q_LIVE, &ctrl->queues[0].flags); | ||
725 | |||
715 | ret = nvme_enable_ctrl(&ctrl->ctrl, ctrl->cap); | 726 | ret = nvme_enable_ctrl(&ctrl->ctrl, ctrl->cap); |
716 | if (ret) | 727 | if (ret) |
717 | goto stop_admin_q; | 728 | goto stop_admin_q; |
@@ -761,8 +772,10 @@ static void nvme_rdma_error_recovery_work(struct work_struct *work) | |||
761 | 772 | ||
762 | nvme_stop_keep_alive(&ctrl->ctrl); | 773 | nvme_stop_keep_alive(&ctrl->ctrl); |
763 | 774 | ||
764 | for (i = 0; i < ctrl->queue_count; i++) | 775 | for (i = 0; i < ctrl->queue_count; i++) { |
765 | clear_bit(NVME_RDMA_Q_CONNECTED, &ctrl->queues[i].flags); | 776 | clear_bit(NVME_RDMA_Q_CONNECTED, &ctrl->queues[i].flags); |
777 | clear_bit(NVME_RDMA_Q_LIVE, &ctrl->queues[i].flags); | ||
778 | } | ||
766 | 779 | ||
767 | if (ctrl->queue_count > 1) | 780 | if (ctrl->queue_count > 1) |
768 | nvme_stop_queues(&ctrl->ctrl); | 781 | nvme_stop_queues(&ctrl->ctrl); |
@@ -1378,6 +1391,24 @@ nvme_rdma_timeout(struct request *rq, bool reserved) | |||
1378 | return BLK_EH_HANDLED; | 1391 | return BLK_EH_HANDLED; |
1379 | } | 1392 | } |
1380 | 1393 | ||
1394 | /* | ||
1395 | * We cannot accept any other command until the Connect command has completed. | ||
1396 | */ | ||
1397 | static inline bool nvme_rdma_queue_is_ready(struct nvme_rdma_queue *queue, | ||
1398 | struct request *rq) | ||
1399 | { | ||
1400 | if (unlikely(!test_bit(NVME_RDMA_Q_LIVE, &queue->flags))) { | ||
1401 | struct nvme_command *cmd = (struct nvme_command *)rq->cmd; | ||
1402 | |||
1403 | if (rq->cmd_type != REQ_TYPE_DRV_PRIV || | ||
1404 | cmd->common.opcode != nvme_fabrics_command || | ||
1405 | cmd->fabrics.fctype != nvme_fabrics_type_connect) | ||
1406 | return false; | ||
1407 | } | ||
1408 | |||
1409 | return true; | ||
1410 | } | ||
1411 | |||
1381 | static int nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx, | 1412 | static int nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx, |
1382 | const struct blk_mq_queue_data *bd) | 1413 | const struct blk_mq_queue_data *bd) |
1383 | { | 1414 | { |
@@ -1394,6 +1425,9 @@ static int nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx, | |||
1394 | 1425 | ||
1395 | WARN_ON_ONCE(rq->tag < 0); | 1426 | WARN_ON_ONCE(rq->tag < 0); |
1396 | 1427 | ||
1428 | if (!nvme_rdma_queue_is_ready(queue, rq)) | ||
1429 | return BLK_MQ_RQ_QUEUE_BUSY; | ||
1430 | |||
1397 | dev = queue->device->dev; | 1431 | dev = queue->device->dev; |
1398 | ib_dma_sync_single_for_cpu(dev, sqe->dma, | 1432 | ib_dma_sync_single_for_cpu(dev, sqe->dma, |
1399 | sizeof(struct nvme_command), DMA_TO_DEVICE); | 1433 | sizeof(struct nvme_command), DMA_TO_DEVICE); |
@@ -1544,6 +1578,8 @@ static int nvme_rdma_configure_admin_queue(struct nvme_rdma_ctrl *ctrl) | |||
1544 | if (error) | 1578 | if (error) |
1545 | goto out_cleanup_queue; | 1579 | goto out_cleanup_queue; |
1546 | 1580 | ||
1581 | set_bit(NVME_RDMA_Q_LIVE, &ctrl->queues[0].flags); | ||
1582 | |||
1547 | error = nvmf_reg_read64(&ctrl->ctrl, NVME_REG_CAP, &ctrl->cap); | 1583 | error = nvmf_reg_read64(&ctrl->ctrl, NVME_REG_CAP, &ctrl->cap); |
1548 | if (error) { | 1584 | if (error) { |
1549 | dev_err(ctrl->ctrl.device, | 1585 | dev_err(ctrl->ctrl.device, |
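The host RDMA hunks introduce a per-queue NVME_RDMA_Q_LIVE bit so that, until the Fabrics Connect command completes on a queue, any other request is returned as BLK_MQ_RQ_QUEUE_BUSY rather than sent down a half-initialised queue. The bookkeeping is plain bit twiddling on an unsigned long flags word; a reduced sketch with invented names (my_queue, MYQ_*):

#include <linux/types.h>
#include <linux/bitops.h>

enum { MYQ_CONNECTED, MYQ_LIVE };

struct my_queue {
        unsigned long flags;
};

static void my_queue_connected(struct my_queue *q)
{
        set_bit(MYQ_LIVE, &q->flags);   /* connect command completed */
}

static void my_queue_error(struct my_queue *q)
{
        clear_bit(MYQ_CONNECTED, &q->flags);
        clear_bit(MYQ_LIVE, &q->flags); /* stop accepting I/O */
}

static bool my_queue_ready(struct my_queue *q, bool is_connect_cmd)
{
        /* only the connect command may pass before the queue is live */
        return test_bit(MYQ_LIVE, &q->flags) || is_connect_cmd;
}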
diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c index b4cacb6f0258..a21437a33adb 100644 --- a/drivers/nvme/target/core.c +++ b/drivers/nvme/target/core.c | |||
@@ -838,9 +838,13 @@ static void nvmet_fatal_error_handler(struct work_struct *work) | |||
838 | 838 | ||
839 | void nvmet_ctrl_fatal_error(struct nvmet_ctrl *ctrl) | 839 | void nvmet_ctrl_fatal_error(struct nvmet_ctrl *ctrl) |
840 | { | 840 | { |
841 | ctrl->csts |= NVME_CSTS_CFS; | 841 | mutex_lock(&ctrl->lock); |
842 | INIT_WORK(&ctrl->fatal_err_work, nvmet_fatal_error_handler); | 842 | if (!(ctrl->csts & NVME_CSTS_CFS)) { |
843 | schedule_work(&ctrl->fatal_err_work); | 843 | ctrl->csts |= NVME_CSTS_CFS; |
844 | INIT_WORK(&ctrl->fatal_err_work, nvmet_fatal_error_handler); | ||
845 | schedule_work(&ctrl->fatal_err_work); | ||
846 | } | ||
847 | mutex_unlock(&ctrl->lock); | ||
844 | } | 848 | } |
845 | EXPORT_SYMBOL_GPL(nvmet_ctrl_fatal_error); | 849 | EXPORT_SYMBOL_GPL(nvmet_ctrl_fatal_error); |
846 | 850 | ||
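The target core hunk makes nvmet_ctrl_fatal_error() idempotent: CSTS.CFS is set and the fatal-error work scheduled only on the first call, with ctrl->lock stopping two racing callers from queueing the work twice. A generic sketch of the latch-once-under-a-lock idiom; my_ctrl is hypothetical and the work item is assumed to have been set up with INIT_WORK() when the controller was created:

#include <linux/types.h>
#include <linux/mutex.h>
#include <linux/workqueue.h>

struct my_ctrl {
        struct mutex lock;
        bool failed;
        struct work_struct fatal_work;  /* INIT_WORK()ed at setup time */
};

static void my_fatal_error(struct my_ctrl *ctrl)
{
        mutex_lock(&ctrl->lock);
        if (!ctrl->failed) {            /* latch: run the teardown once */
                ctrl->failed = true;
                schedule_work(&ctrl->fatal_work);
        }
        mutex_unlock(&ctrl->lock);
}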
diff --git a/drivers/nvme/target/rdma.c b/drivers/nvme/target/rdma.c index f8d23999e0f2..005ef5d17a19 100644 --- a/drivers/nvme/target/rdma.c +++ b/drivers/nvme/target/rdma.c | |||
@@ -951,6 +951,7 @@ err_destroy_cq: | |||
951 | 951 | ||
952 | static void nvmet_rdma_destroy_queue_ib(struct nvmet_rdma_queue *queue) | 952 | static void nvmet_rdma_destroy_queue_ib(struct nvmet_rdma_queue *queue) |
953 | { | 953 | { |
954 | ib_drain_qp(queue->cm_id->qp); | ||
954 | rdma_destroy_qp(queue->cm_id); | 955 | rdma_destroy_qp(queue->cm_id); |
955 | ib_free_cq(queue->cq); | 956 | ib_free_cq(queue->cq); |
956 | } | 957 | } |
@@ -1066,6 +1067,7 @@ nvmet_rdma_alloc_queue(struct nvmet_rdma_device *ndev, | |||
1066 | spin_lock_init(&queue->rsp_wr_wait_lock); | 1067 | spin_lock_init(&queue->rsp_wr_wait_lock); |
1067 | INIT_LIST_HEAD(&queue->free_rsps); | 1068 | INIT_LIST_HEAD(&queue->free_rsps); |
1068 | spin_lock_init(&queue->rsps_lock); | 1069 | spin_lock_init(&queue->rsps_lock); |
1070 | INIT_LIST_HEAD(&queue->queue_list); | ||
1069 | 1071 | ||
1070 | queue->idx = ida_simple_get(&nvmet_rdma_queue_ida, 0, 0, GFP_KERNEL); | 1072 | queue->idx = ida_simple_get(&nvmet_rdma_queue_ida, 0, 0, GFP_KERNEL); |
1071 | if (queue->idx < 0) { | 1073 | if (queue->idx < 0) { |
@@ -1244,7 +1246,6 @@ static void __nvmet_rdma_queue_disconnect(struct nvmet_rdma_queue *queue) | |||
1244 | 1246 | ||
1245 | if (disconnect) { | 1247 | if (disconnect) { |
1246 | rdma_disconnect(queue->cm_id); | 1248 | rdma_disconnect(queue->cm_id); |
1247 | ib_drain_qp(queue->cm_id->qp); | ||
1248 | schedule_work(&queue->release_work); | 1249 | schedule_work(&queue->release_work); |
1249 | } | 1250 | } |
1250 | } | 1251 | } |
@@ -1269,7 +1270,12 @@ static void nvmet_rdma_queue_connect_fail(struct rdma_cm_id *cm_id, | |||
1269 | { | 1270 | { |
1270 | WARN_ON_ONCE(queue->state != NVMET_RDMA_Q_CONNECTING); | 1271 | WARN_ON_ONCE(queue->state != NVMET_RDMA_Q_CONNECTING); |
1271 | 1272 | ||
1272 | pr_err("failed to connect queue\n"); | 1273 | mutex_lock(&nvmet_rdma_queue_mutex); |
1274 | if (!list_empty(&queue->queue_list)) | ||
1275 | list_del_init(&queue->queue_list); | ||
1276 | mutex_unlock(&nvmet_rdma_queue_mutex); | ||
1277 | |||
1278 | pr_err("failed to connect queue %d\n", queue->idx); | ||
1273 | schedule_work(&queue->release_work); | 1279 | schedule_work(&queue->release_work); |
1274 | } | 1280 | } |
1275 | 1281 | ||
@@ -1352,7 +1358,13 @@ static int nvmet_rdma_cm_handler(struct rdma_cm_id *cm_id, | |||
1352 | case RDMA_CM_EVENT_ADDR_CHANGE: | 1358 | case RDMA_CM_EVENT_ADDR_CHANGE: |
1353 | case RDMA_CM_EVENT_DISCONNECTED: | 1359 | case RDMA_CM_EVENT_DISCONNECTED: |
1354 | case RDMA_CM_EVENT_TIMEWAIT_EXIT: | 1360 | case RDMA_CM_EVENT_TIMEWAIT_EXIT: |
1355 | nvmet_rdma_queue_disconnect(queue); | 1361 | /* |
1362 | * We might end up here when we already freed the qp | ||
1363 | * which means queue release sequence is in progress, | ||
1364 | * so don't get in the way... | ||
1365 | */ | ||
1366 | if (queue) | ||
1367 | nvmet_rdma_queue_disconnect(queue); | ||
1356 | break; | 1368 | break; |
1357 | case RDMA_CM_EVENT_DEVICE_REMOVAL: | 1369 | case RDMA_CM_EVENT_DEVICE_REMOVAL: |
1358 | ret = nvmet_rdma_device_removal(cm_id, queue); | 1370 | ret = nvmet_rdma_device_removal(cm_id, queue); |
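Two of the target RDMA hunks cooperate: queue_list is initialised with INIT_LIST_HEAD() as soon as the queue is allocated, and the connect-failure path unlinks it only after a list_empty() check, so tearing down a queue that never reached the global list no longer touches poisoned list pointers. The underlying idiom, with assumed names:

#include <linux/list.h>
#include <linux/mutex.h>

static LIST_HEAD(my_queue_list);
static DEFINE_MUTEX(my_queue_mutex);

struct my_queue {
        struct list_head node;
};

static void my_queue_alloc_init(struct my_queue *q)
{
        /* self-linked, so list_empty(&q->node) is true until insertion */
        INIT_LIST_HEAD(&q->node);
}

static void my_queue_teardown(struct my_queue *q)
{
        mutex_lock(&my_queue_mutex);
        if (!list_empty(&q->node))      /* only unlink if ever linked */
                list_del_init(&q->node);
        mutex_unlock(&my_queue_mutex);
}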
diff --git a/drivers/of/base.c b/drivers/of/base.c index d687e6de24a0..a0bccb54a9bd 100644 --- a/drivers/of/base.c +++ b/drivers/of/base.c | |||
@@ -2077,8 +2077,6 @@ void of_alias_scan(void * (*dt_alloc)(u64 size, u64 align)) | |||
2077 | name = of_get_property(of_aliases, "stdout", NULL); | 2077 | name = of_get_property(of_aliases, "stdout", NULL); |
2078 | if (name) | 2078 | if (name) |
2079 | of_stdout = of_find_node_opts_by_path(name, &of_stdout_options); | 2079 | of_stdout = of_find_node_opts_by_path(name, &of_stdout_options); |
2080 | if (of_stdout) | ||
2081 | console_set_by_of(); | ||
2082 | } | 2080 | } |
2083 | 2081 | ||
2084 | if (!of_aliases) | 2082 | if (!of_aliases) |
diff --git a/drivers/pci/host/pcie-rockchip.c b/drivers/pci/host/pcie-rockchip.c index e0b22dab9b7a..e04f69beb42d 100644 --- a/drivers/pci/host/pcie-rockchip.c +++ b/drivers/pci/host/pcie-rockchip.c | |||
@@ -190,6 +190,9 @@ struct rockchip_pcie { | |||
190 | struct reset_control *mgmt_rst; | 190 | struct reset_control *mgmt_rst; |
191 | struct reset_control *mgmt_sticky_rst; | 191 | struct reset_control *mgmt_sticky_rst; |
192 | struct reset_control *pipe_rst; | 192 | struct reset_control *pipe_rst; |
193 | struct reset_control *pm_rst; | ||
194 | struct reset_control *aclk_rst; | ||
195 | struct reset_control *pclk_rst; | ||
193 | struct clk *aclk_pcie; | 196 | struct clk *aclk_pcie; |
194 | struct clk *aclk_perf_pcie; | 197 | struct clk *aclk_perf_pcie; |
195 | struct clk *hclk_pcie; | 198 | struct clk *hclk_pcie; |
@@ -408,6 +411,44 @@ static int rockchip_pcie_init_port(struct rockchip_pcie *rockchip) | |||
408 | 411 | ||
409 | gpiod_set_value(rockchip->ep_gpio, 0); | 412 | gpiod_set_value(rockchip->ep_gpio, 0); |
410 | 413 | ||
414 | err = reset_control_assert(rockchip->aclk_rst); | ||
415 | if (err) { | ||
416 | dev_err(dev, "assert aclk_rst err %d\n", err); | ||
417 | return err; | ||
418 | } | ||
419 | |||
420 | err = reset_control_assert(rockchip->pclk_rst); | ||
421 | if (err) { | ||
422 | dev_err(dev, "assert pclk_rst err %d\n", err); | ||
423 | return err; | ||
424 | } | ||
425 | |||
426 | err = reset_control_assert(rockchip->pm_rst); | ||
427 | if (err) { | ||
428 | dev_err(dev, "assert pm_rst err %d\n", err); | ||
429 | return err; | ||
430 | } | ||
431 | |||
432 | udelay(10); | ||
433 | |||
434 | err = reset_control_deassert(rockchip->pm_rst); | ||
435 | if (err) { | ||
436 | dev_err(dev, "deassert pm_rst err %d\n", err); | ||
437 | return err; | ||
438 | } | ||
439 | |||
440 | err = reset_control_deassert(rockchip->aclk_rst); | ||
441 | if (err) { | ||
442 | dev_err(dev, "deassert aclk_rst err %d\n", err); | ||
443 | return err; | ||
444 | } | ||
445 | |||
446 | err = reset_control_deassert(rockchip->pclk_rst); | ||
447 | if (err) { | ||
448 | dev_err(dev, "deassert pclk_rst err %d\n", err); | ||
449 | return err; | ||
450 | } | ||
451 | |||
411 | err = phy_init(rockchip->phy); | 452 | err = phy_init(rockchip->phy); |
412 | if (err < 0) { | 453 | if (err < 0) { |
413 | dev_err(dev, "fail to init phy, err %d\n", err); | 454 | dev_err(dev, "fail to init phy, err %d\n", err); |
@@ -781,6 +822,27 @@ static int rockchip_pcie_parse_dt(struct rockchip_pcie *rockchip) | |||
781 | return PTR_ERR(rockchip->pipe_rst); | 822 | return PTR_ERR(rockchip->pipe_rst); |
782 | } | 823 | } |
783 | 824 | ||
825 | rockchip->pm_rst = devm_reset_control_get(dev, "pm"); | ||
826 | if (IS_ERR(rockchip->pm_rst)) { | ||
827 | if (PTR_ERR(rockchip->pm_rst) != -EPROBE_DEFER) | ||
828 | dev_err(dev, "missing pm reset property in node\n"); | ||
829 | return PTR_ERR(rockchip->pm_rst); | ||
830 | } | ||
831 | |||
832 | rockchip->pclk_rst = devm_reset_control_get(dev, "pclk"); | ||
833 | if (IS_ERR(rockchip->pclk_rst)) { | ||
834 | if (PTR_ERR(rockchip->pclk_rst) != -EPROBE_DEFER) | ||
835 | dev_err(dev, "missing pclk reset property in node\n"); | ||
836 | return PTR_ERR(rockchip->pclk_rst); | ||
837 | } | ||
838 | |||
839 | rockchip->aclk_rst = devm_reset_control_get(dev, "aclk"); | ||
840 | if (IS_ERR(rockchip->aclk_rst)) { | ||
841 | if (PTR_ERR(rockchip->aclk_rst) != -EPROBE_DEFER) | ||
842 | dev_err(dev, "missing aclk reset property in node\n"); | ||
843 | return PTR_ERR(rockchip->aclk_rst); | ||
844 | } | ||
845 | |||
784 | rockchip->ep_gpio = devm_gpiod_get(dev, "ep", GPIOD_OUT_HIGH); | 846 | rockchip->ep_gpio = devm_gpiod_get(dev, "ep", GPIOD_OUT_HIGH); |
785 | if (IS_ERR(rockchip->ep_gpio)) { | 847 | if (IS_ERR(rockchip->ep_gpio)) { |
786 | dev_err(dev, "missing ep-gpios property in node\n"); | 848 | dev_err(dev, "missing ep-gpios property in node\n"); |
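The Rockchip host driver now requests three more named reset lines and pulses them (assert, short delay, deassert) before PHY init. The lookup pattern is the usual one for provider-backed resources: devm_reset_control_get() can return -EPROBE_DEFER when the reset controller has not probed yet, and that case should be passed up without an error message. A trimmed sketch; the "pm" name and the 10 µs settle delay mirror the hunk, everything else is illustrative:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/reset.h>
#include <linux/delay.h>

static int foo_pulse_pm_reset(struct device *dev)
{
        struct reset_control *rst;
        int err;

        rst = devm_reset_control_get(dev, "pm");
        if (IS_ERR(rst)) {
                /* defer quietly; only real failures deserve a message */
                if (PTR_ERR(rst) != -EPROBE_DEFER)
                        dev_err(dev, "missing pm reset: %ld\n",
                                PTR_ERR(rst));
                return PTR_ERR(rst);
        }

        err = reset_control_assert(rst);
        if (err)
                return err;

        udelay(10);                     /* let the reset propagate */

        return reset_control_deassert(rst);
}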
diff --git a/drivers/pci/pci-mid.c b/drivers/pci/pci-mid.c index 55f453de562e..c7f3408e3148 100644 --- a/drivers/pci/pci-mid.c +++ b/drivers/pci/pci-mid.c | |||
@@ -29,6 +29,11 @@ static int mid_pci_set_power_state(struct pci_dev *pdev, pci_power_t state) | |||
29 | return intel_mid_pci_set_power_state(pdev, state); | 29 | return intel_mid_pci_set_power_state(pdev, state); |
30 | } | 30 | } |
31 | 31 | ||
32 | static pci_power_t mid_pci_get_power_state(struct pci_dev *pdev) | ||
33 | { | ||
34 | return intel_mid_pci_get_power_state(pdev); | ||
35 | } | ||
36 | |||
32 | static pci_power_t mid_pci_choose_state(struct pci_dev *pdev) | 37 | static pci_power_t mid_pci_choose_state(struct pci_dev *pdev) |
33 | { | 38 | { |
34 | return PCI_D3hot; | 39 | return PCI_D3hot; |
@@ -52,6 +57,7 @@ static bool mid_pci_need_resume(struct pci_dev *dev) | |||
52 | static struct pci_platform_pm_ops mid_pci_platform_pm = { | 57 | static struct pci_platform_pm_ops mid_pci_platform_pm = { |
53 | .is_manageable = mid_pci_power_manageable, | 58 | .is_manageable = mid_pci_power_manageable, |
54 | .set_state = mid_pci_set_power_state, | 59 | .set_state = mid_pci_set_power_state, |
60 | .get_state = mid_pci_get_power_state, | ||
55 | .choose_state = mid_pci_choose_state, | 61 | .choose_state = mid_pci_choose_state, |
56 | .sleep_wake = mid_pci_sleep_wake, | 62 | .sleep_wake = mid_pci_sleep_wake, |
57 | .run_wake = mid_pci_run_wake, | 63 | .run_wake = mid_pci_run_wake, |
diff --git a/drivers/pci/setup-res.c b/drivers/pci/setup-res.c index 66c4d8f42233..9526e341988b 100644 --- a/drivers/pci/setup-res.c +++ b/drivers/pci/setup-res.c | |||
@@ -121,6 +121,14 @@ int pci_claim_resource(struct pci_dev *dev, int resource) | |||
121 | return -EINVAL; | 121 | return -EINVAL; |
122 | } | 122 | } |
123 | 123 | ||
124 | /* | ||
125 | * If we have a shadow copy in RAM, the PCI device doesn't respond | ||
126 | * to the shadow range, so we don't need to claim it, and upstream | ||
127 | * bridges don't need to route the range to the device. | ||
128 | */ | ||
129 | if (res->flags & IORESOURCE_ROM_SHADOW) | ||
130 | return 0; | ||
131 | |||
124 | root = pci_find_parent_resource(dev, res); | 132 | root = pci_find_parent_resource(dev, res); |
125 | if (!root) { | 133 | if (!root) { |
126 | dev_info(&dev->dev, "can't claim BAR %d %pR: no compatible bridge window\n", | 134 | dev_info(&dev->dev, "can't claim BAR %d %pR: no compatible bridge window\n", |
diff --git a/drivers/pcmcia/soc_common.c b/drivers/pcmcia/soc_common.c index 153f3122283d..b6b316de055c 100644 --- a/drivers/pcmcia/soc_common.c +++ b/drivers/pcmcia/soc_common.c | |||
@@ -107,7 +107,7 @@ int soc_pcmcia_regulator_set(struct soc_pcmcia_socket *skt, | |||
107 | 107 | ||
108 | ret = regulator_enable(r->reg); | 108 | ret = regulator_enable(r->reg); |
109 | } else { | 109 | } else { |
110 | regulator_disable(r->reg); | 110 | ret = regulator_disable(r->reg); |
111 | } | 111 | } |
112 | if (ret == 0) | 112 | if (ret == 0) |
113 | r->on = on; | 113 | r->on = on; |
diff --git a/drivers/phy/phy-da8xx-usb.c b/drivers/phy/phy-da8xx-usb.c index 32ae78c8ca17..c85fb0b59729 100644 --- a/drivers/phy/phy-da8xx-usb.c +++ b/drivers/phy/phy-da8xx-usb.c | |||
@@ -198,7 +198,8 @@ static int da8xx_usb_phy_probe(struct platform_device *pdev) | |||
198 | } else { | 198 | } else { |
199 | int ret; | 199 | int ret; |
200 | 200 | ||
201 | ret = phy_create_lookup(d_phy->usb11_phy, "usb-phy", "ohci.0"); | 201 | ret = phy_create_lookup(d_phy->usb11_phy, "usb-phy", |
202 | "ohci-da8xx"); | ||
202 | if (ret) | 203 | if (ret) |
203 | dev_warn(dev, "Failed to create usb11 phy lookup\n"); | 204 | dev_warn(dev, "Failed to create usb11 phy lookup\n"); |
204 | ret = phy_create_lookup(d_phy->usb20_phy, "usb-phy", | 205 | ret = phy_create_lookup(d_phy->usb20_phy, "usb-phy", |
@@ -216,7 +217,7 @@ static int da8xx_usb_phy_remove(struct platform_device *pdev) | |||
216 | 217 | ||
217 | if (!pdev->dev.of_node) { | 218 | if (!pdev->dev.of_node) { |
218 | phy_remove_lookup(d_phy->usb20_phy, "usb-phy", "musb-da8xx"); | 219 | phy_remove_lookup(d_phy->usb20_phy, "usb-phy", "musb-da8xx"); |
219 | phy_remove_lookup(d_phy->usb11_phy, "usb-phy", "ohci.0"); | 220 | phy_remove_lookup(d_phy->usb11_phy, "usb-phy", "ohci-da8xx"); |
220 | } | 221 | } |
221 | 222 | ||
222 | return 0; | 223 | return 0; |
diff --git a/drivers/phy/phy-rockchip-pcie.c b/drivers/phy/phy-rockchip-pcie.c index a2b4c6b58aea..6904633cad68 100644 --- a/drivers/phy/phy-rockchip-pcie.c +++ b/drivers/phy/phy-rockchip-pcie.c | |||
@@ -249,21 +249,10 @@ err_refclk: | |||
249 | static int rockchip_pcie_phy_exit(struct phy *phy) | 249 | static int rockchip_pcie_phy_exit(struct phy *phy) |
250 | { | 250 | { |
251 | struct rockchip_pcie_phy *rk_phy = phy_get_drvdata(phy); | 251 | struct rockchip_pcie_phy *rk_phy = phy_get_drvdata(phy); |
252 | int err = 0; | ||
253 | 252 | ||
254 | clk_disable_unprepare(rk_phy->clk_pciephy_ref); | 253 | clk_disable_unprepare(rk_phy->clk_pciephy_ref); |
255 | 254 | ||
256 | err = reset_control_deassert(rk_phy->phy_rst); | 255 | return 0; |
257 | if (err) { | ||
258 | dev_err(&phy->dev, "deassert phy_rst err %d\n", err); | ||
259 | goto err_reset; | ||
260 | } | ||
261 | |||
262 | return err; | ||
263 | |||
264 | err_reset: | ||
265 | clk_prepare_enable(rk_phy->clk_pciephy_ref); | ||
266 | return err; | ||
267 | } | 256 | } |
268 | 257 | ||
269 | static const struct phy_ops ops = { | 258 | static const struct phy_ops ops = { |
diff --git a/drivers/phy/phy-sun4i-usb.c b/drivers/phy/phy-sun4i-usb.c index b9342a2af7b3..fec34f5213c4 100644 --- a/drivers/phy/phy-sun4i-usb.c +++ b/drivers/phy/phy-sun4i-usb.c | |||
@@ -264,7 +264,7 @@ static int sun4i_usb_phy_init(struct phy *_phy) | |||
264 | return ret; | 264 | return ret; |
265 | } | 265 | } |
266 | 266 | ||
267 | if (data->cfg->enable_pmu_unk1) { | 267 | if (phy->pmu && data->cfg->enable_pmu_unk1) { |
268 | val = readl(phy->pmu + REG_PMU_UNK1); | 268 | val = readl(phy->pmu + REG_PMU_UNK1); |
269 | writel(val & ~2, phy->pmu + REG_PMU_UNK1); | 269 | writel(val & ~2, phy->pmu + REG_PMU_UNK1); |
270 | } | 270 | } |
diff --git a/drivers/pinctrl/aspeed/pinctrl-aspeed-g5.c b/drivers/pinctrl/aspeed/pinctrl-aspeed-g5.c index c8c72e8259d3..87b46390b695 100644 --- a/drivers/pinctrl/aspeed/pinctrl-aspeed-g5.c +++ b/drivers/pinctrl/aspeed/pinctrl-aspeed-g5.c | |||
@@ -26,7 +26,7 @@ | |||
26 | 26 | ||
27 | #define ASPEED_G5_NR_PINS 228 | 27 | #define ASPEED_G5_NR_PINS 228 |
28 | 28 | ||
29 | #define COND1 SIG_DESC_BIT(SCU90, 6, 0) | 29 | #define COND1 { SCU90, BIT(6), 0, 0 } |
30 | #define COND2 { SCU94, GENMASK(1, 0), 0, 0 } | 30 | #define COND2 { SCU94, GENMASK(1, 0), 0, 0 } |
31 | 31 | ||
32 | #define B14 0 | 32 | #define B14 0 |
diff --git a/drivers/pinctrl/bcm/pinctrl-iproc-gpio.c b/drivers/pinctrl/bcm/pinctrl-iproc-gpio.c index 7f7700716398..5d1e505c3c63 100644 --- a/drivers/pinctrl/bcm/pinctrl-iproc-gpio.c +++ b/drivers/pinctrl/bcm/pinctrl-iproc-gpio.c | |||
@@ -844,6 +844,6 @@ static struct platform_driver iproc_gpio_driver = { | |||
844 | 844 | ||
845 | static int __init iproc_gpio_init(void) | 845 | static int __init iproc_gpio_init(void) |
846 | { | 846 | { |
847 | return platform_driver_probe(&iproc_gpio_driver, iproc_gpio_probe); | 847 | return platform_driver_register(&iproc_gpio_driver); |
848 | } | 848 | } |
849 | arch_initcall_sync(iproc_gpio_init); | 849 | arch_initcall_sync(iproc_gpio_init); |
diff --git a/drivers/pinctrl/bcm/pinctrl-nsp-gpio.c b/drivers/pinctrl/bcm/pinctrl-nsp-gpio.c index 35783db1c10b..c8deb8be1da7 100644 --- a/drivers/pinctrl/bcm/pinctrl-nsp-gpio.c +++ b/drivers/pinctrl/bcm/pinctrl-nsp-gpio.c | |||
@@ -741,6 +741,6 @@ static struct platform_driver nsp_gpio_driver = { | |||
741 | 741 | ||
742 | static int __init nsp_gpio_init(void) | 742 | static int __init nsp_gpio_init(void) |
743 | { | 743 | { |
744 | return platform_driver_probe(&nsp_gpio_driver, nsp_gpio_probe); | 744 | return platform_driver_register(&nsp_gpio_driver); |
745 | } | 745 | } |
746 | arch_initcall_sync(nsp_gpio_init); | 746 | arch_initcall_sync(nsp_gpio_init); |
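Both Broadcom GPIO hunks switch from platform_driver_probe() to platform_driver_register(). platform_driver_probe() is a one-shot helper: if the first probe attempt fails, including with -EPROBE_DEFER, the driver is unregistered and never retried, whereas platform_driver_register() keeps the driver around so deferred probing can complete later. The registration style, for a hypothetical driver:

#include <linux/init.h>
#include <linux/platform_device.h>

static int foo_probe(struct platform_device *pdev)
{
        return 0;
}

static struct platform_driver foo_driver = {
        .driver = { .name = "foo" },
        .probe  = foo_probe,            /* needed for deferred re-probing */
};

static int __init foo_init(void)
{
        /*
         * platform_driver_probe(&foo_driver, foo_probe) would try only
         * once and give up; registering keeps probe deferral working.
         */
        return platform_driver_register(&foo_driver);
}
arch_initcall_sync(foo_init);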
diff --git a/drivers/pinctrl/freescale/pinctrl-imx.c b/drivers/pinctrl/freescale/pinctrl-imx.c index 47613201269a..79c4e14a5a75 100644 --- a/drivers/pinctrl/freescale/pinctrl-imx.c +++ b/drivers/pinctrl/freescale/pinctrl-imx.c | |||
@@ -687,6 +687,7 @@ static int imx_pinctrl_probe_dt(struct platform_device *pdev, | |||
687 | if (!info->functions) | 687 | if (!info->functions) |
688 | return -ENOMEM; | 688 | return -ENOMEM; |
689 | 689 | ||
690 | info->group_index = 0; | ||
690 | if (flat_funcs) { | 691 | if (flat_funcs) { |
691 | info->ngroups = of_get_child_count(np); | 692 | info->ngroups = of_get_child_count(np); |
692 | } else { | 693 | } else { |
diff --git a/drivers/pinctrl/intel/pinctrl-cherryview.c b/drivers/pinctrl/intel/pinctrl-cherryview.c index 30389f4ccab4..c43b1e9a06af 100644 --- a/drivers/pinctrl/intel/pinctrl-cherryview.c +++ b/drivers/pinctrl/intel/pinctrl-cherryview.c | |||
@@ -1652,12 +1652,15 @@ static int chv_pinctrl_probe(struct platform_device *pdev) | |||
1652 | } | 1652 | } |
1653 | 1653 | ||
1654 | #ifdef CONFIG_PM_SLEEP | 1654 | #ifdef CONFIG_PM_SLEEP |
1655 | static int chv_pinctrl_suspend(struct device *dev) | 1655 | static int chv_pinctrl_suspend_noirq(struct device *dev) |
1656 | { | 1656 | { |
1657 | struct platform_device *pdev = to_platform_device(dev); | 1657 | struct platform_device *pdev = to_platform_device(dev); |
1658 | struct chv_pinctrl *pctrl = platform_get_drvdata(pdev); | 1658 | struct chv_pinctrl *pctrl = platform_get_drvdata(pdev); |
1659 | unsigned long flags; | ||
1659 | int i; | 1660 | int i; |
1660 | 1661 | ||
1662 | raw_spin_lock_irqsave(&chv_lock, flags); | ||
1663 | |||
1661 | pctrl->saved_intmask = readl(pctrl->regs + CHV_INTMASK); | 1664 | pctrl->saved_intmask = readl(pctrl->regs + CHV_INTMASK); |
1662 | 1665 | ||
1663 | for (i = 0; i < pctrl->community->npins; i++) { | 1666 | for (i = 0; i < pctrl->community->npins; i++) { |
@@ -1678,15 +1681,20 @@ static int chv_pinctrl_suspend(struct device *dev) | |||
1678 | ctx->padctrl1 = readl(reg); | 1681 | ctx->padctrl1 = readl(reg); |
1679 | } | 1682 | } |
1680 | 1683 | ||
1684 | raw_spin_unlock_irqrestore(&chv_lock, flags); | ||
1685 | |||
1681 | return 0; | 1686 | return 0; |
1682 | } | 1687 | } |
1683 | 1688 | ||
1684 | static int chv_pinctrl_resume(struct device *dev) | 1689 | static int chv_pinctrl_resume_noirq(struct device *dev) |
1685 | { | 1690 | { |
1686 | struct platform_device *pdev = to_platform_device(dev); | 1691 | struct platform_device *pdev = to_platform_device(dev); |
1687 | struct chv_pinctrl *pctrl = platform_get_drvdata(pdev); | 1692 | struct chv_pinctrl *pctrl = platform_get_drvdata(pdev); |
1693 | unsigned long flags; | ||
1688 | int i; | 1694 | int i; |
1689 | 1695 | ||
1696 | raw_spin_lock_irqsave(&chv_lock, flags); | ||
1697 | |||
1690 | /* | 1698 | /* |
1691 | * Mask all interrupts before restoring per-pin configuration | 1699 | * Mask all interrupts before restoring per-pin configuration |
1692 | * registers because we don't know in which state BIOS left them | 1700 | * registers because we don't know in which state BIOS left them |
@@ -1731,12 +1739,15 @@ static int chv_pinctrl_resume(struct device *dev) | |||
1731 | chv_writel(0xffff, pctrl->regs + CHV_INTSTAT); | 1739 | chv_writel(0xffff, pctrl->regs + CHV_INTSTAT); |
1732 | chv_writel(pctrl->saved_intmask, pctrl->regs + CHV_INTMASK); | 1740 | chv_writel(pctrl->saved_intmask, pctrl->regs + CHV_INTMASK); |
1733 | 1741 | ||
1742 | raw_spin_unlock_irqrestore(&chv_lock, flags); | ||
1743 | |||
1734 | return 0; | 1744 | return 0; |
1735 | } | 1745 | } |
1736 | #endif | 1746 | #endif |
1737 | 1747 | ||
1738 | static const struct dev_pm_ops chv_pinctrl_pm_ops = { | 1748 | static const struct dev_pm_ops chv_pinctrl_pm_ops = { |
1739 | SET_LATE_SYSTEM_SLEEP_PM_OPS(chv_pinctrl_suspend, chv_pinctrl_resume) | 1749 | SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(chv_pinctrl_suspend_noirq, |
1750 | chv_pinctrl_resume_noirq) | ||
1740 | }; | 1751 | }; |
1741 | 1752 | ||
1742 | static const struct acpi_device_id chv_pinctrl_acpi_match[] = { | 1753 | static const struct acpi_device_id chv_pinctrl_acpi_match[] = { |
diff --git a/drivers/pinctrl/pinctrl-st.c b/drivers/pinctrl/pinctrl-st.c index 99da4cf91031..b7bb37167969 100644 --- a/drivers/pinctrl/pinctrl-st.c +++ b/drivers/pinctrl/pinctrl-st.c | |||
@@ -1512,7 +1512,7 @@ static int st_gpiolib_register_bank(struct st_pinctrl *info, | |||
1512 | if (info->irqmux_base || gpio_irq > 0) { | 1512 | if (info->irqmux_base || gpio_irq > 0) { |
1513 | err = gpiochip_irqchip_add(&bank->gpio_chip, &st_gpio_irqchip, | 1513 | err = gpiochip_irqchip_add(&bank->gpio_chip, &st_gpio_irqchip, |
1514 | 0, handle_simple_irq, | 1514 | 0, handle_simple_irq, |
1515 | IRQ_TYPE_LEVEL_LOW); | 1515 | IRQ_TYPE_NONE); |
1516 | if (err) { | 1516 | if (err) { |
1517 | gpiochip_remove(&bank->gpio_chip); | 1517 | gpiochip_remove(&bank->gpio_chip); |
1518 | dev_info(dev, "could not add irqchip\n"); | 1518 | dev_info(dev, "could not add irqchip\n"); |
diff --git a/drivers/pinctrl/stm32/pinctrl-stm32.c b/drivers/pinctrl/stm32/pinctrl-stm32.c index 200667f08c37..efc43711ff5c 100644 --- a/drivers/pinctrl/stm32/pinctrl-stm32.c +++ b/drivers/pinctrl/stm32/pinctrl-stm32.c | |||
@@ -1092,9 +1092,11 @@ int stm32_pctl_probe(struct platform_device *pdev) | |||
1092 | return -EINVAL; | 1092 | return -EINVAL; |
1093 | } | 1093 | } |
1094 | 1094 | ||
1095 | ret = stm32_pctrl_dt_setup_irq(pdev, pctl); | 1095 | if (of_find_property(np, "interrupt-parent", NULL)) { |
1096 | if (ret) | 1096 | ret = stm32_pctrl_dt_setup_irq(pdev, pctl); |
1097 | return ret; | 1097 | if (ret) |
1098 | return ret; | ||
1099 | } | ||
1098 | 1100 | ||
1099 | for_each_child_of_node(np, child) | 1101 | for_each_child_of_node(np, child) |
1100 | if (of_property_read_bool(child, "gpio-controller")) | 1102 | if (of_property_read_bool(child, "gpio-controller")) |
diff --git a/drivers/platform/x86/ideapad-laptop.c b/drivers/platform/x86/ideapad-laptop.c index a2323941e677..a7614fc542b5 100644 --- a/drivers/platform/x86/ideapad-laptop.c +++ b/drivers/platform/x86/ideapad-laptop.c | |||
@@ -934,6 +934,13 @@ static const struct dmi_system_id no_hw_rfkill_list[] = { | |||
934 | }, | 934 | }, |
935 | }, | 935 | }, |
936 | { | 936 | { |
937 | .ident = "Lenovo Yoga 900", | ||
938 | .matches = { | ||
939 | DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), | ||
940 | DMI_MATCH(DMI_BOARD_NAME, "VIUU4"), | ||
941 | }, | ||
942 | }, | ||
943 | { | ||
937 | .ident = "Lenovo YOGA 910-13IKB", | 944 | .ident = "Lenovo YOGA 910-13IKB", |
938 | .matches = { | 945 | .matches = { |
939 | DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), | 946 | DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), |
diff --git a/drivers/platform/x86/intel-hid.c b/drivers/platform/x86/intel-hid.c index ed5874217ee7..12dbb5063376 100644 --- a/drivers/platform/x86/intel-hid.c +++ b/drivers/platform/x86/intel-hid.c | |||
@@ -264,7 +264,7 @@ check_acpi_dev(acpi_handle handle, u32 lvl, void *context, void **rv) | |||
264 | return AE_OK; | 264 | return AE_OK; |
265 | 265 | ||
266 | if (acpi_match_device_ids(dev, ids) == 0) | 266 | if (acpi_match_device_ids(dev, ids) == 0) |
267 | if (acpi_create_platform_device(dev)) | 267 | if (acpi_create_platform_device(dev, NULL)) |
268 | dev_info(&dev->dev, | 268 | dev_info(&dev->dev, |
269 | "intel-hid: created platform device\n"); | 269 | "intel-hid: created platform device\n"); |
270 | 270 | ||
diff --git a/drivers/platform/x86/intel-vbtn.c b/drivers/platform/x86/intel-vbtn.c index 146d02f8c9bc..78080763df51 100644 --- a/drivers/platform/x86/intel-vbtn.c +++ b/drivers/platform/x86/intel-vbtn.c | |||
@@ -164,7 +164,7 @@ check_acpi_dev(acpi_handle handle, u32 lvl, void *context, void **rv) | |||
164 | return AE_OK; | 164 | return AE_OK; |
165 | 165 | ||
166 | if (acpi_match_device_ids(dev, ids) == 0) | 166 | if (acpi_match_device_ids(dev, ids) == 0) |
167 | if (acpi_create_platform_device(dev)) | 167 | if (acpi_create_platform_device(dev, NULL)) |
168 | dev_info(&dev->dev, | 168 | dev_info(&dev->dev, |
169 | "intel-vbtn: created platform device\n"); | 169 | "intel-vbtn: created platform device\n"); |
170 | 170 | ||
diff --git a/drivers/platform/x86/toshiba-wmi.c b/drivers/platform/x86/toshiba-wmi.c index feac4576b837..2df07ee8f3c3 100644 --- a/drivers/platform/x86/toshiba-wmi.c +++ b/drivers/platform/x86/toshiba-wmi.c | |||
@@ -24,14 +24,15 @@ | |||
24 | #include <linux/acpi.h> | 24 | #include <linux/acpi.h> |
25 | #include <linux/input.h> | 25 | #include <linux/input.h> |
26 | #include <linux/input/sparse-keymap.h> | 26 | #include <linux/input/sparse-keymap.h> |
27 | #include <linux/dmi.h> | ||
27 | 28 | ||
28 | MODULE_AUTHOR("Azael Avalos"); | 29 | MODULE_AUTHOR("Azael Avalos"); |
29 | MODULE_DESCRIPTION("Toshiba WMI Hotkey Driver"); | 30 | MODULE_DESCRIPTION("Toshiba WMI Hotkey Driver"); |
30 | MODULE_LICENSE("GPL"); | 31 | MODULE_LICENSE("GPL"); |
31 | 32 | ||
32 | #define TOSHIBA_WMI_EVENT_GUID "59142400-C6A3-40FA-BADB-8A2652834100" | 33 | #define WMI_EVENT_GUID "59142400-C6A3-40FA-BADB-8A2652834100" |
33 | 34 | ||
34 | MODULE_ALIAS("wmi:"TOSHIBA_WMI_EVENT_GUID); | 35 | MODULE_ALIAS("wmi:"WMI_EVENT_GUID); |
35 | 36 | ||
36 | static struct input_dev *toshiba_wmi_input_dev; | 37 | static struct input_dev *toshiba_wmi_input_dev; |
37 | 38 | ||
@@ -63,6 +64,16 @@ static void toshiba_wmi_notify(u32 value, void *context) | |||
63 | kfree(response.pointer); | 64 | kfree(response.pointer); |
64 | } | 65 | } |
65 | 66 | ||
67 | static struct dmi_system_id toshiba_wmi_dmi_table[] __initdata = { | ||
68 | { | ||
69 | .ident = "Toshiba laptop", | ||
70 | .matches = { | ||
71 | DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"), | ||
72 | }, | ||
73 | }, | ||
74 | {} | ||
75 | }; | ||
76 | |||
66 | static int __init toshiba_wmi_input_setup(void) | 77 | static int __init toshiba_wmi_input_setup(void) |
67 | { | 78 | { |
68 | acpi_status status; | 79 | acpi_status status; |
@@ -81,7 +92,7 @@ static int __init toshiba_wmi_input_setup(void) | |||
81 | if (err) | 92 | if (err) |
82 | goto err_free_dev; | 93 | goto err_free_dev; |
83 | 94 | ||
84 | status = wmi_install_notify_handler(TOSHIBA_WMI_EVENT_GUID, | 95 | status = wmi_install_notify_handler(WMI_EVENT_GUID, |
85 | toshiba_wmi_notify, NULL); | 96 | toshiba_wmi_notify, NULL); |
86 | if (ACPI_FAILURE(status)) { | 97 | if (ACPI_FAILURE(status)) { |
87 | err = -EIO; | 98 | err = -EIO; |
@@ -95,7 +106,7 @@ static int __init toshiba_wmi_input_setup(void) | |||
95 | return 0; | 106 | return 0; |
96 | 107 | ||
97 | err_remove_notifier: | 108 | err_remove_notifier: |
98 | wmi_remove_notify_handler(TOSHIBA_WMI_EVENT_GUID); | 109 | wmi_remove_notify_handler(WMI_EVENT_GUID); |
99 | err_free_keymap: | 110 | err_free_keymap: |
100 | sparse_keymap_free(toshiba_wmi_input_dev); | 111 | sparse_keymap_free(toshiba_wmi_input_dev); |
101 | err_free_dev: | 112 | err_free_dev: |
@@ -105,7 +116,7 @@ static int __init toshiba_wmi_input_setup(void) | |||
105 | 116 | ||
106 | static void toshiba_wmi_input_destroy(void) | 117 | static void toshiba_wmi_input_destroy(void) |
107 | { | 118 | { |
108 | wmi_remove_notify_handler(TOSHIBA_WMI_EVENT_GUID); | 119 | wmi_remove_notify_handler(WMI_EVENT_GUID); |
109 | sparse_keymap_free(toshiba_wmi_input_dev); | 120 | sparse_keymap_free(toshiba_wmi_input_dev); |
110 | input_unregister_device(toshiba_wmi_input_dev); | 121 | input_unregister_device(toshiba_wmi_input_dev); |
111 | } | 122 | } |
@@ -114,7 +125,8 @@ static int __init toshiba_wmi_init(void) | |||
114 | { | 125 | { |
115 | int ret; | 126 | int ret; |
116 | 127 | ||
117 | if (!wmi_has_guid(TOSHIBA_WMI_EVENT_GUID)) | 128 | if (!wmi_has_guid(WMI_EVENT_GUID) || |
129 | !dmi_check_system(toshiba_wmi_dmi_table)) | ||
118 | return -ENODEV; | 130 | return -ENODEV; |
119 | 131 | ||
120 | ret = toshiba_wmi_input_setup(); | 132 | ret = toshiba_wmi_input_setup(); |
@@ -130,7 +142,7 @@ static int __init toshiba_wmi_init(void) | |||
130 | 142 | ||
131 | static void __exit toshiba_wmi_exit(void) | 143 | static void __exit toshiba_wmi_exit(void) |
132 | { | 144 | { |
133 | if (wmi_has_guid(TOSHIBA_WMI_EVENT_GUID)) | 145 | if (wmi_has_guid(WMI_EVENT_GUID)) |
134 | toshiba_wmi_input_destroy(); | 146 | toshiba_wmi_input_destroy(); |
135 | } | 147 | } |
136 | 148 | ||
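
One detail worth noting about the new table: it is marked __initdata, so it is discarded once module init completes and may only be consulted from __init code, as the init-time gate above does. A reduced sketch of that pattern (names are illustrative):

	#include <linux/dmi.h>
	#include <linux/errno.h>
	#include <linux/init.h>
	#include <linux/module.h>

	static const struct dmi_system_id example_dmi_table[] __initconst = {
		{
			.ident = "Toshiba laptop",
			.matches = {
				DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
			},
		},
		{ }
	};

	static int __init example_init(void)
	{
		/* Bail out unless the firmware identifies a Toshiba machine. */
		if (!dmi_check_system(example_dmi_table))
			return -ENODEV;
		return 0;
	}
	module_init(example_init);
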
diff --git a/drivers/rtc/rtc-asm9260.c b/drivers/rtc/rtc-asm9260.c index 18a93d3e3f93..d36534965635 100644 --- a/drivers/rtc/rtc-asm9260.c +++ b/drivers/rtc/rtc-asm9260.c | |||
@@ -327,6 +327,7 @@ static const struct of_device_id asm9260_dt_ids[] = { | |||
327 | { .compatible = "alphascale,asm9260-rtc", }, | 327 | { .compatible = "alphascale,asm9260-rtc", }, |
328 | {} | 328 | {} |
329 | }; | 329 | }; |
330 | MODULE_DEVICE_TABLE(of, asm9260_dt_ids); | ||
330 | 331 | ||
331 | static struct platform_driver asm9260_rtc_driver = { | 332 | static struct platform_driver asm9260_rtc_driver = { |
332 | .probe = asm9260_rtc_probe, | 333 | .probe = asm9260_rtc_probe, |
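
Without the added MODULE_DEVICE_TABLE() line the module only binds when loaded by hand; the macro exports an "of:" module alias for each compatible string so udev/modprobe can autoload it when the DT node is instantiated. A sketch with an illustrative table name:

	#include <linux/mod_devicetable.h>
	#include <linux/module.h>
	#include <linux/of.h>

	static const struct of_device_id example_dt_ids[] = {
		{ .compatible = "alphascale,asm9260-rtc" },
		{ /* sentinel */ }
	};
	/* Emits module aliases for the compatible strings above. */
	MODULE_DEVICE_TABLE(of, example_dt_ids);
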
diff --git a/drivers/rtc/rtc-cmos.c b/drivers/rtc/rtc-cmos.c index dd3d59806ffa..7030d7cd3861 100644 --- a/drivers/rtc/rtc-cmos.c +++ b/drivers/rtc/rtc-cmos.c | |||
@@ -776,7 +776,7 @@ static void cmos_do_shutdown(int rtc_irq) | |||
776 | spin_unlock_irq(&rtc_lock); | 776 | spin_unlock_irq(&rtc_lock); |
777 | } | 777 | } |
778 | 778 | ||
779 | static void __exit cmos_do_remove(struct device *dev) | 779 | static void cmos_do_remove(struct device *dev) |
780 | { | 780 | { |
781 | struct cmos_rtc *cmos = dev_get_drvdata(dev); | 781 | struct cmos_rtc *cmos = dev_get_drvdata(dev); |
782 | struct resource *ports; | 782 | struct resource *ports; |
@@ -996,8 +996,9 @@ static u32 rtc_handler(void *context) | |||
996 | struct cmos_rtc *cmos = dev_get_drvdata(dev); | 996 | struct cmos_rtc *cmos = dev_get_drvdata(dev); |
997 | unsigned char rtc_control = 0; | 997 | unsigned char rtc_control = 0; |
998 | unsigned char rtc_intr; | 998 | unsigned char rtc_intr; |
999 | unsigned long flags; | ||
999 | 1000 | ||
1000 | spin_lock_irq(&rtc_lock); | 1001 | spin_lock_irqsave(&rtc_lock, flags); |
1001 | if (cmos_rtc.suspend_ctrl) | 1002 | if (cmos_rtc.suspend_ctrl) |
1002 | rtc_control = CMOS_READ(RTC_CONTROL); | 1003 | rtc_control = CMOS_READ(RTC_CONTROL); |
1003 | if (rtc_control & RTC_AIE) { | 1004 | if (rtc_control & RTC_AIE) { |
@@ -1006,7 +1007,7 @@ static u32 rtc_handler(void *context) | |||
1006 | rtc_intr = CMOS_READ(RTC_INTR_FLAGS); | 1007 | rtc_intr = CMOS_READ(RTC_INTR_FLAGS); |
1007 | rtc_update_irq(cmos->rtc, 1, rtc_intr); | 1008 | rtc_update_irq(cmos->rtc, 1, rtc_intr); |
1008 | } | 1009 | } |
1009 | spin_unlock_irq(&rtc_lock); | 1010 | spin_unlock_irqrestore(&rtc_lock, flags); |
1010 | 1011 | ||
1011 | pm_wakeup_event(dev, 0); | 1012 | pm_wakeup_event(dev, 0); |
1012 | acpi_clear_event(ACPI_EVENT_RTC); | 1013 | acpi_clear_event(ACPI_EVENT_RTC); |
@@ -1129,7 +1130,7 @@ static int cmos_pnp_probe(struct pnp_dev *pnp, const struct pnp_device_id *id) | |||
1129 | pnp_irq(pnp, 0)); | 1130 | pnp_irq(pnp, 0)); |
1130 | } | 1131 | } |
1131 | 1132 | ||
1132 | static void __exit cmos_pnp_remove(struct pnp_dev *pnp) | 1133 | static void cmos_pnp_remove(struct pnp_dev *pnp) |
1133 | { | 1134 | { |
1134 | cmos_do_remove(&pnp->dev); | 1135 | cmos_do_remove(&pnp->dev); |
1135 | } | 1136 | } |
@@ -1161,7 +1162,7 @@ static struct pnp_driver cmos_pnp_driver = { | |||
1161 | .name = (char *) driver_name, | 1162 | .name = (char *) driver_name, |
1162 | .id_table = rtc_ids, | 1163 | .id_table = rtc_ids, |
1163 | .probe = cmos_pnp_probe, | 1164 | .probe = cmos_pnp_probe, |
1164 | .remove = __exit_p(cmos_pnp_remove), | 1165 | .remove = cmos_pnp_remove, |
1165 | .shutdown = cmos_pnp_shutdown, | 1166 | .shutdown = cmos_pnp_shutdown, |
1166 | 1167 | ||
1167 | /* flag ensures resume() gets called, and stops syslog spam */ | 1168 | /* flag ensures resume() gets called, and stops syslog spam */ |
@@ -1238,7 +1239,7 @@ static int __init cmos_platform_probe(struct platform_device *pdev) | |||
1238 | return cmos_do_probe(&pdev->dev, resource, irq); | 1239 | return cmos_do_probe(&pdev->dev, resource, irq); |
1239 | } | 1240 | } |
1240 | 1241 | ||
1241 | static int __exit cmos_platform_remove(struct platform_device *pdev) | 1242 | static int cmos_platform_remove(struct platform_device *pdev) |
1242 | { | 1243 | { |
1243 | cmos_do_remove(&pdev->dev); | 1244 | cmos_do_remove(&pdev->dev); |
1244 | return 0; | 1245 | return 0; |
@@ -1263,7 +1264,7 @@ static void cmos_platform_shutdown(struct platform_device *pdev) | |||
1263 | MODULE_ALIAS("platform:rtc_cmos"); | 1264 | MODULE_ALIAS("platform:rtc_cmos"); |
1264 | 1265 | ||
1265 | static struct platform_driver cmos_platform_driver = { | 1266 | static struct platform_driver cmos_platform_driver = { |
1266 | .remove = __exit_p(cmos_platform_remove), | 1267 | .remove = cmos_platform_remove, |
1267 | .shutdown = cmos_platform_shutdown, | 1268 | .shutdown = cmos_platform_shutdown, |
1268 | .driver = { | 1269 | .driver = { |
1269 | .name = driver_name, | 1270 | .name = driver_name, |
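
The locking change in rtc_handler() switches to the saved-flags variants because the handler may be entered with interrupts already disabled; spin_unlock_irq() would have re-enabled them unconditionally. A minimal sketch of the pattern, independent of this driver:

	#include <linux/spinlock.h>

	static DEFINE_SPINLOCK(example_lock);

	/* irqsave/irqrestore preserves the caller's interrupt state instead
	 * of forcing IRQs back on at unlock time. */
	static void example_touch_registers(void)
	{
		unsigned long flags;

		spin_lock_irqsave(&example_lock, flags);
		/* ... access the CMOS/RTC registers ... */
		spin_unlock_irqrestore(&example_lock, flags);
	}
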
diff --git a/drivers/rtc/rtc-omap.c b/drivers/rtc/rtc-omap.c index b04ea9b5ae67..51e52446eacb 100644 --- a/drivers/rtc/rtc-omap.c +++ b/drivers/rtc/rtc-omap.c | |||
@@ -113,6 +113,7 @@ | |||
113 | /* OMAP_RTC_OSC_REG bit fields: */ | 113 | /* OMAP_RTC_OSC_REG bit fields: */ |
114 | #define OMAP_RTC_OSC_32KCLK_EN BIT(6) | 114 | #define OMAP_RTC_OSC_32KCLK_EN BIT(6) |
115 | #define OMAP_RTC_OSC_SEL_32KCLK_SRC BIT(3) | 115 | #define OMAP_RTC_OSC_SEL_32KCLK_SRC BIT(3) |
116 | #define OMAP_RTC_OSC_OSC32K_GZ_DISABLE BIT(4) | ||
116 | 117 | ||
117 | /* OMAP_RTC_IRQWAKEEN bit fields: */ | 118 | /* OMAP_RTC_IRQWAKEEN bit fields: */ |
118 | #define OMAP_RTC_IRQWAKEEN_ALARM_WAKEEN BIT(1) | 119 | #define OMAP_RTC_IRQWAKEEN_ALARM_WAKEEN BIT(1) |
@@ -146,6 +147,7 @@ struct omap_rtc { | |||
146 | u8 interrupts_reg; | 147 | u8 interrupts_reg; |
147 | bool is_pmic_controller; | 148 | bool is_pmic_controller; |
148 | bool has_ext_clk; | 149 | bool has_ext_clk; |
150 | bool is_suspending; | ||
149 | const struct omap_rtc_device_type *type; | 151 | const struct omap_rtc_device_type *type; |
150 | struct pinctrl_dev *pctldev; | 152 | struct pinctrl_dev *pctldev; |
151 | }; | 153 | }; |
@@ -786,8 +788,9 @@ static int omap_rtc_probe(struct platform_device *pdev) | |||
786 | */ | 788 | */ |
787 | if (rtc->has_ext_clk) { | 789 | if (rtc->has_ext_clk) { |
788 | reg = rtc_read(rtc, OMAP_RTC_OSC_REG); | 790 | reg = rtc_read(rtc, OMAP_RTC_OSC_REG); |
789 | rtc_write(rtc, OMAP_RTC_OSC_REG, | 791 | reg &= ~OMAP_RTC_OSC_OSC32K_GZ_DISABLE; |
790 | reg | OMAP_RTC_OSC_SEL_32KCLK_SRC); | 792 | reg |= OMAP_RTC_OSC_32KCLK_EN | OMAP_RTC_OSC_SEL_32KCLK_SRC; |
793 | rtc_writel(rtc, OMAP_RTC_OSC_REG, reg); | ||
791 | } | 794 | } |
792 | 795 | ||
793 | rtc->type->lock(rtc); | 796 | rtc->type->lock(rtc); |
@@ -898,8 +901,7 @@ static int omap_rtc_suspend(struct device *dev) | |||
898 | rtc_write(rtc, OMAP_RTC_INTERRUPTS_REG, 0); | 901 | rtc_write(rtc, OMAP_RTC_INTERRUPTS_REG, 0); |
899 | rtc->type->lock(rtc); | 902 | rtc->type->lock(rtc); |
900 | 903 | ||
901 | /* Disable the clock/module */ | 904 | rtc->is_suspending = true; |
902 | pm_runtime_put_sync(dev); | ||
903 | 905 | ||
904 | return 0; | 906 | return 0; |
905 | } | 907 | } |
@@ -908,9 +910,6 @@ static int omap_rtc_resume(struct device *dev) | |||
908 | { | 910 | { |
909 | struct omap_rtc *rtc = dev_get_drvdata(dev); | 911 | struct omap_rtc *rtc = dev_get_drvdata(dev); |
910 | 912 | ||
911 | /* Enable the clock/module so that we can access the registers */ | ||
912 | pm_runtime_get_sync(dev); | ||
913 | |||
914 | rtc->type->unlock(rtc); | 913 | rtc->type->unlock(rtc); |
915 | if (device_may_wakeup(dev)) | 914 | if (device_may_wakeup(dev)) |
916 | disable_irq_wake(rtc->irq_alarm); | 915 | disable_irq_wake(rtc->irq_alarm); |
@@ -918,11 +917,34 @@ static int omap_rtc_resume(struct device *dev) | |||
918 | rtc_write(rtc, OMAP_RTC_INTERRUPTS_REG, rtc->interrupts_reg); | 917 | rtc_write(rtc, OMAP_RTC_INTERRUPTS_REG, rtc->interrupts_reg); |
919 | rtc->type->lock(rtc); | 918 | rtc->type->lock(rtc); |
920 | 919 | ||
920 | rtc->is_suspending = false; | ||
921 | |||
921 | return 0; | 922 | return 0; |
922 | } | 923 | } |
923 | #endif | 924 | #endif |
924 | 925 | ||
925 | static SIMPLE_DEV_PM_OPS(omap_rtc_pm_ops, omap_rtc_suspend, omap_rtc_resume); | 926 | #ifdef CONFIG_PM |
927 | static int omap_rtc_runtime_suspend(struct device *dev) | ||
928 | { | ||
929 | struct omap_rtc *rtc = dev_get_drvdata(dev); | ||
930 | |||
931 | if (rtc->is_suspending && !rtc->has_ext_clk) | ||
932 | return -EBUSY; | ||
933 | |||
934 | return 0; | ||
935 | } | ||
936 | |||
937 | static int omap_rtc_runtime_resume(struct device *dev) | ||
938 | { | ||
939 | return 0; | ||
940 | } | ||
941 | #endif | ||
942 | |||
943 | static const struct dev_pm_ops omap_rtc_pm_ops = { | ||
944 | SET_SYSTEM_SLEEP_PM_OPS(omap_rtc_suspend, omap_rtc_resume) | ||
945 | SET_RUNTIME_PM_OPS(omap_rtc_runtime_suspend, | ||
946 | omap_rtc_runtime_resume, NULL) | ||
947 | }; | ||
926 | 948 | ||
927 | static void omap_rtc_shutdown(struct platform_device *pdev) | 949 | static void omap_rtc_shutdown(struct platform_device *pdev) |
928 | { | 950 | { |
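
A sketch of how a combined dev_pm_ops like the one introduced above is typically wired into the platform driver; the example_* names and trivial callbacks are stand-ins, not the omap-rtc code:

	#include <linux/platform_device.h>
	#include <linux/pm.h>
	#include <linux/pm_runtime.h>

	static int __maybe_unused example_suspend(struct device *dev) { return 0; }
	static int __maybe_unused example_resume(struct device *dev) { return 0; }
	static int __maybe_unused example_runtime_suspend(struct device *dev) { return 0; }
	static int __maybe_unused example_runtime_resume(struct device *dev) { return 0; }

	static const struct dev_pm_ops example_pm_ops = {
		SET_SYSTEM_SLEEP_PM_OPS(example_suspend, example_resume)
		SET_RUNTIME_PM_OPS(example_runtime_suspend,
				   example_runtime_resume, NULL)
	};

	static struct platform_driver example_rtc_driver = {
		.driver = {
			.name	= "example-rtc",
			.pm	= &example_pm_ops,	/* hooks both sets of callbacks */
		},
	};
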
diff --git a/drivers/scsi/cxgbi/libcxgbi.c b/drivers/scsi/cxgbi/libcxgbi.c index d1421139e6ea..2ffe029ff2b6 100644 --- a/drivers/scsi/cxgbi/libcxgbi.c +++ b/drivers/scsi/cxgbi/libcxgbi.c | |||
@@ -2081,9 +2081,10 @@ void cxgbi_cleanup_task(struct iscsi_task *task) | |||
2081 | /* never reached the xmit task callout */ | 2081 | /* never reached the xmit task callout */ |
2082 | if (tdata->skb) | 2082 | if (tdata->skb) |
2083 | __kfree_skb(tdata->skb); | 2083 | __kfree_skb(tdata->skb); |
2084 | memset(tdata, 0, sizeof(*tdata)); | ||
2085 | 2084 | ||
2086 | task_release_itt(task, task->hdr_itt); | 2085 | task_release_itt(task, task->hdr_itt); |
2086 | memset(tdata, 0, sizeof(*tdata)); | ||
2087 | |||
2087 | iscsi_tcp_cleanup_task(task); | 2088 | iscsi_tcp_cleanup_task(task); |
2088 | } | 2089 | } |
2089 | EXPORT_SYMBOL_GPL(cxgbi_cleanup_task); | 2090 | EXPORT_SYMBOL_GPL(cxgbi_cleanup_task); |
diff --git a/drivers/scsi/device_handler/scsi_dh_alua.c b/drivers/scsi/device_handler/scsi_dh_alua.c index 241829e59668..7bb20684e9fa 100644 --- a/drivers/scsi/device_handler/scsi_dh_alua.c +++ b/drivers/scsi/device_handler/scsi_dh_alua.c | |||
@@ -793,6 +793,7 @@ static void alua_rtpg_work(struct work_struct *work) | |||
793 | WARN_ON(pg->flags & ALUA_PG_RUN_RTPG); | 793 | WARN_ON(pg->flags & ALUA_PG_RUN_RTPG); |
794 | WARN_ON(pg->flags & ALUA_PG_RUN_STPG); | 794 | WARN_ON(pg->flags & ALUA_PG_RUN_STPG); |
795 | spin_unlock_irqrestore(&pg->lock, flags); | 795 | spin_unlock_irqrestore(&pg->lock, flags); |
796 | kref_put(&pg->kref, release_port_group); | ||
796 | return; | 797 | return; |
797 | } | 798 | } |
798 | if (pg->flags & ALUA_SYNC_STPG) | 799 | if (pg->flags & ALUA_SYNC_STPG) |
@@ -890,6 +891,7 @@ static void alua_rtpg_queue(struct alua_port_group *pg, | |||
890 | /* Do not queue if the worker is already running */ | 891 | /* Do not queue if the worker is already running */ |
891 | if (!(pg->flags & ALUA_PG_RUNNING)) { | 892 | if (!(pg->flags & ALUA_PG_RUNNING)) { |
892 | kref_get(&pg->kref); | 893 | kref_get(&pg->kref); |
894 | sdev = NULL; | ||
893 | start_queue = 1; | 895 | start_queue = 1; |
894 | } | 896 | } |
895 | } | 897 | } |
@@ -901,7 +903,8 @@ static void alua_rtpg_queue(struct alua_port_group *pg, | |||
901 | if (start_queue && | 903 | if (start_queue && |
902 | !queue_delayed_work(alua_wq, &pg->rtpg_work, | 904 | !queue_delayed_work(alua_wq, &pg->rtpg_work, |
903 | msecs_to_jiffies(ALUA_RTPG_DELAY_MSECS))) { | 905 | msecs_to_jiffies(ALUA_RTPG_DELAY_MSECS))) { |
904 | scsi_device_put(sdev); | 906 | if (sdev) |
907 | scsi_device_put(sdev); | ||
905 | kref_put(&pg->kref, release_port_group); | 908 | kref_put(&pg->kref, release_port_group); |
906 | } | 909 | } |
907 | } | 910 | } |
diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h index ca86c885dfaa..3aaea713bf37 100644 --- a/drivers/scsi/megaraid/megaraid_sas.h +++ b/drivers/scsi/megaraid/megaraid_sas.h | |||
@@ -2233,7 +2233,7 @@ struct megasas_instance_template { | |||
2233 | }; | 2233 | }; |
2234 | 2234 | ||
2235 | #define MEGASAS_IS_LOGICAL(scp) \ | 2235 | #define MEGASAS_IS_LOGICAL(scp) \ |
2236 | (scp->device->channel < MEGASAS_MAX_PD_CHANNELS) ? 0 : 1 | 2236 | ((scp->device->channel < MEGASAS_MAX_PD_CHANNELS) ? 0 : 1) |
2237 | 2237 | ||
2238 | #define MEGASAS_DEV_INDEX(scp) \ | 2238 | #define MEGASAS_DEV_INDEX(scp) \ |
2239 | (((scp->device->channel % 2) * MEGASAS_MAX_DEV_PER_CHANNEL) + \ | 2239 | (((scp->device->channel % 2) * MEGASAS_MAX_DEV_PER_CHANNEL) + \ |
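
A reduced illustration of why the added outer parentheses matter (the limit of 8 is a stand-in, not the driver's real constant): without them, operators the caller wraps around the macro bind into the ternary instead of around it.

	#define BAD_IS_LOGICAL(ch)	((ch) < 8) ? 0 : 1
	#define GOOD_IS_LOGICAL(ch)	(((ch) < 8) ? 0 : 1)

	/* "BAD_IS_LOGICAL(2) == 0" expands to "((2) < 8) ? 0 : (1 == 0)",
	 * which is always 0, because == binds tighter than ?: .
	 * "GOOD_IS_LOGICAL(2) == 0" evaluates to 1, as intended. */
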
diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c index 209a969a979d..8aa769a2d919 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c +++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c | |||
@@ -1273,9 +1273,9 @@ scsih_target_alloc(struct scsi_target *starget) | |||
1273 | sas_target_priv_data->handle = raid_device->handle; | 1273 | sas_target_priv_data->handle = raid_device->handle; |
1274 | sas_target_priv_data->sas_address = raid_device->wwid; | 1274 | sas_target_priv_data->sas_address = raid_device->wwid; |
1275 | sas_target_priv_data->flags |= MPT_TARGET_FLAGS_VOLUME; | 1275 | sas_target_priv_data->flags |= MPT_TARGET_FLAGS_VOLUME; |
1276 | sas_target_priv_data->raid_device = raid_device; | ||
1277 | if (ioc->is_warpdrive) | 1276 | if (ioc->is_warpdrive) |
1278 | raid_device->starget = starget; | 1277 | sas_target_priv_data->raid_device = raid_device; |
1278 | raid_device->starget = starget; | ||
1279 | } | 1279 | } |
1280 | spin_unlock_irqrestore(&ioc->raid_device_lock, flags); | 1280 | spin_unlock_irqrestore(&ioc->raid_device_lock, flags); |
1281 | return 0; | 1281 | return 0; |
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c index ace65db1d2a2..567fa080e261 100644 --- a/drivers/scsi/qla2xxx/qla_os.c +++ b/drivers/scsi/qla2xxx/qla_os.c | |||
@@ -707,6 +707,11 @@ qla2xxx_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd) | |||
707 | srb_t *sp; | 707 | srb_t *sp; |
708 | int rval; | 708 | int rval; |
709 | 709 | ||
710 | if (unlikely(test_bit(UNLOADING, &base_vha->dpc_flags))) { | ||
711 | cmd->result = DID_NO_CONNECT << 16; | ||
712 | goto qc24_fail_command; | ||
713 | } | ||
714 | |||
710 | if (ha->flags.eeh_busy) { | 715 | if (ha->flags.eeh_busy) { |
711 | if (ha->flags.pci_channel_io_perm_failure) { | 716 | if (ha->flags.pci_channel_io_perm_failure) { |
712 | ql_dbg(ql_dbg_aer, vha, 0x9010, | 717 | ql_dbg(ql_dbg_aer, vha, 0x9010, |
@@ -1451,6 +1456,15 @@ qla2x00_abort_all_cmds(scsi_qla_host_t *vha, int res) | |||
1451 | for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++) { | 1456 | for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++) { |
1452 | sp = req->outstanding_cmds[cnt]; | 1457 | sp = req->outstanding_cmds[cnt]; |
1453 | if (sp) { | 1458 | if (sp) { |
1459 | /* Get a reference to the sp and drop the lock. | ||
1460 | * The reference ensures this sp->done() call | ||
1461 | * - and not the call in qla2xxx_eh_abort() - | ||
1462 | * ends the SCSI command (with result 'res'). | ||
1463 | */ | ||
1464 | sp_get(sp); | ||
1465 | spin_unlock_irqrestore(&ha->hardware_lock, flags); | ||
1466 | qla2xxx_eh_abort(GET_CMD_SP(sp)); | ||
1467 | spin_lock_irqsave(&ha->hardware_lock, flags); | ||
1454 | req->outstanding_cmds[cnt] = NULL; | 1468 | req->outstanding_cmds[cnt] = NULL; |
1455 | sp->done(vha, sp, res); | 1469 | sp->done(vha, sp, res); |
1456 | } | 1470 | } |
@@ -2341,6 +2355,8 @@ qla2xxx_scan_finished(struct Scsi_Host *shost, unsigned long time) | |||
2341 | { | 2355 | { |
2342 | scsi_qla_host_t *vha = shost_priv(shost); | 2356 | scsi_qla_host_t *vha = shost_priv(shost); |
2343 | 2357 | ||
2358 | if (test_bit(UNLOADING, &vha->dpc_flags)) | ||
2359 | return 1; | ||
2344 | if (!vha->host) | 2360 | if (!vha->host) |
2345 | return 1; | 2361 | return 1; |
2346 | if (time > vha->hw->loop_reset_delay * HZ) | 2362 | if (time > vha->hw->loop_reset_delay * HZ) |
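
The first qla2xxx hunk fails new commands fast once teardown has started instead of issuing them to hardware that is going away. A hedged sketch of that shape with stand-in names (EXAMPLE_UNLOADING and example_dpc_flags replace the driver's vha->dpc_flags bookkeeping):

	#include <linux/bitops.h>
	#include <scsi/scsi.h>
	#include <scsi/scsi_cmnd.h>
	#include <scsi/scsi_host.h>

	#define EXAMPLE_UNLOADING	0
	static unsigned long example_dpc_flags;

	static int example_queuecommand(struct Scsi_Host *host,
					struct scsi_cmnd *cmd)
	{
		/* Complete new commands immediately while unloading. */
		if (unlikely(test_bit(EXAMPLE_UNLOADING, &example_dpc_flags))) {
			cmd->result = DID_NO_CONNECT << 16;
			cmd->scsi_done(cmd);
			return 0;
		}

		/* ... normal build-and-issue path ... */
		return 0;
	}
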
diff --git a/drivers/scsi/vmw_pvscsi.c b/drivers/scsi/vmw_pvscsi.c index 4a0d3cdc607c..15ca09cd16f3 100644 --- a/drivers/scsi/vmw_pvscsi.c +++ b/drivers/scsi/vmw_pvscsi.c | |||
@@ -793,6 +793,7 @@ static int pvscsi_abort(struct scsi_cmnd *cmd) | |||
793 | unsigned long flags; | 793 | unsigned long flags; |
794 | int result = SUCCESS; | 794 | int result = SUCCESS; |
795 | DECLARE_COMPLETION_ONSTACK(abort_cmp); | 795 | DECLARE_COMPLETION_ONSTACK(abort_cmp); |
796 | int done; | ||
796 | 797 | ||
797 | scmd_printk(KERN_DEBUG, cmd, "task abort on host %u, %p\n", | 798 | scmd_printk(KERN_DEBUG, cmd, "task abort on host %u, %p\n", |
798 | adapter->host->host_no, cmd); | 799 | adapter->host->host_no, cmd); |
@@ -824,10 +825,10 @@ static int pvscsi_abort(struct scsi_cmnd *cmd) | |||
824 | pvscsi_abort_cmd(adapter, ctx); | 825 | pvscsi_abort_cmd(adapter, ctx); |
825 | spin_unlock_irqrestore(&adapter->hw_lock, flags); | 826 | spin_unlock_irqrestore(&adapter->hw_lock, flags); |
826 | /* Wait for 2 secs for the completion. */ | 827 | /* Wait for 2 secs for the completion. */ |
827 | wait_for_completion_timeout(&abort_cmp, msecs_to_jiffies(2000)); | 828 | done = wait_for_completion_timeout(&abort_cmp, msecs_to_jiffies(2000)); |
828 | spin_lock_irqsave(&adapter->hw_lock, flags); | 829 | spin_lock_irqsave(&adapter->hw_lock, flags); |
829 | 830 | ||
830 | if (!completion_done(&abort_cmp)) { | 831 | if (!done) { |
831 | /* | 832 | /* |
832 | * Failed to abort the command, unmark the fact that it | 833 | * Failed to abort the command, unmark the fact that it |
833 | * was requested to be aborted. | 834 | * was requested to be aborted. |
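
The pvscsi change keys off the return value of wait_for_completion_timeout(), which is 0 on timeout and the remaining jiffies otherwise, rather than re-sampling the completion afterwards when its state may already have changed. A minimal sketch:

	#include <linux/completion.h>
	#include <linux/jiffies.h>

	static bool example_abort_completed(struct completion *abort_cmp)
	{
		/* Non-zero means the completion fired before the timeout. */
		return wait_for_completion_timeout(abort_cmp,
						   msecs_to_jiffies(2000)) != 0;
	}
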
diff --git a/drivers/scsi/vmw_pvscsi.h b/drivers/scsi/vmw_pvscsi.h index c097d2ccbde3..d41292ef85f2 100644 --- a/drivers/scsi/vmw_pvscsi.h +++ b/drivers/scsi/vmw_pvscsi.h | |||
@@ -26,7 +26,7 @@ | |||
26 | 26 | ||
27 | #include <linux/types.h> | 27 | #include <linux/types.h> |
28 | 28 | ||
29 | #define PVSCSI_DRIVER_VERSION_STRING "1.0.6.0-k" | 29 | #define PVSCSI_DRIVER_VERSION_STRING "1.0.7.0-k" |
30 | 30 | ||
31 | #define PVSCSI_MAX_NUM_SG_ENTRIES_PER_SEGMENT 128 | 31 | #define PVSCSI_MAX_NUM_SG_ENTRIES_PER_SEGMENT 128 |
32 | 32 | ||
diff --git a/drivers/staging/comedi/drivers/ni_tio.c b/drivers/staging/comedi/drivers/ni_tio.c index 7043eb0543f6..5ab49a798164 100644 --- a/drivers/staging/comedi/drivers/ni_tio.c +++ b/drivers/staging/comedi/drivers/ni_tio.c | |||
@@ -207,7 +207,8 @@ static int ni_tio_clock_period_ps(const struct ni_gpct *counter, | |||
207 | * clock period is specified by user with prescaling | 207 | * clock period is specified by user with prescaling |
208 | * already taken into account. | 208 | * already taken into account. |
209 | */ | 209 | */ |
210 | return counter->clock_period_ps; | 210 | *period_ps = counter->clock_period_ps; |
211 | return 0; | ||
211 | } | 212 | } |
212 | 213 | ||
213 | switch (generic_clock_source & NI_GPCT_PRESCALE_MODE_CLOCK_SRC_MASK) { | 214 | switch (generic_clock_source & NI_GPCT_PRESCALE_MODE_CLOCK_SRC_MASK) { |
diff --git a/drivers/staging/greybus/arche-platform.c b/drivers/staging/greybus/arche-platform.c index 34307ac3f255..d33d6fe078ad 100644 --- a/drivers/staging/greybus/arche-platform.c +++ b/drivers/staging/greybus/arche-platform.c | |||
@@ -186,6 +186,7 @@ int arche_platform_change_state(enum arche_platform_state state, | |||
186 | exit: | 186 | exit: |
187 | spin_unlock_irqrestore(&arche_pdata->wake_lock, flags); | 187 | spin_unlock_irqrestore(&arche_pdata->wake_lock, flags); |
188 | mutex_unlock(&arche_pdata->platform_state_mutex); | 188 | mutex_unlock(&arche_pdata->platform_state_mutex); |
189 | put_device(&pdev->dev); | ||
189 | of_node_put(np); | 190 | of_node_put(np); |
190 | return ret; | 191 | return ret; |
191 | } | 192 | } |
diff --git a/drivers/staging/iio/impedance-analyzer/ad5933.c b/drivers/staging/iio/impedance-analyzer/ad5933.c index 5eecf1cb1028..3892a7470410 100644 --- a/drivers/staging/iio/impedance-analyzer/ad5933.c +++ b/drivers/staging/iio/impedance-analyzer/ad5933.c | |||
@@ -655,6 +655,7 @@ static void ad5933_work(struct work_struct *work) | |||
655 | __be16 buf[2]; | 655 | __be16 buf[2]; |
656 | int val[2]; | 656 | int val[2]; |
657 | unsigned char status; | 657 | unsigned char status; |
658 | int ret; | ||
658 | 659 | ||
659 | mutex_lock(&indio_dev->mlock); | 660 | mutex_lock(&indio_dev->mlock); |
660 | if (st->state == AD5933_CTRL_INIT_START_FREQ) { | 661 | if (st->state == AD5933_CTRL_INIT_START_FREQ) { |
@@ -662,19 +663,22 @@ static void ad5933_work(struct work_struct *work) | |||
662 | ad5933_cmd(st, AD5933_CTRL_START_SWEEP); | 663 | ad5933_cmd(st, AD5933_CTRL_START_SWEEP); |
663 | st->state = AD5933_CTRL_START_SWEEP; | 664 | st->state = AD5933_CTRL_START_SWEEP; |
664 | schedule_delayed_work(&st->work, st->poll_time_jiffies); | 665 | schedule_delayed_work(&st->work, st->poll_time_jiffies); |
665 | mutex_unlock(&indio_dev->mlock); | 666 | goto out; |
666 | return; | ||
667 | } | 667 | } |
668 | 668 | ||
669 | ad5933_i2c_read(st->client, AD5933_REG_STATUS, 1, &status); | 669 | ret = ad5933_i2c_read(st->client, AD5933_REG_STATUS, 1, &status); |
670 | if (ret) | ||
671 | goto out; | ||
670 | 672 | ||
671 | if (status & AD5933_STAT_DATA_VALID) { | 673 | if (status & AD5933_STAT_DATA_VALID) { |
672 | int scan_count = bitmap_weight(indio_dev->active_scan_mask, | 674 | int scan_count = bitmap_weight(indio_dev->active_scan_mask, |
673 | indio_dev->masklength); | 675 | indio_dev->masklength); |
674 | ad5933_i2c_read(st->client, | 676 | ret = ad5933_i2c_read(st->client, |
675 | test_bit(1, indio_dev->active_scan_mask) ? | 677 | test_bit(1, indio_dev->active_scan_mask) ? |
676 | AD5933_REG_REAL_DATA : AD5933_REG_IMAG_DATA, | 678 | AD5933_REG_REAL_DATA : AD5933_REG_IMAG_DATA, |
677 | scan_count * 2, (u8 *)buf); | 679 | scan_count * 2, (u8 *)buf); |
680 | if (ret) | ||
681 | goto out; | ||
678 | 682 | ||
679 | if (scan_count == 2) { | 683 | if (scan_count == 2) { |
680 | val[0] = be16_to_cpu(buf[0]); | 684 | val[0] = be16_to_cpu(buf[0]); |
@@ -686,8 +690,7 @@ static void ad5933_work(struct work_struct *work) | |||
686 | } else { | 690 | } else { |
687 | /* no data available - try again later */ | 691 | /* no data available - try again later */ |
688 | schedule_delayed_work(&st->work, st->poll_time_jiffies); | 692 | schedule_delayed_work(&st->work, st->poll_time_jiffies); |
689 | mutex_unlock(&indio_dev->mlock); | 693 | goto out; |
690 | return; | ||
691 | } | 694 | } |
692 | 695 | ||
693 | if (status & AD5933_STAT_SWEEP_DONE) { | 696 | if (status & AD5933_STAT_SWEEP_DONE) { |
@@ -700,7 +703,7 @@ static void ad5933_work(struct work_struct *work) | |||
700 | ad5933_cmd(st, AD5933_CTRL_INC_FREQ); | 703 | ad5933_cmd(st, AD5933_CTRL_INC_FREQ); |
701 | schedule_delayed_work(&st->work, st->poll_time_jiffies); | 704 | schedule_delayed_work(&st->work, st->poll_time_jiffies); |
702 | } | 705 | } |
703 | 706 | out: | |
704 | mutex_unlock(&indio_dev->mlock); | 707 | mutex_unlock(&indio_dev->mlock); |
705 | } | 708 | } |
706 | 709 | ||
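
The ad5933 rework funnels every exit from the locked region through a single label so the mutex is released exactly once, and stops ignoring I2C read failures. A reduced sketch of that structure (example_read_status() is a hypothetical stand-in for the driver's status read):

	#include <linux/mutex.h>

	static int example_read_status(void)
	{
		return 0;
	}

	static void example_poll_work(struct mutex *lock)
	{
		int ret;

		mutex_lock(lock);

		ret = example_read_status();
		if (ret)
			goto out;

		/* ... push the sample to the buffer, reschedule, etc. ... */
	out:
		mutex_unlock(lock);	/* single unlock for every path */
	}
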
diff --git a/drivers/staging/nvec/nvec_ps2.c b/drivers/staging/nvec/nvec_ps2.c index a324322ee0ad..499952c8ef39 100644 --- a/drivers/staging/nvec/nvec_ps2.c +++ b/drivers/staging/nvec/nvec_ps2.c | |||
@@ -106,13 +106,12 @@ static int nvec_mouse_probe(struct platform_device *pdev) | |||
106 | { | 106 | { |
107 | struct nvec_chip *nvec = dev_get_drvdata(pdev->dev.parent); | 107 | struct nvec_chip *nvec = dev_get_drvdata(pdev->dev.parent); |
108 | struct serio *ser_dev; | 108 | struct serio *ser_dev; |
109 | char mouse_reset[] = { NVEC_PS2, SEND_COMMAND, PSMOUSE_RST, 3 }; | ||
110 | 109 | ||
111 | ser_dev = devm_kzalloc(&pdev->dev, sizeof(struct serio), GFP_KERNEL); | 110 | ser_dev = kzalloc(sizeof(struct serio), GFP_KERNEL); |
112 | if (!ser_dev) | 111 | if (!ser_dev) |
113 | return -ENOMEM; | 112 | return -ENOMEM; |
114 | 113 | ||
115 | ser_dev->id.type = SERIO_PS_PSTHRU; | 114 | ser_dev->id.type = SERIO_8042; |
116 | ser_dev->write = ps2_sendcommand; | 115 | ser_dev->write = ps2_sendcommand; |
117 | ser_dev->start = ps2_startstreaming; | 116 | ser_dev->start = ps2_startstreaming; |
118 | ser_dev->stop = ps2_stopstreaming; | 117 | ser_dev->stop = ps2_stopstreaming; |
@@ -127,9 +126,6 @@ static int nvec_mouse_probe(struct platform_device *pdev) | |||
127 | 126 | ||
128 | serio_register_port(ser_dev); | 127 | serio_register_port(ser_dev); |
129 | 128 | ||
130 | /* mouse reset */ | ||
131 | nvec_write_async(nvec, mouse_reset, sizeof(mouse_reset)); | ||
132 | |||
133 | return 0; | 129 | return 0; |
134 | } | 130 | } |
135 | 131 | ||
diff --git a/drivers/staging/sm750fb/ddk750_reg.h b/drivers/staging/sm750fb/ddk750_reg.h index 955247979aaa..4ed6d8d7712a 100644 --- a/drivers/staging/sm750fb/ddk750_reg.h +++ b/drivers/staging/sm750fb/ddk750_reg.h | |||
@@ -601,13 +601,13 @@ | |||
601 | 601 | ||
602 | #define PANEL_PLANE_TL 0x08001C | 602 | #define PANEL_PLANE_TL 0x08001C |
603 | #define PANEL_PLANE_TL_TOP_SHIFT 16 | 603 | #define PANEL_PLANE_TL_TOP_SHIFT 16 |
604 | #define PANEL_PLANE_TL_TOP_MASK (0xeff << 16) | 604 | #define PANEL_PLANE_TL_TOP_MASK (0x7ff << 16) |
605 | #define PANEL_PLANE_TL_LEFT_MASK 0xeff | 605 | #define PANEL_PLANE_TL_LEFT_MASK 0x7ff |
606 | 606 | ||
607 | #define PANEL_PLANE_BR 0x080020 | 607 | #define PANEL_PLANE_BR 0x080020 |
608 | #define PANEL_PLANE_BR_BOTTOM_SHIFT 16 | 608 | #define PANEL_PLANE_BR_BOTTOM_SHIFT 16 |
609 | #define PANEL_PLANE_BR_BOTTOM_MASK (0xeff << 16) | 609 | #define PANEL_PLANE_BR_BOTTOM_MASK (0x7ff << 16) |
610 | #define PANEL_PLANE_BR_RIGHT_MASK 0xeff | 610 | #define PANEL_PLANE_BR_RIGHT_MASK 0x7ff |
611 | 611 | ||
612 | #define PANEL_HORIZONTAL_TOTAL 0x080024 | 612 | #define PANEL_HORIZONTAL_TOTAL 0x080024 |
613 | #define PANEL_HORIZONTAL_TOTAL_TOTAL_SHIFT 16 | 613 | #define PANEL_HORIZONTAL_TOTAL_TOTAL_SHIFT 16 |
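
The old 0xeff value is not a contiguous mask (bit 8 is missing), so bit 8 of the 11-bit coordinate fields was silently dropped; 0x7ff is the intended mask. One way to make such masks harder to mistype, shown here as a suggestion rather than what the driver does, is GENMASK():

	#include <linux/bitops.h>

	#define EXAMPLE_PLANE_TL_LEFT_MASK	GENMASK(10, 0)		/* 0x7ff */
	#define EXAMPLE_PLANE_TL_TOP_MASK	GENMASK(26, 16)		/* 0x7ff << 16 */
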
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c index 78f0f85bebdc..fada988512a1 100644 --- a/drivers/usb/class/cdc-acm.c +++ b/drivers/usb/class/cdc-acm.c | |||
@@ -932,8 +932,6 @@ static int wait_serial_change(struct acm *acm, unsigned long arg) | |||
932 | DECLARE_WAITQUEUE(wait, current); | 932 | DECLARE_WAITQUEUE(wait, current); |
933 | struct async_icount old, new; | 933 | struct async_icount old, new; |
934 | 934 | ||
935 | if (arg & (TIOCM_DSR | TIOCM_RI | TIOCM_CD)) | ||
936 | return -EINVAL; | ||
937 | do { | 935 | do { |
938 | spin_lock_irq(&acm->read_lock); | 936 | spin_lock_irq(&acm->read_lock); |
939 | old = acm->oldcount; | 937 | old = acm->oldcount; |
@@ -1161,6 +1159,8 @@ static int acm_probe(struct usb_interface *intf, | |||
1161 | if (quirks == IGNORE_DEVICE) | 1159 | if (quirks == IGNORE_DEVICE) |
1162 | return -ENODEV; | 1160 | return -ENODEV; |
1163 | 1161 | ||
1162 | memset(&h, 0x00, sizeof(struct usb_cdc_parsed_header)); | ||
1163 | |||
1164 | num_rx_buf = (quirks == SINGLE_RX_URB) ? 1 : ACM_NR; | 1164 | num_rx_buf = (quirks == SINGLE_RX_URB) ? 1 : ACM_NR; |
1165 | 1165 | ||
1166 | /* handle quirks deadly to normal probing*/ | 1166 | /* handle quirks deadly to normal probing*/ |
diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c index 7287a763cd0c..fea446900cad 100644 --- a/drivers/usb/dwc3/core.c +++ b/drivers/usb/dwc3/core.c | |||
@@ -769,15 +769,14 @@ static int dwc3_core_init(struct dwc3 *dwc) | |||
769 | return 0; | 769 | return 0; |
770 | 770 | ||
771 | err4: | 771 | err4: |
772 | phy_power_off(dwc->usb2_generic_phy); | 772 | phy_power_off(dwc->usb3_generic_phy); |
773 | 773 | ||
774 | err3: | 774 | err3: |
775 | phy_power_off(dwc->usb3_generic_phy); | 775 | phy_power_off(dwc->usb2_generic_phy); |
776 | 776 | ||
777 | err2: | 777 | err2: |
778 | usb_phy_set_suspend(dwc->usb2_phy, 1); | 778 | usb_phy_set_suspend(dwc->usb2_phy, 1); |
779 | usb_phy_set_suspend(dwc->usb3_phy, 1); | 779 | usb_phy_set_suspend(dwc->usb3_phy, 1); |
780 | dwc3_core_exit(dwc); | ||
781 | 780 | ||
782 | err1: | 781 | err1: |
783 | usb_phy_shutdown(dwc->usb2_phy); | 782 | usb_phy_shutdown(dwc->usb2_phy); |
diff --git a/drivers/usb/dwc3/dwc3-st.c b/drivers/usb/dwc3/dwc3-st.c index 89a2f712fdfe..aaaf256f71dd 100644 --- a/drivers/usb/dwc3/dwc3-st.c +++ b/drivers/usb/dwc3/dwc3-st.c | |||
@@ -31,6 +31,7 @@ | |||
31 | #include <linux/slab.h> | 31 | #include <linux/slab.h> |
32 | #include <linux/regmap.h> | 32 | #include <linux/regmap.h> |
33 | #include <linux/reset.h> | 33 | #include <linux/reset.h> |
34 | #include <linux/pinctrl/consumer.h> | ||
34 | #include <linux/usb/of.h> | 35 | #include <linux/usb/of.h> |
35 | 36 | ||
36 | #include "core.h" | 37 | #include "core.h" |
diff --git a/drivers/usb/gadget/function/u_ether.c b/drivers/usb/gadget/function/u_ether.c index fe1811650dbc..5d1bd13a56c1 100644 --- a/drivers/usb/gadget/function/u_ether.c +++ b/drivers/usb/gadget/function/u_ether.c | |||
@@ -588,14 +588,6 @@ static netdev_tx_t eth_start_xmit(struct sk_buff *skb, | |||
588 | 588 | ||
589 | req->length = length; | 589 | req->length = length; |
590 | 590 | ||
591 | /* throttle high/super speed IRQ rate back slightly */ | ||
592 | if (gadget_is_dualspeed(dev->gadget)) | ||
593 | req->no_interrupt = (((dev->gadget->speed == USB_SPEED_HIGH || | ||
594 | dev->gadget->speed == USB_SPEED_SUPER)) && | ||
595 | !list_empty(&dev->tx_reqs)) | ||
596 | ? ((atomic_read(&dev->tx_qlen) % dev->qmult) != 0) | ||
597 | : 0; | ||
598 | |||
599 | retval = usb_ep_queue(in, req, GFP_ATOMIC); | 591 | retval = usb_ep_queue(in, req, GFP_ATOMIC); |
600 | switch (retval) { | 592 | switch (retval) { |
601 | default: | 593 | default: |
diff --git a/drivers/usb/host/pci-quirks.c b/drivers/usb/host/pci-quirks.c index d793f548dfe2..a9a1e4c40480 100644 --- a/drivers/usb/host/pci-quirks.c +++ b/drivers/usb/host/pci-quirks.c | |||
@@ -995,6 +995,14 @@ static void quirk_usb_handoff_xhci(struct pci_dev *pdev) | |||
995 | } | 995 | } |
996 | val = readl(base + ext_cap_offset); | 996 | val = readl(base + ext_cap_offset); |
997 | 997 | ||
998 | /* Auto handoff never worked for these devices. Force it and continue */ | ||
999 | if ((pdev->vendor == PCI_VENDOR_ID_TI && pdev->device == 0x8241) || | ||
1000 | (pdev->vendor == PCI_VENDOR_ID_RENESAS | ||
1001 | && pdev->device == 0x0014)) { | ||
1002 | val = (val | XHCI_HC_OS_OWNED) & ~XHCI_HC_BIOS_OWNED; | ||
1003 | writel(val, base + ext_cap_offset); | ||
1004 | } | ||
1005 | |||
998 | /* If the BIOS owns the HC, signal that the OS wants it, and wait */ | 1006 | /* If the BIOS owns the HC, signal that the OS wants it, and wait */ |
999 | if (val & XHCI_HC_BIOS_OWNED) { | 1007 | if (val & XHCI_HC_BIOS_OWNED) { |
1000 | writel(val | XHCI_HC_OS_OWNED, base + ext_cap_offset); | 1008 | writel(val | XHCI_HC_OS_OWNED, base + ext_cap_offset); |
diff --git a/drivers/usb/musb/da8xx.c b/drivers/usb/musb/da8xx.c index 210b7e43a6fd..2440f88e07a3 100644 --- a/drivers/usb/musb/da8xx.c +++ b/drivers/usb/musb/da8xx.c | |||
@@ -479,7 +479,8 @@ static int da8xx_probe(struct platform_device *pdev) | |||
479 | 479 | ||
480 | glue->phy = devm_phy_get(&pdev->dev, "usb-phy"); | 480 | glue->phy = devm_phy_get(&pdev->dev, "usb-phy"); |
481 | if (IS_ERR(glue->phy)) { | 481 | if (IS_ERR(glue->phy)) { |
482 | dev_err(&pdev->dev, "failed to get phy\n"); | 482 | if (PTR_ERR(glue->phy) != -EPROBE_DEFER) |
483 | dev_err(&pdev->dev, "failed to get phy\n"); | ||
483 | return PTR_ERR(glue->phy); | 484 | return PTR_ERR(glue->phy); |
484 | } | 485 | } |
485 | 486 | ||
diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c index 27dadc0d9114..e01116e4c067 100644 --- a/drivers/usb/musb/musb_core.c +++ b/drivers/usb/musb/musb_core.c | |||
@@ -2114,11 +2114,6 @@ musb_init_controller(struct device *dev, int nIrq, void __iomem *ctrl) | |||
2114 | musb->io.ep_offset = musb_flat_ep_offset; | 2114 | musb->io.ep_offset = musb_flat_ep_offset; |
2115 | musb->io.ep_select = musb_flat_ep_select; | 2115 | musb->io.ep_select = musb_flat_ep_select; |
2116 | } | 2116 | } |
2117 | /* And override them with platform specific ops if specified. */ | ||
2118 | if (musb->ops->ep_offset) | ||
2119 | musb->io.ep_offset = musb->ops->ep_offset; | ||
2120 | if (musb->ops->ep_select) | ||
2121 | musb->io.ep_select = musb->ops->ep_select; | ||
2122 | 2117 | ||
2123 | /* At least tusb6010 has its own offsets */ | 2118 | /* At least tusb6010 has its own offsets */ |
2124 | if (musb->ops->ep_offset) | 2119 | if (musb->ops->ep_offset) |
diff --git a/drivers/uwb/lc-rc.c b/drivers/uwb/lc-rc.c index d059ad4d0dbd..97ee1b46db69 100644 --- a/drivers/uwb/lc-rc.c +++ b/drivers/uwb/lc-rc.c | |||
@@ -56,8 +56,11 @@ static struct uwb_rc *uwb_rc_find_by_index(int index) | |||
56 | struct uwb_rc *rc = NULL; | 56 | struct uwb_rc *rc = NULL; |
57 | 57 | ||
58 | dev = class_find_device(&uwb_rc_class, NULL, &index, uwb_rc_index_match); | 58 | dev = class_find_device(&uwb_rc_class, NULL, &index, uwb_rc_index_match); |
59 | if (dev) | 59 | if (dev) { |
60 | rc = dev_get_drvdata(dev); | 60 | rc = dev_get_drvdata(dev); |
61 | put_device(dev); | ||
62 | } | ||
63 | |||
61 | return rc; | 64 | return rc; |
62 | } | 65 | } |
63 | 66 | ||
@@ -467,7 +470,9 @@ struct uwb_rc *__uwb_rc_try_get(struct uwb_rc *target_rc) | |||
467 | if (dev) { | 470 | if (dev) { |
468 | rc = dev_get_drvdata(dev); | 471 | rc = dev_get_drvdata(dev); |
469 | __uwb_rc_get(rc); | 472 | __uwb_rc_get(rc); |
473 | put_device(dev); | ||
470 | } | 474 | } |
475 | |||
471 | return rc; | 476 | return rc; |
472 | } | 477 | } |
473 | EXPORT_SYMBOL_GPL(__uwb_rc_try_get); | 478 | EXPORT_SYMBOL_GPL(__uwb_rc_try_get); |
@@ -520,8 +525,11 @@ struct uwb_rc *uwb_rc_get_by_grandpa(const struct device *grandpa_dev) | |||
520 | 525 | ||
521 | dev = class_find_device(&uwb_rc_class, NULL, grandpa_dev, | 526 | dev = class_find_device(&uwb_rc_class, NULL, grandpa_dev, |
522 | find_rc_grandpa); | 527 | find_rc_grandpa); |
523 | if (dev) | 528 | if (dev) { |
524 | rc = dev_get_drvdata(dev); | 529 | rc = dev_get_drvdata(dev); |
530 | put_device(dev); | ||
531 | } | ||
532 | |||
525 | return rc; | 533 | return rc; |
526 | } | 534 | } |
527 | EXPORT_SYMBOL_GPL(uwb_rc_get_by_grandpa); | 535 | EXPORT_SYMBOL_GPL(uwb_rc_get_by_grandpa); |
@@ -553,8 +561,10 @@ struct uwb_rc *uwb_rc_get_by_dev(const struct uwb_dev_addr *addr) | |||
553 | struct uwb_rc *rc = NULL; | 561 | struct uwb_rc *rc = NULL; |
554 | 562 | ||
555 | dev = class_find_device(&uwb_rc_class, NULL, addr, find_rc_dev); | 563 | dev = class_find_device(&uwb_rc_class, NULL, addr, find_rc_dev); |
556 | if (dev) | 564 | if (dev) { |
557 | rc = dev_get_drvdata(dev); | 565 | rc = dev_get_drvdata(dev); |
566 | put_device(dev); | ||
567 | } | ||
558 | 568 | ||
559 | return rc; | 569 | return rc; |
560 | } | 570 | } |
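
class_find_device() returns its match with a reference held, and the lookups above now drop that reference once the driver data has been extracted instead of leaking it. A generic sketch of the corrected pattern (the helper name is illustrative):

	#include <linux/device.h>

	static void *example_find_drvdata(struct class *cls, const void *data,
					  int (*match)(struct device *, const void *))
	{
		struct device *dev;
		void *drvdata = NULL;

		dev = class_find_device(cls, NULL, data, match);
		if (dev) {
			drvdata = dev_get_drvdata(dev);
			put_device(dev);	/* balance class_find_device() */
		}

		return drvdata;
	}
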
diff --git a/drivers/uwb/pal.c b/drivers/uwb/pal.c index c1304b8d4985..678e93741ae1 100644 --- a/drivers/uwb/pal.c +++ b/drivers/uwb/pal.c | |||
@@ -97,6 +97,8 @@ static bool uwb_rc_class_device_exists(struct uwb_rc *target_rc) | |||
97 | 97 | ||
98 | dev = class_find_device(&uwb_rc_class, NULL, target_rc, find_rc); | 98 | dev = class_find_device(&uwb_rc_class, NULL, target_rc, find_rc); |
99 | 99 | ||
100 | put_device(dev); | ||
101 | |||
100 | return (dev != NULL); | 102 | return (dev != NULL); |
101 | } | 103 | } |
102 | 104 | ||
diff --git a/drivers/video/fbdev/amba-clcd-versatile.c b/drivers/video/fbdev/amba-clcd-versatile.c index 19ad8645d93c..e5d9bfc1703a 100644 --- a/drivers/video/fbdev/amba-clcd-versatile.c +++ b/drivers/video/fbdev/amba-clcd-versatile.c | |||
@@ -526,8 +526,8 @@ int versatile_clcd_init_panel(struct clcd_fb *fb, | |||
526 | np = of_find_matching_node_and_match(NULL, versatile_clcd_of_match, | 526 | np = of_find_matching_node_and_match(NULL, versatile_clcd_of_match, |
527 | &clcd_id); | 527 | &clcd_id); |
528 | if (!np) { | 528 | if (!np) { |
529 | dev_err(dev, "no Versatile syscon node\n"); | 529 | /* Vexpress does not have this */ |
530 | return -ENODEV; | 530 | return 0; |
531 | } | 531 | } |
532 | versatile_clcd_type = (enum versatile_clcd)clcd_id->data; | 532 | versatile_clcd_type = (enum versatile_clcd)clcd_id->data; |
533 | 533 | ||
diff --git a/fs/aio.c b/fs/aio.c --- a/fs/aio.c +++ b/fs/aio.c | |||
@@ -1078,6 +1078,17 @@ static void aio_complete(struct kiocb *kiocb, long res, long res2) | |||
1078 | unsigned tail, pos, head; | 1078 | unsigned tail, pos, head; |
1079 | unsigned long flags; | 1079 | unsigned long flags; |
1080 | 1080 | ||
1081 | if (kiocb->ki_flags & IOCB_WRITE) { | ||
1082 | struct file *file = kiocb->ki_filp; | ||
1083 | |||
1084 | /* | ||
1085 | * Tell lockdep we inherited freeze protection from submission | ||
1086 | * thread. | ||
1087 | */ | ||
1088 | __sb_writers_acquired(file_inode(file)->i_sb, SB_FREEZE_WRITE); | ||
1089 | file_end_write(file); | ||
1090 | } | ||
1091 | |||
1081 | /* | 1092 | /* |
1082 | * Special case handling for sync iocbs: | 1093 | * Special case handling for sync iocbs: |
1083 | * - events go directly into the iocb for fast handling | 1094 | * - events go directly into the iocb for fast handling |
@@ -1392,122 +1403,106 @@ SYSCALL_DEFINE1(io_destroy, aio_context_t, ctx) | |||
1392 | return -EINVAL; | 1403 | return -EINVAL; |
1393 | } | 1404 | } |
1394 | 1405 | ||
1395 | typedef ssize_t (rw_iter_op)(struct kiocb *, struct iov_iter *); | 1406 | static int aio_setup_rw(int rw, struct iocb *iocb, struct iovec **iovec, |
1396 | 1407 | bool vectored, bool compat, struct iov_iter *iter) | |
1397 | static int aio_setup_vectored_rw(int rw, char __user *buf, size_t len, | ||
1398 | struct iovec **iovec, | ||
1399 | bool compat, | ||
1400 | struct iov_iter *iter) | ||
1401 | { | 1408 | { |
1409 | void __user *buf = (void __user *)(uintptr_t)iocb->aio_buf; | ||
1410 | size_t len = iocb->aio_nbytes; | ||
1411 | |||
1412 | if (!vectored) { | ||
1413 | ssize_t ret = import_single_range(rw, buf, len, *iovec, iter); | ||
1414 | *iovec = NULL; | ||
1415 | return ret; | ||
1416 | } | ||
1402 | #ifdef CONFIG_COMPAT | 1417 | #ifdef CONFIG_COMPAT |
1403 | if (compat) | 1418 | if (compat) |
1404 | return compat_import_iovec(rw, | 1419 | return compat_import_iovec(rw, buf, len, UIO_FASTIOV, iovec, |
1405 | (struct compat_iovec __user *)buf, | 1420 | iter); |
1406 | len, UIO_FASTIOV, iovec, iter); | ||
1407 | #endif | 1421 | #endif |
1408 | return import_iovec(rw, (struct iovec __user *)buf, | 1422 | return import_iovec(rw, buf, len, UIO_FASTIOV, iovec, iter); |
1409 | len, UIO_FASTIOV, iovec, iter); | ||
1410 | } | 1423 | } |
1411 | 1424 | ||
1412 | /* | 1425 | static inline ssize_t aio_ret(struct kiocb *req, ssize_t ret) |
1413 | * aio_run_iocb: | 1426 | { |
1414 | * Performs the initial checks and io submission. | 1427 | switch (ret) { |
1415 | */ | 1428 | case -EIOCBQUEUED: |
1416 | static ssize_t aio_run_iocb(struct kiocb *req, unsigned opcode, | 1429 | return ret; |
1417 | char __user *buf, size_t len, bool compat) | 1430 | case -ERESTARTSYS: |
1431 | case -ERESTARTNOINTR: | ||
1432 | case -ERESTARTNOHAND: | ||
1433 | case -ERESTART_RESTARTBLOCK: | ||
1434 | /* | ||
1435 | * There's no easy way to restart the syscall since other AIO's | ||
1436 | * may be already running. Just fail this IO with EINTR. | ||
1437 | */ | ||
1438 | ret = -EINTR; | ||
1439 | /*FALLTHRU*/ | ||
1440 | default: | ||
1441 | aio_complete(req, ret, 0); | ||
1442 | return 0; | ||
1443 | } | ||
1444 | } | ||
1445 | |||
1446 | static ssize_t aio_read(struct kiocb *req, struct iocb *iocb, bool vectored, | ||
1447 | bool compat) | ||
1418 | { | 1448 | { |
1419 | struct file *file = req->ki_filp; | 1449 | struct file *file = req->ki_filp; |
1420 | ssize_t ret; | ||
1421 | int rw; | ||
1422 | fmode_t mode; | ||
1423 | rw_iter_op *iter_op; | ||
1424 | struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs; | 1450 | struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs; |
1425 | struct iov_iter iter; | 1451 | struct iov_iter iter; |
1452 | ssize_t ret; | ||
1426 | 1453 | ||
1427 | switch (opcode) { | 1454 | if (unlikely(!(file->f_mode & FMODE_READ))) |
1428 | case IOCB_CMD_PREAD: | 1455 | return -EBADF; |
1429 | case IOCB_CMD_PREADV: | 1456 | if (unlikely(!file->f_op->read_iter)) |
1430 | mode = FMODE_READ; | 1457 | return -EINVAL; |
1431 | rw = READ; | ||
1432 | iter_op = file->f_op->read_iter; | ||
1433 | goto rw_common; | ||
1434 | |||
1435 | case IOCB_CMD_PWRITE: | ||
1436 | case IOCB_CMD_PWRITEV: | ||
1437 | mode = FMODE_WRITE; | ||
1438 | rw = WRITE; | ||
1439 | iter_op = file->f_op->write_iter; | ||
1440 | goto rw_common; | ||
1441 | rw_common: | ||
1442 | if (unlikely(!(file->f_mode & mode))) | ||
1443 | return -EBADF; | ||
1444 | |||
1445 | if (!iter_op) | ||
1446 | return -EINVAL; | ||
1447 | |||
1448 | if (opcode == IOCB_CMD_PREADV || opcode == IOCB_CMD_PWRITEV) | ||
1449 | ret = aio_setup_vectored_rw(rw, buf, len, | ||
1450 | &iovec, compat, &iter); | ||
1451 | else { | ||
1452 | ret = import_single_range(rw, buf, len, iovec, &iter); | ||
1453 | iovec = NULL; | ||
1454 | } | ||
1455 | if (!ret) | ||
1456 | ret = rw_verify_area(rw, file, &req->ki_pos, | ||
1457 | iov_iter_count(&iter)); | ||
1458 | if (ret < 0) { | ||
1459 | kfree(iovec); | ||
1460 | return ret; | ||
1461 | } | ||
1462 | |||
1463 | if (rw == WRITE) | ||
1464 | file_start_write(file); | ||
1465 | |||
1466 | ret = iter_op(req, &iter); | ||
1467 | |||
1468 | if (rw == WRITE) | ||
1469 | file_end_write(file); | ||
1470 | kfree(iovec); | ||
1471 | break; | ||
1472 | |||
1473 | case IOCB_CMD_FDSYNC: | ||
1474 | if (!file->f_op->aio_fsync) | ||
1475 | return -EINVAL; | ||
1476 | |||
1477 | ret = file->f_op->aio_fsync(req, 1); | ||
1478 | break; | ||
1479 | 1458 | ||
1480 | case IOCB_CMD_FSYNC: | 1459 | ret = aio_setup_rw(READ, iocb, &iovec, vectored, compat, &iter); |
1481 | if (!file->f_op->aio_fsync) | 1460 | if (ret) |
1482 | return -EINVAL; | 1461 | return ret; |
1462 | ret = rw_verify_area(READ, file, &req->ki_pos, iov_iter_count(&iter)); | ||
1463 | if (!ret) | ||
1464 | ret = aio_ret(req, file->f_op->read_iter(req, &iter)); | ||
1465 | kfree(iovec); | ||
1466 | return ret; | ||
1467 | } | ||
1483 | 1468 | ||
1484 | ret = file->f_op->aio_fsync(req, 0); | 1469 | static ssize_t aio_write(struct kiocb *req, struct iocb *iocb, bool vectored, |
1485 | break; | 1470 | bool compat) |
1471 | { | ||
1472 | struct file *file = req->ki_filp; | ||
1473 | struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs; | ||
1474 | struct iov_iter iter; | ||
1475 | ssize_t ret; | ||
1486 | 1476 | ||
1487 | default: | 1477 | if (unlikely(!(file->f_mode & FMODE_WRITE))) |
1488 | pr_debug("EINVAL: no operation provided\n"); | 1478 | return -EBADF; |
1479 | if (unlikely(!file->f_op->write_iter)) | ||
1489 | return -EINVAL; | 1480 | return -EINVAL; |
1490 | } | ||
1491 | 1481 | ||
1492 | if (ret != -EIOCBQUEUED) { | 1482 | ret = aio_setup_rw(WRITE, iocb, &iovec, vectored, compat, &iter); |
1483 | if (ret) | ||
1484 | return ret; | ||
1485 | ret = rw_verify_area(WRITE, file, &req->ki_pos, iov_iter_count(&iter)); | ||
1486 | if (!ret) { | ||
1487 | req->ki_flags |= IOCB_WRITE; | ||
1488 | file_start_write(file); | ||
1489 | ret = aio_ret(req, file->f_op->write_iter(req, &iter)); | ||
1493 | /* | 1490 | /* |
1494 | * There's no easy way to restart the syscall since other AIO's | 1491 | * We release freeze protection in aio_complete(). Fool lockdep |
1495 | * may be already running. Just fail this IO with EINTR. | 1492 | * by telling it the lock got released so that it doesn't |
1493 | * complain about held lock when we return to userspace. | ||
1496 | */ | 1494 | */ |
1497 | if (unlikely(ret == -ERESTARTSYS || ret == -ERESTARTNOINTR || | 1495 | __sb_writers_release(file_inode(file)->i_sb, SB_FREEZE_WRITE); |
1498 | ret == -ERESTARTNOHAND || | ||
1499 | ret == -ERESTART_RESTARTBLOCK)) | ||
1500 | ret = -EINTR; | ||
1501 | aio_complete(req, ret, 0); | ||
1502 | } | 1496 | } |
1503 | 1497 | kfree(iovec); | |
1504 | return 0; | 1498 | return ret; |
1505 | } | 1499 | } |
1506 | 1500 | ||
1507 | static int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb, | 1501 | static int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb, |
1508 | struct iocb *iocb, bool compat) | 1502 | struct iocb *iocb, bool compat) |
1509 | { | 1503 | { |
1510 | struct aio_kiocb *req; | 1504 | struct aio_kiocb *req; |
1505 | struct file *file; | ||
1511 | ssize_t ret; | 1506 | ssize_t ret; |
1512 | 1507 | ||
1513 | /* enforce forwards compatibility on users */ | 1508 | /* enforce forwards compatibility on users */ |
@@ -1530,7 +1525,7 @@ static int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb, | |||
1530 | if (unlikely(!req)) | 1525 | if (unlikely(!req)) |
1531 | return -EAGAIN; | 1526 | return -EAGAIN; |
1532 | 1527 | ||
1533 | req->common.ki_filp = fget(iocb->aio_fildes); | 1528 | req->common.ki_filp = file = fget(iocb->aio_fildes); |
1534 | if (unlikely(!req->common.ki_filp)) { | 1529 | if (unlikely(!req->common.ki_filp)) { |
1535 | ret = -EBADF; | 1530 | ret = -EBADF; |
1536 | goto out_put_req; | 1531 | goto out_put_req; |
@@ -1565,13 +1560,29 @@ static int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb, | |||
1565 | req->ki_user_iocb = user_iocb; | 1560 | req->ki_user_iocb = user_iocb; |
1566 | req->ki_user_data = iocb->aio_data; | 1561 | req->ki_user_data = iocb->aio_data; |
1567 | 1562 | ||
1568 | ret = aio_run_iocb(&req->common, iocb->aio_lio_opcode, | 1563 | get_file(file); |
1569 | (char __user *)(unsigned long)iocb->aio_buf, | 1564 | switch (iocb->aio_lio_opcode) { |
1570 | iocb->aio_nbytes, | 1565 | case IOCB_CMD_PREAD: |
1571 | compat); | 1566 | ret = aio_read(&req->common, iocb, false, compat); |
1572 | if (ret) | 1567 | break; |
1573 | goto out_put_req; | 1568 | case IOCB_CMD_PWRITE: |
1569 | ret = aio_write(&req->common, iocb, false, compat); | ||
1570 | break; | ||
1571 | case IOCB_CMD_PREADV: | ||
1572 | ret = aio_read(&req->common, iocb, true, compat); | ||
1573 | break; | ||
1574 | case IOCB_CMD_PWRITEV: | ||
1575 | ret = aio_write(&req->common, iocb, true, compat); | ||
1576 | break; | ||
1577 | default: | ||
1578 | pr_debug("invalid aio operation %d\n", iocb->aio_lio_opcode); | ||
1579 | ret = -EINVAL; | ||
1580 | break; | ||
1581 | } | ||
1582 | fput(file); | ||
1574 | 1583 | ||
1584 | if (ret && ret != -EIOCBQUEUED) | ||
1585 | goto out_put_req; | ||
1575 | return 0; | 1586 | return 0; |
1576 | out_put_req: | 1587 | out_put_req: |
1577 | put_reqs_available(ctx, 1); | 1588 | put_reqs_available(ctx, 1); |
diff --git a/fs/ceph/file.c b/fs/ceph/file.c index 18630e800208..f995e3528a33 100644 --- a/fs/ceph/file.c +++ b/fs/ceph/file.c | |||
@@ -1770,7 +1770,6 @@ const struct file_operations ceph_file_fops = { | |||
1770 | .fsync = ceph_fsync, | 1770 | .fsync = ceph_fsync, |
1771 | .lock = ceph_lock, | 1771 | .lock = ceph_lock, |
1772 | .flock = ceph_flock, | 1772 | .flock = ceph_flock, |
1773 | .splice_read = generic_file_splice_read, | ||
1774 | .splice_write = iter_file_splice_write, | 1773 | .splice_write = iter_file_splice_write, |
1775 | .unlocked_ioctl = ceph_ioctl, | 1774 | .unlocked_ioctl = ceph_ioctl, |
1776 | .compat_ioctl = ceph_ioctl, | 1775 | .compat_ioctl = ceph_ioctl, |
diff --git a/fs/coredump.c b/fs/coredump.c index 281b768000e6..eb9c92c9b20f 100644 --- a/fs/coredump.c +++ b/fs/coredump.c | |||
@@ -1,6 +1,7 @@ | |||
1 | #include <linux/slab.h> | 1 | #include <linux/slab.h> |
2 | #include <linux/file.h> | 2 | #include <linux/file.h> |
3 | #include <linux/fdtable.h> | 3 | #include <linux/fdtable.h> |
4 | #include <linux/freezer.h> | ||
4 | #include <linux/mm.h> | 5 | #include <linux/mm.h> |
5 | #include <linux/stat.h> | 6 | #include <linux/stat.h> |
6 | #include <linux/fcntl.h> | 7 | #include <linux/fcntl.h> |
@@ -423,7 +424,9 @@ static int coredump_wait(int exit_code, struct core_state *core_state) | |||
423 | if (core_waiters > 0) { | 424 | if (core_waiters > 0) { |
424 | struct core_thread *ptr; | 425 | struct core_thread *ptr; |
425 | 426 | ||
427 | freezer_do_not_count(); | ||
426 | wait_for_completion(&core_state->startup); | 428 | wait_for_completion(&core_state->startup); |
429 | freezer_count(); | ||
427 | /* | 430 | /* |
428 | * Wait for all the threads to become inactive, so that | 431 | * Wait for all the threads to become inactive, so that |
429 | * all the thread context (extended register state, like | 432 | * all the thread context (extended register state, like |
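
freezer_do_not_count() tells the freezer to skip this task while it blocks, so a system-wide freeze is not held up by the coredump wait; freezer_count() re-arms normal behaviour (and freezes immediately if a freeze arrived in between). A minimal sketch of the bracketing added above:

	#include <linux/completion.h>
	#include <linux/freezer.h>

	static void example_wait_for_threads(struct completion *startup)
	{
		freezer_do_not_count();
		wait_for_completion(startup);
		freezer_count();
	}
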
diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c index 6a4d0e5418a1..b3ebe512d64c 100644 --- a/fs/fuse/dir.c +++ b/fs/fuse/dir.c | |||
@@ -286,6 +286,11 @@ const struct dentry_operations fuse_dentry_operations = { | |||
286 | .d_release = fuse_dentry_release, | 286 | .d_release = fuse_dentry_release, |
287 | }; | 287 | }; |
288 | 288 | ||
289 | const struct dentry_operations fuse_root_dentry_operations = { | ||
290 | .d_init = fuse_dentry_init, | ||
291 | .d_release = fuse_dentry_release, | ||
292 | }; | ||
293 | |||
289 | int fuse_valid_type(int m) | 294 | int fuse_valid_type(int m) |
290 | { | 295 | { |
291 | return S_ISREG(m) || S_ISDIR(m) || S_ISLNK(m) || S_ISCHR(m) || | 296 | return S_ISREG(m) || S_ISDIR(m) || S_ISLNK(m) || S_ISCHR(m) || |
diff --git a/fs/fuse/file.c b/fs/fuse/file.c index abc66a6237fd..2401c5dabb2a 100644 --- a/fs/fuse/file.c +++ b/fs/fuse/file.c | |||
@@ -1985,6 +1985,10 @@ static int fuse_write_end(struct file *file, struct address_space *mapping, | |||
1985 | { | 1985 | { |
1986 | struct inode *inode = page->mapping->host; | 1986 | struct inode *inode = page->mapping->host; |
1987 | 1987 | ||
1988 | /* Haven't copied anything? Skip zeroing, size extending, dirtying. */ | ||
1989 | if (!copied) | ||
1990 | goto unlock; | ||
1991 | |||
1988 | if (!PageUptodate(page)) { | 1992 | if (!PageUptodate(page)) { |
1989 | /* Zero any unwritten bytes at the end of the page */ | 1993 | /* Zero any unwritten bytes at the end of the page */ |
1990 | size_t endoff = (pos + copied) & ~PAGE_MASK; | 1994 | size_t endoff = (pos + copied) & ~PAGE_MASK; |
@@ -1995,6 +1999,8 @@ static int fuse_write_end(struct file *file, struct address_space *mapping, | |||
1995 | 1999 | ||
1996 | fuse_write_update_size(inode, pos + copied); | 2000 | fuse_write_update_size(inode, pos + copied); |
1997 | set_page_dirty(page); | 2001 | set_page_dirty(page); |
2002 | |||
2003 | unlock: | ||
1998 | unlock_page(page); | 2004 | unlock_page(page); |
1999 | put_page(page); | 2005 | put_page(page); |
2000 | 2006 | ||
diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h index 0dfbb136e59a..91307940c8ac 100644 --- a/fs/fuse/fuse_i.h +++ b/fs/fuse/fuse_i.h | |||
@@ -692,6 +692,7 @@ static inline u64 get_node_id(struct inode *inode) | |||
692 | extern const struct file_operations fuse_dev_operations; | 692 | extern const struct file_operations fuse_dev_operations; |
693 | 693 | ||
694 | extern const struct dentry_operations fuse_dentry_operations; | 694 | extern const struct dentry_operations fuse_dentry_operations; |
695 | extern const struct dentry_operations fuse_root_dentry_operations; | ||
695 | 696 | ||
696 | /** | 697 | /** |
697 | * Inode to nodeid comparison. | 698 | * Inode to nodeid comparison. |
diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c index 17141099f2e7..6fe6a88ecb4a 100644 --- a/fs/fuse/inode.c +++ b/fs/fuse/inode.c | |||
@@ -1131,10 +1131,11 @@ static int fuse_fill_super(struct super_block *sb, void *data, int silent) | |||
1131 | 1131 | ||
1132 | err = -ENOMEM; | 1132 | err = -ENOMEM; |
1133 | root = fuse_get_root_inode(sb, d.rootmode); | 1133 | root = fuse_get_root_inode(sb, d.rootmode); |
1134 | sb->s_d_op = &fuse_root_dentry_operations; | ||
1134 | root_dentry = d_make_root(root); | 1135 | root_dentry = d_make_root(root); |
1135 | if (!root_dentry) | 1136 | if (!root_dentry) |
1136 | goto err_dev_free; | 1137 | goto err_dev_free; |
1137 | /* only now - we want root dentry with NULL ->d_op */ | 1138 | /* Root dentry doesn't have .d_revalidate */ |
1138 | sb->s_d_op = &fuse_dentry_operations; | 1139 | sb->s_d_op = &fuse_dentry_operations; |
1139 | 1140 | ||
1140 | init_req = fuse_request_alloc(0); | 1141 | init_req = fuse_request_alloc(0); |
diff --git a/fs/nfs/client.c b/fs/nfs/client.c index 7555ba889d1f..ebecfb8fba06 100644 --- a/fs/nfs/client.c +++ b/fs/nfs/client.c | |||
@@ -314,7 +314,8 @@ static struct nfs_client *nfs_match_client(const struct nfs_client_initdata *dat | |||
314 | /* Match the full socket address */ | 314 | /* Match the full socket address */ |
315 | if (!rpc_cmp_addr_port(sap, clap)) | 315 | if (!rpc_cmp_addr_port(sap, clap)) |
316 | /* Match all xprt_switch full socket addresses */ | 316 | /* Match all xprt_switch full socket addresses */ |
317 | if (!rpc_clnt_xprt_switch_has_addr(clp->cl_rpcclient, | 317 | if (IS_ERR(clp->cl_rpcclient) || |
318 | !rpc_clnt_xprt_switch_has_addr(clp->cl_rpcclient, | ||
318 | sap)) | 319 | sap)) |
319 | continue; | 320 | continue; |
320 | 321 | ||
diff --git a/fs/nfs/namespace.c b/fs/nfs/namespace.c index c8162c660c44..5551e8ef67fd 100644 --- a/fs/nfs/namespace.c +++ b/fs/nfs/namespace.c | |||
@@ -98,7 +98,7 @@ rename_retry: | |||
98 | return end; | 98 | return end; |
99 | } | 99 | } |
100 | namelen = strlen(base); | 100 | namelen = strlen(base); |
101 | if (flags & NFS_PATH_CANONICAL) { | 101 | if (*end == '/') { |
102 | /* Strip off excess slashes in base string */ | 102 | /* Strip off excess slashes in base string */ |
103 | while (namelen > 0 && base[namelen - 1] == '/') | 103 | while (namelen > 0 && base[namelen - 1] == '/') |
104 | namelen--; | 104 | namelen--; |
diff --git a/fs/nfs/nfs4session.c b/fs/nfs/nfs4session.c index b62973045a3e..a61350f75c74 100644 --- a/fs/nfs/nfs4session.c +++ b/fs/nfs/nfs4session.c | |||
@@ -178,12 +178,14 @@ static int nfs4_slot_get_seqid(struct nfs4_slot_table *tbl, u32 slotid, | |||
178 | __must_hold(&tbl->slot_tbl_lock) | 178 | __must_hold(&tbl->slot_tbl_lock) |
179 | { | 179 | { |
180 | struct nfs4_slot *slot; | 180 | struct nfs4_slot *slot; |
181 | int ret; | ||
181 | 182 | ||
182 | slot = nfs4_lookup_slot(tbl, slotid); | 183 | slot = nfs4_lookup_slot(tbl, slotid); |
183 | if (IS_ERR(slot)) | 184 | ret = PTR_ERR_OR_ZERO(slot); |
184 | return PTR_ERR(slot); | 185 | if (!ret) |
185 | *seq_nr = slot->seq_nr; | 186 | *seq_nr = slot->seq_nr; |
186 | return 0; | 187 | |
188 | return ret; | ||
187 | } | 189 | } |
188 | 190 | ||
189 | /* | 191 | /* |
@@ -196,7 +198,7 @@ static int nfs4_slot_get_seqid(struct nfs4_slot_table *tbl, u32 slotid, | |||
196 | static bool nfs4_slot_seqid_in_use(struct nfs4_slot_table *tbl, | 198 | static bool nfs4_slot_seqid_in_use(struct nfs4_slot_table *tbl, |
197 | u32 slotid, u32 seq_nr) | 199 | u32 slotid, u32 seq_nr) |
198 | { | 200 | { |
199 | u32 cur_seq; | 201 | u32 cur_seq = 0; |
200 | bool ret = false; | 202 | bool ret = false; |
201 | 203 | ||
202 | spin_lock(&tbl->slot_tbl_lock); | 204 | spin_lock(&tbl->slot_tbl_lock); |
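
The nfs4_slot_get_seqid() hunk above replaces an explicit IS_ERR()/PTR_ERR() pair with PTR_ERR_OR_ZERO(), collapsing "error pointer or success" into one return value. A rough userspace approximation of that pattern (the helper names and the MAX_ERRNO trick mirror <linux/err.h>, but this is an illustration, not the kernel implementation):

#include <stdio.h>
#include <errno.h>
#include <stdint.h>

#define MAX_ERRNO 4095

static inline int is_err(const void *ptr)
{
        /* error codes are encoded as the top page of the address space */
        return (uintptr_t)ptr >= (uintptr_t)-MAX_ERRNO;
}

static inline long ptr_err_or_zero(const void *ptr)
{
        return is_err(ptr) ? (long)(intptr_t)ptr : 0;
}

static void *lookup_slot(int fail)
{
        static int slot_seq = 42;
        return fail ? (void *)(intptr_t)-ENOENT : &slot_seq;
}

int main(void)
{
        for (int fail = 0; fail <= 1; fail++) {
                int seq = 0;
                void *slot = lookup_slot(fail);
                long ret = ptr_err_or_zero(slot);

                if (!ret)
                        seq = *(int *)slot;   /* only dereference on success */
                printf("ret=%ld seq=%d\n", ret, seq);
        }
        return 0;
}
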
diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c index 56b2d96f9103..259ef85f435a 100644 --- a/fs/nfs/pnfs.c +++ b/fs/nfs/pnfs.c | |||
@@ -146,6 +146,8 @@ set_pnfs_layoutdriver(struct nfs_server *server, const struct nfs_fh *mntfh, | |||
146 | u32 id; | 146 | u32 id; |
147 | int i; | 147 | int i; |
148 | 148 | ||
149 | if (fsinfo->nlayouttypes == 0) | ||
150 | goto out_no_driver; | ||
149 | if (!(server->nfs_client->cl_exchange_flags & | 151 | if (!(server->nfs_client->cl_exchange_flags & |
150 | (EXCHGID4_FLAG_USE_NON_PNFS | EXCHGID4_FLAG_USE_PNFS_MDS))) { | 152 | (EXCHGID4_FLAG_USE_NON_PNFS | EXCHGID4_FLAG_USE_PNFS_MDS))) { |
151 | printk(KERN_ERR "NFS: %s: cl_exchange_flags 0x%x\n", | 153 | printk(KERN_ERR "NFS: %s: cl_exchange_flags 0x%x\n", |
diff --git a/fs/ntfs/dir.c b/fs/ntfs/dir.c index a18613579001..0ee19ecc982d 100644 --- a/fs/ntfs/dir.c +++ b/fs/ntfs/dir.c | |||
@@ -1544,8 +1544,6 @@ const struct file_operations ntfs_dir_ops = { | |||
1544 | .iterate = ntfs_readdir, /* Read directory contents. */ | 1544 | .iterate = ntfs_readdir, /* Read directory contents. */ |
1545 | #ifdef NTFS_RW | 1545 | #ifdef NTFS_RW |
1546 | .fsync = ntfs_dir_fsync, /* Sync a directory to disk. */ | 1546 | .fsync = ntfs_dir_fsync, /* Sync a directory to disk. */ |
1547 | /*.aio_fsync = ,*/ /* Sync all outstanding async | ||
1548 | i/o operations on a kiocb. */ | ||
1549 | #endif /* NTFS_RW */ | 1547 | #endif /* NTFS_RW */ |
1550 | /*.ioctl = ,*/ /* Perform function on the | 1548 | /*.ioctl = ,*/ /* Perform function on the |
1551 | mounted filesystem. */ | 1549 | mounted filesystem. */ |
diff --git a/fs/ocfs2/dir.c b/fs/ocfs2/dir.c index e7054e2ac922..3ecb9f337b7d 100644 --- a/fs/ocfs2/dir.c +++ b/fs/ocfs2/dir.c | |||
@@ -3699,7 +3699,7 @@ static void ocfs2_dx_dir_transfer_leaf(struct inode *dir, u32 split_hash, | |||
3699 | static int ocfs2_dx_dir_rebalance_credits(struct ocfs2_super *osb, | 3699 | static int ocfs2_dx_dir_rebalance_credits(struct ocfs2_super *osb, |
3700 | struct ocfs2_dx_root_block *dx_root) | 3700 | struct ocfs2_dx_root_block *dx_root) |
3701 | { | 3701 | { |
3702 | int credits = ocfs2_clusters_to_blocks(osb->sb, 2); | 3702 | int credits = ocfs2_clusters_to_blocks(osb->sb, 3); |
3703 | 3703 | ||
3704 | credits += ocfs2_calc_extend_credits(osb->sb, &dx_root->dr_list); | 3704 | credits += ocfs2_calc_extend_credits(osb->sb, &dx_root->dr_list); |
3705 | credits += ocfs2_quota_trans_credits(osb->sb); | 3705 | credits += ocfs2_quota_trans_credits(osb->sb); |
diff --git a/fs/orangefs/orangefs-debugfs.c b/fs/orangefs/orangefs-debugfs.c index eb09aa026723..38887cc5577f 100644 --- a/fs/orangefs/orangefs-debugfs.c +++ b/fs/orangefs/orangefs-debugfs.c | |||
@@ -114,6 +114,7 @@ static const struct seq_operations help_debug_ops = { | |||
114 | }; | 114 | }; |
115 | 115 | ||
116 | const struct file_operations debug_help_fops = { | 116 | const struct file_operations debug_help_fops = { |
117 | .owner = THIS_MODULE, | ||
117 | .open = orangefs_debug_help_open, | 118 | .open = orangefs_debug_help_open, |
118 | .read = seq_read, | 119 | .read = seq_read, |
119 | .release = seq_release, | 120 | .release = seq_release, |
@@ -121,6 +122,7 @@ const struct file_operations debug_help_fops = { | |||
121 | }; | 122 | }; |
122 | 123 | ||
123 | static const struct file_operations kernel_debug_fops = { | 124 | static const struct file_operations kernel_debug_fops = { |
125 | .owner = THIS_MODULE, | ||
124 | .open = orangefs_debug_open, | 126 | .open = orangefs_debug_open, |
125 | .read = orangefs_debug_read, | 127 | .read = orangefs_debug_read, |
126 | .write = orangefs_debug_write, | 128 | .write = orangefs_debug_write, |
@@ -141,6 +143,9 @@ static struct client_debug_mask client_debug_mask; | |||
141 | */ | 143 | */ |
142 | static DEFINE_MUTEX(orangefs_debug_lock); | 144 | static DEFINE_MUTEX(orangefs_debug_lock); |
143 | 145 | ||
146 | /* Used to protect data in ORANGEFS_KMOD_DEBUG_HELP_FILE */ | ||
147 | static DEFINE_MUTEX(orangefs_help_file_lock); | ||
148 | |||
144 | /* | 149 | /* |
145 | * initialize kmod debug operations, create orangefs debugfs dir and | 150 | * initialize kmod debug operations, create orangefs debugfs dir and |
146 | * ORANGEFS_KMOD_DEBUG_HELP_FILE. | 151 | * ORANGEFS_KMOD_DEBUG_HELP_FILE. |
@@ -289,6 +294,8 @@ static void *help_start(struct seq_file *m, loff_t *pos) | |||
289 | 294 | ||
290 | gossip_debug(GOSSIP_DEBUGFS_DEBUG, "help_start: start\n"); | 295 | gossip_debug(GOSSIP_DEBUGFS_DEBUG, "help_start: start\n"); |
291 | 296 | ||
297 | mutex_lock(&orangefs_help_file_lock); | ||
298 | |||
292 | if (*pos == 0) | 299 | if (*pos == 0) |
293 | payload = m->private; | 300 | payload = m->private; |
294 | 301 | ||
@@ -305,6 +312,7 @@ static void *help_next(struct seq_file *m, void *v, loff_t *pos) | |||
305 | static void help_stop(struct seq_file *m, void *p) | 312 | static void help_stop(struct seq_file *m, void *p) |
306 | { | 313 | { |
307 | gossip_debug(GOSSIP_DEBUGFS_DEBUG, "help_stop: start\n"); | 314 | gossip_debug(GOSSIP_DEBUGFS_DEBUG, "help_stop: start\n"); |
315 | mutex_unlock(&orangefs_help_file_lock); | ||
308 | } | 316 | } |
309 | 317 | ||
310 | static int help_show(struct seq_file *m, void *v) | 318 | static int help_show(struct seq_file *m, void *v) |
@@ -610,32 +618,54 @@ out: | |||
610 | * /sys/kernel/debug/orangefs/debug-help can be catted to | 618 | * /sys/kernel/debug/orangefs/debug-help can be catted to |
611 | * see all the available kernel and client debug keywords. | 619 | * see all the available kernel and client debug keywords. |
612 | * | 620 | * |
613 | * When the kernel boots, we have no idea what keywords the | 621 | * When orangefs.ko initializes, we have no idea what keywords the |
614 | * client supports, nor their associated masks. | 622 | * client supports, nor their associated masks. |
615 | * | 623 | * |
616 | * We pass through this function once at boot and stamp a | 624 | * We pass through this function once at module-load and stamp a |
617 | * boilerplate "we don't know" message for the client in the | 625 | * boilerplate "we don't know" message for the client in the |
618 | * debug-help file. We pass through here again when the client | 626 | * debug-help file. We pass through here again when the client |
619 | * starts and then we can fill out the debug-help file fully. | 627 | * starts and then we can fill out the debug-help file fully. |
620 | * | 628 | * |
621 | * The client might be restarted any number of times between | 629 | * The client might be restarted any number of times between |
622 | * reboots, we only build the debug-help file the first time. | 630 | * module reloads, we only build the debug-help file the first time. |
623 | */ | 631 | */ |
624 | int orangefs_prepare_debugfs_help_string(int at_boot) | 632 | int orangefs_prepare_debugfs_help_string(int at_boot) |
625 | { | 633 | { |
626 | int rc = -EINVAL; | ||
627 | int i; | ||
628 | int byte_count = 0; | ||
629 | char *client_title = "Client Debug Keywords:\n"; | 634 | char *client_title = "Client Debug Keywords:\n"; |
630 | char *kernel_title = "Kernel Debug Keywords:\n"; | 635 | char *kernel_title = "Kernel Debug Keywords:\n"; |
636 | size_t string_size = DEBUG_HELP_STRING_SIZE; | ||
637 | size_t result_size; | ||
638 | size_t i; | ||
639 | char *new; | ||
640 | int rc = -EINVAL; | ||
631 | 641 | ||
632 | gossip_debug(GOSSIP_UTILS_DEBUG, "%s: start\n", __func__); | 642 | gossip_debug(GOSSIP_UTILS_DEBUG, "%s: start\n", __func__); |
633 | 643 | ||
634 | if (at_boot) { | 644 | if (at_boot) |
635 | byte_count += strlen(HELP_STRING_UNINITIALIZED); | ||
636 | client_title = HELP_STRING_UNINITIALIZED; | 645 | client_title = HELP_STRING_UNINITIALIZED; |
637 | } else { | 646 | |
638 | /* | 647 | /* build a new debug_help_string. */ |
648 | new = kzalloc(DEBUG_HELP_STRING_SIZE, GFP_KERNEL); | ||
649 | if (!new) { | ||
650 | rc = -ENOMEM; | ||
651 | goto out; | ||
652 | } | ||
653 | |||
654 | /* | ||
655 | * strlcat(dst, src, size) will append at most | ||
656 | * "size - strlen(dst) - 1" bytes of src onto dst, | ||
657 | * null terminating the result, and return the total | ||
658 | * length of the string it tried to create. | ||
659 | * | ||
660 | * We'll just plow through here building our new debug | ||
661 | * help string and let strlcat take care of assuring that | ||
662 | * dst doesn't overflow. | ||
663 | */ | ||
664 | strlcat(new, client_title, string_size); | ||
665 | |||
666 | if (!at_boot) { | ||
667 | |||
668 | /* | ||
639 | * fill the client keyword/mask array and remember | 669 | * fill the client keyword/mask array and remember |
640 | * how many elements there were. | 670 | * how many elements there were. |
641 | */ | 671 | */ |
@@ -644,64 +674,40 @@ int orangefs_prepare_debugfs_help_string(int at_boot) | |||
644 | if (cdm_element_count <= 0) | 674 | if (cdm_element_count <= 0) |
645 | goto out; | 675 | goto out; |
646 | 676 | ||
647 | /* Count the bytes destined for debug_help_string. */ | ||
648 | byte_count += strlen(client_title); | ||
649 | |||
650 | for (i = 0; i < cdm_element_count; i++) { | 677 | for (i = 0; i < cdm_element_count; i++) { |
651 | byte_count += strlen(cdm_array[i].keyword + 2); | 678 | strlcat(new, "\t", string_size); |
652 | if (byte_count >= DEBUG_HELP_STRING_SIZE) { | 679 | strlcat(new, cdm_array[i].keyword, string_size); |
653 | pr_info("%s: overflow 1!\n", __func__); | 680 | strlcat(new, "\n", string_size); |
654 | goto out; | ||
655 | } | ||
656 | } | 681 | } |
657 | |||
658 | gossip_debug(GOSSIP_UTILS_DEBUG, | ||
659 | "%s: cdm_element_count:%d:\n", | ||
660 | __func__, | ||
661 | cdm_element_count); | ||
662 | } | 682 | } |
663 | 683 | ||
664 | byte_count += strlen(kernel_title); | 684 | strlcat(new, "\n", string_size); |
685 | strlcat(new, kernel_title, string_size); | ||
686 | |||
665 | for (i = 0; i < num_kmod_keyword_mask_map; i++) { | 687 | for (i = 0; i < num_kmod_keyword_mask_map; i++) { |
666 | byte_count += | 688 | strlcat(new, "\t", string_size); |
667 | strlen(s_kmod_keyword_mask_map[i].keyword + 2); | 689 | strlcat(new, s_kmod_keyword_mask_map[i].keyword, string_size); |
668 | if (byte_count >= DEBUG_HELP_STRING_SIZE) { | 690 | result_size = strlcat(new, "\n", string_size); |
669 | pr_info("%s: overflow 2!\n", __func__); | ||
670 | goto out; | ||
671 | } | ||
672 | } | 691 | } |
673 | 692 | ||
674 | /* build debug_help_string. */ | 693 | /* See if we tried to put too many bytes into "new"... */ |
675 | debug_help_string = kzalloc(DEBUG_HELP_STRING_SIZE, GFP_KERNEL); | 694 | if (result_size >= string_size) { |
676 | if (!debug_help_string) { | 695 | kfree(new); |
677 | rc = -ENOMEM; | ||
678 | goto out; | 696 | goto out; |
679 | } | 697 | } |
680 | 698 | ||
681 | strcat(debug_help_string, client_title); | 699 | if (at_boot) { |
682 | 700 | debug_help_string = new; | |
683 | if (!at_boot) { | 701 | } else { |
684 | for (i = 0; i < cdm_element_count; i++) { | 702 | mutex_lock(&orangefs_help_file_lock); |
685 | strcat(debug_help_string, "\t"); | 703 | memset(debug_help_string, 0, DEBUG_HELP_STRING_SIZE); |
686 | strcat(debug_help_string, cdm_array[i].keyword); | 704 | strlcat(debug_help_string, new, string_size); |
687 | strcat(debug_help_string, "\n"); | 705 | mutex_unlock(&orangefs_help_file_lock); |
688 | } | ||
689 | } | ||
690 | |||
691 | strcat(debug_help_string, "\n"); | ||
692 | strcat(debug_help_string, kernel_title); | ||
693 | |||
694 | for (i = 0; i < num_kmod_keyword_mask_map; i++) { | ||
695 | strcat(debug_help_string, "\t"); | ||
696 | strcat(debug_help_string, s_kmod_keyword_mask_map[i].keyword); | ||
697 | strcat(debug_help_string, "\n"); | ||
698 | } | 706 | } |
699 | 707 | ||
700 | rc = 0; | 708 | rc = 0; |
701 | 709 | ||
702 | out: | 710 | out: return rc; |
703 | |||
704 | return rc; | ||
705 | 711 | ||
706 | } | 712 | } |
707 | 713 | ||
@@ -959,8 +965,12 @@ int orangefs_debugfs_new_client_string(void __user *arg) | |||
959 | ret = copy_from_user(&client_debug_array_string, | 965 | ret = copy_from_user(&client_debug_array_string, |
960 | (void __user *)arg, | 966 | (void __user *)arg, |
961 | ORANGEFS_MAX_DEBUG_STRING_LEN); | 967 | ORANGEFS_MAX_DEBUG_STRING_LEN); |
962 | if (ret != 0) | 968 | |
969 | if (ret != 0) { | ||
970 | pr_info("%s: CLIENT_STRING: copy_from_user failed\n", | ||
971 | __func__); | ||
963 | return -EIO; | 972 | return -EIO; |
973 | } | ||
964 | 974 | ||
965 | /* | 975 | /* |
966 | * The real client-core makes an effort to ensure | 976 | * The real client-core makes an effort to ensure |
@@ -975,45 +985,18 @@ int orangefs_debugfs_new_client_string(void __user *arg) | |||
975 | client_debug_array_string[ORANGEFS_MAX_DEBUG_STRING_LEN - 1] = | 985 | client_debug_array_string[ORANGEFS_MAX_DEBUG_STRING_LEN - 1] = |
976 | '\0'; | 986 | '\0'; |
977 | 987 | ||
978 | if (ret != 0) { | ||
979 | pr_info("%s: CLIENT_STRING: copy_from_user failed\n", | ||
980 | __func__); | ||
981 | return -EIO; | ||
982 | } | ||
983 | |||
984 | pr_info("%s: client debug array string has been received.\n", | 988 | pr_info("%s: client debug array string has been received.\n", |
985 | __func__); | 989 | __func__); |
986 | 990 | ||
987 | if (!help_string_initialized) { | 991 | if (!help_string_initialized) { |
988 | 992 | ||
989 | /* Free the "we don't know yet" default string... */ | 993 | /* Build a proper debug help string. */ |
990 | kfree(debug_help_string); | ||
991 | |||
992 | /* build a proper debug help string */ | ||
993 | if (orangefs_prepare_debugfs_help_string(0)) { | 994 | if (orangefs_prepare_debugfs_help_string(0)) { |
994 | gossip_err("%s: no debug help string \n", | 995 | gossip_err("%s: no debug help string \n", |
995 | __func__); | 996 | __func__); |
996 | return -EIO; | 997 | return -EIO; |
997 | } | 998 | } |
998 | 999 | ||
999 | /* Replace the boilerplate boot-time debug-help file. */ | ||
1000 | debugfs_remove(help_file_dentry); | ||
1001 | |||
1002 | help_file_dentry = | ||
1003 | debugfs_create_file( | ||
1004 | ORANGEFS_KMOD_DEBUG_HELP_FILE, | ||
1005 | 0444, | ||
1006 | debug_dir, | ||
1007 | debug_help_string, | ||
1008 | &debug_help_fops); | ||
1009 | |||
1010 | if (!help_file_dentry) { | ||
1011 | gossip_err("%s: debugfs_create_file failed for" | ||
1012 | " :%s:!\n", | ||
1013 | __func__, | ||
1014 | ORANGEFS_KMOD_DEBUG_HELP_FILE); | ||
1015 | return -EIO; | ||
1016 | } | ||
1017 | } | 1000 | } |
1018 | 1001 | ||
1019 | debug_mask_to_string(&client_debug_mask, 1); | 1002 | debug_mask_to_string(&client_debug_mask, 1); |
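
The orangefs_prepare_debugfs_help_string() rework above (and its in-line comment) leans on strlcat()'s contract: it never overflows the destination, and its return value is the total length it tried to create, so a single comparison at the end detects truncation. A small standalone sketch of that overflow check, with a simplified local strlcat() since the function is not in every libc:

#include <stdio.h>
#include <string.h>

static size_t my_strlcat(char *dst, const char *src, size_t size)
{
        size_t dlen = strlen(dst);
        size_t slen = strlen(src);

        if (dlen < size)
                snprintf(dst + dlen, size - dlen, "%s", src);  /* never overflows */
        return dlen + slen;   /* length it tried to create */
}

int main(void)
{
        char buf[32] = "";
        const char *pieces[] = { "Kernel Debug Keywords:\n", "\tsuper\n", "\tinode\n" };
        size_t want = 0;

        for (size_t i = 0; i < sizeof(pieces) / sizeof(pieces[0]); i++)
                want = my_strlcat(buf, pieces[i], sizeof(buf));

        if (want >= sizeof(buf))
                printf("truncated: tried to build %zu bytes, buffer holds %zu\n",
                       want, sizeof(buf));
        else
                printf("ok:\n%s", buf);
        return 0;
}
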
diff --git a/fs/orangefs/orangefs-mod.c b/fs/orangefs/orangefs-mod.c index 2e5b03065f34..4113eb0495bf 100644 --- a/fs/orangefs/orangefs-mod.c +++ b/fs/orangefs/orangefs-mod.c | |||
@@ -124,7 +124,7 @@ static int __init orangefs_init(void) | |||
124 | * unknown at boot time. | 124 | * unknown at boot time. |
125 | * | 125 | * |
126 | * orangefs_prepare_debugfs_help_string will be used again | 126 | * orangefs_prepare_debugfs_help_string will be used again |
127 | * later to rebuild the debug-help file after the client starts | 127 | * later to rebuild the debug-help-string after the client starts |
128 | * and passes along the needed info. The argument signifies | 128 | * and passes along the needed info. The argument signifies |
129 | * which time orangefs_prepare_debugfs_help_string is being | 129 | * which time orangefs_prepare_debugfs_help_string is being |
130 | * called. | 130 | * called. |
@@ -152,7 +152,9 @@ static int __init orangefs_init(void) | |||
152 | 152 | ||
153 | ret = register_filesystem(&orangefs_fs_type); | 153 | ret = register_filesystem(&orangefs_fs_type); |
154 | if (ret == 0) { | 154 | if (ret == 0) { |
155 | pr_info("orangefs: module version %s loaded\n", ORANGEFS_VERSION); | 155 | pr_info("%s: module version %s loaded\n", |
156 | __func__, | ||
157 | ORANGEFS_VERSION); | ||
156 | ret = 0; | 158 | ret = 0; |
157 | goto out; | 159 | goto out; |
158 | } | 160 | } |
diff --git a/fs/splice.c b/fs/splice.c index 153d4f3bd441..dcaf185a5731 100644 --- a/fs/splice.c +++ b/fs/splice.c | |||
@@ -299,13 +299,8 @@ ssize_t generic_file_splice_read(struct file *in, loff_t *ppos, | |||
299 | { | 299 | { |
300 | struct iov_iter to; | 300 | struct iov_iter to; |
301 | struct kiocb kiocb; | 301 | struct kiocb kiocb; |
302 | loff_t isize; | ||
303 | int idx, ret; | 302 | int idx, ret; |
304 | 303 | ||
305 | isize = i_size_read(in->f_mapping->host); | ||
306 | if (unlikely(*ppos >= isize)) | ||
307 | return 0; | ||
308 | |||
309 | iov_iter_pipe(&to, ITER_PIPE | READ, pipe, len); | 304 | iov_iter_pipe(&to, ITER_PIPE | READ, pipe, len); |
310 | idx = to.idx; | 305 | idx = to.idx; |
311 | init_sync_kiocb(&kiocb, in); | 306 | init_sync_kiocb(&kiocb, in); |
diff --git a/fs/xattr.c b/fs/xattr.c index 3368659c471e..2d13b4e62fae 100644 --- a/fs/xattr.c +++ b/fs/xattr.c | |||
@@ -170,7 +170,7 @@ int __vfs_setxattr_noperm(struct dentry *dentry, const char *name, | |||
170 | const void *value, size_t size, int flags) | 170 | const void *value, size_t size, int flags) |
171 | { | 171 | { |
172 | struct inode *inode = dentry->d_inode; | 172 | struct inode *inode = dentry->d_inode; |
173 | int error = -EOPNOTSUPP; | 173 | int error = -EAGAIN; |
174 | int issec = !strncmp(name, XATTR_SECURITY_PREFIX, | 174 | int issec = !strncmp(name, XATTR_SECURITY_PREFIX, |
175 | XATTR_SECURITY_PREFIX_LEN); | 175 | XATTR_SECURITY_PREFIX_LEN); |
176 | 176 | ||
@@ -183,15 +183,21 @@ int __vfs_setxattr_noperm(struct dentry *dentry, const char *name, | |||
183 | security_inode_post_setxattr(dentry, name, value, | 183 | security_inode_post_setxattr(dentry, name, value, |
184 | size, flags); | 184 | size, flags); |
185 | } | 185 | } |
186 | } else if (issec) { | 186 | } else { |
187 | const char *suffix = name + XATTR_SECURITY_PREFIX_LEN; | ||
188 | |||
189 | if (unlikely(is_bad_inode(inode))) | 187 | if (unlikely(is_bad_inode(inode))) |
190 | return -EIO; | 188 | return -EIO; |
191 | error = security_inode_setsecurity(inode, suffix, value, | 189 | } |
192 | size, flags); | 190 | if (error == -EAGAIN) { |
193 | if (!error) | 191 | error = -EOPNOTSUPP; |
194 | fsnotify_xattr(dentry); | 192 | |
193 | if (issec) { | ||
194 | const char *suffix = name + XATTR_SECURITY_PREFIX_LEN; | ||
195 | |||
196 | error = security_inode_setsecurity(inode, suffix, value, | ||
197 | size, flags); | ||
198 | if (!error) | ||
199 | fsnotify_xattr(dentry); | ||
200 | } | ||
195 | } | 201 | } |
196 | 202 | ||
197 | return error; | 203 | return error; |
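
The __vfs_setxattr_noperm() hunk above initializes error to -EAGAIN and treats it as a "no handler claimed this yet" sentinel, only mapping it to -EOPNOTSUPP or the security_inode_setsecurity() fallback afterwards. A standalone sketch of that sentinel-then-fallback shape (the handler names here are placeholders, not the VFS ones):

#include <stdio.h>
#include <errno.h>

static int primary_handler(int supported)
{
        return supported ? 0 : -EAGAIN;   /* -EAGAIN: nobody handled it yet */
}

static int fallback_handler(void)
{
        return 0;                         /* stand-in for the security fallback */
}

static int set_attr(int supported, int have_fallback)
{
        int error = primary_handler(supported);

        if (error == -EAGAIN) {
                error = -EOPNOTSUPP;      /* default if no fallback applies */
                if (have_fallback)
                        error = fallback_handler();
        }
        return error;
}

int main(void)
{
        printf("supported:              %d\n", set_attr(1, 0));
        printf("unsupported + fallback: %d\n", set_attr(0, 1));
        printf("unsupported:            %d\n", set_attr(0, 0));
        return 0;
}
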
diff --git a/fs/xfs/libxfs/xfs_defer.c b/fs/xfs/libxfs/xfs_defer.c index 613c5cf19436..5c2929f94bd3 100644 --- a/fs/xfs/libxfs/xfs_defer.c +++ b/fs/xfs/libxfs/xfs_defer.c | |||
@@ -199,9 +199,9 @@ xfs_defer_intake_work( | |||
199 | struct xfs_defer_pending *dfp; | 199 | struct xfs_defer_pending *dfp; |
200 | 200 | ||
201 | list_for_each_entry(dfp, &dop->dop_intake, dfp_list) { | 201 | list_for_each_entry(dfp, &dop->dop_intake, dfp_list) { |
202 | trace_xfs_defer_intake_work(tp->t_mountp, dfp); | ||
203 | dfp->dfp_intent = dfp->dfp_type->create_intent(tp, | 202 | dfp->dfp_intent = dfp->dfp_type->create_intent(tp, |
204 | dfp->dfp_count); | 203 | dfp->dfp_count); |
204 | trace_xfs_defer_intake_work(tp->t_mountp, dfp); | ||
205 | list_sort(tp->t_mountp, &dfp->dfp_work, | 205 | list_sort(tp->t_mountp, &dfp->dfp_work, |
206 | dfp->dfp_type->diff_items); | 206 | dfp->dfp_type->diff_items); |
207 | list_for_each(li, &dfp->dfp_work) | 207 | list_for_each(li, &dfp->dfp_work) |
@@ -221,21 +221,14 @@ xfs_defer_trans_abort( | |||
221 | struct xfs_defer_pending *dfp; | 221 | struct xfs_defer_pending *dfp; |
222 | 222 | ||
223 | trace_xfs_defer_trans_abort(tp->t_mountp, dop); | 223 | trace_xfs_defer_trans_abort(tp->t_mountp, dop); |
224 | /* | ||
225 | * If the transaction was committed, drop the intent reference | ||
226 | * since we're bailing out of here. The other reference is | ||
227 | * dropped when the intent hits the AIL. If the transaction | ||
228 | * was not committed, the intent is freed by the intent item | ||
229 | * unlock handler on abort. | ||
230 | */ | ||
231 | if (!dop->dop_committed) | ||
232 | return; | ||
233 | 224 | ||
234 | /* Abort intent items. */ | 225 | /* Abort intent items that don't have a done item. */ |
235 | list_for_each_entry(dfp, &dop->dop_pending, dfp_list) { | 226 | list_for_each_entry(dfp, &dop->dop_pending, dfp_list) { |
236 | trace_xfs_defer_pending_abort(tp->t_mountp, dfp); | 227 | trace_xfs_defer_pending_abort(tp->t_mountp, dfp); |
237 | if (!dfp->dfp_done) | 228 | if (dfp->dfp_intent && !dfp->dfp_done) { |
238 | dfp->dfp_type->abort_intent(dfp->dfp_intent); | 229 | dfp->dfp_type->abort_intent(dfp->dfp_intent); |
230 | dfp->dfp_intent = NULL; | ||
231 | } | ||
239 | } | 232 | } |
240 | 233 | ||
241 | /* Shut down FS. */ | 234 | /* Shut down FS. */ |
diff --git a/include/asm-generic/percpu.h b/include/asm-generic/percpu.h index 40e887068da2..0504ef8f3aa3 100644 --- a/include/asm-generic/percpu.h +++ b/include/asm-generic/percpu.h | |||
@@ -118,9 +118,9 @@ do { \ | |||
118 | #define this_cpu_generic_read(pcp) \ | 118 | #define this_cpu_generic_read(pcp) \ |
119 | ({ \ | 119 | ({ \ |
120 | typeof(pcp) __ret; \ | 120 | typeof(pcp) __ret; \ |
121 | preempt_disable(); \ | 121 | preempt_disable_notrace(); \ |
122 | __ret = raw_cpu_generic_read(pcp); \ | 122 | __ret = raw_cpu_generic_read(pcp); \ |
123 | preempt_enable(); \ | 123 | preempt_enable_notrace(); \ |
124 | __ret; \ | 124 | __ret; \ |
125 | }) | 125 | }) |
126 | 126 | ||
diff --git a/include/asm-generic/sections.h b/include/asm-generic/sections.h index af0254c09424..4df64a1fc09e 100644 --- a/include/asm-generic/sections.h +++ b/include/asm-generic/sections.h | |||
@@ -14,6 +14,8 @@ | |||
14 | * [_sdata, _edata]: contains .data.* sections, may also contain .rodata.* | 14 | * [_sdata, _edata]: contains .data.* sections, may also contain .rodata.* |
15 | * and/or .init.* sections. | 15 | * and/or .init.* sections. |
16 | * [__start_rodata, __end_rodata]: contains .rodata.* sections | 16 | * [__start_rodata, __end_rodata]: contains .rodata.* sections |
17 | * [__start_data_ro_after_init, __end_data_ro_after_init]: | ||
18 | * contains data.ro_after_init section | ||
17 | * [__init_begin, __init_end]: contains .init.* sections, but .init.text.* | 19 | * [__init_begin, __init_end]: contains .init.* sections, but .init.text.* |
18 | * may be out of this range on some architectures. | 20 | * may be out of this range on some architectures. |
19 | * [_sinittext, _einittext]: contains .init.text.* sections | 21 | * [_sinittext, _einittext]: contains .init.text.* sections |
@@ -31,6 +33,7 @@ extern char _data[], _sdata[], _edata[]; | |||
31 | extern char __bss_start[], __bss_stop[]; | 33 | extern char __bss_start[], __bss_stop[]; |
32 | extern char __init_begin[], __init_end[]; | 34 | extern char __init_begin[], __init_end[]; |
33 | extern char _sinittext[], _einittext[]; | 35 | extern char _sinittext[], _einittext[]; |
36 | extern char __start_data_ro_after_init[], __end_data_ro_after_init[]; | ||
34 | extern char _end[]; | 37 | extern char _end[]; |
35 | extern char __per_cpu_load[], __per_cpu_start[], __per_cpu_end[]; | 38 | extern char __per_cpu_load[], __per_cpu_start[], __per_cpu_end[]; |
36 | extern char __kprobes_text_start[], __kprobes_text_end[]; | 39 | extern char __kprobes_text_start[], __kprobes_text_end[]; |
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h index 30747960bc54..31e1d639abed 100644 --- a/include/asm-generic/vmlinux.lds.h +++ b/include/asm-generic/vmlinux.lds.h | |||
@@ -259,7 +259,10 @@ | |||
259 | * own by defining an empty RO_AFTER_INIT_DATA. | 259 | * own by defining an empty RO_AFTER_INIT_DATA. |
260 | */ | 260 | */ |
261 | #ifndef RO_AFTER_INIT_DATA | 261 | #ifndef RO_AFTER_INIT_DATA |
262 | #define RO_AFTER_INIT_DATA *(.data..ro_after_init) | 262 | #define RO_AFTER_INIT_DATA \ |
263 | __start_data_ro_after_init = .; \ | ||
264 | *(.data..ro_after_init) \ | ||
265 | __end_data_ro_after_init = .; | ||
263 | #endif | 266 | #endif |
264 | 267 | ||
265 | /* | 268 | /* |
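
The vmlinux.lds.h and sections.h changes above bracket .data..ro_after_init with __start_data_ro_after_init/__end_data_ro_after_init so other code can locate and protect that range. A rough userspace analogue of the begin/end-marker idea, assuming GCC plus GNU ld (which auto-generates __start_/__stop_ symbols for identifier-named sections); the section name below is made up for the example:

#include <stdio.h>

#define RO_AFTER_INIT __attribute__((used, section("my_ro_after_init")))

static RO_AFTER_INIT int first  = 1;
static RO_AFTER_INIT int second = 2;

/* GNU ld provides __start_<sec>/__stop_<sec> for orphan sections whose names
 * are valid C identifiers; the kernel defines its own markers in the linker
 * script instead of relying on this. */
extern int __start_my_ro_after_init[];
extern int __stop_my_ro_after_init[];

int main(void)
{
        for (int *p = __start_my_ro_after_init; p < __stop_my_ro_after_init; p++)
                printf("entry: %d\n", *p);
        return 0;
}
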
diff --git a/include/linux/acpi.h b/include/linux/acpi.h index 689a8b9b9c8f..61a3d90f32b3 100644 --- a/include/linux/acpi.h +++ b/include/linux/acpi.h | |||
@@ -555,7 +555,8 @@ int acpi_device_uevent_modalias(struct device *, struct kobj_uevent_env *); | |||
555 | int acpi_device_modalias(struct device *, char *, int); | 555 | int acpi_device_modalias(struct device *, char *, int); |
556 | void acpi_walk_dep_device_list(acpi_handle handle); | 556 | void acpi_walk_dep_device_list(acpi_handle handle); |
557 | 557 | ||
558 | struct platform_device *acpi_create_platform_device(struct acpi_device *); | 558 | struct platform_device *acpi_create_platform_device(struct acpi_device *, |
559 | struct property_entry *); | ||
559 | #define ACPI_PTR(_ptr) (_ptr) | 560 | #define ACPI_PTR(_ptr) (_ptr) |
560 | 561 | ||
561 | static inline void acpi_device_set_enumerated(struct acpi_device *adev) | 562 | static inline void acpi_device_set_enumerated(struct acpi_device *adev) |
diff --git a/include/linux/ceph/osd_client.h b/include/linux/ceph/osd_client.h index 96337b15a60d..a8e66344bacc 100644 --- a/include/linux/ceph/osd_client.h +++ b/include/linux/ceph/osd_client.h | |||
@@ -258,6 +258,8 @@ struct ceph_watch_item { | |||
258 | struct ceph_entity_addr addr; | 258 | struct ceph_entity_addr addr; |
259 | }; | 259 | }; |
260 | 260 | ||
261 | #define CEPH_LINGER_ID_START 0xffff000000000000ULL | ||
262 | |||
261 | struct ceph_osd_client { | 263 | struct ceph_osd_client { |
262 | struct ceph_client *client; | 264 | struct ceph_client *client; |
263 | 265 | ||
diff --git a/include/linux/console.h b/include/linux/console.h index 3672809234a7..d530c4627e54 100644 --- a/include/linux/console.h +++ b/include/linux/console.h | |||
@@ -173,12 +173,6 @@ static inline void console_sysfs_notify(void) | |||
173 | #endif | 173 | #endif |
174 | extern bool console_suspend_enabled; | 174 | extern bool console_suspend_enabled; |
175 | 175 | ||
176 | #ifdef CONFIG_OF | ||
177 | extern void console_set_by_of(void); | ||
178 | #else | ||
179 | static inline void console_set_by_of(void) {} | ||
180 | #endif | ||
181 | |||
182 | /* Suspend and resume console messages over PM events */ | 176 | /* Suspend and resume console messages over PM events */ |
183 | extern void suspend_console(void); | 177 | extern void suspend_console(void); |
184 | extern void resume_console(void); | 178 | extern void resume_console(void); |
diff --git a/include/linux/frontswap.h b/include/linux/frontswap.h index c46d2aa16d81..1d18af034554 100644 --- a/include/linux/frontswap.h +++ b/include/linux/frontswap.h | |||
@@ -106,8 +106,9 @@ static inline void frontswap_invalidate_area(unsigned type) | |||
106 | 106 | ||
107 | static inline void frontswap_init(unsigned type, unsigned long *map) | 107 | static inline void frontswap_init(unsigned type, unsigned long *map) |
108 | { | 108 | { |
109 | if (frontswap_enabled()) | 109 | #ifdef CONFIG_FRONTSWAP |
110 | __frontswap_init(type, map); | 110 | __frontswap_init(type, map); |
111 | #endif | ||
111 | } | 112 | } |
112 | 113 | ||
113 | #endif /* _LINUX_FRONTSWAP_H */ | 114 | #endif /* _LINUX_FRONTSWAP_H */ |
diff --git a/include/linux/fs.h b/include/linux/fs.h index 16d2b6e874d6..dc0478c07b2a 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h | |||
@@ -321,6 +321,7 @@ struct writeback_control; | |||
321 | #define IOCB_HIPRI (1 << 3) | 321 | #define IOCB_HIPRI (1 << 3) |
322 | #define IOCB_DSYNC (1 << 4) | 322 | #define IOCB_DSYNC (1 << 4) |
323 | #define IOCB_SYNC (1 << 5) | 323 | #define IOCB_SYNC (1 << 5) |
324 | #define IOCB_WRITE (1 << 6) | ||
324 | 325 | ||
325 | struct kiocb { | 326 | struct kiocb { |
326 | struct file *ki_filp; | 327 | struct file *ki_filp; |
@@ -1709,7 +1710,6 @@ struct file_operations { | |||
1709 | int (*flush) (struct file *, fl_owner_t id); | 1710 | int (*flush) (struct file *, fl_owner_t id); |
1710 | int (*release) (struct inode *, struct file *); | 1711 | int (*release) (struct inode *, struct file *); |
1711 | int (*fsync) (struct file *, loff_t, loff_t, int datasync); | 1712 | int (*fsync) (struct file *, loff_t, loff_t, int datasync); |
1712 | int (*aio_fsync) (struct kiocb *, int datasync); | ||
1713 | int (*fasync) (int, struct file *, int); | 1713 | int (*fasync) (int, struct file *, int); |
1714 | int (*lock) (struct file *, int, struct file_lock *); | 1714 | int (*lock) (struct file *, int, struct file_lock *); |
1715 | ssize_t (*sendpage) (struct file *, struct page *, int, size_t, loff_t *, int); | 1715 | ssize_t (*sendpage) (struct file *, struct page *, int, size_t, loff_t *, int); |
diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h index 9b9f65d99873..e35e6de633b9 100644 --- a/include/linux/huge_mm.h +++ b/include/linux/huge_mm.h | |||
@@ -22,7 +22,7 @@ extern int mincore_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd, | |||
22 | unsigned char *vec); | 22 | unsigned char *vec); |
23 | extern bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr, | 23 | extern bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr, |
24 | unsigned long new_addr, unsigned long old_end, | 24 | unsigned long new_addr, unsigned long old_end, |
25 | pmd_t *old_pmd, pmd_t *new_pmd); | 25 | pmd_t *old_pmd, pmd_t *new_pmd, bool *need_flush); |
26 | extern int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd, | 26 | extern int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd, |
27 | unsigned long addr, pgprot_t newprot, | 27 | unsigned long addr, pgprot_t newprot, |
28 | int prot_numa); | 28 | int prot_numa); |
diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h index ca1ad9ebbc92..a0649973ee5b 100644 --- a/include/linux/ipv6.h +++ b/include/linux/ipv6.h | |||
@@ -149,7 +149,7 @@ static inline bool inet6_exact_dif_match(struct net *net, struct sk_buff *skb) | |||
149 | { | 149 | { |
150 | #if defined(CONFIG_NET_L3_MASTER_DEV) | 150 | #if defined(CONFIG_NET_L3_MASTER_DEV) |
151 | if (!net->ipv4.sysctl_tcp_l3mdev_accept && | 151 | if (!net->ipv4.sysctl_tcp_l3mdev_accept && |
152 | ipv6_l3mdev_skb(IP6CB(skb)->flags)) | 152 | skb && ipv6_l3mdev_skb(IP6CB(skb)->flags)) |
153 | return true; | 153 | return true; |
154 | #endif | 154 | #endif |
155 | return false; | 155 | return false; |
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 91ee3643ccc8..bf04a46f6d5b 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h | |||
@@ -3354,6 +3354,21 @@ int dev_forward_skb(struct net_device *dev, struct sk_buff *skb); | |||
3354 | bool is_skb_forwardable(const struct net_device *dev, | 3354 | bool is_skb_forwardable(const struct net_device *dev, |
3355 | const struct sk_buff *skb); | 3355 | const struct sk_buff *skb); |
3356 | 3356 | ||
3357 | static __always_inline int ____dev_forward_skb(struct net_device *dev, | ||
3358 | struct sk_buff *skb) | ||
3359 | { | ||
3360 | if (skb_orphan_frags(skb, GFP_ATOMIC) || | ||
3361 | unlikely(!is_skb_forwardable(dev, skb))) { | ||
3362 | atomic_long_inc(&dev->rx_dropped); | ||
3363 | kfree_skb(skb); | ||
3364 | return NET_RX_DROP; | ||
3365 | } | ||
3366 | |||
3367 | skb_scrub_packet(skb, true); | ||
3368 | skb->priority = 0; | ||
3369 | return 0; | ||
3370 | } | ||
3371 | |||
3357 | void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev); | 3372 | void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev); |
3358 | 3373 | ||
3359 | extern int netdev_budget; | 3374 | extern int netdev_budget; |
diff --git a/include/linux/phy/phy.h b/include/linux/phy/phy.h index ee1bed7dbfc6..78bb0d7f6b11 100644 --- a/include/linux/phy/phy.h +++ b/include/linux/phy/phy.h | |||
@@ -253,6 +253,13 @@ static inline int phy_set_mode(struct phy *phy, enum phy_mode mode) | |||
253 | return -ENOSYS; | 253 | return -ENOSYS; |
254 | } | 254 | } |
255 | 255 | ||
256 | static inline int phy_reset(struct phy *phy) | ||
257 | { | ||
258 | if (!phy) | ||
259 | return 0; | ||
260 | return -ENOSYS; | ||
261 | } | ||
262 | |||
256 | static inline int phy_get_bus_width(struct phy *phy) | 263 | static inline int phy_get_bus_width(struct phy *phy) |
257 | { | 264 | { |
258 | return -ENOSYS; | 265 | return -ENOSYS; |
diff --git a/include/net/ip.h b/include/net/ip.h index 5413883ac47f..d3a107850a41 100644 --- a/include/net/ip.h +++ b/include/net/ip.h | |||
@@ -47,8 +47,7 @@ struct inet_skb_parm { | |||
47 | #define IPSKB_REROUTED BIT(4) | 47 | #define IPSKB_REROUTED BIT(4) |
48 | #define IPSKB_DOREDIRECT BIT(5) | 48 | #define IPSKB_DOREDIRECT BIT(5) |
49 | #define IPSKB_FRAG_PMTU BIT(6) | 49 | #define IPSKB_FRAG_PMTU BIT(6) |
50 | #define IPSKB_FRAG_SEGS BIT(7) | 50 | #define IPSKB_L3SLAVE BIT(7) |
51 | #define IPSKB_L3SLAVE BIT(8) | ||
52 | 51 | ||
53 | u16 frag_max_size; | 52 | u16 frag_max_size; |
54 | }; | 53 | }; |
diff --git a/include/net/ip6_tunnel.h b/include/net/ip6_tunnel.h index 20ed9699fcd4..1b1cf33cbfb0 100644 --- a/include/net/ip6_tunnel.h +++ b/include/net/ip6_tunnel.h | |||
@@ -146,6 +146,7 @@ static inline void ip6tunnel_xmit(struct sock *sk, struct sk_buff *skb, | |||
146 | { | 146 | { |
147 | int pkt_len, err; | 147 | int pkt_len, err; |
148 | 148 | ||
149 | memset(skb->cb, 0, sizeof(struct inet6_skb_parm)); | ||
149 | pkt_len = skb->len - skb_inner_network_offset(skb); | 150 | pkt_len = skb->len - skb_inner_network_offset(skb); |
150 | err = ip6_local_out(dev_net(skb_dst(skb)->dev), sk, skb); | 151 | err = ip6_local_out(dev_net(skb_dst(skb)->dev), sk, skb); |
151 | if (unlikely(net_xmit_eval(err))) | 152 | if (unlikely(net_xmit_eval(err))) |
diff --git a/include/net/netfilter/nf_conntrack_labels.h b/include/net/netfilter/nf_conntrack_labels.h index 498814626e28..1723a67c0b0a 100644 --- a/include/net/netfilter/nf_conntrack_labels.h +++ b/include/net/netfilter/nf_conntrack_labels.h | |||
@@ -30,8 +30,7 @@ static inline struct nf_conn_labels *nf_ct_labels_ext_add(struct nf_conn *ct) | |||
30 | if (net->ct.labels_used == 0) | 30 | if (net->ct.labels_used == 0) |
31 | return NULL; | 31 | return NULL; |
32 | 32 | ||
33 | return nf_ct_ext_add_length(ct, NF_CT_EXT_LABELS, | 33 | return nf_ct_ext_add(ct, NF_CT_EXT_LABELS, GFP_ATOMIC); |
34 | sizeof(struct nf_conn_labels), GFP_ATOMIC); | ||
35 | #else | 34 | #else |
36 | return NULL; | 35 | return NULL; |
37 | #endif | 36 | #endif |
diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h index 5031e072567b..d79d1e9b9546 100644 --- a/include/net/netfilter/nf_tables.h +++ b/include/net/netfilter/nf_tables.h | |||
@@ -145,7 +145,7 @@ static inline enum nft_registers nft_type_to_reg(enum nft_data_types type) | |||
145 | return type == NFT_DATA_VERDICT ? NFT_REG_VERDICT : NFT_REG_1 * NFT_REG_SIZE / NFT_REG32_SIZE; | 145 | return type == NFT_DATA_VERDICT ? NFT_REG_VERDICT : NFT_REG_1 * NFT_REG_SIZE / NFT_REG32_SIZE; |
146 | } | 146 | } |
147 | 147 | ||
148 | unsigned int nft_parse_u32_check(const struct nlattr *attr, int max, u32 *dest); | 148 | int nft_parse_u32_check(const struct nlattr *attr, int max, u32 *dest); |
149 | unsigned int nft_parse_register(const struct nlattr *attr); | 149 | unsigned int nft_parse_register(const struct nlattr *attr); |
150 | int nft_dump_register(struct sk_buff *skb, unsigned int attr, unsigned int reg); | 150 | int nft_dump_register(struct sk_buff *skb, unsigned int attr, unsigned int reg); |
151 | 151 | ||
@@ -542,7 +542,8 @@ void *nft_set_elem_init(const struct nft_set *set, | |||
542 | const struct nft_set_ext_tmpl *tmpl, | 542 | const struct nft_set_ext_tmpl *tmpl, |
543 | const u32 *key, const u32 *data, | 543 | const u32 *key, const u32 *data, |
544 | u64 timeout, gfp_t gfp); | 544 | u64 timeout, gfp_t gfp); |
545 | void nft_set_elem_destroy(const struct nft_set *set, void *elem); | 545 | void nft_set_elem_destroy(const struct nft_set *set, void *elem, |
546 | bool destroy_expr); | ||
546 | 547 | ||
547 | /** | 548 | /** |
548 | * struct nft_set_gc_batch_head - nf_tables set garbage collection batch | 549 | * struct nft_set_gc_batch_head - nf_tables set garbage collection batch |
@@ -693,7 +694,6 @@ static inline int nft_expr_clone(struct nft_expr *dst, struct nft_expr *src) | |||
693 | { | 694 | { |
694 | int err; | 695 | int err; |
695 | 696 | ||
696 | __module_get(src->ops->type->owner); | ||
697 | if (src->ops->clone) { | 697 | if (src->ops->clone) { |
698 | dst->ops = src->ops; | 698 | dst->ops = src->ops; |
699 | err = src->ops->clone(dst, src); | 699 | err = src->ops->clone(dst, src); |
@@ -702,6 +702,8 @@ static inline int nft_expr_clone(struct nft_expr *dst, struct nft_expr *src) | |||
702 | } else { | 702 | } else { |
703 | memcpy(dst, src, src->ops->size); | 703 | memcpy(dst, src, src->ops->size); |
704 | } | 704 | } |
705 | |||
706 | __module_get(src->ops->type->owner); | ||
705 | return 0; | 707 | return 0; |
706 | } | 708 | } |
707 | 709 | ||
diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h index 87a7f42e7639..31acc3f4f132 100644 --- a/include/net/sctp/sctp.h +++ b/include/net/sctp/sctp.h | |||
@@ -152,7 +152,7 @@ void sctp_unhash_endpoint(struct sctp_endpoint *); | |||
152 | struct sock *sctp_err_lookup(struct net *net, int family, struct sk_buff *, | 152 | struct sock *sctp_err_lookup(struct net *net, int family, struct sk_buff *, |
153 | struct sctphdr *, struct sctp_association **, | 153 | struct sctphdr *, struct sctp_association **, |
154 | struct sctp_transport **); | 154 | struct sctp_transport **); |
155 | void sctp_err_finish(struct sock *, struct sctp_association *); | 155 | void sctp_err_finish(struct sock *, struct sctp_transport *); |
156 | void sctp_icmp_frag_needed(struct sock *, struct sctp_association *, | 156 | void sctp_icmp_frag_needed(struct sock *, struct sctp_association *, |
157 | struct sctp_transport *t, __u32 pmtu); | 157 | struct sctp_transport *t, __u32 pmtu); |
158 | void sctp_icmp_redirect(struct sock *, struct sctp_transport *, | 158 | void sctp_icmp_redirect(struct sock *, struct sctp_transport *, |
diff --git a/include/net/sock.h b/include/net/sock.h index 73c6b008f1b7..92b269709b9a 100644 --- a/include/net/sock.h +++ b/include/net/sock.h | |||
@@ -1596,11 +1596,11 @@ static inline void sock_put(struct sock *sk) | |||
1596 | void sock_gen_put(struct sock *sk); | 1596 | void sock_gen_put(struct sock *sk); |
1597 | 1597 | ||
1598 | int __sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested, | 1598 | int __sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested, |
1599 | unsigned int trim_cap); | 1599 | unsigned int trim_cap, bool refcounted); |
1600 | static inline int sk_receive_skb(struct sock *sk, struct sk_buff *skb, | 1600 | static inline int sk_receive_skb(struct sock *sk, struct sk_buff *skb, |
1601 | const int nested) | 1601 | const int nested) |
1602 | { | 1602 | { |
1603 | return __sk_receive_skb(sk, skb, nested, 1); | 1603 | return __sk_receive_skb(sk, skb, nested, 1, true); |
1604 | } | 1604 | } |
1605 | 1605 | ||
1606 | static inline void sk_tx_queue_set(struct sock *sk, int tx_queue) | 1606 | static inline void sk_tx_queue_set(struct sock *sk, int tx_queue) |
diff --git a/include/net/tcp.h b/include/net/tcp.h index 5b82d4d94834..123979fe12bf 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h | |||
@@ -805,7 +805,7 @@ static inline bool inet_exact_dif_match(struct net *net, struct sk_buff *skb) | |||
805 | { | 805 | { |
806 | #if IS_ENABLED(CONFIG_NET_L3_MASTER_DEV) | 806 | #if IS_ENABLED(CONFIG_NET_L3_MASTER_DEV) |
807 | if (!net->ipv4.sysctl_tcp_l3mdev_accept && | 807 | if (!net->ipv4.sysctl_tcp_l3mdev_accept && |
808 | ipv4_l3mdev_skb(TCP_SKB_CB(skb)->header.h4.flags)) | 808 | skb && ipv4_l3mdev_skb(TCP_SKB_CB(skb)->header.h4.flags)) |
809 | return true; | 809 | return true; |
810 | #endif | 810 | #endif |
811 | return false; | 811 | return false; |
@@ -1220,6 +1220,7 @@ static inline void tcp_prequeue_init(struct tcp_sock *tp) | |||
1220 | 1220 | ||
1221 | bool tcp_prequeue(struct sock *sk, struct sk_buff *skb); | 1221 | bool tcp_prequeue(struct sock *sk, struct sk_buff *skb); |
1222 | bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb); | 1222 | bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb); |
1223 | int tcp_filter(struct sock *sk, struct sk_buff *skb); | ||
1223 | 1224 | ||
1224 | #undef STATE_TRACE | 1225 | #undef STATE_TRACE |
1225 | 1226 | ||
diff --git a/include/uapi/linux/atm_zatm.h b/include/uapi/linux/atm_zatm.h index 5cd4d4d2dd1d..9c9c6ad55f14 100644 --- a/include/uapi/linux/atm_zatm.h +++ b/include/uapi/linux/atm_zatm.h | |||
@@ -14,7 +14,6 @@ | |||
14 | 14 | ||
15 | #include <linux/atmapi.h> | 15 | #include <linux/atmapi.h> |
16 | #include <linux/atmioc.h> | 16 | #include <linux/atmioc.h> |
17 | #include <linux/time.h> | ||
18 | 17 | ||
19 | #define ZATM_GETPOOL _IOW('a',ATMIOC_SARPRV+1,struct atmif_sioc) | 18 | #define ZATM_GETPOOL _IOW('a',ATMIOC_SARPRV+1,struct atmif_sioc) |
20 | /* get pool statistics */ | 19 | /* get pool statistics */ |
diff --git a/include/uapi/linux/bpqether.h b/include/uapi/linux/bpqether.h index a6c35e1a89ad..05865edaefda 100644 --- a/include/uapi/linux/bpqether.h +++ b/include/uapi/linux/bpqether.h | |||
@@ -5,9 +5,7 @@ | |||
5 | * Defines for the BPQETHER pseudo device driver | 5 | * Defines for the BPQETHER pseudo device driver |
6 | */ | 6 | */ |
7 | 7 | ||
8 | #ifndef __LINUX_IF_ETHER_H | ||
9 | #include <linux/if_ether.h> | 8 | #include <linux/if_ether.h> |
10 | #endif | ||
11 | 9 | ||
12 | #define SIOCSBPQETHOPT (SIOCDEVPRIVATE+0) /* reserved */ | 10 | #define SIOCSBPQETHOPT (SIOCDEVPRIVATE+0) /* reserved */ |
13 | #define SIOCSBPQETHADDR (SIOCDEVPRIVATE+1) | 11 | #define SIOCSBPQETHADDR (SIOCDEVPRIVATE+1) |
diff --git a/include/uapi/sound/asoc.h b/include/uapi/sound/asoc.h index 33d00a4ce656..819d895edfdc 100644 --- a/include/uapi/sound/asoc.h +++ b/include/uapi/sound/asoc.h | |||
@@ -18,12 +18,6 @@ | |||
18 | #include <linux/types.h> | 18 | #include <linux/types.h> |
19 | #include <sound/asound.h> | 19 | #include <sound/asound.h> |
20 | 20 | ||
21 | #ifndef __KERNEL__ | ||
22 | #error This API is an early revision and not enabled in the current | ||
23 | #error kernel release, it will be enabled in a future kernel version | ||
24 | #error with incompatible changes to what is here. | ||
25 | #endif | ||
26 | |||
27 | /* | 21 | /* |
28 | * Maximum number of channels topology kcontrol can represent. | 22 | * Maximum number of channels topology kcontrol can represent. |
29 | */ | 23 | */ |
diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c index 570eeca7bdfa..ad1bc67aff1b 100644 --- a/kernel/bpf/hashtab.c +++ b/kernel/bpf/hashtab.c | |||
@@ -687,7 +687,8 @@ static void delete_all_elements(struct bpf_htab *htab) | |||
687 | 687 | ||
688 | hlist_for_each_entry_safe(l, n, head, hash_node) { | 688 | hlist_for_each_entry_safe(l, n, head, hash_node) { |
689 | hlist_del_rcu(&l->hash_node); | 689 | hlist_del_rcu(&l->hash_node); |
690 | htab_elem_free(htab, l); | 690 | if (l->state != HTAB_EXTRA_ELEM_USED) |
691 | htab_elem_free(htab, l); | ||
691 | } | 692 | } |
692 | } | 693 | } |
693 | } | 694 | } |
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c index 228f962447a5..237f3d6a7ddc 100644 --- a/kernel/bpf/syscall.c +++ b/kernel/bpf/syscall.c | |||
@@ -194,7 +194,7 @@ static int map_create(union bpf_attr *attr) | |||
194 | 194 | ||
195 | err = bpf_map_charge_memlock(map); | 195 | err = bpf_map_charge_memlock(map); |
196 | if (err) | 196 | if (err) |
197 | goto free_map; | 197 | goto free_map_nouncharge; |
198 | 198 | ||
199 | err = bpf_map_new_fd(map); | 199 | err = bpf_map_new_fd(map); |
200 | if (err < 0) | 200 | if (err < 0) |
@@ -204,6 +204,8 @@ static int map_create(union bpf_attr *attr) | |||
204 | return err; | 204 | return err; |
205 | 205 | ||
206 | free_map: | 206 | free_map: |
207 | bpf_map_uncharge_memlock(map); | ||
208 | free_map_nouncharge: | ||
207 | map->ops->map_free(map); | 209 | map->ops->map_free(map); |
208 | return err; | 210 | return err; |
209 | } | 211 | } |
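
The map_create() hunk above splits the unwind path into free_map (uncharge, then free) and free_map_nouncharge (free only), so a failure before the memlock charge no longer uncharges something that was never charged. A standalone sketch of that layered-goto cleanup (the labels and helpers are placeholders):

#include <stdio.h>

static int charged;

static int charge(void)    { charged++; return 0; }
static void uncharge(void) { charged--; }

static int create(int fail_before_charge, int fail_after_charge)
{
        int err;

        if (fail_before_charge) {
                err = -1;
                goto free_nouncharge;   /* jumping to "free" here would underflow */
        }

        err = charge();
        if (err)
                goto free_nouncharge;

        if (fail_after_charge) {
                err = -1;
                goto free;              /* must undo the charge first */
        }
        return 0;

free:
        uncharge();
free_nouncharge:
        /* ...free the object itself here... */
        return err;
}

int main(void)
{
        create(1, 0);
        create(0, 1);
        printf("outstanding charges after failures: %d (expected 0)\n", charged);
        return 0;
}
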
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c index 9c4d30483264..6b669593e7eb 100644 --- a/kernel/irq/manage.c +++ b/kernel/irq/manage.c | |||
@@ -1341,12 +1341,12 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new) | |||
1341 | 1341 | ||
1342 | } else if (new->flags & IRQF_TRIGGER_MASK) { | 1342 | } else if (new->flags & IRQF_TRIGGER_MASK) { |
1343 | unsigned int nmsk = new->flags & IRQF_TRIGGER_MASK; | 1343 | unsigned int nmsk = new->flags & IRQF_TRIGGER_MASK; |
1344 | unsigned int omsk = irq_settings_get_trigger_mask(desc); | 1344 | unsigned int omsk = irqd_get_trigger_type(&desc->irq_data); |
1345 | 1345 | ||
1346 | if (nmsk != omsk) | 1346 | if (nmsk != omsk) |
1347 | /* hope the handler works with current trigger mode */ | 1347 | /* hope the handler works with current trigger mode */ |
1348 | pr_warn("irq %d uses trigger mode %u; requested %u\n", | 1348 | pr_warn("irq %d uses trigger mode %u; requested %u\n", |
1349 | irq, nmsk, omsk); | 1349 | irq, omsk, nmsk); |
1350 | } | 1350 | } |
1351 | 1351 | ||
1352 | *old_ptr = new; | 1352 | *old_ptr = new; |
diff --git a/kernel/power/suspend_test.c b/kernel/power/suspend_test.c index 084452e34a12..bdff5ed57f10 100644 --- a/kernel/power/suspend_test.c +++ b/kernel/power/suspend_test.c | |||
@@ -203,8 +203,10 @@ static int __init test_suspend(void) | |||
203 | 203 | ||
204 | /* RTCs have initialized by now too ... can we use one? */ | 204 | /* RTCs have initialized by now too ... can we use one? */ |
205 | dev = class_find_device(rtc_class, NULL, NULL, has_wakealarm); | 205 | dev = class_find_device(rtc_class, NULL, NULL, has_wakealarm); |
206 | if (dev) | 206 | if (dev) { |
207 | rtc = rtc_class_open(dev_name(dev)); | 207 | rtc = rtc_class_open(dev_name(dev)); |
208 | put_device(dev); | ||
209 | } | ||
208 | if (!rtc) { | 210 | if (!rtc) { |
209 | printk(warn_no_rtc); | 211 | printk(warn_no_rtc); |
210 | return 0; | 212 | return 0; |
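
The test_suspend() hunk above adds put_device() after class_find_device(), following the usual lookup rule: a successful find returns the device with an elevated reference count that the caller must drop once done. A toy standalone model of that get/put balance (not the driver-core implementation):

#include <stdio.h>

struct dev { int refcount; };

static struct dev rtc_dev = { .refcount = 1 };

static struct dev *find_device(int found)
{
        if (!found)
                return NULL;
        rtc_dev.refcount++;          /* the lookup takes a reference */
        return &rtc_dev;
}

static void put_device(struct dev *d)
{
        d->refcount--;
}

int main(void)
{
        struct dev *dev = find_device(1);

        if (dev) {
                /* ...use the device, e.g. open the RTC by name... */
                put_device(dev);     /* then drop the lookup reference */
        }
        printf("refcount back to %d\n", rtc_dev.refcount);
        return 0;
}
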
diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c index de08fc90baaf..f7a55e9ff2f7 100644 --- a/kernel/printk/printk.c +++ b/kernel/printk/printk.c | |||
@@ -253,17 +253,6 @@ static int preferred_console = -1; | |||
253 | int console_set_on_cmdline; | 253 | int console_set_on_cmdline; |
254 | EXPORT_SYMBOL(console_set_on_cmdline); | 254 | EXPORT_SYMBOL(console_set_on_cmdline); |
255 | 255 | ||
256 | #ifdef CONFIG_OF | ||
257 | static bool of_specified_console; | ||
258 | |||
259 | void console_set_by_of(void) | ||
260 | { | ||
261 | of_specified_console = true; | ||
262 | } | ||
263 | #else | ||
264 | # define of_specified_console false | ||
265 | #endif | ||
266 | |||
267 | /* Flag: console code may call schedule() */ | 256 | /* Flag: console code may call schedule() */ |
268 | static int console_may_schedule; | 257 | static int console_may_schedule; |
269 | 258 | ||
@@ -794,8 +783,6 @@ static ssize_t devkmsg_write(struct kiocb *iocb, struct iov_iter *from) | |||
794 | return ret; | 783 | return ret; |
795 | } | 784 | } |
796 | 785 | ||
797 | static void cont_flush(void); | ||
798 | |||
799 | static ssize_t devkmsg_read(struct file *file, char __user *buf, | 786 | static ssize_t devkmsg_read(struct file *file, char __user *buf, |
800 | size_t count, loff_t *ppos) | 787 | size_t count, loff_t *ppos) |
801 | { | 788 | { |
@@ -811,7 +798,6 @@ static ssize_t devkmsg_read(struct file *file, char __user *buf, | |||
811 | if (ret) | 798 | if (ret) |
812 | return ret; | 799 | return ret; |
813 | raw_spin_lock_irq(&logbuf_lock); | 800 | raw_spin_lock_irq(&logbuf_lock); |
814 | cont_flush(); | ||
815 | while (user->seq == log_next_seq) { | 801 | while (user->seq == log_next_seq) { |
816 | if (file->f_flags & O_NONBLOCK) { | 802 | if (file->f_flags & O_NONBLOCK) { |
817 | ret = -EAGAIN; | 803 | ret = -EAGAIN; |
@@ -874,7 +860,6 @@ static loff_t devkmsg_llseek(struct file *file, loff_t offset, int whence) | |||
874 | return -ESPIPE; | 860 | return -ESPIPE; |
875 | 861 | ||
876 | raw_spin_lock_irq(&logbuf_lock); | 862 | raw_spin_lock_irq(&logbuf_lock); |
877 | cont_flush(); | ||
878 | switch (whence) { | 863 | switch (whence) { |
879 | case SEEK_SET: | 864 | case SEEK_SET: |
880 | /* the first record */ | 865 | /* the first record */ |
@@ -913,7 +898,6 @@ static unsigned int devkmsg_poll(struct file *file, poll_table *wait) | |||
913 | poll_wait(file, &log_wait, wait); | 898 | poll_wait(file, &log_wait, wait); |
914 | 899 | ||
915 | raw_spin_lock_irq(&logbuf_lock); | 900 | raw_spin_lock_irq(&logbuf_lock); |
916 | cont_flush(); | ||
917 | if (user->seq < log_next_seq) { | 901 | if (user->seq < log_next_seq) { |
918 | /* return error when data has vanished underneath us */ | 902 | /* return error when data has vanished underneath us */ |
919 | if (user->seq < log_first_seq) | 903 | if (user->seq < log_first_seq) |
@@ -1300,7 +1284,6 @@ static int syslog_print(char __user *buf, int size) | |||
1300 | size_t skip; | 1284 | size_t skip; |
1301 | 1285 | ||
1302 | raw_spin_lock_irq(&logbuf_lock); | 1286 | raw_spin_lock_irq(&logbuf_lock); |
1303 | cont_flush(); | ||
1304 | if (syslog_seq < log_first_seq) { | 1287 | if (syslog_seq < log_first_seq) { |
1305 | /* messages are gone, move to first one */ | 1288 | /* messages are gone, move to first one */ |
1306 | syslog_seq = log_first_seq; | 1289 | syslog_seq = log_first_seq; |
@@ -1360,7 +1343,6 @@ static int syslog_print_all(char __user *buf, int size, bool clear) | |||
1360 | return -ENOMEM; | 1343 | return -ENOMEM; |
1361 | 1344 | ||
1362 | raw_spin_lock_irq(&logbuf_lock); | 1345 | raw_spin_lock_irq(&logbuf_lock); |
1363 | cont_flush(); | ||
1364 | if (buf) { | 1346 | if (buf) { |
1365 | u64 next_seq; | 1347 | u64 next_seq; |
1366 | u64 seq; | 1348 | u64 seq; |
@@ -1522,7 +1504,6 @@ int do_syslog(int type, char __user *buf, int len, int source) | |||
1522 | /* Number of chars in the log buffer */ | 1504 | /* Number of chars in the log buffer */ |
1523 | case SYSLOG_ACTION_SIZE_UNREAD: | 1505 | case SYSLOG_ACTION_SIZE_UNREAD: |
1524 | raw_spin_lock_irq(&logbuf_lock); | 1506 | raw_spin_lock_irq(&logbuf_lock); |
1525 | cont_flush(); | ||
1526 | if (syslog_seq < log_first_seq) { | 1507 | if (syslog_seq < log_first_seq) { |
1527 | /* messages are gone, move to first one */ | 1508 | /* messages are gone, move to first one */ |
1528 | syslog_seq = log_first_seq; | 1509 | syslog_seq = log_first_seq; |
@@ -2657,7 +2638,7 @@ void register_console(struct console *newcon) | |||
2657 | * didn't select a console we take the first one | 2638 | * didn't select a console we take the first one |
2658 | * that registers here. | 2639 | * that registers here. |
2659 | */ | 2640 | */ |
2660 | if (preferred_console < 0 && !of_specified_console) { | 2641 | if (preferred_console < 0) { |
2661 | if (newcon->index < 0) | 2642 | if (newcon->index < 0) |
2662 | newcon->index = 0; | 2643 | newcon->index = 0; |
2663 | if (newcon->setup == NULL || | 2644 | if (newcon->setup == NULL || |
@@ -3039,7 +3020,6 @@ void kmsg_dump(enum kmsg_dump_reason reason) | |||
3039 | dumper->active = true; | 3020 | dumper->active = true; |
3040 | 3021 | ||
3041 | raw_spin_lock_irqsave(&logbuf_lock, flags); | 3022 | raw_spin_lock_irqsave(&logbuf_lock, flags); |
3042 | cont_flush(); | ||
3043 | dumper->cur_seq = clear_seq; | 3023 | dumper->cur_seq = clear_seq; |
3044 | dumper->cur_idx = clear_idx; | 3024 | dumper->cur_idx = clear_idx; |
3045 | dumper->next_seq = log_next_seq; | 3025 | dumper->next_seq = log_next_seq; |
@@ -3130,7 +3110,6 @@ bool kmsg_dump_get_line(struct kmsg_dumper *dumper, bool syslog, | |||
3130 | bool ret; | 3110 | bool ret; |
3131 | 3111 | ||
3132 | raw_spin_lock_irqsave(&logbuf_lock, flags); | 3112 | raw_spin_lock_irqsave(&logbuf_lock, flags); |
3133 | cont_flush(); | ||
3134 | ret = kmsg_dump_get_line_nolock(dumper, syslog, line, size, len); | 3113 | ret = kmsg_dump_get_line_nolock(dumper, syslog, line, size, len); |
3135 | raw_spin_unlock_irqrestore(&logbuf_lock, flags); | 3114 | raw_spin_unlock_irqrestore(&logbuf_lock, flags); |
3136 | 3115 | ||
@@ -3173,7 +3152,6 @@ bool kmsg_dump_get_buffer(struct kmsg_dumper *dumper, bool syslog, | |||
3173 | goto out; | 3152 | goto out; |
3174 | 3153 | ||
3175 | raw_spin_lock_irqsave(&logbuf_lock, flags); | 3154 | raw_spin_lock_irqsave(&logbuf_lock, flags); |
3176 | cont_flush(); | ||
3177 | if (dumper->cur_seq < log_first_seq) { | 3155 | if (dumper->cur_seq < log_first_seq) { |
3178 | /* messages are gone, move to first available one */ | 3156 | /* messages are gone, move to first available one */ |
3179 | dumper->cur_seq = log_first_seq; | 3157 | dumper->cur_seq = log_first_seq; |
diff --git a/kernel/taskstats.c b/kernel/taskstats.c index b3f05ee20d18..cbb387a265db 100644 --- a/kernel/taskstats.c +++ b/kernel/taskstats.c | |||
@@ -54,7 +54,11 @@ static const struct nla_policy taskstats_cmd_get_policy[TASKSTATS_CMD_ATTR_MAX+1 | |||
54 | [TASKSTATS_CMD_ATTR_REGISTER_CPUMASK] = { .type = NLA_STRING }, | 54 | [TASKSTATS_CMD_ATTR_REGISTER_CPUMASK] = { .type = NLA_STRING }, |
55 | [TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK] = { .type = NLA_STRING },}; | 55 | [TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK] = { .type = NLA_STRING },}; |
56 | 56 | ||
57 | static const struct nla_policy cgroupstats_cmd_get_policy[CGROUPSTATS_CMD_ATTR_MAX+1] = { | 57 | /* |
58 | * We have to use TASKSTATS_CMD_ATTR_MAX here, it is the maxattr in the family. | ||
59 | * Make sure they are always aligned. | ||
60 | */ | ||
61 | static const struct nla_policy cgroupstats_cmd_get_policy[TASKSTATS_CMD_ATTR_MAX+1] = { | ||
58 | [CGROUPSTATS_CMD_ATTR_FD] = { .type = NLA_U32 }, | 62 | [CGROUPSTATS_CMD_ATTR_FD] = { .type = NLA_U32 }, |
59 | }; | 63 | }; |
60 | 64 | ||
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c index 2050a7652a86..da87b3cba5b3 100644 --- a/kernel/trace/ftrace.c +++ b/kernel/trace/ftrace.c | |||
@@ -1862,6 +1862,10 @@ static int __ftrace_hash_update_ipmodify(struct ftrace_ops *ops, | |||
1862 | 1862 | ||
1863 | /* Update rec->flags */ | 1863 | /* Update rec->flags */ |
1864 | do_for_each_ftrace_rec(pg, rec) { | 1864 | do_for_each_ftrace_rec(pg, rec) { |
1865 | |||
1866 | if (rec->flags & FTRACE_FL_DISABLED) | ||
1867 | continue; | ||
1868 | |||
1865 | /* We need to update only differences of filter_hash */ | 1869 | /* We need to update only differences of filter_hash */ |
1866 | in_old = !!ftrace_lookup_ip(old_hash, rec->ip); | 1870 | in_old = !!ftrace_lookup_ip(old_hash, rec->ip); |
1867 | in_new = !!ftrace_lookup_ip(new_hash, rec->ip); | 1871 | in_new = !!ftrace_lookup_ip(new_hash, rec->ip); |
@@ -1884,6 +1888,10 @@ rollback: | |||
1884 | 1888 | ||
1885 | /* Roll back what we did above */ | 1889 | /* Roll back what we did above */ |
1886 | do_for_each_ftrace_rec(pg, rec) { | 1890 | do_for_each_ftrace_rec(pg, rec) { |
1891 | |||
1892 | if (rec->flags & FTRACE_FL_DISABLED) | ||
1893 | continue; | ||
1894 | |||
1887 | if (rec == end) | 1895 | if (rec == end) |
1888 | goto err_out; | 1896 | goto err_out; |
1889 | 1897 | ||
@@ -2397,6 +2405,10 @@ void __weak ftrace_replace_code(int enable) | |||
2397 | return; | 2405 | return; |
2398 | 2406 | ||
2399 | do_for_each_ftrace_rec(pg, rec) { | 2407 | do_for_each_ftrace_rec(pg, rec) { |
2408 | |||
2409 | if (rec->flags & FTRACE_FL_DISABLED) | ||
2410 | continue; | ||
2411 | |||
2400 | failed = __ftrace_replace_code(rec, enable); | 2412 | failed = __ftrace_replace_code(rec, enable); |
2401 | if (failed) { | 2413 | if (failed) { |
2402 | ftrace_bug(failed, rec); | 2414 | ftrace_bug(failed, rec); |
@@ -2763,7 +2775,7 @@ static int ftrace_shutdown(struct ftrace_ops *ops, int command) | |||
2763 | struct dyn_ftrace *rec; | 2775 | struct dyn_ftrace *rec; |
2764 | 2776 | ||
2765 | do_for_each_ftrace_rec(pg, rec) { | 2777 | do_for_each_ftrace_rec(pg, rec) { |
2766 | if (FTRACE_WARN_ON_ONCE(rec->flags)) | 2778 | if (FTRACE_WARN_ON_ONCE(rec->flags & ~FTRACE_FL_DISABLED)) |
2767 | pr_warn(" %pS flags:%lx\n", | 2779 | pr_warn(" %pS flags:%lx\n", |
2768 | (void *)rec->ip, rec->flags); | 2780 | (void *)rec->ip, rec->flags); |
2769 | } while_for_each_ftrace_rec(); | 2781 | } while_for_each_ftrace_rec(); |
@@ -3598,6 +3610,10 @@ match_records(struct ftrace_hash *hash, char *func, int len, char *mod) | |||
3598 | goto out_unlock; | 3610 | goto out_unlock; |
3599 | 3611 | ||
3600 | do_for_each_ftrace_rec(pg, rec) { | 3612 | do_for_each_ftrace_rec(pg, rec) { |
3613 | |||
3614 | if (rec->flags & FTRACE_FL_DISABLED) | ||
3615 | continue; | ||
3616 | |||
3601 | if (ftrace_match_record(rec, &func_g, mod_match, exclude_mod)) { | 3617 | if (ftrace_match_record(rec, &func_g, mod_match, exclude_mod)) { |
3602 | ret = enter_record(hash, rec, clear_filter); | 3618 | ret = enter_record(hash, rec, clear_filter); |
3603 | if (ret < 0) { | 3619 | if (ret < 0) { |
@@ -3793,6 +3809,9 @@ register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops, | |||
3793 | 3809 | ||
3794 | do_for_each_ftrace_rec(pg, rec) { | 3810 | do_for_each_ftrace_rec(pg, rec) { |
3795 | 3811 | ||
3812 | if (rec->flags & FTRACE_FL_DISABLED) | ||
3813 | continue; | ||
3814 | |||
3796 | if (!ftrace_match_record(rec, &func_g, NULL, 0)) | 3815 | if (!ftrace_match_record(rec, &func_g, NULL, 0)) |
3797 | continue; | 3816 | continue; |
3798 | 3817 | ||
@@ -4685,6 +4704,9 @@ ftrace_set_func(unsigned long *array, int *idx, int size, char *buffer) | |||
4685 | 4704 | ||
4686 | do_for_each_ftrace_rec(pg, rec) { | 4705 | do_for_each_ftrace_rec(pg, rec) { |
4687 | 4706 | ||
4707 | if (rec->flags & FTRACE_FL_DISABLED) | ||
4708 | continue; | ||
4709 | |||
4688 | if (ftrace_match_record(rec, &func_g, NULL, 0)) { | 4710 | if (ftrace_match_record(rec, &func_g, NULL, 0)) { |
4689 | /* if it is in the array */ | 4711 | /* if it is in the array */ |
4690 | exists = false; | 4712 | exists = false; |
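Every ftrace hunk above adds the same guard; condensed into one hedged sketch (simplified record type and a plain loop instead of the real do_for_each_ftrace_rec() machinery):

/* Sketch only: records marked disabled (e.g. belonging to a module that is
 * not live) are skipped by every walker so they are never patched or matched.
 */
struct dyn_rec {
	unsigned long ip;
	unsigned long flags;
};
#define REC_FL_DISABLED	(1UL << 0)	/* stand-in for FTRACE_FL_DISABLED */

static void walk_records(struct dyn_rec *recs, int nr)
{
	int i;

	for (i = 0; i < nr; i++) {
		if (recs[i].flags & REC_FL_DISABLED)
			continue;	/* leave disabled entries untouched */
		/* ... update, match or patch recs[i] as the real loops do ... */
	}
}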
diff --git a/lib/iov_iter.c b/lib/iov_iter.c index f0c7f1481bae..f2bd21b93dfc 100644 --- a/lib/iov_iter.c +++ b/lib/iov_iter.c | |||
@@ -683,10 +683,11 @@ static void pipe_advance(struct iov_iter *i, size_t size) | |||
683 | struct pipe_inode_info *pipe = i->pipe; | 683 | struct pipe_inode_info *pipe = i->pipe; |
684 | struct pipe_buffer *buf; | 684 | struct pipe_buffer *buf; |
685 | int idx = i->idx; | 685 | int idx = i->idx; |
686 | size_t off = i->iov_offset; | 686 | size_t off = i->iov_offset, orig_sz; |
687 | 687 | ||
688 | if (unlikely(i->count < size)) | 688 | if (unlikely(i->count < size)) |
689 | size = i->count; | 689 | size = i->count; |
690 | orig_sz = size; | ||
690 | 691 | ||
691 | if (size) { | 692 | if (size) { |
692 | if (off) /* make it relative to the beginning of buffer */ | 693 | if (off) /* make it relative to the beginning of buffer */ |
@@ -713,6 +714,7 @@ static void pipe_advance(struct iov_iter *i, size_t size) | |||
713 | pipe->nrbufs--; | 714 | pipe->nrbufs--; |
714 | } | 715 | } |
715 | } | 716 | } |
717 | i->count -= orig_sz; | ||
716 | } | 718 | } |
717 | 719 | ||
718 | void iov_iter_advance(struct iov_iter *i, size_t size) | 720 | void iov_iter_advance(struct iov_iter *i, size_t size) |
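The single added line matters because iov_iter_advance() expects every backend to consume the byte budget itself; the pipe backend moved its cursor but left i->count untouched. A toy model of the invariant (types invented):

/* Illustration only: whoever advances the cursor must also shrink the
 * remaining-byte count, or callers looping on iov_iter_count() never finish.
 */
struct toy_iter {
	size_t count;		/* bytes left in the iteration */
	/* ... backend-specific cursor state ... */
};

static void toy_advance(struct toy_iter *i, size_t size)
{
	if (size > i->count)
		size = i->count;
	/* ... move the backend cursor forward by 'size' bytes ... */
	i->count -= size;	/* the accounting pipe_advance() was missing */
}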
diff --git a/lib/stackdepot.c b/lib/stackdepot.c index 4d830e299989..f87d138e9672 100644 --- a/lib/stackdepot.c +++ b/lib/stackdepot.c | |||
@@ -192,6 +192,7 @@ void depot_fetch_stack(depot_stack_handle_t handle, struct stack_trace *trace) | |||
192 | trace->entries = stack->entries; | 192 | trace->entries = stack->entries; |
193 | trace->skip = 0; | 193 | trace->skip = 0; |
194 | } | 194 | } |
195 | EXPORT_SYMBOL_GPL(depot_fetch_stack); | ||
195 | 196 | ||
196 | /** | 197 | /** |
197 | * depot_save_stack - save stack in a stack depot. | 198 | * depot_save_stack - save stack in a stack depot. |
@@ -283,3 +284,4 @@ exit: | |||
283 | fast_exit: | 284 | fast_exit: |
284 | return retval; | 285 | return retval; |
285 | } | 286 | } |
287 | EXPORT_SYMBOL_GPL(depot_save_stack); | ||
diff --git a/mm/cma.c b/mm/cma.c | |||
@@ -385,6 +385,9 @@ struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align) | |||
385 | bitmap_maxno = cma_bitmap_maxno(cma); | 385 | bitmap_maxno = cma_bitmap_maxno(cma); |
386 | bitmap_count = cma_bitmap_pages_to_bits(cma, count); | 386 | bitmap_count = cma_bitmap_pages_to_bits(cma, count); |
387 | 387 | ||
388 | if (bitmap_count > bitmap_maxno) | ||
389 | return NULL; | ||
390 | |||
388 | for (;;) { | 391 | for (;;) { |
389 | mutex_lock(&cma->lock); | 392 | mutex_lock(&cma->lock); |
390 | bitmap_no = bitmap_find_next_zero_area_off(cma->bitmap, | 393 | bitmap_no = bitmap_find_next_zero_area_off(cma->bitmap, |
diff --git a/mm/filemap.c b/mm/filemap.c index c7fe2f16503f..50b52fe51937 100644 --- a/mm/filemap.c +++ b/mm/filemap.c | |||
@@ -1732,6 +1732,9 @@ find_page: | |||
1732 | if (inode->i_blkbits == PAGE_SHIFT || | 1732 | if (inode->i_blkbits == PAGE_SHIFT || |
1733 | !mapping->a_ops->is_partially_uptodate) | 1733 | !mapping->a_ops->is_partially_uptodate) |
1734 | goto page_not_up_to_date; | 1734 | goto page_not_up_to_date; |
1735 | /* pipes can't handle partially uptodate pages */ | ||
1736 | if (unlikely(iter->type & ITER_PIPE)) | ||
1737 | goto page_not_up_to_date; | ||
1735 | if (!trylock_page(page)) | 1738 | if (!trylock_page(page)) |
1736 | goto page_not_up_to_date; | 1739 | goto page_not_up_to_date; |
1737 | /* Did it get truncated before we got the lock? */ | 1740 | /* Did it get truncated before we got the lock? */ |
diff --git a/mm/huge_memory.c b/mm/huge_memory.c index cdcd25cb30fe..eff3de359d50 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c | |||
@@ -1426,11 +1426,12 @@ int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma, | |||
1426 | 1426 | ||
1427 | bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr, | 1427 | bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr, |
1428 | unsigned long new_addr, unsigned long old_end, | 1428 | unsigned long new_addr, unsigned long old_end, |
1429 | pmd_t *old_pmd, pmd_t *new_pmd) | 1429 | pmd_t *old_pmd, pmd_t *new_pmd, bool *need_flush) |
1430 | { | 1430 | { |
1431 | spinlock_t *old_ptl, *new_ptl; | 1431 | spinlock_t *old_ptl, *new_ptl; |
1432 | pmd_t pmd; | 1432 | pmd_t pmd; |
1433 | struct mm_struct *mm = vma->vm_mm; | 1433 | struct mm_struct *mm = vma->vm_mm; |
1434 | bool force_flush = false; | ||
1434 | 1435 | ||
1435 | if ((old_addr & ~HPAGE_PMD_MASK) || | 1436 | if ((old_addr & ~HPAGE_PMD_MASK) || |
1436 | (new_addr & ~HPAGE_PMD_MASK) || | 1437 | (new_addr & ~HPAGE_PMD_MASK) || |
@@ -1455,6 +1456,8 @@ bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr, | |||
1455 | new_ptl = pmd_lockptr(mm, new_pmd); | 1456 | new_ptl = pmd_lockptr(mm, new_pmd); |
1456 | if (new_ptl != old_ptl) | 1457 | if (new_ptl != old_ptl) |
1457 | spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING); | 1458 | spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING); |
1459 | if (pmd_present(*old_pmd) && pmd_dirty(*old_pmd)) | ||
1460 | force_flush = true; | ||
1458 | pmd = pmdp_huge_get_and_clear(mm, old_addr, old_pmd); | 1461 | pmd = pmdp_huge_get_and_clear(mm, old_addr, old_pmd); |
1459 | VM_BUG_ON(!pmd_none(*new_pmd)); | 1462 | VM_BUG_ON(!pmd_none(*new_pmd)); |
1460 | 1463 | ||
@@ -1467,6 +1470,10 @@ bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr, | |||
1467 | set_pmd_at(mm, new_addr, new_pmd, pmd_mksoft_dirty(pmd)); | 1470 | set_pmd_at(mm, new_addr, new_pmd, pmd_mksoft_dirty(pmd)); |
1468 | if (new_ptl != old_ptl) | 1471 | if (new_ptl != old_ptl) |
1469 | spin_unlock(new_ptl); | 1472 | spin_unlock(new_ptl); |
1473 | if (force_flush) | ||
1474 | flush_tlb_range(vma, old_addr, old_addr + PMD_SIZE); | ||
1475 | else | ||
1476 | *need_flush = true; | ||
1470 | spin_unlock(old_ptl); | 1477 | spin_unlock(old_ptl); |
1471 | return true; | 1478 | return true; |
1472 | } | 1479 | } |
diff --git a/mm/hugetlb.c b/mm/hugetlb.c index ec49d9ef1eef..418bf01a50ed 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c | |||
@@ -1826,11 +1826,17 @@ static void return_unused_surplus_pages(struct hstate *h, | |||
1826 | * is not the case is if a reserve map was changed between calls. It | 1826 | * is not the case is if a reserve map was changed between calls. It |
1827 | * is the responsibility of the caller to notice the difference and | 1827 | * is the responsibility of the caller to notice the difference and |
1828 | * take appropriate action. | 1828 | * take appropriate action. |
1829 | * | ||
1830 | * vma_add_reservation is used in error paths where a reservation must | ||
1831 | * be restored when a newly allocated huge page must be freed. It is | ||
1832 | * to be called after calling vma_needs_reservation to determine if a | ||
1833 | * reservation exists. | ||
1829 | */ | 1834 | */ |
1830 | enum vma_resv_mode { | 1835 | enum vma_resv_mode { |
1831 | VMA_NEEDS_RESV, | 1836 | VMA_NEEDS_RESV, |
1832 | VMA_COMMIT_RESV, | 1837 | VMA_COMMIT_RESV, |
1833 | VMA_END_RESV, | 1838 | VMA_END_RESV, |
1839 | VMA_ADD_RESV, | ||
1834 | }; | 1840 | }; |
1835 | static long __vma_reservation_common(struct hstate *h, | 1841 | static long __vma_reservation_common(struct hstate *h, |
1836 | struct vm_area_struct *vma, unsigned long addr, | 1842 | struct vm_area_struct *vma, unsigned long addr, |
@@ -1856,6 +1862,14 @@ static long __vma_reservation_common(struct hstate *h, | |||
1856 | region_abort(resv, idx, idx + 1); | 1862 | region_abort(resv, idx, idx + 1); |
1857 | ret = 0; | 1863 | ret = 0; |
1858 | break; | 1864 | break; |
1865 | case VMA_ADD_RESV: | ||
1866 | if (vma->vm_flags & VM_MAYSHARE) | ||
1867 | ret = region_add(resv, idx, idx + 1); | ||
1868 | else { | ||
1869 | region_abort(resv, idx, idx + 1); | ||
1870 | ret = region_del(resv, idx, idx + 1); | ||
1871 | } | ||
1872 | break; | ||
1859 | default: | 1873 | default: |
1860 | BUG(); | 1874 | BUG(); |
1861 | } | 1875 | } |
@@ -1903,6 +1917,56 @@ static void vma_end_reservation(struct hstate *h, | |||
1903 | (void)__vma_reservation_common(h, vma, addr, VMA_END_RESV); | 1917 | (void)__vma_reservation_common(h, vma, addr, VMA_END_RESV); |
1904 | } | 1918 | } |
1905 | 1919 | ||
1920 | static long vma_add_reservation(struct hstate *h, | ||
1921 | struct vm_area_struct *vma, unsigned long addr) | ||
1922 | { | ||
1923 | return __vma_reservation_common(h, vma, addr, VMA_ADD_RESV); | ||
1924 | } | ||
1925 | |||
1926 | /* | ||
1927 | * This routine is called to restore a reservation on error paths. In the | ||
1928 | * specific error paths, a huge page was allocated (via alloc_huge_page) | ||
1929 | * and is about to be freed. If a reservation for the page existed, | ||
1930 | * alloc_huge_page would have consumed the reservation and set PagePrivate | ||
1931 | * in the newly allocated page. When the page is freed via free_huge_page, | ||
1932 | * the global reservation count will be incremented if PagePrivate is set. | ||
1933 | * However, free_huge_page can not adjust the reserve map. Adjust the | ||
1934 | * reserve map here to be consistent with global reserve count adjustments | ||
1935 | * to be made by free_huge_page. | ||
1936 | */ | ||
1937 | static void restore_reserve_on_error(struct hstate *h, | ||
1938 | struct vm_area_struct *vma, unsigned long address, | ||
1939 | struct page *page) | ||
1940 | { | ||
1941 | if (unlikely(PagePrivate(page))) { | ||
1942 | long rc = vma_needs_reservation(h, vma, address); | ||
1943 | |||
1944 | if (unlikely(rc < 0)) { | ||
1945 | /* | ||
1946 | * Rare out of memory condition in reserve map | ||
1947 | * manipulation. Clear PagePrivate so that | ||
1948 | * global reserve count will not be incremented | ||
1949 | * by free_huge_page. This will make it appear | ||
1950 | * as though the reservation for this page was | ||
1951 | * consumed. This may prevent the task from | ||
1952 | * faulting in the page at a later time. This | ||
1953 | * is better than inconsistent global huge page | ||
1954 | * accounting of reserve counts. | ||
1955 | */ | ||
1956 | ClearPagePrivate(page); | ||
1957 | } else if (rc) { | ||
1958 | rc = vma_add_reservation(h, vma, address); | ||
1959 | if (unlikely(rc < 0)) | ||
1960 | /* | ||
1961 | * See above comment about rare out of | ||
1962 | * memory condition. | ||
1963 | */ | ||
1964 | ClearPagePrivate(page); | ||
1965 | } else | ||
1966 | vma_end_reservation(h, vma, address); | ||
1967 | } | ||
1968 | } | ||
1969 | |||
1906 | struct page *alloc_huge_page(struct vm_area_struct *vma, | 1970 | struct page *alloc_huge_page(struct vm_area_struct *vma, |
1907 | unsigned long addr, int avoid_reserve) | 1971 | unsigned long addr, int avoid_reserve) |
1908 | { | 1972 | { |
@@ -3498,6 +3562,7 @@ retry_avoidcopy: | |||
3498 | spin_unlock(ptl); | 3562 | spin_unlock(ptl); |
3499 | mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end); | 3563 | mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end); |
3500 | out_release_all: | 3564 | out_release_all: |
3565 | restore_reserve_on_error(h, vma, address, new_page); | ||
3501 | put_page(new_page); | 3566 | put_page(new_page); |
3502 | out_release_old: | 3567 | out_release_old: |
3503 | put_page(old_page); | 3568 | put_page(old_page); |
@@ -3680,6 +3745,7 @@ backout: | |||
3680 | spin_unlock(ptl); | 3745 | spin_unlock(ptl); |
3681 | backout_unlocked: | 3746 | backout_unlocked: |
3682 | unlock_page(page); | 3747 | unlock_page(page); |
3748 | restore_reserve_on_error(h, vma, address, page); | ||
3683 | put_page(page); | 3749 | put_page(page); |
3684 | goto out; | 3750 | goto out; |
3685 | } | 3751 | } |
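The block comment above already explains when restore_reserve_on_error() applies; for orientation, a hedged sketch of the call pattern at such an error site (the failing step and the fault codes are placeholders, not lifted from hugetlb.c):

	page = alloc_huge_page(vma, address, 0);
	if (IS_ERR(page))
		return VM_FAULT_OOM;

	if (later_setup_fails()) {		/* hypothetical failure point */
		/* put the reserve map back in line with the global count
		 * that free_huge_page() will adjust for this page */
		restore_reserve_on_error(h, vma, address, page);
		put_page(page);
		return VM_FAULT_SIGBUS;
	}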
diff --git a/mm/kmemleak.c b/mm/kmemleak.c index e5355a5b423f..d1380ed93fdf 100644 --- a/mm/kmemleak.c +++ b/mm/kmemleak.c | |||
@@ -1414,6 +1414,7 @@ static void kmemleak_scan(void) | |||
1414 | /* data/bss scanning */ | 1414 | /* data/bss scanning */ |
1415 | scan_large_block(_sdata, _edata); | 1415 | scan_large_block(_sdata, _edata); |
1416 | scan_large_block(__bss_start, __bss_stop); | 1416 | scan_large_block(__bss_start, __bss_stop); |
1417 | scan_large_block(__start_data_ro_after_init, __end_data_ro_after_init); | ||
1417 | 1418 | ||
1418 | #ifdef CONFIG_SMP | 1419 | #ifdef CONFIG_SMP |
1419 | /* per-cpu sections scanning */ | 1420 | /* per-cpu sections scanning */ |
diff --git a/mm/memory-failure.c b/mm/memory-failure.c index de88f33519c0..19e796d36a62 100644 --- a/mm/memory-failure.c +++ b/mm/memory-failure.c | |||
@@ -1112,10 +1112,10 @@ int memory_failure(unsigned long pfn, int trapno, int flags) | |||
1112 | } | 1112 | } |
1113 | 1113 | ||
1114 | if (!PageHuge(p) && PageTransHuge(hpage)) { | 1114 | if (!PageHuge(p) && PageTransHuge(hpage)) { |
1115 | lock_page(hpage); | 1115 | lock_page(p); |
1116 | if (!PageAnon(hpage) || unlikely(split_huge_page(hpage))) { | 1116 | if (!PageAnon(p) || unlikely(split_huge_page(p))) { |
1117 | unlock_page(hpage); | 1117 | unlock_page(p); |
1118 | if (!PageAnon(hpage)) | 1118 | if (!PageAnon(p)) |
1119 | pr_err("Memory failure: %#lx: non anonymous thp\n", | 1119 | pr_err("Memory failure: %#lx: non anonymous thp\n", |
1120 | pfn); | 1120 | pfn); |
1121 | else | 1121 | else |
@@ -1126,9 +1126,7 @@ int memory_failure(unsigned long pfn, int trapno, int flags) | |||
1126 | put_hwpoison_page(p); | 1126 | put_hwpoison_page(p); |
1127 | return -EBUSY; | 1127 | return -EBUSY; |
1128 | } | 1128 | } |
1129 | unlock_page(hpage); | 1129 | unlock_page(p); |
1130 | get_hwpoison_page(p); | ||
1131 | put_hwpoison_page(hpage); | ||
1132 | VM_BUG_ON_PAGE(!page_count(p), p); | 1130 | VM_BUG_ON_PAGE(!page_count(p), p); |
1133 | hpage = compound_head(p); | 1131 | hpage = compound_head(p); |
1134 | } | 1132 | } |
diff --git a/mm/mremap.c b/mm/mremap.c index da22ad2a5678..6ccecc03f56a 100644 --- a/mm/mremap.c +++ b/mm/mremap.c | |||
@@ -104,11 +104,13 @@ static pte_t move_soft_dirty_pte(pte_t pte) | |||
104 | static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd, | 104 | static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd, |
105 | unsigned long old_addr, unsigned long old_end, | 105 | unsigned long old_addr, unsigned long old_end, |
106 | struct vm_area_struct *new_vma, pmd_t *new_pmd, | 106 | struct vm_area_struct *new_vma, pmd_t *new_pmd, |
107 | unsigned long new_addr, bool need_rmap_locks) | 107 | unsigned long new_addr, bool need_rmap_locks, bool *need_flush) |
108 | { | 108 | { |
109 | struct mm_struct *mm = vma->vm_mm; | 109 | struct mm_struct *mm = vma->vm_mm; |
110 | pte_t *old_pte, *new_pte, pte; | 110 | pte_t *old_pte, *new_pte, pte; |
111 | spinlock_t *old_ptl, *new_ptl; | 111 | spinlock_t *old_ptl, *new_ptl; |
112 | bool force_flush = false; | ||
113 | unsigned long len = old_end - old_addr; | ||
112 | 114 | ||
113 | /* | 115 | /* |
114 | * When need_rmap_locks is true, we take the i_mmap_rwsem and anon_vma | 116 | * When need_rmap_locks is true, we take the i_mmap_rwsem and anon_vma |
@@ -146,6 +148,14 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd, | |||
146 | new_pte++, new_addr += PAGE_SIZE) { | 148 | new_pte++, new_addr += PAGE_SIZE) { |
147 | if (pte_none(*old_pte)) | 149 | if (pte_none(*old_pte)) |
148 | continue; | 150 | continue; |
151 | |||
152 | /* | ||
153 | * We are remapping a dirty PTE, make sure to | ||
154 | * flush TLB before we drop the PTL for the | ||
155 | * old PTE or we may race with page_mkclean(). | ||
156 | */ | ||
157 | if (pte_present(*old_pte) && pte_dirty(*old_pte)) | ||
158 | force_flush = true; | ||
149 | pte = ptep_get_and_clear(mm, old_addr, old_pte); | 159 | pte = ptep_get_and_clear(mm, old_addr, old_pte); |
150 | pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr); | 160 | pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr); |
151 | pte = move_soft_dirty_pte(pte); | 161 | pte = move_soft_dirty_pte(pte); |
@@ -156,6 +166,10 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd, | |||
156 | if (new_ptl != old_ptl) | 166 | if (new_ptl != old_ptl) |
157 | spin_unlock(new_ptl); | 167 | spin_unlock(new_ptl); |
158 | pte_unmap(new_pte - 1); | 168 | pte_unmap(new_pte - 1); |
169 | if (force_flush) | ||
170 | flush_tlb_range(vma, old_end - len, old_end); | ||
171 | else | ||
172 | *need_flush = true; | ||
159 | pte_unmap_unlock(old_pte - 1, old_ptl); | 173 | pte_unmap_unlock(old_pte - 1, old_ptl); |
160 | if (need_rmap_locks) | 174 | if (need_rmap_locks) |
161 | drop_rmap_locks(vma); | 175 | drop_rmap_locks(vma); |
@@ -201,13 +215,12 @@ unsigned long move_page_tables(struct vm_area_struct *vma, | |||
201 | if (need_rmap_locks) | 215 | if (need_rmap_locks) |
202 | take_rmap_locks(vma); | 216 | take_rmap_locks(vma); |
203 | moved = move_huge_pmd(vma, old_addr, new_addr, | 217 | moved = move_huge_pmd(vma, old_addr, new_addr, |
204 | old_end, old_pmd, new_pmd); | 218 | old_end, old_pmd, new_pmd, |
219 | &need_flush); | ||
205 | if (need_rmap_locks) | 220 | if (need_rmap_locks) |
206 | drop_rmap_locks(vma); | 221 | drop_rmap_locks(vma); |
207 | if (moved) { | 222 | if (moved) |
208 | need_flush = true; | ||
209 | continue; | 223 | continue; |
210 | } | ||
211 | } | 224 | } |
212 | split_huge_pmd(vma, old_pmd, old_addr); | 225 | split_huge_pmd(vma, old_pmd, old_addr); |
213 | if (pmd_trans_unstable(old_pmd)) | 226 | if (pmd_trans_unstable(old_pmd)) |
@@ -220,11 +233,10 @@ unsigned long move_page_tables(struct vm_area_struct *vma, | |||
220 | extent = next - new_addr; | 233 | extent = next - new_addr; |
221 | if (extent > LATENCY_LIMIT) | 234 | if (extent > LATENCY_LIMIT) |
222 | extent = LATENCY_LIMIT; | 235 | extent = LATENCY_LIMIT; |
223 | move_ptes(vma, old_pmd, old_addr, old_addr + extent, | 236 | move_ptes(vma, old_pmd, old_addr, old_addr + extent, new_vma, |
224 | new_vma, new_pmd, new_addr, need_rmap_locks); | 237 | new_pmd, new_addr, need_rmap_locks, &need_flush); |
225 | need_flush = true; | ||
226 | } | 238 | } |
227 | if (likely(need_flush)) | 239 | if (need_flush) |
228 | flush_tlb_range(vma, old_end-len, old_addr); | 240 | flush_tlb_range(vma, old_end-len, old_addr); |
229 | 241 | ||
230 | mmu_notifier_invalidate_range_end(vma->vm_mm, mmun_start, mmun_end); | 242 | mmu_notifier_invalidate_range_end(vma->vm_mm, mmun_start, mmun_end); |
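The huge_memory.c and mremap.c hunks enforce the same ordering rule stated in the new comment: when the entry being moved is dirty, the TLB must be flushed before the old page-table lock is dropped, otherwise a concurrent page_mkclean() can observe a clean page table while a stale writable TLB entry still permits writes. Reduced to its skeleton (helper names are invented; only the ordering is the point):

	spin_lock(old_ptl);
	force_flush = entry_present_and_dirty(old_entry);	/* hypothetical */
	entry = get_and_clear(mm, old_addr);	/* old mapping gone from the table */
	install_entry(mm, new_addr, entry);
	if (force_flush)
		flush_tlb_range(vma, old_addr, old_addr + len);	/* before unlock */
	else
		*need_flush = true;		/* caller batches one flush at the end */
	spin_unlock(old_ptl);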
diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 072d791dce2d..6de9440e3ae2 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c | |||
@@ -3658,7 +3658,7 @@ retry: | |||
3658 | /* Make sure we know about allocations which stall for too long */ | 3658 | /* Make sure we know about allocations which stall for too long */ |
3659 | if (time_after(jiffies, alloc_start + stall_timeout)) { | 3659 | if (time_after(jiffies, alloc_start + stall_timeout)) { |
3660 | warn_alloc(gfp_mask, | 3660 | warn_alloc(gfp_mask, |
3661 | "page alloction stalls for %ums, order:%u\n", | 3661 | "page allocation stalls for %ums, order:%u", |
3662 | jiffies_to_msecs(jiffies-alloc_start), order); | 3662 | jiffies_to_msecs(jiffies-alloc_start), order); |
3663 | stall_timeout += 10 * HZ; | 3663 | stall_timeout += 10 * HZ; |
3664 | } | 3664 | } |
diff --git a/mm/shmem.c b/mm/shmem.c index ad7813d73ea7..166ebf5d2bce 100644 --- a/mm/shmem.c +++ b/mm/shmem.c | |||
@@ -1483,6 +1483,8 @@ static int shmem_replace_page(struct page **pagep, gfp_t gfp, | |||
1483 | copy_highpage(newpage, oldpage); | 1483 | copy_highpage(newpage, oldpage); |
1484 | flush_dcache_page(newpage); | 1484 | flush_dcache_page(newpage); |
1485 | 1485 | ||
1486 | __SetPageLocked(newpage); | ||
1487 | __SetPageSwapBacked(newpage); | ||
1486 | SetPageUptodate(newpage); | 1488 | SetPageUptodate(newpage); |
1487 | set_page_private(newpage, swap_index); | 1489 | set_page_private(newpage, swap_index); |
1488 | SetPageSwapCache(newpage); | 1490 | SetPageSwapCache(newpage); |
diff --git a/mm/slab_common.c b/mm/slab_common.c index 71f0b28a1bec..329b03843863 100644 --- a/mm/slab_common.c +++ b/mm/slab_common.c | |||
@@ -533,8 +533,8 @@ void memcg_create_kmem_cache(struct mem_cgroup *memcg, | |||
533 | 533 | ||
534 | s = create_cache(cache_name, root_cache->object_size, | 534 | s = create_cache(cache_name, root_cache->object_size, |
535 | root_cache->size, root_cache->align, | 535 | root_cache->size, root_cache->align, |
536 | root_cache->flags, root_cache->ctor, | 536 | root_cache->flags & CACHE_CREATE_MASK, |
537 | memcg, root_cache); | 537 | root_cache->ctor, memcg, root_cache); |
538 | /* | 538 | /* |
539 | * If we could not create a memcg cache, do not complain, because | 539 | * If we could not create a memcg cache, do not complain, because |
540 | * that's not critical at all as we can always proceed with the root | 540 | * that's not critical at all as we can always proceed with the root |
diff --git a/mm/swapfile.c b/mm/swapfile.c index 2210de290b54..f30438970cd1 100644 --- a/mm/swapfile.c +++ b/mm/swapfile.c | |||
@@ -2224,6 +2224,8 @@ static unsigned long read_swap_header(struct swap_info_struct *p, | |||
2224 | swab32s(&swap_header->info.version); | 2224 | swab32s(&swap_header->info.version); |
2225 | swab32s(&swap_header->info.last_page); | 2225 | swab32s(&swap_header->info.last_page); |
2226 | swab32s(&swap_header->info.nr_badpages); | 2226 | swab32s(&swap_header->info.nr_badpages); |
2227 | if (swap_header->info.nr_badpages > MAX_SWAP_BADPAGES) | ||
2228 | return 0; | ||
2227 | for (i = 0; i < swap_header->info.nr_badpages; i++) | 2229 | for (i = 0; i < swap_header->info.nr_badpages; i++) |
2228 | swab32s(&swap_header->info.badpages[i]); | 2230 | swab32s(&swap_header->info.badpages[i]); |
2229 | } | 2231 | } |
diff --git a/net/can/bcm.c b/net/can/bcm.c index 8e999ffdf28b..8af9d25ff988 100644 --- a/net/can/bcm.c +++ b/net/can/bcm.c | |||
@@ -1549,24 +1549,31 @@ static int bcm_connect(struct socket *sock, struct sockaddr *uaddr, int len, | |||
1549 | struct sockaddr_can *addr = (struct sockaddr_can *)uaddr; | 1549 | struct sockaddr_can *addr = (struct sockaddr_can *)uaddr; |
1550 | struct sock *sk = sock->sk; | 1550 | struct sock *sk = sock->sk; |
1551 | struct bcm_sock *bo = bcm_sk(sk); | 1551 | struct bcm_sock *bo = bcm_sk(sk); |
1552 | int ret = 0; | ||
1552 | 1553 | ||
1553 | if (len < sizeof(*addr)) | 1554 | if (len < sizeof(*addr)) |
1554 | return -EINVAL; | 1555 | return -EINVAL; |
1555 | 1556 | ||
1556 | if (bo->bound) | 1557 | lock_sock(sk); |
1557 | return -EISCONN; | 1558 | |
1559 | if (bo->bound) { | ||
1560 | ret = -EISCONN; | ||
1561 | goto fail; | ||
1562 | } | ||
1558 | 1563 | ||
1559 | /* bind a device to this socket */ | 1564 | /* bind a device to this socket */ |
1560 | if (addr->can_ifindex) { | 1565 | if (addr->can_ifindex) { |
1561 | struct net_device *dev; | 1566 | struct net_device *dev; |
1562 | 1567 | ||
1563 | dev = dev_get_by_index(&init_net, addr->can_ifindex); | 1568 | dev = dev_get_by_index(&init_net, addr->can_ifindex); |
1564 | if (!dev) | 1569 | if (!dev) { |
1565 | return -ENODEV; | 1570 | ret = -ENODEV; |
1566 | 1571 | goto fail; | |
1572 | } | ||
1567 | if (dev->type != ARPHRD_CAN) { | 1573 | if (dev->type != ARPHRD_CAN) { |
1568 | dev_put(dev); | 1574 | dev_put(dev); |
1569 | return -ENODEV; | 1575 | ret = -ENODEV; |
1576 | goto fail; | ||
1570 | } | 1577 | } |
1571 | 1578 | ||
1572 | bo->ifindex = dev->ifindex; | 1579 | bo->ifindex = dev->ifindex; |
@@ -1577,17 +1584,24 @@ static int bcm_connect(struct socket *sock, struct sockaddr *uaddr, int len, | |||
1577 | bo->ifindex = 0; | 1584 | bo->ifindex = 0; |
1578 | } | 1585 | } |
1579 | 1586 | ||
1580 | bo->bound = 1; | ||
1581 | |||
1582 | if (proc_dir) { | 1587 | if (proc_dir) { |
1583 | /* unique socket address as filename */ | 1588 | /* unique socket address as filename */ |
1584 | sprintf(bo->procname, "%lu", sock_i_ino(sk)); | 1589 | sprintf(bo->procname, "%lu", sock_i_ino(sk)); |
1585 | bo->bcm_proc_read = proc_create_data(bo->procname, 0644, | 1590 | bo->bcm_proc_read = proc_create_data(bo->procname, 0644, |
1586 | proc_dir, | 1591 | proc_dir, |
1587 | &bcm_proc_fops, sk); | 1592 | &bcm_proc_fops, sk); |
1593 | if (!bo->bcm_proc_read) { | ||
1594 | ret = -ENOMEM; | ||
1595 | goto fail; | ||
1596 | } | ||
1588 | } | 1597 | } |
1589 | 1598 | ||
1590 | return 0; | 1599 | bo->bound = 1; |
1600 | |||
1601 | fail: | ||
1602 | release_sock(sk); | ||
1603 | |||
1604 | return ret; | ||
1591 | } | 1605 | } |
1592 | 1606 | ||
1593 | static int bcm_recvmsg(struct socket *sock, struct msghdr *msg, size_t size, | 1607 | static int bcm_recvmsg(struct socket *sock, struct msghdr *msg, size_t size, |
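The bcm_connect() rework above serializes bind against the socket lock and funnels every failure through a single unlock site, setting bo->bound only after all setup has succeeded. The shape, as a minimal hedged sketch (the predicates and setup helper are invented):

static int toy_bind(struct sock *sk, const struct sockaddr_can *addr)
{
	int ret = 0;

	lock_sock(sk);
	if (already_bound(sk)) {		/* hypothetical predicate */
		ret = -EISCONN;
		goto out;
	}
	ret = setup_resources(sk, addr);	/* hypothetical, may fail */
	if (ret)
		goto out;
	mark_bound(sk);				/* only after everything succeeded */
out:
	release_sock(sk);
	return ret;
}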
diff --git a/net/ceph/ceph_fs.c b/net/ceph/ceph_fs.c index 7d54e944de5e..dcbe67ff3e2b 100644 --- a/net/ceph/ceph_fs.c +++ b/net/ceph/ceph_fs.c | |||
@@ -34,7 +34,8 @@ void ceph_file_layout_from_legacy(struct ceph_file_layout *fl, | |||
34 | fl->stripe_count = le32_to_cpu(legacy->fl_stripe_count); | 34 | fl->stripe_count = le32_to_cpu(legacy->fl_stripe_count); |
35 | fl->object_size = le32_to_cpu(legacy->fl_object_size); | 35 | fl->object_size = le32_to_cpu(legacy->fl_object_size); |
36 | fl->pool_id = le32_to_cpu(legacy->fl_pg_pool); | 36 | fl->pool_id = le32_to_cpu(legacy->fl_pg_pool); |
37 | if (fl->pool_id == 0) | 37 | if (fl->pool_id == 0 && fl->stripe_unit == 0 && |
38 | fl->stripe_count == 0 && fl->object_size == 0) | ||
38 | fl->pool_id = -1; | 39 | fl->pool_id = -1; |
39 | } | 40 | } |
40 | EXPORT_SYMBOL(ceph_file_layout_from_legacy); | 41 | EXPORT_SYMBOL(ceph_file_layout_from_legacy); |
diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c index d9bf7a1d0a58..e6ae15bc41b7 100644 --- a/net/ceph/osd_client.c +++ b/net/ceph/osd_client.c | |||
@@ -4094,6 +4094,7 @@ int ceph_osdc_init(struct ceph_osd_client *osdc, struct ceph_client *client) | |||
4094 | osd_init(&osdc->homeless_osd); | 4094 | osd_init(&osdc->homeless_osd); |
4095 | osdc->homeless_osd.o_osdc = osdc; | 4095 | osdc->homeless_osd.o_osdc = osdc; |
4096 | osdc->homeless_osd.o_osd = CEPH_HOMELESS_OSD; | 4096 | osdc->homeless_osd.o_osd = CEPH_HOMELESS_OSD; |
4097 | osdc->last_linger_id = CEPH_LINGER_ID_START; | ||
4097 | osdc->linger_requests = RB_ROOT; | 4098 | osdc->linger_requests = RB_ROOT; |
4098 | osdc->map_checks = RB_ROOT; | 4099 | osdc->map_checks = RB_ROOT; |
4099 | osdc->linger_map_checks = RB_ROOT; | 4100 | osdc->linger_map_checks = RB_ROOT; |
diff --git a/net/core/dev.c b/net/core/dev.c index 820bac239738..6666b28b6815 100644 --- a/net/core/dev.c +++ b/net/core/dev.c | |||
@@ -1766,19 +1766,14 @@ EXPORT_SYMBOL_GPL(is_skb_forwardable); | |||
1766 | 1766 | ||
1767 | int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb) | 1767 | int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb) |
1768 | { | 1768 | { |
1769 | if (skb_orphan_frags(skb, GFP_ATOMIC) || | 1769 | int ret = ____dev_forward_skb(dev, skb); |
1770 | unlikely(!is_skb_forwardable(dev, skb))) { | ||
1771 | atomic_long_inc(&dev->rx_dropped); | ||
1772 | kfree_skb(skb); | ||
1773 | return NET_RX_DROP; | ||
1774 | } | ||
1775 | 1770 | ||
1776 | skb_scrub_packet(skb, true); | 1771 | if (likely(!ret)) { |
1777 | skb->priority = 0; | 1772 | skb->protocol = eth_type_trans(skb, dev); |
1778 | skb->protocol = eth_type_trans(skb, dev); | 1773 | skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN); |
1779 | skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN); | 1774 | } |
1780 | 1775 | ||
1781 | return 0; | 1776 | return ret; |
1782 | } | 1777 | } |
1783 | EXPORT_SYMBOL_GPL(__dev_forward_skb); | 1778 | EXPORT_SYMBOL_GPL(__dev_forward_skb); |
1784 | 1779 | ||
@@ -2484,7 +2479,7 @@ int skb_checksum_help(struct sk_buff *skb) | |||
2484 | goto out; | 2479 | goto out; |
2485 | } | 2480 | } |
2486 | 2481 | ||
2487 | *(__sum16 *)(skb->data + offset) = csum_fold(csum); | 2482 | *(__sum16 *)(skb->data + offset) = csum_fold(csum) ?: CSUM_MANGLED_0; |
2488 | out_set_summed: | 2483 | out_set_summed: |
2489 | skb->ip_summed = CHECKSUM_NONE; | 2484 | skb->ip_summed = CHECKSUM_NONE; |
2490 | out: | 2485 | out: |
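The `?: CSUM_MANGLED_0` change in skb_checksum_help() leans on a property of the Internet checksum: 0x0000 and 0xFFFF encode the same ones'-complement value, but a checksum field of zero can be read as "no checksum was computed" (UDP over IPv4 being the classic case). A small self-contained illustration of the substitution:

/* Illustration only: fold a 32-bit partial sum to 16 bits and never emit 0,
 * mirroring csum_fold(csum) ?: CSUM_MANGLED_0 (CSUM_MANGLED_0 == 0xffff).
 */
static u16 fold_and_mangle(u32 sum)
{
	sum = (sum & 0xffff) + (sum >> 16);	/* fold carries */
	sum = (sum & 0xffff) + (sum >> 16);	/* at most one more carry */
	sum = ~sum & 0xffff;			/* ones'-complement */
	return sum ? (u16)sum : (u16)0xffff;	/* 0x0000 -> 0xffff, same value */
}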
diff --git a/net/core/filter.c b/net/core/filter.c index 00351cdf7d0c..b391209838ef 100644 --- a/net/core/filter.c +++ b/net/core/filter.c | |||
@@ -1628,6 +1628,19 @@ static inline int __bpf_rx_skb(struct net_device *dev, struct sk_buff *skb) | |||
1628 | return dev_forward_skb(dev, skb); | 1628 | return dev_forward_skb(dev, skb); |
1629 | } | 1629 | } |
1630 | 1630 | ||
1631 | static inline int __bpf_rx_skb_no_mac(struct net_device *dev, | ||
1632 | struct sk_buff *skb) | ||
1633 | { | ||
1634 | int ret = ____dev_forward_skb(dev, skb); | ||
1635 | |||
1636 | if (likely(!ret)) { | ||
1637 | skb->dev = dev; | ||
1638 | ret = netif_rx(skb); | ||
1639 | } | ||
1640 | |||
1641 | return ret; | ||
1642 | } | ||
1643 | |||
1631 | static inline int __bpf_tx_skb(struct net_device *dev, struct sk_buff *skb) | 1644 | static inline int __bpf_tx_skb(struct net_device *dev, struct sk_buff *skb) |
1632 | { | 1645 | { |
1633 | int ret; | 1646 | int ret; |
@@ -1647,6 +1660,51 @@ static inline int __bpf_tx_skb(struct net_device *dev, struct sk_buff *skb) | |||
1647 | return ret; | 1660 | return ret; |
1648 | } | 1661 | } |
1649 | 1662 | ||
1663 | static int __bpf_redirect_no_mac(struct sk_buff *skb, struct net_device *dev, | ||
1664 | u32 flags) | ||
1665 | { | ||
1666 | /* skb->mac_len is not set on normal egress */ | ||
1667 | unsigned int mlen = skb->network_header - skb->mac_header; | ||
1668 | |||
1669 | __skb_pull(skb, mlen); | ||
1670 | |||
1671 | /* At ingress, the mac header has already been pulled once. | ||
1672 | * At egress, skb_postpull_rcsum has to be done in case that | ||
1673 | * the skb is originated from ingress (i.e. a forwarded skb) | ||
1674 | * to ensure that rcsum starts at net header. | ||
1675 | */ | ||
1676 | if (!skb_at_tc_ingress(skb)) | ||
1677 | skb_postpull_rcsum(skb, skb_mac_header(skb), mlen); | ||
1678 | skb_pop_mac_header(skb); | ||
1679 | skb_reset_mac_len(skb); | ||
1680 | return flags & BPF_F_INGRESS ? | ||
1681 | __bpf_rx_skb_no_mac(dev, skb) : __bpf_tx_skb(dev, skb); | ||
1682 | } | ||
1683 | |||
1684 | static int __bpf_redirect_common(struct sk_buff *skb, struct net_device *dev, | ||
1685 | u32 flags) | ||
1686 | { | ||
1687 | bpf_push_mac_rcsum(skb); | ||
1688 | return flags & BPF_F_INGRESS ? | ||
1689 | __bpf_rx_skb(dev, skb) : __bpf_tx_skb(dev, skb); | ||
1690 | } | ||
1691 | |||
1692 | static int __bpf_redirect(struct sk_buff *skb, struct net_device *dev, | ||
1693 | u32 flags) | ||
1694 | { | ||
1695 | switch (dev->type) { | ||
1696 | case ARPHRD_TUNNEL: | ||
1697 | case ARPHRD_TUNNEL6: | ||
1698 | case ARPHRD_SIT: | ||
1699 | case ARPHRD_IPGRE: | ||
1700 | case ARPHRD_VOID: | ||
1701 | case ARPHRD_NONE: | ||
1702 | return __bpf_redirect_no_mac(skb, dev, flags); | ||
1703 | default: | ||
1704 | return __bpf_redirect_common(skb, dev, flags); | ||
1705 | } | ||
1706 | } | ||
1707 | |||
1650 | BPF_CALL_3(bpf_clone_redirect, struct sk_buff *, skb, u32, ifindex, u64, flags) | 1708 | BPF_CALL_3(bpf_clone_redirect, struct sk_buff *, skb, u32, ifindex, u64, flags) |
1651 | { | 1709 | { |
1652 | struct net_device *dev; | 1710 | struct net_device *dev; |
@@ -1675,10 +1733,7 @@ BPF_CALL_3(bpf_clone_redirect, struct sk_buff *, skb, u32, ifindex, u64, flags) | |||
1675 | return -ENOMEM; | 1733 | return -ENOMEM; |
1676 | } | 1734 | } |
1677 | 1735 | ||
1678 | bpf_push_mac_rcsum(clone); | 1736 | return __bpf_redirect(clone, dev, flags); |
1679 | |||
1680 | return flags & BPF_F_INGRESS ? | ||
1681 | __bpf_rx_skb(dev, clone) : __bpf_tx_skb(dev, clone); | ||
1682 | } | 1737 | } |
1683 | 1738 | ||
1684 | static const struct bpf_func_proto bpf_clone_redirect_proto = { | 1739 | static const struct bpf_func_proto bpf_clone_redirect_proto = { |
@@ -1722,10 +1777,7 @@ int skb_do_redirect(struct sk_buff *skb) | |||
1722 | return -EINVAL; | 1777 | return -EINVAL; |
1723 | } | 1778 | } |
1724 | 1779 | ||
1725 | bpf_push_mac_rcsum(skb); | 1780 | return __bpf_redirect(skb, dev, ri->flags); |
1726 | |||
1727 | return ri->flags & BPF_F_INGRESS ? | ||
1728 | __bpf_rx_skb(dev, skb) : __bpf_tx_skb(dev, skb); | ||
1729 | } | 1781 | } |
1730 | 1782 | ||
1731 | static const struct bpf_func_proto bpf_redirect_proto = { | 1783 | static const struct bpf_func_proto bpf_redirect_proto = { |
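The new __bpf_redirect() picks its path purely from the device type; the intent, restated as a hedged predicate (helper name invented), is that devices carrying no link-layer header must not have a MAC header pushed or kept in front of the packet:

/* Sketch: true for device types that carry no link-layer header. */
static bool toy_dev_is_l2_less(const struct net_device *dev)
{
	switch (dev->type) {
	case ARPHRD_TUNNEL:
	case ARPHRD_TUNNEL6:
	case ARPHRD_SIT:
	case ARPHRD_IPGRE:
	case ARPHRD_VOID:
	case ARPHRD_NONE:
		return true;	/* redirect via the no-mac path */
	default:
		return false;	/* common path keeps/pushes the MAC header */
	}
}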
diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c index ab193e5def07..69e4463a4b1b 100644 --- a/net/core/flow_dissector.c +++ b/net/core/flow_dissector.c | |||
@@ -122,7 +122,7 @@ bool __skb_flow_dissect(const struct sk_buff *skb, | |||
122 | struct flow_dissector_key_keyid *key_keyid; | 122 | struct flow_dissector_key_keyid *key_keyid; |
123 | bool skip_vlan = false; | 123 | bool skip_vlan = false; |
124 | u8 ip_proto = 0; | 124 | u8 ip_proto = 0; |
125 | bool ret = false; | 125 | bool ret; |
126 | 126 | ||
127 | if (!data) { | 127 | if (!data) { |
128 | data = skb->data; | 128 | data = skb->data; |
@@ -549,12 +549,17 @@ ip_proto_again: | |||
549 | out_good: | 549 | out_good: |
550 | ret = true; | 550 | ret = true; |
551 | 551 | ||
552 | out_bad: | 552 | key_control->thoff = (u16)nhoff; |
553 | out: | ||
553 | key_basic->n_proto = proto; | 554 | key_basic->n_proto = proto; |
554 | key_basic->ip_proto = ip_proto; | 555 | key_basic->ip_proto = ip_proto; |
555 | key_control->thoff = (u16)nhoff; | ||
556 | 556 | ||
557 | return ret; | 557 | return ret; |
558 | |||
559 | out_bad: | ||
560 | ret = false; | ||
561 | key_control->thoff = min_t(u16, nhoff, skb ? skb->len : hlen); | ||
562 | goto out; | ||
558 | } | 563 | } |
559 | EXPORT_SYMBOL(__skb_flow_dissect); | 564 | EXPORT_SYMBOL(__skb_flow_dissect); |
560 | 565 | ||
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c index fb7348f13501..db313ec7af32 100644 --- a/net/core/rtnetlink.c +++ b/net/core/rtnetlink.c | |||
@@ -275,6 +275,7 @@ int rtnl_unregister(int protocol, int msgtype) | |||
275 | 275 | ||
276 | rtnl_msg_handlers[protocol][msgindex].doit = NULL; | 276 | rtnl_msg_handlers[protocol][msgindex].doit = NULL; |
277 | rtnl_msg_handlers[protocol][msgindex].dumpit = NULL; | 277 | rtnl_msg_handlers[protocol][msgindex].dumpit = NULL; |
278 | rtnl_msg_handlers[protocol][msgindex].calcit = NULL; | ||
278 | 279 | ||
279 | return 0; | 280 | return 0; |
280 | } | 281 | } |
diff --git a/net/core/sock.c b/net/core/sock.c index c73e28fc9c2a..5e3ca414357e 100644 --- a/net/core/sock.c +++ b/net/core/sock.c | |||
@@ -453,7 +453,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) | |||
453 | EXPORT_SYMBOL(sock_queue_rcv_skb); | 453 | EXPORT_SYMBOL(sock_queue_rcv_skb); |
454 | 454 | ||
455 | int __sk_receive_skb(struct sock *sk, struct sk_buff *skb, | 455 | int __sk_receive_skb(struct sock *sk, struct sk_buff *skb, |
456 | const int nested, unsigned int trim_cap) | 456 | const int nested, unsigned int trim_cap, bool refcounted) |
457 | { | 457 | { |
458 | int rc = NET_RX_SUCCESS; | 458 | int rc = NET_RX_SUCCESS; |
459 | 459 | ||
@@ -487,7 +487,8 @@ int __sk_receive_skb(struct sock *sk, struct sk_buff *skb, | |||
487 | 487 | ||
488 | bh_unlock_sock(sk); | 488 | bh_unlock_sock(sk); |
489 | out: | 489 | out: |
490 | sock_put(sk); | 490 | if (refcounted) |
491 | sock_put(sk); | ||
491 | return rc; | 492 | return rc; |
492 | discard_and_relse: | 493 | discard_and_relse: |
493 | kfree_skb(skb); | 494 | kfree_skb(skb); |
@@ -1543,6 +1544,7 @@ struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority) | |||
1543 | RCU_INIT_POINTER(newsk->sk_reuseport_cb, NULL); | 1544 | RCU_INIT_POINTER(newsk->sk_reuseport_cb, NULL); |
1544 | 1545 | ||
1545 | newsk->sk_err = 0; | 1546 | newsk->sk_err = 0; |
1547 | newsk->sk_err_soft = 0; | ||
1546 | newsk->sk_priority = 0; | 1548 | newsk->sk_priority = 0; |
1547 | newsk->sk_incoming_cpu = raw_smp_processor_id(); | 1549 | newsk->sk_incoming_cpu = raw_smp_processor_id(); |
1548 | atomic64_set(&newsk->sk_cookie, 0); | 1550 | atomic64_set(&newsk->sk_cookie, 0); |
diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c index 345a3aeb8c7e..b567c8725aea 100644 --- a/net/dccp/ipv4.c +++ b/net/dccp/ipv4.c | |||
@@ -235,7 +235,7 @@ static void dccp_v4_err(struct sk_buff *skb, u32 info) | |||
235 | { | 235 | { |
236 | const struct iphdr *iph = (struct iphdr *)skb->data; | 236 | const struct iphdr *iph = (struct iphdr *)skb->data; |
237 | const u8 offset = iph->ihl << 2; | 237 | const u8 offset = iph->ihl << 2; |
238 | const struct dccp_hdr *dh = (struct dccp_hdr *)(skb->data + offset); | 238 | const struct dccp_hdr *dh; |
239 | struct dccp_sock *dp; | 239 | struct dccp_sock *dp; |
240 | struct inet_sock *inet; | 240 | struct inet_sock *inet; |
241 | const int type = icmp_hdr(skb)->type; | 241 | const int type = icmp_hdr(skb)->type; |
@@ -245,11 +245,13 @@ static void dccp_v4_err(struct sk_buff *skb, u32 info) | |||
245 | int err; | 245 | int err; |
246 | struct net *net = dev_net(skb->dev); | 246 | struct net *net = dev_net(skb->dev); |
247 | 247 | ||
248 | if (skb->len < offset + sizeof(*dh) || | 248 | /* Only need dccph_dport & dccph_sport which are the first |
249 | skb->len < offset + __dccp_basic_hdr_len(dh)) { | 249 | * 4 bytes in dccp header. |
250 | __ICMP_INC_STATS(net, ICMP_MIB_INERRORS); | 250 | * Our caller (icmp_socket_deliver()) already pulled 8 bytes for us. |
251 | return; | 251 | */ |
252 | } | 252 | BUILD_BUG_ON(offsetofend(struct dccp_hdr, dccph_sport) > 8); |
253 | BUILD_BUG_ON(offsetofend(struct dccp_hdr, dccph_dport) > 8); | ||
254 | dh = (struct dccp_hdr *)(skb->data + offset); | ||
253 | 255 | ||
254 | sk = __inet_lookup_established(net, &dccp_hashinfo, | 256 | sk = __inet_lookup_established(net, &dccp_hashinfo, |
255 | iph->daddr, dh->dccph_dport, | 257 | iph->daddr, dh->dccph_dport, |
@@ -868,7 +870,7 @@ lookup: | |||
868 | goto discard_and_relse; | 870 | goto discard_and_relse; |
869 | nf_reset(skb); | 871 | nf_reset(skb); |
870 | 872 | ||
871 | return __sk_receive_skb(sk, skb, 1, dh->dccph_doff * 4); | 873 | return __sk_receive_skb(sk, skb, 1, dh->dccph_doff * 4, refcounted); |
872 | 874 | ||
873 | no_dccp_socket: | 875 | no_dccp_socket: |
874 | if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) | 876 | if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) |
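The BUILD_BUG_ON pair documents why no explicit length check is needed any more: only the two port fields are read, and they end within the 8 bytes the ICMP error path guarantees to be present (offsetofend(type, member) is offsetof(type, member) + sizeof(member)). A hypothetical header makes the arithmetic visible:

/* Hypothetical layout, not the real dccp_hdr: the ports occupy bytes 0-3. */
struct toy_hdr {
	__be16	sport;		/* bytes 0-1 */
	__be16	dport;		/* bytes 2-3 */
	__u8	rest[4];	/* bytes 4-7, unused by the error handler */
};

static void toy_err(struct sk_buff *skb, int offset)
{
	const struct toy_hdr *th;

	BUILD_BUG_ON(offsetofend(struct toy_hdr, dport) > 8);	/* 4 <= 8 */
	th = (const struct toy_hdr *)(skb->data + offset);
	/* ... look up the socket using only th->sport and th->dport ... */
}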
diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c index 3828f94b234c..715e5d1dc107 100644 --- a/net/dccp/ipv6.c +++ b/net/dccp/ipv6.c | |||
@@ -70,7 +70,7 @@ static void dccp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt, | |||
70 | u8 type, u8 code, int offset, __be32 info) | 70 | u8 type, u8 code, int offset, __be32 info) |
71 | { | 71 | { |
72 | const struct ipv6hdr *hdr = (const struct ipv6hdr *)skb->data; | 72 | const struct ipv6hdr *hdr = (const struct ipv6hdr *)skb->data; |
73 | const struct dccp_hdr *dh = (struct dccp_hdr *)(skb->data + offset); | 73 | const struct dccp_hdr *dh; |
74 | struct dccp_sock *dp; | 74 | struct dccp_sock *dp; |
75 | struct ipv6_pinfo *np; | 75 | struct ipv6_pinfo *np; |
76 | struct sock *sk; | 76 | struct sock *sk; |
@@ -78,12 +78,13 @@ static void dccp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt, | |||
78 | __u64 seq; | 78 | __u64 seq; |
79 | struct net *net = dev_net(skb->dev); | 79 | struct net *net = dev_net(skb->dev); |
80 | 80 | ||
81 | if (skb->len < offset + sizeof(*dh) || | 81 | /* Only need dccph_dport & dccph_sport which are the first |
82 | skb->len < offset + __dccp_basic_hdr_len(dh)) { | 82 | * 4 bytes in dccp header. |
83 | __ICMP6_INC_STATS(net, __in6_dev_get(skb->dev), | 83 | * Our caller (icmpv6_notify()) already pulled 8 bytes for us. |
84 | ICMP6_MIB_INERRORS); | 84 | */ |
85 | return; | 85 | BUILD_BUG_ON(offsetofend(struct dccp_hdr, dccph_sport) > 8); |
86 | } | 86 | BUILD_BUG_ON(offsetofend(struct dccp_hdr, dccph_dport) > 8); |
87 | dh = (struct dccp_hdr *)(skb->data + offset); | ||
87 | 88 | ||
88 | sk = __inet6_lookup_established(net, &dccp_hashinfo, | 89 | sk = __inet6_lookup_established(net, &dccp_hashinfo, |
89 | &hdr->daddr, dh->dccph_dport, | 90 | &hdr->daddr, dh->dccph_dport, |
@@ -738,7 +739,8 @@ lookup: | |||
738 | if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) | 739 | if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) |
739 | goto discard_and_relse; | 740 | goto discard_and_relse; |
740 | 741 | ||
741 | return __sk_receive_skb(sk, skb, 1, dh->dccph_doff * 4) ? -1 : 0; | 742 | return __sk_receive_skb(sk, skb, 1, dh->dccph_doff * 4, |
743 | refcounted) ? -1 : 0; | ||
742 | 744 | ||
743 | no_dccp_socket: | 745 | no_dccp_socket: |
744 | if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) | 746 | if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) |
@@ -956,6 +958,7 @@ static const struct inet_connection_sock_af_ops dccp_ipv6_mapped = { | |||
956 | .getsockopt = ipv6_getsockopt, | 958 | .getsockopt = ipv6_getsockopt, |
957 | .addr2sockaddr = inet6_csk_addr2sockaddr, | 959 | .addr2sockaddr = inet6_csk_addr2sockaddr, |
958 | .sockaddr_len = sizeof(struct sockaddr_in6), | 960 | .sockaddr_len = sizeof(struct sockaddr_in6), |
961 | .bind_conflict = inet6_csk_bind_conflict, | ||
959 | #ifdef CONFIG_COMPAT | 962 | #ifdef CONFIG_COMPAT |
960 | .compat_setsockopt = compat_ipv6_setsockopt, | 963 | .compat_setsockopt = compat_ipv6_setsockopt, |
961 | .compat_getsockopt = compat_ipv6_getsockopt, | 964 | .compat_getsockopt = compat_ipv6_getsockopt, |
diff --git a/net/dccp/proto.c b/net/dccp/proto.c index 41e65804ddf5..9fe25bf63296 100644 --- a/net/dccp/proto.c +++ b/net/dccp/proto.c | |||
@@ -1009,6 +1009,10 @@ void dccp_close(struct sock *sk, long timeout) | |||
1009 | __kfree_skb(skb); | 1009 | __kfree_skb(skb); |
1010 | } | 1010 | } |
1011 | 1011 | ||
1012 | /* If socket has been already reset kill it. */ | ||
1013 | if (sk->sk_state == DCCP_CLOSED) | ||
1014 | goto adjudge_to_death; | ||
1015 | |||
1012 | if (data_was_unread) { | 1016 | if (data_was_unread) { |
1013 | /* Unread data was tossed, send an appropriate Reset Code */ | 1017 | /* Unread data was tossed, send an appropriate Reset Code */ |
1014 | DCCP_WARN("ABORT with %u bytes unread\n", data_was_unread); | 1018 | DCCP_WARN("ABORT with %u bytes unread\n", data_was_unread); |
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c index 9648c97e541f..5ddf5cda07f4 100644 --- a/net/ipv4/af_inet.c +++ b/net/ipv4/af_inet.c | |||
@@ -533,9 +533,9 @@ EXPORT_SYMBOL(inet_dgram_connect); | |||
533 | 533 | ||
534 | static long inet_wait_for_connect(struct sock *sk, long timeo, int writebias) | 534 | static long inet_wait_for_connect(struct sock *sk, long timeo, int writebias) |
535 | { | 535 | { |
536 | DEFINE_WAIT(wait); | 536 | DEFINE_WAIT_FUNC(wait, woken_wake_function); |
537 | 537 | ||
538 | prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); | 538 | add_wait_queue(sk_sleep(sk), &wait); |
539 | sk->sk_write_pending += writebias; | 539 | sk->sk_write_pending += writebias; |
540 | 540 | ||
541 | /* Basic assumption: if someone sets sk->sk_err, he _must_ | 541 | /* Basic assumption: if someone sets sk->sk_err, he _must_ |
@@ -545,13 +545,12 @@ static long inet_wait_for_connect(struct sock *sk, long timeo, int writebias) | |||
545 | */ | 545 | */ |
546 | while ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) { | 546 | while ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) { |
547 | release_sock(sk); | 547 | release_sock(sk); |
548 | timeo = schedule_timeout(timeo); | 548 | timeo = wait_woken(&wait, TASK_INTERRUPTIBLE, timeo); |
549 | lock_sock(sk); | 549 | lock_sock(sk); |
550 | if (signal_pending(current) || !timeo) | 550 | if (signal_pending(current) || !timeo) |
551 | break; | 551 | break; |
552 | prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); | ||
553 | } | 552 | } |
554 | finish_wait(sk_sleep(sk), &wait); | 553 | remove_wait_queue(sk_sleep(sk), &wait); |
555 | sk->sk_write_pending -= writebias; | 554 | sk->sk_write_pending -= writebias; |
556 | return timeo; | 555 | return timeo; |
557 | } | 556 | } |
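Switching inet_wait_for_connect() to wait_woken() is meant to close a missed-wakeup window: the wait entry stays queued on sk_sleep(sk) across the release_sock()/lock_sock() dance, and wait_woken() itself notices a wakeup that has already fired. The resulting shape, with the connect-specific condition abstracted into a hypothetical predicate:

	DEFINE_WAIT_FUNC(wait, woken_wake_function);

	add_wait_queue(sk_sleep(sk), &wait);
	while (!handshake_done(sk)) {		/* hypothetical condition */
		release_sock(sk);
		timeo = wait_woken(&wait, TASK_INTERRUPTIBLE, timeo);
		lock_sock(sk);
		if (signal_pending(current) || !timeo)
			break;
	}
	remove_wait_queue(sk_sleep(sk), &wait);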
diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c index 31cef3602585..4cff74d4133f 100644 --- a/net/ipv4/fib_trie.c +++ b/net/ipv4/fib_trie.c | |||
@@ -2413,22 +2413,19 @@ static struct key_vector *fib_route_get_idx(struct fib_route_iter *iter, | |||
2413 | struct key_vector *l, **tp = &iter->tnode; | 2413 | struct key_vector *l, **tp = &iter->tnode; |
2414 | t_key key; | 2414 | t_key key; |
2415 | 2415 | ||
2416 | /* use cache location of next-to-find key */ | 2416 | /* use cached location of previously found key */ |
2417 | if (iter->pos > 0 && pos >= iter->pos) { | 2417 | if (iter->pos > 0 && pos >= iter->pos) { |
2418 | pos -= iter->pos; | ||
2419 | key = iter->key; | 2418 | key = iter->key; |
2420 | } else { | 2419 | } else { |
2421 | iter->pos = 0; | 2420 | iter->pos = 1; |
2422 | key = 0; | 2421 | key = 0; |
2423 | } | 2422 | } |
2424 | 2423 | ||
2425 | while ((l = leaf_walk_rcu(tp, key)) != NULL) { | 2424 | pos -= iter->pos; |
2425 | |||
2426 | while ((l = leaf_walk_rcu(tp, key)) && (pos-- > 0)) { | ||
2426 | key = l->key + 1; | 2427 | key = l->key + 1; |
2427 | iter->pos++; | 2428 | iter->pos++; |
2428 | |||
2429 | if (--pos <= 0) | ||
2430 | break; | ||
2431 | |||
2432 | l = NULL; | 2429 | l = NULL; |
2433 | 2430 | ||
2434 | /* handle unlikely case of a key wrap */ | 2431 | /* handle unlikely case of a key wrap */ |
@@ -2437,7 +2434,7 @@ static struct key_vector *fib_route_get_idx(struct fib_route_iter *iter, | |||
2437 | } | 2434 | } |
2438 | 2435 | ||
2439 | if (l) | 2436 | if (l) |
2440 | iter->key = key; /* remember it */ | 2437 | iter->key = l->key; /* remember it */ |
2441 | else | 2438 | else |
2442 | iter->pos = 0; /* forget it */ | 2439 | iter->pos = 0; /* forget it */ |
2443 | 2440 | ||
@@ -2465,7 +2462,7 @@ static void *fib_route_seq_start(struct seq_file *seq, loff_t *pos) | |||
2465 | return fib_route_get_idx(iter, *pos); | 2462 | return fib_route_get_idx(iter, *pos); |
2466 | 2463 | ||
2467 | iter->pos = 0; | 2464 | iter->pos = 0; |
2468 | iter->key = 0; | 2465 | iter->key = KEY_MAX; |
2469 | 2466 | ||
2470 | return SEQ_START_TOKEN; | 2467 | return SEQ_START_TOKEN; |
2471 | } | 2468 | } |
@@ -2474,7 +2471,7 @@ static void *fib_route_seq_next(struct seq_file *seq, void *v, loff_t *pos) | |||
2474 | { | 2471 | { |
2475 | struct fib_route_iter *iter = seq->private; | 2472 | struct fib_route_iter *iter = seq->private; |
2476 | struct key_vector *l = NULL; | 2473 | struct key_vector *l = NULL; |
2477 | t_key key = iter->key; | 2474 | t_key key = iter->key + 1; |
2478 | 2475 | ||
2479 | ++*pos; | 2476 | ++*pos; |
2480 | 2477 | ||
@@ -2483,7 +2480,7 @@ static void *fib_route_seq_next(struct seq_file *seq, void *v, loff_t *pos) | |||
2483 | l = leaf_walk_rcu(&iter->tnode, key); | 2480 | l = leaf_walk_rcu(&iter->tnode, key); |
2484 | 2481 | ||
2485 | if (l) { | 2482 | if (l) { |
2486 | iter->key = l->key + 1; | 2483 | iter->key = l->key; |
2487 | iter->pos++; | 2484 | iter->pos++; |
2488 | } else { | 2485 | } else { |
2489 | iter->pos = 0; | 2486 | iter->pos = 0; |
diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c index 38abe70e595f..48734ee6293f 100644 --- a/net/ipv4/icmp.c +++ b/net/ipv4/icmp.c | |||
@@ -477,7 +477,7 @@ static struct rtable *icmp_route_lookup(struct net *net, | |||
477 | fl4->flowi4_proto = IPPROTO_ICMP; | 477 | fl4->flowi4_proto = IPPROTO_ICMP; |
478 | fl4->fl4_icmp_type = type; | 478 | fl4->fl4_icmp_type = type; |
479 | fl4->fl4_icmp_code = code; | 479 | fl4->fl4_icmp_code = code; |
480 | fl4->flowi4_oif = l3mdev_master_ifindex(skb_in->dev); | 480 | fl4->flowi4_oif = l3mdev_master_ifindex(skb_dst(skb_in)->dev); |
481 | 481 | ||
482 | security_skb_classify_flow(skb_in, flowi4_to_flowi(fl4)); | 482 | security_skb_classify_flow(skb_in, flowi4_to_flowi(fl4)); |
483 | rt = __ip_route_output_key_hash(net, fl4, | 483 | rt = __ip_route_output_key_hash(net, fl4, |
@@ -502,7 +502,7 @@ static struct rtable *icmp_route_lookup(struct net *net, | |||
502 | if (err) | 502 | if (err) |
503 | goto relookup_failed; | 503 | goto relookup_failed; |
504 | 504 | ||
505 | if (inet_addr_type_dev_table(net, skb_in->dev, | 505 | if (inet_addr_type_dev_table(net, skb_dst(skb_in)->dev, |
506 | fl4_dec.saddr) == RTN_LOCAL) { | 506 | fl4_dec.saddr) == RTN_LOCAL) { |
507 | rt2 = __ip_route_output_key(net, &fl4_dec); | 507 | rt2 = __ip_route_output_key(net, &fl4_dec); |
508 | if (IS_ERR(rt2)) | 508 | if (IS_ERR(rt2)) |
diff --git a/net/ipv4/ip_forward.c b/net/ipv4/ip_forward.c index 8b4ffd216839..9f0a7b96646f 100644 --- a/net/ipv4/ip_forward.c +++ b/net/ipv4/ip_forward.c | |||
@@ -117,7 +117,7 @@ int ip_forward(struct sk_buff *skb) | |||
117 | if (opt->is_strictroute && rt->rt_uses_gateway) | 117 | if (opt->is_strictroute && rt->rt_uses_gateway) |
118 | goto sr_failed; | 118 | goto sr_failed; |
119 | 119 | ||
120 | IPCB(skb)->flags |= IPSKB_FORWARDED | IPSKB_FRAG_SEGS; | 120 | IPCB(skb)->flags |= IPSKB_FORWARDED; |
121 | mtu = ip_dst_mtu_maybe_forward(&rt->dst, true); | 121 | mtu = ip_dst_mtu_maybe_forward(&rt->dst, true); |
122 | if (ip_exceeds_mtu(skb, mtu)) { | 122 | if (ip_exceeds_mtu(skb, mtu)) { |
123 | IP_INC_STATS(net, IPSTATS_MIB_FRAGFAILS); | 123 | IP_INC_STATS(net, IPSTATS_MIB_FRAGFAILS); |
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c index 03e7f7310423..105908d841a3 100644 --- a/net/ipv4/ip_output.c +++ b/net/ipv4/ip_output.c | |||
@@ -239,19 +239,23 @@ static int ip_finish_output_gso(struct net *net, struct sock *sk, | |||
239 | struct sk_buff *segs; | 239 | struct sk_buff *segs; |
240 | int ret = 0; | 240 | int ret = 0; |
241 | 241 | ||
242 | /* common case: fragmentation of segments is not allowed, | 242 | /* common case: seglen is <= mtu |
243 | * or seglen is <= mtu | ||
244 | */ | 243 | */ |
245 | if (((IPCB(skb)->flags & IPSKB_FRAG_SEGS) == 0) || | 244 | if (skb_gso_validate_mtu(skb, mtu)) |
246 | skb_gso_validate_mtu(skb, mtu)) | ||
247 | return ip_finish_output2(net, sk, skb); | 245 | return ip_finish_output2(net, sk, skb); |
248 | 246 | ||
249 | /* Slowpath - GSO segment length is exceeding the dst MTU. | 247 | /* Slowpath - GSO segment length exceeds the egress MTU. |
250 | * | 248 | * |
251 | * This can happen in two cases: | 249 | * This can happen in several cases: |
252 | * 1) TCP GRO packet, DF bit not set | 250 | * - Forwarding of a TCP GRO skb, when DF flag is not set. |
253 | * 2) skb arrived via virtio-net, we thus get TSO/GSO skbs directly | 251 | * - Forwarding of an skb that arrived on a virtualization interface |
254 | * from host network stack. | 252 | * (virtio-net/vhost/tap) with TSO/GSO size set by other network |
253 | * stack. | ||
254 | * - Local GSO skb transmitted on an NETIF_F_TSO tunnel stacked over an | ||
255 | * interface with a smaller MTU. | ||
256 | * - Arriving GRO skb (or GSO skb in a virtualized environment) that is | ||
257 | * bridged to a NETIF_F_TSO tunnel stacked over an interface with an | ||
258 | * insufficient MTU. | ||
255 | */ | 259 | */ |
256 | features = netif_skb_features(skb); | 260 | features = netif_skb_features(skb); |
257 | BUILD_BUG_ON(sizeof(*IPCB(skb)) > SKB_SGO_CB_OFFSET); | 261 | BUILD_BUG_ON(sizeof(*IPCB(skb)) > SKB_SGO_CB_OFFSET); |
@@ -1579,7 +1583,8 @@ void ip_send_unicast_reply(struct sock *sk, struct sk_buff *skb, | |||
1579 | } | 1583 | } |
1580 | 1584 | ||
1581 | oif = arg->bound_dev_if; | 1585 | oif = arg->bound_dev_if; |
1582 | oif = oif ? : skb->skb_iif; | 1586 | if (!oif && netif_index_is_l3_master(net, skb->skb_iif)) |
1587 | oif = skb->skb_iif; | ||
1583 | 1588 | ||
1584 | flowi4_init_output(&fl4, oif, | 1589 | flowi4_init_output(&fl4, oif, |
1585 | IP4_REPLY_MARK(net, skb->mark), | 1590 | IP4_REPLY_MARK(net, skb->mark), |
diff --git a/net/ipv4/ip_tunnel_core.c b/net/ipv4/ip_tunnel_core.c index 777bc1883870..fed3d29f9eb3 100644 --- a/net/ipv4/ip_tunnel_core.c +++ b/net/ipv4/ip_tunnel_core.c | |||
@@ -63,7 +63,6 @@ void iptunnel_xmit(struct sock *sk, struct rtable *rt, struct sk_buff *skb, | |||
63 | int pkt_len = skb->len - skb_inner_network_offset(skb); | 63 | int pkt_len = skb->len - skb_inner_network_offset(skb); |
64 | struct net *net = dev_net(rt->dst.dev); | 64 | struct net *net = dev_net(rt->dst.dev); |
65 | struct net_device *dev = skb->dev; | 65 | struct net_device *dev = skb->dev; |
66 | int skb_iif = skb->skb_iif; | ||
67 | struct iphdr *iph; | 66 | struct iphdr *iph; |
68 | int err; | 67 | int err; |
69 | 68 | ||
@@ -73,16 +72,6 @@ void iptunnel_xmit(struct sock *sk, struct rtable *rt, struct sk_buff *skb, | |||
73 | skb_dst_set(skb, &rt->dst); | 72 | skb_dst_set(skb, &rt->dst); |
74 | memset(IPCB(skb), 0, sizeof(*IPCB(skb))); | 73 | memset(IPCB(skb), 0, sizeof(*IPCB(skb))); |
75 | 74 | ||
76 | if (skb_iif && !(df & htons(IP_DF))) { | ||
77 | /* Arrived from an ingress interface, got encapsulated, with | ||
78 | * fragmentation of encapulating frames allowed. | ||
79 | * If skb is gso, the resulting encapsulated network segments | ||
80 | * may exceed dst mtu. | ||
81 | * Allow IP Fragmentation of segments. | ||
82 | */ | ||
83 | IPCB(skb)->flags |= IPSKB_FRAG_SEGS; | ||
84 | } | ||
85 | |||
86 | /* Push down and install the IP header. */ | 75 | /* Push down and install the IP header. */ |
87 | skb_push(skb, sizeof(struct iphdr)); | 76 | skb_push(skb, sizeof(struct iphdr)); |
88 | skb_reset_network_header(skb); | 77 | skb_reset_network_header(skb); |
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c index 5f006e13de56..27089f5ebbb1 100644 --- a/net/ipv4/ipmr.c +++ b/net/ipv4/ipmr.c | |||
@@ -1749,7 +1749,7 @@ static void ipmr_queue_xmit(struct net *net, struct mr_table *mrt, | |||
1749 | vif->dev->stats.tx_bytes += skb->len; | 1749 | vif->dev->stats.tx_bytes += skb->len; |
1750 | } | 1750 | } |
1751 | 1751 | ||
1752 | IPCB(skb)->flags |= IPSKB_FORWARDED | IPSKB_FRAG_SEGS; | 1752 | IPCB(skb)->flags |= IPSKB_FORWARDED; |
1753 | 1753 | ||
1754 | /* RFC1584 teaches, that DVMRP/PIM router must deliver packets locally | 1754 | /* RFC1584 teaches, that DVMRP/PIM router must deliver packets locally |
1755 | * not only before forwarding, but after forwarding on all output | 1755 | * not only before forwarding, but after forwarding on all output |
diff --git a/net/ipv4/netfilter/nft_dup_ipv4.c b/net/ipv4/netfilter/nft_dup_ipv4.c index bf855e64fc45..0c01a270bf9f 100644 --- a/net/ipv4/netfilter/nft_dup_ipv4.c +++ b/net/ipv4/netfilter/nft_dup_ipv4.c | |||
@@ -28,7 +28,7 @@ static void nft_dup_ipv4_eval(const struct nft_expr *expr, | |||
28 | struct in_addr gw = { | 28 | struct in_addr gw = { |
29 | .s_addr = (__force __be32)regs->data[priv->sreg_addr], | 29 | .s_addr = (__force __be32)regs->data[priv->sreg_addr], |
30 | }; | 30 | }; |
31 | int oif = regs->data[priv->sreg_dev]; | 31 | int oif = priv->sreg_dev ? regs->data[priv->sreg_dev] : -1; |
32 | 32 | ||
33 | nf_dup_ipv4(pkt->net, pkt->skb, pkt->hook, &gw, oif); | 33 | nf_dup_ipv4(pkt->net, pkt->skb, pkt->hook, &gw, oif); |
34 | } | 34 | } |
@@ -59,7 +59,9 @@ static int nft_dup_ipv4_dump(struct sk_buff *skb, const struct nft_expr *expr) | |||
59 | { | 59 | { |
60 | struct nft_dup_ipv4 *priv = nft_expr_priv(expr); | 60 | struct nft_dup_ipv4 *priv = nft_expr_priv(expr); |
61 | 61 | ||
62 | if (nft_dump_register(skb, NFTA_DUP_SREG_ADDR, priv->sreg_addr) || | 62 | if (nft_dump_register(skb, NFTA_DUP_SREG_ADDR, priv->sreg_addr)) |
63 | goto nla_put_failure; | ||
64 | if (priv->sreg_dev && | ||
63 | nft_dump_register(skb, NFTA_DUP_SREG_DEV, priv->sreg_dev)) | 65 | nft_dump_register(skb, NFTA_DUP_SREG_DEV, priv->sreg_dev)) |
64 | goto nla_put_failure; | 66 | goto nla_put_failure; |
65 | 67 | ||
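The nft_dup hunks above make the device register optional: the interface index is only read when a register was actually configured, and a sentinel is used otherwise (the same treatment is applied to the IPv6 variant further down). A minimal user-space sketch of that pattern, with made-up names rather than the real nftables register machinery:

```c
/* Sketch (generic, not nftables internals) of treating register index 0 as
 * "not configured": the value is read only when the register was set up,
 * and a sentinel (-1 here) is passed otherwise.
 */
#include <stdio.h>

#define NREGS 16
static int regs[NREGS] = { 0, 42 };   /* reg 1 holds an interface index */

static int resolve_oif(unsigned int sreg_dev)
{
	return sreg_dev ? regs[sreg_dev] : -1;   /* -1: "no device given" */
}

int main(void)
{
	printf("with reg:    oif=%d\n", resolve_oif(1));
	printf("without reg: oif=%d\n", resolve_oif(0));
	return 0;
}
```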
diff --git a/net/ipv4/route.c b/net/ipv4/route.c index 62d4d90c1389..2a57566e6e91 100644 --- a/net/ipv4/route.c +++ b/net/ipv4/route.c | |||
@@ -753,7 +753,9 @@ static void __ip_do_redirect(struct rtable *rt, struct sk_buff *skb, struct flow | |||
753 | goto reject_redirect; | 753 | goto reject_redirect; |
754 | } | 754 | } |
755 | 755 | ||
756 | n = ipv4_neigh_lookup(&rt->dst, NULL, &new_gw); | 756 | n = __ipv4_neigh_lookup(rt->dst.dev, new_gw); |
757 | if (!n) | ||
758 | n = neigh_create(&arp_tbl, &new_gw, rt->dst.dev); | ||
757 | if (!IS_ERR(n)) { | 759 | if (!IS_ERR(n)) { |
758 | if (!(n->nud_state & NUD_VALID)) { | 760 | if (!(n->nud_state & NUD_VALID)) { |
759 | neigh_event_send(n, NULL); | 761 | neigh_event_send(n, NULL); |
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index 3251fe71f39f..814af89c1bd3 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c | |||
@@ -1164,7 +1164,7 @@ restart: | |||
1164 | 1164 | ||
1165 | err = -EPIPE; | 1165 | err = -EPIPE; |
1166 | if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN)) | 1166 | if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN)) |
1167 | goto out_err; | 1167 | goto do_error; |
1168 | 1168 | ||
1169 | sg = !!(sk->sk_route_caps & NETIF_F_SG); | 1169 | sg = !!(sk->sk_route_caps & NETIF_F_SG); |
1170 | 1170 | ||
@@ -1241,7 +1241,7 @@ new_segment: | |||
1241 | 1241 | ||
1242 | if (!skb_can_coalesce(skb, i, pfrag->page, | 1242 | if (!skb_can_coalesce(skb, i, pfrag->page, |
1243 | pfrag->offset)) { | 1243 | pfrag->offset)) { |
1244 | if (i == sysctl_max_skb_frags || !sg) { | 1244 | if (i >= sysctl_max_skb_frags || !sg) { |
1245 | tcp_mark_push(tp, skb); | 1245 | tcp_mark_push(tp, skb); |
1246 | goto new_segment; | 1246 | goto new_segment; |
1247 | } | 1247 | } |
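The `i >= sysctl_max_skb_frags` change above guards against the limit being a runtime-tunable value: if the sysctl is lowered while an skb already carries more fragments than the new limit, an equality test would never fire again. A stand-alone sketch of why the relational test matters (plain user-space C, hypothetical names):

```c
/* Standalone sketch (not kernel code): why a ">=" bound is safer than "=="
 * when the limit can change at runtime, e.g. via a sysctl. If the limit is
 * lowered while a buffer already holds more fragments than the new limit,
 * an "==" test never triggers again and the buffer keeps growing.
 */
#include <stdio.h>

static int limit = 17;          /* stands in for sysctl_max_skb_frags */

static int must_start_new_segment(int nr_frags)
{
	return nr_frags >= limit;   /* "== limit" would miss nr_frags > limit */
}

int main(void)
{
	int nr_frags = 20;          /* buffer built while the limit was higher */

	limit = 17;                 /* limit lowered at runtime */
	printf("start new segment: %s\n",
	       must_start_new_segment(nr_frags) ? "yes" : "no (bug with ==)");
	return 0;
}
```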
diff --git a/net/ipv4/tcp_dctcp.c b/net/ipv4/tcp_dctcp.c index 10d728b6804c..ab37c6775630 100644 --- a/net/ipv4/tcp_dctcp.c +++ b/net/ipv4/tcp_dctcp.c | |||
@@ -56,6 +56,7 @@ struct dctcp { | |||
56 | u32 next_seq; | 56 | u32 next_seq; |
57 | u32 ce_state; | 57 | u32 ce_state; |
58 | u32 delayed_ack_reserved; | 58 | u32 delayed_ack_reserved; |
59 | u32 loss_cwnd; | ||
59 | }; | 60 | }; |
60 | 61 | ||
61 | static unsigned int dctcp_shift_g __read_mostly = 4; /* g = 1/2^4 */ | 62 | static unsigned int dctcp_shift_g __read_mostly = 4; /* g = 1/2^4 */ |
@@ -96,6 +97,7 @@ static void dctcp_init(struct sock *sk) | |||
96 | ca->dctcp_alpha = min(dctcp_alpha_on_init, DCTCP_MAX_ALPHA); | 97 | ca->dctcp_alpha = min(dctcp_alpha_on_init, DCTCP_MAX_ALPHA); |
97 | 98 | ||
98 | ca->delayed_ack_reserved = 0; | 99 | ca->delayed_ack_reserved = 0; |
100 | ca->loss_cwnd = 0; | ||
99 | ca->ce_state = 0; | 101 | ca->ce_state = 0; |
100 | 102 | ||
101 | dctcp_reset(tp, ca); | 103 | dctcp_reset(tp, ca); |
@@ -111,9 +113,10 @@ static void dctcp_init(struct sock *sk) | |||
111 | 113 | ||
112 | static u32 dctcp_ssthresh(struct sock *sk) | 114 | static u32 dctcp_ssthresh(struct sock *sk) |
113 | { | 115 | { |
114 | const struct dctcp *ca = inet_csk_ca(sk); | 116 | struct dctcp *ca = inet_csk_ca(sk); |
115 | struct tcp_sock *tp = tcp_sk(sk); | 117 | struct tcp_sock *tp = tcp_sk(sk); |
116 | 118 | ||
119 | ca->loss_cwnd = tp->snd_cwnd; | ||
117 | return max(tp->snd_cwnd - ((tp->snd_cwnd * ca->dctcp_alpha) >> 11U), 2U); | 120 | return max(tp->snd_cwnd - ((tp->snd_cwnd * ca->dctcp_alpha) >> 11U), 2U); |
118 | } | 121 | } |
119 | 122 | ||
@@ -308,12 +311,20 @@ static size_t dctcp_get_info(struct sock *sk, u32 ext, int *attr, | |||
308 | return 0; | 311 | return 0; |
309 | } | 312 | } |
310 | 313 | ||
314 | static u32 dctcp_cwnd_undo(struct sock *sk) | ||
315 | { | ||
316 | const struct dctcp *ca = inet_csk_ca(sk); | ||
317 | |||
318 | return max(tcp_sk(sk)->snd_cwnd, ca->loss_cwnd); | ||
319 | } | ||
320 | |||
311 | static struct tcp_congestion_ops dctcp __read_mostly = { | 321 | static struct tcp_congestion_ops dctcp __read_mostly = { |
312 | .init = dctcp_init, | 322 | .init = dctcp_init, |
313 | .in_ack_event = dctcp_update_alpha, | 323 | .in_ack_event = dctcp_update_alpha, |
314 | .cwnd_event = dctcp_cwnd_event, | 324 | .cwnd_event = dctcp_cwnd_event, |
315 | .ssthresh = dctcp_ssthresh, | 325 | .ssthresh = dctcp_ssthresh, |
316 | .cong_avoid = tcp_reno_cong_avoid, | 326 | .cong_avoid = tcp_reno_cong_avoid, |
327 | .undo_cwnd = dctcp_cwnd_undo, | ||
317 | .set_state = dctcp_state, | 328 | .set_state = dctcp_state, |
318 | .get_info = dctcp_get_info, | 329 | .get_info = dctcp_get_info, |
319 | .flags = TCP_CONG_NEEDS_ECN, | 330 | .flags = TCP_CONG_NEEDS_ECN, |
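The DCTCP hunks above remember the congestion window at the time of a reduction (`loss_cwnd`) so that a spurious reduction can be undone. A user-space model of the same arithmetic, with illustrative values rather than the kernel data structures:

```c
/* User-space model of the DCTCP window arithmetic shown above (a sketch,
 * not the kernel implementation). On congestion the window shrinks in
 * proportion to alpha; undo restores at least the window remembered at
 * the time of the reduction.
 */
#include <stdio.h>

struct dctcp_model {
	unsigned int snd_cwnd;
	unsigned int dctcp_alpha;   /* scaled so that 1024 == fully marked */
	unsigned int loss_cwnd;
};

static unsigned int model_ssthresh(struct dctcp_model *ca)
{
	unsigned int reduced;

	ca->loss_cwnd = ca->snd_cwnd;               /* remember for undo */
	reduced = ca->snd_cwnd - ((ca->snd_cwnd * ca->dctcp_alpha) >> 11U);
	return reduced > 2U ? reduced : 2U;
}

static unsigned int model_undo(const struct dctcp_model *ca)
{
	return ca->snd_cwnd > ca->loss_cwnd ? ca->snd_cwnd : ca->loss_cwnd;
}

int main(void)
{
	struct dctcp_model ca = { .snd_cwnd = 100, .dctcp_alpha = 512 };

	printf("ssthresh: %u\n", model_ssthresh(&ca)); /* 100 - 25 = 75 */
	ca.snd_cwnd = 75;
	printf("undo:     %u\n", model_undo(&ca));     /* back to 100 */
	return 0;
}
```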
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index 61b7be303eec..2259114c7242 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c | |||
@@ -1564,6 +1564,21 @@ bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb) | |||
1564 | } | 1564 | } |
1565 | EXPORT_SYMBOL(tcp_add_backlog); | 1565 | EXPORT_SYMBOL(tcp_add_backlog); |
1566 | 1566 | ||
1567 | int tcp_filter(struct sock *sk, struct sk_buff *skb) | ||
1568 | { | ||
1569 | struct tcphdr *th = (struct tcphdr *)skb->data; | ||
1570 | unsigned int eaten = skb->len; | ||
1571 | int err; | ||
1572 | |||
1573 | err = sk_filter_trim_cap(sk, skb, th->doff * 4); | ||
1574 | if (!err) { | ||
1575 | eaten -= skb->len; | ||
1576 | TCP_SKB_CB(skb)->end_seq -= eaten; | ||
1577 | } | ||
1578 | return err; | ||
1579 | } | ||
1580 | EXPORT_SYMBOL(tcp_filter); | ||
1581 | |||
1567 | /* | 1582 | /* |
1568 | * From tcp_input.c | 1583 | * From tcp_input.c |
1569 | */ | 1584 | */ |
@@ -1676,8 +1691,10 @@ process: | |||
1676 | 1691 | ||
1677 | nf_reset(skb); | 1692 | nf_reset(skb); |
1678 | 1693 | ||
1679 | if (sk_filter(sk, skb)) | 1694 | if (tcp_filter(sk, skb)) |
1680 | goto discard_and_relse; | 1695 | goto discard_and_relse; |
1696 | th = (const struct tcphdr *)skb->data; | ||
1697 | iph = ip_hdr(skb); | ||
1681 | 1698 | ||
1682 | skb->dev = NULL; | 1699 | skb->dev = NULL; |
1683 | 1700 | ||
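The new `tcp_filter()` helper above lets a socket filter trim packet payload while keeping the TCP header, and then shrinks the sequence-number bookkeeping by the number of bytes removed; the callers also re-read the header pointers because the filter may have modified the buffer. A simplified model of the accounting, with assumed user-space types:

```c
/* Minimal model (assumed, user-space) of the bookkeeping done by the
 * tcp_filter() helper above: if a filter trims packet payload, the
 * end-of-data sequence number must shrink by the same number of bytes.
 */
#include <stdio.h>

struct pkt {
	unsigned int len;        /* bytes currently in the packet */
	unsigned int end_seq;    /* sequence number just past the payload */
};

/* Pretend the filter trimmed the packet down to 'cap' bytes. */
static void filter_trim(struct pkt *p, unsigned int cap)
{
	if (p->len > cap)
		p->len = cap;
}

static void tcp_filter_model(struct pkt *p, unsigned int header_len)
{
	unsigned int before = p->len;

	filter_trim(p, header_len);          /* never trims into the header */
	p->end_seq -= before - p->len;       /* account for dropped payload */
}

int main(void)
{
	struct pkt p = { .len = 100, .end_seq = 1100 };

	tcp_filter_model(&p, 20);            /* 20-byte header kept */
	printf("len=%u end_seq=%u\n", p.len, p.end_seq); /* len=20 end_seq=1020 */
	return 0;
}
```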
diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c index bd59c343d35f..7370ad2e693a 100644 --- a/net/ipv6/icmp.c +++ b/net/ipv6/icmp.c | |||
@@ -448,7 +448,7 @@ static void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info, | |||
448 | if (__ipv6_addr_needs_scope_id(addr_type)) | 448 | if (__ipv6_addr_needs_scope_id(addr_type)) |
449 | iif = skb->dev->ifindex; | 449 | iif = skb->dev->ifindex; |
450 | else | 450 | else |
451 | iif = l3mdev_master_ifindex(skb->dev); | 451 | iif = l3mdev_master_ifindex(skb_dst(skb)->dev); |
452 | 452 | ||
453 | /* | 453 | /* |
454 | * Must not send error if the source does not uniquely | 454 | * Must not send error if the source does not uniquely |
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c index 6001e781164e..59eb4ed99ce8 100644 --- a/net/ipv6/ip6_output.c +++ b/net/ipv6/ip6_output.c | |||
@@ -1366,7 +1366,7 @@ emsgsize: | |||
1366 | if (((length > mtu) || | 1366 | if (((length > mtu) || |
1367 | (skb && skb_is_gso(skb))) && | 1367 | (skb && skb_is_gso(skb))) && |
1368 | (sk->sk_protocol == IPPROTO_UDP) && | 1368 | (sk->sk_protocol == IPPROTO_UDP) && |
1369 | (rt->dst.dev->features & NETIF_F_UFO) && | 1369 | (rt->dst.dev->features & NETIF_F_UFO) && !rt->dst.header_len && |
1370 | (sk->sk_type == SOCK_DGRAM) && !udp_get_no_check6_tx(sk)) { | 1370 | (sk->sk_type == SOCK_DGRAM) && !udp_get_no_check6_tx(sk)) { |
1371 | err = ip6_ufo_append_data(sk, queue, getfrag, from, length, | 1371 | err = ip6_ufo_append_data(sk, queue, getfrag, from, length, |
1372 | hh_len, fragheaderlen, exthdrlen, | 1372 | hh_len, fragheaderlen, exthdrlen, |
diff --git a/net/ipv6/ip6_udp_tunnel.c b/net/ipv6/ip6_udp_tunnel.c index a7520528ecd2..b283f293ee4a 100644 --- a/net/ipv6/ip6_udp_tunnel.c +++ b/net/ipv6/ip6_udp_tunnel.c | |||
@@ -88,9 +88,6 @@ int udp_tunnel6_xmit_skb(struct dst_entry *dst, struct sock *sk, | |||
88 | 88 | ||
89 | uh->len = htons(skb->len); | 89 | uh->len = htons(skb->len); |
90 | 90 | ||
91 | memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt)); | ||
92 | IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED | ||
93 | | IPSKB_REROUTED); | ||
94 | skb_dst_set(skb, dst); | 91 | skb_dst_set(skb, dst); |
95 | 92 | ||
96 | udp6_set_csum(nocheck, skb, saddr, daddr, skb->len); | 93 | udp6_set_csum(nocheck, skb, saddr, daddr, skb->len); |
diff --git a/net/ipv6/netfilter/nft_dup_ipv6.c b/net/ipv6/netfilter/nft_dup_ipv6.c index 8bfd470cbe72..831f86e1ec08 100644 --- a/net/ipv6/netfilter/nft_dup_ipv6.c +++ b/net/ipv6/netfilter/nft_dup_ipv6.c | |||
@@ -26,7 +26,7 @@ static void nft_dup_ipv6_eval(const struct nft_expr *expr, | |||
26 | { | 26 | { |
27 | struct nft_dup_ipv6 *priv = nft_expr_priv(expr); | 27 | struct nft_dup_ipv6 *priv = nft_expr_priv(expr); |
28 | struct in6_addr *gw = (struct in6_addr *)®s->data[priv->sreg_addr]; | 28 | struct in6_addr *gw = (struct in6_addr *)®s->data[priv->sreg_addr]; |
29 | int oif = regs->data[priv->sreg_dev]; | 29 | int oif = priv->sreg_dev ? regs->data[priv->sreg_dev] : -1; |
30 | 30 | ||
31 | nf_dup_ipv6(pkt->net, pkt->skb, pkt->hook, gw, oif); | 31 | nf_dup_ipv6(pkt->net, pkt->skb, pkt->hook, gw, oif); |
32 | } | 32 | } |
@@ -57,7 +57,9 @@ static int nft_dup_ipv6_dump(struct sk_buff *skb, const struct nft_expr *expr) | |||
57 | { | 57 | { |
58 | struct nft_dup_ipv6 *priv = nft_expr_priv(expr); | 58 | struct nft_dup_ipv6 *priv = nft_expr_priv(expr); |
59 | 59 | ||
60 | if (nft_dump_register(skb, NFTA_DUP_SREG_ADDR, priv->sreg_addr) || | 60 | if (nft_dump_register(skb, NFTA_DUP_SREG_ADDR, priv->sreg_addr)) |
61 | goto nla_put_failure; | ||
62 | if (priv->sreg_dev && | ||
61 | nft_dump_register(skb, NFTA_DUP_SREG_DEV, priv->sreg_dev)) | 63 | nft_dump_register(skb, NFTA_DUP_SREG_DEV, priv->sreg_dev)) |
62 | goto nla_put_failure; | 64 | goto nla_put_failure; |
63 | 65 | ||
diff --git a/net/ipv6/route.c b/net/ipv6/route.c index 947ed1ded026..1b57e11e6e0d 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c | |||
@@ -1364,6 +1364,9 @@ static void __ip6_rt_update_pmtu(struct dst_entry *dst, const struct sock *sk, | |||
1364 | if (rt6->rt6i_flags & RTF_LOCAL) | 1364 | if (rt6->rt6i_flags & RTF_LOCAL) |
1365 | return; | 1365 | return; |
1366 | 1366 | ||
1367 | if (dst_metric_locked(dst, RTAX_MTU)) | ||
1368 | return; | ||
1369 | |||
1367 | dst_confirm(dst); | 1370 | dst_confirm(dst); |
1368 | mtu = max_t(u32, mtu, IPV6_MIN_MTU); | 1371 | mtu = max_t(u32, mtu, IPV6_MIN_MTU); |
1369 | if (mtu >= dst_mtu(dst)) | 1372 | if (mtu >= dst_mtu(dst)) |
@@ -2758,6 +2761,7 @@ static int rt6_mtu_change_route(struct rt6_info *rt, void *p_arg) | |||
2758 | PMTU discouvery. | 2761 | PMTU discouvery. |
2759 | */ | 2762 | */ |
2760 | if (rt->dst.dev == arg->dev && | 2763 | if (rt->dst.dev == arg->dev && |
2764 | dst_metric_raw(&rt->dst, RTAX_MTU) && | ||
2761 | !dst_metric_locked(&rt->dst, RTAX_MTU)) { | 2765 | !dst_metric_locked(&rt->dst, RTAX_MTU)) { |
2762 | if (rt->rt6i_flags & RTF_CACHE) { | 2766 | if (rt->rt6i_flags & RTF_CACHE) { |
2763 | /* For RTF_CACHE with rt6i_pmtu == 0 | 2767 | /* For RTF_CACHE with rt6i_pmtu == 0 |
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c index 5a27ab4eab39..b9f1fee9a886 100644 --- a/net/ipv6/tcp_ipv6.c +++ b/net/ipv6/tcp_ipv6.c | |||
@@ -818,8 +818,12 @@ static void tcp_v6_send_response(const struct sock *sk, struct sk_buff *skb, u32 | |||
818 | fl6.flowi6_proto = IPPROTO_TCP; | 818 | fl6.flowi6_proto = IPPROTO_TCP; |
819 | if (rt6_need_strict(&fl6.daddr) && !oif) | 819 | if (rt6_need_strict(&fl6.daddr) && !oif) |
820 | fl6.flowi6_oif = tcp_v6_iif(skb); | 820 | fl6.flowi6_oif = tcp_v6_iif(skb); |
821 | else | 821 | else { |
822 | fl6.flowi6_oif = oif ? : skb->skb_iif; | 822 | if (!oif && netif_index_is_l3_master(net, skb->skb_iif)) |
823 | oif = skb->skb_iif; | ||
824 | |||
825 | fl6.flowi6_oif = oif; | ||
826 | } | ||
823 | 827 | ||
824 | fl6.flowi6_mark = IP6_REPLY_MARK(net, skb->mark); | 828 | fl6.flowi6_mark = IP6_REPLY_MARK(net, skb->mark); |
825 | fl6.fl6_dport = t1->dest; | 829 | fl6.fl6_dport = t1->dest; |
@@ -1225,7 +1229,7 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb) | |||
1225 | if (skb->protocol == htons(ETH_P_IP)) | 1229 | if (skb->protocol == htons(ETH_P_IP)) |
1226 | return tcp_v4_do_rcv(sk, skb); | 1230 | return tcp_v4_do_rcv(sk, skb); |
1227 | 1231 | ||
1228 | if (sk_filter(sk, skb)) | 1232 | if (tcp_filter(sk, skb)) |
1229 | goto discard; | 1233 | goto discard; |
1230 | 1234 | ||
1231 | /* | 1235 | /* |
@@ -1453,8 +1457,10 @@ process: | |||
1453 | if (tcp_v6_inbound_md5_hash(sk, skb)) | 1457 | if (tcp_v6_inbound_md5_hash(sk, skb)) |
1454 | goto discard_and_relse; | 1458 | goto discard_and_relse; |
1455 | 1459 | ||
1456 | if (sk_filter(sk, skb)) | 1460 | if (tcp_filter(sk, skb)) |
1457 | goto discard_and_relse; | 1461 | goto discard_and_relse; |
1462 | th = (const struct tcphdr *)skb->data; | ||
1463 | hdr = ipv6_hdr(skb); | ||
1458 | 1464 | ||
1459 | skb->dev = NULL; | 1465 | skb->dev = NULL; |
1460 | 1466 | ||
diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c index c3c809b2e712..a6e44ef2ec9a 100644 --- a/net/netfilter/ipvs/ip_vs_ctl.c +++ b/net/netfilter/ipvs/ip_vs_ctl.c | |||
@@ -2845,7 +2845,7 @@ static struct genl_family ip_vs_genl_family = { | |||
2845 | .hdrsize = 0, | 2845 | .hdrsize = 0, |
2846 | .name = IPVS_GENL_NAME, | 2846 | .name = IPVS_GENL_NAME, |
2847 | .version = IPVS_GENL_VERSION, | 2847 | .version = IPVS_GENL_VERSION, |
2848 | .maxattr = IPVS_CMD_MAX, | 2848 | .maxattr = IPVS_CMD_ATTR_MAX, |
2849 | .netnsok = true, /* Make ipvsadm to work on netns */ | 2849 | .netnsok = true, /* Make ipvsadm to work on netns */ |
2850 | }; | 2850 | }; |
2851 | 2851 | ||
diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c index 1b07578bedf3..9350530c16c1 100644 --- a/net/netfilter/ipvs/ip_vs_sync.c +++ b/net/netfilter/ipvs/ip_vs_sync.c | |||
@@ -283,6 +283,7 @@ struct ip_vs_sync_buff { | |||
283 | */ | 283 | */ |
284 | static void ntoh_seq(struct ip_vs_seq *no, struct ip_vs_seq *ho) | 284 | static void ntoh_seq(struct ip_vs_seq *no, struct ip_vs_seq *ho) |
285 | { | 285 | { |
286 | memset(ho, 0, sizeof(*ho)); | ||
286 | ho->init_seq = get_unaligned_be32(&no->init_seq); | 287 | ho->init_seq = get_unaligned_be32(&no->init_seq); |
287 | ho->delta = get_unaligned_be32(&no->delta); | 288 | ho->delta = get_unaligned_be32(&no->delta); |
288 | ho->previous_delta = get_unaligned_be32(&no->previous_delta); | 289 | ho->previous_delta = get_unaligned_be32(&no->previous_delta); |
@@ -917,8 +918,10 @@ static void ip_vs_proc_conn(struct netns_ipvs *ipvs, struct ip_vs_conn_param *pa | |||
917 | kfree(param->pe_data); | 918 | kfree(param->pe_data); |
918 | } | 919 | } |
919 | 920 | ||
920 | if (opt) | 921 | if (opt) { |
921 | memcpy(&cp->in_seq, opt, sizeof(*opt)); | 922 | cp->in_seq = opt->in_seq; |
923 | cp->out_seq = opt->out_seq; | ||
924 | } | ||
922 | atomic_set(&cp->in_pkts, sysctl_sync_threshold(ipvs)); | 925 | atomic_set(&cp->in_pkts, sysctl_sync_threshold(ipvs)); |
923 | cp->state = state; | 926 | cp->state = state; |
924 | cp->old_state = cp->state; | 927 | cp->old_state = cp->state; |
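The ip_vs_sync change above zeroes the destination before filling it and copies the two sequence-state members individually instead of `memcpy()`ing a whole options struct over them. An illustrative sketch of that pattern using made-up structures, not the IPVS types:

```c
/* Illustrative sketch (hypothetical structs, not the IPVS code): zero the
 * destination before filling selected members, and copy the individual
 * members that are actually wanted rather than memcpy()ing a differently
 * laid-out struct over them, so unrelated fields never pick up stale data.
 */
#include <stdio.h>
#include <string.h>

struct seq_state {
	unsigned int init_seq;
	unsigned int delta;
	unsigned int previous_delta;
};

struct conn_opt {
	struct seq_state in_seq;
	struct seq_state out_seq;
	int flags;              /* a blind whole-struct copy would drag this in */
};

struct conn {
	struct seq_state in_seq;
	struct seq_state out_seq;
	int flags;
};

static void apply_opt(struct conn *cp, const struct conn_opt *opt)
{
	/* copy only the sequence state, leave cp->flags alone */
	cp->in_seq = opt->in_seq;
	cp->out_seq = opt->out_seq;
}

int main(void)
{
	struct conn cp;
	struct conn_opt opt = { .in_seq = { 1, 2, 3 }, .out_seq = { 4, 5, 6 },
				.flags = 0x7f };

	memset(&cp, 0, sizeof(cp));          /* start from a known state */
	apply_opt(&cp, &opt);
	printf("in=%u out=%u flags=%d\n",
	       cp.in_seq.init_seq, cp.out_seq.init_seq, cp.flags);
	return 0;
}
```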
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c index df2f5a3901df..0f87e5d21be7 100644 --- a/net/netfilter/nf_conntrack_core.c +++ b/net/netfilter/nf_conntrack_core.c | |||
@@ -76,6 +76,7 @@ struct conntrack_gc_work { | |||
76 | struct delayed_work dwork; | 76 | struct delayed_work dwork; |
77 | u32 last_bucket; | 77 | u32 last_bucket; |
78 | bool exiting; | 78 | bool exiting; |
79 | long next_gc_run; | ||
79 | }; | 80 | }; |
80 | 81 | ||
81 | static __read_mostly struct kmem_cache *nf_conntrack_cachep; | 82 | static __read_mostly struct kmem_cache *nf_conntrack_cachep; |
@@ -83,9 +84,11 @@ static __read_mostly spinlock_t nf_conntrack_locks_all_lock; | |||
83 | static __read_mostly DEFINE_SPINLOCK(nf_conntrack_locks_all_lock); | 84 | static __read_mostly DEFINE_SPINLOCK(nf_conntrack_locks_all_lock); |
84 | static __read_mostly bool nf_conntrack_locks_all; | 85 | static __read_mostly bool nf_conntrack_locks_all; |
85 | 86 | ||
87 | /* every gc cycle scans at most 1/GC_MAX_BUCKETS_DIV part of table */ | ||
86 | #define GC_MAX_BUCKETS_DIV 64u | 88 | #define GC_MAX_BUCKETS_DIV 64u |
87 | #define GC_MAX_BUCKETS 8192u | 89 | /* upper bound of scan intervals */ |
88 | #define GC_INTERVAL (5 * HZ) | 90 | #define GC_INTERVAL_MAX (2 * HZ) |
91 | /* maximum conntracks to evict per gc run */ | ||
89 | #define GC_MAX_EVICTS 256u | 92 | #define GC_MAX_EVICTS 256u |
90 | 93 | ||
91 | static struct conntrack_gc_work conntrack_gc_work; | 94 | static struct conntrack_gc_work conntrack_gc_work; |
@@ -936,13 +939,13 @@ static noinline int early_drop(struct net *net, unsigned int _hash) | |||
936 | static void gc_worker(struct work_struct *work) | 939 | static void gc_worker(struct work_struct *work) |
937 | { | 940 | { |
938 | unsigned int i, goal, buckets = 0, expired_count = 0; | 941 | unsigned int i, goal, buckets = 0, expired_count = 0; |
939 | unsigned long next_run = GC_INTERVAL; | ||
940 | unsigned int ratio, scanned = 0; | ||
941 | struct conntrack_gc_work *gc_work; | 942 | struct conntrack_gc_work *gc_work; |
943 | unsigned int ratio, scanned = 0; | ||
944 | unsigned long next_run; | ||
942 | 945 | ||
943 | gc_work = container_of(work, struct conntrack_gc_work, dwork.work); | 946 | gc_work = container_of(work, struct conntrack_gc_work, dwork.work); |
944 | 947 | ||
945 | goal = min(nf_conntrack_htable_size / GC_MAX_BUCKETS_DIV, GC_MAX_BUCKETS); | 948 | goal = nf_conntrack_htable_size / GC_MAX_BUCKETS_DIV; |
946 | i = gc_work->last_bucket; | 949 | i = gc_work->last_bucket; |
947 | 950 | ||
948 | do { | 951 | do { |
@@ -982,17 +985,47 @@ static void gc_worker(struct work_struct *work) | |||
982 | if (gc_work->exiting) | 985 | if (gc_work->exiting) |
983 | return; | 986 | return; |
984 | 987 | ||
988 | /* | ||
989 | * Eviction will normally happen from the packet path, and not | ||
990 | * from this gc worker. | ||
991 | * | ||
992 | * This worker is only here to reap expired entries when system went | ||
993 | * idle after a busy period. | ||
994 | * | ||
995 | * The heuristics below are supposed to balance conflicting goals: | ||
996 | * | ||
997 | * 1. Minimize time until we notice a stale entry | ||
998 | * 2. Maximize scan intervals to not waste cycles | ||
999 | * | ||
1000 | * Normally, expired_count will be 0, this increases the next_run time | ||
1001 | * to priorize 2) above. | ||
1002 | * | ||
1003 | * As soon as a timed-out entry is found, move towards 1) and increase | ||
1004 | * the scan frequency. | ||
1005 | * In case we have lots of evictions next scan is done immediately. | ||
1006 | */ | ||
985 | ratio = scanned ? expired_count * 100 / scanned : 0; | 1007 | ratio = scanned ? expired_count * 100 / scanned : 0; |
986 | if (ratio >= 90 || expired_count == GC_MAX_EVICTS) | 1008 | if (ratio >= 90 || expired_count == GC_MAX_EVICTS) { |
1009 | gc_work->next_gc_run = 0; | ||
987 | next_run = 0; | 1010 | next_run = 0; |
1011 | } else if (expired_count) { | ||
1012 | gc_work->next_gc_run /= 2U; | ||
1013 | next_run = msecs_to_jiffies(1); | ||
1014 | } else { | ||
1015 | if (gc_work->next_gc_run < GC_INTERVAL_MAX) | ||
1016 | gc_work->next_gc_run += msecs_to_jiffies(1); | ||
1017 | |||
1018 | next_run = gc_work->next_gc_run; | ||
1019 | } | ||
988 | 1020 | ||
989 | gc_work->last_bucket = i; | 1021 | gc_work->last_bucket = i; |
990 | schedule_delayed_work(&gc_work->dwork, next_run); | 1022 | queue_delayed_work(system_long_wq, &gc_work->dwork, next_run); |
991 | } | 1023 | } |
992 | 1024 | ||
993 | static void conntrack_gc_work_init(struct conntrack_gc_work *gc_work) | 1025 | static void conntrack_gc_work_init(struct conntrack_gc_work *gc_work) |
994 | { | 1026 | { |
995 | INIT_DELAYED_WORK(&gc_work->dwork, gc_worker); | 1027 | INIT_DELAYED_WORK(&gc_work->dwork, gc_worker); |
1028 | gc_work->next_gc_run = GC_INTERVAL_MAX; | ||
996 | gc_work->exiting = false; | 1029 | gc_work->exiting = false; |
997 | } | 1030 | } |
998 | 1031 | ||
@@ -1885,7 +1918,7 @@ int nf_conntrack_init_start(void) | |||
1885 | nf_ct_untracked_status_or(IPS_CONFIRMED | IPS_UNTRACKED); | 1918 | nf_ct_untracked_status_or(IPS_CONFIRMED | IPS_UNTRACKED); |
1886 | 1919 | ||
1887 | conntrack_gc_work_init(&conntrack_gc_work); | 1920 | conntrack_gc_work_init(&conntrack_gc_work); |
1888 | schedule_delayed_work(&conntrack_gc_work.dwork, GC_INTERVAL); | 1921 | queue_delayed_work(system_long_wq, &conntrack_gc_work.dwork, GC_INTERVAL_MAX); |
1889 | 1922 | ||
1890 | return 0; | 1923 | return 0; |
1891 | 1924 | ||
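The conntrack garbage-collector rework above replaces the fixed 5-second interval with an adaptive one: the next run comes immediately when a scan is dominated by expired entries, very soon after any expired entry is seen, and only slowly backs off toward the 2-second ceiling while scans come back empty. A small user-space model of that heuristic, with milliseconds standing in for jiffies:

```c
/* User-space model (simplified, detached from jiffies and workqueues) of
 * the adaptive GC interval introduced above: the delay collapses as soon
 * as expired entries show up and creeps back toward an upper bound while
 * scans find nothing.
 */
#include <stdio.h>

#define INTERVAL_MAX_MS 2000u   /* stands in for GC_INTERVAL_MAX (2 * HZ) */
#define MAX_EVICTS      256u

static unsigned int next_gc_run_ms = INTERVAL_MAX_MS;

static unsigned int pick_next_run(unsigned int scanned, unsigned int expired)
{
	unsigned int ratio = scanned ? expired * 100 / scanned : 0;

	if (ratio >= 90 || expired == MAX_EVICTS) {
		next_gc_run_ms = 0;                 /* rescan immediately */
		return 0;
	} else if (expired) {
		next_gc_run_ms /= 2;                /* speed up future scans */
		return 1;                           /* and rescan almost at once */
	}
	if (next_gc_run_ms < INTERVAL_MAX_MS)       /* idle: back off slowly */
		next_gc_run_ms += 1;
	return next_gc_run_ms;
}

int main(void)
{
	printf("idle scan:      next run in %u ms\n", pick_next_run(1000, 0));
	printf("some expired:   next run in %u ms\n", pick_next_run(1000, 10));
	printf("mostly expired: next run in %u ms\n", pick_next_run(1000, 950));
	return 0;
}
```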
diff --git a/net/netfilter/nf_conntrack_helper.c b/net/netfilter/nf_conntrack_helper.c index 336e21559e01..7341adf7059d 100644 --- a/net/netfilter/nf_conntrack_helper.c +++ b/net/netfilter/nf_conntrack_helper.c | |||
@@ -138,9 +138,14 @@ __nf_conntrack_helper_find(const char *name, u16 l3num, u8 protonum) | |||
138 | 138 | ||
139 | for (i = 0; i < nf_ct_helper_hsize; i++) { | 139 | for (i = 0; i < nf_ct_helper_hsize; i++) { |
140 | hlist_for_each_entry_rcu(h, &nf_ct_helper_hash[i], hnode) { | 140 | hlist_for_each_entry_rcu(h, &nf_ct_helper_hash[i], hnode) { |
141 | if (!strcmp(h->name, name) && | 141 | if (strcmp(h->name, name)) |
142 | h->tuple.src.l3num == l3num && | 142 | continue; |
143 | h->tuple.dst.protonum == protonum) | 143 | |
144 | if (h->tuple.src.l3num != NFPROTO_UNSPEC && | ||
145 | h->tuple.src.l3num != l3num) | ||
146 | continue; | ||
147 | |||
148 | if (h->tuple.dst.protonum == protonum) | ||
144 | return h; | 149 | return h; |
145 | } | 150 | } |
146 | } | 151 | } |
diff --git a/net/netfilter/nf_conntrack_sip.c b/net/netfilter/nf_conntrack_sip.c index 621b81c7bddc..c3fc14e021ec 100644 --- a/net/netfilter/nf_conntrack_sip.c +++ b/net/netfilter/nf_conntrack_sip.c | |||
@@ -1436,9 +1436,12 @@ static int process_sip_request(struct sk_buff *skb, unsigned int protoff, | |||
1436 | handler = &sip_handlers[i]; | 1436 | handler = &sip_handlers[i]; |
1437 | if (handler->request == NULL) | 1437 | if (handler->request == NULL) |
1438 | continue; | 1438 | continue; |
1439 | if (*datalen < handler->len || | 1439 | if (*datalen < handler->len + 2 || |
1440 | strncasecmp(*dptr, handler->method, handler->len)) | 1440 | strncasecmp(*dptr, handler->method, handler->len)) |
1441 | continue; | 1441 | continue; |
1442 | if ((*dptr)[handler->len] != ' ' || | ||
1443 | !isalpha((*dptr)[handler->len+1])) | ||
1444 | continue; | ||
1442 | 1445 | ||
1443 | if (ct_sip_get_header(ct, *dptr, 0, *datalen, SIP_HDR_CSEQ, | 1446 | if (ct_sip_get_header(ct, *dptr, 0, *datalen, SIP_HDR_CSEQ, |
1444 | &matchoff, &matchlen) <= 0) { | 1447 | &matchoff, &matchlen) <= 0) { |
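The SIP change above tightens request-line detection: the method name must be followed by a space and then a letter, so a header that merely begins with the same characters is not treated as a request. A stand-alone sketch of the same check:

```c
/* Sketch (user-space, assumed buffer handling) of the stricter SIP method
 * match above: the method must be followed by ' ' plus an alphabetic
 * character before the line counts as a request.
 */
#include <ctype.h>
#include <stdio.h>
#include <string.h>
#include <strings.h>

static int is_request(const char *data, size_t datalen,
		      const char *method, size_t mlen)
{
	if (datalen < mlen + 2)
		return 0;
	if (strncasecmp(data, method, mlen))
		return 0;
	return data[mlen] == ' ' && isalpha((unsigned char)data[mlen + 1]);
}

int main(void)
{
	const char *good = "INVITE sip:bob@example.org SIP/2.0";
	const char *bad  = "INVITED-BY: something";

	printf("%d %d\n",
	       is_request(good, strlen(good), "INVITE", 6),
	       is_request(bad, strlen(bad), "INVITE", 6));   /* prints 1 0 */
	return 0;
}
```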
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c index 24db22257586..026581b04ea8 100644 --- a/net/netfilter/nf_tables_api.c +++ b/net/netfilter/nf_tables_api.c | |||
@@ -2956,12 +2956,14 @@ static int nf_tables_newset(struct net *net, struct sock *nlsk, | |||
2956 | 2956 | ||
2957 | err = nft_trans_set_add(&ctx, NFT_MSG_NEWSET, set); | 2957 | err = nft_trans_set_add(&ctx, NFT_MSG_NEWSET, set); |
2958 | if (err < 0) | 2958 | if (err < 0) |
2959 | goto err2; | 2959 | goto err3; |
2960 | 2960 | ||
2961 | list_add_tail_rcu(&set->list, &table->sets); | 2961 | list_add_tail_rcu(&set->list, &table->sets); |
2962 | table->use++; | 2962 | table->use++; |
2963 | return 0; | 2963 | return 0; |
2964 | 2964 | ||
2965 | err3: | ||
2966 | ops->destroy(set); | ||
2965 | err2: | 2967 | err2: |
2966 | kfree(set); | 2968 | kfree(set); |
2967 | err1: | 2969 | err1: |
@@ -3452,14 +3454,15 @@ void *nft_set_elem_init(const struct nft_set *set, | |||
3452 | return elem; | 3454 | return elem; |
3453 | } | 3455 | } |
3454 | 3456 | ||
3455 | void nft_set_elem_destroy(const struct nft_set *set, void *elem) | 3457 | void nft_set_elem_destroy(const struct nft_set *set, void *elem, |
3458 | bool destroy_expr) | ||
3456 | { | 3459 | { |
3457 | struct nft_set_ext *ext = nft_set_elem_ext(set, elem); | 3460 | struct nft_set_ext *ext = nft_set_elem_ext(set, elem); |
3458 | 3461 | ||
3459 | nft_data_uninit(nft_set_ext_key(ext), NFT_DATA_VALUE); | 3462 | nft_data_uninit(nft_set_ext_key(ext), NFT_DATA_VALUE); |
3460 | if (nft_set_ext_exists(ext, NFT_SET_EXT_DATA)) | 3463 | if (nft_set_ext_exists(ext, NFT_SET_EXT_DATA)) |
3461 | nft_data_uninit(nft_set_ext_data(ext), set->dtype); | 3464 | nft_data_uninit(nft_set_ext_data(ext), set->dtype); |
3462 | if (nft_set_ext_exists(ext, NFT_SET_EXT_EXPR)) | 3465 | if (destroy_expr && nft_set_ext_exists(ext, NFT_SET_EXT_EXPR)) |
3463 | nf_tables_expr_destroy(NULL, nft_set_ext_expr(ext)); | 3466 | nf_tables_expr_destroy(NULL, nft_set_ext_expr(ext)); |
3464 | 3467 | ||
3465 | kfree(elem); | 3468 | kfree(elem); |
@@ -3565,6 +3568,7 @@ static int nft_add_set_elem(struct nft_ctx *ctx, struct nft_set *set, | |||
3565 | dreg = nft_type_to_reg(set->dtype); | 3568 | dreg = nft_type_to_reg(set->dtype); |
3566 | list_for_each_entry(binding, &set->bindings, list) { | 3569 | list_for_each_entry(binding, &set->bindings, list) { |
3567 | struct nft_ctx bind_ctx = { | 3570 | struct nft_ctx bind_ctx = { |
3571 | .net = ctx->net, | ||
3568 | .afi = ctx->afi, | 3572 | .afi = ctx->afi, |
3569 | .table = ctx->table, | 3573 | .table = ctx->table, |
3570 | .chain = (struct nft_chain *)binding->chain, | 3574 | .chain = (struct nft_chain *)binding->chain, |
@@ -3812,7 +3816,7 @@ void nft_set_gc_batch_release(struct rcu_head *rcu) | |||
3812 | 3816 | ||
3813 | gcb = container_of(rcu, struct nft_set_gc_batch, head.rcu); | 3817 | gcb = container_of(rcu, struct nft_set_gc_batch, head.rcu); |
3814 | for (i = 0; i < gcb->head.cnt; i++) | 3818 | for (i = 0; i < gcb->head.cnt; i++) |
3815 | nft_set_elem_destroy(gcb->head.set, gcb->elems[i]); | 3819 | nft_set_elem_destroy(gcb->head.set, gcb->elems[i], true); |
3816 | kfree(gcb); | 3820 | kfree(gcb); |
3817 | } | 3821 | } |
3818 | EXPORT_SYMBOL_GPL(nft_set_gc_batch_release); | 3822 | EXPORT_SYMBOL_GPL(nft_set_gc_batch_release); |
@@ -4030,7 +4034,7 @@ static void nf_tables_commit_release(struct nft_trans *trans) | |||
4030 | break; | 4034 | break; |
4031 | case NFT_MSG_DELSETELEM: | 4035 | case NFT_MSG_DELSETELEM: |
4032 | nft_set_elem_destroy(nft_trans_elem_set(trans), | 4036 | nft_set_elem_destroy(nft_trans_elem_set(trans), |
4033 | nft_trans_elem(trans).priv); | 4037 | nft_trans_elem(trans).priv, true); |
4034 | break; | 4038 | break; |
4035 | } | 4039 | } |
4036 | kfree(trans); | 4040 | kfree(trans); |
@@ -4171,7 +4175,7 @@ static void nf_tables_abort_release(struct nft_trans *trans) | |||
4171 | break; | 4175 | break; |
4172 | case NFT_MSG_NEWSETELEM: | 4176 | case NFT_MSG_NEWSETELEM: |
4173 | nft_set_elem_destroy(nft_trans_elem_set(trans), | 4177 | nft_set_elem_destroy(nft_trans_elem_set(trans), |
4174 | nft_trans_elem(trans).priv); | 4178 | nft_trans_elem(trans).priv, true); |
4175 | break; | 4179 | break; |
4176 | } | 4180 | } |
4177 | kfree(trans); | 4181 | kfree(trans); |
@@ -4421,7 +4425,7 @@ static int nf_tables_check_loops(const struct nft_ctx *ctx, | |||
4421 | * Otherwise a 0 is returned and the attribute value is stored in the | 4425 | * Otherwise a 0 is returned and the attribute value is stored in the |
4422 | * destination variable. | 4426 | * destination variable. |
4423 | */ | 4427 | */ |
4424 | unsigned int nft_parse_u32_check(const struct nlattr *attr, int max, u32 *dest) | 4428 | int nft_parse_u32_check(const struct nlattr *attr, int max, u32 *dest) |
4425 | { | 4429 | { |
4426 | u32 val; | 4430 | u32 val; |
4427 | 4431 | ||
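The `err3` label added above follows the usual cascading-cleanup convention: each failure jumps to a label that undoes everything acquired so far, in reverse order, so the set backend is destroyed before the final `kfree()`. A generic sketch of the pattern with hypothetical names, not the nf_tables code:

```c
/* Generic sketch of the cascading error-path pattern used above: every
 * failure jumps to a label that releases, in reverse order, what was
 * acquired up to that point.
 */
#include <stdio.h>
#include <stdlib.h>

struct thing { int backend_ready; };

static int backend_init(struct thing *t) { t->backend_ready = 1; return 0; }
static void backend_destroy(struct thing *t) { t->backend_ready = 0; }
static int register_thing(struct thing *t) { (void)t; return -1; /* force failure */ }

static int create_thing(void)
{
	struct thing *t;
	int err;

	t = malloc(sizeof(*t));
	if (!t)
		return -1;

	err = backend_init(t);
	if (err)
		goto err_free;

	err = register_thing(t);
	if (err)
		goto err_destroy;            /* undo backend_init first */

	return 0;

err_destroy:
	backend_destroy(t);
err_free:
	free(t);
	return err;
}

int main(void)
{
	printf("create_thing: %d\n", create_thing());
	return 0;
}
```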
diff --git a/net/netfilter/nft_dynset.c b/net/netfilter/nft_dynset.c index 517f08767a3c..31ca94793aa9 100644 --- a/net/netfilter/nft_dynset.c +++ b/net/netfilter/nft_dynset.c | |||
@@ -44,18 +44,22 @@ static void *nft_dynset_new(struct nft_set *set, const struct nft_expr *expr, | |||
44 | ®s->data[priv->sreg_key], | 44 | ®s->data[priv->sreg_key], |
45 | ®s->data[priv->sreg_data], | 45 | ®s->data[priv->sreg_data], |
46 | timeout, GFP_ATOMIC); | 46 | timeout, GFP_ATOMIC); |
47 | if (elem == NULL) { | 47 | if (elem == NULL) |
48 | if (set->size) | 48 | goto err1; |
49 | atomic_dec(&set->nelems); | ||
50 | return NULL; | ||
51 | } | ||
52 | 49 | ||
53 | ext = nft_set_elem_ext(set, elem); | 50 | ext = nft_set_elem_ext(set, elem); |
54 | if (priv->expr != NULL && | 51 | if (priv->expr != NULL && |
55 | nft_expr_clone(nft_set_ext_expr(ext), priv->expr) < 0) | 52 | nft_expr_clone(nft_set_ext_expr(ext), priv->expr) < 0) |
56 | return NULL; | 53 | goto err2; |
57 | 54 | ||
58 | return elem; | 55 | return elem; |
56 | |||
57 | err2: | ||
58 | nft_set_elem_destroy(set, elem, false); | ||
59 | err1: | ||
60 | if (set->size) | ||
61 | atomic_dec(&set->nelems); | ||
62 | return NULL; | ||
59 | } | 63 | } |
60 | 64 | ||
61 | static void nft_dynset_eval(const struct nft_expr *expr, | 65 | static void nft_dynset_eval(const struct nft_expr *expr, |
@@ -139,6 +143,9 @@ static int nft_dynset_init(const struct nft_ctx *ctx, | |||
139 | return PTR_ERR(set); | 143 | return PTR_ERR(set); |
140 | } | 144 | } |
141 | 145 | ||
146 | if (set->ops->update == NULL) | ||
147 | return -EOPNOTSUPP; | ||
148 | |||
142 | if (set->flags & NFT_SET_CONSTANT) | 149 | if (set->flags & NFT_SET_CONSTANT) |
143 | return -EBUSY; | 150 | return -EBUSY; |
144 | 151 | ||
diff --git a/net/netfilter/nft_set_hash.c b/net/netfilter/nft_set_hash.c index 3794cb2fc788..a3dface3e6e6 100644 --- a/net/netfilter/nft_set_hash.c +++ b/net/netfilter/nft_set_hash.c | |||
@@ -98,7 +98,7 @@ static bool nft_hash_update(struct nft_set *set, const u32 *key, | |||
98 | const struct nft_set_ext **ext) | 98 | const struct nft_set_ext **ext) |
99 | { | 99 | { |
100 | struct nft_hash *priv = nft_set_priv(set); | 100 | struct nft_hash *priv = nft_set_priv(set); |
101 | struct nft_hash_elem *he; | 101 | struct nft_hash_elem *he, *prev; |
102 | struct nft_hash_cmp_arg arg = { | 102 | struct nft_hash_cmp_arg arg = { |
103 | .genmask = NFT_GENMASK_ANY, | 103 | .genmask = NFT_GENMASK_ANY, |
104 | .set = set, | 104 | .set = set, |
@@ -112,15 +112,24 @@ static bool nft_hash_update(struct nft_set *set, const u32 *key, | |||
112 | he = new(set, expr, regs); | 112 | he = new(set, expr, regs); |
113 | if (he == NULL) | 113 | if (he == NULL) |
114 | goto err1; | 114 | goto err1; |
115 | if (rhashtable_lookup_insert_key(&priv->ht, &arg, &he->node, | 115 | |
116 | nft_hash_params)) | 116 | prev = rhashtable_lookup_get_insert_key(&priv->ht, &arg, &he->node, |
117 | nft_hash_params); | ||
118 | if (IS_ERR(prev)) | ||
117 | goto err2; | 119 | goto err2; |
120 | |||
121 | /* Another cpu may race to insert the element with the same key */ | ||
122 | if (prev) { | ||
123 | nft_set_elem_destroy(set, he, true); | ||
124 | he = prev; | ||
125 | } | ||
126 | |||
118 | out: | 127 | out: |
119 | *ext = &he->ext; | 128 | *ext = &he->ext; |
120 | return true; | 129 | return true; |
121 | 130 | ||
122 | err2: | 131 | err2: |
123 | nft_set_elem_destroy(set, he); | 132 | nft_set_elem_destroy(set, he, true); |
124 | err1: | 133 | err1: |
125 | return false; | 134 | return false; |
126 | } | 135 | } |
@@ -332,7 +341,7 @@ static int nft_hash_init(const struct nft_set *set, | |||
332 | 341 | ||
333 | static void nft_hash_elem_destroy(void *ptr, void *arg) | 342 | static void nft_hash_elem_destroy(void *ptr, void *arg) |
334 | { | 343 | { |
335 | nft_set_elem_destroy((const struct nft_set *)arg, ptr); | 344 | nft_set_elem_destroy((const struct nft_set *)arg, ptr, true); |
336 | } | 345 | } |
337 | 346 | ||
338 | static void nft_hash_destroy(const struct nft_set *set) | 347 | static void nft_hash_destroy(const struct nft_set *set) |
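The nft_set_hash change above handles two CPUs racing to insert the same key: the lookup-and-insert primitive may return an already-present element, in which case the freshly built one is destroyed and the existing winner is adopted. A single-threaded model of that insert-or-adopt logic, using a toy table rather than an rhashtable:

```c
/* Single-threaded sketch of the "insert or adopt the winner" logic above:
 * the insert primitive may report that an element with the same key is
 * already present; the new element is then freed and the existing one used.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct elem { char key[16]; int value; };

#define TABLE_SIZE 8
static struct elem *table[TABLE_SIZE];

/* Returns NULL if 'e' was inserted, or the already-present element. */
static struct elem *lookup_insert(struct elem *e)
{
	unsigned int i;

	for (i = 0; i < TABLE_SIZE; i++)
		if (table[i] && !strcmp(table[i]->key, e->key))
			return table[i];            /* key already present */
	for (i = 0; i < TABLE_SIZE; i++)
		if (!table[i]) {
			table[i] = e;
			return NULL;                /* inserted */
		}
	fprintf(stderr, "table full\n");
	exit(1);
}

static struct elem *update(const char *key, int value)
{
	struct elem *e = malloc(sizeof(*e)), *prev;

	if (!e)
		exit(1);
	snprintf(e->key, sizeof(e->key), "%s", key);
	e->value = value;

	prev = lookup_insert(e);
	if (prev) {             /* lost the race: keep the existing element */
		free(e);
		e = prev;
	}
	return e;
}

int main(void)
{
	struct elem *a = update("flow-1", 1);
	struct elem *b = update("flow-1", 2);   /* same key: adopts 'a' */

	printf("same element: %s (value %d)\n", a == b ? "yes" : "no", b->value);
	return 0;
}
```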
diff --git a/net/netfilter/nft_set_rbtree.c b/net/netfilter/nft_set_rbtree.c index 38b5bda242f8..36493a7cae88 100644 --- a/net/netfilter/nft_set_rbtree.c +++ b/net/netfilter/nft_set_rbtree.c | |||
@@ -266,7 +266,7 @@ static void nft_rbtree_destroy(const struct nft_set *set) | |||
266 | while ((node = priv->root.rb_node) != NULL) { | 266 | while ((node = priv->root.rb_node) != NULL) { |
267 | rb_erase(node, &priv->root); | 267 | rb_erase(node, &priv->root); |
268 | rbe = rb_entry(node, struct nft_rbtree_elem, node); | 268 | rbe = rb_entry(node, struct nft_rbtree_elem, node); |
269 | nft_set_elem_destroy(set, rbe); | 269 | nft_set_elem_destroy(set, rbe, true); |
270 | } | 270 | } |
271 | } | 271 | } |
272 | 272 | ||
diff --git a/net/netfilter/xt_connmark.c b/net/netfilter/xt_connmark.c index 69f78e96fdb4..b83e158e116a 100644 --- a/net/netfilter/xt_connmark.c +++ b/net/netfilter/xt_connmark.c | |||
@@ -44,7 +44,7 @@ connmark_tg(struct sk_buff *skb, const struct xt_action_param *par) | |||
44 | u_int32_t newmark; | 44 | u_int32_t newmark; |
45 | 45 | ||
46 | ct = nf_ct_get(skb, &ctinfo); | 46 | ct = nf_ct_get(skb, &ctinfo); |
47 | if (ct == NULL) | 47 | if (ct == NULL || nf_ct_is_untracked(ct)) |
48 | return XT_CONTINUE; | 48 | return XT_CONTINUE; |
49 | 49 | ||
50 | switch (info->mode) { | 50 | switch (info->mode) { |
@@ -97,7 +97,7 @@ connmark_mt(const struct sk_buff *skb, struct xt_action_param *par) | |||
97 | const struct nf_conn *ct; | 97 | const struct nf_conn *ct; |
98 | 98 | ||
99 | ct = nf_ct_get(skb, &ctinfo); | 99 | ct = nf_ct_get(skb, &ctinfo); |
100 | if (ct == NULL) | 100 | if (ct == NULL || nf_ct_is_untracked(ct)) |
101 | return false; | 101 | return false; |
102 | 102 | ||
103 | return ((ct->mark & info->mask) == info->mark) ^ info->invert; | 103 | return ((ct->mark & info->mask) == info->mark) ^ info->invert; |
diff --git a/net/netlink/diag.c b/net/netlink/diag.c index b2f0e986a6f4..a5546249fb10 100644 --- a/net/netlink/diag.c +++ b/net/netlink/diag.c | |||
@@ -178,11 +178,8 @@ static int netlink_diag_dump(struct sk_buff *skb, struct netlink_callback *cb) | |||
178 | } | 178 | } |
179 | cb->args[1] = i; | 179 | cb->args[1] = i; |
180 | } else { | 180 | } else { |
181 | if (req->sdiag_protocol >= MAX_LINKS) { | 181 | if (req->sdiag_protocol >= MAX_LINKS) |
182 | read_unlock(&nl_table_lock); | ||
183 | rcu_read_unlock(); | ||
184 | return -ENOENT; | 182 | return -ENOENT; |
185 | } | ||
186 | 183 | ||
187 | err = __netlink_diag_dump(skb, cb, req->sdiag_protocol, s_num); | 184 | err = __netlink_diag_dump(skb, cb, req->sdiag_protocol, s_num); |
188 | } | 185 | } |
diff --git a/net/netlink/genetlink.c b/net/netlink/genetlink.c index 23cc12639ba7..49c28e8ef01b 100644 --- a/net/netlink/genetlink.c +++ b/net/netlink/genetlink.c | |||
@@ -404,7 +404,7 @@ int __genl_register_family(struct genl_family *family) | |||
404 | 404 | ||
405 | err = genl_validate_assign_mc_groups(family); | 405 | err = genl_validate_assign_mc_groups(family); |
406 | if (err) | 406 | if (err) |
407 | goto errout_locked; | 407 | goto errout_free; |
408 | 408 | ||
409 | list_add_tail(&family->family_list, genl_family_chain(family->id)); | 409 | list_add_tail(&family->family_list, genl_family_chain(family->id)); |
410 | genl_unlock_all(); | 410 | genl_unlock_all(); |
@@ -417,6 +417,8 @@ int __genl_register_family(struct genl_family *family) | |||
417 | 417 | ||
418 | return 0; | 418 | return 0; |
419 | 419 | ||
420 | errout_free: | ||
421 | kfree(family->attrbuf); | ||
420 | errout_locked: | 422 | errout_locked: |
421 | genl_unlock_all(); | 423 | genl_unlock_all(); |
422 | errout: | 424 | errout: |
diff --git a/net/sctp/input.c b/net/sctp/input.c index a2ea1d1cc06a..a01a56ec8b8c 100644 --- a/net/sctp/input.c +++ b/net/sctp/input.c | |||
@@ -181,9 +181,10 @@ int sctp_rcv(struct sk_buff *skb) | |||
181 | * bound to another interface, via SO_BINDTODEVICE, treat it as OOTB | 181 | * bound to another interface, via SO_BINDTODEVICE, treat it as OOTB |
182 | */ | 182 | */ |
183 | if (sk->sk_bound_dev_if && (sk->sk_bound_dev_if != af->skb_iif(skb))) { | 183 | if (sk->sk_bound_dev_if && (sk->sk_bound_dev_if != af->skb_iif(skb))) { |
184 | if (asoc) { | 184 | if (transport) { |
185 | sctp_association_put(asoc); | 185 | sctp_transport_put(transport); |
186 | asoc = NULL; | 186 | asoc = NULL; |
187 | transport = NULL; | ||
187 | } else { | 188 | } else { |
188 | sctp_endpoint_put(ep); | 189 | sctp_endpoint_put(ep); |
189 | ep = NULL; | 190 | ep = NULL; |
@@ -269,8 +270,8 @@ int sctp_rcv(struct sk_buff *skb) | |||
269 | bh_unlock_sock(sk); | 270 | bh_unlock_sock(sk); |
270 | 271 | ||
271 | /* Release the asoc/ep ref we took in the lookup calls. */ | 272 | /* Release the asoc/ep ref we took in the lookup calls. */ |
272 | if (asoc) | 273 | if (transport) |
273 | sctp_association_put(asoc); | 274 | sctp_transport_put(transport); |
274 | else | 275 | else |
275 | sctp_endpoint_put(ep); | 276 | sctp_endpoint_put(ep); |
276 | 277 | ||
@@ -283,8 +284,8 @@ discard_it: | |||
283 | 284 | ||
284 | discard_release: | 285 | discard_release: |
285 | /* Release the asoc/ep ref we took in the lookup calls. */ | 286 | /* Release the asoc/ep ref we took in the lookup calls. */ |
286 | if (asoc) | 287 | if (transport) |
287 | sctp_association_put(asoc); | 288 | sctp_transport_put(transport); |
288 | else | 289 | else |
289 | sctp_endpoint_put(ep); | 290 | sctp_endpoint_put(ep); |
290 | 291 | ||
@@ -300,6 +301,7 @@ int sctp_backlog_rcv(struct sock *sk, struct sk_buff *skb) | |||
300 | { | 301 | { |
301 | struct sctp_chunk *chunk = SCTP_INPUT_CB(skb)->chunk; | 302 | struct sctp_chunk *chunk = SCTP_INPUT_CB(skb)->chunk; |
302 | struct sctp_inq *inqueue = &chunk->rcvr->inqueue; | 303 | struct sctp_inq *inqueue = &chunk->rcvr->inqueue; |
304 | struct sctp_transport *t = chunk->transport; | ||
303 | struct sctp_ep_common *rcvr = NULL; | 305 | struct sctp_ep_common *rcvr = NULL; |
304 | int backloged = 0; | 306 | int backloged = 0; |
305 | 307 | ||
@@ -351,7 +353,7 @@ int sctp_backlog_rcv(struct sock *sk, struct sk_buff *skb) | |||
351 | done: | 353 | done: |
352 | /* Release the refs we took in sctp_add_backlog */ | 354 | /* Release the refs we took in sctp_add_backlog */ |
353 | if (SCTP_EP_TYPE_ASSOCIATION == rcvr->type) | 355 | if (SCTP_EP_TYPE_ASSOCIATION == rcvr->type) |
354 | sctp_association_put(sctp_assoc(rcvr)); | 356 | sctp_transport_put(t); |
355 | else if (SCTP_EP_TYPE_SOCKET == rcvr->type) | 357 | else if (SCTP_EP_TYPE_SOCKET == rcvr->type) |
356 | sctp_endpoint_put(sctp_ep(rcvr)); | 358 | sctp_endpoint_put(sctp_ep(rcvr)); |
357 | else | 359 | else |
@@ -363,6 +365,7 @@ done: | |||
363 | static int sctp_add_backlog(struct sock *sk, struct sk_buff *skb) | 365 | static int sctp_add_backlog(struct sock *sk, struct sk_buff *skb) |
364 | { | 366 | { |
365 | struct sctp_chunk *chunk = SCTP_INPUT_CB(skb)->chunk; | 367 | struct sctp_chunk *chunk = SCTP_INPUT_CB(skb)->chunk; |
368 | struct sctp_transport *t = chunk->transport; | ||
366 | struct sctp_ep_common *rcvr = chunk->rcvr; | 369 | struct sctp_ep_common *rcvr = chunk->rcvr; |
367 | int ret; | 370 | int ret; |
368 | 371 | ||
@@ -373,7 +376,7 @@ static int sctp_add_backlog(struct sock *sk, struct sk_buff *skb) | |||
373 | * from us | 376 | * from us |
374 | */ | 377 | */ |
375 | if (SCTP_EP_TYPE_ASSOCIATION == rcvr->type) | 378 | if (SCTP_EP_TYPE_ASSOCIATION == rcvr->type) |
376 | sctp_association_hold(sctp_assoc(rcvr)); | 379 | sctp_transport_hold(t); |
377 | else if (SCTP_EP_TYPE_SOCKET == rcvr->type) | 380 | else if (SCTP_EP_TYPE_SOCKET == rcvr->type) |
378 | sctp_endpoint_hold(sctp_ep(rcvr)); | 381 | sctp_endpoint_hold(sctp_ep(rcvr)); |
379 | else | 382 | else |
@@ -537,15 +540,15 @@ struct sock *sctp_err_lookup(struct net *net, int family, struct sk_buff *skb, | |||
537 | return sk; | 540 | return sk; |
538 | 541 | ||
539 | out: | 542 | out: |
540 | sctp_association_put(asoc); | 543 | sctp_transport_put(transport); |
541 | return NULL; | 544 | return NULL; |
542 | } | 545 | } |
543 | 546 | ||
544 | /* Common cleanup code for icmp/icmpv6 error handler. */ | 547 | /* Common cleanup code for icmp/icmpv6 error handler. */ |
545 | void sctp_err_finish(struct sock *sk, struct sctp_association *asoc) | 548 | void sctp_err_finish(struct sock *sk, struct sctp_transport *t) |
546 | { | 549 | { |
547 | bh_unlock_sock(sk); | 550 | bh_unlock_sock(sk); |
548 | sctp_association_put(asoc); | 551 | sctp_transport_put(t); |
549 | } | 552 | } |
550 | 553 | ||
551 | /* | 554 | /* |
@@ -641,7 +644,7 @@ void sctp_v4_err(struct sk_buff *skb, __u32 info) | |||
641 | } | 644 | } |
642 | 645 | ||
643 | out_unlock: | 646 | out_unlock: |
644 | sctp_err_finish(sk, asoc); | 647 | sctp_err_finish(sk, transport); |
645 | } | 648 | } |
646 | 649 | ||
647 | /* | 650 | /* |
@@ -952,11 +955,8 @@ static struct sctp_association *__sctp_lookup_association( | |||
952 | goto out; | 955 | goto out; |
953 | 956 | ||
954 | asoc = t->asoc; | 957 | asoc = t->asoc; |
955 | sctp_association_hold(asoc); | ||
956 | *pt = t; | 958 | *pt = t; |
957 | 959 | ||
958 | sctp_transport_put(t); | ||
959 | |||
960 | out: | 960 | out: |
961 | return asoc; | 961 | return asoc; |
962 | } | 962 | } |
@@ -986,7 +986,7 @@ int sctp_has_association(struct net *net, | |||
986 | struct sctp_transport *transport; | 986 | struct sctp_transport *transport; |
987 | 987 | ||
988 | if ((asoc = sctp_lookup_association(net, laddr, paddr, &transport))) { | 988 | if ((asoc = sctp_lookup_association(net, laddr, paddr, &transport))) { |
989 | sctp_association_put(asoc); | 989 | sctp_transport_put(transport); |
990 | return 1; | 990 | return 1; |
991 | } | 991 | } |
992 | 992 | ||
@@ -1021,7 +1021,6 @@ static struct sctp_association *__sctp_rcv_init_lookup(struct net *net, | |||
1021 | struct sctphdr *sh = sctp_hdr(skb); | 1021 | struct sctphdr *sh = sctp_hdr(skb); |
1022 | union sctp_params params; | 1022 | union sctp_params params; |
1023 | sctp_init_chunk_t *init; | 1023 | sctp_init_chunk_t *init; |
1024 | struct sctp_transport *transport; | ||
1025 | struct sctp_af *af; | 1024 | struct sctp_af *af; |
1026 | 1025 | ||
1027 | /* | 1026 | /* |
@@ -1052,7 +1051,7 @@ static struct sctp_association *__sctp_rcv_init_lookup(struct net *net, | |||
1052 | 1051 | ||
1053 | af->from_addr_param(paddr, params.addr, sh->source, 0); | 1052 | af->from_addr_param(paddr, params.addr, sh->source, 0); |
1054 | 1053 | ||
1055 | asoc = __sctp_lookup_association(net, laddr, paddr, &transport); | 1054 | asoc = __sctp_lookup_association(net, laddr, paddr, transportp); |
1056 | if (asoc) | 1055 | if (asoc) |
1057 | return asoc; | 1056 | return asoc; |
1058 | } | 1057 | } |
diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c index f473779e8b1c..176af3080a2b 100644 --- a/net/sctp/ipv6.c +++ b/net/sctp/ipv6.c | |||
@@ -198,7 +198,7 @@ static void sctp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt, | |||
198 | } | 198 | } |
199 | 199 | ||
200 | out_unlock: | 200 | out_unlock: |
201 | sctp_err_finish(sk, asoc); | 201 | sctp_err_finish(sk, transport); |
202 | out: | 202 | out: |
203 | if (likely(idev != NULL)) | 203 | if (likely(idev != NULL)) |
204 | in6_dev_put(idev); | 204 | in6_dev_put(idev); |
diff --git a/net/sctp/socket.c b/net/sctp/socket.c index 9fbb6feb8c27..f23ad913dc7a 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c | |||
@@ -1214,9 +1214,12 @@ static int __sctp_connect(struct sock *sk, | |||
1214 | 1214 | ||
1215 | timeo = sock_sndtimeo(sk, f_flags & O_NONBLOCK); | 1215 | timeo = sock_sndtimeo(sk, f_flags & O_NONBLOCK); |
1216 | 1216 | ||
1217 | err = sctp_wait_for_connect(asoc, &timeo); | 1217 | if (assoc_id) |
1218 | if ((err == 0 || err == -EINPROGRESS) && assoc_id) | ||
1219 | *assoc_id = asoc->assoc_id; | 1218 | *assoc_id = asoc->assoc_id; |
1219 | err = sctp_wait_for_connect(asoc, &timeo); | ||
1220 | /* Note: the asoc may be freed after the return of | ||
1221 | * sctp_wait_for_connect. | ||
1222 | */ | ||
1220 | 1223 | ||
1221 | /* Don't free association on exit. */ | 1224 | /* Don't free association on exit. */ |
1222 | asoc = NULL; | 1225 | asoc = NULL; |
@@ -4282,19 +4285,18 @@ static void sctp_shutdown(struct sock *sk, int how) | |||
4282 | { | 4285 | { |
4283 | struct net *net = sock_net(sk); | 4286 | struct net *net = sock_net(sk); |
4284 | struct sctp_endpoint *ep; | 4287 | struct sctp_endpoint *ep; |
4285 | struct sctp_association *asoc; | ||
4286 | 4288 | ||
4287 | if (!sctp_style(sk, TCP)) | 4289 | if (!sctp_style(sk, TCP)) |
4288 | return; | 4290 | return; |
4289 | 4291 | ||
4290 | if (how & SEND_SHUTDOWN) { | 4292 | ep = sctp_sk(sk)->ep; |
4293 | if (how & SEND_SHUTDOWN && !list_empty(&ep->asocs)) { | ||
4294 | struct sctp_association *asoc; | ||
4295 | |||
4291 | sk->sk_state = SCTP_SS_CLOSING; | 4296 | sk->sk_state = SCTP_SS_CLOSING; |
4292 | ep = sctp_sk(sk)->ep; | 4297 | asoc = list_entry(ep->asocs.next, |
4293 | if (!list_empty(&ep->asocs)) { | 4298 | struct sctp_association, asocs); |
4294 | asoc = list_entry(ep->asocs.next, | 4299 | sctp_primitive_SHUTDOWN(net, asoc, NULL); |
4295 | struct sctp_association, asocs); | ||
4296 | sctp_primitive_SHUTDOWN(net, asoc, NULL); | ||
4297 | } | ||
4298 | } | 4300 | } |
4299 | } | 4301 | } |
4300 | 4302 | ||
@@ -4480,12 +4482,9 @@ int sctp_transport_lookup_process(int (*cb)(struct sctp_transport *, void *), | |||
4480 | if (!transport || !sctp_transport_hold(transport)) | 4482 | if (!transport || !sctp_transport_hold(transport)) |
4481 | goto out; | 4483 | goto out; |
4482 | 4484 | ||
4483 | sctp_association_hold(transport->asoc); | ||
4484 | sctp_transport_put(transport); | ||
4485 | |||
4486 | rcu_read_unlock(); | 4485 | rcu_read_unlock(); |
4487 | err = cb(transport, p); | 4486 | err = cb(transport, p); |
4488 | sctp_association_put(transport->asoc); | 4487 | sctp_transport_put(transport); |
4489 | 4488 | ||
4490 | out: | 4489 | out: |
4491 | return err; | 4490 | return err; |
diff --git a/net/socket.c b/net/socket.c index 5a9bf5ee2464..73dc69f9681e 100644 --- a/net/socket.c +++ b/net/socket.c | |||
@@ -341,8 +341,23 @@ static const struct xattr_handler sockfs_xattr_handler = { | |||
341 | .get = sockfs_xattr_get, | 341 | .get = sockfs_xattr_get, |
342 | }; | 342 | }; |
343 | 343 | ||
344 | static int sockfs_security_xattr_set(const struct xattr_handler *handler, | ||
345 | struct dentry *dentry, struct inode *inode, | ||
346 | const char *suffix, const void *value, | ||
347 | size_t size, int flags) | ||
348 | { | ||
349 | /* Handled by LSM. */ | ||
350 | return -EAGAIN; | ||
351 | } | ||
352 | |||
353 | static const struct xattr_handler sockfs_security_xattr_handler = { | ||
354 | .prefix = XATTR_SECURITY_PREFIX, | ||
355 | .set = sockfs_security_xattr_set, | ||
356 | }; | ||
357 | |||
344 | static const struct xattr_handler *sockfs_xattr_handlers[] = { | 358 | static const struct xattr_handler *sockfs_xattr_handlers[] = { |
345 | &sockfs_xattr_handler, | 359 | &sockfs_xattr_handler, |
360 | &sockfs_security_xattr_handler, | ||
346 | NULL | 361 | NULL |
347 | }; | 362 | }; |
348 | 363 | ||
@@ -2038,6 +2053,8 @@ int __sys_sendmmsg(int fd, struct mmsghdr __user *mmsg, unsigned int vlen, | |||
2038 | if (err) | 2053 | if (err) |
2039 | break; | 2054 | break; |
2040 | ++datagrams; | 2055 | ++datagrams; |
2056 | if (msg_data_left(&msg_sys)) | ||
2057 | break; | ||
2041 | cond_resched(); | 2058 | cond_resched(); |
2042 | } | 2059 | } |
2043 | 2060 | ||
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c index 34dd7b26ee5f..62a482790937 100644 --- a/net/sunrpc/clnt.c +++ b/net/sunrpc/clnt.c | |||
@@ -2753,14 +2753,18 @@ EXPORT_SYMBOL_GPL(rpc_cap_max_reconnect_timeout); | |||
2753 | 2753 | ||
2754 | void rpc_clnt_xprt_switch_put(struct rpc_clnt *clnt) | 2754 | void rpc_clnt_xprt_switch_put(struct rpc_clnt *clnt) |
2755 | { | 2755 | { |
2756 | rcu_read_lock(); | ||
2756 | xprt_switch_put(rcu_dereference(clnt->cl_xpi.xpi_xpswitch)); | 2757 | xprt_switch_put(rcu_dereference(clnt->cl_xpi.xpi_xpswitch)); |
2758 | rcu_read_unlock(); | ||
2757 | } | 2759 | } |
2758 | EXPORT_SYMBOL_GPL(rpc_clnt_xprt_switch_put); | 2760 | EXPORT_SYMBOL_GPL(rpc_clnt_xprt_switch_put); |
2759 | 2761 | ||
2760 | void rpc_clnt_xprt_switch_add_xprt(struct rpc_clnt *clnt, struct rpc_xprt *xprt) | 2762 | void rpc_clnt_xprt_switch_add_xprt(struct rpc_clnt *clnt, struct rpc_xprt *xprt) |
2761 | { | 2763 | { |
2764 | rcu_read_lock(); | ||
2762 | rpc_xprt_switch_add_xprt(rcu_dereference(clnt->cl_xpi.xpi_xpswitch), | 2765 | rpc_xprt_switch_add_xprt(rcu_dereference(clnt->cl_xpi.xpi_xpswitch), |
2763 | xprt); | 2766 | xprt); |
2767 | rcu_read_unlock(); | ||
2764 | } | 2768 | } |
2765 | EXPORT_SYMBOL_GPL(rpc_clnt_xprt_switch_add_xprt); | 2769 | EXPORT_SYMBOL_GPL(rpc_clnt_xprt_switch_add_xprt); |
2766 | 2770 | ||
@@ -2770,9 +2774,8 @@ bool rpc_clnt_xprt_switch_has_addr(struct rpc_clnt *clnt, | |||
2770 | struct rpc_xprt_switch *xps; | 2774 | struct rpc_xprt_switch *xps; |
2771 | bool ret; | 2775 | bool ret; |
2772 | 2776 | ||
2773 | xps = rcu_dereference(clnt->cl_xpi.xpi_xpswitch); | ||
2774 | |||
2775 | rcu_read_lock(); | 2777 | rcu_read_lock(); |
2778 | xps = rcu_dereference(clnt->cl_xpi.xpi_xpswitch); | ||
2776 | ret = rpc_xprt_switch_has_addr(xps, sap); | 2779 | ret = rpc_xprt_switch_has_addr(xps, sap); |
2777 | rcu_read_unlock(); | 2780 | rcu_read_unlock(); |
2778 | return ret; | 2781 | return ret; |
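The sunrpc fixes above move `rcu_dereference()` inside the `rcu_read_lock()`/`rcu_read_unlock()` pair, since an RCU-protected pointer may only be fetched and used within a read-side critical section. A pattern-only sketch; the RCU calls are stubbed so the example compiles stand-alone and stands in for the real kernel primitives:

```c
/* Pattern sketch only: the RCU calls below are stubs so this builds as a
 * plain user-space program. The point is the ordering fixed above: the
 * RCU-protected pointer is fetched and used inside the read-side section.
 */
#include <stdio.h>

#define rcu_read_lock()        do { } while (0)   /* stub */
#define rcu_read_unlock()      do { } while (0)   /* stub */
#define rcu_dereference(p)     (p)                /* stub */

struct xprt_switch { int nxprts; };

static struct xprt_switch the_switch = { .nxprts = 2 };
static struct xprt_switch *current_switch = &the_switch;

static int switch_has_addr(void)
{
	struct xprt_switch *xps;
	int ret;

	rcu_read_lock();
	xps = rcu_dereference(current_switch);  /* fetch inside the section */
	ret = xps->nxprts > 0;                  /* ...and use it there too */
	rcu_read_unlock();

	return ret;
}

int main(void)
{
	printf("has addr: %d\n", switch_has_addr());
	return 0;
}
```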
diff --git a/net/sunrpc/xprtrdma/frwr_ops.c b/net/sunrpc/xprtrdma/frwr_ops.c index 210949562786..26b26beef2d4 100644 --- a/net/sunrpc/xprtrdma/frwr_ops.c +++ b/net/sunrpc/xprtrdma/frwr_ops.c | |||
@@ -44,18 +44,20 @@ | |||
44 | * being done. | 44 | * being done. |
45 | * | 45 | * |
46 | * When the underlying transport disconnects, MRs are left in one of | 46 | * When the underlying transport disconnects, MRs are left in one of |
47 | * three states: | 47 | * four states: |
48 | * | 48 | * |
49 | * INVALID: The MR was not in use before the QP entered ERROR state. | 49 | * INVALID: The MR was not in use before the QP entered ERROR state. |
50 | * (Or, the LOCAL_INV WR has not completed or flushed yet). | ||
51 | * | ||
52 | * STALE: The MR was being registered or unregistered when the QP | ||
53 | * entered ERROR state, and the pending WR was flushed. | ||
54 | * | 50 | * |
55 | * VALID: The MR was registered before the QP entered ERROR state. | 51 | * VALID: The MR was registered before the QP entered ERROR state. |
56 | * | 52 | * |
57 | * When frwr_op_map encounters STALE and VALID MRs, they are recovered | 53 | * FLUSHED_FR: The MR was being registered when the QP entered ERROR |
58 | * with ib_dereg_mr and then are re-initialized. Beause MR recovery | 54 | * state, and the pending WR was flushed. |
55 | * | ||
56 | * FLUSHED_LI: The MR was being invalidated when the QP entered ERROR | ||
57 | * state, and the pending WR was flushed. | ||
58 | * | ||
59 | * When frwr_op_map encounters FLUSHED and VALID MRs, they are recovered | ||
60 | * with ib_dereg_mr and then are re-initialized. Because MR recovery | ||
59 | * allocates fresh resources, it is deferred to a workqueue, and the | 61 | * allocates fresh resources, it is deferred to a workqueue, and the |
60 | * recovered MRs are placed back on the rb_mws list when recovery is | 62 | * recovered MRs are placed back on the rb_mws list when recovery is |
61 | * complete. frwr_op_map allocates another MR for the current RPC while | 63 | * complete. frwr_op_map allocates another MR for the current RPC while |
@@ -177,12 +179,15 @@ __frwr_reset_mr(struct rpcrdma_ia *ia, struct rpcrdma_mw *r) | |||
177 | static void | 179 | static void |
178 | frwr_op_recover_mr(struct rpcrdma_mw *mw) | 180 | frwr_op_recover_mr(struct rpcrdma_mw *mw) |
179 | { | 181 | { |
182 | enum rpcrdma_frmr_state state = mw->frmr.fr_state; | ||
180 | struct rpcrdma_xprt *r_xprt = mw->mw_xprt; | 183 | struct rpcrdma_xprt *r_xprt = mw->mw_xprt; |
181 | struct rpcrdma_ia *ia = &r_xprt->rx_ia; | 184 | struct rpcrdma_ia *ia = &r_xprt->rx_ia; |
182 | int rc; | 185 | int rc; |
183 | 186 | ||
184 | rc = __frwr_reset_mr(ia, mw); | 187 | rc = __frwr_reset_mr(ia, mw); |
185 | ib_dma_unmap_sg(ia->ri_device, mw->mw_sg, mw->mw_nents, mw->mw_dir); | 188 | if (state != FRMR_FLUSHED_LI) |
189 | ib_dma_unmap_sg(ia->ri_device, | ||
190 | mw->mw_sg, mw->mw_nents, mw->mw_dir); | ||
186 | if (rc) | 191 | if (rc) |
187 | goto out_release; | 192 | goto out_release; |
188 | 193 | ||
@@ -262,10 +267,8 @@ frwr_op_maxpages(struct rpcrdma_xprt *r_xprt) | |||
262 | } | 267 | } |
263 | 268 | ||
264 | static void | 269 | static void |
265 | __frwr_sendcompletion_flush(struct ib_wc *wc, struct rpcrdma_frmr *frmr, | 270 | __frwr_sendcompletion_flush(struct ib_wc *wc, const char *wr) |
266 | const char *wr) | ||
267 | { | 271 | { |
268 | frmr->fr_state = FRMR_IS_STALE; | ||
269 | if (wc->status != IB_WC_WR_FLUSH_ERR) | 272 | if (wc->status != IB_WC_WR_FLUSH_ERR) |
270 | pr_err("rpcrdma: %s: %s (%u/0x%x)\n", | 273 | pr_err("rpcrdma: %s: %s (%u/0x%x)\n", |
271 | wr, ib_wc_status_msg(wc->status), | 274 | wr, ib_wc_status_msg(wc->status), |
@@ -288,7 +291,8 @@ frwr_wc_fastreg(struct ib_cq *cq, struct ib_wc *wc) | |||
288 | if (wc->status != IB_WC_SUCCESS) { | 291 | if (wc->status != IB_WC_SUCCESS) { |
289 | cqe = wc->wr_cqe; | 292 | cqe = wc->wr_cqe; |
290 | frmr = container_of(cqe, struct rpcrdma_frmr, fr_cqe); | 293 | frmr = container_of(cqe, struct rpcrdma_frmr, fr_cqe); |
291 | __frwr_sendcompletion_flush(wc, frmr, "fastreg"); | 294 | frmr->fr_state = FRMR_FLUSHED_FR; |
295 | __frwr_sendcompletion_flush(wc, "fastreg"); | ||
292 | } | 296 | } |
293 | } | 297 | } |
294 | 298 | ||
@@ -308,7 +312,8 @@ frwr_wc_localinv(struct ib_cq *cq, struct ib_wc *wc) | |||
308 | if (wc->status != IB_WC_SUCCESS) { | 312 | if (wc->status != IB_WC_SUCCESS) { |
309 | cqe = wc->wr_cqe; | 313 | cqe = wc->wr_cqe; |
310 | frmr = container_of(cqe, struct rpcrdma_frmr, fr_cqe); | 314 | frmr = container_of(cqe, struct rpcrdma_frmr, fr_cqe); |
311 | __frwr_sendcompletion_flush(wc, frmr, "localinv"); | 315 | frmr->fr_state = FRMR_FLUSHED_LI; |
316 | __frwr_sendcompletion_flush(wc, "localinv"); | ||
312 | } | 317 | } |
313 | } | 318 | } |
314 | 319 | ||
@@ -328,8 +333,10 @@ frwr_wc_localinv_wake(struct ib_cq *cq, struct ib_wc *wc) | |||
328 | /* WARNING: Only wr_cqe and status are reliable at this point */ | 333 | /* WARNING: Only wr_cqe and status are reliable at this point */ |
329 | cqe = wc->wr_cqe; | 334 | cqe = wc->wr_cqe; |
330 | frmr = container_of(cqe, struct rpcrdma_frmr, fr_cqe); | 335 | frmr = container_of(cqe, struct rpcrdma_frmr, fr_cqe); |
331 | if (wc->status != IB_WC_SUCCESS) | 336 | if (wc->status != IB_WC_SUCCESS) { |
332 | __frwr_sendcompletion_flush(wc, frmr, "localinv"); | 337 | frmr->fr_state = FRMR_FLUSHED_LI; |
338 | __frwr_sendcompletion_flush(wc, "localinv"); | ||
339 | } | ||
333 | complete(&frmr->fr_linv_done); | 340 | complete(&frmr->fr_linv_done); |
334 | } | 341 | } |
335 | 342 | ||
diff --git a/net/sunrpc/xprtrdma/xprt_rdma.h b/net/sunrpc/xprtrdma/xprt_rdma.h index 0d35b761c883..6e1bba358203 100644 --- a/net/sunrpc/xprtrdma/xprt_rdma.h +++ b/net/sunrpc/xprtrdma/xprt_rdma.h | |||
@@ -216,7 +216,8 @@ struct rpcrdma_rep { | |||
216 | enum rpcrdma_frmr_state { | 216 | enum rpcrdma_frmr_state { |
217 | FRMR_IS_INVALID, /* ready to be used */ | 217 | FRMR_IS_INVALID, /* ready to be used */ |
218 | FRMR_IS_VALID, /* in use */ | 218 | FRMR_IS_VALID, /* in use */ |
219 | FRMR_IS_STALE, /* failed completion */ | 219 | FRMR_FLUSHED_FR, /* flushed FASTREG WR */ |
220 | FRMR_FLUSHED_LI, /* flushed LOCALINV WR */ | ||
220 | }; | 221 | }; |
221 | 222 | ||
222 | struct rpcrdma_frmr { | 223 | struct rpcrdma_frmr { |
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c index 145082e2ba36..5d1c14a2f268 100644 --- a/net/unix/af_unix.c +++ b/net/unix/af_unix.c | |||
@@ -2812,7 +2812,8 @@ static int unix_seq_show(struct seq_file *seq, void *v) | |||
2812 | i++; | 2812 | i++; |
2813 | } | 2813 | } |
2814 | for ( ; i < len; i++) | 2814 | for ( ; i < len; i++) |
2815 | seq_putc(seq, u->addr->name->sun_path[i]); | 2815 | seq_putc(seq, u->addr->name->sun_path[i] ?: |
2816 | '@'); | ||
2816 | } | 2817 | } |
2817 | unix_state_unlock(s); | 2818 | unix_state_unlock(s); |
2818 | seq_putc(seq, '\n'); | 2819 | seq_putc(seq, '\n'); |
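The af_unix change above prints `'@'` in place of NUL bytes so abstract socket names (which begin with a NUL) stay readable in the /proc output. A tiny sketch of the formatting:

```c
/* Small sketch of the /proc formatting change above: abstract AF_UNIX
 * names start with a NUL byte, so NULs are rendered as '@' instead of
 * disappearing from the output.
 */
#include <stdio.h>

static void print_sun_path(const char *sun_path, int len)
{
	int i;

	for (i = 0; i < len; i++)
		putchar(sun_path[i] ? sun_path[i] : '@');
	putchar('\n');
}

int main(void)
{
	const char abstract_name[] = { '\0', 'm', 'y', 's', 'o', 'c', 'k' };

	print_sun_path(abstract_name, sizeof(abstract_name)); /* prints @mysock */
	return 0;
}
```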
diff --git a/samples/bpf/Makefile b/samples/bpf/Makefile index 12b7304d55dc..72c58675973e 100644 --- a/samples/bpf/Makefile +++ b/samples/bpf/Makefile | |||
@@ -27,6 +27,7 @@ hostprogs-y += xdp2 | |||
27 | hostprogs-y += test_current_task_under_cgroup | 27 | hostprogs-y += test_current_task_under_cgroup |
28 | hostprogs-y += trace_event | 28 | hostprogs-y += trace_event |
29 | hostprogs-y += sampleip | 29 | hostprogs-y += sampleip |
30 | hostprogs-y += tc_l2_redirect | ||
30 | 31 | ||
31 | test_verifier-objs := test_verifier.o libbpf.o | 32 | test_verifier-objs := test_verifier.o libbpf.o |
32 | test_maps-objs := test_maps.o libbpf.o | 33 | test_maps-objs := test_maps.o libbpf.o |
@@ -56,6 +57,7 @@ test_current_task_under_cgroup-objs := bpf_load.o libbpf.o \ | |||
56 | test_current_task_under_cgroup_user.o | 57 | test_current_task_under_cgroup_user.o |
57 | trace_event-objs := bpf_load.o libbpf.o trace_event_user.o | 58 | trace_event-objs := bpf_load.o libbpf.o trace_event_user.o |
58 | sampleip-objs := bpf_load.o libbpf.o sampleip_user.o | 59 | sampleip-objs := bpf_load.o libbpf.o sampleip_user.o |
60 | tc_l2_redirect-objs := bpf_load.o libbpf.o tc_l2_redirect_user.o | ||
59 | 61 | ||
60 | # Tell kbuild to always build the programs | 62 | # Tell kbuild to always build the programs |
61 | always := $(hostprogs-y) | 63 | always := $(hostprogs-y) |
@@ -72,6 +74,7 @@ always += test_probe_write_user_kern.o | |||
72 | always += trace_output_kern.o | 74 | always += trace_output_kern.o |
73 | always += tcbpf1_kern.o | 75 | always += tcbpf1_kern.o |
74 | always += tcbpf2_kern.o | 76 | always += tcbpf2_kern.o |
77 | always += tc_l2_redirect_kern.o | ||
75 | always += lathist_kern.o | 78 | always += lathist_kern.o |
76 | always += offwaketime_kern.o | 79 | always += offwaketime_kern.o |
77 | always += spintest_kern.o | 80 | always += spintest_kern.o |
@@ -111,6 +114,7 @@ HOSTLOADLIBES_xdp2 += -lelf | |||
111 | HOSTLOADLIBES_test_current_task_under_cgroup += -lelf | 114 | HOSTLOADLIBES_test_current_task_under_cgroup += -lelf |
112 | HOSTLOADLIBES_trace_event += -lelf | 115 | HOSTLOADLIBES_trace_event += -lelf |
113 | HOSTLOADLIBES_sampleip += -lelf | 116 | HOSTLOADLIBES_sampleip += -lelf |
117 | HOSTLOADLIBES_tc_l2_redirect += -l elf | ||
114 | 118 | ||
115 | # Allows pointing LLC/CLANG to a LLVM backend with bpf support, redefine on cmdline: | 119 | # Allows pointing LLC/CLANG to a LLVM backend with bpf support, redefine on cmdline: |
116 | # make samples/bpf/ LLC=~/git/llvm/build/bin/llc CLANG=~/git/llvm/build/bin/clang | 120 | # make samples/bpf/ LLC=~/git/llvm/build/bin/llc CLANG=~/git/llvm/build/bin/clang |
diff --git a/samples/bpf/tc_l2_redirect.sh b/samples/bpf/tc_l2_redirect.sh new file mode 100755 index 000000000000..80a05591a140 --- /dev/null +++ b/samples/bpf/tc_l2_redirect.sh | |||
@@ -0,0 +1,173 @@ | |||
1 | #!/bin/bash | ||
2 | |||
3 | [[ -z $TC ]] && TC='tc' | ||
4 | [[ -z $IP ]] && IP='ip' | ||
5 | |||
6 | REDIRECT_USER='./tc_l2_redirect' | ||
7 | REDIRECT_BPF='./tc_l2_redirect_kern.o' | ||
8 | |||
9 | RP_FILTER=$(< /proc/sys/net/ipv4/conf/all/rp_filter) | ||
10 | IPV6_FORWARDING=$(< /proc/sys/net/ipv6/conf/all/forwarding) | ||
11 | |||
12 | function config_common { | ||
13 | local tun_type=$1 | ||
14 | |||
15 | $IP netns add ns1 | ||
16 | $IP netns add ns2 | ||
17 | $IP link add ve1 type veth peer name vens1 | ||
18 | $IP link add ve2 type veth peer name vens2 | ||
19 | $IP link set dev ve1 up | ||
20 | $IP link set dev ve2 up | ||
21 | $IP link set dev ve1 mtu 1500 | ||
22 | $IP link set dev ve2 mtu 1500 | ||
23 | $IP link set dev vens1 netns ns1 | ||
24 | $IP link set dev vens2 netns ns2 | ||
25 | |||
26 | $IP -n ns1 link set dev lo up | ||
27 | $IP -n ns1 link set dev vens1 up | ||
28 | $IP -n ns1 addr add 10.1.1.101/24 dev vens1 | ||
29 | $IP -n ns1 addr add 2401:db01::65/64 dev vens1 nodad | ||
30 | $IP -n ns1 route add default via 10.1.1.1 dev vens1 | ||
31 | $IP -n ns1 route add default via 2401:db01::1 dev vens1 | ||
32 | |||
33 | $IP -n ns2 link set dev lo up | ||
34 | $IP -n ns2 link set dev vens2 up | ||
35 | $IP -n ns2 addr add 10.2.1.102/24 dev vens2 | ||
36 | $IP -n ns2 addr add 2401:db02::66/64 dev vens2 nodad | ||
37 | $IP -n ns2 addr add 10.10.1.102 dev lo | ||
38 | $IP -n ns2 addr add 2401:face::66/64 dev lo nodad | ||
39 | $IP -n ns2 link add ipt2 type ipip local 10.2.1.102 remote 10.2.1.1 | ||
40 | $IP -n ns2 link add ip6t2 type ip6tnl mode any local 2401:db02::66 remote 2401:db02::1 | ||
41 | $IP -n ns2 link set dev ipt2 up | ||
42 | $IP -n ns2 link set dev ip6t2 up | ||
43 | $IP netns exec ns2 $TC qdisc add dev vens2 clsact | ||
44 | $IP netns exec ns2 $TC filter add dev vens2 ingress bpf da obj $REDIRECT_BPF sec drop_non_tun_vip | ||
45 | if [[ $tun_type == "ipip" ]]; then | ||
46 | $IP -n ns2 route add 10.1.1.0/24 dev ipt2 | ||
47 | $IP netns exec ns2 sysctl -q -w net.ipv4.conf.all.rp_filter=0 | ||
48 | $IP netns exec ns2 sysctl -q -w net.ipv4.conf.ipt2.rp_filter=0 | ||
49 | else | ||
50 | $IP -n ns2 route add 10.1.1.0/24 dev ip6t2 | ||
51 | $IP -n ns2 route add 2401:db01::/64 dev ip6t2 | ||
52 | $IP netns exec ns2 sysctl -q -w net.ipv4.conf.all.rp_filter=0 | ||
53 | $IP netns exec ns2 sysctl -q -w net.ipv4.conf.ip6t2.rp_filter=0 | ||
54 | fi | ||
55 | |||
56 | $IP addr add 10.1.1.1/24 dev ve1 | ||
57 | $IP addr add 2401:db01::1/64 dev ve1 nodad | ||
58 | $IP addr add 10.2.1.1/24 dev ve2 | ||
59 | $IP addr add 2401:db02::1/64 dev ve2 nodad | ||
60 | |||
61 | $TC qdisc add dev ve2 clsact | ||
62 | $TC filter add dev ve2 ingress bpf da obj $REDIRECT_BPF sec l2_to_iptun_ingress_forward | ||
63 | |||
64 | sysctl -q -w net.ipv4.conf.all.rp_filter=0 | ||
65 | sysctl -q -w net.ipv6.conf.all.forwarding=1 | ||
66 | } | ||
67 | |||
68 | function cleanup { | ||
69 | set +e | ||
70 | [[ -z $DEBUG ]] || set +x | ||
71 | $IP netns delete ns1 >& /dev/null | ||
72 | $IP netns delete ns2 >& /dev/null | ||
73 | $IP link del ve1 >& /dev/null | ||
74 | $IP link del ve2 >& /dev/null | ||
75 | $IP link del ipt >& /dev/null | ||
76 | $IP link del ip6t >& /dev/null | ||
77 | sysctl -q -w net.ipv4.conf.all.rp_filter=$RP_FILTER | ||
78 | sysctl -q -w net.ipv6.conf.all.forwarding=$IPV6_FORWARDING | ||
79 | rm -f /sys/fs/bpf/tc/globals/tun_iface | ||
80 | [[ -z $DEBUG ]] || set -x | ||
81 | set -e | ||
82 | } | ||
83 | |||
84 | function l2_to_ipip { | ||
85 | echo -n "l2_to_ipip $1: " | ||
86 | |||
87 | local dir=$1 | ||
88 | |||
89 | config_common ipip | ||
90 | |||
91 | $IP link add ipt type ipip external | ||
92 | $IP link set dev ipt up | ||
93 | sysctl -q -w net.ipv4.conf.ipt.rp_filter=0 | ||
94 | sysctl -q -w net.ipv4.conf.ipt.forwarding=1 | ||
95 | |||
96 | if [[ $dir == "egress" ]]; then | ||
97 | $IP route add 10.10.1.0/24 via 10.2.1.102 dev ve2 | ||
98 | $TC filter add dev ve2 egress bpf da obj $REDIRECT_BPF sec l2_to_iptun_ingress_redirect | ||
99 | sysctl -q -w net.ipv4.conf.ve1.forwarding=1 | ||
100 | else | ||
101 | $TC qdisc add dev ve1 clsact | ||
102 | $TC filter add dev ve1 ingress bpf da obj $REDIRECT_BPF sec l2_to_iptun_ingress_redirect | ||
103 | fi | ||
104 | |||
105 | $REDIRECT_USER -U /sys/fs/bpf/tc/globals/tun_iface -i $(< /sys/class/net/ipt/ifindex) | ||
106 | |||
107 | $IP netns exec ns1 ping -c1 10.10.1.102 >& /dev/null | ||
108 | |||
109 | if [[ $dir == "egress" ]]; then | ||
110 | # test direct egress to ve2 (i.e. not forwarding from | ||
111 | # ve1 to ve2). | ||
112 | ping -c1 10.10.1.102 >& /dev/null | ||
113 | fi | ||
114 | |||
115 | cleanup | ||
116 | |||
117 | echo "OK" | ||
118 | } | ||
119 | |||
120 | function l2_to_ip6tnl { | ||
121 | echo -n "l2_to_ip6tnl $1: " | ||
122 | |||
123 | local dir=$1 | ||
124 | |||
125 | config_common ip6tnl | ||
126 | |||
127 | $IP link add ip6t type ip6tnl mode any external | ||
128 | $IP link set dev ip6t up | ||
129 | sysctl -q -w net.ipv4.conf.ip6t.rp_filter=0 | ||
130 | sysctl -q -w net.ipv4.conf.ip6t.forwarding=1 | ||
131 | |||
132 | if [[ $dir == "egress" ]]; then | ||
133 | $IP route add 10.10.1.0/24 via 10.2.1.102 dev ve2 | ||
134 | $IP route add 2401:face::/64 via 2401:db02::66 dev ve2 | ||
135 | $TC filter add dev ve2 egress bpf da obj $REDIRECT_BPF sec l2_to_ip6tun_ingress_redirect | ||
136 | sysctl -q -w net.ipv4.conf.ve1.forwarding=1 | ||
137 | else | ||
138 | $TC qdisc add dev ve1 clsact | ||
139 | $TC filter add dev ve1 ingress bpf da obj $REDIRECT_BPF sec l2_to_ip6tun_ingress_redirect | ||
140 | fi | ||
141 | |||
142 | $REDIRECT_USER -U /sys/fs/bpf/tc/globals/tun_iface -i $(< /sys/class/net/ip6t/ifindex) | ||
143 | |||
144 | $IP netns exec ns1 ping -c1 10.10.1.102 >& /dev/null | ||
145 | $IP netns exec ns1 ping -6 -c1 2401:face::66 >& /dev/null | ||
146 | |||
147 | if [[ $dir == "egress" ]]; then | ||
148 | # test direct egress to ve2 (i.e. not forwarding from | ||
149 | # ve1 to ve2). | ||
150 | ping -c1 10.10.1.102 >& /dev/null | ||
151 | ping -6 -c1 2401:face::66 >& /dev/null | ||
152 | fi | ||
153 | |||
154 | cleanup | ||
155 | |||
156 | echo "OK" | ||
157 | } | ||
158 | |||
159 | cleanup | ||
160 | test_names="l2_to_ipip l2_to_ip6tnl" | ||
161 | test_dirs="ingress egress" | ||
162 | if [[ $# -ge 2 ]]; then | ||
163 | test_names=$1 | ||
164 | test_dirs=$2 | ||
165 | elif [[ $# -ge 1 ]]; then | ||
166 | test_names=$1 | ||
167 | fi | ||
168 | |||
169 | for t in $test_names; do | ||
170 | for d in $test_dirs; do | ||
171 | $t $d | ||
172 | done | ||
173 | done | ||
diff --git a/samples/bpf/tc_l2_redirect_kern.c b/samples/bpf/tc_l2_redirect_kern.c new file mode 100644 index 000000000000..92a44729dbe4 --- /dev/null +++ b/samples/bpf/tc_l2_redirect_kern.c | |||
@@ -0,0 +1,236 @@ | |||
1 | /* Copyright (c) 2016 Facebook | ||
2 | * | ||
3 | * This program is free software; you can redistribute it and/or | ||
4 | * modify it under the terms of version 2 of the GNU General Public | ||
5 | * License as published by the Free Software Foundation. | ||
6 | */ | ||
7 | #include <uapi/linux/bpf.h> | ||
8 | #include <uapi/linux/if_ether.h> | ||
9 | #include <uapi/linux/if_packet.h> | ||
10 | #include <uapi/linux/ip.h> | ||
11 | #include <uapi/linux/ipv6.h> | ||
12 | #include <uapi/linux/in.h> | ||
13 | #include <uapi/linux/tcp.h> | ||
14 | #include <uapi/linux/filter.h> | ||
15 | #include <uapi/linux/pkt_cls.h> | ||
16 | #include <net/ipv6.h> | ||
17 | #include "bpf_helpers.h" | ||
18 | |||
19 | #define _htonl __builtin_bswap32 | ||
20 | |||
21 | #define PIN_GLOBAL_NS 2 | ||
22 | struct bpf_elf_map { | ||
23 | __u32 type; | ||
24 | __u32 size_key; | ||
25 | __u32 size_value; | ||
26 | __u32 max_elem; | ||
27 | __u32 flags; | ||
28 | __u32 id; | ||
29 | __u32 pinning; | ||
30 | }; | ||
31 | |||
32 | /* copy of 'struct ethhdr' without __packed */ | ||
33 | struct eth_hdr { | ||
34 | unsigned char h_dest[ETH_ALEN]; | ||
35 | unsigned char h_source[ETH_ALEN]; | ||
36 | unsigned short h_proto; | ||
37 | }; | ||
38 | |||
39 | struct bpf_elf_map SEC("maps") tun_iface = { | ||
40 | .type = BPF_MAP_TYPE_ARRAY, | ||
41 | .size_key = sizeof(int), | ||
42 | .size_value = sizeof(int), | ||
43 | .pinning = PIN_GLOBAL_NS, | ||
44 | .max_elem = 1, | ||
45 | }; | ||
46 | |||
47 | static __always_inline bool is_vip_addr(__be16 eth_proto, __be32 daddr) | ||
48 | { | ||
49 | if (eth_proto == htons(ETH_P_IP)) | ||
50 | return (_htonl(0xffffff00) & daddr) == _htonl(0x0a0a0100); | ||
51 | else if (eth_proto == htons(ETH_P_IPV6)) | ||
52 | return (daddr == _htonl(0x2401face)); | ||
53 | |||
54 | return false; | ||
55 | } | ||
56 | |||
57 | SEC("l2_to_iptun_ingress_forward") | ||
58 | int _l2_to_iptun_ingress_forward(struct __sk_buff *skb) | ||
59 | { | ||
60 | struct bpf_tunnel_key tkey = {}; | ||
61 | void *data = (void *)(long)skb->data; | ||
62 | struct eth_hdr *eth = data; | ||
63 | void *data_end = (void *)(long)skb->data_end; | ||
64 | int key = 0, *ifindex; | ||
65 | |||
66 | int ret; | ||
67 | |||
68 | if (data + sizeof(*eth) > data_end) | ||
69 | return TC_ACT_OK; | ||
70 | |||
71 | ifindex = bpf_map_lookup_elem(&tun_iface, &key); | ||
72 | if (!ifindex) | ||
73 | return TC_ACT_OK; | ||
74 | |||
75 | if (eth->h_proto == htons(ETH_P_IP)) { | ||
76 | char fmt4[] = "ingress forward to ifindex:%d daddr4:%x\n"; | ||
77 | struct iphdr *iph = data + sizeof(*eth); | ||
78 | |||
79 | if (data + sizeof(*eth) + sizeof(*iph) > data_end) | ||
80 | return TC_ACT_OK; | ||
81 | |||
82 | if (iph->protocol != IPPROTO_IPIP) | ||
83 | return TC_ACT_OK; | ||
84 | |||
85 | bpf_trace_printk(fmt4, sizeof(fmt4), *ifindex, | ||
86 | _htonl(iph->daddr)); | ||
87 | return bpf_redirect(*ifindex, BPF_F_INGRESS); | ||
88 | } else if (eth->h_proto == htons(ETH_P_IPV6)) { | ||
89 | char fmt6[] = "ingress forward to ifindex:%d daddr6:%x::%x\n"; | ||
90 | struct ipv6hdr *ip6h = data + sizeof(*eth); | ||
91 | |||
92 | if (data + sizeof(*eth) + sizeof(*ip6h) > data_end) | ||
93 | return TC_ACT_OK; | ||
94 | |||
95 | if (ip6h->nexthdr != IPPROTO_IPIP && | ||
96 | ip6h->nexthdr != IPPROTO_IPV6) | ||
97 | return TC_ACT_OK; | ||
98 | |||
99 | bpf_trace_printk(fmt6, sizeof(fmt6), *ifindex, | ||
100 | _htonl(ip6h->daddr.s6_addr32[0]), | ||
101 | _htonl(ip6h->daddr.s6_addr32[3])); | ||
102 | return bpf_redirect(*ifindex, BPF_F_INGRESS); | ||
103 | } | ||
104 | |||
105 | return TC_ACT_OK; | ||
106 | } | ||
107 | |||
108 | SEC("l2_to_iptun_ingress_redirect") | ||
109 | int _l2_to_iptun_ingress_redirect(struct __sk_buff *skb) | ||
110 | { | ||
111 | struct bpf_tunnel_key tkey = {}; | ||
112 | void *data = (void *)(long)skb->data; | ||
113 | struct eth_hdr *eth = data; | ||
114 | void *data_end = (void *)(long)skb->data_end; | ||
115 | int key = 0, *ifindex; | ||
116 | |||
117 | int ret; | ||
118 | |||
119 | if (data + sizeof(*eth) > data_end) | ||
120 | return TC_ACT_OK; | ||
121 | |||
122 | ifindex = bpf_map_lookup_elem(&tun_iface, &key); | ||
123 | if (!ifindex) | ||
124 | return TC_ACT_OK; | ||
125 | |||
126 | if (eth->h_proto == htons(ETH_P_IP)) { | ||
127 | char fmt4[] = "e/ingress redirect daddr4:%x to ifindex:%d\n"; | ||
128 | struct iphdr *iph = data + sizeof(*eth); | ||
129 | __be32 daddr = iph->daddr; | ||
130 | |||
131 | if (data + sizeof(*eth) + sizeof(*iph) > data_end) | ||
132 | return TC_ACT_OK; | ||
133 | |||
134 | if (!is_vip_addr(eth->h_proto, daddr)) | ||
135 | return TC_ACT_OK; | ||
136 | |||
137 | bpf_trace_printk(fmt4, sizeof(fmt4), _htonl(daddr), *ifindex); | ||
138 | } else { | ||
139 | return TC_ACT_OK; | ||
140 | } | ||
141 | |||
142 | tkey.tunnel_id = 10000; | ||
143 | tkey.tunnel_ttl = 64; | ||
144 | tkey.remote_ipv4 = 0x0a020166; /* 10.2.1.102 */ | ||
145 | bpf_skb_set_tunnel_key(skb, &tkey, sizeof(tkey), 0); | ||
146 | return bpf_redirect(*ifindex, 0); | ||
147 | } | ||
148 | |||
149 | SEC("l2_to_ip6tun_ingress_redirect") | ||
150 | int _l2_to_ip6tun_ingress_redirect(struct __sk_buff *skb) | ||
151 | { | ||
152 | struct bpf_tunnel_key tkey = {}; | ||
153 | void *data = (void *)(long)skb->data; | ||
154 | struct eth_hdr *eth = data; | ||
155 | void *data_end = (void *)(long)skb->data_end; | ||
156 | int key = 0, *ifindex; | ||
157 | |||
158 | if (data + sizeof(*eth) > data_end) | ||
159 | return TC_ACT_OK; | ||
160 | |||
161 | ifindex = bpf_map_lookup_elem(&tun_iface, &key); | ||
162 | if (!ifindex) | ||
163 | return TC_ACT_OK; | ||
164 | |||
165 | if (eth->h_proto == htons(ETH_P_IP)) { | ||
166 | char fmt4[] = "e/ingress redirect daddr4:%x to ifindex:%d\n"; | ||
167 | struct iphdr *iph = data + sizeof(*eth); | ||
168 | |||
169 | if (data + sizeof(*eth) + sizeof(*iph) > data_end) | ||
170 | return TC_ACT_OK; | ||
171 | |||
172 | if (!is_vip_addr(eth->h_proto, iph->daddr)) | ||
173 | return TC_ACT_OK; | ||
174 | |||
175 | bpf_trace_printk(fmt4, sizeof(fmt4), _htonl(iph->daddr), | ||
176 | *ifindex); | ||
177 | } else if (eth->h_proto == htons(ETH_P_IPV6)) { | ||
178 | char fmt6[] = "e/ingress redirect daddr6:%x to ifindex:%d\n"; | ||
179 | struct ipv6hdr *ip6h = data + sizeof(*eth); | ||
180 | |||
181 | if (data + sizeof(*eth) + sizeof(*ip6h) > data_end) | ||
182 | return TC_ACT_OK; | ||
183 | |||
184 | if (!is_vip_addr(eth->h_proto, ip6h->daddr.s6_addr32[0])) | ||
185 | return TC_ACT_OK; | ||
186 | |||
187 | bpf_trace_printk(fmt6, sizeof(fmt6), | ||
188 | _htonl(ip6h->daddr.s6_addr32[0]), *ifindex); | ||
189 | } else { | ||
190 | return TC_ACT_OK; | ||
191 | } | ||
192 | |||
193 | tkey.tunnel_id = 10000; | ||
194 | tkey.tunnel_ttl = 64; | ||
195 | /* 2401:db02:0:0:0:0:0:66 */ | ||
196 | tkey.remote_ipv6[0] = _htonl(0x2401db02); | ||
197 | tkey.remote_ipv6[1] = 0; | ||
198 | tkey.remote_ipv6[2] = 0; | ||
199 | tkey.remote_ipv6[3] = _htonl(0x00000066); | ||
200 | bpf_skb_set_tunnel_key(skb, &tkey, sizeof(tkey), BPF_F_TUNINFO_IPV6); | ||
201 | return bpf_redirect(*ifindex, 0); | ||
202 | } | ||
203 | |||
204 | SEC("drop_non_tun_vip") | ||
205 | int _drop_non_tun_vip(struct __sk_buff *skb) | ||
206 | { | ||
207 | struct bpf_tunnel_key tkey = {}; | ||
208 | void *data = (void *)(long)skb->data; | ||
209 | struct eth_hdr *eth = data; | ||
210 | void *data_end = (void *)(long)skb->data_end; | ||
211 | |||
212 | if (data + sizeof(*eth) > data_end) | ||
213 | return TC_ACT_OK; | ||
214 | |||
215 | if (eth->h_proto == htons(ETH_P_IP)) { | ||
216 | struct iphdr *iph = data + sizeof(*eth); | ||
217 | |||
218 | if (data + sizeof(*eth) + sizeof(*iph) > data_end) | ||
219 | return TC_ACT_OK; | ||
220 | |||
221 | if (is_vip_addr(eth->h_proto, iph->daddr)) | ||
222 | return TC_ACT_SHOT; | ||
223 | } else if (eth->h_proto == htons(ETH_P_IPV6)) { | ||
224 | struct ipv6hdr *ip6h = data + sizeof(*eth); | ||
225 | |||
226 | if (data + sizeof(*eth) + sizeof(*ip6h) > data_end) | ||
227 | return TC_ACT_OK; | ||
228 | |||
229 | if (is_vip_addr(eth->h_proto, ip6h->daddr.s6_addr32[0])) | ||
230 | return TC_ACT_SHOT; | ||
231 | } | ||
232 | |||
233 | return TC_ACT_OK; | ||
234 | } | ||
235 | |||
236 | char _license[] SEC("license") = "GPL"; | ||
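The is_vip_addr() helper above matches addresses while they are still in network byte order: the IPv4 branch masks with /24 and compares against 10.10.1.0, the subnet the test script assigns to ns2's loopback. A plain userspace sketch of that IPv4 check, using htonl() so the comparison is byte-order safe on any host; the program around it is illustrative only.

#include <arpa/inet.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool is_vip_v4(uint32_t daddr)	/* daddr is in network byte order */
{
	/* same test as the BPF program: mask with /24, compare to 10.10.1.0 */
	return (daddr & htonl(0xffffff00)) == htonl(0x0a0a0100);
}

int main(void)
{
	struct in_addr a;

	inet_pton(AF_INET, "10.10.1.102", &a);
	printf("10.10.1.102: %s\n", is_vip_v4(a.s_addr) ? "VIP" : "not VIP");

	inet_pton(AF_INET, "10.2.1.102", &a);
	printf("10.2.1.102:  %s\n", is_vip_v4(a.s_addr) ? "VIP" : "not VIP");
	return 0;
}
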
diff --git a/samples/bpf/tc_l2_redirect_user.c b/samples/bpf/tc_l2_redirect_user.c new file mode 100644 index 000000000000..4013c5337b91 --- /dev/null +++ b/samples/bpf/tc_l2_redirect_user.c | |||
@@ -0,0 +1,73 @@ | |||
1 | /* Copyright (c) 2016 Facebook | ||
2 | * | ||
3 | * This program is free software; you can redistribute it and/or | ||
4 | * modify it under the terms of version 2 of the GNU General Public | ||
5 | * License as published by the Free Software Foundation. | ||
6 | */ | ||
7 | #include <linux/unistd.h> | ||
8 | #include <linux/bpf.h> | ||
9 | |||
10 | #include <stdlib.h> | ||
11 | #include <stdio.h> | ||
12 | #include <unistd.h> | ||
13 | #include <string.h> | ||
14 | #include <errno.h> | ||
15 | |||
16 | #include "libbpf.h" | ||
17 | |||
18 | static void usage(void) | ||
19 | { | ||
20 | printf("Usage: tc_l2_ipip_redirect [...]\n"); | ||
21 | printf(" -U <file> Update an already pinned BPF array\n"); | ||
22 | printf(" -i <ifindex> Interface index\n"); | ||
23 | printf(" -h Display this help\n"); | ||
24 | } | ||
25 | |||
26 | int main(int argc, char **argv) | ||
27 | { | ||
28 | const char *pinned_file = NULL; | ||
29 | int ifindex = -1; | ||
30 | int array_key = 0; | ||
31 | int array_fd = -1; | ||
32 | int ret = -1; | ||
33 | int opt; | ||
34 | |||
35 | while ((opt = getopt(argc, argv, "F:U:i:")) != -1) { | ||
36 | switch (opt) { | ||
37 | /* General args */ | ||
38 | case 'U': | ||
39 | pinned_file = optarg; | ||
40 | break; | ||
41 | case 'i': | ||
42 | ifindex = atoi(optarg); | ||
43 | break; | ||
44 | default: | ||
45 | usage(); | ||
46 | goto out; | ||
47 | } | ||
48 | } | ||
49 | |||
50 | if (ifindex < 0 || !pinned_file) { | ||
51 | usage(); | ||
52 | goto out; | ||
53 | } | ||
54 | |||
55 | array_fd = bpf_obj_get(pinned_file); | ||
56 | if (array_fd < 0) { | ||
57 | fprintf(stderr, "bpf_obj_get(%s): %s(%d)\n", | ||
58 | pinned_file, strerror(errno), errno); | ||
59 | goto out; | ||
60 | } | ||
61 | |||
62 | /* bpf_tunnel_key.remote_ipv4 expects host byte orders */ | ||
63 | ret = bpf_update_elem(array_fd, &array_key, &ifindex, 0); | ||
64 | if (ret) { | ||
65 | perror("bpf_update_elem"); | ||
66 | goto out; | ||
67 | } | ||
68 | |||
69 | out: | ||
70 | if (array_fd != -1) | ||
71 | close(array_fd); | ||
72 | return ret; | ||
73 | } | ||
diff --git a/scripts/Makefile.extrawarn b/scripts/Makefile.extrawarn index 53449a6ff6aa..7c321a603b07 100644 --- a/scripts/Makefile.extrawarn +++ b/scripts/Makefile.extrawarn | |||
@@ -36,6 +36,7 @@ warning-2 += -Wshadow | |||
36 | warning-2 += $(call cc-option, -Wlogical-op) | 36 | warning-2 += $(call cc-option, -Wlogical-op) |
37 | warning-2 += $(call cc-option, -Wmissing-field-initializers) | 37 | warning-2 += $(call cc-option, -Wmissing-field-initializers) |
38 | warning-2 += $(call cc-option, -Wsign-compare) | 38 | warning-2 += $(call cc-option, -Wsign-compare) |
39 | warning-2 += $(call cc-option, -Wmaybe-uninitialized) | ||
39 | 40 | ||
40 | warning-3 := -Wbad-function-cast | 41 | warning-3 := -Wbad-function-cast |
41 | warning-3 += -Wcast-qual | 42 | warning-3 += -Wcast-qual |
diff --git a/scripts/Makefile.ubsan b/scripts/Makefile.ubsan index dd779c40c8e6..3b1b13818d59 100644 --- a/scripts/Makefile.ubsan +++ b/scripts/Makefile.ubsan | |||
@@ -17,4 +17,8 @@ endif | |||
17 | ifdef CONFIG_UBSAN_NULL | 17 | ifdef CONFIG_UBSAN_NULL |
18 | CFLAGS_UBSAN += $(call cc-option, -fsanitize=null) | 18 | CFLAGS_UBSAN += $(call cc-option, -fsanitize=null) |
19 | endif | 19 | endif |
20 | |||
21 | # -fsanitize=* options make GCC less smart than usual and | ||
22 | # increase the number of 'maybe-uninitialized' false positives | ||
23 | CFLAGS_UBSAN += $(call cc-option, -Wno-maybe-uninitialized) | ||
20 | endif | 24 | endif |
diff --git a/scripts/bloat-o-meter b/scripts/bloat-o-meter index 19f5adfd877d..d9ff038c1b28 100755 --- a/scripts/bloat-o-meter +++ b/scripts/bloat-o-meter | |||
@@ -8,6 +8,9 @@ | |||
8 | # of the GNU General Public License, incorporated herein by reference. | 8 | # of the GNU General Public License, incorporated herein by reference. |
9 | 9 | ||
10 | import sys, os, re | 10 | import sys, os, re |
11 | from signal import signal, SIGPIPE, SIG_DFL | ||
12 | |||
13 | signal(SIGPIPE, SIG_DFL) | ||
11 | 14 | ||
12 | if len(sys.argv) != 3: | 15 | if len(sys.argv) != 3: |
13 | sys.stderr.write("usage: %s file1 file2\n" % sys.argv[0]) | 16 | sys.stderr.write("usage: %s file1 file2\n" % sys.argv[0]) |
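The bloat-o-meter change restores SIGPIPE's default action so piping its output into a short-lived reader such as head ends the script quietly rather than with a traceback. The same idea expressed in C, as a rough sketch: a C program already dies on a broken pipe by default, so the explicit signal() call only matters if something earlier set SIGPIPE to SIG_IGN.

#include <signal.h>
#include <stdio.h>

int main(void)
{
	/* ensure a broken pipe terminates us instead of being ignored */
	signal(SIGPIPE, SIG_DFL);

	for (int i = 0; i < 1000000; i++)
		printf("line %d\n", i);	/* killed by SIGPIPE once the reader exits */
	return 0;
}
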
diff --git a/sound/core/info.c b/sound/core/info.c index 895362a696c9..8ab72e0f5932 100644 --- a/sound/core/info.c +++ b/sound/core/info.c | |||
@@ -325,10 +325,15 @@ static ssize_t snd_info_text_entry_write(struct file *file, | |||
325 | size_t next; | 325 | size_t next; |
326 | int err = 0; | 326 | int err = 0; |
327 | 327 | ||
328 | if (!entry->c.text.write) | ||
329 | return -EIO; | ||
328 | pos = *offset; | 330 | pos = *offset; |
329 | if (!valid_pos(pos, count)) | 331 | if (!valid_pos(pos, count)) |
330 | return -EIO; | 332 | return -EIO; |
331 | next = pos + count; | 333 | next = pos + count; |
334 | /* don't handle too large text inputs */ | ||
335 | if (next > 16 * 1024) | ||
336 | return -EIO; | ||
332 | mutex_lock(&entry->access); | 337 | mutex_lock(&entry->access); |
333 | buf = data->wbuffer; | 338 | buf = data->wbuffer; |
334 | if (!buf) { | 339 | if (!buf) { |
@@ -366,7 +371,9 @@ static int snd_info_seq_show(struct seq_file *seq, void *p) | |||
366 | struct snd_info_private_data *data = seq->private; | 371 | struct snd_info_private_data *data = seq->private; |
367 | struct snd_info_entry *entry = data->entry; | 372 | struct snd_info_entry *entry = data->entry; |
368 | 373 | ||
369 | if (entry->c.text.read) { | 374 | if (!entry->c.text.read) { |
375 | return -EIO; | ||
376 | } else { | ||
370 | data->rbuffer->buffer = (char *)seq; /* XXX hack! */ | 377 | data->rbuffer->buffer = (char *)seq; /* XXX hack! */ |
371 | entry->c.text.read(entry, data->rbuffer); | 378 | entry->c.text.read(entry, data->rbuffer); |
372 | } | 379 | } |
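The sound/core/info.c hunks add two cheap guards: refuse reads and writes when no handler is registered, and cap how much text a single write may grow the buffer to. A self-contained sketch of that guard pattern is below; the struct, function names, and sink callback are invented, only the 16 KiB limit mirrors the hunk.

#include <errno.h>
#include <stdio.h>
#include <sys/types.h>

#define TEXT_WRITE_MAX	(16 * 1024)	/* mirror of the 16 KiB cap above */

struct text_entry {
	ssize_t (*write)(const char *buf, size_t count);
};

static ssize_t entry_write(struct text_entry *e, const char *buf,
			   size_t pos, size_t count)
{
	if (!e->write)
		return -EIO;		/* nothing registered to consume the data */
	if (pos + count > TEXT_WRITE_MAX)
		return -EIO;		/* don't handle too-large text inputs */
	return e->write(buf, count);
}

static ssize_t sink(const char *buf, size_t count)
{
	(void)buf;
	return (ssize_t)count;
}

int main(void)
{
	struct text_entry e = { .write = sink };

	printf("%zd\n", entry_write(&e, "hello", 0, 5));		/* 5 */
	printf("%zd\n", entry_write(&e, "x", TEXT_WRITE_MAX, 1));	/* -EIO */
	return 0;
}
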
diff --git a/sound/soc/codecs/cs4270.c b/sound/soc/codecs/cs4270.c index 18baea2f7d65..84f86745c30e 100644 --- a/sound/soc/codecs/cs4270.c +++ b/sound/soc/codecs/cs4270.c | |||
@@ -148,11 +148,11 @@ SND_SOC_DAPM_OUTPUT("AOUTR"), | |||
148 | }; | 148 | }; |
149 | 149 | ||
150 | static const struct snd_soc_dapm_route cs4270_dapm_routes[] = { | 150 | static const struct snd_soc_dapm_route cs4270_dapm_routes[] = { |
151 | { "Capture", NULL, "AINA" }, | 151 | { "Capture", NULL, "AINL" }, |
152 | { "Capture", NULL, "AINB" }, | 152 | { "Capture", NULL, "AINR" }, |
153 | 153 | ||
154 | { "AOUTA", NULL, "Playback" }, | 154 | { "AOUTL", NULL, "Playback" }, |
155 | { "AOUTB", NULL, "Playback" }, | 155 | { "AOUTR", NULL, "Playback" }, |
156 | }; | 156 | }; |
157 | 157 | ||
158 | /** | 158 | /** |
diff --git a/sound/soc/codecs/da7219.c b/sound/soc/codecs/da7219.c index 1152aa5e7c39..cf37936bfe3a 100644 --- a/sound/soc/codecs/da7219.c +++ b/sound/soc/codecs/da7219.c | |||
@@ -880,7 +880,8 @@ static const struct snd_soc_dapm_widget da7219_dapm_widgets[] = { | |||
880 | SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD), | 880 | SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD), |
881 | 881 | ||
882 | /* DAI */ | 882 | /* DAI */ |
883 | SND_SOC_DAPM_AIF_OUT("DAIOUT", "Capture", 0, SND_SOC_NOPM, 0, 0), | 883 | SND_SOC_DAPM_AIF_OUT("DAIOUT", "Capture", 0, DA7219_DAI_TDM_CTRL, |
884 | DA7219_DAI_OE_SHIFT, DA7219_NO_INVERT), | ||
884 | SND_SOC_DAPM_AIF_IN("DAIIN", "Playback", 0, SND_SOC_NOPM, 0, 0), | 885 | SND_SOC_DAPM_AIF_IN("DAIIN", "Playback", 0, SND_SOC_NOPM, 0, 0), |
885 | 886 | ||
886 | /* Output Muxes */ | 887 | /* Output Muxes */ |
diff --git a/sound/soc/codecs/hdmi-codec.c b/sound/soc/codecs/hdmi-codec.c index b904492d7744..90b5948e0ff3 100644 --- a/sound/soc/codecs/hdmi-codec.c +++ b/sound/soc/codecs/hdmi-codec.c | |||
@@ -364,7 +364,12 @@ static int hdmi_of_xlate_dai_name(struct snd_soc_component *component, | |||
364 | struct of_phandle_args *args, | 364 | struct of_phandle_args *args, |
365 | const char **dai_name) | 365 | const char **dai_name) |
366 | { | 366 | { |
367 | int id = args->args[0]; | 367 | int id; |
368 | |||
369 | if (args->args_count) | ||
370 | id = args->args[0]; | ||
371 | else | ||
372 | id = 0; | ||
368 | 373 | ||
369 | if (id < ARRAY_SIZE(hdmi_dai_name)) { | 374 | if (id < ARRAY_SIZE(hdmi_dai_name)) { |
370 | *dai_name = hdmi_dai_name[id]; | 375 | *dai_name = hdmi_dai_name[id]; |
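hdmi_of_xlate_dai_name() now tolerates a phandle with no argument cells by defaulting the index to 0 before the existing bounds check. A userspace sketch of that defensive pattern follows; the two-entry name table and function name are invented for illustration.

#include <stdio.h>

static const char * const dai_name[] = { "i2s-hifi", "spdif-hifi" };

static const char *xlate_dai_name(const unsigned int *args, int args_count)
{
	unsigned int id = args_count ? args[0] : 0;	/* optional cell: default to 0 */

	if (id >= sizeof(dai_name) / sizeof(dai_name[0]))
		return NULL;				/* out of range */
	return dai_name[id];
}

int main(void)
{
	unsigned int one_cell[] = { 1 };

	printf("%s\n", xlate_dai_name(one_cell, 1));	/* "spdif-hifi" */
	printf("%s\n", xlate_dai_name(NULL, 0));	/* defaults to "i2s-hifi" */
	return 0;
}
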
diff --git a/sound/soc/codecs/rt298.c b/sound/soc/codecs/rt298.c index 55558643166f..2db8179047ae 100644 --- a/sound/soc/codecs/rt298.c +++ b/sound/soc/codecs/rt298.c | |||
@@ -249,6 +249,11 @@ static int rt298_jack_detect(struct rt298_priv *rt298, bool *hp, bool *mic) | |||
249 | snd_soc_dapm_force_enable_pin(dapm, "LDO1"); | 249 | snd_soc_dapm_force_enable_pin(dapm, "LDO1"); |
250 | snd_soc_dapm_sync(dapm); | 250 | snd_soc_dapm_sync(dapm); |
251 | 251 | ||
252 | regmap_update_bits(rt298->regmap, | ||
253 | RT298_POWER_CTRL1, 0x1001, 0); | ||
254 | regmap_update_bits(rt298->regmap, | ||
255 | RT298_POWER_CTRL2, 0x4, 0x4); | ||
256 | |||
252 | regmap_write(rt298->regmap, RT298_SET_MIC1, 0x24); | 257 | regmap_write(rt298->regmap, RT298_SET_MIC1, 0x24); |
253 | msleep(50); | 258 | msleep(50); |
254 | 259 | ||
diff --git a/sound/soc/codecs/rt5663.c b/sound/soc/codecs/rt5663.c index 01a18d88f1eb..00ff2788879e 100644 --- a/sound/soc/codecs/rt5663.c +++ b/sound/soc/codecs/rt5663.c | |||
@@ -1547,11 +1547,11 @@ static int rt5663_jack_detect(struct snd_soc_codec *codec, int jack_insert) | |||
1547 | msleep(sleep_time[i]); | 1547 | msleep(sleep_time[i]); |
1548 | val = snd_soc_read(codec, RT5663_EM_JACK_TYPE_2) & | 1548 | val = snd_soc_read(codec, RT5663_EM_JACK_TYPE_2) & |
1549 | 0x0003; | 1549 | 0x0003; |
1550 | dev_dbg(codec->dev, "%s: MX-00e7 val=%x sleep %d\n", | ||
1551 | __func__, val, sleep_time[i]); | ||
1550 | i++; | 1552 | i++; |
1551 | if (val == 0x1 || val == 0x2 || val == 0x3) | 1553 | if (val == 0x1 || val == 0x2 || val == 0x3) |
1552 | break; | 1554 | break; |
1553 | dev_dbg(codec->dev, "%s: MX-00e7 val=%x sleep %d\n", | ||
1554 | __func__, val, sleep_time[i]); | ||
1555 | } | 1555 | } |
1556 | dev_dbg(codec->dev, "%s val = %d\n", __func__, val); | 1556 | dev_dbg(codec->dev, "%s val = %d\n", __func__, val); |
1557 | switch (val) { | 1557 | switch (val) { |
diff --git a/sound/soc/codecs/sti-sas.c b/sound/soc/codecs/sti-sas.c index 7b31ee9b82bc..d6e00c77edcd 100644 --- a/sound/soc/codecs/sti-sas.c +++ b/sound/soc/codecs/sti-sas.c | |||
@@ -424,7 +424,7 @@ static const struct snd_soc_dai_ops stih407_dac_ops = { | |||
424 | static const struct regmap_config stih407_sas_regmap = { | 424 | static const struct regmap_config stih407_sas_regmap = { |
425 | .reg_bits = 32, | 425 | .reg_bits = 32, |
426 | .val_bits = 32, | 426 | .val_bits = 32, |
427 | 427 | .fast_io = true, | |
428 | .max_register = STIH407_AUDIO_DAC_CTRL, | 428 | .max_register = STIH407_AUDIO_DAC_CTRL, |
429 | .reg_defaults = stih407_sas_reg_defaults, | 429 | .reg_defaults = stih407_sas_reg_defaults, |
430 | .num_reg_defaults = ARRAY_SIZE(stih407_sas_reg_defaults), | 430 | .num_reg_defaults = ARRAY_SIZE(stih407_sas_reg_defaults), |
diff --git a/sound/soc/codecs/tas571x.c b/sound/soc/codecs/tas571x.c index df5e5cb33baa..810369f687d7 100644 --- a/sound/soc/codecs/tas571x.c +++ b/sound/soc/codecs/tas571x.c | |||
@@ -341,20 +341,9 @@ static int tas571x_set_bias_level(struct snd_soc_codec *codec, | |||
341 | return ret; | 341 | return ret; |
342 | } | 342 | } |
343 | } | 343 | } |
344 | |||
345 | gpiod_set_value(priv->pdn_gpio, 0); | ||
346 | usleep_range(5000, 6000); | ||
347 | |||
348 | regcache_cache_only(priv->regmap, false); | ||
349 | ret = regcache_sync(priv->regmap); | ||
350 | if (ret) | ||
351 | return ret; | ||
352 | } | 344 | } |
353 | break; | 345 | break; |
354 | case SND_SOC_BIAS_OFF: | 346 | case SND_SOC_BIAS_OFF: |
355 | regcache_cache_only(priv->regmap, true); | ||
356 | gpiod_set_value(priv->pdn_gpio, 1); | ||
357 | |||
358 | if (!IS_ERR(priv->mclk)) | 347 | if (!IS_ERR(priv->mclk)) |
359 | clk_disable_unprepare(priv->mclk); | 348 | clk_disable_unprepare(priv->mclk); |
360 | break; | 349 | break; |
@@ -401,16 +390,6 @@ static const struct snd_kcontrol_new tas5711_controls[] = { | |||
401 | TAS571X_SOFT_MUTE_REG, | 390 | TAS571X_SOFT_MUTE_REG, |
402 | TAS571X_SOFT_MUTE_CH1_SHIFT, TAS571X_SOFT_MUTE_CH2_SHIFT, | 391 | TAS571X_SOFT_MUTE_CH1_SHIFT, TAS571X_SOFT_MUTE_CH2_SHIFT, |
403 | 1, 1), | 392 | 1, 1), |
404 | |||
405 | SOC_DOUBLE_R_RANGE("CH1 Mixer Volume", | ||
406 | TAS5717_CH1_LEFT_CH_MIX_REG, | ||
407 | TAS5717_CH1_RIGHT_CH_MIX_REG, | ||
408 | 16, 0, 0x80, 0), | ||
409 | |||
410 | SOC_DOUBLE_R_RANGE("CH2 Mixer Volume", | ||
411 | TAS5717_CH2_LEFT_CH_MIX_REG, | ||
412 | TAS5717_CH2_RIGHT_CH_MIX_REG, | ||
413 | 16, 0, 0x80, 0), | ||
414 | }; | 393 | }; |
415 | 394 | ||
416 | static const struct regmap_range tas571x_readonly_regs_range[] = { | 395 | static const struct regmap_range tas571x_readonly_regs_range[] = { |
@@ -488,6 +467,16 @@ static const struct snd_kcontrol_new tas5717_controls[] = { | |||
488 | TAS571X_SOFT_MUTE_CH1_SHIFT, TAS571X_SOFT_MUTE_CH2_SHIFT, | 467 | TAS571X_SOFT_MUTE_CH1_SHIFT, TAS571X_SOFT_MUTE_CH2_SHIFT, |
489 | 1, 1), | 468 | 1, 1), |
490 | 469 | ||
470 | SOC_DOUBLE_R_RANGE("CH1 Mixer Volume", | ||
471 | TAS5717_CH1_LEFT_CH_MIX_REG, | ||
472 | TAS5717_CH1_RIGHT_CH_MIX_REG, | ||
473 | 16, 0, 0x80, 0), | ||
474 | |||
475 | SOC_DOUBLE_R_RANGE("CH2 Mixer Volume", | ||
476 | TAS5717_CH2_LEFT_CH_MIX_REG, | ||
477 | TAS5717_CH2_RIGHT_CH_MIX_REG, | ||
478 | 16, 0, 0x80, 0), | ||
479 | |||
491 | /* | 480 | /* |
492 | * The biquads are named according to the register names. | 481 | * The biquads are named according to the register names. |
493 | * Please note that TI's TAS57xx Graphical Development Environment | 482 | * Please note that TI's TAS57xx Graphical Development Environment |
@@ -747,13 +736,14 @@ static int tas571x_i2c_probe(struct i2c_client *client, | |||
747 | /* pulse the active low reset line for ~100us */ | 736 | /* pulse the active low reset line for ~100us */ |
748 | usleep_range(100, 200); | 737 | usleep_range(100, 200); |
749 | gpiod_set_value(priv->reset_gpio, 0); | 738 | gpiod_set_value(priv->reset_gpio, 0); |
750 | usleep_range(12000, 20000); | 739 | usleep_range(13500, 20000); |
751 | } | 740 | } |
752 | 741 | ||
753 | ret = regmap_write(priv->regmap, TAS571X_OSC_TRIM_REG, 0); | 742 | ret = regmap_write(priv->regmap, TAS571X_OSC_TRIM_REG, 0); |
754 | if (ret) | 743 | if (ret) |
755 | return ret; | 744 | return ret; |
756 | 745 | ||
746 | usleep_range(50000, 60000); | ||
757 | 747 | ||
758 | memcpy(&priv->codec_driver, &tas571x_codec, sizeof(priv->codec_driver)); | 748 | memcpy(&priv->codec_driver, &tas571x_codec, sizeof(priv->codec_driver)); |
759 | priv->codec_driver.component_driver.controls = priv->chip->controls; | 749 | priv->codec_driver.component_driver.controls = priv->chip->controls; |
@@ -770,9 +760,6 @@ static int tas571x_i2c_probe(struct i2c_client *client, | |||
770 | return ret; | 760 | return ret; |
771 | } | 761 | } |
772 | 762 | ||
773 | regcache_cache_only(priv->regmap, true); | ||
774 | gpiod_set_value(priv->pdn_gpio, 1); | ||
775 | |||
776 | return snd_soc_register_codec(&client->dev, &priv->codec_driver, | 763 | return snd_soc_register_codec(&client->dev, &priv->codec_driver, |
777 | &tas571x_dai, 1); | 764 | &tas571x_dai, 1); |
778 | } | 765 | } |
diff --git a/sound/soc/intel/Kconfig b/sound/soc/intel/Kconfig index 26eb5a0a5575..fd5d1e091038 100644 --- a/sound/soc/intel/Kconfig +++ b/sound/soc/intel/Kconfig | |||
@@ -47,6 +47,7 @@ config SND_SOC_INTEL_SST_MATCH | |||
47 | 47 | ||
48 | config SND_SOC_INTEL_HASWELL | 48 | config SND_SOC_INTEL_HASWELL |
49 | tristate | 49 | tristate |
50 | select SND_SOC_INTEL_SST_FIRMWARE | ||
50 | 51 | ||
51 | config SND_SOC_INTEL_BAYTRAIL | 52 | config SND_SOC_INTEL_BAYTRAIL |
52 | tristate | 53 | tristate |
@@ -56,7 +57,6 @@ config SND_SOC_INTEL_HASWELL_MACH | |||
56 | depends on X86_INTEL_LPSS && I2C && I2C_DESIGNWARE_PLATFORM | 57 | depends on X86_INTEL_LPSS && I2C && I2C_DESIGNWARE_PLATFORM |
57 | depends on DW_DMAC_CORE | 58 | depends on DW_DMAC_CORE |
58 | select SND_SOC_INTEL_SST | 59 | select SND_SOC_INTEL_SST |
59 | select SND_SOC_INTEL_SST_FIRMWARE | ||
60 | select SND_SOC_INTEL_HASWELL | 60 | select SND_SOC_INTEL_HASWELL |
61 | select SND_SOC_RT5640 | 61 | select SND_SOC_RT5640 |
62 | help | 62 | help |
@@ -138,7 +138,6 @@ config SND_SOC_INTEL_BROADWELL_MACH | |||
138 | I2C_DESIGNWARE_PLATFORM | 138 | I2C_DESIGNWARE_PLATFORM |
139 | depends on DW_DMAC_CORE | 139 | depends on DW_DMAC_CORE |
140 | select SND_SOC_INTEL_SST | 140 | select SND_SOC_INTEL_SST |
141 | select SND_SOC_INTEL_SST_FIRMWARE | ||
142 | select SND_SOC_INTEL_HASWELL | 141 | select SND_SOC_INTEL_HASWELL |
143 | select SND_SOC_RT286 | 142 | select SND_SOC_RT286 |
144 | help | 143 | help |
diff --git a/sound/soc/intel/atom/sst/sst_acpi.c b/sound/soc/intel/atom/sst/sst_acpi.c index ba5c0d71720a..0a88537ca58a 100644 --- a/sound/soc/intel/atom/sst/sst_acpi.c +++ b/sound/soc/intel/atom/sst/sst_acpi.c | |||
@@ -416,6 +416,7 @@ static const struct dmi_system_id cht_table[] = { | |||
416 | DMI_MATCH(DMI_PRODUCT_NAME, "Surface 3"), | 416 | DMI_MATCH(DMI_PRODUCT_NAME, "Surface 3"), |
417 | }, | 417 | }, |
418 | }, | 418 | }, |
419 | { } | ||
419 | }; | 420 | }; |
420 | 421 | ||
421 | 422 | ||
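The sst_acpi fix appends the empty { } entry that DMI table walkers rely on to know where the array ends. A small stand-alone illustration of why sentinel-terminated match tables matter; the struct layout and the profile string are invented, only the "Surface 3" product name comes from the hunk.

#include <stdio.h>
#include <string.h>

struct match_entry {
	const char *product;	/* NULL in the sentinel entry */
	const char *profile;
};

static const struct match_entry table[] = {
	{ "Surface 3", "cht-surface-profile" },
	{ }			/* sentinel: terminates the walk */
};

static const char *lookup(const char *product)
{
	for (const struct match_entry *e = table; e->product; e++)
		if (!strcmp(e->product, product))
			return e->profile;
	return NULL;		/* reached the sentinel without a match */
}

int main(void)
{
	const char *p = lookup("Unknown Board");

	printf("%s\n", p ? p : "no match");	/* stops safely at the sentinel */
	return 0;
}
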
diff --git a/sound/soc/intel/boards/bxt_da7219_max98357a.c b/sound/soc/intel/boards/bxt_da7219_max98357a.c index 6532b8f0ab2f..865a21e557cc 100644 --- a/sound/soc/intel/boards/bxt_da7219_max98357a.c +++ b/sound/soc/intel/boards/bxt_da7219_max98357a.c | |||
@@ -130,8 +130,8 @@ static int broxton_da7219_codec_init(struct snd_soc_pcm_runtime *rtd) | |||
130 | */ | 130 | */ |
131 | ret = snd_soc_card_jack_new(rtd->card, "Headset Jack", | 131 | ret = snd_soc_card_jack_new(rtd->card, "Headset Jack", |
132 | SND_JACK_HEADSET | SND_JACK_BTN_0 | SND_JACK_BTN_1 | | 132 | SND_JACK_HEADSET | SND_JACK_BTN_0 | SND_JACK_BTN_1 | |
133 | SND_JACK_BTN_2 | SND_JACK_BTN_3, &broxton_headset, | 133 | SND_JACK_BTN_2 | SND_JACK_BTN_3 | SND_JACK_LINEOUT, |
134 | NULL, 0); | 134 | &broxton_headset, NULL, 0); |
135 | if (ret) { | 135 | if (ret) { |
136 | dev_err(rtd->dev, "Headset Jack creation failed: %d\n", ret); | 136 | dev_err(rtd->dev, "Headset Jack creation failed: %d\n", ret); |
137 | return ret; | 137 | return ret; |
diff --git a/sound/soc/intel/skylake/skl.c b/sound/soc/intel/skylake/skl.c index 2989c164dafe..06fa5e85dd0e 100644 --- a/sound/soc/intel/skylake/skl.c +++ b/sound/soc/intel/skylake/skl.c | |||
@@ -674,7 +674,7 @@ static int skl_probe(struct pci_dev *pci, | |||
674 | 674 | ||
675 | if (skl->nhlt == NULL) { | 675 | if (skl->nhlt == NULL) { |
676 | err = -ENODEV; | 676 | err = -ENODEV; |
677 | goto out_free; | 677 | goto out_display_power_off; |
678 | } | 678 | } |
679 | 679 | ||
680 | skl_nhlt_update_topology_bin(skl); | 680 | skl_nhlt_update_topology_bin(skl); |
@@ -746,6 +746,9 @@ out_mach_free: | |||
746 | skl_machine_device_unregister(skl); | 746 | skl_machine_device_unregister(skl); |
747 | out_nhlt_free: | 747 | out_nhlt_free: |
748 | skl_nhlt_free(skl->nhlt); | 748 | skl_nhlt_free(skl->nhlt); |
749 | out_display_power_off: | ||
750 | if (IS_ENABLED(CONFIG_SND_SOC_HDAC_HDMI)) | ||
751 | snd_hdac_display_power(bus, false); | ||
749 | out_free: | 752 | out_free: |
750 | skl->init_failed = 1; | 753 | skl->init_failed = 1; |
751 | skl_free(ebus); | 754 | skl_free(ebus); |
@@ -785,8 +788,7 @@ static void skl_remove(struct pci_dev *pci) | |||
785 | 788 | ||
786 | release_firmware(skl->tplg); | 789 | release_firmware(skl->tplg); |
787 | 790 | ||
788 | if (pci_dev_run_wake(pci)) | 791 | pm_runtime_get_noresume(&pci->dev); |
789 | pm_runtime_get_noresume(&pci->dev); | ||
790 | 792 | ||
791 | /* codec removal, invoke bus_device_remove */ | 793 | /* codec removal, invoke bus_device_remove */ |
792 | snd_hdac_ext_bus_device_remove(ebus); | 794 | snd_hdac_ext_bus_device_remove(ebus); |
diff --git a/sound/soc/pxa/Kconfig b/sound/soc/pxa/Kconfig index f2bf8661dd21..823b5a236d8d 100644 --- a/sound/soc/pxa/Kconfig +++ b/sound/soc/pxa/Kconfig | |||
@@ -208,7 +208,7 @@ config SND_PXA2XX_SOC_IMOTE2 | |||
208 | 208 | ||
209 | config SND_MMP_SOC_BROWNSTONE | 209 | config SND_MMP_SOC_BROWNSTONE |
210 | tristate "SoC Audio support for Marvell Brownstone" | 210 | tristate "SoC Audio support for Marvell Brownstone" |
211 | depends on SND_MMP_SOC && MACH_BROWNSTONE | 211 | depends on SND_MMP_SOC && MACH_BROWNSTONE && I2C |
212 | select SND_MMP_SOC_SSPA | 212 | select SND_MMP_SOC_SSPA |
213 | select MFD_WM8994 | 213 | select MFD_WM8994 |
214 | select SND_SOC_WM8994 | 214 | select SND_SOC_WM8994 |
diff --git a/sound/soc/qcom/lpass-cpu.c b/sound/soc/qcom/lpass-cpu.c index 3cde9fb977fa..eff3f9a8b685 100644 --- a/sound/soc/qcom/lpass-cpu.c +++ b/sound/soc/qcom/lpass-cpu.c | |||
@@ -586,3 +586,6 @@ int asoc_qcom_lpass_cpu_platform_remove(struct platform_device *pdev) | |||
586 | return 0; | 586 | return 0; |
587 | } | 587 | } |
588 | EXPORT_SYMBOL_GPL(asoc_qcom_lpass_cpu_platform_remove); | 588 | EXPORT_SYMBOL_GPL(asoc_qcom_lpass_cpu_platform_remove); |
589 | |||
590 | MODULE_DESCRIPTION("QTi LPASS CPU Driver"); | ||
591 | MODULE_LICENSE("GPL v2"); | ||
diff --git a/sound/soc/qcom/lpass-platform.c b/sound/soc/qcom/lpass-platform.c index e2ff538a8aa5..b392e51de94d 100644 --- a/sound/soc/qcom/lpass-platform.c +++ b/sound/soc/qcom/lpass-platform.c | |||
@@ -61,7 +61,41 @@ static int lpass_platform_pcmops_open(struct snd_pcm_substream *substream) | |||
61 | { | 61 | { |
62 | struct snd_pcm_runtime *runtime = substream->runtime; | 62 | struct snd_pcm_runtime *runtime = substream->runtime; |
63 | struct snd_soc_pcm_runtime *soc_runtime = substream->private_data; | 63 | struct snd_soc_pcm_runtime *soc_runtime = substream->private_data; |
64 | int ret; | 64 | struct snd_soc_dai *cpu_dai = soc_runtime->cpu_dai; |
65 | struct lpass_data *drvdata = | ||
66 | snd_soc_platform_get_drvdata(soc_runtime->platform); | ||
67 | struct lpass_variant *v = drvdata->variant; | ||
68 | int ret, dma_ch, dir = substream->stream; | ||
69 | struct lpass_pcm_data *data; | ||
70 | |||
71 | data = devm_kzalloc(soc_runtime->dev, sizeof(*data), GFP_KERNEL); | ||
72 | if (!data) | ||
73 | return -ENOMEM; | ||
74 | |||
75 | data->i2s_port = cpu_dai->driver->id; | ||
76 | runtime->private_data = data; | ||
77 | |||
78 | dma_ch = 0; | ||
79 | if (v->alloc_dma_channel) | ||
80 | dma_ch = v->alloc_dma_channel(drvdata, dir); | ||
81 | if (dma_ch < 0) | ||
82 | return dma_ch; | ||
83 | |||
84 | drvdata->substream[dma_ch] = substream; | ||
85 | |||
86 | ret = regmap_write(drvdata->lpaif_map, | ||
87 | LPAIF_DMACTL_REG(v, dma_ch, dir), 0); | ||
88 | if (ret) { | ||
89 | dev_err(soc_runtime->dev, | ||
90 | "%s() error writing to rdmactl reg: %d\n", | ||
91 | __func__, ret); | ||
92 | return ret; | ||
93 | } | ||
94 | |||
95 | if (dir == SNDRV_PCM_STREAM_PLAYBACK) | ||
96 | data->rdma_ch = dma_ch; | ||
97 | else | ||
98 | data->wrdma_ch = dma_ch; | ||
65 | 99 | ||
66 | snd_soc_set_runtime_hwparams(substream, &lpass_platform_pcm_hardware); | 100 | snd_soc_set_runtime_hwparams(substream, &lpass_platform_pcm_hardware); |
67 | 101 | ||
@@ -80,13 +114,40 @@ static int lpass_platform_pcmops_open(struct snd_pcm_substream *substream) | |||
80 | return 0; | 114 | return 0; |
81 | } | 115 | } |
82 | 116 | ||
117 | static int lpass_platform_pcmops_close(struct snd_pcm_substream *substream) | ||
118 | { | ||
119 | struct snd_pcm_runtime *runtime = substream->runtime; | ||
120 | struct snd_soc_pcm_runtime *soc_runtime = substream->private_data; | ||
121 | struct lpass_data *drvdata = | ||
122 | snd_soc_platform_get_drvdata(soc_runtime->platform); | ||
123 | struct lpass_variant *v = drvdata->variant; | ||
124 | struct lpass_pcm_data *data; | ||
125 | int dma_ch, dir = substream->stream; | ||
126 | |||
127 | data = runtime->private_data; | ||
128 | v = drvdata->variant; | ||
129 | |||
130 | if (dir == SNDRV_PCM_STREAM_PLAYBACK) | ||
131 | dma_ch = data->rdma_ch; | ||
132 | else | ||
133 | dma_ch = data->wrdma_ch; | ||
134 | |||
135 | drvdata->substream[dma_ch] = NULL; | ||
136 | |||
137 | if (v->free_dma_channel) | ||
138 | v->free_dma_channel(drvdata, dma_ch); | ||
139 | |||
140 | return 0; | ||
141 | } | ||
142 | |||
83 | static int lpass_platform_pcmops_hw_params(struct snd_pcm_substream *substream, | 143 | static int lpass_platform_pcmops_hw_params(struct snd_pcm_substream *substream, |
84 | struct snd_pcm_hw_params *params) | 144 | struct snd_pcm_hw_params *params) |
85 | { | 145 | { |
86 | struct snd_soc_pcm_runtime *soc_runtime = substream->private_data; | 146 | struct snd_soc_pcm_runtime *soc_runtime = substream->private_data; |
87 | struct lpass_data *drvdata = | 147 | struct lpass_data *drvdata = |
88 | snd_soc_platform_get_drvdata(soc_runtime->platform); | 148 | snd_soc_platform_get_drvdata(soc_runtime->platform); |
89 | struct lpass_pcm_data *pcm_data = drvdata->private_data; | 149 | struct snd_pcm_runtime *rt = substream->runtime; |
150 | struct lpass_pcm_data *pcm_data = rt->private_data; | ||
90 | struct lpass_variant *v = drvdata->variant; | 151 | struct lpass_variant *v = drvdata->variant; |
91 | snd_pcm_format_t format = params_format(params); | 152 | snd_pcm_format_t format = params_format(params); |
92 | unsigned int channels = params_channels(params); | 153 | unsigned int channels = params_channels(params); |
@@ -179,7 +240,8 @@ static int lpass_platform_pcmops_hw_free(struct snd_pcm_substream *substream) | |||
179 | struct snd_soc_pcm_runtime *soc_runtime = substream->private_data; | 240 | struct snd_soc_pcm_runtime *soc_runtime = substream->private_data; |
180 | struct lpass_data *drvdata = | 241 | struct lpass_data *drvdata = |
181 | snd_soc_platform_get_drvdata(soc_runtime->platform); | 242 | snd_soc_platform_get_drvdata(soc_runtime->platform); |
182 | struct lpass_pcm_data *pcm_data = drvdata->private_data; | 243 | struct snd_pcm_runtime *rt = substream->runtime; |
244 | struct lpass_pcm_data *pcm_data = rt->private_data; | ||
183 | struct lpass_variant *v = drvdata->variant; | 245 | struct lpass_variant *v = drvdata->variant; |
184 | unsigned int reg; | 246 | unsigned int reg; |
185 | int ret; | 247 | int ret; |
@@ -203,7 +265,8 @@ static int lpass_platform_pcmops_prepare(struct snd_pcm_substream *substream) | |||
203 | struct snd_soc_pcm_runtime *soc_runtime = substream->private_data; | 265 | struct snd_soc_pcm_runtime *soc_runtime = substream->private_data; |
204 | struct lpass_data *drvdata = | 266 | struct lpass_data *drvdata = |
205 | snd_soc_platform_get_drvdata(soc_runtime->platform); | 267 | snd_soc_platform_get_drvdata(soc_runtime->platform); |
206 | struct lpass_pcm_data *pcm_data = drvdata->private_data; | 268 | struct snd_pcm_runtime *rt = substream->runtime; |
269 | struct lpass_pcm_data *pcm_data = rt->private_data; | ||
207 | struct lpass_variant *v = drvdata->variant; | 270 | struct lpass_variant *v = drvdata->variant; |
208 | int ret, ch, dir = substream->stream; | 271 | int ret, ch, dir = substream->stream; |
209 | 272 | ||
@@ -257,7 +320,8 @@ static int lpass_platform_pcmops_trigger(struct snd_pcm_substream *substream, | |||
257 | struct snd_soc_pcm_runtime *soc_runtime = substream->private_data; | 320 | struct snd_soc_pcm_runtime *soc_runtime = substream->private_data; |
258 | struct lpass_data *drvdata = | 321 | struct lpass_data *drvdata = |
259 | snd_soc_platform_get_drvdata(soc_runtime->platform); | 322 | snd_soc_platform_get_drvdata(soc_runtime->platform); |
260 | struct lpass_pcm_data *pcm_data = drvdata->private_data; | 323 | struct snd_pcm_runtime *rt = substream->runtime; |
324 | struct lpass_pcm_data *pcm_data = rt->private_data; | ||
261 | struct lpass_variant *v = drvdata->variant; | 325 | struct lpass_variant *v = drvdata->variant; |
262 | int ret, ch, dir = substream->stream; | 326 | int ret, ch, dir = substream->stream; |
263 | 327 | ||
@@ -333,7 +397,8 @@ static snd_pcm_uframes_t lpass_platform_pcmops_pointer( | |||
333 | struct snd_soc_pcm_runtime *soc_runtime = substream->private_data; | 397 | struct snd_soc_pcm_runtime *soc_runtime = substream->private_data; |
334 | struct lpass_data *drvdata = | 398 | struct lpass_data *drvdata = |
335 | snd_soc_platform_get_drvdata(soc_runtime->platform); | 399 | snd_soc_platform_get_drvdata(soc_runtime->platform); |
336 | struct lpass_pcm_data *pcm_data = drvdata->private_data; | 400 | struct snd_pcm_runtime *rt = substream->runtime; |
401 | struct lpass_pcm_data *pcm_data = rt->private_data; | ||
337 | struct lpass_variant *v = drvdata->variant; | 402 | struct lpass_variant *v = drvdata->variant; |
338 | unsigned int base_addr, curr_addr; | 403 | unsigned int base_addr, curr_addr; |
339 | int ret, ch, dir = substream->stream; | 404 | int ret, ch, dir = substream->stream; |
@@ -374,6 +439,7 @@ static int lpass_platform_pcmops_mmap(struct snd_pcm_substream *substream, | |||
374 | 439 | ||
375 | static const struct snd_pcm_ops lpass_platform_pcm_ops = { | 440 | static const struct snd_pcm_ops lpass_platform_pcm_ops = { |
376 | .open = lpass_platform_pcmops_open, | 441 | .open = lpass_platform_pcmops_open, |
442 | .close = lpass_platform_pcmops_close, | ||
377 | .ioctl = snd_pcm_lib_ioctl, | 443 | .ioctl = snd_pcm_lib_ioctl, |
378 | .hw_params = lpass_platform_pcmops_hw_params, | 444 | .hw_params = lpass_platform_pcmops_hw_params, |
379 | .hw_free = lpass_platform_pcmops_hw_free, | 445 | .hw_free = lpass_platform_pcmops_hw_free, |
@@ -470,117 +536,45 @@ static int lpass_platform_pcm_new(struct snd_soc_pcm_runtime *soc_runtime) | |||
470 | { | 536 | { |
471 | struct snd_pcm *pcm = soc_runtime->pcm; | 537 | struct snd_pcm *pcm = soc_runtime->pcm; |
472 | struct snd_pcm_substream *psubstream, *csubstream; | 538 | struct snd_pcm_substream *psubstream, *csubstream; |
473 | struct snd_soc_dai *cpu_dai = soc_runtime->cpu_dai; | ||
474 | struct lpass_data *drvdata = | ||
475 | snd_soc_platform_get_drvdata(soc_runtime->platform); | ||
476 | struct lpass_variant *v = drvdata->variant; | ||
477 | int ret = -EINVAL; | 539 | int ret = -EINVAL; |
478 | struct lpass_pcm_data *data; | ||
479 | size_t size = lpass_platform_pcm_hardware.buffer_bytes_max; | 540 | size_t size = lpass_platform_pcm_hardware.buffer_bytes_max; |
480 | 541 | ||
481 | data = devm_kzalloc(soc_runtime->dev, sizeof(*data), GFP_KERNEL); | ||
482 | if (!data) | ||
483 | return -ENOMEM; | ||
484 | |||
485 | data->i2s_port = cpu_dai->driver->id; | ||
486 | drvdata->private_data = data; | ||
487 | |||
488 | psubstream = pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream; | 542 | psubstream = pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream; |
489 | if (psubstream) { | 543 | if (psubstream) { |
490 | if (v->alloc_dma_channel) | ||
491 | data->rdma_ch = v->alloc_dma_channel(drvdata, | ||
492 | SNDRV_PCM_STREAM_PLAYBACK); | ||
493 | |||
494 | if (data->rdma_ch < 0) | ||
495 | return data->rdma_ch; | ||
496 | |||
497 | drvdata->substream[data->rdma_ch] = psubstream; | ||
498 | |||
499 | ret = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, | 544 | ret = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, |
500 | soc_runtime->platform->dev, | 545 | soc_runtime->platform->dev, |
501 | size, &psubstream->dma_buffer); | 546 | size, &psubstream->dma_buffer); |
502 | if (ret) | ||
503 | goto playback_alloc_err; | ||
504 | |||
505 | ret = regmap_write(drvdata->lpaif_map, | ||
506 | LPAIF_RDMACTL_REG(v, data->rdma_ch), 0); | ||
507 | if (ret) { | 547 | if (ret) { |
508 | dev_err(soc_runtime->dev, | 548 | dev_err(soc_runtime->dev, "Cannot allocate buffer(s)\n"); |
509 | "%s() error writing to rdmactl reg: %d\n", | 549 | return ret; |
510 | __func__, ret); | ||
511 | goto capture_alloc_err; | ||
512 | } | 550 | } |
513 | } | 551 | } |
514 | 552 | ||
515 | csubstream = pcm->streams[SNDRV_PCM_STREAM_CAPTURE].substream; | 553 | csubstream = pcm->streams[SNDRV_PCM_STREAM_CAPTURE].substream; |
516 | if (csubstream) { | 554 | if (csubstream) { |
517 | if (v->alloc_dma_channel) | ||
518 | data->wrdma_ch = v->alloc_dma_channel(drvdata, | ||
519 | SNDRV_PCM_STREAM_CAPTURE); | ||
520 | |||
521 | if (data->wrdma_ch < 0) { | ||
522 | ret = data->wrdma_ch; | ||
523 | goto capture_alloc_err; | ||
524 | } | ||
525 | |||
526 | drvdata->substream[data->wrdma_ch] = csubstream; | ||
527 | |||
528 | ret = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, | 555 | ret = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, |
529 | soc_runtime->platform->dev, | 556 | soc_runtime->platform->dev, |
530 | size, &csubstream->dma_buffer); | 557 | size, &csubstream->dma_buffer); |
531 | if (ret) | ||
532 | goto capture_alloc_err; | ||
533 | |||
534 | ret = regmap_write(drvdata->lpaif_map, | ||
535 | LPAIF_WRDMACTL_REG(v, data->wrdma_ch), 0); | ||
536 | if (ret) { | 558 | if (ret) { |
537 | dev_err(soc_runtime->dev, | 559 | dev_err(soc_runtime->dev, "Cannot allocate buffer(s)\n"); |
538 | "%s() error writing to wrdmactl reg: %d\n", | 560 | if (psubstream) |
539 | __func__, ret); | 561 | snd_dma_free_pages(&psubstream->dma_buffer); |
540 | goto capture_reg_err; | 562 | return ret; |
541 | } | 563 | } |
564 | |||
542 | } | 565 | } |
543 | 566 | ||
544 | return 0; | 567 | return 0; |
545 | |||
546 | capture_reg_err: | ||
547 | if (csubstream) | ||
548 | snd_dma_free_pages(&csubstream->dma_buffer); | ||
549 | |||
550 | capture_alloc_err: | ||
551 | if (psubstream) | ||
552 | snd_dma_free_pages(&psubstream->dma_buffer); | ||
553 | |||
554 | playback_alloc_err: | ||
555 | dev_err(soc_runtime->dev, "Cannot allocate buffer(s)\n"); | ||
556 | |||
557 | return ret; | ||
558 | } | 568 | } |
559 | 569 | ||
560 | static void lpass_platform_pcm_free(struct snd_pcm *pcm) | 570 | static void lpass_platform_pcm_free(struct snd_pcm *pcm) |
561 | { | 571 | { |
562 | struct snd_soc_pcm_runtime *rt; | ||
563 | struct lpass_data *drvdata; | ||
564 | struct lpass_pcm_data *data; | ||
565 | struct lpass_variant *v; | ||
566 | struct snd_pcm_substream *substream; | 572 | struct snd_pcm_substream *substream; |
567 | int ch, i; | 573 | int i; |
568 | 574 | ||
569 | for (i = 0; i < ARRAY_SIZE(pcm->streams); i++) { | 575 | for (i = 0; i < ARRAY_SIZE(pcm->streams); i++) { |
570 | substream = pcm->streams[i].substream; | 576 | substream = pcm->streams[i].substream; |
571 | if (substream) { | 577 | if (substream) { |
572 | rt = substream->private_data; | ||
573 | drvdata = snd_soc_platform_get_drvdata(rt->platform); | ||
574 | data = drvdata->private_data; | ||
575 | |||
576 | ch = (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) | ||
577 | ? data->rdma_ch | ||
578 | : data->wrdma_ch; | ||
579 | v = drvdata->variant; | ||
580 | drvdata->substream[ch] = NULL; | ||
581 | if (v->free_dma_channel) | ||
582 | v->free_dma_channel(drvdata, ch); | ||
583 | |||
584 | snd_dma_free_pages(&substream->dma_buffer); | 578 | snd_dma_free_pages(&substream->dma_buffer); |
585 | substream->dma_buffer.area = NULL; | 579 | substream->dma_buffer.area = NULL; |
586 | substream->dma_buffer.addr = 0; | 580 | substream->dma_buffer.addr = 0; |
diff --git a/sound/soc/qcom/lpass.h b/sound/soc/qcom/lpass.h index 35b3cea8207d..924971b6ded5 100644 --- a/sound/soc/qcom/lpass.h +++ b/sound/soc/qcom/lpass.h | |||
@@ -59,7 +59,6 @@ struct lpass_data { | |||
59 | struct clk *pcnoc_mport_clk; | 59 | struct clk *pcnoc_mport_clk; |
60 | struct clk *pcnoc_sway_clk; | 60 | struct clk *pcnoc_sway_clk; |
61 | 61 | ||
62 | void *private_data; | ||
63 | }; | 62 | }; |
64 | 63 | ||
65 | /* Vairant data per each SOC */ | 64 | /* Vairant data per each SOC */ |
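Taken together, the two qcom/lpass hunks above move the DMA-channel bookkeeping out of the single shared lpass_data and into a per-stream object allocated in open(), stored in runtime->private_data, and torn down in close(); the now-unused private_data pointer is dropped from lpass_data. A much simplified sketch of that per-stream ownership, with every name below invented:

#include <stdio.h>
#include <stdlib.h>

struct pcm_stream_data {
	int dma_ch;		/* channel owned by this stream only */
};

struct stream {
	void *private_data;	/* stand-in for runtime->private_data */
};

static int stream_open(struct stream *s, int dma_ch)
{
	struct pcm_stream_data *data = calloc(1, sizeof(*data));

	if (!data)
		return -1;
	data->dma_ch = dma_ch;
	s->private_data = data;	/* state travels with the stream, not the driver */
	return 0;
}

static void stream_close(struct stream *s)
{
	free(s->private_data);
	s->private_data = NULL;
}

int main(void)
{
	struct stream playback = { 0 }, capture = { 0 };

	stream_open(&playback, 4);	/* playback (rdma) channel */
	stream_open(&capture, 5);	/* capture (wrdma) channel */
	printf("playback ch=%d capture ch=%d\n",
	       ((struct pcm_stream_data *)playback.private_data)->dma_ch,
	       ((struct pcm_stream_data *)capture.private_data)->dma_ch);
	stream_close(&playback);
	stream_close(&capture);
	return 0;
}
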
diff --git a/sound/soc/samsung/ac97.c b/sound/soc/samsung/ac97.c index 97d6700b1009..cbc0023c2bc8 100644 --- a/sound/soc/samsung/ac97.c +++ b/sound/soc/samsung/ac97.c | |||
@@ -383,11 +383,6 @@ static int s3c_ac97_probe(struct platform_device *pdev) | |||
383 | goto err4; | 383 | goto err4; |
384 | } | 384 | } |
385 | 385 | ||
386 | ret = devm_snd_soc_register_component(&pdev->dev, &s3c_ac97_component, | ||
387 | s3c_ac97_dai, ARRAY_SIZE(s3c_ac97_dai)); | ||
388 | if (ret) | ||
389 | goto err5; | ||
390 | |||
391 | ret = samsung_asoc_dma_platform_register(&pdev->dev, | 386 | ret = samsung_asoc_dma_platform_register(&pdev->dev, |
392 | ac97_pdata->dma_filter, | 387 | ac97_pdata->dma_filter, |
393 | NULL, NULL); | 388 | NULL, NULL); |
@@ -396,6 +391,11 @@ static int s3c_ac97_probe(struct platform_device *pdev) | |||
396 | goto err5; | 391 | goto err5; |
397 | } | 392 | } |
398 | 393 | ||
394 | ret = devm_snd_soc_register_component(&pdev->dev, &s3c_ac97_component, | ||
395 | s3c_ac97_dai, ARRAY_SIZE(s3c_ac97_dai)); | ||
396 | if (ret) | ||
397 | goto err5; | ||
398 | |||
399 | return 0; | 399 | return 0; |
400 | err5: | 400 | err5: |
401 | free_irq(irq_res->start, NULL); | 401 | free_irq(irq_res->start, NULL); |
diff --git a/sound/soc/samsung/i2s.c b/sound/soc/samsung/i2s.c index 7e32cf4581f8..7825bff45ae3 100644 --- a/sound/soc/samsung/i2s.c +++ b/sound/soc/samsung/i2s.c | |||
@@ -1237,14 +1237,14 @@ static int samsung_i2s_probe(struct platform_device *pdev) | |||
1237 | dev_err(&pdev->dev, "Unable to get drvdata\n"); | 1237 | dev_err(&pdev->dev, "Unable to get drvdata\n"); |
1238 | return -EFAULT; | 1238 | return -EFAULT; |
1239 | } | 1239 | } |
1240 | ret = devm_snd_soc_register_component(&sec_dai->pdev->dev, | 1240 | ret = samsung_asoc_dma_platform_register(&pdev->dev, |
1241 | &samsung_i2s_component, | 1241 | sec_dai->filter, "tx-sec", NULL); |
1242 | &sec_dai->i2s_dai_drv, 1); | ||
1243 | if (ret != 0) | 1242 | if (ret != 0) |
1244 | return ret; | 1243 | return ret; |
1245 | 1244 | ||
1246 | return samsung_asoc_dma_platform_register(&pdev->dev, | 1245 | return devm_snd_soc_register_component(&sec_dai->pdev->dev, |
1247 | sec_dai->filter, "tx-sec", NULL); | 1246 | &samsung_i2s_component, |
1247 | &sec_dai->i2s_dai_drv, 1); | ||
1248 | } | 1248 | } |
1249 | 1249 | ||
1250 | pri_dai = i2s_alloc_dai(pdev, false); | 1250 | pri_dai = i2s_alloc_dai(pdev, false); |
@@ -1314,6 +1314,11 @@ static int samsung_i2s_probe(struct platform_device *pdev) | |||
1314 | if (quirks & QUIRK_PRI_6CHAN) | 1314 | if (quirks & QUIRK_PRI_6CHAN) |
1315 | pri_dai->i2s_dai_drv.playback.channels_max = 6; | 1315 | pri_dai->i2s_dai_drv.playback.channels_max = 6; |
1316 | 1316 | ||
1317 | ret = samsung_asoc_dma_platform_register(&pdev->dev, pri_dai->filter, | ||
1318 | NULL, NULL); | ||
1319 | if (ret < 0) | ||
1320 | goto err_disable_clk; | ||
1321 | |||
1317 | if (quirks & QUIRK_SEC_DAI) { | 1322 | if (quirks & QUIRK_SEC_DAI) { |
1318 | sec_dai = i2s_alloc_dai(pdev, true); | 1323 | sec_dai = i2s_alloc_dai(pdev, true); |
1319 | if (!sec_dai) { | 1324 | if (!sec_dai) { |
@@ -1353,10 +1358,6 @@ static int samsung_i2s_probe(struct platform_device *pdev) | |||
1353 | if (ret < 0) | 1358 | if (ret < 0) |
1354 | goto err_free_dai; | 1359 | goto err_free_dai; |
1355 | 1360 | ||
1356 | ret = samsung_asoc_dma_platform_register(&pdev->dev, pri_dai->filter, | ||
1357 | NULL, NULL); | ||
1358 | if (ret < 0) | ||
1359 | goto err_free_dai; | ||
1360 | 1361 | ||
1361 | pm_runtime_enable(&pdev->dev); | 1362 | pm_runtime_enable(&pdev->dev); |
1362 | 1363 | ||
diff --git a/sound/soc/samsung/pcm.c b/sound/soc/samsung/pcm.c index 43e367a9acc3..c484985812ed 100644 --- a/sound/soc/samsung/pcm.c +++ b/sound/soc/samsung/pcm.c | |||
@@ -565,24 +565,25 @@ static int s3c_pcm_dev_probe(struct platform_device *pdev) | |||
565 | pcm->dma_capture = &s3c_pcm_stereo_in[pdev->id]; | 565 | pcm->dma_capture = &s3c_pcm_stereo_in[pdev->id]; |
566 | pcm->dma_playback = &s3c_pcm_stereo_out[pdev->id]; | 566 | pcm->dma_playback = &s3c_pcm_stereo_out[pdev->id]; |
567 | 567 | ||
568 | ret = samsung_asoc_dma_platform_register(&pdev->dev, filter, | ||
569 | NULL, NULL); | ||
570 | if (ret) { | ||
571 | dev_err(&pdev->dev, "failed to get register DMA: %d\n", ret); | ||
572 | goto err5; | ||
573 | } | ||
574 | |||
568 | pm_runtime_enable(&pdev->dev); | 575 | pm_runtime_enable(&pdev->dev); |
569 | 576 | ||
570 | ret = devm_snd_soc_register_component(&pdev->dev, &s3c_pcm_component, | 577 | ret = devm_snd_soc_register_component(&pdev->dev, &s3c_pcm_component, |
571 | &s3c_pcm_dai[pdev->id], 1); | 578 | &s3c_pcm_dai[pdev->id], 1); |
572 | if (ret != 0) { | 579 | if (ret != 0) { |
573 | dev_err(&pdev->dev, "failed to get register DAI: %d\n", ret); | 580 | dev_err(&pdev->dev, "failed to get register DAI: %d\n", ret); |
574 | goto err5; | 581 | goto err6; |
575 | } | ||
576 | |||
577 | ret = samsung_asoc_dma_platform_register(&pdev->dev, filter, | ||
578 | NULL, NULL); | ||
579 | if (ret) { | ||
580 | dev_err(&pdev->dev, "failed to get register DMA: %d\n", ret); | ||
581 | goto err5; | ||
582 | } | 582 | } |
583 | 583 | ||
584 | return 0; | 584 | return 0; |
585 | 585 | err6: | |
586 | pm_runtime_disable(&pdev->dev); | ||
586 | err5: | 587 | err5: |
587 | clk_disable_unprepare(pcm->pclk); | 588 | clk_disable_unprepare(pcm->pclk); |
588 | err4: | 589 | err4: |
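This and the surrounding Samsung ASoC hunks move samsung_asoc_dma_platform_register() ahead of the DAI/component registration and, in pcm.c, add an err6 label so pm_runtime_enable() is undone when the later step fails. A minimal probe sketch of that ordering and unwind, assuming a hypothetical foo_* driver whose earlier resource setup (clocks, regs, IRQ) is omitted:

/* Sketch only: foo_component/foo_dai are placeholders, and the real
 * driver's earlier resource setup (clocks, regs, IRQ) is omitted. */
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <sound/soc.h>
#include "dma.h"   /* samsung-local header declaring the DMA helper */

static const struct snd_soc_component_driver foo_component = {
        .name = "foo",
};
static struct snd_soc_dai_driver foo_dai;

static int foo_probe(struct platform_device *pdev)
{
        int ret;

        /* Register the DMA platform first ... */
        ret = samsung_asoc_dma_platform_register(&pdev->dev, NULL, NULL, NULL);
        if (ret) {
                dev_err(&pdev->dev, "failed to register DMA: %d\n", ret);
                return ret;             /* nothing enabled yet, plain return */
        }

        pm_runtime_enable(&pdev->dev);

        /* ... then the DAI component, unwinding pm_runtime on failure. */
        ret = devm_snd_soc_register_component(&pdev->dev, &foo_component,
                                              &foo_dai, 1);
        if (ret) {
                dev_err(&pdev->dev, "failed to register DAI: %d\n", ret);
                goto err_pm;
        }

        return 0;

err_pm:
        pm_runtime_disable(&pdev->dev);
        return ret;
}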
diff --git a/sound/soc/samsung/s3c2412-i2s.c b/sound/soc/samsung/s3c2412-i2s.c index 3e89fbc0c51d..0a4718207e6e 100644 --- a/sound/soc/samsung/s3c2412-i2s.c +++ b/sound/soc/samsung/s3c2412-i2s.c | |||
@@ -168,19 +168,19 @@ static int s3c2412_iis_dev_probe(struct platform_device *pdev) | |||
168 | s3c2412_i2s_pcm_stereo_in.addr = res->start + S3C2412_IISRXD; | 168 | s3c2412_i2s_pcm_stereo_in.addr = res->start + S3C2412_IISRXD; |
169 | s3c2412_i2s_pcm_stereo_in.filter_data = pdata->dma_capture; | 169 | s3c2412_i2s_pcm_stereo_in.filter_data = pdata->dma_capture; |
170 | 170 | ||
171 | ret = s3c_i2sv2_register_component(&pdev->dev, -1, | 171 | ret = samsung_asoc_dma_platform_register(&pdev->dev, |
172 | &s3c2412_i2s_component, | 172 | pdata->dma_filter, |
173 | &s3c2412_i2s_dai); | 173 | NULL, NULL); |
174 | if (ret) { | 174 | if (ret) { |
175 | pr_err("failed to register the dai\n"); | 175 | pr_err("failed to register the DMA: %d\n", ret); |
176 | return ret; | 176 | return ret; |
177 | } | 177 | } |
178 | 178 | ||
179 | ret = samsung_asoc_dma_platform_register(&pdev->dev, | 179 | ret = s3c_i2sv2_register_component(&pdev->dev, -1, |
180 | pdata->dma_filter, | 180 | &s3c2412_i2s_component, |
181 | NULL, NULL); | 181 | &s3c2412_i2s_dai); |
182 | if (ret) | 182 | if (ret) |
183 | pr_err("failed to register the DMA: %d\n", ret); | 183 | pr_err("failed to register the dai\n"); |
184 | 184 | ||
185 | return ret; | 185 | return ret; |
186 | } | 186 | } |
diff --git a/sound/soc/samsung/s3c24xx-i2s.c b/sound/soc/samsung/s3c24xx-i2s.c index c78a936a3099..9052f6a7073e 100644 --- a/sound/soc/samsung/s3c24xx-i2s.c +++ b/sound/soc/samsung/s3c24xx-i2s.c | |||
@@ -474,18 +474,18 @@ static int s3c24xx_iis_dev_probe(struct platform_device *pdev) | |||
474 | s3c24xx_i2s_pcm_stereo_in.addr = res->start + S3C2410_IISFIFO; | 474 | s3c24xx_i2s_pcm_stereo_in.addr = res->start + S3C2410_IISFIFO; |
475 | s3c24xx_i2s_pcm_stereo_in.filter_data = pdata->dma_capture; | 475 | s3c24xx_i2s_pcm_stereo_in.filter_data = pdata->dma_capture; |
476 | 476 | ||
477 | ret = devm_snd_soc_register_component(&pdev->dev, | 477 | ret = samsung_asoc_dma_platform_register(&pdev->dev, |
478 | &s3c24xx_i2s_component, &s3c24xx_i2s_dai, 1); | 478 | pdata->dma_filter, |
479 | NULL, NULL); | ||
479 | if (ret) { | 480 | if (ret) { |
480 | pr_err("failed to register the dai\n"); | 481 | pr_err("failed to register the dma: %d\n", ret); |
481 | return ret; | 482 | return ret; |
482 | } | 483 | } |
483 | 484 | ||
484 | ret = samsung_asoc_dma_platform_register(&pdev->dev, | 485 | ret = devm_snd_soc_register_component(&pdev->dev, |
485 | pdata->dma_filter, | 486 | &s3c24xx_i2s_component, &s3c24xx_i2s_dai, 1); |
486 | NULL, NULL); | ||
487 | if (ret) | 487 | if (ret) |
488 | pr_err("failed to register the dma: %d\n", ret); | 488 | pr_err("failed to register the dai\n"); |
489 | 489 | ||
490 | return ret; | 490 | return ret; |
491 | } | 491 | } |
diff --git a/sound/soc/samsung/spdif.c b/sound/soc/samsung/spdif.c index 26c1fbed4d35..779504f54bc0 100644 --- a/sound/soc/samsung/spdif.c +++ b/sound/soc/samsung/spdif.c | |||
@@ -416,15 +416,6 @@ static int spdif_probe(struct platform_device *pdev) | |||
416 | goto err3; | 416 | goto err3; |
417 | } | 417 | } |
418 | 418 | ||
419 | dev_set_drvdata(&pdev->dev, spdif); | ||
420 | |||
421 | ret = devm_snd_soc_register_component(&pdev->dev, | ||
422 | &samsung_spdif_component, &samsung_spdif_dai, 1); | ||
423 | if (ret != 0) { | ||
424 | dev_err(&pdev->dev, "fail to register dai\n"); | ||
425 | goto err4; | ||
426 | } | ||
427 | |||
428 | spdif_stereo_out.addr_width = 2; | 419 | spdif_stereo_out.addr_width = 2; |
429 | spdif_stereo_out.addr = mem_res->start + DATA_OUTBUF; | 420 | spdif_stereo_out.addr = mem_res->start + DATA_OUTBUF; |
430 | filter = NULL; | 421 | filter = NULL; |
@@ -432,7 +423,6 @@ static int spdif_probe(struct platform_device *pdev) | |||
432 | spdif_stereo_out.filter_data = spdif_pdata->dma_playback; | 423 | spdif_stereo_out.filter_data = spdif_pdata->dma_playback; |
433 | filter = spdif_pdata->dma_filter; | 424 | filter = spdif_pdata->dma_filter; |
434 | } | 425 | } |
435 | |||
436 | spdif->dma_playback = &spdif_stereo_out; | 426 | spdif->dma_playback = &spdif_stereo_out; |
437 | 427 | ||
438 | ret = samsung_asoc_dma_platform_register(&pdev->dev, filter, | 428 | ret = samsung_asoc_dma_platform_register(&pdev->dev, filter, |
@@ -442,6 +432,15 @@ static int spdif_probe(struct platform_device *pdev) | |||
442 | goto err4; | 432 | goto err4; |
443 | } | 433 | } |
444 | 434 | ||
435 | dev_set_drvdata(&pdev->dev, spdif); | ||
436 | |||
437 | ret = devm_snd_soc_register_component(&pdev->dev, | ||
438 | &samsung_spdif_component, &samsung_spdif_dai, 1); | ||
439 | if (ret != 0) { | ||
440 | dev_err(&pdev->dev, "fail to register dai\n"); | ||
441 | goto err4; | ||
442 | } | ||
443 | |||
445 | return 0; | 444 | return 0; |
446 | err4: | 445 | err4: |
447 | iounmap(spdif->regs); | 446 | iounmap(spdif->regs); |
diff --git a/sound/soc/sti/uniperif_player.c b/sound/soc/sti/uniperif_player.c index 1bc8ebc2528e..ad54d4cf58ad 100644 --- a/sound/soc/sti/uniperif_player.c +++ b/sound/soc/sti/uniperif_player.c | |||
@@ -614,7 +614,11 @@ static int uni_player_ctl_iec958_put(struct snd_kcontrol *kcontrol, | |||
614 | iec958->status[3] = ucontrol->value.iec958.status[3]; | 614 | iec958->status[3] = ucontrol->value.iec958.status[3]; |
615 | mutex_unlock(&player->ctrl_lock); | 615 | mutex_unlock(&player->ctrl_lock); |
616 | 616 | ||
617 | uni_player_set_channel_status(player, NULL); | 617 | if (player->substream && player->substream->runtime) |
618 | uni_player_set_channel_status(player, | ||
619 | player->substream->runtime); | ||
620 | else | ||
621 | uni_player_set_channel_status(player, NULL); | ||
618 | 622 | ||
619 | return 0; | 623 | return 0; |
620 | } | 624 | } |
diff --git a/sound/soc/sunxi/sun4i-codec.c b/sound/soc/sunxi/sun4i-codec.c index e047ec06d538..56ed9472e89f 100644 --- a/sound/soc/sunxi/sun4i-codec.c +++ b/sound/soc/sunxi/sun4i-codec.c | |||
@@ -765,11 +765,11 @@ static struct snd_soc_card *sun4i_codec_create_card(struct device *dev) | |||
765 | 765 | ||
766 | card = devm_kzalloc(dev, sizeof(*card), GFP_KERNEL); | 766 | card = devm_kzalloc(dev, sizeof(*card), GFP_KERNEL); |
767 | if (!card) | 767 | if (!card) |
768 | return NULL; | 768 | return ERR_PTR(-ENOMEM); |
769 | 769 | ||
770 | card->dai_link = sun4i_codec_create_link(dev, &card->num_links); | 770 | card->dai_link = sun4i_codec_create_link(dev, &card->num_links); |
771 | if (!card->dai_link) | 771 | if (!card->dai_link) |
772 | return NULL; | 772 | return ERR_PTR(-ENOMEM); |
773 | 773 | ||
774 | card->dev = dev; | 774 | card->dev = dev; |
775 | card->name = "sun4i-codec"; | 775 | card->name = "sun4i-codec"; |
@@ -829,12 +829,6 @@ static int sun4i_codec_probe(struct platform_device *pdev) | |||
829 | return PTR_ERR(scodec->clk_module); | 829 | return PTR_ERR(scodec->clk_module); |
830 | } | 830 | } |
831 | 831 | ||
832 | /* Enable the bus clock */ | ||
833 | if (clk_prepare_enable(scodec->clk_apb)) { | ||
834 | dev_err(&pdev->dev, "Failed to enable the APB clock\n"); | ||
835 | return -EINVAL; | ||
836 | } | ||
837 | |||
838 | scodec->gpio_pa = devm_gpiod_get_optional(&pdev->dev, "allwinner,pa", | 832 | scodec->gpio_pa = devm_gpiod_get_optional(&pdev->dev, "allwinner,pa", |
839 | GPIOD_OUT_LOW); | 833 | GPIOD_OUT_LOW); |
840 | if (IS_ERR(scodec->gpio_pa)) { | 834 | if (IS_ERR(scodec->gpio_pa)) { |
@@ -844,6 +838,12 @@ static int sun4i_codec_probe(struct platform_device *pdev) | |||
844 | return ret; | 838 | return ret; |
845 | } | 839 | } |
846 | 840 | ||
841 | /* Enable the bus clock */ | ||
842 | if (clk_prepare_enable(scodec->clk_apb)) { | ||
843 | dev_err(&pdev->dev, "Failed to enable the APB clock\n"); | ||
844 | return -EINVAL; | ||
845 | } | ||
846 | |||
847 | /* DMA configuration for TX FIFO */ | 847 | /* DMA configuration for TX FIFO */ |
848 | scodec->playback_dma_data.addr = res->start + SUN4I_CODEC_DAC_TXDATA; | 848 | scodec->playback_dma_data.addr = res->start + SUN4I_CODEC_DAC_TXDATA; |
849 | scodec->playback_dma_data.maxburst = 4; | 849 | scodec->playback_dma_data.maxburst = 4; |
@@ -876,7 +876,8 @@ static int sun4i_codec_probe(struct platform_device *pdev) | |||
876 | } | 876 | } |
877 | 877 | ||
878 | card = sun4i_codec_create_card(&pdev->dev); | 878 | card = sun4i_codec_create_card(&pdev->dev); |
879 | if (!card) { | 879 | if (IS_ERR(card)) { |
880 | ret = PTR_ERR(card); | ||
880 | dev_err(&pdev->dev, "Failed to create our card\n"); | 881 | dev_err(&pdev->dev, "Failed to create our card\n"); |
881 | goto err_unregister_codec; | 882 | goto err_unregister_codec; |
882 | } | 883 | } |
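The sun4i-codec hunks switch the card-creation helper from returning NULL to ERR_PTR(-ENOMEM), so the probe path can propagate a real errno via PTR_ERR() instead of inventing one; that is also why the caller now tests IS_ERR(card) rather than !card. A stripped-down sketch of the idiom, with hypothetical example_* names standing in for the driver's functions:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <sound/soc.h>

/* Helper side: encode the errno in the returned pointer. */
static struct snd_soc_card *example_create_card(struct device *dev)
{
        struct snd_soc_card *card;

        card = devm_kzalloc(dev, sizeof(*card), GFP_KERNEL);
        if (!card)
                return ERR_PTR(-ENOMEM);

        card->dev = dev;
        return card;
}

/* Caller side: IS_ERR()/PTR_ERR() recover the errno for propagation. */
static int example_probe(struct device *dev)
{
        struct snd_soc_card *card = example_create_card(dev);

        if (IS_ERR(card))
                return PTR_ERR(card);

        return devm_snd_soc_register_card(dev, card);
}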
diff --git a/tools/perf/ui/browsers/hists.c b/tools/perf/ui/browsers/hists.c index 4ffff7be9299..a53fef0c673b 100644 --- a/tools/perf/ui/browsers/hists.c +++ b/tools/perf/ui/browsers/hists.c | |||
@@ -1337,8 +1337,8 @@ static int hist_browser__show_hierarchy_entry(struct hist_browser *browser, | |||
1337 | } | 1337 | } |
1338 | 1338 | ||
1339 | if (first) { | 1339 | if (first) { |
1340 | ui_browser__printf(&browser->b, "%c", folded_sign); | 1340 | ui_browser__printf(&browser->b, "%c ", folded_sign); |
1341 | width--; | 1341 | width -= 2; |
1342 | first = false; | 1342 | first = false; |
1343 | } else { | 1343 | } else { |
1344 | ui_browser__printf(&browser->b, " "); | 1344 | ui_browser__printf(&browser->b, " "); |
@@ -1361,8 +1361,10 @@ static int hist_browser__show_hierarchy_entry(struct hist_browser *browser, | |||
1361 | width -= hpp.buf - s; | 1361 | width -= hpp.buf - s; |
1362 | } | 1362 | } |
1363 | 1363 | ||
1364 | ui_browser__write_nstring(&browser->b, "", hierarchy_indent); | 1364 | if (!first) { |
1365 | width -= hierarchy_indent; | 1365 | ui_browser__write_nstring(&browser->b, "", hierarchy_indent); |
1366 | width -= hierarchy_indent; | ||
1367 | } | ||
1366 | 1368 | ||
1367 | if (column >= browser->b.horiz_scroll) { | 1369 | if (column >= browser->b.horiz_scroll) { |
1368 | char s[2048]; | 1370 | char s[2048]; |
@@ -1381,7 +1383,13 @@ static int hist_browser__show_hierarchy_entry(struct hist_browser *browser, | |||
1381 | } | 1383 | } |
1382 | 1384 | ||
1383 | perf_hpp_list__for_each_format(entry->hpp_list, fmt) { | 1385 | perf_hpp_list__for_each_format(entry->hpp_list, fmt) { |
1384 | ui_browser__write_nstring(&browser->b, "", 2); | 1386 | if (first) { |
1387 | ui_browser__printf(&browser->b, "%c ", folded_sign); | ||
1388 | first = false; | ||
1389 | } else { | ||
1390 | ui_browser__write_nstring(&browser->b, "", 2); | ||
1391 | } | ||
1392 | |||
1385 | width -= 2; | 1393 | width -= 2; |
1386 | 1394 | ||
1387 | /* | 1395 | /* |
@@ -1555,10 +1563,11 @@ static int hists_browser__scnprintf_hierarchy_headers(struct hist_browser *brows | |||
1555 | int indent = hists->nr_hpp_node - 2; | 1563 | int indent = hists->nr_hpp_node - 2; |
1556 | bool first_node, first_col; | 1564 | bool first_node, first_col; |
1557 | 1565 | ||
1558 | ret = scnprintf(buf, size, " "); | 1566 | ret = scnprintf(buf, size, " "); |
1559 | if (advance_hpp_check(&dummy_hpp, ret)) | 1567 | if (advance_hpp_check(&dummy_hpp, ret)) |
1560 | return ret; | 1568 | return ret; |
1561 | 1569 | ||
1570 | first_node = true; | ||
1562 | /* the first hpp_list_node is for overhead columns */ | 1571 | /* the first hpp_list_node is for overhead columns */ |
1563 | fmt_node = list_first_entry(&hists->hpp_formats, | 1572 | fmt_node = list_first_entry(&hists->hpp_formats, |
1564 | struct perf_hpp_list_node, list); | 1573 | struct perf_hpp_list_node, list); |
@@ -1573,12 +1582,16 @@ static int hists_browser__scnprintf_hierarchy_headers(struct hist_browser *brows | |||
1573 | ret = scnprintf(dummy_hpp.buf, dummy_hpp.size, " "); | 1582 | ret = scnprintf(dummy_hpp.buf, dummy_hpp.size, " "); |
1574 | if (advance_hpp_check(&dummy_hpp, ret)) | 1583 | if (advance_hpp_check(&dummy_hpp, ret)) |
1575 | break; | 1584 | break; |
1585 | |||
1586 | first_node = false; | ||
1576 | } | 1587 | } |
1577 | 1588 | ||
1578 | ret = scnprintf(dummy_hpp.buf, dummy_hpp.size, "%*s", | 1589 | if (!first_node) { |
1579 | indent * HIERARCHY_INDENT, ""); | 1590 | ret = scnprintf(dummy_hpp.buf, dummy_hpp.size, "%*s", |
1580 | if (advance_hpp_check(&dummy_hpp, ret)) | 1591 | indent * HIERARCHY_INDENT, ""); |
1581 | return ret; | 1592 | if (advance_hpp_check(&dummy_hpp, ret)) |
1593 | return ret; | ||
1594 | } | ||
1582 | 1595 | ||
1583 | first_node = true; | 1596 | first_node = true; |
1584 | list_for_each_entry_continue(fmt_node, &hists->hpp_formats, list) { | 1597 | list_for_each_entry_continue(fmt_node, &hists->hpp_formats, list) { |
@@ -2076,8 +2089,21 @@ void hist_browser__init(struct hist_browser *browser, | |||
2076 | browser->b.use_navkeypressed = true; | 2089 | browser->b.use_navkeypressed = true; |
2077 | browser->show_headers = symbol_conf.show_hist_headers; | 2090 | browser->show_headers = symbol_conf.show_hist_headers; |
2078 | 2091 | ||
2079 | hists__for_each_format(hists, fmt) | 2092 | if (symbol_conf.report_hierarchy) { |
2093 | struct perf_hpp_list_node *fmt_node; | ||
2094 | |||
2095 | /* count overhead columns (in the first node) */ | ||
2096 | fmt_node = list_first_entry(&hists->hpp_formats, | ||
2097 | struct perf_hpp_list_node, list); | ||
2098 | perf_hpp_list__for_each_format(&fmt_node->hpp, fmt) | ||
2099 | ++browser->b.columns; | ||
2100 | |||
2101 | /* add a single column for whole hierarchy sort keys*/ | ||
2080 | ++browser->b.columns; | 2102 | ++browser->b.columns; |
2103 | } else { | ||
2104 | hists__for_each_format(hists, fmt) | ||
2105 | ++browser->b.columns; | ||
2106 | } | ||
2081 | 2107 | ||
2082 | hists__reset_column_width(hists); | 2108 | hists__reset_column_width(hists); |
2083 | } | 2109 | } |
diff --git a/tools/perf/util/hist.c b/tools/perf/util/hist.c index b02992efb513..a69f027368ef 100644 --- a/tools/perf/util/hist.c +++ b/tools/perf/util/hist.c | |||
@@ -1600,18 +1600,18 @@ static void hists__hierarchy_output_resort(struct hists *hists, | |||
1600 | if (prog) | 1600 | if (prog) |
1601 | ui_progress__update(prog, 1); | 1601 | ui_progress__update(prog, 1); |
1602 | 1602 | ||
1603 | hists->nr_entries++; | ||
1604 | if (!he->filtered) { | ||
1605 | hists->nr_non_filtered_entries++; | ||
1606 | hists__calc_col_len(hists, he); | ||
1607 | } | ||
1608 | |||
1603 | if (!he->leaf) { | 1609 | if (!he->leaf) { |
1604 | hists__hierarchy_output_resort(hists, prog, | 1610 | hists__hierarchy_output_resort(hists, prog, |
1605 | &he->hroot_in, | 1611 | &he->hroot_in, |
1606 | &he->hroot_out, | 1612 | &he->hroot_out, |
1607 | min_callchain_hits, | 1613 | min_callchain_hits, |
1608 | use_callchain); | 1614 | use_callchain); |
1609 | hists->nr_entries++; | ||
1610 | if (!he->filtered) { | ||
1611 | hists->nr_non_filtered_entries++; | ||
1612 | hists__calc_col_len(hists, he); | ||
1613 | } | ||
1614 | |||
1615 | continue; | 1615 | continue; |
1616 | } | 1616 | } |
1617 | 1617 | ||
diff --git a/tools/power/cpupower/utils/cpufreq-set.c b/tools/power/cpupower/utils/cpufreq-set.c index b4bf76971dc9..1eef0aed6423 100644 --- a/tools/power/cpupower/utils/cpufreq-set.c +++ b/tools/power/cpupower/utils/cpufreq-set.c | |||
@@ -296,7 +296,7 @@ int cmd_freq_set(int argc, char **argv) | |||
296 | struct cpufreq_affected_cpus *cpus; | 296 | struct cpufreq_affected_cpus *cpus; |
297 | 297 | ||
298 | if (!bitmask_isbitset(cpus_chosen, cpu) || | 298 | if (!bitmask_isbitset(cpus_chosen, cpu) || |
299 | cpupower_is_cpu_online(cpu)) | 299 | cpupower_is_cpu_online(cpu) != 1) |
300 | continue; | 300 | continue; |
301 | 301 | ||
302 | cpus = cpufreq_get_related_cpus(cpu); | 302 | cpus = cpufreq_get_related_cpus(cpu); |
@@ -316,10 +316,7 @@ int cmd_freq_set(int argc, char **argv) | |||
316 | cpu <= bitmask_last(cpus_chosen); cpu++) { | 316 | cpu <= bitmask_last(cpus_chosen); cpu++) { |
317 | 317 | ||
318 | if (!bitmask_isbitset(cpus_chosen, cpu) || | 318 | if (!bitmask_isbitset(cpus_chosen, cpu) || |
319 | cpupower_is_cpu_online(cpu)) | 319 | cpupower_is_cpu_online(cpu) != 1) |
320 | continue; | ||
321 | |||
322 | if (cpupower_is_cpu_online(cpu) != 1) | ||
323 | continue; | 320 | continue; |
324 | 321 | ||
325 | printf(_("Setting cpu: %d\n"), cpu); | 322 | printf(_("Setting cpu: %d\n"), cpu); |
diff --git a/virt/kvm/arm/vgic/vgic-mmio.c b/virt/kvm/arm/vgic/vgic-mmio.c index e18b30ddcdce..ebe1b9fa3c4d 100644 --- a/virt/kvm/arm/vgic/vgic-mmio.c +++ b/virt/kvm/arm/vgic/vgic-mmio.c | |||
@@ -453,17 +453,33 @@ struct vgic_io_device *kvm_to_vgic_iodev(const struct kvm_io_device *dev) | |||
453 | return container_of(dev, struct vgic_io_device, dev); | 453 | return container_of(dev, struct vgic_io_device, dev); |
454 | } | 454 | } |
455 | 455 | ||
456 | static bool check_region(const struct vgic_register_region *region, | 456 | static bool check_region(const struct kvm *kvm, |
457 | const struct vgic_register_region *region, | ||
457 | gpa_t addr, int len) | 458 | gpa_t addr, int len) |
458 | { | 459 | { |
459 | if ((region->access_flags & VGIC_ACCESS_8bit) && len == 1) | 460 | int flags, nr_irqs = kvm->arch.vgic.nr_spis + VGIC_NR_PRIVATE_IRQS; |
460 | return true; | 461 | |
461 | if ((region->access_flags & VGIC_ACCESS_32bit) && | 462 | switch (len) { |
462 | len == sizeof(u32) && !(addr & 3)) | 463 | case sizeof(u8): |
463 | return true; | 464 | flags = VGIC_ACCESS_8bit; |
464 | if ((region->access_flags & VGIC_ACCESS_64bit) && | 465 | break; |
465 | len == sizeof(u64) && !(addr & 7)) | 466 | case sizeof(u32): |
466 | return true; | 467 | flags = VGIC_ACCESS_32bit; |
468 | break; | ||
469 | case sizeof(u64): | ||
470 | flags = VGIC_ACCESS_64bit; | ||
471 | break; | ||
472 | default: | ||
473 | return false; | ||
474 | } | ||
475 | |||
476 | if ((region->access_flags & flags) && IS_ALIGNED(addr, len)) { | ||
477 | if (!region->bits_per_irq) | ||
478 | return true; | ||
479 | |||
480 | /* Do we access a non-allocated IRQ? */ | ||
481 | return VGIC_ADDR_TO_INTID(addr, region->bits_per_irq) < nr_irqs; | ||
482 | } | ||
467 | 483 | ||
468 | return false; | 484 | return false; |
469 | } | 485 | } |
@@ -477,7 +493,7 @@ static int dispatch_mmio_read(struct kvm_vcpu *vcpu, struct kvm_io_device *dev, | |||
477 | 493 | ||
478 | region = vgic_find_mmio_region(iodev->regions, iodev->nr_regions, | 494 | region = vgic_find_mmio_region(iodev->regions, iodev->nr_regions, |
479 | addr - iodev->base_addr); | 495 | addr - iodev->base_addr); |
480 | if (!region || !check_region(region, addr, len)) { | 496 | if (!region || !check_region(vcpu->kvm, region, addr, len)) { |
481 | memset(val, 0, len); | 497 | memset(val, 0, len); |
482 | return 0; | 498 | return 0; |
483 | } | 499 | } |
@@ -510,10 +526,7 @@ static int dispatch_mmio_write(struct kvm_vcpu *vcpu, struct kvm_io_device *dev, | |||
510 | 526 | ||
511 | region = vgic_find_mmio_region(iodev->regions, iodev->nr_regions, | 527 | region = vgic_find_mmio_region(iodev->regions, iodev->nr_regions, |
512 | addr - iodev->base_addr); | 528 | addr - iodev->base_addr); |
513 | if (!region) | 529 | if (!region || !check_region(vcpu->kvm, region, addr, len)) |
514 | return 0; | ||
515 | |||
516 | if (!check_region(region, addr, len)) | ||
517 | return 0; | 530 | return 0; |
518 | 531 | ||
519 | switch (iodev->iodev_type) { | 532 | switch (iodev->iodev_type) { |
diff --git a/virt/kvm/arm/vgic/vgic-mmio.h b/virt/kvm/arm/vgic/vgic-mmio.h index 4c34d39d44a0..84961b4e4422 100644 --- a/virt/kvm/arm/vgic/vgic-mmio.h +++ b/virt/kvm/arm/vgic/vgic-mmio.h | |||
@@ -50,15 +50,15 @@ extern struct kvm_io_device_ops kvm_io_gic_ops; | |||
50 | #define VGIC_ADDR_IRQ_MASK(bits) (((bits) * 1024 / 8) - 1) | 50 | #define VGIC_ADDR_IRQ_MASK(bits) (((bits) * 1024 / 8) - 1) |
51 | 51 | ||
52 | /* | 52 | /* |
53 | * (addr & mask) gives us the byte offset for the INT ID, so we want to | 53 | * (addr & mask) gives us the _byte_ offset for the INT ID. |
54 | * divide this with 'bytes per irq' to get the INT ID, which is given | 54 | * We multiply this by 8 to get the _bit_ offset, then divide this by |
55 | * by '(bits) / 8'. But we do this with fixed-point-arithmetic and | 55 | * the number of bits to learn the actual INT ID. |
56 | * take advantage of the fact that division by a fraction equals | 56 | * But instead of a division (which requires a "long long div" implementation), |
57 | * multiplication with the inverted fraction, and scale up both the | 57 | * we shift by the binary logarithm of <bits>. |
58 | * numerator and denominator with 8 to support at most 64 bits per IRQ: | 58 | * This assumes that <bits> is a power of two. |
59 | */ | 59 | */ |
60 | #define VGIC_ADDR_TO_INTID(addr, bits) (((addr) & VGIC_ADDR_IRQ_MASK(bits)) * \ | 60 | #define VGIC_ADDR_TO_INTID(addr, bits) (((addr) & VGIC_ADDR_IRQ_MASK(bits)) * \ |
61 | 64 / (bits) / 8) | 61 | 8 >> ilog2(bits)) |
62 | 62 | ||
63 | /* | 63 | /* |
64 | * Some VGIC registers store per-IRQ information, with a different number | 64 | * Some VGIC registers store per-IRQ information, with a different number |
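The rewritten VGIC_ADDR_TO_INTID() avoids a 64-bit division by multiplying the byte offset by 8 and shifting right by ilog2(bits), which is exact as long as bits is a power of two. A small userspace check of the arithmetic, with ilog2() replaced by __builtin_ctz() and the register layouts (1 and 8 bits per IRQ) given only as examples:

#include <stdio.h>
#include <stdint.h>

#define ADDR_IRQ_MASK(bits)       (((bits) * 1024 / 8) - 1)
#define ADDR_TO_INTID(addr, bits) \
        ((((addr) & ADDR_IRQ_MASK(bits)) * 8) >> __builtin_ctz(bits))

int main(void)
{
        /* 1 bit per IRQ (enable-style register): byte 4 starts at INTID 32 */
        printf("%u\n", (unsigned int)ADDR_TO_INTID((uint64_t)0x4, 1));  /* 32 */
        /* 8 bits per IRQ (priority-style register): byte 4 is INTID 4  */
        printf("%u\n", (unsigned int)ADDR_TO_INTID((uint64_t)0x4, 8));  /* 4  */
        return 0;
}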
diff --git a/virt/kvm/arm/vgic/vgic.c b/virt/kvm/arm/vgic/vgic.c index 2893d5ba523a..6440b56ec90e 100644 --- a/virt/kvm/arm/vgic/vgic.c +++ b/virt/kvm/arm/vgic/vgic.c | |||
@@ -273,6 +273,18 @@ retry: | |||
273 | * no more work for us to do. | 273 | * no more work for us to do. |
274 | */ | 274 | */ |
275 | spin_unlock(&irq->irq_lock); | 275 | spin_unlock(&irq->irq_lock); |
276 | |||
277 | /* | ||
278 | * We have to kick the VCPU here, because we could be | ||
279 | * queueing an edge-triggered interrupt for which we | ||
280 | * get no EOI maintenance interrupt. In that case, | ||
281 | * while the IRQ is already on the VCPU's AP list, the | ||
282 | * VCPU could have EOI'ed the original interrupt and | ||
283 | * won't see this one until it exits for some other | ||
284 | * reason. | ||
285 | */ | ||
286 | if (vcpu) | ||
287 | kvm_vcpu_kick(vcpu); | ||
276 | return false; | 288 | return false; |
277 | } | 289 | } |
278 | 290 | ||