453 files changed, 4096 insertions, 2231 deletions
diff --git a/Documentation/devicetree/bindings/input/gpio-keys.txt b/Documentation/devicetree/bindings/input/gpio-keys.txt
index 996ce84352cb..7cccc49b6bea 100644
--- a/Documentation/devicetree/bindings/input/gpio-keys.txt
+++ b/Documentation/devicetree/bindings/input/gpio-keys.txt
@@ -1,4 +1,4 @@
-Device-Tree bindings for input/gpio_keys.c keyboard driver
+Device-Tree bindings for input/keyboard/gpio_keys.c keyboard driver
 
 Required properties:
 	- compatible = "gpio-keys";
diff --git a/Documentation/devicetree/bindings/net/macb.txt b/Documentation/devicetree/bindings/net/macb.txt
index 457d5ae16f23..3e17ac1d5d58 100644
--- a/Documentation/devicetree/bindings/net/macb.txt
+++ b/Documentation/devicetree/bindings/net/macb.txt
@@ -10,6 +10,7 @@ Required properties:
   Use "cdns,pc302-gem" for Picochip picoXcell pc302 and later devices based on
   the Cadence GEM, or the generic form: "cdns,gem".
   Use "atmel,sama5d2-gem" for the GEM IP (10/100) available on Atmel sama5d2 SoCs.
+  Use "atmel,sama5d3-macb" for the 10/100Mbit IP available on Atmel sama5d3 SoCs.
   Use "atmel,sama5d3-gem" for the Gigabit IP available on Atmel sama5d3 SoCs.
   Use "atmel,sama5d4-gem" for the GEM IP (10/100) available on Atmel sama5d4 SoCs.
   Use "cdns,zynq-gem" Xilinx Zynq-7xxx SoC.
diff --git a/Documentation/media/uapi/dvb/video_function_calls.rst b/Documentation/media/uapi/dvb/video_function_calls.rst
index 3f4f6c9ffad7..a4222b6cd2d3 100644
--- a/Documentation/media/uapi/dvb/video_function_calls.rst
+++ b/Documentation/media/uapi/dvb/video_function_calls.rst
@@ -33,4 +33,3 @@ Video Function Calls
     video-clear-buffer
     video-set-streamtype
     video-set-format
-    video-set-attributes
diff --git a/Documentation/virtual/kvm/api.txt b/Documentation/virtual/kvm/api.txt
index c664064f76fb..647f94128a85 100644
--- a/Documentation/virtual/kvm/api.txt
+++ b/Documentation/virtual/kvm/api.txt
@@ -4510,7 +4510,8 @@ Do not enable KVM_FEATURE_PV_UNHALT if you disable HLT exits.
 Architectures: s390
 Parameters: none
 Returns: 0 on success, -EINVAL if hpage module parameter was not set
-	 or cmma is enabled
+	 or cmma is enabled, or the VM has the KVM_VM_S390_UCONTROL
+	 flag set
 
 With this capability the KVM support for memory backing with 1m pages
 through hugetlbfs can be enabled for a VM. After the capability is
@@ -4521,6 +4522,15 @@ hpage module parameter is not set to 1, -EINVAL is returned.
 While it is generally possible to create a huge page backed VM without
 this capability, the VM will not be able to run.
 
+7.14 KVM_CAP_MSR_PLATFORM_INFO
+
+Architectures: x86
+Parameters: args[0] whether feature should be enabled or not
+
+With this capability, a guest may read the MSR_PLATFORM_INFO MSR. Otherwise,
+a #GP would be raised when the guest tries to access. Currently, this
+capability does not enable write permissions of this MSR for the guest.
+
 8. Other capabilities.
 ----------------------
 
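Since the new 7.14 capability above is a per-VM capability, a userspace VMM opts in through the standard KVM_ENABLE_CAP ioctl on the VM file descriptor. The fragment below is only an illustrative sketch of that call; it assumes a <linux/kvm.h> that already defines KVM_CAP_MSR_PLATFORM_INFO, an already-created VM fd named vm_fd, and it omits error handling:

    /*
     * Illustrative sketch: toggle KVM_CAP_MSR_PLATFORM_INFO for a VM.
     * args[0] carries "whether feature should be enabled or not", as
     * documented in section 7.14 above.
     */
    #include <string.h>
    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    static int enable_msr_platform_info(int vm_fd, int on)
    {
            struct kvm_enable_cap cap;

            memset(&cap, 0, sizeof(cap));
            cap.cap = KVM_CAP_MSR_PLATFORM_INFO;
            cap.args[0] = on;

            /* Returns 0 on success, -1 with errno set on failure. */
            return ioctl(vm_fd, KVM_ENABLE_CAP, &cap);
    }
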
diff --git a/MAINTAINERS b/MAINTAINERS
index 9dff31e38fac..cba1218534b6 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -9716,13 +9716,6 @@ Q: http://patchwork.linuxtv.org/project/linux-media/list/
 S:	Maintained
 F:	drivers/media/dvb-frontends/mn88473*
 
-PCI DRIVER FOR MOBIVEIL PCIE IP
-M:	Subrahmanya Lingappa <l.subrahmanya@mobiveil.co.in>
-L:	linux-pci@vger.kernel.org
-S:	Supported
-F:	Documentation/devicetree/bindings/pci/mobiveil-pcie.txt
-F:	drivers/pci/controller/pcie-mobiveil.c
-
 MODULE SUPPORT
 M:	Jessica Yu <jeyu@kernel.org>
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/jeyu/linux.git modules-next
@@ -10949,7 +10942,7 @@ M: Willy Tarreau <willy@haproxy.com>
 M:	Ksenija Stanojevic <ksenija.stanojevic@gmail.com>
 S:	Odd Fixes
 F:	Documentation/auxdisplay/lcd-panel-cgram.txt
-F:	drivers/misc/panel.c
+F:	drivers/auxdisplay/panel.c
 
 PARALLEL PORT SUBSYSTEM
 M:	Sudip Mukherjee <sudipm.mukherjee@gmail.com>
@@ -11137,6 +11130,13 @@ F: include/uapi/linux/switchtec_ioctl.h
 F:	include/linux/switchtec.h
 F:	drivers/ntb/hw/mscc/
 
+PCI DRIVER FOR MOBIVEIL PCIE IP
+M:	Subrahmanya Lingappa <l.subrahmanya@mobiveil.co.in>
+L:	linux-pci@vger.kernel.org
+S:	Supported
+F:	Documentation/devicetree/bindings/pci/mobiveil-pcie.txt
+F:	drivers/pci/controller/pcie-mobiveil.c
+
 PCI DRIVER FOR MVEBU (Marvell Armada 370 and Armada XP SOC support)
 M:	Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
 M:	Jason Cooper <jason@lakedaemon.net>
@@ -11203,8 +11203,14 @@ F: tools/pci/
 
 PCI ENHANCED ERROR HANDLING (EEH) FOR POWERPC
 M:	Russell Currey <ruscur@russell.cc>
+M:	Sam Bobroff <sbobroff@linux.ibm.com>
+M:	Oliver O'Halloran <oohall@gmail.com>
 L:	linuxppc-dev@lists.ozlabs.org
 S:	Supported
+F:	Documentation/PCI/pci-error-recovery.txt
+F:	drivers/pci/pcie/aer.c
+F:	drivers/pci/pcie/dpc.c
+F:	drivers/pci/pcie/err.c
 F:	Documentation/powerpc/eeh-pci-error-recovery.txt
 F:	arch/powerpc/kernel/eeh*.c
 F:	arch/powerpc/platforms/*/eeh*.c
@@ -12260,6 +12266,7 @@ F: Documentation/networking/rds.txt
 
 RDT - RESOURCE ALLOCATION
 M:	Fenghua Yu <fenghua.yu@intel.com>
+M:	Reinette Chatre <reinette.chatre@intel.com>
 L:	linux-kernel@vger.kernel.org
 S:	Supported
 F:	arch/x86/kernel/cpu/intel_rdt*
@@ -13449,9 +13456,8 @@ F: drivers/i2c/busses/i2c-synquacer.c
 F:	Documentation/devicetree/bindings/i2c/i2c-synquacer.txt
 
 SOCIONEXT UNIPHIER SOUND DRIVER
-M:	Katsuhiro Suzuki <suzuki.katsuhiro@socionext.com>
 L:	alsa-devel@alsa-project.org (moderated for non-subscribers)
-S:	Maintained
+S:	Orphan
 F:	sound/soc/uniphier/
 
 SOEKRIS NET48XX LED SUPPORT
@@ -15919,6 +15925,7 @@ F: net/x25/
 X86 ARCHITECTURE (32-BIT AND 64-BIT)
 M:	Thomas Gleixner <tglx@linutronix.de>
 M:	Ingo Molnar <mingo@redhat.com>
+M:	Borislav Petkov <bp@alien8.de>
 R:	"H. Peter Anvin" <hpa@zytor.com>
 M:	x86@kernel.org
 L:	linux-kernel@vger.kernel.org
@@ -15947,6 +15954,15 @@ M: Borislav Petkov <bp@alien8.de>
 S:	Maintained
 F:	arch/x86/kernel/cpu/microcode/*
 
+X86 MM
+M:	Dave Hansen <dave.hansen@linux.intel.com>
+M:	Andy Lutomirski <luto@kernel.org>
+M:	Peter Zijlstra <peterz@infradead.org>
+L:	linux-kernel@vger.kernel.org
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git x86/mm
+S:	Maintained
+F:	arch/x86/mm/
+
 X86 PLATFORM DRIVERS
 M:	Darren Hart <dvhart@infradead.org>
 M:	Andy Shevchenko <andy@infradead.org>
diff --git a/Makefile b/Makefile
--- a/Makefile
+++ b/Makefile
@@ -2,7 +2,7 @@
 VERSION = 4
 PATCHLEVEL = 19
 SUBLEVEL = 0
-EXTRAVERSION = -rc4
+EXTRAVERSION = -rc6
 NAME = Merciless Moray
 
 # *DOCUMENTATION*
@@ -299,19 +299,7 @@ KERNELRELEASE = $(shell cat include/config/kernel.release 2> /dev/null)
 KERNELVERSION = $(VERSION)$(if $(PATCHLEVEL),.$(PATCHLEVEL)$(if $(SUBLEVEL),.$(SUBLEVEL)))$(EXTRAVERSION)
 export VERSION PATCHLEVEL SUBLEVEL KERNELRELEASE KERNELVERSION
 
-# SUBARCH tells the usermode build what the underlying arch is. That is set
-# first, and if a usermode build is happening, the "ARCH=um" on the command
-# line overrides the setting of ARCH below. If a native build is happening,
-# then ARCH is assigned, getting whatever value it gets normally, and
-# SUBARCH is subsequently ignored.
-
-SUBARCH := $(shell uname -m | sed -e s/i.86/x86/ -e s/x86_64/x86/ \
-				  -e s/sun4u/sparc64/ \
-				  -e s/arm.*/arm/ -e s/sa110/arm/ \
-				  -e s/s390x/s390/ -e s/parisc64/parisc/ \
-				  -e s/ppc.*/powerpc/ -e s/mips.*/mips/ \
-				  -e s/sh[234].*/sh/ -e s/aarch64.*/arm64/ \
-				  -e s/riscv.*/riscv/)
+include scripts/subarch.include
 
 # Cross compiling and selecting different set of gcc/bin-utils
 # ---------------------------------------------------------------------------
diff --git a/arch/arm/boot/dts/sama5d3_emac.dtsi b/arch/arm/boot/dts/sama5d3_emac.dtsi
index 7cb235ef0fb6..6e9e1c2f9def 100644
--- a/arch/arm/boot/dts/sama5d3_emac.dtsi
+++ b/arch/arm/boot/dts/sama5d3_emac.dtsi
@@ -41,7 +41,7 @@
 			};
 
 			macb1: ethernet@f802c000 {
-				compatible = "cdns,at91sam9260-macb", "cdns,macb";
+				compatible = "atmel,sama5d3-macb", "cdns,at91sam9260-macb", "cdns,macb";
 				reg = <0xf802c000 0x100>;
 				interrupts = <35 IRQ_TYPE_LEVEL_HIGH 3>;
 				pinctrl-names = "default";
diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h
index 13a688fc8cd0..2fdc865ca374 100644
--- a/arch/powerpc/include/asm/book3s/64/pgtable.h
+++ b/arch/powerpc/include/asm/book3s/64/pgtable.h
@@ -1051,7 +1051,6 @@ static inline void vmemmap_remove_mapping(unsigned long start,
 	return hash__vmemmap_remove_mapping(start, page_size);
 }
 #endif
-struct page *realmode_pfn_to_page(unsigned long pfn);
 
 static inline pte_t pmd_pte(pmd_t pmd)
 {
diff --git a/arch/powerpc/include/asm/iommu.h b/arch/powerpc/include/asm/iommu.h
index ab3a4fba38e3..3d4b88cb8599 100644
--- a/arch/powerpc/include/asm/iommu.h
+++ b/arch/powerpc/include/asm/iommu.h
@@ -220,8 +220,6 @@ extern void iommu_del_device(struct device *dev);
 extern int __init tce_iommu_bus_notifier_init(void);
 extern long iommu_tce_xchg(struct iommu_table *tbl, unsigned long entry,
 		unsigned long *hpa, enum dma_data_direction *direction);
-extern long iommu_tce_xchg_rm(struct iommu_table *tbl, unsigned long entry,
-		unsigned long *hpa, enum dma_data_direction *direction);
 #else
 static inline void iommu_register_group(struct iommu_table_group *table_group,
 					int pci_domain_number,
diff --git a/arch/powerpc/include/asm/mmu_context.h b/arch/powerpc/include/asm/mmu_context.h
index b2f89b621b15..b694d6af1150 100644
--- a/arch/powerpc/include/asm/mmu_context.h
+++ b/arch/powerpc/include/asm/mmu_context.h
@@ -38,6 +38,7 @@ extern long mm_iommu_ua_to_hpa(struct mm_iommu_table_group_mem_t *mem,
 		unsigned long ua, unsigned int pageshift, unsigned long *hpa);
 extern long mm_iommu_ua_to_hpa_rm(struct mm_iommu_table_group_mem_t *mem,
 		unsigned long ua, unsigned int pageshift, unsigned long *hpa);
+extern void mm_iommu_ua_mark_dirty_rm(struct mm_struct *mm, unsigned long ua);
 extern long mm_iommu_mapped_inc(struct mm_iommu_table_group_mem_t *mem);
 extern void mm_iommu_mapped_dec(struct mm_iommu_table_group_mem_t *mem);
 #endif
diff --git a/arch/powerpc/include/asm/setup.h b/arch/powerpc/include/asm/setup.h
index 1a951b00465d..1fffbba8d6a5 100644
--- a/arch/powerpc/include/asm/setup.h
+++ b/arch/powerpc/include/asm/setup.h
@@ -9,6 +9,7 @@ extern void ppc_printk_progress(char *s, unsigned short hex);
 
 extern unsigned int rtas_data;
 extern unsigned long long memory_limit;
+extern bool init_mem_is_free;
 extern unsigned long klimit;
 extern void *zalloc_maybe_bootmem(size_t size, gfp_t mask);
 
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index ea04dfb8c092..2d8fc8c9da7a 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -1314,9 +1314,7 @@ EXC_REAL_BEGIN(denorm_exception_hv, 0x1500, 0x100)
 
 #ifdef CONFIG_PPC_DENORMALISATION
 	mfspr	r10,SPRN_HSRR1
-	mfspr	r11,SPRN_HSRR0		/* save HSRR0 */
 	andis.	r10,r10,(HSRR1_DENORM)@h /* denorm? */
-	addi	r11,r11,-4		/* HSRR0 is next instruction */
 	bne+	denorm_assist
 #endif
 
@@ -1382,6 +1380,8 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
  */
 	XVCPSGNDP32(32)
 denorm_done:
+	mfspr	r11,SPRN_HSRR0
+	subi	r11,r11,4
 	mtspr	SPRN_HSRR0,r11
 	mtcrf	0x80,r9
 	ld	r9,PACA_EXGEN+EX_R9(r13)
diff --git a/arch/powerpc/kernel/iommu.c b/arch/powerpc/kernel/iommu.c
index af7a20dc6e09..19b4c628f3be 100644
--- a/arch/powerpc/kernel/iommu.c
+++ b/arch/powerpc/kernel/iommu.c
@@ -1013,31 +1013,6 @@ long iommu_tce_xchg(struct iommu_table *tbl, unsigned long entry,
 }
 EXPORT_SYMBOL_GPL(iommu_tce_xchg);
 
-#ifdef CONFIG_PPC_BOOK3S_64
-long iommu_tce_xchg_rm(struct iommu_table *tbl, unsigned long entry,
-		unsigned long *hpa, enum dma_data_direction *direction)
-{
-	long ret;
-
-	ret = tbl->it_ops->exchange_rm(tbl, entry, hpa, direction);
-
-	if (!ret && ((*direction == DMA_FROM_DEVICE) ||
-			(*direction == DMA_BIDIRECTIONAL))) {
-		struct page *pg = realmode_pfn_to_page(*hpa >> PAGE_SHIFT);
-
-		if (likely(pg)) {
-			SetPageDirty(pg);
-		} else {
-			tbl->it_ops->exchange_rm(tbl, entry, hpa, direction);
-			ret = -EFAULT;
-		}
-	}
-
-	return ret;
-}
-EXPORT_SYMBOL_GPL(iommu_tce_xchg_rm);
-#endif
-
 int iommu_take_ownership(struct iommu_table *tbl)
 {
 	unsigned long flags, i, sz = (tbl->it_size + 7) >> 3;
diff --git a/arch/powerpc/kernel/tm.S b/arch/powerpc/kernel/tm.S
index 6bffbc5affe7..7716374786bd 100644
--- a/arch/powerpc/kernel/tm.S
+++ b/arch/powerpc/kernel/tm.S
@@ -176,13 +176,27 @@ _GLOBAL(tm_reclaim)
 	std	r1, PACATMSCRATCH(r13)
 	ld	r1, PACAR1(r13)
 
-	/* Store the PPR in r11 and reset to decent value */
 	std	r11, GPR11(r1)			/* Temporary stash */
 
+	/*
+	 * Move the saved user r1 to the kernel stack in case PACATMSCRATCH is
+	 * clobbered by an exception once we turn on MSR_RI below.
+	 */
+	ld	r11, PACATMSCRATCH(r13)
+	std	r11, GPR1(r1)
+
+	/*
+	 * Store r13 away so we can free up the scratch SPR for the SLB fault
+	 * handler (needed once we start accessing the thread_struct).
+	 */
+	GET_SCRATCH0(r11)
+	std	r11, GPR13(r1)
+
 	/* Reset MSR RI so we can take SLB faults again */
 	li	r11, MSR_RI
 	mtmsrd	r11, 1
 
+	/* Store the PPR in r11 and reset to decent value */
 	mfspr	r11, SPRN_PPR
 	HMT_MEDIUM
 
@@ -207,11 +221,11 @@ _GLOBAL(tm_reclaim)
 	SAVE_GPR(8, r7)				/* user r8 */
 	SAVE_GPR(9, r7)				/* user r9 */
 	SAVE_GPR(10, r7)			/* user r10 */
-	ld	r3, PACATMSCRATCH(r13)		/* user r1 */
+	ld	r3, GPR1(r1)			/* user r1 */
 	ld	r4, GPR7(r1)			/* user r7 */
 	ld	r5, GPR11(r1)			/* user r11 */
 	ld	r6, GPR12(r1)			/* user r12 */
-	GET_SCRATCH0(8)				/* user r13 */
+	ld	r8, GPR13(r1)			/* user r13 */
 	std	r3, GPR1(r7)
 	std	r4, GPR7(r7)
 	std	r5, GPR11(r7)
diff --git a/arch/powerpc/kvm/book3s_64_mmu_radix.c b/arch/powerpc/kvm/book3s_64_mmu_radix.c
index fd6e8c13685f..933c574e1cf7 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_radix.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_radix.c
@@ -525,8 +525,8 @@ int kvmppc_book3s_radix_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
 				   unsigned long ea, unsigned long dsisr)
 {
 	struct kvm *kvm = vcpu->kvm;
-	unsigned long mmu_seq, pte_size;
-	unsigned long gpa, gfn, hva, pfn;
+	unsigned long mmu_seq;
+	unsigned long gpa, gfn, hva;
 	struct kvm_memory_slot *memslot;
 	struct page *page = NULL;
 	long ret;
@@ -623,9 +623,10 @@ int kvmppc_book3s_radix_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
 	 */
 	hva = gfn_to_hva_memslot(memslot, gfn);
 	if (upgrade_p && __get_user_pages_fast(hva, 1, 1, &page) == 1) {
-		pfn = page_to_pfn(page);
 		upgrade_write = true;
 	} else {
+		unsigned long pfn;
+
 		/* Call KVM generic code to do the slow-path check */
 		pfn = __gfn_to_pfn_memslot(memslot, gfn, false, NULL,
 					   writing, upgrade_p);
@@ -639,63 +640,45 @@ int kvmppc_book3s_radix_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
 		}
 	}
 
-	/* See if we can insert a 1GB or 2MB large PTE here */
-	level = 0;
-	if (page && PageCompound(page)) {
-		pte_size = PAGE_SIZE << compound_order(compound_head(page));
-		if (pte_size >= PUD_SIZE &&
-		    (gpa & (PUD_SIZE - PAGE_SIZE)) ==
-		    (hva & (PUD_SIZE - PAGE_SIZE))) {
-			level = 2;
-			pfn &= ~((PUD_SIZE >> PAGE_SHIFT) - 1);
-		} else if (pte_size >= PMD_SIZE &&
-			   (gpa & (PMD_SIZE - PAGE_SIZE)) ==
-			   (hva & (PMD_SIZE - PAGE_SIZE))) {
-			level = 1;
-			pfn &= ~((PMD_SIZE >> PAGE_SHIFT) - 1);
-		}
-	}
-
 	/*
-	 * Compute the PTE value that we need to insert.
+	 * Read the PTE from the process' radix tree and use that
+	 * so we get the shift and attribute bits.
 	 */
-	if (page) {
-		pgflags = _PAGE_READ | _PAGE_EXEC | _PAGE_PRESENT | _PAGE_PTE |
-			_PAGE_ACCESSED;
-		if (writing || upgrade_write)
-			pgflags |= _PAGE_WRITE | _PAGE_DIRTY;
-		pte = pfn_pte(pfn, __pgprot(pgflags));
+	local_irq_disable();
+	ptep = __find_linux_pte(vcpu->arch.pgdir, hva, NULL, &shift);
+	pte = *ptep;
+	local_irq_enable();
+
+	/* Get pte level from shift/size */
+	if (shift == PUD_SHIFT &&
+	    (gpa & (PUD_SIZE - PAGE_SIZE)) ==
+	    (hva & (PUD_SIZE - PAGE_SIZE))) {
+		level = 2;
+	} else if (shift == PMD_SHIFT &&
+		   (gpa & (PMD_SIZE - PAGE_SIZE)) ==
+		   (hva & (PMD_SIZE - PAGE_SIZE))) {
+		level = 1;
 	} else {
-		/*
-		 * Read the PTE from the process' radix tree and use that
-		 * so we get the attribute bits.
-		 */
-		local_irq_disable();
-		ptep = __find_linux_pte(vcpu->arch.pgdir, hva, NULL, &shift);
-		pte = *ptep;
-		local_irq_enable();
-		if (shift == PUD_SHIFT &&
-		    (gpa & (PUD_SIZE - PAGE_SIZE)) ==
-		    (hva & (PUD_SIZE - PAGE_SIZE))) {
-			level = 2;
-		} else if (shift == PMD_SHIFT &&
-			   (gpa & (PMD_SIZE - PAGE_SIZE)) ==
-			   (hva & (PMD_SIZE - PAGE_SIZE))) {
-			level = 1;
-		} else if (shift && shift != PAGE_SHIFT) {
-			/* Adjust PFN */
-			unsigned long mask = (1ul << shift) - PAGE_SIZE;
-			pte = __pte(pte_val(pte) | (hva & mask));
-		}
-		pte = __pte(pte_val(pte) | _PAGE_EXEC | _PAGE_ACCESSED);
-		if (writing || upgrade_write) {
-			if (pte_val(pte) & _PAGE_WRITE)
-				pte = __pte(pte_val(pte) | _PAGE_DIRTY);
-		} else {
-			pte = __pte(pte_val(pte) & ~(_PAGE_WRITE | _PAGE_DIRTY));
+		level = 0;
+		if (shift > PAGE_SHIFT) {
+			/*
+			 * If the pte maps more than one page, bring over
+			 * bits from the virtual address to get the real
+			 * address of the specific single page we want.
+			 */
+			unsigned long rpnmask = (1ul << shift) - PAGE_SIZE;
+			pte = __pte(pte_val(pte) | (hva & rpnmask));
 		}
 	}
 
+	pte = __pte(pte_val(pte) | _PAGE_EXEC | _PAGE_ACCESSED);
+	if (writing || upgrade_write) {
+		if (pte_val(pte) & _PAGE_WRITE)
+			pte = __pte(pte_val(pte) | _PAGE_DIRTY);
+	} else {
+		pte = __pte(pte_val(pte) & ~(_PAGE_WRITE | _PAGE_DIRTY));
+	}
+
 	/* Allocate space in the tree and write the PTE */
 	ret = kvmppc_create_pte(kvm, pte, gpa, level, mmu_seq);
 
diff --git a/arch/powerpc/kvm/book3s_64_vio_hv.c b/arch/powerpc/kvm/book3s_64_vio_hv.c
index 506a4d400458..6821ead4b4eb 100644
--- a/arch/powerpc/kvm/book3s_64_vio_hv.c
+++ b/arch/powerpc/kvm/book3s_64_vio_hv.c
@@ -187,12 +187,35 @@ long kvmppc_gpa_to_ua(struct kvm *kvm, unsigned long gpa,
 EXPORT_SYMBOL_GPL(kvmppc_gpa_to_ua);
 
 #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
-static void kvmppc_rm_clear_tce(struct iommu_table *tbl, unsigned long entry)
+static long iommu_tce_xchg_rm(struct mm_struct *mm, struct iommu_table *tbl,
+		unsigned long entry, unsigned long *hpa,
+		enum dma_data_direction *direction)
+{
+	long ret;
+
+	ret = tbl->it_ops->exchange_rm(tbl, entry, hpa, direction);
+
+	if (!ret && ((*direction == DMA_FROM_DEVICE) ||
+			(*direction == DMA_BIDIRECTIONAL))) {
+		__be64 *pua = IOMMU_TABLE_USERSPACE_ENTRY_RM(tbl, entry);
+		/*
+		 * kvmppc_rm_tce_iommu_do_map() updates the UA cache after
+		 * calling this so we still get here a valid UA.
+		 */
+		if (pua && *pua)
+			mm_iommu_ua_mark_dirty_rm(mm, be64_to_cpu(*pua));
+	}
+
+	return ret;
+}
+
+static void kvmppc_rm_clear_tce(struct kvm *kvm, struct iommu_table *tbl,
+		unsigned long entry)
 {
 	unsigned long hpa = 0;
 	enum dma_data_direction dir = DMA_NONE;
 
-	iommu_tce_xchg_rm(tbl, entry, &hpa, &dir);
+	iommu_tce_xchg_rm(kvm->mm, tbl, entry, &hpa, &dir);
 }
 
 static long kvmppc_rm_tce_iommu_mapped_dec(struct kvm *kvm,
@@ -224,7 +247,7 @@ static long kvmppc_rm_tce_iommu_do_unmap(struct kvm *kvm,
 	unsigned long hpa = 0;
 	long ret;
 
-	if (iommu_tce_xchg_rm(tbl, entry, &hpa, &dir))
+	if (iommu_tce_xchg_rm(kvm->mm, tbl, entry, &hpa, &dir))
 		/*
 		 * real mode xchg can fail if struct page crosses
 		 * a page boundary
@@ -236,7 +259,7 @@ static long kvmppc_rm_tce_iommu_do_unmap(struct kvm *kvm,
 
 	ret = kvmppc_rm_tce_iommu_mapped_dec(kvm, tbl, entry);
 	if (ret)
-		iommu_tce_xchg_rm(tbl, entry, &hpa, &dir);
+		iommu_tce_xchg_rm(kvm->mm, tbl, entry, &hpa, &dir);
 
 	return ret;
 }
@@ -282,7 +305,7 @@ static long kvmppc_rm_tce_iommu_do_map(struct kvm *kvm, struct iommu_table *tbl,
 	if (WARN_ON_ONCE_RM(mm_iommu_mapped_inc(mem)))
 		return H_CLOSED;
 
-	ret = iommu_tce_xchg_rm(tbl, entry, &hpa, &dir);
+	ret = iommu_tce_xchg_rm(kvm->mm, tbl, entry, &hpa, &dir);
 	if (ret) {
 		mm_iommu_mapped_dec(mem);
 		/*
@@ -371,7 +394,7 @@ long kvmppc_rm_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
 			return ret;
 
 		WARN_ON_ONCE_RM(1);
-		kvmppc_rm_clear_tce(stit->tbl, entry);
+		kvmppc_rm_clear_tce(vcpu->kvm, stit->tbl, entry);
 	}
 
 	kvmppc_tce_put(stt, entry, tce);
@@ -520,7 +543,7 @@ long kvmppc_rm_h_put_tce_indirect(struct kvm_vcpu *vcpu,
 			goto unlock_exit;
 
 		WARN_ON_ONCE_RM(1);
-		kvmppc_rm_clear_tce(stit->tbl, entry);
+		kvmppc_rm_clear_tce(vcpu->kvm, stit->tbl, entry);
 	}
 
 		kvmppc_tce_put(stt, entry + i, tce);
@@ -571,7 +594,7 @@ long kvmppc_rm_h_stuff_tce(struct kvm_vcpu *vcpu,
 			return ret;
 
 		WARN_ON_ONCE_RM(1);
-		kvmppc_rm_clear_tce(stit->tbl, entry);
+		kvmppc_rm_clear_tce(vcpu->kvm, stit->tbl, entry);
 	}
 	}
 
diff --git a/arch/powerpc/lib/checksum_64.S b/arch/powerpc/lib/checksum_64.S
index 886ed94b9c13..d05c8af4ac51 100644
--- a/arch/powerpc/lib/checksum_64.S
+++ b/arch/powerpc/lib/checksum_64.S
@@ -443,6 +443,9 @@ _GLOBAL(csum_ipv6_magic)
 	addc	r0, r8, r9
 	ld	r10, 0(r4)
 	ld	r11, 8(r4)
+#ifdef CONFIG_CPU_LITTLE_ENDIAN
+	rotldi	r5, r5, 8
+#endif
 	adde	r0, r0, r10
 	add	r5, r5, r7
 	adde	r0, r0, r11
diff --git a/arch/powerpc/lib/code-patching.c b/arch/powerpc/lib/code-patching.c
index 850f3b8f4da5..6ae2777c220d 100644
--- a/arch/powerpc/lib/code-patching.c
+++ b/arch/powerpc/lib/code-patching.c
@@ -28,6 +28,12 @@ static int __patch_instruction(unsigned int *exec_addr, unsigned int instr,
 {
 	int err;
 
+	/* Make sure we aren't patching a freed init section */
+	if (init_mem_is_free && init_section_contains(exec_addr, 4)) {
+		pr_debug("Skipping init section patching addr: 0x%px\n", exec_addr);
+		return 0;
+	}
+
 	__put_user_size(instr, patch_addr, 4, err);
 	if (err)
 		return err;
diff --git a/arch/powerpc/mm/init_64.c b/arch/powerpc/mm/init_64.c
index 51ce091914f9..7a9886f98b0c 100644
--- a/arch/powerpc/mm/init_64.c
+++ b/arch/powerpc/mm/init_64.c
@@ -308,55 +308,6 @@ void register_page_bootmem_memmap(unsigned long section_nr,
 {
 }
 
-/*
- * We do not have access to the sparsemem vmemmap, so we fallback to
- * walking the list of sparsemem blocks which we already maintain for
- * the sake of crashdump. In the long run, we might want to maintain
- * a tree if performance of that linear walk becomes a problem.
- *
- * realmode_pfn_to_page functions can fail due to:
- * 1) As real sparsemem blocks do not lay in RAM continously (they
- * are in virtual address space which is not available in the real mode),
- * the requested page struct can be split between blocks so get_page/put_page
- * may fail.
- * 2) When huge pages are used, the get_page/put_page API will fail
- * in real mode as the linked addresses in the page struct are virtual
- * too.
- */
-struct page *realmode_pfn_to_page(unsigned long pfn)
-{
-	struct vmemmap_backing *vmem_back;
-	struct page *page;
-	unsigned long page_size = 1 << mmu_psize_defs[mmu_vmemmap_psize].shift;
-	unsigned long pg_va = (unsigned long) pfn_to_page(pfn);
-
-	for (vmem_back = vmemmap_list; vmem_back; vmem_back = vmem_back->list) {
-		if (pg_va < vmem_back->virt_addr)
-			continue;
-
-		/* After vmemmap_list entry free is possible, need check all */
-		if ((pg_va + sizeof(struct page)) <=
-				(vmem_back->virt_addr + page_size)) {
-			page = (struct page *) (vmem_back->phys + pg_va -
-				vmem_back->virt_addr);
-			return page;
-		}
-	}
-
-	/* Probably that page struct is split between real pages */
-	return NULL;
-}
-EXPORT_SYMBOL_GPL(realmode_pfn_to_page);
-
-#else
-
-struct page *realmode_pfn_to_page(unsigned long pfn)
-{
-	struct page *page = pfn_to_page(pfn);
-	return page;
-}
-EXPORT_SYMBOL_GPL(realmode_pfn_to_page);
-
 #endif /* CONFIG_SPARSEMEM_VMEMMAP */
 
 #ifdef CONFIG_PPC_BOOK3S_64
diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
index 5c8530d0c611..04ccb274a620 100644
--- a/arch/powerpc/mm/mem.c
+++ b/arch/powerpc/mm/mem.c
@@ -63,6 +63,7 @@
 #endif
 
 unsigned long long memory_limit;
+bool init_mem_is_free;
 
 #ifdef CONFIG_HIGHMEM
 pte_t *kmap_pte;
@@ -396,6 +397,7 @@ void free_initmem(void)
 {
 	ppc_md.progress = ppc_printk_progress;
 	mark_initmem_nx();
+	init_mem_is_free = true;
 	free_initmem_default(POISON_FREE_INITMEM);
 }
 
diff --git a/arch/powerpc/mm/mmu_context_iommu.c b/arch/powerpc/mm/mmu_context_iommu.c
index c9ee9e23845f..56c2234cc6ae 100644
--- a/arch/powerpc/mm/mmu_context_iommu.c
+++ b/arch/powerpc/mm/mmu_context_iommu.c
@@ -18,11 +18,15 @@
 #include <linux/migrate.h>
 #include <linux/hugetlb.h>
 #include <linux/swap.h>
+#include <linux/sizes.h>
 #include <asm/mmu_context.h>
 #include <asm/pte-walk.h>
 
 static DEFINE_MUTEX(mem_list_mutex);
 
+#define MM_IOMMU_TABLE_GROUP_PAGE_DIRTY	0x1
+#define MM_IOMMU_TABLE_GROUP_PAGE_MASK	~(SZ_4K - 1)
+
 struct mm_iommu_table_group_mem_t {
 	struct list_head next;
 	struct rcu_head rcu;
@@ -263,6 +267,9 @@ static void mm_iommu_unpin(struct mm_iommu_table_group_mem_t *mem)
 		if (!page)
 			continue;
 
+		if (mem->hpas[i] & MM_IOMMU_TABLE_GROUP_PAGE_DIRTY)
+			SetPageDirty(page);
+
 		put_page(page);
 		mem->hpas[i] = 0;
 	}
@@ -360,7 +367,6 @@ struct mm_iommu_table_group_mem_t *mm_iommu_lookup_rm(struct mm_struct *mm,
 
 	return ret;
 }
-EXPORT_SYMBOL_GPL(mm_iommu_lookup_rm);
 
 struct mm_iommu_table_group_mem_t *mm_iommu_find(struct mm_struct *mm,
 		unsigned long ua, unsigned long entries)
@@ -390,7 +396,7 @@ long mm_iommu_ua_to_hpa(struct mm_iommu_table_group_mem_t *mem,
 	if (pageshift > mem->pageshift)
 		return -EFAULT;
 
-	*hpa = *va | (ua & ~PAGE_MASK);
+	*hpa = (*va & MM_IOMMU_TABLE_GROUP_PAGE_MASK) | (ua & ~PAGE_MASK);
 
 	return 0;
 }
@@ -413,11 +419,31 @@ long mm_iommu_ua_to_hpa_rm(struct mm_iommu_table_group_mem_t *mem,
 	if (!pa)
 		return -EFAULT;
 
-	*hpa = *pa | (ua & ~PAGE_MASK);
+	*hpa = (*pa & MM_IOMMU_TABLE_GROUP_PAGE_MASK) | (ua & ~PAGE_MASK);
 
 	return 0;
 }
-EXPORT_SYMBOL_GPL(mm_iommu_ua_to_hpa_rm);
+
+extern void mm_iommu_ua_mark_dirty_rm(struct mm_struct *mm, unsigned long ua)
+{
+	struct mm_iommu_table_group_mem_t *mem;
+	long entry;
+	void *va;
+	unsigned long *pa;
+
+	mem = mm_iommu_lookup_rm(mm, ua, PAGE_SIZE);
+	if (!mem)
+		return;
+
+	entry = (ua - mem->ua) >> PAGE_SHIFT;
+	va = &mem->hpas[entry];
+
+	pa = (void *) vmalloc_to_phys(va);
+	if (!pa)
+		return;
+
+	*pa |= MM_IOMMU_TABLE_GROUP_PAGE_DIRTY;
+}
 
 long mm_iommu_mapped_inc(struct mm_iommu_table_group_mem_t *mem)
 {
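In the mmu_context_iommu.c hunks above, the cached hpas[] entries are page-aligned host addresses, which is why a dirty marker can live in the otherwise-unused low bits and be masked off again with MM_IOMMU_TABLE_GROUP_PAGE_MASK when the address is handed back. A tiny standalone C sketch of that flag-in-low-bits pattern (illustrative only; the constants below are local to the example, not kernel definitions):

    #include <stdint.h>
    #include <stdio.h>

    #define EXAMPLE_PAGE_DIRTY	0x1UL		/* stands in for MM_IOMMU_TABLE_GROUP_PAGE_DIRTY */
    #define EXAMPLE_PAGE_MASK	(~(4096UL - 1))	/* stands in for MM_IOMMU_TABLE_GROUP_PAGE_MASK */

    int main(void)
    {
            uint64_t hpa = 0x12345000UL;	/* a 4K-aligned host physical address */

            hpa |= EXAMPLE_PAGE_DIRTY;		/* mark the page dirty, as the real-mode path does */

            printf("dirty bit set: %d\n", (int)(hpa & EXAMPLE_PAGE_DIRTY));
            printf("usable address: 0x%llx\n",
                   (unsigned long long)(hpa & EXAMPLE_PAGE_MASK));	/* strip flag bits before use */
            return 0;
    }
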
diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
index 35ac5422903a..59d07bd5374a 100644
--- a/arch/powerpc/mm/numa.c
+++ b/arch/powerpc/mm/numa.c
@@ -1204,7 +1204,9 @@ int find_and_online_cpu_nid(int cpu)
 	int new_nid;
 
 	/* Use associativity from first thread for all siblings */
-	vphn_get_associativity(cpu, associativity);
+	if (vphn_get_associativity(cpu, associativity))
+		return cpu_to_node(cpu);
+
 	new_nid = associativity_to_nid(associativity);
 	if (new_nid < 0 || !node_possible(new_nid))
 		new_nid = first_online_node;
@@ -1452,7 +1454,8 @@ static struct timer_list topology_timer;
 
 static void reset_topology_timer(void)
 {
-	mod_timer(&topology_timer, jiffies + topology_timer_secs * HZ);
+	if (vphn_enabled)
+		mod_timer(&topology_timer, jiffies + topology_timer_secs * HZ);
 }
 
 #ifdef CONFIG_SMP
diff --git a/arch/powerpc/mm/pkeys.c b/arch/powerpc/mm/pkeys.c
index 333b1f80c435..b271b283c785 100644
--- a/arch/powerpc/mm/pkeys.c
+++ b/arch/powerpc/mm/pkeys.c
@@ -45,7 +45,7 @@ static void scan_pkey_feature(void)
 	 * Since any pkey can be used for data or execute, we will just treat
 	 * all keys as equal and track them as one entity.
 	 */
-	pkeys_total = be32_to_cpu(vals[0]);
+	pkeys_total = vals[0];
 	pkeys_devtree_defined = true;
 }
 
diff --git a/arch/powerpc/platforms/powernv/pci-ioda-tce.c b/arch/powerpc/platforms/powernv/pci-ioda-tce.c
index 6c5db1acbe8d..fe9691040f54 100644
--- a/arch/powerpc/platforms/powernv/pci-ioda-tce.c
+++ b/arch/powerpc/platforms/powernv/pci-ioda-tce.c
@@ -276,7 +276,7 @@ long pnv_pci_ioda2_table_alloc_pages(int nid, __u64 bus_offset,
 	level_shift = entries_shift + 3;
 	level_shift = max_t(unsigned int, level_shift, PAGE_SHIFT);
 
-	if ((level_shift - 3) * levels + page_shift >= 60)
+	if ((level_shift - 3) * levels + page_shift >= 55)
 		return -EINVAL;
 
 	/* Allocate TCE table */
diff --git a/arch/riscv/include/asm/asm-prototypes.h b/arch/riscv/include/asm/asm-prototypes.h
new file mode 100644
index 000000000000..c9fecd120d18
--- /dev/null
+++ b/arch/riscv/include/asm/asm-prototypes.h
@@ -0,0 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_RISCV_PROTOTYPES_H
+
+#include <linux/ftrace.h>
+#include <asm-generic/asm-prototypes.h>
+
+#endif /* _ASM_RISCV_PROTOTYPES_H */
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index f69333fd2fa3..ac5da6b0b862 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -481,7 +481,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
 		break;
 	case KVM_CAP_S390_HPAGE_1M:
 		r = 0;
-		if (hpage)
+		if (hpage && !kvm_is_ucontrol(kvm))
 			r = 1;
 		break;
 	case KVM_CAP_S390_MEM_OP:
@@ -691,7 +691,7 @@ static int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
 		mutex_lock(&kvm->lock);
 		if (kvm->created_vcpus)
 			r = -EBUSY;
-		else if (!hpage || kvm->arch.use_cmma)
+		else if (!hpage || kvm->arch.use_cmma || kvm_is_ucontrol(kvm))
 			r = -EINVAL;
 		else {
 			r = 0;
diff --git a/arch/s390/mm/gmap.c b/arch/s390/mm/gmap.c
index bb44990c8212..911c7ded35f1 100644
--- a/arch/s390/mm/gmap.c
+++ b/arch/s390/mm/gmap.c
@@ -708,11 +708,13 @@ void gmap_discard(struct gmap *gmap, unsigned long from, unsigned long to)
 		vmaddr |= gaddr & ~PMD_MASK;
 		/* Find vma in the parent mm */
 		vma = find_vma(gmap->mm, vmaddr);
+		if (!vma)
+			continue;
 		/*
 		 * We do not discard pages that are backed by
 		 * hugetlbfs, so we don't have to refault them.
 		 */
-		if (vma && is_vm_hugetlb_page(vma))
+		if (is_vm_hugetlb_page(vma))
 			continue;
 		size = min(to - gaddr, PMD_SIZE - (gaddr & ~PMD_MASK));
 		zap_page_range(vma, vmaddr, size);
diff --git a/arch/x86/boot/compressed/mem_encrypt.S b/arch/x86/boot/compressed/mem_encrypt.S
index eaa843a52907..a480356e0ed8 100644
--- a/arch/x86/boot/compressed/mem_encrypt.S
+++ b/arch/x86/boot/compressed/mem_encrypt.S
@@ -25,20 +25,6 @@ ENTRY(get_sev_encryption_bit)
 	push	%ebx
 	push	%ecx
 	push	%edx
-	push	%edi
-
-	/*
-	 * RIP-relative addressing is needed to access the encryption bit
-	 * variable. Since we are running in 32-bit mode we need this call/pop
-	 * sequence to get the proper relative addressing.
-	 */
-	call	1f
-1:	popl	%edi
-	subl	$1b, %edi
-
-	movl	enc_bit(%edi), %eax
-	cmpl	$0, %eax
-	jge	.Lsev_exit
 
 	/* Check if running under a hypervisor */
 	movl	$1, %eax
@@ -69,15 +55,12 @@ ENTRY(get_sev_encryption_bit)
 
 	movl	%ebx, %eax
 	andl	$0x3f, %eax	/* Return the encryption bit location */
-	movl	%eax, enc_bit(%edi)
 	jmp	.Lsev_exit
 
 .Lno_sev:
 	xor	%eax, %eax
-	movl	%eax, enc_bit(%edi)
 
 .Lsev_exit:
-	pop	%edi
 	pop	%edx
 	pop	%ecx
 	pop	%ebx
@@ -113,8 +96,6 @@ ENTRY(set_sev_encryption_mask)
 ENDPROC(set_sev_encryption_mask)
 
 	.data
-enc_bit:
-	.int	0xffffffff
 
 #ifdef CONFIG_AMD_MEM_ENCRYPT
 	.balign	8
diff --git a/arch/x86/crypto/aegis128-aesni-glue.c b/arch/x86/crypto/aegis128-aesni-glue.c
index acd11b3bf639..2a356b948720 100644
--- a/arch/x86/crypto/aegis128-aesni-glue.c
+++ b/arch/x86/crypto/aegis128-aesni-glue.c
@@ -379,7 +379,6 @@ static int __init crypto_aegis128_aesni_module_init(void)
 {
 	if (!boot_cpu_has(X86_FEATURE_XMM2) ||
 	    !boot_cpu_has(X86_FEATURE_AES) ||
-	    !boot_cpu_has(X86_FEATURE_OSXSAVE) ||
 	    !cpu_has_xfeatures(XFEATURE_MASK_SSE, NULL))
 		return -ENODEV;
 
diff --git a/arch/x86/crypto/aegis128l-aesni-glue.c b/arch/x86/crypto/aegis128l-aesni-glue.c
index 2071c3d1ae07..dbe8bb980da1 100644
--- a/arch/x86/crypto/aegis128l-aesni-glue.c
+++ b/arch/x86/crypto/aegis128l-aesni-glue.c
@@ -379,7 +379,6 @@ static int __init crypto_aegis128l_aesni_module_init(void)
 {
 	if (!boot_cpu_has(X86_FEATURE_XMM2) ||
 	    !boot_cpu_has(X86_FEATURE_AES) ||
-	    !boot_cpu_has(X86_FEATURE_OSXSAVE) ||
 	    !cpu_has_xfeatures(XFEATURE_MASK_SSE, NULL))
 		return -ENODEV;
 
diff --git a/arch/x86/crypto/aegis256-aesni-glue.c b/arch/x86/crypto/aegis256-aesni-glue.c
index b5f2a8fd5a71..8bebda2de92f 100644
--- a/arch/x86/crypto/aegis256-aesni-glue.c
+++ b/arch/x86/crypto/aegis256-aesni-glue.c
@@ -379,7 +379,6 @@ static int __init crypto_aegis256_aesni_module_init(void)
 {
 	if (!boot_cpu_has(X86_FEATURE_XMM2) ||
 	    !boot_cpu_has(X86_FEATURE_AES) ||
-	    !boot_cpu_has(X86_FEATURE_OSXSAVE) ||
 	    !cpu_has_xfeatures(XFEATURE_MASK_SSE, NULL))
 		return -ENODEV;
 
diff --git a/arch/x86/crypto/morus1280-sse2-glue.c b/arch/x86/crypto/morus1280-sse2-glue.c
index 95cf857d2cbb..f40244eaf14d 100644
--- a/arch/x86/crypto/morus1280-sse2-glue.c
+++ b/arch/x86/crypto/morus1280-sse2-glue.c
@@ -40,7 +40,6 @@ MORUS1280_DECLARE_ALGS(sse2, "morus1280-sse2", 350);
 static int __init crypto_morus1280_sse2_module_init(void)
 {
 	if (!boot_cpu_has(X86_FEATURE_XMM2) ||
-	    !boot_cpu_has(X86_FEATURE_OSXSAVE) ||
 	    !cpu_has_xfeatures(XFEATURE_MASK_SSE, NULL))
 		return -ENODEV;
 
diff --git a/arch/x86/crypto/morus640-sse2-glue.c b/arch/x86/crypto/morus640-sse2-glue.c
index 615fb7bc9a32..9afaf8f8565a 100644
--- a/arch/x86/crypto/morus640-sse2-glue.c
+++ b/arch/x86/crypto/morus640-sse2-glue.c
@@ -40,7 +40,6 @@ MORUS640_DECLARE_ALGS(sse2, "morus640-sse2", 400);
 static int __init crypto_morus640_sse2_module_init(void)
 {
 	if (!boot_cpu_has(X86_FEATURE_XMM2) ||
-	    !boot_cpu_has(X86_FEATURE_OSXSAVE) ||
 	    !cpu_has_xfeatures(XFEATURE_MASK_SSE, NULL))
 		return -ENODEV;
 
diff --git a/arch/x86/hyperv/hv_apic.c b/arch/x86/hyperv/hv_apic.c
index 5b0f613428c2..2c43e3055948 100644
--- a/arch/x86/hyperv/hv_apic.c
+++ b/arch/x86/hyperv/hv_apic.c
@@ -95,8 +95,8 @@ static void hv_apic_eoi_write(u32 reg, u32 val)
  */
 static bool __send_ipi_mask_ex(const struct cpumask *mask, int vector)
 {
-	struct ipi_arg_ex **arg;
-	struct ipi_arg_ex *ipi_arg;
+	struct hv_send_ipi_ex **arg;
+	struct hv_send_ipi_ex *ipi_arg;
 	unsigned long flags;
 	int nr_bank = 0;
 	int ret = 1;
@@ -105,7 +105,7 @@ static bool __send_ipi_mask_ex(const struct cpumask *mask, int vector)
 		return false;
 
 	local_irq_save(flags);
-	arg = (struct ipi_arg_ex **)this_cpu_ptr(hyperv_pcpu_input_arg);
+	arg = (struct hv_send_ipi_ex **)this_cpu_ptr(hyperv_pcpu_input_arg);
 
 	ipi_arg = *arg;
 	if (unlikely(!ipi_arg))
@@ -135,7 +135,7 @@ ipi_mask_ex_done:
 static bool __send_ipi_mask(const struct cpumask *mask, int vector)
 {
 	int cur_cpu, vcpu;
-	struct ipi_arg_non_ex ipi_arg;
+	struct hv_send_ipi ipi_arg;
 	int ret = 1;
 
 	trace_hyperv_send_ipi_mask(mask, vector);
diff --git a/arch/x86/include/asm/fixmap.h b/arch/x86/include/asm/fixmap.h
index e203169931c7..6390bd8c141b 100644
--- a/arch/x86/include/asm/fixmap.h
+++ b/arch/x86/include/asm/fixmap.h
@@ -14,6 +14,16 @@
 #ifndef _ASM_X86_FIXMAP_H
 #define _ASM_X86_FIXMAP_H
 
+/*
+ * Exposed to assembly code for setting up initial page tables. Cannot be
+ * calculated in assembly code (fixmap entries are an enum), but is sanity
+ * checked in the actual fixmap C code to make sure that the fixmap is
+ * covered fully.
+ */
+#define FIXMAP_PMD_NUM	2
+/* fixmap starts downwards from the 507th entry in level2_fixmap_pgt */
+#define FIXMAP_PMD_TOP	507
+
 #ifndef __ASSEMBLY__
 #include <linux/kernel.h>
 #include <asm/acpi.h>
diff --git a/arch/x86/include/asm/hyperv-tlfs.h b/arch/x86/include/asm/hyperv-tlfs.h
index e977b6b3a538..00e01d215f74 100644
--- a/arch/x86/include/asm/hyperv-tlfs.h
+++ b/arch/x86/include/asm/hyperv-tlfs.h
@@ -726,19 +726,21 @@ struct hv_enlightened_vmcs {
 #define HV_STIMER_AUTOENABLE		(1ULL << 3)
 #define HV_STIMER_SINT(config)		(__u8)(((config) >> 16) & 0x0F)
 
-struct ipi_arg_non_ex {
-	u32 vector;
-	u32 reserved;
-	u64 cpu_mask;
-};
-
 struct hv_vpset {
 	u64 format;
 	u64 valid_bank_mask;
 	u64 bank_contents[];
 };
 
-struct ipi_arg_ex {
+/* HvCallSendSyntheticClusterIpi hypercall */
+struct hv_send_ipi {
+	u32 vector;
+	u32 reserved;
+	u64 cpu_mask;
+};
+
+/* HvCallSendSyntheticClusterIpiEx hypercall */
+struct hv_send_ipi_ex {
 	u32 vector;
 	u32 reserved;
 	struct hv_vpset vp_set;
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index 8e90488c3d56..09b2e3e2cf1b 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h | |||
@@ -869,6 +869,8 @@ struct kvm_arch { | |||
869 | 869 | ||
870 | bool x2apic_format; | 870 | bool x2apic_format; |
871 | bool x2apic_broadcast_quirk_disabled; | 871 | bool x2apic_broadcast_quirk_disabled; |
872 | |||
873 | bool guest_can_read_msr_platform_info; | ||
872 | }; | 874 | }; |
873 | 875 | ||
874 | struct kvm_vm_stat { | 876 | struct kvm_vm_stat { |
@@ -1022,6 +1024,7 @@ struct kvm_x86_ops { | |||
1022 | void (*refresh_apicv_exec_ctrl)(struct kvm_vcpu *vcpu); | 1024 | void (*refresh_apicv_exec_ctrl)(struct kvm_vcpu *vcpu); |
1023 | void (*hwapic_irr_update)(struct kvm_vcpu *vcpu, int max_irr); | 1025 | void (*hwapic_irr_update)(struct kvm_vcpu *vcpu, int max_irr); |
1024 | void (*hwapic_isr_update)(struct kvm_vcpu *vcpu, int isr); | 1026 | void (*hwapic_isr_update)(struct kvm_vcpu *vcpu, int isr); |
1027 | bool (*guest_apic_has_interrupt)(struct kvm_vcpu *vcpu); | ||
1025 | void (*load_eoi_exitmap)(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap); | 1028 | void (*load_eoi_exitmap)(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap); |
1026 | void (*set_virtual_apic_mode)(struct kvm_vcpu *vcpu); | 1029 | void (*set_virtual_apic_mode)(struct kvm_vcpu *vcpu); |
1027 | void (*set_apic_access_page_addr)(struct kvm_vcpu *vcpu, hpa_t hpa); | 1030 | void (*set_apic_access_page_addr)(struct kvm_vcpu *vcpu, hpa_t hpa); |
@@ -1055,6 +1058,7 @@ struct kvm_x86_ops { | |||
1055 | bool (*umip_emulated)(void); | 1058 | bool (*umip_emulated)(void); |
1056 | 1059 | ||
1057 | int (*check_nested_events)(struct kvm_vcpu *vcpu, bool external_intr); | 1060 | int (*check_nested_events)(struct kvm_vcpu *vcpu, bool external_intr); |
1061 | void (*request_immediate_exit)(struct kvm_vcpu *vcpu); | ||
1058 | 1062 | ||
1059 | void (*sched_in)(struct kvm_vcpu *kvm, int cpu); | 1063 | void (*sched_in)(struct kvm_vcpu *kvm, int cpu); |
1060 | 1064 | ||
@@ -1482,6 +1486,7 @@ extern bool kvm_find_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn); | |||
1482 | 1486 | ||
1483 | int kvm_skip_emulated_instruction(struct kvm_vcpu *vcpu); | 1487 | int kvm_skip_emulated_instruction(struct kvm_vcpu *vcpu); |
1484 | int kvm_complete_insn_gp(struct kvm_vcpu *vcpu, int err); | 1488 | int kvm_complete_insn_gp(struct kvm_vcpu *vcpu, int err); |
1489 | void __kvm_request_immediate_exit(struct kvm_vcpu *vcpu); | ||
1485 | 1490 | ||
1486 | int kvm_is_in_guest(void); | 1491 | int kvm_is_in_guest(void); |
1487 | 1492 | ||
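Both new kvm_x86_ops hooks added here, guest_apic_has_interrupt and request_immediate_exit, are filled in per vendor, with __kvm_request_immediate_exit serving as the generic fallback (SVM uses it directly, and VMX falls back to it when no preemption timer is available, as later hunks show). A minimal model of that ops-table-with-fallback pattern, with invented names:

    #include <stdio.h>

    struct vcpu { int id; };

    /* Model of a kvm_x86_ops-style table: vendor code fills in the hooks. */
    struct x86_ops {
        void (*request_immediate_exit)(struct vcpu *v);
    };

    /* Generic fallback, analogous to __kvm_request_immediate_exit(). */
    static void generic_immediate_exit(struct vcpu *v)
    {
        printf("vcpu%d: kick with a reschedule IPI\n", v->id);
    }

    static void vendor_immediate_exit(struct vcpu *v)
    {
        printf("vcpu%d: arm the preemption timer with value 0\n", v->id);
    }

    int main(void)
    {
        struct vcpu v = { .id = 0 };
        struct x86_ops ops = { .request_immediate_exit = vendor_immediate_exit };
        int has_preemption_timer = 0;   /* hypothetical probe result */

        /* Hardware without the fast path falls back to the generic helper. */
        if (!has_preemption_timer)
            ops.request_immediate_exit = generic_immediate_exit;

        ops.request_immediate_exit(&v);
        return 0;
    }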
diff --git a/arch/x86/include/asm/mem_encrypt.h b/arch/x86/include/asm/mem_encrypt.h index c0643831706e..616f8e637bc3 100644 --- a/arch/x86/include/asm/mem_encrypt.h +++ b/arch/x86/include/asm/mem_encrypt.h | |||
@@ -48,10 +48,13 @@ int __init early_set_memory_encrypted(unsigned long vaddr, unsigned long size); | |||
48 | 48 | ||
49 | /* Architecture __weak replacement functions */ | 49 | /* Architecture __weak replacement functions */ |
50 | void __init mem_encrypt_init(void); | 50 | void __init mem_encrypt_init(void); |
51 | void __init mem_encrypt_free_decrypted_mem(void); | ||
51 | 52 | ||
52 | bool sme_active(void); | 53 | bool sme_active(void); |
53 | bool sev_active(void); | 54 | bool sev_active(void); |
54 | 55 | ||
56 | #define __bss_decrypted __attribute__((__section__(".bss..decrypted"))) | ||
57 | |||
55 | #else /* !CONFIG_AMD_MEM_ENCRYPT */ | 58 | #else /* !CONFIG_AMD_MEM_ENCRYPT */ |
56 | 59 | ||
57 | #define sme_me_mask 0ULL | 60 | #define sme_me_mask 0ULL |
@@ -77,6 +80,8 @@ early_set_memory_decrypted(unsigned long vaddr, unsigned long size) { return 0; | |||
77 | static inline int __init | 80 | static inline int __init |
78 | early_set_memory_encrypted(unsigned long vaddr, unsigned long size) { return 0; } | 81 | early_set_memory_encrypted(unsigned long vaddr, unsigned long size) { return 0; } |
79 | 82 | ||
83 | #define __bss_decrypted | ||
84 | |||
80 | #endif /* CONFIG_AMD_MEM_ENCRYPT */ | 85 | #endif /* CONFIG_AMD_MEM_ENCRYPT */ |
81 | 86 | ||
82 | /* | 87 | /* |
@@ -88,6 +93,8 @@ early_set_memory_encrypted(unsigned long vaddr, unsigned long size) { return 0; | |||
88 | #define __sme_pa(x) (__pa(x) | sme_me_mask) | 93 | #define __sme_pa(x) (__pa(x) | sme_me_mask) |
89 | #define __sme_pa_nodebug(x) (__pa_nodebug(x) | sme_me_mask) | 94 | #define __sme_pa_nodebug(x) (__pa_nodebug(x) | sme_me_mask) |
90 | 95 | ||
96 | extern char __start_bss_decrypted[], __end_bss_decrypted[], __start_bss_decrypted_unused[]; | ||
97 | |||
91 | #endif /* __ASSEMBLY__ */ | 98 | #endif /* __ASSEMBLY__ */ |
92 | 99 | ||
93 | #endif /* __X86_MEM_ENCRYPT_H__ */ | 100 | #endif /* __X86_MEM_ENCRYPT_H__ */ |
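__bss_decrypted is only a section attribute; the linker script hunk later in this diff collects .bss..decrypted into a PMD-aligned region, and the !CONFIG_AMD_MEM_ENCRYPT stub makes the annotation compile away. A compressed sketch of that attribute-or-nothing macro pattern, with stand-in names:

    #include <stdio.h>

    #ifdef MY_MEM_ENCRYPT                       /* stand-in for CONFIG_AMD_MEM_ENCRYPT */
    #define __my_decrypted __attribute__((__section__(".bss..decrypted")))
    #else
    #define __my_decrypted                      /* expands to nothing: plain .bss */
    #endif

    static unsigned long shared_with_hypervisor[64] __my_decrypted;

    int main(void)
    {
        printf("array lives at %p\n", (void *)shared_with_hypervisor);
        return 0;
    }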
diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h index ce2b59047cb8..9c85b54bf03c 100644 --- a/arch/x86/include/asm/pgtable_64.h +++ b/arch/x86/include/asm/pgtable_64.h | |||
@@ -14,6 +14,7 @@ | |||
14 | #include <asm/processor.h> | 14 | #include <asm/processor.h> |
15 | #include <linux/bitops.h> | 15 | #include <linux/bitops.h> |
16 | #include <linux/threads.h> | 16 | #include <linux/threads.h> |
17 | #include <asm/fixmap.h> | ||
17 | 18 | ||
18 | extern p4d_t level4_kernel_pgt[512]; | 19 | extern p4d_t level4_kernel_pgt[512]; |
19 | extern p4d_t level4_ident_pgt[512]; | 20 | extern p4d_t level4_ident_pgt[512]; |
@@ -22,7 +23,7 @@ extern pud_t level3_ident_pgt[512]; | |||
22 | extern pmd_t level2_kernel_pgt[512]; | 23 | extern pmd_t level2_kernel_pgt[512]; |
23 | extern pmd_t level2_fixmap_pgt[512]; | 24 | extern pmd_t level2_fixmap_pgt[512]; |
24 | extern pmd_t level2_ident_pgt[512]; | 25 | extern pmd_t level2_ident_pgt[512]; |
25 | extern pte_t level1_fixmap_pgt[512]; | 26 | extern pte_t level1_fixmap_pgt[512 * FIXMAP_PMD_NUM]; |
26 | extern pgd_t init_top_pgt[]; | 27 | extern pgd_t init_top_pgt[]; |
27 | 28 | ||
28 | #define swapper_pg_dir init_top_pgt | 29 | #define swapper_pg_dir init_top_pgt |
diff --git a/arch/x86/include/uapi/asm/kvm.h b/arch/x86/include/uapi/asm/kvm.h index 86299efa804a..fd23d5778ea1 100644 --- a/arch/x86/include/uapi/asm/kvm.h +++ b/arch/x86/include/uapi/asm/kvm.h | |||
@@ -377,6 +377,7 @@ struct kvm_sync_regs { | |||
377 | 377 | ||
378 | #define KVM_X86_QUIRK_LINT0_REENABLED (1 << 0) | 378 | #define KVM_X86_QUIRK_LINT0_REENABLED (1 << 0) |
379 | #define KVM_X86_QUIRK_CD_NW_CLEARED (1 << 1) | 379 | #define KVM_X86_QUIRK_CD_NW_CLEARED (1 << 1) |
380 | #define KVM_X86_QUIRK_LAPIC_MMIO_HOLE (1 << 2) | ||
380 | 381 | ||
381 | #define KVM_STATE_NESTED_GUEST_MODE 0x00000001 | 382 | #define KVM_STATE_NESTED_GUEST_MODE 0x00000001 |
382 | #define KVM_STATE_NESTED_RUN_PENDING 0x00000002 | 383 | #define KVM_STATE_NESTED_RUN_PENDING 0x00000002 |
diff --git a/arch/x86/kernel/cpu/intel_rdt.h b/arch/x86/kernel/cpu/intel_rdt.h index 4e588f36228f..285eb3ec4200 100644 --- a/arch/x86/kernel/cpu/intel_rdt.h +++ b/arch/x86/kernel/cpu/intel_rdt.h | |||
@@ -382,6 +382,11 @@ static inline bool is_mbm_event(int e) | |||
382 | e <= QOS_L3_MBM_LOCAL_EVENT_ID); | 382 | e <= QOS_L3_MBM_LOCAL_EVENT_ID); |
383 | } | 383 | } |
384 | 384 | ||
385 | struct rdt_parse_data { | ||
386 | struct rdtgroup *rdtgrp; | ||
387 | char *buf; | ||
388 | }; | ||
389 | |||
385 | /** | 390 | /** |
386 | * struct rdt_resource - attributes of an RDT resource | 391 | * struct rdt_resource - attributes of an RDT resource |
387 | * @rid: The index of the resource | 392 | * @rid: The index of the resource |
@@ -423,16 +428,19 @@ struct rdt_resource { | |||
423 | struct rdt_cache cache; | 428 | struct rdt_cache cache; |
424 | struct rdt_membw membw; | 429 | struct rdt_membw membw; |
425 | const char *format_str; | 430 | const char *format_str; |
426 | int (*parse_ctrlval) (void *data, struct rdt_resource *r, | 431 | int (*parse_ctrlval)(struct rdt_parse_data *data, |
427 | struct rdt_domain *d); | 432 | struct rdt_resource *r, |
433 | struct rdt_domain *d); | ||
428 | struct list_head evt_list; | 434 | struct list_head evt_list; |
429 | int num_rmid; | 435 | int num_rmid; |
430 | unsigned int mon_scale; | 436 | unsigned int mon_scale; |
431 | unsigned long fflags; | 437 | unsigned long fflags; |
432 | }; | 438 | }; |
433 | 439 | ||
434 | int parse_cbm(void *_data, struct rdt_resource *r, struct rdt_domain *d); | 440 | int parse_cbm(struct rdt_parse_data *data, struct rdt_resource *r, |
435 | int parse_bw(void *_buf, struct rdt_resource *r, struct rdt_domain *d); | 441 | struct rdt_domain *d); |
442 | int parse_bw(struct rdt_parse_data *data, struct rdt_resource *r, | ||
443 | struct rdt_domain *d); | ||
436 | 444 | ||
437 | extern struct mutex rdtgroup_mutex; | 445 | extern struct mutex rdtgroup_mutex; |
438 | 446 | ||
@@ -536,6 +544,7 @@ int rdtgroup_pseudo_lock_create(struct rdtgroup *rdtgrp); | |||
536 | void rdtgroup_pseudo_lock_remove(struct rdtgroup *rdtgrp); | 544 | void rdtgroup_pseudo_lock_remove(struct rdtgroup *rdtgrp); |
537 | struct rdt_domain *get_domain_from_cpu(int cpu, struct rdt_resource *r); | 545 | struct rdt_domain *get_domain_from_cpu(int cpu, struct rdt_resource *r); |
538 | int update_domains(struct rdt_resource *r, int closid); | 546 | int update_domains(struct rdt_resource *r, int closid); |
547 | int closids_supported(void); | ||
539 | void closid_free(int closid); | 548 | void closid_free(int closid); |
540 | int alloc_rmid(void); | 549 | int alloc_rmid(void); |
541 | void free_rmid(u32 rmid); | 550 | void free_rmid(u32 rmid); |
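The typed struct rdt_parse_data replaces the old void * argument, so parse_cbm() and parse_bw() now share one callback signature and the caller no longer smuggles a raw buffer through. A small userspace model of dispatching parse data through such a callback; every name here is a stand-in, not the kernel's:

    #include <stdio.h>
    #include <stdlib.h>

    struct parse_data { const char *buf; };           /* models rdt_parse_data */
    struct domain     { unsigned long new_ctrl; int have_new_ctrl; };
    struct resource {
        const char *name;
        /* models the typed parse_ctrlval callback */
        int (*parse_ctrlval)(struct parse_data *d, struct resource *r,
                             struct domain *dom);
    };

    static int parse_bw_model(struct parse_data *d, struct resource *r,
                              struct domain *dom)
    {
        (void)r;
        dom->new_ctrl = strtoul(d->buf, NULL, 10);    /* bandwidth value for MBA */
        dom->have_new_ctrl = 1;
        return 0;
    }

    static int parse_cbm_model(struct parse_data *d, struct resource *r,
                               struct domain *dom)
    {
        (void)r;
        dom->new_ctrl = strtoul(d->buf, NULL, 16);    /* cache bit mask, hex */
        dom->have_new_ctrl = 1;
        return 0;
    }

    int main(void)
    {
        struct resource mba = { "MB", parse_bw_model };
        struct resource l3  = { "L3", parse_cbm_model };
        struct domain d0 = { 0 }, d1 = { 0 };
        struct parse_data bw  = { "50" }, cbm = { "ff" };

        mba.parse_ctrlval(&bw, &mba, &d0);
        l3.parse_ctrlval(&cbm, &l3, &d1);
        printf("MB=%lu L3=0x%lx\n", d0.new_ctrl, d1.new_ctrl);
        return 0;
    }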
diff --git a/arch/x86/kernel/cpu/intel_rdt_ctrlmondata.c b/arch/x86/kernel/cpu/intel_rdt_ctrlmondata.c index af358ca05160..0f53049719cd 100644 --- a/arch/x86/kernel/cpu/intel_rdt_ctrlmondata.c +++ b/arch/x86/kernel/cpu/intel_rdt_ctrlmondata.c | |||
@@ -64,19 +64,19 @@ static bool bw_validate(char *buf, unsigned long *data, struct rdt_resource *r) | |||
64 | return true; | 64 | return true; |
65 | } | 65 | } |
66 | 66 | ||
67 | int parse_bw(void *_buf, struct rdt_resource *r, struct rdt_domain *d) | 67 | int parse_bw(struct rdt_parse_data *data, struct rdt_resource *r, |
68 | struct rdt_domain *d) | ||
68 | { | 69 | { |
69 | unsigned long data; | 70 | unsigned long bw_val; |
70 | char *buf = _buf; | ||
71 | 71 | ||
72 | if (d->have_new_ctrl) { | 72 | if (d->have_new_ctrl) { |
73 | rdt_last_cmd_printf("duplicate domain %d\n", d->id); | 73 | rdt_last_cmd_printf("duplicate domain %d\n", d->id); |
74 | return -EINVAL; | 74 | return -EINVAL; |
75 | } | 75 | } |
76 | 76 | ||
77 | if (!bw_validate(buf, &data, r)) | 77 | if (!bw_validate(data->buf, &bw_val, r)) |
78 | return -EINVAL; | 78 | return -EINVAL; |
79 | d->new_ctrl = data; | 79 | d->new_ctrl = bw_val; |
80 | d->have_new_ctrl = true; | 80 | d->have_new_ctrl = true; |
81 | 81 | ||
82 | return 0; | 82 | return 0; |
@@ -123,18 +123,13 @@ static bool cbm_validate(char *buf, u32 *data, struct rdt_resource *r) | |||
123 | return true; | 123 | return true; |
124 | } | 124 | } |
125 | 125 | ||
126 | struct rdt_cbm_parse_data { | ||
127 | struct rdtgroup *rdtgrp; | ||
128 | char *buf; | ||
129 | }; | ||
130 | |||
131 | /* | 126 | /* |
132 | * Read one cache bit mask (hex). Check that it is valid for the current | 127 | * Read one cache bit mask (hex). Check that it is valid for the current |
133 | * resource type. | 128 | * resource type. |
134 | */ | 129 | */ |
135 | int parse_cbm(void *_data, struct rdt_resource *r, struct rdt_domain *d) | 130 | int parse_cbm(struct rdt_parse_data *data, struct rdt_resource *r, |
131 | struct rdt_domain *d) | ||
136 | { | 132 | { |
137 | struct rdt_cbm_parse_data *data = _data; | ||
138 | struct rdtgroup *rdtgrp = data->rdtgrp; | 133 | struct rdtgroup *rdtgrp = data->rdtgrp; |
139 | u32 cbm_val; | 134 | u32 cbm_val; |
140 | 135 | ||
@@ -195,11 +190,17 @@ int parse_cbm(void *_data, struct rdt_resource *r, struct rdt_domain *d) | |||
195 | static int parse_line(char *line, struct rdt_resource *r, | 190 | static int parse_line(char *line, struct rdt_resource *r, |
196 | struct rdtgroup *rdtgrp) | 191 | struct rdtgroup *rdtgrp) |
197 | { | 192 | { |
198 | struct rdt_cbm_parse_data data; | 193 | struct rdt_parse_data data; |
199 | char *dom = NULL, *id; | 194 | char *dom = NULL, *id; |
200 | struct rdt_domain *d; | 195 | struct rdt_domain *d; |
201 | unsigned long dom_id; | 196 | unsigned long dom_id; |
202 | 197 | ||
198 | if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP && | ||
199 | r->rid == RDT_RESOURCE_MBA) { | ||
200 | rdt_last_cmd_puts("Cannot pseudo-lock MBA resource\n"); | ||
201 | return -EINVAL; | ||
202 | } | ||
203 | |||
203 | next: | 204 | next: |
204 | if (!line || line[0] == '\0') | 205 | if (!line || line[0] == '\0') |
205 | return 0; | 206 | return 0; |
diff --git a/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c b/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c index b799c00bef09..1b8e86a5d5e1 100644 --- a/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c +++ b/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c | |||
@@ -97,6 +97,12 @@ void rdt_last_cmd_printf(const char *fmt, ...) | |||
97 | * limited as the number of resources grows. | 97 | * limited as the number of resources grows. |
98 | */ | 98 | */ |
99 | static int closid_free_map; | 99 | static int closid_free_map; |
100 | static int closid_free_map_len; | ||
101 | |||
102 | int closids_supported(void) | ||
103 | { | ||
104 | return closid_free_map_len; | ||
105 | } | ||
100 | 106 | ||
101 | static void closid_init(void) | 107 | static void closid_init(void) |
102 | { | 108 | { |
@@ -111,6 +117,7 @@ static void closid_init(void) | |||
111 | 117 | ||
112 | /* CLOSID 0 is always reserved for the default group */ | 118 | /* CLOSID 0 is always reserved for the default group */ |
113 | closid_free_map &= ~1; | 119 | closid_free_map &= ~1; |
120 | closid_free_map_len = rdt_min_closid; | ||
114 | } | 121 | } |
115 | 122 | ||
116 | static int closid_alloc(void) | 123 | static int closid_alloc(void) |
@@ -802,7 +809,7 @@ static int rdt_bit_usage_show(struct kernfs_open_file *of, | |||
802 | sw_shareable = 0; | 809 | sw_shareable = 0; |
803 | exclusive = 0; | 810 | exclusive = 0; |
804 | seq_printf(seq, "%d=", dom->id); | 811 | seq_printf(seq, "%d=", dom->id); |
805 | for (i = 0; i < r->num_closid; i++, ctrl++) { | 812 | for (i = 0; i < closids_supported(); i++, ctrl++) { |
806 | if (!closid_allocated(i)) | 813 | if (!closid_allocated(i)) |
807 | continue; | 814 | continue; |
808 | mode = rdtgroup_mode_by_closid(i); | 815 | mode = rdtgroup_mode_by_closid(i); |
@@ -989,7 +996,7 @@ bool rdtgroup_cbm_overlaps(struct rdt_resource *r, struct rdt_domain *d, | |||
989 | 996 | ||
990 | /* Check for overlap with other resource groups */ | 997 | /* Check for overlap with other resource groups */ |
991 | ctrl = d->ctrl_val; | 998 | ctrl = d->ctrl_val; |
992 | for (i = 0; i < r->num_closid; i++, ctrl++) { | 999 | for (i = 0; i < closids_supported(); i++, ctrl++) { |
993 | ctrl_b = (unsigned long *)ctrl; | 1000 | ctrl_b = (unsigned long *)ctrl; |
994 | mode = rdtgroup_mode_by_closid(i); | 1001 | mode = rdtgroup_mode_by_closid(i); |
995 | if (closid_allocated(i) && i != closid && | 1002 | if (closid_allocated(i) && i != closid && |
@@ -1024,16 +1031,27 @@ static bool rdtgroup_mode_test_exclusive(struct rdtgroup *rdtgrp) | |||
1024 | { | 1031 | { |
1025 | int closid = rdtgrp->closid; | 1032 | int closid = rdtgrp->closid; |
1026 | struct rdt_resource *r; | 1033 | struct rdt_resource *r; |
1034 | bool has_cache = false; | ||
1027 | struct rdt_domain *d; | 1035 | struct rdt_domain *d; |
1028 | 1036 | ||
1029 | for_each_alloc_enabled_rdt_resource(r) { | 1037 | for_each_alloc_enabled_rdt_resource(r) { |
1038 | if (r->rid == RDT_RESOURCE_MBA) | ||
1039 | continue; | ||
1040 | has_cache = true; | ||
1030 | list_for_each_entry(d, &r->domains, list) { | 1041 | list_for_each_entry(d, &r->domains, list) { |
1031 | if (rdtgroup_cbm_overlaps(r, d, d->ctrl_val[closid], | 1042 | if (rdtgroup_cbm_overlaps(r, d, d->ctrl_val[closid], |
1032 | rdtgrp->closid, false)) | 1043 | rdtgrp->closid, false)) { |
1044 | rdt_last_cmd_puts("schemata overlaps\n"); | ||
1033 | return false; | 1045 | return false; |
1046 | } | ||
1034 | } | 1047 | } |
1035 | } | 1048 | } |
1036 | 1049 | ||
1050 | if (!has_cache) { | ||
1051 | rdt_last_cmd_puts("cannot be exclusive without CAT/CDP\n"); | ||
1052 | return false; | ||
1053 | } | ||
1054 | |||
1037 | return true; | 1055 | return true; |
1038 | } | 1056 | } |
1039 | 1057 | ||
@@ -1085,7 +1103,6 @@ static ssize_t rdtgroup_mode_write(struct kernfs_open_file *of, | |||
1085 | rdtgrp->mode = RDT_MODE_SHAREABLE; | 1103 | rdtgrp->mode = RDT_MODE_SHAREABLE; |
1086 | } else if (!strcmp(buf, "exclusive")) { | 1104 | } else if (!strcmp(buf, "exclusive")) { |
1087 | if (!rdtgroup_mode_test_exclusive(rdtgrp)) { | 1105 | if (!rdtgroup_mode_test_exclusive(rdtgrp)) { |
1088 | rdt_last_cmd_printf("schemata overlaps\n"); | ||
1089 | ret = -EINVAL; | 1106 | ret = -EINVAL; |
1090 | goto out; | 1107 | goto out; |
1091 | } | 1108 | } |
@@ -1155,8 +1172,8 @@ static int rdtgroup_size_show(struct kernfs_open_file *of, | |||
1155 | struct rdt_resource *r; | 1172 | struct rdt_resource *r; |
1156 | struct rdt_domain *d; | 1173 | struct rdt_domain *d; |
1157 | unsigned int size; | 1174 | unsigned int size; |
1158 | bool sep = false; | 1175 | bool sep; |
1159 | u32 cbm; | 1176 | u32 ctrl; |
1160 | 1177 | ||
1161 | rdtgrp = rdtgroup_kn_lock_live(of->kn); | 1178 | rdtgrp = rdtgroup_kn_lock_live(of->kn); |
1162 | if (!rdtgrp) { | 1179 | if (!rdtgrp) { |
@@ -1174,6 +1191,7 @@ static int rdtgroup_size_show(struct kernfs_open_file *of, | |||
1174 | } | 1191 | } |
1175 | 1192 | ||
1176 | for_each_alloc_enabled_rdt_resource(r) { | 1193 | for_each_alloc_enabled_rdt_resource(r) { |
1194 | sep = false; | ||
1177 | seq_printf(s, "%*s:", max_name_width, r->name); | 1195 | seq_printf(s, "%*s:", max_name_width, r->name); |
1178 | list_for_each_entry(d, &r->domains, list) { | 1196 | list_for_each_entry(d, &r->domains, list) { |
1179 | if (sep) | 1197 | if (sep) |
@@ -1181,8 +1199,13 @@ static int rdtgroup_size_show(struct kernfs_open_file *of, | |||
1181 | if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) { | 1199 | if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) { |
1182 | size = 0; | 1200 | size = 0; |
1183 | } else { | 1201 | } else { |
1184 | cbm = d->ctrl_val[rdtgrp->closid]; | 1202 | ctrl = (!is_mba_sc(r) ? |
1185 | size = rdtgroup_cbm_to_size(r, d, cbm); | 1203 | d->ctrl_val[rdtgrp->closid] : |
1204 | d->mbps_val[rdtgrp->closid]); | ||
1205 | if (r->rid == RDT_RESOURCE_MBA) | ||
1206 | size = ctrl; | ||
1207 | else | ||
1208 | size = rdtgroup_cbm_to_size(r, d, ctrl); | ||
1186 | } | 1209 | } |
1187 | seq_printf(s, "%d=%u", d->id, size); | 1210 | seq_printf(s, "%d=%u", d->id, size); |
1188 | sep = true; | 1211 | sep = true; |
@@ -2336,12 +2359,18 @@ static int rdtgroup_init_alloc(struct rdtgroup *rdtgrp) | |||
2336 | u32 *ctrl; | 2359 | u32 *ctrl; |
2337 | 2360 | ||
2338 | for_each_alloc_enabled_rdt_resource(r) { | 2361 | for_each_alloc_enabled_rdt_resource(r) { |
2362 | /* | ||
2363 | * Only initialize default allocations for CBM cache | ||
2364 | * resources | ||
2365 | */ | ||
2366 | if (r->rid == RDT_RESOURCE_MBA) | ||
2367 | continue; | ||
2339 | list_for_each_entry(d, &r->domains, list) { | 2368 | list_for_each_entry(d, &r->domains, list) { |
2340 | d->have_new_ctrl = false; | 2369 | d->have_new_ctrl = false; |
2341 | d->new_ctrl = r->cache.shareable_bits; | 2370 | d->new_ctrl = r->cache.shareable_bits; |
2342 | used_b = r->cache.shareable_bits; | 2371 | used_b = r->cache.shareable_bits; |
2343 | ctrl = d->ctrl_val; | 2372 | ctrl = d->ctrl_val; |
2344 | for (i = 0; i < r->num_closid; i++, ctrl++) { | 2373 | for (i = 0; i < closids_supported(); i++, ctrl++) { |
2345 | if (closid_allocated(i) && i != closid) { | 2374 | if (closid_allocated(i) && i != closid) { |
2346 | mode = rdtgroup_mode_by_closid(i); | 2375 | mode = rdtgroup_mode_by_closid(i); |
2347 | if (mode == RDT_MODE_PSEUDO_LOCKSETUP) | 2376 | if (mode == RDT_MODE_PSEUDO_LOCKSETUP) |
@@ -2373,6 +2402,12 @@ static int rdtgroup_init_alloc(struct rdtgroup *rdtgrp) | |||
2373 | } | 2402 | } |
2374 | 2403 | ||
2375 | for_each_alloc_enabled_rdt_resource(r) { | 2404 | for_each_alloc_enabled_rdt_resource(r) { |
2405 | /* | ||
2406 | * Only initialize default allocations for CBM cache | ||
2407 | * resources | ||
2408 | */ | ||
2409 | if (r->rid == RDT_RESOURCE_MBA) | ||
2410 | continue; | ||
2376 | ret = update_domains(r, rdtgrp->closid); | 2411 | ret = update_domains(r, rdtgrp->closid); |
2377 | if (ret < 0) { | 2412 | if (ret < 0) { |
2378 | rdt_last_cmd_puts("failed to initialize allocations\n"); | 2413 | rdt_last_cmd_puts("failed to initialize allocations\n"); |
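Two themes run through this file: loops that used to run to each resource's num_closid now stop at closids_supported(), the minimum CLOSID count recorded at mount time, and the size file reports MBA control values as-is instead of converting them as if they were cache bit masks. A compact model of that size computation; the bytes-per-CBM-bit scaling is invented for illustration:

    #include <stdio.h>

    enum rid { RID_L3, RID_MBA };

    struct resource {
        enum rid rid;
        unsigned int bytes_per_cbm_bit;    /* stand-in for cbm-to-bytes scaling */
    };

    /* Mirrors rdtgroup_size_show(): MBA control values are already "sizes". */
    static unsigned int group_size(const struct resource *r, unsigned int ctrl)
    {
        if (r->rid == RID_MBA)
            return ctrl;                           /* percentage or MBps */
        return __builtin_popcount(ctrl) * r->bytes_per_cbm_bit;
    }

    int main(void)
    {
        struct resource l3  = { RID_L3, 1024 * 1024 };  /* hypothetical 1 MiB per bit */
        struct resource mba = { RID_MBA, 0 };

        printf("L3 size  = %u bytes\n", group_size(&l3, 0xff));
        printf("MBA size = %u\n", group_size(&mba, 50));
        return 0;
    }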
diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c index 8047379e575a..ddee1f0870c4 100644 --- a/arch/x86/kernel/head64.c +++ b/arch/x86/kernel/head64.c | |||
@@ -35,6 +35,7 @@ | |||
35 | #include <asm/bootparam_utils.h> | 35 | #include <asm/bootparam_utils.h> |
36 | #include <asm/microcode.h> | 36 | #include <asm/microcode.h> |
37 | #include <asm/kasan.h> | 37 | #include <asm/kasan.h> |
38 | #include <asm/fixmap.h> | ||
38 | 39 | ||
39 | /* | 40 | /* |
40 | * Manage page tables very early on. | 41 | * Manage page tables very early on. |
@@ -112,6 +113,7 @@ static bool __head check_la57_support(unsigned long physaddr) | |||
112 | unsigned long __head __startup_64(unsigned long physaddr, | 113 | unsigned long __head __startup_64(unsigned long physaddr, |
113 | struct boot_params *bp) | 114 | struct boot_params *bp) |
114 | { | 115 | { |
116 | unsigned long vaddr, vaddr_end; | ||
115 | unsigned long load_delta, *p; | 117 | unsigned long load_delta, *p; |
116 | unsigned long pgtable_flags; | 118 | unsigned long pgtable_flags; |
117 | pgdval_t *pgd; | 119 | pgdval_t *pgd; |
@@ -165,7 +167,8 @@ unsigned long __head __startup_64(unsigned long physaddr, | |||
165 | pud[511] += load_delta; | 167 | pud[511] += load_delta; |
166 | 168 | ||
167 | pmd = fixup_pointer(level2_fixmap_pgt, physaddr); | 169 | pmd = fixup_pointer(level2_fixmap_pgt, physaddr); |
168 | pmd[506] += load_delta; | 170 | for (i = FIXMAP_PMD_TOP; i > FIXMAP_PMD_TOP - FIXMAP_PMD_NUM; i--) |
171 | pmd[i] += load_delta; | ||
169 | 172 | ||
170 | /* | 173 | /* |
171 | * Set up the identity mapping for the switchover. These | 174 | * Set up the identity mapping for the switchover. These |
@@ -235,6 +238,21 @@ unsigned long __head __startup_64(unsigned long physaddr, | |||
235 | sme_encrypt_kernel(bp); | 238 | sme_encrypt_kernel(bp); |
236 | 239 | ||
237 | /* | 240 | /* |
241 | * Clear the memory encryption mask from the .bss..decrypted section. | ||
242 | * The bss section will be memset to zero later in the initialization so | ||
243 | * there is no need to zero it after changing the memory encryption | ||
244 | * attribute. | ||
245 | */ | ||
246 | if (mem_encrypt_active()) { | ||
247 | vaddr = (unsigned long)__start_bss_decrypted; | ||
248 | vaddr_end = (unsigned long)__end_bss_decrypted; | ||
249 | for (; vaddr < vaddr_end; vaddr += PMD_SIZE) { | ||
250 | i = pmd_index(vaddr); | ||
251 | pmd[i] -= sme_get_me_mask(); | ||
252 | } | ||
253 | } | ||
254 | |||
255 | /* | ||
238 | * Return the SME encryption mask (if SME is active) to be used as a | 256 | * Return the SME encryption mask (if SME is active) to be used as a |
239 | * modifier for the initial pgdir entry programmed into CR3. | 257 | * modifier for the initial pgdir entry programmed into CR3. |
240 | */ | 258 | */ |
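The added loop in __startup_64() steps through the .bss..decrypted virtual range in PMD_SIZE increments and subtracts the SME mask from each early PMD entry, so the shared data is mapped unencrypted from the very first page tables. A userspace model of that walk over a fake PMD array; the address range and C-bit position are invented for illustration:

    #include <stdint.h>
    #include <stdio.h>

    #define PMD_SHIFT    21
    #define PMD_SIZE     (1UL << PMD_SHIFT)        /* 2 MiB per PMD entry */
    #define PTRS_PER_PMD 512

    static uint64_t pmd[PTRS_PER_PMD];             /* fake level-2 table */

    static unsigned int pmd_index(uint64_t vaddr)
    {
        return (vaddr >> PMD_SHIFT) & (PTRS_PER_PMD - 1);
    }

    int main(void)
    {
        const uint64_t sme_mask = 1ULL << 47;      /* made-up C-bit position */
        uint64_t start = 0x1000000, end = start + 4 * PMD_SIZE;

        for (uint64_t va = start; va < end; va += PMD_SIZE)
            pmd[pmd_index(va)] |= sme_mask;        /* pretend it was mapped encrypted */

        /* The fix: strip the encryption bit over the decrypted range. */
        for (uint64_t va = start; va < end; va += PMD_SIZE)
            pmd[pmd_index(va)] -= sme_mask;

        printf("entry %u = %#llx\n", pmd_index(start),
               (unsigned long long)pmd[pmd_index(start)]);
        return 0;
    }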
diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S index 15ebc2fc166e..a3618cf04cf6 100644 --- a/arch/x86/kernel/head_64.S +++ b/arch/x86/kernel/head_64.S | |||
@@ -24,6 +24,7 @@ | |||
24 | #include "../entry/calling.h" | 24 | #include "../entry/calling.h" |
25 | #include <asm/export.h> | 25 | #include <asm/export.h> |
26 | #include <asm/nospec-branch.h> | 26 | #include <asm/nospec-branch.h> |
27 | #include <asm/fixmap.h> | ||
27 | 28 | ||
28 | #ifdef CONFIG_PARAVIRT | 29 | #ifdef CONFIG_PARAVIRT |
29 | #include <asm/asm-offsets.h> | 30 | #include <asm/asm-offsets.h> |
@@ -445,13 +446,20 @@ NEXT_PAGE(level2_kernel_pgt) | |||
445 | KERNEL_IMAGE_SIZE/PMD_SIZE) | 446 | KERNEL_IMAGE_SIZE/PMD_SIZE) |
446 | 447 | ||
447 | NEXT_PAGE(level2_fixmap_pgt) | 448 | NEXT_PAGE(level2_fixmap_pgt) |
448 | .fill 506,8,0 | 449 | .fill (512 - 4 - FIXMAP_PMD_NUM),8,0 |
449 | .quad level1_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE_NOENC | 450 | pgtno = 0 |
450 | /* 8MB reserved for vsyscalls + a 2MB hole = 4 + 1 entries */ | 451 | .rept (FIXMAP_PMD_NUM) |
451 | .fill 5,8,0 | 452 | .quad level1_fixmap_pgt + (pgtno << PAGE_SHIFT) - __START_KERNEL_map \ |
453 | + _PAGE_TABLE_NOENC; | ||
454 | pgtno = pgtno + 1 | ||
455 | .endr | ||
456 | /* 6 MB reserved space + a 2MB hole */ | ||
457 | .fill 4,8,0 | ||
452 | 458 | ||
453 | NEXT_PAGE(level1_fixmap_pgt) | 459 | NEXT_PAGE(level1_fixmap_pgt) |
460 | .rept (FIXMAP_PMD_NUM) | ||
454 | .fill 512,8,0 | 461 | .fill 512,8,0 |
462 | .endr | ||
455 | 463 | ||
456 | #undef PMDS | 464 | #undef PMDS |
457 | 465 | ||
diff --git a/arch/x86/kernel/kvmclock.c b/arch/x86/kernel/kvmclock.c index 1e6764648af3..013fe3d21dbb 100644 --- a/arch/x86/kernel/kvmclock.c +++ b/arch/x86/kernel/kvmclock.c | |||
@@ -28,6 +28,7 @@ | |||
28 | #include <linux/sched/clock.h> | 28 | #include <linux/sched/clock.h> |
29 | #include <linux/mm.h> | 29 | #include <linux/mm.h> |
30 | #include <linux/slab.h> | 30 | #include <linux/slab.h> |
31 | #include <linux/set_memory.h> | ||
31 | 32 | ||
32 | #include <asm/hypervisor.h> | 33 | #include <asm/hypervisor.h> |
33 | #include <asm/mem_encrypt.h> | 34 | #include <asm/mem_encrypt.h> |
@@ -61,9 +62,10 @@ early_param("no-kvmclock-vsyscall", parse_no_kvmclock_vsyscall); | |||
61 | (PAGE_SIZE / sizeof(struct pvclock_vsyscall_time_info)) | 62 | (PAGE_SIZE / sizeof(struct pvclock_vsyscall_time_info)) |
62 | 63 | ||
63 | static struct pvclock_vsyscall_time_info | 64 | static struct pvclock_vsyscall_time_info |
64 | hv_clock_boot[HVC_BOOT_ARRAY_SIZE] __aligned(PAGE_SIZE); | 65 | hv_clock_boot[HVC_BOOT_ARRAY_SIZE] __bss_decrypted __aligned(PAGE_SIZE); |
65 | static struct pvclock_wall_clock wall_clock; | 66 | static struct pvclock_wall_clock wall_clock __bss_decrypted; |
66 | static DEFINE_PER_CPU(struct pvclock_vsyscall_time_info *, hv_clock_per_cpu); | 67 | static DEFINE_PER_CPU(struct pvclock_vsyscall_time_info *, hv_clock_per_cpu); |
68 | static struct pvclock_vsyscall_time_info *hvclock_mem; | ||
67 | 69 | ||
68 | static inline struct pvclock_vcpu_time_info *this_cpu_pvti(void) | 70 | static inline struct pvclock_vcpu_time_info *this_cpu_pvti(void) |
69 | { | 71 | { |
@@ -236,6 +238,45 @@ static void kvm_shutdown(void) | |||
236 | native_machine_shutdown(); | 238 | native_machine_shutdown(); |
237 | } | 239 | } |
238 | 240 | ||
241 | static void __init kvmclock_init_mem(void) | ||
242 | { | ||
243 | unsigned long ncpus; | ||
244 | unsigned int order; | ||
245 | struct page *p; | ||
246 | int r; | ||
247 | |||
248 | if (HVC_BOOT_ARRAY_SIZE >= num_possible_cpus()) | ||
249 | return; | ||
250 | |||
251 | ncpus = num_possible_cpus() - HVC_BOOT_ARRAY_SIZE; | ||
252 | order = get_order(ncpus * sizeof(*hvclock_mem)); | ||
253 | |||
254 | p = alloc_pages(GFP_KERNEL, order); | ||
255 | if (!p) { | ||
256 | pr_warn("%s: failed to alloc %d pages", __func__, (1U << order)); | ||
257 | return; | ||
258 | } | ||
259 | |||
260 | hvclock_mem = page_address(p); | ||
261 | |||
262 | /* | ||
263 | * hvclock is shared between the guest and the hypervisor, must | ||
264 | * be mapped decrypted. | ||
265 | */ | ||
266 | if (sev_active()) { | ||
267 | r = set_memory_decrypted((unsigned long) hvclock_mem, | ||
268 | 1UL << order); | ||
269 | if (r) { | ||
270 | __free_pages(p, order); | ||
271 | hvclock_mem = NULL; | ||
272 | pr_warn("kvmclock: set_memory_decrypted() failed. Disabling\n"); | ||
273 | return; | ||
274 | } | ||
275 | } | ||
276 | |||
277 | memset(hvclock_mem, 0, PAGE_SIZE << order); | ||
278 | } | ||
279 | |||
239 | static int __init kvm_setup_vsyscall_timeinfo(void) | 280 | static int __init kvm_setup_vsyscall_timeinfo(void) |
240 | { | 281 | { |
241 | #ifdef CONFIG_X86_64 | 282 | #ifdef CONFIG_X86_64 |
@@ -250,6 +291,9 @@ static int __init kvm_setup_vsyscall_timeinfo(void) | |||
250 | 291 | ||
251 | kvm_clock.archdata.vclock_mode = VCLOCK_PVCLOCK; | 292 | kvm_clock.archdata.vclock_mode = VCLOCK_PVCLOCK; |
252 | #endif | 293 | #endif |
294 | |||
295 | kvmclock_init_mem(); | ||
296 | |||
253 | return 0; | 297 | return 0; |
254 | } | 298 | } |
255 | early_initcall(kvm_setup_vsyscall_timeinfo); | 299 | early_initcall(kvm_setup_vsyscall_timeinfo); |
@@ -269,8 +313,10 @@ static int kvmclock_setup_percpu(unsigned int cpu) | |||
269 | /* Use the static page for the first CPUs, allocate otherwise */ | 313 | /* Use the static page for the first CPUs, allocate otherwise */ |
270 | if (cpu < HVC_BOOT_ARRAY_SIZE) | 314 | if (cpu < HVC_BOOT_ARRAY_SIZE) |
271 | p = &hv_clock_boot[cpu]; | 315 | p = &hv_clock_boot[cpu]; |
316 | else if (hvclock_mem) | ||
317 | p = hvclock_mem + cpu - HVC_BOOT_ARRAY_SIZE; | ||
272 | else | 318 | else |
273 | p = kzalloc(sizeof(*p), GFP_KERNEL); | 319 | return -ENOMEM; |
274 | 320 | ||
275 | per_cpu(hv_clock_per_cpu, cpu) = p; | 321 | per_cpu(hv_clock_per_cpu, cpu) = p; |
276 | return p ? 0 : -ENOMEM; | 322 | return p ? 0 : -ENOMEM; |
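kvmclock now sizes one contiguous allocation up front for the CPUs that do not fit in the static __bss_decrypted boot array and, under SEV, marks it decrypted once; kvmclock_setup_percpu() then just indexes into that pool instead of calling kzalloc() per CPU. A userspace sketch of the page-selection logic with stand-in types and a hypothetical boot-array size:

    #include <stdio.h>
    #include <stdlib.h>

    struct pvti { unsigned long data[8]; };         /* stand-in for pvclock info */

    #define HVC_BOOT_ARRAY_SIZE 64                  /* hypothetical boot-array size */

    static struct pvti hv_clock_boot[HVC_BOOT_ARRAY_SIZE];
    static struct pvti *hvclock_mem;                /* late pool, may stay NULL */

    static struct pvti *clock_page_for(unsigned int cpu)
    {
        if (cpu < HVC_BOOT_ARRAY_SIZE)
            return &hv_clock_boot[cpu];
        if (hvclock_mem)
            return hvclock_mem + (cpu - HVC_BOOT_ARRAY_SIZE);
        return NULL;                                /* maps to -ENOMEM in the kernel */
    }

    int main(void)
    {
        unsigned int ncpus = 200;

        if (ncpus > HVC_BOOT_ARRAY_SIZE)
            hvclock_mem = calloc(ncpus - HVC_BOOT_ARRAY_SIZE, sizeof(*hvclock_mem));

        printf("cpu 3   -> %p\n", (void *)clock_page_for(3));
        printf("cpu 150 -> %p\n", (void *)clock_page_for(150));
        free(hvclock_mem);
        return 0;
    }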
diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c index afdb303285f8..8dc69d82567e 100644 --- a/arch/x86/kernel/paravirt.c +++ b/arch/x86/kernel/paravirt.c | |||
@@ -91,7 +91,7 @@ unsigned paravirt_patch_call(void *insnbuf, | |||
91 | 91 | ||
92 | if (len < 5) { | 92 | if (len < 5) { |
93 | #ifdef CONFIG_RETPOLINE | 93 | #ifdef CONFIG_RETPOLINE |
94 | WARN_ONCE("Failing to patch indirect CALL in %ps\n", (void *)addr); | 94 | WARN_ONCE(1, "Failing to patch indirect CALL in %ps\n", (void *)addr); |
95 | #endif | 95 | #endif |
96 | return len; /* call too long for patch site */ | 96 | return len; /* call too long for patch site */ |
97 | } | 97 | } |
@@ -111,7 +111,7 @@ unsigned paravirt_patch_jmp(void *insnbuf, const void *target, | |||
111 | 111 | ||
112 | if (len < 5) { | 112 | if (len < 5) { |
113 | #ifdef CONFIG_RETPOLINE | 113 | #ifdef CONFIG_RETPOLINE |
114 | WARN_ONCE("Failing to patch indirect JMP in %ps\n", (void *)addr); | 114 | WARN_ONCE(1, "Failing to patch indirect JMP in %ps\n", (void *)addr); |
115 | #endif | 115 | #endif |
116 | return len; /* call too long for patch site */ | 116 | return len; /* call too long for patch site */ |
117 | } | 117 | } |
diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S index 8bde0a419f86..5dd3317d761f 100644 --- a/arch/x86/kernel/vmlinux.lds.S +++ b/arch/x86/kernel/vmlinux.lds.S | |||
@@ -65,6 +65,23 @@ jiffies_64 = jiffies; | |||
65 | #define ALIGN_ENTRY_TEXT_BEGIN . = ALIGN(PMD_SIZE); | 65 | #define ALIGN_ENTRY_TEXT_BEGIN . = ALIGN(PMD_SIZE); |
66 | #define ALIGN_ENTRY_TEXT_END . = ALIGN(PMD_SIZE); | 66 | #define ALIGN_ENTRY_TEXT_END . = ALIGN(PMD_SIZE); |
67 | 67 | ||
68 | /* | ||
69 | * This section contains data which will be mapped as decrypted. Memory | ||
70 | * encryption operates on a page basis. Make this section PMD-aligned | ||
71 | * to avoid splitting the pages while mapping the section early. | ||
72 | * | ||
73 | * Note: We use a separate section so that only this section gets | ||
74 | * decrypted to avoid exposing more than we wish. | ||
75 | */ | ||
76 | #define BSS_DECRYPTED \ | ||
77 | . = ALIGN(PMD_SIZE); \ | ||
78 | __start_bss_decrypted = .; \ | ||
79 | *(.bss..decrypted); \ | ||
80 | . = ALIGN(PAGE_SIZE); \ | ||
81 | __start_bss_decrypted_unused = .; \ | ||
82 | . = ALIGN(PMD_SIZE); \ | ||
83 | __end_bss_decrypted = .; \ | ||
84 | |||
68 | #else | 85 | #else |
69 | 86 | ||
70 | #define X86_ALIGN_RODATA_BEGIN | 87 | #define X86_ALIGN_RODATA_BEGIN |
@@ -74,6 +91,7 @@ jiffies_64 = jiffies; | |||
74 | 91 | ||
75 | #define ALIGN_ENTRY_TEXT_BEGIN | 92 | #define ALIGN_ENTRY_TEXT_BEGIN |
76 | #define ALIGN_ENTRY_TEXT_END | 93 | #define ALIGN_ENTRY_TEXT_END |
94 | #define BSS_DECRYPTED | ||
77 | 95 | ||
78 | #endif | 96 | #endif |
79 | 97 | ||
@@ -355,6 +373,7 @@ SECTIONS | |||
355 | __bss_start = .; | 373 | __bss_start = .; |
356 | *(.bss..page_aligned) | 374 | *(.bss..page_aligned) |
357 | *(.bss) | 375 | *(.bss) |
376 | BSS_DECRYPTED | ||
358 | . = ALIGN(PAGE_SIZE); | 377 | . = ALIGN(PAGE_SIZE); |
359 | __bss_stop = .; | 378 | __bss_stop = .; |
360 | } | 379 | } |
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c index 17c0472c5b34..fbb0e6df121b 100644 --- a/arch/x86/kvm/lapic.c +++ b/arch/x86/kvm/lapic.c | |||
@@ -1344,9 +1344,8 @@ EXPORT_SYMBOL_GPL(kvm_lapic_reg_read); | |||
1344 | 1344 | ||
1345 | static int apic_mmio_in_range(struct kvm_lapic *apic, gpa_t addr) | 1345 | static int apic_mmio_in_range(struct kvm_lapic *apic, gpa_t addr) |
1346 | { | 1346 | { |
1347 | return kvm_apic_hw_enabled(apic) && | 1347 | return addr >= apic->base_address && |
1348 | addr >= apic->base_address && | 1348 | addr < apic->base_address + LAPIC_MMIO_LENGTH; |
1349 | addr < apic->base_address + LAPIC_MMIO_LENGTH; | ||
1350 | } | 1349 | } |
1351 | 1350 | ||
1352 | static int apic_mmio_read(struct kvm_vcpu *vcpu, struct kvm_io_device *this, | 1351 | static int apic_mmio_read(struct kvm_vcpu *vcpu, struct kvm_io_device *this, |
@@ -1358,6 +1357,15 @@ static int apic_mmio_read(struct kvm_vcpu *vcpu, struct kvm_io_device *this, | |||
1358 | if (!apic_mmio_in_range(apic, address)) | 1357 | if (!apic_mmio_in_range(apic, address)) |
1359 | return -EOPNOTSUPP; | 1358 | return -EOPNOTSUPP; |
1360 | 1359 | ||
1360 | if (!kvm_apic_hw_enabled(apic) || apic_x2apic_mode(apic)) { | ||
1361 | if (!kvm_check_has_quirk(vcpu->kvm, | ||
1362 | KVM_X86_QUIRK_LAPIC_MMIO_HOLE)) | ||
1363 | return -EOPNOTSUPP; | ||
1364 | |||
1365 | memset(data, 0xff, len); | ||
1366 | return 0; | ||
1367 | } | ||
1368 | |||
1361 | kvm_lapic_reg_read(apic, offset, len, data); | 1369 | kvm_lapic_reg_read(apic, offset, len, data); |
1362 | 1370 | ||
1363 | return 0; | 1371 | return 0; |
@@ -1917,6 +1925,14 @@ static int apic_mmio_write(struct kvm_vcpu *vcpu, struct kvm_io_device *this, | |||
1917 | if (!apic_mmio_in_range(apic, address)) | 1925 | if (!apic_mmio_in_range(apic, address)) |
1918 | return -EOPNOTSUPP; | 1926 | return -EOPNOTSUPP; |
1919 | 1927 | ||
1928 | if (!kvm_apic_hw_enabled(apic) || apic_x2apic_mode(apic)) { | ||
1929 | if (!kvm_check_has_quirk(vcpu->kvm, | ||
1930 | KVM_X86_QUIRK_LAPIC_MMIO_HOLE)) | ||
1931 | return -EOPNOTSUPP; | ||
1932 | |||
1933 | return 0; | ||
1934 | } | ||
1935 | |||
1920 | /* | 1936 | /* |
1921 | * APIC register must be aligned on 128-bits boundary. | 1937 | * APIC register must be aligned on 128-bits boundary. |
1922 | * 32/64/128 bits registers must be accessed thru 32 bits. | 1938 | * 32/64/128 bits registers must be accessed thru 32 bits. |
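The APIC range check no longer requires the APIC to be hardware-enabled; instead, accesses that arrive while the APIC is hardware-disabled or in x2APIC mode either read back as all ones with writes dropped, or, when userspace has disabled the KVM_X86_QUIRK_LAPIC_MMIO_HOLE quirk, are rejected with -EOPNOTSUPP so the region behaves as an unclaimed MMIO hole. A small model of that read path:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    struct apic_model {
        int hw_enabled;
        int x2apic_mode;
        int quirk_present;   /* 0 once userspace disables LAPIC_MMIO_HOLE */
    };

    /* Returns 0 when the access is handled, -1 for "not ours" (-EOPNOTSUPP). */
    static int apic_mmio_read_model(const struct apic_model *apic,
                                    void *data, size_t len)
    {
        if (!apic->hw_enabled || apic->x2apic_mode) {
            if (!apic->quirk_present)
                return -1;            /* leave the range as an MMIO hole */
            memset(data, 0xff, len);  /* reads as open bus, writes are dropped */
            return 0;
        }
        memset(data, 0, len);         /* normal register read path, elided */
        return 0;
    }

    int main(void)
    {
        struct apic_model apic = { .hw_enabled = 0, .quirk_present = 1 };
        uint32_t val = 0;

        apic_mmio_read_model(&apic, &val, sizeof(val));
        printf("read while APIC disabled: %#x\n", val);
        return 0;
    }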
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c index e24ea7067373..d7e9bce6ff61 100644 --- a/arch/x86/kvm/mmu.c +++ b/arch/x86/kvm/mmu.c | |||
@@ -899,7 +899,7 @@ static void walk_shadow_page_lockless_end(struct kvm_vcpu *vcpu) | |||
899 | { | 899 | { |
900 | /* | 900 | /* |
901 | * Make sure the write to vcpu->mode is not reordered in front of | 901 | * Make sure the write to vcpu->mode is not reordered in front of |
902 | * reads to sptes. If it does, kvm_commit_zap_page() can see us | 902 | * reads to sptes. If it does, kvm_mmu_commit_zap_page() can see us |
903 | * OUTSIDE_GUEST_MODE and proceed to free the shadow page table. | 903 | * OUTSIDE_GUEST_MODE and proceed to free the shadow page table. |
904 | */ | 904 | */ |
905 | smp_store_release(&vcpu->mode, OUTSIDE_GUEST_MODE); | 905 | smp_store_release(&vcpu->mode, OUTSIDE_GUEST_MODE); |
@@ -5417,7 +5417,12 @@ void kvm_mmu_setup(struct kvm_vcpu *vcpu) | |||
5417 | { | 5417 | { |
5418 | MMU_WARN_ON(VALID_PAGE(vcpu->arch.mmu.root_hpa)); | 5418 | MMU_WARN_ON(VALID_PAGE(vcpu->arch.mmu.root_hpa)); |
5419 | 5419 | ||
5420 | kvm_init_mmu(vcpu, true); | 5420 | /* |
5421 | * kvm_mmu_setup() is called only on vCPU initialization. | ||
5422 | * Therefore, no need to reset mmu roots as they are not yet | ||
5423 | * initialized. | ||
5424 | */ | ||
5425 | kvm_init_mmu(vcpu, false); | ||
5421 | } | 5426 | } |
5422 | 5427 | ||
5423 | static void kvm_mmu_invalidate_zap_pages_in_memslot(struct kvm *kvm, | 5428 | static void kvm_mmu_invalidate_zap_pages_in_memslot(struct kvm *kvm, |
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c index 89c4c5aa15f1..d96092b35936 100644 --- a/arch/x86/kvm/svm.c +++ b/arch/x86/kvm/svm.c | |||
@@ -1226,8 +1226,7 @@ static __init int sev_hardware_setup(void) | |||
1226 | min_sev_asid = cpuid_edx(0x8000001F); | 1226 | min_sev_asid = cpuid_edx(0x8000001F); |
1227 | 1227 | ||
1228 | /* Initialize SEV ASID bitmap */ | 1228 | /* Initialize SEV ASID bitmap */ |
1229 | sev_asid_bitmap = kcalloc(BITS_TO_LONGS(max_sev_asid), | 1229 | sev_asid_bitmap = bitmap_zalloc(max_sev_asid, GFP_KERNEL); |
1230 | sizeof(unsigned long), GFP_KERNEL); | ||
1231 | if (!sev_asid_bitmap) | 1230 | if (!sev_asid_bitmap) |
1232 | return 1; | 1231 | return 1; |
1233 | 1232 | ||
@@ -1405,7 +1404,7 @@ static __exit void svm_hardware_unsetup(void) | |||
1405 | int cpu; | 1404 | int cpu; |
1406 | 1405 | ||
1407 | if (svm_sev_enabled()) | 1406 | if (svm_sev_enabled()) |
1408 | kfree(sev_asid_bitmap); | 1407 | bitmap_free(sev_asid_bitmap); |
1409 | 1408 | ||
1410 | for_each_possible_cpu(cpu) | 1409 | for_each_possible_cpu(cpu) |
1411 | svm_cpu_uninit(cpu); | 1410 | svm_cpu_uninit(cpu); |
@@ -7149,6 +7148,8 @@ static struct kvm_x86_ops svm_x86_ops __ro_after_init = { | |||
7149 | .check_intercept = svm_check_intercept, | 7148 | .check_intercept = svm_check_intercept, |
7150 | .handle_external_intr = svm_handle_external_intr, | 7149 | .handle_external_intr = svm_handle_external_intr, |
7151 | 7150 | ||
7151 | .request_immediate_exit = __kvm_request_immediate_exit, | ||
7152 | |||
7152 | .sched_in = svm_sched_in, | 7153 | .sched_in = svm_sched_in, |
7153 | 7154 | ||
7154 | .pmu_ops = &amd_pmu_ops, | 7155 | .pmu_ops = &amd_pmu_ops, |
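The SEV ASID allocation switches from open-coded kcalloc(BITS_TO_LONGS(...), sizeof(unsigned long), ...) to bitmap_zalloc()/bitmap_free(), which do the same rounding internally. A userspace stand-in for that size math (the max_sev_asid value is hypothetical, the real one comes from CPUID 0x8000001F):

    #include <limits.h>
    #include <stdio.h>
    #include <stdlib.h>

    #define BITS_PER_LONG       (CHAR_BIT * sizeof(unsigned long))
    #define BITS_TO_LONGS(nr)   (((nr) + BITS_PER_LONG - 1) / BITS_PER_LONG)

    /* Userspace stand-in for bitmap_zalloc(): nbits rounded up to whole longs. */
    static unsigned long *bitmap_zalloc_model(unsigned int nbits)
    {
        return calloc(BITS_TO_LONGS(nbits), sizeof(unsigned long));
    }

    int main(void)
    {
        unsigned int max_sev_asid = 509;      /* hypothetical CPUID-reported limit */
        unsigned long *asid_bitmap = bitmap_zalloc_model(max_sev_asid);

        if (!asid_bitmap)
            return 1;
        printf("%u ASIDs -> %zu bytes of bitmap\n", max_sev_asid,
               BITS_TO_LONGS(max_sev_asid) * sizeof(unsigned long));
        free(asid_bitmap);
        return 0;
    }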
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index 533a327372c8..06412ba46aa3 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c | |||
@@ -397,6 +397,7 @@ struct loaded_vmcs { | |||
397 | int cpu; | 397 | int cpu; |
398 | bool launched; | 398 | bool launched; |
399 | bool nmi_known_unmasked; | 399 | bool nmi_known_unmasked; |
400 | bool hv_timer_armed; | ||
400 | /* Support for vnmi-less CPUs */ | 401 | /* Support for vnmi-less CPUs */ |
401 | int soft_vnmi_blocked; | 402 | int soft_vnmi_blocked; |
402 | ktime_t entry_time; | 403 | ktime_t entry_time; |
@@ -1019,6 +1020,8 @@ struct vcpu_vmx { | |||
1019 | int ple_window; | 1020 | int ple_window; |
1020 | bool ple_window_dirty; | 1021 | bool ple_window_dirty; |
1021 | 1022 | ||
1023 | bool req_immediate_exit; | ||
1024 | |||
1022 | /* Support for PML */ | 1025 | /* Support for PML */ |
1023 | #define PML_ENTITY_NUM 512 | 1026 | #define PML_ENTITY_NUM 512 |
1024 | struct page *pml_pg; | 1027 | struct page *pml_pg; |
@@ -2864,6 +2867,8 @@ static void vmx_prepare_switch_to_guest(struct kvm_vcpu *vcpu) | |||
2864 | u16 fs_sel, gs_sel; | 2867 | u16 fs_sel, gs_sel; |
2865 | int i; | 2868 | int i; |
2866 | 2869 | ||
2870 | vmx->req_immediate_exit = false; | ||
2871 | |||
2867 | if (vmx->loaded_cpu_state) | 2872 | if (vmx->loaded_cpu_state) |
2868 | return; | 2873 | return; |
2869 | 2874 | ||
@@ -5393,9 +5398,10 @@ static int vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4) | |||
5393 | * To use VMXON (and later other VMX instructions), a guest | 5398 | * To use VMXON (and later other VMX instructions), a guest |
5394 | * must first be able to turn on cr4.VMXE (see handle_vmon()). | 5399 | * must first be able to turn on cr4.VMXE (see handle_vmon()). |
5395 | * So basically the check on whether to allow nested VMX | 5400 | * So basically the check on whether to allow nested VMX |
5396 | * is here. | 5401 | * is here. We operate under the default treatment of SMM, |
5402 | * so VMX cannot be enabled under SMM. | ||
5397 | */ | 5403 | */ |
5398 | if (!nested_vmx_allowed(vcpu)) | 5404 | if (!nested_vmx_allowed(vcpu) || is_smm(vcpu)) |
5399 | return 1; | 5405 | return 1; |
5400 | } | 5406 | } |
5401 | 5407 | ||
@@ -6183,6 +6189,27 @@ static void vmx_complete_nested_posted_interrupt(struct kvm_vcpu *vcpu) | |||
6183 | nested_mark_vmcs12_pages_dirty(vcpu); | 6189 | nested_mark_vmcs12_pages_dirty(vcpu); |
6184 | } | 6190 | } |
6185 | 6191 | ||
6192 | static bool vmx_guest_apic_has_interrupt(struct kvm_vcpu *vcpu) | ||
6193 | { | ||
6194 | struct vcpu_vmx *vmx = to_vmx(vcpu); | ||
6195 | void *vapic_page; | ||
6196 | u32 vppr; | ||
6197 | int rvi; | ||
6198 | |||
6199 | if (WARN_ON_ONCE(!is_guest_mode(vcpu)) || | ||
6200 | !nested_cpu_has_vid(get_vmcs12(vcpu)) || | ||
6201 | WARN_ON_ONCE(!vmx->nested.virtual_apic_page)) | ||
6202 | return false; | ||
6203 | |||
6204 | rvi = vmcs_read16(GUEST_INTR_STATUS) & 0xff; | ||
6205 | |||
6206 | vapic_page = kmap(vmx->nested.virtual_apic_page); | ||
6207 | vppr = *((u32 *)(vapic_page + APIC_PROCPRI)); | ||
6208 | kunmap(vmx->nested.virtual_apic_page); | ||
6209 | |||
6210 | return ((rvi & 0xf0) > (vppr & 0xf0)); | ||
6211 | } | ||
6212 | |||
6186 | static inline bool kvm_vcpu_trigger_posted_interrupt(struct kvm_vcpu *vcpu, | 6213 | static inline bool kvm_vcpu_trigger_posted_interrupt(struct kvm_vcpu *vcpu, |
6187 | bool nested) | 6214 | bool nested) |
6188 | { | 6215 | { |
@@ -7966,6 +7993,9 @@ static __init int hardware_setup(void) | |||
7966 | kvm_x86_ops->enable_log_dirty_pt_masked = NULL; | 7993 | kvm_x86_ops->enable_log_dirty_pt_masked = NULL; |
7967 | } | 7994 | } |
7968 | 7995 | ||
7996 | if (!cpu_has_vmx_preemption_timer()) | ||
7997 | kvm_x86_ops->request_immediate_exit = __kvm_request_immediate_exit; | ||
7998 | |||
7969 | if (cpu_has_vmx_preemption_timer() && enable_preemption_timer) { | 7999 | if (cpu_has_vmx_preemption_timer() && enable_preemption_timer) { |
7970 | u64 vmx_msr; | 8000 | u64 vmx_msr; |
7971 | 8001 | ||
@@ -9208,7 +9238,8 @@ static int handle_pml_full(struct kvm_vcpu *vcpu) | |||
9208 | 9238 | ||
9209 | static int handle_preemption_timer(struct kvm_vcpu *vcpu) | 9239 | static int handle_preemption_timer(struct kvm_vcpu *vcpu) |
9210 | { | 9240 | { |
9211 | kvm_lapic_expired_hv_timer(vcpu); | 9241 | if (!to_vmx(vcpu)->req_immediate_exit) |
9242 | kvm_lapic_expired_hv_timer(vcpu); | ||
9212 | return 1; | 9243 | return 1; |
9213 | } | 9244 | } |
9214 | 9245 | ||
@@ -10595,24 +10626,43 @@ static void atomic_switch_perf_msrs(struct vcpu_vmx *vmx) | |||
10595 | msrs[i].host, false); | 10626 | msrs[i].host, false); |
10596 | } | 10627 | } |
10597 | 10628 | ||
10598 | static void vmx_arm_hv_timer(struct kvm_vcpu *vcpu) | 10629 | static void vmx_arm_hv_timer(struct vcpu_vmx *vmx, u32 val) |
10630 | { | ||
10631 | vmcs_write32(VMX_PREEMPTION_TIMER_VALUE, val); | ||
10632 | if (!vmx->loaded_vmcs->hv_timer_armed) | ||
10633 | vmcs_set_bits(PIN_BASED_VM_EXEC_CONTROL, | ||
10634 | PIN_BASED_VMX_PREEMPTION_TIMER); | ||
10635 | vmx->loaded_vmcs->hv_timer_armed = true; | ||
10636 | } | ||
10637 | |||
10638 | static void vmx_update_hv_timer(struct kvm_vcpu *vcpu) | ||
10599 | { | 10639 | { |
10600 | struct vcpu_vmx *vmx = to_vmx(vcpu); | 10640 | struct vcpu_vmx *vmx = to_vmx(vcpu); |
10601 | u64 tscl; | 10641 | u64 tscl; |
10602 | u32 delta_tsc; | 10642 | u32 delta_tsc; |
10603 | 10643 | ||
10604 | if (vmx->hv_deadline_tsc == -1) | 10644 | if (vmx->req_immediate_exit) { |
10645 | vmx_arm_hv_timer(vmx, 0); | ||
10605 | return; | 10646 | return; |
10647 | } | ||
10606 | 10648 | ||
10607 | tscl = rdtsc(); | 10649 | if (vmx->hv_deadline_tsc != -1) { |
10608 | if (vmx->hv_deadline_tsc > tscl) | 10650 | tscl = rdtsc(); |
10609 | /* sure to be 32 bit only because checked on set_hv_timer */ | 10651 | if (vmx->hv_deadline_tsc > tscl) |
10610 | delta_tsc = (u32)((vmx->hv_deadline_tsc - tscl) >> | 10652 | /* set_hv_timer ensures the delta fits in 32-bits */ |
10611 | cpu_preemption_timer_multi); | 10653 | delta_tsc = (u32)((vmx->hv_deadline_tsc - tscl) >> |
10612 | else | 10654 | cpu_preemption_timer_multi); |
10613 | delta_tsc = 0; | 10655 | else |
10656 | delta_tsc = 0; | ||
10614 | 10657 | ||
10615 | vmcs_write32(VMX_PREEMPTION_TIMER_VALUE, delta_tsc); | 10658 | vmx_arm_hv_timer(vmx, delta_tsc); |
10659 | return; | ||
10660 | } | ||
10661 | |||
10662 | if (vmx->loaded_vmcs->hv_timer_armed) | ||
10663 | vmcs_clear_bits(PIN_BASED_VM_EXEC_CONTROL, | ||
10664 | PIN_BASED_VMX_PREEMPTION_TIMER); | ||
10665 | vmx->loaded_vmcs->hv_timer_armed = false; | ||
10616 | } | 10666 | } |
10617 | 10667 | ||
10618 | static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu) | 10668 | static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu) |
@@ -10672,7 +10722,7 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu) | |||
10672 | 10722 | ||
10673 | atomic_switch_perf_msrs(vmx); | 10723 | atomic_switch_perf_msrs(vmx); |
10674 | 10724 | ||
10675 | vmx_arm_hv_timer(vcpu); | 10725 | vmx_update_hv_timer(vcpu); |
10676 | 10726 | ||
10677 | /* | 10727 | /* |
10678 | * If this vCPU has touched SPEC_CTRL, restore the guest's value if | 10728 | * If this vCPU has touched SPEC_CTRL, restore the guest's value if |
@@ -11427,16 +11477,18 @@ static void vmx_start_preemption_timer(struct kvm_vcpu *vcpu) | |||
11427 | u64 preemption_timeout = get_vmcs12(vcpu)->vmx_preemption_timer_value; | 11477 | u64 preemption_timeout = get_vmcs12(vcpu)->vmx_preemption_timer_value; |
11428 | struct vcpu_vmx *vmx = to_vmx(vcpu); | 11478 | struct vcpu_vmx *vmx = to_vmx(vcpu); |
11429 | 11479 | ||
11430 | if (vcpu->arch.virtual_tsc_khz == 0) | 11480 | /* |
11431 | return; | 11481 | * A timer value of zero is architecturally guaranteed to cause |
11432 | 11482 | * a VMExit prior to executing any instructions in the guest. | |
11433 | /* Make sure short timeouts reliably trigger an immediate vmexit. | 11483 | */ |
11434 | * hrtimer_start does not guarantee this. */ | 11484 | if (preemption_timeout == 0) { |
11435 | if (preemption_timeout <= 1) { | ||
11436 | vmx_preemption_timer_fn(&vmx->nested.preemption_timer); | 11485 | vmx_preemption_timer_fn(&vmx->nested.preemption_timer); |
11437 | return; | 11486 | return; |
11438 | } | 11487 | } |
11439 | 11488 | ||
11489 | if (vcpu->arch.virtual_tsc_khz == 0) | ||
11490 | return; | ||
11491 | |||
11440 | preemption_timeout <<= VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE; | 11492 | preemption_timeout <<= VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE; |
11441 | preemption_timeout *= 1000000; | 11493 | preemption_timeout *= 1000000; |
11442 | do_div(preemption_timeout, vcpu->arch.virtual_tsc_khz); | 11494 | do_div(preemption_timeout, vcpu->arch.virtual_tsc_khz); |
@@ -11646,11 +11698,15 @@ static int nested_vmx_check_apicv_controls(struct kvm_vcpu *vcpu, | |||
11646 | * bits 15:8 should be zero in posted_intr_nv, | 11698 | * bits 15:8 should be zero in posted_intr_nv, |
11647 | * the descriptor address has been already checked | 11699 | * the descriptor address has been already checked |
11648 | * in nested_get_vmcs12_pages. | 11700 | * in nested_get_vmcs12_pages. |
11701 | * | ||
11702 | * bits 5:0 of posted_intr_desc_addr should be zero. | ||
11649 | */ | 11703 | */ |
11650 | if (nested_cpu_has_posted_intr(vmcs12) && | 11704 | if (nested_cpu_has_posted_intr(vmcs12) && |
11651 | (!nested_cpu_has_vid(vmcs12) || | 11705 | (!nested_cpu_has_vid(vmcs12) || |
11652 | !nested_exit_intr_ack_set(vcpu) || | 11706 | !nested_exit_intr_ack_set(vcpu) || |
11653 | vmcs12->posted_intr_nv & 0xff00)) | 11707 | (vmcs12->posted_intr_nv & 0xff00) || |
11708 | (vmcs12->posted_intr_desc_addr & 0x3f) || | ||
11709 | (!page_address_valid(vcpu, vmcs12->posted_intr_desc_addr)))) | ||
11654 | return -EINVAL; | 11710 | return -EINVAL; |
11655 | 11711 | ||
11656 | /* tpr shadow is needed by all apicv features. */ | 11712 | /* tpr shadow is needed by all apicv features. */ |
@@ -12076,11 +12132,10 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12, | |||
12076 | 12132 | ||
12077 | exec_control = vmcs12->pin_based_vm_exec_control; | 12133 | exec_control = vmcs12->pin_based_vm_exec_control; |
12078 | 12134 | ||
12079 | /* Preemption timer setting is only taken from vmcs01. */ | 12135 | /* Preemption timer setting is computed directly in vmx_vcpu_run. */ |
12080 | exec_control &= ~PIN_BASED_VMX_PREEMPTION_TIMER; | ||
12081 | exec_control |= vmcs_config.pin_based_exec_ctrl; | 12136 | exec_control |= vmcs_config.pin_based_exec_ctrl; |
12082 | if (vmx->hv_deadline_tsc == -1) | 12137 | exec_control &= ~PIN_BASED_VMX_PREEMPTION_TIMER; |
12083 | exec_control &= ~PIN_BASED_VMX_PREEMPTION_TIMER; | 12138 | vmx->loaded_vmcs->hv_timer_armed = false; |
12084 | 12139 | ||
12085 | /* Posted interrupts setting is only taken from vmcs12. */ | 12140 | /* Posted interrupts setting is only taken from vmcs12. */ |
12086 | if (nested_cpu_has_posted_intr(vmcs12)) { | 12141 | if (nested_cpu_has_posted_intr(vmcs12)) { |
@@ -12318,6 +12373,9 @@ static int check_vmentry_prereqs(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12) | |||
12318 | vmcs12->guest_activity_state != GUEST_ACTIVITY_HLT) | 12373 | vmcs12->guest_activity_state != GUEST_ACTIVITY_HLT) |
12319 | return VMXERR_ENTRY_INVALID_CONTROL_FIELD; | 12374 | return VMXERR_ENTRY_INVALID_CONTROL_FIELD; |
12320 | 12375 | ||
12376 | if (nested_cpu_has_vpid(vmcs12) && !vmcs12->virtual_processor_id) | ||
12377 | return VMXERR_ENTRY_INVALID_CONTROL_FIELD; | ||
12378 | |||
12321 | if (nested_vmx_check_io_bitmap_controls(vcpu, vmcs12)) | 12379 | if (nested_vmx_check_io_bitmap_controls(vcpu, vmcs12)) |
12322 | return VMXERR_ENTRY_INVALID_CONTROL_FIELD; | 12380 | return VMXERR_ENTRY_INVALID_CONTROL_FIELD; |
12323 | 12381 | ||
@@ -12863,6 +12921,11 @@ static int vmx_check_nested_events(struct kvm_vcpu *vcpu, bool external_intr) | |||
12863 | return 0; | 12921 | return 0; |
12864 | } | 12922 | } |
12865 | 12923 | ||
12924 | static void vmx_request_immediate_exit(struct kvm_vcpu *vcpu) | ||
12925 | { | ||
12926 | to_vmx(vcpu)->req_immediate_exit = true; | ||
12927 | } | ||
12928 | |||
12866 | static u32 vmx_get_preemption_timer_value(struct kvm_vcpu *vcpu) | 12929 | static u32 vmx_get_preemption_timer_value(struct kvm_vcpu *vcpu) |
12867 | { | 12930 | { |
12868 | ktime_t remaining = | 12931 | ktime_t remaining = |
@@ -13253,12 +13316,7 @@ static void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason, | |||
13253 | vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, vmx->msr_autoload.host.nr); | 13316 | vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, vmx->msr_autoload.host.nr); |
13254 | vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, vmx->msr_autoload.guest.nr); | 13317 | vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, vmx->msr_autoload.guest.nr); |
13255 | vmcs_write64(TSC_OFFSET, vcpu->arch.tsc_offset); | 13318 | vmcs_write64(TSC_OFFSET, vcpu->arch.tsc_offset); |
13256 | if (vmx->hv_deadline_tsc == -1) | 13319 | |
13257 | vmcs_clear_bits(PIN_BASED_VM_EXEC_CONTROL, | ||
13258 | PIN_BASED_VMX_PREEMPTION_TIMER); | ||
13259 | else | ||
13260 | vmcs_set_bits(PIN_BASED_VM_EXEC_CONTROL, | ||
13261 | PIN_BASED_VMX_PREEMPTION_TIMER); | ||
13262 | if (kvm_has_tsc_control) | 13320 | if (kvm_has_tsc_control) |
13263 | decache_tsc_multiplier(vmx); | 13321 | decache_tsc_multiplier(vmx); |
13264 | 13322 | ||
@@ -13462,18 +13520,12 @@ static int vmx_set_hv_timer(struct kvm_vcpu *vcpu, u64 guest_deadline_tsc) | |||
13462 | return -ERANGE; | 13520 | return -ERANGE; |
13463 | 13521 | ||
13464 | vmx->hv_deadline_tsc = tscl + delta_tsc; | 13522 | vmx->hv_deadline_tsc = tscl + delta_tsc; |
13465 | vmcs_set_bits(PIN_BASED_VM_EXEC_CONTROL, | ||
13466 | PIN_BASED_VMX_PREEMPTION_TIMER); | ||
13467 | |||
13468 | return delta_tsc == 0; | 13523 | return delta_tsc == 0; |
13469 | } | 13524 | } |
13470 | 13525 | ||
13471 | static void vmx_cancel_hv_timer(struct kvm_vcpu *vcpu) | 13526 | static void vmx_cancel_hv_timer(struct kvm_vcpu *vcpu) |
13472 | { | 13527 | { |
13473 | struct vcpu_vmx *vmx = to_vmx(vcpu); | 13528 | to_vmx(vcpu)->hv_deadline_tsc = -1; |
13474 | vmx->hv_deadline_tsc = -1; | ||
13475 | vmcs_clear_bits(PIN_BASED_VM_EXEC_CONTROL, | ||
13476 | PIN_BASED_VMX_PREEMPTION_TIMER); | ||
13477 | } | 13529 | } |
13478 | #endif | 13530 | #endif |
13479 | 13531 | ||
@@ -13954,6 +14006,14 @@ static int vmx_set_nested_state(struct kvm_vcpu *vcpu, | |||
13954 | ~(KVM_STATE_NESTED_SMM_GUEST_MODE | KVM_STATE_NESTED_SMM_VMXON)) | 14006 | ~(KVM_STATE_NESTED_SMM_GUEST_MODE | KVM_STATE_NESTED_SMM_VMXON)) |
13955 | return -EINVAL; | 14007 | return -EINVAL; |
13956 | 14008 | ||
14009 | /* | ||
14010 | * SMM temporarily disables VMX, so we cannot be in guest mode, | ||
14011 | * nor can VMLAUNCH/VMRESUME be pending. Outside SMM, SMM flags | ||
14012 | * must be zero. | ||
14013 | */ | ||
14014 | if (is_smm(vcpu) ? kvm_state->flags : kvm_state->vmx.smm.flags) | ||
14015 | return -EINVAL; | ||
14016 | |||
13957 | if ((kvm_state->vmx.smm.flags & KVM_STATE_NESTED_SMM_GUEST_MODE) && | 14017 | if ((kvm_state->vmx.smm.flags & KVM_STATE_NESTED_SMM_GUEST_MODE) && |
13958 | !(kvm_state->vmx.smm.flags & KVM_STATE_NESTED_SMM_VMXON)) | 14018 | !(kvm_state->vmx.smm.flags & KVM_STATE_NESTED_SMM_VMXON)) |
13959 | return -EINVAL; | 14019 | return -EINVAL; |
@@ -14097,6 +14157,7 @@ static struct kvm_x86_ops vmx_x86_ops __ro_after_init = { | |||
14097 | .apicv_post_state_restore = vmx_apicv_post_state_restore, | 14157 | .apicv_post_state_restore = vmx_apicv_post_state_restore, |
14098 | .hwapic_irr_update = vmx_hwapic_irr_update, | 14158 | .hwapic_irr_update = vmx_hwapic_irr_update, |
14099 | .hwapic_isr_update = vmx_hwapic_isr_update, | 14159 | .hwapic_isr_update = vmx_hwapic_isr_update, |
14160 | .guest_apic_has_interrupt = vmx_guest_apic_has_interrupt, | ||
14100 | .sync_pir_to_irr = vmx_sync_pir_to_irr, | 14161 | .sync_pir_to_irr = vmx_sync_pir_to_irr, |
14101 | .deliver_posted_interrupt = vmx_deliver_posted_interrupt, | 14162 | .deliver_posted_interrupt = vmx_deliver_posted_interrupt, |
14102 | 14163 | ||
@@ -14130,6 +14191,7 @@ static struct kvm_x86_ops vmx_x86_ops __ro_after_init = { | |||
14130 | .umip_emulated = vmx_umip_emulated, | 14191 | .umip_emulated = vmx_umip_emulated, |
14131 | 14192 | ||
14132 | .check_nested_events = vmx_check_nested_events, | 14193 | .check_nested_events = vmx_check_nested_events, |
14194 | .request_immediate_exit = vmx_request_immediate_exit, | ||
14133 | 14195 | ||
14134 | .sched_in = vmx_sched_in, | 14196 | .sched_in = vmx_sched_in, |
14135 | 14197 | ||
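The VMX side now makes all preemption-timer decisions in one place just before VM entry: a pending immediate-exit request arms the timer with value 0 (guaranteed to expire before any guest instruction), an armed hv_deadline loads the remaining TSC delta, and otherwise the pin-based control bit is cleared, with hv_timer_armed caching the bit so it is only rewritten when it changes. A compact model of that decision tree; the TSC-to-timer-rate scaling is omitted:

    #include <stdint.h>
    #include <stdio.h>

    struct vmx_model {
        int      req_immediate_exit;
        int64_t  hv_deadline_tsc;     /* -1 = no deadline programmed */
        int      hv_timer_armed;      /* mirrors the VMCS pin-based control bit */
        uint32_t timer_value;         /* mirrors VMX_PREEMPTION_TIMER_VALUE */
    };

    static void arm_hv_timer(struct vmx_model *vmx, uint32_t val)
    {
        vmx->timer_value = val;
        vmx->hv_timer_armed = 1;      /* set PIN_BASED_VMX_PREEMPTION_TIMER once */
    }

    static void update_hv_timer(struct vmx_model *vmx, uint64_t tscl)
    {
        if (vmx->req_immediate_exit) {
            arm_hv_timer(vmx, 0);     /* value 0: exit before any guest insn */
            return;
        }
        if (vmx->hv_deadline_tsc != -1) {
            uint32_t delta = 0;

            if ((uint64_t)vmx->hv_deadline_tsc > tscl)
                delta = (uint32_t)(vmx->hv_deadline_tsc - tscl);
            arm_hv_timer(vmx, delta);
            return;
        }
        vmx->hv_timer_armed = 0;      /* nothing pending: clear the control bit */
    }

    int main(void)
    {
        struct vmx_model vmx = { .req_immediate_exit = 1, .hv_deadline_tsc = -1 };

        update_hv_timer(&vmx, 1000);
        printf("armed=%d value=%u\n", vmx.hv_timer_armed, vmx.timer_value);
        return 0;
    }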
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 542f6315444d..edbf00ec56b3 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c | |||
@@ -628,7 +628,7 @@ bool pdptrs_changed(struct kvm_vcpu *vcpu) | |||
628 | gfn_t gfn; | 628 | gfn_t gfn; |
629 | int r; | 629 | int r; |
630 | 630 | ||
631 | if (is_long_mode(vcpu) || !is_pae(vcpu)) | 631 | if (is_long_mode(vcpu) || !is_pae(vcpu) || !is_paging(vcpu)) |
632 | return false; | 632 | return false; |
633 | 633 | ||
634 | if (!test_bit(VCPU_EXREG_PDPTR, | 634 | if (!test_bit(VCPU_EXREG_PDPTR, |
@@ -2537,7 +2537,6 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info) | |||
2537 | break; | 2537 | break; |
2538 | case MSR_PLATFORM_INFO: | 2538 | case MSR_PLATFORM_INFO: |
2539 | if (!msr_info->host_initiated || | 2539 | if (!msr_info->host_initiated || |
2540 | data & ~MSR_PLATFORM_INFO_CPUID_FAULT || | ||
2541 | (!(data & MSR_PLATFORM_INFO_CPUID_FAULT) && | 2540 | (!(data & MSR_PLATFORM_INFO_CPUID_FAULT) && |
2542 | cpuid_fault_enabled(vcpu))) | 2541 | cpuid_fault_enabled(vcpu))) |
2543 | return 1; | 2542 | return 1; |
@@ -2780,6 +2779,9 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info) | |||
2780 | msr_info->data = vcpu->arch.osvw.status; | 2779 | msr_info->data = vcpu->arch.osvw.status; |
2781 | break; | 2780 | break; |
2782 | case MSR_PLATFORM_INFO: | 2781 | case MSR_PLATFORM_INFO: |
2782 | if (!msr_info->host_initiated && | ||
2783 | !vcpu->kvm->arch.guest_can_read_msr_platform_info) | ||
2784 | return 1; | ||
2783 | msr_info->data = vcpu->arch.msr_platform_info; | 2785 | msr_info->data = vcpu->arch.msr_platform_info; |
2784 | break; | 2786 | break; |
2785 | case MSR_MISC_FEATURES_ENABLES: | 2787 | case MSR_MISC_FEATURES_ENABLES: |
@@ -2927,6 +2929,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) | |||
2927 | case KVM_CAP_SPLIT_IRQCHIP: | 2929 | case KVM_CAP_SPLIT_IRQCHIP: |
2928 | case KVM_CAP_IMMEDIATE_EXIT: | 2930 | case KVM_CAP_IMMEDIATE_EXIT: |
2929 | case KVM_CAP_GET_MSR_FEATURES: | 2931 | case KVM_CAP_GET_MSR_FEATURES: |
2932 | case KVM_CAP_MSR_PLATFORM_INFO: | ||
2930 | r = 1; | 2933 | r = 1; |
2931 | break; | 2934 | break; |
2932 | case KVM_CAP_SYNC_REGS: | 2935 | case KVM_CAP_SYNC_REGS: |
@@ -4007,19 +4010,23 @@ long kvm_arch_vcpu_ioctl(struct file *filp, | |||
4007 | break; | 4010 | break; |
4008 | 4011 | ||
4009 | BUILD_BUG_ON(sizeof(user_data_size) != sizeof(user_kvm_nested_state->size)); | 4012 | BUILD_BUG_ON(sizeof(user_data_size) != sizeof(user_kvm_nested_state->size)); |
4013 | r = -EFAULT; | ||
4010 | if (get_user(user_data_size, &user_kvm_nested_state->size)) | 4014 | if (get_user(user_data_size, &user_kvm_nested_state->size)) |
4011 | return -EFAULT; | 4015 | break; |
4012 | 4016 | ||
4013 | r = kvm_x86_ops->get_nested_state(vcpu, user_kvm_nested_state, | 4017 | r = kvm_x86_ops->get_nested_state(vcpu, user_kvm_nested_state, |
4014 | user_data_size); | 4018 | user_data_size); |
4015 | if (r < 0) | 4019 | if (r < 0) |
4016 | return r; | 4020 | break; |
4017 | 4021 | ||
4018 | if (r > user_data_size) { | 4022 | if (r > user_data_size) { |
4019 | if (put_user(r, &user_kvm_nested_state->size)) | 4023 | if (put_user(r, &user_kvm_nested_state->size)) |
4020 | return -EFAULT; | 4024 | r = -EFAULT; |
4021 | return -E2BIG; | 4025 | else |
4026 | r = -E2BIG; | ||
4027 | break; | ||
4022 | } | 4028 | } |
4029 | |||
4023 | r = 0; | 4030 | r = 0; |
4024 | break; | 4031 | break; |
4025 | } | 4032 | } |
@@ -4031,19 +4038,21 @@ long kvm_arch_vcpu_ioctl(struct file *filp, | |||
4031 | if (!kvm_x86_ops->set_nested_state) | 4038 | if (!kvm_x86_ops->set_nested_state) |
4032 | break; | 4039 | break; |
4033 | 4040 | ||
4041 | r = -EFAULT; | ||
4034 | if (copy_from_user(&kvm_state, user_kvm_nested_state, sizeof(kvm_state))) | 4042 | if (copy_from_user(&kvm_state, user_kvm_nested_state, sizeof(kvm_state))) |
4035 | return -EFAULT; | 4043 | break; |
4036 | 4044 | ||
4045 | r = -EINVAL; | ||
4037 | if (kvm_state.size < sizeof(kvm_state)) | 4046 | if (kvm_state.size < sizeof(kvm_state)) |
4038 | return -EINVAL; | 4047 | break; |
4039 | 4048 | ||
4040 | if (kvm_state.flags & | 4049 | if (kvm_state.flags & |
4041 | ~(KVM_STATE_NESTED_RUN_PENDING | KVM_STATE_NESTED_GUEST_MODE)) | 4050 | ~(KVM_STATE_NESTED_RUN_PENDING | KVM_STATE_NESTED_GUEST_MODE)) |
4042 | return -EINVAL; | 4051 | break; |
4043 | 4052 | ||
4044 | /* nested_run_pending implies guest_mode. */ | 4053 | /* nested_run_pending implies guest_mode. */ |
4045 | if (kvm_state.flags == KVM_STATE_NESTED_RUN_PENDING) | 4054 | if (kvm_state.flags == KVM_STATE_NESTED_RUN_PENDING) |
4046 | return -EINVAL; | 4055 | break; |
4047 | 4056 | ||
4048 | r = kvm_x86_ops->set_nested_state(vcpu, user_kvm_nested_state, &kvm_state); | 4057 | r = kvm_x86_ops->set_nested_state(vcpu, user_kvm_nested_state, &kvm_state); |
4049 | break; | 4058 | break; |
@@ -4350,6 +4359,10 @@ split_irqchip_unlock: | |||
4350 | kvm->arch.pause_in_guest = true; | 4359 | kvm->arch.pause_in_guest = true; |
4351 | r = 0; | 4360 | r = 0; |
4352 | break; | 4361 | break; |
4362 | case KVM_CAP_MSR_PLATFORM_INFO: | ||
4363 | kvm->arch.guest_can_read_msr_platform_info = cap->args[0]; | ||
4364 | r = 0; | ||
4365 | break; | ||
4353 | default: | 4366 | default: |
4354 | r = -EINVAL; | 4367 | r = -EINVAL; |
4355 | break; | 4368 | break; |
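The KVM_CAP_MSR_PLATFORM_INFO case added above is a per-VM toggle: cap->args[0] lands in kvm->arch.guest_can_read_msr_platform_info, and the kvm_get_msr_common() hunk earlier in this file refuses guest reads when it is clear. As a hedged sketch (not part of this patch), a userspace VMM could switch guest reads off roughly like this, assuming <linux/kvm.h> exports the new capability number and vm_fd is the VM file descriptor:

	#include <linux/kvm.h>
	#include <sys/ioctl.h>

	static int disable_guest_platform_info(int vm_fd)
	{
		struct kvm_enable_cap cap = {
			.cap = KVM_CAP_MSR_PLATFORM_INFO,
			.args = { 0 },	/* 0: guest RDMSR of MSR_PLATFORM_INFO is refused */
		};

		return ioctl(vm_fd, KVM_ENABLE_CAP, &cap);
	}

The kvm_arch_init_vm() hunk further down defaults guest_can_read_msr_platform_info to true, so existing guests keep their current behaviour unless userspace opts in.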
@@ -7361,6 +7374,12 @@ void kvm_vcpu_reload_apic_access_page(struct kvm_vcpu *vcpu) | |||
7361 | } | 7374 | } |
7362 | EXPORT_SYMBOL_GPL(kvm_vcpu_reload_apic_access_page); | 7375 | EXPORT_SYMBOL_GPL(kvm_vcpu_reload_apic_access_page); |
7363 | 7376 | ||
7377 | void __kvm_request_immediate_exit(struct kvm_vcpu *vcpu) | ||
7378 | { | ||
7379 | smp_send_reschedule(vcpu->cpu); | ||
7380 | } | ||
7381 | EXPORT_SYMBOL_GPL(__kvm_request_immediate_exit); | ||
7382 | |||
7364 | /* | 7383 | /* |
7365 | * Returns 1 to let vcpu_run() continue the guest execution loop without | 7384 | * Returns 1 to let vcpu_run() continue the guest execution loop without |
7366 | * exiting to the userspace. Otherwise, the value will be returned to the | 7385 | * exiting to the userspace. Otherwise, the value will be returned to the |
@@ -7565,7 +7584,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu) | |||
7565 | 7584 | ||
7566 | if (req_immediate_exit) { | 7585 | if (req_immediate_exit) { |
7567 | kvm_make_request(KVM_REQ_EVENT, vcpu); | 7586 | kvm_make_request(KVM_REQ_EVENT, vcpu); |
7568 | smp_send_reschedule(vcpu->cpu); | 7587 | kvm_x86_ops->request_immediate_exit(vcpu); |
7569 | } | 7588 | } |
7570 | 7589 | ||
7571 | trace_kvm_entry(vcpu->vcpu_id); | 7590 | trace_kvm_entry(vcpu->vcpu_id); |
@@ -7829,6 +7848,29 @@ static int complete_emulated_mmio(struct kvm_vcpu *vcpu) | |||
7829 | return 0; | 7848 | return 0; |
7830 | } | 7849 | } |
7831 | 7850 | ||
7851 | /* Swap (qemu) user FPU context for the guest FPU context. */ | ||
7852 | static void kvm_load_guest_fpu(struct kvm_vcpu *vcpu) | ||
7853 | { | ||
7854 | preempt_disable(); | ||
7855 | copy_fpregs_to_fpstate(&vcpu->arch.user_fpu); | ||
7856 | /* PKRU is separately restored in kvm_x86_ops->run. */ | ||
7857 | __copy_kernel_to_fpregs(&vcpu->arch.guest_fpu.state, | ||
7858 | ~XFEATURE_MASK_PKRU); | ||
7859 | preempt_enable(); | ||
7860 | trace_kvm_fpu(1); | ||
7861 | } | ||
7862 | |||
7863 | /* When vcpu_run ends, restore user space FPU context. */ | ||
7864 | static void kvm_put_guest_fpu(struct kvm_vcpu *vcpu) | ||
7865 | { | ||
7866 | preempt_disable(); | ||
7867 | copy_fpregs_to_fpstate(&vcpu->arch.guest_fpu); | ||
7868 | copy_kernel_to_fpregs(&vcpu->arch.user_fpu.state); | ||
7869 | preempt_enable(); | ||
7870 | ++vcpu->stat.fpu_reload; | ||
7871 | trace_kvm_fpu(0); | ||
7872 | } | ||
7873 | |||
7832 | int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) | 7874 | int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) |
7833 | { | 7875 | { |
7834 | int r; | 7876 | int r; |
@@ -8177,7 +8219,7 @@ static int __set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) | |||
8177 | kvm_update_cpuid(vcpu); | 8219 | kvm_update_cpuid(vcpu); |
8178 | 8220 | ||
8179 | idx = srcu_read_lock(&vcpu->kvm->srcu); | 8221 | idx = srcu_read_lock(&vcpu->kvm->srcu); |
8180 | if (!is_long_mode(vcpu) && is_pae(vcpu)) { | 8222 | if (!is_long_mode(vcpu) && is_pae(vcpu) && is_paging(vcpu)) { |
8181 | load_pdptrs(vcpu, vcpu->arch.walk_mmu, kvm_read_cr3(vcpu)); | 8223 | load_pdptrs(vcpu, vcpu->arch.walk_mmu, kvm_read_cr3(vcpu)); |
8182 | mmu_reset_needed = 1; | 8224 | mmu_reset_needed = 1; |
8183 | } | 8225 | } |
@@ -8406,29 +8448,6 @@ static void fx_init(struct kvm_vcpu *vcpu) | |||
8406 | vcpu->arch.cr0 |= X86_CR0_ET; | 8448 | vcpu->arch.cr0 |= X86_CR0_ET; |
8407 | } | 8449 | } |
8408 | 8450 | ||
8409 | /* Swap (qemu) user FPU context for the guest FPU context. */ | ||
8410 | void kvm_load_guest_fpu(struct kvm_vcpu *vcpu) | ||
8411 | { | ||
8412 | preempt_disable(); | ||
8413 | copy_fpregs_to_fpstate(&vcpu->arch.user_fpu); | ||
8414 | /* PKRU is separately restored in kvm_x86_ops->run. */ | ||
8415 | __copy_kernel_to_fpregs(&vcpu->arch.guest_fpu.state, | ||
8416 | ~XFEATURE_MASK_PKRU); | ||
8417 | preempt_enable(); | ||
8418 | trace_kvm_fpu(1); | ||
8419 | } | ||
8420 | |||
8421 | /* When vcpu_run ends, restore user space FPU context. */ | ||
8422 | void kvm_put_guest_fpu(struct kvm_vcpu *vcpu) | ||
8423 | { | ||
8424 | preempt_disable(); | ||
8425 | copy_fpregs_to_fpstate(&vcpu->arch.guest_fpu); | ||
8426 | copy_kernel_to_fpregs(&vcpu->arch.user_fpu.state); | ||
8427 | preempt_enable(); | ||
8428 | ++vcpu->stat.fpu_reload; | ||
8429 | trace_kvm_fpu(0); | ||
8430 | } | ||
8431 | |||
8432 | void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu) | 8451 | void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu) |
8433 | { | 8452 | { |
8434 | void *wbinvd_dirty_mask = vcpu->arch.wbinvd_dirty_mask; | 8453 | void *wbinvd_dirty_mask = vcpu->arch.wbinvd_dirty_mask; |
@@ -8852,6 +8871,8 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type) | |||
8852 | kvm->arch.kvmclock_offset = -ktime_get_boot_ns(); | 8871 | kvm->arch.kvmclock_offset = -ktime_get_boot_ns(); |
8853 | pvclock_update_vm_gtod_copy(kvm); | 8872 | pvclock_update_vm_gtod_copy(kvm); |
8854 | 8873 | ||
8874 | kvm->arch.guest_can_read_msr_platform_info = true; | ||
8875 | |||
8855 | INIT_DELAYED_WORK(&kvm->arch.kvmclock_update_work, kvmclock_update_fn); | 8876 | INIT_DELAYED_WORK(&kvm->arch.kvmclock_update_work, kvmclock_update_fn); |
8856 | INIT_DELAYED_WORK(&kvm->arch.kvmclock_sync_work, kvmclock_sync_fn); | 8877 | INIT_DELAYED_WORK(&kvm->arch.kvmclock_sync_work, kvmclock_sync_fn); |
8857 | 8878 | ||
@@ -9200,6 +9221,13 @@ void kvm_arch_flush_shadow_memslot(struct kvm *kvm, | |||
9200 | kvm_page_track_flush_slot(kvm, slot); | 9221 | kvm_page_track_flush_slot(kvm, slot); |
9201 | } | 9222 | } |
9202 | 9223 | ||
9224 | static inline bool kvm_guest_apic_has_interrupt(struct kvm_vcpu *vcpu) | ||
9225 | { | ||
9226 | return (is_guest_mode(vcpu) && | ||
9227 | kvm_x86_ops->guest_apic_has_interrupt && | ||
9228 | kvm_x86_ops->guest_apic_has_interrupt(vcpu)); | ||
9229 | } | ||
9230 | |||
9203 | static inline bool kvm_vcpu_has_events(struct kvm_vcpu *vcpu) | 9231 | static inline bool kvm_vcpu_has_events(struct kvm_vcpu *vcpu) |
9204 | { | 9232 | { |
9205 | if (!list_empty_careful(&vcpu->async_pf.done)) | 9233 | if (!list_empty_careful(&vcpu->async_pf.done)) |
@@ -9224,7 +9252,8 @@ static inline bool kvm_vcpu_has_events(struct kvm_vcpu *vcpu) | |||
9224 | return true; | 9252 | return true; |
9225 | 9253 | ||
9226 | if (kvm_arch_interrupt_allowed(vcpu) && | 9254 | if (kvm_arch_interrupt_allowed(vcpu) && |
9227 | kvm_cpu_has_interrupt(vcpu)) | 9255 | (kvm_cpu_has_interrupt(vcpu) || |
9256 | kvm_guest_apic_has_interrupt(vcpu))) | ||
9228 | return true; | 9257 | return true; |
9229 | 9258 | ||
9230 | if (kvm_hv_has_stimer_pending(vcpu)) | 9259 | if (kvm_hv_has_stimer_pending(vcpu)) |
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c index 7a8fc26c1115..faca978ebf9d 100644 --- a/arch/x86/mm/init.c +++ b/arch/x86/mm/init.c | |||
@@ -815,10 +815,14 @@ void free_kernel_image_pages(void *begin, void *end) | |||
815 | set_memory_np_noalias(begin_ul, len_pages); | 815 | set_memory_np_noalias(begin_ul, len_pages); |
816 | } | 816 | } |
817 | 817 | ||
818 | void __weak mem_encrypt_free_decrypted_mem(void) { } | ||
819 | |||
818 | void __ref free_initmem(void) | 820 | void __ref free_initmem(void) |
819 | { | 821 | { |
820 | e820__reallocate_tables(); | 822 | e820__reallocate_tables(); |
821 | 823 | ||
824 | mem_encrypt_free_decrypted_mem(); | ||
825 | |||
822 | free_kernel_image_pages(&__init_begin, &__init_end); | 826 | free_kernel_image_pages(&__init_begin, &__init_end); |
823 | } | 827 | } |
824 | 828 | ||
diff --git a/arch/x86/mm/mem_encrypt.c b/arch/x86/mm/mem_encrypt.c index b2de398d1fd3..006f373f54ab 100644 --- a/arch/x86/mm/mem_encrypt.c +++ b/arch/x86/mm/mem_encrypt.c | |||
@@ -348,6 +348,30 @@ bool sev_active(void) | |||
348 | EXPORT_SYMBOL(sev_active); | 348 | EXPORT_SYMBOL(sev_active); |
349 | 349 | ||
350 | /* Architecture __weak replacement functions */ | 350 | /* Architecture __weak replacement functions */ |
351 | void __init mem_encrypt_free_decrypted_mem(void) | ||
352 | { | ||
353 | unsigned long vaddr, vaddr_end, npages; | ||
354 | int r; | ||
355 | |||
356 | vaddr = (unsigned long)__start_bss_decrypted_unused; | ||
357 | vaddr_end = (unsigned long)__end_bss_decrypted; | ||
358 | npages = (vaddr_end - vaddr) >> PAGE_SHIFT; | ||
359 | |||
360 | /* | ||
361 | * The unused memory range was mapped decrypted, change the encryption | ||
362 | * attribute from decrypted to encrypted before freeing it. | ||
363 | */ | ||
364 | if (mem_encrypt_active()) { | ||
365 | r = set_memory_encrypted(vaddr, npages); | ||
366 | if (r) { | ||
367 | pr_warn("failed to free unused decrypted pages\n"); | ||
368 | return; | ||
369 | } | ||
370 | } | ||
371 | |||
372 | free_init_pages("unused decrypted", vaddr, vaddr_end); | ||
373 | } | ||
374 | |||
351 | void __init mem_encrypt_init(void) | 375 | void __init mem_encrypt_init(void) |
352 | { | 376 | { |
353 | if (!sme_me_mask) | 377 | if (!sme_me_mask) |
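The pairing of the __weak stub in init.c with the strong definition here follows the kernel's usual weak-override idiom: generic code calls a do-nothing default, and an architecture that needs the hook links in a strong symbol that the linker prefers. A minimal sketch of the mechanism follows; the names are hypothetical, only the linkage trick mirrors mem_encrypt_free_decrypted_mem().

	/* file: generic.c -- default used when no architecture override exists */
	#define __weak __attribute__((__weak__))	/* as in the kernel's compiler attributes */

	void __weak arch_free_unused_mem(void)
	{
		/* intentionally empty: most architectures have nothing to free */
	}

	void generic_free_initmem(void)
	{
		arch_free_unused_mem();	/* binds to the strong symbol when one is linked in */
	}

	/* file: arch.c -- strong definition, replaces the weak stub at link time */
	void arch_free_unused_mem(void)
	{
		/* arch-specific work, e.g. re-encrypt and release a decrypted region */
	}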
diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c index ae394552fb94..089e78c4effd 100644 --- a/arch/x86/mm/pgtable.c +++ b/arch/x86/mm/pgtable.c | |||
@@ -637,6 +637,15 @@ void __native_set_fixmap(enum fixed_addresses idx, pte_t pte) | |||
637 | { | 637 | { |
638 | unsigned long address = __fix_to_virt(idx); | 638 | unsigned long address = __fix_to_virt(idx); |
639 | 639 | ||
640 | #ifdef CONFIG_X86_64 | ||
641 | /* | ||
642 | * Ensure that the static initial page tables are covering the | ||
643 | * fixmap completely. | ||
644 | */ | ||
645 | BUILD_BUG_ON(__end_of_permanent_fixed_addresses > | ||
646 | (FIXMAP_PMD_NUM * PTRS_PER_PTE)); | ||
647 | #endif | ||
648 | |||
640 | if (idx >= __end_of_fixed_addresses) { | 649 | if (idx >= __end_of_fixed_addresses) { |
641 | BUG(); | 650 | BUG(); |
642 | return; | 651 | return; |
diff --git a/arch/x86/xen/mmu_pv.c b/arch/x86/xen/mmu_pv.c index 2fe5c9b1816b..dd461c0167ef 100644 --- a/arch/x86/xen/mmu_pv.c +++ b/arch/x86/xen/mmu_pv.c | |||
@@ -1907,7 +1907,7 @@ void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn) | |||
1907 | /* L3_k[511] -> level2_fixmap_pgt */ | 1907 | /* L3_k[511] -> level2_fixmap_pgt */ |
1908 | convert_pfn_mfn(level3_kernel_pgt); | 1908 | convert_pfn_mfn(level3_kernel_pgt); |
1909 | 1909 | ||
1910 | /* L3_k[511][506] -> level1_fixmap_pgt */ | 1910 | /* L3_k[511][508-FIXMAP_PMD_NUM ... 507] -> level1_fixmap_pgt */ |
1911 | convert_pfn_mfn(level2_fixmap_pgt); | 1911 | convert_pfn_mfn(level2_fixmap_pgt); |
1912 | 1912 | ||
1913 | /* We get [511][511] and have Xen's version of level2_kernel_pgt */ | 1913 | /* We get [511][511] and have Xen's version of level2_kernel_pgt */ |
@@ -1952,7 +1952,11 @@ void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn) | |||
1952 | set_page_prot(level2_ident_pgt, PAGE_KERNEL_RO); | 1952 | set_page_prot(level2_ident_pgt, PAGE_KERNEL_RO); |
1953 | set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO); | 1953 | set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO); |
1954 | set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO); | 1954 | set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO); |
1955 | set_page_prot(level1_fixmap_pgt, PAGE_KERNEL_RO); | 1955 | |
1956 | for (i = 0; i < FIXMAP_PMD_NUM; i++) { | ||
1957 | set_page_prot(level1_fixmap_pgt + i * PTRS_PER_PTE, | ||
1958 | PAGE_KERNEL_RO); | ||
1959 | } | ||
1956 | 1960 | ||
1957 | /* Pin down new L4 */ | 1961 | /* Pin down new L4 */ |
1958 | pin_pagetable_pfn(MMUEXT_PIN_L4_TABLE, | 1962 | pin_pagetable_pfn(MMUEXT_PIN_L4_TABLE, |
diff --git a/arch/x86/xen/pmu.c b/arch/x86/xen/pmu.c index 7d00d4ad44d4..95997e6c0696 100644 --- a/arch/x86/xen/pmu.c +++ b/arch/x86/xen/pmu.c | |||
@@ -478,7 +478,7 @@ static void xen_convert_regs(const struct xen_pmu_regs *xen_regs, | |||
478 | irqreturn_t xen_pmu_irq_handler(int irq, void *dev_id) | 478 | irqreturn_t xen_pmu_irq_handler(int irq, void *dev_id) |
479 | { | 479 | { |
480 | int err, ret = IRQ_NONE; | 480 | int err, ret = IRQ_NONE; |
481 | struct pt_regs regs; | 481 | struct pt_regs regs = {0}; |
482 | const struct xen_pmu_data *xenpmu_data = get_xenpmu_data(); | 482 | const struct xen_pmu_data *xenpmu_data = get_xenpmu_data(); |
483 | uint8_t xenpmu_flags = get_xenpmu_flags(); | 483 | uint8_t xenpmu_flags = get_xenpmu_flags(); |
484 | 484 | ||
diff --git a/block/bio.c b/block/bio.c index 8c680a776171..0093bed81c0e 100644 --- a/block/bio.c +++ b/block/bio.c | |||
@@ -1684,7 +1684,7 @@ void generic_end_io_acct(struct request_queue *q, int req_op, | |||
1684 | const int sgrp = op_stat_group(req_op); | 1684 | const int sgrp = op_stat_group(req_op); |
1685 | int cpu = part_stat_lock(); | 1685 | int cpu = part_stat_lock(); |
1686 | 1686 | ||
1687 | part_stat_add(cpu, part, ticks[sgrp], duration); | 1687 | part_stat_add(cpu, part, nsecs[sgrp], jiffies_to_nsecs(duration)); |
1688 | part_round_stats(q, cpu, part); | 1688 | part_round_stats(q, cpu, part); |
1689 | part_dec_in_flight(q, part, op_is_write(req_op)); | 1689 | part_dec_in_flight(q, part, op_is_write(req_op)); |
1690 | 1690 | ||
diff --git a/block/blk-core.c b/block/blk-core.c index 4dbc93f43b38..cff0a60ee200 100644 --- a/block/blk-core.c +++ b/block/blk-core.c | |||
@@ -2733,17 +2733,15 @@ void blk_account_io_done(struct request *req, u64 now) | |||
2733 | * containing request is enough. | 2733 | * containing request is enough. |
2734 | */ | 2734 | */ |
2735 | if (blk_do_io_stat(req) && !(req->rq_flags & RQF_FLUSH_SEQ)) { | 2735 | if (blk_do_io_stat(req) && !(req->rq_flags & RQF_FLUSH_SEQ)) { |
2736 | unsigned long duration; | ||
2737 | const int sgrp = op_stat_group(req_op(req)); | 2736 | const int sgrp = op_stat_group(req_op(req)); |
2738 | struct hd_struct *part; | 2737 | struct hd_struct *part; |
2739 | int cpu; | 2738 | int cpu; |
2740 | 2739 | ||
2741 | duration = nsecs_to_jiffies(now - req->start_time_ns); | ||
2742 | cpu = part_stat_lock(); | 2740 | cpu = part_stat_lock(); |
2743 | part = req->part; | 2741 | part = req->part; |
2744 | 2742 | ||
2745 | part_stat_inc(cpu, part, ios[sgrp]); | 2743 | part_stat_inc(cpu, part, ios[sgrp]); |
2746 | part_stat_add(cpu, part, ticks[sgrp], duration); | 2744 | part_stat_add(cpu, part, nsecs[sgrp], now - req->start_time_ns); |
2747 | part_round_stats(req->q, cpu, part); | 2745 | part_round_stats(req->q, cpu, part); |
2748 | part_dec_in_flight(req->q, part, rq_data_dir(req)); | 2746 | part_dec_in_flight(req->q, part, rq_data_dir(req)); |
2749 | 2747 | ||
diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c index 94e1ed667b6e..41317c50a446 100644 --- a/block/blk-mq-tag.c +++ b/block/blk-mq-tag.c | |||
@@ -322,16 +322,11 @@ void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_iter_fn *fn, | |||
322 | 322 | ||
323 | /* | 323 | /* |
324 | * __blk_mq_update_nr_hw_queues will update the nr_hw_queues and | 324 | * __blk_mq_update_nr_hw_queues will update the nr_hw_queues and |
325 | * queue_hw_ctx after freeze the queue. So we could use q_usage_counter | 325 | * queue_hw_ctx after freezing the queue, so we use q_usage_counter |
326 | * to avoid race with it. __blk_mq_update_nr_hw_queues will users | 326 | * to avoid race with it. |
327 | * synchronize_rcu to ensure all of the users go out of the critical | ||
328 | * section below and see zeroed q_usage_counter. | ||
329 | */ | 327 | */ |
330 | rcu_read_lock(); | 328 | if (!percpu_ref_tryget(&q->q_usage_counter)) |
331 | if (percpu_ref_is_zero(&q->q_usage_counter)) { | ||
332 | rcu_read_unlock(); | ||
333 | return; | 329 | return; |
334 | } | ||
335 | 330 | ||
336 | queue_for_each_hw_ctx(q, hctx, i) { | 331 | queue_for_each_hw_ctx(q, hctx, i) { |
337 | struct blk_mq_tags *tags = hctx->tags; | 332 | struct blk_mq_tags *tags = hctx->tags; |
@@ -347,7 +342,7 @@ void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_iter_fn *fn, | |||
347 | bt_for_each(hctx, &tags->breserved_tags, fn, priv, true); | 342 | bt_for_each(hctx, &tags->breserved_tags, fn, priv, true); |
348 | bt_for_each(hctx, &tags->bitmap_tags, fn, priv, false); | 343 | bt_for_each(hctx, &tags->bitmap_tags, fn, priv, false); |
349 | } | 344 | } |
350 | rcu_read_unlock(); | 345 | blk_queue_exit(q); |
351 | } | 346 | } |
352 | 347 | ||
353 | static int bt_alloc(struct sbitmap_queue *bt, unsigned int depth, | 348 | static int bt_alloc(struct sbitmap_queue *bt, unsigned int depth, |
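Rather than peeking at the counter under RCU, the iterator now pins the queue with a reference it may fail to get; blk_queue_exit() is the matching put on q->q_usage_counter. Reduced to its shape (the walk_queue() wrapper below is hypothetical), the guard looks like this:

	#include <linux/blkdev.h>
	#include <linux/percpu-refcount.h>

	static void walk_queue(struct request_queue *q)
	{
		/* Fails when the count has already dropped to zero (queue frozen/gone). */
		if (!percpu_ref_tryget(&q->q_usage_counter))
			return;

		/* ... safe to iterate q->queue_hw_ctx here ... */

		percpu_ref_put(&q->q_usage_counter);	/* what blk_queue_exit(q) boils down to */
	}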
diff --git a/block/blk-mq.c b/block/blk-mq.c index 85a1c1a59c72..e3c39ea8e17b 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c | |||
@@ -1628,7 +1628,7 @@ void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule) | |||
1628 | BUG_ON(!rq->q); | 1628 | BUG_ON(!rq->q); |
1629 | if (rq->mq_ctx != this_ctx) { | 1629 | if (rq->mq_ctx != this_ctx) { |
1630 | if (this_ctx) { | 1630 | if (this_ctx) { |
1631 | trace_block_unplug(this_q, depth, from_schedule); | 1631 | trace_block_unplug(this_q, depth, !from_schedule); |
1632 | blk_mq_sched_insert_requests(this_q, this_ctx, | 1632 | blk_mq_sched_insert_requests(this_q, this_ctx, |
1633 | &ctx_list, | 1633 | &ctx_list, |
1634 | from_schedule); | 1634 | from_schedule); |
@@ -1648,7 +1648,7 @@ void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule) | |||
1648 | * on 'ctx_list'. Do those. | 1648 | * on 'ctx_list'. Do those. |
1649 | */ | 1649 | */ |
1650 | if (this_ctx) { | 1650 | if (this_ctx) { |
1651 | trace_block_unplug(this_q, depth, from_schedule); | 1651 | trace_block_unplug(this_q, depth, !from_schedule); |
1652 | blk_mq_sched_insert_requests(this_q, this_ctx, &ctx_list, | 1652 | blk_mq_sched_insert_requests(this_q, this_ctx, &ctx_list, |
1653 | from_schedule); | 1653 | from_schedule); |
1654 | } | 1654 | } |
diff --git a/block/elevator.c b/block/elevator.c index 6a06b5d040e5..fae58b2f906f 100644 --- a/block/elevator.c +++ b/block/elevator.c | |||
@@ -609,7 +609,7 @@ void elv_drain_elevator(struct request_queue *q) | |||
609 | 609 | ||
610 | while (e->type->ops.sq.elevator_dispatch_fn(q, 1)) | 610 | while (e->type->ops.sq.elevator_dispatch_fn(q, 1)) |
611 | ; | 611 | ; |
612 | if (q->nr_sorted && printed++ < 10) { | 612 | if (q->nr_sorted && !blk_queue_is_zoned(q) && printed++ < 10 ) { |
613 | printk(KERN_ERR "%s: forced dispatching is broken " | 613 | printk(KERN_ERR "%s: forced dispatching is broken " |
614 | "(nr_sorted=%u), please report this\n", | 614 | "(nr_sorted=%u), please report this\n", |
615 | q->elevator->type->elevator_name, q->nr_sorted); | 615 | q->elevator->type->elevator_name, q->nr_sorted); |
diff --git a/block/genhd.c b/block/genhd.c index 8cc719a37b32..be5bab20b2ab 100644 --- a/block/genhd.c +++ b/block/genhd.c | |||
@@ -1343,18 +1343,18 @@ static int diskstats_show(struct seq_file *seqf, void *v) | |||
1343 | part_stat_read(hd, ios[STAT_READ]), | 1343 | part_stat_read(hd, ios[STAT_READ]), |
1344 | part_stat_read(hd, merges[STAT_READ]), | 1344 | part_stat_read(hd, merges[STAT_READ]), |
1345 | part_stat_read(hd, sectors[STAT_READ]), | 1345 | part_stat_read(hd, sectors[STAT_READ]), |
1346 | jiffies_to_msecs(part_stat_read(hd, ticks[STAT_READ])), | 1346 | (unsigned int)part_stat_read_msecs(hd, STAT_READ), |
1347 | part_stat_read(hd, ios[STAT_WRITE]), | 1347 | part_stat_read(hd, ios[STAT_WRITE]), |
1348 | part_stat_read(hd, merges[STAT_WRITE]), | 1348 | part_stat_read(hd, merges[STAT_WRITE]), |
1349 | part_stat_read(hd, sectors[STAT_WRITE]), | 1349 | part_stat_read(hd, sectors[STAT_WRITE]), |
1350 | jiffies_to_msecs(part_stat_read(hd, ticks[STAT_WRITE])), | 1350 | (unsigned int)part_stat_read_msecs(hd, STAT_WRITE), |
1351 | inflight[0], | 1351 | inflight[0], |
1352 | jiffies_to_msecs(part_stat_read(hd, io_ticks)), | 1352 | jiffies_to_msecs(part_stat_read(hd, io_ticks)), |
1353 | jiffies_to_msecs(part_stat_read(hd, time_in_queue)), | 1353 | jiffies_to_msecs(part_stat_read(hd, time_in_queue)), |
1354 | part_stat_read(hd, ios[STAT_DISCARD]), | 1354 | part_stat_read(hd, ios[STAT_DISCARD]), |
1355 | part_stat_read(hd, merges[STAT_DISCARD]), | 1355 | part_stat_read(hd, merges[STAT_DISCARD]), |
1356 | part_stat_read(hd, sectors[STAT_DISCARD]), | 1356 | part_stat_read(hd, sectors[STAT_DISCARD]), |
1357 | jiffies_to_msecs(part_stat_read(hd, ticks[STAT_DISCARD])) | 1357 | (unsigned int)part_stat_read_msecs(hd, STAT_DISCARD) |
1358 | ); | 1358 | ); |
1359 | } | 1359 | } |
1360 | disk_part_iter_exit(&piter); | 1360 | disk_part_iter_exit(&piter); |
diff --git a/block/partition-generic.c b/block/partition-generic.c index 5a8975a1201c..d3d14e81fb12 100644 --- a/block/partition-generic.c +++ b/block/partition-generic.c | |||
@@ -136,18 +136,18 @@ ssize_t part_stat_show(struct device *dev, | |||
136 | part_stat_read(p, ios[STAT_READ]), | 136 | part_stat_read(p, ios[STAT_READ]), |
137 | part_stat_read(p, merges[STAT_READ]), | 137 | part_stat_read(p, merges[STAT_READ]), |
138 | (unsigned long long)part_stat_read(p, sectors[STAT_READ]), | 138 | (unsigned long long)part_stat_read(p, sectors[STAT_READ]), |
139 | jiffies_to_msecs(part_stat_read(p, ticks[STAT_READ])), | 139 | (unsigned int)part_stat_read_msecs(p, STAT_READ), |
140 | part_stat_read(p, ios[STAT_WRITE]), | 140 | part_stat_read(p, ios[STAT_WRITE]), |
141 | part_stat_read(p, merges[STAT_WRITE]), | 141 | part_stat_read(p, merges[STAT_WRITE]), |
142 | (unsigned long long)part_stat_read(p, sectors[STAT_WRITE]), | 142 | (unsigned long long)part_stat_read(p, sectors[STAT_WRITE]), |
143 | jiffies_to_msecs(part_stat_read(p, ticks[STAT_WRITE])), | 143 | (unsigned int)part_stat_read_msecs(p, STAT_WRITE), |
144 | inflight[0], | 144 | inflight[0], |
145 | jiffies_to_msecs(part_stat_read(p, io_ticks)), | 145 | jiffies_to_msecs(part_stat_read(p, io_ticks)), |
146 | jiffies_to_msecs(part_stat_read(p, time_in_queue)), | 146 | jiffies_to_msecs(part_stat_read(p, time_in_queue)), |
147 | part_stat_read(p, ios[STAT_DISCARD]), | 147 | part_stat_read(p, ios[STAT_DISCARD]), |
148 | part_stat_read(p, merges[STAT_DISCARD]), | 148 | part_stat_read(p, merges[STAT_DISCARD]), |
149 | (unsigned long long)part_stat_read(p, sectors[STAT_DISCARD]), | 149 | (unsigned long long)part_stat_read(p, sectors[STAT_DISCARD]), |
150 | jiffies_to_msecs(part_stat_read(p, ticks[STAT_DISCARD]))); | 150 | (unsigned int)part_stat_read_msecs(p, STAT_DISCARD)); |
151 | } | 151 | } |
152 | 152 | ||
153 | ssize_t part_inflight_show(struct device *dev, struct device_attribute *attr, | 153 | ssize_t part_inflight_show(struct device *dev, struct device_attribute *attr, |
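Both stat files above move from the jiffies-based ticks[] counters to the new nanosecond nsecs[] counters and print them through part_stat_read_msecs(), whose definition is not part of this hunk. Presumably it is little more than a ns-to-ms division over the per-group counter, along the lines of the hypothetical macro below (the real helper lives in the genhd headers):

	#include <linux/genhd.h>
	#include <linux/math64.h>
	#include <linux/time64.h>

	/* Hypothetical stand-in for the helper used above; only the unit conversion matters. */
	#define example_part_stat_read_msecs(part, which)			\
		div_u64(part_stat_read(part, nsecs[which]), NSEC_PER_MSEC)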
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c index 599e01bcdef2..a9dd4ea7467d 100644 --- a/drivers/ata/libata-core.c +++ b/drivers/ata/libata-core.c | |||
@@ -5359,10 +5359,20 @@ void ata_qc_complete(struct ata_queued_cmd *qc) | |||
5359 | */ | 5359 | */ |
5360 | int ata_qc_complete_multiple(struct ata_port *ap, u64 qc_active) | 5360 | int ata_qc_complete_multiple(struct ata_port *ap, u64 qc_active) |
5361 | { | 5361 | { |
5362 | u64 done_mask, ap_qc_active = ap->qc_active; | ||
5362 | int nr_done = 0; | 5363 | int nr_done = 0; |
5363 | u64 done_mask; | ||
5364 | 5364 | ||
5365 | done_mask = ap->qc_active ^ qc_active; | 5365 | /* |
5366 | * If the internal tag is set on ap->qc_active, then we care about | ||
5367 | * bit0 on the passed in qc_active mask. Move that bit up to match | ||
5368 | * the internal tag. | ||
5369 | */ | ||
5370 | if (ap_qc_active & (1ULL << ATA_TAG_INTERNAL)) { | ||
5371 | qc_active |= (qc_active & 0x01) << ATA_TAG_INTERNAL; | ||
5372 | qc_active ^= qc_active & 0x01; | ||
5373 | } | ||
5374 | |||
5375 | done_mask = ap_qc_active ^ qc_active; | ||
5366 | 5376 | ||
5367 | if (unlikely(done_mask & qc_active)) { | 5377 | if (unlikely(done_mask & qc_active)) { |
5368 | ata_port_err(ap, "illegal qc_active transition (%08llx->%08llx)\n", | 5378 | ata_port_err(ap, "illegal qc_active transition (%08llx->%08llx)\n", |
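The remap above exists because the controller reports the internal command in bit 0 of the mask it hands in, while ap->qc_active tracks that command at the ATA_TAG_INTERNAL position. A standalone illustration of the mask arithmetic follows, assuming ATA_TAG_INTERNAL is 32 (the value this kernel uses); qc_active is the set of commands still outstanding, so an empty XOR result means nothing has completed yet.

	#include <stdio.h>
	#include <stdint.h>

	#define ATA_TAG_INTERNAL 32	/* assumed value, one past the normal NCQ tags */

	int main(void)
	{
		uint64_t ap_qc_active = 1ULL << ATA_TAG_INTERNAL; /* internal command outstanding */
		uint64_t qc_active = 0x1;   /* controller still reports it active, but in bit 0 */

		if (ap_qc_active & (1ULL << ATA_TAG_INTERNAL)) {
			qc_active |= (qc_active & 0x01) << ATA_TAG_INTERNAL; /* move bit 0 up */
			qc_active ^= qc_active & 0x01;			     /* clear the alias */
		}

		/* The bits line up again: 0 means the internal command has not completed. */
		printf("done_mask = %#llx\n",
		       (unsigned long long)(ap_qc_active ^ qc_active));
		return 0;
	}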
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c index 48f622728ce6..f2b6f4da1034 100644 --- a/drivers/block/floppy.c +++ b/drivers/block/floppy.c | |||
@@ -3467,6 +3467,9 @@ static int fd_locked_ioctl(struct block_device *bdev, fmode_t mode, unsigned int | |||
3467 | (struct floppy_struct **)&outparam); | 3467 | (struct floppy_struct **)&outparam); |
3468 | if (ret) | 3468 | if (ret) |
3469 | return ret; | 3469 | return ret; |
3470 | memcpy(&inparam.g, outparam, | ||
3471 | offsetof(struct floppy_struct, name)); | ||
3472 | outparam = &inparam.g; | ||
3470 | break; | 3473 | break; |
3471 | case FDMSGON: | 3474 | case FDMSGON: |
3472 | UDP->flags |= FTD_MSG; | 3475 | UDP->flags |= FTD_MSG; |
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c index a71d817e900d..429d20131c7e 100644 --- a/drivers/block/xen-blkfront.c +++ b/drivers/block/xen-blkfront.c | |||
@@ -2670,8 +2670,8 @@ static void purge_persistent_grants(struct blkfront_info *info) | |||
2670 | list_del(&gnt_list_entry->node); | 2670 | list_del(&gnt_list_entry->node); |
2671 | gnttab_end_foreign_access(gnt_list_entry->gref, 0, 0UL); | 2671 | gnttab_end_foreign_access(gnt_list_entry->gref, 0, 0UL); |
2672 | rinfo->persistent_gnts_c--; | 2672 | rinfo->persistent_gnts_c--; |
2673 | __free_page(gnt_list_entry->page); | 2673 | gnt_list_entry->gref = GRANT_INVALID_REF; |
2674 | kfree(gnt_list_entry); | 2674 | list_add_tail(&gnt_list_entry->node, &rinfo->grants); |
2675 | } | 2675 | } |
2676 | 2676 | ||
2677 | spin_unlock_irqrestore(&rinfo->ring_lock, flags); | 2677 | spin_unlock_irqrestore(&rinfo->ring_lock, flags); |
diff --git a/drivers/bluetooth/hci_ldisc.c b/drivers/bluetooth/hci_ldisc.c index 963bb0309e25..ea6238ed5c0e 100644 --- a/drivers/bluetooth/hci_ldisc.c +++ b/drivers/bluetooth/hci_ldisc.c | |||
@@ -543,6 +543,8 @@ static void hci_uart_tty_close(struct tty_struct *tty) | |||
543 | } | 543 | } |
544 | clear_bit(HCI_UART_PROTO_SET, &hu->flags); | 544 | clear_bit(HCI_UART_PROTO_SET, &hu->flags); |
545 | 545 | ||
546 | percpu_free_rwsem(&hu->proto_lock); | ||
547 | |||
546 | kfree(hu); | 548 | kfree(hu); |
547 | } | 549 | } |
548 | 550 | ||
diff --git a/drivers/clk/x86/clk-pmc-atom.c b/drivers/clk/x86/clk-pmc-atom.c index 08ef69945ffb..d977193842df 100644 --- a/drivers/clk/x86/clk-pmc-atom.c +++ b/drivers/clk/x86/clk-pmc-atom.c | |||
@@ -55,6 +55,7 @@ struct clk_plt_data { | |||
55 | u8 nparents; | 55 | u8 nparents; |
56 | struct clk_plt *clks[PMC_CLK_NUM]; | 56 | struct clk_plt *clks[PMC_CLK_NUM]; |
57 | struct clk_lookup *mclk_lookup; | 57 | struct clk_lookup *mclk_lookup; |
58 | struct clk_lookup *ether_clk_lookup; | ||
58 | }; | 59 | }; |
59 | 60 | ||
60 | /* Return an index in parent table */ | 61 | /* Return an index in parent table */ |
@@ -186,13 +187,6 @@ static struct clk_plt *plt_clk_register(struct platform_device *pdev, int id, | |||
186 | pclk->reg = base + PMC_CLK_CTL_OFFSET + id * PMC_CLK_CTL_SIZE; | 187 | pclk->reg = base + PMC_CLK_CTL_OFFSET + id * PMC_CLK_CTL_SIZE; |
187 | spin_lock_init(&pclk->lock); | 188 | spin_lock_init(&pclk->lock); |
188 | 189 | ||
189 | /* | ||
190 | * If the clock was already enabled by the firmware mark it as critical | ||
191 | * to avoid it being gated by the clock framework if no driver owns it. | ||
192 | */ | ||
193 | if (plt_clk_is_enabled(&pclk->hw)) | ||
194 | init.flags |= CLK_IS_CRITICAL; | ||
195 | |||
196 | ret = devm_clk_hw_register(&pdev->dev, &pclk->hw); | 190 | ret = devm_clk_hw_register(&pdev->dev, &pclk->hw); |
197 | if (ret) { | 191 | if (ret) { |
198 | pclk = ERR_PTR(ret); | 192 | pclk = ERR_PTR(ret); |
@@ -351,11 +345,20 @@ static int plt_clk_probe(struct platform_device *pdev) | |||
351 | goto err_unreg_clk_plt; | 345 | goto err_unreg_clk_plt; |
352 | } | 346 | } |
353 | 347 | ||
348 | data->ether_clk_lookup = clkdev_hw_create(&data->clks[4]->hw, | ||
349 | "ether_clk", NULL); | ||
350 | if (!data->ether_clk_lookup) { | ||
351 | err = -ENOMEM; | ||
352 | goto err_drop_mclk; | ||
353 | } | ||
354 | |||
354 | plt_clk_free_parent_names_loop(parent_names, data->nparents); | 355 | plt_clk_free_parent_names_loop(parent_names, data->nparents); |
355 | 356 | ||
356 | platform_set_drvdata(pdev, data); | 357 | platform_set_drvdata(pdev, data); |
357 | return 0; | 358 | return 0; |
358 | 359 | ||
360 | err_drop_mclk: | ||
361 | clkdev_drop(data->mclk_lookup); | ||
359 | err_unreg_clk_plt: | 362 | err_unreg_clk_plt: |
360 | plt_clk_unregister_loop(data, i); | 363 | plt_clk_unregister_loop(data, i); |
361 | plt_clk_unregister_parents(data); | 364 | plt_clk_unregister_parents(data); |
@@ -369,6 +372,7 @@ static int plt_clk_remove(struct platform_device *pdev) | |||
369 | 372 | ||
370 | data = platform_get_drvdata(pdev); | 373 | data = platform_get_drvdata(pdev); |
371 | 374 | ||
375 | clkdev_drop(data->ether_clk_lookup); | ||
372 | clkdev_drop(data->mclk_lookup); | 376 | clkdev_drop(data->mclk_lookup); |
373 | plt_clk_unregister_loop(data, PMC_CLK_NUM); | 377 | plt_clk_unregister_loop(data, PMC_CLK_NUM); |
374 | plt_clk_unregister_parents(data); | 378 | plt_clk_unregister_parents(data); |
diff --git a/drivers/clocksource/timer-atmel-pit.c b/drivers/clocksource/timer-atmel-pit.c index ec8a4376f74f..2fab18fae4fc 100644 --- a/drivers/clocksource/timer-atmel-pit.c +++ b/drivers/clocksource/timer-atmel-pit.c | |||
@@ -180,26 +180,29 @@ static int __init at91sam926x_pit_dt_init(struct device_node *node) | |||
180 | data->base = of_iomap(node, 0); | 180 | data->base = of_iomap(node, 0); |
181 | if (!data->base) { | 181 | if (!data->base) { |
182 | pr_err("Could not map PIT address\n"); | 182 | pr_err("Could not map PIT address\n"); |
183 | return -ENXIO; | 183 | ret = -ENXIO; |
184 | goto exit; | ||
184 | } | 185 | } |
185 | 186 | ||
186 | data->mck = of_clk_get(node, 0); | 187 | data->mck = of_clk_get(node, 0); |
187 | if (IS_ERR(data->mck)) { | 188 | if (IS_ERR(data->mck)) { |
188 | pr_err("Unable to get mck clk\n"); | 189 | pr_err("Unable to get mck clk\n"); |
189 | return PTR_ERR(data->mck); | 190 | ret = PTR_ERR(data->mck); |
191 | goto exit; | ||
190 | } | 192 | } |
191 | 193 | ||
192 | ret = clk_prepare_enable(data->mck); | 194 | ret = clk_prepare_enable(data->mck); |
193 | if (ret) { | 195 | if (ret) { |
194 | pr_err("Unable to enable mck\n"); | 196 | pr_err("Unable to enable mck\n"); |
195 | return ret; | 197 | goto exit; |
196 | } | 198 | } |
197 | 199 | ||
198 | /* Get the interrupts property */ | 200 | /* Get the interrupts property */ |
199 | data->irq = irq_of_parse_and_map(node, 0); | 201 | data->irq = irq_of_parse_and_map(node, 0); |
200 | if (!data->irq) { | 202 | if (!data->irq) { |
201 | pr_err("Unable to get IRQ from DT\n"); | 203 | pr_err("Unable to get IRQ from DT\n"); |
202 | return -EINVAL; | 204 | ret = -EINVAL; |
205 | goto exit; | ||
203 | } | 206 | } |
204 | 207 | ||
205 | /* | 208 | /* |
@@ -227,7 +230,7 @@ static int __init at91sam926x_pit_dt_init(struct device_node *node) | |||
227 | ret = clocksource_register_hz(&data->clksrc, pit_rate); | 230 | ret = clocksource_register_hz(&data->clksrc, pit_rate); |
228 | if (ret) { | 231 | if (ret) { |
229 | pr_err("Failed to register clocksource\n"); | 232 | pr_err("Failed to register clocksource\n"); |
230 | return ret; | 233 | goto exit; |
231 | } | 234 | } |
232 | 235 | ||
233 | /* Set up irq handler */ | 236 | /* Set up irq handler */ |
@@ -236,7 +239,8 @@ static int __init at91sam926x_pit_dt_init(struct device_node *node) | |||
236 | "at91_tick", data); | 239 | "at91_tick", data); |
237 | if (ret) { | 240 | if (ret) { |
238 | pr_err("Unable to setup IRQ\n"); | 241 | pr_err("Unable to setup IRQ\n"); |
239 | return ret; | 242 | clocksource_unregister(&data->clksrc); |
243 | goto exit; | ||
240 | } | 244 | } |
241 | 245 | ||
242 | /* Set up and register clockevents */ | 246 | /* Set up and register clockevents */ |
@@ -254,6 +258,10 @@ static int __init at91sam926x_pit_dt_init(struct device_node *node) | |||
254 | clockevents_register_device(&data->clkevt); | 258 | clockevents_register_device(&data->clkevt); |
255 | 259 | ||
256 | return 0; | 260 | return 0; |
261 | |||
262 | exit: | ||
263 | kfree(data); | ||
264 | return ret; | ||
257 | } | 265 | } |
258 | TIMER_OF_DECLARE(at91sam926x_pit, "atmel,at91sam9260-pit", | 266 | TIMER_OF_DECLARE(at91sam926x_pit, "atmel,at91sam9260-pit", |
259 | at91sam926x_pit_dt_init); | 267 | at91sam926x_pit_dt_init); |
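The init-path rework above is the standard single-exit cleanup idiom: every failure after the data allocation (made earlier in the function, before this hunk) jumps to one label so the allocation is freed exactly once instead of leaking on each early return. Stripped of the PIT specifics (my_map() and my_enable() are hypothetical stand-ins for the iomap, clock and IRQ steps), the shape is:

	#include <linux/slab.h>
	#include <linux/errno.h>

	struct my_state { int dummy; };

	static int my_map(struct my_state *d)    { return d ? 0 : -ENXIO; }
	static int my_enable(struct my_state *d) { return d ? 0 : -EINVAL; }

	static int my_init(void)
	{
		struct my_state *data;
		int ret;

		data = kzalloc(sizeof(*data), GFP_KERNEL);
		if (!data)
			return -ENOMEM;

		ret = my_map(data);
		if (ret)
			goto exit;	/* every later failure funnels through one label */

		ret = my_enable(data);
		if (ret)
			goto exit;

		return 0;

	exit:
		kfree(data);		/* undo the allocation exactly once */
		return ret;
	}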
diff --git a/drivers/clocksource/timer-fttmr010.c b/drivers/clocksource/timer-fttmr010.c index c020038ebfab..cf93f6419b51 100644 --- a/drivers/clocksource/timer-fttmr010.c +++ b/drivers/clocksource/timer-fttmr010.c | |||
@@ -130,13 +130,17 @@ static int fttmr010_timer_set_next_event(unsigned long cycles, | |||
130 | cr &= ~fttmr010->t1_enable_val; | 130 | cr &= ~fttmr010->t1_enable_val; |
131 | writel(cr, fttmr010->base + TIMER_CR); | 131 | writel(cr, fttmr010->base + TIMER_CR); |
132 | 132 | ||
133 | /* Setup the match register forward/backward in time */ | 133 | if (fttmr010->count_down) { |
134 | cr = readl(fttmr010->base + TIMER1_COUNT); | 134 | /* |
135 | if (fttmr010->count_down) | 135 | * ASPEED Timer Controller will load TIMER1_LOAD register |
136 | cr -= cycles; | 136 | * into TIMER1_COUNT register when the timer is re-enabled. |
137 | else | 137 | */ |
138 | cr += cycles; | 138 | writel(cycles, fttmr010->base + TIMER1_LOAD); |
139 | writel(cr, fttmr010->base + TIMER1_MATCH1); | 139 | } else { |
140 | /* Setup the match register forward in time */ | ||
141 | cr = readl(fttmr010->base + TIMER1_COUNT); | ||
142 | writel(cr + cycles, fttmr010->base + TIMER1_MATCH1); | ||
143 | } | ||
140 | 144 | ||
141 | /* Start */ | 145 | /* Start */ |
142 | cr = readl(fttmr010->base + TIMER_CR); | 146 | cr = readl(fttmr010->base + TIMER_CR); |
diff --git a/drivers/clocksource/timer-ti-32k.c b/drivers/clocksource/timer-ti-32k.c index 29e2e1a78a43..6949a9113dbb 100644 --- a/drivers/clocksource/timer-ti-32k.c +++ b/drivers/clocksource/timer-ti-32k.c | |||
@@ -97,6 +97,9 @@ static int __init ti_32k_timer_init(struct device_node *np) | |||
97 | return -ENXIO; | 97 | return -ENXIO; |
98 | } | 98 | } |
99 | 99 | ||
100 | if (!of_machine_is_compatible("ti,am43")) | ||
101 | ti_32k_timer.cs.flags |= CLOCK_SOURCE_SUSPEND_NONSTOP; | ||
102 | |||
100 | ti_32k_timer.counter = ti_32k_timer.base; | 103 | ti_32k_timer.counter = ti_32k_timer.base; |
101 | 104 | ||
102 | /* | 105 | /* |
diff --git a/drivers/cpufreq/qcom-cpufreq-kryo.c b/drivers/cpufreq/qcom-cpufreq-kryo.c index a1830fa25fc5..2a3675c24032 100644 --- a/drivers/cpufreq/qcom-cpufreq-kryo.c +++ b/drivers/cpufreq/qcom-cpufreq-kryo.c | |||
@@ -44,7 +44,7 @@ enum _msm8996_version { | |||
44 | 44 | ||
45 | struct platform_device *cpufreq_dt_pdev, *kryo_cpufreq_pdev; | 45 | struct platform_device *cpufreq_dt_pdev, *kryo_cpufreq_pdev; |
46 | 46 | ||
47 | static enum _msm8996_version __init qcom_cpufreq_kryo_get_msm_id(void) | 47 | static enum _msm8996_version qcom_cpufreq_kryo_get_msm_id(void) |
48 | { | 48 | { |
49 | size_t len; | 49 | size_t len; |
50 | u32 *msm_id; | 50 | u32 *msm_id; |
@@ -222,7 +222,7 @@ static int __init qcom_cpufreq_kryo_init(void) | |||
222 | } | 222 | } |
223 | module_init(qcom_cpufreq_kryo_init); | 223 | module_init(qcom_cpufreq_kryo_init); |
224 | 224 | ||
225 | static void __init qcom_cpufreq_kryo_exit(void) | 225 | static void __exit qcom_cpufreq_kryo_exit(void) |
226 | { | 226 | { |
227 | platform_device_unregister(kryo_cpufreq_pdev); | 227 | platform_device_unregister(kryo_cpufreq_pdev); |
228 | platform_driver_unregister(&qcom_cpufreq_kryo_driver); | 228 | platform_driver_unregister(&qcom_cpufreq_kryo_driver); |
diff --git a/drivers/crypto/ccp/psp-dev.c b/drivers/crypto/ccp/psp-dev.c index 218739b961fe..72790d88236d 100644 --- a/drivers/crypto/ccp/psp-dev.c +++ b/drivers/crypto/ccp/psp-dev.c | |||
@@ -38,6 +38,17 @@ static DEFINE_MUTEX(sev_cmd_mutex); | |||
38 | static struct sev_misc_dev *misc_dev; | 38 | static struct sev_misc_dev *misc_dev; |
39 | static struct psp_device *psp_master; | 39 | static struct psp_device *psp_master; |
40 | 40 | ||
41 | static int psp_cmd_timeout = 100; | ||
42 | module_param(psp_cmd_timeout, int, 0644); | ||
43 | MODULE_PARM_DESC(psp_cmd_timeout, " default timeout value, in seconds, for PSP commands"); | ||
44 | |||
45 | static int psp_probe_timeout = 5; | ||
46 | module_param(psp_probe_timeout, int, 0644); | ||
47 | MODULE_PARM_DESC(psp_probe_timeout, " default timeout value, in seconds, during PSP device probe"); | ||
48 | |||
49 | static bool psp_dead; | ||
50 | static int psp_timeout; | ||
51 | |||
41 | static struct psp_device *psp_alloc_struct(struct sp_device *sp) | 52 | static struct psp_device *psp_alloc_struct(struct sp_device *sp) |
42 | { | 53 | { |
43 | struct device *dev = sp->dev; | 54 | struct device *dev = sp->dev; |
@@ -82,10 +93,19 @@ done: | |||
82 | return IRQ_HANDLED; | 93 | return IRQ_HANDLED; |
83 | } | 94 | } |
84 | 95 | ||
85 | static void sev_wait_cmd_ioc(struct psp_device *psp, unsigned int *reg) | 96 | static int sev_wait_cmd_ioc(struct psp_device *psp, |
97 | unsigned int *reg, unsigned int timeout) | ||
86 | { | 98 | { |
87 | wait_event(psp->sev_int_queue, psp->sev_int_rcvd); | 99 | int ret; |
100 | |||
101 | ret = wait_event_timeout(psp->sev_int_queue, | ||
102 | psp->sev_int_rcvd, timeout * HZ); | ||
103 | if (!ret) | ||
104 | return -ETIMEDOUT; | ||
105 | |||
88 | *reg = ioread32(psp->io_regs + psp->vdata->cmdresp_reg); | 106 | *reg = ioread32(psp->io_regs + psp->vdata->cmdresp_reg); |
107 | |||
108 | return 0; | ||
89 | } | 109 | } |
90 | 110 | ||
91 | static int sev_cmd_buffer_len(int cmd) | 111 | static int sev_cmd_buffer_len(int cmd) |
@@ -133,12 +153,15 @@ static int __sev_do_cmd_locked(int cmd, void *data, int *psp_ret) | |||
133 | if (!psp) | 153 | if (!psp) |
134 | return -ENODEV; | 154 | return -ENODEV; |
135 | 155 | ||
156 | if (psp_dead) | ||
157 | return -EBUSY; | ||
158 | |||
136 | /* Get the physical address of the command buffer */ | 159 | /* Get the physical address of the command buffer */ |
137 | phys_lsb = data ? lower_32_bits(__psp_pa(data)) : 0; | 160 | phys_lsb = data ? lower_32_bits(__psp_pa(data)) : 0; |
138 | phys_msb = data ? upper_32_bits(__psp_pa(data)) : 0; | 161 | phys_msb = data ? upper_32_bits(__psp_pa(data)) : 0; |
139 | 162 | ||
140 | dev_dbg(psp->dev, "sev command id %#x buffer 0x%08x%08x\n", | 163 | dev_dbg(psp->dev, "sev command id %#x buffer 0x%08x%08x timeout %us\n", |
141 | cmd, phys_msb, phys_lsb); | 164 | cmd, phys_msb, phys_lsb, psp_timeout); |
142 | 165 | ||
143 | print_hex_dump_debug("(in): ", DUMP_PREFIX_OFFSET, 16, 2, data, | 166 | print_hex_dump_debug("(in): ", DUMP_PREFIX_OFFSET, 16, 2, data, |
144 | sev_cmd_buffer_len(cmd), false); | 167 | sev_cmd_buffer_len(cmd), false); |
@@ -154,7 +177,18 @@ static int __sev_do_cmd_locked(int cmd, void *data, int *psp_ret) | |||
154 | iowrite32(reg, psp->io_regs + psp->vdata->cmdresp_reg); | 177 | iowrite32(reg, psp->io_regs + psp->vdata->cmdresp_reg); |
155 | 178 | ||
156 | /* wait for command completion */ | 179 | /* wait for command completion */ |
157 | sev_wait_cmd_ioc(psp, ®); | 180 | ret = sev_wait_cmd_ioc(psp, ®, psp_timeout); |
181 | if (ret) { | ||
182 | if (psp_ret) | ||
183 | *psp_ret = 0; | ||
184 | |||
185 | dev_err(psp->dev, "sev command %#x timed out, disabling PSP\n", cmd); | ||
186 | psp_dead = true; | ||
187 | |||
188 | return ret; | ||
189 | } | ||
190 | |||
191 | psp_timeout = psp_cmd_timeout; | ||
158 | 192 | ||
159 | if (psp_ret) | 193 | if (psp_ret) |
160 | *psp_ret = reg & PSP_CMDRESP_ERR_MASK; | 194 | *psp_ret = reg & PSP_CMDRESP_ERR_MASK; |
@@ -888,6 +922,8 @@ void psp_pci_init(void) | |||
888 | 922 | ||
889 | psp_master = sp->psp_data; | 923 | psp_master = sp->psp_data; |
890 | 924 | ||
925 | psp_timeout = psp_probe_timeout; | ||
926 | |||
891 | if (sev_get_api_version()) | 927 | if (sev_get_api_version()) |
892 | goto err; | 928 | goto err; |
893 | 929 | ||
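The new sev_wait_cmd_ioc() return handling hinges on wait_event_timeout() semantics: it returns 0 if the condition was still false when the timeout elapsed, and a positive value otherwise, which the helper maps to -ETIMEDOUT. A generic sketch of that pattern (the device structure and field names here are made up):

	#include <linux/wait.h>
	#include <linux/jiffies.h>
	#include <linux/errno.h>

	struct my_dev {
		wait_queue_head_t wq;
		bool done;
	};

	static int my_wait(struct my_dev *dev, unsigned int seconds)
	{
		long left;

		left = wait_event_timeout(dev->wq, dev->done, seconds * HZ);
		if (!left)
			return -ETIMEDOUT;	/* condition never became true in time */

		return 0;	/* woken with the condition satisfied */
	}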
diff --git a/drivers/dax/device.c b/drivers/dax/device.c index bbe4d72ca105..948806e57cee 100644 --- a/drivers/dax/device.c +++ b/drivers/dax/device.c | |||
@@ -535,6 +535,11 @@ static unsigned long dax_get_unmapped_area(struct file *filp, | |||
535 | return current->mm->get_unmapped_area(filp, addr, len, pgoff, flags); | 535 | return current->mm->get_unmapped_area(filp, addr, len, pgoff, flags); |
536 | } | 536 | } |
537 | 537 | ||
538 | static const struct address_space_operations dev_dax_aops = { | ||
539 | .set_page_dirty = noop_set_page_dirty, | ||
540 | .invalidatepage = noop_invalidatepage, | ||
541 | }; | ||
542 | |||
538 | static int dax_open(struct inode *inode, struct file *filp) | 543 | static int dax_open(struct inode *inode, struct file *filp) |
539 | { | 544 | { |
540 | struct dax_device *dax_dev = inode_dax(inode); | 545 | struct dax_device *dax_dev = inode_dax(inode); |
@@ -544,6 +549,7 @@ static int dax_open(struct inode *inode, struct file *filp) | |||
544 | dev_dbg(&dev_dax->dev, "trace\n"); | 549 | dev_dbg(&dev_dax->dev, "trace\n"); |
545 | inode->i_mapping = __dax_inode->i_mapping; | 550 | inode->i_mapping = __dax_inode->i_mapping; |
546 | inode->i_mapping->host = __dax_inode; | 551 | inode->i_mapping->host = __dax_inode; |
552 | inode->i_mapping->a_ops = &dev_dax_aops; | ||
547 | filp->f_mapping = inode->i_mapping; | 553 | filp->f_mapping = inode->i_mapping; |
548 | filp->f_wb_err = filemap_sample_wb_err(filp->f_mapping); | 554 | filp->f_wb_err = filemap_sample_wb_err(filp->f_mapping); |
549 | filp->private_data = dev_dax; | 555 | filp->private_data = dev_dax; |
diff --git a/drivers/firmware/efi/Kconfig b/drivers/firmware/efi/Kconfig index d8e159feb573..89110dfc7127 100644 --- a/drivers/firmware/efi/Kconfig +++ b/drivers/firmware/efi/Kconfig | |||
@@ -90,14 +90,17 @@ config EFI_ARMSTUB | |||
90 | config EFI_ARMSTUB_DTB_LOADER | 90 | config EFI_ARMSTUB_DTB_LOADER |
91 | bool "Enable the DTB loader" | 91 | bool "Enable the DTB loader" |
92 | depends on EFI_ARMSTUB | 92 | depends on EFI_ARMSTUB |
93 | default y | ||
93 | help | 94 | help |
94 | Select this config option to add support for the dtb= command | 95 | Select this config option to add support for the dtb= command |
95 | line parameter, allowing a device tree blob to be loaded into | 96 | line parameter, allowing a device tree blob to be loaded into |
96 | memory from the EFI System Partition by the stub. | 97 | memory from the EFI System Partition by the stub. |
97 | 98 | ||
98 | The device tree is typically provided by the platform or by | 99 | If the device tree is provided by the platform or by |
99 | the bootloader, so this option is mostly for development | 100 | the bootloader, this option may not be needed. |
100 | purposes only. | 101 | However, it is still needed for development and to preserve |
102 | existing functionality with bootloaders that do not provide | ||
103 | such support. | ||
101 | 104 | ||
102 | config EFI_BOOTLOADER_CONTROL | 105 | config EFI_BOOTLOADER_CONTROL |
103 | tristate "EFI Bootloader Control" | 106 | tristate "EFI Bootloader Control" |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c index f8bbbb3a9504..0c791e35acf0 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c | |||
@@ -272,7 +272,7 @@ void amdgpu_amdkfd_gpu_reset(struct kgd_dev *kgd) | |||
272 | 272 | ||
273 | int alloc_gtt_mem(struct kgd_dev *kgd, size_t size, | 273 | int alloc_gtt_mem(struct kgd_dev *kgd, size_t size, |
274 | void **mem_obj, uint64_t *gpu_addr, | 274 | void **mem_obj, uint64_t *gpu_addr, |
275 | void **cpu_ptr) | 275 | void **cpu_ptr, bool mqd_gfx9) |
276 | { | 276 | { |
277 | struct amdgpu_device *adev = (struct amdgpu_device *)kgd; | 277 | struct amdgpu_device *adev = (struct amdgpu_device *)kgd; |
278 | struct amdgpu_bo *bo = NULL; | 278 | struct amdgpu_bo *bo = NULL; |
@@ -287,6 +287,10 @@ int alloc_gtt_mem(struct kgd_dev *kgd, size_t size, | |||
287 | bp.flags = AMDGPU_GEM_CREATE_CPU_GTT_USWC; | 287 | bp.flags = AMDGPU_GEM_CREATE_CPU_GTT_USWC; |
288 | bp.type = ttm_bo_type_kernel; | 288 | bp.type = ttm_bo_type_kernel; |
289 | bp.resv = NULL; | 289 | bp.resv = NULL; |
290 | |||
291 | if (mqd_gfx9) | ||
292 | bp.flags |= AMDGPU_GEM_CREATE_MQD_GFX9; | ||
293 | |||
290 | r = amdgpu_bo_create(adev, &bp, &bo); | 294 | r = amdgpu_bo_create(adev, &bp, &bo); |
291 | if (r) { | 295 | if (r) { |
292 | dev_err(adev->dev, | 296 | dev_err(adev->dev, |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h index 2f379c183ed2..cc9aeab5468c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h | |||
@@ -136,7 +136,7 @@ void amdgpu_amdkfd_gpu_reset(struct kgd_dev *kgd); | |||
136 | /* Shared API */ | 136 | /* Shared API */ |
137 | int alloc_gtt_mem(struct kgd_dev *kgd, size_t size, | 137 | int alloc_gtt_mem(struct kgd_dev *kgd, size_t size, |
138 | void **mem_obj, uint64_t *gpu_addr, | 138 | void **mem_obj, uint64_t *gpu_addr, |
139 | void **cpu_ptr); | 139 | void **cpu_ptr, bool mqd_gfx9); |
140 | void free_gtt_mem(struct kgd_dev *kgd, void *mem_obj); | 140 | void free_gtt_mem(struct kgd_dev *kgd, void *mem_obj); |
141 | void get_local_mem_info(struct kgd_dev *kgd, | 141 | void get_local_mem_info(struct kgd_dev *kgd, |
142 | struct kfd_local_mem_info *mem_info); | 142 | struct kfd_local_mem_info *mem_info); |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c index ea3f698aef5e..9803b91f3e77 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c | |||
@@ -685,7 +685,7 @@ static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd, | |||
685 | 685 | ||
686 | while (true) { | 686 | while (true) { |
687 | temp = RREG32(sdma_base_addr + mmSDMA0_RLC0_CONTEXT_STATUS); | 687 | temp = RREG32(sdma_base_addr + mmSDMA0_RLC0_CONTEXT_STATUS); |
688 | if (temp & SDMA0_STATUS_REG__RB_CMD_IDLE__SHIFT) | 688 | if (temp & SDMA0_RLC0_CONTEXT_STATUS__IDLE_MASK) |
689 | break; | 689 | break; |
690 | if (time_after(jiffies, end_jiffies)) | 690 | if (time_after(jiffies, end_jiffies)) |
691 | return -ETIME; | 691 | return -ETIME; |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c index 693ec5ea4950..8816c697b205 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c | |||
@@ -367,12 +367,14 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device, | |||
367 | break; | 367 | break; |
368 | case CHIP_POLARIS10: | 368 | case CHIP_POLARIS10: |
369 | if (type == CGS_UCODE_ID_SMU) { | 369 | if (type == CGS_UCODE_ID_SMU) { |
370 | if ((adev->pdev->device == 0x67df) && | 370 | if (((adev->pdev->device == 0x67df) && |
371 | ((adev->pdev->revision == 0xe0) || | 371 | ((adev->pdev->revision == 0xe0) || |
372 | (adev->pdev->revision == 0xe3) || | 372 | (adev->pdev->revision == 0xe3) || |
373 | (adev->pdev->revision == 0xe4) || | 373 | (adev->pdev->revision == 0xe4) || |
374 | (adev->pdev->revision == 0xe5) || | 374 | (adev->pdev->revision == 0xe5) || |
375 | (adev->pdev->revision == 0xe7) || | 375 | (adev->pdev->revision == 0xe7) || |
376 | (adev->pdev->revision == 0xef))) || | ||
377 | ((adev->pdev->device == 0x6fdf) && | ||
376 | (adev->pdev->revision == 0xef))) { | 378 | (adev->pdev->revision == 0xef))) { |
377 | info->is_kicker = true; | 379 | info->is_kicker = true; |
378 | strcpy(fw_name, "amdgpu/polaris10_k_smc.bin"); | 380 | strcpy(fw_name, "amdgpu/polaris10_k_smc.bin"); |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c index 8843a06360fa..0f41d8647376 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | |||
@@ -740,6 +740,7 @@ static const struct pci_device_id pciidlist[] = { | |||
740 | {0x1002, 0x67CA, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS10}, | 740 | {0x1002, 0x67CA, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS10}, |
741 | {0x1002, 0x67CC, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS10}, | 741 | {0x1002, 0x67CC, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS10}, |
742 | {0x1002, 0x67CF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS10}, | 742 | {0x1002, 0x67CF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS10}, |
743 | {0x1002, 0x6FDF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS10}, | ||
743 | /* Polaris12 */ | 744 | /* Polaris12 */ |
744 | {0x1002, 0x6980, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12}, | 745 | {0x1002, 0x6980, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12}, |
745 | {0x1002, 0x6981, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12}, | 746 | {0x1002, 0x6981, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12}, |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c index 0cc5190f4f36..5f3f54073818 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c | |||
@@ -258,6 +258,8 @@ int amdgpu_vce_suspend(struct amdgpu_device *adev) | |||
258 | { | 258 | { |
259 | int i; | 259 | int i; |
260 | 260 | ||
261 | cancel_delayed_work_sync(&adev->vce.idle_work); | ||
262 | |||
261 | if (adev->vce.vcpu_bo == NULL) | 263 | if (adev->vce.vcpu_bo == NULL) |
262 | return 0; | 264 | return 0; |
263 | 265 | ||
@@ -268,7 +270,6 @@ int amdgpu_vce_suspend(struct amdgpu_device *adev) | |||
268 | if (i == AMDGPU_MAX_VCE_HANDLES) | 270 | if (i == AMDGPU_MAX_VCE_HANDLES) |
269 | return 0; | 271 | return 0; |
270 | 272 | ||
271 | cancel_delayed_work_sync(&adev->vce.idle_work); | ||
272 | /* TODO: suspending running encoding sessions isn't supported */ | 273 | /* TODO: suspending running encoding sessions isn't supported */ |
273 | return -EINVAL; | 274 | return -EINVAL; |
274 | } | 275 | } |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c index fd654a4406db..400fc74bbae2 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c | |||
@@ -153,11 +153,11 @@ int amdgpu_vcn_suspend(struct amdgpu_device *adev) | |||
153 | unsigned size; | 153 | unsigned size; |
154 | void *ptr; | 154 | void *ptr; |
155 | 155 | ||
156 | cancel_delayed_work_sync(&adev->vcn.idle_work); | ||
157 | |||
156 | if (adev->vcn.vcpu_bo == NULL) | 158 | if (adev->vcn.vcpu_bo == NULL) |
157 | return 0; | 159 | return 0; |
158 | 160 | ||
159 | cancel_delayed_work_sync(&adev->vcn.idle_work); | ||
160 | |||
161 | size = amdgpu_bo_size(adev->vcn.vcpu_bo); | 161 | size = amdgpu_bo_size(adev->vcn.vcpu_bo); |
162 | ptr = adev->vcn.cpu_addr; | 162 | ptr = adev->vcn.cpu_addr; |
163 | 163 | ||
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c index 1b048715ab8a..29ac74f40dce 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c | |||
@@ -457,7 +457,8 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd, | |||
457 | 457 | ||
458 | if (kfd->kfd2kgd->init_gtt_mem_allocation( | 458 | if (kfd->kfd2kgd->init_gtt_mem_allocation( |
459 | kfd->kgd, size, &kfd->gtt_mem, | 459 | kfd->kgd, size, &kfd->gtt_mem, |
460 | &kfd->gtt_start_gpu_addr, &kfd->gtt_start_cpu_ptr)){ | 460 | &kfd->gtt_start_gpu_addr, &kfd->gtt_start_cpu_ptr, |
461 | false)) { | ||
461 | dev_err(kfd_device, "Could not allocate %d bytes\n", size); | 462 | dev_err(kfd_device, "Could not allocate %d bytes\n", size); |
462 | goto out; | 463 | goto out; |
463 | } | 464 | } |
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_iommu.c b/drivers/gpu/drm/amd/amdkfd/kfd_iommu.c index 7a61f38c09e6..01494752c36a 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_iommu.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_iommu.c | |||
@@ -62,9 +62,20 @@ int kfd_iommu_device_init(struct kfd_dev *kfd) | |||
62 | struct amd_iommu_device_info iommu_info; | 62 | struct amd_iommu_device_info iommu_info; |
63 | unsigned int pasid_limit; | 63 | unsigned int pasid_limit; |
64 | int err; | 64 | int err; |
65 | struct kfd_topology_device *top_dev; | ||
65 | 66 | ||
66 | if (!kfd->device_info->needs_iommu_device) | 67 | top_dev = kfd_topology_device_by_id(kfd->id); |
68 | |||
69 | /* | ||
70 | * Overwrite ATS capability according to needs_iommu_device to fix | ||
71 | * a potentially missing bit in the CRAT provided by the BIOS. | ||
72 | */ | ||
73 | if (!kfd->device_info->needs_iommu_device) { | ||
74 | top_dev->node_props.capability &= ~HSA_CAP_ATS_PRESENT; | ||
67 | return 0; | 75 | return 0; |
76 | } | ||
77 | |||
78 | top_dev->node_props.capability |= HSA_CAP_ATS_PRESENT; | ||
68 | 79 | ||
69 | iommu_info.flags = 0; | 80 | iommu_info.flags = 0; |
70 | err = amd_iommu_device_info(kfd->pdev, &iommu_info); | 81 | err = amd_iommu_device_info(kfd->pdev, &iommu_info); |
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c index f5fc3675f21e..0cedb37cf513 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c | |||
@@ -88,7 +88,7 @@ static int init_mqd(struct mqd_manager *mm, void **mqd, | |||
88 | ALIGN(sizeof(struct v9_mqd), PAGE_SIZE), | 88 | ALIGN(sizeof(struct v9_mqd), PAGE_SIZE), |
89 | &((*mqd_mem_obj)->gtt_mem), | 89 | &((*mqd_mem_obj)->gtt_mem), |
90 | &((*mqd_mem_obj)->gpu_addr), | 90 | &((*mqd_mem_obj)->gpu_addr), |
91 | (void *)&((*mqd_mem_obj)->cpu_ptr)); | 91 | (void *)&((*mqd_mem_obj)->cpu_ptr), true); |
92 | } else | 92 | } else |
93 | retval = kfd_gtt_sa_allocate(mm->dev, sizeof(struct v9_mqd), | 93 | retval = kfd_gtt_sa_allocate(mm->dev, sizeof(struct v9_mqd), |
94 | mqd_mem_obj); | 94 | mqd_mem_obj); |
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h index f971710f1c91..92b285ca73aa 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h | |||
@@ -806,6 +806,7 @@ int kfd_topology_add_device(struct kfd_dev *gpu); | |||
806 | int kfd_topology_remove_device(struct kfd_dev *gpu); | 806 | int kfd_topology_remove_device(struct kfd_dev *gpu); |
807 | struct kfd_topology_device *kfd_topology_device_by_proximity_domain( | 807 | struct kfd_topology_device *kfd_topology_device_by_proximity_domain( |
808 | uint32_t proximity_domain); | 808 | uint32_t proximity_domain); |
809 | struct kfd_topology_device *kfd_topology_device_by_id(uint32_t gpu_id); | ||
809 | struct kfd_dev *kfd_device_by_id(uint32_t gpu_id); | 810 | struct kfd_dev *kfd_device_by_id(uint32_t gpu_id); |
810 | struct kfd_dev *kfd_device_by_pci_dev(const struct pci_dev *pdev); | 811 | struct kfd_dev *kfd_device_by_pci_dev(const struct pci_dev *pdev); |
811 | int kfd_topology_enum_kfd_devices(uint8_t idx, struct kfd_dev **kdev); | 812 | int kfd_topology_enum_kfd_devices(uint8_t idx, struct kfd_dev **kdev); |
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c index bc95d4dfee2e..80f5db4ef75f 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c | |||
@@ -63,22 +63,33 @@ struct kfd_topology_device *kfd_topology_device_by_proximity_domain( | |||
63 | return device; | 63 | return device; |
64 | } | 64 | } |
65 | 65 | ||
66 | struct kfd_dev *kfd_device_by_id(uint32_t gpu_id) | 66 | struct kfd_topology_device *kfd_topology_device_by_id(uint32_t gpu_id) |
67 | { | 67 | { |
68 | struct kfd_topology_device *top_dev; | 68 | struct kfd_topology_device *top_dev = NULL; |
69 | struct kfd_dev *device = NULL; | 69 | struct kfd_topology_device *ret = NULL; |
70 | 70 | ||
71 | down_read(&topology_lock); | 71 | down_read(&topology_lock); |
72 | 72 | ||
73 | list_for_each_entry(top_dev, &topology_device_list, list) | 73 | list_for_each_entry(top_dev, &topology_device_list, list) |
74 | if (top_dev->gpu_id == gpu_id) { | 74 | if (top_dev->gpu_id == gpu_id) { |
75 | device = top_dev->gpu; | 75 | ret = top_dev; |
76 | break; | 76 | break; |
77 | } | 77 | } |
78 | 78 | ||
79 | up_read(&topology_lock); | 79 | up_read(&topology_lock); |
80 | 80 | ||
81 | return device; | 81 | return ret; |
82 | } | ||
83 | |||
84 | struct kfd_dev *kfd_device_by_id(uint32_t gpu_id) | ||
85 | { | ||
86 | struct kfd_topology_device *top_dev; | ||
87 | |||
88 | top_dev = kfd_topology_device_by_id(gpu_id); | ||
89 | if (!top_dev) | ||
90 | return NULL; | ||
91 | |||
92 | return top_dev->gpu; | ||
82 | } | 93 | } |
83 | 94 | ||
84 | struct kfd_dev *kfd_device_by_pci_dev(const struct pci_dev *pdev) | 95 | struct kfd_dev *kfd_device_by_pci_dev(const struct pci_dev *pdev) |
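
kfd_device_by_id() is now a thin wrapper over the new kfd_topology_device_by_id(), and the locking stays inside the helper, so callers such as kfd_iommu_device_init() do not need to take topology_lock themselves. A brief usage sketch (the error handling is illustrative):

    /* Both lookups resolve the same list entry; the read lock is taken and
     * released inside the helper. */
    struct kfd_topology_device *node = kfd_topology_device_by_id(gpu_id);
    struct kfd_dev *dev = kfd_device_by_id(gpu_id);  /* node ? node->gpu : NULL */

    if (!node)
        return -EINVAL;  /* unknown gpu_id (illustrative handling) */
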
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index 800f481a6995..96875950845a 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | |||
@@ -641,6 +641,87 @@ amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state, | |||
641 | return NULL; | 641 | return NULL; |
642 | } | 642 | } |
643 | 643 | ||
644 | static void emulated_link_detect(struct dc_link *link) | ||
645 | { | ||
646 | struct dc_sink_init_data sink_init_data = { 0 }; | ||
647 | struct display_sink_capability sink_caps = { 0 }; | ||
648 | enum dc_edid_status edid_status; | ||
649 | struct dc_context *dc_ctx = link->ctx; | ||
650 | struct dc_sink *sink = NULL; | ||
651 | struct dc_sink *prev_sink = NULL; | ||
652 | |||
653 | link->type = dc_connection_none; | ||
654 | prev_sink = link->local_sink; | ||
655 | |||
656 | if (prev_sink != NULL) | ||
657 | dc_sink_retain(prev_sink); | ||
658 | |||
659 | switch (link->connector_signal) { | ||
660 | case SIGNAL_TYPE_HDMI_TYPE_A: { | ||
661 | sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C; | ||
662 | sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A; | ||
663 | break; | ||
664 | } | ||
665 | |||
666 | case SIGNAL_TYPE_DVI_SINGLE_LINK: { | ||
667 | sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C; | ||
668 | sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK; | ||
669 | break; | ||
670 | } | ||
671 | |||
672 | case SIGNAL_TYPE_DVI_DUAL_LINK: { | ||
673 | sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C; | ||
674 | sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK; | ||
675 | break; | ||
676 | } | ||
677 | |||
678 | case SIGNAL_TYPE_LVDS: { | ||
679 | sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C; | ||
680 | sink_caps.signal = SIGNAL_TYPE_LVDS; | ||
681 | break; | ||
682 | } | ||
683 | |||
684 | case SIGNAL_TYPE_EDP: { | ||
685 | sink_caps.transaction_type = | ||
686 | DDC_TRANSACTION_TYPE_I2C_OVER_AUX; | ||
687 | sink_caps.signal = SIGNAL_TYPE_EDP; | ||
688 | break; | ||
689 | } | ||
690 | |||
691 | case SIGNAL_TYPE_DISPLAY_PORT: { | ||
692 | sink_caps.transaction_type = | ||
693 | DDC_TRANSACTION_TYPE_I2C_OVER_AUX; | ||
694 | sink_caps.signal = SIGNAL_TYPE_VIRTUAL; | ||
695 | break; | ||
696 | } | ||
697 | |||
698 | default: | ||
699 | DC_ERROR("Invalid connector type! signal:%d\n", | ||
700 | link->connector_signal); | ||
701 | return; | ||
702 | } | ||
703 | |||
704 | sink_init_data.link = link; | ||
705 | sink_init_data.sink_signal = sink_caps.signal; | ||
706 | |||
707 | sink = dc_sink_create(&sink_init_data); | ||
708 | if (!sink) { | ||
709 | DC_ERROR("Failed to create sink!\n"); | ||
710 | return; | ||
711 | } | ||
712 | |||
713 | link->local_sink = sink; | ||
714 | |||
715 | edid_status = dm_helpers_read_local_edid( | ||
716 | link->ctx, | ||
717 | link, | ||
718 | sink); | ||
719 | |||
720 | if (edid_status != EDID_OK) | ||
721 | DC_ERROR("Failed to read EDID"); | ||
722 | |||
723 | } | ||
724 | |||
644 | static int dm_resume(void *handle) | 725 | static int dm_resume(void *handle) |
645 | { | 726 | { |
646 | struct amdgpu_device *adev = handle; | 727 | struct amdgpu_device *adev = handle; |
@@ -654,6 +735,7 @@ static int dm_resume(void *handle) | |||
654 | struct drm_plane *plane; | 735 | struct drm_plane *plane; |
655 | struct drm_plane_state *new_plane_state; | 736 | struct drm_plane_state *new_plane_state; |
656 | struct dm_plane_state *dm_new_plane_state; | 737 | struct dm_plane_state *dm_new_plane_state; |
738 | enum dc_connection_type new_connection_type = dc_connection_none; | ||
657 | int ret; | 739 | int ret; |
658 | int i; | 740 | int i; |
659 | 741 | ||
@@ -684,7 +766,13 @@ static int dm_resume(void *handle) | |||
684 | continue; | 766 | continue; |
685 | 767 | ||
686 | mutex_lock(&aconnector->hpd_lock); | 768 | mutex_lock(&aconnector->hpd_lock); |
687 | dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD); | 769 | if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type)) |
770 | DRM_ERROR("KMS: Failed to detect connector\n"); | ||
771 | |||
772 | if (aconnector->base.force && new_connection_type == dc_connection_none) | ||
773 | emulated_link_detect(aconnector->dc_link); | ||
774 | else | ||
775 | dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD); | ||
688 | 776 | ||
689 | if (aconnector->fake_enable && aconnector->dc_link->local_sink) | 777 | if (aconnector->fake_enable && aconnector->dc_link->local_sink) |
690 | aconnector->fake_enable = false; | 778 | aconnector->fake_enable = false; |
@@ -922,6 +1010,7 @@ static void handle_hpd_irq(void *param) | |||
922 | struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param; | 1010 | struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param; |
923 | struct drm_connector *connector = &aconnector->base; | 1011 | struct drm_connector *connector = &aconnector->base; |
924 | struct drm_device *dev = connector->dev; | 1012 | struct drm_device *dev = connector->dev; |
1013 | enum dc_connection_type new_connection_type = dc_connection_none; | ||
925 | 1014 | ||
926 | /* In case of failure or MST no need to update connector status or notify the OS | 1015 | /* In case of failure or MST no need to update connector status or notify the OS |
927 | * since (for MST case) MST does this in it's own context. | 1016 | * since (for MST case) MST does this in it's own context. |
@@ -931,7 +1020,21 @@ static void handle_hpd_irq(void *param) | |||
931 | if (aconnector->fake_enable) | 1020 | if (aconnector->fake_enable) |
932 | aconnector->fake_enable = false; | 1021 | aconnector->fake_enable = false; |
933 | 1022 | ||
934 | if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) { | 1023 | if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type)) |
1024 | DRM_ERROR("KMS: Failed to detect connector\n"); | ||
1025 | |||
1026 | if (aconnector->base.force && new_connection_type == dc_connection_none) { | ||
1027 | emulated_link_detect(aconnector->dc_link); | ||
1028 | |||
1029 | |||
1030 | drm_modeset_lock_all(dev); | ||
1031 | dm_restore_drm_connector_state(dev, connector); | ||
1032 | drm_modeset_unlock_all(dev); | ||
1033 | |||
1034 | if (aconnector->base.force == DRM_FORCE_UNSPECIFIED) | ||
1035 | drm_kms_helper_hotplug_event(dev); | ||
1036 | |||
1037 | } else if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) { | ||
935 | amdgpu_dm_update_connector_after_detect(aconnector); | 1038 | amdgpu_dm_update_connector_after_detect(aconnector); |
936 | 1039 | ||
937 | 1040 | ||
@@ -1031,6 +1134,7 @@ static void handle_hpd_rx_irq(void *param) | |||
1031 | struct drm_device *dev = connector->dev; | 1134 | struct drm_device *dev = connector->dev; |
1032 | struct dc_link *dc_link = aconnector->dc_link; | 1135 | struct dc_link *dc_link = aconnector->dc_link; |
1033 | bool is_mst_root_connector = aconnector->mst_mgr.mst_state; | 1136 | bool is_mst_root_connector = aconnector->mst_mgr.mst_state; |
1137 | enum dc_connection_type new_connection_type = dc_connection_none; | ||
1034 | 1138 | ||
1035 | /* TODO:Temporary add mutex to protect hpd interrupt not have a gpio | 1139 | /* TODO:Temporary add mutex to protect hpd interrupt not have a gpio |
1036 | * conflict, after implement i2c helper, this mutex should be | 1140 | * conflict, after implement i2c helper, this mutex should be |
@@ -1042,7 +1146,24 @@ static void handle_hpd_rx_irq(void *param) | |||
1042 | if (dc_link_handle_hpd_rx_irq(dc_link, NULL, NULL) && | 1146 | if (dc_link_handle_hpd_rx_irq(dc_link, NULL, NULL) && |
1043 | !is_mst_root_connector) { | 1147 | !is_mst_root_connector) { |
1044 | /* Downstream Port status changed. */ | 1148 | /* Downstream Port status changed. */ |
1045 | if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) { | 1149 | if (!dc_link_detect_sink(dc_link, &new_connection_type)) |
1150 | DRM_ERROR("KMS: Failed to detect connector\n"); | ||
1151 | |||
1152 | if (aconnector->base.force && new_connection_type == dc_connection_none) { | ||
1153 | emulated_link_detect(dc_link); | ||
1154 | |||
1155 | if (aconnector->fake_enable) | ||
1156 | aconnector->fake_enable = false; | ||
1157 | |||
1158 | amdgpu_dm_update_connector_after_detect(aconnector); | ||
1159 | |||
1160 | |||
1161 | drm_modeset_lock_all(dev); | ||
1162 | dm_restore_drm_connector_state(dev, connector); | ||
1163 | drm_modeset_unlock_all(dev); | ||
1164 | |||
1165 | drm_kms_helper_hotplug_event(dev); | ||
1166 | } else if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) { | ||
1046 | 1167 | ||
1047 | if (aconnector->fake_enable) | 1168 | if (aconnector->fake_enable) |
1048 | aconnector->fake_enable = false; | 1169 | aconnector->fake_enable = false; |
@@ -1433,6 +1554,7 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev) | |||
1433 | struct amdgpu_mode_info *mode_info = &adev->mode_info; | 1554 | struct amdgpu_mode_info *mode_info = &adev->mode_info; |
1434 | uint32_t link_cnt; | 1555 | uint32_t link_cnt; |
1435 | int32_t total_overlay_planes, total_primary_planes; | 1556 | int32_t total_overlay_planes, total_primary_planes; |
1557 | enum dc_connection_type new_connection_type = dc_connection_none; | ||
1436 | 1558 | ||
1437 | link_cnt = dm->dc->caps.max_links; | 1559 | link_cnt = dm->dc->caps.max_links; |
1438 | if (amdgpu_dm_mode_config_init(dm->adev)) { | 1560 | if (amdgpu_dm_mode_config_init(dm->adev)) { |
@@ -1499,7 +1621,14 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev) | |||
1499 | 1621 | ||
1500 | link = dc_get_link_at_index(dm->dc, i); | 1622 | link = dc_get_link_at_index(dm->dc, i); |
1501 | 1623 | ||
1502 | if (dc_link_detect(link, DETECT_REASON_BOOT)) { | 1624 | if (!dc_link_detect_sink(link, &new_connection_type)) |
1625 | DRM_ERROR("KMS: Failed to detect connector\n"); | ||
1626 | |||
1627 | if (aconnector->base.force && new_connection_type == dc_connection_none) { | ||
1628 | emulated_link_detect(link); | ||
1629 | amdgpu_dm_update_connector_after_detect(aconnector); | ||
1630 | |||
1631 | } else if (dc_link_detect(link, DETECT_REASON_BOOT)) { | ||
1503 | amdgpu_dm_update_connector_after_detect(aconnector); | 1632 | amdgpu_dm_update_connector_after_detect(aconnector); |
1504 | register_backlight_device(dm, link); | 1633 | register_backlight_device(dm, link); |
1505 | } | 1634 | } |
@@ -2494,7 +2623,7 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector, | |||
2494 | if (dm_state && dm_state->freesync_capable) | 2623 | if (dm_state && dm_state->freesync_capable) |
2495 | stream->ignore_msa_timing_param = true; | 2624 | stream->ignore_msa_timing_param = true; |
2496 | finish: | 2625 | finish: |
2497 | if (sink && sink->sink_signal == SIGNAL_TYPE_VIRTUAL) | 2626 | if (sink && sink->sink_signal == SIGNAL_TYPE_VIRTUAL && aconnector->base.force != DRM_FORCE_ON) |
2498 | dc_sink_release(sink); | 2627 | dc_sink_release(sink); |
2499 | 2628 | ||
2500 | return stream; | 2629 | return stream; |
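
Every dc_link_detect() call site in amdgpu_dm now probes the raw HPD state first with dc_link_detect_sink(); when the connector was force-enabled by the user but nothing is physically connected, emulated_link_detect() fabricates a local sink so a mode can still be set. The shared decision, condensed into a sketch (do_detect() is a made-up name; the real code inlines this in dm_resume(), handle_hpd_irq(), handle_hpd_rx_irq() and amdgpu_dm_initialize_drm_device()):

    static void do_detect(struct amdgpu_dm_connector *aconnector,
                          enum dc_detect_reason reason)
    {
        enum dc_connection_type type = dc_connection_none;

        if (!dc_link_detect_sink(aconnector->dc_link, &type))
            DRM_ERROR("KMS: Failed to detect connector\n");

        if (aconnector->base.force && type == dc_connection_none)
            /* forced connector with no physical sink: emulate one */
            emulated_link_detect(aconnector->dc_link);
        else
            dc_link_detect(aconnector->dc_link, reason);
    }

The matching create_stream_for_sink() change keeps the virtual sink alive when the connector is forced on, instead of releasing it unconditionally.
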
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c index 37eaf72ace54..fced3c1c2ef5 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c | |||
@@ -195,7 +195,7 @@ static bool program_hpd_filter( | |||
195 | return result; | 195 | return result; |
196 | } | 196 | } |
197 | 197 | ||
198 | static bool detect_sink(struct dc_link *link, enum dc_connection_type *type) | 198 | bool dc_link_detect_sink(struct dc_link *link, enum dc_connection_type *type) |
199 | { | 199 | { |
200 | uint32_t is_hpd_high = 0; | 200 | uint32_t is_hpd_high = 0; |
201 | struct gpio *hpd_pin; | 201 | struct gpio *hpd_pin; |
@@ -604,7 +604,7 @@ bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason) | |||
604 | if (link->connector_signal == SIGNAL_TYPE_VIRTUAL) | 604 | if (link->connector_signal == SIGNAL_TYPE_VIRTUAL) |
605 | return false; | 605 | return false; |
606 | 606 | ||
607 | if (false == detect_sink(link, &new_connection_type)) { | 607 | if (false == dc_link_detect_sink(link, &new_connection_type)) { |
608 | BREAK_TO_DEBUGGER(); | 608 | BREAK_TO_DEBUGGER(); |
609 | return false; | 609 | return false; |
610 | } | 610 | } |
diff --git a/drivers/gpu/drm/amd/display/dc/dc_link.h b/drivers/gpu/drm/amd/display/dc/dc_link.h index d43cefbc43d3..1b48ab9aea89 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_link.h +++ b/drivers/gpu/drm/amd/display/dc/dc_link.h | |||
@@ -215,6 +215,7 @@ void dc_link_enable_hpd_filter(struct dc_link *link, bool enable); | |||
215 | 215 | ||
216 | bool dc_link_is_dp_sink_present(struct dc_link *link); | 216 | bool dc_link_is_dp_sink_present(struct dc_link *link); |
217 | 217 | ||
218 | bool dc_link_detect_sink(struct dc_link *link, enum dc_connection_type *type); | ||
218 | /* | 219 | /* |
219 | * DPCD access interfaces | 220 | * DPCD access interfaces |
220 | */ | 221 | */ |
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c index 14384d9675a8..b2f308766a9e 100644 --- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c | |||
@@ -2560,7 +2560,7 @@ static void pplib_apply_display_requirements( | |||
2560 | dc->prev_display_config = *pp_display_cfg; | 2560 | dc->prev_display_config = *pp_display_cfg; |
2561 | } | 2561 | } |
2562 | 2562 | ||
2563 | void dce110_set_bandwidth( | 2563 | static void dce110_set_bandwidth( |
2564 | struct dc *dc, | 2564 | struct dc *dc, |
2565 | struct dc_state *context, | 2565 | struct dc_state *context, |
2566 | bool decrease_allowed) | 2566 | bool decrease_allowed) |
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h index e4c5db75c4c6..d6db3dbd9015 100644 --- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h +++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h | |||
@@ -68,11 +68,6 @@ void dce110_fill_display_configs( | |||
68 | const struct dc_state *context, | 68 | const struct dc_state *context, |
69 | struct dm_pp_display_configuration *pp_display_cfg); | 69 | struct dm_pp_display_configuration *pp_display_cfg); |
70 | 70 | ||
71 | void dce110_set_bandwidth( | ||
72 | struct dc *dc, | ||
73 | struct dc_state *context, | ||
74 | bool decrease_allowed); | ||
75 | |||
76 | uint32_t dce110_get_min_vblank_time_us(const struct dc_state *context); | 71 | uint32_t dce110_get_min_vblank_time_us(const struct dc_state *context); |
77 | 72 | ||
78 | void dp_receiver_power_ctrl(struct dc_link *link, bool on); | 73 | void dp_receiver_power_ctrl(struct dc_link *link, bool on); |
diff --git a/drivers/gpu/drm/amd/display/dc/dce120/dce120_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce120/dce120_hw_sequencer.c index 5853522a6182..eb0f5f9a973b 100644 --- a/drivers/gpu/drm/amd/display/dc/dce120/dce120_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/dce120/dce120_hw_sequencer.c | |||
@@ -244,17 +244,6 @@ static void dce120_update_dchub( | |||
244 | dh_data->dchub_info_valid = false; | 244 | dh_data->dchub_info_valid = false; |
245 | } | 245 | } |
246 | 246 | ||
247 | static void dce120_set_bandwidth( | ||
248 | struct dc *dc, | ||
249 | struct dc_state *context, | ||
250 | bool decrease_allowed) | ||
251 | { | ||
252 | if (context->stream_count <= 0) | ||
253 | return; | ||
254 | |||
255 | dce110_set_bandwidth(dc, context, decrease_allowed); | ||
256 | } | ||
257 | |||
258 | void dce120_hw_sequencer_construct(struct dc *dc) | 247 | void dce120_hw_sequencer_construct(struct dc *dc) |
259 | { | 248 | { |
260 | /* All registers used by dce11.2 match those in dce11 in offset and | 249 | /* All registers used by dce11.2 match those in dce11 in offset and |
@@ -263,6 +252,5 @@ void dce120_hw_sequencer_construct(struct dc *dc) | |||
263 | dce110_hw_sequencer_construct(dc); | 252 | dce110_hw_sequencer_construct(dc); |
264 | dc->hwss.enable_display_power_gating = dce120_enable_display_power_gating; | 253 | dc->hwss.enable_display_power_gating = dce120_enable_display_power_gating; |
265 | dc->hwss.update_dchub = dce120_update_dchub; | 254 | dc->hwss.update_dchub = dce120_update_dchub; |
266 | dc->hwss.set_bandwidth = dce120_set_bandwidth; | ||
267 | } | 255 | } |
268 | 256 | ||
diff --git a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h index 14391b06080c..43b82e14007e 100644 --- a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h +++ b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h | |||
@@ -292,7 +292,7 @@ struct tile_config { | |||
292 | struct kfd2kgd_calls { | 292 | struct kfd2kgd_calls { |
293 | int (*init_gtt_mem_allocation)(struct kgd_dev *kgd, size_t size, | 293 | int (*init_gtt_mem_allocation)(struct kgd_dev *kgd, size_t size, |
294 | void **mem_obj, uint64_t *gpu_addr, | 294 | void **mem_obj, uint64_t *gpu_addr, |
295 | void **cpu_ptr); | 295 | void **cpu_ptr, bool mqd_gfx9); |
296 | 296 | ||
297 | void (*free_gtt_mem)(struct kgd_dev *kgd, void *mem_obj); | 297 | void (*free_gtt_mem)(struct kgd_dev *kgd, void *mem_obj); |
298 | 298 | ||
diff --git a/drivers/gpu/drm/arm/malidp_drv.c b/drivers/gpu/drm/arm/malidp_drv.c index 08b5bb219816..94d6dabec2dc 100644 --- a/drivers/gpu/drm/arm/malidp_drv.c +++ b/drivers/gpu/drm/arm/malidp_drv.c | |||
@@ -754,6 +754,7 @@ static int malidp_bind(struct device *dev) | |||
754 | drm->irq_enabled = true; | 754 | drm->irq_enabled = true; |
755 | 755 | ||
756 | ret = drm_vblank_init(drm, drm->mode_config.num_crtc); | 756 | ret = drm_vblank_init(drm, drm->mode_config.num_crtc); |
757 | drm_crtc_vblank_reset(&malidp->crtc); | ||
757 | if (ret < 0) { | 758 | if (ret < 0) { |
758 | DRM_ERROR("failed to initialise vblank\n"); | 759 | DRM_ERROR("failed to initialise vblank\n"); |
759 | goto vblank_fail; | 760 | goto vblank_fail; |
diff --git a/drivers/gpu/drm/arm/malidp_hw.c b/drivers/gpu/drm/arm/malidp_hw.c index c94a4422e0e9..2781e462c1ed 100644 --- a/drivers/gpu/drm/arm/malidp_hw.c +++ b/drivers/gpu/drm/arm/malidp_hw.c | |||
@@ -384,7 +384,8 @@ static long malidp500_se_calc_mclk(struct malidp_hw_device *hwdev, | |||
384 | 384 | ||
385 | static int malidp500_enable_memwrite(struct malidp_hw_device *hwdev, | 385 | static int malidp500_enable_memwrite(struct malidp_hw_device *hwdev, |
386 | dma_addr_t *addrs, s32 *pitches, | 386 | dma_addr_t *addrs, s32 *pitches, |
387 | int num_planes, u16 w, u16 h, u32 fmt_id) | 387 | int num_planes, u16 w, u16 h, u32 fmt_id, |
388 | const s16 *rgb2yuv_coeffs) | ||
388 | { | 389 | { |
389 | u32 base = MALIDP500_SE_MEMWRITE_BASE; | 390 | u32 base = MALIDP500_SE_MEMWRITE_BASE; |
390 | u32 de_base = malidp_get_block_base(hwdev, MALIDP_DE_BLOCK); | 391 | u32 de_base = malidp_get_block_base(hwdev, MALIDP_DE_BLOCK); |
@@ -416,6 +417,16 @@ static int malidp500_enable_memwrite(struct malidp_hw_device *hwdev, | |||
416 | 417 | ||
417 | malidp_hw_write(hwdev, MALIDP_DE_H_ACTIVE(w) | MALIDP_DE_V_ACTIVE(h), | 418 | malidp_hw_write(hwdev, MALIDP_DE_H_ACTIVE(w) | MALIDP_DE_V_ACTIVE(h), |
418 | MALIDP500_SE_MEMWRITE_OUT_SIZE); | 419 | MALIDP500_SE_MEMWRITE_OUT_SIZE); |
420 | |||
421 | if (rgb2yuv_coeffs) { | ||
422 | int i; | ||
423 | |||
424 | for (i = 0; i < MALIDP_COLORADJ_NUM_COEFFS; i++) { | ||
425 | malidp_hw_write(hwdev, rgb2yuv_coeffs[i], | ||
426 | MALIDP500_SE_RGB_YUV_COEFFS + i * 4); | ||
427 | } | ||
428 | } | ||
429 | |||
419 | malidp_hw_setbits(hwdev, MALIDP_SE_MEMWRITE_EN, MALIDP500_SE_CONTROL); | 430 | malidp_hw_setbits(hwdev, MALIDP_SE_MEMWRITE_EN, MALIDP500_SE_CONTROL); |
420 | 431 | ||
421 | return 0; | 432 | return 0; |
@@ -658,7 +669,8 @@ static long malidp550_se_calc_mclk(struct malidp_hw_device *hwdev, | |||
658 | 669 | ||
659 | static int malidp550_enable_memwrite(struct malidp_hw_device *hwdev, | 670 | static int malidp550_enable_memwrite(struct malidp_hw_device *hwdev, |
660 | dma_addr_t *addrs, s32 *pitches, | 671 | dma_addr_t *addrs, s32 *pitches, |
661 | int num_planes, u16 w, u16 h, u32 fmt_id) | 672 | int num_planes, u16 w, u16 h, u32 fmt_id, |
673 | const s16 *rgb2yuv_coeffs) | ||
662 | { | 674 | { |
663 | u32 base = MALIDP550_SE_MEMWRITE_BASE; | 675 | u32 base = MALIDP550_SE_MEMWRITE_BASE; |
664 | u32 de_base = malidp_get_block_base(hwdev, MALIDP_DE_BLOCK); | 676 | u32 de_base = malidp_get_block_base(hwdev, MALIDP_DE_BLOCK); |
@@ -689,6 +701,15 @@ static int malidp550_enable_memwrite(struct malidp_hw_device *hwdev, | |||
689 | malidp_hw_setbits(hwdev, MALIDP550_SE_MEMWRITE_ONESHOT | MALIDP_SE_MEMWRITE_EN, | 701 | malidp_hw_setbits(hwdev, MALIDP550_SE_MEMWRITE_ONESHOT | MALIDP_SE_MEMWRITE_EN, |
690 | MALIDP550_SE_CONTROL); | 702 | MALIDP550_SE_CONTROL); |
691 | 703 | ||
704 | if (rgb2yuv_coeffs) { | ||
705 | int i; | ||
706 | |||
707 | for (i = 0; i < MALIDP_COLORADJ_NUM_COEFFS; i++) { | ||
708 | malidp_hw_write(hwdev, rgb2yuv_coeffs[i], | ||
709 | MALIDP550_SE_RGB_YUV_COEFFS + i * 4); | ||
710 | } | ||
711 | } | ||
712 | |||
692 | return 0; | 713 | return 0; |
693 | } | 714 | } |
694 | 715 | ||
diff --git a/drivers/gpu/drm/arm/malidp_hw.h b/drivers/gpu/drm/arm/malidp_hw.h index ad2e96915d44..9fc94c08190f 100644 --- a/drivers/gpu/drm/arm/malidp_hw.h +++ b/drivers/gpu/drm/arm/malidp_hw.h | |||
@@ -191,7 +191,8 @@ struct malidp_hw { | |||
191 | * @param fmt_id - internal format ID of output buffer | 191 | * @param fmt_id - internal format ID of output buffer |
192 | */ | 192 | */ |
193 | int (*enable_memwrite)(struct malidp_hw_device *hwdev, dma_addr_t *addrs, | 193 | int (*enable_memwrite)(struct malidp_hw_device *hwdev, dma_addr_t *addrs, |
194 | s32 *pitches, int num_planes, u16 w, u16 h, u32 fmt_id); | 194 | s32 *pitches, int num_planes, u16 w, u16 h, u32 fmt_id, |
195 | const s16 *rgb2yuv_coeffs); | ||
195 | 196 | ||
196 | /* | 197 | /* |
197 | * Disable the writing to memory of the next frame's content. | 198 | * Disable the writing to memory of the next frame's content. |
diff --git a/drivers/gpu/drm/arm/malidp_mw.c b/drivers/gpu/drm/arm/malidp_mw.c index ba6ae66387c9..91472e5e0c8b 100644 --- a/drivers/gpu/drm/arm/malidp_mw.c +++ b/drivers/gpu/drm/arm/malidp_mw.c | |||
@@ -26,6 +26,8 @@ struct malidp_mw_connector_state { | |||
26 | s32 pitches[2]; | 26 | s32 pitches[2]; |
27 | u8 format; | 27 | u8 format; |
28 | u8 n_planes; | 28 | u8 n_planes; |
29 | bool rgb2yuv_initialized; | ||
30 | const s16 *rgb2yuv_coeffs; | ||
29 | }; | 31 | }; |
30 | 32 | ||
31 | static int malidp_mw_connector_get_modes(struct drm_connector *connector) | 33 | static int malidp_mw_connector_get_modes(struct drm_connector *connector) |
@@ -84,7 +86,7 @@ static void malidp_mw_connector_destroy(struct drm_connector *connector) | |||
84 | static struct drm_connector_state * | 86 | static struct drm_connector_state * |
85 | malidp_mw_connector_duplicate_state(struct drm_connector *connector) | 87 | malidp_mw_connector_duplicate_state(struct drm_connector *connector) |
86 | { | 88 | { |
87 | struct malidp_mw_connector_state *mw_state; | 89 | struct malidp_mw_connector_state *mw_state, *mw_current_state; |
88 | 90 | ||
89 | if (WARN_ON(!connector->state)) | 91 | if (WARN_ON(!connector->state)) |
90 | return NULL; | 92 | return NULL; |
@@ -93,7 +95,10 @@ malidp_mw_connector_duplicate_state(struct drm_connector *connector) | |||
93 | if (!mw_state) | 95 | if (!mw_state) |
94 | return NULL; | 96 | return NULL; |
95 | 97 | ||
96 | /* No need to preserve any of our driver-local data */ | 98 | mw_current_state = to_mw_state(connector->state); |
99 | mw_state->rgb2yuv_coeffs = mw_current_state->rgb2yuv_coeffs; | ||
100 | mw_state->rgb2yuv_initialized = mw_current_state->rgb2yuv_initialized; | ||
101 | |||
97 | __drm_atomic_helper_connector_duplicate_state(connector, &mw_state->base); | 102 | __drm_atomic_helper_connector_duplicate_state(connector, &mw_state->base); |
98 | 103 | ||
99 | return &mw_state->base; | 104 | return &mw_state->base; |
@@ -108,6 +113,13 @@ static const struct drm_connector_funcs malidp_mw_connector_funcs = { | |||
108 | .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, | 113 | .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, |
109 | }; | 114 | }; |
110 | 115 | ||
116 | static const s16 rgb2yuv_coeffs_bt709_limited[MALIDP_COLORADJ_NUM_COEFFS] = { | ||
117 | 47, 157, 16, | ||
118 | -26, -87, 112, | ||
119 | 112, -102, -10, | ||
120 | 16, 128, 128 | ||
121 | }; | ||
122 | |||
111 | static int | 123 | static int |
112 | malidp_mw_encoder_atomic_check(struct drm_encoder *encoder, | 124 | malidp_mw_encoder_atomic_check(struct drm_encoder *encoder, |
113 | struct drm_crtc_state *crtc_state, | 125 | struct drm_crtc_state *crtc_state, |
@@ -157,6 +169,9 @@ malidp_mw_encoder_atomic_check(struct drm_encoder *encoder, | |||
157 | } | 169 | } |
158 | mw_state->n_planes = n_planes; | 170 | mw_state->n_planes = n_planes; |
159 | 171 | ||
172 | if (fb->format->is_yuv) | ||
173 | mw_state->rgb2yuv_coeffs = rgb2yuv_coeffs_bt709_limited; | ||
174 | |||
160 | return 0; | 175 | return 0; |
161 | } | 176 | } |
162 | 177 | ||
@@ -239,10 +254,12 @@ void malidp_mw_atomic_commit(struct drm_device *drm, | |||
239 | 254 | ||
240 | drm_writeback_queue_job(mw_conn, conn_state->writeback_job); | 255 | drm_writeback_queue_job(mw_conn, conn_state->writeback_job); |
241 | conn_state->writeback_job = NULL; | 256 | conn_state->writeback_job = NULL; |
242 | |||
243 | hwdev->hw->enable_memwrite(hwdev, mw_state->addrs, | 257 | hwdev->hw->enable_memwrite(hwdev, mw_state->addrs, |
244 | mw_state->pitches, mw_state->n_planes, | 258 | mw_state->pitches, mw_state->n_planes, |
245 | fb->width, fb->height, mw_state->format); | 259 | fb->width, fb->height, mw_state->format, |
260 | !mw_state->rgb2yuv_initialized ? | ||
261 | mw_state->rgb2yuv_coeffs : NULL); | ||
262 | mw_state->rgb2yuv_initialized = !!mw_state->rgb2yuv_coeffs; | ||
246 | } else { | 263 | } else { |
247 | DRM_DEV_DEBUG_DRIVER(drm->dev, "Disable memwrite\n"); | 264 | DRM_DEV_DEBUG_DRIVER(drm->dev, "Disable memwrite\n"); |
248 | hwdev->hw->disable_memwrite(hwdev); | 265 | hwdev->hw->disable_memwrite(hwdev); |
diff --git a/drivers/gpu/drm/arm/malidp_regs.h b/drivers/gpu/drm/arm/malidp_regs.h index 3579d36b2a71..6ffe849774f2 100644 --- a/drivers/gpu/drm/arm/malidp_regs.h +++ b/drivers/gpu/drm/arm/malidp_regs.h | |||
@@ -205,6 +205,7 @@ | |||
205 | #define MALIDP500_SE_BASE 0x00c00 | 205 | #define MALIDP500_SE_BASE 0x00c00 |
206 | #define MALIDP500_SE_CONTROL 0x00c0c | 206 | #define MALIDP500_SE_CONTROL 0x00c0c |
207 | #define MALIDP500_SE_MEMWRITE_OUT_SIZE 0x00c2c | 207 | #define MALIDP500_SE_MEMWRITE_OUT_SIZE 0x00c2c |
208 | #define MALIDP500_SE_RGB_YUV_COEFFS 0x00C74 | ||
208 | #define MALIDP500_SE_MEMWRITE_BASE 0x00e00 | 209 | #define MALIDP500_SE_MEMWRITE_BASE 0x00e00 |
209 | #define MALIDP500_DC_IRQ_BASE 0x00f00 | 210 | #define MALIDP500_DC_IRQ_BASE 0x00f00 |
210 | #define MALIDP500_CONFIG_VALID 0x00f00 | 211 | #define MALIDP500_CONFIG_VALID 0x00f00 |
@@ -238,6 +239,7 @@ | |||
238 | #define MALIDP550_SE_CONTROL 0x08010 | 239 | #define MALIDP550_SE_CONTROL 0x08010 |
239 | #define MALIDP550_SE_MEMWRITE_ONESHOT (1 << 7) | 240 | #define MALIDP550_SE_MEMWRITE_ONESHOT (1 << 7) |
240 | #define MALIDP550_SE_MEMWRITE_OUT_SIZE 0x08030 | 241 | #define MALIDP550_SE_MEMWRITE_OUT_SIZE 0x08030 |
242 | #define MALIDP550_SE_RGB_YUV_COEFFS 0x08078 | ||
241 | #define MALIDP550_SE_MEMWRITE_BASE 0x08100 | 243 | #define MALIDP550_SE_MEMWRITE_BASE 0x08100 |
242 | #define MALIDP550_DC_BASE 0x0c000 | 244 | #define MALIDP550_DC_BASE 0x0c000 |
243 | #define MALIDP550_DC_CONTROL 0x0c010 | 245 | #define MALIDP550_DC_CONTROL 0x0c010 |
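
Taken together, the malidp hunks teach the writeback path RGB-to-YUV conversion: malidp_mw_encoder_atomic_check() selects the 12-entry BT.709 limited-range table when the output framebuffer is YUV, enable_memwrite() writes it to the MALIDP500/550_SE_RGB_YUV_COEFFS registers, and the connector state carries rgb2yuv_initialized across duplications so the registers are programmed only on the first frame. A sketch of the programming loop under those assumptions (coeffs_base stands in for either register offset):

    if (rgb2yuv_coeffs) {
        int i;

        /* one 32-bit register per coefficient, 4 bytes apart */
        for (i = 0; i < MALIDP_COLORADJ_NUM_COEFFS; i++)
            malidp_hw_write(hwdev, rgb2yuv_coeffs[i],
                            coeffs_base + i * 4);
    }
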
diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c index 3eb061e11e2e..018fcdb353d2 100644 --- a/drivers/gpu/drm/drm_atomic.c +++ b/drivers/gpu/drm/drm_atomic.c | |||
@@ -2067,7 +2067,7 @@ static void __drm_state_dump(struct drm_device *dev, struct drm_printer *p, | |||
2067 | struct drm_connector *connector; | 2067 | struct drm_connector *connector; |
2068 | struct drm_connector_list_iter conn_iter; | 2068 | struct drm_connector_list_iter conn_iter; |
2069 | 2069 | ||
2070 | if (!drm_core_check_feature(dev, DRIVER_ATOMIC)) | 2070 | if (!drm_drv_uses_atomic_modeset(dev)) |
2071 | return; | 2071 | return; |
2072 | 2072 | ||
2073 | list_for_each_entry(plane, &config->plane_list, head) { | 2073 | list_for_each_entry(plane, &config->plane_list, head) { |
diff --git a/drivers/gpu/drm/drm_debugfs.c b/drivers/gpu/drm/drm_debugfs.c index 6f28fe58f169..373bd4c2b698 100644 --- a/drivers/gpu/drm/drm_debugfs.c +++ b/drivers/gpu/drm/drm_debugfs.c | |||
@@ -151,7 +151,7 @@ int drm_debugfs_init(struct drm_minor *minor, int minor_id, | |||
151 | return ret; | 151 | return ret; |
152 | } | 152 | } |
153 | 153 | ||
154 | if (drm_core_check_feature(dev, DRIVER_ATOMIC)) { | 154 | if (drm_drv_uses_atomic_modeset(dev)) { |
155 | ret = drm_atomic_debugfs_init(minor); | 155 | ret = drm_atomic_debugfs_init(minor); |
156 | if (ret) { | 156 | if (ret) { |
157 | DRM_ERROR("Failed to create atomic debugfs files\n"); | 157 | DRM_ERROR("Failed to create atomic debugfs files\n"); |
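
Both drm_atomic.c and drm_debugfs.c switch their guard from the DRIVER_ATOMIC feature flag to drm_drv_uses_atomic_modeset(), so drivers that use atomic helpers internally without exposing the atomic UAPI still get the state dump and the atomic debugfs files. The guard after the change, in isolation:

    /* Gate atomic-only paths on actual atomic usage, not on the UAPI flag. */
    if (!drm_drv_uses_atomic_modeset(dev))
        return;
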
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c index 4b0dd20bccb8..16ec93b75dbf 100644 --- a/drivers/gpu/drm/drm_fb_helper.c +++ b/drivers/gpu/drm/drm_fb_helper.c | |||
@@ -2370,7 +2370,6 @@ static int drm_pick_crtcs(struct drm_fb_helper *fb_helper, | |||
2370 | { | 2370 | { |
2371 | int c, o; | 2371 | int c, o; |
2372 | struct drm_connector *connector; | 2372 | struct drm_connector *connector; |
2373 | const struct drm_connector_helper_funcs *connector_funcs; | ||
2374 | int my_score, best_score, score; | 2373 | int my_score, best_score, score; |
2375 | struct drm_fb_helper_crtc **crtcs, *crtc; | 2374 | struct drm_fb_helper_crtc **crtcs, *crtc; |
2376 | struct drm_fb_helper_connector *fb_helper_conn; | 2375 | struct drm_fb_helper_connector *fb_helper_conn; |
@@ -2399,8 +2398,6 @@ static int drm_pick_crtcs(struct drm_fb_helper *fb_helper, | |||
2399 | if (drm_has_preferred_mode(fb_helper_conn, width, height)) | 2398 | if (drm_has_preferred_mode(fb_helper_conn, width, height)) |
2400 | my_score++; | 2399 | my_score++; |
2401 | 2400 | ||
2402 | connector_funcs = connector->helper_private; | ||
2403 | |||
2404 | /* | 2401 | /* |
2405 | * select a crtc for this connector and then attempt to configure | 2402 | * select a crtc for this connector and then attempt to configure |
2406 | * remaining connectors | 2403 | * remaining connectors |
diff --git a/drivers/gpu/drm/drm_panel.c b/drivers/gpu/drm/drm_panel.c index b902361dee6e..1d9a9d2fe0e0 100644 --- a/drivers/gpu/drm/drm_panel.c +++ b/drivers/gpu/drm/drm_panel.c | |||
@@ -24,7 +24,6 @@ | |||
24 | #include <linux/err.h> | 24 | #include <linux/err.h> |
25 | #include <linux/module.h> | 25 | #include <linux/module.h> |
26 | 26 | ||
27 | #include <drm/drm_device.h> | ||
28 | #include <drm/drm_crtc.h> | 27 | #include <drm/drm_crtc.h> |
29 | #include <drm/drm_panel.h> | 28 | #include <drm/drm_panel.h> |
30 | 29 | ||
@@ -105,13 +104,6 @@ int drm_panel_attach(struct drm_panel *panel, struct drm_connector *connector) | |||
105 | if (panel->connector) | 104 | if (panel->connector) |
106 | return -EBUSY; | 105 | return -EBUSY; |
107 | 106 | ||
108 | panel->link = device_link_add(connector->dev->dev, panel->dev, 0); | ||
109 | if (!panel->link) { | ||
110 | dev_err(panel->dev, "failed to link panel to %s\n", | ||
111 | dev_name(connector->dev->dev)); | ||
112 | return -EINVAL; | ||
113 | } | ||
114 | |||
115 | panel->connector = connector; | 107 | panel->connector = connector; |
116 | panel->drm = connector->dev; | 108 | panel->drm = connector->dev; |
117 | 109 | ||
@@ -133,8 +125,6 @@ EXPORT_SYMBOL(drm_panel_attach); | |||
133 | */ | 125 | */ |
134 | int drm_panel_detach(struct drm_panel *panel) | 126 | int drm_panel_detach(struct drm_panel *panel) |
135 | { | 127 | { |
136 | device_link_del(panel->link); | ||
137 | |||
138 | panel->connector = NULL; | 128 | panel->connector = NULL; |
139 | panel->drm = NULL; | 129 | panel->drm = NULL; |
140 | 130 | ||
diff --git a/drivers/gpu/drm/drm_syncobj.c b/drivers/gpu/drm/drm_syncobj.c index adb3cb27d31e..759278fef35a 100644 --- a/drivers/gpu/drm/drm_syncobj.c +++ b/drivers/gpu/drm/drm_syncobj.c | |||
@@ -97,6 +97,8 @@ static int drm_syncobj_fence_get_or_add_callback(struct drm_syncobj *syncobj, | |||
97 | { | 97 | { |
98 | int ret; | 98 | int ret; |
99 | 99 | ||
100 | WARN_ON(*fence); | ||
101 | |||
100 | *fence = drm_syncobj_fence_get(syncobj); | 102 | *fence = drm_syncobj_fence_get(syncobj); |
101 | if (*fence) | 103 | if (*fence) |
102 | return 1; | 104 | return 1; |
@@ -743,6 +745,9 @@ static signed long drm_syncobj_array_wait_timeout(struct drm_syncobj **syncobjs, | |||
743 | 745 | ||
744 | if (flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT) { | 746 | if (flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT) { |
745 | for (i = 0; i < count; ++i) { | 747 | for (i = 0; i < count; ++i) { |
748 | if (entries[i].fence) | ||
749 | continue; | ||
750 | |||
746 | drm_syncobj_fence_get_or_add_callback(syncobjs[i], | 751 | drm_syncobj_fence_get_or_add_callback(syncobjs[i], |
747 | &entries[i].fence, | 752 | &entries[i].fence, |
748 | &entries[i].syncobj_cb, | 753 | &entries[i].syncobj_cb, |
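
drm_syncobj_fence_get_or_add_callback() now warns if the output slot already holds a fence, and the WAIT_FOR_SUBMIT loop skips entries whose fence was collected earlier, so a callback is installed only for still-empty syncobjs. The loop as a sketch (the final callback argument continues past the hunk; wait_cb is a placeholder name):

    for (i = 0; i < count; ++i) {
        if (entries[i].fence)
            continue;   /* fence already collected, nothing to register */

        drm_syncobj_fence_get_or_add_callback(syncobjs[i],
                                              &entries[i].fence,
                                              &entries[i].syncobj_cb,
                                              wait_cb);
    }
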
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_drv.c b/drivers/gpu/drm/etnaviv/etnaviv_drv.c index 9b2720b41571..83c1f46670bf 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_drv.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_drv.c | |||
@@ -592,8 +592,6 @@ static int etnaviv_pdev_probe(struct platform_device *pdev) | |||
592 | struct device *dev = &pdev->dev; | 592 | struct device *dev = &pdev->dev; |
593 | struct component_match *match = NULL; | 593 | struct component_match *match = NULL; |
594 | 594 | ||
595 | dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32)); | ||
596 | |||
597 | if (!dev->platform_data) { | 595 | if (!dev->platform_data) { |
598 | struct device_node *core_node; | 596 | struct device_node *core_node; |
599 | 597 | ||
@@ -655,13 +653,30 @@ static int __init etnaviv_init(void) | |||
655 | for_each_compatible_node(np, NULL, "vivante,gc") { | 653 | for_each_compatible_node(np, NULL, "vivante,gc") { |
656 | if (!of_device_is_available(np)) | 654 | if (!of_device_is_available(np)) |
657 | continue; | 655 | continue; |
658 | pdev = platform_device_register_simple("etnaviv", -1, | 656 | |
659 | NULL, 0); | 657 | pdev = platform_device_alloc("etnaviv", -1); |
660 | if (IS_ERR(pdev)) { | 658 | if (!pdev) { |
661 | ret = PTR_ERR(pdev); | 659 | ret = -ENOMEM; |
660 | of_node_put(np); | ||
661 | goto unregister_platform_driver; | ||
662 | } | ||
663 | pdev->dev.coherent_dma_mask = DMA_BIT_MASK(40); | ||
664 | pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask; | ||
665 | |||
666 | /* | ||
667 | * Apply the same DMA configuration to the virtual etnaviv | ||
668 | * device as the GPU we found. This assumes that all Vivante | ||
669 | * GPUs in the system share the same DMA constraints. | ||
670 | */ | ||
671 | of_dma_configure(&pdev->dev, np, true); | ||
672 | |||
673 | ret = platform_device_add(pdev); | ||
674 | if (ret) { | ||
675 | platform_device_put(pdev); | ||
662 | of_node_put(np); | 676 | of_node_put(np); |
663 | goto unregister_platform_driver; | 677 | goto unregister_platform_driver; |
664 | } | 678 | } |
679 | |||
665 | etnaviv_drm = pdev; | 680 | etnaviv_drm = pdev; |
666 | of_node_put(np); | 681 | of_node_put(np); |
667 | break; | 682 | break; |
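
The etnaviv init path no longer registers the virtual device in one step: it allocates it, seeds a 40-bit DMA mask, inherits the DMA configuration (dma-ranges, IOMMU attachment) from the first available Vivante GPU node via of_dma_configure(), and only then adds the device. A condensed sketch of that sequence; the original additionally drops the of_node reference and unwinds the platform driver registration on failure:

    pdev = platform_device_alloc("etnaviv", -1);
    if (!pdev)
        return -ENOMEM;

    pdev->dev.coherent_dma_mask = DMA_BIT_MASK(40);
    pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;

    /* assumes all Vivante GPUs in the system share one set of DMA constraints */
    of_dma_configure(&pdev->dev, np, true);

    ret = platform_device_add(pdev);
    if (ret) {
        platform_device_put(pdev);  /* frees the allocated device */
        return ret;
    }
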
diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c index 72afa518edd9..94c1089ecf59 100644 --- a/drivers/gpu/drm/i915/gvt/handlers.c +++ b/drivers/gpu/drm/i915/gvt/handlers.c | |||
@@ -3210,6 +3210,7 @@ static int init_bxt_mmio_info(struct intel_gvt *gvt) | |||
3210 | MMIO_D(BXT_DSI_PLL_ENABLE, D_BXT); | 3210 | MMIO_D(BXT_DSI_PLL_ENABLE, D_BXT); |
3211 | 3211 | ||
3212 | MMIO_D(GEN9_CLKGATE_DIS_0, D_BXT); | 3212 | MMIO_D(GEN9_CLKGATE_DIS_0, D_BXT); |
3213 | MMIO_D(GEN9_CLKGATE_DIS_4, D_BXT); | ||
3213 | 3214 | ||
3214 | MMIO_D(HSW_TVIDEO_DIP_GCP(TRANSCODER_A), D_BXT); | 3215 | MMIO_D(HSW_TVIDEO_DIP_GCP(TRANSCODER_A), D_BXT); |
3215 | MMIO_D(HSW_TVIDEO_DIP_GCP(TRANSCODER_B), D_BXT); | 3216 | MMIO_D(HSW_TVIDEO_DIP_GCP(TRANSCODER_B), D_BXT); |
diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c index c7afee37b2b8..9ad89e38f6c0 100644 --- a/drivers/gpu/drm/i915/gvt/kvmgt.c +++ b/drivers/gpu/drm/i915/gvt/kvmgt.c | |||
@@ -1833,6 +1833,8 @@ static bool kvmgt_is_valid_gfn(unsigned long handle, unsigned long gfn) | |||
1833 | { | 1833 | { |
1834 | struct kvmgt_guest_info *info; | 1834 | struct kvmgt_guest_info *info; |
1835 | struct kvm *kvm; | 1835 | struct kvm *kvm; |
1836 | int idx; | ||
1837 | bool ret; | ||
1836 | 1838 | ||
1837 | if (!handle_valid(handle)) | 1839 | if (!handle_valid(handle)) |
1838 | return false; | 1840 | return false; |
@@ -1840,8 +1842,11 @@ static bool kvmgt_is_valid_gfn(unsigned long handle, unsigned long gfn) | |||
1840 | info = (struct kvmgt_guest_info *)handle; | 1842 | info = (struct kvmgt_guest_info *)handle; |
1841 | kvm = info->kvm; | 1843 | kvm = info->kvm; |
1842 | 1844 | ||
1843 | return kvm_is_visible_gfn(kvm, gfn); | 1845 | idx = srcu_read_lock(&kvm->srcu); |
1846 | ret = kvm_is_visible_gfn(kvm, gfn); | ||
1847 | srcu_read_unlock(&kvm->srcu, idx); | ||
1844 | 1848 | ||
1849 | return ret; | ||
1845 | } | 1850 | } |
1846 | 1851 | ||
1847 | struct intel_gvt_mpt kvmgt_mpt = { | 1852 | struct intel_gvt_mpt kvmgt_mpt = { |
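
kvm_is_visible_gfn() walks the KVM memslots, which are protected by kvm->srcu, so kvmgt_is_valid_gfn() now brackets the call with an SRCU read-side critical section. The pattern in isolation:

    idx = srcu_read_lock(&kvm->srcu);   /* memslots are SRCU-protected */
    ret = kvm_is_visible_gfn(kvm, gfn);
    srcu_read_unlock(&kvm->srcu, idx);

    return ret;
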
diff --git a/drivers/gpu/drm/i915/gvt/mmio.c b/drivers/gpu/drm/i915/gvt/mmio.c index 994366035364..9bb9a85c992c 100644 --- a/drivers/gpu/drm/i915/gvt/mmio.c +++ b/drivers/gpu/drm/i915/gvt/mmio.c | |||
@@ -244,6 +244,34 @@ void intel_vgpu_reset_mmio(struct intel_vgpu *vgpu, bool dmlr) | |||
244 | 244 | ||
245 | /* set the bit 0:2(Core C-State ) to C0 */ | 245 | /* set the bit 0:2(Core C-State ) to C0 */ |
246 | vgpu_vreg_t(vgpu, GEN6_GT_CORE_STATUS) = 0; | 246 | vgpu_vreg_t(vgpu, GEN6_GT_CORE_STATUS) = 0; |
247 | |||
248 | if (IS_BROXTON(vgpu->gvt->dev_priv)) { | ||
249 | vgpu_vreg_t(vgpu, BXT_P_CR_GT_DISP_PWRON) &= | ||
250 | ~(BIT(0) | BIT(1)); | ||
251 | vgpu_vreg_t(vgpu, BXT_PORT_CL1CM_DW0(DPIO_PHY0)) &= | ||
252 | ~PHY_POWER_GOOD; | ||
253 | vgpu_vreg_t(vgpu, BXT_PORT_CL1CM_DW0(DPIO_PHY1)) &= | ||
254 | ~PHY_POWER_GOOD; | ||
255 | vgpu_vreg_t(vgpu, BXT_PHY_CTL_FAMILY(DPIO_PHY0)) &= | ||
256 | ~BIT(30); | ||
257 | vgpu_vreg_t(vgpu, BXT_PHY_CTL_FAMILY(DPIO_PHY1)) &= | ||
258 | ~BIT(30); | ||
259 | vgpu_vreg_t(vgpu, BXT_PHY_CTL(PORT_A)) &= | ||
260 | ~BXT_PHY_LANE_ENABLED; | ||
261 | vgpu_vreg_t(vgpu, BXT_PHY_CTL(PORT_A)) |= | ||
262 | BXT_PHY_CMNLANE_POWERDOWN_ACK | | ||
263 | BXT_PHY_LANE_POWERDOWN_ACK; | ||
264 | vgpu_vreg_t(vgpu, BXT_PHY_CTL(PORT_B)) &= | ||
265 | ~BXT_PHY_LANE_ENABLED; | ||
266 | vgpu_vreg_t(vgpu, BXT_PHY_CTL(PORT_B)) |= | ||
267 | BXT_PHY_CMNLANE_POWERDOWN_ACK | | ||
268 | BXT_PHY_LANE_POWERDOWN_ACK; | ||
269 | vgpu_vreg_t(vgpu, BXT_PHY_CTL(PORT_C)) &= | ||
270 | ~BXT_PHY_LANE_ENABLED; | ||
271 | vgpu_vreg_t(vgpu, BXT_PHY_CTL(PORT_C)) |= | ||
272 | BXT_PHY_CMNLANE_POWERDOWN_ACK | | ||
273 | BXT_PHY_LANE_POWERDOWN_ACK; | ||
274 | } | ||
247 | } else { | 275 | } else { |
248 | #define GVT_GEN8_MMIO_RESET_OFFSET (0x44200) | 276 | #define GVT_GEN8_MMIO_RESET_OFFSET (0x44200) |
249 | /* only reset the engine related, so starting with 0x44200 | 277 | /* only reset the engine related, so starting with 0x44200 |
diff --git a/drivers/gpu/drm/i915/gvt/vgpu.c b/drivers/gpu/drm/i915/gvt/vgpu.c index a4e8e3cf74fd..c628be05fbfe 100644 --- a/drivers/gpu/drm/i915/gvt/vgpu.c +++ b/drivers/gpu/drm/i915/gvt/vgpu.c | |||
@@ -281,6 +281,7 @@ void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu) | |||
281 | intel_vgpu_clean_submission(vgpu); | 281 | intel_vgpu_clean_submission(vgpu); |
282 | intel_vgpu_clean_display(vgpu); | 282 | intel_vgpu_clean_display(vgpu); |
283 | intel_vgpu_clean_opregion(vgpu); | 283 | intel_vgpu_clean_opregion(vgpu); |
284 | intel_vgpu_reset_ggtt(vgpu, true); | ||
284 | intel_vgpu_clean_gtt(vgpu); | 285 | intel_vgpu_clean_gtt(vgpu); |
285 | intel_gvt_hypervisor_detach_vgpu(vgpu); | 286 | intel_gvt_hypervisor_detach_vgpu(vgpu); |
286 | intel_vgpu_free_resource(vgpu); | 287 | intel_vgpu_free_resource(vgpu); |
diff --git a/drivers/gpu/drm/pl111/pl111_vexpress.c b/drivers/gpu/drm/pl111/pl111_vexpress.c index a534b225e31b..5fa0441bb6df 100644 --- a/drivers/gpu/drm/pl111/pl111_vexpress.c +++ b/drivers/gpu/drm/pl111/pl111_vexpress.c | |||
@@ -111,7 +111,8 @@ static int vexpress_muxfpga_probe(struct platform_device *pdev) | |||
111 | } | 111 | } |
112 | 112 | ||
113 | static const struct of_device_id vexpress_muxfpga_match[] = { | 113 | static const struct of_device_id vexpress_muxfpga_match[] = { |
114 | { .compatible = "arm,vexpress-muxfpga", } | 114 | { .compatible = "arm,vexpress-muxfpga", }, |
115 | {} | ||
115 | }; | 116 | }; |
116 | 117 | ||
117 | static struct platform_driver vexpress_muxfpga_driver = { | 118 | static struct platform_driver vexpress_muxfpga_driver = { |
diff --git a/drivers/gpu/drm/sun4i/sun4i_drv.c b/drivers/gpu/drm/sun4i/sun4i_drv.c index dd19d674055c..8b0cd08034e0 100644 --- a/drivers/gpu/drm/sun4i/sun4i_drv.c +++ b/drivers/gpu/drm/sun4i/sun4i_drv.c | |||
@@ -418,7 +418,6 @@ static const struct of_device_id sun4i_drv_of_table[] = { | |||
418 | { .compatible = "allwinner,sun8i-a33-display-engine" }, | 418 | { .compatible = "allwinner,sun8i-a33-display-engine" }, |
419 | { .compatible = "allwinner,sun8i-a83t-display-engine" }, | 419 | { .compatible = "allwinner,sun8i-a83t-display-engine" }, |
420 | { .compatible = "allwinner,sun8i-h3-display-engine" }, | 420 | { .compatible = "allwinner,sun8i-h3-display-engine" }, |
421 | { .compatible = "allwinner,sun8i-r40-display-engine" }, | ||
422 | { .compatible = "allwinner,sun8i-v3s-display-engine" }, | 421 | { .compatible = "allwinner,sun8i-v3s-display-engine" }, |
423 | { .compatible = "allwinner,sun9i-a80-display-engine" }, | 422 | { .compatible = "allwinner,sun9i-a80-display-engine" }, |
424 | { } | 423 | { } |
diff --git a/drivers/gpu/drm/sun4i/sun8i_hdmi_phy.c b/drivers/gpu/drm/sun4i/sun8i_hdmi_phy.c index 82502b351aec..a564b5dfe082 100644 --- a/drivers/gpu/drm/sun4i/sun8i_hdmi_phy.c +++ b/drivers/gpu/drm/sun4i/sun8i_hdmi_phy.c | |||
@@ -398,7 +398,6 @@ static struct regmap_config sun8i_hdmi_phy_regmap_config = { | |||
398 | 398 | ||
399 | static const struct sun8i_hdmi_phy_variant sun50i_a64_hdmi_phy = { | 399 | static const struct sun8i_hdmi_phy_variant sun50i_a64_hdmi_phy = { |
400 | .has_phy_clk = true, | 400 | .has_phy_clk = true, |
401 | .has_second_pll = true, | ||
402 | .phy_init = &sun8i_hdmi_phy_init_h3, | 401 | .phy_init = &sun8i_hdmi_phy_init_h3, |
403 | .phy_disable = &sun8i_hdmi_phy_disable_h3, | 402 | .phy_disable = &sun8i_hdmi_phy_disable_h3, |
404 | .phy_config = &sun8i_hdmi_phy_config_h3, | 403 | .phy_config = &sun8i_hdmi_phy_config_h3, |
diff --git a/drivers/gpu/drm/sun4i/sun8i_mixer.c b/drivers/gpu/drm/sun4i/sun8i_mixer.c index fc3713608f78..cb65b0ed53fd 100644 --- a/drivers/gpu/drm/sun4i/sun8i_mixer.c +++ b/drivers/gpu/drm/sun4i/sun8i_mixer.c | |||
@@ -545,22 +545,6 @@ static const struct sun8i_mixer_cfg sun8i_h3_mixer0_cfg = { | |||
545 | .vi_num = 1, | 545 | .vi_num = 1, |
546 | }; | 546 | }; |
547 | 547 | ||
548 | static const struct sun8i_mixer_cfg sun8i_r40_mixer0_cfg = { | ||
549 | .ccsc = 0, | ||
550 | .mod_rate = 297000000, | ||
551 | .scaler_mask = 0xf, | ||
552 | .ui_num = 3, | ||
553 | .vi_num = 1, | ||
554 | }; | ||
555 | |||
556 | static const struct sun8i_mixer_cfg sun8i_r40_mixer1_cfg = { | ||
557 | .ccsc = 1, | ||
558 | .mod_rate = 297000000, | ||
559 | .scaler_mask = 0x3, | ||
560 | .ui_num = 1, | ||
561 | .vi_num = 1, | ||
562 | }; | ||
563 | |||
564 | static const struct sun8i_mixer_cfg sun8i_v3s_mixer_cfg = { | 548 | static const struct sun8i_mixer_cfg sun8i_v3s_mixer_cfg = { |
565 | .vi_num = 2, | 549 | .vi_num = 2, |
566 | .ui_num = 1, | 550 | .ui_num = 1, |
@@ -583,14 +567,6 @@ static const struct of_device_id sun8i_mixer_of_table[] = { | |||
583 | .data = &sun8i_h3_mixer0_cfg, | 567 | .data = &sun8i_h3_mixer0_cfg, |
584 | }, | 568 | }, |
585 | { | 569 | { |
586 | .compatible = "allwinner,sun8i-r40-de2-mixer-0", | ||
587 | .data = &sun8i_r40_mixer0_cfg, | ||
588 | }, | ||
589 | { | ||
590 | .compatible = "allwinner,sun8i-r40-de2-mixer-1", | ||
591 | .data = &sun8i_r40_mixer1_cfg, | ||
592 | }, | ||
593 | { | ||
594 | .compatible = "allwinner,sun8i-v3s-de2-mixer", | 570 | .compatible = "allwinner,sun8i-v3s-de2-mixer", |
595 | .data = &sun8i_v3s_mixer_cfg, | 571 | .data = &sun8i_v3s_mixer_cfg, |
596 | }, | 572 | }, |
diff --git a/drivers/gpu/drm/sun4i/sun8i_tcon_top.c b/drivers/gpu/drm/sun4i/sun8i_tcon_top.c index 55fe398d8290..d5240b777a8f 100644 --- a/drivers/gpu/drm/sun4i/sun8i_tcon_top.c +++ b/drivers/gpu/drm/sun4i/sun8i_tcon_top.c | |||
@@ -253,7 +253,6 @@ static int sun8i_tcon_top_remove(struct platform_device *pdev) | |||
253 | 253 | ||
254 | /* sun4i_drv uses this list to check if a device node is a TCON TOP */ | 254 | /* sun4i_drv uses this list to check if a device node is a TCON TOP */ |
255 | const struct of_device_id sun8i_tcon_top_of_table[] = { | 255 | const struct of_device_id sun8i_tcon_top_of_table[] = { |
256 | { .compatible = "allwinner,sun8i-r40-tcon-top" }, | ||
257 | { /* sentinel */ } | 256 | { /* sentinel */ } |
258 | }; | 257 | }; |
259 | MODULE_DEVICE_TABLE(of, sun8i_tcon_top_of_table); | 258 | MODULE_DEVICE_TABLE(of, sun8i_tcon_top_of_table); |
diff --git a/drivers/gpu/drm/udl/udl_fb.c b/drivers/gpu/drm/udl/udl_fb.c index dbb62f6eb48a..dd9ffded223b 100644 --- a/drivers/gpu/drm/udl/udl_fb.c +++ b/drivers/gpu/drm/udl/udl_fb.c | |||
@@ -432,9 +432,11 @@ static void udl_fbdev_destroy(struct drm_device *dev, | |||
432 | { | 432 | { |
433 | drm_fb_helper_unregister_fbi(&ufbdev->helper); | 433 | drm_fb_helper_unregister_fbi(&ufbdev->helper); |
434 | drm_fb_helper_fini(&ufbdev->helper); | 434 | drm_fb_helper_fini(&ufbdev->helper); |
435 | drm_framebuffer_unregister_private(&ufbdev->ufb.base); | 435 | if (ufbdev->ufb.obj) { |
436 | drm_framebuffer_cleanup(&ufbdev->ufb.base); | 436 | drm_framebuffer_unregister_private(&ufbdev->ufb.base); |
437 | drm_gem_object_put_unlocked(&ufbdev->ufb.obj->base); | 437 | drm_framebuffer_cleanup(&ufbdev->ufb.base); |
438 | drm_gem_object_put_unlocked(&ufbdev->ufb.obj->base); | ||
439 | } | ||
438 | } | 440 | } |
439 | 441 | ||
440 | int udl_fbdev_init(struct drm_device *dev) | 442 | int udl_fbdev_init(struct drm_device *dev) |
diff --git a/drivers/gpu/drm/vc4/vc4_plane.c b/drivers/gpu/drm/vc4/vc4_plane.c index cfb50fedfa2b..a3275fa66b7b 100644 --- a/drivers/gpu/drm/vc4/vc4_plane.c +++ b/drivers/gpu/drm/vc4/vc4_plane.c | |||
@@ -297,6 +297,9 @@ static int vc4_plane_setup_clipping_and_scaling(struct drm_plane_state *state) | |||
297 | vc4_state->y_scaling[0] = vc4_get_scaling_mode(vc4_state->src_h[0], | 297 | vc4_state->y_scaling[0] = vc4_get_scaling_mode(vc4_state->src_h[0], |
298 | vc4_state->crtc_h); | 298 | vc4_state->crtc_h); |
299 | 299 | ||
300 | vc4_state->is_unity = (vc4_state->x_scaling[0] == VC4_SCALING_NONE && | ||
301 | vc4_state->y_scaling[0] == VC4_SCALING_NONE); | ||
302 | |||
300 | if (num_planes > 1) { | 303 | if (num_planes > 1) { |
301 | vc4_state->is_yuv = true; | 304 | vc4_state->is_yuv = true; |
302 | 305 | ||
@@ -312,24 +315,17 @@ static int vc4_plane_setup_clipping_and_scaling(struct drm_plane_state *state) | |||
312 | vc4_get_scaling_mode(vc4_state->src_h[1], | 315 | vc4_get_scaling_mode(vc4_state->src_h[1], |
313 | vc4_state->crtc_h); | 316 | vc4_state->crtc_h); |
314 | 317 | ||
315 | /* YUV conversion requires that scaling be enabled, | 318 | /* YUV conversion requires that horizontal scaling be enabled, |
316 | * even on a plane that's otherwise 1:1. Choose TPZ | 319 | * even on a plane that's otherwise 1:1. Looks like only PPF |
317 | * for simplicity. | 320 | * works in that case, so let's pick that one. |
318 | */ | 321 | */ |
319 | if (vc4_state->x_scaling[0] == VC4_SCALING_NONE) | 322 | if (vc4_state->is_unity) |
320 | vc4_state->x_scaling[0] = VC4_SCALING_TPZ; | 323 | vc4_state->x_scaling[0] = VC4_SCALING_PPF; |
321 | if (vc4_state->y_scaling[0] == VC4_SCALING_NONE) | ||
322 | vc4_state->y_scaling[0] = VC4_SCALING_TPZ; | ||
323 | } else { | 324 | } else { |
324 | vc4_state->x_scaling[1] = VC4_SCALING_NONE; | 325 | vc4_state->x_scaling[1] = VC4_SCALING_NONE; |
325 | vc4_state->y_scaling[1] = VC4_SCALING_NONE; | 326 | vc4_state->y_scaling[1] = VC4_SCALING_NONE; |
326 | } | 327 | } |
327 | 328 | ||
328 | vc4_state->is_unity = (vc4_state->x_scaling[0] == VC4_SCALING_NONE && | ||
329 | vc4_state->y_scaling[0] == VC4_SCALING_NONE && | ||
330 | vc4_state->x_scaling[1] == VC4_SCALING_NONE && | ||
331 | vc4_state->y_scaling[1] == VC4_SCALING_NONE); | ||
332 | |||
333 | /* No configuring scaling on the cursor plane, since it gets | 329 | /* No configuring scaling on the cursor plane, since it gets |
334 | non-vblank-synced updates, and scaling requires requires | 330 | non-vblank-synced updates, and scaling requires requires |
335 | LBM changes which have to be vblank-synced. | 331 | LBM changes which have to be vblank-synced. |
@@ -672,7 +668,10 @@ static int vc4_plane_mode_set(struct drm_plane *plane, | |||
672 | vc4_dlist_write(vc4_state, SCALER_CSC2_ITR_R_601_5); | 668 | vc4_dlist_write(vc4_state, SCALER_CSC2_ITR_R_601_5); |
673 | } | 669 | } |
674 | 670 | ||
675 | if (!vc4_state->is_unity) { | 671 | if (vc4_state->x_scaling[0] != VC4_SCALING_NONE || |
672 | vc4_state->x_scaling[1] != VC4_SCALING_NONE || | ||
673 | vc4_state->y_scaling[0] != VC4_SCALING_NONE || | ||
674 | vc4_state->y_scaling[1] != VC4_SCALING_NONE) { | ||
676 | /* LBM Base Address. */ | 675 | /* LBM Base Address. */ |
677 | if (vc4_state->y_scaling[0] != VC4_SCALING_NONE || | 676 | if (vc4_state->y_scaling[0] != VC4_SCALING_NONE || |
678 | vc4_state->y_scaling[1] != VC4_SCALING_NONE) { | 677 | vc4_state->y_scaling[1] != VC4_SCALING_NONE) { |
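
The vc4 hunks make three related changes: is_unity is computed from the plane-0 scaling modes before the YUV adjustment, a 1:1 YUV plane now gets PPF rather than TPZ horizontal scaling, and vc4_plane_mode_set() sizes LBM from the actual per-channel scaling modes instead of is_unity. A sketch of the LBM gate after the change, with names from the hunks:

    bool any_scaling =
        vc4_state->x_scaling[0] != VC4_SCALING_NONE ||
        vc4_state->x_scaling[1] != VC4_SCALING_NONE ||
        vc4_state->y_scaling[0] != VC4_SCALING_NONE ||
        vc4_state->y_scaling[1] != VC4_SCALING_NONE;

    if (any_scaling) {
        /* program the LBM base address and related scaler state */
    }
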
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c index 1f134570b759..f0ab6b2313bb 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c | |||
@@ -3729,7 +3729,7 @@ int vmw_validate_single_buffer(struct vmw_private *dev_priv, | |||
3729 | { | 3729 | { |
3730 | struct vmw_buffer_object *vbo = | 3730 | struct vmw_buffer_object *vbo = |
3731 | container_of(bo, struct vmw_buffer_object, base); | 3731 | container_of(bo, struct vmw_buffer_object, base); |
3732 | struct ttm_operation_ctx ctx = { interruptible, true }; | 3732 | struct ttm_operation_ctx ctx = { interruptible, false }; |
3733 | int ret; | 3733 | int ret; |
3734 | 3734 | ||
3735 | if (vbo->pin_count > 0) | 3735 | if (vbo->pin_count > 0) |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c index 23beff5d8e3c..6a712a8d59e9 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c | |||
@@ -1512,21 +1512,19 @@ static int vmw_kms_check_display_memory(struct drm_device *dev, | |||
1512 | struct drm_rect *rects) | 1512 | struct drm_rect *rects) |
1513 | { | 1513 | { |
1514 | struct vmw_private *dev_priv = vmw_priv(dev); | 1514 | struct vmw_private *dev_priv = vmw_priv(dev); |
1515 | struct drm_mode_config *mode_config = &dev->mode_config; | ||
1516 | struct drm_rect bounding_box = {0}; | 1515 | struct drm_rect bounding_box = {0}; |
1517 | u64 total_pixels = 0, pixel_mem, bb_mem; | 1516 | u64 total_pixels = 0, pixel_mem, bb_mem; |
1518 | int i; | 1517 | int i; |
1519 | 1518 | ||
1520 | for (i = 0; i < num_rects; i++) { | 1519 | for (i = 0; i < num_rects; i++) { |
1521 | /* | 1520 | /* |
1522 | * Currently this check is limiting the topology within max | 1521 | * For STDU only individual screen (screen target) is limited by |
1523 | * texture/screentarget size. This should change in future when | 1522 | * SCREENTARGET_MAX_WIDTH/HEIGHT registers. |
1524 | * user-space support multiple fb with topology. | ||
1525 | */ | 1523 | */ |
1526 | if (rects[i].x1 < 0 || rects[i].y1 < 0 || | 1524 | if (dev_priv->active_display_unit == vmw_du_screen_target && |
1527 | rects[i].x2 > mode_config->max_width || | 1525 | (drm_rect_width(&rects[i]) > dev_priv->stdu_max_width || |
1528 | rects[i].y2 > mode_config->max_height) { | 1526 | drm_rect_height(&rects[i]) > dev_priv->stdu_max_height)) { |
1529 | DRM_ERROR("Invalid GUI layout.\n"); | 1527 | DRM_ERROR("Screen size not supported.\n"); |
1530 | return -EINVAL; | 1528 | return -EINVAL; |
1531 | } | 1529 | } |
1532 | 1530 | ||
@@ -1615,7 +1613,7 @@ static int vmw_kms_check_topology(struct drm_device *dev, | |||
1615 | struct drm_connector_state *conn_state; | 1613 | struct drm_connector_state *conn_state; |
1616 | struct vmw_connector_state *vmw_conn_state; | 1614 | struct vmw_connector_state *vmw_conn_state; |
1617 | 1615 | ||
1618 | if (!new_crtc_state->enable && old_crtc_state->enable) { | 1616 | if (!new_crtc_state->enable) { |
1619 | rects[i].x1 = 0; | 1617 | rects[i].x1 = 0; |
1620 | rects[i].y1 = 0; | 1618 | rects[i].y1 = 0; |
1621 | rects[i].x2 = 0; | 1619 | rects[i].x2 = 0; |
@@ -2216,12 +2214,16 @@ int vmw_du_connector_fill_modes(struct drm_connector *connector, | |||
2216 | if (dev_priv->assume_16bpp) | 2214 | if (dev_priv->assume_16bpp) |
2217 | assumed_bpp = 2; | 2215 | assumed_bpp = 2; |
2218 | 2216 | ||
2217 | max_width = min(max_width, dev_priv->texture_max_width); | ||
2218 | max_height = min(max_height, dev_priv->texture_max_height); | ||
2219 | |||
2220 | /* | ||
2221 | * For STDU extra limit for a mode on SVGA_REG_SCREENTARGET_MAX_WIDTH/ | ||
2222 | * HEIGHT registers. | ||
2223 | */ | ||
2219 | if (dev_priv->active_display_unit == vmw_du_screen_target) { | 2224 | if (dev_priv->active_display_unit == vmw_du_screen_target) { |
2220 | max_width = min(max_width, dev_priv->stdu_max_width); | 2225 | max_width = min(max_width, dev_priv->stdu_max_width); |
2221 | max_width = min(max_width, dev_priv->texture_max_width); | ||
2222 | |||
2223 | max_height = min(max_height, dev_priv->stdu_max_height); | 2226 | max_height = min(max_height, dev_priv->stdu_max_height); |
2224 | max_height = min(max_height, dev_priv->texture_max_height); | ||
2225 | } | 2227 | } |
2226 | 2228 | ||
2227 | /* Add preferred mode */ | 2229 | /* Add preferred mode */ |
@@ -2376,6 +2378,7 @@ int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data, | |||
2376 | struct drm_file *file_priv) | 2378 | struct drm_file *file_priv) |
2377 | { | 2379 | { |
2378 | struct vmw_private *dev_priv = vmw_priv(dev); | 2380 | struct vmw_private *dev_priv = vmw_priv(dev); |
2381 | struct drm_mode_config *mode_config = &dev->mode_config; | ||
2379 | struct drm_vmw_update_layout_arg *arg = | 2382 | struct drm_vmw_update_layout_arg *arg = |
2380 | (struct drm_vmw_update_layout_arg *)data; | 2383 | (struct drm_vmw_update_layout_arg *)data; |
2381 | void __user *user_rects; | 2384 | void __user *user_rects; |
@@ -2421,6 +2424,21 @@ int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data, | |||
2421 | drm_rects[i].y1 = curr_rect.y; | 2424 | drm_rects[i].y1 = curr_rect.y; |
2422 | drm_rects[i].x2 = curr_rect.x + curr_rect.w; | 2425 | drm_rects[i].x2 = curr_rect.x + curr_rect.w; |
2423 | drm_rects[i].y2 = curr_rect.y + curr_rect.h; | 2426 | drm_rects[i].y2 = curr_rect.y + curr_rect.h; |
2427 | |||
2428 | /* | ||
2429 | * Currently this check is limiting the topology within | ||
2430 | * mode_config->max (which actually is max texture size | ||
2431 | * supported by the virtual device). This limit is here to address | ||
2432 | * window managers that create a big framebuffer for the whole | ||
2433 | * topology. | ||
2434 | */ | ||
2435 | if (drm_rects[i].x1 < 0 || drm_rects[i].y1 < 0 || | ||
2436 | drm_rects[i].x2 > mode_config->max_width || | ||
2437 | drm_rects[i].y2 > mode_config->max_height) { | ||
2438 | DRM_ERROR("Invalid GUI layout.\n"); | ||
2439 | ret = -EINVAL; | ||
2440 | goto out_free; | ||
2441 | } | ||
2424 | } | 2442 | } |
2425 | 2443 | ||
2426 | ret = vmw_kms_check_display_memory(dev, arg->num_outputs, drm_rects); | 2444 | ret = vmw_kms_check_display_memory(dev, arg->num_outputs, drm_rects); |
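
The vmwgfx_kms.c hunks above move the topology bounds check from the per-rect STDU limit into vmw_kms_update_layout_ioctl, where each GUI rect is validated against mode_config->max_width/height (which now tracks the maximum texture size). A minimal user-space style sketch of that bounds test follows; gui_rect and rect_fits are illustrative names, not the kernel API.

#include <stdbool.h>
#include <stdio.h>

struct gui_rect { int x1, y1, x2, y2; };

/* A rect is acceptable when its origin is non-negative and it does not
 * extend past the maximum framebuffer (texture) size. */
static bool rect_fits(const struct gui_rect *r, int max_w, int max_h)
{
	return r->x1 >= 0 && r->y1 >= 0 && r->x2 <= max_w && r->y2 <= max_h;
}

int main(void)
{
	struct gui_rect ok  = { 0, 0, 4096, 2160 };
	struct gui_rect bad = { -1, 0, 9000, 2160 };

	printf("ok=%d bad=%d\n", rect_fits(&ok, 8192, 8192),
	       rect_fits(&bad, 8192, 8192));
	return 0;
}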
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c index 93f6b96ca7bb..f30e839f7bfd 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c | |||
@@ -1600,31 +1600,6 @@ int vmw_kms_stdu_init_display(struct vmw_private *dev_priv) | |||
1600 | 1600 | ||
1601 | dev_priv->active_display_unit = vmw_du_screen_target; | 1601 | dev_priv->active_display_unit = vmw_du_screen_target; |
1602 | 1602 | ||
1603 | if (dev_priv->capabilities & SVGA_CAP_3D) { | ||
1604 | /* | ||
1605 | * For 3D VMs, display (scanout) buffer size is the smaller of | ||
1606 | * max texture and max STDU | ||
1607 | */ | ||
1608 | uint32_t max_width, max_height; | ||
1609 | |||
1610 | max_width = min(dev_priv->texture_max_width, | ||
1611 | dev_priv->stdu_max_width); | ||
1612 | max_height = min(dev_priv->texture_max_height, | ||
1613 | dev_priv->stdu_max_height); | ||
1614 | |||
1615 | dev->mode_config.max_width = max_width; | ||
1616 | dev->mode_config.max_height = max_height; | ||
1617 | } else { | ||
1618 | /* | ||
1619 | * Given various display aspect ratios, there's no way to | ||
1620 | * estimate these using prim_bb_mem. So just set these to | ||
1621 | * something arbitrarily large and we will reject any layout | ||
1622 | * that doesn't fit prim_bb_mem later | ||
1623 | */ | ||
1624 | dev->mode_config.max_width = 8192; | ||
1625 | dev->mode_config.max_height = 8192; | ||
1626 | } | ||
1627 | |||
1628 | vmw_kms_create_implicit_placement_property(dev_priv, false); | 1603 | vmw_kms_create_implicit_placement_property(dev_priv, false); |
1629 | 1604 | ||
1630 | for (i = 0; i < VMWGFX_NUM_DISPLAY_UNITS; ++i) { | 1605 | for (i = 0; i < VMWGFX_NUM_DISPLAY_UNITS; ++i) { |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c index e125233e074b..80a01cd4c051 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c | |||
@@ -1404,22 +1404,17 @@ int vmw_surface_gb_priv_define(struct drm_device *dev, | |||
1404 | *srf_out = NULL; | 1404 | *srf_out = NULL; |
1405 | 1405 | ||
1406 | if (for_scanout) { | 1406 | if (for_scanout) { |
1407 | uint32_t max_width, max_height; | ||
1408 | |||
1409 | if (!svga3dsurface_is_screen_target_format(format)) { | 1407 | if (!svga3dsurface_is_screen_target_format(format)) { |
1410 | DRM_ERROR("Invalid Screen Target surface format."); | 1408 | DRM_ERROR("Invalid Screen Target surface format."); |
1411 | return -EINVAL; | 1409 | return -EINVAL; |
1412 | } | 1410 | } |
1413 | 1411 | ||
1414 | max_width = min(dev_priv->texture_max_width, | 1412 | if (size.width > dev_priv->texture_max_width || |
1415 | dev_priv->stdu_max_width); | 1413 | size.height > dev_priv->texture_max_height) { |
1416 | max_height = min(dev_priv->texture_max_height, | ||
1417 | dev_priv->stdu_max_height); | ||
1418 | |||
1419 | if (size.width > max_width || size.height > max_height) { | ||
1420 | DRM_ERROR("%ux%u, exceeds max surface size %ux%u\n", | 1414 | DRM_ERROR("%ux%u, exceeds max surface size %ux%u\n", |
1421 | size.width, size.height, | 1415 | size.width, size.height, |
1422 | max_width, max_height); | 1416 | dev_priv->texture_max_width, |
1417 | dev_priv->texture_max_height); | ||
1423 | return -EINVAL; | 1418 | return -EINVAL; |
1424 | } | 1419 | } |
1425 | } else { | 1420 | } else { |
@@ -1495,8 +1490,17 @@ int vmw_surface_gb_priv_define(struct drm_device *dev, | |||
1495 | if (srf->flags & SVGA3D_SURFACE_BIND_STREAM_OUTPUT) | 1490 | if (srf->flags & SVGA3D_SURFACE_BIND_STREAM_OUTPUT) |
1496 | srf->res.backup_size += sizeof(SVGA3dDXSOState); | 1491 | srf->res.backup_size += sizeof(SVGA3dDXSOState); |
1497 | 1492 | ||
1493 | /* | ||
1494 | * Don't set SVGA3D_SURFACE_SCREENTARGET flag for a scanout surface with | ||
1495 | * size greater than STDU max width/height. This is really a workaround | ||
1496 | * to support creation of a big framebuffer requested by some user-space | ||
1497 | * for the whole topology. That big framebuffer won't really be used for | ||
1498 | * binding with a screen target, since during prepare_fb a separate surface is | ||
1499 | * created, so it's safe to ignore the SVGA3D_SURFACE_SCREENTARGET flag. | ||
1500 | */ | ||
1498 | if (dev_priv->active_display_unit == vmw_du_screen_target && | 1501 | if (dev_priv->active_display_unit == vmw_du_screen_target && |
1499 | for_scanout) | 1502 | for_scanout && size.width <= dev_priv->stdu_max_width && |
1503 | size.height <= dev_priv->stdu_max_height) | ||
1500 | srf->flags |= SVGA3D_SURFACE_SCREENTARGET; | 1504 | srf->flags |= SVGA3D_SURFACE_SCREENTARGET; |
1501 | 1505 | ||
1502 | /* | 1506 | /* |
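
The vmwgfx_surface.c change above only sets the screen-target bind flag when the scanout surface actually fits the STDU limits. A minimal sketch of that gating, assuming simplified names (SURFACE_SCREENTARGET and scanout_flags are stand-ins; the kernel additionally checks for_scanout and the active display unit type):

#include <stdint.h>
#include <stdio.h>

#define SURFACE_SCREENTARGET (1u << 0)	/* stand-in for SVGA3D_SURFACE_SCREENTARGET */

static uint32_t scanout_flags(uint32_t w, uint32_t h,
			      uint32_t stdu_max_w, uint32_t stdu_max_h)
{
	uint32_t flags = 0;

	/* Oversized framebuffers stay plain surfaces and are never bound
	 * directly to a screen target. */
	if (w <= stdu_max_w && h <= stdu_max_h)
		flags |= SURFACE_SCREENTARGET;
	return flags;
}

int main(void)
{
	printf("%#x %#x\n", scanout_flags(1920, 1080, 2560, 1600),
	       scanout_flags(3840, 2160, 2560, 1600));
	return 0;
}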
diff --git a/drivers/gpu/vga/vga_switcheroo.c b/drivers/gpu/vga/vga_switcheroo.c index a96bf46bc483..cf2a18571d48 100644 --- a/drivers/gpu/vga/vga_switcheroo.c +++ b/drivers/gpu/vga/vga_switcheroo.c | |||
@@ -215,6 +215,8 @@ static void vga_switcheroo_enable(void) | |||
215 | return; | 215 | return; |
216 | 216 | ||
217 | client->id = ret | ID_BIT_AUDIO; | 217 | client->id = ret | ID_BIT_AUDIO; |
218 | if (client->ops->gpu_bound) | ||
219 | client->ops->gpu_bound(client->pdev, ret); | ||
218 | } | 220 | } |
219 | 221 | ||
220 | vga_switcheroo_debugfs_init(&vgasr_priv); | 222 | vga_switcheroo_debugfs_init(&vgasr_priv); |
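
The vga_switcheroo hunk above invokes a new, optional gpu_bound hook once an audio client gets its id. A minimal sketch of that optional-callback pattern, with a hypothetical ops struct (the real hook takes a struct pci_dev * and an enum):

#include <stddef.h>
#include <stdio.h>

struct client_ops {
	/* Optional hook; clients that don't need it leave it NULL. */
	void (*gpu_bound)(int pdev, int id);
};

static void notify_gpu_bound(const struct client_ops *ops, int pdev, int id)
{
	if (ops->gpu_bound)
		ops->gpu_bound(pdev, id);
}

static void my_gpu_bound(int pdev, int id)
{
	printf("gpu %d bound, id %d\n", pdev, id);
}

int main(void)
{
	struct client_ops with    = { .gpu_bound = my_gpu_bound };
	struct client_ops without = { .gpu_bound = NULL };

	notify_gpu_bound(&with, 1, 2);
	notify_gpu_bound(&without, 1, 2);	/* silently skipped */
	return 0;
}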
diff --git a/drivers/hwmon/nct6775.c b/drivers/hwmon/nct6775.c index 944f5b63aecd..78603b78cf41 100644 --- a/drivers/hwmon/nct6775.c +++ b/drivers/hwmon/nct6775.c | |||
@@ -207,8 +207,6 @@ superio_exit(int ioreg) | |||
207 | 207 | ||
208 | #define NUM_FAN 7 | 208 | #define NUM_FAN 7 |
209 | 209 | ||
210 | #define TEMP_SOURCE_VIRTUAL 0x1f | ||
211 | |||
212 | /* Common and NCT6775 specific data */ | 210 | /* Common and NCT6775 specific data */ |
213 | 211 | ||
214 | /* Voltage min/max registers for nr=7..14 are in bank 5 */ | 212 | /* Voltage min/max registers for nr=7..14 are in bank 5 */ |
@@ -299,8 +297,9 @@ static const u16 NCT6775_REG_PWM_READ[] = { | |||
299 | 297 | ||
300 | static const u16 NCT6775_REG_FAN[] = { 0x630, 0x632, 0x634, 0x636, 0x638 }; | 298 | static const u16 NCT6775_REG_FAN[] = { 0x630, 0x632, 0x634, 0x636, 0x638 }; |
301 | static const u16 NCT6775_REG_FAN_MIN[] = { 0x3b, 0x3c, 0x3d }; | 299 | static const u16 NCT6775_REG_FAN_MIN[] = { 0x3b, 0x3c, 0x3d }; |
302 | static const u16 NCT6775_REG_FAN_PULSES[] = { 0x641, 0x642, 0x643, 0x644, 0 }; | 300 | static const u16 NCT6775_REG_FAN_PULSES[NUM_FAN] = { |
303 | static const u16 NCT6775_FAN_PULSE_SHIFT[] = { 0, 0, 0, 0, 0, 0 }; | 301 | 0x641, 0x642, 0x643, 0x644 }; |
302 | static const u16 NCT6775_FAN_PULSE_SHIFT[NUM_FAN] = { }; | ||
304 | 303 | ||
305 | static const u16 NCT6775_REG_TEMP[] = { | 304 | static const u16 NCT6775_REG_TEMP[] = { |
306 | 0x27, 0x150, 0x250, 0x62b, 0x62c, 0x62d }; | 305 | 0x27, 0x150, 0x250, 0x62b, 0x62c, 0x62d }; |
@@ -373,6 +372,7 @@ static const char *const nct6775_temp_label[] = { | |||
373 | }; | 372 | }; |
374 | 373 | ||
375 | #define NCT6775_TEMP_MASK 0x001ffffe | 374 | #define NCT6775_TEMP_MASK 0x001ffffe |
375 | #define NCT6775_VIRT_TEMP_MASK 0x00000000 | ||
376 | 376 | ||
377 | static const u16 NCT6775_REG_TEMP_ALTERNATE[32] = { | 377 | static const u16 NCT6775_REG_TEMP_ALTERNATE[32] = { |
378 | [13] = 0x661, | 378 | [13] = 0x661, |
@@ -425,8 +425,8 @@ static const u8 NCT6776_PWM_MODE_MASK[] = { 0x01, 0, 0, 0, 0, 0 }; | |||
425 | 425 | ||
426 | static const u16 NCT6776_REG_FAN_MIN[] = { | 426 | static const u16 NCT6776_REG_FAN_MIN[] = { |
427 | 0x63a, 0x63c, 0x63e, 0x640, 0x642, 0x64a, 0x64c }; | 427 | 0x63a, 0x63c, 0x63e, 0x640, 0x642, 0x64a, 0x64c }; |
428 | static const u16 NCT6776_REG_FAN_PULSES[] = { | 428 | static const u16 NCT6776_REG_FAN_PULSES[NUM_FAN] = { |
429 | 0x644, 0x645, 0x646, 0x647, 0x648, 0x649, 0 }; | 429 | 0x644, 0x645, 0x646, 0x647, 0x648, 0x649 }; |
430 | 430 | ||
431 | static const u16 NCT6776_REG_WEIGHT_DUTY_BASE[] = { | 431 | static const u16 NCT6776_REG_WEIGHT_DUTY_BASE[] = { |
432 | 0x13e, 0x23e, 0x33e, 0x83e, 0x93e, 0xa3e }; | 432 | 0x13e, 0x23e, 0x33e, 0x83e, 0x93e, 0xa3e }; |
@@ -461,6 +461,7 @@ static const char *const nct6776_temp_label[] = { | |||
461 | }; | 461 | }; |
462 | 462 | ||
463 | #define NCT6776_TEMP_MASK 0x007ffffe | 463 | #define NCT6776_TEMP_MASK 0x007ffffe |
464 | #define NCT6776_VIRT_TEMP_MASK 0x00000000 | ||
464 | 465 | ||
465 | static const u16 NCT6776_REG_TEMP_ALTERNATE[32] = { | 466 | static const u16 NCT6776_REG_TEMP_ALTERNATE[32] = { |
466 | [14] = 0x401, | 467 | [14] = 0x401, |
@@ -501,9 +502,9 @@ static const s8 NCT6779_BEEP_BITS[] = { | |||
501 | 30, 31 }; /* intrusion0, intrusion1 */ | 502 | 30, 31 }; /* intrusion0, intrusion1 */ |
502 | 503 | ||
503 | static const u16 NCT6779_REG_FAN[] = { | 504 | static const u16 NCT6779_REG_FAN[] = { |
504 | 0x4b0, 0x4b2, 0x4b4, 0x4b6, 0x4b8, 0x4ba, 0x660 }; | 505 | 0x4c0, 0x4c2, 0x4c4, 0x4c6, 0x4c8, 0x4ca, 0x4ce }; |
505 | static const u16 NCT6779_REG_FAN_PULSES[] = { | 506 | static const u16 NCT6779_REG_FAN_PULSES[NUM_FAN] = { |
506 | 0x644, 0x645, 0x646, 0x647, 0x648, 0x649, 0 }; | 507 | 0x644, 0x645, 0x646, 0x647, 0x648, 0x649 }; |
507 | 508 | ||
508 | static const u16 NCT6779_REG_CRITICAL_PWM_ENABLE[] = { | 509 | static const u16 NCT6779_REG_CRITICAL_PWM_ENABLE[] = { |
509 | 0x136, 0x236, 0x336, 0x836, 0x936, 0xa36, 0xb36 }; | 510 | 0x136, 0x236, 0x336, 0x836, 0x936, 0xa36, 0xb36 }; |
@@ -559,7 +560,9 @@ static const char *const nct6779_temp_label[] = { | |||
559 | }; | 560 | }; |
560 | 561 | ||
561 | #define NCT6779_TEMP_MASK 0x07ffff7e | 562 | #define NCT6779_TEMP_MASK 0x07ffff7e |
563 | #define NCT6779_VIRT_TEMP_MASK 0x00000000 | ||
562 | #define NCT6791_TEMP_MASK 0x87ffff7e | 564 | #define NCT6791_TEMP_MASK 0x87ffff7e |
565 | #define NCT6791_VIRT_TEMP_MASK 0x80000000 | ||
563 | 566 | ||
564 | static const u16 NCT6779_REG_TEMP_ALTERNATE[32] | 567 | static const u16 NCT6779_REG_TEMP_ALTERNATE[32] |
565 | = { 0x490, 0x491, 0x492, 0x493, 0x494, 0x495, 0, 0, | 568 | = { 0x490, 0x491, 0x492, 0x493, 0x494, 0x495, 0, 0, |
@@ -638,6 +641,7 @@ static const char *const nct6792_temp_label[] = { | |||
638 | }; | 641 | }; |
639 | 642 | ||
640 | #define NCT6792_TEMP_MASK 0x9fffff7e | 643 | #define NCT6792_TEMP_MASK 0x9fffff7e |
644 | #define NCT6792_VIRT_TEMP_MASK 0x80000000 | ||
641 | 645 | ||
642 | static const char *const nct6793_temp_label[] = { | 646 | static const char *const nct6793_temp_label[] = { |
643 | "", | 647 | "", |
@@ -675,6 +679,7 @@ static const char *const nct6793_temp_label[] = { | |||
675 | }; | 679 | }; |
676 | 680 | ||
677 | #define NCT6793_TEMP_MASK 0xbfff037e | 681 | #define NCT6793_TEMP_MASK 0xbfff037e |
682 | #define NCT6793_VIRT_TEMP_MASK 0x80000000 | ||
678 | 683 | ||
679 | static const char *const nct6795_temp_label[] = { | 684 | static const char *const nct6795_temp_label[] = { |
680 | "", | 685 | "", |
@@ -712,6 +717,7 @@ static const char *const nct6795_temp_label[] = { | |||
712 | }; | 717 | }; |
713 | 718 | ||
714 | #define NCT6795_TEMP_MASK 0xbfffff7e | 719 | #define NCT6795_TEMP_MASK 0xbfffff7e |
720 | #define NCT6795_VIRT_TEMP_MASK 0x80000000 | ||
715 | 721 | ||
716 | static const char *const nct6796_temp_label[] = { | 722 | static const char *const nct6796_temp_label[] = { |
717 | "", | 723 | "", |
@@ -724,8 +730,8 @@ static const char *const nct6796_temp_label[] = { | |||
724 | "AUXTIN4", | 730 | "AUXTIN4", |
725 | "SMBUSMASTER 0", | 731 | "SMBUSMASTER 0", |
726 | "SMBUSMASTER 1", | 732 | "SMBUSMASTER 1", |
727 | "", | 733 | "Virtual_TEMP", |
728 | "", | 734 | "Virtual_TEMP", |
729 | "", | 735 | "", |
730 | "", | 736 | "", |
731 | "", | 737 | "", |
@@ -748,7 +754,8 @@ static const char *const nct6796_temp_label[] = { | |||
748 | "Virtual_TEMP" | 754 | "Virtual_TEMP" |
749 | }; | 755 | }; |
750 | 756 | ||
751 | #define NCT6796_TEMP_MASK 0xbfff03fe | 757 | #define NCT6796_TEMP_MASK 0xbfff0ffe |
758 | #define NCT6796_VIRT_TEMP_MASK 0x80000c00 | ||
752 | 759 | ||
753 | /* NCT6102D/NCT6106D specific data */ | 760 | /* NCT6102D/NCT6106D specific data */ |
754 | 761 | ||
@@ -779,8 +786,8 @@ static const u16 NCT6106_REG_TEMP_CONFIG[] = { | |||
779 | 786 | ||
780 | static const u16 NCT6106_REG_FAN[] = { 0x20, 0x22, 0x24 }; | 787 | static const u16 NCT6106_REG_FAN[] = { 0x20, 0x22, 0x24 }; |
781 | static const u16 NCT6106_REG_FAN_MIN[] = { 0xe0, 0xe2, 0xe4 }; | 788 | static const u16 NCT6106_REG_FAN_MIN[] = { 0xe0, 0xe2, 0xe4 }; |
782 | static const u16 NCT6106_REG_FAN_PULSES[] = { 0xf6, 0xf6, 0xf6, 0, 0 }; | 789 | static const u16 NCT6106_REG_FAN_PULSES[] = { 0xf6, 0xf6, 0xf6 }; |
783 | static const u16 NCT6106_FAN_PULSE_SHIFT[] = { 0, 2, 4, 0, 0 }; | 790 | static const u16 NCT6106_FAN_PULSE_SHIFT[] = { 0, 2, 4 }; |
784 | 791 | ||
785 | static const u8 NCT6106_REG_PWM_MODE[] = { 0xf3, 0xf3, 0xf3 }; | 792 | static const u8 NCT6106_REG_PWM_MODE[] = { 0xf3, 0xf3, 0xf3 }; |
786 | static const u8 NCT6106_PWM_MODE_MASK[] = { 0x01, 0x02, 0x04 }; | 793 | static const u8 NCT6106_PWM_MODE_MASK[] = { 0x01, 0x02, 0x04 }; |
@@ -917,6 +924,11 @@ static unsigned int fan_from_reg16(u16 reg, unsigned int divreg) | |||
917 | return 1350000U / (reg << divreg); | 924 | return 1350000U / (reg << divreg); |
918 | } | 925 | } |
919 | 926 | ||
927 | static unsigned int fan_from_reg_rpm(u16 reg, unsigned int divreg) | ||
928 | { | ||
929 | return reg; | ||
930 | } | ||
931 | |||
920 | static u16 fan_to_reg(u32 fan, unsigned int divreg) | 932 | static u16 fan_to_reg(u32 fan, unsigned int divreg) |
921 | { | 933 | { |
922 | if (!fan) | 934 | if (!fan) |
@@ -969,6 +981,7 @@ struct nct6775_data { | |||
969 | u16 reg_temp_config[NUM_TEMP]; | 981 | u16 reg_temp_config[NUM_TEMP]; |
970 | const char * const *temp_label; | 982 | const char * const *temp_label; |
971 | u32 temp_mask; | 983 | u32 temp_mask; |
984 | u32 virt_temp_mask; | ||
972 | 985 | ||
973 | u16 REG_CONFIG; | 986 | u16 REG_CONFIG; |
974 | u16 REG_VBAT; | 987 | u16 REG_VBAT; |
@@ -1276,11 +1289,11 @@ static bool is_word_sized(struct nct6775_data *data, u16 reg) | |||
1276 | case nct6795: | 1289 | case nct6795: |
1277 | case nct6796: | 1290 | case nct6796: |
1278 | return reg == 0x150 || reg == 0x153 || reg == 0x155 || | 1291 | return reg == 0x150 || reg == 0x153 || reg == 0x155 || |
1279 | ((reg & 0xfff0) == 0x4b0 && (reg & 0x000f) < 0x0b) || | 1292 | (reg & 0xfff0) == 0x4c0 || |
1280 | reg == 0x402 || | 1293 | reg == 0x402 || |
1281 | reg == 0x63a || reg == 0x63c || reg == 0x63e || | 1294 | reg == 0x63a || reg == 0x63c || reg == 0x63e || |
1282 | reg == 0x640 || reg == 0x642 || reg == 0x64a || | 1295 | reg == 0x640 || reg == 0x642 || reg == 0x64a || |
1283 | reg == 0x64c || reg == 0x660 || | 1296 | reg == 0x64c || |
1284 | reg == 0x73 || reg == 0x75 || reg == 0x77 || reg == 0x79 || | 1297 | reg == 0x73 || reg == 0x75 || reg == 0x77 || reg == 0x79 || |
1285 | reg == 0x7b || reg == 0x7d; | 1298 | reg == 0x7b || reg == 0x7d; |
1286 | } | 1299 | } |
@@ -1558,7 +1571,7 @@ static void nct6775_update_pwm(struct device *dev) | |||
1558 | reg = nct6775_read_value(data, data->REG_WEIGHT_TEMP_SEL[i]); | 1571 | reg = nct6775_read_value(data, data->REG_WEIGHT_TEMP_SEL[i]); |
1559 | data->pwm_weight_temp_sel[i] = reg & 0x1f; | 1572 | data->pwm_weight_temp_sel[i] = reg & 0x1f; |
1560 | /* If weight is disabled, report weight source as 0 */ | 1573 | /* If weight is disabled, report weight source as 0 */ |
1561 | if (j == 1 && !(reg & 0x80)) | 1574 | if (!(reg & 0x80)) |
1562 | data->pwm_weight_temp_sel[i] = 0; | 1575 | data->pwm_weight_temp_sel[i] = 0; |
1563 | 1576 | ||
1564 | /* Weight temp data */ | 1577 | /* Weight temp data */ |
@@ -1682,9 +1695,13 @@ static struct nct6775_data *nct6775_update_device(struct device *dev) | |||
1682 | if (data->has_fan_min & BIT(i)) | 1695 | if (data->has_fan_min & BIT(i)) |
1683 | data->fan_min[i] = nct6775_read_value(data, | 1696 | data->fan_min[i] = nct6775_read_value(data, |
1684 | data->REG_FAN_MIN[i]); | 1697 | data->REG_FAN_MIN[i]); |
1685 | data->fan_pulses[i] = | 1698 | |
1686 | (nct6775_read_value(data, data->REG_FAN_PULSES[i]) | 1699 | if (data->REG_FAN_PULSES[i]) { |
1687 | >> data->FAN_PULSE_SHIFT[i]) & 0x03; | 1700 | data->fan_pulses[i] = |
1701 | (nct6775_read_value(data, | ||
1702 | data->REG_FAN_PULSES[i]) | ||
1703 | >> data->FAN_PULSE_SHIFT[i]) & 0x03; | ||
1704 | } | ||
1688 | 1705 | ||
1689 | nct6775_select_fan_div(dev, data, i, reg); | 1706 | nct6775_select_fan_div(dev, data, i, reg); |
1690 | } | 1707 | } |
@@ -3639,6 +3656,7 @@ static int nct6775_probe(struct platform_device *pdev) | |||
3639 | 3656 | ||
3640 | data->temp_label = nct6776_temp_label; | 3657 | data->temp_label = nct6776_temp_label; |
3641 | data->temp_mask = NCT6776_TEMP_MASK; | 3658 | data->temp_mask = NCT6776_TEMP_MASK; |
3659 | data->virt_temp_mask = NCT6776_VIRT_TEMP_MASK; | ||
3642 | 3660 | ||
3643 | data->REG_VBAT = NCT6106_REG_VBAT; | 3661 | data->REG_VBAT = NCT6106_REG_VBAT; |
3644 | data->REG_DIODE = NCT6106_REG_DIODE; | 3662 | data->REG_DIODE = NCT6106_REG_DIODE; |
@@ -3717,6 +3735,7 @@ static int nct6775_probe(struct platform_device *pdev) | |||
3717 | 3735 | ||
3718 | data->temp_label = nct6775_temp_label; | 3736 | data->temp_label = nct6775_temp_label; |
3719 | data->temp_mask = NCT6775_TEMP_MASK; | 3737 | data->temp_mask = NCT6775_TEMP_MASK; |
3738 | data->virt_temp_mask = NCT6775_VIRT_TEMP_MASK; | ||
3720 | 3739 | ||
3721 | data->REG_CONFIG = NCT6775_REG_CONFIG; | 3740 | data->REG_CONFIG = NCT6775_REG_CONFIG; |
3722 | data->REG_VBAT = NCT6775_REG_VBAT; | 3741 | data->REG_VBAT = NCT6775_REG_VBAT; |
@@ -3789,6 +3808,7 @@ static int nct6775_probe(struct platform_device *pdev) | |||
3789 | 3808 | ||
3790 | data->temp_label = nct6776_temp_label; | 3809 | data->temp_label = nct6776_temp_label; |
3791 | data->temp_mask = NCT6776_TEMP_MASK; | 3810 | data->temp_mask = NCT6776_TEMP_MASK; |
3811 | data->virt_temp_mask = NCT6776_VIRT_TEMP_MASK; | ||
3792 | 3812 | ||
3793 | data->REG_CONFIG = NCT6775_REG_CONFIG; | 3813 | data->REG_CONFIG = NCT6775_REG_CONFIG; |
3794 | data->REG_VBAT = NCT6775_REG_VBAT; | 3814 | data->REG_VBAT = NCT6775_REG_VBAT; |
@@ -3853,7 +3873,7 @@ static int nct6775_probe(struct platform_device *pdev) | |||
3853 | data->ALARM_BITS = NCT6779_ALARM_BITS; | 3873 | data->ALARM_BITS = NCT6779_ALARM_BITS; |
3854 | data->BEEP_BITS = NCT6779_BEEP_BITS; | 3874 | data->BEEP_BITS = NCT6779_BEEP_BITS; |
3855 | 3875 | ||
3856 | data->fan_from_reg = fan_from_reg13; | 3876 | data->fan_from_reg = fan_from_reg_rpm; |
3857 | data->fan_from_reg_min = fan_from_reg13; | 3877 | data->fan_from_reg_min = fan_from_reg13; |
3858 | data->target_temp_mask = 0xff; | 3878 | data->target_temp_mask = 0xff; |
3859 | data->tolerance_mask = 0x07; | 3879 | data->tolerance_mask = 0x07; |
@@ -3861,6 +3881,7 @@ static int nct6775_probe(struct platform_device *pdev) | |||
3861 | 3881 | ||
3862 | data->temp_label = nct6779_temp_label; | 3882 | data->temp_label = nct6779_temp_label; |
3863 | data->temp_mask = NCT6779_TEMP_MASK; | 3883 | data->temp_mask = NCT6779_TEMP_MASK; |
3884 | data->virt_temp_mask = NCT6779_VIRT_TEMP_MASK; | ||
3864 | 3885 | ||
3865 | data->REG_CONFIG = NCT6775_REG_CONFIG; | 3886 | data->REG_CONFIG = NCT6775_REG_CONFIG; |
3866 | data->REG_VBAT = NCT6775_REG_VBAT; | 3887 | data->REG_VBAT = NCT6775_REG_VBAT; |
@@ -3933,7 +3954,7 @@ static int nct6775_probe(struct platform_device *pdev) | |||
3933 | data->ALARM_BITS = NCT6791_ALARM_BITS; | 3954 | data->ALARM_BITS = NCT6791_ALARM_BITS; |
3934 | data->BEEP_BITS = NCT6779_BEEP_BITS; | 3955 | data->BEEP_BITS = NCT6779_BEEP_BITS; |
3935 | 3956 | ||
3936 | data->fan_from_reg = fan_from_reg13; | 3957 | data->fan_from_reg = fan_from_reg_rpm; |
3937 | data->fan_from_reg_min = fan_from_reg13; | 3958 | data->fan_from_reg_min = fan_from_reg13; |
3938 | data->target_temp_mask = 0xff; | 3959 | data->target_temp_mask = 0xff; |
3939 | data->tolerance_mask = 0x07; | 3960 | data->tolerance_mask = 0x07; |
@@ -3944,22 +3965,27 @@ static int nct6775_probe(struct platform_device *pdev) | |||
3944 | case nct6791: | 3965 | case nct6791: |
3945 | data->temp_label = nct6779_temp_label; | 3966 | data->temp_label = nct6779_temp_label; |
3946 | data->temp_mask = NCT6791_TEMP_MASK; | 3967 | data->temp_mask = NCT6791_TEMP_MASK; |
3968 | data->virt_temp_mask = NCT6791_VIRT_TEMP_MASK; | ||
3947 | break; | 3969 | break; |
3948 | case nct6792: | 3970 | case nct6792: |
3949 | data->temp_label = nct6792_temp_label; | 3971 | data->temp_label = nct6792_temp_label; |
3950 | data->temp_mask = NCT6792_TEMP_MASK; | 3972 | data->temp_mask = NCT6792_TEMP_MASK; |
3973 | data->virt_temp_mask = NCT6792_VIRT_TEMP_MASK; | ||
3951 | break; | 3974 | break; |
3952 | case nct6793: | 3975 | case nct6793: |
3953 | data->temp_label = nct6793_temp_label; | 3976 | data->temp_label = nct6793_temp_label; |
3954 | data->temp_mask = NCT6793_TEMP_MASK; | 3977 | data->temp_mask = NCT6793_TEMP_MASK; |
3978 | data->virt_temp_mask = NCT6793_VIRT_TEMP_MASK; | ||
3955 | break; | 3979 | break; |
3956 | case nct6795: | 3980 | case nct6795: |
3957 | data->temp_label = nct6795_temp_label; | 3981 | data->temp_label = nct6795_temp_label; |
3958 | data->temp_mask = NCT6795_TEMP_MASK; | 3982 | data->temp_mask = NCT6795_TEMP_MASK; |
3983 | data->virt_temp_mask = NCT6795_VIRT_TEMP_MASK; | ||
3959 | break; | 3984 | break; |
3960 | case nct6796: | 3985 | case nct6796: |
3961 | data->temp_label = nct6796_temp_label; | 3986 | data->temp_label = nct6796_temp_label; |
3962 | data->temp_mask = NCT6796_TEMP_MASK; | 3987 | data->temp_mask = NCT6796_TEMP_MASK; |
3988 | data->virt_temp_mask = NCT6796_VIRT_TEMP_MASK; | ||
3963 | break; | 3989 | break; |
3964 | } | 3990 | } |
3965 | 3991 | ||
@@ -4143,7 +4169,7 @@ static int nct6775_probe(struct platform_device *pdev) | |||
4143 | * for each fan reflects a different temperature, and there | 4169 | * for each fan reflects a different temperature, and there |
4144 | * are no duplicates. | 4170 | * are no duplicates. |
4145 | */ | 4171 | */ |
4146 | if (src != TEMP_SOURCE_VIRTUAL) { | 4172 | if (!(data->virt_temp_mask & BIT(src))) { |
4147 | if (mask & BIT(src)) | 4173 | if (mask & BIT(src)) |
4148 | continue; | 4174 | continue; |
4149 | mask |= BIT(src); | 4175 | mask |= BIT(src); |
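
The nct6775 hunks above replace the single TEMP_SOURCE_VIRTUAL index with a per-chip virt_temp_mask, so the duplicate-source check skips any source whose bit is set in that mask. A minimal sketch of that test, assuming simplified names (the real masks are the NCT67xx_VIRT_TEMP_MASK values shown above):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define BIT(n) (1u << (n))

static bool is_virtual_source(uint32_t virt_temp_mask, unsigned int src)
{
	/* A source is virtual when its bit is set in the chip's mask. */
	return virt_temp_mask & BIT(src);
}

int main(void)
{
	uint32_t nct6791_virt_mask = 0x80000000;	/* bit 31 = Virtual_TEMP */

	printf("src31=%d src5=%d\n",
	       is_virtual_source(nct6791_virt_mask, 31),
	       is_virtual_source(nct6791_virt_mask, 5));
	return 0;
}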
diff --git a/drivers/hwtracing/intel_th/core.c b/drivers/hwtracing/intel_th/core.c index da962aa2cef5..fc6b7f8b62fb 100644 --- a/drivers/hwtracing/intel_th/core.c +++ b/drivers/hwtracing/intel_th/core.c | |||
@@ -139,7 +139,8 @@ static int intel_th_remove(struct device *dev) | |||
139 | th->thdev[i] = NULL; | 139 | th->thdev[i] = NULL; |
140 | } | 140 | } |
141 | 141 | ||
142 | th->num_thdevs = lowest; | 142 | if (lowest >= 0) |
143 | th->num_thdevs = lowest; | ||
143 | } | 144 | } |
144 | 145 | ||
145 | if (thdrv->attr_group) | 146 | if (thdrv->attr_group) |
@@ -487,7 +488,7 @@ static const struct intel_th_subdevice { | |||
487 | .flags = IORESOURCE_MEM, | 488 | .flags = IORESOURCE_MEM, |
488 | }, | 489 | }, |
489 | { | 490 | { |
490 | .start = TH_MMIO_SW, | 491 | .start = 1, /* use resource[1] */ |
491 | .end = 0, | 492 | .end = 0, |
492 | .flags = IORESOURCE_MEM, | 493 | .flags = IORESOURCE_MEM, |
493 | }, | 494 | }, |
@@ -580,6 +581,7 @@ intel_th_subdevice_alloc(struct intel_th *th, | |||
580 | struct intel_th_device *thdev; | 581 | struct intel_th_device *thdev; |
581 | struct resource res[3]; | 582 | struct resource res[3]; |
582 | unsigned int req = 0; | 583 | unsigned int req = 0; |
584 | bool is64bit = false; | ||
583 | int r, err; | 585 | int r, err; |
584 | 586 | ||
585 | thdev = intel_th_device_alloc(th, subdev->type, subdev->name, | 587 | thdev = intel_th_device_alloc(th, subdev->type, subdev->name, |
@@ -589,12 +591,18 @@ intel_th_subdevice_alloc(struct intel_th *th, | |||
589 | 591 | ||
590 | thdev->drvdata = th->drvdata; | 592 | thdev->drvdata = th->drvdata; |
591 | 593 | ||
594 | for (r = 0; r < th->num_resources; r++) | ||
595 | if (th->resource[r].flags & IORESOURCE_MEM_64) { | ||
596 | is64bit = true; | ||
597 | break; | ||
598 | } | ||
599 | |||
592 | memcpy(res, subdev->res, | 600 | memcpy(res, subdev->res, |
593 | sizeof(struct resource) * subdev->nres); | 601 | sizeof(struct resource) * subdev->nres); |
594 | 602 | ||
595 | for (r = 0; r < subdev->nres; r++) { | 603 | for (r = 0; r < subdev->nres; r++) { |
596 | struct resource *devres = th->resource; | 604 | struct resource *devres = th->resource; |
597 | int bar = TH_MMIO_CONFIG; | 605 | int bar = 0; /* cut subdevices' MMIO from resource[0] */ |
598 | 606 | ||
599 | /* | 607 | /* |
600 | * Take .end == 0 to mean 'take the whole bar', | 608 | * Take .end == 0 to mean 'take the whole bar', |
@@ -603,6 +611,8 @@ intel_th_subdevice_alloc(struct intel_th *th, | |||
603 | */ | 611 | */ |
604 | if (!res[r].end && res[r].flags == IORESOURCE_MEM) { | 612 | if (!res[r].end && res[r].flags == IORESOURCE_MEM) { |
605 | bar = res[r].start; | 613 | bar = res[r].start; |
614 | if (is64bit) | ||
615 | bar *= 2; | ||
606 | res[r].start = 0; | 616 | res[r].start = 0; |
607 | res[r].end = resource_size(&devres[bar]) - 1; | 617 | res[r].end = resource_size(&devres[bar]) - 1; |
608 | } | 618 | } |
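
The intel_th/core.c change above accounts for hosts whose MMIO BARs are 64-bit: each such BAR occupies two resource slots, so the subdevice's BAR index has to be doubled before the size lookup. A minimal sketch of that index fixup (hypothetical helper name, simplified types):

#include <stdbool.h>
#include <stdio.h>

static int resource_index(int bar, bool is64bit)
{
	/* A 64-bit BAR consumes two resource slots, so index 1 becomes 2. */
	return is64bit ? bar * 2 : bar;
}

int main(void)
{
	printf("32-bit: %d, 64-bit: %d\n",
	       resource_index(1, false), resource_index(1, true));
	return 0;
}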
diff --git a/drivers/hwtracing/intel_th/pci.c b/drivers/hwtracing/intel_th/pci.c index c2e55e5d97f6..1cf6290d6435 100644 --- a/drivers/hwtracing/intel_th/pci.c +++ b/drivers/hwtracing/intel_th/pci.c | |||
@@ -160,6 +160,11 @@ static const struct pci_device_id intel_th_pci_id_table[] = { | |||
160 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x18e1), | 160 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x18e1), |
161 | .driver_data = (kernel_ulong_t)&intel_th_2x, | 161 | .driver_data = (kernel_ulong_t)&intel_th_2x, |
162 | }, | 162 | }, |
163 | { | ||
164 | /* Ice Lake PCH */ | ||
165 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x34a6), | ||
166 | .driver_data = (kernel_ulong_t)&intel_th_2x, | ||
167 | }, | ||
163 | { 0 }, | 168 | { 0 }, |
164 | }; | 169 | }; |
165 | 170 | ||
diff --git a/drivers/infiniband/core/cache.c b/drivers/infiniband/core/cache.c index 0bee1f4b914e..3208ad6ad540 100644 --- a/drivers/infiniband/core/cache.c +++ b/drivers/infiniband/core/cache.c | |||
@@ -338,6 +338,39 @@ static int add_roce_gid(struct ib_gid_table_entry *entry) | |||
338 | } | 338 | } |
339 | 339 | ||
340 | /** | 340 | /** |
341 | * del_gid - Delete GID table entry | ||
342 | * | ||
343 | * @ib_dev: IB device whose GID entry is to be deleted | ||
344 | * @port: Port number of the IB device | ||
345 | * @table: GID table of the IB device for a port | ||
346 | * @ix: GID entry index to delete | ||
347 | * | ||
348 | */ | ||
349 | static void del_gid(struct ib_device *ib_dev, u8 port, | ||
350 | struct ib_gid_table *table, int ix) | ||
351 | { | ||
352 | struct ib_gid_table_entry *entry; | ||
353 | |||
354 | lockdep_assert_held(&table->lock); | ||
355 | |||
356 | pr_debug("%s device=%s port=%d index=%d gid %pI6\n", __func__, | ||
357 | ib_dev->name, port, ix, | ||
358 | table->data_vec[ix]->attr.gid.raw); | ||
359 | |||
360 | write_lock_irq(&table->rwlock); | ||
361 | entry = table->data_vec[ix]; | ||
362 | entry->state = GID_TABLE_ENTRY_PENDING_DEL; | ||
363 | /* | ||
364 | * For a non-RoCE protocol, the GID entry slot is ready to use. | ||
365 | */ | ||
366 | if (!rdma_protocol_roce(ib_dev, port)) | ||
367 | table->data_vec[ix] = NULL; | ||
368 | write_unlock_irq(&table->rwlock); | ||
369 | |||
370 | put_gid_entry_locked(entry); | ||
371 | } | ||
372 | |||
373 | /** | ||
341 | * add_modify_gid - Add or modify GID table entry | 374 | * add_modify_gid - Add or modify GID table entry |
342 | * | 375 | * |
343 | * @table: GID table in which GID to be added or modified | 376 | * @table: GID table in which GID to be added or modified |
@@ -358,7 +391,7 @@ static int add_modify_gid(struct ib_gid_table *table, | |||
358 | * this index. | 391 | * this index. |
359 | */ | 392 | */ |
360 | if (is_gid_entry_valid(table->data_vec[attr->index])) | 393 | if (is_gid_entry_valid(table->data_vec[attr->index])) |
361 | put_gid_entry(table->data_vec[attr->index]); | 394 | del_gid(attr->device, attr->port_num, table, attr->index); |
362 | 395 | ||
363 | /* | 396 | /* |
364 | * Some HCA's report multiple GID entries with only one valid GID, and | 397 | * Some HCA's report multiple GID entries with only one valid GID, and |
@@ -386,39 +419,6 @@ done: | |||
386 | return ret; | 419 | return ret; |
387 | } | 420 | } |
388 | 421 | ||
389 | /** | ||
390 | * del_gid - Delete GID table entry | ||
391 | * | ||
392 | * @ib_dev: IB device whose GID entry is to be deleted | ||
393 | * @port: Port number of the IB device | ||
394 | * @table: GID table of the IB device for a port | ||
395 | * @ix: GID entry index to delete | ||
396 | * | ||
397 | */ | ||
398 | static void del_gid(struct ib_device *ib_dev, u8 port, | ||
399 | struct ib_gid_table *table, int ix) | ||
400 | { | ||
401 | struct ib_gid_table_entry *entry; | ||
402 | |||
403 | lockdep_assert_held(&table->lock); | ||
404 | |||
405 | pr_debug("%s device=%s port=%d index=%d gid %pI6\n", __func__, | ||
406 | ib_dev->name, port, ix, | ||
407 | table->data_vec[ix]->attr.gid.raw); | ||
408 | |||
409 | write_lock_irq(&table->rwlock); | ||
410 | entry = table->data_vec[ix]; | ||
411 | entry->state = GID_TABLE_ENTRY_PENDING_DEL; | ||
412 | /* | ||
413 | * For a non-RoCE protocol, the GID entry slot is ready to use. | ||
414 | */ | ||
415 | if (!rdma_protocol_roce(ib_dev, port)) | ||
416 | table->data_vec[ix] = NULL; | ||
417 | write_unlock_irq(&table->rwlock); | ||
418 | |||
419 | put_gid_entry_locked(entry); | ||
420 | } | ||
421 | |||
422 | /* rwlock should be read locked, or lock should be held */ | 422 | /* rwlock should be read locked, or lock should be held */ |
423 | static int find_gid(struct ib_gid_table *table, const union ib_gid *gid, | 423 | static int find_gid(struct ib_gid_table *table, const union ib_gid *gid, |
424 | const struct ib_gid_attr *val, bool default_gid, | 424 | const struct ib_gid_attr *val, bool default_gid, |
diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c index 5f437d1570fb..21863ddde63e 100644 --- a/drivers/infiniband/core/ucma.c +++ b/drivers/infiniband/core/ucma.c | |||
@@ -1759,6 +1759,8 @@ static int ucma_close(struct inode *inode, struct file *filp) | |||
1759 | mutex_lock(&mut); | 1759 | mutex_lock(&mut); |
1760 | if (!ctx->closing) { | 1760 | if (!ctx->closing) { |
1761 | mutex_unlock(&mut); | 1761 | mutex_unlock(&mut); |
1762 | ucma_put_ctx(ctx); | ||
1763 | wait_for_completion(&ctx->comp); | ||
1762 | /* rdma_destroy_id ensures that no event handlers are | 1764 | /* rdma_destroy_id ensures that no event handlers are |
1763 | * inflight for that id before releasing it. | 1765 | * inflight for that id before releasing it. |
1764 | */ | 1766 | */ |
diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c index a21d5214afc3..e012ca80f9d1 100644 --- a/drivers/infiniband/core/uverbs_cmd.c +++ b/drivers/infiniband/core/uverbs_cmd.c | |||
@@ -2027,33 +2027,55 @@ static int modify_qp(struct ib_uverbs_file *file, | |||
2027 | 2027 | ||
2028 | if ((cmd->base.attr_mask & IB_QP_CUR_STATE && | 2028 | if ((cmd->base.attr_mask & IB_QP_CUR_STATE && |
2029 | cmd->base.cur_qp_state > IB_QPS_ERR) || | 2029 | cmd->base.cur_qp_state > IB_QPS_ERR) || |
2030 | cmd->base.qp_state > IB_QPS_ERR) { | 2030 | (cmd->base.attr_mask & IB_QP_STATE && |
2031 | cmd->base.qp_state > IB_QPS_ERR)) { | ||
2031 | ret = -EINVAL; | 2032 | ret = -EINVAL; |
2032 | goto release_qp; | 2033 | goto release_qp; |
2033 | } | 2034 | } |
2034 | 2035 | ||
2035 | attr->qp_state = cmd->base.qp_state; | 2036 | if (cmd->base.attr_mask & IB_QP_STATE) |
2036 | attr->cur_qp_state = cmd->base.cur_qp_state; | 2037 | attr->qp_state = cmd->base.qp_state; |
2037 | attr->path_mtu = cmd->base.path_mtu; | 2038 | if (cmd->base.attr_mask & IB_QP_CUR_STATE) |
2038 | attr->path_mig_state = cmd->base.path_mig_state; | 2039 | attr->cur_qp_state = cmd->base.cur_qp_state; |
2039 | attr->qkey = cmd->base.qkey; | 2040 | if (cmd->base.attr_mask & IB_QP_PATH_MTU) |
2040 | attr->rq_psn = cmd->base.rq_psn; | 2041 | attr->path_mtu = cmd->base.path_mtu; |
2041 | attr->sq_psn = cmd->base.sq_psn; | 2042 | if (cmd->base.attr_mask & IB_QP_PATH_MIG_STATE) |
2042 | attr->dest_qp_num = cmd->base.dest_qp_num; | 2043 | attr->path_mig_state = cmd->base.path_mig_state; |
2043 | attr->qp_access_flags = cmd->base.qp_access_flags; | 2044 | if (cmd->base.attr_mask & IB_QP_QKEY) |
2044 | attr->pkey_index = cmd->base.pkey_index; | 2045 | attr->qkey = cmd->base.qkey; |
2045 | attr->alt_pkey_index = cmd->base.alt_pkey_index; | 2046 | if (cmd->base.attr_mask & IB_QP_RQ_PSN) |
2046 | attr->en_sqd_async_notify = cmd->base.en_sqd_async_notify; | 2047 | attr->rq_psn = cmd->base.rq_psn; |
2047 | attr->max_rd_atomic = cmd->base.max_rd_atomic; | 2048 | if (cmd->base.attr_mask & IB_QP_SQ_PSN) |
2048 | attr->max_dest_rd_atomic = cmd->base.max_dest_rd_atomic; | 2049 | attr->sq_psn = cmd->base.sq_psn; |
2049 | attr->min_rnr_timer = cmd->base.min_rnr_timer; | 2050 | if (cmd->base.attr_mask & IB_QP_DEST_QPN) |
2050 | attr->port_num = cmd->base.port_num; | 2051 | attr->dest_qp_num = cmd->base.dest_qp_num; |
2051 | attr->timeout = cmd->base.timeout; | 2052 | if (cmd->base.attr_mask & IB_QP_ACCESS_FLAGS) |
2052 | attr->retry_cnt = cmd->base.retry_cnt; | 2053 | attr->qp_access_flags = cmd->base.qp_access_flags; |
2053 | attr->rnr_retry = cmd->base.rnr_retry; | 2054 | if (cmd->base.attr_mask & IB_QP_PKEY_INDEX) |
2054 | attr->alt_port_num = cmd->base.alt_port_num; | 2055 | attr->pkey_index = cmd->base.pkey_index; |
2055 | attr->alt_timeout = cmd->base.alt_timeout; | 2056 | if (cmd->base.attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY) |
2056 | attr->rate_limit = cmd->rate_limit; | 2057 | attr->en_sqd_async_notify = cmd->base.en_sqd_async_notify; |
2058 | if (cmd->base.attr_mask & IB_QP_MAX_QP_RD_ATOMIC) | ||
2059 | attr->max_rd_atomic = cmd->base.max_rd_atomic; | ||
2060 | if (cmd->base.attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) | ||
2061 | attr->max_dest_rd_atomic = cmd->base.max_dest_rd_atomic; | ||
2062 | if (cmd->base.attr_mask & IB_QP_MIN_RNR_TIMER) | ||
2063 | attr->min_rnr_timer = cmd->base.min_rnr_timer; | ||
2064 | if (cmd->base.attr_mask & IB_QP_PORT) | ||
2065 | attr->port_num = cmd->base.port_num; | ||
2066 | if (cmd->base.attr_mask & IB_QP_TIMEOUT) | ||
2067 | attr->timeout = cmd->base.timeout; | ||
2068 | if (cmd->base.attr_mask & IB_QP_RETRY_CNT) | ||
2069 | attr->retry_cnt = cmd->base.retry_cnt; | ||
2070 | if (cmd->base.attr_mask & IB_QP_RNR_RETRY) | ||
2071 | attr->rnr_retry = cmd->base.rnr_retry; | ||
2072 | if (cmd->base.attr_mask & IB_QP_ALT_PATH) { | ||
2073 | attr->alt_port_num = cmd->base.alt_port_num; | ||
2074 | attr->alt_timeout = cmd->base.alt_timeout; | ||
2075 | attr->alt_pkey_index = cmd->base.alt_pkey_index; | ||
2076 | } | ||
2077 | if (cmd->base.attr_mask & IB_QP_RATE_LIMIT) | ||
2078 | attr->rate_limit = cmd->rate_limit; | ||
2057 | 2079 | ||
2058 | if (cmd->base.attr_mask & IB_QP_AV) | 2080 | if (cmd->base.attr_mask & IB_QP_AV) |
2059 | copy_ah_attr_from_uverbs(qp->device, &attr->ah_attr, | 2081 | copy_ah_attr_from_uverbs(qp->device, &attr->ah_attr, |
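
The modify_qp rework above copies each QP attribute only when its bit is set in attr_mask, so unflagged command fields can no longer overwrite the attribute structure with stale values. A minimal sketch of that mask-gated copy pattern, with simplified struct and mask names (only two attributes shown):

#include <stdint.h>
#include <stdio.h>

#define QP_STATE	(1u << 0)
#define QP_TIMEOUT	(1u << 1)

struct qp_cmd  { uint32_t attr_mask; int qp_state; int timeout; };
struct qp_attr { int qp_state; int timeout; };

static void copy_masked(struct qp_attr *attr, const struct qp_cmd *cmd)
{
	/* Copy only the attributes the caller explicitly flagged. */
	if (cmd->attr_mask & QP_STATE)
		attr->qp_state = cmd->qp_state;
	if (cmd->attr_mask & QP_TIMEOUT)
		attr->timeout = cmd->timeout;
}

int main(void)
{
	struct qp_cmd cmd = { .attr_mask = QP_STATE, .qp_state = 3, .timeout = 99 };
	struct qp_attr attr = { .qp_state = 0, .timeout = 14 };

	copy_masked(&attr, &cmd);
	printf("state=%d timeout=%d\n", attr.qp_state, attr.timeout);	/* timeout untouched */
	return 0;
}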
diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c index 6d974e2363df..50152c1b1004 100644 --- a/drivers/infiniband/core/uverbs_main.c +++ b/drivers/infiniband/core/uverbs_main.c | |||
@@ -440,6 +440,7 @@ static int ib_uverbs_comp_event_close(struct inode *inode, struct file *filp) | |||
440 | list_del(&entry->obj_list); | 440 | list_del(&entry->obj_list); |
441 | kfree(entry); | 441 | kfree(entry); |
442 | } | 442 | } |
443 | file->ev_queue.is_closed = 1; | ||
443 | spin_unlock_irq(&file->ev_queue.lock); | 444 | spin_unlock_irq(&file->ev_queue.lock); |
444 | 445 | ||
445 | uverbs_close_fd(filp); | 446 | uverbs_close_fd(filp); |
diff --git a/drivers/infiniband/core/uverbs_uapi.c b/drivers/infiniband/core/uverbs_uapi.c index 73ea6f0db88f..be854628a7c6 100644 --- a/drivers/infiniband/core/uverbs_uapi.c +++ b/drivers/infiniband/core/uverbs_uapi.c | |||
@@ -248,6 +248,7 @@ void uverbs_destroy_api(struct uverbs_api *uapi) | |||
248 | kfree(rcu_dereference_protected(*slot, true)); | 248 | kfree(rcu_dereference_protected(*slot, true)); |
249 | radix_tree_iter_delete(&uapi->radix, &iter, slot); | 249 | radix_tree_iter_delete(&uapi->radix, &iter, slot); |
250 | } | 250 | } |
251 | kfree(uapi); | ||
251 | } | 252 | } |
252 | 253 | ||
253 | struct uverbs_api *uverbs_alloc_api( | 254 | struct uverbs_api *uverbs_alloc_api( |
diff --git a/drivers/infiniband/hw/bnxt_re/main.c b/drivers/infiniband/hw/bnxt_re/main.c index 20b9f31052bf..85cd1a3593d6 100644 --- a/drivers/infiniband/hw/bnxt_re/main.c +++ b/drivers/infiniband/hw/bnxt_re/main.c | |||
@@ -78,7 +78,7 @@ static struct list_head bnxt_re_dev_list = LIST_HEAD_INIT(bnxt_re_dev_list); | |||
78 | /* Mutex to protect the list of bnxt_re devices added */ | 78 | /* Mutex to protect the list of bnxt_re devices added */ |
79 | static DEFINE_MUTEX(bnxt_re_dev_lock); | 79 | static DEFINE_MUTEX(bnxt_re_dev_lock); |
80 | static struct workqueue_struct *bnxt_re_wq; | 80 | static struct workqueue_struct *bnxt_re_wq; |
81 | static void bnxt_re_ib_unreg(struct bnxt_re_dev *rdev, bool lock_wait); | 81 | static void bnxt_re_ib_unreg(struct bnxt_re_dev *rdev); |
82 | 82 | ||
83 | /* SR-IOV helper functions */ | 83 | /* SR-IOV helper functions */ |
84 | 84 | ||
@@ -182,7 +182,7 @@ static void bnxt_re_shutdown(void *p) | |||
182 | if (!rdev) | 182 | if (!rdev) |
183 | return; | 183 | return; |
184 | 184 | ||
185 | bnxt_re_ib_unreg(rdev, false); | 185 | bnxt_re_ib_unreg(rdev); |
186 | } | 186 | } |
187 | 187 | ||
188 | static void bnxt_re_stop_irq(void *handle) | 188 | static void bnxt_re_stop_irq(void *handle) |
@@ -251,7 +251,7 @@ static struct bnxt_ulp_ops bnxt_re_ulp_ops = { | |||
251 | /* Driver registration routines used to let the networking driver (bnxt_en) | 251 | /* Driver registration routines used to let the networking driver (bnxt_en) |
252 | * to know that the RoCE driver is now installed | 252 | * to know that the RoCE driver is now installed |
253 | */ | 253 | */ |
254 | static int bnxt_re_unregister_netdev(struct bnxt_re_dev *rdev, bool lock_wait) | 254 | static int bnxt_re_unregister_netdev(struct bnxt_re_dev *rdev) |
255 | { | 255 | { |
256 | struct bnxt_en_dev *en_dev; | 256 | struct bnxt_en_dev *en_dev; |
257 | int rc; | 257 | int rc; |
@@ -260,14 +260,9 @@ static int bnxt_re_unregister_netdev(struct bnxt_re_dev *rdev, bool lock_wait) | |||
260 | return -EINVAL; | 260 | return -EINVAL; |
261 | 261 | ||
262 | en_dev = rdev->en_dev; | 262 | en_dev = rdev->en_dev; |
263 | /* Acquire rtnl lock if it is not invoked from a netdev event */ | ||
264 | if (lock_wait) | ||
265 | rtnl_lock(); | ||
266 | 263 | ||
267 | rc = en_dev->en_ops->bnxt_unregister_device(rdev->en_dev, | 264 | rc = en_dev->en_ops->bnxt_unregister_device(rdev->en_dev, |
268 | BNXT_ROCE_ULP); | 265 | BNXT_ROCE_ULP); |
269 | if (lock_wait) | ||
270 | rtnl_unlock(); | ||
271 | return rc; | 266 | return rc; |
272 | } | 267 | } |
273 | 268 | ||
@@ -281,14 +276,12 @@ static int bnxt_re_register_netdev(struct bnxt_re_dev *rdev) | |||
281 | 276 | ||
282 | en_dev = rdev->en_dev; | 277 | en_dev = rdev->en_dev; |
283 | 278 | ||
284 | rtnl_lock(); | ||
285 | rc = en_dev->en_ops->bnxt_register_device(en_dev, BNXT_ROCE_ULP, | 279 | rc = en_dev->en_ops->bnxt_register_device(en_dev, BNXT_ROCE_ULP, |
286 | &bnxt_re_ulp_ops, rdev); | 280 | &bnxt_re_ulp_ops, rdev); |
287 | rtnl_unlock(); | ||
288 | return rc; | 281 | return rc; |
289 | } | 282 | } |
290 | 283 | ||
291 | static int bnxt_re_free_msix(struct bnxt_re_dev *rdev, bool lock_wait) | 284 | static int bnxt_re_free_msix(struct bnxt_re_dev *rdev) |
292 | { | 285 | { |
293 | struct bnxt_en_dev *en_dev; | 286 | struct bnxt_en_dev *en_dev; |
294 | int rc; | 287 | int rc; |
@@ -298,13 +291,9 @@ static int bnxt_re_free_msix(struct bnxt_re_dev *rdev, bool lock_wait) | |||
298 | 291 | ||
299 | en_dev = rdev->en_dev; | 292 | en_dev = rdev->en_dev; |
300 | 293 | ||
301 | if (lock_wait) | ||
302 | rtnl_lock(); | ||
303 | 294 | ||
304 | rc = en_dev->en_ops->bnxt_free_msix(rdev->en_dev, BNXT_ROCE_ULP); | 295 | rc = en_dev->en_ops->bnxt_free_msix(rdev->en_dev, BNXT_ROCE_ULP); |
305 | 296 | ||
306 | if (lock_wait) | ||
307 | rtnl_unlock(); | ||
308 | return rc; | 297 | return rc; |
309 | } | 298 | } |
310 | 299 | ||
@@ -320,7 +309,6 @@ static int bnxt_re_request_msix(struct bnxt_re_dev *rdev) | |||
320 | 309 | ||
321 | num_msix_want = min_t(u32, BNXT_RE_MAX_MSIX, num_online_cpus()); | 310 | num_msix_want = min_t(u32, BNXT_RE_MAX_MSIX, num_online_cpus()); |
322 | 311 | ||
323 | rtnl_lock(); | ||
324 | num_msix_got = en_dev->en_ops->bnxt_request_msix(en_dev, BNXT_ROCE_ULP, | 312 | num_msix_got = en_dev->en_ops->bnxt_request_msix(en_dev, BNXT_ROCE_ULP, |
325 | rdev->msix_entries, | 313 | rdev->msix_entries, |
326 | num_msix_want); | 314 | num_msix_want); |
@@ -335,7 +323,6 @@ static int bnxt_re_request_msix(struct bnxt_re_dev *rdev) | |||
335 | } | 323 | } |
336 | rdev->num_msix = num_msix_got; | 324 | rdev->num_msix = num_msix_got; |
337 | done: | 325 | done: |
338 | rtnl_unlock(); | ||
339 | return rc; | 326 | return rc; |
340 | } | 327 | } |
341 | 328 | ||
@@ -358,24 +345,18 @@ static void bnxt_re_fill_fw_msg(struct bnxt_fw_msg *fw_msg, void *msg, | |||
358 | fw_msg->timeout = timeout; | 345 | fw_msg->timeout = timeout; |
359 | } | 346 | } |
360 | 347 | ||
361 | static int bnxt_re_net_ring_free(struct bnxt_re_dev *rdev, u16 fw_ring_id, | 348 | static int bnxt_re_net_ring_free(struct bnxt_re_dev *rdev, u16 fw_ring_id) |
362 | bool lock_wait) | ||
363 | { | 349 | { |
364 | struct bnxt_en_dev *en_dev = rdev->en_dev; | 350 | struct bnxt_en_dev *en_dev = rdev->en_dev; |
365 | struct hwrm_ring_free_input req = {0}; | 351 | struct hwrm_ring_free_input req = {0}; |
366 | struct hwrm_ring_free_output resp; | 352 | struct hwrm_ring_free_output resp; |
367 | struct bnxt_fw_msg fw_msg; | 353 | struct bnxt_fw_msg fw_msg; |
368 | bool do_unlock = false; | ||
369 | int rc = -EINVAL; | 354 | int rc = -EINVAL; |
370 | 355 | ||
371 | if (!en_dev) | 356 | if (!en_dev) |
372 | return rc; | 357 | return rc; |
373 | 358 | ||
374 | memset(&fw_msg, 0, sizeof(fw_msg)); | 359 | memset(&fw_msg, 0, sizeof(fw_msg)); |
375 | if (lock_wait) { | ||
376 | rtnl_lock(); | ||
377 | do_unlock = true; | ||
378 | } | ||
379 | 360 | ||
380 | bnxt_re_init_hwrm_hdr(rdev, (void *)&req, HWRM_RING_FREE, -1, -1); | 361 | bnxt_re_init_hwrm_hdr(rdev, (void *)&req, HWRM_RING_FREE, -1, -1); |
381 | req.ring_type = RING_ALLOC_REQ_RING_TYPE_L2_CMPL; | 362 | req.ring_type = RING_ALLOC_REQ_RING_TYPE_L2_CMPL; |
@@ -386,8 +367,6 @@ static int bnxt_re_net_ring_free(struct bnxt_re_dev *rdev, u16 fw_ring_id, | |||
386 | if (rc) | 367 | if (rc) |
387 | dev_err(rdev_to_dev(rdev), | 368 | dev_err(rdev_to_dev(rdev), |
388 | "Failed to free HW ring:%d :%#x", req.ring_id, rc); | 369 | "Failed to free HW ring:%d :%#x", req.ring_id, rc); |
389 | if (do_unlock) | ||
390 | rtnl_unlock(); | ||
391 | return rc; | 370 | return rc; |
392 | } | 371 | } |
393 | 372 | ||
@@ -405,7 +384,6 @@ static int bnxt_re_net_ring_alloc(struct bnxt_re_dev *rdev, dma_addr_t *dma_arr, | |||
405 | return rc; | 384 | return rc; |
406 | 385 | ||
407 | memset(&fw_msg, 0, sizeof(fw_msg)); | 386 | memset(&fw_msg, 0, sizeof(fw_msg)); |
408 | rtnl_lock(); | ||
409 | bnxt_re_init_hwrm_hdr(rdev, (void *)&req, HWRM_RING_ALLOC, -1, -1); | 387 | bnxt_re_init_hwrm_hdr(rdev, (void *)&req, HWRM_RING_ALLOC, -1, -1); |
410 | req.enables = 0; | 388 | req.enables = 0; |
411 | req.page_tbl_addr = cpu_to_le64(dma_arr[0]); | 389 | req.page_tbl_addr = cpu_to_le64(dma_arr[0]); |
@@ -426,27 +404,21 @@ static int bnxt_re_net_ring_alloc(struct bnxt_re_dev *rdev, dma_addr_t *dma_arr, | |||
426 | if (!rc) | 404 | if (!rc) |
427 | *fw_ring_id = le16_to_cpu(resp.ring_id); | 405 | *fw_ring_id = le16_to_cpu(resp.ring_id); |
428 | 406 | ||
429 | rtnl_unlock(); | ||
430 | return rc; | 407 | return rc; |
431 | } | 408 | } |
432 | 409 | ||
433 | static int bnxt_re_net_stats_ctx_free(struct bnxt_re_dev *rdev, | 410 | static int bnxt_re_net_stats_ctx_free(struct bnxt_re_dev *rdev, |
434 | u32 fw_stats_ctx_id, bool lock_wait) | 411 | u32 fw_stats_ctx_id) |
435 | { | 412 | { |
436 | struct bnxt_en_dev *en_dev = rdev->en_dev; | 413 | struct bnxt_en_dev *en_dev = rdev->en_dev; |
437 | struct hwrm_stat_ctx_free_input req = {0}; | 414 | struct hwrm_stat_ctx_free_input req = {0}; |
438 | struct bnxt_fw_msg fw_msg; | 415 | struct bnxt_fw_msg fw_msg; |
439 | bool do_unlock = false; | ||
440 | int rc = -EINVAL; | 416 | int rc = -EINVAL; |
441 | 417 | ||
442 | if (!en_dev) | 418 | if (!en_dev) |
443 | return rc; | 419 | return rc; |
444 | 420 | ||
445 | memset(&fw_msg, 0, sizeof(fw_msg)); | 421 | memset(&fw_msg, 0, sizeof(fw_msg)); |
446 | if (lock_wait) { | ||
447 | rtnl_lock(); | ||
448 | do_unlock = true; | ||
449 | } | ||
450 | 422 | ||
451 | bnxt_re_init_hwrm_hdr(rdev, (void *)&req, HWRM_STAT_CTX_FREE, -1, -1); | 423 | bnxt_re_init_hwrm_hdr(rdev, (void *)&req, HWRM_STAT_CTX_FREE, -1, -1); |
452 | req.stat_ctx_id = cpu_to_le32(fw_stats_ctx_id); | 424 | req.stat_ctx_id = cpu_to_le32(fw_stats_ctx_id); |
@@ -457,8 +429,6 @@ static int bnxt_re_net_stats_ctx_free(struct bnxt_re_dev *rdev, | |||
457 | dev_err(rdev_to_dev(rdev), | 429 | dev_err(rdev_to_dev(rdev), |
458 | "Failed to free HW stats context %#x", rc); | 430 | "Failed to free HW stats context %#x", rc); |
459 | 431 | ||
460 | if (do_unlock) | ||
461 | rtnl_unlock(); | ||
462 | return rc; | 432 | return rc; |
463 | } | 433 | } |
464 | 434 | ||
@@ -478,7 +448,6 @@ static int bnxt_re_net_stats_ctx_alloc(struct bnxt_re_dev *rdev, | |||
478 | return rc; | 448 | return rc; |
479 | 449 | ||
480 | memset(&fw_msg, 0, sizeof(fw_msg)); | 450 | memset(&fw_msg, 0, sizeof(fw_msg)); |
481 | rtnl_lock(); | ||
482 | 451 | ||
483 | bnxt_re_init_hwrm_hdr(rdev, (void *)&req, HWRM_STAT_CTX_ALLOC, -1, -1); | 452 | bnxt_re_init_hwrm_hdr(rdev, (void *)&req, HWRM_STAT_CTX_ALLOC, -1, -1); |
484 | req.update_period_ms = cpu_to_le32(1000); | 453 | req.update_period_ms = cpu_to_le32(1000); |
@@ -490,7 +459,6 @@ static int bnxt_re_net_stats_ctx_alloc(struct bnxt_re_dev *rdev, | |||
490 | if (!rc) | 459 | if (!rc) |
491 | *fw_stats_ctx_id = le32_to_cpu(resp.stat_ctx_id); | 460 | *fw_stats_ctx_id = le32_to_cpu(resp.stat_ctx_id); |
492 | 461 | ||
493 | rtnl_unlock(); | ||
494 | return rc; | 462 | return rc; |
495 | } | 463 | } |
496 | 464 | ||
@@ -929,19 +897,19 @@ fail: | |||
929 | return rc; | 897 | return rc; |
930 | } | 898 | } |
931 | 899 | ||
932 | static void bnxt_re_free_nq_res(struct bnxt_re_dev *rdev, bool lock_wait) | 900 | static void bnxt_re_free_nq_res(struct bnxt_re_dev *rdev) |
933 | { | 901 | { |
934 | int i; | 902 | int i; |
935 | 903 | ||
936 | for (i = 0; i < rdev->num_msix - 1; i++) { | 904 | for (i = 0; i < rdev->num_msix - 1; i++) { |
937 | bnxt_re_net_ring_free(rdev, rdev->nq[i].ring_id, lock_wait); | 905 | bnxt_re_net_ring_free(rdev, rdev->nq[i].ring_id); |
938 | bnxt_qplib_free_nq(&rdev->nq[i]); | 906 | bnxt_qplib_free_nq(&rdev->nq[i]); |
939 | } | 907 | } |
940 | } | 908 | } |
941 | 909 | ||
942 | static void bnxt_re_free_res(struct bnxt_re_dev *rdev, bool lock_wait) | 910 | static void bnxt_re_free_res(struct bnxt_re_dev *rdev) |
943 | { | 911 | { |
944 | bnxt_re_free_nq_res(rdev, lock_wait); | 912 | bnxt_re_free_nq_res(rdev); |
945 | 913 | ||
946 | if (rdev->qplib_res.dpi_tbl.max) { | 914 | if (rdev->qplib_res.dpi_tbl.max) { |
947 | bnxt_qplib_dealloc_dpi(&rdev->qplib_res, | 915 | bnxt_qplib_dealloc_dpi(&rdev->qplib_res, |
@@ -1219,7 +1187,7 @@ static int bnxt_re_setup_qos(struct bnxt_re_dev *rdev) | |||
1219 | return 0; | 1187 | return 0; |
1220 | } | 1188 | } |
1221 | 1189 | ||
1222 | static void bnxt_re_ib_unreg(struct bnxt_re_dev *rdev, bool lock_wait) | 1190 | static void bnxt_re_ib_unreg(struct bnxt_re_dev *rdev) |
1223 | { | 1191 | { |
1224 | int i, rc; | 1192 | int i, rc; |
1225 | 1193 | ||
@@ -1234,28 +1202,27 @@ static void bnxt_re_ib_unreg(struct bnxt_re_dev *rdev, bool lock_wait) | |||
1234 | cancel_delayed_work(&rdev->worker); | 1202 | cancel_delayed_work(&rdev->worker); |
1235 | 1203 | ||
1236 | bnxt_re_cleanup_res(rdev); | 1204 | bnxt_re_cleanup_res(rdev); |
1237 | bnxt_re_free_res(rdev, lock_wait); | 1205 | bnxt_re_free_res(rdev); |
1238 | 1206 | ||
1239 | if (test_and_clear_bit(BNXT_RE_FLAG_RCFW_CHANNEL_EN, &rdev->flags)) { | 1207 | if (test_and_clear_bit(BNXT_RE_FLAG_RCFW_CHANNEL_EN, &rdev->flags)) { |
1240 | rc = bnxt_qplib_deinit_rcfw(&rdev->rcfw); | 1208 | rc = bnxt_qplib_deinit_rcfw(&rdev->rcfw); |
1241 | if (rc) | 1209 | if (rc) |
1242 | dev_warn(rdev_to_dev(rdev), | 1210 | dev_warn(rdev_to_dev(rdev), |
1243 | "Failed to deinitialize RCFW: %#x", rc); | 1211 | "Failed to deinitialize RCFW: %#x", rc); |
1244 | bnxt_re_net_stats_ctx_free(rdev, rdev->qplib_ctx.stats.fw_id, | 1212 | bnxt_re_net_stats_ctx_free(rdev, rdev->qplib_ctx.stats.fw_id); |
1245 | lock_wait); | ||
1246 | bnxt_qplib_free_ctx(rdev->en_dev->pdev, &rdev->qplib_ctx); | 1213 | bnxt_qplib_free_ctx(rdev->en_dev->pdev, &rdev->qplib_ctx); |
1247 | bnxt_qplib_disable_rcfw_channel(&rdev->rcfw); | 1214 | bnxt_qplib_disable_rcfw_channel(&rdev->rcfw); |
1248 | bnxt_re_net_ring_free(rdev, rdev->rcfw.creq_ring_id, lock_wait); | 1215 | bnxt_re_net_ring_free(rdev, rdev->rcfw.creq_ring_id); |
1249 | bnxt_qplib_free_rcfw_channel(&rdev->rcfw); | 1216 | bnxt_qplib_free_rcfw_channel(&rdev->rcfw); |
1250 | } | 1217 | } |
1251 | if (test_and_clear_bit(BNXT_RE_FLAG_GOT_MSIX, &rdev->flags)) { | 1218 | if (test_and_clear_bit(BNXT_RE_FLAG_GOT_MSIX, &rdev->flags)) { |
1252 | rc = bnxt_re_free_msix(rdev, lock_wait); | 1219 | rc = bnxt_re_free_msix(rdev); |
1253 | if (rc) | 1220 | if (rc) |
1254 | dev_warn(rdev_to_dev(rdev), | 1221 | dev_warn(rdev_to_dev(rdev), |
1255 | "Failed to free MSI-X vectors: %#x", rc); | 1222 | "Failed to free MSI-X vectors: %#x", rc); |
1256 | } | 1223 | } |
1257 | if (test_and_clear_bit(BNXT_RE_FLAG_NETDEV_REGISTERED, &rdev->flags)) { | 1224 | if (test_and_clear_bit(BNXT_RE_FLAG_NETDEV_REGISTERED, &rdev->flags)) { |
1258 | rc = bnxt_re_unregister_netdev(rdev, lock_wait); | 1225 | rc = bnxt_re_unregister_netdev(rdev); |
1259 | if (rc) | 1226 | if (rc) |
1260 | dev_warn(rdev_to_dev(rdev), | 1227 | dev_warn(rdev_to_dev(rdev), |
1261 | "Failed to unregister with netdev: %#x", rc); | 1228 | "Failed to unregister with netdev: %#x", rc); |
@@ -1276,6 +1243,12 @@ static int bnxt_re_ib_reg(struct bnxt_re_dev *rdev) | |||
1276 | { | 1243 | { |
1277 | int i, j, rc; | 1244 | int i, j, rc; |
1278 | 1245 | ||
1246 | bool locked; | ||
1247 | |||
1248 | /* Acquire rtnl lock throughout this function */ | ||
1249 | rtnl_lock(); | ||
1250 | locked = true; | ||
1251 | |||
1279 | /* Register a new RoCE device instance with netdev */ | 1252 | /* Register a new RoCE device instance with netdev */ |
1280 | rc = bnxt_re_register_netdev(rdev); | 1253 | rc = bnxt_re_register_netdev(rdev); |
1281 | if (rc) { | 1254 | if (rc) { |
@@ -1374,12 +1347,16 @@ static int bnxt_re_ib_reg(struct bnxt_re_dev *rdev) | |||
1374 | schedule_delayed_work(&rdev->worker, msecs_to_jiffies(30000)); | 1347 | schedule_delayed_work(&rdev->worker, msecs_to_jiffies(30000)); |
1375 | } | 1348 | } |
1376 | 1349 | ||
1350 | rtnl_unlock(); | ||
1351 | locked = false; | ||
1352 | |||
1377 | /* Register ib dev */ | 1353 | /* Register ib dev */ |
1378 | rc = bnxt_re_register_ib(rdev); | 1354 | rc = bnxt_re_register_ib(rdev); |
1379 | if (rc) { | 1355 | if (rc) { |
1380 | pr_err("Failed to register with IB: %#x\n", rc); | 1356 | pr_err("Failed to register with IB: %#x\n", rc); |
1381 | goto fail; | 1357 | goto fail; |
1382 | } | 1358 | } |
1359 | set_bit(BNXT_RE_FLAG_IBDEV_REGISTERED, &rdev->flags); | ||
1383 | dev_info(rdev_to_dev(rdev), "Device registered successfully"); | 1360 | dev_info(rdev_to_dev(rdev), "Device registered successfully"); |
1384 | for (i = 0; i < ARRAY_SIZE(bnxt_re_attributes); i++) { | 1361 | for (i = 0; i < ARRAY_SIZE(bnxt_re_attributes); i++) { |
1385 | rc = device_create_file(&rdev->ibdev.dev, | 1362 | rc = device_create_file(&rdev->ibdev.dev, |
@@ -1395,7 +1372,6 @@ static int bnxt_re_ib_reg(struct bnxt_re_dev *rdev) | |||
1395 | goto fail; | 1372 | goto fail; |
1396 | } | 1373 | } |
1397 | } | 1374 | } |
1398 | set_bit(BNXT_RE_FLAG_IBDEV_REGISTERED, &rdev->flags); | ||
1399 | ib_get_eth_speed(&rdev->ibdev, 1, &rdev->active_speed, | 1375 | ib_get_eth_speed(&rdev->ibdev, 1, &rdev->active_speed, |
1400 | &rdev->active_width); | 1376 | &rdev->active_width); |
1401 | set_bit(BNXT_RE_FLAG_ISSUE_ROCE_STATS, &rdev->flags); | 1377 | set_bit(BNXT_RE_FLAG_ISSUE_ROCE_STATS, &rdev->flags); |
@@ -1404,17 +1380,21 @@ static int bnxt_re_ib_reg(struct bnxt_re_dev *rdev) | |||
1404 | 1380 | ||
1405 | return 0; | 1381 | return 0; |
1406 | free_sctx: | 1382 | free_sctx: |
1407 | bnxt_re_net_stats_ctx_free(rdev, rdev->qplib_ctx.stats.fw_id, true); | 1383 | bnxt_re_net_stats_ctx_free(rdev, rdev->qplib_ctx.stats.fw_id); |
1408 | free_ctx: | 1384 | free_ctx: |
1409 | bnxt_qplib_free_ctx(rdev->en_dev->pdev, &rdev->qplib_ctx); | 1385 | bnxt_qplib_free_ctx(rdev->en_dev->pdev, &rdev->qplib_ctx); |
1410 | disable_rcfw: | 1386 | disable_rcfw: |
1411 | bnxt_qplib_disable_rcfw_channel(&rdev->rcfw); | 1387 | bnxt_qplib_disable_rcfw_channel(&rdev->rcfw); |
1412 | free_ring: | 1388 | free_ring: |
1413 | bnxt_re_net_ring_free(rdev, rdev->rcfw.creq_ring_id, true); | 1389 | bnxt_re_net_ring_free(rdev, rdev->rcfw.creq_ring_id); |
1414 | free_rcfw: | 1390 | free_rcfw: |
1415 | bnxt_qplib_free_rcfw_channel(&rdev->rcfw); | 1391 | bnxt_qplib_free_rcfw_channel(&rdev->rcfw); |
1416 | fail: | 1392 | fail: |
1417 | bnxt_re_ib_unreg(rdev, true); | 1393 | if (!locked) |
1394 | rtnl_lock(); | ||
1395 | bnxt_re_ib_unreg(rdev); | ||
1396 | rtnl_unlock(); | ||
1397 | |||
1418 | return rc; | 1398 | return rc; |
1419 | } | 1399 | } |
1420 | 1400 | ||
@@ -1567,7 +1547,7 @@ static int bnxt_re_netdev_event(struct notifier_block *notifier, | |||
1567 | */ | 1547 | */ |
1568 | if (atomic_read(&rdev->sched_count) > 0) | 1548 | if (atomic_read(&rdev->sched_count) > 0) |
1569 | goto exit; | 1549 | goto exit; |
1570 | bnxt_re_ib_unreg(rdev, false); | 1550 | bnxt_re_ib_unreg(rdev); |
1571 | bnxt_re_remove_one(rdev); | 1551 | bnxt_re_remove_one(rdev); |
1572 | bnxt_re_dev_unreg(rdev); | 1552 | bnxt_re_dev_unreg(rdev); |
1573 | break; | 1553 | break; |
@@ -1646,7 +1626,10 @@ static void __exit bnxt_re_mod_exit(void) | |||
1646 | */ | 1626 | */ |
1647 | flush_workqueue(bnxt_re_wq); | 1627 | flush_workqueue(bnxt_re_wq); |
1648 | bnxt_re_dev_stop(rdev); | 1628 | bnxt_re_dev_stop(rdev); |
1649 | bnxt_re_ib_unreg(rdev, true); | 1629 | /* Acquire the rtnl_lock as the L2 resources are freed here */ |
1630 | rtnl_lock(); | ||
1631 | bnxt_re_ib_unreg(rdev); | ||
1632 | rtnl_unlock(); | ||
1650 | bnxt_re_remove_one(rdev); | 1633 | bnxt_re_remove_one(rdev); |
1651 | bnxt_re_dev_unreg(rdev); | 1634 | bnxt_re_dev_unreg(rdev); |
1652 | } | 1635 | } |
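
The bnxt_re changes above drop the lock_wait plumbing and instead require callers to hold rtnl_lock() around bnxt_re_ib_unreg(), taking and releasing it at the call sites (shutdown, netdev events, module exit, and the ib_reg error path). A minimal sketch of that caller-holds-the-lock convention, using a pthread mutex as a stand-in for rtnl_lock and a stub for the unregister helper:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t rtnl = PTHREAD_MUTEX_INITIALIZER;	/* stand-in for rtnl_lock */

static void ib_unreg(void)
{
	/* Assumes the caller already holds the lock; no lock_wait flag. */
	puts("unregistering RoCE device");
}

int main(void)
{
	pthread_mutex_lock(&rtnl);
	ib_unreg();
	pthread_mutex_unlock(&rtnl);
	return 0;
}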
diff --git a/drivers/infiniband/hw/hfi1/chip.c b/drivers/infiniband/hw/hfi1/chip.c index 2c19bf772451..e1668bcc2d13 100644 --- a/drivers/infiniband/hw/hfi1/chip.c +++ b/drivers/infiniband/hw/hfi1/chip.c | |||
@@ -6733,6 +6733,7 @@ void start_freeze_handling(struct hfi1_pportdata *ppd, int flags) | |||
6733 | struct hfi1_devdata *dd = ppd->dd; | 6733 | struct hfi1_devdata *dd = ppd->dd; |
6734 | struct send_context *sc; | 6734 | struct send_context *sc; |
6735 | int i; | 6735 | int i; |
6736 | int sc_flags; | ||
6736 | 6737 | ||
6737 | if (flags & FREEZE_SELF) | 6738 | if (flags & FREEZE_SELF) |
6738 | write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_FREEZE_SMASK); | 6739 | write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_FREEZE_SMASK); |
@@ -6743,11 +6744,13 @@ void start_freeze_handling(struct hfi1_pportdata *ppd, int flags) | |||
6743 | /* notify all SDMA engines that they are going into a freeze */ | 6744 | /* notify all SDMA engines that they are going into a freeze */ |
6744 | sdma_freeze_notify(dd, !!(flags & FREEZE_LINK_DOWN)); | 6745 | sdma_freeze_notify(dd, !!(flags & FREEZE_LINK_DOWN)); |
6745 | 6746 | ||
6747 | sc_flags = SCF_FROZEN | SCF_HALTED | (flags & FREEZE_LINK_DOWN ? | ||
6748 | SCF_LINK_DOWN : 0); | ||
6746 | /* do halt pre-handling on all enabled send contexts */ | 6749 | /* do halt pre-handling on all enabled send contexts */ |
6747 | for (i = 0; i < dd->num_send_contexts; i++) { | 6750 | for (i = 0; i < dd->num_send_contexts; i++) { |
6748 | sc = dd->send_contexts[i].sc; | 6751 | sc = dd->send_contexts[i].sc; |
6749 | if (sc && (sc->flags & SCF_ENABLED)) | 6752 | if (sc && (sc->flags & SCF_ENABLED)) |
6750 | sc_stop(sc, SCF_FROZEN | SCF_HALTED); | 6753 | sc_stop(sc, sc_flags); |
6751 | } | 6754 | } |
6752 | 6755 | ||
6753 | /* Send context are frozen. Notify user space */ | 6756 | /* Send context are frozen. Notify user space */ |
@@ -10674,6 +10677,7 @@ int set_link_state(struct hfi1_pportdata *ppd, u32 state) | |||
10674 | add_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK); | 10677 | add_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK); |
10675 | 10678 | ||
10676 | handle_linkup_change(dd, 1); | 10679 | handle_linkup_change(dd, 1); |
10680 | pio_kernel_linkup(dd); | ||
10677 | 10681 | ||
10678 | /* | 10682 | /* |
10679 | * After link up, a new link width will have been set. | 10683 | * After link up, a new link width will have been set. |
diff --git a/drivers/infiniband/hw/hfi1/pio.c b/drivers/infiniband/hw/hfi1/pio.c index c2c1cba5b23b..752057647f09 100644 --- a/drivers/infiniband/hw/hfi1/pio.c +++ b/drivers/infiniband/hw/hfi1/pio.c | |||
@@ -86,6 +86,7 @@ void pio_send_control(struct hfi1_devdata *dd, int op) | |||
86 | unsigned long flags; | 86 | unsigned long flags; |
87 | int write = 1; /* write sendctrl back */ | 87 | int write = 1; /* write sendctrl back */ |
88 | int flush = 0; /* re-read sendctrl to make sure it is flushed */ | 88 | int flush = 0; /* re-read sendctrl to make sure it is flushed */ |
89 | int i; | ||
89 | 90 | ||
90 | spin_lock_irqsave(&dd->sendctrl_lock, flags); | 91 | spin_lock_irqsave(&dd->sendctrl_lock, flags); |
91 | 92 | ||
@@ -95,9 +96,13 @@ void pio_send_control(struct hfi1_devdata *dd, int op) | |||
95 | reg |= SEND_CTRL_SEND_ENABLE_SMASK; | 96 | reg |= SEND_CTRL_SEND_ENABLE_SMASK; |
96 | /* Fall through */ | 97 | /* Fall through */ |
97 | case PSC_DATA_VL_ENABLE: | 98 | case PSC_DATA_VL_ENABLE: |
99 | mask = 0; | ||
100 | for (i = 0; i < ARRAY_SIZE(dd->vld); i++) | ||
101 | if (!dd->vld[i].mtu) | ||
102 | mask |= BIT_ULL(i); | ||
98 | /* Disallow sending on VLs not enabled */ | 103 | /* Disallow sending on VLs not enabled */ |
99 | mask = (((~0ull) << num_vls) & SEND_CTRL_UNSUPPORTED_VL_MASK) << | 104 | mask = (mask & SEND_CTRL_UNSUPPORTED_VL_MASK) << |
100 | SEND_CTRL_UNSUPPORTED_VL_SHIFT; | 105 | SEND_CTRL_UNSUPPORTED_VL_SHIFT; |
101 | reg = (reg & ~SEND_CTRL_UNSUPPORTED_VL_SMASK) | mask; | 106 | reg = (reg & ~SEND_CTRL_UNSUPPORTED_VL_SMASK) | mask; |
102 | break; | 107 | break; |
103 | case PSC_GLOBAL_DISABLE: | 108 | case PSC_GLOBAL_DISABLE: |
@@ -921,20 +926,18 @@ void sc_free(struct send_context *sc) | |||
921 | void sc_disable(struct send_context *sc) | 926 | void sc_disable(struct send_context *sc) |
922 | { | 927 | { |
923 | u64 reg; | 928 | u64 reg; |
924 | unsigned long flags; | ||
925 | struct pio_buf *pbuf; | 929 | struct pio_buf *pbuf; |
926 | 930 | ||
927 | if (!sc) | 931 | if (!sc) |
928 | return; | 932 | return; |
929 | 933 | ||
930 | /* do all steps, even if already disabled */ | 934 | /* do all steps, even if already disabled */ |
931 | spin_lock_irqsave(&sc->alloc_lock, flags); | 935 | spin_lock_irq(&sc->alloc_lock); |
932 | reg = read_kctxt_csr(sc->dd, sc->hw_context, SC(CTRL)); | 936 | reg = read_kctxt_csr(sc->dd, sc->hw_context, SC(CTRL)); |
933 | reg &= ~SC(CTRL_CTXT_ENABLE_SMASK); | 937 | reg &= ~SC(CTRL_CTXT_ENABLE_SMASK); |
934 | sc->flags &= ~SCF_ENABLED; | 938 | sc->flags &= ~SCF_ENABLED; |
935 | sc_wait_for_packet_egress(sc, 1); | 939 | sc_wait_for_packet_egress(sc, 1); |
936 | write_kctxt_csr(sc->dd, sc->hw_context, SC(CTRL), reg); | 940 | write_kctxt_csr(sc->dd, sc->hw_context, SC(CTRL), reg); |
937 | spin_unlock_irqrestore(&sc->alloc_lock, flags); | ||
938 | 941 | ||
939 | /* | 942 | /* |
940 | * Flush any waiters. Once the context is disabled, | 943 | * Flush any waiters. Once the context is disabled, |
@@ -944,7 +947,7 @@ void sc_disable(struct send_context *sc) | |||
944 | * proceed with the flush. | 947 | * proceed with the flush. |
945 | */ | 948 | */ |
946 | udelay(1); | 949 | udelay(1); |
947 | spin_lock_irqsave(&sc->release_lock, flags); | 950 | spin_lock(&sc->release_lock); |
948 | if (sc->sr) { /* this context has a shadow ring */ | 951 | if (sc->sr) { /* this context has a shadow ring */ |
949 | while (sc->sr_tail != sc->sr_head) { | 952 | while (sc->sr_tail != sc->sr_head) { |
950 | pbuf = &sc->sr[sc->sr_tail].pbuf; | 953 | pbuf = &sc->sr[sc->sr_tail].pbuf; |
@@ -955,7 +958,8 @@ void sc_disable(struct send_context *sc) | |||
955 | sc->sr_tail = 0; | 958 | sc->sr_tail = 0; |
956 | } | 959 | } |
957 | } | 960 | } |
958 | spin_unlock_irqrestore(&sc->release_lock, flags); | 961 | spin_unlock(&sc->release_lock); |
962 | spin_unlock_irq(&sc->alloc_lock); | ||
959 | } | 963 | } |
960 | 964 | ||
961 | /* return SendEgressCtxtStatus.PacketOccupancy */ | 965 | /* return SendEgressCtxtStatus.PacketOccupancy */ |
@@ -1178,11 +1182,39 @@ void pio_kernel_unfreeze(struct hfi1_devdata *dd) | |||
1178 | sc = dd->send_contexts[i].sc; | 1182 | sc = dd->send_contexts[i].sc; |
1179 | if (!sc || !(sc->flags & SCF_FROZEN) || sc->type == SC_USER) | 1183 | if (!sc || !(sc->flags & SCF_FROZEN) || sc->type == SC_USER) |
1180 | continue; | 1184 | continue; |
1185 | if (sc->flags & SCF_LINK_DOWN) | ||
1186 | continue; | ||
1181 | 1187 | ||
1182 | sc_enable(sc); /* will clear the sc frozen flag */ | 1188 | sc_enable(sc); /* will clear the sc frozen flag */ |
1183 | } | 1189 | } |
1184 | } | 1190 | } |
1185 | 1191 | ||
1192 | /** | ||
1193 | * pio_kernel_linkup() - Re-enable send contexts after linkup event | ||
1194 | * @dd: valid device data | ||
1195 | * | ||
1196 | * When the link goes down, the freeze path is taken. However, a link down | ||
1197 | * event is different from a freeze because if the send context is re-enabled | ||
1198 | * whowever is sending data will start sending data again, which will hang | ||
1199 | * any QP that is sending data. | ||
1200 | * | ||
1201 | * The freeze path now looks at the type of event that occurs and takes this | ||
1202 | * path for a link down event. | ||
1203 | */ | ||
1204 | void pio_kernel_linkup(struct hfi1_devdata *dd) | ||
1205 | { | ||
1206 | struct send_context *sc; | ||
1207 | int i; | ||
1208 | |||
1209 | for (i = 0; i < dd->num_send_contexts; i++) { | ||
1210 | sc = dd->send_contexts[i].sc; | ||
1211 | if (!sc || !(sc->flags & SCF_LINK_DOWN) || sc->type == SC_USER) | ||
1212 | continue; | ||
1213 | |||
1214 | sc_enable(sc); /* will clear the sc link down flag */ | ||
1215 | } | ||
1216 | } | ||
1217 | |||
1186 | /* | 1218 | /* |
1187 | * Wait for the SendPioInitCtxt.PioInitInProgress bit to clear. | 1219 | * Wait for the SendPioInitCtxt.PioInitInProgress bit to clear. |
1188 | * Returns: | 1220 | * Returns: |
@@ -1382,11 +1414,10 @@ void sc_stop(struct send_context *sc, int flag) | |||
1382 | { | 1414 | { |
1383 | unsigned long flags; | 1415 | unsigned long flags; |
1384 | 1416 | ||
1385 | /* mark the context */ | ||
1386 | sc->flags |= flag; | ||
1387 | |||
1388 | /* stop buffer allocations */ | 1417 | /* stop buffer allocations */ |
1389 | spin_lock_irqsave(&sc->alloc_lock, flags); | 1418 | spin_lock_irqsave(&sc->alloc_lock, flags); |
1419 | /* mark the context */ | ||
1420 | sc->flags |= flag; | ||
1390 | sc->flags &= ~SCF_ENABLED; | 1421 | sc->flags &= ~SCF_ENABLED; |
1391 | spin_unlock_irqrestore(&sc->alloc_lock, flags); | 1422 | spin_unlock_irqrestore(&sc->alloc_lock, flags); |
1392 | wake_up(&sc->halt_wait); | 1423 | wake_up(&sc->halt_wait); |
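The pio_send_control() hunk above stops deriving the unsupported-VL mask from num_vls and instead marks every VL whose MTU is still zero. A minimal, self-contained sketch of that mask construction; the structure name, table size, and MTU values below are illustrative, not the driver's:

#include <stdint.h>
#include <stdio.h>

#define NUM_VLS 8

struct vl_info { unsigned int mtu; };

/* Build a bitmask with one bit set for every VL that has no MTU configured. */
static uint64_t build_disallowed_vl_mask(const struct vl_info *vld, int n)
{
    uint64_t mask = 0;
    int i;

    for (i = 0; i < n; i++)
        if (!vld[i].mtu)
            mask |= 1ULL << i;
    return mask;
}

int main(void)
{
    struct vl_info vld[NUM_VLS] = {
        [0] = { .mtu = 4096 },  /* only VL0 and VL1 are configured */
        [1] = { .mtu = 2048 },
    };

    printf("disallowed VL mask: 0x%llx\n",
           (unsigned long long)build_disallowed_vl_mask(vld, NUM_VLS));
    return 0;   /* prints 0xfc: VLs 2-7 are disallowed */
}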
diff --git a/drivers/infiniband/hw/hfi1/pio.h b/drivers/infiniband/hw/hfi1/pio.h index 058b08f459ab..aaf372c3e5d6 100644 --- a/drivers/infiniband/hw/hfi1/pio.h +++ b/drivers/infiniband/hw/hfi1/pio.h | |||
@@ -139,6 +139,7 @@ struct send_context { | |||
139 | #define SCF_IN_FREE 0x02 | 139 | #define SCF_IN_FREE 0x02 |
140 | #define SCF_HALTED 0x04 | 140 | #define SCF_HALTED 0x04 |
141 | #define SCF_FROZEN 0x08 | 141 | #define SCF_FROZEN 0x08 |
142 | #define SCF_LINK_DOWN 0x10 | ||
142 | 143 | ||
143 | struct send_context_info { | 144 | struct send_context_info { |
144 | struct send_context *sc; /* allocated working context */ | 145 | struct send_context *sc; /* allocated working context */ |
@@ -306,6 +307,7 @@ void set_pio_integrity(struct send_context *sc); | |||
306 | void pio_reset_all(struct hfi1_devdata *dd); | 307 | void pio_reset_all(struct hfi1_devdata *dd); |
307 | void pio_freeze(struct hfi1_devdata *dd); | 308 | void pio_freeze(struct hfi1_devdata *dd); |
308 | void pio_kernel_unfreeze(struct hfi1_devdata *dd); | 309 | void pio_kernel_unfreeze(struct hfi1_devdata *dd); |
310 | void pio_kernel_linkup(struct hfi1_devdata *dd); | ||
309 | 311 | ||
310 | /* global PIO send control operations */ | 312 | /* global PIO send control operations */ |
311 | #define PSC_GLOBAL_ENABLE 0 | 313 | #define PSC_GLOBAL_ENABLE 0 |
diff --git a/drivers/infiniband/hw/hfi1/user_sdma.c b/drivers/infiniband/hw/hfi1/user_sdma.c index a3a7b33196d6..5c88706121c1 100644 --- a/drivers/infiniband/hw/hfi1/user_sdma.c +++ b/drivers/infiniband/hw/hfi1/user_sdma.c | |||
@@ -828,7 +828,7 @@ static int user_sdma_send_pkts(struct user_sdma_request *req, unsigned maxpkts) | |||
828 | if (READ_ONCE(iovec->offset) == iovec->iov.iov_len) { | 828 | if (READ_ONCE(iovec->offset) == iovec->iov.iov_len) { |
829 | if (++req->iov_idx == req->data_iovs) { | 829 | if (++req->iov_idx == req->data_iovs) { |
830 | ret = -EFAULT; | 830 | ret = -EFAULT; |
831 | goto free_txreq; | 831 | goto free_tx; |
832 | } | 832 | } |
833 | iovec = &req->iovs[req->iov_idx]; | 833 | iovec = &req->iovs[req->iov_idx]; |
834 | WARN_ON(iovec->offset); | 834 | WARN_ON(iovec->offset); |
diff --git a/drivers/infiniband/hw/hfi1/verbs.c b/drivers/infiniband/hw/hfi1/verbs.c index 13374c727b14..a7c586a5589d 100644 --- a/drivers/infiniband/hw/hfi1/verbs.c +++ b/drivers/infiniband/hw/hfi1/verbs.c | |||
@@ -1582,6 +1582,7 @@ static int hfi1_check_ah(struct ib_device *ibdev, struct rdma_ah_attr *ah_attr) | |||
1582 | struct hfi1_pportdata *ppd; | 1582 | struct hfi1_pportdata *ppd; |
1583 | struct hfi1_devdata *dd; | 1583 | struct hfi1_devdata *dd; |
1584 | u8 sc5; | 1584 | u8 sc5; |
1585 | u8 sl; | ||
1585 | 1586 | ||
1586 | if (hfi1_check_mcast(rdma_ah_get_dlid(ah_attr)) && | 1587 | if (hfi1_check_mcast(rdma_ah_get_dlid(ah_attr)) && |
1587 | !(rdma_ah_get_ah_flags(ah_attr) & IB_AH_GRH)) | 1588 | !(rdma_ah_get_ah_flags(ah_attr) & IB_AH_GRH)) |
@@ -1590,8 +1591,13 @@ static int hfi1_check_ah(struct ib_device *ibdev, struct rdma_ah_attr *ah_attr) | |||
1590 | /* test the mapping for validity */ | 1591 | /* test the mapping for validity */ |
1591 | ibp = to_iport(ibdev, rdma_ah_get_port_num(ah_attr)); | 1592 | ibp = to_iport(ibdev, rdma_ah_get_port_num(ah_attr)); |
1592 | ppd = ppd_from_ibp(ibp); | 1593 | ppd = ppd_from_ibp(ibp); |
1593 | sc5 = ibp->sl_to_sc[rdma_ah_get_sl(ah_attr)]; | ||
1594 | dd = dd_from_ppd(ppd); | 1594 | dd = dd_from_ppd(ppd); |
1595 | |||
1596 | sl = rdma_ah_get_sl(ah_attr); | ||
1597 | if (sl >= ARRAY_SIZE(ibp->sl_to_sc)) | ||
1598 | return -EINVAL; | ||
1599 | |||
1600 | sc5 = ibp->sl_to_sc[sl]; | ||
1595 | if (sc_to_vlt(dd, sc5) > num_vls && sc_to_vlt(dd, sc5) != 0xf) | 1601 | if (sc_to_vlt(dd, sc5) > num_vls && sc_to_vlt(dd, sc5) != 0xf) |
1596 | return -EINVAL; | 1602 | return -EINVAL; |
1597 | return 0; | 1603 | return 0; |
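The hfi1_check_ah() fix above validates the service level before it is used to index ibp->sl_to_sc[]. The same guard pattern as a standalone sketch, with a made-up table size and contents:

#include <errno.h>
#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

static const unsigned char sl_to_sc[32] = { 0 };   /* illustrative mapping */

/* Reject out-of-range service levels before indexing the table. */
static int lookup_sc(unsigned int sl, unsigned char *sc)
{
    if (sl >= ARRAY_SIZE(sl_to_sc))
        return -EINVAL;
    *sc = sl_to_sc[sl];
    return 0;
}

int main(void)
{
    unsigned char sc;

    printf("sl=3  -> %d\n", lookup_sc(3, &sc));   /* 0: valid index */
    printf("sl=40 -> %d\n", lookup_sc(40, &sc));  /* -EINVAL: rejected */
    return 0;
}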
diff --git a/drivers/infiniband/hw/mlx5/devx.c b/drivers/infiniband/hw/mlx5/devx.c index ac116d63e466..f2f11e652dcd 100644 --- a/drivers/infiniband/hw/mlx5/devx.c +++ b/drivers/infiniband/hw/mlx5/devx.c | |||
@@ -723,6 +723,7 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OBJ_CREATE)( | |||
723 | attrs, MLX5_IB_ATTR_DEVX_OBJ_CREATE_HANDLE); | 723 | attrs, MLX5_IB_ATTR_DEVX_OBJ_CREATE_HANDLE); |
724 | struct mlx5_ib_ucontext *c = to_mucontext(uobj->context); | 724 | struct mlx5_ib_ucontext *c = to_mucontext(uobj->context); |
725 | struct mlx5_ib_dev *dev = to_mdev(c->ibucontext.device); | 725 | struct mlx5_ib_dev *dev = to_mdev(c->ibucontext.device); |
726 | u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)]; | ||
726 | struct devx_obj *obj; | 727 | struct devx_obj *obj; |
727 | int err; | 728 | int err; |
728 | 729 | ||
@@ -754,10 +755,12 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OBJ_CREATE)( | |||
754 | 755 | ||
755 | err = uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_OBJ_CREATE_CMD_OUT, cmd_out, cmd_out_len); | 756 | err = uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_OBJ_CREATE_CMD_OUT, cmd_out, cmd_out_len); |
756 | if (err) | 757 | if (err) |
757 | goto obj_free; | 758 | goto obj_destroy; |
758 | 759 | ||
759 | return 0; | 760 | return 0; |
760 | 761 | ||
762 | obj_destroy: | ||
763 | mlx5_cmd_exec(obj->mdev, obj->dinbox, obj->dinlen, out, sizeof(out)); | ||
761 | obj_free: | 764 | obj_free: |
762 | kfree(obj); | 765 | kfree(obj); |
763 | return err; | 766 | return err; |
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c index 444d16520506..0b34e909505f 100644 --- a/drivers/infiniband/ulp/srp/ib_srp.c +++ b/drivers/infiniband/ulp/srp/ib_srp.c | |||
@@ -2951,7 +2951,7 @@ static int srp_reset_device(struct scsi_cmnd *scmnd) | |||
2951 | { | 2951 | { |
2952 | struct srp_target_port *target = host_to_target(scmnd->device->host); | 2952 | struct srp_target_port *target = host_to_target(scmnd->device->host); |
2953 | struct srp_rdma_ch *ch; | 2953 | struct srp_rdma_ch *ch; |
2954 | int i; | 2954 | int i, j; |
2955 | u8 status; | 2955 | u8 status; |
2956 | 2956 | ||
2957 | shost_printk(KERN_ERR, target->scsi_host, "SRP reset_device called\n"); | 2957 | shost_printk(KERN_ERR, target->scsi_host, "SRP reset_device called\n"); |
@@ -2965,8 +2965,8 @@ static int srp_reset_device(struct scsi_cmnd *scmnd) | |||
2965 | 2965 | ||
2966 | for (i = 0; i < target->ch_count; i++) { | 2966 | for (i = 0; i < target->ch_count; i++) { |
2967 | ch = &target->ch[i]; | 2967 | ch = &target->ch[i]; |
2968 | for (i = 0; i < target->req_ring_size; ++i) { | 2968 | for (j = 0; j < target->req_ring_size; ++j) { |
2969 | struct srp_request *req = &ch->req_ring[i]; | 2969 | struct srp_request *req = &ch->req_ring[j]; |
2970 | 2970 | ||
2971 | srp_finish_req(ch, req, scmnd->device, DID_RESET << 16); | 2971 | srp_finish_req(ch, req, scmnd->device, DID_RESET << 16); |
2972 | } | 2972 | } |
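The srp_reset_device() fix above gives the inner request loop its own counter j instead of reusing i, which previously cut the outer channel loop short. A toy reproduction of the arithmetic, with made-up channel and ring sizes:

#include <stdio.h>

#define CHANNELS 3
#define REQS     5

int main(void)
{
    int visited = 0;
    int i, j;

    /* Correct form: separate counters, so every request on every channel
     * is visited. If the inner loop reused i, it would push i past
     * CHANNELS after the first channel and the outer loop would stop,
     * visiting only REQS (5) requests instead of 15. */
    for (i = 0; i < CHANNELS; i++)
        for (j = 0; j < REQS; j++)
            visited++;

    printf("requests visited: %d\n", visited);   /* 15 */
    return 0;
}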
diff --git a/drivers/input/keyboard/atakbd.c b/drivers/input/keyboard/atakbd.c index 6f62da2909ec..6caee807cafa 100644 --- a/drivers/input/keyboard/atakbd.c +++ b/drivers/input/keyboard/atakbd.c | |||
@@ -75,8 +75,7 @@ MODULE_LICENSE("GPL"); | |||
75 | */ | 75 | */ |
76 | 76 | ||
77 | 77 | ||
78 | static unsigned char atakbd_keycode[0x72] = { /* American layout */ | 78 | static unsigned char atakbd_keycode[0x73] = { /* American layout */ |
79 | [0] = KEY_GRAVE, | ||
80 | [1] = KEY_ESC, | 79 | [1] = KEY_ESC, |
81 | [2] = KEY_1, | 80 | [2] = KEY_1, |
82 | [3] = KEY_2, | 81 | [3] = KEY_2, |
@@ -117,9 +116,9 @@ static unsigned char atakbd_keycode[0x72] = { /* American layout */ | |||
117 | [38] = KEY_L, | 116 | [38] = KEY_L, |
118 | [39] = KEY_SEMICOLON, | 117 | [39] = KEY_SEMICOLON, |
119 | [40] = KEY_APOSTROPHE, | 118 | [40] = KEY_APOSTROPHE, |
120 | [41] = KEY_BACKSLASH, /* FIXME, '#' */ | 119 | [41] = KEY_GRAVE, |
121 | [42] = KEY_LEFTSHIFT, | 120 | [42] = KEY_LEFTSHIFT, |
122 | [43] = KEY_GRAVE, /* FIXME: '~' */ | 121 | [43] = KEY_BACKSLASH, |
123 | [44] = KEY_Z, | 122 | [44] = KEY_Z, |
124 | [45] = KEY_X, | 123 | [45] = KEY_X, |
125 | [46] = KEY_C, | 124 | [46] = KEY_C, |
@@ -145,45 +144,34 @@ static unsigned char atakbd_keycode[0x72] = { /* American layout */ | |||
145 | [66] = KEY_F8, | 144 | [66] = KEY_F8, |
146 | [67] = KEY_F9, | 145 | [67] = KEY_F9, |
147 | [68] = KEY_F10, | 146 | [68] = KEY_F10, |
148 | [69] = KEY_ESC, | 147 | [71] = KEY_HOME, |
149 | [70] = KEY_DELETE, | 148 | [72] = KEY_UP, |
150 | [71] = KEY_KP7, | ||
151 | [72] = KEY_KP8, | ||
152 | [73] = KEY_KP9, | ||
153 | [74] = KEY_KPMINUS, | 149 | [74] = KEY_KPMINUS, |
154 | [75] = KEY_KP4, | 150 | [75] = KEY_LEFT, |
155 | [76] = KEY_KP5, | 151 | [77] = KEY_RIGHT, |
156 | [77] = KEY_KP6, | ||
157 | [78] = KEY_KPPLUS, | 152 | [78] = KEY_KPPLUS, |
158 | [79] = KEY_KP1, | 153 | [80] = KEY_DOWN, |
159 | [80] = KEY_KP2, | 154 | [82] = KEY_INSERT, |
160 | [81] = KEY_KP3, | 155 | [83] = KEY_DELETE, |
161 | [82] = KEY_KP0, | ||
162 | [83] = KEY_KPDOT, | ||
163 | [90] = KEY_KPLEFTPAREN, | ||
164 | [91] = KEY_KPRIGHTPAREN, | ||
165 | [92] = KEY_KPASTERISK, /* FIXME */ | ||
166 | [93] = KEY_KPASTERISK, | ||
167 | [94] = KEY_KPPLUS, | ||
168 | [95] = KEY_HELP, | ||
169 | [96] = KEY_102ND, | 156 | [96] = KEY_102ND, |
170 | [97] = KEY_KPASTERISK, /* FIXME */ | 157 | [97] = KEY_UNDO, |
171 | [98] = KEY_KPSLASH, | 158 | [98] = KEY_HELP, |
172 | [99] = KEY_KPLEFTPAREN, | 159 | [99] = KEY_KPLEFTPAREN, |
173 | [100] = KEY_KPRIGHTPAREN, | 160 | [100] = KEY_KPRIGHTPAREN, |
174 | [101] = KEY_KPSLASH, | 161 | [101] = KEY_KPSLASH, |
175 | [102] = KEY_KPASTERISK, | 162 | [102] = KEY_KPASTERISK, |
176 | [103] = KEY_UP, | 163 | [103] = KEY_KP7, |
177 | [104] = KEY_KPASTERISK, /* FIXME */ | 164 | [104] = KEY_KP8, |
178 | [105] = KEY_LEFT, | 165 | [105] = KEY_KP9, |
179 | [106] = KEY_RIGHT, | 166 | [106] = KEY_KP4, |
180 | [107] = KEY_KPASTERISK, /* FIXME */ | 167 | [107] = KEY_KP5, |
181 | [108] = KEY_DOWN, | 168 | [108] = KEY_KP6, |
182 | [109] = KEY_KPASTERISK, /* FIXME */ | 169 | [109] = KEY_KP1, |
183 | [110] = KEY_KPASTERISK, /* FIXME */ | 170 | [110] = KEY_KP2, |
184 | [111] = KEY_KPASTERISK, /* FIXME */ | 171 | [111] = KEY_KP3, |
185 | [112] = KEY_KPASTERISK, /* FIXME */ | 172 | [112] = KEY_KP0, |
186 | [113] = KEY_KPASTERISK /* FIXME */ | 173 | [113] = KEY_KPDOT, |
174 | [114] = KEY_KPENTER, | ||
187 | }; | 175 | }; |
188 | 176 | ||
189 | static struct input_dev *atakbd_dev; | 177 | static struct input_dev *atakbd_dev; |
@@ -191,21 +179,15 @@ static struct input_dev *atakbd_dev; | |||
191 | static void atakbd_interrupt(unsigned char scancode, char down) | 179 | static void atakbd_interrupt(unsigned char scancode, char down) |
192 | { | 180 | { |
193 | 181 | ||
194 | if (scancode < 0x72) { /* scancodes < 0xf2 are keys */ | 182 | if (scancode < 0x73) { /* scancodes < 0xf3 are keys */ |
195 | 183 | ||
196 | // report raw events here? | 184 | // report raw events here? |
197 | 185 | ||
198 | scancode = atakbd_keycode[scancode]; | 186 | scancode = atakbd_keycode[scancode]; |
199 | 187 | ||
200 | if (scancode == KEY_CAPSLOCK) { /* CapsLock is a toggle switch key on Amiga */ | 188 | input_report_key(atakbd_dev, scancode, down); |
201 | input_report_key(atakbd_dev, scancode, 1); | 189 | input_sync(atakbd_dev); |
202 | input_report_key(atakbd_dev, scancode, 0); | 190 | } else /* scancodes >= 0xf3 are mouse data, most likely */ |
203 | input_sync(atakbd_dev); | ||
204 | } else { | ||
205 | input_report_key(atakbd_dev, scancode, down); | ||
206 | input_sync(atakbd_dev); | ||
207 | } | ||
208 | } else /* scancodes >= 0xf2 are mouse data, most likely */ | ||
209 | printk(KERN_INFO "atakbd: unhandled scancode %x\n", scancode); | 191 | printk(KERN_INFO "atakbd: unhandled scancode %x\n", scancode); |
210 | 192 | ||
211 | return; | 193 | return; |
diff --git a/drivers/input/misc/uinput.c b/drivers/input/misc/uinput.c index 96a887f33698..eb14ddf69346 100644 --- a/drivers/input/misc/uinput.c +++ b/drivers/input/misc/uinput.c | |||
@@ -410,7 +410,7 @@ static int uinput_validate_absinfo(struct input_dev *dev, unsigned int code, | |||
410 | min = abs->minimum; | 410 | min = abs->minimum; |
411 | max = abs->maximum; | 411 | max = abs->maximum; |
412 | 412 | ||
413 | if ((min != 0 || max != 0) && max <= min) { | 413 | if ((min != 0 || max != 0) && max < min) { |
414 | printk(KERN_DEBUG | 414 | printk(KERN_DEBUG |
415 | "%s: invalid abs[%02x] min:%d max:%d\n", | 415 | "%s: invalid abs[%02x] min:%d max:%d\n", |
416 | UINPUT_NAME, code, min, max); | 416 | UINPUT_NAME, code, min, max); |
diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c index 44f57cf6675b..2d95e8d93cc7 100644 --- a/drivers/input/mouse/elantech.c +++ b/drivers/input/mouse/elantech.c | |||
@@ -1178,6 +1178,8 @@ static const struct dmi_system_id elantech_dmi_has_middle_button[] = { | |||
1178 | static const char * const middle_button_pnp_ids[] = { | 1178 | static const char * const middle_button_pnp_ids[] = { |
1179 | "LEN2131", /* ThinkPad P52 w/ NFC */ | 1179 | "LEN2131", /* ThinkPad P52 w/ NFC */ |
1180 | "LEN2132", /* ThinkPad P52 */ | 1180 | "LEN2132", /* ThinkPad P52 */ |
1181 | "LEN2133", /* ThinkPad P72 w/ NFC */ | ||
1182 | "LEN2134", /* ThinkPad P72 */ | ||
1181 | NULL | 1183 | NULL |
1182 | }; | 1184 | }; |
1183 | 1185 | ||
diff --git a/drivers/input/touchscreen/egalax_ts.c b/drivers/input/touchscreen/egalax_ts.c index 80e69bb8283e..83ac8c128192 100644 --- a/drivers/input/touchscreen/egalax_ts.c +++ b/drivers/input/touchscreen/egalax_ts.c | |||
@@ -241,6 +241,9 @@ static int __maybe_unused egalax_ts_suspend(struct device *dev) | |||
241 | struct i2c_client *client = to_i2c_client(dev); | 241 | struct i2c_client *client = to_i2c_client(dev); |
242 | int ret; | 242 | int ret; |
243 | 243 | ||
244 | if (device_may_wakeup(dev)) | ||
245 | return enable_irq_wake(client->irq); | ||
246 | |||
244 | ret = i2c_master_send(client, suspend_cmd, MAX_I2C_DATA_LEN); | 247 | ret = i2c_master_send(client, suspend_cmd, MAX_I2C_DATA_LEN); |
245 | return ret > 0 ? 0 : ret; | 248 | return ret > 0 ? 0 : ret; |
246 | } | 249 | } |
@@ -249,6 +252,9 @@ static int __maybe_unused egalax_ts_resume(struct device *dev) | |||
249 | { | 252 | { |
250 | struct i2c_client *client = to_i2c_client(dev); | 253 | struct i2c_client *client = to_i2c_client(dev); |
251 | 254 | ||
255 | if (device_may_wakeup(dev)) | ||
256 | return disable_irq_wake(client->irq); | ||
257 | |||
252 | return egalax_wake_up_device(client); | 258 | return egalax_wake_up_device(client); |
253 | } | 259 | } |
254 | 260 | ||
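The egalax_ts hunks wire the standard wakeup-source handling into suspend/resume: if the device may wake the system, arm its IRQ for wakeup instead of powering the controller down, and balance that on resume. A condensed sketch of the pattern; power_down_hw()/power_up_hw() are placeholders for the driver's real I2C power commands:

#include <linux/i2c.h>
#include <linux/interrupt.h>
#include <linux/pm_wakeup.h>

/* Placeholders standing in for the device-specific power commands. */
static int power_down_hw(struct i2c_client *client) { return 0; }
static int power_up_hw(struct i2c_client *client)   { return 0; }

static int __maybe_unused example_ts_suspend(struct device *dev)
{
    struct i2c_client *client = to_i2c_client(dev);

    /* A wakeup-capable device keeps its IRQ armed instead of powering down. */
    if (device_may_wakeup(dev))
        return enable_irq_wake(client->irq);

    return power_down_hw(client);
}

static int __maybe_unused example_ts_resume(struct device *dev)
{
    struct i2c_client *client = to_i2c_client(dev);

    /* Balance the enable_irq_wake() done in suspend. */
    if (device_may_wakeup(dev))
        return disable_irq_wake(client->irq);

    return power_up_hw(client);
}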
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c index 4e04fff23977..73e47d93e7a0 100644 --- a/drivers/iommu/amd_iommu.c +++ b/drivers/iommu/amd_iommu.c | |||
@@ -246,7 +246,13 @@ static u16 get_alias(struct device *dev) | |||
246 | 246 | ||
247 | /* The callers make sure that get_device_id() does not fail here */ | 247 | /* The callers make sure that get_device_id() does not fail here */ |
248 | devid = get_device_id(dev); | 248 | devid = get_device_id(dev); |
249 | |||
250 | /* For ACPI HID devices, we simply return the devid as such */ | ||
251 | if (!dev_is_pci(dev)) | ||
252 | return devid; | ||
253 | |||
249 | ivrs_alias = amd_iommu_alias_table[devid]; | 254 | ivrs_alias = amd_iommu_alias_table[devid]; |
255 | |||
250 | pci_for_each_dma_alias(pdev, __last_alias, &pci_alias); | 256 | pci_for_each_dma_alias(pdev, __last_alias, &pci_alias); |
251 | 257 | ||
252 | if (ivrs_alias == pci_alias) | 258 | if (ivrs_alias == pci_alias) |
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c index 5f3f10cf9d9d..bedc801b06a0 100644 --- a/drivers/iommu/intel-iommu.c +++ b/drivers/iommu/intel-iommu.c | |||
@@ -2540,9 +2540,9 @@ static struct dmar_domain *dmar_insert_one_dev_info(struct intel_iommu *iommu, | |||
2540 | if (dev && dev_is_pci(dev) && info->pasid_supported) { | 2540 | if (dev && dev_is_pci(dev) && info->pasid_supported) { |
2541 | ret = intel_pasid_alloc_table(dev); | 2541 | ret = intel_pasid_alloc_table(dev); |
2542 | if (ret) { | 2542 | if (ret) { |
2543 | __dmar_remove_one_dev_info(info); | 2543 | pr_warn("No pasid table for %s, pasid disabled\n", |
2544 | spin_unlock_irqrestore(&device_domain_lock, flags); | 2544 | dev_name(dev)); |
2545 | return NULL; | 2545 | info->pasid_supported = 0; |
2546 | } | 2546 | } |
2547 | } | 2547 | } |
2548 | spin_unlock_irqrestore(&device_domain_lock, flags); | 2548 | spin_unlock_irqrestore(&device_domain_lock, flags); |
diff --git a/drivers/iommu/intel-pasid.h b/drivers/iommu/intel-pasid.h index 1c05ed6fc5a5..1fb5e12b029a 100644 --- a/drivers/iommu/intel-pasid.h +++ b/drivers/iommu/intel-pasid.h | |||
@@ -11,7 +11,7 @@ | |||
11 | #define __INTEL_PASID_H | 11 | #define __INTEL_PASID_H |
12 | 12 | ||
13 | #define PASID_MIN 0x1 | 13 | #define PASID_MIN 0x1 |
14 | #define PASID_MAX 0x100000 | 14 | #define PASID_MAX 0x20000 |
15 | 15 | ||
16 | struct pasid_entry { | 16 | struct pasid_entry { |
17 | u64 val; | 17 | u64 val; |
diff --git a/drivers/iommu/rockchip-iommu.c b/drivers/iommu/rockchip-iommu.c index 258115b10fa9..ad3e2b97469e 100644 --- a/drivers/iommu/rockchip-iommu.c +++ b/drivers/iommu/rockchip-iommu.c | |||
@@ -1241,6 +1241,12 @@ err_unprepare_clocks: | |||
1241 | 1241 | ||
1242 | static void rk_iommu_shutdown(struct platform_device *pdev) | 1242 | static void rk_iommu_shutdown(struct platform_device *pdev) |
1243 | { | 1243 | { |
1244 | struct rk_iommu *iommu = platform_get_drvdata(pdev); | ||
1245 | int i = 0, irq; | ||
1246 | |||
1247 | while ((irq = platform_get_irq(pdev, i++)) != -ENXIO) | ||
1248 | devm_free_irq(iommu->dev, irq, iommu); | ||
1249 | |||
1244 | pm_runtime_force_suspend(&pdev->dev); | 1250 | pm_runtime_force_suspend(&pdev->dev); |
1245 | } | 1251 | } |
1246 | 1252 | ||
diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h index 83504dd8100a..954dad29e6e8 100644 --- a/drivers/md/bcache/bcache.h +++ b/drivers/md/bcache/bcache.h | |||
@@ -965,6 +965,7 @@ void bch_prio_write(struct cache *ca); | |||
965 | void bch_write_bdev_super(struct cached_dev *dc, struct closure *parent); | 965 | void bch_write_bdev_super(struct cached_dev *dc, struct closure *parent); |
966 | 966 | ||
967 | extern struct workqueue_struct *bcache_wq; | 967 | extern struct workqueue_struct *bcache_wq; |
968 | extern struct workqueue_struct *bch_journal_wq; | ||
968 | extern struct mutex bch_register_lock; | 969 | extern struct mutex bch_register_lock; |
969 | extern struct list_head bch_cache_sets; | 970 | extern struct list_head bch_cache_sets; |
970 | 971 | ||
diff --git a/drivers/md/bcache/journal.c b/drivers/md/bcache/journal.c index 6116bbf870d8..522c7426f3a0 100644 --- a/drivers/md/bcache/journal.c +++ b/drivers/md/bcache/journal.c | |||
@@ -485,7 +485,7 @@ static void do_journal_discard(struct cache *ca) | |||
485 | 485 | ||
486 | closure_get(&ca->set->cl); | 486 | closure_get(&ca->set->cl); |
487 | INIT_WORK(&ja->discard_work, journal_discard_work); | 487 | INIT_WORK(&ja->discard_work, journal_discard_work); |
488 | schedule_work(&ja->discard_work); | 488 | queue_work(bch_journal_wq, &ja->discard_work); |
489 | } | 489 | } |
490 | } | 490 | } |
491 | 491 | ||
@@ -592,7 +592,7 @@ static void journal_write_done(struct closure *cl) | |||
592 | : &j->w[0]; | 592 | : &j->w[0]; |
593 | 593 | ||
594 | __closure_wake_up(&w->wait); | 594 | __closure_wake_up(&w->wait); |
595 | continue_at_nobarrier(cl, journal_write, system_wq); | 595 | continue_at_nobarrier(cl, journal_write, bch_journal_wq); |
596 | } | 596 | } |
597 | 597 | ||
598 | static void journal_write_unlock(struct closure *cl) | 598 | static void journal_write_unlock(struct closure *cl) |
@@ -627,7 +627,7 @@ static void journal_write_unlocked(struct closure *cl) | |||
627 | spin_unlock(&c->journal.lock); | 627 | spin_unlock(&c->journal.lock); |
628 | 628 | ||
629 | btree_flush_write(c); | 629 | btree_flush_write(c); |
630 | continue_at(cl, journal_write, system_wq); | 630 | continue_at(cl, journal_write, bch_journal_wq); |
631 | return; | 631 | return; |
632 | } | 632 | } |
633 | 633 | ||
diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c index 94c756c66bd7..30ba9aeb5ee8 100644 --- a/drivers/md/bcache/super.c +++ b/drivers/md/bcache/super.c | |||
@@ -47,6 +47,7 @@ static int bcache_major; | |||
47 | static DEFINE_IDA(bcache_device_idx); | 47 | static DEFINE_IDA(bcache_device_idx); |
48 | static wait_queue_head_t unregister_wait; | 48 | static wait_queue_head_t unregister_wait; |
49 | struct workqueue_struct *bcache_wq; | 49 | struct workqueue_struct *bcache_wq; |
50 | struct workqueue_struct *bch_journal_wq; | ||
50 | 51 | ||
51 | #define BTREE_MAX_PAGES (256 * 1024 / PAGE_SIZE) | 52 | #define BTREE_MAX_PAGES (256 * 1024 / PAGE_SIZE) |
52 | /* limitation of partitions number on single bcache device */ | 53 | /* limitation of partitions number on single bcache device */ |
@@ -2341,6 +2342,9 @@ static void bcache_exit(void) | |||
2341 | kobject_put(bcache_kobj); | 2342 | kobject_put(bcache_kobj); |
2342 | if (bcache_wq) | 2343 | if (bcache_wq) |
2343 | destroy_workqueue(bcache_wq); | 2344 | destroy_workqueue(bcache_wq); |
2345 | if (bch_journal_wq) | ||
2346 | destroy_workqueue(bch_journal_wq); | ||
2347 | |||
2344 | if (bcache_major) | 2348 | if (bcache_major) |
2345 | unregister_blkdev(bcache_major, "bcache"); | 2349 | unregister_blkdev(bcache_major, "bcache"); |
2346 | unregister_reboot_notifier(&reboot); | 2350 | unregister_reboot_notifier(&reboot); |
@@ -2370,6 +2374,10 @@ static int __init bcache_init(void) | |||
2370 | if (!bcache_wq) | 2374 | if (!bcache_wq) |
2371 | goto err; | 2375 | goto err; |
2372 | 2376 | ||
2377 | bch_journal_wq = alloc_workqueue("bch_journal", WQ_MEM_RECLAIM, 0); | ||
2378 | if (!bch_journal_wq) | ||
2379 | goto err; | ||
2380 | |||
2373 | bcache_kobj = kobject_create_and_add("bcache", fs_kobj); | 2381 | bcache_kobj = kobject_create_and_add("bcache", fs_kobj); |
2374 | if (!bcache_kobj) | 2382 | if (!bcache_kobj) |
2375 | goto err; | 2383 | goto err; |
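The bcache hunks move journal work off system_wq onto a dedicated workqueue created with WQ_MEM_RECLAIM, so journal writes keep making forward progress under memory pressure. The lifecycle looks roughly like this; the module, workqueue, and work-item names are illustrative:

#include <linux/module.h>
#include <linux/workqueue.h>

static struct workqueue_struct *example_journal_wq;
static struct work_struct example_work;

static void example_work_fn(struct work_struct *w)
{
    /* flush journal entries here */
}

static int __init example_init(void)
{
    /* WQ_MEM_RECLAIM guarantees a rescuer thread under memory pressure. */
    example_journal_wq = alloc_workqueue("example_journal", WQ_MEM_RECLAIM, 0);
    if (!example_journal_wq)
        return -ENOMEM;

    INIT_WORK(&example_work, example_work_fn);
    queue_work(example_journal_wq, &example_work);   /* not system_wq */
    return 0;
}

static void __exit example_exit(void)
{
    if (example_journal_wq)
        destroy_workqueue(example_journal_wq);   /* drains pending work */
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");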
diff --git a/drivers/media/i2c/mt9v111.c b/drivers/media/i2c/mt9v111.c index b5410aeb5fe2..bb41bea950ac 100644 --- a/drivers/media/i2c/mt9v111.c +++ b/drivers/media/i2c/mt9v111.c | |||
@@ -1159,41 +1159,21 @@ static int mt9v111_probe(struct i2c_client *client) | |||
1159 | V4L2_CID_AUTO_WHITE_BALANCE, | 1159 | V4L2_CID_AUTO_WHITE_BALANCE, |
1160 | 0, 1, 1, | 1160 | 0, 1, 1, |
1161 | V4L2_WHITE_BALANCE_AUTO); | 1161 | V4L2_WHITE_BALANCE_AUTO); |
1162 | if (IS_ERR_OR_NULL(mt9v111->auto_awb)) { | ||
1163 | ret = PTR_ERR(mt9v111->auto_awb); | ||
1164 | goto error_free_ctrls; | ||
1165 | } | ||
1166 | |||
1167 | mt9v111->auto_exp = v4l2_ctrl_new_std_menu(&mt9v111->ctrls, | 1162 | mt9v111->auto_exp = v4l2_ctrl_new_std_menu(&mt9v111->ctrls, |
1168 | &mt9v111_ctrl_ops, | 1163 | &mt9v111_ctrl_ops, |
1169 | V4L2_CID_EXPOSURE_AUTO, | 1164 | V4L2_CID_EXPOSURE_AUTO, |
1170 | V4L2_EXPOSURE_MANUAL, | 1165 | V4L2_EXPOSURE_MANUAL, |
1171 | 0, V4L2_EXPOSURE_AUTO); | 1166 | 0, V4L2_EXPOSURE_AUTO); |
1172 | if (IS_ERR_OR_NULL(mt9v111->auto_exp)) { | ||
1173 | ret = PTR_ERR(mt9v111->auto_exp); | ||
1174 | goto error_free_ctrls; | ||
1175 | } | ||
1176 | |||
1177 | /* Initialize timings */ | ||
1178 | mt9v111->hblank = v4l2_ctrl_new_std(&mt9v111->ctrls, &mt9v111_ctrl_ops, | 1167 | mt9v111->hblank = v4l2_ctrl_new_std(&mt9v111->ctrls, &mt9v111_ctrl_ops, |
1179 | V4L2_CID_HBLANK, | 1168 | V4L2_CID_HBLANK, |
1180 | MT9V111_CORE_R05_MIN_HBLANK, | 1169 | MT9V111_CORE_R05_MIN_HBLANK, |
1181 | MT9V111_CORE_R05_MAX_HBLANK, 1, | 1170 | MT9V111_CORE_R05_MAX_HBLANK, 1, |
1182 | MT9V111_CORE_R05_DEF_HBLANK); | 1171 | MT9V111_CORE_R05_DEF_HBLANK); |
1183 | if (IS_ERR_OR_NULL(mt9v111->hblank)) { | ||
1184 | ret = PTR_ERR(mt9v111->hblank); | ||
1185 | goto error_free_ctrls; | ||
1186 | } | ||
1187 | |||
1188 | mt9v111->vblank = v4l2_ctrl_new_std(&mt9v111->ctrls, &mt9v111_ctrl_ops, | 1172 | mt9v111->vblank = v4l2_ctrl_new_std(&mt9v111->ctrls, &mt9v111_ctrl_ops, |
1189 | V4L2_CID_VBLANK, | 1173 | V4L2_CID_VBLANK, |
1190 | MT9V111_CORE_R06_MIN_VBLANK, | 1174 | MT9V111_CORE_R06_MIN_VBLANK, |
1191 | MT9V111_CORE_R06_MAX_VBLANK, 1, | 1175 | MT9V111_CORE_R06_MAX_VBLANK, 1, |
1192 | MT9V111_CORE_R06_DEF_VBLANK); | 1176 | MT9V111_CORE_R06_DEF_VBLANK); |
1193 | if (IS_ERR_OR_NULL(mt9v111->vblank)) { | ||
1194 | ret = PTR_ERR(mt9v111->vblank); | ||
1195 | goto error_free_ctrls; | ||
1196 | } | ||
1197 | 1177 | ||
1198 | /* PIXEL_RATE is fixed: just expose it to user space. */ | 1178 | /* PIXEL_RATE is fixed: just expose it to user space. */ |
1199 | v4l2_ctrl_new_std(&mt9v111->ctrls, &mt9v111_ctrl_ops, | 1179 | v4l2_ctrl_new_std(&mt9v111->ctrls, &mt9v111_ctrl_ops, |
@@ -1201,6 +1181,10 @@ static int mt9v111_probe(struct i2c_client *client) | |||
1201 | DIV_ROUND_CLOSEST(mt9v111->sysclk, 2), 1, | 1181 | DIV_ROUND_CLOSEST(mt9v111->sysclk, 2), 1, |
1202 | DIV_ROUND_CLOSEST(mt9v111->sysclk, 2)); | 1182 | DIV_ROUND_CLOSEST(mt9v111->sysclk, 2)); |
1203 | 1183 | ||
1184 | if (mt9v111->ctrls.error) { | ||
1185 | ret = mt9v111->ctrls.error; | ||
1186 | goto error_free_ctrls; | ||
1187 | } | ||
1204 | mt9v111->sd.ctrl_handler = &mt9v111->ctrls; | 1188 | mt9v111->sd.ctrl_handler = &mt9v111->ctrls; |
1205 | 1189 | ||
1206 | /* Start with default configuration: 640x480 UYVY. */ | 1190 | /* Start with default configuration: 640x480 UYVY. */ |
@@ -1226,26 +1210,27 @@ static int mt9v111_probe(struct i2c_client *client) | |||
1226 | mt9v111->pad.flags = MEDIA_PAD_FL_SOURCE; | 1210 | mt9v111->pad.flags = MEDIA_PAD_FL_SOURCE; |
1227 | ret = media_entity_pads_init(&mt9v111->sd.entity, 1, &mt9v111->pad); | 1211 | ret = media_entity_pads_init(&mt9v111->sd.entity, 1, &mt9v111->pad); |
1228 | if (ret) | 1212 | if (ret) |
1229 | goto error_free_ctrls; | 1213 | goto error_free_entity; |
1230 | #endif | 1214 | #endif |
1231 | 1215 | ||
1232 | ret = mt9v111_chip_probe(mt9v111); | 1216 | ret = mt9v111_chip_probe(mt9v111); |
1233 | if (ret) | 1217 | if (ret) |
1234 | goto error_free_ctrls; | 1218 | goto error_free_entity; |
1235 | 1219 | ||
1236 | ret = v4l2_async_register_subdev(&mt9v111->sd); | 1220 | ret = v4l2_async_register_subdev(&mt9v111->sd); |
1237 | if (ret) | 1221 | if (ret) |
1238 | goto error_free_ctrls; | 1222 | goto error_free_entity; |
1239 | 1223 | ||
1240 | return 0; | 1224 | return 0; |
1241 | 1225 | ||
1242 | error_free_ctrls: | 1226 | error_free_entity: |
1243 | v4l2_ctrl_handler_free(&mt9v111->ctrls); | ||
1244 | |||
1245 | #if IS_ENABLED(CONFIG_MEDIA_CONTROLLER) | 1227 | #if IS_ENABLED(CONFIG_MEDIA_CONTROLLER) |
1246 | media_entity_cleanup(&mt9v111->sd.entity); | 1228 | media_entity_cleanup(&mt9v111->sd.entity); |
1247 | #endif | 1229 | #endif |
1248 | 1230 | ||
1231 | error_free_ctrls: | ||
1232 | v4l2_ctrl_handler_free(&mt9v111->ctrls); | ||
1233 | |||
1249 | mutex_destroy(&mt9v111->pwr_mutex); | 1234 | mutex_destroy(&mt9v111->pwr_mutex); |
1250 | mutex_destroy(&mt9v111->stream_mutex); | 1235 | mutex_destroy(&mt9v111->stream_mutex); |
1251 | 1236 | ||
@@ -1259,12 +1244,12 @@ static int mt9v111_remove(struct i2c_client *client) | |||
1259 | 1244 | ||
1260 | v4l2_async_unregister_subdev(sd); | 1245 | v4l2_async_unregister_subdev(sd); |
1261 | 1246 | ||
1262 | v4l2_ctrl_handler_free(&mt9v111->ctrls); | ||
1263 | |||
1264 | #if IS_ENABLED(CONFIG_MEDIA_CONTROLLER) | 1247 | #if IS_ENABLED(CONFIG_MEDIA_CONTROLLER) |
1265 | media_entity_cleanup(&sd->entity); | 1248 | media_entity_cleanup(&sd->entity); |
1266 | #endif | 1249 | #endif |
1267 | 1250 | ||
1251 | v4l2_ctrl_handler_free(&mt9v111->ctrls); | ||
1252 | |||
1268 | mutex_destroy(&mt9v111->pwr_mutex); | 1253 | mutex_destroy(&mt9v111->pwr_mutex); |
1269 | mutex_destroy(&mt9v111->stream_mutex); | 1254 | mutex_destroy(&mt9v111->stream_mutex); |
1270 | 1255 | ||
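The mt9v111 rework relies on a v4l2_ctrl_handler latching the first failure internally, so individual v4l2_ctrl_new_std() results need no checking; one test of the handler's error field after all controls are added is enough. A sketch of that idiom with an arbitrary control set:

#include <linux/videodev2.h>
#include <media/v4l2-ctrls.h>

/* Sketch: add several controls, then check the handler's sticky error once. */
static int example_init_ctrls(struct v4l2_ctrl_handler *hdl,
                              const struct v4l2_ctrl_ops *ops)
{
    int ret;

    ret = v4l2_ctrl_handler_init(hdl, 4);
    if (ret)
        return ret;

    v4l2_ctrl_new_std(hdl, ops, V4L2_CID_HBLANK, 0, 1023, 1, 128);
    v4l2_ctrl_new_std(hdl, ops, V4L2_CID_VBLANK, 0, 1023, 1, 8);
    v4l2_ctrl_new_std(hdl, ops, V4L2_CID_AUTO_WHITE_BALANCE, 0, 1, 1, 1);

    if (hdl->error) {                 /* first failure is remembered here */
        ret = hdl->error;
        v4l2_ctrl_handler_free(hdl);
        return ret;
    }
    return 0;
}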
diff --git a/drivers/media/platform/Kconfig b/drivers/media/platform/Kconfig index 94c1fe0e9787..54fe90acb5b2 100644 --- a/drivers/media/platform/Kconfig +++ b/drivers/media/platform/Kconfig | |||
@@ -541,6 +541,8 @@ config VIDEO_CROS_EC_CEC | |||
541 | depends on MFD_CROS_EC | 541 | depends on MFD_CROS_EC |
542 | select CEC_CORE | 542 | select CEC_CORE |
543 | select CEC_NOTIFIER | 543 | select CEC_NOTIFIER |
544 | select CHROME_PLATFORMS | ||
545 | select CROS_EC_PROTO | ||
544 | ---help--- | 546 | ---help--- |
545 | If you say yes here you will get support for the | 547 | If you say yes here you will get support for the |
546 | ChromeOS Embedded Controller's CEC. | 548 | ChromeOS Embedded Controller's CEC. |
diff --git a/drivers/media/platform/qcom/camss/camss-csid.c b/drivers/media/platform/qcom/camss/camss-csid.c index 729b31891466..a5ae85674ffb 100644 --- a/drivers/media/platform/qcom/camss/camss-csid.c +++ b/drivers/media/platform/qcom/camss/camss-csid.c | |||
@@ -10,6 +10,7 @@ | |||
10 | #include <linux/clk.h> | 10 | #include <linux/clk.h> |
11 | #include <linux/completion.h> | 11 | #include <linux/completion.h> |
12 | #include <linux/interrupt.h> | 12 | #include <linux/interrupt.h> |
13 | #include <linux/io.h> | ||
13 | #include <linux/kernel.h> | 14 | #include <linux/kernel.h> |
14 | #include <linux/of.h> | 15 | #include <linux/of.h> |
15 | #include <linux/platform_device.h> | 16 | #include <linux/platform_device.h> |
diff --git a/drivers/media/platform/qcom/camss/camss-csiphy-2ph-1-0.c b/drivers/media/platform/qcom/camss/camss-csiphy-2ph-1-0.c index c832539397d7..12bce391d71f 100644 --- a/drivers/media/platform/qcom/camss/camss-csiphy-2ph-1-0.c +++ b/drivers/media/platform/qcom/camss/camss-csiphy-2ph-1-0.c | |||
@@ -12,6 +12,7 @@ | |||
12 | 12 | ||
13 | #include <linux/delay.h> | 13 | #include <linux/delay.h> |
14 | #include <linux/interrupt.h> | 14 | #include <linux/interrupt.h> |
15 | #include <linux/io.h> | ||
15 | 16 | ||
16 | #define CAMSS_CSI_PHY_LNn_CFG2(n) (0x004 + 0x40 * (n)) | 17 | #define CAMSS_CSI_PHY_LNn_CFG2(n) (0x004 + 0x40 * (n)) |
17 | #define CAMSS_CSI_PHY_LNn_CFG3(n) (0x008 + 0x40 * (n)) | 18 | #define CAMSS_CSI_PHY_LNn_CFG3(n) (0x008 + 0x40 * (n)) |
diff --git a/drivers/media/platform/qcom/camss/camss-csiphy-3ph-1-0.c b/drivers/media/platform/qcom/camss/camss-csiphy-3ph-1-0.c index bcd0dfd33618..2e65caf1ecae 100644 --- a/drivers/media/platform/qcom/camss/camss-csiphy-3ph-1-0.c +++ b/drivers/media/platform/qcom/camss/camss-csiphy-3ph-1-0.c | |||
@@ -12,6 +12,7 @@ | |||
12 | 12 | ||
13 | #include <linux/delay.h> | 13 | #include <linux/delay.h> |
14 | #include <linux/interrupt.h> | 14 | #include <linux/interrupt.h> |
15 | #include <linux/io.h> | ||
15 | 16 | ||
16 | #define CSIPHY_3PH_LNn_CFG1(n) (0x000 + 0x100 * (n)) | 17 | #define CSIPHY_3PH_LNn_CFG1(n) (0x000 + 0x100 * (n)) |
17 | #define CSIPHY_3PH_LNn_CFG1_SWI_REC_DLY_PRG (BIT(7) | BIT(6)) | 18 | #define CSIPHY_3PH_LNn_CFG1_SWI_REC_DLY_PRG (BIT(7) | BIT(6)) |
diff --git a/drivers/media/platform/qcom/camss/camss-csiphy.c b/drivers/media/platform/qcom/camss/camss-csiphy.c index 4559f3b1b38c..008afb85023b 100644 --- a/drivers/media/platform/qcom/camss/camss-csiphy.c +++ b/drivers/media/platform/qcom/camss/camss-csiphy.c | |||
@@ -10,6 +10,7 @@ | |||
10 | #include <linux/clk.h> | 10 | #include <linux/clk.h> |
11 | #include <linux/delay.h> | 11 | #include <linux/delay.h> |
12 | #include <linux/interrupt.h> | 12 | #include <linux/interrupt.h> |
13 | #include <linux/io.h> | ||
13 | #include <linux/kernel.h> | 14 | #include <linux/kernel.h> |
14 | #include <linux/of.h> | 15 | #include <linux/of.h> |
15 | #include <linux/platform_device.h> | 16 | #include <linux/platform_device.h> |
diff --git a/drivers/media/platform/qcom/camss/camss-ispif.c b/drivers/media/platform/qcom/camss/camss-ispif.c index 7f269021d08c..1f33b4eb198c 100644 --- a/drivers/media/platform/qcom/camss/camss-ispif.c +++ b/drivers/media/platform/qcom/camss/camss-ispif.c | |||
@@ -10,6 +10,7 @@ | |||
10 | #include <linux/clk.h> | 10 | #include <linux/clk.h> |
11 | #include <linux/completion.h> | 11 | #include <linux/completion.h> |
12 | #include <linux/interrupt.h> | 12 | #include <linux/interrupt.h> |
13 | #include <linux/io.h> | ||
13 | #include <linux/iopoll.h> | 14 | #include <linux/iopoll.h> |
14 | #include <linux/kernel.h> | 15 | #include <linux/kernel.h> |
15 | #include <linux/mutex.h> | 16 | #include <linux/mutex.h> |
@@ -1076,8 +1077,8 @@ int msm_ispif_subdev_init(struct ispif_device *ispif, | |||
1076 | else | 1077 | else |
1077 | return -EINVAL; | 1078 | return -EINVAL; |
1078 | 1079 | ||
1079 | ispif->line = kcalloc(ispif->line_num, sizeof(*ispif->line), | 1080 | ispif->line = devm_kcalloc(dev, ispif->line_num, sizeof(*ispif->line), |
1080 | GFP_KERNEL); | 1081 | GFP_KERNEL); |
1081 | if (!ispif->line) | 1082 | if (!ispif->line) |
1082 | return -ENOMEM; | 1083 | return -ENOMEM; |
1083 | 1084 | ||
diff --git a/drivers/media/platform/qcom/camss/camss-vfe-4-1.c b/drivers/media/platform/qcom/camss/camss-vfe-4-1.c index da3a9fed9f2d..174a36be6f5d 100644 --- a/drivers/media/platform/qcom/camss/camss-vfe-4-1.c +++ b/drivers/media/platform/qcom/camss/camss-vfe-4-1.c | |||
@@ -9,6 +9,7 @@ | |||
9 | */ | 9 | */ |
10 | 10 | ||
11 | #include <linux/interrupt.h> | 11 | #include <linux/interrupt.h> |
12 | #include <linux/io.h> | ||
12 | #include <linux/iopoll.h> | 13 | #include <linux/iopoll.h> |
13 | 14 | ||
14 | #include "camss-vfe.h" | 15 | #include "camss-vfe.h" |
diff --git a/drivers/media/platform/qcom/camss/camss-vfe-4-7.c b/drivers/media/platform/qcom/camss/camss-vfe-4-7.c index 4c584bffd179..0dca8bf9281e 100644 --- a/drivers/media/platform/qcom/camss/camss-vfe-4-7.c +++ b/drivers/media/platform/qcom/camss/camss-vfe-4-7.c | |||
@@ -9,6 +9,7 @@ | |||
9 | */ | 9 | */ |
10 | 10 | ||
11 | #include <linux/interrupt.h> | 11 | #include <linux/interrupt.h> |
12 | #include <linux/io.h> | ||
12 | #include <linux/iopoll.h> | 13 | #include <linux/iopoll.h> |
13 | 14 | ||
14 | #include "camss-vfe.h" | 15 | #include "camss-vfe.h" |
diff --git a/drivers/media/platform/qcom/camss/camss.c b/drivers/media/platform/qcom/camss/camss.c index dcc0c30ef1b1..669615fff6a0 100644 --- a/drivers/media/platform/qcom/camss/camss.c +++ b/drivers/media/platform/qcom/camss/camss.c | |||
@@ -848,17 +848,18 @@ static int camss_probe(struct platform_device *pdev) | |||
848 | return -EINVAL; | 848 | return -EINVAL; |
849 | } | 849 | } |
850 | 850 | ||
851 | camss->csiphy = kcalloc(camss->csiphy_num, sizeof(*camss->csiphy), | 851 | camss->csiphy = devm_kcalloc(dev, camss->csiphy_num, |
852 | GFP_KERNEL); | 852 | sizeof(*camss->csiphy), GFP_KERNEL); |
853 | if (!camss->csiphy) | 853 | if (!camss->csiphy) |
854 | return -ENOMEM; | 854 | return -ENOMEM; |
855 | 855 | ||
856 | camss->csid = kcalloc(camss->csid_num, sizeof(*camss->csid), | 856 | camss->csid = devm_kcalloc(dev, camss->csid_num, sizeof(*camss->csid), |
857 | GFP_KERNEL); | 857 | GFP_KERNEL); |
858 | if (!camss->csid) | 858 | if (!camss->csid) |
859 | return -ENOMEM; | 859 | return -ENOMEM; |
860 | 860 | ||
861 | camss->vfe = kcalloc(camss->vfe_num, sizeof(*camss->vfe), GFP_KERNEL); | 861 | camss->vfe = devm_kcalloc(dev, camss->vfe_num, sizeof(*camss->vfe), |
862 | GFP_KERNEL); | ||
862 | if (!camss->vfe) | 863 | if (!camss->vfe) |
863 | return -ENOMEM; | 864 | return -ENOMEM; |
864 | 865 | ||
@@ -993,12 +994,12 @@ static const struct of_device_id camss_dt_match[] = { | |||
993 | 994 | ||
994 | MODULE_DEVICE_TABLE(of, camss_dt_match); | 995 | MODULE_DEVICE_TABLE(of, camss_dt_match); |
995 | 996 | ||
996 | static int camss_runtime_suspend(struct device *dev) | 997 | static int __maybe_unused camss_runtime_suspend(struct device *dev) |
997 | { | 998 | { |
998 | return 0; | 999 | return 0; |
999 | } | 1000 | } |
1000 | 1001 | ||
1001 | static int camss_runtime_resume(struct device *dev) | 1002 | static int __maybe_unused camss_runtime_resume(struct device *dev) |
1002 | { | 1003 | { |
1003 | return 0; | 1004 | return 0; |
1004 | } | 1005 | } |
diff --git a/drivers/media/usb/dvb-usb-v2/af9035.c b/drivers/media/usb/dvb-usb-v2/af9035.c index 666d319d3d1a..1f6c1eefe389 100644 --- a/drivers/media/usb/dvb-usb-v2/af9035.c +++ b/drivers/media/usb/dvb-usb-v2/af9035.c | |||
@@ -402,8 +402,10 @@ static int af9035_i2c_master_xfer(struct i2c_adapter *adap, | |||
402 | if (msg[0].addr == state->af9033_i2c_addr[1]) | 402 | if (msg[0].addr == state->af9033_i2c_addr[1]) |
403 | reg |= 0x100000; | 403 | reg |= 0x100000; |
404 | 404 | ||
405 | ret = af9035_wr_regs(d, reg, &msg[0].buf[3], | 405 | ret = (msg[0].len >= 3) ? af9035_wr_regs(d, reg, |
406 | msg[0].len - 3); | 406 | &msg[0].buf[3], |
407 | msg[0].len - 3) | ||
408 | : -EOPNOTSUPP; | ||
407 | } else { | 409 | } else { |
408 | /* I2C write */ | 410 | /* I2C write */ |
409 | u8 buf[MAX_XFER_SIZE]; | 411 | u8 buf[MAX_XFER_SIZE]; |
diff --git a/drivers/mfd/omap-usb-host.c b/drivers/mfd/omap-usb-host.c index e11ab12fbdf2..800986a79704 100644 --- a/drivers/mfd/omap-usb-host.c +++ b/drivers/mfd/omap-usb-host.c | |||
@@ -528,8 +528,8 @@ static int usbhs_omap_get_dt_pdata(struct device *dev, | |||
528 | } | 528 | } |
529 | 529 | ||
530 | static const struct of_device_id usbhs_child_match_table[] = { | 530 | static const struct of_device_id usbhs_child_match_table[] = { |
531 | { .compatible = "ti,omap-ehci", }, | 531 | { .compatible = "ti,ehci-omap", }, |
532 | { .compatible = "ti,omap-ohci", }, | 532 | { .compatible = "ti,ohci-omap3", }, |
533 | { } | 533 | { } |
534 | }; | 534 | }; |
535 | 535 | ||
@@ -855,6 +855,7 @@ static struct platform_driver usbhs_omap_driver = { | |||
855 | .pm = &usbhsomap_dev_pm_ops, | 855 | .pm = &usbhsomap_dev_pm_ops, |
856 | .of_match_table = usbhs_omap_dt_ids, | 856 | .of_match_table = usbhs_omap_dt_ids, |
857 | }, | 857 | }, |
858 | .probe = usbhs_omap_probe, | ||
858 | .remove = usbhs_omap_remove, | 859 | .remove = usbhs_omap_remove, |
859 | }; | 860 | }; |
860 | 861 | ||
@@ -864,9 +865,9 @@ MODULE_ALIAS("platform:" USBHS_DRIVER_NAME); | |||
864 | MODULE_LICENSE("GPL v2"); | 865 | MODULE_LICENSE("GPL v2"); |
865 | MODULE_DESCRIPTION("usb host common core driver for omap EHCI and OHCI"); | 866 | MODULE_DESCRIPTION("usb host common core driver for omap EHCI and OHCI"); |
866 | 867 | ||
867 | static int __init omap_usbhs_drvinit(void) | 868 | static int omap_usbhs_drvinit(void) |
868 | { | 869 | { |
869 | return platform_driver_probe(&usbhs_omap_driver, usbhs_omap_probe); | 870 | return platform_driver_register(&usbhs_omap_driver); |
870 | } | 871 | } |
871 | 872 | ||
872 | /* | 873 | /* |
@@ -878,7 +879,7 @@ static int __init omap_usbhs_drvinit(void) | |||
878 | */ | 879 | */ |
879 | fs_initcall_sync(omap_usbhs_drvinit); | 880 | fs_initcall_sync(omap_usbhs_drvinit); |
880 | 881 | ||
881 | static void __exit omap_usbhs_drvexit(void) | 882 | static void omap_usbhs_drvexit(void) |
882 | { | 883 | { |
883 | platform_driver_unregister(&usbhs_omap_driver); | 884 | platform_driver_unregister(&usbhs_omap_driver); |
884 | } | 885 | } |
diff --git a/drivers/mtd/devices/m25p80.c b/drivers/mtd/devices/m25p80.c index cbfafc453274..270d3c9580c5 100644 --- a/drivers/mtd/devices/m25p80.c +++ b/drivers/mtd/devices/m25p80.c | |||
@@ -39,13 +39,23 @@ static int m25p80_read_reg(struct spi_nor *nor, u8 code, u8 *val, int len) | |||
39 | struct spi_mem_op op = SPI_MEM_OP(SPI_MEM_OP_CMD(code, 1), | 39 | struct spi_mem_op op = SPI_MEM_OP(SPI_MEM_OP_CMD(code, 1), |
40 | SPI_MEM_OP_NO_ADDR, | 40 | SPI_MEM_OP_NO_ADDR, |
41 | SPI_MEM_OP_NO_DUMMY, | 41 | SPI_MEM_OP_NO_DUMMY, |
42 | SPI_MEM_OP_DATA_IN(len, val, 1)); | 42 | SPI_MEM_OP_DATA_IN(len, NULL, 1)); |
43 | void *scratchbuf; | ||
43 | int ret; | 44 | int ret; |
44 | 45 | ||
46 | scratchbuf = kmalloc(len, GFP_KERNEL); | ||
47 | if (!scratchbuf) | ||
48 | return -ENOMEM; | ||
49 | |||
50 | op.data.buf.in = scratchbuf; | ||
45 | ret = spi_mem_exec_op(flash->spimem, &op); | 51 | ret = spi_mem_exec_op(flash->spimem, &op); |
46 | if (ret < 0) | 52 | if (ret < 0) |
47 | dev_err(&flash->spimem->spi->dev, "error %d reading %x\n", ret, | 53 | dev_err(&flash->spimem->spi->dev, "error %d reading %x\n", ret, |
48 | code); | 54 | code); |
55 | else | ||
56 | memcpy(val, scratchbuf, len); | ||
57 | |||
58 | kfree(scratchbuf); | ||
49 | 59 | ||
50 | return ret; | 60 | return ret; |
51 | } | 61 | } |
@@ -56,9 +66,19 @@ static int m25p80_write_reg(struct spi_nor *nor, u8 opcode, u8 *buf, int len) | |||
56 | struct spi_mem_op op = SPI_MEM_OP(SPI_MEM_OP_CMD(opcode, 1), | 66 | struct spi_mem_op op = SPI_MEM_OP(SPI_MEM_OP_CMD(opcode, 1), |
57 | SPI_MEM_OP_NO_ADDR, | 67 | SPI_MEM_OP_NO_ADDR, |
58 | SPI_MEM_OP_NO_DUMMY, | 68 | SPI_MEM_OP_NO_DUMMY, |
59 | SPI_MEM_OP_DATA_OUT(len, buf, 1)); | 69 | SPI_MEM_OP_DATA_OUT(len, NULL, 1)); |
70 | void *scratchbuf; | ||
71 | int ret; | ||
60 | 72 | ||
61 | return spi_mem_exec_op(flash->spimem, &op); | 73 | scratchbuf = kmemdup(buf, len, GFP_KERNEL); |
74 | if (!scratchbuf) | ||
75 | return -ENOMEM; | ||
76 | |||
77 | op.data.buf.out = scratchbuf; | ||
78 | ret = spi_mem_exec_op(flash->spimem, &op); | ||
79 | kfree(scratchbuf); | ||
80 | |||
81 | return ret; | ||
62 | } | 82 | } |
63 | 83 | ||
64 | static ssize_t m25p80_write(struct spi_nor *nor, loff_t to, size_t len, | 84 | static ssize_t m25p80_write(struct spi_nor *nor, loff_t to, size_t len, |
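The m25p80 change stops handing caller-provided (possibly stack or vmalloc) buffers straight to spi_mem_exec_op() and bounces the data through a kmalloc'd scratch buffer, which is DMA-safe. Reduced to the write path, as a sketch using only the SPI-mem calls already shown above:

#include <linux/slab.h>
#include <linux/spi/spi-mem.h>

/* Sketch: bounce a caller buffer through kmalloc'd memory before DMA. */
static int example_write_reg(struct spi_mem *spimem, u8 opcode,
                             const u8 *buf, int len)
{
    struct spi_mem_op op = SPI_MEM_OP(SPI_MEM_OP_CMD(opcode, 1),
                                      SPI_MEM_OP_NO_ADDR,
                                      SPI_MEM_OP_NO_DUMMY,
                                      SPI_MEM_OP_DATA_OUT(len, NULL, 1));
    void *scratch;
    int ret;

    scratch = kmemdup(buf, len, GFP_KERNEL);   /* heap copy is DMA-safe */
    if (!scratch)
        return -ENOMEM;

    op.data.buf.out = scratch;
    ret = spi_mem_exec_op(spimem, &op);
    kfree(scratch);

    return ret;
}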
diff --git a/drivers/mtd/mtdpart.c b/drivers/mtd/mtdpart.c index 52e2cb35fc79..99c460facd5e 100644 --- a/drivers/mtd/mtdpart.c +++ b/drivers/mtd/mtdpart.c | |||
@@ -873,8 +873,11 @@ static int mtd_part_of_parse(struct mtd_info *master, | |||
873 | int ret, err = 0; | 873 | int ret, err = 0; |
874 | 874 | ||
875 | np = mtd_get_of_node(master); | 875 | np = mtd_get_of_node(master); |
876 | if (!mtd_is_partition(master)) | 876 | if (mtd_is_partition(master)) |
877 | of_node_get(np); | ||
878 | else | ||
877 | np = of_get_child_by_name(np, "partitions"); | 879 | np = of_get_child_by_name(np, "partitions"); |
880 | |||
878 | of_property_for_each_string(np, "compatible", prop, compat) { | 881 | of_property_for_each_string(np, "compatible", prop, compat) { |
879 | parser = mtd_part_get_compatible_parser(compat); | 882 | parser = mtd_part_get_compatible_parser(compat); |
880 | if (!parser) | 883 | if (!parser) |
diff --git a/drivers/mtd/nand/raw/denali.c b/drivers/mtd/nand/raw/denali.c index 67b2065e7a19..b864b93dd289 100644 --- a/drivers/mtd/nand/raw/denali.c +++ b/drivers/mtd/nand/raw/denali.c | |||
@@ -596,6 +596,12 @@ static int denali_dma_xfer(struct denali_nand_info *denali, void *buf, | |||
596 | } | 596 | } |
597 | 597 | ||
598 | iowrite32(DMA_ENABLE__FLAG, denali->reg + DMA_ENABLE); | 598 | iowrite32(DMA_ENABLE__FLAG, denali->reg + DMA_ENABLE); |
599 | /* | ||
600 | * The ->setup_dma() hook kicks DMA by using the data/command | ||
601 | * interface, which belongs to a different AXI port from the | ||
602 | * register interface. Read back the register to avoid a race. | ||
603 | */ | ||
604 | ioread32(denali->reg + DMA_ENABLE); | ||
599 | 605 | ||
600 | denali_reset_irq(denali); | 606 | denali_reset_irq(denali); |
601 | denali->setup_dma(denali, dma_addr, page, write); | 607 | denali->setup_dma(denali, dma_addr, page, write); |
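The denali comment above describes the hazard: the register write that enables DMA is posted on one AXI port while the DMA kick goes through another, so the enable may not have landed before the kick. Reading the register back orders the two. The bare MMIO pattern, with placeholder offsets and values:

#include <linux/io.h>
#include <linux/types.h>

/* Sketch: flush a posted MMIO write by reading the same register back. */
static void kick_dma(void __iomem *regs, u32 enable_off, u32 kick_off,
                     u32 enable_flag, u32 kick_val)
{
    iowrite32(enable_flag, regs + enable_off);
    /*
     * The write above may still sit in a write buffer; the read forces it
     * to complete before the kick issued on the other interface.
     */
    ioread32(regs + enable_off);

    iowrite32(kick_val, regs + kick_off);
}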
diff --git a/drivers/mtd/nand/raw/marvell_nand.c b/drivers/mtd/nand/raw/marvell_nand.c index 7af4d6213ee5..bc2ef5209783 100644 --- a/drivers/mtd/nand/raw/marvell_nand.c +++ b/drivers/mtd/nand/raw/marvell_nand.c | |||
@@ -1547,7 +1547,7 @@ static void marvell_nfc_parse_instructions(struct nand_chip *chip, | |||
1547 | for (op_id = 0; op_id < subop->ninstrs; op_id++) { | 1547 | for (op_id = 0; op_id < subop->ninstrs; op_id++) { |
1548 | unsigned int offset, naddrs; | 1548 | unsigned int offset, naddrs; |
1549 | const u8 *addrs; | 1549 | const u8 *addrs; |
1550 | int len = nand_subop_get_data_len(subop, op_id); | 1550 | int len; |
1551 | 1551 | ||
1552 | instr = &subop->instrs[op_id]; | 1552 | instr = &subop->instrs[op_id]; |
1553 | 1553 | ||
@@ -1593,6 +1593,7 @@ static void marvell_nfc_parse_instructions(struct nand_chip *chip, | |||
1593 | nfc_op->ndcb[0] |= | 1593 | nfc_op->ndcb[0] |= |
1594 | NDCB0_CMD_XTYPE(XTYPE_MONOLITHIC_RW) | | 1594 | NDCB0_CMD_XTYPE(XTYPE_MONOLITHIC_RW) | |
1595 | NDCB0_LEN_OVRD; | 1595 | NDCB0_LEN_OVRD; |
1596 | len = nand_subop_get_data_len(subop, op_id); | ||
1596 | nfc_op->ndcb[3] |= round_up(len, FIFO_DEPTH); | 1597 | nfc_op->ndcb[3] |= round_up(len, FIFO_DEPTH); |
1597 | } | 1598 | } |
1598 | nfc_op->data_delay_ns = instr->delay_ns; | 1599 | nfc_op->data_delay_ns = instr->delay_ns; |
@@ -1606,6 +1607,7 @@ static void marvell_nfc_parse_instructions(struct nand_chip *chip, | |||
1606 | nfc_op->ndcb[0] |= | 1607 | nfc_op->ndcb[0] |= |
1607 | NDCB0_CMD_XTYPE(XTYPE_MONOLITHIC_RW) | | 1608 | NDCB0_CMD_XTYPE(XTYPE_MONOLITHIC_RW) | |
1608 | NDCB0_LEN_OVRD; | 1609 | NDCB0_LEN_OVRD; |
1610 | len = nand_subop_get_data_len(subop, op_id); | ||
1609 | nfc_op->ndcb[3] |= round_up(len, FIFO_DEPTH); | 1611 | nfc_op->ndcb[3] |= round_up(len, FIFO_DEPTH); |
1610 | } | 1612 | } |
1611 | nfc_op->data_delay_ns = instr->delay_ns; | 1613 | nfc_op->data_delay_ns = instr->delay_ns; |
diff --git a/drivers/net/appletalk/ipddp.c b/drivers/net/appletalk/ipddp.c index 9375cef22420..3d27616d9c85 100644 --- a/drivers/net/appletalk/ipddp.c +++ b/drivers/net/appletalk/ipddp.c | |||
@@ -283,8 +283,12 @@ static int ipddp_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) | |||
283 | case SIOCFINDIPDDPRT: | 283 | case SIOCFINDIPDDPRT: |
284 | spin_lock_bh(&ipddp_route_lock); | 284 | spin_lock_bh(&ipddp_route_lock); |
285 | rp = __ipddp_find_route(&rcp); | 285 | rp = __ipddp_find_route(&rcp); |
286 | if (rp) | 286 | if (rp) { |
287 | memcpy(&rcp2, rp, sizeof(rcp2)); | 287 | memset(&rcp2, 0, sizeof(rcp2)); |
288 | rcp2.ip = rp->ip; | ||
289 | rcp2.at = rp->at; | ||
290 | rcp2.flags = rp->flags; | ||
291 | } | ||
288 | spin_unlock_bh(&ipddp_route_lock); | 292 | spin_unlock_bh(&ipddp_route_lock); |
289 | 293 | ||
290 | if (rp) { | 294 | if (rp) { |
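The ipddp change stops memcpy()ing a whole routing entry, including its kernel list pointer and padding, into the structure that is later copied to user space; instead it zeroes the output and copies only the public fields. The same pattern in plain C, with illustrative structures rather than the driver's:

#include <stdio.h>
#include <string.h>

struct route_entry {
    unsigned int ip;
    unsigned char at[4];
    int flags;
    struct route_entry *next;   /* kernel pointer: must never reach user space */
};

struct route_ioctl {
    unsigned int ip;
    unsigned char at[4];
    int flags;
    /* padding here would also leak if the struct were memcpy'd wholesale */
};

static void fill_user_copy(struct route_ioctl *out, const struct route_entry *rp)
{
    memset(out, 0, sizeof(*out));   /* clear padding before copying out */
    out->ip = rp->ip;
    memcpy(out->at, rp->at, sizeof(out->at));
    out->flags = rp->flags;
}

int main(void)
{
    struct route_entry rp = { .ip = 0x0a000001, .flags = 1, .next = (void *)&rp };
    struct route_ioctl out;

    fill_user_copy(&out, &rp);
    printf("ip=%#x flags=%d\n", out.ip, out.flags);
    return 0;
}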
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index a764a83f99da..0d87e11e7f1d 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c | |||
@@ -971,16 +971,13 @@ static void bond_poll_controller(struct net_device *bond_dev) | |||
971 | struct slave *slave = NULL; | 971 | struct slave *slave = NULL; |
972 | struct list_head *iter; | 972 | struct list_head *iter; |
973 | struct ad_info ad_info; | 973 | struct ad_info ad_info; |
974 | struct netpoll_info *ni; | ||
975 | const struct net_device_ops *ops; | ||
976 | 974 | ||
977 | if (BOND_MODE(bond) == BOND_MODE_8023AD) | 975 | if (BOND_MODE(bond) == BOND_MODE_8023AD) |
978 | if (bond_3ad_get_active_agg_info(bond, &ad_info)) | 976 | if (bond_3ad_get_active_agg_info(bond, &ad_info)) |
979 | return; | 977 | return; |
980 | 978 | ||
981 | bond_for_each_slave_rcu(bond, slave, iter) { | 979 | bond_for_each_slave_rcu(bond, slave, iter) { |
982 | ops = slave->dev->netdev_ops; | 980 | if (!bond_slave_is_up(slave)) |
983 | if (!bond_slave_is_up(slave) || !ops->ndo_poll_controller) | ||
984 | continue; | 981 | continue; |
985 | 982 | ||
986 | if (BOND_MODE(bond) == BOND_MODE_8023AD) { | 983 | if (BOND_MODE(bond) == BOND_MODE_8023AD) { |
@@ -992,11 +989,7 @@ static void bond_poll_controller(struct net_device *bond_dev) | |||
992 | continue; | 989 | continue; |
993 | } | 990 | } |
994 | 991 | ||
995 | ni = rcu_dereference_bh(slave->dev->npinfo); | 992 | netpoll_poll_dev(slave->dev); |
996 | if (down_trylock(&ni->dev_lock)) | ||
997 | continue; | ||
998 | ops->ndo_poll_controller(slave->dev); | ||
999 | up(&ni->dev_lock); | ||
1000 | } | 993 | } |
1001 | } | 994 | } |
1002 | 995 | ||
diff --git a/drivers/net/dsa/mv88e6xxx/global1.h b/drivers/net/dsa/mv88e6xxx/global1.h index 7c791c1da4b9..bef01331266f 100644 --- a/drivers/net/dsa/mv88e6xxx/global1.h +++ b/drivers/net/dsa/mv88e6xxx/global1.h | |||
@@ -128,7 +128,7 @@ | |||
128 | #define MV88E6XXX_G1_ATU_OP_GET_CLR_VIOLATION 0x7000 | 128 | #define MV88E6XXX_G1_ATU_OP_GET_CLR_VIOLATION 0x7000 |
129 | #define MV88E6XXX_G1_ATU_OP_AGE_OUT_VIOLATION BIT(7) | 129 | #define MV88E6XXX_G1_ATU_OP_AGE_OUT_VIOLATION BIT(7) |
130 | #define MV88E6XXX_G1_ATU_OP_MEMBER_VIOLATION BIT(6) | 130 | #define MV88E6XXX_G1_ATU_OP_MEMBER_VIOLATION BIT(6) |
131 | #define MV88E6XXX_G1_ATU_OP_MISS_VIOLTATION BIT(5) | 131 | #define MV88E6XXX_G1_ATU_OP_MISS_VIOLATION BIT(5) |
132 | #define MV88E6XXX_G1_ATU_OP_FULL_VIOLATION BIT(4) | 132 | #define MV88E6XXX_G1_ATU_OP_FULL_VIOLATION BIT(4) |
133 | 133 | ||
134 | /* Offset 0x0C: ATU Data Register */ | 134 | /* Offset 0x0C: ATU Data Register */ |
diff --git a/drivers/net/dsa/mv88e6xxx/global1_atu.c b/drivers/net/dsa/mv88e6xxx/global1_atu.c index 307410898fc9..5200e4bdce93 100644 --- a/drivers/net/dsa/mv88e6xxx/global1_atu.c +++ b/drivers/net/dsa/mv88e6xxx/global1_atu.c | |||
@@ -349,7 +349,7 @@ static irqreturn_t mv88e6xxx_g1_atu_prob_irq_thread_fn(int irq, void *dev_id) | |||
349 | chip->ports[entry.portvec].atu_member_violation++; | 349 | chip->ports[entry.portvec].atu_member_violation++; |
350 | } | 350 | } |
351 | 351 | ||
352 | if (val & MV88E6XXX_G1_ATU_OP_MEMBER_VIOLATION) { | 352 | if (val & MV88E6XXX_G1_ATU_OP_MISS_VIOLATION) { |
353 | dev_err_ratelimited(chip->dev, | 353 | dev_err_ratelimited(chip->dev, |
354 | "ATU miss violation for %pM portvec %x\n", | 354 | "ATU miss violation for %pM portvec %x\n", |
355 | entry.mac, entry.portvec); | 355 | entry.mac, entry.portvec); |
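The two mv88e6xxx hunks fix a misspelled register bit name and, more importantly, make the miss-violation branch test the MISS bit instead of testing MEMBER a second time, so miss events no longer bump the member-violation counter. A small sketch of the intended dispatch, with invented names mirroring the register layout above:

        #include <linux/bits.h>
        #include <linux/types.h>

        #define EX_ATU_MEMBER_VIOLATION BIT(6)
        #define EX_ATU_MISS_VIOLATION   BIT(5)

        struct ex_port_stats {
                u64 member_violation;
                u64 miss_violation;
        };

        static void ex_count_atu_violation(u16 val, struct ex_port_stats *stats)
        {
                if (val & EX_ATU_MEMBER_VIOLATION)
                        stats->member_violation++;

                /* Before the fix this condition also tested the MEMBER bit,
                 * so a miss violation incremented the wrong counter. */
                if (val & EX_ATU_MISS_VIOLATION)
                        stats->miss_violation++;
        }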
diff --git a/drivers/net/ethernet/apple/bmac.c b/drivers/net/ethernet/apple/bmac.c index 024998d6d8c6..6a8e2567f2bd 100644 --- a/drivers/net/ethernet/apple/bmac.c +++ b/drivers/net/ethernet/apple/bmac.c | |||
@@ -154,7 +154,7 @@ static irqreturn_t bmac_txdma_intr(int irq, void *dev_id); | |||
154 | static irqreturn_t bmac_rxdma_intr(int irq, void *dev_id); | 154 | static irqreturn_t bmac_rxdma_intr(int irq, void *dev_id); |
155 | static void bmac_set_timeout(struct net_device *dev); | 155 | static void bmac_set_timeout(struct net_device *dev); |
156 | static void bmac_tx_timeout(struct timer_list *t); | 156 | static void bmac_tx_timeout(struct timer_list *t); |
157 | static int bmac_output(struct sk_buff *skb, struct net_device *dev); | 157 | static netdev_tx_t bmac_output(struct sk_buff *skb, struct net_device *dev); |
158 | static void bmac_start(struct net_device *dev); | 158 | static void bmac_start(struct net_device *dev); |
159 | 159 | ||
160 | #define DBDMA_SET(x) ( ((x) | (x) << 16) ) | 160 | #define DBDMA_SET(x) ( ((x) | (x) << 16) ) |
@@ -1456,7 +1456,7 @@ bmac_start(struct net_device *dev) | |||
1456 | spin_unlock_irqrestore(&bp->lock, flags); | 1456 | spin_unlock_irqrestore(&bp->lock, flags); |
1457 | } | 1457 | } |
1458 | 1458 | ||
1459 | static int | 1459 | static netdev_tx_t |
1460 | bmac_output(struct sk_buff *skb, struct net_device *dev) | 1460 | bmac_output(struct sk_buff *skb, struct net_device *dev) |
1461 | { | 1461 | { |
1462 | struct bmac_data *bp = netdev_priv(dev); | 1462 | struct bmac_data *bp = netdev_priv(dev); |
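This bmac change, and the ones that follow for mace, macmace, ep93xx, mac89x0, ether1, lib82596 and sun3_82586, converts the transmit handlers to return netdev_tx_t so they match the ndo_start_xmit prototype instead of a plain int. A minimal sketch of the expected shape; example_hw_has_room() is an invented placeholder for a real ring-space check:

        #include <linux/netdevice.h>
        #include <linux/skbuff.h>

        static bool example_hw_has_room(struct net_device *dev)
        {
                return true;            /* placeholder: query TX ring space here */
        }

        static netdev_tx_t example_start_xmit(struct sk_buff *skb,
                                              struct net_device *dev)
        {
                if (!example_hw_has_room(dev)) {
                        netif_stop_queue(dev);
                        return NETDEV_TX_BUSY;  /* skb not consumed, retried later */
                }

                /* ... hand the skb to hardware ... */
                return NETDEV_TX_OK;            /* skb consumed */
        }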
diff --git a/drivers/net/ethernet/apple/mace.c b/drivers/net/ethernet/apple/mace.c index 0b5429d76bcf..68b9ee489489 100644 --- a/drivers/net/ethernet/apple/mace.c +++ b/drivers/net/ethernet/apple/mace.c | |||
@@ -78,7 +78,7 @@ struct mace_data { | |||
78 | 78 | ||
79 | static int mace_open(struct net_device *dev); | 79 | static int mace_open(struct net_device *dev); |
80 | static int mace_close(struct net_device *dev); | 80 | static int mace_close(struct net_device *dev); |
81 | static int mace_xmit_start(struct sk_buff *skb, struct net_device *dev); | 81 | static netdev_tx_t mace_xmit_start(struct sk_buff *skb, struct net_device *dev); |
82 | static void mace_set_multicast(struct net_device *dev); | 82 | static void mace_set_multicast(struct net_device *dev); |
83 | static void mace_reset(struct net_device *dev); | 83 | static void mace_reset(struct net_device *dev); |
84 | static int mace_set_address(struct net_device *dev, void *addr); | 84 | static int mace_set_address(struct net_device *dev, void *addr); |
@@ -525,7 +525,7 @@ static inline void mace_set_timeout(struct net_device *dev) | |||
525 | mp->timeout_active = 1; | 525 | mp->timeout_active = 1; |
526 | } | 526 | } |
527 | 527 | ||
528 | static int mace_xmit_start(struct sk_buff *skb, struct net_device *dev) | 528 | static netdev_tx_t mace_xmit_start(struct sk_buff *skb, struct net_device *dev) |
529 | { | 529 | { |
530 | struct mace_data *mp = netdev_priv(dev); | 530 | struct mace_data *mp = netdev_priv(dev); |
531 | volatile struct dbdma_regs __iomem *td = mp->tx_dma; | 531 | volatile struct dbdma_regs __iomem *td = mp->tx_dma; |
diff --git a/drivers/net/ethernet/apple/macmace.c b/drivers/net/ethernet/apple/macmace.c index 137cbb470af2..376f2c2613e7 100644 --- a/drivers/net/ethernet/apple/macmace.c +++ b/drivers/net/ethernet/apple/macmace.c | |||
@@ -89,7 +89,7 @@ struct mace_frame { | |||
89 | 89 | ||
90 | static int mace_open(struct net_device *dev); | 90 | static int mace_open(struct net_device *dev); |
91 | static int mace_close(struct net_device *dev); | 91 | static int mace_close(struct net_device *dev); |
92 | static int mace_xmit_start(struct sk_buff *skb, struct net_device *dev); | 92 | static netdev_tx_t mace_xmit_start(struct sk_buff *skb, struct net_device *dev); |
93 | static void mace_set_multicast(struct net_device *dev); | 93 | static void mace_set_multicast(struct net_device *dev); |
94 | static int mace_set_address(struct net_device *dev, void *addr); | 94 | static int mace_set_address(struct net_device *dev, void *addr); |
95 | static void mace_reset(struct net_device *dev); | 95 | static void mace_reset(struct net_device *dev); |
@@ -444,7 +444,7 @@ static int mace_close(struct net_device *dev) | |||
444 | * Transmit a frame | 444 | * Transmit a frame |
445 | */ | 445 | */ |
446 | 446 | ||
447 | static int mace_xmit_start(struct sk_buff *skb, struct net_device *dev) | 447 | static netdev_tx_t mace_xmit_start(struct sk_buff *skb, struct net_device *dev) |
448 | { | 448 | { |
449 | struct mace_data *mp = netdev_priv(dev); | 449 | struct mace_data *mp = netdev_priv(dev); |
450 | unsigned long flags; | 450 | unsigned long flags; |
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c index b5f1f62e8e25..d1e1a0ba8615 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c | |||
@@ -225,9 +225,10 @@ int aq_ring_rx_clean(struct aq_ring_s *self, | |||
225 | } | 225 | } |
226 | 226 | ||
227 | /* for single fragment packets use build_skb() */ | 227 | /* for single fragment packets use build_skb() */ |
228 | if (buff->is_eop) { | 228 | if (buff->is_eop && |
229 | buff->len <= AQ_CFG_RX_FRAME_MAX - AQ_SKB_ALIGN) { | ||
229 | skb = build_skb(page_address(buff->page), | 230 | skb = build_skb(page_address(buff->page), |
230 | buff->len + AQ_SKB_ALIGN); | 231 | AQ_CFG_RX_FRAME_MAX); |
231 | if (unlikely(!skb)) { | 232 | if (unlikely(!skb)) { |
232 | err = -ENOMEM; | 233 | err = -ENOMEM; |
233 | goto err_exit; | 234 | goto err_exit; |
@@ -247,18 +248,21 @@ int aq_ring_rx_clean(struct aq_ring_s *self, | |||
247 | buff->len - ETH_HLEN, | 248 | buff->len - ETH_HLEN, |
248 | SKB_TRUESIZE(buff->len - ETH_HLEN)); | 249 | SKB_TRUESIZE(buff->len - ETH_HLEN)); |
249 | 250 | ||
250 | for (i = 1U, next_ = buff->next, | 251 | if (!buff->is_eop) { |
251 | buff_ = &self->buff_ring[next_]; true; | 252 | for (i = 1U, next_ = buff->next, |
252 | next_ = buff_->next, | 253 | buff_ = &self->buff_ring[next_]; |
253 | buff_ = &self->buff_ring[next_], ++i) { | 254 | true; next_ = buff_->next, |
254 | skb_add_rx_frag(skb, i, buff_->page, 0, | 255 | buff_ = &self->buff_ring[next_], ++i) { |
255 | buff_->len, | 256 | skb_add_rx_frag(skb, i, |
256 | SKB_TRUESIZE(buff->len - | 257 | buff_->page, 0, |
257 | ETH_HLEN)); | 258 | buff_->len, |
258 | buff_->is_cleaned = 1; | 259 | SKB_TRUESIZE(buff->len - |
259 | 260 | ETH_HLEN)); | |
260 | if (buff_->is_eop) | 261 | buff_->is_cleaned = 1; |
261 | break; | 262 | |
263 | if (buff_->is_eop) | ||
264 | break; | ||
265 | } | ||
262 | } | 266 | } |
263 | } | 267 | } |
264 | 268 | ||
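The aquantia receive path above now takes the build_skb() fast path only when the frame fits within the page minus the alignment headroom, since build_skb() expects room for struct skb_shared_info at the end of the buffer; larger single-descriptor frames fall back to the fragment path. A sketch of that guard, with hypothetical frame_max/headroom parameters:

        #include <linux/skbuff.h>

        /* Return an skb wrapping the whole receive page, or NULL so the
         * caller falls back to allocating an skb and attaching fragments. */
        static struct sk_buff *ex_rx_fast_path(void *page_addr, unsigned int len,
                                               unsigned int frame_max,
                                               unsigned int headroom)
        {
                /* build_skb() reserves the tail of the buffer for
                 * struct skb_shared_info, so an over-long payload must
                 * not take this path. */
                if (len > frame_max - headroom)
                        return NULL;

                return build_skb(page_addr, frame_max);
        }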
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c index 71362b7f6040..fcc2328bb0d9 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c | |||
@@ -12894,19 +12894,6 @@ static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) | |||
12894 | } | 12894 | } |
12895 | } | 12895 | } |
12896 | 12896 | ||
12897 | #ifdef CONFIG_NET_POLL_CONTROLLER | ||
12898 | static void poll_bnx2x(struct net_device *dev) | ||
12899 | { | ||
12900 | struct bnx2x *bp = netdev_priv(dev); | ||
12901 | int i; | ||
12902 | |||
12903 | for_each_eth_queue(bp, i) { | ||
12904 | struct bnx2x_fastpath *fp = &bp->fp[i]; | ||
12905 | napi_schedule(&bnx2x_fp(bp, fp->index, napi)); | ||
12906 | } | ||
12907 | } | ||
12908 | #endif | ||
12909 | |||
12910 | static int bnx2x_validate_addr(struct net_device *dev) | 12897 | static int bnx2x_validate_addr(struct net_device *dev) |
12911 | { | 12898 | { |
12912 | struct bnx2x *bp = netdev_priv(dev); | 12899 | struct bnx2x *bp = netdev_priv(dev); |
@@ -13113,9 +13100,6 @@ static const struct net_device_ops bnx2x_netdev_ops = { | |||
13113 | .ndo_tx_timeout = bnx2x_tx_timeout, | 13100 | .ndo_tx_timeout = bnx2x_tx_timeout, |
13114 | .ndo_vlan_rx_add_vid = bnx2x_vlan_rx_add_vid, | 13101 | .ndo_vlan_rx_add_vid = bnx2x_vlan_rx_add_vid, |
13115 | .ndo_vlan_rx_kill_vid = bnx2x_vlan_rx_kill_vid, | 13102 | .ndo_vlan_rx_kill_vid = bnx2x_vlan_rx_kill_vid, |
13116 | #ifdef CONFIG_NET_POLL_CONTROLLER | ||
13117 | .ndo_poll_controller = poll_bnx2x, | ||
13118 | #endif | ||
13119 | .ndo_setup_tc = __bnx2x_setup_tc, | 13103 | .ndo_setup_tc = __bnx2x_setup_tc, |
13120 | #ifdef CONFIG_BNX2X_SRIOV | 13104 | #ifdef CONFIG_BNX2X_SRIOV |
13121 | .ndo_set_vf_mac = bnx2x_set_vf_mac, | 13105 | .ndo_set_vf_mac = bnx2x_set_vf_mac, |
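This bnx2x hunk is the first of a series below (bnxt, fm10k, i40evf, ice, igb, ixgb, ixgbe, ixgbevf, mlx4) that deletes per-driver ndo_poll_controller implementations; they all did roughly the same thing, and generic netpoll handling can walk the device's NAPI contexts itself when the hook is absent. A sketch of the pattern being removed, assuming the driver registered its NAPI instances on dev->napi_list:

        #include <linux/list.h>
        #include <linux/netdevice.h>

        static void ex_netpoll(struct net_device *dev)
        {
                struct napi_struct *napi;

                /* Kick every NAPI context so pending TX/RX completions get
                 * processed even with interrupts disabled; this is what the
                 * removed driver hooks open-coded and what the core can now
                 * do on its own. */
                list_for_each_entry(napi, &dev->napi_list, dev_list)
                        napi_schedule(napi);
        }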
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index cecbb1d1f587..61957b0bbd8c 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c | |||
@@ -7672,21 +7672,6 @@ static void bnxt_tx_timeout(struct net_device *dev) | |||
7672 | bnxt_queue_sp_work(bp); | 7672 | bnxt_queue_sp_work(bp); |
7673 | } | 7673 | } |
7674 | 7674 | ||
7675 | #ifdef CONFIG_NET_POLL_CONTROLLER | ||
7676 | static void bnxt_poll_controller(struct net_device *dev) | ||
7677 | { | ||
7678 | struct bnxt *bp = netdev_priv(dev); | ||
7679 | int i; | ||
7680 | |||
7681 | /* Only process tx rings/combined rings in netpoll mode. */ | ||
7682 | for (i = 0; i < bp->tx_nr_rings; i++) { | ||
7683 | struct bnxt_tx_ring_info *txr = &bp->tx_ring[i]; | ||
7684 | |||
7685 | napi_schedule(&txr->bnapi->napi); | ||
7686 | } | ||
7687 | } | ||
7688 | #endif | ||
7689 | |||
7690 | static void bnxt_timer(struct timer_list *t) | 7675 | static void bnxt_timer(struct timer_list *t) |
7691 | { | 7676 | { |
7692 | struct bnxt *bp = from_timer(bp, t, timer); | 7677 | struct bnxt *bp = from_timer(bp, t, timer); |
@@ -8027,7 +8012,7 @@ static int bnxt_change_mac_addr(struct net_device *dev, void *p) | |||
8027 | if (ether_addr_equal(addr->sa_data, dev->dev_addr)) | 8012 | if (ether_addr_equal(addr->sa_data, dev->dev_addr)) |
8028 | return 0; | 8013 | return 0; |
8029 | 8014 | ||
8030 | rc = bnxt_approve_mac(bp, addr->sa_data); | 8015 | rc = bnxt_approve_mac(bp, addr->sa_data, true); |
8031 | if (rc) | 8016 | if (rc) |
8032 | return rc; | 8017 | return rc; |
8033 | 8018 | ||
@@ -8520,9 +8505,6 @@ static const struct net_device_ops bnxt_netdev_ops = { | |||
8520 | .ndo_set_vf_spoofchk = bnxt_set_vf_spoofchk, | 8505 | .ndo_set_vf_spoofchk = bnxt_set_vf_spoofchk, |
8521 | .ndo_set_vf_trust = bnxt_set_vf_trust, | 8506 | .ndo_set_vf_trust = bnxt_set_vf_trust, |
8522 | #endif | 8507 | #endif |
8523 | #ifdef CONFIG_NET_POLL_CONTROLLER | ||
8524 | .ndo_poll_controller = bnxt_poll_controller, | ||
8525 | #endif | ||
8526 | .ndo_setup_tc = bnxt_setup_tc, | 8508 | .ndo_setup_tc = bnxt_setup_tc, |
8527 | #ifdef CONFIG_RFS_ACCEL | 8509 | #ifdef CONFIG_RFS_ACCEL |
8528 | .ndo_rx_flow_steer = bnxt_rx_flow_steer, | 8510 | .ndo_rx_flow_steer = bnxt_rx_flow_steer, |
@@ -8827,14 +8809,19 @@ static int bnxt_init_mac_addr(struct bnxt *bp) | |||
8827 | } else { | 8809 | } else { |
8828 | #ifdef CONFIG_BNXT_SRIOV | 8810 | #ifdef CONFIG_BNXT_SRIOV |
8829 | struct bnxt_vf_info *vf = &bp->vf; | 8811 | struct bnxt_vf_info *vf = &bp->vf; |
8812 | bool strict_approval = true; | ||
8830 | 8813 | ||
8831 | if (is_valid_ether_addr(vf->mac_addr)) { | 8814 | if (is_valid_ether_addr(vf->mac_addr)) { |
8832 | /* overwrite netdev dev_addr with admin VF MAC */ | 8815 | /* overwrite netdev dev_addr with admin VF MAC */ |
8833 | memcpy(bp->dev->dev_addr, vf->mac_addr, ETH_ALEN); | 8816 | memcpy(bp->dev->dev_addr, vf->mac_addr, ETH_ALEN); |
8817 | /* Older PF driver or firmware may not approve this | ||
8818 | * correctly. | ||
8819 | */ | ||
8820 | strict_approval = false; | ||
8834 | } else { | 8821 | } else { |
8835 | eth_hw_addr_random(bp->dev); | 8822 | eth_hw_addr_random(bp->dev); |
8836 | } | 8823 | } |
8837 | rc = bnxt_approve_mac(bp, bp->dev->dev_addr); | 8824 | rc = bnxt_approve_mac(bp, bp->dev->dev_addr, strict_approval); |
8838 | #endif | 8825 | #endif |
8839 | } | 8826 | } |
8840 | return rc; | 8827 | return rc; |
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c index f3b9fbcc705b..790c684f08ab 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c | |||
@@ -46,6 +46,9 @@ static int bnxt_hwrm_nvm_req(struct bnxt *bp, u32 param_id, void *msg, | |||
46 | } | 46 | } |
47 | } | 47 | } |
48 | 48 | ||
49 | if (i == ARRAY_SIZE(nvm_params)) | ||
50 | return -EOPNOTSUPP; | ||
51 | |||
49 | if (nvm_param.dir_type == BNXT_NVM_PORT_CFG) | 52 | if (nvm_param.dir_type == BNXT_NVM_PORT_CFG) |
50 | idx = bp->pf.port_id; | 53 | idx = bp->pf.port_id; |
51 | else if (nvm_param.dir_type == BNXT_NVM_FUNC_CFG) | 54 | else if (nvm_param.dir_type == BNXT_NVM_FUNC_CFG) |
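The bnxt_devlink hunk adds a bounds check after the parameter lookup loop, so an unrecognised param_id returns -EOPNOTSUPP instead of falling through and using whatever the local happened to hold. The lookup pattern, sketched with invented names:

        #include <linux/errno.h>
        #include <linux/kernel.h>
        #include <linux/types.h>

        struct ex_nvm_param {
                u16 id;
                u16 offset;
        };

        static const struct ex_nvm_param ex_nvm_params[] = {
                { .id = 1, .offset = 0x10 },
                { .id = 2, .offset = 0x14 },
        };

        static int ex_lookup_nvm_param(u16 param_id, const struct ex_nvm_param **out)
        {
                int i;

                for (i = 0; i < ARRAY_SIZE(ex_nvm_params); i++)
                        if (ex_nvm_params[i].id == param_id)
                                break;

                /* Without this check an unknown id silently used stale data. */
                if (i == ARRAY_SIZE(ex_nvm_params))
                        return -EOPNOTSUPP;

                *out = &ex_nvm_params[i];
                return 0;
        }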
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c index fcd085a9853a..3962f6fd543c 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c | |||
@@ -1104,7 +1104,7 @@ update_vf_mac_exit: | |||
1104 | mutex_unlock(&bp->hwrm_cmd_lock); | 1104 | mutex_unlock(&bp->hwrm_cmd_lock); |
1105 | } | 1105 | } |
1106 | 1106 | ||
1107 | int bnxt_approve_mac(struct bnxt *bp, u8 *mac) | 1107 | int bnxt_approve_mac(struct bnxt *bp, u8 *mac, bool strict) |
1108 | { | 1108 | { |
1109 | struct hwrm_func_vf_cfg_input req = {0}; | 1109 | struct hwrm_func_vf_cfg_input req = {0}; |
1110 | int rc = 0; | 1110 | int rc = 0; |
@@ -1122,12 +1122,13 @@ int bnxt_approve_mac(struct bnxt *bp, u8 *mac) | |||
1122 | memcpy(req.dflt_mac_addr, mac, ETH_ALEN); | 1122 | memcpy(req.dflt_mac_addr, mac, ETH_ALEN); |
1123 | rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); | 1123 | rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); |
1124 | mac_done: | 1124 | mac_done: |
1125 | if (rc) { | 1125 | if (rc && strict) { |
1126 | rc = -EADDRNOTAVAIL; | 1126 | rc = -EADDRNOTAVAIL; |
1127 | netdev_warn(bp->dev, "VF MAC address %pM not approved by the PF\n", | 1127 | netdev_warn(bp->dev, "VF MAC address %pM not approved by the PF\n", |
1128 | mac); | 1128 | mac); |
1129 | return rc; | ||
1129 | } | 1130 | } |
1130 | return rc; | 1131 | return 0; |
1131 | } | 1132 | } |
1132 | #else | 1133 | #else |
1133 | 1134 | ||
@@ -1144,7 +1145,7 @@ void bnxt_update_vf_mac(struct bnxt *bp) | |||
1144 | { | 1145 | { |
1145 | } | 1146 | } |
1146 | 1147 | ||
1147 | int bnxt_approve_mac(struct bnxt *bp, u8 *mac) | 1148 | int bnxt_approve_mac(struct bnxt *bp, u8 *mac, bool strict) |
1148 | { | 1149 | { |
1149 | return 0; | 1150 | return 0; |
1150 | } | 1151 | } |
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h index e9b20cd19881..2eed9eda1195 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h | |||
@@ -39,5 +39,5 @@ int bnxt_sriov_configure(struct pci_dev *pdev, int num_vfs); | |||
39 | void bnxt_sriov_disable(struct bnxt *); | 39 | void bnxt_sriov_disable(struct bnxt *); |
40 | void bnxt_hwrm_exec_fwd_req(struct bnxt *); | 40 | void bnxt_hwrm_exec_fwd_req(struct bnxt *); |
41 | void bnxt_update_vf_mac(struct bnxt *); | 41 | void bnxt_update_vf_mac(struct bnxt *); |
42 | int bnxt_approve_mac(struct bnxt *, u8 *); | 42 | int bnxt_approve_mac(struct bnxt *, u8 *, bool); |
43 | #endif | 43 | #endif |
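bnxt_approve_mac() gains a strict flag: a user-initiated MAC change (the ndo_set_mac_address path above passes true) still fails hard when the PF rejects it, while the probe-time path tolerates older PF drivers or firmware that cannot approve the admin-assigned VF MAC. The same split, sketched with a hypothetical firmware request:

        #include <linux/errno.h>
        #include <linux/netdevice.h>
        #include <linux/types.h>

        static int ex_fw_request_mac(struct net_device *dev, const u8 *mac)
        {
                return 0;               /* placeholder for the real firmware message */
        }

        static int ex_approve_mac(struct net_device *dev, const u8 *mac, bool strict)
        {
                int rc = ex_fw_request_mac(dev, mac);

                if (rc && strict) {
                        netdev_warn(dev, "MAC %pM not approved by the PF\n", mac);
                        return -EADDRNOTAVAIL;  /* user-requested change must fail */
                }

                /* At probe time the error is tolerated: an older PF may simply
                 * not support approving its own admin-assigned MAC. */
                return 0;
        }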
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c index 092c817f8f11..e1594c9df4c6 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c | |||
@@ -75,17 +75,23 @@ static int bnxt_tc_parse_redir(struct bnxt *bp, | |||
75 | return 0; | 75 | return 0; |
76 | } | 76 | } |
77 | 77 | ||
78 | static void bnxt_tc_parse_vlan(struct bnxt *bp, | 78 | static int bnxt_tc_parse_vlan(struct bnxt *bp, |
79 | struct bnxt_tc_actions *actions, | 79 | struct bnxt_tc_actions *actions, |
80 | const struct tc_action *tc_act) | 80 | const struct tc_action *tc_act) |
81 | { | 81 | { |
82 | if (tcf_vlan_action(tc_act) == TCA_VLAN_ACT_POP) { | 82 | switch (tcf_vlan_action(tc_act)) { |
83 | case TCA_VLAN_ACT_POP: | ||
83 | actions->flags |= BNXT_TC_ACTION_FLAG_POP_VLAN; | 84 | actions->flags |= BNXT_TC_ACTION_FLAG_POP_VLAN; |
84 | } else if (tcf_vlan_action(tc_act) == TCA_VLAN_ACT_PUSH) { | 85 | break; |
86 | case TCA_VLAN_ACT_PUSH: | ||
85 | actions->flags |= BNXT_TC_ACTION_FLAG_PUSH_VLAN; | 87 | actions->flags |= BNXT_TC_ACTION_FLAG_PUSH_VLAN; |
86 | actions->push_vlan_tci = htons(tcf_vlan_push_vid(tc_act)); | 88 | actions->push_vlan_tci = htons(tcf_vlan_push_vid(tc_act)); |
87 | actions->push_vlan_tpid = tcf_vlan_push_proto(tc_act); | 89 | actions->push_vlan_tpid = tcf_vlan_push_proto(tc_act); |
90 | break; | ||
91 | default: | ||
92 | return -EOPNOTSUPP; | ||
88 | } | 93 | } |
94 | return 0; | ||
89 | } | 95 | } |
90 | 96 | ||
91 | static int bnxt_tc_parse_tunnel_set(struct bnxt *bp, | 97 | static int bnxt_tc_parse_tunnel_set(struct bnxt *bp, |
@@ -134,7 +140,9 @@ static int bnxt_tc_parse_actions(struct bnxt *bp, | |||
134 | 140 | ||
135 | /* Push/pop VLAN */ | 141 | /* Push/pop VLAN */ |
136 | if (is_tcf_vlan(tc_act)) { | 142 | if (is_tcf_vlan(tc_act)) { |
137 | bnxt_tc_parse_vlan(bp, actions, tc_act); | 143 | rc = bnxt_tc_parse_vlan(bp, actions, tc_act); |
144 | if (rc) | ||
145 | return rc; | ||
138 | continue; | 146 | continue; |
139 | } | 147 | } |
140 | 148 | ||
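bnxt_tc_parse_vlan() now returns a status so that any VLAN action other than push or pop (a VLAN modify, for instance) makes the flower offload fail with -EOPNOTSUPP instead of being installed with that action silently dropped. The control flow, sketched generically:

        #include <linux/errno.h>
        #include <linux/types.h>

        enum ex_vlan_act { EX_VLAN_POP, EX_VLAN_PUSH, EX_VLAN_MODIFY };

        #define EX_FLAG_POP_VLAN        0x1
        #define EX_FLAG_PUSH_VLAN       0x2

        struct ex_actions {
                unsigned int flags;
                u16 push_vid;
        };

        static int ex_parse_vlan(struct ex_actions *actions, enum ex_vlan_act act,
                                 u16 vid)
        {
                switch (act) {
                case EX_VLAN_POP:
                        actions->flags |= EX_FLAG_POP_VLAN;
                        break;
                case EX_VLAN_PUSH:
                        actions->flags |= EX_FLAG_PUSH_VLAN;
                        actions->push_vid = vid;
                        break;
                default:
                        /* Anything else is not offloadable; reject the whole
                         * flow rather than drop the action. */
                        return -EOPNOTSUPP;
                }
                return 0;
        }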
diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c index 16e4ef7d7185..f1a86b422617 100644 --- a/drivers/net/ethernet/cadence/macb_main.c +++ b/drivers/net/ethernet/cadence/macb_main.c | |||
@@ -3837,6 +3837,13 @@ static const struct macb_config at91sam9260_config = { | |||
3837 | .init = macb_init, | 3837 | .init = macb_init, |
3838 | }; | 3838 | }; |
3839 | 3839 | ||
3840 | static const struct macb_config sama5d3macb_config = { | ||
3841 | .caps = MACB_CAPS_SG_DISABLED | ||
3842 | | MACB_CAPS_USRIO_HAS_CLKEN | MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII, | ||
3843 | .clk_init = macb_clk_init, | ||
3844 | .init = macb_init, | ||
3845 | }; | ||
3846 | |||
3840 | static const struct macb_config pc302gem_config = { | 3847 | static const struct macb_config pc302gem_config = { |
3841 | .caps = MACB_CAPS_SG_DISABLED | MACB_CAPS_GIGABIT_MODE_AVAILABLE, | 3848 | .caps = MACB_CAPS_SG_DISABLED | MACB_CAPS_GIGABIT_MODE_AVAILABLE, |
3842 | .dma_burst_length = 16, | 3849 | .dma_burst_length = 16, |
@@ -3904,6 +3911,7 @@ static const struct of_device_id macb_dt_ids[] = { | |||
3904 | { .compatible = "cdns,gem", .data = &pc302gem_config }, | 3911 | { .compatible = "cdns,gem", .data = &pc302gem_config }, |
3905 | { .compatible = "atmel,sama5d2-gem", .data = &sama5d2_config }, | 3912 | { .compatible = "atmel,sama5d2-gem", .data = &sama5d2_config }, |
3906 | { .compatible = "atmel,sama5d3-gem", .data = &sama5d3_config }, | 3913 | { .compatible = "atmel,sama5d3-gem", .data = &sama5d3_config }, |
3914 | { .compatible = "atmel,sama5d3-macb", .data = &sama5d3macb_config }, | ||
3907 | { .compatible = "atmel,sama5d4-gem", .data = &sama5d4_config }, | 3915 | { .compatible = "atmel,sama5d4-gem", .data = &sama5d4_config }, |
3908 | { .compatible = "cdns,at91rm9200-emac", .data = &emac_config }, | 3916 | { .compatible = "cdns,at91rm9200-emac", .data = &emac_config }, |
3909 | { .compatible = "cdns,emac", .data = &emac_config }, | 3917 | { .compatible = "cdns,emac", .data = &emac_config }, |
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h index b8f75a22fb6c..f152da1ce046 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h | |||
@@ -753,7 +753,6 @@ struct cpl_abort_req_rss { | |||
753 | }; | 753 | }; |
754 | 754 | ||
755 | struct cpl_abort_req_rss6 { | 755 | struct cpl_abort_req_rss6 { |
756 | WR_HDR; | ||
757 | union opcode_tid ot; | 756 | union opcode_tid ot; |
758 | __be32 srqidx_status; | 757 | __be32 srqidx_status; |
759 | }; | 758 | }; |
diff --git a/drivers/net/ethernet/cirrus/ep93xx_eth.c b/drivers/net/ethernet/cirrus/ep93xx_eth.c index e2a702996db4..13dfdfca49fc 100644 --- a/drivers/net/ethernet/cirrus/ep93xx_eth.c +++ b/drivers/net/ethernet/cirrus/ep93xx_eth.c | |||
@@ -332,7 +332,7 @@ static int ep93xx_poll(struct napi_struct *napi, int budget) | |||
332 | return rx; | 332 | return rx; |
333 | } | 333 | } |
334 | 334 | ||
335 | static int ep93xx_xmit(struct sk_buff *skb, struct net_device *dev) | 335 | static netdev_tx_t ep93xx_xmit(struct sk_buff *skb, struct net_device *dev) |
336 | { | 336 | { |
337 | struct ep93xx_priv *ep = netdev_priv(dev); | 337 | struct ep93xx_priv *ep = netdev_priv(dev); |
338 | struct ep93xx_tdesc *txd; | 338 | struct ep93xx_tdesc *txd; |
diff --git a/drivers/net/ethernet/cirrus/mac89x0.c b/drivers/net/ethernet/cirrus/mac89x0.c index 3f8fe8fd79cc..6324e80960c3 100644 --- a/drivers/net/ethernet/cirrus/mac89x0.c +++ b/drivers/net/ethernet/cirrus/mac89x0.c | |||
@@ -113,7 +113,7 @@ struct net_local { | |||
113 | 113 | ||
114 | /* Index to functions, as function prototypes. */ | 114 | /* Index to functions, as function prototypes. */ |
115 | static int net_open(struct net_device *dev); | 115 | static int net_open(struct net_device *dev); |
116 | static int net_send_packet(struct sk_buff *skb, struct net_device *dev); | 116 | static netdev_tx_t net_send_packet(struct sk_buff *skb, struct net_device *dev); |
117 | static irqreturn_t net_interrupt(int irq, void *dev_id); | 117 | static irqreturn_t net_interrupt(int irq, void *dev_id); |
118 | static void set_multicast_list(struct net_device *dev); | 118 | static void set_multicast_list(struct net_device *dev); |
119 | static void net_rx(struct net_device *dev); | 119 | static void net_rx(struct net_device *dev); |
@@ -324,7 +324,7 @@ net_open(struct net_device *dev) | |||
324 | return 0; | 324 | return 0; |
325 | } | 325 | } |
326 | 326 | ||
327 | static int | 327 | static netdev_tx_t |
328 | net_send_packet(struct sk_buff *skb, struct net_device *dev) | 328 | net_send_packet(struct sk_buff *skb, struct net_device *dev) |
329 | { | 329 | { |
330 | struct net_local *lp = netdev_priv(dev); | 330 | struct net_local *lp = netdev_priv(dev); |
diff --git a/drivers/net/ethernet/hp/hp100.c b/drivers/net/ethernet/hp/hp100.c index c8c7ad2eff77..9b5a68b65432 100644 --- a/drivers/net/ethernet/hp/hp100.c +++ b/drivers/net/ethernet/hp/hp100.c | |||
@@ -2634,7 +2634,7 @@ static int hp100_login_to_vg_hub(struct net_device *dev, u_short force_relogin) | |||
2634 | /* Wait for link to drop */ | 2634 | /* Wait for link to drop */ |
2635 | time = jiffies + (HZ / 10); | 2635 | time = jiffies + (HZ / 10); |
2636 | do { | 2636 | do { |
2637 | if (~(hp100_inb(VG_LAN_CFG_1) & HP100_LINK_UP_ST)) | 2637 | if (!(hp100_inb(VG_LAN_CFG_1) & HP100_LINK_UP_ST)) |
2638 | break; | 2638 | break; |
2639 | if (!in_interrupt()) | 2639 | if (!in_interrupt()) |
2640 | schedule_timeout_interruptible(1); | 2640 | schedule_timeout_interruptible(1); |
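The hp100 fix replaces a bitwise NOT with a logical NOT: ~(reg & BIT) is non-zero for every register value, so the old "wait for link to drop" loop broke out immediately whether or not the link was still up. A tiny stand-alone demonstration (LINK_UP is an arbitrary example bit):

        #include <stdio.h>

        #define LINK_UP 0x40

        int main(void)
        {
                unsigned char reg;

                for (reg = 0; reg <= LINK_UP; reg += LINK_UP) {
                        /* ~(reg & LINK_UP) is either ~0 or ~0x40, both non-zero,
                         * so it is "true" in both cases; !(reg & LINK_UP) is
                         * true only when the bit is actually clear. */
                        printf("reg=0x%02x  ~(reg & LINK_UP) -> %d   !(reg & LINK_UP) -> %d\n",
                               reg, ~(reg & LINK_UP) ? 1 : 0, !(reg & LINK_UP));
                }
                return 0;
        }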
diff --git a/drivers/net/ethernet/i825xx/ether1.c b/drivers/net/ethernet/i825xx/ether1.c index dc983450354b..35f6291a3672 100644 --- a/drivers/net/ethernet/i825xx/ether1.c +++ b/drivers/net/ethernet/i825xx/ether1.c | |||
@@ -64,7 +64,8 @@ static unsigned int net_debug = NET_DEBUG; | |||
64 | #define RX_AREA_END 0x0fc00 | 64 | #define RX_AREA_END 0x0fc00 |
65 | 65 | ||
66 | static int ether1_open(struct net_device *dev); | 66 | static int ether1_open(struct net_device *dev); |
67 | static int ether1_sendpacket(struct sk_buff *skb, struct net_device *dev); | 67 | static netdev_tx_t ether1_sendpacket(struct sk_buff *skb, |
68 | struct net_device *dev); | ||
68 | static irqreturn_t ether1_interrupt(int irq, void *dev_id); | 69 | static irqreturn_t ether1_interrupt(int irq, void *dev_id); |
69 | static int ether1_close(struct net_device *dev); | 70 | static int ether1_close(struct net_device *dev); |
70 | static void ether1_setmulticastlist(struct net_device *dev); | 71 | static void ether1_setmulticastlist(struct net_device *dev); |
@@ -667,7 +668,7 @@ ether1_timeout(struct net_device *dev) | |||
667 | netif_wake_queue(dev); | 668 | netif_wake_queue(dev); |
668 | } | 669 | } |
669 | 670 | ||
670 | static int | 671 | static netdev_tx_t |
671 | ether1_sendpacket (struct sk_buff *skb, struct net_device *dev) | 672 | ether1_sendpacket (struct sk_buff *skb, struct net_device *dev) |
672 | { | 673 | { |
673 | int tmp, tst, nopaddr, txaddr, tbdaddr, dataddr; | 674 | int tmp, tst, nopaddr, txaddr, tbdaddr, dataddr; |
diff --git a/drivers/net/ethernet/i825xx/lib82596.c b/drivers/net/ethernet/i825xx/lib82596.c index f00a1dc2128c..2f7ae118217f 100644 --- a/drivers/net/ethernet/i825xx/lib82596.c +++ b/drivers/net/ethernet/i825xx/lib82596.c | |||
@@ -347,7 +347,7 @@ static const char init_setup[] = | |||
347 | 0x7f /* *multi IA */ }; | 347 | 0x7f /* *multi IA */ }; |
348 | 348 | ||
349 | static int i596_open(struct net_device *dev); | 349 | static int i596_open(struct net_device *dev); |
350 | static int i596_start_xmit(struct sk_buff *skb, struct net_device *dev); | 350 | static netdev_tx_t i596_start_xmit(struct sk_buff *skb, struct net_device *dev); |
351 | static irqreturn_t i596_interrupt(int irq, void *dev_id); | 351 | static irqreturn_t i596_interrupt(int irq, void *dev_id); |
352 | static int i596_close(struct net_device *dev); | 352 | static int i596_close(struct net_device *dev); |
353 | static void i596_add_cmd(struct net_device *dev, struct i596_cmd *cmd); | 353 | static void i596_add_cmd(struct net_device *dev, struct i596_cmd *cmd); |
@@ -966,7 +966,7 @@ static void i596_tx_timeout (struct net_device *dev) | |||
966 | } | 966 | } |
967 | 967 | ||
968 | 968 | ||
969 | static int i596_start_xmit(struct sk_buff *skb, struct net_device *dev) | 969 | static netdev_tx_t i596_start_xmit(struct sk_buff *skb, struct net_device *dev) |
970 | { | 970 | { |
971 | struct i596_private *lp = netdev_priv(dev); | 971 | struct i596_private *lp = netdev_priv(dev); |
972 | struct tx_cmd *tx_cmd; | 972 | struct tx_cmd *tx_cmd; |
diff --git a/drivers/net/ethernet/i825xx/sun3_82586.c b/drivers/net/ethernet/i825xx/sun3_82586.c index 8bb15a8c2a40..1a86184d44c0 100644 --- a/drivers/net/ethernet/i825xx/sun3_82586.c +++ b/drivers/net/ethernet/i825xx/sun3_82586.c | |||
@@ -121,7 +121,8 @@ static int sun3_82586_probe1(struct net_device *dev,int ioaddr); | |||
121 | static irqreturn_t sun3_82586_interrupt(int irq,void *dev_id); | 121 | static irqreturn_t sun3_82586_interrupt(int irq,void *dev_id); |
122 | static int sun3_82586_open(struct net_device *dev); | 122 | static int sun3_82586_open(struct net_device *dev); |
123 | static int sun3_82586_close(struct net_device *dev); | 123 | static int sun3_82586_close(struct net_device *dev); |
124 | static int sun3_82586_send_packet(struct sk_buff *,struct net_device *); | 124 | static netdev_tx_t sun3_82586_send_packet(struct sk_buff *, |
125 | struct net_device *); | ||
125 | static struct net_device_stats *sun3_82586_get_stats(struct net_device *dev); | 126 | static struct net_device_stats *sun3_82586_get_stats(struct net_device *dev); |
126 | static void set_multicast_list(struct net_device *dev); | 127 | static void set_multicast_list(struct net_device *dev); |
127 | static void sun3_82586_timeout(struct net_device *dev); | 128 | static void sun3_82586_timeout(struct net_device *dev); |
@@ -1002,7 +1003,8 @@ static void sun3_82586_timeout(struct net_device *dev) | |||
1002 | * send frame | 1003 | * send frame |
1003 | */ | 1004 | */ |
1004 | 1005 | ||
1005 | static int sun3_82586_send_packet(struct sk_buff *skb, struct net_device *dev) | 1006 | static netdev_tx_t |
1007 | sun3_82586_send_packet(struct sk_buff *skb, struct net_device *dev) | ||
1006 | { | 1008 | { |
1007 | int len,i; | 1009 | int len,i; |
1008 | #ifndef NO_NOPCOMMANDS | 1010 | #ifndef NO_NOPCOMMANDS |
diff --git a/drivers/net/ethernet/ibm/emac/core.c b/drivers/net/ethernet/ibm/emac/core.c index 372664686309..129f4e9f38da 100644 --- a/drivers/net/ethernet/ibm/emac/core.c +++ b/drivers/net/ethernet/ibm/emac/core.c | |||
@@ -2677,12 +2677,17 @@ static int emac_init_phy(struct emac_instance *dev) | |||
2677 | if (of_phy_is_fixed_link(np)) { | 2677 | if (of_phy_is_fixed_link(np)) { |
2678 | int res = emac_dt_mdio_probe(dev); | 2678 | int res = emac_dt_mdio_probe(dev); |
2679 | 2679 | ||
2680 | if (!res) { | 2680 | if (res) |
2681 | res = of_phy_register_fixed_link(np); | 2681 | return res; |
2682 | if (res) | 2682 | |
2683 | mdiobus_unregister(dev->mii_bus); | 2683 | res = of_phy_register_fixed_link(np); |
2684 | dev->phy_dev = of_phy_find_device(np); | ||
2685 | if (res || !dev->phy_dev) { | ||
2686 | mdiobus_unregister(dev->mii_bus); | ||
2687 | return res ? res : -EINVAL; | ||
2684 | } | 2688 | } |
2685 | return res; | 2689 | emac_adjust_link(dev->ndev); |
2690 | put_device(&dev->phy_dev->mdio.dev); | ||
2686 | } | 2691 | } |
2687 | return 0; | 2692 | return 0; |
2688 | } | 2693 | } |
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k.h b/drivers/net/ethernet/intel/fm10k/fm10k.h index a903a0ba45e1..7d42582ed48d 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k.h +++ b/drivers/net/ethernet/intel/fm10k/fm10k.h | |||
@@ -504,9 +504,6 @@ void fm10k_update_stats(struct fm10k_intfc *interface); | |||
504 | void fm10k_service_event_schedule(struct fm10k_intfc *interface); | 504 | void fm10k_service_event_schedule(struct fm10k_intfc *interface); |
505 | void fm10k_macvlan_schedule(struct fm10k_intfc *interface); | 505 | void fm10k_macvlan_schedule(struct fm10k_intfc *interface); |
506 | void fm10k_update_rx_drop_en(struct fm10k_intfc *interface); | 506 | void fm10k_update_rx_drop_en(struct fm10k_intfc *interface); |
507 | #ifdef CONFIG_NET_POLL_CONTROLLER | ||
508 | void fm10k_netpoll(struct net_device *netdev); | ||
509 | #endif | ||
510 | 507 | ||
511 | /* Netdev */ | 508 | /* Netdev */ |
512 | struct net_device *fm10k_alloc_netdev(const struct fm10k_info *info); | 509 | struct net_device *fm10k_alloc_netdev(const struct fm10k_info *info); |
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c index 929f538d28bc..538a8467f434 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c | |||
@@ -1648,9 +1648,6 @@ static const struct net_device_ops fm10k_netdev_ops = { | |||
1648 | .ndo_udp_tunnel_del = fm10k_udp_tunnel_del, | 1648 | .ndo_udp_tunnel_del = fm10k_udp_tunnel_del, |
1649 | .ndo_dfwd_add_station = fm10k_dfwd_add_station, | 1649 | .ndo_dfwd_add_station = fm10k_dfwd_add_station, |
1650 | .ndo_dfwd_del_station = fm10k_dfwd_del_station, | 1650 | .ndo_dfwd_del_station = fm10k_dfwd_del_station, |
1651 | #ifdef CONFIG_NET_POLL_CONTROLLER | ||
1652 | .ndo_poll_controller = fm10k_netpoll, | ||
1653 | #endif | ||
1654 | .ndo_features_check = fm10k_features_check, | 1651 | .ndo_features_check = fm10k_features_check, |
1655 | }; | 1652 | }; |
1656 | 1653 | ||
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c index 15071e4adb98..c859ababeed5 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c | |||
@@ -1210,28 +1210,6 @@ static irqreturn_t fm10k_msix_mbx_vf(int __always_unused irq, void *data) | |||
1210 | return IRQ_HANDLED; | 1210 | return IRQ_HANDLED; |
1211 | } | 1211 | } |
1212 | 1212 | ||
1213 | #ifdef CONFIG_NET_POLL_CONTROLLER | ||
1214 | /** | ||
1215 | * fm10k_netpoll - A Polling 'interrupt' handler | ||
1216 | * @netdev: network interface device structure | ||
1217 | * | ||
1218 | * This is used by netconsole to send skbs without having to re-enable | ||
1219 | * interrupts. It's not called while the normal interrupt routine is executing. | ||
1220 | **/ | ||
1221 | void fm10k_netpoll(struct net_device *netdev) | ||
1222 | { | ||
1223 | struct fm10k_intfc *interface = netdev_priv(netdev); | ||
1224 | int i; | ||
1225 | |||
1226 | /* if interface is down do nothing */ | ||
1227 | if (test_bit(__FM10K_DOWN, interface->state)) | ||
1228 | return; | ||
1229 | |||
1230 | for (i = 0; i < interface->num_q_vectors; i++) | ||
1231 | fm10k_msix_clean_rings(0, interface->q_vector[i]); | ||
1232 | } | ||
1233 | |||
1234 | #endif | ||
1235 | #define FM10K_ERR_MSG(type) case (type): error = #type; break | 1213 | #define FM10K_ERR_MSG(type) case (type): error = #type; break |
1236 | static void fm10k_handle_fault(struct fm10k_intfc *interface, int type, | 1214 | static void fm10k_handle_fault(struct fm10k_intfc *interface, int type, |
1237 | struct fm10k_fault *fault) | 1215 | struct fm10k_fault *fault) |
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_main.c b/drivers/net/ethernet/intel/i40evf/i40evf_main.c index 5906c1c1d19d..fef6d892ed4c 100644 --- a/drivers/net/ethernet/intel/i40evf/i40evf_main.c +++ b/drivers/net/ethernet/intel/i40evf/i40evf_main.c | |||
@@ -396,29 +396,6 @@ static void i40evf_map_rings_to_vectors(struct i40evf_adapter *adapter) | |||
396 | adapter->aq_required |= I40EVF_FLAG_AQ_MAP_VECTORS; | 396 | adapter->aq_required |= I40EVF_FLAG_AQ_MAP_VECTORS; |
397 | } | 397 | } |
398 | 398 | ||
399 | #ifdef CONFIG_NET_POLL_CONTROLLER | ||
400 | /** | ||
401 | * i40evf_netpoll - A Polling 'interrupt' handler | ||
402 | * @netdev: network interface device structure | ||
403 | * | ||
404 | * This is used by netconsole to send skbs without having to re-enable | ||
405 | * interrupts. It's not called while the normal interrupt routine is executing. | ||
406 | **/ | ||
407 | static void i40evf_netpoll(struct net_device *netdev) | ||
408 | { | ||
409 | struct i40evf_adapter *adapter = netdev_priv(netdev); | ||
410 | int q_vectors = adapter->num_msix_vectors - NONQ_VECS; | ||
411 | int i; | ||
412 | |||
413 | /* if interface is down do nothing */ | ||
414 | if (test_bit(__I40E_VSI_DOWN, adapter->vsi.state)) | ||
415 | return; | ||
416 | |||
417 | for (i = 0; i < q_vectors; i++) | ||
418 | i40evf_msix_clean_rings(0, &adapter->q_vectors[i]); | ||
419 | } | ||
420 | |||
421 | #endif | ||
422 | /** | 399 | /** |
423 | * i40evf_irq_affinity_notify - Callback for affinity changes | 400 | * i40evf_irq_affinity_notify - Callback for affinity changes |
424 | * @notify: context as to what irq was changed | 401 | * @notify: context as to what irq was changed |
@@ -3229,9 +3206,6 @@ static const struct net_device_ops i40evf_netdev_ops = { | |||
3229 | .ndo_features_check = i40evf_features_check, | 3206 | .ndo_features_check = i40evf_features_check, |
3230 | .ndo_fix_features = i40evf_fix_features, | 3207 | .ndo_fix_features = i40evf_fix_features, |
3231 | .ndo_set_features = i40evf_set_features, | 3208 | .ndo_set_features = i40evf_set_features, |
3232 | #ifdef CONFIG_NET_POLL_CONTROLLER | ||
3233 | .ndo_poll_controller = i40evf_netpoll, | ||
3234 | #endif | ||
3235 | .ndo_setup_tc = i40evf_setup_tc, | 3209 | .ndo_setup_tc = i40evf_setup_tc, |
3236 | }; | 3210 | }; |
3237 | 3211 | ||
diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c index f1e80eed2fd6..3f047bb43348 100644 --- a/drivers/net/ethernet/intel/ice/ice_main.c +++ b/drivers/net/ethernet/intel/ice/ice_main.c | |||
@@ -4806,30 +4806,6 @@ void ice_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats) | |||
4806 | stats->rx_length_errors = vsi_stats->rx_length_errors; | 4806 | stats->rx_length_errors = vsi_stats->rx_length_errors; |
4807 | } | 4807 | } |
4808 | 4808 | ||
4809 | #ifdef CONFIG_NET_POLL_CONTROLLER | ||
4810 | /** | ||
4811 | * ice_netpoll - polling "interrupt" handler | ||
4812 | * @netdev: network interface device structure | ||
4813 | * | ||
4814 | * Used by netconsole to send skbs without having to re-enable interrupts. | ||
4815 | * This is not called in the normal interrupt path. | ||
4816 | */ | ||
4817 | static void ice_netpoll(struct net_device *netdev) | ||
4818 | { | ||
4819 | struct ice_netdev_priv *np = netdev_priv(netdev); | ||
4820 | struct ice_vsi *vsi = np->vsi; | ||
4821 | struct ice_pf *pf = vsi->back; | ||
4822 | int i; | ||
4823 | |||
4824 | if (test_bit(__ICE_DOWN, vsi->state) || | ||
4825 | !test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) | ||
4826 | return; | ||
4827 | |||
4828 | for (i = 0; i < vsi->num_q_vectors; i++) | ||
4829 | ice_msix_clean_rings(0, vsi->q_vectors[i]); | ||
4830 | } | ||
4831 | #endif /* CONFIG_NET_POLL_CONTROLLER */ | ||
4832 | |||
4833 | /** | 4809 | /** |
4834 | * ice_napi_disable_all - Disable NAPI for all q_vectors in the VSI | 4810 | * ice_napi_disable_all - Disable NAPI for all q_vectors in the VSI |
4835 | * @vsi: VSI having NAPI disabled | 4811 | * @vsi: VSI having NAPI disabled |
@@ -5497,9 +5473,6 @@ static const struct net_device_ops ice_netdev_ops = { | |||
5497 | .ndo_validate_addr = eth_validate_addr, | 5473 | .ndo_validate_addr = eth_validate_addr, |
5498 | .ndo_change_mtu = ice_change_mtu, | 5474 | .ndo_change_mtu = ice_change_mtu, |
5499 | .ndo_get_stats64 = ice_get_stats64, | 5475 | .ndo_get_stats64 = ice_get_stats64, |
5500 | #ifdef CONFIG_NET_POLL_CONTROLLER | ||
5501 | .ndo_poll_controller = ice_netpoll, | ||
5502 | #endif /* CONFIG_NET_POLL_CONTROLLER */ | ||
5503 | .ndo_vlan_rx_add_vid = ice_vlan_rx_add_vid, | 5476 | .ndo_vlan_rx_add_vid = ice_vlan_rx_add_vid, |
5504 | .ndo_vlan_rx_kill_vid = ice_vlan_rx_kill_vid, | 5477 | .ndo_vlan_rx_kill_vid = ice_vlan_rx_kill_vid, |
5505 | .ndo_set_features = ice_set_features, | 5478 | .ndo_set_features = ice_set_features, |
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index a32c576c1e65..0796cef96fa3 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c | |||
@@ -205,10 +205,6 @@ static struct notifier_block dca_notifier = { | |||
205 | .priority = 0 | 205 | .priority = 0 |
206 | }; | 206 | }; |
207 | #endif | 207 | #endif |
208 | #ifdef CONFIG_NET_POLL_CONTROLLER | ||
209 | /* for netdump / net console */ | ||
210 | static void igb_netpoll(struct net_device *); | ||
211 | #endif | ||
212 | #ifdef CONFIG_PCI_IOV | 208 | #ifdef CONFIG_PCI_IOV |
213 | static unsigned int max_vfs; | 209 | static unsigned int max_vfs; |
214 | module_param(max_vfs, uint, 0); | 210 | module_param(max_vfs, uint, 0); |
@@ -2881,9 +2877,6 @@ static const struct net_device_ops igb_netdev_ops = { | |||
2881 | .ndo_set_vf_spoofchk = igb_ndo_set_vf_spoofchk, | 2877 | .ndo_set_vf_spoofchk = igb_ndo_set_vf_spoofchk, |
2882 | .ndo_set_vf_trust = igb_ndo_set_vf_trust, | 2878 | .ndo_set_vf_trust = igb_ndo_set_vf_trust, |
2883 | .ndo_get_vf_config = igb_ndo_get_vf_config, | 2879 | .ndo_get_vf_config = igb_ndo_get_vf_config, |
2884 | #ifdef CONFIG_NET_POLL_CONTROLLER | ||
2885 | .ndo_poll_controller = igb_netpoll, | ||
2886 | #endif | ||
2887 | .ndo_fix_features = igb_fix_features, | 2880 | .ndo_fix_features = igb_fix_features, |
2888 | .ndo_set_features = igb_set_features, | 2881 | .ndo_set_features = igb_set_features, |
2889 | .ndo_fdb_add = igb_ndo_fdb_add, | 2882 | .ndo_fdb_add = igb_ndo_fdb_add, |
@@ -9053,29 +9046,6 @@ static int igb_pci_sriov_configure(struct pci_dev *dev, int num_vfs) | |||
9053 | return 0; | 9046 | return 0; |
9054 | } | 9047 | } |
9055 | 9048 | ||
9056 | #ifdef CONFIG_NET_POLL_CONTROLLER | ||
9057 | /* Polling 'interrupt' - used by things like netconsole to send skbs | ||
9058 | * without having to re-enable interrupts. It's not called while | ||
9059 | * the interrupt routine is executing. | ||
9060 | */ | ||
9061 | static void igb_netpoll(struct net_device *netdev) | ||
9062 | { | ||
9063 | struct igb_adapter *adapter = netdev_priv(netdev); | ||
9064 | struct e1000_hw *hw = &adapter->hw; | ||
9065 | struct igb_q_vector *q_vector; | ||
9066 | int i; | ||
9067 | |||
9068 | for (i = 0; i < adapter->num_q_vectors; i++) { | ||
9069 | q_vector = adapter->q_vector[i]; | ||
9070 | if (adapter->flags & IGB_FLAG_HAS_MSIX) | ||
9071 | wr32(E1000_EIMC, q_vector->eims_value); | ||
9072 | else | ||
9073 | igb_irq_disable(adapter); | ||
9074 | napi_schedule(&q_vector->napi); | ||
9075 | } | ||
9076 | } | ||
9077 | #endif /* CONFIG_NET_POLL_CONTROLLER */ | ||
9078 | |||
9079 | /** | 9049 | /** |
9080 | * igb_io_error_detected - called when PCI error is detected | 9050 | * igb_io_error_detected - called when PCI error is detected |
9081 | * @pdev: Pointer to PCI device | 9051 | * @pdev: Pointer to PCI device |
diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_main.c b/drivers/net/ethernet/intel/ixgb/ixgb_main.c index d3e72d0f66ef..7722153c4ac2 100644 --- a/drivers/net/ethernet/intel/ixgb/ixgb_main.c +++ b/drivers/net/ethernet/intel/ixgb/ixgb_main.c | |||
@@ -81,11 +81,6 @@ static int ixgb_vlan_rx_kill_vid(struct net_device *netdev, | |||
81 | __be16 proto, u16 vid); | 81 | __be16 proto, u16 vid); |
82 | static void ixgb_restore_vlan(struct ixgb_adapter *adapter); | 82 | static void ixgb_restore_vlan(struct ixgb_adapter *adapter); |
83 | 83 | ||
84 | #ifdef CONFIG_NET_POLL_CONTROLLER | ||
85 | /* for netdump / net console */ | ||
86 | static void ixgb_netpoll(struct net_device *dev); | ||
87 | #endif | ||
88 | |||
89 | static pci_ers_result_t ixgb_io_error_detected (struct pci_dev *pdev, | 84 | static pci_ers_result_t ixgb_io_error_detected (struct pci_dev *pdev, |
90 | enum pci_channel_state state); | 85 | enum pci_channel_state state); |
91 | static pci_ers_result_t ixgb_io_slot_reset (struct pci_dev *pdev); | 86 | static pci_ers_result_t ixgb_io_slot_reset (struct pci_dev *pdev); |
@@ -348,9 +343,6 @@ static const struct net_device_ops ixgb_netdev_ops = { | |||
348 | .ndo_tx_timeout = ixgb_tx_timeout, | 343 | .ndo_tx_timeout = ixgb_tx_timeout, |
349 | .ndo_vlan_rx_add_vid = ixgb_vlan_rx_add_vid, | 344 | .ndo_vlan_rx_add_vid = ixgb_vlan_rx_add_vid, |
350 | .ndo_vlan_rx_kill_vid = ixgb_vlan_rx_kill_vid, | 345 | .ndo_vlan_rx_kill_vid = ixgb_vlan_rx_kill_vid, |
351 | #ifdef CONFIG_NET_POLL_CONTROLLER | ||
352 | .ndo_poll_controller = ixgb_netpoll, | ||
353 | #endif | ||
354 | .ndo_fix_features = ixgb_fix_features, | 346 | .ndo_fix_features = ixgb_fix_features, |
355 | .ndo_set_features = ixgb_set_features, | 347 | .ndo_set_features = ixgb_set_features, |
356 | }; | 348 | }; |
@@ -2195,23 +2187,6 @@ ixgb_restore_vlan(struct ixgb_adapter *adapter) | |||
2195 | ixgb_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), vid); | 2187 | ixgb_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), vid); |
2196 | } | 2188 | } |
2197 | 2189 | ||
2198 | #ifdef CONFIG_NET_POLL_CONTROLLER | ||
2199 | /* | ||
2200 | * Polling 'interrupt' - used by things like netconsole to send skbs | ||
2201 | * without having to re-enable interrupts. It's not called while | ||
2202 | * the interrupt routine is executing. | ||
2203 | */ | ||
2204 | |||
2205 | static void ixgb_netpoll(struct net_device *dev) | ||
2206 | { | ||
2207 | struct ixgb_adapter *adapter = netdev_priv(dev); | ||
2208 | |||
2209 | disable_irq(adapter->pdev->irq); | ||
2210 | ixgb_intr(adapter->pdev->irq, dev); | ||
2211 | enable_irq(adapter->pdev->irq); | ||
2212 | } | ||
2213 | #endif | ||
2214 | |||
2215 | /** | 2190 | /** |
2216 | * ixgb_io_error_detected - called when PCI error is detected | 2191 | * ixgb_io_error_detected - called when PCI error is detected |
2217 | * @pdev: pointer to pci device with error | 2192 | * @pdev: pointer to pci device with error |
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index 9a23d33a47ed..f27d73a7bf16 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | |||
@@ -8768,28 +8768,6 @@ static int ixgbe_del_sanmac_netdev(struct net_device *dev) | |||
8768 | return err; | 8768 | return err; |
8769 | } | 8769 | } |
8770 | 8770 | ||
8771 | #ifdef CONFIG_NET_POLL_CONTROLLER | ||
8772 | /* | ||
8773 | * Polling 'interrupt' - used by things like netconsole to send skbs | ||
8774 | * without having to re-enable interrupts. It's not called while | ||
8775 | * the interrupt routine is executing. | ||
8776 | */ | ||
8777 | static void ixgbe_netpoll(struct net_device *netdev) | ||
8778 | { | ||
8779 | struct ixgbe_adapter *adapter = netdev_priv(netdev); | ||
8780 | int i; | ||
8781 | |||
8782 | /* if interface is down do nothing */ | ||
8783 | if (test_bit(__IXGBE_DOWN, &adapter->state)) | ||
8784 | return; | ||
8785 | |||
8786 | /* loop through and schedule all active queues */ | ||
8787 | for (i = 0; i < adapter->num_q_vectors; i++) | ||
8788 | ixgbe_msix_clean_rings(0, adapter->q_vector[i]); | ||
8789 | } | ||
8790 | |||
8791 | #endif | ||
8792 | |||
8793 | static void ixgbe_get_ring_stats64(struct rtnl_link_stats64 *stats, | 8771 | static void ixgbe_get_ring_stats64(struct rtnl_link_stats64 *stats, |
8794 | struct ixgbe_ring *ring) | 8772 | struct ixgbe_ring *ring) |
8795 | { | 8773 | { |
@@ -10251,9 +10229,6 @@ static const struct net_device_ops ixgbe_netdev_ops = { | |||
10251 | .ndo_get_vf_config = ixgbe_ndo_get_vf_config, | 10229 | .ndo_get_vf_config = ixgbe_ndo_get_vf_config, |
10252 | .ndo_get_stats64 = ixgbe_get_stats64, | 10230 | .ndo_get_stats64 = ixgbe_get_stats64, |
10253 | .ndo_setup_tc = __ixgbe_setup_tc, | 10231 | .ndo_setup_tc = __ixgbe_setup_tc, |
10254 | #ifdef CONFIG_NET_POLL_CONTROLLER | ||
10255 | .ndo_poll_controller = ixgbe_netpoll, | ||
10256 | #endif | ||
10257 | #ifdef IXGBE_FCOE | 10232 | #ifdef IXGBE_FCOE |
10258 | .ndo_select_queue = ixgbe_select_queue, | 10233 | .ndo_select_queue = ixgbe_select_queue, |
10259 | .ndo_fcoe_ddp_setup = ixgbe_fcoe_ddp_get, | 10234 | .ndo_fcoe_ddp_setup = ixgbe_fcoe_ddp_get, |
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c index d86446d202d5..5a228582423b 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c | |||
@@ -4233,24 +4233,6 @@ static int ixgbevf_change_mtu(struct net_device *netdev, int new_mtu) | |||
4233 | return 0; | 4233 | return 0; |
4234 | } | 4234 | } |
4235 | 4235 | ||
4236 | #ifdef CONFIG_NET_POLL_CONTROLLER | ||
4237 | /* Polling 'interrupt' - used by things like netconsole to send skbs | ||
4238 | * without having to re-enable interrupts. It's not called while | ||
4239 | * the interrupt routine is executing. | ||
4240 | */ | ||
4241 | static void ixgbevf_netpoll(struct net_device *netdev) | ||
4242 | { | ||
4243 | struct ixgbevf_adapter *adapter = netdev_priv(netdev); | ||
4244 | int i; | ||
4245 | |||
4246 | /* if interface is down do nothing */ | ||
4247 | if (test_bit(__IXGBEVF_DOWN, &adapter->state)) | ||
4248 | return; | ||
4249 | for (i = 0; i < adapter->num_rx_queues; i++) | ||
4250 | ixgbevf_msix_clean_rings(0, adapter->q_vector[i]); | ||
4251 | } | ||
4252 | #endif /* CONFIG_NET_POLL_CONTROLLER */ | ||
4253 | |||
4254 | static int ixgbevf_suspend(struct pci_dev *pdev, pm_message_t state) | 4236 | static int ixgbevf_suspend(struct pci_dev *pdev, pm_message_t state) |
4255 | { | 4237 | { |
4256 | struct net_device *netdev = pci_get_drvdata(pdev); | 4238 | struct net_device *netdev = pci_get_drvdata(pdev); |
@@ -4482,9 +4464,6 @@ static const struct net_device_ops ixgbevf_netdev_ops = { | |||
4482 | .ndo_tx_timeout = ixgbevf_tx_timeout, | 4464 | .ndo_tx_timeout = ixgbevf_tx_timeout, |
4483 | .ndo_vlan_rx_add_vid = ixgbevf_vlan_rx_add_vid, | 4465 | .ndo_vlan_rx_add_vid = ixgbevf_vlan_rx_add_vid, |
4484 | .ndo_vlan_rx_kill_vid = ixgbevf_vlan_rx_kill_vid, | 4466 | .ndo_vlan_rx_kill_vid = ixgbevf_vlan_rx_kill_vid, |
4485 | #ifdef CONFIG_NET_POLL_CONTROLLER | ||
4486 | .ndo_poll_controller = ixgbevf_netpoll, | ||
4487 | #endif | ||
4488 | .ndo_features_check = ixgbevf_features_check, | 4467 | .ndo_features_check = ixgbevf_features_check, |
4489 | .ndo_bpf = ixgbevf_xdp, | 4468 | .ndo_bpf = ixgbevf_xdp, |
4490 | }; | 4469 | }; |
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c index bc80a678abc3..b4ed7d394d07 100644 --- a/drivers/net/ethernet/marvell/mvneta.c +++ b/drivers/net/ethernet/marvell/mvneta.c | |||
@@ -1890,8 +1890,8 @@ static void mvneta_rxq_drop_pkts(struct mvneta_port *pp, | |||
1890 | if (!data || !(rx_desc->buf_phys_addr)) | 1890 | if (!data || !(rx_desc->buf_phys_addr)) |
1891 | continue; | 1891 | continue; |
1892 | 1892 | ||
1893 | dma_unmap_single(pp->dev->dev.parent, rx_desc->buf_phys_addr, | 1893 | dma_unmap_page(pp->dev->dev.parent, rx_desc->buf_phys_addr, |
1894 | MVNETA_RX_BUF_SIZE(pp->pkt_size), DMA_FROM_DEVICE); | 1894 | PAGE_SIZE, DMA_FROM_DEVICE); |
1895 | __free_page(data); | 1895 | __free_page(data); |
1896 | } | 1896 | } |
1897 | } | 1897 | } |
@@ -2008,8 +2008,8 @@ static int mvneta_rx_swbm(struct napi_struct *napi, | |||
2008 | skb_add_rx_frag(rxq->skb, frag_num, page, | 2008 | skb_add_rx_frag(rxq->skb, frag_num, page, |
2009 | frag_offset, frag_size, | 2009 | frag_offset, frag_size, |
2010 | PAGE_SIZE); | 2010 | PAGE_SIZE); |
2011 | dma_unmap_single(dev->dev.parent, phys_addr, | 2011 | dma_unmap_page(dev->dev.parent, phys_addr, |
2012 | PAGE_SIZE, DMA_FROM_DEVICE); | 2012 | PAGE_SIZE, DMA_FROM_DEVICE); |
2013 | rxq->left_size -= frag_size; | 2013 | rxq->left_size -= frag_size; |
2014 | } | 2014 | } |
2015 | } else { | 2015 | } else { |
@@ -2039,9 +2039,8 @@ static int mvneta_rx_swbm(struct napi_struct *napi, | |||
2039 | frag_offset, frag_size, | 2039 | frag_offset, frag_size, |
2040 | PAGE_SIZE); | 2040 | PAGE_SIZE); |
2041 | 2041 | ||
2042 | dma_unmap_single(dev->dev.parent, phys_addr, | 2042 | dma_unmap_page(dev->dev.parent, phys_addr, |
2043 | PAGE_SIZE, | 2043 | PAGE_SIZE, DMA_FROM_DEVICE); |
2044 | DMA_FROM_DEVICE); | ||
2045 | 2044 | ||
2046 | rxq->left_size -= frag_size; | 2045 | rxq->left_size -= frag_size; |
2047 | } | 2046 | } |
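The mvneta receive buffers here are whole pages that appear to be mapped with dma_map_page(), so tearing them down with dma_unmap_single() and a packet-sized length was a mismatched pairing; the hunks above switch every unmap to dma_unmap_page() with PAGE_SIZE. The matched pair, as a sketch:

        #include <linux/dma-mapping.h>
        #include <linux/mm.h>

        static dma_addr_t ex_map_rx_page(struct device *dev, struct page *page)
        {
                /* Map the whole page for the device to write into. */
                return dma_map_page(dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE);
        }

        static void ex_unmap_rx_page(struct device *dev, dma_addr_t addr)
        {
                /* Unmap with the same API and the same size as the map call;
                 * mixing dma_unmap_single() with a smaller length trips
                 * DMA-API debugging and can confuse IOMMU bookkeeping. */
                dma_unmap_page(dev, addr, PAGE_SIZE, DMA_FROM_DEVICE);
        }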
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c index 28500417843e..38cc01beea79 100644 --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c | |||
@@ -58,6 +58,8 @@ static struct { | |||
58 | */ | 58 | */ |
59 | static void mvpp2_mac_config(struct net_device *dev, unsigned int mode, | 59 | static void mvpp2_mac_config(struct net_device *dev, unsigned int mode, |
60 | const struct phylink_link_state *state); | 60 | const struct phylink_link_state *state); |
61 | static void mvpp2_mac_link_up(struct net_device *dev, unsigned int mode, | ||
62 | phy_interface_t interface, struct phy_device *phy); | ||
61 | 63 | ||
62 | /* Queue modes */ | 64 | /* Queue modes */ |
63 | #define MVPP2_QDIST_SINGLE_MODE 0 | 65 | #define MVPP2_QDIST_SINGLE_MODE 0 |
@@ -3053,10 +3055,12 @@ static int mvpp2_poll(struct napi_struct *napi, int budget) | |||
3053 | cause_rx_tx & ~MVPP2_CAUSE_MISC_SUM_MASK); | 3055 | cause_rx_tx & ~MVPP2_CAUSE_MISC_SUM_MASK); |
3054 | } | 3056 | } |
3055 | 3057 | ||
3056 | cause_tx = cause_rx_tx & MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK; | 3058 | if (port->has_tx_irqs) { |
3057 | if (cause_tx) { | 3059 | cause_tx = cause_rx_tx & MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK; |
3058 | cause_tx >>= MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_OFFSET; | 3060 | if (cause_tx) { |
3059 | mvpp2_tx_done(port, cause_tx, qv->sw_thread_id); | 3061 | cause_tx >>= MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_OFFSET; |
3062 | mvpp2_tx_done(port, cause_tx, qv->sw_thread_id); | ||
3063 | } | ||
3060 | } | 3064 | } |
3061 | 3065 | ||
3062 | /* Process RX packets */ | 3066 | /* Process RX packets */ |
@@ -3142,6 +3146,7 @@ static void mvpp2_start_dev(struct mvpp2_port *port) | |||
3142 | mvpp22_mode_reconfigure(port); | 3146 | mvpp22_mode_reconfigure(port); |
3143 | 3147 | ||
3144 | if (port->phylink) { | 3148 | if (port->phylink) { |
3149 | netif_carrier_off(port->dev); | ||
3145 | phylink_start(port->phylink); | 3150 | phylink_start(port->phylink); |
3146 | } else { | 3151 | } else { |
3147 | /* Phylink isn't used as of now for ACPI, so the MAC has to be | 3152 | /* Phylink isn't used as of now for ACPI, so the MAC has to be |
@@ -3150,9 +3155,10 @@ static void mvpp2_start_dev(struct mvpp2_port *port) | |||
3150 | */ | 3155 | */ |
3151 | struct phylink_link_state state = { | 3156 | struct phylink_link_state state = { |
3152 | .interface = port->phy_interface, | 3157 | .interface = port->phy_interface, |
3153 | .link = 1, | ||
3154 | }; | 3158 | }; |
3155 | mvpp2_mac_config(port->dev, MLO_AN_INBAND, &state); | 3159 | mvpp2_mac_config(port->dev, MLO_AN_INBAND, &state); |
3160 | mvpp2_mac_link_up(port->dev, MLO_AN_INBAND, port->phy_interface, | ||
3161 | NULL); | ||
3156 | } | 3162 | } |
3157 | 3163 | ||
3158 | netif_tx_start_all_queues(port->dev); | 3164 | netif_tx_start_all_queues(port->dev); |
@@ -4495,10 +4501,6 @@ static void mvpp2_mac_config(struct net_device *dev, unsigned int mode, | |||
4495 | return; | 4501 | return; |
4496 | } | 4502 | } |
4497 | 4503 | ||
4498 | netif_tx_stop_all_queues(port->dev); | ||
4499 | if (!port->has_phy) | ||
4500 | netif_carrier_off(port->dev); | ||
4501 | |||
4502 | /* Make sure the port is disabled when reconfiguring the mode */ | 4504 | /* Make sure the port is disabled when reconfiguring the mode */ |
4503 | mvpp2_port_disable(port); | 4505 | mvpp2_port_disable(port); |
4504 | 4506 | ||
@@ -4523,16 +4525,7 @@ static void mvpp2_mac_config(struct net_device *dev, unsigned int mode, | |||
4523 | if (port->priv->hw_version == MVPP21 && port->flags & MVPP2_F_LOOPBACK) | 4525 | if (port->priv->hw_version == MVPP21 && port->flags & MVPP2_F_LOOPBACK) |
4524 | mvpp2_port_loopback_set(port, state); | 4526 | mvpp2_port_loopback_set(port, state); |
4525 | 4527 | ||
4526 | /* If the port already was up, make sure it's still in the same state */ | 4528 | mvpp2_port_enable(port); |
4527 | if (state->link || !port->has_phy) { | ||
4528 | mvpp2_port_enable(port); | ||
4529 | |||
4530 | mvpp2_egress_enable(port); | ||
4531 | mvpp2_ingress_enable(port); | ||
4532 | if (!port->has_phy) | ||
4533 | netif_carrier_on(dev); | ||
4534 | netif_tx_wake_all_queues(dev); | ||
4535 | } | ||
4536 | } | 4529 | } |
4537 | 4530 | ||
4538 | static void mvpp2_mac_link_up(struct net_device *dev, unsigned int mode, | 4531 | static void mvpp2_mac_link_up(struct net_device *dev, unsigned int mode, |
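The mvpp2 rework stops toggling the carrier and the TX queues inside mvpp2_mac_config(); that work now lives in the link_up/link_down callbacks, and the ACPI path calls mvpp2_mac_link_up() by hand after configuring the MAC. A rough sketch of that split, with ex_* helpers invented as placeholders:

        #include <linux/netdevice.h>
        #include <linux/phy.h>

        static void ex_hw_set_mode(void *priv, unsigned int mode) { }
        static void ex_hw_egress_enable(void *priv) { }

        static void ex_mac_config(struct net_device *dev, unsigned int mode)
        {
                /* Reprogram the MAC for the new mode only; leave the carrier
                 * state and the TX queues alone here. */
                ex_hw_set_mode(netdev_priv(dev), mode);
        }

        static void ex_mac_link_up(struct net_device *dev, unsigned int mode,
                                   phy_interface_t interface,
                                   struct phy_device *phy)
        {
                /* Egress and the queues are enabled only once the link has
                 * really come up, so frames are not queued into a dead port. */
                ex_hw_egress_enable(netdev_priv(dev));
                netif_tx_wake_all_queues(dev);
        }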
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c index 6785661d1a72..fe49384eba48 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c | |||
@@ -1286,20 +1286,6 @@ out: | |||
1286 | mutex_unlock(&mdev->state_lock); | 1286 | mutex_unlock(&mdev->state_lock); |
1287 | } | 1287 | } |
1288 | 1288 | ||
1289 | #ifdef CONFIG_NET_POLL_CONTROLLER | ||
1290 | static void mlx4_en_netpoll(struct net_device *dev) | ||
1291 | { | ||
1292 | struct mlx4_en_priv *priv = netdev_priv(dev); | ||
1293 | struct mlx4_en_cq *cq; | ||
1294 | int i; | ||
1295 | |||
1296 | for (i = 0; i < priv->tx_ring_num[TX]; i++) { | ||
1297 | cq = priv->tx_cq[TX][i]; | ||
1298 | napi_schedule(&cq->napi); | ||
1299 | } | ||
1300 | } | ||
1301 | #endif | ||
1302 | |||
1303 | static int mlx4_en_set_rss_steer_rules(struct mlx4_en_priv *priv) | 1289 | static int mlx4_en_set_rss_steer_rules(struct mlx4_en_priv *priv) |
1304 | { | 1290 | { |
1305 | u64 reg_id; | 1291 | u64 reg_id; |
@@ -2946,9 +2932,6 @@ static const struct net_device_ops mlx4_netdev_ops = { | |||
2946 | .ndo_tx_timeout = mlx4_en_tx_timeout, | 2932 | .ndo_tx_timeout = mlx4_en_tx_timeout, |
2947 | .ndo_vlan_rx_add_vid = mlx4_en_vlan_rx_add_vid, | 2933 | .ndo_vlan_rx_add_vid = mlx4_en_vlan_rx_add_vid, |
2948 | .ndo_vlan_rx_kill_vid = mlx4_en_vlan_rx_kill_vid, | 2934 | .ndo_vlan_rx_kill_vid = mlx4_en_vlan_rx_kill_vid, |
2949 | #ifdef CONFIG_NET_POLL_CONTROLLER | ||
2950 | .ndo_poll_controller = mlx4_en_netpoll, | ||
2951 | #endif | ||
2952 | .ndo_set_features = mlx4_en_set_features, | 2935 | .ndo_set_features = mlx4_en_set_features, |
2953 | .ndo_fix_features = mlx4_en_fix_features, | 2936 | .ndo_fix_features = mlx4_en_fix_features, |
2954 | .ndo_setup_tc = __mlx4_en_setup_tc, | 2937 | .ndo_setup_tc = __mlx4_en_setup_tc, |
@@ -2983,9 +2966,6 @@ static const struct net_device_ops mlx4_netdev_ops_master = { | |||
2983 | .ndo_set_vf_link_state = mlx4_en_set_vf_link_state, | 2966 | .ndo_set_vf_link_state = mlx4_en_set_vf_link_state, |
2984 | .ndo_get_vf_stats = mlx4_en_get_vf_stats, | 2967 | .ndo_get_vf_stats = mlx4_en_get_vf_stats, |
2985 | .ndo_get_vf_config = mlx4_en_get_vf_config, | 2968 | .ndo_get_vf_config = mlx4_en_get_vf_config, |
2986 | #ifdef CONFIG_NET_POLL_CONTROLLER | ||
2987 | .ndo_poll_controller = mlx4_en_netpoll, | ||
2988 | #endif | ||
2989 | .ndo_set_features = mlx4_en_set_features, | 2969 | .ndo_set_features = mlx4_en_set_features, |
2990 | .ndo_fix_features = mlx4_en_fix_features, | 2970 | .ndo_fix_features = mlx4_en_fix_features, |
2991 | .ndo_setup_tc = __mlx4_en_setup_tc, | 2971 | .ndo_setup_tc = __mlx4_en_setup_tc, |
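Note on the netpoll removals: this mlx4 hunk and the mlx5/nfp hunks below all drop the same boilerplate, a CONFIG_NET_POLL_CONTROLLER-guarded .ndo_poll_controller hook whose only job was to schedule every NAPI instance. The core netpoll code now appears to poll the device's NAPI contexts itself, which is why these per-driver hooks could go. As a rough illustration only (all names here are hypothetical, not from any of these drivers), the removed pattern boils down to:

#include <linux/netdevice.h>

struct example_channel {
	struct napi_struct napi;
};

struct example_priv {
	struct example_channel *channel;
	int num_channels;
};

#ifdef CONFIG_NET_POLL_CONTROLLER
/* Schedule all NAPI contexts so netpoll (e.g. netconsole) can make
 * progress without interrupts. */
static void example_netpoll(struct net_device *dev)
{
	struct example_priv *priv = netdev_priv(dev);
	int i;

	for (i = 0; i < priv->num_channels; i++)
		napi_schedule(&priv->channel[i].napi);
}
#endif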
diff --git a/drivers/net/ethernet/mellanox/mlx4/eq.c b/drivers/net/ethernet/mellanox/mlx4/eq.c index 1f3372c1802e..2df92dbd38e1 100644 --- a/drivers/net/ethernet/mellanox/mlx4/eq.c +++ b/drivers/net/ethernet/mellanox/mlx4/eq.c | |||
@@ -240,7 +240,8 @@ static void mlx4_set_eq_affinity_hint(struct mlx4_priv *priv, int vec) | |||
240 | struct mlx4_dev *dev = &priv->dev; | 240 | struct mlx4_dev *dev = &priv->dev; |
241 | struct mlx4_eq *eq = &priv->eq_table.eq[vec]; | 241 | struct mlx4_eq *eq = &priv->eq_table.eq[vec]; |
242 | 242 | ||
243 | if (!eq->affinity_mask || cpumask_empty(eq->affinity_mask)) | 243 | if (!cpumask_available(eq->affinity_mask) || |
244 | cpumask_empty(eq->affinity_mask)) | ||
244 | return; | 245 | return; |
245 | 246 | ||
246 | hint_err = irq_set_affinity_hint(eq->irq, eq->affinity_mask); | 247 | hint_err = irq_set_affinity_hint(eq->irq, eq->affinity_mask); |
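The eq.c change swaps a plain NULL test on eq->affinity_mask for cpumask_available(). A minimal sketch of why, assuming the field is a cpumask_var_t: with CONFIG_CPUMASK_OFFSTACK=n the type is a one-element array rather than a pointer, so "!mask" is not a meaningful test, while cpumask_available() does the right thing in both configurations.

#include <linux/cpumask.h>

/* Sketch only: guard an affinity-hint update the way the hunk above does. */
static bool affinity_hint_usable(cpumask_var_t mask)
{
	/* false when the mask was never allocated (offstack) or is empty */
	return cpumask_available(mask) && !cpumask_empty(mask);
}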
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c index 3ce14d42ddc8..a53736c26c0c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c | |||
@@ -206,7 +206,7 @@ static void poll_timeout(struct mlx5_cmd_work_ent *ent) | |||
206 | u8 own; | 206 | u8 own; |
207 | 207 | ||
208 | do { | 208 | do { |
209 | own = ent->lay->status_own; | 209 | own = READ_ONCE(ent->lay->status_own); |
210 | if (!(own & CMD_OWNER_HW)) { | 210 | if (!(own & CMD_OWNER_HW)) { |
211 | ent->ret = 0; | 211 | ent->ret = 0; |
212 | return; | 212 | return; |
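The cmd.c hunk wraps the ownership-bit load in READ_ONCE() so the compiler must re-read the status byte on every iteration of the polling loop instead of hoisting the load out of it. A simplified, stand-alone sketch of the idea (this is not the kernel macro itself, and the OWNER_HW bit value is an assumption for illustration):

#include <stdint.h>

#define OWNER_HW 0x1u  /* assumed bit position, illustration only */

/* A volatile cast is roughly what READ_ONCE() does for small scalar loads. */
#define read_once_u8(p) (*(const volatile uint8_t *)(p))

static int wait_for_sw_ownership(const uint8_t *status_own, unsigned long spins)
{
	while (spins--) {
		if (!(read_once_u8(status_own) & OWNER_HW))
			return 0;   /* hardware released the command entry */
	}
	return -1;                  /* timed out, analogous to poll_timeout() */
}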
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.c index eddd7702680b..e88340e196f7 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.c | |||
@@ -183,12 +183,13 @@ static const struct tlsdev_ops mlx5e_tls_ops = { | |||
183 | 183 | ||
184 | void mlx5e_tls_build_netdev(struct mlx5e_priv *priv) | 184 | void mlx5e_tls_build_netdev(struct mlx5e_priv *priv) |
185 | { | 185 | { |
186 | u32 caps = mlx5_accel_tls_device_caps(priv->mdev); | ||
187 | struct net_device *netdev = priv->netdev; | 186 | struct net_device *netdev = priv->netdev; |
187 | u32 caps; | ||
188 | 188 | ||
189 | if (!mlx5_accel_is_tls_device(priv->mdev)) | 189 | if (!mlx5_accel_is_tls_device(priv->mdev)) |
190 | return; | 190 | return; |
191 | 191 | ||
192 | caps = mlx5_accel_tls_device_caps(priv->mdev); | ||
192 | if (caps & MLX5_ACCEL_TLS_TX) { | 193 | if (caps & MLX5_ACCEL_TLS_TX) { |
193 | netdev->features |= NETIF_F_HW_TLS_TX; | 194 | netdev->features |= NETIF_F_HW_TLS_TX; |
194 | netdev->hw_features |= NETIF_F_HW_TLS_TX; | 195 | netdev->hw_features |= NETIF_F_HW_TLS_TX; |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index 5a7939e70190..54118b77dc1f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c | |||
@@ -4315,22 +4315,6 @@ static int mlx5e_xdp(struct net_device *dev, struct netdev_bpf *xdp) | |||
4315 | } | 4315 | } |
4316 | } | 4316 | } |
4317 | 4317 | ||
4318 | #ifdef CONFIG_NET_POLL_CONTROLLER | ||
4319 | /* Fake "interrupt" called by netpoll (eg netconsole) to send skbs without | ||
4320 | * reenabling interrupts. | ||
4321 | */ | ||
4322 | static void mlx5e_netpoll(struct net_device *dev) | ||
4323 | { | ||
4324 | struct mlx5e_priv *priv = netdev_priv(dev); | ||
4325 | struct mlx5e_channels *chs = &priv->channels; | ||
4326 | |||
4327 | int i; | ||
4328 | |||
4329 | for (i = 0; i < chs->num; i++) | ||
4330 | napi_schedule(&chs->c[i]->napi); | ||
4331 | } | ||
4332 | #endif | ||
4333 | |||
4334 | static const struct net_device_ops mlx5e_netdev_ops = { | 4318 | static const struct net_device_ops mlx5e_netdev_ops = { |
4335 | .ndo_open = mlx5e_open, | 4319 | .ndo_open = mlx5e_open, |
4336 | .ndo_stop = mlx5e_close, | 4320 | .ndo_stop = mlx5e_close, |
@@ -4356,9 +4340,6 @@ static const struct net_device_ops mlx5e_netdev_ops = { | |||
4356 | #ifdef CONFIG_MLX5_EN_ARFS | 4340 | #ifdef CONFIG_MLX5_EN_ARFS |
4357 | .ndo_rx_flow_steer = mlx5e_rx_flow_steer, | 4341 | .ndo_rx_flow_steer = mlx5e_rx_flow_steer, |
4358 | #endif | 4342 | #endif |
4359 | #ifdef CONFIG_NET_POLL_CONTROLLER | ||
4360 | .ndo_poll_controller = mlx5e_netpoll, | ||
4361 | #endif | ||
4362 | #ifdef CONFIG_MLX5_ESWITCH | 4343 | #ifdef CONFIG_MLX5_ESWITCH |
4363 | /* SRIOV E-Switch NDOs */ | 4344 | /* SRIOV E-Switch NDOs */ |
4364 | .ndo_set_vf_mac = mlx5e_set_vf_mac, | 4345 | .ndo_set_vf_mac = mlx5e_set_vf_mac, |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/transobj.c b/drivers/net/ethernet/mellanox/mlx5/core/transobj.c index dae1c5c5d27c..d2f76070ea7c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/transobj.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/transobj.c | |||
@@ -509,7 +509,7 @@ static int mlx5_hairpin_modify_sq(struct mlx5_core_dev *peer_mdev, u32 sqn, | |||
509 | 509 | ||
510 | sqc = MLX5_ADDR_OF(modify_sq_in, in, ctx); | 510 | sqc = MLX5_ADDR_OF(modify_sq_in, in, ctx); |
511 | 511 | ||
512 | if (next_state == MLX5_RQC_STATE_RDY) { | 512 | if (next_state == MLX5_SQC_STATE_RDY) { |
513 | MLX5_SET(sqc, sqc, hairpin_peer_rq, peer_rq); | 513 | MLX5_SET(sqc, sqc, hairpin_peer_rq, peer_rq); |
514 | MLX5_SET(sqc, sqc, hairpin_peer_vhca, peer_vhca); | 514 | MLX5_SET(sqc, sqc, hairpin_peer_vhca, peer_vhca); |
515 | } | 515 | } |
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c index 930700413b1d..b492152c8881 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c | |||
@@ -44,8 +44,8 @@ | |||
44 | #define MLXSW_SP_FWREV_MINOR_TO_BRANCH(minor) ((minor) / 100) | 44 | #define MLXSW_SP_FWREV_MINOR_TO_BRANCH(minor) ((minor) / 100) |
45 | 45 | ||
46 | #define MLXSW_SP1_FWREV_MAJOR 13 | 46 | #define MLXSW_SP1_FWREV_MAJOR 13 |
47 | #define MLXSW_SP1_FWREV_MINOR 1702 | 47 | #define MLXSW_SP1_FWREV_MINOR 1703 |
48 | #define MLXSW_SP1_FWREV_SUBMINOR 6 | 48 | #define MLXSW_SP1_FWREV_SUBMINOR 4 |
49 | #define MLXSW_SP1_FWREV_CAN_RESET_MINOR 1702 | 49 | #define MLXSW_SP1_FWREV_CAN_RESET_MINOR 1702 |
50 | 50 | ||
51 | static const struct mlxsw_fw_rev mlxsw_sp1_fw_rev = { | 51 | static const struct mlxsw_fw_rev mlxsw_sp1_fw_rev = { |
diff --git a/drivers/net/ethernet/microchip/lan743x_main.c b/drivers/net/ethernet/microchip/lan743x_main.c index e7dce79ff2c9..001b5f714c1b 100644 --- a/drivers/net/ethernet/microchip/lan743x_main.c +++ b/drivers/net/ethernet/microchip/lan743x_main.c | |||
@@ -2850,7 +2850,7 @@ static void lan743x_pcidev_shutdown(struct pci_dev *pdev) | |||
2850 | lan743x_hardware_cleanup(adapter); | 2850 | lan743x_hardware_cleanup(adapter); |
2851 | } | 2851 | } |
2852 | 2852 | ||
2853 | #ifdef CONFIG_PM | 2853 | #ifdef CONFIG_PM_SLEEP |
2854 | static u16 lan743x_pm_wakeframe_crc16(const u8 *buf, int len) | 2854 | static u16 lan743x_pm_wakeframe_crc16(const u8 *buf, int len) |
2855 | { | 2855 | { |
2856 | return bitrev16(crc16(0xFFFF, buf, len)); | 2856 | return bitrev16(crc16(0xFFFF, buf, len)); |
@@ -3016,7 +3016,7 @@ static int lan743x_pm_resume(struct device *dev) | |||
3016 | static const struct dev_pm_ops lan743x_pm_ops = { | 3016 | static const struct dev_pm_ops lan743x_pm_ops = { |
3017 | SET_SYSTEM_SLEEP_PM_OPS(lan743x_pm_suspend, lan743x_pm_resume) | 3017 | SET_SYSTEM_SLEEP_PM_OPS(lan743x_pm_suspend, lan743x_pm_resume) |
3018 | }; | 3018 | }; |
3019 | #endif /*CONFIG_PM */ | 3019 | #endif /* CONFIG_PM_SLEEP */ |
3020 | 3020 | ||
3021 | static const struct pci_device_id lan743x_pcidev_tbl[] = { | 3021 | static const struct pci_device_id lan743x_pcidev_tbl[] = { |
3022 | { PCI_DEVICE(PCI_VENDOR_ID_SMSC, PCI_DEVICE_ID_SMSC_LAN7430) }, | 3022 | { PCI_DEVICE(PCI_VENDOR_ID_SMSC, PCI_DEVICE_ID_SMSC_LAN7430) }, |
@@ -3028,7 +3028,7 @@ static struct pci_driver lan743x_pcidev_driver = { | |||
3028 | .id_table = lan743x_pcidev_tbl, | 3028 | .id_table = lan743x_pcidev_tbl, |
3029 | .probe = lan743x_pcidev_probe, | 3029 | .probe = lan743x_pcidev_probe, |
3030 | .remove = lan743x_pcidev_remove, | 3030 | .remove = lan743x_pcidev_remove, |
3031 | #ifdef CONFIG_PM | 3031 | #ifdef CONFIG_PM_SLEEP |
3032 | .driver.pm = &lan743x_pm_ops, | 3032 | .driver.pm = &lan743x_pm_ops, |
3033 | #endif | 3033 | #endif |
3034 | .shutdown = lan743x_pcidev_shutdown, | 3034 | .shutdown = lan743x_pcidev_shutdown, |
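The lan743x change tightens the guards from CONFIG_PM to CONFIG_PM_SLEEP, matching what SET_SYSTEM_SLEEP_PM_OPS() actually depends on: without CONFIG_PM_SLEEP the macro expands to nothing, so guarding on CONFIG_PM alone could leave an empty pm_ops table and unused helpers behind. A hedged sketch of the usual arrangement, with hypothetical suspend/resume callbacks:

#include <linux/pci.h>
#include <linux/pm.h>

#ifdef CONFIG_PM_SLEEP
static int example_suspend(struct device *dev) { return 0; }
static int example_resume(struct device *dev)  { return 0; }

static SIMPLE_DEV_PM_OPS(example_pm_ops, example_suspend, example_resume);
#endif

static struct pci_driver example_driver = {
	.name = "example",
#ifdef CONFIG_PM_SLEEP
	.driver.pm = &example_pm_ops,
#endif
};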
diff --git a/drivers/net/ethernet/mscc/ocelot_board.c b/drivers/net/ethernet/mscc/ocelot_board.c index 26bb3b18f3be..3cdf63e35b53 100644 --- a/drivers/net/ethernet/mscc/ocelot_board.c +++ b/drivers/net/ethernet/mscc/ocelot_board.c | |||
@@ -91,7 +91,7 @@ static irqreturn_t ocelot_xtr_irq_handler(int irq, void *arg) | |||
91 | struct sk_buff *skb; | 91 | struct sk_buff *skb; |
92 | struct net_device *dev; | 92 | struct net_device *dev; |
93 | u32 *buf; | 93 | u32 *buf; |
94 | int sz, len; | 94 | int sz, len, buf_len; |
95 | u32 ifh[4]; | 95 | u32 ifh[4]; |
96 | u32 val; | 96 | u32 val; |
97 | struct frame_info info; | 97 | struct frame_info info; |
@@ -116,14 +116,20 @@ static irqreturn_t ocelot_xtr_irq_handler(int irq, void *arg) | |||
116 | err = -ENOMEM; | 116 | err = -ENOMEM; |
117 | break; | 117 | break; |
118 | } | 118 | } |
119 | buf = (u32 *)skb_put(skb, info.len); | 119 | buf_len = info.len - ETH_FCS_LEN; |
120 | buf = (u32 *)skb_put(skb, buf_len); | ||
120 | 121 | ||
121 | len = 0; | 122 | len = 0; |
122 | do { | 123 | do { |
123 | sz = ocelot_rx_frame_word(ocelot, grp, false, &val); | 124 | sz = ocelot_rx_frame_word(ocelot, grp, false, &val); |
124 | *buf++ = val; | 125 | *buf++ = val; |
125 | len += sz; | 126 | len += sz; |
126 | } while ((sz == 4) && (len < info.len)); | 127 | } while (len < buf_len); |
128 | |||
129 | /* Read the FCS and discard it */ | ||
130 | sz = ocelot_rx_frame_word(ocelot, grp, false, &val); | ||
131 | /* Update the statistics if part of the FCS was read before */ | ||
132 | len -= ETH_FCS_LEN - sz; | ||
127 | 133 | ||
128 | if (sz < 0) { | 134 | if (sz < 0) { |
129 | err = sz; | 135 | err = sz; |
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c index 253bdaef1505..8ed38fd5a852 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c | |||
@@ -3146,21 +3146,6 @@ nfp_net_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid) | |||
3146 | return nfp_net_reconfig_mbox(nn, NFP_NET_CFG_MBOX_CMD_CTAG_FILTER_KILL); | 3146 | return nfp_net_reconfig_mbox(nn, NFP_NET_CFG_MBOX_CMD_CTAG_FILTER_KILL); |
3147 | } | 3147 | } |
3148 | 3148 | ||
3149 | #ifdef CONFIG_NET_POLL_CONTROLLER | ||
3150 | static void nfp_net_netpoll(struct net_device *netdev) | ||
3151 | { | ||
3152 | struct nfp_net *nn = netdev_priv(netdev); | ||
3153 | int i; | ||
3154 | |||
3155 | /* nfp_net's NAPIs are statically allocated so even if there is a race | ||
3156 | * with reconfig path this will simply try to schedule some disabled | ||
3157 | * NAPI instances. | ||
3158 | */ | ||
3159 | for (i = 0; i < nn->dp.num_stack_tx_rings; i++) | ||
3160 | napi_schedule_irqoff(&nn->r_vecs[i].napi); | ||
3161 | } | ||
3162 | #endif | ||
3163 | |||
3164 | static void nfp_net_stat64(struct net_device *netdev, | 3149 | static void nfp_net_stat64(struct net_device *netdev, |
3165 | struct rtnl_link_stats64 *stats) | 3150 | struct rtnl_link_stats64 *stats) |
3166 | { | 3151 | { |
@@ -3519,9 +3504,6 @@ const struct net_device_ops nfp_net_netdev_ops = { | |||
3519 | .ndo_get_stats64 = nfp_net_stat64, | 3504 | .ndo_get_stats64 = nfp_net_stat64, |
3520 | .ndo_vlan_rx_add_vid = nfp_net_vlan_rx_add_vid, | 3505 | .ndo_vlan_rx_add_vid = nfp_net_vlan_rx_add_vid, |
3521 | .ndo_vlan_rx_kill_vid = nfp_net_vlan_rx_kill_vid, | 3506 | .ndo_vlan_rx_kill_vid = nfp_net_vlan_rx_kill_vid, |
3522 | #ifdef CONFIG_NET_POLL_CONTROLLER | ||
3523 | .ndo_poll_controller = nfp_net_netpoll, | ||
3524 | #endif | ||
3525 | .ndo_set_vf_mac = nfp_app_set_vf_mac, | 3507 | .ndo_set_vf_mac = nfp_app_set_vf_mac, |
3526 | .ndo_set_vf_vlan = nfp_app_set_vf_vlan, | 3508 | .ndo_set_vf_vlan = nfp_app_set_vf_vlan, |
3527 | .ndo_set_vf_spoofchk = nfp_app_set_vf_spoofchk, | 3509 | .ndo_set_vf_spoofchk = nfp_app_set_vf_spoofchk, |
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c index 6bb76e6d3c14..f5459de6d60a 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c +++ b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c | |||
@@ -190,10 +190,8 @@ qed_dcbx_dp_protocol(struct qed_hwfn *p_hwfn, struct qed_dcbx_results *p_data) | |||
190 | 190 | ||
191 | static void | 191 | static void |
192 | qed_dcbx_set_params(struct qed_dcbx_results *p_data, | 192 | qed_dcbx_set_params(struct qed_dcbx_results *p_data, |
193 | struct qed_hw_info *p_info, | 193 | struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, |
194 | bool enable, | 194 | bool enable, u8 prio, u8 tc, |
195 | u8 prio, | ||
196 | u8 tc, | ||
197 | enum dcbx_protocol_type type, | 195 | enum dcbx_protocol_type type, |
198 | enum qed_pci_personality personality) | 196 | enum qed_pci_personality personality) |
199 | { | 197 | { |
@@ -206,19 +204,30 @@ qed_dcbx_set_params(struct qed_dcbx_results *p_data, | |||
206 | else | 204 | else |
207 | p_data->arr[type].update = DONT_UPDATE_DCB_DSCP; | 205 | p_data->arr[type].update = DONT_UPDATE_DCB_DSCP; |
208 | 206 | ||
207 | /* Do not add vlan tag 0 when DCB is enabled and port in UFP/OV mode */ | ||
208 | if ((test_bit(QED_MF_8021Q_TAGGING, &p_hwfn->cdev->mf_bits) || | ||
209 | test_bit(QED_MF_8021AD_TAGGING, &p_hwfn->cdev->mf_bits))) | ||
210 | p_data->arr[type].dont_add_vlan0 = true; | ||
211 | |||
209 | /* QM reconf data */ | 212 | /* QM reconf data */ |
210 | if (p_info->personality == personality) | 213 | if (p_hwfn->hw_info.personality == personality) |
211 | qed_hw_info_set_offload_tc(p_info, tc); | 214 | qed_hw_info_set_offload_tc(&p_hwfn->hw_info, tc); |
215 | |||
216 | /* Configure dcbx vlan priority in doorbell block for roce EDPM */ | ||
217 | if (test_bit(QED_MF_UFP_SPECIFIC, &p_hwfn->cdev->mf_bits) && | ||
218 | type == DCBX_PROTOCOL_ROCE) { | ||
219 | qed_wr(p_hwfn, p_ptt, DORQ_REG_TAG1_OVRD_MODE, 1); | ||
220 | qed_wr(p_hwfn, p_ptt, DORQ_REG_PF_PCP_BB_K2, prio << 1); | ||
221 | } | ||
212 | } | 222 | } |
213 | 223 | ||
214 | /* Update app protocol data and hw_info fields with the TLV info */ | 224 | /* Update app protocol data and hw_info fields with the TLV info */ |
215 | static void | 225 | static void |
216 | qed_dcbx_update_app_info(struct qed_dcbx_results *p_data, | 226 | qed_dcbx_update_app_info(struct qed_dcbx_results *p_data, |
217 | struct qed_hwfn *p_hwfn, | 227 | struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, |
218 | bool enable, | 228 | bool enable, u8 prio, u8 tc, |
219 | u8 prio, u8 tc, enum dcbx_protocol_type type) | 229 | enum dcbx_protocol_type type) |
220 | { | 230 | { |
221 | struct qed_hw_info *p_info = &p_hwfn->hw_info; | ||
222 | enum qed_pci_personality personality; | 231 | enum qed_pci_personality personality; |
223 | enum dcbx_protocol_type id; | 232 | enum dcbx_protocol_type id; |
224 | int i; | 233 | int i; |
@@ -231,7 +240,7 @@ qed_dcbx_update_app_info(struct qed_dcbx_results *p_data, | |||
231 | 240 | ||
232 | personality = qed_dcbx_app_update[i].personality; | 241 | personality = qed_dcbx_app_update[i].personality; |
233 | 242 | ||
234 | qed_dcbx_set_params(p_data, p_info, enable, | 243 | qed_dcbx_set_params(p_data, p_hwfn, p_ptt, enable, |
235 | prio, tc, type, personality); | 244 | prio, tc, type, personality); |
236 | } | 245 | } |
237 | } | 246 | } |
@@ -265,7 +274,7 @@ qed_dcbx_get_app_protocol_type(struct qed_hwfn *p_hwfn, | |||
265 | * reconfiguring QM. Get protocol specific data for PF update ramrod command. | 274 | * reconfiguring QM. Get protocol specific data for PF update ramrod command. |
266 | */ | 275 | */ |
267 | static int | 276 | static int |
268 | qed_dcbx_process_tlv(struct qed_hwfn *p_hwfn, | 277 | qed_dcbx_process_tlv(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, |
269 | struct qed_dcbx_results *p_data, | 278 | struct qed_dcbx_results *p_data, |
270 | struct dcbx_app_priority_entry *p_tbl, | 279 | struct dcbx_app_priority_entry *p_tbl, |
271 | u32 pri_tc_tbl, int count, u8 dcbx_version) | 280 | u32 pri_tc_tbl, int count, u8 dcbx_version) |
@@ -309,7 +318,7 @@ qed_dcbx_process_tlv(struct qed_hwfn *p_hwfn, | |||
309 | enable = true; | 318 | enable = true; |
310 | } | 319 | } |
311 | 320 | ||
312 | qed_dcbx_update_app_info(p_data, p_hwfn, enable, | 321 | qed_dcbx_update_app_info(p_data, p_hwfn, p_ptt, enable, |
313 | priority, tc, type); | 322 | priority, tc, type); |
314 | } | 323 | } |
315 | } | 324 | } |
@@ -331,7 +340,7 @@ qed_dcbx_process_tlv(struct qed_hwfn *p_hwfn, | |||
331 | continue; | 340 | continue; |
332 | 341 | ||
333 | enable = (type == DCBX_PROTOCOL_ETH) ? false : !!dcbx_version; | 342 | enable = (type == DCBX_PROTOCOL_ETH) ? false : !!dcbx_version; |
334 | qed_dcbx_update_app_info(p_data, p_hwfn, enable, | 343 | qed_dcbx_update_app_info(p_data, p_hwfn, p_ptt, enable, |
335 | priority, tc, type); | 344 | priority, tc, type); |
336 | } | 345 | } |
337 | 346 | ||
@@ -341,7 +350,8 @@ qed_dcbx_process_tlv(struct qed_hwfn *p_hwfn, | |||
341 | /* Parse app TLV's to update TC information in hw_info structure for | 350 | /* Parse app TLV's to update TC information in hw_info structure for |
342 | * reconfiguring QM. Get protocol specific data for PF update ramrod command. | 351 | * reconfiguring QM. Get protocol specific data for PF update ramrod command. |
343 | */ | 352 | */ |
344 | static int qed_dcbx_process_mib_info(struct qed_hwfn *p_hwfn) | 353 | static int |
354 | qed_dcbx_process_mib_info(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) | ||
345 | { | 355 | { |
346 | struct dcbx_app_priority_feature *p_app; | 356 | struct dcbx_app_priority_feature *p_app; |
347 | struct dcbx_app_priority_entry *p_tbl; | 357 | struct dcbx_app_priority_entry *p_tbl; |
@@ -365,7 +375,7 @@ static int qed_dcbx_process_mib_info(struct qed_hwfn *p_hwfn) | |||
365 | p_info = &p_hwfn->hw_info; | 375 | p_info = &p_hwfn->hw_info; |
366 | num_entries = QED_MFW_GET_FIELD(p_app->flags, DCBX_APP_NUM_ENTRIES); | 376 | num_entries = QED_MFW_GET_FIELD(p_app->flags, DCBX_APP_NUM_ENTRIES); |
367 | 377 | ||
368 | rc = qed_dcbx_process_tlv(p_hwfn, &data, p_tbl, pri_tc_tbl, | 378 | rc = qed_dcbx_process_tlv(p_hwfn, p_ptt, &data, p_tbl, pri_tc_tbl, |
369 | num_entries, dcbx_version); | 379 | num_entries, dcbx_version); |
370 | if (rc) | 380 | if (rc) |
371 | return rc; | 381 | return rc; |
@@ -891,7 +901,7 @@ qed_dcbx_mib_update_event(struct qed_hwfn *p_hwfn, | |||
891 | return rc; | 901 | return rc; |
892 | 902 | ||
893 | if (type == QED_DCBX_OPERATIONAL_MIB) { | 903 | if (type == QED_DCBX_OPERATIONAL_MIB) { |
894 | rc = qed_dcbx_process_mib_info(p_hwfn); | 904 | rc = qed_dcbx_process_mib_info(p_hwfn, p_ptt); |
895 | if (!rc) { | 905 | if (!rc) { |
896 | /* reconfigure tcs of QM queues according | 906 | /* reconfigure tcs of QM queues according |
897 | * to negotiation results | 907 | * to negotiation results |
@@ -954,6 +964,7 @@ static void qed_dcbx_update_protocol_data(struct protocol_dcb_data *p_data, | |||
954 | p_data->dcb_enable_flag = p_src->arr[type].enable; | 964 | p_data->dcb_enable_flag = p_src->arr[type].enable; |
955 | p_data->dcb_priority = p_src->arr[type].priority; | 965 | p_data->dcb_priority = p_src->arr[type].priority; |
956 | p_data->dcb_tc = p_src->arr[type].tc; | 966 | p_data->dcb_tc = p_src->arr[type].tc; |
967 | p_data->dcb_dont_add_vlan0 = p_src->arr[type].dont_add_vlan0; | ||
957 | } | 968 | } |
958 | 969 | ||
959 | /* Set pf update ramrod command params */ | 970 | /* Set pf update ramrod command params */ |
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dcbx.h b/drivers/net/ethernet/qlogic/qed/qed_dcbx.h index a4d688c04e18..01f253ea4b22 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_dcbx.h +++ b/drivers/net/ethernet/qlogic/qed/qed_dcbx.h | |||
@@ -55,6 +55,7 @@ struct qed_dcbx_app_data { | |||
55 | u8 update; /* Update indication */ | 55 | u8 update; /* Update indication */ |
56 | u8 priority; /* Priority */ | 56 | u8 priority; /* Priority */ |
57 | u8 tc; /* Traffic Class */ | 57 | u8 tc; /* Traffic Class */ |
58 | bool dont_add_vlan0; /* Do not insert a vlan tag with id 0 */ | ||
58 | }; | 59 | }; |
59 | 60 | ||
60 | #define QED_DCBX_VERSION_DISABLED 0 | 61 | #define QED_DCBX_VERSION_DISABLED 0 |
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c index 016ca8a7ec8a..97f073fd3725 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_dev.c +++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c | |||
@@ -1706,7 +1706,7 @@ static int qed_vf_start(struct qed_hwfn *p_hwfn, | |||
1706 | int qed_hw_init(struct qed_dev *cdev, struct qed_hw_init_params *p_params) | 1706 | int qed_hw_init(struct qed_dev *cdev, struct qed_hw_init_params *p_params) |
1707 | { | 1707 | { |
1708 | struct qed_load_req_params load_req_params; | 1708 | struct qed_load_req_params load_req_params; |
1709 | u32 load_code, param, drv_mb_param; | 1709 | u32 load_code, resp, param, drv_mb_param; |
1710 | bool b_default_mtu = true; | 1710 | bool b_default_mtu = true; |
1711 | struct qed_hwfn *p_hwfn; | 1711 | struct qed_hwfn *p_hwfn; |
1712 | int rc = 0, mfw_rc, i; | 1712 | int rc = 0, mfw_rc, i; |
@@ -1852,6 +1852,19 @@ int qed_hw_init(struct qed_dev *cdev, struct qed_hw_init_params *p_params) | |||
1852 | 1852 | ||
1853 | if (IS_PF(cdev)) { | 1853 | if (IS_PF(cdev)) { |
1854 | p_hwfn = QED_LEADING_HWFN(cdev); | 1854 | p_hwfn = QED_LEADING_HWFN(cdev); |
1855 | |||
1856 | /* Get pre-negotiated values for stag, bandwidth etc. */ | ||
1857 | DP_VERBOSE(p_hwfn, | ||
1858 | QED_MSG_SPQ, | ||
1859 | "Sending GET_OEM_UPDATES command to trigger stag/bandwidth attention handling\n"); | ||
1860 | drv_mb_param = 1 << DRV_MB_PARAM_DUMMY_OEM_UPDATES_OFFSET; | ||
1861 | rc = qed_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt, | ||
1862 | DRV_MSG_CODE_GET_OEM_UPDATES, | ||
1863 | drv_mb_param, &resp, &param); | ||
1864 | if (rc) | ||
1865 | DP_NOTICE(p_hwfn, | ||
1866 | "Failed to send GET_OEM_UPDATES attention request\n"); | ||
1867 | |||
1855 | drv_mb_param = STORM_FW_VERSION; | 1868 | drv_mb_param = STORM_FW_VERSION; |
1856 | rc = qed_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt, | 1869 | rc = qed_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt, |
1857 | DRV_MSG_CODE_OV_UPDATE_STORM_FW_VER, | 1870 | DRV_MSG_CODE_OV_UPDATE_STORM_FW_VER, |
diff --git a/drivers/net/ethernet/qlogic/qed/qed_hsi.h b/drivers/net/ethernet/qlogic/qed/qed_hsi.h index 8faceb691657..9b3ef00e5782 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_hsi.h +++ b/drivers/net/ethernet/qlogic/qed/qed_hsi.h | |||
@@ -12414,6 +12414,7 @@ struct public_drv_mb { | |||
12414 | #define DRV_MSG_SET_RESOURCE_VALUE_MSG 0x35000000 | 12414 | #define DRV_MSG_SET_RESOURCE_VALUE_MSG 0x35000000 |
12415 | #define DRV_MSG_CODE_OV_UPDATE_WOL 0x38000000 | 12415 | #define DRV_MSG_CODE_OV_UPDATE_WOL 0x38000000 |
12416 | #define DRV_MSG_CODE_OV_UPDATE_ESWITCH_MODE 0x39000000 | 12416 | #define DRV_MSG_CODE_OV_UPDATE_ESWITCH_MODE 0x39000000 |
12417 | #define DRV_MSG_CODE_GET_OEM_UPDATES 0x41000000 | ||
12417 | 12418 | ||
12418 | #define DRV_MSG_CODE_BW_UPDATE_ACK 0x32000000 | 12419 | #define DRV_MSG_CODE_BW_UPDATE_ACK 0x32000000 |
12419 | #define DRV_MSG_CODE_NIG_DRAIN 0x30000000 | 12420 | #define DRV_MSG_CODE_NIG_DRAIN 0x30000000 |
@@ -12541,6 +12542,9 @@ struct public_drv_mb { | |||
12541 | #define DRV_MB_PARAM_ESWITCH_MODE_VEB 0x1 | 12542 | #define DRV_MB_PARAM_ESWITCH_MODE_VEB 0x1 |
12542 | #define DRV_MB_PARAM_ESWITCH_MODE_VEPA 0x2 | 12543 | #define DRV_MB_PARAM_ESWITCH_MODE_VEPA 0x2 |
12543 | 12544 | ||
12545 | #define DRV_MB_PARAM_DUMMY_OEM_UPDATES_MASK 0x1 | ||
12546 | #define DRV_MB_PARAM_DUMMY_OEM_UPDATES_OFFSET 0 | ||
12547 | |||
12544 | #define DRV_MB_PARAM_SET_LED_MODE_OPER 0x0 | 12548 | #define DRV_MB_PARAM_SET_LED_MODE_OPER 0x0 |
12545 | #define DRV_MB_PARAM_SET_LED_MODE_ON 0x1 | 12549 | #define DRV_MB_PARAM_SET_LED_MODE_ON 0x1 |
12546 | #define DRV_MB_PARAM_SET_LED_MODE_OFF 0x2 | 12550 | #define DRV_MB_PARAM_SET_LED_MODE_OFF 0x2 |
diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.c b/drivers/net/ethernet/qlogic/qed/qed_mcp.c index 5d37ec7e9b0b..58c7eb9d8e1b 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_mcp.c +++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.c | |||
@@ -1581,13 +1581,29 @@ static void qed_mcp_update_stag(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) | |||
1581 | p_hwfn->mcp_info->func_info.ovlan = (u16)shmem_info.ovlan_stag & | 1581 | p_hwfn->mcp_info->func_info.ovlan = (u16)shmem_info.ovlan_stag & |
1582 | FUNC_MF_CFG_OV_STAG_MASK; | 1582 | FUNC_MF_CFG_OV_STAG_MASK; |
1583 | p_hwfn->hw_info.ovlan = p_hwfn->mcp_info->func_info.ovlan; | 1583 | p_hwfn->hw_info.ovlan = p_hwfn->mcp_info->func_info.ovlan; |
1584 | if ((p_hwfn->hw_info.hw_mode & BIT(MODE_MF_SD)) && | 1584 | if (test_bit(QED_MF_OVLAN_CLSS, &p_hwfn->cdev->mf_bits)) { |
1585 | (p_hwfn->hw_info.ovlan != QED_MCP_VLAN_UNSET)) { | 1585 | if (p_hwfn->hw_info.ovlan != QED_MCP_VLAN_UNSET) { |
1586 | qed_wr(p_hwfn, p_ptt, | 1586 | qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_FUNC_TAG_VALUE, |
1587 | NIG_REG_LLH_FUNC_TAG_VALUE, p_hwfn->hw_info.ovlan); | 1587 | p_hwfn->hw_info.ovlan); |
1588 | qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_FUNC_TAG_EN, 1); | ||
1589 | |||
1590 | /* Configure DB to add external vlan to EDPM packets */ | ||
1591 | qed_wr(p_hwfn, p_ptt, DORQ_REG_TAG1_OVRD_MODE, 1); | ||
1592 | qed_wr(p_hwfn, p_ptt, DORQ_REG_PF_EXT_VID_BB_K2, | ||
1593 | p_hwfn->hw_info.ovlan); | ||
1594 | } else { | ||
1595 | qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_FUNC_TAG_EN, 0); | ||
1596 | qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_FUNC_TAG_VALUE, 0); | ||
1597 | qed_wr(p_hwfn, p_ptt, DORQ_REG_TAG1_OVRD_MODE, 0); | ||
1598 | qed_wr(p_hwfn, p_ptt, DORQ_REG_PF_EXT_VID_BB_K2, 0); | ||
1599 | } | ||
1600 | |||
1588 | qed_sp_pf_update_stag(p_hwfn); | 1601 | qed_sp_pf_update_stag(p_hwfn); |
1589 | } | 1602 | } |
1590 | 1603 | ||
1604 | DP_VERBOSE(p_hwfn, QED_MSG_SP, "ovlan = %d hw_mode = 0x%x\n", | ||
1605 | p_hwfn->mcp_info->func_info.ovlan, p_hwfn->hw_info.hw_mode); | ||
1606 | |||
1591 | /* Acknowledge the MFW */ | 1607 | /* Acknowledge the MFW */ |
1592 | qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_S_TAG_UPDATE_ACK, 0, | 1608 | qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_S_TAG_UPDATE_ACK, 0, |
1593 | &resp, &param); | 1609 | &resp, &param); |
diff --git a/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h b/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h index f736f70956fd..2440970882c4 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h +++ b/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h | |||
@@ -216,6 +216,12 @@ | |||
216 | 0x00c000UL | 216 | 0x00c000UL |
217 | #define DORQ_REG_IFEN \ | 217 | #define DORQ_REG_IFEN \ |
218 | 0x100040UL | 218 | 0x100040UL |
219 | #define DORQ_REG_TAG1_OVRD_MODE \ | ||
220 | 0x1008b4UL | ||
221 | #define DORQ_REG_PF_PCP_BB_K2 \ | ||
222 | 0x1008c4UL | ||
223 | #define DORQ_REG_PF_EXT_VID_BB_K2 \ | ||
224 | 0x1008c8UL | ||
219 | #define DORQ_REG_DB_DROP_REASON \ | 225 | #define DORQ_REG_DB_DROP_REASON \ |
220 | 0x100a2cUL | 226 | 0x100a2cUL |
221 | #define DORQ_REG_DB_DROP_DETAILS \ | 227 | #define DORQ_REG_DB_DROP_DETAILS \ |
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c index 1d8631303b53..ab30aaeac6d3 100644 --- a/drivers/net/ethernet/realtek/r8169.c +++ b/drivers/net/ethernet/realtek/r8169.c | |||
@@ -13,6 +13,7 @@ | |||
13 | #include <linux/pci.h> | 13 | #include <linux/pci.h> |
14 | #include <linux/netdevice.h> | 14 | #include <linux/netdevice.h> |
15 | #include <linux/etherdevice.h> | 15 | #include <linux/etherdevice.h> |
16 | #include <linux/clk.h> | ||
16 | #include <linux/delay.h> | 17 | #include <linux/delay.h> |
17 | #include <linux/ethtool.h> | 18 | #include <linux/ethtool.h> |
18 | #include <linux/phy.h> | 19 | #include <linux/phy.h> |
@@ -665,6 +666,7 @@ struct rtl8169_private { | |||
665 | 666 | ||
666 | u16 event_slow; | 667 | u16 event_slow; |
667 | const struct rtl_coalesce_info *coalesce_info; | 668 | const struct rtl_coalesce_info *coalesce_info; |
669 | struct clk *clk; | ||
668 | 670 | ||
669 | struct mdio_ops { | 671 | struct mdio_ops { |
670 | void (*write)(struct rtl8169_private *, int, int); | 672 | void (*write)(struct rtl8169_private *, int, int); |
@@ -4069,6 +4071,15 @@ static void rtl8169_init_phy(struct net_device *dev, struct rtl8169_private *tp) | |||
4069 | phy_speed_up(dev->phydev); | 4071 | phy_speed_up(dev->phydev); |
4070 | 4072 | ||
4071 | genphy_soft_reset(dev->phydev); | 4073 | genphy_soft_reset(dev->phydev); |
4074 | |||
4075 | /* It was reported that chip version 33 ends up with 10MBit/Half on a | ||
4076 | * 1GBit link after resuming from S3. For whatever reason the PHY on | ||
4077 | * this chip doesn't properly start a renegotiation when soft-reset. | ||
4078 | * Explicitly requesting a renegotiation fixes this. | ||
4079 | */ | ||
4080 | if (tp->mac_version == RTL_GIGA_MAC_VER_33 && | ||
4081 | dev->phydev->autoneg == AUTONEG_ENABLE) | ||
4082 | phy_restart_aneg(dev->phydev); | ||
4072 | } | 4083 | } |
4073 | 4084 | ||
4074 | static void rtl_rar_set(struct rtl8169_private *tp, u8 *addr) | 4085 | static void rtl_rar_set(struct rtl8169_private *tp, u8 *addr) |
@@ -4775,12 +4786,14 @@ static void rtl_pcie_state_l2l3_enable(struct rtl8169_private *tp, bool enable) | |||
4775 | static void rtl_hw_aspm_clkreq_enable(struct rtl8169_private *tp, bool enable) | 4786 | static void rtl_hw_aspm_clkreq_enable(struct rtl8169_private *tp, bool enable) |
4776 | { | 4787 | { |
4777 | if (enable) { | 4788 | if (enable) { |
4778 | RTL_W8(tp, Config2, RTL_R8(tp, Config2) | ClkReqEn); | ||
4779 | RTL_W8(tp, Config5, RTL_R8(tp, Config5) | ASPM_en); | 4789 | RTL_W8(tp, Config5, RTL_R8(tp, Config5) | ASPM_en); |
4790 | RTL_W8(tp, Config2, RTL_R8(tp, Config2) | ClkReqEn); | ||
4780 | } else { | 4791 | } else { |
4781 | RTL_W8(tp, Config2, RTL_R8(tp, Config2) & ~ClkReqEn); | 4792 | RTL_W8(tp, Config2, RTL_R8(tp, Config2) & ~ClkReqEn); |
4782 | RTL_W8(tp, Config5, RTL_R8(tp, Config5) & ~ASPM_en); | 4793 | RTL_W8(tp, Config5, RTL_R8(tp, Config5) & ~ASPM_en); |
4783 | } | 4794 | } |
4795 | |||
4796 | udelay(10); | ||
4784 | } | 4797 | } |
4785 | 4798 | ||
4786 | static void rtl_hw_start_8168bb(struct rtl8169_private *tp) | 4799 | static void rtl_hw_start_8168bb(struct rtl8169_private *tp) |
@@ -5625,6 +5638,8 @@ static void rtl_hw_start_8402(struct rtl8169_private *tp) | |||
5625 | 5638 | ||
5626 | static void rtl_hw_start_8106(struct rtl8169_private *tp) | 5639 | static void rtl_hw_start_8106(struct rtl8169_private *tp) |
5627 | { | 5640 | { |
5641 | rtl_hw_aspm_clkreq_enable(tp, false); | ||
5642 | |||
5628 | /* Force LAN exit from ASPM if Rx/Tx are not idle */ | 5643 | /* Force LAN exit from ASPM if Rx/Tx are not idle */ |
5629 | RTL_W32(tp, FuncEvent, RTL_R32(tp, FuncEvent) | 0x002800); | 5644 | RTL_W32(tp, FuncEvent, RTL_R32(tp, FuncEvent) | 0x002800); |
5630 | 5645 | ||
@@ -5633,6 +5648,7 @@ static void rtl_hw_start_8106(struct rtl8169_private *tp) | |||
5633 | RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) & ~PFM_EN); | 5648 | RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) & ~PFM_EN); |
5634 | 5649 | ||
5635 | rtl_pcie_state_l2l3_enable(tp, false); | 5650 | rtl_pcie_state_l2l3_enable(tp, false); |
5651 | rtl_hw_aspm_clkreq_enable(tp, true); | ||
5636 | } | 5652 | } |
5637 | 5653 | ||
5638 | static void rtl_hw_start_8101(struct rtl8169_private *tp) | 5654 | static void rtl_hw_start_8101(struct rtl8169_private *tp) |
@@ -7257,6 +7273,11 @@ static int rtl_jumbo_max(struct rtl8169_private *tp) | |||
7257 | } | 7273 | } |
7258 | } | 7274 | } |
7259 | 7275 | ||
7276 | static void rtl_disable_clk(void *data) | ||
7277 | { | ||
7278 | clk_disable_unprepare(data); | ||
7279 | } | ||
7280 | |||
7260 | static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) | 7281 | static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) |
7261 | { | 7282 | { |
7262 | const struct rtl_cfg_info *cfg = rtl_cfg_infos + ent->driver_data; | 7283 | const struct rtl_cfg_info *cfg = rtl_cfg_infos + ent->driver_data; |
@@ -7277,6 +7298,32 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
7277 | tp->msg_enable = netif_msg_init(debug.msg_enable, R8169_MSG_DEFAULT); | 7298 | tp->msg_enable = netif_msg_init(debug.msg_enable, R8169_MSG_DEFAULT); |
7278 | tp->supports_gmii = cfg->has_gmii; | 7299 | tp->supports_gmii = cfg->has_gmii; |
7279 | 7300 | ||
7301 | /* Get the *optional* external "ether_clk" used on some boards */ | ||
7302 | tp->clk = devm_clk_get(&pdev->dev, "ether_clk"); | ||
7303 | if (IS_ERR(tp->clk)) { | ||
7304 | rc = PTR_ERR(tp->clk); | ||
7305 | if (rc == -ENOENT) { | ||
7306 | /* clk-core allows NULL (for suspend / resume) */ | ||
7307 | tp->clk = NULL; | ||
7308 | } else if (rc == -EPROBE_DEFER) { | ||
7309 | return rc; | ||
7310 | } else { | ||
7311 | dev_err(&pdev->dev, "failed to get clk: %d\n", rc); | ||
7312 | return rc; | ||
7313 | } | ||
7314 | } else { | ||
7315 | rc = clk_prepare_enable(tp->clk); | ||
7316 | if (rc) { | ||
7317 | dev_err(&pdev->dev, "failed to enable clk: %d\n", rc); | ||
7318 | return rc; | ||
7319 | } | ||
7320 | |||
7321 | rc = devm_add_action_or_reset(&pdev->dev, rtl_disable_clk, | ||
7322 | tp->clk); | ||
7323 | if (rc) | ||
7324 | return rc; | ||
7325 | } | ||
7326 | |||
7280 | /* enable device (incl. PCI PM wakeup and hotplug setup) */ | 7327 | /* enable device (incl. PCI PM wakeup and hotplug setup) */ |
7281 | rc = pcim_enable_device(pdev); | 7328 | rc = pcim_enable_device(pdev); |
7282 | if (rc < 0) { | 7329 | if (rc < 0) { |
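The probe hunk above spells out the classic "optional clock" dance by hand: -ENOENT means no "ether_clk" is described and is ignored, -EPROBE_DEFER is propagated, anything else is fatal, and a devm action undoes clk_prepare_enable(). As a hedged aside, later kernels offer devm_clk_get_optional(), which folds the -ENOENT case into a NULL clock handle; a sketch under that assumption (not what this patch uses):

#include <linux/clk.h>
#include <linux/device.h>

static int example_get_ether_clk(struct device *dev, struct clk **clk)
{
	*clk = devm_clk_get_optional(dev, "ether_clk");
	if (IS_ERR(*clk))
		return PTR_ERR(*clk);       /* includes -EPROBE_DEFER */

	/* NULL is a valid "no clock" handle; enabling it is a no-op. */
	return clk_prepare_enable(*clk);
}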
diff --git a/drivers/net/ethernet/renesas/ravb.h b/drivers/net/ethernet/renesas/ravb.h index 1470fc12282b..9b6bf557a2f5 100644 --- a/drivers/net/ethernet/renesas/ravb.h +++ b/drivers/net/ethernet/renesas/ravb.h | |||
@@ -428,6 +428,7 @@ enum EIS_BIT { | |||
428 | EIS_CULF1 = 0x00000080, | 428 | EIS_CULF1 = 0x00000080, |
429 | EIS_TFFF = 0x00000100, | 429 | EIS_TFFF = 0x00000100, |
430 | EIS_QFS = 0x00010000, | 430 | EIS_QFS = 0x00010000, |
431 | EIS_RESERVED = (GENMASK(31, 17) | GENMASK(15, 11)), | ||
431 | }; | 432 | }; |
432 | 433 | ||
433 | /* RIC0 */ | 434 | /* RIC0 */ |
@@ -472,6 +473,7 @@ enum RIS0_BIT { | |||
472 | RIS0_FRF15 = 0x00008000, | 473 | RIS0_FRF15 = 0x00008000, |
473 | RIS0_FRF16 = 0x00010000, | 474 | RIS0_FRF16 = 0x00010000, |
474 | RIS0_FRF17 = 0x00020000, | 475 | RIS0_FRF17 = 0x00020000, |
476 | RIS0_RESERVED = GENMASK(31, 18), | ||
475 | }; | 477 | }; |
476 | 478 | ||
477 | /* RIC1 */ | 479 | /* RIC1 */ |
@@ -528,6 +530,7 @@ enum RIS2_BIT { | |||
528 | RIS2_QFF16 = 0x00010000, | 530 | RIS2_QFF16 = 0x00010000, |
529 | RIS2_QFF17 = 0x00020000, | 531 | RIS2_QFF17 = 0x00020000, |
530 | RIS2_RFFF = 0x80000000, | 532 | RIS2_RFFF = 0x80000000, |
533 | RIS2_RESERVED = GENMASK(30, 18), | ||
531 | }; | 534 | }; |
532 | 535 | ||
533 | /* TIC */ | 536 | /* TIC */ |
@@ -544,6 +547,7 @@ enum TIS_BIT { | |||
544 | TIS_FTF1 = 0x00000002, /* Undocumented? */ | 547 | TIS_FTF1 = 0x00000002, /* Undocumented? */ |
545 | TIS_TFUF = 0x00000100, | 548 | TIS_TFUF = 0x00000100, |
546 | TIS_TFWF = 0x00000200, | 549 | TIS_TFWF = 0x00000200, |
550 | TIS_RESERVED = (GENMASK(31, 20) | GENMASK(15, 12) | GENMASK(7, 4)) | ||
547 | }; | 551 | }; |
548 | 552 | ||
549 | /* ISS */ | 553 | /* ISS */ |
@@ -617,6 +621,7 @@ enum GIC_BIT { | |||
617 | enum GIS_BIT { | 621 | enum GIS_BIT { |
618 | GIS_PTCF = 0x00000001, /* Undocumented? */ | 622 | GIS_PTCF = 0x00000001, /* Undocumented? */ |
619 | GIS_PTMF = 0x00000004, | 623 | GIS_PTMF = 0x00000004, |
624 | GIS_RESERVED = GENMASK(15, 10), | ||
620 | }; | 625 | }; |
621 | 626 | ||
622 | /* GIE (R-Car Gen3 only) */ | 627 | /* GIE (R-Car Gen3 only) */ |
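These ravb.h additions define which bits of each interrupt-status register are reserved; the ravb_main.c and ravb_ptp.c hunks below then OR the masks into every acknowledge write. The registers are acked by writing 0 to the bit being cleared and 1 to everything kept, so previously the reserved bits were written as 1; the new masks force them to 0, which the hardware documentation presumably requires. A small sketch of the pattern, with a hypothetical helper:

#include <linux/io.h>

/* Clear 'ack' bits in a write-0-to-clear status register while never
 * writing 1 to the reserved bits. */
static inline void ack_status_bits(void __iomem *reg, u32 ack, u32 reserved)
{
	iowrite32(~(ack | reserved), reg);
}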
diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c index aff5516b781e..d6f753925352 100644 --- a/drivers/net/ethernet/renesas/ravb_main.c +++ b/drivers/net/ethernet/renesas/ravb_main.c | |||
@@ -739,10 +739,11 @@ static void ravb_error_interrupt(struct net_device *ndev) | |||
739 | u32 eis, ris2; | 739 | u32 eis, ris2; |
740 | 740 | ||
741 | eis = ravb_read(ndev, EIS); | 741 | eis = ravb_read(ndev, EIS); |
742 | ravb_write(ndev, ~EIS_QFS, EIS); | 742 | ravb_write(ndev, ~(EIS_QFS | EIS_RESERVED), EIS); |
743 | if (eis & EIS_QFS) { | 743 | if (eis & EIS_QFS) { |
744 | ris2 = ravb_read(ndev, RIS2); | 744 | ris2 = ravb_read(ndev, RIS2); |
745 | ravb_write(ndev, ~(RIS2_QFF0 | RIS2_RFFF), RIS2); | 745 | ravb_write(ndev, ~(RIS2_QFF0 | RIS2_RFFF | RIS2_RESERVED), |
746 | RIS2); | ||
746 | 747 | ||
747 | /* Receive Descriptor Empty int */ | 748 | /* Receive Descriptor Empty int */ |
748 | if (ris2 & RIS2_QFF0) | 749 | if (ris2 & RIS2_QFF0) |
@@ -795,7 +796,7 @@ static bool ravb_timestamp_interrupt(struct net_device *ndev) | |||
795 | u32 tis = ravb_read(ndev, TIS); | 796 | u32 tis = ravb_read(ndev, TIS); |
796 | 797 | ||
797 | if (tis & TIS_TFUF) { | 798 | if (tis & TIS_TFUF) { |
798 | ravb_write(ndev, ~TIS_TFUF, TIS); | 799 | ravb_write(ndev, ~(TIS_TFUF | TIS_RESERVED), TIS); |
799 | ravb_get_tx_tstamp(ndev); | 800 | ravb_get_tx_tstamp(ndev); |
800 | return true; | 801 | return true; |
801 | } | 802 | } |
@@ -930,7 +931,7 @@ static int ravb_poll(struct napi_struct *napi, int budget) | |||
930 | /* Processing RX Descriptor Ring */ | 931 | /* Processing RX Descriptor Ring */ |
931 | if (ris0 & mask) { | 932 | if (ris0 & mask) { |
932 | /* Clear RX interrupt */ | 933 | /* Clear RX interrupt */ |
933 | ravb_write(ndev, ~mask, RIS0); | 934 | ravb_write(ndev, ~(mask | RIS0_RESERVED), RIS0); |
934 | if (ravb_rx(ndev, &quota, q)) | 935 | if (ravb_rx(ndev, &quota, q)) |
935 | goto out; | 936 | goto out; |
936 | } | 937 | } |
@@ -938,7 +939,7 @@ static int ravb_poll(struct napi_struct *napi, int budget) | |||
938 | if (tis & mask) { | 939 | if (tis & mask) { |
939 | spin_lock_irqsave(&priv->lock, flags); | 940 | spin_lock_irqsave(&priv->lock, flags); |
940 | /* Clear TX interrupt */ | 941 | /* Clear TX interrupt */ |
941 | ravb_write(ndev, ~mask, TIS); | 942 | ravb_write(ndev, ~(mask | TIS_RESERVED), TIS); |
942 | ravb_tx_free(ndev, q, true); | 943 | ravb_tx_free(ndev, q, true); |
943 | netif_wake_subqueue(ndev, q); | 944 | netif_wake_subqueue(ndev, q); |
944 | mmiowb(); | 945 | mmiowb(); |
diff --git a/drivers/net/ethernet/renesas/ravb_ptp.c b/drivers/net/ethernet/renesas/ravb_ptp.c index 0721b5c35d91..dce2a40a31e3 100644 --- a/drivers/net/ethernet/renesas/ravb_ptp.c +++ b/drivers/net/ethernet/renesas/ravb_ptp.c | |||
@@ -315,7 +315,7 @@ void ravb_ptp_interrupt(struct net_device *ndev) | |||
315 | } | 315 | } |
316 | } | 316 | } |
317 | 317 | ||
318 | ravb_write(ndev, ~gis, GIS); | 318 | ravb_write(ndev, ~(gis | GIS_RESERVED), GIS); |
319 | } | 319 | } |
320 | 320 | ||
321 | void ravb_ptp_init(struct net_device *ndev, struct platform_device *pdev) | 321 | void ravb_ptp_init(struct net_device *ndev, struct platform_device *pdev) |
diff --git a/drivers/net/ethernet/seeq/ether3.c b/drivers/net/ethernet/seeq/ether3.c index c5bc124b41a9..d1bb73bf9914 100644 --- a/drivers/net/ethernet/seeq/ether3.c +++ b/drivers/net/ethernet/seeq/ether3.c | |||
@@ -77,7 +77,8 @@ static void ether3_setmulticastlist(struct net_device *dev); | |||
77 | static int ether3_rx(struct net_device *dev, unsigned int maxcnt); | 77 | static int ether3_rx(struct net_device *dev, unsigned int maxcnt); |
78 | static void ether3_tx(struct net_device *dev); | 78 | static void ether3_tx(struct net_device *dev); |
79 | static int ether3_open (struct net_device *dev); | 79 | static int ether3_open (struct net_device *dev); |
80 | static int ether3_sendpacket (struct sk_buff *skb, struct net_device *dev); | 80 | static netdev_tx_t ether3_sendpacket(struct sk_buff *skb, |
81 | struct net_device *dev); | ||
81 | static irqreturn_t ether3_interrupt (int irq, void *dev_id); | 82 | static irqreturn_t ether3_interrupt (int irq, void *dev_id); |
82 | static int ether3_close (struct net_device *dev); | 83 | static int ether3_close (struct net_device *dev); |
83 | static void ether3_setmulticastlist (struct net_device *dev); | 84 | static void ether3_setmulticastlist (struct net_device *dev); |
@@ -481,7 +482,7 @@ static void ether3_timeout(struct net_device *dev) | |||
481 | /* | 482 | /* |
482 | * Transmit a packet | 483 | * Transmit a packet |
483 | */ | 484 | */ |
484 | static int | 485 | static netdev_tx_t |
485 | ether3_sendpacket(struct sk_buff *skb, struct net_device *dev) | 486 | ether3_sendpacket(struct sk_buff *skb, struct net_device *dev) |
486 | { | 487 | { |
487 | unsigned long flags; | 488 | unsigned long flags; |
diff --git a/drivers/net/ethernet/seeq/sgiseeq.c b/drivers/net/ethernet/seeq/sgiseeq.c index 573691bc3b71..70cce63a6081 100644 --- a/drivers/net/ethernet/seeq/sgiseeq.c +++ b/drivers/net/ethernet/seeq/sgiseeq.c | |||
@@ -578,7 +578,8 @@ static inline int sgiseeq_reset(struct net_device *dev) | |||
578 | return 0; | 578 | return 0; |
579 | } | 579 | } |
580 | 580 | ||
581 | static int sgiseeq_start_xmit(struct sk_buff *skb, struct net_device *dev) | 581 | static netdev_tx_t |
582 | sgiseeq_start_xmit(struct sk_buff *skb, struct net_device *dev) | ||
582 | { | 583 | { |
583 | struct sgiseeq_private *sp = netdev_priv(dev); | 584 | struct sgiseeq_private *sp = netdev_priv(dev); |
584 | struct hpc3_ethregs *hregs = sp->hregs; | 585 | struct hpc3_ethregs *hregs = sp->hregs; |
diff --git a/drivers/net/ethernet/sgi/ioc3-eth.c b/drivers/net/ethernet/sgi/ioc3-eth.c index 18d533fdf14c..3140999642ba 100644 --- a/drivers/net/ethernet/sgi/ioc3-eth.c +++ b/drivers/net/ethernet/sgi/ioc3-eth.c | |||
@@ -99,7 +99,7 @@ struct ioc3_private { | |||
99 | 99 | ||
100 | static int ioc3_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); | 100 | static int ioc3_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); |
101 | static void ioc3_set_multicast_list(struct net_device *dev); | 101 | static void ioc3_set_multicast_list(struct net_device *dev); |
102 | static int ioc3_start_xmit(struct sk_buff *skb, struct net_device *dev); | 102 | static netdev_tx_t ioc3_start_xmit(struct sk_buff *skb, struct net_device *dev); |
103 | static void ioc3_timeout(struct net_device *dev); | 103 | static void ioc3_timeout(struct net_device *dev); |
104 | static inline unsigned int ioc3_hash(const unsigned char *addr); | 104 | static inline unsigned int ioc3_hash(const unsigned char *addr); |
105 | static inline void ioc3_stop(struct ioc3_private *ip); | 105 | static inline void ioc3_stop(struct ioc3_private *ip); |
@@ -1390,7 +1390,7 @@ static struct pci_driver ioc3_driver = { | |||
1390 | .remove = ioc3_remove_one, | 1390 | .remove = ioc3_remove_one, |
1391 | }; | 1391 | }; |
1392 | 1392 | ||
1393 | static int ioc3_start_xmit(struct sk_buff *skb, struct net_device *dev) | 1393 | static netdev_tx_t ioc3_start_xmit(struct sk_buff *skb, struct net_device *dev) |
1394 | { | 1394 | { |
1395 | unsigned long data; | 1395 | unsigned long data; |
1396 | struct ioc3_private *ip = netdev_priv(dev); | 1396 | struct ioc3_private *ip = netdev_priv(dev); |
diff --git a/drivers/net/ethernet/sgi/meth.c b/drivers/net/ethernet/sgi/meth.c index ea55abd62ec7..703fbbefea44 100644 --- a/drivers/net/ethernet/sgi/meth.c +++ b/drivers/net/ethernet/sgi/meth.c | |||
@@ -697,7 +697,7 @@ static void meth_add_to_tx_ring(struct meth_private *priv, struct sk_buff *skb) | |||
697 | /* | 697 | /* |
698 | * Transmit a packet (called by the kernel) | 698 | * Transmit a packet (called by the kernel) |
699 | */ | 699 | */ |
700 | static int meth_tx(struct sk_buff *skb, struct net_device *dev) | 700 | static netdev_tx_t meth_tx(struct sk_buff *skb, struct net_device *dev) |
701 | { | 701 | { |
702 | struct meth_private *priv = netdev_priv(dev); | 702 | struct meth_private *priv = netdev_priv(dev); |
703 | unsigned long flags; | 703 | unsigned long flags; |
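The seeq/sgi hunks above only change prototypes: .ndo_start_xmit implementations are expected to return netdev_tx_t rather than int, so the remaining drivers returning int get converted. A minimal sketch of the expected shape (not taken from any of these drivers; the priv layout is made up):

#include <linux/netdevice.h>

struct example_priv {
	bool ring_full;
};

static netdev_tx_t example_start_xmit(struct sk_buff *skb,
				      struct net_device *dev)
{
	struct example_priv *priv = netdev_priv(dev);

	if (priv->ring_full)
		return NETDEV_TX_BUSY;   /* core will retry this skb */

	/* ... map the buffer, post the descriptor, update stats ... */
	dev_kfree_skb_any(skb);          /* placeholder for a real TX path */
	return NETDEV_TX_OK;
}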
diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h index 1854f270ad66..b1b305f8f414 100644 --- a/drivers/net/ethernet/stmicro/stmmac/common.h +++ b/drivers/net/ethernet/stmicro/stmmac/common.h | |||
@@ -258,10 +258,10 @@ struct stmmac_safety_stats { | |||
258 | #define MAX_DMA_RIWT 0xff | 258 | #define MAX_DMA_RIWT 0xff |
259 | #define MIN_DMA_RIWT 0x20 | 259 | #define MIN_DMA_RIWT 0x20 |
260 | /* Tx coalesce parameters */ | 260 | /* Tx coalesce parameters */ |
261 | #define STMMAC_COAL_TX_TIMER 40000 | 261 | #define STMMAC_COAL_TX_TIMER 1000 |
262 | #define STMMAC_MAX_COAL_TX_TICK 100000 | 262 | #define STMMAC_MAX_COAL_TX_TICK 100000 |
263 | #define STMMAC_TX_MAX_FRAMES 256 | 263 | #define STMMAC_TX_MAX_FRAMES 256 |
264 | #define STMMAC_TX_FRAMES 64 | 264 | #define STMMAC_TX_FRAMES 25 |
265 | 265 | ||
266 | /* Packets types */ | 266 | /* Packets types */ |
267 | enum packets_types { | 267 | enum packets_types { |
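For a rough sense of scale (illustrative arithmetic only): at 1 Gbit/s a 1500-byte frame takes about 12 us on the wire, so the new threshold of 25 frames accumulates in roughly 300 us, comfortably inside the new 1000 us timer; with the old 40000 us timer, lightly loaded queues could sit on completed descriptors for tens of milliseconds before cleanup.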
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h index c0a855b7ab3b..63e1064b27a2 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h | |||
@@ -48,6 +48,8 @@ struct stmmac_tx_info { | |||
48 | 48 | ||
49 | /* Frequently used values are kept adjacent for cache effect */ | 49 | /* Frequently used values are kept adjacent for cache effect */ |
50 | struct stmmac_tx_queue { | 50 | struct stmmac_tx_queue { |
51 | u32 tx_count_frames; | ||
52 | struct timer_list txtimer; | ||
51 | u32 queue_index; | 53 | u32 queue_index; |
52 | struct stmmac_priv *priv_data; | 54 | struct stmmac_priv *priv_data; |
53 | struct dma_extended_desc *dma_etx ____cacheline_aligned_in_smp; | 55 | struct dma_extended_desc *dma_etx ____cacheline_aligned_in_smp; |
@@ -73,7 +75,14 @@ struct stmmac_rx_queue { | |||
73 | u32 rx_zeroc_thresh; | 75 | u32 rx_zeroc_thresh; |
74 | dma_addr_t dma_rx_phy; | 76 | dma_addr_t dma_rx_phy; |
75 | u32 rx_tail_addr; | 77 | u32 rx_tail_addr; |
78 | }; | ||
79 | |||
80 | struct stmmac_channel { | ||
76 | struct napi_struct napi ____cacheline_aligned_in_smp; | 81 | struct napi_struct napi ____cacheline_aligned_in_smp; |
82 | struct stmmac_priv *priv_data; | ||
83 | u32 index; | ||
84 | int has_rx; | ||
85 | int has_tx; | ||
77 | }; | 86 | }; |
78 | 87 | ||
79 | struct stmmac_tc_entry { | 88 | struct stmmac_tc_entry { |
@@ -109,14 +118,12 @@ struct stmmac_pps_cfg { | |||
109 | 118 | ||
110 | struct stmmac_priv { | 119 | struct stmmac_priv { |
111 | /* Frequently used values are kept adjacent for cache effect */ | 120 | /* Frequently used values are kept adjacent for cache effect */ |
112 | u32 tx_count_frames; | ||
113 | u32 tx_coal_frames; | 121 | u32 tx_coal_frames; |
114 | u32 tx_coal_timer; | 122 | u32 tx_coal_timer; |
115 | 123 | ||
116 | int tx_coalesce; | 124 | int tx_coalesce; |
117 | int hwts_tx_en; | 125 | int hwts_tx_en; |
118 | bool tx_path_in_lpi_mode; | 126 | bool tx_path_in_lpi_mode; |
119 | struct timer_list txtimer; | ||
120 | bool tso; | 127 | bool tso; |
121 | 128 | ||
122 | unsigned int dma_buf_sz; | 129 | unsigned int dma_buf_sz; |
@@ -137,6 +144,9 @@ struct stmmac_priv { | |||
137 | /* TX Queue */ | 144 | /* TX Queue */ |
138 | struct stmmac_tx_queue tx_queue[MTL_MAX_TX_QUEUES]; | 145 | struct stmmac_tx_queue tx_queue[MTL_MAX_TX_QUEUES]; |
139 | 146 | ||
147 | /* Generic channel for NAPI */ | ||
148 | struct stmmac_channel channel[STMMAC_CH_MAX]; | ||
149 | |||
140 | bool oldlink; | 150 | bool oldlink; |
141 | int speed; | 151 | int speed; |
142 | int oldduplex; | 152 | int oldduplex; |
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index 9f458bb16f2a..75896d6ba6e2 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | |||
@@ -148,12 +148,14 @@ static void stmmac_verify_args(void) | |||
148 | static void stmmac_disable_all_queues(struct stmmac_priv *priv) | 148 | static void stmmac_disable_all_queues(struct stmmac_priv *priv) |
149 | { | 149 | { |
150 | u32 rx_queues_cnt = priv->plat->rx_queues_to_use; | 150 | u32 rx_queues_cnt = priv->plat->rx_queues_to_use; |
151 | u32 tx_queues_cnt = priv->plat->tx_queues_to_use; | ||
152 | u32 maxq = max(rx_queues_cnt, tx_queues_cnt); | ||
151 | u32 queue; | 153 | u32 queue; |
152 | 154 | ||
153 | for (queue = 0; queue < rx_queues_cnt; queue++) { | 155 | for (queue = 0; queue < maxq; queue++) { |
154 | struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; | 156 | struct stmmac_channel *ch = &priv->channel[queue]; |
155 | 157 | ||
156 | napi_disable(&rx_q->napi); | 158 | napi_disable(&ch->napi); |
157 | } | 159 | } |
158 | } | 160 | } |
159 | 161 | ||
@@ -164,12 +166,14 @@ static void stmmac_disable_all_queues(struct stmmac_priv *priv) | |||
164 | static void stmmac_enable_all_queues(struct stmmac_priv *priv) | 166 | static void stmmac_enable_all_queues(struct stmmac_priv *priv) |
165 | { | 167 | { |
166 | u32 rx_queues_cnt = priv->plat->rx_queues_to_use; | 168 | u32 rx_queues_cnt = priv->plat->rx_queues_to_use; |
169 | u32 tx_queues_cnt = priv->plat->tx_queues_to_use; | ||
170 | u32 maxq = max(rx_queues_cnt, tx_queues_cnt); | ||
167 | u32 queue; | 171 | u32 queue; |
168 | 172 | ||
169 | for (queue = 0; queue < rx_queues_cnt; queue++) { | 173 | for (queue = 0; queue < maxq; queue++) { |
170 | struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; | 174 | struct stmmac_channel *ch = &priv->channel[queue]; |
171 | 175 | ||
172 | napi_enable(&rx_q->napi); | 176 | napi_enable(&ch->napi); |
173 | } | 177 | } |
174 | } | 178 | } |
175 | 179 | ||
@@ -1843,18 +1847,18 @@ static void stmmac_dma_operation_mode(struct stmmac_priv *priv) | |||
1843 | * @queue: TX queue index | 1847 | * @queue: TX queue index |
1844 | * Description: it reclaims the transmit resources after transmission completes. | 1848 | * Description: it reclaims the transmit resources after transmission completes. |
1845 | */ | 1849 | */ |
1846 | static void stmmac_tx_clean(struct stmmac_priv *priv, u32 queue) | 1850 | static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue) |
1847 | { | 1851 | { |
1848 | struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue]; | 1852 | struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue]; |
1849 | unsigned int bytes_compl = 0, pkts_compl = 0; | 1853 | unsigned int bytes_compl = 0, pkts_compl = 0; |
1850 | unsigned int entry; | 1854 | unsigned int entry, count = 0; |
1851 | 1855 | ||
1852 | netif_tx_lock(priv->dev); | 1856 | __netif_tx_lock_bh(netdev_get_tx_queue(priv->dev, queue)); |
1853 | 1857 | ||
1854 | priv->xstats.tx_clean++; | 1858 | priv->xstats.tx_clean++; |
1855 | 1859 | ||
1856 | entry = tx_q->dirty_tx; | 1860 | entry = tx_q->dirty_tx; |
1857 | while (entry != tx_q->cur_tx) { | 1861 | while ((entry != tx_q->cur_tx) && (count < budget)) { |
1858 | struct sk_buff *skb = tx_q->tx_skbuff[entry]; | 1862 | struct sk_buff *skb = tx_q->tx_skbuff[entry]; |
1859 | struct dma_desc *p; | 1863 | struct dma_desc *p; |
1860 | int status; | 1864 | int status; |
@@ -1870,6 +1874,8 @@ static void stmmac_tx_clean(struct stmmac_priv *priv, u32 queue) | |||
1870 | if (unlikely(status & tx_dma_own)) | 1874 | if (unlikely(status & tx_dma_own)) |
1871 | break; | 1875 | break; |
1872 | 1876 | ||
1877 | count++; | ||
1878 | |||
1873 | /* Make sure descriptor fields are read after reading | 1879 | /* Make sure descriptor fields are read after reading |
1874 | * the own bit. | 1880 | * the own bit. |
1875 | */ | 1881 | */ |
@@ -1937,7 +1943,10 @@ static void stmmac_tx_clean(struct stmmac_priv *priv, u32 queue) | |||
1937 | stmmac_enable_eee_mode(priv); | 1943 | stmmac_enable_eee_mode(priv); |
1938 | mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(eee_timer)); | 1944 | mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(eee_timer)); |
1939 | } | 1945 | } |
1940 | netif_tx_unlock(priv->dev); | 1946 | |
1947 | __netif_tx_unlock_bh(netdev_get_tx_queue(priv->dev, queue)); | ||
1948 | |||
1949 | return count; | ||
1941 | } | 1950 | } |
1942 | 1951 | ||
1943 | /** | 1952 | /** |
@@ -2020,6 +2029,33 @@ static bool stmmac_safety_feat_interrupt(struct stmmac_priv *priv) | |||
2020 | return false; | 2029 | return false; |
2021 | } | 2030 | } |
2022 | 2031 | ||
2032 | static int stmmac_napi_check(struct stmmac_priv *priv, u32 chan) | ||
2033 | { | ||
2034 | int status = stmmac_dma_interrupt_status(priv, priv->ioaddr, | ||
2035 | &priv->xstats, chan); | ||
2036 | struct stmmac_channel *ch = &priv->channel[chan]; | ||
2037 | bool needs_work = false; | ||
2038 | |||
2039 | if ((status & handle_rx) && ch->has_rx) { | ||
2040 | needs_work = true; | ||
2041 | } else { | ||
2042 | status &= ~handle_rx; | ||
2043 | } | ||
2044 | |||
2045 | if ((status & handle_tx) && ch->has_tx) { | ||
2046 | needs_work = true; | ||
2047 | } else { | ||
2048 | status &= ~handle_tx; | ||
2049 | } | ||
2050 | |||
2051 | if (needs_work && napi_schedule_prep(&ch->napi)) { | ||
2052 | stmmac_disable_dma_irq(priv, priv->ioaddr, chan); | ||
2053 | __napi_schedule(&ch->napi); | ||
2054 | } | ||
2055 | |||
2056 | return status; | ||
2057 | } | ||
2058 | |||
2023 | /** | 2059 | /** |
2024 | * stmmac_dma_interrupt - DMA ISR | 2060 | * stmmac_dma_interrupt - DMA ISR |
2025 | * @priv: driver private structure | 2061 | * @priv: driver private structure |
@@ -2034,57 +2070,14 @@ static void stmmac_dma_interrupt(struct stmmac_priv *priv) | |||
2034 | u32 channels_to_check = tx_channel_count > rx_channel_count ? | 2070 | u32 channels_to_check = tx_channel_count > rx_channel_count ? |
2035 | tx_channel_count : rx_channel_count; | 2071 | tx_channel_count : rx_channel_count; |
2036 | u32 chan; | 2072 | u32 chan; |
2037 | bool poll_scheduled = false; | ||
2038 | int status[max_t(u32, MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES)]; | 2073 | int status[max_t(u32, MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES)]; |
2039 | 2074 | ||
2040 | /* Make sure we never check beyond our status buffer. */ | 2075 | /* Make sure we never check beyond our status buffer. */ |
2041 | if (WARN_ON_ONCE(channels_to_check > ARRAY_SIZE(status))) | 2076 | if (WARN_ON_ONCE(channels_to_check > ARRAY_SIZE(status))) |
2042 | channels_to_check = ARRAY_SIZE(status); | 2077 | channels_to_check = ARRAY_SIZE(status); |
2043 | 2078 | ||
2044 | /* Each DMA channel can be used for rx and tx simultaneously, yet | ||
2045 | * napi_struct is embedded in struct stmmac_rx_queue rather than in a | ||
2046 | * stmmac_channel struct. | ||
2047 | * Because of this, stmmac_poll currently checks (and possibly wakes) | ||
2048 | * all tx queues rather than just a single tx queue. | ||
2049 | */ | ||
2050 | for (chan = 0; chan < channels_to_check; chan++) | 2079 | for (chan = 0; chan < channels_to_check; chan++) |
2051 | status[chan] = stmmac_dma_interrupt_status(priv, priv->ioaddr, | 2080 | status[chan] = stmmac_napi_check(priv, chan); |
2052 | &priv->xstats, chan); | ||
2053 | |||
2054 | for (chan = 0; chan < rx_channel_count; chan++) { | ||
2055 | if (likely(status[chan] & handle_rx)) { | ||
2056 | struct stmmac_rx_queue *rx_q = &priv->rx_queue[chan]; | ||
2057 | |||
2058 | if (likely(napi_schedule_prep(&rx_q->napi))) { | ||
2059 | stmmac_disable_dma_irq(priv, priv->ioaddr, chan); | ||
2060 | __napi_schedule(&rx_q->napi); | ||
2061 | poll_scheduled = true; | ||
2062 | } | ||
2063 | } | ||
2064 | } | ||
2065 | |||
2066 | /* If we scheduled poll, we already know that tx queues will be checked. | ||
2067 | * If we didn't schedule poll, see if any DMA channel (used by tx) has a | ||
2068 | * completed transmission, if so, call stmmac_poll (once). | ||
2069 | */ | ||
2070 | if (!poll_scheduled) { | ||
2071 | for (chan = 0; chan < tx_channel_count; chan++) { | ||
2072 | if (status[chan] & handle_tx) { | ||
2073 | /* It doesn't matter what rx queue we choose | ||
2074 | * here. We use 0 since it always exists. | ||
2075 | */ | ||
2076 | struct stmmac_rx_queue *rx_q = | ||
2077 | &priv->rx_queue[0]; | ||
2078 | |||
2079 | if (likely(napi_schedule_prep(&rx_q->napi))) { | ||
2080 | stmmac_disable_dma_irq(priv, | ||
2081 | priv->ioaddr, chan); | ||
2082 | __napi_schedule(&rx_q->napi); | ||
2083 | } | ||
2084 | break; | ||
2085 | } | ||
2086 | } | ||
2087 | } | ||
2088 | 2081 | ||
2089 | for (chan = 0; chan < tx_channel_count; chan++) { | 2082 | for (chan = 0; chan < tx_channel_count; chan++) { |
2090 | if (unlikely(status[chan] & tx_hard_error_bump_tc)) { | 2083 | if (unlikely(status[chan] & tx_hard_error_bump_tc)) { |
@@ -2220,8 +2213,7 @@ static int stmmac_init_dma_engine(struct stmmac_priv *priv) | |||
2220 | stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg, | 2213 | stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg, |
2221 | tx_q->dma_tx_phy, chan); | 2214 | tx_q->dma_tx_phy, chan); |
2222 | 2215 | ||
2223 | tx_q->tx_tail_addr = tx_q->dma_tx_phy + | 2216 | tx_q->tx_tail_addr = tx_q->dma_tx_phy; |
2224 | (DMA_TX_SIZE * sizeof(struct dma_desc)); | ||
2225 | stmmac_set_tx_tail_ptr(priv, priv->ioaddr, | 2217 | stmmac_set_tx_tail_ptr(priv, priv->ioaddr, |
2226 | tx_q->tx_tail_addr, chan); | 2218 | tx_q->tx_tail_addr, chan); |
2227 | } | 2219 | } |
@@ -2233,6 +2225,13 @@ static int stmmac_init_dma_engine(struct stmmac_priv *priv) | |||
2233 | return ret; | 2225 | return ret; |
2234 | } | 2226 | } |
2235 | 2227 | ||
2228 | static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue) | ||
2229 | { | ||
2230 | struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue]; | ||
2231 | |||
2232 | mod_timer(&tx_q->txtimer, STMMAC_COAL_TIMER(priv->tx_coal_timer)); | ||
2233 | } | ||
2234 | |||
2236 | /** | 2235 | /** |
2237 | * stmmac_tx_timer - mitigation sw timer for tx. | 2236 | * stmmac_tx_timer - mitigation sw timer for tx. |
2238 | * @data: data pointer | 2237 | * @data: data pointer |
@@ -2241,13 +2240,14 @@ static int stmmac_init_dma_engine(struct stmmac_priv *priv) | |||
2241 | */ | 2240 | */ |
2242 | static void stmmac_tx_timer(struct timer_list *t) | 2241 | static void stmmac_tx_timer(struct timer_list *t) |
2243 | { | 2242 | { |
2244 | struct stmmac_priv *priv = from_timer(priv, t, txtimer); | 2243 | struct stmmac_tx_queue *tx_q = from_timer(tx_q, t, txtimer); |
2245 | u32 tx_queues_count = priv->plat->tx_queues_to_use; | 2244 | struct stmmac_priv *priv = tx_q->priv_data; |
2246 | u32 queue; | 2245 | struct stmmac_channel *ch; |
2246 | |||
2247 | ch = &priv->channel[tx_q->queue_index]; | ||
2247 | 2248 | ||
2248 | /* let's scan all the tx queues */ | 2249 | if (likely(napi_schedule_prep(&ch->napi))) |
2249 | for (queue = 0; queue < tx_queues_count; queue++) | 2250 | __napi_schedule(&ch->napi); |
2250 | stmmac_tx_clean(priv, queue); | ||
2251 | } | 2251 | } |
2252 | 2252 | ||
2253 | /** | 2253 | /** |
@@ -2260,11 +2260,17 @@ static void stmmac_tx_timer(struct timer_list *t) | |||
2260 | */ | 2260 | */ |
2261 | static void stmmac_init_tx_coalesce(struct stmmac_priv *priv) | 2261 | static void stmmac_init_tx_coalesce(struct stmmac_priv *priv) |
2262 | { | 2262 | { |
2263 | u32 tx_channel_count = priv->plat->tx_queues_to_use; | ||
2264 | u32 chan; | ||
2265 | |||
2263 | priv->tx_coal_frames = STMMAC_TX_FRAMES; | 2266 | priv->tx_coal_frames = STMMAC_TX_FRAMES; |
2264 | priv->tx_coal_timer = STMMAC_COAL_TX_TIMER; | 2267 | priv->tx_coal_timer = STMMAC_COAL_TX_TIMER; |
2265 | timer_setup(&priv->txtimer, stmmac_tx_timer, 0); | 2268 | |
2266 | priv->txtimer.expires = STMMAC_COAL_TIMER(priv->tx_coal_timer); | 2269 | for (chan = 0; chan < tx_channel_count; chan++) { |
2267 | add_timer(&priv->txtimer); | 2270 | struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan]; |
2271 | |||
2272 | timer_setup(&tx_q->txtimer, stmmac_tx_timer, 0); | ||
2273 | } | ||
2268 | } | 2274 | } |
2269 | 2275 | ||
2270 | static void stmmac_set_rings_length(struct stmmac_priv *priv) | 2276 | static void stmmac_set_rings_length(struct stmmac_priv *priv) |
@@ -2592,6 +2598,7 @@ static void stmmac_hw_teardown(struct net_device *dev) | |||
2592 | static int stmmac_open(struct net_device *dev) | 2598 | static int stmmac_open(struct net_device *dev) |
2593 | { | 2599 | { |
2594 | struct stmmac_priv *priv = netdev_priv(dev); | 2600 | struct stmmac_priv *priv = netdev_priv(dev); |
2601 | u32 chan; | ||
2595 | int ret; | 2602 | int ret; |
2596 | 2603 | ||
2597 | stmmac_check_ether_addr(priv); | 2604 | stmmac_check_ether_addr(priv); |
@@ -2688,7 +2695,9 @@ irq_error: | |||
2688 | if (dev->phydev) | 2695 | if (dev->phydev) |
2689 | phy_stop(dev->phydev); | 2696 | phy_stop(dev->phydev); |
2690 | 2697 | ||
2691 | del_timer_sync(&priv->txtimer); | 2698 | for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) |
2699 | del_timer_sync(&priv->tx_queue[chan].txtimer); | ||
2700 | |||
2692 | stmmac_hw_teardown(dev); | 2701 | stmmac_hw_teardown(dev); |
2693 | init_error: | 2702 | init_error: |
2694 | free_dma_desc_resources(priv); | 2703 | free_dma_desc_resources(priv); |
@@ -2708,6 +2717,7 @@ dma_desc_error: | |||
2708 | static int stmmac_release(struct net_device *dev) | 2717 | static int stmmac_release(struct net_device *dev) |
2709 | { | 2718 | { |
2710 | struct stmmac_priv *priv = netdev_priv(dev); | 2719 | struct stmmac_priv *priv = netdev_priv(dev); |
2720 | u32 chan; | ||
2711 | 2721 | ||
2712 | if (priv->eee_enabled) | 2722 | if (priv->eee_enabled) |
2713 | del_timer_sync(&priv->eee_ctrl_timer); | 2723 | del_timer_sync(&priv->eee_ctrl_timer); |
@@ -2722,7 +2732,8 @@ static int stmmac_release(struct net_device *dev) | |||
2722 | 2732 | ||
2723 | stmmac_disable_all_queues(priv); | 2733 | stmmac_disable_all_queues(priv); |
2724 | 2734 | ||
2725 | del_timer_sync(&priv->txtimer); | 2735 | for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) |
2736 | del_timer_sync(&priv->tx_queue[chan].txtimer); | ||
2726 | 2737 | ||
2727 | /* Free the IRQ lines */ | 2738 | /* Free the IRQ lines */ |
2728 | free_irq(dev->irq, dev); | 2739 | free_irq(dev->irq, dev); |
@@ -2936,14 +2947,13 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev) | |||
2936 | priv->xstats.tx_tso_nfrags += nfrags; | 2947 | priv->xstats.tx_tso_nfrags += nfrags; |
2937 | 2948 | ||
2938 | /* Manage tx mitigation */ | 2949 | /* Manage tx mitigation */ |
2939 | priv->tx_count_frames += nfrags + 1; | 2950 | tx_q->tx_count_frames += nfrags + 1; |
2940 | if (likely(priv->tx_coal_frames > priv->tx_count_frames)) { | 2951 | if (priv->tx_coal_frames <= tx_q->tx_count_frames) { |
2941 | mod_timer(&priv->txtimer, | ||
2942 | STMMAC_COAL_TIMER(priv->tx_coal_timer)); | ||
2943 | } else { | ||
2944 | priv->tx_count_frames = 0; | ||
2945 | stmmac_set_tx_ic(priv, desc); | 2952 | stmmac_set_tx_ic(priv, desc); |
2946 | priv->xstats.tx_set_ic_bit++; | 2953 | priv->xstats.tx_set_ic_bit++; |
2954 | tx_q->tx_count_frames = 0; | ||
2955 | } else { | ||
2956 | stmmac_tx_timer_arm(priv, queue); | ||
2947 | } | 2957 | } |
2948 | 2958 | ||
2949 | skb_tx_timestamp(skb); | 2959 | skb_tx_timestamp(skb); |
@@ -2992,6 +3002,7 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev) | |||
2992 | 3002 | ||
2993 | netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len); | 3003 | netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len); |
2994 | 3004 | ||
3005 | tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * sizeof(*desc)); | ||
2995 | stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue); | 3006 | stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue); |
2996 | 3007 | ||
2997 | return NETDEV_TX_OK; | 3008 | return NETDEV_TX_OK; |
@@ -3146,14 +3157,13 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev) | |||
3146 | * This approach takes care about the fragments: desc is the first | 3157 | * This approach takes care about the fragments: desc is the first |
3147 | * element in case of no SG. | 3158 | * element in case of no SG. |
3148 | */ | 3159 | */ |
3149 | priv->tx_count_frames += nfrags + 1; | 3160 | tx_q->tx_count_frames += nfrags + 1; |
3150 | if (likely(priv->tx_coal_frames > priv->tx_count_frames)) { | 3161 | if (priv->tx_coal_frames <= tx_q->tx_count_frames) { |
3151 | mod_timer(&priv->txtimer, | ||
3152 | STMMAC_COAL_TIMER(priv->tx_coal_timer)); | ||
3153 | } else { | ||
3154 | priv->tx_count_frames = 0; | ||
3155 | stmmac_set_tx_ic(priv, desc); | 3162 | stmmac_set_tx_ic(priv, desc); |
3156 | priv->xstats.tx_set_ic_bit++; | 3163 | priv->xstats.tx_set_ic_bit++; |
3164 | tx_q->tx_count_frames = 0; | ||
3165 | } else { | ||
3166 | stmmac_tx_timer_arm(priv, queue); | ||
3157 | } | 3167 | } |
3158 | 3168 | ||
3159 | skb_tx_timestamp(skb); | 3169 | skb_tx_timestamp(skb); |
@@ -3199,6 +3209,8 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev) | |||
3199 | netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len); | 3209 | netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len); |
3200 | 3210 | ||
3201 | stmmac_enable_dma_transmission(priv, priv->ioaddr); | 3211 | stmmac_enable_dma_transmission(priv, priv->ioaddr); |
3212 | |||
3213 | tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * sizeof(*desc)); | ||
3202 | stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue); | 3214 | stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue); |
3203 | 3215 | ||
3204 | return NETDEV_TX_OK; | 3216 | return NETDEV_TX_OK; |
@@ -3319,6 +3331,7 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue) | |||
3319 | static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue) | 3331 | static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue) |
3320 | { | 3332 | { |
3321 | struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; | 3333 | struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; |
3334 | struct stmmac_channel *ch = &priv->channel[queue]; | ||
3322 | unsigned int entry = rx_q->cur_rx; | 3335 | unsigned int entry = rx_q->cur_rx; |
3323 | int coe = priv->hw->rx_csum; | 3336 | int coe = priv->hw->rx_csum; |
3324 | unsigned int next_entry; | 3337 | unsigned int next_entry; |
@@ -3491,7 +3504,7 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue) | |||
3491 | else | 3504 | else |
3492 | skb->ip_summed = CHECKSUM_UNNECESSARY; | 3505 | skb->ip_summed = CHECKSUM_UNNECESSARY; |
3493 | 3506 | ||
3494 | napi_gro_receive(&rx_q->napi, skb); | 3507 | napi_gro_receive(&ch->napi, skb); |
3495 | 3508 | ||
3496 | priv->dev->stats.rx_packets++; | 3509 | priv->dev->stats.rx_packets++; |
3497 | priv->dev->stats.rx_bytes += frame_len; | 3510 | priv->dev->stats.rx_bytes += frame_len; |
@@ -3514,27 +3527,33 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue) | |||
3514 | * Description : | 3527 | * Description : |
3515 | * To look at the incoming frames and clear the tx resources. | 3528 | * To look at the incoming frames and clear the tx resources. |
3516 | */ | 3529 | */ |
3517 | static int stmmac_poll(struct napi_struct *napi, int budget) | 3530 | static int stmmac_napi_poll(struct napi_struct *napi, int budget) |
3518 | { | 3531 | { |
3519 | struct stmmac_rx_queue *rx_q = | 3532 | struct stmmac_channel *ch = |
3520 | container_of(napi, struct stmmac_rx_queue, napi); | 3533 | container_of(napi, struct stmmac_channel, napi); |
3521 | struct stmmac_priv *priv = rx_q->priv_data; | 3534 | struct stmmac_priv *priv = ch->priv_data; |
3522 | u32 tx_count = priv->plat->tx_queues_to_use; | 3535 | int work_done = 0, work_rem = budget; |
3523 | u32 chan = rx_q->queue_index; | 3536 | u32 chan = ch->index; |
3524 | int work_done = 0; | ||
3525 | u32 queue; | ||
3526 | 3537 | ||
3527 | priv->xstats.napi_poll++; | 3538 | priv->xstats.napi_poll++; |
3528 | 3539 | ||
3529 | /* check all the queues */ | 3540 | if (ch->has_tx) { |
3530 | for (queue = 0; queue < tx_count; queue++) | 3541 | int done = stmmac_tx_clean(priv, work_rem, chan); |
3531 | stmmac_tx_clean(priv, queue); | ||
3532 | 3542 | ||
3533 | work_done = stmmac_rx(priv, budget, rx_q->queue_index); | 3543 | work_done += done; |
3534 | if (work_done < budget) { | 3544 | work_rem -= done; |
3535 | napi_complete_done(napi, work_done); | 3545 | } |
3536 | stmmac_enable_dma_irq(priv, priv->ioaddr, chan); | 3546 | |
3547 | if (ch->has_rx) { | ||
3548 | int done = stmmac_rx(priv, work_rem, chan); | ||
3549 | |||
3550 | work_done += done; | ||
3551 | work_rem -= done; | ||
3537 | } | 3552 | } |
3553 | |||
3554 | if (work_done < budget && napi_complete_done(napi, work_done)) | ||
3555 | stmmac_enable_dma_irq(priv, priv->ioaddr, chan); | ||
3556 | |||
3538 | return work_done; | 3557 | return work_done; |
3539 | } | 3558 | } |
3540 | 3559 | ||
@@ -4198,8 +4217,8 @@ int stmmac_dvr_probe(struct device *device, | |||
4198 | { | 4217 | { |
4199 | struct net_device *ndev = NULL; | 4218 | struct net_device *ndev = NULL; |
4200 | struct stmmac_priv *priv; | 4219 | struct stmmac_priv *priv; |
4220 | u32 queue, maxq; | ||
4201 | int ret = 0; | 4221 | int ret = 0; |
4202 | u32 queue; | ||
4203 | 4222 | ||
4204 | ndev = alloc_etherdev_mqs(sizeof(struct stmmac_priv), | 4223 | ndev = alloc_etherdev_mqs(sizeof(struct stmmac_priv), |
4205 | MTL_MAX_TX_QUEUES, | 4224 | MTL_MAX_TX_QUEUES, |
@@ -4322,11 +4341,22 @@ int stmmac_dvr_probe(struct device *device, | |||
4322 | "Enable RX Mitigation via HW Watchdog Timer\n"); | 4341 | "Enable RX Mitigation via HW Watchdog Timer\n"); |
4323 | } | 4342 | } |
4324 | 4343 | ||
4325 | for (queue = 0; queue < priv->plat->rx_queues_to_use; queue++) { | 4344 | /* Setup channels NAPI */ |
4326 | struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; | 4345 | maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use); |
4327 | 4346 | ||
4328 | netif_napi_add(ndev, &rx_q->napi, stmmac_poll, | 4347 | for (queue = 0; queue < maxq; queue++) { |
4329 | (8 * priv->plat->rx_queues_to_use)); | 4348 | struct stmmac_channel *ch = &priv->channel[queue]; |
4349 | |||
4350 | ch->priv_data = priv; | ||
4351 | ch->index = queue; | ||
4352 | |||
4353 | if (queue < priv->plat->rx_queues_to_use) | ||
4354 | ch->has_rx = true; | ||
4355 | if (queue < priv->plat->tx_queues_to_use) | ||
4356 | ch->has_tx = true; | ||
4357 | |||
4358 | netif_napi_add(ndev, &ch->napi, stmmac_napi_poll, | ||
4359 | NAPI_POLL_WEIGHT); | ||
4330 | } | 4360 | } |
4331 | 4361 | ||
4332 | mutex_init(&priv->lock); | 4362 | mutex_init(&priv->lock); |
@@ -4372,10 +4402,10 @@ error_netdev_register: | |||
4372 | priv->hw->pcs != STMMAC_PCS_RTBI) | 4402 | priv->hw->pcs != STMMAC_PCS_RTBI) |
4373 | stmmac_mdio_unregister(ndev); | 4403 | stmmac_mdio_unregister(ndev); |
4374 | error_mdio_register: | 4404 | error_mdio_register: |
4375 | for (queue = 0; queue < priv->plat->rx_queues_to_use; queue++) { | 4405 | for (queue = 0; queue < maxq; queue++) { |
4376 | struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; | 4406 | struct stmmac_channel *ch = &priv->channel[queue]; |
4377 | 4407 | ||
4378 | netif_napi_del(&rx_q->napi); | 4408 | netif_napi_del(&ch->napi); |
4379 | } | 4409 | } |
4380 | error_hw_init: | 4410 | error_hw_init: |
4381 | destroy_workqueue(priv->wq); | 4411 | destroy_workqueue(priv->wq); |
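The stmmac hunks above replace the per-RX-queue NAPI instance with one NAPI context per DMA channel that services both directions, splitting the poll budget between TX completion and RX processing and re-enabling the channel interrupt only when everything fits in the budget. The sketch below is a minimal user-space illustration of that budget-splitting idea; the channel structure and the two work functions are invented for the example and are not the driver's code.

#include <stdio.h>

/* Hypothetical per-channel state: how much rx/tx work is pending. */
struct channel {
	int pending_tx;	/* completed descriptors waiting to be reclaimed */
	int pending_rx;	/* received frames waiting to be processed */
};

static int clean_tx(struct channel *ch, int budget)
{
	int done = ch->pending_tx < budget ? ch->pending_tx : budget;
	ch->pending_tx -= done;
	return done;
}

static int process_rx(struct channel *ch, int budget)
{
	int done = ch->pending_rx < budget ? ch->pending_rx : budget;
	ch->pending_rx -= done;
	return done;
}

/* Combined poll: TX reclaim first, then RX with whatever budget is left. */
static int channel_poll(struct channel *ch, int budget)
{
	int work_done = 0, work_rem = budget;
	int done;

	done = clean_tx(ch, work_rem);
	work_done += done;
	work_rem -= done;

	done = process_rx(ch, work_rem);
	work_done += done;
	work_rem -= done;

	return work_done;	/* < budget means "nothing left, re-enable IRQs" */
}

int main(void)
{
	struct channel ch = { .pending_tx = 10, .pending_rx = 50 };
	int done = channel_poll(&ch, 64);

	printf("work done: %d (tx left %d, rx left %d)\n",
	       done, ch.pending_tx, ch.pending_rx);
	return 0;
}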
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c index 3609c7b696c7..2b800ce1d5bf 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c | |||
@@ -67,7 +67,7 @@ static int dwmac1000_validate_mcast_bins(int mcast_bins) | |||
67 | * Description: | 67 | * Description: |
68 | * This function validates the number of Unicast address entries supported | 68 | * This function validates the number of Unicast address entries supported |
69 | * by a particular Synopsys 10/100/1000 controller. The Synopsys controller | 69 | * by a particular Synopsys 10/100/1000 controller. The Synopsys controller |
70 | * supports 1, 32, 64, or 128 Unicast filter entries for its Unicast filter | 70 | * supports 1..32, 64, or 128 Unicast filter entries for its Unicast filter |
71 | * logic. This function validates a valid, supported configuration is | 71 | * logic. This function validates a valid, supported configuration is |
72 | * selected, and defaults to 1 Unicast address if an unsupported | 72 | * selected, and defaults to 1 Unicast address if an unsupported |
73 | * configuration is selected. | 73 | * configuration is selected. |
@@ -77,8 +77,7 @@ static int dwmac1000_validate_ucast_entries(int ucast_entries) | |||
77 | int x = ucast_entries; | 77 | int x = ucast_entries; |
78 | 78 | ||
79 | switch (x) { | 79 | switch (x) { |
80 | case 1: | 80 | case 1 ... 32: |
81 | case 32: | ||
82 | case 64: | 81 | case 64: |
83 | case 128: | 82 | case 128: |
84 | break; | 83 | break; |
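The stmmac_platform hunk above widens the accepted unicast-filter configurations from the two discrete values 1 and 32 to the whole range 1..32 by using the case-range extension that GCC and Clang support and the kernel relies on. A self-contained sketch of the same construct; the validation helper here is a simplified stand-in, not the driver function.

#include <stdio.h>

/* Simplified stand-in for the driver's validation helper.
 * Accept 1..32, 64 or 128 entries; fall back to 1 otherwise.
 * "case 1 ... 32" is a GCC/Clang extension, as used in the kernel. */
static int validate_ucast_entries(int entries)
{
	switch (entries) {
	case 1 ... 32:
	case 64:
	case 128:
		return entries;
	default:
		return 1;
	}
}

int main(void)
{
	printf("%d %d %d\n", validate_ucast_entries(16),
	       validate_ucast_entries(64), validate_ucast_entries(100));
	/* prints: 16 64 1 */
	return 0;
}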
diff --git a/drivers/net/ethernet/ti/Kconfig b/drivers/net/ethernet/ti/Kconfig index 9263d638bd6d..f932923f7d56 100644 --- a/drivers/net/ethernet/ti/Kconfig +++ b/drivers/net/ethernet/ti/Kconfig | |||
@@ -41,6 +41,7 @@ config TI_DAVINCI_MDIO | |||
41 | config TI_DAVINCI_CPDMA | 41 | config TI_DAVINCI_CPDMA |
42 | tristate "TI DaVinci CPDMA Support" | 42 | tristate "TI DaVinci CPDMA Support" |
43 | depends on ARCH_DAVINCI || ARCH_OMAP2PLUS || COMPILE_TEST | 43 | depends on ARCH_DAVINCI || ARCH_OMAP2PLUS || COMPILE_TEST |
44 | select GENERIC_ALLOCATOR | ||
44 | ---help--- | 45 | ---help--- |
45 | This driver supports TI's DaVinci CPDMA dma engine. | 46 | This driver supports TI's DaVinci CPDMA dma engine. |
46 | 47 | ||
diff --git a/drivers/net/ethernet/wiznet/w5100.c b/drivers/net/ethernet/wiznet/w5100.c index 2bdfb39215e9..d8ba512f166a 100644 --- a/drivers/net/ethernet/wiznet/w5100.c +++ b/drivers/net/ethernet/wiznet/w5100.c | |||
@@ -835,7 +835,7 @@ static void w5100_tx_work(struct work_struct *work) | |||
835 | w5100_tx_skb(priv->ndev, skb); | 835 | w5100_tx_skb(priv->ndev, skb); |
836 | } | 836 | } |
837 | 837 | ||
838 | static int w5100_start_tx(struct sk_buff *skb, struct net_device *ndev) | 838 | static netdev_tx_t w5100_start_tx(struct sk_buff *skb, struct net_device *ndev) |
839 | { | 839 | { |
840 | struct w5100_priv *priv = netdev_priv(ndev); | 840 | struct w5100_priv *priv = netdev_priv(ndev); |
841 | 841 | ||
diff --git a/drivers/net/ethernet/wiznet/w5300.c b/drivers/net/ethernet/wiznet/w5300.c index 56ae573001e8..80fdbff67d82 100644 --- a/drivers/net/ethernet/wiznet/w5300.c +++ b/drivers/net/ethernet/wiznet/w5300.c | |||
@@ -365,7 +365,7 @@ static void w5300_tx_timeout(struct net_device *ndev) | |||
365 | netif_wake_queue(ndev); | 365 | netif_wake_queue(ndev); |
366 | } | 366 | } |
367 | 367 | ||
368 | static int w5300_start_tx(struct sk_buff *skb, struct net_device *ndev) | 368 | static netdev_tx_t w5300_start_tx(struct sk_buff *skb, struct net_device *ndev) |
369 | { | 369 | { |
370 | struct w5300_priv *priv = netdev_priv(ndev); | 370 | struct w5300_priv *priv = netdev_priv(ndev); |
371 | 371 | ||
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c index 31c3d77b4733..fe01e141c8f8 100644 --- a/drivers/net/hyperv/netvsc.c +++ b/drivers/net/hyperv/netvsc.c | |||
@@ -1203,6 +1203,9 @@ static void netvsc_send_vf(struct net_device *ndev, | |||
1203 | 1203 | ||
1204 | net_device_ctx->vf_alloc = nvmsg->msg.v4_msg.vf_assoc.allocated; | 1204 | net_device_ctx->vf_alloc = nvmsg->msg.v4_msg.vf_assoc.allocated; |
1205 | net_device_ctx->vf_serial = nvmsg->msg.v4_msg.vf_assoc.serial; | 1205 | net_device_ctx->vf_serial = nvmsg->msg.v4_msg.vf_assoc.serial; |
1206 | netdev_info(ndev, "VF slot %u %s\n", | ||
1207 | net_device_ctx->vf_serial, | ||
1208 | net_device_ctx->vf_alloc ? "added" : "removed"); | ||
1206 | } | 1209 | } |
1207 | 1210 | ||
1208 | static void netvsc_receive_inband(struct net_device *ndev, | 1211 | static void netvsc_receive_inband(struct net_device *ndev, |
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c index 70921bbe0e28..3af6d8d15233 100644 --- a/drivers/net/hyperv/netvsc_drv.c +++ b/drivers/net/hyperv/netvsc_drv.c | |||
@@ -1894,20 +1894,6 @@ out_unlock: | |||
1894 | rtnl_unlock(); | 1894 | rtnl_unlock(); |
1895 | } | 1895 | } |
1896 | 1896 | ||
1897 | static struct net_device *get_netvsc_bymac(const u8 *mac) | ||
1898 | { | ||
1899 | struct net_device_context *ndev_ctx; | ||
1900 | |||
1901 | list_for_each_entry(ndev_ctx, &netvsc_dev_list, list) { | ||
1902 | struct net_device *dev = hv_get_drvdata(ndev_ctx->device_ctx); | ||
1903 | |||
1904 | if (ether_addr_equal(mac, dev->perm_addr)) | ||
1905 | return dev; | ||
1906 | } | ||
1907 | |||
1908 | return NULL; | ||
1909 | } | ||
1910 | |||
1911 | static struct net_device *get_netvsc_byref(struct net_device *vf_netdev) | 1897 | static struct net_device *get_netvsc_byref(struct net_device *vf_netdev) |
1912 | { | 1898 | { |
1913 | struct net_device_context *net_device_ctx; | 1899 | struct net_device_context *net_device_ctx; |
@@ -2036,26 +2022,48 @@ static void netvsc_vf_setup(struct work_struct *w) | |||
2036 | rtnl_unlock(); | 2022 | rtnl_unlock(); |
2037 | } | 2023 | } |
2038 | 2024 | ||
2025 | /* Find netvsc by VMBus serial number. | ||
2026 | * The PCI hyperv controller records the serial number as the slot. | ||
2027 | */ | ||
2028 | static struct net_device *get_netvsc_byslot(const struct net_device *vf_netdev) | ||
2029 | { | ||
2030 | struct device *parent = vf_netdev->dev.parent; | ||
2031 | struct net_device_context *ndev_ctx; | ||
2032 | struct pci_dev *pdev; | ||
2033 | |||
2034 | if (!parent || !dev_is_pci(parent)) | ||
2035 | return NULL; /* not a PCI device */ | ||
2036 | |||
2037 | pdev = to_pci_dev(parent); | ||
2038 | if (!pdev->slot) { | ||
2039 | netdev_notice(vf_netdev, "no PCI slot information\n"); | ||
2040 | return NULL; | ||
2041 | } | ||
2042 | |||
2043 | list_for_each_entry(ndev_ctx, &netvsc_dev_list, list) { | ||
2044 | if (!ndev_ctx->vf_alloc) | ||
2045 | continue; | ||
2046 | |||
2047 | if (ndev_ctx->vf_serial == pdev->slot->number) | ||
2048 | return hv_get_drvdata(ndev_ctx->device_ctx); | ||
2049 | } | ||
2050 | |||
2051 | netdev_notice(vf_netdev, | ||
2052 | "no netdev found for slot %u\n", pdev->slot->number); | ||
2053 | return NULL; | ||
2054 | } | ||
2055 | |||
2039 | static int netvsc_register_vf(struct net_device *vf_netdev) | 2056 | static int netvsc_register_vf(struct net_device *vf_netdev) |
2040 | { | 2057 | { |
2041 | struct net_device *ndev; | ||
2042 | struct net_device_context *net_device_ctx; | 2058 | struct net_device_context *net_device_ctx; |
2043 | struct device *pdev = vf_netdev->dev.parent; | ||
2044 | struct netvsc_device *netvsc_dev; | 2059 | struct netvsc_device *netvsc_dev; |
2060 | struct net_device *ndev; | ||
2045 | int ret; | 2061 | int ret; |
2046 | 2062 | ||
2047 | if (vf_netdev->addr_len != ETH_ALEN) | 2063 | if (vf_netdev->addr_len != ETH_ALEN) |
2048 | return NOTIFY_DONE; | 2064 | return NOTIFY_DONE; |
2049 | 2065 | ||
2050 | if (!pdev || !dev_is_pci(pdev) || dev_is_pf(pdev)) | 2066 | ndev = get_netvsc_byslot(vf_netdev); |
2051 | return NOTIFY_DONE; | ||
2052 | |||
2053 | /* | ||
2054 | * We will use the MAC address to locate the synthetic interface to | ||
2055 | * associate with the VF interface. If we don't find a matching | ||
2056 | * synthetic interface, move on. | ||
2057 | */ | ||
2058 | ndev = get_netvsc_bymac(vf_netdev->perm_addr); | ||
2059 | if (!ndev) | 2067 | if (!ndev) |
2060 | return NOTIFY_DONE; | 2068 | return NOTIFY_DONE; |
2061 | 2069 | ||
@@ -2272,17 +2280,15 @@ static int netvsc_remove(struct hv_device *dev) | |||
2272 | 2280 | ||
2273 | cancel_delayed_work_sync(&ndev_ctx->dwork); | 2281 | cancel_delayed_work_sync(&ndev_ctx->dwork); |
2274 | 2282 | ||
2275 | rcu_read_lock(); | 2283 | rtnl_lock(); |
2276 | nvdev = rcu_dereference(ndev_ctx->nvdev); | 2284 | nvdev = rtnl_dereference(ndev_ctx->nvdev); |
2277 | 2285 | if (nvdev) | |
2278 | if (nvdev) | ||
2279 | cancel_work_sync(&nvdev->subchan_work); | 2286 | cancel_work_sync(&nvdev->subchan_work); |
2280 | 2287 | ||
2281 | /* | 2288 | /* |
2282 | * Call to the vsc driver to let it know that the device is being | 2289 | * Call to the vsc driver to let it know that the device is being |
2283 | * removed. Also blocks mtu and channel changes. | 2290 | * removed. Also blocks mtu and channel changes. |
2284 | */ | 2291 | */ |
2285 | rtnl_lock(); | ||
2286 | vf_netdev = rtnl_dereference(ndev_ctx->vf_netdev); | 2292 | vf_netdev = rtnl_dereference(ndev_ctx->vf_netdev); |
2287 | if (vf_netdev) | 2293 | if (vf_netdev) |
2288 | netvsc_unregister_vf(vf_netdev); | 2294 | netvsc_unregister_vf(vf_netdev); |
@@ -2294,7 +2300,6 @@ static int netvsc_remove(struct hv_device *dev) | |||
2294 | list_del(&ndev_ctx->list); | 2300 | list_del(&ndev_ctx->list); |
2295 | 2301 | ||
2296 | rtnl_unlock(); | 2302 | rtnl_unlock(); |
2297 | rcu_read_unlock(); | ||
2298 | 2303 | ||
2299 | hv_set_drvdata(dev, NULL); | 2304 | hv_set_drvdata(dev, NULL); |
2300 | 2305 | ||
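The netvsc changes above stop pairing an accelerated-networking VF with its synthetic NIC by MAC address and instead pair them by the VMBus serial number, which the Hyper-V PCI controller records as the PCI slot number; entries with no VF allocated are skipped. The fragment below is a simplified, self-contained sketch of that lookup, with the context structure and device list invented purely for illustration.

#include <stdio.h>
#include <stdbool.h>

/* Hypothetical per-device context: serial as reported over VMBus. */
struct netvsc_ctx {
	const char *name;
	unsigned int vf_serial;
	bool vf_alloc;	/* VF slot currently allocated */
};

static struct netvsc_ctx devices[] = {
	{ "eth0", 1, true },
	{ "eth1", 2, false },
};

/* Match a VF's PCI slot number against the recorded VMBus serial. */
static const char *find_by_slot(unsigned int slot_nr)
{
	for (unsigned int i = 0; i < sizeof(devices) / sizeof(devices[0]); i++) {
		if (!devices[i].vf_alloc)
			continue;
		if (devices[i].vf_serial == slot_nr)
			return devices[i].name;
	}
	return NULL;
}

int main(void)
{
	const char *ndev = find_by_slot(1);

	printf("slot 1 -> %s\n", ndev ? ndev : "no match");
	return 0;
}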
diff --git a/drivers/net/phy/sfp-bus.c b/drivers/net/phy/sfp-bus.c index 740655261e5b..83060fb349f4 100644 --- a/drivers/net/phy/sfp-bus.c +++ b/drivers/net/phy/sfp-bus.c | |||
@@ -349,6 +349,7 @@ static int sfp_register_bus(struct sfp_bus *bus) | |||
349 | } | 349 | } |
350 | if (bus->started) | 350 | if (bus->started) |
351 | bus->socket_ops->start(bus->sfp); | 351 | bus->socket_ops->start(bus->sfp); |
352 | bus->netdev->sfp_bus = bus; | ||
352 | bus->registered = true; | 353 | bus->registered = true; |
353 | return 0; | 354 | return 0; |
354 | } | 355 | } |
@@ -357,6 +358,7 @@ static void sfp_unregister_bus(struct sfp_bus *bus) | |||
357 | { | 358 | { |
358 | const struct sfp_upstream_ops *ops = bus->upstream_ops; | 359 | const struct sfp_upstream_ops *ops = bus->upstream_ops; |
359 | 360 | ||
361 | bus->netdev->sfp_bus = NULL; | ||
360 | if (bus->registered) { | 362 | if (bus->registered) { |
361 | if (bus->started) | 363 | if (bus->started) |
362 | bus->socket_ops->stop(bus->sfp); | 364 | bus->socket_ops->stop(bus->sfp); |
@@ -438,7 +440,6 @@ static void sfp_upstream_clear(struct sfp_bus *bus) | |||
438 | { | 440 | { |
439 | bus->upstream_ops = NULL; | 441 | bus->upstream_ops = NULL; |
440 | bus->upstream = NULL; | 442 | bus->upstream = NULL; |
441 | bus->netdev->sfp_bus = NULL; | ||
442 | bus->netdev = NULL; | 443 | bus->netdev = NULL; |
443 | } | 444 | } |
444 | 445 | ||
@@ -467,7 +468,6 @@ struct sfp_bus *sfp_register_upstream(struct fwnode_handle *fwnode, | |||
467 | bus->upstream_ops = ops; | 468 | bus->upstream_ops = ops; |
468 | bus->upstream = upstream; | 469 | bus->upstream = upstream; |
469 | bus->netdev = ndev; | 470 | bus->netdev = ndev; |
470 | ndev->sfp_bus = bus; | ||
471 | 471 | ||
472 | if (bus->sfp) { | 472 | if (bus->sfp) { |
473 | ret = sfp_register_bus(bus); | 473 | ret = sfp_register_bus(bus); |
diff --git a/drivers/net/ppp/pppoe.c b/drivers/net/ppp/pppoe.c index ce61231e96ea..62dc564b251d 100644 --- a/drivers/net/ppp/pppoe.c +++ b/drivers/net/ppp/pppoe.c | |||
@@ -429,6 +429,9 @@ static int pppoe_rcv(struct sk_buff *skb, struct net_device *dev, | |||
429 | if (!skb) | 429 | if (!skb) |
430 | goto out; | 430 | goto out; |
431 | 431 | ||
432 | if (skb_mac_header_len(skb) < ETH_HLEN) | ||
433 | goto drop; | ||
434 | |||
432 | if (!pskb_may_pull(skb, sizeof(struct pppoe_hdr))) | 435 | if (!pskb_may_pull(skb, sizeof(struct pppoe_hdr))) |
433 | goto drop; | 436 | goto drop; |
434 | 437 | ||
diff --git a/drivers/net/tun.c b/drivers/net/tun.c index ebd07ad82431..e2648b5a3861 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c | |||
@@ -1153,43 +1153,6 @@ static netdev_features_t tun_net_fix_features(struct net_device *dev, | |||
1153 | 1153 | ||
1154 | return (features & tun->set_features) | (features & ~TUN_USER_FEATURES); | 1154 | return (features & tun->set_features) | (features & ~TUN_USER_FEATURES); |
1155 | } | 1155 | } |
1156 | #ifdef CONFIG_NET_POLL_CONTROLLER | ||
1157 | static void tun_poll_controller(struct net_device *dev) | ||
1158 | { | ||
1159 | /* | ||
1160 | * Tun only receives frames when: | ||
1161 | * 1) the char device endpoint gets data from user space | ||
1162 | * 2) the tun socket gets a sendmsg call from user space | ||
1163 | * If NAPI is not enabled, since both of those are synchronous | ||
1164 | * operations, we are guaranteed never to have pending data when we poll | ||
1165 | * for it so there is nothing to do here but return. | ||
1166 | * We need this though so netpoll recognizes us as an interface that | ||
1167 | * supports polling, which enables bridge devices in virt setups to | ||
1168 | * still use netconsole | ||
1169 | * If NAPI is enabled, however, we need to schedule polling for all | ||
1170 | * queues unless we are using napi_gro_frags(), which we call in | ||
1171 | * process context and not in NAPI context. | ||
1172 | */ | ||
1173 | struct tun_struct *tun = netdev_priv(dev); | ||
1174 | |||
1175 | if (tun->flags & IFF_NAPI) { | ||
1176 | struct tun_file *tfile; | ||
1177 | int i; | ||
1178 | |||
1179 | if (tun_napi_frags_enabled(tun)) | ||
1180 | return; | ||
1181 | |||
1182 | rcu_read_lock(); | ||
1183 | for (i = 0; i < tun->numqueues; i++) { | ||
1184 | tfile = rcu_dereference(tun->tfiles[i]); | ||
1185 | if (tfile->napi_enabled) | ||
1186 | napi_schedule(&tfile->napi); | ||
1187 | } | ||
1188 | rcu_read_unlock(); | ||
1189 | } | ||
1190 | return; | ||
1191 | } | ||
1192 | #endif | ||
1193 | 1156 | ||
1194 | static void tun_set_headroom(struct net_device *dev, int new_hr) | 1157 | static void tun_set_headroom(struct net_device *dev, int new_hr) |
1195 | { | 1158 | { |
@@ -1283,9 +1246,6 @@ static const struct net_device_ops tun_netdev_ops = { | |||
1283 | .ndo_start_xmit = tun_net_xmit, | 1246 | .ndo_start_xmit = tun_net_xmit, |
1284 | .ndo_fix_features = tun_net_fix_features, | 1247 | .ndo_fix_features = tun_net_fix_features, |
1285 | .ndo_select_queue = tun_select_queue, | 1248 | .ndo_select_queue = tun_select_queue, |
1286 | #ifdef CONFIG_NET_POLL_CONTROLLER | ||
1287 | .ndo_poll_controller = tun_poll_controller, | ||
1288 | #endif | ||
1289 | .ndo_set_rx_headroom = tun_set_headroom, | 1249 | .ndo_set_rx_headroom = tun_set_headroom, |
1290 | .ndo_get_stats64 = tun_net_get_stats64, | 1250 | .ndo_get_stats64 = tun_net_get_stats64, |
1291 | }; | 1251 | }; |
@@ -1365,9 +1325,6 @@ static const struct net_device_ops tap_netdev_ops = { | |||
1365 | .ndo_set_mac_address = eth_mac_addr, | 1325 | .ndo_set_mac_address = eth_mac_addr, |
1366 | .ndo_validate_addr = eth_validate_addr, | 1326 | .ndo_validate_addr = eth_validate_addr, |
1367 | .ndo_select_queue = tun_select_queue, | 1327 | .ndo_select_queue = tun_select_queue, |
1368 | #ifdef CONFIG_NET_POLL_CONTROLLER | ||
1369 | .ndo_poll_controller = tun_poll_controller, | ||
1370 | #endif | ||
1371 | .ndo_features_check = passthru_features_check, | 1328 | .ndo_features_check = passthru_features_check, |
1372 | .ndo_set_rx_headroom = tun_set_headroom, | 1329 | .ndo_set_rx_headroom = tun_set_headroom, |
1373 | .ndo_get_stats64 = tun_net_get_stats64, | 1330 | .ndo_get_stats64 = tun_net_get_stats64, |
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c index e3270deecec2..533b6fb8d923 100644 --- a/drivers/net/usb/qmi_wwan.c +++ b/drivers/net/usb/qmi_wwan.c | |||
@@ -1213,13 +1213,13 @@ static const struct usb_device_id products[] = { | |||
1213 | {QMI_FIXED_INTF(0x1199, 0x9061, 8)}, /* Sierra Wireless Modem */ | 1213 | {QMI_FIXED_INTF(0x1199, 0x9061, 8)}, /* Sierra Wireless Modem */ |
1214 | {QMI_FIXED_INTF(0x1199, 0x9063, 8)}, /* Sierra Wireless EM7305 */ | 1214 | {QMI_FIXED_INTF(0x1199, 0x9063, 8)}, /* Sierra Wireless EM7305 */ |
1215 | {QMI_FIXED_INTF(0x1199, 0x9063, 10)}, /* Sierra Wireless EM7305 */ | 1215 | {QMI_FIXED_INTF(0x1199, 0x9063, 10)}, /* Sierra Wireless EM7305 */ |
1216 | {QMI_FIXED_INTF(0x1199, 0x9071, 8)}, /* Sierra Wireless MC74xx */ | 1216 | {QMI_QUIRK_SET_DTR(0x1199, 0x9071, 8)}, /* Sierra Wireless MC74xx */ |
1217 | {QMI_FIXED_INTF(0x1199, 0x9071, 10)}, /* Sierra Wireless MC74xx */ | 1217 | {QMI_QUIRK_SET_DTR(0x1199, 0x9071, 10)},/* Sierra Wireless MC74xx */ |
1218 | {QMI_FIXED_INTF(0x1199, 0x9079, 8)}, /* Sierra Wireless EM74xx */ | 1218 | {QMI_QUIRK_SET_DTR(0x1199, 0x9079, 8)}, /* Sierra Wireless EM74xx */ |
1219 | {QMI_FIXED_INTF(0x1199, 0x9079, 10)}, /* Sierra Wireless EM74xx */ | 1219 | {QMI_QUIRK_SET_DTR(0x1199, 0x9079, 10)},/* Sierra Wireless EM74xx */ |
1220 | {QMI_FIXED_INTF(0x1199, 0x907b, 8)}, /* Sierra Wireless EM74xx */ | 1220 | {QMI_QUIRK_SET_DTR(0x1199, 0x907b, 8)}, /* Sierra Wireless EM74xx */ |
1221 | {QMI_FIXED_INTF(0x1199, 0x907b, 10)}, /* Sierra Wireless EM74xx */ | 1221 | {QMI_QUIRK_SET_DTR(0x1199, 0x907b, 10)},/* Sierra Wireless EM74xx */ |
1222 | {QMI_FIXED_INTF(0x1199, 0x9091, 8)}, /* Sierra Wireless EM7565 */ | 1222 | {QMI_QUIRK_SET_DTR(0x1199, 0x9091, 8)}, /* Sierra Wireless EM7565 */ |
1223 | {QMI_FIXED_INTF(0x1bbb, 0x011e, 4)}, /* Telekom Speedstick LTE II (Alcatel One Touch L100V LTE) */ | 1223 | {QMI_FIXED_INTF(0x1bbb, 0x011e, 4)}, /* Telekom Speedstick LTE II (Alcatel One Touch L100V LTE) */ |
1224 | {QMI_FIXED_INTF(0x1bbb, 0x0203, 2)}, /* Alcatel L800MA */ | 1224 | {QMI_FIXED_INTF(0x1bbb, 0x0203, 2)}, /* Alcatel L800MA */ |
1225 | {QMI_FIXED_INTF(0x2357, 0x0201, 4)}, /* TP-LINK HSUPA Modem MA180 */ | 1225 | {QMI_FIXED_INTF(0x2357, 0x0201, 4)}, /* TP-LINK HSUPA Modem MA180 */ |
diff --git a/drivers/net/veth.c b/drivers/net/veth.c index 8d679c8b7f25..41a00cd76955 100644 --- a/drivers/net/veth.c +++ b/drivers/net/veth.c | |||
@@ -463,6 +463,8 @@ static struct sk_buff *veth_xdp_rcv_skb(struct veth_rq *rq, struct sk_buff *skb, | |||
463 | int mac_len, delta, off; | 463 | int mac_len, delta, off; |
464 | struct xdp_buff xdp; | 464 | struct xdp_buff xdp; |
465 | 465 | ||
466 | skb_orphan(skb); | ||
467 | |||
466 | rcu_read_lock(); | 468 | rcu_read_lock(); |
467 | xdp_prog = rcu_dereference(rq->xdp_prog); | 469 | xdp_prog = rcu_dereference(rq->xdp_prog); |
468 | if (unlikely(!xdp_prog)) { | 470 | if (unlikely(!xdp_prog)) { |
@@ -508,8 +510,6 @@ static struct sk_buff *veth_xdp_rcv_skb(struct veth_rq *rq, struct sk_buff *skb, | |||
508 | skb_copy_header(nskb, skb); | 510 | skb_copy_header(nskb, skb); |
509 | head_off = skb_headroom(nskb) - skb_headroom(skb); | 511 | head_off = skb_headroom(nskb) - skb_headroom(skb); |
510 | skb_headers_offset_update(nskb, head_off); | 512 | skb_headers_offset_update(nskb, head_off); |
511 | if (skb->sk) | ||
512 | skb_set_owner_w(nskb, skb->sk); | ||
513 | consume_skb(skb); | 513 | consume_skb(skb); |
514 | skb = nskb; | 514 | skb = nskb; |
515 | } | 515 | } |
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c index 9407acbd19a9..f17f602e6171 100644 --- a/drivers/net/xen-netfront.c +++ b/drivers/net/xen-netfront.c | |||
@@ -908,7 +908,11 @@ static RING_IDX xennet_fill_frags(struct netfront_queue *queue, | |||
908 | BUG_ON(pull_to <= skb_headlen(skb)); | 908 | BUG_ON(pull_to <= skb_headlen(skb)); |
909 | __pskb_pull_tail(skb, pull_to - skb_headlen(skb)); | 909 | __pskb_pull_tail(skb, pull_to - skb_headlen(skb)); |
910 | } | 910 | } |
911 | BUG_ON(skb_shinfo(skb)->nr_frags >= MAX_SKB_FRAGS); | 911 | if (unlikely(skb_shinfo(skb)->nr_frags >= MAX_SKB_FRAGS)) { |
912 | queue->rx.rsp_cons = ++cons; | ||
913 | kfree_skb(nskb); | ||
914 | return ~0U; | ||
915 | } | ||
912 | 916 | ||
913 | skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, | 917 | skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, |
914 | skb_frag_page(nfrag), | 918 | skb_frag_page(nfrag), |
@@ -1045,6 +1049,8 @@ err: | |||
1045 | skb->len += rx->status; | 1049 | skb->len += rx->status; |
1046 | 1050 | ||
1047 | i = xennet_fill_frags(queue, skb, &tmpq); | 1051 | i = xennet_fill_frags(queue, skb, &tmpq); |
1052 | if (unlikely(i == ~0U)) | ||
1053 | goto err; | ||
1048 | 1054 | ||
1049 | if (rx->flags & XEN_NETRXF_csum_blank) | 1055 | if (rx->flags & XEN_NETRXF_csum_blank) |
1050 | skb->ip_summed = CHECKSUM_PARTIAL; | 1056 | skb->ip_summed = CHECKSUM_PARTIAL; |
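The xen-netfront hunks above turn a fatal BUG_ON() into a graceful drop: when adding another fragment would exceed MAX_SKB_FRAGS, the fill routine now consumes the response, frees the partial skb and signals the condition with the sentinel ~0U, which the caller maps onto its existing error path. A small stand-alone sketch of the sentinel-return pattern; the names and the fragment limit are illustrative only.

#include <stdio.h>

#define MAX_FRAGS 4	/* illustrative stand-in for MAX_SKB_FRAGS */

/* Returns the new ring index, or ~0U if the frame had too many fragments. */
static unsigned int fill_frags(unsigned int cons, int nr_frags, int extra)
{
	for (int i = 0; i < extra; i++) {
		if (nr_frags >= MAX_FRAGS)
			return ~0U;	/* caller drops the frame instead of crashing */
		nr_frags++;
		cons++;
	}
	return cons;
}

int main(void)
{
	unsigned int i = fill_frags(10, 3, 2);

	if (i == ~0U)
		printf("frame dropped: too many fragments\n");
	else
		printf("new index: %u\n", i);
	return 0;
}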
diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c index 5a9562881d4e..9fe3fff818b8 100644 --- a/drivers/nvme/host/multipath.c +++ b/drivers/nvme/host/multipath.c | |||
@@ -537,8 +537,10 @@ int nvme_mpath_init(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id) | |||
537 | 537 | ||
538 | INIT_WORK(&ctrl->ana_work, nvme_ana_work); | 538 | INIT_WORK(&ctrl->ana_work, nvme_ana_work); |
539 | ctrl->ana_log_buf = kmalloc(ctrl->ana_log_size, GFP_KERNEL); | 539 | ctrl->ana_log_buf = kmalloc(ctrl->ana_log_size, GFP_KERNEL); |
540 | if (!ctrl->ana_log_buf) | 540 | if (!ctrl->ana_log_buf) { |
541 | error = -ENOMEM; | ||
541 | goto out; | 542 | goto out; |
543 | } | ||
542 | 544 | ||
543 | error = nvme_read_ana_log(ctrl, true); | 545 | error = nvme_read_ana_log(ctrl, true); |
544 | if (error) | 546 | if (error) |
@@ -547,7 +549,7 @@ int nvme_mpath_init(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id) | |||
547 | out_free_ana_log_buf: | 549 | out_free_ana_log_buf: |
548 | kfree(ctrl->ana_log_buf); | 550 | kfree(ctrl->ana_log_buf); |
549 | out: | 551 | out: |
550 | return -ENOMEM; | 552 | return error; |
551 | } | 553 | } |
552 | 554 | ||
553 | void nvme_mpath_uninit(struct nvme_ctrl *ctrl) | 555 | void nvme_mpath_uninit(struct nvme_ctrl *ctrl) |
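The nvme-multipath fix above does two things: it sets error to -ENOMEM explicitly when the ANA log buffer allocation fails, and it makes the out: label return that error variable instead of a hard-coded -ENOMEM, so a failure from nvme_read_ana_log() is propagated as-is. A tiny sketch of the corrected goto/error-propagation pattern; the helper names are illustrative.

#include <stdio.h>
#include <stdlib.h>
#include <errno.h>

/* Illustrative stand-in for the log-read step; pretend it fails with -EIO. */
static int read_log(char *buf)
{
	(void)buf;
	return -EIO;
}

static int mpath_init(void)
{
	int error = 0;
	char *buf = malloc(64);

	if (!buf) {
		error = -ENOMEM;	/* set the code before jumping */
		goto out;
	}

	error = read_log(buf);
	if (error)
		goto out_free;

	return 0;

out_free:
	free(buf);
out:
	return error;	/* propagate the real error, not a fixed -ENOMEM */
}

int main(void)
{
	printf("init: %d\n", mpath_init());
	return 0;
}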
diff --git a/drivers/nvme/target/admin-cmd.c b/drivers/nvme/target/admin-cmd.c index a21caea1e080..2008fa62a373 100644 --- a/drivers/nvme/target/admin-cmd.c +++ b/drivers/nvme/target/admin-cmd.c | |||
@@ -245,6 +245,10 @@ static void nvmet_execute_get_log_page_ana(struct nvmet_req *req) | |||
245 | offset += len; | 245 | offset += len; |
246 | ngrps++; | 246 | ngrps++; |
247 | } | 247 | } |
248 | for ( ; grpid <= NVMET_MAX_ANAGRPS; grpid++) { | ||
249 | if (nvmet_ana_group_enabled[grpid]) | ||
250 | ngrps++; | ||
251 | } | ||
248 | 252 | ||
249 | hdr.chgcnt = cpu_to_le64(nvmet_ana_chgcnt); | 253 | hdr.chgcnt = cpu_to_le64(nvmet_ana_chgcnt); |
250 | hdr.ngrps = cpu_to_le16(ngrps); | 254 | hdr.ngrps = cpu_to_le16(ngrps); |
diff --git a/drivers/pci/controller/dwc/pcie-designware.c b/drivers/pci/controller/dwc/pcie-designware.c index 778c4f76a884..2153956a0b20 100644 --- a/drivers/pci/controller/dwc/pcie-designware.c +++ b/drivers/pci/controller/dwc/pcie-designware.c | |||
@@ -135,7 +135,7 @@ static void dw_pcie_prog_outbound_atu_unroll(struct dw_pcie *pci, int index, | |||
135 | if (val & PCIE_ATU_ENABLE) | 135 | if (val & PCIE_ATU_ENABLE) |
136 | return; | 136 | return; |
137 | 137 | ||
138 | usleep_range(LINK_WAIT_IATU_MIN, LINK_WAIT_IATU_MAX); | 138 | mdelay(LINK_WAIT_IATU); |
139 | } | 139 | } |
140 | dev_err(pci->dev, "Outbound iATU is not being enabled\n"); | 140 | dev_err(pci->dev, "Outbound iATU is not being enabled\n"); |
141 | } | 141 | } |
@@ -178,7 +178,7 @@ void dw_pcie_prog_outbound_atu(struct dw_pcie *pci, int index, int type, | |||
178 | if (val & PCIE_ATU_ENABLE) | 178 | if (val & PCIE_ATU_ENABLE) |
179 | return; | 179 | return; |
180 | 180 | ||
181 | usleep_range(LINK_WAIT_IATU_MIN, LINK_WAIT_IATU_MAX); | 181 | mdelay(LINK_WAIT_IATU); |
182 | } | 182 | } |
183 | dev_err(pci->dev, "Outbound iATU is not being enabled\n"); | 183 | dev_err(pci->dev, "Outbound iATU is not being enabled\n"); |
184 | } | 184 | } |
@@ -236,7 +236,7 @@ static int dw_pcie_prog_inbound_atu_unroll(struct dw_pcie *pci, int index, | |||
236 | if (val & PCIE_ATU_ENABLE) | 236 | if (val & PCIE_ATU_ENABLE) |
237 | return 0; | 237 | return 0; |
238 | 238 | ||
239 | usleep_range(LINK_WAIT_IATU_MIN, LINK_WAIT_IATU_MAX); | 239 | mdelay(LINK_WAIT_IATU); |
240 | } | 240 | } |
241 | dev_err(pci->dev, "Inbound iATU is not being enabled\n"); | 241 | dev_err(pci->dev, "Inbound iATU is not being enabled\n"); |
242 | 242 | ||
@@ -282,7 +282,7 @@ int dw_pcie_prog_inbound_atu(struct dw_pcie *pci, int index, int bar, | |||
282 | if (val & PCIE_ATU_ENABLE) | 282 | if (val & PCIE_ATU_ENABLE) |
283 | return 0; | 283 | return 0; |
284 | 284 | ||
285 | usleep_range(LINK_WAIT_IATU_MIN, LINK_WAIT_IATU_MAX); | 285 | mdelay(LINK_WAIT_IATU); |
286 | } | 286 | } |
287 | dev_err(pci->dev, "Inbound iATU is not being enabled\n"); | 287 | dev_err(pci->dev, "Inbound iATU is not being enabled\n"); |
288 | 288 | ||
diff --git a/drivers/pci/controller/dwc/pcie-designware.h b/drivers/pci/controller/dwc/pcie-designware.h index 96126fd8403c..9f1a5e399b70 100644 --- a/drivers/pci/controller/dwc/pcie-designware.h +++ b/drivers/pci/controller/dwc/pcie-designware.h | |||
@@ -26,8 +26,7 @@ | |||
26 | 26 | ||
27 | /* Parameters for the waiting for iATU enabled routine */ | 27 | /* Parameters for the waiting for iATU enabled routine */ |
28 | #define LINK_WAIT_MAX_IATU_RETRIES 5 | 28 | #define LINK_WAIT_MAX_IATU_RETRIES 5 |
29 | #define LINK_WAIT_IATU_MIN 9000 | 29 | #define LINK_WAIT_IATU 9 |
30 | #define LINK_WAIT_IATU_MAX 10000 | ||
31 | 30 | ||
32 | /* Synopsys-specific PCIe configuration registers */ | 31 | /* Synopsys-specific PCIe configuration registers */ |
33 | #define PCIE_PORT_LINK_CONTROL 0x710 | 32 | #define PCIE_PORT_LINK_CONTROL 0x710 |
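The DesignWare change above swaps usleep_range() for mdelay() while polling the iATU enable bit, presumably because these ATU programming paths can run in atomic context where sleeping is not allowed; with LINK_WAIT_IATU at 9 ms and 5 retries the worst-case wait stays in the same ballpark. A minimal user-space sketch of a bounded poll loop of that shape, with the register read and the busy-wait delay stubbed out; both stubs are assumptions for the example.

#include <stdio.h>

#define MAX_RETRIES	5
#define ATU_ENABLE	(1u << 31)

/* Stub standing in for the register read: "hardware" is ready on the 3rd read. */
static unsigned int readl_stub(int attempt)
{
	return attempt >= 2 ? ATU_ENABLE : 0;
}

/* Stub standing in for the busy-wait delay; safe to call in atomic context. */
static void mdelay_stub(unsigned int ms)
{
	(void)ms;
}

static int wait_for_atu_enable(void)
{
	for (int retries = 0; retries < MAX_RETRIES; retries++) {
		if (readl_stub(retries) & ATU_ENABLE)
			return 0;
		mdelay_stub(9);
	}
	return -1;	/* iATU is not being enabled */
}

int main(void)
{
	printf("wait result: %d\n", wait_for_atu_enable());
	return 0;
}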
diff --git a/drivers/pci/controller/pci-hyperv.c b/drivers/pci/controller/pci-hyperv.c index c00f82cc54aa..9ba4d12c179c 100644 --- a/drivers/pci/controller/pci-hyperv.c +++ b/drivers/pci/controller/pci-hyperv.c | |||
@@ -89,6 +89,9 @@ static enum pci_protocol_version_t pci_protocol_version; | |||
89 | 89 | ||
90 | #define STATUS_REVISION_MISMATCH 0xC0000059 | 90 | #define STATUS_REVISION_MISMATCH 0xC0000059 |
91 | 91 | ||
92 | /* space for 32bit serial number as string */ | ||
93 | #define SLOT_NAME_SIZE 11 | ||
94 | |||
92 | /* | 95 | /* |
93 | * Message Types | 96 | * Message Types |
94 | */ | 97 | */ |
@@ -494,6 +497,7 @@ struct hv_pci_dev { | |||
494 | struct list_head list_entry; | 497 | struct list_head list_entry; |
495 | refcount_t refs; | 498 | refcount_t refs; |
496 | enum hv_pcichild_state state; | 499 | enum hv_pcichild_state state; |
500 | struct pci_slot *pci_slot; | ||
497 | struct pci_function_description desc; | 501 | struct pci_function_description desc; |
498 | bool reported_missing; | 502 | bool reported_missing; |
499 | struct hv_pcibus_device *hbus; | 503 | struct hv_pcibus_device *hbus; |
@@ -1457,6 +1461,36 @@ static void prepopulate_bars(struct hv_pcibus_device *hbus) | |||
1457 | spin_unlock_irqrestore(&hbus->device_list_lock, flags); | 1461 | spin_unlock_irqrestore(&hbus->device_list_lock, flags); |
1458 | } | 1462 | } |
1459 | 1463 | ||
1464 | /* | ||
1465 | * Assign entries in sysfs pci slot directory. | ||
1466 | * | ||
1467 | * Note that this function does not need to lock the children list | ||
1468 | * because it is called from pci_devices_present_work which | ||
1469 | * is serialized with hv_eject_device_work because they are on the | ||
1470 | * same ordered workqueue. Therefore hbus->children list will not change | ||
1471 | * even when pci_create_slot sleeps. | ||
1472 | */ | ||
1473 | static void hv_pci_assign_slots(struct hv_pcibus_device *hbus) | ||
1474 | { | ||
1475 | struct hv_pci_dev *hpdev; | ||
1476 | char name[SLOT_NAME_SIZE]; | ||
1477 | int slot_nr; | ||
1478 | |||
1479 | list_for_each_entry(hpdev, &hbus->children, list_entry) { | ||
1480 | if (hpdev->pci_slot) | ||
1481 | continue; | ||
1482 | |||
1483 | slot_nr = PCI_SLOT(wslot_to_devfn(hpdev->desc.win_slot.slot)); | ||
1484 | snprintf(name, SLOT_NAME_SIZE, "%u", hpdev->desc.ser); | ||
1485 | hpdev->pci_slot = pci_create_slot(hbus->pci_bus, slot_nr, | ||
1486 | name, NULL); | ||
1487 | if (IS_ERR(hpdev->pci_slot)) { | ||
1488 | pr_warn("pci_create slot %s failed\n", name); | ||
1489 | hpdev->pci_slot = NULL; | ||
1490 | } | ||
1491 | } | ||
1492 | } | ||
1493 | |||
1460 | /** | 1494 | /** |
1461 | * create_root_hv_pci_bus() - Expose a new root PCI bus | 1495 | * create_root_hv_pci_bus() - Expose a new root PCI bus |
1462 | * @hbus: Root PCI bus, as understood by this driver | 1496 | * @hbus: Root PCI bus, as understood by this driver |
@@ -1480,6 +1514,7 @@ static int create_root_hv_pci_bus(struct hv_pcibus_device *hbus) | |||
1480 | pci_lock_rescan_remove(); | 1514 | pci_lock_rescan_remove(); |
1481 | pci_scan_child_bus(hbus->pci_bus); | 1515 | pci_scan_child_bus(hbus->pci_bus); |
1482 | pci_bus_assign_resources(hbus->pci_bus); | 1516 | pci_bus_assign_resources(hbus->pci_bus); |
1517 | hv_pci_assign_slots(hbus); | ||
1483 | pci_bus_add_devices(hbus->pci_bus); | 1518 | pci_bus_add_devices(hbus->pci_bus); |
1484 | pci_unlock_rescan_remove(); | 1519 | pci_unlock_rescan_remove(); |
1485 | hbus->state = hv_pcibus_installed; | 1520 | hbus->state = hv_pcibus_installed; |
@@ -1742,6 +1777,7 @@ static void pci_devices_present_work(struct work_struct *work) | |||
1742 | */ | 1777 | */ |
1743 | pci_lock_rescan_remove(); | 1778 | pci_lock_rescan_remove(); |
1744 | pci_scan_child_bus(hbus->pci_bus); | 1779 | pci_scan_child_bus(hbus->pci_bus); |
1780 | hv_pci_assign_slots(hbus); | ||
1745 | pci_unlock_rescan_remove(); | 1781 | pci_unlock_rescan_remove(); |
1746 | break; | 1782 | break; |
1747 | 1783 | ||
@@ -1858,6 +1894,9 @@ static void hv_eject_device_work(struct work_struct *work) | |||
1858 | list_del(&hpdev->list_entry); | 1894 | list_del(&hpdev->list_entry); |
1859 | spin_unlock_irqrestore(&hpdev->hbus->device_list_lock, flags); | 1895 | spin_unlock_irqrestore(&hpdev->hbus->device_list_lock, flags); |
1860 | 1896 | ||
1897 | if (hpdev->pci_slot) | ||
1898 | pci_destroy_slot(hpdev->pci_slot); | ||
1899 | |||
1861 | memset(&ctxt, 0, sizeof(ctxt)); | 1900 | memset(&ctxt, 0, sizeof(ctxt)); |
1862 | ejct_pkt = (struct pci_eject_response *)&ctxt.pkt.message; | 1901 | ejct_pkt = (struct pci_eject_response *)&ctxt.pkt.message; |
1863 | ejct_pkt->message_type.type = PCI_EJECTION_COMPLETE; | 1902 | ejct_pkt->message_type.type = PCI_EJECTION_COMPLETE; |
diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c index ef0b1b6ba86f..12afa7fdf77e 100644 --- a/drivers/pci/hotplug/acpiphp_glue.c +++ b/drivers/pci/hotplug/acpiphp_glue.c | |||
@@ -457,17 +457,18 @@ static void acpiphp_native_scan_bridge(struct pci_dev *bridge) | |||
457 | /** | 457 | /** |
458 | * enable_slot - enable, configure a slot | 458 | * enable_slot - enable, configure a slot |
459 | * @slot: slot to be enabled | 459 | * @slot: slot to be enabled |
460 | * @bridge: true if enable is for the whole bridge (not a single slot) | ||
460 | * | 461 | * |
461 | * This function should be called per *physical slot*, | 462 | * This function should be called per *physical slot*, |
462 | * not per each slot object in ACPI namespace. | 463 | * not per each slot object in ACPI namespace. |
463 | */ | 464 | */ |
464 | static void enable_slot(struct acpiphp_slot *slot) | 465 | static void enable_slot(struct acpiphp_slot *slot, bool bridge) |
465 | { | 466 | { |
466 | struct pci_dev *dev; | 467 | struct pci_dev *dev; |
467 | struct pci_bus *bus = slot->bus; | 468 | struct pci_bus *bus = slot->bus; |
468 | struct acpiphp_func *func; | 469 | struct acpiphp_func *func; |
469 | 470 | ||
470 | if (bus->self && hotplug_is_native(bus->self)) { | 471 | if (bridge && bus->self && hotplug_is_native(bus->self)) { |
471 | /* | 472 | /* |
472 | * If native hotplug is used, it will take care of hotplug | 473 | * If native hotplug is used, it will take care of hotplug |
473 | * slot management and resource allocation for hotplug | 474 | * slot management and resource allocation for hotplug |
@@ -701,7 +702,7 @@ static void acpiphp_check_bridge(struct acpiphp_bridge *bridge) | |||
701 | trim_stale_devices(dev); | 702 | trim_stale_devices(dev); |
702 | 703 | ||
703 | /* configure all functions */ | 704 | /* configure all functions */ |
704 | enable_slot(slot); | 705 | enable_slot(slot, true); |
705 | } else { | 706 | } else { |
706 | disable_slot(slot); | 707 | disable_slot(slot); |
707 | } | 708 | } |
@@ -785,7 +786,7 @@ static void hotplug_event(u32 type, struct acpiphp_context *context) | |||
785 | if (bridge) | 786 | if (bridge) |
786 | acpiphp_check_bridge(bridge); | 787 | acpiphp_check_bridge(bridge); |
787 | else if (!(slot->flags & SLOT_IS_GOING_AWAY)) | 788 | else if (!(slot->flags & SLOT_IS_GOING_AWAY)) |
788 | enable_slot(slot); | 789 | enable_slot(slot, false); |
789 | 790 | ||
790 | break; | 791 | break; |
791 | 792 | ||
@@ -973,7 +974,7 @@ int acpiphp_enable_slot(struct acpiphp_slot *slot) | |||
973 | 974 | ||
974 | /* configure all functions */ | 975 | /* configure all functions */ |
975 | if (!(slot->flags & SLOT_ENABLED)) | 976 | if (!(slot->flags & SLOT_ENABLED)) |
976 | enable_slot(slot); | 977 | enable_slot(slot, false); |
977 | 978 | ||
978 | pci_unlock_rescan_remove(); | 979 | pci_unlock_rescan_remove(); |
979 | return 0; | 980 | return 0; |
diff --git a/drivers/pinctrl/intel/pinctrl-cannonlake.c b/drivers/pinctrl/intel/pinctrl-cannonlake.c index fb1afe55bf53..e7f45d96b0cb 100644 --- a/drivers/pinctrl/intel/pinctrl-cannonlake.c +++ b/drivers/pinctrl/intel/pinctrl-cannonlake.c | |||
@@ -15,10 +15,11 @@ | |||
15 | 15 | ||
16 | #include "pinctrl-intel.h" | 16 | #include "pinctrl-intel.h" |
17 | 17 | ||
18 | #define CNL_PAD_OWN 0x020 | 18 | #define CNL_PAD_OWN 0x020 |
19 | #define CNL_PADCFGLOCK 0x080 | 19 | #define CNL_PADCFGLOCK 0x080 |
20 | #define CNL_HOSTSW_OWN 0x0b0 | 20 | #define CNL_LP_HOSTSW_OWN 0x0b0 |
21 | #define CNL_GPI_IE 0x120 | 21 | #define CNL_H_HOSTSW_OWN 0x0c0 |
22 | #define CNL_GPI_IE 0x120 | ||
22 | 23 | ||
23 | #define CNL_GPP(r, s, e, g) \ | 24 | #define CNL_GPP(r, s, e, g) \ |
24 | { \ | 25 | { \ |
@@ -30,12 +31,12 @@ | |||
30 | 31 | ||
31 | #define CNL_NO_GPIO -1 | 32 | #define CNL_NO_GPIO -1 |
32 | 33 | ||
33 | #define CNL_COMMUNITY(b, s, e, g) \ | 34 | #define CNL_COMMUNITY(b, s, e, o, g) \ |
34 | { \ | 35 | { \ |
35 | .barno = (b), \ | 36 | .barno = (b), \ |
36 | .padown_offset = CNL_PAD_OWN, \ | 37 | .padown_offset = CNL_PAD_OWN, \ |
37 | .padcfglock_offset = CNL_PADCFGLOCK, \ | 38 | .padcfglock_offset = CNL_PADCFGLOCK, \ |
38 | .hostown_offset = CNL_HOSTSW_OWN, \ | 39 | .hostown_offset = (o), \ |
39 | .ie_offset = CNL_GPI_IE, \ | 40 | .ie_offset = CNL_GPI_IE, \ |
40 | .pin_base = (s), \ | 41 | .pin_base = (s), \ |
41 | .npins = ((e) - (s) + 1), \ | 42 | .npins = ((e) - (s) + 1), \ |
@@ -43,6 +44,12 @@ | |||
43 | .ngpps = ARRAY_SIZE(g), \ | 44 | .ngpps = ARRAY_SIZE(g), \ |
44 | } | 45 | } |
45 | 46 | ||
47 | #define CNLLP_COMMUNITY(b, s, e, g) \ | ||
48 | CNL_COMMUNITY(b, s, e, CNL_LP_HOSTSW_OWN, g) | ||
49 | |||
50 | #define CNLH_COMMUNITY(b, s, e, g) \ | ||
51 | CNL_COMMUNITY(b, s, e, CNL_H_HOSTSW_OWN, g) | ||
52 | |||
46 | /* Cannon Lake-H */ | 53 | /* Cannon Lake-H */ |
47 | static const struct pinctrl_pin_desc cnlh_pins[] = { | 54 | static const struct pinctrl_pin_desc cnlh_pins[] = { |
48 | /* GPP_A */ | 55 | /* GPP_A */ |
@@ -379,7 +386,7 @@ static const struct intel_padgroup cnlh_community1_gpps[] = { | |||
379 | static const struct intel_padgroup cnlh_community3_gpps[] = { | 386 | static const struct intel_padgroup cnlh_community3_gpps[] = { |
380 | CNL_GPP(0, 155, 178, 192), /* GPP_K */ | 387 | CNL_GPP(0, 155, 178, 192), /* GPP_K */ |
381 | CNL_GPP(1, 179, 202, 224), /* GPP_H */ | 388 | CNL_GPP(1, 179, 202, 224), /* GPP_H */ |
382 | CNL_GPP(2, 203, 215, 258), /* GPP_E */ | 389 | CNL_GPP(2, 203, 215, 256), /* GPP_E */ |
383 | CNL_GPP(3, 216, 239, 288), /* GPP_F */ | 390 | CNL_GPP(3, 216, 239, 288), /* GPP_F */ |
384 | CNL_GPP(4, 240, 248, CNL_NO_GPIO), /* SPI */ | 391 | CNL_GPP(4, 240, 248, CNL_NO_GPIO), /* SPI */ |
385 | }; | 392 | }; |
@@ -442,10 +449,10 @@ static const struct intel_function cnlh_functions[] = { | |||
442 | }; | 449 | }; |
443 | 450 | ||
444 | static const struct intel_community cnlh_communities[] = { | 451 | static const struct intel_community cnlh_communities[] = { |
445 | CNL_COMMUNITY(0, 0, 50, cnlh_community0_gpps), | 452 | CNLH_COMMUNITY(0, 0, 50, cnlh_community0_gpps), |
446 | CNL_COMMUNITY(1, 51, 154, cnlh_community1_gpps), | 453 | CNLH_COMMUNITY(1, 51, 154, cnlh_community1_gpps), |
447 | CNL_COMMUNITY(2, 155, 248, cnlh_community3_gpps), | 454 | CNLH_COMMUNITY(2, 155, 248, cnlh_community3_gpps), |
448 | CNL_COMMUNITY(3, 249, 298, cnlh_community4_gpps), | 455 | CNLH_COMMUNITY(3, 249, 298, cnlh_community4_gpps), |
449 | }; | 456 | }; |
450 | 457 | ||
451 | static const struct intel_pinctrl_soc_data cnlh_soc_data = { | 458 | static const struct intel_pinctrl_soc_data cnlh_soc_data = { |
@@ -803,9 +810,9 @@ static const struct intel_padgroup cnllp_community4_gpps[] = { | |||
803 | }; | 810 | }; |
804 | 811 | ||
805 | static const struct intel_community cnllp_communities[] = { | 812 | static const struct intel_community cnllp_communities[] = { |
806 | CNL_COMMUNITY(0, 0, 67, cnllp_community0_gpps), | 813 | CNLLP_COMMUNITY(0, 0, 67, cnllp_community0_gpps), |
807 | CNL_COMMUNITY(1, 68, 180, cnllp_community1_gpps), | 814 | CNLLP_COMMUNITY(1, 68, 180, cnllp_community1_gpps), |
808 | CNL_COMMUNITY(2, 181, 243, cnllp_community4_gpps), | 815 | CNLLP_COMMUNITY(2, 181, 243, cnllp_community4_gpps), |
809 | }; | 816 | }; |
810 | 817 | ||
811 | static const struct intel_pinctrl_soc_data cnllp_soc_data = { | 818 | static const struct intel_pinctrl_soc_data cnllp_soc_data = { |
diff --git a/drivers/pinctrl/intel/pinctrl-intel.c b/drivers/pinctrl/intel/pinctrl-intel.c index 62b009b27eda..1ea3438ea67e 100644 --- a/drivers/pinctrl/intel/pinctrl-intel.c +++ b/drivers/pinctrl/intel/pinctrl-intel.c | |||
@@ -747,13 +747,63 @@ static const struct pinctrl_desc intel_pinctrl_desc = { | |||
747 | .owner = THIS_MODULE, | 747 | .owner = THIS_MODULE, |
748 | }; | 748 | }; |
749 | 749 | ||
750 | /** | ||
751 | * intel_gpio_to_pin() - Translate from GPIO offset to pin number | ||
752 | * @pctrl: Pinctrl structure | ||
753 | * @offset: GPIO offset from gpiolib | ||
754 | * @community: Community is filled here if not %NULL | ||
755 | * @padgrp: Pad group is filled here if not %NULL | ||
756 | * | ||
757 | * When coming through gpiolib irqchip, the GPIO offset is not | ||
758 | * automatically translated to pinctrl pin number. This function can be | ||
759 | * used to find out the corresponding pinctrl pin. | ||
760 | */ | ||
761 | static int intel_gpio_to_pin(struct intel_pinctrl *pctrl, unsigned offset, | ||
762 | const struct intel_community **community, | ||
763 | const struct intel_padgroup **padgrp) | ||
764 | { | ||
765 | int i; | ||
766 | |||
767 | for (i = 0; i < pctrl->ncommunities; i++) { | ||
768 | const struct intel_community *comm = &pctrl->communities[i]; | ||
769 | int j; | ||
770 | |||
771 | for (j = 0; j < comm->ngpps; j++) { | ||
772 | const struct intel_padgroup *pgrp = &comm->gpps[j]; | ||
773 | |||
774 | if (pgrp->gpio_base < 0) | ||
775 | continue; | ||
776 | |||
777 | if (offset >= pgrp->gpio_base && | ||
778 | offset < pgrp->gpio_base + pgrp->size) { | ||
779 | int pin; | ||
780 | |||
781 | pin = pgrp->base + offset - pgrp->gpio_base; | ||
782 | if (community) | ||
783 | *community = comm; | ||
784 | if (padgrp) | ||
785 | *padgrp = pgrp; | ||
786 | |||
787 | return pin; | ||
788 | } | ||
789 | } | ||
790 | } | ||
791 | |||
792 | return -EINVAL; | ||
793 | } | ||
794 | |||
750 | static int intel_gpio_get(struct gpio_chip *chip, unsigned offset) | 795 | static int intel_gpio_get(struct gpio_chip *chip, unsigned offset) |
751 | { | 796 | { |
752 | struct intel_pinctrl *pctrl = gpiochip_get_data(chip); | 797 | struct intel_pinctrl *pctrl = gpiochip_get_data(chip); |
753 | void __iomem *reg; | 798 | void __iomem *reg; |
754 | u32 padcfg0; | 799 | u32 padcfg0; |
800 | int pin; | ||
755 | 801 | ||
756 | reg = intel_get_padcfg(pctrl, offset, PADCFG0); | 802 | pin = intel_gpio_to_pin(pctrl, offset, NULL, NULL); |
803 | if (pin < 0) | ||
804 | return -EINVAL; | ||
805 | |||
806 | reg = intel_get_padcfg(pctrl, pin, PADCFG0); | ||
757 | if (!reg) | 807 | if (!reg) |
758 | return -EINVAL; | 808 | return -EINVAL; |
759 | 809 | ||
@@ -770,8 +820,13 @@ static void intel_gpio_set(struct gpio_chip *chip, unsigned offset, int value) | |||
770 | unsigned long flags; | 820 | unsigned long flags; |
771 | void __iomem *reg; | 821 | void __iomem *reg; |
772 | u32 padcfg0; | 822 | u32 padcfg0; |
823 | int pin; | ||
773 | 824 | ||
774 | reg = intel_get_padcfg(pctrl, offset, PADCFG0); | 825 | pin = intel_gpio_to_pin(pctrl, offset, NULL, NULL); |
826 | if (pin < 0) | ||
827 | return; | ||
828 | |||
829 | reg = intel_get_padcfg(pctrl, pin, PADCFG0); | ||
775 | if (!reg) | 830 | if (!reg) |
776 | return; | 831 | return; |
777 | 832 | ||
@@ -790,8 +845,13 @@ static int intel_gpio_get_direction(struct gpio_chip *chip, unsigned int offset) | |||
790 | struct intel_pinctrl *pctrl = gpiochip_get_data(chip); | 845 | struct intel_pinctrl *pctrl = gpiochip_get_data(chip); |
791 | void __iomem *reg; | 846 | void __iomem *reg; |
792 | u32 padcfg0; | 847 | u32 padcfg0; |
848 | int pin; | ||
793 | 849 | ||
794 | reg = intel_get_padcfg(pctrl, offset, PADCFG0); | 850 | pin = intel_gpio_to_pin(pctrl, offset, NULL, NULL); |
851 | if (pin < 0) | ||
852 | return -EINVAL; | ||
853 | |||
854 | reg = intel_get_padcfg(pctrl, pin, PADCFG0); | ||
795 | if (!reg) | 855 | if (!reg) |
796 | return -EINVAL; | 856 | return -EINVAL; |
797 | 857 | ||
@@ -827,81 +887,6 @@ static const struct gpio_chip intel_gpio_chip = { | |||
827 | .set_config = gpiochip_generic_config, | 887 | .set_config = gpiochip_generic_config, |
828 | }; | 888 | }; |
829 | 889 | ||
830 | /** | ||
831 | * intel_gpio_to_pin() - Translate from GPIO offset to pin number | ||
832 | * @pctrl: Pinctrl structure | ||
833 | * @offset: GPIO offset from gpiolib | ||
831 | * @community: Community is filled here if not %NULL | ||
835 | * @padgrp: Pad group is filled here if not %NULL | ||
836 | * | ||
837 | * When coming through gpiolib irqchip, the GPIO offset is not | ||
838 | * automatically translated to pinctrl pin number. This function can be | ||
839 | * used to find out the corresponding pinctrl pin. | ||
840 | */ | ||
841 | static int intel_gpio_to_pin(struct intel_pinctrl *pctrl, unsigned offset, | ||
842 | const struct intel_community **community, | ||
843 | const struct intel_padgroup **padgrp) | ||
844 | { | ||
845 | int i; | ||
846 | |||
847 | for (i = 0; i < pctrl->ncommunities; i++) { | ||
848 | const struct intel_community *comm = &pctrl->communities[i]; | ||
849 | int j; | ||
850 | |||
851 | for (j = 0; j < comm->ngpps; j++) { | ||
852 | const struct intel_padgroup *pgrp = &comm->gpps[j]; | ||
853 | |||
854 | if (pgrp->gpio_base < 0) | ||
855 | continue; | ||
856 | |||
857 | if (offset >= pgrp->gpio_base && | ||
858 | offset < pgrp->gpio_base + pgrp->size) { | ||
859 | int pin; | ||
860 | |||
861 | pin = pgrp->base + offset - pgrp->gpio_base; | ||
862 | if (community) | ||
863 | *community = comm; | ||
864 | if (padgrp) | ||
865 | *padgrp = pgrp; | ||
866 | |||
867 | return pin; | ||
868 | } | ||
869 | } | ||
870 | } | ||
871 | |||
872 | return -EINVAL; | ||
873 | } | ||
874 | |||
875 | static int intel_gpio_irq_reqres(struct irq_data *d) | ||
876 | { | ||
877 | struct gpio_chip *gc = irq_data_get_irq_chip_data(d); | ||
878 | struct intel_pinctrl *pctrl = gpiochip_get_data(gc); | ||
879 | int pin; | ||
880 | int ret; | ||
881 | |||
882 | pin = intel_gpio_to_pin(pctrl, irqd_to_hwirq(d), NULL, NULL); | ||
883 | if (pin >= 0) { | ||
884 | ret = gpiochip_lock_as_irq(gc, pin); | ||
885 | if (ret) { | ||
886 | dev_err(pctrl->dev, "unable to lock HW IRQ %d for IRQ\n", | ||
887 | pin); | ||
888 | return ret; | ||
889 | } | ||
890 | } | ||
891 | return 0; | ||
892 | } | ||
893 | |||
894 | static void intel_gpio_irq_relres(struct irq_data *d) | ||
895 | { | ||
896 | struct gpio_chip *gc = irq_data_get_irq_chip_data(d); | ||
897 | struct intel_pinctrl *pctrl = gpiochip_get_data(gc); | ||
898 | int pin; | ||
899 | |||
900 | pin = intel_gpio_to_pin(pctrl, irqd_to_hwirq(d), NULL, NULL); | ||
901 | if (pin >= 0) | ||
902 | gpiochip_unlock_as_irq(gc, pin); | ||
903 | } | ||
904 | |||
905 | static void intel_gpio_irq_ack(struct irq_data *d) | 890 | static void intel_gpio_irq_ack(struct irq_data *d) |
906 | { | 891 | { |
907 | struct gpio_chip *gc = irq_data_get_irq_chip_data(d); | 892 | struct gpio_chip *gc = irq_data_get_irq_chip_data(d); |
@@ -1117,8 +1102,6 @@ static irqreturn_t intel_gpio_irq(int irq, void *data) | |||
1117 | 1102 | ||
1118 | static struct irq_chip intel_gpio_irqchip = { | 1103 | static struct irq_chip intel_gpio_irqchip = { |
1119 | .name = "intel-gpio", | 1104 | .name = "intel-gpio", |
1120 | .irq_request_resources = intel_gpio_irq_reqres, | ||
1121 | .irq_release_resources = intel_gpio_irq_relres, | ||
1122 | .irq_enable = intel_gpio_irq_enable, | 1105 | .irq_enable = intel_gpio_irq_enable, |
1123 | .irq_ack = intel_gpio_irq_ack, | 1106 | .irq_ack = intel_gpio_irq_ack, |
1124 | .irq_mask = intel_gpio_irq_mask, | 1107 | .irq_mask = intel_gpio_irq_mask, |
diff --git a/drivers/pinctrl/pinctrl-amd.c b/drivers/pinctrl/pinctrl-amd.c index 41ccc759b8b8..1425c2874d40 100644 --- a/drivers/pinctrl/pinctrl-amd.c +++ b/drivers/pinctrl/pinctrl-amd.c | |||
@@ -348,21 +348,12 @@ static void amd_gpio_irq_enable(struct irq_data *d) | |||
348 | unsigned long flags; | 348 | unsigned long flags; |
349 | struct gpio_chip *gc = irq_data_get_irq_chip_data(d); | 349 | struct gpio_chip *gc = irq_data_get_irq_chip_data(d); |
350 | struct amd_gpio *gpio_dev = gpiochip_get_data(gc); | 350 | struct amd_gpio *gpio_dev = gpiochip_get_data(gc); |
351 | u32 mask = BIT(INTERRUPT_ENABLE_OFF) | BIT(INTERRUPT_MASK_OFF); | ||
352 | 351 | ||
353 | raw_spin_lock_irqsave(&gpio_dev->lock, flags); | 352 | raw_spin_lock_irqsave(&gpio_dev->lock, flags); |
354 | pin_reg = readl(gpio_dev->base + (d->hwirq)*4); | 353 | pin_reg = readl(gpio_dev->base + (d->hwirq)*4); |
355 | pin_reg |= BIT(INTERRUPT_ENABLE_OFF); | 354 | pin_reg |= BIT(INTERRUPT_ENABLE_OFF); |
356 | pin_reg |= BIT(INTERRUPT_MASK_OFF); | 355 | pin_reg |= BIT(INTERRUPT_MASK_OFF); |
357 | writel(pin_reg, gpio_dev->base + (d->hwirq)*4); | 356 | writel(pin_reg, gpio_dev->base + (d->hwirq)*4); |
358 | /* | ||
359 | * When debounce logic is enabled it takes ~900 us before interrupts | ||
360 | * can be enabled. During this "debounce warm up" period the | ||
361 | * "INTERRUPT_ENABLE" bit will read as 0. Poll the bit here until it | ||
362 | * reads back as 1, signaling that interrupts are now enabled. | ||
363 | */ | ||
364 | while ((readl(gpio_dev->base + (d->hwirq)*4) & mask) != mask) | ||
365 | continue; | ||
366 | raw_spin_unlock_irqrestore(&gpio_dev->lock, flags); | 357 | raw_spin_unlock_irqrestore(&gpio_dev->lock, flags); |
367 | } | 358 | } |
368 | 359 | ||
@@ -426,7 +417,7 @@ static void amd_gpio_irq_eoi(struct irq_data *d) | |||
426 | static int amd_gpio_irq_set_type(struct irq_data *d, unsigned int type) | 417 | static int amd_gpio_irq_set_type(struct irq_data *d, unsigned int type) |
427 | { | 418 | { |
428 | int ret = 0; | 419 | int ret = 0; |
429 | u32 pin_reg; | 420 | u32 pin_reg, pin_reg_irq_en, mask; |
430 | unsigned long flags, irq_flags; | 421 | unsigned long flags, irq_flags; |
431 | struct gpio_chip *gc = irq_data_get_irq_chip_data(d); | 422 | struct gpio_chip *gc = irq_data_get_irq_chip_data(d); |
432 | struct amd_gpio *gpio_dev = gpiochip_get_data(gc); | 423 | struct amd_gpio *gpio_dev = gpiochip_get_data(gc); |
@@ -495,6 +486,28 @@ static int amd_gpio_irq_set_type(struct irq_data *d, unsigned int type) | |||
495 | } | 486 | } |
496 | 487 | ||
497 | pin_reg |= CLR_INTR_STAT << INTERRUPT_STS_OFF; | 488 | pin_reg |= CLR_INTR_STAT << INTERRUPT_STS_OFF; |
489 | /* | ||
490 | * If WAKE_INT_MASTER_REG.MaskStsEn is set, a software write to the | ||
491 | * debounce registers of any GPIO will block wake/interrupt status | ||
492 | * generation for *all* GPIOs for a length of time that depends on | ||
493 | * WAKE_INT_MASTER_REG.MaskStsLength[11:0]. During this period the | ||
494 | * INTERRUPT_ENABLE bit will read as 0. | ||
495 | * | ||
496 | * We temporarily enable irq for the GPIO whose configuration is | ||
497 | * changing, and then wait for it to read back as 1 to know when | ||
498 | * debounce has settled and then disable the irq again. | ||
499 | * We do this polling with the spinlock held to ensure other GPIO | ||
500 | * access routines do not read an incorrect value for the irq enable | ||
501 | * bit of other GPIOs. We keep the GPIO masked while polling to avoid | ||
502 | * spurious irqs, and disable the irq again after polling. | ||
503 | */ | ||
504 | mask = BIT(INTERRUPT_ENABLE_OFF); | ||
505 | pin_reg_irq_en = pin_reg; | ||
506 | pin_reg_irq_en |= mask; | ||
507 | pin_reg_irq_en &= ~BIT(INTERRUPT_MASK_OFF); | ||
508 | writel(pin_reg_irq_en, gpio_dev->base + (d->hwirq)*4); | ||
509 | while ((readl(gpio_dev->base + (d->hwirq)*4) & mask) != mask) | ||
510 | continue; | ||
498 | writel(pin_reg, gpio_dev->base + (d->hwirq)*4); | 511 | writel(pin_reg, gpio_dev->base + (d->hwirq)*4); |
499 | raw_spin_unlock_irqrestore(&gpio_dev->lock, flags); | 512 | raw_spin_unlock_irqrestore(&gpio_dev->lock, flags); |
500 | 513 | ||
diff --git a/drivers/platform/x86/alienware-wmi.c b/drivers/platform/x86/alienware-wmi.c index d975462a4c57..f10af5c383c5 100644 --- a/drivers/platform/x86/alienware-wmi.c +++ b/drivers/platform/x86/alienware-wmi.c | |||
@@ -536,6 +536,7 @@ static acpi_status alienware_wmax_command(struct wmax_basic_args *in_args, | |||
536 | if (obj && obj->type == ACPI_TYPE_INTEGER) | 536 | if (obj && obj->type == ACPI_TYPE_INTEGER) |
537 | *out_data = (u32) obj->integer.value; | 537 | *out_data = (u32) obj->integer.value; |
538 | } | 538 | } |
539 | kfree(output.pointer); | ||
539 | return status; | 540 | return status; |
540 | 541 | ||
541 | } | 542 | } |
diff --git a/drivers/platform/x86/dell-smbios-wmi.c b/drivers/platform/x86/dell-smbios-wmi.c index 88afe5651d24..cf2229ece9ff 100644 --- a/drivers/platform/x86/dell-smbios-wmi.c +++ b/drivers/platform/x86/dell-smbios-wmi.c | |||
@@ -78,6 +78,7 @@ static int run_smbios_call(struct wmi_device *wdev) | |||
78 | dev_dbg(&wdev->dev, "result: [%08x,%08x,%08x,%08x]\n", | 78 | dev_dbg(&wdev->dev, "result: [%08x,%08x,%08x,%08x]\n", |
79 | priv->buf->std.output[0], priv->buf->std.output[1], | 79 | priv->buf->std.output[0], priv->buf->std.output[1], |
80 | priv->buf->std.output[2], priv->buf->std.output[3]); | 80 | priv->buf->std.output[2], priv->buf->std.output[3]); |
81 | kfree(output.pointer); | ||
81 | 82 | ||
82 | return 0; | 83 | return 0; |
83 | } | 84 | } |
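Note: both WMI fixes above close the same leak. wmi_evaluate_method() fills the output acpi_buffer with memory allocated on the caller's behalf, so the caller must kfree(output.pointer) once the result has been copied out, even on the paths that only read the returned integer. The sketch below shows the corrected call sequence; the GUID parameter, instance/method ids and helper name are illustrative, not taken from either driver.

	#include <linux/acpi.h>
	#include <linux/slab.h>
	#include <linux/wmi.h>

	static acpi_status example_wmax_call(const char *guid, void *in, size_t in_len,
					     u32 *out_data)
	{
		struct acpi_buffer input = { in_len, in };
		struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
		union acpi_object *obj;
		acpi_status status;

		status = wmi_evaluate_method(guid, 0, 1, &input, &output);
		if (ACPI_FAILURE(status))
			return status;

		obj = output.pointer;
		if (obj && obj->type == ACPI_TYPE_INTEGER)
			*out_data = (u32)obj->integer.value;

		/* ACPICA allocated this buffer for us; we own it and must free it. */
		kfree(output.pointer);
		return status;
	}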
diff --git a/drivers/regulator/bd71837-regulator.c b/drivers/regulator/bd71837-regulator.c index 0f8ac8dec3e1..a1bd8aaf4d98 100644 --- a/drivers/regulator/bd71837-regulator.c +++ b/drivers/regulator/bd71837-regulator.c | |||
@@ -569,6 +569,25 @@ static int bd71837_probe(struct platform_device *pdev) | |||
569 | BD71837_REG_REGLOCK); | 569 | BD71837_REG_REGLOCK); |
570 | } | 570 | } |
571 | 571 | ||
572 | /* | ||
573 | * There is a HW quirk in BD71837. The shutdown sequence timings for | ||
574 | * bucks/LDOs which are controlled via register interface are changed. | ||
575 | * At PMIC poweroff the voltage for BUCK6/7 is cut immediately at the | ||
576 | * beginning of shut-down sequence. As bucks 6 and 7 are parent | ||
577 | * supplies for LDO5 and LDO6 - this causes LDO5/6 voltage | ||
578 | * monitoring to errorneously detect under voltage and force PMIC to | ||
579 | * emergency state instead of poweroff. In order to avoid this we | ||
580 | * disable voltage monitoring for LDO5 and LDO6 | ||
581 | */ | ||
582 | err = regmap_update_bits(pmic->mfd->regmap, BD718XX_REG_MVRFLTMASK2, | ||
583 | BD718XX_LDO5_VRMON80 | BD718XX_LDO6_VRMON80, | ||
584 | BD718XX_LDO5_VRMON80 | BD718XX_LDO6_VRMON80); | ||
585 | if (err) { | ||
586 | dev_err(&pmic->pdev->dev, | ||
587 | "Failed to disable voltage monitoring\n"); | ||
588 | goto err; | ||
589 | } | ||
590 | |||
572 | for (i = 0; i < ARRAY_SIZE(pmic_regulator_inits); i++) { | 591 | for (i = 0; i < ARRAY_SIZE(pmic_regulator_inits); i++) { |
573 | 592 | ||
574 | struct regulator_desc *desc; | 593 | struct regulator_desc *desc; |
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c index bb1324f93143..9577d8941846 100644 --- a/drivers/regulator/core.c +++ b/drivers/regulator/core.c | |||
@@ -3161,7 +3161,7 @@ static inline int regulator_suspend_toggle(struct regulator_dev *rdev, | |||
3161 | if (!rstate->changeable) | 3161 | if (!rstate->changeable) |
3162 | return -EPERM; | 3162 | return -EPERM; |
3163 | 3163 | ||
3164 | rstate->enabled = en; | 3164 | rstate->enabled = (en) ? ENABLE_IN_SUSPEND : DISABLE_IN_SUSPEND; |
3165 | 3165 | ||
3166 | return 0; | 3166 | return 0; |
3167 | } | 3167 | } |
@@ -4395,13 +4395,13 @@ regulator_register(const struct regulator_desc *regulator_desc, | |||
4395 | !rdev->desc->fixed_uV) | 4395 | !rdev->desc->fixed_uV) |
4396 | rdev->is_switch = true; | 4396 | rdev->is_switch = true; |
4397 | 4397 | ||
4398 | dev_set_drvdata(&rdev->dev, rdev); | ||
4398 | ret = device_register(&rdev->dev); | 4399 | ret = device_register(&rdev->dev); |
4399 | if (ret != 0) { | 4400 | if (ret != 0) { |
4400 | put_device(&rdev->dev); | 4401 | put_device(&rdev->dev); |
4401 | goto unset_supplies; | 4402 | goto unset_supplies; |
4402 | } | 4403 | } |
4403 | 4404 | ||
4404 | dev_set_drvdata(&rdev->dev, rdev); | ||
4405 | rdev_init_debugfs(rdev); | 4405 | rdev_init_debugfs(rdev); |
4406 | 4406 | ||
4407 | /* try to resolve regulators supply since a new one was registered */ | 4407 | /* try to resolve regulators supply since a new one was registered */ |
diff --git a/drivers/regulator/of_regulator.c b/drivers/regulator/of_regulator.c index 638f17d4c848..210fc20f7de7 100644 --- a/drivers/regulator/of_regulator.c +++ b/drivers/regulator/of_regulator.c | |||
@@ -213,8 +213,6 @@ static void of_get_regulation_constraints(struct device_node *np, | |||
213 | else if (of_property_read_bool(suspend_np, | 213 | else if (of_property_read_bool(suspend_np, |
214 | "regulator-off-in-suspend")) | 214 | "regulator-off-in-suspend")) |
215 | suspend_state->enabled = DISABLE_IN_SUSPEND; | 215 | suspend_state->enabled = DISABLE_IN_SUSPEND; |
216 | else | ||
217 | suspend_state->enabled = DO_NOTHING_IN_SUSPEND; | ||
218 | 216 | ||
219 | if (!of_property_read_u32(np, "regulator-suspend-min-microvolt", | 217 | if (!of_property_read_u32(np, "regulator-suspend-min-microvolt", |
220 | &pval)) | 218 | &pval)) |
diff --git a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c index fac377320158..f42a619198c4 100644 --- a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c +++ b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c | |||
@@ -3474,11 +3474,10 @@ static int ibmvscsis_probe(struct vio_dev *vdev, | |||
3474 | vscsi->dds.window[LOCAL].liobn, | 3474 | vscsi->dds.window[LOCAL].liobn, |
3475 | vscsi->dds.window[REMOTE].liobn); | 3475 | vscsi->dds.window[REMOTE].liobn); |
3476 | 3476 | ||
3477 | strcpy(vscsi->eye, "VSCSI "); | 3477 | snprintf(vscsi->eye, sizeof(vscsi->eye), "VSCSI %s", vdev->name); |
3478 | strncat(vscsi->eye, vdev->name, MAX_EYE); | ||
3479 | 3478 | ||
3480 | vscsi->dds.unit_id = vdev->unit_address; | 3479 | vscsi->dds.unit_id = vdev->unit_address; |
3481 | strncpy(vscsi->dds.partition_name, partition_name, | 3480 | strscpy(vscsi->dds.partition_name, partition_name, |
3482 | sizeof(vscsi->dds.partition_name)); | 3481 | sizeof(vscsi->dds.partition_name)); |
3483 | vscsi->dds.partition_num = partition_number; | 3482 | vscsi->dds.partition_num = partition_number; |
3484 | 3483 | ||
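The ibmvscsi_tgt hunk replaces strcpy()+strncat() with a single bounded snprintf() and strncpy() with strscpy(), both of which guarantee NUL termination and never overrun the destination. A small sketch of the idiom, with a made-up structure standing in for the driver's dds fields:

	#include <linux/kernel.h>
	#include <linux/string.h>

	struct example_dds {
		char eye[64];
		char partition_name[97];
	};

	static void example_fill_ids(struct example_dds *dds,
				     const char *dev_name, const char *partition)
	{
		/* snprintf() truncates within sizeof(eye) and always terminates */
		snprintf(dds->eye, sizeof(dds->eye), "VSCSI %s", dev_name);

		/* strscpy() returns -E2BIG on truncation and always terminates */
		if (strscpy(dds->partition_name, partition,
			    sizeof(dds->partition_name)) < 0)
			pr_warn("partition name truncated\n");
	}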
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c index f2ec80b0ffc0..271990bc065b 100644 --- a/drivers/scsi/ipr.c +++ b/drivers/scsi/ipr.c | |||
@@ -3335,6 +3335,65 @@ static void ipr_release_dump(struct kref *kref) | |||
3335 | LEAVE; | 3335 | LEAVE; |
3336 | } | 3336 | } |
3337 | 3337 | ||
3338 | static void ipr_add_remove_thread(struct work_struct *work) | ||
3339 | { | ||
3340 | unsigned long lock_flags; | ||
3341 | struct ipr_resource_entry *res; | ||
3342 | struct scsi_device *sdev; | ||
3343 | struct ipr_ioa_cfg *ioa_cfg = | ||
3344 | container_of(work, struct ipr_ioa_cfg, scsi_add_work_q); | ||
3345 | u8 bus, target, lun; | ||
3346 | int did_work; | ||
3347 | |||
3348 | ENTER; | ||
3349 | spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); | ||
3350 | |||
3351 | restart: | ||
3352 | do { | ||
3353 | did_work = 0; | ||
3354 | if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds) { | ||
3355 | spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); | ||
3356 | return; | ||
3357 | } | ||
3358 | |||
3359 | list_for_each_entry(res, &ioa_cfg->used_res_q, queue) { | ||
3360 | if (res->del_from_ml && res->sdev) { | ||
3361 | did_work = 1; | ||
3362 | sdev = res->sdev; | ||
3363 | if (!scsi_device_get(sdev)) { | ||
3364 | if (!res->add_to_ml) | ||
3365 | list_move_tail(&res->queue, &ioa_cfg->free_res_q); | ||
3366 | else | ||
3367 | res->del_from_ml = 0; | ||
3368 | spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); | ||
3369 | scsi_remove_device(sdev); | ||
3370 | scsi_device_put(sdev); | ||
3371 | spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); | ||
3372 | } | ||
3373 | break; | ||
3374 | } | ||
3375 | } | ||
3376 | } while (did_work); | ||
3377 | |||
3378 | list_for_each_entry(res, &ioa_cfg->used_res_q, queue) { | ||
3379 | if (res->add_to_ml) { | ||
3380 | bus = res->bus; | ||
3381 | target = res->target; | ||
3382 | lun = res->lun; | ||
3383 | res->add_to_ml = 0; | ||
3384 | spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); | ||
3385 | scsi_add_device(ioa_cfg->host, bus, target, lun); | ||
3386 | spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); | ||
3387 | goto restart; | ||
3388 | } | ||
3389 | } | ||
3390 | |||
3391 | ioa_cfg->scan_done = 1; | ||
3392 | spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); | ||
3393 | kobject_uevent(&ioa_cfg->host->shost_dev.kobj, KOBJ_CHANGE); | ||
3394 | LEAVE; | ||
3395 | } | ||
3396 | |||
3338 | /** | 3397 | /** |
3339 | * ipr_worker_thread - Worker thread | 3398 | * ipr_worker_thread - Worker thread |
3340 | * @work: ioa config struct | 3399 | * @work: ioa config struct |
@@ -3349,13 +3408,9 @@ static void ipr_release_dump(struct kref *kref) | |||
3349 | static void ipr_worker_thread(struct work_struct *work) | 3408 | static void ipr_worker_thread(struct work_struct *work) |
3350 | { | 3409 | { |
3351 | unsigned long lock_flags; | 3410 | unsigned long lock_flags; |
3352 | struct ipr_resource_entry *res; | ||
3353 | struct scsi_device *sdev; | ||
3354 | struct ipr_dump *dump; | 3411 | struct ipr_dump *dump; |
3355 | struct ipr_ioa_cfg *ioa_cfg = | 3412 | struct ipr_ioa_cfg *ioa_cfg = |
3356 | container_of(work, struct ipr_ioa_cfg, work_q); | 3413 | container_of(work, struct ipr_ioa_cfg, work_q); |
3357 | u8 bus, target, lun; | ||
3358 | int did_work; | ||
3359 | 3414 | ||
3360 | ENTER; | 3415 | ENTER; |
3361 | spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); | 3416 | spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); |
@@ -3393,49 +3448,9 @@ static void ipr_worker_thread(struct work_struct *work) | |||
3393 | return; | 3448 | return; |
3394 | } | 3449 | } |
3395 | 3450 | ||
3396 | restart: | 3451 | schedule_work(&ioa_cfg->scsi_add_work_q); |
3397 | do { | ||
3398 | did_work = 0; | ||
3399 | if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds) { | ||
3400 | spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); | ||
3401 | return; | ||
3402 | } | ||
3403 | 3452 | ||
3404 | list_for_each_entry(res, &ioa_cfg->used_res_q, queue) { | ||
3405 | if (res->del_from_ml && res->sdev) { | ||
3406 | did_work = 1; | ||
3407 | sdev = res->sdev; | ||
3408 | if (!scsi_device_get(sdev)) { | ||
3409 | if (!res->add_to_ml) | ||
3410 | list_move_tail(&res->queue, &ioa_cfg->free_res_q); | ||
3411 | else | ||
3412 | res->del_from_ml = 0; | ||
3413 | spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); | ||
3414 | scsi_remove_device(sdev); | ||
3415 | scsi_device_put(sdev); | ||
3416 | spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); | ||
3417 | } | ||
3418 | break; | ||
3419 | } | ||
3420 | } | ||
3421 | } while (did_work); | ||
3422 | |||
3423 | list_for_each_entry(res, &ioa_cfg->used_res_q, queue) { | ||
3424 | if (res->add_to_ml) { | ||
3425 | bus = res->bus; | ||
3426 | target = res->target; | ||
3427 | lun = res->lun; | ||
3428 | res->add_to_ml = 0; | ||
3429 | spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); | ||
3430 | scsi_add_device(ioa_cfg->host, bus, target, lun); | ||
3431 | spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); | ||
3432 | goto restart; | ||
3433 | } | ||
3434 | } | ||
3435 | |||
3436 | ioa_cfg->scan_done = 1; | ||
3437 | spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); | 3453 | spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); |
3438 | kobject_uevent(&ioa_cfg->host->shost_dev.kobj, KOBJ_CHANGE); | ||
3439 | LEAVE; | 3454 | LEAVE; |
3440 | } | 3455 | } |
3441 | 3456 | ||
@@ -9933,6 +9948,7 @@ static void ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg, | |||
9933 | INIT_LIST_HEAD(&ioa_cfg->free_res_q); | 9948 | INIT_LIST_HEAD(&ioa_cfg->free_res_q); |
9934 | INIT_LIST_HEAD(&ioa_cfg->used_res_q); | 9949 | INIT_LIST_HEAD(&ioa_cfg->used_res_q); |
9935 | INIT_WORK(&ioa_cfg->work_q, ipr_worker_thread); | 9950 | INIT_WORK(&ioa_cfg->work_q, ipr_worker_thread); |
9951 | INIT_WORK(&ioa_cfg->scsi_add_work_q, ipr_add_remove_thread); | ||
9936 | init_waitqueue_head(&ioa_cfg->reset_wait_q); | 9952 | init_waitqueue_head(&ioa_cfg->reset_wait_q); |
9937 | init_waitqueue_head(&ioa_cfg->msi_wait_q); | 9953 | init_waitqueue_head(&ioa_cfg->msi_wait_q); |
9938 | init_waitqueue_head(&ioa_cfg->eeh_wait_q); | 9954 | init_waitqueue_head(&ioa_cfg->eeh_wait_q); |
diff --git a/drivers/scsi/ipr.h b/drivers/scsi/ipr.h index 68afbbde54d3..f6baa2351313 100644 --- a/drivers/scsi/ipr.h +++ b/drivers/scsi/ipr.h | |||
@@ -1575,6 +1575,7 @@ struct ipr_ioa_cfg { | |||
1575 | u8 saved_mode_page_len; | 1575 | u8 saved_mode_page_len; |
1576 | 1576 | ||
1577 | struct work_struct work_q; | 1577 | struct work_struct work_q; |
1578 | struct work_struct scsi_add_work_q; | ||
1578 | struct workqueue_struct *reset_work_q; | 1579 | struct workqueue_struct *reset_work_q; |
1579 | 1580 | ||
1580 | wait_queue_head_t reset_wait_q; | 1581 | wait_queue_head_t reset_wait_q; |
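The ipr changes move the device add/remove loop, which can block for a long time inside scsi_add_device()/scsi_remove_device(), out of the main worker and into a second work_struct that the main worker merely schedules. A stripped-down sketch of that split, using hypothetical names rather than the ipr_ioa_cfg fields:

	#include <linux/workqueue.h>

	struct example_host {
		struct work_struct main_work;
		struct work_struct add_remove_work;
	};

	static void example_add_remove_fn(struct work_struct *work)
	{
		struct example_host *host =
			container_of(work, struct example_host, add_remove_work);

		/* the slow device add/remove handling would run here */
		(void)host;
	}

	static void example_main_fn(struct work_struct *work)
	{
		struct example_host *host =
			container_of(work, struct example_host, main_work);

		/* fast bookkeeping first, then defer the slow part */
		schedule_work(&host->add_remove_work);
	}

	static void example_host_init(struct example_host *host)
	{
		INIT_WORK(&host->main_work, example_main_fn);
		INIT_WORK(&host->add_remove_work, example_add_remove_fn);
	}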
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c index 057a60abe664..1a6ed9b0a249 100644 --- a/drivers/scsi/lpfc/lpfc_attr.c +++ b/drivers/scsi/lpfc/lpfc_attr.c | |||
@@ -360,12 +360,12 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr, | |||
360 | goto buffer_done; | 360 | goto buffer_done; |
361 | 361 | ||
362 | list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) { | 362 | list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) { |
363 | nrport = NULL; | ||
364 | spin_lock(&vport->phba->hbalock); | ||
363 | rport = lpfc_ndlp_get_nrport(ndlp); | 365 | rport = lpfc_ndlp_get_nrport(ndlp); |
364 | if (!rport) | 366 | if (rport) |
365 | continue; | 367 | nrport = rport->remoteport; |
366 | 368 | spin_unlock(&vport->phba->hbalock); | |
367 | /* local short-hand pointer. */ | ||
368 | nrport = rport->remoteport; | ||
369 | if (!nrport) | 369 | if (!nrport) |
370 | continue; | 370 | continue; |
371 | 371 | ||
@@ -3386,6 +3386,7 @@ lpfc_update_rport_devloss_tmo(struct lpfc_vport *vport) | |||
3386 | struct lpfc_nodelist *ndlp; | 3386 | struct lpfc_nodelist *ndlp; |
3387 | #if (IS_ENABLED(CONFIG_NVME_FC)) | 3387 | #if (IS_ENABLED(CONFIG_NVME_FC)) |
3388 | struct lpfc_nvme_rport *rport; | 3388 | struct lpfc_nvme_rport *rport; |
3389 | struct nvme_fc_remote_port *remoteport = NULL; | ||
3389 | #endif | 3390 | #endif |
3390 | 3391 | ||
3391 | shost = lpfc_shost_from_vport(vport); | 3392 | shost = lpfc_shost_from_vport(vport); |
@@ -3396,8 +3397,12 @@ lpfc_update_rport_devloss_tmo(struct lpfc_vport *vport) | |||
3396 | if (ndlp->rport) | 3397 | if (ndlp->rport) |
3397 | ndlp->rport->dev_loss_tmo = vport->cfg_devloss_tmo; | 3398 | ndlp->rport->dev_loss_tmo = vport->cfg_devloss_tmo; |
3398 | #if (IS_ENABLED(CONFIG_NVME_FC)) | 3399 | #if (IS_ENABLED(CONFIG_NVME_FC)) |
3400 | spin_lock(&vport->phba->hbalock); | ||
3399 | rport = lpfc_ndlp_get_nrport(ndlp); | 3401 | rport = lpfc_ndlp_get_nrport(ndlp); |
3400 | if (rport) | 3402 | if (rport) |
3403 | remoteport = rport->remoteport; | ||
3404 | spin_unlock(&vport->phba->hbalock); | ||
3405 | if (remoteport) | ||
3401 | nvme_fc_set_remoteport_devloss(rport->remoteport, | 3406 | nvme_fc_set_remoteport_devloss(rport->remoteport, |
3402 | vport->cfg_devloss_tmo); | 3407 | vport->cfg_devloss_tmo); |
3403 | #endif | 3408 | #endif |
diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c index 9df0c051349f..aec5b10a8c85 100644 --- a/drivers/scsi/lpfc/lpfc_debugfs.c +++ b/drivers/scsi/lpfc/lpfc_debugfs.c | |||
@@ -551,7 +551,7 @@ lpfc_debugfs_nodelist_data(struct lpfc_vport *vport, char *buf, int size) | |||
551 | unsigned char *statep; | 551 | unsigned char *statep; |
552 | struct nvme_fc_local_port *localport; | 552 | struct nvme_fc_local_port *localport; |
553 | struct lpfc_nvmet_tgtport *tgtp; | 553 | struct lpfc_nvmet_tgtport *tgtp; |
554 | struct nvme_fc_remote_port *nrport; | 554 | struct nvme_fc_remote_port *nrport = NULL; |
555 | struct lpfc_nvme_rport *rport; | 555 | struct lpfc_nvme_rport *rport; |
556 | 556 | ||
557 | cnt = (LPFC_NODELIST_SIZE / LPFC_NODELIST_ENTRY_SIZE); | 557 | cnt = (LPFC_NODELIST_SIZE / LPFC_NODELIST_ENTRY_SIZE); |
@@ -696,11 +696,11 @@ lpfc_debugfs_nodelist_data(struct lpfc_vport *vport, char *buf, int size) | |||
696 | len += snprintf(buf + len, size - len, "\tRport List:\n"); | 696 | len += snprintf(buf + len, size - len, "\tRport List:\n"); |
697 | list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) { | 697 | list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) { |
698 | /* local short-hand pointer. */ | 698 | /* local short-hand pointer. */ |
699 | spin_lock(&phba->hbalock); | ||
699 | rport = lpfc_ndlp_get_nrport(ndlp); | 700 | rport = lpfc_ndlp_get_nrport(ndlp); |
700 | if (!rport) | 701 | if (rport) |
701 | continue; | 702 | nrport = rport->remoteport; |
702 | 703 | spin_unlock(&phba->hbalock); | |
703 | nrport = rport->remoteport; | ||
704 | if (!nrport) | 704 | if (!nrport) |
705 | continue; | 705 | continue; |
706 | 706 | ||
diff --git a/drivers/scsi/lpfc/lpfc_nvme.c b/drivers/scsi/lpfc/lpfc_nvme.c index 028462e5994d..918ae18ef8a8 100644 --- a/drivers/scsi/lpfc/lpfc_nvme.c +++ b/drivers/scsi/lpfc/lpfc_nvme.c | |||
@@ -2725,7 +2725,9 @@ lpfc_nvme_register_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) | |||
2725 | rpinfo.port_name = wwn_to_u64(ndlp->nlp_portname.u.wwn); | 2725 | rpinfo.port_name = wwn_to_u64(ndlp->nlp_portname.u.wwn); |
2726 | rpinfo.node_name = wwn_to_u64(ndlp->nlp_nodename.u.wwn); | 2726 | rpinfo.node_name = wwn_to_u64(ndlp->nlp_nodename.u.wwn); |
2727 | 2727 | ||
2728 | spin_lock_irq(&vport->phba->hbalock); | ||
2728 | oldrport = lpfc_ndlp_get_nrport(ndlp); | 2729 | oldrport = lpfc_ndlp_get_nrport(ndlp); |
2730 | spin_unlock_irq(&vport->phba->hbalock); | ||
2729 | if (!oldrport) | 2731 | if (!oldrport) |
2730 | lpfc_nlp_get(ndlp); | 2732 | lpfc_nlp_get(ndlp); |
2731 | 2733 | ||
@@ -2840,7 +2842,7 @@ lpfc_nvme_unregister_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) | |||
2840 | struct nvme_fc_local_port *localport; | 2842 | struct nvme_fc_local_port *localport; |
2841 | struct lpfc_nvme_lport *lport; | 2843 | struct lpfc_nvme_lport *lport; |
2842 | struct lpfc_nvme_rport *rport; | 2844 | struct lpfc_nvme_rport *rport; |
2843 | struct nvme_fc_remote_port *remoteport; | 2845 | struct nvme_fc_remote_port *remoteport = NULL; |
2844 | 2846 | ||
2845 | localport = vport->localport; | 2847 | localport = vport->localport; |
2846 | 2848 | ||
@@ -2854,11 +2856,14 @@ lpfc_nvme_unregister_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) | |||
2854 | if (!lport) | 2856 | if (!lport) |
2855 | goto input_err; | 2857 | goto input_err; |
2856 | 2858 | ||
2859 | spin_lock_irq(&vport->phba->hbalock); | ||
2857 | rport = lpfc_ndlp_get_nrport(ndlp); | 2860 | rport = lpfc_ndlp_get_nrport(ndlp); |
2858 | if (!rport) | 2861 | if (rport) |
2862 | remoteport = rport->remoteport; | ||
2863 | spin_unlock_irq(&vport->phba->hbalock); | ||
2864 | if (!remoteport) | ||
2859 | goto input_err; | 2865 | goto input_err; |
2860 | 2866 | ||
2861 | remoteport = rport->remoteport; | ||
2862 | lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC, | 2867 | lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC, |
2863 | "6033 Unreg nvme remoteport %p, portname x%llx, " | 2868 | "6033 Unreg nvme remoteport %p, portname x%llx, " |
2864 | "port_id x%06x, portstate x%x port type x%x\n", | 2869 | "port_id x%06x, portstate x%x port type x%x\n", |
diff --git a/drivers/scsi/qla2xxx/qla_target.h b/drivers/scsi/qla2xxx/qla_target.h index fecf96f0225c..199d3ba1916d 100644 --- a/drivers/scsi/qla2xxx/qla_target.h +++ b/drivers/scsi/qla2xxx/qla_target.h | |||
@@ -374,8 +374,8 @@ struct atio_from_isp { | |||
374 | static inline int fcpcmd_is_corrupted(struct atio *atio) | 374 | static inline int fcpcmd_is_corrupted(struct atio *atio) |
375 | { | 375 | { |
376 | if (atio->entry_type == ATIO_TYPE7 && | 376 | if (atio->entry_type == ATIO_TYPE7 && |
377 | (le16_to_cpu(atio->attr_n_length & FCP_CMD_LENGTH_MASK) < | 377 | ((le16_to_cpu(atio->attr_n_length) & FCP_CMD_LENGTH_MASK) < |
378 | FCP_CMD_LENGTH_MIN)) | 378 | FCP_CMD_LENGTH_MIN)) |
379 | return 1; | 379 | return 1; |
380 | else | 380 | else |
381 | return 0; | 381 | return 0; |
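The qla2xxx one-liner is an endianness/precedence fix: attr_n_length is a little-endian wire value, so it must be converted with le16_to_cpu() before the host-order length mask is applied; the old code masked the raw __le16 first, which only happens to work on little-endian hosts. A tiny sketch of the corrected check, with illustrative constants standing in for the FCP_CMD_LENGTH_* values:

	#include <linux/types.h>
	#include <asm/byteorder.h>

	#define EXAMPLE_LENGTH_MASK	0x0fff
	#define EXAMPLE_LENGTH_MIN	0x38

	static inline int example_len_too_short(__le16 attr_n_length)
	{
		/* convert to host byte order first, then mask */
		return (le16_to_cpu(attr_n_length) & EXAMPLE_LENGTH_MASK) <
		       EXAMPLE_LENGTH_MIN;
	}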
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c index b79b366a94f7..4a57ffecc7e6 100644 --- a/drivers/scsi/sd.c +++ b/drivers/scsi/sd.c | |||
@@ -1276,7 +1276,8 @@ static int sd_init_command(struct scsi_cmnd *cmd) | |||
1276 | case REQ_OP_ZONE_RESET: | 1276 | case REQ_OP_ZONE_RESET: |
1277 | return sd_zbc_setup_reset_cmnd(cmd); | 1277 | return sd_zbc_setup_reset_cmnd(cmd); |
1278 | default: | 1278 | default: |
1279 | BUG(); | 1279 | WARN_ON_ONCE(1); |
1280 | return BLKPREP_KILL; | ||
1280 | } | 1281 | } |
1281 | } | 1282 | } |
1282 | 1283 | ||
@@ -2959,6 +2960,9 @@ static void sd_read_block_characteristics(struct scsi_disk *sdkp) | |||
2959 | if (rot == 1) { | 2960 | if (rot == 1) { |
2960 | blk_queue_flag_set(QUEUE_FLAG_NONROT, q); | 2961 | blk_queue_flag_set(QUEUE_FLAG_NONROT, q); |
2961 | blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, q); | 2962 | blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, q); |
2963 | } else { | ||
2964 | blk_queue_flag_clear(QUEUE_FLAG_NONROT, q); | ||
2965 | blk_queue_flag_set(QUEUE_FLAG_ADD_RANDOM, q); | ||
2962 | } | 2966 | } |
2963 | 2967 | ||
2964 | if (sdkp->device->type == TYPE_ZBC) { | 2968 | if (sdkp->device->type == TYPE_ZBC) { |
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c index 9d5d2ca7fc4f..c55f38ec391c 100644 --- a/drivers/scsi/ufs/ufshcd.c +++ b/drivers/scsi/ufs/ufshcd.c | |||
@@ -7940,6 +7940,13 @@ int ufshcd_alloc_host(struct device *dev, struct ufs_hba **hba_handle) | |||
7940 | err = -ENOMEM; | 7940 | err = -ENOMEM; |
7941 | goto out_error; | 7941 | goto out_error; |
7942 | } | 7942 | } |
7943 | |||
7944 | /* | ||
7945 | * Do not use blk-mq at this time because blk-mq does not support | ||
7946 | * runtime pm. | ||
7947 | */ | ||
7948 | host->use_blk_mq = false; | ||
7949 | |||
7943 | hba = shost_priv(host); | 7950 | hba = shost_priv(host); |
7944 | hba->host = host; | 7951 | hba->host = host; |
7945 | hba->dev = dev; | 7952 | hba->dev = dev; |
diff --git a/drivers/soundwire/stream.c b/drivers/soundwire/stream.c index 4b5e250e8615..e5c7e1ef6318 100644 --- a/drivers/soundwire/stream.c +++ b/drivers/soundwire/stream.c | |||
@@ -899,9 +899,10 @@ static void sdw_release_master_stream(struct sdw_stream_runtime *stream) | |||
899 | struct sdw_master_runtime *m_rt = stream->m_rt; | 899 | struct sdw_master_runtime *m_rt = stream->m_rt; |
900 | struct sdw_slave_runtime *s_rt, *_s_rt; | 900 | struct sdw_slave_runtime *s_rt, *_s_rt; |
901 | 901 | ||
902 | list_for_each_entry_safe(s_rt, _s_rt, | 902 | list_for_each_entry_safe(s_rt, _s_rt, &m_rt->slave_rt_list, m_rt_node) { |
903 | &m_rt->slave_rt_list, m_rt_node) | 903 | sdw_slave_port_release(s_rt->slave->bus, s_rt->slave, stream); |
904 | sdw_stream_remove_slave(s_rt->slave, stream); | 904 | sdw_release_slave_stream(s_rt->slave, stream); |
905 | } | ||
905 | 906 | ||
906 | list_del(&m_rt->bus_node); | 907 | list_del(&m_rt->bus_node); |
907 | } | 908 | } |
@@ -1112,7 +1113,7 @@ int sdw_stream_add_master(struct sdw_bus *bus, | |||
1112 | "Master runtime config failed for stream:%s", | 1113 | "Master runtime config failed for stream:%s", |
1113 | stream->name); | 1114 | stream->name); |
1114 | ret = -ENOMEM; | 1115 | ret = -ENOMEM; |
1115 | goto error; | 1116 | goto unlock; |
1116 | } | 1117 | } |
1117 | 1118 | ||
1118 | ret = sdw_config_stream(bus->dev, stream, stream_config, false); | 1119 | ret = sdw_config_stream(bus->dev, stream, stream_config, false); |
@@ -1123,11 +1124,11 @@ int sdw_stream_add_master(struct sdw_bus *bus, | |||
1123 | if (ret) | 1124 | if (ret) |
1124 | goto stream_error; | 1125 | goto stream_error; |
1125 | 1126 | ||
1126 | stream->state = SDW_STREAM_CONFIGURED; | 1127 | goto unlock; |
1127 | 1128 | ||
1128 | stream_error: | 1129 | stream_error: |
1129 | sdw_release_master_stream(stream); | 1130 | sdw_release_master_stream(stream); |
1130 | error: | 1131 | unlock: |
1131 | mutex_unlock(&bus->bus_lock); | 1132 | mutex_unlock(&bus->bus_lock); |
1132 | return ret; | 1133 | return ret; |
1133 | } | 1134 | } |
@@ -1141,6 +1142,10 @@ EXPORT_SYMBOL(sdw_stream_add_master); | |||
1141 | * @stream: SoundWire stream | 1142 | * @stream: SoundWire stream |
1142 | * @port_config: Port configuration for audio stream | 1143 | * @port_config: Port configuration for audio stream |
1143 | * @num_ports: Number of ports | 1144 | * @num_ports: Number of ports |
1145 | * | ||
1146 | * It is expected that Slave is added before adding Master | ||
1147 | * to the Stream. | ||
1148 | * | ||
1144 | */ | 1149 | */ |
1145 | int sdw_stream_add_slave(struct sdw_slave *slave, | 1150 | int sdw_stream_add_slave(struct sdw_slave *slave, |
1146 | struct sdw_stream_config *stream_config, | 1151 | struct sdw_stream_config *stream_config, |
@@ -1186,6 +1191,12 @@ int sdw_stream_add_slave(struct sdw_slave *slave, | |||
1186 | if (ret) | 1191 | if (ret) |
1187 | goto stream_error; | 1192 | goto stream_error; |
1188 | 1193 | ||
1194 | /* | ||
1195 | * Change stream state to CONFIGURED on first Slave add. | ||
1196 | * Bus is not aware of number of Slave(s) in a stream at this | ||
1197 | * point so cannot depend on all Slave(s) to be added in order to | ||
1198 | * change stream state to CONFIGURED. | ||
1199 | */ | ||
1189 | stream->state = SDW_STREAM_CONFIGURED; | 1200 | stream->state = SDW_STREAM_CONFIGURED; |
1190 | goto error; | 1201 | goto error; |
1191 | 1202 | ||
diff --git a/drivers/spi/spi-fsl-dspi.c b/drivers/spi/spi-fsl-dspi.c index 7cb3ab0a35a0..3082e72e4f6c 100644 --- a/drivers/spi/spi-fsl-dspi.c +++ b/drivers/spi/spi-fsl-dspi.c | |||
@@ -30,7 +30,11 @@ | |||
30 | 30 | ||
31 | #define DRIVER_NAME "fsl-dspi" | 31 | #define DRIVER_NAME "fsl-dspi" |
32 | 32 | ||
33 | #ifdef CONFIG_M5441x | ||
34 | #define DSPI_FIFO_SIZE 16 | ||
35 | #else | ||
33 | #define DSPI_FIFO_SIZE 4 | 36 | #define DSPI_FIFO_SIZE 4 |
37 | #endif | ||
34 | #define DSPI_DMA_BUFSIZE (DSPI_FIFO_SIZE * 1024) | 38 | #define DSPI_DMA_BUFSIZE (DSPI_FIFO_SIZE * 1024) |
35 | 39 | ||
36 | #define SPI_MCR 0x00 | 40 | #define SPI_MCR 0x00 |
@@ -623,9 +627,11 @@ static void dspi_tcfq_read(struct fsl_dspi *dspi) | |||
623 | static void dspi_eoq_write(struct fsl_dspi *dspi) | 627 | static void dspi_eoq_write(struct fsl_dspi *dspi) |
624 | { | 628 | { |
625 | int fifo_size = DSPI_FIFO_SIZE; | 629 | int fifo_size = DSPI_FIFO_SIZE; |
630 | u16 xfer_cmd = dspi->tx_cmd; | ||
626 | 631 | ||
627 | /* Fill TX FIFO with as many transfers as possible */ | 632 | /* Fill TX FIFO with as many transfers as possible */ |
628 | while (dspi->len && fifo_size--) { | 633 | while (dspi->len && fifo_size--) { |
634 | dspi->tx_cmd = xfer_cmd; | ||
629 | /* Request EOQF for last transfer in FIFO */ | 635 | /* Request EOQF for last transfer in FIFO */ |
630 | if (dspi->len == dspi->bytes_per_word || fifo_size == 0) | 636 | if (dspi->len == dspi->bytes_per_word || fifo_size == 0) |
631 | dspi->tx_cmd |= SPI_PUSHR_CMD_EOQ; | 637 | dspi->tx_cmd |= SPI_PUSHR_CMD_EOQ; |
diff --git a/drivers/spi/spi-gpio.c b/drivers/spi/spi-gpio.c index 0626e6e3ea0c..421bfc7dda67 100644 --- a/drivers/spi/spi-gpio.c +++ b/drivers/spi/spi-gpio.c | |||
@@ -300,8 +300,8 @@ static int spi_gpio_request(struct device *dev, | |||
300 | *mflags |= SPI_MASTER_NO_RX; | 300 | *mflags |= SPI_MASTER_NO_RX; |
301 | 301 | ||
302 | spi_gpio->sck = devm_gpiod_get(dev, "sck", GPIOD_OUT_LOW); | 302 | spi_gpio->sck = devm_gpiod_get(dev, "sck", GPIOD_OUT_LOW); |
303 | if (IS_ERR(spi_gpio->mosi)) | 303 | if (IS_ERR(spi_gpio->sck)) |
304 | return PTR_ERR(spi_gpio->mosi); | 304 | return PTR_ERR(spi_gpio->sck); |
305 | 305 | ||
306 | for (i = 0; i < num_chipselects; i++) { | 306 | for (i = 0; i < num_chipselects; i++) { |
307 | spi_gpio->cs_gpios[i] = devm_gpiod_get_index(dev, "cs", | 307 | spi_gpio->cs_gpios[i] = devm_gpiod_get_index(dev, "cs", |
diff --git a/drivers/spi/spi-rspi.c b/drivers/spi/spi-rspi.c index 95dc4d78618d..b37de1d991d6 100644 --- a/drivers/spi/spi-rspi.c +++ b/drivers/spi/spi-rspi.c | |||
@@ -598,11 +598,13 @@ static int rspi_dma_transfer(struct rspi_data *rspi, struct sg_table *tx, | |||
598 | 598 | ||
599 | ret = wait_event_interruptible_timeout(rspi->wait, | 599 | ret = wait_event_interruptible_timeout(rspi->wait, |
600 | rspi->dma_callbacked, HZ); | 600 | rspi->dma_callbacked, HZ); |
601 | if (ret > 0 && rspi->dma_callbacked) | 601 | if (ret > 0 && rspi->dma_callbacked) { |
602 | ret = 0; | 602 | ret = 0; |
603 | else if (!ret) { | 603 | } else { |
604 | dev_err(&rspi->master->dev, "DMA timeout\n"); | 604 | if (!ret) { |
605 | ret = -ETIMEDOUT; | 605 | dev_err(&rspi->master->dev, "DMA timeout\n"); |
606 | ret = -ETIMEDOUT; | ||
607 | } | ||
606 | if (tx) | 608 | if (tx) |
607 | dmaengine_terminate_all(rspi->master->dma_tx); | 609 | dmaengine_terminate_all(rspi->master->dma_tx); |
608 | if (rx) | 610 | if (rx) |
@@ -1350,12 +1352,36 @@ static const struct platform_device_id spi_driver_ids[] = { | |||
1350 | 1352 | ||
1351 | MODULE_DEVICE_TABLE(platform, spi_driver_ids); | 1353 | MODULE_DEVICE_TABLE(platform, spi_driver_ids); |
1352 | 1354 | ||
1355 | #ifdef CONFIG_PM_SLEEP | ||
1356 | static int rspi_suspend(struct device *dev) | ||
1357 | { | ||
1358 | struct platform_device *pdev = to_platform_device(dev); | ||
1359 | struct rspi_data *rspi = platform_get_drvdata(pdev); | ||
1360 | |||
1361 | return spi_master_suspend(rspi->master); | ||
1362 | } | ||
1363 | |||
1364 | static int rspi_resume(struct device *dev) | ||
1365 | { | ||
1366 | struct platform_device *pdev = to_platform_device(dev); | ||
1367 | struct rspi_data *rspi = platform_get_drvdata(pdev); | ||
1368 | |||
1369 | return spi_master_resume(rspi->master); | ||
1370 | } | ||
1371 | |||
1372 | static SIMPLE_DEV_PM_OPS(rspi_pm_ops, rspi_suspend, rspi_resume); | ||
1373 | #define DEV_PM_OPS &rspi_pm_ops | ||
1374 | #else | ||
1375 | #define DEV_PM_OPS NULL | ||
1376 | #endif /* CONFIG_PM_SLEEP */ | ||
1377 | |||
1353 | static struct platform_driver rspi_driver = { | 1378 | static struct platform_driver rspi_driver = { |
1354 | .probe = rspi_probe, | 1379 | .probe = rspi_probe, |
1355 | .remove = rspi_remove, | 1380 | .remove = rspi_remove, |
1356 | .id_table = spi_driver_ids, | 1381 | .id_table = spi_driver_ids, |
1357 | .driver = { | 1382 | .driver = { |
1358 | .name = "renesas_spi", | 1383 | .name = "renesas_spi", |
1384 | .pm = DEV_PM_OPS, | ||
1359 | .of_match_table = of_match_ptr(rspi_of_match), | 1385 | .of_match_table = of_match_ptr(rspi_of_match), |
1360 | }, | 1386 | }, |
1361 | }; | 1387 | }; |
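The rspi hunk (and the sh-msiof one that follows) adds the stock system-sleep boilerplate: suspend/resume callbacks that just hand off to spi_master_suspend()/spi_master_resume(), wired up through SIMPLE_DEV_PM_OPS under CONFIG_PM_SLEEP. A condensed sketch of the pattern with placeholder driver names:

	#include <linux/platform_device.h>
	#include <linux/pm.h>
	#include <linux/spi/spi.h>

	struct example_spi {
		struct spi_master *master;
	};

	#ifdef CONFIG_PM_SLEEP
	static int example_spi_suspend(struct device *dev)
	{
		struct example_spi *p = dev_get_drvdata(dev);

		return spi_master_suspend(p->master);
	}

	static int example_spi_resume(struct device *dev)
	{
		struct example_spi *p = dev_get_drvdata(dev);

		return spi_master_resume(p->master);
	}

	static SIMPLE_DEV_PM_OPS(example_spi_pm_ops, example_spi_suspend,
				 example_spi_resume);
	#define EXAMPLE_DEV_PM_OPS (&example_spi_pm_ops)
	#else
	#define EXAMPLE_DEV_PM_OPS NULL
	#endif /* CONFIG_PM_SLEEP */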
diff --git a/drivers/spi/spi-sh-msiof.c b/drivers/spi/spi-sh-msiof.c index 539d6d1a277a..101cd6aae2ea 100644 --- a/drivers/spi/spi-sh-msiof.c +++ b/drivers/spi/spi-sh-msiof.c | |||
@@ -397,7 +397,8 @@ static void sh_msiof_spi_set_mode_regs(struct sh_msiof_spi_priv *p, | |||
397 | 397 | ||
398 | static void sh_msiof_reset_str(struct sh_msiof_spi_priv *p) | 398 | static void sh_msiof_reset_str(struct sh_msiof_spi_priv *p) |
399 | { | 399 | { |
400 | sh_msiof_write(p, STR, sh_msiof_read(p, STR)); | 400 | sh_msiof_write(p, STR, |
401 | sh_msiof_read(p, STR) & ~(STR_TDREQ | STR_RDREQ)); | ||
401 | } | 402 | } |
402 | 403 | ||
403 | static void sh_msiof_spi_write_fifo_8(struct sh_msiof_spi_priv *p, | 404 | static void sh_msiof_spi_write_fifo_8(struct sh_msiof_spi_priv *p, |
@@ -1426,12 +1427,37 @@ static const struct platform_device_id spi_driver_ids[] = { | |||
1426 | }; | 1427 | }; |
1427 | MODULE_DEVICE_TABLE(platform, spi_driver_ids); | 1428 | MODULE_DEVICE_TABLE(platform, spi_driver_ids); |
1428 | 1429 | ||
1430 | #ifdef CONFIG_PM_SLEEP | ||
1431 | static int sh_msiof_spi_suspend(struct device *dev) | ||
1432 | { | ||
1433 | struct platform_device *pdev = to_platform_device(dev); | ||
1434 | struct sh_msiof_spi_priv *p = platform_get_drvdata(pdev); | ||
1435 | |||
1436 | return spi_master_suspend(p->master); | ||
1437 | } | ||
1438 | |||
1439 | static int sh_msiof_spi_resume(struct device *dev) | ||
1440 | { | ||
1441 | struct platform_device *pdev = to_platform_device(dev); | ||
1442 | struct sh_msiof_spi_priv *p = platform_get_drvdata(pdev); | ||
1443 | |||
1444 | return spi_master_resume(p->master); | ||
1445 | } | ||
1446 | |||
1447 | static SIMPLE_DEV_PM_OPS(sh_msiof_spi_pm_ops, sh_msiof_spi_suspend, | ||
1448 | sh_msiof_spi_resume); | ||
1449 | #define DEV_PM_OPS &sh_msiof_spi_pm_ops | ||
1450 | #else | ||
1451 | #define DEV_PM_OPS NULL | ||
1452 | #endif /* CONFIG_PM_SLEEP */ | ||
1453 | |||
1429 | static struct platform_driver sh_msiof_spi_drv = { | 1454 | static struct platform_driver sh_msiof_spi_drv = { |
1430 | .probe = sh_msiof_spi_probe, | 1455 | .probe = sh_msiof_spi_probe, |
1431 | .remove = sh_msiof_spi_remove, | 1456 | .remove = sh_msiof_spi_remove, |
1432 | .id_table = spi_driver_ids, | 1457 | .id_table = spi_driver_ids, |
1433 | .driver = { | 1458 | .driver = { |
1434 | .name = "spi_sh_msiof", | 1459 | .name = "spi_sh_msiof", |
1460 | .pm = DEV_PM_OPS, | ||
1435 | .of_match_table = of_match_ptr(sh_msiof_match), | 1461 | .of_match_table = of_match_ptr(sh_msiof_match), |
1436 | }, | 1462 | }, |
1437 | }; | 1463 | }; |
diff --git a/drivers/spi/spi-tegra20-slink.c b/drivers/spi/spi-tegra20-slink.c index 6f7b946b5ced..1427f343b39a 100644 --- a/drivers/spi/spi-tegra20-slink.c +++ b/drivers/spi/spi-tegra20-slink.c | |||
@@ -1063,6 +1063,24 @@ static int tegra_slink_probe(struct platform_device *pdev) | |||
1063 | goto exit_free_master; | 1063 | goto exit_free_master; |
1064 | } | 1064 | } |
1065 | 1065 | ||
1066 | /* disabled clock may cause interrupt storm upon request */ | ||
1067 | tspi->clk = devm_clk_get(&pdev->dev, NULL); | ||
1068 | if (IS_ERR(tspi->clk)) { | ||
1069 | ret = PTR_ERR(tspi->clk); | ||
1070 | dev_err(&pdev->dev, "Can not get clock %d\n", ret); | ||
1071 | goto exit_free_master; | ||
1072 | } | ||
1073 | ret = clk_prepare(tspi->clk); | ||
1074 | if (ret < 0) { | ||
1075 | dev_err(&pdev->dev, "Clock prepare failed %d\n", ret); | ||
1076 | goto exit_free_master; | ||
1077 | } | ||
1078 | ret = clk_enable(tspi->clk); | ||
1079 | if (ret < 0) { | ||
1080 | dev_err(&pdev->dev, "Clock enable failed %d\n", ret); | ||
1081 | goto exit_free_master; | ||
1082 | } | ||
1083 | |||
1066 | spi_irq = platform_get_irq(pdev, 0); | 1084 | spi_irq = platform_get_irq(pdev, 0); |
1067 | tspi->irq = spi_irq; | 1085 | tspi->irq = spi_irq; |
1068 | ret = request_threaded_irq(tspi->irq, tegra_slink_isr, | 1086 | ret = request_threaded_irq(tspi->irq, tegra_slink_isr, |
@@ -1071,14 +1089,7 @@ static int tegra_slink_probe(struct platform_device *pdev) | |||
1071 | if (ret < 0) { | 1089 | if (ret < 0) { |
1072 | dev_err(&pdev->dev, "Failed to register ISR for IRQ %d\n", | 1090 | dev_err(&pdev->dev, "Failed to register ISR for IRQ %d\n", |
1073 | tspi->irq); | 1091 | tspi->irq); |
1074 | goto exit_free_master; | 1092 | goto exit_clk_disable; |
1075 | } | ||
1076 | |||
1077 | tspi->clk = devm_clk_get(&pdev->dev, NULL); | ||
1078 | if (IS_ERR(tspi->clk)) { | ||
1079 | dev_err(&pdev->dev, "can not get clock\n"); | ||
1080 | ret = PTR_ERR(tspi->clk); | ||
1081 | goto exit_free_irq; | ||
1082 | } | 1093 | } |
1083 | 1094 | ||
1084 | tspi->rst = devm_reset_control_get_exclusive(&pdev->dev, "spi"); | 1095 | tspi->rst = devm_reset_control_get_exclusive(&pdev->dev, "spi"); |
@@ -1138,6 +1149,8 @@ exit_rx_dma_free: | |||
1138 | tegra_slink_deinit_dma_param(tspi, true); | 1149 | tegra_slink_deinit_dma_param(tspi, true); |
1139 | exit_free_irq: | 1150 | exit_free_irq: |
1140 | free_irq(spi_irq, tspi); | 1151 | free_irq(spi_irq, tspi); |
1152 | exit_clk_disable: | ||
1153 | clk_disable(tspi->clk); | ||
1141 | exit_free_master: | 1154 | exit_free_master: |
1142 | spi_master_put(master); | 1155 | spi_master_put(master); |
1143 | return ret; | 1156 | return ret; |
@@ -1150,6 +1163,8 @@ static int tegra_slink_remove(struct platform_device *pdev) | |||
1150 | 1163 | ||
1151 | free_irq(tspi->irq, tspi); | 1164 | free_irq(tspi->irq, tspi); |
1152 | 1165 | ||
1166 | clk_disable(tspi->clk); | ||
1167 | |||
1153 | if (tspi->tx_dma_chan) | 1168 | if (tspi->tx_dma_chan) |
1154 | tegra_slink_deinit_dma_param(tspi, false); | 1169 | tegra_slink_deinit_dma_param(tspi, false); |
1155 | 1170 | ||
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c index ec395a6baf9c..9da0bc5a036c 100644 --- a/drivers/spi/spi.c +++ b/drivers/spi/spi.c | |||
@@ -2143,8 +2143,17 @@ int spi_register_controller(struct spi_controller *ctlr) | |||
2143 | */ | 2143 | */ |
2144 | if (ctlr->num_chipselect == 0) | 2144 | if (ctlr->num_chipselect == 0) |
2145 | return -EINVAL; | 2145 | return -EINVAL; |
2146 | /* allocate dynamic bus number using Linux idr */ | 2146 | if (ctlr->bus_num >= 0) { |
2147 | if ((ctlr->bus_num < 0) && ctlr->dev.of_node) { | 2147 | /* devices with a fixed bus num must check-in with the num */ |
2148 | mutex_lock(&board_lock); | ||
2149 | id = idr_alloc(&spi_master_idr, ctlr, ctlr->bus_num, | ||
2150 | ctlr->bus_num + 1, GFP_KERNEL); | ||
2151 | mutex_unlock(&board_lock); | ||
2152 | if (WARN(id < 0, "couldn't get idr")) | ||
2153 | return id == -ENOSPC ? -EBUSY : id; | ||
2154 | ctlr->bus_num = id; | ||
2155 | } else if (ctlr->dev.of_node) { | ||
2156 | /* allocate dynamic bus number using Linux idr */ | ||
2148 | id = of_alias_get_id(ctlr->dev.of_node, "spi"); | 2157 | id = of_alias_get_id(ctlr->dev.of_node, "spi"); |
2149 | if (id >= 0) { | 2158 | if (id >= 0) { |
2150 | ctlr->bus_num = id; | 2159 | ctlr->bus_num = id; |
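The spi core change claims a fixed bus number by asking the IDR for exactly that slot: idr_alloc() over the range [bus_num, bus_num + 1) either returns the requested id or fails, and -ENOSPC (slot already taken) is reported to the caller as -EBUSY. A minimal sketch of this claim-a-specific-id idiom, kept separate from the spi code:

	#include <linux/idr.h>
	#include <linux/gfp.h>

	static DEFINE_IDR(example_idr);

	/* Returns the id on success, -EBUSY if it is already in use. */
	static int example_claim_id(void *ptr, int wanted)
	{
		int id;

		id = idr_alloc(&example_idr, ptr, wanted, wanted + 1, GFP_KERNEL);
		if (id == -ENOSPC)
			return -EBUSY;
		return id;
	}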
diff --git a/drivers/staging/media/mt9t031/Kconfig b/drivers/staging/media/mt9t031/Kconfig index f48e06a03cdb..9a58aaf72edd 100644 --- a/drivers/staging/media/mt9t031/Kconfig +++ b/drivers/staging/media/mt9t031/Kconfig | |||
@@ -1,9 +1,3 @@ | |||
1 | config SOC_CAMERA_IMX074 | ||
2 | tristate "imx074 support (DEPRECATED)" | ||
3 | depends on SOC_CAMERA && I2C | ||
4 | help | ||
5 | This driver supports IMX074 cameras from Sony | ||
6 | |||
7 | config SOC_CAMERA_MT9T031 | 1 | config SOC_CAMERA_MT9T031 |
8 | tristate "mt9t031 support (DEPRECATED)" | 2 | tristate "mt9t031 support (DEPRECATED)" |
9 | depends on SOC_CAMERA && I2C | 3 | depends on SOC_CAMERA && I2C |
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c index 9cdfccbdd06f..cc756a123fd8 100644 --- a/drivers/target/iscsi/iscsi_target.c +++ b/drivers/target/iscsi/iscsi_target.c | |||
@@ -1416,7 +1416,8 @@ static void iscsit_do_crypto_hash_buf(struct ahash_request *hash, | |||
1416 | 1416 | ||
1417 | sg_init_table(sg, ARRAY_SIZE(sg)); | 1417 | sg_init_table(sg, ARRAY_SIZE(sg)); |
1418 | sg_set_buf(sg, buf, payload_length); | 1418 | sg_set_buf(sg, buf, payload_length); |
1419 | sg_set_buf(sg + 1, pad_bytes, padding); | 1419 | if (padding) |
1420 | sg_set_buf(sg + 1, pad_bytes, padding); | ||
1420 | 1421 | ||
1421 | ahash_request_set_crypt(hash, sg, data_crc, payload_length + padding); | 1422 | ahash_request_set_crypt(hash, sg, data_crc, payload_length + padding); |
1422 | 1423 | ||
@@ -3910,10 +3911,14 @@ static bool iscsi_target_check_conn_state(struct iscsi_conn *conn) | |||
3910 | static void iscsit_get_rx_pdu(struct iscsi_conn *conn) | 3911 | static void iscsit_get_rx_pdu(struct iscsi_conn *conn) |
3911 | { | 3912 | { |
3912 | int ret; | 3913 | int ret; |
3913 | u8 buffer[ISCSI_HDR_LEN], opcode; | 3914 | u8 *buffer, opcode; |
3914 | u32 checksum = 0, digest = 0; | 3915 | u32 checksum = 0, digest = 0; |
3915 | struct kvec iov; | 3916 | struct kvec iov; |
3916 | 3917 | ||
3918 | buffer = kcalloc(ISCSI_HDR_LEN, sizeof(*buffer), GFP_KERNEL); | ||
3919 | if (!buffer) | ||
3920 | return; | ||
3921 | |||
3917 | while (!kthread_should_stop()) { | 3922 | while (!kthread_should_stop()) { |
3918 | /* | 3923 | /* |
3919 | * Ensure that both TX and RX per connection kthreads | 3924 | * Ensure that both TX and RX per connection kthreads |
@@ -3921,7 +3926,6 @@ static void iscsit_get_rx_pdu(struct iscsi_conn *conn) | |||
3921 | */ | 3926 | */ |
3922 | iscsit_thread_check_cpumask(conn, current, 0); | 3927 | iscsit_thread_check_cpumask(conn, current, 0); |
3923 | 3928 | ||
3924 | memset(buffer, 0, ISCSI_HDR_LEN); | ||
3925 | memset(&iov, 0, sizeof(struct kvec)); | 3929 | memset(&iov, 0, sizeof(struct kvec)); |
3926 | 3930 | ||
3927 | iov.iov_base = buffer; | 3931 | iov.iov_base = buffer; |
@@ -3930,7 +3934,7 @@ static void iscsit_get_rx_pdu(struct iscsi_conn *conn) | |||
3930 | ret = rx_data(conn, &iov, 1, ISCSI_HDR_LEN); | 3934 | ret = rx_data(conn, &iov, 1, ISCSI_HDR_LEN); |
3931 | if (ret != ISCSI_HDR_LEN) { | 3935 | if (ret != ISCSI_HDR_LEN) { |
3932 | iscsit_rx_thread_wait_for_tcp(conn); | 3936 | iscsit_rx_thread_wait_for_tcp(conn); |
3933 | return; | 3937 | break; |
3934 | } | 3938 | } |
3935 | 3939 | ||
3936 | if (conn->conn_ops->HeaderDigest) { | 3940 | if (conn->conn_ops->HeaderDigest) { |
@@ -3940,7 +3944,7 @@ static void iscsit_get_rx_pdu(struct iscsi_conn *conn) | |||
3940 | ret = rx_data(conn, &iov, 1, ISCSI_CRC_LEN); | 3944 | ret = rx_data(conn, &iov, 1, ISCSI_CRC_LEN); |
3941 | if (ret != ISCSI_CRC_LEN) { | 3945 | if (ret != ISCSI_CRC_LEN) { |
3942 | iscsit_rx_thread_wait_for_tcp(conn); | 3946 | iscsit_rx_thread_wait_for_tcp(conn); |
3943 | return; | 3947 | break; |
3944 | } | 3948 | } |
3945 | 3949 | ||
3946 | iscsit_do_crypto_hash_buf(conn->conn_rx_hash, buffer, | 3950 | iscsit_do_crypto_hash_buf(conn->conn_rx_hash, buffer, |
@@ -3964,7 +3968,7 @@ static void iscsit_get_rx_pdu(struct iscsi_conn *conn) | |||
3964 | } | 3968 | } |
3965 | 3969 | ||
3966 | if (conn->conn_state == TARG_CONN_STATE_IN_LOGOUT) | 3970 | if (conn->conn_state == TARG_CONN_STATE_IN_LOGOUT) |
3967 | return; | 3971 | break; |
3968 | 3972 | ||
3969 | opcode = buffer[0] & ISCSI_OPCODE_MASK; | 3973 | opcode = buffer[0] & ISCSI_OPCODE_MASK; |
3970 | 3974 | ||
@@ -3975,13 +3979,15 @@ static void iscsit_get_rx_pdu(struct iscsi_conn *conn) | |||
3975 | " while in Discovery Session, rejecting.\n", opcode); | 3979 | " while in Discovery Session, rejecting.\n", opcode); |
3976 | iscsit_add_reject(conn, ISCSI_REASON_PROTOCOL_ERROR, | 3980 | iscsit_add_reject(conn, ISCSI_REASON_PROTOCOL_ERROR, |
3977 | buffer); | 3981 | buffer); |
3978 | return; | 3982 | break; |
3979 | } | 3983 | } |
3980 | 3984 | ||
3981 | ret = iscsi_target_rx_opcode(conn, buffer); | 3985 | ret = iscsi_target_rx_opcode(conn, buffer); |
3982 | if (ret < 0) | 3986 | if (ret < 0) |
3983 | return; | 3987 | break; |
3984 | } | 3988 | } |
3989 | |||
3990 | kfree(buffer); | ||
3985 | } | 3991 | } |
3986 | 3992 | ||
3987 | int iscsi_target_rx_thread(void *arg) | 3993 | int iscsi_target_rx_thread(void *arg) |
diff --git a/drivers/target/iscsi/iscsi_target_auth.c b/drivers/target/iscsi/iscsi_target_auth.c index 9518ffd8b8ba..4e680d753941 100644 --- a/drivers/target/iscsi/iscsi_target_auth.c +++ b/drivers/target/iscsi/iscsi_target_auth.c | |||
@@ -26,27 +26,6 @@ | |||
26 | #include "iscsi_target_nego.h" | 26 | #include "iscsi_target_nego.h" |
27 | #include "iscsi_target_auth.h" | 27 | #include "iscsi_target_auth.h" |
28 | 28 | ||
29 | static int chap_string_to_hex(unsigned char *dst, unsigned char *src, int len) | ||
30 | { | ||
31 | int j = DIV_ROUND_UP(len, 2), rc; | ||
32 | |||
33 | rc = hex2bin(dst, src, j); | ||
34 | if (rc < 0) | ||
35 | pr_debug("CHAP string contains non hex digit symbols\n"); | ||
36 | |||
37 | dst[j] = '\0'; | ||
38 | return j; | ||
39 | } | ||
40 | |||
41 | static void chap_binaryhex_to_asciihex(char *dst, char *src, int src_len) | ||
42 | { | ||
43 | int i; | ||
44 | |||
45 | for (i = 0; i < src_len; i++) { | ||
46 | sprintf(&dst[i*2], "%02x", (int) src[i] & 0xff); | ||
47 | } | ||
48 | } | ||
49 | |||
50 | static int chap_gen_challenge( | 29 | static int chap_gen_challenge( |
51 | struct iscsi_conn *conn, | 30 | struct iscsi_conn *conn, |
52 | int caller, | 31 | int caller, |
@@ -62,7 +41,7 @@ static int chap_gen_challenge( | |||
62 | ret = get_random_bytes_wait(chap->challenge, CHAP_CHALLENGE_LENGTH); | 41 | ret = get_random_bytes_wait(chap->challenge, CHAP_CHALLENGE_LENGTH); |
63 | if (unlikely(ret)) | 42 | if (unlikely(ret)) |
64 | return ret; | 43 | return ret; |
65 | chap_binaryhex_to_asciihex(challenge_asciihex, chap->challenge, | 44 | bin2hex(challenge_asciihex, chap->challenge, |
66 | CHAP_CHALLENGE_LENGTH); | 45 | CHAP_CHALLENGE_LENGTH); |
67 | /* | 46 | /* |
68 | * Set CHAP_C, and copy the generated challenge into c_str. | 47 | * Set CHAP_C, and copy the generated challenge into c_str. |
@@ -248,9 +227,16 @@ static int chap_server_compute_md5( | |||
248 | pr_err("Could not find CHAP_R.\n"); | 227 | pr_err("Could not find CHAP_R.\n"); |
249 | goto out; | 228 | goto out; |
250 | } | 229 | } |
230 | if (strlen(chap_r) != MD5_SIGNATURE_SIZE * 2) { | ||
231 | pr_err("Malformed CHAP_R\n"); | ||
232 | goto out; | ||
233 | } | ||
234 | if (hex2bin(client_digest, chap_r, MD5_SIGNATURE_SIZE) < 0) { | ||
235 | pr_err("Malformed CHAP_R\n"); | ||
236 | goto out; | ||
237 | } | ||
251 | 238 | ||
252 | pr_debug("[server] Got CHAP_R=%s\n", chap_r); | 239 | pr_debug("[server] Got CHAP_R=%s\n", chap_r); |
253 | chap_string_to_hex(client_digest, chap_r, strlen(chap_r)); | ||
254 | 240 | ||
255 | tfm = crypto_alloc_shash("md5", 0, 0); | 241 | tfm = crypto_alloc_shash("md5", 0, 0); |
256 | if (IS_ERR(tfm)) { | 242 | if (IS_ERR(tfm)) { |
@@ -294,7 +280,7 @@ static int chap_server_compute_md5( | |||
294 | goto out; | 280 | goto out; |
295 | } | 281 | } |
296 | 282 | ||
297 | chap_binaryhex_to_asciihex(response, server_digest, MD5_SIGNATURE_SIZE); | 283 | bin2hex(response, server_digest, MD5_SIGNATURE_SIZE); |
298 | pr_debug("[server] MD5 Server Digest: %s\n", response); | 284 | pr_debug("[server] MD5 Server Digest: %s\n", response); |
299 | 285 | ||
300 | if (memcmp(server_digest, client_digest, MD5_SIGNATURE_SIZE) != 0) { | 286 | if (memcmp(server_digest, client_digest, MD5_SIGNATURE_SIZE) != 0) { |
@@ -349,9 +335,7 @@ static int chap_server_compute_md5( | |||
349 | pr_err("Could not find CHAP_C.\n"); | 335 | pr_err("Could not find CHAP_C.\n"); |
350 | goto out; | 336 | goto out; |
351 | } | 337 | } |
352 | pr_debug("[server] Got CHAP_C=%s\n", challenge); | 338 | challenge_len = DIV_ROUND_UP(strlen(challenge), 2); |
353 | challenge_len = chap_string_to_hex(challenge_binhex, challenge, | ||
354 | strlen(challenge)); | ||
355 | if (!challenge_len) { | 339 | if (!challenge_len) { |
356 | pr_err("Unable to convert incoming challenge\n"); | 340 | pr_err("Unable to convert incoming challenge\n"); |
357 | goto out; | 341 | goto out; |
@@ -360,6 +344,11 @@ static int chap_server_compute_md5( | |||
360 | pr_err("CHAP_C exceeds maximum binary size of 1024 bytes\n"); | 344 | pr_err("CHAP_C exceeds maximum binary size of 1024 bytes\n"); |
361 | goto out; | 345 | goto out; |
362 | } | 346 | } |
347 | if (hex2bin(challenge_binhex, challenge, challenge_len) < 0) { | ||
348 | pr_err("Malformed CHAP_C\n"); | ||
349 | goto out; | ||
350 | } | ||
351 | pr_debug("[server] Got CHAP_C=%s\n", challenge); | ||
363 | /* | 352 | /* |
364 | * During mutual authentication, the CHAP_C generated by the | 353 | * During mutual authentication, the CHAP_C generated by the |
365 | * initiator must not match the original CHAP_C generated by | 354 | * initiator must not match the original CHAP_C generated by |
@@ -413,7 +402,7 @@ static int chap_server_compute_md5( | |||
413 | /* | 402 | /* |
414 | * Convert response from binary hex to ascii hex. | 403 | * Convert response from binary hex to ascii hex. |
415 | */ | 404 | */ |
416 | chap_binaryhex_to_asciihex(response, digest, MD5_SIGNATURE_SIZE); | 405 | bin2hex(response, digest, MD5_SIGNATURE_SIZE); |
417 | *nr_out_len += sprintf(nr_out_ptr + *nr_out_len, "CHAP_R=0x%s", | 406 | *nr_out_len += sprintf(nr_out_ptr + *nr_out_len, "CHAP_R=0x%s", |
418 | response); | 407 | response); |
419 | *nr_out_len += 1; | 408 | *nr_out_len += 1; |
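The CHAP rework drops the driver-local hex helpers in favour of the generic hex2bin()/bin2hex() and, crucially, validates the initiator-supplied strings: CHAP_R must be exactly twice the digest size and must decode cleanly, otherwise authentication is rejected. A short sketch of that validation step, with an illustrative digest size and helper name:

	#include <linux/kernel.h>
	#include <linux/string.h>

	#define EXAMPLE_DIGEST_SIZE 16	/* MD5 */

	static int example_parse_chap_r(const char *chap_r,
					u8 digest[EXAMPLE_DIGEST_SIZE])
	{
		if (strlen(chap_r) != EXAMPLE_DIGEST_SIZE * 2)
			return -EINVAL;	/* wrong length, reject */
		if (hex2bin(digest, chap_r, EXAMPLE_DIGEST_SIZE) < 0)
			return -EINVAL;	/* non-hex characters, reject */
		return 0;
	}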
diff --git a/drivers/tty/serial/cpm_uart/cpm_uart_core.c b/drivers/tty/serial/cpm_uart/cpm_uart_core.c index 24a5f05e769b..e5389591bb4f 100644 --- a/drivers/tty/serial/cpm_uart/cpm_uart_core.c +++ b/drivers/tty/serial/cpm_uart/cpm_uart_core.c | |||
@@ -1054,8 +1054,8 @@ static int poll_wait_key(char *obuf, struct uart_cpm_port *pinfo) | |||
1054 | /* Get the address of the host memory buffer. | 1054 | /* Get the address of the host memory buffer. |
1055 | */ | 1055 | */ |
1056 | bdp = pinfo->rx_cur; | 1056 | bdp = pinfo->rx_cur; |
1057 | while (bdp->cbd_sc & BD_SC_EMPTY) | 1057 | if (bdp->cbd_sc & BD_SC_EMPTY) |
1058 | ; | 1058 | return NO_POLL_CHAR; |
1059 | 1059 | ||
1060 | /* If the buffer address is in the CPM DPRAM, don't | 1060 | /* If the buffer address is in the CPM DPRAM, don't |
1061 | * convert it. | 1061 | * convert it. |
@@ -1090,7 +1090,11 @@ static int cpm_get_poll_char(struct uart_port *port) | |||
1090 | poll_chars = 0; | 1090 | poll_chars = 0; |
1091 | } | 1091 | } |
1092 | if (poll_chars <= 0) { | 1092 | if (poll_chars <= 0) { |
1093 | poll_chars = poll_wait_key(poll_buf, pinfo); | 1093 | int ret = poll_wait_key(poll_buf, pinfo); |
1094 | |||
1095 | if (ret == NO_POLL_CHAR) | ||
1096 | return ret; | ||
1097 | poll_chars = ret; | ||
1094 | pollp = poll_buf; | 1098 | pollp = poll_buf; |
1095 | } | 1099 | } |
1096 | poll_chars--; | 1100 | poll_chars--; |
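
The two hunks above move the CPM console poll path onto the usual polling contract: instead of spinning until a byte arrives, the low-level read reports NO_POLL_CHAR and the caller retries later. A generic sketch of that contract, using made-up register offsets rather than the CPM hardware:

#include <linux/bits.h>
#include <linux/io.h>
#include <linux/serial_core.h>	/* struct uart_port, NO_POLL_CHAR (CONFIG_CONSOLE_POLL) */

#define EXAMPLE_STAT		0x00	/* hypothetical status register */
#define EXAMPLE_RX		0x04	/* hypothetical RX data register */
#define EXAMPLE_RX_READY	BIT(0)

/* Sketch: a .poll_get_char that never busy-waits. */
static int example_poll_get_char(struct uart_port *port)
{
	if (!(readl(port->membase + EXAMPLE_STAT) & EXAMPLE_RX_READY))
		return NO_POLL_CHAR;	/* nothing pending; kgdb/kdb will poll again */
	return readl(port->membase + EXAMPLE_RX) & 0xff;
}
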
diff --git a/drivers/tty/serial/fsl_lpuart.c b/drivers/tty/serial/fsl_lpuart.c index 51e47a63d61a..3f8d1274fc85 100644 --- a/drivers/tty/serial/fsl_lpuart.c +++ b/drivers/tty/serial/fsl_lpuart.c | |||
@@ -979,7 +979,8 @@ static inline int lpuart_start_rx_dma(struct lpuart_port *sport) | |||
979 | struct circ_buf *ring = &sport->rx_ring; | 979 | struct circ_buf *ring = &sport->rx_ring; |
980 | int ret, nent; | 980 | int ret, nent; |
981 | int bits, baud; | 981 | int bits, baud; |
982 | struct tty_struct *tty = tty_port_tty_get(&sport->port.state->port); | 982 | struct tty_port *port = &sport->port.state->port; |
983 | struct tty_struct *tty = port->tty; | ||
983 | struct ktermios *termios = &tty->termios; | 984 | struct ktermios *termios = &tty->termios; |
984 | 985 | ||
985 | baud = tty_get_baud_rate(tty); | 986 | baud = tty_get_baud_rate(tty); |
diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c index 239c0fa2e981..0f67197a3783 100644 --- a/drivers/tty/serial/imx.c +++ b/drivers/tty/serial/imx.c | |||
@@ -2351,6 +2351,14 @@ static int imx_uart_probe(struct platform_device *pdev) | |||
2351 | ret); | 2351 | ret); |
2352 | return ret; | 2352 | return ret; |
2353 | } | 2353 | } |
2354 | |||
2355 | ret = devm_request_irq(&pdev->dev, rtsirq, imx_uart_rtsint, 0, | ||
2356 | dev_name(&pdev->dev), sport); | ||
2357 | if (ret) { | ||
2358 | dev_err(&pdev->dev, "failed to request rts irq: %d\n", | ||
2359 | ret); | ||
2360 | return ret; | ||
2361 | } | ||
2354 | } else { | 2362 | } else { |
2355 | ret = devm_request_irq(&pdev->dev, rxirq, imx_uart_int, 0, | 2363 | ret = devm_request_irq(&pdev->dev, rxirq, imx_uart_int, 0, |
2356 | dev_name(&pdev->dev), sport); | 2364 | dev_name(&pdev->dev), sport); |
diff --git a/drivers/tty/serial/mvebu-uart.c b/drivers/tty/serial/mvebu-uart.c index d04b5eeea3c6..170e446a2f62 100644 --- a/drivers/tty/serial/mvebu-uart.c +++ b/drivers/tty/serial/mvebu-uart.c | |||
@@ -511,6 +511,7 @@ static void mvebu_uart_set_termios(struct uart_port *port, | |||
511 | termios->c_iflag |= old->c_iflag & ~(INPCK | IGNPAR); | 511 | termios->c_iflag |= old->c_iflag & ~(INPCK | IGNPAR); |
512 | termios->c_cflag &= CREAD | CBAUD; | 512 | termios->c_cflag &= CREAD | CBAUD; |
513 | termios->c_cflag |= old->c_cflag & ~(CREAD | CBAUD); | 513 | termios->c_cflag |= old->c_cflag & ~(CREAD | CBAUD); |
514 | termios->c_cflag |= CS8; | ||
514 | } | 515 | } |
515 | 516 | ||
516 | spin_unlock_irqrestore(&port->lock, flags); | 517 | spin_unlock_irqrestore(&port->lock, flags); |
diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c index 32bc3e3fe4d3..5e5da9acaf0a 100644 --- a/drivers/tty/tty_io.c +++ b/drivers/tty/tty_io.c | |||
@@ -1255,6 +1255,7 @@ static void tty_driver_remove_tty(struct tty_driver *driver, struct tty_struct * | |||
1255 | static int tty_reopen(struct tty_struct *tty) | 1255 | static int tty_reopen(struct tty_struct *tty) |
1256 | { | 1256 | { |
1257 | struct tty_driver *driver = tty->driver; | 1257 | struct tty_driver *driver = tty->driver; |
1258 | int retval; | ||
1258 | 1259 | ||
1259 | if (driver->type == TTY_DRIVER_TYPE_PTY && | 1260 | if (driver->type == TTY_DRIVER_TYPE_PTY && |
1260 | driver->subtype == PTY_TYPE_MASTER) | 1261 | driver->subtype == PTY_TYPE_MASTER) |
@@ -1268,10 +1269,14 @@ static int tty_reopen(struct tty_struct *tty) | |||
1268 | 1269 | ||
1269 | tty->count++; | 1270 | tty->count++; |
1270 | 1271 | ||
1271 | if (!tty->ldisc) | 1272 | if (tty->ldisc) |
1272 | return tty_ldisc_reinit(tty, tty->termios.c_line); | 1273 | return 0; |
1273 | 1274 | ||
1274 | return 0; | 1275 | retval = tty_ldisc_reinit(tty, tty->termios.c_line); |
1276 | if (retval) | ||
1277 | tty->count--; | ||
1278 | |||
1279 | return retval; | ||
1275 | } | 1280 | } |
1276 | 1281 | ||
1277 | /** | 1282 | /** |
diff --git a/drivers/tty/vt/vt_ioctl.c b/drivers/tty/vt/vt_ioctl.c index a78ad10a119b..73cdc0d633dd 100644 --- a/drivers/tty/vt/vt_ioctl.c +++ b/drivers/tty/vt/vt_ioctl.c | |||
@@ -32,6 +32,8 @@ | |||
32 | #include <asm/io.h> | 32 | #include <asm/io.h> |
33 | #include <linux/uaccess.h> | 33 | #include <linux/uaccess.h> |
34 | 34 | ||
35 | #include <linux/nospec.h> | ||
36 | |||
35 | #include <linux/kbd_kern.h> | 37 | #include <linux/kbd_kern.h> |
36 | #include <linux/vt_kern.h> | 38 | #include <linux/vt_kern.h> |
37 | #include <linux/kbd_diacr.h> | 39 | #include <linux/kbd_diacr.h> |
@@ -700,6 +702,8 @@ int vt_ioctl(struct tty_struct *tty, | |||
700 | if (vsa.console == 0 || vsa.console > MAX_NR_CONSOLES) | 702 | if (vsa.console == 0 || vsa.console > MAX_NR_CONSOLES) |
701 | ret = -ENXIO; | 703 | ret = -ENXIO; |
702 | else { | 704 | else { |
705 | vsa.console = array_index_nospec(vsa.console, | ||
706 | MAX_NR_CONSOLES + 1); | ||
703 | vsa.console--; | 707 | vsa.console--; |
704 | console_lock(); | 708 | console_lock(); |
705 | ret = vc_allocate(vsa.console); | 709 | ret = vc_allocate(vsa.console); |
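
array_index_nospec() (from <linux/nospec.h>) clamps an index after its bounds check so that a mispredicted branch cannot speculatively use an out-of-range value. A minimal sketch of the pattern applied above, with a hypothetical 1-based index:

#include <linux/errno.h>
#include <linux/nospec.h>

/* Sketch: sanitize a user-supplied 1-based slot number. */
static int example_pick_slot(unsigned int slot, unsigned int max_slots)
{
	if (slot == 0 || slot > max_slots)
		return -ENXIO;
	/* clamp to [0, max_slots] so speculation cannot run past the check */
	slot = array_index_nospec(slot, max_slots + 1);
	return slot - 1;	/* convert to the 0-based index used internally */
}
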
diff --git a/drivers/usb/class/cdc-wdm.c b/drivers/usb/class/cdc-wdm.c index 656d247819c9..bec581fb7c63 100644 --- a/drivers/usb/class/cdc-wdm.c +++ b/drivers/usb/class/cdc-wdm.c | |||
@@ -460,7 +460,7 @@ static int service_outstanding_interrupt(struct wdm_device *desc) | |||
460 | 460 | ||
461 | set_bit(WDM_RESPONDING, &desc->flags); | 461 | set_bit(WDM_RESPONDING, &desc->flags); |
462 | spin_unlock_irq(&desc->iuspin); | 462 | spin_unlock_irq(&desc->iuspin); |
463 | rv = usb_submit_urb(desc->response, GFP_ATOMIC); | 463 | rv = usb_submit_urb(desc->response, GFP_KERNEL); |
464 | spin_lock_irq(&desc->iuspin); | 464 | spin_lock_irq(&desc->iuspin); |
465 | if (rv) { | 465 | if (rv) { |
466 | dev_err(&desc->intf->dev, | 466 | dev_err(&desc->intf->dev, |
diff --git a/drivers/usb/common/roles.c b/drivers/usb/common/roles.c index 15cc76e22123..99116af07f1d 100644 --- a/drivers/usb/common/roles.c +++ b/drivers/usb/common/roles.c | |||
@@ -109,8 +109,15 @@ static void *usb_role_switch_match(struct device_connection *con, int ep, | |||
109 | */ | 109 | */ |
110 | struct usb_role_switch *usb_role_switch_get(struct device *dev) | 110 | struct usb_role_switch *usb_role_switch_get(struct device *dev) |
111 | { | 111 | { |
112 | return device_connection_find_match(dev, "usb-role-switch", NULL, | 112 | struct usb_role_switch *sw; |
113 | usb_role_switch_match); | 113 | |
114 | sw = device_connection_find_match(dev, "usb-role-switch", NULL, | ||
115 | usb_role_switch_match); | ||
116 | |||
117 | if (!IS_ERR_OR_NULL(sw)) | ||
118 | WARN_ON(!try_module_get(sw->dev.parent->driver->owner)); | ||
119 | |||
120 | return sw; | ||
114 | } | 121 | } |
115 | EXPORT_SYMBOL_GPL(usb_role_switch_get); | 122 | EXPORT_SYMBOL_GPL(usb_role_switch_get); |
116 | 123 | ||
@@ -122,8 +129,10 @@ EXPORT_SYMBOL_GPL(usb_role_switch_get); | |||
122 | */ | 129 | */ |
123 | void usb_role_switch_put(struct usb_role_switch *sw) | 130 | void usb_role_switch_put(struct usb_role_switch *sw) |
124 | { | 131 | { |
125 | if (!IS_ERR_OR_NULL(sw)) | 132 | if (!IS_ERR_OR_NULL(sw)) { |
126 | put_device(&sw->dev); | 133 | put_device(&sw->dev); |
134 | module_put(sw->dev.parent->driver->owner); | ||
135 | } | ||
127 | } | 136 | } |
128 | EXPORT_SYMBOL_GPL(usb_role_switch_put); | 137 | EXPORT_SYMBOL_GPL(usb_role_switch_put); |
129 | 138 | ||
diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c index 6ce77b33da61..244417d0dfd1 100644 --- a/drivers/usb/core/devio.c +++ b/drivers/usb/core/devio.c | |||
@@ -1434,10 +1434,13 @@ static int proc_do_submiturb(struct usb_dev_state *ps, struct usbdevfs_urb *uurb | |||
1434 | struct async *as = NULL; | 1434 | struct async *as = NULL; |
1435 | struct usb_ctrlrequest *dr = NULL; | 1435 | struct usb_ctrlrequest *dr = NULL; |
1436 | unsigned int u, totlen, isofrmlen; | 1436 | unsigned int u, totlen, isofrmlen; |
1437 | int i, ret, is_in, num_sgs = 0, ifnum = -1; | 1437 | int i, ret, num_sgs = 0, ifnum = -1; |
1438 | int number_of_packets = 0; | 1438 | int number_of_packets = 0; |
1439 | unsigned int stream_id = 0; | 1439 | unsigned int stream_id = 0; |
1440 | void *buf; | 1440 | void *buf; |
1441 | bool is_in; | ||
1442 | bool allow_short = false; | ||
1443 | bool allow_zero = false; | ||
1441 | unsigned long mask = USBDEVFS_URB_SHORT_NOT_OK | | 1444 | unsigned long mask = USBDEVFS_URB_SHORT_NOT_OK | |
1442 | USBDEVFS_URB_BULK_CONTINUATION | | 1445 | USBDEVFS_URB_BULK_CONTINUATION | |
1443 | USBDEVFS_URB_NO_FSBR | | 1446 | USBDEVFS_URB_NO_FSBR | |
@@ -1471,6 +1474,8 @@ static int proc_do_submiturb(struct usb_dev_state *ps, struct usbdevfs_urb *uurb | |||
1471 | u = 0; | 1474 | u = 0; |
1472 | switch (uurb->type) { | 1475 | switch (uurb->type) { |
1473 | case USBDEVFS_URB_TYPE_CONTROL: | 1476 | case USBDEVFS_URB_TYPE_CONTROL: |
1477 | if (is_in) | ||
1478 | allow_short = true; | ||
1474 | if (!usb_endpoint_xfer_control(&ep->desc)) | 1479 | if (!usb_endpoint_xfer_control(&ep->desc)) |
1475 | return -EINVAL; | 1480 | return -EINVAL; |
1476 | /* min 8 byte setup packet */ | 1481 | /* min 8 byte setup packet */ |
@@ -1511,6 +1516,10 @@ static int proc_do_submiturb(struct usb_dev_state *ps, struct usbdevfs_urb *uurb | |||
1511 | break; | 1516 | break; |
1512 | 1517 | ||
1513 | case USBDEVFS_URB_TYPE_BULK: | 1518 | case USBDEVFS_URB_TYPE_BULK: |
1519 | if (!is_in) | ||
1520 | allow_zero = true; | ||
1521 | else | ||
1522 | allow_short = true; | ||
1514 | switch (usb_endpoint_type(&ep->desc)) { | 1523 | switch (usb_endpoint_type(&ep->desc)) { |
1515 | case USB_ENDPOINT_XFER_CONTROL: | 1524 | case USB_ENDPOINT_XFER_CONTROL: |
1516 | case USB_ENDPOINT_XFER_ISOC: | 1525 | case USB_ENDPOINT_XFER_ISOC: |
@@ -1531,6 +1540,10 @@ static int proc_do_submiturb(struct usb_dev_state *ps, struct usbdevfs_urb *uurb | |||
1531 | if (!usb_endpoint_xfer_int(&ep->desc)) | 1540 | if (!usb_endpoint_xfer_int(&ep->desc)) |
1532 | return -EINVAL; | 1541 | return -EINVAL; |
1533 | interrupt_urb: | 1542 | interrupt_urb: |
1543 | if (!is_in) | ||
1544 | allow_zero = true; | ||
1545 | else | ||
1546 | allow_short = true; | ||
1534 | break; | 1547 | break; |
1535 | 1548 | ||
1536 | case USBDEVFS_URB_TYPE_ISO: | 1549 | case USBDEVFS_URB_TYPE_ISO: |
@@ -1676,14 +1689,19 @@ static int proc_do_submiturb(struct usb_dev_state *ps, struct usbdevfs_urb *uurb | |||
1676 | u = (is_in ? URB_DIR_IN : URB_DIR_OUT); | 1689 | u = (is_in ? URB_DIR_IN : URB_DIR_OUT); |
1677 | if (uurb->flags & USBDEVFS_URB_ISO_ASAP) | 1690 | if (uurb->flags & USBDEVFS_URB_ISO_ASAP) |
1678 | u |= URB_ISO_ASAP; | 1691 | u |= URB_ISO_ASAP; |
1679 | if (uurb->flags & USBDEVFS_URB_SHORT_NOT_OK && is_in) | 1692 | if (allow_short && uurb->flags & USBDEVFS_URB_SHORT_NOT_OK) |
1680 | u |= URB_SHORT_NOT_OK; | 1693 | u |= URB_SHORT_NOT_OK; |
1681 | if (uurb->flags & USBDEVFS_URB_ZERO_PACKET) | 1694 | if (allow_zero && uurb->flags & USBDEVFS_URB_ZERO_PACKET) |
1682 | u |= URB_ZERO_PACKET; | 1695 | u |= URB_ZERO_PACKET; |
1683 | if (uurb->flags & USBDEVFS_URB_NO_INTERRUPT) | 1696 | if (uurb->flags & USBDEVFS_URB_NO_INTERRUPT) |
1684 | u |= URB_NO_INTERRUPT; | 1697 | u |= URB_NO_INTERRUPT; |
1685 | as->urb->transfer_flags = u; | 1698 | as->urb->transfer_flags = u; |
1686 | 1699 | ||
1700 | if (!allow_short && uurb->flags & USBDEVFS_URB_SHORT_NOT_OK) | ||
1701 | dev_warn(&ps->dev->dev, "Requested nonsensical USBDEVFS_URB_SHORT_NOT_OK.\n"); | ||
1702 | if (!allow_zero && uurb->flags & USBDEVFS_URB_ZERO_PACKET) | ||
1703 | dev_warn(&ps->dev->dev, "Requested nonsensical USBDEVFS_URB_ZERO_PACKET.\n"); | ||
1704 | |||
1687 | as->urb->transfer_buffer_length = uurb->buffer_length; | 1705 | as->urb->transfer_buffer_length = uurb->buffer_length; |
1688 | as->urb->setup_packet = (unsigned char *)dr; | 1706 | as->urb->setup_packet = (unsigned char *)dr; |
1689 | dr = NULL; | 1707 | dr = NULL; |
diff --git a/drivers/usb/core/driver.c b/drivers/usb/core/driver.c index e76e95f62f76..a1f225f077cd 100644 --- a/drivers/usb/core/driver.c +++ b/drivers/usb/core/driver.c | |||
@@ -512,7 +512,6 @@ int usb_driver_claim_interface(struct usb_driver *driver, | |||
512 | struct device *dev; | 512 | struct device *dev; |
513 | struct usb_device *udev; | 513 | struct usb_device *udev; |
514 | int retval = 0; | 514 | int retval = 0; |
515 | int lpm_disable_error = -ENODEV; | ||
516 | 515 | ||
517 | if (!iface) | 516 | if (!iface) |
518 | return -ENODEV; | 517 | return -ENODEV; |
@@ -533,16 +532,6 @@ int usb_driver_claim_interface(struct usb_driver *driver, | |||
533 | 532 | ||
534 | iface->condition = USB_INTERFACE_BOUND; | 533 | iface->condition = USB_INTERFACE_BOUND; |
535 | 534 | ||
536 | /* See the comment about disabling LPM in usb_probe_interface(). */ | ||
537 | if (driver->disable_hub_initiated_lpm) { | ||
538 | lpm_disable_error = usb_unlocked_disable_lpm(udev); | ||
539 | if (lpm_disable_error) { | ||
540 | dev_err(&iface->dev, "%s Failed to disable LPM for driver %s\n", | ||
541 | __func__, driver->name); | ||
542 | return -ENOMEM; | ||
543 | } | ||
544 | } | ||
545 | |||
546 | /* Claimed interfaces are initially inactive (suspended) and | 535 | /* Claimed interfaces are initially inactive (suspended) and |
547 | * runtime-PM-enabled, but only if the driver has autosuspend | 536 | * runtime-PM-enabled, but only if the driver has autosuspend |
548 | * support. Otherwise they are marked active, to prevent the | 537 | * support. Otherwise they are marked active, to prevent the |
@@ -561,9 +550,20 @@ int usb_driver_claim_interface(struct usb_driver *driver, | |||
561 | if (device_is_registered(dev)) | 550 | if (device_is_registered(dev)) |
562 | retval = device_bind_driver(dev); | 551 | retval = device_bind_driver(dev); |
563 | 552 | ||
564 | /* Attempt to re-enable USB3 LPM, if the disable was successful. */ | 553 | if (retval) { |
565 | if (!lpm_disable_error) | 554 | dev->driver = NULL; |
566 | usb_unlocked_enable_lpm(udev); | 555 | usb_set_intfdata(iface, NULL); |
556 | iface->needs_remote_wakeup = 0; | ||
557 | iface->condition = USB_INTERFACE_UNBOUND; | ||
558 | |||
559 | /* | ||
560 | * Unbound interfaces are always runtime-PM-disabled | ||
561 | * and runtime-PM-suspended | ||
562 | */ | ||
563 | if (driver->supports_autosuspend) | ||
564 | pm_runtime_disable(dev); | ||
565 | pm_runtime_set_suspended(dev); | ||
566 | } | ||
567 | 567 | ||
568 | return retval; | 568 | return retval; |
569 | } | 569 | } |
diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c index e77dfe5ed5ec..178d6c6063c0 100644 --- a/drivers/usb/core/quirks.c +++ b/drivers/usb/core/quirks.c | |||
@@ -58,6 +58,7 @@ static int quirks_param_set(const char *val, const struct kernel_param *kp) | |||
58 | quirk_list = kcalloc(quirk_count, sizeof(struct quirk_entry), | 58 | quirk_list = kcalloc(quirk_count, sizeof(struct quirk_entry), |
59 | GFP_KERNEL); | 59 | GFP_KERNEL); |
60 | if (!quirk_list) { | 60 | if (!quirk_list) { |
61 | quirk_count = 0; | ||
61 | mutex_unlock(&quirk_mutex); | 62 | mutex_unlock(&quirk_mutex); |
62 | return -ENOMEM; | 63 | return -ENOMEM; |
63 | } | 64 | } |
@@ -154,7 +155,7 @@ static struct kparam_string quirks_param_string = { | |||
154 | .string = quirks_param, | 155 | .string = quirks_param, |
155 | }; | 156 | }; |
156 | 157 | ||
157 | module_param_cb(quirks, &quirks_param_ops, &quirks_param_string, 0644); | 158 | device_param_cb(quirks, &quirks_param_ops, &quirks_param_string, 0644); |
158 | MODULE_PARM_DESC(quirks, "Add/modify USB quirks by specifying quirks=vendorID:productID:quirks"); | 159 | MODULE_PARM_DESC(quirks, "Add/modify USB quirks by specifying quirks=vendorID:productID:quirks"); |
159 | 160 | ||
160 | /* Lists of quirky USB devices, split in device quirks and interface quirks. | 161 | /* Lists of quirky USB devices, split in device quirks and interface quirks. |
diff --git a/drivers/usb/core/usb.c b/drivers/usb/core/usb.c index 623be3174fb3..79d8bd7a612e 100644 --- a/drivers/usb/core/usb.c +++ b/drivers/usb/core/usb.c | |||
@@ -228,6 +228,8 @@ struct usb_host_interface *usb_find_alt_setting( | |||
228 | struct usb_interface_cache *intf_cache = NULL; | 228 | struct usb_interface_cache *intf_cache = NULL; |
229 | int i; | 229 | int i; |
230 | 230 | ||
231 | if (!config) | ||
232 | return NULL; | ||
231 | for (i = 0; i < config->desc.bNumInterfaces; i++) { | 233 | for (i = 0; i < config->desc.bNumInterfaces; i++) { |
232 | if (config->intf_cache[i]->altsetting[0].desc.bInterfaceNumber | 234 | if (config->intf_cache[i]->altsetting[0].desc.bInterfaceNumber |
233 | == iface_num) { | 235 | == iface_num) { |
diff --git a/drivers/usb/musb/musb_dsps.c b/drivers/usb/musb/musb_dsps.c index df827ff57b0d..23a0df79ef21 100644 --- a/drivers/usb/musb/musb_dsps.c +++ b/drivers/usb/musb/musb_dsps.c | |||
@@ -658,16 +658,6 @@ dsps_dma_controller_create(struct musb *musb, void __iomem *base) | |||
658 | return controller; | 658 | return controller; |
659 | } | 659 | } |
660 | 660 | ||
661 | static void dsps_dma_controller_destroy(struct dma_controller *c) | ||
662 | { | ||
663 | struct musb *musb = c->musb; | ||
664 | struct dsps_glue *glue = dev_get_drvdata(musb->controller->parent); | ||
665 | void __iomem *usbss_base = glue->usbss_base; | ||
666 | |||
667 | musb_writel(usbss_base, USBSS_IRQ_CLEARR, USBSS_IRQ_PD_COMP); | ||
668 | cppi41_dma_controller_destroy(c); | ||
669 | } | ||
670 | |||
671 | #ifdef CONFIG_PM_SLEEP | 661 | #ifdef CONFIG_PM_SLEEP |
672 | static void dsps_dma_controller_suspend(struct dsps_glue *glue) | 662 | static void dsps_dma_controller_suspend(struct dsps_glue *glue) |
673 | { | 663 | { |
@@ -697,7 +687,7 @@ static struct musb_platform_ops dsps_ops = { | |||
697 | 687 | ||
698 | #ifdef CONFIG_USB_TI_CPPI41_DMA | 688 | #ifdef CONFIG_USB_TI_CPPI41_DMA |
699 | .dma_init = dsps_dma_controller_create, | 689 | .dma_init = dsps_dma_controller_create, |
700 | .dma_exit = dsps_dma_controller_destroy, | 690 | .dma_exit = cppi41_dma_controller_destroy, |
701 | #endif | 691 | #endif |
702 | .enable = dsps_musb_enable, | 692 | .enable = dsps_musb_enable, |
703 | .disable = dsps_musb_disable, | 693 | .disable = dsps_musb_disable, |
diff --git a/drivers/usb/typec/mux.c b/drivers/usb/typec/mux.c index ddaac63ecf12..d990aa510fab 100644 --- a/drivers/usb/typec/mux.c +++ b/drivers/usb/typec/mux.c | |||
@@ -9,6 +9,7 @@ | |||
9 | 9 | ||
10 | #include <linux/device.h> | 10 | #include <linux/device.h> |
11 | #include <linux/list.h> | 11 | #include <linux/list.h> |
12 | #include <linux/module.h> | ||
12 | #include <linux/mutex.h> | 13 | #include <linux/mutex.h> |
13 | #include <linux/usb/typec_mux.h> | 14 | #include <linux/usb/typec_mux.h> |
14 | 15 | ||
@@ -49,8 +50,10 @@ struct typec_switch *typec_switch_get(struct device *dev) | |||
49 | mutex_lock(&switch_lock); | 50 | mutex_lock(&switch_lock); |
50 | sw = device_connection_find_match(dev, "typec-switch", NULL, | 51 | sw = device_connection_find_match(dev, "typec-switch", NULL, |
51 | typec_switch_match); | 52 | typec_switch_match); |
52 | if (!IS_ERR_OR_NULL(sw)) | 53 | if (!IS_ERR_OR_NULL(sw)) { |
54 | WARN_ON(!try_module_get(sw->dev->driver->owner)); | ||
53 | get_device(sw->dev); | 55 | get_device(sw->dev); |
56 | } | ||
54 | mutex_unlock(&switch_lock); | 57 | mutex_unlock(&switch_lock); |
55 | 58 | ||
56 | return sw; | 59 | return sw; |
@@ -65,8 +68,10 @@ EXPORT_SYMBOL_GPL(typec_switch_get); | |||
65 | */ | 68 | */ |
66 | void typec_switch_put(struct typec_switch *sw) | 69 | void typec_switch_put(struct typec_switch *sw) |
67 | { | 70 | { |
68 | if (!IS_ERR_OR_NULL(sw)) | 71 | if (!IS_ERR_OR_NULL(sw)) { |
72 | module_put(sw->dev->driver->owner); | ||
69 | put_device(sw->dev); | 73 | put_device(sw->dev); |
74 | } | ||
70 | } | 75 | } |
71 | EXPORT_SYMBOL_GPL(typec_switch_put); | 76 | EXPORT_SYMBOL_GPL(typec_switch_put); |
72 | 77 | ||
@@ -136,8 +141,10 @@ struct typec_mux *typec_mux_get(struct device *dev, const char *name) | |||
136 | 141 | ||
137 | mutex_lock(&mux_lock); | 142 | mutex_lock(&mux_lock); |
138 | mux = device_connection_find_match(dev, name, NULL, typec_mux_match); | 143 | mux = device_connection_find_match(dev, name, NULL, typec_mux_match); |
139 | if (!IS_ERR_OR_NULL(mux)) | 144 | if (!IS_ERR_OR_NULL(mux)) { |
145 | WARN_ON(!try_module_get(mux->dev->driver->owner)); | ||
140 | get_device(mux->dev); | 146 | get_device(mux->dev); |
147 | } | ||
141 | mutex_unlock(&mux_lock); | 148 | mutex_unlock(&mux_lock); |
142 | 149 | ||
143 | return mux; | 150 | return mux; |
@@ -152,8 +159,10 @@ EXPORT_SYMBOL_GPL(typec_mux_get); | |||
152 | */ | 159 | */ |
153 | void typec_mux_put(struct typec_mux *mux) | 160 | void typec_mux_put(struct typec_mux *mux) |
154 | { | 161 | { |
155 | if (!IS_ERR_OR_NULL(mux)) | 162 | if (!IS_ERR_OR_NULL(mux)) { |
163 | module_put(mux->dev->driver->owner); | ||
156 | put_device(mux->dev); | 164 | put_device(mux->dev); |
165 | } | ||
157 | } | 166 | } |
158 | EXPORT_SYMBOL_GPL(typec_mux_put); | 167 | EXPORT_SYMBOL_GPL(typec_mux_put); |
159 | 168 | ||
diff --git a/drivers/xen/grant-table.c b/drivers/xen/grant-table.c index 7bafa703a992..84575baceebc 100644 --- a/drivers/xen/grant-table.c +++ b/drivers/xen/grant-table.c | |||
@@ -1040,18 +1040,33 @@ int gnttab_map_refs(struct gnttab_map_grant_ref *map_ops, | |||
1040 | return ret; | 1040 | return ret; |
1041 | 1041 | ||
1042 | for (i = 0; i < count; i++) { | 1042 | for (i = 0; i < count; i++) { |
1043 | /* Retry eagain maps */ | 1043 | switch (map_ops[i].status) { |
1044 | if (map_ops[i].status == GNTST_eagain) | 1044 | case GNTST_okay: |
1045 | gnttab_retry_eagain_gop(GNTTABOP_map_grant_ref, map_ops + i, | 1045 | { |
1046 | &map_ops[i].status, __func__); | ||
1047 | |||
1048 | if (map_ops[i].status == GNTST_okay) { | ||
1049 | struct xen_page_foreign *foreign; | 1046 | struct xen_page_foreign *foreign; |
1050 | 1047 | ||
1051 | SetPageForeign(pages[i]); | 1048 | SetPageForeign(pages[i]); |
1052 | foreign = xen_page_foreign(pages[i]); | 1049 | foreign = xen_page_foreign(pages[i]); |
1053 | foreign->domid = map_ops[i].dom; | 1050 | foreign->domid = map_ops[i].dom; |
1054 | foreign->gref = map_ops[i].ref; | 1051 | foreign->gref = map_ops[i].ref; |
1052 | break; | ||
1053 | } | ||
1054 | |||
1055 | case GNTST_no_device_space: | ||
1056 | pr_warn_ratelimited("maptrack limit reached, can't map all guest pages\n"); | ||
1057 | break; | ||
1058 | |||
1059 | case GNTST_eagain: | ||
1060 | /* Retry eagain maps */ | ||
1061 | gnttab_retry_eagain_gop(GNTTABOP_map_grant_ref, | ||
1062 | map_ops + i, | ||
1063 | &map_ops[i].status, __func__); | ||
1064 | /* Test status in next loop iteration. */ | ||
1065 | i--; | ||
1066 | break; | ||
1067 | |||
1068 | default: | ||
1069 | break; | ||
1055 | } | 1070 | } |
1056 | } | 1071 | } |
1057 | 1072 | ||
diff --git a/fs/dax.c b/fs/dax.c | |||
@@ -447,6 +447,7 @@ bool dax_lock_mapping_entry(struct page *page) | |||
447 | xa_unlock_irq(&mapping->i_pages); | 447 | xa_unlock_irq(&mapping->i_pages); |
448 | break; | 448 | break; |
449 | } else if (IS_ERR(entry)) { | 449 | } else if (IS_ERR(entry)) { |
450 | xa_unlock_irq(&mapping->i_pages); | ||
450 | WARN_ON_ONCE(PTR_ERR(entry) != -EAGAIN); | 451 | WARN_ON_ONCE(PTR_ERR(entry) != -EAGAIN); |
451 | continue; | 452 | continue; |
452 | } | 453 | } |
@@ -1120,21 +1121,12 @@ static vm_fault_t dax_load_hole(struct address_space *mapping, void *entry, | |||
1120 | { | 1121 | { |
1121 | struct inode *inode = mapping->host; | 1122 | struct inode *inode = mapping->host; |
1122 | unsigned long vaddr = vmf->address; | 1123 | unsigned long vaddr = vmf->address; |
1123 | vm_fault_t ret = VM_FAULT_NOPAGE; | 1124 | pfn_t pfn = pfn_to_pfn_t(my_zero_pfn(vaddr)); |
1124 | struct page *zero_page; | 1125 | vm_fault_t ret; |
1125 | pfn_t pfn; | ||
1126 | |||
1127 | zero_page = ZERO_PAGE(0); | ||
1128 | if (unlikely(!zero_page)) { | ||
1129 | ret = VM_FAULT_OOM; | ||
1130 | goto out; | ||
1131 | } | ||
1132 | 1126 | ||
1133 | pfn = page_to_pfn_t(zero_page); | ||
1134 | dax_insert_mapping_entry(mapping, vmf, entry, pfn, RADIX_DAX_ZERO_PAGE, | 1127 | dax_insert_mapping_entry(mapping, vmf, entry, pfn, RADIX_DAX_ZERO_PAGE, |
1135 | false); | 1128 | false); |
1136 | ret = vmf_insert_mixed(vmf->vma, vaddr, pfn); | 1129 | ret = vmf_insert_mixed(vmf->vma, vaddr, pfn); |
1137 | out: | ||
1138 | trace_dax_load_hole(inode, vmf, ret); | 1130 | trace_dax_load_hole(inode, vmf, ret); |
1139 | return ret; | 1131 | return ret; |
1140 | } | 1132 | } |
diff --git a/fs/ext2/inode.c b/fs/ext2/inode.c index 7f7ee18fe179..e4bb9386c045 100644 --- a/fs/ext2/inode.c +++ b/fs/ext2/inode.c | |||
@@ -1448,6 +1448,7 @@ struct inode *ext2_iget (struct super_block *sb, unsigned long ino) | |||
1448 | } | 1448 | } |
1449 | inode->i_blocks = le32_to_cpu(raw_inode->i_blocks); | 1449 | inode->i_blocks = le32_to_cpu(raw_inode->i_blocks); |
1450 | ei->i_flags = le32_to_cpu(raw_inode->i_flags); | 1450 | ei->i_flags = le32_to_cpu(raw_inode->i_flags); |
1451 | ext2_set_inode_flags(inode); | ||
1451 | ei->i_faddr = le32_to_cpu(raw_inode->i_faddr); | 1452 | ei->i_faddr = le32_to_cpu(raw_inode->i_faddr); |
1452 | ei->i_frag_no = raw_inode->i_frag; | 1453 | ei->i_frag_no = raw_inode->i_frag; |
1453 | ei->i_frag_size = raw_inode->i_fsize; | 1454 | ei->i_frag_size = raw_inode->i_fsize; |
@@ -1517,7 +1518,6 @@ struct inode *ext2_iget (struct super_block *sb, unsigned long ino) | |||
1517 | new_decode_dev(le32_to_cpu(raw_inode->i_block[1]))); | 1518 | new_decode_dev(le32_to_cpu(raw_inode->i_block[1]))); |
1518 | } | 1519 | } |
1519 | brelse (bh); | 1520 | brelse (bh); |
1520 | ext2_set_inode_flags(inode); | ||
1521 | unlock_new_inode(inode); | 1521 | unlock_new_inode(inode); |
1522 | return inode; | 1522 | return inode; |
1523 | 1523 | ||
diff --git a/fs/ext4/dir.c b/fs/ext4/dir.c index e2902d394f1b..f93f9881ec18 100644 --- a/fs/ext4/dir.c +++ b/fs/ext4/dir.c | |||
@@ -76,7 +76,7 @@ int __ext4_check_dir_entry(const char *function, unsigned int line, | |||
76 | else if (unlikely(rlen < EXT4_DIR_REC_LEN(de->name_len))) | 76 | else if (unlikely(rlen < EXT4_DIR_REC_LEN(de->name_len))) |
77 | error_msg = "rec_len is too small for name_len"; | 77 | error_msg = "rec_len is too small for name_len"; |
78 | else if (unlikely(((char *) de - buf) + rlen > size)) | 78 | else if (unlikely(((char *) de - buf) + rlen > size)) |
79 | error_msg = "directory entry across range"; | 79 | error_msg = "directory entry overrun"; |
80 | else if (unlikely(le32_to_cpu(de->inode) > | 80 | else if (unlikely(le32_to_cpu(de->inode) > |
81 | le32_to_cpu(EXT4_SB(dir->i_sb)->s_es->s_inodes_count))) | 81 | le32_to_cpu(EXT4_SB(dir->i_sb)->s_es->s_inodes_count))) |
82 | error_msg = "inode out of bounds"; | 82 | error_msg = "inode out of bounds"; |
@@ -85,18 +85,16 @@ int __ext4_check_dir_entry(const char *function, unsigned int line, | |||
85 | 85 | ||
86 | if (filp) | 86 | if (filp) |
87 | ext4_error_file(filp, function, line, bh->b_blocknr, | 87 | ext4_error_file(filp, function, line, bh->b_blocknr, |
88 | "bad entry in directory: %s - offset=%u(%u), " | 88 | "bad entry in directory: %s - offset=%u, " |
89 | "inode=%u, rec_len=%d, name_len=%d", | 89 | "inode=%u, rec_len=%d, name_len=%d, size=%d", |
90 | error_msg, (unsigned) (offset % size), | 90 | error_msg, offset, le32_to_cpu(de->inode), |
91 | offset, le32_to_cpu(de->inode), | 91 | rlen, de->name_len, size); |
92 | rlen, de->name_len); | ||
93 | else | 92 | else |
94 | ext4_error_inode(dir, function, line, bh->b_blocknr, | 93 | ext4_error_inode(dir, function, line, bh->b_blocknr, |
95 | "bad entry in directory: %s - offset=%u(%u), " | 94 | "bad entry in directory: %s - offset=%u, " |
96 | "inode=%u, rec_len=%d, name_len=%d", | 95 | "inode=%u, rec_len=%d, name_len=%d, size=%d", |
97 | error_msg, (unsigned) (offset % size), | 96 | error_msg, offset, le32_to_cpu(de->inode), |
98 | offset, le32_to_cpu(de->inode), | 97 | rlen, de->name_len, size); |
99 | rlen, de->name_len); | ||
100 | 98 | ||
101 | return 1; | 99 | return 1; |
102 | } | 100 | } |
diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h index 0f0edd1cd0cd..caff935fbeb8 100644 --- a/fs/ext4/ext4.h +++ b/fs/ext4/ext4.h | |||
@@ -43,6 +43,17 @@ | |||
43 | #define __FS_HAS_ENCRYPTION IS_ENABLED(CONFIG_EXT4_FS_ENCRYPTION) | 43 | #define __FS_HAS_ENCRYPTION IS_ENABLED(CONFIG_EXT4_FS_ENCRYPTION) |
44 | #include <linux/fscrypt.h> | 44 | #include <linux/fscrypt.h> |
45 | 45 | ||
46 | #include <linux/compiler.h> | ||
47 | |||
48 | /* Until this gets included into linux/compiler-gcc.h */ | ||
49 | #ifndef __nonstring | ||
50 | #if defined(GCC_VERSION) && (GCC_VERSION >= 80000) | ||
51 | #define __nonstring __attribute__((nonstring)) | ||
52 | #else | ||
53 | #define __nonstring | ||
54 | #endif | ||
55 | #endif | ||
56 | |||
46 | /* | 57 | /* |
47 | * The fourth extended filesystem constants/structures | 58 | * The fourth extended filesystem constants/structures |
48 | */ | 59 | */ |
@@ -675,6 +686,9 @@ enum { | |||
675 | /* Max physical block we can address w/o extents */ | 686 | /* Max physical block we can address w/o extents */ |
676 | #define EXT4_MAX_BLOCK_FILE_PHYS 0xFFFFFFFF | 687 | #define EXT4_MAX_BLOCK_FILE_PHYS 0xFFFFFFFF |
677 | 688 | ||
689 | /* Max logical block we can support */ | ||
690 | #define EXT4_MAX_LOGICAL_BLOCK 0xFFFFFFFF | ||
691 | |||
678 | /* | 692 | /* |
679 | * Structure of an inode on the disk | 693 | * Structure of an inode on the disk |
680 | */ | 694 | */ |
@@ -1226,7 +1240,7 @@ struct ext4_super_block { | |||
1226 | __le32 s_feature_ro_compat; /* readonly-compatible feature set */ | 1240 | __le32 s_feature_ro_compat; /* readonly-compatible feature set */ |
1227 | /*68*/ __u8 s_uuid[16]; /* 128-bit uuid for volume */ | 1241 | /*68*/ __u8 s_uuid[16]; /* 128-bit uuid for volume */ |
1228 | /*78*/ char s_volume_name[16]; /* volume name */ | 1242 | /*78*/ char s_volume_name[16]; /* volume name */ |
1229 | /*88*/ char s_last_mounted[64]; /* directory where last mounted */ | 1243 | /*88*/ char s_last_mounted[64] __nonstring; /* directory where last mounted */ |
1230 | /*C8*/ __le32 s_algorithm_usage_bitmap; /* For compression */ | 1244 | /*C8*/ __le32 s_algorithm_usage_bitmap; /* For compression */ |
1231 | /* | 1245 | /* |
1232 | * Performance hints. Directory preallocation should only | 1246 | * Performance hints. Directory preallocation should only |
@@ -1277,13 +1291,13 @@ struct ext4_super_block { | |||
1277 | __le32 s_first_error_time; /* first time an error happened */ | 1291 | __le32 s_first_error_time; /* first time an error happened */ |
1278 | __le32 s_first_error_ino; /* inode involved in first error */ | 1292 | __le32 s_first_error_ino; /* inode involved in first error */ |
1279 | __le64 s_first_error_block; /* block involved of first error */ | 1293 | __le64 s_first_error_block; /* block involved of first error */ |
1280 | __u8 s_first_error_func[32]; /* function where the error happened */ | 1294 | __u8 s_first_error_func[32] __nonstring; /* function where the error happened */ |
1281 | __le32 s_first_error_line; /* line number where error happened */ | 1295 | __le32 s_first_error_line; /* line number where error happened */ |
1282 | __le32 s_last_error_time; /* most recent time of an error */ | 1296 | __le32 s_last_error_time; /* most recent time of an error */ |
1283 | __le32 s_last_error_ino; /* inode involved in last error */ | 1297 | __le32 s_last_error_ino; /* inode involved in last error */ |
1284 | __le32 s_last_error_line; /* line number where error happened */ | 1298 | __le32 s_last_error_line; /* line number where error happened */ |
1285 | __le64 s_last_error_block; /* block involved of last error */ | 1299 | __le64 s_last_error_block; /* block involved of last error */ |
1286 | __u8 s_last_error_func[32]; /* function where the error happened */ | 1300 | __u8 s_last_error_func[32] __nonstring; /* function where the error happened */ |
1287 | #define EXT4_S_ERR_END offsetof(struct ext4_super_block, s_mount_opts) | 1301 | #define EXT4_S_ERR_END offsetof(struct ext4_super_block, s_mount_opts) |
1288 | __u8 s_mount_opts[64]; | 1302 | __u8 s_mount_opts[64]; |
1289 | __le32 s_usr_quota_inum; /* inode for tracking user quota */ | 1303 | __le32 s_usr_quota_inum; /* inode for tracking user quota */ |
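
The __nonstring attribute (GCC 8+) marks a character array that is intentionally not NUL-terminated, which silences -Wstringop-truncation when strncpy() is allowed to fill the whole field. A small sketch of the idea, relying on the __nonstring fallback defined in the hunk above and using a made-up structure:

#include <linux/string.h>

/* Sketch: a fixed-width tag that need not carry a terminating NUL. */
struct example_rec {
	char tag[8] __nonstring;
};

static void example_set_tag(struct example_rec *r, const char *s)
{
	/* may legitimately drop the terminator when s has 8 or more chars */
	strncpy(r->tag, s, sizeof(r->tag));
}
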
diff --git a/fs/ext4/inline.c b/fs/ext4/inline.c index 3543fe80a3c4..7b4736022761 100644 --- a/fs/ext4/inline.c +++ b/fs/ext4/inline.c | |||
@@ -1753,6 +1753,7 @@ bool empty_inline_dir(struct inode *dir, int *has_inline_data) | |||
1753 | { | 1753 | { |
1754 | int err, inline_size; | 1754 | int err, inline_size; |
1755 | struct ext4_iloc iloc; | 1755 | struct ext4_iloc iloc; |
1756 | size_t inline_len; | ||
1756 | void *inline_pos; | 1757 | void *inline_pos; |
1757 | unsigned int offset; | 1758 | unsigned int offset; |
1758 | struct ext4_dir_entry_2 *de; | 1759 | struct ext4_dir_entry_2 *de; |
@@ -1780,8 +1781,9 @@ bool empty_inline_dir(struct inode *dir, int *has_inline_data) | |||
1780 | goto out; | 1781 | goto out; |
1781 | } | 1782 | } |
1782 | 1783 | ||
1784 | inline_len = ext4_get_inline_size(dir); | ||
1783 | offset = EXT4_INLINE_DOTDOT_SIZE; | 1785 | offset = EXT4_INLINE_DOTDOT_SIZE; |
1784 | while (offset < dir->i_size) { | 1786 | while (offset < inline_len) { |
1785 | de = ext4_get_inline_entry(dir, &iloc, offset, | 1787 | de = ext4_get_inline_entry(dir, &iloc, offset, |
1786 | &inline_pos, &inline_size); | 1788 | &inline_pos, &inline_size); |
1787 | if (ext4_check_dir_entry(dir, NULL, de, | 1789 | if (ext4_check_dir_entry(dir, NULL, de, |
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c index d0dd585add6a..d767e993591d 100644 --- a/fs/ext4/inode.c +++ b/fs/ext4/inode.c | |||
@@ -3413,12 +3413,16 @@ static int ext4_iomap_begin(struct inode *inode, loff_t offset, loff_t length, | |||
3413 | { | 3413 | { |
3414 | struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); | 3414 | struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); |
3415 | unsigned int blkbits = inode->i_blkbits; | 3415 | unsigned int blkbits = inode->i_blkbits; |
3416 | unsigned long first_block = offset >> blkbits; | 3416 | unsigned long first_block, last_block; |
3417 | unsigned long last_block = (offset + length - 1) >> blkbits; | ||
3418 | struct ext4_map_blocks map; | 3417 | struct ext4_map_blocks map; |
3419 | bool delalloc = false; | 3418 | bool delalloc = false; |
3420 | int ret; | 3419 | int ret; |
3421 | 3420 | ||
3421 | if ((offset >> blkbits) > EXT4_MAX_LOGICAL_BLOCK) | ||
3422 | return -EINVAL; | ||
3423 | first_block = offset >> blkbits; | ||
3424 | last_block = min_t(loff_t, (offset + length - 1) >> blkbits, | ||
3425 | EXT4_MAX_LOGICAL_BLOCK); | ||
3422 | 3426 | ||
3423 | if (flags & IOMAP_REPORT) { | 3427 | if (flags & IOMAP_REPORT) { |
3424 | if (ext4_has_inline_data(inode)) { | 3428 | if (ext4_has_inline_data(inode)) { |
@@ -3948,6 +3952,7 @@ static const struct address_space_operations ext4_dax_aops = { | |||
3948 | .writepages = ext4_dax_writepages, | 3952 | .writepages = ext4_dax_writepages, |
3949 | .direct_IO = noop_direct_IO, | 3953 | .direct_IO = noop_direct_IO, |
3950 | .set_page_dirty = noop_set_page_dirty, | 3954 | .set_page_dirty = noop_set_page_dirty, |
3955 | .bmap = ext4_bmap, | ||
3951 | .invalidatepage = noop_invalidatepage, | 3956 | .invalidatepage = noop_invalidatepage, |
3952 | }; | 3957 | }; |
3953 | 3958 | ||
@@ -4192,9 +4197,8 @@ int ext4_update_disksize_before_punch(struct inode *inode, loff_t offset, | |||
4192 | return 0; | 4197 | return 0; |
4193 | } | 4198 | } |
4194 | 4199 | ||
4195 | static void ext4_wait_dax_page(struct ext4_inode_info *ei, bool *did_unlock) | 4200 | static void ext4_wait_dax_page(struct ext4_inode_info *ei) |
4196 | { | 4201 | { |
4197 | *did_unlock = true; | ||
4198 | up_write(&ei->i_mmap_sem); | 4202 | up_write(&ei->i_mmap_sem); |
4199 | schedule(); | 4203 | schedule(); |
4200 | down_write(&ei->i_mmap_sem); | 4204 | down_write(&ei->i_mmap_sem); |
@@ -4204,14 +4208,12 @@ int ext4_break_layouts(struct inode *inode) | |||
4204 | { | 4208 | { |
4205 | struct ext4_inode_info *ei = EXT4_I(inode); | 4209 | struct ext4_inode_info *ei = EXT4_I(inode); |
4206 | struct page *page; | 4210 | struct page *page; |
4207 | bool retry; | ||
4208 | int error; | 4211 | int error; |
4209 | 4212 | ||
4210 | if (WARN_ON_ONCE(!rwsem_is_locked(&ei->i_mmap_sem))) | 4213 | if (WARN_ON_ONCE(!rwsem_is_locked(&ei->i_mmap_sem))) |
4211 | return -EINVAL; | 4214 | return -EINVAL; |
4212 | 4215 | ||
4213 | do { | 4216 | do { |
4214 | retry = false; | ||
4215 | page = dax_layout_busy_page(inode->i_mapping); | 4217 | page = dax_layout_busy_page(inode->i_mapping); |
4216 | if (!page) | 4218 | if (!page) |
4217 | return 0; | 4219 | return 0; |
@@ -4219,8 +4221,8 @@ int ext4_break_layouts(struct inode *inode) | |||
4219 | error = ___wait_var_event(&page->_refcount, | 4221 | error = ___wait_var_event(&page->_refcount, |
4220 | atomic_read(&page->_refcount) == 1, | 4222 | atomic_read(&page->_refcount) == 1, |
4221 | TASK_INTERRUPTIBLE, 0, 0, | 4223 | TASK_INTERRUPTIBLE, 0, 0, |
4222 | ext4_wait_dax_page(ei, &retry)); | 4224 | ext4_wait_dax_page(ei)); |
4223 | } while (error == 0 && retry); | 4225 | } while (error == 0); |
4224 | 4226 | ||
4225 | return error; | 4227 | return error; |
4226 | } | 4228 | } |
@@ -4895,6 +4897,7 @@ struct inode *ext4_iget(struct super_block *sb, unsigned long ino) | |||
4895 | * not initialized on a new filesystem. */ | 4897 | * not initialized on a new filesystem. */ |
4896 | } | 4898 | } |
4897 | ei->i_flags = le32_to_cpu(raw_inode->i_flags); | 4899 | ei->i_flags = le32_to_cpu(raw_inode->i_flags); |
4900 | ext4_set_inode_flags(inode); | ||
4898 | inode->i_blocks = ext4_inode_blocks(raw_inode, ei); | 4901 | inode->i_blocks = ext4_inode_blocks(raw_inode, ei); |
4899 | ei->i_file_acl = le32_to_cpu(raw_inode->i_file_acl_lo); | 4902 | ei->i_file_acl = le32_to_cpu(raw_inode->i_file_acl_lo); |
4900 | if (ext4_has_feature_64bit(sb)) | 4903 | if (ext4_has_feature_64bit(sb)) |
@@ -5041,7 +5044,6 @@ struct inode *ext4_iget(struct super_block *sb, unsigned long ino) | |||
5041 | goto bad_inode; | 5044 | goto bad_inode; |
5042 | } | 5045 | } |
5043 | brelse(iloc.bh); | 5046 | brelse(iloc.bh); |
5044 | ext4_set_inode_flags(inode); | ||
5045 | 5047 | ||
5046 | unlock_new_inode(inode); | 5048 | unlock_new_inode(inode); |
5047 | return inode; | 5049 | return inode; |
diff --git a/fs/ext4/mmp.c b/fs/ext4/mmp.c index 39b07c2d3384..2305b4374fd3 100644 --- a/fs/ext4/mmp.c +++ b/fs/ext4/mmp.c | |||
@@ -49,7 +49,6 @@ static int write_mmp_block(struct super_block *sb, struct buffer_head *bh) | |||
49 | */ | 49 | */ |
50 | sb_start_write(sb); | 50 | sb_start_write(sb); |
51 | ext4_mmp_csum_set(sb, mmp); | 51 | ext4_mmp_csum_set(sb, mmp); |
52 | mark_buffer_dirty(bh); | ||
53 | lock_buffer(bh); | 52 | lock_buffer(bh); |
54 | bh->b_end_io = end_buffer_write_sync; | 53 | bh->b_end_io = end_buffer_write_sync; |
55 | get_bh(bh); | 54 | get_bh(bh); |
diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c index 116ff68c5bd4..377d516c475f 100644 --- a/fs/ext4/namei.c +++ b/fs/ext4/namei.c | |||
@@ -3478,6 +3478,12 @@ static int ext4_rename(struct inode *old_dir, struct dentry *old_dentry, | |||
3478 | int credits; | 3478 | int credits; |
3479 | u8 old_file_type; | 3479 | u8 old_file_type; |
3480 | 3480 | ||
3481 | if (new.inode && new.inode->i_nlink == 0) { | ||
3482 | EXT4_ERROR_INODE(new.inode, | ||
3483 | "target of rename is already freed"); | ||
3484 | return -EFSCORRUPTED; | ||
3485 | } | ||
3486 | |||
3481 | if ((ext4_test_inode_flag(new_dir, EXT4_INODE_PROJINHERIT)) && | 3487 | if ((ext4_test_inode_flag(new_dir, EXT4_INODE_PROJINHERIT)) && |
3482 | (!projid_eq(EXT4_I(new_dir)->i_projid, | 3488 | (!projid_eq(EXT4_I(new_dir)->i_projid, |
3483 | EXT4_I(old_dentry->d_inode)->i_projid))) | 3489 | EXT4_I(old_dentry->d_inode)->i_projid))) |
diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c index e5fb38451a73..ebbc663d0798 100644 --- a/fs/ext4/resize.c +++ b/fs/ext4/resize.c | |||
@@ -19,6 +19,7 @@ | |||
19 | 19 | ||
20 | int ext4_resize_begin(struct super_block *sb) | 20 | int ext4_resize_begin(struct super_block *sb) |
21 | { | 21 | { |
22 | struct ext4_sb_info *sbi = EXT4_SB(sb); | ||
22 | int ret = 0; | 23 | int ret = 0; |
23 | 24 | ||
24 | if (!capable(CAP_SYS_RESOURCE)) | 25 | if (!capable(CAP_SYS_RESOURCE)) |
@@ -29,7 +30,7 @@ int ext4_resize_begin(struct super_block *sb) | |||
29 | * because the user tools have no way of handling this. Probably a | 30 | * because the user tools have no way of handling this. Probably a |
30 | * bad time to do it anyways. | 31 | * bad time to do it anyways. |
31 | */ | 32 | */ |
32 | if (EXT4_SB(sb)->s_sbh->b_blocknr != | 33 | if (EXT4_B2C(sbi, sbi->s_sbh->b_blocknr) != |
33 | le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block)) { | 34 | le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block)) { |
34 | ext4_warning(sb, "won't resize using backup superblock at %llu", | 35 | ext4_warning(sb, "won't resize using backup superblock at %llu", |
35 | (unsigned long long)EXT4_SB(sb)->s_sbh->b_blocknr); | 36 | (unsigned long long)EXT4_SB(sb)->s_sbh->b_blocknr); |
@@ -1986,6 +1987,26 @@ retry: | |||
1986 | } | 1987 | } |
1987 | } | 1988 | } |
1988 | 1989 | ||
1990 | /* | ||
1991 | * Make sure the last group has enough space so that it's | ||
1992 | * guaranteed to have enough space for all metadata blocks | ||
1993 | * that it might need to hold. (We might not need to store | ||
1994 | * the inode table blocks in the last block group, but there | ||
1995 | * will be cases where this might be needed.) | ||
1996 | */ | ||
1997 | if ((ext4_group_first_block_no(sb, n_group) + | ||
1998 | ext4_group_overhead_blocks(sb, n_group) + 2 + | ||
1999 | sbi->s_itb_per_group + sbi->s_cluster_ratio) >= n_blocks_count) { | ||
2000 | n_blocks_count = ext4_group_first_block_no(sb, n_group); | ||
2001 | n_group--; | ||
2002 | n_blocks_count_retry = 0; | ||
2003 | if (resize_inode) { | ||
2004 | iput(resize_inode); | ||
2005 | resize_inode = NULL; | ||
2006 | } | ||
2007 | goto retry; | ||
2008 | } | ||
2009 | |||
1989 | /* extend the last group */ | 2010 | /* extend the last group */ |
1990 | if (n_group == o_group) | 2011 | if (n_group == o_group) |
1991 | add = n_blocks_count - o_blocks_count; | 2012 | add = n_blocks_count - o_blocks_count; |
diff --git a/fs/ext4/super.c b/fs/ext4/super.c index 5863fd22e90b..1145109968ef 100644 --- a/fs/ext4/super.c +++ b/fs/ext4/super.c | |||
@@ -2145,6 +2145,8 @@ static int _ext4_show_options(struct seq_file *seq, struct super_block *sb, | |||
2145 | SEQ_OPTS_PRINT("max_dir_size_kb=%u", sbi->s_max_dir_size_kb); | 2145 | SEQ_OPTS_PRINT("max_dir_size_kb=%u", sbi->s_max_dir_size_kb); |
2146 | if (test_opt(sb, DATA_ERR_ABORT)) | 2146 | if (test_opt(sb, DATA_ERR_ABORT)) |
2147 | SEQ_OPTS_PUTS("data_err=abort"); | 2147 | SEQ_OPTS_PUTS("data_err=abort"); |
2148 | if (DUMMY_ENCRYPTION_ENABLED(sbi)) | ||
2149 | SEQ_OPTS_PUTS("test_dummy_encryption"); | ||
2148 | 2150 | ||
2149 | ext4_show_quota_options(seq, sb); | 2151 | ext4_show_quota_options(seq, sb); |
2150 | return 0; | 2152 | return 0; |
@@ -4378,11 +4380,13 @@ no_journal: | |||
4378 | block = ext4_count_free_clusters(sb); | 4380 | block = ext4_count_free_clusters(sb); |
4379 | ext4_free_blocks_count_set(sbi->s_es, | 4381 | ext4_free_blocks_count_set(sbi->s_es, |
4380 | EXT4_C2B(sbi, block)); | 4382 | EXT4_C2B(sbi, block)); |
4383 | ext4_superblock_csum_set(sb); | ||
4381 | err = percpu_counter_init(&sbi->s_freeclusters_counter, block, | 4384 | err = percpu_counter_init(&sbi->s_freeclusters_counter, block, |
4382 | GFP_KERNEL); | 4385 | GFP_KERNEL); |
4383 | if (!err) { | 4386 | if (!err) { |
4384 | unsigned long freei = ext4_count_free_inodes(sb); | 4387 | unsigned long freei = ext4_count_free_inodes(sb); |
4385 | sbi->s_es->s_free_inodes_count = cpu_to_le32(freei); | 4388 | sbi->s_es->s_free_inodes_count = cpu_to_le32(freei); |
4389 | ext4_superblock_csum_set(sb); | ||
4386 | err = percpu_counter_init(&sbi->s_freeinodes_counter, freei, | 4390 | err = percpu_counter_init(&sbi->s_freeinodes_counter, freei, |
4387 | GFP_KERNEL); | 4391 | GFP_KERNEL); |
4388 | } | 4392 | } |
diff --git a/fs/ocfs2/buffer_head_io.c b/fs/ocfs2/buffer_head_io.c index d9ebe11c8990..1d098c3c00e0 100644 --- a/fs/ocfs2/buffer_head_io.c +++ b/fs/ocfs2/buffer_head_io.c | |||
@@ -342,6 +342,7 @@ int ocfs2_read_blocks(struct ocfs2_caching_info *ci, u64 block, int nr, | |||
342 | * for this bh as it's not marked locally | 342 | * for this bh as it's not marked locally |
343 | * uptodate. */ | 343 | * uptodate. */ |
344 | status = -EIO; | 344 | status = -EIO; |
345 | clear_buffer_needs_validate(bh); | ||
345 | put_bh(bh); | 346 | put_bh(bh); |
346 | bhs[i] = NULL; | 347 | bhs[i] = NULL; |
347 | continue; | 348 | continue; |
diff --git a/fs/proc/kcore.c b/fs/proc/kcore.c index ad72261ee3fe..d297fe4472a9 100644 --- a/fs/proc/kcore.c +++ b/fs/proc/kcore.c | |||
@@ -464,6 +464,7 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos) | |||
464 | ret = -EFAULT; | 464 | ret = -EFAULT; |
465 | goto out; | 465 | goto out; |
466 | } | 466 | } |
467 | m = NULL; /* skip the list anchor */ | ||
467 | } else if (m->type == KCORE_VMALLOC) { | 468 | } else if (m->type == KCORE_VMALLOC) { |
468 | vread(buf, (char *)start, tsz); | 469 | vread(buf, (char *)start, tsz); |
469 | /* we have to zero-fill user buffer even if no read */ | 470 | /* we have to zero-fill user buffer even if no read */ |
diff --git a/fs/ubifs/super.c b/fs/ubifs/super.c index 23e7042666a7..bf000c8aeffb 100644 --- a/fs/ubifs/super.c +++ b/fs/ubifs/super.c | |||
@@ -1912,7 +1912,9 @@ static int ubifs_remount_fs(struct super_block *sb, int *flags, char *data) | |||
1912 | mutex_unlock(&c->bu_mutex); | 1912 | mutex_unlock(&c->bu_mutex); |
1913 | } | 1913 | } |
1914 | 1914 | ||
1915 | ubifs_assert(c, c->lst.taken_empty_lebs > 0); | 1915 | if (!c->need_recovery) |
1916 | ubifs_assert(c, c->lst.taken_empty_lebs > 0); | ||
1917 | |||
1916 | return 0; | 1918 | return 0; |
1917 | } | 1919 | } |
1918 | 1920 | ||
@@ -1954,6 +1956,9 @@ static struct ubi_volume_desc *open_ubi(const char *name, int mode) | |||
1954 | int dev, vol; | 1956 | int dev, vol; |
1955 | char *endptr; | 1957 | char *endptr; |
1956 | 1958 | ||
1959 | if (!name || !*name) | ||
1960 | return ERR_PTR(-EINVAL); | ||
1961 | |||
1957 | /* First, try to open using the device node path method */ | 1962 | /* First, try to open using the device node path method */ |
1958 | ubi = ubi_open_volume_path(name, mode); | 1963 | ubi = ubi_open_volume_path(name, mode); |
1959 | if (!IS_ERR(ubi)) | 1964 | if (!IS_ERR(ubi)) |
diff --git a/fs/ubifs/xattr.c b/fs/ubifs/xattr.c index 61afdfee4b28..f5ad1ede7990 100644 --- a/fs/ubifs/xattr.c +++ b/fs/ubifs/xattr.c | |||
@@ -152,12 +152,6 @@ static int create_xattr(struct ubifs_info *c, struct inode *host, | |||
152 | ui->data_len = size; | 152 | ui->data_len = size; |
153 | 153 | ||
154 | mutex_lock(&host_ui->ui_mutex); | 154 | mutex_lock(&host_ui->ui_mutex); |
155 | |||
156 | if (!host->i_nlink) { | ||
157 | err = -ENOENT; | ||
158 | goto out_noent; | ||
159 | } | ||
160 | |||
161 | host->i_ctime = current_time(host); | 155 | host->i_ctime = current_time(host); |
162 | host_ui->xattr_cnt += 1; | 156 | host_ui->xattr_cnt += 1; |
163 | host_ui->xattr_size += CALC_DENT_SIZE(fname_len(nm)); | 157 | host_ui->xattr_size += CALC_DENT_SIZE(fname_len(nm)); |
@@ -190,7 +184,6 @@ out_cancel: | |||
190 | host_ui->xattr_size -= CALC_XATTR_BYTES(size); | 184 | host_ui->xattr_size -= CALC_XATTR_BYTES(size); |
191 | host_ui->xattr_names -= fname_len(nm); | 185 | host_ui->xattr_names -= fname_len(nm); |
192 | host_ui->flags &= ~UBIFS_CRYPT_FL; | 186 | host_ui->flags &= ~UBIFS_CRYPT_FL; |
193 | out_noent: | ||
194 | mutex_unlock(&host_ui->ui_mutex); | 187 | mutex_unlock(&host_ui->ui_mutex); |
195 | out_free: | 188 | out_free: |
196 | make_bad_inode(inode); | 189 | make_bad_inode(inode); |
@@ -242,12 +235,6 @@ static int change_xattr(struct ubifs_info *c, struct inode *host, | |||
242 | mutex_unlock(&ui->ui_mutex); | 235 | mutex_unlock(&ui->ui_mutex); |
243 | 236 | ||
244 | mutex_lock(&host_ui->ui_mutex); | 237 | mutex_lock(&host_ui->ui_mutex); |
245 | |||
246 | if (!host->i_nlink) { | ||
247 | err = -ENOENT; | ||
248 | goto out_noent; | ||
249 | } | ||
250 | |||
251 | host->i_ctime = current_time(host); | 238 | host->i_ctime = current_time(host); |
252 | host_ui->xattr_size -= CALC_XATTR_BYTES(old_size); | 239 | host_ui->xattr_size -= CALC_XATTR_BYTES(old_size); |
253 | host_ui->xattr_size += CALC_XATTR_BYTES(size); | 240 | host_ui->xattr_size += CALC_XATTR_BYTES(size); |
@@ -269,7 +256,6 @@ static int change_xattr(struct ubifs_info *c, struct inode *host, | |||
269 | out_cancel: | 256 | out_cancel: |
270 | host_ui->xattr_size -= CALC_XATTR_BYTES(size); | 257 | host_ui->xattr_size -= CALC_XATTR_BYTES(size); |
271 | host_ui->xattr_size += CALC_XATTR_BYTES(old_size); | 258 | host_ui->xattr_size += CALC_XATTR_BYTES(old_size); |
272 | out_noent: | ||
273 | mutex_unlock(&host_ui->ui_mutex); | 259 | mutex_unlock(&host_ui->ui_mutex); |
274 | make_bad_inode(inode); | 260 | make_bad_inode(inode); |
275 | out_free: | 261 | out_free: |
@@ -496,12 +482,6 @@ static int remove_xattr(struct ubifs_info *c, struct inode *host, | |||
496 | return err; | 482 | return err; |
497 | 483 | ||
498 | mutex_lock(&host_ui->ui_mutex); | 484 | mutex_lock(&host_ui->ui_mutex); |
499 | |||
500 | if (!host->i_nlink) { | ||
501 | err = -ENOENT; | ||
502 | goto out_noent; | ||
503 | } | ||
504 | |||
505 | host->i_ctime = current_time(host); | 485 | host->i_ctime = current_time(host); |
506 | host_ui->xattr_cnt -= 1; | 486 | host_ui->xattr_cnt -= 1; |
507 | host_ui->xattr_size -= CALC_DENT_SIZE(fname_len(nm)); | 487 | host_ui->xattr_size -= CALC_DENT_SIZE(fname_len(nm)); |
@@ -521,7 +501,6 @@ out_cancel: | |||
521 | host_ui->xattr_size += CALC_DENT_SIZE(fname_len(nm)); | 501 | host_ui->xattr_size += CALC_DENT_SIZE(fname_len(nm)); |
522 | host_ui->xattr_size += CALC_XATTR_BYTES(ui->data_len); | 502 | host_ui->xattr_size += CALC_XATTR_BYTES(ui->data_len); |
523 | host_ui->xattr_names += fname_len(nm); | 503 | host_ui->xattr_names += fname_len(nm); |
524 | out_noent: | ||
525 | mutex_unlock(&host_ui->ui_mutex); | 504 | mutex_unlock(&host_ui->ui_mutex); |
526 | ubifs_release_budget(c, &req); | 505 | ubifs_release_budget(c, &req); |
527 | make_bad_inode(inode); | 506 | make_bad_inode(inode); |
@@ -561,9 +540,6 @@ static int ubifs_xattr_remove(struct inode *host, const char *name) | |||
561 | 540 | ||
562 | ubifs_assert(c, inode_is_locked(host)); | 541 | ubifs_assert(c, inode_is_locked(host)); |
563 | 542 | ||
564 | if (!host->i_nlink) | ||
565 | return -ENOENT; | ||
566 | |||
567 | if (fname_len(&nm) > UBIFS_MAX_NLEN) | 543 | if (fname_len(&nm) > UBIFS_MAX_NLEN) |
568 | return -ENAMETOOLONG; | 544 | return -ENAMETOOLONG; |
569 | 545 | ||
diff --git a/include/drm/drm_drv.h b/include/drm/drm_drv.h index 46a8009784df..152b3055e9e1 100644 --- a/include/drm/drm_drv.h +++ b/include/drm/drm_drv.h | |||
@@ -675,7 +675,7 @@ static inline bool drm_core_check_feature(struct drm_device *dev, int feature) | |||
675 | static inline bool drm_drv_uses_atomic_modeset(struct drm_device *dev) | 675 | static inline bool drm_drv_uses_atomic_modeset(struct drm_device *dev) |
676 | { | 676 | { |
677 | return drm_core_check_feature(dev, DRIVER_ATOMIC) || | 677 | return drm_core_check_feature(dev, DRIVER_ATOMIC) || |
678 | dev->mode_config.funcs->atomic_commit != NULL; | 678 | (dev->mode_config.funcs && dev->mode_config.funcs->atomic_commit != NULL); |
679 | } | 679 | } |
680 | 680 | ||
681 | 681 | ||
diff --git a/include/drm/drm_panel.h b/include/drm/drm_panel.h index 582a0ec0aa70..777814755fa6 100644 --- a/include/drm/drm_panel.h +++ b/include/drm/drm_panel.h | |||
@@ -89,7 +89,6 @@ struct drm_panel { | |||
89 | struct drm_device *drm; | 89 | struct drm_device *drm; |
90 | struct drm_connector *connector; | 90 | struct drm_connector *connector; |
91 | struct device *dev; | 91 | struct device *dev; |
92 | struct device_link *link; | ||
93 | 92 | ||
94 | const struct drm_panel_funcs *funcs; | 93 | const struct drm_panel_funcs *funcs; |
95 | 94 | ||
diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h index 763bbad1e258..4d36b27214fd 100644 --- a/include/linux/compiler-gcc.h +++ b/include/linux/compiler-gcc.h | |||
@@ -79,20 +79,6 @@ | |||
79 | #define __noretpoline __attribute__((indirect_branch("keep"))) | 79 | #define __noretpoline __attribute__((indirect_branch("keep"))) |
80 | #endif | 80 | #endif |
81 | 81 | ||
82 | /* | ||
83 | * it doesn't make sense on ARM (currently the only user of __naked) | ||
84 | * to trace naked functions because then mcount is called without | ||
85 | * stack and frame pointer being set up and there is no chance to | ||
86 | * restore the lr register to the value before mcount was called. | ||
87 | * | ||
88 | * The asm() bodies of naked functions often depend on standard calling | ||
89 | * conventions, therefore they must be noinline and noclone. | ||
90 | * | ||
91 | * GCC 4.[56] currently fail to enforce this, so we must do so ourselves. | ||
92 | * See GCC PR44290. | ||
93 | */ | ||
94 | #define __naked __attribute__((naked)) noinline __noclone notrace | ||
95 | |||
96 | #define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __COUNTER__) | 82 | #define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __COUNTER__) |
97 | 83 | ||
98 | #define __optimize(level) __attribute__((__optimize__(level))) | 84 | #define __optimize(level) __attribute__((__optimize__(level))) |
diff --git a/include/linux/compiler_types.h b/include/linux/compiler_types.h index 3525c179698c..db192becfec4 100644 --- a/include/linux/compiler_types.h +++ b/include/linux/compiler_types.h | |||
@@ -226,6 +226,14 @@ struct ftrace_likely_data { | |||
226 | #define notrace __attribute__((no_instrument_function)) | 226 | #define notrace __attribute__((no_instrument_function)) |
227 | #endif | 227 | #endif |
228 | 228 | ||
229 | /* | ||
230 | * it doesn't make sense on ARM (currently the only user of __naked) | ||
231 | * to trace naked functions because then mcount is called without | ||
232 | * stack and frame pointer being set up and there is no chance to | ||
233 | * restore the lr register to the value before mcount was called. | ||
234 | */ | ||
235 | #define __naked __attribute__((naked)) notrace | ||
236 | |||
229 | #define __compiler_offsetof(a, b) __builtin_offsetof(a, b) | 237 | #define __compiler_offsetof(a, b) __builtin_offsetof(a, b) |
230 | 238 | ||
231 | /* | 239 | /* |
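
Since naked functions have no compiler-generated prologue or epilogue, their bodies must consist of inline assembly only; the notrace in the definition above keeps mcount out of them for the same reason. A hypothetical ARM-only sketch:

/* Sketch: a naked function may contain nothing but asm. */
static void __naked example_naked_return(void)
{
	asm volatile("bx lr");
}
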
diff --git a/include/linux/genhd.h b/include/linux/genhd.h index 57864422a2c8..25c08c6c7f99 100644 --- a/include/linux/genhd.h +++ b/include/linux/genhd.h | |||
@@ -83,10 +83,10 @@ struct partition { | |||
83 | } __attribute__((packed)); | 83 | } __attribute__((packed)); |
84 | 84 | ||
85 | struct disk_stats { | 85 | struct disk_stats { |
86 | u64 nsecs[NR_STAT_GROUPS]; | ||
86 | unsigned long sectors[NR_STAT_GROUPS]; | 87 | unsigned long sectors[NR_STAT_GROUPS]; |
87 | unsigned long ios[NR_STAT_GROUPS]; | 88 | unsigned long ios[NR_STAT_GROUPS]; |
88 | unsigned long merges[NR_STAT_GROUPS]; | 89 | unsigned long merges[NR_STAT_GROUPS]; |
89 | unsigned long ticks[NR_STAT_GROUPS]; | ||
90 | unsigned long io_ticks; | 90 | unsigned long io_ticks; |
91 | unsigned long time_in_queue; | 91 | unsigned long time_in_queue; |
92 | }; | 92 | }; |
@@ -354,6 +354,9 @@ static inline void free_part_stats(struct hd_struct *part) | |||
354 | 354 | ||
355 | #endif /* CONFIG_SMP */ | 355 | #endif /* CONFIG_SMP */ |
356 | 356 | ||
357 | #define part_stat_read_msecs(part, which) \ | ||
358 | div_u64(part_stat_read(part, nsecs[which]), NSEC_PER_MSEC) | ||
359 | |||
357 | #define part_stat_read_accum(part, field) \ | 360 | #define part_stat_read_accum(part, field) \ |
358 | (part_stat_read(part, field[STAT_READ]) + \ | 361 | (part_stat_read(part, field[STAT_READ]) + \ |
359 | part_stat_read(part, field[STAT_WRITE]) + \ | 362 | part_stat_read(part, field[STAT_WRITE]) + \ |
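
With disk_stats now accumulating I/O time in nanoseconds (u64 nsecs[]) rather than ticks, part_stat_read_msecs() converts on the way out with div_u64(). A sketch of the same conversion in isolation:

#include <linux/math64.h>	/* div_u64() */
#include <linux/time64.h>	/* NSEC_PER_MSEC */

/* Sketch: turn an accumulated nanosecond counter into milliseconds. */
static inline u64 example_nsecs_to_msecs(u64 nsecs)
{
	return div_u64(nsecs, NSEC_PER_MSEC);
}
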
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index 0205aee44ded..c926698040e0 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h | |||
@@ -733,8 +733,6 @@ bool kvm_vcpu_wake_up(struct kvm_vcpu *vcpu); | |||
733 | void kvm_vcpu_kick(struct kvm_vcpu *vcpu); | 733 | void kvm_vcpu_kick(struct kvm_vcpu *vcpu); |
734 | int kvm_vcpu_yield_to(struct kvm_vcpu *target); | 734 | int kvm_vcpu_yield_to(struct kvm_vcpu *target); |
735 | void kvm_vcpu_on_spin(struct kvm_vcpu *vcpu, bool usermode_vcpu_not_eligible); | 735 | void kvm_vcpu_on_spin(struct kvm_vcpu *vcpu, bool usermode_vcpu_not_eligible); |
736 | void kvm_load_guest_fpu(struct kvm_vcpu *vcpu); | ||
737 | void kvm_put_guest_fpu(struct kvm_vcpu *vcpu); | ||
738 | 736 | ||
739 | void kvm_flush_remote_tlbs(struct kvm *kvm); | 737 | void kvm_flush_remote_tlbs(struct kvm *kvm); |
740 | void kvm_reload_remote_mmus(struct kvm *kvm); | 738 | void kvm_reload_remote_mmus(struct kvm *kvm); |
diff --git a/include/linux/mfd/da9063/pdata.h b/include/linux/mfd/da9063/pdata.h index 8a125701ef7b..50bed4f89c1a 100644 --- a/include/linux/mfd/da9063/pdata.h +++ b/include/linux/mfd/da9063/pdata.h | |||
@@ -21,7 +21,7 @@ | |||
21 | /* | 21 | /* |
22 | * Regulator configuration | 22 | * Regulator configuration |
23 | */ | 23 | */ |
24 | /* DA9063 regulator IDs */ | 24 | /* DA9063 and DA9063L regulator IDs */ |
25 | enum { | 25 | enum { |
26 | /* BUCKs */ | 26 | /* BUCKs */ |
27 | DA9063_ID_BCORE1, | 27 | DA9063_ID_BCORE1, |
@@ -37,18 +37,20 @@ enum { | |||
37 | DA9063_ID_BMEM_BIO_MERGED, | 37 | DA9063_ID_BMEM_BIO_MERGED, |
38 | /* When two BUCKs are merged, they cannot be reused separately */ | 38 | /* When two BUCKs are merged, they cannot be reused separately */ |
39 | 39 | ||
40 | /* LDOs */ | 40 | /* LDOs on both DA9063 and DA9063L */ |
41 | DA9063_ID_LDO3, | ||
42 | DA9063_ID_LDO7, | ||
43 | DA9063_ID_LDO8, | ||
44 | DA9063_ID_LDO9, | ||
45 | DA9063_ID_LDO11, | ||
46 | |||
47 | /* DA9063-only LDOs */ | ||
41 | DA9063_ID_LDO1, | 48 | DA9063_ID_LDO1, |
42 | DA9063_ID_LDO2, | 49 | DA9063_ID_LDO2, |
43 | DA9063_ID_LDO3, | ||
44 | DA9063_ID_LDO4, | 50 | DA9063_ID_LDO4, |
45 | DA9063_ID_LDO5, | 51 | DA9063_ID_LDO5, |
46 | DA9063_ID_LDO6, | 52 | DA9063_ID_LDO6, |
47 | DA9063_ID_LDO7, | ||
48 | DA9063_ID_LDO8, | ||
49 | DA9063_ID_LDO9, | ||
50 | DA9063_ID_LDO10, | 53 | DA9063_ID_LDO10, |
51 | DA9063_ID_LDO11, | ||
52 | }; | 54 | }; |
53 | 55 | ||
54 | /* Regulators platform data */ | 56 | /* Regulators platform data */ |
diff --git a/include/linux/mfd/rohm-bd718x7.h b/include/linux/mfd/rohm-bd718x7.h index a528747f8aed..e8338e5dc10b 100644 --- a/include/linux/mfd/rohm-bd718x7.h +++ b/include/linux/mfd/rohm-bd718x7.h | |||
@@ -78,9 +78,9 @@ enum { | |||
78 | BD71837_REG_TRANS_COND0 = 0x1F, | 78 | BD71837_REG_TRANS_COND0 = 0x1F, |
79 | BD71837_REG_TRANS_COND1 = 0x20, | 79 | BD71837_REG_TRANS_COND1 = 0x20, |
80 | BD71837_REG_VRFAULTEN = 0x21, | 80 | BD71837_REG_VRFAULTEN = 0x21, |
81 | BD71837_REG_MVRFLTMASK0 = 0x22, | 81 | BD718XX_REG_MVRFLTMASK0 = 0x22, |
82 | BD71837_REG_MVRFLTMASK1 = 0x23, | 82 | BD718XX_REG_MVRFLTMASK1 = 0x23, |
83 | BD71837_REG_MVRFLTMASK2 = 0x24, | 83 | BD718XX_REG_MVRFLTMASK2 = 0x24, |
84 | BD71837_REG_RCVCFG = 0x25, | 84 | BD71837_REG_RCVCFG = 0x25, |
85 | BD71837_REG_RCVNUM = 0x26, | 85 | BD71837_REG_RCVNUM = 0x26, |
86 | BD71837_REG_PWRONCONFIG0 = 0x27, | 86 | BD71837_REG_PWRONCONFIG0 = 0x27, |
@@ -159,6 +159,33 @@ enum { | |||
159 | #define BUCK8_MASK 0x3F | 159 | #define BUCK8_MASK 0x3F |
160 | #define BUCK8_DEFAULT 0x1E | 160 | #define BUCK8_DEFAULT 0x1E |
161 | 161 | ||
162 | /* BD718XX Voltage monitoring masks */ | ||
163 | #define BD718XX_BUCK1_VRMON80 0x1 | ||
164 | #define BD718XX_BUCK1_VRMON130 0x2 | ||
165 | #define BD718XX_BUCK2_VRMON80 0x4 | ||
166 | #define BD718XX_BUCK2_VRMON130 0x8 | ||
167 | #define BD718XX_1ST_NODVS_BUCK_VRMON80 0x1 | ||
168 | #define BD718XX_1ST_NODVS_BUCK_VRMON130 0x2 | ||
169 | #define BD718XX_2ND_NODVS_BUCK_VRMON80 0x4 | ||
170 | #define BD718XX_2ND_NODVS_BUCK_VRMON130 0x8 | ||
171 | #define BD718XX_3RD_NODVS_BUCK_VRMON80 0x10 | ||
172 | #define BD718XX_3RD_NODVS_BUCK_VRMON130 0x20 | ||
173 | #define BD718XX_4TH_NODVS_BUCK_VRMON80 0x40 | ||
174 | #define BD718XX_4TH_NODVS_BUCK_VRMON130 0x80 | ||
175 | #define BD718XX_LDO1_VRMON80 0x1 | ||
176 | #define BD718XX_LDO2_VRMON80 0x2 | ||
177 | #define BD718XX_LDO3_VRMON80 0x4 | ||
178 | #define BD718XX_LDO4_VRMON80 0x8 | ||
179 | #define BD718XX_LDO5_VRMON80 0x10 | ||
180 | #define BD718XX_LDO6_VRMON80 0x20 | ||
181 | |||
182 | /* BD71837 specific voltage monitoring masks */ | ||
183 | #define BD71837_BUCK3_VRMON80 0x10 | ||
184 | #define BD71837_BUCK3_VRMON130 0x20 | ||
185 | #define BD71837_BUCK4_VRMON80 0x40 | ||
186 | #define BD71837_BUCK4_VRMON130 0x80 | ||
187 | #define BD71837_LDO7_VRMON80 0x40 | ||
188 | |||
162 | /* BD71837_REG_IRQ bits */ | 189 | /* BD71837_REG_IRQ bits */ |
163 | #define IRQ_SWRST 0x40 | 190 | #define IRQ_SWRST 0x40 |
164 | #define IRQ_PWRON_S 0x20 | 191 | #define IRQ_PWRON_S 0x20 |
diff --git a/include/linux/netpoll.h b/include/linux/netpoll.h index 67662d01130a..3ef82d3a78db 100644 --- a/include/linux/netpoll.h +++ b/include/linux/netpoll.h | |||
@@ -49,8 +49,9 @@ struct netpoll_info { | |||
49 | }; | 49 | }; |
50 | 50 | ||
51 | #ifdef CONFIG_NETPOLL | 51 | #ifdef CONFIG_NETPOLL |
52 | extern void netpoll_poll_disable(struct net_device *dev); | 52 | void netpoll_poll_dev(struct net_device *dev); |
53 | extern void netpoll_poll_enable(struct net_device *dev); | 53 | void netpoll_poll_disable(struct net_device *dev); |
54 | void netpoll_poll_enable(struct net_device *dev); | ||
54 | #else | 55 | #else |
55 | static inline void netpoll_poll_disable(struct net_device *dev) { return; } | 56 | static inline void netpoll_poll_disable(struct net_device *dev) { return; } |
56 | static inline void netpoll_poll_enable(struct net_device *dev) { return; } | 57 | static inline void netpoll_poll_enable(struct net_device *dev) { return; } |
diff --git a/include/linux/regulator/machine.h b/include/linux/regulator/machine.h index 3468703d663a..a459a5e973a7 100644 --- a/include/linux/regulator/machine.h +++ b/include/linux/regulator/machine.h | |||
@@ -48,9 +48,9 @@ struct regulator; | |||
48 | * DISABLE_IN_SUSPEND - turn off regulator in suspend states | 48 | * DISABLE_IN_SUSPEND - turn off regulator in suspend states |
49 | * ENABLE_IN_SUSPEND - keep regulator on in suspend states | 49 | * ENABLE_IN_SUSPEND - keep regulator on in suspend states |
50 | */ | 50 | */ |
51 | #define DO_NOTHING_IN_SUSPEND (-1) | 51 | #define DO_NOTHING_IN_SUSPEND 0 |
52 | #define DISABLE_IN_SUSPEND 0 | 52 | #define DISABLE_IN_SUSPEND 1 |
53 | #define ENABLE_IN_SUSPEND 1 | 53 | #define ENABLE_IN_SUSPEND 2 |
54 | 54 | ||
55 | /* Regulator active discharge flags */ | 55 | /* Regulator active discharge flags */ |
56 | enum regulator_active_discharge { | 56 | enum regulator_active_discharge { |
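The renumbering makes DO_NOTHING_IN_SUSPEND the all-zeroes value, so a suspend description that was never filled in now decodes as "leave the regulator alone" rather than "disable it in suspend". A small hedged sketch of decoding the three constants; the helper is illustrative, only the constant names and values come from this header:

    /* Sketch: an unset, zero-initialized policy field now means
     * "do nothing" by default.
     */
    static const char *example_suspend_policy_name(int policy)
    {
        switch (policy) {
        case DO_NOTHING_IN_SUSPEND: return "do nothing";  /* 0, default */
        case DISABLE_IN_SUSPEND:    return "turn off";
        case ENABLE_IN_SUSPEND:     return "keep on";
        default:                    return "unknown";
        }
    }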
diff --git a/include/linux/spi/spi-mem.h b/include/linux/spi/spi-mem.h index b2bd4b4127c4..69ee30456864 100644 --- a/include/linux/spi/spi-mem.h +++ b/include/linux/spi/spi-mem.h | |||
@@ -81,8 +81,10 @@ enum spi_mem_data_dir { | |||
81 | * @dummy.buswidth: number of IO lanes used to transmit the dummy bytes | 81 | * @dummy.buswidth: number of IO lanes used to transmit the dummy bytes |
82 | * @data.buswidth: number of IO lanes used to send/receive the data | 82 | * @data.buswidth: number of IO lanes used to send/receive the data |
83 | * @data.dir: direction of the transfer | 83 | * @data.dir: direction of the transfer |
84 | * @data.buf.in: input buffer | 84 | * @data.nbytes: number of data bytes to send/receive. Can be zero if the |
85 | * @data.buf.out: output buffer | 85 | * operation does not involve transferring data |
86 | * @data.buf.in: input buffer (must be DMA-able) | ||
87 | * @data.buf.out: output buffer (must be DMA-able) | ||
86 | */ | 88 | */ |
87 | struct spi_mem_op { | 89 | struct spi_mem_op { |
88 | struct { | 90 | struct { |
@@ -105,7 +107,6 @@ struct spi_mem_op { | |||
105 | u8 buswidth; | 107 | u8 buswidth; |
106 | enum spi_mem_data_dir dir; | 108 | enum spi_mem_data_dir dir; |
107 | unsigned int nbytes; | 109 | unsigned int nbytes; |
108 | /* buf.{in,out} must be DMA-able. */ | ||
109 | union { | 110 | union { |
110 | void *in; | 111 | void *in; |
111 | const void *out; | 112 | const void *out; |
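The reworded kerneldoc pairs with the op-construction helpers in the same header: data.nbytes may be zero for data-less commands, and both data buffers must be DMA-able, which rules out stack storage. A hedged sketch of a simple read, assuming the SPI_MEM_OP_* initializers and spi_mem_exec_op() from this header; example_read(), the 0x03 opcode, the 3-byte address and the single-line bus widths are illustrative:

    #include <linux/spi/spi-mem.h>
    #include <linux/slab.h>

    /* Sketch: classic "read data" op; buf comes from kmalloc() so it is
     * DMA-able, never from the caller's stack.
     */
    static int example_read(struct spi_mem *mem, u32 addr, size_t len)
    {
        void *buf = kmalloc(len, GFP_KERNEL);
        struct spi_mem_op op = SPI_MEM_OP(SPI_MEM_OP_CMD(0x03, 1),
                                          SPI_MEM_OP_ADDR(3, addr, 1),
                                          SPI_MEM_OP_NO_DUMMY,
                                          SPI_MEM_OP_DATA_IN(len, buf, 1));
        int ret;

        if (!buf)
            return -ENOMEM;

        ret = spi_mem_exec_op(mem, &op);
        kfree(buf);
        return ret;
    }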
diff --git a/include/linux/stmmac.h b/include/linux/stmmac.h index c43e9a01b892..7ddfc65586b0 100644 --- a/include/linux/stmmac.h +++ b/include/linux/stmmac.h | |||
@@ -30,6 +30,7 @@ | |||
30 | 30 | ||
31 | #define MTL_MAX_RX_QUEUES 8 | 31 | #define MTL_MAX_RX_QUEUES 8 |
32 | #define MTL_MAX_TX_QUEUES 8 | 32 | #define MTL_MAX_TX_QUEUES 8 |
33 | #define STMMAC_CH_MAX 8 | ||
33 | 34 | ||
34 | #define STMMAC_RX_COE_NONE 0 | 35 | #define STMMAC_RX_COE_NONE 0 |
35 | #define STMMAC_RX_COE_TYPE1 1 | 36 | #define STMMAC_RX_COE_TYPE1 1 |
diff --git a/include/linux/uio.h b/include/linux/uio.h index 409c845d4cd3..422b1c01ee0d 100644 --- a/include/linux/uio.h +++ b/include/linux/uio.h | |||
@@ -172,7 +172,7 @@ size_t copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i) | |||
172 | static __always_inline __must_check | 172 | static __always_inline __must_check |
173 | size_t copy_to_iter_mcsafe(void *addr, size_t bytes, struct iov_iter *i) | 173 | size_t copy_to_iter_mcsafe(void *addr, size_t bytes, struct iov_iter *i) |
174 | { | 174 | { |
175 | if (unlikely(!check_copy_size(addr, bytes, false))) | 175 | if (unlikely(!check_copy_size(addr, bytes, true))) |
176 | return 0; | 176 | return 0; |
177 | else | 177 | else |
178 | return _copy_to_iter_mcsafe(addr, bytes, i); | 178 | return _copy_to_iter_mcsafe(addr, bytes, i); |
diff --git a/include/linux/vga_switcheroo.h b/include/linux/vga_switcheroo.h index a34539b7f750..7e6ac0114d55 100644 --- a/include/linux/vga_switcheroo.h +++ b/include/linux/vga_switcheroo.h | |||
@@ -133,15 +133,18 @@ struct vga_switcheroo_handler { | |||
133 | * @can_switch: check if the device is in a position to switch now. | 133 | * @can_switch: check if the device is in a position to switch now. |
134 | * Mandatory. The client should return false if a user space process | 134 | * Mandatory. The client should return false if a user space process |
135 | * has one of its device files open | 135 | * has one of its device files open |
136 | * @gpu_bound: notify the client id to audio client when the GPU is bound. | ||
136 | * | 137 | * |
137 | * Client callbacks. A client can be either a GPU or an audio device on a GPU. | 138 | * Client callbacks. A client can be either a GPU or an audio device on a GPU. |
138 | * The @set_gpu_state and @can_switch methods are mandatory, @reprobe may be | 139 | * The @set_gpu_state and @can_switch methods are mandatory, @reprobe may be |
139 | * set to NULL. For audio clients, the @reprobe member is bogus. | 140 | * set to NULL. For audio clients, the @reprobe member is bogus. |
141 | * OTOH, @gpu_bound is only for audio clients, and not used for GPU clients. | ||
140 | */ | 142 | */ |
141 | struct vga_switcheroo_client_ops { | 143 | struct vga_switcheroo_client_ops { |
142 | void (*set_gpu_state)(struct pci_dev *dev, enum vga_switcheroo_state); | 144 | void (*set_gpu_state)(struct pci_dev *dev, enum vga_switcheroo_state); |
143 | void (*reprobe)(struct pci_dev *dev); | 145 | void (*reprobe)(struct pci_dev *dev); |
144 | bool (*can_switch)(struct pci_dev *dev); | 146 | bool (*can_switch)(struct pci_dev *dev); |
147 | void (*gpu_bound)(struct pci_dev *dev, enum vga_switcheroo_client_id); | ||
145 | }; | 148 | }; |
146 | 149 | ||
147 | #if defined(CONFIG_VGA_SWITCHEROO) | 150 | #if defined(CONFIG_VGA_SWITCHEROO) |
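Per the updated kerneldoc, @gpu_bound is meaningful only for audio clients, which use it to learn which client id their parent GPU was registered under, while @reprobe stays GPU-only. A hedged sketch of an audio client's ops table; all example_* names and the callback bodies are illustrative:

    #include <linux/vga_switcheroo.h>

    static void example_set_gpu_state(struct pci_dev *pdev,
                                      enum vga_switcheroo_state state)
    {
        /* mandatory hook; body omitted in this sketch */
    }

    static bool example_can_switch(struct pci_dev *pdev)
    {
        return true;    /* illustrative: nothing keeps the device open */
    }

    static void example_gpu_bound(struct pci_dev *pdev,
                                  enum vga_switcheroo_client_id id)
    {
        /* audio-only hook: remember the client id of the bound GPU,
         * e.g. to finish runtime-PM setup once the GPU driver is there.
         */
    }

    static const struct vga_switcheroo_client_ops example_audio_ops = {
        .set_gpu_state = example_set_gpu_state,
        .can_switch    = example_can_switch,
        .gpu_bound     = example_gpu_bound,
    };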
diff --git a/include/net/nfc/hci.h b/include/net/nfc/hci.h index 316694dafa5b..008f466d1da7 100644 --- a/include/net/nfc/hci.h +++ b/include/net/nfc/hci.h | |||
@@ -87,7 +87,7 @@ struct nfc_hci_pipe { | |||
87 | * According to specification 102 622 chapter 4.4 Pipes, | 87 | * According to specification 102 622 chapter 4.4 Pipes, |
88 | * the pipe identifier is 7 bits long. | 88 | * the pipe identifier is 7 bits long. |
89 | */ | 89 | */ |
90 | #define NFC_HCI_MAX_PIPES 127 | 90 | #define NFC_HCI_MAX_PIPES 128 |
91 | struct nfc_hci_init_data { | 91 | struct nfc_hci_init_data { |
92 | u8 gate_count; | 92 | u8 gate_count; |
93 | struct nfc_hci_gate gates[NFC_HCI_MAX_CUSTOM_GATES]; | 93 | struct nfc_hci_gate gates[NFC_HCI_MAX_CUSTOM_GATES]; |
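The bump from 127 to 128 follows directly from the quoted spec text: a 7-bit pipe identifier can take 128 distinct values, 0 through 127, so any table indexed by pipe id needs 128 slots or an id of 127 walks off the end. A minimal hedged sketch of the indexing pattern; the array and lookup helper are illustrative, struct nfc_hci_pipe and NFC_HCI_MAX_PIPES come from this header:

    #include <net/nfc/hci.h>

    static struct nfc_hci_pipe example_pipes[NFC_HCI_MAX_PIPES];

    /* Sketch: valid ids run 0..NFC_HCI_MAX_PIPES - 1, i.e. 0..127. */
    static struct nfc_hci_pipe *example_pipe_lookup(u8 pipe_id)
    {
        if (pipe_id >= NFC_HCI_MAX_PIPES)
            return NULL;
        return &example_pipes[pipe_id];
    }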
diff --git a/include/net/tls.h b/include/net/tls.h index d5c683e8bb22..0a769cf2f5f3 100644 --- a/include/net/tls.h +++ b/include/net/tls.h | |||
@@ -171,15 +171,14 @@ struct cipher_context { | |||
171 | char *rec_seq; | 171 | char *rec_seq; |
172 | }; | 172 | }; |
173 | 173 | ||
174 | union tls_crypto_context { | ||
175 | struct tls_crypto_info info; | ||
176 | struct tls12_crypto_info_aes_gcm_128 aes_gcm_128; | ||
177 | }; | ||
178 | |||
174 | struct tls_context { | 179 | struct tls_context { |
175 | union { | 180 | union tls_crypto_context crypto_send; |
176 | struct tls_crypto_info crypto_send; | 181 | union tls_crypto_context crypto_recv; |
177 | struct tls12_crypto_info_aes_gcm_128 crypto_send_aes_gcm_128; | ||
178 | }; | ||
179 | union { | ||
180 | struct tls_crypto_info crypto_recv; | ||
181 | struct tls12_crypto_info_aes_gcm_128 crypto_recv_aes_gcm_128; | ||
182 | }; | ||
183 | 182 | ||
184 | struct list_head list; | 183 | struct list_head list; |
185 | struct net_device *netdev; | 184 | struct net_device *netdev; |
@@ -367,8 +366,8 @@ static inline void tls_fill_prepend(struct tls_context *ctx, | |||
367 | * size KTLS_DTLS_HEADER_SIZE + KTLS_DTLS_NONCE_EXPLICIT_SIZE | 366 | * size KTLS_DTLS_HEADER_SIZE + KTLS_DTLS_NONCE_EXPLICIT_SIZE |
368 | */ | 367 | */ |
369 | buf[0] = record_type; | 368 | buf[0] = record_type; |
370 | buf[1] = TLS_VERSION_MINOR(ctx->crypto_send.version); | 369 | buf[1] = TLS_VERSION_MINOR(ctx->crypto_send.info.version); |
371 | buf[2] = TLS_VERSION_MAJOR(ctx->crypto_send.version); | 370 | buf[2] = TLS_VERSION_MAJOR(ctx->crypto_send.info.version); |
372 | /* we can use IV for nonce explicit according to spec */ | 371 | /* we can use IV for nonce explicit according to spec */ |
373 | buf[3] = pkt_len >> 8; | 372 | buf[3] = pkt_len >> 8; |
374 | buf[4] = pkt_len & 0xFF; | 373 | buf[4] = pkt_len & 0xFF; |
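With the union pulled out into the named tls_crypto_context type, every reader first goes through the .info member for the negotiated parameters and only then, once the cipher is known, uses the cipher-specific view of the same storage, exactly as the tls_fill_prepend() hunk above now does. A hedged access sketch; the helper name and the dumped fields are illustrative:

    #include <linux/printk.h>
    #include <net/tls.h>

    /* Sketch: .info carries version/cipher_type; the aes_gcm_128 view is
     * only meaningful once cipher_type says so.
     */
    static void example_dump_tx_crypto(struct tls_context *ctx)
    {
        pr_debug("TLS TX version 0x%x\n", ctx->crypto_send.info.version);

        if (ctx->crypto_send.info.cipher_type == TLS_CIPHER_AES_GCM_128) {
            struct tls12_crypto_info_aes_gcm_128 *gcm =
                    &ctx->crypto_send.aes_gcm_128;

            pr_debug("TLS TX rec_seq %8phN\n", gcm->rec_seq);
        }
    }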
diff --git a/include/sound/hdaudio.h b/include/sound/hdaudio.h index 6f1e1f3b3063..cd1773d0e08f 100644 --- a/include/sound/hdaudio.h +++ b/include/sound/hdaudio.h | |||
@@ -412,6 +412,7 @@ void snd_hdac_bus_init_cmd_io(struct hdac_bus *bus); | |||
412 | void snd_hdac_bus_stop_cmd_io(struct hdac_bus *bus); | 412 | void snd_hdac_bus_stop_cmd_io(struct hdac_bus *bus); |
413 | void snd_hdac_bus_enter_link_reset(struct hdac_bus *bus); | 413 | void snd_hdac_bus_enter_link_reset(struct hdac_bus *bus); |
414 | void snd_hdac_bus_exit_link_reset(struct hdac_bus *bus); | 414 | void snd_hdac_bus_exit_link_reset(struct hdac_bus *bus); |
415 | int snd_hdac_bus_reset_link(struct hdac_bus *bus, bool full_reset); | ||
415 | 416 | ||
416 | void snd_hdac_bus_update_rirb(struct hdac_bus *bus); | 417 | void snd_hdac_bus_update_rirb(struct hdac_bus *bus); |
417 | int snd_hdac_bus_handle_stream_irq(struct hdac_bus *bus, unsigned int status, | 418 | int snd_hdac_bus_handle_stream_irq(struct hdac_bus *bus, unsigned int status, |
diff --git a/include/sound/soc-dapm.h b/include/sound/soc-dapm.h index af9ef16cc34d..fdaaafdc7a00 100644 --- a/include/sound/soc-dapm.h +++ b/include/sound/soc-dapm.h | |||
@@ -407,6 +407,7 @@ int snd_soc_dapm_new_dai_widgets(struct snd_soc_dapm_context *dapm, | |||
407 | int snd_soc_dapm_link_dai_widgets(struct snd_soc_card *card); | 407 | int snd_soc_dapm_link_dai_widgets(struct snd_soc_card *card); |
408 | void snd_soc_dapm_connect_dai_link_widgets(struct snd_soc_card *card); | 408 | void snd_soc_dapm_connect_dai_link_widgets(struct snd_soc_card *card); |
409 | int snd_soc_dapm_new_pcm(struct snd_soc_card *card, | 409 | int snd_soc_dapm_new_pcm(struct snd_soc_card *card, |
410 | struct snd_soc_pcm_runtime *rtd, | ||
410 | const struct snd_soc_pcm_stream *params, | 411 | const struct snd_soc_pcm_stream *params, |
411 | unsigned int num_params, | 412 | unsigned int num_params, |
412 | struct snd_soc_dapm_widget *source, | 413 | struct snd_soc_dapm_widget *source, |
diff --git a/include/uapi/linux/keyctl.h b/include/uapi/linux/keyctl.h index 910cc4334b21..7b8c9e19bad1 100644 --- a/include/uapi/linux/keyctl.h +++ b/include/uapi/linux/keyctl.h | |||
@@ -65,7 +65,7 @@ | |||
65 | 65 | ||
66 | /* keyctl structures */ | 66 | /* keyctl structures */ |
67 | struct keyctl_dh_params { | 67 | struct keyctl_dh_params { |
68 | __s32 dh_private; | 68 | __s32 private; |
69 | __s32 prime; | 69 | __s32 prime; |
70 | __s32 base; | 70 | __s32 base; |
71 | }; | 71 | }; |
diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h index 07548de5c988..251be353f950 100644 --- a/include/uapi/linux/kvm.h +++ b/include/uapi/linux/kvm.h | |||
@@ -952,6 +952,7 @@ struct kvm_ppc_resize_hpt { | |||
952 | #define KVM_CAP_S390_HPAGE_1M 156 | 952 | #define KVM_CAP_S390_HPAGE_1M 156 |
953 | #define KVM_CAP_NESTED_STATE 157 | 953 | #define KVM_CAP_NESTED_STATE 157 |
954 | #define KVM_CAP_ARM_INJECT_SERROR_ESR 158 | 954 | #define KVM_CAP_ARM_INJECT_SERROR_ESR 158 |
955 | #define KVM_CAP_MSR_PLATFORM_INFO 159 | ||
955 | 956 | ||
956 | #ifdef KVM_CAP_IRQ_ROUTING | 957 | #ifdef KVM_CAP_IRQ_ROUTING |
957 | 958 | ||
diff --git a/include/uapi/sound/skl-tplg-interface.h b/include/uapi/sound/skl-tplg-interface.h index f58cafa42f18..f39352cef382 100644 --- a/include/uapi/sound/skl-tplg-interface.h +++ b/include/uapi/sound/skl-tplg-interface.h | |||
@@ -10,6 +10,8 @@ | |||
10 | #ifndef __HDA_TPLG_INTERFACE_H__ | 10 | #ifndef __HDA_TPLG_INTERFACE_H__ |
11 | #define __HDA_TPLG_INTERFACE_H__ | 11 | #define __HDA_TPLG_INTERFACE_H__ |
12 | 12 | ||
13 | #include <linux/types.h> | ||
14 | |||
13 | /* | 15 | /* |
14 | * Default types range from 0~12. type can range from 0 to 0xff | 16 | * Default types range from 0~12. type can range from 0 to 0xff |
15 | * SST types start at higher to avoid any overlapping in future | 17 | * SST types start at higher to avoid any overlapping in future |
@@ -143,10 +145,10 @@ enum skl_module_param_type { | |||
143 | }; | 145 | }; |
144 | 146 | ||
145 | struct skl_dfw_algo_data { | 147 | struct skl_dfw_algo_data { |
146 | u32 set_params:2; | 148 | __u32 set_params:2; |
147 | u32 rsvd:30; | 149 | __u32 rsvd:30; |
148 | u32 param_id; | 150 | __u32 param_id; |
149 | u32 max; | 151 | __u32 max; |
150 | char params[0]; | 152 | char params[0]; |
151 | } __packed; | 153 | } __packed; |
152 | 154 | ||
@@ -163,68 +165,68 @@ enum skl_tuple_type { | |||
163 | /* v4 configuration data */ | 165 | /* v4 configuration data */ |
164 | 166 | ||
165 | struct skl_dfw_v4_module_pin { | 167 | struct skl_dfw_v4_module_pin { |
166 | u16 module_id; | 168 | __u16 module_id; |
167 | u16 instance_id; | 169 | __u16 instance_id; |
168 | } __packed; | 170 | } __packed; |
169 | 171 | ||
170 | struct skl_dfw_v4_module_fmt { | 172 | struct skl_dfw_v4_module_fmt { |
171 | u32 channels; | 173 | __u32 channels; |
172 | u32 freq; | 174 | __u32 freq; |
173 | u32 bit_depth; | 175 | __u32 bit_depth; |
174 | u32 valid_bit_depth; | 176 | __u32 valid_bit_depth; |
175 | u32 ch_cfg; | 177 | __u32 ch_cfg; |
176 | u32 interleaving_style; | 178 | __u32 interleaving_style; |
177 | u32 sample_type; | 179 | __u32 sample_type; |
178 | u32 ch_map; | 180 | __u32 ch_map; |
179 | } __packed; | 181 | } __packed; |
180 | 182 | ||
181 | struct skl_dfw_v4_module_caps { | 183 | struct skl_dfw_v4_module_caps { |
182 | u32 set_params:2; | 184 | __u32 set_params:2; |
183 | u32 rsvd:30; | 185 | __u32 rsvd:30; |
184 | u32 param_id; | 186 | __u32 param_id; |
185 | u32 caps_size; | 187 | __u32 caps_size; |
186 | u32 caps[HDA_SST_CFG_MAX]; | 188 | __u32 caps[HDA_SST_CFG_MAX]; |
187 | } __packed; | 189 | } __packed; |
188 | 190 | ||
189 | struct skl_dfw_v4_pipe { | 191 | struct skl_dfw_v4_pipe { |
190 | u8 pipe_id; | 192 | __u8 pipe_id; |
191 | u8 pipe_priority; | 193 | __u8 pipe_priority; |
192 | u16 conn_type:4; | 194 | __u16 conn_type:4; |
193 | u16 rsvd:4; | 195 | __u16 rsvd:4; |
194 | u16 memory_pages:8; | 196 | __u16 memory_pages:8; |
195 | } __packed; | 197 | } __packed; |
196 | 198 | ||
197 | struct skl_dfw_v4_module { | 199 | struct skl_dfw_v4_module { |
198 | char uuid[SKL_UUID_STR_SZ]; | 200 | char uuid[SKL_UUID_STR_SZ]; |
199 | 201 | ||
200 | u16 module_id; | 202 | __u16 module_id; |
201 | u16 instance_id; | 203 | __u16 instance_id; |
202 | u32 max_mcps; | 204 | __u32 max_mcps; |
203 | u32 mem_pages; | 205 | __u32 mem_pages; |
204 | u32 obs; | 206 | __u32 obs; |
205 | u32 ibs; | 207 | __u32 ibs; |
206 | u32 vbus_id; | 208 | __u32 vbus_id; |
207 | 209 | ||
208 | u32 max_in_queue:8; | 210 | __u32 max_in_queue:8; |
209 | u32 max_out_queue:8; | 211 | __u32 max_out_queue:8; |
210 | u32 time_slot:8; | 212 | __u32 time_slot:8; |
211 | u32 core_id:4; | 213 | __u32 core_id:4; |
212 | u32 rsvd1:4; | 214 | __u32 rsvd1:4; |
213 | 215 | ||
214 | u32 module_type:8; | 216 | __u32 module_type:8; |
215 | u32 conn_type:4; | 217 | __u32 conn_type:4; |
216 | u32 dev_type:4; | 218 | __u32 dev_type:4; |
217 | u32 hw_conn_type:4; | 219 | __u32 hw_conn_type:4; |
218 | u32 rsvd2:12; | 220 | __u32 rsvd2:12; |
219 | 221 | ||
220 | u32 params_fixup:8; | 222 | __u32 params_fixup:8; |
221 | u32 converter:8; | 223 | __u32 converter:8; |
222 | u32 input_pin_type:1; | 224 | __u32 input_pin_type:1; |
223 | u32 output_pin_type:1; | 225 | __u32 output_pin_type:1; |
224 | u32 is_dynamic_in_pin:1; | 226 | __u32 is_dynamic_in_pin:1; |
225 | u32 is_dynamic_out_pin:1; | 227 | __u32 is_dynamic_out_pin:1; |
226 | u32 is_loadable:1; | 228 | __u32 is_loadable:1; |
227 | u32 rsvd3:11; | 229 | __u32 rsvd3:11; |
228 | 230 | ||
229 | struct skl_dfw_v4_pipe pipe; | 231 | struct skl_dfw_v4_pipe pipe; |
230 | struct skl_dfw_v4_module_fmt in_fmt[MAX_IN_QUEUE]; | 232 | struct skl_dfw_v4_module_fmt in_fmt[MAX_IN_QUEUE]; |
diff --git a/kernel/bpf/btf.c b/kernel/bpf/btf.c index 2590700237c1..138f0302692e 100644 --- a/kernel/bpf/btf.c +++ b/kernel/bpf/btf.c | |||
@@ -1844,7 +1844,7 @@ static int btf_check_all_metas(struct btf_verifier_env *env) | |||
1844 | 1844 | ||
1845 | hdr = &btf->hdr; | 1845 | hdr = &btf->hdr; |
1846 | cur = btf->nohdr_data + hdr->type_off; | 1846 | cur = btf->nohdr_data + hdr->type_off; |
1847 | end = btf->nohdr_data + hdr->type_len; | 1847 | end = cur + hdr->type_len; |
1848 | 1848 | ||
1849 | env->log_type_id = 1; | 1849 | env->log_type_id = 1; |
1850 | while (cur < end) { | 1850 | while (cur < end) { |
diff --git a/kernel/bpf/sockmap.c b/kernel/bpf/sockmap.c index 488ef9663c01..0a0f2ec75370 100644 --- a/kernel/bpf/sockmap.c +++ b/kernel/bpf/sockmap.c | |||
@@ -132,6 +132,7 @@ struct smap_psock { | |||
132 | struct work_struct gc_work; | 132 | struct work_struct gc_work; |
133 | 133 | ||
134 | struct proto *sk_proto; | 134 | struct proto *sk_proto; |
135 | void (*save_unhash)(struct sock *sk); | ||
135 | void (*save_close)(struct sock *sk, long timeout); | 136 | void (*save_close)(struct sock *sk, long timeout); |
136 | void (*save_data_ready)(struct sock *sk); | 137 | void (*save_data_ready)(struct sock *sk); |
137 | void (*save_write_space)(struct sock *sk); | 138 | void (*save_write_space)(struct sock *sk); |
@@ -143,6 +144,7 @@ static int bpf_tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, | |||
143 | static int bpf_tcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t size); | 144 | static int bpf_tcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t size); |
144 | static int bpf_tcp_sendpage(struct sock *sk, struct page *page, | 145 | static int bpf_tcp_sendpage(struct sock *sk, struct page *page, |
145 | int offset, size_t size, int flags); | 146 | int offset, size_t size, int flags); |
147 | static void bpf_tcp_unhash(struct sock *sk); | ||
146 | static void bpf_tcp_close(struct sock *sk, long timeout); | 148 | static void bpf_tcp_close(struct sock *sk, long timeout); |
147 | 149 | ||
148 | static inline struct smap_psock *smap_psock_sk(const struct sock *sk) | 150 | static inline struct smap_psock *smap_psock_sk(const struct sock *sk) |
@@ -184,6 +186,7 @@ static void build_protos(struct proto prot[SOCKMAP_NUM_CONFIGS], | |||
184 | struct proto *base) | 186 | struct proto *base) |
185 | { | 187 | { |
186 | prot[SOCKMAP_BASE] = *base; | 188 | prot[SOCKMAP_BASE] = *base; |
189 | prot[SOCKMAP_BASE].unhash = bpf_tcp_unhash; | ||
187 | prot[SOCKMAP_BASE].close = bpf_tcp_close; | 190 | prot[SOCKMAP_BASE].close = bpf_tcp_close; |
188 | prot[SOCKMAP_BASE].recvmsg = bpf_tcp_recvmsg; | 191 | prot[SOCKMAP_BASE].recvmsg = bpf_tcp_recvmsg; |
189 | prot[SOCKMAP_BASE].stream_memory_read = bpf_tcp_stream_read; | 192 | prot[SOCKMAP_BASE].stream_memory_read = bpf_tcp_stream_read; |
@@ -217,6 +220,7 @@ static int bpf_tcp_init(struct sock *sk) | |||
217 | return -EBUSY; | 220 | return -EBUSY; |
218 | } | 221 | } |
219 | 222 | ||
223 | psock->save_unhash = sk->sk_prot->unhash; | ||
220 | psock->save_close = sk->sk_prot->close; | 224 | psock->save_close = sk->sk_prot->close; |
221 | psock->sk_proto = sk->sk_prot; | 225 | psock->sk_proto = sk->sk_prot; |
222 | 226 | ||
@@ -305,30 +309,12 @@ static struct smap_psock_map_entry *psock_map_pop(struct sock *sk, | |||
305 | return e; | 309 | return e; |
306 | } | 310 | } |
307 | 311 | ||
308 | static void bpf_tcp_close(struct sock *sk, long timeout) | 312 | static void bpf_tcp_remove(struct sock *sk, struct smap_psock *psock) |
309 | { | 313 | { |
310 | void (*close_fun)(struct sock *sk, long timeout); | ||
311 | struct smap_psock_map_entry *e; | 314 | struct smap_psock_map_entry *e; |
312 | struct sk_msg_buff *md, *mtmp; | 315 | struct sk_msg_buff *md, *mtmp; |
313 | struct smap_psock *psock; | ||
314 | struct sock *osk; | 316 | struct sock *osk; |
315 | 317 | ||
316 | lock_sock(sk); | ||
317 | rcu_read_lock(); | ||
318 | psock = smap_psock_sk(sk); | ||
319 | if (unlikely(!psock)) { | ||
320 | rcu_read_unlock(); | ||
321 | release_sock(sk); | ||
322 | return sk->sk_prot->close(sk, timeout); | ||
323 | } | ||
324 | |||
325 | /* The psock may be destroyed anytime after exiting the RCU critial | ||
326 | * section so by the time we use close_fun the psock may no longer | ||
327 | * be valid. However, bpf_tcp_close is called with the sock lock | ||
328 | * held so the close hook and sk are still valid. | ||
329 | */ | ||
330 | close_fun = psock->save_close; | ||
331 | |||
332 | if (psock->cork) { | 318 | if (psock->cork) { |
333 | free_start_sg(psock->sock, psock->cork, true); | 319 | free_start_sg(psock->sock, psock->cork, true); |
334 | kfree(psock->cork); | 320 | kfree(psock->cork); |
@@ -379,6 +365,42 @@ static void bpf_tcp_close(struct sock *sk, long timeout) | |||
379 | kfree(e); | 365 | kfree(e); |
380 | e = psock_map_pop(sk, psock); | 366 | e = psock_map_pop(sk, psock); |
381 | } | 367 | } |
368 | } | ||
369 | |||
370 | static void bpf_tcp_unhash(struct sock *sk) | ||
371 | { | ||
372 | void (*unhash_fun)(struct sock *sk); | ||
373 | struct smap_psock *psock; | ||
374 | |||
375 | rcu_read_lock(); | ||
376 | psock = smap_psock_sk(sk); | ||
377 | if (unlikely(!psock)) { | ||
378 | rcu_read_unlock(); | ||
379 | if (sk->sk_prot->unhash) | ||
380 | sk->sk_prot->unhash(sk); | ||
381 | return; | ||
382 | } | ||
383 | unhash_fun = psock->save_unhash; | ||
384 | bpf_tcp_remove(sk, psock); | ||
385 | rcu_read_unlock(); | ||
386 | unhash_fun(sk); | ||
387 | } | ||
388 | |||
389 | static void bpf_tcp_close(struct sock *sk, long timeout) | ||
390 | { | ||
391 | void (*close_fun)(struct sock *sk, long timeout); | ||
392 | struct smap_psock *psock; | ||
393 | |||
394 | lock_sock(sk); | ||
395 | rcu_read_lock(); | ||
396 | psock = smap_psock_sk(sk); | ||
397 | if (unlikely(!psock)) { | ||
398 | rcu_read_unlock(); | ||
399 | release_sock(sk); | ||
400 | return sk->sk_prot->close(sk, timeout); | ||
401 | } | ||
402 | close_fun = psock->save_close; | ||
403 | bpf_tcp_remove(sk, psock); | ||
382 | rcu_read_unlock(); | 404 | rcu_read_unlock(); |
383 | release_sock(sk); | 405 | release_sock(sk); |
384 | close_fun(sk, timeout); | 406 | close_fun(sk, timeout); |
@@ -2097,8 +2119,12 @@ static int sock_map_update_elem(struct bpf_map *map, | |||
2097 | return -EINVAL; | 2119 | return -EINVAL; |
2098 | } | 2120 | } |
2099 | 2121 | ||
2122 | /* ULPs are currently supported only for TCP sockets in ESTABLISHED | ||
2123 | * state. | ||
2124 | */ | ||
2100 | if (skops.sk->sk_type != SOCK_STREAM || | 2125 | if (skops.sk->sk_type != SOCK_STREAM || |
2101 | skops.sk->sk_protocol != IPPROTO_TCP) { | 2126 | skops.sk->sk_protocol != IPPROTO_TCP || |
2127 | skops.sk->sk_state != TCP_ESTABLISHED) { | ||
2102 | fput(socket->file); | 2128 | fput(socket->file); |
2103 | return -EOPNOTSUPP; | 2129 | return -EOPNOTSUPP; |
2104 | } | 2130 | } |
@@ -2453,6 +2479,16 @@ static int sock_hash_update_elem(struct bpf_map *map, | |||
2453 | return -EINVAL; | 2479 | return -EINVAL; |
2454 | } | 2480 | } |
2455 | 2481 | ||
2482 | /* ULPs are currently supported only for TCP sockets in ESTABLISHED | ||
2483 | * state. | ||
2484 | */ | ||
2485 | if (skops.sk->sk_type != SOCK_STREAM || | ||
2486 | skops.sk->sk_protocol != IPPROTO_TCP || | ||
2487 | skops.sk->sk_state != TCP_ESTABLISHED) { | ||
2488 | fput(socket->file); | ||
2489 | return -EOPNOTSUPP; | ||
2490 | } | ||
2491 | |||
2456 | lock_sock(skops.sk); | 2492 | lock_sock(skops.sk); |
2457 | preempt_disable(); | 2493 | preempt_disable(); |
2458 | rcu_read_lock(); | 2494 | rcu_read_lock(); |
@@ -2543,10 +2579,22 @@ const struct bpf_map_ops sock_hash_ops = { | |||
2543 | .map_check_btf = map_check_no_btf, | 2579 | .map_check_btf = map_check_no_btf, |
2544 | }; | 2580 | }; |
2545 | 2581 | ||
2582 | static bool bpf_is_valid_sock_op(struct bpf_sock_ops_kern *ops) | ||
2583 | { | ||
2584 | return ops->op == BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB || | ||
2585 | ops->op == BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB; | ||
2586 | } | ||
2546 | BPF_CALL_4(bpf_sock_map_update, struct bpf_sock_ops_kern *, bpf_sock, | 2587 | BPF_CALL_4(bpf_sock_map_update, struct bpf_sock_ops_kern *, bpf_sock, |
2547 | struct bpf_map *, map, void *, key, u64, flags) | 2588 | struct bpf_map *, map, void *, key, u64, flags) |
2548 | { | 2589 | { |
2549 | WARN_ON_ONCE(!rcu_read_lock_held()); | 2590 | WARN_ON_ONCE(!rcu_read_lock_held()); |
2591 | |||
2592 | /* ULPs are currently supported only for TCP sockets in ESTABLISHED | ||
2593 | * state. This checks that the sock ops triggering the update is | ||
2594 | * one indicating we are (or will be soon) in an ESTABLISHED state. | ||
2595 | */ | ||
2596 | if (!bpf_is_valid_sock_op(bpf_sock)) | ||
2597 | return -EOPNOTSUPP; | ||
2550 | return sock_map_ctx_update_elem(bpf_sock, map, key, flags); | 2598 | return sock_map_ctx_update_elem(bpf_sock, map, key, flags); |
2551 | } | 2599 | } |
2552 | 2600 | ||
@@ -2565,6 +2613,9 @@ BPF_CALL_4(bpf_sock_hash_update, struct bpf_sock_ops_kern *, bpf_sock, | |||
2565 | struct bpf_map *, map, void *, key, u64, flags) | 2613 | struct bpf_map *, map, void *, key, u64, flags) |
2566 | { | 2614 | { |
2567 | WARN_ON_ONCE(!rcu_read_lock_held()); | 2615 | WARN_ON_ONCE(!rcu_read_lock_held()); |
2616 | |||
2617 | if (!bpf_is_valid_sock_op(bpf_sock)) | ||
2618 | return -EOPNOTSUPP; | ||
2568 | return sock_hash_ctx_update_elem(bpf_sock, map, key, flags); | 2619 | return sock_hash_ctx_update_elem(bpf_sock, map, key, flags); |
2569 | } | 2620 | } |
2570 | 2621 | ||
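All three entry points now enforce the same invariant: a socket may only be added to a sockmap or sockhash while it is, or is about to become, ESTABLISHED, and the new unhash hook tears the psock down when the socket leaves that state without going through close(). A hedged sketch of a sock_ops program that stays inside the rule, written in the style of the kernel's BPF selftests; example_sock_map, the key and the map layout are illustrative:

    #include <uapi/linux/bpf.h>
    #include "bpf_helpers.h"

    struct bpf_map_def SEC("maps") example_sock_map = {
        .type        = BPF_MAP_TYPE_SOCKMAP,
        .key_size    = sizeof(__u32),
        .value_size  = sizeof(__u32),
        .max_entries = 2,
    };

    /* Sketch: only the two *_ESTABLISHED_CB callbacks pass the new
     * bpf_is_valid_sock_op() check, so the update is attempted there only.
     */
    SEC("sockops")
    int example_sockops(struct bpf_sock_ops *skops)
    {
        __u32 key = 0;

        switch (skops->op) {
        case BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB:
        case BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB:
            bpf_sock_map_update(skops, &example_sock_map, &key, BPF_NOEXIST);
            break;
        }
        return 0;
    }

    char _license[] SEC("license") = "GPL";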
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index 92246117d2b0..bb07e74b34a2 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c | |||
@@ -3163,7 +3163,7 @@ static int adjust_reg_min_max_vals(struct bpf_verifier_env *env, | |||
3163 | * an arbitrary scalar. Disallow all math except | 3163 | * an arbitrary scalar. Disallow all math except |
3164 | * pointer subtraction | 3164 | * pointer subtraction |
3165 | */ | 3165 | */ |
3166 | if (opcode == BPF_SUB){ | 3166 | if (opcode == BPF_SUB && env->allow_ptr_leaks) { |
3167 | mark_reg_unknown(env, regs, insn->dst_reg); | 3167 | mark_reg_unknown(env, regs, insn->dst_reg); |
3168 | return 0; | 3168 | return 0; |
3169 | } | 3169 | } |
diff --git a/kernel/dma/Kconfig b/kernel/dma/Kconfig index 9bd54304446f..1b1d63b3634b 100644 --- a/kernel/dma/Kconfig +++ b/kernel/dma/Kconfig | |||
@@ -23,6 +23,9 @@ config ARCH_HAS_SYNC_DMA_FOR_CPU | |||
23 | bool | 23 | bool |
24 | select NEED_DMA_MAP_STATE | 24 | select NEED_DMA_MAP_STATE |
25 | 25 | ||
26 | config ARCH_HAS_SYNC_DMA_FOR_CPU_ALL | ||
27 | bool | ||
28 | |||
26 | config DMA_DIRECT_OPS | 29 | config DMA_DIRECT_OPS |
27 | bool | 30 | bool |
28 | depends on HAS_DMA | 31 | depends on HAS_DMA |
diff --git a/kernel/events/core.c b/kernel/events/core.c index c80549bf82c6..dcb093e7b377 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c | |||
@@ -3935,6 +3935,12 @@ int perf_event_read_local(struct perf_event *event, u64 *value, | |||
3935 | goto out; | 3935 | goto out; |
3936 | } | 3936 | } |
3937 | 3937 | ||
3938 | /* If this is a pinned event it must be running on this CPU */ | ||
3939 | if (event->attr.pinned && event->oncpu != smp_processor_id()) { | ||
3940 | ret = -EBUSY; | ||
3941 | goto out; | ||
3942 | } | ||
3943 | |||
3938 | /* | 3944 | /* |
3939 | * If the event is currently on this CPU, it's either a per-task event, | 3945 |
3940 | * or local to this CPU. Furthermore it means it's ACTIVE (otherwise | 3946 |
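The new check means callers of perf_event_read_local() get -EBUSY for a pinned event that is not actually running on the local CPU, instead of attempting a read whose result cannot be trusted. A hedged caller sketch; the wrapper is illustrative and passes NULL for the enabled/running times it does not need:

    #include <linux/perf_event.h>

    /* Sketch: propagate the error; with this change -EBUSY now also covers
     * "pinned but not currently running on this CPU".
     */
    static int example_local_read(struct perf_event *event, u64 *value)
    {
        return perf_event_read_local(event, value, NULL, NULL);
    }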
diff --git a/kernel/pid.c b/kernel/pid.c index de1cfc4f75a2..cdf63e53a014 100644 --- a/kernel/pid.c +++ b/kernel/pid.c | |||
@@ -195,7 +195,7 @@ struct pid *alloc_pid(struct pid_namespace *ns) | |||
195 | idr_preload_end(); | 195 | idr_preload_end(); |
196 | 196 | ||
197 | if (nr < 0) { | 197 | if (nr < 0) { |
198 | retval = nr; | 198 | retval = (nr == -ENOSPC) ? -EAGAIN : nr; |
199 | goto out_free; | 199 | goto out_free; |
200 | } | 200 | } |
201 | 201 | ||
diff --git a/kernel/sys.c b/kernel/sys.c index cf5c67533ff1..123bd73046ec 100644 --- a/kernel/sys.c +++ b/kernel/sys.c | |||
@@ -71,9 +71,6 @@ | |||
71 | #include <asm/io.h> | 71 | #include <asm/io.h> |
72 | #include <asm/unistd.h> | 72 | #include <asm/unistd.h> |
73 | 73 | ||
74 | /* Hardening for Spectre-v1 */ | ||
75 | #include <linux/nospec.h> | ||
76 | |||
77 | #include "uid16.h" | 74 | #include "uid16.h" |
78 | 75 | ||
79 | #ifndef SET_UNALIGN_CTL | 76 | #ifndef SET_UNALIGN_CTL |
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c index 1d92d4a982fd..65bd4616220d 100644 --- a/kernel/trace/ring_buffer.c +++ b/kernel/trace/ring_buffer.c | |||
@@ -1546,6 +1546,8 @@ rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned long nr_pages) | |||
1546 | tmp_iter_page = first_page; | 1546 | tmp_iter_page = first_page; |
1547 | 1547 | ||
1548 | do { | 1548 | do { |
1549 | cond_resched(); | ||
1550 | |||
1549 | to_remove_page = tmp_iter_page; | 1551 | to_remove_page = tmp_iter_page; |
1550 | rb_inc_page(cpu_buffer, &tmp_iter_page); | 1552 | rb_inc_page(cpu_buffer, &tmp_iter_page); |
1551 | 1553 | ||
diff --git a/mm/Kconfig b/mm/Kconfig index a550635ea5c3..de64ea658716 100644 --- a/mm/Kconfig +++ b/mm/Kconfig | |||
@@ -637,6 +637,7 @@ config DEFERRED_STRUCT_PAGE_INIT | |||
637 | depends on NO_BOOTMEM | 637 | depends on NO_BOOTMEM |
638 | depends on SPARSEMEM | 638 | depends on SPARSEMEM |
639 | depends on !NEED_PER_CPU_KM | 639 | depends on !NEED_PER_CPU_KM |
640 | depends on 64BIT | ||
640 | help | 641 | help |
641 | Ordinarily all struct pages are initialised during early boot in a | 642 | Ordinarily all struct pages are initialised during early boot in a |
642 | single thread. On very large machines this can take a considerable | 643 | single thread. On very large machines this can take a considerable |
diff --git a/mm/shmem.c b/mm/shmem.c index 0376c124b043..446942677cd4 100644 --- a/mm/shmem.c +++ b/mm/shmem.c | |||
@@ -2227,6 +2227,8 @@ static struct inode *shmem_get_inode(struct super_block *sb, const struct inode | |||
2227 | mpol_shared_policy_init(&info->policy, NULL); | 2227 | mpol_shared_policy_init(&info->policy, NULL); |
2228 | break; | 2228 | break; |
2229 | } | 2229 | } |
2230 | |||
2231 | lockdep_annotate_inode_mutex_key(inode); | ||
2230 | } else | 2232 | } else |
2231 | shmem_free_inode(sb); | 2233 | shmem_free_inode(sb); |
2232 | return inode; | 2234 | return inode; |
diff --git a/mm/vmscan.c b/mm/vmscan.c index 7e7d25504651..c7ce2c161225 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c | |||
@@ -476,6 +476,17 @@ static unsigned long do_shrink_slab(struct shrink_control *shrinkctl, | |||
476 | delta = freeable >> priority; | 476 | delta = freeable >> priority; |
477 | delta *= 4; | 477 | delta *= 4; |
478 | do_div(delta, shrinker->seeks); | 478 | do_div(delta, shrinker->seeks); |
479 | |||
480 | /* | ||
481 | * Make sure we apply some minimal pressure on default priority | ||
482 | * even on small cgroups. Stale objects are not only consuming memory | ||
483 | * by themselves, but can also hold a reference to a dying cgroup, | ||
484 | * preventing it from being reclaimed. A dying cgroup with all | ||
485 | * corresponding structures like per-cpu stats and kmem caches | ||
486 | * can be really big, so it may lead to a significant waste of memory. | ||
487 | */ | ||
488 | delta = max_t(unsigned long long, delta, min(freeable, batch_size)); | ||
489 | |||
479 | total_scan += delta; | 490 | total_scan += delta; |
480 | if (total_scan < 0) { | 491 | if (total_scan < 0) { |
481 | pr_err("shrink_slab: %pF negative objects to delete nr=%ld\n", | 492 | pr_err("shrink_slab: %pF negative objects to delete nr=%ld\n", |
diff --git a/net/batman-adv/bat_v_elp.c b/net/batman-adv/bat_v_elp.c index 71c20c1d4002..9f481cfdf77d 100644 --- a/net/batman-adv/bat_v_elp.c +++ b/net/batman-adv/bat_v_elp.c | |||
@@ -241,7 +241,7 @@ batadv_v_elp_wifi_neigh_probe(struct batadv_hardif_neigh_node *neigh) | |||
241 | * the packet to be exactly of that size to make the link | 241 | * the packet to be exactly of that size to make the link |
242 | * throughput estimation effective. | 242 | * throughput estimation effective. |
243 | */ | 243 | */ |
244 | skb_put(skb, probe_len - hard_iface->bat_v.elp_skb->len); | 244 | skb_put_zero(skb, probe_len - hard_iface->bat_v.elp_skb->len); |
245 | 245 | ||
246 | batadv_dbg(BATADV_DBG_BATMAN, bat_priv, | 246 | batadv_dbg(BATADV_DBG_BATMAN, bat_priv, |
247 | "Sending unicast (probe) ELP packet on interface %s to %pM\n", | 247 | "Sending unicast (probe) ELP packet on interface %s to %pM\n", |
@@ -268,6 +268,7 @@ static void batadv_v_elp_periodic_work(struct work_struct *work) | |||
268 | struct batadv_priv *bat_priv; | 268 | struct batadv_priv *bat_priv; |
269 | struct sk_buff *skb; | 269 | struct sk_buff *skb; |
270 | u32 elp_interval; | 270 | u32 elp_interval; |
271 | bool ret; | ||
271 | 272 | ||
272 | bat_v = container_of(work, struct batadv_hard_iface_bat_v, elp_wq.work); | 273 | bat_v = container_of(work, struct batadv_hard_iface_bat_v, elp_wq.work); |
273 | hard_iface = container_of(bat_v, struct batadv_hard_iface, bat_v); | 274 | hard_iface = container_of(bat_v, struct batadv_hard_iface, bat_v); |
@@ -329,8 +330,11 @@ static void batadv_v_elp_periodic_work(struct work_struct *work) | |||
329 | * may sleep and that is not allowed in an rcu protected | 330 | * may sleep and that is not allowed in an rcu protected |
330 | * context. Therefore schedule a task for that. | 331 | * context. Therefore schedule a task for that. |
331 | */ | 332 | */ |
332 | queue_work(batadv_event_workqueue, | 333 | ret = queue_work(batadv_event_workqueue, |
333 | &hardif_neigh->bat_v.metric_work); | 334 | &hardif_neigh->bat_v.metric_work); |
335 | |||
336 | if (!ret) | ||
337 | batadv_hardif_neigh_put(hardif_neigh); | ||
334 | } | 338 | } |
335 | rcu_read_unlock(); | 339 | rcu_read_unlock(); |
336 | 340 | ||
diff --git a/net/batman-adv/bridge_loop_avoidance.c b/net/batman-adv/bridge_loop_avoidance.c index ff9659af6b91..5f1aeeded0e3 100644 --- a/net/batman-adv/bridge_loop_avoidance.c +++ b/net/batman-adv/bridge_loop_avoidance.c | |||
@@ -1772,6 +1772,7 @@ batadv_bla_loopdetect_check(struct batadv_priv *bat_priv, struct sk_buff *skb, | |||
1772 | { | 1772 | { |
1773 | struct batadv_bla_backbone_gw *backbone_gw; | 1773 | struct batadv_bla_backbone_gw *backbone_gw; |
1774 | struct ethhdr *ethhdr; | 1774 | struct ethhdr *ethhdr; |
1775 | bool ret; | ||
1775 | 1776 | ||
1776 | ethhdr = eth_hdr(skb); | 1777 | ethhdr = eth_hdr(skb); |
1777 | 1778 | ||
@@ -1795,8 +1796,13 @@ batadv_bla_loopdetect_check(struct batadv_priv *bat_priv, struct sk_buff *skb, | |||
1795 | if (unlikely(!backbone_gw)) | 1796 | if (unlikely(!backbone_gw)) |
1796 | return true; | 1797 | return true; |
1797 | 1798 | ||
1798 | queue_work(batadv_event_workqueue, &backbone_gw->report_work); | 1799 | ret = queue_work(batadv_event_workqueue, &backbone_gw->report_work); |
1799 | /* backbone_gw is unreferenced in the report work function */ | 1800 | |
1801 | /* backbone_gw is unreferenced in the report work function | ||
1802 | * if queue_work() call was successful | ||
1803 | */ | ||
1804 | if (!ret) | ||
1805 | batadv_backbone_gw_put(backbone_gw); | ||
1800 | 1806 | ||
1801 | return true; | 1807 | return true; |
1802 | } | 1808 | } |
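Both batman-adv hunks above fix the same leak: the reference on the object is normally dropped by the queued work function, but queue_work() returns false when the item was already pending, in which case no additional execution will run and the caller has to drop the reference itself. A generic hedged sketch of the idiom; the types and names are illustrative:

    #include <linux/kref.h>
    #include <linux/workqueue.h>

    struct example_obj {
        struct kref refcount;
        struct work_struct report_work;
    };

    /* Sketch: the caller holds a reference that the work function is
     * expected to put; if queue_work() reports the item was already
     * queued, that single pending run only puts one reference, so the
     * extra one must be dropped here.
     */
    static void example_schedule_report(struct workqueue_struct *wq,
                                        struct example_obj *obj,
                                        void (*put)(struct example_obj *))
    {
        if (!queue_work(wq, &obj->report_work))
            put(obj);
    }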
diff --git a/net/batman-adv/gateway_client.c b/net/batman-adv/gateway_client.c index 8b198ee798c9..140c61a3f1ec 100644 --- a/net/batman-adv/gateway_client.c +++ b/net/batman-adv/gateway_client.c | |||
@@ -32,6 +32,7 @@ | |||
32 | #include <linux/kernel.h> | 32 | #include <linux/kernel.h> |
33 | #include <linux/kref.h> | 33 | #include <linux/kref.h> |
34 | #include <linux/list.h> | 34 | #include <linux/list.h> |
35 | #include <linux/lockdep.h> | ||
35 | #include <linux/netdevice.h> | 36 | #include <linux/netdevice.h> |
36 | #include <linux/netlink.h> | 37 | #include <linux/netlink.h> |
37 | #include <linux/rculist.h> | 38 | #include <linux/rculist.h> |
@@ -348,6 +349,9 @@ out: | |||
348 | * @bat_priv: the bat priv with all the soft interface information | 349 | * @bat_priv: the bat priv with all the soft interface information |
349 | * @orig_node: originator announcing gateway capabilities | 350 | * @orig_node: originator announcing gateway capabilities |
350 | * @gateway: announced bandwidth information | 351 | * @gateway: announced bandwidth information |
352 | * | ||
353 | * Has to be called with the appropriate locks being acquired | ||
354 | * (gw.list_lock). | ||
351 | */ | 355 | */ |
352 | static void batadv_gw_node_add(struct batadv_priv *bat_priv, | 356 | static void batadv_gw_node_add(struct batadv_priv *bat_priv, |
353 | struct batadv_orig_node *orig_node, | 357 | struct batadv_orig_node *orig_node, |
@@ -355,6 +359,8 @@ static void batadv_gw_node_add(struct batadv_priv *bat_priv, | |||
355 | { | 359 | { |
356 | struct batadv_gw_node *gw_node; | 360 | struct batadv_gw_node *gw_node; |
357 | 361 | ||
362 | lockdep_assert_held(&bat_priv->gw.list_lock); | ||
363 | |||
358 | if (gateway->bandwidth_down == 0) | 364 | if (gateway->bandwidth_down == 0) |
359 | return; | 365 | return; |
360 | 366 | ||
@@ -369,10 +375,8 @@ static void batadv_gw_node_add(struct batadv_priv *bat_priv, | |||
369 | gw_node->bandwidth_down = ntohl(gateway->bandwidth_down); | 375 | gw_node->bandwidth_down = ntohl(gateway->bandwidth_down); |
370 | gw_node->bandwidth_up = ntohl(gateway->bandwidth_up); | 376 | gw_node->bandwidth_up = ntohl(gateway->bandwidth_up); |
371 | 377 | ||
372 | spin_lock_bh(&bat_priv->gw.list_lock); | ||
373 | kref_get(&gw_node->refcount); | 378 | kref_get(&gw_node->refcount); |
374 | hlist_add_head_rcu(&gw_node->list, &bat_priv->gw.gateway_list); | 379 | hlist_add_head_rcu(&gw_node->list, &bat_priv->gw.gateway_list); |
375 | spin_unlock_bh(&bat_priv->gw.list_lock); | ||
376 | 380 | ||
377 | batadv_dbg(BATADV_DBG_BATMAN, bat_priv, | 381 | batadv_dbg(BATADV_DBG_BATMAN, bat_priv, |
378 | "Found new gateway %pM -> gw bandwidth: %u.%u/%u.%u MBit\n", | 382 | "Found new gateway %pM -> gw bandwidth: %u.%u/%u.%u MBit\n", |
@@ -428,11 +432,14 @@ void batadv_gw_node_update(struct batadv_priv *bat_priv, | |||
428 | { | 432 | { |
429 | struct batadv_gw_node *gw_node, *curr_gw = NULL; | 433 | struct batadv_gw_node *gw_node, *curr_gw = NULL; |
430 | 434 | ||
435 | spin_lock_bh(&bat_priv->gw.list_lock); | ||
431 | gw_node = batadv_gw_node_get(bat_priv, orig_node); | 436 | gw_node = batadv_gw_node_get(bat_priv, orig_node); |
432 | if (!gw_node) { | 437 | if (!gw_node) { |
433 | batadv_gw_node_add(bat_priv, orig_node, gateway); | 438 | batadv_gw_node_add(bat_priv, orig_node, gateway); |
439 | spin_unlock_bh(&bat_priv->gw.list_lock); | ||
434 | goto out; | 440 | goto out; |
435 | } | 441 | } |
442 | spin_unlock_bh(&bat_priv->gw.list_lock); | ||
436 | 443 | ||
437 | if (gw_node->bandwidth_down == ntohl(gateway->bandwidth_down) && | 444 | if (gw_node->bandwidth_down == ntohl(gateway->bandwidth_down) && |
438 | gw_node->bandwidth_up == ntohl(gateway->bandwidth_up)) | 445 | gw_node->bandwidth_up == ntohl(gateway->bandwidth_up)) |
diff --git a/net/batman-adv/main.h b/net/batman-adv/main.h index 8da3c9336111..3ccc75ee719c 100644 --- a/net/batman-adv/main.h +++ b/net/batman-adv/main.h | |||
@@ -25,7 +25,7 @@ | |||
25 | #define BATADV_DRIVER_DEVICE "batman-adv" | 25 | #define BATADV_DRIVER_DEVICE "batman-adv" |
26 | 26 | ||
27 | #ifndef BATADV_SOURCE_VERSION | 27 | #ifndef BATADV_SOURCE_VERSION |
28 | #define BATADV_SOURCE_VERSION "2018.2" | 28 | #define BATADV_SOURCE_VERSION "2018.3" |
29 | #endif | 29 | #endif |
30 | 30 | ||
31 | /* B.A.T.M.A.N. parameters */ | 31 | /* B.A.T.M.A.N. parameters */ |
diff --git a/net/batman-adv/network-coding.c b/net/batman-adv/network-coding.c index c3578444f3cb..34caf129a9bf 100644 --- a/net/batman-adv/network-coding.c +++ b/net/batman-adv/network-coding.c | |||
@@ -854,16 +854,27 @@ batadv_nc_get_nc_node(struct batadv_priv *bat_priv, | |||
854 | spinlock_t *lock; /* Used to lock list selected by "int in_coding" */ | 854 | spinlock_t *lock; /* Used to lock list selected by "int in_coding" */ |
855 | struct list_head *list; | 855 | struct list_head *list; |
856 | 856 | ||
857 | /* Select ingoing or outgoing coding node */ | ||
858 | if (in_coding) { | ||
859 | lock = &orig_neigh_node->in_coding_list_lock; | ||
860 | list = &orig_neigh_node->in_coding_list; | ||
861 | } else { | ||
862 | lock = &orig_neigh_node->out_coding_list_lock; | ||
863 | list = &orig_neigh_node->out_coding_list; | ||
864 | } | ||
865 | |||
866 | spin_lock_bh(lock); | ||
867 | |||
857 | /* Check if nc_node is already added */ | 868 | /* Check if nc_node is already added */ |
858 | nc_node = batadv_nc_find_nc_node(orig_node, orig_neigh_node, in_coding); | 869 | nc_node = batadv_nc_find_nc_node(orig_node, orig_neigh_node, in_coding); |
859 | 870 | ||
860 | /* Node found */ | 871 | /* Node found */ |
861 | if (nc_node) | 872 | if (nc_node) |
862 | return nc_node; | 873 | goto unlock; |
863 | 874 | ||
864 | nc_node = kzalloc(sizeof(*nc_node), GFP_ATOMIC); | 875 | nc_node = kzalloc(sizeof(*nc_node), GFP_ATOMIC); |
865 | if (!nc_node) | 876 | if (!nc_node) |
866 | return NULL; | 877 | goto unlock; |
867 | 878 | ||
868 | /* Initialize nc_node */ | 879 | /* Initialize nc_node */ |
869 | INIT_LIST_HEAD(&nc_node->list); | 880 | INIT_LIST_HEAD(&nc_node->list); |
@@ -872,22 +883,14 @@ batadv_nc_get_nc_node(struct batadv_priv *bat_priv, | |||
872 | kref_get(&orig_neigh_node->refcount); | 883 | kref_get(&orig_neigh_node->refcount); |
873 | nc_node->orig_node = orig_neigh_node; | 884 | nc_node->orig_node = orig_neigh_node; |
874 | 885 | ||
875 | /* Select ingoing or outgoing coding node */ | ||
876 | if (in_coding) { | ||
877 | lock = &orig_neigh_node->in_coding_list_lock; | ||
878 | list = &orig_neigh_node->in_coding_list; | ||
879 | } else { | ||
880 | lock = &orig_neigh_node->out_coding_list_lock; | ||
881 | list = &orig_neigh_node->out_coding_list; | ||
882 | } | ||
883 | |||
884 | batadv_dbg(BATADV_DBG_NC, bat_priv, "Adding nc_node %pM -> %pM\n", | 886 | batadv_dbg(BATADV_DBG_NC, bat_priv, "Adding nc_node %pM -> %pM\n", |
885 | nc_node->addr, nc_node->orig_node->orig); | 887 | nc_node->addr, nc_node->orig_node->orig); |
886 | 888 | ||
887 | /* Add nc_node to orig_node */ | 889 | /* Add nc_node to orig_node */ |
888 | spin_lock_bh(lock); | ||
889 | kref_get(&nc_node->refcount); | 890 | kref_get(&nc_node->refcount); |
890 | list_add_tail_rcu(&nc_node->list, list); | 891 | list_add_tail_rcu(&nc_node->list, list); |
892 | |||
893 | unlock: | ||
891 | spin_unlock_bh(lock); | 894 | spin_unlock_bh(lock); |
892 | 895 | ||
893 | return nc_node; | 896 | return nc_node; |
diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c index 1485263a348b..626ddca332db 100644 --- a/net/batman-adv/soft-interface.c +++ b/net/batman-adv/soft-interface.c | |||
@@ -574,15 +574,20 @@ int batadv_softif_create_vlan(struct batadv_priv *bat_priv, unsigned short vid) | |||
574 | struct batadv_softif_vlan *vlan; | 574 | struct batadv_softif_vlan *vlan; |
575 | int err; | 575 | int err; |
576 | 576 | ||
577 | spin_lock_bh(&bat_priv->softif_vlan_list_lock); | ||
578 | |||
577 | vlan = batadv_softif_vlan_get(bat_priv, vid); | 579 | vlan = batadv_softif_vlan_get(bat_priv, vid); |
578 | if (vlan) { | 580 | if (vlan) { |
579 | batadv_softif_vlan_put(vlan); | 581 | batadv_softif_vlan_put(vlan); |
582 | spin_unlock_bh(&bat_priv->softif_vlan_list_lock); | ||
580 | return -EEXIST; | 583 | return -EEXIST; |
581 | } | 584 | } |
582 | 585 | ||
583 | vlan = kzalloc(sizeof(*vlan), GFP_ATOMIC); | 586 | vlan = kzalloc(sizeof(*vlan), GFP_ATOMIC); |
584 | if (!vlan) | 587 | if (!vlan) { |
588 | spin_unlock_bh(&bat_priv->softif_vlan_list_lock); | ||
585 | return -ENOMEM; | 589 | return -ENOMEM; |
590 | } | ||
586 | 591 | ||
587 | vlan->bat_priv = bat_priv; | 592 | vlan->bat_priv = bat_priv; |
588 | vlan->vid = vid; | 593 | vlan->vid = vid; |
@@ -590,17 +595,23 @@ int batadv_softif_create_vlan(struct batadv_priv *bat_priv, unsigned short vid) | |||
590 | 595 | ||
591 | atomic_set(&vlan->ap_isolation, 0); | 596 | atomic_set(&vlan->ap_isolation, 0); |
592 | 597 | ||
598 | kref_get(&vlan->refcount); | ||
599 | hlist_add_head_rcu(&vlan->list, &bat_priv->softif_vlan_list); | ||
600 | spin_unlock_bh(&bat_priv->softif_vlan_list_lock); | ||
601 | |||
602 | /* batadv_sysfs_add_vlan cannot be in the spinlock section due to the | ||
603 | * sleeping behavior of the sysfs functions and the fs_reclaim lock | ||
604 | */ | ||
593 | err = batadv_sysfs_add_vlan(bat_priv->soft_iface, vlan); | 605 | err = batadv_sysfs_add_vlan(bat_priv->soft_iface, vlan); |
594 | if (err) { | 606 | if (err) { |
595 | kfree(vlan); | 607 | /* ref for the function */ |
608 | batadv_softif_vlan_put(vlan); | ||
609 | |||
610 | /* ref for the list */ | ||
611 | batadv_softif_vlan_put(vlan); | ||
596 | return err; | 612 | return err; |
597 | } | 613 | } |
598 | 614 | ||
599 | spin_lock_bh(&bat_priv->softif_vlan_list_lock); | ||
600 | kref_get(&vlan->refcount); | ||
601 | hlist_add_head_rcu(&vlan->list, &bat_priv->softif_vlan_list); | ||
602 | spin_unlock_bh(&bat_priv->softif_vlan_list_lock); | ||
603 | |||
604 | /* add a new TT local entry. This one will be marked with the NOPURGE | 615 | /* add a new TT local entry. This one will be marked with the NOPURGE |
605 | * flag | 616 | * flag |
606 | */ | 617 | */ |
diff --git a/net/batman-adv/sysfs.c b/net/batman-adv/sysfs.c index f2eef43bd2ec..09427fc6494a 100644 --- a/net/batman-adv/sysfs.c +++ b/net/batman-adv/sysfs.c | |||
@@ -188,7 +188,8 @@ ssize_t batadv_store_##_name(struct kobject *kobj, \ | |||
188 | \ | 188 | \ |
189 | return __batadv_store_uint_attr(buff, count, _min, _max, \ | 189 | return __batadv_store_uint_attr(buff, count, _min, _max, \ |
190 | _post_func, attr, \ | 190 | _post_func, attr, \ |
191 | &bat_priv->_var, net_dev); \ | 191 | &bat_priv->_var, net_dev, \ |
192 | NULL); \ | ||
192 | } | 193 | } |
193 | 194 | ||
194 | #define BATADV_ATTR_SIF_SHOW_UINT(_name, _var) \ | 195 | #define BATADV_ATTR_SIF_SHOW_UINT(_name, _var) \ |
@@ -262,7 +263,9 @@ ssize_t batadv_store_##_name(struct kobject *kobj, \ | |||
262 | \ | 263 | \ |
263 | length = __batadv_store_uint_attr(buff, count, _min, _max, \ | 264 | length = __batadv_store_uint_attr(buff, count, _min, _max, \ |
264 | _post_func, attr, \ | 265 | _post_func, attr, \ |
265 | &hard_iface->_var, net_dev); \ | 266 | &hard_iface->_var, \ |
267 | hard_iface->soft_iface, \ | ||
268 | net_dev); \ | ||
266 | \ | 269 | \ |
267 | batadv_hardif_put(hard_iface); \ | 270 | batadv_hardif_put(hard_iface); \ |
268 | return length; \ | 271 | return length; \ |
@@ -356,10 +359,12 @@ __batadv_store_bool_attr(char *buff, size_t count, | |||
356 | 359 | ||
357 | static int batadv_store_uint_attr(const char *buff, size_t count, | 360 | static int batadv_store_uint_attr(const char *buff, size_t count, |
358 | struct net_device *net_dev, | 361 | struct net_device *net_dev, |
362 | struct net_device *slave_dev, | ||
359 | const char *attr_name, | 363 | const char *attr_name, |
360 | unsigned int min, unsigned int max, | 364 | unsigned int min, unsigned int max, |
361 | atomic_t *attr) | 365 | atomic_t *attr) |
362 | { | 366 | { |
367 | char ifname[IFNAMSIZ + 3] = ""; | ||
363 | unsigned long uint_val; | 368 | unsigned long uint_val; |
364 | int ret; | 369 | int ret; |
365 | 370 | ||
@@ -385,8 +390,11 @@ static int batadv_store_uint_attr(const char *buff, size_t count, | |||
385 | if (atomic_read(attr) == uint_val) | 390 | if (atomic_read(attr) == uint_val) |
386 | return count; | 391 | return count; |
387 | 392 | ||
388 | batadv_info(net_dev, "%s: Changing from: %i to: %lu\n", | 393 | if (slave_dev) |
389 | attr_name, atomic_read(attr), uint_val); | 394 | snprintf(ifname, sizeof(ifname), "%s: ", slave_dev->name); |
395 | |||
396 | batadv_info(net_dev, "%s: %sChanging from: %i to: %lu\n", | ||
397 | attr_name, ifname, atomic_read(attr), uint_val); | ||
390 | 398 | ||
391 | atomic_set(attr, uint_val); | 399 | atomic_set(attr, uint_val); |
392 | return count; | 400 | return count; |
@@ -397,12 +405,13 @@ static ssize_t __batadv_store_uint_attr(const char *buff, size_t count, | |||
397 | void (*post_func)(struct net_device *), | 405 | void (*post_func)(struct net_device *), |
398 | const struct attribute *attr, | 406 | const struct attribute *attr, |
399 | atomic_t *attr_store, | 407 | atomic_t *attr_store, |
400 | struct net_device *net_dev) | 408 | struct net_device *net_dev, |
409 | struct net_device *slave_dev) | ||
401 | { | 410 | { |
402 | int ret; | 411 | int ret; |
403 | 412 | ||
404 | ret = batadv_store_uint_attr(buff, count, net_dev, attr->name, min, max, | 413 | ret = batadv_store_uint_attr(buff, count, net_dev, slave_dev, |
405 | attr_store); | 414 | attr->name, min, max, attr_store); |
406 | if (post_func && ret) | 415 | if (post_func && ret) |
407 | post_func(net_dev); | 416 | post_func(net_dev); |
408 | 417 | ||
@@ -571,7 +580,7 @@ static ssize_t batadv_store_gw_sel_class(struct kobject *kobj, | |||
571 | return __batadv_store_uint_attr(buff, count, 1, BATADV_TQ_MAX_VALUE, | 580 | return __batadv_store_uint_attr(buff, count, 1, BATADV_TQ_MAX_VALUE, |
572 | batadv_post_gw_reselect, attr, | 581 | batadv_post_gw_reselect, attr, |
573 | &bat_priv->gw.sel_class, | 582 | &bat_priv->gw.sel_class, |
574 | bat_priv->soft_iface); | 583 | bat_priv->soft_iface, NULL); |
575 | } | 584 | } |
576 | 585 | ||
577 | static ssize_t batadv_show_gw_bwidth(struct kobject *kobj, | 586 | static ssize_t batadv_show_gw_bwidth(struct kobject *kobj, |
@@ -1090,8 +1099,9 @@ static ssize_t batadv_store_throughput_override(struct kobject *kobj, | |||
1090 | if (old_tp_override == tp_override) | 1099 | if (old_tp_override == tp_override) |
1091 | goto out; | 1100 | goto out; |
1092 | 1101 | ||
1093 | batadv_info(net_dev, "%s: Changing from: %u.%u MBit to: %u.%u MBit\n", | 1102 | batadv_info(hard_iface->soft_iface, |
1094 | "throughput_override", | 1103 | "%s: %s: Changing from: %u.%u MBit to: %u.%u MBit\n", |
1104 | "throughput_override", net_dev->name, | ||
1095 | old_tp_override / 10, old_tp_override % 10, | 1105 | old_tp_override / 10, old_tp_override % 10, |
1096 | tp_override / 10, tp_override % 10); | 1106 | tp_override / 10, tp_override % 10); |
1097 | 1107 | ||
diff --git a/net/batman-adv/translation-table.c b/net/batman-adv/translation-table.c index 12a2b7d21376..d21624c44665 100644 --- a/net/batman-adv/translation-table.c +++ b/net/batman-adv/translation-table.c | |||
@@ -1613,6 +1613,8 @@ batadv_tt_global_orig_entry_add(struct batadv_tt_global_entry *tt_global, | |||
1613 | { | 1613 | { |
1614 | struct batadv_tt_orig_list_entry *orig_entry; | 1614 | struct batadv_tt_orig_list_entry *orig_entry; |
1615 | 1615 | ||
1616 | spin_lock_bh(&tt_global->list_lock); | ||
1617 | |||
1616 | orig_entry = batadv_tt_global_orig_entry_find(tt_global, orig_node); | 1618 | orig_entry = batadv_tt_global_orig_entry_find(tt_global, orig_node); |
1617 | if (orig_entry) { | 1619 | if (orig_entry) { |
1618 | /* refresh the ttvn: the current value could be a bogus one that | 1620 | /* refresh the ttvn: the current value could be a bogus one that |
@@ -1635,11 +1637,9 @@ batadv_tt_global_orig_entry_add(struct batadv_tt_global_entry *tt_global, | |||
1635 | orig_entry->flags = flags; | 1637 | orig_entry->flags = flags; |
1636 | kref_init(&orig_entry->refcount); | 1638 | kref_init(&orig_entry->refcount); |
1637 | 1639 | ||
1638 | spin_lock_bh(&tt_global->list_lock); | ||
1639 | kref_get(&orig_entry->refcount); | 1640 | kref_get(&orig_entry->refcount); |
1640 | hlist_add_head_rcu(&orig_entry->list, | 1641 | hlist_add_head_rcu(&orig_entry->list, |
1641 | &tt_global->orig_list); | 1642 | &tt_global->orig_list); |
1642 | spin_unlock_bh(&tt_global->list_lock); | ||
1643 | atomic_inc(&tt_global->orig_list_count); | 1643 | atomic_inc(&tt_global->orig_list_count); |
1644 | 1644 | ||
1645 | sync_flags: | 1645 | sync_flags: |
@@ -1647,6 +1647,8 @@ sync_flags: | |||
1647 | out: | 1647 | out: |
1648 | if (orig_entry) | 1648 | if (orig_entry) |
1649 | batadv_tt_orig_list_entry_put(orig_entry); | 1649 | batadv_tt_orig_list_entry_put(orig_entry); |
1650 | |||
1651 | spin_unlock_bh(&tt_global->list_lock); | ||
1650 | } | 1652 | } |
1651 | 1653 | ||
1652 | /** | 1654 | /** |
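The translation-table.c hunks above, like the tvlv.c hunks that follow, all make the same move: the list lock is taken before the lookup, so the find-or-insert sequence becomes atomic and two concurrent writers can no longer both miss the lookup and add a duplicate entry. A minimal userspace sketch of that find-or-insert-under-one-lock pattern, using pthreads and a singly linked list (all names below are invented for illustration, not the kernel API):

#include <pthread.h>
#include <stdlib.h>

struct entry {
	struct entry *next;
	int key;
};

static struct entry *head;
static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;

/* Find-or-insert: the lock covers both the lookup and the insertion,
 * so two threads racing on the same key cannot both add it.
 */
static struct entry *get_or_add(int key)
{
	struct entry *e;

	pthread_mutex_lock(&list_lock);
	for (e = head; e; e = e->next)
		if (e->key == key)
			goto out;		/* already present */

	e = calloc(1, sizeof(*e));
	if (e) {
		e->key = key;
		e->next = head;
		head = e;			/* insert under the same lock */
	}
out:
	pthread_mutex_unlock(&list_lock);
	return e;
}

Before the change the lookup ran unlocked, so a second writer could slip in between a failed lookup and the later locked insert; widening the critical section removes that window at the cost of holding the lock slightly longer.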
diff --git a/net/batman-adv/tvlv.c b/net/batman-adv/tvlv.c index a637458205d1..40e69c9346d2 100644 --- a/net/batman-adv/tvlv.c +++ b/net/batman-adv/tvlv.c | |||
@@ -529,15 +529,20 @@ void batadv_tvlv_handler_register(struct batadv_priv *bat_priv, | |||
529 | { | 529 | { |
530 | struct batadv_tvlv_handler *tvlv_handler; | 530 | struct batadv_tvlv_handler *tvlv_handler; |
531 | 531 | ||
532 | spin_lock_bh(&bat_priv->tvlv.handler_list_lock); | ||
533 | |||
532 | tvlv_handler = batadv_tvlv_handler_get(bat_priv, type, version); | 534 | tvlv_handler = batadv_tvlv_handler_get(bat_priv, type, version); |
533 | if (tvlv_handler) { | 535 | if (tvlv_handler) { |
536 | spin_unlock_bh(&bat_priv->tvlv.handler_list_lock); | ||
534 | batadv_tvlv_handler_put(tvlv_handler); | 537 | batadv_tvlv_handler_put(tvlv_handler); |
535 | return; | 538 | return; |
536 | } | 539 | } |
537 | 540 | ||
538 | tvlv_handler = kzalloc(sizeof(*tvlv_handler), GFP_ATOMIC); | 541 | tvlv_handler = kzalloc(sizeof(*tvlv_handler), GFP_ATOMIC); |
539 | if (!tvlv_handler) | 542 | if (!tvlv_handler) { |
543 | spin_unlock_bh(&bat_priv->tvlv.handler_list_lock); | ||
540 | return; | 544 | return; |
545 | } | ||
541 | 546 | ||
542 | tvlv_handler->ogm_handler = optr; | 547 | tvlv_handler->ogm_handler = optr; |
543 | tvlv_handler->unicast_handler = uptr; | 548 | tvlv_handler->unicast_handler = uptr; |
@@ -547,7 +552,6 @@ void batadv_tvlv_handler_register(struct batadv_priv *bat_priv, | |||
547 | kref_init(&tvlv_handler->refcount); | 552 | kref_init(&tvlv_handler->refcount); |
548 | INIT_HLIST_NODE(&tvlv_handler->list); | 553 | INIT_HLIST_NODE(&tvlv_handler->list); |
549 | 554 | ||
550 | spin_lock_bh(&bat_priv->tvlv.handler_list_lock); | ||
551 | kref_get(&tvlv_handler->refcount); | 555 | kref_get(&tvlv_handler->refcount); |
552 | hlist_add_head_rcu(&tvlv_handler->list, &bat_priv->tvlv.handler_list); | 556 | hlist_add_head_rcu(&tvlv_handler->list, &bat_priv->tvlv.handler_list); |
553 | spin_unlock_bh(&bat_priv->tvlv.handler_list_lock); | 557 | spin_unlock_bh(&bat_priv->tvlv.handler_list_lock); |
diff --git a/net/bluetooth/smp.c b/net/bluetooth/smp.c index ae91e2d40056..3a7b0773536b 100644 --- a/net/bluetooth/smp.c +++ b/net/bluetooth/smp.c | |||
@@ -83,6 +83,7 @@ enum { | |||
83 | 83 | ||
84 | struct smp_dev { | 84 | struct smp_dev { |
85 | /* Secure Connections OOB data */ | 85 | /* Secure Connections OOB data */ |
86 | bool local_oob; | ||
86 | u8 local_pk[64]; | 87 | u8 local_pk[64]; |
87 | u8 local_rand[16]; | 88 | u8 local_rand[16]; |
88 | bool debug_key; | 89 | bool debug_key; |
@@ -599,6 +600,8 @@ int smp_generate_oob(struct hci_dev *hdev, u8 hash[16], u8 rand[16]) | |||
599 | 600 | ||
600 | memcpy(rand, smp->local_rand, 16); | 601 | memcpy(rand, smp->local_rand, 16); |
601 | 602 | ||
603 | smp->local_oob = true; | ||
604 | |||
602 | return 0; | 605 | return 0; |
603 | } | 606 | } |
604 | 607 | ||
@@ -1785,7 +1788,7 @@ static u8 smp_cmd_pairing_req(struct l2cap_conn *conn, struct sk_buff *skb) | |||
1785 | * successfully received our local OOB data - therefore set the | 1788 | * successfully received our local OOB data - therefore set the |
1786 | * flag to indicate that local OOB is in use. | 1789 | * flag to indicate that local OOB is in use. |
1787 | */ | 1790 | */ |
1788 | if (req->oob_flag == SMP_OOB_PRESENT) | 1791 | if (req->oob_flag == SMP_OOB_PRESENT && SMP_DEV(hdev)->local_oob) |
1789 | set_bit(SMP_FLAG_LOCAL_OOB, &smp->flags); | 1792 | set_bit(SMP_FLAG_LOCAL_OOB, &smp->flags); |
1790 | 1793 | ||
1791 | /* SMP over BR/EDR requires special treatment */ | 1794 | /* SMP over BR/EDR requires special treatment */ |
@@ -1967,7 +1970,7 @@ static u8 smp_cmd_pairing_rsp(struct l2cap_conn *conn, struct sk_buff *skb) | |||
1967 | * successfully received our local OOB data - therefore set the | 1970 | * successfully received our local OOB data - therefore set the |
1968 | * flag to indicate that local OOB is in use. | 1971 | * flag to indicate that local OOB is in use. |
1969 | */ | 1972 | */ |
1970 | if (rsp->oob_flag == SMP_OOB_PRESENT) | 1973 | if (rsp->oob_flag == SMP_OOB_PRESENT && SMP_DEV(hdev)->local_oob) |
1971 | set_bit(SMP_FLAG_LOCAL_OOB, &smp->flags); | 1974 | set_bit(SMP_FLAG_LOCAL_OOB, &smp->flags); |
1972 | 1975 | ||
1973 | smp->prsp[0] = SMP_CMD_PAIRING_RSP; | 1976 | smp->prsp[0] = SMP_CMD_PAIRING_RSP; |
@@ -2697,7 +2700,13 @@ static int smp_cmd_public_key(struct l2cap_conn *conn, struct sk_buff *skb) | |||
2697 | * key was set/generated. | 2700 | * key was set/generated. |
2698 | */ | 2701 | */ |
2699 | if (test_bit(SMP_FLAG_LOCAL_OOB, &smp->flags)) { | 2702 | if (test_bit(SMP_FLAG_LOCAL_OOB, &smp->flags)) { |
2700 | struct smp_dev *smp_dev = chan->data; | 2703 | struct l2cap_chan *hchan = hdev->smp_data; |
2704 | struct smp_dev *smp_dev; | ||
2705 | |||
2706 | if (!hchan || !hchan->data) | ||
2707 | return SMP_UNSPECIFIED; | ||
2708 | |||
2709 | smp_dev = hchan->data; | ||
2701 | 2710 | ||
2702 | tfm_ecdh = smp_dev->tfm_ecdh; | 2711 | tfm_ecdh = smp_dev->tfm_ecdh; |
2703 | } else { | 2712 | } else { |
@@ -3230,6 +3239,7 @@ static struct l2cap_chan *smp_add_cid(struct hci_dev *hdev, u16 cid) | |||
3230 | return ERR_CAST(tfm_ecdh); | 3239 | return ERR_CAST(tfm_ecdh); |
3231 | } | 3240 | } |
3232 | 3241 | ||
3242 | smp->local_oob = false; | ||
3233 | smp->tfm_aes = tfm_aes; | 3243 | smp->tfm_aes = tfm_aes; |
3234 | smp->tfm_cmac = tfm_cmac; | 3244 | smp->tfm_cmac = tfm_cmac; |
3235 | smp->tfm_ecdh = tfm_ecdh; | 3245 | smp->tfm_ecdh = tfm_ecdh; |
diff --git a/net/core/devlink.c b/net/core/devlink.c index 65fc366a78a4..8c0ed225e280 100644 --- a/net/core/devlink.c +++ b/net/core/devlink.c | |||
@@ -2592,7 +2592,7 @@ send_done: | |||
2592 | if (!nlh) { | 2592 | if (!nlh) { |
2593 | err = devlink_dpipe_send_and_alloc_skb(&skb, info); | 2593 | err = devlink_dpipe_send_and_alloc_skb(&skb, info); |
2594 | if (err) | 2594 | if (err) |
2595 | goto err_skb_send_alloc; | 2595 | return err; |
2596 | goto send_done; | 2596 | goto send_done; |
2597 | } | 2597 | } |
2598 | return genlmsg_reply(skb, info); | 2598 | return genlmsg_reply(skb, info); |
@@ -2600,7 +2600,6 @@ send_done: | |||
2600 | nla_put_failure: | 2600 | nla_put_failure: |
2601 | err = -EMSGSIZE; | 2601 | err = -EMSGSIZE; |
2602 | err_resource_put: | 2602 | err_resource_put: |
2603 | err_skb_send_alloc: | ||
2604 | nlmsg_free(skb); | 2603 | nlmsg_free(skb); |
2605 | return err; | 2604 | return err; |
2606 | } | 2605 | } |
diff --git a/net/core/ethtool.c b/net/core/ethtool.c index c9993c6c2fd4..234a0ec2e932 100644 --- a/net/core/ethtool.c +++ b/net/core/ethtool.c | |||
@@ -2624,6 +2624,7 @@ int dev_ethtool(struct net *net, struct ifreq *ifr) | |||
2624 | case ETHTOOL_GPHYSTATS: | 2624 | case ETHTOOL_GPHYSTATS: |
2625 | case ETHTOOL_GTSO: | 2625 | case ETHTOOL_GTSO: |
2626 | case ETHTOOL_GPERMADDR: | 2626 | case ETHTOOL_GPERMADDR: |
2627 | case ETHTOOL_GUFO: | ||
2627 | case ETHTOOL_GGSO: | 2628 | case ETHTOOL_GGSO: |
2628 | case ETHTOOL_GGRO: | 2629 | case ETHTOOL_GGRO: |
2629 | case ETHTOOL_GFLAGS: | 2630 | case ETHTOOL_GFLAGS: |
diff --git a/net/core/filter.c b/net/core/filter.c index aecdeba052d3..5e00f2b85a56 100644 --- a/net/core/filter.c +++ b/net/core/filter.c | |||
@@ -2344,7 +2344,8 @@ BPF_CALL_4(bpf_msg_pull_data, | |||
2344 | if (unlikely(bytes_sg_total > copy)) | 2344 | if (unlikely(bytes_sg_total > copy)) |
2345 | return -EINVAL; | 2345 | return -EINVAL; |
2346 | 2346 | ||
2347 | page = alloc_pages(__GFP_NOWARN | GFP_ATOMIC, get_order(copy)); | 2347 | page = alloc_pages(__GFP_NOWARN | GFP_ATOMIC | __GFP_COMP, |
2348 | get_order(copy)); | ||
2348 | if (unlikely(!page)) | 2349 | if (unlikely(!page)) |
2349 | return -ENOMEM; | 2350 | return -ENOMEM; |
2350 | p = page_address(page); | 2351 | p = page_address(page); |
diff --git a/net/core/neighbour.c b/net/core/neighbour.c index aa19d86937af..91592fceeaad 100644 --- a/net/core/neighbour.c +++ b/net/core/neighbour.c | |||
@@ -1180,6 +1180,12 @@ int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new, | |||
1180 | lladdr = neigh->ha; | 1180 | lladdr = neigh->ha; |
1181 | } | 1181 | } |
1182 | 1182 | ||
1183 | /* Update confirmed timestamp for neighbour entry after we | ||
1184 | * received ARP packet even if it doesn't change IP to MAC binding. | ||
1185 | */ | ||
1186 | if (new & NUD_CONNECTED) | ||
1187 | neigh->confirmed = jiffies; | ||
1188 | |||
1183 | /* If entry was valid and address is not changed, | 1189 | /* If entry was valid and address is not changed, |
1184 | do not change entry state, if new one is STALE. | 1190 | do not change entry state, if new one is STALE. |
1185 | */ | 1191 | */ |
@@ -1201,15 +1207,12 @@ int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new, | |||
1201 | } | 1207 | } |
1202 | } | 1208 | } |
1203 | 1209 | ||
1204 | /* Update timestamps only once we know we will make a change to the | 1210 | /* Update timestamp only once we know we will make a change to the |
1205 | * neighbour entry. Otherwise we risk to move the locktime window with | 1211 | * neighbour entry. Otherwise we risk to move the locktime window with |
1206 | * noop updates and ignore relevant ARP updates. | 1212 | * noop updates and ignore relevant ARP updates. |
1207 | */ | 1213 | */ |
1208 | if (new != old || lladdr != neigh->ha) { | 1214 | if (new != old || lladdr != neigh->ha) |
1209 | if (new & NUD_CONNECTED) | ||
1210 | neigh->confirmed = jiffies; | ||
1211 | neigh->updated = jiffies; | 1215 | neigh->updated = jiffies; |
1212 | } | ||
1213 | 1216 | ||
1214 | if (new != old) { | 1217 | if (new != old) { |
1215 | neigh_del_timer(neigh); | 1218 | neigh_del_timer(neigh); |
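The neighbour.c hunk splits the two timestamps: confirmed is refreshed on every NUD_CONNECTED update, even one that changes nothing, while updated still moves only when the state or link-layer address actually change, so the locktime window is no longer dragged along by no-op updates. A small sketch of that separation, with jiffies replaced by time(2) and invented names (illustrative only, not the kernel structures):

#include <stdbool.h>
#include <string.h>
#include <time.h>

struct neigh_like {
	time_t confirmed;	/* last proof the peer is reachable */
	time_t updated;		/* last real change to state/address */
	int state;
	unsigned char ha[6];
};

static void neigh_like_update(struct neigh_like *n, int new_state,
			      const unsigned char *lladdr, bool connected)
{
	/* reachability proof: refresh even if nothing else changes */
	if (connected)
		n->confirmed = time(NULL);

	/* only a genuine change moves 'updated' */
	if (new_state != n->state || memcmp(lladdr, n->ha, sizeof(n->ha))) {
		n->updated = time(NULL);
		n->state = new_state;
		memcpy(n->ha, lladdr, sizeof(n->ha));
	}
}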
diff --git a/net/core/netpoll.c b/net/core/netpoll.c index 57557a6a950c..3219a2932463 100644 --- a/net/core/netpoll.c +++ b/net/core/netpoll.c | |||
@@ -187,16 +187,16 @@ static void poll_napi(struct net_device *dev) | |||
187 | } | 187 | } |
188 | } | 188 | } |
189 | 189 | ||
190 | static void netpoll_poll_dev(struct net_device *dev) | 190 | void netpoll_poll_dev(struct net_device *dev) |
191 | { | 191 | { |
192 | const struct net_device_ops *ops; | ||
193 | struct netpoll_info *ni = rcu_dereference_bh(dev->npinfo); | 192 | struct netpoll_info *ni = rcu_dereference_bh(dev->npinfo); |
193 | const struct net_device_ops *ops; | ||
194 | 194 | ||
195 | /* Don't do any rx activity if the dev_lock mutex is held | 195 | /* Don't do any rx activity if the dev_lock mutex is held |
196 | * the dev_open/close paths use this to block netpoll activity | 196 | * the dev_open/close paths use this to block netpoll activity |
197 | * while changing device state | 197 | * while changing device state |
198 | */ | 198 | */ |
199 | if (down_trylock(&ni->dev_lock)) | 199 | if (!ni || down_trylock(&ni->dev_lock)) |
200 | return; | 200 | return; |
201 | 201 | ||
202 | if (!netif_running(dev)) { | 202 | if (!netif_running(dev)) { |
@@ -205,13 +205,8 @@ static void netpoll_poll_dev(struct net_device *dev) | |||
205 | } | 205 | } |
206 | 206 | ||
207 | ops = dev->netdev_ops; | 207 | ops = dev->netdev_ops; |
208 | if (!ops->ndo_poll_controller) { | 208 | if (ops->ndo_poll_controller) |
209 | up(&ni->dev_lock); | 209 | ops->ndo_poll_controller(dev); |
210 | return; | ||
211 | } | ||
212 | |||
213 | /* Process pending work on NIC */ | ||
214 | ops->ndo_poll_controller(dev); | ||
215 | 210 | ||
216 | poll_napi(dev); | 211 | poll_napi(dev); |
217 | 212 | ||
@@ -219,6 +214,7 @@ static void netpoll_poll_dev(struct net_device *dev) | |||
219 | 214 | ||
220 | zap_completion_queue(); | 215 | zap_completion_queue(); |
221 | } | 216 | } |
217 | EXPORT_SYMBOL(netpoll_poll_dev); | ||
222 | 218 | ||
223 | void netpoll_poll_disable(struct net_device *dev) | 219 | void netpoll_poll_disable(struct net_device *dev) |
224 | { | 220 | { |
@@ -613,8 +609,7 @@ int __netpoll_setup(struct netpoll *np, struct net_device *ndev) | |||
613 | strlcpy(np->dev_name, ndev->name, IFNAMSIZ); | 609 | strlcpy(np->dev_name, ndev->name, IFNAMSIZ); |
614 | INIT_WORK(&np->cleanup_work, netpoll_async_cleanup); | 610 | INIT_WORK(&np->cleanup_work, netpoll_async_cleanup); |
615 | 611 | ||
616 | if ((ndev->priv_flags & IFF_DISABLE_NETPOLL) || | 612 | if (ndev->priv_flags & IFF_DISABLE_NETPOLL) { |
617 | !ndev->netdev_ops->ndo_poll_controller) { | ||
618 | np_err(np, "%s doesn't support polling, aborting\n", | 613 | np_err(np, "%s doesn't support polling, aborting\n", |
619 | np->dev_name); | 614 | np->dev_name); |
620 | err = -ENOTSUPP; | 615 | err = -ENOTSUPP; |
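With this change netpoll_poll_dev() is exported, checks npinfo for NULL before dereferencing it, and treats ndo_poll_controller as optional: drivers that provide the hook still get it called, everyone else falls straight through to NAPI polling, and __netpoll_setup() no longer refuses devices without the hook. A plain-C sketch of that guard-early, degrade-gracefully shape (types and names are made up):

#include <stddef.h>

struct dev_like {
	void *npinfo;					/* may be NULL */
	void (*poll_controller)(struct dev_like *);	/* optional hook */
};

static void poll_napi_like(struct dev_like *dev) { (void)dev; /* poll queues */ }

static void poll_dev_like(struct dev_like *dev)
{
	if (!dev->npinfo)		/* nothing set up: bail out early */
		return;

	if (dev->poll_controller)	/* optional driver fast path */
		dev->poll_controller(dev);

	poll_napi_like(dev);		/* always fall back to NAPI polling */
}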
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c index 60c928894a78..63ce2283a456 100644 --- a/net/core/rtnetlink.c +++ b/net/core/rtnetlink.c | |||
@@ -2810,7 +2810,7 @@ int rtnl_configure_link(struct net_device *dev, const struct ifinfomsg *ifm) | |||
2810 | } | 2810 | } |
2811 | 2811 | ||
2812 | if (dev->rtnl_link_state == RTNL_LINK_INITIALIZED) { | 2812 | if (dev->rtnl_link_state == RTNL_LINK_INITIALIZED) { |
2813 | __dev_notify_flags(dev, old_flags, 0U); | 2813 | __dev_notify_flags(dev, old_flags, (old_flags ^ dev->flags)); |
2814 | } else { | 2814 | } else { |
2815 | dev->rtnl_link_state = RTNL_LINK_INITIALIZED; | 2815 | dev->rtnl_link_state = RTNL_LINK_INITIALIZED; |
2816 | __dev_notify_flags(dev, old_flags, ~0U); | 2816 | __dev_notify_flags(dev, old_flags, ~0U); |
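rtnl_configure_link() now passes (old_flags ^ dev->flags) instead of 0 for an already-initialized link, so __dev_notify_flags() reports exactly the flag bits that changed rather than suppressing the notification. XOR of the old and new values is the usual way to build such a changed-bits mask; a tiny demonstration (flag values here are arbitrary):

#include <stdio.h>

int main(void)
{
	unsigned int old_flags = 0x1003;	/* e.g. UP plus some other bits */
	unsigned int new_flags = 0x1002;	/* UP bit cleared */
	unsigned int changed = old_flags ^ new_flags;

	printf("changed bits: 0x%x\n", changed);	/* 0x1: only the UP bit */
	return 0;
}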
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c index 20fda8fb8ffd..1fbe2f815474 100644 --- a/net/ipv4/af_inet.c +++ b/net/ipv4/af_inet.c | |||
@@ -1377,6 +1377,7 @@ struct sk_buff *inet_gso_segment(struct sk_buff *skb, | |||
1377 | if (encap) | 1377 | if (encap) |
1378 | skb_reset_inner_headers(skb); | 1378 | skb_reset_inner_headers(skb); |
1379 | skb->network_header = (u8 *)iph - skb->head; | 1379 | skb->network_header = (u8 *)iph - skb->head; |
1380 | skb_reset_mac_len(skb); | ||
1380 | } while ((skb = skb->next)); | 1381 | } while ((skb = skb->next)); |
1381 | 1382 | ||
1382 | out: | 1383 | out: |
diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c index c4f5602308ed..284a22154b4e 100644 --- a/net/ipv4/ip_tunnel.c +++ b/net/ipv4/ip_tunnel.c | |||
@@ -627,6 +627,7 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev, | |||
627 | const struct iphdr *tnl_params, u8 protocol) | 627 | const struct iphdr *tnl_params, u8 protocol) |
628 | { | 628 | { |
629 | struct ip_tunnel *tunnel = netdev_priv(dev); | 629 | struct ip_tunnel *tunnel = netdev_priv(dev); |
630 | unsigned int inner_nhdr_len = 0; | ||
630 | const struct iphdr *inner_iph; | 631 | const struct iphdr *inner_iph; |
631 | struct flowi4 fl4; | 632 | struct flowi4 fl4; |
632 | u8 tos, ttl; | 633 | u8 tos, ttl; |
@@ -636,6 +637,14 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev, | |||
636 | __be32 dst; | 637 | __be32 dst; |
637 | bool connected; | 638 | bool connected; |
638 | 639 | ||
640 | /* ensure we can access the inner net header, for several users below */ | ||
641 | if (skb->protocol == htons(ETH_P_IP)) | ||
642 | inner_nhdr_len = sizeof(struct iphdr); | ||
643 | else if (skb->protocol == htons(ETH_P_IPV6)) | ||
644 | inner_nhdr_len = sizeof(struct ipv6hdr); | ||
645 | if (unlikely(!pskb_may_pull(skb, inner_nhdr_len))) | ||
646 | goto tx_error; | ||
647 | |||
639 | inner_iph = (const struct iphdr *)skb_inner_network_header(skb); | 648 | inner_iph = (const struct iphdr *)skb_inner_network_header(skb); |
640 | connected = (tunnel->parms.iph.daddr != 0); | 649 | connected = (tunnel->parms.iph.daddr != 0); |
641 | 650 | ||
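ip_tunnel_xmit() now calls pskb_may_pull() for the inner IPv4 or IPv6 header before any of the users further down dereference it, sized by the declared inner protocol (the ip6_tunnel.c hunks later in this patch add the same guard on the v6 side). The pattern is simply "validate the length you are about to read before reading it"; a userspace sketch over a flat buffer (constants and names invented):

#include <stdint.h>
#include <stddef.h>
#include <stdbool.h>

#define PROTO_IPV4 0x0800
#define PROTO_IPV6 0x86DD

/* Return true only if the buffer is long enough to hold the inner
 * network header implied by 'proto'; unknown protocols need 0 bytes.
 */
static bool inner_header_ok(const uint8_t *buf, size_t len, uint16_t proto)
{
	size_t need = 0;

	if (proto == PROTO_IPV4)
		need = 20;	/* minimal IPv4 header */
	else if (proto == PROTO_IPV6)
		need = 40;	/* fixed IPv6 header */

	(void)buf;
	return len >= need;
}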
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c index f4e35b2ff8b8..7d69dd6fa7e8 100644 --- a/net/ipv4/udp.c +++ b/net/ipv4/udp.c | |||
@@ -2124,6 +2124,28 @@ static inline int udp4_csum_init(struct sk_buff *skb, struct udphdr *uh, | |||
2124 | inet_compute_pseudo); | 2124 | inet_compute_pseudo); |
2125 | } | 2125 | } |
2126 | 2126 | ||
2127 | /* wrapper for udp_queue_rcv_skb taking care of csum conversion and | ||
2128 | * return code conversion for ip layer consumption | ||
2129 | */ | ||
2130 | static int udp_unicast_rcv_skb(struct sock *sk, struct sk_buff *skb, | ||
2131 | struct udphdr *uh) | ||
2132 | { | ||
2133 | int ret; | ||
2134 | |||
2135 | if (inet_get_convert_csum(sk) && uh->check && !IS_UDPLITE(sk)) | ||
2136 | skb_checksum_try_convert(skb, IPPROTO_UDP, uh->check, | ||
2137 | inet_compute_pseudo); | ||
2138 | |||
2139 | ret = udp_queue_rcv_skb(sk, skb); | ||
2140 | |||
2141 | /* a return value > 0 means to resubmit the input, but | ||
2142 | * it wants the return to be -protocol, or 0 | ||
2143 | */ | ||
2144 | if (ret > 0) | ||
2145 | return -ret; | ||
2146 | return 0; | ||
2147 | } | ||
2148 | |||
2127 | /* | 2149 | /* |
2128 | * All we need to do is get the socket, and then do a checksum. | 2150 | * All we need to do is get the socket, and then do a checksum. |
2129 | */ | 2151 | */ |
@@ -2170,14 +2192,9 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable, | |||
2170 | if (unlikely(sk->sk_rx_dst != dst)) | 2192 | if (unlikely(sk->sk_rx_dst != dst)) |
2171 | udp_sk_rx_dst_set(sk, dst); | 2193 | udp_sk_rx_dst_set(sk, dst); |
2172 | 2194 | ||
2173 | ret = udp_queue_rcv_skb(sk, skb); | 2195 | ret = udp_unicast_rcv_skb(sk, skb, uh); |
2174 | sock_put(sk); | 2196 | sock_put(sk); |
2175 | /* a return value > 0 means to resubmit the input, but | 2197 | return ret; |
2176 | * it wants the return to be -protocol, or 0 | ||
2177 | */ | ||
2178 | if (ret > 0) | ||
2179 | return -ret; | ||
2180 | return 0; | ||
2181 | } | 2198 | } |
2182 | 2199 | ||
2183 | if (rt->rt_flags & (RTCF_BROADCAST|RTCF_MULTICAST)) | 2200 | if (rt->rt_flags & (RTCF_BROADCAST|RTCF_MULTICAST)) |
@@ -2185,22 +2202,8 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable, | |||
2185 | saddr, daddr, udptable, proto); | 2202 | saddr, daddr, udptable, proto); |
2186 | 2203 | ||
2187 | sk = __udp4_lib_lookup_skb(skb, uh->source, uh->dest, udptable); | 2204 | sk = __udp4_lib_lookup_skb(skb, uh->source, uh->dest, udptable); |
2188 | if (sk) { | 2205 | if (sk) |
2189 | int ret; | 2206 | return udp_unicast_rcv_skb(sk, skb, uh); |
2190 | |||
2191 | if (inet_get_convert_csum(sk) && uh->check && !IS_UDPLITE(sk)) | ||
2192 | skb_checksum_try_convert(skb, IPPROTO_UDP, uh->check, | ||
2193 | inet_compute_pseudo); | ||
2194 | |||
2195 | ret = udp_queue_rcv_skb(sk, skb); | ||
2196 | |||
2197 | /* a return value > 0 means to resubmit the input, but | ||
2198 | * it wants the return to be -protocol, or 0 | ||
2199 | */ | ||
2200 | if (ret > 0) | ||
2201 | return -ret; | ||
2202 | return 0; | ||
2203 | } | ||
2204 | 2207 | ||
2205 | if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) | 2208 | if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) |
2206 | goto drop; | 2209 | goto drop; |
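udp_unicast_rcv_skb() folds the checksum-conversion step and the "a return value > 0 means resubmit as -protocol" translation into one helper, so the early-demux path and the socket-lookup path can no longer drift apart (udp6_unicast_rcv_skb below does the same for IPv6). The return-code mapping on its own, as a small runnable sketch (queue_rcv() is a stand-in, not a kernel function):

#include <stdio.h>

/* stand-in: <0 error, 0 consumed, >0 resubmit as that protocol number */
static int queue_rcv(int pretend_result) { return pretend_result; }

static int unicast_rcv(int pretend_result)
{
	int ret = queue_rcv(pretend_result);

	/* a return value > 0 means resubmit the input as -protocol, or 0 */
	if (ret > 0)
		return -ret;
	return 0;
}

int main(void)
{
	printf("%d %d %d\n", unicast_rcv(0), unicast_rcv(17), unicast_rcv(-1));
	/* prints: 0 -17 0 */
	return 0;
}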
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index d51a8c0b3372..c63ccce6425f 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c | |||
@@ -4201,7 +4201,6 @@ static struct inet6_ifaddr *if6_get_first(struct seq_file *seq, loff_t pos) | |||
4201 | p++; | 4201 | p++; |
4202 | continue; | 4202 | continue; |
4203 | } | 4203 | } |
4204 | state->offset++; | ||
4205 | return ifa; | 4204 | return ifa; |
4206 | } | 4205 | } |
4207 | 4206 | ||
@@ -4225,13 +4224,12 @@ static struct inet6_ifaddr *if6_get_next(struct seq_file *seq, | |||
4225 | return ifa; | 4224 | return ifa; |
4226 | } | 4225 | } |
4227 | 4226 | ||
4227 | state->offset = 0; | ||
4228 | while (++state->bucket < IN6_ADDR_HSIZE) { | 4228 | while (++state->bucket < IN6_ADDR_HSIZE) { |
4229 | state->offset = 0; | ||
4230 | hlist_for_each_entry_rcu(ifa, | 4229 | hlist_for_each_entry_rcu(ifa, |
4231 | &inet6_addr_lst[state->bucket], addr_lst) { | 4230 | &inet6_addr_lst[state->bucket], addr_lst) { |
4232 | if (!net_eq(dev_net(ifa->idev->dev), net)) | 4231 | if (!net_eq(dev_net(ifa->idev->dev), net)) |
4233 | continue; | 4232 | continue; |
4234 | state->offset++; | ||
4235 | return ifa; | 4233 | return ifa; |
4236 | } | 4234 | } |
4237 | } | 4235 | } |
diff --git a/net/ipv6/ip6_offload.c b/net/ipv6/ip6_offload.c index 37ff4805b20c..c7e495f12011 100644 --- a/net/ipv6/ip6_offload.c +++ b/net/ipv6/ip6_offload.c | |||
@@ -115,6 +115,7 @@ static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb, | |||
115 | payload_len = skb->len - nhoff - sizeof(*ipv6h); | 115 | payload_len = skb->len - nhoff - sizeof(*ipv6h); |
116 | ipv6h->payload_len = htons(payload_len); | 116 | ipv6h->payload_len = htons(payload_len); |
117 | skb->network_header = (u8 *)ipv6h - skb->head; | 117 | skb->network_header = (u8 *)ipv6h - skb->head; |
118 | skb_reset_mac_len(skb); | ||
118 | 119 | ||
119 | if (udpfrag) { | 120 | if (udpfrag) { |
120 | int err = ip6_find_1stfragopt(skb, &prevhdr); | 121 | int err = ip6_find_1stfragopt(skb, &prevhdr); |
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c index 16f200f06500..f9f8f554d141 100644 --- a/net/ipv6/ip6_output.c +++ b/net/ipv6/ip6_output.c | |||
@@ -219,12 +219,10 @@ int ip6_xmit(const struct sock *sk, struct sk_buff *skb, struct flowi6 *fl6, | |||
219 | kfree_skb(skb); | 219 | kfree_skb(skb); |
220 | return -ENOBUFS; | 220 | return -ENOBUFS; |
221 | } | 221 | } |
222 | if (skb->sk) | ||
223 | skb_set_owner_w(skb2, skb->sk); | ||
222 | consume_skb(skb); | 224 | consume_skb(skb); |
223 | skb = skb2; | 225 | skb = skb2; |
224 | /* skb_set_owner_w() changes sk->sk_wmem_alloc atomically, | ||
225 | * it is safe to call in our context (socket lock not held) | ||
226 | */ | ||
227 | skb_set_owner_w(skb, (struct sock *)sk); | ||
228 | } | 226 | } |
229 | if (opt->opt_flen) | 227 | if (opt->opt_flen) |
230 | ipv6_push_frag_opts(skb, opt, &proto); | 228 | ipv6_push_frag_opts(skb, opt, &proto); |
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c index 419960b0ba16..a0b6932c3afd 100644 --- a/net/ipv6/ip6_tunnel.c +++ b/net/ipv6/ip6_tunnel.c | |||
@@ -1234,7 +1234,7 @@ static inline int | |||
1234 | ip4ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev) | 1234 | ip4ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev) |
1235 | { | 1235 | { |
1236 | struct ip6_tnl *t = netdev_priv(dev); | 1236 | struct ip6_tnl *t = netdev_priv(dev); |
1237 | const struct iphdr *iph = ip_hdr(skb); | 1237 | const struct iphdr *iph; |
1238 | int encap_limit = -1; | 1238 | int encap_limit = -1; |
1239 | struct flowi6 fl6; | 1239 | struct flowi6 fl6; |
1240 | __u8 dsfield; | 1240 | __u8 dsfield; |
@@ -1242,6 +1242,11 @@ ip4ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev) | |||
1242 | u8 tproto; | 1242 | u8 tproto; |
1243 | int err; | 1243 | int err; |
1244 | 1244 | ||
1245 | /* ensure we can access the full inner ip header */ | ||
1246 | if (!pskb_may_pull(skb, sizeof(struct iphdr))) | ||
1247 | return -1; | ||
1248 | |||
1249 | iph = ip_hdr(skb); | ||
1245 | memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt)); | 1250 | memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt)); |
1246 | 1251 | ||
1247 | tproto = READ_ONCE(t->parms.proto); | 1252 | tproto = READ_ONCE(t->parms.proto); |
@@ -1306,7 +1311,7 @@ static inline int | |||
1306 | ip6ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev) | 1311 | ip6ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev) |
1307 | { | 1312 | { |
1308 | struct ip6_tnl *t = netdev_priv(dev); | 1313 | struct ip6_tnl *t = netdev_priv(dev); |
1309 | struct ipv6hdr *ipv6h = ipv6_hdr(skb); | 1314 | struct ipv6hdr *ipv6h; |
1310 | int encap_limit = -1; | 1315 | int encap_limit = -1; |
1311 | __u16 offset; | 1316 | __u16 offset; |
1312 | struct flowi6 fl6; | 1317 | struct flowi6 fl6; |
@@ -1315,6 +1320,10 @@ ip6ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev) | |||
1315 | u8 tproto; | 1320 | u8 tproto; |
1316 | int err; | 1321 | int err; |
1317 | 1322 | ||
1323 | if (unlikely(!pskb_may_pull(skb, sizeof(*ipv6h)))) | ||
1324 | return -1; | ||
1325 | |||
1326 | ipv6h = ipv6_hdr(skb); | ||
1318 | tproto = READ_ONCE(t->parms.proto); | 1327 | tproto = READ_ONCE(t->parms.proto); |
1319 | if ((tproto != IPPROTO_IPV6 && tproto != 0) || | 1328 | if ((tproto != IPPROTO_IPV6 && tproto != 0) || |
1320 | ip6_tnl_addr_conflict(t, ipv6h)) | 1329 | ip6_tnl_addr_conflict(t, ipv6h)) |
diff --git a/net/ipv6/route.c b/net/ipv6/route.c index 18e00ce1719a..826b14de7dbb 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c | |||
@@ -364,11 +364,14 @@ EXPORT_SYMBOL(ip6_dst_alloc); | |||
364 | 364 | ||
365 | static void ip6_dst_destroy(struct dst_entry *dst) | 365 | static void ip6_dst_destroy(struct dst_entry *dst) |
366 | { | 366 | { |
367 | struct dst_metrics *p = (struct dst_metrics *)DST_METRICS_PTR(dst); | ||
367 | struct rt6_info *rt = (struct rt6_info *)dst; | 368 | struct rt6_info *rt = (struct rt6_info *)dst; |
368 | struct fib6_info *from; | 369 | struct fib6_info *from; |
369 | struct inet6_dev *idev; | 370 | struct inet6_dev *idev; |
370 | 371 | ||
371 | dst_destroy_metrics_generic(dst); | 372 | if (p != &dst_default_metrics && refcount_dec_and_test(&p->refcnt)) |
373 | kfree(p); | ||
374 | |||
372 | rt6_uncached_list_del(rt); | 375 | rt6_uncached_list_del(rt); |
373 | 376 | ||
374 | idev = rt->rt6i_idev; | 377 | idev = rt->rt6i_idev; |
@@ -946,8 +949,6 @@ static void ip6_rt_init_dst_reject(struct rt6_info *rt, struct fib6_info *ort) | |||
946 | 949 | ||
947 | static void ip6_rt_init_dst(struct rt6_info *rt, struct fib6_info *ort) | 950 | static void ip6_rt_init_dst(struct rt6_info *rt, struct fib6_info *ort) |
948 | { | 951 | { |
949 | rt->dst.flags |= fib6_info_dst_flags(ort); | ||
950 | |||
951 | if (ort->fib6_flags & RTF_REJECT) { | 952 | if (ort->fib6_flags & RTF_REJECT) { |
952 | ip6_rt_init_dst_reject(rt, ort); | 953 | ip6_rt_init_dst_reject(rt, ort); |
953 | return; | 954 | return; |
@@ -978,6 +979,10 @@ static void rt6_set_from(struct rt6_info *rt, struct fib6_info *from) | |||
978 | rt->rt6i_flags &= ~RTF_EXPIRES; | 979 | rt->rt6i_flags &= ~RTF_EXPIRES; |
979 | rcu_assign_pointer(rt->from, from); | 980 | rcu_assign_pointer(rt->from, from); |
980 | dst_init_metrics(&rt->dst, from->fib6_metrics->metrics, true); | 981 | dst_init_metrics(&rt->dst, from->fib6_metrics->metrics, true); |
982 | if (from->fib6_metrics != &dst_default_metrics) { | ||
983 | rt->dst._metrics |= DST_METRICS_REFCOUNTED; | ||
984 | refcount_inc(&from->fib6_metrics->refcnt); | ||
985 | } | ||
981 | } | 986 | } |
982 | 987 | ||
983 | /* Caller must already hold reference to @ort */ | 988 | /* Caller must already hold reference to @ort */ |
@@ -4670,20 +4675,31 @@ static int rt6_fill_node(struct net *net, struct sk_buff *skb, | |||
4670 | int iif, int type, u32 portid, u32 seq, | 4675 | int iif, int type, u32 portid, u32 seq, |
4671 | unsigned int flags) | 4676 | unsigned int flags) |
4672 | { | 4677 | { |
4673 | struct rtmsg *rtm; | 4678 | struct rt6_info *rt6 = (struct rt6_info *)dst; |
4679 | struct rt6key *rt6_dst, *rt6_src; | ||
4680 | u32 *pmetrics, table, rt6_flags; | ||
4674 | struct nlmsghdr *nlh; | 4681 | struct nlmsghdr *nlh; |
4682 | struct rtmsg *rtm; | ||
4675 | long expires = 0; | 4683 | long expires = 0; |
4676 | u32 *pmetrics; | ||
4677 | u32 table; | ||
4678 | 4684 | ||
4679 | nlh = nlmsg_put(skb, portid, seq, type, sizeof(*rtm), flags); | 4685 | nlh = nlmsg_put(skb, portid, seq, type, sizeof(*rtm), flags); |
4680 | if (!nlh) | 4686 | if (!nlh) |
4681 | return -EMSGSIZE; | 4687 | return -EMSGSIZE; |
4682 | 4688 | ||
4689 | if (rt6) { | ||
4690 | rt6_dst = &rt6->rt6i_dst; | ||
4691 | rt6_src = &rt6->rt6i_src; | ||
4692 | rt6_flags = rt6->rt6i_flags; | ||
4693 | } else { | ||
4694 | rt6_dst = &rt->fib6_dst; | ||
4695 | rt6_src = &rt->fib6_src; | ||
4696 | rt6_flags = rt->fib6_flags; | ||
4697 | } | ||
4698 | |||
4683 | rtm = nlmsg_data(nlh); | 4699 | rtm = nlmsg_data(nlh); |
4684 | rtm->rtm_family = AF_INET6; | 4700 | rtm->rtm_family = AF_INET6; |
4685 | rtm->rtm_dst_len = rt->fib6_dst.plen; | 4701 | rtm->rtm_dst_len = rt6_dst->plen; |
4686 | rtm->rtm_src_len = rt->fib6_src.plen; | 4702 | rtm->rtm_src_len = rt6_src->plen; |
4687 | rtm->rtm_tos = 0; | 4703 | rtm->rtm_tos = 0; |
4688 | if (rt->fib6_table) | 4704 | if (rt->fib6_table) |
4689 | table = rt->fib6_table->tb6_id; | 4705 | table = rt->fib6_table->tb6_id; |
@@ -4698,7 +4714,7 @@ static int rt6_fill_node(struct net *net, struct sk_buff *skb, | |||
4698 | rtm->rtm_scope = RT_SCOPE_UNIVERSE; | 4714 | rtm->rtm_scope = RT_SCOPE_UNIVERSE; |
4699 | rtm->rtm_protocol = rt->fib6_protocol; | 4715 | rtm->rtm_protocol = rt->fib6_protocol; |
4700 | 4716 | ||
4701 | if (rt->fib6_flags & RTF_CACHE) | 4717 | if (rt6_flags & RTF_CACHE) |
4702 | rtm->rtm_flags |= RTM_F_CLONED; | 4718 | rtm->rtm_flags |= RTM_F_CLONED; |
4703 | 4719 | ||
4704 | if (dest) { | 4720 | if (dest) { |
@@ -4706,7 +4722,7 @@ static int rt6_fill_node(struct net *net, struct sk_buff *skb, | |||
4706 | goto nla_put_failure; | 4722 | goto nla_put_failure; |
4707 | rtm->rtm_dst_len = 128; | 4723 | rtm->rtm_dst_len = 128; |
4708 | } else if (rtm->rtm_dst_len) | 4724 | } else if (rtm->rtm_dst_len) |
4709 | if (nla_put_in6_addr(skb, RTA_DST, &rt->fib6_dst.addr)) | 4725 | if (nla_put_in6_addr(skb, RTA_DST, &rt6_dst->addr)) |
4710 | goto nla_put_failure; | 4726 | goto nla_put_failure; |
4711 | #ifdef CONFIG_IPV6_SUBTREES | 4727 | #ifdef CONFIG_IPV6_SUBTREES |
4712 | if (src) { | 4728 | if (src) { |
@@ -4714,12 +4730,12 @@ static int rt6_fill_node(struct net *net, struct sk_buff *skb, | |||
4714 | goto nla_put_failure; | 4730 | goto nla_put_failure; |
4715 | rtm->rtm_src_len = 128; | 4731 | rtm->rtm_src_len = 128; |
4716 | } else if (rtm->rtm_src_len && | 4732 | } else if (rtm->rtm_src_len && |
4717 | nla_put_in6_addr(skb, RTA_SRC, &rt->fib6_src.addr)) | 4733 | nla_put_in6_addr(skb, RTA_SRC, &rt6_src->addr)) |
4718 | goto nla_put_failure; | 4734 | goto nla_put_failure; |
4719 | #endif | 4735 | #endif |
4720 | if (iif) { | 4736 | if (iif) { |
4721 | #ifdef CONFIG_IPV6_MROUTE | 4737 | #ifdef CONFIG_IPV6_MROUTE |
4722 | if (ipv6_addr_is_multicast(&rt->fib6_dst.addr)) { | 4738 | if (ipv6_addr_is_multicast(&rt6_dst->addr)) { |
4723 | int err = ip6mr_get_route(net, skb, rtm, portid); | 4739 | int err = ip6mr_get_route(net, skb, rtm, portid); |
4724 | 4740 | ||
4725 | if (err == 0) | 4741 | if (err == 0) |
@@ -4754,7 +4770,14 @@ static int rt6_fill_node(struct net *net, struct sk_buff *skb, | |||
4754 | /* For multipath routes, walk the siblings list and add | 4770 | /* For multipath routes, walk the siblings list and add |
4755 | * each as a nexthop within RTA_MULTIPATH. | 4771 | * each as a nexthop within RTA_MULTIPATH. |
4756 | */ | 4772 | */ |
4757 | if (rt->fib6_nsiblings) { | 4773 | if (rt6) { |
4774 | if (rt6_flags & RTF_GATEWAY && | ||
4775 | nla_put_in6_addr(skb, RTA_GATEWAY, &rt6->rt6i_gateway)) | ||
4776 | goto nla_put_failure; | ||
4777 | |||
4778 | if (dst->dev && nla_put_u32(skb, RTA_OIF, dst->dev->ifindex)) | ||
4779 | goto nla_put_failure; | ||
4780 | } else if (rt->fib6_nsiblings) { | ||
4758 | struct fib6_info *sibling, *next_sibling; | 4781 | struct fib6_info *sibling, *next_sibling; |
4759 | struct nlattr *mp; | 4782 | struct nlattr *mp; |
4760 | 4783 | ||
@@ -4777,7 +4800,7 @@ static int rt6_fill_node(struct net *net, struct sk_buff *skb, | |||
4777 | goto nla_put_failure; | 4800 | goto nla_put_failure; |
4778 | } | 4801 | } |
4779 | 4802 | ||
4780 | if (rt->fib6_flags & RTF_EXPIRES) { | 4803 | if (rt6_flags & RTF_EXPIRES) { |
4781 | expires = dst ? dst->expires : rt->expires; | 4804 | expires = dst ? dst->expires : rt->expires; |
4782 | expires -= jiffies; | 4805 | expires -= jiffies; |
4783 | } | 4806 | } |
@@ -4785,7 +4808,7 @@ static int rt6_fill_node(struct net *net, struct sk_buff *skb, | |||
4785 | if (rtnl_put_cacheinfo(skb, dst, 0, expires, dst ? dst->error : 0) < 0) | 4808 | if (rtnl_put_cacheinfo(skb, dst, 0, expires, dst ? dst->error : 0) < 0) |
4786 | goto nla_put_failure; | 4809 | goto nla_put_failure; |
4787 | 4810 | ||
4788 | if (nla_put_u8(skb, RTA_PREF, IPV6_EXTRACT_PREF(rt->fib6_flags))) | 4811 | if (nla_put_u8(skb, RTA_PREF, IPV6_EXTRACT_PREF(rt6_flags))) |
4789 | goto nla_put_failure; | 4812 | goto nla_put_failure; |
4790 | 4813 | ||
4791 | 4814 | ||
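The route.c change replaces dst_destroy_metrics_generic() with an explicit put of the fib6_metrics block — anything other than dst_default_metrics is refcounted and freed on the last put — and rt6_set_from() takes the matching reference (plus DST_METRICS_REFCOUNTED) when it points a cached route at those metrics; rt6_fill_node() additionally learns to dump an rt6_info directly. A userspace sketch of the share-by-refcount part only, using C11 atomics in place of refcount_t (names are illustrative):

#include <stdatomic.h>
#include <stdlib.h>

struct metrics {
	atomic_int refcnt;
	unsigned int values[16];
};

static struct metrics default_metrics;	/* shared default: never counted, never freed */

static struct metrics *metrics_get(struct metrics *m)
{
	if (m != &default_metrics)
		atomic_fetch_add(&m->refcnt, 1);
	return m;
}

static void metrics_put(struct metrics *m)
{
	/* last reference frees the block; the default block is exempt */
	if (m != &default_metrics && atomic_fetch_sub(&m->refcnt, 1) == 1)
		free(m);
}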
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c index 83f4c77c79d8..28c4aa5078fc 100644 --- a/net/ipv6/udp.c +++ b/net/ipv6/udp.c | |||
@@ -752,6 +752,28 @@ static void udp6_sk_rx_dst_set(struct sock *sk, struct dst_entry *dst) | |||
752 | } | 752 | } |
753 | } | 753 | } |
754 | 754 | ||
755 | /* wrapper for udp_queue_rcv_skb taking care of csum conversion and | ||
756 | * return code conversion for ip layer consumption | ||
757 | */ | ||
758 | static int udp6_unicast_rcv_skb(struct sock *sk, struct sk_buff *skb, | ||
759 | struct udphdr *uh) | ||
760 | { | ||
761 | int ret; | ||
762 | |||
763 | if (inet_get_convert_csum(sk) && uh->check && !IS_UDPLITE(sk)) | ||
764 | skb_checksum_try_convert(skb, IPPROTO_UDP, uh->check, | ||
765 | ip6_compute_pseudo); | ||
766 | |||
767 | ret = udpv6_queue_rcv_skb(sk, skb); | ||
768 | |||
769 | /* a return value > 0 means to resubmit the input, but | ||
770 | * it wants the return to be -protocol, or 0 | ||
771 | */ | ||
772 | if (ret > 0) | ||
773 | return -ret; | ||
774 | return 0; | ||
775 | } | ||
776 | |||
755 | int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable, | 777 | int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable, |
756 | int proto) | 778 | int proto) |
757 | { | 779 | { |
@@ -803,13 +825,14 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable, | |||
803 | if (unlikely(sk->sk_rx_dst != dst)) | 825 | if (unlikely(sk->sk_rx_dst != dst)) |
804 | udp6_sk_rx_dst_set(sk, dst); | 826 | udp6_sk_rx_dst_set(sk, dst); |
805 | 827 | ||
806 | ret = udpv6_queue_rcv_skb(sk, skb); | 828 | if (!uh->check && !udp_sk(sk)->no_check6_rx) { |
807 | sock_put(sk); | 829 | sock_put(sk); |
830 | goto report_csum_error; | ||
831 | } | ||
808 | 832 | ||
809 | /* a return value > 0 means to resubmit the input */ | 833 | ret = udp6_unicast_rcv_skb(sk, skb, uh); |
810 | if (ret > 0) | 834 | sock_put(sk); |
811 | return ret; | 835 | return ret; |
812 | return 0; | ||
813 | } | 836 | } |
814 | 837 | ||
815 | /* | 838 | /* |
@@ -822,30 +845,13 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable, | |||
822 | /* Unicast */ | 845 | /* Unicast */ |
823 | sk = __udp6_lib_lookup_skb(skb, uh->source, uh->dest, udptable); | 846 | sk = __udp6_lib_lookup_skb(skb, uh->source, uh->dest, udptable); |
824 | if (sk) { | 847 | if (sk) { |
825 | int ret; | 848 | if (!uh->check && !udp_sk(sk)->no_check6_rx) |
826 | 849 | goto report_csum_error; | |
827 | if (!uh->check && !udp_sk(sk)->no_check6_rx) { | 850 | return udp6_unicast_rcv_skb(sk, skb, uh); |
828 | udp6_csum_zero_error(skb); | ||
829 | goto csum_error; | ||
830 | } | ||
831 | |||
832 | if (inet_get_convert_csum(sk) && uh->check && !IS_UDPLITE(sk)) | ||
833 | skb_checksum_try_convert(skb, IPPROTO_UDP, uh->check, | ||
834 | ip6_compute_pseudo); | ||
835 | |||
836 | ret = udpv6_queue_rcv_skb(sk, skb); | ||
837 | |||
838 | /* a return value > 0 means to resubmit the input */ | ||
839 | if (ret > 0) | ||
840 | return ret; | ||
841 | |||
842 | return 0; | ||
843 | } | 851 | } |
844 | 852 | ||
845 | if (!uh->check) { | 853 | if (!uh->check) |
846 | udp6_csum_zero_error(skb); | 854 | goto report_csum_error; |
847 | goto csum_error; | ||
848 | } | ||
849 | 855 | ||
850 | if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) | 856 | if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) |
851 | goto discard; | 857 | goto discard; |
@@ -866,6 +872,9 @@ short_packet: | |||
866 | ulen, skb->len, | 872 | ulen, skb->len, |
867 | daddr, ntohs(uh->dest)); | 873 | daddr, ntohs(uh->dest)); |
868 | goto discard; | 874 | goto discard; |
875 | |||
876 | report_csum_error: | ||
877 | udp6_csum_zero_error(skb); | ||
869 | csum_error: | 878 | csum_error: |
870 | __UDP6_INC_STATS(net, UDP_MIB_CSUMERRORS, proto == IPPROTO_UDPLITE); | 879 | __UDP6_INC_STATS(net, UDP_MIB_CSUMERRORS, proto == IPPROTO_UDPLITE); |
871 | discard: | 880 | discard: |
diff --git a/net/mpls/af_mpls.c b/net/mpls/af_mpls.c index 7a4de6d618b1..8fbe6cdbe255 100644 --- a/net/mpls/af_mpls.c +++ b/net/mpls/af_mpls.c | |||
@@ -1533,10 +1533,14 @@ static int mpls_dev_notify(struct notifier_block *this, unsigned long event, | |||
1533 | unsigned int flags; | 1533 | unsigned int flags; |
1534 | 1534 | ||
1535 | if (event == NETDEV_REGISTER) { | 1535 | if (event == NETDEV_REGISTER) { |
1536 | /* For now just support Ethernet, IPGRE, SIT and IPIP devices */ | 1536 | |
1537 | /* For now just support Ethernet, IPGRE, IP6GRE, SIT and | ||
1538 | * IPIP devices | ||
1539 | */ | ||
1537 | if (dev->type == ARPHRD_ETHER || | 1540 | if (dev->type == ARPHRD_ETHER || |
1538 | dev->type == ARPHRD_LOOPBACK || | 1541 | dev->type == ARPHRD_LOOPBACK || |
1539 | dev->type == ARPHRD_IPGRE || | 1542 | dev->type == ARPHRD_IPGRE || |
1543 | dev->type == ARPHRD_IP6GRE || | ||
1540 | dev->type == ARPHRD_SIT || | 1544 | dev->type == ARPHRD_SIT || |
1541 | dev->type == ARPHRD_TUNNEL) { | 1545 | dev->type == ARPHRD_TUNNEL) { |
1542 | mdev = mpls_add_dev(dev); | 1546 | mdev = mpls_add_dev(dev); |
diff --git a/net/netlabel/netlabel_unlabeled.c b/net/netlabel/netlabel_unlabeled.c index c070dfc0190a..c92894c3e40a 100644 --- a/net/netlabel/netlabel_unlabeled.c +++ b/net/netlabel/netlabel_unlabeled.c | |||
@@ -781,7 +781,8 @@ static int netlbl_unlabel_addrinfo_get(struct genl_info *info, | |||
781 | { | 781 | { |
782 | u32 addr_len; | 782 | u32 addr_len; |
783 | 783 | ||
784 | if (info->attrs[NLBL_UNLABEL_A_IPV4ADDR]) { | 784 | if (info->attrs[NLBL_UNLABEL_A_IPV4ADDR] && |
785 | info->attrs[NLBL_UNLABEL_A_IPV4MASK]) { | ||
785 | addr_len = nla_len(info->attrs[NLBL_UNLABEL_A_IPV4ADDR]); | 786 | addr_len = nla_len(info->attrs[NLBL_UNLABEL_A_IPV4ADDR]); |
786 | if (addr_len != sizeof(struct in_addr) && | 787 | if (addr_len != sizeof(struct in_addr) && |
787 | addr_len != nla_len(info->attrs[NLBL_UNLABEL_A_IPV4MASK])) | 788 | addr_len != nla_len(info->attrs[NLBL_UNLABEL_A_IPV4MASK])) |
diff --git a/net/nfc/hci/core.c b/net/nfc/hci/core.c index ac8030c4bcf8..19cb2e473ea6 100644 --- a/net/nfc/hci/core.c +++ b/net/nfc/hci/core.c | |||
@@ -209,6 +209,11 @@ void nfc_hci_cmd_received(struct nfc_hci_dev *hdev, u8 pipe, u8 cmd, | |||
209 | } | 209 | } |
210 | create_info = (struct hci_create_pipe_resp *)skb->data; | 210 | create_info = (struct hci_create_pipe_resp *)skb->data; |
211 | 211 | ||
212 | if (create_info->pipe >= NFC_HCI_MAX_PIPES) { | ||
213 | status = NFC_HCI_ANY_E_NOK; | ||
214 | goto exit; | ||
215 | } | ||
216 | |||
212 | /* Save the new created pipe and bind with local gate, | 217 | /* Save the new created pipe and bind with local gate, |
213 | * the description for skb->data[3] is destination gate id | 218 | * the description for skb->data[3] is destination gate id |
214 | * but since we received this cmd from host controller, we | 219 | * but since we received this cmd from host controller, we |
@@ -232,6 +237,11 @@ void nfc_hci_cmd_received(struct nfc_hci_dev *hdev, u8 pipe, u8 cmd, | |||
232 | } | 237 | } |
233 | delete_info = (struct hci_delete_pipe_noti *)skb->data; | 238 | delete_info = (struct hci_delete_pipe_noti *)skb->data; |
234 | 239 | ||
240 | if (delete_info->pipe >= NFC_HCI_MAX_PIPES) { | ||
241 | status = NFC_HCI_ANY_E_NOK; | ||
242 | goto exit; | ||
243 | } | ||
244 | |||
235 | hdev->pipes[delete_info->pipe].gate = NFC_HCI_INVALID_GATE; | 245 | hdev->pipes[delete_info->pipe].gate = NFC_HCI_INVALID_GATE; |
236 | hdev->pipes[delete_info->pipe].dest_host = NFC_HCI_INVALID_HOST; | 246 | hdev->pipes[delete_info->pipe].dest_host = NFC_HCI_INVALID_HOST; |
237 | break; | 247 | break; |
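Both the create-pipe and delete-pipe notifications now check the peer-supplied pipe index against NFC_HCI_MAX_PIPES and answer with NFC_HCI_ANY_E_NOK instead of indexing past hdev->pipes[]. Validating an externally controlled index against the array bound before the first use is the entire fix; a trivial sketch (sizes and names invented):

#include <stdbool.h>
#include <stddef.h>

#define MAX_PIPES 128

struct pipe_slot { int gate; int dest_host; };
static struct pipe_slot pipes[MAX_PIPES];

/* Reject out-of-range indices before touching the table. */
static bool pipe_update(size_t idx, int gate, int dest_host)
{
	if (idx >= MAX_PIPES)
		return false;		/* caller answers with an error status */

	pipes[idx].gate = gate;
	pipes[idx].dest_host = dest_host;
	return true;
}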
diff --git a/net/rds/ib.h b/net/rds/ib.h index 73427ff439f9..71ff356ee702 100644 --- a/net/rds/ib.h +++ b/net/rds/ib.h | |||
@@ -443,7 +443,7 @@ int rds_ib_send_grab_credits(struct rds_ib_connection *ic, u32 wanted, | |||
443 | int rds_ib_xmit_atomic(struct rds_connection *conn, struct rm_atomic_op *op); | 443 | int rds_ib_xmit_atomic(struct rds_connection *conn, struct rm_atomic_op *op); |
444 | 444 | ||
445 | /* ib_stats.c */ | 445 | /* ib_stats.c */ |
446 | DECLARE_PER_CPU(struct rds_ib_statistics, rds_ib_stats); | 446 | DECLARE_PER_CPU_SHARED_ALIGNED(struct rds_ib_statistics, rds_ib_stats); |
447 | #define rds_ib_stats_inc(member) rds_stats_inc_which(rds_ib_stats, member) | 447 | #define rds_ib_stats_inc(member) rds_stats_inc_which(rds_ib_stats, member) |
448 | #define rds_ib_stats_add(member, count) \ | 448 | #define rds_ib_stats_add(member, count) \ |
449 | rds_stats_add_which(rds_ib_stats, member, count) | 449 | rds_stats_add_which(rds_ib_stats, member, count) |
diff --git a/net/sched/act_sample.c b/net/sched/act_sample.c index 44e9c00657bc..6b67aa13d2dd 100644 --- a/net/sched/act_sample.c +++ b/net/sched/act_sample.c | |||
@@ -69,7 +69,7 @@ static int tcf_sample_init(struct net *net, struct nlattr *nla, | |||
69 | 69 | ||
70 | if (!exists) { | 70 | if (!exists) { |
71 | ret = tcf_idr_create(tn, parm->index, est, a, | 71 | ret = tcf_idr_create(tn, parm->index, est, a, |
72 | &act_sample_ops, bind, false); | 72 | &act_sample_ops, bind, true); |
73 | if (ret) { | 73 | if (ret) { |
74 | tcf_idr_cleanup(tn, parm->index); | 74 | tcf_idr_cleanup(tn, parm->index); |
75 | return ret; | 75 | return ret; |
diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c index 1a67af8a6e8c..0a75cb2e5e7b 100644 --- a/net/sched/cls_api.c +++ b/net/sched/cls_api.c | |||
@@ -1902,6 +1902,8 @@ replay: | |||
1902 | RTM_NEWCHAIN, false); | 1902 | RTM_NEWCHAIN, false); |
1903 | break; | 1903 | break; |
1904 | case RTM_DELCHAIN: | 1904 | case RTM_DELCHAIN: |
1905 | tfilter_notify_chain(net, skb, block, q, parent, n, | ||
1906 | chain, RTM_DELTFILTER); | ||
1905 | /* Flush the chain first as the user requested chain removal. */ | 1907 | /* Flush the chain first as the user requested chain removal. */ |
1906 | tcf_chain_flush(chain); | 1908 | tcf_chain_flush(chain); |
1907 | /* In case the chain was successfully deleted, put a reference | 1909 | /* In case the chain was successfully deleted, put a reference |
diff --git a/net/sctp/transport.c b/net/sctp/transport.c index 12cac85da994..033696e6f74f 100644 --- a/net/sctp/transport.c +++ b/net/sctp/transport.c | |||
@@ -260,6 +260,7 @@ void sctp_transport_pmtu(struct sctp_transport *transport, struct sock *sk) | |||
260 | bool sctp_transport_update_pmtu(struct sctp_transport *t, u32 pmtu) | 260 | bool sctp_transport_update_pmtu(struct sctp_transport *t, u32 pmtu) |
261 | { | 261 | { |
262 | struct dst_entry *dst = sctp_transport_dst_check(t); | 262 | struct dst_entry *dst = sctp_transport_dst_check(t); |
263 | struct sock *sk = t->asoc->base.sk; | ||
263 | bool change = true; | 264 | bool change = true; |
264 | 265 | ||
265 | if (unlikely(pmtu < SCTP_DEFAULT_MINSEGMENT)) { | 266 | if (unlikely(pmtu < SCTP_DEFAULT_MINSEGMENT)) { |
@@ -271,12 +272,19 @@ bool sctp_transport_update_pmtu(struct sctp_transport *t, u32 pmtu) | |||
271 | pmtu = SCTP_TRUNC4(pmtu); | 272 | pmtu = SCTP_TRUNC4(pmtu); |
272 | 273 | ||
273 | if (dst) { | 274 | if (dst) { |
274 | dst->ops->update_pmtu(dst, t->asoc->base.sk, NULL, pmtu); | 275 | struct sctp_pf *pf = sctp_get_pf_specific(dst->ops->family); |
276 | union sctp_addr addr; | ||
277 | |||
278 | pf->af->from_sk(&addr, sk); | ||
279 | pf->to_sk_daddr(&t->ipaddr, sk); | ||
280 | dst->ops->update_pmtu(dst, sk, NULL, pmtu); | ||
281 | pf->to_sk_daddr(&addr, sk); | ||
282 | |||
275 | dst = sctp_transport_dst_check(t); | 283 | dst = sctp_transport_dst_check(t); |
276 | } | 284 | } |
277 | 285 | ||
278 | if (!dst) { | 286 | if (!dst) { |
279 | t->af_specific->get_dst(t, &t->saddr, &t->fl, t->asoc->base.sk); | 287 | t->af_specific->get_dst(t, &t->saddr, &t->fl, sk); |
280 | dst = t->dst; | 288 | dst = t->dst; |
281 | } | 289 | } |
282 | 290 | ||
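sctp_transport_update_pmtu() must call dst->ops->update_pmtu() while the socket's destination address refers to this transport's peer, so it saves the address taken from the socket, swaps in the transport's address, performs the update and restores the original — a save/substitute/restore bracket around a callback that reads ambient socket state. Sketched generically (invented types, not the SCTP API):

struct addr { unsigned int a; };

struct sock_like { struct addr daddr; };

/* Run 'fn' with sk->daddr temporarily set to *tmp, then restore it. */
static void with_daddr(struct sock_like *sk, const struct addr *tmp,
		       void (*fn)(struct sock_like *))
{
	struct addr saved = sk->daddr;	/* save the socket's own address */

	sk->daddr = *tmp;		/* substitute the transport's peer */
	fn(sk);				/* callback sees the swapped address */
	sk->daddr = saved;		/* restore */
}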
diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c index 2d8a1e15e4f9..015231789ed2 100644 --- a/net/smc/af_smc.c +++ b/net/smc/af_smc.c | |||
@@ -742,7 +742,10 @@ static void smc_connect_work(struct work_struct *work) | |||
742 | smc->sk.sk_err = -rc; | 742 | smc->sk.sk_err = -rc; |
743 | 743 | ||
744 | out: | 744 | out: |
745 | smc->sk.sk_state_change(&smc->sk); | 745 | if (smc->sk.sk_err) |
746 | smc->sk.sk_state_change(&smc->sk); | ||
747 | else | ||
748 | smc->sk.sk_write_space(&smc->sk); | ||
746 | kfree(smc->connect_info); | 749 | kfree(smc->connect_info); |
747 | smc->connect_info = NULL; | 750 | smc->connect_info = NULL; |
748 | release_sock(&smc->sk); | 751 | release_sock(&smc->sk); |
@@ -1150,9 +1153,9 @@ static int smc_listen_rdma_reg(struct smc_sock *new_smc, int local_contact) | |||
1150 | } | 1153 | } |
1151 | 1154 | ||
1152 | /* listen worker: finish RDMA setup */ | 1155 | /* listen worker: finish RDMA setup */ |
1153 | static void smc_listen_rdma_finish(struct smc_sock *new_smc, | 1156 | static int smc_listen_rdma_finish(struct smc_sock *new_smc, |
1154 | struct smc_clc_msg_accept_confirm *cclc, | 1157 | struct smc_clc_msg_accept_confirm *cclc, |
1155 | int local_contact) | 1158 | int local_contact) |
1156 | { | 1159 | { |
1157 | struct smc_link *link = &new_smc->conn.lgr->lnk[SMC_SINGLE_LINK]; | 1160 | struct smc_link *link = &new_smc->conn.lgr->lnk[SMC_SINGLE_LINK]; |
1158 | int reason_code = 0; | 1161 | int reason_code = 0; |
@@ -1175,11 +1178,12 @@ static void smc_listen_rdma_finish(struct smc_sock *new_smc, | |||
1175 | if (reason_code) | 1178 | if (reason_code) |
1176 | goto decline; | 1179 | goto decline; |
1177 | } | 1180 | } |
1178 | return; | 1181 | return 0; |
1179 | 1182 | ||
1180 | decline: | 1183 | decline: |
1181 | mutex_unlock(&smc_create_lgr_pending); | 1184 | mutex_unlock(&smc_create_lgr_pending); |
1182 | smc_listen_decline(new_smc, reason_code, local_contact); | 1185 | smc_listen_decline(new_smc, reason_code, local_contact); |
1186 | return reason_code; | ||
1183 | } | 1187 | } |
1184 | 1188 | ||
1185 | /* setup for RDMA connection of server */ | 1189 | /* setup for RDMA connection of server */ |
@@ -1276,8 +1280,10 @@ static void smc_listen_work(struct work_struct *work) | |||
1276 | } | 1280 | } |
1277 | 1281 | ||
1278 | /* finish worker */ | 1282 | /* finish worker */ |
1279 | if (!ism_supported) | 1283 | if (!ism_supported) { |
1280 | smc_listen_rdma_finish(new_smc, &cclc, local_contact); | 1284 | if (smc_listen_rdma_finish(new_smc, &cclc, local_contact)) |
1285 | return; | ||
1286 | } | ||
1281 | smc_conn_save_peer_info(new_smc, &cclc); | 1287 | smc_conn_save_peer_info(new_smc, &cclc); |
1282 | mutex_unlock(&smc_create_lgr_pending); | 1288 | mutex_unlock(&smc_create_lgr_pending); |
1283 | smc_listen_out_connected(new_smc); | 1289 | smc_listen_out_connected(new_smc); |
@@ -1529,7 +1535,7 @@ static __poll_t smc_poll(struct file *file, struct socket *sock, | |||
1529 | return EPOLLNVAL; | 1535 | return EPOLLNVAL; |
1530 | 1536 | ||
1531 | smc = smc_sk(sock->sk); | 1537 | smc = smc_sk(sock->sk); |
1532 | if ((sk->sk_state == SMC_INIT) || smc->use_fallback) { | 1538 | if (smc->use_fallback) { |
1533 | /* delegate to CLC child sock */ | 1539 | /* delegate to CLC child sock */ |
1534 | mask = smc->clcsock->ops->poll(file, smc->clcsock, wait); | 1540 | mask = smc->clcsock->ops->poll(file, smc->clcsock, wait); |
1535 | sk->sk_err = smc->clcsock->sk->sk_err; | 1541 | sk->sk_err = smc->clcsock->sk->sk_err; |
@@ -1560,9 +1566,9 @@ static __poll_t smc_poll(struct file *file, struct socket *sock, | |||
1560 | mask |= EPOLLIN | EPOLLRDNORM | EPOLLRDHUP; | 1566 | mask |= EPOLLIN | EPOLLRDNORM | EPOLLRDHUP; |
1561 | if (sk->sk_state == SMC_APPCLOSEWAIT1) | 1567 | if (sk->sk_state == SMC_APPCLOSEWAIT1) |
1562 | mask |= EPOLLIN; | 1568 | mask |= EPOLLIN; |
1569 | if (smc->conn.urg_state == SMC_URG_VALID) | ||
1570 | mask |= EPOLLPRI; | ||
1563 | } | 1571 | } |
1564 | if (smc->conn.urg_state == SMC_URG_VALID) | ||
1565 | mask |= EPOLLPRI; | ||
1566 | } | 1572 | } |
1567 | 1573 | ||
1568 | return mask; | 1574 | return mask; |
diff --git a/net/smc/smc_clc.c b/net/smc/smc_clc.c index 83aba9ade060..52241d679cc9 100644 --- a/net/smc/smc_clc.c +++ b/net/smc/smc_clc.c | |||
@@ -446,14 +446,12 @@ int smc_clc_send_proposal(struct smc_sock *smc, int smc_type, | |||
446 | vec[i++].iov_len = sizeof(trl); | 446 | vec[i++].iov_len = sizeof(trl); |
447 | /* due to the few bytes needed for clc-handshake this cannot block */ | 447 | /* due to the few bytes needed for clc-handshake this cannot block */ |
448 | len = kernel_sendmsg(smc->clcsock, &msg, vec, i, plen); | 448 | len = kernel_sendmsg(smc->clcsock, &msg, vec, i, plen); |
449 | if (len < sizeof(pclc)) { | 449 | if (len < 0) { |
450 | if (len >= 0) { | 450 | smc->sk.sk_err = smc->clcsock->sk->sk_err; |
451 | reason_code = -ENETUNREACH; | 451 | reason_code = -smc->sk.sk_err; |
452 | smc->sk.sk_err = -reason_code; | 452 | } else if (len < (int)sizeof(pclc)) { |
453 | } else { | 453 | reason_code = -ENETUNREACH; |
454 | smc->sk.sk_err = smc->clcsock->sk->sk_err; | 454 | smc->sk.sk_err = -reason_code; |
455 | reason_code = -smc->sk.sk_err; | ||
456 | } | ||
457 | } | 455 | } |
458 | 456 | ||
459 | return reason_code; | 457 | return reason_code; |
diff --git a/net/smc/smc_close.c b/net/smc/smc_close.c index ac961dfb1ea1..ea2b87f29469 100644 --- a/net/smc/smc_close.c +++ b/net/smc/smc_close.c | |||
@@ -100,15 +100,14 @@ static void smc_close_active_abort(struct smc_sock *smc) | |||
100 | struct smc_cdc_conn_state_flags *txflags = | 100 | struct smc_cdc_conn_state_flags *txflags = |
101 | &smc->conn.local_tx_ctrl.conn_state_flags; | 101 | &smc->conn.local_tx_ctrl.conn_state_flags; |
102 | 102 | ||
103 | sk->sk_err = ECONNABORTED; | 103 | if (sk->sk_state != SMC_INIT && smc->clcsock && smc->clcsock->sk) { |
104 | if (smc->clcsock && smc->clcsock->sk) { | 104 | sk->sk_err = ECONNABORTED; |
105 | smc->clcsock->sk->sk_err = ECONNABORTED; | 105 | if (smc->clcsock && smc->clcsock->sk) { |
106 | smc->clcsock->sk->sk_state_change(smc->clcsock->sk); | 106 | smc->clcsock->sk->sk_err = ECONNABORTED; |
107 | smc->clcsock->sk->sk_state_change(smc->clcsock->sk); | ||
108 | } | ||
107 | } | 109 | } |
108 | switch (sk->sk_state) { | 110 | switch (sk->sk_state) { |
109 | case SMC_INIT: | ||
110 | sk->sk_state = SMC_PEERABORTWAIT; | ||
111 | break; | ||
112 | case SMC_ACTIVE: | 111 | case SMC_ACTIVE: |
113 | sk->sk_state = SMC_PEERABORTWAIT; | 112 | sk->sk_state = SMC_PEERABORTWAIT; |
114 | release_sock(sk); | 113 | release_sock(sk); |
@@ -143,6 +142,7 @@ static void smc_close_active_abort(struct smc_sock *smc) | |||
143 | case SMC_PEERFINCLOSEWAIT: | 142 | case SMC_PEERFINCLOSEWAIT: |
144 | sock_put(sk); /* passive closing */ | 143 | sock_put(sk); /* passive closing */ |
145 | break; | 144 | break; |
145 | case SMC_INIT: | ||
146 | case SMC_PEERABORTWAIT: | 146 | case SMC_PEERABORTWAIT: |
147 | case SMC_CLOSED: | 147 | case SMC_CLOSED: |
148 | break; | 148 | break; |
diff --git a/net/smc/smc_pnet.c b/net/smc/smc_pnet.c index 01c6ce042a1c..7cb3e4f07c10 100644 --- a/net/smc/smc_pnet.c +++ b/net/smc/smc_pnet.c | |||
@@ -461,7 +461,7 @@ static const struct genl_ops smc_pnet_ops[] = { | |||
461 | }; | 461 | }; |
462 | 462 | ||
463 | /* SMC_PNETID family definition */ | 463 | /* SMC_PNETID family definition */ |
464 | static struct genl_family smc_pnet_nl_family = { | 464 | static struct genl_family smc_pnet_nl_family __ro_after_init = { |
465 | .hdrsize = 0, | 465 | .hdrsize = 0, |
466 | .name = SMCR_GENL_FAMILY_NAME, | 466 | .name = SMCR_GENL_FAMILY_NAME, |
467 | .version = SMCR_GENL_FAMILY_VERSION, | 467 | .version = SMCR_GENL_FAMILY_VERSION, |
diff --git a/net/socket.c b/net/socket.c index e6945e318f02..01f3f8f32d6f 100644 --- a/net/socket.c +++ b/net/socket.c | |||
@@ -941,7 +941,8 @@ void dlci_ioctl_set(int (*hook) (unsigned int, void __user *)) | |||
941 | EXPORT_SYMBOL(dlci_ioctl_set); | 941 | EXPORT_SYMBOL(dlci_ioctl_set); |
942 | 942 | ||
943 | static long sock_do_ioctl(struct net *net, struct socket *sock, | 943 | static long sock_do_ioctl(struct net *net, struct socket *sock, |
944 | unsigned int cmd, unsigned long arg) | 944 | unsigned int cmd, unsigned long arg, |
945 | unsigned int ifreq_size) | ||
945 | { | 946 | { |
946 | int err; | 947 | int err; |
947 | void __user *argp = (void __user *)arg; | 948 | void __user *argp = (void __user *)arg; |
@@ -967,11 +968,11 @@ static long sock_do_ioctl(struct net *net, struct socket *sock, | |||
967 | } else { | 968 | } else { |
968 | struct ifreq ifr; | 969 | struct ifreq ifr; |
969 | bool need_copyout; | 970 | bool need_copyout; |
970 | if (copy_from_user(&ifr, argp, sizeof(struct ifreq))) | 971 | if (copy_from_user(&ifr, argp, ifreq_size)) |
971 | return -EFAULT; | 972 | return -EFAULT; |
972 | err = dev_ioctl(net, cmd, &ifr, &need_copyout); | 973 | err = dev_ioctl(net, cmd, &ifr, &need_copyout); |
973 | if (!err && need_copyout) | 974 | if (!err && need_copyout) |
974 | if (copy_to_user(argp, &ifr, sizeof(struct ifreq))) | 975 | if (copy_to_user(argp, &ifr, ifreq_size)) |
975 | return -EFAULT; | 976 | return -EFAULT; |
976 | } | 977 | } |
977 | return err; | 978 | return err; |
@@ -1070,7 +1071,8 @@ static long sock_ioctl(struct file *file, unsigned cmd, unsigned long arg) | |||
1070 | err = open_related_ns(&net->ns, get_net_ns); | 1071 | err = open_related_ns(&net->ns, get_net_ns); |
1071 | break; | 1072 | break; |
1072 | default: | 1073 | default: |
1073 | err = sock_do_ioctl(net, sock, cmd, arg); | 1074 | err = sock_do_ioctl(net, sock, cmd, arg, |
1075 | sizeof(struct ifreq)); | ||
1074 | break; | 1076 | break; |
1075 | } | 1077 | } |
1076 | return err; | 1078 | return err; |
@@ -2750,7 +2752,8 @@ static int do_siocgstamp(struct net *net, struct socket *sock, | |||
2750 | int err; | 2752 | int err; |
2751 | 2753 | ||
2752 | set_fs(KERNEL_DS); | 2754 | set_fs(KERNEL_DS); |
2753 | err = sock_do_ioctl(net, sock, cmd, (unsigned long)&ktv); | 2755 | err = sock_do_ioctl(net, sock, cmd, (unsigned long)&ktv, |
2756 | sizeof(struct compat_ifreq)); | ||
2754 | set_fs(old_fs); | 2757 | set_fs(old_fs); |
2755 | if (!err) | 2758 | if (!err) |
2756 | err = compat_put_timeval(&ktv, up); | 2759 | err = compat_put_timeval(&ktv, up); |
@@ -2766,7 +2769,8 @@ static int do_siocgstampns(struct net *net, struct socket *sock, | |||
2766 | int err; | 2769 | int err; |
2767 | 2770 | ||
2768 | set_fs(KERNEL_DS); | 2771 | set_fs(KERNEL_DS); |
2769 | err = sock_do_ioctl(net, sock, cmd, (unsigned long)&kts); | 2772 | err = sock_do_ioctl(net, sock, cmd, (unsigned long)&kts, |
2773 | sizeof(struct compat_ifreq)); | ||
2770 | set_fs(old_fs); | 2774 | set_fs(old_fs); |
2771 | if (!err) | 2775 | if (!err) |
2772 | err = compat_put_timespec(&kts, up); | 2776 | err = compat_put_timespec(&kts, up); |
@@ -3072,7 +3076,8 @@ static int routing_ioctl(struct net *net, struct socket *sock, | |||
3072 | } | 3076 | } |
3073 | 3077 | ||
3074 | set_fs(KERNEL_DS); | 3078 | set_fs(KERNEL_DS); |
3075 | ret = sock_do_ioctl(net, sock, cmd, (unsigned long) r); | 3079 | ret = sock_do_ioctl(net, sock, cmd, (unsigned long) r, |
3080 | sizeof(struct compat_ifreq)); | ||
3076 | set_fs(old_fs); | 3081 | set_fs(old_fs); |
3077 | 3082 | ||
3078 | out: | 3083 | out: |
@@ -3185,7 +3190,8 @@ static int compat_sock_ioctl_trans(struct file *file, struct socket *sock, | |||
3185 | case SIOCBONDSETHWADDR: | 3190 | case SIOCBONDSETHWADDR: |
3186 | case SIOCBONDCHANGEACTIVE: | 3191 | case SIOCBONDCHANGEACTIVE: |
3187 | case SIOCGIFNAME: | 3192 | case SIOCGIFNAME: |
3188 | return sock_do_ioctl(net, sock, cmd, arg); | 3193 | return sock_do_ioctl(net, sock, cmd, arg, |
3194 | sizeof(struct compat_ifreq)); | ||
3189 | } | 3195 | } |
3190 | 3196 | ||
3191 | return -ENOIOCTLCMD; | 3197 | return -ENOIOCTLCMD; |
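sock_do_ioctl() now receives the ifreq size from its caller, so the compat entry points (do_siocgstamp, do_siocgstampns, routing_ioctl, compat_sock_ioctl_trans) pass sizeof(struct compat_ifreq) while the native ioctl path keeps sizeof(struct ifreq), and only that many bytes are copied to and from user memory. The size-parameterised copy, reduced to a runnable userspace sketch (nothing here is the kernel API):

#include <stdio.h>
#include <string.h>

/* The helper no longer hard-codes one structure size; the caller passes
 * the size of the structure its ABI actually uses.
 */
static int copy_req(char *kernel_buf, const char *user_buf, size_t ifreq_size,
		    size_t kernel_buf_size)
{
	if (ifreq_size > kernel_buf_size)
		return -1;
	memcpy(kernel_buf, user_buf, ifreq_size);
	return 0;
}

int main(void)
{
	char kbuf[40] = { 0 };
	char native_req[40] = "native";	/* stands in for struct ifreq */
	char compat_req[32] = "compat";	/* stands in for the smaller compat layout */

	copy_req(kbuf, native_req, sizeof(native_req), sizeof(kbuf));
	printf("%s\n", kbuf);
	copy_req(kbuf, compat_req, sizeof(compat_req), sizeof(kbuf));
	printf("%s\n", kbuf);
	return 0;
}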
diff --git a/net/tls/tls_device.c b/net/tls/tls_device.c index 292742e50bfa..961b07d4d41c 100644 --- a/net/tls/tls_device.c +++ b/net/tls/tls_device.c | |||
@@ -686,7 +686,7 @@ int tls_set_device_offload(struct sock *sk, struct tls_context *ctx) | |||
686 | goto free_marker_record; | 686 | goto free_marker_record; |
687 | } | 687 | } |
688 | 688 | ||
689 | crypto_info = &ctx->crypto_send; | 689 | crypto_info = &ctx->crypto_send.info; |
690 | switch (crypto_info->cipher_type) { | 690 | switch (crypto_info->cipher_type) { |
691 | case TLS_CIPHER_AES_GCM_128: | 691 | case TLS_CIPHER_AES_GCM_128: |
692 | nonce_size = TLS_CIPHER_AES_GCM_128_IV_SIZE; | 692 | nonce_size = TLS_CIPHER_AES_GCM_128_IV_SIZE; |
@@ -780,7 +780,7 @@ int tls_set_device_offload(struct sock *sk, struct tls_context *ctx) | |||
780 | 780 | ||
781 | ctx->priv_ctx_tx = offload_ctx; | 781 | ctx->priv_ctx_tx = offload_ctx; |
782 | rc = netdev->tlsdev_ops->tls_dev_add(netdev, sk, TLS_OFFLOAD_CTX_DIR_TX, | 782 | rc = netdev->tlsdev_ops->tls_dev_add(netdev, sk, TLS_OFFLOAD_CTX_DIR_TX, |
783 | &ctx->crypto_send, | 783 | &ctx->crypto_send.info, |
784 | tcp_sk(sk)->write_seq); | 784 | tcp_sk(sk)->write_seq); |
785 | if (rc) | 785 | if (rc) |
786 | goto release_netdev; | 786 | goto release_netdev; |
@@ -862,7 +862,7 @@ int tls_set_device_offload_rx(struct sock *sk, struct tls_context *ctx) | |||
862 | goto release_ctx; | 862 | goto release_ctx; |
863 | 863 | ||
864 | rc = netdev->tlsdev_ops->tls_dev_add(netdev, sk, TLS_OFFLOAD_CTX_DIR_RX, | 864 | rc = netdev->tlsdev_ops->tls_dev_add(netdev, sk, TLS_OFFLOAD_CTX_DIR_RX, |
865 | &ctx->crypto_recv, | 865 | &ctx->crypto_recv.info, |
866 | tcp_sk(sk)->copied_seq); | 866 | tcp_sk(sk)->copied_seq); |
867 | if (rc) { | 867 | if (rc) { |
868 | pr_err_ratelimited("%s: The netdev has refused to offload this socket\n", | 868 | pr_err_ratelimited("%s: The netdev has refused to offload this socket\n", |
diff --git a/net/tls/tls_device_fallback.c b/net/tls/tls_device_fallback.c index 6102169239d1..450a6dbc5a88 100644 --- a/net/tls/tls_device_fallback.c +++ b/net/tls/tls_device_fallback.c | |||
@@ -320,7 +320,7 @@ static struct sk_buff *tls_enc_skb(struct tls_context *tls_ctx, | |||
320 | goto free_req; | 320 | goto free_req; |
321 | 321 | ||
322 | iv = buf; | 322 | iv = buf; |
323 | memcpy(iv, tls_ctx->crypto_send_aes_gcm_128.salt, | 323 | memcpy(iv, tls_ctx->crypto_send.aes_gcm_128.salt, |
324 | TLS_CIPHER_AES_GCM_128_SALT_SIZE); | 324 | TLS_CIPHER_AES_GCM_128_SALT_SIZE); |
325 | aad = buf + TLS_CIPHER_AES_GCM_128_SALT_SIZE + | 325 | aad = buf + TLS_CIPHER_AES_GCM_128_SALT_SIZE + |
326 | TLS_CIPHER_AES_GCM_128_IV_SIZE; | 326 | TLS_CIPHER_AES_GCM_128_IV_SIZE; |
diff --git a/net/tls/tls_main.c b/net/tls/tls_main.c index 180b6640e531..523622dc74f8 100644 --- a/net/tls/tls_main.c +++ b/net/tls/tls_main.c | |||
@@ -241,6 +241,16 @@ static void tls_write_space(struct sock *sk) | |||
241 | ctx->sk_write_space(sk); | 241 | ctx->sk_write_space(sk); |
242 | } | 242 | } |
243 | 243 | ||
244 | static void tls_ctx_free(struct tls_context *ctx) | ||
245 | { | ||
246 | if (!ctx) | ||
247 | return; | ||
248 | |||
249 | memzero_explicit(&ctx->crypto_send, sizeof(ctx->crypto_send)); | ||
250 | memzero_explicit(&ctx->crypto_recv, sizeof(ctx->crypto_recv)); | ||
251 | kfree(ctx); | ||
252 | } | ||
253 | |||
244 | static void tls_sk_proto_close(struct sock *sk, long timeout) | 254 | static void tls_sk_proto_close(struct sock *sk, long timeout) |
245 | { | 255 | { |
246 | struct tls_context *ctx = tls_get_ctx(sk); | 256 | struct tls_context *ctx = tls_get_ctx(sk); |
@@ -294,7 +304,7 @@ static void tls_sk_proto_close(struct sock *sk, long timeout) | |||
294 | #else | 304 | #else |
295 | { | 305 | { |
296 | #endif | 306 | #endif |
297 | kfree(ctx); | 307 | tls_ctx_free(ctx); |
298 | ctx = NULL; | 308 | ctx = NULL; |
299 | } | 309 | } |
300 | 310 | ||
@@ -305,7 +315,7 @@ skip_tx_cleanup: | |||
305 | * for sk->sk_prot->unhash [tls_hw_unhash] | 315 | * for sk->sk_prot->unhash [tls_hw_unhash] |
306 | */ | 316 | */ |
307 | if (free_ctx) | 317 | if (free_ctx) |
308 | kfree(ctx); | 318 | tls_ctx_free(ctx); |
309 | } | 319 | } |
310 | 320 | ||
311 | static int do_tls_getsockopt_tx(struct sock *sk, char __user *optval, | 321 | static int do_tls_getsockopt_tx(struct sock *sk, char __user *optval, |
@@ -330,7 +340,7 @@ static int do_tls_getsockopt_tx(struct sock *sk, char __user *optval, | |||
330 | } | 340 | } |
331 | 341 | ||
332 | /* get user crypto info */ | 342 | /* get user crypto info */ |
333 | crypto_info = &ctx->crypto_send; | 343 | crypto_info = &ctx->crypto_send.info; |
334 | 344 | ||
335 | if (!TLS_CRYPTO_INFO_READY(crypto_info)) { | 345 | if (!TLS_CRYPTO_INFO_READY(crypto_info)) { |
336 | rc = -EBUSY; | 346 | rc = -EBUSY; |
@@ -417,9 +427,9 @@ static int do_tls_setsockopt_conf(struct sock *sk, char __user *optval, | |||
417 | } | 427 | } |
418 | 428 | ||
419 | if (tx) | 429 | if (tx) |
420 | crypto_info = &ctx->crypto_send; | 430 | crypto_info = &ctx->crypto_send.info; |
421 | else | 431 | else |
422 | crypto_info = &ctx->crypto_recv; | 432 | crypto_info = &ctx->crypto_recv.info; |
423 | 433 | ||
424 | /* Currently we don't support set crypto info more than one time */ | 434 | /* Currently we don't support set crypto info more than one time */ |
425 | if (TLS_CRYPTO_INFO_READY(crypto_info)) { | 435 | if (TLS_CRYPTO_INFO_READY(crypto_info)) { |
@@ -499,7 +509,7 @@ static int do_tls_setsockopt_conf(struct sock *sk, char __user *optval, | |||
499 | goto out; | 509 | goto out; |
500 | 510 | ||
501 | err_crypto_info: | 511 | err_crypto_info: |
502 | memset(crypto_info, 0, sizeof(*crypto_info)); | 512 | memzero_explicit(crypto_info, sizeof(union tls_crypto_context)); |
503 | out: | 513 | out: |
504 | return rc; | 514 | return rc; |
505 | } | 515 | } |
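Note on the tls_main.c hunks above: the context holding TLS key material is now wiped with memzero_explicit() before kfree() (and on the setsockopt error path) instead of a plain memset(). A memset() of memory that is about to be freed is a dead store the compiler may legally drop, leaving key bytes behind in freed memory; memzero_explicit() contains a barrier so the clear always happens. A minimal sketch of the pattern (hypothetical structure, not the TLS code itself):

    #include <linux/slab.h>
    #include <linux/string.h>
    #include <linux/types.h>

    struct secret_ctx {
            u8 key[32];                     /* secret material */
    };

    static void secret_ctx_free(struct secret_ctx *ctx)
    {
            if (!ctx)
                    return;
            /* cannot be optimized away, unlike a memset() right before kfree() */
            memzero_explicit(ctx->key, sizeof(ctx->key));
            kfree(ctx);
    }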
diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c index e28a6ff25d96..b9c6ecfbcfea 100644 --- a/net/tls/tls_sw.c +++ b/net/tls/tls_sw.c | |||
@@ -931,7 +931,15 @@ int tls_sw_recvmsg(struct sock *sk, | |||
931 | if (control != TLS_RECORD_TYPE_DATA) | 931 | if (control != TLS_RECORD_TYPE_DATA) |
932 | goto recv_end; | 932 | goto recv_end; |
933 | } | 933 | } |
934 | } else { | ||
935 | /* MSG_PEEK right now cannot look beyond current skb | ||
936 | * from strparser, meaning we cannot advance skb here | ||
937 | * and thus unpause strparser since we'd lose original | ||
938 | * one. | ||
939 | */ | ||
940 | break; | ||
934 | } | 941 | } |
942 | |||
935 | /* If we have a new message from strparser, continue now. */ | 943 | /* If we have a new message from strparser, continue now. */ |
936 | if (copied >= target && !ctx->recv_pkt) | 944 | if (copied >= target && !ctx->recv_pkt) |
937 | break; | 945 | break; |
@@ -1055,8 +1063,8 @@ static int tls_read_size(struct strparser *strp, struct sk_buff *skb) | |||
1055 | goto read_failure; | 1063 | goto read_failure; |
1056 | } | 1064 | } |
1057 | 1065 | ||
1058 | if (header[1] != TLS_VERSION_MINOR(tls_ctx->crypto_recv.version) || | 1066 | if (header[1] != TLS_VERSION_MINOR(tls_ctx->crypto_recv.info.version) || |
1059 | header[2] != TLS_VERSION_MAJOR(tls_ctx->crypto_recv.version)) { | 1067 | header[2] != TLS_VERSION_MAJOR(tls_ctx->crypto_recv.info.version)) { |
1060 | ret = -EINVAL; | 1068 | ret = -EINVAL; |
1061 | goto read_failure; | 1069 | goto read_failure; |
1062 | } | 1070 | } |
@@ -1136,7 +1144,6 @@ void tls_sw_free_resources_rx(struct sock *sk) | |||
1136 | 1144 | ||
1137 | int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx) | 1145 | int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx) |
1138 | { | 1146 | { |
1139 | char keyval[TLS_CIPHER_AES_GCM_128_KEY_SIZE]; | ||
1140 | struct tls_crypto_info *crypto_info; | 1147 | struct tls_crypto_info *crypto_info; |
1141 | struct tls12_crypto_info_aes_gcm_128 *gcm_128_info; | 1148 | struct tls12_crypto_info_aes_gcm_128 *gcm_128_info; |
1142 | struct tls_sw_context_tx *sw_ctx_tx = NULL; | 1149 | struct tls_sw_context_tx *sw_ctx_tx = NULL; |
@@ -1181,12 +1188,12 @@ int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx) | |||
1181 | 1188 | ||
1182 | if (tx) { | 1189 | if (tx) { |
1183 | crypto_init_wait(&sw_ctx_tx->async_wait); | 1190 | crypto_init_wait(&sw_ctx_tx->async_wait); |
1184 | crypto_info = &ctx->crypto_send; | 1191 | crypto_info = &ctx->crypto_send.info; |
1185 | cctx = &ctx->tx; | 1192 | cctx = &ctx->tx; |
1186 | aead = &sw_ctx_tx->aead_send; | 1193 | aead = &sw_ctx_tx->aead_send; |
1187 | } else { | 1194 | } else { |
1188 | crypto_init_wait(&sw_ctx_rx->async_wait); | 1195 | crypto_init_wait(&sw_ctx_rx->async_wait); |
1189 | crypto_info = &ctx->crypto_recv; | 1196 | crypto_info = &ctx->crypto_recv.info; |
1190 | cctx = &ctx->rx; | 1197 | cctx = &ctx->rx; |
1191 | aead = &sw_ctx_rx->aead_recv; | 1198 | aead = &sw_ctx_rx->aead_recv; |
1192 | } | 1199 | } |
@@ -1265,9 +1272,7 @@ int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx) | |||
1265 | 1272 | ||
1266 | ctx->push_pending_record = tls_sw_push_pending_record; | 1273 | ctx->push_pending_record = tls_sw_push_pending_record; |
1267 | 1274 | ||
1268 | memcpy(keyval, gcm_128_info->key, TLS_CIPHER_AES_GCM_128_KEY_SIZE); | 1275 | rc = crypto_aead_setkey(*aead, gcm_128_info->key, |
1269 | |||
1270 | rc = crypto_aead_setkey(*aead, keyval, | ||
1271 | TLS_CIPHER_AES_GCM_128_KEY_SIZE); | 1276 | TLS_CIPHER_AES_GCM_128_KEY_SIZE); |
1272 | if (rc) | 1277 | if (rc) |
1273 | goto free_aead; | 1278 | goto free_aead; |
diff --git a/scripts/subarch.include b/scripts/subarch.include new file mode 100644 index 000000000000..650682821126 --- /dev/null +++ b/scripts/subarch.include | |||
@@ -0,0 +1,13 @@ | |||
1 | # SUBARCH tells the usermode build what the underlying arch is. That is set | ||
2 | # first, and if a usermode build is happening, the "ARCH=um" on the command | ||
3 | # line overrides the setting of ARCH below. If a native build is happening, | ||
4 | # then ARCH is assigned, getting whatever value it gets normally, and | ||
5 | # SUBARCH is subsequently ignored. | ||
6 | |||
7 | SUBARCH := $(shell uname -m | sed -e s/i.86/x86/ -e s/x86_64/x86/ \ | ||
8 | -e s/sun4u/sparc64/ \ | ||
9 | -e s/arm.*/arm/ -e s/sa110/arm/ \ | ||
10 | -e s/s390x/s390/ -e s/parisc64/parisc/ \ | ||
11 | -e s/ppc.*/powerpc/ -e s/mips.*/mips/ \ | ||
12 | -e s/sh[234].*/sh/ -e s/aarch64.*/arm64/ \ | ||
13 | -e s/riscv.*/riscv/) | ||
diff --git a/security/keys/dh.c b/security/keys/dh.c index 3b602a1e27fa..711e89d8c415 100644 --- a/security/keys/dh.c +++ b/security/keys/dh.c | |||
@@ -300,7 +300,7 @@ long __keyctl_dh_compute(struct keyctl_dh_params __user *params, | |||
300 | } | 300 | } |
301 | dh_inputs.g_size = dlen; | 301 | dh_inputs.g_size = dlen; |
302 | 302 | ||
303 | dlen = dh_data_from_key(pcopy.dh_private, &dh_inputs.key); | 303 | dlen = dh_data_from_key(pcopy.private, &dh_inputs.key); |
304 | if (dlen < 0) { | 304 | if (dlen < 0) { |
305 | ret = dlen; | 305 | ret = dlen; |
306 | goto out2; | 306 | goto out2; |
diff --git a/sound/firewire/bebob/bebob.c b/sound/firewire/bebob/bebob.c index 730ea91d9be8..93676354f87f 100644 --- a/sound/firewire/bebob/bebob.c +++ b/sound/firewire/bebob/bebob.c | |||
@@ -263,6 +263,8 @@ do_registration(struct work_struct *work) | |||
263 | error: | 263 | error: |
264 | mutex_unlock(&devices_mutex); | 264 | mutex_unlock(&devices_mutex); |
265 | snd_bebob_stream_destroy_duplex(bebob); | 265 | snd_bebob_stream_destroy_duplex(bebob); |
266 | kfree(bebob->maudio_special_quirk); | ||
267 | bebob->maudio_special_quirk = NULL; | ||
266 | snd_card_free(bebob->card); | 268 | snd_card_free(bebob->card); |
267 | dev_info(&bebob->unit->device, | 269 | dev_info(&bebob->unit->device, |
268 | "Sound card registration failed: %d\n", err); | 270 | "Sound card registration failed: %d\n", err); |
diff --git a/sound/firewire/bebob/bebob_maudio.c b/sound/firewire/bebob/bebob_maudio.c index bd55620c6a47..c266997ad299 100644 --- a/sound/firewire/bebob/bebob_maudio.c +++ b/sound/firewire/bebob/bebob_maudio.c | |||
@@ -96,17 +96,13 @@ int snd_bebob_maudio_load_firmware(struct fw_unit *unit) | |||
96 | struct fw_device *device = fw_parent_device(unit); | 96 | struct fw_device *device = fw_parent_device(unit); |
97 | int err, rcode; | 97 | int err, rcode; |
98 | u64 date; | 98 | u64 date; |
99 | __le32 cues[3] = { | 99 | __le32 *cues; |
100 | cpu_to_le32(MAUDIO_BOOTLOADER_CUE1), | ||
101 | cpu_to_le32(MAUDIO_BOOTLOADER_CUE2), | ||
102 | cpu_to_le32(MAUDIO_BOOTLOADER_CUE3) | ||
103 | }; | ||
104 | 100 | ||
105 | /* check date of software used to build */ | 101 | /* check date of software used to build */ |
106 | err = snd_bebob_read_block(unit, INFO_OFFSET_SW_DATE, | 102 | err = snd_bebob_read_block(unit, INFO_OFFSET_SW_DATE, |
107 | &date, sizeof(u64)); | 103 | &date, sizeof(u64)); |
108 | if (err < 0) | 104 | if (err < 0) |
109 | goto end; | 105 | return err; |
110 | /* | 106 | /* |
111 | * firmware version 5058 or later has date later than "20070401", but | 107 | * firmware version 5058 or later has date later than "20070401", but |
112 | * 'date' is not null-terminated. | 108 | * 'date' is not null-terminated. |
@@ -114,20 +110,28 @@ int snd_bebob_maudio_load_firmware(struct fw_unit *unit) | |||
114 | if (date < 0x3230303730343031LL) { | 110 | if (date < 0x3230303730343031LL) { |
115 | dev_err(&unit->device, | 111 | dev_err(&unit->device, |
116 | "Use firmware version 5058 or later\n"); | 112 | "Use firmware version 5058 or later\n"); |
117 | err = -ENOSYS; | 113 | return -ENXIO; |
118 | goto end; | ||
119 | } | 114 | } |
120 | 115 | ||
116 | cues = kmalloc_array(3, sizeof(*cues), GFP_KERNEL); | ||
117 | if (!cues) | ||
118 | return -ENOMEM; | ||
119 | |||
120 | cues[0] = cpu_to_le32(MAUDIO_BOOTLOADER_CUE1); | ||
121 | cues[1] = cpu_to_le32(MAUDIO_BOOTLOADER_CUE2); | ||
122 | cues[2] = cpu_to_le32(MAUDIO_BOOTLOADER_CUE3); | ||
123 | |||
121 | rcode = fw_run_transaction(device->card, TCODE_WRITE_BLOCK_REQUEST, | 124 | rcode = fw_run_transaction(device->card, TCODE_WRITE_BLOCK_REQUEST, |
122 | device->node_id, device->generation, | 125 | device->node_id, device->generation, |
123 | device->max_speed, BEBOB_ADDR_REG_REQ, | 126 | device->max_speed, BEBOB_ADDR_REG_REQ, |
124 | cues, sizeof(cues)); | 127 | cues, 3 * sizeof(*cues)); |
128 | kfree(cues); | ||
125 | if (rcode != RCODE_COMPLETE) { | 129 | if (rcode != RCODE_COMPLETE) { |
126 | dev_err(&unit->device, | 130 | dev_err(&unit->device, |
127 | "Failed to send a cue to load firmware\n"); | 131 | "Failed to send a cue to load firmware\n"); |
128 | err = -EIO; | 132 | err = -EIO; |
129 | } | 133 | } |
130 | end: | 134 | |
131 | return err; | 135 | return err; |
132 | } | 136 | } |
133 | 137 | ||
@@ -290,10 +294,6 @@ snd_bebob_maudio_special_discover(struct snd_bebob *bebob, bool is1814) | |||
290 | bebob->midi_output_ports = 2; | 294 | bebob->midi_output_ports = 2; |
291 | } | 295 | } |
292 | end: | 296 | end: |
293 | if (err < 0) { | ||
294 | kfree(params); | ||
295 | bebob->maudio_special_quirk = NULL; | ||
296 | } | ||
297 | mutex_unlock(&bebob->mutex); | 297 | mutex_unlock(&bebob->mutex); |
298 | return err; | 298 | return err; |
299 | } | 299 | } |
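Note on the bebob_maudio.c hunk above: the bootloader cue words move from an on-stack array to a kmalloc_array() buffer before being handed to fw_run_transaction(). The payload may be DMA-mapped by the FireWire core, and stack memory (especially with virtually mapped stacks) is not safe to DMA-map, while heap memory is; the too-old-firmware error also changes from -ENOSYS to the more conventional -ENXIO. A generic sketch of the heap-buffer pattern (hypothetical helper, assuming the transport may DMA the data):

    #include <linux/slab.h>

    static int send_words(const u32 *words, int n)
    {
            __le32 *buf;
            int i;

            /* heap memory, unlike the stack, may be DMA-mapped safely */
            buf = kmalloc_array(n, sizeof(*buf), GFP_KERNEL);
            if (!buf)
                    return -ENOMEM;
            for (i = 0; i < n; i++)
                    buf[i] = cpu_to_le32(words[i]);
            /* ... pass buf and n * sizeof(*buf) to the transport here ... */
            kfree(buf);
            return 0;
    }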
diff --git a/sound/firewire/digi00x/digi00x.c b/sound/firewire/digi00x/digi00x.c index 1f5e1d23f31a..ef689997d6a5 100644 --- a/sound/firewire/digi00x/digi00x.c +++ b/sound/firewire/digi00x/digi00x.c | |||
@@ -49,6 +49,7 @@ static void dg00x_free(struct snd_dg00x *dg00x) | |||
49 | fw_unit_put(dg00x->unit); | 49 | fw_unit_put(dg00x->unit); |
50 | 50 | ||
51 | mutex_destroy(&dg00x->mutex); | 51 | mutex_destroy(&dg00x->mutex); |
52 | kfree(dg00x); | ||
52 | } | 53 | } |
53 | 54 | ||
54 | static void dg00x_card_free(struct snd_card *card) | 55 | static void dg00x_card_free(struct snd_card *card) |
diff --git a/sound/firewire/fireface/ff-protocol-ff400.c b/sound/firewire/fireface/ff-protocol-ff400.c index ad7a0a32557d..64c3cb0fb926 100644 --- a/sound/firewire/fireface/ff-protocol-ff400.c +++ b/sound/firewire/fireface/ff-protocol-ff400.c | |||
@@ -146,6 +146,7 @@ static int ff400_switch_fetching_mode(struct snd_ff *ff, bool enable) | |||
146 | { | 146 | { |
147 | __le32 *reg; | 147 | __le32 *reg; |
148 | int i; | 148 | int i; |
149 | int err; | ||
149 | 150 | ||
150 | reg = kcalloc(18, sizeof(__le32), GFP_KERNEL); | 151 | reg = kcalloc(18, sizeof(__le32), GFP_KERNEL); |
151 | if (reg == NULL) | 152 | if (reg == NULL) |
@@ -163,9 +164,11 @@ static int ff400_switch_fetching_mode(struct snd_ff *ff, bool enable) | |||
163 | reg[i] = cpu_to_le32(0x00000001); | 164 | reg[i] = cpu_to_le32(0x00000001); |
164 | } | 165 | } |
165 | 166 | ||
166 | return snd_fw_transaction(ff->unit, TCODE_WRITE_BLOCK_REQUEST, | 167 | err = snd_fw_transaction(ff->unit, TCODE_WRITE_BLOCK_REQUEST, |
167 | FF400_FETCH_PCM_FRAMES, reg, | 168 | FF400_FETCH_PCM_FRAMES, reg, |
168 | sizeof(__le32) * 18, 0); | 169 | sizeof(__le32) * 18, 0); |
170 | kfree(reg); | ||
171 | return err; | ||
169 | } | 172 | } |
170 | 173 | ||
171 | static void ff400_dump_sync_status(struct snd_ff *ff, | 174 | static void ff400_dump_sync_status(struct snd_ff *ff, |
diff --git a/sound/firewire/fireworks/fireworks.c b/sound/firewire/fireworks/fireworks.c index 71a0613d3da0..f2d073365cf6 100644 --- a/sound/firewire/fireworks/fireworks.c +++ b/sound/firewire/fireworks/fireworks.c | |||
@@ -301,6 +301,8 @@ error: | |||
301 | snd_efw_transaction_remove_instance(efw); | 301 | snd_efw_transaction_remove_instance(efw); |
302 | snd_efw_stream_destroy_duplex(efw); | 302 | snd_efw_stream_destroy_duplex(efw); |
303 | snd_card_free(efw->card); | 303 | snd_card_free(efw->card); |
304 | kfree(efw->resp_buf); | ||
305 | efw->resp_buf = NULL; | ||
304 | dev_info(&efw->unit->device, | 306 | dev_info(&efw->unit->device, |
305 | "Sound card registration failed: %d\n", err); | 307 | "Sound card registration failed: %d\n", err); |
306 | } | 308 | } |
diff --git a/sound/firewire/oxfw/oxfw.c b/sound/firewire/oxfw/oxfw.c index 1e5b2c802635..2ea8be6c8584 100644 --- a/sound/firewire/oxfw/oxfw.c +++ b/sound/firewire/oxfw/oxfw.c | |||
@@ -130,6 +130,7 @@ static void oxfw_free(struct snd_oxfw *oxfw) | |||
130 | 130 | ||
131 | kfree(oxfw->spec); | 131 | kfree(oxfw->spec); |
132 | mutex_destroy(&oxfw->mutex); | 132 | mutex_destroy(&oxfw->mutex); |
133 | kfree(oxfw); | ||
133 | } | 134 | } |
134 | 135 | ||
135 | /* | 136 | /* |
@@ -207,6 +208,7 @@ static int detect_quirks(struct snd_oxfw *oxfw) | |||
207 | static void do_registration(struct work_struct *work) | 208 | static void do_registration(struct work_struct *work) |
208 | { | 209 | { |
209 | struct snd_oxfw *oxfw = container_of(work, struct snd_oxfw, dwork.work); | 210 | struct snd_oxfw *oxfw = container_of(work, struct snd_oxfw, dwork.work); |
211 | int i; | ||
210 | int err; | 212 | int err; |
211 | 213 | ||
212 | if (oxfw->registered) | 214 | if (oxfw->registered) |
@@ -269,7 +271,15 @@ error: | |||
269 | snd_oxfw_stream_destroy_simplex(oxfw, &oxfw->rx_stream); | 271 | snd_oxfw_stream_destroy_simplex(oxfw, &oxfw->rx_stream); |
270 | if (oxfw->has_output) | 272 | if (oxfw->has_output) |
271 | snd_oxfw_stream_destroy_simplex(oxfw, &oxfw->tx_stream); | 273 | snd_oxfw_stream_destroy_simplex(oxfw, &oxfw->tx_stream); |
274 | for (i = 0; i < SND_OXFW_STREAM_FORMAT_ENTRIES; ++i) { | ||
275 | kfree(oxfw->tx_stream_formats[i]); | ||
276 | oxfw->tx_stream_formats[i] = NULL; | ||
277 | kfree(oxfw->rx_stream_formats[i]); | ||
278 | oxfw->rx_stream_formats[i] = NULL; | ||
279 | } | ||
272 | snd_card_free(oxfw->card); | 280 | snd_card_free(oxfw->card); |
281 | kfree(oxfw->spec); | ||
282 | oxfw->spec = NULL; | ||
273 | dev_info(&oxfw->unit->device, | 283 | dev_info(&oxfw->unit->device, |
274 | "Sound card registration failed: %d\n", err); | 284 | "Sound card registration failed: %d\n", err); |
275 | } | 285 | } |
diff --git a/sound/firewire/tascam/tascam.c b/sound/firewire/tascam/tascam.c index 44ad41fb7374..d3fdc463a884 100644 --- a/sound/firewire/tascam/tascam.c +++ b/sound/firewire/tascam/tascam.c | |||
@@ -93,6 +93,7 @@ static void tscm_free(struct snd_tscm *tscm) | |||
93 | fw_unit_put(tscm->unit); | 93 | fw_unit_put(tscm->unit); |
94 | 94 | ||
95 | mutex_destroy(&tscm->mutex); | 95 | mutex_destroy(&tscm->mutex); |
96 | kfree(tscm); | ||
96 | } | 97 | } |
97 | 98 | ||
98 | static void tscm_card_free(struct snd_card *card) | 99 | static void tscm_card_free(struct snd_card *card) |
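Note on the firewire driver hunks above (bebob, digi00x, fireworks, oxfw, tascam): these are memory-leak fixes. The per-card private structures allocated at probe time are now kfree()d in the drivers' free routines, and the do_registration() error paths release what was allocated before the failure (M-Audio quirk data, response buffers, cached stream format tables), so a failed registration no longer leaks. A compact sketch of the pattern (hypothetical names, assuming card->private_data points at a separately allocated chip structure):

    static void my_card_free(struct snd_card *card)
    {
            struct my_chip *chip = card->private_data;

            mutex_destroy(&chip->mutex);
            kfree(chip->resp_buf);          /* buffers allocated during probe */
            kfree(chip);                    /* the private structure itself */
    }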
diff --git a/sound/hda/hdac_controller.c b/sound/hda/hdac_controller.c index 560ec0986e1a..74244d8e2909 100644 --- a/sound/hda/hdac_controller.c +++ b/sound/hda/hdac_controller.c | |||
@@ -40,6 +40,8 @@ static void azx_clear_corbrp(struct hdac_bus *bus) | |||
40 | */ | 40 | */ |
41 | void snd_hdac_bus_init_cmd_io(struct hdac_bus *bus) | 41 | void snd_hdac_bus_init_cmd_io(struct hdac_bus *bus) |
42 | { | 42 | { |
43 | WARN_ON_ONCE(!bus->rb.area); | ||
44 | |||
43 | spin_lock_irq(&bus->reg_lock); | 45 | spin_lock_irq(&bus->reg_lock); |
44 | /* CORB set up */ | 46 | /* CORB set up */ |
45 | bus->corb.addr = bus->rb.addr; | 47 | bus->corb.addr = bus->rb.addr; |
@@ -383,7 +385,7 @@ void snd_hdac_bus_exit_link_reset(struct hdac_bus *bus) | |||
383 | EXPORT_SYMBOL_GPL(snd_hdac_bus_exit_link_reset); | 385 | EXPORT_SYMBOL_GPL(snd_hdac_bus_exit_link_reset); |
384 | 386 | ||
385 | /* reset codec link */ | 387 | /* reset codec link */ |
386 | static int azx_reset(struct hdac_bus *bus, bool full_reset) | 388 | int snd_hdac_bus_reset_link(struct hdac_bus *bus, bool full_reset) |
387 | { | 389 | { |
388 | if (!full_reset) | 390 | if (!full_reset) |
389 | goto skip_reset; | 391 | goto skip_reset; |
@@ -408,7 +410,7 @@ static int azx_reset(struct hdac_bus *bus, bool full_reset) | |||
408 | skip_reset: | 410 | skip_reset: |
409 | /* check to see if controller is ready */ | 411 | /* check to see if controller is ready */ |
410 | if (!snd_hdac_chip_readb(bus, GCTL)) { | 412 | if (!snd_hdac_chip_readb(bus, GCTL)) { |
411 | dev_dbg(bus->dev, "azx_reset: controller not ready!\n"); | 413 | dev_dbg(bus->dev, "controller not ready!\n"); |
412 | return -EBUSY; | 414 | return -EBUSY; |
413 | } | 415 | } |
414 | 416 | ||
@@ -423,6 +425,7 @@ static int azx_reset(struct hdac_bus *bus, bool full_reset) | |||
423 | 425 | ||
424 | return 0; | 426 | return 0; |
425 | } | 427 | } |
428 | EXPORT_SYMBOL_GPL(snd_hdac_bus_reset_link); | ||
426 | 429 | ||
427 | /* enable interrupts */ | 430 | /* enable interrupts */ |
428 | static void azx_int_enable(struct hdac_bus *bus) | 431 | static void azx_int_enable(struct hdac_bus *bus) |
@@ -477,15 +480,17 @@ bool snd_hdac_bus_init_chip(struct hdac_bus *bus, bool full_reset) | |||
477 | return false; | 480 | return false; |
478 | 481 | ||
479 | /* reset controller */ | 482 | /* reset controller */ |
480 | azx_reset(bus, full_reset); | 483 | snd_hdac_bus_reset_link(bus, full_reset); |
481 | 484 | ||
482 | /* initialize interrupts */ | 485 | /* clear interrupts */ |
483 | azx_int_clear(bus); | 486 | azx_int_clear(bus); |
484 | azx_int_enable(bus); | ||
485 | 487 | ||
486 | /* initialize the codec command I/O */ | 488 | /* initialize the codec command I/O */ |
487 | snd_hdac_bus_init_cmd_io(bus); | 489 | snd_hdac_bus_init_cmd_io(bus); |
488 | 490 | ||
491 | /* enable interrupts after CORB/RIRB buffers are initialized above */ | ||
492 | azx_int_enable(bus); | ||
493 | |||
489 | /* program the position buffer */ | 494 | /* program the position buffer */ |
490 | if (bus->use_posbuf && bus->posbuf.addr) { | 495 | if (bus->use_posbuf && bus->posbuf.addr) { |
491 | snd_hdac_chip_writel(bus, DPLBASE, (u32)bus->posbuf.addr); | 496 | snd_hdac_chip_writel(bus, DPLBASE, (u32)bus->posbuf.addr); |
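Note on the hdac_controller.c hunk above: two related changes. First, azx_reset() becomes the exported snd_hdac_bus_reset_link(), so ASoC drivers such as the Intel Skylake driver can reset the controller link without running the full chip init (see the skl.c hunk further below). Second, snd_hdac_bus_init_chip() now enables controller interrupts only after the CORB/RIRB command rings have been programmed, so an interrupt cannot be serviced against uninitialized ring state; the new WARN_ON_ONCE(!bus->rb.area) catches callers that never allocated the ring buffer. The resulting ordering, roughly (names taken from the hunk above):

    snd_hdac_bus_reset_link(bus, full_reset);   /* reset the codec link */
    azx_int_clear(bus);                         /* clear stale interrupt status */
    snd_hdac_bus_init_cmd_io(bus);              /* set up CORB/RIRB first */
    azx_int_enable(bus);                        /* only then unmask interrupts */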
diff --git a/sound/pci/emu10k1/emufx.c b/sound/pci/emu10k1/emufx.c index 90713741c2dc..6ebe817801ea 100644 --- a/sound/pci/emu10k1/emufx.c +++ b/sound/pci/emu10k1/emufx.c | |||
@@ -2540,7 +2540,7 @@ static int snd_emu10k1_fx8010_ioctl(struct snd_hwdep * hw, struct file *file, un | |||
2540 | emu->support_tlv = 1; | 2540 | emu->support_tlv = 1; |
2541 | return put_user(SNDRV_EMU10K1_VERSION, (int __user *)argp); | 2541 | return put_user(SNDRV_EMU10K1_VERSION, (int __user *)argp); |
2542 | case SNDRV_EMU10K1_IOCTL_INFO: | 2542 | case SNDRV_EMU10K1_IOCTL_INFO: |
2543 | info = kmalloc(sizeof(*info), GFP_KERNEL); | 2543 | info = kzalloc(sizeof(*info), GFP_KERNEL); |
2544 | if (!info) | 2544 | if (!info) |
2545 | return -ENOMEM; | 2545 | return -ENOMEM; |
2546 | snd_emu10k1_fx8010_info(emu, info); | 2546 | snd_emu10k1_fx8010_info(emu, info); |
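Note on the emufx.c hunk above: the SNDRV_EMU10K1_IOCTL_INFO buffer switches from kmalloc() to kzalloc(). The structure is subsequently copied to userspace, so any padding bytes or fields the fill routine does not write would otherwise expose uninitialized kernel heap contents. A minimal sketch of the pattern (my_info/fill_info are hypothetical):

    static long info_ioctl(void __user *argp)
    {
            struct my_info *info;
            long err = 0;

            /* kzalloc(), not kmalloc(): padding and unwritten fields must be zero */
            info = kzalloc(sizeof(*info), GFP_KERNEL);
            if (!info)
                    return -ENOMEM;
            fill_info(info);
            if (copy_to_user(argp, info, sizeof(*info)))
                    err = -EFAULT;
            kfree(info);
            return err;
    }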
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c index 1b2ce304152a..aa4c672dbaf7 100644 --- a/sound/pci/hda/hda_intel.c +++ b/sound/pci/hda/hda_intel.c | |||
@@ -365,8 +365,10 @@ enum { | |||
365 | */ | 365 | */ |
366 | #ifdef SUPPORT_VGA_SWITCHEROO | 366 | #ifdef SUPPORT_VGA_SWITCHEROO |
367 | #define use_vga_switcheroo(chip) ((chip)->use_vga_switcheroo) | 367 | #define use_vga_switcheroo(chip) ((chip)->use_vga_switcheroo) |
368 | #define needs_eld_notify_link(chip) ((chip)->need_eld_notify_link) | ||
368 | #else | 369 | #else |
369 | #define use_vga_switcheroo(chip) 0 | 370 | #define use_vga_switcheroo(chip) 0 |
371 | #define needs_eld_notify_link(chip) false | ||
370 | #endif | 372 | #endif |
371 | 373 | ||
372 | #define CONTROLLER_IN_GPU(pci) (((pci)->device == 0x0a0c) || \ | 374 | #define CONTROLLER_IN_GPU(pci) (((pci)->device == 0x0a0c) || \ |
@@ -453,6 +455,7 @@ static inline void mark_runtime_wc(struct azx *chip, struct azx_dev *azx_dev, | |||
453 | #endif | 455 | #endif |
454 | 456 | ||
455 | static int azx_acquire_irq(struct azx *chip, int do_disconnect); | 457 | static int azx_acquire_irq(struct azx *chip, int do_disconnect); |
458 | static void set_default_power_save(struct azx *chip); | ||
456 | 459 | ||
457 | /* | 460 | /* |
458 | * initialize the PCI registers | 461 | * initialize the PCI registers |
@@ -1201,6 +1204,10 @@ static int azx_runtime_idle(struct device *dev) | |||
1201 | azx_bus(chip)->codec_powered || !chip->running) | 1204 | azx_bus(chip)->codec_powered || !chip->running) |
1202 | return -EBUSY; | 1205 | return -EBUSY; |
1203 | 1206 | ||
1207 | /* ELD notification gets broken when HD-audio bus is off */ | ||
1208 | if (needs_eld_notify_link(hda)) | ||
1209 | return -EBUSY; | ||
1210 | |||
1204 | return 0; | 1211 | return 0; |
1205 | } | 1212 | } |
1206 | 1213 | ||
@@ -1298,6 +1305,36 @@ static bool azx_vs_can_switch(struct pci_dev *pci) | |||
1298 | return true; | 1305 | return true; |
1299 | } | 1306 | } |
1300 | 1307 | ||
1308 | /* | ||
1309 | * The discrete GPU cannot power down unless the HDA controller runtime | ||
1310 | * suspends, so activate runtime PM on codecs even if power_save == 0. | ||
1311 | */ | ||
1312 | static void setup_vga_switcheroo_runtime_pm(struct azx *chip) | ||
1313 | { | ||
1314 | struct hda_intel *hda = container_of(chip, struct hda_intel, chip); | ||
1315 | struct hda_codec *codec; | ||
1316 | |||
1317 | if (hda->use_vga_switcheroo && !hda->need_eld_notify_link) { | ||
1318 | list_for_each_codec(codec, &chip->bus) | ||
1319 | codec->auto_runtime_pm = 1; | ||
1320 | /* reset the power save setup */ | ||
1321 | if (chip->running) | ||
1322 | set_default_power_save(chip); | ||
1323 | } | ||
1324 | } | ||
1325 | |||
1326 | static void azx_vs_gpu_bound(struct pci_dev *pci, | ||
1327 | enum vga_switcheroo_client_id client_id) | ||
1328 | { | ||
1329 | struct snd_card *card = pci_get_drvdata(pci); | ||
1330 | struct azx *chip = card->private_data; | ||
1331 | struct hda_intel *hda = container_of(chip, struct hda_intel, chip); | ||
1332 | |||
1333 | if (client_id == VGA_SWITCHEROO_DIS) | ||
1334 | hda->need_eld_notify_link = 0; | ||
1335 | setup_vga_switcheroo_runtime_pm(chip); | ||
1336 | } | ||
1337 | |||
1301 | static void init_vga_switcheroo(struct azx *chip) | 1338 | static void init_vga_switcheroo(struct azx *chip) |
1302 | { | 1339 | { |
1303 | struct hda_intel *hda = container_of(chip, struct hda_intel, chip); | 1340 | struct hda_intel *hda = container_of(chip, struct hda_intel, chip); |
@@ -1306,6 +1343,7 @@ static void init_vga_switcheroo(struct azx *chip) | |||
1306 | dev_info(chip->card->dev, | 1343 | dev_info(chip->card->dev, |
1307 | "Handle vga_switcheroo audio client\n"); | 1344 | "Handle vga_switcheroo audio client\n"); |
1308 | hda->use_vga_switcheroo = 1; | 1345 | hda->use_vga_switcheroo = 1; |
1346 | hda->need_eld_notify_link = 1; /* cleared in gpu_bound op */ | ||
1309 | chip->driver_caps |= AZX_DCAPS_PM_RUNTIME; | 1347 | chip->driver_caps |= AZX_DCAPS_PM_RUNTIME; |
1310 | pci_dev_put(p); | 1348 | pci_dev_put(p); |
1311 | } | 1349 | } |
@@ -1314,6 +1352,7 @@ static void init_vga_switcheroo(struct azx *chip) | |||
1314 | static const struct vga_switcheroo_client_ops azx_vs_ops = { | 1352 | static const struct vga_switcheroo_client_ops azx_vs_ops = { |
1315 | .set_gpu_state = azx_vs_set_state, | 1353 | .set_gpu_state = azx_vs_set_state, |
1316 | .can_switch = azx_vs_can_switch, | 1354 | .can_switch = azx_vs_can_switch, |
1355 | .gpu_bound = azx_vs_gpu_bound, | ||
1317 | }; | 1356 | }; |
1318 | 1357 | ||
1319 | static int register_vga_switcheroo(struct azx *chip) | 1358 | static int register_vga_switcheroo(struct azx *chip) |
@@ -1339,6 +1378,7 @@ static int register_vga_switcheroo(struct azx *chip) | |||
1339 | #define init_vga_switcheroo(chip) /* NOP */ | 1378 | #define init_vga_switcheroo(chip) /* NOP */ |
1340 | #define register_vga_switcheroo(chip) 0 | 1379 | #define register_vga_switcheroo(chip) 0 |
1341 | #define check_hdmi_disabled(pci) false | 1380 | #define check_hdmi_disabled(pci) false |
1381 | #define setup_vga_switcheroo_runtime_pm(chip) /* NOP */ | ||
1342 | #endif /* SUPPORT_VGA_SWITCHER */ | 1382 | #endif /* SUPPORT_VGA_SWITCHER */ |
1343 | 1383 | ||
1344 | /* | 1384 | /* |
@@ -1352,6 +1392,7 @@ static int azx_free(struct azx *chip) | |||
1352 | 1392 | ||
1353 | if (azx_has_pm_runtime(chip) && chip->running) | 1393 | if (azx_has_pm_runtime(chip) && chip->running) |
1354 | pm_runtime_get_noresume(&pci->dev); | 1394 | pm_runtime_get_noresume(&pci->dev); |
1395 | chip->running = 0; | ||
1355 | 1396 | ||
1356 | azx_del_card_list(chip); | 1397 | azx_del_card_list(chip); |
1357 | 1398 | ||
@@ -2230,6 +2271,25 @@ static struct snd_pci_quirk power_save_blacklist[] = { | |||
2230 | }; | 2271 | }; |
2231 | #endif /* CONFIG_PM */ | 2272 | #endif /* CONFIG_PM */ |
2232 | 2273 | ||
2274 | static void set_default_power_save(struct azx *chip) | ||
2275 | { | ||
2276 | int val = power_save; | ||
2277 | |||
2278 | #ifdef CONFIG_PM | ||
2279 | if (pm_blacklist) { | ||
2280 | const struct snd_pci_quirk *q; | ||
2281 | |||
2282 | q = snd_pci_quirk_lookup(chip->pci, power_save_blacklist); | ||
2283 | if (q && val) { | ||
2284 | dev_info(chip->card->dev, "device %04x:%04x is on the power_save blacklist, forcing power_save to 0\n", | ||
2285 | q->subvendor, q->subdevice); | ||
2286 | val = 0; | ||
2287 | } | ||
2288 | } | ||
2289 | #endif /* CONFIG_PM */ | ||
2290 | snd_hda_set_power_save(&chip->bus, val * 1000); | ||
2291 | } | ||
2292 | |||
2233 | /* number of codec slots for each chipset: 0 = default slots (i.e. 4) */ | 2293 | /* number of codec slots for each chipset: 0 = default slots (i.e. 4) */ |
2234 | static unsigned int azx_max_codecs[AZX_NUM_DRIVERS] = { | 2294 | static unsigned int azx_max_codecs[AZX_NUM_DRIVERS] = { |
2235 | [AZX_DRIVER_NVIDIA] = 8, | 2295 | [AZX_DRIVER_NVIDIA] = 8, |
@@ -2241,9 +2301,7 @@ static int azx_probe_continue(struct azx *chip) | |||
2241 | struct hda_intel *hda = container_of(chip, struct hda_intel, chip); | 2301 | struct hda_intel *hda = container_of(chip, struct hda_intel, chip); |
2242 | struct hdac_bus *bus = azx_bus(chip); | 2302 | struct hdac_bus *bus = azx_bus(chip); |
2243 | struct pci_dev *pci = chip->pci; | 2303 | struct pci_dev *pci = chip->pci; |
2244 | struct hda_codec *codec; | ||
2245 | int dev = chip->dev_index; | 2304 | int dev = chip->dev_index; |
2246 | int val; | ||
2247 | int err; | 2305 | int err; |
2248 | 2306 | ||
2249 | hda->probe_continued = 1; | 2307 | hda->probe_continued = 1; |
@@ -2322,31 +2380,13 @@ static int azx_probe_continue(struct azx *chip) | |||
2322 | if (err < 0) | 2380 | if (err < 0) |
2323 | goto out_free; | 2381 | goto out_free; |
2324 | 2382 | ||
2383 | setup_vga_switcheroo_runtime_pm(chip); | ||
2384 | |||
2325 | chip->running = 1; | 2385 | chip->running = 1; |
2326 | azx_add_card_list(chip); | 2386 | azx_add_card_list(chip); |
2327 | 2387 | ||
2328 | val = power_save; | 2388 | set_default_power_save(chip); |
2329 | #ifdef CONFIG_PM | ||
2330 | if (pm_blacklist) { | ||
2331 | const struct snd_pci_quirk *q; | ||
2332 | |||
2333 | q = snd_pci_quirk_lookup(chip->pci, power_save_blacklist); | ||
2334 | if (q && val) { | ||
2335 | dev_info(chip->card->dev, "device %04x:%04x is on the power_save blacklist, forcing power_save to 0\n", | ||
2336 | q->subvendor, q->subdevice); | ||
2337 | val = 0; | ||
2338 | } | ||
2339 | } | ||
2340 | #endif /* CONFIG_PM */ | ||
2341 | /* | ||
2342 | * The discrete GPU cannot power down unless the HDA controller runtime | ||
2343 | * suspends, so activate runtime PM on codecs even if power_save == 0. | ||
2344 | */ | ||
2345 | if (use_vga_switcheroo(hda)) | ||
2346 | list_for_each_codec(codec, &chip->bus) | ||
2347 | codec->auto_runtime_pm = 1; | ||
2348 | 2389 | ||
2349 | snd_hda_set_power_save(&chip->bus, val * 1000); | ||
2350 | if (azx_has_pm_runtime(chip)) | 2390 | if (azx_has_pm_runtime(chip)) |
2351 | pm_runtime_put_autosuspend(&pci->dev); | 2391 | pm_runtime_put_autosuspend(&pci->dev); |
2352 | 2392 | ||
diff --git a/sound/pci/hda/hda_intel.h b/sound/pci/hda/hda_intel.h index e3a3d318d2e5..f59719e06b91 100644 --- a/sound/pci/hda/hda_intel.h +++ b/sound/pci/hda/hda_intel.h | |||
@@ -37,6 +37,7 @@ struct hda_intel { | |||
37 | 37 | ||
38 | /* vga_switcheroo setup */ | 38 | /* vga_switcheroo setup */ |
39 | unsigned int use_vga_switcheroo:1; | 39 | unsigned int use_vga_switcheroo:1; |
40 | unsigned int need_eld_notify_link:1; | ||
40 | unsigned int vga_switcheroo_registered:1; | 41 | unsigned int vga_switcheroo_registered:1; |
41 | unsigned int init_failed:1; /* delayed init failed */ | 42 | unsigned int init_failed:1; /* delayed init failed */ |
42 | 43 | ||
diff --git a/sound/soc/amd/acp-pcm-dma.c b/sound/soc/amd/acp-pcm-dma.c index e359938e3d7e..77b265bd0505 100644 --- a/sound/soc/amd/acp-pcm-dma.c +++ b/sound/soc/amd/acp-pcm-dma.c | |||
@@ -16,6 +16,7 @@ | |||
16 | #include <linux/module.h> | 16 | #include <linux/module.h> |
17 | #include <linux/delay.h> | 17 | #include <linux/delay.h> |
18 | #include <linux/io.h> | 18 | #include <linux/io.h> |
19 | #include <linux/iopoll.h> | ||
19 | #include <linux/sizes.h> | 20 | #include <linux/sizes.h> |
20 | #include <linux/pm_runtime.h> | 21 | #include <linux/pm_runtime.h> |
21 | 22 | ||
@@ -184,6 +185,24 @@ static void config_dma_descriptor_in_sram(void __iomem *acp_mmio, | |||
184 | acp_reg_write(descr_info->xfer_val, acp_mmio, mmACP_SRBM_Targ_Idx_Data); | 185 | acp_reg_write(descr_info->xfer_val, acp_mmio, mmACP_SRBM_Targ_Idx_Data); |
185 | } | 186 | } |
186 | 187 | ||
188 | static void pre_config_reset(void __iomem *acp_mmio, u16 ch_num) | ||
189 | { | ||
190 | u32 dma_ctrl; | ||
191 | int ret; | ||
192 | |||
193 | /* clear the reset bit */ | ||
194 | dma_ctrl = acp_reg_read(acp_mmio, mmACP_DMA_CNTL_0 + ch_num); | ||
195 | dma_ctrl &= ~ACP_DMA_CNTL_0__DMAChRst_MASK; | ||
196 | acp_reg_write(dma_ctrl, acp_mmio, mmACP_DMA_CNTL_0 + ch_num); | ||
197 | /* check the reset bit before programming configuration registers */ | ||
198 | ret = readl_poll_timeout(acp_mmio + ((mmACP_DMA_CNTL_0 + ch_num) * 4), | ||
199 | dma_ctrl, | ||
200 | !(dma_ctrl & ACP_DMA_CNTL_0__DMAChRst_MASK), | ||
201 | 100, ACP_DMA_RESET_TIME); | ||
202 | if (ret < 0) | ||
203 | pr_err("Failed to clear reset of channel : %d\n", ch_num); | ||
204 | } | ||
205 | |||
187 | /* | 206 | /* |
188 | * Initialize the DMA descriptor information for transfer between | 207 | * Initialize the DMA descriptor information for transfer between |
189 | * system memory <-> ACP SRAM | 208 | * system memory <-> ACP SRAM |
@@ -236,6 +255,7 @@ static void set_acp_sysmem_dma_descriptors(void __iomem *acp_mmio, | |||
236 | config_dma_descriptor_in_sram(acp_mmio, dma_dscr_idx, | 255 | config_dma_descriptor_in_sram(acp_mmio, dma_dscr_idx, |
237 | &dmadscr[i]); | 256 | &dmadscr[i]); |
238 | } | 257 | } |
258 | pre_config_reset(acp_mmio, ch); | ||
239 | config_acp_dma_channel(acp_mmio, ch, | 259 | config_acp_dma_channel(acp_mmio, ch, |
240 | dma_dscr_idx - 1, | 260 | dma_dscr_idx - 1, |
241 | NUM_DSCRS_PER_CHANNEL, | 261 | NUM_DSCRS_PER_CHANNEL, |
@@ -275,6 +295,7 @@ static void set_acp_to_i2s_dma_descriptors(void __iomem *acp_mmio, u32 size, | |||
275 | config_dma_descriptor_in_sram(acp_mmio, dma_dscr_idx, | 295 | config_dma_descriptor_in_sram(acp_mmio, dma_dscr_idx, |
276 | &dmadscr[i]); | 296 | &dmadscr[i]); |
277 | } | 297 | } |
298 | pre_config_reset(acp_mmio, ch); | ||
278 | /* Configure the DMA channel with the above descriptors */ | 299 | /* Configure the DMA channel with the above descriptors */ |
279 | config_acp_dma_channel(acp_mmio, ch, dma_dscr_idx - 1, | 300 | config_acp_dma_channel(acp_mmio, ch, dma_dscr_idx - 1, |
280 | NUM_DSCRS_PER_CHANNEL, | 301 | NUM_DSCRS_PER_CHANNEL, |
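Note on the acp-pcm-dma.c hunk above: before reprogramming a DMA channel, the driver now clears the channel reset bit and polls it back until the hardware reports the reset as deasserted, using readl_poll_timeout() from <linux/iopoll.h>. The helper takes the MMIO address, a variable that receives each read, a completion condition, a polling interval and a timeout (both in microseconds), and returns -ETIMEDOUT if the condition never becomes true. A generic sketch (hypothetical register layout, assuming bit 0 is the reset bit):

    #include <linux/iopoll.h>

    /* wait for a self-clearing reset bit: poll every 100us, give up after 10ms */
    static int wait_reset_deasserted(void __iomem *reg)
    {
            u32 val;

            return readl_poll_timeout(reg, val, !(val & 0x1), 100, 10000);
    }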
diff --git a/sound/soc/codecs/cs4265.c b/sound/soc/codecs/cs4265.c index 275677de669f..407554175282 100644 --- a/sound/soc/codecs/cs4265.c +++ b/sound/soc/codecs/cs4265.c | |||
@@ -157,8 +157,8 @@ static const struct snd_kcontrol_new cs4265_snd_controls[] = { | |||
157 | SOC_SINGLE("Validity Bit Control Switch", CS4265_SPDIF_CTL2, | 157 | SOC_SINGLE("Validity Bit Control Switch", CS4265_SPDIF_CTL2, |
158 | 3, 1, 0), | 158 | 3, 1, 0), |
159 | SOC_ENUM("SPDIF Mono/Stereo", spdif_mono_stereo_enum), | 159 | SOC_ENUM("SPDIF Mono/Stereo", spdif_mono_stereo_enum), |
160 | SOC_SINGLE("MMTLR Data Switch", 0, | 160 | SOC_SINGLE("MMTLR Data Switch", CS4265_SPDIF_CTL2, |
161 | 1, 1, 0), | 161 | 0, 1, 0), |
162 | SOC_ENUM("Mono Channel Select", spdif_mono_select_enum), | 162 | SOC_ENUM("Mono Channel Select", spdif_mono_select_enum), |
163 | SND_SOC_BYTES("C Data Buffer", CS4265_C_DATA_BUFF, 24), | 163 | SND_SOC_BYTES("C Data Buffer", CS4265_C_DATA_BUFF, 24), |
164 | }; | 164 | }; |
diff --git a/sound/soc/codecs/max98373.c b/sound/soc/codecs/max98373.c index 92b7125ea169..1093f766d0d2 100644 --- a/sound/soc/codecs/max98373.c +++ b/sound/soc/codecs/max98373.c | |||
@@ -520,6 +520,7 @@ static bool max98373_volatile_reg(struct device *dev, unsigned int reg) | |||
520 | { | 520 | { |
521 | switch (reg) { | 521 | switch (reg) { |
522 | case MAX98373_R2000_SW_RESET ... MAX98373_R2009_INT_FLAG3: | 522 | case MAX98373_R2000_SW_RESET ... MAX98373_R2009_INT_FLAG3: |
523 | case MAX98373_R203E_AMP_PATH_GAIN: | ||
523 | case MAX98373_R2054_MEAS_ADC_PVDD_CH_READBACK: | 524 | case MAX98373_R2054_MEAS_ADC_PVDD_CH_READBACK: |
524 | case MAX98373_R2055_MEAS_ADC_THERM_CH_READBACK: | 525 | case MAX98373_R2055_MEAS_ADC_THERM_CH_READBACK: |
525 | case MAX98373_R20B6_BDE_CUR_STATE_READBACK: | 526 | case MAX98373_R20B6_BDE_CUR_STATE_READBACK: |
@@ -729,6 +730,7 @@ static int max98373_probe(struct snd_soc_component *component) | |||
729 | /* Software Reset */ | 730 | /* Software Reset */ |
730 | regmap_write(max98373->regmap, | 731 | regmap_write(max98373->regmap, |
731 | MAX98373_R2000_SW_RESET, MAX98373_SOFT_RESET); | 732 | MAX98373_R2000_SW_RESET, MAX98373_SOFT_RESET); |
733 | usleep_range(10000, 11000); | ||
732 | 734 | ||
733 | /* IV default slot configuration */ | 735 | /* IV default slot configuration */ |
734 | regmap_write(max98373->regmap, | 736 | regmap_write(max98373->regmap, |
@@ -817,6 +819,7 @@ static int max98373_resume(struct device *dev) | |||
817 | 819 | ||
818 | regmap_write(max98373->regmap, | 820 | regmap_write(max98373->regmap, |
819 | MAX98373_R2000_SW_RESET, MAX98373_SOFT_RESET); | 821 | MAX98373_R2000_SW_RESET, MAX98373_SOFT_RESET); |
822 | usleep_range(10000, 11000); | ||
820 | regcache_cache_only(max98373->regmap, false); | 823 | regcache_cache_only(max98373->regmap, false); |
821 | regcache_sync(max98373->regmap); | 824 | regcache_sync(max98373->regmap); |
822 | return 0; | 825 | return 0; |
diff --git a/sound/soc/codecs/rt5514.c b/sound/soc/codecs/rt5514.c index dca82dd6e3bf..32fe76c3134a 100644 --- a/sound/soc/codecs/rt5514.c +++ b/sound/soc/codecs/rt5514.c | |||
@@ -64,8 +64,8 @@ static const struct reg_sequence rt5514_patch[] = { | |||
64 | {RT5514_ANA_CTRL_LDO10, 0x00028604}, | 64 | {RT5514_ANA_CTRL_LDO10, 0x00028604}, |
65 | {RT5514_ANA_CTRL_ADCFED, 0x00000800}, | 65 | {RT5514_ANA_CTRL_ADCFED, 0x00000800}, |
66 | {RT5514_ASRC_IN_CTRL1, 0x00000003}, | 66 | {RT5514_ASRC_IN_CTRL1, 0x00000003}, |
67 | {RT5514_DOWNFILTER0_CTRL3, 0x10000352}, | 67 | {RT5514_DOWNFILTER0_CTRL3, 0x10000342}, |
68 | {RT5514_DOWNFILTER1_CTRL3, 0x10000352}, | 68 | {RT5514_DOWNFILTER1_CTRL3, 0x10000342}, |
69 | }; | 69 | }; |
70 | 70 | ||
71 | static const struct reg_default rt5514_reg[] = { | 71 | static const struct reg_default rt5514_reg[] = { |
@@ -92,10 +92,10 @@ static const struct reg_default rt5514_reg[] = { | |||
92 | {RT5514_ASRC_IN_CTRL1, 0x00000003}, | 92 | {RT5514_ASRC_IN_CTRL1, 0x00000003}, |
93 | {RT5514_DOWNFILTER0_CTRL1, 0x00020c2f}, | 93 | {RT5514_DOWNFILTER0_CTRL1, 0x00020c2f}, |
94 | {RT5514_DOWNFILTER0_CTRL2, 0x00020c2f}, | 94 | {RT5514_DOWNFILTER0_CTRL2, 0x00020c2f}, |
95 | {RT5514_DOWNFILTER0_CTRL3, 0x10000352}, | 95 | {RT5514_DOWNFILTER0_CTRL3, 0x10000342}, |
96 | {RT5514_DOWNFILTER1_CTRL1, 0x00020c2f}, | 96 | {RT5514_DOWNFILTER1_CTRL1, 0x00020c2f}, |
97 | {RT5514_DOWNFILTER1_CTRL2, 0x00020c2f}, | 97 | {RT5514_DOWNFILTER1_CTRL2, 0x00020c2f}, |
98 | {RT5514_DOWNFILTER1_CTRL3, 0x10000352}, | 98 | {RT5514_DOWNFILTER1_CTRL3, 0x10000342}, |
99 | {RT5514_ANA_CTRL_LDO10, 0x00028604}, | 99 | {RT5514_ANA_CTRL_LDO10, 0x00028604}, |
100 | {RT5514_ANA_CTRL_LDO18_16, 0x02000345}, | 100 | {RT5514_ANA_CTRL_LDO18_16, 0x02000345}, |
101 | {RT5514_ANA_CTRL_ADC12, 0x0000a2a8}, | 101 | {RT5514_ANA_CTRL_ADC12, 0x0000a2a8}, |
diff --git a/sound/soc/codecs/rt5682.c b/sound/soc/codecs/rt5682.c index 640d400ca013..afe7d5b19313 100644 --- a/sound/soc/codecs/rt5682.c +++ b/sound/soc/codecs/rt5682.c | |||
@@ -750,8 +750,8 @@ static bool rt5682_readable_register(struct device *dev, unsigned int reg) | |||
750 | } | 750 | } |
751 | 751 | ||
752 | static const DECLARE_TLV_DB_SCALE(hp_vol_tlv, -2250, 150, 0); | 752 | static const DECLARE_TLV_DB_SCALE(hp_vol_tlv, -2250, 150, 0); |
753 | static const DECLARE_TLV_DB_SCALE(dac_vol_tlv, -65625, 375, 0); | 753 | static const DECLARE_TLV_DB_SCALE(dac_vol_tlv, -6525, 75, 0); |
754 | static const DECLARE_TLV_DB_SCALE(adc_vol_tlv, -17625, 375, 0); | 754 | static const DECLARE_TLV_DB_SCALE(adc_vol_tlv, -1725, 75, 0); |
755 | static const DECLARE_TLV_DB_SCALE(adc_bst_tlv, 0, 1200, 0); | 755 | static const DECLARE_TLV_DB_SCALE(adc_bst_tlv, 0, 1200, 0); |
756 | 756 | ||
757 | /* {0, +20, +24, +30, +35, +40, +44, +50, +52} dB */ | 757 | /* {0, +20, +24, +30, +35, +40, +44, +50, +52} dB */ |
@@ -1114,7 +1114,7 @@ static const struct snd_kcontrol_new rt5682_snd_controls[] = { | |||
1114 | 1114 | ||
1115 | /* DAC Digital Volume */ | 1115 | /* DAC Digital Volume */ |
1116 | SOC_DOUBLE_TLV("DAC1 Playback Volume", RT5682_DAC1_DIG_VOL, | 1116 | SOC_DOUBLE_TLV("DAC1 Playback Volume", RT5682_DAC1_DIG_VOL, |
1117 | RT5682_L_VOL_SFT, RT5682_R_VOL_SFT, 175, 0, dac_vol_tlv), | 1117 | RT5682_L_VOL_SFT + 1, RT5682_R_VOL_SFT + 1, 86, 0, dac_vol_tlv), |
1118 | 1118 | ||
1119 | /* IN Boost Volume */ | 1119 | /* IN Boost Volume */ |
1120 | SOC_SINGLE_TLV("CBJ Boost Volume", RT5682_CBJ_BST_CTRL, | 1120 | SOC_SINGLE_TLV("CBJ Boost Volume", RT5682_CBJ_BST_CTRL, |
@@ -1124,7 +1124,7 @@ static const struct snd_kcontrol_new rt5682_snd_controls[] = { | |||
1124 | SOC_DOUBLE("STO1 ADC Capture Switch", RT5682_STO1_ADC_DIG_VOL, | 1124 | SOC_DOUBLE("STO1 ADC Capture Switch", RT5682_STO1_ADC_DIG_VOL, |
1125 | RT5682_L_MUTE_SFT, RT5682_R_MUTE_SFT, 1, 1), | 1125 | RT5682_L_MUTE_SFT, RT5682_R_MUTE_SFT, 1, 1), |
1126 | SOC_DOUBLE_TLV("STO1 ADC Capture Volume", RT5682_STO1_ADC_DIG_VOL, | 1126 | SOC_DOUBLE_TLV("STO1 ADC Capture Volume", RT5682_STO1_ADC_DIG_VOL, |
1127 | RT5682_L_VOL_SFT, RT5682_R_VOL_SFT, 127, 0, adc_vol_tlv), | 1127 | RT5682_L_VOL_SFT + 1, RT5682_R_VOL_SFT + 1, 63, 0, adc_vol_tlv), |
1128 | 1128 | ||
1129 | /* ADC Boost Volume Control */ | 1129 | /* ADC Boost Volume Control */ |
1130 | SOC_DOUBLE_TLV("STO1 ADC Boost Gain Volume", RT5682_STO1_ADC_BOOST, | 1130 | SOC_DOUBLE_TLV("STO1 ADC Boost Gain Volume", RT5682_STO1_ADC_BOOST, |
diff --git a/sound/soc/codecs/sigmadsp.c b/sound/soc/codecs/sigmadsp.c index d53680ac78e4..6df158669420 100644 --- a/sound/soc/codecs/sigmadsp.c +++ b/sound/soc/codecs/sigmadsp.c | |||
@@ -117,8 +117,7 @@ static int sigmadsp_ctrl_write(struct sigmadsp *sigmadsp, | |||
117 | struct sigmadsp_control *ctrl, void *data) | 117 | struct sigmadsp_control *ctrl, void *data) |
118 | { | 118 | { |
119 | /* safeload loads up to 20 bytes in an atomic operation */ | 119 | /* safeload loads up to 20 bytes in an atomic operation */ |
120 | if (ctrl->num_bytes > 4 && ctrl->num_bytes <= 20 && sigmadsp->ops && | 120 | if (ctrl->num_bytes <= 20 && sigmadsp->ops && sigmadsp->ops->safeload) |
121 | sigmadsp->ops->safeload) | ||
122 | return sigmadsp->ops->safeload(sigmadsp, ctrl->addr, data, | 121 | return sigmadsp->ops->safeload(sigmadsp, ctrl->addr, data, |
123 | ctrl->num_bytes); | 122 | ctrl->num_bytes); |
124 | else | 123 | else |
diff --git a/sound/soc/codecs/tas6424.c b/sound/soc/codecs/tas6424.c index 14999b999fd3..0d6145549a98 100644 --- a/sound/soc/codecs/tas6424.c +++ b/sound/soc/codecs/tas6424.c | |||
@@ -424,8 +424,10 @@ static void tas6424_fault_check_work(struct work_struct *work) | |||
424 | TAS6424_FAULT_PVDD_UV | | 424 | TAS6424_FAULT_PVDD_UV | |
425 | TAS6424_FAULT_VBAT_UV; | 425 | TAS6424_FAULT_VBAT_UV; |
426 | 426 | ||
427 | if (reg) | 427 | if (!reg) { |
428 | tas6424->last_fault1 = reg; | ||
428 | goto check_global_fault2_reg; | 429 | goto check_global_fault2_reg; |
430 | } | ||
429 | 431 | ||
430 | /* | 432 | /* |
431 | * Only flag errors once for a given occurrence. This is needed as | 433 | * Only flag errors once for a given occurrence. This is needed as |
@@ -461,8 +463,10 @@ check_global_fault2_reg: | |||
461 | TAS6424_FAULT_OTSD_CH3 | | 463 | TAS6424_FAULT_OTSD_CH3 | |
462 | TAS6424_FAULT_OTSD_CH4; | 464 | TAS6424_FAULT_OTSD_CH4; |
463 | 465 | ||
464 | if (!reg) | 466 | if (!reg) { |
467 | tas6424->last_fault2 = reg; | ||
465 | goto check_warn_reg; | 468 | goto check_warn_reg; |
469 | } | ||
466 | 470 | ||
467 | if ((reg & TAS6424_FAULT_OTSD) && !(tas6424->last_fault2 & TAS6424_FAULT_OTSD)) | 471 | if ((reg & TAS6424_FAULT_OTSD) && !(tas6424->last_fault2 & TAS6424_FAULT_OTSD)) |
468 | dev_crit(dev, "experienced a global overtemp shutdown\n"); | 472 | dev_crit(dev, "experienced a global overtemp shutdown\n"); |
@@ -497,8 +501,10 @@ check_warn_reg: | |||
497 | TAS6424_WARN_VDD_OTW_CH3 | | 501 | TAS6424_WARN_VDD_OTW_CH3 | |
498 | TAS6424_WARN_VDD_OTW_CH4; | 502 | TAS6424_WARN_VDD_OTW_CH4; |
499 | 503 | ||
500 | if (!reg) | 504 | if (!reg) { |
505 | tas6424->last_warn = reg; | ||
501 | goto out; | 506 | goto out; |
507 | } | ||
502 | 508 | ||
503 | if ((reg & TAS6424_WARN_VDD_UV) && !(tas6424->last_warn & TAS6424_WARN_VDD_UV)) | 509 | if ((reg & TAS6424_WARN_VDD_UV) && !(tas6424->last_warn & TAS6424_WARN_VDD_UV)) |
504 | dev_warn(dev, "experienced a VDD under voltage condition\n"); | 510 | dev_warn(dev, "experienced a VDD under voltage condition\n"); |
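Note on the tas6424.c hunk above: the fault-check worker logs each fault only on its first occurrence by comparing against the previously seen register value. Before this change, the saved value was not updated when a register read back clear, so a fault that went away and later returned was treated as already reported and silently dropped; recording the cleared value re-arms the reporting. A compact sketch of the "log once per occurrence" pattern (hypothetical names):

    static void check_fault(struct my_dev *dev, unsigned int reg)
    {
            if (!reg) {
                    dev->last_fault = 0;    /* fault cleared: re-arm reporting */
                    return;
            }
            if ((reg & FAULT_BIT) && !(dev->last_fault & FAULT_BIT))
                    pr_crit("fault asserted\n");
            dev->last_fault = reg;
    }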
diff --git a/sound/soc/codecs/wm8804-i2c.c b/sound/soc/codecs/wm8804-i2c.c index f27464c2c5ba..79541960f45d 100644 --- a/sound/soc/codecs/wm8804-i2c.c +++ b/sound/soc/codecs/wm8804-i2c.c | |||
@@ -13,6 +13,7 @@ | |||
13 | #include <linux/init.h> | 13 | #include <linux/init.h> |
14 | #include <linux/module.h> | 14 | #include <linux/module.h> |
15 | #include <linux/i2c.h> | 15 | #include <linux/i2c.h> |
16 | #include <linux/acpi.h> | ||
16 | 17 | ||
17 | #include "wm8804.h" | 18 | #include "wm8804.h" |
18 | 19 | ||
@@ -40,17 +41,29 @@ static const struct i2c_device_id wm8804_i2c_id[] = { | |||
40 | }; | 41 | }; |
41 | MODULE_DEVICE_TABLE(i2c, wm8804_i2c_id); | 42 | MODULE_DEVICE_TABLE(i2c, wm8804_i2c_id); |
42 | 43 | ||
44 | #if defined(CONFIG_OF) | ||
43 | static const struct of_device_id wm8804_of_match[] = { | 45 | static const struct of_device_id wm8804_of_match[] = { |
44 | { .compatible = "wlf,wm8804", }, | 46 | { .compatible = "wlf,wm8804", }, |
45 | { } | 47 | { } |
46 | }; | 48 | }; |
47 | MODULE_DEVICE_TABLE(of, wm8804_of_match); | 49 | MODULE_DEVICE_TABLE(of, wm8804_of_match); |
50 | #endif | ||
51 | |||
52 | #ifdef CONFIG_ACPI | ||
53 | static const struct acpi_device_id wm8804_acpi_match[] = { | ||
54 | { "1AEC8804", 0 }, /* Wolfson PCI ID + part ID */ | ||
55 | { "10138804", 0 }, /* Cirrus Logic PCI ID + part ID */ | ||
56 | { }, | ||
57 | }; | ||
58 | MODULE_DEVICE_TABLE(acpi, wm8804_acpi_match); | ||
59 | #endif | ||
48 | 60 | ||
49 | static struct i2c_driver wm8804_i2c_driver = { | 61 | static struct i2c_driver wm8804_i2c_driver = { |
50 | .driver = { | 62 | .driver = { |
51 | .name = "wm8804", | 63 | .name = "wm8804", |
52 | .pm = &wm8804_pm, | 64 | .pm = &wm8804_pm, |
53 | .of_match_table = wm8804_of_match, | 65 | .of_match_table = of_match_ptr(wm8804_of_match), |
66 | .acpi_match_table = ACPI_PTR(wm8804_acpi_match), | ||
54 | }, | 67 | }, |
55 | .probe = wm8804_i2c_probe, | 68 | .probe = wm8804_i2c_probe, |
56 | .remove = wm8804_i2c_remove, | 69 | .remove = wm8804_i2c_remove, |
diff --git a/sound/soc/codecs/wm9712.c b/sound/soc/codecs/wm9712.c index 953d94d50586..ade34c26ad2f 100644 --- a/sound/soc/codecs/wm9712.c +++ b/sound/soc/codecs/wm9712.c | |||
@@ -719,7 +719,7 @@ static int wm9712_probe(struct platform_device *pdev) | |||
719 | 719 | ||
720 | static struct platform_driver wm9712_component_driver = { | 720 | static struct platform_driver wm9712_component_driver = { |
721 | .driver = { | 721 | .driver = { |
722 | .name = "wm9712-component", | 722 | .name = "wm9712-codec", |
723 | }, | 723 | }, |
724 | 724 | ||
725 | .probe = wm9712_probe, | 725 | .probe = wm9712_probe, |
diff --git a/sound/soc/intel/boards/bytcr_rt5640.c b/sound/soc/intel/boards/bytcr_rt5640.c index d32844f94d74..b6dc524830b2 100644 --- a/sound/soc/intel/boards/bytcr_rt5640.c +++ b/sound/soc/intel/boards/bytcr_rt5640.c | |||
@@ -575,6 +575,17 @@ static const struct dmi_system_id byt_rt5640_quirk_table[] = { | |||
575 | BYT_RT5640_MONO_SPEAKER | | 575 | BYT_RT5640_MONO_SPEAKER | |
576 | BYT_RT5640_MCLK_EN), | 576 | BYT_RT5640_MCLK_EN), |
577 | }, | 577 | }, |
578 | { /* Linx Linx7 tablet */ | ||
579 | .matches = { | ||
580 | DMI_EXACT_MATCH(DMI_SYS_VENDOR, "LINX"), | ||
581 | DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "LINX7"), | ||
582 | }, | ||
583 | .driver_data = (void *)(BYTCR_INPUT_DEFAULTS | | ||
584 | BYT_RT5640_MONO_SPEAKER | | ||
585 | BYT_RT5640_JD_NOT_INV | | ||
586 | BYT_RT5640_SSP0_AIF1 | | ||
587 | BYT_RT5640_MCLK_EN), | ||
588 | }, | ||
578 | { /* MSI S100 tablet */ | 589 | { /* MSI S100 tablet */ |
579 | .matches = { | 590 | .matches = { |
580 | DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Micro-Star International Co., Ltd."), | 591 | DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Micro-Star International Co., Ltd."), |
@@ -602,6 +613,21 @@ static const struct dmi_system_id byt_rt5640_quirk_table[] = { | |||
602 | BYT_RT5640_SSP0_AIF1 | | 613 | BYT_RT5640_SSP0_AIF1 | |
603 | BYT_RT5640_MCLK_EN), | 614 | BYT_RT5640_MCLK_EN), |
604 | }, | 615 | }, |
616 | { /* Onda v975w */ | ||
617 | .matches = { | ||
618 | DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "AMI Corporation"), | ||
619 | DMI_EXACT_MATCH(DMI_BOARD_NAME, "Aptio CRB"), | ||
620 | /* The above are too generic, also match BIOS info */ | ||
621 | DMI_EXACT_MATCH(DMI_BIOS_VERSION, "5.6.5"), | ||
622 | DMI_EXACT_MATCH(DMI_BIOS_DATE, "07/25/2014"), | ||
623 | }, | ||
624 | .driver_data = (void *)(BYT_RT5640_IN1_MAP | | ||
625 | BYT_RT5640_JD_SRC_JD2_IN4N | | ||
626 | BYT_RT5640_OVCD_TH_2000UA | | ||
627 | BYT_RT5640_OVCD_SF_0P75 | | ||
628 | BYT_RT5640_DIFF_MIC | | ||
629 | BYT_RT5640_MCLK_EN), | ||
630 | }, | ||
605 | { /* Pipo W4 */ | 631 | { /* Pipo W4 */ |
606 | .matches = { | 632 | .matches = { |
607 | DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "AMI Corporation"), | 633 | DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "AMI Corporation"), |
diff --git a/sound/soc/intel/skylake/skl.c b/sound/soc/intel/skylake/skl.c index dce649485649..1d17be0f78a0 100644 --- a/sound/soc/intel/skylake/skl.c +++ b/sound/soc/intel/skylake/skl.c | |||
@@ -834,7 +834,7 @@ static int skl_first_init(struct hdac_bus *bus) | |||
834 | return -ENXIO; | 834 | return -ENXIO; |
835 | } | 835 | } |
836 | 836 | ||
837 | skl_init_chip(bus, true); | 837 | snd_hdac_bus_reset_link(bus, true); |
838 | 838 | ||
839 | snd_hdac_bus_parse_capabilities(bus); | 839 | snd_hdac_bus_parse_capabilities(bus); |
840 | 840 | ||
diff --git a/sound/soc/qcom/qdsp6/q6routing.c b/sound/soc/qcom/qdsp6/q6routing.c index dc94c5c53788..c6b51571be94 100644 --- a/sound/soc/qcom/qdsp6/q6routing.c +++ b/sound/soc/qcom/qdsp6/q6routing.c | |||
@@ -960,8 +960,10 @@ static int msm_routing_probe(struct snd_soc_component *c) | |||
960 | { | 960 | { |
961 | int i; | 961 | int i; |
962 | 962 | ||
963 | for (i = 0; i < MAX_SESSIONS; i++) | 963 | for (i = 0; i < MAX_SESSIONS; i++) { |
964 | routing_data->sessions[i].port_id = -1; | 964 | routing_data->sessions[i].port_id = -1; |
965 | routing_data->sessions[i].fedai_id = -1; | ||
966 | } | ||
965 | 967 | ||
966 | return 0; | 968 | return 0; |
967 | } | 969 | } |
diff --git a/sound/soc/sh/rcar/adg.c b/sound/soc/sh/rcar/adg.c index 3a3064dda57f..051f96405346 100644 --- a/sound/soc/sh/rcar/adg.c +++ b/sound/soc/sh/rcar/adg.c | |||
@@ -462,6 +462,11 @@ static void rsnd_adg_get_clkout(struct rsnd_priv *priv, | |||
462 | goto rsnd_adg_get_clkout_end; | 462 | goto rsnd_adg_get_clkout_end; |
463 | 463 | ||
464 | req_size = prop->length / sizeof(u32); | 464 | req_size = prop->length / sizeof(u32); |
465 | if (req_size > REQ_SIZE) { | ||
466 | dev_err(dev, | ||
467 | "too many clock-frequency, use top %d\n", REQ_SIZE); | ||
468 | req_size = REQ_SIZE; | ||
469 | } | ||
465 | 470 | ||
466 | of_property_read_u32_array(np, "clock-frequency", req_rate, req_size); | 471 | of_property_read_u32_array(np, "clock-frequency", req_rate, req_size); |
467 | req_48kHz_rate = 0; | 472 | req_48kHz_rate = 0; |
diff --git a/sound/soc/sh/rcar/core.c b/sound/soc/sh/rcar/core.c index f8425d8b44d2..d23c2bbff0cf 100644 --- a/sound/soc/sh/rcar/core.c +++ b/sound/soc/sh/rcar/core.c | |||
@@ -478,7 +478,7 @@ static int rsnd_status_update(u32 *status, | |||
478 | (func_call && (mod)->ops->fn) ? #fn : ""); \ | 478 | (func_call && (mod)->ops->fn) ? #fn : ""); \ |
479 | if (func_call && (mod)->ops->fn) \ | 479 | if (func_call && (mod)->ops->fn) \ |
480 | tmp = (mod)->ops->fn(mod, io, param); \ | 480 | tmp = (mod)->ops->fn(mod, io, param); \ |
481 | if (tmp) \ | 481 | if (tmp && (tmp != -EPROBE_DEFER)) \ |
482 | dev_err(dev, "%s[%d] : %s error %d\n", \ | 482 | dev_err(dev, "%s[%d] : %s error %d\n", \ |
483 | rsnd_mod_name(mod), rsnd_mod_id(mod), \ | 483 | rsnd_mod_name(mod), rsnd_mod_id(mod), \ |
484 | #fn, tmp); \ | 484 | #fn, tmp); \ |
@@ -958,12 +958,23 @@ static void rsnd_soc_dai_shutdown(struct snd_pcm_substream *substream, | |||
958 | rsnd_dai_stream_quit(io); | 958 | rsnd_dai_stream_quit(io); |
959 | } | 959 | } |
960 | 960 | ||
961 | static int rsnd_soc_dai_prepare(struct snd_pcm_substream *substream, | ||
962 | struct snd_soc_dai *dai) | ||
963 | { | ||
964 | struct rsnd_priv *priv = rsnd_dai_to_priv(dai); | ||
965 | struct rsnd_dai *rdai = rsnd_dai_to_rdai(dai); | ||
966 | struct rsnd_dai_stream *io = rsnd_rdai_to_io(rdai, substream); | ||
967 | |||
968 | return rsnd_dai_call(prepare, io, priv); | ||
969 | } | ||
970 | |||
961 | static const struct snd_soc_dai_ops rsnd_soc_dai_ops = { | 971 | static const struct snd_soc_dai_ops rsnd_soc_dai_ops = { |
962 | .startup = rsnd_soc_dai_startup, | 972 | .startup = rsnd_soc_dai_startup, |
963 | .shutdown = rsnd_soc_dai_shutdown, | 973 | .shutdown = rsnd_soc_dai_shutdown, |
964 | .trigger = rsnd_soc_dai_trigger, | 974 | .trigger = rsnd_soc_dai_trigger, |
965 | .set_fmt = rsnd_soc_dai_set_fmt, | 975 | .set_fmt = rsnd_soc_dai_set_fmt, |
966 | .set_tdm_slot = rsnd_soc_set_dai_tdm_slot, | 976 | .set_tdm_slot = rsnd_soc_set_dai_tdm_slot, |
977 | .prepare = rsnd_soc_dai_prepare, | ||
967 | }; | 978 | }; |
968 | 979 | ||
969 | void rsnd_parse_connect_common(struct rsnd_dai *rdai, | 980 | void rsnd_parse_connect_common(struct rsnd_dai *rdai, |
@@ -1550,6 +1561,14 @@ exit_snd_probe: | |||
1550 | rsnd_dai_call(remove, &rdai->capture, priv); | 1561 | rsnd_dai_call(remove, &rdai->capture, priv); |
1551 | } | 1562 | } |
1552 | 1563 | ||
1564 | /* | ||
1565 | * adg is a very special mod which can't use rsnd_dai_call(remove), | ||
1566 | * and it registers the ADG clock on probe. | ||
1567 | * That clock has to be unregistered if probe failed, | ||
1568 | * mainly for the -EPROBE_DEFER case. | ||
1569 | */ | ||
1570 | rsnd_adg_remove(priv); | ||
1571 | |||
1553 | return ret; | 1572 | return ret; |
1554 | } | 1573 | } |
1555 | 1574 | ||
diff --git a/sound/soc/sh/rcar/dma.c b/sound/soc/sh/rcar/dma.c index fe63ef8600d0..d65ea7bc4dac 100644 --- a/sound/soc/sh/rcar/dma.c +++ b/sound/soc/sh/rcar/dma.c | |||
@@ -241,6 +241,10 @@ static int rsnd_dmaen_attach(struct rsnd_dai_stream *io, | |||
241 | /* try to get DMAEngine channel */ | 241 | /* try to get DMAEngine channel */ |
242 | chan = rsnd_dmaen_request_channel(io, mod_from, mod_to); | 242 | chan = rsnd_dmaen_request_channel(io, mod_from, mod_to); |
243 | if (IS_ERR_OR_NULL(chan)) { | 243 | if (IS_ERR_OR_NULL(chan)) { |
244 | /* Propagate -EPROBE_DEFER instead of falling back to PIO */ | ||
245 | if (PTR_ERR(chan) == -EPROBE_DEFER) | ||
246 | return PTR_ERR(chan); | ||
247 | |||
244 | /* | 248 | /* |
245 | * DMA failed. try to PIO mode | 249 | * DMA failed. try to PIO mode |
246 | * see | 250 | * see |
diff --git a/sound/soc/sh/rcar/rsnd.h b/sound/soc/sh/rcar/rsnd.h index 96d93330b1e1..8f7a0abfa751 100644 --- a/sound/soc/sh/rcar/rsnd.h +++ b/sound/soc/sh/rcar/rsnd.h | |||
@@ -280,6 +280,9 @@ struct rsnd_mod_ops { | |||
280 | int (*nolock_stop)(struct rsnd_mod *mod, | 280 | int (*nolock_stop)(struct rsnd_mod *mod, |
281 | struct rsnd_dai_stream *io, | 281 | struct rsnd_dai_stream *io, |
282 | struct rsnd_priv *priv); | 282 | struct rsnd_priv *priv); |
283 | int (*prepare)(struct rsnd_mod *mod, | ||
284 | struct rsnd_dai_stream *io, | ||
285 | struct rsnd_priv *priv); | ||
283 | }; | 286 | }; |
284 | 287 | ||
285 | struct rsnd_dai_stream; | 288 | struct rsnd_dai_stream; |
@@ -309,6 +312,7 @@ struct rsnd_mod { | |||
309 | * H 0: fallback | 312 | * H 0: fallback |
310 | * H 0: hw_params | 313 | * H 0: hw_params |
311 | * H 0: pointer | 314 | * H 0: pointer |
315 | * H 0: prepare | ||
312 | */ | 316 | */ |
313 | #define __rsnd_mod_shift_nolock_start 0 | 317 | #define __rsnd_mod_shift_nolock_start 0 |
314 | #define __rsnd_mod_shift_nolock_stop 0 | 318 | #define __rsnd_mod_shift_nolock_stop 0 |
@@ -323,6 +327,7 @@ struct rsnd_mod { | |||
323 | #define __rsnd_mod_shift_fallback 28 /* always called */ | 327 | #define __rsnd_mod_shift_fallback 28 /* always called */ |
324 | #define __rsnd_mod_shift_hw_params 28 /* always called */ | 328 | #define __rsnd_mod_shift_hw_params 28 /* always called */ |
325 | #define __rsnd_mod_shift_pointer 28 /* always called */ | 329 | #define __rsnd_mod_shift_pointer 28 /* always called */ |
330 | #define __rsnd_mod_shift_prepare 28 /* always called */ | ||
326 | 331 | ||
327 | #define __rsnd_mod_add_probe 0 | 332 | #define __rsnd_mod_add_probe 0 |
328 | #define __rsnd_mod_add_remove 0 | 333 | #define __rsnd_mod_add_remove 0 |
@@ -337,6 +342,7 @@ struct rsnd_mod { | |||
337 | #define __rsnd_mod_add_fallback 0 | 342 | #define __rsnd_mod_add_fallback 0 |
338 | #define __rsnd_mod_add_hw_params 0 | 343 | #define __rsnd_mod_add_hw_params 0 |
339 | #define __rsnd_mod_add_pointer 0 | 344 | #define __rsnd_mod_add_pointer 0 |
345 | #define __rsnd_mod_add_prepare 0 | ||
340 | 346 | ||
341 | #define __rsnd_mod_call_probe 0 | 347 | #define __rsnd_mod_call_probe 0 |
342 | #define __rsnd_mod_call_remove 0 | 348 | #define __rsnd_mod_call_remove 0 |
@@ -351,6 +357,7 @@ struct rsnd_mod { | |||
351 | #define __rsnd_mod_call_pointer 0 | 357 | #define __rsnd_mod_call_pointer 0 |
352 | #define __rsnd_mod_call_nolock_start 0 | 358 | #define __rsnd_mod_call_nolock_start 0 |
353 | #define __rsnd_mod_call_nolock_stop 1 | 359 | #define __rsnd_mod_call_nolock_stop 1 |
360 | #define __rsnd_mod_call_prepare 0 | ||
354 | 361 | ||
355 | #define rsnd_mod_to_priv(mod) ((mod)->priv) | 362 | #define rsnd_mod_to_priv(mod) ((mod)->priv) |
356 | #define rsnd_mod_name(mod) ((mod)->ops->name) | 363 | #define rsnd_mod_name(mod) ((mod)->ops->name) |
diff --git a/sound/soc/sh/rcar/ssi.c b/sound/soc/sh/rcar/ssi.c index 8304e4ec9242..3f880ec66459 100644 --- a/sound/soc/sh/rcar/ssi.c +++ b/sound/soc/sh/rcar/ssi.c | |||
@@ -283,7 +283,7 @@ static int rsnd_ssi_master_clk_start(struct rsnd_mod *mod, | |||
283 | if (rsnd_ssi_is_multi_slave(mod, io)) | 283 | if (rsnd_ssi_is_multi_slave(mod, io)) |
284 | return 0; | 284 | return 0; |
285 | 285 | ||
286 | if (ssi->usrcnt > 1) { | 286 | if (ssi->rate) { |
287 | if (ssi->rate != rate) { | 287 | if (ssi->rate != rate) { |
288 | dev_err(dev, "SSI parent/child should use same rate\n"); | 288 | dev_err(dev, "SSI parent/child should use same rate\n"); |
289 | return -EINVAL; | 289 | return -EINVAL; |
@@ -434,7 +434,6 @@ static int rsnd_ssi_init(struct rsnd_mod *mod, | |||
434 | struct rsnd_priv *priv) | 434 | struct rsnd_priv *priv) |
435 | { | 435 | { |
436 | struct rsnd_ssi *ssi = rsnd_mod_to_ssi(mod); | 436 | struct rsnd_ssi *ssi = rsnd_mod_to_ssi(mod); |
437 | int ret; | ||
438 | 437 | ||
439 | if (!rsnd_ssi_is_run_mods(mod, io)) | 438 | if (!rsnd_ssi_is_run_mods(mod, io)) |
440 | return 0; | 439 | return 0; |
@@ -443,10 +442,6 @@ static int rsnd_ssi_init(struct rsnd_mod *mod, | |||
443 | 442 | ||
444 | rsnd_mod_power_on(mod); | 443 | rsnd_mod_power_on(mod); |
445 | 444 | ||
446 | ret = rsnd_ssi_master_clk_start(mod, io); | ||
447 | if (ret < 0) | ||
448 | return ret; | ||
449 | |||
450 | rsnd_ssi_config_init(mod, io); | 445 | rsnd_ssi_config_init(mod, io); |
451 | 446 | ||
452 | rsnd_ssi_register_setup(mod); | 447 | rsnd_ssi_register_setup(mod); |
@@ -852,6 +847,13 @@ static int rsnd_ssi_pio_pointer(struct rsnd_mod *mod, | |||
852 | return 0; | 847 | return 0; |
853 | } | 848 | } |
854 | 849 | ||
850 | static int rsnd_ssi_prepare(struct rsnd_mod *mod, | ||
851 | struct rsnd_dai_stream *io, | ||
852 | struct rsnd_priv *priv) | ||
853 | { | ||
854 | return rsnd_ssi_master_clk_start(mod, io); | ||
855 | } | ||
856 | |||
855 | static struct rsnd_mod_ops rsnd_ssi_pio_ops = { | 857 | static struct rsnd_mod_ops rsnd_ssi_pio_ops = { |
856 | .name = SSI_NAME, | 858 | .name = SSI_NAME, |
857 | .probe = rsnd_ssi_common_probe, | 859 | .probe = rsnd_ssi_common_probe, |
@@ -864,6 +866,7 @@ static struct rsnd_mod_ops rsnd_ssi_pio_ops = { | |||
864 | .pointer = rsnd_ssi_pio_pointer, | 866 | .pointer = rsnd_ssi_pio_pointer, |
865 | .pcm_new = rsnd_ssi_pcm_new, | 867 | .pcm_new = rsnd_ssi_pcm_new, |
866 | .hw_params = rsnd_ssi_hw_params, | 868 | .hw_params = rsnd_ssi_hw_params, |
869 | .prepare = rsnd_ssi_prepare, | ||
867 | }; | 870 | }; |
868 | 871 | ||
869 | static int rsnd_ssi_dma_probe(struct rsnd_mod *mod, | 872 | static int rsnd_ssi_dma_probe(struct rsnd_mod *mod, |
@@ -940,6 +943,7 @@ static struct rsnd_mod_ops rsnd_ssi_dma_ops = { | |||
940 | .pcm_new = rsnd_ssi_pcm_new, | 943 | .pcm_new = rsnd_ssi_pcm_new, |
941 | .fallback = rsnd_ssi_fallback, | 944 | .fallback = rsnd_ssi_fallback, |
942 | .hw_params = rsnd_ssi_hw_params, | 945 | .hw_params = rsnd_ssi_hw_params, |
946 | .prepare = rsnd_ssi_prepare, | ||
943 | }; | 947 | }; |
944 | 948 | ||
945 | int rsnd_ssi_is_dma_mode(struct rsnd_mod *mod) | 949 | int rsnd_ssi_is_dma_mode(struct rsnd_mod *mod) |
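Taken together, the core.c, rsnd.h and ssi.c hunks move SSI master clock start out of .init and into a new .prepare op dispatched from rsnd_soc_dai_prepare(). Because ALSA invokes the DAI prepare callback on every stream prepare, the clock start must tolerate being called repeatedly, which is why the guard changes from a use count to "is a rate already stored". A hedged, stand-alone sketch of that idempotent pattern follows; the names are illustrative, not the r-car driver's.

#include <errno.h>

struct foo_clk {
	unsigned int rate;	/* 0 while the clock is stopped */
};

/* May be called from every prepare; must be safe to repeat. */
static int foo_clk_start(struct foo_clk *clk, unsigned int rate)
{
	if (clk->rate) {		/* already running */
		if (clk->rate != rate)
			return -EINVAL;	/* parent/child must agree on rate */
		return 0;		/* nothing to do */
	}

	/* ... program the hardware here; may fail, e.g. with -EPROBE_DEFER ... */

	clk->rate = rate;
	return 0;
}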
diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c index 9cfe10d8040c..473eefe8658e 100644 --- a/sound/soc/soc-core.c +++ b/sound/soc/soc-core.c | |||
@@ -1447,7 +1447,7 @@ static int soc_link_dai_widgets(struct snd_soc_card *card, | |||
1447 | sink = codec_dai->playback_widget; | 1447 | sink = codec_dai->playback_widget; |
1448 | source = cpu_dai->capture_widget; | 1448 | source = cpu_dai->capture_widget; |
1449 | if (sink && source) { | 1449 | if (sink && source) { |
1450 | ret = snd_soc_dapm_new_pcm(card, dai_link->params, | 1450 | ret = snd_soc_dapm_new_pcm(card, rtd, dai_link->params, |
1451 | dai_link->num_params, | 1451 | dai_link->num_params, |
1452 | source, sink); | 1452 | source, sink); |
1453 | if (ret != 0) { | 1453 | if (ret != 0) { |
@@ -1460,7 +1460,7 @@ static int soc_link_dai_widgets(struct snd_soc_card *card, | |||
1460 | sink = cpu_dai->playback_widget; | 1460 | sink = cpu_dai->playback_widget; |
1461 | source = codec_dai->capture_widget; | 1461 | source = codec_dai->capture_widget; |
1462 | if (sink && source) { | 1462 | if (sink && source) { |
1463 | ret = snd_soc_dapm_new_pcm(card, dai_link->params, | 1463 | ret = snd_soc_dapm_new_pcm(card, rtd, dai_link->params, |
1464 | dai_link->num_params, | 1464 | dai_link->num_params, |
1465 | source, sink); | 1465 | source, sink); |
1466 | if (ret != 0) { | 1466 | if (ret != 0) { |
diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c index 7e96793050c9..461d951917c0 100644 --- a/sound/soc/soc-dapm.c +++ b/sound/soc/soc-dapm.c | |||
@@ -3652,6 +3652,7 @@ static int snd_soc_dai_link_event(struct snd_soc_dapm_widget *w, | |||
3652 | { | 3652 | { |
3653 | struct snd_soc_dapm_path *source_p, *sink_p; | 3653 | struct snd_soc_dapm_path *source_p, *sink_p; |
3654 | struct snd_soc_dai *source, *sink; | 3654 | struct snd_soc_dai *source, *sink; |
3655 | struct snd_soc_pcm_runtime *rtd = w->priv; | ||
3655 | const struct snd_soc_pcm_stream *config = w->params + w->params_select; | 3656 | const struct snd_soc_pcm_stream *config = w->params + w->params_select; |
3656 | struct snd_pcm_substream substream; | 3657 | struct snd_pcm_substream substream; |
3657 | struct snd_pcm_hw_params *params = NULL; | 3658 | struct snd_pcm_hw_params *params = NULL; |
@@ -3711,6 +3712,7 @@ static int snd_soc_dai_link_event(struct snd_soc_dapm_widget *w, | |||
3711 | goto out; | 3712 | goto out; |
3712 | } | 3713 | } |
3713 | substream.runtime = runtime; | 3714 | substream.runtime = runtime; |
3715 | substream.private_data = rtd; | ||
3714 | 3716 | ||
3715 | switch (event) { | 3717 | switch (event) { |
3716 | case SND_SOC_DAPM_PRE_PMU: | 3718 | case SND_SOC_DAPM_PRE_PMU: |
@@ -3895,6 +3897,7 @@ outfree_w_param: | |||
3895 | } | 3897 | } |
3896 | 3898 | ||
3897 | int snd_soc_dapm_new_pcm(struct snd_soc_card *card, | 3899 | int snd_soc_dapm_new_pcm(struct snd_soc_card *card, |
3900 | struct snd_soc_pcm_runtime *rtd, | ||
3898 | const struct snd_soc_pcm_stream *params, | 3901 | const struct snd_soc_pcm_stream *params, |
3899 | unsigned int num_params, | 3902 | unsigned int num_params, |
3900 | struct snd_soc_dapm_widget *source, | 3903 | struct snd_soc_dapm_widget *source, |
@@ -3963,6 +3966,7 @@ int snd_soc_dapm_new_pcm(struct snd_soc_card *card, | |||
3963 | 3966 | ||
3964 | w->params = params; | 3967 | w->params = params; |
3965 | w->num_params = num_params; | 3968 | w->num_params = num_params; |
3969 | w->priv = rtd; | ||
3966 | 3970 | ||
3967 | ret = snd_soc_dapm_add_path(&card->dapm, source, w, NULL, NULL); | 3971 | ret = snd_soc_dapm_add_path(&card->dapm, source, w, NULL, NULL); |
3968 | if (ret) | 3972 | if (ret) |
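The soc-core.c and soc-dapm.c hunks pass the snd_soc_pcm_runtime into snd_soc_dapm_new_pcm(), park it in the widget's priv field, and copy it into substream.private_data before the codec-to-codec link event runs. On an ordinary PCM the ASoC core already points private_data at the runtime, and DAI ops commonly rely on that; the on-stack substream used for codec-to-codec links previously left it NULL. The hypothetical callback below (foo_codec_hw_params is not a real driver) illustrates the access pattern this change keeps working.

#include <linux/errno.h>
#include <sound/pcm.h>
#include <sound/soc.h>

static int foo_codec_hw_params(struct snd_pcm_substream *substream,
			       struct snd_pcm_hw_params *params,
			       struct snd_soc_dai *dai)
{
	/* Many drivers reach the card/runtime through the substream. */
	struct snd_soc_pcm_runtime *rtd = substream->private_data;

	if (!rtd)
		return -EINVAL;

	/* ... use rtd->card, rtd->dev, params, dai ... */
	return 0;
}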
diff --git a/tools/include/tools/libc_compat.h b/tools/include/tools/libc_compat.h index 664ced8cb1b0..e907ba6f15e5 100644 --- a/tools/include/tools/libc_compat.h +++ b/tools/include/tools/libc_compat.h | |||
@@ -1,4 +1,4 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0+ | 1 | // SPDX-License-Identifier: (LGPL-2.0+ OR BSD-2-Clause) |
2 | /* Copyright (C) 2018 Netronome Systems, Inc. */ | 2 | /* Copyright (C) 2018 Netronome Systems, Inc. */ |
3 | 3 | ||
4 | #ifndef __TOOLS_LIBC_COMPAT_H | 4 | #ifndef __TOOLS_LIBC_COMPAT_H |
diff --git a/tools/lib/bpf/Build b/tools/lib/bpf/Build index 13a861135127..6eb9bacd1948 100644 --- a/tools/lib/bpf/Build +++ b/tools/lib/bpf/Build | |||
@@ -1 +1 @@ | |||
libbpf-y := libbpf.o bpf.o nlattr.o btf.o libbpf_errno.o | libbpf-y := libbpf.o bpf.o nlattr.o btf.o libbpf_errno.o str_error.o | ||
diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c index 2abd0f112627..bdb94939fd60 100644 --- a/tools/lib/bpf/libbpf.c +++ b/tools/lib/bpf/libbpf.c | |||
@@ -50,6 +50,7 @@ | |||
50 | #include "libbpf.h" | 50 | #include "libbpf.h" |
51 | #include "bpf.h" | 51 | #include "bpf.h" |
52 | #include "btf.h" | 52 | #include "btf.h" |
53 | #include "str_error.h" | ||
53 | 54 | ||
54 | #ifndef EM_BPF | 55 | #ifndef EM_BPF |
55 | #define EM_BPF 247 | 56 | #define EM_BPF 247 |
@@ -469,7 +470,7 @@ static int bpf_object__elf_init(struct bpf_object *obj) | |||
469 | obj->efile.fd = open(obj->path, O_RDONLY); | 470 | obj->efile.fd = open(obj->path, O_RDONLY); |
470 | if (obj->efile.fd < 0) { | 471 | if (obj->efile.fd < 0) { |
471 | char errmsg[STRERR_BUFSIZE]; | 472 | char errmsg[STRERR_BUFSIZE]; |
472 | char *cp = strerror_r(errno, errmsg, sizeof(errmsg)); | 473 | char *cp = str_error(errno, errmsg, sizeof(errmsg)); |
473 | 474 | ||
474 | pr_warning("failed to open %s: %s\n", obj->path, cp); | 475 | pr_warning("failed to open %s: %s\n", obj->path, cp); |
475 | return -errno; | 476 | return -errno; |
@@ -810,8 +811,7 @@ static int bpf_object__elf_collect(struct bpf_object *obj) | |||
810 | data->d_size, name, idx); | 811 | data->d_size, name, idx); |
811 | if (err) { | 812 | if (err) { |
812 | char errmsg[STRERR_BUFSIZE]; | 813 | char errmsg[STRERR_BUFSIZE]; |
813 | char *cp = strerror_r(-err, errmsg, | 814 | char *cp = str_error(-err, errmsg, sizeof(errmsg)); |
814 | sizeof(errmsg)); | ||
815 | 815 | ||
816 | pr_warning("failed to alloc program %s (%s): %s", | 816 | pr_warning("failed to alloc program %s (%s): %s", |
817 | name, obj->path, cp); | 817 | name, obj->path, cp); |
@@ -1140,7 +1140,7 @@ bpf_object__create_maps(struct bpf_object *obj) | |||
1140 | 1140 | ||
1141 | *pfd = bpf_create_map_xattr(&create_attr); | 1141 | *pfd = bpf_create_map_xattr(&create_attr); |
1142 | if (*pfd < 0 && create_attr.btf_key_type_id) { | 1142 | if (*pfd < 0 && create_attr.btf_key_type_id) { |
1143 | cp = strerror_r(errno, errmsg, sizeof(errmsg)); | 1143 | cp = str_error(errno, errmsg, sizeof(errmsg)); |
1144 | pr_warning("Error in bpf_create_map_xattr(%s):%s(%d). Retrying without BTF.\n", | 1144 | pr_warning("Error in bpf_create_map_xattr(%s):%s(%d). Retrying without BTF.\n", |
1145 | map->name, cp, errno); | 1145 | map->name, cp, errno); |
1146 | create_attr.btf_fd = 0; | 1146 | create_attr.btf_fd = 0; |
@@ -1155,7 +1155,7 @@ bpf_object__create_maps(struct bpf_object *obj) | |||
1155 | size_t j; | 1155 | size_t j; |
1156 | 1156 | ||
1157 | err = *pfd; | 1157 | err = *pfd; |
1158 | cp = strerror_r(errno, errmsg, sizeof(errmsg)); | 1158 | cp = str_error(errno, errmsg, sizeof(errmsg)); |
1159 | pr_warning("failed to create map (name: '%s'): %s\n", | 1159 | pr_warning("failed to create map (name: '%s'): %s\n", |
1160 | map->name, cp); | 1160 | map->name, cp); |
1161 | for (j = 0; j < i; j++) | 1161 | for (j = 0; j < i; j++) |
@@ -1339,7 +1339,7 @@ load_program(enum bpf_prog_type type, enum bpf_attach_type expected_attach_type, | |||
1339 | } | 1339 | } |
1340 | 1340 | ||
1341 | ret = -LIBBPF_ERRNO__LOAD; | 1341 | ret = -LIBBPF_ERRNO__LOAD; |
1342 | cp = strerror_r(errno, errmsg, sizeof(errmsg)); | 1342 | cp = str_error(errno, errmsg, sizeof(errmsg)); |
1343 | pr_warning("load bpf program failed: %s\n", cp); | 1343 | pr_warning("load bpf program failed: %s\n", cp); |
1344 | 1344 | ||
1345 | if (log_buf && log_buf[0] != '\0') { | 1345 | if (log_buf && log_buf[0] != '\0') { |
@@ -1654,7 +1654,7 @@ static int check_path(const char *path) | |||
1654 | 1654 | ||
1655 | dir = dirname(dname); | 1655 | dir = dirname(dname); |
1656 | if (statfs(dir, &st_fs)) { | 1656 | if (statfs(dir, &st_fs)) { |
1657 | cp = strerror_r(errno, errmsg, sizeof(errmsg)); | 1657 | cp = str_error(errno, errmsg, sizeof(errmsg)); |
1658 | pr_warning("failed to statfs %s: %s\n", dir, cp); | 1658 | pr_warning("failed to statfs %s: %s\n", dir, cp); |
1659 | err = -errno; | 1659 | err = -errno; |
1660 | } | 1660 | } |
@@ -1690,7 +1690,7 @@ int bpf_program__pin_instance(struct bpf_program *prog, const char *path, | |||
1690 | } | 1690 | } |
1691 | 1691 | ||
1692 | if (bpf_obj_pin(prog->instances.fds[instance], path)) { | 1692 | if (bpf_obj_pin(prog->instances.fds[instance], path)) { |
1693 | cp = strerror_r(errno, errmsg, sizeof(errmsg)); | 1693 | cp = str_error(errno, errmsg, sizeof(errmsg)); |
1694 | pr_warning("failed to pin program: %s\n", cp); | 1694 | pr_warning("failed to pin program: %s\n", cp); |
1695 | return -errno; | 1695 | return -errno; |
1696 | } | 1696 | } |
@@ -1708,7 +1708,7 @@ static int make_dir(const char *path) | |||
1708 | err = -errno; | 1708 | err = -errno; |
1709 | 1709 | ||
1710 | if (err) { | 1710 | if (err) { |
1711 | cp = strerror_r(-err, errmsg, sizeof(errmsg)); | 1711 | cp = str_error(-err, errmsg, sizeof(errmsg)); |
1712 | pr_warning("failed to mkdir %s: %s\n", path, cp); | 1712 | pr_warning("failed to mkdir %s: %s\n", path, cp); |
1713 | } | 1713 | } |
1714 | return err; | 1714 | return err; |
@@ -1770,7 +1770,7 @@ int bpf_map__pin(struct bpf_map *map, const char *path) | |||
1770 | } | 1770 | } |
1771 | 1771 | ||
1772 | if (bpf_obj_pin(map->fd, path)) { | 1772 | if (bpf_obj_pin(map->fd, path)) { |
1773 | cp = strerror_r(errno, errmsg, sizeof(errmsg)); | 1773 | cp = str_error(errno, errmsg, sizeof(errmsg)); |
1774 | pr_warning("failed to pin map: %s\n", cp); | 1774 | pr_warning("failed to pin map: %s\n", cp); |
1775 | return -errno; | 1775 | return -errno; |
1776 | } | 1776 | } |
diff --git a/tools/lib/bpf/str_error.c b/tools/lib/bpf/str_error.c new file mode 100644 index 000000000000..b8798114a357 --- /dev/null +++ b/tools/lib/bpf/str_error.c | |||
@@ -0,0 +1,18 @@ | |||
1 | // SPDX-License-Identifier: LGPL-2.1 | ||
2 | #undef _GNU_SOURCE | ||
3 | #include <string.h> | ||
4 | #include <stdio.h> | ||
5 | #include "str_error.h" | ||
6 | |||
7 | /* | ||
8 | * Wrapper to allow for building in non-GNU systems such as Alpine Linux's musl | ||
9 | * libc, while checking strerror_r() return to avoid having to check this in | ||
10 | * all places calling it. | ||
11 | */ | ||
12 | char *str_error(int err, char *dst, int len) | ||
13 | { | ||
14 | int ret = strerror_r(err, dst, len); | ||
15 | if (ret) | ||
16 | snprintf(dst, len, "ERROR: strerror_r(%d)=%d", err, ret); | ||
17 | return dst; | ||
18 | } | ||
diff --git a/tools/lib/bpf/str_error.h b/tools/lib/bpf/str_error.h new file mode 100644 index 000000000000..355b1db571d1 --- /dev/null +++ b/tools/lib/bpf/str_error.h | |||
@@ -0,0 +1,6 @@ | |||
1 | // SPDX-License-Identifier: LGPL-2.1 | ||
2 | #ifndef BPF_STR_ERROR | ||
3 | #define BPF_STR_ERROR | ||
4 | |||
5 | char *str_error(int err, char *dst, int len); | ||
6 | #endif // BPF_STR_ERROR | ||
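libbpf above replaces direct strerror_r() calls with a local str_error() wrapper because the GNU and XSI/POSIX variants of strerror_r() differ: glibc with _GNU_SOURCE returns a char * (possibly pointing at a static string), while musl and the POSIX variant return an int and fill the caller's buffer. Undefining _GNU_SOURCE in str_error.c pins the wrapper to the int-returning variant, so the same code builds on Alpine/musl and always yields a printable message. A small userspace usage sketch (the str_error() prototype comes from the new header above; everything else is standard C):

#include <errno.h>
#include <stdio.h>
#include "str_error.h"

int open_or_warn(const char *path)
{
	FILE *f = fopen(path, "r");

	if (!f) {
		char buf[128];

		/* always produces a printable message, even if strerror_r() fails */
		fprintf(stderr, "failed to open %s: %s\n",
			path, str_error(errno, buf, sizeof(buf)));
		return -1;
	}
	fclose(f);
	return 0;
}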
diff --git a/tools/perf/Documentation/Makefile b/tools/perf/Documentation/Makefile index 42261a9b280e..ac841bc5c35b 100644 --- a/tools/perf/Documentation/Makefile +++ b/tools/perf/Documentation/Makefile | |||
@@ -280,7 +280,7 @@ $(MAN_HTML): $(OUTPUT)%.html : %.txt | |||
280 | mv $@+ $@ | 280 | mv $@+ $@ |
281 | 281 | ||
282 | ifdef USE_ASCIIDOCTOR | 282 | ifdef USE_ASCIIDOCTOR |
283 | $(OUTPUT)%.1 $(OUTPUT)%.5 $(OUTPUT)%.7 : $(OUTPUT)%.txt | 283 | $(OUTPUT)%.1 $(OUTPUT)%.5 $(OUTPUT)%.7 : %.txt |
284 | $(QUIET_ASCIIDOC)$(RM) $@+ $@ && \ | 284 | $(QUIET_ASCIIDOC)$(RM) $@+ $@ && \ |
285 | $(ASCIIDOC) -b manpage -d manpage \ | 285 | $(ASCIIDOC) -b manpage -d manpage \ |
286 | $(ASCIIDOC_EXTRA) -aperf_version=$(PERF_VERSION) -o $@+ $< && \ | 286 | $(ASCIIDOC_EXTRA) -aperf_version=$(PERF_VERSION) -o $@+ $< && \ |
diff --git a/tools/testing/selftests/android/Makefile b/tools/testing/selftests/android/Makefile index 72c25a3cb658..d9a725478375 100644 --- a/tools/testing/selftests/android/Makefile +++ b/tools/testing/selftests/android/Makefile | |||
@@ -6,7 +6,7 @@ TEST_PROGS := run.sh | |||
6 | 6 | ||
7 | include ../lib.mk | 7 | include ../lib.mk |
8 | 8 | ||
9 | all: | 9 | all: khdr |
10 | @for DIR in $(SUBDIRS); do \ | 10 | @for DIR in $(SUBDIRS); do \ |
11 | BUILD_TARGET=$(OUTPUT)/$$DIR; \ | 11 | BUILD_TARGET=$(OUTPUT)/$$DIR; \ |
12 | mkdir $$BUILD_TARGET -p; \ | 12 | mkdir $$BUILD_TARGET -p; \ |
diff --git a/tools/testing/selftests/android/ion/config b/tools/testing/selftests/android/config index b4ad748a9dd9..b4ad748a9dd9 100644 --- a/tools/testing/selftests/android/ion/config +++ b/tools/testing/selftests/android/config | |||
diff --git a/tools/testing/selftests/android/ion/Makefile b/tools/testing/selftests/android/ion/Makefile index e03695287f76..88cfe88e466f 100644 --- a/tools/testing/selftests/android/ion/Makefile +++ b/tools/testing/selftests/android/ion/Makefile | |||
@@ -10,6 +10,8 @@ $(TEST_GEN_FILES): ipcsocket.c ionutils.c | |||
10 | 10 | ||
11 | TEST_PROGS := ion_test.sh | 11 | TEST_PROGS := ion_test.sh |
12 | 12 | ||
13 | KSFT_KHDR_INSTALL := 1 | ||
14 | top_srcdir = ../../../../.. | ||
13 | include ../../lib.mk | 15 | include ../../lib.mk |
14 | 16 | ||
15 | $(OUTPUT)/ionapp_export: ionapp_export.c ipcsocket.c ionutils.c | 17 | $(OUTPUT)/ionapp_export: ionapp_export.c ipcsocket.c ionutils.c |
diff --git a/tools/testing/selftests/bpf/test_maps.c b/tools/testing/selftests/bpf/test_maps.c index 6f54f84144a0..9b552c0fc47d 100644 --- a/tools/testing/selftests/bpf/test_maps.c +++ b/tools/testing/selftests/bpf/test_maps.c | |||
@@ -580,7 +580,11 @@ static void test_sockmap(int tasks, void *data) | |||
580 | /* Test update without programs */ | 580 | /* Test update without programs */ |
581 | for (i = 0; i < 6; i++) { | 581 | for (i = 0; i < 6; i++) { |
582 | err = bpf_map_update_elem(fd, &i, &sfd[i], BPF_ANY); | 582 | err = bpf_map_update_elem(fd, &i, &sfd[i], BPF_ANY); |
583 | if (err) { | 583 | if (i < 2 && !err) { |
584 | printf("Allowed update sockmap '%i:%i' not in ESTABLISHED\n", | ||
585 | i, sfd[i]); | ||
586 | goto out_sockmap; | ||
587 | } else if (i >= 2 && err) { | ||
584 | printf("Failed noprog update sockmap '%i:%i'\n", | 588 | printf("Failed noprog update sockmap '%i:%i'\n", |
585 | i, sfd[i]); | 589 | i, sfd[i]); |
586 | goto out_sockmap; | 590 | goto out_sockmap; |
@@ -741,7 +745,7 @@ static void test_sockmap(int tasks, void *data) | |||
741 | } | 745 | } |
742 | 746 | ||
743 | /* Test map update elem afterwards fd lives in fd and map_fd */ | 747 | /* Test map update elem afterwards fd lives in fd and map_fd */ |
744 | for (i = 0; i < 6; i++) { | 748 | for (i = 2; i < 6; i++) { |
745 | err = bpf_map_update_elem(map_fd_rx, &i, &sfd[i], BPF_ANY); | 749 | err = bpf_map_update_elem(map_fd_rx, &i, &sfd[i], BPF_ANY); |
746 | if (err) { | 750 | if (err) { |
747 | printf("Failed map_fd_rx update sockmap %i '%i:%i'\n", | 751 | printf("Failed map_fd_rx update sockmap %i '%i:%i'\n", |
@@ -845,7 +849,7 @@ static void test_sockmap(int tasks, void *data) | |||
845 | } | 849 | } |
846 | 850 | ||
847 | /* Delete the elems without programs */ | 851 | /* Delete the elems without programs */ |
848 | for (i = 0; i < 6; i++) { | 852 | for (i = 2; i < 6; i++) { |
849 | err = bpf_map_delete_elem(fd, &i); | 853 | err = bpf_map_delete_elem(fd, &i); |
850 | if (err) { | 854 | if (err) { |
851 | printf("Failed delete sockmap %i '%i:%i'\n", | 855 | printf("Failed delete sockmap %i '%i:%i'\n", |
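The test_sockmap() changes above track a kernel behaviour change: only TCP sockets in ESTABLISHED state may be inserted into a sockmap, so updates for the first two descriptors (sockets not yet established) are now expected to fail, and the later update/delete loops start at index 2. A hedged sketch of what a caller should now expect, using libbpf's bpf.h; map_fd is assumed to be a BPF_MAP_TYPE_SOCKMAP fd and lfd a listening TCP socket fd.

#include <stdio.h>
#include <bpf/bpf.h>

/* Expect failure when inserting a socket that is not ESTABLISHED. */
static int check_listen_sock_rejected(int map_fd, int lfd)
{
	int key = 0;

	if (bpf_map_update_elem(map_fd, &key, &lfd, BPF_ANY) == 0) {
		fprintf(stderr, "sockmap accepted a non-established socket\n");
		return -1;
	}
	return 0;
}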
diff --git a/tools/testing/selftests/cgroup/.gitignore b/tools/testing/selftests/cgroup/.gitignore index 95eb3a53c381..adacda50a4b2 100644 --- a/tools/testing/selftests/cgroup/.gitignore +++ b/tools/testing/selftests/cgroup/.gitignore | |||
@@ -1 +1,2 @@ | |||
1 | test_memcontrol | 1 | test_memcontrol |
2 | test_core | ||
diff --git a/tools/testing/selftests/cgroup/cgroup_util.c b/tools/testing/selftests/cgroup/cgroup_util.c index 1c5d2b2a583b..14c9fe284806 100644 --- a/tools/testing/selftests/cgroup/cgroup_util.c +++ b/tools/testing/selftests/cgroup/cgroup_util.c | |||
@@ -89,17 +89,28 @@ int cg_read(const char *cgroup, const char *control, char *buf, size_t len) | |||
89 | int cg_read_strcmp(const char *cgroup, const char *control, | 89 | int cg_read_strcmp(const char *cgroup, const char *control, |
90 | const char *expected) | 90 | const char *expected) |
91 | { | 91 | { |
92 | size_t size = strlen(expected) + 1; | 92 | size_t size; |
93 | char *buf; | 93 | char *buf; |
94 | int ret; | ||
95 | |||
96 | /* Handle the case of comparing against empty string */ | ||
97 | if (!expected) | ||
98 | size = 32; | ||
99 | else | ||
100 | size = strlen(expected) + 1; | ||
94 | 101 | ||
95 | buf = malloc(size); | 102 | buf = malloc(size); |
96 | if (!buf) | 103 | if (!buf) |
97 | return -1; | 104 | return -1; |
98 | 105 | ||
99 | if (cg_read(cgroup, control, buf, size)) | 106 | if (cg_read(cgroup, control, buf, size)) { |
107 | free(buf); | ||
100 | return -1; | 108 | return -1; |
109 | } | ||
101 | 110 | ||
102 | return strcmp(expected, buf); | 111 | ret = strcmp(expected, buf); |
112 | free(buf); | ||
113 | return ret; | ||
103 | } | 114 | } |
104 | 115 | ||
105 | int cg_read_strstr(const char *cgroup, const char *control, const char *needle) | 116 | int cg_read_strstr(const char *cgroup, const char *control, const char *needle) |
@@ -337,3 +348,24 @@ int is_swap_enabled(void) | |||
337 | 348 | ||
338 | return cnt > 1; | 349 | return cnt > 1; |
339 | } | 350 | } |
351 | |||
352 | int set_oom_adj_score(int pid, int score) | ||
353 | { | ||
354 | char path[PATH_MAX]; | ||
355 | int fd, len; | ||
356 | |||
357 | sprintf(path, "/proc/%d/oom_score_adj", pid); | ||
358 | |||
359 | fd = open(path, O_WRONLY | O_APPEND); | ||
360 | if (fd < 0) | ||
361 | return fd; | ||
362 | |||
363 | len = dprintf(fd, "%d", score); | ||
364 | if (len < 0) { | ||
365 | close(fd); | ||
366 | return len; | ||
367 | } | ||
368 | |||
369 | close(fd); | ||
370 | return 0; | ||
371 | } | ||
diff --git a/tools/testing/selftests/cgroup/cgroup_util.h b/tools/testing/selftests/cgroup/cgroup_util.h index 1ff6f9f1abdc..9ac8b7958f83 100644 --- a/tools/testing/selftests/cgroup/cgroup_util.h +++ b/tools/testing/selftests/cgroup/cgroup_util.h | |||
@@ -40,3 +40,4 @@ extern int get_temp_fd(void); | |||
40 | extern int alloc_pagecache(int fd, size_t size); | 40 | extern int alloc_pagecache(int fd, size_t size); |
41 | extern int alloc_anon(const char *cgroup, void *arg); | 41 | extern int alloc_anon(const char *cgroup, void *arg); |
42 | extern int is_swap_enabled(void); | 42 | extern int is_swap_enabled(void); |
43 | extern int set_oom_adj_score(int pid, int score); | ||
diff --git a/tools/testing/selftests/cgroup/test_memcontrol.c b/tools/testing/selftests/cgroup/test_memcontrol.c index cf0bddc9d271..28d321ba311b 100644 --- a/tools/testing/selftests/cgroup/test_memcontrol.c +++ b/tools/testing/selftests/cgroup/test_memcontrol.c | |||
@@ -2,6 +2,7 @@ | |||
2 | #define _GNU_SOURCE | 2 | #define _GNU_SOURCE |
3 | 3 | ||
4 | #include <linux/limits.h> | 4 | #include <linux/limits.h> |
5 | #include <linux/oom.h> | ||
5 | #include <fcntl.h> | 6 | #include <fcntl.h> |
6 | #include <stdio.h> | 7 | #include <stdio.h> |
7 | #include <stdlib.h> | 8 | #include <stdlib.h> |
@@ -202,6 +203,36 @@ static int alloc_pagecache_50M_noexit(const char *cgroup, void *arg) | |||
202 | return 0; | 203 | return 0; |
203 | } | 204 | } |
204 | 205 | ||
206 | static int alloc_anon_noexit(const char *cgroup, void *arg) | ||
207 | { | ||
208 | int ppid = getppid(); | ||
209 | |||
210 | if (alloc_anon(cgroup, arg)) | ||
211 | return -1; | ||
212 | |||
213 | while (getppid() == ppid) | ||
214 | sleep(1); | ||
215 | |||
216 | return 0; | ||
217 | } | ||
218 | |||
219 | /* | ||
220 | * Wait until processes are killed asynchronously by the OOM killer. | ||
221 | * If we exceed a timeout, fail. | ||
222 | */ | ||
223 | static int cg_test_proc_killed(const char *cgroup) | ||
224 | { | ||
225 | int limit; | ||
226 | |||
227 | for (limit = 10; limit > 0; limit--) { | ||
228 | if (cg_read_strcmp(cgroup, "cgroup.procs", "") == 0) | ||
229 | return 0; | ||
230 | |||
231 | usleep(100000); | ||
232 | } | ||
233 | return -1; | ||
234 | } | ||
235 | |||
205 | /* | 236 | /* |
206 | * First, this test creates the following hierarchy: | 237 | * First, this test creates the following hierarchy: |
207 | * A memory.min = 50M, memory.max = 200M | 238 | * A memory.min = 50M, memory.max = 200M |
@@ -964,6 +995,177 @@ cleanup: | |||
964 | return ret; | 995 | return ret; |
965 | } | 996 | } |
966 | 997 | ||
998 | /* | ||
999 | * This test disables swapping and tries to allocate anonymous memory | ||
1000 | * up to OOM with memory.oom.group set. Then it checks that all | ||
1001 | * processes in the leaf (but not the parent) were killed. | ||
1002 | */ | ||
1003 | static int test_memcg_oom_group_leaf_events(const char *root) | ||
1004 | { | ||
1005 | int ret = KSFT_FAIL; | ||
1006 | char *parent, *child; | ||
1007 | |||
1008 | parent = cg_name(root, "memcg_test_0"); | ||
1009 | child = cg_name(root, "memcg_test_0/memcg_test_1"); | ||
1010 | |||
1011 | if (!parent || !child) | ||
1012 | goto cleanup; | ||
1013 | |||
1014 | if (cg_create(parent)) | ||
1015 | goto cleanup; | ||
1016 | |||
1017 | if (cg_create(child)) | ||
1018 | goto cleanup; | ||
1019 | |||
1020 | if (cg_write(parent, "cgroup.subtree_control", "+memory")) | ||
1021 | goto cleanup; | ||
1022 | |||
1023 | if (cg_write(child, "memory.max", "50M")) | ||
1024 | goto cleanup; | ||
1025 | |||
1026 | if (cg_write(child, "memory.swap.max", "0")) | ||
1027 | goto cleanup; | ||
1028 | |||
1029 | if (cg_write(child, "memory.oom.group", "1")) | ||
1030 | goto cleanup; | ||
1031 | |||
1032 | cg_run_nowait(parent, alloc_anon_noexit, (void *) MB(60)); | ||
1033 | cg_run_nowait(child, alloc_anon_noexit, (void *) MB(1)); | ||
1034 | cg_run_nowait(child, alloc_anon_noexit, (void *) MB(1)); | ||
1035 | if (!cg_run(child, alloc_anon, (void *)MB(100))) | ||
1036 | goto cleanup; | ||
1037 | |||
1038 | if (cg_test_proc_killed(child)) | ||
1039 | goto cleanup; | ||
1040 | |||
1041 | if (cg_read_key_long(child, "memory.events", "oom_kill ") <= 0) | ||
1042 | goto cleanup; | ||
1043 | |||
1044 | if (cg_read_key_long(parent, "memory.events", "oom_kill ") != 0) | ||
1045 | goto cleanup; | ||
1046 | |||
1047 | ret = KSFT_PASS; | ||
1048 | |||
1049 | cleanup: | ||
1050 | if (child) | ||
1051 | cg_destroy(child); | ||
1052 | if (parent) | ||
1053 | cg_destroy(parent); | ||
1054 | free(child); | ||
1055 | free(parent); | ||
1056 | |||
1057 | return ret; | ||
1058 | } | ||
1059 | |||
1060 | /* | ||
1061 | * This test disables swapping and tries to allocate anonymous memory | ||
1062 | * up to OOM with memory.oom.group set. Then it checks that all | ||
1063 | * processes in the parent and leaf were killed. | ||
1064 | */ | ||
1065 | static int test_memcg_oom_group_parent_events(const char *root) | ||
1066 | { | ||
1067 | int ret = KSFT_FAIL; | ||
1068 | char *parent, *child; | ||
1069 | |||
1070 | parent = cg_name(root, "memcg_test_0"); | ||
1071 | child = cg_name(root, "memcg_test_0/memcg_test_1"); | ||
1072 | |||
1073 | if (!parent || !child) | ||
1074 | goto cleanup; | ||
1075 | |||
1076 | if (cg_create(parent)) | ||
1077 | goto cleanup; | ||
1078 | |||
1079 | if (cg_create(child)) | ||
1080 | goto cleanup; | ||
1081 | |||
1082 | if (cg_write(parent, "memory.max", "80M")) | ||
1083 | goto cleanup; | ||
1084 | |||
1085 | if (cg_write(parent, "memory.swap.max", "0")) | ||
1086 | goto cleanup; | ||
1087 | |||
1088 | if (cg_write(parent, "memory.oom.group", "1")) | ||
1089 | goto cleanup; | ||
1090 | |||
1091 | cg_run_nowait(parent, alloc_anon_noexit, (void *) MB(60)); | ||
1092 | cg_run_nowait(child, alloc_anon_noexit, (void *) MB(1)); | ||
1093 | cg_run_nowait(child, alloc_anon_noexit, (void *) MB(1)); | ||
1094 | |||
1095 | if (!cg_run(child, alloc_anon, (void *)MB(100))) | ||
1096 | goto cleanup; | ||
1097 | |||
1098 | if (cg_test_proc_killed(child)) | ||
1099 | goto cleanup; | ||
1100 | if (cg_test_proc_killed(parent)) | ||
1101 | goto cleanup; | ||
1102 | |||
1103 | ret = KSFT_PASS; | ||
1104 | |||
1105 | cleanup: | ||
1106 | if (child) | ||
1107 | cg_destroy(child); | ||
1108 | if (parent) | ||
1109 | cg_destroy(parent); | ||
1110 | free(child); | ||
1111 | free(parent); | ||
1112 | |||
1113 | return ret; | ||
1114 | } | ||
1115 | |||
1116 | /* | ||
1117 | * This test disables swapping and tries to allocate anonymous memory | ||
1118 | * up to OOM with memory.oom.group set. Then it checks that all | ||
1119 | * processes were killed except those set with OOM_SCORE_ADJ_MIN. | ||
1120 | */ | ||
1121 | static int test_memcg_oom_group_score_events(const char *root) | ||
1122 | { | ||
1123 | int ret = KSFT_FAIL; | ||
1124 | char *memcg; | ||
1125 | int safe_pid; | ||
1126 | |||
1127 | memcg = cg_name(root, "memcg_test_0"); | ||
1128 | |||
1129 | if (!memcg) | ||
1130 | goto cleanup; | ||
1131 | |||
1132 | if (cg_create(memcg)) | ||
1133 | goto cleanup; | ||
1134 | |||
1135 | if (cg_write(memcg, "memory.max", "50M")) | ||
1136 | goto cleanup; | ||
1137 | |||
1138 | if (cg_write(memcg, "memory.swap.max", "0")) | ||
1139 | goto cleanup; | ||
1140 | |||
1141 | if (cg_write(memcg, "memory.oom.group", "1")) | ||
1142 | goto cleanup; | ||
1143 | |||
1144 | safe_pid = cg_run_nowait(memcg, alloc_anon_noexit, (void *) MB(1)); | ||
1145 | if (set_oom_adj_score(safe_pid, OOM_SCORE_ADJ_MIN)) | ||
1146 | goto cleanup; | ||
1147 | |||
1148 | cg_run_nowait(memcg, alloc_anon_noexit, (void *) MB(1)); | ||
1149 | if (!cg_run(memcg, alloc_anon, (void *)MB(100))) | ||
1150 | goto cleanup; | ||
1151 | |||
1152 | if (cg_read_key_long(memcg, "memory.events", "oom_kill ") != 3) | ||
1153 | goto cleanup; | ||
1154 | |||
1155 | if (kill(safe_pid, SIGKILL)) | ||
1156 | goto cleanup; | ||
1157 | |||
1158 | ret = KSFT_PASS; | ||
1159 | |||
1160 | cleanup: | ||
1161 | if (memcg) | ||
1162 | cg_destroy(memcg); | ||
1163 | free(memcg); | ||
1164 | |||
1165 | return ret; | ||
1166 | } | ||
1167 | |||
1168 | |||
967 | #define T(x) { x, #x } | 1169 | #define T(x) { x, #x } |
968 | struct memcg_test { | 1170 | struct memcg_test { |
969 | int (*fn)(const char *root); | 1171 | int (*fn)(const char *root); |
@@ -978,6 +1180,9 @@ struct memcg_test { | |||
978 | T(test_memcg_oom_events), | 1180 | T(test_memcg_oom_events), |
979 | T(test_memcg_swap_max), | 1181 | T(test_memcg_swap_max), |
980 | T(test_memcg_sock), | 1182 | T(test_memcg_sock), |
1183 | T(test_memcg_oom_group_leaf_events), | ||
1184 | T(test_memcg_oom_group_parent_events), | ||
1185 | T(test_memcg_oom_group_score_events), | ||
981 | }; | 1186 | }; |
982 | #undef T | 1187 | #undef T |
983 | 1188 | ||
diff --git a/tools/testing/selftests/efivarfs/config b/tools/testing/selftests/efivarfs/config new file mode 100644 index 000000000000..4e151f1005b2 --- /dev/null +++ b/tools/testing/selftests/efivarfs/config | |||
@@ -0,0 +1 @@ | |||
CONFIG_EFIVAR_FS=y | |||
diff --git a/tools/testing/selftests/futex/functional/Makefile b/tools/testing/selftests/futex/functional/Makefile index ff8feca49746..ad1eeb14fda7 100644 --- a/tools/testing/selftests/futex/functional/Makefile +++ b/tools/testing/selftests/futex/functional/Makefile | |||
@@ -18,6 +18,7 @@ TEST_GEN_FILES := \ | |||
18 | 18 | ||
19 | TEST_PROGS := run.sh | 19 | TEST_PROGS := run.sh |
20 | 20 | ||
21 | top_srcdir = ../../../../.. | ||
21 | include ../../lib.mk | 22 | include ../../lib.mk |
22 | 23 | ||
23 | $(TEST_GEN_FILES): $(HEADERS) | 24 | $(TEST_GEN_FILES): $(HEADERS) |
diff --git a/tools/testing/selftests/gpio/Makefile b/tools/testing/selftests/gpio/Makefile index 1bbb47565c55..4665cdbf1a8d 100644 --- a/tools/testing/selftests/gpio/Makefile +++ b/tools/testing/selftests/gpio/Makefile | |||
@@ -21,11 +21,8 @@ endef | |||
21 | CFLAGS += -O2 -g -std=gnu99 -Wall -I../../../../usr/include/ | 21 | CFLAGS += -O2 -g -std=gnu99 -Wall -I../../../../usr/include/ |
22 | LDLIBS += -lmount -I/usr/include/libmount | 22 | LDLIBS += -lmount -I/usr/include/libmount |
23 | 23 | ||
24 | $(BINARIES): ../../../gpio/gpio-utils.o ../../../../usr/include/linux/gpio.h | 24 | $(BINARIES):| khdr |
25 | $(BINARIES): ../../../gpio/gpio-utils.o | ||
25 | 26 | ||
26 | ../../../gpio/gpio-utils.o: | 27 | ../../../gpio/gpio-utils.o: |
27 | make ARCH=$(ARCH) CROSS_COMPILE=$(CROSS_COMPILE) -C ../../../gpio | 28 | make ARCH=$(ARCH) CROSS_COMPILE=$(CROSS_COMPILE) -C ../../../gpio |
28 | |||
29 | ../../../../usr/include/linux/gpio.h: | ||
30 | make -C ../../../.. headers_install INSTALL_HDR_PATH=$(shell pwd)/../../../../usr/ | ||
31 | |||
diff --git a/tools/testing/selftests/kselftest.h b/tools/testing/selftests/kselftest.h index 15e6b75fc3a5..a3edb2c8e43d 100644 --- a/tools/testing/selftests/kselftest.h +++ b/tools/testing/selftests/kselftest.h | |||
@@ -19,7 +19,6 @@ | |||
19 | #define KSFT_FAIL 1 | 19 | #define KSFT_FAIL 1 |
20 | #define KSFT_XFAIL 2 | 20 | #define KSFT_XFAIL 2 |
21 | #define KSFT_XPASS 3 | 21 | #define KSFT_XPASS 3 |
22 | /* Treat skip as pass */ | ||
23 | #define KSFT_SKIP 4 | 22 | #define KSFT_SKIP 4 |
24 | 23 | ||
25 | /* counters */ | 24 | /* counters */ |
diff --git a/tools/testing/selftests/kvm/.gitignore b/tools/testing/selftests/kvm/.gitignore index 4202139d81d9..5c34752e1cff 100644 --- a/tools/testing/selftests/kvm/.gitignore +++ b/tools/testing/selftests/kvm/.gitignore | |||
@@ -1,4 +1,5 @@ | |||
1 | cr4_cpuid_sync_test | 1 | cr4_cpuid_sync_test |
2 | platform_info_test | ||
2 | set_sregs_test | 3 | set_sregs_test |
3 | sync_regs_test | 4 | sync_regs_test |
4 | vmx_tsc_adjust_test | 5 | vmx_tsc_adjust_test |
diff --git a/tools/testing/selftests/kvm/Makefile b/tools/testing/selftests/kvm/Makefile index 03b0f551bedf..ec32dad3c3f0 100644 --- a/tools/testing/selftests/kvm/Makefile +++ b/tools/testing/selftests/kvm/Makefile | |||
@@ -6,7 +6,8 @@ UNAME_M := $(shell uname -m) | |||
6 | LIBKVM = lib/assert.c lib/elf.c lib/io.c lib/kvm_util.c lib/sparsebit.c | 6 | LIBKVM = lib/assert.c lib/elf.c lib/io.c lib/kvm_util.c lib/sparsebit.c |
7 | LIBKVM_x86_64 = lib/x86.c lib/vmx.c | 7 | LIBKVM_x86_64 = lib/x86.c lib/vmx.c |
8 | 8 | ||
9 | TEST_GEN_PROGS_x86_64 = set_sregs_test | 9 | TEST_GEN_PROGS_x86_64 = platform_info_test |
10 | TEST_GEN_PROGS_x86_64 += set_sregs_test | ||
10 | TEST_GEN_PROGS_x86_64 += sync_regs_test | 11 | TEST_GEN_PROGS_x86_64 += sync_regs_test |
11 | TEST_GEN_PROGS_x86_64 += vmx_tsc_adjust_test | 12 | TEST_GEN_PROGS_x86_64 += vmx_tsc_adjust_test |
12 | TEST_GEN_PROGS_x86_64 += cr4_cpuid_sync_test | 13 | TEST_GEN_PROGS_x86_64 += cr4_cpuid_sync_test |
@@ -20,7 +21,7 @@ INSTALL_HDR_PATH = $(top_srcdir)/usr | |||
20 | LINUX_HDR_PATH = $(INSTALL_HDR_PATH)/include/ | 21 | LINUX_HDR_PATH = $(INSTALL_HDR_PATH)/include/ |
21 | LINUX_TOOL_INCLUDE = $(top_srcdir)tools/include | 22 | LINUX_TOOL_INCLUDE = $(top_srcdir)tools/include |
22 | CFLAGS += -O2 -g -std=gnu99 -I$(LINUX_TOOL_INCLUDE) -I$(LINUX_HDR_PATH) -Iinclude -I$(<D) -I.. | 23 | CFLAGS += -O2 -g -std=gnu99 -I$(LINUX_TOOL_INCLUDE) -I$(LINUX_HDR_PATH) -Iinclude -I$(<D) -I.. |
23 | LDFLAGS += -lpthread | 24 | LDFLAGS += -pthread |
24 | 25 | ||
25 | # After inclusion, $(OUTPUT) is defined and | 26 | # After inclusion, $(OUTPUT) is defined and |
26 | # $(TEST_GEN_PROGS) starts with $(OUTPUT)/ | 27 | # $(TEST_GEN_PROGS) starts with $(OUTPUT)/ |
@@ -37,9 +38,6 @@ $(LIBKVM_OBJ): $(OUTPUT)/%.o: %.c | |||
37 | $(OUTPUT)/libkvm.a: $(LIBKVM_OBJ) | 38 | $(OUTPUT)/libkvm.a: $(LIBKVM_OBJ) |
38 | $(AR) crs $@ $^ | 39 | $(AR) crs $@ $^ |
39 | 40 | ||
40 | $(LINUX_HDR_PATH): | 41 | all: $(STATIC_LIBS) |
41 | make -C $(top_srcdir) headers_install | ||
42 | |||
43 | all: $(STATIC_LIBS) $(LINUX_HDR_PATH) | ||
44 | $(TEST_GEN_PROGS): $(STATIC_LIBS) | 42 | $(TEST_GEN_PROGS): $(STATIC_LIBS) |
45 | $(TEST_GEN_PROGS) $(LIBKVM_OBJ): | $(LINUX_HDR_PATH) | 43 | $(STATIC_LIBS):| khdr |
diff --git a/tools/testing/selftests/kvm/include/kvm_util.h b/tools/testing/selftests/kvm/include/kvm_util.h index bb5a25fb82c6..3acf9a91704c 100644 --- a/tools/testing/selftests/kvm/include/kvm_util.h +++ b/tools/testing/selftests/kvm/include/kvm_util.h | |||
@@ -50,6 +50,7 @@ enum vm_mem_backing_src_type { | |||
50 | }; | 50 | }; |
51 | 51 | ||
52 | int kvm_check_cap(long cap); | 52 | int kvm_check_cap(long cap); |
53 | int vm_enable_cap(struct kvm_vm *vm, struct kvm_enable_cap *cap); | ||
53 | 54 | ||
54 | struct kvm_vm *vm_create(enum vm_guest_mode mode, uint64_t phy_pages, int perm); | 55 | struct kvm_vm *vm_create(enum vm_guest_mode mode, uint64_t phy_pages, int perm); |
55 | void kvm_vm_free(struct kvm_vm *vmp); | 56 | void kvm_vm_free(struct kvm_vm *vmp); |
@@ -108,6 +109,9 @@ void vcpu_events_get(struct kvm_vm *vm, uint32_t vcpuid, | |||
108 | struct kvm_vcpu_events *events); | 109 | struct kvm_vcpu_events *events); |
109 | void vcpu_events_set(struct kvm_vm *vm, uint32_t vcpuid, | 110 | void vcpu_events_set(struct kvm_vm *vm, uint32_t vcpuid, |
110 | struct kvm_vcpu_events *events); | 111 | struct kvm_vcpu_events *events); |
112 | uint64_t vcpu_get_msr(struct kvm_vm *vm, uint32_t vcpuid, uint64_t msr_index); | ||
113 | void vcpu_set_msr(struct kvm_vm *vm, uint32_t vcpuid, uint64_t msr_index, | ||
114 | uint64_t msr_value); | ||
111 | 115 | ||
112 | const char *exit_reason_str(unsigned int exit_reason); | 116 | const char *exit_reason_str(unsigned int exit_reason); |
113 | 117 | ||
diff --git a/tools/testing/selftests/kvm/lib/kvm_util.c b/tools/testing/selftests/kvm/lib/kvm_util.c index e9ba389c48db..6fd8c089cafc 100644 --- a/tools/testing/selftests/kvm/lib/kvm_util.c +++ b/tools/testing/selftests/kvm/lib/kvm_util.c | |||
@@ -63,6 +63,29 @@ int kvm_check_cap(long cap) | |||
63 | return ret; | 63 | return ret; |
64 | } | 64 | } |
65 | 65 | ||
66 | /* VM Enable Capability | ||
67 | * | ||
68 | * Input Args: | ||
69 | * vm - Virtual Machine | ||
70 | * cap - Capability | ||
71 | * | ||
72 | * Output Args: None | ||
73 | * | ||
74 | * Return: On success, 0. On failure a TEST_ASSERT failure is produced. | ||
75 | * | ||
76 | * Enables a capability (KVM_CAP_*) on the VM. | ||
77 | */ | ||
78 | int vm_enable_cap(struct kvm_vm *vm, struct kvm_enable_cap *cap) | ||
79 | { | ||
80 | int ret; | ||
81 | |||
82 | ret = ioctl(vm->fd, KVM_ENABLE_CAP, cap); | ||
83 | TEST_ASSERT(ret == 0, "KVM_ENABLE_CAP IOCTL failed,\n" | ||
84 | " rc: %i errno: %i", ret, errno); | ||
85 | |||
86 | return ret; | ||
87 | } | ||
88 | |||
66 | static void vm_open(struct kvm_vm *vm, int perm) | 89 | static void vm_open(struct kvm_vm *vm, int perm) |
67 | { | 90 | { |
68 | vm->kvm_fd = open(KVM_DEV_PATH, perm); | 91 | vm->kvm_fd = open(KVM_DEV_PATH, perm); |
@@ -1220,6 +1243,72 @@ void vcpu_events_set(struct kvm_vm *vm, uint32_t vcpuid, | |||
1220 | ret, errno); | 1243 | ret, errno); |
1221 | } | 1244 | } |
1222 | 1245 | ||
1246 | /* VCPU Get MSR | ||
1247 | * | ||
1248 | * Input Args: | ||
1249 | * vm - Virtual Machine | ||
1250 | * vcpuid - VCPU ID | ||
1251 | * msr_index - Index of MSR | ||
1252 | * | ||
1253 | * Output Args: None | ||
1254 | * | ||
1255 | * Return: On success, value of the MSR. On failure a TEST_ASSERT is produced. | ||
1256 | * | ||
1257 | * Get value of MSR for VCPU. | ||
1258 | */ | ||
1259 | uint64_t vcpu_get_msr(struct kvm_vm *vm, uint32_t vcpuid, uint64_t msr_index) | ||
1260 | { | ||
1261 | struct vcpu *vcpu = vcpu_find(vm, vcpuid); | ||
1262 | struct { | ||
1263 | struct kvm_msrs header; | ||
1264 | struct kvm_msr_entry entry; | ||
1265 | } buffer = {}; | ||
1266 | int r; | ||
1267 | |||
1268 | TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid); | ||
1269 | buffer.header.nmsrs = 1; | ||
1270 | buffer.entry.index = msr_index; | ||
1271 | r = ioctl(vcpu->fd, KVM_GET_MSRS, &buffer.header); | ||
1272 | TEST_ASSERT(r == 1, "KVM_GET_MSRS IOCTL failed,\n" | ||
1273 | " rc: %i errno: %i", r, errno); | ||
1274 | |||
1275 | return buffer.entry.data; | ||
1276 | } | ||
1277 | |||
1278 | /* VCPU Set MSR | ||
1279 | * | ||
1280 | * Input Args: | ||
1281 | * vm - Virtual Machine | ||
1282 | * vcpuid - VCPU ID | ||
1283 | * msr_index - Index of MSR | ||
1284 | * msr_value - New value of MSR | ||
1285 | * | ||
1286 | * Output Args: None | ||
1287 | * | ||
1288 | * Return: On success, nothing. On failure a TEST_ASSERT is produced. | ||
1289 | * | ||
1290 | * Set value of MSR for VCPU. | ||
1291 | */ | ||
1292 | void vcpu_set_msr(struct kvm_vm *vm, uint32_t vcpuid, uint64_t msr_index, | ||
1293 | uint64_t msr_value) | ||
1294 | { | ||
1295 | struct vcpu *vcpu = vcpu_find(vm, vcpuid); | ||
1296 | struct { | ||
1297 | struct kvm_msrs header; | ||
1298 | struct kvm_msr_entry entry; | ||
1299 | } buffer = {}; | ||
1300 | int r; | ||
1301 | |||
1302 | TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid); | ||
1303 | memset(&buffer, 0, sizeof(buffer)); | ||
1304 | buffer.header.nmsrs = 1; | ||
1305 | buffer.entry.index = msr_index; | ||
1306 | buffer.entry.data = msr_value; | ||
1307 | r = ioctl(vcpu->fd, KVM_SET_MSRS, &buffer.header); | ||
1308 | TEST_ASSERT(r == 1, "KVM_SET_MSRS IOCTL failed,\n" | ||
1309 | " rc: %i errno: %i", r, errno); | ||
1310 | } | ||
1311 | |||
1223 | /* VM VCPU Args Set | 1312 | /* VM VCPU Args Set |
1224 | * | 1313 | * |
1225 | * Input Args: | 1314 | * Input Args: |
diff --git a/tools/testing/selftests/kvm/platform_info_test.c b/tools/testing/selftests/kvm/platform_info_test.c new file mode 100644 index 000000000000..3764e7121265 --- /dev/null +++ b/tools/testing/selftests/kvm/platform_info_test.c | |||
@@ -0,0 +1,110 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
2 | /* | ||
3 | * Test for x86 KVM_CAP_MSR_PLATFORM_INFO | ||
4 | * | ||
5 | * Copyright (C) 2018, Google LLC. | ||
6 | * | ||
7 | * This work is licensed under the terms of the GNU GPL, version 2. | ||
8 | * | ||
9 | * Verifies expected behavior of controlling guest access to | ||
10 | * MSR_PLATFORM_INFO. | ||
11 | */ | ||
12 | |||
13 | #define _GNU_SOURCE /* for program_invocation_short_name */ | ||
14 | #include <fcntl.h> | ||
15 | #include <stdio.h> | ||
16 | #include <stdlib.h> | ||
17 | #include <string.h> | ||
18 | #include <sys/ioctl.h> | ||
19 | |||
20 | #include "test_util.h" | ||
21 | #include "kvm_util.h" | ||
22 | #include "x86.h" | ||
23 | |||
24 | #define VCPU_ID 0 | ||
25 | #define MSR_PLATFORM_INFO_MAX_TURBO_RATIO 0xff00 | ||
26 | |||
27 | static void guest_code(void) | ||
28 | { | ||
29 | uint64_t msr_platform_info; | ||
30 | |||
31 | for (;;) { | ||
32 | msr_platform_info = rdmsr(MSR_PLATFORM_INFO); | ||
33 | GUEST_SYNC(msr_platform_info); | ||
34 | asm volatile ("inc %r11"); | ||
35 | } | ||
36 | } | ||
37 | |||
38 | static void set_msr_platform_info_enabled(struct kvm_vm *vm, bool enable) | ||
39 | { | ||
40 | struct kvm_enable_cap cap = {}; | ||
41 | |||
42 | cap.cap = KVM_CAP_MSR_PLATFORM_INFO; | ||
43 | cap.flags = 0; | ||
44 | cap.args[0] = (int)enable; | ||
45 | vm_enable_cap(vm, &cap); | ||
46 | } | ||
47 | |||
48 | static void test_msr_platform_info_enabled(struct kvm_vm *vm) | ||
49 | { | ||
50 | struct kvm_run *run = vcpu_state(vm, VCPU_ID); | ||
51 | struct guest_args args; | ||
52 | |||
53 | set_msr_platform_info_enabled(vm, true); | ||
54 | vcpu_run(vm, VCPU_ID); | ||
55 | TEST_ASSERT(run->exit_reason == KVM_EXIT_IO, | ||
56 | "Exit_reason other than KVM_EXIT_IO: %u (%s),\n", | ||
57 | run->exit_reason, | ||
58 | exit_reason_str(run->exit_reason)); | ||
59 | guest_args_read(vm, VCPU_ID, &args); | ||
60 | TEST_ASSERT(args.port == GUEST_PORT_SYNC, | ||
61 | "Received IO from port other than PORT_HOST_SYNC: %u\n", | ||
62 | run->io.port); | ||
63 | TEST_ASSERT((args.arg1 & MSR_PLATFORM_INFO_MAX_TURBO_RATIO) == | ||
64 | MSR_PLATFORM_INFO_MAX_TURBO_RATIO, | ||
65 | "Expected MSR_PLATFORM_INFO to have max turbo ratio mask: %i.", | ||
66 | MSR_PLATFORM_INFO_MAX_TURBO_RATIO); | ||
67 | } | ||
68 | |||
69 | static void test_msr_platform_info_disabled(struct kvm_vm *vm) | ||
70 | { | ||
71 | struct kvm_run *run = vcpu_state(vm, VCPU_ID); | ||
72 | |||
73 | set_msr_platform_info_enabled(vm, false); | ||
74 | vcpu_run(vm, VCPU_ID); | ||
75 | TEST_ASSERT(run->exit_reason == KVM_EXIT_SHUTDOWN, | ||
76 | "Exit_reason other than KVM_EXIT_SHUTDOWN: %u (%s)\n", | ||
77 | run->exit_reason, | ||
78 | exit_reason_str(run->exit_reason)); | ||
79 | } | ||
80 | |||
81 | int main(int argc, char *argv[]) | ||
82 | { | ||
83 | struct kvm_vm *vm; | ||
84 | struct kvm_run *state; | ||
85 | int rv; | ||
86 | uint64_t msr_platform_info; | ||
87 | |||
88 | /* Tell stdout not to buffer its content */ | ||
89 | setbuf(stdout, NULL); | ||
90 | |||
91 | rv = kvm_check_cap(KVM_CAP_MSR_PLATFORM_INFO); | ||
92 | if (!rv) { | ||
93 | fprintf(stderr, | ||
94 | "KVM_CAP_MSR_PLATFORM_INFO not supported, skip test\n"); | ||
95 | exit(KSFT_SKIP); | ||
96 | } | ||
97 | |||
98 | vm = vm_create_default(VCPU_ID, 0, guest_code); | ||
99 | |||
100 | msr_platform_info = vcpu_get_msr(vm, VCPU_ID, MSR_PLATFORM_INFO); | ||
101 | vcpu_set_msr(vm, VCPU_ID, MSR_PLATFORM_INFO, | ||
102 | msr_platform_info | MSR_PLATFORM_INFO_MAX_TURBO_RATIO); | ||
103 | test_msr_platform_info_disabled(vm); | ||
104 | test_msr_platform_info_enabled(vm); | ||
105 | vcpu_set_msr(vm, VCPU_ID, MSR_PLATFORM_INFO, msr_platform_info); | ||
106 | |||
107 | kvm_vm_free(vm); | ||
108 | |||
109 | return 0; | ||
110 | } | ||
diff --git a/tools/testing/selftests/lib.mk b/tools/testing/selftests/lib.mk index 17ab36605a8e..0a8e75886224 100644 --- a/tools/testing/selftests/lib.mk +++ b/tools/testing/selftests/lib.mk | |||
@@ -16,8 +16,20 @@ TEST_GEN_PROGS := $(patsubst %,$(OUTPUT)/%,$(TEST_GEN_PROGS)) | |||
16 | TEST_GEN_PROGS_EXTENDED := $(patsubst %,$(OUTPUT)/%,$(TEST_GEN_PROGS_EXTENDED)) | 16 | TEST_GEN_PROGS_EXTENDED := $(patsubst %,$(OUTPUT)/%,$(TEST_GEN_PROGS_EXTENDED)) |
17 | TEST_GEN_FILES := $(patsubst %,$(OUTPUT)/%,$(TEST_GEN_FILES)) | 17 | TEST_GEN_FILES := $(patsubst %,$(OUTPUT)/%,$(TEST_GEN_FILES)) |
18 | 18 | ||
19 | top_srcdir ?= ../../../.. | ||
20 | include $(top_srcdir)/scripts/subarch.include | ||
21 | ARCH ?= $(SUBARCH) | ||
22 | |||
19 | all: $(TEST_GEN_PROGS) $(TEST_GEN_PROGS_EXTENDED) $(TEST_GEN_FILES) | 23 | all: $(TEST_GEN_PROGS) $(TEST_GEN_PROGS_EXTENDED) $(TEST_GEN_FILES) |
20 | 24 | ||
25 | .PHONY: khdr | ||
26 | khdr: | ||
27 | make ARCH=$(ARCH) -C $(top_srcdir) headers_install | ||
28 | |||
29 | ifdef KSFT_KHDR_INSTALL | ||
30 | $(TEST_GEN_PROGS) $(TEST_GEN_PROGS_EXTENDED) $(TEST_GEN_FILES):| khdr | ||
31 | endif | ||
32 | |||
21 | .ONESHELL: | 33 | .ONESHELL: |
22 | define RUN_TEST_PRINT_RESULT | 34 | define RUN_TEST_PRINT_RESULT |
23 | TEST_HDR_MSG="selftests: "`basename $$PWD`:" $$BASENAME_TEST"; \ | 35 | TEST_HDR_MSG="selftests: "`basename $$PWD`:" $$BASENAME_TEST"; \ |
diff --git a/tools/testing/selftests/memory-hotplug/config b/tools/testing/selftests/memory-hotplug/config index 2fde30191a47..a7e8cd5bb265 100644 --- a/tools/testing/selftests/memory-hotplug/config +++ b/tools/testing/selftests/memory-hotplug/config | |||
@@ -2,3 +2,4 @@ CONFIG_MEMORY_HOTPLUG=y | |||
2 | CONFIG_MEMORY_HOTPLUG_SPARSE=y | 2 | CONFIG_MEMORY_HOTPLUG_SPARSE=y |
3 | CONFIG_NOTIFIER_ERROR_INJECTION=y | 3 | CONFIG_NOTIFIER_ERROR_INJECTION=y |
4 | CONFIG_MEMORY_NOTIFIER_ERROR_INJECT=m | 4 | CONFIG_MEMORY_NOTIFIER_ERROR_INJECT=m |
5 | CONFIG_MEMORY_HOTREMOVE=y | ||
diff --git a/tools/testing/selftests/net/Makefile b/tools/testing/selftests/net/Makefile index 9cca68e440a0..919aa2ac00af 100644 --- a/tools/testing/selftests/net/Makefile +++ b/tools/testing/selftests/net/Makefile | |||
@@ -15,6 +15,7 @@ TEST_GEN_FILES += udpgso udpgso_bench_tx udpgso_bench_rx | |||
15 | TEST_GEN_PROGS = reuseport_bpf reuseport_bpf_cpu reuseport_bpf_numa | 15 | TEST_GEN_PROGS = reuseport_bpf reuseport_bpf_cpu reuseport_bpf_numa |
16 | TEST_GEN_PROGS += reuseport_dualstack reuseaddr_conflict tls | 16 | TEST_GEN_PROGS += reuseport_dualstack reuseaddr_conflict tls |
17 | 17 | ||
18 | KSFT_KHDR_INSTALL := 1 | ||
18 | include ../lib.mk | 19 | include ../lib.mk |
19 | 20 | ||
20 | $(OUTPUT)/reuseport_bpf_numa: LDFLAGS += -lnuma | 21 | $(OUTPUT)/reuseport_bpf_numa: LDFLAGS += -lnuma |
diff --git a/tools/testing/selftests/net/pmtu.sh b/tools/testing/selftests/net/pmtu.sh index 32a194e3e07a..0ab9423d009f 100755 --- a/tools/testing/selftests/net/pmtu.sh +++ b/tools/testing/selftests/net/pmtu.sh | |||
@@ -178,8 +178,8 @@ setup() { | |||
178 | 178 | ||
179 | cleanup() { | 179 | cleanup() { |
180 | [ ${cleanup_done} -eq 1 ] && return | 180 | [ ${cleanup_done} -eq 1 ] && return |
181 | ip netns del ${NS_A} 2 > /dev/null | 181 | ip netns del ${NS_A} 2> /dev/null |
182 | ip netns del ${NS_B} 2 > /dev/null | 182 | ip netns del ${NS_B} 2> /dev/null |
183 | cleanup_done=1 | 183 | cleanup_done=1 |
184 | } | 184 | } |
185 | 185 | ||
diff --git a/tools/testing/selftests/net/tls.c b/tools/testing/selftests/net/tls.c index b3ebf2646e52..8fdfeafaf8c0 100644 --- a/tools/testing/selftests/net/tls.c +++ b/tools/testing/selftests/net/tls.c | |||
@@ -502,6 +502,55 @@ TEST_F(tls, recv_peek_multiple) | |||
502 | EXPECT_EQ(memcmp(test_str, buf, send_len), 0); | 502 | EXPECT_EQ(memcmp(test_str, buf, send_len), 0); |
503 | } | 503 | } |
504 | 504 | ||
505 | TEST_F(tls, recv_peek_multiple_records) | ||
506 | { | ||
507 | char const *test_str = "test_read_peek_mult_recs"; | ||
508 | char const *test_str_first = "test_read_peek"; | ||
509 | char const *test_str_second = "_mult_recs"; | ||
510 | int len; | ||
511 | char buf[64]; | ||
512 | |||
513 | len = strlen(test_str_first); | ||
514 | EXPECT_EQ(send(self->fd, test_str_first, len, 0), len); | ||
515 | |||
516 | len = strlen(test_str_second) + 1; | ||
517 | EXPECT_EQ(send(self->fd, test_str_second, len, 0), len); | ||
518 | |||
519 | len = sizeof(buf); | ||
520 | memset(buf, 0, len); | ||
521 | EXPECT_NE(recv(self->cfd, buf, len, MSG_PEEK), -1); | ||
522 | |||
523 | /* MSG_PEEK can only peek into the current record. */ | ||
524 | len = strlen(test_str_first) + 1; | ||
525 | EXPECT_EQ(memcmp(test_str_first, buf, len), 0); | ||
526 | |||
527 | len = sizeof(buf); | ||
528 | memset(buf, 0, len); | ||
529 | EXPECT_NE(recv(self->cfd, buf, len, 0), -1); | ||
530 | |||
531 | /* Non-MSG_PEEK will advance strparser (and therefore record) | ||
532 | * however. | ||
533 | */ | ||
534 | len = strlen(test_str) + 1; | ||
535 | EXPECT_EQ(memcmp(test_str, buf, len), 0); | ||
536 | |||
537 | /* MSG_MORE will hold current record open, so later MSG_PEEK | ||
538 | * will see everything. | ||
539 | */ | ||
540 | len = strlen(test_str_first); | ||
541 | EXPECT_EQ(send(self->fd, test_str_first, len, MSG_MORE), len); | ||
542 | |||
543 | len = strlen(test_str_second) + 1; | ||
544 | EXPECT_EQ(send(self->fd, test_str_second, len, 0), len); | ||
545 | |||
546 | len = sizeof(buf); | ||
547 | memset(buf, 0, len); | ||
548 | EXPECT_NE(recv(self->cfd, buf, len, MSG_PEEK), -1); | ||
549 | |||
550 | len = strlen(test_str) + 1; | ||
551 | EXPECT_EQ(memcmp(test_str, buf, len), 0); | ||
552 | } | ||
553 | |||
505 | TEST_F(tls, pollin) | 554 | TEST_F(tls, pollin) |
506 | { | 555 | { |
507 | char const *test_str = "test_poll"; | 556 | char const *test_str = "test_poll"; |
diff --git a/tools/testing/selftests/networking/timestamping/Makefile b/tools/testing/selftests/networking/timestamping/Makefile index a728040edbe1..14cfcf006936 100644 --- a/tools/testing/selftests/networking/timestamping/Makefile +++ b/tools/testing/selftests/networking/timestamping/Makefile | |||
@@ -5,6 +5,7 @@ TEST_PROGS := hwtstamp_config rxtimestamp timestamping txtimestamp | |||
5 | 5 | ||
6 | all: $(TEST_PROGS) | 6 | all: $(TEST_PROGS) |
7 | 7 | ||
8 | top_srcdir = ../../../../.. | ||
8 | include ../../lib.mk | 9 | include ../../lib.mk |
9 | 10 | ||
10 | clean: | 11 | clean: |
diff --git a/tools/testing/selftests/powerpc/alignment/Makefile b/tools/testing/selftests/powerpc/alignment/Makefile index 93baacab7693..d056486f49de 100644 --- a/tools/testing/selftests/powerpc/alignment/Makefile +++ b/tools/testing/selftests/powerpc/alignment/Makefile | |||
@@ -1,5 +1,6 @@ | |||
1 | TEST_GEN_PROGS := copy_first_unaligned alignment_handler | 1 | TEST_GEN_PROGS := copy_first_unaligned alignment_handler |
2 | 2 | ||
3 | top_srcdir = ../../../../.. | ||
3 | include ../../lib.mk | 4 | include ../../lib.mk |
4 | 5 | ||
5 | $(TEST_GEN_PROGS): ../harness.c ../utils.c | 6 | $(TEST_GEN_PROGS): ../harness.c ../utils.c |
diff --git a/tools/testing/selftests/powerpc/benchmarks/Makefile b/tools/testing/selftests/powerpc/benchmarks/Makefile index b4d7432a0ecd..d40300a65b42 100644 --- a/tools/testing/selftests/powerpc/benchmarks/Makefile +++ b/tools/testing/selftests/powerpc/benchmarks/Makefile | |||
@@ -4,6 +4,7 @@ TEST_GEN_FILES := exec_target | |||
4 | 4 | ||
5 | CFLAGS += -O2 | 5 | CFLAGS += -O2 |
6 | 6 | ||
7 | top_srcdir = ../../../../.. | ||
7 | include ../../lib.mk | 8 | include ../../lib.mk |
8 | 9 | ||
9 | $(TEST_GEN_PROGS): ../harness.c | 10 | $(TEST_GEN_PROGS): ../harness.c |
diff --git a/tools/testing/selftests/powerpc/cache_shape/Makefile b/tools/testing/selftests/powerpc/cache_shape/Makefile index 1be547434a49..ede4d3dae750 100644 --- a/tools/testing/selftests/powerpc/cache_shape/Makefile +++ b/tools/testing/selftests/powerpc/cache_shape/Makefile | |||
@@ -5,6 +5,7 @@ all: $(TEST_PROGS) | |||
5 | 5 | ||
6 | $(TEST_PROGS): ../harness.c ../utils.c | 6 | $(TEST_PROGS): ../harness.c ../utils.c |
7 | 7 | ||
8 | top_srcdir = ../../../../.. | ||
8 | include ../../lib.mk | 9 | include ../../lib.mk |
9 | 10 | ||
10 | clean: | 11 | clean: |
diff --git a/tools/testing/selftests/powerpc/copyloops/Makefile b/tools/testing/selftests/powerpc/copyloops/Makefile index 1cf89a34d97c..44574f3818b3 100644 --- a/tools/testing/selftests/powerpc/copyloops/Makefile +++ b/tools/testing/selftests/powerpc/copyloops/Makefile | |||
@@ -17,6 +17,7 @@ TEST_GEN_PROGS := copyuser_64_t0 copyuser_64_t1 copyuser_64_t2 \ | |||
17 | 17 | ||
18 | EXTRA_SOURCES := validate.c ../harness.c stubs.S | 18 | EXTRA_SOURCES := validate.c ../harness.c stubs.S |
19 | 19 | ||
20 | top_srcdir = ../../../../.. | ||
20 | include ../../lib.mk | 21 | include ../../lib.mk |
21 | 22 | ||
22 | $(OUTPUT)/copyuser_64_t%: copyuser_64.S $(EXTRA_SOURCES) | 23 | $(OUTPUT)/copyuser_64_t%: copyuser_64.S $(EXTRA_SOURCES) |
diff --git a/tools/testing/selftests/powerpc/dscr/Makefile b/tools/testing/selftests/powerpc/dscr/Makefile index 55d7db7a616b..5df476364b4d 100644 --- a/tools/testing/selftests/powerpc/dscr/Makefile +++ b/tools/testing/selftests/powerpc/dscr/Makefile | |||
@@ -3,6 +3,7 @@ TEST_GEN_PROGS := dscr_default_test dscr_explicit_test dscr_user_test \ | |||
3 | dscr_inherit_test dscr_inherit_exec_test dscr_sysfs_test \ | 3 | dscr_inherit_test dscr_inherit_exec_test dscr_sysfs_test \ |
4 | dscr_sysfs_thread_test | 4 | dscr_sysfs_thread_test |
5 | 5 | ||
6 | top_srcdir = ../../../../.. | ||
6 | include ../../lib.mk | 7 | include ../../lib.mk |
7 | 8 | ||
8 | $(OUTPUT)/dscr_default_test: LDLIBS += -lpthread | 9 | $(OUTPUT)/dscr_default_test: LDLIBS += -lpthread |
diff --git a/tools/testing/selftests/powerpc/math/Makefile b/tools/testing/selftests/powerpc/math/Makefile index 0dd3a01fdab9..11a10d7a2bbd 100644 --- a/tools/testing/selftests/powerpc/math/Makefile +++ b/tools/testing/selftests/powerpc/math/Makefile | |||
@@ -1,6 +1,7 @@ | |||
1 | # SPDX-License-Identifier: GPL-2.0 | 1 | # SPDX-License-Identifier: GPL-2.0 |
2 | TEST_GEN_PROGS := fpu_syscall fpu_preempt fpu_signal vmx_syscall vmx_preempt vmx_signal vsx_preempt | 2 | TEST_GEN_PROGS := fpu_syscall fpu_preempt fpu_signal vmx_syscall vmx_preempt vmx_signal vsx_preempt |
3 | 3 | ||
4 | top_srcdir = ../../../../.. | ||
4 | include ../../lib.mk | 5 | include ../../lib.mk |
5 | 6 | ||
6 | $(TEST_GEN_PROGS): ../harness.c | 7 | $(TEST_GEN_PROGS): ../harness.c |
diff --git a/tools/testing/selftests/powerpc/mm/Makefile b/tools/testing/selftests/powerpc/mm/Makefile index 8ebbe96d80a8..33ced6e0ad25 100644 --- a/tools/testing/selftests/powerpc/mm/Makefile +++ b/tools/testing/selftests/powerpc/mm/Makefile | |||
@@ -5,6 +5,7 @@ noarg: | |||
5 | TEST_GEN_PROGS := hugetlb_vs_thp_test subpage_prot prot_sao segv_errors | 5 | TEST_GEN_PROGS := hugetlb_vs_thp_test subpage_prot prot_sao segv_errors |
6 | TEST_GEN_FILES := tempfile | 6 | TEST_GEN_FILES := tempfile |
7 | 7 | ||
8 | top_srcdir = ../../../../.. | ||
8 | include ../../lib.mk | 9 | include ../../lib.mk |
9 | 10 | ||
10 | $(TEST_GEN_PROGS): ../harness.c | 11 | $(TEST_GEN_PROGS): ../harness.c |
diff --git a/tools/testing/selftests/powerpc/pmu/Makefile b/tools/testing/selftests/powerpc/pmu/Makefile index 6e1629bf5b09..19046db995fe 100644 --- a/tools/testing/selftests/powerpc/pmu/Makefile +++ b/tools/testing/selftests/powerpc/pmu/Makefile | |||
@@ -5,6 +5,7 @@ noarg: | |||
5 | TEST_GEN_PROGS := count_instructions l3_bank_test per_event_excludes | 5 | TEST_GEN_PROGS := count_instructions l3_bank_test per_event_excludes |
6 | EXTRA_SOURCES := ../harness.c event.c lib.c ../utils.c | 6 | EXTRA_SOURCES := ../harness.c event.c lib.c ../utils.c |
7 | 7 | ||
8 | top_srcdir = ../../../../.. | ||
8 | include ../../lib.mk | 9 | include ../../lib.mk |
9 | 10 | ||
10 | all: $(TEST_GEN_PROGS) ebb | 11 | all: $(TEST_GEN_PROGS) ebb |
diff --git a/tools/testing/selftests/powerpc/pmu/ebb/Makefile b/tools/testing/selftests/powerpc/pmu/ebb/Makefile index c4e64bc2e265..bd5dfa509272 100644 --- a/tools/testing/selftests/powerpc/pmu/ebb/Makefile +++ b/tools/testing/selftests/powerpc/pmu/ebb/Makefile | |||
@@ -17,6 +17,7 @@ TEST_GEN_PROGS := reg_access_test event_attributes_test cycles_test \ | |||
17 | lost_exception_test no_handler_test \ | 17 | lost_exception_test no_handler_test \ |
18 | cycles_with_mmcr2_test | 18 | cycles_with_mmcr2_test |
19 | 19 | ||
20 | top_srcdir = ../../../../../.. | ||
20 | include ../../../lib.mk | 21 | include ../../../lib.mk |
21 | 22 | ||
22 | $(TEST_GEN_PROGS): ../../harness.c ../../utils.c ../event.c ../lib.c \ | 23 | $(TEST_GEN_PROGS): ../../harness.c ../../utils.c ../event.c ../lib.c \ |
diff --git a/tools/testing/selftests/powerpc/primitives/Makefile b/tools/testing/selftests/powerpc/primitives/Makefile index 175366db7be8..ea2b7bd09e36 100644 --- a/tools/testing/selftests/powerpc/primitives/Makefile +++ b/tools/testing/selftests/powerpc/primitives/Makefile | |||
@@ -2,6 +2,7 @@ CFLAGS += -I$(CURDIR) | |||
2 | 2 | ||
3 | TEST_GEN_PROGS := load_unaligned_zeropad | 3 | TEST_GEN_PROGS := load_unaligned_zeropad |
4 | 4 | ||
5 | top_srcdir = ../../../../.. | ||
5 | include ../../lib.mk | 6 | include ../../lib.mk |
6 | 7 | ||
7 | $(TEST_GEN_PROGS): ../harness.c | 8 | $(TEST_GEN_PROGS): ../harness.c |
diff --git a/tools/testing/selftests/powerpc/ptrace/Makefile b/tools/testing/selftests/powerpc/ptrace/Makefile index 28f5b781a553..923d531265f8 100644 --- a/tools/testing/selftests/powerpc/ptrace/Makefile +++ b/tools/testing/selftests/powerpc/ptrace/Makefile | |||
@@ -4,6 +4,7 @@ TEST_PROGS := ptrace-gpr ptrace-tm-gpr ptrace-tm-spd-gpr \ | |||
4 | ptrace-tm-spd-vsx ptrace-tm-spr ptrace-hwbreak ptrace-pkey core-pkey \ | 4 | ptrace-tm-spd-vsx ptrace-tm-spr ptrace-hwbreak ptrace-pkey core-pkey \ |
5 | perf-hwbreak | 5 | perf-hwbreak |
6 | 6 | ||
7 | top_srcdir = ../../../../.. | ||
7 | include ../../lib.mk | 8 | include ../../lib.mk |
8 | 9 | ||
9 | all: $(TEST_PROGS) | 10 | all: $(TEST_PROGS) |
diff --git a/tools/testing/selftests/powerpc/signal/Makefile b/tools/testing/selftests/powerpc/signal/Makefile index a7cbd5082e27..1fca25c6ace0 100644 --- a/tools/testing/selftests/powerpc/signal/Makefile +++ b/tools/testing/selftests/powerpc/signal/Makefile | |||
@@ -8,6 +8,7 @@ $(TEST_PROGS): ../harness.c ../utils.c signal.S | |||
8 | CFLAGS += -maltivec | 8 | CFLAGS += -maltivec |
9 | signal_tm: CFLAGS += -mhtm | 9 | signal_tm: CFLAGS += -mhtm |
10 | 10 | ||
11 | top_srcdir = ../../../../.. | ||
11 | include ../../lib.mk | 12 | include ../../lib.mk |
12 | 13 | ||
13 | clean: | 14 | clean: |
diff --git a/tools/testing/selftests/powerpc/stringloops/Makefile b/tools/testing/selftests/powerpc/stringloops/Makefile index 10b35c87a4f4..7fc0623d85c3 100644 --- a/tools/testing/selftests/powerpc/stringloops/Makefile +++ b/tools/testing/selftests/powerpc/stringloops/Makefile | |||
@@ -29,6 +29,7 @@ endif | |||
29 | 29 | ||
30 | ASFLAGS = $(CFLAGS) | 30 | ASFLAGS = $(CFLAGS) |
31 | 31 | ||
32 | top_srcdir = ../../../../.. | ||
32 | include ../../lib.mk | 33 | include ../../lib.mk |
33 | 34 | ||
34 | $(TEST_GEN_PROGS): $(EXTRA_SOURCES) | 35 | $(TEST_GEN_PROGS): $(EXTRA_SOURCES) |
diff --git a/tools/testing/selftests/powerpc/switch_endian/Makefile b/tools/testing/selftests/powerpc/switch_endian/Makefile index 30b8ff8fb82e..fcd2dcb8972b 100644 --- a/tools/testing/selftests/powerpc/switch_endian/Makefile +++ b/tools/testing/selftests/powerpc/switch_endian/Makefile | |||
@@ -5,6 +5,7 @@ ASFLAGS += -O2 -Wall -g -nostdlib -m64 | |||
5 | 5 | ||
6 | EXTRA_CLEAN = $(OUTPUT)/*.o $(OUTPUT)/check-reversed.S | 6 | EXTRA_CLEAN = $(OUTPUT)/*.o $(OUTPUT)/check-reversed.S |
7 | 7 | ||
8 | top_srcdir = ../../../../.. | ||
8 | include ../../lib.mk | 9 | include ../../lib.mk |
9 | 10 | ||
10 | $(OUTPUT)/switch_endian_test: $(OUTPUT)/check-reversed.S | 11 | $(OUTPUT)/switch_endian_test: $(OUTPUT)/check-reversed.S |
diff --git a/tools/testing/selftests/powerpc/syscalls/Makefile b/tools/testing/selftests/powerpc/syscalls/Makefile index da22ca7c38c1..161b8846336f 100644 --- a/tools/testing/selftests/powerpc/syscalls/Makefile +++ b/tools/testing/selftests/powerpc/syscalls/Makefile | |||
@@ -2,6 +2,7 @@ TEST_GEN_PROGS := ipc_unmuxed | |||
2 | 2 | ||
3 | CFLAGS += -I../../../../../usr/include | 3 | CFLAGS += -I../../../../../usr/include |
4 | 4 | ||
5 | top_srcdir = ../../../../.. | ||
5 | include ../../lib.mk | 6 | include ../../lib.mk |
6 | 7 | ||
7 | $(TEST_GEN_PROGS): ../harness.c | 8 | $(TEST_GEN_PROGS): ../harness.c |
diff --git a/tools/testing/selftests/powerpc/tm/Makefile b/tools/testing/selftests/powerpc/tm/Makefile index c0e45d2dde25..9fc2cf6fbc92 100644 --- a/tools/testing/selftests/powerpc/tm/Makefile +++ b/tools/testing/selftests/powerpc/tm/Makefile | |||
@@ -6,6 +6,7 @@ TEST_GEN_PROGS := tm-resched-dscr tm-syscall tm-signal-msr-resv tm-signal-stack | |||
6 | tm-vmxcopy tm-fork tm-tar tm-tmspr tm-vmx-unavail tm-unavailable tm-trap \ | 6 | tm-vmxcopy tm-fork tm-tar tm-tmspr tm-vmx-unavail tm-unavailable tm-trap \ |
7 | $(SIGNAL_CONTEXT_CHK_TESTS) tm-sigreturn | 7 | $(SIGNAL_CONTEXT_CHK_TESTS) tm-sigreturn |
8 | 8 | ||
9 | top_srcdir = ../../../../.. | ||
9 | include ../../lib.mk | 10 | include ../../lib.mk |
10 | 11 | ||
11 | $(TEST_GEN_PROGS): ../harness.c ../utils.c | 12 | $(TEST_GEN_PROGS): ../harness.c ../utils.c |
diff --git a/tools/testing/selftests/powerpc/vphn/Makefile b/tools/testing/selftests/powerpc/vphn/Makefile index f8ced26748f8..fb82068c9fda 100644 --- a/tools/testing/selftests/powerpc/vphn/Makefile +++ b/tools/testing/selftests/powerpc/vphn/Makefile | |||
@@ -2,6 +2,7 @@ TEST_GEN_PROGS := test-vphn | |||
2 | 2 | ||
3 | CFLAGS += -m64 | 3 | CFLAGS += -m64 |
4 | 4 | ||
5 | top_srcdir = ../../../../.. | ||
5 | include ../../lib.mk | 6 | include ../../lib.mk |
6 | 7 | ||
7 | $(TEST_GEN_PROGS): ../harness.c | 8 | $(TEST_GEN_PROGS): ../harness.c |
diff --git a/tools/testing/selftests/vm/Makefile b/tools/testing/selftests/vm/Makefile index 9881876d2aa0..e94b7b14bcb2 100644 --- a/tools/testing/selftests/vm/Makefile +++ b/tools/testing/selftests/vm/Makefile | |||
@@ -26,10 +26,6 @@ TEST_PROGS := run_vmtests | |||
26 | 26 | ||
27 | include ../lib.mk | 27 | include ../lib.mk |
28 | 28 | ||
29 | $(OUTPUT)/userfaultfd: ../../../../usr/include/linux/kernel.h | ||
30 | $(OUTPUT)/userfaultfd: LDLIBS += -lpthread | 29 | $(OUTPUT)/userfaultfd: LDLIBS += -lpthread |
31 | 30 | ||
32 | $(OUTPUT)/mlock-random-test: LDLIBS += -lcap | 31 | $(OUTPUT)/mlock-random-test: LDLIBS += -lcap |
33 | |||
34 | ../../../../usr/include/linux/kernel.h: | ||
35 | make -C ../../../.. headers_install | ||