-rw-r--r--  Documentation/accelerators/ocxl.rst | 5
-rw-r--r--  Documentation/devicetree/bindings/dma/mv-xor-v2.txt | 6
-rw-r--r--  Documentation/sphinx/kerneldoc.py | 3
-rw-r--r--  Makefile | 2
-rw-r--r--  arch/arm64/kernel/cpu_errata.c | 4
-rw-r--r--  arch/arm64/mm/mmu.c | 8
-rw-r--r--  arch/mips/ath25/board.c | 2
-rw-r--r--  arch/mips/cavium-octeon/octeon-irq.c | 2
-rw-r--r--  arch/mips/kernel/smp-bmips.c | 8
-rw-r--r--  arch/mips/loongson64/Kconfig | 6
-rw-r--r--  arch/powerpc/boot/Makefile | 3
-rw-r--r--  arch/powerpc/kernel/prom_init.c | 1
-rw-r--r--  arch/powerpc/kvm/book3s_64_mmu_radix.c | 69
-rw-r--r--  arch/powerpc/kvm/book3s_hv.c | 17
-rw-r--r--  arch/powerpc/kvm/powerpc.c | 4
-rw-r--r--  arch/s390/kvm/kvm-s390.c | 2
-rw-r--r--  arch/x86/Kconfig | 11
-rw-r--r--  arch/x86/entry/entry_64_compat.S | 16
-rw-r--r--  arch/x86/entry/syscalls/syscall_32.tbl | 38
-rw-r--r--  arch/x86/entry/vsyscall/vsyscall_64.c | 16
-rw-r--r--  arch/x86/events/intel/uncore_snbep.c | 2
-rw-r--r--  arch/x86/ia32/sys_ia32.c | 74
-rw-r--r--  arch/x86/include/asm/pgtable_types.h | 2
-rw-r--r--  arch/x86/include/asm/sections.h | 1
-rw-r--r--  arch/x86/include/asm/sys_ia32.h | 48
-rw-r--r--  arch/x86/include/uapi/asm/mce.h | 1
-rw-r--r--  arch/x86/kernel/cpu/intel.c | 7
-rw-r--r--  arch/x86/kernel/cpu/mcheck/mce.c | 26
-rw-r--r--  arch/x86/kernel/cpu/microcode/core.c | 158
-rw-r--r--  arch/x86/kernel/cpu/microcode/intel.c | 48
-rw-r--r--  arch/x86/kernel/ioport.c | 2
-rw-r--r--  arch/x86/kernel/kprobes/core.c | 10
-rw-r--r--  arch/x86/kernel/vmlinux.lds.S | 2
-rw-r--r--  arch/x86/mm/pti.c | 2
-rw-r--r--  drivers/block/loop.c | 2
-rw-r--r--  drivers/block/xen-blkfront.c | 17
-rw-r--r--  drivers/clocksource/Kconfig | 1
-rw-r--r--  drivers/dma/mv_xor_v2.c | 25
-rw-r--r--  drivers/dma/sh/rcar-dmac.c | 2
-rw-r--r--  drivers/gpio/gpio-rcar.c | 38
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c | 3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c | 13
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_v6_0.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c | 30
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/si.c | 22
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/si_dpm.c | 50
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 165
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c | 6
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c | 6
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc.c | 6
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_link.c | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_resource.c | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_stream.c | 76
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc.h | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc_stream.h | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h | 10
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c | 38
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.h | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c | 91
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c | 18
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c | 65
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/irq/dce110/irq_service_dce110.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/virtual/virtual_link_encoder.c | 3
-rw-r--r--  drivers/gpu/drm/amd/display/include/grph_object_ctrl_defs.h | 5
-rw-r--r--  drivers/gpu/drm/amd/display/include/signal_types.h | 5
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c | 6
-rw-r--r--  drivers/gpu/drm/i915/i915_perf.c | 40
-rw-r--r--  drivers/gpu/drm/i915/intel_lrc.c | 5
-rw-r--r--  drivers/gpu/drm/radeon/cik.c | 31
-rw-r--r--  drivers/gpu/drm/sun4i/sun4i_crtc.c | 4
-rw-r--r--  drivers/gpu/drm/sun4i/sun4i_dotclock.c | 5
-rw-r--r--  drivers/gpu/drm/sun4i/sun4i_rgb.c | 2
-rw-r--r--  drivers/gpu/drm/sun4i/sun4i_tcon.c | 92
-rw-r--r--  drivers/gpu/drm/sun4i/sun4i_tcon.h | 1
-rw-r--r--  drivers/infiniband/core/addr.c | 15
-rw-r--r--  drivers/infiniband/core/cq.c | 21
-rw-r--r--  drivers/infiniband/core/device.c | 6
-rw-r--r--  drivers/infiniband/core/sa_query.c | 7
-rw-r--r--  drivers/infiniband/core/ucma.c | 6
-rw-r--r--  drivers/infiniband/hw/bnxt_re/ib_verbs.c | 26
-rw-r--r--  drivers/infiniband/hw/bnxt_re/ib_verbs.h | 3
-rw-r--r--  drivers/infiniband/hw/bnxt_re/main.c | 12
-rw-r--r--  drivers/infiniband/hw/bnxt_re/qplib_fp.c | 109
-rw-r--r--  drivers/infiniband/hw/bnxt_re/qplib_fp.h | 12
-rw-r--r--  drivers/infiniband/hw/bnxt_re/qplib_rcfw.c | 9
-rw-r--r--  drivers/infiniband/hw/bnxt_re/qplib_rcfw.h | 1
-rw-r--r--  drivers/infiniband/hw/bnxt_re/qplib_sp.c | 3
-rw-r--r--  drivers/infiniband/hw/bnxt_re/roce_hsi.h | 25
-rw-r--r--  drivers/infiniband/hw/mlx4/cq.c | 4
-rw-r--r--  drivers/infiniband/hw/mlx4/main.c | 11
-rw-r--r--  drivers/infiniband/hw/mlx5/cq.c | 10
-rw-r--r--  drivers/infiniband/hw/mlx5/main.c | 21
-rw-r--r--  drivers/infiniband/hw/mlx5/mr.c | 2
-rw-r--r--  drivers/infiniband/hw/mlx5/qp.c | 11
-rw-r--r--  drivers/infiniband/hw/qedr/qedr_iw_cm.c | 19
-rw-r--r--  drivers/infiniband/hw/qedr/verbs.c | 13
-rw-r--r--  drivers/md/bcache/super.c | 27
-rw-r--r--  drivers/md/dm-bufio.c | 16
-rw-r--r--  drivers/md/dm-mpath.c | 66
-rw-r--r--  drivers/md/dm-raid.c | 7
-rw-r--r--  drivers/md/dm-table.c | 16
-rw-r--r--  drivers/md/dm.c | 35
-rw-r--r--  drivers/misc/ocxl/file.c | 27
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/health.c | 2
-rw-r--r--  drivers/nvme/host/core.c | 2
-rw-r--r--  drivers/nvme/host/fabrics.c | 5
-rw-r--r--  drivers/nvme/host/fc.c | 27
-rw-r--r--  drivers/nvme/host/multipath.c | 30
-rw-r--r--  drivers/nvme/host/nvme.h | 8
-rw-r--r--  drivers/nvme/host/pci.c | 15
-rw-r--r--  drivers/pci/dwc/pcie-designware-host.c | 2
-rw-r--r--  drivers/perf/arm_pmu.c | 2
-rw-r--r--  drivers/platform/chrome/chromeos_laptop.c | 22
-rw-r--r--  drivers/platform/x86/Kconfig | 27
-rw-r--r--  drivers/platform/x86/Makefile | 5
-rw-r--r--  drivers/platform/x86/dell-smbios-base.c (renamed from drivers/platform/x86/dell-smbios.c) | 29
-rw-r--r--  drivers/platform/x86/dell-smbios-smm.c | 18
-rw-r--r--  drivers/platform/x86/dell-smbios-wmi.c | 14
-rw-r--r--  drivers/platform/x86/dell-smbios.h | 27
-rw-r--r--  drivers/video/fbdev/sbuslib.c | 4
-rw-r--r--  drivers/watchdog/f71808e_wdt.c | 3
-rw-r--r--  drivers/watchdog/hpwdt.c | 501
-rw-r--r--  drivers/watchdog/sbsa_gwdt.c | 3
-rw-r--r--  drivers/xen/xenbus/xenbus_probe.c | 5
-rw-r--r--  fs/overlayfs/Kconfig | 14
-rw-r--r--  fs/overlayfs/export.c | 216
-rw-r--r--  fs/overlayfs/inode.c | 58
-rw-r--r--  fs/overlayfs/namei.c | 6
-rw-r--r--  fs/overlayfs/overlayfs.h | 1
-rw-r--r--  fs/overlayfs/super.c | 1
-rw-r--r--  fs/xfs/xfs_iomap.c | 42
-rw-r--r--  include/linux/compat.h | 26
-rw-r--r--  include/linux/of_pci.h | 17
-rw-r--r--  include/uapi/misc/ocxl.h | 17
-rw-r--r--  kernel/compat.c | 19
-rw-r--r--  kernel/events/core.c | 4
-rw-r--r--  kernel/locking/rtmutex.c | 5
-rw-r--r--  kernel/panic.c | 2
-rw-r--r--  lib/bug.c | 4
-rw-r--r--  lib/test_kmod.c | 2
-rw-r--r--  mm/gup.c | 7
-rw-r--r--  mm/hugetlb.c | 2
-rw-r--r--  mm/memblock.c | 10
-rw-r--r--  mm/page_alloc.c | 9
-rw-r--r--  scripts/Makefile.lib | 8
-rw-r--r--  scripts/basic/fixdep.c | 15
-rwxr-xr-x  scripts/bloat-o-meter | 2
-rw-r--r--  sound/core/seq/seq_clientmgr.c | 22
-rw-r--r--  sound/core/seq/seq_fifo.c | 2
-rw-r--r--  sound/core/seq/seq_memory.c | 14
-rw-r--r--  sound/core/seq/seq_memory.h | 3
-rw-r--r--  sound/pci/hda/patch_conexant.c | 2
-rw-r--r--  sound/pci/hda/patch_realtek.c | 45
-rw-r--r--  tools/arch/x86/include/asm/cpufeatures.h | 1
-rw-r--r--  tools/include/uapi/linux/kvm.h | 2
-rw-r--r--  tools/objtool/check.c | 27
-rw-r--r--  tools/perf/Documentation/perf-kallsyms.txt | 2
-rw-r--r--  tools/perf/builtin-record.c | 9
-rw-r--r--  tools/perf/builtin-stat.c | 2
-rw-r--r--  tools/perf/builtin-top.c | 2
-rw-r--r--  tools/perf/perf.h | 1
-rw-r--r--  tools/perf/ui/browsers/annotate.c | 25
-rw-r--r--  tools/perf/util/auxtrace.c | 15
-rw-r--r--  tools/perf/util/record.c | 8
-rw-r--r--  tools/perf/util/trigger.h | 9
-rw-r--r--  tools/testing/selftests/powerpc/mm/subpage_prot.c | 14
-rw-r--r--  tools/testing/selftests/powerpc/tm/Makefile | 2
-rw-r--r--  tools/testing/selftests/powerpc/tm/tm-trap.c | 2
-rwxr-xr-x  tools/testing/selftests/vm/run_vmtests | 25
-rw-r--r--  tools/testing/selftests/x86/test_vsyscall.c | 11
177 files changed, 1884 insertions(+), 1660 deletions(-)
diff --git a/Documentation/accelerators/ocxl.rst b/Documentation/accelerators/ocxl.rst
index 4f7af841d935..ddcc58d01cfb 100644
--- a/Documentation/accelerators/ocxl.rst
+++ b/Documentation/accelerators/ocxl.rst
@@ -152,6 +152,11 @@ OCXL_IOCTL_IRQ_SET_FD:
   Associate an event fd to an AFU interrupt so that the user process
   can be notified when the AFU sends an interrupt.
 
+OCXL_IOCTL_GET_METADATA:
+
+  Obtains configuration information from the card, such as the size of
+  MMIO areas, the AFU version, and the PASID for the current context.
+
 
 mmap
 ----
diff --git a/Documentation/devicetree/bindings/dma/mv-xor-v2.txt b/Documentation/devicetree/bindings/dma/mv-xor-v2.txt
index 217a90eaabe7..9c38bbe7e6d7 100644
--- a/Documentation/devicetree/bindings/dma/mv-xor-v2.txt
+++ b/Documentation/devicetree/bindings/dma/mv-xor-v2.txt
@@ -11,7 +11,11 @@ Required properties:
   interrupts.
 
 Optional properties:
-- clocks: Optional reference to the clock used by the XOR engine.
+- clocks: Optional reference to the clocks used by the XOR engine.
+- clock-names: mandatory if there is a second clock, in this case the
+  name must be "core" for the first clock and "reg" for the second
+  one
+
 
 Example:
 
diff --git a/Documentation/sphinx/kerneldoc.py b/Documentation/sphinx/kerneldoc.py
index 39aa9e8697cc..fbedcc39460b 100644
--- a/Documentation/sphinx/kerneldoc.py
+++ b/Documentation/sphinx/kerneldoc.py
@@ -36,8 +36,7 @@ import glob
 
 from docutils import nodes, statemachine
 from docutils.statemachine import ViewList
-from docutils.parsers.rst import directives
-from sphinx.util.compat import Directive
+from docutils.parsers.rst import directives, Directive
 from sphinx.ext.autodoc import AutodocReporter
 
 __version__  =  '1.0'
diff --git a/Makefile b/Makefile
index c4322dea3ca2..e02d092bc2d6 100644
--- a/Makefile
+++ b/Makefile
@@ -2,7 +2,7 @@
 VERSION = 4
 PATCHLEVEL = 16
 SUBLEVEL = 0
-EXTRAVERSION = -rc4
+EXTRAVERSION = -rc5
 NAME = Fearless Coyote
 
 # *DOCUMENTATION*
diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c
index 52f15cd896e1..b5a28336c077 100644
--- a/arch/arm64/kernel/cpu_errata.c
+++ b/arch/arm64/kernel/cpu_errata.c
@@ -178,7 +178,7 @@ static int enable_smccc_arch_workaround_1(void *data)
 	case PSCI_CONDUIT_HVC:
 		arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
 				  ARM_SMCCC_ARCH_WORKAROUND_1, &res);
-		if (res.a0)
+		if ((int)res.a0 < 0)
 			return 0;
 		cb = call_hvc_arch_workaround_1;
 		smccc_start = __smccc_workaround_1_hvc_start;
@@ -188,7 +188,7 @@ static int enable_smccc_arch_workaround_1(void *data)
 	case PSCI_CONDUIT_SMC:
 		arm_smccc_1_1_smc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
 				  ARM_SMCCC_ARCH_WORKAROUND_1, &res);
-		if (res.a0)
+		if ((int)res.a0 < 0)
 			return 0;
 		cb = call_smc_arch_workaround_1;
 		smccc_start = __smccc_workaround_1_smc_start;
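
Why the cast matters: the SMCCC 1.1 ARCH_FEATURES query reports errors as
negative values in a0 (SMCCC_RET_NOT_SUPPORTED is -1, which arrives as a
huge unsigned number), while zero and positive values mean the workaround
is implemented; the old "if (res.a0)" check also bailed out on those
positive answers. A minimal standalone sketch of the corrected test,
assuming those return conventions:

    /* Standalone sketch of the signed-compare idiom used above. */
    #include <assert.h>

    static int workaround_available(unsigned long a0)
    {
            return !((int)a0 < 0);  /* negative => firmware rejected it */
    }

    int main(void)
    {
            assert(!workaround_available((unsigned long)-1)); /* NOT_SUPPORTED */
            assert(workaround_available(0));
            assert(workaround_available(1));  /* would have failed the old check */
            return 0;
    }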
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index 84a019f55022..8c704f1e53c2 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -108,7 +108,7 @@ static bool pgattr_change_is_safe(u64 old, u64 new)
 	 * The following mapping attributes may be updated in live
 	 * kernel mappings without the need for break-before-make.
 	 */
-	static const pteval_t mask = PTE_PXN | PTE_RDONLY | PTE_WRITE;
+	static const pteval_t mask = PTE_PXN | PTE_RDONLY | PTE_WRITE | PTE_NG;
 
 	/* creating or taking down mappings is always safe */
 	if (old == 0 || new == 0)
@@ -118,9 +118,9 @@ static bool pgattr_change_is_safe(u64 old, u64 new)
 	if ((old | new) & PTE_CONT)
 		return false;
 
-	/* Transitioning from Global to Non-Global is safe */
-	if (((old ^ new) == PTE_NG) && (new & PTE_NG))
-		return true;
+	/* Transitioning from Non-Global to Global is unsafe */
+	if (old & ~new & PTE_NG)
+		return false;
 
 	return ((old ^ new) & ~mask) == 0;
 }
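
The new test reads as "the nG bit was set in the old entry and is clear in
the new one", i.e. a non-global mapping becoming global, which requires
break-before-make. A standalone sketch of the bit logic, with an
illustrative PTE_NG value (arm64's headers define the real bit):

    #include <assert.h>

    #define PTE_NG (1UL << 11)  /* illustrative; see arch/arm64 pgtable headers */

    /* nonzero iff the bit goes from set (old) to clear (new) */
    static int ng_cleared(unsigned long old, unsigned long new)
    {
            return !!(old & ~new & PTE_NG);
    }

    int main(void)
    {
            assert(ng_cleared(PTE_NG, 0));        /* non-global -> global: unsafe */
            assert(!ng_cleared(0, PTE_NG));       /* global -> non-global: fine */
            assert(!ng_cleared(PTE_NG, PTE_NG));  /* unchanged */
            return 0;
    }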
diff --git a/arch/mips/ath25/board.c b/arch/mips/ath25/board.c
index 9ab48ff80c1c..6d11ae581ea7 100644
--- a/arch/mips/ath25/board.c
+++ b/arch/mips/ath25/board.c
@@ -135,6 +135,8 @@ int __init ath25_find_config(phys_addr_t base, unsigned long size)
 	}
 
 	board_data = kzalloc(BOARD_CONFIG_BUFSZ, GFP_KERNEL);
+	if (!board_data)
+		goto error;
 	ath25_board.config = (struct ath25_boarddata *)board_data;
 	memcpy_fromio(board_data, bcfg, 0x100);
 	if (broken_boarddata) {
diff --git a/arch/mips/cavium-octeon/octeon-irq.c b/arch/mips/cavium-octeon/octeon-irq.c
index 5b3a3f6a9ad3..d99f5242169e 100644
--- a/arch/mips/cavium-octeon/octeon-irq.c
+++ b/arch/mips/cavium-octeon/octeon-irq.c
@@ -2277,6 +2277,8 @@ static int __init octeon_irq_init_cib(struct device_node *ciu_node,
 	}
 
 	host_data = kzalloc(sizeof(*host_data), GFP_KERNEL);
+	if (!host_data)
+		return -ENOMEM;
 	raw_spin_lock_init(&host_data->lock);
 
 	addr = of_get_address(ciu_node, 0, NULL, NULL);
diff --git a/arch/mips/kernel/smp-bmips.c b/arch/mips/kernel/smp-bmips.c
index 9d41732a9146..159e83add4bb 100644
--- a/arch/mips/kernel/smp-bmips.c
+++ b/arch/mips/kernel/smp-bmips.c
@@ -168,11 +168,11 @@ static void bmips_prepare_cpus(unsigned int max_cpus)
 		return;
 	}
 
-	if (request_irq(IPI0_IRQ, bmips_ipi_interrupt, IRQF_PERCPU,
-			"smp_ipi0", NULL))
+	if (request_irq(IPI0_IRQ, bmips_ipi_interrupt,
+			IRQF_PERCPU | IRQF_NO_SUSPEND, "smp_ipi0", NULL))
 		panic("Can't request IPI0 interrupt");
-	if (request_irq(IPI1_IRQ, bmips_ipi_interrupt, IRQF_PERCPU,
-			"smp_ipi1", NULL))
+	if (request_irq(IPI1_IRQ, bmips_ipi_interrupt,
+			IRQF_PERCPU | IRQF_NO_SUSPEND, "smp_ipi1", NULL))
 		panic("Can't request IPI1 interrupt");
 }
 
diff --git a/arch/mips/loongson64/Kconfig b/arch/mips/loongson64/Kconfig
index bc2fdbfa8223..72af0c183969 100644
--- a/arch/mips/loongson64/Kconfig
+++ b/arch/mips/loongson64/Kconfig
@@ -7,6 +7,8 @@ choice
 config LEMOTE_FULOONG2E
 	bool "Lemote Fuloong(2e) mini-PC"
 	select ARCH_SPARSEMEM_ENABLE
+	select ARCH_MIGHT_HAVE_PC_PARPORT
+	select ARCH_MIGHT_HAVE_PC_SERIO
 	select CEVT_R4K
 	select CSRC_R4K
 	select SYS_HAS_CPU_LOONGSON2E
@@ -33,6 +35,8 @@ config LEMOTE_FULOONG2E
 config LEMOTE_MACH2F
 	bool "Lemote Loongson 2F family machines"
 	select ARCH_SPARSEMEM_ENABLE
+	select ARCH_MIGHT_HAVE_PC_PARPORT
+	select ARCH_MIGHT_HAVE_PC_SERIO
 	select BOARD_SCACHE
 	select BOOT_ELF32
 	select CEVT_R4K if ! MIPS_EXTERNAL_TIMER
@@ -62,6 +66,8 @@ config LEMOTE_MACH2F
 config LOONGSON_MACH3X
 	bool "Generic Loongson 3 family machines"
 	select ARCH_SPARSEMEM_ENABLE
+	select ARCH_MIGHT_HAVE_PC_PARPORT
+	select ARCH_MIGHT_HAVE_PC_SERIO
 	select GENERIC_ISA_DMA_SUPPORT_BROKEN
 	select BOOT_ELF32
 	select BOARD_SCACHE
diff --git a/arch/powerpc/boot/Makefile b/arch/powerpc/boot/Makefile
index ef6549e57157..26d5d2a5b8e9 100644
--- a/arch/powerpc/boot/Makefile
+++ b/arch/powerpc/boot/Makefile
@@ -101,7 +101,8 @@ $(addprefix $(obj)/,$(zlib-y)): \
 libfdt := fdt.c fdt_ro.c fdt_wip.c fdt_sw.c fdt_rw.c fdt_strerror.c
 libfdtheader := fdt.h libfdt.h libfdt_internal.h
 
-$(addprefix $(obj)/,$(libfdt) libfdt-wrapper.o simpleboot.o epapr.o opal.o): \
+$(addprefix $(obj)/,$(libfdt) libfdt-wrapper.o simpleboot.o epapr.o opal.o \
+	treeboot-akebono.o treeboot-currituck.o treeboot-iss4xx.o): \
 	$(addprefix $(obj)/,$(libfdtheader))
 
 src-wlib-y := string.S crt0.S stdio.c decompress.c main.c \
diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c
index d22c41c26bb3..acf4b2e0530c 100644
--- a/arch/powerpc/kernel/prom_init.c
+++ b/arch/powerpc/kernel/prom_init.c
@@ -874,7 +874,6 @@ struct ibm_arch_vec __cacheline_aligned ibm_architecture_vec = {
 		.mmu = 0,
 		.hash_ext = 0,
 		.radix_ext = 0,
-		.byte22 = 0,
 	},
 
 	/* option vector 6: IBM PAPR hints */
diff --git a/arch/powerpc/kvm/book3s_64_mmu_radix.c b/arch/powerpc/kvm/book3s_64_mmu_radix.c
index 0c854816e653..5cb4e4687107 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_radix.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_radix.c
@@ -195,6 +195,12 @@ static void kvmppc_pte_free(pte_t *ptep)
 	kmem_cache_free(kvm_pte_cache, ptep);
 }
 
+/* Like pmd_huge() and pmd_large(), but works regardless of config options */
+static inline int pmd_is_leaf(pmd_t pmd)
+{
+	return !!(pmd_val(pmd) & _PAGE_PTE);
+}
+
 static int kvmppc_create_pte(struct kvm *kvm, pte_t pte, unsigned long gpa,
 			     unsigned int level, unsigned long mmu_seq)
 {
@@ -219,7 +225,7 @@ static int kvmppc_create_pte(struct kvm *kvm, pte_t pte, unsigned long gpa,
 	else
 		new_pmd = pmd_alloc_one(kvm->mm, gpa);
 
-	if (level == 0 && !(pmd && pmd_present(*pmd)))
+	if (level == 0 && !(pmd && pmd_present(*pmd) && !pmd_is_leaf(*pmd)))
 		new_ptep = kvmppc_pte_alloc();
 
 	/* Check if we might have been invalidated; let the guest retry if so */
@@ -244,12 +250,30 @@ static int kvmppc_create_pte(struct kvm *kvm, pte_t pte, unsigned long gpa,
 			new_pmd = NULL;
 	}
 	pmd = pmd_offset(pud, gpa);
-	if (pmd_large(*pmd)) {
-		/* Someone else has instantiated a large page here; retry */
-		ret = -EAGAIN;
-		goto out_unlock;
-	}
-	if (level == 1 && !pmd_none(*pmd)) {
+	if (pmd_is_leaf(*pmd)) {
+		unsigned long lgpa = gpa & PMD_MASK;
+
+		/*
+		 * If we raced with another CPU which has just put
+		 * a 2MB pte in after we saw a pte page, try again.
+		 */
+		if (level == 0 && !new_ptep) {
+			ret = -EAGAIN;
+			goto out_unlock;
+		}
+		/* Valid 2MB page here already, remove it */
+		old = kvmppc_radix_update_pte(kvm, pmdp_ptep(pmd),
+					      ~0UL, 0, lgpa, PMD_SHIFT);
+		kvmppc_radix_tlbie_page(kvm, lgpa, PMD_SHIFT);
+		if (old & _PAGE_DIRTY) {
+			unsigned long gfn = lgpa >> PAGE_SHIFT;
+			struct kvm_memory_slot *memslot;
+			memslot = gfn_to_memslot(kvm, gfn);
+			if (memslot && memslot->dirty_bitmap)
+				kvmppc_update_dirty_map(memslot,
+							gfn, PMD_SIZE);
+		}
+	} else if (level == 1 && !pmd_none(*pmd)) {
 		/*
 		 * There's a page table page here, but we wanted
 		 * to install a large page. Tell the caller and let
@@ -412,28 +436,24 @@ int kvmppc_book3s_radix_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
 	} else {
 		page = pages[0];
 		pfn = page_to_pfn(page);
-		if (PageHuge(page)) {
-			page = compound_head(page);
-			pte_size <<= compound_order(page);
+		if (PageCompound(page)) {
+			pte_size <<= compound_order(compound_head(page));
 			/* See if we can insert a 2MB large-page PTE here */
 			if (pte_size >= PMD_SIZE &&
-			    (gpa & PMD_MASK & PAGE_MASK) ==
-			    (hva & PMD_MASK & PAGE_MASK)) {
+			    (gpa & (PMD_SIZE - PAGE_SIZE)) ==
+			    (hva & (PMD_SIZE - PAGE_SIZE))) {
 				level = 1;
 				pfn &= ~((PMD_SIZE >> PAGE_SHIFT) - 1);
 			}
 		}
 		/* See if we can provide write access */
 		if (writing) {
-			/*
-			 * We assume gup_fast has set dirty on the host PTE.
-			 */
 			pgflags |= _PAGE_WRITE;
 		} else {
 			local_irq_save(flags);
 			ptep = find_current_mm_pte(current->mm->pgd,
 						   hva, NULL, NULL);
-			if (ptep && pte_write(*ptep) && pte_dirty(*ptep))
+			if (ptep && pte_write(*ptep))
 				pgflags |= _PAGE_WRITE;
 			local_irq_restore(flags);
 		}
@@ -459,18 +479,15 @@ int kvmppc_book3s_radix_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
 		pte = pfn_pte(pfn, __pgprot(pgflags));
 		ret = kvmppc_create_pte(kvm, pte, gpa, level, mmu_seq);
 	}
-	if (ret == 0 || ret == -EAGAIN)
-		ret = RESUME_GUEST;
 
 	if (page) {
-		/*
-		 * We drop pages[0] here, not page because page might
-		 * have been set to the head page of a compound, but
-		 * we have to drop the reference on the correct tail
-		 * page to match the get inside gup()
-		 */
-		put_page(pages[0]);
+		if (!ret && (pgflags & _PAGE_WRITE))
+			set_page_dirty_lock(page);
+		put_page(page);
 	}
+
+	if (ret == 0 || ret == -EAGAIN)
+		ret = RESUME_GUEST;
 	return ret;
 }
 
@@ -644,7 +661,7 @@ void kvmppc_free_radix(struct kvm *kvm)
 			continue;
 		pmd = pmd_offset(pud, 0);
 		for (im = 0; im < PTRS_PER_PMD; ++im, ++pmd) {
-			if (pmd_huge(*pmd)) {
+			if (pmd_is_leaf(*pmd)) {
 				pmd_clear(pmd);
 				continue;
 			}
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index 89707354c2ef..9cb9448163c4 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -2885,7 +2885,7 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc)
 	 */
 	trace_hardirqs_on();
 
-	guest_enter();
+	guest_enter_irqoff();
 
 	srcu_idx = srcu_read_lock(&vc->kvm->srcu);
 
@@ -2893,8 +2893,6 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc)
 
 	srcu_read_unlock(&vc->kvm->srcu, srcu_idx);
 
-	guest_exit();
-
 	trace_hardirqs_off();
 	set_irq_happened(trap);
 
@@ -2937,6 +2935,7 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc)
 	kvmppc_set_host_core(pcpu);
 
 	local_irq_enable();
+	guest_exit();
 
 	/* Let secondaries go back to the offline loop */
 	for (i = 0; i < controlled_threads; ++i) {
@@ -3656,15 +3655,17 @@ static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu)
 		goto up_out;
 
 	psize = vma_kernel_pagesize(vma);
-	porder = __ilog2(psize);
 
 	up_read(&current->mm->mmap_sem);
 
 	/* We can handle 4k, 64k or 16M pages in the VRMA */
-	err = -EINVAL;
-	if (!(psize == 0x1000 || psize == 0x10000 ||
-	      psize == 0x1000000))
-		goto out_srcu;
+	if (psize >= 0x1000000)
+		psize = 0x1000000;
+	else if (psize >= 0x10000)
+		psize = 0x10000;
+	else
+		psize = 0x1000;
+	porder = __ilog2(psize);
 
 	senc = slb_pgsize_encoding(psize);
 	kvm->arch.vrma_slb_v = senc | SLB_VSID_B_1T |
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
index 403e642c78f5..52c205373986 100644
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -1345,7 +1345,7 @@ static int kvmppc_emulate_mmio_vsx_loadstore(struct kvm_vcpu *vcpu,
 int kvmppc_handle_load128_by2x64(struct kvm_run *run, struct kvm_vcpu *vcpu,
 		unsigned int rt, int is_default_endian)
 {
-	enum emulation_result emulated;
+	enum emulation_result emulated = EMULATE_DONE;
 
 	while (vcpu->arch.mmio_vmx_copy_nums) {
 		emulated = __kvmppc_handle_load(run, vcpu, rt, 8,
@@ -1608,7 +1608,9 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
 
 	kvm_sigset_deactivate(vcpu);
 
+#ifdef CONFIG_ALTIVEC
 out:
+#endif
 	vcpu_put(vcpu);
 	return r;
 }
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index 77d7818130db..339ac0964590 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -86,6 +86,7 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
 	{ "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) },
 	{ "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
 	{ "deliver_program_interruption", VCPU_STAT(deliver_program_int) },
+	{ "deliver_io_interrupt", VCPU_STAT(deliver_io_int) },
 	{ "exit_wait_state", VCPU_STAT(exit_wait_state) },
 	{ "instruction_epsw", VCPU_STAT(instruction_epsw) },
 	{ "instruction_gs", VCPU_STAT(instruction_gs) },
@@ -2146,6 +2147,7 @@ static void sca_add_vcpu(struct kvm_vcpu *vcpu)
 		/* we still need the basic sca for the ipte control */
 		vcpu->arch.sie_block->scaoh = (__u32)(((__u64)sca) >> 32);
 		vcpu->arch.sie_block->scaol = (__u32)(__u64)sca;
+		return;
 	}
 	read_lock(&vcpu->kvm->arch.sca_lock);
 	if (vcpu->kvm->arch.use_esca) {
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index eb7f43f23521..0fa71a78ec99 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -2307,7 +2307,7 @@ choice
 	  it can be used to assist security vulnerability exploitation.
 
 	  This setting can be changed at boot time via the kernel command
-	  line parameter vsyscall=[native|emulate|none].
+	  line parameter vsyscall=[emulate|none].
 
 	  On a system with recent enough glibc (2.14 or newer) and no
 	  static binaries, you can say None without a performance penalty
@@ -2315,15 +2315,6 @@ choice
 
 	  If unsure, select "Emulate".
 
-	config LEGACY_VSYSCALL_NATIVE
-		bool "Native"
-		help
-		  Actual executable code is located in the fixed vsyscall
-		  address mapping, implementing time() efficiently. Since
-		  this makes the mapping executable, it can be used during
-		  security vulnerability exploitation (traditionally as
-		  ROP gadgets). This configuration is not recommended.
-
 	config LEGACY_VSYSCALL_EMULATE
 		bool "Emulate"
 		help
diff --git a/arch/x86/entry/entry_64_compat.S b/arch/x86/entry/entry_64_compat.S
index e811dd9c5e99..08425c42f8b7 100644
--- a/arch/x86/entry/entry_64_compat.S
+++ b/arch/x86/entry/entry_64_compat.S
@@ -363,9 +363,7 @@ ENTRY(entry_INT80_compat)
 	pushq	2*8(%rdi)		/* regs->ip */
 	pushq	1*8(%rdi)		/* regs->orig_ax */
 
-	movq	(%rdi), %rdi		/* restore %rdi */
-
-	pushq	%rdi			/* pt_regs->di */
+	pushq	(%rdi)			/* pt_regs->di */
 	pushq	%rsi			/* pt_regs->si */
 	pushq	%rdx			/* pt_regs->dx */
 	pushq	%rcx			/* pt_regs->cx */
@@ -406,15 +404,3 @@ ENTRY(entry_INT80_compat)
 	TRACE_IRQS_ON
 	jmp	swapgs_restore_regs_and_return_to_usermode
 END(entry_INT80_compat)
-
-ENTRY(stub32_clone)
-	/*
-	 * The 32-bit clone ABI is: clone(..., int tls_val, int *child_tidptr).
-	 * The 64-bit clone ABI is: clone(..., int *child_tidptr, int tls_val).
-	 *
-	 * The native 64-bit kernel's sys_clone() implements the latter,
-	 * so we need to swap arguments here before calling it:
-	 */
-	xchg	%r8, %rcx
-	jmp	sys_clone
-ENDPROC(stub32_clone)
diff --git a/arch/x86/entry/syscalls/syscall_32.tbl b/arch/x86/entry/syscalls/syscall_32.tbl
index 448ac2161112..2a5e99cff859 100644
--- a/arch/x86/entry/syscalls/syscall_32.tbl
+++ b/arch/x86/entry/syscalls/syscall_32.tbl
@@ -8,12 +8,12 @@
 #
 0	i386	restart_syscall		sys_restart_syscall
 1	i386	exit			sys_exit
-2	i386	fork			sys_fork			sys_fork
+2	i386	fork			sys_fork
 3	i386	read			sys_read
 4	i386	write			sys_write
 5	i386	open			sys_open			compat_sys_open
 6	i386	close			sys_close
-7	i386	waitpid			sys_waitpid			sys32_waitpid
+7	i386	waitpid			sys_waitpid			compat_sys_x86_waitpid
 8	i386	creat			sys_creat
 9	i386	link			sys_link
 10	i386	unlink			sys_unlink
@@ -78,7 +78,7 @@
 69	i386	ssetmask		sys_ssetmask
 70	i386	setreuid		sys_setreuid16
 71	i386	setregid		sys_setregid16
-72	i386	sigsuspend		sys_sigsuspend			sys_sigsuspend
+72	i386	sigsuspend		sys_sigsuspend
 73	i386	sigpending		sys_sigpending			compat_sys_sigpending
 74	i386	sethostname		sys_sethostname
 75	i386	setrlimit		sys_setrlimit			compat_sys_setrlimit
@@ -96,7 +96,7 @@
 87	i386	swapon			sys_swapon
 88	i386	reboot			sys_reboot
 89	i386	readdir			sys_old_readdir			compat_sys_old_readdir
-90	i386	mmap			sys_old_mmap			sys32_mmap
+90	i386	mmap			sys_old_mmap			compat_sys_x86_mmap
 91	i386	munmap			sys_munmap
 92	i386	truncate		sys_truncate			compat_sys_truncate
 93	i386	ftruncate		sys_ftruncate			compat_sys_ftruncate
@@ -126,7 +126,7 @@
 117	i386	ipc			sys_ipc				compat_sys_ipc
 118	i386	fsync			sys_fsync
 119	i386	sigreturn		sys_sigreturn			sys32_sigreturn
-120	i386	clone			sys_clone			stub32_clone
+120	i386	clone			sys_clone			compat_sys_x86_clone
 121	i386	setdomainname		sys_setdomainname
 122	i386	uname			sys_newuname
 123	i386	modify_ldt		sys_modify_ldt
@@ -186,8 +186,8 @@
 177	i386	rt_sigtimedwait		sys_rt_sigtimedwait		compat_sys_rt_sigtimedwait
 178	i386	rt_sigqueueinfo		sys_rt_sigqueueinfo		compat_sys_rt_sigqueueinfo
 179	i386	rt_sigsuspend		sys_rt_sigsuspend
-180	i386	pread64			sys_pread64			sys32_pread
-181	i386	pwrite64		sys_pwrite64			sys32_pwrite
+180	i386	pread64			sys_pread64			compat_sys_x86_pread
+181	i386	pwrite64		sys_pwrite64			compat_sys_x86_pwrite
 182	i386	chown			sys_chown16
 183	i386	getcwd			sys_getcwd
 184	i386	capget			sys_capget
@@ -196,14 +196,14 @@
 187	i386	sendfile		sys_sendfile			compat_sys_sendfile
 188	i386	getpmsg
 189	i386	putpmsg
-190	i386	vfork			sys_vfork			sys_vfork
+190	i386	vfork			sys_vfork
 191	i386	ugetrlimit		sys_getrlimit			compat_sys_getrlimit
 192	i386	mmap2			sys_mmap_pgoff
-193	i386	truncate64		sys_truncate64			sys32_truncate64
-194	i386	ftruncate64		sys_ftruncate64			sys32_ftruncate64
-195	i386	stat64			sys_stat64			sys32_stat64
-196	i386	lstat64			sys_lstat64			sys32_lstat64
-197	i386	fstat64			sys_fstat64			sys32_fstat64
+193	i386	truncate64		sys_truncate64			compat_sys_x86_truncate64
+194	i386	ftruncate64		sys_ftruncate64			compat_sys_x86_ftruncate64
+195	i386	stat64			sys_stat64			compat_sys_x86_stat64
+196	i386	lstat64			sys_lstat64			compat_sys_x86_lstat64
+197	i386	fstat64			sys_fstat64			compat_sys_x86_fstat64
 198	i386	lchown32		sys_lchown
 199	i386	getuid32		sys_getuid
 200	i386	getgid32		sys_getgid
@@ -231,7 +231,7 @@
 # 222 is unused
 # 223 is unused
 224	i386	gettid			sys_gettid
-225	i386	readahead		sys_readahead			sys32_readahead
+225	i386	readahead		sys_readahead			compat_sys_x86_readahead
 226	i386	setxattr		sys_setxattr
 227	i386	lsetxattr		sys_lsetxattr
 228	i386	fsetxattr		sys_fsetxattr
@@ -256,7 +256,7 @@
 247	i386	io_getevents		sys_io_getevents		compat_sys_io_getevents
 248	i386	io_submit		sys_io_submit			compat_sys_io_submit
 249	i386	io_cancel		sys_io_cancel
-250	i386	fadvise64		sys_fadvise64			sys32_fadvise64
+250	i386	fadvise64		sys_fadvise64			compat_sys_x86_fadvise64
 # 251 is available for reuse (was briefly sys_set_zone_reclaim)
 252	i386	exit_group		sys_exit_group
 253	i386	lookup_dcookie		sys_lookup_dcookie		compat_sys_lookup_dcookie
@@ -278,7 +278,7 @@
 269	i386	fstatfs64		sys_fstatfs64			compat_sys_fstatfs64
 270	i386	tgkill			sys_tgkill
 271	i386	utimes			sys_utimes			compat_sys_utimes
-272	i386	fadvise64_64		sys_fadvise64_64		sys32_fadvise64_64
+272	i386	fadvise64_64		sys_fadvise64_64		compat_sys_x86_fadvise64_64
 273	i386	vserver
 274	i386	mbind			sys_mbind
 275	i386	get_mempolicy		sys_get_mempolicy		compat_sys_get_mempolicy
@@ -306,7 +306,7 @@
 297	i386	mknodat			sys_mknodat
 298	i386	fchownat		sys_fchownat
 299	i386	futimesat		sys_futimesat			compat_sys_futimesat
-300	i386	fstatat64		sys_fstatat64			sys32_fstatat
+300	i386	fstatat64		sys_fstatat64			compat_sys_x86_fstatat
 301	i386	unlinkat		sys_unlinkat
 302	i386	renameat		sys_renameat
 303	i386	linkat			sys_linkat
@@ -320,7 +320,7 @@
 311	i386	set_robust_list		sys_set_robust_list		compat_sys_set_robust_list
 312	i386	get_robust_list		sys_get_robust_list		compat_sys_get_robust_list
 313	i386	splice			sys_splice
-314	i386	sync_file_range		sys_sync_file_range		sys32_sync_file_range
+314	i386	sync_file_range		sys_sync_file_range		compat_sys_x86_sync_file_range
 315	i386	tee			sys_tee
 316	i386	vmsplice		sys_vmsplice			compat_sys_vmsplice
 317	i386	move_pages		sys_move_pages			compat_sys_move_pages
@@ -330,7 +330,7 @@
 321	i386	signalfd		sys_signalfd			compat_sys_signalfd
 322	i386	timerfd_create		sys_timerfd_create
 323	i386	eventfd			sys_eventfd
-324	i386	fallocate		sys_fallocate			sys32_fallocate
+324	i386	fallocate		sys_fallocate			compat_sys_x86_fallocate
 325	i386	timerfd_settime		sys_timerfd_settime		compat_sys_timerfd_settime
 326	i386	timerfd_gettime		sys_timerfd_gettime		compat_sys_timerfd_gettime
 327	i386	signalfd4		sys_signalfd4			compat_sys_signalfd4
diff --git a/arch/x86/entry/vsyscall/vsyscall_64.c b/arch/x86/entry/vsyscall/vsyscall_64.c
index 577fa8adb785..8560ef68a9d6 100644
--- a/arch/x86/entry/vsyscall/vsyscall_64.c
+++ b/arch/x86/entry/vsyscall/vsyscall_64.c
@@ -42,10 +42,8 @@
 #define CREATE_TRACE_POINTS
 #include "vsyscall_trace.h"
 
-static enum { EMULATE, NATIVE, NONE } vsyscall_mode =
-#if defined(CONFIG_LEGACY_VSYSCALL_NATIVE)
-	NATIVE;
-#elif defined(CONFIG_LEGACY_VSYSCALL_NONE)
+static enum { EMULATE, NONE } vsyscall_mode =
+#ifdef CONFIG_LEGACY_VSYSCALL_NONE
 	NONE;
 #else
 	EMULATE;
@@ -56,8 +54,6 @@ static int __init vsyscall_setup(char *str)
 	if (str) {
 		if (!strcmp("emulate", str))
 			vsyscall_mode = EMULATE;
-		else if (!strcmp("native", str))
-			vsyscall_mode = NATIVE;
 		else if (!strcmp("none", str))
 			vsyscall_mode = NONE;
 		else
@@ -139,10 +135,6 @@ bool emulate_vsyscall(struct pt_regs *regs, unsigned long address)
 
 	WARN_ON_ONCE(address != regs->ip);
 
-	/* This should be unreachable in NATIVE mode. */
-	if (WARN_ON(vsyscall_mode == NATIVE))
-		return false;
-
 	if (vsyscall_mode == NONE) {
 		warn_bad_vsyscall(KERN_INFO, regs,
 				  "vsyscall attempted with vsyscall=none");
@@ -370,9 +362,7 @@ void __init map_vsyscall(void)
 
 	if (vsyscall_mode != NONE) {
 		__set_fixmap(VSYSCALL_PAGE, physaddr_vsyscall,
-			     vsyscall_mode == NATIVE
-			     ? PAGE_KERNEL_VSYSCALL
-			     : PAGE_KERNEL_VVAR);
+			     PAGE_KERNEL_VVAR);
 		set_vsyscall_pgtable_user_bits(swapper_pg_dir);
 	}
 
diff --git a/arch/x86/events/intel/uncore_snbep.c b/arch/x86/events/intel/uncore_snbep.c
index 6d8044ab1060..22ec65bc033a 100644
--- a/arch/x86/events/intel/uncore_snbep.c
+++ b/arch/x86/events/intel/uncore_snbep.c
@@ -3606,7 +3606,7 @@ static struct intel_uncore_type skx_uncore_imc = {
 };
 
 static struct attribute *skx_upi_uncore_formats_attr[] = {
-	&format_attr_event_ext.attr,
+	&format_attr_event.attr,
 	&format_attr_umask_ext.attr,
 	&format_attr_edge.attr,
 	&format_attr_inv.attr,
diff --git a/arch/x86/ia32/sys_ia32.c b/arch/x86/ia32/sys_ia32.c
index 96cd33bbfc85..6512498bbef6 100644
--- a/arch/x86/ia32/sys_ia32.c
+++ b/arch/x86/ia32/sys_ia32.c
@@ -51,15 +51,14 @@
 #define AA(__x)		((unsigned long)(__x))
 
 
-asmlinkage long sys32_truncate64(const char __user *filename,
-				 unsigned long offset_low,
-				 unsigned long offset_high)
+COMPAT_SYSCALL_DEFINE3(x86_truncate64, const char __user *, filename,
+		       unsigned long, offset_low, unsigned long, offset_high)
 {
 	return sys_truncate(filename, ((loff_t) offset_high << 32) | offset_low);
 }
 
-asmlinkage long sys32_ftruncate64(unsigned int fd, unsigned long offset_low,
-				  unsigned long offset_high)
+COMPAT_SYSCALL_DEFINE3(x86_ftruncate64, unsigned int, fd,
+		       unsigned long, offset_low, unsigned long, offset_high)
 {
 	return sys_ftruncate(fd, ((loff_t) offset_high << 32) | offset_low);
 }
@@ -96,8 +95,8 @@ static int cp_stat64(struct stat64 __user *ubuf, struct kstat *stat)
 	return 0;
 }
 
-asmlinkage long sys32_stat64(const char __user *filename,
-			     struct stat64 __user *statbuf)
+COMPAT_SYSCALL_DEFINE2(x86_stat64, const char __user *, filename,
+		       struct stat64 __user *, statbuf)
 {
 	struct kstat stat;
 	int ret = vfs_stat(filename, &stat);
@@ -107,8 +106,8 @@ asmlinkage long sys32_stat64(const char __user *filename,
 	return ret;
 }
 
-asmlinkage long sys32_lstat64(const char __user *filename,
-			      struct stat64 __user *statbuf)
+COMPAT_SYSCALL_DEFINE2(x86_lstat64, const char __user *, filename,
+		       struct stat64 __user *, statbuf)
 {
 	struct kstat stat;
 	int ret = vfs_lstat(filename, &stat);
@@ -117,7 +116,8 @@ asmlinkage long sys32_lstat64(const char __user *filename,
 	return ret;
 }
 
-asmlinkage long sys32_fstat64(unsigned int fd, struct stat64 __user *statbuf)
+COMPAT_SYSCALL_DEFINE2(x86_fstat64, unsigned int, fd,
+		       struct stat64 __user *, statbuf)
 {
 	struct kstat stat;
 	int ret = vfs_fstat(fd, &stat);
@@ -126,8 +126,9 @@ asmlinkage long sys32_fstat64(unsigned int fd, struct stat64 __user *statbuf)
 	return ret;
 }
 
-asmlinkage long sys32_fstatat(unsigned int dfd, const char __user *filename,
-			      struct stat64 __user *statbuf, int flag)
+COMPAT_SYSCALL_DEFINE4(x86_fstatat, unsigned int, dfd,
+		       const char __user *, filename,
+		       struct stat64 __user *, statbuf, int, flag)
 {
 	struct kstat stat;
 	int error;
@@ -153,7 +154,7 @@ struct mmap_arg_struct32 {
 	unsigned int offset;
 };
 
-asmlinkage long sys32_mmap(struct mmap_arg_struct32 __user *arg)
+COMPAT_SYSCALL_DEFINE1(x86_mmap, struct mmap_arg_struct32 __user *, arg)
 {
 	struct mmap_arg_struct32 a;
 
@@ -167,22 +168,22 @@ asmlinkage long sys32_mmap(struct mmap_arg_struct32 __user *arg)
 			       a.offset>>PAGE_SHIFT);
 }
 
-asmlinkage long sys32_waitpid(compat_pid_t pid, unsigned int __user *stat_addr,
-			      int options)
+COMPAT_SYSCALL_DEFINE3(x86_waitpid, compat_pid_t, pid, unsigned int __user *,
+		       stat_addr, int, options)
 {
 	return compat_sys_wait4(pid, stat_addr, options, NULL);
 }
 
 /* warning: next two assume little endian */
-asmlinkage long sys32_pread(unsigned int fd, char __user *ubuf, u32 count,
-			    u32 poslo, u32 poshi)
+COMPAT_SYSCALL_DEFINE5(x86_pread, unsigned int, fd, char __user *, ubuf,
+		       u32, count, u32, poslo, u32, poshi)
 {
 	return sys_pread64(fd, ubuf, count,
 			 ((loff_t)AA(poshi) << 32) | AA(poslo));
 }
 
-asmlinkage long sys32_pwrite(unsigned int fd, const char __user *ubuf,
-			     u32 count, u32 poslo, u32 poshi)
+COMPAT_SYSCALL_DEFINE5(x86_pwrite, unsigned int, fd, const char __user *, ubuf,
+		       u32, count, u32, poslo, u32, poshi)
 {
 	return sys_pwrite64(fd, ubuf, count,
 			 ((loff_t)AA(poshi) << 32) | AA(poslo));
@@ -193,8 +194,9 @@ asmlinkage long sys32_pwrite(unsigned int fd, const char __user *ubuf,
  * Some system calls that need sign extended arguments. This could be
  * done by a generic wrapper.
  */
-long sys32_fadvise64_64(int fd, __u32 offset_low, __u32 offset_high,
-			__u32 len_low, __u32 len_high, int advice)
+COMPAT_SYSCALL_DEFINE6(x86_fadvise64_64, int, fd, __u32, offset_low,
+		       __u32, offset_high, __u32, len_low, __u32, len_high,
+		       int, advice)
 {
 	return sys_fadvise64_64(fd,
 				(((u64)offset_high)<<32) | offset_low,
@@ -202,31 +204,43 @@ long sys32_fadvise64_64(int fd, __u32 offset_low, __u32 offset_high,
 				advice);
 }
 
-asmlinkage ssize_t sys32_readahead(int fd, unsigned off_lo, unsigned off_hi,
-				   size_t count)
+COMPAT_SYSCALL_DEFINE4(x86_readahead, int, fd, unsigned int, off_lo,
+		       unsigned int, off_hi, size_t, count)
 {
 	return sys_readahead(fd, ((u64)off_hi << 32) | off_lo, count);
 }
 
-asmlinkage long sys32_sync_file_range(int fd, unsigned off_low, unsigned off_hi,
-				      unsigned n_low, unsigned n_hi, int flags)
+COMPAT_SYSCALL_DEFINE6(x86_sync_file_range, int, fd, unsigned int, off_low,
+		       unsigned int, off_hi, unsigned int, n_low,
+		       unsigned int, n_hi, int, flags)
 {
 	return sys_sync_file_range(fd,
 				   ((u64)off_hi << 32) | off_low,
 				   ((u64)n_hi << 32) | n_low, flags);
 }
 
-asmlinkage long sys32_fadvise64(int fd, unsigned offset_lo, unsigned offset_hi,
-				size_t len, int advice)
+COMPAT_SYSCALL_DEFINE5(x86_fadvise64, int, fd, unsigned int, offset_lo,
+		       unsigned int, offset_hi, size_t, len, int, advice)
 {
 	return sys_fadvise64_64(fd, ((u64)offset_hi << 32) | offset_lo,
 				len, advice);
 }
 
-asmlinkage long sys32_fallocate(int fd, int mode, unsigned offset_lo,
-				unsigned offset_hi, unsigned len_lo,
-				unsigned len_hi)
+COMPAT_SYSCALL_DEFINE6(x86_fallocate, int, fd, int, mode,
+		       unsigned int, offset_lo, unsigned int, offset_hi,
+		       unsigned int, len_lo, unsigned int, len_hi)
 {
 	return sys_fallocate(fd, mode, ((u64)offset_hi << 32) | offset_lo,
 			     ((u64)len_hi << 32) | len_lo);
 }
+
+/*
+ * The 32-bit clone ABI is CONFIG_CLONE_BACKWARDS
+ */
+COMPAT_SYSCALL_DEFINE5(x86_clone, unsigned long, clone_flags,
+		       unsigned long, newsp, int __user *, parent_tidptr,
+		       unsigned long, tls_val, int __user *, child_tidptr)
+{
+	return sys_clone(clone_flags, newsp, parent_tidptr, child_tidptr,
+			 tls_val);
+}
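
The new compat_sys_x86_clone() replaces the assembly stub32_clone deleted
from entry_64_compat.S above. Per that stub's comment, the 32-bit ABI
passes tls_val before child_tidptr while the native 64-bit sys_clone()
takes child_tidptr before tls_val, so the wrapper reorders the arguments
in C instead of via "xchg %r8, %rcx". A standalone sketch of the reorder
(all names here are illustrative stand-ins, not kernel functions):

    #include <stdio.h>

    /* stand-in for the native 64-bit order: ..., child_tidptr, tls_val */
    static long clone64(unsigned long flags, unsigned long newsp,
                        int *parent_tidptr, int *child_tidptr,
                        unsigned long tls_val)
    {
            printf("tls_val=%lu\n", tls_val);
            return 0;
    }

    /* 32-bit (CONFIG_CLONE_BACKWARDS) order: ..., tls_val, child_tidptr */
    static long clone32(unsigned long flags, unsigned long newsp,
                        int *parent_tidptr, unsigned long tls_val,
                        int *child_tidptr)
    {
            /* the same swap compat_sys_x86_clone performs above */
            return clone64(flags, newsp, parent_tidptr, child_tidptr, tls_val);
    }

    int main(void)
    {
            int parent = 0, child = 0;
            return (int)clone32(0, 0, &parent, 42, &child);
    }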
diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h
index 246f15b4e64c..acfe755562a6 100644
--- a/arch/x86/include/asm/pgtable_types.h
+++ b/arch/x86/include/asm/pgtable_types.h
@@ -174,7 +174,6 @@ enum page_cache_mode {
 #define __PAGE_KERNEL_RO		(__PAGE_KERNEL & ~_PAGE_RW)
 #define __PAGE_KERNEL_RX		(__PAGE_KERNEL_EXEC & ~_PAGE_RW)
 #define __PAGE_KERNEL_NOCACHE		(__PAGE_KERNEL | _PAGE_NOCACHE)
-#define __PAGE_KERNEL_VSYSCALL		(__PAGE_KERNEL_RX | _PAGE_USER)
 #define __PAGE_KERNEL_VVAR		(__PAGE_KERNEL_RO | _PAGE_USER)
 #define __PAGE_KERNEL_LARGE		(__PAGE_KERNEL | _PAGE_PSE)
 #define __PAGE_KERNEL_LARGE_EXEC	(__PAGE_KERNEL_EXEC | _PAGE_PSE)
@@ -206,7 +205,6 @@ enum page_cache_mode {
 #define PAGE_KERNEL_NOCACHE	__pgprot(__PAGE_KERNEL_NOCACHE | _PAGE_ENC)
 #define PAGE_KERNEL_LARGE	__pgprot(__PAGE_KERNEL_LARGE | _PAGE_ENC)
 #define PAGE_KERNEL_LARGE_EXEC	__pgprot(__PAGE_KERNEL_LARGE_EXEC | _PAGE_ENC)
-#define PAGE_KERNEL_VSYSCALL	__pgprot(__PAGE_KERNEL_VSYSCALL | _PAGE_ENC)
 #define PAGE_KERNEL_VVAR	__pgprot(__PAGE_KERNEL_VVAR | _PAGE_ENC)
 
 #define PAGE_KERNEL_IO		__pgprot(__PAGE_KERNEL_IO)
diff --git a/arch/x86/include/asm/sections.h b/arch/x86/include/asm/sections.h
index d6baf23782bc..5c019d23d06b 100644
--- a/arch/x86/include/asm/sections.h
+++ b/arch/x86/include/asm/sections.h
@@ -10,6 +10,7 @@ extern struct exception_table_entry __stop___ex_table[];
 
 #if defined(CONFIG_X86_64)
 extern char __end_rodata_hpage_align[];
+extern char __entry_trampoline_start[], __entry_trampoline_end[];
 #endif
 
 #endif	/* _ASM_X86_SECTIONS_H */
diff --git a/arch/x86/include/asm/sys_ia32.h b/arch/x86/include/asm/sys_ia32.h
index 82c34ee25a65..906794aa034e 100644
--- a/arch/x86/include/asm/sys_ia32.h
+++ b/arch/x86/include/asm/sys_ia32.h
@@ -20,31 +20,43 @@
20#include <asm/ia32.h> 20#include <asm/ia32.h>
21 21
22/* ia32/sys_ia32.c */ 22/* ia32/sys_ia32.c */
23asmlinkage long sys32_truncate64(const char __user *, unsigned long, unsigned long); 23asmlinkage long compat_sys_x86_truncate64(const char __user *, unsigned long,
24asmlinkage long sys32_ftruncate64(unsigned int, unsigned long, unsigned long); 24 unsigned long);
25asmlinkage long compat_sys_x86_ftruncate64(unsigned int, unsigned long,
26 unsigned long);
25 27
26asmlinkage long sys32_stat64(const char __user *, struct stat64 __user *); 28asmlinkage long compat_sys_x86_stat64(const char __user *,
27asmlinkage long sys32_lstat64(const char __user *, struct stat64 __user *); 29 struct stat64 __user *);
28asmlinkage long sys32_fstat64(unsigned int, struct stat64 __user *); 30asmlinkage long compat_sys_x86_lstat64(const char __user *,
29asmlinkage long sys32_fstatat(unsigned int, const char __user *, 31 struct stat64 __user *);
32asmlinkage long compat_sys_x86_fstat64(unsigned int, struct stat64 __user *);
33asmlinkage long compat_sys_x86_fstatat(unsigned int, const char __user *,
30 struct stat64 __user *, int); 34 struct stat64 __user *, int);
31struct mmap_arg_struct32; 35struct mmap_arg_struct32;
32asmlinkage long sys32_mmap(struct mmap_arg_struct32 __user *); 36asmlinkage long compat_sys_x86_mmap(struct mmap_arg_struct32 __user *);
33 37
34asmlinkage long sys32_waitpid(compat_pid_t, unsigned int __user *, int); 38asmlinkage long compat_sys_x86_waitpid(compat_pid_t, unsigned int __user *,
39 int);
35 40
36asmlinkage long sys32_pread(unsigned int, char __user *, u32, u32, u32); 41asmlinkage long compat_sys_x86_pread(unsigned int, char __user *, u32, u32,
37asmlinkage long sys32_pwrite(unsigned int, const char __user *, u32, u32, u32); 42 u32);
43asmlinkage long compat_sys_x86_pwrite(unsigned int, const char __user *, u32,
44 u32, u32);
38 45
39long sys32_fadvise64_64(int, __u32, __u32, __u32, __u32, int); 46asmlinkage long compat_sys_x86_fadvise64_64(int, __u32, __u32, __u32, __u32,
40long sys32_vm86_warning(void); 47 int);
41 48
42asmlinkage ssize_t sys32_readahead(int, unsigned, unsigned, size_t); 49asmlinkage ssize_t compat_sys_x86_readahead(int, unsigned int, unsigned int,
43asmlinkage long sys32_sync_file_range(int, unsigned, unsigned, 50 size_t);
44 unsigned, unsigned, int); 51asmlinkage long compat_sys_x86_sync_file_range(int, unsigned int, unsigned int,
45asmlinkage long sys32_fadvise64(int, unsigned, unsigned, size_t, int); 52 unsigned int, unsigned int,
46asmlinkage long sys32_fallocate(int, int, unsigned, 53 int);
47 unsigned, unsigned, unsigned); 54asmlinkage long compat_sys_x86_fadvise64(int, unsigned int, unsigned int,
55 size_t, int);
56asmlinkage long compat_sys_x86_fallocate(int, int, unsigned int, unsigned int,
57 unsigned int, unsigned int);
58asmlinkage long compat_sys_x86_clone(unsigned long, unsigned long, int __user *,
59 unsigned long, int __user *);
48 60
49/* ia32/ia32_signal.c */ 61/* ia32/ia32_signal.c */
50asmlinkage long sys32_sigreturn(void); 62asmlinkage long sys32_sigreturn(void);
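
The prototypes above pass 64-bit file offsets as two unsigned long halves because the ia32 ABI hands them over in separate 32-bit registers. A minimal sketch of how such a compat wrapper typically reassembles the halves; compat_truncate64_sketch() and do_sys_truncate() are illustrative names, not part of this patch:

/*
 * Hedged sketch only: rebuild a 64-bit offset from the two 32-bit
 * halves seen in the prototypes above.
 */
static long compat_truncate64_sketch(const char __user *path,
				     unsigned long offset_low,
				     unsigned long offset_high)
{
	loff_t offset = ((loff_t)offset_high << 32) | offset_low;

	return do_sys_truncate(path, offset);	/* hypothetical helper */
}
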
diff --git a/arch/x86/include/uapi/asm/mce.h b/arch/x86/include/uapi/asm/mce.h
index 91723461dc1f..435db58a7bad 100644
--- a/arch/x86/include/uapi/asm/mce.h
+++ b/arch/x86/include/uapi/asm/mce.h
@@ -30,6 +30,7 @@ struct mce {
30 __u64 synd; /* MCA_SYND MSR: only valid on SMCA systems */ 30 __u64 synd; /* MCA_SYND MSR: only valid on SMCA systems */
31 __u64 ipid; /* MCA_IPID MSR: only valid on SMCA systems */ 31 __u64 ipid; /* MCA_IPID MSR: only valid on SMCA systems */
32 __u64 ppin; /* Protected Processor Inventory Number */ 32 __u64 ppin; /* Protected Processor Inventory Number */
33 __u32 microcode;/* Microcode revision */
33}; 34};
34 35
35#define MCE_GET_RECORD_LEN _IOR('M', 1, int) 36#define MCE_GET_RECORD_LEN _IOR('M', 1, int)
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
index d19e903214b4..4aa9fd379390 100644
--- a/arch/x86/kernel/cpu/intel.c
+++ b/arch/x86/kernel/cpu/intel.c
@@ -144,6 +144,13 @@ static bool bad_spectre_microcode(struct cpuinfo_x86 *c)
144{ 144{
145 int i; 145 int i;
146 146
147 /*
 148 * We know that the hypervisor lies to us about the microcode version, so
149 * we may as well hope that it is running the correct version.
150 */
151 if (cpu_has(c, X86_FEATURE_HYPERVISOR))
152 return false;
153
147 for (i = 0; i < ARRAY_SIZE(spectre_bad_microcodes); i++) { 154 for (i = 0; i < ARRAY_SIZE(spectre_bad_microcodes); i++) {
148 if (c->x86_model == spectre_bad_microcodes[i].model && 155 if (c->x86_model == spectre_bad_microcodes[i].model &&
149 c->x86_stepping == spectre_bad_microcodes[i].stepping) 156 c->x86_stepping == spectre_bad_microcodes[i].stepping)
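
For context, the early return added above short-circuits a scan of a model/stepping table of known-bad microcode revisions. A self-contained sketch of that lookup shape; the table entries here are illustrative, not the real blacklist:

/* Hedged sketch of a model/stepping keyed bad-microcode lookup. */
struct sku_ucode { u8 model; u8 stepping; u32 microcode; };

static const struct sku_ucode bad_ucode[] = {
	{ 0x4e, 0x03, 0xc2 },	/* illustrative entries only */
	{ 0x5e, 0x03, 0xc2 },
};

static bool ucode_is_bad(u8 model, u8 stepping, u32 rev)
{
	size_t i;

	for (i = 0; i < ARRAY_SIZE(bad_ucode); i++) {
		if (bad_ucode[i].model == model &&
		    bad_ucode[i].stepping == stepping)
			return rev <= bad_ucode[i].microcode;
	}
	return false;
}
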
diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
index 8ff94d1e2dce..466f47301334 100644
--- a/arch/x86/kernel/cpu/mcheck/mce.c
+++ b/arch/x86/kernel/cpu/mcheck/mce.c
@@ -56,6 +56,9 @@
56 56
57static DEFINE_MUTEX(mce_log_mutex); 57static DEFINE_MUTEX(mce_log_mutex);
58 58
59/* sysfs synchronization */
60static DEFINE_MUTEX(mce_sysfs_mutex);
61
59#define CREATE_TRACE_POINTS 62#define CREATE_TRACE_POINTS
60#include <trace/events/mce.h> 63#include <trace/events/mce.h>
61 64
@@ -130,6 +133,8 @@ void mce_setup(struct mce *m)
130 133
131 if (this_cpu_has(X86_FEATURE_INTEL_PPIN)) 134 if (this_cpu_has(X86_FEATURE_INTEL_PPIN))
132 rdmsrl(MSR_PPIN, m->ppin); 135 rdmsrl(MSR_PPIN, m->ppin);
136
137 m->microcode = boot_cpu_data.microcode;
133} 138}
134 139
135DEFINE_PER_CPU(struct mce, injectm); 140DEFINE_PER_CPU(struct mce, injectm);
@@ -262,7 +267,7 @@ static void __print_mce(struct mce *m)
262 */ 267 */
263 pr_emerg(HW_ERR "PROCESSOR %u:%x TIME %llu SOCKET %u APIC %x microcode %x\n", 268 pr_emerg(HW_ERR "PROCESSOR %u:%x TIME %llu SOCKET %u APIC %x microcode %x\n",
264 m->cpuvendor, m->cpuid, m->time, m->socketid, m->apicid, 269 m->cpuvendor, m->cpuid, m->time, m->socketid, m->apicid,
265 cpu_data(m->extcpu).microcode); 270 m->microcode);
266} 271}
267 272
268static void print_mce(struct mce *m) 273static void print_mce(struct mce *m)
@@ -2086,6 +2091,7 @@ static ssize_t set_ignore_ce(struct device *s,
2086 if (kstrtou64(buf, 0, &new) < 0) 2091 if (kstrtou64(buf, 0, &new) < 0)
2087 return -EINVAL; 2092 return -EINVAL;
2088 2093
2094 mutex_lock(&mce_sysfs_mutex);
2089 if (mca_cfg.ignore_ce ^ !!new) { 2095 if (mca_cfg.ignore_ce ^ !!new) {
2090 if (new) { 2096 if (new) {
2091 /* disable ce features */ 2097 /* disable ce features */
@@ -2098,6 +2104,8 @@ static ssize_t set_ignore_ce(struct device *s,
2098 on_each_cpu(mce_enable_ce, (void *)1, 1); 2104 on_each_cpu(mce_enable_ce, (void *)1, 1);
2099 } 2105 }
2100 } 2106 }
2107 mutex_unlock(&mce_sysfs_mutex);
2108
2101 return size; 2109 return size;
2102} 2110}
2103 2111
@@ -2110,6 +2118,7 @@ static ssize_t set_cmci_disabled(struct device *s,
2110 if (kstrtou64(buf, 0, &new) < 0) 2118 if (kstrtou64(buf, 0, &new) < 0)
2111 return -EINVAL; 2119 return -EINVAL;
2112 2120
2121 mutex_lock(&mce_sysfs_mutex);
2113 if (mca_cfg.cmci_disabled ^ !!new) { 2122 if (mca_cfg.cmci_disabled ^ !!new) {
2114 if (new) { 2123 if (new) {
2115 /* disable cmci */ 2124 /* disable cmci */
@@ -2121,6 +2130,8 @@ static ssize_t set_cmci_disabled(struct device *s,
2121 on_each_cpu(mce_enable_ce, NULL, 1); 2130 on_each_cpu(mce_enable_ce, NULL, 1);
2122 } 2131 }
2123 } 2132 }
2133 mutex_unlock(&mce_sysfs_mutex);
2134
2124 return size; 2135 return size;
2125} 2136}
2126 2137
@@ -2128,8 +2139,19 @@ static ssize_t store_int_with_restart(struct device *s,
2128 struct device_attribute *attr, 2139 struct device_attribute *attr,
2129 const char *buf, size_t size) 2140 const char *buf, size_t size)
2130{ 2141{
2131 ssize_t ret = device_store_int(s, attr, buf, size); 2142 unsigned long old_check_interval = check_interval;
2143 ssize_t ret = device_store_ulong(s, attr, buf, size);
2144
2145 if (check_interval == old_check_interval)
2146 return ret;
2147
2148 if (check_interval < 1)
2149 check_interval = 1;
2150
2151 mutex_lock(&mce_sysfs_mutex);
2132 mce_restart(); 2152 mce_restart();
2153 mutex_unlock(&mce_sysfs_mutex);
2154
2133 return ret; 2155 return ret;
2134} 2156}
2135 2157
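
The three hunks above share one idea: a sysfs writer that reconfigures machine-check state must not race another writer, so the reconfiguration step is bracketed by mce_sysfs_mutex and skipped entirely when the value did not change. A condensed sketch of the pattern, with generic stand-in names (reconfigure_timers() plays the role of mce_restart()):

static DEFINE_MUTEX(cfg_mutex);
static unsigned long interval = 5;

static ssize_t interval_store(const char *buf, size_t size)
{
	unsigned long old = interval;

	if (kstrtoul(buf, 0, &interval) < 0)
		return -EINVAL;

	if (interval < 1)		/* clamp, as the patch does */
		interval = 1;

	if (interval == old)		/* unchanged: no restart needed */
		return size;

	mutex_lock(&cfg_mutex);		/* one reconfiguration at a time */
	reconfigure_timers();		/* hypothetical stand-in */
	mutex_unlock(&cfg_mutex);

	return size;
}
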
diff --git a/arch/x86/kernel/cpu/microcode/core.c b/arch/x86/kernel/cpu/microcode/core.c
index aa1b9a422f2b..70ecbc8099c9 100644
--- a/arch/x86/kernel/cpu/microcode/core.c
+++ b/arch/x86/kernel/cpu/microcode/core.c
@@ -22,13 +22,16 @@
22#define pr_fmt(fmt) "microcode: " fmt 22#define pr_fmt(fmt) "microcode: " fmt
23 23
24#include <linux/platform_device.h> 24#include <linux/platform_device.h>
25#include <linux/stop_machine.h>
25#include <linux/syscore_ops.h> 26#include <linux/syscore_ops.h>
26#include <linux/miscdevice.h> 27#include <linux/miscdevice.h>
27#include <linux/capability.h> 28#include <linux/capability.h>
28#include <linux/firmware.h> 29#include <linux/firmware.h>
29#include <linux/kernel.h> 30#include <linux/kernel.h>
31#include <linux/delay.h>
30#include <linux/mutex.h> 32#include <linux/mutex.h>
31#include <linux/cpu.h> 33#include <linux/cpu.h>
34#include <linux/nmi.h>
32#include <linux/fs.h> 35#include <linux/fs.h>
33#include <linux/mm.h> 36#include <linux/mm.h>
34 37
@@ -64,6 +67,11 @@ LIST_HEAD(microcode_cache);
64 */ 67 */
65static DEFINE_MUTEX(microcode_mutex); 68static DEFINE_MUTEX(microcode_mutex);
66 69
70/*
71 * Serialize late loading so that CPUs get updated one-by-one.
72 */
73static DEFINE_SPINLOCK(update_lock);
74
67struct ucode_cpu_info ucode_cpu_info[NR_CPUS]; 75struct ucode_cpu_info ucode_cpu_info[NR_CPUS];
68 76
69struct cpu_info_ctx { 77struct cpu_info_ctx {
@@ -373,26 +381,23 @@ static int collect_cpu_info(int cpu)
373 return ret; 381 return ret;
374} 382}
375 383
376struct apply_microcode_ctx {
377 enum ucode_state err;
378};
379
380static void apply_microcode_local(void *arg) 384static void apply_microcode_local(void *arg)
381{ 385{
382 struct apply_microcode_ctx *ctx = arg; 386 enum ucode_state *err = arg;
383 387
384 ctx->err = microcode_ops->apply_microcode(smp_processor_id()); 388 *err = microcode_ops->apply_microcode(smp_processor_id());
385} 389}
386 390
387static int apply_microcode_on_target(int cpu) 391static int apply_microcode_on_target(int cpu)
388{ 392{
389 struct apply_microcode_ctx ctx = { .err = 0 }; 393 enum ucode_state err;
390 int ret; 394 int ret;
391 395
392 ret = smp_call_function_single(cpu, apply_microcode_local, &ctx, 1); 396 ret = smp_call_function_single(cpu, apply_microcode_local, &err, 1);
393 if (!ret) 397 if (!ret) {
394 ret = ctx.err; 398 if (err == UCODE_ERROR)
395 399 ret = 1;
400 }
396 return ret; 401 return ret;
397} 402}
398 403
@@ -489,19 +494,100 @@ static void __exit microcode_dev_exit(void)
489/* fake device for request_firmware */ 494/* fake device for request_firmware */
490static struct platform_device *microcode_pdev; 495static struct platform_device *microcode_pdev;
491 496
492static enum ucode_state reload_for_cpu(int cpu) 497/*
 498 * Late loading dance. Why the heavy-handed stop_machine() effort?
499 *
500 * - HT siblings must be idle and not execute other code while the other sibling
501 * is loading microcode in order to avoid any negative interactions caused by
502 * the loading.
503 *
504 * - In addition, microcode update on the cores must be serialized until this
505 * requirement can be relaxed in the future. Right now, this is conservative
506 * and good.
507 */
508#define SPINUNIT 100 /* 100 nsec */
509
510static int check_online_cpus(void)
493{ 511{
494 struct ucode_cpu_info *uci = ucode_cpu_info + cpu; 512 if (num_online_cpus() == num_present_cpus())
495 enum ucode_state ustate; 513 return 0;
496 514
497 if (!uci->valid) 515 pr_err("Not all CPUs online, aborting microcode update.\n");
498 return UCODE_OK; 516
517 return -EINVAL;
518}
519
520static atomic_t late_cpus;
521
522/*
523 * Returns:
524 * < 0 - on error
525 * 0 - no update done
526 * 1 - microcode was updated
527 */
528static int __reload_late(void *info)
529{
530 unsigned int timeout = NSEC_PER_SEC;
531 int all_cpus = num_online_cpus();
532 int cpu = smp_processor_id();
533 enum ucode_state err;
534 int ret = 0;
535
536 atomic_dec(&late_cpus);
537
538 /*
539 * Wait for all CPUs to arrive. A load will not be attempted unless all
540 * CPUs show up.
 541 */
542 while (atomic_read(&late_cpus)) {
543 if (timeout < SPINUNIT) {
 544 pr_err("Timeout while waiting for CPU rendezvous, remaining: %d\n",
545 atomic_read(&late_cpus));
546 return -1;
547 }
499 548
500 ustate = microcode_ops->request_microcode_fw(cpu, &microcode_pdev->dev, true); 549 ndelay(SPINUNIT);
501 if (ustate != UCODE_OK) 550 timeout -= SPINUNIT;
502 return ustate;
503 551
504 return apply_microcode_on_target(cpu); 552 touch_nmi_watchdog();
553 }
554
555 spin_lock(&update_lock);
556 apply_microcode_local(&err);
557 spin_unlock(&update_lock);
558
559 if (err > UCODE_NFOUND) {
560 pr_warn("Error reloading microcode on CPU %d\n", cpu);
561 ret = -1;
562 } else if (err == UCODE_UPDATED) {
563 ret = 1;
564 }
565
566 atomic_inc(&late_cpus);
567
568 while (atomic_read(&late_cpus) != all_cpus)
569 cpu_relax();
570
571 return ret;
572}
573
574/*
 575 * Reload microcode late on all CPUs. Wait up to a second for them
 576 * to all gather together.
577 */
578static int microcode_reload_late(void)
579{
580 int ret;
581
582 atomic_set(&late_cpus, num_online_cpus());
583
584 ret = stop_machine_cpuslocked(__reload_late, NULL, cpu_online_mask);
585 if (ret < 0)
586 return ret;
587 else if (ret > 0)
588 microcode_check();
589
590 return ret;
505} 591}
506 592
507static ssize_t reload_store(struct device *dev, 593static ssize_t reload_store(struct device *dev,
@@ -509,10 +595,9 @@ static ssize_t reload_store(struct device *dev,
509 const char *buf, size_t size) 595 const char *buf, size_t size)
510{ 596{
511 enum ucode_state tmp_ret = UCODE_OK; 597 enum ucode_state tmp_ret = UCODE_OK;
512 bool do_callback = false; 598 int bsp = boot_cpu_data.cpu_index;
513 unsigned long val; 599 unsigned long val;
514 ssize_t ret = 0; 600 ssize_t ret = 0;
515 int cpu;
516 601
517 ret = kstrtoul(buf, 0, &val); 602 ret = kstrtoul(buf, 0, &val);
518 if (ret) 603 if (ret)
@@ -521,29 +606,24 @@ static ssize_t reload_store(struct device *dev,
521 if (val != 1) 606 if (val != 1)
522 return size; 607 return size;
523 608
524 get_online_cpus(); 609 tmp_ret = microcode_ops->request_microcode_fw(bsp, &microcode_pdev->dev, true);
525 mutex_lock(&microcode_mutex); 610 if (tmp_ret != UCODE_OK)
526 for_each_online_cpu(cpu) { 611 return size;
527 tmp_ret = reload_for_cpu(cpu);
528 if (tmp_ret > UCODE_NFOUND) {
529 pr_warn("Error reloading microcode on CPU %d\n", cpu);
530
531 /* set retval for the first encountered reload error */
532 if (!ret)
533 ret = -EINVAL;
534 }
535 612
536 if (tmp_ret == UCODE_UPDATED) 613 get_online_cpus();
537 do_callback = true;
538 }
539 614
540 if (!ret && do_callback) 615 ret = check_online_cpus();
541 microcode_check(); 616 if (ret)
617 goto put;
542 618
619 mutex_lock(&microcode_mutex);
620 ret = microcode_reload_late();
543 mutex_unlock(&microcode_mutex); 621 mutex_unlock(&microcode_mutex);
622
623put:
544 put_online_cpus(); 624 put_online_cpus();
545 625
546 if (!ret) 626 if (ret >= 0)
547 ret = size; 627 ret = size;
548 628
549 return ret; 629 return ret;
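
The late-loading rendezvous above is, in essence, a two-phase barrier built from a single atomic counter: each CPU decrements on arrival, spins until the counter drains to zero, performs its serialized update, then increments and waits for the counter to refill. A user-space C11 sketch of the same shape; do_serialized_update() is a hypothetical stand-in, and the kernel additionally bounds the first spin with a one-second timeout and touches the NMI watchdog:

#include <stdatomic.h>

static atomic_int late_cpus;	/* caller pre-sets this to ncpus */

static int rendezvous(int ncpus)
{
	atomic_fetch_sub(&late_cpus, 1);

	while (atomic_load(&late_cpus))		/* phase 1: all arrive */
		;

	do_serialized_update();			/* one CPU at a time in the kernel */

	atomic_fetch_add(&late_cpus, 1);

	while (atomic_load(&late_cpus) != ncpus) /* phase 2: all finish */
		;

	return 0;
}
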
diff --git a/arch/x86/kernel/cpu/microcode/intel.c b/arch/x86/kernel/cpu/microcode/intel.c
index 923054a6b760..2aded9db1d42 100644
--- a/arch/x86/kernel/cpu/microcode/intel.c
+++ b/arch/x86/kernel/cpu/microcode/intel.c
@@ -589,6 +589,23 @@ static int apply_microcode_early(struct ucode_cpu_info *uci, bool early)
589 if (!mc) 589 if (!mc)
590 return 0; 590 return 0;
591 591
592 /*
 593 * Save us the MSR write below - which is a particularly expensive
594 * operation - when the other hyperthread has updated the microcode
595 * already.
596 */
597 rev = intel_get_microcode_revision();
598 if (rev >= mc->hdr.rev) {
599 uci->cpu_sig.rev = rev;
600 return UCODE_OK;
601 }
602
603 /*
604 * Writeback and invalidate caches before updating microcode to avoid
605 * internal issues depending on what the microcode is updating.
606 */
607 native_wbinvd();
608
592 /* write microcode via MSR 0x79 */ 609 /* write microcode via MSR 0x79 */
593 native_wrmsrl(MSR_IA32_UCODE_WRITE, (unsigned long)mc->bits); 610 native_wrmsrl(MSR_IA32_UCODE_WRITE, (unsigned long)mc->bits);
594 611
@@ -774,9 +791,9 @@ static int collect_cpu_info(int cpu_num, struct cpu_signature *csig)
774 791
775static enum ucode_state apply_microcode_intel(int cpu) 792static enum ucode_state apply_microcode_intel(int cpu)
776{ 793{
794 struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
795 struct cpuinfo_x86 *c = &cpu_data(cpu);
777 struct microcode_intel *mc; 796 struct microcode_intel *mc;
778 struct ucode_cpu_info *uci;
779 struct cpuinfo_x86 *c;
780 static int prev_rev; 797 static int prev_rev;
781 u32 rev; 798 u32 rev;
782 799
@@ -784,15 +801,32 @@ static enum ucode_state apply_microcode_intel(int cpu)
784 if (WARN_ON(raw_smp_processor_id() != cpu)) 801 if (WARN_ON(raw_smp_processor_id() != cpu))
785 return UCODE_ERROR; 802 return UCODE_ERROR;
786 803
787 uci = ucode_cpu_info + cpu; 804 /* Look for a newer patch in our cache: */
788 mc = uci->mc; 805 mc = find_patch(uci);
789 if (!mc) { 806 if (!mc) {
790 /* Look for a newer patch in our cache: */ 807 mc = uci->mc;
791 mc = find_patch(uci);
792 if (!mc) 808 if (!mc)
793 return UCODE_NFOUND; 809 return UCODE_NFOUND;
794 } 810 }
795 811
812 /*
 813 * Save us the MSR write below - which is a particularly expensive
814 * operation - when the other hyperthread has updated the microcode
815 * already.
816 */
817 rev = intel_get_microcode_revision();
818 if (rev >= mc->hdr.rev) {
819 uci->cpu_sig.rev = rev;
820 c->microcode = rev;
821 return UCODE_OK;
822 }
823
824 /*
825 * Writeback and invalidate caches before updating microcode to avoid
826 * internal issues depending on what the microcode is updating.
827 */
828 native_wbinvd();
829
796 /* write microcode via MSR 0x79 */ 830 /* write microcode via MSR 0x79 */
797 wrmsrl(MSR_IA32_UCODE_WRITE, (unsigned long)mc->bits); 831 wrmsrl(MSR_IA32_UCODE_WRITE, (unsigned long)mc->bits);
798 832
@@ -813,8 +847,6 @@ static enum ucode_state apply_microcode_intel(int cpu)
813 prev_rev = rev; 847 prev_rev = rev;
814 } 848 }
815 849
816 c = &cpu_data(cpu);
817
818 uci->cpu_sig.rev = rev; 850 uci->cpu_sig.rev = rev;
819 c->microcode = rev; 851 c->microcode = rev;
820 852
diff --git a/arch/x86/kernel/ioport.c b/arch/x86/kernel/ioport.c
index 2f723301eb58..38deafebb21b 100644
--- a/arch/x86/kernel/ioport.c
+++ b/arch/x86/kernel/ioport.c
@@ -23,7 +23,7 @@
23/* 23/*
 24 * This changes the I/O permissions bitmap in the current task. 24
25 */ 25 */
26asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on) 26SYSCALL_DEFINE3(ioperm, unsigned long, from, unsigned long, num, int, turn_on)
27{ 27{
28 struct thread_struct *t = &current->thread; 28 struct thread_struct *t = &current->thread;
29 struct tss_struct *tss; 29 struct tss_struct *tss;
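
Switching from an open-coded asmlinkage prototype to SYSCALL_DEFINE3 lets the common syscall macro emit the metadata and argument-handling stubs every syscall gets. Very roughly, and heavily simplified (the real expansion also emits aliases and per-argument casts; __do_sys_ioperm() here is a stand-in for the generated inner function):

/* Hedged, simplified view of what SYSCALL_DEFINE3(ioperm, ...) provides. */
long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
{
	return __do_sys_ioperm(from, num, turn_on);
}
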
diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c
index bd36f3c33cd0..0715f827607c 100644
--- a/arch/x86/kernel/kprobes/core.c
+++ b/arch/x86/kernel/kprobes/core.c
@@ -1168,10 +1168,18 @@ NOKPROBE_SYMBOL(longjmp_break_handler);
1168 1168
1169bool arch_within_kprobe_blacklist(unsigned long addr) 1169bool arch_within_kprobe_blacklist(unsigned long addr)
1170{ 1170{
1171 bool is_in_entry_trampoline_section = false;
1172
1173#ifdef CONFIG_X86_64
1174 is_in_entry_trampoline_section =
1175 (addr >= (unsigned long)__entry_trampoline_start &&
1176 addr < (unsigned long)__entry_trampoline_end);
1177#endif
1171 return (addr >= (unsigned long)__kprobes_text_start && 1178 return (addr >= (unsigned long)__kprobes_text_start &&
1172 addr < (unsigned long)__kprobes_text_end) || 1179 addr < (unsigned long)__kprobes_text_end) ||
1173 (addr >= (unsigned long)__entry_text_start && 1180 (addr >= (unsigned long)__entry_text_start &&
1174 addr < (unsigned long)__entry_text_end); 1181 addr < (unsigned long)__entry_text_end) ||
1182 is_in_entry_trampoline_section;
1175} 1183}
1176 1184
1177int __init arch_init_kprobes(void) 1185int __init arch_init_kprobes(void)
diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
index 9b138a06c1a4..b854ebf5851b 100644
--- a/arch/x86/kernel/vmlinux.lds.S
+++ b/arch/x86/kernel/vmlinux.lds.S
@@ -118,9 +118,11 @@ SECTIONS
118 118
119#ifdef CONFIG_X86_64 119#ifdef CONFIG_X86_64
120 . = ALIGN(PAGE_SIZE); 120 . = ALIGN(PAGE_SIZE);
121 VMLINUX_SYMBOL(__entry_trampoline_start) = .;
121 _entry_trampoline = .; 122 _entry_trampoline = .;
122 *(.entry_trampoline) 123 *(.entry_trampoline)
123 . = ALIGN(PAGE_SIZE); 124 . = ALIGN(PAGE_SIZE);
125 VMLINUX_SYMBOL(__entry_trampoline_end) = .;
124 ASSERT(. - _entry_trampoline == PAGE_SIZE, "entry trampoline is too big"); 126 ASSERT(. - _entry_trampoline == PAGE_SIZE, "entry trampoline is too big");
125#endif 127#endif
126 128
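
The three pieces above work together: the linker script brackets the page-aligned trampoline section with __entry_trampoline_start/_end, sections.h declares those symbols to C code, and the kprobes blacklist compares raw addresses against them. The consuming side reduces to a range test on linker-provided bounds:

/* Hedged sketch: using linker-script symbols as an address range. */
extern char __entry_trampoline_start[], __entry_trampoline_end[];

static bool addr_in_entry_trampoline(unsigned long addr)
{
	return addr >= (unsigned long)__entry_trampoline_start &&
	       addr <  (unsigned long)__entry_trampoline_end;
}
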
diff --git a/arch/x86/mm/pti.c b/arch/x86/mm/pti.c
index ce38f165489b..631507f0c198 100644
--- a/arch/x86/mm/pti.c
+++ b/arch/x86/mm/pti.c
@@ -332,7 +332,7 @@ static void __init pti_clone_user_shared(void)
332} 332}
333 333
334/* 334/*
335 * Clone the ESPFIX P4D into the user space visinble page table 335 * Clone the ESPFIX P4D into the user space visible page table
336 */ 336 */
337static void __init pti_setup_espfix64(void) 337static void __init pti_setup_espfix64(void)
338{ 338{
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index 87855b5123a6..ee62d2d517bf 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -266,7 +266,7 @@ static int lo_write_bvec(struct file *file, struct bio_vec *bvec, loff_t *ppos)
266 struct iov_iter i; 266 struct iov_iter i;
267 ssize_t bw; 267 ssize_t bw;
268 268
269 iov_iter_bvec(&i, ITER_BVEC, bvec, 1, bvec->bv_len); 269 iov_iter_bvec(&i, ITER_BVEC | WRITE, bvec, 1, bvec->bv_len);
270 270
271 file_start_write(file); 271 file_start_write(file);
272 bw = vfs_iter_write(file, &i, ppos, 0); 272 bw = vfs_iter_write(file, &i, ppos, 0);
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index e126e4cac2ca..92ec1bbece51 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -262,6 +262,7 @@ static DEFINE_SPINLOCK(minor_lock);
262 262
263static int blkfront_setup_indirect(struct blkfront_ring_info *rinfo); 263static int blkfront_setup_indirect(struct blkfront_ring_info *rinfo);
264static void blkfront_gather_backend_features(struct blkfront_info *info); 264static void blkfront_gather_backend_features(struct blkfront_info *info);
265static int negotiate_mq(struct blkfront_info *info);
265 266
266static int get_id_from_freelist(struct blkfront_ring_info *rinfo) 267static int get_id_from_freelist(struct blkfront_ring_info *rinfo)
267{ 268{
@@ -1774,11 +1775,18 @@ static int talk_to_blkback(struct xenbus_device *dev,
1774 unsigned int i, max_page_order; 1775 unsigned int i, max_page_order;
1775 unsigned int ring_page_order; 1776 unsigned int ring_page_order;
1776 1777
1778 if (!info)
1779 return -ENODEV;
1780
1777 max_page_order = xenbus_read_unsigned(info->xbdev->otherend, 1781 max_page_order = xenbus_read_unsigned(info->xbdev->otherend,
1778 "max-ring-page-order", 0); 1782 "max-ring-page-order", 0);
1779 ring_page_order = min(xen_blkif_max_ring_order, max_page_order); 1783 ring_page_order = min(xen_blkif_max_ring_order, max_page_order);
1780 info->nr_ring_pages = 1 << ring_page_order; 1784 info->nr_ring_pages = 1 << ring_page_order;
1781 1785
1786 err = negotiate_mq(info);
1787 if (err)
1788 goto destroy_blkring;
1789
1782 for (i = 0; i < info->nr_rings; i++) { 1790 for (i = 0; i < info->nr_rings; i++) {
1783 struct blkfront_ring_info *rinfo = &info->rinfo[i]; 1791 struct blkfront_ring_info *rinfo = &info->rinfo[i];
1784 1792
@@ -1978,11 +1986,6 @@ static int blkfront_probe(struct xenbus_device *dev,
1978 } 1986 }
1979 1987
1980 info->xbdev = dev; 1988 info->xbdev = dev;
1981 err = negotiate_mq(info);
1982 if (err) {
1983 kfree(info);
1984 return err;
1985 }
1986 1989
1987 mutex_init(&info->mutex); 1990 mutex_init(&info->mutex);
1988 info->vdevice = vdevice; 1991 info->vdevice = vdevice;
@@ -2099,10 +2102,6 @@ static int blkfront_resume(struct xenbus_device *dev)
2099 2102
2100 blkif_free(info, info->connected == BLKIF_STATE_CONNECTED); 2103 blkif_free(info, info->connected == BLKIF_STATE_CONNECTED);
2101 2104
2102 err = negotiate_mq(info);
2103 if (err)
2104 return err;
2105
2106 err = talk_to_blkback(dev, info); 2105 err = talk_to_blkback(dev, info);
2107 if (!err) 2106 if (!err)
2108 blk_mq_update_nr_hw_queues(&info->tag_set, info->nr_rings); 2107 blk_mq_update_nr_hw_queues(&info->tag_set, info->nr_rings);
diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig
index b3b4ed9b6874..d2e5382821a4 100644
--- a/drivers/clocksource/Kconfig
+++ b/drivers/clocksource/Kconfig
@@ -386,6 +386,7 @@ config ATMEL_PIT
386 386
387config ATMEL_ST 387config ATMEL_ST
388 bool "Atmel ST timer support" if COMPILE_TEST 388 bool "Atmel ST timer support" if COMPILE_TEST
389 depends on HAS_IOMEM
389 select TIMER_OF 390 select TIMER_OF
390 select MFD_SYSCON 391 select MFD_SYSCON
391 help 392 help
diff --git a/drivers/dma/mv_xor_v2.c b/drivers/dma/mv_xor_v2.c
index f652a0e0f5a2..3548caa9e933 100644
--- a/drivers/dma/mv_xor_v2.c
+++ b/drivers/dma/mv_xor_v2.c
@@ -163,6 +163,7 @@ struct mv_xor_v2_device {
163 void __iomem *dma_base; 163 void __iomem *dma_base;
164 void __iomem *glob_base; 164 void __iomem *glob_base;
165 struct clk *clk; 165 struct clk *clk;
166 struct clk *reg_clk;
166 struct tasklet_struct irq_tasklet; 167 struct tasklet_struct irq_tasklet;
167 struct list_head free_sw_desc; 168 struct list_head free_sw_desc;
168 struct dma_device dmadev; 169 struct dma_device dmadev;
@@ -749,13 +750,26 @@ static int mv_xor_v2_probe(struct platform_device *pdev)
749 if (ret) 750 if (ret)
750 return ret; 751 return ret;
751 752
753 xor_dev->reg_clk = devm_clk_get(&pdev->dev, "reg");
754 if (PTR_ERR(xor_dev->reg_clk) != -ENOENT) {
755 if (!IS_ERR(xor_dev->reg_clk)) {
756 ret = clk_prepare_enable(xor_dev->reg_clk);
757 if (ret)
758 return ret;
759 } else {
760 return PTR_ERR(xor_dev->reg_clk);
761 }
762 }
763
752 xor_dev->clk = devm_clk_get(&pdev->dev, NULL); 764 xor_dev->clk = devm_clk_get(&pdev->dev, NULL);
753 if (IS_ERR(xor_dev->clk) && PTR_ERR(xor_dev->clk) == -EPROBE_DEFER) 765 if (IS_ERR(xor_dev->clk) && PTR_ERR(xor_dev->clk) == -EPROBE_DEFER) {
 754 return -EPROBE_DEFER; 766 ret = -EPROBE_DEFER;
767 goto disable_reg_clk;
768 }
755 if (!IS_ERR(xor_dev->clk)) { 769 if (!IS_ERR(xor_dev->clk)) {
756 ret = clk_prepare_enable(xor_dev->clk); 770 ret = clk_prepare_enable(xor_dev->clk);
757 if (ret) 771 if (ret)
758 return ret; 772 goto disable_reg_clk;
759 } 773 }
760 774
761 ret = platform_msi_domain_alloc_irqs(&pdev->dev, 1, 775 ret = platform_msi_domain_alloc_irqs(&pdev->dev, 1,
@@ -866,8 +880,9 @@ free_hw_desq:
866free_msi_irqs: 880free_msi_irqs:
867 platform_msi_domain_free_irqs(&pdev->dev); 881 platform_msi_domain_free_irqs(&pdev->dev);
868disable_clk: 882disable_clk:
869 if (!IS_ERR(xor_dev->clk)) 883 clk_disable_unprepare(xor_dev->clk);
870 clk_disable_unprepare(xor_dev->clk); 884disable_reg_clk:
885 clk_disable_unprepare(xor_dev->reg_clk);
871 return ret; 886 return ret;
872} 887}
873 888
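
The probe change above treats the "reg" clock as optional: -ENOENT from devm_clk_get() means the clock simply is not described and probing continues without it, while any other error (including -EPROBE_DEFER) aborts. A condensed sketch of that idiom:

/* Hedged sketch of the optional-clock handling used in the probe. */
static int get_optional_clock(struct device *dev, const char *id,
			      struct clk **out)
{
	struct clk *clk = devm_clk_get(dev, id);

	if (PTR_ERR(clk) == -ENOENT) {	/* not wired up: that is fine */
		*out = NULL;
		return 0;
	}
	if (IS_ERR(clk))		/* real failure, e.g. -EPROBE_DEFER */
		return PTR_ERR(clk);

	*out = clk;
	return clk_prepare_enable(clk);
}

The unified error path in the hunk can drop its IS_ERR() guard because clk_disable_unprepare() tolerates NULL and error pointers.
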
diff --git a/drivers/dma/sh/rcar-dmac.c b/drivers/dma/sh/rcar-dmac.c
index e3ff162c03fc..d0cacdb0713e 100644
--- a/drivers/dma/sh/rcar-dmac.c
+++ b/drivers/dma/sh/rcar-dmac.c
@@ -917,7 +917,7 @@ rcar_dmac_chan_prep_sg(struct rcar_dmac_chan *chan, struct scatterlist *sgl,
917 917
918 rcar_dmac_chan_configure_desc(chan, desc); 918 rcar_dmac_chan_configure_desc(chan, desc);
919 919
920 max_chunk_size = (RCAR_DMATCR_MASK + 1) << desc->xfer_shift; 920 max_chunk_size = RCAR_DMATCR_MASK << desc->xfer_shift;
921 921
922 /* 922 /*
923 * Allocate and fill the transfer chunk descriptors. We own the only 923 * Allocate and fill the transfer chunk descriptors. We own the only
diff --git a/drivers/gpio/gpio-rcar.c b/drivers/gpio/gpio-rcar.c
index e76de57dd617..ebaea8b1594b 100644
--- a/drivers/gpio/gpio-rcar.c
+++ b/drivers/gpio/gpio-rcar.c
@@ -14,7 +14,6 @@
14 * GNU General Public License for more details. 14 * GNU General Public License for more details.
15 */ 15 */
16 16
17#include <linux/clk.h>
18#include <linux/err.h> 17#include <linux/err.h>
19#include <linux/gpio.h> 18#include <linux/gpio.h>
20#include <linux/init.h> 19#include <linux/init.h>
@@ -37,10 +36,9 @@ struct gpio_rcar_priv {
37 struct platform_device *pdev; 36 struct platform_device *pdev;
38 struct gpio_chip gpio_chip; 37 struct gpio_chip gpio_chip;
39 struct irq_chip irq_chip; 38 struct irq_chip irq_chip;
40 struct clk *clk;
41 unsigned int irq_parent; 39 unsigned int irq_parent;
40 atomic_t wakeup_path;
42 bool has_both_edge_trigger; 41 bool has_both_edge_trigger;
43 bool needs_clk;
44}; 42};
45 43
46#define IOINTSEL 0x00 /* General IO/Interrupt Switching Register */ 44#define IOINTSEL 0x00 /* General IO/Interrupt Switching Register */
@@ -186,13 +184,10 @@ static int gpio_rcar_irq_set_wake(struct irq_data *d, unsigned int on)
186 } 184 }
187 } 185 }
188 186
189 if (!p->clk)
190 return 0;
191
192 if (on) 187 if (on)
193 clk_enable(p->clk); 188 atomic_inc(&p->wakeup_path);
194 else 189 else
195 clk_disable(p->clk); 190 atomic_dec(&p->wakeup_path);
196 191
197 return 0; 192 return 0;
198} 193}
@@ -330,17 +325,14 @@ static int gpio_rcar_direction_output(struct gpio_chip *chip, unsigned offset,
330 325
331struct gpio_rcar_info { 326struct gpio_rcar_info {
332 bool has_both_edge_trigger; 327 bool has_both_edge_trigger;
333 bool needs_clk;
334}; 328};
335 329
336static const struct gpio_rcar_info gpio_rcar_info_gen1 = { 330static const struct gpio_rcar_info gpio_rcar_info_gen1 = {
337 .has_both_edge_trigger = false, 331 .has_both_edge_trigger = false,
338 .needs_clk = false,
339}; 332};
340 333
341static const struct gpio_rcar_info gpio_rcar_info_gen2 = { 334static const struct gpio_rcar_info gpio_rcar_info_gen2 = {
342 .has_both_edge_trigger = true, 335 .has_both_edge_trigger = true,
343 .needs_clk = true,
344}; 336};
345 337
346static const struct of_device_id gpio_rcar_of_table[] = { 338static const struct of_device_id gpio_rcar_of_table[] = {
@@ -403,7 +395,6 @@ static int gpio_rcar_parse_dt(struct gpio_rcar_priv *p, unsigned int *npins)
403 ret = of_parse_phandle_with_fixed_args(np, "gpio-ranges", 3, 0, &args); 395 ret = of_parse_phandle_with_fixed_args(np, "gpio-ranges", 3, 0, &args);
404 *npins = ret == 0 ? args.args[2] : RCAR_MAX_GPIO_PER_BANK; 396 *npins = ret == 0 ? args.args[2] : RCAR_MAX_GPIO_PER_BANK;
405 p->has_both_edge_trigger = info->has_both_edge_trigger; 397 p->has_both_edge_trigger = info->has_both_edge_trigger;
406 p->needs_clk = info->needs_clk;
407 398
408 if (*npins == 0 || *npins > RCAR_MAX_GPIO_PER_BANK) { 399 if (*npins == 0 || *npins > RCAR_MAX_GPIO_PER_BANK) {
409 dev_warn(&p->pdev->dev, 400 dev_warn(&p->pdev->dev,
@@ -440,16 +431,6 @@ static int gpio_rcar_probe(struct platform_device *pdev)
440 431
441 platform_set_drvdata(pdev, p); 432 platform_set_drvdata(pdev, p);
442 433
443 p->clk = devm_clk_get(dev, NULL);
444 if (IS_ERR(p->clk)) {
445 if (p->needs_clk) {
446 dev_err(dev, "unable to get clock\n");
447 ret = PTR_ERR(p->clk);
448 goto err0;
449 }
450 p->clk = NULL;
451 }
452
453 pm_runtime_enable(dev); 434 pm_runtime_enable(dev);
454 435
455 irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0); 436 irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
@@ -531,11 +512,24 @@ static int gpio_rcar_remove(struct platform_device *pdev)
531 return 0; 512 return 0;
532} 513}
533 514
515static int __maybe_unused gpio_rcar_suspend(struct device *dev)
516{
517 struct gpio_rcar_priv *p = dev_get_drvdata(dev);
518
519 if (atomic_read(&p->wakeup_path))
520 device_set_wakeup_path(dev);
521
522 return 0;
523}
524
525static SIMPLE_DEV_PM_OPS(gpio_rcar_pm_ops, gpio_rcar_suspend, NULL);
526
534static struct platform_driver gpio_rcar_device_driver = { 527static struct platform_driver gpio_rcar_device_driver = {
535 .probe = gpio_rcar_probe, 528 .probe = gpio_rcar_probe,
536 .remove = gpio_rcar_remove, 529 .remove = gpio_rcar_remove,
537 .driver = { 530 .driver = {
538 .name = "gpio_rcar", 531 .name = "gpio_rcar",
532 .pm = &gpio_rcar_pm_ops,
539 .of_match_table = of_match_ptr(gpio_rcar_of_table), 533 .of_match_table = of_match_ptr(gpio_rcar_of_table),
540 } 534 }
541}; 535};
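
Instead of toggling a clock for every wake-enabled IRQ, the driver now just counts wake sources and, at suspend time, tells the PM core the device sits in the wakeup path so its ancestors stay powered. The shape of that pattern, condensed (the priv layout is illustrative):

/* Hedged sketch of wakeup-path accounting as used above. */
struct sketch_priv {
	atomic_t wakeup_path;
};

static int sketch_irq_set_wake(struct sketch_priv *p, unsigned int on)
{
	if (on)
		atomic_inc(&p->wakeup_path);	/* one count per wake IRQ */
	else
		atomic_dec(&p->wakeup_path);

	return 0;
}

static int __maybe_unused sketch_suspend(struct device *dev)
{
	struct sketch_priv *p = dev_get_drvdata(dev);

	if (atomic_read(&p->wakeup_path))
		device_set_wakeup_path(dev);	/* keep the path powered */

	return 0;
}
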
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
index 57afad79f55d..8fa850a070e0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
@@ -540,6 +540,9 @@ int amdgpu_acpi_pcie_performance_request(struct amdgpu_device *adev,
540 size_t size; 540 size_t size;
541 u32 retry = 3; 541 u32 retry = 3;
542 542
543 if (amdgpu_acpi_pcie_notify_device_ready(adev))
544 return -EINVAL;
545
543 /* Get the device handle */ 546 /* Get the device handle */
544 handle = ACPI_HANDLE(&adev->pdev->dev); 547 handle = ACPI_HANDLE(&adev->pdev->dev);
545 if (!handle) 548 if (!handle)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
index 13044e66dcaf..561d3312af32 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
@@ -481,7 +481,7 @@ static ssize_t amdgpu_debugfs_ring_read(struct file *f, char __user *buf,
481 result = 0; 481 result = 0;
482 482
483 if (*pos < 12) { 483 if (*pos < 12) {
484 early[0] = amdgpu_ring_get_rptr(ring); 484 early[0] = amdgpu_ring_get_rptr(ring) & ring->buf_mask;
485 early[1] = amdgpu_ring_get_wptr(ring) & ring->buf_mask; 485 early[1] = amdgpu_ring_get_wptr(ring) & ring->buf_mask;
486 early[2] = ring->wptr & ring->buf_mask; 486 early[2] = ring->wptr & ring->buf_mask;
487 for (i = *pos / 4; i < 3 && size; i++) { 487 for (i = *pos / 4; i < 3 && size; i++) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
index b2eae86bf906..5c26a8e806b9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
@@ -299,12 +299,15 @@ int amdgpu_uvd_suspend(struct amdgpu_device *adev)
299 299
300 cancel_delayed_work_sync(&adev->uvd.idle_work); 300 cancel_delayed_work_sync(&adev->uvd.idle_work);
301 301
302 for (i = 0; i < adev->uvd.max_handles; ++i) 302 /* only valid for physical mode */
303 if (atomic_read(&adev->uvd.handles[i])) 303 if (adev->asic_type < CHIP_POLARIS10) {
304 break; 304 for (i = 0; i < adev->uvd.max_handles; ++i)
305 if (atomic_read(&adev->uvd.handles[i]))
306 break;
305 307
306 if (i == AMDGPU_MAX_UVD_HANDLES) 308 if (i == adev->uvd.max_handles)
307 return 0; 309 return 0;
310 }
308 311
309 size = amdgpu_bo_size(adev->uvd.vcpu_bo); 312 size = amdgpu_bo_size(adev->uvd.vcpu_bo);
310 ptr = adev->uvd.cpu_addr; 313 ptr = adev->uvd.cpu_addr;
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
index bd2c4f727df6..a712f4b285f6 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
@@ -3093,7 +3093,7 @@ static int dce_v6_0_hpd_irq(struct amdgpu_device *adev,
3093 tmp |= DC_HPD1_INT_CONTROL__DC_HPD1_INT_ACK_MASK; 3093 tmp |= DC_HPD1_INT_CONTROL__DC_HPD1_INT_ACK_MASK;
3094 WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd], tmp); 3094 WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd], tmp);
3095 schedule_work(&adev->hotplug_work); 3095 schedule_work(&adev->hotplug_work);
3096 DRM_INFO("IH: HPD%d\n", hpd + 1); 3096 DRM_DEBUG("IH: HPD%d\n", hpd + 1);
3097 } 3097 }
3098 3098
3099 return 0; 3099 return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
index a066c5eda135..a4309698e76c 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
@@ -4384,34 +4384,8 @@ static void gfx_v7_0_gpu_early_init(struct amdgpu_device *adev)
4384 case CHIP_KAVERI: 4384 case CHIP_KAVERI:
4385 adev->gfx.config.max_shader_engines = 1; 4385 adev->gfx.config.max_shader_engines = 1;
4386 adev->gfx.config.max_tile_pipes = 4; 4386 adev->gfx.config.max_tile_pipes = 4;
4387 if ((adev->pdev->device == 0x1304) || 4387 adev->gfx.config.max_cu_per_sh = 8;
4388 (adev->pdev->device == 0x1305) || 4388 adev->gfx.config.max_backends_per_se = 2;
4389 (adev->pdev->device == 0x130C) ||
4390 (adev->pdev->device == 0x130F) ||
4391 (adev->pdev->device == 0x1310) ||
4392 (adev->pdev->device == 0x1311) ||
4393 (adev->pdev->device == 0x131C)) {
4394 adev->gfx.config.max_cu_per_sh = 8;
4395 adev->gfx.config.max_backends_per_se = 2;
4396 } else if ((adev->pdev->device == 0x1309) ||
4397 (adev->pdev->device == 0x130A) ||
4398 (adev->pdev->device == 0x130D) ||
4399 (adev->pdev->device == 0x1313) ||
4400 (adev->pdev->device == 0x131D)) {
4401 adev->gfx.config.max_cu_per_sh = 6;
4402 adev->gfx.config.max_backends_per_se = 2;
4403 } else if ((adev->pdev->device == 0x1306) ||
4404 (adev->pdev->device == 0x1307) ||
4405 (adev->pdev->device == 0x130B) ||
4406 (adev->pdev->device == 0x130E) ||
4407 (adev->pdev->device == 0x1315) ||
4408 (adev->pdev->device == 0x131B)) {
4409 adev->gfx.config.max_cu_per_sh = 4;
4410 adev->gfx.config.max_backends_per_se = 1;
4411 } else {
4412 adev->gfx.config.max_cu_per_sh = 3;
4413 adev->gfx.config.max_backends_per_se = 1;
4414 }
4415 adev->gfx.config.max_sh_per_se = 1; 4389 adev->gfx.config.max_sh_per_se = 1;
4416 adev->gfx.config.max_texture_channel_caches = 4; 4390 adev->gfx.config.max_texture_channel_caches = 4;
4417 adev->gfx.config.max_gprs = 256; 4391 adev->gfx.config.max_gprs = 256;
diff --git a/drivers/gpu/drm/amd/amdgpu/si.c b/drivers/gpu/drm/amd/amdgpu/si.c
index 543101d5a5ed..2095173aaabf 100644
--- a/drivers/gpu/drm/amd/amdgpu/si.c
+++ b/drivers/gpu/drm/amd/amdgpu/si.c
@@ -31,6 +31,7 @@
31#include "amdgpu_uvd.h" 31#include "amdgpu_uvd.h"
32#include "amdgpu_vce.h" 32#include "amdgpu_vce.h"
33#include "atom.h" 33#include "atom.h"
34#include "amd_pcie.h"
34#include "amdgpu_powerplay.h" 35#include "amdgpu_powerplay.h"
35#include "sid.h" 36#include "sid.h"
36#include "si_ih.h" 37#include "si_ih.h"
@@ -1461,8 +1462,8 @@ static void si_pcie_gen3_enable(struct amdgpu_device *adev)
1461{ 1462{
1462 struct pci_dev *root = adev->pdev->bus->self; 1463 struct pci_dev *root = adev->pdev->bus->self;
1463 int bridge_pos, gpu_pos; 1464 int bridge_pos, gpu_pos;
1464 u32 speed_cntl, mask, current_data_rate; 1465 u32 speed_cntl, current_data_rate;
1465 int ret, i; 1466 int i;
1466 u16 tmp16; 1467 u16 tmp16;
1467 1468
1468 if (pci_is_root_bus(adev->pdev->bus)) 1469 if (pci_is_root_bus(adev->pdev->bus))
@@ -1474,23 +1475,20 @@ static void si_pcie_gen3_enable(struct amdgpu_device *adev)
1474 if (adev->flags & AMD_IS_APU) 1475 if (adev->flags & AMD_IS_APU)
1475 return; 1476 return;
1476 1477
1477 ret = drm_pcie_get_speed_cap_mask(adev->ddev, &mask); 1478 if (!(adev->pm.pcie_gen_mask & (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
1478 if (ret != 0) 1479 CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)))
1479 return;
1480
1481 if (!(mask & (DRM_PCIE_SPEED_50 | DRM_PCIE_SPEED_80)))
1482 return; 1480 return;
1483 1481
1484 speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL); 1482 speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
1485 current_data_rate = (speed_cntl & LC_CURRENT_DATA_RATE_MASK) >> 1483 current_data_rate = (speed_cntl & LC_CURRENT_DATA_RATE_MASK) >>
1486 LC_CURRENT_DATA_RATE_SHIFT; 1484 LC_CURRENT_DATA_RATE_SHIFT;
1487 if (mask & DRM_PCIE_SPEED_80) { 1485 if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3) {
1488 if (current_data_rate == 2) { 1486 if (current_data_rate == 2) {
1489 DRM_INFO("PCIE gen 3 link speeds already enabled\n"); 1487 DRM_INFO("PCIE gen 3 link speeds already enabled\n");
1490 return; 1488 return;
1491 } 1489 }
1492 DRM_INFO("enabling PCIE gen 3 link speeds, disable with amdgpu.pcie_gen2=0\n"); 1490 DRM_INFO("enabling PCIE gen 3 link speeds, disable with amdgpu.pcie_gen2=0\n");
1493 } else if (mask & DRM_PCIE_SPEED_50) { 1491 } else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2) {
1494 if (current_data_rate == 1) { 1492 if (current_data_rate == 1) {
1495 DRM_INFO("PCIE gen 2 link speeds already enabled\n"); 1493 DRM_INFO("PCIE gen 2 link speeds already enabled\n");
1496 return; 1494 return;
@@ -1506,7 +1504,7 @@ static void si_pcie_gen3_enable(struct amdgpu_device *adev)
1506 if (!gpu_pos) 1504 if (!gpu_pos)
1507 return; 1505 return;
1508 1506
1509 if (mask & DRM_PCIE_SPEED_80) { 1507 if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3) {
1510 if (current_data_rate != 2) { 1508 if (current_data_rate != 2) {
1511 u16 bridge_cfg, gpu_cfg; 1509 u16 bridge_cfg, gpu_cfg;
1512 u16 bridge_cfg2, gpu_cfg2; 1510 u16 bridge_cfg2, gpu_cfg2;
@@ -1589,9 +1587,9 @@ static void si_pcie_gen3_enable(struct amdgpu_device *adev)
1589 1587
1590 pci_read_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL2, &tmp16); 1588 pci_read_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL2, &tmp16);
1591 tmp16 &= ~0xf; 1589 tmp16 &= ~0xf;
1592 if (mask & DRM_PCIE_SPEED_80) 1590 if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)
1593 tmp16 |= 3; 1591 tmp16 |= 3;
1594 else if (mask & DRM_PCIE_SPEED_50) 1592 else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2)
1595 tmp16 |= 2; 1593 tmp16 |= 2;
1596 else 1594 else
1597 tmp16 |= 1; 1595 tmp16 |= 1;
diff --git a/drivers/gpu/drm/amd/amdgpu/si_dpm.c b/drivers/gpu/drm/amd/amdgpu/si_dpm.c
index ce675a7f179a..22f0b7ff3ac9 100644
--- a/drivers/gpu/drm/amd/amdgpu/si_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/si_dpm.c
@@ -26,6 +26,7 @@
26#include "amdgpu_pm.h" 26#include "amdgpu_pm.h"
27#include "amdgpu_dpm.h" 27#include "amdgpu_dpm.h"
28#include "amdgpu_atombios.h" 28#include "amdgpu_atombios.h"
29#include "amd_pcie.h"
29#include "sid.h" 30#include "sid.h"
30#include "r600_dpm.h" 31#include "r600_dpm.h"
31#include "si_dpm.h" 32#include "si_dpm.h"
@@ -3331,29 +3332,6 @@ static void btc_apply_voltage_delta_rules(struct amdgpu_device *adev,
3331 } 3332 }
3332} 3333}
3333 3334
3334static enum amdgpu_pcie_gen r600_get_pcie_gen_support(struct amdgpu_device *adev,
3335 u32 sys_mask,
3336 enum amdgpu_pcie_gen asic_gen,
3337 enum amdgpu_pcie_gen default_gen)
3338{
3339 switch (asic_gen) {
3340 case AMDGPU_PCIE_GEN1:
3341 return AMDGPU_PCIE_GEN1;
3342 case AMDGPU_PCIE_GEN2:
3343 return AMDGPU_PCIE_GEN2;
3344 case AMDGPU_PCIE_GEN3:
3345 return AMDGPU_PCIE_GEN3;
3346 default:
3347 if ((sys_mask & DRM_PCIE_SPEED_80) && (default_gen == AMDGPU_PCIE_GEN3))
3348 return AMDGPU_PCIE_GEN3;
3349 else if ((sys_mask & DRM_PCIE_SPEED_50) && (default_gen == AMDGPU_PCIE_GEN2))
3350 return AMDGPU_PCIE_GEN2;
3351 else
3352 return AMDGPU_PCIE_GEN1;
3353 }
3354 return AMDGPU_PCIE_GEN1;
3355}
3356
3357static void r600_calculate_u_and_p(u32 i, u32 r_c, u32 p_b, 3335static void r600_calculate_u_and_p(u32 i, u32 r_c, u32 p_b,
3358 u32 *p, u32 *u) 3336 u32 *p, u32 *u)
3359{ 3337{
@@ -5028,10 +5006,11 @@ static int si_populate_smc_acpi_state(struct amdgpu_device *adev,
5028 table->ACPIState.levels[0].vddc.index, 5006 table->ACPIState.levels[0].vddc.index,
5029 &table->ACPIState.levels[0].std_vddc); 5007 &table->ACPIState.levels[0].std_vddc);
5030 } 5008 }
5031 table->ACPIState.levels[0].gen2PCIE = (u8)r600_get_pcie_gen_support(adev, 5009 table->ACPIState.levels[0].gen2PCIE =
5032 si_pi->sys_pcie_mask, 5010 (u8)amdgpu_get_pcie_gen_support(adev,
5033 si_pi->boot_pcie_gen, 5011 si_pi->sys_pcie_mask,
5034 AMDGPU_PCIE_GEN1); 5012 si_pi->boot_pcie_gen,
5013 AMDGPU_PCIE_GEN1);
5035 5014
5036 if (si_pi->vddc_phase_shed_control) 5015 if (si_pi->vddc_phase_shed_control)
5037 si_populate_phase_shedding_value(adev, 5016 si_populate_phase_shedding_value(adev,
@@ -7168,10 +7147,10 @@ static void si_parse_pplib_clock_info(struct amdgpu_device *adev,
7168 pl->vddc = le16_to_cpu(clock_info->si.usVDDC); 7147 pl->vddc = le16_to_cpu(clock_info->si.usVDDC);
7169 pl->vddci = le16_to_cpu(clock_info->si.usVDDCI); 7148 pl->vddci = le16_to_cpu(clock_info->si.usVDDCI);
7170 pl->flags = le32_to_cpu(clock_info->si.ulFlags); 7149 pl->flags = le32_to_cpu(clock_info->si.ulFlags);
7171 pl->pcie_gen = r600_get_pcie_gen_support(adev, 7150 pl->pcie_gen = amdgpu_get_pcie_gen_support(adev,
7172 si_pi->sys_pcie_mask, 7151 si_pi->sys_pcie_mask,
7173 si_pi->boot_pcie_gen, 7152 si_pi->boot_pcie_gen,
7174 clock_info->si.ucPCIEGen); 7153 clock_info->si.ucPCIEGen);
7175 7154
7176 /* patch up vddc if necessary */ 7155 /* patch up vddc if necessary */
7177 ret = si_get_leakage_voltage_from_leakage_index(adev, pl->vddc, 7156 ret = si_get_leakage_voltage_from_leakage_index(adev, pl->vddc,
@@ -7326,7 +7305,6 @@ static int si_dpm_init(struct amdgpu_device *adev)
7326 struct si_power_info *si_pi; 7305 struct si_power_info *si_pi;
7327 struct atom_clock_dividers dividers; 7306 struct atom_clock_dividers dividers;
7328 int ret; 7307 int ret;
7329 u32 mask;
7330 7308
7331 si_pi = kzalloc(sizeof(struct si_power_info), GFP_KERNEL); 7309 si_pi = kzalloc(sizeof(struct si_power_info), GFP_KERNEL);
7332 if (si_pi == NULL) 7310 if (si_pi == NULL)
@@ -7336,11 +7314,9 @@ static int si_dpm_init(struct amdgpu_device *adev)
7336 eg_pi = &ni_pi->eg; 7314 eg_pi = &ni_pi->eg;
7337 pi = &eg_pi->rv7xx; 7315 pi = &eg_pi->rv7xx;
7338 7316
7339 ret = drm_pcie_get_speed_cap_mask(adev->ddev, &mask); 7317 si_pi->sys_pcie_mask =
7340 if (ret) 7318 (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_MASK) >>
7341 si_pi->sys_pcie_mask = 0; 7319 CAIL_PCIE_LINK_SPEED_SUPPORT_SHIFT;
7342 else
7343 si_pi->sys_pcie_mask = mask;
7344 si_pi->force_pcie_gen = AMDGPU_PCIE_GEN_INVALID; 7320 si_pi->force_pcie_gen = AMDGPU_PCIE_GEN_INVALID;
7345 si_pi->boot_pcie_gen = si_get_current_pcie_speed(adev); 7321 si_pi->boot_pcie_gen = si_get_current_pcie_speed(adev);
7346 7322
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 862835dc054e..c345e645f1d7 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -1037,6 +1037,10 @@ static void handle_hpd_rx_irq(void *param)
1037 !is_mst_root_connector) { 1037 !is_mst_root_connector) {
1038 /* Downstream Port status changed. */ 1038 /* Downstream Port status changed. */
1039 if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) { 1039 if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {
1040
1041 if (aconnector->fake_enable)
1042 aconnector->fake_enable = false;
1043
1040 amdgpu_dm_update_connector_after_detect(aconnector); 1044 amdgpu_dm_update_connector_after_detect(aconnector);
1041 1045
1042 1046
@@ -2012,30 +2016,32 @@ static void update_stream_scaling_settings(const struct drm_display_mode *mode,
2012 dst.width = stream->timing.h_addressable; 2016 dst.width = stream->timing.h_addressable;
2013 dst.height = stream->timing.v_addressable; 2017 dst.height = stream->timing.v_addressable;
2014 2018
2015 rmx_type = dm_state->scaling; 2019 if (dm_state) {
2016 if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) { 2020 rmx_type = dm_state->scaling;
2017 if (src.width * dst.height < 2021 if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
2018 src.height * dst.width) { 2022 if (src.width * dst.height <
2019 /* height needs less upscaling/more downscaling */ 2023 src.height * dst.width) {
2020 dst.width = src.width * 2024 /* height needs less upscaling/more downscaling */
2021 dst.height / src.height; 2025 dst.width = src.width *
2022 } else { 2026 dst.height / src.height;
2023 /* width needs less upscaling/more downscaling */ 2027 } else {
2024 dst.height = src.height * 2028 /* width needs less upscaling/more downscaling */
2025 dst.width / src.width; 2029 dst.height = src.height *
2030 dst.width / src.width;
2031 }
2032 } else if (rmx_type == RMX_CENTER) {
2033 dst = src;
2026 } 2034 }
2027 } else if (rmx_type == RMX_CENTER) {
2028 dst = src;
2029 }
2030 2035
2031 dst.x = (stream->timing.h_addressable - dst.width) / 2; 2036 dst.x = (stream->timing.h_addressable - dst.width) / 2;
2032 dst.y = (stream->timing.v_addressable - dst.height) / 2; 2037 dst.y = (stream->timing.v_addressable - dst.height) / 2;
2033 2038
2034 if (dm_state->underscan_enable) { 2039 if (dm_state->underscan_enable) {
2035 dst.x += dm_state->underscan_hborder / 2; 2040 dst.x += dm_state->underscan_hborder / 2;
2036 dst.y += dm_state->underscan_vborder / 2; 2041 dst.y += dm_state->underscan_vborder / 2;
2037 dst.width -= dm_state->underscan_hborder; 2042 dst.width -= dm_state->underscan_hborder;
2038 dst.height -= dm_state->underscan_vborder; 2043 dst.height -= dm_state->underscan_vborder;
2044 }
2039 } 2045 }
2040 2046
2041 stream->src = src; 2047 stream->src = src;
@@ -2360,12 +2366,7 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
2360 2366
2361 if (aconnector == NULL) { 2367 if (aconnector == NULL) {
2362 DRM_ERROR("aconnector is NULL!\n"); 2368 DRM_ERROR("aconnector is NULL!\n");
2363 goto drm_connector_null; 2369 return stream;
2364 }
2365
2366 if (dm_state == NULL) {
2367 DRM_ERROR("dm_state is NULL!\n");
2368 goto dm_state_null;
2369 } 2370 }
2370 2371
2371 drm_connector = &aconnector->base; 2372 drm_connector = &aconnector->base;
@@ -2377,18 +2378,18 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
2377 */ 2378 */
2378 if (aconnector->mst_port) { 2379 if (aconnector->mst_port) {
2379 dm_dp_mst_dc_sink_create(drm_connector); 2380 dm_dp_mst_dc_sink_create(drm_connector);
2380 goto mst_dc_sink_create_done; 2381 return stream;
2381 } 2382 }
2382 2383
2383 if (create_fake_sink(aconnector)) 2384 if (create_fake_sink(aconnector))
2384 goto stream_create_fail; 2385 return stream;
2385 } 2386 }
2386 2387
2387 stream = dc_create_stream_for_sink(aconnector->dc_sink); 2388 stream = dc_create_stream_for_sink(aconnector->dc_sink);
2388 2389
2389 if (stream == NULL) { 2390 if (stream == NULL) {
2390 DRM_ERROR("Failed to create stream for sink!\n"); 2391 DRM_ERROR("Failed to create stream for sink!\n");
2391 goto stream_create_fail; 2392 return stream;
2392 } 2393 }
2393 2394
2394 list_for_each_entry(preferred_mode, &aconnector->base.modes, head) { 2395 list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
@@ -2414,9 +2415,12 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
2414 } else { 2415 } else {
2415 decide_crtc_timing_for_drm_display_mode( 2416 decide_crtc_timing_for_drm_display_mode(
2416 &mode, preferred_mode, 2417 &mode, preferred_mode,
2417 dm_state->scaling != RMX_OFF); 2418 dm_state ? (dm_state->scaling != RMX_OFF) : false);
2418 } 2419 }
2419 2420
2421 if (!dm_state)
2422 drm_mode_set_crtcinfo(&mode, 0);
2423
2420 fill_stream_properties_from_drm_display_mode(stream, 2424 fill_stream_properties_from_drm_display_mode(stream,
2421 &mode, &aconnector->base); 2425 &mode, &aconnector->base);
2422 update_stream_scaling_settings(&mode, dm_state, stream); 2426 update_stream_scaling_settings(&mode, dm_state, stream);
@@ -2426,10 +2430,8 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
2426 drm_connector, 2430 drm_connector,
2427 aconnector->dc_sink); 2431 aconnector->dc_sink);
2428 2432
2429stream_create_fail: 2433 update_stream_signal(stream);
2430dm_state_null: 2434
2431drm_connector_null:
2432mst_dc_sink_create_done:
2433 return stream; 2435 return stream;
2434} 2436}
2435 2437
@@ -2497,6 +2499,27 @@ dm_crtc_duplicate_state(struct drm_crtc *crtc)
2497 return &state->base; 2499 return &state->base;
2498} 2500}
2499 2501
2502
2503static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
2504{
2505 enum dc_irq_source irq_source;
2506 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
2507 struct amdgpu_device *adev = crtc->dev->dev_private;
2508
2509 irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
2510 return dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
2511}
2512
2513static int dm_enable_vblank(struct drm_crtc *crtc)
2514{
2515 return dm_set_vblank(crtc, true);
2516}
2517
2518static void dm_disable_vblank(struct drm_crtc *crtc)
2519{
2520 dm_set_vblank(crtc, false);
2521}
2522
2500/* Implemented only the options currently available for the driver */ 2523
2501static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = { 2524static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
2502 .reset = dm_crtc_reset_state, 2525 .reset = dm_crtc_reset_state,
@@ -2506,6 +2529,8 @@ static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
2506 .page_flip = drm_atomic_helper_page_flip, 2529 .page_flip = drm_atomic_helper_page_flip,
2507 .atomic_duplicate_state = dm_crtc_duplicate_state, 2530 .atomic_duplicate_state = dm_crtc_duplicate_state,
2508 .atomic_destroy_state = dm_crtc_destroy_state, 2531 .atomic_destroy_state = dm_crtc_destroy_state,
2532 .enable_vblank = dm_enable_vblank,
2533 .disable_vblank = dm_disable_vblank,
2509}; 2534};
2510 2535
2511static enum drm_connector_status 2536static enum drm_connector_status
@@ -2800,7 +2825,7 @@ int amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
2800 goto fail; 2825 goto fail;
2801 } 2826 }
2802 2827
2803 stream = dc_create_stream_for_sink(dc_sink); 2828 stream = create_stream_for_sink(aconnector, mode, NULL);
2804 if (stream == NULL) { 2829 if (stream == NULL) {
2805 DRM_ERROR("Failed to create stream for sink!\n"); 2830 DRM_ERROR("Failed to create stream for sink!\n");
2806 goto fail; 2831 goto fail;
@@ -3060,6 +3085,9 @@ static int dm_plane_atomic_check(struct drm_plane *plane,
3060 if (!dm_plane_state->dc_state) 3085 if (!dm_plane_state->dc_state)
3061 return 0; 3086 return 0;
3062 3087
3088 if (!fill_rects_from_plane_state(state, dm_plane_state->dc_state))
3089 return -EINVAL;
3090
3063 if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK) 3091 if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK)
3064 return 0; 3092 return 0;
3065 3093
@@ -4632,8 +4660,6 @@ static int dm_update_planes_state(struct dc *dc,
4632 bool pflip_needed = !state->allow_modeset; 4660 bool pflip_needed = !state->allow_modeset;
4633 int ret = 0; 4661 int ret = 0;
4634 4662
4635 if (pflip_needed)
4636 return ret;
4637 4663
4638 /* Add new planes */ 4664 /* Add new planes */
4639 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) { 4665 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
@@ -4648,6 +4674,8 @@ static int dm_update_planes_state(struct dc *dc,
4648 4674
4649 /* Remove any changed/removed planes */ 4675 /* Remove any changed/removed planes */
4650 if (!enable) { 4676 if (!enable) {
4677 if (pflip_needed)
4678 continue;
4651 4679
4652 if (!old_plane_crtc) 4680 if (!old_plane_crtc)
4653 continue; 4681 continue;
@@ -4679,6 +4707,7 @@ static int dm_update_planes_state(struct dc *dc,
4679 *lock_and_validation_needed = true; 4707 *lock_and_validation_needed = true;
4680 4708
4681 } else { /* Add new planes */ 4709 } else { /* Add new planes */
4710 struct dc_plane_state *dc_new_plane_state;
4682 4711
4683 if (drm_atomic_plane_disabling(plane->state, new_plane_state)) 4712 if (drm_atomic_plane_disabling(plane->state, new_plane_state))
4684 continue; 4713 continue;
@@ -4692,38 +4721,50 @@ static int dm_update_planes_state(struct dc *dc,
4692 if (!dm_new_crtc_state->stream) 4721 if (!dm_new_crtc_state->stream)
4693 continue; 4722 continue;
4694 4723
4724 if (pflip_needed)
4725 continue;
4695 4726
4696 WARN_ON(dm_new_plane_state->dc_state); 4727 WARN_ON(dm_new_plane_state->dc_state);
4697 4728
4698 dm_new_plane_state->dc_state = dc_create_plane_state(dc); 4729 dc_new_plane_state = dc_create_plane_state(dc);
4699 4730 if (!dc_new_plane_state) {
4700 DRM_DEBUG_DRIVER("Enabling DRM plane: %d on DRM crtc %d\n",
4701 plane->base.id, new_plane_crtc->base.id);
4702
4703 if (!dm_new_plane_state->dc_state) {
4704 ret = -EINVAL; 4731 ret = -EINVAL;
4705 return ret; 4732 return ret;
4706 } 4733 }
4707 4734
4735 DRM_DEBUG_DRIVER("Enabling DRM plane: %d on DRM crtc %d\n",
4736 plane->base.id, new_plane_crtc->base.id);
4737
4708 ret = fill_plane_attributes( 4738 ret = fill_plane_attributes(
4709 new_plane_crtc->dev->dev_private, 4739 new_plane_crtc->dev->dev_private,
4710 dm_new_plane_state->dc_state, 4740 dc_new_plane_state,
4711 new_plane_state, 4741 new_plane_state,
4712 new_crtc_state); 4742 new_crtc_state);
4713 if (ret) 4743 if (ret) {
4744 dc_plane_state_release(dc_new_plane_state);
4714 return ret; 4745 return ret;
4746 }
4715 4747
4716 4748 /*
4749 * Any atomic check errors that occur after this will
4750 * not need a release. The plane state will be attached
4751 * to the stream, and therefore part of the atomic
4752 * state. It'll be released when the atomic state is
4753 * cleaned.
4754 */
4717 if (!dc_add_plane_to_context( 4755 if (!dc_add_plane_to_context(
4718 dc, 4756 dc,
4719 dm_new_crtc_state->stream, 4757 dm_new_crtc_state->stream,
4720 dm_new_plane_state->dc_state, 4758 dc_new_plane_state,
4721 dm_state->context)) { 4759 dm_state->context)) {
4722 4760
4761 dc_plane_state_release(dc_new_plane_state);
4723 ret = -EINVAL; 4762 ret = -EINVAL;
4724 return ret; 4763 return ret;
4725 } 4764 }
4726 4765
4766 dm_new_plane_state->dc_state = dc_new_plane_state;
4767
4727 /* Tell DC to do a full surface update every time there 4768 /* Tell DC to do a full surface update every time there
4728 * is a plane change. Inefficient, but works for now. 4769 * is a plane change. Inefficient, but works for now.
4729 */ 4770 */
@@ -4737,6 +4778,30 @@ static int dm_update_planes_state(struct dc *dc,
4737 return ret; 4778 return ret;
4738} 4779}
4739 4780
4781static int dm_atomic_check_plane_state_fb(struct drm_atomic_state *state,
4782 struct drm_crtc *crtc)
4783{
4784 struct drm_plane *plane;
4785 struct drm_crtc_state *crtc_state;
4786
4787 WARN_ON(!drm_atomic_get_new_crtc_state(state, crtc));
4788
4789 drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
4790 struct drm_plane_state *plane_state =
4791 drm_atomic_get_plane_state(state, plane);
4792
4793 if (IS_ERR(plane_state))
4794 return -EDEADLK;
4795
4796 crtc_state = drm_atomic_get_crtc_state(plane_state->state, crtc);
4797 if (crtc->primary == plane && crtc_state->active) {
4798 if (!plane_state->fb)
4799 return -EINVAL;
4800 }
4801 }
4802 return 0;
4803}
4804
4740static int amdgpu_dm_atomic_check(struct drm_device *dev, 4805static int amdgpu_dm_atomic_check(struct drm_device *dev,
4741 struct drm_atomic_state *state) 4806 struct drm_atomic_state *state)
4742{ 4807{
@@ -4760,6 +4825,10 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
4760 goto fail; 4825 goto fail;
4761 4826
4762 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { 4827 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
4828 ret = dm_atomic_check_plane_state_fb(state, crtc);
4829 if (ret)
4830 goto fail;
4831
4763 if (!drm_atomic_crtc_needs_modeset(new_crtc_state) && 4832 if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
4764 !new_crtc_state->color_mgmt_changed) 4833 !new_crtc_state->color_mgmt_changed)
4765 continue; 4834 continue;
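
A unifying detail in the plane hunks above is ownership: a freshly created dc_plane_state must be released on every failure until dc_add_plane_to_context() succeeds, after which it belongs to the stream and is freed with the atomic state. In outline, with generic names rather than the driver's API:

/* Hedged sketch of the create/attach ownership rule described above. */
static int add_plane_sketch(struct ctx *ctx, struct plane *plane)
{
	struct state *obj = create_state();

	if (!obj)
		return -EINVAL;

	if (fill_attributes(obj) || !attach_to_context(ctx, obj)) {
		release_state(obj);	/* failure before/at attach: still ours */
		return -EINVAL;
	}

	plane->state = obj;	/* attached: the context owns it now */
	return 0;
}
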
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
index 1874b6cee6af..422055080df4 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
@@ -683,10 +683,8 @@ static const struct amdgpu_irq_src_funcs dm_hpd_irq_funcs = {
683 683
684void amdgpu_dm_set_irq_funcs(struct amdgpu_device *adev) 684void amdgpu_dm_set_irq_funcs(struct amdgpu_device *adev)
685{ 685{
686 if (adev->mode_info.num_crtc > 0) 686
687 adev->crtc_irq.num_types = AMDGPU_CRTC_IRQ_VLINE1 + adev->mode_info.num_crtc; 687 adev->crtc_irq.num_types = adev->mode_info.num_crtc;
688 else
689 adev->crtc_irq.num_types = 0;
690 adev->crtc_irq.funcs = &dm_crtc_irq_funcs; 688 adev->crtc_irq.funcs = &dm_crtc_irq_funcs;
691 689
692 adev->pageflip_irq.num_types = adev->mode_info.num_crtc; 690 adev->pageflip_irq.num_types = adev->mode_info.num_crtc;
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
index f3d87f418d2e..93421dad21bd 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
@@ -189,6 +189,12 @@ void dm_dp_mst_dc_sink_create(struct drm_connector *connector)
189 .link = aconnector->dc_link, 189 .link = aconnector->dc_link,
190 .sink_signal = SIGNAL_TYPE_DISPLAY_PORT_MST }; 190 .sink_signal = SIGNAL_TYPE_DISPLAY_PORT_MST };
191 191
192 /*
 193 * TODO: Figure out why ddc.algo can be NULL while the MST port still exists

194 */
195 if (!aconnector->port || !aconnector->port->aux.ddc.algo)
196 return;
197
192 edid = drm_dp_mst_get_edid(connector, &aconnector->mst_port->mst_mgr, aconnector->port); 198 edid = drm_dp_mst_get_edid(connector, &aconnector->mst_port->mst_mgr, aconnector->port);
193 199
194 if (!edid) { 200 if (!edid) {
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
index 35e84ed031de..12868c769606 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -1358,13 +1358,13 @@ enum dc_irq_source dc_interrupt_to_irq_source(
1358 return dal_irq_service_to_irq_source(dc->res_pool->irqs, src_id, ext_id); 1358 return dal_irq_service_to_irq_source(dc->res_pool->irqs, src_id, ext_id);
1359} 1359}
1360 1360
1361void dc_interrupt_set(struct dc *dc, enum dc_irq_source src, bool enable) 1361bool dc_interrupt_set(struct dc *dc, enum dc_irq_source src, bool enable)
1362{ 1362{
1363 1363
1364 if (dc == NULL) 1364 if (dc == NULL)
1365 return; 1365 return false;
1366 1366
1367 dal_irq_service_set(dc->res_pool->irqs, src, enable); 1367 return dal_irq_service_set(dc->res_pool->irqs, src, enable);
1368} 1368}
1369 1369
1370void dc_interrupt_ack(struct dc *dc, enum dc_irq_source src) 1370void dc_interrupt_ack(struct dc *dc, enum dc_irq_source src)
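
dc_interrupt_set() now propagates the result of dal_irq_service_set() instead of discarding it. A hypothetical caller (the wrapper name and error value here are illustrative, not part of this patch) can then surface a failed IRQ-service update:

static int example_set_vblank_irq(struct dc *dc, enum dc_irq_source src,
				  bool enable)
{
	if (!dc_interrupt_set(dc, src, enable))
		return -EINVAL;	/* NULL dc, or the IRQ service rejected it */
	return 0;
}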
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
index a37428271573..be5546181fa8 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
@@ -1749,8 +1749,7 @@ static void enable_link_hdmi(struct pipe_ctx *pipe_ctx)
1749 link->link_enc, 1749 link->link_enc,
1750 pipe_ctx->clock_source->id, 1750 pipe_ctx->clock_source->id,
1751 display_color_depth, 1751 display_color_depth,
1752 pipe_ctx->stream->signal == SIGNAL_TYPE_HDMI_TYPE_A, 1752 pipe_ctx->stream->signal,
1753 pipe_ctx->stream->signal == SIGNAL_TYPE_DVI_DUAL_LINK,
1754 stream->phy_pix_clk); 1753 stream->phy_pix_clk);
1755 1754
1756 if (pipe_ctx->stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) 1755 if (pipe_ctx->stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
index 95b8dd0e53c6..4d07ffebfd31 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
@@ -1360,9 +1360,6 @@ bool dc_is_stream_scaling_unchanged(
1360 return true; 1360 return true;
1361} 1361}
1362 1362
1363/* Maximum TMDS single link pixel clock 165MHz */
1364#define TMDS_MAX_PIXEL_CLOCK_IN_KHZ 165000
1365
1366static void update_stream_engine_usage( 1363static void update_stream_engine_usage(
1367 struct resource_context *res_ctx, 1364 struct resource_context *res_ctx,
1368 const struct resource_pool *pool, 1365 const struct resource_pool *pool,
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
index 539c3e0a6292..cd5819789d76 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
@@ -33,8 +33,7 @@
33/******************************************************************************* 33/*******************************************************************************
34 * Private functions 34 * Private functions
35 ******************************************************************************/ 35 ******************************************************************************/
36#define TMDS_MAX_PIXEL_CLOCK_IN_KHZ_UPMOST 297000 36void update_stream_signal(struct dc_stream_state *stream)
37static void update_stream_signal(struct dc_stream_state *stream)
38{ 37{
39 38
40 struct dc_sink *dc_sink = stream->sink; 39 struct dc_sink *dc_sink = stream->sink;
@@ -45,8 +44,9 @@ static void update_stream_signal(struct dc_stream_state *stream)
45 stream->signal = dc_sink->sink_signal; 44 stream->signal = dc_sink->sink_signal;
46 45
47 if (dc_is_dvi_signal(stream->signal)) { 46 if (dc_is_dvi_signal(stream->signal)) {
48 if (stream->timing.pix_clk_khz > TMDS_MAX_PIXEL_CLOCK_IN_KHZ_UPMOST && 47 if (stream->ctx->dc->caps.dual_link_dvi &&
49 stream->sink->sink_signal != SIGNAL_TYPE_DVI_SINGLE_LINK) 48 stream->timing.pix_clk_khz > TMDS_MAX_PIXEL_CLOCK &&
49 stream->sink->sink_signal != SIGNAL_TYPE_DVI_SINGLE_LINK)
50 stream->signal = SIGNAL_TYPE_DVI_DUAL_LINK; 50 stream->signal = SIGNAL_TYPE_DVI_DUAL_LINK;
51 else 51 else
52 stream->signal = SIGNAL_TYPE_DVI_SINGLE_LINK; 52 stream->signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
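
With the per-ASIC dual_link_dvi capability and the shared TMDS limit now in signal_types.h, the DVI signal choice reduces to a three-way predicate. Restated on its own, with the field accesses flattened into parameters:

/* TMDS_MAX_PIXEL_CLOCK is the 165000 kHz single-link limit defined in
 * signal_types.h further down in this patch. */
static bool wants_dual_link_dvi(bool asic_dual_link_dvi,
				uint32_t pix_clk_khz,
				enum signal_type sink_signal)
{
	return asic_dual_link_dvi &&
	       pix_clk_khz > TMDS_MAX_PIXEL_CLOCK &&
	       sink_signal != SIGNAL_TYPE_DVI_SINGLE_LINK;
}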
@@ -193,6 +193,7 @@ bool dc_stream_set_cursor_attributes(
193 193
194 core_dc = stream->ctx->dc; 194 core_dc = stream->ctx->dc;
195 res_ctx = &core_dc->current_state->res_ctx; 195 res_ctx = &core_dc->current_state->res_ctx;
196 stream->cursor_attributes = *attributes;
196 197
197 for (i = 0; i < MAX_PIPES; i++) { 198 for (i = 0; i < MAX_PIPES; i++) {
198 struct pipe_ctx *pipe_ctx = &res_ctx->pipe_ctx[i]; 199 struct pipe_ctx *pipe_ctx = &res_ctx->pipe_ctx[i];
@@ -204,34 +205,8 @@ bool dc_stream_set_cursor_attributes(
204 continue; 205 continue;
205 206
206 207
207 if (pipe_ctx->plane_res.ipp->funcs->ipp_cursor_set_attributes != NULL) 208 core_dc->hwss.set_cursor_attribute(pipe_ctx);
208 pipe_ctx->plane_res.ipp->funcs->ipp_cursor_set_attributes(
209 pipe_ctx->plane_res.ipp, attributes);
210
211 if (pipe_ctx->plane_res.hubp != NULL &&
212 pipe_ctx->plane_res.hubp->funcs->set_cursor_attributes != NULL)
213 pipe_ctx->plane_res.hubp->funcs->set_cursor_attributes(
214 pipe_ctx->plane_res.hubp, attributes);
215
216 if (pipe_ctx->plane_res.mi != NULL &&
217 pipe_ctx->plane_res.mi->funcs->set_cursor_attributes != NULL)
218 pipe_ctx->plane_res.mi->funcs->set_cursor_attributes(
219 pipe_ctx->plane_res.mi, attributes);
220
221
222 if (pipe_ctx->plane_res.xfm != NULL &&
223 pipe_ctx->plane_res.xfm->funcs->set_cursor_attributes != NULL)
224 pipe_ctx->plane_res.xfm->funcs->set_cursor_attributes(
225 pipe_ctx->plane_res.xfm, attributes);
226
227 if (pipe_ctx->plane_res.dpp != NULL &&
228 pipe_ctx->plane_res.dpp->funcs->set_cursor_attributes != NULL)
229 pipe_ctx->plane_res.dpp->funcs->set_cursor_attributes(
230 pipe_ctx->plane_res.dpp, attributes->color_format);
231 } 209 }
232
233 stream->cursor_attributes = *attributes;
234
235 return true; 210 return true;
236} 211}
237 212
@@ -255,21 +230,10 @@ bool dc_stream_set_cursor_position(
255 230
256 core_dc = stream->ctx->dc; 231 core_dc = stream->ctx->dc;
257 res_ctx = &core_dc->current_state->res_ctx; 232 res_ctx = &core_dc->current_state->res_ctx;
233 stream->cursor_position = *position;
258 234
259 for (i = 0; i < MAX_PIPES; i++) { 235 for (i = 0; i < MAX_PIPES; i++) {
260 struct pipe_ctx *pipe_ctx = &res_ctx->pipe_ctx[i]; 236 struct pipe_ctx *pipe_ctx = &res_ctx->pipe_ctx[i];
261 struct input_pixel_processor *ipp = pipe_ctx->plane_res.ipp;
262 struct mem_input *mi = pipe_ctx->plane_res.mi;
263 struct hubp *hubp = pipe_ctx->plane_res.hubp;
264 struct dpp *dpp = pipe_ctx->plane_res.dpp;
265 struct dc_cursor_position pos_cpy = *position;
266 struct dc_cursor_mi_param param = {
267 .pixel_clk_khz = stream->timing.pix_clk_khz,
268 .ref_clk_khz = core_dc->res_pool->ref_clock_inKhz,
269 .viewport_x_start = pipe_ctx->plane_res.scl_data.viewport.x,
270 .viewport_width = pipe_ctx->plane_res.scl_data.viewport.width,
271 .h_scale_ratio = pipe_ctx->plane_res.scl_data.ratios.horz
272 };
273 237
274 if (pipe_ctx->stream != stream || 238 if (pipe_ctx->stream != stream ||
275 (!pipe_ctx->plane_res.mi && !pipe_ctx->plane_res.hubp) || 239 (!pipe_ctx->plane_res.mi && !pipe_ctx->plane_res.hubp) ||
@@ -278,33 +242,9 @@ bool dc_stream_set_cursor_position(
278 !pipe_ctx->plane_res.ipp) 242 !pipe_ctx->plane_res.ipp)
279 continue; 243 continue;
280 244
281 if (pipe_ctx->plane_state->address.type 245 core_dc->hwss.set_cursor_position(pipe_ctx);
282 == PLN_ADDR_TYPE_VIDEO_PROGRESSIVE)
283 pos_cpy.enable = false;
284
285 if (pipe_ctx->top_pipe && pipe_ctx->plane_state != pipe_ctx->top_pipe->plane_state)
286 pos_cpy.enable = false;
287
288
289 if (ipp != NULL && ipp->funcs->ipp_cursor_set_position != NULL)
290 ipp->funcs->ipp_cursor_set_position(ipp, &pos_cpy, &param);
291
292 if (mi != NULL && mi->funcs->set_cursor_position != NULL)
293 mi->funcs->set_cursor_position(mi, &pos_cpy, &param);
294
295 if (!hubp)
296 continue;
297
298 if (hubp->funcs->set_cursor_position != NULL)
299 hubp->funcs->set_cursor_position(hubp, &pos_cpy, &param);
300
301 if (dpp != NULL && dpp->funcs->set_cursor_position != NULL)
302 dpp->funcs->set_cursor_position(dpp, &pos_cpy, &param, hubp->curs_attr.width);
303
304 } 246 }
305 247
306 stream->cursor_position = *position;
307
308 return true; 248 return true;
309} 249}
310 250
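
The dc_stream.c rewrite swaps the open-coded ipp/mi/hubp/dpp cursor programming for two per-ASIC hw-sequencer hooks, and caches the requested state on the stream before dispatching. The hooks read from the stream, which is what later lets DCN10 re-apply the cursor after a full pipe update. The common shape, with the pipe-matching conditions abbreviated (a sketch, not the full function):

bool set_cursor_state_sketch(struct dc_stream_state *stream,
			     const struct dc_cursor_position *position)
{
	struct dc *core_dc = stream->ctx->dc;
	struct resource_context *res_ctx = &core_dc->current_state->res_ctx;
	int i;

	/* Cache first: the per-ASIC hooks pull their input from the stream. */
	stream->cursor_position = *position;

	for (i = 0; i < MAX_PIPES; i++) {
		struct pipe_ctx *pipe_ctx = &res_ctx->pipe_ctx[i];

		if (pipe_ctx->stream != stream)	/* plus plane_res checks */
			continue;

		core_dc->hwss.set_cursor_position(pipe_ctx);
	}
	return true;
}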
diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
index e2e3c9df79ea..d6d56611604e 100644
--- a/drivers/gpu/drm/amd/display/dc/dc.h
+++ b/drivers/gpu/drm/amd/display/dc/dc.h
@@ -62,6 +62,7 @@ struct dc_caps {
62 bool dcc_const_color; 62 bool dcc_const_color;
63 bool dynamic_audio; 63 bool dynamic_audio;
64 bool is_apu; 64 bool is_apu;
65 bool dual_link_dvi;
65}; 66};
66 67
67struct dc_dcc_surface_param { 68struct dc_dcc_surface_param {
@@ -672,7 +673,7 @@ enum dc_irq_source dc_interrupt_to_irq_source(
672 struct dc *dc, 673 struct dc *dc,
673 uint32_t src_id, 674 uint32_t src_id,
674 uint32_t ext_id); 675 uint32_t ext_id);
675void dc_interrupt_set(struct dc *dc, enum dc_irq_source src, bool enable); 676bool dc_interrupt_set(struct dc *dc, enum dc_irq_source src, bool enable);
676void dc_interrupt_ack(struct dc *dc, enum dc_irq_source src); 677void dc_interrupt_ack(struct dc *dc, enum dc_irq_source src);
677enum dc_irq_source dc_get_hpd_irq_source_at_index( 678enum dc_irq_source dc_get_hpd_irq_source_at_index(
678 struct dc *dc, uint32_t link_index); 679 struct dc *dc, uint32_t link_index);
diff --git a/drivers/gpu/drm/amd/display/dc/dc_stream.h b/drivers/gpu/drm/amd/display/dc/dc_stream.h
index 01c60f11b2bd..456e4d29eadd 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_stream.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_stream.h
@@ -237,6 +237,8 @@ enum surface_update_type dc_check_update_surfaces_for_stream(
237 */ 237 */
238struct dc_stream_state *dc_create_stream_for_sink(struct dc_sink *dc_sink); 238struct dc_stream_state *dc_create_stream_for_sink(struct dc_sink *dc_sink);
239 239
240void update_stream_signal(struct dc_stream_state *stream);
241
240void dc_stream_retain(struct dc_stream_state *dc_stream); 242void dc_stream_retain(struct dc_stream_state *dc_stream);
241void dc_stream_release(struct dc_stream_state *dc_stream); 243void dc_stream_release(struct dc_stream_state *dc_stream);
242 244
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h b/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h
index b73db9e78437..a993279a8f2d 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h
@@ -236,6 +236,7 @@
236 SR(D2VGA_CONTROL), \ 236 SR(D2VGA_CONTROL), \
237 SR(D3VGA_CONTROL), \ 237 SR(D3VGA_CONTROL), \
238 SR(D4VGA_CONTROL), \ 238 SR(D4VGA_CONTROL), \
239 SR(VGA_TEST_CONTROL), \
239 SR(DC_IP_REQUEST_CNTL), \ 240 SR(DC_IP_REQUEST_CNTL), \
240 BL_REG_LIST() 241 BL_REG_LIST()
241 242
@@ -337,6 +338,7 @@ struct dce_hwseq_registers {
337 uint32_t D2VGA_CONTROL; 338 uint32_t D2VGA_CONTROL;
338 uint32_t D3VGA_CONTROL; 339 uint32_t D3VGA_CONTROL;
339 uint32_t D4VGA_CONTROL; 340 uint32_t D4VGA_CONTROL;
341 uint32_t VGA_TEST_CONTROL;
340 /* MMHUB registers. read only. temporary hack */ 342 /* MMHUB registers. read only. temporary hack */
341 uint32_t VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32; 343 uint32_t VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32;
342 uint32_t VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32; 344 uint32_t VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32;
@@ -493,6 +495,9 @@ struct dce_hwseq_registers {
493 HWS_SF(, DOMAIN6_PG_STATUS, DOMAIN6_PGFSM_PWR_STATUS, mask_sh), \ 495 HWS_SF(, DOMAIN6_PG_STATUS, DOMAIN6_PGFSM_PWR_STATUS, mask_sh), \
494 HWS_SF(, DOMAIN7_PG_STATUS, DOMAIN7_PGFSM_PWR_STATUS, mask_sh), \ 496 HWS_SF(, DOMAIN7_PG_STATUS, DOMAIN7_PGFSM_PWR_STATUS, mask_sh), \
495 HWS_SF(, DC_IP_REQUEST_CNTL, IP_REQUEST_EN, mask_sh), \ 497 HWS_SF(, DC_IP_REQUEST_CNTL, IP_REQUEST_EN, mask_sh), \
498 HWS_SF(, D1VGA_CONTROL, D1VGA_MODE_ENABLE, mask_sh),\
499 HWS_SF(, VGA_TEST_CONTROL, VGA_TEST_ENABLE, mask_sh),\
500 HWS_SF(, VGA_TEST_CONTROL, VGA_TEST_RENDER_START, mask_sh),\
496 HWS_SF(, LVTMA_PWRSEQ_CNTL, LVTMA_BLON, mask_sh), \ 501 HWS_SF(, LVTMA_PWRSEQ_CNTL, LVTMA_BLON, mask_sh), \
497 HWS_SF(, LVTMA_PWRSEQ_STATE, LVTMA_PWRSEQ_TARGET_STATE_R, mask_sh) 502 HWS_SF(, LVTMA_PWRSEQ_STATE, LVTMA_PWRSEQ_TARGET_STATE_R, mask_sh)
498 503
@@ -583,7 +588,10 @@ struct dce_hwseq_registers {
583 type DCFCLK_GATE_DIS; \ 588 type DCFCLK_GATE_DIS; \
584 type DCHUBBUB_GLOBAL_TIMER_REFDIV; \ 589 type DCHUBBUB_GLOBAL_TIMER_REFDIV; \
585 type DENTIST_DPPCLK_WDIVIDER; \ 590 type DENTIST_DPPCLK_WDIVIDER; \
586 type DENTIST_DISPCLK_WDIVIDER; 591 type DENTIST_DISPCLK_WDIVIDER; \
592 type VGA_TEST_ENABLE; \
593 type VGA_TEST_RENDER_START; \
594 type D1VGA_MODE_ENABLE;
587 595
588struct dce_hwseq_shift { 596struct dce_hwseq_shift {
589 HWSEQ_REG_FIELD_LIST(uint8_t) 597 HWSEQ_REG_FIELD_LIST(uint8_t)
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c
index a266e3f5e75f..e4741f1a2b01 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c
@@ -82,13 +82,6 @@
82#define DCE110_DIG_FE_SOURCE_SELECT_DIGF 0x20 82#define DCE110_DIG_FE_SOURCE_SELECT_DIGF 0x20
83#define DCE110_DIG_FE_SOURCE_SELECT_DIGG 0x40 83#define DCE110_DIG_FE_SOURCE_SELECT_DIGG 0x40
84 84
85/* Minimum pixel clock, in KHz. For TMDS signal is 25.00 MHz */
86#define TMDS_MIN_PIXEL_CLOCK 25000
87/* Maximum pixel clock, in KHz. For TMDS signal is 165.00 MHz */
88#define TMDS_MAX_PIXEL_CLOCK 165000
89/* For current ASICs pixel clock - 600MHz */
90#define MAX_ENCODER_CLOCK 600000
91
92enum { 85enum {
93 DP_MST_UPDATE_MAX_RETRY = 50 86 DP_MST_UPDATE_MAX_RETRY = 50
94}; 87};
@@ -683,6 +676,7 @@ void dce110_link_encoder_construct(
683{ 676{
684 struct bp_encoder_cap_info bp_cap_info = {0}; 677 struct bp_encoder_cap_info bp_cap_info = {0};
685 const struct dc_vbios_funcs *bp_funcs = init_data->ctx->dc_bios->funcs; 678 const struct dc_vbios_funcs *bp_funcs = init_data->ctx->dc_bios->funcs;
679 enum bp_result result = BP_RESULT_OK;
686 680
687 enc110->base.funcs = &dce110_lnk_enc_funcs; 681 enc110->base.funcs = &dce110_lnk_enc_funcs;
688 enc110->base.ctx = init_data->ctx; 682 enc110->base.ctx = init_data->ctx;
@@ -757,15 +751,24 @@ void dce110_link_encoder_construct(
757 enc110->base.preferred_engine = ENGINE_ID_UNKNOWN; 751 enc110->base.preferred_engine = ENGINE_ID_UNKNOWN;
758 } 752 }
759 753
754 /* default to one to mirror Windows behavior */
755 enc110->base.features.flags.bits.HDMI_6GB_EN = 1;
756
757 result = bp_funcs->get_encoder_cap_info(enc110->base.ctx->dc_bios,
758 enc110->base.id, &bp_cap_info);
759
760 /* Override features with DCE-specific values */ 760 /* Override features with DCE-specific values */
761 if (BP_RESULT_OK == bp_funcs->get_encoder_cap_info( 761 if (BP_RESULT_OK == result) {
762 enc110->base.ctx->dc_bios, enc110->base.id,
763 &bp_cap_info)) {
764 enc110->base.features.flags.bits.IS_HBR2_CAPABLE = 762 enc110->base.features.flags.bits.IS_HBR2_CAPABLE =
765 bp_cap_info.DP_HBR2_EN; 763 bp_cap_info.DP_HBR2_EN;
766 enc110->base.features.flags.bits.IS_HBR3_CAPABLE = 764 enc110->base.features.flags.bits.IS_HBR3_CAPABLE =
767 bp_cap_info.DP_HBR3_EN; 765 bp_cap_info.DP_HBR3_EN;
768 enc110->base.features.flags.bits.HDMI_6GB_EN = bp_cap_info.HDMI_6GB_EN; 766 enc110->base.features.flags.bits.HDMI_6GB_EN = bp_cap_info.HDMI_6GB_EN;
767 } else {
768 dm_logger_write(enc110->base.ctx->logger, LOG_WARNING,
769 "%s: Failed to get encoder_cap_info from VBIOS with error code %d!\n",
770 __func__,
771 result);
769 } 772 }
770} 773}
771 774
@@ -904,8 +907,7 @@ void dce110_link_encoder_enable_tmds_output(
904 struct link_encoder *enc, 907 struct link_encoder *enc,
905 enum clock_source_id clock_source, 908 enum clock_source_id clock_source,
906 enum dc_color_depth color_depth, 909 enum dc_color_depth color_depth,
907 bool hdmi, 910 enum signal_type signal,
908 bool dual_link,
909 uint32_t pixel_clock) 911 uint32_t pixel_clock)
910{ 912{
911 struct dce110_link_encoder *enc110 = TO_DCE110_LINK_ENC(enc); 913 struct dce110_link_encoder *enc110 = TO_DCE110_LINK_ENC(enc);
@@ -919,16 +921,12 @@ void dce110_link_encoder_enable_tmds_output(
919 cntl.engine_id = enc->preferred_engine; 921 cntl.engine_id = enc->preferred_engine;
920 cntl.transmitter = enc110->base.transmitter; 922 cntl.transmitter = enc110->base.transmitter;
921 cntl.pll_id = clock_source; 923 cntl.pll_id = clock_source;
922 if (hdmi) { 924 cntl.signal = signal;
923 cntl.signal = SIGNAL_TYPE_HDMI_TYPE_A; 925 if (cntl.signal == SIGNAL_TYPE_DVI_DUAL_LINK)
924 cntl.lanes_number = 4;
925 } else if (dual_link) {
926 cntl.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
927 cntl.lanes_number = 8; 926 cntl.lanes_number = 8;
928 } else { 927 else
929 cntl.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
930 cntl.lanes_number = 4; 928 cntl.lanes_number = 4;
931 } 929
932 cntl.hpd_sel = enc110->base.hpd_source; 930 cntl.hpd_sel = enc110->base.hpd_source;
933 931
934 cntl.pixel_clock = pixel_clock; 932 cntl.pixel_clock = pixel_clock;
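
Passing the signal_type straight through replaces the ambiguous bool hdmi / bool dual_link pair, and the TMDS lane count now falls directly out of it: the only eight-lane case is dual-link DVI.

static uint32_t tmds_lane_count(enum signal_type signal)
{
	return signal == SIGNAL_TYPE_DVI_DUAL_LINK ? 8 : 4;
}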
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.h b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.h
index 8ca9afe47a2b..0ec3433d34b6 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.h
@@ -210,8 +210,7 @@ void dce110_link_encoder_enable_tmds_output(
210 struct link_encoder *enc, 210 struct link_encoder *enc,
211 enum clock_source_id clock_source, 211 enum clock_source_id clock_source,
212 enum dc_color_depth color_depth, 212 enum dc_color_depth color_depth,
213 bool hdmi, 213 enum signal_type signal,
214 bool dual_link,
215 uint32_t pixel_clock); 214 uint32_t pixel_clock);
216 215
217/* enables DP PHY output */ 216/* enables DP PHY output */
diff --git a/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c b/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c
index 3ea43e2a9450..442dd2d93618 100644
--- a/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c
@@ -852,6 +852,7 @@ static bool construct(
852 dc->caps.max_downscale_ratio = 200; 852 dc->caps.max_downscale_ratio = 200;
853 dc->caps.i2c_speed_in_khz = 40; 853 dc->caps.i2c_speed_in_khz = 40;
854 dc->caps.max_cursor_size = 128; 854 dc->caps.max_cursor_size = 128;
855 dc->caps.dual_link_dvi = true;
855 856
856 for (i = 0; i < pool->base.pipe_count; i++) { 857 for (i = 0; i < pool->base.pipe_count; i++) {
857 pool->base.timing_generators[i] = 858 pool->base.timing_generators[i] =
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
index 86cdd7b4811f..6f382a3ac90f 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
@@ -688,15 +688,22 @@ void dce110_enable_stream(struct pipe_ctx *pipe_ctx)
688 struct dc_crtc_timing *timing = &pipe_ctx->stream->timing; 688 struct dc_crtc_timing *timing = &pipe_ctx->stream->timing;
689 struct dc_link *link = pipe_ctx->stream->sink->link; 689 struct dc_link *link = pipe_ctx->stream->sink->link;
690 690
691 /* 1. update AVI info frame (HDMI, DP) 691
692 * we always need to update info frame
693 */
694 uint32_t active_total_with_borders; 692 uint32_t active_total_with_borders;
695 uint32_t early_control = 0; 693 uint32_t early_control = 0;
696 struct timing_generator *tg = pipe_ctx->stream_res.tg; 694 struct timing_generator *tg = pipe_ctx->stream_res.tg;
697 695
 698 /* TODOFPGA may change to hwss.update_info_frame */ 696 /* For MST, multiple streams can go over a single link.
 697 * Connect the DIG back end to the front end in enable_stream and
 698 * disconnect them in disable_stream; this keeps the
 699 * stream/link separation logically clean. */
700 link->link_enc->funcs->connect_dig_be_to_fe(link->link_enc,
701 pipe_ctx->stream_res.stream_enc->id, true);
702
703 /* update AVI info frame (HDMI, DP)*/
704 /* TODO: FPGA may change to hwss.update_info_frame */
699 dce110_update_info_frame(pipe_ctx); 705 dce110_update_info_frame(pipe_ctx);
706
700 /* enable early control to avoid corruption on DP monitor*/ 707 /* enable early control to avoid corruption on DP monitor*/
701 active_total_with_borders = 708 active_total_with_borders =
702 timing->h_addressable 709 timing->h_addressable
@@ -717,12 +724,8 @@ void dce110_enable_stream(struct pipe_ctx *pipe_ctx)
717 pipe_ctx->stream_res.stream_enc->funcs->dp_audio_enable(pipe_ctx->stream_res.stream_enc); 724 pipe_ctx->stream_res.stream_enc->funcs->dp_audio_enable(pipe_ctx->stream_res.stream_enc);
718 } 725 }
719 726
720 /* For MST, there are multiply stream go to only one link. 727
721 * connect DIG back_end to front_end while enable_stream and 728
722 * disconnect them during disable_stream
723 * BY this, it is logic clean to separate stream and link */
724 link->link_enc->funcs->connect_dig_be_to_fe(link->link_enc,
725 pipe_ctx->stream_res.stream_enc->id, true);
726 729
727} 730}
728 731
@@ -1690,9 +1693,13 @@ static void apply_min_clocks(
1690 * Check if FBC can be enabled 1693 * Check if FBC can be enabled
1691 */ 1694 */
1692static bool should_enable_fbc(struct dc *dc, 1695static bool should_enable_fbc(struct dc *dc,
1693 struct dc_state *context) 1696 struct dc_state *context,
1697 uint32_t *pipe_idx)
1694{ 1698{
1695 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[0]; 1699 uint32_t i;
1700 struct pipe_ctx *pipe_ctx = NULL;
1701 struct resource_context *res_ctx = &context->res_ctx;
1702
1696 1703
1697 ASSERT(dc->fbc_compressor); 1704 ASSERT(dc->fbc_compressor);
1698 1705
@@ -1704,6 +1711,14 @@ static bool should_enable_fbc(struct dc *dc,
1704 if (context->stream_count != 1) 1711 if (context->stream_count != 1)
1705 return false; 1712 return false;
1706 1713
1714 for (i = 0; i < dc->res_pool->pipe_count; i++) {
1715 if (res_ctx->pipe_ctx[i].stream) {
1716 pipe_ctx = &res_ctx->pipe_ctx[i];
1717 *pipe_idx = i;
1718 break;
1719 }
1720 }
1721
1707 /* Only supports eDP */ 1722 /* Only supports eDP */
1708 if (pipe_ctx->stream->sink->link->connector_signal != SIGNAL_TYPE_EDP) 1723 if (pipe_ctx->stream->sink->link->connector_signal != SIGNAL_TYPE_EDP)
1709 return false; 1724 return false;
@@ -1729,11 +1744,14 @@ static bool should_enable_fbc(struct dc *dc,
1729static void enable_fbc(struct dc *dc, 1744static void enable_fbc(struct dc *dc,
1730 struct dc_state *context) 1745 struct dc_state *context)
1731{ 1746{
1732 if (should_enable_fbc(dc, context)) { 1747 uint32_t pipe_idx = 0;
1748
1749 if (should_enable_fbc(dc, context, &pipe_idx)) {
1733 /* Program GRPH COMPRESSED ADDRESS and PITCH */ 1750 /* Program GRPH COMPRESSED ADDRESS and PITCH */
1734 struct compr_addr_and_pitch_params params = {0, 0, 0}; 1751 struct compr_addr_and_pitch_params params = {0, 0, 0};
1735 struct compressor *compr = dc->fbc_compressor; 1752 struct compressor *compr = dc->fbc_compressor;
1736 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[0]; 1753 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[pipe_idx];
1754
1737 1755
1738 params.source_view_width = pipe_ctx->stream->timing.h_addressable; 1756 params.source_view_width = pipe_ctx->stream->timing.h_addressable;
1739 params.source_view_height = pipe_ctx->stream->timing.v_addressable; 1757 params.source_view_height = pipe_ctx->stream->timing.v_addressable;
@@ -2915,6 +2933,49 @@ static void program_csc_matrix(struct pipe_ctx *pipe_ctx,
2915 } 2933 }
2916} 2934}
2917 2935
2936void dce110_set_cursor_position(struct pipe_ctx *pipe_ctx)
2937{
2938 struct dc_cursor_position pos_cpy = pipe_ctx->stream->cursor_position;
2939 struct input_pixel_processor *ipp = pipe_ctx->plane_res.ipp;
2940 struct mem_input *mi = pipe_ctx->plane_res.mi;
2941 struct dc_cursor_mi_param param = {
2942 .pixel_clk_khz = pipe_ctx->stream->timing.pix_clk_khz,
2943 .ref_clk_khz = pipe_ctx->stream->ctx->dc->res_pool->ref_clock_inKhz,
2944 .viewport_x_start = pipe_ctx->plane_res.scl_data.viewport.x,
2945 .viewport_width = pipe_ctx->plane_res.scl_data.viewport.width,
2946 .h_scale_ratio = pipe_ctx->plane_res.scl_data.ratios.horz
2947 };
2948
2949 if (pipe_ctx->plane_state->address.type
2950 == PLN_ADDR_TYPE_VIDEO_PROGRESSIVE)
2951 pos_cpy.enable = false;
2952
2953 if (pipe_ctx->top_pipe && pipe_ctx->plane_state != pipe_ctx->top_pipe->plane_state)
2954 pos_cpy.enable = false;
2955
2956 if (ipp->funcs->ipp_cursor_set_position)
2957 ipp->funcs->ipp_cursor_set_position(ipp, &pos_cpy, &param);
2958 if (mi->funcs->set_cursor_position)
2959 mi->funcs->set_cursor_position(mi, &pos_cpy, &param);
2960}
2961
2962void dce110_set_cursor_attribute(struct pipe_ctx *pipe_ctx)
2963{
2964 struct dc_cursor_attributes *attributes = &pipe_ctx->stream->cursor_attributes;
2965
2966 if (pipe_ctx->plane_res.ipp->funcs->ipp_cursor_set_attributes)
2967 pipe_ctx->plane_res.ipp->funcs->ipp_cursor_set_attributes(
2968 pipe_ctx->plane_res.ipp, attributes);
2969
2970 if (pipe_ctx->plane_res.mi->funcs->set_cursor_attributes)
2971 pipe_ctx->plane_res.mi->funcs->set_cursor_attributes(
2972 pipe_ctx->plane_res.mi, attributes);
2973
2974 if (pipe_ctx->plane_res.xfm->funcs->set_cursor_attributes)
2975 pipe_ctx->plane_res.xfm->funcs->set_cursor_attributes(
2976 pipe_ctx->plane_res.xfm, attributes);
2977}
2978
2918static void ready_shared_resources(struct dc *dc, struct dc_state *context) {} 2979static void ready_shared_resources(struct dc *dc, struct dc_state *context) {}
2919 2980
2920static void optimize_shared_resources(struct dc *dc) {} 2981static void optimize_shared_resources(struct dc *dc) {}
@@ -2957,6 +3018,8 @@ static const struct hw_sequencer_funcs dce110_funcs = {
2957 .edp_backlight_control = hwss_edp_backlight_control, 3018 .edp_backlight_control = hwss_edp_backlight_control,
2958 .edp_power_control = hwss_edp_power_control, 3019 .edp_power_control = hwss_edp_power_control,
2959 .edp_wait_for_hpd_ready = hwss_edp_wait_for_hpd_ready, 3020 .edp_wait_for_hpd_ready = hwss_edp_wait_for_hpd_ready,
3021 .set_cursor_position = dce110_set_cursor_position,
3022 .set_cursor_attribute = dce110_set_cursor_attribute
2960}; 3023};
2961 3024
2962void dce110_hw_sequencer_construct(struct dc *dc) 3025void dce110_hw_sequencer_construct(struct dc *dc)
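
Two independent fixes land in dce110_hw_sequencer.c: the DIG back-end/front-end connect moves to the top of dce110_enable_stream() so the MST back end is wired up before the info frame and early control are programmed, and should_enable_fbc() stops assuming the stream lives on pipe 0. The pipe scan, restated in isolation (the real function also checks stream count, eDP, and the other FBC constraints):

static int first_pipe_with_stream(const struct resource_context *res_ctx,
				  uint32_t pipe_count)
{
	uint32_t i;

	for (i = 0; i < pipe_count; i++)
		if (res_ctx->pipe_ctx[i].stream)
			return (int)i;
	return -1;	/* no pipe carries a stream */
}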
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c
index 7c4779578fb7..00f18c485e1e 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c
@@ -846,6 +846,16 @@ static bool dce110_validate_bandwidth(
846 return result; 846 return result;
847} 847}
848 848
849enum dc_status dce110_validate_plane(const struct dc_plane_state *plane_state,
850 struct dc_caps *caps)
851{
852 if (((plane_state->dst_rect.width * 2) < plane_state->src_rect.width) ||
853 ((plane_state->dst_rect.height * 2) < plane_state->src_rect.height))
854 return DC_FAIL_SURFACE_VALIDATE;
855
856 return DC_OK;
857}
858
849static bool dce110_validate_surface_sets( 859static bool dce110_validate_surface_sets(
850 struct dc_state *context) 860 struct dc_state *context)
851{ 861{
@@ -869,6 +879,13 @@ static bool dce110_validate_surface_sets(
869 plane->src_rect.height > 1080)) 879 plane->src_rect.height > 1080))
870 return false; 880 return false;
871 881
 882 /* We don't yet have the logic to support an
 883 * underlay-only configuration, so block the case
 884 * where an NV12 plane would be the top layer.
 885 */
886 if (j == 0)
887 return false;
888
872 /* irrespective of plane format, 889 /* irrespective of plane format,
873 * stream should be RGB encoded 890 * stream should be RGB encoded
874 */ 891 */
@@ -1021,6 +1038,7 @@ static const struct resource_funcs dce110_res_pool_funcs = {
1021 .link_enc_create = dce110_link_encoder_create, 1038 .link_enc_create = dce110_link_encoder_create,
1022 .validate_guaranteed = dce110_validate_guaranteed, 1039 .validate_guaranteed = dce110_validate_guaranteed,
1023 .validate_bandwidth = dce110_validate_bandwidth, 1040 .validate_bandwidth = dce110_validate_bandwidth,
1041 .validate_plane = dce110_validate_plane,
1024 .acquire_idle_pipe_for_layer = dce110_acquire_underlay, 1042 .acquire_idle_pipe_for_layer = dce110_acquire_underlay,
1025 .add_stream_to_ctx = dce110_add_stream_to_ctx, 1043 .add_stream_to_ctx = dce110_add_stream_to_ctx,
1026 .validate_global = dce110_validate_global 1044 .validate_global = dce110_validate_global
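
dce110_validate_plane() encodes DCE11's 2:1 maximum downscale in either axis; anything tighter now fails atomic check with DC_FAIL_SURFACE_VALIDATE instead of being caught later. As a predicate, with field names as in the hunk above:

static bool exceeds_max_downscale(const struct dc_plane_state *plane_state)
{
	return (plane_state->dst_rect.width * 2 < plane_state->src_rect.width) ||
	       (plane_state->dst_rect.height * 2 < plane_state->src_rect.height);
}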
diff --git a/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c b/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c
index 663e0a047a4b..98d9cd0109e1 100644
--- a/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c
@@ -1103,6 +1103,8 @@ static bool construct(
1103 dc->caps.max_downscale_ratio = 200; 1103 dc->caps.max_downscale_ratio = 200;
1104 dc->caps.i2c_speed_in_khz = 100; 1104 dc->caps.i2c_speed_in_khz = 100;
1105 dc->caps.max_cursor_size = 128; 1105 dc->caps.max_cursor_size = 128;
1106 dc->caps.dual_link_dvi = true;
1107
1106 1108
1107 /************************************************* 1109 /*************************************************
1108 * Create resources * 1110 * Create resources *
diff --git a/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c b/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c
index 57cd67359567..5aab01db28ee 100644
--- a/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c
@@ -835,6 +835,8 @@ static bool construct(
835 dc->caps.max_downscale_ratio = 200; 835 dc->caps.max_downscale_ratio = 200;
836 dc->caps.i2c_speed_in_khz = 100; 836 dc->caps.i2c_speed_in_khz = 100;
837 dc->caps.max_cursor_size = 128; 837 dc->caps.max_cursor_size = 128;
838 dc->caps.dual_link_dvi = true;
839
838 dc->debug = debug_defaults; 840 dc->debug = debug_defaults;
839 841
840 /************************************************* 842 /*************************************************
diff --git a/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c b/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c
index 8f2bd56f3461..25d7eb1567ae 100644
--- a/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c
@@ -793,6 +793,7 @@ static bool dce80_construct(
793 dc->caps.max_downscale_ratio = 200; 793 dc->caps.max_downscale_ratio = 200;
794 dc->caps.i2c_speed_in_khz = 40; 794 dc->caps.i2c_speed_in_khz = 40;
795 dc->caps.max_cursor_size = 128; 795 dc->caps.max_cursor_size = 128;
796 dc->caps.dual_link_dvi = true;
796 797
797 /************************************************* 798 /*************************************************
798 * Create resources * 799 * Create resources *
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
index 82572863acab..072e4485e85e 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
@@ -238,10 +238,24 @@ static void enable_power_gating_plane(
238static void disable_vga( 238static void disable_vga(
239 struct dce_hwseq *hws) 239 struct dce_hwseq *hws)
240{ 240{
241 unsigned int in_vga_mode = 0;
242
243 REG_GET(D1VGA_CONTROL, D1VGA_MODE_ENABLE, &in_vga_mode);
244
245 if (in_vga_mode == 0)
246 return;
247
241 REG_WRITE(D1VGA_CONTROL, 0); 248 REG_WRITE(D1VGA_CONTROL, 0);
242 REG_WRITE(D2VGA_CONTROL, 0); 249
243 REG_WRITE(D3VGA_CONTROL, 0); 250 /* HW Engineer's Notes:
 244 REG_WRITE(D4VGA_CONTROL, 0); 251 * During the switch from VGA to extended mode, setting VGA_TEST_ENABLE
 252 * and then hitting VGA_TEST_RENDER_START gets the DCHUBP timing updated correctly.
 253 *
 254 * The vBIOS then polls for VGA_TEST_RENDER_DONE and clears
 255 * VGA_TEST_ENABLE, leaving the state as it was before.
256 */
257 REG_UPDATE(VGA_TEST_CONTROL, VGA_TEST_ENABLE, 1);
258 REG_UPDATE(VGA_TEST_CONTROL, VGA_TEST_RENDER_START, 1);
245} 259}
246 260
247static void dpp_pg_control( 261static void dpp_pg_control(
@@ -1761,6 +1775,11 @@ static void update_dchubp_dpp(
1761 &pipe_ctx->plane_res.scl_data.viewport_c); 1775 &pipe_ctx->plane_res.scl_data.viewport_c);
1762 } 1776 }
1763 1777
1778 if (pipe_ctx->stream->cursor_attributes.address.quad_part != 0) {
1779 dc->hwss.set_cursor_position(pipe_ctx);
1780 dc->hwss.set_cursor_attribute(pipe_ctx);
1781 }
1782
1764 if (plane_state->update_flags.bits.full_update) { 1783 if (plane_state->update_flags.bits.full_update) {
1765 /*gamut remap*/ 1784 /*gamut remap*/
1766 program_gamut_remap(pipe_ctx); 1785 program_gamut_remap(pipe_ctx);
@@ -2296,7 +2315,7 @@ static bool dcn10_dummy_display_power_gating(
2296 return true; 2315 return true;
2297} 2316}
2298 2317
2299void dcn10_update_pending_status(struct pipe_ctx *pipe_ctx) 2318static void dcn10_update_pending_status(struct pipe_ctx *pipe_ctx)
2300{ 2319{
2301 struct dc_plane_state *plane_state = pipe_ctx->plane_state; 2320 struct dc_plane_state *plane_state = pipe_ctx->plane_state;
2302 struct timing_generator *tg = pipe_ctx->stream_res.tg; 2321 struct timing_generator *tg = pipe_ctx->stream_res.tg;
@@ -2316,12 +2335,46 @@ void dcn10_update_pending_status(struct pipe_ctx *pipe_ctx)
2316 } 2335 }
2317} 2336}
2318 2337
2319void dcn10_update_dchub(struct dce_hwseq *hws, struct dchub_init_data *dh_data) 2338static void dcn10_update_dchub(struct dce_hwseq *hws, struct dchub_init_data *dh_data)
2320{ 2339{
2321 if (hws->ctx->dc->res_pool->hubbub != NULL) 2340 if (hws->ctx->dc->res_pool->hubbub != NULL)
2322 hubbub1_update_dchub(hws->ctx->dc->res_pool->hubbub, dh_data); 2341 hubbub1_update_dchub(hws->ctx->dc->res_pool->hubbub, dh_data);
2323} 2342}
2324 2343
2344static void dcn10_set_cursor_position(struct pipe_ctx *pipe_ctx)
2345{
2346 struct dc_cursor_position pos_cpy = pipe_ctx->stream->cursor_position;
2347 struct hubp *hubp = pipe_ctx->plane_res.hubp;
2348 struct dpp *dpp = pipe_ctx->plane_res.dpp;
2349 struct dc_cursor_mi_param param = {
2350 .pixel_clk_khz = pipe_ctx->stream->timing.pix_clk_khz,
2351 .ref_clk_khz = pipe_ctx->stream->ctx->dc->res_pool->ref_clock_inKhz,
2352 .viewport_x_start = pipe_ctx->plane_res.scl_data.viewport.x,
2353 .viewport_width = pipe_ctx->plane_res.scl_data.viewport.width,
2354 .h_scale_ratio = pipe_ctx->plane_res.scl_data.ratios.horz
2355 };
2356
2357 if (pipe_ctx->plane_state->address.type
2358 == PLN_ADDR_TYPE_VIDEO_PROGRESSIVE)
2359 pos_cpy.enable = false;
2360
2361 if (pipe_ctx->top_pipe && pipe_ctx->plane_state != pipe_ctx->top_pipe->plane_state)
2362 pos_cpy.enable = false;
2363
2364 hubp->funcs->set_cursor_position(hubp, &pos_cpy, &param);
2365 dpp->funcs->set_cursor_position(dpp, &pos_cpy, &param, hubp->curs_attr.width);
2366}
2367
2368static void dcn10_set_cursor_attribute(struct pipe_ctx *pipe_ctx)
2369{
2370 struct dc_cursor_attributes *attributes = &pipe_ctx->stream->cursor_attributes;
2371
2372 pipe_ctx->plane_res.hubp->funcs->set_cursor_attributes(
2373 pipe_ctx->plane_res.hubp, attributes);
2374 pipe_ctx->plane_res.dpp->funcs->set_cursor_attributes(
2375 pipe_ctx->plane_res.dpp, attributes->color_format);
2376}
2377
2325static const struct hw_sequencer_funcs dcn10_funcs = { 2378static const struct hw_sequencer_funcs dcn10_funcs = {
2326 .program_gamut_remap = program_gamut_remap, 2379 .program_gamut_remap = program_gamut_remap,
2327 .program_csc_matrix = program_csc_matrix, 2380 .program_csc_matrix = program_csc_matrix,
@@ -2362,6 +2415,8 @@ static const struct hw_sequencer_funcs dcn10_funcs = {
2362 .edp_backlight_control = hwss_edp_backlight_control, 2415 .edp_backlight_control = hwss_edp_backlight_control,
2363 .edp_power_control = hwss_edp_power_control, 2416 .edp_power_control = hwss_edp_power_control,
2364 .edp_wait_for_hpd_ready = hwss_edp_wait_for_hpd_ready, 2417 .edp_wait_for_hpd_ready = hwss_edp_wait_for_hpd_ready,
2418 .set_cursor_position = dcn10_set_cursor_position,
2419 .set_cursor_attribute = dcn10_set_cursor_attribute
2365}; 2420};
2366 2421
2367 2422
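
Because cursor state is now cached on the stream (see the dc_stream.c change above), DCN10 can restore the hardware cursor after a full DCHUBP/DPP reprogram; a nonzero attributes address doubles as "a cursor surface has been programmed". The re-apply step in isolation:

static void reapply_cursor_sketch(struct dc *dc, struct pipe_ctx *pipe_ctx)
{
	if (pipe_ctx->stream->cursor_attributes.address.quad_part != 0) {
		dc->hwss.set_cursor_position(pipe_ctx);
		dc->hwss.set_cursor_attribute(pipe_ctx);
	}
}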
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h b/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h
index 0fd329deacd8..54d8a1386142 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h
@@ -123,8 +123,7 @@ struct link_encoder_funcs {
123 void (*enable_tmds_output)(struct link_encoder *enc, 123 void (*enable_tmds_output)(struct link_encoder *enc,
124 enum clock_source_id clock_source, 124 enum clock_source_id clock_source,
125 enum dc_color_depth color_depth, 125 enum dc_color_depth color_depth,
126 bool hdmi, 126 enum signal_type signal,
127 bool dual_link,
128 uint32_t pixel_clock); 127 uint32_t pixel_clock);
129 void (*enable_dp_output)(struct link_encoder *enc, 128 void (*enable_dp_output)(struct link_encoder *enc,
130 const struct dc_link_settings *link_settings, 129 const struct dc_link_settings *link_settings,
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
index 4c0aa56f7bae..379c6ecd271a 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
@@ -198,6 +198,9 @@ struct hw_sequencer_funcs {
198 bool enable); 198 bool enable);
199 void (*edp_wait_for_hpd_ready)(struct dc_link *link, bool power_up); 199 void (*edp_wait_for_hpd_ready)(struct dc_link *link, bool power_up);
200 200
201 void (*set_cursor_position)(struct pipe_ctx *pipe);
202 void (*set_cursor_attribute)(struct pipe_ctx *pipe);
203
201}; 204};
202 205
203void color_space_to_black_color( 206void color_space_to_black_color(
diff --git a/drivers/gpu/drm/amd/display/dc/irq/dce110/irq_service_dce110.c b/drivers/gpu/drm/amd/display/dc/irq/dce110/irq_service_dce110.c
index f7e40b292dfb..d3e1923b01a8 100644
--- a/drivers/gpu/drm/amd/display/dc/irq/dce110/irq_service_dce110.c
+++ b/drivers/gpu/drm/amd/display/dc/irq/dce110/irq_service_dce110.c
@@ -217,7 +217,7 @@ bool dce110_vblank_set(
217 core_dc->current_state->res_ctx.pipe_ctx[pipe_offset].stream_res.tg; 217 core_dc->current_state->res_ctx.pipe_ctx[pipe_offset].stream_res.tg;
218 218
219 if (enable) { 219 if (enable) {
220 if (!tg->funcs->arm_vert_intr(tg, 2)) { 220 if (!tg || !tg->funcs->arm_vert_intr(tg, 2)) {
221 DC_ERROR("Failed to get VBLANK!\n"); 221 DC_ERROR("Failed to get VBLANK!\n");
222 return false; 222 return false;
223 } 223 }
diff --git a/drivers/gpu/drm/amd/display/dc/virtual/virtual_link_encoder.c b/drivers/gpu/drm/amd/display/dc/virtual/virtual_link_encoder.c
index 57a54a7b89e5..1c079ba37c30 100644
--- a/drivers/gpu/drm/amd/display/dc/virtual/virtual_link_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/virtual/virtual_link_encoder.c
@@ -42,8 +42,7 @@ static void virtual_link_encoder_enable_tmds_output(
42 struct link_encoder *enc, 42 struct link_encoder *enc,
43 enum clock_source_id clock_source, 43 enum clock_source_id clock_source,
44 enum dc_color_depth color_depth, 44 enum dc_color_depth color_depth,
45 bool hdmi, 45 enum signal_type signal,
46 bool dual_link,
47 uint32_t pixel_clock) {} 46 uint32_t pixel_clock) {}
48 47
49static void virtual_link_encoder_enable_dp_output( 48static void virtual_link_encoder_enable_dp_output(
diff --git a/drivers/gpu/drm/amd/display/include/grph_object_ctrl_defs.h b/drivers/gpu/drm/amd/display/include/grph_object_ctrl_defs.h
index 7a9b43f84a31..36bbad594267 100644
--- a/drivers/gpu/drm/amd/display/include/grph_object_ctrl_defs.h
+++ b/drivers/gpu/drm/amd/display/include/grph_object_ctrl_defs.h
@@ -419,11 +419,6 @@ struct bios_event_info {
419 bool backlight_changed; 419 bool backlight_changed;
420}; 420};
421 421
422enum {
423 HDMI_PIXEL_CLOCK_IN_KHZ_297 = 297000,
424 TMDS_PIXEL_CLOCK_IN_KHZ_165 = 165000
425};
426
427/* 422/*
428 * DFS-bypass flag 423 * DFS-bypass flag
429 */ 424 */
diff --git a/drivers/gpu/drm/amd/display/include/signal_types.h b/drivers/gpu/drm/amd/display/include/signal_types.h
index b5ebde642207..199c5db67cbc 100644
--- a/drivers/gpu/drm/amd/display/include/signal_types.h
+++ b/drivers/gpu/drm/amd/display/include/signal_types.h
@@ -26,6 +26,11 @@
26#ifndef __DC_SIGNAL_TYPES_H__ 26#ifndef __DC_SIGNAL_TYPES_H__
27#define __DC_SIGNAL_TYPES_H__ 27#define __DC_SIGNAL_TYPES_H__
28 28
29/* Minimum pixel clock, in KHz. For TMDS signal is 25.00 MHz */
30#define TMDS_MIN_PIXEL_CLOCK 25000
31/* Maximum pixel clock, in KHz. For TMDS signal is 165.00 MHz */
32#define TMDS_MAX_PIXEL_CLOCK 165000
33
29enum signal_type { 34enum signal_type {
30 SIGNAL_TYPE_NONE = 0L, /* no signal */ 35 SIGNAL_TYPE_NONE = 0L, /* no signal */
31 SIGNAL_TYPE_DVI_SINGLE_LINK = (1 << 0), 36 SIGNAL_TYPE_DVI_SINGLE_LINK = (1 << 0),
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index dd89abd2263d..66ee9d888d16 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -3205,8 +3205,10 @@ void i915_gem_set_wedged(struct drm_i915_private *i915)
3205 * rolling the global seqno forward (since this would complete requests 3205 * rolling the global seqno forward (since this would complete requests
3206 * for which we haven't set the fence error to EIO yet). 3206 * for which we haven't set the fence error to EIO yet).
3207 */ 3207 */
3208 for_each_engine(engine, i915, id) 3208 for_each_engine(engine, i915, id) {
3209 i915_gem_reset_prepare_engine(engine);
3209 engine->submit_request = nop_submit_request; 3210 engine->submit_request = nop_submit_request;
3211 }
3210 3212
3211 /* 3213 /*
3212 * Make sure no one is running the old callback before we proceed with 3214 * Make sure no one is running the old callback before we proceed with
@@ -3244,6 +3246,8 @@ void i915_gem_set_wedged(struct drm_i915_private *i915)
3244 intel_engine_init_global_seqno(engine, 3246 intel_engine_init_global_seqno(engine,
3245 intel_engine_last_submit(engine)); 3247 intel_engine_last_submit(engine));
3246 spin_unlock_irqrestore(&engine->timeline->lock, flags); 3248 spin_unlock_irqrestore(&engine->timeline->lock, flags);
3249
3250 i915_gem_reset_finish_engine(engine);
3247 } 3251 }
3248 3252
3249 set_bit(I915_WEDGED, &i915->gpu_error.flags); 3253 set_bit(I915_WEDGED, &i915->gpu_error.flags);
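
Wedging now brackets the nop-submit swap with the same prepare/finish pairing a real engine reset uses, so per-engine reset state is consistent when the global seqno is rolled up. In outline (a sketch; nop_submit_request is file-local to i915_gem.c, and the quiescing between the two loops is elided):

static void wedge_engines_sketch(struct drm_i915_private *i915)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	for_each_engine(engine, i915, id) {
		i915_gem_reset_prepare_engine(engine);
		engine->submit_request = nop_submit_request;
	}

	/* ... wait for the old callbacks to drain, complete requests ... */

	for_each_engine(engine, i915, id)
		i915_gem_reset_finish_engine(engine);
}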
diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c
index 0be50e43507d..f8fe5ffcdcff 100644
--- a/drivers/gpu/drm/i915/i915_perf.c
+++ b/drivers/gpu/drm/i915/i915_perf.c
@@ -1303,9 +1303,8 @@ static void i915_oa_stream_destroy(struct i915_perf_stream *stream)
1303 */ 1303 */
1304 mutex_lock(&dev_priv->drm.struct_mutex); 1304 mutex_lock(&dev_priv->drm.struct_mutex);
1305 dev_priv->perf.oa.exclusive_stream = NULL; 1305 dev_priv->perf.oa.exclusive_stream = NULL;
1306 mutex_unlock(&dev_priv->drm.struct_mutex);
1307
1308 dev_priv->perf.oa.ops.disable_metric_set(dev_priv); 1306 dev_priv->perf.oa.ops.disable_metric_set(dev_priv);
1307 mutex_unlock(&dev_priv->drm.struct_mutex);
1309 1308
1310 free_oa_buffer(dev_priv); 1309 free_oa_buffer(dev_priv);
1311 1310
@@ -1756,22 +1755,13 @@ static int gen8_switch_to_updated_kernel_context(struct drm_i915_private *dev_pr
1756 * Note: it's only the RCS/Render context that has any OA state. 1755 * Note: it's only the RCS/Render context that has any OA state.
1757 */ 1756 */
1758static int gen8_configure_all_contexts(struct drm_i915_private *dev_priv, 1757static int gen8_configure_all_contexts(struct drm_i915_private *dev_priv,
1759 const struct i915_oa_config *oa_config, 1758 const struct i915_oa_config *oa_config)
1760 bool interruptible)
1761{ 1759{
1762 struct i915_gem_context *ctx; 1760 struct i915_gem_context *ctx;
1763 int ret; 1761 int ret;
1764 unsigned int wait_flags = I915_WAIT_LOCKED; 1762 unsigned int wait_flags = I915_WAIT_LOCKED;
1765 1763
1766 if (interruptible) { 1764 lockdep_assert_held(&dev_priv->drm.struct_mutex);
1767 ret = i915_mutex_lock_interruptible(&dev_priv->drm);
1768 if (ret)
1769 return ret;
1770
1771 wait_flags |= I915_WAIT_INTERRUPTIBLE;
1772 } else {
1773 mutex_lock(&dev_priv->drm.struct_mutex);
1774 }
1775 1765
1776 /* Switch away from any user context. */ 1766 /* Switch away from any user context. */
1777 ret = gen8_switch_to_updated_kernel_context(dev_priv, oa_config); 1767 ret = gen8_switch_to_updated_kernel_context(dev_priv, oa_config);
@@ -1819,8 +1809,6 @@ static int gen8_configure_all_contexts(struct drm_i915_private *dev_priv,
1819 } 1809 }
1820 1810
1821 out: 1811 out:
1822 mutex_unlock(&dev_priv->drm.struct_mutex);
1823
1824 return ret; 1812 return ret;
1825} 1813}
1826 1814
@@ -1863,7 +1851,7 @@ static int gen8_enable_metric_set(struct drm_i915_private *dev_priv,
1863 * to make sure all slices/subslices are ON before writing to NOA 1851 * to make sure all slices/subslices are ON before writing to NOA
1864 * registers. 1852 * registers.
1865 */ 1853 */
1866 ret = gen8_configure_all_contexts(dev_priv, oa_config, true); 1854 ret = gen8_configure_all_contexts(dev_priv, oa_config);
1867 if (ret) 1855 if (ret)
1868 return ret; 1856 return ret;
1869 1857
@@ -1878,7 +1866,7 @@ static int gen8_enable_metric_set(struct drm_i915_private *dev_priv,
1878static void gen8_disable_metric_set(struct drm_i915_private *dev_priv) 1866static void gen8_disable_metric_set(struct drm_i915_private *dev_priv)
1879{ 1867{
1880 /* Reset all contexts' slices/subslices configurations. */ 1868 /* Reset all contexts' slices/subslices configurations. */
1881 gen8_configure_all_contexts(dev_priv, NULL, false); 1869 gen8_configure_all_contexts(dev_priv, NULL);
1882 1870
1883 I915_WRITE(GDT_CHICKEN_BITS, (I915_READ(GDT_CHICKEN_BITS) & 1871 I915_WRITE(GDT_CHICKEN_BITS, (I915_READ(GDT_CHICKEN_BITS) &
1884 ~GT_NOA_ENABLE)); 1872 ~GT_NOA_ENABLE));
@@ -1888,7 +1876,7 @@ static void gen8_disable_metric_set(struct drm_i915_private *dev_priv)
1888static void gen10_disable_metric_set(struct drm_i915_private *dev_priv) 1876static void gen10_disable_metric_set(struct drm_i915_private *dev_priv)
1889{ 1877{
1890 /* Reset all contexts' slices/subslices configurations. */ 1878 /* Reset all contexts' slices/subslices configurations. */
1891 gen8_configure_all_contexts(dev_priv, NULL, false); 1879 gen8_configure_all_contexts(dev_priv, NULL);
1892 1880
1893 /* Make sure we disable noa to save power. */ 1881 /* Make sure we disable noa to save power. */
1894 I915_WRITE(RPM_CONFIG1, 1882 I915_WRITE(RPM_CONFIG1,
@@ -2138,6 +2126,10 @@ static int i915_oa_stream_init(struct i915_perf_stream *stream,
2138 if (ret) 2126 if (ret)
2139 goto err_oa_buf_alloc; 2127 goto err_oa_buf_alloc;
2140 2128
2129 ret = i915_mutex_lock_interruptible(&dev_priv->drm);
2130 if (ret)
2131 goto err_lock;
2132
2141 ret = dev_priv->perf.oa.ops.enable_metric_set(dev_priv, 2133 ret = dev_priv->perf.oa.ops.enable_metric_set(dev_priv,
2142 stream->oa_config); 2134 stream->oa_config);
2143 if (ret) 2135 if (ret)
@@ -2145,23 +2137,17 @@ static int i915_oa_stream_init(struct i915_perf_stream *stream,
2145 2137
2146 stream->ops = &i915_oa_stream_ops; 2138 stream->ops = &i915_oa_stream_ops;
2147 2139
2148 /* Lock device for exclusive_stream access late because
2149 * enable_metric_set() might lock as well on gen8+.
2150 */
2151 ret = i915_mutex_lock_interruptible(&dev_priv->drm);
2152 if (ret)
2153 goto err_lock;
2154
2155 dev_priv->perf.oa.exclusive_stream = stream; 2140 dev_priv->perf.oa.exclusive_stream = stream;
2156 2141
2157 mutex_unlock(&dev_priv->drm.struct_mutex); 2142 mutex_unlock(&dev_priv->drm.struct_mutex);
2158 2143
2159 return 0; 2144 return 0;
2160 2145
2161err_lock: 2146err_enable:
2162 dev_priv->perf.oa.ops.disable_metric_set(dev_priv); 2147 dev_priv->perf.oa.ops.disable_metric_set(dev_priv);
2148 mutex_unlock(&dev_priv->drm.struct_mutex);
2163 2149
2164err_enable: 2150err_lock:
2165 free_oa_buffer(dev_priv); 2151 free_oa_buffer(dev_priv);
2166 2152
2167err_oa_buf_alloc: 2153err_oa_buf_alloc:
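
The i915_perf.c restructure moves all struct_mutex handling out of gen8_configure_all_contexts(), which now only asserts the lock, and into i915_oa_stream_init(), whose unwind order mirrors acquisition: err_enable now releases the lock, while err_lock only frees the OA buffer. The new lock scope, reassembled for readability (a sketch of the tail of i915_oa_stream_init, not the verbatim function):

static int oa_stream_init_tail(struct drm_i915_private *dev_priv,
			       struct i915_perf_stream *stream)
{
	int ret;

	ret = i915_mutex_lock_interruptible(&dev_priv->drm);
	if (ret)
		return ret;	/* err_lock: nothing to undo under the lock */

	ret = dev_priv->perf.oa.ops.enable_metric_set(dev_priv,
						      stream->oa_config);
	if (ret) {
		/* err_enable: undo the metric set, then drop the lock. */
		dev_priv->perf.oa.ops.disable_metric_set(dev_priv);
		mutex_unlock(&dev_priv->drm.struct_mutex);
		return ret;
	}

	dev_priv->perf.oa.exclusive_stream = stream;
	mutex_unlock(&dev_priv->drm.struct_mutex);
	return 0;
}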
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 7ece2f061b9e..e0fca035ff78 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -719,6 +719,8 @@ static void execlists_cancel_requests(struct intel_engine_cs *engine)
719 struct rb_node *rb; 719 struct rb_node *rb;
720 unsigned long flags; 720 unsigned long flags;
721 721
722 GEM_TRACE("%s\n", engine->name);
723
722 spin_lock_irqsave(&engine->timeline->lock, flags); 724 spin_lock_irqsave(&engine->timeline->lock, flags);
723 725
724 /* Cancel the requests on the HW and clear the ELSP tracker. */ 726 /* Cancel the requests on the HW and clear the ELSP tracker. */
@@ -765,6 +767,9 @@ static void execlists_cancel_requests(struct intel_engine_cs *engine)
765 */ 767 */
766 clear_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted); 768 clear_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted);
767 769
770 /* Mark all CS interrupts as complete */
771 execlists->active = 0;
772
768 spin_unlock_irqrestore(&engine->timeline->lock, flags); 773 spin_unlock_irqrestore(&engine->timeline->lock, flags);
769} 774}
770 775
diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c
index d3045a371a55..7c73bc7e2f85 100644
--- a/drivers/gpu/drm/radeon/cik.c
+++ b/drivers/gpu/drm/radeon/cik.c
@@ -3221,35 +3221,8 @@ static void cik_gpu_init(struct radeon_device *rdev)
3221 case CHIP_KAVERI: 3221 case CHIP_KAVERI:
3222 rdev->config.cik.max_shader_engines = 1; 3222 rdev->config.cik.max_shader_engines = 1;
3223 rdev->config.cik.max_tile_pipes = 4; 3223 rdev->config.cik.max_tile_pipes = 4;
3224 if ((rdev->pdev->device == 0x1304) || 3224 rdev->config.cik.max_cu_per_sh = 8;
3225 (rdev->pdev->device == 0x1305) || 3225 rdev->config.cik.max_backends_per_se = 2;
3226 (rdev->pdev->device == 0x130C) ||
3227 (rdev->pdev->device == 0x130F) ||
3228 (rdev->pdev->device == 0x1310) ||
3229 (rdev->pdev->device == 0x1311) ||
3230 (rdev->pdev->device == 0x131C)) {
3231 rdev->config.cik.max_cu_per_sh = 8;
3232 rdev->config.cik.max_backends_per_se = 2;
3233 } else if ((rdev->pdev->device == 0x1309) ||
3234 (rdev->pdev->device == 0x130A) ||
3235 (rdev->pdev->device == 0x130D) ||
3236 (rdev->pdev->device == 0x1313) ||
3237 (rdev->pdev->device == 0x131D)) {
3238 rdev->config.cik.max_cu_per_sh = 6;
3239 rdev->config.cik.max_backends_per_se = 2;
3240 } else if ((rdev->pdev->device == 0x1306) ||
3241 (rdev->pdev->device == 0x1307) ||
3242 (rdev->pdev->device == 0x130B) ||
3243 (rdev->pdev->device == 0x130E) ||
3244 (rdev->pdev->device == 0x1315) ||
3245 (rdev->pdev->device == 0x1318) ||
3246 (rdev->pdev->device == 0x131B)) {
3247 rdev->config.cik.max_cu_per_sh = 4;
3248 rdev->config.cik.max_backends_per_se = 1;
3249 } else {
3250 rdev->config.cik.max_cu_per_sh = 3;
3251 rdev->config.cik.max_backends_per_se = 1;
3252 }
3253 rdev->config.cik.max_sh_per_se = 1; 3226 rdev->config.cik.max_sh_per_se = 1;
3254 rdev->config.cik.max_texture_channel_caches = 4; 3227 rdev->config.cik.max_texture_channel_caches = 4;
3255 rdev->config.cik.max_gprs = 256; 3228 rdev->config.cik.max_gprs = 256;
diff --git a/drivers/gpu/drm/sun4i/sun4i_crtc.c b/drivers/gpu/drm/sun4i/sun4i_crtc.c
index 5decae0069d0..78cbc3145e44 100644
--- a/drivers/gpu/drm/sun4i/sun4i_crtc.c
+++ b/drivers/gpu/drm/sun4i/sun4i_crtc.c
@@ -93,6 +93,8 @@ static void sun4i_crtc_atomic_disable(struct drm_crtc *crtc,
93 93
94 DRM_DEBUG_DRIVER("Disabling the CRTC\n"); 94 DRM_DEBUG_DRIVER("Disabling the CRTC\n");
95 95
96 drm_crtc_vblank_off(crtc);
97
96 sun4i_tcon_set_status(scrtc->tcon, encoder, false); 98 sun4i_tcon_set_status(scrtc->tcon, encoder, false);
97 99
98 if (crtc->state->event && !crtc->state->active) { 100 if (crtc->state->event && !crtc->state->active) {
@@ -113,6 +115,8 @@ static void sun4i_crtc_atomic_enable(struct drm_crtc *crtc,
113 DRM_DEBUG_DRIVER("Enabling the CRTC\n"); 115 DRM_DEBUG_DRIVER("Enabling the CRTC\n");
114 116
115 sun4i_tcon_set_status(scrtc->tcon, encoder, true); 117 sun4i_tcon_set_status(scrtc->tcon, encoder, true);
118
119 drm_crtc_vblank_on(crtc);
116} 120}
117 121
118static void sun4i_crtc_mode_set_nofb(struct drm_crtc *crtc) 122static void sun4i_crtc_mode_set_nofb(struct drm_crtc *crtc)
diff --git a/drivers/gpu/drm/sun4i/sun4i_dotclock.c b/drivers/gpu/drm/sun4i/sun4i_dotclock.c
index 023f39bda633..e36004fbe453 100644
--- a/drivers/gpu/drm/sun4i/sun4i_dotclock.c
+++ b/drivers/gpu/drm/sun4i/sun4i_dotclock.c
@@ -132,10 +132,13 @@ static int sun4i_dclk_get_phase(struct clk_hw *hw)
132static int sun4i_dclk_set_phase(struct clk_hw *hw, int degrees) 132static int sun4i_dclk_set_phase(struct clk_hw *hw, int degrees)
133{ 133{
134 struct sun4i_dclk *dclk = hw_to_dclk(hw); 134 struct sun4i_dclk *dclk = hw_to_dclk(hw);
135 u32 val = degrees / 120;
136
137 val <<= 28;
135 138
136 regmap_update_bits(dclk->regmap, SUN4I_TCON0_IO_POL_REG, 139 regmap_update_bits(dclk->regmap, SUN4I_TCON0_IO_POL_REG,
137 GENMASK(29, 28), 140 GENMASK(29, 28),
138 degrees / 120); 141 val);
139 142
140 return 0; 143 return 0;
141} 144}
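
The dotclock fix matters because regmap_update_bits(map, reg, mask, val) computes (orig & ~mask) | (val & mask): the value must already be positioned inside the field. Writing degrees / 120 unshifted against GENMASK(29, 28) therefore always programmed phase 0; the caller now shifts the selector into bits 29:28 first.

static u32 dclk_phase_field(int degrees)
{
	return (u32)(degrees / 120) << 28;	/* 0/120/240 deg -> bits 29:28 */
}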
diff --git a/drivers/gpu/drm/sun4i/sun4i_rgb.c b/drivers/gpu/drm/sun4i/sun4i_rgb.c
index 832f8f9bc47f..b8da5a50a61d 100644
--- a/drivers/gpu/drm/sun4i/sun4i_rgb.c
+++ b/drivers/gpu/drm/sun4i/sun4i_rgb.c
@@ -92,6 +92,8 @@ static int sun4i_rgb_mode_valid(struct drm_connector *connector,
92 92
93 DRM_DEBUG_DRIVER("Vertical parameters OK\n"); 93 DRM_DEBUG_DRIVER("Vertical parameters OK\n");
94 94
95 tcon->dclk_min_div = 6;
96 tcon->dclk_max_div = 127;
95 rounded_rate = clk_round_rate(tcon->dclk, rate); 97 rounded_rate = clk_round_rate(tcon->dclk, rate);
96 if (rounded_rate < rate) 98 if (rounded_rate < rate)
97 return MODE_CLOCK_LOW; 99 return MODE_CLOCK_LOW;
diff --git a/drivers/gpu/drm/sun4i/sun4i_tcon.c b/drivers/gpu/drm/sun4i/sun4i_tcon.c
index b3960118deb9..2de586b7c98b 100644
--- a/drivers/gpu/drm/sun4i/sun4i_tcon.c
+++ b/drivers/gpu/drm/sun4i/sun4i_tcon.c
@@ -101,10 +101,12 @@ static void sun4i_tcon_channel_set_status(struct sun4i_tcon *tcon, int channel,
101 return; 101 return;
102 } 102 }
103 103
104 if (enabled) 104 if (enabled) {
105 clk_prepare_enable(clk); 105 clk_prepare_enable(clk);
106 else 106 } else {
107 clk_rate_exclusive_put(clk);
107 clk_disable_unprepare(clk); 108 clk_disable_unprepare(clk);
109 }
108} 110}
109 111
110static void sun4i_tcon_lvds_set_status(struct sun4i_tcon *tcon, 112static void sun4i_tcon_lvds_set_status(struct sun4i_tcon *tcon,
@@ -873,52 +875,56 @@ static int sun4i_tcon_bind(struct device *dev, struct device *master,
873 return ret; 875 return ret;
874 } 876 }
875 877
876 /* 878 if (tcon->quirks->supports_lvds) {
877 * This can only be made optional since we've had DT nodes 879 /*
878 * without the LVDS reset properties. 880 * This can only be made optional since we've had DT
879 * 881 * nodes without the LVDS reset properties.
880 * If the property is missing, just disable LVDS, and print a 882 *
881 * warning. 883 * If the property is missing, just disable LVDS, and
882 */ 884 * print a warning.
883 tcon->lvds_rst = devm_reset_control_get_optional(dev, "lvds"); 885 */
884 if (IS_ERR(tcon->lvds_rst)) { 886 tcon->lvds_rst = devm_reset_control_get_optional(dev, "lvds");
885 dev_err(dev, "Couldn't get our reset line\n"); 887 if (IS_ERR(tcon->lvds_rst)) {
886 return PTR_ERR(tcon->lvds_rst); 888 dev_err(dev, "Couldn't get our reset line\n");
887 } else if (tcon->lvds_rst) { 889 return PTR_ERR(tcon->lvds_rst);
888 has_lvds_rst = true; 890 } else if (tcon->lvds_rst) {
889 reset_control_reset(tcon->lvds_rst); 891 has_lvds_rst = true;
890 } else { 892 reset_control_reset(tcon->lvds_rst);
891 has_lvds_rst = false; 893 } else {
892 } 894 has_lvds_rst = false;
895 }
893 896
894 /* 897 /*
895 * This can only be made optional since we've had DT nodes 898 * This can only be made optional since we've had DT
896 * without the LVDS reset properties. 899 * nodes without the LVDS reset properties.
897 * 900 *
898 * If the property is missing, just disable LVDS, and print a 901 * If the property is missing, just disable LVDS, and
899 * warning. 902 * print a warning.
900 */ 903 */
901 if (tcon->quirks->has_lvds_alt) { 904 if (tcon->quirks->has_lvds_alt) {
902 tcon->lvds_pll = devm_clk_get(dev, "lvds-alt"); 905 tcon->lvds_pll = devm_clk_get(dev, "lvds-alt");
903 if (IS_ERR(tcon->lvds_pll)) { 906 if (IS_ERR(tcon->lvds_pll)) {
904 if (PTR_ERR(tcon->lvds_pll) == -ENOENT) { 907 if (PTR_ERR(tcon->lvds_pll) == -ENOENT) {
905 has_lvds_alt = false; 908 has_lvds_alt = false;
909 } else {
910 dev_err(dev, "Couldn't get the LVDS PLL\n");
911 return PTR_ERR(tcon->lvds_pll);
912 }
906 } else { 913 } else {
907 dev_err(dev, "Couldn't get the LVDS PLL\n"); 914 has_lvds_alt = true;
908 return PTR_ERR(tcon->lvds_pll);
909 } 915 }
910 } else {
911 has_lvds_alt = true;
912 } 916 }
913 }
914 917
915 if (!has_lvds_rst || (tcon->quirks->has_lvds_alt && !has_lvds_alt)) { 918 if (!has_lvds_rst ||
916 dev_warn(dev, 919 (tcon->quirks->has_lvds_alt && !has_lvds_alt)) {
917 "Missing LVDS properties, Please upgrade your DT\n"); 920 dev_warn(dev, "Missing LVDS properties, Please upgrade your DT\n");
918 dev_warn(dev, "LVDS output disabled\n"); 921 dev_warn(dev, "LVDS output disabled\n");
919 can_lvds = false; 922 can_lvds = false;
923 } else {
924 can_lvds = true;
925 }
920 } else { 926 } else {
921 can_lvds = true; 927 can_lvds = false;
922 } 928 }
923 929
924 ret = sun4i_tcon_init_clocks(dev, tcon); 930 ret = sun4i_tcon_init_clocks(dev, tcon);
@@ -1137,7 +1143,7 @@ static const struct sun4i_tcon_quirks sun8i_a33_quirks = {
1137}; 1143};
1138 1144
1139static const struct sun4i_tcon_quirks sun8i_a83t_lcd_quirks = { 1145static const struct sun4i_tcon_quirks sun8i_a83t_lcd_quirks = {
1140 /* nothing is supported */ 1146 .supports_lvds = true,
1141}; 1147};
1142 1148
1143static const struct sun4i_tcon_quirks sun8i_v3s_quirks = { 1149static const struct sun4i_tcon_quirks sun8i_v3s_quirks = {
diff --git a/drivers/gpu/drm/sun4i/sun4i_tcon.h b/drivers/gpu/drm/sun4i/sun4i_tcon.h
index b761c7b823c5..278700c7bf9f 100644
--- a/drivers/gpu/drm/sun4i/sun4i_tcon.h
+++ b/drivers/gpu/drm/sun4i/sun4i_tcon.h
@@ -175,6 +175,7 @@ struct sun4i_tcon_quirks {
175 bool has_channel_1; /* a33 does not have channel 1 */ 175 bool has_channel_1; /* a33 does not have channel 1 */
176 bool has_lvds_alt; /* Does the LVDS clock have a parent other than the TCON clock? */ 176 bool has_lvds_alt; /* Does the LVDS clock have a parent other than the TCON clock? */
177 bool needs_de_be_mux; /* sun6i needs mux to select backend */ 177 bool needs_de_be_mux; /* sun6i needs mux to select backend */
178 bool supports_lvds; /* Does the TCON support an LVDS output? */
178 179
179 /* callback to handle tcon muxing options */ 180 /* callback to handle tcon muxing options */
180 int (*set_mux)(struct sun4i_tcon *, const struct drm_encoder *); 181 int (*set_mux)(struct sun4i_tcon *, const struct drm_encoder *);
diff --git a/drivers/infiniband/core/addr.c b/drivers/infiniband/core/addr.c
index a5b4cf030c11..9183d148d644 100644
--- a/drivers/infiniband/core/addr.c
+++ b/drivers/infiniband/core/addr.c
@@ -550,18 +550,13 @@ static int addr_resolve(struct sockaddr *src_in,
550 dst_release(dst); 550 dst_release(dst);
551 } 551 }
552 552
553 if (ndev->flags & IFF_LOOPBACK) { 553 if (ndev) {
554 ret = rdma_translate_ip(dst_in, addr); 554 if (ndev->flags & IFF_LOOPBACK)
555 /* 555 ret = rdma_translate_ip(dst_in, addr);
556 * Put the loopback device and get the translated 556 else
557 * device instead. 557 addr->bound_dev_if = ndev->ifindex;
558 */
559 dev_put(ndev); 558 dev_put(ndev);
560 ndev = dev_get_by_index(addr->net, addr->bound_dev_if);
561 } else {
562 addr->bound_dev_if = ndev->ifindex;
563 } 559 }
564 dev_put(ndev);
565 560
566 return ret; 561 return ret;
567} 562}
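
The addr.c rework collapses the netdev handling to one NULL check and exactly one dev_put(); the old flow could drop the reference twice when the loopback path re-looked-up the same ifindex, and never guarded against a NULL ndev at all. A sketch of the single-owner put pattern with a toy refcount, the struct and helpers being stand-ins:

#include <stdio.h>

struct netdev { int refcnt; int loopback; int ifindex; };

static void dev_put(struct netdev *d) { d->refcnt--; }

static int resolve(struct netdev *ndev, int *bound_if)
{
	int ret = 0;

	if (ndev) {                     /* single guard, single put */
		if (ndev->loopback)
			ret = 0;        /* rdma_translate_ip() stand-in */
		else
			*bound_if = ndev->ifindex;
		dev_put(ndev);
	}
	return ret;
}

int main(void)
{
	struct netdev lo = { .refcnt = 1, .loopback = 1, .ifindex = 1 };
	int bound = 0;

	resolve(&lo, &bound);
	printf("refcnt after resolve: %d (must be 0, not -1)\n", lo.refcnt);
	return 0;
}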
diff --git a/drivers/infiniband/core/cq.c b/drivers/infiniband/core/cq.c
index bc79ca8215d7..af5ad6a56ae4 100644
--- a/drivers/infiniband/core/cq.c
+++ b/drivers/infiniband/core/cq.c
@@ -17,6 +17,7 @@
17 17
18/* # of WCs to poll for with a single call to ib_poll_cq */ 18/* # of WCs to poll for with a single call to ib_poll_cq */
19#define IB_POLL_BATCH 16 19#define IB_POLL_BATCH 16
20#define IB_POLL_BATCH_DIRECT 8
20 21
21/* # of WCs to iterate over before yielding */ 22/* # of WCs to iterate over before yielding */
22#define IB_POLL_BUDGET_IRQ 256 23#define IB_POLL_BUDGET_IRQ 256
@@ -25,18 +26,18 @@
25#define IB_POLL_FLAGS \ 26#define IB_POLL_FLAGS \
26 (IB_CQ_NEXT_COMP | IB_CQ_REPORT_MISSED_EVENTS) 27 (IB_CQ_NEXT_COMP | IB_CQ_REPORT_MISSED_EVENTS)
27 28
28static int __ib_process_cq(struct ib_cq *cq, int budget, struct ib_wc *poll_wc) 29static int __ib_process_cq(struct ib_cq *cq, int budget, struct ib_wc *wcs,
30 int batch)
29{ 31{
30 int i, n, completed = 0; 32 int i, n, completed = 0;
31 struct ib_wc *wcs = poll_wc ? : cq->wc;
32 33
33 /* 34 /*
34 * budget might be (-1) if the caller does not 35 * budget might be (-1) if the caller does not
35 * want to bound this call, thus we need unsigned 36 * want to bound this call, thus we need unsigned
36 * minimum here. 37 * minimum here.
37 */ 38 */
38 while ((n = ib_poll_cq(cq, min_t(u32, IB_POLL_BATCH, 39 while ((n = ib_poll_cq(cq, min_t(u32, batch,
39 budget - completed), wcs)) > 0) { 40 budget - completed), wcs)) > 0) {
40 for (i = 0; i < n; i++) { 41 for (i = 0; i < n; i++) {
41 struct ib_wc *wc = &wcs[i]; 42 struct ib_wc *wc = &wcs[i];
42 43
@@ -48,8 +49,7 @@ static int __ib_process_cq(struct ib_cq *cq, int budget, struct ib_wc *poll_wc)
48 49
49 completed += n; 50 completed += n;
50 51
51 if (n != IB_POLL_BATCH || 52 if (n != batch || (budget != -1 && completed >= budget))
52 (budget != -1 && completed >= budget))
53 break; 53 break;
54 } 54 }
55 55
@@ -72,9 +72,9 @@ static int __ib_process_cq(struct ib_cq *cq, int budget, struct ib_wc *poll_wc)
72 */ 72 */
73int ib_process_cq_direct(struct ib_cq *cq, int budget) 73int ib_process_cq_direct(struct ib_cq *cq, int budget)
74{ 74{
75 struct ib_wc wcs[IB_POLL_BATCH]; 75 struct ib_wc wcs[IB_POLL_BATCH_DIRECT];
76 76
77 return __ib_process_cq(cq, budget, wcs); 77 return __ib_process_cq(cq, budget, wcs, IB_POLL_BATCH_DIRECT);
78} 78}
79EXPORT_SYMBOL(ib_process_cq_direct); 79EXPORT_SYMBOL(ib_process_cq_direct);
80 80
@@ -88,7 +88,7 @@ static int ib_poll_handler(struct irq_poll *iop, int budget)
88 struct ib_cq *cq = container_of(iop, struct ib_cq, iop); 88 struct ib_cq *cq = container_of(iop, struct ib_cq, iop);
89 int completed; 89 int completed;
90 90
91 completed = __ib_process_cq(cq, budget, NULL); 91 completed = __ib_process_cq(cq, budget, cq->wc, IB_POLL_BATCH);
92 if (completed < budget) { 92 if (completed < budget) {
93 irq_poll_complete(&cq->iop); 93 irq_poll_complete(&cq->iop);
94 if (ib_req_notify_cq(cq, IB_POLL_FLAGS) > 0) 94 if (ib_req_notify_cq(cq, IB_POLL_FLAGS) > 0)
@@ -108,7 +108,8 @@ static void ib_cq_poll_work(struct work_struct *work)
108 struct ib_cq *cq = container_of(work, struct ib_cq, work); 108 struct ib_cq *cq = container_of(work, struct ib_cq, work);
109 int completed; 109 int completed;
110 110
111 completed = __ib_process_cq(cq, IB_POLL_BUDGET_WORKQUEUE, NULL); 111 completed = __ib_process_cq(cq, IB_POLL_BUDGET_WORKQUEUE, cq->wc,
112 IB_POLL_BATCH);
112 if (completed >= IB_POLL_BUDGET_WORKQUEUE || 113 if (completed >= IB_POLL_BUDGET_WORKQUEUE ||
113 ib_req_notify_cq(cq, IB_POLL_FLAGS) > 0) 114 ib_req_notify_cq(cq, IB_POLL_FLAGS) > 0)
114 queue_work(ib_comp_wq, &cq->work); 115 queue_work(ib_comp_wq, &cq->work);
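
The cq.c rework threads the WC array and its batch size through __ib_process_cq() so the direct-polling path can use a smaller on-stack array (IB_POLL_BATCH_DIRECT = 8) while the IRQ and workqueue paths keep the preallocated cq->wc with the full batch of 16. A userspace sketch of the same budgeted, batched drain loop, with ib_poll_cq stubbed out and the unbounded budget == -1 case omitted for brevity:

#include <stdio.h>

#define BATCH_DIRECT 8

static int fake_entries = 20;           /* pretend the CQ holds 20 WCs */

/* Stub for ib_poll_cq(): returns up to 'max' completions. */
static int poll_cq(int *wcs, int max)
{
	int n = fake_entries < max ? fake_entries : max;

	fake_entries -= n;
	for (int i = 0; i < n; i++)
		wcs[i] = i;
	return n;
}

/* Mirrors __ib_process_cq(): drain in 'batch'-sized chunks until the
 * budget is spent or a short batch signals an empty queue. */
static int process_cq(int budget, int *wcs, int batch)
{
	int n, completed = 0;

	while ((n = poll_cq(wcs, batch < budget - completed ?
				 batch : budget - completed)) > 0) {
		completed += n;
		if (n != batch || completed >= budget)
			break;
	}
	return completed;
}

int main(void)
{
	int wcs[BATCH_DIRECT];          /* small stack footprint, as in
					 * ib_process_cq_direct() */
	printf("completed %d\n", process_cq(16, wcs, BATCH_DIRECT));
	return 0;
}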
diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c
index e8010e73a1cf..bb065c9449be 100644
--- a/drivers/infiniband/core/device.c
+++ b/drivers/infiniband/core/device.c
@@ -536,14 +536,14 @@ int ib_register_device(struct ib_device *device,
536 ret = device->query_device(device, &device->attrs, &uhw); 536 ret = device->query_device(device, &device->attrs, &uhw);
537 if (ret) { 537 if (ret) {
538 pr_warn("Couldn't query the device attributes\n"); 538 pr_warn("Couldn't query the device attributes\n");
539 goto cache_cleanup; 539 goto cg_cleanup;
540 } 540 }
541 541
542 ret = ib_device_register_sysfs(device, port_callback); 542 ret = ib_device_register_sysfs(device, port_callback);
543 if (ret) { 543 if (ret) {
544 pr_warn("Couldn't register device %s with driver model\n", 544 pr_warn("Couldn't register device %s with driver model\n",
545 device->name); 545 device->name);
546 goto cache_cleanup; 546 goto cg_cleanup;
547 } 547 }
548 548
549 device->reg_state = IB_DEV_REGISTERED; 549 device->reg_state = IB_DEV_REGISTERED;
@@ -559,6 +559,8 @@ int ib_register_device(struct ib_device *device,
559 mutex_unlock(&device_mutex); 559 mutex_unlock(&device_mutex);
560 return 0; 560 return 0;
561 561
562cg_cleanup:
563 ib_device_unregister_rdmacg(device);
562cache_cleanup: 564cache_cleanup:
563 ib_cache_cleanup_one(device); 565 ib_cache_cleanup_one(device);
564 ib_cache_release_one(device); 566 ib_cache_release_one(device);
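
The device.c fix retargets the error gotos so the rdmacg registration performed before query_device() is also unwound; with the old labels that step leaked on failure. A sketch of the LIFO-unwind idiom the fix restores, with all setup and teardown steps stubbed:

#include <stdio.h>

static int step_ok = 1;

static int  setup_cache(void)      { puts("cache setup");  return 0; }
static void teardown_cache(void)   { puts("cache cleanup"); }
static int  register_cgroup(void)  { puts("cg register");  return 0; }
static void unregister_cgroup(void){ puts("cg unregister"); }
static int  query_device(void)     { return step_ok ? 0 : -1; }

/* Each label undoes everything done before the failing step, in
 * reverse order; the pattern ib_register_device() follows. */
static int register_device(void)
{
	int ret;

	ret = setup_cache();
	if (ret)
		goto out;
	ret = register_cgroup();
	if (ret)
		goto cache_cleanup;
	ret = query_device();
	if (ret)
		goto cg_cleanup;        /* was: goto cache_cleanup (leak) */
	return 0;

cg_cleanup:
	unregister_cgroup();
cache_cleanup:
	teardown_cache();
out:
	return ret;
}

int main(void)
{
	step_ok = 0;                    /* force the late failure path */
	if (register_device())
		puts("registration failed, fully unwound");
	return 0;
}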
diff --git a/drivers/infiniband/core/sa_query.c b/drivers/infiniband/core/sa_query.c
index 8cf15d4a8ac4..9f029a1ca5ea 100644
--- a/drivers/infiniband/core/sa_query.c
+++ b/drivers/infiniband/core/sa_query.c
@@ -1291,10 +1291,9 @@ int ib_init_ah_attr_from_path(struct ib_device *device, u8 port_num,
1291 1291
1292 resolved_dev = dev_get_by_index(dev_addr.net, 1292 resolved_dev = dev_get_by_index(dev_addr.net,
1293 dev_addr.bound_dev_if); 1293 dev_addr.bound_dev_if);
1294 if (resolved_dev->flags & IFF_LOOPBACK) { 1294 if (!resolved_dev) {
1295 dev_put(resolved_dev); 1295 dev_put(idev);
1296 resolved_dev = idev; 1296 return -ENODEV;
1297 dev_hold(resolved_dev);
1298 } 1297 }
1299 ndev = ib_get_ndev_from_path(rec); 1298 ndev = ib_get_ndev_from_path(rec);
1300 rcu_read_lock(); 1299 rcu_read_lock();
diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c
index f015f1bf88c9..3a9d0f5b5881 100644
--- a/drivers/infiniband/core/ucma.c
+++ b/drivers/infiniband/core/ucma.c
@@ -1149,6 +1149,9 @@ static ssize_t ucma_init_qp_attr(struct ucma_file *file,
1149 if (copy_from_user(&cmd, inbuf, sizeof(cmd))) 1149 if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
1150 return -EFAULT; 1150 return -EFAULT;
1151 1151
1152 if (cmd.qp_state > IB_QPS_ERR)
1153 return -EINVAL;
1154
1152 ctx = ucma_get_ctx(file, cmd.id); 1155 ctx = ucma_get_ctx(file, cmd.id);
1153 if (IS_ERR(ctx)) 1156 if (IS_ERR(ctx))
1154 return PTR_ERR(ctx); 1157 return PTR_ERR(ctx);
@@ -1294,6 +1297,9 @@ static ssize_t ucma_set_option(struct ucma_file *file, const char __user *inbuf,
1294 if (IS_ERR(ctx)) 1297 if (IS_ERR(ctx))
1295 return PTR_ERR(ctx); 1298 return PTR_ERR(ctx);
1296 1299
1300 if (unlikely(cmd.optlen > KMALLOC_MAX_SIZE))
1301 return -EINVAL;
1302
1297 optval = memdup_user((void __user *) (unsigned long) cmd.optval, 1303 optval = memdup_user((void __user *) (unsigned long) cmd.optval,
1298 cmd.optlen); 1304 cmd.optlen);
1299 if (IS_ERR(optval)) { 1305 if (IS_ERR(optval)) {
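
Both ucma hunks add the same class of guard: reject user-controlled values before they reach deeper layers. A QP state beyond IB_QPS_ERR would index past the state tables, and an oversized option length would ask memdup_user() for an allocation kmalloc can never satisfy. A sketch of the validate-then-copy pattern, with the limits and structs mocked:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define QPS_ERR          6            /* last valid QP state, illustratively */
#define KMALLOC_MAX_SIZE (4u << 20)   /* stand-in for the kernel constant */

struct cmd {
	unsigned int qp_state;
	size_t optlen;
};

/* memdup_user() stand-in: bound the size *before* allocating. */
static void *dup_opt(const void *src, size_t len)
{
	void *p;

	if (len > KMALLOC_MAX_SIZE)
		return NULL;            /* the kernel returns -EINVAL earlier */
	p = malloc(len);
	if (p)
		memcpy(p, src, len);
	return p;
}

int main(void)
{
	char opt[64] = "tos";
	struct cmd cmd = { .qp_state = 2, .optlen = sizeof(opt) };

	if (cmd.qp_state > QPS_ERR)     /* ucma_init_qp_attr()'s new check */
		return EINVAL;
	free(dup_opt(opt, cmd.optlen));

	cmd.optlen = (size_t)KMALLOC_MAX_SIZE + 1;
	if (!dup_opt(opt, cmd.optlen))  /* ucma_set_option()'s new limit */
		fprintf(stderr, "optlen %zu rejected\n", cmd.optlen);
	return 0;
}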
diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
index 643174d949a8..0dd75f449872 100644
--- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c
+++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
@@ -785,7 +785,7 @@ int bnxt_re_query_ah(struct ib_ah *ib_ah, struct rdma_ah_attr *ah_attr)
785 return 0; 785 return 0;
786} 786}
787 787
788static unsigned long bnxt_re_lock_cqs(struct bnxt_re_qp *qp) 788unsigned long bnxt_re_lock_cqs(struct bnxt_re_qp *qp)
789 __acquires(&qp->scq->cq_lock) __acquires(&qp->rcq->cq_lock) 789 __acquires(&qp->scq->cq_lock) __acquires(&qp->rcq->cq_lock)
790{ 790{
791 unsigned long flags; 791 unsigned long flags;
@@ -799,8 +799,8 @@ static unsigned long bnxt_re_lock_cqs(struct bnxt_re_qp *qp)
799 return flags; 799 return flags;
800} 800}
801 801
802static void bnxt_re_unlock_cqs(struct bnxt_re_qp *qp, 802void bnxt_re_unlock_cqs(struct bnxt_re_qp *qp,
803 unsigned long flags) 803 unsigned long flags)
804 __releases(&qp->scq->cq_lock) __releases(&qp->rcq->cq_lock) 804 __releases(&qp->scq->cq_lock) __releases(&qp->rcq->cq_lock)
805{ 805{
806 if (qp->rcq != qp->scq) 806 if (qp->rcq != qp->scq)
@@ -1606,6 +1606,7 @@ int bnxt_re_modify_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr,
1606 int status; 1606 int status;
1607 union ib_gid sgid; 1607 union ib_gid sgid;
1608 struct ib_gid_attr sgid_attr; 1608 struct ib_gid_attr sgid_attr;
1609 unsigned int flags;
1609 u8 nw_type; 1610 u8 nw_type;
1610 1611
1611 qp->qplib_qp.modify_flags = 0; 1612 qp->qplib_qp.modify_flags = 0;
@@ -1634,14 +1635,18 @@ int bnxt_re_modify_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr,
1634 dev_dbg(rdev_to_dev(rdev), 1635 dev_dbg(rdev_to_dev(rdev),
1635 "Move QP = %p to flush list\n", 1636 "Move QP = %p to flush list\n",
1636 qp); 1637 qp);
1638 flags = bnxt_re_lock_cqs(qp);
1637 bnxt_qplib_add_flush_qp(&qp->qplib_qp); 1639 bnxt_qplib_add_flush_qp(&qp->qplib_qp);
1640 bnxt_re_unlock_cqs(qp, flags);
1638 } 1641 }
1639 if (!qp->sumem && 1642 if (!qp->sumem &&
1640 qp->qplib_qp.state == CMDQ_MODIFY_QP_NEW_STATE_RESET) { 1643 qp->qplib_qp.state == CMDQ_MODIFY_QP_NEW_STATE_RESET) {
1641 dev_dbg(rdev_to_dev(rdev), 1644 dev_dbg(rdev_to_dev(rdev),
1642 "Move QP = %p out of flush list\n", 1645 "Move QP = %p out of flush list\n",
1643 qp); 1646 qp);
1647 flags = bnxt_re_lock_cqs(qp);
1644 bnxt_qplib_clean_qp(&qp->qplib_qp); 1648 bnxt_qplib_clean_qp(&qp->qplib_qp);
1649 bnxt_re_unlock_cqs(qp, flags);
1645 } 1650 }
1646 } 1651 }
1647 if (qp_attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY) { 1652 if (qp_attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY) {
@@ -2227,10 +2232,13 @@ static int bnxt_re_build_inv_wqe(struct ib_send_wr *wr,
2227 wqe->type = BNXT_QPLIB_SWQE_TYPE_LOCAL_INV; 2232 wqe->type = BNXT_QPLIB_SWQE_TYPE_LOCAL_INV;
2228 wqe->local_inv.inv_l_key = wr->ex.invalidate_rkey; 2233 wqe->local_inv.inv_l_key = wr->ex.invalidate_rkey;
2229 2234
2235 /* Need unconditional fence for local invalidate
2236 * opcode to work as expected.
2237 */
2238 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
2239
2230 if (wr->send_flags & IB_SEND_SIGNALED) 2240 if (wr->send_flags & IB_SEND_SIGNALED)
2231 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP; 2241 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
2232 if (wr->send_flags & IB_SEND_FENCE)
2233 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
2234 if (wr->send_flags & IB_SEND_SOLICITED) 2242 if (wr->send_flags & IB_SEND_SOLICITED)
2235 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SOLICIT_EVENT; 2243 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SOLICIT_EVENT;
2236 2244
@@ -2251,8 +2259,12 @@ static int bnxt_re_build_reg_wqe(struct ib_reg_wr *wr,
2251 wqe->frmr.levels = qplib_frpl->hwq.level + 1; 2259 wqe->frmr.levels = qplib_frpl->hwq.level + 1;
2252 wqe->type = BNXT_QPLIB_SWQE_TYPE_REG_MR; 2260 wqe->type = BNXT_QPLIB_SWQE_TYPE_REG_MR;
2253 2261
2254 if (wr->wr.send_flags & IB_SEND_FENCE) 2262 /* Need unconditional fence for reg_mr
2255 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE; 2263 * opcode to function as expected.
2264 */
2265
2266 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
2267
2256 if (wr->wr.send_flags & IB_SEND_SIGNALED) 2268 if (wr->wr.send_flags & IB_SEND_SIGNALED)
2257 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP; 2269 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
2258 2270
diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.h b/drivers/infiniband/hw/bnxt_re/ib_verbs.h
index b88a48d43a9d..e62b7c2c7da6 100644
--- a/drivers/infiniband/hw/bnxt_re/ib_verbs.h
+++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.h
@@ -222,4 +222,7 @@ struct ib_ucontext *bnxt_re_alloc_ucontext(struct ib_device *ibdev,
222 struct ib_udata *udata); 222 struct ib_udata *udata);
223int bnxt_re_dealloc_ucontext(struct ib_ucontext *context); 223int bnxt_re_dealloc_ucontext(struct ib_ucontext *context);
224int bnxt_re_mmap(struct ib_ucontext *context, struct vm_area_struct *vma); 224int bnxt_re_mmap(struct ib_ucontext *context, struct vm_area_struct *vma);
225
226unsigned long bnxt_re_lock_cqs(struct bnxt_re_qp *qp);
227void bnxt_re_unlock_cqs(struct bnxt_re_qp *qp, unsigned long flags);
225#endif /* __BNXT_RE_IB_VERBS_H__ */ 228#endif /* __BNXT_RE_IB_VERBS_H__ */
diff --git a/drivers/infiniband/hw/bnxt_re/main.c b/drivers/infiniband/hw/bnxt_re/main.c
index 33a448036c2e..f6e361750466 100644
--- a/drivers/infiniband/hw/bnxt_re/main.c
+++ b/drivers/infiniband/hw/bnxt_re/main.c
@@ -730,6 +730,13 @@ static int bnxt_re_handle_qp_async_event(struct creq_qp_event *qp_event,
730 struct bnxt_re_qp *qp) 730 struct bnxt_re_qp *qp)
731{ 731{
732 struct ib_event event; 732 struct ib_event event;
733 unsigned int flags;
734
735 if (qp->qplib_qp.state == CMDQ_MODIFY_QP_NEW_STATE_ERR) {
736 flags = bnxt_re_lock_cqs(qp);
737 bnxt_qplib_add_flush_qp(&qp->qplib_qp);
738 bnxt_re_unlock_cqs(qp, flags);
739 }
733 740
734 memset(&event, 0, sizeof(event)); 741 memset(&event, 0, sizeof(event));
735 if (qp->qplib_qp.srq) { 742 if (qp->qplib_qp.srq) {
@@ -1416,9 +1423,12 @@ static void bnxt_re_task(struct work_struct *work)
1416 switch (re_work->event) { 1423 switch (re_work->event) {
1417 case NETDEV_REGISTER: 1424 case NETDEV_REGISTER:
1418 rc = bnxt_re_ib_reg(rdev); 1425 rc = bnxt_re_ib_reg(rdev);
1419 if (rc) 1426 if (rc) {
1420 dev_err(rdev_to_dev(rdev), 1427 dev_err(rdev_to_dev(rdev),
1421 "Failed to register with IB: %#x", rc); 1428 "Failed to register with IB: %#x", rc);
1429 bnxt_re_remove_one(rdev);
1430 bnxt_re_dev_unreg(rdev);
1431 }
1422 break; 1432 break;
1423 case NETDEV_UP: 1433 case NETDEV_UP:
1424 bnxt_re_dispatch_event(&rdev->ibdev, NULL, 1, 1434 bnxt_re_dispatch_event(&rdev->ibdev, NULL, 1,
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.c b/drivers/infiniband/hw/bnxt_re/qplib_fp.c
index 3ea5b9624f6b..06b42c880fd4 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_fp.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.c
@@ -88,75 +88,35 @@ static void __bnxt_qplib_add_flush_qp(struct bnxt_qplib_qp *qp)
88 } 88 }
89} 89}
90 90
91void bnxt_qplib_acquire_cq_locks(struct bnxt_qplib_qp *qp, 91static void bnxt_qplib_acquire_cq_flush_locks(struct bnxt_qplib_qp *qp,
92 unsigned long *flags) 92 unsigned long *flags)
93 __acquires(&qp->scq->hwq.lock) __acquires(&qp->rcq->hwq.lock) 93 __acquires(&qp->scq->flush_lock) __acquires(&qp->rcq->flush_lock)
94{ 94{
95 spin_lock_irqsave(&qp->scq->hwq.lock, *flags); 95 spin_lock_irqsave(&qp->scq->flush_lock, *flags);
96 if (qp->scq == qp->rcq) 96 if (qp->scq == qp->rcq)
97 __acquire(&qp->rcq->hwq.lock); 97 __acquire(&qp->rcq->flush_lock);
98 else 98 else
99 spin_lock(&qp->rcq->hwq.lock); 99 spin_lock(&qp->rcq->flush_lock);
100} 100}
101 101
102void bnxt_qplib_release_cq_locks(struct bnxt_qplib_qp *qp, 102static void bnxt_qplib_release_cq_flush_locks(struct bnxt_qplib_qp *qp,
103 unsigned long *flags) 103 unsigned long *flags)
104 __releases(&qp->scq->hwq.lock) __releases(&qp->rcq->hwq.lock) 104 __releases(&qp->scq->flush_lock) __releases(&qp->rcq->flush_lock)
105{ 105{
106 if (qp->scq == qp->rcq) 106 if (qp->scq == qp->rcq)
107 __release(&qp->rcq->hwq.lock); 107 __release(&qp->rcq->flush_lock);
108 else 108 else
109 spin_unlock(&qp->rcq->hwq.lock); 109 spin_unlock(&qp->rcq->flush_lock);
110 spin_unlock_irqrestore(&qp->scq->hwq.lock, *flags); 110 spin_unlock_irqrestore(&qp->scq->flush_lock, *flags);
111}
112
113static struct bnxt_qplib_cq *bnxt_qplib_find_buddy_cq(struct bnxt_qplib_qp *qp,
114 struct bnxt_qplib_cq *cq)
115{
116 struct bnxt_qplib_cq *buddy_cq = NULL;
117
118 if (qp->scq == qp->rcq)
119 buddy_cq = NULL;
120 else if (qp->scq == cq)
121 buddy_cq = qp->rcq;
122 else
123 buddy_cq = qp->scq;
124 return buddy_cq;
125}
126
127static void bnxt_qplib_lock_buddy_cq(struct bnxt_qplib_qp *qp,
128 struct bnxt_qplib_cq *cq)
129 __acquires(&buddy_cq->hwq.lock)
130{
131 struct bnxt_qplib_cq *buddy_cq = NULL;
132
133 buddy_cq = bnxt_qplib_find_buddy_cq(qp, cq);
134 if (!buddy_cq)
135 __acquire(&cq->hwq.lock);
136 else
137 spin_lock(&buddy_cq->hwq.lock);
138}
139
140static void bnxt_qplib_unlock_buddy_cq(struct bnxt_qplib_qp *qp,
141 struct bnxt_qplib_cq *cq)
142 __releases(&buddy_cq->hwq.lock)
143{
144 struct bnxt_qplib_cq *buddy_cq = NULL;
145
146 buddy_cq = bnxt_qplib_find_buddy_cq(qp, cq);
147 if (!buddy_cq)
148 __release(&cq->hwq.lock);
149 else
150 spin_unlock(&buddy_cq->hwq.lock);
151} 111}
152 112
153void bnxt_qplib_add_flush_qp(struct bnxt_qplib_qp *qp) 113void bnxt_qplib_add_flush_qp(struct bnxt_qplib_qp *qp)
154{ 114{
155 unsigned long flags; 115 unsigned long flags;
156 116
157 bnxt_qplib_acquire_cq_locks(qp, &flags); 117 bnxt_qplib_acquire_cq_flush_locks(qp, &flags);
158 __bnxt_qplib_add_flush_qp(qp); 118 __bnxt_qplib_add_flush_qp(qp);
159 bnxt_qplib_release_cq_locks(qp, &flags); 119 bnxt_qplib_release_cq_flush_locks(qp, &flags);
160} 120}
161 121
162static void __bnxt_qplib_del_flush_qp(struct bnxt_qplib_qp *qp) 122static void __bnxt_qplib_del_flush_qp(struct bnxt_qplib_qp *qp)
@@ -177,7 +137,7 @@ void bnxt_qplib_clean_qp(struct bnxt_qplib_qp *qp)
177{ 137{
178 unsigned long flags; 138 unsigned long flags;
179 139
180 bnxt_qplib_acquire_cq_locks(qp, &flags); 140 bnxt_qplib_acquire_cq_flush_locks(qp, &flags);
181 __clean_cq(qp->scq, (u64)(unsigned long)qp); 141 __clean_cq(qp->scq, (u64)(unsigned long)qp);
182 qp->sq.hwq.prod = 0; 142 qp->sq.hwq.prod = 0;
183 qp->sq.hwq.cons = 0; 143 qp->sq.hwq.cons = 0;
@@ -186,7 +146,7 @@ void bnxt_qplib_clean_qp(struct bnxt_qplib_qp *qp)
186 qp->rq.hwq.cons = 0; 146 qp->rq.hwq.cons = 0;
187 147
188 __bnxt_qplib_del_flush_qp(qp); 148 __bnxt_qplib_del_flush_qp(qp);
189 bnxt_qplib_release_cq_locks(qp, &flags); 149 bnxt_qplib_release_cq_flush_locks(qp, &flags);
190} 150}
191 151
192static void bnxt_qpn_cqn_sched_task(struct work_struct *work) 152static void bnxt_qpn_cqn_sched_task(struct work_struct *work)
@@ -2107,9 +2067,6 @@ void bnxt_qplib_mark_qp_error(void *qp_handle)
2107 /* Must block new posting of SQ and RQ */ 2067 /* Must block new posting of SQ and RQ */
2108 qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR; 2068 qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
2109 bnxt_qplib_cancel_phantom_processing(qp); 2069 bnxt_qplib_cancel_phantom_processing(qp);
2110
2111 /* Add qp to flush list of the CQ */
2112 __bnxt_qplib_add_flush_qp(qp);
2113} 2070}
2114 2071
2115/* Note: SQE is valid from sw_sq_cons up to cqe_sq_cons (exclusive) 2072/* Note: SQE is valid from sw_sq_cons up to cqe_sq_cons (exclusive)
@@ -2285,9 +2242,9 @@ static int bnxt_qplib_cq_process_req(struct bnxt_qplib_cq *cq,
2285 sw_sq_cons, cqe->wr_id, cqe->status); 2242 sw_sq_cons, cqe->wr_id, cqe->status);
2286 cqe++; 2243 cqe++;
2287 (*budget)--; 2244 (*budget)--;
2288 bnxt_qplib_lock_buddy_cq(qp, cq);
2289 bnxt_qplib_mark_qp_error(qp); 2245 bnxt_qplib_mark_qp_error(qp);
2290 bnxt_qplib_unlock_buddy_cq(qp, cq); 2246 /* Add qp to flush list of the CQ */
2247 bnxt_qplib_add_flush_qp(qp);
2291 } else { 2248 } else {
2292 if (swq->flags & SQ_SEND_FLAGS_SIGNAL_COMP) { 2249 if (swq->flags & SQ_SEND_FLAGS_SIGNAL_COMP) {
2293 /* Before we complete, do WA 9060 */ 2250 /* Before we complete, do WA 9060 */
@@ -2403,9 +2360,7 @@ static int bnxt_qplib_cq_process_res_rc(struct bnxt_qplib_cq *cq,
2403 if (hwcqe->status != CQ_RES_RC_STATUS_OK) { 2360 if (hwcqe->status != CQ_RES_RC_STATUS_OK) {
2404 qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR; 2361 qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
2405 /* Add qp to flush list of the CQ */ 2362 /* Add qp to flush list of the CQ */
2406 bnxt_qplib_lock_buddy_cq(qp, cq); 2363 bnxt_qplib_add_flush_qp(qp);
2407 __bnxt_qplib_add_flush_qp(qp);
2408 bnxt_qplib_unlock_buddy_cq(qp, cq);
2409 } 2364 }
2410 } 2365 }
2411 2366
@@ -2489,9 +2444,7 @@ static int bnxt_qplib_cq_process_res_ud(struct bnxt_qplib_cq *cq,
2489 if (hwcqe->status != CQ_RES_RC_STATUS_OK) { 2444 if (hwcqe->status != CQ_RES_RC_STATUS_OK) {
2490 qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR; 2445 qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
2491 /* Add qp to flush list of the CQ */ 2446 /* Add qp to flush list of the CQ */
2492 bnxt_qplib_lock_buddy_cq(qp, cq); 2447 bnxt_qplib_add_flush_qp(qp);
2493 __bnxt_qplib_add_flush_qp(qp);
2494 bnxt_qplib_unlock_buddy_cq(qp, cq);
2495 } 2448 }
2496 } 2449 }
2497done: 2450done:
@@ -2501,11 +2454,9 @@ done:
2501bool bnxt_qplib_is_cq_empty(struct bnxt_qplib_cq *cq) 2454bool bnxt_qplib_is_cq_empty(struct bnxt_qplib_cq *cq)
2502{ 2455{
2503 struct cq_base *hw_cqe, **hw_cqe_ptr; 2456 struct cq_base *hw_cqe, **hw_cqe_ptr;
2504 unsigned long flags;
2505 u32 sw_cons, raw_cons; 2457 u32 sw_cons, raw_cons;
2506 bool rc = true; 2458 bool rc = true;
2507 2459
2508 spin_lock_irqsave(&cq->hwq.lock, flags);
2509 raw_cons = cq->hwq.cons; 2460 raw_cons = cq->hwq.cons;
2510 sw_cons = HWQ_CMP(raw_cons, &cq->hwq); 2461 sw_cons = HWQ_CMP(raw_cons, &cq->hwq);
2511 hw_cqe_ptr = (struct cq_base **)cq->hwq.pbl_ptr; 2462 hw_cqe_ptr = (struct cq_base **)cq->hwq.pbl_ptr;
@@ -2513,7 +2464,6 @@ bool bnxt_qplib_is_cq_empty(struct bnxt_qplib_cq *cq)
2513 2464
2514 /* Check for Valid bit. If the CQE is valid, return false */ 2465 /* Check for Valid bit. If the CQE is valid, return false */
2515 rc = !CQE_CMP_VALID(hw_cqe, raw_cons, cq->hwq.max_elements); 2466 rc = !CQE_CMP_VALID(hw_cqe, raw_cons, cq->hwq.max_elements);
2516 spin_unlock_irqrestore(&cq->hwq.lock, flags);
2517 return rc; 2467 return rc;
2518} 2468}
2519 2469
@@ -2602,9 +2552,7 @@ static int bnxt_qplib_cq_process_res_raweth_qp1(struct bnxt_qplib_cq *cq,
2602 if (hwcqe->status != CQ_RES_RC_STATUS_OK) { 2552 if (hwcqe->status != CQ_RES_RC_STATUS_OK) {
2603 qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR; 2553 qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
2604 /* Add qp to flush list of the CQ */ 2554 /* Add qp to flush list of the CQ */
2605 bnxt_qplib_lock_buddy_cq(qp, cq); 2555 bnxt_qplib_add_flush_qp(qp);
2606 __bnxt_qplib_add_flush_qp(qp);
2607 bnxt_qplib_unlock_buddy_cq(qp, cq);
2608 } 2556 }
2609 } 2557 }
2610 2558
@@ -2719,9 +2667,7 @@ do_rq:
2719 */ 2667 */
2720 2668
2721 /* Add qp to flush list of the CQ */ 2669 /* Add qp to flush list of the CQ */
2722 bnxt_qplib_lock_buddy_cq(qp, cq); 2670 bnxt_qplib_add_flush_qp(qp);
2723 __bnxt_qplib_add_flush_qp(qp);
2724 bnxt_qplib_unlock_buddy_cq(qp, cq);
2725done: 2671done:
2726 return rc; 2672 return rc;
2727} 2673}
@@ -2750,7 +2696,7 @@ int bnxt_qplib_process_flush_list(struct bnxt_qplib_cq *cq,
2750 u32 budget = num_cqes; 2696 u32 budget = num_cqes;
2751 unsigned long flags; 2697 unsigned long flags;
2752 2698
2753 spin_lock_irqsave(&cq->hwq.lock, flags); 2699 spin_lock_irqsave(&cq->flush_lock, flags);
2754 list_for_each_entry(qp, &cq->sqf_head, sq_flush) { 2700 list_for_each_entry(qp, &cq->sqf_head, sq_flush) {
2755 dev_dbg(&cq->hwq.pdev->dev, 2701 dev_dbg(&cq->hwq.pdev->dev,
2756 "QPLIB: FP: Flushing SQ QP= %p", 2702 "QPLIB: FP: Flushing SQ QP= %p",
@@ -2764,7 +2710,7 @@ int bnxt_qplib_process_flush_list(struct bnxt_qplib_cq *cq,
2764 qp); 2710 qp);
2765 __flush_rq(&qp->rq, qp, &cqe, &budget); 2711 __flush_rq(&qp->rq, qp, &cqe, &budget);
2766 } 2712 }
2767 spin_unlock_irqrestore(&cq->hwq.lock, flags); 2713 spin_unlock_irqrestore(&cq->flush_lock, flags);
2768 2714
2769 return num_cqes - budget; 2715 return num_cqes - budget;
2770} 2716}
@@ -2773,11 +2719,9 @@ int bnxt_qplib_poll_cq(struct bnxt_qplib_cq *cq, struct bnxt_qplib_cqe *cqe,
2773 int num_cqes, struct bnxt_qplib_qp **lib_qp) 2719 int num_cqes, struct bnxt_qplib_qp **lib_qp)
2774{ 2720{
2775 struct cq_base *hw_cqe, **hw_cqe_ptr; 2721 struct cq_base *hw_cqe, **hw_cqe_ptr;
2776 unsigned long flags;
2777 u32 sw_cons, raw_cons; 2722 u32 sw_cons, raw_cons;
2778 int budget, rc = 0; 2723 int budget, rc = 0;
2779 2724
2780 spin_lock_irqsave(&cq->hwq.lock, flags);
2781 raw_cons = cq->hwq.cons; 2725 raw_cons = cq->hwq.cons;
2782 budget = num_cqes; 2726 budget = num_cqes;
2783 2727
@@ -2853,20 +2797,15 @@ int bnxt_qplib_poll_cq(struct bnxt_qplib_cq *cq, struct bnxt_qplib_cqe *cqe,
2853 bnxt_qplib_arm_cq(cq, DBR_DBR_TYPE_CQ); 2797 bnxt_qplib_arm_cq(cq, DBR_DBR_TYPE_CQ);
2854 } 2798 }
2855exit: 2799exit:
2856 spin_unlock_irqrestore(&cq->hwq.lock, flags);
2857 return num_cqes - budget; 2800 return num_cqes - budget;
2858} 2801}
2859 2802
2860void bnxt_qplib_req_notify_cq(struct bnxt_qplib_cq *cq, u32 arm_type) 2803void bnxt_qplib_req_notify_cq(struct bnxt_qplib_cq *cq, u32 arm_type)
2861{ 2804{
2862 unsigned long flags;
2863
2864 spin_lock_irqsave(&cq->hwq.lock, flags);
2865 if (arm_type) 2805 if (arm_type)
2866 bnxt_qplib_arm_cq(cq, arm_type); 2806 bnxt_qplib_arm_cq(cq, arm_type);
2867 /* Using cq->arm_state variable to track whether to issue cq handler */ 2807 /* Using cq->arm_state variable to track whether to issue cq handler */
2868 atomic_set(&cq->arm_state, 1); 2808 atomic_set(&cq->arm_state, 1);
2869 spin_unlock_irqrestore(&cq->hwq.lock, flags);
2870} 2809}
2871 2810
2872void bnxt_qplib_flush_cqn_wq(struct bnxt_qplib_qp *qp) 2811void bnxt_qplib_flush_cqn_wq(struct bnxt_qplib_qp *qp)
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.h b/drivers/infiniband/hw/bnxt_re/qplib_fp.h
index ca0a2ffa3509..ade9f13c0fd1 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_fp.h
+++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.h
@@ -389,6 +389,18 @@ struct bnxt_qplib_cq {
389 struct list_head sqf_head, rqf_head; 389 struct list_head sqf_head, rqf_head;
390 atomic_t arm_state; 390 atomic_t arm_state;
391 spinlock_t compl_lock; /* synch CQ handlers */ 391 spinlock_t compl_lock; /* synch CQ handlers */
392/* Locking Notes:
393 * QP can move to error state from modify_qp, async error event or error
394 * CQE as part of poll_cq. When QP is moved to error state, it gets added
395 * to two flush lists, one each for SQ and RQ.
396 * Each flush list is protected by qplib_cq->flush_lock. Both scq and rcq
397 * flush_locks should be acquired when QP is moved to error. The control path
398 * operations(modify_qp and async error events) are synchronized with poll_cq
399 * using upper level CQ locks (bnxt_re_cq->cq_lock) of both SCQ and RCQ.
400 * The qplib_cq->flush_lock is required to synchronize two instances of poll_cq
401 * of the same QP while manipulating the flush list.
402 */
403 spinlock_t flush_lock; /* QP flush management */
392}; 404};
393 405
394#define BNXT_QPLIB_MAX_IRRQE_ENTRY_SIZE sizeof(struct xrrq_irrq) 406#define BNXT_QPLIB_MAX_IRRQE_ENTRY_SIZE sizeof(struct xrrq_irrq)
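
The locking notes above describe taking both CQs' flush_locks while coping with the common case where a QP's send and receive CQ are the same object. A userspace sketch of that acquire/release pairing using pthread spinlocks; the QP/CQ structs are reduced to just the locks, and the kernel's irqsave on the first lock is omitted:

#include <pthread.h>
#include <stdio.h>

struct cq { pthread_spinlock_t flush_lock; };
struct qp { struct cq *scq, *rcq; };

/* Take scq first, then rcq, but only once when they alias,
 * mirroring bnxt_qplib_acquire_cq_flush_locks(). */
static void acquire_flush_locks(struct qp *qp)
{
	pthread_spin_lock(&qp->scq->flush_lock);
	if (qp->scq != qp->rcq)
		pthread_spin_lock(&qp->rcq->flush_lock);
}

static void release_flush_locks(struct qp *qp)
{
	if (qp->scq != qp->rcq)
		pthread_spin_unlock(&qp->rcq->flush_lock);
	pthread_spin_unlock(&qp->scq->flush_lock);
}

int main(void)
{
	struct cq cq;
	struct qp qp = { .scq = &cq, .rcq = &cq };   /* aliased CQs */

	pthread_spin_init(&cq.flush_lock, PTHREAD_PROCESS_PRIVATE);
	acquire_flush_locks(&qp);
	puts("QP on flush list");       /* __bnxt_qplib_add_flush_qp() */
	release_flush_locks(&qp);
	pthread_spin_destroy(&cq.flush_lock);
	return 0;
}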
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
index 8329ec6a7946..80027a494730 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
@@ -305,9 +305,8 @@ static int bnxt_qplib_process_qp_event(struct bnxt_qplib_rcfw *rcfw,
305 err_event->res_err_state_reason); 305 err_event->res_err_state_reason);
306 if (!qp) 306 if (!qp)
307 break; 307 break;
308 bnxt_qplib_acquire_cq_locks(qp, &flags);
309 bnxt_qplib_mark_qp_error(qp); 308 bnxt_qplib_mark_qp_error(qp);
310 bnxt_qplib_release_cq_locks(qp, &flags); 309 rcfw->aeq_handler(rcfw, qp_event, qp);
311 break; 310 break;
312 default: 311 default:
313 /* Command Response */ 312 /* Command Response */
@@ -460,7 +459,11 @@ int bnxt_qplib_init_rcfw(struct bnxt_qplib_rcfw *rcfw,
460 int rc; 459 int rc;
461 460
462 RCFW_CMD_PREP(req, INITIALIZE_FW, cmd_flags); 461 RCFW_CMD_PREP(req, INITIALIZE_FW, cmd_flags);
463 462 /* Supply (log-base-2-of-host-page-size - base-page-shift)
463 * to bono to adjust the doorbell page sizes.
464 */
465 req.log2_dbr_pg_size = cpu_to_le16(PAGE_SHIFT -
466 RCFW_DBR_BASE_PAGE_SHIFT);
464 /* 467 /*
465 * VFs need not setup the HW context area, PF 468 * VFs need not setup the HW context area, PF
466 * shall setup this area for VF. Skipping the 469 * shall setup this area for VF. Skipping the
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h
index 6bee6e3636ea..c7cce2e4185e 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h
+++ b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h
@@ -49,6 +49,7 @@
49#define RCFW_COMM_SIZE 0x104 49#define RCFW_COMM_SIZE 0x104
50 50
51#define RCFW_DBR_PCI_BAR_REGION 2 51#define RCFW_DBR_PCI_BAR_REGION 2
52#define RCFW_DBR_BASE_PAGE_SHIFT 12
52 53
53#define RCFW_CMD_PREP(req, CMD, cmd_flags) \ 54#define RCFW_CMD_PREP(req, CMD, cmd_flags) \
54 do { \ 55 do { \
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_sp.c b/drivers/infiniband/hw/bnxt_re/qplib_sp.c
index 03057983341f..ee98e5efef84 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_sp.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_sp.c
@@ -139,7 +139,8 @@ int bnxt_qplib_get_dev_attr(struct bnxt_qplib_rcfw *rcfw,
139 attr->max_pkey = le32_to_cpu(sb->max_pkeys); 139 attr->max_pkey = le32_to_cpu(sb->max_pkeys);
140 140
141 attr->max_inline_data = le32_to_cpu(sb->max_inline_data); 141 attr->max_inline_data = le32_to_cpu(sb->max_inline_data);
142 attr->l2_db_size = (sb->l2_db_space_size + 1) * PAGE_SIZE; 142 attr->l2_db_size = (sb->l2_db_space_size + 1) *
143 (0x01 << RCFW_DBR_BASE_PAGE_SHIFT);
143 attr->max_sgid = le32_to_cpu(sb->max_gid); 144 attr->max_sgid = le32_to_cpu(sb->max_gid);
144 145
145 bnxt_qplib_query_version(rcfw, attr->fw_ver); 146 bnxt_qplib_query_version(rcfw, attr->fw_ver);
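
These two hunks pair up: the driver now tells the firmware log2(host page size) - 12 so doorbell pages track PAGE_SIZE, and it sizes the L2 doorbell region in fixed 4 KB units rather than in (possibly 64 KB) host pages. The arithmetic as a standalone check; PAGE_SHIFT is passed in explicitly, since it is 12 on x86-64 but 16 on many arm64/ppc64 configs:

#include <stdio.h>

#define RCFW_DBR_BASE_PAGE_SHIFT 12   /* 4 KB base unit */

static void show(unsigned int page_shift, unsigned int l2_db_space_size)
{
	unsigned int log2_dbr_pg_size = page_shift - RCFW_DBR_BASE_PAGE_SHIFT;
	unsigned long l2_db_size =
		(l2_db_space_size + 1) * (1ul << RCFW_DBR_BASE_PAGE_SHIFT);

	printf("PAGE_SHIFT=%u -> log2_dbr_pg_size=%u, l2_db_size=%lu KB\n",
	       page_shift, log2_dbr_pg_size, l2_db_size >> 10);
}

int main(void)
{
	show(12, 1);   /* 4 KB pages: field 0 (PG_4K), 8 KB region */
	show(16, 1);   /* 64 KB pages: field 4 (PG_64K), region stays 8 KB,
			* where the old PAGE_SIZE math reported 128 KB */
	return 0;
}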
diff --git a/drivers/infiniband/hw/bnxt_re/roce_hsi.h b/drivers/infiniband/hw/bnxt_re/roce_hsi.h
index 2d7ea096a247..3e5a4f760d0e 100644
--- a/drivers/infiniband/hw/bnxt_re/roce_hsi.h
+++ b/drivers/infiniband/hw/bnxt_re/roce_hsi.h
@@ -1761,7 +1761,30 @@ struct cmdq_initialize_fw {
1761 #define CMDQ_INITIALIZE_FW_TIM_PG_SIZE_PG_2M (0x3UL << 4) 1761 #define CMDQ_INITIALIZE_FW_TIM_PG_SIZE_PG_2M (0x3UL << 4)
1762 #define CMDQ_INITIALIZE_FW_TIM_PG_SIZE_PG_8M (0x4UL << 4) 1762 #define CMDQ_INITIALIZE_FW_TIM_PG_SIZE_PG_8M (0x4UL << 4)
1763 #define CMDQ_INITIALIZE_FW_TIM_PG_SIZE_PG_1G (0x5UL << 4) 1763 #define CMDQ_INITIALIZE_FW_TIM_PG_SIZE_PG_1G (0x5UL << 4)
1764 __le16 reserved16; 1764 /* This value is (log-base-2-of-DBR-page-size - 12).
1765 * 0 for 4KB. HW supported values are enumerated below.
1766 */
1767 __le16 log2_dbr_pg_size;
1768 #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_MASK 0xfUL
1769 #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_SFT 0
1770 #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_4K 0x0UL
1771 #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_8K 0x1UL
1772 #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_16K 0x2UL
1773 #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_32K 0x3UL
1774 #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_64K 0x4UL
1775 #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_128K 0x5UL
1776 #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_256K 0x6UL
1777 #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_512K 0x7UL
1778 #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_1M 0x8UL
1779 #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_2M 0x9UL
1780 #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_4M 0xaUL
1781 #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_8M 0xbUL
1782 #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_16M 0xcUL
1783 #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_32M 0xdUL
1784 #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_64M 0xeUL
1785 #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_128M 0xfUL
1786 #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_LAST \
1787 CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_128M
1765 __le64 qpc_page_dir; 1788 __le64 qpc_page_dir;
1766 __le64 mrw_page_dir; 1789 __le64 mrw_page_dir;
1767 __le64 srq_page_dir; 1790 __le64 srq_page_dir;
diff --git a/drivers/infiniband/hw/mlx4/cq.c b/drivers/infiniband/hw/mlx4/cq.c
index 9a566ee3ceff..82adc0d1d30e 100644
--- a/drivers/infiniband/hw/mlx4/cq.c
+++ b/drivers/infiniband/hw/mlx4/cq.c
@@ -601,6 +601,7 @@ static void use_tunnel_data(struct mlx4_ib_qp *qp, struct mlx4_ib_cq *cq, struct
601 wc->dlid_path_bits = 0; 601 wc->dlid_path_bits = 0;
602 602
603 if (is_eth) { 603 if (is_eth) {
604 wc->slid = 0;
604 wc->vlan_id = be16_to_cpu(hdr->tun.sl_vid); 605 wc->vlan_id = be16_to_cpu(hdr->tun.sl_vid);
605 memcpy(&(wc->smac[0]), (char *)&hdr->tun.mac_31_0, 4); 606 memcpy(&(wc->smac[0]), (char *)&hdr->tun.mac_31_0, 4);
606 memcpy(&(wc->smac[4]), (char *)&hdr->tun.slid_mac_47_32, 2); 607 memcpy(&(wc->smac[4]), (char *)&hdr->tun.slid_mac_47_32, 2);
@@ -851,7 +852,6 @@ repoll:
851 } 852 }
852 } 853 }
853 854
854 wc->slid = be16_to_cpu(cqe->rlid);
855 g_mlpath_rqpn = be32_to_cpu(cqe->g_mlpath_rqpn); 855 g_mlpath_rqpn = be32_to_cpu(cqe->g_mlpath_rqpn);
856 wc->src_qp = g_mlpath_rqpn & 0xffffff; 856 wc->src_qp = g_mlpath_rqpn & 0xffffff;
857 wc->dlid_path_bits = (g_mlpath_rqpn >> 24) & 0x7f; 857 wc->dlid_path_bits = (g_mlpath_rqpn >> 24) & 0x7f;
@@ -860,6 +860,7 @@ repoll:
860 wc->wc_flags |= mlx4_ib_ipoib_csum_ok(cqe->status, 860 wc->wc_flags |= mlx4_ib_ipoib_csum_ok(cqe->status,
861 cqe->checksum) ? IB_WC_IP_CSUM_OK : 0; 861 cqe->checksum) ? IB_WC_IP_CSUM_OK : 0;
862 if (is_eth) { 862 if (is_eth) {
863 wc->slid = 0;
863 wc->sl = be16_to_cpu(cqe->sl_vid) >> 13; 864 wc->sl = be16_to_cpu(cqe->sl_vid) >> 13;
864 if (be32_to_cpu(cqe->vlan_my_qpn) & 865 if (be32_to_cpu(cqe->vlan_my_qpn) &
865 MLX4_CQE_CVLAN_PRESENT_MASK) { 866 MLX4_CQE_CVLAN_PRESENT_MASK) {
@@ -871,6 +872,7 @@ repoll:
871 memcpy(wc->smac, cqe->smac, ETH_ALEN); 872 memcpy(wc->smac, cqe->smac, ETH_ALEN);
872 wc->wc_flags |= (IB_WC_WITH_VLAN | IB_WC_WITH_SMAC); 873 wc->wc_flags |= (IB_WC_WITH_VLAN | IB_WC_WITH_SMAC);
873 } else { 874 } else {
875 wc->slid = be16_to_cpu(cqe->rlid);
874 wc->sl = be16_to_cpu(cqe->sl_vid) >> 12; 876 wc->sl = be16_to_cpu(cqe->sl_vid) >> 12;
875 wc->vlan_id = 0xffff; 877 wc->vlan_id = 0xffff;
876 } 878 }
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index 8d2ee9322f2e..5a0e4fc4785a 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -219,8 +219,6 @@ static int mlx4_ib_update_gids_v1_v2(struct gid_entry *gids,
219 gid_tbl[i].version = 2; 219 gid_tbl[i].version = 2;
220 if (!ipv6_addr_v4mapped((struct in6_addr *)&gids[i].gid)) 220 if (!ipv6_addr_v4mapped((struct in6_addr *)&gids[i].gid))
221 gid_tbl[i].type = 1; 221 gid_tbl[i].type = 1;
222 else
223 memset(&gid_tbl[i].gid, 0, 12);
224 } 222 }
225 } 223 }
226 224
@@ -366,8 +364,13 @@ static int mlx4_ib_del_gid(struct ib_device *device,
366 if (!gids) { 364 if (!gids) {
367 ret = -ENOMEM; 365 ret = -ENOMEM;
368 } else { 366 } else {
369 for (i = 0; i < MLX4_MAX_PORT_GIDS; i++) 367 for (i = 0; i < MLX4_MAX_PORT_GIDS; i++) {
370 memcpy(&gids[i].gid, &port_gid_table->gids[i].gid, sizeof(union ib_gid)); 368 memcpy(&gids[i].gid,
369 &port_gid_table->gids[i].gid,
370 sizeof(union ib_gid));
371 gids[i].gid_type =
372 port_gid_table->gids[i].gid_type;
373 }
371 } 374 }
372 } 375 }
373 spin_unlock_bh(&iboe->lock); 376 spin_unlock_bh(&iboe->lock);
diff --git a/drivers/infiniband/hw/mlx5/cq.c b/drivers/infiniband/hw/mlx5/cq.c
index 5b974fb97611..15457c9569a7 100644
--- a/drivers/infiniband/hw/mlx5/cq.c
+++ b/drivers/infiniband/hw/mlx5/cq.c
@@ -226,7 +226,6 @@ static void handle_responder(struct ib_wc *wc, struct mlx5_cqe64 *cqe,
226 wc->ex.invalidate_rkey = be32_to_cpu(cqe->imm_inval_pkey); 226 wc->ex.invalidate_rkey = be32_to_cpu(cqe->imm_inval_pkey);
227 break; 227 break;
228 } 228 }
229 wc->slid = be16_to_cpu(cqe->slid);
230 wc->src_qp = be32_to_cpu(cqe->flags_rqpn) & 0xffffff; 229 wc->src_qp = be32_to_cpu(cqe->flags_rqpn) & 0xffffff;
231 wc->dlid_path_bits = cqe->ml_path; 230 wc->dlid_path_bits = cqe->ml_path;
232 g = (be32_to_cpu(cqe->flags_rqpn) >> 28) & 3; 231 g = (be32_to_cpu(cqe->flags_rqpn) >> 28) & 3;
@@ -241,10 +240,12 @@ static void handle_responder(struct ib_wc *wc, struct mlx5_cqe64 *cqe,
241 } 240 }
242 241
243 if (ll != IB_LINK_LAYER_ETHERNET) { 242 if (ll != IB_LINK_LAYER_ETHERNET) {
243 wc->slid = be16_to_cpu(cqe->slid);
244 wc->sl = (be32_to_cpu(cqe->flags_rqpn) >> 24) & 0xf; 244 wc->sl = (be32_to_cpu(cqe->flags_rqpn) >> 24) & 0xf;
245 return; 245 return;
246 } 246 }
247 247
248 wc->slid = 0;
248 vlan_present = cqe->l4_l3_hdr_type & 0x1; 249 vlan_present = cqe->l4_l3_hdr_type & 0x1;
249 roce_packet_type = (be32_to_cpu(cqe->flags_rqpn) >> 24) & 0x3; 250 roce_packet_type = (be32_to_cpu(cqe->flags_rqpn) >> 24) & 0x3;
250 if (vlan_present) { 251 if (vlan_present) {
@@ -1177,7 +1178,12 @@ static int resize_user(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq,
1177 if (ucmd.reserved0 || ucmd.reserved1) 1178 if (ucmd.reserved0 || ucmd.reserved1)
1178 return -EINVAL; 1179 return -EINVAL;
1179 1180
1180 umem = ib_umem_get(context, ucmd.buf_addr, entries * ucmd.cqe_size, 1181 /* check multiplication overflow */
1182 if (ucmd.cqe_size && SIZE_MAX / ucmd.cqe_size <= entries - 1)
1183 return -EINVAL;
1184
1185 umem = ib_umem_get(context, ucmd.buf_addr,
1186 (size_t)ucmd.cqe_size * entries,
1181 IB_ACCESS_LOCAL_WRITE, 1); 1187 IB_ACCESS_LOCAL_WRITE, 1);
1182 if (IS_ERR(umem)) { 1188 if (IS_ERR(umem)) {
1183 err = PTR_ERR(umem); 1189 err = PTR_ERR(umem);
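
The resize_user() guard rejects a cqe_size/entries pair whose product would wrap before it reaches ib_umem_get(): `SIZE_MAX / cqe_size <= entries - 1` is the division form of `cqe_size * entries > SIZE_MAX`, written so the overflowing multiply itself is never performed. A standalone demonstration of the predicate:

#include <stdint.h>
#include <stdio.h>

/* True if a * b would exceed SIZE_MAX, computed without doing the
 * overflowing multiply (the form used by the resize_user() fix).
 * Assumes b >= 1, as the CQ entry count is. */
static int mul_overflows(size_t a, size_t b)
{
	return a && SIZE_MAX / a <= b - 1;   /* i.e. b > SIZE_MAX / a */
}

int main(void)
{
	size_t cqe_size = 128;
	size_t entries = (SIZE_MAX / 128) + 1;

	printf("128 * 64 overflows: %d\n", mul_overflows(128, 64));
	printf("huge product overflows: %d\n",
	       mul_overflows(cqe_size, entries));
	return 0;
}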
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index 4236c8086820..033b6af90de9 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -245,12 +245,16 @@ struct mlx5_core_dev *mlx5_ib_get_native_port_mdev(struct mlx5_ib_dev *ibdev,
245 struct mlx5_ib_multiport_info *mpi; 245 struct mlx5_ib_multiport_info *mpi;
246 struct mlx5_ib_port *port; 246 struct mlx5_ib_port *port;
247 247
248 if (!mlx5_core_mp_enabled(ibdev->mdev) ||
249 ll != IB_LINK_LAYER_ETHERNET) {
250 if (native_port_num)
251 *native_port_num = ib_port_num;
252 return ibdev->mdev;
253 }
254
248 if (native_port_num) 255 if (native_port_num)
249 *native_port_num = 1; 256 *native_port_num = 1;
250 257
251 if (!mlx5_core_mp_enabled(ibdev->mdev) || ll != IB_LINK_LAYER_ETHERNET)
252 return ibdev->mdev;
253
254 port = &ibdev->port[ib_port_num - 1]; 258 port = &ibdev->port[ib_port_num - 1];
255 if (!port) 259 if (!port)
256 return NULL; 260 return NULL;
@@ -3263,7 +3267,7 @@ static void mlx5_ib_handle_event(struct work_struct *_work)
3263 struct mlx5_ib_dev *ibdev; 3267 struct mlx5_ib_dev *ibdev;
3264 struct ib_event ibev; 3268 struct ib_event ibev;
3265 bool fatal = false; 3269 bool fatal = false;
3266 u8 port = 0; 3270 u8 port = (u8)work->param;
3267 3271
3268 if (mlx5_core_is_mp_slave(work->dev)) { 3272 if (mlx5_core_is_mp_slave(work->dev)) {
3269 ibdev = mlx5_ib_get_ibdev_from_mpi(work->context); 3273 ibdev = mlx5_ib_get_ibdev_from_mpi(work->context);
@@ -3283,8 +3287,6 @@ static void mlx5_ib_handle_event(struct work_struct *_work)
3283 case MLX5_DEV_EVENT_PORT_UP: 3287 case MLX5_DEV_EVENT_PORT_UP:
3284 case MLX5_DEV_EVENT_PORT_DOWN: 3288 case MLX5_DEV_EVENT_PORT_DOWN:
3285 case MLX5_DEV_EVENT_PORT_INITIALIZED: 3289 case MLX5_DEV_EVENT_PORT_INITIALIZED:
3286 port = (u8)work->param;
3287
3288 /* In RoCE, port up/down events are handled in 3290 /* In RoCE, port up/down events are handled in
3289 * mlx5_netdev_event(). 3291 * mlx5_netdev_event().
3290 */ 3292 */
@@ -3298,24 +3300,19 @@ static void mlx5_ib_handle_event(struct work_struct *_work)
3298 3300
3299 case MLX5_DEV_EVENT_LID_CHANGE: 3301 case MLX5_DEV_EVENT_LID_CHANGE:
3300 ibev.event = IB_EVENT_LID_CHANGE; 3302 ibev.event = IB_EVENT_LID_CHANGE;
3301 port = (u8)work->param;
3302 break; 3303 break;
3303 3304
3304 case MLX5_DEV_EVENT_PKEY_CHANGE: 3305 case MLX5_DEV_EVENT_PKEY_CHANGE:
3305 ibev.event = IB_EVENT_PKEY_CHANGE; 3306 ibev.event = IB_EVENT_PKEY_CHANGE;
3306 port = (u8)work->param;
3307
3308 schedule_work(&ibdev->devr.ports[port - 1].pkey_change_work); 3307 schedule_work(&ibdev->devr.ports[port - 1].pkey_change_work);
3309 break; 3308 break;
3310 3309
3311 case MLX5_DEV_EVENT_GUID_CHANGE: 3310 case MLX5_DEV_EVENT_GUID_CHANGE:
3312 ibev.event = IB_EVENT_GID_CHANGE; 3311 ibev.event = IB_EVENT_GID_CHANGE;
3313 port = (u8)work->param;
3314 break; 3312 break;
3315 3313
3316 case MLX5_DEV_EVENT_CLIENT_REREG: 3314 case MLX5_DEV_EVENT_CLIENT_REREG:
3317 ibev.event = IB_EVENT_CLIENT_REREGISTER; 3315 ibev.event = IB_EVENT_CLIENT_REREGISTER;
3318 port = (u8)work->param;
3319 break; 3316 break;
3320 case MLX5_DEV_EVENT_DELAY_DROP_TIMEOUT: 3317 case MLX5_DEV_EVENT_DELAY_DROP_TIMEOUT:
3321 schedule_work(&ibdev->delay_drop.delay_drop_work); 3318 schedule_work(&ibdev->delay_drop.delay_drop_work);
@@ -3327,7 +3324,7 @@ static void mlx5_ib_handle_event(struct work_struct *_work)
3327 ibev.device = &ibdev->ib_dev; 3324 ibev.device = &ibdev->ib_dev;
3328 ibev.element.port_num = port; 3325 ibev.element.port_num = port;
3329 3326
3330 if (port < 1 || port > ibdev->num_ports) { 3327 if (!rdma_is_port_valid(&ibdev->ib_dev, port)) {
3331 mlx5_ib_warn(ibdev, "warning: event on port %d\n", port); 3328 mlx5_ib_warn(ibdev, "warning: event on port %d\n", port);
3332 goto out; 3329 goto out;
3333 } 3330 }
diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
index 556e015678de..1961c6a45437 100644
--- a/drivers/infiniband/hw/mlx5/mr.c
+++ b/drivers/infiniband/hw/mlx5/mr.c
@@ -1816,7 +1816,6 @@ mlx5_ib_sg_to_klms(struct mlx5_ib_mr *mr,
1816 1816
1817 mr->ibmr.iova = sg_dma_address(sg) + sg_offset; 1817 mr->ibmr.iova = sg_dma_address(sg) + sg_offset;
1818 mr->ibmr.length = 0; 1818 mr->ibmr.length = 0;
1819 mr->ndescs = sg_nents;
1820 1819
1821 for_each_sg(sgl, sg, sg_nents, i) { 1820 for_each_sg(sgl, sg, sg_nents, i) {
1822 if (unlikely(i >= mr->max_descs)) 1821 if (unlikely(i >= mr->max_descs))
@@ -1828,6 +1827,7 @@ mlx5_ib_sg_to_klms(struct mlx5_ib_mr *mr,
1828 1827
1829 sg_offset = 0; 1828 sg_offset = 0;
1830 } 1829 }
1830 mr->ndescs = i;
1831 1831
1832 if (sg_offset_p) 1832 if (sg_offset_p)
1833 *sg_offset_p = sg_offset; 1833 *sg_offset_p = sg_offset;
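
The mr.c fix moves the descriptor count after the loop so it reflects how many SG entries were actually consumed; the loop bails out at max_descs, so pre-assigning sg_nents could leave ndescs larger than the KLM array that was filled. A sketch of counting only what a bounded loop mapped, with the SG list reduced to an int array:

#include <stdio.h>

#define MAX_DESCS 4

/* Map up to 'max' entries; return how many were actually mapped,
 * the value mlx5_ib_sg_to_klms() now stores in mr->ndescs. */
static int map_sg(const int *sg, int nents, int *descs, int max)
{
	int i;

	for (i = 0; i < nents; i++) {
		if (i >= max)
			break;          /* table full: stop, don't count */
		descs[i] = sg[i];
	}
	return i;
}

int main(void)
{
	int sg[6] = { 10, 11, 12, 13, 14, 15 };
	int descs[MAX_DESCS];
	int ndescs = map_sg(sg, 6, descs, MAX_DESCS);

	/* The old bug in miniature: ndescs would have been 6 here, and
	 * a later walk of descs[0..5] would read past the array. */
	printf("mapped %d of 6\n", ndescs);
	return 0;
}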
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index 39d24bf694a8..36197fbac63a 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -1584,6 +1584,7 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,
1584 u32 uidx = MLX5_IB_DEFAULT_UIDX; 1584 u32 uidx = MLX5_IB_DEFAULT_UIDX;
1585 struct mlx5_ib_create_qp ucmd; 1585 struct mlx5_ib_create_qp ucmd;
1586 struct mlx5_ib_qp_base *base; 1586 struct mlx5_ib_qp_base *base;
1587 int mlx5_st;
1587 void *qpc; 1588 void *qpc;
1588 u32 *in; 1589 u32 *in;
1589 int err; 1590 int err;
@@ -1592,6 +1593,10 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,
1592 spin_lock_init(&qp->sq.lock); 1593 spin_lock_init(&qp->sq.lock);
1593 spin_lock_init(&qp->rq.lock); 1594 spin_lock_init(&qp->rq.lock);
1594 1595
1596 mlx5_st = to_mlx5_st(init_attr->qp_type);
1597 if (mlx5_st < 0)
1598 return -EINVAL;
1599
1595 if (init_attr->rwq_ind_tbl) { 1600 if (init_attr->rwq_ind_tbl) {
1596 if (!udata) 1601 if (!udata)
1597 return -ENOSYS; 1602 return -ENOSYS;
@@ -1753,7 +1758,7 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,
1753 1758
1754 qpc = MLX5_ADDR_OF(create_qp_in, in, qpc); 1759 qpc = MLX5_ADDR_OF(create_qp_in, in, qpc);
1755 1760
1756 MLX5_SET(qpc, qpc, st, to_mlx5_st(init_attr->qp_type)); 1761 MLX5_SET(qpc, qpc, st, mlx5_st);
1757 MLX5_SET(qpc, qpc, pm_state, MLX5_QP_PM_MIGRATED); 1762 MLX5_SET(qpc, qpc, pm_state, MLX5_QP_PM_MIGRATED);
1758 1763
1759 if (init_attr->qp_type != MLX5_IB_QPT_REG_UMR) 1764 if (init_attr->qp_type != MLX5_IB_QPT_REG_UMR)
@@ -3095,8 +3100,10 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
3095 goto out; 3100 goto out;
3096 3101
3097 if (mlx5_cur >= MLX5_QP_NUM_STATE || mlx5_new >= MLX5_QP_NUM_STATE || 3102 if (mlx5_cur >= MLX5_QP_NUM_STATE || mlx5_new >= MLX5_QP_NUM_STATE ||
3098 !optab[mlx5_cur][mlx5_new]) 3103 !optab[mlx5_cur][mlx5_new]) {
3104 err = -EINVAL;
3099 goto out; 3105 goto out;
3106 }
3100 3107
3101 op = optab[mlx5_cur][mlx5_new]; 3108 op = optab[mlx5_cur][mlx5_new];
3102 optpar = ib_mask_to_mlx5_opt(attr_mask); 3109 optpar = ib_mask_to_mlx5_opt(attr_mask);
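
Both qp.c hunks close the same hole: a translation that can fail, whether to_mlx5_st() or an empty optab[][] slot, must become an explicit -EINVAL instead of being used anyway or, as the old bare `goto out` did, silently returning success. A sketch of the lookup-then-validate pattern over a toy transition table:

#include <errno.h>
#include <stdio.h>

#define NUM_STATE 3

/* 0 marks an illegal transition, as in the driver's optab[][]. */
static const int optab[NUM_STATE][NUM_STATE] = {
	{ 0, 1, 0 },
	{ 0, 0, 2 },
	{ 0, 0, 0 },
};

static int modify(int cur, int new)
{
	int err = 0, op;

	if (cur >= NUM_STATE || new >= NUM_STATE || !optab[cur][new]) {
		err = -EINVAL;          /* the line the fix adds */
		goto out;
	}
	op = optab[cur][new];
	printf("issuing op %d\n", op);
out:
	return err;
}

int main(void)
{
	printf("legal:   %d\n", modify(0, 1));
	printf("illegal: %d\n", modify(2, 0));   /* was silently 0 before */
	return 0;
}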
diff --git a/drivers/infiniband/hw/qedr/qedr_iw_cm.c b/drivers/infiniband/hw/qedr/qedr_iw_cm.c
index 478b7317b80a..26dc374787f7 100644
--- a/drivers/infiniband/hw/qedr/qedr_iw_cm.c
+++ b/drivers/infiniband/hw/qedr/qedr_iw_cm.c
@@ -458,8 +458,7 @@ qedr_addr6_resolve(struct qedr_dev *dev,
458 } 458 }
459 return -EINVAL; 459 return -EINVAL;
460 } 460 }
461 neigh = dst_neigh_lookup(dst, &dst_in); 461 neigh = dst_neigh_lookup(dst, &fl6.daddr);
462
463 if (neigh) { 462 if (neigh) {
464 rcu_read_lock(); 463 rcu_read_lock();
465 if (neigh->nud_state & NUD_VALID) { 464 if (neigh->nud_state & NUD_VALID) {
@@ -494,10 +493,14 @@ int qedr_iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
494 493
495 qp = idr_find(&dev->qpidr, conn_param->qpn); 494 qp = idr_find(&dev->qpidr, conn_param->qpn);
496 495
497 laddr = (struct sockaddr_in *)&cm_id->local_addr; 496 laddr = (struct sockaddr_in *)&cm_id->m_local_addr;
498 raddr = (struct sockaddr_in *)&cm_id->remote_addr; 497 raddr = (struct sockaddr_in *)&cm_id->m_remote_addr;
499 laddr6 = (struct sockaddr_in6 *)&cm_id->local_addr; 498 laddr6 = (struct sockaddr_in6 *)&cm_id->m_local_addr;
500 raddr6 = (struct sockaddr_in6 *)&cm_id->remote_addr; 499 raddr6 = (struct sockaddr_in6 *)&cm_id->m_remote_addr;
500
501 DP_DEBUG(dev, QEDR_MSG_IWARP, "MAPPED %d %d\n",
502 ntohs(((struct sockaddr_in *)&cm_id->remote_addr)->sin_port),
503 ntohs(raddr->sin_port));
501 504
502 DP_DEBUG(dev, QEDR_MSG_IWARP, 505 DP_DEBUG(dev, QEDR_MSG_IWARP,
503 "Connect source address: %pISpc, remote address: %pISpc\n", 506 "Connect source address: %pISpc, remote address: %pISpc\n",
@@ -599,8 +602,8 @@ int qedr_iw_create_listen(struct iw_cm_id *cm_id, int backlog)
599 int rc; 602 int rc;
600 int i; 603 int i;
601 604
602 laddr = (struct sockaddr_in *)&cm_id->local_addr; 605 laddr = (struct sockaddr_in *)&cm_id->m_local_addr;
603 laddr6 = (struct sockaddr_in6 *)&cm_id->local_addr; 606 laddr6 = (struct sockaddr_in6 *)&cm_id->m_local_addr;
604 607
605 DP_DEBUG(dev, QEDR_MSG_IWARP, 608 DP_DEBUG(dev, QEDR_MSG_IWARP,
606 "Create Listener address: %pISpc\n", &cm_id->local_addr); 609 "Create Listener address: %pISpc\n", &cm_id->local_addr);
diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c
index 53f00dbf313f..875b17272d65 100644
--- a/drivers/infiniband/hw/qedr/verbs.c
+++ b/drivers/infiniband/hw/qedr/verbs.c
@@ -3034,6 +3034,11 @@ static int __qedr_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
3034 3034
3035 switch (wr->opcode) { 3035 switch (wr->opcode) {
3036 case IB_WR_SEND_WITH_IMM: 3036 case IB_WR_SEND_WITH_IMM:
3037 if (unlikely(rdma_protocol_iwarp(&dev->ibdev, 1))) {
3038 rc = -EINVAL;
3039 *bad_wr = wr;
3040 break;
3041 }
3037 wqe->req_type = RDMA_SQ_REQ_TYPE_SEND_WITH_IMM; 3042 wqe->req_type = RDMA_SQ_REQ_TYPE_SEND_WITH_IMM;
3038 swqe = (struct rdma_sq_send_wqe_1st *)wqe; 3043 swqe = (struct rdma_sq_send_wqe_1st *)wqe;
3039 swqe->wqe_size = 2; 3044 swqe->wqe_size = 2;
@@ -3075,6 +3080,11 @@ static int __qedr_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
3075 break; 3080 break;
3076 3081
3077 case IB_WR_RDMA_WRITE_WITH_IMM: 3082 case IB_WR_RDMA_WRITE_WITH_IMM:
3083 if (unlikely(rdma_protocol_iwarp(&dev->ibdev, 1))) {
3084 rc = -EINVAL;
3085 *bad_wr = wr;
3086 break;
3087 }
3078 wqe->req_type = RDMA_SQ_REQ_TYPE_RDMA_WR_WITH_IMM; 3088 wqe->req_type = RDMA_SQ_REQ_TYPE_RDMA_WR_WITH_IMM;
3079 rwqe = (struct rdma_sq_rdma_wqe_1st *)wqe; 3089 rwqe = (struct rdma_sq_rdma_wqe_1st *)wqe;
3080 3090
@@ -3724,7 +3734,7 @@ int qedr_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
3724{ 3734{
3725 struct qedr_dev *dev = get_qedr_dev(ibcq->device); 3735 struct qedr_dev *dev = get_qedr_dev(ibcq->device);
3726 struct qedr_cq *cq = get_qedr_cq(ibcq); 3736 struct qedr_cq *cq = get_qedr_cq(ibcq);
3727 union rdma_cqe *cqe = cq->latest_cqe; 3737 union rdma_cqe *cqe;
3728 u32 old_cons, new_cons; 3738 u32 old_cons, new_cons;
3729 unsigned long flags; 3739 unsigned long flags;
3730 int update = 0; 3740 int update = 0;
@@ -3741,6 +3751,7 @@ int qedr_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
3741 return qedr_gsi_poll_cq(ibcq, num_entries, wc); 3751 return qedr_gsi_poll_cq(ibcq, num_entries, wc);
3742 3752
3743 spin_lock_irqsave(&cq->cq_lock, flags); 3753 spin_lock_irqsave(&cq->cq_lock, flags);
3754 cqe = cq->latest_cqe;
3744 old_cons = qed_chain_get_cons_idx_u32(&cq->pbl); 3755 old_cons = qed_chain_get_cons_idx_u32(&cq->pbl);
3745 while (num_entries && is_valid_cqe(cq, cqe)) { 3756 while (num_entries && is_valid_cqe(cq, cqe)) {
3746 struct qedr_qp *qp; 3757 struct qedr_qp *qp;
diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
index 4d1d8dfb2d2a..f2273143b3cb 100644
--- a/drivers/md/bcache/super.c
+++ b/drivers/md/bcache/super.c
@@ -963,6 +963,7 @@ int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c,
963 uint32_t rtime = cpu_to_le32(get_seconds()); 963 uint32_t rtime = cpu_to_le32(get_seconds());
964 struct uuid_entry *u; 964 struct uuid_entry *u;
965 char buf[BDEVNAME_SIZE]; 965 char buf[BDEVNAME_SIZE];
966 struct cached_dev *exist_dc, *t;
966 967
967 bdevname(dc->bdev, buf); 968 bdevname(dc->bdev, buf);
968 969
@@ -987,6 +988,16 @@ int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c,
987 return -EINVAL; 988 return -EINVAL;
988 } 989 }
989 990
+	/* Check whether already attached */
+	list_for_each_entry_safe(exist_dc, t, &c->cached_devs, list) {
+		if (!memcmp(dc->sb.uuid, exist_dc->sb.uuid, 16)) {
+			pr_err("Tried to attach %s but duplicate UUID already attached",
+				buf);
+
+			return -EINVAL;
+		}
+	}
+
 	u = uuid_find(c, dc->sb.uuid);
 
 	if (u &&
@@ -1204,7 +1215,7 @@ static void register_bdev(struct cache_sb *sb, struct page *sb_page,
 
 	return;
 err:
-	pr_notice("error opening %s: %s", bdevname(bdev, name), err);
+	pr_notice("error %s: %s", bdevname(bdev, name), err);
 	bcache_device_stop(&dc->disk);
 }
 
@@ -1883,6 +1894,8 @@ static int register_cache(struct cache_sb *sb, struct page *sb_page,
 	const char *err = NULL; /* must be set for any error case */
 	int ret = 0;
 
+	bdevname(bdev, name);
+
 	memcpy(&ca->sb, sb, sizeof(struct cache_sb));
 	ca->bdev = bdev;
 	ca->bdev->bd_holder = ca;
@@ -1891,11 +1904,12 @@ static int register_cache(struct cache_sb *sb, struct page *sb_page,
 	bio_first_bvec_all(&ca->sb_bio)->bv_page = sb_page;
 	get_page(sb_page);
 
-	if (blk_queue_discard(bdev_get_queue(ca->bdev)))
+	if (blk_queue_discard(bdev_get_queue(bdev)))
 		ca->discard = CACHE_DISCARD(&ca->sb);
 
 	ret = cache_alloc(ca);
 	if (ret != 0) {
+		blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
 		if (ret == -ENOMEM)
 			err = "cache_alloc(): -ENOMEM";
 		else
@@ -1918,14 +1932,14 @@ static int register_cache(struct cache_sb *sb, struct page *sb_page,
 		goto out;
 	}
 
-	pr_info("registered cache device %s", bdevname(bdev, name));
+	pr_info("registered cache device %s", name);
 
 out:
 	kobject_put(&ca->kobj);
 
 err:
 	if (err)
-		pr_notice("error opening %s: %s", bdevname(bdev, name), err);
+		pr_notice("error %s: %s", name, err);
 
 	return ret;
 }
@@ -2014,6 +2028,7 @@ static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr,
 	if (err)
 		goto err_close;
 
+	err = "failed to register device";
 	if (SB_IS_BDEV(sb)) {
 		struct cached_dev *dc = kzalloc(sizeof(*dc), GFP_KERNEL);
 		if (!dc)
@@ -2028,7 +2043,7 @@ static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr,
 			goto err_close;
 
 		if (register_cache(sb, sb_page, bdev, ca) != 0)
-			goto err_close;
+			goto err;
 	}
out:
 	if (sb_page)
@@ -2041,7 +2056,7 @@ out:
err_close:
 	blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
err:
-	pr_info("error opening %s: %s", path, err);
+	pr_info("error %s: %s", path, err);
 	ret = -EINVAL;
 	goto out;
 }
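
The ordering in register_cache() above matters: bdevname() is captured into name once, up front, so the later error and success messages can still print the device name even after blkdev_put() has released the bdev. A minimal sketch of the pattern, with do_setup() standing in as a hypothetical step that can fail:

	static int my_register(struct block_device *bdev)
	{
		char name[BDEVNAME_SIZE];
		int ret;

		bdevname(bdev, name);		/* capture before any put */

		ret = do_setup(bdev);		/* hypothetical setup step */
		if (ret) {
			blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
			pr_notice("error %s: setup failed", name);	/* name still valid */
			return ret;
		}

		pr_info("registered cache device %s", name);
		return 0;
	}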
diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c
index 414c9af54ded..aa2032fa80d4 100644
--- a/drivers/md/dm-bufio.c
+++ b/drivers/md/dm-bufio.c
@@ -386,9 +386,6 @@ static void __cache_size_refresh(void)
 static void *alloc_buffer_data(struct dm_bufio_client *c, gfp_t gfp_mask,
 			       enum data_mode *data_mode)
 {
-	unsigned noio_flag;
-	void *ptr;
-
 	if (c->block_size <= DM_BUFIO_BLOCK_SIZE_SLAB_LIMIT) {
 		*data_mode = DATA_MODE_SLAB;
 		return kmem_cache_alloc(DM_BUFIO_CACHE(c), gfp_mask);
@@ -412,16 +409,15 @@ static void *alloc_buffer_data(struct dm_bufio_client *c, gfp_t gfp_mask,
 	 * all allocations done by this process (including pagetables) are done
 	 * as if GFP_NOIO was specified.
 	 */
+	if (gfp_mask & __GFP_NORETRY) {
+		unsigned noio_flag = memalloc_noio_save();
+		void *ptr = __vmalloc(c->block_size, gfp_mask, PAGE_KERNEL);
 
-	if (gfp_mask & __GFP_NORETRY)
-		noio_flag = memalloc_noio_save();
-
-	ptr = __vmalloc(c->block_size, gfp_mask, PAGE_KERNEL);
-
-	if (gfp_mask & __GFP_NORETRY)
 		memalloc_noio_restore(noio_flag);
+		return ptr;
+	}
 
-	return ptr;
+	return __vmalloc(c->block_size, gfp_mask, PAGE_KERNEL);
 }
 
 /*
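
memalloc_noio_save() marks the current task so that every allocation made inside the window, including ones done deep within __vmalloc() for page tables, behaves as if GFP_NOIO had been passed; that is why the restructured code above only opens the save/restore window for __GFP_NORETRY callers and falls through to a plain __vmalloc() otherwise. A self-contained sketch of the scoped pattern (the three-argument __vmalloc() matches kernels of this vintage):

	#include <linux/sched/mm.h>
	#include <linux/vmalloc.h>

	static void *vmalloc_noio(unsigned long size, gfp_t gfp_mask)
	{
		unsigned noio_flag = memalloc_noio_save();
		void *ptr = __vmalloc(size, gfp_mask, PAGE_KERNEL);

		memalloc_noio_restore(noio_flag);
		return ptr;
	}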
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index 7d3e572072f5..3fde9e9faddd 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -22,6 +22,7 @@
 #include <linux/time.h>
 #include <linux/workqueue.h>
 #include <linux/delay.h>
+#include <scsi/scsi_device.h>
 #include <scsi/scsi_dh.h>
 #include <linux/atomic.h>
 #include <linux/blk-mq.h>
@@ -211,25 +212,13 @@ static int alloc_multipath_stage2(struct dm_target *ti, struct multipath *m)
 		else
 			m->queue_mode = DM_TYPE_REQUEST_BASED;
 
-	} else if (m->queue_mode == DM_TYPE_BIO_BASED ||
-		   m->queue_mode == DM_TYPE_NVME_BIO_BASED) {
+	} else if (m->queue_mode == DM_TYPE_BIO_BASED) {
 		INIT_WORK(&m->process_queued_bios, process_queued_bios);
-
-		if (m->queue_mode == DM_TYPE_BIO_BASED) {
-			/*
-			 * bio-based doesn't support any direct scsi_dh management;
-			 * it just discovers if a scsi_dh is attached.
-			 */
-			set_bit(MPATHF_RETAIN_ATTACHED_HW_HANDLER, &m->flags);
-		}
-	}
-
-	if (m->queue_mode != DM_TYPE_NVME_BIO_BASED) {
-		set_bit(MPATHF_QUEUE_IO, &m->flags);
-		atomic_set(&m->pg_init_in_progress, 0);
-		atomic_set(&m->pg_init_count, 0);
-		m->pg_init_delay_msecs = DM_PG_INIT_DELAY_DEFAULT;
-		init_waitqueue_head(&m->pg_init_wait);
+		/*
+		 * bio-based doesn't support any direct scsi_dh management;
+		 * it just discovers if a scsi_dh is attached.
+		 */
+		set_bit(MPATHF_RETAIN_ATTACHED_HW_HANDLER, &m->flags);
 	}
 
 	dm_table_set_type(ti->table, m->queue_mode);
@@ -337,14 +326,12 @@ static void __switch_pg(struct multipath *m, struct priority_group *pg)
 {
 	m->current_pg = pg;
 
-	if (m->queue_mode == DM_TYPE_NVME_BIO_BASED)
-		return;
-
 	/* Must we initialise the PG first, and queue I/O till it's ready? */
 	if (m->hw_handler_name) {
 		set_bit(MPATHF_PG_INIT_REQUIRED, &m->flags);
 		set_bit(MPATHF_QUEUE_IO, &m->flags);
 	} else {
+		/* FIXME: not needed if no scsi_dh is attached */
 		clear_bit(MPATHF_PG_INIT_REQUIRED, &m->flags);
 		clear_bit(MPATHF_QUEUE_IO, &m->flags);
 	}
@@ -385,8 +372,7 @@ static struct pgpath *choose_pgpath(struct multipath *m, size_t nr_bytes)
 	unsigned bypassed = 1;
 
 	if (!atomic_read(&m->nr_valid_paths)) {
-		if (m->queue_mode != DM_TYPE_NVME_BIO_BASED)
-			clear_bit(MPATHF_QUEUE_IO, &m->flags);
+		clear_bit(MPATHF_QUEUE_IO, &m->flags);
 		goto failed;
 	}
 
@@ -599,7 +585,7 @@ static struct pgpath *__map_bio(struct multipath *m, struct bio *bio)
 	return pgpath;
 }
 
-static struct pgpath *__map_bio_nvme(struct multipath *m, struct bio *bio)
+static struct pgpath *__map_bio_fast(struct multipath *m, struct bio *bio)
 {
 	struct pgpath *pgpath;
 	unsigned long flags;
@@ -634,8 +620,8 @@ static int __multipath_map_bio(struct multipath *m, struct bio *bio,
 {
 	struct pgpath *pgpath;
 
-	if (m->queue_mode == DM_TYPE_NVME_BIO_BASED)
-		pgpath = __map_bio_nvme(m, bio);
+	if (!m->hw_handler_name)
+		pgpath = __map_bio_fast(m, bio);
 	else
 		pgpath = __map_bio(m, bio);
 
@@ -675,8 +661,7 @@ static void process_queued_io_list(struct multipath *m)
 {
 	if (m->queue_mode == DM_TYPE_MQ_REQUEST_BASED)
 		dm_mq_kick_requeue_list(dm_table_get_md(m->ti->table));
-	else if (m->queue_mode == DM_TYPE_BIO_BASED ||
-		 m->queue_mode == DM_TYPE_NVME_BIO_BASED)
+	else if (m->queue_mode == DM_TYPE_BIO_BASED)
 		queue_work(kmultipathd, &m->process_queued_bios);
 }
 
@@ -838,6 +823,16 @@ retain:
 			 */
 			kfree(m->hw_handler_name);
 			m->hw_handler_name = attached_handler_name;
+
+			/*
+			 * Init fields that are only used when a scsi_dh is attached
+			 */
+			if (!test_and_set_bit(MPATHF_QUEUE_IO, &m->flags)) {
+				atomic_set(&m->pg_init_in_progress, 0);
+				atomic_set(&m->pg_init_count, 0);
+				m->pg_init_delay_msecs = DM_PG_INIT_DELAY_DEFAULT;
+				init_waitqueue_head(&m->pg_init_wait);
+			}
 		}
 	}
 
@@ -873,6 +868,7 @@ static struct pgpath *parse_path(struct dm_arg_set *as, struct path_selector *ps
 	int r;
 	struct pgpath *p;
 	struct multipath *m = ti->private;
+	struct scsi_device *sdev;
 
 	/* we need at least a path arg */
 	if (as->argc < 1) {
@@ -891,7 +887,9 @@ static struct pgpath *parse_path(struct dm_arg_set *as, struct path_selector *ps
 		goto bad;
 	}
 
-	if (m->queue_mode != DM_TYPE_NVME_BIO_BASED) {
+	sdev = scsi_device_from_queue(bdev_get_queue(p->path.dev->bdev));
+	if (sdev) {
+		put_device(&sdev->sdev_gendev);
 		INIT_DELAYED_WORK(&p->activate_path, activate_path_work);
 		r = setup_scsi_dh(p->path.dev->bdev, m, &ti->error);
 		if (r) {
@@ -1001,8 +999,7 @@ static int parse_hw_handler(struct dm_arg_set *as, struct multipath *m)
 	if (!hw_argc)
 		return 0;
 
-	if (m->queue_mode == DM_TYPE_BIO_BASED ||
-	    m->queue_mode == DM_TYPE_NVME_BIO_BASED) {
+	if (m->queue_mode == DM_TYPE_BIO_BASED) {
 		dm_consume_args(as, hw_argc);
 		DMERR("bio-based multipath doesn't allow hardware handler args");
 		return 0;
@@ -1091,8 +1088,6 @@ static int parse_features(struct dm_arg_set *as, struct multipath *m)
 
 		if (!strcasecmp(queue_mode_name, "bio"))
 			m->queue_mode = DM_TYPE_BIO_BASED;
-		else if (!strcasecmp(queue_mode_name, "nvme"))
-			m->queue_mode = DM_TYPE_NVME_BIO_BASED;
 		else if (!strcasecmp(queue_mode_name, "rq"))
 			m->queue_mode = DM_TYPE_REQUEST_BASED;
 		else if (!strcasecmp(queue_mode_name, "mq"))
@@ -1193,7 +1188,7 @@ static int multipath_ctr(struct dm_target *ti, unsigned argc, char **argv)
 	ti->num_discard_bios = 1;
 	ti->num_write_same_bios = 1;
 	ti->num_write_zeroes_bios = 1;
-	if (m->queue_mode == DM_TYPE_BIO_BASED || m->queue_mode == DM_TYPE_NVME_BIO_BASED)
+	if (m->queue_mode == DM_TYPE_BIO_BASED)
 		ti->per_io_data_size = multipath_per_bio_data_size();
 	else
 		ti->per_io_data_size = sizeof(struct dm_mpath_io);
@@ -1730,9 +1725,6 @@ static void multipath_status(struct dm_target *ti, status_type_t type,
 			case DM_TYPE_BIO_BASED:
 				DMEMIT("queue_mode bio ");
 				break;
-			case DM_TYPE_NVME_BIO_BASED:
-				DMEMIT("queue_mode nvme ");
-				break;
 			case DM_TYPE_MQ_REQUEST_BASED:
 				DMEMIT("queue_mode mq ");
 				break;
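
With DM_TYPE_NVME_BIO_BASED gone, the bio-based fast path is selected by what is actually present rather than by a declared queue mode: parse_path() probes whether the underlying queue belongs to a SCSI device, and __multipath_map_bio() keys on m->hw_handler_name. A sketch of the probe idiom, which must drop the reference scsi_device_from_queue() returns:

	struct scsi_device *sdev = scsi_device_from_queue(bdev_get_queue(bdev));

	if (sdev) {
		put_device(&sdev->sdev_gendev);	/* probe only; drop the ref */
		/* SCSI path: set up scsi_dh and the pg_init machinery */
	} else {
		/* non-SCSI path (e.g. NVMe): eligible for __map_bio_fast() */
	}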
diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
index 7ef469e902c6..c1d1034ff7b7 100644
--- a/drivers/md/dm-raid.c
+++ b/drivers/md/dm-raid.c
@@ -3408,9 +3408,10 @@ static sector_t rs_get_progress(struct raid_set *rs, unsigned long recovery,
 			set_bit(RT_FLAG_RS_IN_SYNC, &rs->runtime_flags);
 
 	} else {
-		if (test_bit(MD_RECOVERY_NEEDED, &recovery) ||
-		    test_bit(MD_RECOVERY_RESHAPE, &recovery) ||
-		    test_bit(MD_RECOVERY_RUNNING, &recovery))
+		if (!test_bit(MD_RECOVERY_INTR, &recovery) &&
+		    (test_bit(MD_RECOVERY_NEEDED, &recovery) ||
+		     test_bit(MD_RECOVERY_RESHAPE, &recovery) ||
+		     test_bit(MD_RECOVERY_RUNNING, &recovery)))
 			r = mddev->curr_resync_completed;
 		else
 			r = mddev->recovery_cp;
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index 5fe7ec356c33..7eb3e2a3c07d 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -942,17 +942,12 @@ static int dm_table_determine_type(struct dm_table *t)
 
 	if (t->type != DM_TYPE_NONE) {
 		/* target already set the table's type */
-		if (t->type == DM_TYPE_BIO_BASED)
-			return 0;
-		else if (t->type == DM_TYPE_NVME_BIO_BASED) {
-			if (!dm_table_does_not_support_partial_completion(t)) {
-				DMERR("nvme bio-based is only possible with devices"
-				      " that don't support partial completion");
-				return -EINVAL;
-			}
-			/* Fallthru, also verify all devices are blk-mq */
+		if (t->type == DM_TYPE_BIO_BASED) {
+			/* possibly upgrade to a variant of bio-based */
+			goto verify_bio_based;
 		}
 		BUG_ON(t->type == DM_TYPE_DAX_BIO_BASED);
+		BUG_ON(t->type == DM_TYPE_NVME_BIO_BASED);
 		goto verify_rq_based;
 	}
 
@@ -985,6 +980,7 @@ static int dm_table_determine_type(struct dm_table *t)
 	}
 
 	if (bio_based) {
+verify_bio_based:
 		/* We must use this table as bio-based */
 		t->type = DM_TYPE_BIO_BASED;
 		if (dm_table_supports_dax(t) ||
@@ -1755,7 +1751,7 @@ static int device_no_partial_completion(struct dm_target *ti, struct dm_dev *dev
 	char b[BDEVNAME_SIZE];
 
 	/* For now, NVMe devices are the only devices of this class */
-	return (strncmp(bdevname(dev->bdev, b), "nvme", 3) == 0);
+	return (strncmp(bdevname(dev->bdev, b), "nvme", 4) == 0);
 }
 
 static bool dm_table_does_not_support_partial_completion(struct dm_table *t)
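
The strncmp() length fix above closes a classic prefix-match bug: with n == 3 only "nvm" was compared, so any block device whose name begins with those three characters matched. Illustrative cases:

	strncmp("nvme0n1", "nvme", 4);	/* == 0: NVMe device, as intended */
	strncmp("nvm0",    "nvme", 3);	/* == 0: false positive under the old n == 3 */
	strncmp("nvm0",    "nvme", 4);	/* != 0: rejected with the corrected length */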
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 68136806d365..45328d8b2859 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -458,9 +458,11 @@ static int dm_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
 	return dm_get_geometry(md, geo);
 }
 
-static int dm_grab_bdev_for_ioctl(struct mapped_device *md,
-				  struct block_device **bdev,
-				  fmode_t *mode)
+static char *_dm_claim_ptr = "I belong to device-mapper";
+
+static int dm_get_bdev_for_ioctl(struct mapped_device *md,
+				 struct block_device **bdev,
+				 fmode_t *mode)
 {
 	struct dm_target *tgt;
 	struct dm_table *map;
@@ -490,6 +492,10 @@ retry:
 		goto out;
 
 	bdgrab(*bdev);
+	r = blkdev_get(*bdev, *mode, _dm_claim_ptr);
+	if (r < 0)
+		goto out;
+
 	dm_put_live_table(md, srcu_idx);
 	return r;
 
@@ -508,7 +514,7 @@ static int dm_blk_ioctl(struct block_device *bdev, fmode_t mode,
 	struct mapped_device *md = bdev->bd_disk->private_data;
 	int r;
 
-	r = dm_grab_bdev_for_ioctl(md, &bdev, &mode);
+	r = dm_get_bdev_for_ioctl(md, &bdev, &mode);
 	if (r < 0)
 		return r;
 
@@ -528,7 +534,7 @@ static int dm_blk_ioctl(struct block_device *bdev, fmode_t mode,
 
 	r = __blkdev_driver_ioctl(bdev, mode, cmd, arg);
out:
-	bdput(bdev);
+	blkdev_put(bdev, mode);
 	return r;
 }
 
@@ -708,14 +714,13 @@ static void dm_put_live_table_fast(struct mapped_device *md) __releases(RCU)
 static int open_table_device(struct table_device *td, dev_t dev,
 			     struct mapped_device *md)
 {
-	static char *_claim_ptr = "I belong to device-mapper";
 	struct block_device *bdev;
 
 	int r;
 
 	BUG_ON(td->dm_dev.bdev);
 
-	bdev = blkdev_get_by_dev(dev, td->dm_dev.mode | FMODE_EXCL, _claim_ptr);
+	bdev = blkdev_get_by_dev(dev, td->dm_dev.mode | FMODE_EXCL, _dm_claim_ptr);
 	if (IS_ERR(bdev))
 		return PTR_ERR(bdev);
 
@@ -3011,7 +3016,7 @@ static int dm_pr_reserve(struct block_device *bdev, u64 key, enum pr_type type,
 	fmode_t mode;
 	int r;
 
-	r = dm_grab_bdev_for_ioctl(md, &bdev, &mode);
+	r = dm_get_bdev_for_ioctl(md, &bdev, &mode);
 	if (r < 0)
 		return r;
 
@@ -3021,7 +3026,7 @@ static int dm_pr_reserve(struct block_device *bdev, u64 key, enum pr_type type,
 	else
 		r = -EOPNOTSUPP;
 
-	bdput(bdev);
+	blkdev_put(bdev, mode);
 	return r;
 }
 
@@ -3032,7 +3037,7 @@ static int dm_pr_release(struct block_device *bdev, u64 key, enum pr_type type)
 	fmode_t mode;
 	int r;
 
-	r = dm_grab_bdev_for_ioctl(md, &bdev, &mode);
+	r = dm_get_bdev_for_ioctl(md, &bdev, &mode);
 	if (r < 0)
 		return r;
 
@@ -3042,7 +3047,7 @@ static int dm_pr_release(struct block_device *bdev, u64 key, enum pr_type type)
 	else
 		r = -EOPNOTSUPP;
 
-	bdput(bdev);
+	blkdev_put(bdev, mode);
 	return r;
 }
 
@@ -3054,7 +3059,7 @@ static int dm_pr_preempt(struct block_device *bdev, u64 old_key, u64 new_key,
 	fmode_t mode;
 	int r;
 
-	r = dm_grab_bdev_for_ioctl(md, &bdev, &mode);
+	r = dm_get_bdev_for_ioctl(md, &bdev, &mode);
 	if (r < 0)
 		return r;
 
@@ -3064,7 +3069,7 @@ static int dm_pr_preempt(struct block_device *bdev, u64 old_key, u64 new_key,
 	else
 		r = -EOPNOTSUPP;
 
-	bdput(bdev);
+	blkdev_put(bdev, mode);
 	return r;
 }
 
@@ -3075,7 +3080,7 @@ static int dm_pr_clear(struct block_device *bdev, u64 key)
 	fmode_t mode;
 	int r;
 
-	r = dm_grab_bdev_for_ioctl(md, &bdev, &mode);
+	r = dm_get_bdev_for_ioctl(md, &bdev, &mode);
 	if (r < 0)
 		return r;
 
@@ -3085,7 +3090,7 @@ static int dm_pr_clear(struct block_device *bdev, u64 key)
 	else
 		r = -EOPNOTSUPP;
 
-	bdput(bdev);
+	blkdev_put(bdev, mode);
 	return r;
 }
 
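
The rename from dm_grab_bdev_for_ioctl() to dm_get_bdev_for_ioctl() tracks a real change of contract: the helper now takes a blkdev_get() reference on the underlying device, so every caller must balance it with blkdev_put(bdev, mode) instead of the bare bdput() used before. The shape each converted call site above follows:

	r = dm_get_bdev_for_ioctl(md, &bdev, &mode);	/* takes a blkdev_get() ref */
	if (r < 0)
		return r;

	/* ... operate on bdev ... */

	blkdev_put(bdev, mode);				/* balances the get */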
diff --git a/drivers/misc/ocxl/file.c b/drivers/misc/ocxl/file.c
index 337462e1569f..038509e5d031 100644
--- a/drivers/misc/ocxl/file.c
+++ b/drivers/misc/ocxl/file.c
@@ -102,10 +102,32 @@ static long afu_ioctl_attach(struct ocxl_context *ctx,
 	return rc;
 }
 
+static long afu_ioctl_get_metadata(struct ocxl_context *ctx,
+		struct ocxl_ioctl_metadata __user *uarg)
+{
+	struct ocxl_ioctl_metadata arg;
+
+	memset(&arg, 0, sizeof(arg));
+
+	arg.version = 0;
+
+	arg.afu_version_major = ctx->afu->config.version_major;
+	arg.afu_version_minor = ctx->afu->config.version_minor;
+	arg.pasid = ctx->pasid;
+	arg.pp_mmio_size = ctx->afu->config.pp_mmio_stride;
+	arg.global_mmio_size = ctx->afu->config.global_mmio_size;
+
+	if (copy_to_user(uarg, &arg, sizeof(arg)))
+		return -EFAULT;
+
+	return 0;
+}
+
 #define CMD_STR(x) (x == OCXL_IOCTL_ATTACH ? "ATTACH" :			\
 			x == OCXL_IOCTL_IRQ_ALLOC ? "IRQ_ALLOC" :	\
 			x == OCXL_IOCTL_IRQ_FREE ? "IRQ_FREE" :		\
 			x == OCXL_IOCTL_IRQ_SET_FD ? "IRQ_SET_FD" :	\
+			x == OCXL_IOCTL_GET_METADATA ? "GET_METADATA" :	\
 			"UNKNOWN")
 
 static long afu_ioctl(struct file *file, unsigned int cmd,
@@ -159,6 +181,11 @@ static long afu_ioctl(struct file *file, unsigned int cmd,
 					irq_fd.eventfd);
 		break;
 
+	case OCXL_IOCTL_GET_METADATA:
+		rc = afu_ioctl_get_metadata(ctx,
+				(struct ocxl_ioctl_metadata __user *) args);
+		break;
+
 	default:
 		rc = -EINVAL;
 	}
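
From userspace the new ioctl is a single call on an open AFU context. A hedged sketch, assuming the installed uapi header from this series exposes OCXL_IOCTL_GET_METADATA and struct ocxl_ioctl_metadata with the fields filled in above (header path assumed):

	#include <stdio.h>
	#include <sys/ioctl.h>
	#include <misc/ocxl.h>			/* uapi header; install path assumed */

	static int print_afu_metadata(int fd)	/* fd: an open /dev/ocxl context */
	{
		struct ocxl_ioctl_metadata md;

		if (ioctl(fd, OCXL_IOCTL_GET_METADATA, &md) < 0)
			return -1;
		printf("AFU version %d.%d, PASID %d, per-process mmio %llu bytes\n",
		       md.afu_version_major, md.afu_version_minor, md.pasid,
		       (unsigned long long)md.pp_mmio_size);
		return 0;
	}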
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/health.c b/drivers/net/ethernet/mellanox/mlx5/core/health.c
index 21d29f7936f6..d39b0b7011b2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/health.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/health.c
@@ -124,7 +124,7 @@ void mlx5_enter_error_state(struct mlx5_core_dev *dev, bool force)
 		trigger_cmd_completions(dev);
 	}
 
-	mlx5_core_event(dev, MLX5_DEV_EVENT_SYS_ERROR, 0);
+	mlx5_core_event(dev, MLX5_DEV_EVENT_SYS_ERROR, 1);
 	mlx5_core_err(dev, "end\n");
 
unlock:
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 817e5e2766da..7aeca5db7916 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -3033,7 +3033,6 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid)
 			ns->disk->disk_name);
 
 	nvme_mpath_add_disk(ns->head);
-	nvme_mpath_add_disk_links(ns);
 	return;
 out_unlink_ns:
 	mutex_lock(&ctrl->subsys->lock);
@@ -3053,7 +3052,6 @@ static void nvme_ns_remove(struct nvme_ns *ns)
 		return;
 
 	if (ns->disk && ns->disk->flags & GENHD_FL_UP) {
-		nvme_mpath_remove_disk_links(ns);
 		sysfs_remove_group(&disk_to_dev(ns->disk)->kobj,
 				   &nvme_ns_id_attr_group);
 		if (ns->ndev)
diff --git a/drivers/nvme/host/fabrics.c b/drivers/nvme/host/fabrics.c
index a1c58e35075e..8f0f34d06d46 100644
--- a/drivers/nvme/host/fabrics.c
+++ b/drivers/nvme/host/fabrics.c
@@ -650,6 +650,11 @@ static int nvmf_parse_options(struct nvmf_ctrl_options *opts,
 				ret = -EINVAL;
 				goto out;
 			}
+			if (opts->discovery_nqn) {
+				pr_debug("Ignoring nr_io_queues value for discovery controller\n");
+				break;
+			}
+
 			opts->nr_io_queues = min_t(unsigned int,
 					num_online_cpus(), token);
 			break;
diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
index 7f51f8414b97..1dc1387b7134 100644
--- a/drivers/nvme/host/fc.c
+++ b/drivers/nvme/host/fc.c
@@ -1206,7 +1206,7 @@ nvme_fc_connect_admin_queue(struct nvme_fc_ctrl *ctrl,
 			sizeof(struct fcnvme_lsdesc_cr_assoc_cmd));
 
 	assoc_rqst->assoc_cmd.ersp_ratio = cpu_to_be16(ersp_ratio);
-	assoc_rqst->assoc_cmd.sqsize = cpu_to_be16(qsize);
+	assoc_rqst->assoc_cmd.sqsize = cpu_to_be16(qsize - 1);
 	/* Linux supports only Dynamic controllers */
 	assoc_rqst->assoc_cmd.cntlid = cpu_to_be16(0xffff);
 	uuid_copy(&assoc_rqst->assoc_cmd.hostid, &ctrl->ctrl.opts->host->id);
@@ -1321,7 +1321,7 @@ nvme_fc_connect_queue(struct nvme_fc_ctrl *ctrl, struct nvme_fc_queue *queue,
 			sizeof(struct fcnvme_lsdesc_cr_conn_cmd));
 	conn_rqst->connect_cmd.ersp_ratio = cpu_to_be16(ersp_ratio);
 	conn_rqst->connect_cmd.qid = cpu_to_be16(queue->qnum);
-	conn_rqst->connect_cmd.sqsize = cpu_to_be16(qsize);
+	conn_rqst->connect_cmd.sqsize = cpu_to_be16(qsize - 1);
 
 	lsop->queue = queue;
 	lsreq->rqstaddr = conn_rqst;
@@ -2481,11 +2481,11 @@ nvme_fc_create_io_queues(struct nvme_fc_ctrl *ctrl)
 		goto out_free_tag_set;
 	}
 
-	ret = nvme_fc_create_hw_io_queues(ctrl, ctrl->ctrl.opts->queue_size);
+	ret = nvme_fc_create_hw_io_queues(ctrl, ctrl->ctrl.sqsize + 1);
 	if (ret)
 		goto out_cleanup_blk_queue;
 
-	ret = nvme_fc_connect_io_queues(ctrl, ctrl->ctrl.opts->queue_size);
+	ret = nvme_fc_connect_io_queues(ctrl, ctrl->ctrl.sqsize + 1);
 	if (ret)
 		goto out_delete_hw_queues;
 
@@ -2532,11 +2532,11 @@ nvme_fc_reinit_io_queues(struct nvme_fc_ctrl *ctrl)
 	if (ret)
 		goto out_free_io_queues;
 
-	ret = nvme_fc_create_hw_io_queues(ctrl, ctrl->ctrl.opts->queue_size);
+	ret = nvme_fc_create_hw_io_queues(ctrl, ctrl->ctrl.sqsize + 1);
 	if (ret)
 		goto out_free_io_queues;
 
-	ret = nvme_fc_connect_io_queues(ctrl, ctrl->ctrl.opts->queue_size);
+	ret = nvme_fc_connect_io_queues(ctrl, ctrl->ctrl.sqsize + 1);
 	if (ret)
 		goto out_delete_hw_queues;
 
@@ -2632,13 +2632,12 @@ nvme_fc_create_association(struct nvme_fc_ctrl *ctrl)
 	nvme_fc_init_queue(ctrl, 0);
 
 	ret = __nvme_fc_create_hw_queue(ctrl, &ctrl->queues[0], 0,
-				NVME_AQ_BLK_MQ_DEPTH);
+				NVME_AQ_DEPTH);
 	if (ret)
 		goto out_free_queue;
 
 	ret = nvme_fc_connect_admin_queue(ctrl, &ctrl->queues[0],
-				NVME_AQ_BLK_MQ_DEPTH,
-				(NVME_AQ_BLK_MQ_DEPTH / 4));
+				NVME_AQ_DEPTH, (NVME_AQ_DEPTH / 4));
 	if (ret)
 		goto out_delete_hw_queue;
 
@@ -2666,7 +2665,7 @@ nvme_fc_create_association(struct nvme_fc_ctrl *ctrl)
 	}
 
 	ctrl->ctrl.sqsize =
-		min_t(int, NVME_CAP_MQES(ctrl->ctrl.cap) + 1, ctrl->ctrl.sqsize);
+		min_t(int, NVME_CAP_MQES(ctrl->ctrl.cap), ctrl->ctrl.sqsize);
 
 	ret = nvme_enable_ctrl(&ctrl->ctrl, ctrl->ctrl.cap);
 	if (ret)
@@ -2699,6 +2698,14 @@ nvme_fc_create_association(struct nvme_fc_ctrl *ctrl)
 		opts->queue_size = ctrl->ctrl.maxcmd;
 	}
 
+	if (opts->queue_size > ctrl->ctrl.sqsize + 1) {
+		/* warn if sqsize is lower than queue_size */
+		dev_warn(ctrl->ctrl.device,
+			"queue_size %zu > ctrl sqsize %u, clamping down\n",
+			opts->queue_size, ctrl->ctrl.sqsize + 1);
+		opts->queue_size = ctrl->ctrl.sqsize + 1;
+	}
+
 	ret = nvme_fc_init_aen_ops(ctrl);
 	if (ret)
 		goto out_term_aen_ops;
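
All of the fc.c changes above apply one convention consistently: the NVMe sqsize field is a zero's-based value, so a queue holding qsize entries goes on the wire as qsize - 1, and converting back to an element count costs + 1. Worked through (second line illustrative, nr_entries is not a real variable here):

	assoc_rqst->assoc_cmd.sqsize = cpu_to_be16(qsize - 1);	/* 128 entries -> 127 on the wire */
	nr_entries = ctrl->ctrl.sqsize + 1;			/* 127 on the wire -> 128 entries */

The new clamp likewise compares opts->queue_size against sqsize + 1, so both sides of the comparison are real element counts.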
diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
index b7e5c6db4d92..060f69e03427 100644
--- a/drivers/nvme/host/multipath.c
+++ b/drivers/nvme/host/multipath.c
@@ -210,25 +210,6 @@ void nvme_mpath_add_disk(struct nvme_ns_head *head)
 	mutex_unlock(&head->subsys->lock);
 }
 
-void nvme_mpath_add_disk_links(struct nvme_ns *ns)
-{
-	struct kobject *slave_disk_kobj, *holder_disk_kobj;
-
-	if (!ns->head->disk)
-		return;
-
-	slave_disk_kobj = &disk_to_dev(ns->disk)->kobj;
-	if (sysfs_create_link(ns->head->disk->slave_dir, slave_disk_kobj,
-			      kobject_name(slave_disk_kobj)))
-		return;
-
-	holder_disk_kobj = &disk_to_dev(ns->head->disk)->kobj;
-	if (sysfs_create_link(ns->disk->part0.holder_dir, holder_disk_kobj,
-			      kobject_name(holder_disk_kobj)))
-		sysfs_remove_link(ns->head->disk->slave_dir,
-				  kobject_name(slave_disk_kobj));
-}
-
 void nvme_mpath_remove_disk(struct nvme_ns_head *head)
 {
 	if (!head->disk)
@@ -243,14 +224,3 @@ void nvme_mpath_remove_disk(struct nvme_ns_head *head)
 	blk_cleanup_queue(head->disk->queue);
 	put_disk(head->disk);
 }
-
-void nvme_mpath_remove_disk_links(struct nvme_ns *ns)
-{
-	if (!ns->head->disk)
-		return;
-
-	sysfs_remove_link(ns->disk->part0.holder_dir,
-			  kobject_name(&disk_to_dev(ns->head->disk)->kobj));
-	sysfs_remove_link(ns->head->disk->slave_dir,
-			  kobject_name(&disk_to_dev(ns->disk)->kobj));
-}
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index 0521e4707d1c..d733b14ede9d 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -410,9 +410,7 @@ bool nvme_req_needs_failover(struct request *req, blk_status_t error);
void nvme_kick_requeue_lists(struct nvme_ctrl *ctrl);
int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl,struct nvme_ns_head *head);
void nvme_mpath_add_disk(struct nvme_ns_head *head);
-void nvme_mpath_add_disk_links(struct nvme_ns *ns);
void nvme_mpath_remove_disk(struct nvme_ns_head *head);
-void nvme_mpath_remove_disk_links(struct nvme_ns *ns);
 
static inline void nvme_mpath_clear_current_path(struct nvme_ns *ns)
{
@@ -454,12 +452,6 @@ static inline void nvme_mpath_add_disk(struct nvme_ns_head *head)
static inline void nvme_mpath_remove_disk(struct nvme_ns_head *head)
{
}
-static inline void nvme_mpath_add_disk_links(struct nvme_ns *ns)
-{
-}
-static inline void nvme_mpath_remove_disk_links(struct nvme_ns *ns)
-{
-}
static inline void nvme_mpath_clear_current_path(struct nvme_ns *ns)
{
}
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 5933a5c732e8..b6f43b738f03 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -1153,12 +1153,6 @@ static bool nvme_should_reset(struct nvme_dev *dev, u32 csts)
 	if (!(csts & NVME_CSTS_CFS) && !nssro)
 		return false;
 
-	/* If PCI error recovery process is happening, we cannot reset or
-	 * the recovery mechanism will surely fail.
-	 */
-	if (pci_channel_offline(to_pci_dev(dev->dev)))
-		return false;
-
 	return true;
 }
 
@@ -1189,6 +1183,13 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved)
 	struct nvme_command cmd;
 	u32 csts = readl(dev->bar + NVME_REG_CSTS);
 
+	/* If PCI error recovery process is happening, we cannot reset or
+	 * the recovery mechanism will surely fail.
+	 */
+	mb();
+	if (pci_channel_offline(to_pci_dev(dev->dev)))
+		return BLK_EH_RESET_TIMER;
+
 	/*
 	 * Reset immediately if the controller is failed
 	 */
@@ -1913,7 +1914,7 @@ static int nvme_setup_io_queues(struct nvme_dev *dev)
 	int result, nr_io_queues;
 	unsigned long size;
 
-	nr_io_queues = num_present_cpus();
+	nr_io_queues = num_possible_cpus();
 	result = nvme_set_queue_count(&dev->ctrl, &nr_io_queues);
 	if (result < 0)
 		return result;
diff --git a/drivers/pci/dwc/pcie-designware-host.c b/drivers/pci/dwc/pcie-designware-host.c
index 8de2d5c69b1d..dc9303abda42 100644
--- a/drivers/pci/dwc/pcie-designware-host.c
+++ b/drivers/pci/dwc/pcie-designware-host.c
@@ -613,7 +613,7 @@ void dw_pcie_setup_rc(struct pcie_port *pp)
 	/* setup bus numbers */
 	val = dw_pcie_readl_dbi(pci, PCI_PRIMARY_BUS);
 	val &= 0xff000000;
-	val |= 0x00010100;
+	val |= 0x00ff0100;
 	dw_pcie_writel_dbi(pci, PCI_PRIMARY_BUS, val);
 
 	/* setup command register */
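
The dword written to PCI_PRIMARY_BUS packs three bus numbers plus the secondary latency timer, one per byte, which is exactly what the new constant changes: the subordinate bus (byte 2) goes from 0x01 to 0xff so configuration cycles can reach buses deeper in the hierarchy. Spelled out byte by byte:

	val &= 0xff000000;	/* preserve secondary latency timer (byte 3) */
	val |= 0x00 <<  0;	/* primary bus      = 0x00 */
	val |= 0x01 <<  8;	/* secondary bus    = 0x01 */
	val |= 0xff << 16;	/* subordinate bus  = 0xff */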
diff --git a/drivers/perf/arm_pmu.c b/drivers/perf/arm_pmu.c
index 0c2ed11c0603..f63db346c219 100644
--- a/drivers/perf/arm_pmu.c
+++ b/drivers/perf/arm_pmu.c
@@ -638,7 +638,7 @@ static int arm_perf_teardown_cpu(unsigned int cpu, struct hlist_node *node)
 		if (irq_is_percpu_devid(irq))
 			disable_percpu_irq(irq);
 		else
-			disable_irq(irq);
+			disable_irq_nosync(irq);
 	}
 
 	per_cpu(cpu_armpmu, cpu) = NULL;
diff --git a/drivers/platform/chrome/chromeos_laptop.c b/drivers/platform/chrome/chromeos_laptop.c
index 6dec6ab13300..d8599736a41a 100644
--- a/drivers/platform/chrome/chromeos_laptop.c
+++ b/drivers/platform/chrome/chromeos_laptop.c
@@ -423,7 +423,7 @@ static int chromeos_laptop_probe(struct platform_device *pdev)
 	return ret;
 }
 
-static const struct chromeos_laptop samsung_series_5_550 = {
+static struct chromeos_laptop samsung_series_5_550 = {
 	.i2c_peripherals = {
 		/* Touchpad. */
 		{ .add = setup_cyapa_tp, I2C_ADAPTER_SMBUS },
@@ -432,14 +432,14 @@ static const struct chromeos_laptop samsung_series_5_550 = {
 	},
 };
 
-static const struct chromeos_laptop samsung_series_5 = {
+static struct chromeos_laptop samsung_series_5 = {
 	.i2c_peripherals = {
 		/* Light Sensor. */
 		{ .add = setup_tsl2583_als, I2C_ADAPTER_SMBUS },
 	},
 };
 
-static const struct chromeos_laptop chromebook_pixel = {
+static struct chromeos_laptop chromebook_pixel = {
 	.i2c_peripherals = {
 		/* Touch Screen. */
 		{ .add = setup_atmel_1664s_ts, I2C_ADAPTER_PANEL },
@@ -450,14 +450,14 @@ static const struct chromeos_laptop chromebook_pixel = {
 	},
 };
 
-static const struct chromeos_laptop hp_chromebook_14 = {
+static struct chromeos_laptop hp_chromebook_14 = {
 	.i2c_peripherals = {
 		/* Touchpad. */
 		{ .add = setup_cyapa_tp, I2C_ADAPTER_DESIGNWARE_0 },
 	},
 };
 
-static const struct chromeos_laptop dell_chromebook_11 = {
+static struct chromeos_laptop dell_chromebook_11 = {
 	.i2c_peripherals = {
 		/* Touchpad. */
 		{ .add = setup_cyapa_tp, I2C_ADAPTER_DESIGNWARE_0 },
@@ -466,28 +466,28 @@ static const struct chromeos_laptop dell_chromebook_11 = {
 	},
 };
 
-static const struct chromeos_laptop toshiba_cb35 = {
+static struct chromeos_laptop toshiba_cb35 = {
 	.i2c_peripherals = {
 		/* Touchpad. */
 		{ .add = setup_cyapa_tp, I2C_ADAPTER_DESIGNWARE_0 },
 	},
 };
 
-static const struct chromeos_laptop acer_c7_chromebook = {
+static struct chromeos_laptop acer_c7_chromebook = {
 	.i2c_peripherals = {
 		/* Touchpad. */
 		{ .add = setup_cyapa_tp, I2C_ADAPTER_SMBUS },
 	},
 };
 
-static const struct chromeos_laptop acer_ac700 = {
+static struct chromeos_laptop acer_ac700 = {
 	.i2c_peripherals = {
 		/* Light Sensor. */
 		{ .add = setup_tsl2563_als, I2C_ADAPTER_SMBUS },
 	},
 };
 
-static const struct chromeos_laptop acer_c720 = {
+static struct chromeos_laptop acer_c720 = {
 	.i2c_peripherals = {
 		/* Touchscreen. */
 		{ .add = setup_atmel_1664s_ts, I2C_ADAPTER_DESIGNWARE_1 },
@@ -500,14 +500,14 @@ static const struct chromeos_laptop acer_c720 = {
 	},
 };
 
-static const struct chromeos_laptop hp_pavilion_14_chromebook = {
+static struct chromeos_laptop hp_pavilion_14_chromebook = {
 	.i2c_peripherals = {
 		/* Touchpad. */
 		{ .add = setup_cyapa_tp, I2C_ADAPTER_SMBUS },
 	},
 };
 
-static const struct chromeos_laptop cr48 = {
+static struct chromeos_laptop cr48 = {
 	.i2c_peripherals = {
 		/* Light Sensor. */
 		{ .add = setup_tsl2563_als, I2C_ADAPTER_SMBUS },
diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig
index 9a8f96465cdc..d10ffe51da24 100644
--- a/drivers/platform/x86/Kconfig
+++ b/drivers/platform/x86/Kconfig
@@ -105,31 +105,44 @@ config ASUS_LAPTOP
 
	  If you have an ACPI-compatible ASUS laptop, say Y or M here.
 
+#
+# If the DELL_SMBIOS_SMM feature is enabled, the DELL_SMBIOS driver
+# becomes dependent on the DCDBAS driver. The "depends" line prevents a
+# configuration where DELL_SMBIOS=y while DCDBAS=m.
+#
 config DELL_SMBIOS
-	tristate
+	tristate "Dell SMBIOS driver"
+	depends on DCDBAS || DCDBAS=n
+	---help---
+	  This provides support for the Dell SMBIOS calling interface.
+	  If you have a Dell computer you should enable this option.
+
+	  Be sure to select at least one backend for it to work properly.
 
 config DELL_SMBIOS_WMI
-	tristate "Dell SMBIOS calling interface (WMI implementation)"
+	bool "Dell SMBIOS driver WMI backend"
+	default y
	depends on ACPI_WMI
	select DELL_WMI_DESCRIPTOR
-	select DELL_SMBIOS
+	depends on DELL_SMBIOS
	---help---
	  This provides an implementation for the Dell SMBIOS calling interface
	  communicated over ACPI-WMI.
 
-	  If you have a Dell computer from >2007 you should say Y or M here.
+	  If you have a Dell computer from >2007 you should say Y here.
	  If you aren't sure and this module doesn't work for your computer
	  it just won't load.
 
 config DELL_SMBIOS_SMM
-	tristate "Dell SMBIOS calling interface (SMM implementation)"
+	bool "Dell SMBIOS driver SMM backend"
+	default y
	depends on DCDBAS
-	select DELL_SMBIOS
+	depends on DELL_SMBIOS
	---help---
	  This provides an implementation for the Dell SMBIOS calling interface
	  communicated over SMI/SMM.
 
-	  If you have a Dell computer from <=2017 you should say Y or M here.
+	  If you have a Dell computer from <=2017 you should say Y here.
	  If you aren't sure and this module doesn't work for your computer
	  it just won't load.
 
diff --git a/drivers/platform/x86/Makefile b/drivers/platform/x86/Makefile
index c388608ad2a3..2ba6cb795338 100644
--- a/drivers/platform/x86/Makefile
+++ b/drivers/platform/x86/Makefile
@@ -13,8 +13,9 @@ obj-$(CONFIG_MSI_LAPTOP) += msi-laptop.o
 obj-$(CONFIG_ACPI_CMPC)		+= classmate-laptop.o
 obj-$(CONFIG_COMPAL_LAPTOP)	+= compal-laptop.o
 obj-$(CONFIG_DELL_SMBIOS)	+= dell-smbios.o
-obj-$(CONFIG_DELL_SMBIOS_WMI)	+= dell-smbios-wmi.o
-obj-$(CONFIG_DELL_SMBIOS_SMM)	+= dell-smbios-smm.o
+dell-smbios-objs		:= dell-smbios-base.o
+dell-smbios-$(CONFIG_DELL_SMBIOS_WMI)	+= dell-smbios-wmi.o
+dell-smbios-$(CONFIG_DELL_SMBIOS_SMM)	+= dell-smbios-smm.o
 obj-$(CONFIG_DELL_LAPTOP)	+= dell-laptop.o
 obj-$(CONFIG_DELL_WMI)		+= dell-wmi.o
 obj-$(CONFIG_DELL_WMI_DESCRIPTOR)	+= dell-wmi-descriptor.o
diff --git a/drivers/platform/x86/dell-smbios.c b/drivers/platform/x86/dell-smbios-base.c
index 8541cde4cb7d..5bcf8a18f785 100644
--- a/drivers/platform/x86/dell-smbios.c
+++ b/drivers/platform/x86/dell-smbios-base.c
@@ -36,7 +36,7 @@ static DEFINE_MUTEX(smbios_mutex);
 struct smbios_device {
 	struct list_head list;
 	struct device *device;
-	int (*call_fn)(struct calling_interface_buffer *);
+	int (*call_fn)(struct calling_interface_buffer *arg);
 };
 
 struct smbios_call {
@@ -352,8 +352,10 @@ static void __init parse_da_table(const struct dmi_header *dm)
 	struct calling_interface_structure *table =
 		container_of(dm, struct calling_interface_structure, header);
 
-	/* 4 bytes of table header, plus 7 bytes of Dell header, plus at least
-	   6 bytes of entry */
+	/*
+	 * 4 bytes of table header, plus 7 bytes of Dell header
+	 * plus at least 6 bytes of entry
+	 */
 
 	if (dm->length < 17)
 		return;
@@ -554,7 +556,7 @@ static void free_group(struct platform_device *pdev)
 static int __init dell_smbios_init(void)
 {
 	const struct dmi_device *valid;
-	int ret;
+	int ret, wmi, smm;
 
 	valid = dmi_find_device(DMI_DEV_TYPE_OEM_STRING, "Dell System", NULL);
 	if (!valid) {
@@ -589,8 +591,24 @@ static int __init dell_smbios_init(void)
 	if (ret)
 		goto fail_create_group;
 
+	/* register backends */
+	wmi = init_dell_smbios_wmi();
+	if (wmi)
+		pr_debug("Failed to initialize WMI backend: %d\n", wmi);
+	smm = init_dell_smbios_smm();
+	if (smm)
+		pr_debug("Failed to initialize SMM backend: %d\n", smm);
+	if (wmi && smm) {
+		pr_err("No SMBIOS backends available (wmi: %d, smm: %d)\n",
+			wmi, smm);
+		goto fail_sysfs;
+	}
+
 	return 0;
 
+fail_sysfs:
+	free_group(platform_device);
+
fail_create_group:
 	platform_device_del(platform_device);
 
@@ -607,6 +625,8 @@ fail_platform_driver:
 
 static void __exit dell_smbios_exit(void)
 {
+	exit_dell_smbios_wmi();
+	exit_dell_smbios_smm();
 	mutex_lock(&smbios_mutex);
 	if (platform_device) {
 		free_group(platform_device);
@@ -623,5 +643,6 @@ module_exit(dell_smbios_exit);
 MODULE_AUTHOR("Matthew Garrett <mjg@redhat.com>");
 MODULE_AUTHOR("Gabriele Mazzotta <gabriele.mzt@gmail.com>");
 MODULE_AUTHOR("Pali Rohár <pali.rohar@gmail.com>");
+MODULE_AUTHOR("Mario Limonciello <mario.limonciello@dell.com>");
 MODULE_DESCRIPTION("Common functions for kernel modules using Dell SMBIOS");
 MODULE_LICENSE("GPL");
diff --git a/drivers/platform/x86/dell-smbios-smm.c b/drivers/platform/x86/dell-smbios-smm.c
index 89f65c4651a0..e9e9da556318 100644
--- a/drivers/platform/x86/dell-smbios-smm.c
+++ b/drivers/platform/x86/dell-smbios-smm.c
@@ -58,7 +58,7 @@ static const struct dmi_system_id dell_device_table[] __initconst = {
 };
 MODULE_DEVICE_TABLE(dmi, dell_device_table);
 
-static void __init parse_da_table(const struct dmi_header *dm)
+static void parse_da_table(const struct dmi_header *dm)
 {
 	struct calling_interface_structure *table =
 		container_of(dm, struct calling_interface_structure, header);
@@ -73,7 +73,7 @@ static void __init parse_da_table(const struct dmi_header *dm)
 	da_command_code = table->cmdIOCode;
 }
 
-static void __init find_cmd_address(const struct dmi_header *dm, void *dummy)
+static void find_cmd_address(const struct dmi_header *dm, void *dummy)
 {
 	switch (dm->type) {
 	case 0xda: /* Calling interface */
@@ -128,7 +128,7 @@ static bool test_wsmt_enabled(void)
 	return false;
 }
 
-static int __init dell_smbios_smm_init(void)
+int init_dell_smbios_smm(void)
 {
 	int ret;
 	/*
@@ -176,7 +176,7 @@ fail_platform_device_alloc:
 	return ret;
 }
 
-static void __exit dell_smbios_smm_exit(void)
+void exit_dell_smbios_smm(void)
 {
 	if (platform_device) {
 		dell_smbios_unregister_device(&platform_device->dev);
@@ -184,13 +184,3 @@ static void __exit dell_smbios_smm_exit(void)
 		free_page((unsigned long)buffer);
 	}
 }
-
-subsys_initcall(dell_smbios_smm_init);
-module_exit(dell_smbios_smm_exit);
-
-MODULE_AUTHOR("Matthew Garrett <mjg@redhat.com>");
-MODULE_AUTHOR("Gabriele Mazzotta <gabriele.mzt@gmail.com>");
-MODULE_AUTHOR("Pali Rohár <pali.rohar@gmail.com>");
-MODULE_AUTHOR("Mario Limonciello <mario.limonciello@dell.com>");
-MODULE_DESCRIPTION("Dell SMBIOS communications over SMI");
-MODULE_LICENSE("GPL");
diff --git a/drivers/platform/x86/dell-smbios-wmi.c b/drivers/platform/x86/dell-smbios-wmi.c
index 609557aa5868..fbefedb1c172 100644
--- a/drivers/platform/x86/dell-smbios-wmi.c
+++ b/drivers/platform/x86/dell-smbios-wmi.c
@@ -228,7 +228,7 @@ static const struct wmi_device_id dell_smbios_wmi_id_table[] = {
 	{ },
 };
 
-static void __init parse_b1_table(const struct dmi_header *dm)
+static void parse_b1_table(const struct dmi_header *dm)
 {
 	struct misc_bios_flags_structure *flags =
 		container_of(dm, struct misc_bios_flags_structure, header);
@@ -242,7 +242,7 @@ static void __init parse_b1_table(const struct dmi_header *dm)
 	wmi_supported = 1;
 }
 
-static void __init find_b1(const struct dmi_header *dm, void *dummy)
+static void find_b1(const struct dmi_header *dm, void *dummy)
 {
 	switch (dm->type) {
 	case 0xb1: /* misc bios flags */
@@ -261,7 +261,7 @@ static struct wmi_driver dell_smbios_wmi_driver = {
 	.filter_callback = dell_smbios_wmi_filter,
 };
 
-static int __init init_dell_smbios_wmi(void)
+int init_dell_smbios_wmi(void)
 {
 	dmi_walk(find_b1, NULL);
 
@@ -271,15 +271,9 @@ static int __init init_dell_smbios_wmi(void)
 	return wmi_driver_register(&dell_smbios_wmi_driver);
 }
 
-static void __exit exit_dell_smbios_wmi(void)
+void exit_dell_smbios_wmi(void)
 {
 	wmi_driver_unregister(&dell_smbios_wmi_driver);
 }
 
-module_init(init_dell_smbios_wmi);
-module_exit(exit_dell_smbios_wmi);
-
 MODULE_ALIAS("wmi:" DELL_WMI_SMBIOS_GUID);
-MODULE_AUTHOR("Mario Limonciello <mario.limonciello@dell.com>");
-MODULE_DESCRIPTION("Dell SMBIOS communications over WMI");
-MODULE_LICENSE("GPL");
diff --git a/drivers/platform/x86/dell-smbios.h b/drivers/platform/x86/dell-smbios.h
index 138d478d9adc..d8adaf959740 100644
--- a/drivers/platform/x86/dell-smbios.h
+++ b/drivers/platform/x86/dell-smbios.h
@@ -75,4 +75,29 @@ int dell_laptop_register_notifier(struct notifier_block *nb);
 int dell_laptop_unregister_notifier(struct notifier_block *nb);
 void dell_laptop_call_notifier(unsigned long action, void *data);
 
-#endif
+/* for the supported backends */
+#ifdef CONFIG_DELL_SMBIOS_WMI
+int init_dell_smbios_wmi(void);
+void exit_dell_smbios_wmi(void);
+#else /* CONFIG_DELL_SMBIOS_WMI */
+static inline int init_dell_smbios_wmi(void)
+{
+	return -ENODEV;
+}
+static inline void exit_dell_smbios_wmi(void)
+{}
+#endif /* CONFIG_DELL_SMBIOS_WMI */
+
+#ifdef CONFIG_DELL_SMBIOS_SMM
+int init_dell_smbios_smm(void);
+void exit_dell_smbios_smm(void);
+#else /* CONFIG_DELL_SMBIOS_SMM */
+static inline int init_dell_smbios_smm(void)
+{
+	return -ENODEV;
+}
+static inline void exit_dell_smbios_smm(void)
+{}
+#endif /* CONFIG_DELL_SMBIOS_SMM */
+
+#endif /* _DELL_SMBIOS_H_ */
diff --git a/drivers/video/fbdev/sbuslib.c b/drivers/video/fbdev/sbuslib.c
index af6fc97f4ba4..a436d44f1b7f 100644
--- a/drivers/video/fbdev/sbuslib.c
+++ b/drivers/video/fbdev/sbuslib.c
@@ -122,7 +122,7 @@ int sbusfb_ioctl_helper(unsigned long cmd, unsigned long arg,
 		unsigned char __user *ured;
 		unsigned char __user *ugreen;
 		unsigned char __user *ublue;
-		int index, count, i;
+		unsigned int index, count, i;
 
 		if (get_user(index, &c->index) ||
 		    __get_user(count, &c->count) ||
@@ -161,7 +161,7 @@ int sbusfb_ioctl_helper(unsigned long cmd, unsigned long arg,
 		unsigned char __user *ugreen;
 		unsigned char __user *ublue;
 		struct fb_cmap *cmap = &info->cmap;
-		int index, count, i;
+		unsigned int index, count, i;
 		u8 red, green, blue;
 
 		if (get_user(index, &c->index) ||
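
index and count arrive straight from userspace via get_user(), so their signedness is the whole bug: a negative int slips past any upper-bound-only range check and then indexes backwards, while as an unsigned value it wraps to a huge number and the same check rejects it. A sketch of the failure mode (the exact bounds check in sbuslib may differ; cmap->len is the real struct fb_cmap field):

	int index = -1;			/* old declaration, attacker-controlled */
	if (index > cmap->len)		/* -1 > len is false: check passes */
		return -EINVAL;
	/* ...a negative index now walks off the front of the arrays... */

	unsigned int uindex = -1;	/* new declaration: wraps to UINT_MAX */
	if (uindex > cmap->len)		/* UINT_MAX > len: rejected */
		return -EINVAL;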
diff --git a/drivers/watchdog/f71808e_wdt.c b/drivers/watchdog/f71808e_wdt.c
index e0678c14480f..3a33c5344bd5 100644
--- a/drivers/watchdog/f71808e_wdt.c
+++ b/drivers/watchdog/f71808e_wdt.c
@@ -566,7 +566,8 @@ static ssize_t watchdog_write(struct file *file, const char __user *buf,
 			char c;
 			if (get_user(c, buf + i))
 				return -EFAULT;
-			expect_close = (c == 'V');
+			if (c == 'V')
+				expect_close = true;
 		}
 
 	/* Properly order writes across fork()ed processes */
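
The old assignment made expect_close track only the last byte written, so the common "V\n" sequence armed magic close on the 'V' and immediately disarmed it on the '\n'. The fix latches the flag instead:

	expect_close = (c == 'V');	/* old: a trailing '\n' clears it again */

	if (c == 'V')			/* new: any 'V' in the write stays armed */
		expect_close = true;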
diff --git a/drivers/watchdog/hpwdt.c b/drivers/watchdog/hpwdt.c
index f1f00dfc0e68..b0a158073abd 100644
--- a/drivers/watchdog/hpwdt.c
+++ b/drivers/watchdog/hpwdt.c
@@ -28,16 +28,7 @@
28#include <linux/types.h> 28#include <linux/types.h>
29#include <linux/uaccess.h> 29#include <linux/uaccess.h>
30#include <linux/watchdog.h> 30#include <linux/watchdog.h>
31#ifdef CONFIG_HPWDT_NMI_DECODING
32#include <linux/dmi.h>
33#include <linux/spinlock.h>
34#include <linux/nmi.h>
35#include <linux/kdebug.h>
36#include <linux/notifier.h>
37#include <asm/set_memory.h>
38#endif /* CONFIG_HPWDT_NMI_DECODING */
39#include <asm/nmi.h> 31#include <asm/nmi.h>
40#include <asm/frame.h>
41 32
42#define HPWDT_VERSION "1.4.0" 33#define HPWDT_VERSION "1.4.0"
43#define SECS_TO_TICKS(secs) ((secs) * 1000 / 128) 34#define SECS_TO_TICKS(secs) ((secs) * 1000 / 128)
@@ -48,6 +39,9 @@
48static unsigned int soft_margin = DEFAULT_MARGIN; /* in seconds */ 39static unsigned int soft_margin = DEFAULT_MARGIN; /* in seconds */
49static unsigned int reload; /* the computed soft_margin */ 40static unsigned int reload; /* the computed soft_margin */
50static bool nowayout = WATCHDOG_NOWAYOUT; 41static bool nowayout = WATCHDOG_NOWAYOUT;
42#ifdef CONFIG_HPWDT_NMI_DECODING
43static unsigned int allow_kdump = 1;
44#endif
51static char expect_release; 45static char expect_release;
52static unsigned long hpwdt_is_open; 46static unsigned long hpwdt_is_open;
53 47
@@ -63,373 +57,6 @@ static const struct pci_device_id hpwdt_devices[] = {
 };
 MODULE_DEVICE_TABLE(pci, hpwdt_devices);
 
-#ifdef CONFIG_HPWDT_NMI_DECODING
-#define PCI_BIOS32_SD_VALUE 0x5F32335F /* "_32_" */
-#define CRU_BIOS_SIGNATURE_VALUE 0x55524324
-#define PCI_BIOS32_PARAGRAPH_LEN 16
-#define PCI_ROM_BASE1 0x000F0000
-#define ROM_SIZE 0x10000
-
-struct bios32_service_dir {
-	u32 signature;
-	u32 entry_point;
-	u8 revision;
-	u8 length;
-	u8 checksum;
-	u8 reserved[5];
-};
-
-/* type 212 */
-struct smbios_cru64_info {
-	u8 type;
-	u8 byte_length;
-	u16 handle;
-	u32 signature;
-	u64 physical_address;
-	u32 double_length;
-	u32 double_offset;
-};
-#define SMBIOS_CRU64_INFORMATION 212
-
-/* type 219 */
-struct smbios_proliant_info {
-	u8 type;
-	u8 byte_length;
-	u16 handle;
-	u32 power_features;
-	u32 omega_features;
-	u32 reserved;
-	u32 misc_features;
-};
-#define SMBIOS_ICRU_INFORMATION 219
-
-
-struct cmn_registers {
-	union {
-		struct {
-			u8 ral;
-			u8 rah;
-			u16 rea2;
-		};
-		u32 reax;
-	} u1;
-	union {
-		struct {
-			u8 rbl;
-			u8 rbh;
-			u8 reb2l;
-			u8 reb2h;
-		};
-		u32 rebx;
-	} u2;
-	union {
-		struct {
-			u8 rcl;
-			u8 rch;
-			u16 rec2;
-		};
-		u32 recx;
-	} u3;
-	union {
-		struct {
-			u8 rdl;
-			u8 rdh;
-			u16 red2;
-		};
-		u32 redx;
-	} u4;
-
-	u32 resi;
-	u32 redi;
-	u16 rds;
-	u16 res;
-	u32 reflags;
-} __attribute__((packed));
-
-static unsigned int hpwdt_nmi_decoding;
-static unsigned int allow_kdump = 1;
-static unsigned int is_icru;
-static unsigned int is_uefi;
-static DEFINE_SPINLOCK(rom_lock);
-static void *cru_rom_addr;
-static struct cmn_registers cmn_regs;
-
-extern asmlinkage void asminline_call(struct cmn_registers *pi86Regs,
-				      unsigned long *pRomEntry);
-
-#ifdef CONFIG_X86_32
-/* --32 Bit Bios------------------------------------------------------------ */
-
-#define HPWDT_ARCH 32
-
-asm(".text \n\t"
-    ".align 4 \n\t"
-    ".globl asminline_call \n"
-    "asminline_call: \n\t"
-    "pushl %ebp \n\t"
-    "movl %esp, %ebp \n\t"
-    "pusha \n\t"
-    "pushf \n\t"
-    "push %es \n\t"
-    "push %ds \n\t"
-    "pop %es \n\t"
-    "movl 8(%ebp),%eax \n\t"
-    "movl 4(%eax),%ebx \n\t"
-    "movl 8(%eax),%ecx \n\t"
-    "movl 12(%eax),%edx \n\t"
-    "movl 16(%eax),%esi \n\t"
-    "movl 20(%eax),%edi \n\t"
-    "movl (%eax),%eax \n\t"
-    "push %cs \n\t"
-    "call *12(%ebp) \n\t"
-    "pushf \n\t"
-    "pushl %eax \n\t"
-    "movl 8(%ebp),%eax \n\t"
-    "movl %ebx,4(%eax) \n\t"
-    "movl %ecx,8(%eax) \n\t"
-    "movl %edx,12(%eax) \n\t"
-    "movl %esi,16(%eax) \n\t"
-    "movl %edi,20(%eax) \n\t"
-    "movw %ds,24(%eax) \n\t"
-    "movw %es,26(%eax) \n\t"
-    "popl %ebx \n\t"
-    "movl %ebx,(%eax) \n\t"
-    "popl %ebx \n\t"
-    "movl %ebx,28(%eax) \n\t"
-    "pop %es \n\t"
-    "popf \n\t"
-    "popa \n\t"
-    "leave \n\t"
-    "ret \n\t"
-    ".previous");
-
-
-/*
- * cru_detect
- *
- * Routine Description:
- *	This function uses the 32-bit BIOS Service Directory record to
- *	search for a $CRU record.
- *
- * Return Value:
- *	0 : SUCCESS
- *	<0 : FAILURE
- */
-static int cru_detect(unsigned long map_entry,
-	unsigned long map_offset)
-{
-	void *bios32_map;
-	unsigned long *bios32_entrypoint;
-	unsigned long cru_physical_address;
-	unsigned long cru_length;
-	unsigned long physical_bios_base = 0;
-	unsigned long physical_bios_offset = 0;
-	int retval = -ENODEV;
-
-	bios32_map = ioremap(map_entry, (2 * PAGE_SIZE));
-
-	if (bios32_map == NULL)
-		return -ENODEV;
-
-	bios32_entrypoint = bios32_map + map_offset;
-
-	cmn_regs.u1.reax = CRU_BIOS_SIGNATURE_VALUE;
-
-	set_memory_x((unsigned long)bios32_map, 2);
-	asminline_call(&cmn_regs, bios32_entrypoint);
-
-	if (cmn_regs.u1.ral != 0) {
-		pr_warn("Call succeeded but with an error: 0x%x\n",
-			cmn_regs.u1.ral);
-	} else {
-		physical_bios_base = cmn_regs.u2.rebx;
-		physical_bios_offset = cmn_regs.u4.redx;
-		cru_length = cmn_regs.u3.recx;
-		cru_physical_address =
-			physical_bios_base + physical_bios_offset;
-
-		/* If the values look OK, then map it in. */
-		if ((physical_bios_base + physical_bios_offset)) {
-			cru_rom_addr =
-				ioremap(cru_physical_address, cru_length);
-			if (cru_rom_addr) {
-				set_memory_x((unsigned long)cru_rom_addr & PAGE_MASK,
-					(cru_length + PAGE_SIZE - 1) >> PAGE_SHIFT);
-				retval = 0;
-			}
-		}
-
-		pr_debug("CRU Base Address: 0x%lx\n", physical_bios_base);
-		pr_debug("CRU Offset Address: 0x%lx\n", physical_bios_offset);
-		pr_debug("CRU Length: 0x%lx\n", cru_length);
-		pr_debug("CRU Mapped Address: %p\n", &cru_rom_addr);
-	}
-	iounmap(bios32_map);
-	return retval;
-}
-
-/*
- * bios_checksum
- */
-static int bios_checksum(const char __iomem *ptr, int len)
-{
-	char sum = 0;
-	int i;
-
-	/*
-	 * calculate checksum of size bytes. This should add up
-	 * to zero if we have a valid header.
-	 */
-	for (i = 0; i < len; i++)
-		sum += ptr[i];
-
-	return ((sum == 0) && (len > 0));
-}
-
-/*
- * bios32_present
- *
- * Routine Description:
- *	This function finds the 32-bit BIOS Service Directory
- *
- * Return Value:
- *	0 : SUCCESS
- *	<0 : FAILURE
- */
-static int bios32_present(const char __iomem *p)
-{
-	struct bios32_service_dir *bios_32_ptr;
-	int length;
-	unsigned long map_entry, map_offset;
-
-	bios_32_ptr = (struct bios32_service_dir *) p;
-
-	/*
-	 * Search for signature by checking equal to the swizzled value
-	 * instead of calling another routine to perform a strcmp.
-	 */
-	if (bios_32_ptr->signature == PCI_BIOS32_SD_VALUE) {
-		length = bios_32_ptr->length * PCI_BIOS32_PARAGRAPH_LEN;
-		if (bios_checksum(p, length)) {
-			/*
-			 * According to the spec, we're looking for the
-			 * first 4KB-aligned address below the entrypoint
-			 * listed in the header. The Service Directory code
-			 * is guaranteed to occupy no more than 2 4KB pages.
-			 */
-			map_entry = bios_32_ptr->entry_point & ~(PAGE_SIZE - 1);
-			map_offset = bios_32_ptr->entry_point - map_entry;
-
-			return cru_detect(map_entry, map_offset);
-		}
-	}
-	return -ENODEV;
-}
-
-static int detect_cru_service(void)
-{
-	char __iomem *p, *q;
-	int rc = -1;
-
-	/*
-	 * Search from 0x0f0000 through 0x0fffff, inclusive.
-	 */
-	p = ioremap(PCI_ROM_BASE1, ROM_SIZE);
-	if (p == NULL)
-		return -ENOMEM;
-
-	for (q = p; q < p + ROM_SIZE; q += 16) {
-		rc = bios32_present(q);
-		if (!rc)
-			break;
-	}
-	iounmap(p);
-	return rc;
-}
-/* ------------------------------------------------------------------------- */
-#endif /* CONFIG_X86_32 */
-#ifdef CONFIG_X86_64
-/* --64 Bit Bios------------------------------------------------------------ */
-
-#define HPWDT_ARCH 64
-
-asm(".text \n\t"
-    ".align 4 \n\t"
-    ".globl asminline_call \n\t"
-    ".type asminline_call, @function \n\t"
-    "asminline_call: \n\t"
-    FRAME_BEGIN
-    "pushq %rax \n\t"
-    "pushq %rbx \n\t"
-    "pushq %rdx \n\t"
-    "pushq %r12 \n\t"
-    "pushq %r9 \n\t"
-    "movq %rsi, %r12 \n\t"
-    "movq %rdi, %r9 \n\t"
-    "movl 4(%r9),%ebx \n\t"
-    "movl 8(%r9),%ecx \n\t"
-    "movl 12(%r9),%edx \n\t"
-    "movl 16(%r9),%esi \n\t"
-    "movl 20(%r9),%edi \n\t"
-    "movl (%r9),%eax \n\t"
-    "call *%r12 \n\t"
-    "pushfq \n\t"
-    "popq %r12 \n\t"
-    "movl %eax, (%r9) \n\t"
-    "movl %ebx, 4(%r9) \n\t"
-    "movl %ecx, 8(%r9) \n\t"
-    "movl %edx, 12(%r9) \n\t"
-    "movl %esi, 16(%r9) \n\t"
-    "movl %edi, 20(%r9) \n\t"
-    "movq %r12, %rax \n\t"
-    "movl %eax, 28(%r9) \n\t"
-    "popq %r9 \n\t"
-    "popq %r12 \n\t"
-    "popq %rdx \n\t"
-    "popq %rbx \n\t"
-    "popq %rax \n\t"
-    FRAME_END
-    "ret \n\t"
-    ".previous");
-
-/*
- * dmi_find_cru
- *
- * Routine Description:
- *	This function checks whether or not a SMBIOS/DMI record is
- *	the 64bit CRU info or not
- */
-static void dmi_find_cru(const struct dmi_header *dm, void *dummy)
-{
-	struct smbios_cru64_info *smbios_cru64_ptr;
-	unsigned long cru_physical_address;
-
-	if (dm->type == SMBIOS_CRU64_INFORMATION) {
-		smbios_cru64_ptr = (struct smbios_cru64_info *) dm;
-		if (smbios_cru64_ptr->signature == CRU_BIOS_SIGNATURE_VALUE) {
-			cru_physical_address =
-				smbios_cru64_ptr->physical_address +
-				smbios_cru64_ptr->double_offset;
-			cru_rom_addr = ioremap(cru_physical_address,
-				smbios_cru64_ptr->double_length);
-			set_memory_x((unsigned long)cru_rom_addr & PAGE_MASK,
-				smbios_cru64_ptr->double_length >> PAGE_SHIFT);
-		}
-	}
-}
-
-static int detect_cru_service(void)
-{
-	cru_rom_addr = NULL;
-
-	dmi_walk(dmi_find_cru, NULL);
-
-	/* if cru_rom_addr has been set then we found a CRU service */
-	return ((cru_rom_addr != NULL) ? 0 : -ENODEV);
-}
-/* ------------------------------------------------------------------------- */
-#endif /* CONFIG_X86_64 */
-#endif /* CONFIG_HPWDT_NMI_DECODING */
 
 /*
  * Watchdog operations
@@ -486,30 +113,12 @@ static int hpwdt_my_nmi(void)
  */
 static int hpwdt_pretimeout(unsigned int ulReason, struct pt_regs *regs)
 {
-	unsigned long rom_pl;
-	static int die_nmi_called;
-
-	if (!hpwdt_nmi_decoding)
-		return NMI_DONE;
-
 	if ((ulReason == NMI_UNKNOWN) && !hpwdt_my_nmi())
 		return NMI_DONE;
 
-	spin_lock_irqsave(&rom_lock, rom_pl);
-	if (!die_nmi_called && !is_icru && !is_uefi)
-		asminline_call(&cmn_regs, cru_rom_addr);
-	die_nmi_called = 1;
-	spin_unlock_irqrestore(&rom_lock, rom_pl);
-
 	if (allow_kdump)
 		hpwdt_stop();
 
-	if (!is_icru && !is_uefi) {
-		if (cmn_regs.u1.ral == 0) {
-			nmi_panic(regs, "An NMI occurred, but unable to determine source.\n");
-			return NMI_HANDLED;
-		}
-	}
 	nmi_panic(regs, "An NMI occurred. Depending on your system the reason "
 		"for the NMI is logged in any one of the following "
 		"resources:\n"
@@ -675,84 +284,11 @@ static struct miscdevice hpwdt_miscdev = {
  * Init & Exit
  */
 
-#ifdef CONFIG_HPWDT_NMI_DECODING
-#ifdef CONFIG_X86_LOCAL_APIC
-static void hpwdt_check_nmi_decoding(struct pci_dev *dev)
-{
-	/*
-	 * If nmi_watchdog is turned off then we can turn on
-	 * our nmi decoding capability.
-	 */
-	hpwdt_nmi_decoding = 1;
-}
-#else
-static void hpwdt_check_nmi_decoding(struct pci_dev *dev)
-{
-	dev_warn(&dev->dev, "NMI decoding is disabled. "
-		"Your kernel does not support a NMI Watchdog.\n");
-}
-#endif /* CONFIG_X86_LOCAL_APIC */
-
-/*
- * dmi_find_icru
- *
- * Routine Description:
- *	This function checks whether or not we are on an iCRU-based server.
- *	This check is independent of architecture and needs to be made for
- *	any ProLiant system.
- */
-static void dmi_find_icru(const struct dmi_header *dm, void *dummy)
-{
-	struct smbios_proliant_info *smbios_proliant_ptr;
-
-	if (dm->type == SMBIOS_ICRU_INFORMATION) {
-		smbios_proliant_ptr = (struct smbios_proliant_info *) dm;
-		if (smbios_proliant_ptr->misc_features & 0x01)
-			is_icru = 1;
-		if (smbios_proliant_ptr->misc_features & 0x1400)
-			is_uefi = 1;
-	}
-}
 
 static int hpwdt_init_nmi_decoding(struct pci_dev *dev)
 {
+#ifdef CONFIG_HPWDT_NMI_DECODING
 	int retval;
-
-	/*
-	 * On typical CRU-based systems we need to map that service in
-	 * the BIOS. For 32 bit Operating Systems we need to go through
-	 * the 32 Bit BIOS Service Directory. For 64 bit Operating
-	 * Systems we get that service through SMBIOS.
-	 *
-	 * On systems that support the new iCRU service all we need to
-	 * do is call dmi_walk to get the supported flag value and skip
-	 * the old cru detect code.
-	 */
-	dmi_walk(dmi_find_icru, NULL);
-	if (!is_icru && !is_uefi) {
-
-		/*
-		 * We need to map the ROM to get the CRU service.
-		 * For 32 bit Operating Systems we need to go through the 32 Bit
-		 * BIOS Service Directory
-		 * For 64 bit Operating Systems we get that service through SMBIOS.
-		 */
-		retval = detect_cru_service();
-		if (retval < 0) {
-			dev_warn(&dev->dev,
-				"Unable to detect the %d Bit CRU Service.\n",
-				HPWDT_ARCH);
-			return retval;
-		}
-
-		/*
-		 * We know this is the only CRU call we need to make so lets keep as
-		 * few instructions as possible once the NMI comes in.
-		 */
-		cmn_regs.u1.rah = 0x0D;
-		cmn_regs.u1.ral = 0x02;
-	}
-
 	/*
 	 * Only one function can register for NMI_UNKNOWN
 	 */
@@ -780,45 +316,26 @@ error:
 	dev_warn(&dev->dev,
 		"Unable to register a die notifier (err=%d).\n",
 		retval);
-	if (cru_rom_addr)
-		iounmap(cru_rom_addr);
 	return retval;
+#endif /* CONFIG_HPWDT_NMI_DECODING */
+	return 0;
 }
 
 static void hpwdt_exit_nmi_decoding(void)
 {
+#ifdef CONFIG_HPWDT_NMI_DECODING
 	unregister_nmi_handler(NMI_UNKNOWN, "hpwdt");
 	unregister_nmi_handler(NMI_SERR, "hpwdt");
 	unregister_nmi_handler(NMI_IO_CHECK, "hpwdt");
-	if (cru_rom_addr)
-		iounmap(cru_rom_addr);
-}
-#else /* !CONFIG_HPWDT_NMI_DECODING */
-static void hpwdt_check_nmi_decoding(struct pci_dev *dev)
-{
-}
-
-static int hpwdt_init_nmi_decoding(struct pci_dev *dev)
-{
-	return 0;
+#endif
 }
 
-static void hpwdt_exit_nmi_decoding(void)
-{
-}
-#endif /* CONFIG_HPWDT_NMI_DECODING */
-
 static int hpwdt_init_one(struct pci_dev *dev,
 			  const struct pci_device_id *ent)
 {
 	int retval;
 
 	/*
-	 * Check if we can do NMI decoding or not
-	 */
-	hpwdt_check_nmi_decoding(dev);
-
-	/*
 	 * First let's find out if we are on an iLO2+ server. We will
 	 * not run on a legacy ASM box.
 	 * So we only support the G5 ProLiant servers and higher.
@@ -922,6 +439,6 @@ MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default="
 #ifdef CONFIG_HPWDT_NMI_DECODING
 module_param(allow_kdump, int, 0);
 MODULE_PARM_DESC(allow_kdump, "Start a kernel dump after NMI occurs");
-#endif /* !CONFIG_HPWDT_NMI_DECODING */
+#endif /* CONFIG_HPWDT_NMI_DECODING */
 
 module_pci_driver(hpwdt_driver);
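
The hpwdt rework above drops the legacy CRU/BIOS32 call path and keeps only NMI claiming plus panic behind CONFIG_HPWDT_NMI_DECODING. For orientation, this is the x86 NMI handler registration pattern the remaining code relies on -- a hedged sketch, not the driver's exact sequence:

	/* Claim unknown-source NMIs while the watchdog is armed. */
	retval = register_nmi_handler(NMI_UNKNOWN, hpwdt_pretimeout, 0, "hpwdt");
	/* ... and on teardown: */
	unregister_nmi_handler(NMI_UNKNOWN, "hpwdt");
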
diff --git a/drivers/watchdog/sbsa_gwdt.c b/drivers/watchdog/sbsa_gwdt.c
index 316c2eb122d2..e8bd9887c566 100644
--- a/drivers/watchdog/sbsa_gwdt.c
+++ b/drivers/watchdog/sbsa_gwdt.c
@@ -50,6 +50,7 @@
  */
 
 #include <linux/io.h>
+#include <linux/io-64-nonatomic-lo-hi.h>
 #include <linux/interrupt.h>
 #include <linux/module.h>
 #include <linux/moduleparam.h>
@@ -159,7 +160,7 @@ static unsigned int sbsa_gwdt_get_timeleft(struct watchdog_device *wdd)
 	    !(readl(gwdt->control_base + SBSA_GWDT_WCS) & SBSA_GWDT_WCS_WS0))
 		timeleft += readl(gwdt->control_base + SBSA_GWDT_WOR);
 
-	timeleft += readq(gwdt->control_base + SBSA_GWDT_WCV) -
+	timeleft += lo_hi_readq(gwdt->control_base + SBSA_GWDT_WCV) -
 		    arch_counter_get_cntvct();
 
 	do_div(timeleft, gwdt->clk);
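
readq() is unavailable (or not a single access) on 32-bit builds, which is why the hunk switches to the lo-hi helper and adds the include. Roughly what lo_hi_readq() does, per include/linux/io-64-nonatomic-lo-hi.h -- a sketch, not the verbatim header:

	static inline u64 lo_hi_readq_sketch(const volatile void __iomem *addr)
	{
		const volatile u32 __iomem *p = addr;
		u32 low, high;

		low = readl(p);		/* low 32 bits first */
		high = readl(p + 1);	/* then high 32 bits */
		return low + ((u64)high << 32);
	}
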
diff --git a/drivers/xen/xenbus/xenbus_probe.c b/drivers/xen/xenbus/xenbus_probe.c
index 74888cacd0b0..ec9eb4fba59c 100644
--- a/drivers/xen/xenbus/xenbus_probe.c
+++ b/drivers/xen/xenbus/xenbus_probe.c
@@ -466,8 +466,11 @@ int xenbus_probe_node(struct xen_bus_type *bus,
 
 	/* Register with generic device framework. */
 	err = device_register(&xendev->dev);
-	if (err)
+	if (err) {
+		put_device(&xendev->dev);
+		xendev = NULL;
 		goto fail;
+	}
 
 	return 0;
 fail:
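
The xenbus fix follows the general driver-core rule: once device_register() has run, the object is refcounted and must be released through put_device() (which invokes the release callback), never freed directly. The pattern, with a hypothetical mydev:

	err = device_register(&mydev->dev);
	if (err) {
		put_device(&mydev->dev);	/* runs release(), frees mydev */
		mydev = NULL;			/* never touch it afterwards */
		return err;
	}
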
diff --git a/fs/overlayfs/Kconfig b/fs/overlayfs/Kconfig
index 406e72de88f6..ce6ff5a0a6e4 100644
--- a/fs/overlayfs/Kconfig
+++ b/fs/overlayfs/Kconfig
@@ -24,6 +24,8 @@ config OVERLAY_FS_REDIRECT_DIR
 	  an overlay which has redirects on a kernel that doesn't support this
 	  feature will have unexpected results.
 
+	  If unsure, say N.
+
 config OVERLAY_FS_REDIRECT_ALWAYS_FOLLOW
 	bool "Overlayfs: follow redirects even if redirects are turned off"
 	default y
@@ -32,8 +34,13 @@ config OVERLAY_FS_REDIRECT_ALWAYS_FOLLOW
 	  Disable this to get a possibly more secure configuration, but that
 	  might not be backward compatible with previous kernels.
 
+	  If backward compatibility is not an issue, then it is safe and
+	  recommended to say N here.
+
 	  For more information, see Documentation/filesystems/overlayfs.txt
 
+	  If unsure, say Y.
+
 config OVERLAY_FS_INDEX
 	bool "Overlayfs: turn on inodes index feature by default"
 	depends on OVERLAY_FS
@@ -51,6 +58,8 @@ config OVERLAY_FS_INDEX
 	  That is, mounting an overlay which has an inodes index on a kernel
 	  that doesn't support this feature will have unexpected results.
 
+	  If unsure, say N.
+
 config OVERLAY_FS_NFS_EXPORT
 	bool "Overlayfs: turn on NFS export feature by default"
 	depends on OVERLAY_FS
@@ -72,3 +81,8 @@ config OVERLAY_FS_NFS_EXPORT
 	  Note, that the NFS export feature is not backward compatible.
 	  That is, mounting an overlay which has a full index on a kernel
 	  that doesn't support this feature will have unexpected results.
+
+	  Most users should say N here and enable this feature on a case-by-
+	  case basis with the "nfs_export=on" mount option.
+
+	  Say N unless you fully understand the consequences.
diff --git a/fs/overlayfs/export.c b/fs/overlayfs/export.c
index bb94ce9da5c8..87bd4148f4fb 100644
--- a/fs/overlayfs/export.c
+++ b/fs/overlayfs/export.c
@@ -19,6 +19,142 @@
 #include <linux/ratelimit.h>
 #include "overlayfs.h"
 
+static int ovl_encode_maybe_copy_up(struct dentry *dentry)
+{
+	int err;
+
+	if (ovl_dentry_upper(dentry))
+		return 0;
+
+	err = ovl_want_write(dentry);
+	if (!err) {
+		err = ovl_copy_up(dentry);
+		ovl_drop_write(dentry);
+	}
+
+	if (err) {
+		pr_warn_ratelimited("overlayfs: failed to copy up on encode (%pd2, err=%i)\n",
+				    dentry, err);
+	}
+
+	return err;
+}
+
+/*
+ * Before encoding a non-upper directory file handle from real layer N, we need
+ * to check if it will be possible to reconnect an overlay dentry from the real
+ * lower decoded dentry. This is done by following the overlay ancestry up to a
+ * "layer N connected" ancestor and verifying that all parents along the way are
+ * "layer N connectable". If an ancestor that is NOT "layer N connectable" is
+ * found, we need to copy up an ancestor, which is "layer N connectable", thus
+ * making that ancestor "layer N connected". For example:
+ *
+ * layer 1: /a
+ * layer 2: /a/b/c
+ *
+ * The overlay dentry /a is NOT "layer 2 connectable", because if dir /a is
+ * copied up and renamed, upper dir /a will be indexed by lower dir /a from
+ * layer 1. The dir /a from layer 2 will never be indexed, so the algorithm (*)
+ * in ovl_lookup_real_ancestor() will not be able to lookup a connected overlay
+ * dentry from the connected lower dentry /a/b/c.
+ *
+ * To avoid this problem on decode time, we need to copy up an ancestor of
+ * /a/b/c, which is "layer 2 connectable", on encode time. That ancestor is
+ * /a/b. After copy up (and index) of /a/b, it will become "layer 2 connected"
+ * and when the time comes to decode the file handle from lower dentry /a/b/c,
+ * ovl_lookup_real_ancestor() will find the indexed ancestor /a/b and decoding
+ * a connected overlay dentry will be accomplished.
+ *
+ * (*) the algorithm in ovl_lookup_real_ancestor() can be improved to lookup an
+ * entry /a in the lower layers above layer N and find the indexed dir /a from
+ * layer 1. If that improvement is made, then the check for "layer N connected"
+ * will need to verify there are no redirects in lower layers above N. In the
+ * example above, /a will be "layer 2 connectable". However, if layer 2 dir /a
+ * is a target of a layer 1 redirect, then /a will NOT be "layer 2 connectable":
+ *
+ * layer 1: /A (redirect = /a)
+ * layer 2: /a/b/c
+ */
+
+/* Return the lowest layer for encoding a connectable file handle */
+static int ovl_connectable_layer(struct dentry *dentry)
+{
+	struct ovl_entry *oe = OVL_E(dentry);
+
+	/* We can get overlay root from root of any layer */
+	if (dentry == dentry->d_sb->s_root)
+		return oe->numlower;
+
+	/*
+	 * If it's an unindexed merge dir, then it's not connectable with any
+	 * lower layer
+	 */
+	if (ovl_dentry_upper(dentry) &&
+	    !ovl_test_flag(OVL_INDEX, d_inode(dentry)))
+		return 0;
+
+	/* We can get upper/overlay path from indexed/lower dentry */
+	return oe->lowerstack[0].layer->idx;
+}
+
+/*
+ * @dentry is "connected" if all ancestors up to root or a "connected" ancestor
+ * have the same uppermost lower layer as the origin's layer. We may need to
+ * copy up a "connectable" ancestor to make it "connected". A "connected" dentry
+ * cannot become non "connected", so cache positive result in dentry flags.
+ *
+ * Return the connected origin layer or < 0 on error.
+ */
+static int ovl_connect_layer(struct dentry *dentry)
+{
+	struct dentry *next, *parent = NULL;
+	int origin_layer;
+	int err = 0;
+
+	if (WARN_ON(dentry == dentry->d_sb->s_root) ||
+	    WARN_ON(!ovl_dentry_lower(dentry)))
+		return -EIO;
+
+	origin_layer = OVL_E(dentry)->lowerstack[0].layer->idx;
+	if (ovl_dentry_test_flag(OVL_E_CONNECTED, dentry))
+		return origin_layer;
+
+	/* Find the topmost origin layer connectable ancestor of @dentry */
+	next = dget(dentry);
+	for (;;) {
+		parent = dget_parent(next);
+		if (WARN_ON(parent == next)) {
+			err = -EIO;
+			break;
+		}
+
+		/*
+		 * If @parent is not origin layer connectable, then copy up
+		 * @next which is origin layer connectable and we are done.
+		 */
+		if (ovl_connectable_layer(parent) < origin_layer) {
+			err = ovl_encode_maybe_copy_up(next);
+			break;
+		}
+
+		/* If @parent is connected or indexed we are done */
+		if (ovl_dentry_test_flag(OVL_E_CONNECTED, parent) ||
+		    ovl_test_flag(OVL_INDEX, d_inode(parent)))
+			break;
+
+		dput(next);
+		next = parent;
+	}
+
+	dput(parent);
+	dput(next);
+
+	if (!err)
+		ovl_dentry_set_flag(OVL_E_CONNECTED, dentry);
+
+	return err ?: origin_layer;
+}
+
 /*
  * We only need to encode origin if there is a chance that the same object was
  * encoded pre copy up and then we need to stay consistent with the same
@@ -41,73 +177,59 @@
  * L = lower file handle
  *
  * (*) Connecting an overlay dir from real lower dentry is not always
- * possible when there are redirects in lower layers. To mitigate this case,
- * we copy up the lower dir first and then encode an upper dir file handle.
+ * possible when there are redirects in lower layers and non-indexed merge dirs.
+ * To mitigate those case, we may copy up the lower dir ancestor before encode
+ * a lower dir file handle.
+ *
+ * Return 0 for upper file handle, > 0 for lower file handle or < 0 on error.
  */
-static bool ovl_should_encode_origin(struct dentry *dentry)
+static int ovl_check_encode_origin(struct dentry *dentry)
 {
 	struct ovl_fs *ofs = dentry->d_sb->s_fs_info;
 
+	/* Upper file handle for pure upper */
 	if (!ovl_dentry_lower(dentry))
-		return false;
+		return 0;
 
 	/*
-	 * Decoding a merge dir, whose origin's parent is under a redirected
-	 * lower dir is not always possible. As a simple aproximation, we do
-	 * not encode lower dir file handles when overlay has multiple lower
-	 * layers and origin is below the topmost lower layer.
+	 * Upper file handle for non-indexed upper.
 	 *
-	 * TODO: copy up only the parent that is under redirected lower.
+	 * Root is never indexed, so if there's an upper layer, encode upper for
+	 * root.
 	 */
-	if (d_is_dir(dentry) && ofs->upper_mnt &&
-	    OVL_E(dentry)->lowerstack[0].layer->idx > 1)
-		return false;
-
-	/* Decoding a non-indexed upper from origin is not implemented */
 	if (ovl_dentry_upper(dentry) &&
 	    !ovl_test_flag(OVL_INDEX, d_inode(dentry)))
-		return false;
-
-	return true;
-}
-
-static int ovl_encode_maybe_copy_up(struct dentry *dentry)
-{
-	int err;
-
-	if (ovl_dentry_upper(dentry))
 		return 0;
 
-	err = ovl_want_write(dentry);
-	if (err)
-		return err;
-
-	err = ovl_copy_up(dentry);
+	/*
+	 * Decoding a merge dir, whose origin's ancestor is under a redirected
+	 * lower dir or under a non-indexed upper is not always possible.
+	 * ovl_connect_layer() will try to make origin's layer "connected" by
+	 * copying up a "connectable" ancestor.
+	 */
+	if (d_is_dir(dentry) && ofs->upper_mnt)
+		return ovl_connect_layer(dentry);
 
-	ovl_drop_write(dentry);
-	return err;
+	/* Lower file handle for indexed and non-upper dir/non-dir */
+	return 1;
 }
 
 static int ovl_d_to_fh(struct dentry *dentry, char *buf, int buflen)
 {
-	struct dentry *origin = ovl_dentry_lower(dentry);
 	struct ovl_fh *fh = NULL;
-	int err;
+	int err, enc_lower;
 
 	/*
-	 * If we should not encode a lower dir file handle, copy up and encode
-	 * an upper dir file handle.
+	 * Check if we should encode a lower or upper file handle and maybe
+	 * copy up an ancestor to make lower file handle connectable.
 	 */
-	if (!ovl_should_encode_origin(dentry)) {
-		err = ovl_encode_maybe_copy_up(dentry);
-		if (err)
-			goto fail;
-
-		origin = NULL;
-	}
+	err = enc_lower = ovl_check_encode_origin(dentry);
+	if (enc_lower < 0)
+		goto fail;
 
-	/* Encode an upper or origin file handle */
-	fh = ovl_encode_fh(origin ?: ovl_dentry_upper(dentry), !origin);
+	/* Encode an upper or lower file handle */
+	fh = ovl_encode_fh(enc_lower ? ovl_dentry_lower(dentry) :
+			   ovl_dentry_upper(dentry), !enc_lower);
 	err = PTR_ERR(fh);
 	if (IS_ERR(fh))
 		goto fail;
@@ -355,8 +477,8 @@ static struct dentry *ovl_lookup_real_inode(struct super_block *sb,
 		dput(upper);
 	}
 
-	if (!this)
-		return NULL;
+	if (IS_ERR_OR_NULL(this))
+		return this;
 
 	if (WARN_ON(ovl_dentry_real_at(this, layer->idx) != real)) {
 		dput(this);
@@ -498,7 +620,7 @@ static struct dentry *ovl_lookup_real(struct super_block *sb,
 		if (err == -ECHILD) {
 			this = ovl_lookup_real_ancestor(sb, real,
 							layer);
-			err = IS_ERR(this) ? PTR_ERR(this) : 0;
+			err = PTR_ERR_OR_ZERO(this);
 		}
 		if (!err) {
 			dput(connected);
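
A worked reading of the connectivity rules added above, for the two-layer example in the comment (this walkthrough is an illustration, not code from the patch):

	/*
	 * layer 1: /a
	 * layer 2: /a/b/c       <- encode a file handle here (origin layer 2)
	 *
	 * ovl_connect_layer(/a/b/c) walks up:
	 *   parent /a/b: lowerstack[0] is layer 2 -> "layer 2 connectable"
	 *   parent /a:   lowerstack[0] is layer 1 -> NOT "layer 2 connectable"
	 * so /a/b, the last connectable ancestor, is copied up and indexed,
	 * making it "layer 2 connected" before the lower handle is emitted.
	 */
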
diff --git a/fs/overlayfs/inode.c b/fs/overlayfs/inode.c
index fcd97b783fa1..3b1bd469accd 100644
--- a/fs/overlayfs/inode.c
+++ b/fs/overlayfs/inode.c
@@ -669,38 +669,59 @@ struct inode *ovl_lookup_inode(struct super_block *sb, struct dentry *real,
 	return inode;
 }
 
+/*
+ * Does overlay inode need to be hashed by lower inode?
+ */
+static bool ovl_hash_bylower(struct super_block *sb, struct dentry *upper,
+			     struct dentry *lower, struct dentry *index)
+{
+	struct ovl_fs *ofs = sb->s_fs_info;
+
+	/* No, if pure upper */
+	if (!lower)
+		return false;
+
+	/* Yes, if already indexed */
+	if (index)
+		return true;
+
+	/* Yes, if won't be copied up */
+	if (!ofs->upper_mnt)
+		return true;
+
+	/* No, if lower hardlink is or will be broken on copy up */
+	if ((upper || !ovl_indexdir(sb)) &&
+	    !d_is_dir(lower) && d_inode(lower)->i_nlink > 1)
+		return false;
+
+	/* No, if non-indexed upper with NFS export */
+	if (sb->s_export_op && upper)
+		return false;
+
+	/* Otherwise, hash by lower inode for fsnotify */
+	return true;
+}
+
 struct inode *ovl_get_inode(struct super_block *sb, struct dentry *upperdentry,
 			    struct dentry *lowerdentry, struct dentry *index,
 			    unsigned int numlower)
 {
-	struct ovl_fs *ofs = sb->s_fs_info;
 	struct inode *realinode = upperdentry ? d_inode(upperdentry) : NULL;
 	struct inode *inode;
-	/* Already indexed or could be indexed on copy up? */
-	bool indexed = (index || (ovl_indexdir(sb) && !upperdentry));
-	struct dentry *origin = indexed ? lowerdentry : NULL;
+	bool bylower = ovl_hash_bylower(sb, upperdentry, lowerdentry, index);
 	bool is_dir;
 
-	if (WARN_ON(upperdentry && indexed && !lowerdentry))
-		return ERR_PTR(-EIO);
-
 	if (!realinode)
 		realinode = d_inode(lowerdentry);
 
 	/*
-	 * Copy up origin (lower) may exist for non-indexed non-dir upper, but
-	 * we must not use lower as hash key in that case.
-	 * Hash non-dir that is or could be indexed by origin inode.
-	 * Hash dir that is or could be merged by origin inode.
-	 * Hash pure upper and non-indexed non-dir by upper inode.
-	 * Hash non-indexed dir by upper inode for NFS export.
+	 * Copy up origin (lower) may exist for non-indexed upper, but we must
+	 * not use lower as hash key if this is a broken hardlink.
 	 */
 	is_dir = S_ISDIR(realinode->i_mode);
-	if (is_dir && (indexed || !sb->s_export_op || !ofs->upper_mnt))
-		origin = lowerdentry;
-
-	if (upperdentry || origin) {
-		struct inode *key = d_inode(origin ?: upperdentry);
+	if (upperdentry || bylower) {
+		struct inode *key = d_inode(bylower ? lowerdentry :
+						      upperdentry);
 		unsigned int nlink = is_dir ? 1 : realinode->i_nlink;
 
 		inode = iget5_locked(sb, (unsigned long) key,
@@ -728,6 +749,7 @@ struct inode *ovl_get_inode(struct super_block *sb, struct dentry *upperdentry,
 		nlink = ovl_get_nlink(lowerdentry, upperdentry, nlink);
 		set_nlink(inode, nlink);
 	} else {
+		/* Lower hardlink that will be broken on copy up */
 		inode = new_inode(sb);
 		if (!inode)
 			goto out_nomem;
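
ovl_get_inode() now keys the inode cache on the address of whichever real inode ovl_hash_bylower() selected. The underlying pattern is iget5_locked() with caller-supplied match/init callbacks -- a generic sketch with hypothetical callbacks (overlayfs supplies its own ovl_inode_test/ovl_inode_set):

	static int my_test(struct inode *inode, void *data)
	{
		return inode->i_private == data;	/* same key? */
	}

	static int my_set(struct inode *inode, void *data)
	{
		inode->i_private = data;		/* remember the key */
		return 0;
	}

	inode = iget5_locked(sb, (unsigned long)key, my_test, my_set, key);
	if (inode && (inode->i_state & I_NEW)) {
		/* first user: initialize, then unlock_new_inode(inode) */
	}
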
diff --git a/fs/overlayfs/namei.c b/fs/overlayfs/namei.c
index de3e6da1d5a5..70fcfcc684cc 100644
--- a/fs/overlayfs/namei.c
+++ b/fs/overlayfs/namei.c
@@ -913,9 +913,6 @@ struct dentry *ovl_lookup(struct inode *dir, struct dentry *dentry,
 			stack[ctr].layer = lower.layer;
 			ctr++;
 
-			if (d.stop)
-				break;
-
 			/*
 			 * Following redirects can have security consequences: it's like
 			 * a symlink into the lower layer without the permission checks.
@@ -933,6 +930,9 @@ struct dentry *ovl_lookup(struct inode *dir, struct dentry *dentry,
 				goto out_put;
 			}
 
+			if (d.stop)
+				break;
+
 			if (d.redirect && d.redirect[0] == '/' && poe != roe) {
 				poe = roe;
 				/* Find the current layer on the root dentry */
diff --git a/fs/overlayfs/overlayfs.h b/fs/overlayfs/overlayfs.h
index 0df25a9c94bd..225ff1171147 100644
--- a/fs/overlayfs/overlayfs.h
+++ b/fs/overlayfs/overlayfs.h
@@ -40,6 +40,7 @@ enum ovl_inode_flag {
 enum ovl_entry_flag {
 	OVL_E_UPPER_ALIAS,
 	OVL_E_OPAQUE,
+	OVL_E_CONNECTED,
 };
 
 /*
diff --git a/fs/overlayfs/super.c b/fs/overlayfs/super.c
index 9ee37c76091d..7c24619ae7fc 100644
--- a/fs/overlayfs/super.c
+++ b/fs/overlayfs/super.c
@@ -1359,6 +1359,7 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent)
 
 	/* Root is always merge -> can have whiteouts */
 	ovl_set_flag(OVL_WHITEOUTS, d_inode(root_dentry));
+	ovl_dentry_set_flag(OVL_E_CONNECTED, root_dentry);
 	ovl_inode_init(d_inode(root_dentry), upperpath.dentry,
 		       ovl_dentry_lower(root_dentry));
 
diff --git a/fs/xfs/xfs_iomap.c b/fs/xfs/xfs_iomap.c
index 66e1edbfb2b2..046469fcc1b8 100644
--- a/fs/xfs/xfs_iomap.c
+++ b/fs/xfs/xfs_iomap.c
@@ -955,15 +955,29 @@ static inline bool imap_needs_alloc(struct inode *inode,
 		(IS_DAX(inode) && imap->br_state == XFS_EXT_UNWRITTEN);
 }
 
+static inline bool needs_cow_for_zeroing(struct xfs_bmbt_irec *imap, int nimaps)
+{
+	return nimaps &&
+		imap->br_startblock != HOLESTARTBLOCK &&
+		imap->br_state != XFS_EXT_UNWRITTEN;
+}
+
 static inline bool need_excl_ilock(struct xfs_inode *ip, unsigned flags)
 {
 	/*
-	 * COW writes will allocate delalloc space, so we need to make sure
-	 * to take the lock exclusively here.
+	 * COW writes may allocate delalloc space or convert unwritten COW
+	 * extents, so we need to make sure to take the lock exclusively here.
 	 */
 	if (xfs_is_reflink_inode(ip) && (flags & (IOMAP_WRITE | IOMAP_ZERO)))
 		return true;
-	if ((flags & IOMAP_DIRECT) && (flags & IOMAP_WRITE))
+
+	/*
+	 * Extents not yet cached requires exclusive access, don't block.
+	 * This is an opencoded xfs_ilock_data_map_shared() to cater for the
+	 * non-blocking behaviour.
+	 */
+	if (ip->i_d.di_format == XFS_DINODE_FMT_BTREE &&
+	    !(ip->i_df.if_flags & XFS_IFEXTENTS))
 		return true;
 	return false;
 }
@@ -993,16 +1007,18 @@ xfs_file_iomap_begin(
 		return xfs_file_iomap_begin_delay(inode, offset, length, iomap);
 	}
 
-	if (need_excl_ilock(ip, flags)) {
+	if (need_excl_ilock(ip, flags))
 		lockmode = XFS_ILOCK_EXCL;
-		xfs_ilock(ip, XFS_ILOCK_EXCL);
-	} else {
-		lockmode = xfs_ilock_data_map_shared(ip);
-	}
+	else
+		lockmode = XFS_ILOCK_SHARED;
 
-	if ((flags & IOMAP_NOWAIT) && !(ip->i_df.if_flags & XFS_IFEXTENTS)) {
-		error = -EAGAIN;
-		goto out_unlock;
+	if (flags & IOMAP_NOWAIT) {
+		if (!(ip->i_df.if_flags & XFS_IFEXTENTS))
+			return -EAGAIN;
+		if (!xfs_ilock_nowait(ip, lockmode))
+			return -EAGAIN;
+	} else {
+		xfs_ilock(ip, lockmode);
 	}
 
 	ASSERT(offset <= mp->m_super->s_maxbytes);
@@ -1024,7 +1040,9 @@ xfs_file_iomap_begin(
 		goto out_unlock;
 	}
 
-	if ((flags & (IOMAP_WRITE | IOMAP_ZERO)) && xfs_is_reflink_inode(ip)) {
+	if (xfs_is_reflink_inode(ip) &&
+	    ((flags & IOMAP_WRITE) ||
+	     ((flags & IOMAP_ZERO) && needs_cow_for_zeroing(&imap, nimaps)))) {
 		if (flags & IOMAP_DIRECT) {
 			/*
 			 * A reflinked inode will result in CoW alloc.
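
The new needs_cow_for_zeroing() helper captures why IOMAP_ZERO no longer forces COW unconditionally: extents that already read back as zeroes need no copy. A condensed truth table of the predicate as added above:

	/* needs_cow_for_zeroing(imap, nimaps):
	 *   nimaps == 0 (no mapping returned)         -> false, nothing to zero
	 *   br_startblock == HOLESTARTBLOCK (hole)    -> false, reads as zeroes
	 *   br_state == XFS_EXT_UNWRITTEN (unwritten) -> false, reads as zeroes
	 *   otherwise (written, mapped extent)        -> true, must COW first
	 */
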
diff --git a/include/linux/compat.h b/include/linux/compat.h
index e16d07eb08cf..16c3027074a2 100644
--- a/include/linux/compat.h
+++ b/include/linux/compat.h
@@ -17,6 +17,7 @@
 #include <linux/if.h>
 #include <linux/fs.h>
 #include <linux/aio_abi.h> /* for aio_context_t */
+#include <linux/uaccess.h>
 #include <linux/unistd.h>
 
 #include <asm/compat.h>
@@ -550,8 +551,29 @@ asmlinkage long compat_sys_settimeofday(struct compat_timeval __user *tv,
 asmlinkage long compat_sys_adjtimex(struct compat_timex __user *utp);
 
 extern int get_compat_sigset(sigset_t *set, const compat_sigset_t __user *compat);
-extern int put_compat_sigset(compat_sigset_t __user *compat,
-		const sigset_t *set, unsigned int size);
+
+/*
+ * Defined inline such that size can be compile time constant, which avoids
+ * CONFIG_HARDENED_USERCOPY complaining about copies from task_struct
+ */
+static inline int
+put_compat_sigset(compat_sigset_t __user *compat, const sigset_t *set,
+		  unsigned int size)
+{
+	/* size <= sizeof(compat_sigset_t) <= sizeof(sigset_t) */
+#ifdef __BIG_ENDIAN
+	compat_sigset_t v;
+	switch (_NSIG_WORDS) {
+	case 4: v.sig[7] = (set->sig[3] >> 32); v.sig[6] = set->sig[3];
+	case 3: v.sig[5] = (set->sig[2] >> 32); v.sig[4] = set->sig[2];
+	case 2: v.sig[3] = (set->sig[1] >> 32); v.sig[2] = set->sig[1];
+	case 1: v.sig[1] = (set->sig[0] >> 32); v.sig[0] = set->sig[0];
+	}
+	return copy_to_user(compat, &v, size) ? -EFAULT : 0;
+#else
+	return copy_to_user(compat, set, size) ? -EFAULT : 0;
+#endif
+}
 
 asmlinkage long compat_sys_migrate_pages(compat_pid_t pid,
 		compat_ulong_t maxnode, const compat_ulong_t __user *old_nodes,
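
On big-endian kernels each 64-bit sigset word has to be emitted as two 32-bit compat words, low half first, so 32-bit userspace sees the expected byte order; the fallthrough switch above unrolls that per _NSIG_WORDS. What a single step does, in isolation (sketch):

	u64 w = set->sig[i];
	v.sig[2 * i] = (u32)w;			/* low 32 bits first */
	v.sig[2 * i + 1] = (u32)(w >> 32);	/* then the high 32 bits */
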
diff --git a/include/linux/of_pci.h b/include/linux/of_pci.h
index 88865e0ebf4d..091033a6b836 100644
--- a/include/linux/of_pci.h
+++ b/include/linux/of_pci.h
@@ -13,7 +13,6 @@ struct device_node;
 struct device_node *of_pci_find_child_device(struct device_node *parent,
 					     unsigned int devfn);
 int of_pci_get_devfn(struct device_node *np);
-int of_irq_parse_and_map_pci(const struct pci_dev *dev, u8 slot, u8 pin);
 int of_pci_parse_bus_range(struct device_node *node, struct resource *res);
 int of_get_pci_domain_nr(struct device_node *node);
 int of_pci_get_max_link_speed(struct device_node *node);
@@ -34,12 +33,6 @@ static inline int of_pci_get_devfn(struct device_node *np)
 }
 
 static inline int
-of_irq_parse_and_map_pci(const struct pci_dev *dev, u8 slot, u8 pin)
-{
-	return 0;
-}
-
-static inline int
 of_pci_parse_bus_range(struct device_node *node, struct resource *res)
 {
 	return -EINVAL;
@@ -67,6 +60,16 @@ of_pci_get_max_link_speed(struct device_node *node)
 static inline void of_pci_check_probe_only(void) { }
 #endif
 
+#if IS_ENABLED(CONFIG_OF_IRQ)
+int of_irq_parse_and_map_pci(const struct pci_dev *dev, u8 slot, u8 pin);
+#else
+static inline int
+of_irq_parse_and_map_pci(const struct pci_dev *dev, u8 slot, u8 pin)
+{
+	return 0;
+}
+#endif
+
 #if defined(CONFIG_OF_ADDRESS)
 int of_pci_get_host_bridge_resources(struct device_node *dev,
 			unsigned char busno, unsigned char bus_max,
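
Moving the of_irq_parse_and_map_pci() declaration under IS_ENABLED(CONFIG_OF_IRQ) keys the stub to the option that actually provides the symbol, for both built-in and modular builds. The generic header idiom, with hypothetical names:

	#if IS_ENABLED(CONFIG_FOO)		/* true for FOO=y and FOO=m */
	int foo_setup(struct device *dev);
	#else
	static inline int foo_setup(struct device *dev)
	{
		return 0;			/* benign no-op when FOO is off */
	}
	#endif
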
diff --git a/include/uapi/misc/ocxl.h b/include/uapi/misc/ocxl.h
index 4b0b0b756f3e..0af83d80fb3e 100644
--- a/include/uapi/misc/ocxl.h
+++ b/include/uapi/misc/ocxl.h
@@ -32,6 +32,22 @@ struct ocxl_ioctl_attach {
 	__u64 reserved3;
 };
 
+struct ocxl_ioctl_metadata {
+	__u16 version; // struct version, always backwards compatible
+
+	// Version 0 fields
+	__u8 afu_version_major;
+	__u8 afu_version_minor;
+	__u32 pasid; // PASID assigned to the current context
+
+	__u64 pp_mmio_size; // Per PASID MMIO size
+	__u64 global_mmio_size;
+
+	// End version 0 fields
+
+	__u64 reserved[13]; // Total of 16*u64
+};
+
 struct ocxl_ioctl_irq_fd {
 	__u64 irq_offset;
 	__s32 eventfd;
@@ -45,5 +61,6 @@ struct ocxl_ioctl_irq_fd {
 #define OCXL_IOCTL_IRQ_ALLOC _IOR(OCXL_MAGIC, 0x11, __u64)
 #define OCXL_IOCTL_IRQ_FREE _IOW(OCXL_MAGIC, 0x12, __u64)
 #define OCXL_IOCTL_IRQ_SET_FD _IOW(OCXL_MAGIC, 0x13, struct ocxl_ioctl_irq_fd)
+#define OCXL_IOCTL_GET_METADATA _IOR(OCXL_MAGIC, 0x14, struct ocxl_ioctl_metadata)
 
 #endif /* _UAPI_MISC_OCXL_H */
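
A hedged userspace sketch of the new metadata ioctl; the AFU device path below is hypothetical (real nodes live under /dev/ocxl/ and are named after the AFU):

	#include <stdio.h>
	#include <fcntl.h>
	#include <sys/ioctl.h>
	#include <misc/ocxl.h>

	int main(void)
	{
		struct ocxl_ioctl_metadata md = { 0 };
		int fd = open("/dev/ocxl/IBM,EXAMPLE.0004:00:00.1.0", O_RDWR);

		if (fd < 0 || ioctl(fd, OCXL_IOCTL_GET_METADATA, &md) < 0)
			return 1;
		printf("version %u, PASID %u, pp mmio %llu bytes\n",
		       md.version, md.pasid,
		       (unsigned long long)md.pp_mmio_size);
		return 0;
	}
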
diff --git a/kernel/compat.c b/kernel/compat.c
index 3247fe761f60..3f5fa8902e7d 100644
--- a/kernel/compat.c
+++ b/kernel/compat.c
@@ -488,25 +488,6 @@ get_compat_sigset(sigset_t *set, const compat_sigset_t __user *compat)
 }
 EXPORT_SYMBOL_GPL(get_compat_sigset);
 
-int
-put_compat_sigset(compat_sigset_t __user *compat, const sigset_t *set,
-		  unsigned int size)
-{
-	/* size <= sizeof(compat_sigset_t) <= sizeof(sigset_t) */
-#ifdef __BIG_ENDIAN
-	compat_sigset_t v;
-	switch (_NSIG_WORDS) {
-	case 4: v.sig[7] = (set->sig[3] >> 32); v.sig[6] = set->sig[3];
-	case 3: v.sig[5] = (set->sig[2] >> 32); v.sig[4] = set->sig[2];
-	case 2: v.sig[3] = (set->sig[1] >> 32); v.sig[2] = set->sig[1];
-	case 1: v.sig[1] = (set->sig[0] >> 32); v.sig[0] = set->sig[0];
-	}
-	return copy_to_user(compat, &v, size) ? -EFAULT : 0;
-#else
-	return copy_to_user(compat, set, size) ? -EFAULT : 0;
-#endif
-}
-
 #ifdef CONFIG_NUMA
 COMPAT_SYSCALL_DEFINE6(move_pages, pid_t, pid, compat_ulong_t, nr_pages,
 		       compat_uptr_t __user *, pages32,
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 96db9ae5d5af..4b838470fac4 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -2246,7 +2246,7 @@ static void ctx_resched(struct perf_cpu_context *cpuctx,
 			 struct perf_event_context *task_ctx,
 			 enum event_type_t event_type)
 {
-	enum event_type_t ctx_event_type = event_type & EVENT_ALL;
+	enum event_type_t ctx_event_type;
 	bool cpu_event = !!(event_type & EVENT_CPU);
 
 	/*
@@ -2256,6 +2256,8 @@ static void ctx_resched(struct perf_cpu_context *cpuctx,
 	if (event_type & EVENT_PINNED)
 		event_type |= EVENT_FLEXIBLE;
 
+	ctx_event_type = event_type & EVENT_ALL;
+
 	perf_pmu_disable(cpuctx->ctx.pmu);
 	if (task_ctx)
 		task_ctx_sched_out(cpuctx, task_ctx, event_type);
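
The perf fix is an instance of a stale-snapshot bug: a derived value computed before its inputs are final. In miniature, with hypothetical flags:

	int masked = flags & MASK;	/* snapshot taken here...         */
	if (flags & PINNED)
		flags |= FLEXIBLE;	/* ...but the input changes after  */
	/* masked is stale; the fix computes it after the last update:   */
	masked = flags & MASK;
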
diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c
index 65cc0cb984e6..940633c63254 100644
--- a/kernel/locking/rtmutex.c
+++ b/kernel/locking/rtmutex.c
@@ -1616,11 +1616,12 @@ bool __sched __rt_mutex_futex_unlock(struct rt_mutex *lock,
 void __sched rt_mutex_futex_unlock(struct rt_mutex *lock)
 {
 	DEFINE_WAKE_Q(wake_q);
+	unsigned long flags;
 	bool postunlock;
 
-	raw_spin_lock_irq(&lock->wait_lock);
+	raw_spin_lock_irqsave(&lock->wait_lock, flags);
 	postunlock = __rt_mutex_futex_unlock(lock, &wake_q);
-	raw_spin_unlock_irq(&lock->wait_lock);
+	raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
 
 	if (postunlock)
 		rt_mutex_postunlock(&wake_q);
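
The _irq lock variants unconditionally re-enable interrupts on unlock, which is only safe when the caller is known to run with interrupts enabled; the _irqsave/_irqrestore pair records and restores the caller's actual IRQ state. Side by side (sketch):

	raw_spin_lock_irq(&l);			/* assumes IRQs were on      */
	raw_spin_unlock_irq(&l);		/* always re-enables IRQs    */

	unsigned long flags;
	raw_spin_lock_irqsave(&l, flags);	/* saves prior IRQ state     */
	raw_spin_unlock_irqrestore(&l, flags);	/* restores it exactly       */
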
diff --git a/kernel/panic.c b/kernel/panic.c
index 2cfef408fec9..4b794f1d8561 100644
--- a/kernel/panic.c
+++ b/kernel/panic.c
@@ -640,7 +640,7 @@ device_initcall(register_warn_debugfs);
  */
 __visible void __stack_chk_fail(void)
 {
-	panic("stack-protector: Kernel stack is corrupted in: %p\n",
+	panic("stack-protector: Kernel stack is corrupted in: %pB\n",
 	      __builtin_return_address(0));
 }
 EXPORT_SYMBOL(__stack_chk_fail);
diff --git a/lib/bug.c b/lib/bug.c
index c1b0fad31b10..1077366f496b 100644
--- a/lib/bug.c
+++ b/lib/bug.c
@@ -150,6 +150,8 @@ enum bug_trap_type report_bug(unsigned long bugaddr, struct pt_regs *regs)
 		return BUG_TRAP_TYPE_NONE;
 
 	bug = find_bug(bugaddr);
+	if (!bug)
+		return BUG_TRAP_TYPE_NONE;
 
 	file = NULL;
 	line = 0;
@@ -191,7 +193,7 @@ enum bug_trap_type report_bug(unsigned long bugaddr, struct pt_regs *regs)
 	if (file)
 		pr_crit("kernel BUG at %s:%u!\n", file, line);
 	else
-		pr_crit("Kernel BUG at %p [verbose debug info unavailable]\n",
+		pr_crit("Kernel BUG at %pB [verbose debug info unavailable]\n",
 			(void *)bugaddr);
 
 	return BUG_TRAP_TYPE_BUG;
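
Both this hunk and the panic.c one switch from %p to %pB. On recent kernels plain %p prints a hashed value, and even unhashed raw addresses are hard to act on; %pB decodes a return address as symbol+offset, adjusted for use in backtraces. Sketch:

	pr_crit("Kernel BUG at %pB\n", (void *)bugaddr);
	/* prints e.g. "Kernel BUG at do_something+0x4c/0x120" instead of
	 * an opaque (possibly hashed) pointer value */
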
diff --git a/lib/test_kmod.c b/lib/test_kmod.c
index e372b97eee13..0e5b7a61460b 100644
--- a/lib/test_kmod.c
+++ b/lib/test_kmod.c
@@ -1141,7 +1141,7 @@ static struct kmod_test_device *register_test_dev_kmod(void)
 	mutex_lock(&reg_dev_mutex);
 
 	/* int should suffice for number of devices, test for wrap */
-	if (unlikely(num_test_devs + 1) < 0) {
+	if (num_test_devs + 1 == INT_MAX) {
 		pr_err("reached limit of number of test devices\n");
 		goto out;
 	}
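
The replaced check could never fire: unlikely(x) is __builtin_expect(!!(x), 0), whose value is 0 or 1, so comparing that result with < 0 is always false -- and probing for signed overflow after the fact is undefined behaviour anyway. Unfolded:

	if (unlikely(num_test_devs + 1) < 0)	/* (0 or 1) < 0: never true  */
	/* versus the fix, which tests the bound before any wrap occurs:  */
	if (num_test_devs + 1 == INT_MAX)
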
diff --git a/mm/gup.c b/mm/gup.c
index 1b46e6e74881..6afae32571ca 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -516,7 +516,7 @@ static int faultin_page(struct task_struct *tsk, struct vm_area_struct *vma,
 	}
 
 	if (ret & VM_FAULT_RETRY) {
-		if (nonblocking)
+		if (nonblocking && !(fault_flags & FAULT_FLAG_RETRY_NOWAIT))
 			*nonblocking = 0;
 		return -EBUSY;
 	}
@@ -890,7 +890,10 @@ static __always_inline long __get_user_pages_locked(struct task_struct *tsk,
 			break;
 		}
 		if (*locked) {
-			/* VM_FAULT_RETRY didn't trigger */
+			/*
+			 * VM_FAULT_RETRY didn't trigger or it was a
+			 * FOLL_NOWAIT.
+			 */
 			if (!pages_done)
 				pages_done = ret;
 			break;
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 7c204e3d132b..a963f2034dfc 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -1583,7 +1583,7 @@ static struct page *alloc_surplus_huge_page(struct hstate *h, gfp_t gfp_mask,
 		page = NULL;
 	} else {
 		h->surplus_huge_pages++;
-		h->nr_huge_pages_node[page_to_nid(page)]++;
+		h->surplus_huge_pages_node[page_to_nid(page)]++;
 	}
 
 out_unlock:
diff --git a/mm/memblock.c b/mm/memblock.c
index 5a9ca2a1751b..b6ba6b7adadc 100644
--- a/mm/memblock.c
+++ b/mm/memblock.c
@@ -1107,7 +1107,7 @@ unsigned long __init_memblock memblock_next_valid_pfn(unsigned long pfn,
 	struct memblock_type *type = &memblock.memory;
 	unsigned int right = type->cnt;
 	unsigned int mid, left = 0;
-	phys_addr_t addr = PFN_PHYS(pfn + 1);
+	phys_addr_t addr = PFN_PHYS(++pfn);
 
 	do {
 		mid = (right + left) / 2;
@@ -1118,15 +1118,15 @@ unsigned long __init_memblock memblock_next_valid_pfn(unsigned long pfn,
 					  type->regions[mid].size))
 			left = mid + 1;
 		else {
-			/* addr is within the region, so pfn + 1 is valid */
-			return min(pfn + 1, max_pfn);
+			/* addr is within the region, so pfn is valid */
+			return pfn;
 		}
 	} while (left < right);
 
 	if (right == type->cnt)
-		return max_pfn;
+		return -1UL;
 	else
-		return min(PHYS_PFN(type->regions[right].base), max_pfn);
+		return PHYS_PFN(type->regions[right].base);
 }
 
 /**
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index cb416723538f..3d974cb2a1a1 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -5359,9 +5359,14 @@ void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
 			/*
 			 * Skip to the pfn preceding the next valid one (or
 			 * end_pfn), such that we hit a valid pfn (or end_pfn)
-			 * on our next iteration of the loop.
+			 * on our next iteration of the loop. Note that it needs
+			 * to be pageblock aligned even when the region itself
+			 * is not. move_freepages_block() can shift ahead of
+			 * the valid region but still depends on correct page
+			 * metadata.
 			 */
-			pfn = memblock_next_valid_pfn(pfn, end_pfn) - 1;
+			pfn = (memblock_next_valid_pfn(pfn, end_pfn) &
+					~(pageblock_nr_pages-1)) - 1;
 #endif
 			continue;
 		}
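
`x & ~(n - 1)` rounds x down to a multiple of n when n is a power of two; the hunk applies it so the skipped-to pfn is pageblock aligned and move_freepages_block() never consults uninitialized struct pages. A quick instance (pageblock_nr_pages is 512 on many configurations):

	unsigned long aligned = pfn & ~(pageblock_nr_pages - 1);
	/* e.g. pfn = 1000, pageblock_nr_pages = 512 -> aligned = 512 */
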
diff --git a/scripts/Makefile.lib b/scripts/Makefile.lib
index 5589bae34af6..a6f538b31ad6 100644
--- a/scripts/Makefile.lib
+++ b/scripts/Makefile.lib
@@ -297,11 +297,11 @@ cmd_dt_S_dtb= \
 	echo '\#include <asm-generic/vmlinux.lds.h>'; \
 	echo '.section .dtb.init.rodata,"a"'; \
 	echo '.balign STRUCT_ALIGNMENT'; \
-	echo '.global __dtb_$(*F)_begin'; \
-	echo '__dtb_$(*F)_begin:'; \
+	echo '.global __dtb_$(subst -,_,$(*F))_begin'; \
+	echo '__dtb_$(subst -,_,$(*F))_begin:'; \
 	echo '.incbin "$<" '; \
-	echo '__dtb_$(*F)_end:'; \
-	echo '.global __dtb_$(*F)_end'; \
+	echo '__dtb_$(subst -,_,$(*F))_end:'; \
+	echo '.global __dtb_$(subst -,_,$(*F))_end'; \
 	echo '.balign STRUCT_ALIGNMENT'; \
 ) > $@
 
diff --git a/scripts/basic/fixdep.c b/scripts/basic/fixdep.c
index fa3d39b6f23b..449b68c4c90c 100644
--- a/scripts/basic/fixdep.c
+++ b/scripts/basic/fixdep.c
@@ -93,14 +93,6 @@
  * (Note: it'd be easy to port over the complete mkdep state machine,
  * but I don't think the added complexity is worth it)
  */
-/*
- * Note 2: if somebody writes HELLO_CONFIG_BOOM in a file, it will depend onto
- * CONFIG_BOOM. This could seem a bug (not too hard to fix), but please do not
- * fix it! Some UserModeLinux files (look at arch/um/) call CONFIG_BOOM as
- * UML_CONFIG_BOOM, to avoid conflicts with /usr/include/linux/autoconf.h,
- * through arch/um/include/uml-config.h; this fixdep "bug" makes sure that
- * those files will have correct dependencies.
- */
 
 #include <sys/types.h>
 #include <sys/stat.h>
@@ -233,8 +225,13 @@ static int str_ends_with(const char *s, int slen, const char *sub)
 static void parse_config_file(const char *p)
 {
 	const char *q, *r;
+	const char *start = p;
 
 	while ((p = strstr(p, "CONFIG_"))) {
+		if (p > start && (isalnum(p[-1]) || p[-1] == '_')) {
+			p += 7;
+			continue;
+		}
 		p += 7;
 		q = p;
 		while (*q && (isalnum(*q) || *q == '_'))
@@ -286,8 +283,6 @@ static int is_ignored_file(const char *s, int len)
 {
 	return str_ends_with(s, len, "include/generated/autoconf.h") ||
 		str_ends_with(s, len, "include/generated/autoksyms.h") ||
-		str_ends_with(s, len, "arch/um/include/uml-config.h") ||
-		str_ends_with(s, len, "include/linux/kconfig.h") ||
 		str_ends_with(s, len, ".ver");
 }
 
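
strstr() matches anywhere in the buffer, so the old scanner recorded a dependency on CONFIG_BOOM whenever it saw UML_CONFIG_BOOM. The added guard rejects matches preceded by an identifier character. A standalone sketch of the guarded scan (the printf reporting is illustrative):

	#include <ctype.h>
	#include <stdio.h>
	#include <string.h>

	static void scan(const char *start)
	{
		const char *p = start;

		while ((p = strstr(p, "CONFIG_"))) {
			/* skip matches embedded in longer identifiers,
			 * e.g. the middle of "UML_CONFIG_BOOM" */
			if (p > start && (isalnum(p[-1]) || p[-1] == '_')) {
				p += 7;
				continue;
			}
			printf("dependency candidate: %.24s\n", p);
			p += 7;
		}
	}
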
diff --git a/scripts/bloat-o-meter b/scripts/bloat-o-meter
index 94b664817ad9..d84a5674e95e 100755
--- a/scripts/bloat-o-meter
+++ b/scripts/bloat-o-meter
@@ -15,7 +15,7 @@ signal(SIGPIPE, SIG_DFL)
 if len(sys.argv) < 3:
     sys.stderr.write("usage: %s [option] file1 file2\n" % sys.argv[0])
     sys.stderr.write("The options are:\n")
-    sys.stderr.write("-c cateogrize output based on symbole type\n")
+    sys.stderr.write("-c categorize output based on symbol type\n")
     sys.stderr.write("-d Show delta of Data Section\n")
     sys.stderr.write("-t Show delta of text Section\n")
     sys.exit(-1)
diff --git a/sound/core/seq/seq_clientmgr.c b/sound/core/seq/seq_clientmgr.c
index 04d4db44fae5..918338dea5b9 100644
--- a/sound/core/seq/seq_clientmgr.c
+++ b/sound/core/seq/seq_clientmgr.c
@@ -910,7 +910,8 @@ int snd_seq_dispatch_event(struct snd_seq_event_cell *cell, int atomic, int hop)
 static int snd_seq_client_enqueue_event(struct snd_seq_client *client,
 					struct snd_seq_event *event,
 					struct file *file, int blocking,
-					int atomic, int hop)
+					int atomic, int hop,
+					struct mutex *mutexp)
 {
 	struct snd_seq_event_cell *cell;
 	int err;
@@ -948,7 +949,8 @@ static int snd_seq_client_enqueue_event(struct snd_seq_client *client,
 		return -ENXIO; /* queue is not allocated */
 
 	/* allocate an event cell */
-	err = snd_seq_event_dup(client->pool, event, &cell, !blocking || atomic, file);
+	err = snd_seq_event_dup(client->pool, event, &cell, !blocking || atomic,
+				file, mutexp);
 	if (err < 0)
 		return err;
 
@@ -1017,12 +1019,11 @@ static ssize_t snd_seq_write(struct file *file, const char __user *buf,
1017 return -ENXIO; 1019 return -ENXIO;
1018 1020
1019 /* allocate the pool now if the pool is not allocated yet */ 1021 /* allocate the pool now if the pool is not allocated yet */
1022 mutex_lock(&client->ioctl_mutex);
1020 if (client->pool->size > 0 && !snd_seq_write_pool_allocated(client)) { 1023 if (client->pool->size > 0 && !snd_seq_write_pool_allocated(client)) {
1021 mutex_lock(&client->ioctl_mutex);
1022 err = snd_seq_pool_init(client->pool); 1024 err = snd_seq_pool_init(client->pool);
1023 mutex_unlock(&client->ioctl_mutex);
1024 if (err < 0) 1025 if (err < 0)
1025 return -ENOMEM; 1026 goto out;
1026 } 1027 }
1027 1028
1028 /* only process whole events */ 1029 /* only process whole events */
@@ -1073,7 +1074,7 @@ static ssize_t snd_seq_write(struct file *file, const char __user *buf,
1073 /* ok, enqueue it */ 1074 /* ok, enqueue it */
1074 err = snd_seq_client_enqueue_event(client, &event, file, 1075 err = snd_seq_client_enqueue_event(client, &event, file,
1075 !(file->f_flags & O_NONBLOCK), 1076 !(file->f_flags & O_NONBLOCK),
1076 0, 0); 1077 0, 0, &client->ioctl_mutex);
1077 if (err < 0) 1078 if (err < 0)
1078 break; 1079 break;
1079 1080
@@ -1084,6 +1085,8 @@ static ssize_t snd_seq_write(struct file *file, const char __user *buf,
1084 written += len; 1085 written += len;
1085 } 1086 }
1086 1087
1088 out:
1089 mutex_unlock(&client->ioctl_mutex);
1087 return written ? written : err; 1090 return written ? written : err;
1088} 1091}
1089 1092
@@ -1838,9 +1841,11 @@ static int snd_seq_ioctl_set_client_pool(struct snd_seq_client *client,
1838 (! snd_seq_write_pool_allocated(client) || 1841 (! snd_seq_write_pool_allocated(client) ||
1839 info->output_pool != client->pool->size)) { 1842 info->output_pool != client->pool->size)) {
1840 if (snd_seq_write_pool_allocated(client)) { 1843 if (snd_seq_write_pool_allocated(client)) {
1844 /* is the pool in use? */
1845 if (atomic_read(&client->pool->counter))
1846 return -EBUSY;
1841 /* remove all existing cells */ 1847 /* remove all existing cells */
1842 snd_seq_pool_mark_closing(client->pool); 1848 snd_seq_pool_mark_closing(client->pool);
1843 snd_seq_queue_client_leave_cells(client->number);
1844 snd_seq_pool_done(client->pool); 1849 snd_seq_pool_done(client->pool);
1845 } 1850 }
1846 client->pool->size = info->output_pool; 1851 client->pool->size = info->output_pool;
@@ -2260,7 +2265,8 @@ static int kernel_client_enqueue(int client, struct snd_seq_event *ev,
2260 if (! cptr->accept_output) 2265 if (! cptr->accept_output)
2261 result = -EPERM; 2266 result = -EPERM;
2262 else /* send it */ 2267 else /* send it */
2263 result = snd_seq_client_enqueue_event(cptr, ev, file, blocking, atomic, hop); 2268 result = snd_seq_client_enqueue_event(cptr, ev, file, blocking,
2269 atomic, hop, NULL);
2264 2270
2265 snd_seq_client_unlock(cptr); 2271 snd_seq_client_unlock(cptr);
2266 return result; 2272 return result;
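
The set_client_pool hunk above now refuses to resize the output pool with -EBUSY while pool->counter shows cells still in flight, instead of forcibly evicting queued cells via snd_seq_queue_client_leave_cells(). A hedged userspace sketch of the same refuse-while-referenced guard (struct layout and cell size are illustrative):

#include <errno.h>
#include <stdatomic.h>
#include <stdlib.h>

/* Illustrative pool with a counter of cells currently handed out. */
struct event_pool {
	atomic_int in_use;	/* cells currently allocated from the pool */
	size_t     size;
	void      *cells;
};

/* Refuse to tear down and rebuild the pool while any cell is live. */
static int pool_resize(struct event_pool *p, size_t new_size)
{
	if (atomic_load(&p->in_use) > 0)
		return -EBUSY;	/* same guard as the set_client_pool fix */

	free(p->cells);
	p->cells = calloc(new_size, 64);	/* 64-byte cells, illustrative */
	if (!p->cells)
		return -ENOMEM;
	p->size = new_size;
	return 0;
}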
diff --git a/sound/core/seq/seq_fifo.c b/sound/core/seq/seq_fifo.c
index a8c2822e0198..72c0302a55d2 100644
--- a/sound/core/seq/seq_fifo.c
+++ b/sound/core/seq/seq_fifo.c
@@ -125,7 +125,7 @@ int snd_seq_fifo_event_in(struct snd_seq_fifo *f,
125 return -EINVAL; 125 return -EINVAL;
126 126
127 snd_use_lock_use(&f->use_lock); 127 snd_use_lock_use(&f->use_lock);
128 err = snd_seq_event_dup(f->pool, event, &cell, 1, NULL); /* always non-blocking */ 128 err = snd_seq_event_dup(f->pool, event, &cell, 1, NULL, NULL); /* always non-blocking */
129 if (err < 0) { 129 if (err < 0) {
130 if ((err == -ENOMEM) || (err == -EAGAIN)) 130 if ((err == -ENOMEM) || (err == -EAGAIN))
131 atomic_inc(&f->overflow); 131 atomic_inc(&f->overflow);
diff --git a/sound/core/seq/seq_memory.c b/sound/core/seq/seq_memory.c
index f763682584a8..ab1112e90f88 100644
--- a/sound/core/seq/seq_memory.c
+++ b/sound/core/seq/seq_memory.c
@@ -220,7 +220,8 @@ void snd_seq_cell_free(struct snd_seq_event_cell * cell)
220 */ 220 */
221static int snd_seq_cell_alloc(struct snd_seq_pool *pool, 221static int snd_seq_cell_alloc(struct snd_seq_pool *pool,
222 struct snd_seq_event_cell **cellp, 222 struct snd_seq_event_cell **cellp,
223 int nonblock, struct file *file) 223 int nonblock, struct file *file,
224 struct mutex *mutexp)
224{ 225{
225 struct snd_seq_event_cell *cell; 226 struct snd_seq_event_cell *cell;
226 unsigned long flags; 227 unsigned long flags;
@@ -244,7 +245,11 @@ static int snd_seq_cell_alloc(struct snd_seq_pool *pool,
244 set_current_state(TASK_INTERRUPTIBLE); 245 set_current_state(TASK_INTERRUPTIBLE);
245 add_wait_queue(&pool->output_sleep, &wait); 246 add_wait_queue(&pool->output_sleep, &wait);
246 spin_unlock_irq(&pool->lock); 247 spin_unlock_irq(&pool->lock);
248 if (mutexp)
249 mutex_unlock(mutexp);
247 schedule(); 250 schedule();
251 if (mutexp)
252 mutex_lock(mutexp);
248 spin_lock_irq(&pool->lock); 253 spin_lock_irq(&pool->lock);
249 remove_wait_queue(&pool->output_sleep, &wait); 254 remove_wait_queue(&pool->output_sleep, &wait);
250 /* interrupted? */ 255 /* interrupted? */
@@ -287,7 +292,7 @@ __error:
287 */ 292 */
288int snd_seq_event_dup(struct snd_seq_pool *pool, struct snd_seq_event *event, 293int snd_seq_event_dup(struct snd_seq_pool *pool, struct snd_seq_event *event,
289 struct snd_seq_event_cell **cellp, int nonblock, 294 struct snd_seq_event_cell **cellp, int nonblock,
290 struct file *file) 295 struct file *file, struct mutex *mutexp)
291{ 296{
292 int ncells, err; 297 int ncells, err;
293 unsigned int extlen; 298 unsigned int extlen;
@@ -304,7 +309,7 @@ int snd_seq_event_dup(struct snd_seq_pool *pool, struct snd_seq_event *event,
304 if (ncells >= pool->total_elements) 309 if (ncells >= pool->total_elements)
305 return -ENOMEM; 310 return -ENOMEM;
306 311
307 err = snd_seq_cell_alloc(pool, &cell, nonblock, file); 312 err = snd_seq_cell_alloc(pool, &cell, nonblock, file, mutexp);
308 if (err < 0) 313 if (err < 0)
309 return err; 314 return err;
310 315
@@ -330,7 +335,8 @@ int snd_seq_event_dup(struct snd_seq_pool *pool, struct snd_seq_event *event,
330 int size = sizeof(struct snd_seq_event); 335 int size = sizeof(struct snd_seq_event);
331 if (len < size) 336 if (len < size)
332 size = len; 337 size = len;
333 err = snd_seq_cell_alloc(pool, &tmp, nonblock, file); 338 err = snd_seq_cell_alloc(pool, &tmp, nonblock, file,
339 mutexp);
334 if (err < 0) 340 if (err < 0)
335 goto __error; 341 goto __error;
336 if (cell->event.data.ext.ptr == NULL) 342 if (cell->event.data.ext.ptr == NULL)
diff --git a/sound/core/seq/seq_memory.h b/sound/core/seq/seq_memory.h
index 32f959c17786..3abe306c394a 100644
--- a/sound/core/seq/seq_memory.h
+++ b/sound/core/seq/seq_memory.h
@@ -66,7 +66,8 @@ struct snd_seq_pool {
66void snd_seq_cell_free(struct snd_seq_event_cell *cell); 66void snd_seq_cell_free(struct snd_seq_event_cell *cell);
67 67
68int snd_seq_event_dup(struct snd_seq_pool *pool, struct snd_seq_event *event, 68int snd_seq_event_dup(struct snd_seq_pool *pool, struct snd_seq_event *event,
69 struct snd_seq_event_cell **cellp, int nonblock, struct file *file); 69 struct snd_seq_event_cell **cellp, int nonblock,
70 struct file *file, struct mutex *mutexp);
70 71
71/* return number of unused (free) cells */ 72/* return number of unused (free) cells */
72static inline int snd_seq_unused_cells(struct snd_seq_pool *pool) 73static inline int snd_seq_unused_cells(struct snd_seq_pool *pool)
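
The mutexp plumbing threaded through snd_seq_event_dup() exists so that snd_seq_cell_alloc() can drop the caller's ioctl_mutex around schedule() while it sleeps waiting for a free cell; otherwise a blocking write would stall every ioctl (or deadlock against a pool operation) for the duration of the sleep. A hedged pthreads sketch of the same drop-the-outer-lock-across-a-wait shape (all names here are illustrative):

#include <pthread.h>

/* Illustrative resource pool guarded by an inner lock + condition. */
struct pool {
	pthread_mutex_t lock;
	pthread_cond_t  space;
	int             free_cells;
};

/*
 * Wait for a free cell. If the caller holds an outer mutex (outerp),
 * release it for the duration of the sleep so other threads are not
 * blocked behind us, then reacquire it before returning -- the same
 * shape as snd_seq_cell_alloc() unlocking mutexp around schedule().
 */
static int cell_alloc(struct pool *p, pthread_mutex_t *outerp)
{
	pthread_mutex_lock(&p->lock);
	while (p->free_cells == 0) {
		if (outerp)
			pthread_mutex_unlock(outerp);
		pthread_cond_wait(&p->space, &p->lock);
		if (outerp) {
			/* Preserve lock ordering: outer before inner. */
			pthread_mutex_unlock(&p->lock);
			pthread_mutex_lock(outerp);
			pthread_mutex_lock(&p->lock);
		}
	}
	p->free_cells--;
	pthread_mutex_unlock(&p->lock);
	return 0;
}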
diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
index 37e1cf8218ff..5b4dbcec6de8 100644
--- a/sound/pci/hda/patch_conexant.c
+++ b/sound/pci/hda/patch_conexant.c
@@ -957,6 +957,8 @@ static const struct snd_pci_quirk cxt5066_fixups[] = {
957 SND_PCI_QUIRK(0x1025, 0x054c, "Acer Aspire 3830TG", CXT_FIXUP_ASPIRE_DMIC), 957 SND_PCI_QUIRK(0x1025, 0x054c, "Acer Aspire 3830TG", CXT_FIXUP_ASPIRE_DMIC),
958 SND_PCI_QUIRK(0x1025, 0x054f, "Acer Aspire 4830T", CXT_FIXUP_ASPIRE_DMIC), 958 SND_PCI_QUIRK(0x1025, 0x054f, "Acer Aspire 4830T", CXT_FIXUP_ASPIRE_DMIC),
959 SND_PCI_QUIRK(0x103c, 0x8079, "HP EliteBook 840 G3", CXT_FIXUP_HP_DOCK), 959 SND_PCI_QUIRK(0x103c, 0x8079, "HP EliteBook 840 G3", CXT_FIXUP_HP_DOCK),
960 SND_PCI_QUIRK(0x103c, 0x807C, "HP EliteBook 820 G3", CXT_FIXUP_HP_DOCK),
961 SND_PCI_QUIRK(0x103c, 0x80FD, "HP ProBook 640 G2", CXT_FIXUP_HP_DOCK),
960 SND_PCI_QUIRK(0x103c, 0x8174, "HP Spectre x360", CXT_FIXUP_HP_SPECTRE), 962 SND_PCI_QUIRK(0x103c, 0x8174, "HP Spectre x360", CXT_FIXUP_HP_SPECTRE),
961 SND_PCI_QUIRK(0x103c, 0x8115, "HP Z1 Gen3", CXT_FIXUP_HP_GATE_MIC), 963 SND_PCI_QUIRK(0x103c, 0x8115, "HP Z1 Gen3", CXT_FIXUP_HP_GATE_MIC),
962 SND_PCI_QUIRK(0x103c, 0x814f, "HP ZBook 15u G3", CXT_FIXUP_MUTE_LED_GPIO), 964 SND_PCI_QUIRK(0x103c, 0x814f, "HP ZBook 15u G3", CXT_FIXUP_MUTE_LED_GPIO),
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index b9c93fa0a51c..9af301c6bba2 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -5274,6 +5274,16 @@ static void alc298_fixup_speaker_volume(struct hda_codec *codec,
5274 } 5274 }
5275} 5275}
5276 5276
5277/* disable DAC3 (0x06) selection on NID 0x17 as it has no volume amp control */
5278static void alc295_fixup_disable_dac3(struct hda_codec *codec,
5279 const struct hda_fixup *fix, int action)
5280{
5281 if (action == HDA_FIXUP_ACT_PRE_PROBE) {
5282 hda_nid_t conn[2] = { 0x02, 0x03 };
5283 snd_hda_override_conn_list(codec, 0x17, 2, conn);
5284 }
5285}
5286
5277/* Hook to update amp GPIO4 for automute */ 5287/* Hook to update amp GPIO4 for automute */
5278static void alc280_hp_gpio4_automute_hook(struct hda_codec *codec, 5288static void alc280_hp_gpio4_automute_hook(struct hda_codec *codec,
5279 struct hda_jack_callback *jack) 5289 struct hda_jack_callback *jack)
@@ -5466,6 +5476,7 @@ enum {
5466 ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY, 5476 ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY,
5467 ALC255_FIXUP_DELL_SPK_NOISE, 5477 ALC255_FIXUP_DELL_SPK_NOISE,
5468 ALC225_FIXUP_DELL1_MIC_NO_PRESENCE, 5478 ALC225_FIXUP_DELL1_MIC_NO_PRESENCE,
5479 ALC295_FIXUP_DISABLE_DAC3,
5469 ALC280_FIXUP_HP_HEADSET_MIC, 5480 ALC280_FIXUP_HP_HEADSET_MIC,
5470 ALC221_FIXUP_HP_FRONT_MIC, 5481 ALC221_FIXUP_HP_FRONT_MIC,
5471 ALC292_FIXUP_TPT460, 5482 ALC292_FIXUP_TPT460,
@@ -5480,10 +5491,12 @@ enum {
5480 ALC233_FIXUP_EAPD_COEF_AND_MIC_NO_PRESENCE, 5491 ALC233_FIXUP_EAPD_COEF_AND_MIC_NO_PRESENCE,
5481 ALC233_FIXUP_LENOVO_MULTI_CODECS, 5492 ALC233_FIXUP_LENOVO_MULTI_CODECS,
5482 ALC294_FIXUP_LENOVO_MIC_LOCATION, 5493 ALC294_FIXUP_LENOVO_MIC_LOCATION,
5494 ALC225_FIXUP_DELL_WYSE_MIC_NO_PRESENCE,
5483 ALC700_FIXUP_INTEL_REFERENCE, 5495 ALC700_FIXUP_INTEL_REFERENCE,
5484 ALC274_FIXUP_DELL_BIND_DACS, 5496 ALC274_FIXUP_DELL_BIND_DACS,
5485 ALC274_FIXUP_DELL_AIO_LINEOUT_VERB, 5497 ALC274_FIXUP_DELL_AIO_LINEOUT_VERB,
5486 ALC298_FIXUP_TPT470_DOCK, 5498 ALC298_FIXUP_TPT470_DOCK,
5499 ALC255_FIXUP_DUMMY_LINEOUT_VERB,
5487}; 5500};
5488 5501
5489static const struct hda_fixup alc269_fixups[] = { 5502static const struct hda_fixup alc269_fixups[] = {
@@ -6198,6 +6211,10 @@ static const struct hda_fixup alc269_fixups[] = {
6198 .chained = true, 6211 .chained = true,
6199 .chain_id = ALC298_FIXUP_DELL_AIO_MIC_NO_PRESENCE, 6212 .chain_id = ALC298_FIXUP_DELL_AIO_MIC_NO_PRESENCE,
6200 }, 6213 },
6214 [ALC295_FIXUP_DISABLE_DAC3] = {
6215 .type = HDA_FIXUP_FUNC,
6216 .v.func = alc295_fixup_disable_dac3,
6217 },
6201 [ALC256_FIXUP_DELL_INSPIRON_7559_SUBWOOFER] = { 6218 [ALC256_FIXUP_DELL_INSPIRON_7559_SUBWOOFER] = {
6202 .type = HDA_FIXUP_PINS, 6219 .type = HDA_FIXUP_PINS,
6203 .v.pins = (const struct hda_pintbl[]) { 6220 .v.pins = (const struct hda_pintbl[]) {
@@ -6283,6 +6300,18 @@ static const struct hda_fixup alc269_fixups[] = {
6283 { } 6300 { }
6284 }, 6301 },
6285 }, 6302 },
6303 [ALC225_FIXUP_DELL_WYSE_MIC_NO_PRESENCE] = {
6304 .type = HDA_FIXUP_PINS,
6305 .v.pins = (const struct hda_pintbl[]) {
6306 { 0x16, 0x0101102f }, /* Rear Headset HP */
6307 { 0x19, 0x02a1913c }, /* use as Front headset mic, without its own jack detect */
6308 { 0x1a, 0x01a19030 }, /* Rear Headset MIC */
6309 { 0x1b, 0x02011020 },
6310 { }
6311 },
6312 .chained = true,
6313 .chain_id = ALC269_FIXUP_HEADSET_MODE_NO_HP_MIC
6314 },
6286 [ALC700_FIXUP_INTEL_REFERENCE] = { 6315 [ALC700_FIXUP_INTEL_REFERENCE] = {
6287 .type = HDA_FIXUP_VERBS, 6316 .type = HDA_FIXUP_VERBS,
6288 .v.verbs = (const struct hda_verb[]) { 6317 .v.verbs = (const struct hda_verb[]) {
@@ -6319,6 +6348,15 @@ static const struct hda_fixup alc269_fixups[] = {
6319 .chained = true, 6348 .chained = true,
6320 .chain_id = ALC293_FIXUP_LENOVO_SPK_NOISE 6349 .chain_id = ALC293_FIXUP_LENOVO_SPK_NOISE
6321 }, 6350 },
6351 [ALC255_FIXUP_DUMMY_LINEOUT_VERB] = {
6352 .type = HDA_FIXUP_PINS,
6353 .v.pins = (const struct hda_pintbl[]) {
6354 { 0x14, 0x0201101f },
6355 { }
6356 },
6357 .chained = true,
6358 .chain_id = ALC255_FIXUP_DELL1_MIC_NO_PRESENCE
6359 },
6322}; 6360};
6323 6361
6324static const struct snd_pci_quirk alc269_fixup_tbl[] = { 6362static const struct snd_pci_quirk alc269_fixup_tbl[] = {
@@ -6367,10 +6405,13 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
6367 SND_PCI_QUIRK(0x1028, 0x0725, "Dell Inspiron 3162", ALC255_FIXUP_DELL_SPK_NOISE), 6405 SND_PCI_QUIRK(0x1028, 0x0725, "Dell Inspiron 3162", ALC255_FIXUP_DELL_SPK_NOISE),
6368 SND_PCI_QUIRK(0x1028, 0x075b, "Dell XPS 13 9360", ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE), 6406 SND_PCI_QUIRK(0x1028, 0x075b, "Dell XPS 13 9360", ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE),
6369 SND_PCI_QUIRK(0x1028, 0x075d, "Dell AIO", ALC298_FIXUP_SPK_VOLUME), 6407 SND_PCI_QUIRK(0x1028, 0x075d, "Dell AIO", ALC298_FIXUP_SPK_VOLUME),
6408 SND_PCI_QUIRK(0x1028, 0x07b0, "Dell Precision 7520", ALC295_FIXUP_DISABLE_DAC3),
6370 SND_PCI_QUIRK(0x1028, 0x0798, "Dell Inspiron 17 7000 Gaming", ALC256_FIXUP_DELL_INSPIRON_7559_SUBWOOFER), 6409 SND_PCI_QUIRK(0x1028, 0x0798, "Dell Inspiron 17 7000 Gaming", ALC256_FIXUP_DELL_INSPIRON_7559_SUBWOOFER),
6410 SND_PCI_QUIRK(0x1028, 0x080c, "Dell WYSE", ALC225_FIXUP_DELL_WYSE_MIC_NO_PRESENCE),
6371 SND_PCI_QUIRK(0x1028, 0x082a, "Dell XPS 13 9360", ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE), 6411 SND_PCI_QUIRK(0x1028, 0x082a, "Dell XPS 13 9360", ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE),
6372 SND_PCI_QUIRK(0x1028, 0x084b, "Dell", ALC274_FIXUP_DELL_AIO_LINEOUT_VERB), 6412 SND_PCI_QUIRK(0x1028, 0x084b, "Dell", ALC274_FIXUP_DELL_AIO_LINEOUT_VERB),
6373 SND_PCI_QUIRK(0x1028, 0x084e, "Dell", ALC274_FIXUP_DELL_AIO_LINEOUT_VERB), 6413 SND_PCI_QUIRK(0x1028, 0x084e, "Dell", ALC274_FIXUP_DELL_AIO_LINEOUT_VERB),
6414 SND_PCI_QUIRK(0x1028, 0x0873, "Dell Precision 3930", ALC255_FIXUP_DUMMY_LINEOUT_VERB),
6374 SND_PCI_QUIRK(0x1028, 0x164a, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE), 6415 SND_PCI_QUIRK(0x1028, 0x164a, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
6375 SND_PCI_QUIRK(0x1028, 0x164b, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE), 6416 SND_PCI_QUIRK(0x1028, 0x164b, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
6376 SND_PCI_QUIRK(0x103c, 0x1586, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC2), 6417 SND_PCI_QUIRK(0x103c, 0x1586, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC2),
@@ -6508,9 +6549,11 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
6508 SND_PCI_QUIRK(0x17aa, 0x2245, "Thinkpad T470", ALC298_FIXUP_TPT470_DOCK), 6549 SND_PCI_QUIRK(0x17aa, 0x2245, "Thinkpad T470", ALC298_FIXUP_TPT470_DOCK),
6509 SND_PCI_QUIRK(0x17aa, 0x2246, "Thinkpad", ALC298_FIXUP_TPT470_DOCK), 6550 SND_PCI_QUIRK(0x17aa, 0x2246, "Thinkpad", ALC298_FIXUP_TPT470_DOCK),
6510 SND_PCI_QUIRK(0x17aa, 0x2247, "Thinkpad", ALC298_FIXUP_TPT470_DOCK), 6551 SND_PCI_QUIRK(0x17aa, 0x2247, "Thinkpad", ALC298_FIXUP_TPT470_DOCK),
6552 SND_PCI_QUIRK(0x17aa, 0x2249, "Thinkpad", ALC292_FIXUP_TPT460),
6511 SND_PCI_QUIRK(0x17aa, 0x224b, "Thinkpad", ALC298_FIXUP_TPT470_DOCK), 6553 SND_PCI_QUIRK(0x17aa, 0x224b, "Thinkpad", ALC298_FIXUP_TPT470_DOCK),
6512 SND_PCI_QUIRK(0x17aa, 0x224c, "Thinkpad", ALC298_FIXUP_TPT470_DOCK), 6554 SND_PCI_QUIRK(0x17aa, 0x224c, "Thinkpad", ALC298_FIXUP_TPT470_DOCK),
6513 SND_PCI_QUIRK(0x17aa, 0x224d, "Thinkpad", ALC298_FIXUP_TPT470_DOCK), 6555 SND_PCI_QUIRK(0x17aa, 0x224d, "Thinkpad", ALC298_FIXUP_TPT470_DOCK),
6556 SND_PCI_QUIRK(0x17aa, 0x225d, "Thinkpad T480", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
6514 SND_PCI_QUIRK(0x17aa, 0x30bb, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY), 6557 SND_PCI_QUIRK(0x17aa, 0x30bb, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY),
6515 SND_PCI_QUIRK(0x17aa, 0x30e2, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY), 6558 SND_PCI_QUIRK(0x17aa, 0x30e2, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY),
6516 SND_PCI_QUIRK(0x17aa, 0x310c, "ThinkCentre Station", ALC294_FIXUP_LENOVO_MIC_LOCATION), 6559 SND_PCI_QUIRK(0x17aa, 0x310c, "ThinkCentre Station", ALC294_FIXUP_LENOVO_MIC_LOCATION),
@@ -6872,7 +6915,7 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
6872 {0x12, 0x90a60120}, 6915 {0x12, 0x90a60120},
6873 {0x14, 0x90170110}, 6916 {0x14, 0x90170110},
6874 {0x21, 0x0321101f}), 6917 {0x21, 0x0321101f}),
6875 SND_HDA_PIN_QUIRK(0x10ec0289, 0x1028, "Dell", ALC225_FIXUP_DELL1_MIC_NO_PRESENCE, 6918 SND_HDA_PIN_QUIRK(0x10ec0289, 0x1028, "Dell", ALC269_FIXUP_DELL4_MIC_NO_PRESENCE,
6876 {0x12, 0xb7a60130}, 6919 {0x12, 0xb7a60130},
6877 {0x14, 0x90170110}, 6920 {0x14, 0x90170110},
6878 {0x21, 0x04211020}), 6921 {0x21, 0x04211020}),
diff --git a/tools/arch/x86/include/asm/cpufeatures.h b/tools/arch/x86/include/asm/cpufeatures.h
index 0dfe4d3f74e2..f41079da38c5 100644
--- a/tools/arch/x86/include/asm/cpufeatures.h
+++ b/tools/arch/x86/include/asm/cpufeatures.h
@@ -213,6 +213,7 @@
213#define X86_FEATURE_SEV ( 7*32+20) /* AMD Secure Encrypted Virtualization */ 213#define X86_FEATURE_SEV ( 7*32+20) /* AMD Secure Encrypted Virtualization */
214 214
215#define X86_FEATURE_USE_IBPB ( 7*32+21) /* "" Indirect Branch Prediction Barrier enabled */ 215#define X86_FEATURE_USE_IBPB ( 7*32+21) /* "" Indirect Branch Prediction Barrier enabled */
216#define X86_FEATURE_USE_IBRS_FW ( 7*32+22) /* "" Use IBRS during runtime firmware calls */
216 217
217/* Virtualization flags: Linux defined, word 8 */ 218/* Virtualization flags: Linux defined, word 8 */
218#define X86_FEATURE_TPR_SHADOW ( 8*32+ 0) /* Intel TPR Shadow */ 219#define X86_FEATURE_TPR_SHADOW ( 8*32+ 0) /* Intel TPR Shadow */
diff --git a/tools/include/uapi/linux/kvm.h b/tools/include/uapi/linux/kvm.h
index 0fb5ef939732..7b26d4b0b052 100644
--- a/tools/include/uapi/linux/kvm.h
+++ b/tools/include/uapi/linux/kvm.h
@@ -761,6 +761,7 @@ struct kvm_ppc_resize_hpt {
761#define KVM_TRACE_PAUSE __KVM_DEPRECATED_MAIN_0x07 761#define KVM_TRACE_PAUSE __KVM_DEPRECATED_MAIN_0x07
762#define KVM_TRACE_DISABLE __KVM_DEPRECATED_MAIN_0x08 762#define KVM_TRACE_DISABLE __KVM_DEPRECATED_MAIN_0x08
763#define KVM_GET_EMULATED_CPUID _IOWR(KVMIO, 0x09, struct kvm_cpuid2) 763#define KVM_GET_EMULATED_CPUID _IOWR(KVMIO, 0x09, struct kvm_cpuid2)
764#define KVM_GET_MSR_FEATURE_INDEX_LIST _IOWR(KVMIO, 0x0a, struct kvm_msr_list)
764 765
765/* 766/*
766 * Extension capability list. 767 * Extension capability list.
@@ -934,6 +935,7 @@ struct kvm_ppc_resize_hpt {
934#define KVM_CAP_S390_AIS_MIGRATION 150 935#define KVM_CAP_S390_AIS_MIGRATION 150
935#define KVM_CAP_PPC_GET_CPU_CHAR 151 936#define KVM_CAP_PPC_GET_CPU_CHAR 151
936#define KVM_CAP_S390_BPB 152 937#define KVM_CAP_S390_BPB 152
938#define KVM_CAP_GET_MSR_FEATURES 153
937 939
938#ifdef KVM_CAP_IRQ_ROUTING 940#ifdef KVM_CAP_IRQ_ROUTING
939 941
diff --git a/tools/objtool/check.c b/tools/objtool/check.c
index 46c1d239cc1b..92b6a2c21631 100644
--- a/tools/objtool/check.c
+++ b/tools/objtool/check.c
@@ -1116,42 +1116,29 @@ static int read_unwind_hints(struct objtool_file *file)
1116 1116
1117static int read_retpoline_hints(struct objtool_file *file) 1117static int read_retpoline_hints(struct objtool_file *file)
1118{ 1118{
1119 struct section *sec, *relasec; 1119 struct section *sec;
1120 struct instruction *insn; 1120 struct instruction *insn;
1121 struct rela *rela; 1121 struct rela *rela;
1122 int i;
1123 1122
1124 sec = find_section_by_name(file->elf, ".discard.retpoline_safe"); 1123 sec = find_section_by_name(file->elf, ".rela.discard.retpoline_safe");
1125 if (!sec) 1124 if (!sec)
1126 return 0; 1125 return 0;
1127 1126
1128 relasec = sec->rela; 1127 list_for_each_entry(rela, &sec->rela_list, list) {
1129 if (!relasec) { 1128 if (rela->sym->type != STT_SECTION) {
1130 WARN("missing .rela.discard.retpoline_safe section"); 1129 WARN("unexpected relocation symbol type in %s", sec->name);
1131 return -1;
1132 }
1133
1134 if (sec->len % sizeof(unsigned long)) {
1135 WARN("retpoline_safe size mismatch: %d %ld", sec->len, sizeof(unsigned long));
1136 return -1;
1137 }
1138
1139 for (i = 0; i < sec->len / sizeof(unsigned long); i++) {
1140 rela = find_rela_by_dest(sec, i * sizeof(unsigned long));
1141 if (!rela) {
1142 WARN("can't find rela for retpoline_safe[%d]", i);
1143 return -1; 1130 return -1;
1144 } 1131 }
1145 1132
1146 insn = find_insn(file, rela->sym->sec, rela->addend); 1133 insn = find_insn(file, rela->sym->sec, rela->addend);
1147 if (!insn) { 1134 if (!insn) {
1148 WARN("can't find insn for retpoline_safe[%d]", i); 1135 WARN("bad .discard.retpoline_safe entry");
1149 return -1; 1136 return -1;
1150 } 1137 }
1151 1138
1152 if (insn->type != INSN_JUMP_DYNAMIC && 1139 if (insn->type != INSN_JUMP_DYNAMIC &&
1153 insn->type != INSN_CALL_DYNAMIC) { 1140 insn->type != INSN_CALL_DYNAMIC) {
1154 WARN_FUNC("retpoline_safe hint not a indirect jump/call", 1141 WARN_FUNC("retpoline_safe hint not an indirect jump/call",
1155 insn->sec, insn->offset); 1142 insn->sec, insn->offset);
1156 return -1; 1143 return -1;
1157 } 1144 }
diff --git a/tools/perf/Documentation/perf-kallsyms.txt b/tools/perf/Documentation/perf-kallsyms.txt
index 954ea9e21236..cf9f4040ea5c 100644
--- a/tools/perf/Documentation/perf-kallsyms.txt
+++ b/tools/perf/Documentation/perf-kallsyms.txt
@@ -8,7 +8,7 @@ perf-kallsyms - Searches running kernel for symbols
8SYNOPSIS 8SYNOPSIS
9-------- 9--------
10[verse] 10[verse]
11'perf kallsyms <options> symbol_name[,symbol_name...]' 11'perf kallsyms' [<options>] symbol_name[,symbol_name...]
12 12
13DESCRIPTION 13DESCRIPTION
14----------- 14-----------
diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c
index bf4ca749d1ac..a217623fec2e 100644
--- a/tools/perf/builtin-record.c
+++ b/tools/perf/builtin-record.c
@@ -881,6 +881,15 @@ static int __cmd_record(struct record *rec, int argc, const char **argv)
881 } 881 }
882 } 882 }
883 883
884 /*
885 * If we have just a single event and are sending data 
886 * through a pipe, we need to force the id allocation, 
887 * because we synthesize the event name through the pipe 
888 * and need the id for that.
889 */
890 if (data->is_pipe && rec->evlist->nr_entries == 1)
891 rec->opts.sample_id = true;
892
884 if (record__open(rec) != 0) { 893 if (record__open(rec) != 0) {
885 err = -1; 894 err = -1;
886 goto out_child; 895 goto out_child;
diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c
index 98bf9d32f222..54a4c152edb3 100644
--- a/tools/perf/builtin-stat.c
+++ b/tools/perf/builtin-stat.c
@@ -917,7 +917,7 @@ static void print_metric_csv(void *ctx,
917 char buf[64], *vals, *ends; 917 char buf[64], *vals, *ends;
918 918
919 if (unit == NULL || fmt == NULL) { 919 if (unit == NULL || fmt == NULL) {
920 fprintf(out, "%s%s%s%s", csv_sep, csv_sep, csv_sep, csv_sep); 920 fprintf(out, "%s%s", csv_sep, csv_sep);
921 return; 921 return;
922 } 922 }
923 snprintf(buf, sizeof(buf), fmt, val); 923 snprintf(buf, sizeof(buf), fmt, val);
diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c
index b7c823ba8374..35ac016fcb98 100644
--- a/tools/perf/builtin-top.c
+++ b/tools/perf/builtin-top.c
@@ -991,7 +991,7 @@ static int perf_top_overwrite_fallback(struct perf_top *top,
991 evlist__for_each_entry(evlist, counter) 991 evlist__for_each_entry(evlist, counter)
992 counter->attr.write_backward = false; 992 counter->attr.write_backward = false;
993 opts->overwrite = false; 993 opts->overwrite = false;
994 ui__warning("fall back to non-overwrite mode\n"); 994 pr_debug2("fall back to non-overwrite mode\n");
995 return 1; 995 return 1;
996} 996}
997 997
diff --git a/tools/perf/perf.h b/tools/perf/perf.h
index cfe46236a5e5..57b9b342d533 100644
--- a/tools/perf/perf.h
+++ b/tools/perf/perf.h
@@ -61,6 +61,7 @@ struct record_opts {
61 bool tail_synthesize; 61 bool tail_synthesize;
62 bool overwrite; 62 bool overwrite;
63 bool ignore_missing_thread; 63 bool ignore_missing_thread;
64 bool sample_id;
64 unsigned int freq; 65 unsigned int freq;
65 unsigned int mmap_pages; 66 unsigned int mmap_pages;
66 unsigned int auxtrace_mmap_pages; 67 unsigned int auxtrace_mmap_pages;
diff --git a/tools/perf/ui/browsers/annotate.c b/tools/perf/ui/browsers/annotate.c
index 286427975112..fbf927cf775d 100644
--- a/tools/perf/ui/browsers/annotate.c
+++ b/tools/perf/ui/browsers/annotate.c
@@ -327,7 +327,32 @@ static void annotate_browser__draw_current_jump(struct ui_browser *browser)
327 if (!disasm_line__is_valid_jump(cursor, sym)) 327 if (!disasm_line__is_valid_jump(cursor, sym))
328 return; 328 return;
329 329
330 /*
331 * This was first seen with a gcc function, _cpp_lex_token, that 
332 * has the usual jumps:
333 *
334 * │1159e6c: ↓ jne 115aa32 <_cpp_lex_token@@Base+0xf92>
335 *
336 * I.e. jumps to a label inside that function (_cpp_lex_token), and
337 * those work, but also this kind: 
338 *
339 * │1159e8b: ↓ jne c469be <cpp_named_operator2name@@Base+0xa72>
340 *
341 * I.e. jumps to another function, outside _cpp_lex_token. These are 
342 * not handled correctly and, as a side effect, generate references 
343 * to ab->offsets[] entries that are set to NULL, so to make this code 
344 * more robust, check for that here. 
345 *
346 * A proper fix will be put in place, looking at the function 
347 * name right after the '<' token and probably treating this like a
348 * 'call' instruction.
349 */
330 target = ab->offsets[cursor->ops.target.offset]; 350 target = ab->offsets[cursor->ops.target.offset];
351 if (target == NULL) {
352 ui_helpline__printf("WARN: jump target inconsistency, press 'o', ab->offsets[%#x] = NULL\n",
353 cursor->ops.target.offset);
354 return;
355 }
331 356
332 bcursor = browser_line(&cursor->al); 357 bcursor = browser_line(&cursor->al);
333 btarget = browser_line(target); 358 btarget = browser_line(target);
diff --git a/tools/perf/util/auxtrace.c b/tools/perf/util/auxtrace.c
index 9faf3b5367db..6470ea2aa25e 100644
--- a/tools/perf/util/auxtrace.c
+++ b/tools/perf/util/auxtrace.c
@@ -60,6 +60,12 @@
60#include "sane_ctype.h" 60#include "sane_ctype.h"
61#include "symbol/kallsyms.h" 61#include "symbol/kallsyms.h"
62 62
63static bool auxtrace__dont_decode(struct perf_session *session)
64{
65 return !session->itrace_synth_opts ||
66 session->itrace_synth_opts->dont_decode;
67}
68
63int auxtrace_mmap__mmap(struct auxtrace_mmap *mm, 69int auxtrace_mmap__mmap(struct auxtrace_mmap *mm,
64 struct auxtrace_mmap_params *mp, 70 struct auxtrace_mmap_params *mp,
65 void *userpg, int fd) 71 void *userpg, int fd)
@@ -762,6 +768,9 @@ int auxtrace_queues__process_index(struct auxtrace_queues *queues,
762 size_t i; 768 size_t i;
763 int err; 769 int err;
764 770
771 if (auxtrace__dont_decode(session))
772 return 0;
773
765 list_for_each_entry(auxtrace_index, &session->auxtrace_index, list) { 774 list_for_each_entry(auxtrace_index, &session->auxtrace_index, list) {
766 for (i = 0; i < auxtrace_index->nr; i++) { 775 for (i = 0; i < auxtrace_index->nr; i++) {
767 ent = &auxtrace_index->entries[i]; 776 ent = &auxtrace_index->entries[i];
@@ -892,12 +901,6 @@ out_free:
892 return err; 901 return err;
893} 902}
894 903
895static bool auxtrace__dont_decode(struct perf_session *session)
896{
897 return !session->itrace_synth_opts ||
898 session->itrace_synth_opts->dont_decode;
899}
900
901int perf_event__process_auxtrace_info(struct perf_tool *tool __maybe_unused, 904int perf_event__process_auxtrace_info(struct perf_tool *tool __maybe_unused,
902 union perf_event *event, 905 union perf_event *event,
903 struct perf_session *session) 906 struct perf_session *session)
diff --git a/tools/perf/util/record.c b/tools/perf/util/record.c
index 1e97937b03a9..6f09e4962dad 100644
--- a/tools/perf/util/record.c
+++ b/tools/perf/util/record.c
@@ -137,6 +137,7 @@ void perf_evlist__config(struct perf_evlist *evlist, struct record_opts *opts,
137 struct perf_evsel *evsel; 137 struct perf_evsel *evsel;
138 bool use_sample_identifier = false; 138 bool use_sample_identifier = false;
139 bool use_comm_exec; 139 bool use_comm_exec;
140 bool sample_id = opts->sample_id;
140 141
141 /* 142 /*
142 * Set the evsel leader links before we configure attributes, 143 * Set the evsel leader links before we configure attributes,
@@ -163,8 +164,7 @@ void perf_evlist__config(struct perf_evlist *evlist, struct record_opts *opts,
163 * match the id. 164 * match the id.
164 */ 165 */
165 use_sample_identifier = perf_can_sample_identifier(); 166 use_sample_identifier = perf_can_sample_identifier();
166 evlist__for_each_entry(evlist, evsel) 167 sample_id = true;
167 perf_evsel__set_sample_id(evsel, use_sample_identifier);
168 } else if (evlist->nr_entries > 1) { 168 } else if (evlist->nr_entries > 1) {
169 struct perf_evsel *first = perf_evlist__first(evlist); 169 struct perf_evsel *first = perf_evlist__first(evlist);
170 170
@@ -174,6 +174,10 @@ void perf_evlist__config(struct perf_evlist *evlist, struct record_opts *opts,
174 use_sample_identifier = perf_can_sample_identifier(); 174 use_sample_identifier = perf_can_sample_identifier();
175 break; 175 break;
176 } 176 }
177 sample_id = true;
178 }
179
180 if (sample_id) {
177 evlist__for_each_entry(evlist, evsel) 181 evlist__for_each_entry(evlist, evsel)
178 perf_evsel__set_sample_id(evsel, use_sample_identifier); 182 perf_evsel__set_sample_id(evsel, use_sample_identifier);
179 } 183 }
diff --git a/tools/perf/util/trigger.h b/tools/perf/util/trigger.h
index 370138e7e35c..88223bc7c82b 100644
--- a/tools/perf/util/trigger.h
+++ b/tools/perf/util/trigger.h
@@ -12,7 +12,7 @@
12 * States and transits: 12 * States and transits:
13 * 13 *
14 * 14 *
15 * OFF--(on)--> READY --(hit)--> HIT 15 * OFF--> ON --> READY --(hit)--> HIT
16 * ^ | 16 * ^ |
17 * | (ready) 17 * | (ready)
18 * | | 18 * | |
@@ -27,8 +27,9 @@ struct trigger {
27 volatile enum { 27 volatile enum {
28 TRIGGER_ERROR = -2, 28 TRIGGER_ERROR = -2,
29 TRIGGER_OFF = -1, 29 TRIGGER_OFF = -1,
30 TRIGGER_READY = 0, 30 TRIGGER_ON = 0,
31 TRIGGER_HIT = 1, 31 TRIGGER_READY = 1,
32 TRIGGER_HIT = 2,
32 } state; 33 } state;
33 const char *name; 34 const char *name;
34}; 35};
@@ -50,7 +51,7 @@ static inline bool trigger_is_error(struct trigger *t)
50static inline void trigger_on(struct trigger *t) 51static inline void trigger_on(struct trigger *t)
51{ 52{
52 TRIGGER_WARN_ONCE(t, TRIGGER_OFF); 53 TRIGGER_WARN_ONCE(t, TRIGGER_OFF);
53 t->state = TRIGGER_READY; 54 t->state = TRIGGER_ON;
54} 55}
55 56
56static inline void trigger_ready(struct trigger *t) 57static inline void trigger_ready(struct trigger *t)
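
The extra TRIGGER_ON state separates "enabled" from "armed": trigger_on() now parks the trigger in ON, and only trigger_ready() moves it to READY, so a hit can no longer be observed before the measurement window opens. A compact sketch, condensed from the header above (the single global state and simplified transition checks are illustrative):

#include <stdbool.h>
#include <stdio.h>

enum trigger_state {
	TRIGGER_ERROR = -2,
	TRIGGER_OFF   = -1,
	TRIGGER_ON    = 0,
	TRIGGER_READY = 1,
	TRIGGER_HIT   = 2,
};

static enum trigger_state state = TRIGGER_OFF;

static void trigger_on(void)
{
	if (state == TRIGGER_OFF)
		state = TRIGGER_ON;	/* enabled, but not yet armed */
}

static void trigger_ready(void)
{
	if (state >= TRIGGER_ON)	/* enabled: ON, READY or HIT */
		state = TRIGGER_READY;
}

static bool trigger_hit(void)
{
	if (state != TRIGGER_READY)
		return false;		/* hits only count once armed */
	state = TRIGGER_HIT;
	return true;
}

int main(void)
{
	trigger_on();
	printf("hit before ready: %d\n", trigger_hit());	/* 0: still ON */
	trigger_ready();
	printf("hit after ready:  %d\n", trigger_hit());	/* 1: READY -> HIT */
	return 0;
}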
diff --git a/tools/testing/selftests/powerpc/mm/subpage_prot.c b/tools/testing/selftests/powerpc/mm/subpage_prot.c
index 35ade7406dcd..3ae77ba93208 100644
--- a/tools/testing/selftests/powerpc/mm/subpage_prot.c
+++ b/tools/testing/selftests/powerpc/mm/subpage_prot.c
@@ -135,6 +135,16 @@ static int run_test(void *addr, unsigned long size)
135 return 0; 135 return 0;
136} 136}
137 137
138static int syscall_available(void)
139{
140 int rc;
141
142 errno = 0;
143 rc = syscall(__NR_subpage_prot, 0, 0, 0);
144
145 return rc == 0 || (errno != ENOENT && errno != ENOSYS);
146}
147
138int test_anon(void) 148int test_anon(void)
139{ 149{
140 unsigned long align; 150 unsigned long align;
@@ -145,6 +155,8 @@ int test_anon(void)
145 void *mallocblock; 155 void *mallocblock;
146 unsigned long mallocsize; 156 unsigned long mallocsize;
147 157
158 SKIP_IF(!syscall_available());
159
148 if (getpagesize() != 0x10000) { 160 if (getpagesize() != 0x10000) {
149 fprintf(stderr, "Kernel page size must be 64K!\n"); 161 fprintf(stderr, "Kernel page size must be 64K!\n");
150 return 1; 162 return 1;
@@ -180,6 +192,8 @@ int test_file(void)
180 off_t filesize; 192 off_t filesize;
181 int fd; 193 int fd;
182 194
195 SKIP_IF(!syscall_available());
196
183 fd = open(file_name, O_RDWR); 197 fd = open(file_name, O_RDWR);
184 if (fd == -1) { 198 if (fd == -1) {
185 perror("failed to open file"); 199 perror("failed to open file");
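
syscall_available() above encodes a reusable probe: invoke the syscall with harmless arguments and treat ENOENT/ENOSYS as "not provided by this kernel", while anything else, including success or an EINVAL-style rejection, proves the entry point exists. A generic sketch of the same probe, demonstrated against getpid rather than the powerpc-only subpage_prot call:

#include <errno.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>

/* Probe whether the kernel implements a given syscall number. */
static int probe_syscall(long nr)
{
	long rc;

	errno = 0;
	rc = syscall(nr, 0, 0, 0);	/* harmless no-op arguments */

	/* ENOSYS/ENOENT mean "no such syscall"; any other outcome,
	 * including success, means the syscall is present. */
	return rc == 0 || (errno != ENOENT && errno != ENOSYS);
}

int main(void)
{
	printf("getpid available: %d\n", probe_syscall(SYS_getpid));
	return 0;
}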
diff --git a/tools/testing/selftests/powerpc/tm/Makefile b/tools/testing/selftests/powerpc/tm/Makefile
index a23453943ad2..5c72ff978f27 100644
--- a/tools/testing/selftests/powerpc/tm/Makefile
+++ b/tools/testing/selftests/powerpc/tm/Makefile
@@ -16,7 +16,7 @@ $(OUTPUT)/tm-syscall: tm-syscall-asm.S
16$(OUTPUT)/tm-syscall: CFLAGS += -I../../../../../usr/include 16$(OUTPUT)/tm-syscall: CFLAGS += -I../../../../../usr/include
17$(OUTPUT)/tm-tmspr: CFLAGS += -pthread 17$(OUTPUT)/tm-tmspr: CFLAGS += -pthread
18$(OUTPUT)/tm-vmx-unavail: CFLAGS += -pthread -m64 18$(OUTPUT)/tm-vmx-unavail: CFLAGS += -pthread -m64
19$(OUTPUT)/tm-resched-dscr: ../pmu/lib.o 19$(OUTPUT)/tm-resched-dscr: ../pmu/lib.c
20$(OUTPUT)/tm-unavailable: CFLAGS += -O0 -pthread -m64 -Wno-error=uninitialized -mvsx 20$(OUTPUT)/tm-unavailable: CFLAGS += -O0 -pthread -m64 -Wno-error=uninitialized -mvsx
21$(OUTPUT)/tm-trap: CFLAGS += -O0 -pthread -m64 21$(OUTPUT)/tm-trap: CFLAGS += -O0 -pthread -m64
22 22
diff --git a/tools/testing/selftests/powerpc/tm/tm-trap.c b/tools/testing/selftests/powerpc/tm/tm-trap.c
index 5d92c23ee6cb..179d592f0073 100644
--- a/tools/testing/selftests/powerpc/tm/tm-trap.c
+++ b/tools/testing/selftests/powerpc/tm/tm-trap.c
@@ -255,6 +255,8 @@ int tm_trap_test(void)
255 255
256 struct sigaction trap_sa; 256 struct sigaction trap_sa;
257 257
258 SKIP_IF(!have_htm());
259
258 trap_sa.sa_flags = SA_SIGINFO; 260 trap_sa.sa_flags = SA_SIGINFO;
259 trap_sa.sa_sigaction = trap_signal_handler; 261 trap_sa.sa_sigaction = trap_signal_handler;
260 sigaction(SIGTRAP, &trap_sa, NULL); 262 sigaction(SIGTRAP, &trap_sa, NULL);
diff --git a/tools/testing/selftests/vm/run_vmtests b/tools/testing/selftests/vm/run_vmtests
index d2561895a021..22d564673830 100755
--- a/tools/testing/selftests/vm/run_vmtests
+++ b/tools/testing/selftests/vm/run_vmtests
@@ -2,25 +2,33 @@
2# SPDX-License-Identifier: GPL-2.0 2# SPDX-License-Identifier: GPL-2.0
3#please run as root 3#please run as root
4 4
5#we need 256M, below is the size in kB
6needmem=262144
7mnt=./huge 5mnt=./huge
8exitcode=0 6exitcode=0
9 7
10#get pagesize and freepages from /proc/meminfo 8#get huge pagesize and freepages from /proc/meminfo
11while read name size unit; do 9while read name size unit; do
12 if [ "$name" = "HugePages_Free:" ]; then 10 if [ "$name" = "HugePages_Free:" ]; then
13 freepgs=$size 11 freepgs=$size
14 fi 12 fi
15 if [ "$name" = "Hugepagesize:" ]; then 13 if [ "$name" = "Hugepagesize:" ]; then
16 pgsize=$size 14 hpgsize_KB=$size
17 fi 15 fi
18done < /proc/meminfo 16done < /proc/meminfo
19 17
18# Simple hugetlbfs tests have a hardcoded minimum requirement of
19# huge pages totaling 256MB (262144KB) in size. The userfaultfd
20# hugetlb test requires a minimum of 2 * nr_cpus huge pages. Take
21# both of these requirements into account and attempt to increase
22# the number of huge pages available. 
23nr_cpus=$(nproc)
24hpgsize_MB=$((hpgsize_KB / 1024))
25half_ufd_size_MB=$((((nr_cpus * hpgsize_MB + 127) / 128) * 128))
26needmem_KB=$((half_ufd_size_MB * 2 * 1024))
27
20#set proper nr_hugepages 28#set proper nr_hugepages
21if [ -n "$freepgs" ] && [ -n "$pgsize" ]; then 29if [ -n "$freepgs" ] && [ -n "$hpgsize_KB" ]; then
22 nr_hugepgs=`cat /proc/sys/vm/nr_hugepages` 30 nr_hugepgs=`cat /proc/sys/vm/nr_hugepages`
23 needpgs=`expr $needmem / $pgsize` 31 needpgs=$((needmem_KB / hpgsize_KB))
24 tries=2 32 tries=2
25 while [ $tries -gt 0 ] && [ $freepgs -lt $needpgs ]; do 33 while [ $tries -gt 0 ] && [ $freepgs -lt $needpgs ]; do
26 lackpgs=$(( $needpgs - $freepgs )) 34 lackpgs=$(( $needpgs - $freepgs ))
@@ -107,8 +115,9 @@ fi
107echo "---------------------------" 115echo "---------------------------"
108echo "running userfaultfd_hugetlb" 116echo "running userfaultfd_hugetlb"
109echo "---------------------------" 117echo "---------------------------"
110# 256MB total huge pages == 128MB src and 128MB dst 118# Test requires source and destination huge pages. Size of source
111./userfaultfd hugetlb 128 32 $mnt/ufd_test_file 119# (half_ufd_size_MB) is passed as argument to test.
120./userfaultfd hugetlb $half_ufd_size_MB 32 $mnt/ufd_test_file
112if [ $? -ne 0 ]; then 121if [ $? -ne 0 ]; then
113 echo "[FAIL]" 122 echo "[FAIL]"
114 exitcode=1 123 exitcode=1
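
The arithmetic introduced above rounds nr_cpus * hpgsize_MB up to the next 128MB multiple (the userfaultfd test's source half), then doubles it for source plus destination. A worked example in C, assuming a hypothetical 8-CPU machine with 2MB huge pages:

#include <stdio.h>

int main(void)
{
	int nr_cpus = 8;	/* assumed; the script uses $(nproc) */
	int hpgsize_MB = 2;	/* assumed 2MB huge pages (2048KB) */

	/* Round nr_cpus * hpgsize_MB up to a multiple of 128MB. */
	int half_ufd_size_MB = ((nr_cpus * hpgsize_MB + 127) / 128) * 128;
	int needmem_KB = half_ufd_size_MB * 2 * 1024;

	/* 8 * 2 = 16MB rounds up to 128MB; 2 * 128MB = 262144KB,
	 * matching the old hardcoded minimum the script replaced. */
	printf("half_ufd_size_MB=%d needmem_KB=%d\n",
	       half_ufd_size_MB, needmem_KB);
	return 0;
}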
diff --git a/tools/testing/selftests/x86/test_vsyscall.c b/tools/testing/selftests/x86/test_vsyscall.c
index be81621446f0..0b4f1cc2291c 100644
--- a/tools/testing/selftests/x86/test_vsyscall.c
+++ b/tools/testing/selftests/x86/test_vsyscall.c
@@ -450,7 +450,7 @@ static void sigtrap(int sig, siginfo_t *info, void *ctx_void)
450 num_vsyscall_traps++; 450 num_vsyscall_traps++;
451} 451}
452 452
453static int test_native_vsyscall(void) 453static int test_emulation(void)
454{ 454{
455 time_t tmp; 455 time_t tmp;
456 bool is_native; 456 bool is_native;
@@ -458,7 +458,7 @@ static int test_native_vsyscall(void)
458 if (!vtime) 458 if (!vtime)
459 return 0; 459 return 0;
460 460
461 printf("[RUN]\tchecking for native vsyscall\n"); 461 printf("[RUN]\tchecking that vsyscalls are emulated\n");
462 sethandler(SIGTRAP, sigtrap, 0); 462 sethandler(SIGTRAP, sigtrap, 0);
463 set_eflags(get_eflags() | X86_EFLAGS_TF); 463 set_eflags(get_eflags() | X86_EFLAGS_TF);
464 vtime(&tmp); 464 vtime(&tmp);
@@ -474,11 +474,12 @@ static int test_native_vsyscall(void)
474 */ 474 */
475 is_native = (num_vsyscall_traps > 1); 475 is_native = (num_vsyscall_traps > 1);
476 476
477 printf("\tvsyscalls are %s (%d instructions in vsyscall page)\n", 477 printf("[%s]\tvsyscalls are %s (%d instructions in vsyscall page)\n",
478 (is_native ? "FAIL" : "OK"),
478 (is_native ? "native" : "emulated"), 479 (is_native ? "native" : "emulated"),
479 (int)num_vsyscall_traps); 480 (int)num_vsyscall_traps);
480 481
481 return 0; 482 return is_native;
482} 483}
483#endif 484#endif
484 485
@@ -498,7 +499,7 @@ int main(int argc, char **argv)
498 nerrs += test_vsys_r(); 499 nerrs += test_vsys_r();
499 500
500#ifdef __x86_64__ 501#ifdef __x86_64__
501 nerrs += test_native_vsyscall(); 502 nerrs += test_emulation();
502#endif 503#endif
503 504
504 return nerrs ? 1 : 0; 505 return nerrs ? 1 : 0;