-rw-r--r--  Documentation/virtual/kvm/locking.txt | 12
-rw-r--r--  MAINTAINERS | 17
-rw-r--r--  arch/mips/Makefile | 2
-rw-r--r--  arch/mips/boot/dts/mti/malta.dts | 3
-rw-r--r--  arch/mips/generic/init.c | 16
-rw-r--r--  arch/mips/include/asm/fpu_emulator.h | 13
-rw-r--r--  arch/mips/include/asm/kvm_host.h | 7
-rw-r--r--  arch/mips/include/asm/switch_to.h | 18
-rw-r--r--  arch/mips/kernel/mips-cpc.c | 11
-rw-r--r--  arch/mips/kernel/mips-r2-to-r6-emul.c | 10
-rw-r--r--  arch/mips/kernel/ptrace.c | 8
-rw-r--r--  arch/mips/kernel/r2300_fpu.S | 138
-rw-r--r--  arch/mips/kernel/r6000_fpu.S | 89
-rw-r--r--  arch/mips/kernel/relocate.c | 2
-rw-r--r--  arch/mips/kernel/setup.c | 13
-rw-r--r--  arch/mips/kernel/traps.c | 137
-rw-r--r--  arch/mips/kvm/emulate.c | 32
-rw-r--r--  arch/mips/kvm/mips.c | 5
-rw-r--r--  arch/mips/kvm/mmu.c | 4
-rw-r--r--  arch/mips/lib/dump_tlb.c | 44
-rw-r--r--  arch/mips/lib/r3k_dump_tlb.c | 18
-rw-r--r--  arch/parisc/include/uapi/asm/unistd.h | 4
-rw-r--r--  arch/parisc/kernel/drivers.c | 6
-rw-r--r--  arch/parisc/kernel/syscall.S | 66
-rw-r--r--  arch/s390/kvm/sthyi.c | 4
-rw-r--r--  arch/x86/include/asm/kvm_host.h | 3
-rw-r--r--  arch/x86/kvm/emulate.c | 2
-rw-r--r--  arch/x86/kvm/svm.c | 23
-rw-r--r--  arch/x86/kvm/vmx.c | 65
-rw-r--r--  arch/x86/kvm/x86.c | 16
-rw-r--r--  drivers/block/virtio_blk.c | 10
-rw-r--r--  drivers/char/virtio_console.c | 22
-rw-r--r--  drivers/firewire/net.c | 59
-rw-r--r--  drivers/gpio/gpio-mvebu.c | 92
-rw-r--r--  drivers/gpio/gpiolib-of.c | 14
-rw-r--r--  drivers/gpio/gpiolib.c | 57
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 9
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c | 6
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/ci_dpm.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_v10_0.c | 6
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_v11_0.c | 6
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_v6_0.c | 6
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_v8_0.c | 6
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/kv_dpm.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/si_dpm.c | 61
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vce_v3_0.c | 6
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vi.c | 2
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c | 2
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c | 3
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c | 9
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c | 17
-rw-r--r--  drivers/gpu/drm/amd/scheduler/gpu_scheduler.c | 1
-rw-r--r--  drivers/gpu/drm/amd/scheduler/sched_fence.c | 4
-rw-r--r--  drivers/gpu/drm/drm_atomic.c | 9
-rw-r--r--  drivers/gpu/drm/drm_atomic_helper.c | 4
-rw-r--r--  drivers/gpu/drm/drm_dp_mst_topology.c | 6
-rw-r--r--  drivers/gpu/drm/drm_fb_helper.c | 68
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_drv.c | 20
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_drv.h | 1
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_fb.c | 2
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.c | 4
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h | 5
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c | 10
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_fence.c | 21
-rw-r--r--  drivers/gpu/drm/i915/i915_pci.c | 7
-rw-r--r--  drivers/gpu/drm/i915/intel_bios.c | 122
-rw-r--r--  drivers/gpu/drm/i915/intel_device_info.c | 2
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c | 5
-rw-r--r--  drivers/gpu/drm/i915/intel_dp.c | 75
-rw-r--r--  drivers/gpu/drm/i915/intel_fbc.c | 4
-rw-r--r--  drivers/gpu/drm/i915/intel_pm.c | 24
-rw-r--r--  drivers/gpu/drm/imx/imx-drm-core.c | 6
-rw-r--r--  drivers/gpu/drm/imx/ipuv3-plane.c | 28
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_acpi.c | 3
-rw-r--r--  drivers/gpu/drm/radeon/ni.c | 4
-rw-r--r--  drivers/gpu/drm/radeon/radeon_dp_auxch.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/si_dpm.c | 53
-rw-r--r--  drivers/gpu/drm/rcar-du/rcar_du_kms.c | 12
-rw-r--r--  drivers/gpu/drm/sti/sti_drv.c | 22
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_display.c | 3
-rw-r--r--  drivers/gpu/ipu-v3/ipu-image-convert.c | 2
-rw-r--r--  drivers/input/mouse/focaltech.c | 6
-rw-r--r--  drivers/input/serio/i8042-x86ia64io.h | 7
-rw-r--r--  drivers/md/md.c | 10
-rw-r--r--  drivers/md/raid1.c | 26
-rw-r--r--  drivers/md/raid10.c | 7
-rw-r--r--  drivers/md/raid5-cache.c | 6
-rw-r--r--  drivers/media/usb/b2c2/flexcop-usb.c | 105
-rw-r--r--  drivers/media/usb/b2c2/flexcop-usb.h | 4
-rw-r--r--  drivers/media/usb/cpia2/cpia2_usb.c | 34
-rw-r--r--  drivers/media/usb/dvb-usb/af9005.c | 319
-rw-r--r--  drivers/media/usb/dvb-usb/cinergyT2-core.c | 90
-rw-r--r--  drivers/media/usb/dvb-usb/cinergyT2-fe.c | 100
-rw-r--r--  drivers/media/usb/dvb-usb/cxusb.c | 62
-rw-r--r--  drivers/media/usb/dvb-usb/cxusb.h | 6
-rw-r--r--  drivers/media/usb/dvb-usb/dib0700_core.c | 31
-rw-r--r--  drivers/media/usb/dvb-usb/dib0700_devices.c | 25
-rw-r--r--  drivers/media/usb/dvb-usb/dibusb-common.c | 113
-rw-r--r--  drivers/media/usb/dvb-usb/dibusb.h | 3
-rw-r--r--  drivers/media/usb/dvb-usb/digitv.c | 26
-rw-r--r--  drivers/media/usb/dvb-usb/digitv.h | 5
-rw-r--r--  drivers/media/usb/dvb-usb/dtt200u-fe.c | 128
-rw-r--r--  drivers/media/usb/dvb-usb/dtt200u.c | 120
-rw-r--r--  drivers/media/usb/dvb-usb/dtv5100.c | 10
-rw-r--r--  drivers/media/usb/dvb-usb/dw2102.c | 2
-rw-r--r--  drivers/media/usb/dvb-usb/gp8psk.c | 25
-rw-r--r--  drivers/media/usb/dvb-usb/nova-t-usb2.c | 25
-rw-r--r--  drivers/media/usb/dvb-usb/pctv452e.c | 136
-rw-r--r--  drivers/media/usb/dvb-usb/technisat-usb2.c | 16
-rw-r--r--  drivers/media/usb/s2255/s2255drv.c | 15
-rw-r--r--  drivers/media/usb/stkwebcam/stk-webcam.c | 16
-rw-r--r--  drivers/mmc/host/dw_mmc-pltfm.c | 5
-rw-r--r--  drivers/mmc/host/sdhci-msm.c | 1
-rw-r--r--  drivers/mtd/nand/gpmi-nand/gpmi-lib.c | 6
-rw-r--r--  drivers/mtd/nand/mtk_ecc.c | 19
-rw-r--r--  drivers/mtd/nand/nand_base.c | 60
-rw-r--r--  drivers/pci/host/pcie-designware.c | 7
-rw-r--r--  drivers/pci/host/pcie-qcom.c | 2
-rw-r--r--  drivers/scsi/arcmsr/arcmsr_hba.c | 9
-rw-r--r--  drivers/scsi/megaraid/megaraid_sas_base.c | 13
-rw-r--r--  drivers/scsi/scsi_debug.c | 1
-rw-r--r--  drivers/staging/media/bcm2048/radio-bcm2048.c | 2
-rw-r--r--  drivers/vfio/pci/vfio_pci.c | 33
-rw-r--r--  drivers/vfio/pci/vfio_pci_intrs.c | 2
-rw-r--r--  drivers/virtio/config.c | 12
-rw-r--r--  drivers/virtio/virtio_balloon.c | 2
-rw-r--r--  drivers/virtio/virtio_pci_legacy.c | 16
-rw-r--r--  drivers/virtio/virtio_ring.c | 16
-rw-r--r--  fs/btrfs/extent-tree.c | 3
-rw-r--r--  fs/btrfs/extent_io.c | 8
-rw-r--r--  fs/btrfs/inode.c | 13
-rw-r--r--  fs/btrfs/ioctl.c | 5
-rw-r--r--  fs/btrfs/relocation.c | 9
-rw-r--r--  fs/nfsd/netns.h | 5
-rw-r--r--  fs/nfsd/nfs4state.c | 38
-rw-r--r--  fs/overlayfs/copy_up.c | 2
-rw-r--r--  fs/overlayfs/inode.c | 3
-rw-r--r--  fs/overlayfs/super.c | 15
-rw-r--r--  include/drm/drm_plane.h | 8
-rw-r--r--  include/linux/mtd/nand.h | 2
-rw-r--r--  kernel/sched/core.c | 12
-rw-r--r--  mm/page_alloc.c | 2
-rw-r--r--  net/sunrpc/auth_gss/auth_gss.c | 13
-rw-r--r--  net/sunrpc/auth_gss/gss_krb5_crypto.c | 82
-rw-r--r--  net/sunrpc/auth_gss/svcauth_gss.c | 21
-rw-r--r--  net/sunrpc/xprtrdma/svc_rdma_backchannel.c | 12
-rw-r--r--  net/sunrpc/xprtsock.c | 1
-rw-r--r--  scripts/gcc-plugins/cyc_complexity_plugin.c | 4
-rw-r--r--  scripts/gcc-plugins/gcc-common.h | 1
-rw-r--r--  scripts/gcc-plugins/latent_entropy_plugin.c | 25
-rw-r--r--  scripts/gcc-plugins/sancov_plugin.c | 4
-rw-r--r--  tools/virtio/ringtest/Makefile | 4
-rw-r--r--  tools/virtio/ringtest/main.c | 20
-rw-r--r--  tools/virtio/ringtest/main.h | 4
-rw-r--r--  tools/virtio/ringtest/noring.c | 6
-rw-r--r--  tools/virtio/ringtest/ptr_ring.c | 22
-rw-r--r--  tools/virtio/ringtest/ring.c | 18
-rw-r--r--  tools/virtio/ringtest/virtio_ring_0_9.c | 64
-rw-r--r--  virt/kvm/eventfd.c | 22
-rw-r--r--  virt/kvm/kvm_main.c | 6
166 files changed, 2345 insertions, 1518 deletions
diff --git a/Documentation/virtual/kvm/locking.txt b/Documentation/virtual/kvm/locking.txt
index f2491a8c68b4..e5dd9f4d6100 100644
--- a/Documentation/virtual/kvm/locking.txt
+++ b/Documentation/virtual/kvm/locking.txt
@@ -4,7 +4,17 @@ KVM Lock Overview
41. Acquisition Orders 41. Acquisition Orders
5--------------------- 5---------------------
6 6
7(to be written) 7The acquisition orders for mutexes are as follows:
8
9- kvm->lock is taken outside vcpu->mutex
10
11- kvm->lock is taken outside kvm->slots_lock and kvm->irq_lock
12
13- kvm->slots_lock is taken outside kvm->irq_lock, though acquiring
14 them together is quite rare.
15
16For spinlocks, kvm_lock is taken outside kvm->mmu_lock. Everything
17else is a leaf: no other lock is taken inside the critical sections.
8 18
92: Exception 192: Exception
10------------ 20------------
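
For illustration only, not part of the patch: a minimal user-space sketch of the mutex nesting the new locking.txt text documents. The struct layouts and pthread mutexes below are hypothetical stand-ins for the kernel objects.

#include <pthread.h>

/* Hypothetical stand-ins for the KVM objects named in the text above. */
struct kvm      { pthread_mutex_t lock, slots_lock, irq_lock; };
struct kvm_vcpu { pthread_mutex_t mutex; };

/* Documented order: kvm->lock is taken outside vcpu->mutex. */
static void vcpu_ioctl_path(struct kvm *kvm, struct kvm_vcpu *vcpu)
{
	pthread_mutex_lock(&kvm->lock);
	pthread_mutex_lock(&vcpu->mutex);
	/* ... work under both locks ... */
	pthread_mutex_unlock(&vcpu->mutex);
	pthread_mutex_unlock(&kvm->lock);
}

/* Documented order: kvm->slots_lock is taken outside kvm->irq_lock. */
static void irq_routing_path(struct kvm *kvm)
{
	pthread_mutex_lock(&kvm->slots_lock);
	pthread_mutex_lock(&kvm->irq_lock);
	/* ... */
	pthread_mutex_unlock(&kvm->irq_lock);
	pthread_mutex_unlock(&kvm->slots_lock);
}
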
diff --git a/MAINTAINERS b/MAINTAINERS
index 4012c2f98617..411e3b87b8c2 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -7925,6 +7925,10 @@ F: mm/
7925MEMORY TECHNOLOGY DEVICES (MTD) 7925MEMORY TECHNOLOGY DEVICES (MTD)
7926M: David Woodhouse <dwmw2@infradead.org> 7926M: David Woodhouse <dwmw2@infradead.org>
7927M: Brian Norris <computersforpeace@gmail.com> 7927M: Brian Norris <computersforpeace@gmail.com>
7928M: Boris Brezillon <boris.brezillon@free-electrons.com>
7929M: Marek Vasut <marek.vasut@gmail.com>
7930M: Richard Weinberger <richard@nod.at>
7931M: Cyrille Pitchen <cyrille.pitchen@atmel.com>
7928L: linux-mtd@lists.infradead.org 7932L: linux-mtd@lists.infradead.org
7929W: http://www.linux-mtd.infradead.org/ 7933W: http://www.linux-mtd.infradead.org/
7930Q: http://patchwork.ozlabs.org/project/linux-mtd/list/ 7934Q: http://patchwork.ozlabs.org/project/linux-mtd/list/
@@ -11404,6 +11408,17 @@ W: http://www.st.com/spear
11404S: Maintained 11408S: Maintained
11405F: drivers/clk/spear/ 11409F: drivers/clk/spear/
11406 11410
11411SPI NOR SUBSYSTEM
11412M: Cyrille Pitchen <cyrille.pitchen@atmel.com>
11413M: Marek Vasut <marek.vasut@gmail.com>
11414L: linux-mtd@lists.infradead.org
11415W: http://www.linux-mtd.infradead.org/
11416Q: http://patchwork.ozlabs.org/project/linux-mtd/list/
11417T: git git://github.com/spi-nor/linux.git
11418S: Maintained
11419F: drivers/mtd/spi-nor/
11420F: include/linux/mtd/spi-nor.h
11421
11407SPI SUBSYSTEM 11422SPI SUBSYSTEM
11408M: Mark Brown <broonie@kernel.org> 11423M: Mark Brown <broonie@kernel.org>
11409L: linux-spi@vger.kernel.org 11424L: linux-spi@vger.kernel.org
@@ -12783,6 +12798,7 @@ F: include/uapi/linux/virtio_console.h
12783 12798
12784VIRTIO CORE, NET AND BLOCK DRIVERS 12799VIRTIO CORE, NET AND BLOCK DRIVERS
12785M: "Michael S. Tsirkin" <mst@redhat.com> 12800M: "Michael S. Tsirkin" <mst@redhat.com>
12801M: Jason Wang <jasowang@redhat.com>
12786L: virtualization@lists.linux-foundation.org 12802L: virtualization@lists.linux-foundation.org
12787S: Maintained 12803S: Maintained
12788F: Documentation/devicetree/bindings/virtio/ 12804F: Documentation/devicetree/bindings/virtio/
@@ -12813,6 +12829,7 @@ F: include/uapi/linux/virtio_gpu.h
12813 12829
12814VIRTIO HOST (VHOST) 12830VIRTIO HOST (VHOST)
12815M: "Michael S. Tsirkin" <mst@redhat.com> 12831M: "Michael S. Tsirkin" <mst@redhat.com>
12832M: Jason Wang <jasowang@redhat.com>
12816L: kvm@vger.kernel.org 12833L: kvm@vger.kernel.org
12817L: virtualization@lists.linux-foundation.org 12834L: virtualization@lists.linux-foundation.org
12818L: netdev@vger.kernel.org 12835L: netdev@vger.kernel.org
diff --git a/arch/mips/Makefile b/arch/mips/Makefile
index fbf40d3c8123..1a6bac7b076f 100644
--- a/arch/mips/Makefile
+++ b/arch/mips/Makefile
@@ -263,7 +263,7 @@ KBUILD_CPPFLAGS += -DDATAOFFSET=$(if $(dataoffset-y),$(dataoffset-y),0)
263 263
264bootvars-y = VMLINUX_LOAD_ADDRESS=$(load-y) \ 264bootvars-y = VMLINUX_LOAD_ADDRESS=$(load-y) \
265 VMLINUX_ENTRY_ADDRESS=$(entry-y) \ 265 VMLINUX_ENTRY_ADDRESS=$(entry-y) \
266 PLATFORM=$(platform-y) 266 PLATFORM="$(platform-y)"
267ifdef CONFIG_32BIT 267ifdef CONFIG_32BIT
268bootvars-y += ADDR_BITS=32 268bootvars-y += ADDR_BITS=32
269endif 269endif
diff --git a/arch/mips/boot/dts/mti/malta.dts b/arch/mips/boot/dts/mti/malta.dts
index f604a272d91d..ffe3a1508e72 100644
--- a/arch/mips/boot/dts/mti/malta.dts
+++ b/arch/mips/boot/dts/mti/malta.dts
@@ -84,12 +84,13 @@
84 fpga_regs: system-controller@1f000000 { 84 fpga_regs: system-controller@1f000000 {
85 compatible = "mti,malta-fpga", "syscon", "simple-mfd"; 85 compatible = "mti,malta-fpga", "syscon", "simple-mfd";
86 reg = <0x1f000000 0x1000>; 86 reg = <0x1f000000 0x1000>;
87 native-endian;
87 88
88 reboot { 89 reboot {
89 compatible = "syscon-reboot"; 90 compatible = "syscon-reboot";
90 regmap = <&fpga_regs>; 91 regmap = <&fpga_regs>;
91 offset = <0x500>; 92 offset = <0x500>;
92 mask = <0x4d>; 93 mask = <0x42>;
93 }; 94 };
94 }; 95 };
95 96
diff --git a/arch/mips/generic/init.c b/arch/mips/generic/init.c
index 0ea73e845440..d493ccbf274a 100644
--- a/arch/mips/generic/init.c
+++ b/arch/mips/generic/init.c
@@ -30,9 +30,19 @@ static __initdata const void *mach_match_data;
30 30
31void __init prom_init(void) 31void __init prom_init(void)
32{ 32{
33 plat_get_fdt();
34 BUG_ON(!fdt);
35}
36
37void __init *plat_get_fdt(void)
38{
33 const struct mips_machine *check_mach; 39 const struct mips_machine *check_mach;
34 const struct of_device_id *match; 40 const struct of_device_id *match;
35 41
42 if (fdt)
43 /* Already set up */
44 return (void *)fdt;
45
36 if ((fw_arg0 == -2) && !fdt_check_header((void *)fw_arg1)) { 46 if ((fw_arg0 == -2) && !fdt_check_header((void *)fw_arg1)) {
37 /* 47 /*
38 * We booted using the UHI boot protocol, so we have been 48 * We booted using the UHI boot protocol, so we have been
@@ -75,12 +85,6 @@ void __init prom_init(void)
75 /* Retrieve the machine's FDT */ 85 /* Retrieve the machine's FDT */
76 fdt = mach->fdt; 86 fdt = mach->fdt;
77 } 87 }
78
79 BUG_ON(!fdt);
80}
81
82void __init *plat_get_fdt(void)
83{
84 return (void *)fdt; 88 return (void *)fdt;
85} 89}
86 90
diff --git a/arch/mips/include/asm/fpu_emulator.h b/arch/mips/include/asm/fpu_emulator.h
index 355dc25172e7..c05369e0b8d6 100644
--- a/arch/mips/include/asm/fpu_emulator.h
+++ b/arch/mips/include/asm/fpu_emulator.h
@@ -63,6 +63,8 @@ do { \
63extern int fpu_emulator_cop1Handler(struct pt_regs *xcp, 63extern int fpu_emulator_cop1Handler(struct pt_regs *xcp,
64 struct mips_fpu_struct *ctx, int has_fpu, 64 struct mips_fpu_struct *ctx, int has_fpu,
65 void *__user *fault_addr); 65 void *__user *fault_addr);
66void force_fcr31_sig(unsigned long fcr31, void __user *fault_addr,
67 struct task_struct *tsk);
66int process_fpemu_return(int sig, void __user *fault_addr, 68int process_fpemu_return(int sig, void __user *fault_addr,
67 unsigned long fcr31); 69 unsigned long fcr31);
68int isBranchInstr(struct pt_regs *regs, struct mm_decoded_insn dec_insn, 70int isBranchInstr(struct pt_regs *regs, struct mm_decoded_insn dec_insn,
@@ -81,4 +83,15 @@ static inline void fpu_emulator_init_fpu(void)
81 set_fpr64(&t->thread.fpu.fpr[i], 0, SIGNALLING_NAN); 83 set_fpr64(&t->thread.fpu.fpr[i], 0, SIGNALLING_NAN);
82} 84}
83 85
86/*
87 * Mask the FCSR Cause bits according to the Enable bits, observing
88 * that Unimplemented is always enabled.
89 */
90static inline unsigned long mask_fcr31_x(unsigned long fcr31)
91{
92 return fcr31 & (FPU_CSR_UNI_X |
93 ((fcr31 & FPU_CSR_ALL_E) <<
94 (ffs(FPU_CSR_ALL_X) - ffs(FPU_CSR_ALL_E))));
95}
96
84#endif /* _ASM_FPU_EMULATOR_H */ 97#endif /* _ASM_FPU_EMULATOR_H */
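
To see what the new mask_fcr31_x() helper selects, here is a small stand-alone sketch, not part of the patch. The FCSR bit values below follow the usual MIPS layout (Enables in bits 7-11, Cause in bits 12-17) and are reproduced only so the example compiles on its own.

#include <stdio.h>
#include <strings.h>	/* ffs() */

/* MIPS FCSR bits, included here for illustration. */
#define FPU_CSR_UNI_X	0x00020000	/* Unimplemented-operation Cause bit */
#define FPU_CSR_ALL_X	0x0003f000	/* all Cause bits */
#define FPU_CSR_ALL_E	0x00000f80	/* all Enable bits */
#define FPU_CSR_INE_X	0x00001000	/* Inexact Cause bit */
#define FPU_CSR_OVF_E	0x00000200	/* Overflow Enable bit */
#define FPU_CSR_OVF_X	0x00004000	/* Overflow Cause bit */

/* Same expression as the header change above. */
static unsigned long mask_fcr31_x(unsigned long fcr31)
{
	return fcr31 & (FPU_CSR_UNI_X |
			((fcr31 & FPU_CSR_ALL_E) <<
			 (ffs(FPU_CSR_ALL_X) - ffs(FPU_CSR_ALL_E))));
}

int main(void)
{
	/* Inexact raised but only Overflow enabled: nothing survives (prints 0). */
	printf("%#lx\n", mask_fcr31_x(FPU_CSR_INE_X | FPU_CSR_OVF_E));
	/* Overflow raised and enabled: the Overflow Cause bit survives (0x4000). */
	printf("%#lx\n", mask_fcr31_x(FPU_CSR_OVF_X | FPU_CSR_OVF_E));
	return 0;
}
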
diff --git a/arch/mips/include/asm/kvm_host.h b/arch/mips/include/asm/kvm_host.h
index 07f58cfc1ab9..bebec370324f 100644
--- a/arch/mips/include/asm/kvm_host.h
+++ b/arch/mips/include/asm/kvm_host.h
@@ -293,7 +293,10 @@ struct kvm_vcpu_arch {
293 /* Host KSEG0 address of the EI/DI offset */ 293 /* Host KSEG0 address of the EI/DI offset */
294 void *kseg0_commpage; 294 void *kseg0_commpage;
295 295
296 u32 io_gpr; /* GPR used as IO source/target */ 296 /* Resume PC after MMIO completion */
297 unsigned long io_pc;
298 /* GPR used as IO source/target */
299 u32 io_gpr;
297 300
298 struct hrtimer comparecount_timer; 301 struct hrtimer comparecount_timer;
299 /* Count timer control KVM register */ 302 /* Count timer control KVM register */
@@ -315,8 +318,6 @@ struct kvm_vcpu_arch {
315 /* Bitmask of pending exceptions to be cleared */ 318 /* Bitmask of pending exceptions to be cleared */
316 unsigned long pending_exceptions_clr; 319 unsigned long pending_exceptions_clr;
317 320
318 u32 pending_load_cause;
319
320 /* Save/Restore the entryhi register when are are preempted/scheduled back in */ 321 /* Save/Restore the entryhi register when are are preempted/scheduled back in */
321 unsigned long preempt_entryhi; 322 unsigned long preempt_entryhi;
322 323
diff --git a/arch/mips/include/asm/switch_to.h b/arch/mips/include/asm/switch_to.h
index ebb5c0f2f90d..c0ae27971e31 100644
--- a/arch/mips/include/asm/switch_to.h
+++ b/arch/mips/include/asm/switch_to.h
@@ -76,6 +76,22 @@ do { if (cpu_has_rw_llb) { \
76} while (0) 76} while (0)
77 77
78/* 78/*
79 * Check FCSR for any unmasked exceptions pending set with `ptrace',
80 * clear them and send a signal.
81 */
82#define __sanitize_fcr31(next) \
83do { \
84 unsigned long fcr31 = mask_fcr31_x(next->thread.fpu.fcr31); \
85 void __user *pc; \
86 \
87 if (unlikely(fcr31)) { \
88 pc = (void __user *)task_pt_regs(next)->cp0_epc; \
89 next->thread.fpu.fcr31 &= ~fcr31; \
90 force_fcr31_sig(fcr31, pc, next); \
91 } \
92} while (0)
93
94/*
79 * For newly created kernel threads switch_to() will return to 95 * For newly created kernel threads switch_to() will return to
80 * ret_from_kernel_thread, newly created user threads to ret_from_fork. 96 * ret_from_kernel_thread, newly created user threads to ret_from_fork.
81 * That is, everything following resume() will be skipped for new threads. 97 * That is, everything following resume() will be skipped for new threads.
@@ -85,6 +101,8 @@ do { if (cpu_has_rw_llb) { \
85do { \ 101do { \
86 __mips_mt_fpaff_switch_to(prev); \ 102 __mips_mt_fpaff_switch_to(prev); \
87 lose_fpu_inatomic(1, prev); \ 103 lose_fpu_inatomic(1, prev); \
104 if (tsk_used_math(next)) \
105 __sanitize_fcr31(next); \
88 if (cpu_has_dsp) { \ 106 if (cpu_has_dsp) { \
89 __save_dsp(prev); \ 107 __save_dsp(prev); \
90 __restore_dsp(next); \ 108 __restore_dsp(next); \
diff --git a/arch/mips/kernel/mips-cpc.c b/arch/mips/kernel/mips-cpc.c
index 2a45867d3b4f..a4964c334cab 100644
--- a/arch/mips/kernel/mips-cpc.c
+++ b/arch/mips/kernel/mips-cpc.c
@@ -21,6 +21,11 @@ static DEFINE_PER_CPU_ALIGNED(spinlock_t, cpc_core_lock);
21 21
22static DEFINE_PER_CPU_ALIGNED(unsigned long, cpc_core_lock_flags); 22static DEFINE_PER_CPU_ALIGNED(unsigned long, cpc_core_lock_flags);
23 23
24phys_addr_t __weak mips_cpc_default_phys_base(void)
25{
26 return 0;
27}
28
24/** 29/**
25 * mips_cpc_phys_base - retrieve the physical base address of the CPC 30 * mips_cpc_phys_base - retrieve the physical base address of the CPC
26 * 31 *
@@ -43,8 +48,12 @@ static phys_addr_t mips_cpc_phys_base(void)
43 if (cpc_base & CM_GCR_CPC_BASE_CPCEN_MSK) 48 if (cpc_base & CM_GCR_CPC_BASE_CPCEN_MSK)
44 return cpc_base & CM_GCR_CPC_BASE_CPCBASE_MSK; 49 return cpc_base & CM_GCR_CPC_BASE_CPCBASE_MSK;
45 50
46 /* Otherwise, give it the default address & enable it */ 51 /* Otherwise, use the default address */
47 cpc_base = mips_cpc_default_phys_base(); 52 cpc_base = mips_cpc_default_phys_base();
53 if (!cpc_base)
54 return cpc_base;
55
56 /* Enable the CPC, mapped at the default address */
48 write_gcr_cpc_base(cpc_base | CM_GCR_CPC_BASE_CPCEN_MSK); 57 write_gcr_cpc_base(cpc_base | CM_GCR_CPC_BASE_CPCEN_MSK);
49 return cpc_base; 58 return cpc_base;
50} 59}
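
The mips_cpc_default_phys_base() stub above relies on weak linkage: a platform that provides a strong definition overrides it, and a zero return now means "no CPC, skip the enable". A minimal stand-alone sketch of that override pattern, with hypothetical names and plain user-space C rather than the kernel API:

#include <stdio.h>

/* Weak default; a file providing a non-weak definition wins at link time. */
__attribute__((weak)) unsigned long platform_cpc_base(void)
{
	return 0;	/* 0 == "no such device" */
}

int main(void)
{
	unsigned long base = platform_cpc_base();

	if (!base) {
		printf("CPC absent, nothing to enable\n");
		return 0;
	}
	printf("enabling CPC at %#lx\n", base);
	return 0;
}
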
diff --git a/arch/mips/kernel/mips-r2-to-r6-emul.c b/arch/mips/kernel/mips-r2-to-r6-emul.c
index 22dedd62818a..bd09853aecdf 100644
--- a/arch/mips/kernel/mips-r2-to-r6-emul.c
+++ b/arch/mips/kernel/mips-r2-to-r6-emul.c
@@ -899,7 +899,7 @@ static inline int mipsr2_find_op_func(struct pt_regs *regs, u32 inst,
899 * mipsr2_decoder: Decode and emulate a MIPS R2 instruction 899 * mipsr2_decoder: Decode and emulate a MIPS R2 instruction
900 * @regs: Process register set 900 * @regs: Process register set
901 * @inst: Instruction to decode and emulate 901 * @inst: Instruction to decode and emulate
902 * @fcr31: Floating Point Control and Status Register returned 902 * @fcr31: Floating Point Control and Status Register Cause bits returned
903 */ 903 */
904int mipsr2_decoder(struct pt_regs *regs, u32 inst, unsigned long *fcr31) 904int mipsr2_decoder(struct pt_regs *regs, u32 inst, unsigned long *fcr31)
905{ 905{
@@ -1172,13 +1172,13 @@ fpu_emul:
1172 1172
1173 err = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 0, 1173 err = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 0,
1174 &fault_addr); 1174 &fault_addr);
1175 *fcr31 = current->thread.fpu.fcr31;
1176 1175
1177 /* 1176 /*
1178 * We can't allow the emulated instruction to leave any of 1177 * We can't allow the emulated instruction to leave any
1179 * the cause bits set in $fcr31. 1178 * enabled Cause bits set in $fcr31.
1180 */ 1179 */
1181 current->thread.fpu.fcr31 &= ~FPU_CSR_ALL_X; 1180 *fcr31 = res = mask_fcr31_x(current->thread.fpu.fcr31);
1181 current->thread.fpu.fcr31 &= ~res;
1182 1182
1183 /* 1183 /*
1184 * this is a tricky issue - lose_fpu() uses LL/SC atomics 1184 * this is a tricky issue - lose_fpu() uses LL/SC atomics
diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c
index 6103b24d1bfc..a92994d60e91 100644
--- a/arch/mips/kernel/ptrace.c
+++ b/arch/mips/kernel/ptrace.c
@@ -79,16 +79,15 @@ void ptrace_disable(struct task_struct *child)
79} 79}
80 80
81/* 81/*
82 * Poke at FCSR according to its mask. Don't set the cause bits as 82 * Poke at FCSR according to its mask. Set the Cause bits even
83 * this is currently not handled correctly in FP context restoration 83 * if a corresponding Enable bit is set. This will be noticed at
84 * and will cause an oops if a corresponding enable bit is set. 84 * the time the thread is switched to and SIGFPE thrown accordingly.
85 */ 85 */
86static void ptrace_setfcr31(struct task_struct *child, u32 value) 86static void ptrace_setfcr31(struct task_struct *child, u32 value)
87{ 87{
88 u32 fcr31; 88 u32 fcr31;
89 u32 mask; 89 u32 mask;
90 90
91 value &= ~FPU_CSR_ALL_X;
92 fcr31 = child->thread.fpu.fcr31; 91 fcr31 = child->thread.fpu.fcr31;
93 mask = boot_cpu_data.fpu_msk31; 92 mask = boot_cpu_data.fpu_msk31;
94 child->thread.fpu.fcr31 = (value & ~mask) | (fcr31 & mask); 93 child->thread.fpu.fcr31 = (value & ~mask) | (fcr31 & mask);
@@ -817,6 +816,7 @@ long arch_ptrace(struct task_struct *child, long request,
817 break; 816 break;
818#endif 817#endif
819 case FPC_CSR: 818 case FPC_CSR:
819 init_fp_ctx(child);
820 ptrace_setfcr31(child, data); 820 ptrace_setfcr31(child, data);
821 break; 821 break;
822 case DSP_BASE ... DSP_BASE + 5: { 822 case DSP_BASE ... DSP_BASE + 5: {
diff --git a/arch/mips/kernel/r2300_fpu.S b/arch/mips/kernel/r2300_fpu.S
index b4ac6374a38f..918f2f6d3861 100644
--- a/arch/mips/kernel/r2300_fpu.S
+++ b/arch/mips/kernel/r2300_fpu.S
@@ -21,106 +21,84 @@
21#define EX(a,b) \ 21#define EX(a,b) \
229: a,##b; \ 229: a,##b; \
23 .section __ex_table,"a"; \ 23 .section __ex_table,"a"; \
24 PTR 9b,fault; \
25 .previous
26
27#define EX2(a,b) \
289: a,##b; \
29 .section __ex_table,"a"; \
24 PTR 9b,bad_stack; \ 30 PTR 9b,bad_stack; \
31 PTR 9b+4,bad_stack; \
25 .previous 32 .previous
26 33
27 .set noreorder 34 .set noreorder
28 .set mips1 35 .set mips1
29 /* Save floating point context */ 36
37/**
38 * _save_fp_context() - save FP context from the FPU
39 * @a0 - pointer to fpregs field of sigcontext
40 * @a1 - pointer to fpc_csr field of sigcontext
41 *
42 * Save FP context, including the 32 FP data registers and the FP
43 * control & status register, from the FPU to signal context.
44 */
30LEAF(_save_fp_context) 45LEAF(_save_fp_context)
31 .set push 46 .set push
32 SET_HARDFLOAT 47 SET_HARDFLOAT
33 li v0, 0 # assume success 48 li v0, 0 # assume success
34 cfc1 t1,fcr31 49 cfc1 t1, fcr31
35 EX(swc1 $f0,(SC_FPREGS+0)(a0)) 50 EX2(s.d $f0, 0(a0))
36 EX(swc1 $f1,(SC_FPREGS+8)(a0)) 51 EX2(s.d $f2, 16(a0))
37 EX(swc1 $f2,(SC_FPREGS+16)(a0)) 52 EX2(s.d $f4, 32(a0))
38 EX(swc1 $f3,(SC_FPREGS+24)(a0)) 53 EX2(s.d $f6, 48(a0))
39 EX(swc1 $f4,(SC_FPREGS+32)(a0)) 54 EX2(s.d $f8, 64(a0))
40 EX(swc1 $f5,(SC_FPREGS+40)(a0)) 55 EX2(s.d $f10, 80(a0))
41 EX(swc1 $f6,(SC_FPREGS+48)(a0)) 56 EX2(s.d $f12, 96(a0))
42 EX(swc1 $f7,(SC_FPREGS+56)(a0)) 57 EX2(s.d $f14, 112(a0))
43 EX(swc1 $f8,(SC_FPREGS+64)(a0)) 58 EX2(s.d $f16, 128(a0))
44 EX(swc1 $f9,(SC_FPREGS+72)(a0)) 59 EX2(s.d $f18, 144(a0))
45 EX(swc1 $f10,(SC_FPREGS+80)(a0)) 60 EX2(s.d $f20, 160(a0))
46 EX(swc1 $f11,(SC_FPREGS+88)(a0)) 61 EX2(s.d $f22, 176(a0))
47 EX(swc1 $f12,(SC_FPREGS+96)(a0)) 62 EX2(s.d $f24, 192(a0))
48 EX(swc1 $f13,(SC_FPREGS+104)(a0)) 63 EX2(s.d $f26, 208(a0))
49 EX(swc1 $f14,(SC_FPREGS+112)(a0)) 64 EX2(s.d $f28, 224(a0))
50 EX(swc1 $f15,(SC_FPREGS+120)(a0)) 65 EX2(s.d $f30, 240(a0))
51 EX(swc1 $f16,(SC_FPREGS+128)(a0))
52 EX(swc1 $f17,(SC_FPREGS+136)(a0))
53 EX(swc1 $f18,(SC_FPREGS+144)(a0))
54 EX(swc1 $f19,(SC_FPREGS+152)(a0))
55 EX(swc1 $f20,(SC_FPREGS+160)(a0))
56 EX(swc1 $f21,(SC_FPREGS+168)(a0))
57 EX(swc1 $f22,(SC_FPREGS+176)(a0))
58 EX(swc1 $f23,(SC_FPREGS+184)(a0))
59 EX(swc1 $f24,(SC_FPREGS+192)(a0))
60 EX(swc1 $f25,(SC_FPREGS+200)(a0))
61 EX(swc1 $f26,(SC_FPREGS+208)(a0))
62 EX(swc1 $f27,(SC_FPREGS+216)(a0))
63 EX(swc1 $f28,(SC_FPREGS+224)(a0))
64 EX(swc1 $f29,(SC_FPREGS+232)(a0))
65 EX(swc1 $f30,(SC_FPREGS+240)(a0))
66 EX(swc1 $f31,(SC_FPREGS+248)(a0))
67 EX(sw t1,(SC_FPC_CSR)(a0))
68 cfc1 t0,$0 # implementation/version
69 jr ra 66 jr ra
67 EX(sw t1, (a1))
70 .set pop 68 .set pop
71 .set nomacro
72 EX(sw t0,(SC_FPC_EIR)(a0))
73 .set macro
74 END(_save_fp_context) 69 END(_save_fp_context)
75 70
76/* 71/**
77 * Restore FPU state: 72 * _restore_fp_context() - restore FP context to the FPU
78 * - fp gp registers 73 * @a0 - pointer to fpregs field of sigcontext
79 * - cp1 status/control register 74 * @a1 - pointer to fpc_csr field of sigcontext
80 * 75 *
81 * We base the decision which registers to restore from the signal stack 76 * Restore FP context, including the 32 FP data registers and the FP
82 * frame on the current content of c0_status, not on the content of the 77 * control & status register, from signal context to the FPU.
83 * stack frame which might have been changed by the user.
84 */ 78 */
85LEAF(_restore_fp_context) 79LEAF(_restore_fp_context)
86 .set push 80 .set push
87 SET_HARDFLOAT 81 SET_HARDFLOAT
88 li v0, 0 # assume success 82 li v0, 0 # assume success
89 EX(lw t0,(SC_FPC_CSR)(a0)) 83 EX(lw t0, (a1))
90 EX(lwc1 $f0,(SC_FPREGS+0)(a0)) 84 EX2(l.d $f0, 0(a0))
91 EX(lwc1 $f1,(SC_FPREGS+8)(a0)) 85 EX2(l.d $f2, 16(a0))
92 EX(lwc1 $f2,(SC_FPREGS+16)(a0)) 86 EX2(l.d $f4, 32(a0))
93 EX(lwc1 $f3,(SC_FPREGS+24)(a0)) 87 EX2(l.d $f6, 48(a0))
94 EX(lwc1 $f4,(SC_FPREGS+32)(a0)) 88 EX2(l.d $f8, 64(a0))
95 EX(lwc1 $f5,(SC_FPREGS+40)(a0)) 89 EX2(l.d $f10, 80(a0))
96 EX(lwc1 $f6,(SC_FPREGS+48)(a0)) 90 EX2(l.d $f12, 96(a0))
97 EX(lwc1 $f7,(SC_FPREGS+56)(a0)) 91 EX2(l.d $f14, 112(a0))
98 EX(lwc1 $f8,(SC_FPREGS+64)(a0)) 92 EX2(l.d $f16, 128(a0))
99 EX(lwc1 $f9,(SC_FPREGS+72)(a0)) 93 EX2(l.d $f18, 144(a0))
100 EX(lwc1 $f10,(SC_FPREGS+80)(a0)) 94 EX2(l.d $f20, 160(a0))
101 EX(lwc1 $f11,(SC_FPREGS+88)(a0)) 95 EX2(l.d $f22, 176(a0))
102 EX(lwc1 $f12,(SC_FPREGS+96)(a0)) 96 EX2(l.d $f24, 192(a0))
103 EX(lwc1 $f13,(SC_FPREGS+104)(a0)) 97 EX2(l.d $f26, 208(a0))
104 EX(lwc1 $f14,(SC_FPREGS+112)(a0)) 98 EX2(l.d $f28, 224(a0))
105 EX(lwc1 $f15,(SC_FPREGS+120)(a0)) 99 EX2(l.d $f30, 240(a0))
106 EX(lwc1 $f16,(SC_FPREGS+128)(a0))
107 EX(lwc1 $f17,(SC_FPREGS+136)(a0))
108 EX(lwc1 $f18,(SC_FPREGS+144)(a0))
109 EX(lwc1 $f19,(SC_FPREGS+152)(a0))
110 EX(lwc1 $f20,(SC_FPREGS+160)(a0))
111 EX(lwc1 $f21,(SC_FPREGS+168)(a0))
112 EX(lwc1 $f22,(SC_FPREGS+176)(a0))
113 EX(lwc1 $f23,(SC_FPREGS+184)(a0))
114 EX(lwc1 $f24,(SC_FPREGS+192)(a0))
115 EX(lwc1 $f25,(SC_FPREGS+200)(a0))
116 EX(lwc1 $f26,(SC_FPREGS+208)(a0))
117 EX(lwc1 $f27,(SC_FPREGS+216)(a0))
118 EX(lwc1 $f28,(SC_FPREGS+224)(a0))
119 EX(lwc1 $f29,(SC_FPREGS+232)(a0))
120 EX(lwc1 $f30,(SC_FPREGS+240)(a0))
121 EX(lwc1 $f31,(SC_FPREGS+248)(a0))
122 jr ra 100 jr ra
123 ctc1 t0,fcr31 101 ctc1 t0, fcr31
124 .set pop 102 .set pop
125 END(_restore_fp_context) 103 END(_restore_fp_context)
126 .set reorder 104 .set reorder
diff --git a/arch/mips/kernel/r6000_fpu.S b/arch/mips/kernel/r6000_fpu.S
index 47077380c15c..9cc7bfab3419 100644
--- a/arch/mips/kernel/r6000_fpu.S
+++ b/arch/mips/kernel/r6000_fpu.S
@@ -21,7 +21,14 @@
21 .set push 21 .set push
22 SET_HARDFLOAT 22 SET_HARDFLOAT
23 23
24 /* Save floating point context */ 24/**
25 * _save_fp_context() - save FP context from the FPU
26 * @a0 - pointer to fpregs field of sigcontext
27 * @a1 - pointer to fpc_csr field of sigcontext
28 *
29 * Save FP context, including the 32 FP data registers and the FP
30 * control & status register, from the FPU to signal context.
31 */
25 LEAF(_save_fp_context) 32 LEAF(_save_fp_context)
26 mfc0 t0,CP0_STATUS 33 mfc0 t0,CP0_STATUS
27 sll t0,t0,2 34 sll t0,t0,2
@@ -30,59 +37,59 @@
30 37
31 cfc1 t1,fcr31 38 cfc1 t1,fcr31
32 /* Store the 16 double precision registers */ 39 /* Store the 16 double precision registers */
33 sdc1 $f0,(SC_FPREGS+0)(a0) 40 sdc1 $f0,0(a0)
34 sdc1 $f2,(SC_FPREGS+16)(a0) 41 sdc1 $f2,16(a0)
35 sdc1 $f4,(SC_FPREGS+32)(a0) 42 sdc1 $f4,32(a0)
36 sdc1 $f6,(SC_FPREGS+48)(a0) 43 sdc1 $f6,48(a0)
37 sdc1 $f8,(SC_FPREGS+64)(a0) 44 sdc1 $f8,64(a0)
38 sdc1 $f10,(SC_FPREGS+80)(a0) 45 sdc1 $f10,80(a0)
39 sdc1 $f12,(SC_FPREGS+96)(a0) 46 sdc1 $f12,96(a0)
40 sdc1 $f14,(SC_FPREGS+112)(a0) 47 sdc1 $f14,112(a0)
41 sdc1 $f16,(SC_FPREGS+128)(a0) 48 sdc1 $f16,128(a0)
42 sdc1 $f18,(SC_FPREGS+144)(a0) 49 sdc1 $f18,144(a0)
43 sdc1 $f20,(SC_FPREGS+160)(a0) 50 sdc1 $f20,160(a0)
44 sdc1 $f22,(SC_FPREGS+176)(a0) 51 sdc1 $f22,176(a0)
45 sdc1 $f24,(SC_FPREGS+192)(a0) 52 sdc1 $f24,192(a0)
46 sdc1 $f26,(SC_FPREGS+208)(a0) 53 sdc1 $f26,208(a0)
47 sdc1 $f28,(SC_FPREGS+224)(a0) 54 sdc1 $f28,224(a0)
48 sdc1 $f30,(SC_FPREGS+240)(a0) 55 sdc1 $f30,240(a0)
49 jr ra 56 jr ra
50 sw t0,SC_FPC_CSR(a0) 57 sw t0,(a1)
511: jr ra 581: jr ra
52 nop 59 nop
53 END(_save_fp_context) 60 END(_save_fp_context)
54 61
55/* Restore FPU state: 62/**
56 * - fp gp registers 63 * _restore_fp_context() - restore FP context to the FPU
57 * - cp1 status/control register 64 * @a0 - pointer to fpregs field of sigcontext
65 * @a1 - pointer to fpc_csr field of sigcontext
58 * 66 *
59 * We base the decision which registers to restore from the signal stack 67 * Restore FP context, including the 32 FP data registers and the FP
60 * frame on the current content of c0_status, not on the content of the 68 * control & status register, from signal context to the FPU.
61 * stack frame which might have been changed by the user.
62 */ 69 */
63 LEAF(_restore_fp_context) 70 LEAF(_restore_fp_context)
64 mfc0 t0,CP0_STATUS 71 mfc0 t0,CP0_STATUS
65 sll t0,t0,2 72 sll t0,t0,2
66 73
67 bgez t0,1f 74 bgez t0,1f
68 lw t0,SC_FPC_CSR(a0) 75 lw t0,(a1)
69 /* Restore the 16 double precision registers */ 76 /* Restore the 16 double precision registers */
70 ldc1 $f0,(SC_FPREGS+0)(a0) 77 ldc1 $f0,0(a0)
71 ldc1 $f2,(SC_FPREGS+16)(a0) 78 ldc1 $f2,16(a0)
72 ldc1 $f4,(SC_FPREGS+32)(a0) 79 ldc1 $f4,32(a0)
73 ldc1 $f6,(SC_FPREGS+48)(a0) 80 ldc1 $f6,48(a0)
74 ldc1 $f8,(SC_FPREGS+64)(a0) 81 ldc1 $f8,64(a0)
75 ldc1 $f10,(SC_FPREGS+80)(a0) 82 ldc1 $f10,80(a0)
76 ldc1 $f12,(SC_FPREGS+96)(a0) 83 ldc1 $f12,96(a0)
77 ldc1 $f14,(SC_FPREGS+112)(a0) 84 ldc1 $f14,112(a0)
78 ldc1 $f16,(SC_FPREGS+128)(a0) 85 ldc1 $f16,128(a0)
79 ldc1 $f18,(SC_FPREGS+144)(a0) 86 ldc1 $f18,144(a0)
80 ldc1 $f20,(SC_FPREGS+160)(a0) 87 ldc1 $f20,160(a0)
81 ldc1 $f22,(SC_FPREGS+176)(a0) 88 ldc1 $f22,176(a0)
82 ldc1 $f24,(SC_FPREGS+192)(a0) 89 ldc1 $f24,192(a0)
83 ldc1 $f26,(SC_FPREGS+208)(a0) 90 ldc1 $f26,208(a0)
84 ldc1 $f28,(SC_FPREGS+224)(a0) 91 ldc1 $f28,224(a0)
85 ldc1 $f30,(SC_FPREGS+240)(a0) 92 ldc1 $f30,240(a0)
86 jr ra 93 jr ra
87 ctc1 t0,fcr31 94 ctc1 t0,fcr31
881: jr ra 951: jr ra
diff --git a/arch/mips/kernel/relocate.c b/arch/mips/kernel/relocate.c
index ca1cc30c0891..1958910b75c0 100644
--- a/arch/mips/kernel/relocate.c
+++ b/arch/mips/kernel/relocate.c
@@ -200,7 +200,7 @@ static inline __init unsigned long get_random_boot(void)
200 200
201#if defined(CONFIG_USE_OF) 201#if defined(CONFIG_USE_OF)
202 /* Get any additional entropy passed in device tree */ 202 /* Get any additional entropy passed in device tree */
203 { 203 if (initial_boot_params) {
204 int node, len; 204 int node, len;
205 u64 *prop; 205 u64 *prop;
206 206
diff --git a/arch/mips/kernel/setup.c b/arch/mips/kernel/setup.c
index 0d57909d9026..f66e5ce505b2 100644
--- a/arch/mips/kernel/setup.c
+++ b/arch/mips/kernel/setup.c
@@ -368,6 +368,19 @@ static void __init bootmem_init(void)
368 end = PFN_DOWN(boot_mem_map.map[i].addr 368 end = PFN_DOWN(boot_mem_map.map[i].addr
369 + boot_mem_map.map[i].size); 369 + boot_mem_map.map[i].size);
370 370
371#ifndef CONFIG_HIGHMEM
372 /*
373 * Skip highmem here so we get an accurate max_low_pfn if low
374 * memory stops short of high memory.
375 * If the region overlaps HIGHMEM_START, end is clipped so
376 * max_pfn excludes the highmem portion.
377 */
378 if (start >= PFN_DOWN(HIGHMEM_START))
379 continue;
380 if (end > PFN_DOWN(HIGHMEM_START))
381 end = PFN_DOWN(HIGHMEM_START);
382#endif
383
371 if (end > max_low_pfn) 384 if (end > max_low_pfn)
372 max_low_pfn = end; 385 max_low_pfn = end;
373 if (start < min_low_pfn) 386 if (start < min_low_pfn)
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index 1f5fdee1dfc3..3905003dfe2b 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -156,7 +156,7 @@ static void show_backtrace(struct task_struct *task, const struct pt_regs *regs)
156 print_ip_sym(pc); 156 print_ip_sym(pc);
157 pc = unwind_stack(task, &sp, pc, &ra); 157 pc = unwind_stack(task, &sp, pc, &ra);
158 } while (pc); 158 } while (pc);
159 printk("\n"); 159 pr_cont("\n");
160} 160}
161 161
162/* 162/*
@@ -174,22 +174,24 @@ static void show_stacktrace(struct task_struct *task,
174 printk("Stack :"); 174 printk("Stack :");
175 i = 0; 175 i = 0;
176 while ((unsigned long) sp & (PAGE_SIZE - 1)) { 176 while ((unsigned long) sp & (PAGE_SIZE - 1)) {
177 if (i && ((i % (64 / field)) == 0)) 177 if (i && ((i % (64 / field)) == 0)) {
178 printk("\n "); 178 pr_cont("\n");
179 printk(" ");
180 }
179 if (i > 39) { 181 if (i > 39) {
180 printk(" ..."); 182 pr_cont(" ...");
181 break; 183 break;
182 } 184 }
183 185
184 if (__get_user(stackdata, sp++)) { 186 if (__get_user(stackdata, sp++)) {
185 printk(" (Bad stack address)"); 187 pr_cont(" (Bad stack address)");
186 break; 188 break;
187 } 189 }
188 190
189 printk(" %0*lx", field, stackdata); 191 pr_cont(" %0*lx", field, stackdata);
190 i++; 192 i++;
191 } 193 }
192 printk("\n"); 194 pr_cont("\n");
193 show_backtrace(task, regs); 195 show_backtrace(task, regs);
194} 196}
195 197
@@ -229,18 +231,19 @@ static void show_code(unsigned int __user *pc)
229 long i; 231 long i;
230 unsigned short __user *pc16 = NULL; 232 unsigned short __user *pc16 = NULL;
231 233
232 printk("\nCode:"); 234 printk("Code:");
233 235
234 if ((unsigned long)pc & 1) 236 if ((unsigned long)pc & 1)
235 pc16 = (unsigned short __user *)((unsigned long)pc & ~1); 237 pc16 = (unsigned short __user *)((unsigned long)pc & ~1);
236 for(i = -3 ; i < 6 ; i++) { 238 for(i = -3 ; i < 6 ; i++) {
237 unsigned int insn; 239 unsigned int insn;
238 if (pc16 ? __get_user(insn, pc16 + i) : __get_user(insn, pc + i)) { 240 if (pc16 ? __get_user(insn, pc16 + i) : __get_user(insn, pc + i)) {
239 printk(" (Bad address in epc)\n"); 241 pr_cont(" (Bad address in epc)\n");
240 break; 242 break;
241 } 243 }
242 printk("%c%0*x%c", (i?' ':'<'), pc16 ? 4 : 8, insn, (i?' ':'>')); 244 pr_cont("%c%0*x%c", (i?' ':'<'), pc16 ? 4 : 8, insn, (i?' ':'>'));
243 } 245 }
246 pr_cont("\n");
244} 247}
245 248
246static void __show_regs(const struct pt_regs *regs) 249static void __show_regs(const struct pt_regs *regs)
@@ -259,15 +262,15 @@ static void __show_regs(const struct pt_regs *regs)
259 if ((i % 4) == 0) 262 if ((i % 4) == 0)
260 printk("$%2d :", i); 263 printk("$%2d :", i);
261 if (i == 0) 264 if (i == 0)
262 printk(" %0*lx", field, 0UL); 265 pr_cont(" %0*lx", field, 0UL);
263 else if (i == 26 || i == 27) 266 else if (i == 26 || i == 27)
264 printk(" %*s", field, ""); 267 pr_cont(" %*s", field, "");
265 else 268 else
266 printk(" %0*lx", field, regs->regs[i]); 269 pr_cont(" %0*lx", field, regs->regs[i]);
267 270
268 i++; 271 i++;
269 if ((i % 4) == 0) 272 if ((i % 4) == 0)
270 printk("\n"); 273 pr_cont("\n");
271 } 274 }
272 275
273#ifdef CONFIG_CPU_HAS_SMARTMIPS 276#ifdef CONFIG_CPU_HAS_SMARTMIPS
@@ -288,46 +291,46 @@ static void __show_regs(const struct pt_regs *regs)
288 291
289 if (cpu_has_3kex) { 292 if (cpu_has_3kex) {
290 if (regs->cp0_status & ST0_KUO) 293 if (regs->cp0_status & ST0_KUO)
291 printk("KUo "); 294 pr_cont("KUo ");
292 if (regs->cp0_status & ST0_IEO) 295 if (regs->cp0_status & ST0_IEO)
293 printk("IEo "); 296 pr_cont("IEo ");
294 if (regs->cp0_status & ST0_KUP) 297 if (regs->cp0_status & ST0_KUP)
295 printk("KUp "); 298 pr_cont("KUp ");
296 if (regs->cp0_status & ST0_IEP) 299 if (regs->cp0_status & ST0_IEP)
297 printk("IEp "); 300 pr_cont("IEp ");
298 if (regs->cp0_status & ST0_KUC) 301 if (regs->cp0_status & ST0_KUC)
299 printk("KUc "); 302 pr_cont("KUc ");
300 if (regs->cp0_status & ST0_IEC) 303 if (regs->cp0_status & ST0_IEC)
301 printk("IEc "); 304 pr_cont("IEc ");
302 } else if (cpu_has_4kex) { 305 } else if (cpu_has_4kex) {
303 if (regs->cp0_status & ST0_KX) 306 if (regs->cp0_status & ST0_KX)
304 printk("KX "); 307 pr_cont("KX ");
305 if (regs->cp0_status & ST0_SX) 308 if (regs->cp0_status & ST0_SX)
306 printk("SX "); 309 pr_cont("SX ");
307 if (regs->cp0_status & ST0_UX) 310 if (regs->cp0_status & ST0_UX)
308 printk("UX "); 311 pr_cont("UX ");
309 switch (regs->cp0_status & ST0_KSU) { 312 switch (regs->cp0_status & ST0_KSU) {
310 case KSU_USER: 313 case KSU_USER:
311 printk("USER "); 314 pr_cont("USER ");
312 break; 315 break;
313 case KSU_SUPERVISOR: 316 case KSU_SUPERVISOR:
314 printk("SUPERVISOR "); 317 pr_cont("SUPERVISOR ");
315 break; 318 break;
316 case KSU_KERNEL: 319 case KSU_KERNEL:
317 printk("KERNEL "); 320 pr_cont("KERNEL ");
318 break; 321 break;
319 default: 322 default:
320 printk("BAD_MODE "); 323 pr_cont("BAD_MODE ");
321 break; 324 break;
322 } 325 }
323 if (regs->cp0_status & ST0_ERL) 326 if (regs->cp0_status & ST0_ERL)
324 printk("ERL "); 327 pr_cont("ERL ");
325 if (regs->cp0_status & ST0_EXL) 328 if (regs->cp0_status & ST0_EXL)
326 printk("EXL "); 329 pr_cont("EXL ");
327 if (regs->cp0_status & ST0_IE) 330 if (regs->cp0_status & ST0_IE)
328 printk("IE "); 331 pr_cont("IE ");
329 } 332 }
330 printk("\n"); 333 pr_cont("\n");
331 334
332 exccode = (cause & CAUSEF_EXCCODE) >> CAUSEB_EXCCODE; 335 exccode = (cause & CAUSEF_EXCCODE) >> CAUSEB_EXCCODE;
333 printk("Cause : %08x (ExcCode %02x)\n", cause, exccode); 336 printk("Cause : %08x (ExcCode %02x)\n", cause, exccode);
@@ -705,6 +708,32 @@ asmlinkage void do_ov(struct pt_regs *regs)
705 exception_exit(prev_state); 708 exception_exit(prev_state);
706} 709}
707 710
711/*
712 * Send SIGFPE according to FCSR Cause bits, which must have already
 713 * been masked against Enable bits. This is important as Inexact can
714 * happen together with Overflow or Underflow, and `ptrace' can set
715 * any bits.
716 */
717void force_fcr31_sig(unsigned long fcr31, void __user *fault_addr,
718 struct task_struct *tsk)
719{
720 struct siginfo si = { .si_addr = fault_addr, .si_signo = SIGFPE };
721
722 if (fcr31 & FPU_CSR_INV_X)
723 si.si_code = FPE_FLTINV;
724 else if (fcr31 & FPU_CSR_DIV_X)
725 si.si_code = FPE_FLTDIV;
726 else if (fcr31 & FPU_CSR_OVF_X)
727 si.si_code = FPE_FLTOVF;
728 else if (fcr31 & FPU_CSR_UDF_X)
729 si.si_code = FPE_FLTUND;
730 else if (fcr31 & FPU_CSR_INE_X)
731 si.si_code = FPE_FLTRES;
732 else
733 si.si_code = __SI_FAULT;
734 force_sig_info(SIGFPE, &si, tsk);
735}
736
708int process_fpemu_return(int sig, void __user *fault_addr, unsigned long fcr31) 737int process_fpemu_return(int sig, void __user *fault_addr, unsigned long fcr31)
709{ 738{
710 struct siginfo si = { 0 }; 739 struct siginfo si = { 0 };
@@ -715,27 +744,7 @@ int process_fpemu_return(int sig, void __user *fault_addr, unsigned long fcr31)
715 return 0; 744 return 0;
716 745
717 case SIGFPE: 746 case SIGFPE:
718 si.si_addr = fault_addr; 747 force_fcr31_sig(fcr31, fault_addr, current);
719 si.si_signo = sig;
720 /*
721 * Inexact can happen together with Overflow or Underflow.
722 * Respect the mask to deliver the correct exception.
723 */
724 fcr31 &= (fcr31 & FPU_CSR_ALL_E) <<
725 (ffs(FPU_CSR_ALL_X) - ffs(FPU_CSR_ALL_E));
726 if (fcr31 & FPU_CSR_INV_X)
727 si.si_code = FPE_FLTINV;
728 else if (fcr31 & FPU_CSR_DIV_X)
729 si.si_code = FPE_FLTDIV;
730 else if (fcr31 & FPU_CSR_OVF_X)
731 si.si_code = FPE_FLTOVF;
732 else if (fcr31 & FPU_CSR_UDF_X)
733 si.si_code = FPE_FLTUND;
734 else if (fcr31 & FPU_CSR_INE_X)
735 si.si_code = FPE_FLTRES;
736 else
737 si.si_code = __SI_FAULT;
738 force_sig_info(sig, &si, current);
739 return 1; 748 return 1;
740 749
741 case SIGBUS: 750 case SIGBUS:
@@ -799,13 +808,13 @@ static int simulate_fp(struct pt_regs *regs, unsigned int opcode,
799 /* Run the emulator */ 808 /* Run the emulator */
800 sig = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 1, 809 sig = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 1,
801 &fault_addr); 810 &fault_addr);
802 fcr31 = current->thread.fpu.fcr31;
803 811
804 /* 812 /*
805 * We can't allow the emulated instruction to leave any of 813 * We can't allow the emulated instruction to leave any
806 * the cause bits set in $fcr31. 814 * enabled Cause bits set in $fcr31.
807 */ 815 */
808 current->thread.fpu.fcr31 &= ~FPU_CSR_ALL_X; 816 fcr31 = mask_fcr31_x(current->thread.fpu.fcr31);
817 current->thread.fpu.fcr31 &= ~fcr31;
809 818
810 /* Restore the hardware register state */ 819 /* Restore the hardware register state */
811 own_fpu(1); 820 own_fpu(1);
@@ -831,7 +840,7 @@ asmlinkage void do_fpe(struct pt_regs *regs, unsigned long fcr31)
831 goto out; 840 goto out;
832 841
833 /* Clear FCSR.Cause before enabling interrupts */ 842 /* Clear FCSR.Cause before enabling interrupts */
834 write_32bit_cp1_register(CP1_STATUS, fcr31 & ~FPU_CSR_ALL_X); 843 write_32bit_cp1_register(CP1_STATUS, fcr31 & ~mask_fcr31_x(fcr31));
835 local_irq_enable(); 844 local_irq_enable();
836 845
837 die_if_kernel("FP exception in kernel code", regs); 846 die_if_kernel("FP exception in kernel code", regs);
@@ -853,13 +862,13 @@ asmlinkage void do_fpe(struct pt_regs *regs, unsigned long fcr31)
853 /* Run the emulator */ 862 /* Run the emulator */
854 sig = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 1, 863 sig = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 1,
855 &fault_addr); 864 &fault_addr);
856 fcr31 = current->thread.fpu.fcr31;
857 865
858 /* 866 /*
859 * We can't allow the emulated instruction to leave any of 867 * We can't allow the emulated instruction to leave any
860 * the cause bits set in $fcr31. 868 * enabled Cause bits set in $fcr31.
861 */ 869 */
862 current->thread.fpu.fcr31 &= ~FPU_CSR_ALL_X; 870 fcr31 = mask_fcr31_x(current->thread.fpu.fcr31);
871 current->thread.fpu.fcr31 &= ~fcr31;
863 872
864 /* Restore the hardware register state */ 873 /* Restore the hardware register state */
865 own_fpu(1); /* Using the FPU again. */ 874 own_fpu(1); /* Using the FPU again. */
@@ -1424,13 +1433,13 @@ asmlinkage void do_cpu(struct pt_regs *regs)
1424 1433
1425 sig = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 0, 1434 sig = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 0,
1426 &fault_addr); 1435 &fault_addr);
1427 fcr31 = current->thread.fpu.fcr31;
1428 1436
1429 /* 1437 /*
1430 * We can't allow the emulated instruction to leave 1438 * We can't allow the emulated instruction to leave
1431 * any of the cause bits set in $fcr31. 1439 * any enabled Cause bits set in $fcr31.
1432 */ 1440 */
1433 current->thread.fpu.fcr31 &= ~FPU_CSR_ALL_X; 1441 fcr31 = mask_fcr31_x(current->thread.fpu.fcr31);
1442 current->thread.fpu.fcr31 &= ~fcr31;
1434 1443
1435 /* Send a signal if required. */ 1444 /* Send a signal if required. */
1436 if (!process_fpemu_return(sig, fault_addr, fcr31) && !err) 1445 if (!process_fpemu_return(sig, fault_addr, fcr31) && !err)
diff --git a/arch/mips/kvm/emulate.c b/arch/mips/kvm/emulate.c
index 8770f32c9e0b..aa0937423e28 100644
--- a/arch/mips/kvm/emulate.c
+++ b/arch/mips/kvm/emulate.c
@@ -790,15 +790,15 @@ enum emulation_result kvm_mips_emul_eret(struct kvm_vcpu *vcpu)
790 struct mips_coproc *cop0 = vcpu->arch.cop0; 790 struct mips_coproc *cop0 = vcpu->arch.cop0;
791 enum emulation_result er = EMULATE_DONE; 791 enum emulation_result er = EMULATE_DONE;
792 792
793 if (kvm_read_c0_guest_status(cop0) & ST0_EXL) { 793 if (kvm_read_c0_guest_status(cop0) & ST0_ERL) {
794 kvm_clear_c0_guest_status(cop0, ST0_ERL);
795 vcpu->arch.pc = kvm_read_c0_guest_errorepc(cop0);
796 } else if (kvm_read_c0_guest_status(cop0) & ST0_EXL) {
794 kvm_debug("[%#lx] ERET to %#lx\n", vcpu->arch.pc, 797 kvm_debug("[%#lx] ERET to %#lx\n", vcpu->arch.pc,
795 kvm_read_c0_guest_epc(cop0)); 798 kvm_read_c0_guest_epc(cop0));
796 kvm_clear_c0_guest_status(cop0, ST0_EXL); 799 kvm_clear_c0_guest_status(cop0, ST0_EXL);
797 vcpu->arch.pc = kvm_read_c0_guest_epc(cop0); 800 vcpu->arch.pc = kvm_read_c0_guest_epc(cop0);
798 801
799 } else if (kvm_read_c0_guest_status(cop0) & ST0_ERL) {
800 kvm_clear_c0_guest_status(cop0, ST0_ERL);
801 vcpu->arch.pc = kvm_read_c0_guest_errorepc(cop0);
802 } else { 802 } else {
803 kvm_err("[%#lx] ERET when MIPS_SR_EXL|MIPS_SR_ERL == 0\n", 803 kvm_err("[%#lx] ERET when MIPS_SR_EXL|MIPS_SR_ERL == 0\n",
804 vcpu->arch.pc); 804 vcpu->arch.pc);
@@ -1528,13 +1528,25 @@ enum emulation_result kvm_mips_emulate_load(union mips_instruction inst,
1528 struct kvm_vcpu *vcpu) 1528 struct kvm_vcpu *vcpu)
1529{ 1529{
1530 enum emulation_result er = EMULATE_DO_MMIO; 1530 enum emulation_result er = EMULATE_DO_MMIO;
1531 unsigned long curr_pc;
1531 u32 op, rt; 1532 u32 op, rt;
1532 u32 bytes; 1533 u32 bytes;
1533 1534
1534 rt = inst.i_format.rt; 1535 rt = inst.i_format.rt;
1535 op = inst.i_format.opcode; 1536 op = inst.i_format.opcode;
1536 1537
1537 vcpu->arch.pending_load_cause = cause; 1538 /*
1539 * Find the resume PC now while we have safe and easy access to the
1540 * prior branch instruction, and save it for
1541 * kvm_mips_complete_mmio_load() to restore later.
1542 */
1543 curr_pc = vcpu->arch.pc;
1544 er = update_pc(vcpu, cause);
1545 if (er == EMULATE_FAIL)
1546 return er;
1547 vcpu->arch.io_pc = vcpu->arch.pc;
1548 vcpu->arch.pc = curr_pc;
1549
1538 vcpu->arch.io_gpr = rt; 1550 vcpu->arch.io_gpr = rt;
1539 1551
1540 switch (op) { 1552 switch (op) {
@@ -2494,9 +2506,8 @@ enum emulation_result kvm_mips_complete_mmio_load(struct kvm_vcpu *vcpu,
2494 goto done; 2506 goto done;
2495 } 2507 }
2496 2508
2497 er = update_pc(vcpu, vcpu->arch.pending_load_cause); 2509 /* Restore saved resume PC */
2498 if (er == EMULATE_FAIL) 2510 vcpu->arch.pc = vcpu->arch.io_pc;
2499 return er;
2500 2511
2501 switch (run->mmio.len) { 2512 switch (run->mmio.len) {
2502 case 4: 2513 case 4:
@@ -2518,11 +2529,6 @@ enum emulation_result kvm_mips_complete_mmio_load(struct kvm_vcpu *vcpu,
2518 break; 2529 break;
2519 } 2530 }
2520 2531
2521 if (vcpu->arch.pending_load_cause & CAUSEF_BD)
2522 kvm_debug("[%#lx] Completing %d byte BD Load to gpr %d (0x%08lx) type %d\n",
2523 vcpu->arch.pc, run->mmio.len, vcpu->arch.io_gpr, *gpr,
2524 vcpu->mmio_needed);
2525
2526done: 2532done:
2527 return er; 2533 return er;
2528} 2534}
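
The emulate.c change above replaces the deferred update_pc() call with a resume address computed up front. A rough stand-alone sketch of that save/restore pattern follows; the struct and helper names are hypothetical, not the KVM API.

/* Compute the resume PC while branch-delay state is still at hand, park it,
 * and restore it once the MMIO load completes. */
struct vcpu_sketch {
	unsigned long pc;	/* PC of the faulting load */
	unsigned long io_pc;	/* where to resume afterwards */
};

static void begin_mmio_load(struct vcpu_sketch *v, unsigned long next_pc)
{
	unsigned long curr_pc = v->pc;

	v->pc = next_pc;	/* stands in for update_pc(vcpu, cause) */
	v->io_pc = v->pc;	/* remember the resume point */
	v->pc = curr_pc;	/* stay on the load until the data arrives */
}

static void complete_mmio_load(struct vcpu_sketch *v)
{
	v->pc = v->io_pc;	/* skip past the load (and any delay slot) */
}

int main(void)
{
	struct vcpu_sketch v = { .pc = 0x1000 };

	begin_mmio_load(&v, 0x1004);	/* emulator would supply the real target */
	complete_mmio_load(&v);
	return v.pc == 0x1004 ? 0 : 1;
}
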
diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c
index 622037d851a3..06a60b19acfb 100644
--- a/arch/mips/kvm/mips.c
+++ b/arch/mips/kvm/mips.c
@@ -426,7 +426,7 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
426static void kvm_mips_check_asids(struct kvm_vcpu *vcpu) 426static void kvm_mips_check_asids(struct kvm_vcpu *vcpu)
427{ 427{
428 struct mips_coproc *cop0 = vcpu->arch.cop0; 428 struct mips_coproc *cop0 = vcpu->arch.cop0;
429 int cpu = smp_processor_id(); 429 int i, cpu = smp_processor_id();
430 unsigned int gasid; 430 unsigned int gasid;
431 431
432 /* 432 /*
@@ -442,6 +442,9 @@ static void kvm_mips_check_asids(struct kvm_vcpu *vcpu)
442 vcpu); 442 vcpu);
443 vcpu->arch.guest_user_asid[cpu] = 443 vcpu->arch.guest_user_asid[cpu] =
444 vcpu->arch.guest_user_mm.context.asid[cpu]; 444 vcpu->arch.guest_user_mm.context.asid[cpu];
445 for_each_possible_cpu(i)
446 if (i != cpu)
447 vcpu->arch.guest_user_asid[cpu] = 0;
445 vcpu->arch.last_user_gasid = gasid; 448 vcpu->arch.last_user_gasid = gasid;
446 } 449 }
447 } 450 }
diff --git a/arch/mips/kvm/mmu.c b/arch/mips/kvm/mmu.c
index 03883ba806e2..3b677c851be0 100644
--- a/arch/mips/kvm/mmu.c
+++ b/arch/mips/kvm/mmu.c
@@ -260,13 +260,9 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
260 260
261 if ((vcpu->arch.guest_user_asid[cpu] ^ asid_cache(cpu)) & 261 if ((vcpu->arch.guest_user_asid[cpu] ^ asid_cache(cpu)) &
262 asid_version_mask(cpu)) { 262 asid_version_mask(cpu)) {
263 u32 gasid = kvm_read_c0_guest_entryhi(vcpu->arch.cop0) &
264 KVM_ENTRYHI_ASID;
265
266 kvm_get_new_mmu_context(&vcpu->arch.guest_user_mm, cpu, vcpu); 263 kvm_get_new_mmu_context(&vcpu->arch.guest_user_mm, cpu, vcpu);
267 vcpu->arch.guest_user_asid[cpu] = 264 vcpu->arch.guest_user_asid[cpu] =
268 vcpu->arch.guest_user_mm.context.asid[cpu]; 265 vcpu->arch.guest_user_mm.context.asid[cpu];
269 vcpu->arch.last_user_gasid = gasid;
270 newasid++; 266 newasid++;
271 267
272 kvm_debug("[%d]: cpu_context: %#lx\n", cpu, 268 kvm_debug("[%d]: cpu_context: %#lx\n", cpu,
diff --git a/arch/mips/lib/dump_tlb.c b/arch/mips/lib/dump_tlb.c
index 0f80b936e75e..6eb50a7137db 100644
--- a/arch/mips/lib/dump_tlb.c
+++ b/arch/mips/lib/dump_tlb.c
@@ -135,42 +135,42 @@ static void dump_tlb(int first, int last)
135 c0 = (entrylo0 & ENTRYLO_C) >> ENTRYLO_C_SHIFT; 135 c0 = (entrylo0 & ENTRYLO_C) >> ENTRYLO_C_SHIFT;
136 c1 = (entrylo1 & ENTRYLO_C) >> ENTRYLO_C_SHIFT; 136 c1 = (entrylo1 & ENTRYLO_C) >> ENTRYLO_C_SHIFT;
137 137
138 printk("va=%0*lx asid=%0*lx", 138 pr_cont("va=%0*lx asid=%0*lx",
139 vwidth, (entryhi & ~0x1fffUL), 139 vwidth, (entryhi & ~0x1fffUL),
140 asidwidth, entryhi & asidmask); 140 asidwidth, entryhi & asidmask);
141 if (cpu_has_guestid) 141 if (cpu_has_guestid)
142 printk(" gid=%02lx", 142 pr_cont(" gid=%02lx",
143 (guestctl1 & MIPS_GCTL1_RID) 143 (guestctl1 & MIPS_GCTL1_RID)
144 >> MIPS_GCTL1_RID_SHIFT); 144 >> MIPS_GCTL1_RID_SHIFT);
145 /* RI/XI are in awkward places, so mask them off separately */ 145 /* RI/XI are in awkward places, so mask them off separately */
146 pa = entrylo0 & ~(MIPS_ENTRYLO_RI | MIPS_ENTRYLO_XI); 146 pa = entrylo0 & ~(MIPS_ENTRYLO_RI | MIPS_ENTRYLO_XI);
147 if (xpa) 147 if (xpa)
148 pa |= (unsigned long long)readx_c0_entrylo0() << 30; 148 pa |= (unsigned long long)readx_c0_entrylo0() << 30;
149 pa = (pa << 6) & PAGE_MASK; 149 pa = (pa << 6) & PAGE_MASK;
150 printk("\n\t["); 150 pr_cont("\n\t[");
151 if (cpu_has_rixi) 151 if (cpu_has_rixi)
152 printk("ri=%d xi=%d ", 152 pr_cont("ri=%d xi=%d ",
153 (entrylo0 & MIPS_ENTRYLO_RI) ? 1 : 0, 153 (entrylo0 & MIPS_ENTRYLO_RI) ? 1 : 0,
154 (entrylo0 & MIPS_ENTRYLO_XI) ? 1 : 0); 154 (entrylo0 & MIPS_ENTRYLO_XI) ? 1 : 0);
155 printk("pa=%0*llx c=%d d=%d v=%d g=%d] [", 155 pr_cont("pa=%0*llx c=%d d=%d v=%d g=%d] [",
156 pwidth, pa, c0, 156 pwidth, pa, c0,
157 (entrylo0 & ENTRYLO_D) ? 1 : 0, 157 (entrylo0 & ENTRYLO_D) ? 1 : 0,
158 (entrylo0 & ENTRYLO_V) ? 1 : 0, 158 (entrylo0 & ENTRYLO_V) ? 1 : 0,
159 (entrylo0 & ENTRYLO_G) ? 1 : 0); 159 (entrylo0 & ENTRYLO_G) ? 1 : 0);
160 /* RI/XI are in awkward places, so mask them off separately */ 160 /* RI/XI are in awkward places, so mask them off separately */
161 pa = entrylo1 & ~(MIPS_ENTRYLO_RI | MIPS_ENTRYLO_XI); 161 pa = entrylo1 & ~(MIPS_ENTRYLO_RI | MIPS_ENTRYLO_XI);
162 if (xpa) 162 if (xpa)
163 pa |= (unsigned long long)readx_c0_entrylo1() << 30; 163 pa |= (unsigned long long)readx_c0_entrylo1() << 30;
164 pa = (pa << 6) & PAGE_MASK; 164 pa = (pa << 6) & PAGE_MASK;
165 if (cpu_has_rixi) 165 if (cpu_has_rixi)
166 printk("ri=%d xi=%d ", 166 pr_cont("ri=%d xi=%d ",
167 (entrylo1 & MIPS_ENTRYLO_RI) ? 1 : 0, 167 (entrylo1 & MIPS_ENTRYLO_RI) ? 1 : 0,
168 (entrylo1 & MIPS_ENTRYLO_XI) ? 1 : 0); 168 (entrylo1 & MIPS_ENTRYLO_XI) ? 1 : 0);
169 printk("pa=%0*llx c=%d d=%d v=%d g=%d]\n", 169 pr_cont("pa=%0*llx c=%d d=%d v=%d g=%d]\n",
170 pwidth, pa, c1, 170 pwidth, pa, c1,
171 (entrylo1 & ENTRYLO_D) ? 1 : 0, 171 (entrylo1 & ENTRYLO_D) ? 1 : 0,
172 (entrylo1 & ENTRYLO_V) ? 1 : 0, 172 (entrylo1 & ENTRYLO_V) ? 1 : 0,
173 (entrylo1 & ENTRYLO_G) ? 1 : 0); 173 (entrylo1 & ENTRYLO_G) ? 1 : 0);
174 } 174 }
175 printk("\n"); 175 printk("\n");
176 176
diff --git a/arch/mips/lib/r3k_dump_tlb.c b/arch/mips/lib/r3k_dump_tlb.c
index 744f4a7bc49d..85b4086e553e 100644
--- a/arch/mips/lib/r3k_dump_tlb.c
+++ b/arch/mips/lib/r3k_dump_tlb.c
@@ -53,15 +53,15 @@ static void dump_tlb(int first, int last)
53 */ 53 */
54 printk("Index: %2d ", i); 54 printk("Index: %2d ", i);
55 55
56 printk("va=%08lx asid=%08lx" 56 pr_cont("va=%08lx asid=%08lx"
57 " [pa=%06lx n=%d d=%d v=%d g=%d]", 57 " [pa=%06lx n=%d d=%d v=%d g=%d]",
58 entryhi & PAGE_MASK, 58 entryhi & PAGE_MASK,
59 entryhi & asid_mask, 59 entryhi & asid_mask,
60 entrylo0 & PAGE_MASK, 60 entrylo0 & PAGE_MASK,
61 (entrylo0 & R3K_ENTRYLO_N) ? 1 : 0, 61 (entrylo0 & R3K_ENTRYLO_N) ? 1 : 0,
62 (entrylo0 & R3K_ENTRYLO_D) ? 1 : 0, 62 (entrylo0 & R3K_ENTRYLO_D) ? 1 : 0,
63 (entrylo0 & R3K_ENTRYLO_V) ? 1 : 0, 63 (entrylo0 & R3K_ENTRYLO_V) ? 1 : 0,
64 (entrylo0 & R3K_ENTRYLO_G) ? 1 : 0); 64 (entrylo0 & R3K_ENTRYLO_G) ? 1 : 0);
65 } 65 }
66 } 66 }
67 printk("\n"); 67 printk("\n");
diff --git a/arch/parisc/include/uapi/asm/unistd.h b/arch/parisc/include/uapi/asm/unistd.h
index a9b9407f38f7..6b0741e7a7ed 100644
--- a/arch/parisc/include/uapi/asm/unistd.h
+++ b/arch/parisc/include/uapi/asm/unistd.h
@@ -368,7 +368,9 @@
368 368
369#define __IGNORE_select /* newselect */ 369#define __IGNORE_select /* newselect */
370#define __IGNORE_fadvise64 /* fadvise64_64 */ 370#define __IGNORE_fadvise64 /* fadvise64_64 */
371 371#define __IGNORE_pkey_mprotect
372#define __IGNORE_pkey_alloc
373#define __IGNORE_pkey_free
372 374
373#define LINUX_GATEWAY_ADDR 0x100 375#define LINUX_GATEWAY_ADDR 0x100
374 376
diff --git a/arch/parisc/kernel/drivers.c b/arch/parisc/kernel/drivers.c
index f8150669b8c6..700e2d2da096 100644
--- a/arch/parisc/kernel/drivers.c
+++ b/arch/parisc/kernel/drivers.c
@@ -873,11 +873,11 @@ static void print_parisc_device(struct parisc_device *dev)
873 873
874 if (dev->num_addrs) { 874 if (dev->num_addrs) {
875 int k; 875 int k;
876 printk(", additional addresses: "); 876 pr_cont(", additional addresses: ");
877 for (k = 0; k < dev->num_addrs; k++) 877 for (k = 0; k < dev->num_addrs; k++)
878 printk("0x%lx ", dev->addr[k]); 878 pr_cont("0x%lx ", dev->addr[k]);
879 } 879 }
880 printk("\n"); 880 pr_cont("\n");
881} 881}
882 882
883/** 883/**
diff --git a/arch/parisc/kernel/syscall.S b/arch/parisc/kernel/syscall.S
index d03422e5f188..23de307c3052 100644
--- a/arch/parisc/kernel/syscall.S
+++ b/arch/parisc/kernel/syscall.S
@@ -100,14 +100,12 @@ set_thread_pointer:
100 .endr 100 .endr
101 101
102/* This address must remain fixed at 0x100 for glibc's syscalls to work */ 102/* This address must remain fixed at 0x100 for glibc's syscalls to work */
103 .align 256 103 .align LINUX_GATEWAY_ADDR
104linux_gateway_entry: 104linux_gateway_entry:
105 gate .+8, %r0 /* become privileged */ 105 gate .+8, %r0 /* become privileged */
106 mtsp %r0,%sr4 /* get kernel space into sr4 */ 106 mtsp %r0,%sr4 /* get kernel space into sr4 */
107 mtsp %r0,%sr5 /* get kernel space into sr5 */ 107 mtsp %r0,%sr5 /* get kernel space into sr5 */
108 mtsp %r0,%sr6 /* get kernel space into sr6 */ 108 mtsp %r0,%sr6 /* get kernel space into sr6 */
109 mfsp %sr7,%r1 /* save user sr7 */
110 mtsp %r1,%sr3 /* and store it in sr3 */
111 109
112#ifdef CONFIG_64BIT 110#ifdef CONFIG_64BIT
113 /* for now we can *always* set the W bit on entry to the syscall 111 /* for now we can *always* set the W bit on entry to the syscall
@@ -133,6 +131,14 @@ linux_gateway_entry:
133 depdi 0, 31, 32, %r21 131 depdi 0, 31, 32, %r21
1341: 1321:
135#endif 133#endif
134
135 /* We use a rsm/ssm pair to prevent sr3 from being clobbered
136 * by external interrupts.
137 */
138 mfsp %sr7,%r1 /* save user sr7 */
139 rsm PSW_SM_I, %r0 /* disable interrupts */
140 mtsp %r1,%sr3 /* and store it in sr3 */
141
136 mfctl %cr30,%r1 142 mfctl %cr30,%r1
137 xor %r1,%r30,%r30 /* ye olde xor trick */ 143 xor %r1,%r30,%r30 /* ye olde xor trick */
138 xor %r1,%r30,%r1 144 xor %r1,%r30,%r1
@@ -147,6 +153,7 @@ linux_gateway_entry:
147 */ 153 */
148 154
149 mtsp %r0,%sr7 /* get kernel space into sr7 */ 155 mtsp %r0,%sr7 /* get kernel space into sr7 */
156 ssm PSW_SM_I, %r0 /* enable interrupts */
150 STREGM %r1,FRAME_SIZE(%r30) /* save r1 (usp) here for now */ 157 STREGM %r1,FRAME_SIZE(%r30) /* save r1 (usp) here for now */
151 mfctl %cr30,%r1 /* get task ptr in %r1 */ 158 mfctl %cr30,%r1 /* get task ptr in %r1 */
152 LDREG TI_TASK(%r1),%r1 159 LDREG TI_TASK(%r1),%r1
@@ -474,11 +481,6 @@ lws_start:
474 comiclr,>> __NR_lws_entries, %r20, %r0 481 comiclr,>> __NR_lws_entries, %r20, %r0
475 b,n lws_exit_nosys 482 b,n lws_exit_nosys
476 483
477 /* WARNING: Trashing sr2 and sr3 */
478 mfsp %sr7,%r1 /* get userspace into sr3 */
479 mtsp %r1,%sr3
480 mtsp %r0,%sr2 /* get kernel space into sr2 */
481
482 /* Load table start */ 484 /* Load table start */
483 ldil L%lws_table, %r1 485 ldil L%lws_table, %r1
484 ldo R%lws_table(%r1), %r28 /* Scratch use of r28 */ 486 ldo R%lws_table(%r1), %r28 /* Scratch use of r28 */
@@ -627,9 +629,9 @@ cas_action:
627 stw %r1, 4(%sr2,%r20) 629 stw %r1, 4(%sr2,%r20)
628#endif 630#endif
629 /* The load and store could fail */ 631 /* The load and store could fail */
6301: ldw,ma 0(%sr3,%r26), %r28 6321: ldw,ma 0(%r26), %r28
631 sub,<> %r28, %r25, %r0 633 sub,<> %r28, %r25, %r0
6322: stw,ma %r24, 0(%sr3,%r26) 6342: stw,ma %r24, 0(%r26)
633 /* Free lock */ 635 /* Free lock */
634 stw,ma %r20, 0(%sr2,%r20) 636 stw,ma %r20, 0(%sr2,%r20)
635#if ENABLE_LWS_DEBUG 637#if ENABLE_LWS_DEBUG
@@ -706,9 +708,9 @@ lws_compare_and_swap_2:
706 nop 708 nop
707 709
708 /* 8bit load */ 710 /* 8bit load */
7094: ldb 0(%sr3,%r25), %r25 7114: ldb 0(%r25), %r25
710 b cas2_lock_start 712 b cas2_lock_start
7115: ldb 0(%sr3,%r24), %r24 7135: ldb 0(%r24), %r24
712 nop 714 nop
713 nop 715 nop
714 nop 716 nop
@@ -716,9 +718,9 @@ lws_compare_and_swap_2:
716 nop 718 nop
717 719
718 /* 16bit load */ 720 /* 16bit load */
7196: ldh 0(%sr3,%r25), %r25 7216: ldh 0(%r25), %r25
720 b cas2_lock_start 722 b cas2_lock_start
7217: ldh 0(%sr3,%r24), %r24 7237: ldh 0(%r24), %r24
722 nop 724 nop
723 nop 725 nop
724 nop 726 nop
@@ -726,9 +728,9 @@ lws_compare_and_swap_2:
726 nop 728 nop
727 729
728 /* 32bit load */ 730 /* 32bit load */
7298: ldw 0(%sr3,%r25), %r25 7318: ldw 0(%r25), %r25
730 b cas2_lock_start 732 b cas2_lock_start
7319: ldw 0(%sr3,%r24), %r24 7339: ldw 0(%r24), %r24
732 nop 734 nop
733 nop 735 nop
734 nop 736 nop
@@ -737,14 +739,14 @@ lws_compare_and_swap_2:
737 739
738 /* 64bit load */ 740 /* 64bit load */
739#ifdef CONFIG_64BIT 741#ifdef CONFIG_64BIT
74010: ldd 0(%sr3,%r25), %r25 74210: ldd 0(%r25), %r25
74111: ldd 0(%sr3,%r24), %r24 74311: ldd 0(%r24), %r24
742#else 744#else
743 /* Load new value into r22/r23 - high/low */ 745 /* Load new value into r22/r23 - high/low */
74410: ldw 0(%sr3,%r25), %r22 74610: ldw 0(%r25), %r22
74511: ldw 4(%sr3,%r25), %r23 74711: ldw 4(%r25), %r23
746 /* Load new value into fr4 for atomic store later */ 748 /* Load new value into fr4 for atomic store later */
74712: flddx 0(%sr3,%r24), %fr4 74912: flddx 0(%r24), %fr4
748#endif 750#endif
749 751
750cas2_lock_start: 752cas2_lock_start:
@@ -794,30 +796,30 @@ cas2_action:
794 ldo 1(%r0),%r28 796 ldo 1(%r0),%r28
795 797
796 /* 8bit CAS */ 798 /* 8bit CAS */
79713: ldb,ma 0(%sr3,%r26), %r29 79913: ldb,ma 0(%r26), %r29
798 sub,= %r29, %r25, %r0 800 sub,= %r29, %r25, %r0
799 b,n cas2_end 801 b,n cas2_end
80014: stb,ma %r24, 0(%sr3,%r26) 80214: stb,ma %r24, 0(%r26)
801 b cas2_end 803 b cas2_end
802 copy %r0, %r28 804 copy %r0, %r28
803 nop 805 nop
804 nop 806 nop
805 807
806 /* 16bit CAS */ 808 /* 16bit CAS */
80715: ldh,ma 0(%sr3,%r26), %r29 80915: ldh,ma 0(%r26), %r29
808 sub,= %r29, %r25, %r0 810 sub,= %r29, %r25, %r0
809 b,n cas2_end 811 b,n cas2_end
81016: sth,ma %r24, 0(%sr3,%r26) 81216: sth,ma %r24, 0(%r26)
811 b cas2_end 813 b cas2_end
812 copy %r0, %r28 814 copy %r0, %r28
813 nop 815 nop
814 nop 816 nop
815 817
816 /* 32bit CAS */ 818 /* 32bit CAS */
81717: ldw,ma 0(%sr3,%r26), %r29 81917: ldw,ma 0(%r26), %r29
818 sub,= %r29, %r25, %r0 820 sub,= %r29, %r25, %r0
819 b,n cas2_end 821 b,n cas2_end
82018: stw,ma %r24, 0(%sr3,%r26) 82218: stw,ma %r24, 0(%r26)
821 b cas2_end 823 b cas2_end
822 copy %r0, %r28 824 copy %r0, %r28
823 nop 825 nop
@@ -825,22 +827,22 @@ cas2_action:
825 827
826 /* 64bit CAS */ 828 /* 64bit CAS */
827#ifdef CONFIG_64BIT 829#ifdef CONFIG_64BIT
82819: ldd,ma 0(%sr3,%r26), %r29 83019: ldd,ma 0(%r26), %r29
829 sub,*= %r29, %r25, %r0 831 sub,*= %r29, %r25, %r0
830 b,n cas2_end 832 b,n cas2_end
83120: std,ma %r24, 0(%sr3,%r26) 83320: std,ma %r24, 0(%r26)
832 copy %r0, %r28 834 copy %r0, %r28
833#else 835#else
834 /* Compare first word */ 836 /* Compare first word */
83519: ldw,ma 0(%sr3,%r26), %r29 83719: ldw,ma 0(%r26), %r29
836 sub,= %r29, %r22, %r0 838 sub,= %r29, %r22, %r0
837 b,n cas2_end 839 b,n cas2_end
838 /* Compare second word */ 840 /* Compare second word */
83920: ldw,ma 4(%sr3,%r26), %r29 84120: ldw,ma 4(%r26), %r29
840 sub,= %r29, %r23, %r0 842 sub,= %r29, %r23, %r0
841 b,n cas2_end 843 b,n cas2_end
842 /* Perform the store */ 844 /* Perform the store */
84321: fstdx %fr4, 0(%sr3,%r26) 84521: fstdx %fr4, 0(%r26)
844 copy %r0, %r28 846 copy %r0, %r28
845#endif 847#endif
846 848
diff --git a/arch/s390/kvm/sthyi.c b/arch/s390/kvm/sthyi.c
index bd98b7d25200..05c98bb853cf 100644
--- a/arch/s390/kvm/sthyi.c
+++ b/arch/s390/kvm/sthyi.c
@@ -315,7 +315,7 @@ static void fill_diag(struct sthyi_sctns *sctns)
315 if (r < 0) 315 if (r < 0)
316 goto out; 316 goto out;
317 317
318 diag224_buf = kmalloc(PAGE_SIZE, GFP_KERNEL | GFP_DMA); 318 diag224_buf = (void *)__get_free_page(GFP_KERNEL | GFP_DMA);
319 if (!diag224_buf || diag224(diag224_buf)) 319 if (!diag224_buf || diag224(diag224_buf))
320 goto out; 320 goto out;
321 321
@@ -378,7 +378,7 @@ static void fill_diag(struct sthyi_sctns *sctns)
378 sctns->par.infpval1 |= PAR_WGHT_VLD; 378 sctns->par.infpval1 |= PAR_WGHT_VLD;
379 379
380out: 380out:
381 kfree(diag224_buf); 381 free_page((unsigned long)diag224_buf);
382 vfree(diag204_buf); 382 vfree(diag204_buf);
383} 383}
384 384
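The diag 224 call wants a properly aligned buffer in memory the machine can address (GFP_DMA on s390 means below 2 GB), and kmalloc() does not strictly guarantee page alignment, so the hunk above allocates a whole page instead and frees it with the matching helper. A minimal sketch of that pairing, with an illustrative wrapper name:

#include <linux/errno.h>
#include <linux/gfp.h>

static int with_dma_page(int (*use)(void *buf))
{
        void *buf = (void *)__get_free_page(GFP_KERNEL | GFP_DMA);
        int ret;

        if (!buf)
                return -ENOMEM;

        ret = use(buf);                 /* e.g. hand the page to diag 224 */

        free_page((unsigned long)buf);  /* pairs with __get_free_page() */
        return ret;
}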
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 4b20f7304b9c..bdde80731f49 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -948,7 +948,6 @@ struct kvm_x86_ops {
948 int (*get_lpage_level)(void); 948 int (*get_lpage_level)(void);
949 bool (*rdtscp_supported)(void); 949 bool (*rdtscp_supported)(void);
950 bool (*invpcid_supported)(void); 950 bool (*invpcid_supported)(void);
951 void (*adjust_tsc_offset_guest)(struct kvm_vcpu *vcpu, s64 adjustment);
952 951
953 void (*set_tdp_cr3)(struct kvm_vcpu *vcpu, unsigned long cr3); 952 void (*set_tdp_cr3)(struct kvm_vcpu *vcpu, unsigned long cr3);
954 953
@@ -958,8 +957,6 @@ struct kvm_x86_ops {
958 957
959 void (*write_tsc_offset)(struct kvm_vcpu *vcpu, u64 offset); 958 void (*write_tsc_offset)(struct kvm_vcpu *vcpu, u64 offset);
960 959
961 u64 (*read_l1_tsc)(struct kvm_vcpu *vcpu, u64 host_tsc);
962
963 void (*get_exit_info)(struct kvm_vcpu *vcpu, u64 *info1, u64 *info2); 960 void (*get_exit_info)(struct kvm_vcpu *vcpu, u64 *info1, u64 *info2);
964 961
965 int (*check_intercept)(struct kvm_vcpu *vcpu, 962 int (*check_intercept)(struct kvm_vcpu *vcpu,
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index 4e95d3eb2955..cbd7b92585bb 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -5045,7 +5045,7 @@ done_prefixes:
5045 /* Decode and fetch the destination operand: register or memory. */ 5045 /* Decode and fetch the destination operand: register or memory. */
5046 rc = decode_operand(ctxt, &ctxt->dst, (ctxt->d >> DstShift) & OpMask); 5046 rc = decode_operand(ctxt, &ctxt->dst, (ctxt->d >> DstShift) & OpMask);
5047 5047
5048 if (ctxt->rip_relative) 5048 if (ctxt->rip_relative && likely(ctxt->memopp))
5049 ctxt->memopp->addr.mem.ea = address_mask(ctxt, 5049 ctxt->memopp->addr.mem.ea = address_mask(ctxt,
5050 ctxt->memopp->addr.mem.ea + ctxt->_eip); 5050 ctxt->memopp->addr.mem.ea + ctxt->_eip);
5051 5051
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index f8157a36ab09..8ca1eca5038d 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -1138,21 +1138,6 @@ static void svm_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
1138 mark_dirty(svm->vmcb, VMCB_INTERCEPTS); 1138 mark_dirty(svm->vmcb, VMCB_INTERCEPTS);
1139} 1139}
1140 1140
1141static void svm_adjust_tsc_offset_guest(struct kvm_vcpu *vcpu, s64 adjustment)
1142{
1143 struct vcpu_svm *svm = to_svm(vcpu);
1144
1145 svm->vmcb->control.tsc_offset += adjustment;
1146 if (is_guest_mode(vcpu))
1147 svm->nested.hsave->control.tsc_offset += adjustment;
1148 else
1149 trace_kvm_write_tsc_offset(vcpu->vcpu_id,
1150 svm->vmcb->control.tsc_offset - adjustment,
1151 svm->vmcb->control.tsc_offset);
1152
1153 mark_dirty(svm->vmcb, VMCB_INTERCEPTS);
1154}
1155
1156static void avic_init_vmcb(struct vcpu_svm *svm) 1141static void avic_init_vmcb(struct vcpu_svm *svm)
1157{ 1142{
1158 struct vmcb *vmcb = svm->vmcb; 1143 struct vmcb *vmcb = svm->vmcb;
@@ -3449,12 +3434,6 @@ static int cr8_write_interception(struct vcpu_svm *svm)
3449 return 0; 3434 return 0;
3450} 3435}
3451 3436
3452static u64 svm_read_l1_tsc(struct kvm_vcpu *vcpu, u64 host_tsc)
3453{
3454 struct vmcb *vmcb = get_host_vmcb(to_svm(vcpu));
3455 return vmcb->control.tsc_offset + host_tsc;
3456}
3457
3458static int svm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) 3437static int svm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
3459{ 3438{
3460 struct vcpu_svm *svm = to_svm(vcpu); 3439 struct vcpu_svm *svm = to_svm(vcpu);
@@ -5422,8 +5401,6 @@ static struct kvm_x86_ops svm_x86_ops __ro_after_init = {
5422 .has_wbinvd_exit = svm_has_wbinvd_exit, 5401 .has_wbinvd_exit = svm_has_wbinvd_exit,
5423 5402
5424 .write_tsc_offset = svm_write_tsc_offset, 5403 .write_tsc_offset = svm_write_tsc_offset,
5425 .adjust_tsc_offset_guest = svm_adjust_tsc_offset_guest,
5426 .read_l1_tsc = svm_read_l1_tsc,
5427 5404
5428 .set_tdp_cr3 = set_tdp_cr3, 5405 .set_tdp_cr3 = set_tdp_cr3,
5429 5406
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index cf1b16dbc98a..5382b82462fc 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -187,6 +187,7 @@ struct vmcs {
187 */ 187 */
188struct loaded_vmcs { 188struct loaded_vmcs {
189 struct vmcs *vmcs; 189 struct vmcs *vmcs;
190 struct vmcs *shadow_vmcs;
190 int cpu; 191 int cpu;
191 int launched; 192 int launched;
192 struct list_head loaded_vmcss_on_cpu_link; 193 struct list_head loaded_vmcss_on_cpu_link;
@@ -411,7 +412,6 @@ struct nested_vmx {
411 * memory during VMXOFF, VMCLEAR, VMPTRLD. 412 * memory during VMXOFF, VMCLEAR, VMPTRLD.
412 */ 413 */
413 struct vmcs12 *cached_vmcs12; 414 struct vmcs12 *cached_vmcs12;
414 struct vmcs *current_shadow_vmcs;
415 /* 415 /*
416 * Indicates if the shadow vmcs must be updated with the 416 * Indicates if the shadow vmcs must be updated with the
417 * data hold by vmcs12 417 * data hold by vmcs12
@@ -421,7 +421,6 @@ struct nested_vmx {
421 /* vmcs02_list cache of VMCSs recently used to run L2 guests */ 421 /* vmcs02_list cache of VMCSs recently used to run L2 guests */
422 struct list_head vmcs02_pool; 422 struct list_head vmcs02_pool;
423 int vmcs02_num; 423 int vmcs02_num;
424 u64 vmcs01_tsc_offset;
425 bool change_vmcs01_virtual_x2apic_mode; 424 bool change_vmcs01_virtual_x2apic_mode;
426 /* L2 must run next, and mustn't decide to exit to L1. */ 425 /* L2 must run next, and mustn't decide to exit to L1. */
427 bool nested_run_pending; 426 bool nested_run_pending;
@@ -1419,6 +1418,8 @@ static void vmcs_clear(struct vmcs *vmcs)
1419static inline void loaded_vmcs_init(struct loaded_vmcs *loaded_vmcs) 1418static inline void loaded_vmcs_init(struct loaded_vmcs *loaded_vmcs)
1420{ 1419{
1421 vmcs_clear(loaded_vmcs->vmcs); 1420 vmcs_clear(loaded_vmcs->vmcs);
1421 if (loaded_vmcs->shadow_vmcs && loaded_vmcs->launched)
1422 vmcs_clear(loaded_vmcs->shadow_vmcs);
1422 loaded_vmcs->cpu = -1; 1423 loaded_vmcs->cpu = -1;
1423 loaded_vmcs->launched = 0; 1424 loaded_vmcs->launched = 0;
1424} 1425}
@@ -2605,20 +2606,6 @@ static u64 guest_read_tsc(struct kvm_vcpu *vcpu)
2605} 2606}
2606 2607
2607/* 2608/*
2608 * Like guest_read_tsc, but always returns L1's notion of the timestamp
2609 * counter, even if a nested guest (L2) is currently running.
2610 */
2611static u64 vmx_read_l1_tsc(struct kvm_vcpu *vcpu, u64 host_tsc)
2612{
2613 u64 tsc_offset;
2614
2615 tsc_offset = is_guest_mode(vcpu) ?
2616 to_vmx(vcpu)->nested.vmcs01_tsc_offset :
2617 vmcs_read64(TSC_OFFSET);
2618 return host_tsc + tsc_offset;
2619}
2620
2621/*
2622 * writes 'offset' into guest's timestamp counter offset register 2609 * writes 'offset' into guest's timestamp counter offset register
2623 */ 2610 */
2624static void vmx_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset) 2611static void vmx_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
@@ -2631,7 +2618,6 @@ static void vmx_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
2631 * to the newly set TSC to get L2's TSC. 2618 * to the newly set TSC to get L2's TSC.
2632 */ 2619 */
2633 struct vmcs12 *vmcs12; 2620 struct vmcs12 *vmcs12;
2634 to_vmx(vcpu)->nested.vmcs01_tsc_offset = offset;
2635 /* recalculate vmcs02.TSC_OFFSET: */ 2621 /* recalculate vmcs02.TSC_OFFSET: */
2636 vmcs12 = get_vmcs12(vcpu); 2622 vmcs12 = get_vmcs12(vcpu);
2637 vmcs_write64(TSC_OFFSET, offset + 2623 vmcs_write64(TSC_OFFSET, offset +
@@ -2644,19 +2630,6 @@ static void vmx_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
2644 } 2630 }
2645} 2631}
2646 2632
2647static void vmx_adjust_tsc_offset_guest(struct kvm_vcpu *vcpu, s64 adjustment)
2648{
2649 u64 offset = vmcs_read64(TSC_OFFSET);
2650
2651 vmcs_write64(TSC_OFFSET, offset + adjustment);
2652 if (is_guest_mode(vcpu)) {
2653 /* Even when running L2, the adjustment needs to apply to L1 */
2654 to_vmx(vcpu)->nested.vmcs01_tsc_offset += adjustment;
2655 } else
2656 trace_kvm_write_tsc_offset(vcpu->vcpu_id, offset,
2657 offset + adjustment);
2658}
2659
2660static bool guest_cpuid_has_vmx(struct kvm_vcpu *vcpu) 2633static bool guest_cpuid_has_vmx(struct kvm_vcpu *vcpu)
2661{ 2634{
2662 struct kvm_cpuid_entry2 *best = kvm_find_cpuid_entry(vcpu, 1, 0); 2635 struct kvm_cpuid_entry2 *best = kvm_find_cpuid_entry(vcpu, 1, 0);
@@ -3562,6 +3535,7 @@ static void free_loaded_vmcs(struct loaded_vmcs *loaded_vmcs)
3562 loaded_vmcs_clear(loaded_vmcs); 3535 loaded_vmcs_clear(loaded_vmcs);
3563 free_vmcs(loaded_vmcs->vmcs); 3536 free_vmcs(loaded_vmcs->vmcs);
3564 loaded_vmcs->vmcs = NULL; 3537 loaded_vmcs->vmcs = NULL;
3538 WARN_ON(loaded_vmcs->shadow_vmcs != NULL);
3565} 3539}
3566 3540
3567static void free_kvm_area(void) 3541static void free_kvm_area(void)
@@ -6696,6 +6670,7 @@ static struct loaded_vmcs *nested_get_current_vmcs02(struct vcpu_vmx *vmx)
6696 if (!item) 6670 if (!item)
6697 return NULL; 6671 return NULL;
6698 item->vmcs02.vmcs = alloc_vmcs(); 6672 item->vmcs02.vmcs = alloc_vmcs();
6673 item->vmcs02.shadow_vmcs = NULL;
6699 if (!item->vmcs02.vmcs) { 6674 if (!item->vmcs02.vmcs) {
6700 kfree(item); 6675 kfree(item);
6701 return NULL; 6676 return NULL;
@@ -7072,7 +7047,7 @@ static int handle_vmon(struct kvm_vcpu *vcpu)
7072 shadow_vmcs->revision_id |= (1u << 31); 7047 shadow_vmcs->revision_id |= (1u << 31);
7073 /* init shadow vmcs */ 7048 /* init shadow vmcs */
7074 vmcs_clear(shadow_vmcs); 7049 vmcs_clear(shadow_vmcs);
7075 vmx->nested.current_shadow_vmcs = shadow_vmcs; 7050 vmx->vmcs01.shadow_vmcs = shadow_vmcs;
7076 } 7051 }
7077 7052
7078 INIT_LIST_HEAD(&(vmx->nested.vmcs02_pool)); 7053 INIT_LIST_HEAD(&(vmx->nested.vmcs02_pool));
@@ -7174,8 +7149,11 @@ static void free_nested(struct vcpu_vmx *vmx)
7174 free_page((unsigned long)vmx->nested.msr_bitmap); 7149 free_page((unsigned long)vmx->nested.msr_bitmap);
7175 vmx->nested.msr_bitmap = NULL; 7150 vmx->nested.msr_bitmap = NULL;
7176 } 7151 }
7177 if (enable_shadow_vmcs) 7152 if (enable_shadow_vmcs) {
7178 free_vmcs(vmx->nested.current_shadow_vmcs); 7153 vmcs_clear(vmx->vmcs01.shadow_vmcs);
7154 free_vmcs(vmx->vmcs01.shadow_vmcs);
7155 vmx->vmcs01.shadow_vmcs = NULL;
7156 }
7179 kfree(vmx->nested.cached_vmcs12); 7157 kfree(vmx->nested.cached_vmcs12);
7180 /* Unpin physical memory we referred to in current vmcs02 */ 7158 /* Unpin physical memory we referred to in current vmcs02 */
7181 if (vmx->nested.apic_access_page) { 7159 if (vmx->nested.apic_access_page) {
@@ -7352,7 +7330,7 @@ static void copy_shadow_to_vmcs12(struct vcpu_vmx *vmx)
7352 int i; 7330 int i;
7353 unsigned long field; 7331 unsigned long field;
7354 u64 field_value; 7332 u64 field_value;
7355 struct vmcs *shadow_vmcs = vmx->nested.current_shadow_vmcs; 7333 struct vmcs *shadow_vmcs = vmx->vmcs01.shadow_vmcs;
7356 const unsigned long *fields = shadow_read_write_fields; 7334 const unsigned long *fields = shadow_read_write_fields;
7357 const int num_fields = max_shadow_read_write_fields; 7335 const int num_fields = max_shadow_read_write_fields;
7358 7336
@@ -7401,7 +7379,7 @@ static void copy_vmcs12_to_shadow(struct vcpu_vmx *vmx)
7401 int i, q; 7379 int i, q;
7402 unsigned long field; 7380 unsigned long field;
7403 u64 field_value = 0; 7381 u64 field_value = 0;
7404 struct vmcs *shadow_vmcs = vmx->nested.current_shadow_vmcs; 7382 struct vmcs *shadow_vmcs = vmx->vmcs01.shadow_vmcs;
7405 7383
7406 vmcs_load(shadow_vmcs); 7384 vmcs_load(shadow_vmcs);
7407 7385
@@ -7591,7 +7569,7 @@ static int handle_vmptrld(struct kvm_vcpu *vcpu)
7591 vmcs_set_bits(SECONDARY_VM_EXEC_CONTROL, 7569 vmcs_set_bits(SECONDARY_VM_EXEC_CONTROL,
7592 SECONDARY_EXEC_SHADOW_VMCS); 7570 SECONDARY_EXEC_SHADOW_VMCS);
7593 vmcs_write64(VMCS_LINK_POINTER, 7571 vmcs_write64(VMCS_LINK_POINTER,
7594 __pa(vmx->nested.current_shadow_vmcs)); 7572 __pa(vmx->vmcs01.shadow_vmcs));
7595 vmx->nested.sync_shadow_vmcs = true; 7573 vmx->nested.sync_shadow_vmcs = true;
7596 } 7574 }
7597 } 7575 }
@@ -7659,7 +7637,7 @@ static int handle_invept(struct kvm_vcpu *vcpu)
7659 7637
7660 types = (vmx->nested.nested_vmx_ept_caps >> VMX_EPT_EXTENT_SHIFT) & 6; 7638 types = (vmx->nested.nested_vmx_ept_caps >> VMX_EPT_EXTENT_SHIFT) & 6;
7661 7639
7662 if (!(types & (1UL << type))) { 7640 if (type >= 32 || !(types & (1 << type))) {
7663 nested_vmx_failValid(vcpu, 7641 nested_vmx_failValid(vcpu,
7664 VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID); 7642 VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);
7665 skip_emulated_instruction(vcpu); 7643 skip_emulated_instruction(vcpu);
@@ -7722,7 +7700,7 @@ static int handle_invvpid(struct kvm_vcpu *vcpu)
7722 7700
7723 types = (vmx->nested.nested_vmx_vpid_caps >> 8) & 0x7; 7701 types = (vmx->nested.nested_vmx_vpid_caps >> 8) & 0x7;
7724 7702
7725 if (!(types & (1UL << type))) { 7703 if (type >= 32 || !(types & (1 << type))) {
7726 nested_vmx_failValid(vcpu, 7704 nested_vmx_failValid(vcpu,
7727 VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID); 7705 VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);
7728 skip_emulated_instruction(vcpu); 7706 skip_emulated_instruction(vcpu);
@@ -9156,6 +9134,7 @@ static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id)
9156 9134
9157 vmx->loaded_vmcs = &vmx->vmcs01; 9135 vmx->loaded_vmcs = &vmx->vmcs01;
9158 vmx->loaded_vmcs->vmcs = alloc_vmcs(); 9136 vmx->loaded_vmcs->vmcs = alloc_vmcs();
9137 vmx->loaded_vmcs->shadow_vmcs = NULL;
9159 if (!vmx->loaded_vmcs->vmcs) 9138 if (!vmx->loaded_vmcs->vmcs)
9160 goto free_msrs; 9139 goto free_msrs;
9161 if (!vmm_exclusive) 9140 if (!vmm_exclusive)
@@ -10061,9 +10040,9 @@ static void prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
10061 10040
10062 if (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETING) 10041 if (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETING)
10063 vmcs_write64(TSC_OFFSET, 10042 vmcs_write64(TSC_OFFSET,
10064 vmx->nested.vmcs01_tsc_offset + vmcs12->tsc_offset); 10043 vcpu->arch.tsc_offset + vmcs12->tsc_offset);
10065 else 10044 else
10066 vmcs_write64(TSC_OFFSET, vmx->nested.vmcs01_tsc_offset); 10045 vmcs_write64(TSC_OFFSET, vcpu->arch.tsc_offset);
10067 if (kvm_has_tsc_control) 10046 if (kvm_has_tsc_control)
10068 decache_tsc_multiplier(vmx); 10047 decache_tsc_multiplier(vmx);
10069 10048
@@ -10293,8 +10272,6 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
10293 10272
10294 enter_guest_mode(vcpu); 10273 enter_guest_mode(vcpu);
10295 10274
10296 vmx->nested.vmcs01_tsc_offset = vmcs_read64(TSC_OFFSET);
10297
10298 if (!(vmcs12->vm_entry_controls & VM_ENTRY_LOAD_DEBUG_CONTROLS)) 10275 if (!(vmcs12->vm_entry_controls & VM_ENTRY_LOAD_DEBUG_CONTROLS))
10299 vmx->nested.vmcs01_debugctl = vmcs_read64(GUEST_IA32_DEBUGCTL); 10276 vmx->nested.vmcs01_debugctl = vmcs_read64(GUEST_IA32_DEBUGCTL);
10300 10277
@@ -10818,7 +10795,7 @@ static void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason,
10818 load_vmcs12_host_state(vcpu, vmcs12); 10795 load_vmcs12_host_state(vcpu, vmcs12);
10819 10796
10820 /* Update any VMCS fields that might have changed while L2 ran */ 10797 /* Update any VMCS fields that might have changed while L2 ran */
10821 vmcs_write64(TSC_OFFSET, vmx->nested.vmcs01_tsc_offset); 10798 vmcs_write64(TSC_OFFSET, vcpu->arch.tsc_offset);
10822 if (vmx->hv_deadline_tsc == -1) 10799 if (vmx->hv_deadline_tsc == -1)
10823 vmcs_clear_bits(PIN_BASED_VM_EXEC_CONTROL, 10800 vmcs_clear_bits(PIN_BASED_VM_EXEC_CONTROL,
10824 PIN_BASED_VMX_PREEMPTION_TIMER); 10801 PIN_BASED_VMX_PREEMPTION_TIMER);
@@ -11339,8 +11316,6 @@ static struct kvm_x86_ops vmx_x86_ops __ro_after_init = {
11339 .has_wbinvd_exit = cpu_has_vmx_wbinvd_exit, 11316 .has_wbinvd_exit = cpu_has_vmx_wbinvd_exit,
11340 11317
11341 .write_tsc_offset = vmx_write_tsc_offset, 11318 .write_tsc_offset = vmx_write_tsc_offset,
11342 .adjust_tsc_offset_guest = vmx_adjust_tsc_offset_guest,
11343 .read_l1_tsc = vmx_read_l1_tsc,
11344 11319
11345 .set_tdp_cr3 = vmx_set_cr3, 11320 .set_tdp_cr3 = vmx_set_cr3,
11346 11321
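Besides moving the shadow VMCS into struct loaded_vmcs, the vmx.c hunks harden the INVEPT/INVVPID operand checks: shifting by 32 or more is undefined behaviour in C, so the type must be range-checked before the bit test. A minimal sketch of that check, with an illustrative helper name:

#include <linux/types.h>

/* 'types' is a bitmap of supported operand types (bits 0..31). */
static bool invept_type_supported(u32 types, u32 type)
{
        /* Check the range first: 1 << type is undefined for type >= 32. */
        return type < 32 && (types & (1u << type));
}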
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index e375235d81c9..3017de0431bd 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -1409,7 +1409,7 @@ static u64 kvm_compute_tsc_offset(struct kvm_vcpu *vcpu, u64 target_tsc)
1409 1409
1410u64 kvm_read_l1_tsc(struct kvm_vcpu *vcpu, u64 host_tsc) 1410u64 kvm_read_l1_tsc(struct kvm_vcpu *vcpu, u64 host_tsc)
1411{ 1411{
1412 return kvm_x86_ops->read_l1_tsc(vcpu, kvm_scale_tsc(vcpu, host_tsc)); 1412 return vcpu->arch.tsc_offset + kvm_scale_tsc(vcpu, host_tsc);
1413} 1413}
1414EXPORT_SYMBOL_GPL(kvm_read_l1_tsc); 1414EXPORT_SYMBOL_GPL(kvm_read_l1_tsc);
1415 1415
@@ -1547,7 +1547,7 @@ EXPORT_SYMBOL_GPL(kvm_write_tsc);
1547static inline void adjust_tsc_offset_guest(struct kvm_vcpu *vcpu, 1547static inline void adjust_tsc_offset_guest(struct kvm_vcpu *vcpu,
1548 s64 adjustment) 1548 s64 adjustment)
1549{ 1549{
1550 kvm_x86_ops->adjust_tsc_offset_guest(vcpu, adjustment); 1550 kvm_vcpu_write_tsc_offset(vcpu, vcpu->arch.tsc_offset + adjustment);
1551} 1551}
1552 1552
1553static inline void adjust_tsc_offset_host(struct kvm_vcpu *vcpu, s64 adjustment) 1553static inline void adjust_tsc_offset_host(struct kvm_vcpu *vcpu, s64 adjustment)
@@ -1555,7 +1555,7 @@ static inline void adjust_tsc_offset_host(struct kvm_vcpu *vcpu, s64 adjustment)
1555 if (vcpu->arch.tsc_scaling_ratio != kvm_default_tsc_scaling_ratio) 1555 if (vcpu->arch.tsc_scaling_ratio != kvm_default_tsc_scaling_ratio)
1556 WARN_ON(adjustment < 0); 1556 WARN_ON(adjustment < 0);
1557 adjustment = kvm_scale_tsc(vcpu, (u64) adjustment); 1557 adjustment = kvm_scale_tsc(vcpu, (u64) adjustment);
1558 kvm_x86_ops->adjust_tsc_offset_guest(vcpu, adjustment); 1558 adjust_tsc_offset_guest(vcpu, adjustment);
1559} 1559}
1560 1560
1561#ifdef CONFIG_X86_64 1561#ifdef CONFIG_X86_64
@@ -2262,7 +2262,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
2262 /* Drop writes to this legacy MSR -- see rdmsr 2262 /* Drop writes to this legacy MSR -- see rdmsr
2263 * counterpart for further detail. 2263 * counterpart for further detail.
2264 */ 2264 */
2265 vcpu_unimpl(vcpu, "ignored wrmsr: 0x%x data %llx\n", msr, data); 2265 vcpu_unimpl(vcpu, "ignored wrmsr: 0x%x data 0x%llx\n", msr, data);
2266 break; 2266 break;
2267 case MSR_AMD64_OSVW_ID_LENGTH: 2267 case MSR_AMD64_OSVW_ID_LENGTH:
2268 if (!guest_cpuid_has_osvw(vcpu)) 2268 if (!guest_cpuid_has_osvw(vcpu))
@@ -2280,11 +2280,11 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
2280 if (kvm_pmu_is_valid_msr(vcpu, msr)) 2280 if (kvm_pmu_is_valid_msr(vcpu, msr))
2281 return kvm_pmu_set_msr(vcpu, msr_info); 2281 return kvm_pmu_set_msr(vcpu, msr_info);
2282 if (!ignore_msrs) { 2282 if (!ignore_msrs) {
2283 vcpu_unimpl(vcpu, "unhandled wrmsr: 0x%x data %llx\n", 2283 vcpu_unimpl(vcpu, "unhandled wrmsr: 0x%x data 0x%llx\n",
2284 msr, data); 2284 msr, data);
2285 return 1; 2285 return 1;
2286 } else { 2286 } else {
2287 vcpu_unimpl(vcpu, "ignored wrmsr: 0x%x data %llx\n", 2287 vcpu_unimpl(vcpu, "ignored wrmsr: 0x%x data 0x%llx\n",
2288 msr, data); 2288 msr, data);
2289 break; 2289 break;
2290 } 2290 }
@@ -7410,10 +7410,12 @@ void kvm_put_guest_fpu(struct kvm_vcpu *vcpu)
7410 7410
7411void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu) 7411void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
7412{ 7412{
7413 void *wbinvd_dirty_mask = vcpu->arch.wbinvd_dirty_mask;
7414
7413 kvmclock_reset(vcpu); 7415 kvmclock_reset(vcpu);
7414 7416
7415 free_cpumask_var(vcpu->arch.wbinvd_dirty_mask);
7416 kvm_x86_ops->vcpu_free(vcpu); 7417 kvm_x86_ops->vcpu_free(vcpu);
7418 free_cpumask_var(wbinvd_dirty_mask);
7417} 7419}
7418 7420
7419struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, 7421struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
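The KVM changes across kvm_host.h, svm.c, vmx.c and x86.c drop the per-vendor read_l1_tsc and adjust_tsc_offset_guest callbacks: with vcpu->arch.tsc_offset tracked in common code, both operations reduce to simple arithmetic plus kvm_vcpu_write_tsc_offset(). A minimal restatement of the two helpers as they appear in the hunks above (a sketch, not a standalone program):

/* L1's view of the TSC: scale the host TSC, then add L1's offset. */
static u64 read_l1_tsc(struct kvm_vcpu *vcpu, u64 host_tsc)
{
        return vcpu->arch.tsc_offset + kvm_scale_tsc(vcpu, host_tsc);
}

/* Adjusting the guest TSC is now just rewriting the cached offset. */
static void adjust_tsc_offset_guest(struct kvm_vcpu *vcpu, s64 adjustment)
{
        kvm_vcpu_write_tsc_offset(vcpu, vcpu->arch.tsc_offset + adjustment);
}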
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index 2dc5c96c186a..5545a679abd8 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -376,7 +376,7 @@ static void virtblk_config_changed(struct virtio_device *vdev)
376 376
377static int init_vq(struct virtio_blk *vblk) 377static int init_vq(struct virtio_blk *vblk)
378{ 378{
379 int err = 0; 379 int err;
380 int i; 380 int i;
381 vq_callback_t **callbacks; 381 vq_callback_t **callbacks;
382 const char **names; 382 const char **names;
@@ -390,13 +390,13 @@ static int init_vq(struct virtio_blk *vblk)
390 if (err) 390 if (err)
391 num_vqs = 1; 391 num_vqs = 1;
392 392
393 vblk->vqs = kmalloc(sizeof(*vblk->vqs) * num_vqs, GFP_KERNEL); 393 vblk->vqs = kmalloc_array(num_vqs, sizeof(*vblk->vqs), GFP_KERNEL);
394 if (!vblk->vqs) 394 if (!vblk->vqs)
395 return -ENOMEM; 395 return -ENOMEM;
396 396
397 names = kmalloc(sizeof(*names) * num_vqs, GFP_KERNEL); 397 names = kmalloc_array(num_vqs, sizeof(*names), GFP_KERNEL);
398 callbacks = kmalloc(sizeof(*callbacks) * num_vqs, GFP_KERNEL); 398 callbacks = kmalloc_array(num_vqs, sizeof(*callbacks), GFP_KERNEL);
399 vqs = kmalloc(sizeof(*vqs) * num_vqs, GFP_KERNEL); 399 vqs = kmalloc_array(num_vqs, sizeof(*vqs), GFP_KERNEL);
400 if (!names || !callbacks || !vqs) { 400 if (!names || !callbacks || !vqs) {
401 err = -ENOMEM; 401 err = -ENOMEM;
402 goto out; 402 goto out;
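init_vq() switches the open-coded kmalloc(sizeof(x) * n) allocations to kmalloc_array(), which returns NULL when n * size would overflow instead of quietly allocating a short buffer. A minimal sketch of the conversion, with an illustrative helper name:

#include <linux/slab.h>
#include <linux/virtio.h>

static struct virtqueue **alloc_vq_array(unsigned int num_vqs)
{
        /* Fails cleanly if num_vqs * sizeof(*vqs) would overflow. */
        return kmalloc_array(num_vqs, sizeof(struct virtqueue *), GFP_KERNEL);
}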
diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
index d433b1db1fdd..5649234b7316 100644
--- a/drivers/char/virtio_console.c
+++ b/drivers/char/virtio_console.c
@@ -1539,19 +1539,29 @@ static void remove_port_data(struct port *port)
1539 spin_lock_irq(&port->inbuf_lock); 1539 spin_lock_irq(&port->inbuf_lock);
1540 /* Remove unused data this port might have received. */ 1540 /* Remove unused data this port might have received. */
1541 discard_port_data(port); 1541 discard_port_data(port);
1542 spin_unlock_irq(&port->inbuf_lock);
1542 1543
1543 /* Remove buffers we queued up for the Host to send us data in. */ 1544 /* Remove buffers we queued up for the Host to send us data in. */
1544 while ((buf = virtqueue_detach_unused_buf(port->in_vq))) 1545 do {
1545 free_buf(buf, true); 1546 spin_lock_irq(&port->inbuf_lock);
1546 spin_unlock_irq(&port->inbuf_lock); 1547 buf = virtqueue_detach_unused_buf(port->in_vq);
1548 spin_unlock_irq(&port->inbuf_lock);
1549 if (buf)
1550 free_buf(buf, true);
1551 } while (buf);
1547 1552
1548 spin_lock_irq(&port->outvq_lock); 1553 spin_lock_irq(&port->outvq_lock);
1549 reclaim_consumed_buffers(port); 1554 reclaim_consumed_buffers(port);
1555 spin_unlock_irq(&port->outvq_lock);
1550 1556
1551 /* Free pending buffers from the out-queue. */ 1557 /* Free pending buffers from the out-queue. */
1552 while ((buf = virtqueue_detach_unused_buf(port->out_vq))) 1558 do {
1553 free_buf(buf, true); 1559 spin_lock_irq(&port->outvq_lock);
1554 spin_unlock_irq(&port->outvq_lock); 1560 buf = virtqueue_detach_unused_buf(port->out_vq);
1561 spin_unlock_irq(&port->outvq_lock);
1562 if (buf)
1563 free_buf(buf, true);
1564 } while (buf);
1555} 1565}
1556 1566
1557/* 1567/*
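remove_port_data() used to hold inbuf_lock/outvq_lock across the whole drain loop; the hunks rework each loop so the spinlock only covers virtqueue_detach_unused_buf() and the buffer is freed with the lock dropped. A minimal sketch of that loop shape; free_buf() is the driver's own helper as used in the hunk:

#include <linux/spinlock.h>
#include <linux/virtio.h>

/* Detach one buffer under the lock, free it with the lock dropped,
 * and repeat until the virtqueue has no unused buffers left. */
static void drain_unused_bufs(struct virtqueue *vq, spinlock_t *lock)
{
        void *buf;

        do {
                spin_lock_irq(lock);
                buf = virtqueue_detach_unused_buf(vq);
                spin_unlock_irq(lock);
                if (buf)
                        free_buf(buf, true);
        } while (buf);
}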
diff --git a/drivers/firewire/net.c b/drivers/firewire/net.c
index 309311b1faae..15475892af0c 100644
--- a/drivers/firewire/net.c
+++ b/drivers/firewire/net.c
@@ -73,13 +73,13 @@ struct rfc2734_header {
73 73
74#define fwnet_get_hdr_lf(h) (((h)->w0 & 0xc0000000) >> 30) 74#define fwnet_get_hdr_lf(h) (((h)->w0 & 0xc0000000) >> 30)
75#define fwnet_get_hdr_ether_type(h) (((h)->w0 & 0x0000ffff)) 75#define fwnet_get_hdr_ether_type(h) (((h)->w0 & 0x0000ffff))
76#define fwnet_get_hdr_dg_size(h) (((h)->w0 & 0x0fff0000) >> 16) 76#define fwnet_get_hdr_dg_size(h) ((((h)->w0 & 0x0fff0000) >> 16) + 1)
77#define fwnet_get_hdr_fg_off(h) (((h)->w0 & 0x00000fff)) 77#define fwnet_get_hdr_fg_off(h) (((h)->w0 & 0x00000fff))
78#define fwnet_get_hdr_dgl(h) (((h)->w1 & 0xffff0000) >> 16) 78#define fwnet_get_hdr_dgl(h) (((h)->w1 & 0xffff0000) >> 16)
79 79
80#define fwnet_set_hdr_lf(lf) ((lf) << 30) 80#define fwnet_set_hdr_lf(lf) ((lf) << 30)
81#define fwnet_set_hdr_ether_type(et) (et) 81#define fwnet_set_hdr_ether_type(et) (et)
82#define fwnet_set_hdr_dg_size(dgs) ((dgs) << 16) 82#define fwnet_set_hdr_dg_size(dgs) (((dgs) - 1) << 16)
83#define fwnet_set_hdr_fg_off(fgo) (fgo) 83#define fwnet_set_hdr_fg_off(fgo) (fgo)
84 84
85#define fwnet_set_hdr_dgl(dgl) ((dgl) << 16) 85#define fwnet_set_hdr_dgl(dgl) ((dgl) << 16)
@@ -578,6 +578,9 @@ static int fwnet_incoming_packet(struct fwnet_device *dev, __be32 *buf, int len,
578 int retval; 578 int retval;
579 u16 ether_type; 579 u16 ether_type;
580 580
581 if (len <= RFC2374_UNFRAG_HDR_SIZE)
582 return 0;
583
581 hdr.w0 = be32_to_cpu(buf[0]); 584 hdr.w0 = be32_to_cpu(buf[0]);
582 lf = fwnet_get_hdr_lf(&hdr); 585 lf = fwnet_get_hdr_lf(&hdr);
583 if (lf == RFC2374_HDR_UNFRAG) { 586 if (lf == RFC2374_HDR_UNFRAG) {
@@ -602,7 +605,12 @@ static int fwnet_incoming_packet(struct fwnet_device *dev, __be32 *buf, int len,
602 return fwnet_finish_incoming_packet(net, skb, source_node_id, 605 return fwnet_finish_incoming_packet(net, skb, source_node_id,
603 is_broadcast, ether_type); 606 is_broadcast, ether_type);
604 } 607 }
608
605 /* A datagram fragment has been received, now the fun begins. */ 609 /* A datagram fragment has been received, now the fun begins. */
610
611 if (len <= RFC2374_FRAG_HDR_SIZE)
612 return 0;
613
606 hdr.w1 = ntohl(buf[1]); 614 hdr.w1 = ntohl(buf[1]);
607 buf += 2; 615 buf += 2;
608 len -= RFC2374_FRAG_HDR_SIZE; 616 len -= RFC2374_FRAG_HDR_SIZE;
@@ -614,7 +622,10 @@ static int fwnet_incoming_packet(struct fwnet_device *dev, __be32 *buf, int len,
614 fg_off = fwnet_get_hdr_fg_off(&hdr); 622 fg_off = fwnet_get_hdr_fg_off(&hdr);
615 } 623 }
616 datagram_label = fwnet_get_hdr_dgl(&hdr); 624 datagram_label = fwnet_get_hdr_dgl(&hdr);
617 dg_size = fwnet_get_hdr_dg_size(&hdr); /* ??? + 1 */ 625 dg_size = fwnet_get_hdr_dg_size(&hdr);
626
627 if (fg_off + len > dg_size)
628 return 0;
618 629
619 spin_lock_irqsave(&dev->lock, flags); 630 spin_lock_irqsave(&dev->lock, flags);
620 631
@@ -722,6 +733,22 @@ static void fwnet_receive_packet(struct fw_card *card, struct fw_request *r,
722 fw_send_response(card, r, rcode); 733 fw_send_response(card, r, rcode);
723} 734}
724 735
736static int gasp_source_id(__be32 *p)
737{
738 return be32_to_cpu(p[0]) >> 16;
739}
740
741static u32 gasp_specifier_id(__be32 *p)
742{
743 return (be32_to_cpu(p[0]) & 0xffff) << 8 |
744 (be32_to_cpu(p[1]) & 0xff000000) >> 24;
745}
746
747static u32 gasp_version(__be32 *p)
748{
749 return be32_to_cpu(p[1]) & 0xffffff;
750}
751
725static void fwnet_receive_broadcast(struct fw_iso_context *context, 752static void fwnet_receive_broadcast(struct fw_iso_context *context,
726 u32 cycle, size_t header_length, void *header, void *data) 753 u32 cycle, size_t header_length, void *header, void *data)
727{ 754{
@@ -731,9 +758,6 @@ static void fwnet_receive_broadcast(struct fw_iso_context *context,
731 __be32 *buf_ptr; 758 __be32 *buf_ptr;
732 int retval; 759 int retval;
733 u32 length; 760 u32 length;
734 u16 source_node_id;
735 u32 specifier_id;
736 u32 ver;
737 unsigned long offset; 761 unsigned long offset;
738 unsigned long flags; 762 unsigned long flags;
739 763
@@ -750,22 +774,17 @@ static void fwnet_receive_broadcast(struct fw_iso_context *context,
750 774
751 spin_unlock_irqrestore(&dev->lock, flags); 775 spin_unlock_irqrestore(&dev->lock, flags);
752 776
753 specifier_id = (be32_to_cpu(buf_ptr[0]) & 0xffff) << 8 777 if (length > IEEE1394_GASP_HDR_SIZE &&
754 | (be32_to_cpu(buf_ptr[1]) & 0xff000000) >> 24; 778 gasp_specifier_id(buf_ptr) == IANA_SPECIFIER_ID &&
755 ver = be32_to_cpu(buf_ptr[1]) & 0xffffff; 779 (gasp_version(buf_ptr) == RFC2734_SW_VERSION
756 source_node_id = be32_to_cpu(buf_ptr[0]) >> 16;
757
758 if (specifier_id == IANA_SPECIFIER_ID &&
759 (ver == RFC2734_SW_VERSION
760#if IS_ENABLED(CONFIG_IPV6) 780#if IS_ENABLED(CONFIG_IPV6)
761 || ver == RFC3146_SW_VERSION 781 || gasp_version(buf_ptr) == RFC3146_SW_VERSION
762#endif 782#endif
763 )) { 783 ))
764 buf_ptr += 2; 784 fwnet_incoming_packet(dev, buf_ptr + 2,
765 length -= IEEE1394_GASP_HDR_SIZE; 785 length - IEEE1394_GASP_HDR_SIZE,
766 fwnet_incoming_packet(dev, buf_ptr, length, source_node_id, 786 gasp_source_id(buf_ptr),
767 context->card->generation, true); 787 context->card->generation, true);
768 }
769 788
770 packet.payload_length = dev->rcv_buffer_size; 789 packet.payload_length = dev->rcv_buffer_size;
771 packet.interrupt = 1; 790 packet.interrupt = 1;
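The firewire/net.c changes add length checks before the RFC 2374 headers are parsed (a packet must be longer than the unfragmented or fragment header, and a fragment at fg_off of length len must fit inside the datagram it claims to extend), fold the wire encoding of dg_size into the accessor macros, and pull the GASP header fields out into small helpers. The dg_size handling, restated from the hunks:

/* RFC 2374 stores the datagram size minus one; keep that detail in the
 * accessors so the rest of the driver deals in real sizes. */
#define fwnet_get_hdr_dg_size(h)   ((((h)->w0 & 0x0fff0000) >> 16) + 1)
#define fwnet_set_hdr_dg_size(dgs) (((dgs) - 1) << 16)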
diff --git a/drivers/gpio/gpio-mvebu.c b/drivers/gpio/gpio-mvebu.c
index cd5dc27320a2..1ed6132b993c 100644
--- a/drivers/gpio/gpio-mvebu.c
+++ b/drivers/gpio/gpio-mvebu.c
@@ -293,10 +293,10 @@ static void mvebu_gpio_irq_ack(struct irq_data *d)
293{ 293{
294 struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d); 294 struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
295 struct mvebu_gpio_chip *mvchip = gc->private; 295 struct mvebu_gpio_chip *mvchip = gc->private;
296 u32 mask = ~(1 << (d->irq - gc->irq_base)); 296 u32 mask = d->mask;
297 297
298 irq_gc_lock(gc); 298 irq_gc_lock(gc);
299 writel_relaxed(mask, mvebu_gpioreg_edge_cause(mvchip)); 299 writel_relaxed(~mask, mvebu_gpioreg_edge_cause(mvchip));
300 irq_gc_unlock(gc); 300 irq_gc_unlock(gc);
301} 301}
302 302
@@ -305,7 +305,7 @@ static void mvebu_gpio_edge_irq_mask(struct irq_data *d)
305 struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d); 305 struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
306 struct mvebu_gpio_chip *mvchip = gc->private; 306 struct mvebu_gpio_chip *mvchip = gc->private;
307 struct irq_chip_type *ct = irq_data_get_chip_type(d); 307 struct irq_chip_type *ct = irq_data_get_chip_type(d);
308 u32 mask = 1 << (d->irq - gc->irq_base); 308 u32 mask = d->mask;
309 309
310 irq_gc_lock(gc); 310 irq_gc_lock(gc);
311 ct->mask_cache_priv &= ~mask; 311 ct->mask_cache_priv &= ~mask;
@@ -319,8 +319,7 @@ static void mvebu_gpio_edge_irq_unmask(struct irq_data *d)
319 struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d); 319 struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
320 struct mvebu_gpio_chip *mvchip = gc->private; 320 struct mvebu_gpio_chip *mvchip = gc->private;
321 struct irq_chip_type *ct = irq_data_get_chip_type(d); 321 struct irq_chip_type *ct = irq_data_get_chip_type(d);
322 322 u32 mask = d->mask;
323 u32 mask = 1 << (d->irq - gc->irq_base);
324 323
325 irq_gc_lock(gc); 324 irq_gc_lock(gc);
326 ct->mask_cache_priv |= mask; 325 ct->mask_cache_priv |= mask;
@@ -333,8 +332,7 @@ static void mvebu_gpio_level_irq_mask(struct irq_data *d)
333 struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d); 332 struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
334 struct mvebu_gpio_chip *mvchip = gc->private; 333 struct mvebu_gpio_chip *mvchip = gc->private;
335 struct irq_chip_type *ct = irq_data_get_chip_type(d); 334 struct irq_chip_type *ct = irq_data_get_chip_type(d);
336 335 u32 mask = d->mask;
337 u32 mask = 1 << (d->irq - gc->irq_base);
338 336
339 irq_gc_lock(gc); 337 irq_gc_lock(gc);
340 ct->mask_cache_priv &= ~mask; 338 ct->mask_cache_priv &= ~mask;
@@ -347,8 +345,7 @@ static void mvebu_gpio_level_irq_unmask(struct irq_data *d)
347 struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d); 345 struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
348 struct mvebu_gpio_chip *mvchip = gc->private; 346 struct mvebu_gpio_chip *mvchip = gc->private;
349 struct irq_chip_type *ct = irq_data_get_chip_type(d); 347 struct irq_chip_type *ct = irq_data_get_chip_type(d);
350 348 u32 mask = d->mask;
351 u32 mask = 1 << (d->irq - gc->irq_base);
352 349
353 irq_gc_lock(gc); 350 irq_gc_lock(gc);
354 ct->mask_cache_priv |= mask; 351 ct->mask_cache_priv |= mask;
@@ -462,7 +459,7 @@ static void mvebu_gpio_irq_handler(struct irq_desc *desc)
462 for (i = 0; i < mvchip->chip.ngpio; i++) { 459 for (i = 0; i < mvchip->chip.ngpio; i++) {
463 int irq; 460 int irq;
464 461
465 irq = mvchip->irqbase + i; 462 irq = irq_find_mapping(mvchip->domain, i);
466 463
467 if (!(cause & (1 << i))) 464 if (!(cause & (1 << i)))
468 continue; 465 continue;
@@ -655,6 +652,7 @@ static int mvebu_gpio_probe(struct platform_device *pdev)
655 struct irq_chip_type *ct; 652 struct irq_chip_type *ct;
656 struct clk *clk; 653 struct clk *clk;
657 unsigned int ngpios; 654 unsigned int ngpios;
655 bool have_irqs;
658 int soc_variant; 656 int soc_variant;
659 int i, cpu, id; 657 int i, cpu, id;
660 int err; 658 int err;
@@ -665,6 +663,9 @@ static int mvebu_gpio_probe(struct platform_device *pdev)
665 else 663 else
666 soc_variant = MVEBU_GPIO_SOC_VARIANT_ORION; 664 soc_variant = MVEBU_GPIO_SOC_VARIANT_ORION;
667 665
666 /* Some gpio controllers do not provide irq support */
667 have_irqs = of_irq_count(np) != 0;
668
668 mvchip = devm_kzalloc(&pdev->dev, sizeof(struct mvebu_gpio_chip), 669 mvchip = devm_kzalloc(&pdev->dev, sizeof(struct mvebu_gpio_chip),
669 GFP_KERNEL); 670 GFP_KERNEL);
670 if (!mvchip) 671 if (!mvchip)
@@ -697,7 +698,8 @@ static int mvebu_gpio_probe(struct platform_device *pdev)
697 mvchip->chip.get = mvebu_gpio_get; 698 mvchip->chip.get = mvebu_gpio_get;
698 mvchip->chip.direction_output = mvebu_gpio_direction_output; 699 mvchip->chip.direction_output = mvebu_gpio_direction_output;
699 mvchip->chip.set = mvebu_gpio_set; 700 mvchip->chip.set = mvebu_gpio_set;
700 mvchip->chip.to_irq = mvebu_gpio_to_irq; 701 if (have_irqs)
702 mvchip->chip.to_irq = mvebu_gpio_to_irq;
701 mvchip->chip.base = id * MVEBU_MAX_GPIO_PER_BANK; 703 mvchip->chip.base = id * MVEBU_MAX_GPIO_PER_BANK;
702 mvchip->chip.ngpio = ngpios; 704 mvchip->chip.ngpio = ngpios;
703 mvchip->chip.can_sleep = false; 705 mvchip->chip.can_sleep = false;
@@ -758,34 +760,30 @@ static int mvebu_gpio_probe(struct platform_device *pdev)
758 devm_gpiochip_add_data(&pdev->dev, &mvchip->chip, mvchip); 760 devm_gpiochip_add_data(&pdev->dev, &mvchip->chip, mvchip);
759 761
760 /* Some gpio controllers do not provide irq support */ 762 /* Some gpio controllers do not provide irq support */
761 if (!of_irq_count(np)) 763 if (!have_irqs)
762 return 0; 764 return 0;
763 765
764 /* Setup the interrupt handlers. Each chip can have up to 4 766 mvchip->domain =
765 * interrupt handlers, with each handler dealing with 8 GPIO 767 irq_domain_add_linear(np, ngpios, &irq_generic_chip_ops, NULL);
766 * pins. */ 768 if (!mvchip->domain) {
767 for (i = 0; i < 4; i++) { 769 dev_err(&pdev->dev, "couldn't allocate irq domain %s (DT).\n",
768 int irq = platform_get_irq(pdev, i); 770 mvchip->chip.label);
769 771 return -ENODEV;
770 if (irq < 0)
771 continue;
772 irq_set_chained_handler_and_data(irq, mvebu_gpio_irq_handler,
773 mvchip);
774 }
775
776 mvchip->irqbase = irq_alloc_descs(-1, 0, ngpios, -1);
777 if (mvchip->irqbase < 0) {
778 dev_err(&pdev->dev, "no irqs\n");
779 return mvchip->irqbase;
780 } 772 }
781 773
782 gc = irq_alloc_generic_chip("mvebu_gpio_irq", 2, mvchip->irqbase, 774 err = irq_alloc_domain_generic_chips(
783 mvchip->membase, handle_level_irq); 775 mvchip->domain, ngpios, 2, np->name, handle_level_irq,
784 if (!gc) { 776 IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_LEVEL, 0, 0);
785 dev_err(&pdev->dev, "Cannot allocate generic irq_chip\n"); 777 if (err) {
786 return -ENOMEM; 778 dev_err(&pdev->dev, "couldn't allocate irq chips %s (DT).\n",
779 mvchip->chip.label);
780 goto err_domain;
787 } 781 }
788 782
783 /* NOTE: The common accessors cannot be used because of the percpu
784 * access to the mask registers
785 */
786 gc = irq_get_domain_generic_chip(mvchip->domain, 0);
789 gc->private = mvchip; 787 gc->private = mvchip;
790 ct = &gc->chip_types[0]; 788 ct = &gc->chip_types[0];
791 ct->type = IRQ_TYPE_LEVEL_HIGH | IRQ_TYPE_LEVEL_LOW; 789 ct->type = IRQ_TYPE_LEVEL_HIGH | IRQ_TYPE_LEVEL_LOW;
@@ -803,27 +801,23 @@ static int mvebu_gpio_probe(struct platform_device *pdev)
803 ct->handler = handle_edge_irq; 801 ct->handler = handle_edge_irq;
804 ct->chip.name = mvchip->chip.label; 802 ct->chip.name = mvchip->chip.label;
805 803
806 irq_setup_generic_chip(gc, IRQ_MSK(ngpios), 0, 804 /* Setup the interrupt handlers. Each chip can have up to 4
807 IRQ_NOREQUEST, IRQ_LEVEL | IRQ_NOPROBE); 805 * interrupt handlers, with each handler dealing with 8 GPIO
806 * pins.
807 */
808 for (i = 0; i < 4; i++) {
809 int irq = platform_get_irq(pdev, i);
808 810
809 /* Setup irq domain on top of the generic chip. */ 811 if (irq < 0)
810 mvchip->domain = irq_domain_add_simple(np, mvchip->chip.ngpio, 812 continue;
811 mvchip->irqbase, 813 irq_set_chained_handler_and_data(irq, mvebu_gpio_irq_handler,
812 &irq_domain_simple_ops, 814 mvchip);
813 mvchip);
814 if (!mvchip->domain) {
815 dev_err(&pdev->dev, "couldn't allocate irq domain %s (DT).\n",
816 mvchip->chip.label);
817 err = -ENODEV;
818 goto err_generic_chip;
819 } 815 }
820 816
821 return 0; 817 return 0;
822 818
823err_generic_chip: 819err_domain:
824 irq_remove_generic_chip(gc, IRQ_MSK(ngpios), IRQ_NOREQUEST, 820 irq_domain_remove(mvchip->domain);
825 IRQ_LEVEL | IRQ_NOPROBE);
826 kfree(gc);
827 821
828 return err; 822 return err;
829} 823}
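The gpio-mvebu rework stops deriving the per-pin bit from d->irq - gc->irq_base, which only works while Linux irq numbers form one contiguous block, and instead uses the d->mask value the generic irq chip already maintains; the chained handler now translates hardware pin numbers through the irq domain. A minimal sketch of both ideas, with illustrative function names:

#include <linux/io.h>
#include <linux/irq.h>
#include <linux/irqdesc.h>
#include <linux/irqdomain.h>

static void example_irq_ack(struct irq_data *d, void __iomem *cause_reg)
{
        struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
        u32 mask = d->mask;             /* maintained by the generic chip */

        irq_gc_lock(gc);
        writel_relaxed(~mask, cause_reg);       /* write-0-to-clear */
        irq_gc_unlock(gc);
}

/* In the chained handler: hardware pin number -> Linux irq number. */
static void example_demux(struct irq_domain *domain, unsigned int pin)
{
        unsigned int irq = irq_find_mapping(domain, pin);

        if (irq)
                generic_handle_irq(irq);
}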
diff --git a/drivers/gpio/gpiolib-of.c b/drivers/gpio/gpiolib-of.c
index ecad3f0e3b77..193f15d50bba 100644
--- a/drivers/gpio/gpiolib-of.c
+++ b/drivers/gpio/gpiolib-of.c
@@ -26,14 +26,18 @@
26 26
27#include "gpiolib.h" 27#include "gpiolib.h"
28 28
29static int of_gpiochip_match_node(struct gpio_chip *chip, void *data) 29static int of_gpiochip_match_node_and_xlate(struct gpio_chip *chip, void *data)
30{ 30{
31 return chip->gpiodev->dev.of_node == data; 31 struct of_phandle_args *gpiospec = data;
32
33 return chip->gpiodev->dev.of_node == gpiospec->np &&
34 chip->of_xlate(chip, gpiospec, NULL) >= 0;
32} 35}
33 36
34static struct gpio_chip *of_find_gpiochip_by_node(struct device_node *np) 37static struct gpio_chip *of_find_gpiochip_by_xlate(
38 struct of_phandle_args *gpiospec)
35{ 39{
36 return gpiochip_find(np, of_gpiochip_match_node); 40 return gpiochip_find(gpiospec, of_gpiochip_match_node_and_xlate);
37} 41}
38 42
39static struct gpio_desc *of_xlate_and_get_gpiod_flags(struct gpio_chip *chip, 43static struct gpio_desc *of_xlate_and_get_gpiod_flags(struct gpio_chip *chip,
@@ -79,7 +83,7 @@ struct gpio_desc *of_get_named_gpiod_flags(struct device_node *np,
79 return ERR_PTR(ret); 83 return ERR_PTR(ret);
80 } 84 }
81 85
82 chip = of_find_gpiochip_by_node(gpiospec.np); 86 chip = of_find_gpiochip_by_xlate(&gpiospec);
83 if (!chip) { 87 if (!chip) {
84 desc = ERR_PTR(-EPROBE_DEFER); 88 desc = ERR_PTR(-EPROBE_DEFER);
85 goto out; 89 goto out;
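The gpiolib-of change makes chip lookup consider the translation as well as the node: a chip only matches if its of_xlate() accepts the gpiospec, which lets several gpio_chips hang off one device_node. Restated from the hunk as a gpiochip_find() match callback:

static int of_gpiochip_match_node_and_xlate(struct gpio_chip *chip, void *data)
{
        struct of_phandle_args *gpiospec = data;

        return chip->gpiodev->dev.of_node == gpiospec->np &&
               chip->of_xlate(chip, gpiospec, NULL) >= 0;
}

/* gpiochip_find() walks the registered chips and returns the first one
 * for which the callback returns non-zero. */
static struct gpio_chip *of_find_gpiochip_by_xlate(struct of_phandle_args *spec)
{
        return gpiochip_find(spec, of_gpiochip_match_node_and_xlate);
}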
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index 20e09b7c2de3..93ed0e00c578 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -21,6 +21,7 @@
21#include <linux/uaccess.h> 21#include <linux/uaccess.h>
22#include <linux/compat.h> 22#include <linux/compat.h>
23#include <linux/anon_inodes.h> 23#include <linux/anon_inodes.h>
24#include <linux/file.h>
24#include <linux/kfifo.h> 25#include <linux/kfifo.h>
25#include <linux/poll.h> 26#include <linux/poll.h>
26#include <linux/timekeeping.h> 27#include <linux/timekeeping.h>
@@ -423,6 +424,7 @@ static int linehandle_create(struct gpio_device *gdev, void __user *ip)
423{ 424{
424 struct gpiohandle_request handlereq; 425 struct gpiohandle_request handlereq;
425 struct linehandle_state *lh; 426 struct linehandle_state *lh;
427 struct file *file;
426 int fd, i, ret; 428 int fd, i, ret;
427 429
428 if (copy_from_user(&handlereq, ip, sizeof(handlereq))) 430 if (copy_from_user(&handlereq, ip, sizeof(handlereq)))
@@ -499,26 +501,41 @@ static int linehandle_create(struct gpio_device *gdev, void __user *ip)
499 i--; 501 i--;
500 lh->numdescs = handlereq.lines; 502 lh->numdescs = handlereq.lines;
501 503
502 fd = anon_inode_getfd("gpio-linehandle", 504 fd = get_unused_fd_flags(O_RDONLY | O_CLOEXEC);
503 &linehandle_fileops,
504 lh,
505 O_RDONLY | O_CLOEXEC);
506 if (fd < 0) { 505 if (fd < 0) {
507 ret = fd; 506 ret = fd;
508 goto out_free_descs; 507 goto out_free_descs;
509 } 508 }
510 509
510 file = anon_inode_getfile("gpio-linehandle",
511 &linehandle_fileops,
512 lh,
513 O_RDONLY | O_CLOEXEC);
514 if (IS_ERR(file)) {
515 ret = PTR_ERR(file);
516 goto out_put_unused_fd;
517 }
518
511 handlereq.fd = fd; 519 handlereq.fd = fd;
512 if (copy_to_user(ip, &handlereq, sizeof(handlereq))) { 520 if (copy_to_user(ip, &handlereq, sizeof(handlereq))) {
513 ret = -EFAULT; 521 /*
514 goto out_free_descs; 522 * fput() will trigger the release() callback, so do not go onto
523 * the regular error cleanup path here.
524 */
525 fput(file);
526 put_unused_fd(fd);
527 return -EFAULT;
515 } 528 }
516 529
530 fd_install(fd, file);
531
517 dev_dbg(&gdev->dev, "registered chardev handle for %d lines\n", 532 dev_dbg(&gdev->dev, "registered chardev handle for %d lines\n",
518 lh->numdescs); 533 lh->numdescs);
519 534
520 return 0; 535 return 0;
521 536
537out_put_unused_fd:
538 put_unused_fd(fd);
522out_free_descs: 539out_free_descs:
523 for (; i >= 0; i--) 540 for (; i >= 0; i--)
524 gpiod_free(lh->descs[i]); 541 gpiod_free(lh->descs[i]);
@@ -721,6 +738,7 @@ static int lineevent_create(struct gpio_device *gdev, void __user *ip)
721 struct gpioevent_request eventreq; 738 struct gpioevent_request eventreq;
722 struct lineevent_state *le; 739 struct lineevent_state *le;
723 struct gpio_desc *desc; 740 struct gpio_desc *desc;
741 struct file *file;
724 u32 offset; 742 u32 offset;
725 u32 lflags; 743 u32 lflags;
726 u32 eflags; 744 u32 eflags;
@@ -815,23 +833,38 @@ static int lineevent_create(struct gpio_device *gdev, void __user *ip)
815 if (ret) 833 if (ret)
816 goto out_free_desc; 834 goto out_free_desc;
817 835
818 fd = anon_inode_getfd("gpio-event", 836 fd = get_unused_fd_flags(O_RDONLY | O_CLOEXEC);
819 &lineevent_fileops,
820 le,
821 O_RDONLY | O_CLOEXEC);
822 if (fd < 0) { 837 if (fd < 0) {
823 ret = fd; 838 ret = fd;
824 goto out_free_irq; 839 goto out_free_irq;
825 } 840 }
826 841
842 file = anon_inode_getfile("gpio-event",
843 &lineevent_fileops,
844 le,
845 O_RDONLY | O_CLOEXEC);
846 if (IS_ERR(file)) {
847 ret = PTR_ERR(file);
848 goto out_put_unused_fd;
849 }
850
827 eventreq.fd = fd; 851 eventreq.fd = fd;
828 if (copy_to_user(ip, &eventreq, sizeof(eventreq))) { 852 if (copy_to_user(ip, &eventreq, sizeof(eventreq))) {
829 ret = -EFAULT; 853 /*
830 goto out_free_irq; 854 * fput() will trigger the release() callback, so do not go onto
855 * the regular error cleanup path here.
856 */
857 fput(file);
858 put_unused_fd(fd);
859 return -EFAULT;
831 } 860 }
832 861
862 fd_install(fd, file);
863
833 return 0; 864 return 0;
834 865
866out_put_unused_fd:
867 put_unused_fd(fd);
835out_free_irq: 868out_free_irq:
836 free_irq(le->irq, le); 869 free_irq(le->irq, le);
837out_free_desc: 870out_free_desc:
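linehandle_create() and lineevent_create() used anon_inode_getfd(), which makes the new fd visible to userspace immediately; if the later copy_to_user() failed, the fd (and the GPIO lines behind it) could not be reclaimed safely. The hunks split this into reserve the fd, create the file, copy the reply, and only then fd_install(); on a copy failure, fput() runs ->release() so the regular cleanup path must be skipped. A minimal sketch of that ordering, with a hypothetical create_handle_fd() wrapper; fops and priv stand in for the real file operations and state:

#include <linux/anon_inodes.h>
#include <linux/err.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/uaccess.h>

static int create_handle_fd(const struct file_operations *fops, void *priv,
                            int __user *fdp)
{
        struct file *file;
        int fd;

        fd = get_unused_fd_flags(O_RDONLY | O_CLOEXEC);
        if (fd < 0)
                return fd;

        file = anon_inode_getfile("example-handle", fops, priv,
                                  O_RDONLY | O_CLOEXEC);
        if (IS_ERR(file)) {
                put_unused_fd(fd);
                return PTR_ERR(file);
        }

        if (put_user(fd, fdp)) {
                /* fput() runs ->release(), so don't free priv again here. */
                fput(file);
                put_unused_fd(fd);
                return -EFAULT;
        }

        fd_install(fd, file);   /* the fd becomes visible only now */
        return 0;
}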
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index b0f6e6957536..82dc8d20e28a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -519,7 +519,8 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
519 r = ttm_eu_reserve_buffers(&p->ticket, &p->validated, true, 519 r = ttm_eu_reserve_buffers(&p->ticket, &p->validated, true,
520 &duplicates); 520 &duplicates);
521 if (unlikely(r != 0)) { 521 if (unlikely(r != 0)) {
522 DRM_ERROR("ttm_eu_reserve_buffers failed.\n"); 522 if (r != -ERESTARTSYS)
523 DRM_ERROR("ttm_eu_reserve_buffers failed.\n");
523 goto error_free_pages; 524 goto error_free_pages;
524 } 525 }
525 526
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index b4f4a9239069..7ca07e7b25c1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -1959,6 +1959,7 @@ int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon)
1959 /* evict remaining vram memory */ 1959 /* evict remaining vram memory */
1960 amdgpu_bo_evict_vram(adev); 1960 amdgpu_bo_evict_vram(adev);
1961 1961
1962 amdgpu_atombios_scratch_regs_save(adev);
1962 pci_save_state(dev->pdev); 1963 pci_save_state(dev->pdev);
1963 if (suspend) { 1964 if (suspend) {
1964 /* Shut down the device */ 1965 /* Shut down the device */
@@ -2010,6 +2011,7 @@ int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon)
2010 return r; 2011 return r;
2011 } 2012 }
2012 } 2013 }
2014 amdgpu_atombios_scratch_regs_restore(adev);
2013 2015
2014 /* post card */ 2016 /* post card */
2015 if (!amdgpu_card_posted(adev) || !resume) { 2017 if (!amdgpu_card_posted(adev) || !resume) {
@@ -2268,8 +2270,6 @@ int amdgpu_gpu_reset(struct amdgpu_device *adev)
2268 } 2270 }
2269 2271
2270 if (need_full_reset) { 2272 if (need_full_reset) {
2271 /* save scratch */
2272 amdgpu_atombios_scratch_regs_save(adev);
2273 r = amdgpu_suspend(adev); 2273 r = amdgpu_suspend(adev);
2274 2274
2275retry: 2275retry:
@@ -2279,8 +2279,9 @@ retry:
2279 amdgpu_display_stop_mc_access(adev, &save); 2279 amdgpu_display_stop_mc_access(adev, &save);
2280 amdgpu_wait_for_idle(adev, AMD_IP_BLOCK_TYPE_GMC); 2280 amdgpu_wait_for_idle(adev, AMD_IP_BLOCK_TYPE_GMC);
2281 } 2281 }
2282 2282 amdgpu_atombios_scratch_regs_save(adev);
2283 r = amdgpu_asic_reset(adev); 2283 r = amdgpu_asic_reset(adev);
2284 amdgpu_atombios_scratch_regs_restore(adev);
2284 /* post card */ 2285 /* post card */
2285 amdgpu_atom_asic_init(adev->mode_info.atom_context); 2286 amdgpu_atom_asic_init(adev->mode_info.atom_context);
2286 2287
@@ -2288,8 +2289,6 @@ retry:
2288 dev_info(adev->dev, "GPU reset succeeded, trying to resume\n"); 2289 dev_info(adev->dev, "GPU reset succeeded, trying to resume\n");
2289 r = amdgpu_resume(adev); 2290 r = amdgpu_resume(adev);
2290 } 2291 }
2291 /* restore scratch */
2292 amdgpu_atombios_scratch_regs_restore(adev);
2293 } 2292 }
2294 if (!r) { 2293 if (!r) {
2295 amdgpu_irq_gpu_reset_resume_helper(adev); 2294 amdgpu_irq_gpu_reset_resume_helper(adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
index 3a2e42f4b897..77b34ec92632 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
@@ -68,6 +68,7 @@ int amdgpu_fence_slab_init(void)
68 68
69void amdgpu_fence_slab_fini(void) 69void amdgpu_fence_slab_fini(void)
70{ 70{
71 rcu_barrier();
71 kmem_cache_destroy(amdgpu_fence_slab); 72 kmem_cache_destroy(amdgpu_fence_slab);
72} 73}
73/* 74/*
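amdgpu fences are returned to the slab from an RCU callback, so destroying the cache immediately can race with callbacks that have not run yet; the added rcu_barrier() waits for all pending RCU callbacks first. A minimal sketch of the teardown ordering:

#include <linux/rcupdate.h>
#include <linux/slab.h>

static struct kmem_cache *fence_slab;

static void fence_slab_fini(void)
{
        /* Wait for outstanding RCU callbacks that still free objects
         * into the slab, then it is safe to destroy the cache. */
        rcu_barrier();
        kmem_cache_destroy(fence_slab);
}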
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
index 278708f5a744..9fa809876339 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
@@ -239,6 +239,7 @@ int amdgpu_irq_init(struct amdgpu_device *adev)
239 if (r) { 239 if (r) {
240 adev->irq.installed = false; 240 adev->irq.installed = false;
241 flush_work(&adev->hotplug_work); 241 flush_work(&adev->hotplug_work);
242 cancel_work_sync(&adev->reset_work);
242 return r; 243 return r;
243 } 244 }
244 245
@@ -264,6 +265,7 @@ void amdgpu_irq_fini(struct amdgpu_device *adev)
264 if (adev->irq.msi_enabled) 265 if (adev->irq.msi_enabled)
265 pci_disable_msi(adev->pdev); 266 pci_disable_msi(adev->pdev);
266 flush_work(&adev->hotplug_work); 267 flush_work(&adev->hotplug_work);
268 cancel_work_sync(&adev->reset_work);
267 } 269 }
268 270
269 for (i = 0; i < AMDGPU_MAX_IRQ_SRC_ID; ++i) { 271 for (i = 0; i < AMDGPU_MAX_IRQ_SRC_ID; ++i) {
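amdgpu_irq_init()'s error path and amdgpu_irq_fini() now cancel the reset work item in addition to flushing the hotplug work, presumably so a queued GPU-reset handler cannot run after the interrupt state it relies on is torn down. A minimal sketch of pairing the work items with teardown, with illustrative parameter names:

#include <linux/workqueue.h>

static void irq_teardown(struct work_struct *hotplug_work,
                         struct work_struct *reset_work)
{
        /* Wait for a running hotplug handler and make sure a queued
         * reset handler is cancelled (or finished) before the state
         * both of them use is freed. */
        flush_work(hotplug_work);
        cancel_work_sync(reset_work);
}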
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index c2c7fb140338..203d98b00555 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -459,10 +459,8 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
459 /* return all clocks in KHz */ 459 /* return all clocks in KHz */
460 dev_info.gpu_counter_freq = amdgpu_asic_get_xclk(adev) * 10; 460 dev_info.gpu_counter_freq = amdgpu_asic_get_xclk(adev) * 10;
461 if (adev->pm.dpm_enabled) { 461 if (adev->pm.dpm_enabled) {
462 dev_info.max_engine_clock = 462 dev_info.max_engine_clock = amdgpu_dpm_get_sclk(adev, false) * 10;
463 adev->pm.dpm.dyn_state.max_clock_voltage_on_ac.sclk * 10; 463 dev_info.max_memory_clock = amdgpu_dpm_get_mclk(adev, false) * 10;
464 dev_info.max_memory_clock =
465 adev->pm.dpm.dyn_state.max_clock_voltage_on_ac.mclk * 10;
466 } else { 464 } else {
467 dev_info.max_engine_clock = adev->pm.default_sclk * 10; 465 dev_info.max_engine_clock = adev->pm.default_sclk * 10;
468 dev_info.max_memory_clock = adev->pm.default_mclk * 10; 466 dev_info.max_memory_clock = adev->pm.default_mclk * 10;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 06f24322e7c3..968c4260d7a7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -1758,5 +1758,6 @@ void amdgpu_vm_manager_fini(struct amdgpu_device *adev)
1758 fence_put(adev->vm_manager.ids[i].first); 1758 fence_put(adev->vm_manager.ids[i].first);
1759 amdgpu_sync_free(&adev->vm_manager.ids[i].active); 1759 amdgpu_sync_free(&adev->vm_manager.ids[i].active);
1760 fence_put(id->flushed_updates); 1760 fence_put(id->flushed_updates);
1761 fence_put(id->last_flush);
1761 } 1762 }
1762} 1763}
diff --git a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
index 1d8c375a3561..5be788b269e2 100644
--- a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
@@ -4075,7 +4075,7 @@ static int ci_enable_uvd_dpm(struct amdgpu_device *adev, bool enable)
4075 pi->dpm_level_enable_mask.mclk_dpm_enable_mask); 4075 pi->dpm_level_enable_mask.mclk_dpm_enable_mask);
4076 } 4076 }
4077 } else { 4077 } else {
4078 if (pi->last_mclk_dpm_enable_mask & 0x1) { 4078 if (pi->uvd_enabled) {
4079 pi->uvd_enabled = false; 4079 pi->uvd_enabled = false;
4080 pi->dpm_level_enable_mask.mclk_dpm_enable_mask |= 1; 4080 pi->dpm_level_enable_mask.mclk_dpm_enable_mask |= 1;
4081 amdgpu_ci_send_msg_to_smc_with_parameter(adev, 4081 amdgpu_ci_send_msg_to_smc_with_parameter(adev,
@@ -6236,6 +6236,8 @@ static int ci_dpm_sw_fini(void *handle)
6236{ 6236{
6237 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 6237 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
6238 6238
6239 flush_work(&adev->pm.dpm.thermal.work);
6240
6239 mutex_lock(&adev->pm.mutex); 6241 mutex_lock(&adev->pm.mutex);
6240 amdgpu_pm_sysfs_fini(adev); 6242 amdgpu_pm_sysfs_fini(adev);
6241 ci_dpm_fini(adev); 6243 ci_dpm_fini(adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
index 4108c686aa7c..9260caef74fa 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
@@ -3151,10 +3151,6 @@ static int dce_v10_0_hw_fini(void *handle)
3151 3151
3152static int dce_v10_0_suspend(void *handle) 3152static int dce_v10_0_suspend(void *handle)
3153{ 3153{
3154 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3155
3156 amdgpu_atombios_scratch_regs_save(adev);
3157
3158 return dce_v10_0_hw_fini(handle); 3154 return dce_v10_0_hw_fini(handle);
3159} 3155}
3160 3156
@@ -3165,8 +3161,6 @@ static int dce_v10_0_resume(void *handle)
3165 3161
3166 ret = dce_v10_0_hw_init(handle); 3162 ret = dce_v10_0_hw_init(handle);
3167 3163
3168 amdgpu_atombios_scratch_regs_restore(adev);
3169
3170 /* turn on the BL */ 3164 /* turn on the BL */
3171 if (adev->mode_info.bl_encoder) { 3165 if (adev->mode_info.bl_encoder) {
3172 u8 bl_level = amdgpu_display_backlight_get_level(adev, 3166 u8 bl_level = amdgpu_display_backlight_get_level(adev,
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
index f264b8f17ad1..367739bd1927 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
@@ -3215,10 +3215,6 @@ static int dce_v11_0_hw_fini(void *handle)
 
 static int dce_v11_0_suspend(void *handle)
 {
-	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
-	amdgpu_atombios_scratch_regs_save(adev);
-
 	return dce_v11_0_hw_fini(handle);
 }
 
@@ -3229,8 +3225,6 @@ static int dce_v11_0_resume(void *handle)
 
 	ret = dce_v11_0_hw_init(handle);
 
-	amdgpu_atombios_scratch_regs_restore(adev);
-
 	/* turn on the BL */
 	if (adev->mode_info.bl_encoder) {
 		u8 bl_level = amdgpu_display_backlight_get_level(adev,
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
index b948d6cb1399..15f9fc0514b2 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
@@ -2482,10 +2482,6 @@ static int dce_v6_0_hw_fini(void *handle)
 
 static int dce_v6_0_suspend(void *handle)
 {
-	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
-	amdgpu_atombios_scratch_regs_save(adev);
-
 	return dce_v6_0_hw_fini(handle);
 }
 
@@ -2496,8 +2492,6 @@ static int dce_v6_0_resume(void *handle)
 
 	ret = dce_v6_0_hw_init(handle);
 
-	amdgpu_atombios_scratch_regs_restore(adev);
-
 	/* turn on the BL */
 	if (adev->mode_info.bl_encoder) {
 		u8 bl_level = amdgpu_display_backlight_get_level(adev,
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
index 5966166ec94c..8c4d808db0f1 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
@@ -3033,10 +3033,6 @@ static int dce_v8_0_hw_fini(void *handle)
 
 static int dce_v8_0_suspend(void *handle)
 {
-	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
-	amdgpu_atombios_scratch_regs_save(adev);
-
 	return dce_v8_0_hw_fini(handle);
 }
 
@@ -3047,8 +3043,6 @@ static int dce_v8_0_resume(void *handle)
 
 	ret = dce_v8_0_hw_init(handle);
 
-	amdgpu_atombios_scratch_regs_restore(adev);
-
 	/* turn on the BL */
 	if (adev->mode_info.bl_encoder) {
 		u8 bl_level = amdgpu_display_backlight_get_level(adev,
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index ee6a48a09214..bb97182dc749 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -640,7 +640,6 @@ static const u32 stoney_mgcg_cgcg_init[] =
 	mmCP_MEM_SLP_CNTL, 0xffffffff, 0x00020201,
 	mmRLC_MEM_SLP_CNTL, 0xffffffff, 0x00020201,
 	mmCGTS_SM_CTRL_REG, 0xffffffff, 0x96940200,
-	mmATC_MISC_CG, 0xffffffff, 0x000c0200,
 };
 
 static void gfx_v8_0_set_ring_funcs(struct amdgpu_device *adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
index c22ef140a542..a16b2201d52c 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
@@ -100,6 +100,7 @@ static const u32 cz_mgcg_cgcg_init[] =
 
 static const u32 stoney_mgcg_cgcg_init[] =
 {
+	mmATC_MISC_CG, 0xffffffff, 0x000c0200,
 	mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104
 };
 
diff --git a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c
index f8618a3881a8..71d2856222fa 100644
--- a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c
@@ -3063,6 +3063,8 @@ static int kv_dpm_sw_fini(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
+	flush_work(&adev->pm.dpm.thermal.work);
+
 	mutex_lock(&adev->pm.mutex);
 	amdgpu_pm_sysfs_fini(adev);
 	kv_dpm_fini(adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/si_dpm.c b/drivers/gpu/drm/amd/amdgpu/si_dpm.c
index 3de7bca5854b..d6f85b1a0b93 100644
--- a/drivers/gpu/drm/amd/amdgpu/si_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/si_dpm.c
@@ -3477,6 +3477,49 @@ static void si_apply_state_adjust_rules(struct amdgpu_device *adev,
 	int i;
 	struct si_dpm_quirk *p = si_dpm_quirk_list;
 
+	/* limit all SI kickers */
+	if (adev->asic_type == CHIP_PITCAIRN) {
+		if ((adev->pdev->revision == 0x81) ||
+		    (adev->pdev->device == 0x6810) ||
+		    (adev->pdev->device == 0x6811) ||
+		    (adev->pdev->device == 0x6816) ||
+		    (adev->pdev->device == 0x6817) ||
+		    (adev->pdev->device == 0x6806))
+			max_mclk = 120000;
+	} else if (adev->asic_type == CHIP_VERDE) {
+		if ((adev->pdev->revision == 0x81) ||
+		    (adev->pdev->revision == 0x83) ||
+		    (adev->pdev->revision == 0x87) ||
+		    (adev->pdev->device == 0x6820) ||
+		    (adev->pdev->device == 0x6821) ||
+		    (adev->pdev->device == 0x6822) ||
+		    (adev->pdev->device == 0x6823) ||
+		    (adev->pdev->device == 0x682A) ||
+		    (adev->pdev->device == 0x682B)) {
+			max_sclk = 75000;
+			max_mclk = 80000;
+		}
+	} else if (adev->asic_type == CHIP_OLAND) {
+		if ((adev->pdev->revision == 0xC7) ||
+		    (adev->pdev->revision == 0x80) ||
+		    (adev->pdev->revision == 0x81) ||
+		    (adev->pdev->revision == 0x83) ||
+		    (adev->pdev->device == 0x6604) ||
+		    (adev->pdev->device == 0x6605)) {
+			max_sclk = 75000;
+			max_mclk = 80000;
+		}
+	} else if (adev->asic_type == CHIP_HAINAN) {
+		if ((adev->pdev->revision == 0x81) ||
+		    (adev->pdev->revision == 0x83) ||
+		    (adev->pdev->revision == 0xC3) ||
+		    (adev->pdev->device == 0x6664) ||
+		    (adev->pdev->device == 0x6665) ||
+		    (adev->pdev->device == 0x6667)) {
+			max_sclk = 75000;
+			max_mclk = 80000;
+		}
+	}
 	/* Apply dpm quirks */
 	while (p && p->chip_device != 0) {
 		if (adev->pdev->vendor == p->chip_vendor &&
@@ -3489,22 +3532,6 @@ static void si_apply_state_adjust_rules(struct amdgpu_device *adev,
 		}
 		++p;
 	}
-	/* limit mclk on all R7 370 parts for stability */
-	if (adev->pdev->device == 0x6811 &&
-	    adev->pdev->revision == 0x81)
-		max_mclk = 120000;
-	/* limit sclk/mclk on Jet parts for stability */
-	if (adev->pdev->device == 0x6665 &&
-	    adev->pdev->revision == 0xc3) {
-		max_sclk = 75000;
-		max_mclk = 80000;
-	}
-	/* Limit clocks for some HD8600 parts */
-	if (adev->pdev->device == 0x6660 &&
-	    adev->pdev->revision == 0x83) {
-		max_sclk = 75000;
-		max_mclk = 80000;
-	}
 
 	if (rps->vce_active) {
 		rps->evclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].evclk;
@@ -7777,6 +7804,8 @@ static int si_dpm_sw_fini(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
+	flush_work(&adev->pm.dpm.thermal.work);
+
 	mutex_lock(&adev->pm.mutex);
 	amdgpu_pm_sysfs_fini(adev);
 	si_dpm_fini(adev);
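
Editor's note: the three dpm sw_fini hunks above share one pattern: flush the still-scheduled thermal work item before the structures it dereferences are freed. Below is a minimal, self-contained sketch of that pattern as a stand-alone module; all names (demo_ctx, demo_work_fn, and so on) are invented for illustration and are not part of the patch.

/* Sketch: flush pending work before freeing its context. */
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

struct demo_ctx {
	struct work_struct work;
	int payload;
};

static struct demo_ctx *ctx;

static void demo_work_fn(struct work_struct *work)
{
	struct demo_ctx *c = container_of(work, struct demo_ctx, work);

	pr_info("demo: payload=%d\n", c->payload);
}

static int __init demo_init(void)
{
	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	ctx->payload = 42;
	INIT_WORK(&ctx->work, demo_work_fn);
	schedule_work(&ctx->work);
	return 0;
}

static void __exit demo_exit(void)
{
	/* Wait for the handler to finish before freeing its context,
	 * mirroring flush_work(&adev->pm.dpm.thermal.work) above. */
	flush_work(&ctx->work);
	kfree(ctx);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");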
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
index 8533269ec160..6feed726e299 100644
--- a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
@@ -52,6 +52,8 @@
 #define VCE_V3_0_STACK_SIZE	(64 * 1024)
 #define VCE_V3_0_DATA_SIZE	((16 * 1024 * AMDGPU_MAX_VCE_HANDLES) + (52 * 1024))
 
+#define FW_52_8_3	((52 << 24) | (8 << 16) | (3 << 8))
+
 static void vce_v3_0_mc_resume(struct amdgpu_device *adev, int idx);
 static void vce_v3_0_set_ring_funcs(struct amdgpu_device *adev);
 static void vce_v3_0_set_irq_funcs(struct amdgpu_device *adev);
@@ -382,6 +384,10 @@ static int vce_v3_0_sw_init(void *handle)
 	if (r)
 		return r;
 
+	/* 52.8.3 required for 3 ring support */
+	if (adev->vce.fw_version < FW_52_8_3)
+		adev->vce.num_rings = 2;
+
 	r = amdgpu_vce_resume(adev);
 	if (r)
 		return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c
index c0d9aad7126f..7c13090df7c0 100644
--- a/drivers/gpu/drm/amd/amdgpu/vi.c
+++ b/drivers/gpu/drm/amd/amdgpu/vi.c
@@ -1651,7 +1651,7 @@ static int vi_common_early_init(void *handle)
 			AMD_CG_SUPPORT_SDMA_MGCG |
 			AMD_CG_SUPPORT_SDMA_LS |
 			AMD_CG_SUPPORT_VCE_MGCG;
-		adev->pg_flags |= AMD_PG_SUPPORT_GFX_PG |
+		adev->pg_flags = AMD_PG_SUPPORT_GFX_PG |
 			AMD_PG_SUPPORT_GFX_SMG |
 			AMD_PG_SUPPORT_GFX_PIPELINE |
 			AMD_PG_SUPPORT_UVD |
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
index 1167205057b3..2ba7937d2545 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
@@ -716,7 +716,7 @@ int phm_get_voltage_evv_on_sclk(struct pp_hwmgr *hwmgr, uint8_t voltage_type,
 			*voltage = 1150;
 		} else {
 			ret = atomctrl_get_voltage_evv_on_sclk_ai(hwmgr, voltage_type, sclk, id, &vol);
-			*voltage = (uint16_t)vol/100;
+			*voltage = (uint16_t)(vol/100);
 		}
 	return ret;
 }
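
Editor's note: the hwmgr.c one-liner is an operator-precedence fix: a cast binds tighter than "/", so the old form truncated the value to 16 bits before dividing. A stand-alone sketch (the value 112500 is an arbitrary example, not from the driver):

/* Sketch: why (uint16_t)(vol/100) differs from (uint16_t)vol/100. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t vol = 112500;
	uint16_t wrong = (uint16_t)vol / 100;	/* truncate first: 112500 & 0xffff = 46964, then /100 = 469 */
	uint16_t right = (uint16_t)(vol / 100);	/* divide first: 1125 */

	printf("wrong=%u right=%u\n", wrong, right);
	return 0;
}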
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c
index 1126bd4f74dc..0894527d932f 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c
@@ -1320,7 +1320,8 @@ int atomctrl_get_voltage_evv_on_sclk_ai(struct pp_hwmgr *hwmgr, uint8_t voltage_
 	if (0 != result)
 		return result;
 
-	*voltage = le32_to_cpu(((GET_EVV_VOLTAGE_INFO_OUTPUT_PARAMETER_V1_3 *)(&get_voltage_info_param_space))->ulVoltageLevel);
+	*voltage = le32_to_cpu(((GET_EVV_VOLTAGE_INFO_OUTPUT_PARAMETER_V1_3 *)
+				(&get_voltage_info_param_space))->ulVoltageLevel);
 
 	return result;
 }
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c b/drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c
index 7de701d8a450..4477c55a58e3 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c
@@ -1201,12 +1201,15 @@ static uint32_t make_classification_flags(struct pp_hwmgr *hwmgr,
 static int ppt_get_num_of_vce_state_table_entries_v1_0(struct pp_hwmgr *hwmgr)
 {
 	const ATOM_Tonga_POWERPLAYTABLE *pp_table = get_powerplay_table(hwmgr);
-	const ATOM_Tonga_VCE_State_Table *vce_state_table =
-			(ATOM_Tonga_VCE_State_Table *)(((unsigned long)pp_table) + le16_to_cpu(pp_table->usVCEStateTableOffset));
+	const ATOM_Tonga_VCE_State_Table *vce_state_table;
 
-	if (vce_state_table == NULL)
+
+	if (pp_table == NULL)
 		return 0;
 
+	vce_state_table = (void *)pp_table +
+			le16_to_cpu(pp_table->usVCEStateTableOffset);
+
 	return vce_state_table->ucNumEntries;
 }
 
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
index 609996c84ad5..75854021f403 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
@@ -1168,8 +1168,8 @@ int smu7_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
 
 	tmp_result = (!smum_is_dpm_running(hwmgr)) ? 0 : -1;
 	PP_ASSERT_WITH_CODE(tmp_result == 0,
-			"DPM is already running right now, no need to enable DPM!",
-			return 0);
+			"DPM is already running",
+			);
 
 	if (smu7_voltage_control(hwmgr)) {
 		tmp_result = smu7_enable_voltage_control(hwmgr);
@@ -2127,15 +2127,18 @@ static int smu7_patch_acp_vddc(struct pp_hwmgr *hwmgr,
 }
 
 static int smu7_patch_limits_vddc(struct pp_hwmgr *hwmgr,
 			struct phm_clock_and_voltage_limits *tab)
 {
+	uint32_t vddc, vddci;
 	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
 
 	if (tab) {
-		smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, (uint32_t *)&tab->vddc,
+		smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &vddc,
 					&data->vddc_leakage);
-		smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, (uint32_t *)&tab->vddci,
-					&data->vddci_leakage);
+		tab->vddc = vddc;
+		smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &vddci,
+					&data->vddci_leakage);
+		tab->vddci = vddci;
 	}
 
 	return 0;
diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
index 963a24d46a93..910b8d5b21c5 100644
--- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
+++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
@@ -645,6 +645,7 @@ void amd_sched_fini(struct amd_gpu_scheduler *sched)
 {
 	if (sched->thread)
 		kthread_stop(sched->thread);
+	rcu_barrier();
 	if (atomic_dec_and_test(&sched_fence_slab_ref))
 		kmem_cache_destroy(sched_fence_slab);
 }
diff --git a/drivers/gpu/drm/amd/scheduler/sched_fence.c b/drivers/gpu/drm/amd/scheduler/sched_fence.c
index 6b63beaf7574..3653b5a40494 100644
--- a/drivers/gpu/drm/amd/scheduler/sched_fence.c
+++ b/drivers/gpu/drm/amd/scheduler/sched_fence.c
@@ -103,7 +103,7 @@ static void amd_sched_fence_free(struct rcu_head *rcu)
 }
 
 /**
- * amd_sched_fence_release - callback that fence can be freed
+ * amd_sched_fence_release_scheduled - callback that fence can be freed
  *
  * @fence: fence
  *
@@ -118,7 +118,7 @@ static void amd_sched_fence_release_scheduled(struct fence *f)
 }
 
 /**
- * amd_sched_fence_release_scheduled - drop extra reference
+ * amd_sched_fence_release_finished - drop extra reference
  *
  * @f: fence
  *
diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c
index 23739609427d..e6862a744210 100644
--- a/drivers/gpu/drm/drm_atomic.c
+++ b/drivers/gpu/drm/drm_atomic.c
@@ -420,18 +420,21 @@ drm_atomic_replace_property_blob_from_id(struct drm_crtc *crtc,
 					 ssize_t expected_size,
 					 bool *replaced)
 {
-	struct drm_device *dev = crtc->dev;
 	struct drm_property_blob *new_blob = NULL;
 
 	if (blob_id != 0) {
-		new_blob = drm_property_lookup_blob(dev, blob_id);
+		new_blob = drm_property_lookup_blob(crtc->dev, blob_id);
 		if (new_blob == NULL)
 			return -EINVAL;
-		if (expected_size > 0 && expected_size != new_blob->length)
+
+		if (expected_size > 0 && expected_size != new_blob->length) {
+			drm_property_unreference_blob(new_blob);
 			return -EINVAL;
+		}
 	}
 
 	drm_atomic_replace_property_blob(blob, new_blob, replaced);
+	drm_property_unreference_blob(new_blob);
 
 	return 0;
 }
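
Editor's note: the drm_atomic.c hunk plugs a blob reference leak: the lookup hands back a counted reference, and both the error path and the success path must drop it once the destination holds its own. A minimal userspace sketch of that ownership rule follows; the types and helpers (blob, blob_lookup, replace_blob) are mock names, not the DRM API.

/* Sketch: drop the lookup reference on every exit path. */
#include <stdio.h>
#include <stdlib.h>

struct blob {
	int refcount;
	size_t length;
};

static struct blob *blob_lookup(size_t length)
{
	struct blob *b = calloc(1, sizeof(*b));

	if (!b)
		return NULL;
	b->refcount = 1;	/* reference owned by the caller */
	b->length = length;
	return b;
}

static void blob_unref(struct blob *b)
{
	if (b && --b->refcount == 0)
		free(b);
}

static int replace_blob(struct blob **slot, size_t id_len, size_t expected)
{
	struct blob *new_blob = blob_lookup(id_len);

	if (!new_blob)
		return -1;
	if (expected && new_blob->length != expected) {
		blob_unref(new_blob);	/* error path: drop the lookup ref */
		return -1;
	}
	blob_unref(*slot);		/* drop the old blob, if any */
	new_blob->refcount++;		/* reference now held by *slot */
	*slot = new_blob;
	blob_unref(new_blob);		/* drop the lookup reference */
	return 0;
}

int main(void)
{
	struct blob *current_blob = NULL;

	printf("replace ok=%d\n", replace_blob(&current_blob, 16, 16));
	printf("replace bad=%d\n", replace_blob(&current_blob, 16, 32));
	blob_unref(current_blob);
	return 0;
}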
diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c
index c3f83476f996..21f992605541 100644
--- a/drivers/gpu/drm/drm_atomic_helper.c
+++ b/drivers/gpu/drm/drm_atomic_helper.c
@@ -594,10 +594,6 @@ drm_atomic_helper_check_planes(struct drm_device *dev,
 	struct drm_plane_state *plane_state;
 	int i, ret = 0;
 
-	ret = drm_atomic_normalize_zpos(dev, state);
-	if (ret)
-		return ret;
-
 	for_each_plane_in_state(state, plane, plane_state, i) {
 		const struct drm_plane_helper_funcs *funcs;
 
diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
index 04e457117980..aa644487749c 100644
--- a/drivers/gpu/drm/drm_dp_mst_topology.c
+++ b/drivers/gpu/drm/drm_dp_mst_topology.c
@@ -914,6 +914,7 @@ static void drm_dp_destroy_port(struct kref *kref)
 		/* no need to clean up vcpi
 		 * as if we have no connector we never setup a vcpi */
 		drm_dp_port_teardown_pdt(port, port->pdt);
+		port->pdt = DP_PEER_DEVICE_NONE;
 	}
 	kfree(port);
 }
@@ -1159,7 +1160,9 @@ static void drm_dp_add_port(struct drm_dp_mst_branch *mstb,
 			drm_dp_put_port(port);
 			goto out;
 		}
-		if (port->port_num >= DP_MST_LOGICAL_PORT_0) {
+		if ((port->pdt == DP_PEER_DEVICE_DP_LEGACY_CONV ||
+		     port->pdt == DP_PEER_DEVICE_SST_SINK) &&
+		    port->port_num >= DP_MST_LOGICAL_PORT_0) {
 			port->cached_edid = drm_get_edid(port->connector, &port->aux.ddc);
 			drm_mode_connector_set_tile_property(port->connector);
 		}
@@ -2919,6 +2922,7 @@ static void drm_dp_destroy_connector_work(struct work_struct *work)
 		mgr->cbs->destroy_connector(mgr, port->connector);
 
 		drm_dp_port_teardown_pdt(port, port->pdt);
+		port->pdt = DP_PEER_DEVICE_NONE;
 
 		if (!port->input && port->vcpi.vcpi > 0) {
 			drm_dp_mst_reset_vcpi_slots(mgr, port);
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index 03414bde1f15..6c75e62c0b22 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -131,7 +131,12 @@ int drm_fb_helper_single_add_all_connectors(struct drm_fb_helper *fb_helper)
 	return 0;
 fail:
 	for (i = 0; i < fb_helper->connector_count; i++) {
-		kfree(fb_helper->connector_info[i]);
+		struct drm_fb_helper_connector *fb_helper_connector =
+			fb_helper->connector_info[i];
+
+		drm_connector_unreference(fb_helper_connector->connector);
+
+		kfree(fb_helper_connector);
 		fb_helper->connector_info[i] = NULL;
 	}
 	fb_helper->connector_count = 0;
@@ -603,6 +608,24 @@ int drm_fb_helper_blank(int blank, struct fb_info *info)
 }
 EXPORT_SYMBOL(drm_fb_helper_blank);
 
+static void drm_fb_helper_modeset_release(struct drm_fb_helper *helper,
+					  struct drm_mode_set *modeset)
+{
+	int i;
+
+	for (i = 0; i < modeset->num_connectors; i++) {
+		drm_connector_unreference(modeset->connectors[i]);
+		modeset->connectors[i] = NULL;
+	}
+	modeset->num_connectors = 0;
+
+	drm_mode_destroy(helper->dev, modeset->mode);
+	modeset->mode = NULL;
+
+	/* FIXME should hold a ref? */
+	modeset->fb = NULL;
+}
+
 static void drm_fb_helper_crtc_free(struct drm_fb_helper *helper)
 {
 	int i;
@@ -612,10 +635,12 @@ static void drm_fb_helper_crtc_free(struct drm_fb_helper *helper)
 		kfree(helper->connector_info[i]);
 	}
 	kfree(helper->connector_info);
+
 	for (i = 0; i < helper->crtc_count; i++) {
-		kfree(helper->crtc_info[i].mode_set.connectors);
-		if (helper->crtc_info[i].mode_set.mode)
-			drm_mode_destroy(helper->dev, helper->crtc_info[i].mode_set.mode);
+		struct drm_mode_set *modeset = &helper->crtc_info[i].mode_set;
+
+		drm_fb_helper_modeset_release(helper, modeset);
+		kfree(modeset->connectors);
 	}
 	kfree(helper->crtc_info);
 }
@@ -644,7 +669,9 @@ static void drm_fb_helper_dirty_work(struct work_struct *work)
 	clip->x2 = clip->y2 = 0;
 	spin_unlock_irqrestore(&helper->dirty_lock, flags);
 
-	helper->fb->funcs->dirty(helper->fb, NULL, 0, 0, &clip_copy, 1);
+	/* call dirty callback only when it has been really touched */
+	if (clip_copy.x1 < clip_copy.x2 && clip_copy.y1 < clip_copy.y2)
+		helper->fb->funcs->dirty(helper->fb, NULL, 0, 0, &clip_copy, 1);
 }
 
 /**
@@ -2088,7 +2115,6 @@ static void drm_setup_crtcs(struct drm_fb_helper *fb_helper)
 	struct drm_fb_helper_crtc **crtcs;
 	struct drm_display_mode **modes;
 	struct drm_fb_offset *offsets;
-	struct drm_mode_set *modeset;
 	bool *enabled;
 	int width, height;
 	int i;
@@ -2136,45 +2162,35 @@ static void drm_setup_crtcs(struct drm_fb_helper *fb_helper)
 
 	/* need to set the modesets up here for use later */
 	/* fill out the connector<->crtc mappings into the modesets */
-	for (i = 0; i < fb_helper->crtc_count; i++) {
-		modeset = &fb_helper->crtc_info[i].mode_set;
-		modeset->num_connectors = 0;
-		modeset->fb = NULL;
-	}
+	for (i = 0; i < fb_helper->crtc_count; i++)
+		drm_fb_helper_modeset_release(fb_helper,
+					      &fb_helper->crtc_info[i].mode_set);
 
 	for (i = 0; i < fb_helper->connector_count; i++) {
 		struct drm_display_mode *mode = modes[i];
 		struct drm_fb_helper_crtc *fb_crtc = crtcs[i];
 		struct drm_fb_offset *offset = &offsets[i];
-		modeset = &fb_crtc->mode_set;
+		struct drm_mode_set *modeset = &fb_crtc->mode_set;
 
 		if (mode && fb_crtc) {
+			struct drm_connector *connector =
+				fb_helper->connector_info[i]->connector;
+
 			DRM_DEBUG_KMS("desired mode %s set on crtc %d (%d,%d)\n",
 				      mode->name, fb_crtc->mode_set.crtc->base.id, offset->x, offset->y);
+
 			fb_crtc->desired_mode = mode;
 			fb_crtc->x = offset->x;
 			fb_crtc->y = offset->y;
-			if (modeset->mode)
-				drm_mode_destroy(dev, modeset->mode);
 			modeset->mode = drm_mode_duplicate(dev,
 							   fb_crtc->desired_mode);
-			modeset->connectors[modeset->num_connectors++] = fb_helper->connector_info[i]->connector;
+			drm_connector_reference(connector);
+			modeset->connectors[modeset->num_connectors++] = connector;
 			modeset->fb = fb_helper->fb;
 			modeset->x = offset->x;
 			modeset->y = offset->y;
 		}
 	}
-
-	/* Clear out any old modes if there are no more connected outputs. */
-	for (i = 0; i < fb_helper->crtc_count; i++) {
-		modeset = &fb_helper->crtc_info[i].mode_set;
-		if (modeset->num_connectors == 0) {
-			BUG_ON(modeset->fb);
-			if (modeset->mode)
-				drm_mode_destroy(dev, modeset->mode);
-			modeset->mode = NULL;
-		}
-	}
 out:
 	kfree(crtcs);
 	kfree(modes);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c
index def78c8c1780..f86e7c846678 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c
@@ -262,6 +262,26 @@ int exynos_atomic_commit(struct drm_device *dev, struct drm_atomic_state *state,
 	return 0;
 }
 
+int exynos_atomic_check(struct drm_device *dev,
+			struct drm_atomic_state *state)
+{
+	int ret;
+
+	ret = drm_atomic_helper_check_modeset(dev, state);
+	if (ret)
+		return ret;
+
+	ret = drm_atomic_normalize_zpos(dev, state);
+	if (ret)
+		return ret;
+
+	ret = drm_atomic_helper_check_planes(dev, state);
+	if (ret)
+		return ret;
+
+	return ret;
+}
+
 static int exynos_drm_open(struct drm_device *dev, struct drm_file *file)
 {
 	struct drm_exynos_file_private *file_priv;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.h b/drivers/gpu/drm/exynos/exynos_drm_drv.h
index d215149e737b..80c4d5b81689 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.h
@@ -301,6 +301,7 @@ static inline int exynos_dpi_bind(struct drm_device *dev,
 
 int exynos_atomic_commit(struct drm_device *dev, struct drm_atomic_state *state,
 			 bool nonblock);
+int exynos_atomic_check(struct drm_device *dev, struct drm_atomic_state *state);
 
 
 extern struct platform_driver fimd_driver;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fb.c b/drivers/gpu/drm/exynos/exynos_drm_fb.c
index 40ce841eb952..23cce0a3f5fc 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fb.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fb.c
@@ -190,7 +190,7 @@ dma_addr_t exynos_drm_fb_dma_addr(struct drm_framebuffer *fb, int index)
 static const struct drm_mode_config_funcs exynos_drm_mode_config_funcs = {
 	.fb_create = exynos_user_fb_create,
 	.output_poll_changed = exynos_drm_output_poll_changed,
-	.atomic_check = drm_atomic_helper_check,
+	.atomic_check = exynos_atomic_check,
 	.atomic_commit = exynos_atomic_commit,
 };
 
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index bfb2efd8d4d4..18dfdd5c1b3b 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -1447,8 +1447,6 @@ static int i915_drm_suspend(struct drm_device *dev)
 
 	dev_priv->suspend_count++;
 
-	intel_display_set_init_power(dev_priv, false);
-
 	intel_csr_ucode_suspend(dev_priv);
 
 out:
@@ -1466,6 +1464,8 @@ static int i915_drm_suspend_late(struct drm_device *dev, bool hibernation)
 
 	disable_rpm_wakeref_asserts(dev_priv);
 
+	intel_display_set_init_power(dev_priv, false);
+
 	fw_csr = !IS_BROXTON(dev_priv) &&
 		suspend_to_idle(dev_priv) && dev_priv->csr.dmc_payload;
 	/*
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 8b9ee4e390c0..685e9e065287 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -2883,6 +2883,11 @@ __i915_printk(struct drm_i915_private *dev_priv, const char *level,
 extern long i915_compat_ioctl(struct file *filp, unsigned int cmd,
 			      unsigned long arg);
 #endif
+extern const struct dev_pm_ops i915_pm_ops;
+
+extern int i915_driver_load(struct pci_dev *pdev,
+			    const struct pci_device_id *ent);
+extern void i915_driver_unload(struct drm_device *dev);
 extern int intel_gpu_reset(struct drm_i915_private *dev_priv, u32 engine_mask);
 extern bool intel_has_gpu_reset(struct drm_i915_private *dev_priv);
 extern void i915_reset(struct drm_i915_private *dev_priv);
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 947e82c2b175..23960de81b57 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -3550,8 +3550,6 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
 
 	vma->display_alignment = max_t(u64, vma->display_alignment, alignment);
 
-	WARN_ON(obj->pin_display > i915_vma_pin_count(vma));
-
 	i915_gem_object_flush_cpu_write_domain(obj);
 
 	old_write_domain = obj->base.write_domain;
@@ -3588,7 +3586,6 @@ i915_gem_object_unpin_from_display_plane(struct i915_vma *vma)
 	list_move_tail(&vma->vm_link, &vma->vm->inactive_list);
 
 	i915_vma_unpin(vma);
-	WARN_ON(vma->obj->pin_display > i915_vma_pin_count(vma));
 }
 
 /**
@@ -3745,7 +3742,12 @@ void __i915_vma_set_map_and_fenceable(struct i915_vma *vma)
 	mappable = (vma->node.start + fence_size <=
 		    dev_priv->ggtt.mappable_end);
 
-	if (mappable && fenceable)
+	/*
+	 * Explicitly disable for rotated VMA since the display does not
+	 * need the fence and the VMA is not accessible to other users.
+	 */
+	if (mappable && fenceable &&
+	    vma->ggtt_view.type != I915_GGTT_VIEW_ROTATED)
 		vma->flags |= I915_VMA_CAN_FENCE;
 	else
 		vma->flags &= ~I915_VMA_CAN_FENCE;
diff --git a/drivers/gpu/drm/i915/i915_gem_fence.c b/drivers/gpu/drm/i915/i915_gem_fence.c
index 8df1fa7234e8..2c7ba0ee127c 100644
--- a/drivers/gpu/drm/i915/i915_gem_fence.c
+++ b/drivers/gpu/drm/i915/i915_gem_fence.c
@@ -290,6 +290,8 @@ i915_vma_put_fence(struct i915_vma *vma)
 {
 	struct drm_i915_fence_reg *fence = vma->fence;
 
+	assert_rpm_wakelock_held(to_i915(vma->vm->dev));
+
 	if (!fence)
 		return 0;
 
@@ -341,6 +343,8 @@ i915_vma_get_fence(struct i915_vma *vma)
 	struct drm_i915_fence_reg *fence;
 	struct i915_vma *set = i915_gem_object_is_tiled(vma->obj) ? vma : NULL;
 
+	assert_rpm_wakelock_held(to_i915(vma->vm->dev));
+
 	/* Just update our place in the LRU if our fence is getting reused. */
 	if (vma->fence) {
 		fence = vma->fence;
@@ -371,6 +375,12 @@ void i915_gem_restore_fences(struct drm_device *dev)
 	struct drm_i915_private *dev_priv = to_i915(dev);
 	int i;
 
+	/* Note that this may be called outside of struct_mutex, by
+	 * runtime suspend/resume. The barrier we require is enforced by
+	 * rpm itself - all access to fences/GTT are only within an rpm
+	 * wakeref, and to acquire that wakeref you must pass through here.
+	 */
+
 	for (i = 0; i < dev_priv->num_fence_regs; i++) {
 		struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i];
 		struct i915_vma *vma = reg->vma;
@@ -379,10 +389,17 @@ void i915_gem_restore_fences(struct drm_device *dev)
 		 * Commit delayed tiling changes if we have an object still
 		 * attached to the fence, otherwise just clear the fence.
 		 */
-		if (vma && !i915_gem_object_is_tiled(vma->obj))
+		if (vma && !i915_gem_object_is_tiled(vma->obj)) {
+			GEM_BUG_ON(!reg->dirty);
+			GEM_BUG_ON(vma->obj->fault_mappable);
+
+			list_move(&reg->link, &dev_priv->mm.fence_list);
+			vma->fence = NULL;
 			vma = NULL;
+		}
 
-		fence_update(reg, vma);
+		fence_write(reg, vma);
+		reg->vma = vma;
 	}
 }
 
diff --git a/drivers/gpu/drm/i915/i915_pci.c b/drivers/gpu/drm/i915/i915_pci.c
index 687c768833b3..31e6edd08dd0 100644
--- a/drivers/gpu/drm/i915/i915_pci.c
+++ b/drivers/gpu/drm/i915/i915_pci.c
@@ -431,9 +431,6 @@ static const struct pci_device_id pciidlist[] = {
 };
 MODULE_DEVICE_TABLE(pci, pciidlist);
 
-extern int i915_driver_load(struct pci_dev *pdev,
-			    const struct pci_device_id *ent);
-
 static int i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 {
 	struct intel_device_info *intel_info =
@@ -463,8 +460,6 @@ static int i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	return i915_driver_load(pdev, ent);
 }
 
-extern void i915_driver_unload(struct drm_device *dev);
-
 static void i915_pci_remove(struct pci_dev *pdev)
 {
 	struct drm_device *dev = pci_get_drvdata(pdev);
@@ -473,8 +468,6 @@ static void i915_pci_remove(struct pci_dev *pdev)
 	drm_dev_unref(dev);
 }
 
-extern const struct dev_pm_ops i915_pm_ops;
-
 static struct pci_driver i915_pci_driver = {
 	.name = DRIVER_NAME,
 	.id_table = pciidlist,
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index c6e69e4cfa83..1f8af87c6294 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -1031,6 +1031,77 @@ static u8 translate_iboost(u8 val)
 	return mapping[val];
 }
 
+static void sanitize_ddc_pin(struct drm_i915_private *dev_priv,
+			     enum port port)
+{
+	const struct ddi_vbt_port_info *info =
+		&dev_priv->vbt.ddi_port_info[port];
+	enum port p;
+
+	if (!info->alternate_ddc_pin)
+		return;
+
+	for_each_port_masked(p, (1 << port) - 1) {
+		struct ddi_vbt_port_info *i = &dev_priv->vbt.ddi_port_info[p];
+
+		if (info->alternate_ddc_pin != i->alternate_ddc_pin)
+			continue;
+
+		DRM_DEBUG_KMS("port %c trying to use the same DDC pin (0x%x) as port %c, "
+			      "disabling port %c DVI/HDMI support\n",
+			      port_name(p), i->alternate_ddc_pin,
+			      port_name(port), port_name(p));
+
+		/*
+		 * If we have multiple ports supposedly sharing the
+		 * pin, then dvi/hdmi couldn't exist on the shared
+		 * port. Otherwise they share the same ddc bin and
+		 * system couldn't communicate with them separately.
+		 *
+		 * Due to parsing the ports in alphabetical order,
+		 * a higher port will always clobber a lower one.
+		 */
+		i->supports_dvi = false;
+		i->supports_hdmi = false;
+		i->alternate_ddc_pin = 0;
+	}
+}
+
+static void sanitize_aux_ch(struct drm_i915_private *dev_priv,
+			    enum port port)
+{
+	const struct ddi_vbt_port_info *info =
+		&dev_priv->vbt.ddi_port_info[port];
+	enum port p;
+
+	if (!info->alternate_aux_channel)
+		return;
+
+	for_each_port_masked(p, (1 << port) - 1) {
+		struct ddi_vbt_port_info *i = &dev_priv->vbt.ddi_port_info[p];
+
+		if (info->alternate_aux_channel != i->alternate_aux_channel)
+			continue;
+
+		DRM_DEBUG_KMS("port %c trying to use the same AUX CH (0x%x) as port %c, "
+			      "disabling port %c DP support\n",
+			      port_name(p), i->alternate_aux_channel,
+			      port_name(port), port_name(p));
+
+		/*
+		 * If we have multiple ports supposedlt sharing the
+		 * aux channel, then DP couldn't exist on the shared
+		 * port. Otherwise they share the same aux channel
+		 * and system couldn't communicate with them separately.
+		 *
+		 * Due to parsing the ports in alphabetical order,
+		 * a higher port will always clobber a lower one.
+		 */
+		i->supports_dp = false;
+		i->alternate_aux_channel = 0;
+	}
+}
+
 static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port,
 			   const struct bdb_header *bdb)
 {
@@ -1105,54 +1176,15 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port,
1105 DRM_DEBUG_KMS("Port %c is internal DP\n", port_name(port)); 1176 DRM_DEBUG_KMS("Port %c is internal DP\n", port_name(port));
1106 1177
1107 if (is_dvi) { 1178 if (is_dvi) {
1108 if (port == PORT_E) { 1179 info->alternate_ddc_pin = ddc_pin;
1109 info->alternate_ddc_pin = ddc_pin; 1180
1110 /* if DDIE share ddc pin with other port, then 1181 sanitize_ddc_pin(dev_priv, port);
1111 * dvi/hdmi couldn't exist on the shared port.
1112 * Otherwise they share the same ddc bin and system
1113 * couldn't communicate with them seperately. */
1114 if (ddc_pin == DDC_PIN_B) {
1115 dev_priv->vbt.ddi_port_info[PORT_B].supports_dvi = 0;
1116 dev_priv->vbt.ddi_port_info[PORT_B].supports_hdmi = 0;
1117 } else if (ddc_pin == DDC_PIN_C) {
1118 dev_priv->vbt.ddi_port_info[PORT_C].supports_dvi = 0;
1119 dev_priv->vbt.ddi_port_info[PORT_C].supports_hdmi = 0;
1120 } else if (ddc_pin == DDC_PIN_D) {
1121 dev_priv->vbt.ddi_port_info[PORT_D].supports_dvi = 0;
1122 dev_priv->vbt.ddi_port_info[PORT_D].supports_hdmi = 0;
1123 }
1124 } else if (ddc_pin == DDC_PIN_B && port != PORT_B)
1125 DRM_DEBUG_KMS("Unexpected DDC pin for port B\n");
1126 else if (ddc_pin == DDC_PIN_C && port != PORT_C)
1127 DRM_DEBUG_KMS("Unexpected DDC pin for port C\n");
1128 else if (ddc_pin == DDC_PIN_D && port != PORT_D)
1129 DRM_DEBUG_KMS("Unexpected DDC pin for port D\n");
1130 } 1182 }
1131 1183
1132 if (is_dp) { 1184 if (is_dp) {
1133 if (port == PORT_E) { 1185 info->alternate_aux_channel = aux_channel;
1134 info->alternate_aux_channel = aux_channel; 1186
1135 /* if DDIE share aux channel with other port, then 1187 sanitize_aux_ch(dev_priv, port);
1136 * DP couldn't exist on the shared port. Otherwise
1137 * they share the same aux channel and system
1138 * couldn't communicate with them seperately. */
1139 if (aux_channel == DP_AUX_A)
1140 dev_priv->vbt.ddi_port_info[PORT_A].supports_dp = 0;
1141 else if (aux_channel == DP_AUX_B)
1142 dev_priv->vbt.ddi_port_info[PORT_B].supports_dp = 0;
1143 else if (aux_channel == DP_AUX_C)
1144 dev_priv->vbt.ddi_port_info[PORT_C].supports_dp = 0;
1145 else if (aux_channel == DP_AUX_D)
1146 dev_priv->vbt.ddi_port_info[PORT_D].supports_dp = 0;
1147 }
1148 else if (aux_channel == DP_AUX_A && port != PORT_A)
1149 DRM_DEBUG_KMS("Unexpected AUX channel for port A\n");
1150 else if (aux_channel == DP_AUX_B && port != PORT_B)
1151 DRM_DEBUG_KMS("Unexpected AUX channel for port B\n");
1152 else if (aux_channel == DP_AUX_C && port != PORT_C)
1153 DRM_DEBUG_KMS("Unexpected AUX channel for port C\n");
1154 else if (aux_channel == DP_AUX_D && port != PORT_D)
1155 DRM_DEBUG_KMS("Unexpected AUX channel for port D\n");
1156 } 1188 }
1157 1189
1158 if (bdb->version >= 158) { 1190 if (bdb->version >= 158) {
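
Editor's note: the intel_bios.c rework replaces the PORT_E special case with a generic scan of all lower-numbered ports for a conflicting DDC pin or AUX channel. A stand-alone sketch of that scan is below; the array, port count, and field names are mock data for illustration and not the VBT parser's structures.

/* Sketch: for each parsed port, walk every lower port and drop the
 * conflicting capability from the earlier one. */
#include <stdio.h>

#define NUM_PORTS 5

struct port_info {
	int ddc_pin;		/* 0 means "none" */
	int supports_hdmi;
};

int main(void)
{
	struct port_info info[NUM_PORTS] = {
		[1] = { .ddc_pin = 5, .supports_hdmi = 1 },
		[4] = { .ddc_pin = 5, .supports_hdmi = 1 },	/* clashes with port 1 */
	};
	int port, p;

	for (port = 0; port < NUM_PORTS; port++) {
		if (!info[port].ddc_pin)
			continue;
		/* scan only the lower ports, mirroring (1 << port) - 1 */
		for (p = 0; p < port; p++) {
			if (info[p].ddc_pin != info[port].ddc_pin)
				continue;
			printf("port %d reuses pin %d of port %d, disabling port %d\n",
			       port, info[port].ddc_pin, p, p);
			info[p].supports_hdmi = 0;
			info[p].ddc_pin = 0;
		}
	}
	return 0;
}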
diff --git a/drivers/gpu/drm/i915/intel_device_info.c b/drivers/gpu/drm/i915/intel_device_info.c
index 73b6858600ac..1b20e160bc1f 100644
--- a/drivers/gpu/drm/i915/intel_device_info.c
+++ b/drivers/gpu/drm/i915/intel_device_info.c
@@ -192,7 +192,7 @@ static void broadwell_sseu_info_init(struct drm_i915_private *dev_priv)
 	struct sseu_dev_info *sseu = &mkwrite_device_info(dev_priv)->sseu;
 	const int s_max = 3, ss_max = 3, eu_max = 8;
 	int s, ss;
-	u32 fuse2, eu_disable[s_max];
+	u32 fuse2, eu_disable[3]; /* s_max */
 
 	fuse2 = I915_READ(GEN8_FUSE2);
 	sseu->slice_mask = (fuse2 & GEN8_F2_S_ENA_MASK) >> GEN8_F2_S_ENA_SHIFT;
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index fbcfed63a76e..0ad1879bfd9d 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -2978,7 +2978,8 @@ int skl_check_plane_surface(struct intel_plane_state *plane_state)
 	/* Rotate src coordinates to match rotated GTT view */
 	if (intel_rotation_90_or_270(rotation))
 		drm_rect_rotate(&plane_state->base.src,
-				fb->width, fb->height, DRM_ROTATE_270);
+				fb->width << 16, fb->height << 16,
+				DRM_ROTATE_270);
 
 	/*
 	 * Handle the AUX surface first since
@@ -14310,7 +14311,7 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state)
 
 	for_each_plane_in_state(state, plane, plane_state, i) {
 		struct intel_plane_state *intel_plane_state =
-			to_intel_plane_state(plane_state);
+			to_intel_plane_state(plane->state);
 
 		if (!intel_plane_state->wait_req)
 			continue;
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 14a3cf0b7213..3581b5a7f716 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -1108,6 +1108,44 @@ intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
 	return ret;
 }
 
+static enum port intel_aux_port(struct drm_i915_private *dev_priv,
+				enum port port)
+{
+	const struct ddi_vbt_port_info *info =
+		&dev_priv->vbt.ddi_port_info[port];
+	enum port aux_port;
+
+	if (!info->alternate_aux_channel) {
+		DRM_DEBUG_KMS("using AUX %c for port %c (platform default)\n",
+			      port_name(port), port_name(port));
+		return port;
+	}
+
+	switch (info->alternate_aux_channel) {
+	case DP_AUX_A:
+		aux_port = PORT_A;
+		break;
+	case DP_AUX_B:
+		aux_port = PORT_B;
+		break;
+	case DP_AUX_C:
+		aux_port = PORT_C;
+		break;
+	case DP_AUX_D:
+		aux_port = PORT_D;
+		break;
+	default:
+		MISSING_CASE(info->alternate_aux_channel);
+		aux_port = PORT_A;
+		break;
+	}
+
+	DRM_DEBUG_KMS("using AUX %c for port %c (VBT)\n",
+		      port_name(aux_port), port_name(port));
+
+	return aux_port;
+}
+
 static i915_reg_t g4x_aux_ctl_reg(struct drm_i915_private *dev_priv,
 				  enum port port)
 {
@@ -1168,36 +1206,9 @@ static i915_reg_t ilk_aux_data_reg(struct drm_i915_private *dev_priv,
 	}
 }
 
-/*
- * On SKL we don't have Aux for port E so we rely
- * on VBT to set a proper alternate aux channel.
- */
-static enum port skl_porte_aux_port(struct drm_i915_private *dev_priv)
-{
-	const struct ddi_vbt_port_info *info =
-		&dev_priv->vbt.ddi_port_info[PORT_E];
-
-	switch (info->alternate_aux_channel) {
-	case DP_AUX_A:
-		return PORT_A;
-	case DP_AUX_B:
-		return PORT_B;
-	case DP_AUX_C:
-		return PORT_C;
-	case DP_AUX_D:
-		return PORT_D;
-	default:
-		MISSING_CASE(info->alternate_aux_channel);
-		return PORT_A;
-	}
-}
-
 static i915_reg_t skl_aux_ctl_reg(struct drm_i915_private *dev_priv,
 				  enum port port)
 {
-	if (port == PORT_E)
-		port = skl_porte_aux_port(dev_priv);
-
 	switch (port) {
 	case PORT_A:
 	case PORT_B:
@@ -1213,9 +1224,6 @@ static i915_reg_t skl_aux_ctl_reg(struct drm_i915_private *dev_priv,
 static i915_reg_t skl_aux_data_reg(struct drm_i915_private *dev_priv,
 				   enum port port, int index)
 {
-	if (port == PORT_E)
-		port = skl_porte_aux_port(dev_priv);
-
 	switch (port) {
 	case PORT_A:
 	case PORT_B:
@@ -1253,7 +1261,8 @@ static i915_reg_t intel_aux_data_reg(struct drm_i915_private *dev_priv,
 static void intel_aux_reg_init(struct intel_dp *intel_dp)
 {
 	struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
-	enum port port = dp_to_dig_port(intel_dp)->port;
+	enum port port = intel_aux_port(dev_priv,
+					dp_to_dig_port(intel_dp)->port);
 	int i;
 
 	intel_dp->aux_ch_ctl_reg = intel_aux_ctl_reg(dev_priv, port);
@@ -3551,8 +3560,8 @@ intel_edp_init_dpcd(struct intel_dp *intel_dp)
 	/* Read the eDP Display control capabilities registers */
 	if ((intel_dp->dpcd[DP_EDP_CONFIGURATION_CAP] & DP_DPCD_DISPLAY_CONTROL_CAPABLE) &&
 	    drm_dp_dpcd_read(&intel_dp->aux, DP_EDP_DPCD_REV,
-			     intel_dp->edp_dpcd, sizeof(intel_dp->edp_dpcd) ==
-			     sizeof(intel_dp->edp_dpcd)))
+			     intel_dp->edp_dpcd, sizeof(intel_dp->edp_dpcd)) ==
+			     sizeof(intel_dp->edp_dpcd))
 		DRM_DEBUG_KMS("EDP DPCD : %*ph\n", (int) sizeof(intel_dp->edp_dpcd),
 			      intel_dp->edp_dpcd);
 
diff --git a/drivers/gpu/drm/i915/intel_fbc.c b/drivers/gpu/drm/i915/intel_fbc.c
index faa67624e1ed..c43dd9abce79 100644
--- a/drivers/gpu/drm/i915/intel_fbc.c
+++ b/drivers/gpu/drm/i915/intel_fbc.c
@@ -104,8 +104,10 @@ static int intel_fbc_calculate_cfb_size(struct drm_i915_private *dev_priv,
 	int lines;
 
 	intel_fbc_get_plane_source_size(cache, NULL, &lines);
-	if (INTEL_INFO(dev_priv)->gen >= 7)
+	if (INTEL_GEN(dev_priv) == 7)
 		lines = min(lines, 2048);
+	else if (INTEL_GEN(dev_priv) >= 8)
+		lines = min(lines, 2560);
 
 	/* Hardware needs the full buffer stride, not just the active area. */
 	return lines * cache->fb.stride;
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index a2f751cd187a..db24f898853c 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -3362,13 +3362,15 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
 	int num_active;
 	int id, i;
 
+	/* Clear the partitioning for disabled planes. */
+	memset(ddb->plane[pipe], 0, sizeof(ddb->plane[pipe]));
+	memset(ddb->y_plane[pipe], 0, sizeof(ddb->y_plane[pipe]));
+
 	if (WARN_ON(!state))
 		return 0;
 
 	if (!cstate->base.active) {
 		ddb->pipe[pipe].start = ddb->pipe[pipe].end = 0;
-		memset(ddb->plane[pipe], 0, sizeof(ddb->plane[pipe]));
-		memset(ddb->y_plane[pipe], 0, sizeof(ddb->y_plane[pipe]));
 		return 0;
 	}
 
@@ -3468,12 +3470,6 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
 	return 0;
 }
 
-static uint32_t skl_pipe_pixel_rate(const struct intel_crtc_state *config)
-{
-	/* TODO: Take into account the scalers once we support them */
-	return config->base.adjusted_mode.crtc_clock;
-}
-
 /*
  * The max latency should be 257 (max the punit can code is 255 and we add 2us
  * for the read latency) and cpp should always be <= 8, so that
@@ -3524,7 +3520,7 @@ static uint32_t skl_adjusted_plane_pixel_rate(const struct intel_crtc_state *cst
 	 * Adjusted plane pixel rate is just the pipe's adjusted pixel rate
 	 * with additional adjustments for plane-specific scaling.
 	 */
-	adjusted_pixel_rate = skl_pipe_pixel_rate(cstate);
+	adjusted_pixel_rate = ilk_pipe_pixel_rate(cstate);
 	downscale_amount = skl_plane_downscale_amount(pstate);
 
 	pixel_rate = adjusted_pixel_rate * downscale_amount >> 16;
@@ -3736,11 +3732,11 @@ skl_compute_linetime_wm(struct intel_crtc_state *cstate)
 	if (!cstate->base.active)
 		return 0;
 
-	if (WARN_ON(skl_pipe_pixel_rate(cstate) == 0))
+	if (WARN_ON(ilk_pipe_pixel_rate(cstate) == 0))
 		return 0;
 
 	return DIV_ROUND_UP(8 * cstate->base.adjusted_mode.crtc_htotal * 1000,
-			    skl_pipe_pixel_rate(cstate));
+			    ilk_pipe_pixel_rate(cstate));
 }
 
 static void skl_compute_transition_wm(struct intel_crtc_state *cstate,
@@ -4050,6 +4046,12 @@ skl_compute_ddb(struct drm_atomic_state *state)
 		intel_state->wm_results.dirty_pipes = ~0;
 	}
 
+	/*
+	 * We're not recomputing for the pipes not included in the commit, so
+	 * make sure we start with the current state.
+	 */
+	memcpy(ddb, &dev_priv->wm.skl_hw.ddb, sizeof(*ddb));
+
 	for_each_intel_crtc_mask(dev, intel_crtc, realloc_pipes) {
 		struct intel_crtc_state *cstate;
 
diff --git a/drivers/gpu/drm/imx/imx-drm-core.c b/drivers/gpu/drm/imx/imx-drm-core.c
index 98df09c2b388..9672b579f950 100644
--- a/drivers/gpu/drm/imx/imx-drm-core.c
+++ b/drivers/gpu/drm/imx/imx-drm-core.c
@@ -357,8 +357,8 @@ static int imx_drm_bind(struct device *dev)
 	int ret;
 
 	drm = drm_dev_alloc(&imx_drm_driver, dev);
-	if (!drm)
-		return -ENOMEM;
+	if (IS_ERR(drm))
+		return PTR_ERR(drm);
 
 	imxdrm = devm_kzalloc(dev, sizeof(*imxdrm), GFP_KERNEL);
 	if (!imxdrm) {
@@ -436,9 +436,11 @@ static int imx_drm_bind(struct device *dev)
 
 err_fbhelper:
 	drm_kms_helper_poll_fini(drm);
+#if IS_ENABLED(CONFIG_DRM_FBDEV_EMULATION)
 	if (imxdrm->fbhelper)
 		drm_fbdev_cma_fini(imxdrm->fbhelper);
 err_unbind:
+#endif
 	component_unbind_all(drm->dev, drm);
 err_vblank:
 	drm_vblank_cleanup(drm);
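
Editor's note: the imx_drm_bind change follows drm_dev_alloc() switching from returning NULL to returning an encoded error pointer, so the caller must test with IS_ERR()/PTR_ERR() instead of a NULL check. The sketch below re-creates that idiom in userspace; the ERR_PTR/IS_ERR/PTR_ERR macros here are simplified stand-ins, not the <linux/err.h> implementations, and device_alloc is an invented example.

/* Sketch: the error-pointer convention behind IS_ERR()/PTR_ERR(). */
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

#define MAX_ERRNO	4095
#define ERR_PTR(err)	((void *)(long)(err))
#define PTR_ERR(ptr)	((long)(ptr))
#define IS_ERR(ptr)	((unsigned long)(ptr) >= (unsigned long)-MAX_ERRNO)

struct device { int dummy; };

/* Returns a valid pointer on success or an encoded errno on failure. */
static struct device *device_alloc(int fail)
{
	if (fail)
		return ERR_PTR(-ENOMEM);
	return calloc(1, sizeof(struct device));
}

int main(void)
{
	struct device *dev = device_alloc(1);

	if (IS_ERR(dev)) {
		printf("alloc failed: %ld\n", PTR_ERR(dev));
		dev = device_alloc(0);
	}
	if (!IS_ERR(dev) && dev) {
		printf("alloc ok\n");
		free(dev);
	}
	return 0;
}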
diff --git a/drivers/gpu/drm/imx/ipuv3-plane.c b/drivers/gpu/drm/imx/ipuv3-plane.c
index ce22d0a0ddc8..d5864ed4d772 100644
--- a/drivers/gpu/drm/imx/ipuv3-plane.c
+++ b/drivers/gpu/drm/imx/ipuv3-plane.c
@@ -103,11 +103,11 @@ drm_plane_state_to_vbo(struct drm_plane_state *state)
 		       (state->src_x >> 16) / 2 - eba;
 }
 
-static void ipu_plane_atomic_set_base(struct ipu_plane *ipu_plane,
-				      struct drm_plane_state *old_state)
+static void ipu_plane_atomic_set_base(struct ipu_plane *ipu_plane)
 {
 	struct drm_plane *plane = &ipu_plane->base;
 	struct drm_plane_state *state = plane->state;
+	struct drm_crtc_state *crtc_state = state->crtc->state;
 	struct drm_framebuffer *fb = state->fb;
 	unsigned long eba, ubo, vbo;
 	int active;
@@ -117,7 +117,7 @@ static void ipu_plane_atomic_set_base(struct ipu_plane *ipu_plane,
117 switch (fb->pixel_format) { 117 switch (fb->pixel_format) {
118 case DRM_FORMAT_YUV420: 118 case DRM_FORMAT_YUV420:
119 case DRM_FORMAT_YVU420: 119 case DRM_FORMAT_YVU420:
120 if (old_state->fb) 120 if (!drm_atomic_crtc_needs_modeset(crtc_state))
121 break; 121 break;
122 122
123 /* 123 /*
@@ -149,7 +149,7 @@ static void ipu_plane_atomic_set_base(struct ipu_plane *ipu_plane,
149 break; 149 break;
150 } 150 }
151 151
152 if (old_state->fb) { 152 if (!drm_atomic_crtc_needs_modeset(crtc_state)) {
153 active = ipu_idmac_get_current_buffer(ipu_plane->ipu_ch); 153 active = ipu_idmac_get_current_buffer(ipu_plane->ipu_ch);
154 ipu_cpmem_set_buffer(ipu_plane->ipu_ch, !active, eba); 154 ipu_cpmem_set_buffer(ipu_plane->ipu_ch, !active, eba);
155 ipu_idmac_select_buffer(ipu_plane->ipu_ch, !active); 155 ipu_idmac_select_buffer(ipu_plane->ipu_ch, !active);
@@ -259,6 +259,7 @@ static int ipu_plane_atomic_check(struct drm_plane *plane,
259 struct drm_framebuffer *fb = state->fb; 259 struct drm_framebuffer *fb = state->fb;
260 struct drm_framebuffer *old_fb = old_state->fb; 260 struct drm_framebuffer *old_fb = old_state->fb;
261 unsigned long eba, ubo, vbo, old_ubo, old_vbo; 261 unsigned long eba, ubo, vbo, old_ubo, old_vbo;
262 int hsub, vsub;
262 263
263 /* Ok to disable */ 264 /* Ok to disable */
264 if (!fb) 265 if (!fb)
@@ -355,7 +356,9 @@ static int ipu_plane_atomic_check(struct drm_plane *plane,
355 if ((ubo > 0xfffff8) || (vbo > 0xfffff8)) 356 if ((ubo > 0xfffff8) || (vbo > 0xfffff8))
356 return -EINVAL; 357 return -EINVAL;
357 358
358 if (old_fb) { 359 if (old_fb &&
360 (old_fb->pixel_format == DRM_FORMAT_YUV420 ||
361 old_fb->pixel_format == DRM_FORMAT_YVU420)) {
359 old_ubo = drm_plane_state_to_ubo(old_state); 362 old_ubo = drm_plane_state_to_ubo(old_state);
360 old_vbo = drm_plane_state_to_vbo(old_state); 363 old_vbo = drm_plane_state_to_vbo(old_state);
361 if (ubo != old_ubo || vbo != old_vbo) 364 if (ubo != old_ubo || vbo != old_vbo)
@@ -370,6 +373,16 @@ static int ipu_plane_atomic_check(struct drm_plane *plane,
370 373
371 if (old_fb && old_fb->pitches[1] != fb->pitches[1]) 374 if (old_fb && old_fb->pitches[1] != fb->pitches[1])
372 crtc_state->mode_changed = true; 375 crtc_state->mode_changed = true;
376
377 /*
378 * The x/y offsets must be even in case of horizontal/vertical
379 * chroma subsampling.
380 */
381 hsub = drm_format_horz_chroma_subsampling(fb->pixel_format);
382 vsub = drm_format_vert_chroma_subsampling(fb->pixel_format);
383 if (((state->src_x >> 16) & (hsub - 1)) ||
384 ((state->src_y >> 16) & (vsub - 1)))
385 return -EINVAL;
373 } 386 }
374 387
375 return 0; 388 return 0;
@@ -392,7 +405,7 @@ static void ipu_plane_atomic_update(struct drm_plane *plane,
392 struct drm_crtc_state *crtc_state = state->crtc->state; 405 struct drm_crtc_state *crtc_state = state->crtc->state;
393 406
394 if (!drm_atomic_crtc_needs_modeset(crtc_state)) { 407 if (!drm_atomic_crtc_needs_modeset(crtc_state)) {
395 ipu_plane_atomic_set_base(ipu_plane, old_state); 408 ipu_plane_atomic_set_base(ipu_plane);
396 return; 409 return;
397 } 410 }
398 } 411 }
@@ -424,6 +437,7 @@ static void ipu_plane_atomic_update(struct drm_plane *plane,
424 ipu_dp_set_global_alpha(ipu_plane->dp, false, 0, false); 437 ipu_dp_set_global_alpha(ipu_plane->dp, false, 0, false);
425 break; 438 break;
426 default: 439 default:
440 ipu_dp_set_global_alpha(ipu_plane->dp, true, 0, true);
427 break; 441 break;
428 } 442 }
429 } 443 }
@@ -437,7 +451,7 @@ static void ipu_plane_atomic_update(struct drm_plane *plane,
437 ipu_cpmem_set_high_priority(ipu_plane->ipu_ch); 451 ipu_cpmem_set_high_priority(ipu_plane->ipu_ch);
438 ipu_idmac_set_double_buffer(ipu_plane->ipu_ch, 1); 452 ipu_idmac_set_double_buffer(ipu_plane->ipu_ch, 1);
439 ipu_cpmem_set_stride(ipu_plane->ipu_ch, state->fb->pitches[0]); 453 ipu_cpmem_set_stride(ipu_plane->ipu_ch, state->fb->pitches[0]);
440 ipu_plane_atomic_set_base(ipu_plane, old_state); 454 ipu_plane_atomic_set_base(ipu_plane);
441 ipu_plane_enable(ipu_plane); 455 ipu_plane_enable(ipu_plane);
442} 456}
443 457
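
The ipuv3-plane changes derive "first update after a modeset" from the CRTC state rather than old_state->fb, restore the global-alpha default in the DP path, and reject source offsets that are not aligned to the chroma subsampling. The alignment test relies on hsub/vsub being powers of two; restated below as a small helper that is not part of the patch, with src_x/src_y in the usual 16.16 fixed-point plane coordinates.

/* hsub/vsub are 2 for YUV420, so an odd whole-pixel offset would make
 * the chroma plane start between two chroma samples.
 */
static bool chroma_offsets_aligned(u32 src_x, u32 src_y, int hsub, int vsub)
{
	return !((src_x >> 16) & (hsub - 1)) &&	/* x offset even */
	       !((src_y >> 16) & (vsub - 1));	/* y offset even */
}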
diff --git a/drivers/gpu/drm/nouveau/nouveau_acpi.c b/drivers/gpu/drm/nouveau/nouveau_acpi.c
index dc57b628e074..193573d191e5 100644
--- a/drivers/gpu/drm/nouveau/nouveau_acpi.c
+++ b/drivers/gpu/drm/nouveau/nouveau_acpi.c
@@ -240,7 +240,8 @@ static bool nouveau_pr3_present(struct pci_dev *pdev)
240 if (!parent_adev) 240 if (!parent_adev)
241 return false; 241 return false;
242 242
243 return acpi_has_method(parent_adev->handle, "_PR3"); 243 return parent_adev->power.flags.power_resources &&
244 acpi_has_method(parent_adev->handle, "_PR3");
244} 245}
245 246
246static void nouveau_dsm_pci_probe(struct pci_dev *pdev, acpi_handle *dhandle_out, 247static void nouveau_dsm_pci_probe(struct pci_dev *pdev, acpi_handle *dhandle_out,
diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c
index 103fc8650197..a0d4a0522fdc 100644
--- a/drivers/gpu/drm/radeon/ni.c
+++ b/drivers/gpu/drm/radeon/ni.c
@@ -1396,9 +1396,7 @@ static void cayman_pcie_gart_fini(struct radeon_device *rdev)
1396void cayman_cp_int_cntl_setup(struct radeon_device *rdev, 1396void cayman_cp_int_cntl_setup(struct radeon_device *rdev,
1397 int ring, u32 cp_int_cntl) 1397 int ring, u32 cp_int_cntl)
1398{ 1398{
1399 u32 srbm_gfx_cntl = RREG32(SRBM_GFX_CNTL) & ~3; 1399 WREG32(SRBM_GFX_CNTL, RINGID(ring));
1400
1401 WREG32(SRBM_GFX_CNTL, srbm_gfx_cntl | (ring & 3));
1402 WREG32(CP_INT_CNTL, cp_int_cntl); 1400 WREG32(CP_INT_CNTL, cp_int_cntl);
1403} 1401}
1404 1402
diff --git a/drivers/gpu/drm/radeon/radeon_dp_auxch.c b/drivers/gpu/drm/radeon/radeon_dp_auxch.c
index 2d465648856a..474a8a1886f7 100644
--- a/drivers/gpu/drm/radeon/radeon_dp_auxch.c
+++ b/drivers/gpu/drm/radeon/radeon_dp_auxch.c
@@ -105,7 +105,7 @@ radeon_dp_aux_transfer_native(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg
105 105
106 tmp &= AUX_HPD_SEL(0x7); 106 tmp &= AUX_HPD_SEL(0x7);
107 tmp |= AUX_HPD_SEL(chan->rec.hpd); 107 tmp |= AUX_HPD_SEL(chan->rec.hpd);
108 tmp |= AUX_EN | AUX_LS_READ_EN | AUX_HPD_DISCON(0x1); 108 tmp |= AUX_EN | AUX_LS_READ_EN;
109 109
110 WREG32(AUX_CONTROL + aux_offset[instance], tmp); 110 WREG32(AUX_CONTROL + aux_offset[instance], tmp);
111 111
diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c
index 89bdf20344ae..c49934527a87 100644
--- a/drivers/gpu/drm/radeon/si_dpm.c
+++ b/drivers/gpu/drm/radeon/si_dpm.c
@@ -2999,6 +2999,49 @@ static void si_apply_state_adjust_rules(struct radeon_device *rdev,
2999 int i; 2999 int i;
3000 struct si_dpm_quirk *p = si_dpm_quirk_list; 3000 struct si_dpm_quirk *p = si_dpm_quirk_list;
3001 3001
3002 /* limit all SI kickers */
3003 if (rdev->family == CHIP_PITCAIRN) {
3004 if ((rdev->pdev->revision == 0x81) ||
3005 (rdev->pdev->device == 0x6810) ||
3006 (rdev->pdev->device == 0x6811) ||
3007 (rdev->pdev->device == 0x6816) ||
3008 (rdev->pdev->device == 0x6817) ||
3009 (rdev->pdev->device == 0x6806))
3010 max_mclk = 120000;
3011 } else if (rdev->family == CHIP_VERDE) {
3012 if ((rdev->pdev->revision == 0x81) ||
3013 (rdev->pdev->revision == 0x83) ||
3014 (rdev->pdev->revision == 0x87) ||
3015 (rdev->pdev->device == 0x6820) ||
3016 (rdev->pdev->device == 0x6821) ||
3017 (rdev->pdev->device == 0x6822) ||
3018 (rdev->pdev->device == 0x6823) ||
3019 (rdev->pdev->device == 0x682A) ||
3020 (rdev->pdev->device == 0x682B)) {
3021 max_sclk = 75000;
3022 max_mclk = 80000;
3023 }
3024 } else if (rdev->family == CHIP_OLAND) {
3025 if ((rdev->pdev->revision == 0xC7) ||
3026 (rdev->pdev->revision == 0x80) ||
3027 (rdev->pdev->revision == 0x81) ||
3028 (rdev->pdev->revision == 0x83) ||
3029 (rdev->pdev->device == 0x6604) ||
3030 (rdev->pdev->device == 0x6605)) {
3031 max_sclk = 75000;
3032 max_mclk = 80000;
3033 }
3034 } else if (rdev->family == CHIP_HAINAN) {
3035 if ((rdev->pdev->revision == 0x81) ||
3036 (rdev->pdev->revision == 0x83) ||
3037 (rdev->pdev->revision == 0xC3) ||
3038 (rdev->pdev->device == 0x6664) ||
3039 (rdev->pdev->device == 0x6665) ||
3040 (rdev->pdev->device == 0x6667)) {
3041 max_sclk = 75000;
3042 max_mclk = 80000;
3043 }
3044 }
3002 /* Apply dpm quirks */ 3045 /* Apply dpm quirks */
3003 while (p && p->chip_device != 0) { 3046 while (p && p->chip_device != 0) {
3004 if (rdev->pdev->vendor == p->chip_vendor && 3047 if (rdev->pdev->vendor == p->chip_vendor &&
@@ -3011,16 +3054,6 @@ static void si_apply_state_adjust_rules(struct radeon_device *rdev,
3011 } 3054 }
3012 ++p; 3055 ++p;
3013 } 3056 }
3014 /* limit mclk on all R7 370 parts for stability */
3015 if (rdev->pdev->device == 0x6811 &&
3016 rdev->pdev->revision == 0x81)
3017 max_mclk = 120000;
3018 /* limit sclk/mclk on Jet parts for stability */
3019 if (rdev->pdev->device == 0x6665 &&
3020 rdev->pdev->revision == 0xc3) {
3021 max_sclk = 75000;
3022 max_mclk = 80000;
3023 }
3024 3057
3025 if (rps->vce_active) { 3058 if (rps->vce_active) {
3026 rps->evclk = rdev->pm.dpm.vce_states[rdev->pm.dpm.vce_level].evclk; 3059 rps->evclk = rdev->pm.dpm.vce_states[rdev->pm.dpm.vce_level].evclk;
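
The si_dpm hunk widens the earlier one-off R7 370/Jet limits to the full set of SI "kicker" parts; the values appear to follow radeon's usual 10 kHz clock units, so 120000 corresponds to roughly a 1.2 GHz mclk cap and 75000/80000 to about 750/800 MHz. A hedged sketch of how such caps are typically applied per DPM level later in the function; the flat arrays below are placeholders, not the radeon power-state structures.

/* Placeholder layout; radeon keeps these in its per-state level structs. */
static void cap_dpm_levels(u32 *sclk, u32 *mclk, int nlevels,
			   u32 max_sclk, u32 max_mclk)
{
	int i;

	for (i = 0; i < nlevels; i++) {
		if (max_sclk && sclk[i] > max_sclk)
			sclk[i] = max_sclk;	/* e.g. 75000 -> ~750 MHz */
		if (max_mclk && mclk[i] > max_mclk)
			mclk[i] = max_mclk;	/* e.g. 80000 -> ~800 MHz */
	}
}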
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_kms.c b/drivers/gpu/drm/rcar-du/rcar_du_kms.c
index bd9c3bb9252c..392c7e6de042 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_kms.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_kms.c
@@ -231,8 +231,16 @@ static int rcar_du_atomic_check(struct drm_device *dev,
231 struct rcar_du_device *rcdu = dev->dev_private; 231 struct rcar_du_device *rcdu = dev->dev_private;
232 int ret; 232 int ret;
233 233
234 ret = drm_atomic_helper_check(dev, state); 234 ret = drm_atomic_helper_check_modeset(dev, state);
235 if (ret < 0) 235 if (ret)
236 return ret;
237
238 ret = drm_atomic_normalize_zpos(dev, state);
239 if (ret)
240 return ret;
241
242 ret = drm_atomic_helper_check_planes(dev, state);
243 if (ret)
236 return ret; 244 return ret;
237 245
238 if (rcar_du_has(rcdu, RCAR_DU_FEATURE_VSP1_SOURCE)) 246 if (rcar_du_has(rcdu, RCAR_DU_FEATURE_VSP1_SOURCE))
diff --git a/drivers/gpu/drm/sti/sti_drv.c b/drivers/gpu/drm/sti/sti_drv.c
index 2784919a7366..9df308565f6c 100644
--- a/drivers/gpu/drm/sti/sti_drv.c
+++ b/drivers/gpu/drm/sti/sti_drv.c
@@ -195,6 +195,26 @@ static void sti_atomic_work(struct work_struct *work)
195 sti_atomic_complete(private, private->commit.state); 195 sti_atomic_complete(private, private->commit.state);
196} 196}
197 197
198static int sti_atomic_check(struct drm_device *dev,
199 struct drm_atomic_state *state)
200{
201 int ret;
202
203 ret = drm_atomic_helper_check_modeset(dev, state);
204 if (ret)
205 return ret;
206
207 ret = drm_atomic_normalize_zpos(dev, state);
208 if (ret)
209 return ret;
210
211 ret = drm_atomic_helper_check_planes(dev, state);
212 if (ret)
213 return ret;
214
215 return ret;
216}
217
198static int sti_atomic_commit(struct drm_device *drm, 218static int sti_atomic_commit(struct drm_device *drm,
199 struct drm_atomic_state *state, bool nonblock) 219 struct drm_atomic_state *state, bool nonblock)
200{ 220{
@@ -248,7 +268,7 @@ static void sti_output_poll_changed(struct drm_device *ddev)
248static const struct drm_mode_config_funcs sti_mode_config_funcs = { 268static const struct drm_mode_config_funcs sti_mode_config_funcs = {
249 .fb_create = drm_fb_cma_create, 269 .fb_create = drm_fb_cma_create,
250 .output_poll_changed = sti_output_poll_changed, 270 .output_poll_changed = sti_output_poll_changed,
251 .atomic_check = drm_atomic_helper_check, 271 .atomic_check = sti_atomic_check,
252 .atomic_commit = sti_atomic_commit, 272 .atomic_commit = sti_atomic_commit,
253}; 273};
254 274
diff --git a/drivers/gpu/drm/virtio/virtgpu_display.c b/drivers/gpu/drm/virtio/virtgpu_display.c
index 7cf3678623c3..58048709c34e 100644
--- a/drivers/gpu/drm/virtio/virtgpu_display.c
+++ b/drivers/gpu/drm/virtio/virtgpu_display.c
@@ -338,8 +338,7 @@ static void vgdev_atomic_commit_tail(struct drm_atomic_state *state)
338 338
339 drm_atomic_helper_commit_modeset_disables(dev, state); 339 drm_atomic_helper_commit_modeset_disables(dev, state);
340 drm_atomic_helper_commit_modeset_enables(dev, state); 340 drm_atomic_helper_commit_modeset_enables(dev, state);
341 drm_atomic_helper_commit_planes(dev, state, 341 drm_atomic_helper_commit_planes(dev, state, 0);
342 DRM_PLANE_COMMIT_ACTIVE_ONLY);
343 342
344 drm_atomic_helper_commit_hw_done(state); 343 drm_atomic_helper_commit_hw_done(state);
345 344
diff --git a/drivers/gpu/ipu-v3/ipu-image-convert.c b/drivers/gpu/ipu-v3/ipu-image-convert.c
index 2ba7d437a2af..805b6fa7b5f4 100644
--- a/drivers/gpu/ipu-v3/ipu-image-convert.c
+++ b/drivers/gpu/ipu-v3/ipu-image-convert.c
@@ -1617,7 +1617,7 @@ ipu_image_convert(struct ipu_soc *ipu, enum ipu_ic_task ic_task,
1617 ctx = ipu_image_convert_prepare(ipu, ic_task, in, out, rot_mode, 1617 ctx = ipu_image_convert_prepare(ipu, ic_task, in, out, rot_mode,
1618 complete, complete_context); 1618 complete, complete_context);
1619 if (IS_ERR(ctx)) 1619 if (IS_ERR(ctx))
1620 return ERR_PTR(PTR_ERR(ctx)); 1620 return ERR_CAST(ctx);
1621 1621
1622 run = kzalloc(sizeof(*run), GFP_KERNEL); 1622 run = kzalloc(sizeof(*run), GFP_KERNEL);
1623 if (!run) { 1623 if (!run) {
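
ERR_CAST() is the idiomatic way to forward an ERR_PTR-encoded error through a function that returns a different pointer type; ERR_PTR(PTR_ERR(x)) does the same thing with an extra round trip through an integer. A minimal sketch with made-up types:

#include <linux/err.h>

struct foo;	/* placeholder types, not from the patch */
struct bar;

static struct bar *foo_to_bar(struct foo *f)
{
	if (IS_ERR(f))
		return ERR_CAST(f);	/* same encoded errno, different pointer type */

	return NULL;			/* real conversion elided */
}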
diff --git a/drivers/input/mouse/focaltech.c b/drivers/input/mouse/focaltech.c
index 54eceb30ede5..a7d39689bbfb 100644
--- a/drivers/input/mouse/focaltech.c
+++ b/drivers/input/mouse/focaltech.c
@@ -43,7 +43,7 @@ int focaltech_detect(struct psmouse *psmouse, bool set_properties)
43 43
44 if (set_properties) { 44 if (set_properties) {
45 psmouse->vendor = "FocalTech"; 45 psmouse->vendor = "FocalTech";
46 psmouse->name = "FocalTech Touchpad"; 46 psmouse->name = "Touchpad";
47 } 47 }
48 48
49 return 0; 49 return 0;
@@ -146,8 +146,8 @@ static void focaltech_report_state(struct psmouse *psmouse)
146 } 146 }
147 input_mt_report_pointer_emulation(dev, true); 147 input_mt_report_pointer_emulation(dev, true);
148 148
149 input_report_key(psmouse->dev, BTN_LEFT, state->pressed); 149 input_report_key(dev, BTN_LEFT, state->pressed);
150 input_sync(psmouse->dev); 150 input_sync(dev);
151} 151}
152 152
153static void focaltech_process_touch_packet(struct psmouse *psmouse, 153static void focaltech_process_touch_packet(struct psmouse *psmouse,
diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h
index f4bfb4b2d50a..073246c7d163 100644
--- a/drivers/input/serio/i8042-x86ia64io.h
+++ b/drivers/input/serio/i8042-x86ia64io.h
@@ -877,6 +877,13 @@ static const struct dmi_system_id __initconst i8042_dmi_kbdreset_table[] = {
877 DMI_MATCH(DMI_PRODUCT_NAME, "P34"), 877 DMI_MATCH(DMI_PRODUCT_NAME, "P34"),
878 }, 878 },
879 }, 879 },
880 {
881 /* Schenker XMG C504 - Elantech touchpad */
882 .matches = {
883 DMI_MATCH(DMI_SYS_VENDOR, "XMG"),
884 DMI_MATCH(DMI_PRODUCT_NAME, "C504"),
885 },
886 },
880 { } 887 { }
881}; 888};
882 889
diff --git a/drivers/md/md.c b/drivers/md/md.c
index eac84d8ff724..2089d46b0eb8 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -3887,10 +3887,10 @@ array_state_show(struct mddev *mddev, char *page)
3887 st = read_auto; 3887 st = read_auto;
3888 break; 3888 break;
3889 case 0: 3889 case 0:
3890 if (mddev->in_sync) 3890 if (test_bit(MD_CHANGE_PENDING, &mddev->flags))
3891 st = clean;
3892 else if (test_bit(MD_CHANGE_PENDING, &mddev->flags))
3893 st = write_pending; 3891 st = write_pending;
3892 else if (mddev->in_sync)
3893 st = clean;
3894 else if (mddev->safemode) 3894 else if (mddev->safemode)
3895 st = active_idle; 3895 st = active_idle;
3896 else 3896 else
@@ -8144,14 +8144,14 @@ void md_do_sync(struct md_thread *thread)
8144 8144
8145 if (!test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) && 8145 if (!test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
8146 !test_bit(MD_RECOVERY_INTR, &mddev->recovery) && 8146 !test_bit(MD_RECOVERY_INTR, &mddev->recovery) &&
8147 mddev->curr_resync > 2) { 8147 mddev->curr_resync > 3) {
8148 mddev->curr_resync_completed = mddev->curr_resync; 8148 mddev->curr_resync_completed = mddev->curr_resync;
8149 sysfs_notify(&mddev->kobj, NULL, "sync_completed"); 8149 sysfs_notify(&mddev->kobj, NULL, "sync_completed");
8150 } 8150 }
8151 mddev->pers->sync_request(mddev, max_sectors, &skipped); 8151 mddev->pers->sync_request(mddev, max_sectors, &skipped);
8152 8152
8153 if (!test_bit(MD_RECOVERY_CHECK, &mddev->recovery) && 8153 if (!test_bit(MD_RECOVERY_CHECK, &mddev->recovery) &&
8154 mddev->curr_resync > 2) { 8154 mddev->curr_resync > 3) {
8155 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) { 8155 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
8156 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) { 8156 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
8157 if (mddev->curr_resync >= mddev->recovery_cp) { 8157 if (mddev->curr_resync >= mddev->recovery_cp) {
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 1961d827dbd1..29e2df5cd77b 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -403,11 +403,14 @@ static void raid1_end_write_request(struct bio *bio)
403 struct bio *to_put = NULL; 403 struct bio *to_put = NULL;
404 int mirror = find_bio_disk(r1_bio, bio); 404 int mirror = find_bio_disk(r1_bio, bio);
405 struct md_rdev *rdev = conf->mirrors[mirror].rdev; 405 struct md_rdev *rdev = conf->mirrors[mirror].rdev;
406 bool discard_error;
407
408 discard_error = bio->bi_error && bio_op(bio) == REQ_OP_DISCARD;
406 409
407 /* 410 /*
408 * 'one mirror IO has finished' event handler: 411 * 'one mirror IO has finished' event handler:
409 */ 412 */
410 if (bio->bi_error) { 413 if (bio->bi_error && !discard_error) {
411 set_bit(WriteErrorSeen, &rdev->flags); 414 set_bit(WriteErrorSeen, &rdev->flags);
412 if (!test_and_set_bit(WantReplacement, &rdev->flags)) 415 if (!test_and_set_bit(WantReplacement, &rdev->flags))
413 set_bit(MD_RECOVERY_NEEDED, & 416 set_bit(MD_RECOVERY_NEEDED, &
@@ -444,7 +447,7 @@ static void raid1_end_write_request(struct bio *bio)
444 447
445 /* Maybe we can clear some bad blocks. */ 448 /* Maybe we can clear some bad blocks. */
446 if (is_badblock(rdev, r1_bio->sector, r1_bio->sectors, 449 if (is_badblock(rdev, r1_bio->sector, r1_bio->sectors,
447 &first_bad, &bad_sectors)) { 450 &first_bad, &bad_sectors) && !discard_error) {
448 r1_bio->bios[mirror] = IO_MADE_GOOD; 451 r1_bio->bios[mirror] = IO_MADE_GOOD;
449 set_bit(R1BIO_MadeGood, &r1_bio->state); 452 set_bit(R1BIO_MadeGood, &r1_bio->state);
450 } 453 }
@@ -2294,17 +2297,23 @@ static void handle_read_error(struct r1conf *conf, struct r1bio *r1_bio)
2294 * This is all done synchronously while the array is 2297 * This is all done synchronously while the array is
2295 * frozen 2298 * frozen
2296 */ 2299 */
2300
2301 bio = r1_bio->bios[r1_bio->read_disk];
2302 bdevname(bio->bi_bdev, b);
2303 bio_put(bio);
2304 r1_bio->bios[r1_bio->read_disk] = NULL;
2305
2297 if (mddev->ro == 0) { 2306 if (mddev->ro == 0) {
2298 freeze_array(conf, 1); 2307 freeze_array(conf, 1);
2299 fix_read_error(conf, r1_bio->read_disk, 2308 fix_read_error(conf, r1_bio->read_disk,
2300 r1_bio->sector, r1_bio->sectors); 2309 r1_bio->sector, r1_bio->sectors);
2301 unfreeze_array(conf); 2310 unfreeze_array(conf);
2302 } else 2311 } else {
2303 md_error(mddev, conf->mirrors[r1_bio->read_disk].rdev); 2312 r1_bio->bios[r1_bio->read_disk] = IO_BLOCKED;
2313 }
2314
2304 rdev_dec_pending(conf->mirrors[r1_bio->read_disk].rdev, conf->mddev); 2315 rdev_dec_pending(conf->mirrors[r1_bio->read_disk].rdev, conf->mddev);
2305 2316
2306 bio = r1_bio->bios[r1_bio->read_disk];
2307 bdevname(bio->bi_bdev, b);
2308read_more: 2317read_more:
2309 disk = read_balance(conf, r1_bio, &max_sectors); 2318 disk = read_balance(conf, r1_bio, &max_sectors);
2310 if (disk == -1) { 2319 if (disk == -1) {
@@ -2315,11 +2324,6 @@ read_more:
2315 } else { 2324 } else {
2316 const unsigned long do_sync 2325 const unsigned long do_sync
2317 = r1_bio->master_bio->bi_opf & REQ_SYNC; 2326 = r1_bio->master_bio->bi_opf & REQ_SYNC;
2318 if (bio) {
2319 r1_bio->bios[r1_bio->read_disk] =
2320 mddev->ro ? IO_BLOCKED : NULL;
2321 bio_put(bio);
2322 }
2323 r1_bio->read_disk = disk; 2327 r1_bio->read_disk = disk;
2324 bio = bio_clone_mddev(r1_bio->master_bio, GFP_NOIO, mddev); 2328 bio = bio_clone_mddev(r1_bio->master_bio, GFP_NOIO, mddev);
2325 bio_trim(bio, r1_bio->sector - bio->bi_iter.bi_sector, 2329 bio_trim(bio, r1_bio->sector - bio->bi_iter.bi_sector,
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index be1a9fca3b2d..39fddda2fef2 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -447,6 +447,9 @@ static void raid10_end_write_request(struct bio *bio)
447 struct r10conf *conf = r10_bio->mddev->private; 447 struct r10conf *conf = r10_bio->mddev->private;
448 int slot, repl; 448 int slot, repl;
449 struct md_rdev *rdev = NULL; 449 struct md_rdev *rdev = NULL;
450 bool discard_error;
451
452 discard_error = bio->bi_error && bio_op(bio) == REQ_OP_DISCARD;
450 453
451 dev = find_bio_disk(conf, r10_bio, bio, &slot, &repl); 454 dev = find_bio_disk(conf, r10_bio, bio, &slot, &repl);
452 455
@@ -460,7 +463,7 @@ static void raid10_end_write_request(struct bio *bio)
460 /* 463 /*
461 * this branch is our 'one mirror IO has finished' event handler: 464 * this branch is our 'one mirror IO has finished' event handler:
462 */ 465 */
463 if (bio->bi_error) { 466 if (bio->bi_error && !discard_error) {
464 if (repl) 467 if (repl)
465 /* Never record new bad blocks to replacement, 468 /* Never record new bad blocks to replacement,
466 * just fail it. 469 * just fail it.
@@ -503,7 +506,7 @@ static void raid10_end_write_request(struct bio *bio)
503 if (is_badblock(rdev, 506 if (is_badblock(rdev,
504 r10_bio->devs[slot].addr, 507 r10_bio->devs[slot].addr,
505 r10_bio->sectors, 508 r10_bio->sectors,
506 &first_bad, &bad_sectors)) { 509 &first_bad, &bad_sectors) && !discard_error) {
507 bio_put(bio); 510 bio_put(bio);
508 if (repl) 511 if (repl)
509 r10_bio->devs[slot].repl_bio = IO_MADE_GOOD; 512 r10_bio->devs[slot].repl_bio = IO_MADE_GOOD;
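
The raid1 and raid10 hunks add the same rule: an error completing a REQ_OP_DISCARD bio is not treated as a write failure, so a member that cannot discard is neither flagged WriteErrorSeen nor given new bad-block entries. The predicate, restated on its own (bi_error is the pre-4.13 field name):

#include <linux/bio.h>

/* Restated from the hunks above; not a new helper in md itself. */
static bool discard_failed(struct bio *bio)
{
	return bio->bi_error && bio_op(bio) == REQ_OP_DISCARD;
}

In both end_write handlers this flag then gates the WriteErrorSeen path and the is_badblock() bookkeeping.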
diff --git a/drivers/md/raid5-cache.c b/drivers/md/raid5-cache.c
index 1b1ab4a1d132..a227a9f3ee65 100644
--- a/drivers/md/raid5-cache.c
+++ b/drivers/md/raid5-cache.c
@@ -1087,7 +1087,7 @@ static int r5l_recovery_log(struct r5l_log *log)
1087 * 1's seq + 10 and let superblock points to meta2. The same recovery will 1087 * 1's seq + 10 and let superblock points to meta2. The same recovery will
1088 * not think meta 3 is a valid meta, because its seq doesn't match 1088 * not think meta 3 is a valid meta, because its seq doesn't match
1089 */ 1089 */
1090 if (ctx.seq > log->last_cp_seq + 1) { 1090 if (ctx.seq > log->last_cp_seq) {
1091 int ret; 1091 int ret;
1092 1092
1093 ret = r5l_log_write_empty_meta_block(log, ctx.pos, ctx.seq + 10); 1093 ret = r5l_log_write_empty_meta_block(log, ctx.pos, ctx.seq + 10);
@@ -1096,6 +1096,8 @@ static int r5l_recovery_log(struct r5l_log *log)
1096 log->seq = ctx.seq + 11; 1096 log->seq = ctx.seq + 11;
1097 log->log_start = r5l_ring_add(log, ctx.pos, BLOCK_SECTORS); 1097 log->log_start = r5l_ring_add(log, ctx.pos, BLOCK_SECTORS);
1098 r5l_write_super(log, ctx.pos); 1098 r5l_write_super(log, ctx.pos);
1099 log->last_checkpoint = ctx.pos;
1100 log->next_checkpoint = ctx.pos;
1099 } else { 1101 } else {
1100 log->log_start = ctx.pos; 1102 log->log_start = ctx.pos;
1101 log->seq = ctx.seq; 1103 log->seq = ctx.seq;
@@ -1154,6 +1156,7 @@ create:
1154 if (create_super) { 1156 if (create_super) {
1155 log->last_cp_seq = prandom_u32(); 1157 log->last_cp_seq = prandom_u32();
1156 cp = 0; 1158 cp = 0;
1159 r5l_log_write_empty_meta_block(log, cp, log->last_cp_seq);
1157 /* 1160 /*
1158 * Make sure super points to correct address. Log might have 1161 * Make sure super points to correct address. Log might have
1159 * data very soon. If super hasn't correct log tail address, 1162 * data very soon. If super hasn't correct log tail address,
@@ -1168,6 +1171,7 @@ create:
1168 if (log->max_free_space > RECLAIM_MAX_FREE_SPACE) 1171 if (log->max_free_space > RECLAIM_MAX_FREE_SPACE)
1169 log->max_free_space = RECLAIM_MAX_FREE_SPACE; 1172 log->max_free_space = RECLAIM_MAX_FREE_SPACE;
1170 log->last_checkpoint = cp; 1173 log->last_checkpoint = cp;
1174 log->next_checkpoint = cp;
1171 1175
1172 __free_page(page); 1176 __free_page(page);
1173 1177
diff --git a/drivers/media/usb/b2c2/flexcop-usb.c b/drivers/media/usb/b2c2/flexcop-usb.c
index d4bdba60b0f7..52bc42da8a4c 100644
--- a/drivers/media/usb/b2c2/flexcop-usb.c
+++ b/drivers/media/usb/b2c2/flexcop-usb.c
@@ -73,23 +73,34 @@ static int flexcop_usb_readwrite_dw(struct flexcop_device *fc, u16 wRegOffsPCI,
73 u8 request_type = (read ? USB_DIR_IN : USB_DIR_OUT) | USB_TYPE_VENDOR; 73 u8 request_type = (read ? USB_DIR_IN : USB_DIR_OUT) | USB_TYPE_VENDOR;
74 u8 wAddress = B2C2_FLEX_PCIOFFSET_TO_INTERNALADDR(wRegOffsPCI) | 74 u8 wAddress = B2C2_FLEX_PCIOFFSET_TO_INTERNALADDR(wRegOffsPCI) |
75 (read ? 0x80 : 0); 75 (read ? 0x80 : 0);
76 int ret;
77
78 mutex_lock(&fc_usb->data_mutex);
79 if (!read)
80 memcpy(fc_usb->data, val, sizeof(*val));
76 81
77 int len = usb_control_msg(fc_usb->udev, 82 ret = usb_control_msg(fc_usb->udev,
78 read ? B2C2_USB_CTRL_PIPE_IN : B2C2_USB_CTRL_PIPE_OUT, 83 read ? B2C2_USB_CTRL_PIPE_IN : B2C2_USB_CTRL_PIPE_OUT,
79 request, 84 request,
80 request_type, /* 0xc0 read or 0x40 write */ 85 request_type, /* 0xc0 read or 0x40 write */
81 wAddress, 86 wAddress,
82 0, 87 0,
83 val, 88 fc_usb->data,
84 sizeof(u32), 89 sizeof(u32),
85 B2C2_WAIT_FOR_OPERATION_RDW * HZ); 90 B2C2_WAIT_FOR_OPERATION_RDW * HZ);
86 91
87 if (len != sizeof(u32)) { 92 if (ret != sizeof(u32)) {
88 err("error while %s dword from %d (%d).", read ? "reading" : 93 err("error while %s dword from %d (%d).", read ? "reading" :
89 "writing", wAddress, wRegOffsPCI); 94 "writing", wAddress, wRegOffsPCI);
90 return -EIO; 95 if (ret >= 0)
96 ret = -EIO;
91 } 97 }
92 return 0; 98
99 if (read && ret >= 0)
100 memcpy(val, fc_usb->data, sizeof(*val));
101 mutex_unlock(&fc_usb->data_mutex);
102
103 return ret;
93} 104}
94/* 105/*
95 * DKT 010817 - add support for V8 memory read/write and flash update 106 * DKT 010817 - add support for V8 memory read/write and flash update
@@ -100,9 +111,14 @@ static int flexcop_usb_v8_memory_req(struct flexcop_usb *fc_usb,
100{ 111{
101 u8 request_type = USB_TYPE_VENDOR; 112 u8 request_type = USB_TYPE_VENDOR;
102 u16 wIndex; 113 u16 wIndex;
103 int nWaitTime, pipe, len; 114 int nWaitTime, pipe, ret;
104 wIndex = page << 8; 115 wIndex = page << 8;
105 116
117 if (buflen > sizeof(fc_usb->data)) {
118 err("Buffer size bigger than max URB control message\n");
119 return -EIO;
120 }
121
106 switch (req) { 122 switch (req) {
107 case B2C2_USB_READ_V8_MEM: 123 case B2C2_USB_READ_V8_MEM:
108 nWaitTime = B2C2_WAIT_FOR_OPERATION_V8READ; 124 nWaitTime = B2C2_WAIT_FOR_OPERATION_V8READ;
@@ -127,17 +143,32 @@ static int flexcop_usb_v8_memory_req(struct flexcop_usb *fc_usb,
127 deb_v8("v8mem: %02x %02x %04x %04x, len: %d\n", request_type, req, 143 deb_v8("v8mem: %02x %02x %04x %04x, len: %d\n", request_type, req,
128 wAddress, wIndex, buflen); 144 wAddress, wIndex, buflen);
129 145
130 len = usb_control_msg(fc_usb->udev, pipe, 146 mutex_lock(&fc_usb->data_mutex);
147
148 if ((request_type & USB_ENDPOINT_DIR_MASK) == USB_DIR_OUT)
149 memcpy(fc_usb->data, pbBuffer, buflen);
150
151 ret = usb_control_msg(fc_usb->udev, pipe,
131 req, 152 req,
132 request_type, 153 request_type,
133 wAddress, 154 wAddress,
134 wIndex, 155 wIndex,
135 pbBuffer, 156 fc_usb->data,
136 buflen, 157 buflen,
137 nWaitTime * HZ); 158 nWaitTime * HZ);
159 if (ret != buflen)
160 ret = -EIO;
161
162 if (ret >= 0) {
163 ret = 0;
164 if ((request_type & USB_ENDPOINT_DIR_MASK) == USB_DIR_IN)
165 memcpy(pbBuffer, fc_usb->data, buflen);
166 }
138 167
139 debug_dump(pbBuffer, len, deb_v8); 168 mutex_unlock(&fc_usb->data_mutex);
140 return len == buflen ? 0 : -EIO; 169
170 debug_dump(pbBuffer, ret, deb_v8);
171 return ret;
141} 172}
142 173
143#define bytes_left_to_read_on_page(paddr,buflen) \ 174#define bytes_left_to_read_on_page(paddr,buflen) \
@@ -196,29 +227,6 @@ static int flexcop_usb_get_mac_addr(struct flexcop_device *fc, int extended)
196 fc->dvb_adapter.proposed_mac, 6); 227 fc->dvb_adapter.proposed_mac, 6);
197} 228}
198 229
199#if 0
200static int flexcop_usb_utility_req(struct flexcop_usb *fc_usb, int set,
201 flexcop_usb_utility_function_t func, u8 extra, u16 wIndex,
202 u16 buflen, u8 *pvBuffer)
203{
204 u16 wValue;
205 u8 request_type = (set ? USB_DIR_OUT : USB_DIR_IN) | USB_TYPE_VENDOR;
206 int nWaitTime = 2,
207 pipe = set ? B2C2_USB_CTRL_PIPE_OUT : B2C2_USB_CTRL_PIPE_IN, len;
208 wValue = (func << 8) | extra;
209
210 len = usb_control_msg(fc_usb->udev,pipe,
211 B2C2_USB_UTILITY,
212 request_type,
213 wValue,
214 wIndex,
215 pvBuffer,
216 buflen,
217 nWaitTime * HZ);
218 return len == buflen ? 0 : -EIO;
219}
220#endif
221
222/* usb i2c stuff */ 230/* usb i2c stuff */
223static int flexcop_usb_i2c_req(struct flexcop_i2c_adapter *i2c, 231static int flexcop_usb_i2c_req(struct flexcop_i2c_adapter *i2c,
224 flexcop_usb_request_t req, flexcop_usb_i2c_function_t func, 232 flexcop_usb_request_t req, flexcop_usb_i2c_function_t func,
@@ -226,9 +234,14 @@ static int flexcop_usb_i2c_req(struct flexcop_i2c_adapter *i2c,
226{ 234{
227 struct flexcop_usb *fc_usb = i2c->fc->bus_specific; 235 struct flexcop_usb *fc_usb = i2c->fc->bus_specific;
228 u16 wValue, wIndex; 236 u16 wValue, wIndex;
229 int nWaitTime,pipe,len; 237 int nWaitTime, pipe, ret;
230 u8 request_type = USB_TYPE_VENDOR; 238 u8 request_type = USB_TYPE_VENDOR;
231 239
240 if (buflen > sizeof(fc_usb->data)) {
241 err("Buffer size bigger than max URB control message\n");
242 return -EIO;
243 }
244
232 switch (func) { 245 switch (func) {
233 case USB_FUNC_I2C_WRITE: 246 case USB_FUNC_I2C_WRITE:
234 case USB_FUNC_I2C_MULTIWRITE: 247 case USB_FUNC_I2C_MULTIWRITE:
@@ -257,15 +270,32 @@ static int flexcop_usb_i2c_req(struct flexcop_i2c_adapter *i2c,
257 wValue & 0xff, wValue >> 8, 270 wValue & 0xff, wValue >> 8,
258 wIndex & 0xff, wIndex >> 8); 271 wIndex & 0xff, wIndex >> 8);
259 272
260 len = usb_control_msg(fc_usb->udev,pipe, 273 mutex_lock(&fc_usb->data_mutex);
274
275 if ((request_type & USB_ENDPOINT_DIR_MASK) == USB_DIR_OUT)
276 memcpy(fc_usb->data, buf, buflen);
277
278 ret = usb_control_msg(fc_usb->udev, pipe,
261 req, 279 req,
262 request_type, 280 request_type,
263 wValue, 281 wValue,
264 wIndex, 282 wIndex,
265 buf, 283 fc_usb->data,
266 buflen, 284 buflen,
267 nWaitTime * HZ); 285 nWaitTime * HZ);
268 return len == buflen ? 0 : -EREMOTEIO; 286
287 if (ret != buflen)
288 ret = -EIO;
289
290 if (ret >= 0) {
291 ret = 0;
292 if ((request_type & USB_ENDPOINT_DIR_MASK) == USB_DIR_IN)
293 memcpy(buf, fc_usb->data, buflen);
294 }
295
296 mutex_unlock(&fc_usb->data_mutex);
297
298 return 0;
269} 299}
270 300
271/* actual bus specific access functions, 301/* actual bus specific access functions,
@@ -516,6 +546,7 @@ static int flexcop_usb_probe(struct usb_interface *intf,
516 /* general flexcop init */ 546 /* general flexcop init */
517 fc_usb = fc->bus_specific; 547 fc_usb = fc->bus_specific;
518 fc_usb->fc_dev = fc; 548 fc_usb->fc_dev = fc;
549 mutex_init(&fc_usb->data_mutex);
519 550
520 fc->read_ibi_reg = flexcop_usb_read_ibi_reg; 551 fc->read_ibi_reg = flexcop_usb_read_ibi_reg;
521 fc->write_ibi_reg = flexcop_usb_write_ibi_reg; 552 fc->write_ibi_reg = flexcop_usb_write_ibi_reg;
diff --git a/drivers/media/usb/b2c2/flexcop-usb.h b/drivers/media/usb/b2c2/flexcop-usb.h
index 92529a9c4475..25ad43166e78 100644
--- a/drivers/media/usb/b2c2/flexcop-usb.h
+++ b/drivers/media/usb/b2c2/flexcop-usb.h
@@ -29,6 +29,10 @@ struct flexcop_usb {
29 29
30 u8 tmp_buffer[1023+190]; 30 u8 tmp_buffer[1023+190];
31 int tmp_buffer_length; 31 int tmp_buffer_length;
32
33 /* for URB control messages */
34 u8 data[80];
35 struct mutex data_mutex;
32}; 36};
33 37
34#if 0 38#if 0
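
The flexcop-usb changes, like the cpia2, af9005 and cinergyT2 ones that follow, fix the same underlying problem: USB transfer buffers handed to usb_control_msg() must be DMA-capable, so they cannot live on the stack or in on-stack arrays. Here the driver switches to a buffer embedded in the kzalloc'd device state and serializes access with a mutex; cpia2 below instead kmallocs a temporary buffer per call, trading a small allocation for lock-free callers. A minimal sketch of the bounce-buffer pattern, with invented names and a made-up vendor request:

#include <linux/usb.h>
#include <linux/mutex.h>
#include <linux/errno.h>

struct example_usb {
	struct usb_device *udev;
	struct mutex data_mutex;
	u8 data[64];	/* heap memory (part of a kzalloc'd struct), never the stack */
};

static int example_read_reg(struct example_usb *d, u16 reg, u8 *val)
{
	int ret;

	mutex_lock(&d->data_mutex);
	ret = usb_control_msg(d->udev, usb_rcvctrlpipe(d->udev, 0),
			      0x01,	/* vendor-specific request, illustrative only */
			      USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
			      reg, 0,	/* wValue, wIndex */
			      d->data, 1,	/* DMA-able bounce buffer */
			      1000);
	if (ret == 1) {
		*val = d->data[0];
		ret = 0;
	} else if (ret >= 0) {
		ret = -EIO;	/* short transfer */
	}
	mutex_unlock(&d->data_mutex);

	return ret;
}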
diff --git a/drivers/media/usb/cpia2/cpia2_usb.c b/drivers/media/usb/cpia2/cpia2_usb.c
index 13620cdf0599..e9100a235831 100644
--- a/drivers/media/usb/cpia2/cpia2_usb.c
+++ b/drivers/media/usb/cpia2/cpia2_usb.c
@@ -545,18 +545,30 @@ static void free_sbufs(struct camera_data *cam)
545static int write_packet(struct usb_device *udev, 545static int write_packet(struct usb_device *udev,
546 u8 request, u8 * registers, u16 start, size_t size) 546 u8 request, u8 * registers, u16 start, size_t size)
547{ 547{
548 unsigned char *buf;
549 int ret;
550
548 if (!registers || size <= 0) 551 if (!registers || size <= 0)
549 return -EINVAL; 552 return -EINVAL;
550 553
551 return usb_control_msg(udev, 554 buf = kmalloc(size, GFP_KERNEL);
555 if (!buf)
556 return -ENOMEM;
557
558 memcpy(buf, registers, size);
559
560 ret = usb_control_msg(udev,
552 usb_sndctrlpipe(udev, 0), 561 usb_sndctrlpipe(udev, 0),
553 request, 562 request,
554 USB_TYPE_VENDOR | USB_RECIP_DEVICE, 563 USB_TYPE_VENDOR | USB_RECIP_DEVICE,
555 start, /* value */ 564 start, /* value */
556 0, /* index */ 565 0, /* index */
557 registers, /* buffer */ 566 buf, /* buffer */
558 size, 567 size,
559 HZ); 568 HZ);
569
570 kfree(buf);
571 return ret;
560} 572}
561 573
562/**************************************************************************** 574/****************************************************************************
@@ -567,18 +579,32 @@ static int write_packet(struct usb_device *udev,
567static int read_packet(struct usb_device *udev, 579static int read_packet(struct usb_device *udev,
568 u8 request, u8 * registers, u16 start, size_t size) 580 u8 request, u8 * registers, u16 start, size_t size)
569{ 581{
582 unsigned char *buf;
583 int ret;
584
570 if (!registers || size <= 0) 585 if (!registers || size <= 0)
571 return -EINVAL; 586 return -EINVAL;
572 587
573 return usb_control_msg(udev, 588 buf = kmalloc(size, GFP_KERNEL);
589 if (!buf)
590 return -ENOMEM;
591
592 ret = usb_control_msg(udev,
574 usb_rcvctrlpipe(udev, 0), 593 usb_rcvctrlpipe(udev, 0),
575 request, 594 request,
576 USB_DIR_IN|USB_TYPE_VENDOR|USB_RECIP_DEVICE, 595 USB_DIR_IN|USB_TYPE_VENDOR|USB_RECIP_DEVICE,
577 start, /* value */ 596 start, /* value */
578 0, /* index */ 597 0, /* index */
579 registers, /* buffer */ 598 buf, /* buffer */
580 size, 599 size,
581 HZ); 600 HZ);
601
602 if (ret >= 0)
603 memcpy(registers, buf, size);
604
605 kfree(buf);
606
607 return ret;
582} 608}
583 609
584/****************************************************************************** 610/******************************************************************************
diff --git a/drivers/media/usb/dvb-usb/af9005.c b/drivers/media/usb/dvb-usb/af9005.c
index efa782ed6e2d..b257780fb380 100644
--- a/drivers/media/usb/dvb-usb/af9005.c
+++ b/drivers/media/usb/dvb-usb/af9005.c
@@ -52,17 +52,16 @@ u8 regmask[8] = { 0x01, 0x03, 0x07, 0x0f, 0x1f, 0x3f, 0x7f, 0xff };
52struct af9005_device_state { 52struct af9005_device_state {
53 u8 sequence; 53 u8 sequence;
54 int led_state; 54 int led_state;
55 unsigned char data[256];
56 struct mutex data_mutex;
55}; 57};
56 58
57static int af9005_generic_read_write(struct dvb_usb_device *d, u16 reg, 59static int af9005_generic_read_write(struct dvb_usb_device *d, u16 reg,
58 int readwrite, int type, u8 * values, int len) 60 int readwrite, int type, u8 * values, int len)
59{ 61{
60 struct af9005_device_state *st = d->priv; 62 struct af9005_device_state *st = d->priv;
61 u8 obuf[16] = { 0 }; 63 u8 command, seq;
62 u8 ibuf[17] = { 0 }; 64 int i, ret;
63 u8 command;
64 int i;
65 int ret;
66 65
67 if (len < 1) { 66 if (len < 1) {
68 err("generic read/write, less than 1 byte. Makes no sense."); 67 err("generic read/write, less than 1 byte. Makes no sense.");
@@ -73,16 +72,17 @@ static int af9005_generic_read_write(struct dvb_usb_device *d, u16 reg,
73 return -EINVAL; 72 return -EINVAL;
74 } 73 }
75 74
76 obuf[0] = 14; /* rest of buffer length low */ 75 mutex_lock(&st->data_mutex);
77 obuf[1] = 0; /* rest of buffer length high */ 76 st->data[0] = 14; /* rest of buffer length low */
77 st->data[1] = 0; /* rest of buffer length high */
78 78
79 obuf[2] = AF9005_REGISTER_RW; /* register operation */ 79 st->data[2] = AF9005_REGISTER_RW; /* register operation */
80 obuf[3] = 12; /* rest of buffer length */ 80 st->data[3] = 12; /* rest of buffer length */
81 81
82 obuf[4] = st->sequence++; /* sequence number */ 82 st->data[4] = seq = st->sequence++; /* sequence number */
83 83
84 obuf[5] = (u8) (reg >> 8); /* register address */ 84 st->data[5] = (u8) (reg >> 8); /* register address */
85 obuf[6] = (u8) (reg & 0xff); 85 st->data[6] = (u8) (reg & 0xff);
86 86
87 if (type == AF9005_OFDM_REG) { 87 if (type == AF9005_OFDM_REG) {
88 command = AF9005_CMD_OFDM_REG; 88 command = AF9005_CMD_OFDM_REG;
@@ -96,51 +96,52 @@ static int af9005_generic_read_write(struct dvb_usb_device *d, u16 reg,
96 command |= readwrite; 96 command |= readwrite;
97 if (readwrite == AF9005_CMD_WRITE) 97 if (readwrite == AF9005_CMD_WRITE)
98 for (i = 0; i < len; i++) 98 for (i = 0; i < len; i++)
99 obuf[8 + i] = values[i]; 99 st->data[8 + i] = values[i];
100 else if (type == AF9005_TUNER_REG) 100 else if (type == AF9005_TUNER_REG)
101 /* read command for tuner, the first byte contains the i2c address */ 101 /* read command for tuner, the first byte contains the i2c address */
102 obuf[8] = values[0]; 102 st->data[8] = values[0];
103 obuf[7] = command; 103 st->data[7] = command;
104 104
105 ret = dvb_usb_generic_rw(d, obuf, 16, ibuf, 17, 0); 105 ret = dvb_usb_generic_rw(d, st->data, 16, st->data, 17, 0);
106 if (ret) 106 if (ret)
107 return ret; 107 goto ret;
108 108
109 /* sanity check */ 109 /* sanity check */
110 if (ibuf[2] != AF9005_REGISTER_RW_ACK) { 110 if (st->data[2] != AF9005_REGISTER_RW_ACK) {
111 err("generic read/write, wrong reply code."); 111 err("generic read/write, wrong reply code.");
112 return -EIO; 112 ret = -EIO;
113 goto ret;
113 } 114 }
114 if (ibuf[3] != 0x0d) { 115 if (st->data[3] != 0x0d) {
115 err("generic read/write, wrong length in reply."); 116 err("generic read/write, wrong length in reply.");
116 return -EIO; 117 ret = -EIO;
118 goto ret;
117 } 119 }
118 if (ibuf[4] != obuf[4]) { 120 if (st->data[4] != seq) {
119 err("generic read/write, wrong sequence in reply."); 121 err("generic read/write, wrong sequence in reply.");
120 return -EIO; 122 ret = -EIO;
123 goto ret;
121 } 124 }
122 /* 125 /*
123 Windows driver doesn't check these fields, in fact sometimes 126 * In thesis, both input and output buffers should have
124 the register in the reply is different that what has been sent 127 * identical values for st->data[5] to st->data[8].
125 128 * However, windows driver doesn't check these fields, in fact
126 if (ibuf[5] != obuf[5] || ibuf[6] != obuf[6]) { 129 * sometimes the register in the reply is different that what
127 err("generic read/write, wrong register in reply."); 130 * has been sent
128 return -EIO;
129 }
130 if (ibuf[7] != command) {
131 err("generic read/write wrong command in reply.");
132 return -EIO;
133 }
134 */ 131 */
135 if (ibuf[16] != 0x01) { 132 if (st->data[16] != 0x01) {
136 err("generic read/write wrong status code in reply."); 133 err("generic read/write wrong status code in reply.");
137 return -EIO; 134 ret = -EIO;
135 goto ret;
138 } 136 }
137
139 if (readwrite == AF9005_CMD_READ) 138 if (readwrite == AF9005_CMD_READ)
140 for (i = 0; i < len; i++) 139 for (i = 0; i < len; i++)
141 values[i] = ibuf[8 + i]; 140 values[i] = st->data[8 + i];
142 141
143 return 0; 142ret:
143 mutex_unlock(&st->data_mutex);
144 return ret;
144 145
145} 146}
146 147
@@ -464,8 +465,7 @@ int af9005_send_command(struct dvb_usb_device *d, u8 command, u8 * wbuf,
464 struct af9005_device_state *st = d->priv; 465 struct af9005_device_state *st = d->priv;
465 466
466 int ret, i, packet_len; 467 int ret, i, packet_len;
467 u8 buf[64]; 468 u8 seq;
468 u8 ibuf[64];
469 469
470 if (wlen < 0) { 470 if (wlen < 0) {
471 err("send command, wlen less than 0 bytes. Makes no sense."); 471 err("send command, wlen less than 0 bytes. Makes no sense.");
@@ -480,94 +480,97 @@ int af9005_send_command(struct dvb_usb_device *d, u8 command, u8 * wbuf,
480 return -EINVAL; 480 return -EINVAL;
481 } 481 }
482 packet_len = wlen + 5; 482 packet_len = wlen + 5;
483 buf[0] = (u8) (packet_len & 0xff); 483
484 buf[1] = (u8) ((packet_len & 0xff00) >> 8); 484 mutex_lock(&st->data_mutex);
485 485
486 buf[2] = 0x26; /* packet type */ 486 st->data[0] = (u8) (packet_len & 0xff);
487 buf[3] = wlen + 3; 487 st->data[1] = (u8) ((packet_len & 0xff00) >> 8);
488 buf[4] = st->sequence++; 488
489 buf[5] = command; 489 st->data[2] = 0x26; /* packet type */
490 buf[6] = wlen; 490 st->data[3] = wlen + 3;
491 st->data[4] = seq = st->sequence++;
492 st->data[5] = command;
493 st->data[6] = wlen;
491 for (i = 0; i < wlen; i++) 494 for (i = 0; i < wlen; i++)
492 buf[7 + i] = wbuf[i]; 495 st->data[7 + i] = wbuf[i];
493 ret = dvb_usb_generic_rw(d, buf, wlen + 7, ibuf, rlen + 7, 0); 496 ret = dvb_usb_generic_rw(d, st->data, wlen + 7, st->data, rlen + 7, 0);
494 if (ret) 497 if (st->data[2] != 0x27) {
495 return ret;
496 if (ibuf[2] != 0x27) {
497 err("send command, wrong reply code."); 498 err("send command, wrong reply code.");
498 return -EIO; 499 ret = -EIO;
499 } 500 } else if (st->data[4] != seq) {
500 if (ibuf[4] != buf[4]) {
501 err("send command, wrong sequence in reply."); 501 err("send command, wrong sequence in reply.");
502 return -EIO; 502 ret = -EIO;
503 } 503 } else if (st->data[5] != 0x01) {
504 if (ibuf[5] != 0x01) {
505 err("send command, wrong status code in reply."); 504 err("send command, wrong status code in reply.");
506 return -EIO; 505 ret = -EIO;
507 } 506 } else if (st->data[6] != rlen) {
508 if (ibuf[6] != rlen) {
509 err("send command, invalid data length in reply."); 507 err("send command, invalid data length in reply.");
510 return -EIO; 508 ret = -EIO;
511 } 509 }
512 for (i = 0; i < rlen; i++) 510 if (!ret) {
513 rbuf[i] = ibuf[i + 7]; 511 for (i = 0; i < rlen; i++)
514 return 0; 512 rbuf[i] = st->data[i + 7];
513 }
514
515 mutex_unlock(&st->data_mutex);
516 return ret;
515} 517}
516 518
517int af9005_read_eeprom(struct dvb_usb_device *d, u8 address, u8 * values, 519int af9005_read_eeprom(struct dvb_usb_device *d, u8 address, u8 * values,
518 int len) 520 int len)
519{ 521{
520 struct af9005_device_state *st = d->priv; 522 struct af9005_device_state *st = d->priv;
521 u8 obuf[16], ibuf[14]; 523 u8 seq;
522 int ret, i; 524 int ret, i;
523 525
524 memset(obuf, 0, sizeof(obuf)); 526 mutex_lock(&st->data_mutex);
525 memset(ibuf, 0, sizeof(ibuf));
526 527
527 obuf[0] = 14; /* length of rest of packet low */ 528 memset(st->data, 0, sizeof(st->data));
528 obuf[1] = 0; /* length of rest of packer high */
529 529
530 obuf[2] = 0x2a; /* read/write eeprom */ 530 st->data[0] = 14; /* length of rest of packet low */
531 st->data[1] = 0; /* length of rest of packer high */
531 532
532 obuf[3] = 12; /* size */ 533 st->data[2] = 0x2a; /* read/write eeprom */
533 534
534 obuf[4] = st->sequence++; 535 st->data[3] = 12; /* size */
535 536
536 obuf[5] = 0; /* read */ 537 st->data[4] = seq = st->sequence++;
537 538
538 obuf[6] = len; 539 st->data[5] = 0; /* read */
539 obuf[7] = address; 540
540 ret = dvb_usb_generic_rw(d, obuf, 16, ibuf, 14, 0); 541 st->data[6] = len;
541 if (ret) 542 st->data[7] = address;
542 return ret; 543 ret = dvb_usb_generic_rw(d, st->data, 16, st->data, 14, 0);
543 if (ibuf[2] != 0x2b) { 544 if (st->data[2] != 0x2b) {
544 err("Read eeprom, invalid reply code"); 545 err("Read eeprom, invalid reply code");
545 return -EIO; 546 ret = -EIO;
546 } 547 } else if (st->data[3] != 10) {
547 if (ibuf[3] != 10) {
548 err("Read eeprom, invalid reply length"); 548 err("Read eeprom, invalid reply length");
549 return -EIO; 549 ret = -EIO;
550 } 550 } else if (st->data[4] != seq) {
551 if (ibuf[4] != obuf[4]) {
552 err("Read eeprom, wrong sequence in reply "); 551 err("Read eeprom, wrong sequence in reply ");
553 return -EIO; 552 ret = -EIO;
554 } 553 } else if (st->data[5] != 1) {
555 if (ibuf[5] != 1) {
556 err("Read eeprom, wrong status in reply "); 554 err("Read eeprom, wrong status in reply ");
557 return -EIO; 555 ret = -EIO;
558 } 556 }
559 for (i = 0; i < len; i++) { 557
560 values[i] = ibuf[6 + i]; 558 if (!ret) {
559 for (i = 0; i < len; i++)
560 values[i] = st->data[6 + i];
561 } 561 }
562 return 0; 562 mutex_unlock(&st->data_mutex);
563
564 return ret;
563} 565}
564 566
565static int af9005_boot_packet(struct usb_device *udev, int type, u8 * reply) 567static int af9005_boot_packet(struct usb_device *udev, int type, u8 *reply,
568 u8 *buf, int size)
566{ 569{
567 u8 buf[FW_BULKOUT_SIZE + 2];
568 u16 checksum; 570 u16 checksum;
569 int act_len, i, ret; 571 int act_len, i, ret;
570 memset(buf, 0, sizeof(buf)); 572
573 memset(buf, 0, size);
571 buf[0] = (u8) (FW_BULKOUT_SIZE & 0xff); 574 buf[0] = (u8) (FW_BULKOUT_SIZE & 0xff);
572 buf[1] = (u8) ((FW_BULKOUT_SIZE >> 8) & 0xff); 575 buf[1] = (u8) ((FW_BULKOUT_SIZE >> 8) & 0xff);
573 switch (type) { 576 switch (type) {
@@ -720,15 +723,21 @@ static int af9005_download_firmware(struct usb_device *udev, const struct firmwa
720{ 723{
721 int i, packets, ret, act_len; 724 int i, packets, ret, act_len;
722 725
723 u8 buf[FW_BULKOUT_SIZE + 2]; 726 u8 *buf;
724 u8 reply; 727 u8 reply;
725 728
726 ret = af9005_boot_packet(udev, FW_CONFIG, &reply); 729 buf = kmalloc(FW_BULKOUT_SIZE + 2, GFP_KERNEL);
730 if (!buf)
731 return -ENOMEM;
732
733 ret = af9005_boot_packet(udev, FW_CONFIG, &reply, buf,
734 FW_BULKOUT_SIZE + 2);
727 if (ret) 735 if (ret)
728 return ret; 736 goto err;
729 if (reply != 0x01) { 737 if (reply != 0x01) {
730 err("before downloading firmware, FW_CONFIG expected 0x01, received 0x%x", reply); 738 err("before downloading firmware, FW_CONFIG expected 0x01, received 0x%x", reply);
731 return -EIO; 739 ret = -EIO;
740 goto err;
732 } 741 }
733 packets = fw->size / FW_BULKOUT_SIZE; 742 packets = fw->size / FW_BULKOUT_SIZE;
734 buf[0] = (u8) (FW_BULKOUT_SIZE & 0xff); 743 buf[0] = (u8) (FW_BULKOUT_SIZE & 0xff);
@@ -743,28 +752,35 @@ static int af9005_download_firmware(struct usb_device *udev, const struct firmwa
743 buf, FW_BULKOUT_SIZE + 2, &act_len, 1000); 752 buf, FW_BULKOUT_SIZE + 2, &act_len, 1000);
744 if (ret) { 753 if (ret) {
745 err("firmware download failed at packet %d with code %d", i, ret); 754 err("firmware download failed at packet %d with code %d", i, ret);
746 return ret; 755 goto err;
747 } 756 }
748 } 757 }
749 ret = af9005_boot_packet(udev, FW_CONFIRM, &reply); 758 ret = af9005_boot_packet(udev, FW_CONFIRM, &reply,
759 buf, FW_BULKOUT_SIZE + 2);
750 if (ret) 760 if (ret)
751 return ret; 761 goto err;
752 if (reply != (u8) (packets & 0xff)) { 762 if (reply != (u8) (packets & 0xff)) {
753 err("after downloading firmware, FW_CONFIRM expected 0x%x, received 0x%x", packets & 0xff, reply); 763 err("after downloading firmware, FW_CONFIRM expected 0x%x, received 0x%x", packets & 0xff, reply);
754 return -EIO; 764 ret = -EIO;
765 goto err;
755 } 766 }
756 ret = af9005_boot_packet(udev, FW_BOOT, &reply); 767 ret = af9005_boot_packet(udev, FW_BOOT, &reply, buf,
768 FW_BULKOUT_SIZE + 2);
757 if (ret) 769 if (ret)
758 return ret; 770 goto err;
759 ret = af9005_boot_packet(udev, FW_CONFIG, &reply); 771 ret = af9005_boot_packet(udev, FW_CONFIG, &reply, buf,
772 FW_BULKOUT_SIZE + 2);
760 if (ret) 773 if (ret)
761 return ret; 774 goto err;
762 if (reply != 0x02) { 775 if (reply != 0x02) {
763 err("after downloading firmware, FW_CONFIG expected 0x02, received 0x%x", reply); 776 err("after downloading firmware, FW_CONFIG expected 0x02, received 0x%x", reply);
764 return -EIO; 777 ret = -EIO;
778 goto err;
765 } 779 }
766 780
767 return 0; 781err:
782 kfree(buf);
783 return ret;
768 784
769} 785}
770 786
@@ -823,53 +839,59 @@ static int af9005_rc_query(struct dvb_usb_device *d, u32 * event, int *state)
823{ 839{
824 struct af9005_device_state *st = d->priv; 840 struct af9005_device_state *st = d->priv;
825 int ret, len; 841 int ret, len;
826 842 u8 seq;
827 u8 obuf[5];
828 u8 ibuf[256];
829 843
830 *state = REMOTE_NO_KEY_PRESSED; 844 *state = REMOTE_NO_KEY_PRESSED;
831 if (rc_decode == NULL) { 845 if (rc_decode == NULL) {
832 /* it shouldn't never come here */ 846 /* it shouldn't never come here */
833 return 0; 847 return 0;
834 } 848 }
849
850 mutex_lock(&st->data_mutex);
851
835 /* deb_info("rc_query\n"); */ 852 /* deb_info("rc_query\n"); */
836 obuf[0] = 3; /* rest of packet length low */ 853 st->data[0] = 3; /* rest of packet length low */
837 obuf[1] = 0; /* rest of packet lentgh high */ 854 st->data[1] = 0; /* rest of packet lentgh high */
838 obuf[2] = 0x40; /* read remote */ 855 st->data[2] = 0x40; /* read remote */
839 obuf[3] = 1; /* rest of packet length */ 856 st->data[3] = 1; /* rest of packet length */
840 obuf[4] = st->sequence++; /* sequence number */ 857 st->data[4] = seq = st->sequence++; /* sequence number */
841 ret = dvb_usb_generic_rw(d, obuf, 5, ibuf, 256, 0); 858 ret = dvb_usb_generic_rw(d, st->data, 5, st->data, 256, 0);
842 if (ret) { 859 if (ret) {
843 err("rc query failed"); 860 err("rc query failed");
844 return ret; 861 goto ret;
845 } 862 }
846 if (ibuf[2] != 0x41) { 863 if (st->data[2] != 0x41) {
847 err("rc query bad header."); 864 err("rc query bad header.");
848 return -EIO; 865 ret = -EIO;
849 } 866 goto ret;
850 if (ibuf[4] != obuf[4]) { 867 } else if (st->data[4] != seq) {
851 err("rc query bad sequence."); 868 err("rc query bad sequence.");
852 return -EIO; 869 ret = -EIO;
870 goto ret;
853 } 871 }
854 len = ibuf[5]; 872 len = st->data[5];
855 if (len > 246) { 873 if (len > 246) {
856 err("rc query invalid length"); 874 err("rc query invalid length");
857 return -EIO; 875 ret = -EIO;
876 goto ret;
858 } 877 }
859 if (len > 0) { 878 if (len > 0) {
860 deb_rc("rc data (%d) ", len); 879 deb_rc("rc data (%d) ", len);
861 debug_dump((ibuf + 6), len, deb_rc); 880 debug_dump((st->data + 6), len, deb_rc);
862 ret = rc_decode(d, &ibuf[6], len, event, state); 881 ret = rc_decode(d, &st->data[6], len, event, state);
863 if (ret) { 882 if (ret) {
864 err("rc_decode failed"); 883 err("rc_decode failed");
865 return ret; 884 goto ret;
866 } else { 885 } else {
867 deb_rc("rc_decode state %x event %x\n", *state, *event); 886 deb_rc("rc_decode state %x event %x\n", *state, *event);
868 if (*state == REMOTE_KEY_REPEAT) 887 if (*state == REMOTE_KEY_REPEAT)
869 *event = d->last_event; 888 *event = d->last_event;
870 } 889 }
871 } 890 }
872 return 0; 891
892ret:
893 mutex_unlock(&st->data_mutex);
894 return ret;
873} 895}
874 896
875static int af9005_power_ctrl(struct dvb_usb_device *d, int onoff) 897static int af9005_power_ctrl(struct dvb_usb_device *d, int onoff)
@@ -953,10 +975,16 @@ static int af9005_identify_state(struct usb_device *udev,
953 int *cold) 975 int *cold)
954{ 976{
955 int ret; 977 int ret;
956 u8 reply; 978 u8 reply, *buf;
957 ret = af9005_boot_packet(udev, FW_CONFIG, &reply); 979
980 buf = kmalloc(FW_BULKOUT_SIZE + 2, GFP_KERNEL);
981 if (!buf)
982 return -ENOMEM;
983
984 ret = af9005_boot_packet(udev, FW_CONFIG, &reply,
985 buf, FW_BULKOUT_SIZE + 2);
958 if (ret) 986 if (ret)
959 return ret; 987 goto err;
960 deb_info("result of FW_CONFIG in identify state %d\n", reply); 988 deb_info("result of FW_CONFIG in identify state %d\n", reply);
961 if (reply == 0x01) 989 if (reply == 0x01)
962 *cold = 1; 990 *cold = 1;
@@ -965,7 +993,10 @@ static int af9005_identify_state(struct usb_device *udev,
965 else 993 else
966 return -EIO; 994 return -EIO;
967 deb_info("Identify state cold = %d\n", *cold); 995 deb_info("Identify state cold = %d\n", *cold);
968 return 0; 996
997err:
998 kfree(buf);
999 return ret;
969} 1000}
970 1001
971static struct dvb_usb_device_properties af9005_properties; 1002static struct dvb_usb_device_properties af9005_properties;
@@ -973,8 +1004,20 @@ static struct dvb_usb_device_properties af9005_properties;
973static int af9005_usb_probe(struct usb_interface *intf, 1004static int af9005_usb_probe(struct usb_interface *intf,
974 const struct usb_device_id *id) 1005 const struct usb_device_id *id)
975{ 1006{
976 return dvb_usb_device_init(intf, &af9005_properties, 1007 struct dvb_usb_device *d;
977 THIS_MODULE, NULL, adapter_nr); 1008 struct af9005_device_state *st;
1009 int ret;
1010
1011 ret = dvb_usb_device_init(intf, &af9005_properties,
1012 THIS_MODULE, &d, adapter_nr);
1013
1014 if (ret < 0)
1015 return ret;
1016
1017 st = d->priv;
1018 mutex_init(&st->data_mutex);
1019
1020 return 0;
978} 1021}
979 1022
980enum af9005_usb_table_entry { 1023enum af9005_usb_table_entry {
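
Besides moving the buffers into the device private state, the af9005 rework (and the cinergyT2 one below) reshapes the error handling: every command takes the mutex, all exits funnel through a single unlock label, and the reply's sequence byte is checked against the one that was sent. A compressed sketch of that shape; the names are invented and example_xfer() merely stands in for dvb_usb_generic_rw().

#include <linux/mutex.h>
#include <linux/errno.h>

struct example_state {
	struct mutex data_mutex;
	u8 sequence;
	u8 data[64];
};

static int example_xfer(struct example_state *st);	/* placeholder transfer */

static int example_cmd(struct example_state *st)
{
	u8 seq;
	int ret;

	mutex_lock(&st->data_mutex);
	st->data[4] = seq = st->sequence++;	/* sequence number goes out with the request */

	ret = example_xfer(st);
	if (ret)
		goto unlock;

	if (st->data[4] != seq) {	/* reply must echo the sequence */
		ret = -EIO;
		goto unlock;
	}

unlock:
	mutex_unlock(&st->data_mutex);
	return ret;
}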
diff --git a/drivers/media/usb/dvb-usb/cinergyT2-core.c b/drivers/media/usb/dvb-usb/cinergyT2-core.c
index 9fd1527494eb..8ac825413d5a 100644
--- a/drivers/media/usb/dvb-usb/cinergyT2-core.c
+++ b/drivers/media/usb/dvb-usb/cinergyT2-core.c
@@ -41,6 +41,8 @@ DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr);
41 41
42struct cinergyt2_state { 42struct cinergyt2_state {
43 u8 rc_counter; 43 u8 rc_counter;
44 unsigned char data[64];
45 struct mutex data_mutex;
44}; 46};
45 47
46/* We are missing a release hook with usb_device data */ 48/* We are missing a release hook with usb_device data */
@@ -50,38 +52,57 @@ static struct dvb_usb_device_properties cinergyt2_properties;
50 52
51static int cinergyt2_streaming_ctrl(struct dvb_usb_adapter *adap, int enable) 53static int cinergyt2_streaming_ctrl(struct dvb_usb_adapter *adap, int enable)
52{ 54{
53 char buf[] = { CINERGYT2_EP1_CONTROL_STREAM_TRANSFER, enable ? 1 : 0 }; 55 struct dvb_usb_device *d = adap->dev;
54 char result[64]; 56 struct cinergyt2_state *st = d->priv;
55 return dvb_usb_generic_rw(adap->dev, buf, sizeof(buf), result, 57 int ret;
56 sizeof(result), 0); 58
59 mutex_lock(&st->data_mutex);
60 st->data[0] = CINERGYT2_EP1_CONTROL_STREAM_TRANSFER;
61 st->data[1] = enable ? 1 : 0;
62
63 ret = dvb_usb_generic_rw(d, st->data, 2, st->data, 64, 0);
64 mutex_unlock(&st->data_mutex);
65
66 return ret;
57} 67}
58 68
59static int cinergyt2_power_ctrl(struct dvb_usb_device *d, int enable) 69static int cinergyt2_power_ctrl(struct dvb_usb_device *d, int enable)
60{ 70{
61 char buf[] = { CINERGYT2_EP1_SLEEP_MODE, enable ? 0 : 1 }; 71 struct cinergyt2_state *st = d->priv;
62 char state[3]; 72 int ret;
63 return dvb_usb_generic_rw(d, buf, sizeof(buf), state, sizeof(state), 0); 73
74 mutex_lock(&st->data_mutex);
75 st->data[0] = CINERGYT2_EP1_SLEEP_MODE;
76 st->data[1] = enable ? 0 : 1;
77
78 ret = dvb_usb_generic_rw(d, st->data, 2, st->data, 3, 0);
79 mutex_unlock(&st->data_mutex);
80
81 return ret;
64} 82}
65 83
66static int cinergyt2_frontend_attach(struct dvb_usb_adapter *adap) 84static int cinergyt2_frontend_attach(struct dvb_usb_adapter *adap)
67{ 85{
68 char query[] = { CINERGYT2_EP1_GET_FIRMWARE_VERSION }; 86 struct dvb_usb_device *d = adap->dev;
69 char state[3]; 87 struct cinergyt2_state *st = d->priv;
70 int ret; 88 int ret;
71 89
72 adap->fe_adap[0].fe = cinergyt2_fe_attach(adap->dev); 90 adap->fe_adap[0].fe = cinergyt2_fe_attach(adap->dev);
73 91
74 ret = dvb_usb_generic_rw(adap->dev, query, sizeof(query), state, 92 mutex_lock(&st->data_mutex);
75 sizeof(state), 0); 93 st->data[0] = CINERGYT2_EP1_GET_FIRMWARE_VERSION;
94
95 ret = dvb_usb_generic_rw(d, st->data, 1, st->data, 3, 0);
76 if (ret < 0) { 96 if (ret < 0) {
77 deb_rc("cinergyt2_power_ctrl() Failed to retrieve sleep " 97 deb_rc("cinergyt2_power_ctrl() Failed to retrieve sleep "
78 "state info\n"); 98 "state info\n");
79 } 99 }
100 mutex_unlock(&st->data_mutex);
80 101
81 /* Copy this pointer as we are gonna need it in the release phase */ 102 /* Copy this pointer as we are gonna need it in the release phase */
82 cinergyt2_usb_device = adap->dev; 103 cinergyt2_usb_device = adap->dev;
83 104
84 return 0; 105 return ret;
85} 106}
86 107
87static struct rc_map_table rc_map_cinergyt2_table[] = { 108static struct rc_map_table rc_map_cinergyt2_table[] = {
@@ -141,13 +162,18 @@ static int repeatable_keys[] = {
141static int cinergyt2_rc_query(struct dvb_usb_device *d, u32 *event, int *state) 162static int cinergyt2_rc_query(struct dvb_usb_device *d, u32 *event, int *state)
142{ 163{
143 struct cinergyt2_state *st = d->priv; 164 struct cinergyt2_state *st = d->priv;
144 u8 key[5] = {0, 0, 0, 0, 0}, cmd = CINERGYT2_EP1_GET_RC_EVENTS; 165 int i, ret;
145 int i;
146 166
147 *state = REMOTE_NO_KEY_PRESSED; 167 *state = REMOTE_NO_KEY_PRESSED;
148 168
149 dvb_usb_generic_rw(d, &cmd, 1, key, sizeof(key), 0); 169 mutex_lock(&st->data_mutex);
150 if (key[4] == 0xff) { 170 st->data[0] = CINERGYT2_EP1_GET_RC_EVENTS;
171
172 ret = dvb_usb_generic_rw(d, st->data, 1, st->data, 5, 0);
173 if (ret < 0)
174 goto ret;
175
176 if (st->data[4] == 0xff) {
151 /* key repeat */ 177 /* key repeat */
152 st->rc_counter++; 178 st->rc_counter++;
153 if (st->rc_counter > RC_REPEAT_DELAY) { 179 if (st->rc_counter > RC_REPEAT_DELAY) {
@@ -157,31 +183,45 @@ static int cinergyt2_rc_query(struct dvb_usb_device *d, u32 *event, int *state)
157 *event = d->last_event; 183 *event = d->last_event;
158 deb_rc("repeat key, event %x\n", 184 deb_rc("repeat key, event %x\n",
159 *event); 185 *event);
160 return 0; 186 goto ret;
161 } 187 }
162 } 188 }
163 deb_rc("repeated key (non repeatable)\n"); 189 deb_rc("repeated key (non repeatable)\n");
164 } 190 }
165 return 0; 191 goto ret;
166 } 192 }
167 193
168 /* hack to pass checksum on the custom field */ 194 /* hack to pass checksum on the custom field */
169 key[2] = ~key[1]; 195 st->data[2] = ~st->data[1];
170 dvb_usb_nec_rc_key_to_event(d, key, event, state); 196 dvb_usb_nec_rc_key_to_event(d, st->data, event, state);
171 if (key[0] != 0) { 197 if (st->data[0] != 0) {
172 if (*event != d->last_event) 198 if (*event != d->last_event)
173 st->rc_counter = 0; 199 st->rc_counter = 0;
174 200
175 deb_rc("key: %*ph\n", 5, key); 201 deb_rc("key: %*ph\n", 5, st->data);
176 } 202 }
177 return 0; 203
204ret:
205 mutex_unlock(&st->data_mutex);
206 return ret;
178} 207}
179 208
180static int cinergyt2_usb_probe(struct usb_interface *intf, 209static int cinergyt2_usb_probe(struct usb_interface *intf,
181 const struct usb_device_id *id) 210 const struct usb_device_id *id)
182{ 211{
183 return dvb_usb_device_init(intf, &cinergyt2_properties, 212 struct dvb_usb_device *d;
184 THIS_MODULE, NULL, adapter_nr); 213 struct cinergyt2_state *st;
214 int ret;
215
216 ret = dvb_usb_device_init(intf, &cinergyt2_properties,
217 THIS_MODULE, &d, adapter_nr);
218 if (ret < 0)
219 return ret;
220
221 st = d->priv;
222 mutex_init(&st->data_mutex);
223
224 return 0;
185} 225}
186 226
187 227
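The hunk above is the template for the rest of this series: on-stack command/response buffers move into the driver's private state and are serialized with a mutex, since the buffer handed to dvb_usb_generic_rw() ends up in a USB transfer and must not live on the stack (with VMAP_STACK it is not even guaranteed to be physically contiguous). A minimal sketch of that shape follows; it is illustrative only, not part of the patch, and the struct and helper names are invented.

#include <linux/mutex.h>
#include "dvb-usb.h"		/* dvb_usb_device, dvb_usb_generic_rw() */

struct example_state {
	unsigned char data[64];		/* heap-backed USB transfer buffer */
	struct mutex data_mutex;	/* serializes users of ->data */
};

static int example_send_cmd(struct dvb_usb_device *d, u8 cmd, u8 arg)
{
	struct example_state *st = d->priv;	/* allocated via .size_of_priv */
	int ret;

	mutex_lock(&st->data_mutex);
	st->data[0] = cmd;
	st->data[1] = arg;
	/* the reply is read back into the same buffer, as in the hunk above */
	ret = dvb_usb_generic_rw(d, st->data, 2, st->data, sizeof(st->data), 0);
	mutex_unlock(&st->data_mutex);

	return ret;
}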
diff --git a/drivers/media/usb/dvb-usb/cinergyT2-fe.c b/drivers/media/usb/dvb-usb/cinergyT2-fe.c
index b3ec743a7a2e..2d29b4174dba 100644
--- a/drivers/media/usb/dvb-usb/cinergyT2-fe.c
+++ b/drivers/media/usb/dvb-usb/cinergyT2-fe.c
@@ -139,32 +139,42 @@ static uint16_t compute_tps(struct dtv_frontend_properties *op)
139struct cinergyt2_fe_state { 139struct cinergyt2_fe_state {
140 struct dvb_frontend fe; 140 struct dvb_frontend fe;
141 struct dvb_usb_device *d; 141 struct dvb_usb_device *d;
142
143 unsigned char data[64];
144 struct mutex data_mutex;
145
146 struct dvbt_get_status_msg status;
142}; 147};
143 148
144static int cinergyt2_fe_read_status(struct dvb_frontend *fe, 149static int cinergyt2_fe_read_status(struct dvb_frontend *fe,
145 enum fe_status *status) 150 enum fe_status *status)
146{ 151{
147 struct cinergyt2_fe_state *state = fe->demodulator_priv; 152 struct cinergyt2_fe_state *state = fe->demodulator_priv;
148 struct dvbt_get_status_msg result;
149 u8 cmd[] = { CINERGYT2_EP1_GET_TUNER_STATUS };
150 int ret; 153 int ret;
151 154
152 ret = dvb_usb_generic_rw(state->d, cmd, sizeof(cmd), (u8 *)&result, 155 mutex_lock(&state->data_mutex);
153 sizeof(result), 0); 156 state->data[0] = CINERGYT2_EP1_GET_TUNER_STATUS;
157
158 ret = dvb_usb_generic_rw(state->d, state->data, 1,
159 state->data, sizeof(state->status), 0);
160 if (!ret)
161 memcpy(&state->status, state->data, sizeof(state->status));
162 mutex_unlock(&state->data_mutex);
163
154 if (ret < 0) 164 if (ret < 0)
155 return ret; 165 return ret;
156 166
157 *status = 0; 167 *status = 0;
158 168
159 if (0xffff - le16_to_cpu(result.gain) > 30) 169 if (0xffff - le16_to_cpu(state->status.gain) > 30)
160 *status |= FE_HAS_SIGNAL; 170 *status |= FE_HAS_SIGNAL;
161 if (result.lock_bits & (1 << 6)) 171 if (state->status.lock_bits & (1 << 6))
162 *status |= FE_HAS_LOCK; 172 *status |= FE_HAS_LOCK;
163 if (result.lock_bits & (1 << 5)) 173 if (state->status.lock_bits & (1 << 5))
164 *status |= FE_HAS_SYNC; 174 *status |= FE_HAS_SYNC;
165 if (result.lock_bits & (1 << 4)) 175 if (state->status.lock_bits & (1 << 4))
166 *status |= FE_HAS_CARRIER; 176 *status |= FE_HAS_CARRIER;
167 if (result.lock_bits & (1 << 1)) 177 if (state->status.lock_bits & (1 << 1))
168 *status |= FE_HAS_VITERBI; 178 *status |= FE_HAS_VITERBI;
169 179
170 if ((*status & (FE_HAS_CARRIER | FE_HAS_VITERBI | FE_HAS_SYNC)) != 180 if ((*status & (FE_HAS_CARRIER | FE_HAS_VITERBI | FE_HAS_SYNC)) !=
@@ -177,34 +187,16 @@ static int cinergyt2_fe_read_status(struct dvb_frontend *fe,
177static int cinergyt2_fe_read_ber(struct dvb_frontend *fe, u32 *ber) 187static int cinergyt2_fe_read_ber(struct dvb_frontend *fe, u32 *ber)
178{ 188{
179 struct cinergyt2_fe_state *state = fe->demodulator_priv; 189 struct cinergyt2_fe_state *state = fe->demodulator_priv;
180 struct dvbt_get_status_msg status;
181 char cmd[] = { CINERGYT2_EP1_GET_TUNER_STATUS };
182 int ret;
183
184 ret = dvb_usb_generic_rw(state->d, cmd, sizeof(cmd), (char *)&status,
185 sizeof(status), 0);
186 if (ret < 0)
187 return ret;
188 190
189 *ber = le32_to_cpu(status.viterbi_error_rate); 191 *ber = le32_to_cpu(state->status.viterbi_error_rate);
190 return 0; 192 return 0;
191} 193}
192 194
193static int cinergyt2_fe_read_unc_blocks(struct dvb_frontend *fe, u32 *unc) 195static int cinergyt2_fe_read_unc_blocks(struct dvb_frontend *fe, u32 *unc)
194{ 196{
195 struct cinergyt2_fe_state *state = fe->demodulator_priv; 197 struct cinergyt2_fe_state *state = fe->demodulator_priv;
196 struct dvbt_get_status_msg status;
197 u8 cmd[] = { CINERGYT2_EP1_GET_TUNER_STATUS };
198 int ret;
199 198
200 ret = dvb_usb_generic_rw(state->d, cmd, sizeof(cmd), (u8 *)&status, 199 *unc = le32_to_cpu(state->status.uncorrected_block_count);
201 sizeof(status), 0);
202 if (ret < 0) {
203 err("cinergyt2_fe_read_unc_blocks() Failed! (Error=%d)\n",
204 ret);
205 return ret;
206 }
207 *unc = le32_to_cpu(status.uncorrected_block_count);
208 return 0; 200 return 0;
209} 201}
210 202
@@ -212,35 +204,16 @@ static int cinergyt2_fe_read_signal_strength(struct dvb_frontend *fe,
212 u16 *strength) 204 u16 *strength)
213{ 205{
214 struct cinergyt2_fe_state *state = fe->demodulator_priv; 206 struct cinergyt2_fe_state *state = fe->demodulator_priv;
215 struct dvbt_get_status_msg status;
216 char cmd[] = { CINERGYT2_EP1_GET_TUNER_STATUS };
217 int ret;
218 207
219 ret = dvb_usb_generic_rw(state->d, cmd, sizeof(cmd), (char *)&status, 208 *strength = (0xffff - le16_to_cpu(state->status.gain));
220 sizeof(status), 0);
221 if (ret < 0) {
222 err("cinergyt2_fe_read_signal_strength() Failed!"
223 " (Error=%d)\n", ret);
224 return ret;
225 }
226 *strength = (0xffff - le16_to_cpu(status.gain));
227 return 0; 209 return 0;
228} 210}
229 211
230static int cinergyt2_fe_read_snr(struct dvb_frontend *fe, u16 *snr) 212static int cinergyt2_fe_read_snr(struct dvb_frontend *fe, u16 *snr)
231{ 213{
232 struct cinergyt2_fe_state *state = fe->demodulator_priv; 214 struct cinergyt2_fe_state *state = fe->demodulator_priv;
233 struct dvbt_get_status_msg status;
234 char cmd[] = { CINERGYT2_EP1_GET_TUNER_STATUS };
235 int ret;
236 215
237 ret = dvb_usb_generic_rw(state->d, cmd, sizeof(cmd), (char *)&status, 216 *snr = (state->status.snr << 8) | state->status.snr;
238 sizeof(status), 0);
239 if (ret < 0) {
240 err("cinergyt2_fe_read_snr() Failed! (Error=%d)\n", ret);
241 return ret;
242 }
243 *snr = (status.snr << 8) | status.snr;
244 return 0; 217 return 0;
245} 218}
246 219
@@ -266,34 +239,36 @@ static int cinergyt2_fe_set_frontend(struct dvb_frontend *fe)
266{ 239{
267 struct dtv_frontend_properties *fep = &fe->dtv_property_cache; 240 struct dtv_frontend_properties *fep = &fe->dtv_property_cache;
268 struct cinergyt2_fe_state *state = fe->demodulator_priv; 241 struct cinergyt2_fe_state *state = fe->demodulator_priv;
269 struct dvbt_set_parameters_msg param; 242 struct dvbt_set_parameters_msg *param;
270 char result[2];
271 int err; 243 int err;
272 244
273 param.cmd = CINERGYT2_EP1_SET_TUNER_PARAMETERS; 245 mutex_lock(&state->data_mutex);
274 param.tps = cpu_to_le16(compute_tps(fep)); 246
275 param.freq = cpu_to_le32(fep->frequency / 1000); 247 param = (void *)state->data;
276 param.flags = 0; 248 param->cmd = CINERGYT2_EP1_SET_TUNER_PARAMETERS;
249 param->tps = cpu_to_le16(compute_tps(fep));
250 param->freq = cpu_to_le32(fep->frequency / 1000);
251 param->flags = 0;
277 252
278 switch (fep->bandwidth_hz) { 253 switch (fep->bandwidth_hz) {
279 default: 254 default:
280 case 8000000: 255 case 8000000:
281 param.bandwidth = 8; 256 param->bandwidth = 8;
282 break; 257 break;
283 case 7000000: 258 case 7000000:
284 param.bandwidth = 7; 259 param->bandwidth = 7;
285 break; 260 break;
286 case 6000000: 261 case 6000000:
287 param.bandwidth = 6; 262 param->bandwidth = 6;
288 break; 263 break;
289 } 264 }
290 265
291 err = dvb_usb_generic_rw(state->d, 266 err = dvb_usb_generic_rw(state->d, state->data, sizeof(*param),
292 (char *)&param, sizeof(param), 267 state->data, 2, 0);
293 result, sizeof(result), 0);
294 if (err < 0) 268 if (err < 0)
295 err("cinergyt2_fe_set_frontend() Failed! err=%d\n", err); 269 err("cinergyt2_fe_set_frontend() Failed! err=%d\n", err);
296 270
271 mutex_unlock(&state->data_mutex);
297 return (err < 0) ? err : 0; 272 return (err < 0) ? err : 0;
298} 273}
299 274
@@ -315,6 +290,7 @@ struct dvb_frontend *cinergyt2_fe_attach(struct dvb_usb_device *d)
315 s->d = d; 290 s->d = d;
316 memcpy(&s->fe.ops, &cinergyt2_fe_ops, sizeof(struct dvb_frontend_ops)); 291 memcpy(&s->fe.ops, &cinergyt2_fe_ops, sizeof(struct dvb_frontend_ops));
317 s->fe.demodulator_priv = s; 292 s->fe.demodulator_priv = s;
293 mutex_init(&s->data_mutex);
318 return &s->fe; 294 return &s->fe;
319} 295}
320 296
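Two ideas in the frontend hunks above: the GET_TUNER_STATUS reply is cached in state->status so read_ber()/read_unc_blocks()/read_signal_strength()/read_snr() only decode the last status instead of issuing their own transfers, and set_frontend() builds its request by overlaying struct dvbt_set_parameters_msg on the shared buffer. A sketch of that overlay is below; the BUILD_BUG_ON() size check is an addition of this example, not something the patch does, and the simplified parameters are hypothetical.

static int example_set_frontend(struct cinergyt2_fe_state *state,
				u16 tps, u32 freq_khz, u8 bandwidth)
{
	struct dvbt_set_parameters_msg *param;
	int err;

	/* documents the assumption that the message fits the 64-byte buffer */
	BUILD_BUG_ON(sizeof(*param) > sizeof(state->data));

	mutex_lock(&state->data_mutex);
	param = (void *)state->data;
	param->cmd = CINERGYT2_EP1_SET_TUNER_PARAMETERS;
	param->tps = cpu_to_le16(tps);
	param->freq = cpu_to_le32(freq_khz);
	param->flags = 0;
	param->bandwidth = bandwidth;

	/* the device answers with two status bytes into the same buffer */
	err = dvb_usb_generic_rw(state->d, state->data, sizeof(*param),
				 state->data, 2, 0);
	mutex_unlock(&state->data_mutex);

	return err < 0 ? err : 0;
}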
diff --git a/drivers/media/usb/dvb-usb/cxusb.c b/drivers/media/usb/dvb-usb/cxusb.c
index 907ac01ae297..39772812269d 100644
--- a/drivers/media/usb/dvb-usb/cxusb.c
+++ b/drivers/media/usb/dvb-usb/cxusb.c
@@ -45,9 +45,6 @@
45#include "si2168.h" 45#include "si2168.h"
46#include "si2157.h" 46#include "si2157.h"
47 47
48/* Max transfer size done by I2C transfer functions */
49#define MAX_XFER_SIZE 80
50
51/* debug */ 48/* debug */
52static int dvb_usb_cxusb_debug; 49static int dvb_usb_cxusb_debug;
53module_param_named(debug, dvb_usb_cxusb_debug, int, 0644); 50module_param_named(debug, dvb_usb_cxusb_debug, int, 0644);
@@ -61,23 +58,27 @@ DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr);
61static int cxusb_ctrl_msg(struct dvb_usb_device *d, 58static int cxusb_ctrl_msg(struct dvb_usb_device *d,
62 u8 cmd, u8 *wbuf, int wlen, u8 *rbuf, int rlen) 59 u8 cmd, u8 *wbuf, int wlen, u8 *rbuf, int rlen)
63{ 60{
64 int wo = (rbuf == NULL || rlen == 0); /* write-only */ 61 struct cxusb_state *st = d->priv;
65 u8 sndbuf[MAX_XFER_SIZE]; 62 int ret, wo;
66 63
67 if (1 + wlen > sizeof(sndbuf)) { 64 if (1 + wlen > MAX_XFER_SIZE) {
68 warn("i2c wr: len=%d is too big!\n", 65 warn("i2c wr: len=%d is too big!\n", wlen);
69 wlen);
70 return -EOPNOTSUPP; 66 return -EOPNOTSUPP;
71 } 67 }
72 68
73 memset(sndbuf, 0, 1+wlen); 69 wo = (rbuf == NULL || rlen == 0); /* write-only */
74 70
75 sndbuf[0] = cmd; 71 mutex_lock(&st->data_mutex);
76 memcpy(&sndbuf[1], wbuf, wlen); 72 st->data[0] = cmd;
73 memcpy(&st->data[1], wbuf, wlen);
77 if (wo) 74 if (wo)
78 return dvb_usb_generic_write(d, sndbuf, 1+wlen); 75 ret = dvb_usb_generic_write(d, st->data, 1 + wlen);
79 else 76 else
80 return dvb_usb_generic_rw(d, sndbuf, 1+wlen, rbuf, rlen, 0); 77 ret = dvb_usb_generic_rw(d, st->data, 1 + wlen,
78 rbuf, rlen, 0);
79
80 mutex_unlock(&st->data_mutex);
81 return ret;
81} 82}
82 83
83/* GPIO */ 84/* GPIO */
@@ -1460,36 +1461,43 @@ static struct dvb_usb_device_properties cxusb_mygica_t230_properties;
1460static int cxusb_probe(struct usb_interface *intf, 1461static int cxusb_probe(struct usb_interface *intf,
1461 const struct usb_device_id *id) 1462 const struct usb_device_id *id)
1462{ 1463{
1464 struct dvb_usb_device *d;
1465 struct cxusb_state *st;
1466
1463 if (0 == dvb_usb_device_init(intf, &cxusb_medion_properties, 1467 if (0 == dvb_usb_device_init(intf, &cxusb_medion_properties,
1464 THIS_MODULE, NULL, adapter_nr) || 1468 THIS_MODULE, &d, adapter_nr) ||
1465 0 == dvb_usb_device_init(intf, &cxusb_bluebird_lgh064f_properties, 1469 0 == dvb_usb_device_init(intf, &cxusb_bluebird_lgh064f_properties,
1466 THIS_MODULE, NULL, adapter_nr) || 1470 THIS_MODULE, &d, adapter_nr) ||
1467 0 == dvb_usb_device_init(intf, &cxusb_bluebird_dee1601_properties, 1471 0 == dvb_usb_device_init(intf, &cxusb_bluebird_dee1601_properties,
1468 THIS_MODULE, NULL, adapter_nr) || 1472 THIS_MODULE, &d, adapter_nr) ||
1469 0 == dvb_usb_device_init(intf, &cxusb_bluebird_lgz201_properties, 1473 0 == dvb_usb_device_init(intf, &cxusb_bluebird_lgz201_properties,
1470 THIS_MODULE, NULL, adapter_nr) || 1474 THIS_MODULE, &d, adapter_nr) ||
1471 0 == dvb_usb_device_init(intf, &cxusb_bluebird_dtt7579_properties, 1475 0 == dvb_usb_device_init(intf, &cxusb_bluebird_dtt7579_properties,
1472 THIS_MODULE, NULL, adapter_nr) || 1476 THIS_MODULE, &d, adapter_nr) ||
1473 0 == dvb_usb_device_init(intf, &cxusb_bluebird_dualdig4_properties, 1477 0 == dvb_usb_device_init(intf, &cxusb_bluebird_dualdig4_properties,
1474 THIS_MODULE, NULL, adapter_nr) || 1478 THIS_MODULE, &d, adapter_nr) ||
1475 0 == dvb_usb_device_init(intf, &cxusb_bluebird_nano2_properties, 1479 0 == dvb_usb_device_init(intf, &cxusb_bluebird_nano2_properties,
1476 THIS_MODULE, NULL, adapter_nr) || 1480 THIS_MODULE, &d, adapter_nr) ||
1477 0 == dvb_usb_device_init(intf, 1481 0 == dvb_usb_device_init(intf,
1478 &cxusb_bluebird_nano2_needsfirmware_properties, 1482 &cxusb_bluebird_nano2_needsfirmware_properties,
1479 THIS_MODULE, NULL, adapter_nr) || 1483 THIS_MODULE, &d, adapter_nr) ||
1480 0 == dvb_usb_device_init(intf, &cxusb_aver_a868r_properties, 1484 0 == dvb_usb_device_init(intf, &cxusb_aver_a868r_properties,
1481 THIS_MODULE, NULL, adapter_nr) || 1485 THIS_MODULE, &d, adapter_nr) ||
1482 0 == dvb_usb_device_init(intf, 1486 0 == dvb_usb_device_init(intf,
1483 &cxusb_bluebird_dualdig4_rev2_properties, 1487 &cxusb_bluebird_dualdig4_rev2_properties,
1484 THIS_MODULE, NULL, adapter_nr) || 1488 THIS_MODULE, &d, adapter_nr) ||
1485 0 == dvb_usb_device_init(intf, &cxusb_d680_dmb_properties, 1489 0 == dvb_usb_device_init(intf, &cxusb_d680_dmb_properties,
1486 THIS_MODULE, NULL, adapter_nr) || 1490 THIS_MODULE, &d, adapter_nr) ||
1487 0 == dvb_usb_device_init(intf, &cxusb_mygica_d689_properties, 1491 0 == dvb_usb_device_init(intf, &cxusb_mygica_d689_properties,
1488 THIS_MODULE, NULL, adapter_nr) || 1492 THIS_MODULE, &d, adapter_nr) ||
1489 0 == dvb_usb_device_init(intf, &cxusb_mygica_t230_properties, 1493 0 == dvb_usb_device_init(intf, &cxusb_mygica_t230_properties,
1490 THIS_MODULE, NULL, adapter_nr) || 1494 THIS_MODULE, &d, adapter_nr) ||
1491 0) 1495 0) {
1496 st = d->priv;
1497 mutex_init(&st->data_mutex);
1498
1492 return 0; 1499 return 0;
1500 }
1493 1501
1494 return -EINVAL; 1502 return -EINVAL;
1495} 1503}
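cxusb_probe() now passes &d to dvb_usb_device_init() so it can reach the freshly allocated private state and initialize the mutex before any message helper runs. The same probe shape recurs in several drivers below; sketched here with placeholder names (example_properties, example_state and adapter_nr stand in for the driver's own symbols).

static int example_probe(struct usb_interface *intf,
			 const struct usb_device_id *id)
{
	struct dvb_usb_device *d;
	struct example_state *st;
	int ret;

	ret = dvb_usb_device_init(intf, &example_properties, THIS_MODULE,
				  &d, adapter_nr);
	if (ret < 0)
		return ret;

	st = d->priv;			/* sized by .size_of_priv */
	mutex_init(&st->data_mutex);

	return 0;
}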
diff --git a/drivers/media/usb/dvb-usb/cxusb.h b/drivers/media/usb/dvb-usb/cxusb.h
index 527ff7905e15..9f3ee0e47d5c 100644
--- a/drivers/media/usb/dvb-usb/cxusb.h
+++ b/drivers/media/usb/dvb-usb/cxusb.h
@@ -28,10 +28,16 @@
28#define CMD_ANALOG 0x50 28#define CMD_ANALOG 0x50
29#define CMD_DIGITAL 0x51 29#define CMD_DIGITAL 0x51
30 30
31/* Max transfer size done by I2C transfer functions */
32#define MAX_XFER_SIZE 80
33
31struct cxusb_state { 34struct cxusb_state {
32 u8 gpio_write_state[3]; 35 u8 gpio_write_state[3];
33 struct i2c_client *i2c_client_demod; 36 struct i2c_client *i2c_client_demod;
34 struct i2c_client *i2c_client_tuner; 37 struct i2c_client *i2c_client_tuner;
38
39 unsigned char data[MAX_XFER_SIZE];
40 struct mutex data_mutex;
35}; 41};
36 42
37#endif 43#endif
diff --git a/drivers/media/usb/dvb-usb/dib0700_core.c b/drivers/media/usb/dvb-usb/dib0700_core.c
index f3196658fb70..92d5408684ac 100644
--- a/drivers/media/usb/dvb-usb/dib0700_core.c
+++ b/drivers/media/usb/dvb-usb/dib0700_core.c
@@ -213,7 +213,7 @@ static int dib0700_i2c_xfer_new(struct i2c_adapter *adap, struct i2c_msg *msg,
213 usb_rcvctrlpipe(d->udev, 0), 213 usb_rcvctrlpipe(d->udev, 0),
214 REQUEST_NEW_I2C_READ, 214 REQUEST_NEW_I2C_READ,
215 USB_TYPE_VENDOR | USB_DIR_IN, 215 USB_TYPE_VENDOR | USB_DIR_IN,
216 value, index, msg[i].buf, 216 value, index, st->buf,
217 msg[i].len, 217 msg[i].len,
218 USB_CTRL_GET_TIMEOUT); 218 USB_CTRL_GET_TIMEOUT);
219 if (result < 0) { 219 if (result < 0) {
@@ -221,6 +221,14 @@ static int dib0700_i2c_xfer_new(struct i2c_adapter *adap, struct i2c_msg *msg,
221 break; 221 break;
222 } 222 }
223 223
224 if (msg[i].len > sizeof(st->buf)) {
225 deb_info("buffer too small to fit %d bytes\n",
226 msg[i].len);
227 return -EIO;
228 }
229
230 memcpy(msg[i].buf, st->buf, msg[i].len);
231
224 deb_data("<<< "); 232 deb_data("<<< ");
225 debug_dump(msg[i].buf, msg[i].len, deb_data); 233 debug_dump(msg[i].buf, msg[i].len, deb_data);
226 234
@@ -238,6 +246,13 @@ static int dib0700_i2c_xfer_new(struct i2c_adapter *adap, struct i2c_msg *msg,
238 /* I2C ctrl + FE bus; */ 246 /* I2C ctrl + FE bus; */
239 st->buf[3] = ((gen_mode << 6) & 0xC0) | 247 st->buf[3] = ((gen_mode << 6) & 0xC0) |
240 ((bus_mode << 4) & 0x30); 248 ((bus_mode << 4) & 0x30);
249
250 if (msg[i].len > sizeof(st->buf) - 4) {
251 deb_info("i2c message to big: %d\n",
252 msg[i].len);
253 return -EIO;
254 }
255
241 /* The Actual i2c payload */ 256 /* The Actual i2c payload */
242 memcpy(&st->buf[4], msg[i].buf, msg[i].len); 257 memcpy(&st->buf[4], msg[i].buf, msg[i].len);
243 258
@@ -283,6 +298,11 @@ static int dib0700_i2c_xfer_legacy(struct i2c_adapter *adap,
283 /* fill in the address */ 298 /* fill in the address */
284 st->buf[1] = msg[i].addr << 1; 299 st->buf[1] = msg[i].addr << 1;
285 /* fill the buffer */ 300 /* fill the buffer */
301 if (msg[i].len > sizeof(st->buf) - 2) {
302 deb_info("i2c xfer to big: %d\n",
303 msg[i].len);
304 return -EIO;
305 }
286 memcpy(&st->buf[2], msg[i].buf, msg[i].len); 306 memcpy(&st->buf[2], msg[i].buf, msg[i].len);
287 307
288 /* write/read request */ 308 /* write/read request */
@@ -292,13 +312,20 @@ static int dib0700_i2c_xfer_legacy(struct i2c_adapter *adap,
292 312
293 /* special thing in the current firmware: when length is zero the read-failed */ 313 /* special thing in the current firmware: when length is zero the read-failed */
294 len = dib0700_ctrl_rd(d, st->buf, msg[i].len + 2, 314 len = dib0700_ctrl_rd(d, st->buf, msg[i].len + 2,
295 msg[i+1].buf, msg[i+1].len); 315 st->buf, msg[i + 1].len);
296 if (len <= 0) { 316 if (len <= 0) {
297 deb_info("I2C read failed on address 0x%02x\n", 317 deb_info("I2C read failed on address 0x%02x\n",
298 msg[i].addr); 318 msg[i].addr);
299 break; 319 break;
300 } 320 }
301 321
322 if (msg[i + 1].len > sizeof(st->buf)) {
323 deb_info("i2c xfer buffer too small for %d\n",
324 msg[i + 1].len);
325 return -EIO;
326 }
327 memcpy(msg[i + 1].buf, st->buf, msg[i + 1].len);
328
302 msg[i+1].len = len; 329 msg[i+1].len = len;
303 330
304 i++; 331 i++;
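With the variably sized stack arrays gone, each copy between an i2c_msg and the fixed st->buf above is guarded by a length check that fails the transfer with -EIO instead of overrunning the buffer. The check-then-copy step in isolation, as a sketch (the helper is invented; st->buf is the driver's existing buffer):

static int example_copy_reply(struct dib0700_state *st, struct i2c_msg *msg)
{
	if (msg->len > sizeof(st->buf)) {
		deb_info("reply of %d bytes does not fit the state buffer\n",
			 msg->len);
		return -EIO;
	}

	memcpy(msg->buf, st->buf, msg->len);
	return 0;
}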
diff --git a/drivers/media/usb/dvb-usb/dib0700_devices.c b/drivers/media/usb/dvb-usb/dib0700_devices.c
index 0857b56e652c..ef1b8ee75c57 100644
--- a/drivers/media/usb/dvb-usb/dib0700_devices.c
+++ b/drivers/media/usb/dvb-usb/dib0700_devices.c
@@ -508,8 +508,6 @@ static int stk7700ph_tuner_attach(struct dvb_usb_adapter *adap)
508 508
509#define DEFAULT_RC_INTERVAL 50 509#define DEFAULT_RC_INTERVAL 50
510 510
511static u8 rc_request[] = { REQUEST_POLL_RC, 0 };
512
513/* 511/*
514 * This function is used only when firmware is < 1.20 version. Newer 512 * This function is used only when firmware is < 1.20 version. Newer
515 * firmwares use bulk mode, with functions implemented at dib0700_core, 513 * firmwares use bulk mode, with functions implemented at dib0700_core,
@@ -517,7 +515,6 @@ static u8 rc_request[] = { REQUEST_POLL_RC, 0 };
517 */ 515 */
518static int dib0700_rc_query_old_firmware(struct dvb_usb_device *d) 516static int dib0700_rc_query_old_firmware(struct dvb_usb_device *d)
519{ 517{
520 u8 key[4];
521 enum rc_type protocol; 518 enum rc_type protocol;
522 u32 scancode; 519 u32 scancode;
523 u8 toggle; 520 u8 toggle;
@@ -532,39 +529,43 @@ static int dib0700_rc_query_old_firmware(struct dvb_usb_device *d)
532 return 0; 529 return 0;
533 } 530 }
534 531
535 i = dib0700_ctrl_rd(d, rc_request, 2, key, 4); 532 st->buf[0] = REQUEST_POLL_RC;
533 st->buf[1] = 0;
534
535 i = dib0700_ctrl_rd(d, st->buf, 2, st->buf, 4);
536 if (i <= 0) { 536 if (i <= 0) {
537 err("RC Query Failed"); 537 err("RC Query Failed");
538 return -1; 538 return -EIO;
539 } 539 }
540 540
541 /* losing half of KEY_0 events from Philipps rc5 remotes.. */ 541 /* losing half of KEY_0 events from Philipps rc5 remotes.. */
542 if (key[0] == 0 && key[1] == 0 && key[2] == 0 && key[3] == 0) 542 if (st->buf[0] == 0 && st->buf[1] == 0
543 && st->buf[2] == 0 && st->buf[3] == 0)
543 return 0; 544 return 0;
544 545
545 /* info("%d: %2X %2X %2X %2X",dvb_usb_dib0700_ir_proto,(int)key[3-2],(int)key[3-3],(int)key[3-1],(int)key[3]); */ 546 /* info("%d: %2X %2X %2X %2X",dvb_usb_dib0700_ir_proto,(int)st->buf[3 - 2],(int)st->buf[3 - 3],(int)st->buf[3 - 1],(int)st->buf[3]); */
546 547
547 dib0700_rc_setup(d, NULL); /* reset ir sensor data to prevent false events */ 548 dib0700_rc_setup(d, NULL); /* reset ir sensor data to prevent false events */
548 549
549 switch (d->props.rc.core.protocol) { 550 switch (d->props.rc.core.protocol) {
550 case RC_BIT_NEC: 551 case RC_BIT_NEC:
551 /* NEC protocol sends repeat code as 0 0 0 FF */ 552 /* NEC protocol sends repeat code as 0 0 0 FF */
552 if ((key[3-2] == 0x00) && (key[3-3] == 0x00) && 553 if ((st->buf[3 - 2] == 0x00) && (st->buf[3 - 3] == 0x00) &&
553 (key[3] == 0xff)) { 554 (st->buf[3] == 0xff)) {
554 rc_repeat(d->rc_dev); 555 rc_repeat(d->rc_dev);
555 return 0; 556 return 0;
556 } 557 }
557 558
558 protocol = RC_TYPE_NEC; 559 protocol = RC_TYPE_NEC;
559 scancode = RC_SCANCODE_NEC(key[3-2], key[3-3]); 560 scancode = RC_SCANCODE_NEC(st->buf[3 - 2], st->buf[3 - 3]);
560 toggle = 0; 561 toggle = 0;
561 break; 562 break;
562 563
563 default: 564 default:
564 /* RC-5 protocol changes toggle bit on new keypress */ 565 /* RC-5 protocol changes toggle bit on new keypress */
565 protocol = RC_TYPE_RC5; 566 protocol = RC_TYPE_RC5;
566 scancode = RC_SCANCODE_RC5(key[3-2], key[3-3]); 567 scancode = RC_SCANCODE_RC5(st->buf[3 - 2], st->buf[3 - 3]);
567 toggle = key[3-1]; 568 toggle = st->buf[3 - 1];
568 break; 569 break;
569 } 570 }
570 571
diff --git a/drivers/media/usb/dvb-usb/dibusb-common.c b/drivers/media/usb/dvb-usb/dibusb-common.c
index 18ed3bfbb5e2..de3ee2547479 100644
--- a/drivers/media/usb/dvb-usb/dibusb-common.c
+++ b/drivers/media/usb/dvb-usb/dibusb-common.c
@@ -62,72 +62,117 @@ EXPORT_SYMBOL(dibusb_pid_filter_ctrl);
62 62
63int dibusb_power_ctrl(struct dvb_usb_device *d, int onoff) 63int dibusb_power_ctrl(struct dvb_usb_device *d, int onoff)
64{ 64{
65 u8 b[3]; 65 u8 *b;
66 int ret; 66 int ret;
67
68 b = kmalloc(3, GFP_KERNEL);
69 if (!b)
70 return -ENOMEM;
71
67 b[0] = DIBUSB_REQ_SET_IOCTL; 72 b[0] = DIBUSB_REQ_SET_IOCTL;
68 b[1] = DIBUSB_IOCTL_CMD_POWER_MODE; 73 b[1] = DIBUSB_IOCTL_CMD_POWER_MODE;
69 b[2] = onoff ? DIBUSB_IOCTL_POWER_WAKEUP : DIBUSB_IOCTL_POWER_SLEEP; 74 b[2] = onoff ? DIBUSB_IOCTL_POWER_WAKEUP : DIBUSB_IOCTL_POWER_SLEEP;
70 ret = dvb_usb_generic_write(d,b,3); 75
76 ret = dvb_usb_generic_write(d, b, 3);
77
78 kfree(b);
79
71 msleep(10); 80 msleep(10);
81
72 return ret; 82 return ret;
73} 83}
74EXPORT_SYMBOL(dibusb_power_ctrl); 84EXPORT_SYMBOL(dibusb_power_ctrl);
75 85
76int dibusb2_0_streaming_ctrl(struct dvb_usb_adapter *adap, int onoff) 86int dibusb2_0_streaming_ctrl(struct dvb_usb_adapter *adap, int onoff)
77{ 87{
78 u8 b[3] = { 0 };
79 int ret; 88 int ret;
89 u8 *b;
90
91 b = kmalloc(3, GFP_KERNEL);
92 if (!b)
93 return -ENOMEM;
80 94
81 if ((ret = dibusb_streaming_ctrl(adap,onoff)) < 0) 95 if ((ret = dibusb_streaming_ctrl(adap,onoff)) < 0)
82 return ret; 96 goto ret;
83 97
84 if (onoff) { 98 if (onoff) {
85 b[0] = DIBUSB_REQ_SET_STREAMING_MODE; 99 b[0] = DIBUSB_REQ_SET_STREAMING_MODE;
86 b[1] = 0x00; 100 b[1] = 0x00;
87 if ((ret = dvb_usb_generic_write(adap->dev,b,2)) < 0) 101 ret = dvb_usb_generic_write(adap->dev, b, 2);
88 return ret; 102 if (ret < 0)
103 goto ret;
89 } 104 }
90 105
91 b[0] = DIBUSB_REQ_SET_IOCTL; 106 b[0] = DIBUSB_REQ_SET_IOCTL;
92 b[1] = onoff ? DIBUSB_IOCTL_CMD_ENABLE_STREAM : DIBUSB_IOCTL_CMD_DISABLE_STREAM; 107 b[1] = onoff ? DIBUSB_IOCTL_CMD_ENABLE_STREAM : DIBUSB_IOCTL_CMD_DISABLE_STREAM;
93 return dvb_usb_generic_write(adap->dev,b,3); 108 ret = dvb_usb_generic_write(adap->dev, b, 3);
109
110ret:
111 kfree(b);
112 return ret;
94} 113}
95EXPORT_SYMBOL(dibusb2_0_streaming_ctrl); 114EXPORT_SYMBOL(dibusb2_0_streaming_ctrl);
96 115
97int dibusb2_0_power_ctrl(struct dvb_usb_device *d, int onoff) 116int dibusb2_0_power_ctrl(struct dvb_usb_device *d, int onoff)
98{ 117{
99 if (onoff) { 118 u8 *b;
100 u8 b[3] = { DIBUSB_REQ_SET_IOCTL, DIBUSB_IOCTL_CMD_POWER_MODE, DIBUSB_IOCTL_POWER_WAKEUP }; 119 int ret;
101 return dvb_usb_generic_write(d,b,3); 120
102 } else 121 if (!onoff)
103 return 0; 122 return 0;
123
124 b = kmalloc(3, GFP_KERNEL);
125 if (!b)
126 return -ENOMEM;
127
128 b[0] = DIBUSB_REQ_SET_IOCTL;
129 b[1] = DIBUSB_IOCTL_CMD_POWER_MODE;
130 b[2] = DIBUSB_IOCTL_POWER_WAKEUP;
131
132 ret = dvb_usb_generic_write(d, b, 3);
133
134 kfree(b);
135
136 return ret;
104} 137}
105EXPORT_SYMBOL(dibusb2_0_power_ctrl); 138EXPORT_SYMBOL(dibusb2_0_power_ctrl);
106 139
107static int dibusb_i2c_msg(struct dvb_usb_device *d, u8 addr, 140static int dibusb_i2c_msg(struct dvb_usb_device *d, u8 addr,
108 u8 *wbuf, u16 wlen, u8 *rbuf, u16 rlen) 141 u8 *wbuf, u16 wlen, u8 *rbuf, u16 rlen)
109{ 142{
110 u8 sndbuf[MAX_XFER_SIZE]; /* lead(1) devaddr,direction(1) addr(2) data(wlen) (len(2) (when reading)) */ 143 u8 *sndbuf;
144 int ret, wo, len;
145
111 /* write only ? */ 146 /* write only ? */
112 int wo = (rbuf == NULL || rlen == 0), 147 wo = (rbuf == NULL || rlen == 0);
113 len = 2 + wlen + (wo ? 0 : 2); 148
149 len = 2 + wlen + (wo ? 0 : 2);
150
151 sndbuf = kmalloc(MAX_XFER_SIZE, GFP_KERNEL);
152 if (!sndbuf)
153 return -ENOMEM;
114 154
115 if (4 + wlen > sizeof(sndbuf)) { 155 if (4 + wlen > MAX_XFER_SIZE) {
116 warn("i2c wr: len=%d is too big!\n", wlen); 156 warn("i2c wr: len=%d is too big!\n", wlen);
117 return -EOPNOTSUPP; 157 ret = -EOPNOTSUPP;
158 goto ret;
118 } 159 }
119 160
120 sndbuf[0] = wo ? DIBUSB_REQ_I2C_WRITE : DIBUSB_REQ_I2C_READ; 161 sndbuf[0] = wo ? DIBUSB_REQ_I2C_WRITE : DIBUSB_REQ_I2C_READ;
121 sndbuf[1] = (addr << 1) | (wo ? 0 : 1); 162 sndbuf[1] = (addr << 1) | (wo ? 0 : 1);
122 163
123 memcpy(&sndbuf[2],wbuf,wlen); 164 memcpy(&sndbuf[2], wbuf, wlen);
124 165
125 if (!wo) { 166 if (!wo) {
126 sndbuf[wlen+2] = (rlen >> 8) & 0xff; 167 sndbuf[wlen + 2] = (rlen >> 8) & 0xff;
127 sndbuf[wlen+3] = rlen & 0xff; 168 sndbuf[wlen + 3] = rlen & 0xff;
128 } 169 }
129 170
130 return dvb_usb_generic_rw(d,sndbuf,len,rbuf,rlen,0); 171 ret = dvb_usb_generic_rw(d, sndbuf, len, rbuf, rlen, 0);
172
173ret:
174 kfree(sndbuf);
175 return ret;
131} 176}
132 177
133/* 178/*
@@ -319,11 +364,27 @@ EXPORT_SYMBOL(rc_map_dibusb_table);
319 364
320int dibusb_rc_query(struct dvb_usb_device *d, u32 *event, int *state) 365int dibusb_rc_query(struct dvb_usb_device *d, u32 *event, int *state)
321{ 366{
322 u8 key[5],cmd = DIBUSB_REQ_POLL_REMOTE; 367 u8 *buf;
323 dvb_usb_generic_rw(d,&cmd,1,key,5,0); 368 int ret;
324 dvb_usb_nec_rc_key_to_event(d,key,event,state); 369
325 if (key[0] != 0) 370 buf = kmalloc(5, GFP_KERNEL);
326 deb_info("key: %*ph\n", 5, key); 371 if (!buf)
327 return 0; 372 return -ENOMEM;
373
374 buf[0] = DIBUSB_REQ_POLL_REMOTE;
375
376 ret = dvb_usb_generic_rw(d, buf, 1, buf, 5, 0);
377 if (ret < 0)
378 goto ret;
379
380 dvb_usb_nec_rc_key_to_event(d, buf, event, state);
381
382 if (buf[0] != 0)
383 deb_info("key: %*ph\n", 5, buf);
384
385 kfree(buf);
386
387ret:
388 return ret;
328} 389}
329EXPORT_SYMBOL(dibusb_rc_query); 390EXPORT_SYMBOL(dibusb_rc_query);
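The dibusb helpers are exported and shared by several drivers, so rather than relying on any one driver's private state they switch to short-lived kmalloc() buffers that are freed on every exit path. The basic shape, sketched after dibusb_power_ctrl() above (helper name invented):

static int example_set_ioctl(struct dvb_usb_device *d, u8 ioctl_cmd, u8 arg)
{
	u8 *b;
	int ret;

	b = kmalloc(3, GFP_KERNEL);	/* DMA-safe, unlike an on-stack array */
	if (!b)
		return -ENOMEM;

	b[0] = DIBUSB_REQ_SET_IOCTL;
	b[1] = ioctl_cmd;
	b[2] = arg;

	ret = dvb_usb_generic_write(d, b, 3);

	kfree(b);
	return ret;
}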
diff --git a/drivers/media/usb/dvb-usb/dibusb.h b/drivers/media/usb/dvb-usb/dibusb.h
index 3f82163d8ab8..697be2a17ade 100644
--- a/drivers/media/usb/dvb-usb/dibusb.h
+++ b/drivers/media/usb/dvb-usb/dibusb.h
@@ -96,6 +96,9 @@
96#define DIBUSB_IOCTL_CMD_ENABLE_STREAM 0x01 96#define DIBUSB_IOCTL_CMD_ENABLE_STREAM 0x01
97#define DIBUSB_IOCTL_CMD_DISABLE_STREAM 0x02 97#define DIBUSB_IOCTL_CMD_DISABLE_STREAM 0x02
98 98
99/* Max transfer size done by I2C transfer functions */
100#define MAX_XFER_SIZE 64
101
99struct dibusb_state { 102struct dibusb_state {
100 struct dib_fe_xfer_ops ops; 103 struct dib_fe_xfer_ops ops;
101 int mt2060_present; 104 int mt2060_present;
diff --git a/drivers/media/usb/dvb-usb/digitv.c b/drivers/media/usb/dvb-usb/digitv.c
index 63134335c994..4284f6984dc1 100644
--- a/drivers/media/usb/dvb-usb/digitv.c
+++ b/drivers/media/usb/dvb-usb/digitv.c
@@ -28,22 +28,26 @@ DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr);
28static int digitv_ctrl_msg(struct dvb_usb_device *d, 28static int digitv_ctrl_msg(struct dvb_usb_device *d,
29 u8 cmd, u8 vv, u8 *wbuf, int wlen, u8 *rbuf, int rlen) 29 u8 cmd, u8 vv, u8 *wbuf, int wlen, u8 *rbuf, int rlen)
30{ 30{
31 int wo = (rbuf == NULL || rlen == 0); /* write-only */ 31 struct digitv_state *st = d->priv;
32 u8 sndbuf[7],rcvbuf[7]; 32 int ret, wo;
33 memset(sndbuf,0,7); memset(rcvbuf,0,7);
34 33
35 sndbuf[0] = cmd; 34 wo = (rbuf == NULL || rlen == 0); /* write-only */
36 sndbuf[1] = vv; 35
37 sndbuf[2] = wo ? wlen : rlen; 36 memset(st->sndbuf, 0, 7);
37 memset(st->rcvbuf, 0, 7);
38
39 st->sndbuf[0] = cmd;
40 st->sndbuf[1] = vv;
41 st->sndbuf[2] = wo ? wlen : rlen;
38 42
39 if (wo) { 43 if (wo) {
40 memcpy(&sndbuf[3],wbuf,wlen); 44 memcpy(&st->sndbuf[3], wbuf, wlen);
41 dvb_usb_generic_write(d,sndbuf,7); 45 ret = dvb_usb_generic_write(d, st->sndbuf, 7);
42 } else { 46 } else {
43 dvb_usb_generic_rw(d,sndbuf,7,rcvbuf,7,10); 47 ret = dvb_usb_generic_rw(d, st->sndbuf, 7, st->rcvbuf, 7, 10);
44 memcpy(rbuf,&rcvbuf[3],rlen); 48 memcpy(rbuf, &st->rcvbuf[3], rlen);
45 } 49 }
46 return 0; 50 return ret;
47} 51}
48 52
49/* I2C */ 53/* I2C */
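digitv_ctrl_msg() used to return 0 unconditionally; it now propagates the result of dvb_usb_generic_write()/dvb_usb_generic_rw(), so callers can notice transfer failures. A hypothetical caller taking advantage of that (the wrapper and its command byte are made up for illustration):

static int example_read_byte(struct dvb_usb_device *d, u8 cmd, u8 reg, u8 *val)
{
	int ret;

	ret = digitv_ctrl_msg(d, cmd, reg, NULL, 0, val, 1);
	if (ret < 0)
		err("control message %02x/%02x failed: %d", cmd, reg, ret);

	return ret;
}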
diff --git a/drivers/media/usb/dvb-usb/digitv.h b/drivers/media/usb/dvb-usb/digitv.h
index 908c09f4966b..581e09c25491 100644
--- a/drivers/media/usb/dvb-usb/digitv.h
+++ b/drivers/media/usb/dvb-usb/digitv.h
@@ -5,7 +5,10 @@
5#include "dvb-usb.h" 5#include "dvb-usb.h"
6 6
7struct digitv_state { 7struct digitv_state {
8 int is_nxt6000; 8 int is_nxt6000;
9
10 unsigned char sndbuf[7];
11 unsigned char rcvbuf[7];
9}; 12};
10 13
11/* protocol (from usblogging and the SDK: 14/* protocol (from usblogging and the SDK:
diff --git a/drivers/media/usb/dvb-usb/dtt200u-fe.c b/drivers/media/usb/dvb-usb/dtt200u-fe.c
index c09332bd99cb..f5c042baa254 100644
--- a/drivers/media/usb/dvb-usb/dtt200u-fe.c
+++ b/drivers/media/usb/dvb-usb/dtt200u-fe.c
@@ -18,17 +18,28 @@ struct dtt200u_fe_state {
18 18
19 struct dtv_frontend_properties fep; 19 struct dtv_frontend_properties fep;
20 struct dvb_frontend frontend; 20 struct dvb_frontend frontend;
21
22 unsigned char data[80];
23 struct mutex data_mutex;
21}; 24};
22 25
23static int dtt200u_fe_read_status(struct dvb_frontend *fe, 26static int dtt200u_fe_read_status(struct dvb_frontend *fe,
24 enum fe_status *stat) 27 enum fe_status *stat)
25{ 28{
26 struct dtt200u_fe_state *state = fe->demodulator_priv; 29 struct dtt200u_fe_state *state = fe->demodulator_priv;
27 u8 st = GET_TUNE_STATUS, b[3]; 30 int ret;
31
32 mutex_lock(&state->data_mutex);
33 state->data[0] = GET_TUNE_STATUS;
28 34
29 dvb_usb_generic_rw(state->d,&st,1,b,3,0); 35 ret = dvb_usb_generic_rw(state->d, state->data, 1, state->data, 3, 0);
36 if (ret < 0) {
37 *stat = 0;
38 mutex_unlock(&state->data_mutex);
39 return ret;
40 }
30 41
31 switch (b[0]) { 42 switch (state->data[0]) {
32 case 0x01: 43 case 0x01:
33 *stat = FE_HAS_SIGNAL | FE_HAS_CARRIER | 44 *stat = FE_HAS_SIGNAL | FE_HAS_CARRIER |
34 FE_HAS_VITERBI | FE_HAS_SYNC | FE_HAS_LOCK; 45 FE_HAS_VITERBI | FE_HAS_SYNC | FE_HAS_LOCK;
@@ -41,51 +52,86 @@ static int dtt200u_fe_read_status(struct dvb_frontend *fe,
41 *stat = 0; 52 *stat = 0;
42 break; 53 break;
43 } 54 }
55 mutex_unlock(&state->data_mutex);
44 return 0; 56 return 0;
45} 57}
46 58
47static int dtt200u_fe_read_ber(struct dvb_frontend* fe, u32 *ber) 59static int dtt200u_fe_read_ber(struct dvb_frontend* fe, u32 *ber)
48{ 60{
49 struct dtt200u_fe_state *state = fe->demodulator_priv; 61 struct dtt200u_fe_state *state = fe->demodulator_priv;
50 u8 bw = GET_VIT_ERR_CNT,b[3]; 62 int ret;
51 dvb_usb_generic_rw(state->d,&bw,1,b,3,0); 63
52 *ber = (b[0] << 16) | (b[1] << 8) | b[2]; 64 mutex_lock(&state->data_mutex);
53 return 0; 65 state->data[0] = GET_VIT_ERR_CNT;
66
67 ret = dvb_usb_generic_rw(state->d, state->data, 1, state->data, 3, 0);
68 if (ret >= 0)
69 *ber = (state->data[0] << 16) | (state->data[1] << 8) | state->data[2];
70
71 mutex_unlock(&state->data_mutex);
72 return ret;
54} 73}
55 74
56static int dtt200u_fe_read_unc_blocks(struct dvb_frontend* fe, u32 *unc) 75static int dtt200u_fe_read_unc_blocks(struct dvb_frontend* fe, u32 *unc)
57{ 76{
58 struct dtt200u_fe_state *state = fe->demodulator_priv; 77 struct dtt200u_fe_state *state = fe->demodulator_priv;
59 u8 bw = GET_RS_UNCOR_BLK_CNT,b[2]; 78 int ret;
60 79
61 dvb_usb_generic_rw(state->d,&bw,1,b,2,0); 80 mutex_lock(&state->data_mutex);
62 *unc = (b[0] << 8) | b[1]; 81 state->data[0] = GET_RS_UNCOR_BLK_CNT;
63 return 0; 82
83 ret = dvb_usb_generic_rw(state->d, state->data, 1, state->data, 2, 0);
84 if (ret >= 0)
85 *unc = (state->data[0] << 8) | state->data[1];
86
87 mutex_unlock(&state->data_mutex);
88 return ret;
64} 89}
65 90
66static int dtt200u_fe_read_signal_strength(struct dvb_frontend* fe, u16 *strength) 91static int dtt200u_fe_read_signal_strength(struct dvb_frontend* fe, u16 *strength)
67{ 92{
68 struct dtt200u_fe_state *state = fe->demodulator_priv; 93 struct dtt200u_fe_state *state = fe->demodulator_priv;
69 u8 bw = GET_AGC, b; 94 int ret;
70 dvb_usb_generic_rw(state->d,&bw,1,&b,1,0); 95
71 *strength = (b << 8) | b; 96 mutex_lock(&state->data_mutex);
72 return 0; 97 state->data[0] = GET_AGC;
98
99 ret = dvb_usb_generic_rw(state->d, state->data, 1, state->data, 1, 0);
100 if (ret >= 0)
101 *strength = (state->data[0] << 8) | state->data[0];
102
103 mutex_unlock(&state->data_mutex);
104 return ret;
73} 105}
74 106
75static int dtt200u_fe_read_snr(struct dvb_frontend* fe, u16 *snr) 107static int dtt200u_fe_read_snr(struct dvb_frontend* fe, u16 *snr)
76{ 108{
77 struct dtt200u_fe_state *state = fe->demodulator_priv; 109 struct dtt200u_fe_state *state = fe->demodulator_priv;
78 u8 bw = GET_SNR,br; 110 int ret;
79 dvb_usb_generic_rw(state->d,&bw,1,&br,1,0); 111
80 *snr = ~((br << 8) | br); 112 mutex_lock(&state->data_mutex);
81 return 0; 113 state->data[0] = GET_SNR;
114
115 ret = dvb_usb_generic_rw(state->d, state->data, 1, state->data, 1, 0);
116 if (ret >= 0)
117 *snr = ~((state->data[0] << 8) | state->data[0]);
118
119 mutex_unlock(&state->data_mutex);
120 return ret;
82} 121}
83 122
84static int dtt200u_fe_init(struct dvb_frontend* fe) 123static int dtt200u_fe_init(struct dvb_frontend* fe)
85{ 124{
86 struct dtt200u_fe_state *state = fe->demodulator_priv; 125 struct dtt200u_fe_state *state = fe->demodulator_priv;
87 u8 b = SET_INIT; 126 int ret;
88 return dvb_usb_generic_write(state->d,&b,1); 127
128 mutex_lock(&state->data_mutex);
129 state->data[0] = SET_INIT;
130
131 ret = dvb_usb_generic_write(state->d, state->data, 1);
132 mutex_unlock(&state->data_mutex);
133
134 return ret;
89} 135}
90 136
91static int dtt200u_fe_sleep(struct dvb_frontend* fe) 137static int dtt200u_fe_sleep(struct dvb_frontend* fe)
@@ -105,39 +151,40 @@ static int dtt200u_fe_set_frontend(struct dvb_frontend *fe)
105{ 151{
106 struct dtv_frontend_properties *fep = &fe->dtv_property_cache; 152 struct dtv_frontend_properties *fep = &fe->dtv_property_cache;
107 struct dtt200u_fe_state *state = fe->demodulator_priv; 153 struct dtt200u_fe_state *state = fe->demodulator_priv;
108 int i; 154 int ret;
109 enum fe_status st;
110 u16 freq = fep->frequency / 250000; 155 u16 freq = fep->frequency / 250000;
111 u8 bwbuf[2] = { SET_BANDWIDTH, 0 },freqbuf[3] = { SET_RF_FREQ, 0, 0 };
112 156
157 mutex_lock(&state->data_mutex);
158 state->data[0] = SET_BANDWIDTH;
113 switch (fep->bandwidth_hz) { 159 switch (fep->bandwidth_hz) {
114 case 8000000: 160 case 8000000:
115 bwbuf[1] = 8; 161 state->data[1] = 8;
116 break; 162 break;
117 case 7000000: 163 case 7000000:
118 bwbuf[1] = 7; 164 state->data[1] = 7;
119 break; 165 break;
120 case 6000000: 166 case 6000000:
121 bwbuf[1] = 6; 167 state->data[1] = 6;
122 break; 168 break;
123 default: 169 default:
124 return -EINVAL; 170 ret = -EINVAL;
171 goto ret;
125 } 172 }
126 173
127 dvb_usb_generic_write(state->d,bwbuf,2); 174 ret = dvb_usb_generic_write(state->d, state->data, 2);
175 if (ret < 0)
176 goto ret;
128 177
129 freqbuf[1] = freq & 0xff; 178 state->data[0] = SET_RF_FREQ;
130 freqbuf[2] = (freq >> 8) & 0xff; 179 state->data[1] = freq & 0xff;
131 dvb_usb_generic_write(state->d,freqbuf,3); 180 state->data[2] = (freq >> 8) & 0xff;
181 ret = dvb_usb_generic_write(state->d, state->data, 3);
182 if (ret < 0)
183 goto ret;
132 184
133 for (i = 0; i < 30; i++) { 185ret:
134 msleep(20); 186 mutex_unlock(&state->data_mutex);
135 dtt200u_fe_read_status(fe, &st); 187 return ret;
136 if (st & FE_TIMEDOUT)
137 continue;
138 }
139
140 return 0;
141} 188}
142 189
143static int dtt200u_fe_get_frontend(struct dvb_frontend* fe, 190static int dtt200u_fe_get_frontend(struct dvb_frontend* fe,
@@ -169,6 +216,7 @@ struct dvb_frontend* dtt200u_fe_attach(struct dvb_usb_device *d)
169 deb_info("attaching frontend dtt200u\n"); 216 deb_info("attaching frontend dtt200u\n");
170 217
171 state->d = d; 218 state->d = d;
219 mutex_init(&state->data_mutex);
172 220
173 memcpy(&state->frontend.ops,&dtt200u_fe_ops,sizeof(struct dvb_frontend_ops)); 221 memcpy(&state->frontend.ops,&dtt200u_fe_ops,sizeof(struct dvb_frontend_ops));
174 state->frontend.demodulator_priv = state; 222 state->frontend.demodulator_priv = state;
diff --git a/drivers/media/usb/dvb-usb/dtt200u.c b/drivers/media/usb/dvb-usb/dtt200u.c
index d2a01b50af0d..f88572c7ae7c 100644
--- a/drivers/media/usb/dvb-usb/dtt200u.c
+++ b/drivers/media/usb/dvb-usb/dtt200u.c
@@ -20,75 +20,114 @@ MODULE_PARM_DESC(debug, "set debugging level (1=info,xfer=2 (or-able))." DVB_USB
20 20
21DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr); 21DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr);
22 22
23struct dtt200u_state {
24 unsigned char data[80];
25 struct mutex data_mutex;
26};
27
23static int dtt200u_power_ctrl(struct dvb_usb_device *d, int onoff) 28static int dtt200u_power_ctrl(struct dvb_usb_device *d, int onoff)
24{ 29{
25 u8 b = SET_INIT; 30 struct dtt200u_state *st = d->priv;
31 int ret = 0;
32
33 mutex_lock(&st->data_mutex);
34
35 st->data[0] = SET_INIT;
26 36
27 if (onoff) 37 if (onoff)
28 dvb_usb_generic_write(d,&b,2); 38 ret = dvb_usb_generic_write(d, st->data, 2);
29 39
30 return 0; 40 mutex_unlock(&st->data_mutex);
41 return ret;
31} 42}
32 43
33static int dtt200u_streaming_ctrl(struct dvb_usb_adapter *adap, int onoff) 44static int dtt200u_streaming_ctrl(struct dvb_usb_adapter *adap, int onoff)
34{ 45{
35 u8 b_streaming[2] = { SET_STREAMING, onoff }; 46 struct dtt200u_state *st = adap->dev->priv;
36 u8 b_rst_pid = RESET_PID_FILTER; 47 int ret;
37 48
38 dvb_usb_generic_write(adap->dev, b_streaming, 2); 49 mutex_lock(&st->data_mutex);
50 st->data[0] = SET_STREAMING;
51 st->data[1] = onoff;
39 52
40 if (onoff == 0) 53 ret = dvb_usb_generic_write(adap->dev, st->data, 2);
41 dvb_usb_generic_write(adap->dev, &b_rst_pid, 1); 54 if (ret < 0)
42 return 0; 55 goto ret;
56
57 if (onoff)
58 goto ret;
59
60 st->data[0] = RESET_PID_FILTER;
61 ret = dvb_usb_generic_write(adap->dev, st->data, 1);
62
63ret:
64 mutex_unlock(&st->data_mutex);
65
66 return ret;
43} 67}
44 68
45static int dtt200u_pid_filter(struct dvb_usb_adapter *adap, int index, u16 pid, int onoff) 69static int dtt200u_pid_filter(struct dvb_usb_adapter *adap, int index, u16 pid, int onoff)
46{ 70{
47 u8 b_pid[4]; 71 struct dtt200u_state *st = adap->dev->priv;
72 int ret;
73
48 pid = onoff ? pid : 0; 74 pid = onoff ? pid : 0;
49 75
50 b_pid[0] = SET_PID_FILTER; 76 mutex_lock(&st->data_mutex);
51 b_pid[1] = index; 77 st->data[0] = SET_PID_FILTER;
52 b_pid[2] = pid & 0xff; 78 st->data[1] = index;
53 b_pid[3] = (pid >> 8) & 0x1f; 79 st->data[2] = pid & 0xff;
80 st->data[3] = (pid >> 8) & 0x1f;
54 81
55 return dvb_usb_generic_write(adap->dev, b_pid, 4); 82 ret = dvb_usb_generic_write(adap->dev, st->data, 4);
83 mutex_unlock(&st->data_mutex);
84
85 return ret;
56} 86}
57 87
58static int dtt200u_rc_query(struct dvb_usb_device *d) 88static int dtt200u_rc_query(struct dvb_usb_device *d)
59{ 89{
60 u8 key[5],cmd = GET_RC_CODE; 90 struct dtt200u_state *st = d->priv;
61 u32 scancode; 91 u32 scancode;
92 int ret;
93
94 mutex_lock(&st->data_mutex);
95 st->data[0] = GET_RC_CODE;
96
97 ret = dvb_usb_generic_rw(d, st->data, 1, st->data, 5, 0);
98 if (ret < 0)
99 goto ret;
62 100
63 dvb_usb_generic_rw(d,&cmd,1,key,5,0); 101 if (st->data[0] == 1) {
64 if (key[0] == 1) {
65 enum rc_type proto = RC_TYPE_NEC; 102 enum rc_type proto = RC_TYPE_NEC;
66 103
67 scancode = key[1]; 104 scancode = st->data[1];
68 if ((u8) ~key[1] != key[2]) { 105 if ((u8) ~st->data[1] != st->data[2]) {
69 /* Extended NEC */ 106 /* Extended NEC */
70 scancode = scancode << 8; 107 scancode = scancode << 8;
71 scancode |= key[2]; 108 scancode |= st->data[2];
72 proto = RC_TYPE_NECX; 109 proto = RC_TYPE_NECX;
73 } 110 }
74 scancode = scancode << 8; 111 scancode = scancode << 8;
75 scancode |= key[3]; 112 scancode |= st->data[3];
76 113
77 /* Check command checksum is ok */ 114 /* Check command checksum is ok */
78 if ((u8) ~key[3] == key[4]) 115 if ((u8) ~st->data[3] == st->data[4])
79 rc_keydown(d->rc_dev, proto, scancode, 0); 116 rc_keydown(d->rc_dev, proto, scancode, 0);
80 else 117 else
81 rc_keyup(d->rc_dev); 118 rc_keyup(d->rc_dev);
82 } else if (key[0] == 2) { 119 } else if (st->data[0] == 2) {
83 rc_repeat(d->rc_dev); 120 rc_repeat(d->rc_dev);
84 } else { 121 } else {
85 rc_keyup(d->rc_dev); 122 rc_keyup(d->rc_dev);
86 } 123 }
87 124
88 if (key[0] != 0) 125 if (st->data[0] != 0)
89 deb_info("key: %*ph\n", 5, key); 126 deb_info("st->data: %*ph\n", 5, st->data);
90 127
91 return 0; 128ret:
129 mutex_unlock(&st->data_mutex);
130 return ret;
92} 131}
93 132
94static int dtt200u_frontend_attach(struct dvb_usb_adapter *adap) 133static int dtt200u_frontend_attach(struct dvb_usb_adapter *adap)
@@ -106,17 +145,24 @@ static struct dvb_usb_device_properties wt220u_miglia_properties;
106static int dtt200u_usb_probe(struct usb_interface *intf, 145static int dtt200u_usb_probe(struct usb_interface *intf,
107 const struct usb_device_id *id) 146 const struct usb_device_id *id)
108{ 147{
148 struct dvb_usb_device *d;
149 struct dtt200u_state *st;
150
109 if (0 == dvb_usb_device_init(intf, &dtt200u_properties, 151 if (0 == dvb_usb_device_init(intf, &dtt200u_properties,
110 THIS_MODULE, NULL, adapter_nr) || 152 THIS_MODULE, &d, adapter_nr) ||
111 0 == dvb_usb_device_init(intf, &wt220u_properties, 153 0 == dvb_usb_device_init(intf, &wt220u_properties,
112 THIS_MODULE, NULL, adapter_nr) || 154 THIS_MODULE, &d, adapter_nr) ||
113 0 == dvb_usb_device_init(intf, &wt220u_fc_properties, 155 0 == dvb_usb_device_init(intf, &wt220u_fc_properties,
114 THIS_MODULE, NULL, adapter_nr) || 156 THIS_MODULE, &d, adapter_nr) ||
115 0 == dvb_usb_device_init(intf, &wt220u_zl0353_properties, 157 0 == dvb_usb_device_init(intf, &wt220u_zl0353_properties,
116 THIS_MODULE, NULL, adapter_nr) || 158 THIS_MODULE, &d, adapter_nr) ||
117 0 == dvb_usb_device_init(intf, &wt220u_miglia_properties, 159 0 == dvb_usb_device_init(intf, &wt220u_miglia_properties,
118 THIS_MODULE, NULL, adapter_nr)) 160 THIS_MODULE, &d, adapter_nr)) {
161 st = d->priv;
162 mutex_init(&st->data_mutex);
163
119 return 0; 164 return 0;
165 }
120 166
121 return -ENODEV; 167 return -ENODEV;
122} 168}
@@ -140,6 +186,8 @@ static struct dvb_usb_device_properties dtt200u_properties = {
140 .usb_ctrl = CYPRESS_FX2, 186 .usb_ctrl = CYPRESS_FX2,
141 .firmware = "dvb-usb-dtt200u-01.fw", 187 .firmware = "dvb-usb-dtt200u-01.fw",
142 188
189 .size_of_priv = sizeof(struct dtt200u_state),
190
143 .num_adapters = 1, 191 .num_adapters = 1,
144 .adapter = { 192 .adapter = {
145 { 193 {
@@ -190,6 +238,8 @@ static struct dvb_usb_device_properties wt220u_properties = {
190 .usb_ctrl = CYPRESS_FX2, 238 .usb_ctrl = CYPRESS_FX2,
191 .firmware = "dvb-usb-wt220u-02.fw", 239 .firmware = "dvb-usb-wt220u-02.fw",
192 240
241 .size_of_priv = sizeof(struct dtt200u_state),
242
193 .num_adapters = 1, 243 .num_adapters = 1,
194 .adapter = { 244 .adapter = {
195 { 245 {
@@ -240,6 +290,8 @@ static struct dvb_usb_device_properties wt220u_fc_properties = {
240 .usb_ctrl = CYPRESS_FX2, 290 .usb_ctrl = CYPRESS_FX2,
241 .firmware = "dvb-usb-wt220u-fc03.fw", 291 .firmware = "dvb-usb-wt220u-fc03.fw",
242 292
293 .size_of_priv = sizeof(struct dtt200u_state),
294
243 .num_adapters = 1, 295 .num_adapters = 1,
244 .adapter = { 296 .adapter = {
245 { 297 {
@@ -290,6 +342,8 @@ static struct dvb_usb_device_properties wt220u_zl0353_properties = {
290 .usb_ctrl = CYPRESS_FX2, 342 .usb_ctrl = CYPRESS_FX2,
291 .firmware = "dvb-usb-wt220u-zl0353-01.fw", 343 .firmware = "dvb-usb-wt220u-zl0353-01.fw",
292 344
345 .size_of_priv = sizeof(struct dtt200u_state),
346
293 .num_adapters = 1, 347 .num_adapters = 1,
294 .adapter = { 348 .adapter = {
295 { 349 {
@@ -340,6 +394,8 @@ static struct dvb_usb_device_properties wt220u_miglia_properties = {
340 .usb_ctrl = CYPRESS_FX2, 394 .usb_ctrl = CYPRESS_FX2,
341 .firmware = "dvb-usb-wt220u-miglia-01.fw", 395 .firmware = "dvb-usb-wt220u-miglia-01.fw",
342 396
397 .size_of_priv = sizeof(struct dtt200u_state),
398
343 .num_adapters = 1, 399 .num_adapters = 1,
344 .generic_bulk_ctrl_endpoint = 0x01, 400 .generic_bulk_ctrl_endpoint = 0x01,
345 401
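Each of the five dtt200u/wt220u property tables above gains .size_of_priv = sizeof(struct dtt200u_state): the dvb-usb core allocates d->priv with exactly that size during dvb_usb_device_init(), so every table whose callbacks use the shared state must declare it. Sketched with only the relevant fields (the firmware name is a placeholder):

static struct dvb_usb_device_properties example_properties = {
	.usb_ctrl     = CYPRESS_FX2,
	.firmware     = "dvb-usb-example-01.fw",	/* placeholder name */
	.size_of_priv = sizeof(struct dtt200u_state),	/* backs d->priv */
	/* adapters, endpoints, RC setup, etc. as in the real tables */
};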
diff --git a/drivers/media/usb/dvb-usb/dtv5100.c b/drivers/media/usb/dvb-usb/dtv5100.c
index 3d11df41cac0..c60fb54f445f 100644
--- a/drivers/media/usb/dvb-usb/dtv5100.c
+++ b/drivers/media/usb/dvb-usb/dtv5100.c
@@ -31,9 +31,14 @@ module_param_named(debug, dvb_usb_dtv5100_debug, int, 0644);
31MODULE_PARM_DESC(debug, "set debugging level" DVB_USB_DEBUG_STATUS); 31MODULE_PARM_DESC(debug, "set debugging level" DVB_USB_DEBUG_STATUS);
32DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr); 32DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr);
33 33
34struct dtv5100_state {
35 unsigned char data[80];
36};
37
34static int dtv5100_i2c_msg(struct dvb_usb_device *d, u8 addr, 38static int dtv5100_i2c_msg(struct dvb_usb_device *d, u8 addr,
35 u8 *wbuf, u16 wlen, u8 *rbuf, u16 rlen) 39 u8 *wbuf, u16 wlen, u8 *rbuf, u16 rlen)
36{ 40{
41 struct dtv5100_state *st = d->priv;
37 u8 request; 42 u8 request;
38 u8 type; 43 u8 type;
39 u16 value; 44 u16 value;
@@ -60,9 +65,10 @@ static int dtv5100_i2c_msg(struct dvb_usb_device *d, u8 addr,
60 } 65 }
61 index = (addr << 8) + wbuf[0]; 66 index = (addr << 8) + wbuf[0];
62 67
68 memcpy(st->data, rbuf, rlen);
63 msleep(1); /* avoid I2C errors */ 69 msleep(1); /* avoid I2C errors */
64 return usb_control_msg(d->udev, usb_rcvctrlpipe(d->udev, 0), request, 70 return usb_control_msg(d->udev, usb_rcvctrlpipe(d->udev, 0), request,
65 type, value, index, rbuf, rlen, 71 type, value, index, st->data, rlen,
66 DTV5100_USB_TIMEOUT); 72 DTV5100_USB_TIMEOUT);
67} 73}
68 74
@@ -176,7 +182,7 @@ static struct dvb_usb_device_properties dtv5100_properties = {
176 .caps = DVB_USB_IS_AN_I2C_ADAPTER, 182 .caps = DVB_USB_IS_AN_I2C_ADAPTER,
177 .usb_ctrl = DEVICE_SPECIFIC, 183 .usb_ctrl = DEVICE_SPECIFIC,
178 184
179 .size_of_priv = 0, 185 .size_of_priv = sizeof(struct dtv5100_state),
180 186
181 .num_adapters = 1, 187 .num_adapters = 1,
182 .adapter = {{ 188 .adapter = {{
diff --git a/drivers/media/usb/dvb-usb/dw2102.c b/drivers/media/usb/dvb-usb/dw2102.c
index 5fb0c650926e..2c720cb2fb00 100644
--- a/drivers/media/usb/dvb-usb/dw2102.c
+++ b/drivers/media/usb/dvb-usb/dw2102.c
@@ -852,7 +852,7 @@ static int su3000_power_ctrl(struct dvb_usb_device *d, int i)
852 if (i && !state->initialized) { 852 if (i && !state->initialized) {
853 state->initialized = 1; 853 state->initialized = 1;
854 /* reset board */ 854 /* reset board */
855 dvb_usb_generic_rw(d, obuf, 2, NULL, 0, 0); 855 return dvb_usb_generic_rw(d, obuf, 2, NULL, 0, 0);
856 } 856 }
857 857
858 return 0; 858 return 0;
diff --git a/drivers/media/usb/dvb-usb/gp8psk.c b/drivers/media/usb/dvb-usb/gp8psk.c
index 5d0384dd45b5..adfd76491451 100644
--- a/drivers/media/usb/dvb-usb/gp8psk.c
+++ b/drivers/media/usb/dvb-usb/gp8psk.c
@@ -24,6 +24,10 @@ MODULE_PARM_DESC(debug, "set debugging level (1=info,xfer=2,rc=4 (or-able))." DV
24 24
25DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr); 25DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr);
26 26
27struct gp8psk_state {
28 unsigned char data[80];
29};
30
27static int gp8psk_get_fw_version(struct dvb_usb_device *d, u8 *fw_vers) 31static int gp8psk_get_fw_version(struct dvb_usb_device *d, u8 *fw_vers)
28{ 32{
29 return (gp8psk_usb_in_op(d, GET_FW_VERS, 0, 0, fw_vers, 6)); 33 return (gp8psk_usb_in_op(d, GET_FW_VERS, 0, 0, fw_vers, 6));
@@ -53,17 +57,22 @@ static void gp8psk_info(struct dvb_usb_device *d)
53 57
54int gp8psk_usb_in_op(struct dvb_usb_device *d, u8 req, u16 value, u16 index, u8 *b, int blen) 58int gp8psk_usb_in_op(struct dvb_usb_device *d, u8 req, u16 value, u16 index, u8 *b, int blen)
55{ 59{
60 struct gp8psk_state *st = d->priv;
56 int ret = 0,try = 0; 61 int ret = 0,try = 0;
57 62
63 if (blen > sizeof(st->data))
64 return -EIO;
65
58 if ((ret = mutex_lock_interruptible(&d->usb_mutex))) 66 if ((ret = mutex_lock_interruptible(&d->usb_mutex)))
59 return ret; 67 return ret;
60 68
61 while (ret >= 0 && ret != blen && try < 3) { 69 while (ret >= 0 && ret != blen && try < 3) {
70 memcpy(st->data, b, blen);
62 ret = usb_control_msg(d->udev, 71 ret = usb_control_msg(d->udev,
63 usb_rcvctrlpipe(d->udev,0), 72 usb_rcvctrlpipe(d->udev,0),
64 req, 73 req,
65 USB_TYPE_VENDOR | USB_DIR_IN, 74 USB_TYPE_VENDOR | USB_DIR_IN,
66 value,index,b,blen, 75 value, index, st->data, blen,
67 2000); 76 2000);
68 deb_info("reading number %d (ret: %d)\n",try,ret); 77 deb_info("reading number %d (ret: %d)\n",try,ret);
69 try++; 78 try++;
@@ -86,19 +95,24 @@ int gp8psk_usb_in_op(struct dvb_usb_device *d, u8 req, u16 value, u16 index, u8
86int gp8psk_usb_out_op(struct dvb_usb_device *d, u8 req, u16 value, 95int gp8psk_usb_out_op(struct dvb_usb_device *d, u8 req, u16 value,
87 u16 index, u8 *b, int blen) 96 u16 index, u8 *b, int blen)
88{ 97{
98 struct gp8psk_state *st = d->priv;
89 int ret; 99 int ret;
90 100
91 deb_xfer("out: req. %x, val: %x, ind: %x, buffer: ",req,value,index); 101 deb_xfer("out: req. %x, val: %x, ind: %x, buffer: ",req,value,index);
92 debug_dump(b,blen,deb_xfer); 102 debug_dump(b,blen,deb_xfer);
93 103
104 if (blen > sizeof(st->data))
105 return -EIO;
106
94 if ((ret = mutex_lock_interruptible(&d->usb_mutex))) 107 if ((ret = mutex_lock_interruptible(&d->usb_mutex)))
95 return ret; 108 return ret;
96 109
110 memcpy(st->data, b, blen);
97 if (usb_control_msg(d->udev, 111 if (usb_control_msg(d->udev,
98 usb_sndctrlpipe(d->udev,0), 112 usb_sndctrlpipe(d->udev,0),
99 req, 113 req,
100 USB_TYPE_VENDOR | USB_DIR_OUT, 114 USB_TYPE_VENDOR | USB_DIR_OUT,
101 value,index,b,blen, 115 value, index, st->data, blen,
102 2000) != blen) { 116 2000) != blen) {
103 warn("usb out operation failed."); 117 warn("usb out operation failed.");
104 ret = -EIO; 118 ret = -EIO;
@@ -143,6 +157,11 @@ static int gp8psk_load_bcm4500fw(struct dvb_usb_device *d)
143 err("failed to load bcm4500 firmware."); 157 err("failed to load bcm4500 firmware.");
144 goto out_free; 158 goto out_free;
145 } 159 }
160 if (buflen > 64) {
161 err("firmware chunk size bigger than 64 bytes.");
162 goto out_free;
163 }
164
146 memcpy(buf, ptr, buflen); 165 memcpy(buf, ptr, buflen);
147 if (dvb_usb_generic_write(d, buf, buflen)) { 166 if (dvb_usb_generic_write(d, buf, buflen)) {
148 err("failed to load bcm4500 firmware."); 167 err("failed to load bcm4500 firmware.");
@@ -265,6 +284,8 @@ static struct dvb_usb_device_properties gp8psk_properties = {
265 .usb_ctrl = CYPRESS_FX2, 284 .usb_ctrl = CYPRESS_FX2,
266 .firmware = "dvb-usb-gp8psk-01.fw", 285 .firmware = "dvb-usb-gp8psk-01.fw",
267 286
287 .size_of_priv = sizeof(struct gp8psk_state),
288
268 .num_adapters = 1, 289 .num_adapters = 1,
269 .adapter = { 290 .adapter = {
270 { 291 {
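gp8psk routes both control-message directions through the new 80-byte state buffer and rejects anything that does not fit with -EIO, and the firmware loader gets a matching 64-byte chunk limit. The bounce-and-validate step for the OUT direction, as a standalone sketch (helper name invented, error handling simplified):

static int example_usb_out(struct dvb_usb_device *d, u8 req, u16 value,
			   u16 index, const u8 *b, int blen)
{
	struct gp8psk_state *st = d->priv;
	int ret;

	if (blen > sizeof(st->data))	/* would overflow the bounce buffer */
		return -EIO;

	memcpy(st->data, b, blen);
	ret = usb_control_msg(d->udev, usb_sndctrlpipe(d->udev, 0), req,
			      USB_TYPE_VENDOR | USB_DIR_OUT,
			      value, index, st->data, blen, 2000);

	return ret == blen ? 0 : -EIO;
}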
diff --git a/drivers/media/usb/dvb-usb/nova-t-usb2.c b/drivers/media/usb/dvb-usb/nova-t-usb2.c
index fc7569e2728d..1babd3341910 100644
--- a/drivers/media/usb/dvb-usb/nova-t-usb2.c
+++ b/drivers/media/usb/dvb-usb/nova-t-usb2.c
@@ -74,22 +74,31 @@ static struct rc_map_table rc_map_haupp_table[] = {
74 */ 74 */
75static int nova_t_rc_query(struct dvb_usb_device *d, u32 *event, int *state) 75static int nova_t_rc_query(struct dvb_usb_device *d, u32 *event, int *state)
76{ 76{
77 u8 key[5],cmd[2] = { DIBUSB_REQ_POLL_REMOTE, 0x35 }, data,toggle,custom; 77 u8 *buf, data, toggle, custom;
78 u16 raw; 78 u16 raw;
79 int i; 79 int i, ret;
80 struct dibusb_device_state *st = d->priv; 80 struct dibusb_device_state *st = d->priv;
81 81
82 dvb_usb_generic_rw(d,cmd,2,key,5,0); 82 buf = kmalloc(5, GFP_KERNEL);
83 if (!buf)
84 return -ENOMEM;
85
86 buf[0] = DIBUSB_REQ_POLL_REMOTE;
87 buf[1] = 0x35;
88 ret = dvb_usb_generic_rw(d, buf, 2, buf, 5, 0);
89 if (ret < 0)
90 goto ret;
83 91
84 *state = REMOTE_NO_KEY_PRESSED; 92 *state = REMOTE_NO_KEY_PRESSED;
85 switch (key[0]) { 93 switch (buf[0]) {
86 case DIBUSB_RC_HAUPPAUGE_KEY_PRESSED: 94 case DIBUSB_RC_HAUPPAUGE_KEY_PRESSED:
87 raw = ((key[1] << 8) | key[2]) >> 3; 95 raw = ((buf[1] << 8) | buf[2]) >> 3;
88 toggle = !!(raw & 0x800); 96 toggle = !!(raw & 0x800);
89 data = raw & 0x3f; 97 data = raw & 0x3f;
90 custom = (raw >> 6) & 0x1f; 98 custom = (raw >> 6) & 0x1f;
91 99
92 deb_rc("raw key code 0x%02x, 0x%02x, 0x%02x to c: %02x d: %02x toggle: %d\n",key[1],key[2],key[3],custom,data,toggle); 100 deb_rc("raw key code 0x%02x, 0x%02x, 0x%02x to c: %02x d: %02x toggle: %d\n",
101 buf[1], buf[2], buf[3], custom, data, toggle);
93 102
94 for (i = 0; i < ARRAY_SIZE(rc_map_haupp_table); i++) { 103 for (i = 0; i < ARRAY_SIZE(rc_map_haupp_table); i++) {
95 if (rc5_data(&rc_map_haupp_table[i]) == data && 104 if (rc5_data(&rc_map_haupp_table[i]) == data &&
@@ -117,7 +126,9 @@ static int nova_t_rc_query(struct dvb_usb_device *d, u32 *event, int *state)
117 break; 126 break;
118 } 127 }
119 128
120 return 0; 129ret:
130 kfree(buf);
131 return ret;
121} 132}
122 133
123static int nova_t_read_mac_address (struct dvb_usb_device *d, u8 mac[6]) 134static int nova_t_read_mac_address (struct dvb_usb_device *d, u8 mac[6])
diff --git a/drivers/media/usb/dvb-usb/pctv452e.c b/drivers/media/usb/dvb-usb/pctv452e.c
index c05de1b088a4..07fa08be9e99 100644
--- a/drivers/media/usb/dvb-usb/pctv452e.c
+++ b/drivers/media/usb/dvb-usb/pctv452e.c
@@ -97,48 +97,53 @@ struct pctv452e_state {
97 u8 c; /* transaction counter, wraps around... */ 97 u8 c; /* transaction counter, wraps around... */
98 u8 initialized; /* set to 1 if 0x15 has been sent */ 98 u8 initialized; /* set to 1 if 0x15 has been sent */
99 u16 last_rc_key; 99 u16 last_rc_key;
100
101 unsigned char data[80];
100}; 102};
101 103
102static int tt3650_ci_msg(struct dvb_usb_device *d, u8 cmd, u8 *data, 104static int tt3650_ci_msg(struct dvb_usb_device *d, u8 cmd, u8 *data,
103 unsigned int write_len, unsigned int read_len) 105 unsigned int write_len, unsigned int read_len)
104{ 106{
105 struct pctv452e_state *state = (struct pctv452e_state *)d->priv; 107 struct pctv452e_state *state = (struct pctv452e_state *)d->priv;
106 u8 buf[64];
107 u8 id; 108 u8 id;
108 unsigned int rlen; 109 unsigned int rlen;
109 int ret; 110 int ret;
110 111
111 BUG_ON(NULL == data && 0 != (write_len | read_len)); 112 if (!data || (write_len > 64 - 4) || (read_len > 64 - 4)) {
112 BUG_ON(write_len > 64 - 4); 113 err("%s: transfer data invalid", __func__);
113 BUG_ON(read_len > 64 - 4); 114 return -EIO;
115 }
114 116
117 mutex_lock(&state->ca_mutex);
115 id = state->c++; 118 id = state->c++;
116 119
117 buf[0] = SYNC_BYTE_OUT; 120 state->data[0] = SYNC_BYTE_OUT;
118 buf[1] = id; 121 state->data[1] = id;
119 buf[2] = cmd; 122 state->data[2] = cmd;
120 buf[3] = write_len; 123 state->data[3] = write_len;
121 124
122 memcpy(buf + 4, data, write_len); 125 memcpy(state->data + 4, data, write_len);
123 126
124 rlen = (read_len > 0) ? 64 : 0; 127 rlen = (read_len > 0) ? 64 : 0;
125 ret = dvb_usb_generic_rw(d, buf, 4 + write_len, 128 ret = dvb_usb_generic_rw(d, state->data, 4 + write_len,
126 buf, rlen, /* delay_ms */ 0); 129 state->data, rlen, /* delay_ms */ 0);
127 if (0 != ret) 130 if (0 != ret)
128 goto failed; 131 goto failed;
129 132
130 ret = -EIO; 133 ret = -EIO;
131 if (SYNC_BYTE_IN != buf[0] || id != buf[1]) 134 if (SYNC_BYTE_IN != state->data[0] || id != state->data[1])
132 goto failed; 135 goto failed;
133 136
134 memcpy(data, buf + 4, read_len); 137 memcpy(data, state->data + 4, read_len);
135 138
139 mutex_unlock(&state->ca_mutex);
136 return 0; 140 return 0;
137 141
138failed: 142failed:
139 err("CI error %d; %02X %02X %02X -> %*ph.", 143 err("CI error %d; %02X %02X %02X -> %*ph.",
140 ret, SYNC_BYTE_OUT, id, cmd, 3, buf); 144 ret, SYNC_BYTE_OUT, id, cmd, 3, state->data);
141 145
146 mutex_unlock(&state->ca_mutex);
142 return ret; 147 return ret;
143} 148}
144 149
@@ -405,52 +410,53 @@ static int pctv452e_i2c_msg(struct dvb_usb_device *d, u8 addr,
405 u8 *rcv_buf, u8 rcv_len) 410 u8 *rcv_buf, u8 rcv_len)
406{ 411{
407 struct pctv452e_state *state = (struct pctv452e_state *)d->priv; 412 struct pctv452e_state *state = (struct pctv452e_state *)d->priv;
408 u8 buf[64];
409 u8 id; 413 u8 id;
410 int ret; 414 int ret;
411 415
416 mutex_lock(&state->ca_mutex);
412 id = state->c++; 417 id = state->c++;
413 418
414 ret = -EINVAL; 419 ret = -EINVAL;
415 if (snd_len > 64 - 7 || rcv_len > 64 - 7) 420 if (snd_len > 64 - 7 || rcv_len > 64 - 7)
416 goto failed; 421 goto failed;
417 422
418 buf[0] = SYNC_BYTE_OUT; 423 state->data[0] = SYNC_BYTE_OUT;
419 buf[1] = id; 424 state->data[1] = id;
420 buf[2] = PCTV_CMD_I2C; 425 state->data[2] = PCTV_CMD_I2C;
421 buf[3] = snd_len + 3; 426 state->data[3] = snd_len + 3;
422 buf[4] = addr << 1; 427 state->data[4] = addr << 1;
423 buf[5] = snd_len; 428 state->data[5] = snd_len;
424 buf[6] = rcv_len; 429 state->data[6] = rcv_len;
425 430
426 memcpy(buf + 7, snd_buf, snd_len); 431 memcpy(state->data + 7, snd_buf, snd_len);
427 432
428 ret = dvb_usb_generic_rw(d, buf, 7 + snd_len, 433 ret = dvb_usb_generic_rw(d, state->data, 7 + snd_len,
429 buf, /* rcv_len */ 64, 434 state->data, /* rcv_len */ 64,
430 /* delay_ms */ 0); 435 /* delay_ms */ 0);
431 if (ret < 0) 436 if (ret < 0)
432 goto failed; 437 goto failed;
433 438
434 /* TT USB protocol error. */ 439 /* TT USB protocol error. */
435 ret = -EIO; 440 ret = -EIO;
436 if (SYNC_BYTE_IN != buf[0] || id != buf[1]) 441 if (SYNC_BYTE_IN != state->data[0] || id != state->data[1])
437 goto failed; 442 goto failed;
438 443
439 /* I2C device didn't respond as expected. */ 444 /* I2C device didn't respond as expected. */
440 ret = -EREMOTEIO; 445 ret = -EREMOTEIO;
441 if (buf[5] < snd_len || buf[6] < rcv_len) 446 if (state->data[5] < snd_len || state->data[6] < rcv_len)
442 goto failed; 447 goto failed;
443 448
444 memcpy(rcv_buf, buf + 7, rcv_len); 449 memcpy(rcv_buf, state->data + 7, rcv_len);
450 mutex_unlock(&state->ca_mutex);
445 451
446 return rcv_len; 452 return rcv_len;
447 453
448failed: 454failed:
449 err("I2C error %d; %02X %02X %02X %02X %02X -> " 455 err("I2C error %d; %02X %02X %02X %02X %02X -> %*ph",
450 "%02X %02X %02X %02X %02X.",
451 ret, SYNC_BYTE_OUT, id, addr << 1, snd_len, rcv_len, 456 ret, SYNC_BYTE_OUT, id, addr << 1, snd_len, rcv_len,
452 buf[0], buf[1], buf[4], buf[5], buf[6]); 457 7, state->data);
453 458
459 mutex_unlock(&state->ca_mutex);
454 return ret; 460 return ret;
455} 461}
456 462
@@ -499,8 +505,7 @@ static u32 pctv452e_i2c_func(struct i2c_adapter *adapter)
499static int pctv452e_power_ctrl(struct dvb_usb_device *d, int i) 505static int pctv452e_power_ctrl(struct dvb_usb_device *d, int i)
500{ 506{
501 struct pctv452e_state *state = (struct pctv452e_state *)d->priv; 507 struct pctv452e_state *state = (struct pctv452e_state *)d->priv;
502 u8 b0[] = { 0xaa, 0, PCTV_CMD_RESET, 1, 0 }; 508 u8 *rx;
503 u8 rx[PCTV_ANSWER_LEN];
504 int ret; 509 int ret;
505 510
506 info("%s: %d\n", __func__, i); 511 info("%s: %d\n", __func__, i);
@@ -511,6 +516,11 @@ static int pctv452e_power_ctrl(struct dvb_usb_device *d, int i)
511 if (state->initialized) 516 if (state->initialized)
512 return 0; 517 return 0;
513 518
519 rx = kmalloc(PCTV_ANSWER_LEN, GFP_KERNEL);
520 if (!rx)
521 return -ENOMEM;
522
523 mutex_lock(&state->ca_mutex);
514 /* hmm where shoud this should go? */ 524 /* hmm where shoud this should go? */
515 ret = usb_set_interface(d->udev, 0, ISOC_INTERFACE_ALTERNATIVE); 525 ret = usb_set_interface(d->udev, 0, ISOC_INTERFACE_ALTERNATIVE);
516 if (ret != 0) 526 if (ret != 0)
@@ -518,65 +528,75 @@ static int pctv452e_power_ctrl(struct dvb_usb_device *d, int i)
518 __func__, ret); 528 __func__, ret);
519 529
520 /* this is a one-time initialization, dont know where to put */ 530 /* this is a one-time initialization, dont know where to put */
521 b0[1] = state->c++; 531 state->data[0] = 0xaa;
532 state->data[1] = state->c++;
533 state->data[2] = PCTV_CMD_RESET;
534 state->data[3] = 1;
535 state->data[4] = 0;
522 /* reset board */ 536 /* reset board */
523 ret = dvb_usb_generic_rw(d, b0, sizeof(b0), rx, PCTV_ANSWER_LEN, 0); 537 ret = dvb_usb_generic_rw(d, state->data, 5, rx, PCTV_ANSWER_LEN, 0);
524 if (ret) 538 if (ret)
525 return ret; 539 goto ret;
526 540
527 b0[1] = state->c++; 541 state->data[1] = state->c++;
528 b0[4] = 1; 542 state->data[4] = 1;
529 /* reset board (again?) */ 543 /* reset board (again?) */
530 ret = dvb_usb_generic_rw(d, b0, sizeof(b0), rx, PCTV_ANSWER_LEN, 0); 544 ret = dvb_usb_generic_rw(d, state->data, 5, rx, PCTV_ANSWER_LEN, 0);
531 if (ret) 545 if (ret)
532 return ret; 546 goto ret;
533 547
534 state->initialized = 1; 548 state->initialized = 1;
535 549
536 return 0; 550ret:
551 mutex_unlock(&state->ca_mutex);
552 kfree(rx);
553 return ret;
537} 554}
538 555
539static int pctv452e_rc_query(struct dvb_usb_device *d) 556static int pctv452e_rc_query(struct dvb_usb_device *d)
540{ 557{
541 struct pctv452e_state *state = (struct pctv452e_state *)d->priv; 558 struct pctv452e_state *state = (struct pctv452e_state *)d->priv;
542 u8 b[CMD_BUFFER_SIZE];
543 u8 rx[PCTV_ANSWER_LEN];
544 int ret, i; 559 int ret, i;
545 u8 id = state->c++; 560 u8 id;
561
562 mutex_lock(&state->ca_mutex);
563 id = state->c++;
546 564
547 /* prepare command header */ 565 /* prepare command header */
548 b[0] = SYNC_BYTE_OUT; 566 state->data[0] = SYNC_BYTE_OUT;
549 b[1] = id; 567 state->data[1] = id;
550 b[2] = PCTV_CMD_IR; 568 state->data[2] = PCTV_CMD_IR;
551 b[3] = 0; 569 state->data[3] = 0;
552 570
553 /* send ir request */ 571 /* send ir request */
554 ret = dvb_usb_generic_rw(d, b, 4, rx, PCTV_ANSWER_LEN, 0); 572 ret = dvb_usb_generic_rw(d, state->data, 4,
573 state->data, PCTV_ANSWER_LEN, 0);
555 if (ret != 0) 574 if (ret != 0)
556 return ret; 575 goto ret;
557 576
558 if (debug > 3) { 577 if (debug > 3) {
559 info("%s: read: %2d: %*ph: ", __func__, ret, 3, rx); 578 info("%s: read: %2d: %*ph: ", __func__, ret, 3, state->data);
560 for (i = 0; (i < rx[3]) && ((i+3) < PCTV_ANSWER_LEN); i++) 579 for (i = 0; (i < state->data[3]) && ((i + 3) < PCTV_ANSWER_LEN); i++)
561 info(" %02x", rx[i+3]); 580 info(" %02x", state->data[i + 3]);
562 581
563 info("\n"); 582 info("\n");
564 } 583 }
565 584
566 if ((rx[3] == 9) && (rx[12] & 0x01)) { 585 if ((state->data[3] == 9) && (state->data[12] & 0x01)) {
567 /* got a "press" event */ 586 /* got a "press" event */
568 state->last_rc_key = RC_SCANCODE_RC5(rx[7], rx[6]); 587 state->last_rc_key = RC_SCANCODE_RC5(state->data[7], state->data[6]);
569 if (debug > 2) 588 if (debug > 2)
570 info("%s: cmd=0x%02x sys=0x%02x\n", 589 info("%s: cmd=0x%02x sys=0x%02x\n",
571 __func__, rx[6], rx[7]); 590 __func__, state->data[6], state->data[7]);
572 591
573 rc_keydown(d->rc_dev, RC_TYPE_RC5, state->last_rc_key, 0); 592 rc_keydown(d->rc_dev, RC_TYPE_RC5, state->last_rc_key, 0);
574 } else if (state->last_rc_key) { 593 } else if (state->last_rc_key) {
575 rc_keyup(d->rc_dev); 594 rc_keyup(d->rc_dev);
576 state->last_rc_key = 0; 595 state->last_rc_key = 0;
577 } 596 }
578 597ret:
579 return 0; 598 mutex_unlock(&state->ca_mutex);
599 return ret;
580} 600}
581 601
582static int pctv452e_read_mac_address(struct dvb_usb_device *d, u8 mac[6]) 602static int pctv452e_read_mac_address(struct dvb_usb_device *d, u8 mac[6])
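pctv452e takes a different route: rather than allocating on every call, it embeds one 80-byte scratch buffer in the per-device state and serializes its users with the existing ca_mutex, so concurrent CI, I2C and remote-control transactions cannot trample each other's data. A rough sketch of that shape, with hypothetical names (only the buffer-plus-mutex pairing mirrors the patch):

struct example_state {
        struct mutex io_mutex;          /* serializes every user of data[] */
        u8 data[80];                    /* DMA-able, lives as long as the device */
        u8 c;                           /* transaction counter */
};

static int example_cmd(struct dvb_usb_device *d, u8 cmd,
                       const u8 *wbuf, u8 wlen, u8 *rbuf, u8 rlen)
{
        struct example_state *st = d->priv;
        int ret;

        if (wlen > sizeof(st->data) - 4 || rlen > sizeof(st->data) - 4)
                return -EIO;            /* validate instead of BUG_ON() */

        mutex_lock(&st->io_mutex);      /* one transaction at a time owns data[] */
        st->data[0] = SYNC_BYTE_OUT;
        st->data[1] = st->c++;
        st->data[2] = cmd;
        st->data[3] = wlen;
        memcpy(st->data + 4, wbuf, wlen);

        ret = dvb_usb_generic_rw(d, st->data, 4 + wlen,
                                 st->data, rlen ? 64 : 0, 0);
        if (!ret)
                memcpy(rbuf, st->data + 4, rlen);
        mutex_unlock(&st->io_mutex);

        return ret;
}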
diff --git a/drivers/media/usb/dvb-usb/technisat-usb2.c b/drivers/media/usb/dvb-usb/technisat-usb2.c
index d9f3262bf071..4706628a3ed5 100644
--- a/drivers/media/usb/dvb-usb/technisat-usb2.c
+++ b/drivers/media/usb/dvb-usb/technisat-usb2.c
@@ -89,9 +89,13 @@ struct technisat_usb2_state {
89static int technisat_usb2_i2c_access(struct usb_device *udev, 89static int technisat_usb2_i2c_access(struct usb_device *udev,
90 u8 device_addr, u8 *tx, u8 txlen, u8 *rx, u8 rxlen) 90 u8 device_addr, u8 *tx, u8 txlen, u8 *rx, u8 rxlen)
91{ 91{
92 u8 b[64]; 92 u8 *b;
93 int ret, actual_length; 93 int ret, actual_length;
94 94
95 b = kmalloc(64, GFP_KERNEL);
96 if (!b)
97 return -ENOMEM;
98
95 deb_i2c("i2c-access: %02x, tx: ", device_addr); 99 deb_i2c("i2c-access: %02x, tx: ", device_addr);
96 debug_dump(tx, txlen, deb_i2c); 100 debug_dump(tx, txlen, deb_i2c);
97 deb_i2c(" "); 101 deb_i2c(" ");
@@ -123,7 +127,7 @@ static int technisat_usb2_i2c_access(struct usb_device *udev,
123 127
124 if (ret < 0) { 128 if (ret < 0) {
125 err("i2c-error: out failed %02x = %d", device_addr, ret); 129 err("i2c-error: out failed %02x = %d", device_addr, ret);
126 return -ENODEV; 130 goto err;
127 } 131 }
128 132
129 ret = usb_bulk_msg(udev, 133 ret = usb_bulk_msg(udev,
@@ -131,7 +135,7 @@ static int technisat_usb2_i2c_access(struct usb_device *udev,
131 b, 64, &actual_length, 1000); 135 b, 64, &actual_length, 1000);
132 if (ret < 0) { 136 if (ret < 0) {
133 err("i2c-error: in failed %02x = %d", device_addr, ret); 137 err("i2c-error: in failed %02x = %d", device_addr, ret);
134 return -ENODEV; 138 goto err;
135 } 139 }
136 140
137 if (b[0] != I2C_STATUS_OK) { 141 if (b[0] != I2C_STATUS_OK) {
@@ -140,7 +144,7 @@ static int technisat_usb2_i2c_access(struct usb_device *udev,
140 if (!(b[0] == I2C_STATUS_NAK && 144 if (!(b[0] == I2C_STATUS_NAK &&
141 device_addr == 0x60 145 device_addr == 0x60
142 /* && device_is_technisat_usb2 */)) 146 /* && device_is_technisat_usb2 */))
143 return -ENODEV; 147 goto err;
144 } 148 }
145 149
146 deb_i2c("status: %d, ", b[0]); 150 deb_i2c("status: %d, ", b[0]);
@@ -154,7 +158,9 @@ static int technisat_usb2_i2c_access(struct usb_device *udev,
154 158
155 deb_i2c("\n"); 159 deb_i2c("\n");
156 160
157 return 0; 161err:
162 kfree(b);
163 return ret;
158} 164}
159 165
160static int technisat_usb2_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msg, 166static int technisat_usb2_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msg,
diff --git a/drivers/media/usb/s2255/s2255drv.c b/drivers/media/usb/s2255/s2255drv.c
index c3a0e87066eb..f7bb78c1873c 100644
--- a/drivers/media/usb/s2255/s2255drv.c
+++ b/drivers/media/usb/s2255/s2255drv.c
@@ -1901,19 +1901,30 @@ static long s2255_vendor_req(struct s2255_dev *dev, unsigned char Request,
1901 s32 TransferBufferLength, int bOut) 1901 s32 TransferBufferLength, int bOut)
1902{ 1902{
1903 int r; 1903 int r;
1904 unsigned char *buf;
1905
1906 buf = kmalloc(TransferBufferLength, GFP_KERNEL);
1907 if (!buf)
1908 return -ENOMEM;
1909
1904 if (!bOut) { 1910 if (!bOut) {
1905 r = usb_control_msg(dev->udev, usb_rcvctrlpipe(dev->udev, 0), 1911 r = usb_control_msg(dev->udev, usb_rcvctrlpipe(dev->udev, 0),
1906 Request, 1912 Request,
1907 USB_TYPE_VENDOR | USB_RECIP_DEVICE | 1913 USB_TYPE_VENDOR | USB_RECIP_DEVICE |
1908 USB_DIR_IN, 1914 USB_DIR_IN,
1909 Value, Index, TransferBuffer, 1915 Value, Index, buf,
1910 TransferBufferLength, HZ * 5); 1916 TransferBufferLength, HZ * 5);
1917
1918 if (r >= 0)
1919 memcpy(TransferBuffer, buf, TransferBufferLength);
1911 } else { 1920 } else {
1921 memcpy(buf, TransferBuffer, TransferBufferLength);
1912 r = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0), 1922 r = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0),
1913 Request, USB_TYPE_VENDOR | USB_RECIP_DEVICE, 1923 Request, USB_TYPE_VENDOR | USB_RECIP_DEVICE,
1914 Value, Index, TransferBuffer, 1924 Value, Index, buf,
1915 TransferBufferLength, HZ * 5); 1925 TransferBufferLength, HZ * 5);
1916 } 1926 }
1927 kfree(buf);
1917 return r; 1928 return r;
1918} 1929}
1919 1930
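s2255_vendor_req() keeps its caller-supplied TransferBuffer but no longer hands it to usb_control_msg() directly: a kmalloc()'d bounce buffer carries the actual transfer, with a copy-in before an OUT request and a copy-out after a successful IN request. The stk_camera_read_reg() change in the next hunk applies the same treatment to a single-byte register read. The idea as a hedged, self-contained helper (names and the 5-second timeout are illustrative):

static int example_vendor_req(struct usb_device *udev, u8 request, u16 value,
                              u16 index, void *data, u16 len, bool out)
{
        u8 *buf;
        int ret;

        buf = kmalloc(len, GFP_KERNEL);         /* DMA-able bounce buffer */
        if (!buf)
                return -ENOMEM;

        if (out) {
                memcpy(buf, data, len);         /* copy-in before sending */
                ret = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), request,
                                      USB_TYPE_VENDOR | USB_RECIP_DEVICE,
                                      value, index, buf, len, 5000);
        } else {
                ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0), request,
                                      USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN,
                                      value, index, buf, len, 5000);
                if (ret >= 0)
                        memcpy(data, buf, len); /* copy-out only on success */
        }

        kfree(buf);
        return ret;
}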
diff --git a/drivers/media/usb/stkwebcam/stk-webcam.c b/drivers/media/usb/stkwebcam/stk-webcam.c
index db200c9d796d..22a9aae16291 100644
--- a/drivers/media/usb/stkwebcam/stk-webcam.c
+++ b/drivers/media/usb/stkwebcam/stk-webcam.c
@@ -147,20 +147,26 @@ int stk_camera_write_reg(struct stk_camera *dev, u16 index, u8 value)
147int stk_camera_read_reg(struct stk_camera *dev, u16 index, int *value) 147int stk_camera_read_reg(struct stk_camera *dev, u16 index, int *value)
148{ 148{
149 struct usb_device *udev = dev->udev; 149 struct usb_device *udev = dev->udev;
150 unsigned char *buf;
150 int ret; 151 int ret;
151 152
153 buf = kmalloc(sizeof(u8), GFP_KERNEL);
154 if (!buf)
155 return -ENOMEM;
156
152 ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0), 157 ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
153 0x00, 158 0x00,
154 USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE, 159 USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
155 0x00, 160 0x00,
156 index, 161 index,
157 (u8 *) value, 162 buf,
158 sizeof(u8), 163 sizeof(u8),
159 500); 164 500);
160 if (ret < 0) 165 if (ret >= 0)
161 return ret; 166 memcpy(value, buf, sizeof(u8));
162 else 167
163 return 0; 168 kfree(buf);
169 return ret;
164} 170}
165 171
166static int stk_start_stream(struct stk_camera *dev) 172static int stk_start_stream(struct stk_camera *dev)
diff --git a/drivers/mmc/host/dw_mmc-pltfm.c b/drivers/mmc/host/dw_mmc-pltfm.c
index c0bb0c793e84..dbbc4303bdd0 100644
--- a/drivers/mmc/host/dw_mmc-pltfm.c
+++ b/drivers/mmc/host/dw_mmc-pltfm.c
@@ -46,12 +46,13 @@ int dw_mci_pltfm_register(struct platform_device *pdev,
46 host->pdata = pdev->dev.platform_data; 46 host->pdata = pdev->dev.platform_data;
47 47
48 regs = platform_get_resource(pdev, IORESOURCE_MEM, 0); 48 regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
49 /* Get registers' physical base address */
50 host->phy_regs = regs->start;
51 host->regs = devm_ioremap_resource(&pdev->dev, regs); 49 host->regs = devm_ioremap_resource(&pdev->dev, regs);
52 if (IS_ERR(host->regs)) 50 if (IS_ERR(host->regs))
53 return PTR_ERR(host->regs); 51 return PTR_ERR(host->regs);
54 52
53 /* Get registers' physical base address */
54 host->phy_regs = regs->start;
55
55 platform_set_drvdata(pdev, host); 56 platform_set_drvdata(pdev, host);
56 return dw_mci_probe(host); 57 return dw_mci_probe(host);
57} 58}
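In dw_mmc-pltfm the registers' physical base address is now taken from the resource only after devm_ioremap_resource() has succeeded. devm_ioremap_resource() copes with a NULL resource by returning an ERR_PTR, so deferring the regs->start dereference past the error check removes a potential NULL pointer dereference when the memory resource is missing. The usual probe ordering, sketched with an assumed host structure:

struct example_host {
        void __iomem *regs;
        resource_size_t phy_regs;
};

static int example_probe(struct platform_device *pdev, struct example_host *host)
{
        struct resource *regs;

        regs = platform_get_resource(pdev, IORESOURCE_MEM, 0); /* may be NULL */
        host->regs = devm_ioremap_resource(&pdev->dev, regs);  /* handles NULL itself */
        if (IS_ERR(host->regs))
                return PTR_ERR(host->regs);

        host->phy_regs = regs->start;   /* safe: the resource is known valid here */
        return 0;
}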
diff --git a/drivers/mmc/host/sdhci-msm.c b/drivers/mmc/host/sdhci-msm.c
index 8ef44a2a2fd9..90ed2e12d345 100644
--- a/drivers/mmc/host/sdhci-msm.c
+++ b/drivers/mmc/host/sdhci-msm.c
@@ -647,6 +647,7 @@ static int sdhci_msm_probe(struct platform_device *pdev)
647 if (msm_host->pwr_irq < 0) { 647 if (msm_host->pwr_irq < 0) {
648 dev_err(&pdev->dev, "Get pwr_irq failed (%d)\n", 648 dev_err(&pdev->dev, "Get pwr_irq failed (%d)\n",
649 msm_host->pwr_irq); 649 msm_host->pwr_irq);
650 ret = msm_host->pwr_irq;
650 goto clk_disable; 651 goto clk_disable;
651 } 652 }
652 653
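The sdhci-msm hunk fixes a classic error-path slip: the code noticed that platform_get_irq() had failed (msm_host->pwr_irq < 0) and jumped to the cleanup label, but never copied the negative value into ret, so probe could return success or a stale error. A hedged sketch of the corrected shape:

static int example_probe(struct platform_device *pdev)
{
        int ret, irq;

        /* ... clocks and resources set up earlier ... */

        irq = platform_get_irq(pdev, 0);
        if (irq < 0) {
                dev_err(&pdev->dev, "failed to get IRQ (%d)\n", irq);
                ret = irq;              /* the fix: hand the real error back */
                goto clk_disable;
        }

        return 0;

clk_disable:
        /* undo the earlier clock setup here */
        return ret;
}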
diff --git a/drivers/mtd/nand/gpmi-nand/gpmi-lib.c b/drivers/mtd/nand/gpmi-nand/gpmi-lib.c
index 0f68a99fc4ad..141bd70a49c2 100644
--- a/drivers/mtd/nand/gpmi-nand/gpmi-lib.c
+++ b/drivers/mtd/nand/gpmi-nand/gpmi-lib.c
@@ -161,7 +161,7 @@ int gpmi_init(struct gpmi_nand_data *this)
161 161
162 ret = gpmi_enable_clk(this); 162 ret = gpmi_enable_clk(this);
163 if (ret) 163 if (ret)
164 goto err_out; 164 return ret;
165 ret = gpmi_reset_block(r->gpmi_regs, false); 165 ret = gpmi_reset_block(r->gpmi_regs, false);
166 if (ret) 166 if (ret)
167 goto err_out; 167 goto err_out;
@@ -197,6 +197,7 @@ int gpmi_init(struct gpmi_nand_data *this)
197 gpmi_disable_clk(this); 197 gpmi_disable_clk(this);
198 return 0; 198 return 0;
199err_out: 199err_out:
200 gpmi_disable_clk(this);
200 return ret; 201 return ret;
201} 202}
202 203
@@ -270,7 +271,7 @@ int bch_set_geometry(struct gpmi_nand_data *this)
270 271
271 ret = gpmi_enable_clk(this); 272 ret = gpmi_enable_clk(this);
272 if (ret) 273 if (ret)
273 goto err_out; 274 return ret;
274 275
275 /* 276 /*
276 * Due to erratum #2847 of the MX23, the BCH cannot be soft reset on this 277 * Due to erratum #2847 of the MX23, the BCH cannot be soft reset on this
@@ -308,6 +309,7 @@ int bch_set_geometry(struct gpmi_nand_data *this)
308 gpmi_disable_clk(this); 309 gpmi_disable_clk(this);
309 return 0; 310 return 0;
310err_out: 311err_out:
312 gpmi_disable_clk(this);
311 return ret; 313 return ret;
312} 314}
313 315
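The gpmi-lib change makes the clock handling symmetric: if gpmi_enable_clk() itself fails there is nothing to undo, so the function returns straight away, while every later failure now goes through err_out, which disables the clocks again just like the success path does. A generic sketch of that balanced shape, using the common clk API and a hypothetical init step:

static int example_init(struct clk *clk, void __iomem *regs)
{
        int ret;

        ret = clk_prepare_enable(clk);
        if (ret)
                return ret;                     /* clock never came up: nothing to undo */

        ret = example_reset_block(regs);        /* hypothetical hardware init */
        if (ret)
                goto err_out;

        clk_disable_unprepare(clk);             /* success path drops the clock... */
        return 0;

err_out:
        clk_disable_unprepare(clk);             /* ...and after this patch so do errors */
        return ret;
}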
diff --git a/drivers/mtd/nand/mtk_ecc.c b/drivers/mtd/nand/mtk_ecc.c
index d54f666417e1..dbf256217b3e 100644
--- a/drivers/mtd/nand/mtk_ecc.c
+++ b/drivers/mtd/nand/mtk_ecc.c
@@ -86,6 +86,8 @@ struct mtk_ecc {
86 struct completion done; 86 struct completion done;
87 struct mutex lock; 87 struct mutex lock;
88 u32 sectors; 88 u32 sectors;
89
90 u8 eccdata[112];
89}; 91};
90 92
91static inline void mtk_ecc_wait_idle(struct mtk_ecc *ecc, 93static inline void mtk_ecc_wait_idle(struct mtk_ecc *ecc,
@@ -366,9 +368,8 @@ int mtk_ecc_encode(struct mtk_ecc *ecc, struct mtk_ecc_config *config,
366 u8 *data, u32 bytes) 368 u8 *data, u32 bytes)
367{ 369{
368 dma_addr_t addr; 370 dma_addr_t addr;
369 u8 *p; 371 u32 len;
370 u32 len, i, val; 372 int ret;
371 int ret = 0;
372 373
373 addr = dma_map_single(ecc->dev, data, bytes, DMA_TO_DEVICE); 374 addr = dma_map_single(ecc->dev, data, bytes, DMA_TO_DEVICE);
374 ret = dma_mapping_error(ecc->dev, addr); 375 ret = dma_mapping_error(ecc->dev, addr);
@@ -393,14 +394,12 @@ int mtk_ecc_encode(struct mtk_ecc *ecc, struct mtk_ecc_config *config,
393 394
394 /* Program ECC bytes to OOB: per sector oob = FDM + ECC + SPARE */ 395 /* Program ECC bytes to OOB: per sector oob = FDM + ECC + SPARE */
395 len = (config->strength * ECC_PARITY_BITS + 7) >> 3; 396 len = (config->strength * ECC_PARITY_BITS + 7) >> 3;
396 p = data + bytes;
397 397
398 /* write the parity bytes generated by the ECC back to the OOB region */ 398 /* write the parity bytes generated by the ECC back to temp buffer */
399 for (i = 0; i < len; i++) { 399 __ioread32_copy(ecc->eccdata, ecc->regs + ECC_ENCPAR(0), round_up(len, 4));
400 if ((i % 4) == 0) 400
401 val = readl(ecc->regs + ECC_ENCPAR(i / 4)); 401 /* copy into possibly unaligned OOB region with actual length */
402 p[i] = (val >> ((i % 4) * 8)) & 0xff; 402 memcpy(data + bytes, ecc->eccdata, len);
403 }
404timeout: 403timeout:
405 404
406 dma_unmap_single(ecc->dev, addr, bytes, DMA_TO_DEVICE); 405 dma_unmap_single(ecc->dev, addr, bytes, DMA_TO_DEVICE);
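mtk_ecc_encode() used to read the parity registers a word at a time and poke the extracted bytes straight into the OOB area, which may be unaligned. The patch reads whole words into an aligned scratch buffer kept in the controller structure and then memcpy()s exactly len bytes into the OOB. A simplified sketch of the same idea, using a plain readl() loop instead of __ioread32_copy(), assuming a little-endian CPU (as on the original platform) and illustrative names:

static void example_copy_parity(void __iomem *parity_regs, u8 *oob, u32 len)
{
        u32 tmp[32];    /* aligned scratch; 128 bytes covers the largest parity size */
        u32 i;

        /* aligned, whole-word reads from the controller registers */
        for (i = 0; i < DIV_ROUND_UP(len, 4); i++)
                tmp[i] = readl(parity_regs + i * 4);

        /* copy only the real parity length; oob needs no particular alignment */
        memcpy(oob, tmp, len);
}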
diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c
index e5718e5ecf92..3bde96a3f7bf 100644
--- a/drivers/mtd/nand/nand_base.c
+++ b/drivers/mtd/nand/nand_base.c
@@ -1095,10 +1095,11 @@ static void nand_release_data_interface(struct nand_chip *chip)
1095/** 1095/**
1096 * nand_reset - Reset and initialize a NAND device 1096 * nand_reset - Reset and initialize a NAND device
1097 * @chip: The NAND chip 1097 * @chip: The NAND chip
1098 * @chipnr: Internal die id
1098 * 1099 *
1099 * Returns 0 for success or negative error code otherwise 1100 * Returns 0 for success or negative error code otherwise
1100 */ 1101 */
1101int nand_reset(struct nand_chip *chip) 1102int nand_reset(struct nand_chip *chip, int chipnr)
1102{ 1103{
1103 struct mtd_info *mtd = nand_to_mtd(chip); 1104 struct mtd_info *mtd = nand_to_mtd(chip);
1104 int ret; 1105 int ret;
@@ -1107,9 +1108,17 @@ int nand_reset(struct nand_chip *chip)
1107 if (ret) 1108 if (ret)
1108 return ret; 1109 return ret;
1109 1110
1111 /*
1112 * The CS line has to be released before we can apply the new NAND
1113 * interface settings, hence this weird ->select_chip() dance.
1114 */
1115 chip->select_chip(mtd, chipnr);
1110 chip->cmdfunc(mtd, NAND_CMD_RESET, -1, -1); 1116 chip->cmdfunc(mtd, NAND_CMD_RESET, -1, -1);
1117 chip->select_chip(mtd, -1);
1111 1118
1119 chip->select_chip(mtd, chipnr);
1112 ret = nand_setup_data_interface(chip); 1120 ret = nand_setup_data_interface(chip);
1121 chip->select_chip(mtd, -1);
1113 if (ret) 1122 if (ret)
1114 return ret; 1123 return ret;
1115 1124
@@ -1185,8 +1194,6 @@ int nand_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
1185 /* Shift to get chip number */ 1194 /* Shift to get chip number */
1186 chipnr = ofs >> chip->chip_shift; 1195 chipnr = ofs >> chip->chip_shift;
1187 1196
1188 chip->select_chip(mtd, chipnr);
1189
1190 /* 1197 /*
1191 * Reset the chip. 1198 * Reset the chip.
1192 * If we want to check the WP through READ STATUS and check the bit 7 1199 * If we want to check the WP through READ STATUS and check the bit 7
@@ -1194,7 +1201,9 @@ int nand_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
1194 * some operation can also clear the bit 7 of status register 1201 * some operation can also clear the bit 7 of status register
1195 * eg. erase/program a locked block 1202 * eg. erase/program a locked block
1196 */ 1203 */
1197 nand_reset(chip); 1204 nand_reset(chip, chipnr);
1205
1206 chip->select_chip(mtd, chipnr);
1198 1207
1199 /* Check, if it is write protected */ 1208 /* Check, if it is write protected */
1200 if (nand_check_wp(mtd)) { 1209 if (nand_check_wp(mtd)) {
@@ -1244,8 +1253,6 @@ int nand_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
1244 /* Shift to get chip number */ 1253 /* Shift to get chip number */
1245 chipnr = ofs >> chip->chip_shift; 1254 chipnr = ofs >> chip->chip_shift;
1246 1255
1247 chip->select_chip(mtd, chipnr);
1248
1249 /* 1256 /*
1250 * Reset the chip. 1257 * Reset the chip.
1251 * If we want to check the WP through READ STATUS and check the bit 7 1258 * If we want to check the WP through READ STATUS and check the bit 7
@@ -1253,7 +1260,9 @@ int nand_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
1253 * some operation can also clear the bit 7 of status register 1260 * some operation can also clear the bit 7 of status register
1254 * eg. erase/program a locked block 1261 * eg. erase/program a locked block
1255 */ 1262 */
1256 nand_reset(chip); 1263 nand_reset(chip, chipnr);
1264
1265 chip->select_chip(mtd, chipnr);
1257 1266
1258 /* Check, if it is write protected */ 1267 /* Check, if it is write protected */
1259 if (nand_check_wp(mtd)) { 1268 if (nand_check_wp(mtd)) {
@@ -2940,10 +2949,6 @@ static int nand_do_write_oob(struct mtd_info *mtd, loff_t to,
2940 } 2949 }
2941 2950
2942 chipnr = (int)(to >> chip->chip_shift); 2951 chipnr = (int)(to >> chip->chip_shift);
2943 chip->select_chip(mtd, chipnr);
2944
2945 /* Shift to get page */
2946 page = (int)(to >> chip->page_shift);
2947 2952
2948 /* 2953 /*
2949 * Reset the chip. Some chips (like the Toshiba TC5832DC found in one 2954 * Reset the chip. Some chips (like the Toshiba TC5832DC found in one
@@ -2951,7 +2956,12 @@ static int nand_do_write_oob(struct mtd_info *mtd, loff_t to,
2951 * if we don't do this. I have no clue why, but I seem to have 'fixed' 2956 * if we don't do this. I have no clue why, but I seem to have 'fixed'
2952 * it in the doc2000 driver in August 1999. dwmw2. 2957 * it in the doc2000 driver in August 1999. dwmw2.
2953 */ 2958 */
2954 nand_reset(chip); 2959 nand_reset(chip, chipnr);
2960
2961 chip->select_chip(mtd, chipnr);
2962
2963 /* Shift to get page */
2964 page = (int)(to >> chip->page_shift);
2955 2965
2956 /* Check, if it is write protected */ 2966 /* Check, if it is write protected */
2957 if (nand_check_wp(mtd)) { 2967 if (nand_check_wp(mtd)) {
@@ -3984,14 +3994,14 @@ static struct nand_flash_dev *nand_get_flash_type(struct mtd_info *mtd,
3984 int i, maf_idx; 3994 int i, maf_idx;
3985 u8 id_data[8]; 3995 u8 id_data[8];
3986 3996
3987 /* Select the device */
3988 chip->select_chip(mtd, 0);
3989
3990 /* 3997 /*
3991 * Reset the chip, required by some chips (e.g. Micron MT29FxGxxxxx) 3998 * Reset the chip, required by some chips (e.g. Micron MT29FxGxxxxx)
3992 * after power-up. 3999 * after power-up.
3993 */ 4000 */
3994 nand_reset(chip); 4001 nand_reset(chip, 0);
4002
4003 /* Select the device */
4004 chip->select_chip(mtd, 0);
3995 4005
3996 /* Send the command for reading device ID */ 4006 /* Send the command for reading device ID */
3997 chip->cmdfunc(mtd, NAND_CMD_READID, 0x00, -1); 4007 chip->cmdfunc(mtd, NAND_CMD_READID, 0x00, -1);
@@ -4329,17 +4339,31 @@ int nand_scan_ident(struct mtd_info *mtd, int maxchips,
4329 return PTR_ERR(type); 4339 return PTR_ERR(type);
4330 } 4340 }
4331 4341
4342 /* Initialize the ->data_interface field. */
4332 ret = nand_init_data_interface(chip); 4343 ret = nand_init_data_interface(chip);
4333 if (ret) 4344 if (ret)
4334 return ret; 4345 return ret;
4335 4346
4347 /*
4348 * Setup the data interface correctly on the chip and controller side.
4349 * This explicit call to nand_setup_data_interface() is only required
4350 * for the first die, because nand_reset() has been called before
4351 * ->data_interface and ->default_onfi_timing_mode were set.
4352 * For the other dies, nand_reset() will automatically switch to the
4353 * best mode for us.
4354 */
4355 ret = nand_setup_data_interface(chip);
4356 if (ret)
4357 return ret;
4358
4336 chip->select_chip(mtd, -1); 4359 chip->select_chip(mtd, -1);
4337 4360
4338 /* Check for a chip array */ 4361 /* Check for a chip array */
4339 for (i = 1; i < maxchips; i++) { 4362 for (i = 1; i < maxchips; i++) {
4340 chip->select_chip(mtd, i);
4341 /* See comment in nand_get_flash_type for reset */ 4363 /* See comment in nand_get_flash_type for reset */
4342 nand_reset(chip); 4364 nand_reset(chip, i);
4365
4366 chip->select_chip(mtd, i);
4343 /* Send the command for reading device ID */ 4367 /* Send the command for reading device ID */
4344 chip->cmdfunc(mtd, NAND_CMD_READID, 0x00, -1); 4368 chip->cmdfunc(mtd, NAND_CMD_READID, 0x00, -1);
4345 /* Read manufacturer and device IDs */ 4369 /* Read manufacturer and device IDs */
diff --git a/drivers/pci/host/pcie-designware.c b/drivers/pci/host/pcie-designware.c
index 035f50c03281..bed19994c1e9 100644
--- a/drivers/pci/host/pcie-designware.c
+++ b/drivers/pci/host/pcie-designware.c
@@ -637,8 +637,6 @@ int dw_pcie_host_init(struct pcie_port *pp)
637 } 637 }
638 } 638 }
639 639
640 pp->iatu_unroll_enabled = dw_pcie_iatu_unroll_enabled(pp);
641
642 if (pp->ops->host_init) 640 if (pp->ops->host_init)
643 pp->ops->host_init(pp); 641 pp->ops->host_init(pp);
644 642
@@ -809,6 +807,11 @@ void dw_pcie_setup_rc(struct pcie_port *pp)
809{ 807{
810 u32 val; 808 u32 val;
811 809
810 /* get iATU unroll support */
811 pp->iatu_unroll_enabled = dw_pcie_iatu_unroll_enabled(pp);
812 dev_dbg(pp->dev, "iATU unroll: %s\n",
813 pp->iatu_unroll_enabled ? "enabled" : "disabled");
814
812 /* set the number of lanes */ 815 /* set the number of lanes */
813 val = dw_pcie_readl_rc(pp, PCIE_PORT_LINK_CONTROL); 816 val = dw_pcie_readl_rc(pp, PCIE_PORT_LINK_CONTROL);
814 val &= ~PORT_LINK_MODE_MASK; 817 val &= ~PORT_LINK_MODE_MASK;
diff --git a/drivers/pci/host/pcie-qcom.c b/drivers/pci/host/pcie-qcom.c
index ef0a84c7a588..35936409b2d4 100644
--- a/drivers/pci/host/pcie-qcom.c
+++ b/drivers/pci/host/pcie-qcom.c
@@ -533,11 +533,11 @@ static int qcom_pcie_probe(struct platform_device *pdev)
533 if (IS_ERR(pcie->phy)) 533 if (IS_ERR(pcie->phy))
534 return PTR_ERR(pcie->phy); 534 return PTR_ERR(pcie->phy);
535 535
536 pp->dev = dev;
536 ret = pcie->ops->get_resources(pcie); 537 ret = pcie->ops->get_resources(pcie);
537 if (ret) 538 if (ret)
538 return ret; 539 return ret;
539 540
540 pp->dev = dev;
541 pp->root_bus_nr = -1; 541 pp->root_bus_nr = -1;
542 pp->ops = &qcom_pcie_dw_ops; 542 pp->ops = &qcom_pcie_dw_ops;
543 543
diff --git a/drivers/scsi/arcmsr/arcmsr_hba.c b/drivers/scsi/arcmsr/arcmsr_hba.c
index 3d53d636b17b..f0cfb0451757 100644
--- a/drivers/scsi/arcmsr/arcmsr_hba.c
+++ b/drivers/scsi/arcmsr/arcmsr_hba.c
@@ -2636,18 +2636,9 @@ static int arcmsr_queue_command_lck(struct scsi_cmnd *cmd,
2636 struct AdapterControlBlock *acb = (struct AdapterControlBlock *) host->hostdata; 2636 struct AdapterControlBlock *acb = (struct AdapterControlBlock *) host->hostdata;
2637 struct CommandControlBlock *ccb; 2637 struct CommandControlBlock *ccb;
2638 int target = cmd->device->id; 2638 int target = cmd->device->id;
2639 int lun = cmd->device->lun;
2640 uint8_t scsicmd = cmd->cmnd[0];
2641 cmd->scsi_done = done; 2639 cmd->scsi_done = done;
2642 cmd->host_scribble = NULL; 2640 cmd->host_scribble = NULL;
2643 cmd->result = 0; 2641 cmd->result = 0;
2644 if ((scsicmd == SYNCHRONIZE_CACHE) ||(scsicmd == SEND_DIAGNOSTIC)){
2645 if(acb->devstate[target][lun] == ARECA_RAID_GONE) {
2646 cmd->result = (DID_NO_CONNECT << 16);
2647 }
2648 cmd->scsi_done(cmd);
2649 return 0;
2650 }
2651 if (target == 16) { 2642 if (target == 16) {
2652 /* virtual device for iop message transfer */ 2643 /* virtual device for iop message transfer */
2653 arcmsr_handle_virtual_command(acb, cmd); 2644 arcmsr_handle_virtual_command(acb, cmd);
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
index 9ff57dee72d7..d8b1fbd4c8aa 100644
--- a/drivers/scsi/megaraid/megaraid_sas_base.c
+++ b/drivers/scsi/megaraid/megaraid_sas_base.c
@@ -1700,16 +1700,13 @@ megasas_queue_command(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
1700 goto out_done; 1700 goto out_done;
1701 } 1701 }
1702 1702
1703 switch (scmd->cmnd[0]) { 1703 /*
1704 case SYNCHRONIZE_CACHE: 1704 * FW takes care of flush cache on its own for Virtual Disk.
1705 /* 1705 * No need to send it down for VD. For JBOD send SYNCHRONIZE_CACHE to FW.
1706 * FW takes care of flush cache on its own 1706 */
1707 * No need to send it down 1707 if ((scmd->cmnd[0] == SYNCHRONIZE_CACHE) && MEGASAS_IS_LOGICAL(scmd)) {
1708 */
1709 scmd->result = DID_OK << 16; 1708 scmd->result = DID_OK << 16;
1710 goto out_done; 1709 goto out_done;
1711 default:
1712 break;
1713 } 1710 }
1714 1711
1715 return instance->instancet->build_and_issue_cmd(instance, scmd); 1712 return instance->instancet->build_and_issue_cmd(instance, scmd);
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
index c905709707f0..cf04a364fd8b 100644
--- a/drivers/scsi/scsi_debug.c
+++ b/drivers/scsi/scsi_debug.c
@@ -5134,6 +5134,7 @@ static void __exit scsi_debug_exit(void)
5134 bus_unregister(&pseudo_lld_bus); 5134 bus_unregister(&pseudo_lld_bus);
5135 root_device_unregister(pseudo_primary); 5135 root_device_unregister(pseudo_primary);
5136 5136
5137 vfree(map_storep);
5137 vfree(dif_storep); 5138 vfree(dif_storep);
5138 vfree(fake_storep); 5139 vfree(fake_storep);
5139 kfree(sdebug_q_arr); 5140 kfree(sdebug_q_arr);
diff --git a/drivers/staging/media/bcm2048/radio-bcm2048.c b/drivers/staging/media/bcm2048/radio-bcm2048.c
index ea15cc638097..4d9bd02ede47 100644
--- a/drivers/staging/media/bcm2048/radio-bcm2048.c
+++ b/drivers/staging/media/bcm2048/radio-bcm2048.c
@@ -482,6 +482,8 @@ static int bcm2048_set_rds_no_lock(struct bcm2048_device *bdev, u8 rds_on)
482 flags); 482 flags);
483 memset(&bdev->rds_info, 0, sizeof(bdev->rds_info)); 483 memset(&bdev->rds_info, 0, sizeof(bdev->rds_info));
484 } 484 }
485 if (err)
486 return err;
485 487
486 return bcm2048_send_command(bdev, BCM2048_I2C_FM_RDS_SYSTEM, 488 return bcm2048_send_command(bdev, BCM2048_I2C_FM_RDS_SYSTEM,
487 bdev->cache_fm_rds_system); 489 bdev->cache_fm_rds_system);
diff --git a/drivers/vfio/pci/vfio_pci.c b/drivers/vfio/pci/vfio_pci.c
index d624a527777f..031bc08d000d 100644
--- a/drivers/vfio/pci/vfio_pci.c
+++ b/drivers/vfio/pci/vfio_pci.c
@@ -829,8 +829,9 @@ static long vfio_pci_ioctl(void *device_data,
829 829
830 } else if (cmd == VFIO_DEVICE_SET_IRQS) { 830 } else if (cmd == VFIO_DEVICE_SET_IRQS) {
831 struct vfio_irq_set hdr; 831 struct vfio_irq_set hdr;
832 size_t size;
832 u8 *data = NULL; 833 u8 *data = NULL;
833 int ret = 0; 834 int max, ret = 0;
834 835
835 minsz = offsetofend(struct vfio_irq_set, count); 836 minsz = offsetofend(struct vfio_irq_set, count);
836 837
@@ -838,23 +839,31 @@ static long vfio_pci_ioctl(void *device_data,
838 return -EFAULT; 839 return -EFAULT;
839 840
840 if (hdr.argsz < minsz || hdr.index >= VFIO_PCI_NUM_IRQS || 841 if (hdr.argsz < minsz || hdr.index >= VFIO_PCI_NUM_IRQS ||
842 hdr.count >= (U32_MAX - hdr.start) ||
841 hdr.flags & ~(VFIO_IRQ_SET_DATA_TYPE_MASK | 843 hdr.flags & ~(VFIO_IRQ_SET_DATA_TYPE_MASK |
842 VFIO_IRQ_SET_ACTION_TYPE_MASK)) 844 VFIO_IRQ_SET_ACTION_TYPE_MASK))
843 return -EINVAL; 845 return -EINVAL;
844 846
845 if (!(hdr.flags & VFIO_IRQ_SET_DATA_NONE)) { 847 max = vfio_pci_get_irq_count(vdev, hdr.index);
846 size_t size; 848 if (hdr.start >= max || hdr.start + hdr.count > max)
847 int max = vfio_pci_get_irq_count(vdev, hdr.index); 849 return -EINVAL;
848 850
849 if (hdr.flags & VFIO_IRQ_SET_DATA_BOOL) 851 switch (hdr.flags & VFIO_IRQ_SET_DATA_TYPE_MASK) {
850 size = sizeof(uint8_t); 852 case VFIO_IRQ_SET_DATA_NONE:
851 else if (hdr.flags & VFIO_IRQ_SET_DATA_EVENTFD) 853 size = 0;
852 size = sizeof(int32_t); 854 break;
853 else 855 case VFIO_IRQ_SET_DATA_BOOL:
854 return -EINVAL; 856 size = sizeof(uint8_t);
857 break;
858 case VFIO_IRQ_SET_DATA_EVENTFD:
859 size = sizeof(int32_t);
860 break;
861 default:
862 return -EINVAL;
863 }
855 864
856 if (hdr.argsz - minsz < hdr.count * size || 865 if (size) {
857 hdr.start >= max || hdr.start + hdr.count > max) 866 if (hdr.argsz - minsz < hdr.count * size)
858 return -EINVAL; 867 return -EINVAL;
859 868
860 data = memdup_user((void __user *)(arg + minsz), 869 data = memdup_user((void __user *)(arg + minsz),
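The VFIO_DEVICE_SET_IRQS rework validates the user-supplied vfio_irq_set header before anything in it is trusted: hdr.count can no longer wrap past U32_MAX when added to hdr.start, the start/count range is checked against the interrupts the device really exposes even for DATA_NONE, and the per-element size comes from an explicit switch on the data-type flag. A condensed sketch of that validation as a hypothetical helper:

static int example_validate_irq_set(const struct vfio_irq_set *hdr,
                                    size_t minsz, int max, size_t *data_size)
{
        if (hdr->argsz < minsz ||
            hdr->count >= (U32_MAX - hdr->start) ||     /* reject start+count overflow */
            hdr->flags & ~(VFIO_IRQ_SET_DATA_TYPE_MASK |
                           VFIO_IRQ_SET_ACTION_TYPE_MASK))
                return -EINVAL;

        if (hdr->start >= max || hdr->start + hdr->count > max)
                return -EINVAL;                         /* range must fit the device */

        switch (hdr->flags & VFIO_IRQ_SET_DATA_TYPE_MASK) {
        case VFIO_IRQ_SET_DATA_NONE:
                *data_size = 0;
                break;
        case VFIO_IRQ_SET_DATA_BOOL:
                *data_size = sizeof(uint8_t);
                break;
        case VFIO_IRQ_SET_DATA_EVENTFD:
                *data_size = sizeof(int32_t);
                break;
        default:
                return -EINVAL;
        }

        if (*data_size && hdr->argsz - minsz < hdr->count * *data_size)
                return -EINVAL;                         /* payload must actually be there */

        return 0;
}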
diff --git a/drivers/vfio/pci/vfio_pci_intrs.c b/drivers/vfio/pci/vfio_pci_intrs.c
index c2e60893cd09..1c46045b0e7f 100644
--- a/drivers/vfio/pci/vfio_pci_intrs.c
+++ b/drivers/vfio/pci/vfio_pci_intrs.c
@@ -256,7 +256,7 @@ static int vfio_msi_enable(struct vfio_pci_device *vdev, int nvec, bool msix)
256 if (!is_irq_none(vdev)) 256 if (!is_irq_none(vdev))
257 return -EINVAL; 257 return -EINVAL;
258 258
259 vdev->ctx = kzalloc(nvec * sizeof(struct vfio_pci_irq_ctx), GFP_KERNEL); 259 vdev->ctx = kcalloc(nvec, sizeof(struct vfio_pci_irq_ctx), GFP_KERNEL);
260 if (!vdev->ctx) 260 if (!vdev->ctx)
261 return -ENOMEM; 261 return -ENOMEM;
262 262
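A one-liner in vfio_pci_intrs.c replaces kzalloc(nvec * sizeof(...)) with kcalloc(), which performs the same zeroed allocation but returns NULL instead of silently wrapping if the multiplication overflows. The general rule for count-times-size allocations, sketched:

static struct vfio_pci_irq_ctx *example_alloc_ctx(int nvec)
{
        /* kcalloc() zeroes the memory and fails cleanly on nvec * size overflow */
        return kcalloc(nvec, sizeof(struct vfio_pci_irq_ctx), GFP_KERNEL);
}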
diff --git a/drivers/virtio/config.c b/drivers/virtio/config.c
deleted file mode 100644
index f70bcd2ff98f..000000000000
--- a/drivers/virtio/config.c
+++ /dev/null
@@ -1,12 +0,0 @@
1/* Configuration space parsing helpers for virtio.
2 *
3 * The configuration is [type][len][... len bytes ...] fields.
4 *
5 * Copyright 2007 Rusty Russell, IBM Corporation.
6 * GPL v2 or later.
7 */
8#include <linux/err.h>
9#include <linux/virtio.h>
10#include <linux/virtio_config.h>
11#include <linux/bug.h>
12
diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c
index 4e7003db12c4..181793f07852 100644
--- a/drivers/virtio/virtio_balloon.c
+++ b/drivers/virtio/virtio_balloon.c
@@ -577,6 +577,8 @@ static int virtballoon_probe(struct virtio_device *vdev)
577 577
578 virtio_device_ready(vdev); 578 virtio_device_ready(vdev);
579 579
580 if (towards_target(vb))
581 virtballoon_changed(vdev);
580 return 0; 582 return 0;
581 583
582out_del_vqs: 584out_del_vqs:
diff --git a/drivers/virtio/virtio_pci_legacy.c b/drivers/virtio/virtio_pci_legacy.c
index 8c4e61783441..6d9e5173d5fa 100644
--- a/drivers/virtio/virtio_pci_legacy.c
+++ b/drivers/virtio/virtio_pci_legacy.c
@@ -212,10 +212,18 @@ int virtio_pci_legacy_probe(struct virtio_pci_device *vp_dev)
212 return -ENODEV; 212 return -ENODEV;
213 } 213 }
214 214
215 rc = dma_set_mask_and_coherent(&pci_dev->dev, DMA_BIT_MASK(64)); 215 rc = dma_set_mask(&pci_dev->dev, DMA_BIT_MASK(64));
216 if (rc) 216 if (rc) {
217 rc = dma_set_mask_and_coherent(&pci_dev->dev, 217 rc = dma_set_mask_and_coherent(&pci_dev->dev, DMA_BIT_MASK(32));
218 DMA_BIT_MASK(32)); 218 } else {
219 /*
220 * The virtio ring base address is expressed as a 32-bit PFN,
221 * with a page size of 1 << VIRTIO_PCI_QUEUE_ADDR_SHIFT.
222 */
223 dma_set_coherent_mask(&pci_dev->dev,
224 DMA_BIT_MASK(32 + VIRTIO_PCI_QUEUE_ADDR_SHIFT));
225 }
226
219 if (rc) 227 if (rc)
220 dev_warn(&pci_dev->dev, "Failed to enable 64-bit or 32-bit DMA. Trying to continue, but this might not work.\n"); 228 dev_warn(&pci_dev->dev, "Failed to enable 64-bit or 32-bit DMA. Trying to continue, but this might not work.\n");
221 229
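virtio_pci_legacy now sets different streaming and coherent DMA masks: descriptor payloads may live anywhere in a 64-bit space, but the legacy interface programs the queue base as a 32-bit page frame number shifted by VIRTIO_PCI_QUEUE_ADDR_SHIFT, so the coherently allocated rings must stay below 2^(32 + shift). A hedged sketch of that split:

static int example_set_dma_masks(struct pci_dev *pci_dev)
{
        int rc;

        rc = dma_set_mask(&pci_dev->dev, DMA_BIT_MASK(64));
        if (rc)
                return dma_set_mask_and_coherent(&pci_dev->dev, DMA_BIT_MASK(32));

        /*
         * The legacy queue address register holds a 32-bit PFN, so ring memory
         * must be addressable with 32 + VIRTIO_PCI_QUEUE_ADDR_SHIFT bits.
         */
        return dma_set_coherent_mask(&pci_dev->dev,
                                     DMA_BIT_MASK(32 + VIRTIO_PCI_QUEUE_ADDR_SHIFT));
}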
diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
index ed9c9eeedfe5..489bfc61cf30 100644
--- a/drivers/virtio/virtio_ring.c
+++ b/drivers/virtio/virtio_ring.c
@@ -167,7 +167,7 @@ static bool vring_use_dma_api(struct virtio_device *vdev)
167 * making all of the arch DMA ops work on the vring device itself 167 * making all of the arch DMA ops work on the vring device itself
168 * is a mess. For now, we use the parent device for DMA ops. 168 * is a mess. For now, we use the parent device for DMA ops.
169 */ 169 */
170static struct device *vring_dma_dev(const struct vring_virtqueue *vq) 170static inline struct device *vring_dma_dev(const struct vring_virtqueue *vq)
171{ 171{
172 return vq->vq.vdev->dev.parent; 172 return vq->vq.vdev->dev.parent;
173} 173}
@@ -732,7 +732,8 @@ void virtqueue_disable_cb(struct virtqueue *_vq)
732 732
733 if (!(vq->avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT)) { 733 if (!(vq->avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT)) {
734 vq->avail_flags_shadow |= VRING_AVAIL_F_NO_INTERRUPT; 734 vq->avail_flags_shadow |= VRING_AVAIL_F_NO_INTERRUPT;
735 vq->vring.avail->flags = cpu_to_virtio16(_vq->vdev, vq->avail_flags_shadow); 735 if (!vq->event)
736 vq->vring.avail->flags = cpu_to_virtio16(_vq->vdev, vq->avail_flags_shadow);
736 } 737 }
737 738
738} 739}
@@ -764,7 +765,8 @@ unsigned virtqueue_enable_cb_prepare(struct virtqueue *_vq)
764 * entry. Always do both to keep code simple. */ 765 * entry. Always do both to keep code simple. */
765 if (vq->avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT) { 766 if (vq->avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT) {
766 vq->avail_flags_shadow &= ~VRING_AVAIL_F_NO_INTERRUPT; 767 vq->avail_flags_shadow &= ~VRING_AVAIL_F_NO_INTERRUPT;
767 vq->vring.avail->flags = cpu_to_virtio16(_vq->vdev, vq->avail_flags_shadow); 768 if (!vq->event)
769 vq->vring.avail->flags = cpu_to_virtio16(_vq->vdev, vq->avail_flags_shadow);
768 } 770 }
769 vring_used_event(&vq->vring) = cpu_to_virtio16(_vq->vdev, last_used_idx = vq->last_used_idx); 771 vring_used_event(&vq->vring) = cpu_to_virtio16(_vq->vdev, last_used_idx = vq->last_used_idx);
770 END_USE(vq); 772 END_USE(vq);
@@ -832,10 +834,11 @@ bool virtqueue_enable_cb_delayed(struct virtqueue *_vq)
832 * more to do. */ 834 * more to do. */
833 /* Depending on the VIRTIO_RING_F_USED_EVENT_IDX feature, we need to 835 /* Depending on the VIRTIO_RING_F_USED_EVENT_IDX feature, we need to
834 * either clear the flags bit or point the event index at the next 836 * either clear the flags bit or point the event index at the next
835 * entry. Always do both to keep code simple. */ 837 * entry. Always update the event index to keep code simple. */
836 if (vq->avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT) { 838 if (vq->avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT) {
837 vq->avail_flags_shadow &= ~VRING_AVAIL_F_NO_INTERRUPT; 839 vq->avail_flags_shadow &= ~VRING_AVAIL_F_NO_INTERRUPT;
838 vq->vring.avail->flags = cpu_to_virtio16(_vq->vdev, vq->avail_flags_shadow); 840 if (!vq->event)
841 vq->vring.avail->flags = cpu_to_virtio16(_vq->vdev, vq->avail_flags_shadow);
839 } 842 }
840 /* TODO: tune this threshold */ 843 /* TODO: tune this threshold */
841 bufs = (u16)(vq->avail_idx_shadow - vq->last_used_idx) * 3 / 4; 844 bufs = (u16)(vq->avail_idx_shadow - vq->last_used_idx) * 3 / 4;
@@ -953,7 +956,8 @@ struct virtqueue *__vring_new_virtqueue(unsigned int index,
953 /* No callback? Tell other side not to bother us. */ 956 /* No callback? Tell other side not to bother us. */
954 if (!callback) { 957 if (!callback) {
955 vq->avail_flags_shadow |= VRING_AVAIL_F_NO_INTERRUPT; 958 vq->avail_flags_shadow |= VRING_AVAIL_F_NO_INTERRUPT;
956 vq->vring.avail->flags = cpu_to_virtio16(vdev, vq->avail_flags_shadow); 959 if (!vq->event)
960 vq->vring.avail->flags = cpu_to_virtio16(vdev, vq->avail_flags_shadow);
957 } 961 }
958 962
959 /* Put everything in free lists. */ 963 /* Put everything in free lists. */
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 210c94ac8818..4607af38c72e 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -2647,7 +2647,10 @@ static noinline int __btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
2647 2647
2648 btrfs_free_delayed_extent_op(extent_op); 2648 btrfs_free_delayed_extent_op(extent_op);
2649 if (ret) { 2649 if (ret) {
2650 spin_lock(&delayed_refs->lock);
2650 locked_ref->processing = 0; 2651 locked_ref->processing = 0;
2652 delayed_refs->num_heads_ready++;
2653 spin_unlock(&delayed_refs->lock);
2651 btrfs_delayed_ref_unlock(locked_ref); 2654 btrfs_delayed_ref_unlock(locked_ref);
2652 btrfs_put_delayed_ref(ref); 2655 btrfs_put_delayed_ref(ref);
2653 btrfs_debug(fs_info, "run_one_delayed_ref returned %d", 2656 btrfs_debug(fs_info, "run_one_delayed_ref returned %d",
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 66a755150056..8ed05d95584a 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -5569,7 +5569,7 @@ void le_bitmap_set(u8 *map, unsigned int start, int len)
5569 *p |= mask_to_set; 5569 *p |= mask_to_set;
5570 len -= bits_to_set; 5570 len -= bits_to_set;
5571 bits_to_set = BITS_PER_BYTE; 5571 bits_to_set = BITS_PER_BYTE;
5572 mask_to_set = ~(u8)0; 5572 mask_to_set = ~0;
5573 p++; 5573 p++;
5574 } 5574 }
5575 if (len) { 5575 if (len) {
@@ -5589,7 +5589,7 @@ void le_bitmap_clear(u8 *map, unsigned int start, int len)
5589 *p &= ~mask_to_clear; 5589 *p &= ~mask_to_clear;
5590 len -= bits_to_clear; 5590 len -= bits_to_clear;
5591 bits_to_clear = BITS_PER_BYTE; 5591 bits_to_clear = BITS_PER_BYTE;
5592 mask_to_clear = ~(u8)0; 5592 mask_to_clear = ~0;
5593 p++; 5593 p++;
5594 } 5594 }
5595 if (len) { 5595 if (len) {
@@ -5679,7 +5679,7 @@ void extent_buffer_bitmap_set(struct extent_buffer *eb, unsigned long start,
5679 kaddr[offset] |= mask_to_set; 5679 kaddr[offset] |= mask_to_set;
5680 len -= bits_to_set; 5680 len -= bits_to_set;
5681 bits_to_set = BITS_PER_BYTE; 5681 bits_to_set = BITS_PER_BYTE;
5682 mask_to_set = ~(u8)0; 5682 mask_to_set = ~0;
5683 if (++offset >= PAGE_SIZE && len > 0) { 5683 if (++offset >= PAGE_SIZE && len > 0) {
5684 offset = 0; 5684 offset = 0;
5685 page = eb->pages[++i]; 5685 page = eb->pages[++i];
@@ -5721,7 +5721,7 @@ void extent_buffer_bitmap_clear(struct extent_buffer *eb, unsigned long start,
5721 kaddr[offset] &= ~mask_to_clear; 5721 kaddr[offset] &= ~mask_to_clear;
5722 len -= bits_to_clear; 5722 len -= bits_to_clear;
5723 bits_to_clear = BITS_PER_BYTE; 5723 bits_to_clear = BITS_PER_BYTE;
5724 mask_to_clear = ~(u8)0; 5724 mask_to_clear = ~0;
5725 if (++offset >= PAGE_SIZE && len > 0) { 5725 if (++offset >= PAGE_SIZE && len > 0) {
5726 offset = 0; 5726 offset = 0;
5727 page = eb->pages[++i]; 5727 page = eb->pages[++i];
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 2b790bda7998..8e3a5a266917 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -4605,8 +4605,8 @@ delete:
4605 BUG_ON(ret); 4605 BUG_ON(ret);
4606 if (btrfs_should_throttle_delayed_refs(trans, root)) 4606 if (btrfs_should_throttle_delayed_refs(trans, root))
4607 btrfs_async_run_delayed_refs(root, 4607 btrfs_async_run_delayed_refs(root,
4608 trans->transid, 4608 trans->delayed_ref_updates * 2,
4609 trans->delayed_ref_updates * 2, 0); 4609 trans->transid, 0);
4610 if (be_nice) { 4610 if (be_nice) {
4611 if (truncate_space_check(trans, root, 4611 if (truncate_space_check(trans, root,
4612 extent_num_bytes)) { 4612 extent_num_bytes)) {
@@ -8931,9 +8931,14 @@ again:
8931 * So even we call qgroup_free_data(), it won't decrease reserved 8931 * So even we call qgroup_free_data(), it won't decrease reserved
8932 * space. 8932 * space.
8933 * 2) Not written to disk 8933 * 2) Not written to disk
8934 * This means the reserved space should be freed here. 8934 * This means the reserved space should be freed here. However,
8935 * if a truncate invalidates the page (by clearing PageDirty)
8936 * and the page is accounted for while allocating extent
8937 * in btrfs_check_data_free_space() we let delayed_ref to
8938 * free the entire extent.
8935 */ 8939 */
8936 btrfs_qgroup_free_data(inode, page_start, PAGE_SIZE); 8940 if (PageDirty(page))
8941 btrfs_qgroup_free_data(inode, page_start, PAGE_SIZE);
8937 if (!inode_evicting) { 8942 if (!inode_evicting) {
8938 clear_extent_bit(tree, page_start, page_end, 8943 clear_extent_bit(tree, page_start, page_end,
8939 EXTENT_LOCKED | EXTENT_DIRTY | 8944 EXTENT_LOCKED | EXTENT_DIRTY |
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index 18e1aa0f85f5..7acbd2cf6192 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -3814,6 +3814,11 @@ process_slot:
3814 } 3814 }
3815 btrfs_release_path(path); 3815 btrfs_release_path(path);
3816 key.offset = next_key_min_offset; 3816 key.offset = next_key_min_offset;
3817
3818 if (fatal_signal_pending(current)) {
3819 ret = -EINTR;
3820 goto out;
3821 }
3817 } 3822 }
3818 ret = 0; 3823 ret = 0;
3819 3824
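The btrfs clone ioctl gains a fatal_signal_pending() check at the bottom of its per-extent loop, so a killed task no longer has to wait for an arbitrarily large clone to complete. Long-running kernel loops commonly take this shape (helper names here are hypothetical):

static int example_process_many(struct example_item *items, int count)
{
        int i;

        for (i = 0; i < count; i++) {
                example_process_one(&items[i]);         /* hypothetical per-item work */

                /* let a fatally signalled task bail out promptly */
                if (fatal_signal_pending(current))
                        return -EINTR;
        }

        return 0;
}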
diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
index 0ec8ffa37ab0..c4af0cdb783d 100644
--- a/fs/btrfs/relocation.c
+++ b/fs/btrfs/relocation.c
@@ -2728,7 +2728,14 @@ static int do_relocation(struct btrfs_trans_handle *trans,
2728 2728
2729 bytenr = btrfs_node_blockptr(upper->eb, slot); 2729 bytenr = btrfs_node_blockptr(upper->eb, slot);
2730 if (lowest) { 2730 if (lowest) {
2731 BUG_ON(bytenr != node->bytenr); 2731 if (bytenr != node->bytenr) {
2732 btrfs_err(root->fs_info,
2733 "lowest leaf/node mismatch: bytenr %llu node->bytenr %llu slot %d upper %llu",
2734 bytenr, node->bytenr, slot,
2735 upper->eb->start);
2736 err = -EIO;
2737 goto next;
2738 }
2732 } else { 2739 } else {
2733 if (node->eb->start == bytenr) 2740 if (node->eb->start == bytenr)
2734 goto next; 2741 goto next;
diff --git a/fs/nfsd/netns.h b/fs/nfsd/netns.h
index b10d557f9c9e..ee36efd5aece 100644
--- a/fs/nfsd/netns.h
+++ b/fs/nfsd/netns.h
@@ -84,6 +84,8 @@ struct nfsd_net {
84 struct list_head client_lru; 84 struct list_head client_lru;
85 struct list_head close_lru; 85 struct list_head close_lru;
86 struct list_head del_recall_lru; 86 struct list_head del_recall_lru;
87
88 /* protected by blocked_locks_lock */
87 struct list_head blocked_locks_lru; 89 struct list_head blocked_locks_lru;
88 90
89 struct delayed_work laundromat_work; 91 struct delayed_work laundromat_work;
@@ -91,6 +93,9 @@ struct nfsd_net {
91 /* client_lock protects the client lru list and session hash table */ 93 /* client_lock protects the client lru list and session hash table */
92 spinlock_t client_lock; 94 spinlock_t client_lock;
93 95
96 /* protects blocked_locks_lru */
97 spinlock_t blocked_locks_lock;
98
94 struct file *rec_file; 99 struct file *rec_file;
95 bool in_grace; 100 bool in_grace;
96 const struct nfsd4_client_tracking_ops *client_tracking_ops; 101 const struct nfsd4_client_tracking_ops *client_tracking_ops;
diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
index 9752beb78659..4b4beaaa4eaa 100644
--- a/fs/nfsd/nfs4state.c
+++ b/fs/nfsd/nfs4state.c
@@ -217,7 +217,7 @@ find_blocked_lock(struct nfs4_lockowner *lo, struct knfsd_fh *fh,
217{ 217{
218 struct nfsd4_blocked_lock *cur, *found = NULL; 218 struct nfsd4_blocked_lock *cur, *found = NULL;
219 219
220 spin_lock(&nn->client_lock); 220 spin_lock(&nn->blocked_locks_lock);
221 list_for_each_entry(cur, &lo->lo_blocked, nbl_list) { 221 list_for_each_entry(cur, &lo->lo_blocked, nbl_list) {
222 if (fh_match(fh, &cur->nbl_fh)) { 222 if (fh_match(fh, &cur->nbl_fh)) {
223 list_del_init(&cur->nbl_list); 223 list_del_init(&cur->nbl_list);
@@ -226,7 +226,7 @@ find_blocked_lock(struct nfs4_lockowner *lo, struct knfsd_fh *fh,
226 break; 226 break;
227 } 227 }
228 } 228 }
229 spin_unlock(&nn->client_lock); 229 spin_unlock(&nn->blocked_locks_lock);
230 if (found) 230 if (found)
231 posix_unblock_lock(&found->nbl_lock); 231 posix_unblock_lock(&found->nbl_lock);
232 return found; 232 return found;
@@ -1227,9 +1227,7 @@ static void put_ol_stateid_locked(struct nfs4_ol_stateid *stp,
1227 1227
1228static bool unhash_lock_stateid(struct nfs4_ol_stateid *stp) 1228static bool unhash_lock_stateid(struct nfs4_ol_stateid *stp)
1229{ 1229{
1230 struct nfs4_openowner *oo = openowner(stp->st_openstp->st_stateowner); 1230 lockdep_assert_held(&stp->st_stid.sc_client->cl_lock);
1231
1232 lockdep_assert_held(&oo->oo_owner.so_client->cl_lock);
1233 1231
1234 list_del_init(&stp->st_locks); 1232 list_del_init(&stp->st_locks);
1235 nfs4_unhash_stid(&stp->st_stid); 1233 nfs4_unhash_stid(&stp->st_stid);
@@ -1238,12 +1236,12 @@ static bool unhash_lock_stateid(struct nfs4_ol_stateid *stp)
1238 1236
1239static void release_lock_stateid(struct nfs4_ol_stateid *stp) 1237static void release_lock_stateid(struct nfs4_ol_stateid *stp)
1240{ 1238{
1241 struct nfs4_openowner *oo = openowner(stp->st_openstp->st_stateowner); 1239 struct nfs4_client *clp = stp->st_stid.sc_client;
1242 bool unhashed; 1240 bool unhashed;
1243 1241
1244 spin_lock(&oo->oo_owner.so_client->cl_lock); 1242 spin_lock(&clp->cl_lock);
1245 unhashed = unhash_lock_stateid(stp); 1243 unhashed = unhash_lock_stateid(stp);
1246 spin_unlock(&oo->oo_owner.so_client->cl_lock); 1244 spin_unlock(&clp->cl_lock);
1247 if (unhashed) 1245 if (unhashed)
1248 nfs4_put_stid(&stp->st_stid); 1246 nfs4_put_stid(&stp->st_stid);
1249} 1247}
@@ -4665,7 +4663,7 @@ nfs4_laundromat(struct nfsd_net *nn)
4665 * indefinitely once the lock does become free. 4663 * indefinitely once the lock does become free.
4666 */ 4664 */
4667 BUG_ON(!list_empty(&reaplist)); 4665 BUG_ON(!list_empty(&reaplist));
4668 spin_lock(&nn->client_lock); 4666 spin_lock(&nn->blocked_locks_lock);
4669 while (!list_empty(&nn->blocked_locks_lru)) { 4667 while (!list_empty(&nn->blocked_locks_lru)) {
4670 nbl = list_first_entry(&nn->blocked_locks_lru, 4668 nbl = list_first_entry(&nn->blocked_locks_lru,
4671 struct nfsd4_blocked_lock, nbl_lru); 4669 struct nfsd4_blocked_lock, nbl_lru);
@@ -4678,7 +4676,7 @@ nfs4_laundromat(struct nfsd_net *nn)
4678 list_move(&nbl->nbl_lru, &reaplist); 4676 list_move(&nbl->nbl_lru, &reaplist);
4679 list_del_init(&nbl->nbl_list); 4677 list_del_init(&nbl->nbl_list);
4680 } 4678 }
4681 spin_unlock(&nn->client_lock); 4679 spin_unlock(&nn->blocked_locks_lock);
4682 4680
4683 while (!list_empty(&reaplist)) { 4681 while (!list_empty(&reaplist)) {
4684 nbl = list_first_entry(&nn->blocked_locks_lru, 4682 nbl = list_first_entry(&nn->blocked_locks_lru,
@@ -5439,13 +5437,13 @@ nfsd4_lm_notify(struct file_lock *fl)
5439 bool queue = false; 5437 bool queue = false;
5440 5438
5441 /* An empty list means that something else is going to be using it */ 5439 /* An empty list means that something else is going to be using it */
5442 spin_lock(&nn->client_lock); 5440 spin_lock(&nn->blocked_locks_lock);
5443 if (!list_empty(&nbl->nbl_list)) { 5441 if (!list_empty(&nbl->nbl_list)) {
5444 list_del_init(&nbl->nbl_list); 5442 list_del_init(&nbl->nbl_list);
5445 list_del_init(&nbl->nbl_lru); 5443 list_del_init(&nbl->nbl_lru);
5446 queue = true; 5444 queue = true;
5447 } 5445 }
5448 spin_unlock(&nn->client_lock); 5446 spin_unlock(&nn->blocked_locks_lock);
5449 5447
5450 if (queue) 5448 if (queue)
5451 nfsd4_run_cb(&nbl->nbl_cb); 5449 nfsd4_run_cb(&nbl->nbl_cb);
@@ -5868,10 +5866,10 @@ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
5868 5866
5869 if (fl_flags & FL_SLEEP) { 5867 if (fl_flags & FL_SLEEP) {
5870 nbl->nbl_time = jiffies; 5868 nbl->nbl_time = jiffies;
5871 spin_lock(&nn->client_lock); 5869 spin_lock(&nn->blocked_locks_lock);
5872 list_add_tail(&nbl->nbl_list, &lock_sop->lo_blocked); 5870 list_add_tail(&nbl->nbl_list, &lock_sop->lo_blocked);
5873 list_add_tail(&nbl->nbl_lru, &nn->blocked_locks_lru); 5871 list_add_tail(&nbl->nbl_lru, &nn->blocked_locks_lru);
5874 spin_unlock(&nn->client_lock); 5872 spin_unlock(&nn->blocked_locks_lock);
5875 } 5873 }
5876 5874
5877 err = vfs_lock_file(filp, F_SETLK, file_lock, conflock); 5875 err = vfs_lock_file(filp, F_SETLK, file_lock, conflock);
@@ -5900,10 +5898,10 @@ out:
5900 if (nbl) { 5898 if (nbl) {
5901 /* dequeue it if we queued it before */ 5899 /* dequeue it if we queued it before */
5902 if (fl_flags & FL_SLEEP) { 5900 if (fl_flags & FL_SLEEP) {
5903 spin_lock(&nn->client_lock); 5901 spin_lock(&nn->blocked_locks_lock);
5904 list_del_init(&nbl->nbl_list); 5902 list_del_init(&nbl->nbl_list);
5905 list_del_init(&nbl->nbl_lru); 5903 list_del_init(&nbl->nbl_lru);
5906 spin_unlock(&nn->client_lock); 5904 spin_unlock(&nn->blocked_locks_lock);
5907 } 5905 }
5908 free_blocked_lock(nbl); 5906 free_blocked_lock(nbl);
5909 } 5907 }
@@ -6943,9 +6941,11 @@ static int nfs4_state_create_net(struct net *net)
6943 INIT_LIST_HEAD(&nn->client_lru); 6941 INIT_LIST_HEAD(&nn->client_lru);
6944 INIT_LIST_HEAD(&nn->close_lru); 6942 INIT_LIST_HEAD(&nn->close_lru);
6945 INIT_LIST_HEAD(&nn->del_recall_lru); 6943 INIT_LIST_HEAD(&nn->del_recall_lru);
6946 INIT_LIST_HEAD(&nn->blocked_locks_lru);
6947 spin_lock_init(&nn->client_lock); 6944 spin_lock_init(&nn->client_lock);
6948 6945
6946 spin_lock_init(&nn->blocked_locks_lock);
6947 INIT_LIST_HEAD(&nn->blocked_locks_lru);
6948
6949 INIT_DELAYED_WORK(&nn->laundromat_work, laundromat_main); 6949 INIT_DELAYED_WORK(&nn->laundromat_work, laundromat_main);
6950 get_net(net); 6950 get_net(net);
6951 6951
@@ -7063,14 +7063,14 @@ nfs4_state_shutdown_net(struct net *net)
7063 } 7063 }
7064 7064
7065 BUG_ON(!list_empty(&reaplist)); 7065 BUG_ON(!list_empty(&reaplist));
7066 spin_lock(&nn->client_lock); 7066 spin_lock(&nn->blocked_locks_lock);
7067 while (!list_empty(&nn->blocked_locks_lru)) { 7067 while (!list_empty(&nn->blocked_locks_lru)) {
7068 nbl = list_first_entry(&nn->blocked_locks_lru, 7068 nbl = list_first_entry(&nn->blocked_locks_lru,
7069 struct nfsd4_blocked_lock, nbl_lru); 7069 struct nfsd4_blocked_lock, nbl_lru);
7070 list_move(&nbl->nbl_lru, &reaplist); 7070 list_move(&nbl->nbl_lru, &reaplist);
7071 list_del_init(&nbl->nbl_list); 7071 list_del_init(&nbl->nbl_list);
7072 } 7072 }
7073 spin_unlock(&nn->client_lock); 7073 spin_unlock(&nn->blocked_locks_lock);
7074 7074
7075 while (!list_empty(&reaplist)) { 7075 while (!list_empty(&reaplist)) {
7076 nbl = list_first_entry(&nn->blocked_locks_lru, 7076 nbl = list_first_entry(&nn->blocked_locks_lru,
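The nfsd changes give the blocked-locks LRU its own spinlock instead of piggybacking on the much broader client_lock; the new netns.h comments document exactly what blocked_locks_lock protects, and every list manipulation (laundromat, lm_notify callback, lock setup and teardown, shutdown) now takes it. The shape of the pattern, with illustrative names:

struct example_net {
        spinlock_t blocked_lock;                /* protects blocked_lru only */
        struct list_head blocked_lru;
};

static void example_net_init(struct example_net *nn)
{
        spin_lock_init(&nn->blocked_lock);
        INIT_LIST_HEAD(&nn->blocked_lru);
}

static void example_queue_blocked(struct example_net *nn, struct list_head *entry)
{
        spin_lock(&nn->blocked_lock);           /* all list access goes through the lock */
        list_add_tail(entry, &nn->blocked_lru);
        spin_unlock(&nn->blocked_lock);
}

static void example_dequeue_blocked(struct example_net *nn, struct list_head *entry)
{
        spin_lock(&nn->blocked_lock);
        list_del_init(entry);
        spin_unlock(&nn->blocked_lock);
}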
diff --git a/fs/overlayfs/copy_up.c b/fs/overlayfs/copy_up.c
index aeb60f791418..36795eed40b0 100644
--- a/fs/overlayfs/copy_up.c
+++ b/fs/overlayfs/copy_up.c
@@ -178,6 +178,8 @@ static int ovl_copy_up_data(struct path *old, struct path *new, loff_t len)
178 len -= bytes; 178 len -= bytes;
179 } 179 }
180 180
181 if (!error)
182 error = vfs_fsync(new_file, 0);
181 fput(new_file); 183 fput(new_file);
182out_fput: 184out_fput:
183 fput(old_file); 185 fput(old_file);
diff --git a/fs/overlayfs/inode.c b/fs/overlayfs/inode.c
index c58f01babf30..7fb53d055537 100644
--- a/fs/overlayfs/inode.c
+++ b/fs/overlayfs/inode.c
@@ -270,9 +270,6 @@ struct posix_acl *ovl_get_acl(struct inode *inode, int type)
270 if (!IS_ENABLED(CONFIG_FS_POSIX_ACL) || !IS_POSIXACL(realinode)) 270 if (!IS_ENABLED(CONFIG_FS_POSIX_ACL) || !IS_POSIXACL(realinode))
271 return NULL; 271 return NULL;
272 272
273 if (!realinode->i_op->get_acl)
274 return NULL;
275
276 old_cred = ovl_override_creds(inode->i_sb); 273 old_cred = ovl_override_creds(inode->i_sb);
277 acl = get_acl(realinode, type); 274 acl = get_acl(realinode, type);
278 revert_creds(old_cred); 275 revert_creds(old_cred);
diff --git a/fs/overlayfs/super.c b/fs/overlayfs/super.c
index bcf3965be819..edd46a0e951d 100644
--- a/fs/overlayfs/super.c
+++ b/fs/overlayfs/super.c
@@ -1037,6 +1037,21 @@ ovl_posix_acl_xattr_set(const struct xattr_handler *handler,
1037 1037
1038 posix_acl_release(acl); 1038 posix_acl_release(acl);
1039 1039
1040 /*
1041 * Check if sgid bit needs to be cleared (actual setacl operation will
1042 * be done with mounter's capabilities and so that won't do it for us).
1043 */
1044 if (unlikely(inode->i_mode & S_ISGID) &&
1045 handler->flags == ACL_TYPE_ACCESS &&
1046 !in_group_p(inode->i_gid) &&
1047 !capable_wrt_inode_uidgid(inode, CAP_FSETID)) {
1048 struct iattr iattr = { .ia_valid = ATTR_KILL_SGID };
1049
1050 err = ovl_setattr(dentry, &iattr);
1051 if (err)
1052 return err;
1053 }
1054
1040 err = ovl_xattr_set(dentry, handler->name, value, size, flags); 1055 err = ovl_xattr_set(dentry, handler->name, value, size, flags);
1041 if (!err) 1056 if (!err)
1042 ovl_copyattr(ovl_inode_real(inode, NULL), inode); 1057 ovl_copyattr(ovl_inode_real(inode, NULL), inode);
diff --git a/include/drm/drm_plane.h b/include/drm/drm_plane.h
index 43cf193e54d6..8b4dc62470ff 100644
--- a/include/drm/drm_plane.h
+++ b/include/drm/drm_plane.h
@@ -47,8 +47,14 @@ struct drm_crtc;
47 * @src_h: height of visible portion of plane (in 16.16) 47 * @src_h: height of visible portion of plane (in 16.16)
48 * @rotation: rotation of the plane 48 * @rotation: rotation of the plane
49 * @zpos: priority of the given plane on crtc (optional) 49 * @zpos: priority of the given plane on crtc (optional)
50 * Note that multiple active planes on the same crtc can have an identical
51 * zpos value. The rule to solving the conflict is to compare the plane
52 * object IDs; the plane with a higher ID must be stacked on top of a
53 * plane with a lower ID.
50 * @normalized_zpos: normalized value of zpos: unique, range from 0 to N-1 54 * @normalized_zpos: normalized value of zpos: unique, range from 0 to N-1
51 * where N is the number of active planes for given crtc 55 * where N is the number of active planes for given crtc. Note that
56 * the driver must call drm_atomic_normalize_zpos() to update this before
57 * it can be trusted.
52 * @src: clipped source coordinates of the plane (in 16.16) 58 * @src: clipped source coordinates of the plane (in 16.16)
53 * @dst: clipped destination coordinates of the plane 59 * @dst: clipped destination coordinates of the plane
54 * @visible: visibility of the plane 60 * @visible: visibility of the plane
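The updated kerneldoc spells out the tie-break rule for identical zpos values (the plane with the higher object ID stacks on top) and notes that normalized_zpos is only meaningful after drm_atomic_normalize_zpos() has run. A small user-space illustration of that normalization, using an invented fake_plane type: sort by (zpos, id) and hand out indices 0..N-1:

    #include <stdlib.h>

    struct fake_plane {
        unsigned int id;          /* plane object ID, unique */
        int zpos;                 /* requested zpos, may collide */
        int normalized_zpos;      /* result: unique, 0..N-1 */
    };

    static int cmp_zpos(const void *pa, const void *pb)
    {
        const struct fake_plane *a = *(const struct fake_plane * const *)pa;
        const struct fake_plane *b = *(const struct fake_plane * const *)pb;

        if (a->zpos != b->zpos)
            return a->zpos < b->zpos ? -1 : 1;
        return a->id < b->id ? -1 : 1;   /* tie: higher ID stacks on top */
    }

    static void normalize_zpos(struct fake_plane **planes, int n)
    {
        qsort(planes, (size_t)n, sizeof(*planes), cmp_zpos);
        for (int i = 0; i < n; i++)
            planes[i]->normalized_zpos = i;
    }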
diff --git a/include/linux/mtd/nand.h b/include/linux/mtd/nand.h
index c5d3d5024fc8..d8905a229f34 100644
--- a/include/linux/mtd/nand.h
+++ b/include/linux/mtd/nand.h
@@ -1184,7 +1184,7 @@ int nand_read_oob_syndrome(struct mtd_info *mtd, struct nand_chip *chip,
1184 int page); 1184 int page);
1185 1185
1186/* Reset and initialize a NAND device */ 1186/* Reset and initialize a NAND device */
1187int nand_reset(struct nand_chip *chip); 1187int nand_reset(struct nand_chip *chip, int chipnr);
1188 1188
1189/* Free resources held by the NAND device */ 1189/* Free resources held by the NAND device */
1190void nand_cleanup(struct nand_chip *chip); 1190void nand_cleanup(struct nand_chip *chip);
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 42d4027f9e26..154fd689fe02 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -5192,21 +5192,14 @@ void sched_show_task(struct task_struct *p)
5192 int ppid; 5192 int ppid;
5193 unsigned long state = p->state; 5193 unsigned long state = p->state;
5194 5194
5195 if (!try_get_task_stack(p))
5196 return;
5195 if (state) 5197 if (state)
5196 state = __ffs(state) + 1; 5198 state = __ffs(state) + 1;
5197 printk(KERN_INFO "%-15.15s %c", p->comm, 5199 printk(KERN_INFO "%-15.15s %c", p->comm,
5198 state < sizeof(stat_nam) - 1 ? stat_nam[state] : '?'); 5200 state < sizeof(stat_nam) - 1 ? stat_nam[state] : '?');
5199#if BITS_PER_LONG == 32
5200 if (state == TASK_RUNNING)
5201 printk(KERN_CONT " running ");
5202 else
5203 printk(KERN_CONT " %08lx ", thread_saved_pc(p));
5204#else
5205 if (state == TASK_RUNNING) 5201 if (state == TASK_RUNNING)
5206 printk(KERN_CONT " running task "); 5202 printk(KERN_CONT " running task ");
5207 else
5208 printk(KERN_CONT " %016lx ", thread_saved_pc(p));
5209#endif
5210#ifdef CONFIG_DEBUG_STACK_USAGE 5203#ifdef CONFIG_DEBUG_STACK_USAGE
5211 free = stack_not_used(p); 5204 free = stack_not_used(p);
5212#endif 5205#endif
@@ -5221,6 +5214,7 @@ void sched_show_task(struct task_struct *p)
5221 5214
5222 print_worker_info(KERN_INFO, p); 5215 print_worker_info(KERN_INFO, p);
5223 show_stack(p, NULL); 5216 show_stack(p, NULL);
5217 put_task_stack(p);
5224} 5218}
5225 5219
5226void show_state_filter(unsigned long state_filter) 5220void show_state_filter(unsigned long state_filter)
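sched_show_task() now brackets the stack dump with try_get_task_stack()/put_task_stack(), presumably so the task's stack cannot be freed underneath the dump now that its lifetime is decoupled from the task struct, and the 32/64-bit thread_saved_pc printout is dropped. The guard reduces to a plain try-get/use/put pattern, sketched here with toy types and a simple counter standing in for the real reference count:

    #include <stdbool.h>
    #include <stdio.h>

    struct task_like {
        int stack_refs;           /* 0 means the stack is already gone */
        const char *name;
    };

    static bool try_get_stack(struct task_like *t)
    {
        if (t->stack_refs == 0)
            return false;
        t->stack_refs++;
        return true;
    }

    static void put_stack(struct task_like *t)
    {
        t->stack_refs--;
    }

    static void show_task_sketch(struct task_like *t)
    {
        if (!try_get_stack(t))    /* stack already freed: nothing to dump */
            return;
        printf("%-15s dumping stack...\n", t->name);
        put_stack(t);
    }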
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 8fd42aa7c4bd..072d791dce2d 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -92,7 +92,7 @@ int _node_numa_mem_[MAX_NUMNODES];
92#endif 92#endif
93 93
94#ifdef CONFIG_GCC_PLUGIN_LATENT_ENTROPY 94#ifdef CONFIG_GCC_PLUGIN_LATENT_ENTROPY
95volatile u64 latent_entropy __latent_entropy; 95volatile unsigned long latent_entropy __latent_entropy;
96EXPORT_SYMBOL(latent_entropy); 96EXPORT_SYMBOL(latent_entropy);
97#endif 97#endif
98 98
diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c
index d8bd97a5a7c9..3dfd769dc5b5 100644
--- a/net/sunrpc/auth_gss/auth_gss.c
+++ b/net/sunrpc/auth_gss/auth_gss.c
@@ -1616,7 +1616,7 @@ gss_validate(struct rpc_task *task, __be32 *p)
1616{ 1616{
1617 struct rpc_cred *cred = task->tk_rqstp->rq_cred; 1617 struct rpc_cred *cred = task->tk_rqstp->rq_cred;
1618 struct gss_cl_ctx *ctx = gss_cred_get_ctx(cred); 1618 struct gss_cl_ctx *ctx = gss_cred_get_ctx(cred);
1619 __be32 seq; 1619 __be32 *seq = NULL;
1620 struct kvec iov; 1620 struct kvec iov;
1621 struct xdr_buf verf_buf; 1621 struct xdr_buf verf_buf;
1622 struct xdr_netobj mic; 1622 struct xdr_netobj mic;
@@ -1631,9 +1631,12 @@ gss_validate(struct rpc_task *task, __be32 *p)
1631 goto out_bad; 1631 goto out_bad;
1632 if (flav != RPC_AUTH_GSS) 1632 if (flav != RPC_AUTH_GSS)
1633 goto out_bad; 1633 goto out_bad;
1634 seq = htonl(task->tk_rqstp->rq_seqno); 1634 seq = kmalloc(4, GFP_NOFS);
1635 iov.iov_base = &seq; 1635 if (!seq)
1636 iov.iov_len = sizeof(seq); 1636 goto out_bad;
1637 *seq = htonl(task->tk_rqstp->rq_seqno);
1638 iov.iov_base = seq;
1639 iov.iov_len = 4;
1637 xdr_buf_from_iov(&iov, &verf_buf); 1640 xdr_buf_from_iov(&iov, &verf_buf);
1638 mic.data = (u8 *)p; 1641 mic.data = (u8 *)p;
1639 mic.len = len; 1642 mic.len = len;
@@ -1653,11 +1656,13 @@ gss_validate(struct rpc_task *task, __be32 *p)
1653 gss_put_ctx(ctx); 1656 gss_put_ctx(ctx);
1654 dprintk("RPC: %5u %s: gss_verify_mic succeeded.\n", 1657 dprintk("RPC: %5u %s: gss_verify_mic succeeded.\n",
1655 task->tk_pid, __func__); 1658 task->tk_pid, __func__);
1659 kfree(seq);
1656 return p + XDR_QUADLEN(len); 1660 return p + XDR_QUADLEN(len);
1657out_bad: 1661out_bad:
1658 gss_put_ctx(ctx); 1662 gss_put_ctx(ctx);
1659 dprintk("RPC: %5u %s failed ret %ld.\n", task->tk_pid, __func__, 1663 dprintk("RPC: %5u %s failed ret %ld.\n", task->tk_pid, __func__,
1660 PTR_ERR(ret)); 1664 PTR_ERR(ret));
1665 kfree(seq);
1661 return ret; 1666 return ret;
1662} 1667}
1663 1668
diff --git a/net/sunrpc/auth_gss/gss_krb5_crypto.c b/net/sunrpc/auth_gss/gss_krb5_crypto.c
index 244245bcbbd2..90115ceefd49 100644
--- a/net/sunrpc/auth_gss/gss_krb5_crypto.c
+++ b/net/sunrpc/auth_gss/gss_krb5_crypto.c
@@ -166,8 +166,8 @@ make_checksum_hmac_md5(struct krb5_ctx *kctx, char *header, int hdrlen,
166 unsigned int usage, struct xdr_netobj *cksumout) 166 unsigned int usage, struct xdr_netobj *cksumout)
167{ 167{
168 struct scatterlist sg[1]; 168 struct scatterlist sg[1];
169 int err; 169 int err = -1;
170 u8 checksumdata[GSS_KRB5_MAX_CKSUM_LEN]; 170 u8 *checksumdata;
171 u8 rc4salt[4]; 171 u8 rc4salt[4];
172 struct crypto_ahash *md5; 172 struct crypto_ahash *md5;
173 struct crypto_ahash *hmac_md5; 173 struct crypto_ahash *hmac_md5;
@@ -187,23 +187,22 @@ make_checksum_hmac_md5(struct krb5_ctx *kctx, char *header, int hdrlen,
187 return GSS_S_FAILURE; 187 return GSS_S_FAILURE;
188 } 188 }
189 189
190 checksumdata = kmalloc(GSS_KRB5_MAX_CKSUM_LEN, GFP_NOFS);
191 if (!checksumdata)
192 return GSS_S_FAILURE;
193
190 md5 = crypto_alloc_ahash("md5", 0, CRYPTO_ALG_ASYNC); 194 md5 = crypto_alloc_ahash("md5", 0, CRYPTO_ALG_ASYNC);
191 if (IS_ERR(md5)) 195 if (IS_ERR(md5))
192 return GSS_S_FAILURE; 196 goto out_free_cksum;
193 197
194 hmac_md5 = crypto_alloc_ahash(kctx->gk5e->cksum_name, 0, 198 hmac_md5 = crypto_alloc_ahash(kctx->gk5e->cksum_name, 0,
195 CRYPTO_ALG_ASYNC); 199 CRYPTO_ALG_ASYNC);
196 if (IS_ERR(hmac_md5)) { 200 if (IS_ERR(hmac_md5))
197 crypto_free_ahash(md5); 201 goto out_free_md5;
198 return GSS_S_FAILURE;
199 }
200 202
201 req = ahash_request_alloc(md5, GFP_KERNEL); 203 req = ahash_request_alloc(md5, GFP_KERNEL);
202 if (!req) { 204 if (!req)
203 crypto_free_ahash(hmac_md5); 205 goto out_free_hmac_md5;
204 crypto_free_ahash(md5);
205 return GSS_S_FAILURE;
206 }
207 206
208 ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP, NULL, NULL); 207 ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP, NULL, NULL);
209 208
@@ -232,11 +231,8 @@ make_checksum_hmac_md5(struct krb5_ctx *kctx, char *header, int hdrlen,
232 231
233 ahash_request_free(req); 232 ahash_request_free(req);
234 req = ahash_request_alloc(hmac_md5, GFP_KERNEL); 233 req = ahash_request_alloc(hmac_md5, GFP_KERNEL);
235 if (!req) { 234 if (!req)
236 crypto_free_ahash(hmac_md5); 235 goto out_free_hmac_md5;
237 crypto_free_ahash(md5);
238 return GSS_S_FAILURE;
239 }
240 236
241 ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP, NULL, NULL); 237 ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP, NULL, NULL);
242 238
@@ -258,8 +254,12 @@ make_checksum_hmac_md5(struct krb5_ctx *kctx, char *header, int hdrlen,
258 cksumout->len = kctx->gk5e->cksumlength; 254 cksumout->len = kctx->gk5e->cksumlength;
259out: 255out:
260 ahash_request_free(req); 256 ahash_request_free(req);
261 crypto_free_ahash(md5); 257out_free_hmac_md5:
262 crypto_free_ahash(hmac_md5); 258 crypto_free_ahash(hmac_md5);
259out_free_md5:
260 crypto_free_ahash(md5);
261out_free_cksum:
262 kfree(checksumdata);
263 return err ? GSS_S_FAILURE : 0; 263 return err ? GSS_S_FAILURE : 0;
264} 264}
265 265
@@ -276,8 +276,8 @@ make_checksum(struct krb5_ctx *kctx, char *header, int hdrlen,
276 struct crypto_ahash *tfm; 276 struct crypto_ahash *tfm;
277 struct ahash_request *req; 277 struct ahash_request *req;
278 struct scatterlist sg[1]; 278 struct scatterlist sg[1];
279 int err; 279 int err = -1;
280 u8 checksumdata[GSS_KRB5_MAX_CKSUM_LEN]; 280 u8 *checksumdata;
281 unsigned int checksumlen; 281 unsigned int checksumlen;
282 282
283 if (kctx->gk5e->ctype == CKSUMTYPE_HMAC_MD5_ARCFOUR) 283 if (kctx->gk5e->ctype == CKSUMTYPE_HMAC_MD5_ARCFOUR)
@@ -291,15 +291,17 @@ make_checksum(struct krb5_ctx *kctx, char *header, int hdrlen,
291 return GSS_S_FAILURE; 291 return GSS_S_FAILURE;
292 } 292 }
293 293
294 checksumdata = kmalloc(GSS_KRB5_MAX_CKSUM_LEN, GFP_NOFS);
295 if (checksumdata == NULL)
296 return GSS_S_FAILURE;
297
294 tfm = crypto_alloc_ahash(kctx->gk5e->cksum_name, 0, CRYPTO_ALG_ASYNC); 298 tfm = crypto_alloc_ahash(kctx->gk5e->cksum_name, 0, CRYPTO_ALG_ASYNC);
295 if (IS_ERR(tfm)) 299 if (IS_ERR(tfm))
296 return GSS_S_FAILURE; 300 goto out_free_cksum;
297 301
298 req = ahash_request_alloc(tfm, GFP_KERNEL); 302 req = ahash_request_alloc(tfm, GFP_KERNEL);
299 if (!req) { 303 if (!req)
300 crypto_free_ahash(tfm); 304 goto out_free_ahash;
301 return GSS_S_FAILURE;
302 }
303 305
304 ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP, NULL, NULL); 306 ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP, NULL, NULL);
305 307
@@ -349,7 +351,10 @@ make_checksum(struct krb5_ctx *kctx, char *header, int hdrlen,
349 cksumout->len = kctx->gk5e->cksumlength; 351 cksumout->len = kctx->gk5e->cksumlength;
350out: 352out:
351 ahash_request_free(req); 353 ahash_request_free(req);
354out_free_ahash:
352 crypto_free_ahash(tfm); 355 crypto_free_ahash(tfm);
356out_free_cksum:
357 kfree(checksumdata);
353 return err ? GSS_S_FAILURE : 0; 358 return err ? GSS_S_FAILURE : 0;
354} 359}
355 360
@@ -368,8 +373,8 @@ make_checksum_v2(struct krb5_ctx *kctx, char *header, int hdrlen,
368 struct crypto_ahash *tfm; 373 struct crypto_ahash *tfm;
369 struct ahash_request *req; 374 struct ahash_request *req;
370 struct scatterlist sg[1]; 375 struct scatterlist sg[1];
371 int err; 376 int err = -1;
372 u8 checksumdata[GSS_KRB5_MAX_CKSUM_LEN]; 377 u8 *checksumdata;
373 unsigned int checksumlen; 378 unsigned int checksumlen;
374 379
375 if (kctx->gk5e->keyed_cksum == 0) { 380 if (kctx->gk5e->keyed_cksum == 0) {
@@ -383,16 +388,18 @@ make_checksum_v2(struct krb5_ctx *kctx, char *header, int hdrlen,
383 return GSS_S_FAILURE; 388 return GSS_S_FAILURE;
384 } 389 }
385 390
391 checksumdata = kmalloc(GSS_KRB5_MAX_CKSUM_LEN, GFP_NOFS);
392 if (!checksumdata)
393 return GSS_S_FAILURE;
394
386 tfm = crypto_alloc_ahash(kctx->gk5e->cksum_name, 0, CRYPTO_ALG_ASYNC); 395 tfm = crypto_alloc_ahash(kctx->gk5e->cksum_name, 0, CRYPTO_ALG_ASYNC);
387 if (IS_ERR(tfm)) 396 if (IS_ERR(tfm))
388 return GSS_S_FAILURE; 397 goto out_free_cksum;
389 checksumlen = crypto_ahash_digestsize(tfm); 398 checksumlen = crypto_ahash_digestsize(tfm);
390 399
391 req = ahash_request_alloc(tfm, GFP_KERNEL); 400 req = ahash_request_alloc(tfm, GFP_KERNEL);
392 if (!req) { 401 if (!req)
393 crypto_free_ahash(tfm); 402 goto out_free_ahash;
394 return GSS_S_FAILURE;
395 }
396 403
397 ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP, NULL, NULL); 404 ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP, NULL, NULL);
398 405
@@ -433,7 +440,10 @@ make_checksum_v2(struct krb5_ctx *kctx, char *header, int hdrlen,
433 } 440 }
434out: 441out:
435 ahash_request_free(req); 442 ahash_request_free(req);
443out_free_ahash:
436 crypto_free_ahash(tfm); 444 crypto_free_ahash(tfm);
445out_free_cksum:
446 kfree(checksumdata);
437 return err ? GSS_S_FAILURE : 0; 447 return err ? GSS_S_FAILURE : 0;
438} 448}
439 449
@@ -666,14 +676,17 @@ gss_krb5_cts_crypt(struct crypto_skcipher *cipher, struct xdr_buf *buf,
666 u32 ret; 676 u32 ret;
667 struct scatterlist sg[1]; 677 struct scatterlist sg[1];
668 SKCIPHER_REQUEST_ON_STACK(req, cipher); 678 SKCIPHER_REQUEST_ON_STACK(req, cipher);
669 u8 data[GSS_KRB5_MAX_BLOCKSIZE * 2]; 679 u8 *data;
670 struct page **save_pages; 680 struct page **save_pages;
671 u32 len = buf->len - offset; 681 u32 len = buf->len - offset;
672 682
673 if (len > ARRAY_SIZE(data)) { 683 if (len > GSS_KRB5_MAX_BLOCKSIZE * 2) {
674 WARN_ON(0); 684 WARN_ON(0);
675 return -ENOMEM; 685 return -ENOMEM;
676 } 686 }
687 data = kmalloc(GSS_KRB5_MAX_BLOCKSIZE * 2, GFP_NOFS);
688 if (!data)
689 return -ENOMEM;
677 690
678 /* 691 /*
679 * For encryption, we want to read from the cleartext 692 * For encryption, we want to read from the cleartext
@@ -708,6 +721,7 @@ gss_krb5_cts_crypt(struct crypto_skcipher *cipher, struct xdr_buf *buf,
708 ret = write_bytes_to_xdr_buf(buf, offset, data, len); 721 ret = write_bytes_to_xdr_buf(buf, offset, data, len);
709 722
710out: 723out:
724 kfree(data);
711 return ret; 725 return ret;
712} 726}
713 727
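The gss_krb5 changes above all follow the same two ideas: the checksum and CTS scratch buffers move from the stack to kmalloc() (apparently because on-stack memory cannot safely be handed to the scatterlist-based crypto paths once stacks may be virtually mapped), and the early-return error handling is rewritten as a single goto unwind ladder so every exit frees exactly what was allocated. A compact user-space sketch of that unwind shape, with malloc() standing in for the crypto allocators and invented names throughout:

    #include <stdlib.h>

    static int make_checksum_sketch(void)
    {
        int err = -1;
        unsigned char *cksum;
        void *tfm, *req;

        cksum = malloc(64);          /* heap buffer instead of the stack */
        if (!cksum)
            return -1;

        tfm = malloc(32);            /* stands in for crypto_alloc_ahash() */
        if (!tfm)
            goto out_free_cksum;

        req = malloc(32);            /* stands in for ahash_request_alloc() */
        if (!req)
            goto out_free_tfm;

        err = 0;                     /* ... the real work happens here ... */

        free(req);
    out_free_tfm:
        free(tfm);
    out_free_cksum:
        free(cksum);
        return err;
    }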
diff --git a/net/sunrpc/auth_gss/svcauth_gss.c b/net/sunrpc/auth_gss/svcauth_gss.c
index d67f7e1bc82d..45662d7f0943 100644
--- a/net/sunrpc/auth_gss/svcauth_gss.c
+++ b/net/sunrpc/auth_gss/svcauth_gss.c
@@ -718,30 +718,37 @@ gss_write_null_verf(struct svc_rqst *rqstp)
718static int 718static int
719gss_write_verf(struct svc_rqst *rqstp, struct gss_ctx *ctx_id, u32 seq) 719gss_write_verf(struct svc_rqst *rqstp, struct gss_ctx *ctx_id, u32 seq)
720{ 720{
721 __be32 xdr_seq; 721 __be32 *xdr_seq;
722 u32 maj_stat; 722 u32 maj_stat;
723 struct xdr_buf verf_data; 723 struct xdr_buf verf_data;
724 struct xdr_netobj mic; 724 struct xdr_netobj mic;
725 __be32 *p; 725 __be32 *p;
726 struct kvec iov; 726 struct kvec iov;
727 int err = -1;
727 728
728 svc_putnl(rqstp->rq_res.head, RPC_AUTH_GSS); 729 svc_putnl(rqstp->rq_res.head, RPC_AUTH_GSS);
729 xdr_seq = htonl(seq); 730 xdr_seq = kmalloc(4, GFP_KERNEL);
731 if (!xdr_seq)
732 return -1;
733 *xdr_seq = htonl(seq);
730 734
731 iov.iov_base = &xdr_seq; 735 iov.iov_base = xdr_seq;
732 iov.iov_len = sizeof(xdr_seq); 736 iov.iov_len = 4;
733 xdr_buf_from_iov(&iov, &verf_data); 737 xdr_buf_from_iov(&iov, &verf_data);
734 p = rqstp->rq_res.head->iov_base + rqstp->rq_res.head->iov_len; 738 p = rqstp->rq_res.head->iov_base + rqstp->rq_res.head->iov_len;
735 mic.data = (u8 *)(p + 1); 739 mic.data = (u8 *)(p + 1);
736 maj_stat = gss_get_mic(ctx_id, &verf_data, &mic); 740 maj_stat = gss_get_mic(ctx_id, &verf_data, &mic);
737 if (maj_stat != GSS_S_COMPLETE) 741 if (maj_stat != GSS_S_COMPLETE)
738 return -1; 742 goto out;
739 *p++ = htonl(mic.len); 743 *p++ = htonl(mic.len);
740 memset((u8 *)p + mic.len, 0, round_up_to_quad(mic.len) - mic.len); 744 memset((u8 *)p + mic.len, 0, round_up_to_quad(mic.len) - mic.len);
741 p += XDR_QUADLEN(mic.len); 745 p += XDR_QUADLEN(mic.len);
742 if (!xdr_ressize_check(rqstp, p)) 746 if (!xdr_ressize_check(rqstp, p))
743 return -1; 747 goto out;
744 return 0; 748 err = 0;
749out:
750 kfree(xdr_seq);
751 return err;
745} 752}
746 753
747struct gss_domain { 754struct gss_domain {
diff --git a/net/sunrpc/xprtrdma/svc_rdma_backchannel.c b/net/sunrpc/xprtrdma/svc_rdma_backchannel.c
index 2d8545c34095..20027f8de129 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_backchannel.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_backchannel.c
@@ -177,18 +177,26 @@ xprt_rdma_bc_allocate(struct rpc_task *task)
177 return -EINVAL; 177 return -EINVAL;
178 } 178 }
179 179
180 /* svc_rdma_sendto releases this page */
180 page = alloc_page(RPCRDMA_DEF_GFP); 181 page = alloc_page(RPCRDMA_DEF_GFP);
181 if (!page) 182 if (!page)
182 return -ENOMEM; 183 return -ENOMEM;
183
184 rqst->rq_buffer = page_address(page); 184 rqst->rq_buffer = page_address(page);
185
186 rqst->rq_rbuffer = kmalloc(rqst->rq_rcvsize, RPCRDMA_DEF_GFP);
187 if (!rqst->rq_rbuffer) {
188 put_page(page);
189 return -ENOMEM;
190 }
185 return 0; 191 return 0;
186} 192}
187 193
188static void 194static void
189xprt_rdma_bc_free(struct rpc_task *task) 195xprt_rdma_bc_free(struct rpc_task *task)
190{ 196{
191 /* No-op: ctxt and page have already been freed. */ 197 struct rpc_rqst *rqst = task->tk_rqstp;
198
199 kfree(rqst->rq_rbuffer);
192} 200}
193 201
194static int 202static int
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
index 0137af1c0916..e01c825bc683 100644
--- a/net/sunrpc/xprtsock.c
+++ b/net/sunrpc/xprtsock.c
@@ -2563,6 +2563,7 @@ static int bc_malloc(struct rpc_task *task)
2563 buf->len = PAGE_SIZE; 2563 buf->len = PAGE_SIZE;
2564 2564
2565 rqst->rq_buffer = buf->data; 2565 rqst->rq_buffer = buf->data;
2566 rqst->rq_rbuffer = (char *)rqst->rq_buffer + rqst->rq_callsize;
2566 return 0; 2567 return 0;
2567} 2568}
2568 2569
diff --git a/scripts/gcc-plugins/cyc_complexity_plugin.c b/scripts/gcc-plugins/cyc_complexity_plugin.c
index 34df974c6ba3..8af7db06122d 100644
--- a/scripts/gcc-plugins/cyc_complexity_plugin.c
+++ b/scripts/gcc-plugins/cyc_complexity_plugin.c
@@ -20,7 +20,7 @@
20 20
21#include "gcc-common.h" 21#include "gcc-common.h"
22 22
23int plugin_is_GPL_compatible; 23__visible int plugin_is_GPL_compatible;
24 24
25static struct plugin_info cyc_complexity_plugin_info = { 25static struct plugin_info cyc_complexity_plugin_info = {
26 .version = "20160225", 26 .version = "20160225",
@@ -49,7 +49,7 @@ static unsigned int cyc_complexity_execute(void)
49 49
50#include "gcc-generate-gimple-pass.h" 50#include "gcc-generate-gimple-pass.h"
51 51
52int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version) 52__visible int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
53{ 53{
54 const char * const plugin_name = plugin_info->base_name; 54 const char * const plugin_name = plugin_info->base_name;
55 struct register_pass_info cyc_complexity_pass_info; 55 struct register_pass_info cyc_complexity_pass_info;
diff --git a/scripts/gcc-plugins/gcc-common.h b/scripts/gcc-plugins/gcc-common.h
index 172850bcd0d9..950fd2e64bb7 100644
--- a/scripts/gcc-plugins/gcc-common.h
+++ b/scripts/gcc-plugins/gcc-common.h
@@ -130,6 +130,7 @@ extern void dump_gimple_stmt(pretty_printer *, gimple, int, int);
130#endif 130#endif
131 131
132#define __unused __attribute__((__unused__)) 132#define __unused __attribute__((__unused__))
133#define __visible __attribute__((visibility("default")))
133 134
134#define DECL_NAME_POINTER(node) IDENTIFIER_POINTER(DECL_NAME(node)) 135#define DECL_NAME_POINTER(node) IDENTIFIER_POINTER(DECL_NAME(node))
135#define DECL_NAME_LENGTH(node) IDENTIFIER_LENGTH(DECL_NAME(node)) 136#define DECL_NAME_LENGTH(node) IDENTIFIER_LENGTH(DECL_NAME(node))
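The new __visible helper marks the symbols that GCC itself looks up by name (plugin_is_GPL_compatible, plugin_init) with default visibility, presumably so the plugins keep working when built with hidden visibility by default. Usage is just an annotation on the two entry points, as in this trimmed sketch (the real plugin_init signature takes the plugin argument structs shown in the diffs above):

    #define __visible __attribute__((visibility("default")))

    __visible int plugin_is_GPL_compatible;

    __visible int plugin_init(void)   /* trimmed: the real one takes
                                       * plugin_name_args/gcc_version */
    {
        return 0;
    }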
diff --git a/scripts/gcc-plugins/latent_entropy_plugin.c b/scripts/gcc-plugins/latent_entropy_plugin.c
index ff1939b804ae..8160f1c1b56e 100644
--- a/scripts/gcc-plugins/latent_entropy_plugin.c
+++ b/scripts/gcc-plugins/latent_entropy_plugin.c
@@ -77,7 +77,7 @@
77 77
78#include "gcc-common.h" 78#include "gcc-common.h"
79 79
80int plugin_is_GPL_compatible; 80__visible int plugin_is_GPL_compatible;
81 81
82static GTY(()) tree latent_entropy_decl; 82static GTY(()) tree latent_entropy_decl;
83 83
@@ -340,7 +340,7 @@ static enum tree_code get_op(tree *rhs)
340 break; 340 break;
341 } 341 }
342 if (rhs) 342 if (rhs)
343 *rhs = build_int_cstu(unsigned_intDI_type_node, random_const); 343 *rhs = build_int_cstu(long_unsigned_type_node, random_const);
344 return op; 344 return op;
345} 345}
346 346
@@ -372,7 +372,7 @@ static void __perturb_latent_entropy(gimple_stmt_iterator *gsi,
372 enum tree_code op; 372 enum tree_code op;
373 373
374 /* 1. create temporary copy of latent_entropy */ 374 /* 1. create temporary copy of latent_entropy */
375 temp = create_var(unsigned_intDI_type_node, "tmp_latent_entropy"); 375 temp = create_var(long_unsigned_type_node, "temp_latent_entropy");
376 376
377 /* 2. read... */ 377 /* 2. read... */
378 add_referenced_var(latent_entropy_decl); 378 add_referenced_var(latent_entropy_decl);
@@ -459,13 +459,13 @@ static void init_local_entropy(basic_block bb, tree local_entropy)
459 gsi_insert_before(&gsi, call, GSI_NEW_STMT); 459 gsi_insert_before(&gsi, call, GSI_NEW_STMT);
460 update_stmt(call); 460 update_stmt(call);
461 461
462 udi_frame_addr = fold_convert(unsigned_intDI_type_node, frame_addr); 462 udi_frame_addr = fold_convert(long_unsigned_type_node, frame_addr);
463 assign = gimple_build_assign(local_entropy, udi_frame_addr); 463 assign = gimple_build_assign(local_entropy, udi_frame_addr);
464 gsi_insert_after(&gsi, assign, GSI_NEW_STMT); 464 gsi_insert_after(&gsi, assign, GSI_NEW_STMT);
465 update_stmt(assign); 465 update_stmt(assign);
466 466
467 /* 3. create temporary copy of latent_entropy */ 467 /* 3. create temporary copy of latent_entropy */
468 tmp = create_var(unsigned_intDI_type_node, "tmp_latent_entropy"); 468 tmp = create_var(long_unsigned_type_node, "temp_latent_entropy");
469 469
470 /* 4. read the global entropy variable into local entropy */ 470 /* 4. read the global entropy variable into local entropy */
471 add_referenced_var(latent_entropy_decl); 471 add_referenced_var(latent_entropy_decl);
@@ -480,7 +480,7 @@ static void init_local_entropy(basic_block bb, tree local_entropy)
480 update_stmt(assign); 480 update_stmt(assign);
481 481
482 rand_cst = get_random_const(); 482 rand_cst = get_random_const();
483 rand_const = build_int_cstu(unsigned_intDI_type_node, rand_cst); 483 rand_const = build_int_cstu(long_unsigned_type_node, rand_cst);
484 op = get_op(NULL); 484 op = get_op(NULL);
485 assign = create_assign(op, local_entropy, local_entropy, rand_const); 485 assign = create_assign(op, local_entropy, local_entropy, rand_const);
486 gsi_insert_after(&gsi, assign, GSI_NEW_STMT); 486 gsi_insert_after(&gsi, assign, GSI_NEW_STMT);
@@ -529,7 +529,7 @@ static unsigned int latent_entropy_execute(void)
529 } 529 }
530 530
531 /* 1. create the local entropy variable */ 531 /* 1. create the local entropy variable */
532 local_entropy = create_var(unsigned_intDI_type_node, "local_entropy"); 532 local_entropy = create_var(long_unsigned_type_node, "local_entropy");
533 533
534 /* 2. initialize the local entropy variable */ 534 /* 2. initialize the local entropy variable */
535 init_local_entropy(bb, local_entropy); 535 init_local_entropy(bb, local_entropy);
@@ -561,10 +561,9 @@ static void latent_entropy_start_unit(void *gcc_data __unused,
561 if (in_lto_p) 561 if (in_lto_p)
562 return; 562 return;
563 563
564 /* extern volatile u64 latent_entropy */ 564 /* extern volatile unsigned long latent_entropy */
565 gcc_assert(TYPE_PRECISION(long_long_unsigned_type_node) == 64); 565 quals = TYPE_QUALS(long_unsigned_type_node) | TYPE_QUAL_VOLATILE;
566 quals = TYPE_QUALS(long_long_unsigned_type_node) | TYPE_QUAL_VOLATILE; 566 type = build_qualified_type(long_unsigned_type_node, quals);
567 type = build_qualified_type(long_long_unsigned_type_node, quals);
568 id = get_identifier("latent_entropy"); 567 id = get_identifier("latent_entropy");
569 latent_entropy_decl = build_decl(UNKNOWN_LOCATION, VAR_DECL, id, type); 568 latent_entropy_decl = build_decl(UNKNOWN_LOCATION, VAR_DECL, id, type);
570 569
@@ -584,8 +583,8 @@ static void latent_entropy_start_unit(void *gcc_data __unused,
584 | TODO_update_ssa 583 | TODO_update_ssa
585#include "gcc-generate-gimple-pass.h" 584#include "gcc-generate-gimple-pass.h"
586 585
587int plugin_init(struct plugin_name_args *plugin_info, 586__visible int plugin_init(struct plugin_name_args *plugin_info,
588 struct plugin_gcc_version *version) 587 struct plugin_gcc_version *version)
589{ 588{
590 bool enabled = true; 589 bool enabled = true;
591 const char * const plugin_name = plugin_info->base_name; 590 const char * const plugin_name = plugin_info->base_name;
diff --git a/scripts/gcc-plugins/sancov_plugin.c b/scripts/gcc-plugins/sancov_plugin.c
index aedd6113cb73..7ea0b3f50739 100644
--- a/scripts/gcc-plugins/sancov_plugin.c
+++ b/scripts/gcc-plugins/sancov_plugin.c
@@ -21,7 +21,7 @@
21 21
22#include "gcc-common.h" 22#include "gcc-common.h"
23 23
24int plugin_is_GPL_compatible; 24__visible int plugin_is_GPL_compatible;
25 25
26tree sancov_fndecl; 26tree sancov_fndecl;
27 27
@@ -86,7 +86,7 @@ static void sancov_start_unit(void __unused *gcc_data, void __unused *user_data)
86#endif 86#endif
87} 87}
88 88
89int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version) 89__visible int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
90{ 90{
91 int i; 91 int i;
92 struct register_pass_info sancov_plugin_pass_info; 92 struct register_pass_info sancov_plugin_pass_info;
diff --git a/tools/virtio/ringtest/Makefile b/tools/virtio/ringtest/Makefile
index 877a8a4721b6..c012edbdb13b 100644
--- a/tools/virtio/ringtest/Makefile
+++ b/tools/virtio/ringtest/Makefile
@@ -3,8 +3,8 @@ all:
3all: ring virtio_ring_0_9 virtio_ring_poll virtio_ring_inorder ptr_ring noring 3all: ring virtio_ring_0_9 virtio_ring_poll virtio_ring_inorder ptr_ring noring
4 4
5CFLAGS += -Wall 5CFLAGS += -Wall
6CFLAGS += -pthread -O2 -ggdb 6CFLAGS += -pthread -O2 -ggdb -flto -fwhole-program
7LDFLAGS += -pthread -O2 -ggdb 7LDFLAGS += -pthread -O2 -ggdb -flto -fwhole-program
8 8
9main.o: main.c main.h 9main.o: main.c main.h
10ring.o: ring.c main.h 10ring.o: ring.c main.h
diff --git a/tools/virtio/ringtest/main.c b/tools/virtio/ringtest/main.c
index 147abb452a6c..f31353fac541 100644
--- a/tools/virtio/ringtest/main.c
+++ b/tools/virtio/ringtest/main.c
@@ -96,7 +96,13 @@ void set_affinity(const char *arg)
96 assert(!ret); 96 assert(!ret);
97} 97}
98 98
99static void run_guest(void) 99void poll_used(void)
100{
101 while (used_empty())
102 busy_wait();
103}
104
105static void __attribute__((__flatten__)) run_guest(void)
100{ 106{
101 int completed_before; 107 int completed_before;
102 int completed = 0; 108 int completed = 0;
@@ -141,7 +147,7 @@ static void run_guest(void)
141 assert(completed <= bufs); 147 assert(completed <= bufs);
142 assert(started <= bufs); 148 assert(started <= bufs);
143 if (do_sleep) { 149 if (do_sleep) {
144 if (enable_call()) 150 if (used_empty() && enable_call())
145 wait_for_call(); 151 wait_for_call();
146 } else { 152 } else {
147 poll_used(); 153 poll_used();
@@ -149,7 +155,13 @@ static void run_guest(void)
149 } 155 }
150} 156}
151 157
152static void run_host(void) 158void poll_avail(void)
159{
160 while (avail_empty())
161 busy_wait();
162}
163
164static void __attribute__((__flatten__)) run_host(void)
153{ 165{
154 int completed_before; 166 int completed_before;
155 int completed = 0; 167 int completed = 0;
@@ -160,7 +172,7 @@ static void run_host(void)
160 172
161 for (;;) { 173 for (;;) {
162 if (do_sleep) { 174 if (do_sleep) {
163 if (enable_kick()) 175 if (avail_empty() && enable_kick())
164 wait_for_kick(); 176 wait_for_kick();
165 } else { 177 } else {
166 poll_avail(); 178 poll_avail();
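The ringtest refactor above inverts the old interface: each ring implementation now exports only the predicates used_empty() and avail_empty(), while the generic main.c builds poll_used()/poll_avail() on top of them and, in the do_sleep case, re-checks emptiness after arming the notification so a completion that slipped in between is not slept through. A stand-alone sketch of that split, using the same function names but toy bodies (wait_used() is an invented wrapper for the do_sleep branch of run_guest()):

    #include <stdbool.h>

    static int used_count;                  /* toy stand-in for ring state */

    static bool used_empty(void)            /* per-ring predicate */
    {
        return used_count == 0;
    }

    /* toy stand-ins for the real notification helpers */
    static void busy_wait(void) { }
    static bool enable_call(void) { return used_empty(); }
    static void wait_for_call(void) { }

    static void poll_used(void)             /* generic busy-poll loop */
    {
        while (used_empty())
            busy_wait();
    }

    static void wait_used(void)             /* invented wrapper for the
                                             * do_sleep branch of run_guest() */
    {
        if (used_empty() && enable_call())  /* re-check closes the race */
            wait_for_call();
    }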
diff --git a/tools/virtio/ringtest/main.h b/tools/virtio/ringtest/main.h
index 16917acb0ade..34e63cc4c572 100644
--- a/tools/virtio/ringtest/main.h
+++ b/tools/virtio/ringtest/main.h
@@ -56,15 +56,15 @@ void alloc_ring(void);
56int add_inbuf(unsigned, void *, void *); 56int add_inbuf(unsigned, void *, void *);
57void *get_buf(unsigned *, void **); 57void *get_buf(unsigned *, void **);
58void disable_call(); 58void disable_call();
59bool used_empty();
59bool enable_call(); 60bool enable_call();
60void kick_available(); 61void kick_available();
61void poll_used();
62/* host side */ 62/* host side */
63void disable_kick(); 63void disable_kick();
64bool avail_empty();
64bool enable_kick(); 65bool enable_kick();
65bool use_buf(unsigned *, void **); 66bool use_buf(unsigned *, void **);
66void call_used(); 67void call_used();
67void poll_avail();
68 68
69/* implemented by main */ 69/* implemented by main */
70extern bool do_sleep; 70extern bool do_sleep;
diff --git a/tools/virtio/ringtest/noring.c b/tools/virtio/ringtest/noring.c
index eda2f4824130..b8d1c1daac7c 100644
--- a/tools/virtio/ringtest/noring.c
+++ b/tools/virtio/ringtest/noring.c
@@ -24,8 +24,9 @@ void *get_buf(unsigned *lenp, void **bufp)
24 return "Buffer"; 24 return "Buffer";
25} 25}
26 26
27void poll_used(void) 27bool used_empty()
28{ 28{
29 return false;
29} 30}
30 31
31void disable_call() 32void disable_call()
@@ -54,8 +55,9 @@ bool enable_kick()
54 assert(0); 55 assert(0);
55} 56}
56 57
57void poll_avail(void) 58bool avail_empty()
58{ 59{
60 return false;
59} 61}
60 62
61bool use_buf(unsigned *lenp, void **bufp) 63bool use_buf(unsigned *lenp, void **bufp)
diff --git a/tools/virtio/ringtest/ptr_ring.c b/tools/virtio/ringtest/ptr_ring.c
index bd2ad1d3b7a9..635b07b4fdd3 100644
--- a/tools/virtio/ringtest/ptr_ring.c
+++ b/tools/virtio/ringtest/ptr_ring.c
@@ -133,18 +133,9 @@ void *get_buf(unsigned *lenp, void **bufp)
133 return datap; 133 return datap;
134} 134}
135 135
136void poll_used(void) 136bool used_empty()
137{ 137{
138 void *b; 138 return (tailcnt == headcnt || __ptr_ring_full(&array));
139
140 do {
141 if (tailcnt == headcnt || __ptr_ring_full(&array)) {
142 b = NULL;
143 barrier();
144 } else {
145 b = "Buffer\n";
146 }
147 } while (!b);
148} 139}
149 140
150void disable_call() 141void disable_call()
@@ -173,14 +164,9 @@ bool enable_kick()
173 assert(0); 164 assert(0);
174} 165}
175 166
176void poll_avail(void) 167bool avail_empty()
177{ 168{
178 void *b; 169 return !__ptr_ring_peek(&array);
179
180 do {
181 barrier();
182 b = __ptr_ring_peek(&array);
183 } while (!b);
184} 170}
185 171
186bool use_buf(unsigned *lenp, void **bufp) 172bool use_buf(unsigned *lenp, void **bufp)
diff --git a/tools/virtio/ringtest/ring.c b/tools/virtio/ringtest/ring.c
index c25c8d248b6b..747c5dd47be8 100644
--- a/tools/virtio/ringtest/ring.c
+++ b/tools/virtio/ringtest/ring.c
@@ -163,12 +163,11 @@ void *get_buf(unsigned *lenp, void **bufp)
163 return datap; 163 return datap;
164} 164}
165 165
166void poll_used(void) 166bool used_empty()
167{ 167{
168 unsigned head = (ring_size - 1) & guest.last_used_idx; 168 unsigned head = (ring_size - 1) & guest.last_used_idx;
169 169
170 while (ring[head].flags & DESC_HW) 170 return (ring[head].flags & DESC_HW);
171 busy_wait();
172} 171}
173 172
174void disable_call() 173void disable_call()
@@ -180,13 +179,11 @@ void disable_call()
180 179
181bool enable_call() 180bool enable_call()
182{ 181{
183 unsigned head = (ring_size - 1) & guest.last_used_idx;
184
185 event->call_index = guest.last_used_idx; 182 event->call_index = guest.last_used_idx;
186 /* Flush call index write */ 183 /* Flush call index write */
187 /* Barrier D (for pairing) */ 184 /* Barrier D (for pairing) */
188 smp_mb(); 185 smp_mb();
189 return ring[head].flags & DESC_HW; 186 return used_empty();
190} 187}
191 188
192void kick_available(void) 189void kick_available(void)
@@ -213,20 +210,17 @@ void disable_kick()
213 210
214bool enable_kick() 211bool enable_kick()
215{ 212{
216 unsigned head = (ring_size - 1) & host.used_idx;
217
218 event->kick_index = host.used_idx; 213 event->kick_index = host.used_idx;
219 /* Barrier C (for pairing) */ 214 /* Barrier C (for pairing) */
220 smp_mb(); 215 smp_mb();
221 return !(ring[head].flags & DESC_HW); 216 return avail_empty();
222} 217}
223 218
224void poll_avail(void) 219bool avail_empty()
225{ 220{
226 unsigned head = (ring_size - 1) & host.used_idx; 221 unsigned head = (ring_size - 1) & host.used_idx;
227 222
228 while (!(ring[head].flags & DESC_HW)) 223 return !(ring[head].flags & DESC_HW);
229 busy_wait();
230} 224}
231 225
232bool use_buf(unsigned *lenp, void **bufp) 226bool use_buf(unsigned *lenp, void **bufp)
diff --git a/tools/virtio/ringtest/virtio_ring_0_9.c b/tools/virtio/ringtest/virtio_ring_0_9.c
index 761866212aac..bbc3043b2fb1 100644
--- a/tools/virtio/ringtest/virtio_ring_0_9.c
+++ b/tools/virtio/ringtest/virtio_ring_0_9.c
@@ -194,24 +194,16 @@ void *get_buf(unsigned *lenp, void **bufp)
194 return datap; 194 return datap;
195} 195}
196 196
197void poll_used(void) 197bool used_empty()
198{ 198{
199 unsigned short last_used_idx = guest.last_used_idx;
199#ifdef RING_POLL 200#ifdef RING_POLL
200 unsigned head = (ring_size - 1) & guest.last_used_idx; 201 unsigned short head = last_used_idx & (ring_size - 1);
202 unsigned index = ring.used->ring[head].id;
201 203
202 for (;;) { 204 return (index ^ last_used_idx ^ 0x8000) & ~(ring_size - 1);
203 unsigned index = ring.used->ring[head].id;
204
205 if ((index ^ guest.last_used_idx ^ 0x8000) & ~(ring_size - 1))
206 busy_wait();
207 else
208 break;
209 }
210#else 205#else
211 unsigned head = guest.last_used_idx; 206 return ring.used->idx == last_used_idx;
212
213 while (ring.used->idx == head)
214 busy_wait();
215#endif 207#endif
216} 208}
217 209
@@ -224,22 +216,11 @@ void disable_call()
224 216
225bool enable_call() 217bool enable_call()
226{ 218{
227 unsigned short last_used_idx; 219 vring_used_event(&ring) = guest.last_used_idx;
228
229 vring_used_event(&ring) = (last_used_idx = guest.last_used_idx);
230 /* Flush call index write */ 220 /* Flush call index write */
231 /* Barrier D (for pairing) */ 221 /* Barrier D (for pairing) */
232 smp_mb(); 222 smp_mb();
233#ifdef RING_POLL 223 return used_empty();
234 {
235 unsigned short head = last_used_idx & (ring_size - 1);
236 unsigned index = ring.used->ring[head].id;
237
238 return (index ^ last_used_idx ^ 0x8000) & ~(ring_size - 1);
239 }
240#else
241 return ring.used->idx == last_used_idx;
242#endif
243} 224}
244 225
245void kick_available(void) 226void kick_available(void)
@@ -266,36 +247,21 @@ void disable_kick()
266 247
267bool enable_kick() 248bool enable_kick()
268{ 249{
269 unsigned head = host.used_idx; 250 vring_avail_event(&ring) = host.used_idx;
270
271 vring_avail_event(&ring) = head;
272 /* Barrier C (for pairing) */ 251 /* Barrier C (for pairing) */
273 smp_mb(); 252 smp_mb();
274#ifdef RING_POLL 253 return avail_empty();
275 {
276 unsigned index = ring.avail->ring[head & (ring_size - 1)];
277
278 return (index ^ head ^ 0x8000) & ~(ring_size - 1);
279 }
280#else
281 return head == ring.avail->idx;
282#endif
283} 254}
284 255
285void poll_avail(void) 256bool avail_empty()
286{ 257{
287 unsigned head = host.used_idx; 258 unsigned head = host.used_idx;
288#ifdef RING_POLL 259#ifdef RING_POLL
289 for (;;) { 260 unsigned index = ring.avail->ring[head & (ring_size - 1)];
290 unsigned index = ring.avail->ring[head & (ring_size - 1)]; 261
291 if ((index ^ head ^ 0x8000) & ~(ring_size - 1)) 262 return ((index ^ head ^ 0x8000) & ~(ring_size - 1));
292 busy_wait();
293 else
294 break;
295 }
296#else 263#else
297 while (ring.avail->idx == head) 264 return head == ring.avail->idx;
298 busy_wait();
299#endif 265#endif
300} 266}
301 267
diff --git a/virt/kvm/eventfd.c b/virt/kvm/eventfd.c
index f397e9b20370..a29786dd9522 100644
--- a/virt/kvm/eventfd.c
+++ b/virt/kvm/eventfd.c
@@ -42,6 +42,7 @@
42 42
43#ifdef CONFIG_HAVE_KVM_IRQFD 43#ifdef CONFIG_HAVE_KVM_IRQFD
44 44
45static struct workqueue_struct *irqfd_cleanup_wq;
45 46
46static void 47static void
47irqfd_inject(struct work_struct *work) 48irqfd_inject(struct work_struct *work)
@@ -167,7 +168,7 @@ irqfd_deactivate(struct kvm_kernel_irqfd *irqfd)
167 168
168 list_del_init(&irqfd->list); 169 list_del_init(&irqfd->list);
169 170
170 schedule_work(&irqfd->shutdown); 171 queue_work(irqfd_cleanup_wq, &irqfd->shutdown);
171} 172}
172 173
173int __attribute__((weak)) kvm_arch_set_irq_inatomic( 174int __attribute__((weak)) kvm_arch_set_irq_inatomic(
@@ -554,7 +555,7 @@ kvm_irqfd_deassign(struct kvm *kvm, struct kvm_irqfd *args)
554 * so that we guarantee there will not be any more interrupts on this 555 * so that we guarantee there will not be any more interrupts on this
555 * gsi once this deassign function returns. 556 * gsi once this deassign function returns.
556 */ 557 */
557 flush_work(&irqfd->shutdown); 558 flush_workqueue(irqfd_cleanup_wq);
558 559
559 return 0; 560 return 0;
560} 561}
@@ -591,7 +592,7 @@ kvm_irqfd_release(struct kvm *kvm)
591 * Block until we know all outstanding shutdown jobs have completed 592 * Block until we know all outstanding shutdown jobs have completed
592 * since we do not take a kvm* reference. 593 * since we do not take a kvm* reference.
593 */ 594 */
594 flush_work(&irqfd->shutdown); 595 flush_workqueue(irqfd_cleanup_wq);
595 596
596} 597}
597 598
@@ -621,8 +622,23 @@ void kvm_irq_routing_update(struct kvm *kvm)
621 spin_unlock_irq(&kvm->irqfds.lock); 622 spin_unlock_irq(&kvm->irqfds.lock);
622} 623}
623 624
625/*
626 * create a host-wide workqueue for issuing deferred shutdown requests
627 * aggregated from all vm* instances. We need our own isolated
628 * queue to ease flushing work items when a VM exits.
629 */
630int kvm_irqfd_init(void)
631{
632 irqfd_cleanup_wq = alloc_workqueue("kvm-irqfd-cleanup", 0, 0);
633 if (!irqfd_cleanup_wq)
634 return -ENOMEM;
635
636 return 0;
637}
638
624void kvm_irqfd_exit(void) 639void kvm_irqfd_exit(void)
625{ 640{
641 destroy_workqueue(irqfd_cleanup_wq);
626} 642}
627#endif 643#endif
628 644
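The eventfd change switches irqfd shutdown from schedule_work()/flush_work() to a private kvm-irqfd-cleanup workqueue, so flushing on deassign or VM release only waits for irqfd shutdown items instead of whatever happens to be on the system workqueue, with kvm_irqfd_init()/kvm_irqfd_exit() owning the queue's lifetime. The bare lifecycle looks like this kernel-style sketch (illustrative only, not a standalone build; names are invented):

    #include <linux/errno.h>
    #include <linux/workqueue.h>

    static struct workqueue_struct *cleanup_wq;

    static void do_shutdown(struct work_struct *work) { /* teardown here */ }
    static DECLARE_WORK(shutdown_work, do_shutdown);

    static int cleanup_wq_init(void)        /* mirrors kvm_irqfd_init() */
    {
        cleanup_wq = alloc_workqueue("example-cleanup", 0, 0);
        return cleanup_wq ? 0 : -ENOMEM;
    }

    static void defer_shutdown(void)        /* mirrors irqfd_deactivate() */
    {
        /* our own queue: a later flush waits only for these items */
        queue_work(cleanup_wq, &shutdown_work);
    }

    static void wait_for_shutdowns(void)    /* mirrors kvm_irqfd_deassign() */
    {
        flush_workqueue(cleanup_wq);
    }

    static void cleanup_wq_exit(void)       /* mirrors kvm_irqfd_exit() */
    {
        destroy_workqueue(cleanup_wq);
    }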
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 2907b7b78654..5c360347a1e9 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -3844,7 +3844,12 @@ int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
3844 * kvm_arch_init makes sure there's at most one caller 3844 * kvm_arch_init makes sure there's at most one caller
3845 * for architectures that support multiple implementations, 3845 * for architectures that support multiple implementations,
3846 * like intel and amd on x86. 3846 * like intel and amd on x86.
3847 * kvm_arch_init must be called before kvm_irqfd_init to avoid creating
3848 * conflicts in case kvm is already setup for another implementation.
3847 */ 3849 */
3850 r = kvm_irqfd_init();
3851 if (r)
3852 goto out_irqfd;
3848 3853
3849 if (!zalloc_cpumask_var(&cpus_hardware_enabled, GFP_KERNEL)) { 3854 if (!zalloc_cpumask_var(&cpus_hardware_enabled, GFP_KERNEL)) {
3850 r = -ENOMEM; 3855 r = -ENOMEM;
@@ -3926,6 +3931,7 @@ out_free_0a:
3926 free_cpumask_var(cpus_hardware_enabled); 3931 free_cpumask_var(cpus_hardware_enabled);
3927out_free_0: 3932out_free_0:
3928 kvm_irqfd_exit(); 3933 kvm_irqfd_exit();
3934out_irqfd:
3929 kvm_arch_exit(); 3935 kvm_arch_exit();
3930out_fail: 3936out_fail:
3931 return r; 3937 return r;