-rw-r--r--  .mailmap  3
-rw-r--r--  Documentation/filesystems/Locking  3
-rw-r--r--  Documentation/filesystems/porting  6
-rw-r--r--  Documentation/filesystems/vfs.txt  3
-rw-r--r--  Documentation/pinctrl.txt  8
-rw-r--r--  Documentation/process/stable-kernel-rules.rst  2
-rw-r--r--  Documentation/virtual/kvm/devices/arm-vgic.txt  6
-rw-r--r--  MAINTAINERS  13
-rw-r--r--  Makefile  2
-rw-r--r--  arch/alpha/kernel/osf_sys.c  2
-rw-r--r--  arch/arm/kvm/arm.c  3
-rw-r--r--  arch/arm/kvm/mmu.c  23
-rw-r--r--  arch/arm/mm/dma-mapping.c  20
-rw-r--r--  arch/arm/mm/nommu.c  5
-rw-r--r--  arch/arm/probes/kprobes/core.c  49
-rw-r--r--  arch/arm/probes/kprobes/test-core.c  11
-rw-r--r--  arch/arm64/mm/fault.c  42
-rw-r--r--  arch/arm64/mm/hugetlbpage.c  14
-rw-r--r--  arch/ia64/include/asm/asm-prototypes.h  29
-rw-r--r--  arch/ia64/lib/Makefile  16
-rw-r--r--  arch/metag/include/asm/uaccess.h  15
-rw-r--r--  arch/metag/lib/usercopy.c  312
-rw-r--r--  arch/mips/Kconfig  2
-rw-r--r--  arch/mips/include/asm/fpu.h  1
-rw-r--r--  arch/mips/include/asm/irq.h  15
-rw-r--r--  arch/mips/include/asm/spinlock.h  8
-rw-r--r--  arch/mips/include/uapi/asm/unistd.h  15
-rw-r--r--  arch/mips/kernel/asm-offsets.c  1
-rw-r--r--  arch/mips/kernel/cps-vec.S  2
-rw-r--r--  arch/mips/kernel/cpu-probe.c  2
-rw-r--r--  arch/mips/kernel/genex.S  12
-rw-r--r--  arch/mips/kernel/process.c  56
-rw-r--r--  arch/mips/kernel/scall32-o32.S  1
-rw-r--r--  arch/mips/kernel/scall64-64.S  1
-rw-r--r--  arch/mips/kernel/scall64-n32.S  1
-rw-r--r--  arch/mips/kernel/scall64-o32.S  1
-rw-r--r--  arch/mips/kernel/traps.c  17
-rw-r--r--  arch/mips/lantiq/xway/sysctrl.c  2
-rw-r--r--  arch/mips/mm/c-r4k.c  2
-rw-r--r--  arch/mips/mm/tlbex.c  25
-rw-r--r--  arch/mips/ralink/rt3883.c  4
-rw-r--r--  arch/powerpc/crypto/crc32c-vpmsum_glue.c  3
-rw-r--r--  arch/powerpc/kernel/align.c  27
-rw-r--r--  arch/powerpc/kernel/misc_64.S  4
-rw-r--r--  arch/powerpc/kernel/setup_64.c  9
-rw-r--r--  arch/powerpc/kvm/book3s_64_mmu_hv.c  4
-rw-r--r--  arch/powerpc/mm/hash_native_64.c  7
-rw-r--r--  arch/s390/kvm/gaccess.c  7
-rw-r--r--  arch/sparc/include/asm/page_64.h  3
-rw-r--r--  arch/sparc/include/asm/pgtable_64.h  15
-rw-r--r--  arch/sparc/include/asm/processor_32.h  6
-rw-r--r--  arch/sparc/include/asm/processor_64.h  4
-rw-r--r--  arch/sparc/kernel/head_64.S  4
-rw-r--r--  arch/sparc/kernel/misctrap.S  1
-rw-r--r--  arch/sparc/kernel/rtrap_64.S  1
-rw-r--r--  arch/sparc/kernel/spiterrs.S  1
-rw-r--r--  arch/sparc/kernel/sun4v_tlb_miss.S  1
-rw-r--r--  arch/sparc/kernel/urtt_fill.S  1
-rw-r--r--  arch/sparc/kernel/winfixup.S  2
-rw-r--r--  arch/sparc/lib/NG2memcpy.S  4
-rw-r--r--  arch/sparc/lib/NG4memcpy.S  1
-rw-r--r--  arch/sparc/lib/NG4memset.S  1
-rw-r--r--  arch/sparc/lib/NGmemcpy.S  1
-rw-r--r--  arch/sparc/mm/hugetlbpage.c  9
-rw-r--r--  arch/sparc/mm/init_64.c  6
-rw-r--r--  arch/sparc/mm/srmmu.c  1
-rw-r--r--  arch/sparc/mm/tlb.c  6
-rw-r--r--  arch/sparc/mm/tsb.c  4
-rw-r--r--  arch/x86/entry/vdso/vdso32-setup.c  11
-rw-r--r--  arch/x86/events/intel/lbr.c  3
-rw-r--r--  arch/x86/include/asm/elf.h  2
-rw-r--r--  arch/x86/kernel/cpu/intel_rdt_schemata.c  2
-rw-r--r--  arch/x86/kernel/signal.c  2
-rw-r--r--  arch/x86/kernel/signal_compat.c  4
-rw-r--r--  arch/x86/kernel/traps.c  4
-rw-r--r--  arch/x86/kvm/vmx.c  15
-rw-r--r--  arch/x86/mm/init.c  41
-rw-r--r--  arch/x86/platform/efi/quirks.c  4
-rw-r--r--  block/blk-mq-sched.c  181
-rw-r--r--  block/blk-mq-sched.h  25
-rw-r--r--  block/blk-mq.c  85
-rw-r--r--  block/blk-mq.h  2
-rw-r--r--  block/blk-sysfs.c  2
-rw-r--r--  block/elevator.c  114
-rw-r--r--  drivers/acpi/acpica/utresrc.c  17
-rw-r--r--  drivers/acpi/glue.c  12
-rw-r--r--  drivers/acpi/scan.c  19
-rw-r--r--  drivers/ata/pata_atiixp.c  5
-rw-r--r--  drivers/ata/sata_via.c  18
-rw-r--r--  drivers/block/zram/zram_drv.c  6
-rw-r--r--  drivers/char/mem.c  82
-rw-r--r--  drivers/char/virtio_console.c  6
-rw-r--r--  drivers/cpufreq/cpufreq.c  18
-rw-r--r--  drivers/crypto/caam/caampkc.c  2
-rw-r--r--  drivers/crypto/caam/ctrl.c  66
-rw-r--r--  drivers/crypto/caam/intern.h  1
-rw-r--r--  drivers/firmware/efi/libstub/gop.c  6
-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_gpu.c  3
-rw-r--r--  drivers/gpu/drm/i915/gvt/cfg_space.c  3
-rw-r--r--  drivers/gpu/drm/i915/gvt/execlist.c  3
-rw-r--r--  drivers/gpu/drm/i915/gvt/firmware.c  9
-rw-r--r--  drivers/gpu/drm/i915/gvt/gvt.c  2
-rw-r--r--  drivers/gpu/drm/i915/gvt/gvt.h  5
-rw-r--r--  drivers/gpu/drm/i915/gvt/kvmgt.c  11
-rw-r--r--  drivers/gpu/drm/i915/gvt/vgpu.c  45
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.c  2
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h  1
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c  2
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_execbuffer.c  4
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_gtt.c  2
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_request.c  11
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_shrinker.c  26
-rw-r--r--  drivers/gpu/drm/i915/i915_pci.c  5
-rw-r--r--  drivers/gpu/drm/i915/i915_perf.c  11
-rw-r--r--  drivers/gpu/drm/i915/intel_lrc.c  57
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.h  8
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_display.c  10
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/device/base.c  32
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv31.c  2
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv44.c  2
-rw-r--r--  drivers/gpu/drm/udl/udl_transfer.c  3
-rw-r--r--  drivers/hid/hid-core.c  4
-rw-r--r--  drivers/hid/hid-ids.h  3
-rw-r--r--  drivers/hid/hid-uclogic.c  2
-rw-r--r--  drivers/iio/accel/hid-sensor-accel-3d.c  3
-rw-r--r--  drivers/iio/common/cros_ec_sensors/cros_ec_sensors.c  4
-rw-r--r--  drivers/iio/common/hid-sensors/hid-sensor-attributes.c  10
-rw-r--r--  drivers/iio/gyro/bmg160_core.c  12
-rw-r--r--  drivers/iio/industrialio-core.c  7
-rw-r--r--  drivers/iio/pressure/st_pressure_core.c  1
-rw-r--r--  drivers/infiniband/ulp/isert/ib_isert.c  65
-rw-r--r--  drivers/infiniband/ulp/isert/ib_isert.h  3
-rw-r--r--  drivers/input/joystick/xpad.c  2
-rw-r--r--  drivers/irqchip/irq-imx-gpcv2.c  2
-rw-r--r--  drivers/md/dm-cache-metadata.c  8
-rw-r--r--  drivers/md/dm-raid.c  2
-rw-r--r--  drivers/md/dm-rq.c  1
-rw-r--r--  drivers/md/dm-verity-fec.c  18
-rw-r--r--  drivers/md/dm-verity-fec.h  4
-rw-r--r--  drivers/net/can/ifi_canfd/ifi_canfd.c  2
-rw-r--r--  drivers/net/can/rcar/rcar_can.c  3
-rw-r--r--  drivers/net/team/team.c  19
-rw-r--r--  drivers/net/usb/qmi_wwan.c  2
-rw-r--r--  drivers/net/usb/usbnet.c  19
-rw-r--r--  drivers/net/virtio_net.c  45
-rw-r--r--  drivers/nvme/host/core.c  2
-rw-r--r--  drivers/nvme/target/admin-cmd.c  2
-rw-r--r--  drivers/nvme/target/io-cmd.c  4
-rw-r--r--  drivers/pci/dwc/Kconfig  1
-rw-r--r--  drivers/pci/dwc/pcie-artpec6.c  4
-rw-r--r--  drivers/pci/dwc/pcie-designware-plat.c  4
-rw-r--r--  drivers/pci/host/pci-thunder-pem.c  10
-rw-r--r--  drivers/pinctrl/core.c  97
-rw-r--r--  drivers/pinctrl/freescale/pinctrl-imx.c  2
-rw-r--r--  drivers/pinctrl/intel/pinctrl-cherryview.c  26
-rw-r--r--  drivers/pinctrl/pinctrl-single.c  2
-rw-r--r--  drivers/pinctrl/samsung/pinctrl-exynos.c  80
-rw-r--r--  drivers/pinctrl/samsung/pinctrl-exynos.h  11
-rw-r--r--  drivers/pinctrl/sh-pfc/pinctrl.c  11
-rw-r--r--  drivers/pinctrl/ti/pinctrl-ti-iodelay.c  2
-rw-r--r--  drivers/pwm/pwm-lpss-pci.c  10
-rw-r--r--  drivers/pwm/pwm-lpss-platform.c  1
-rw-r--r--  drivers/pwm/pwm-lpss.c  19
-rw-r--r--  drivers/pwm/pwm-lpss.h  1
-rw-r--r--  drivers/pwm/pwm-rockchip.c  40
-rw-r--r--  drivers/scsi/scsi_lib.c  6
-rw-r--r--  drivers/staging/android/ashmem.c  1
-rw-r--r--  drivers/target/iscsi/iscsi_target.c  3
-rw-r--r--  drivers/target/iscsi/iscsi_target_configfs.c  13
-rw-r--r--  drivers/target/iscsi/iscsi_target_parameters.c  16
-rw-r--r--  drivers/target/iscsi/iscsi_target_util.c  17
-rw-r--r--  drivers/target/iscsi/iscsi_target_util.h  2
-rw-r--r--  drivers/target/target_core_alua.c  136
-rw-r--r--  drivers/target/target_core_configfs.c  2
-rw-r--r--  drivers/target/target_core_fabric_configfs.c  5
-rw-r--r--  drivers/target/target_core_tpg.c  4
-rw-r--r--  drivers/target/target_core_transport.c  102
-rw-r--r--  drivers/target/target_core_user.c  97
-rw-r--r--  drivers/usb/gadget/function/f_tcm.c  2
-rw-r--r--  drivers/video/fbdev/efifb.c  66
-rw-r--r--  drivers/video/fbdev/omap/omapfb_main.c  15
-rw-r--r--  drivers/video/fbdev/ssd1307fb.c  24
-rw-r--r--  drivers/video/fbdev/xen-fbfront.c  4
-rw-r--r--  drivers/virtio/virtio.c  6
-rw-r--r--  drivers/virtio/virtio_pci_common.c  375
-rw-r--r--  drivers/virtio/virtio_pci_common.h  43
-rw-r--r--  drivers/virtio/virtio_pci_legacy.c  8
-rw-r--r--  drivers/virtio/virtio_pci_modern.c  8
-rw-r--r--  drivers/xen/xenbus/xenbus_dev_frontend.c  4
-rw-r--r--  fs/btrfs/inode.c  22
-rw-r--r--  fs/btrfs/super.c  3
-rw-r--r--  fs/btrfs/volumes.c  2
-rw-r--r--  fs/cifs/cifsfs.c  87
-rw-r--r--  fs/cifs/cifsfs.h  5
-rw-r--r--  fs/cifs/cifsglob.h  19
-rw-r--r--  fs/cifs/cifssmb.c  4
-rw-r--r--  fs/cifs/connect.c  16
-rw-r--r--  fs/cifs/file.c  6
-rw-r--r--  fs/cifs/ioctl.c  66
-rw-r--r--  fs/cifs/smb2misc.c  46
-rw-r--r--  fs/cifs/smb2ops.c  37
-rw-r--r--  fs/cifs/smb2pdu.c  23
-rw-r--r--  fs/cifs/smb2proto.h  7
-rw-r--r--  fs/cifs/smb2transport.c  55
-rw-r--r--  fs/cifs/transport.c  2
-rw-r--r--  fs/dax.c  35
-rw-r--r--  fs/ext4/ext4.h  1
-rw-r--r--  fs/ext4/file.c  2
-rw-r--r--  fs/ext4/inode.c  41
-rw-r--r--  fs/ext4/namei.c  2
-rw-r--r--  fs/ext4/symlink.c  3
-rw-r--r--  fs/hugetlbfs/inode.c  15
-rw-r--r--  fs/orangefs/super.c  9
-rw-r--r--  fs/proc/proc_sysctl.c  1
-rw-r--r--  fs/proc/task_mmu.c  9
-rw-r--r--  fs/stat.c  86
-rw-r--r--  fs/sysfs/file.c  6
-rw-r--r--  fs/userfaultfd.c  2
-rw-r--r--  fs/xfs/libxfs/xfs_dir2_priv.h  3
-rw-r--r--  fs/xfs/libxfs/xfs_dir2_sf.c  63
-rw-r--r--  fs/xfs/libxfs/xfs_inode_fork.c  35
-rw-r--r--  fs/xfs/libxfs/xfs_inode_fork.h  2
-rw-r--r--  fs/xfs/xfs_bmap_util.c  10
-rw-r--r--  fs/xfs/xfs_inode.c  19
-rw-r--r--  fs/xfs/xfs_iops.c  14
-rw-r--r--  fs/xfs/xfs_itable.c  2
-rw-r--r--  include/asm-generic/vmlinux.lds.h  4
-rw-r--r--  include/kvm/arm_vgic.h  1
-rw-r--r--  include/linux/blk-mq.h  2
-rw-r--r--  include/linux/blkdev.h  1
-rw-r--r--  include/linux/cgroup.h  21
-rw-r--r--  include/linux/elevator.h  2
-rw-r--r--  include/linux/irqchip/arm-gic.h  3
-rw-r--r--  include/linux/mmu_notifier.h  13
-rw-r--r--  include/linux/nvme.h  16
-rw-r--r--  include/linux/pinctrl/pinctrl.h  3
-rw-r--r--  include/linux/sched.h  4
-rw-r--r--  include/linux/stat.h  1
-rw-r--r--  include/linux/uio.h  6
-rw-r--r--  include/linux/virtio.h  1
-rw-r--r--  include/target/target_core_base.h  10
-rw-r--r--  include/uapi/linux/Kbuild  1
-rw-r--r--  include/uapi/linux/stat.h  5
-rw-r--r--  include/uapi/linux/virtio_pci.h  2
-rw-r--r--  kernel/audit.c  67
-rw-r--r--  kernel/audit.h  8
-rw-r--r--  kernel/auditsc.c  25
-rw-r--r--  kernel/bpf/core.c  12
-rw-r--r--  kernel/cgroup/cgroup-v1.c  2
-rw-r--r--  kernel/cgroup/cgroup.c  9
-rw-r--r--  kernel/irq/affinity.c  20
-rw-r--r--  kernel/kthread.c  3
-rw-r--r--  kernel/ptrace.c  14
-rw-r--r--  kernel/sysctl.c  3
-rw-r--r--  kernel/trace/ring_buffer.c  8
-rw-r--r--  lib/iov_iter.c  63
-rw-r--r--  mm/huge_memory.c  99
-rw-r--r--  mm/internal.h  7
-rw-r--r--  mm/mempolicy.c  20
-rw-r--r--  mm/page_alloc.c  11
-rw-r--r--  mm/page_vma_mapped.c  15
-rw-r--r--  mm/swap.c  27
-rw-r--r--  mm/swap_cgroup.c  2
-rw-r--r--  mm/vmstat.c  15
-rw-r--r--  mm/z3fold.c  9
-rw-r--r--  mm/zsmalloc.c  2
-rw-r--r--  net/bridge/br_device.c  20
-rw-r--r--  net/bridge/br_if.c  1
-rw-r--r--  net/bridge/br_multicast.c  7
-rw-r--r--  net/bridge/br_netlink.c  7
-rw-r--r--  net/bridge/br_private.h  5
-rw-r--r--  net/core/datagram.c  23
-rw-r--r--  net/core/dev.c  1
-rw-r--r--  net/ipv4/netfilter/ipt_CLUSTERIP.c  2
-rw-r--r--  net/ipv4/tcp.c  1
-rw-r--r--  net/ipv4/tcp_input.c  20
-rw-r--r--  net/ipv4/tcp_output.c  4
-rw-r--r--  net/ipv6/addrconf.c  11
-rw-r--r--  net/l2tp/l2tp_ppp.c  9
-rw-r--r--  net/netfilter/nf_conntrack_expect.c  4
-rw-r--r--  net/netfilter/nf_conntrack_helper.c  17
-rw-r--r--  net/netfilter/nf_conntrack_netlink.c  41
-rw-r--r--  net/netfilter/nf_nat_redirect.c  2
-rw-r--r--  net/netfilter/nft_hash.c  10
-rw-r--r--  net/netfilter/xt_TCPMSS.c  6
-rw-r--r--  net/netfilter/xt_TPROXY.c  5
-rw-r--r--  net/sched/sch_generic.c  2
-rw-r--r--  net/sctp/socket.c  3
-rw-r--r--  samples/statx/test-statx.c  12
-rw-r--r--  tools/perf/util/annotate.c  6
-rw-r--r--  tools/power/cpupower/utils/helpers/cpuid.c  1
-rw-r--r--  tools/power/x86/turbostat/turbostat.8  2
-rw-r--r--  tools/power/x86/turbostat/turbostat.c  26
-rw-r--r--  tools/testing/selftests/powerpc/Makefile  10
-rw-r--r--  virt/kvm/arm/vgic/vgic-init.c  19
-rw-r--r--  virt/kvm/arm/vgic/vgic-mmio-v2.c  20
-rw-r--r--  virt/kvm/arm/vgic/vgic-v2.c  23
-rw-r--r--  virt/kvm/arm/vgic/vgic.h  11
298 files changed, 3243 insertions(+), 1816 deletions(-)
diff --git a/.mailmap b/.mailmap
index 67dc22ffc9a8..1d6f4e7280dc 100644
--- a/.mailmap
+++ b/.mailmap
@@ -99,6 +99,8 @@ Linas Vepstas <linas@austin.ibm.com>
 Linus Lüssing <linus.luessing@c0d3.blue> <linus.luessing@web.de>
 Linus Lüssing <linus.luessing@c0d3.blue> <linus.luessing@ascom.ch>
 Mark Brown <broonie@sirena.org.uk>
+Martin Kepplinger <martink@posteo.de> <martin.kepplinger@theobroma-systems.com>
+Martin Kepplinger <martink@posteo.de> <martin.kepplinger@ginzinger.com>
 Matthieu CASTET <castet.matthieu@free.fr>
 Mauro Carvalho Chehab <mchehab@kernel.org> <mchehab@brturbo.com.br>
 Mauro Carvalho Chehab <mchehab@kernel.org> <maurochehab@gmail.com>
@@ -171,6 +173,7 @@ Vlad Dogaru <ddvlad@gmail.com> <vlad.dogaru@intel.com>
 Vladimir Davydov <vdavydov.dev@gmail.com> <vdavydov@virtuozzo.com>
 Vladimir Davydov <vdavydov.dev@gmail.com> <vdavydov@parallels.com>
 Takashi YOSHII <takashi.yoshii.zj@renesas.com>
+Yakir Yang <kuankuan.y@gmail.com> <ykk@rock-chips.com>
 Yusuke Goda <goda.yusuke@renesas.com>
 Gustavo Padovan <gustavo@las.ic.unicamp.br>
 Gustavo Padovan <padovan@profusion.mobi>
diff --git a/Documentation/filesystems/Locking b/Documentation/filesystems/Locking
index fdcfdd79682a..fe25787ff6d4 100644
--- a/Documentation/filesystems/Locking
+++ b/Documentation/filesystems/Locking
@@ -58,8 +58,7 @@ prototypes:
 	int (*permission) (struct inode *, int, unsigned int);
 	int (*get_acl)(struct inode *, int);
 	int (*setattr) (struct dentry *, struct iattr *);
-	int (*getattr) (const struct path *, struct dentry *, struct kstat *,
-			u32, unsigned int);
+	int (*getattr) (const struct path *, struct kstat *, u32, unsigned int);
 	ssize_t (*listxattr) (struct dentry *, char *, size_t);
 	int (*fiemap)(struct inode *, struct fiemap_extent_info *, u64 start, u64 len);
 	void (*update_time)(struct inode *, struct timespec *, int);
diff --git a/Documentation/filesystems/porting b/Documentation/filesystems/porting
index 95280079c0b3..5fb17f49f7a2 100644
--- a/Documentation/filesystems/porting
+++ b/Documentation/filesystems/porting
@@ -600,3 +600,9 @@ in your dentry operations instead.
 [recommended]
 	->readlink is optional for symlinks.  Don't set, unless filesystem needs
 	to fake something for readlink(2).
+--
+[mandatory]
+	->getattr() is now passed a struct path rather than a vfsmount and
+	dentry separately, and it now has request_mask and query_flags arguments
+	to specify the fields and sync type requested by statx.  Filesystems not
+	supporting any statx-specific features may ignore the new arguments.
diff --git a/Documentation/filesystems/vfs.txt b/Documentation/filesystems/vfs.txt
index 569211703721..94dd27ef4a76 100644
--- a/Documentation/filesystems/vfs.txt
+++ b/Documentation/filesystems/vfs.txt
@@ -382,8 +382,7 @@ struct inode_operations {
 	int (*permission) (struct inode *, int);
 	int (*get_acl)(struct inode *, int);
 	int (*setattr) (struct dentry *, struct iattr *);
-	int (*getattr) (const struct path *, struct dentry *, struct kstat *,
-			u32, unsigned int);
+	int (*getattr) (const struct path *, struct kstat *, u32, unsigned int);
 	ssize_t (*listxattr) (struct dentry *, char *, size_t);
 	void (*update_time)(struct inode *, struct timespec *, int);
 	int (*atomic_open)(struct inode *, struct dentry *, struct file *,
diff --git a/Documentation/pinctrl.txt b/Documentation/pinctrl.txt
index 54bd5faa8782..f2af35f6d6b2 100644
--- a/Documentation/pinctrl.txt
+++ b/Documentation/pinctrl.txt
@@ -77,9 +77,15 @@ static struct pinctrl_desc foo_desc = {
 
 int __init foo_probe(void)
 {
+	int error;
+
 	struct pinctrl_dev *pctl;
 
-	return pinctrl_register_and_init(&foo_desc, <PARENT>, NULL, &pctl);
+	error = pinctrl_register_and_init(&foo_desc, <PARENT>, NULL, &pctl);
+	if (error)
+		return error;
+
+	return pinctrl_enable(pctl);
 }
 
 To enable the pinctrl subsystem and the subgroups for PINMUX and PINCONF and
diff --git a/Documentation/process/stable-kernel-rules.rst b/Documentation/process/stable-kernel-rules.rst
index 11ec2d93a5e0..61e9c78bd6d1 100644
--- a/Documentation/process/stable-kernel-rules.rst
+++ b/Documentation/process/stable-kernel-rules.rst
@@ -124,7 +124,7 @@ specified in the following format in the sign-off area:
 
 .. code-block:: none
 
-     Cc: <stable@vger.kernel.org> # 3.3.x-
+     Cc: <stable@vger.kernel.org> # 3.3.x
 
 The tag has the meaning of:
 
diff --git a/Documentation/virtual/kvm/devices/arm-vgic.txt b/Documentation/virtual/kvm/devices/arm-vgic.txt
index 76e61c883347..b2f60ca8b60c 100644
--- a/Documentation/virtual/kvm/devices/arm-vgic.txt
+++ b/Documentation/virtual/kvm/devices/arm-vgic.txt
@@ -83,6 +83,12 @@ Groups:
 
   Bits for undefined preemption levels are RAZ/WI.
 
+  For historical reasons and to provide ABI compatibility with userspace we
+  export the GICC_PMR register in the format of the GICH_VMCR.VMPriMask
+  field in the lower 5 bits of a word, meaning that userspace must always
+  use the lower 5 bits to communicate with the KVM device and must shift the
+  value left by 3 places to obtain the actual priority mask level.
+
   Limitations:
   - Priorities are not implemented, and registers are RAZ/WI
   - Currently only implemented for KVM_DEV_TYPE_ARM_VGIC_V2.
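
For reference, a minimal C sketch (not part of this diff; the helper names are
hypothetical) of the conversion the documentation text above describes, between
the 5-bit GICC_PMR value exchanged with the KVM device and the architectural
8-bit priority mask:

	#include <stdint.h>

	/* 5-bit KVM device value -> architectural priority mask */
	static inline uint8_t kvm_pmr_to_arch(uint8_t kvm_val)
	{
		return (uint8_t)((kvm_val & 0x1f) << 3);	/* shift left by 3 places */
	}

	/* architectural priority mask -> 5-bit KVM device value */
	static inline uint8_t arch_pmr_to_kvm(uint8_t arch_val)
	{
		return (arch_val >> 3) & 0x1f;
	}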
diff --git a/MAINTAINERS b/MAINTAINERS
index 5397f54af5fc..8bc85dc8a71a 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -4124,14 +4124,13 @@ F: drivers/block/drbd/
 F:	lib/lru_cache.c
 F:	Documentation/blockdev/drbd/
 
-DRIVER CORE, KOBJECTS, DEBUGFS, KERNFS AND SYSFS
+DRIVER CORE, KOBJECTS, DEBUGFS AND SYSFS
 M:	Greg Kroah-Hartman <gregkh@linuxfoundation.org>
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/driver-core.git
 S:	Supported
 F:	Documentation/kobject.txt
 F:	drivers/base/
 F:	fs/debugfs/
-F:	fs/kernfs/
 F:	fs/sysfs/
 F:	include/linux/debugfs.h
 F:	include/linux/kobj*
@@ -7216,6 +7215,14 @@ F: arch/mips/include/uapi/asm/kvm*
 F:	arch/mips/include/asm/kvm*
 F:	arch/mips/kvm/
 
+KERNFS
+M:	Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+M:	Tejun Heo <tj@kernel.org>
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/driver-core.git
+S:	Supported
+F:	include/linux/kernfs.h
+F:	fs/kernfs/
+
 KEXEC
 M:	Eric Biederman <ebiederm@xmission.com>
 W:	http://kernel.org/pub/linux/utils/kernel/kexec/
@@ -13311,7 +13318,7 @@ F: drivers/virtio/
 F:	tools/virtio/
 F:	drivers/net/virtio_net.c
 F:	drivers/block/virtio_blk.c
-F:	include/linux/virtio_*.h
+F:	include/linux/virtio*.h
 F:	include/uapi/linux/virtio_*.h
 F:	drivers/crypto/virtio/
 
diff --git a/Makefile b/Makefile
index 7acbcb324bae..efa267a92ba6 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 4
 PATCHLEVEL = 11
 SUBLEVEL = 0
-EXTRAVERSION = -rc5
+EXTRAVERSION = -rc6
 NAME = Fearless Coyote
 
 # *DOCUMENTATION*
diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c
index 0b961093ca5c..6d76e528ab8f 100644
--- a/arch/alpha/kernel/osf_sys.c
+++ b/arch/alpha/kernel/osf_sys.c
@@ -1290,7 +1290,7 @@ SYSCALL_DEFINE1(old_adjtimex, struct timex32 __user *, txc_p)
 	/* copy relevant bits of struct timex. */
 	if (copy_from_user(&txc, txc_p, offsetof(struct timex32, time)) ||
 	    copy_from_user(&txc.tick, &txc_p->tick, sizeof(struct timex32) -
-			   offsetof(struct timex32, time)))
+			   offsetof(struct timex32, tick)))
 		return -EFAULT;
 
 	ret = do_adjtimex(&txc);
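
A small illustrative sketch (hypothetical struct, not from this diff) of why
the length passed to the second copy_from_user() above must be derived from the
same member the copy starts at:

	#include <stddef.h>

	struct demo { long time; long tick; long tolerance; };

	/* Bytes from 'tick' to the end of the struct: the byte count must be
	 * sizeof() minus offsetof() of 'tick' itself, which is what the fix
	 * restores; using offsetof(..., time) over-counts by one member. */
	size_t tail_len = sizeof(struct demo) - offsetof(struct demo, tick);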
diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c
index 96dba7cd8be7..314eb6abe1ff 100644
--- a/arch/arm/kvm/arm.c
+++ b/arch/arm/kvm/arm.c
@@ -1124,6 +1124,9 @@ static void cpu_hyp_reinit(void)
 	if (__hyp_get_vectors() == hyp_default_vectors)
 		cpu_init_hyp_mode(NULL);
 	}
+
+	if (vgic_present)
+		kvm_vgic_init_cpu_hardware();
 }
 
 static void cpu_hyp_reset(void)
diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c
index 962616fd4ddd..582a972371cf 100644
--- a/arch/arm/kvm/mmu.c
+++ b/arch/arm/kvm/mmu.c
@@ -292,11 +292,18 @@ static void unmap_stage2_range(struct kvm *kvm, phys_addr_t start, u64 size)
 	phys_addr_t addr = start, end = start + size;
 	phys_addr_t next;
 
+	assert_spin_locked(&kvm->mmu_lock);
 	pgd = kvm->arch.pgd + stage2_pgd_index(addr);
 	do {
 		next = stage2_pgd_addr_end(addr, end);
 		if (!stage2_pgd_none(*pgd))
 			unmap_stage2_puds(kvm, pgd, addr, next);
+		/*
+		 * If the range is too large, release the kvm->mmu_lock
+		 * to prevent starvation and lockup detector warnings.
+		 */
+		if (next != end)
+			cond_resched_lock(&kvm->mmu_lock);
 	} while (pgd++, addr = next, addr != end);
 }
 
@@ -803,6 +810,7 @@ void stage2_unmap_vm(struct kvm *kvm)
 	int idx;
 
 	idx = srcu_read_lock(&kvm->srcu);
+	down_read(&current->mm->mmap_sem);
 	spin_lock(&kvm->mmu_lock);
 
 	slots = kvm_memslots(kvm);
@@ -810,6 +818,7 @@ void stage2_unmap_vm(struct kvm *kvm)
 		stage2_unmap_memslot(kvm, memslot);
 
 	spin_unlock(&kvm->mmu_lock);
+	up_read(&current->mm->mmap_sem);
 	srcu_read_unlock(&kvm->srcu, idx);
 }
 
@@ -829,7 +838,10 @@ void kvm_free_stage2_pgd(struct kvm *kvm)
 	if (kvm->arch.pgd == NULL)
 		return;
 
+	spin_lock(&kvm->mmu_lock);
 	unmap_stage2_range(kvm, 0, KVM_PHYS_SIZE);
+	spin_unlock(&kvm->mmu_lock);
+
 	/* Free the HW pgd, one page at a time */
 	free_pages_exact(kvm->arch.pgd, S2_PGD_SIZE);
 	kvm->arch.pgd = NULL;
@@ -1801,6 +1813,7 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
 	    (KVM_PHYS_SIZE >> PAGE_SHIFT))
 		return -EFAULT;
 
+	down_read(&current->mm->mmap_sem);
 	/*
 	 * A memory region could potentially cover multiple VMAs, and any holes
 	 * between them, so iterate over all of them to find out if we can map
@@ -1844,8 +1857,10 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
 			pa += vm_start - vma->vm_start;
 
 			/* IO region dirty page logging not allowed */
-			if (memslot->flags & KVM_MEM_LOG_DIRTY_PAGES)
-				return -EINVAL;
+			if (memslot->flags & KVM_MEM_LOG_DIRTY_PAGES) {
+				ret = -EINVAL;
+				goto out;
+			}
 
 			ret = kvm_phys_addr_ioremap(kvm, gpa, pa,
 						    vm_end - vm_start,
@@ -1857,7 +1872,7 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
 	} while (hva < reg_end);
 
 	if (change == KVM_MR_FLAGS_ONLY)
-		return ret;
+		goto out;
 
 	spin_lock(&kvm->mmu_lock);
 	if (ret)
@@ -1865,6 +1880,8 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
 	else
 		stage2_flush_memslot(kvm, memslot);
 	spin_unlock(&kvm->mmu_lock);
+out:
+	up_read(&current->mm->mmap_sem);
 	return ret;
 }
 
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 63eabb06f9f1..475811f5383a 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -935,13 +935,31 @@ static void arm_coherent_dma_free(struct device *dev, size_t size, void *cpu_add
 	__arm_dma_free(dev, size, cpu_addr, handle, attrs, true);
 }
 
+/*
+ * The whole dma_get_sgtable() idea is fundamentally unsafe - it seems
+ * that the intention is to allow exporting memory allocated via the
+ * coherent DMA APIs through the dma_buf API, which only accepts a
+ * scattertable.  This presents a couple of problems:
+ * 1. Not all memory allocated via the coherent DMA APIs is backed by
+ *    a struct page
+ * 2. Passing coherent DMA memory into the streaming APIs is not allowed
+ *    as we will try to flush the memory through a different alias to that
+ *    actually being used (and the flushes are redundant.)
+ */
 int arm_dma_get_sgtable(struct device *dev, struct sg_table *sgt,
 		 void *cpu_addr, dma_addr_t handle, size_t size,
 		 unsigned long attrs)
 {
-	struct page *page = pfn_to_page(dma_to_pfn(dev, handle));
+	unsigned long pfn = dma_to_pfn(dev, handle);
+	struct page *page;
 	int ret;
 
+	/* If the PFN is not valid, we do not have a struct page */
+	if (!pfn_valid(pfn))
+		return -ENXIO;
+
+	page = pfn_to_page(pfn);
+
 	ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
 	if (unlikely(ret))
 		return ret;
diff --git a/arch/arm/mm/nommu.c b/arch/arm/mm/nommu.c
index 3b5c7aaf9c76..33a45bd96860 100644
--- a/arch/arm/mm/nommu.c
+++ b/arch/arm/mm/nommu.c
@@ -303,7 +303,10 @@ static inline void set_vbar(unsigned long val)
  */
 static inline bool security_extensions_enabled(void)
 {
-	return !!cpuid_feature_extract(CPUID_EXT_PFR1, 4);
+	/* Check CPUID Identification Scheme before ID_PFR1 read */
+	if ((read_cpuid_id() & 0x000f0000) == 0x000f0000)
+		return !!cpuid_feature_extract(CPUID_EXT_PFR1, 4);
+	return 0;
 }
 
 static unsigned long __init setup_vectors_base(void)
diff --git a/arch/arm/probes/kprobes/core.c b/arch/arm/probes/kprobes/core.c
index b6dc9d838a9a..ad1f4e6a9e33 100644
--- a/arch/arm/probes/kprobes/core.c
+++ b/arch/arm/probes/kprobes/core.c
@@ -266,11 +266,20 @@ void __kprobes kprobe_handler(struct pt_regs *regs)
 #endif
 
 	if (p) {
-		if (cur) {
+		if (!p->ainsn.insn_check_cc(regs->ARM_cpsr)) {
+			/*
+			 * Probe hit but conditional execution check failed,
+			 * so just skip the instruction and continue as if
+			 * nothing had happened.
+			 * In this case, we can skip recursing check too.
+			 */
+			singlestep_skip(p, regs);
+		} else if (cur) {
 			/* Kprobe is pending, so we're recursing. */
 			switch (kcb->kprobe_status) {
 			case KPROBE_HIT_ACTIVE:
 			case KPROBE_HIT_SSDONE:
+			case KPROBE_HIT_SS:
 				/* A pre- or post-handler probe got us here. */
 				kprobes_inc_nmissed_count(p);
 				save_previous_kprobe(kcb);
@@ -279,11 +288,16 @@ void __kprobes kprobe_handler(struct pt_regs *regs)
 				singlestep(p, regs, kcb);
 				restore_previous_kprobe(kcb);
 				break;
+			case KPROBE_REENTER:
+				/* A nested probe was hit in FIQ, it is a BUG */
+				pr_warn("Unrecoverable kprobe detected at %p.\n",
+					p->addr);
+				/* fall through */
 			default:
 				/* impossible cases */
 				BUG();
 			}
-		} else if (p->ainsn.insn_check_cc(regs->ARM_cpsr)) {
+		} else {
 			/* Probe hit and conditional execution check ok. */
 			set_current_kprobe(p);
 			kcb->kprobe_status = KPROBE_HIT_ACTIVE;
@@ -304,13 +318,6 @@ void __kprobes kprobe_handler(struct pt_regs *regs)
 			}
 			reset_current_kprobe();
 		}
-	} else {
-		/*
-		 * Probe hit but conditional execution check failed,
-		 * so just skip the instruction and continue as if
-		 * nothing had happened.
-		 */
-		singlestep_skip(p, regs);
 	}
 	} else if (cur) {
 		/* We probably hit a jprobe.  Call its break handler. */
@@ -434,6 +441,7 @@ static __used __kprobes void *trampoline_handler(struct pt_regs *regs)
 	struct hlist_node *tmp;
 	unsigned long flags, orig_ret_address = 0;
 	unsigned long trampoline_address = (unsigned long)&kretprobe_trampoline;
+	kprobe_opcode_t *correct_ret_addr = NULL;
 
 	INIT_HLIST_HEAD(&empty_rp);
 	kretprobe_hash_lock(current, &head, &flags);
@@ -456,14 +464,34 @@ static __used __kprobes void *trampoline_handler(struct pt_regs *regs)
 			/* another task is sharing our hash bucket */
 			continue;
 
+		orig_ret_address = (unsigned long)ri->ret_addr;
+
+		if (orig_ret_address != trampoline_address)
+			/*
+			 * This is the real return address. Any other
+			 * instances associated with this task are for
+			 * other calls deeper on the call stack
+			 */
+			break;
+	}
+
+	kretprobe_assert(ri, orig_ret_address, trampoline_address);
+
+	correct_ret_addr = ri->ret_addr;
+	hlist_for_each_entry_safe(ri, tmp, head, hlist) {
+		if (ri->task != current)
+			/* another task is sharing our hash bucket */
+			continue;
+
+		orig_ret_address = (unsigned long)ri->ret_addr;
 		if (ri->rp && ri->rp->handler) {
 			__this_cpu_write(current_kprobe, &ri->rp->kp);
 			get_kprobe_ctlblk()->kprobe_status = KPROBE_HIT_ACTIVE;
+			ri->ret_addr = correct_ret_addr;
 			ri->rp->handler(ri, regs);
 			__this_cpu_write(current_kprobe, NULL);
 		}
 
-		orig_ret_address = (unsigned long)ri->ret_addr;
 		recycle_rp_inst(ri, &empty_rp);
 
 		if (orig_ret_address != trampoline_address)
@@ -475,7 +503,6 @@ static __used __kprobes void *trampoline_handler(struct pt_regs *regs)
 			break;
 	}
 
-	kretprobe_assert(ri, orig_ret_address, trampoline_address);
 	kretprobe_hash_unlock(current, &flags);
 
 	hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {
diff --git a/arch/arm/probes/kprobes/test-core.c b/arch/arm/probes/kprobes/test-core.c
index c893726aa52d..1c98a87786ca 100644
--- a/arch/arm/probes/kprobes/test-core.c
+++ b/arch/arm/probes/kprobes/test-core.c
@@ -977,7 +977,10 @@ static void coverage_end(void)
 void __naked __kprobes_test_case_start(void)
 {
 	__asm__ __volatile__ (
-	"stmdb	sp!, {r4-r11}	\n\t"
+	"mov	r2, sp		\n\t"
+	"bic	r3, r2, #7	\n\t"
+	"mov	sp, r3		\n\t"
+	"stmdb	sp!, {r2-r11}	\n\t"
 	"sub	sp, sp, #"__stringify(TEST_MEMORY_SIZE)"\n\t"
 	"bic	r0, lr, #1  @ r0 = inline data	\n\t"
 	"mov	r1, sp		\n\t"
@@ -997,7 +1000,8 @@ void __naked __kprobes_test_case_end_32(void)
997 "movne pc, r0 \n\t" 1000 "movne pc, r0 \n\t"
998 "mov r0, r4 \n\t" 1001 "mov r0, r4 \n\t"
999 "add sp, sp, #"__stringify(TEST_MEMORY_SIZE)"\n\t" 1002 "add sp, sp, #"__stringify(TEST_MEMORY_SIZE)"\n\t"
1000 "ldmia sp!, {r4-r11} \n\t" 1003 "ldmia sp!, {r2-r11} \n\t"
1004 "mov sp, r2 \n\t"
1001 "mov pc, r0 \n\t" 1005 "mov pc, r0 \n\t"
1002 ); 1006 );
1003} 1007}
@@ -1013,7 +1017,8 @@ void __naked __kprobes_test_case_end_16(void)
1013 "bxne r0 \n\t" 1017 "bxne r0 \n\t"
1014 "mov r0, r4 \n\t" 1018 "mov r0, r4 \n\t"
1015 "add sp, sp, #"__stringify(TEST_MEMORY_SIZE)"\n\t" 1019 "add sp, sp, #"__stringify(TEST_MEMORY_SIZE)"\n\t"
1016 "ldmia sp!, {r4-r11} \n\t" 1020 "ldmia sp!, {r2-r11} \n\t"
1021 "mov sp, r2 \n\t"
1017 "bx r0 \n\t" 1022 "bx r0 \n\t"
1018 ); 1023 );
1019} 1024}
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index 4bf899fb451b..1b35b8bddbfb 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -42,7 +42,20 @@
 #include <asm/pgtable.h>
 #include <asm/tlbflush.h>
 
-static const char *fault_name(unsigned int esr);
+struct fault_info {
+	int	(*fn)(unsigned long addr, unsigned int esr,
+		      struct pt_regs *regs);
+	int	sig;
+	int	code;
+	const char *name;
+};
+
+static const struct fault_info fault_info[];
+
+static inline const struct fault_info *esr_to_fault_info(unsigned int esr)
+{
+	return fault_info + (esr & 63);
+}
 
 #ifdef CONFIG_KPROBES
 static inline int notify_page_fault(struct pt_regs *regs, unsigned int esr)
@@ -197,10 +210,12 @@ static void __do_user_fault(struct task_struct *tsk, unsigned long addr,
 			    struct pt_regs *regs)
 {
 	struct siginfo si;
+	const struct fault_info *inf;
 
 	if (unhandled_signal(tsk, sig) && show_unhandled_signals_ratelimited()) {
+		inf = esr_to_fault_info(esr);
 		pr_info("%s[%d]: unhandled %s (%d) at 0x%08lx, esr 0x%03x\n",
-			tsk->comm, task_pid_nr(tsk), fault_name(esr), sig,
+			tsk->comm, task_pid_nr(tsk), inf->name, sig,
 			addr, esr);
 		show_pte(tsk->mm, addr);
 		show_regs(regs);
@@ -219,14 +234,16 @@ static void do_bad_area(unsigned long addr, unsigned int esr, struct pt_regs *re
 {
 	struct task_struct *tsk = current;
 	struct mm_struct *mm = tsk->active_mm;
+	const struct fault_info *inf;
 
 	/*
 	 * If we are in kernel mode at this point, we have no context to
 	 * handle this fault with.
 	 */
-	if (user_mode(regs))
-		__do_user_fault(tsk, addr, esr, SIGSEGV, SEGV_MAPERR, regs);
-	else
+	if (user_mode(regs)) {
+		inf = esr_to_fault_info(esr);
+		__do_user_fault(tsk, addr, esr, inf->sig, inf->code, regs);
+	} else
 		__do_kernel_fault(mm, addr, esr, regs);
 }
 
@@ -488,12 +505,7 @@ static int do_bad(unsigned long addr, unsigned int esr, struct pt_regs *regs)
 	return 1;
 }
 
-static const struct fault_info {
-	int	(*fn)(unsigned long addr, unsigned int esr, struct pt_regs *regs);
-	int	sig;
-	int	code;
-	const char *name;
-} fault_info[] = {
+static const struct fault_info fault_info[] = {
 	{ do_bad,		SIGBUS,  0,	"ttbr address size fault"	},
 	{ do_bad,		SIGBUS,  0,	"level 1 address size fault"	},
 	{ do_bad,		SIGBUS,  0,	"level 2 address size fault"	},
@@ -560,19 +572,13 @@ static const struct fault_info {
 	{ do_bad,		SIGBUS,  0,	"unknown 63"			},
 };
 
-static const char *fault_name(unsigned int esr)
-{
-	const struct fault_info *inf = fault_info + (esr & 63);
-	return inf->name;
-}
-
 /*
  * Dispatch a data abort to the relevant handler.
  */
 asmlinkage void __exception do_mem_abort(unsigned long addr, unsigned int esr,
 					 struct pt_regs *regs)
 {
-	const struct fault_info *inf = fault_info + (esr & 63);
+	const struct fault_info *inf = esr_to_fault_info(esr);
 	struct siginfo info;
 
 	if (!inf->fn(addr, esr, regs))
diff --git a/arch/arm64/mm/hugetlbpage.c b/arch/arm64/mm/hugetlbpage.c
index e25584d72396..7514a000e361 100644
--- a/arch/arm64/mm/hugetlbpage.c
+++ b/arch/arm64/mm/hugetlbpage.c
@@ -294,10 +294,6 @@ static __init int setup_hugepagesz(char *opt)
 		hugetlb_add_hstate(PMD_SHIFT - PAGE_SHIFT);
 	} else if (ps == PUD_SIZE) {
 		hugetlb_add_hstate(PUD_SHIFT - PAGE_SHIFT);
-	} else if (ps == (PAGE_SIZE * CONT_PTES)) {
-		hugetlb_add_hstate(CONT_PTE_SHIFT);
-	} else if (ps == (PMD_SIZE * CONT_PMDS)) {
-		hugetlb_add_hstate((PMD_SHIFT + CONT_PMD_SHIFT) - PAGE_SHIFT);
 	} else {
 		hugetlb_bad_size();
 		pr_err("hugepagesz: Unsupported page size %lu K\n", ps >> 10);
@@ -306,13 +302,3 @@ static __init int setup_hugepagesz(char *opt)
 	return 1;
 }
 __setup("hugepagesz=", setup_hugepagesz);
-
-#ifdef CONFIG_ARM64_64K_PAGES
-static __init int add_default_hugepagesz(void)
-{
-	if (size_to_hstate(CONT_PTES * PAGE_SIZE) == NULL)
-		hugetlb_add_hstate(CONT_PTE_SHIFT);
-	return 0;
-}
-arch_initcall(add_default_hugepagesz);
-#endif
diff --git a/arch/ia64/include/asm/asm-prototypes.h b/arch/ia64/include/asm/asm-prototypes.h
new file mode 100644
index 000000000000..a2c139808cfe
--- /dev/null
+++ b/arch/ia64/include/asm/asm-prototypes.h
@@ -0,0 +1,29 @@
+#ifndef _ASM_IA64_ASM_PROTOTYPES_H
+#define _ASM_IA64_ASM_PROTOTYPES_H
+
+#include <asm/cacheflush.h>
+#include <asm/checksum.h>
+#include <asm/esi.h>
+#include <asm/ftrace.h>
+#include <asm/page.h>
+#include <asm/pal.h>
+#include <asm/string.h>
+#include <asm/uaccess.h>
+#include <asm/unwind.h>
+#include <asm/xor.h>
+
+extern const char ia64_ivt[];
+
+signed int __divsi3(signed int, unsigned int);
+signed int __modsi3(signed int, unsigned int);
+
+signed long long __divdi3(signed long long, unsigned long long);
+signed long long __moddi3(signed long long, unsigned long long);
+
+unsigned int __udivsi3(unsigned int, unsigned int);
+unsigned int __umodsi3(unsigned int, unsigned int);
+
+unsigned long long __udivdi3(unsigned long long, unsigned long long);
+unsigned long long __umoddi3(unsigned long long, unsigned long long);
+
+#endif /* _ASM_IA64_ASM_PROTOTYPES_H */
diff --git a/arch/ia64/lib/Makefile b/arch/ia64/lib/Makefile
index 1f3d3877618f..0a40b14407b1 100644
--- a/arch/ia64/lib/Makefile
+++ b/arch/ia64/lib/Makefile
@@ -24,25 +24,25 @@ AFLAGS___modsi3.o = -DMODULO
 AFLAGS___umodsi3.o	= -DUNSIGNED -DMODULO
 
 $(obj)/__divdi3.o: $(src)/idiv64.S FORCE
-	$(call if_changed_dep,as_o_S)
+	$(call if_changed_rule,as_o_S)
 
 $(obj)/__udivdi3.o: $(src)/idiv64.S FORCE
-	$(call if_changed_dep,as_o_S)
+	$(call if_changed_rule,as_o_S)
 
 $(obj)/__moddi3.o: $(src)/idiv64.S FORCE
-	$(call if_changed_dep,as_o_S)
+	$(call if_changed_rule,as_o_S)
 
 $(obj)/__umoddi3.o: $(src)/idiv64.S FORCE
-	$(call if_changed_dep,as_o_S)
+	$(call if_changed_rule,as_o_S)
 
 $(obj)/__divsi3.o: $(src)/idiv32.S FORCE
-	$(call if_changed_dep,as_o_S)
+	$(call if_changed_rule,as_o_S)
 
 $(obj)/__udivsi3.o: $(src)/idiv32.S FORCE
-	$(call if_changed_dep,as_o_S)
+	$(call if_changed_rule,as_o_S)
 
 $(obj)/__modsi3.o: $(src)/idiv32.S FORCE
-	$(call if_changed_dep,as_o_S)
+	$(call if_changed_rule,as_o_S)
 
 $(obj)/__umodsi3.o: $(src)/idiv32.S FORCE
-	$(call if_changed_dep,as_o_S)
+	$(call if_changed_rule,as_o_S)
diff --git a/arch/metag/include/asm/uaccess.h b/arch/metag/include/asm/uaccess.h
index 273e61225c27..07238b39638c 100644
--- a/arch/metag/include/asm/uaccess.h
+++ b/arch/metag/include/asm/uaccess.h
@@ -197,20 +197,21 @@ extern long __must_check strnlen_user(const char __user *src, long count);
 
 #define strlen_user(str) strnlen_user(str, 32767)
 
-extern unsigned long __must_check __copy_user_zeroing(void *to,
-						      const void __user *from,
-						      unsigned long n);
+extern unsigned long raw_copy_from_user(void *to, const void __user *from,
+					unsigned long n);
 
 static inline unsigned long
 copy_from_user(void *to, const void __user *from, unsigned long n)
 {
+	unsigned long res = n;
 	if (likely(access_ok(VERIFY_READ, from, n)))
-		return __copy_user_zeroing(to, from, n);
-	memset(to, 0, n);
-	return n;
+		res = raw_copy_from_user(to, from, n);
+	if (unlikely(res))
+		memset(to + (n - res), 0, res);
+	return res;
 }
 
-#define __copy_from_user(to, from, n) __copy_user_zeroing(to, from, n)
+#define __copy_from_user(to, from, n) raw_copy_from_user(to, from, n)
 #define __copy_from_user_inatomic __copy_from_user
 
 extern unsigned long __must_check __copy_user(void __user *to,
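
The caller-visible contract of the fixed copy_from_user() above, as a short
sketch (the function and its arguments are illustrative only, not part of the
patch): on a partial copy, only the un-copied tail of the kernel buffer is
zero-filled, and the return value is the number of bytes that could not be
copied.

	static int demo_read(void *kbuf, const void __user *ubuf, size_t n)
	{
		size_t res = copy_from_user(kbuf, ubuf, n); /* bytes NOT copied */

		if (res) {
			/* kbuf[0 .. n-res-1] holds user data;
			 * kbuf[n-res .. n-1] was zeroed by copy_from_user() */
			return -EFAULT;
		}
		return 0;
	}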
diff --git a/arch/metag/lib/usercopy.c b/arch/metag/lib/usercopy.c
index b3ebfe9c8e88..2792fc621088 100644
--- a/arch/metag/lib/usercopy.c
+++ b/arch/metag/lib/usercopy.c
@@ -29,7 +29,6 @@
 		COPY						\
 		"1:\n"						\
 		"	.section .fixup,\"ax\"\n"		\
-		"	MOV D1Ar1,#0\n"				\
 		FIXUP						\
 		"	MOVT    D1Ar1,#HI(1b)\n"		\
 		"	JUMP    D1Ar1,#LO(1b)\n"		\
@@ -260,27 +259,31 @@
260 "MGETL D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \ 259 "MGETL D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
261 "22:\n" \ 260 "22:\n" \
262 "MSETL [%0++], D0FrT, D0.5, D0.6, D0.7\n" \ 261 "MSETL [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
263 "SUB %3, %3, #32\n" \
264 "23:\n" \ 262 "23:\n" \
265 "MGETL D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \ 263 "SUB %3, %3, #32\n" \
266 "24:\n" \ 264 "24:\n" \
265 "MGETL D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
266 "25:\n" \
267 "MSETL [%0++], D0FrT, D0.5, D0.6, D0.7\n" \ 267 "MSETL [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
268 "26:\n" \
268 "SUB %3, %3, #32\n" \ 269 "SUB %3, %3, #32\n" \
269 "DCACHE [%1+#-64], D0Ar6\n" \ 270 "DCACHE [%1+#-64], D0Ar6\n" \
270 "BR $Lloop"id"\n" \ 271 "BR $Lloop"id"\n" \
271 \ 272 \
272 "MOV RAPF, %1\n" \ 273 "MOV RAPF, %1\n" \
273 "25:\n" \ 274 "27:\n" \
274 "MGETL D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \ 275 "MGETL D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
275 "26:\n" \ 276 "28:\n" \
276 "MSETL [%0++], D0FrT, D0.5, D0.6, D0.7\n" \ 277 "MSETL [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
278 "29:\n" \
277 "SUB %3, %3, #32\n" \ 279 "SUB %3, %3, #32\n" \
278 "27:\n" \ 280 "30:\n" \
279 "MGETL D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \ 281 "MGETL D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
280 "28:\n" \ 282 "31:\n" \
281 "MSETL [%0++], D0FrT, D0.5, D0.6, D0.7\n" \ 283 "MSETL [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
284 "32:\n" \
282 "SUB %0, %0, #8\n" \ 285 "SUB %0, %0, #8\n" \
283 "29:\n" \ 286 "33:\n" \
284 "SETL [%0++], D0.7, D1.7\n" \ 287 "SETL [%0++], D0.7, D1.7\n" \
285 "SUB %3, %3, #32\n" \ 288 "SUB %3, %3, #32\n" \
286 "1:" \ 289 "1:" \
@@ -312,11 +315,15 @@
312 " .long 26b,3b\n" \ 315 " .long 26b,3b\n" \
313 " .long 27b,3b\n" \ 316 " .long 27b,3b\n" \
314 " .long 28b,3b\n" \ 317 " .long 28b,3b\n" \
315 " .long 29b,4b\n" \ 318 " .long 29b,3b\n" \
319 " .long 30b,3b\n" \
320 " .long 31b,3b\n" \
321 " .long 32b,3b\n" \
322 " .long 33b,4b\n" \
316 " .previous\n" \ 323 " .previous\n" \
317 : "=r" (to), "=r" (from), "=r" (ret), "=d" (n) \ 324 : "=r" (to), "=r" (from), "=r" (ret), "=d" (n) \
318 : "0" (to), "1" (from), "2" (ret), "3" (n) \ 325 : "0" (to), "1" (from), "2" (ret), "3" (n) \
319 : "D1Ar1", "D0Ar2", "memory") 326 : "D1Ar1", "D0Ar2", "cc", "memory")
320 327
321/* rewind 'to' and 'from' pointers when a fault occurs 328/* rewind 'to' and 'from' pointers when a fault occurs
322 * 329 *
@@ -342,7 +349,7 @@
 #define __asm_copy_to_user_64bit_rapf_loop(to, from, ret, n, id)\
 	__asm_copy_user_64bit_rapf_loop(to, from, ret, n, id,	\
 		"LSR	D0Ar2, D0Ar2, #8\n"			\
-		"AND	D0Ar2, D0Ar2, #0x7\n"			\
+		"ANDS	D0Ar2, D0Ar2, #0x7\n"			\
 		"ADDZ	D0Ar2, D0Ar2, #4\n"			\
 		"SUB	D0Ar2, D0Ar2, #1\n"			\
 		"MOV	D1Ar1, #4\n"				\
@@ -403,47 +410,55 @@
403 "MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \ 410 "MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
404 "22:\n" \ 411 "22:\n" \
405 "MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \ 412 "MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
406 "SUB %3, %3, #16\n" \
407 "23:\n" \ 413 "23:\n" \
408 "MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
409 "24:\n" \
410 "MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
411 "SUB %3, %3, #16\n" \ 414 "SUB %3, %3, #16\n" \
412 "25:\n" \ 415 "24:\n" \
413 "MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \ 416 "MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
414 "26:\n" \ 417 "25:\n" \
415 "MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \ 418 "MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
419 "26:\n" \
416 "SUB %3, %3, #16\n" \ 420 "SUB %3, %3, #16\n" \
417 "27:\n" \ 421 "27:\n" \
418 "MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \ 422 "MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
419 "28:\n" \ 423 "28:\n" \
420 "MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \ 424 "MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
425 "29:\n" \
426 "SUB %3, %3, #16\n" \
427 "30:\n" \
428 "MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
429 "31:\n" \
430 "MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
431 "32:\n" \
421 "SUB %3, %3, #16\n" \ 432 "SUB %3, %3, #16\n" \
422 "DCACHE [%1+#-64], D0Ar6\n" \ 433 "DCACHE [%1+#-64], D0Ar6\n" \
423 "BR $Lloop"id"\n" \ 434 "BR $Lloop"id"\n" \
424 \ 435 \
425 "MOV RAPF, %1\n" \ 436 "MOV RAPF, %1\n" \
426 "29:\n" \ 437 "33:\n" \
427 "MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \ 438 "MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
428 "30:\n" \ 439 "34:\n" \
429 "MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \ 440 "MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
441 "35:\n" \
430 "SUB %3, %3, #16\n" \ 442 "SUB %3, %3, #16\n" \
431 "31:\n" \ 443 "36:\n" \
432 "MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \ 444 "MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
433 "32:\n" \ 445 "37:\n" \
434 "MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \ 446 "MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
447 "38:\n" \
435 "SUB %3, %3, #16\n" \ 448 "SUB %3, %3, #16\n" \
436 "33:\n" \ 449 "39:\n" \
437 "MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \ 450 "MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
438 "34:\n" \ 451 "40:\n" \
439 "MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \ 452 "MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
453 "41:\n" \
440 "SUB %3, %3, #16\n" \ 454 "SUB %3, %3, #16\n" \
441 "35:\n" \ 455 "42:\n" \
442 "MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \ 456 "MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
443 "36:\n" \ 457 "43:\n" \
444 "MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \ 458 "MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
459 "44:\n" \
445 "SUB %0, %0, #4\n" \ 460 "SUB %0, %0, #4\n" \
446 "37:\n" \ 461 "45:\n" \
447 "SETD [%0++], D0.7\n" \ 462 "SETD [%0++], D0.7\n" \
448 "SUB %3, %3, #16\n" \ 463 "SUB %3, %3, #16\n" \
449 "1:" \ 464 "1:" \
@@ -483,11 +498,19 @@
483 " .long 34b,3b\n" \ 498 " .long 34b,3b\n" \
484 " .long 35b,3b\n" \ 499 " .long 35b,3b\n" \
485 " .long 36b,3b\n" \ 500 " .long 36b,3b\n" \
486 " .long 37b,4b\n" \ 501 " .long 37b,3b\n" \
502 " .long 38b,3b\n" \
503 " .long 39b,3b\n" \
504 " .long 40b,3b\n" \
505 " .long 41b,3b\n" \
506 " .long 42b,3b\n" \
507 " .long 43b,3b\n" \
508 " .long 44b,3b\n" \
509 " .long 45b,4b\n" \
487 " .previous\n" \ 510 " .previous\n" \
488 : "=r" (to), "=r" (from), "=r" (ret), "=d" (n) \ 511 : "=r" (to), "=r" (from), "=r" (ret), "=d" (n) \
489 : "0" (to), "1" (from), "2" (ret), "3" (n) \ 512 : "0" (to), "1" (from), "2" (ret), "3" (n) \
490 : "D1Ar1", "D0Ar2", "memory") 513 : "D1Ar1", "D0Ar2", "cc", "memory")
491 514
492/* rewind 'to' and 'from' pointers when a fault occurs 515/* rewind 'to' and 'from' pointers when a fault occurs
493 * 516 *
@@ -513,7 +536,7 @@
 #define __asm_copy_to_user_32bit_rapf_loop(to, from, ret, n, id)\
 	__asm_copy_user_32bit_rapf_loop(to, from, ret, n, id,	\
 		"LSR	D0Ar2, D0Ar2, #8\n"			\
-		"AND	D0Ar2, D0Ar2, #0x7\n"			\
+		"ANDS	D0Ar2, D0Ar2, #0x7\n"			\
 		"ADDZ	D0Ar2, D0Ar2, #4\n"			\
 		"SUB	D0Ar2, D0Ar2, #1\n"			\
 		"MOV	D1Ar1, #4\n"				\
@@ -538,23 +561,31 @@ unsigned long __copy_user(void __user *pdst, const void *psrc,
 	if ((unsigned long) src & 1) {
 		__asm_copy_to_user_1(dst, src, retn);
 		n--;
+		if (retn)
+			return retn + n;
 	}
 	if ((unsigned long) dst & 1) {
 		/* Worst case - byte copy */
 		while (n > 0) {
 			__asm_copy_to_user_1(dst, src, retn);
 			n--;
+			if (retn)
+				return retn + n;
 		}
 	}
 	if (((unsigned long) src & 2) && n >= 2) {
 		__asm_copy_to_user_2(dst, src, retn);
 		n -= 2;
+		if (retn)
+			return retn + n;
 	}
 	if ((unsigned long) dst & 2) {
 		/* Second worst case - word copy */
 		while (n >= 2) {
 			__asm_copy_to_user_2(dst, src, retn);
 			n -= 2;
+			if (retn)
+				return retn + n;
 		}
 	}
 
@@ -569,6 +600,8 @@ unsigned long __copy_user(void __user *pdst, const void *psrc,
 		while (n >= 8) {
 			__asm_copy_to_user_8x64(dst, src, retn);
 			n -= 8;
+			if (retn)
+				return retn + n;
 		}
 	}
 	if (n >= RAPF_MIN_BUF_SIZE) {
@@ -581,6 +614,8 @@ unsigned long __copy_user(void __user *pdst, const void *psrc,
 		while (n >= 8) {
 			__asm_copy_to_user_8x64(dst, src, retn);
 			n -= 8;
+			if (retn)
+				return retn + n;
 		}
 	}
 #endif
@@ -588,11 +623,15 @@ unsigned long __copy_user(void __user *pdst, const void *psrc,
 	while (n >= 16) {
 		__asm_copy_to_user_16(dst, src, retn);
 		n -= 16;
+		if (retn)
+			return retn + n;
 	}
 
 	while (n >= 4) {
 		__asm_copy_to_user_4(dst, src, retn);
 		n -= 4;
+		if (retn)
+			return retn + n;
 	}
 
 	switch (n) {
@@ -609,6 +648,10 @@ unsigned long __copy_user(void __user *pdst, const void *psrc,
 		break;
 	}
 
+	/*
+	 * If we get here, retn correctly reflects the number of failing
+	 * bytes.
+	 */
 	return retn;
 }
 EXPORT_SYMBOL(__copy_user);
@@ -617,16 +660,14 @@ EXPORT_SYMBOL(__copy_user);
 	__asm_copy_user_cont(to, from, ret,	\
 		"	GETB D1Ar1,[%1++]\n"	\
 		"2:	SETB [%0++],D1Ar1\n",	\
-		"3:	ADD  %2,%2,#1\n"	\
-		"	SETB [%0++],D1Ar1\n",	\
+		"3:	ADD  %2,%2,#1\n",	\
 		"	.long 2b,3b\n")
 
 #define __asm_copy_from_user_2x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
 	__asm_copy_user_cont(to, from, ret,		\
 		"	GETW D1Ar1,[%1++]\n"		\
 		"2:	SETW [%0++],D1Ar1\n" COPY,	\
-		"3:	ADD  %2,%2,#2\n"		\
-		"	SETW [%0++],D1Ar1\n" FIXUP,	\
+		"3:	ADD  %2,%2,#2\n" FIXUP,		\
 		"	.long 2b,3b\n" TENTRY)
 
 #define __asm_copy_from_user_2(to, from, ret) \
@@ -636,145 +677,26 @@ EXPORT_SYMBOL(__copy_user);
636 __asm_copy_from_user_2x_cont(to, from, ret, \ 677 __asm_copy_from_user_2x_cont(to, from, ret, \
637 " GETB D1Ar1,[%1++]\n" \ 678 " GETB D1Ar1,[%1++]\n" \
638 "4: SETB [%0++],D1Ar1\n", \ 679 "4: SETB [%0++],D1Ar1\n", \
639 "5: ADD %2,%2,#1\n" \ 680 "5: ADD %2,%2,#1\n", \
640 " SETB [%0++],D1Ar1\n", \
641 " .long 4b,5b\n") 681 " .long 4b,5b\n")
642 682
643#define __asm_copy_from_user_4x_cont(to, from, ret, COPY, FIXUP, TENTRY) \ 683#define __asm_copy_from_user_4x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
644 __asm_copy_user_cont(to, from, ret, \ 684 __asm_copy_user_cont(to, from, ret, \
645 " GETD D1Ar1,[%1++]\n" \ 685 " GETD D1Ar1,[%1++]\n" \
646 "2: SETD [%0++],D1Ar1\n" COPY, \ 686 "2: SETD [%0++],D1Ar1\n" COPY, \
647 "3: ADD %2,%2,#4\n" \ 687 "3: ADD %2,%2,#4\n" FIXUP, \
648 " SETD [%0++],D1Ar1\n" FIXUP, \
649 " .long 2b,3b\n" TENTRY) 688 " .long 2b,3b\n" TENTRY)
650 689
651#define __asm_copy_from_user_4(to, from, ret) \ 690#define __asm_copy_from_user_4(to, from, ret) \
652 __asm_copy_from_user_4x_cont(to, from, ret, "", "", "") 691 __asm_copy_from_user_4x_cont(to, from, ret, "", "", "")
653 692
654#define __asm_copy_from_user_5(to, from, ret) \
655 __asm_copy_from_user_4x_cont(to, from, ret, \
656 " GETB D1Ar1,[%1++]\n" \
657 "4: SETB [%0++],D1Ar1\n", \
658 "5: ADD %2,%2,#1\n" \
659 " SETB [%0++],D1Ar1\n", \
660 " .long 4b,5b\n")
661
662#define __asm_copy_from_user_6x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
663 __asm_copy_from_user_4x_cont(to, from, ret, \
664 " GETW D1Ar1,[%1++]\n" \
665 "4: SETW [%0++],D1Ar1\n" COPY, \
666 "5: ADD %2,%2,#2\n" \
667 " SETW [%0++],D1Ar1\n" FIXUP, \
668 " .long 4b,5b\n" TENTRY)
669
670#define __asm_copy_from_user_6(to, from, ret) \
671 __asm_copy_from_user_6x_cont(to, from, ret, "", "", "")
672
673#define __asm_copy_from_user_7(to, from, ret) \
674 __asm_copy_from_user_6x_cont(to, from, ret, \
675 " GETB D1Ar1,[%1++]\n" \
676 "6: SETB [%0++],D1Ar1\n", \
677 "7: ADD %2,%2,#1\n" \
678 " SETB [%0++],D1Ar1\n", \
679 " .long 6b,7b\n")
680
681#define __asm_copy_from_user_8x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
682 __asm_copy_from_user_4x_cont(to, from, ret, \
683 " GETD D1Ar1,[%1++]\n" \
684 "4: SETD [%0++],D1Ar1\n" COPY, \
685 "5: ADD %2,%2,#4\n" \
686 " SETD [%0++],D1Ar1\n" FIXUP, \
687 " .long 4b,5b\n" TENTRY)
688
689#define __asm_copy_from_user_8(to, from, ret) \
690 __asm_copy_from_user_8x_cont(to, from, ret, "", "", "")
691
692#define __asm_copy_from_user_9(to, from, ret) \
693 __asm_copy_from_user_8x_cont(to, from, ret, \
694 " GETB D1Ar1,[%1++]\n" \
695 "6: SETB [%0++],D1Ar1\n", \
696 "7: ADD %2,%2,#1\n" \
697 " SETB [%0++],D1Ar1\n", \
698 " .long 6b,7b\n")
699
700#define __asm_copy_from_user_10x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
701 __asm_copy_from_user_8x_cont(to, from, ret, \
702 " GETW D1Ar1,[%1++]\n" \
703 "6: SETW [%0++],D1Ar1\n" COPY, \
704 "7: ADD %2,%2,#2\n" \
705 " SETW [%0++],D1Ar1\n" FIXUP, \
706 " .long 6b,7b\n" TENTRY)
707
708#define __asm_copy_from_user_10(to, from, ret) \
709 __asm_copy_from_user_10x_cont(to, from, ret, "", "", "")
710
711#define __asm_copy_from_user_11(to, from, ret) \
712 __asm_copy_from_user_10x_cont(to, from, ret, \
713 " GETB D1Ar1,[%1++]\n" \
714 "8: SETB [%0++],D1Ar1\n", \
715 "9: ADD %2,%2,#1\n" \
716 " SETB [%0++],D1Ar1\n", \
717 " .long 8b,9b\n")
718
719#define __asm_copy_from_user_12x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
720 __asm_copy_from_user_8x_cont(to, from, ret, \
721 " GETD D1Ar1,[%1++]\n" \
722 "6: SETD [%0++],D1Ar1\n" COPY, \
723 "7: ADD %2,%2,#4\n" \
724 " SETD [%0++],D1Ar1\n" FIXUP, \
725 " .long 6b,7b\n" TENTRY)
726
727#define __asm_copy_from_user_12(to, from, ret) \
728 __asm_copy_from_user_12x_cont(to, from, ret, "", "", "")
729
730#define __asm_copy_from_user_13(to, from, ret) \
731 __asm_copy_from_user_12x_cont(to, from, ret, \
732 " GETB D1Ar1,[%1++]\n" \
733 "8: SETB [%0++],D1Ar1\n", \
734 "9: ADD %2,%2,#1\n" \
735 " SETB [%0++],D1Ar1\n", \
736 " .long 8b,9b\n")
737
738#define __asm_copy_from_user_14x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
739 __asm_copy_from_user_12x_cont(to, from, ret, \
740 " GETW D1Ar1,[%1++]\n" \
741 "8: SETW [%0++],D1Ar1\n" COPY, \
742 "9: ADD %2,%2,#2\n" \
743 " SETW [%0++],D1Ar1\n" FIXUP, \
744 " .long 8b,9b\n" TENTRY)
745
746#define __asm_copy_from_user_14(to, from, ret) \
747 __asm_copy_from_user_14x_cont(to, from, ret, "", "", "")
748
749#define __asm_copy_from_user_15(to, from, ret) \
750 __asm_copy_from_user_14x_cont(to, from, ret, \
751 " GETB D1Ar1,[%1++]\n" \
752 "10: SETB [%0++],D1Ar1\n", \
753 "11: ADD %2,%2,#1\n" \
754 " SETB [%0++],D1Ar1\n", \
755 " .long 10b,11b\n")
756
757#define __asm_copy_from_user_16x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
758 __asm_copy_from_user_12x_cont(to, from, ret, \
759 " GETD D1Ar1,[%1++]\n" \
760 "8: SETD [%0++],D1Ar1\n" COPY, \
761 "9: ADD %2,%2,#4\n" \
762 " SETD [%0++],D1Ar1\n" FIXUP, \
763 " .long 8b,9b\n" TENTRY)
764
765#define __asm_copy_from_user_16(to, from, ret) \
766 __asm_copy_from_user_16x_cont(to, from, ret, "", "", "")
767
768#define __asm_copy_from_user_8x64(to, from, ret) \ 693#define __asm_copy_from_user_8x64(to, from, ret) \
769 asm volatile ( \ 694 asm volatile ( \
770 " GETL D0Ar2,D1Ar1,[%1++]\n" \ 695 " GETL D0Ar2,D1Ar1,[%1++]\n" \
771 "2: SETL [%0++],D0Ar2,D1Ar1\n" \ 696 "2: SETL [%0++],D0Ar2,D1Ar1\n" \
772 "1:\n" \ 697 "1:\n" \
773 " .section .fixup,\"ax\"\n" \ 698 " .section .fixup,\"ax\"\n" \
774 " MOV D1Ar1,#0\n" \
775 " MOV D0Ar2,#0\n" \
776 "3: ADD %2,%2,#8\n" \ 699 "3: ADD %2,%2,#8\n" \
777 " SETL [%0++],D0Ar2,D1Ar1\n" \
778 " MOVT D0Ar2,#HI(1b)\n" \ 700 " MOVT D0Ar2,#HI(1b)\n" \
779 " JUMP D0Ar2,#LO(1b)\n" \ 701 " JUMP D0Ar2,#LO(1b)\n" \
780 " .previous\n" \ 702 " .previous\n" \
@@ -789,36 +711,57 @@ EXPORT_SYMBOL(__copy_user);
789 * 711 *
790 * Rationale: 712 * Rationale:
791 * A fault occurs while reading from user buffer, which is the 713 * A fault occurs while reading from user buffer, which is the
792 * source. Since the fault is at a single address, we only 714 * source.
793 * need to rewind by 8 bytes.
794 * Since we don't write to kernel buffer until we read first, 715 * Since we don't write to kernel buffer until we read first,
795 * the kernel buffer is at the right state and needn't be 716 * the kernel buffer is at the right state and needn't be
796 * corrected. 717 * corrected, but the source must be rewound to the beginning of
718 * the block, which is LSM_STEP*8 bytes.
719 * LSM_STEP is bits 10:8 in TXSTATUS which is already read
720 * and stored in D0Ar2
721 *
722 * NOTE: If a fault occurs at the last operation in M{G,S}ETL
723 * LSM_STEP will be 0. ie: we do 4 writes in our case, if
724 * a fault happens at the 4th write, LSM_STEP will be 0
725 * instead of 4. The code copes with that.
797 */ 726 */
798#define __asm_copy_from_user_64bit_rapf_loop(to, from, ret, n, id) \ 727#define __asm_copy_from_user_64bit_rapf_loop(to, from, ret, n, id) \
799 __asm_copy_user_64bit_rapf_loop(to, from, ret, n, id, \ 728 __asm_copy_user_64bit_rapf_loop(to, from, ret, n, id, \
800 "SUB %1, %1, #8\n") 729 "LSR D0Ar2, D0Ar2, #5\n" \
730 "ANDS D0Ar2, D0Ar2, #0x38\n" \
731 "ADDZ D0Ar2, D0Ar2, #32\n" \
732 "SUB %1, %1, D0Ar2\n")
801 733
802/* rewind 'from' pointer when a fault occurs 734/* rewind 'from' pointer when a fault occurs
803 * 735 *
804 * Rationale: 736 * Rationale:
805 * A fault occurs while reading from user buffer, which is the 737 * A fault occurs while reading from user buffer, which is the
806 * source. Since the fault is at a single address, we only 738 * source.
807 * need to rewind by 4 bytes.
808 * Since we don't write to kernel buffer until we read first, 739 * Since we don't write to kernel buffer until we read first,
809 * the kernel buffer is at the right state and needn't be 740 * the kernel buffer is at the right state and needn't be
810 * corrected. 741 * corrected, but the source must be rewound to the beginning of
742 * the block, which is LSM_STEP*4 bytes.
743 * LSM_STEP is bits 10:8 in TXSTATUS which is already read
744 * and stored in D0Ar2
745 *
 746 * NOTE: If a fault occurs at the last operation in M{G,S}ETD
747 * LSM_STEP will be 0. ie: we do 4 writes in our case, if
748 * a fault happens at the 4th write, LSM_STEP will be 0
749 * instead of 4. The code copes with that.
811 */ 750 */
812#define __asm_copy_from_user_32bit_rapf_loop(to, from, ret, n, id) \ 751#define __asm_copy_from_user_32bit_rapf_loop(to, from, ret, n, id) \
813 __asm_copy_user_32bit_rapf_loop(to, from, ret, n, id, \ 752 __asm_copy_user_32bit_rapf_loop(to, from, ret, n, id, \
814 "SUB %1, %1, #4\n") 753 "LSR D0Ar2, D0Ar2, #6\n" \
754 "ANDS D0Ar2, D0Ar2, #0x1c\n" \
755 "ADDZ D0Ar2, D0Ar2, #16\n" \
756 "SUB %1, %1, D0Ar2\n")
815 757
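The LSR/ANDS/ADDZ sequences in the two fixups above encode the same rewind computation at different widths; spelled out in C it looks like this (a sketch; rapf_rewind() is an invented name, TXSTATUS and LSM_STEP are as described in the comments):

	/* Sketch: how far to rewind the source pointer after a fault. */
	static unsigned long rapf_rewind(unsigned long txstatus, unsigned long width)
	{
		unsigned long lsm_step = (txstatus >> 8) & 0x7;	/* bits 10:8 */
		unsigned long rewind = lsm_step * width;	/* width: 8 or 4 */

		/* ADDZ case: a fault at the 4th operation reports LSM_STEP == 0 */
		if (rewind == 0)
			rewind = 4 * width;
		return rewind;	/* SUB this from the source pointer */
	}

For the 64-bit loop the pair LSR #5 / ANDS #0x38 is exactly lsm_step * 8 and ADDZ #32 is the 4 * width fallback; the 32-bit loop uses LSR #6 / ANDS #0x1c / ADDZ #16 for lsm_step * 4.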
816 758
817/* Copy from user to kernel, zeroing the bytes that were inaccessible in 759/*
818 userland. The return-value is the number of bytes that were 760 * Copy from user to kernel. The return-value is the number of bytes that were
819 inaccessible. */ 761 * inaccessible.
820unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc, 762 */
821 unsigned long n) 763unsigned long raw_copy_from_user(void *pdst, const void __user *psrc,
764 unsigned long n)
822{ 765{
823 register char *dst asm ("A0.2") = pdst; 766 register char *dst asm ("A0.2") = pdst;
824 register const char __user *src asm ("A1.2") = psrc; 767 register const char __user *src asm ("A1.2") = psrc;
@@ -830,6 +773,8 @@ unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc,
830 if ((unsigned long) src & 1) { 773 if ((unsigned long) src & 1) {
831 __asm_copy_from_user_1(dst, src, retn); 774 __asm_copy_from_user_1(dst, src, retn);
832 n--; 775 n--;
776 if (retn)
777 return retn + n;
833 } 778 }
834 if ((unsigned long) dst & 1) { 779 if ((unsigned long) dst & 1) {
835 /* Worst case - byte copy */ 780 /* Worst case - byte copy */
@@ -837,12 +782,14 @@ unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc,
837 __asm_copy_from_user_1(dst, src, retn); 782 __asm_copy_from_user_1(dst, src, retn);
838 n--; 783 n--;
839 if (retn) 784 if (retn)
840 goto copy_exception_bytes; 785 return retn + n;
841 } 786 }
842 } 787 }
843 if (((unsigned long) src & 2) && n >= 2) { 788 if (((unsigned long) src & 2) && n >= 2) {
844 __asm_copy_from_user_2(dst, src, retn); 789 __asm_copy_from_user_2(dst, src, retn);
845 n -= 2; 790 n -= 2;
791 if (retn)
792 return retn + n;
846 } 793 }
847 if ((unsigned long) dst & 2) { 794 if ((unsigned long) dst & 2) {
848 /* Second worst case - word copy */ 795 /* Second worst case - word copy */
@@ -850,16 +797,10 @@ unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc,
850 __asm_copy_from_user_2(dst, src, retn); 797 __asm_copy_from_user_2(dst, src, retn);
851 n -= 2; 798 n -= 2;
852 if (retn) 799 if (retn)
853 goto copy_exception_bytes; 800 return retn + n;
854 } 801 }
855 } 802 }
856 803
857 /* We only need one check after the unalignment-adjustments,
858 because if both adjustments were done, either both or
859 neither reference had an exception. */
860 if (retn != 0)
861 goto copy_exception_bytes;
862
863#ifdef USE_RAPF 804#ifdef USE_RAPF
864 /* 64 bit copy loop */ 805 /* 64 bit copy loop */
865 if (!(((unsigned long) src | (unsigned long) dst) & 7)) { 806 if (!(((unsigned long) src | (unsigned long) dst) & 7)) {
@@ -872,7 +813,7 @@ unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc,
872 __asm_copy_from_user_8x64(dst, src, retn); 813 __asm_copy_from_user_8x64(dst, src, retn);
873 n -= 8; 814 n -= 8;
874 if (retn) 815 if (retn)
875 goto copy_exception_bytes; 816 return retn + n;
876 } 817 }
877 } 818 }
878 819
@@ -888,7 +829,7 @@ unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc,
888 __asm_copy_from_user_8x64(dst, src, retn); 829 __asm_copy_from_user_8x64(dst, src, retn);
889 n -= 8; 830 n -= 8;
890 if (retn) 831 if (retn)
891 goto copy_exception_bytes; 832 return retn + n;
892 } 833 }
893 } 834 }
894#endif 835#endif
@@ -898,7 +839,7 @@ unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc,
898 n -= 4; 839 n -= 4;
899 840
900 if (retn) 841 if (retn)
901 goto copy_exception_bytes; 842 return retn + n;
902 } 843 }
903 844
904 /* If we get here, there were no memory read faults. */ 845 /* If we get here, there were no memory read faults. */
@@ -924,21 +865,8 @@ unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc,
924 /* If we get here, retn correctly reflects the number of failing 865 /* If we get here, retn correctly reflects the number of failing
925 bytes. */ 866 bytes. */
926 return retn; 867 return retn;
927
928 copy_exception_bytes:
929 /* We already have "retn" bytes cleared, and need to clear the
930 remaining "n" bytes. A non-optimized simple byte-for-byte in-line
931 memset is preferred here, since this isn't speed-critical code and
932 we'd rather have this a leaf-function than calling memset. */
933 {
934 char *endp;
935 for (endp = dst + n; dst < endp; dst++)
936 *dst = 0;
937 }
938
939 return retn + n;
940} 868}
941EXPORT_SYMBOL(__copy_user_zeroing); 869EXPORT_SYMBOL(raw_copy_from_user);
942 870
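Note that the old zero-fill of the unread tail has not simply been dropped: under the raw_copy_from_user() interface the zeroing is done once, in the generic copy_from_user() wrapper. Roughly (a simplified sketch of the generic pattern, with access_ok() and instrumentation omitted; not part of this patch):

	static inline unsigned long
	copy_from_user_sketch(void *to, const void __user *from, unsigned long n)
	{
		unsigned long res = raw_copy_from_user(to, from, n);

		/* Zero whatever raw_copy_from_user() could not read. */
		if (unlikely(res))
			memset(to + (n - res), 0, res);
		return res;	/* bytes not copied */
	}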
943#define __asm_clear_8x64(to, ret) \ 871#define __asm_clear_8x64(to, ret) \
944 asm volatile ( \ 872 asm volatile ( \
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index a008a9f03072..e0bb576410bb 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -1531,7 +1531,7 @@ config CPU_MIPS64_R6
1531 select CPU_SUPPORTS_HIGHMEM 1531 select CPU_SUPPORTS_HIGHMEM
1532 select CPU_SUPPORTS_MSA 1532 select CPU_SUPPORTS_MSA
1533 select GENERIC_CSUM 1533 select GENERIC_CSUM
1534 select MIPS_O32_FP64_SUPPORT if MIPS32_O32 1534 select MIPS_O32_FP64_SUPPORT if 32BIT || MIPS32_O32
1535 select HAVE_KVM 1535 select HAVE_KVM
1536 help 1536 help
1537 Choose this option to build a kernel for release 6 or later of the 1537 Choose this option to build a kernel for release 6 or later of the
diff --git a/arch/mips/include/asm/fpu.h b/arch/mips/include/asm/fpu.h
index f94455f964ec..a2813fe381cf 100644
--- a/arch/mips/include/asm/fpu.h
+++ b/arch/mips/include/asm/fpu.h
@@ -21,6 +21,7 @@
21#include <asm/cpu-features.h> 21#include <asm/cpu-features.h>
22#include <asm/fpu_emulator.h> 22#include <asm/fpu_emulator.h>
23#include <asm/hazards.h> 23#include <asm/hazards.h>
24#include <asm/ptrace.h>
24#include <asm/processor.h> 25#include <asm/processor.h>
25#include <asm/current.h> 26#include <asm/current.h>
26#include <asm/msa.h> 27#include <asm/msa.h>
diff --git a/arch/mips/include/asm/irq.h b/arch/mips/include/asm/irq.h
index 956db6e201d1..ddd1c918103b 100644
--- a/arch/mips/include/asm/irq.h
+++ b/arch/mips/include/asm/irq.h
@@ -18,9 +18,24 @@
18#include <irq.h> 18#include <irq.h>
19 19
20#define IRQ_STACK_SIZE THREAD_SIZE 20#define IRQ_STACK_SIZE THREAD_SIZE
21#define IRQ_STACK_START (IRQ_STACK_SIZE - sizeof(unsigned long))
21 22
22extern void *irq_stack[NR_CPUS]; 23extern void *irq_stack[NR_CPUS];
23 24
25/*
26 * The highest address on the IRQ stack contains a dummy frame put down in
27 * genex.S (handle_int & except_vec_vi_handler) which is structured as follows:
28 *
29 * top ------------
30 * | task sp | <- irq_stack[cpu] + IRQ_STACK_START
31 * ------------
32 * | | <- First frame of IRQ context
33 * ------------
34 *
35 * task sp holds a copy of the task stack pointer where the struct pt_regs
36 * from exception entry can be found.
37 */
38
24static inline bool on_irq_stack(int cpu, unsigned long sp) 39static inline bool on_irq_stack(int cpu, unsigned long sp)
25{ 40{
26 unsigned long low = (unsigned long)irq_stack[cpu]; 41 unsigned long low = (unsigned long)irq_stack[cpu];
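Given that layout, recovering the interrupted task's stack pointer is a single load from the top of the IRQ stack; as a sketch (saved_task_sp() is an invented helper, the real consumer is the unwinder in arch/mips/kernel/process.c below):

	/* Sketch: read the task sp from the dummy frame atop the IRQ stack. */
	static unsigned long saved_task_sp(int cpu)
	{
		unsigned long top = (unsigned long)irq_stack[cpu] + IRQ_STACK_START;

		/* genex.S stored the interrupted task's sp here (LONG_S s1, 0(sp)) */
		return *(unsigned long *)top;
	}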
diff --git a/arch/mips/include/asm/spinlock.h b/arch/mips/include/asm/spinlock.h
index f485afe51514..a8df44d60607 100644
--- a/arch/mips/include/asm/spinlock.h
+++ b/arch/mips/include/asm/spinlock.h
@@ -127,7 +127,7 @@ static inline void arch_spin_lock(arch_spinlock_t *lock)
127 " andi %[ticket], %[ticket], 0xffff \n" 127 " andi %[ticket], %[ticket], 0xffff \n"
128 " bne %[ticket], %[my_ticket], 4f \n" 128 " bne %[ticket], %[my_ticket], 4f \n"
129 " subu %[ticket], %[my_ticket], %[ticket] \n" 129 " subu %[ticket], %[my_ticket], %[ticket] \n"
130 "2: \n" 130 "2: .insn \n"
131 " .subsection 2 \n" 131 " .subsection 2 \n"
132 "4: andi %[ticket], %[ticket], 0xffff \n" 132 "4: andi %[ticket], %[ticket], 0xffff \n"
133 " sll %[ticket], 5 \n" 133 " sll %[ticket], 5 \n"
@@ -202,7 +202,7 @@ static inline unsigned int arch_spin_trylock(arch_spinlock_t *lock)
202 " sc %[ticket], %[ticket_ptr] \n" 202 " sc %[ticket], %[ticket_ptr] \n"
203 " beqz %[ticket], 1b \n" 203 " beqz %[ticket], 1b \n"
204 " li %[ticket], 1 \n" 204 " li %[ticket], 1 \n"
205 "2: \n" 205 "2: .insn \n"
206 " .subsection 2 \n" 206 " .subsection 2 \n"
207 "3: b 2b \n" 207 "3: b 2b \n"
208 " li %[ticket], 0 \n" 208 " li %[ticket], 0 \n"
@@ -382,7 +382,7 @@ static inline int arch_read_trylock(arch_rwlock_t *rw)
382 " .set reorder \n" 382 " .set reorder \n"
383 __WEAK_LLSC_MB 383 __WEAK_LLSC_MB
384 " li %2, 1 \n" 384 " li %2, 1 \n"
385 "2: \n" 385 "2: .insn \n"
386 : "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp), "=&r" (ret) 386 : "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp), "=&r" (ret)
387 : GCC_OFF_SMALL_ASM() (rw->lock) 387 : GCC_OFF_SMALL_ASM() (rw->lock)
388 : "memory"); 388 : "memory");
@@ -422,7 +422,7 @@ static inline int arch_write_trylock(arch_rwlock_t *rw)
422 " lui %1, 0x8000 \n" 422 " lui %1, 0x8000 \n"
423 " sc %1, %0 \n" 423 " sc %1, %0 \n"
424 " li %2, 1 \n" 424 " li %2, 1 \n"
425 "2: \n" 425 "2: .insn \n"
426 : "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp), 426 : "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp),
427 "=&r" (ret) 427 "=&r" (ret)
428 : GCC_OFF_SMALL_ASM() (rw->lock) 428 : GCC_OFF_SMALL_ASM() (rw->lock)
diff --git a/arch/mips/include/uapi/asm/unistd.h b/arch/mips/include/uapi/asm/unistd.h
index 3e940dbe0262..78faf4292e90 100644
--- a/arch/mips/include/uapi/asm/unistd.h
+++ b/arch/mips/include/uapi/asm/unistd.h
@@ -386,17 +386,18 @@
386#define __NR_pkey_mprotect (__NR_Linux + 363) 386#define __NR_pkey_mprotect (__NR_Linux + 363)
387#define __NR_pkey_alloc (__NR_Linux + 364) 387#define __NR_pkey_alloc (__NR_Linux + 364)
388#define __NR_pkey_free (__NR_Linux + 365) 388#define __NR_pkey_free (__NR_Linux + 365)
389#define __NR_statx (__NR_Linux + 366)
389 390
390 391
391/* 392/*
392 * Offset of the last Linux o32 flavoured syscall 393 * Offset of the last Linux o32 flavoured syscall
393 */ 394 */
394#define __NR_Linux_syscalls 365 395#define __NR_Linux_syscalls 366
395 396
396#endif /* _MIPS_SIM == _MIPS_SIM_ABI32 */ 397#endif /* _MIPS_SIM == _MIPS_SIM_ABI32 */
397 398
398#define __NR_O32_Linux 4000 399#define __NR_O32_Linux 4000
399#define __NR_O32_Linux_syscalls 365 400#define __NR_O32_Linux_syscalls 366
400 401
401#if _MIPS_SIM == _MIPS_SIM_ABI64 402#if _MIPS_SIM == _MIPS_SIM_ABI64
402 403
@@ -730,16 +731,17 @@
730#define __NR_pkey_mprotect (__NR_Linux + 323) 731#define __NR_pkey_mprotect (__NR_Linux + 323)
731#define __NR_pkey_alloc (__NR_Linux + 324) 732#define __NR_pkey_alloc (__NR_Linux + 324)
732#define __NR_pkey_free (__NR_Linux + 325) 733#define __NR_pkey_free (__NR_Linux + 325)
734#define __NR_statx (__NR_Linux + 326)
733 735
734/* 736/*
735 * Offset of the last Linux 64-bit flavoured syscall 737 * Offset of the last Linux 64-bit flavoured syscall
736 */ 738 */
737#define __NR_Linux_syscalls 325 739#define __NR_Linux_syscalls 326
738 740
739#endif /* _MIPS_SIM == _MIPS_SIM_ABI64 */ 741#endif /* _MIPS_SIM == _MIPS_SIM_ABI64 */
740 742
741#define __NR_64_Linux 5000 743#define __NR_64_Linux 5000
742#define __NR_64_Linux_syscalls 325 744#define __NR_64_Linux_syscalls 326
743 745
744#if _MIPS_SIM == _MIPS_SIM_NABI32 746#if _MIPS_SIM == _MIPS_SIM_NABI32
745 747
@@ -1077,15 +1079,16 @@
1077#define __NR_pkey_mprotect (__NR_Linux + 327) 1079#define __NR_pkey_mprotect (__NR_Linux + 327)
1078#define __NR_pkey_alloc (__NR_Linux + 328) 1080#define __NR_pkey_alloc (__NR_Linux + 328)
1079#define __NR_pkey_free (__NR_Linux + 329) 1081#define __NR_pkey_free (__NR_Linux + 329)
1082#define __NR_statx (__NR_Linux + 330)
1080 1083
1081/* 1084/*
1082 * Offset of the last N32 flavoured syscall 1085 * Offset of the last N32 flavoured syscall
1083 */ 1086 */
1084#define __NR_Linux_syscalls 329 1087#define __NR_Linux_syscalls 330
1085 1088
1086#endif /* _MIPS_SIM == _MIPS_SIM_NABI32 */ 1089#endif /* _MIPS_SIM == _MIPS_SIM_NABI32 */
1087 1090
1088#define __NR_N32_Linux 6000 1091#define __NR_N32_Linux 6000
1089#define __NR_N32_Linux_syscalls 329 1092#define __NR_N32_Linux_syscalls 330
1090 1093
1091#endif /* _UAPI_ASM_UNISTD_H */ 1094#endif /* _UAPI_ASM_UNISTD_H */
diff --git a/arch/mips/kernel/asm-offsets.c b/arch/mips/kernel/asm-offsets.c
index bb5c5d34ba81..a670c0c11875 100644
--- a/arch/mips/kernel/asm-offsets.c
+++ b/arch/mips/kernel/asm-offsets.c
@@ -102,6 +102,7 @@ void output_thread_info_defines(void)
102 DEFINE(_THREAD_SIZE, THREAD_SIZE); 102 DEFINE(_THREAD_SIZE, THREAD_SIZE);
103 DEFINE(_THREAD_MASK, THREAD_MASK); 103 DEFINE(_THREAD_MASK, THREAD_MASK);
104 DEFINE(_IRQ_STACK_SIZE, IRQ_STACK_SIZE); 104 DEFINE(_IRQ_STACK_SIZE, IRQ_STACK_SIZE);
105 DEFINE(_IRQ_STACK_START, IRQ_STACK_START);
105 BLANK(); 106 BLANK();
106} 107}
107 108
diff --git a/arch/mips/kernel/cps-vec.S b/arch/mips/kernel/cps-vec.S
index 59476a607add..a00e87b0256d 100644
--- a/arch/mips/kernel/cps-vec.S
+++ b/arch/mips/kernel/cps-vec.S
@@ -361,7 +361,7 @@ LEAF(mips_cps_get_bootcfg)
361 END(mips_cps_get_bootcfg) 361 END(mips_cps_get_bootcfg)
362 362
363LEAF(mips_cps_boot_vpes) 363LEAF(mips_cps_boot_vpes)
364 PTR_L ta2, COREBOOTCFG_VPEMASK(a0) 364 lw ta2, COREBOOTCFG_VPEMASK(a0)
365 PTR_L ta3, COREBOOTCFG_VPECONFIG(a0) 365 PTR_L ta3, COREBOOTCFG_VPECONFIG(a0)
366 366
367#if defined(CONFIG_CPU_MIPSR6) 367#if defined(CONFIG_CPU_MIPSR6)
diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c
index 07718bb5fc9d..12422fd4af23 100644
--- a/arch/mips/kernel/cpu-probe.c
+++ b/arch/mips/kernel/cpu-probe.c
@@ -1824,7 +1824,7 @@ static inline void cpu_probe_loongson(struct cpuinfo_mips *c, unsigned int cpu)
1824 } 1824 }
1825 1825
1826 decode_configs(c); 1826 decode_configs(c);
1827 c->options |= MIPS_CPU_TLBINV | MIPS_CPU_LDPTE; 1827 c->options |= MIPS_CPU_FTLB | MIPS_CPU_TLBINV | MIPS_CPU_LDPTE;
1828 c->writecombine = _CACHE_UNCACHED_ACCELERATED; 1828 c->writecombine = _CACHE_UNCACHED_ACCELERATED;
1829 break; 1829 break;
1830 default: 1830 default:
diff --git a/arch/mips/kernel/genex.S b/arch/mips/kernel/genex.S
index 7ec9612cb007..ae810da4d499 100644
--- a/arch/mips/kernel/genex.S
+++ b/arch/mips/kernel/genex.S
@@ -215,9 +215,11 @@ NESTED(handle_int, PT_SIZE, sp)
215 beq t0, t1, 2f 215 beq t0, t1, 2f
216 216
217 /* Switch to IRQ stack */ 217 /* Switch to IRQ stack */
218 li t1, _IRQ_STACK_SIZE 218 li t1, _IRQ_STACK_START
219 PTR_ADD sp, t0, t1 219 PTR_ADD sp, t0, t1
220 220
221 /* Save task's sp on IRQ stack so that unwinding can follow it */
222 LONG_S s1, 0(sp)
2212: 2232:
222 jal plat_irq_dispatch 224 jal plat_irq_dispatch
223 225
@@ -325,9 +327,11 @@ NESTED(except_vec_vi_handler, 0, sp)
325 beq t0, t1, 2f 327 beq t0, t1, 2f
326 328
327 /* Switch to IRQ stack */ 329 /* Switch to IRQ stack */
328 li t1, _IRQ_STACK_SIZE 330 li t1, _IRQ_STACK_START
329 PTR_ADD sp, t0, t1 331 PTR_ADD sp, t0, t1
330 332
333 /* Save task's sp on IRQ stack so that unwinding can follow it */
334 LONG_S s1, 0(sp)
3312: 3352:
332 jalr v0 336 jalr v0
333 337
@@ -519,7 +523,7 @@ NESTED(nmi_handler, PT_SIZE, sp)
519 BUILD_HANDLER reserved reserved sti verbose /* others */ 523 BUILD_HANDLER reserved reserved sti verbose /* others */
520 524
521 .align 5 525 .align 5
522 LEAF(handle_ri_rdhwr_vivt) 526 LEAF(handle_ri_rdhwr_tlbp)
523 .set push 527 .set push
524 .set noat 528 .set noat
525 .set noreorder 529 .set noreorder
@@ -538,7 +542,7 @@ NESTED(nmi_handler, PT_SIZE, sp)
538 .set pop 542 .set pop
539 bltz k1, handle_ri /* slow path */ 543 bltz k1, handle_ri /* slow path */
540 /* fall thru */ 544 /* fall thru */
541 END(handle_ri_rdhwr_vivt) 545 END(handle_ri_rdhwr_tlbp)
542 546
543 LEAF(handle_ri_rdhwr) 547 LEAF(handle_ri_rdhwr)
544 .set push 548 .set push
diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
index fb6b6b650719..b68e10fc453d 100644
--- a/arch/mips/kernel/process.c
+++ b/arch/mips/kernel/process.c
@@ -488,31 +488,52 @@ unsigned long notrace unwind_stack_by_address(unsigned long stack_page,
488 unsigned long pc, 488 unsigned long pc,
489 unsigned long *ra) 489 unsigned long *ra)
490{ 490{
491 unsigned long low, high, irq_stack_high;
491 struct mips_frame_info info; 492 struct mips_frame_info info;
492 unsigned long size, ofs; 493 unsigned long size, ofs;
494 struct pt_regs *regs;
493 int leaf; 495 int leaf;
494 extern void ret_from_irq(void);
495 extern void ret_from_exception(void);
496 496
497 if (!stack_page) 497 if (!stack_page)
498 return 0; 498 return 0;
499 499
500 /* 500 /*
501 * If we reached the bottom of interrupt context, 501 * IRQ stacks start at IRQ_STACK_START
502 * return saved pc in pt_regs. 502 * task stacks at THREAD_SIZE - 32
503 */ 503 */
504 if (pc == (unsigned long)ret_from_irq || 504 low = stack_page;
505 pc == (unsigned long)ret_from_exception) { 505 if (!preemptible() && on_irq_stack(raw_smp_processor_id(), *sp)) {
506 struct pt_regs *regs; 506 high = stack_page + IRQ_STACK_START;
507 if (*sp >= stack_page && 507 irq_stack_high = high;
508 *sp + sizeof(*regs) <= stack_page + THREAD_SIZE - 32) { 508 } else {
509 regs = (struct pt_regs *)*sp; 509 high = stack_page + THREAD_SIZE - 32;
510 pc = regs->cp0_epc; 510 irq_stack_high = 0;
511 if (!user_mode(regs) && __kernel_text_address(pc)) { 511 }
512 *sp = regs->regs[29]; 512
513 *ra = regs->regs[31]; 513 /*
514 return pc; 514 * If we reached the top of the interrupt stack, start unwinding
515 } 515 * the interrupted task stack.
516 */
517 if (unlikely(*sp == irq_stack_high)) {
518 unsigned long task_sp = *(unsigned long *)*sp;
519
520 /*
521 * Check that the pointer saved in the IRQ stack head points to
522 * something within the stack of the current task
523 */
524 if (!object_is_on_stack((void *)task_sp))
525 return 0;
526
527 /*
 528 * Follow pointer to task's kernel stack frame where interrupted
529 * state was saved.
530 */
531 regs = (struct pt_regs *)task_sp;
532 pc = regs->cp0_epc;
533 if (!user_mode(regs) && __kernel_text_address(pc)) {
534 *sp = regs->regs[29];
535 *ra = regs->regs[31];
536 return pc;
516 } 537 }
517 return 0; 538 return 0;
518 } 539 }
@@ -533,8 +554,7 @@ unsigned long notrace unwind_stack_by_address(unsigned long stack_page,
533 if (leaf < 0) 554 if (leaf < 0)
534 return 0; 555 return 0;
535 556
536 if (*sp < stack_page || 557 if (*sp < low || *sp + info.frame_size > high)
537 *sp + info.frame_size > stack_page + THREAD_SIZE - 32)
538 return 0; 558 return 0;
539 559
540 if (leaf) 560 if (leaf)
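With the saved task sp in place, a backtrace started on the IRQ stack now walks through into the interrupted context. Usage is unchanged from the existing unwind loop; as a sketch (dump_backtrace() is invented, unwind_stack() is the existing MIPS helper that wraps unwind_stack_by_address()):

	/* Sketch: walk kernel frames, crossing from IRQ stack to task stack. */
	static void dump_backtrace(struct task_struct *task,
				   unsigned long sp, unsigned long pc)
	{
		unsigned long ra = 0;

		do {
			printk(" [<%lx>] %pS\n", pc, (void *)pc);
			pc = unwind_stack(task, &sp, pc, &ra);	/* 0 when done */
		} while (pc);
	}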
diff --git a/arch/mips/kernel/scall32-o32.S b/arch/mips/kernel/scall32-o32.S
index c29d397eee86..80ed68b2c95e 100644
--- a/arch/mips/kernel/scall32-o32.S
+++ b/arch/mips/kernel/scall32-o32.S
@@ -600,3 +600,4 @@ EXPORT(sys_call_table)
600 PTR sys_pkey_mprotect 600 PTR sys_pkey_mprotect
601 PTR sys_pkey_alloc 601 PTR sys_pkey_alloc
602 PTR sys_pkey_free /* 4365 */ 602 PTR sys_pkey_free /* 4365 */
603 PTR sys_statx
diff --git a/arch/mips/kernel/scall64-64.S b/arch/mips/kernel/scall64-64.S
index 0687f96ee912..49765b44aa9b 100644
--- a/arch/mips/kernel/scall64-64.S
+++ b/arch/mips/kernel/scall64-64.S
@@ -438,4 +438,5 @@ EXPORT(sys_call_table)
438 PTR sys_pkey_mprotect 438 PTR sys_pkey_mprotect
439 PTR sys_pkey_alloc 439 PTR sys_pkey_alloc
440 PTR sys_pkey_free /* 5325 */ 440 PTR sys_pkey_free /* 5325 */
441 PTR sys_statx
441 .size sys_call_table,.-sys_call_table 442 .size sys_call_table,.-sys_call_table
diff --git a/arch/mips/kernel/scall64-n32.S b/arch/mips/kernel/scall64-n32.S
index 0331ba39a065..90bad2d1b2d3 100644
--- a/arch/mips/kernel/scall64-n32.S
+++ b/arch/mips/kernel/scall64-n32.S
@@ -433,4 +433,5 @@ EXPORT(sysn32_call_table)
433 PTR sys_pkey_mprotect 433 PTR sys_pkey_mprotect
434 PTR sys_pkey_alloc 434 PTR sys_pkey_alloc
435 PTR sys_pkey_free 435 PTR sys_pkey_free
436 PTR sys_statx /* 6330 */
436 .size sysn32_call_table,.-sysn32_call_table 437 .size sysn32_call_table,.-sysn32_call_table
diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S
index 5a47042dd25f..2dd70bd104e1 100644
--- a/arch/mips/kernel/scall64-o32.S
+++ b/arch/mips/kernel/scall64-o32.S
@@ -588,4 +588,5 @@ EXPORT(sys32_call_table)
588 PTR sys_pkey_mprotect 588 PTR sys_pkey_mprotect
589 PTR sys_pkey_alloc 589 PTR sys_pkey_alloc
590 PTR sys_pkey_free /* 4365 */ 590 PTR sys_pkey_free /* 4365 */
591 PTR sys_statx
591 .size sys32_call_table,.-sys32_call_table 592 .size sys32_call_table,.-sys32_call_table
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index c7d17cfb32f6..b49e7bf9f950 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -83,7 +83,7 @@ extern asmlinkage void handle_dbe(void);
83extern asmlinkage void handle_sys(void); 83extern asmlinkage void handle_sys(void);
84extern asmlinkage void handle_bp(void); 84extern asmlinkage void handle_bp(void);
85extern asmlinkage void handle_ri(void); 85extern asmlinkage void handle_ri(void);
86extern asmlinkage void handle_ri_rdhwr_vivt(void); 86extern asmlinkage void handle_ri_rdhwr_tlbp(void);
87extern asmlinkage void handle_ri_rdhwr(void); 87extern asmlinkage void handle_ri_rdhwr(void);
88extern asmlinkage void handle_cpu(void); 88extern asmlinkage void handle_cpu(void);
89extern asmlinkage void handle_ov(void); 89extern asmlinkage void handle_ov(void);
@@ -2408,9 +2408,18 @@ void __init trap_init(void)
2408 2408
2409 set_except_vector(EXCCODE_SYS, handle_sys); 2409 set_except_vector(EXCCODE_SYS, handle_sys);
2410 set_except_vector(EXCCODE_BP, handle_bp); 2410 set_except_vector(EXCCODE_BP, handle_bp);
2411 set_except_vector(EXCCODE_RI, rdhwr_noopt ? handle_ri : 2411
2412 (cpu_has_vtag_icache ? 2412 if (rdhwr_noopt)
2413 handle_ri_rdhwr_vivt : handle_ri_rdhwr)); 2413 set_except_vector(EXCCODE_RI, handle_ri);
2414 else {
2415 if (cpu_has_vtag_icache)
2416 set_except_vector(EXCCODE_RI, handle_ri_rdhwr_tlbp);
2417 else if (current_cpu_type() == CPU_LOONGSON3)
2418 set_except_vector(EXCCODE_RI, handle_ri_rdhwr_tlbp);
2419 else
2420 set_except_vector(EXCCODE_RI, handle_ri_rdhwr);
2421 }
2422
2414 set_except_vector(EXCCODE_CPU, handle_cpu); 2423 set_except_vector(EXCCODE_CPU, handle_cpu);
2415 set_except_vector(EXCCODE_OV, handle_ov); 2424 set_except_vector(EXCCODE_OV, handle_ov);
2416 set_except_vector(EXCCODE_TR, handle_tr); 2425 set_except_vector(EXCCODE_TR, handle_tr);
diff --git a/arch/mips/lantiq/xway/sysctrl.c b/arch/mips/lantiq/xway/sysctrl.c
index 3c3aa05891dd..95bec460b651 100644
--- a/arch/mips/lantiq/xway/sysctrl.c
+++ b/arch/mips/lantiq/xway/sysctrl.c
@@ -467,7 +467,7 @@ void __init ltq_soc_init(void)
467 467
468 if (!np_xbar) 468 if (!np_xbar)
469 panic("Failed to load xbar nodes from devicetree"); 469 panic("Failed to load xbar nodes from devicetree");
470 if (of_address_to_resource(np_pmu, 0, &res_xbar)) 470 if (of_address_to_resource(np_xbar, 0, &res_xbar))
471 panic("Failed to get xbar resources"); 471 panic("Failed to get xbar resources");
472 if (!request_mem_region(res_xbar.start, resource_size(&res_xbar), 472 if (!request_mem_region(res_xbar.start, resource_size(&res_xbar),
473 res_xbar.name)) 473 res_xbar.name))
diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c
index e7f798d55fbc..3fe99cb271a9 100644
--- a/arch/mips/mm/c-r4k.c
+++ b/arch/mips/mm/c-r4k.c
@@ -1562,6 +1562,7 @@ static void probe_vcache(void)
1562 vcache_size = c->vcache.sets * c->vcache.ways * c->vcache.linesz; 1562 vcache_size = c->vcache.sets * c->vcache.ways * c->vcache.linesz;
1563 1563
1564 c->vcache.waybit = 0; 1564 c->vcache.waybit = 0;
1565 c->vcache.waysize = vcache_size / c->vcache.ways;
1565 1566
1566 pr_info("Unified victim cache %ldkB %s, linesize %d bytes.\n", 1567 pr_info("Unified victim cache %ldkB %s, linesize %d bytes.\n",
1567 vcache_size >> 10, way_string[c->vcache.ways], c->vcache.linesz); 1568 vcache_size >> 10, way_string[c->vcache.ways], c->vcache.linesz);
@@ -1664,6 +1665,7 @@ static void __init loongson3_sc_init(void)
1664 /* Loongson-3 has 4 cores, 1MB scache for each. scaches are shared */ 1665 /* Loongson-3 has 4 cores, 1MB scache for each. scaches are shared */
1665 scache_size *= 4; 1666 scache_size *= 4;
1666 c->scache.waybit = 0; 1667 c->scache.waybit = 0;
1668 c->scache.waysize = scache_size / c->scache.ways;
1667 pr_info("Unified secondary cache %ldkB %s, linesize %d bytes.\n", 1669 pr_info("Unified secondary cache %ldkB %s, linesize %d bytes.\n",
1668 scache_size >> 10, way_string[c->scache.ways], c->scache.linesz); 1670 scache_size >> 10, way_string[c->scache.ways], c->scache.linesz);
1669 if (scache_size) 1671 if (scache_size)
diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c
index 9bfee8988eaf..4f642e07c2b1 100644
--- a/arch/mips/mm/tlbex.c
+++ b/arch/mips/mm/tlbex.c
@@ -760,7 +760,8 @@ static void build_huge_update_entries(u32 **p, unsigned int pte,
760static void build_huge_handler_tail(u32 **p, struct uasm_reloc **r, 760static void build_huge_handler_tail(u32 **p, struct uasm_reloc **r,
761 struct uasm_label **l, 761 struct uasm_label **l,
762 unsigned int pte, 762 unsigned int pte,
763 unsigned int ptr) 763 unsigned int ptr,
764 unsigned int flush)
764{ 765{
765#ifdef CONFIG_SMP 766#ifdef CONFIG_SMP
766 UASM_i_SC(p, pte, 0, ptr); 767 UASM_i_SC(p, pte, 0, ptr);
@@ -769,6 +770,22 @@ static void build_huge_handler_tail(u32 **p, struct uasm_reloc **r,
769#else 770#else
770 UASM_i_SW(p, pte, 0, ptr); 771 UASM_i_SW(p, pte, 0, ptr);
771#endif 772#endif
773 if (cpu_has_ftlb && flush) {
774 BUG_ON(!cpu_has_tlbinv);
775
776 UASM_i_MFC0(p, ptr, C0_ENTRYHI);
777 uasm_i_ori(p, ptr, ptr, MIPS_ENTRYHI_EHINV);
778 UASM_i_MTC0(p, ptr, C0_ENTRYHI);
779 build_tlb_write_entry(p, l, r, tlb_indexed);
780
781 uasm_i_xori(p, ptr, ptr, MIPS_ENTRYHI_EHINV);
782 UASM_i_MTC0(p, ptr, C0_ENTRYHI);
783 build_huge_update_entries(p, pte, ptr);
784 build_huge_tlb_write_entry(p, l, r, pte, tlb_random, 0);
785
786 return;
787 }
788
772 build_huge_update_entries(p, pte, ptr); 789 build_huge_update_entries(p, pte, ptr);
773 build_huge_tlb_write_entry(p, l, r, pte, tlb_indexed, 0); 790 build_huge_tlb_write_entry(p, l, r, pte, tlb_indexed, 0);
774} 791}
@@ -2199,7 +2216,7 @@ static void build_r4000_tlb_load_handler(void)
2199 uasm_l_tlbl_goaround2(&l, p); 2216 uasm_l_tlbl_goaround2(&l, p);
2200 } 2217 }
2201 uasm_i_ori(&p, wr.r1, wr.r1, (_PAGE_ACCESSED | _PAGE_VALID)); 2218 uasm_i_ori(&p, wr.r1, wr.r1, (_PAGE_ACCESSED | _PAGE_VALID));
2202 build_huge_handler_tail(&p, &r, &l, wr.r1, wr.r2); 2219 build_huge_handler_tail(&p, &r, &l, wr.r1, wr.r2, 1);
2203#endif 2220#endif
2204 2221
2205 uasm_l_nopage_tlbl(&l, p); 2222 uasm_l_nopage_tlbl(&l, p);
@@ -2254,7 +2271,7 @@ static void build_r4000_tlb_store_handler(void)
2254 build_tlb_probe_entry(&p); 2271 build_tlb_probe_entry(&p);
2255 uasm_i_ori(&p, wr.r1, wr.r1, 2272 uasm_i_ori(&p, wr.r1, wr.r1,
2256 _PAGE_ACCESSED | _PAGE_MODIFIED | _PAGE_VALID | _PAGE_DIRTY); 2273 _PAGE_ACCESSED | _PAGE_MODIFIED | _PAGE_VALID | _PAGE_DIRTY);
2257 build_huge_handler_tail(&p, &r, &l, wr.r1, wr.r2); 2274 build_huge_handler_tail(&p, &r, &l, wr.r1, wr.r2, 1);
2258#endif 2275#endif
2259 2276
2260 uasm_l_nopage_tlbs(&l, p); 2277 uasm_l_nopage_tlbs(&l, p);
@@ -2310,7 +2327,7 @@ static void build_r4000_tlb_modify_handler(void)
2310 build_tlb_probe_entry(&p); 2327 build_tlb_probe_entry(&p);
2311 uasm_i_ori(&p, wr.r1, wr.r1, 2328 uasm_i_ori(&p, wr.r1, wr.r1,
2312 _PAGE_ACCESSED | _PAGE_MODIFIED | _PAGE_VALID | _PAGE_DIRTY); 2329 _PAGE_ACCESSED | _PAGE_MODIFIED | _PAGE_VALID | _PAGE_DIRTY);
2313 build_huge_handler_tail(&p, &r, &l, wr.r1, wr.r2); 2330 build_huge_handler_tail(&p, &r, &l, wr.r1, wr.r2, 0);
2314#endif 2331#endif
2315 2332
2316 uasm_l_nopage_tlbm(&l, p); 2333 uasm_l_nopage_tlbm(&l, p);
diff --git a/arch/mips/ralink/rt3883.c b/arch/mips/ralink/rt3883.c
index c4ffd43d3996..48ce701557a4 100644
--- a/arch/mips/ralink/rt3883.c
+++ b/arch/mips/ralink/rt3883.c
@@ -35,7 +35,7 @@ static struct rt2880_pmx_func uartlite_func[] = { FUNC("uartlite", 0, 15, 2) };
35static struct rt2880_pmx_func jtag_func[] = { FUNC("jtag", 0, 17, 5) }; 35static struct rt2880_pmx_func jtag_func[] = { FUNC("jtag", 0, 17, 5) };
36static struct rt2880_pmx_func mdio_func[] = { FUNC("mdio", 0, 22, 2) }; 36static struct rt2880_pmx_func mdio_func[] = { FUNC("mdio", 0, 22, 2) };
37static struct rt2880_pmx_func lna_a_func[] = { FUNC("lna a", 0, 32, 3) }; 37static struct rt2880_pmx_func lna_a_func[] = { FUNC("lna a", 0, 32, 3) };
38static struct rt2880_pmx_func lna_g_func[] = { FUNC("lna a", 0, 35, 3) }; 38static struct rt2880_pmx_func lna_g_func[] = { FUNC("lna g", 0, 35, 3) };
39static struct rt2880_pmx_func pci_func[] = { 39static struct rt2880_pmx_func pci_func[] = {
40 FUNC("pci-dev", 0, 40, 32), 40 FUNC("pci-dev", 0, 40, 32),
41 FUNC("pci-host2", 1, 40, 32), 41 FUNC("pci-host2", 1, 40, 32),
@@ -43,7 +43,7 @@ static struct rt2880_pmx_func pci_func[] = {
43 FUNC("pci-fnc", 3, 40, 32) 43 FUNC("pci-fnc", 3, 40, 32)
44}; 44};
45static struct rt2880_pmx_func ge1_func[] = { FUNC("ge1", 0, 72, 12) }; 45static struct rt2880_pmx_func ge1_func[] = { FUNC("ge1", 0, 72, 12) };
46static struct rt2880_pmx_func ge2_func[] = { FUNC("ge1", 0, 84, 12) }; 46static struct rt2880_pmx_func ge2_func[] = { FUNC("ge2", 0, 84, 12) };
47 47
48static struct rt2880_pmx_group rt3883_pinmux_data[] = { 48static struct rt2880_pmx_group rt3883_pinmux_data[] = {
49 GRP("i2c", i2c_func, 1, RT3883_GPIO_MODE_I2C), 49 GRP("i2c", i2c_func, 1, RT3883_GPIO_MODE_I2C),
diff --git a/arch/powerpc/crypto/crc32c-vpmsum_glue.c b/arch/powerpc/crypto/crc32c-vpmsum_glue.c
index 411994551afc..f058e0c3e4d4 100644
--- a/arch/powerpc/crypto/crc32c-vpmsum_glue.c
+++ b/arch/powerpc/crypto/crc32c-vpmsum_glue.c
@@ -33,10 +33,13 @@ static u32 crc32c_vpmsum(u32 crc, unsigned char const *p, size_t len)
33 } 33 }
34 34
35 if (len & ~VMX_ALIGN_MASK) { 35 if (len & ~VMX_ALIGN_MASK) {
36 preempt_disable();
36 pagefault_disable(); 37 pagefault_disable();
37 enable_kernel_altivec(); 38 enable_kernel_altivec();
38 crc = __crc32c_vpmsum(crc, p, len & ~VMX_ALIGN_MASK); 39 crc = __crc32c_vpmsum(crc, p, len & ~VMX_ALIGN_MASK);
40 disable_kernel_altivec();
39 pagefault_enable(); 41 pagefault_enable();
42 preempt_enable();
40 } 43 }
41 44
42 tail = len & VMX_ALIGN_MASK; 45 tail = len & VMX_ALIGN_MASK;
diff --git a/arch/powerpc/kernel/align.c b/arch/powerpc/kernel/align.c
index cbc7c42cdb74..ec7a8b099dd9 100644
--- a/arch/powerpc/kernel/align.c
+++ b/arch/powerpc/kernel/align.c
@@ -807,14 +807,25 @@ int fix_alignment(struct pt_regs *regs)
807 nb = aligninfo[instr].len; 807 nb = aligninfo[instr].len;
808 flags = aligninfo[instr].flags; 808 flags = aligninfo[instr].flags;
809 809
810 /* ldbrx/stdbrx overlap lfs/stfs in the DSISR unfortunately */ 810 /*
811 if (IS_XFORM(instruction) && ((instruction >> 1) & 0x3ff) == 532) { 811 * Handle some cases which give overlaps in the DSISR values.
812 nb = 8; 812 */
813 flags = LD+SW; 813 if (IS_XFORM(instruction)) {
814 } else if (IS_XFORM(instruction) && 814 switch (get_xop(instruction)) {
815 ((instruction >> 1) & 0x3ff) == 660) { 815 case 532: /* ldbrx */
816 nb = 8; 816 nb = 8;
817 flags = ST+SW; 817 flags = LD+SW;
818 break;
819 case 660: /* stdbrx */
820 nb = 8;
821 flags = ST+SW;
822 break;
823 case 20: /* lwarx */
824 case 84: /* ldarx */
825 case 116: /* lharx */
826 case 276: /* lqarx */
827 return 0; /* not emulated ever */
828 }
818 } 829 }
819 830
820 /* Byteswap little endian loads and stores */ 831 /* Byteswap little endian loads and stores */
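The xop values in the switch are the X-form extended opcode, which lives in bits 1-10 of the instruction word; assuming the usual encoding, get_xop() amounts to the following (xform_xop() is an invented name for illustration):

	/* Sketch: extended opcode (XO) field of an X-form instruction. */
	static inline unsigned int xform_xop(unsigned int instr)
	{
		return (instr >> 1) & 0x3ff;	/* bits 1..10 */
	}

which is exactly the ((instruction >> 1) & 0x3ff) test the old code open-coded for 532 (ldbrx) and 660 (stdbrx).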
diff --git a/arch/powerpc/kernel/misc_64.S b/arch/powerpc/kernel/misc_64.S
index ae179cb1bb3c..c119044cad0d 100644
--- a/arch/powerpc/kernel/misc_64.S
+++ b/arch/powerpc/kernel/misc_64.S
@@ -67,7 +67,7 @@ PPC64_CACHES:
67 * flush all bytes from start through stop-1 inclusive 67 * flush all bytes from start through stop-1 inclusive
68 */ 68 */
69 69
70_GLOBAL(flush_icache_range) 70_GLOBAL_TOC(flush_icache_range)
71BEGIN_FTR_SECTION 71BEGIN_FTR_SECTION
72 PURGE_PREFETCHED_INS 72 PURGE_PREFETCHED_INS
73 blr 73 blr
@@ -120,7 +120,7 @@ EXPORT_SYMBOL(flush_icache_range)
120 * 120 *
121 * flush all bytes from start to stop-1 inclusive 121 * flush all bytes from start to stop-1 inclusive
122 */ 122 */
123_GLOBAL(flush_dcache_range) 123_GLOBAL_TOC(flush_dcache_range)
124 124
125/* 125/*
126 * Flush the data cache to memory 126 * Flush the data cache to memory
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index 9cfaa8b69b5f..f997154dfc41 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -236,6 +236,15 @@ static void cpu_ready_for_interrupts(void)
236 mtspr(SPRN_LPCR, lpcr | LPCR_AIL_3); 236 mtspr(SPRN_LPCR, lpcr | LPCR_AIL_3);
237 } 237 }
238 238
239 /*
240 * Fixup HFSCR:TM based on CPU features. The bit is set by our
241 * early asm init because at that point we haven't updated our
242 * CPU features from firmware and device-tree. Here we have,
243 * so let's do it.
244 */
245 if (cpu_has_feature(CPU_FTR_HVMODE) && !cpu_has_feature(CPU_FTR_TM_COMP))
246 mtspr(SPRN_HFSCR, mfspr(SPRN_HFSCR) & ~HFSCR_TM);
247
239 /* Set IR and DR in PACA MSR */ 248 /* Set IR and DR in PACA MSR */
240 get_paca()->kernel_msr = MSR_KERNEL; 249 get_paca()->kernel_msr = MSR_KERNEL;
241} 250}
diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c
index 8c68145ba1bd..710e491206ed 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_hv.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c
@@ -1487,6 +1487,10 @@ long kvm_vm_ioctl_resize_hpt_prepare(struct kvm *kvm,
1487 /* start new resize */ 1487 /* start new resize */
1488 1488
1489 resize = kzalloc(sizeof(*resize), GFP_KERNEL); 1489 resize = kzalloc(sizeof(*resize), GFP_KERNEL);
1490 if (!resize) {
1491 ret = -ENOMEM;
1492 goto out;
1493 }
1490 resize->order = shift; 1494 resize->order = shift;
1491 resize->kvm = kvm; 1495 resize->kvm = kvm;
1492 INIT_WORK(&resize->work, resize_hpt_prepare_work); 1496 INIT_WORK(&resize->work, resize_hpt_prepare_work);
diff --git a/arch/powerpc/mm/hash_native_64.c b/arch/powerpc/mm/hash_native_64.c
index cc332608e656..65bb8f33b399 100644
--- a/arch/powerpc/mm/hash_native_64.c
+++ b/arch/powerpc/mm/hash_native_64.c
@@ -638,6 +638,10 @@ static void native_flush_hash_range(unsigned long number, int local)
638 unsigned long psize = batch->psize; 638 unsigned long psize = batch->psize;
639 int ssize = batch->ssize; 639 int ssize = batch->ssize;
640 int i; 640 int i;
641 unsigned int use_local;
642
643 use_local = local && mmu_has_feature(MMU_FTR_TLBIEL) &&
644 mmu_psize_defs[psize].tlbiel && !cxl_ctx_in_use();
641 645
642 local_irq_save(flags); 646 local_irq_save(flags);
643 647
@@ -667,8 +671,7 @@ static void native_flush_hash_range(unsigned long number, int local)
667 } pte_iterate_hashed_end(); 671 } pte_iterate_hashed_end();
668 } 672 }
669 673
670 if (mmu_has_feature(MMU_FTR_TLBIEL) && 674 if (use_local) {
671 mmu_psize_defs[psize].tlbiel && local) {
672 asm volatile("ptesync":::"memory"); 675 asm volatile("ptesync":::"memory");
673 for (i = 0; i < number; i++) { 676 for (i = 0; i < number; i++) {
674 vpn = batch->vpn[i]; 677 vpn = batch->vpn[i];
diff --git a/arch/s390/kvm/gaccess.c b/arch/s390/kvm/gaccess.c
index d55c829a5944..ddbffb715b40 100644
--- a/arch/s390/kvm/gaccess.c
+++ b/arch/s390/kvm/gaccess.c
@@ -168,8 +168,7 @@ union page_table_entry {
168 unsigned long z : 1; /* Zero Bit */ 168 unsigned long z : 1; /* Zero Bit */
169 unsigned long i : 1; /* Page-Invalid Bit */ 169 unsigned long i : 1; /* Page-Invalid Bit */
170 unsigned long p : 1; /* DAT-Protection Bit */ 170 unsigned long p : 1; /* DAT-Protection Bit */
171 unsigned long co : 1; /* Change-Recording Override */ 171 unsigned long : 9;
172 unsigned long : 8;
173 }; 172 };
174}; 173};
175 174
@@ -745,8 +744,6 @@ static unsigned long guest_translate(struct kvm_vcpu *vcpu, unsigned long gva,
745 return PGM_PAGE_TRANSLATION; 744 return PGM_PAGE_TRANSLATION;
746 if (pte.z) 745 if (pte.z)
747 return PGM_TRANSLATION_SPEC; 746 return PGM_TRANSLATION_SPEC;
748 if (pte.co && !edat1)
749 return PGM_TRANSLATION_SPEC;
750 dat_protection |= pte.p; 747 dat_protection |= pte.p;
751 raddr.pfra = pte.pfra; 748 raddr.pfra = pte.pfra;
752real_address: 749real_address:
@@ -1182,7 +1179,7 @@ int kvm_s390_shadow_fault(struct kvm_vcpu *vcpu, struct gmap *sg,
1182 rc = gmap_read_table(sg->parent, pgt + vaddr.px * 8, &pte.val); 1179 rc = gmap_read_table(sg->parent, pgt + vaddr.px * 8, &pte.val);
1183 if (!rc && pte.i) 1180 if (!rc && pte.i)
1184 rc = PGM_PAGE_TRANSLATION; 1181 rc = PGM_PAGE_TRANSLATION;
1185 if (!rc && (pte.z || (pte.co && sg->edat_level < 1))) 1182 if (!rc && pte.z)
1186 rc = PGM_TRANSLATION_SPEC; 1183 rc = PGM_TRANSLATION_SPEC;
1187shadow_page: 1184shadow_page:
1188 pte.p |= dat_protection; 1185 pte.p |= dat_protection;
diff --git a/arch/sparc/include/asm/page_64.h b/arch/sparc/include/asm/page_64.h
index f294dd42fc7d..5961b2d8398a 100644
--- a/arch/sparc/include/asm/page_64.h
+++ b/arch/sparc/include/asm/page_64.h
@@ -17,6 +17,7 @@
17 17
18#define HPAGE_SHIFT 23 18#define HPAGE_SHIFT 23
19#define REAL_HPAGE_SHIFT 22 19#define REAL_HPAGE_SHIFT 22
20#define HPAGE_2GB_SHIFT 31
20#define HPAGE_256MB_SHIFT 28 21#define HPAGE_256MB_SHIFT 28
21#define HPAGE_64K_SHIFT 16 22#define HPAGE_64K_SHIFT 16
22#define REAL_HPAGE_SIZE (_AC(1,UL) << REAL_HPAGE_SHIFT) 23#define REAL_HPAGE_SIZE (_AC(1,UL) << REAL_HPAGE_SHIFT)
@@ -27,7 +28,7 @@
27#define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT) 28#define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT)
28#define HAVE_ARCH_HUGETLB_UNMAPPED_AREA 29#define HAVE_ARCH_HUGETLB_UNMAPPED_AREA
29#define REAL_HPAGE_PER_HPAGE (_AC(1,UL) << (HPAGE_SHIFT - REAL_HPAGE_SHIFT)) 30#define REAL_HPAGE_PER_HPAGE (_AC(1,UL) << (HPAGE_SHIFT - REAL_HPAGE_SHIFT))
30#define HUGE_MAX_HSTATE 3 31#define HUGE_MAX_HSTATE 4
31#endif 32#endif
32 33
33#ifndef __ASSEMBLY__ 34#ifndef __ASSEMBLY__
diff --git a/arch/sparc/include/asm/pgtable_64.h b/arch/sparc/include/asm/pgtable_64.h
index 8a598528ec1f..6fbd931f0570 100644
--- a/arch/sparc/include/asm/pgtable_64.h
+++ b/arch/sparc/include/asm/pgtable_64.h
@@ -679,26 +679,27 @@ static inline unsigned long pmd_pfn(pmd_t pmd)
679 return pte_pfn(pte); 679 return pte_pfn(pte);
680} 680}
681 681
682#ifdef CONFIG_TRANSPARENT_HUGEPAGE 682#define __HAVE_ARCH_PMD_WRITE
683static inline unsigned long pmd_dirty(pmd_t pmd) 683static inline unsigned long pmd_write(pmd_t pmd)
684{ 684{
685 pte_t pte = __pte(pmd_val(pmd)); 685 pte_t pte = __pte(pmd_val(pmd));
686 686
687 return pte_dirty(pte); 687 return pte_write(pte);
688} 688}
689 689
690static inline unsigned long pmd_young(pmd_t pmd) 690#ifdef CONFIG_TRANSPARENT_HUGEPAGE
691static inline unsigned long pmd_dirty(pmd_t pmd)
691{ 692{
692 pte_t pte = __pte(pmd_val(pmd)); 693 pte_t pte = __pte(pmd_val(pmd));
693 694
694 return pte_young(pte); 695 return pte_dirty(pte);
695} 696}
696 697
697static inline unsigned long pmd_write(pmd_t pmd) 698static inline unsigned long pmd_young(pmd_t pmd)
698{ 699{
699 pte_t pte = __pte(pmd_val(pmd)); 700 pte_t pte = __pte(pmd_val(pmd));
700 701
701 return pte_write(pte); 702 return pte_young(pte);
702} 703}
703 704
704static inline unsigned long pmd_trans_huge(pmd_t pmd) 705static inline unsigned long pmd_trans_huge(pmd_t pmd)
diff --git a/arch/sparc/include/asm/processor_32.h b/arch/sparc/include/asm/processor_32.h
index 365d4cb267b4..dd27159819eb 100644
--- a/arch/sparc/include/asm/processor_32.h
+++ b/arch/sparc/include/asm/processor_32.h
@@ -18,12 +18,6 @@
18#include <asm/signal.h> 18#include <asm/signal.h>
19#include <asm/page.h> 19#include <asm/page.h>
20 20
21/*
22 * The sparc has no problems with write protection
23 */
24#define wp_works_ok 1
25#define wp_works_ok__is_a_macro /* for versions in ksyms.c */
26
27/* Whee, this is STACK_TOP + PAGE_SIZE and the lowest kernel address too... 21/* Whee, this is STACK_TOP + PAGE_SIZE and the lowest kernel address too...
28 * That one page is used to protect kernel from intruders, so that 22 * That one page is used to protect kernel from intruders, so that
29 * we can make our access_ok test faster 23 * we can make our access_ok test faster
diff --git a/arch/sparc/include/asm/processor_64.h b/arch/sparc/include/asm/processor_64.h
index 6448cfc8292f..b58ee9018433 100644
--- a/arch/sparc/include/asm/processor_64.h
+++ b/arch/sparc/include/asm/processor_64.h
@@ -18,10 +18,6 @@
18#include <asm/ptrace.h> 18#include <asm/ptrace.h>
19#include <asm/page.h> 19#include <asm/page.h>
20 20
21/* The sparc has no problems with write protection */
22#define wp_works_ok 1
23#define wp_works_ok__is_a_macro /* for versions in ksyms.c */
24
25/* 21/*
26 * User lives in his very own context, and cannot reference us. Note 22 * User lives in his very own context, and cannot reference us. Note
27 * that TASK_SIZE is a misnomer, it really gives maximum user virtual 23 * that TASK_SIZE is a misnomer, it really gives maximum user virtual
diff --git a/arch/sparc/kernel/head_64.S b/arch/sparc/kernel/head_64.S
index 6aa3da152c20..44101196d02b 100644
--- a/arch/sparc/kernel/head_64.S
+++ b/arch/sparc/kernel/head_64.S
@@ -96,6 +96,7 @@ sparc64_boot:
96 andn %g1, PSTATE_AM, %g1 96 andn %g1, PSTATE_AM, %g1
97 wrpr %g1, 0x0, %pstate 97 wrpr %g1, 0x0, %pstate
98 ba,a,pt %xcc, 1f 98 ba,a,pt %xcc, 1f
99 nop
99 100
100 .globl prom_finddev_name, prom_chosen_path, prom_root_node 101 .globl prom_finddev_name, prom_chosen_path, prom_root_node
101 .globl prom_getprop_name, prom_mmu_name, prom_peer_name 102 .globl prom_getprop_name, prom_mmu_name, prom_peer_name
@@ -613,6 +614,7 @@ niagara_tlb_fixup:
613 nop 614 nop
614 615
615 ba,a,pt %xcc, 80f 616 ba,a,pt %xcc, 80f
617 nop
616niagara4_patch: 618niagara4_patch:
617 call niagara4_patch_copyops 619 call niagara4_patch_copyops
618 nop 620 nop
@@ -622,6 +624,7 @@ niagara4_patch:
622 nop 624 nop
623 625
624 ba,a,pt %xcc, 80f 626 ba,a,pt %xcc, 80f
627 nop
625 628
626niagara2_patch: 629niagara2_patch:
627 call niagara2_patch_copyops 630 call niagara2_patch_copyops
@@ -632,6 +635,7 @@ niagara2_patch:
632 nop 635 nop
633 636
634 ba,a,pt %xcc, 80f 637 ba,a,pt %xcc, 80f
638 nop
635 639
636niagara_patch: 640niagara_patch:
637 call niagara_patch_copyops 641 call niagara_patch_copyops
diff --git a/arch/sparc/kernel/misctrap.S b/arch/sparc/kernel/misctrap.S
index 34b4933900bf..9276d2f0dd86 100644
--- a/arch/sparc/kernel/misctrap.S
+++ b/arch/sparc/kernel/misctrap.S
@@ -82,6 +82,7 @@ do_stdfmna:
82 call handle_stdfmna 82 call handle_stdfmna
83 add %sp, PTREGS_OFF, %o0 83 add %sp, PTREGS_OFF, %o0
84 ba,a,pt %xcc, rtrap 84 ba,a,pt %xcc, rtrap
85 nop
85 .size do_stdfmna,.-do_stdfmna 86 .size do_stdfmna,.-do_stdfmna
86 87
87 .type breakpoint_trap,#function 88 .type breakpoint_trap,#function
diff --git a/arch/sparc/kernel/rtrap_64.S b/arch/sparc/kernel/rtrap_64.S
index 216948ca4382..709a82ebd294 100644
--- a/arch/sparc/kernel/rtrap_64.S
+++ b/arch/sparc/kernel/rtrap_64.S
@@ -237,6 +237,7 @@ rt_continue: ldx [%sp + PTREGS_OFF + PT_V9_G1], %g1
237 bne,pt %xcc, user_rtt_fill_32bit 237 bne,pt %xcc, user_rtt_fill_32bit
238 wrpr %g1, %cwp 238 wrpr %g1, %cwp
239 ba,a,pt %xcc, user_rtt_fill_64bit 239 ba,a,pt %xcc, user_rtt_fill_64bit
240 nop
240 241
241user_rtt_fill_fixup_dax: 242user_rtt_fill_fixup_dax:
242 ba,pt %xcc, user_rtt_fill_fixup_common 243 ba,pt %xcc, user_rtt_fill_fixup_common
diff --git a/arch/sparc/kernel/spiterrs.S b/arch/sparc/kernel/spiterrs.S
index 4a73009f66a5..d7e540842809 100644
--- a/arch/sparc/kernel/spiterrs.S
+++ b/arch/sparc/kernel/spiterrs.S
@@ -86,6 +86,7 @@ __spitfire_cee_trap_continue:
86 rd %pc, %g7 86 rd %pc, %g7
87 87
88 ba,a,pt %xcc, 2f 88 ba,a,pt %xcc, 2f
89 nop
89 90
901: ba,pt %xcc, etrap_irq 911: ba,pt %xcc, etrap_irq
91 rd %pc, %g7 92 rd %pc, %g7
diff --git a/arch/sparc/kernel/sun4v_tlb_miss.S b/arch/sparc/kernel/sun4v_tlb_miss.S
index 6179e19bc9b9..c19f352f46c7 100644
--- a/arch/sparc/kernel/sun4v_tlb_miss.S
+++ b/arch/sparc/kernel/sun4v_tlb_miss.S
@@ -352,6 +352,7 @@ sun4v_mna:
352 call sun4v_do_mna 352 call sun4v_do_mna
353 add %sp, PTREGS_OFF, %o0 353 add %sp, PTREGS_OFF, %o0
354 ba,a,pt %xcc, rtrap 354 ba,a,pt %xcc, rtrap
355 nop
355 356
356 /* Privileged Action. */ 357 /* Privileged Action. */
357sun4v_privact: 358sun4v_privact:
diff --git a/arch/sparc/kernel/urtt_fill.S b/arch/sparc/kernel/urtt_fill.S
index 5604a2b051d4..364af3250646 100644
--- a/arch/sparc/kernel/urtt_fill.S
+++ b/arch/sparc/kernel/urtt_fill.S
@@ -92,6 +92,7 @@ user_rtt_fill_fixup_common:
92 call sun4v_data_access_exception 92 call sun4v_data_access_exception
93 nop 93 nop
94 ba,a,pt %xcc, rtrap 94 ba,a,pt %xcc, rtrap
95 nop
95 96
961: call spitfire_data_access_exception 971: call spitfire_data_access_exception
97 nop 98 nop
diff --git a/arch/sparc/kernel/winfixup.S b/arch/sparc/kernel/winfixup.S
index 855019a8590e..1ee173cc3c39 100644
--- a/arch/sparc/kernel/winfixup.S
+++ b/arch/sparc/kernel/winfixup.S
@@ -152,6 +152,8 @@ fill_fixup_dax:
152 call sun4v_data_access_exception 152 call sun4v_data_access_exception
153 nop 153 nop
154 ba,a,pt %xcc, rtrap 154 ba,a,pt %xcc, rtrap
155 nop
1551: call spitfire_data_access_exception 1561: call spitfire_data_access_exception
156 nop 157 nop
157 ba,a,pt %xcc, rtrap 158 ba,a,pt %xcc, rtrap
159 nop
diff --git a/arch/sparc/lib/NG2memcpy.S b/arch/sparc/lib/NG2memcpy.S
index c629dbd121b6..64dcd6cdb606 100644
--- a/arch/sparc/lib/NG2memcpy.S
+++ b/arch/sparc/lib/NG2memcpy.S
@@ -326,11 +326,13 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
326 blu 170f 326 blu 170f
327 nop 327 nop
328 ba,a,pt %xcc, 180f 328 ba,a,pt %xcc, 180f
329 nop
329 330
3304: /* 32 <= low bits < 48 */ 3314: /* 32 <= low bits < 48 */
331 blu 150f 332 blu 150f
332 nop 333 nop
333 ba,a,pt %xcc, 160f 334 ba,a,pt %xcc, 160f
335 nop
3345: /* 0 < low bits < 32 */ 3365: /* 0 < low bits < 32 */
335 blu,a 6f 337 blu,a 6f
336 cmp %g2, 8 338 cmp %g2, 8
@@ -338,6 +340,7 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
338 blu 130f 340 blu 130f
339 nop 341 nop
340 ba,a,pt %xcc, 140f 342 ba,a,pt %xcc, 140f
343 nop
3416: /* 0 < low bits < 16 */ 3446: /* 0 < low bits < 16 */
342 bgeu 120f 345 bgeu 120f
343 nop 346 nop
@@ -475,6 +478,7 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
475 brz,pt %o2, 85f 478 brz,pt %o2, 85f
476 sub %o0, %o1, GLOBAL_SPARE 479 sub %o0, %o1, GLOBAL_SPARE
477 ba,a,pt %XCC, 90f 480 ba,a,pt %XCC, 90f
481 nop
478 482
479 .align 64 483 .align 64
48075: /* 16 < len <= 64 */ 48475: /* 16 < len <= 64 */
diff --git a/arch/sparc/lib/NG4memcpy.S b/arch/sparc/lib/NG4memcpy.S
index 75bb93b1437f..78ea962edcbe 100644
--- a/arch/sparc/lib/NG4memcpy.S
+++ b/arch/sparc/lib/NG4memcpy.S
@@ -530,4 +530,5 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
530 bne,pt %icc, 1b 530 bne,pt %icc, 1b
531 EX_ST(STORE(stb, %g1, %o0 - 0x01), NG4_retl_o2_plus_1) 531 EX_ST(STORE(stb, %g1, %o0 - 0x01), NG4_retl_o2_plus_1)
532 ba,a,pt %icc, .Lexit 532 ba,a,pt %icc, .Lexit
533 nop
533 .size FUNC_NAME, .-FUNC_NAME 534 .size FUNC_NAME, .-FUNC_NAME
diff --git a/arch/sparc/lib/NG4memset.S b/arch/sparc/lib/NG4memset.S
index 41da4bdd95cb..7c0c81f18837 100644
--- a/arch/sparc/lib/NG4memset.S
+++ b/arch/sparc/lib/NG4memset.S
@@ -102,4 +102,5 @@ NG4bzero:
102 bne,pt %icc, 1b 102 bne,pt %icc, 1b
103 add %o0, 0x30, %o0 103 add %o0, 0x30, %o0
104 ba,a,pt %icc, .Lpostloop 104 ba,a,pt %icc, .Lpostloop
105 nop
105 .size NG4bzero,.-NG4bzero 106 .size NG4bzero,.-NG4bzero
diff --git a/arch/sparc/lib/NGmemcpy.S b/arch/sparc/lib/NGmemcpy.S
index d88c4ed50a00..cd654a719b27 100644
--- a/arch/sparc/lib/NGmemcpy.S
+++ b/arch/sparc/lib/NGmemcpy.S
@@ -394,6 +394,7 @@ FUNC_NAME: /* %i0=dst, %i1=src, %i2=len */
394 brz,pt %i2, 85f 394 brz,pt %i2, 85f
395 sub %o0, %i1, %i3 395 sub %o0, %i1, %i3
396 ba,a,pt %XCC, 90f 396 ba,a,pt %XCC, 90f
397 nop
397 398
398 .align 64 399 .align 64
39970: /* 16 < len <= 64 */ 40070: /* 16 < len <= 64 */
diff --git a/arch/sparc/mm/hugetlbpage.c b/arch/sparc/mm/hugetlbpage.c
index 323bc6b6e3ad..ee5273ad918d 100644
--- a/arch/sparc/mm/hugetlbpage.c
+++ b/arch/sparc/mm/hugetlbpage.c
@@ -143,6 +143,10 @@ static pte_t sun4v_hugepage_shift_to_tte(pte_t entry, unsigned int shift)
 	pte_val(entry) = pte_val(entry) & ~_PAGE_SZALL_4V;
 
 	switch (shift) {
+	case HPAGE_2GB_SHIFT:
+		hugepage_size = _PAGE_SZ2GB_4V;
+		pte_val(entry) |= _PAGE_PMD_HUGE;
+		break;
 	case HPAGE_256MB_SHIFT:
 		hugepage_size = _PAGE_SZ256MB_4V;
 		pte_val(entry) |= _PAGE_PMD_HUGE;
@@ -183,6 +187,9 @@ static unsigned int sun4v_huge_tte_to_shift(pte_t entry)
 	unsigned int shift;
 
 	switch (tte_szbits) {
+	case _PAGE_SZ2GB_4V:
+		shift = HPAGE_2GB_SHIFT;
+		break;
 	case _PAGE_SZ256MB_4V:
 		shift = HPAGE_256MB_SHIFT;
 		break;
@@ -261,7 +268,7 @@ pte_t *huge_pte_alloc(struct mm_struct *mm,
 	if (!pmd)
 		return NULL;
 
-	if (sz == PMD_SHIFT)
+	if (sz >= PMD_SIZE)
 		pte = (pte_t *)pmd;
 	else
 		pte = pte_alloc_map(mm, pmd, addr);
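
Editor's note: the huge_pte_alloc() change above fixes a units mix-up — PMD_SHIFT is a bit position while sz is a size in bytes, so the old equality could never hold. A minimal runnable sketch of the same bug, using illustrative values rather than sparc64's real configuration:

/* Editor's sketch, not part of the patch; constants are assumptions. */
#include <stdio.h>

#define EXAMPLE_PMD_SHIFT 23UL			/* a shift count */
#define EXAMPLE_PMD_SIZE  (1UL << EXAMPLE_PMD_SHIFT)	/* bytes */

int main(void)
{
	unsigned long sz = 8UL * 1024 * 1024;	/* an 8 MB huge page */

	/* Old test: bytes compared against a shift count, never true. */
	printf("sz == PMD_SHIFT : %d\n", sz == EXAMPLE_PMD_SHIFT);

	/* Fixed test: bytes compared against bytes. */
	printf("sz >= PMD_SIZE  : %d\n", sz >= EXAMPLE_PMD_SIZE);
	return 0;
}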
diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
index ccd455328989..0cda653ae007 100644
--- a/arch/sparc/mm/init_64.c
+++ b/arch/sparc/mm/init_64.c
@@ -337,6 +337,10 @@ static int __init setup_hugepagesz(char *string)
 	hugepage_shift = ilog2(hugepage_size);
 
 	switch (hugepage_shift) {
+	case HPAGE_2GB_SHIFT:
+		hv_pgsz_mask = HV_PGSZ_MASK_2GB;
+		hv_pgsz_idx = HV_PGSZ_IDX_2GB;
+		break;
 	case HPAGE_256MB_SHIFT:
 		hv_pgsz_mask = HV_PGSZ_MASK_256MB;
 		hv_pgsz_idx = HV_PGSZ_IDX_256MB;
@@ -1563,7 +1567,7 @@ bool kern_addr_valid(unsigned long addr)
 	if ((long)addr < 0L) {
 		unsigned long pa = __pa(addr);
 
-		if ((addr >> max_phys_bits) != 0UL)
+		if ((pa >> max_phys_bits) != 0UL)
 			return false;
 
 		return pfn_valid(pa >> PAGE_SHIFT);
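
Editor's note: in the branch above, addr is a sign-extended kernel virtual address with its top bits set, so "addr >> max_phys_bits" was never zero and the old test rejected every such address; shifting the physical address behaves as intended. A hedged sketch (max_phys_bits = 47 and the toy __pa() mask are illustrative assumptions):

/* Editor's sketch, not the kernel code. */
#include <stdio.h>

int main(void)
{
	unsigned long max_phys_bits = 47;
	unsigned long addr = 0xfffff80012345000UL; /* sign-extended kernel VA */
	unsigned long pa   = addr & 0x0000ffffffffffffUL; /* toy __pa() */

	printf("addr >> max_phys_bits = %#lx (always nonzero)\n",
	       addr >> max_phys_bits);
	printf("pa   >> max_phys_bits = %#lx (zero for valid RAM)\n",
	       pa >> max_phys_bits);
	return 0;
}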
diff --git a/arch/sparc/mm/srmmu.c b/arch/sparc/mm/srmmu.c
index def82f6d626f..8e76ebba2986 100644
--- a/arch/sparc/mm/srmmu.c
+++ b/arch/sparc/mm/srmmu.c
@@ -54,6 +54,7 @@
 enum mbus_module srmmu_modtype;
 static unsigned int hwbug_bitmask;
 int vac_cache_size;
+EXPORT_SYMBOL(vac_cache_size);
 int vac_line_size;
 
 extern struct resource sparc_iomap;
diff --git a/arch/sparc/mm/tlb.c b/arch/sparc/mm/tlb.c
index afda3bbf7854..ee8066c3d96c 100644
--- a/arch/sparc/mm/tlb.c
+++ b/arch/sparc/mm/tlb.c
@@ -154,7 +154,7 @@ static void tlb_batch_pmd_scan(struct mm_struct *mm, unsigned long vaddr,
 		if (pte_val(*pte) & _PAGE_VALID) {
 			bool exec = pte_exec(*pte);
 
-			tlb_batch_add_one(mm, vaddr, exec, false);
+			tlb_batch_add_one(mm, vaddr, exec, PAGE_SHIFT);
 		}
 		pte++;
 		vaddr += PAGE_SIZE;
@@ -209,9 +209,9 @@ void set_pmd_at(struct mm_struct *mm, unsigned long addr,
 			pte_t orig_pte = __pte(pmd_val(orig));
 			bool exec = pte_exec(orig_pte);
 
-			tlb_batch_add_one(mm, addr, exec, true);
-			tlb_batch_add_one(mm, addr + REAL_HPAGE_SIZE, exec,
-					  true);
+			tlb_batch_add_one(mm, addr, exec, REAL_HPAGE_SHIFT);
+			tlb_batch_add_one(mm, addr + REAL_HPAGE_SIZE, exec,
+					  REAL_HPAGE_SHIFT);
 		} else {
 			tlb_batch_pmd_scan(mm, addr, orig);
 		}
diff --git a/arch/sparc/mm/tsb.c b/arch/sparc/mm/tsb.c
index 0a04811f06b7..bedf08b22a47 100644
--- a/arch/sparc/mm/tsb.c
+++ b/arch/sparc/mm/tsb.c
@@ -122,7 +122,7 @@ void flush_tsb_user(struct tlb_batch *tb)
 
 	spin_lock_irqsave(&mm->context.lock, flags);
 
-	if (tb->hugepage_shift < HPAGE_SHIFT) {
+	if (tb->hugepage_shift < REAL_HPAGE_SHIFT) {
 		base = (unsigned long) mm->context.tsb_block[MM_TSB_BASE].tsb;
 		nentries = mm->context.tsb_block[MM_TSB_BASE].tsb_nentries;
 		if (tlb_type == cheetah_plus || tlb_type == hypervisor)
@@ -155,7 +155,7 @@ void flush_tsb_user_page(struct mm_struct *mm, unsigned long vaddr,
 
 	spin_lock_irqsave(&mm->context.lock, flags);
 
-	if (hugepage_shift < HPAGE_SHIFT) {
+	if (hugepage_shift < REAL_HPAGE_SHIFT) {
 		base = (unsigned long) mm->context.tsb_block[MM_TSB_BASE].tsb;
 		nentries = mm->context.tsb_block[MM_TSB_BASE].tsb_nentries;
 		if (tlb_type == cheetah_plus || tlb_type == hypervisor)
diff --git a/arch/x86/entry/vdso/vdso32-setup.c b/arch/x86/entry/vdso/vdso32-setup.c
index 7853b53959cd..3f9d1a83891a 100644
--- a/arch/x86/entry/vdso/vdso32-setup.c
+++ b/arch/x86/entry/vdso/vdso32-setup.c
@@ -30,8 +30,10 @@ static int __init vdso32_setup(char *s)
 {
 	vdso32_enabled = simple_strtoul(s, NULL, 0);
 
-	if (vdso32_enabled > 1)
+	if (vdso32_enabled > 1) {
 		pr_warn("vdso32 values other than 0 and 1 are no longer allowed; vdso disabled\n");
+		vdso32_enabled = 0;
+	}
 
 	return 1;
 }
@@ -62,13 +64,18 @@ subsys_initcall(sysenter_setup);
 /* Register vsyscall32 into the ABI table */
 #include <linux/sysctl.h>
 
+static const int zero;
+static const int one = 1;
+
 static struct ctl_table abi_table2[] = {
 	{
 		.procname	= "vsyscall32",
 		.data		= &vdso32_enabled,
 		.maxlen		= sizeof(int),
 		.mode		= 0644,
-		.proc_handler	= proc_dointvec
+		.proc_handler	= proc_dointvec_minmax,
+		.extra1		= (int *)&zero,
+		.extra2		= (int *)&one,
 	},
 	{}
 };
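
Editor's note: both hunks above tighten input validation — an out-of-range boot parameter now forces vdso32_enabled back to 0, and the sysctl is clamped to [0, 1] via proc_dointvec_minmax. A hedged plain-C sketch of the same reject-and-reset pattern (function and message are illustrative, not the kernel's):

/* Editor's sketch, assumptions throughout. */
#include <stdio.h>
#include <stdlib.h>

static int vdso32_enabled;

static void vdso32_setup_example(const char *s)
{
	vdso32_enabled = (int)strtoul(s, NULL, 0);

	if (vdso32_enabled > 1) {
		fprintf(stderr, "values other than 0 and 1 are no longer allowed; vdso disabled\n");
		vdso32_enabled = 0;	/* reject, don't keep the bogus value */
	}
}

int main(void)
{
	vdso32_setup_example("2");
	printf("vdso32_enabled = %d\n", vdso32_enabled);	/* prints 0 */
	return 0;
}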
diff --git a/arch/x86/events/intel/lbr.c b/arch/x86/events/intel/lbr.c
index 81b321ace8e0..f924629836a8 100644
--- a/arch/x86/events/intel/lbr.c
+++ b/arch/x86/events/intel/lbr.c
@@ -507,6 +507,9 @@ static void intel_pmu_lbr_read_32(struct cpu_hw_events *cpuc)
 		cpuc->lbr_entries[i].to		= msr_lastbranch.to;
 		cpuc->lbr_entries[i].mispred	= 0;
 		cpuc->lbr_entries[i].predicted	= 0;
+		cpuc->lbr_entries[i].in_tx	= 0;
+		cpuc->lbr_entries[i].abort	= 0;
+		cpuc->lbr_entries[i].cycles	= 0;
 		cpuc->lbr_entries[i].reserved	= 0;
 	}
 	cpuc->lbr_stack.nr = i;
diff --git a/arch/x86/include/asm/elf.h b/arch/x86/include/asm/elf.h
index 9d49c18b5ea9..3762536619f8 100644
--- a/arch/x86/include/asm/elf.h
+++ b/arch/x86/include/asm/elf.h
@@ -287,7 +287,7 @@ struct task_struct;
 
 #define ARCH_DLINFO_IA32					\
 do {								\
-	if (vdso32_enabled) {					\
+	if (VDSO_CURRENT_BASE) {				\
 		NEW_AUX_ENT(AT_SYSINFO,	VDSO_ENTRY);		\
 		NEW_AUX_ENT(AT_SYSINFO_EHDR, VDSO_CURRENT_BASE);\
 	}							\
diff --git a/arch/x86/kernel/cpu/intel_rdt_schemata.c b/arch/x86/kernel/cpu/intel_rdt_schemata.c
index f369cb8db0d5..badd2b31a560 100644
--- a/arch/x86/kernel/cpu/intel_rdt_schemata.c
+++ b/arch/x86/kernel/cpu/intel_rdt_schemata.c
@@ -200,11 +200,11 @@ ssize_t rdtgroup_schemata_write(struct kernfs_open_file *of,
 	}
 
 out:
-	rdtgroup_kn_unlock(of->kn);
 	for_each_enabled_rdt_resource(r) {
 		kfree(r->tmp_cbms);
 		r->tmp_cbms = NULL;
 	}
+	rdtgroup_kn_unlock(of->kn);
 	return ret ?: nbytes;
 }
 
diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c
index 396c042e9d0e..cc30a74e4adb 100644
--- a/arch/x86/kernel/signal.c
+++ b/arch/x86/kernel/signal.c
@@ -846,7 +846,7 @@ void signal_fault(struct pt_regs *regs, void __user *frame, char *where)
 		       task_pid_nr(current) > 1 ? KERN_INFO : KERN_EMERG,
 		       me->comm, me->pid, where, frame,
 		       regs->ip, regs->sp, regs->orig_ax);
-		print_vma_addr(" in ", regs->ip);
+		print_vma_addr(KERN_CONT " in ", regs->ip);
 		pr_cont("\n");
 	}
 
diff --git a/arch/x86/kernel/signal_compat.c b/arch/x86/kernel/signal_compat.c
index ec1f756f9dc9..71beb28600d4 100644
--- a/arch/x86/kernel/signal_compat.c
+++ b/arch/x86/kernel/signal_compat.c
@@ -151,8 +151,8 @@ int __copy_siginfo_to_user32(compat_siginfo_t __user *to, const siginfo_t *from,
 
 	if (from->si_signo == SIGSEGV) {
 		if (from->si_code == SEGV_BNDERR) {
-			compat_uptr_t lower = (unsigned long)&to->si_lower;
-			compat_uptr_t upper = (unsigned long)&to->si_upper;
+			compat_uptr_t lower = (unsigned long)from->si_lower;
+			compat_uptr_t upper = (unsigned long)from->si_upper;
 			put_user_ex(lower, &to->si_lower);
 			put_user_ex(upper, &to->si_upper);
 		}
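
Editor's note: the bug above took the address of the destination's user-space fields instead of copying the bound values carried in the source siginfo. A toy runnable sketch (struct layouts are invented, not the kernel's compat siginfo):

/* Editor's sketch with made-up structs. */
#include <stdio.h>

struct src { void *si_lower, *si_upper; };
struct dst { unsigned int si_lower, si_upper; };

int main(void)
{
	struct src from = { (void *)0x1000, (void *)0x2000 };
	struct dst to;

	/* Buggy: a truncated pointer to the destination field itself. */
	unsigned int bad = (unsigned int)(unsigned long)&to.si_lower;

	/* Fixed: the bound value carried in the source record. */
	unsigned int good = (unsigned int)(unsigned long)from.si_lower;

	printf("bad=%#x (a stack address), good=%#x\n", bad, good);
	return 0;
}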
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
index 948443e115c1..4e496379a871 100644
--- a/arch/x86/kernel/traps.c
+++ b/arch/x86/kernel/traps.c
@@ -255,7 +255,7 @@ do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
 		pr_info("%s[%d] trap %s ip:%lx sp:%lx error:%lx",
 			tsk->comm, tsk->pid, str,
 			regs->ip, regs->sp, error_code);
-		print_vma_addr(" in ", regs->ip);
+		print_vma_addr(KERN_CONT " in ", regs->ip);
 		pr_cont("\n");
 	}
 
@@ -519,7 +519,7 @@ do_general_protection(struct pt_regs *regs, long error_code)
 		pr_info("%s[%d] general protection ip:%lx sp:%lx error:%lx",
 			tsk->comm, task_pid_nr(tsk),
 			regs->ip, regs->sp, error_code);
-		print_vma_addr(" in ", regs->ip);
+		print_vma_addr(KERN_CONT " in ", regs->ip);
 		pr_cont("\n");
 	}
 
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 2ee00dbbbd51..259e9b28ccf8 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -8198,6 +8198,9 @@ static bool nested_vmx_exit_handled(struct kvm_vcpu *vcpu)
 		return nested_cpu_has2(vmcs12, SECONDARY_EXEC_XSAVES);
 	case EXIT_REASON_PREEMPTION_TIMER:
 		return false;
+	case EXIT_REASON_PML_FULL:
+		/* We don't expose PML support to L1. */
+		return false;
 	default:
 		return true;
 	}
@@ -10267,6 +10270,18 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
 
 	}
 
+	if (enable_pml) {
+		/*
+		 * Conceptually we want to copy the PML address and index from
+		 * vmcs01 here, and then back to vmcs01 on nested vmexit. But,
+		 * since we always flush the log on each vmexit, this happens
+		 * to be equivalent to simply resetting the fields in vmcs02.
+		 */
+		ASSERT(vmx->pml_pg);
+		vmcs_write64(PML_ADDRESS, page_to_phys(vmx->pml_pg));
+		vmcs_write16(GUEST_PML_INDEX, PML_ENTITY_NUM - 1);
+	}
+
 	if (nested_cpu_has_ept(vmcs12)) {
 		kvm_mmu_unload(vcpu);
 		nested_ept_init_mmu_context(vcpu);
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
index 22af912d66d2..889e7619a091 100644
--- a/arch/x86/mm/init.c
+++ b/arch/x86/mm/init.c
@@ -643,21 +643,40 @@ void __init init_mem_mapping(void)
  * devmem_is_allowed() checks to see if /dev/mem access to a certain address
  * is valid. The argument is a physical page number.
  *
- *
- * On x86, access has to be given to the first megabyte of ram because that area
- * contains BIOS code and data regions used by X and dosemu and similar apps.
- * Access has to be given to non-kernel-ram areas as well, these contain the PCI
- * mmio resources as well as potential bios/acpi data regions.
+ * On x86, access has to be given to the first megabyte of RAM because that
+ * area traditionally contains BIOS code and data regions used by X, dosemu,
+ * and similar apps. Since they map the entire memory range, the whole range
+ * must be allowed (for mapping), but any areas that would otherwise be
+ * disallowed are flagged as being "zero filled" instead of rejected.
+ * Access has to be given to non-kernel-ram areas as well, these contain the
+ * PCI mmio resources as well as potential bios/acpi data regions.
  */
 int devmem_is_allowed(unsigned long pagenr)
 {
-	if (pagenr < 256)
-		return 1;
-	if (iomem_is_exclusive(pagenr << PAGE_SHIFT))
-		return 0;
-	if (!page_is_ram(pagenr))
-		return 1;
-	return 0;
+	if (page_is_ram(pagenr)) {
+		/*
+		 * For disallowed memory regions in the low 1MB range,
+		 * request that the page be shown as all zeros.
+		 */
+		if (pagenr < 256)
+			return 2;
+
+		return 0;
+	}
+
+	/*
+	 * This must follow RAM test, since System RAM is considered a
+	 * restricted resource under CONFIG_STRICT_IOMEM.
+	 */
+	if (iomem_is_exclusive(pagenr << PAGE_SHIFT)) {
+		/* Low 1MB bypasses iomem restrictions. */
+		if (pagenr < 256)
+			return 1;
+
+		return 0;
+	}
+
+	return 1;
 }
 
 void free_init_pages(char *what, unsigned long begin, unsigned long end)
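
Editor's note: devmem_is_allowed() becomes tri-state — 0 refuses access, 1 allows it, 2 allows the access but substitutes zeroes (the /dev/mem hunks later in this series consume that value). A hedged sketch of the contract with stubbed predicates standing in for page_is_ram()/iomem_is_exclusive():

/* Editor's sketch; stubs and values are assumptions. */
#include <stdio.h>

static int page_is_ram_stub(unsigned long pagenr)      { return pagenr < 1024; }
static int iomem_is_exclusive_stub(unsigned long addr) { return 0; }

static int devmem_is_allowed_sketch(unsigned long pagenr)
{
	if (page_is_ram_stub(pagenr))
		return pagenr < 256 ? 2 : 0;	/* zero-fill low 1MB RAM */
	if (iomem_is_exclusive_stub(pagenr << 12))
		return pagenr < 256 ? 1 : 0;	/* low 1MB bypasses iomem */
	return 1;				/* non-RAM, non-exclusive */
}

int main(void)
{
	printf("page 0x10:   %d\n", devmem_is_allowed_sketch(0x10));	/* 2 */
	printf("page 0x300:  %d\n", devmem_is_allowed_sketch(0x300));	/* 0 */
	printf("page 0x4000: %d\n", devmem_is_allowed_sketch(0x4000));	/* 1 */
	return 0;
}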
diff --git a/arch/x86/platform/efi/quirks.c b/arch/x86/platform/efi/quirks.c
index 30031d5293c4..cdfe8c628959 100644
--- a/arch/x86/platform/efi/quirks.c
+++ b/arch/x86/platform/efi/quirks.c
@@ -201,6 +201,10 @@ void __init efi_arch_mem_reserve(phys_addr_t addr, u64 size)
 		return;
 	}
 
+	/* No need to reserve regions that will never be freed. */
+	if (md.attribute & EFI_MEMORY_RUNTIME)
+		return;
+
 	size += addr % EFI_PAGE_SIZE;
 	size = round_up(size, EFI_PAGE_SIZE);
 	addr = round_down(addr, EFI_PAGE_SIZE);
diff --git a/block/blk-mq-sched.c b/block/blk-mq-sched.c
index 09af8ff18719..c974a1bbf4cb 100644
--- a/block/blk-mq-sched.c
+++ b/block/blk-mq-sched.c
@@ -171,7 +171,8 @@ void blk_mq_sched_put_request(struct request *rq)
 
 void blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx)
 {
-	struct elevator_queue *e = hctx->queue->elevator;
+	struct request_queue *q = hctx->queue;
+	struct elevator_queue *e = q->elevator;
 	const bool has_sched_dispatch = e && e->type->ops.mq.dispatch_request;
 	bool did_work = false;
 	LIST_HEAD(rq_list);
@@ -203,10 +204,10 @@ void blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx)
 	 */
 	if (!list_empty(&rq_list)) {
 		blk_mq_sched_mark_restart_hctx(hctx);
-		did_work = blk_mq_dispatch_rq_list(hctx, &rq_list);
+		did_work = blk_mq_dispatch_rq_list(q, &rq_list);
 	} else if (!has_sched_dispatch) {
 		blk_mq_flush_busy_ctxs(hctx, &rq_list);
-		blk_mq_dispatch_rq_list(hctx, &rq_list);
+		blk_mq_dispatch_rq_list(q, &rq_list);
 	}
 
 	/*
@@ -222,7 +223,7 @@ void blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx)
 			if (!rq)
 				break;
 			list_add(&rq->queuelist, &rq_list);
-		} while (blk_mq_dispatch_rq_list(hctx, &rq_list));
+		} while (blk_mq_dispatch_rq_list(q, &rq_list));
 	}
 }
 
@@ -317,25 +318,68 @@ static bool blk_mq_sched_bypass_insert(struct blk_mq_hw_ctx *hctx,
 	return true;
 }
 
-static void blk_mq_sched_restart_hctx(struct blk_mq_hw_ctx *hctx)
+static bool blk_mq_sched_restart_hctx(struct blk_mq_hw_ctx *hctx)
 {
 	if (test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state)) {
 		clear_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);
-		if (blk_mq_hctx_has_pending(hctx))
+		if (blk_mq_hctx_has_pending(hctx)) {
 			blk_mq_run_hw_queue(hctx, true);
+			return true;
+		}
 	}
+	return false;
 }
 
-void blk_mq_sched_restart_queues(struct blk_mq_hw_ctx *hctx)
-{
-	struct request_queue *q = hctx->queue;
-	unsigned int i;
+/**
+ * list_for_each_entry_rcu_rr - iterate in a round-robin fashion over rcu list
+ * @pos:    loop cursor.
+ * @skip:   the list element that will not be examined. Iteration starts at
+ *          @skip->next.
+ * @head:   head of the list to examine. This list must have at least one
+ *          element, namely @skip.
+ * @member: name of the list_head structure within typeof(*pos).
+ */
+#define list_for_each_entry_rcu_rr(pos, skip, head, member)		\
+	for ((pos) = (skip);						\
+	     (pos = (pos)->member.next != (head) ? list_entry_rcu(	\
+			(pos)->member.next, typeof(*pos), member) :	\
+		  list_entry_rcu((pos)->member.next->next, typeof(*pos), member)), \
+	     (pos) != (skip); )
 
-	if (test_bit(QUEUE_FLAG_RESTART, &q->queue_flags)) {
-		if (test_and_clear_bit(QUEUE_FLAG_RESTART, &q->queue_flags)) {
-			queue_for_each_hw_ctx(q, hctx, i)
-				blk_mq_sched_restart_hctx(hctx);
+/*
+ * Called after a driver tag has been freed to check whether a hctx needs to
+ * be restarted. Restarts @hctx if its tag set is not shared. Restarts hardware
+ * queues in a round-robin fashion if the tag set of @hctx is shared with other
+ * hardware queues.
+ */
+void blk_mq_sched_restart(struct blk_mq_hw_ctx *const hctx)
+{
+	struct blk_mq_tags *const tags = hctx->tags;
+	struct blk_mq_tag_set *const set = hctx->queue->tag_set;
+	struct request_queue *const queue = hctx->queue, *q;
+	struct blk_mq_hw_ctx *hctx2;
+	unsigned int i, j;
+
+	if (set->flags & BLK_MQ_F_TAG_SHARED) {
+		rcu_read_lock();
+		list_for_each_entry_rcu_rr(q, queue, &set->tag_list,
+					   tag_set_list) {
+			queue_for_each_hw_ctx(q, hctx2, i)
+				if (hctx2->tags == tags &&
+				    blk_mq_sched_restart_hctx(hctx2))
+					goto done;
+		}
+		j = hctx->queue_num + 1;
+		for (i = 0; i < queue->nr_hw_queues; i++, j++) {
+			if (j == queue->nr_hw_queues)
+				j = 0;
+			hctx2 = queue->queue_hw_ctx[j];
+			if (hctx2->tags == tags &&
+			    blk_mq_sched_restart_hctx(hctx2))
+				break;
 		}
+done:
+		rcu_read_unlock();
 	} else {
 		blk_mq_sched_restart_hctx(hctx);
 	}
@@ -431,11 +475,67 @@ static void blk_mq_sched_free_tags(struct blk_mq_tag_set *set,
 	}
 }
 
-int blk_mq_sched_setup(struct request_queue *q)
+static int blk_mq_sched_alloc_tags(struct request_queue *q,
+				   struct blk_mq_hw_ctx *hctx,
+				   unsigned int hctx_idx)
+{
+	struct blk_mq_tag_set *set = q->tag_set;
+	int ret;
+
+	hctx->sched_tags = blk_mq_alloc_rq_map(set, hctx_idx, q->nr_requests,
+					       set->reserved_tags);
+	if (!hctx->sched_tags)
+		return -ENOMEM;
+
+	ret = blk_mq_alloc_rqs(set, hctx->sched_tags, hctx_idx, q->nr_requests);
+	if (ret)
+		blk_mq_sched_free_tags(set, hctx, hctx_idx);
+
+	return ret;
+}
+
+static void blk_mq_sched_tags_teardown(struct request_queue *q)
 {
 	struct blk_mq_tag_set *set = q->tag_set;
 	struct blk_mq_hw_ctx *hctx;
-	int ret, i;
+	int i;
+
+	queue_for_each_hw_ctx(q, hctx, i)
+		blk_mq_sched_free_tags(set, hctx, i);
+}
+
+int blk_mq_sched_init_hctx(struct request_queue *q, struct blk_mq_hw_ctx *hctx,
+			   unsigned int hctx_idx)
+{
+	struct elevator_queue *e = q->elevator;
+
+	if (!e)
+		return 0;
+
+	return blk_mq_sched_alloc_tags(q, hctx, hctx_idx);
+}
+
+void blk_mq_sched_exit_hctx(struct request_queue *q, struct blk_mq_hw_ctx *hctx,
+			    unsigned int hctx_idx)
+{
+	struct elevator_queue *e = q->elevator;
+
+	if (!e)
+		return;
+
+	blk_mq_sched_free_tags(q->tag_set, hctx, hctx_idx);
+}
+
+int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e)
+{
+	struct blk_mq_hw_ctx *hctx;
+	unsigned int i;
+	int ret;
+
+	if (!e) {
+		q->elevator = NULL;
+		return 0;
+	}
 
 	/*
 	 * Default to 256, since we don't split into sync/async like the
@@ -443,49 +543,30 @@ int blk_mq_sched_setup(struct request_queue *q)
 	 */
 	q->nr_requests = 2 * BLKDEV_MAX_RQ;
 
-	/*
-	 * We're switching to using an IO scheduler, so setup the hctx
-	 * scheduler tags and switch the request map from the regular
-	 * tags to scheduler tags. First allocate what we need, so we
-	 * can safely fail and fallback, if needed.
-	 */
-	ret = 0;
 	queue_for_each_hw_ctx(q, hctx, i) {
-		hctx->sched_tags = blk_mq_alloc_rq_map(set, i,
-				q->nr_requests, set->reserved_tags);
-		if (!hctx->sched_tags) {
-			ret = -ENOMEM;
-			break;
-		}
-		ret = blk_mq_alloc_rqs(set, hctx->sched_tags, i, q->nr_requests);
+		ret = blk_mq_sched_alloc_tags(q, hctx, i);
 		if (ret)
-			break;
+			goto err;
 	}
 
-	/*
-	 * If we failed, free what we did allocate
-	 */
-	if (ret) {
-		queue_for_each_hw_ctx(q, hctx, i) {
-			if (!hctx->sched_tags)
-				continue;
-			blk_mq_sched_free_tags(set, hctx, i);
-		}
-
-		return ret;
-	}
+	ret = e->ops.mq.init_sched(q, e);
+	if (ret)
+		goto err;
 
 	return 0;
+
+err:
+	blk_mq_sched_tags_teardown(q);
+	q->elevator = NULL;
+	return ret;
 }
 
-void blk_mq_sched_teardown(struct request_queue *q)
+void blk_mq_exit_sched(struct request_queue *q, struct elevator_queue *e)
 {
-	struct blk_mq_tag_set *set = q->tag_set;
-	struct blk_mq_hw_ctx *hctx;
-	int i;
-
-	queue_for_each_hw_ctx(q, hctx, i)
-		blk_mq_sched_free_tags(set, hctx, i);
+	if (e->type->ops.mq.exit_sched)
+		e->type->ops.mq.exit_sched(e);
+	blk_mq_sched_tags_teardown(q);
+	q->elevator = NULL;
 }
 
 int blk_mq_sched_init(struct request_queue *q)
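
Editor's note: the restart rework above scans the other hardware queues round-robin, starting just after the current one, so one busy hctx cannot permanently starve its siblings when a tag set is shared. A toy array-based analogue of that scan (the kernel walks an RCU-protected list of request queues instead):

/* Editor's sketch, not block-layer code. */
#include <stdio.h>

#define NR_HW_QUEUES 4

static int restart_one(int idx)
{
	return idx == 2;	/* pretend only queue 2 has work pending */
}

int main(void)
{
	int cur = 3;		/* the hctx that just freed a tag */
	int i, j = (cur + 1) % NR_HW_QUEUES;

	/* Visit every queue once, beginning after the current one. */
	for (i = 0; i < NR_HW_QUEUES; i++, j = (j + 1) % NR_HW_QUEUES) {
		if (restart_one(j)) {
			printf("restarted queue %d\n", j);
			break;
		}
	}
	return 0;
}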
diff --git a/block/blk-mq-sched.h b/block/blk-mq-sched.h
index a75b16b123f7..3a9e6e40558b 100644
--- a/block/blk-mq-sched.h
+++ b/block/blk-mq-sched.h
@@ -19,7 +19,7 @@ bool blk_mq_sched_try_merge(struct request_queue *q, struct bio *bio,
 				struct request **merged_request);
 bool __blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio);
 bool blk_mq_sched_try_insert_merge(struct request_queue *q, struct request *rq);
-void blk_mq_sched_restart_queues(struct blk_mq_hw_ctx *hctx);
+void blk_mq_sched_restart(struct blk_mq_hw_ctx *hctx);
 
 void blk_mq_sched_insert_request(struct request *rq, bool at_head,
 				 bool run_queue, bool async, bool can_block);
@@ -32,8 +32,13 @@ void blk_mq_sched_move_to_dispatch(struct blk_mq_hw_ctx *hctx,
 			struct list_head *rq_list,
 			struct request *(*get_rq)(struct blk_mq_hw_ctx *));
 
-int blk_mq_sched_setup(struct request_queue *q);
-void blk_mq_sched_teardown(struct request_queue *q);
+int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e);
+void blk_mq_exit_sched(struct request_queue *q, struct elevator_queue *e);
+
+int blk_mq_sched_init_hctx(struct request_queue *q, struct blk_mq_hw_ctx *hctx,
+			   unsigned int hctx_idx);
+void blk_mq_sched_exit_hctx(struct request_queue *q, struct blk_mq_hw_ctx *hctx,
+			    unsigned int hctx_idx);
 
 int blk_mq_sched_init(struct request_queue *q);
 
@@ -131,20 +136,6 @@ static inline void blk_mq_sched_mark_restart_hctx(struct blk_mq_hw_ctx *hctx)
 		set_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);
 }
 
-/*
- * Mark a hardware queue and the request queue it belongs to as needing a
- * restart.
- */
-static inline void blk_mq_sched_mark_restart_queue(struct blk_mq_hw_ctx *hctx)
-{
-	struct request_queue *q = hctx->queue;
-
-	if (!test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
-		set_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);
-	if (!test_bit(QUEUE_FLAG_RESTART, &q->queue_flags))
-		set_bit(QUEUE_FLAG_RESTART, &q->queue_flags);
-}
-
 static inline bool blk_mq_sched_needs_restart(struct blk_mq_hw_ctx *hctx)
 {
 	return test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 6b6e7bc041db..572966f49596 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -321,7 +321,6 @@ struct request *blk_mq_alloc_request_hctx(struct request_queue *q, int rw,
 
 	rq = blk_mq_sched_get_request(q, NULL, rw, &alloc_data);
 
-	blk_mq_put_ctx(alloc_data.ctx);
 	blk_queue_exit(q);
 
 	if (!rq)
@@ -349,7 +348,7 @@ void __blk_mq_finish_request(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
 		blk_mq_put_tag(hctx, hctx->tags, ctx, rq->tag);
 	if (sched_tag != -1)
 		blk_mq_sched_completed_request(hctx, rq);
-	blk_mq_sched_restart_queues(hctx);
+	blk_mq_sched_restart(hctx);
 	blk_queue_exit(q);
 }
 
@@ -846,12 +845,8 @@ bool blk_mq_get_driver_tag(struct request *rq, struct blk_mq_hw_ctx **hctx,
 		.flags = wait ? 0 : BLK_MQ_REQ_NOWAIT,
 	};
 
-	if (rq->tag != -1) {
-done:
-		if (hctx)
-			*hctx = data.hctx;
-		return true;
-	}
+	if (rq->tag != -1)
+		goto done;
 
 	if (blk_mq_tag_is_reserved(data.hctx->sched_tags, rq->internal_tag))
 		data.flags |= BLK_MQ_REQ_RESERVED;
@@ -863,10 +858,12 @@ done:
 			atomic_inc(&data.hctx->nr_active);
 		}
 		data.hctx->tags->rqs[rq->tag] = rq;
-		goto done;
 	}
 
-	return false;
+done:
+	if (hctx)
+		*hctx = data.hctx;
+	return rq->tag != -1;
 }
 
 static void __blk_mq_put_driver_tag(struct blk_mq_hw_ctx *hctx,
@@ -963,14 +960,17 @@ static bool blk_mq_dispatch_wait_add(struct blk_mq_hw_ctx *hctx)
 	return true;
 }
 
-bool blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx *hctx, struct list_head *list)
+bool blk_mq_dispatch_rq_list(struct request_queue *q, struct list_head *list)
 {
-	struct request_queue *q = hctx->queue;
+	struct blk_mq_hw_ctx *hctx;
 	struct request *rq;
 	LIST_HEAD(driver_list);
 	struct list_head *dptr;
 	int errors, queued, ret = BLK_MQ_RQ_QUEUE_OK;
 
+	if (list_empty(list))
+		return false;
+
 	/*
 	 * Start off with dptr being NULL, so we start the first request
 	 * immediately, even if we have more pending.
@@ -981,7 +981,7 @@ bool blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx *hctx, struct list_head *list)
 	 * Now process all the entries, sending them to the driver.
 	 */
 	errors = queued = 0;
-	while (!list_empty(list)) {
+	do {
 		struct blk_mq_queue_data bd;
 
 		rq = list_first_entry(list, struct request, queuelist);
@@ -1052,7 +1052,7 @@ bool blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx *hctx, struct list_head *list)
 		 */
 		if (!dptr && list->next != list->prev)
 			dptr = &driver_list;
-	}
+	} while (!list_empty(list));
 
 	hctx->dispatched[queued_to_index(queued)]++;
 
@@ -1135,7 +1135,8 @@ static int blk_mq_hctx_next_cpu(struct blk_mq_hw_ctx *hctx)
 	return hctx->next_cpu;
 }
 
-void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
+static void __blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async,
+					unsigned long msecs)
 {
 	if (unlikely(blk_mq_hctx_stopped(hctx) ||
 		     !blk_mq_hw_queue_mapped(hctx)))
@@ -1152,7 +1153,24 @@ void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
 		put_cpu();
 	}
 
-	kblockd_schedule_work_on(blk_mq_hctx_next_cpu(hctx), &hctx->run_work);
+	if (msecs == 0)
+		kblockd_schedule_work_on(blk_mq_hctx_next_cpu(hctx),
+					 &hctx->run_work);
+	else
+		kblockd_schedule_delayed_work_on(blk_mq_hctx_next_cpu(hctx),
+						 &hctx->delayed_run_work,
+						 msecs_to_jiffies(msecs));
+}
+
+void blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs)
+{
+	__blk_mq_delay_run_hw_queue(hctx, true, msecs);
+}
+EXPORT_SYMBOL(blk_mq_delay_run_hw_queue);
+
+void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
+{
+	__blk_mq_delay_run_hw_queue(hctx, async, 0);
 }
 
 void blk_mq_run_hw_queues(struct request_queue *q, bool async)
@@ -1255,6 +1273,15 @@ static void blk_mq_run_work_fn(struct work_struct *work)
 	__blk_mq_run_hw_queue(hctx);
 }
 
+static void blk_mq_delayed_run_work_fn(struct work_struct *work)
+{
+	struct blk_mq_hw_ctx *hctx;
+
+	hctx = container_of(work, struct blk_mq_hw_ctx, delayed_run_work.work);
+
+	__blk_mq_run_hw_queue(hctx);
+}
+
 static void blk_mq_delay_work_fn(struct work_struct *work)
 {
 	struct blk_mq_hw_ctx *hctx;
@@ -1924,6 +1951,8 @@ static void blk_mq_exit_hctx(struct request_queue *q,
 					hctx->fq->flush_rq, hctx_idx,
 					flush_start_tag + hctx_idx);
 
+	blk_mq_sched_exit_hctx(q, hctx, hctx_idx);
+
 	if (set->ops->exit_hctx)
 		set->ops->exit_hctx(hctx, hctx_idx);
 
@@ -1960,6 +1989,7 @@ static int blk_mq_init_hctx(struct request_queue *q,
 		node = hctx->numa_node = set->numa_node;
 
 	INIT_WORK(&hctx->run_work, blk_mq_run_work_fn);
+	INIT_DELAYED_WORK(&hctx->delayed_run_work, blk_mq_delayed_run_work_fn);
 	INIT_DELAYED_WORK(&hctx->delay_work, blk_mq_delay_work_fn);
 	spin_lock_init(&hctx->lock);
 	INIT_LIST_HEAD(&hctx->dispatch);
@@ -1990,9 +2020,12 @@ static int blk_mq_init_hctx(struct request_queue *q,
 	    set->ops->init_hctx(hctx, set->driver_data, hctx_idx))
 		goto free_bitmap;
 
+	if (blk_mq_sched_init_hctx(q, hctx, hctx_idx))
+		goto exit_hctx;
+
 	hctx->fq = blk_alloc_flush_queue(q, hctx->numa_node, set->cmd_size);
 	if (!hctx->fq)
-		goto exit_hctx;
+		goto sched_exit_hctx;
 
 	if (set->ops->init_request &&
 	    set->ops->init_request(set->driver_data,
@@ -2007,6 +2040,8 @@ static int blk_mq_init_hctx(struct request_queue *q,
 
  free_fq:
 	kfree(hctx->fq);
+ sched_exit_hctx:
+	blk_mq_sched_exit_hctx(q, hctx, hctx_idx);
  exit_hctx:
 	if (set->ops->exit_hctx)
 		set->ops->exit_hctx(hctx, hctx_idx);
@@ -2233,8 +2268,6 @@ void blk_mq_release(struct request_queue *q)
 	struct blk_mq_hw_ctx *hctx;
 	unsigned int i;
 
-	blk_mq_sched_teardown(q);
-
 	/* hctx kobj stays in hctx */
 	queue_for_each_hw_ctx(q, hctx, i) {
 		if (!hctx)
@@ -2565,6 +2598,14 @@ static int blk_mq_alloc_rq_maps(struct blk_mq_tag_set *set)
 	return 0;
 }
 
+static int blk_mq_update_queue_map(struct blk_mq_tag_set *set)
+{
+	if (set->ops->map_queues)
+		return set->ops->map_queues(set);
+	else
+		return blk_mq_map_queues(set);
+}
+
 /*
  * Alloc a tag set to be associated with one or more request queues.
  * May fail with EINVAL for various error conditions. May adjust the
@@ -2619,10 +2660,7 @@ int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set)
 	if (!set->mq_map)
 		goto out_free_tags;
 
-	if (set->ops->map_queues)
-		ret = set->ops->map_queues(set);
-	else
-		ret = blk_mq_map_queues(set);
+	ret = blk_mq_update_queue_map(set);
 	if (ret)
 		goto out_free_mq_map;
 
@@ -2714,6 +2752,7 @@ void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues)
 		blk_mq_freeze_queue(q);
 
 	set->nr_hw_queues = nr_hw_queues;
+	blk_mq_update_queue_map(set);
 	list_for_each_entry(q, &set->tag_list, tag_set_list) {
 		blk_mq_realloc_hw_ctxs(set, q);
 
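
Editor's note: blk_mq_dispatch_rq_list() above now returns early on an empty list, which lets its body become a do/while and lets callers simply loop "while (dispatch(list))". A toy runnable analogue of hoisting the emptiness check out of the loop:

/* Editor's sketch of the refactor shape, not block-layer code. */
#include <stdio.h>

static int dispatch(int *list, int *len)
{
	if (*len == 0)		/* hoisted empty check */
		return 0;
	do {
		printf("dispatch item %d\n", list[--*len]);
	} while (*len > 0);
	return 1;		/* made progress */
}

int main(void)
{
	int list[] = { 3, 2, 1 };
	int len = 3;

	while (dispatch(list, &len))
		;		/* safe: returns 0 once the list is empty */
	return 0;
}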
diff --git a/block/blk-mq.h b/block/blk-mq.h
index b79f9a7d8cf6..660a17e1d033 100644
--- a/block/blk-mq.h
+++ b/block/blk-mq.h
@@ -31,7 +31,7 @@ void blk_mq_freeze_queue(struct request_queue *q);
 void blk_mq_free_queue(struct request_queue *q);
 int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr);
 void blk_mq_wake_waiters(struct request_queue *q);
-bool blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx *, struct list_head *);
+bool blk_mq_dispatch_rq_list(struct request_queue *, struct list_head *);
 void blk_mq_flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list);
 bool blk_mq_hctx_has_pending(struct blk_mq_hw_ctx *hctx);
 bool blk_mq_get_driver_tag(struct request *rq, struct blk_mq_hw_ctx **hctx,
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index c44b321335f3..37f0b3ad635e 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -816,7 +816,7 @@ static void blk_release_queue(struct kobject *kobj)
 
 	if (q->elevator) {
 		ioc_clear_queue(q);
-		elevator_exit(q->elevator);
+		elevator_exit(q, q->elevator);
 	}
 
 	blk_exit_rl(&q->root_rl);
diff --git a/block/elevator.c b/block/elevator.c
index 01139f549b5b..dbeecf7be719 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -242,26 +242,21 @@ int elevator_init(struct request_queue *q, char *name)
 		}
 	}
 
-	if (e->uses_mq) {
-		err = blk_mq_sched_setup(q);
-		if (!err)
-			err = e->ops.mq.init_sched(q, e);
-	} else
+	if (e->uses_mq)
+		err = blk_mq_init_sched(q, e);
+	else
 		err = e->ops.sq.elevator_init_fn(q, e);
-	if (err) {
-		if (e->uses_mq)
-			blk_mq_sched_teardown(q);
+	if (err)
 		elevator_put(e);
-	}
 	return err;
 }
 EXPORT_SYMBOL(elevator_init);
 
-void elevator_exit(struct elevator_queue *e)
+void elevator_exit(struct request_queue *q, struct elevator_queue *e)
 {
 	mutex_lock(&e->sysfs_lock);
 	if (e->uses_mq && e->type->ops.mq.exit_sched)
-		e->type->ops.mq.exit_sched(e);
+		blk_mq_exit_sched(q, e);
 	else if (!e->uses_mq && e->type->ops.sq.elevator_exit_fn)
 		e->type->ops.sq.elevator_exit_fn(e);
 	mutex_unlock(&e->sysfs_lock);
@@ -946,6 +941,45 @@ void elv_unregister(struct elevator_type *e)
 }
 EXPORT_SYMBOL_GPL(elv_unregister);
 
+static int elevator_switch_mq(struct request_queue *q,
+			      struct elevator_type *new_e)
+{
+	int ret;
+
+	blk_mq_freeze_queue(q);
+	blk_mq_quiesce_queue(q);
+
+	if (q->elevator) {
+		if (q->elevator->registered)
+			elv_unregister_queue(q);
+		ioc_clear_queue(q);
+		elevator_exit(q, q->elevator);
+	}
+
+	ret = blk_mq_init_sched(q, new_e);
+	if (ret)
+		goto out;
+
+	if (new_e) {
+		ret = elv_register_queue(q);
+		if (ret) {
+			elevator_exit(q, q->elevator);
+			goto out;
+		}
+	}
+
+	if (new_e)
+		blk_add_trace_msg(q, "elv switch: %s", new_e->elevator_name);
+	else
+		blk_add_trace_msg(q, "elv switch: none");
+
+out:
+	blk_mq_unfreeze_queue(q);
+	blk_mq_start_stopped_hw_queues(q, true);
+	return ret;
+
+}
+
 /*
  * switch to new_e io scheduler. be careful not to introduce deadlocks -
  * we don't free the old io scheduler, before we have allocated what we
@@ -958,10 +992,8 @@ static int elevator_switch(struct request_queue *q, struct elevator_type *new_e)
 	bool old_registered = false;
 	int err;
 
-	if (q->mq_ops) {
-		blk_mq_freeze_queue(q);
-		blk_mq_quiesce_queue(q);
-	}
+	if (q->mq_ops)
+		return elevator_switch_mq(q, new_e);
 
 	/*
 	 * Turn on BYPASS and drain all requests w/ elevator private data.
@@ -973,11 +1005,7 @@ static int elevator_switch(struct request_queue *q, struct elevator_type *new_e)
 	if (old) {
 		old_registered = old->registered;
 
-		if (old->uses_mq)
-			blk_mq_sched_teardown(q);
-
-		if (!q->mq_ops)
-			blk_queue_bypass_start(q);
+		blk_queue_bypass_start(q);
 
 		/* unregister and clear all auxiliary data of the old elevator */
 		if (old_registered)
@@ -987,56 +1015,32 @@ static int elevator_switch(struct request_queue *q, struct elevator_type *new_e)
 	}
 
 	/* allocate, init and register new elevator */
-	if (new_e) {
-		if (new_e->uses_mq) {
-			err = blk_mq_sched_setup(q);
-			if (!err)
-				err = new_e->ops.mq.init_sched(q, new_e);
-		} else
-			err = new_e->ops.sq.elevator_init_fn(q, new_e);
-		if (err)
-			goto fail_init;
-
-		err = elv_register_queue(q);
-		if (err)
-			goto fail_register;
-	} else
-		q->elevator = NULL;
+	err = new_e->ops.sq.elevator_init_fn(q, new_e);
+	if (err)
+		goto fail_init;
+
+	err = elv_register_queue(q);
+	if (err)
+		goto fail_register;
 
 	/* done, kill the old one and finish */
 	if (old) {
-		elevator_exit(old);
-		if (!q->mq_ops)
-			blk_queue_bypass_end(q);
+		elevator_exit(q, old);
+		blk_queue_bypass_end(q);
 	}
 
-	if (q->mq_ops) {
-		blk_mq_unfreeze_queue(q);
-		blk_mq_start_stopped_hw_queues(q, true);
-	}
-
-	if (new_e)
-		blk_add_trace_msg(q, "elv switch: %s", new_e->elevator_name);
-	else
-		blk_add_trace_msg(q, "elv switch: none");
+	blk_add_trace_msg(q, "elv switch: %s", new_e->elevator_name);
 
 	return 0;
 
 fail_register:
-	if (q->mq_ops)
-		blk_mq_sched_teardown(q);
-	elevator_exit(q->elevator);
+	elevator_exit(q, q->elevator);
 fail_init:
 	/* switch failed, restore and re-register old elevator */
 	if (old) {
 		q->elevator = old;
 		elv_register_queue(q);
-		if (!q->mq_ops)
-			blk_queue_bypass_end(q);
-	}
-	if (q->mq_ops) {
-		blk_mq_unfreeze_queue(q);
-		blk_mq_start_stopped_hw_queues(q, true);
+		blk_queue_bypass_end(q);
 	}
 
 	return err;
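
Editor's note: elevator_switch_mq() above is a textbook freeze-then-unwind shape — the queue is frozen and quiesced first, and every failure path funnels through one label that unfreezes it. A minimal runnable sketch of that goto-out pattern (stub functions are placeholders, not block-layer APIs):

/* Editor's sketch of the unwind shape only. */
#include <stdio.h>

static void freeze(void)   { printf("freeze\n"); }
static void unfreeze(void) { printf("unfreeze\n"); }
static int  init_sched(int fail) { return fail ? -1 : 0; }

static int switch_sched(int fail)
{
	int ret;

	freeze();
	ret = init_sched(fail);
	if (ret)
		goto out;	/* no early return: unfreeze must run */
	printf("switched\n");
out:
	unfreeze();
	return ret;
}

int main(void)
{
	switch_sched(0);	/* freeze, switched, unfreeze */
	switch_sched(1);	/* freeze, unfreeze */
	return 0;
}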
diff --git a/drivers/acpi/acpica/utresrc.c b/drivers/acpi/acpica/utresrc.c
index c86bae7b1d0f..ff096d9755b9 100644
--- a/drivers/acpi/acpica/utresrc.c
+++ b/drivers/acpi/acpica/utresrc.c
@@ -421,10 +421,8 @@ acpi_ut_walk_aml_resources(struct acpi_walk_state *walk_state,
 
 	ACPI_FUNCTION_TRACE(ut_walk_aml_resources);
 
-	/*
-	 * The absolute minimum resource template is one end_tag descriptor.
-	 * However, we will treat a lone end_tag as just a simple buffer.
-	 */
+	/* The absolute minimum resource template is one end_tag descriptor */
+
 	if (aml_length < sizeof(struct aml_resource_end_tag)) {
 		return_ACPI_STATUS(AE_AML_NO_RESOURCE_END_TAG);
 	}
@@ -456,8 +454,9 @@ acpi_ut_walk_aml_resources(struct acpi_walk_state *walk_state,
 		/* Invoke the user function */
 
 		if (user_function) {
-			status = user_function(aml, length, offset,
-					       resource_index, context);
+			status =
+			    user_function(aml, length, offset, resource_index,
+					  context);
 			if (ACPI_FAILURE(status)) {
 				return_ACPI_STATUS(status);
 			}
@@ -481,12 +480,6 @@ acpi_ut_walk_aml_resources(struct acpi_walk_state *walk_state,
 			*context = aml;
 		}
 
-		/* Check if buffer is defined to be longer than the resource length */
-
-		if (aml_length > (offset + length)) {
-			return_ACPI_STATUS(AE_AML_NO_RESOURCE_END_TAG);
-		}
-
 		/* Normal exit */
 
 		return_ACPI_STATUS(AE_OK);
diff --git a/drivers/acpi/glue.c b/drivers/acpi/glue.c
index fb19e1cdb641..edc8663b5db3 100644
--- a/drivers/acpi/glue.c
+++ b/drivers/acpi/glue.c
@@ -99,13 +99,13 @@ static int find_child_checks(struct acpi_device *adev, bool check_children)
 		return -ENODEV;
 
 	/*
-	 * If the device has a _HID (or _CID) returning a valid ACPI/PNP
-	 * device ID, it is better to make it look less attractive here, so that
-	 * the other device with the same _ADR value (that may not have a valid
-	 * device ID) can be matched going forward.  [This means a second spec
-	 * violation in a row, so whatever we do here is best effort anyway.]
+	 * If the device has a _HID returning a valid ACPI/PNP device ID, it is
+	 * better to make it look less attractive here, so that the other device
+	 * with the same _ADR value (that may not have a valid device ID) can be
+	 * matched going forward.  [This means a second spec violation in a row,
+	 * so whatever we do here is best effort anyway.]
 	 */
-	return sta_present && list_empty(&adev->pnp.ids) ?
+	return sta_present && !adev->pnp.type.platform_id ?
 		FIND_CHILD_MAX_SCORE : FIND_CHILD_MIN_SCORE;
 }
 
111 111
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index 192691880d55..2433569b02ef 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -1857,15 +1857,20 @@ static void acpi_bus_attach(struct acpi_device *device)
 		return;
 
 	device->flags.match_driver = true;
-	if (!ret) {
-		ret = device_attach(&device->dev);
-		if (ret < 0)
-			return;
-
-		if (!ret && device->pnp.type.platform_id)
-			acpi_default_enumeration(device);
+	if (ret > 0) {
+		acpi_device_set_enumerated(device);
+		goto ok;
 	}
 
+	ret = device_attach(&device->dev);
+	if (ret < 0)
+		return;
+
+	if (ret > 0 || !device->pnp.type.platform_id)
+		acpi_device_set_enumerated(device);
+	else
+		acpi_default_enumeration(device);
+
  ok:
 	list_for_each_entry(child, &device->children, node)
 		acpi_bus_attach(child);
diff --git a/drivers/ata/pata_atiixp.c b/drivers/ata/pata_atiixp.c
index 6c9aa95a9a05..49d705c9f0f7 100644
--- a/drivers/ata/pata_atiixp.c
+++ b/drivers/ata/pata_atiixp.c
@@ -278,11 +278,6 @@ static int atiixp_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
 	};
 	const struct ata_port_info *ppi[] = { &info, &info };
 
-	/* SB600/700 don't have secondary port wired */
-	if ((pdev->device == PCI_DEVICE_ID_ATI_IXP600_IDE) ||
-	    (pdev->device == PCI_DEVICE_ID_ATI_IXP700_IDE))
-		ppi[1] = &ata_dummy_port_info;
-
 	return ata_pci_bmdma_init_one(pdev, ppi, &atiixp_sht, NULL,
 				      ATA_HOST_PARALLEL_SCAN);
 }
diff --git a/drivers/ata/sata_via.c b/drivers/ata/sata_via.c
index 0636d84fbefe..f3f538eec7b3 100644
--- a/drivers/ata/sata_via.c
+++ b/drivers/ata/sata_via.c
@@ -644,14 +644,16 @@ static void svia_configure(struct pci_dev *pdev, int board_id,
 		pci_write_config_byte(pdev, SATA_NATIVE_MODE, tmp8);
 	}
 
-	/* enable IRQ on hotplug */
-	pci_read_config_byte(pdev, SVIA_MISC_3, &tmp8);
-	if ((tmp8 & SATA_HOTPLUG) != SATA_HOTPLUG) {
-		dev_dbg(&pdev->dev,
-			"enabling SATA hotplug (0x%x)\n",
-			(int) tmp8);
-		tmp8 |= SATA_HOTPLUG;
-		pci_write_config_byte(pdev, SVIA_MISC_3, tmp8);
+	if (board_id == vt6421) {
+		/* enable IRQ on hotplug */
+		pci_read_config_byte(pdev, SVIA_MISC_3, &tmp8);
+		if ((tmp8 & SATA_HOTPLUG) != SATA_HOTPLUG) {
+			dev_dbg(&pdev->dev,
+				"enabling SATA hotplug (0x%x)\n",
+				(int) tmp8);
+			tmp8 |= SATA_HOTPLUG;
+			pci_write_config_byte(pdev, SVIA_MISC_3, tmp8);
+		}
 	}
 
 	/*
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index dceb5edd1e54..0c09d4256108 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -523,7 +523,7 @@ static int zram_decompress_page(struct zram *zram, char *mem, u32 index)
 
 	cmem = zs_map_object(meta->mem_pool, handle, ZS_MM_RO);
 	if (size == PAGE_SIZE) {
-		copy_page(mem, cmem);
+		memcpy(mem, cmem, PAGE_SIZE);
 	} else {
 		struct zcomp_strm *zstrm = zcomp_stream_get(zram->comp);
 
@@ -717,7 +717,7 @@ compress_again:
 
 	if ((clen == PAGE_SIZE) && !is_partial_io(bvec)) {
 		src = kmap_atomic(page);
-		copy_page(cmem, src);
+		memcpy(cmem, src, PAGE_SIZE);
 		kunmap_atomic(src);
 	} else {
 		memcpy(cmem, src, clen);
@@ -928,7 +928,7 @@ static int zram_rw_page(struct block_device *bdev, sector_t sector,
 	}
 
 	index = sector >> SECTORS_PER_PAGE_SHIFT;
-	offset = sector & (SECTORS_PER_PAGE - 1) << SECTOR_SHIFT;
+	offset = (sector & (SECTORS_PER_PAGE - 1)) << SECTOR_SHIFT;
 
 	bv.bv_page = page;
 	bv.bv_len = PAGE_SIZE;
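
Editor's note: the zram offset fix above is pure operator precedence — in C, << binds tighter than &, so the old expression masked the sector with "(SECTORS_PER_PAGE - 1) << SECTOR_SHIFT" instead of shifting the masked sector. A runnable demonstration (constants mirror 512-byte sectors on 4K pages):

/* Editor's sketch, not the driver code. */
#include <stdio.h>

#define SECTOR_SHIFT		9
#define SECTORS_PER_PAGE	8	/* 4096 / 512 */

int main(void)
{
	unsigned long sector = 13;

	unsigned long buggy = sector & (SECTORS_PER_PAGE - 1) << SECTOR_SHIFT;
	unsigned long fixed = (sector & (SECTORS_PER_PAGE - 1)) << SECTOR_SHIFT;

	printf("buggy=%lu fixed=%lu\n", buggy, fixed);	/* 0 vs 2560 */
	return 0;
}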
diff --git a/drivers/char/mem.c b/drivers/char/mem.c
index 6d9cc2d39d22..7e4a9d1296bb 100644
--- a/drivers/char/mem.c
+++ b/drivers/char/mem.c
@@ -60,6 +60,10 @@ static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t size)
 #endif
 
 #ifdef CONFIG_STRICT_DEVMEM
+static inline int page_is_allowed(unsigned long pfn)
+{
+	return devmem_is_allowed(pfn);
+}
 static inline int range_is_allowed(unsigned long pfn, unsigned long size)
 {
 	u64 from = ((u64)pfn) << PAGE_SHIFT;
@@ -75,6 +79,10 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
 	return 1;
 }
 #else
+static inline int page_is_allowed(unsigned long pfn)
+{
+	return 1;
+}
 static inline int range_is_allowed(unsigned long pfn, unsigned long size)
 {
 	return 1;
@@ -122,23 +130,31 @@ static ssize_t read_mem(struct file *file, char __user *buf,
 
 	while (count > 0) {
 		unsigned long remaining;
+		int allowed;
 
 		sz = size_inside_page(p, count);
 
-		if (!range_is_allowed(p >> PAGE_SHIFT, count))
+		allowed = page_is_allowed(p >> PAGE_SHIFT);
+		if (!allowed)
 			return -EPERM;
+		if (allowed == 2) {
+			/* Show zeros for restricted memory. */
+			remaining = clear_user(buf, sz);
+		} else {
+			/*
+			 * On ia64 if a page has been mapped somewhere as
+			 * uncached, then it must also be accessed uncached
+			 * by the kernel or data corruption may occur.
+			 */
+			ptr = xlate_dev_mem_ptr(p);
+			if (!ptr)
+				return -EFAULT;
 
-		/*
-		 * On ia64 if a page has been mapped somewhere as uncached, then
-		 * it must also be accessed uncached by the kernel or data
-		 * corruption may occur.
-		 */
-		ptr = xlate_dev_mem_ptr(p);
-		if (!ptr)
-			return -EFAULT;
+			remaining = copy_to_user(buf, ptr, sz);
+
+			unxlate_dev_mem_ptr(p, ptr);
+		}
 
-		remaining = copy_to_user(buf, ptr, sz);
-		unxlate_dev_mem_ptr(p, ptr);
 		if (remaining)
 			return -EFAULT;
 
@@ -181,30 +197,36 @@ static ssize_t write_mem(struct file *file, const char __user *buf,
 #endif
 
 	while (count > 0) {
+		int allowed;
+
 		sz = size_inside_page(p, count);
 
-		if (!range_is_allowed(p >> PAGE_SHIFT, sz))
+		allowed = page_is_allowed(p >> PAGE_SHIFT);
+		if (!allowed)
 			return -EPERM;
 
-		/*
-		 * On ia64 if a page has been mapped somewhere as uncached, then
-		 * it must also be accessed uncached by the kernel or data
-		 * corruption may occur.
-		 */
-		ptr = xlate_dev_mem_ptr(p);
-		if (!ptr) {
-			if (written)
-				break;
-			return -EFAULT;
-		}
+		/* Skip actual writing when a page is marked as restricted. */
+		if (allowed == 1) {
+			/*
+			 * On ia64 if a page has been mapped somewhere as
+			 * uncached, then it must also be accessed uncached
+			 * by the kernel or data corruption may occur.
+			 */
+			ptr = xlate_dev_mem_ptr(p);
+			if (!ptr) {
+				if (written)
+					break;
+				return -EFAULT;
+			}
 
-		copied = copy_from_user(ptr, buf, sz);
-		unxlate_dev_mem_ptr(p, ptr);
-		if (copied) {
-			written += sz - copied;
-			if (written)
-				break;
-			return -EFAULT;
+			copied = copy_from_user(ptr, buf, sz);
+			unxlate_dev_mem_ptr(p, ptr);
+			if (copied) {
+				written += sz - copied;
+				if (written)
+					break;
+				return -EFAULT;
+			}
 		}
 
 		buf += sz;
index e9b7e0b3cabe..87fe111d0be6 100644
--- a/drivers/char/virtio_console.c
+++ b/drivers/char/virtio_console.c
@@ -2202,14 +2202,16 @@ static int virtcons_freeze(struct virtio_device *vdev)
 
 	vdev->config->reset(vdev);
 
-	virtqueue_disable_cb(portdev->c_ivq);
+	if (use_multiport(portdev))
+		virtqueue_disable_cb(portdev->c_ivq);
 	cancel_work_sync(&portdev->control_work);
 	cancel_work_sync(&portdev->config_work);
 	/*
 	 * Once more: if control_work_handler() was running, it would
 	 * enable the cb as the last step.
 	 */
-	virtqueue_disable_cb(portdev->c_ivq);
+	if (use_multiport(portdev))
+		virtqueue_disable_cb(portdev->c_ivq);
 	remove_controlq_data(portdev);
 
 	list_for_each_entry(port, &portdev->ports, list) {
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index bc96d423781a..0e3f6496524d 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -2398,6 +2398,20 @@ EXPORT_SYMBOL_GPL(cpufreq_boost_enabled);
2398 *********************************************************************/ 2398 *********************************************************************/
2399static enum cpuhp_state hp_online; 2399static enum cpuhp_state hp_online;
2400 2400
2401static int cpuhp_cpufreq_online(unsigned int cpu)
2402{
2403 cpufreq_online(cpu);
2404
2405 return 0;
2406}
2407
2408static int cpuhp_cpufreq_offline(unsigned int cpu)
2409{
2410 cpufreq_offline(cpu);
2411
2412 return 0;
2413}
2414
2401/** 2415/**
2402 * cpufreq_register_driver - register a CPU Frequency driver 2416 * cpufreq_register_driver - register a CPU Frequency driver
 2403 * @driver_data: A struct cpufreq_driver containing the values 2417 * @driver_data: A struct cpufreq_driver containing the values
@@ -2460,8 +2474,8 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
2460 } 2474 }
2461 2475
2462 ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "cpufreq:online", 2476 ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "cpufreq:online",
2463 cpufreq_online, 2477 cpuhp_cpufreq_online,
2464 cpufreq_offline); 2478 cpuhp_cpufreq_offline);
2465 if (ret < 0) 2479 if (ret < 0)
2466 goto err_if_unreg; 2480 goto err_if_unreg;
2467 hp_online = ret; 2481 hp_online = ret;
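
Note on the cpufreq hunk above: cpuhp_setup_state_nocalls() expects callbacks of type int (*)(unsigned int cpu), which cpufreq_online()/cpufreq_offline() do not match, so the patch adds thin adapters that call through and return 0. A runnable sketch of the same adapter pattern; the names and the setup_state() stand-in are illustrative, not the kernel API:

#include <stdio.h>

/* Existing functions whose signatures don't match the callback
 * type the framework wants (illustrative). */
static void subsys_online(unsigned int cpu)
{
	printf("online cpu %u\n", cpu);
}

static void subsys_offline(unsigned int cpu)
{
	printf("offline cpu %u\n", cpu);
}

/* Framework callback type: must return int. */
typedef int (*hp_cb)(unsigned int cpu);

/* Thin adapters, mirroring cpuhp_cpufreq_online/_offline. */
static int hp_online(unsigned int cpu)
{
	subsys_online(cpu);
	return 0;
}

static int hp_offline(unsigned int cpu)
{
	subsys_offline(cpu);
	return 0;
}

/* Stand-in for cpuhp registration. */
static int setup_state(hp_cb on, hp_cb off)
{
	int ret = on(1);

	if (ret)
		return ret;
	return off(1);
}

int main(void)
{
	return setup_state(hp_online, hp_offline);
}
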
diff --git a/drivers/crypto/caam/caampkc.c b/drivers/crypto/caam/caampkc.c
index 32100c4851dd..49cbdcba7883 100644
--- a/drivers/crypto/caam/caampkc.c
+++ b/drivers/crypto/caam/caampkc.c
@@ -506,7 +506,7 @@ static int caam_rsa_init_tfm(struct crypto_akcipher *tfm)
506 ctx->dev = caam_jr_alloc(); 506 ctx->dev = caam_jr_alloc();
507 507
508 if (IS_ERR(ctx->dev)) { 508 if (IS_ERR(ctx->dev)) {
509 dev_err(ctx->dev, "Job Ring Device allocation for transform failed\n"); 509 pr_err("Job Ring Device allocation for transform failed\n");
510 return PTR_ERR(ctx->dev); 510 return PTR_ERR(ctx->dev);
511 } 511 }
512 512
diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c
index fef39f9f41ee..5d7f73d60515 100644
--- a/drivers/crypto/caam/ctrl.c
+++ b/drivers/crypto/caam/ctrl.c
@@ -281,7 +281,8 @@ static int deinstantiate_rng(struct device *ctrldev, int state_handle_mask)
281 /* Try to run it through DECO0 */ 281 /* Try to run it through DECO0 */
282 ret = run_descriptor_deco0(ctrldev, desc, &status); 282 ret = run_descriptor_deco0(ctrldev, desc, &status);
283 283
284 if (ret || status) { 284 if (ret ||
285 (status && status != JRSTA_SSRC_JUMP_HALT_CC)) {
285 dev_err(ctrldev, 286 dev_err(ctrldev,
286 "Failed to deinstantiate RNG4 SH%d\n", 287 "Failed to deinstantiate RNG4 SH%d\n",
287 sh_idx); 288 sh_idx);
@@ -301,15 +302,13 @@ static int caam_remove(struct platform_device *pdev)
301 struct device *ctrldev; 302 struct device *ctrldev;
302 struct caam_drv_private *ctrlpriv; 303 struct caam_drv_private *ctrlpriv;
303 struct caam_ctrl __iomem *ctrl; 304 struct caam_ctrl __iomem *ctrl;
304 int ring;
305 305
306 ctrldev = &pdev->dev; 306 ctrldev = &pdev->dev;
307 ctrlpriv = dev_get_drvdata(ctrldev); 307 ctrlpriv = dev_get_drvdata(ctrldev);
308 ctrl = (struct caam_ctrl __iomem *)ctrlpriv->ctrl; 308 ctrl = (struct caam_ctrl __iomem *)ctrlpriv->ctrl;
309 309
310 /* Remove platform devices for JobRs */ 310 /* Remove platform devices under the crypto node */
311 for (ring = 0; ring < ctrlpriv->total_jobrs; ring++) 311 of_platform_depopulate(ctrldev);
312 of_device_unregister(ctrlpriv->jrpdev[ring]);
313 312
314 /* De-initialize RNG state handles initialized by this driver. */ 313 /* De-initialize RNG state handles initialized by this driver. */
315 if (ctrlpriv->rng4_sh_init) 314 if (ctrlpriv->rng4_sh_init)
@@ -418,10 +417,21 @@ DEFINE_SIMPLE_ATTRIBUTE(caam_fops_u32_ro, caam_debugfs_u32_get, NULL, "%llu\n");
418DEFINE_SIMPLE_ATTRIBUTE(caam_fops_u64_ro, caam_debugfs_u64_get, NULL, "%llu\n"); 417DEFINE_SIMPLE_ATTRIBUTE(caam_fops_u64_ro, caam_debugfs_u64_get, NULL, "%llu\n");
419#endif 418#endif
420 419
420static const struct of_device_id caam_match[] = {
421 {
422 .compatible = "fsl,sec-v4.0",
423 },
424 {
425 .compatible = "fsl,sec4.0",
426 },
427 {},
428};
429MODULE_DEVICE_TABLE(of, caam_match);
430
421/* Probe routine for CAAM top (controller) level */ 431/* Probe routine for CAAM top (controller) level */
422static int caam_probe(struct platform_device *pdev) 432static int caam_probe(struct platform_device *pdev)
423{ 433{
424 int ret, ring, ridx, rspec, gen_sk, ent_delay = RTSDCTL_ENT_DLY_MIN; 434 int ret, ring, gen_sk, ent_delay = RTSDCTL_ENT_DLY_MIN;
425 u64 caam_id; 435 u64 caam_id;
426 struct device *dev; 436 struct device *dev;
427 struct device_node *nprop, *np; 437 struct device_node *nprop, *np;
@@ -597,47 +607,24 @@ static int caam_probe(struct platform_device *pdev)
597 goto iounmap_ctrl; 607 goto iounmap_ctrl;
598 } 608 }
599 609
600 /* 610 ret = of_platform_populate(nprop, caam_match, NULL, dev);
601 * Detect and enable JobRs 611 if (ret) {
602 * First, find out how many ring spec'ed, allocate references 612 dev_err(dev, "JR platform devices creation error\n");
603 * for all, then go probe each one.
604 */
605 rspec = 0;
606 for_each_available_child_of_node(nprop, np)
607 if (of_device_is_compatible(np, "fsl,sec-v4.0-job-ring") ||
608 of_device_is_compatible(np, "fsl,sec4.0-job-ring"))
609 rspec++;
610
611 ctrlpriv->jrpdev = devm_kcalloc(&pdev->dev, rspec,
612 sizeof(*ctrlpriv->jrpdev), GFP_KERNEL);
613 if (ctrlpriv->jrpdev == NULL) {
614 ret = -ENOMEM;
615 goto iounmap_ctrl; 613 goto iounmap_ctrl;
616 } 614 }
617 615
618 ring = 0; 616 ring = 0;
619 ridx = 0;
620 ctrlpriv->total_jobrs = 0;
621 for_each_available_child_of_node(nprop, np) 617 for_each_available_child_of_node(nprop, np)
622 if (of_device_is_compatible(np, "fsl,sec-v4.0-job-ring") || 618 if (of_device_is_compatible(np, "fsl,sec-v4.0-job-ring") ||
623 of_device_is_compatible(np, "fsl,sec4.0-job-ring")) { 619 of_device_is_compatible(np, "fsl,sec4.0-job-ring")) {
624 ctrlpriv->jrpdev[ring] =
625 of_platform_device_create(np, NULL, dev);
626 if (!ctrlpriv->jrpdev[ring]) {
627 pr_warn("JR physical index %d: Platform device creation error\n",
628 ridx);
629 ridx++;
630 continue;
631 }
632 ctrlpriv->jr[ring] = (struct caam_job_ring __iomem __force *) 620 ctrlpriv->jr[ring] = (struct caam_job_ring __iomem __force *)
633 ((__force uint8_t *)ctrl + 621 ((__force uint8_t *)ctrl +
634 (ridx + JR_BLOCK_NUMBER) * 622 (ring + JR_BLOCK_NUMBER) *
635 BLOCK_OFFSET 623 BLOCK_OFFSET
636 ); 624 );
637 ctrlpriv->total_jobrs++; 625 ctrlpriv->total_jobrs++;
638 ring++; 626 ring++;
639 ridx++; 627 }
640 }
641 628
642 /* Check to see if QI present. If so, enable */ 629 /* Check to see if QI present. If so, enable */
643 ctrlpriv->qi_present = 630 ctrlpriv->qi_present =
@@ -847,17 +834,6 @@ disable_caam_ipg:
847 return ret; 834 return ret;
848} 835}
849 836
850static struct of_device_id caam_match[] = {
851 {
852 .compatible = "fsl,sec-v4.0",
853 },
854 {
855 .compatible = "fsl,sec4.0",
856 },
857 {},
858};
859MODULE_DEVICE_TABLE(of, caam_match);
860
861static struct platform_driver caam_driver = { 837static struct platform_driver caam_driver = {
862 .driver = { 838 .driver = {
863 .name = "caam", 839 .name = "caam",
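
Note on the caam ctrl.c hunks above: the hand-rolled loop that counted job-ring children, allocated the jrpdev[] array, and called of_platform_device_create() per node is replaced by a single of_platform_populate() in probe, with of_platform_depopulate() doing the teardown in remove. A condensed sketch of that pairing, assuming the standard OF platform helpers; not a complete driver, names illustrative:

#include <linux/of_platform.h>
#include <linux/platform_device.h>

static const struct of_device_id child_match[] = {
	{ .compatible = "vendor,example-child" },
	{ }
};

static int example_probe(struct platform_device *pdev)
{
	/* Create platform devices for matching children of this
	 * node; the child drivers then bind to them as usual. */
	return of_platform_populate(pdev->dev.of_node, child_match,
				    NULL, &pdev->dev);
}

static int example_remove(struct platform_device *pdev)
{
	/* Tear down everything created above -- no hand-rolled
	 * jrpdev[]-style array to size, index, and unregister. */
	of_platform_depopulate(&pdev->dev);
	return 0;
}
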
diff --git a/drivers/crypto/caam/intern.h b/drivers/crypto/caam/intern.h
index e2bcacc1a921..dbed8baeebe5 100644
--- a/drivers/crypto/caam/intern.h
+++ b/drivers/crypto/caam/intern.h
@@ -66,7 +66,6 @@ struct caam_drv_private_jr {
66struct caam_drv_private { 66struct caam_drv_private {
67 67
68 struct device *dev; 68 struct device *dev;
69 struct platform_device **jrpdev; /* Alloc'ed array per sub-device */
70 struct platform_device *pdev; 69 struct platform_device *pdev;
71 70
72 /* Physical-presence section */ 71 /* Physical-presence section */
diff --git a/drivers/firmware/efi/libstub/gop.c b/drivers/firmware/efi/libstub/gop.c
index 932742e4cf23..24c461dea7af 100644
--- a/drivers/firmware/efi/libstub/gop.c
+++ b/drivers/firmware/efi/libstub/gop.c
@@ -149,7 +149,8 @@ setup_gop32(efi_system_table_t *sys_table_arg, struct screen_info *si,
149 149
150 status = __gop_query32(sys_table_arg, gop32, &info, &size, 150 status = __gop_query32(sys_table_arg, gop32, &info, &size,
151 &current_fb_base); 151 &current_fb_base);
152 if (status == EFI_SUCCESS && (!first_gop || conout_found)) { 152 if (status == EFI_SUCCESS && (!first_gop || conout_found) &&
153 info->pixel_format != PIXEL_BLT_ONLY) {
153 /* 154 /*
154 * Systems that use the UEFI Console Splitter may 155 * Systems that use the UEFI Console Splitter may
155 * provide multiple GOP devices, not all of which are 156 * provide multiple GOP devices, not all of which are
@@ -266,7 +267,8 @@ setup_gop64(efi_system_table_t *sys_table_arg, struct screen_info *si,
266 267
267 status = __gop_query64(sys_table_arg, gop64, &info, &size, 268 status = __gop_query64(sys_table_arg, gop64, &info, &size,
268 &current_fb_base); 269 &current_fb_base);
269 if (status == EFI_SUCCESS && (!first_gop || conout_found)) { 270 if (status == EFI_SUCCESS && (!first_gop || conout_found) &&
271 info->pixel_format != PIXEL_BLT_ONLY) {
270 /* 272 /*
271 * Systems that use the UEFI Console Splitter may 273 * Systems that use the UEFI Console Splitter may
272 * provide multiple GOP devices, not all of which are 274 * provide multiple GOP devices, not all of which are
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
index da48819ff2e6..b78d9239e48f 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
@@ -1317,7 +1317,7 @@ int etnaviv_gpu_submit(struct etnaviv_gpu *gpu,
1317 if (!fence) { 1317 if (!fence) {
1318 event_free(gpu, event); 1318 event_free(gpu, event);
1319 ret = -ENOMEM; 1319 ret = -ENOMEM;
1320 goto out_pm_put; 1320 goto out_unlock;
1321 } 1321 }
1322 1322
1323 gpu->event[event].fence = fence; 1323 gpu->event[event].fence = fence;
@@ -1357,6 +1357,7 @@ int etnaviv_gpu_submit(struct etnaviv_gpu *gpu,
1357 hangcheck_timer_reset(gpu); 1357 hangcheck_timer_reset(gpu);
1358 ret = 0; 1358 ret = 0;
1359 1359
1360out_unlock:
1360 mutex_unlock(&gpu->lock); 1361 mutex_unlock(&gpu->lock);
1361 1362
1362out_pm_put: 1363out_pm_put:
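
Note on the etnaviv hunk above: the -ENOMEM path previously jumped to out_pm_put while still holding gpu->lock; the new out_unlock label restores the rule that cleanup labels unwind in the reverse order of acquisition. A runnable sketch of that unwind ordering, with prints standing in for the lock and runtime-PM calls:

#include <stdio.h>
#include <stdlib.h>

/* Each failure jumps to the label that releases exactly what has
 * been acquired so far, in reverse order of acquisition. */
static int submit(void)
{
	void *buf;
	int ret = 0;

	printf("pm_get\n");		/* acquire #1 */
	printf("mutex_lock\n");		/* acquire #2 */

	buf = malloc(64);
	if (!buf) {
		ret = -1;
		goto out_unlock;	/* lock held: drop it first */
	}

	free(buf);
out_unlock:
	printf("mutex_unlock\n");	/* release #2 */
	printf("pm_put\n");		/* release #1 */
	return ret;
}

int main(void)
{
	return submit();
}
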
diff --git a/drivers/gpu/drm/i915/gvt/cfg_space.c b/drivers/gpu/drm/i915/gvt/cfg_space.c
index b7d7721e72fa..40af17ec6312 100644
--- a/drivers/gpu/drm/i915/gvt/cfg_space.c
+++ b/drivers/gpu/drm/i915/gvt/cfg_space.c
@@ -285,9 +285,6 @@ int intel_vgpu_emulate_cfg_write(struct intel_vgpu *vgpu, unsigned int offset,
285{ 285{
286 int ret; 286 int ret;
287 287
288 if (vgpu->failsafe)
289 return 0;
290
291 if (WARN_ON(bytes > 4)) 288 if (WARN_ON(bytes > 4))
292 return -EINVAL; 289 return -EINVAL;
293 290
diff --git a/drivers/gpu/drm/i915/gvt/execlist.c b/drivers/gpu/drm/i915/gvt/execlist.c
index f1f426a97aa9..d186c157f65f 100644
--- a/drivers/gpu/drm/i915/gvt/execlist.c
+++ b/drivers/gpu/drm/i915/gvt/execlist.c
@@ -775,7 +775,8 @@ static void init_vgpu_execlist(struct intel_vgpu *vgpu, int ring_id)
775 _EL_OFFSET_STATUS_PTR); 775 _EL_OFFSET_STATUS_PTR);
776 776
777 ctx_status_ptr.dw = vgpu_vreg(vgpu, ctx_status_ptr_reg); 777 ctx_status_ptr.dw = vgpu_vreg(vgpu, ctx_status_ptr_reg);
778 ctx_status_ptr.read_ptr = ctx_status_ptr.write_ptr = 0x7; 778 ctx_status_ptr.read_ptr = 0;
779 ctx_status_ptr.write_ptr = 0x7;
779 vgpu_vreg(vgpu, ctx_status_ptr_reg) = ctx_status_ptr.dw; 780 vgpu_vreg(vgpu, ctx_status_ptr_reg) = ctx_status_ptr.dw;
780} 781}
781 782
diff --git a/drivers/gpu/drm/i915/gvt/firmware.c b/drivers/gpu/drm/i915/gvt/firmware.c
index 933a7c211a1c..dce8d15f706f 100644
--- a/drivers/gpu/drm/i915/gvt/firmware.c
+++ b/drivers/gpu/drm/i915/gvt/firmware.c
@@ -75,11 +75,11 @@ static int expose_firmware_sysfs(struct intel_gvt *gvt)
75 struct gvt_firmware_header *h; 75 struct gvt_firmware_header *h;
76 void *firmware; 76 void *firmware;
77 void *p; 77 void *p;
78 unsigned long size; 78 unsigned long size, crc32_start;
79 int i; 79 int i;
80 int ret; 80 int ret;
81 81
82 size = sizeof(*h) + info->mmio_size + info->cfg_space_size - 1; 82 size = sizeof(*h) + info->mmio_size + info->cfg_space_size;
83 firmware = vzalloc(size); 83 firmware = vzalloc(size);
84 if (!firmware) 84 if (!firmware)
85 return -ENOMEM; 85 return -ENOMEM;
@@ -112,6 +112,9 @@ static int expose_firmware_sysfs(struct intel_gvt *gvt)
112 112
113 memcpy(gvt->firmware.mmio, p, info->mmio_size); 113 memcpy(gvt->firmware.mmio, p, info->mmio_size);
114 114
115 crc32_start = offsetof(struct gvt_firmware_header, crc32) + 4;
116 h->crc32 = crc32_le(0, firmware + crc32_start, size - crc32_start);
117
115 firmware_attr.size = size; 118 firmware_attr.size = size;
116 firmware_attr.private = firmware; 119 firmware_attr.private = firmware;
117 120
@@ -234,7 +237,7 @@ int intel_gvt_load_firmware(struct intel_gvt *gvt)
234 237
235 firmware->mmio = mem; 238 firmware->mmio = mem;
236 239
237 sprintf(path, "%s/vid_0x%04x_did_0x%04x_rid_0x%04x.golden_hw_state", 240 sprintf(path, "%s/vid_0x%04x_did_0x%04x_rid_0x%02x.golden_hw_state",
238 GVT_FIRMWARE_PATH, pdev->vendor, pdev->device, 241 GVT_FIRMWARE_PATH, pdev->vendor, pdev->device,
239 pdev->revision); 242 pdev->revision);
240 243
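
Note on the gvt firmware.c hunk above: the blob is now sized without the stray "- 1", and h->crc32 is computed over everything after the crc32 field itself — offsetof(...) + 4 skips the header bytes up to and including the field, so the checksum never covers its own storage. A runnable sketch of that layout arithmetic; the checksum() here is a trivial stand-in for crc32_le():

#include <stdio.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>

struct fw_header {
	uint32_t magic;
	uint32_t crc32;		/* must not checksum itself */
	uint32_t payload[4];
};

/* Stand-in for crc32_le(); any checksum works for the demo. */
static uint32_t checksum(const uint8_t *p, size_t n)
{
	uint32_t sum = 0;

	while (n--)
		sum = sum * 31 + *p++;
	return sum;
}

int main(void)
{
	struct fw_header h;
	/* Skip every byte up to and including the crc32 field;
	 * sizeof(h.crc32) is the literal "+ 4" in the patch. */
	size_t crc_start = offsetof(struct fw_header, crc32) +
			   sizeof(h.crc32);

	memset(&h, 0xab, sizeof(h));
	h.crc32 = checksum((uint8_t *)&h + crc_start,
			   sizeof(h) - crc_start);
	printf("crc over %zu of %zu bytes: %08x\n",
	       sizeof(h) - crc_start, sizeof(h), h.crc32);
	return 0;
}
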
diff --git a/drivers/gpu/drm/i915/gvt/gvt.c b/drivers/gpu/drm/i915/gvt/gvt.c
index 3b9d59e457ba..ef3baa0c4754 100644
--- a/drivers/gpu/drm/i915/gvt/gvt.c
+++ b/drivers/gpu/drm/i915/gvt/gvt.c
@@ -52,6 +52,8 @@ static const struct intel_gvt_ops intel_gvt_ops = {
52 .vgpu_create = intel_gvt_create_vgpu, 52 .vgpu_create = intel_gvt_create_vgpu,
53 .vgpu_destroy = intel_gvt_destroy_vgpu, 53 .vgpu_destroy = intel_gvt_destroy_vgpu,
54 .vgpu_reset = intel_gvt_reset_vgpu, 54 .vgpu_reset = intel_gvt_reset_vgpu,
55 .vgpu_activate = intel_gvt_activate_vgpu,
56 .vgpu_deactivate = intel_gvt_deactivate_vgpu,
55}; 57};
56 58
57/** 59/**
diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h
index 6dfc48b63b71..becae2fa3b29 100644
--- a/drivers/gpu/drm/i915/gvt/gvt.h
+++ b/drivers/gpu/drm/i915/gvt/gvt.h
@@ -382,7 +382,8 @@ void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu);
382void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr, 382void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr,
383 unsigned int engine_mask); 383 unsigned int engine_mask);
384void intel_gvt_reset_vgpu(struct intel_vgpu *vgpu); 384void intel_gvt_reset_vgpu(struct intel_vgpu *vgpu);
385 385void intel_gvt_activate_vgpu(struct intel_vgpu *vgpu);
386void intel_gvt_deactivate_vgpu(struct intel_vgpu *vgpu);
386 387
387/* validating GM functions */ 388/* validating GM functions */
388#define vgpu_gmadr_is_aperture(vgpu, gmadr) \ 389#define vgpu_gmadr_is_aperture(vgpu, gmadr) \
@@ -449,6 +450,8 @@ struct intel_gvt_ops {
449 struct intel_vgpu_type *); 450 struct intel_vgpu_type *);
450 void (*vgpu_destroy)(struct intel_vgpu *); 451 void (*vgpu_destroy)(struct intel_vgpu *);
451 void (*vgpu_reset)(struct intel_vgpu *); 452 void (*vgpu_reset)(struct intel_vgpu *);
453 void (*vgpu_activate)(struct intel_vgpu *);
454 void (*vgpu_deactivate)(struct intel_vgpu *);
452}; 455};
453 456
454 457
diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c
index d641214578a7..e466259034e2 100644
--- a/drivers/gpu/drm/i915/gvt/kvmgt.c
+++ b/drivers/gpu/drm/i915/gvt/kvmgt.c
@@ -544,6 +544,8 @@ static int intel_vgpu_open(struct mdev_device *mdev)
544 if (ret) 544 if (ret)
545 goto undo_group; 545 goto undo_group;
546 546
547 intel_gvt_ops->vgpu_activate(vgpu);
548
547 atomic_set(&vgpu->vdev.released, 0); 549 atomic_set(&vgpu->vdev.released, 0);
548 return ret; 550 return ret;
549 551
@@ -569,6 +571,8 @@ static void __intel_vgpu_release(struct intel_vgpu *vgpu)
569 if (atomic_cmpxchg(&vgpu->vdev.released, 0, 1)) 571 if (atomic_cmpxchg(&vgpu->vdev.released, 0, 1))
570 return; 572 return;
571 573
574 intel_gvt_ops->vgpu_deactivate(vgpu);
575
572 ret = vfio_unregister_notifier(mdev_dev(vgpu->vdev.mdev), VFIO_IOMMU_NOTIFY, 576 ret = vfio_unregister_notifier(mdev_dev(vgpu->vdev.mdev), VFIO_IOMMU_NOTIFY,
573 &vgpu->vdev.iommu_notifier); 577 &vgpu->vdev.iommu_notifier);
574 WARN(ret, "vfio_unregister_notifier for iommu failed: %d\n", ret); 578 WARN(ret, "vfio_unregister_notifier for iommu failed: %d\n", ret);
@@ -1340,13 +1344,6 @@ static int kvmgt_guest_init(struct mdev_device *mdev)
1340 1344
1341static bool kvmgt_guest_exit(struct kvmgt_guest_info *info) 1345static bool kvmgt_guest_exit(struct kvmgt_guest_info *info)
1342{ 1346{
1343 struct intel_vgpu *vgpu = info->vgpu;
1344
1345 if (!info) {
1346 gvt_vgpu_err("kvmgt_guest_info invalid\n");
1347 return false;
1348 }
1349
1350 kvm_page_track_unregister_notifier(info->kvm, &info->track_node); 1347 kvm_page_track_unregister_notifier(info->kvm, &info->track_node);
1351 kvm_put_kvm(info->kvm); 1348 kvm_put_kvm(info->kvm);
1352 kvmgt_protect_table_destroy(info); 1349 kvmgt_protect_table_destroy(info);
diff --git a/drivers/gpu/drm/i915/gvt/vgpu.c b/drivers/gpu/drm/i915/gvt/vgpu.c
index 41cfa5ccae84..649ef280cc9a 100644
--- a/drivers/gpu/drm/i915/gvt/vgpu.c
+++ b/drivers/gpu/drm/i915/gvt/vgpu.c
@@ -72,7 +72,7 @@ static struct {
72 char *name; 72 char *name;
73} vgpu_types[] = { 73} vgpu_types[] = {
74/* Fixed vGPU type table */ 74/* Fixed vGPU type table */
75 { MB_TO_BYTES(64), MB_TO_BYTES(512), 4, GVT_EDID_1024_768, "8" }, 75 { MB_TO_BYTES(64), MB_TO_BYTES(384), 4, GVT_EDID_1024_768, "8" },
76 { MB_TO_BYTES(128), MB_TO_BYTES(512), 4, GVT_EDID_1920_1200, "4" }, 76 { MB_TO_BYTES(128), MB_TO_BYTES(512), 4, GVT_EDID_1920_1200, "4" },
77 { MB_TO_BYTES(256), MB_TO_BYTES(1024), 4, GVT_EDID_1920_1200, "2" }, 77 { MB_TO_BYTES(256), MB_TO_BYTES(1024), 4, GVT_EDID_1920_1200, "2" },
78 { MB_TO_BYTES(512), MB_TO_BYTES(2048), 4, GVT_EDID_1920_1200, "1" }, 78 { MB_TO_BYTES(512), MB_TO_BYTES(2048), 4, GVT_EDID_1920_1200, "1" },
@@ -179,20 +179,34 @@ static void intel_gvt_update_vgpu_types(struct intel_gvt *gvt)
179} 179}
180 180
181/** 181/**
 182 * intel_gvt_destroy_vgpu - destroy a virtual GPU 182 * intel_gvt_activate_vgpu - activate a virtual GPU
183 * @vgpu: virtual GPU 183 * @vgpu: virtual GPU
184 * 184 *
185 * This function is called when user wants to destroy a virtual GPU. 185 * This function is called when user wants to activate a virtual GPU.
186 * 186 *
187 */ 187 */
188void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu) 188void intel_gvt_activate_vgpu(struct intel_vgpu *vgpu)
189{
190 mutex_lock(&vgpu->gvt->lock);
191 vgpu->active = true;
192 mutex_unlock(&vgpu->gvt->lock);
193}
194
195/**
 196 * intel_gvt_deactivate_vgpu - deactivate a virtual GPU
197 * @vgpu: virtual GPU
198 *
199 * This function is called when user wants to deactivate a virtual GPU.
200 * All virtual GPU runtime information will be destroyed.
201 *
202 */
203void intel_gvt_deactivate_vgpu(struct intel_vgpu *vgpu)
189{ 204{
190 struct intel_gvt *gvt = vgpu->gvt; 205 struct intel_gvt *gvt = vgpu->gvt;
191 206
192 mutex_lock(&gvt->lock); 207 mutex_lock(&gvt->lock);
193 208
194 vgpu->active = false; 209 vgpu->active = false;
195 idr_remove(&gvt->vgpu_idr, vgpu->id);
196 210
197 if (atomic_read(&vgpu->running_workload_num)) { 211 if (atomic_read(&vgpu->running_workload_num)) {
198 mutex_unlock(&gvt->lock); 212 mutex_unlock(&gvt->lock);
@@ -201,6 +215,26 @@ void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu)
201 } 215 }
202 216
203 intel_vgpu_stop_schedule(vgpu); 217 intel_vgpu_stop_schedule(vgpu);
218
219 mutex_unlock(&gvt->lock);
220}
221
222/**
223 * intel_gvt_destroy_vgpu - destroy a virtual GPU
224 * @vgpu: virtual GPU
225 *
226 * This function is called when user wants to destroy a virtual GPU.
227 *
228 */
229void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu)
230{
231 struct intel_gvt *gvt = vgpu->gvt;
232
233 mutex_lock(&gvt->lock);
234
235 WARN(vgpu->active, "vGPU is still active!\n");
236
237 idr_remove(&gvt->vgpu_idr, vgpu->id);
204 intel_vgpu_clean_sched_policy(vgpu); 238 intel_vgpu_clean_sched_policy(vgpu);
205 intel_vgpu_clean_gvt_context(vgpu); 239 intel_vgpu_clean_gvt_context(vgpu);
206 intel_vgpu_clean_execlist(vgpu); 240 intel_vgpu_clean_execlist(vgpu);
@@ -277,7 +311,6 @@ static struct intel_vgpu *__intel_gvt_create_vgpu(struct intel_gvt *gvt,
277 if (ret) 311 if (ret)
278 goto out_clean_shadow_ctx; 312 goto out_clean_shadow_ctx;
279 313
280 vgpu->active = true;
281 mutex_unlock(&gvt->lock); 314 mutex_unlock(&gvt->lock);
282 315
283 return vgpu; 316 return vgpu;
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 1c75402a59c1..5c089b3c2a7e 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -1434,8 +1434,6 @@ static int i915_drm_suspend(struct drm_device *dev)
1434 goto out; 1434 goto out;
1435 } 1435 }
1436 1436
1437 intel_guc_suspend(dev_priv);
1438
1439 intel_display_suspend(dev); 1437 intel_display_suspend(dev);
1440 1438
1441 intel_dp_mst_suspend(dev); 1439 intel_dp_mst_suspend(dev);
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 1e53c31b6826..46fcd8b7080a 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -806,6 +806,7 @@ struct intel_csr {
806 func(has_resource_streamer); \ 806 func(has_resource_streamer); \
807 func(has_runtime_pm); \ 807 func(has_runtime_pm); \
808 func(has_snoop); \ 808 func(has_snoop); \
809 func(unfenced_needs_alignment); \
809 func(cursor_needs_physical); \ 810 func(cursor_needs_physical); \
810 func(hws_needs_physical); \ 811 func(hws_needs_physical); \
811 func(overlay_needs_physical); \ 812 func(overlay_needs_physical); \
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 67b1fc5a0331..fe531f904062 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -4348,6 +4348,8 @@ int i915_gem_suspend(struct drm_i915_private *dev_priv)
4348 i915_gem_context_lost(dev_priv); 4348 i915_gem_context_lost(dev_priv);
4349 mutex_unlock(&dev->struct_mutex); 4349 mutex_unlock(&dev->struct_mutex);
4350 4350
4351 intel_guc_suspend(dev_priv);
4352
4351 cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work); 4353 cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
4352 cancel_delayed_work_sync(&dev_priv->gt.retire_work); 4354 cancel_delayed_work_sync(&dev_priv->gt.retire_work);
4353 4355
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 30e0675fd7da..15a15d00a6bf 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -888,6 +888,7 @@ i915_gem_execbuffer_reserve(struct intel_engine_cs *engine,
888 struct list_head ordered_vmas; 888 struct list_head ordered_vmas;
889 struct list_head pinned_vmas; 889 struct list_head pinned_vmas;
890 bool has_fenced_gpu_access = INTEL_GEN(engine->i915) < 4; 890 bool has_fenced_gpu_access = INTEL_GEN(engine->i915) < 4;
891 bool needs_unfenced_map = INTEL_INFO(engine->i915)->unfenced_needs_alignment;
891 int retry; 892 int retry;
892 893
893 vm = list_first_entry(vmas, struct i915_vma, exec_list)->vm; 894 vm = list_first_entry(vmas, struct i915_vma, exec_list)->vm;
@@ -908,7 +909,8 @@ i915_gem_execbuffer_reserve(struct intel_engine_cs *engine,
908 if (!has_fenced_gpu_access) 909 if (!has_fenced_gpu_access)
909 entry->flags &= ~EXEC_OBJECT_NEEDS_FENCE; 910 entry->flags &= ~EXEC_OBJECT_NEEDS_FENCE;
910 need_fence = 911 need_fence =
911 entry->flags & EXEC_OBJECT_NEEDS_FENCE && 912 (entry->flags & EXEC_OBJECT_NEEDS_FENCE ||
913 needs_unfenced_map) &&
912 i915_gem_object_is_tiled(obj); 914 i915_gem_object_is_tiled(obj);
913 need_mappable = need_fence || need_reloc_mappable(vma); 915 need_mappable = need_fence || need_reloc_mappable(vma);
914 916
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 2801a4d56324..96e45a4d5441 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -2704,7 +2704,7 @@ void i915_gem_gtt_finish_pages(struct drm_i915_gem_object *obj,
2704 struct i915_ggtt *ggtt = &dev_priv->ggtt; 2704 struct i915_ggtt *ggtt = &dev_priv->ggtt;
2705 2705
2706 if (unlikely(ggtt->do_idle_maps)) { 2706 if (unlikely(ggtt->do_idle_maps)) {
2707 if (i915_gem_wait_for_idle(dev_priv, I915_WAIT_LOCKED)) { 2707 if (i915_gem_wait_for_idle(dev_priv, 0)) {
2708 DRM_ERROR("Failed to wait for idle; VT'd may hang.\n"); 2708 DRM_ERROR("Failed to wait for idle; VT'd may hang.\n");
2709 /* Wait a bit, in hopes it avoids the hang */ 2709 /* Wait a bit, in hopes it avoids the hang */
2710 udelay(10); 2710 udelay(10);
diff --git a/drivers/gpu/drm/i915/i915_gem_request.c b/drivers/gpu/drm/i915/i915_gem_request.c
index e7c3c0318ff6..da70bfe97ec5 100644
--- a/drivers/gpu/drm/i915/i915_gem_request.c
+++ b/drivers/gpu/drm/i915/i915_gem_request.c
@@ -37,6 +37,17 @@ static const char *i915_fence_get_driver_name(struct dma_fence *fence)
37 37
38static const char *i915_fence_get_timeline_name(struct dma_fence *fence) 38static const char *i915_fence_get_timeline_name(struct dma_fence *fence)
39{ 39{
40 /* The timeline struct (as part of the ppgtt underneath a context)
41 * may be freed when the request is no longer in use by the GPU.
42 * We could extend the life of a context to beyond that of all
43 * fences, possibly keeping the hw resource around indefinitely,
44 * or we just give them a false name. Since
45 * dma_fence_ops.get_timeline_name is a debug feature, the occasional
46 * lie seems justifiable.
47 */
48 if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
49 return "signaled";
50
40 return to_request(fence)->timeline->common->name; 51 return to_request(fence)->timeline->common->name;
41} 52}
42 53
diff --git a/drivers/gpu/drm/i915/i915_gem_shrinker.c b/drivers/gpu/drm/i915/i915_gem_shrinker.c
index d5d2b4c6ed38..70b3832a79dd 100644
--- a/drivers/gpu/drm/i915/i915_gem_shrinker.c
+++ b/drivers/gpu/drm/i915/i915_gem_shrinker.c
@@ -53,6 +53,17 @@ static bool i915_gem_shrinker_lock(struct drm_device *dev, bool *unlock)
53 BUG(); 53 BUG();
54} 54}
55 55
56static void i915_gem_shrinker_unlock(struct drm_device *dev, bool unlock)
57{
58 if (!unlock)
59 return;
60
61 mutex_unlock(&dev->struct_mutex);
62
63 /* expedite the RCU grace period to free some request slabs */
64 synchronize_rcu_expedited();
65}
66
56static bool any_vma_pinned(struct drm_i915_gem_object *obj) 67static bool any_vma_pinned(struct drm_i915_gem_object *obj)
57{ 68{
58 struct i915_vma *vma; 69 struct i915_vma *vma;
@@ -232,11 +243,8 @@ i915_gem_shrink(struct drm_i915_private *dev_priv,
232 intel_runtime_pm_put(dev_priv); 243 intel_runtime_pm_put(dev_priv);
233 244
234 i915_gem_retire_requests(dev_priv); 245 i915_gem_retire_requests(dev_priv);
235 if (unlock)
236 mutex_unlock(&dev_priv->drm.struct_mutex);
237 246
238 /* expedite the RCU grace period to free some request slabs */ 247 i915_gem_shrinker_unlock(&dev_priv->drm, unlock);
239 synchronize_rcu_expedited();
240 248
241 return count; 249 return count;
242} 250}
@@ -293,8 +301,7 @@ i915_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc)
293 count += obj->base.size >> PAGE_SHIFT; 301 count += obj->base.size >> PAGE_SHIFT;
294 } 302 }
295 303
296 if (unlock) 304 i915_gem_shrinker_unlock(dev, unlock);
297 mutex_unlock(&dev->struct_mutex);
298 305
299 return count; 306 return count;
300} 307}
@@ -321,8 +328,8 @@ i915_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
321 sc->nr_to_scan - freed, 328 sc->nr_to_scan - freed,
322 I915_SHRINK_BOUND | 329 I915_SHRINK_BOUND |
323 I915_SHRINK_UNBOUND); 330 I915_SHRINK_UNBOUND);
324 if (unlock) 331
325 mutex_unlock(&dev->struct_mutex); 332 i915_gem_shrinker_unlock(dev, unlock);
326 333
327 return freed; 334 return freed;
328} 335}
@@ -364,8 +371,7 @@ i915_gem_shrinker_unlock_uninterruptible(struct drm_i915_private *dev_priv,
364 struct shrinker_lock_uninterruptible *slu) 371 struct shrinker_lock_uninterruptible *slu)
365{ 372{
366 dev_priv->mm.interruptible = slu->was_interruptible; 373 dev_priv->mm.interruptible = slu->was_interruptible;
367 if (slu->unlock) 374 i915_gem_shrinker_unlock(&dev_priv->drm, slu->unlock);
368 mutex_unlock(&dev_priv->drm.struct_mutex);
369} 375}
370 376
371static int 377static int
diff --git a/drivers/gpu/drm/i915/i915_pci.c b/drivers/gpu/drm/i915/i915_pci.c
index ecb487b5356f..9bbbd4e83e3c 100644
--- a/drivers/gpu/drm/i915/i915_pci.c
+++ b/drivers/gpu/drm/i915/i915_pci.c
@@ -60,6 +60,7 @@
60 .has_overlay = 1, .overlay_needs_physical = 1, \ 60 .has_overlay = 1, .overlay_needs_physical = 1, \
61 .has_gmch_display = 1, \ 61 .has_gmch_display = 1, \
62 .hws_needs_physical = 1, \ 62 .hws_needs_physical = 1, \
63 .unfenced_needs_alignment = 1, \
63 .ring_mask = RENDER_RING, \ 64 .ring_mask = RENDER_RING, \
64 GEN_DEFAULT_PIPEOFFSETS, \ 65 GEN_DEFAULT_PIPEOFFSETS, \
65 CURSOR_OFFSETS 66 CURSOR_OFFSETS
@@ -101,6 +102,7 @@ static const struct intel_device_info intel_i915g_info = {
101 .platform = INTEL_I915G, .cursor_needs_physical = 1, 102 .platform = INTEL_I915G, .cursor_needs_physical = 1,
102 .has_overlay = 1, .overlay_needs_physical = 1, 103 .has_overlay = 1, .overlay_needs_physical = 1,
103 .hws_needs_physical = 1, 104 .hws_needs_physical = 1,
105 .unfenced_needs_alignment = 1,
104}; 106};
105 107
106static const struct intel_device_info intel_i915gm_info = { 108static const struct intel_device_info intel_i915gm_info = {
@@ -112,6 +114,7 @@ static const struct intel_device_info intel_i915gm_info = {
112 .supports_tv = 1, 114 .supports_tv = 1,
113 .has_fbc = 1, 115 .has_fbc = 1,
114 .hws_needs_physical = 1, 116 .hws_needs_physical = 1,
117 .unfenced_needs_alignment = 1,
115}; 118};
116 119
117static const struct intel_device_info intel_i945g_info = { 120static const struct intel_device_info intel_i945g_info = {
@@ -120,6 +123,7 @@ static const struct intel_device_info intel_i945g_info = {
120 .has_hotplug = 1, .cursor_needs_physical = 1, 123 .has_hotplug = 1, .cursor_needs_physical = 1,
121 .has_overlay = 1, .overlay_needs_physical = 1, 124 .has_overlay = 1, .overlay_needs_physical = 1,
122 .hws_needs_physical = 1, 125 .hws_needs_physical = 1,
126 .unfenced_needs_alignment = 1,
123}; 127};
124 128
125static const struct intel_device_info intel_i945gm_info = { 129static const struct intel_device_info intel_i945gm_info = {
@@ -130,6 +134,7 @@ static const struct intel_device_info intel_i945gm_info = {
130 .supports_tv = 1, 134 .supports_tv = 1,
131 .has_fbc = 1, 135 .has_fbc = 1,
132 .hws_needs_physical = 1, 136 .hws_needs_physical = 1,
137 .unfenced_needs_alignment = 1,
133}; 138};
134 139
135static const struct intel_device_info intel_g33_info = { 140static const struct intel_device_info intel_g33_info = {
diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c
index a1b7eec58be2..70964ca9251e 100644
--- a/drivers/gpu/drm/i915/i915_perf.c
+++ b/drivers/gpu/drm/i915/i915_perf.c
@@ -1705,7 +1705,7 @@ i915_perf_open_ioctl_locked(struct drm_i915_private *dev_priv,
1705 */ 1705 */
1706 if (WARN_ON(stream->sample_flags != props->sample_flags)) { 1706 if (WARN_ON(stream->sample_flags != props->sample_flags)) {
1707 ret = -ENODEV; 1707 ret = -ENODEV;
1708 goto err_alloc; 1708 goto err_flags;
1709 } 1709 }
1710 1710
1711 list_add(&stream->link, &dev_priv->perf.streams); 1711 list_add(&stream->link, &dev_priv->perf.streams);
@@ -1728,6 +1728,7 @@ i915_perf_open_ioctl_locked(struct drm_i915_private *dev_priv,
1728 1728
1729err_open: 1729err_open:
1730 list_del(&stream->link); 1730 list_del(&stream->link);
1731err_flags:
1731 if (stream->ops->destroy) 1732 if (stream->ops->destroy)
1732 stream->ops->destroy(stream); 1733 stream->ops->destroy(stream);
1733err_alloc: 1734err_alloc:
@@ -1793,6 +1794,11 @@ static int read_properties_unlocked(struct drm_i915_private *dev_priv,
1793 if (ret) 1794 if (ret)
1794 return ret; 1795 return ret;
1795 1796
1797 if (id == 0 || id >= DRM_I915_PERF_PROP_MAX) {
1798 DRM_DEBUG("Unknown i915 perf property ID\n");
1799 return -EINVAL;
1800 }
1801
1796 switch ((enum drm_i915_perf_property_id)id) { 1802 switch ((enum drm_i915_perf_property_id)id) {
1797 case DRM_I915_PERF_PROP_CTX_HANDLE: 1803 case DRM_I915_PERF_PROP_CTX_HANDLE:
1798 props->single_context = 1; 1804 props->single_context = 1;
@@ -1862,9 +1868,8 @@ static int read_properties_unlocked(struct drm_i915_private *dev_priv,
1862 props->oa_periodic = true; 1868 props->oa_periodic = true;
1863 props->oa_period_exponent = value; 1869 props->oa_period_exponent = value;
1864 break; 1870 break;
1865 default: 1871 case DRM_I915_PERF_PROP_MAX:
1866 MISSING_CASE(id); 1872 MISSING_CASE(id);
1867 DRM_DEBUG("Unknown i915 perf property ID\n");
1868 return -EINVAL; 1873 return -EINVAL;
1869 } 1874 }
1870 1875
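
Note on the i915_perf hunks above: validating the untrusted id against (0, DRM_I915_PERF_PROP_MAX) up front lets the switch drop its default case, so the compiler's -Wswitch warning flags any new enum member the switch forgets to handle. A runnable sketch of that validate-then-switch pattern; the enum and names are illustrative:

#include <stdio.h>

enum prop_id {
	/* 0 is reserved/invalid, as in drm_i915_perf_property_id */
	PROP_CTX_HANDLE = 1,
	PROP_SAMPLE_OA,
	PROP_MAX		/* non-ABI sentinel */
};

static int handle_prop(unsigned long long raw_id)
{
	if (raw_id == 0 || raw_id >= PROP_MAX) {
		fprintf(stderr, "unknown property %llu\n", raw_id);
		return -1;
	}

	/* No default: with -Wswitch, adding an enum member without
	 * a case here becomes a compile-time warning. */
	switch ((enum prop_id)raw_id) {
	case PROP_CTX_HANDLE:
		puts("ctx handle");
		break;
	case PROP_SAMPLE_OA:
		puts("sample oa");
		break;
	case PROP_MAX:		/* unreachable after the range check */
		return -1;
	}
	return 0;
}

int main(void)
{
	handle_prop(1);
	handle_prop(99);
	return 0;
}
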
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 471af3b480ad..47517a02f0a4 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -670,15 +670,14 @@ static void execlists_submit_request(struct drm_i915_gem_request *request)
670static struct intel_engine_cs * 670static struct intel_engine_cs *
671pt_lock_engine(struct i915_priotree *pt, struct intel_engine_cs *locked) 671pt_lock_engine(struct i915_priotree *pt, struct intel_engine_cs *locked)
672{ 672{
673 struct intel_engine_cs *engine; 673 struct intel_engine_cs *engine =
674 container_of(pt, struct drm_i915_gem_request, priotree)->engine;
675
676 GEM_BUG_ON(!locked);
674 677
675 engine = container_of(pt,
676 struct drm_i915_gem_request,
677 priotree)->engine;
678 if (engine != locked) { 678 if (engine != locked) {
679 if (locked) 679 spin_unlock(&locked->timeline->lock);
680 spin_unlock_irq(&locked->timeline->lock); 680 spin_lock(&engine->timeline->lock);
681 spin_lock_irq(&engine->timeline->lock);
682 } 681 }
683 682
684 return engine; 683 return engine;
@@ -686,7 +685,7 @@ pt_lock_engine(struct i915_priotree *pt, struct intel_engine_cs *locked)
686 685
687static void execlists_schedule(struct drm_i915_gem_request *request, int prio) 686static void execlists_schedule(struct drm_i915_gem_request *request, int prio)
688{ 687{
689 struct intel_engine_cs *engine = NULL; 688 struct intel_engine_cs *engine;
690 struct i915_dependency *dep, *p; 689 struct i915_dependency *dep, *p;
691 struct i915_dependency stack; 690 struct i915_dependency stack;
692 LIST_HEAD(dfs); 691 LIST_HEAD(dfs);
@@ -720,26 +719,23 @@ static void execlists_schedule(struct drm_i915_gem_request *request, int prio)
720 list_for_each_entry_safe(dep, p, &dfs, dfs_link) { 719 list_for_each_entry_safe(dep, p, &dfs, dfs_link) {
721 struct i915_priotree *pt = dep->signaler; 720 struct i915_priotree *pt = dep->signaler;
722 721
723 list_for_each_entry(p, &pt->signalers_list, signal_link) 722 /* Within an engine, there can be no cycle, but we may
723 * refer to the same dependency chain multiple times
724 * (redundant dependencies are not eliminated) and across
725 * engines.
726 */
727 list_for_each_entry(p, &pt->signalers_list, signal_link) {
728 GEM_BUG_ON(p->signaler->priority < pt->priority);
724 if (prio > READ_ONCE(p->signaler->priority)) 729 if (prio > READ_ONCE(p->signaler->priority))
725 list_move_tail(&p->dfs_link, &dfs); 730 list_move_tail(&p->dfs_link, &dfs);
731 }
726 732
727 list_safe_reset_next(dep, p, dfs_link); 733 list_safe_reset_next(dep, p, dfs_link);
728 if (!RB_EMPTY_NODE(&pt->node))
729 continue;
730
731 engine = pt_lock_engine(pt, engine);
732
733 /* If it is not already in the rbtree, we can update the
734 * priority inplace and skip over it (and its dependencies)
735 * if it is referenced *again* as we descend the dfs.
736 */
737 if (prio > pt->priority && RB_EMPTY_NODE(&pt->node)) {
738 pt->priority = prio;
739 list_del_init(&dep->dfs_link);
740 }
741 } 734 }
742 735
736 engine = request->engine;
737 spin_lock_irq(&engine->timeline->lock);
738
743 /* Fifo and depth-first replacement ensure our deps execute before us */ 739 /* Fifo and depth-first replacement ensure our deps execute before us */
744 list_for_each_entry_safe_reverse(dep, p, &dfs, dfs_link) { 740 list_for_each_entry_safe_reverse(dep, p, &dfs, dfs_link) {
745 struct i915_priotree *pt = dep->signaler; 741 struct i915_priotree *pt = dep->signaler;
@@ -751,16 +747,15 @@ static void execlists_schedule(struct drm_i915_gem_request *request, int prio)
751 if (prio <= pt->priority) 747 if (prio <= pt->priority)
752 continue; 748 continue;
753 749
754 GEM_BUG_ON(RB_EMPTY_NODE(&pt->node));
755
756 pt->priority = prio; 750 pt->priority = prio;
757 rb_erase(&pt->node, &engine->execlist_queue); 751 if (!RB_EMPTY_NODE(&pt->node)) {
758 if (insert_request(pt, &engine->execlist_queue)) 752 rb_erase(&pt->node, &engine->execlist_queue);
759 engine->execlist_first = &pt->node; 753 if (insert_request(pt, &engine->execlist_queue))
754 engine->execlist_first = &pt->node;
755 }
760 } 756 }
761 757
762 if (engine) 758 spin_unlock_irq(&engine->timeline->lock);
763 spin_unlock_irq(&engine->timeline->lock);
764 759
765 /* XXX Do we need to preempt to make room for us and our deps? */ 760 /* XXX Do we need to preempt to make room for us and our deps? */
766} 761}
@@ -1440,7 +1435,9 @@ static void reset_common_ring(struct intel_engine_cs *engine,
1440 GEM_BUG_ON(request->ctx != port[0].request->ctx); 1435 GEM_BUG_ON(request->ctx != port[0].request->ctx);
1441 1436
1442 /* Reset WaIdleLiteRestore:bdw,skl as well */ 1437 /* Reset WaIdleLiteRestore:bdw,skl as well */
1443 request->tail = request->wa_tail - WA_TAIL_DWORDS * sizeof(u32); 1438 request->tail =
1439 intel_ring_wrap(request->ring,
1440 request->wa_tail - WA_TAIL_DWORDS*sizeof(u32));
1444} 1441}
1445 1442
1446static int intel_logical_ring_emit_pdps(struct drm_i915_gem_request *req) 1443static int intel_logical_ring_emit_pdps(struct drm_i915_gem_request *req)
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 13dccb18cd43..8cb2078c5bfc 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -521,11 +521,17 @@ static inline void intel_ring_advance(struct intel_ring *ring)
521 */ 521 */
522} 522}
523 523
524static inline u32
525intel_ring_wrap(const struct intel_ring *ring, u32 pos)
526{
527 return pos & (ring->size - 1);
528}
529
524static inline u32 intel_ring_offset(struct intel_ring *ring, void *addr) 530static inline u32 intel_ring_offset(struct intel_ring *ring, void *addr)
525{ 531{
526 /* Don't write ring->size (equivalent to 0) as that hangs some GPUs. */ 532 /* Don't write ring->size (equivalent to 0) as that hangs some GPUs. */
527 u32 offset = addr - ring->vaddr; 533 u32 offset = addr - ring->vaddr;
528 return offset & (ring->size - 1); 534 return intel_ring_wrap(ring, offset);
529} 535}
530 536
531int __intel_ring_space(int head, int tail, int size); 537int __intel_ring_space(int head, int tail, int size);
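
Note on the intel_lrc/intel_ringbuffer hunks above: ring sizes are powers of two, so wrapping a position is a single mask, pos & (size - 1). The patch factors this into intel_ring_wrap() and applies it to the WaIdleLiteRestore tail rewind, which can otherwise underflow when wa_tail sits near the start of the ring. A runnable sketch of the mask behaviour:

#include <stdio.h>
#include <stdint.h>

/* Power-of-two ring wrap, as in intel_ring_wrap(). */
static uint32_t ring_wrap(uint32_t size, uint32_t pos)
{
	return pos & (size - 1);
}

int main(void)
{
	uint32_t size = 4096;

	/* In-range positions are unchanged. */
	printf("%u\n", ring_wrap(size, 100));		/* 100 */
	/* Past-the-end positions wrap around... */
	printf("%u\n", ring_wrap(size, 4097));		/* 1 */
	/* ...and so do "negative" ones from a tail rewind:
	 * 8 - 16 underflows, but the mask lands it at size - 8. */
	printf("%u\n", ring_wrap(size, 8u - 16u));	/* 4088 */
	return 0;
}
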
diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c
index 0b4440ffbeae..a9182d5e6011 100644
--- a/drivers/gpu/drm/nouveau/nv50_display.c
+++ b/drivers/gpu/drm/nouveau/nv50_display.c
@@ -995,7 +995,6 @@ nv50_wndw_atomic_destroy_state(struct drm_plane *plane,
995{ 995{
996 struct nv50_wndw_atom *asyw = nv50_wndw_atom(state); 996 struct nv50_wndw_atom *asyw = nv50_wndw_atom(state);
997 __drm_atomic_helper_plane_destroy_state(&asyw->state); 997 __drm_atomic_helper_plane_destroy_state(&asyw->state);
998 dma_fence_put(asyw->state.fence);
999 kfree(asyw); 998 kfree(asyw);
1000} 999}
1001 1000
@@ -1007,7 +1006,6 @@ nv50_wndw_atomic_duplicate_state(struct drm_plane *plane)
1007 if (!(asyw = kmalloc(sizeof(*asyw), GFP_KERNEL))) 1006 if (!(asyw = kmalloc(sizeof(*asyw), GFP_KERNEL)))
1008 return NULL; 1007 return NULL;
1009 __drm_atomic_helper_plane_duplicate_state(plane, &asyw->state); 1008 __drm_atomic_helper_plane_duplicate_state(plane, &asyw->state);
1010 asyw->state.fence = NULL;
1011 asyw->interval = 1; 1009 asyw->interval = 1;
1012 asyw->sema = armw->sema; 1010 asyw->sema = armw->sema;
1013 asyw->ntfy = armw->ntfy; 1011 asyw->ntfy = armw->ntfy;
@@ -2036,6 +2034,7 @@ nv50_head_atomic_check_mode(struct nv50_head *head, struct nv50_head_atom *asyh)
2036 u32 vbackp = (mode->vtotal - mode->vsync_end) * vscan / ilace; 2034 u32 vbackp = (mode->vtotal - mode->vsync_end) * vscan / ilace;
2037 u32 hfrontp = mode->hsync_start - mode->hdisplay; 2035 u32 hfrontp = mode->hsync_start - mode->hdisplay;
2038 u32 vfrontp = (mode->vsync_start - mode->vdisplay) * vscan / ilace; 2036 u32 vfrontp = (mode->vsync_start - mode->vdisplay) * vscan / ilace;
2037 u32 blankus;
2039 struct nv50_head_mode *m = &asyh->mode; 2038 struct nv50_head_mode *m = &asyh->mode;
2040 2039
2041 m->h.active = mode->htotal; 2040 m->h.active = mode->htotal;
@@ -2049,9 +2048,10 @@ nv50_head_atomic_check_mode(struct nv50_head *head, struct nv50_head_atom *asyh)
2049 m->v.blanks = m->v.active - vfrontp - 1; 2048 m->v.blanks = m->v.active - vfrontp - 1;
2050 2049
2051 /*XXX: Safe underestimate, even "0" works */ 2050 /*XXX: Safe underestimate, even "0" works */
2052 m->v.blankus = (m->v.active - mode->vdisplay - 2) * m->h.active; 2051 blankus = (m->v.active - mode->vdisplay - 2) * m->h.active;
2053 m->v.blankus *= 1000; 2052 blankus *= 1000;
2054 m->v.blankus /= mode->clock; 2053 blankus /= mode->clock;
2054 m->v.blankus = blankus;
2055 2055
2056 if (mode->flags & DRM_MODE_FLAG_INTERLACE) { 2056 if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
2057 m->v.blank2e = m->v.active + m->v.synce + vbackp; 2057 m->v.blank2e = m->v.active + m->v.synce + vbackp;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
index 273562dd6bbd..3b86a7399567 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
@@ -714,7 +714,7 @@ nv4a_chipset = {
714 .i2c = nv04_i2c_new, 714 .i2c = nv04_i2c_new,
715 .imem = nv40_instmem_new, 715 .imem = nv40_instmem_new,
716 .mc = nv44_mc_new, 716 .mc = nv44_mc_new,
717 .mmu = nv44_mmu_new, 717 .mmu = nv04_mmu_new,
718 .pci = nv40_pci_new, 718 .pci = nv40_pci_new,
719 .therm = nv40_therm_new, 719 .therm = nv40_therm_new,
720 .timer = nv41_timer_new, 720 .timer = nv41_timer_new,
@@ -2271,6 +2271,35 @@ nv136_chipset = {
2271 .fifo = gp100_fifo_new, 2271 .fifo = gp100_fifo_new,
2272}; 2272};
2273 2273
2274static const struct nvkm_device_chip
2275nv137_chipset = {
2276 .name = "GP107",
2277 .bar = gf100_bar_new,
2278 .bios = nvkm_bios_new,
2279 .bus = gf100_bus_new,
2280 .devinit = gm200_devinit_new,
2281 .fb = gp102_fb_new,
2282 .fuse = gm107_fuse_new,
2283 .gpio = gk104_gpio_new,
2284 .i2c = gm200_i2c_new,
2285 .ibus = gm200_ibus_new,
2286 .imem = nv50_instmem_new,
2287 .ltc = gp100_ltc_new,
2288 .mc = gp100_mc_new,
2289 .mmu = gf100_mmu_new,
2290 .pci = gp100_pci_new,
2291 .pmu = gp102_pmu_new,
2292 .timer = gk20a_timer_new,
2293 .top = gk104_top_new,
2294 .ce[0] = gp102_ce_new,
2295 .ce[1] = gp102_ce_new,
2296 .ce[2] = gp102_ce_new,
2297 .ce[3] = gp102_ce_new,
2298 .disp = gp102_disp_new,
2299 .dma = gf119_dma_new,
2300 .fifo = gp100_fifo_new,
2301};
2302
2274static int 2303static int
2275nvkm_device_event_ctor(struct nvkm_object *object, void *data, u32 size, 2304nvkm_device_event_ctor(struct nvkm_object *object, void *data, u32 size,
2276 struct nvkm_notify *notify) 2305 struct nvkm_notify *notify)
@@ -2708,6 +2737,7 @@ nvkm_device_ctor(const struct nvkm_device_func *func,
2708 case 0x132: device->chip = &nv132_chipset; break; 2737 case 0x132: device->chip = &nv132_chipset; break;
2709 case 0x134: device->chip = &nv134_chipset; break; 2738 case 0x134: device->chip = &nv134_chipset; break;
2710 case 0x136: device->chip = &nv136_chipset; break; 2739 case 0x136: device->chip = &nv136_chipset; break;
2740 case 0x137: device->chip = &nv137_chipset; break;
2711 default: 2741 default:
2712 nvdev_error(device, "unknown chipset (%08x)\n", boot0); 2742 nvdev_error(device, "unknown chipset (%08x)\n", boot0);
2713 goto done; 2743 goto done;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv31.c b/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv31.c
index 003ac915eaad..8a8895246d26 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv31.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv31.c
@@ -198,7 +198,7 @@ nv31_mpeg_intr(struct nvkm_engine *engine)
198 } 198 }
199 199
200 if (type == 0x00000010) { 200 if (type == 0x00000010) {
201 if (!nv31_mpeg_mthd(mpeg, mthd, data)) 201 if (nv31_mpeg_mthd(mpeg, mthd, data))
202 show &= ~0x01000000; 202 show &= ~0x01000000;
203 } 203 }
204 } 204 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv44.c b/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv44.c
index e536f37e24b0..c3cf02ed468e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv44.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv44.c
@@ -172,7 +172,7 @@ nv44_mpeg_intr(struct nvkm_engine *engine)
172 } 172 }
173 173
174 if (type == 0x00000010) { 174 if (type == 0x00000010) {
175 if (!nv44_mpeg_mthd(subdev->device, mthd, data)) 175 if (nv44_mpeg_mthd(subdev->device, mthd, data))
176 show &= ~0x01000000; 176 show &= ~0x01000000;
177 } 177 }
178 } 178 }
diff --git a/drivers/gpu/drm/udl/udl_transfer.c b/drivers/gpu/drm/udl/udl_transfer.c
index 917dcb978c2c..0c87b1ac6b68 100644
--- a/drivers/gpu/drm/udl/udl_transfer.c
+++ b/drivers/gpu/drm/udl/udl_transfer.c
@@ -14,6 +14,7 @@
14#include <linux/slab.h> 14#include <linux/slab.h>
15#include <linux/fb.h> 15#include <linux/fb.h>
16#include <linux/prefetch.h> 16#include <linux/prefetch.h>
17#include <asm/unaligned.h>
17 18
18#include <drm/drmP.h> 19#include <drm/drmP.h>
19#include "udl_drv.h" 20#include "udl_drv.h"
@@ -163,7 +164,7 @@ static void udl_compress_hline16(
163 const u8 *const start = pixel; 164 const u8 *const start = pixel;
164 const uint16_t repeating_pixel_val16 = pixel_val16; 165 const uint16_t repeating_pixel_val16 = pixel_val16;
165 166
166 *(uint16_t *)cmd = cpu_to_be16(pixel_val16); 167 put_unaligned_be16(pixel_val16, cmd);
167 168
168 cmd += 2; 169 cmd += 2;
169 pixel += bpp; 170 pixel += bpp;
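
Note on the udl_transfer.c hunk above: "*(uint16_t *)cmd = cpu_to_be16(...)" dereferences a possibly unaligned pointer, which is undefined behaviour and faults on strict-alignment CPUs; put_unaligned_be16() stores the value safely byte by byte. A runnable equivalent of that helper's effect:

#include <stdio.h>
#include <stdint.h>

/* Byte-wise big-endian 16-bit store: safe at any alignment,
 * matching the effect of the kernel's put_unaligned_be16(). */
static void store_be16(uint16_t val, uint8_t *p)
{
	p[0] = val >> 8;
	p[1] = val & 0xff;
}

int main(void)
{
	uint8_t buf[5] = { 0 };

	/* &buf[1] is odd -- casting it to uint16_t * and storing
	 * through it can trap on strict-alignment hardware. */
	store_be16(0xf501, &buf[1]);
	printf("%02x %02x\n", buf[1], buf[2]);	/* f5 01 */
	return 0;
}
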
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index 63ec1993eaaa..d162f0dc76e3 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -819,8 +819,7 @@ static int hid_scan_report(struct hid_device *hid)
819 hid->group = HID_GROUP_WACOM; 819 hid->group = HID_GROUP_WACOM;
820 break; 820 break;
821 case USB_VENDOR_ID_SYNAPTICS: 821 case USB_VENDOR_ID_SYNAPTICS:
822 if (hid->group == HID_GROUP_GENERIC || 822 if (hid->group == HID_GROUP_GENERIC)
823 hid->group == HID_GROUP_MULTITOUCH_WIN_8)
824 if ((parser->scan_flags & HID_SCAN_FLAG_VENDOR_SPECIFIC) 823 if ((parser->scan_flags & HID_SCAN_FLAG_VENDOR_SPECIFIC)
825 && (parser->scan_flags & HID_SCAN_FLAG_GD_POINTER)) 824 && (parser->scan_flags & HID_SCAN_FLAG_GD_POINTER))
826 /* 825 /*
@@ -2096,6 +2095,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
2096 { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UGEE_TABLET_45) }, 2095 { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UGEE_TABLET_45) },
2097 { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_DRAWIMAGE_G3) }, 2096 { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_DRAWIMAGE_G3) },
2098 { HID_USB_DEVICE(USB_VENDOR_ID_UGTIZER, USB_DEVICE_ID_UGTIZER_TABLET_GP0610) }, 2097 { HID_USB_DEVICE(USB_VENDOR_ID_UGTIZER, USB_DEVICE_ID_UGTIZER_TABLET_GP0610) },
2098 { HID_USB_DEVICE(USB_VENDOR_ID_UGEE, USB_DEVICE_ID_UGEE_TABLET_EX07S) },
2099 { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_SMARTJOY_PLUS) }, 2099 { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_SMARTJOY_PLUS) },
2100 { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_SUPER_JOY_BOX_3) }, 2100 { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_SUPER_JOY_BOX_3) },
2101 { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_DUAL_USB_JOYPAD) }, 2101 { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_DUAL_USB_JOYPAD) },
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index 4e2648c86c8c..b26c030926c1 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -1028,6 +1028,9 @@
1028#define USB_DEVICE_ID_UGEE_TABLET_45 0x0045 1028#define USB_DEVICE_ID_UGEE_TABLET_45 0x0045
1029#define USB_DEVICE_ID_YIYNOVA_TABLET 0x004d 1029#define USB_DEVICE_ID_YIYNOVA_TABLET 0x004d
1030 1030
1031#define USB_VENDOR_ID_UGEE 0x28bd
1032#define USB_DEVICE_ID_UGEE_TABLET_EX07S 0x0071
1033
1031#define USB_VENDOR_ID_UNITEC 0x227d 1034#define USB_VENDOR_ID_UNITEC 0x227d
1032#define USB_DEVICE_ID_UNITEC_USB_TOUCH_0709 0x0709 1035#define USB_DEVICE_ID_UNITEC_USB_TOUCH_0709 0x0709
1033#define USB_DEVICE_ID_UNITEC_USB_TOUCH_0A19 0x0a19 1036#define USB_DEVICE_ID_UNITEC_USB_TOUCH_0A19 0x0a19
diff --git a/drivers/hid/hid-uclogic.c b/drivers/hid/hid-uclogic.c
index 1509d7287ff3..e3e6e5c893cc 100644
--- a/drivers/hid/hid-uclogic.c
+++ b/drivers/hid/hid-uclogic.c
@@ -977,6 +977,7 @@ static int uclogic_probe(struct hid_device *hdev,
977 } 977 }
978 break; 978 break;
979 case USB_DEVICE_ID_UGTIZER_TABLET_GP0610: 979 case USB_DEVICE_ID_UGTIZER_TABLET_GP0610:
980 case USB_DEVICE_ID_UGEE_TABLET_EX07S:
980 /* If this is the pen interface */ 981 /* If this is the pen interface */
981 if (intf->cur_altsetting->desc.bInterfaceNumber == 1) { 982 if (intf->cur_altsetting->desc.bInterfaceNumber == 1) {
982 rc = uclogic_tablet_enable(hdev); 983 rc = uclogic_tablet_enable(hdev);
@@ -1069,6 +1070,7 @@ static const struct hid_device_id uclogic_devices[] = {
1069 { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UGEE_TABLET_45) }, 1070 { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UGEE_TABLET_45) },
1070 { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_DRAWIMAGE_G3) }, 1071 { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_DRAWIMAGE_G3) },
1071 { HID_USB_DEVICE(USB_VENDOR_ID_UGTIZER, USB_DEVICE_ID_UGTIZER_TABLET_GP0610) }, 1072 { HID_USB_DEVICE(USB_VENDOR_ID_UGTIZER, USB_DEVICE_ID_UGTIZER_TABLET_GP0610) },
1073 { HID_USB_DEVICE(USB_VENDOR_ID_UGEE, USB_DEVICE_ID_UGEE_TABLET_EX07S) },
1072 { } 1074 { }
1073}; 1075};
1074MODULE_DEVICE_TABLE(hid, uclogic_devices); 1076MODULE_DEVICE_TABLE(hid, uclogic_devices);
diff --git a/drivers/iio/accel/hid-sensor-accel-3d.c b/drivers/iio/accel/hid-sensor-accel-3d.c
index ca5759c0c318..43a6cb078193 100644
--- a/drivers/iio/accel/hid-sensor-accel-3d.c
+++ b/drivers/iio/accel/hid-sensor-accel-3d.c
@@ -370,10 +370,12 @@ static int hid_accel_3d_probe(struct platform_device *pdev)
370 name = "accel_3d"; 370 name = "accel_3d";
371 channel_spec = accel_3d_channels; 371 channel_spec = accel_3d_channels;
372 channel_size = sizeof(accel_3d_channels); 372 channel_size = sizeof(accel_3d_channels);
373 indio_dev->num_channels = ARRAY_SIZE(accel_3d_channels);
373 } else { 374 } else {
374 name = "gravity"; 375 name = "gravity";
375 channel_spec = gravity_channels; 376 channel_spec = gravity_channels;
376 channel_size = sizeof(gravity_channels); 377 channel_size = sizeof(gravity_channels);
378 indio_dev->num_channels = ARRAY_SIZE(gravity_channels);
377 } 379 }
378 ret = hid_sensor_parse_common_attributes(hsdev, hsdev->usage, 380 ret = hid_sensor_parse_common_attributes(hsdev, hsdev->usage,
379 &accel_state->common_attributes); 381 &accel_state->common_attributes);
@@ -395,7 +397,6 @@ static int hid_accel_3d_probe(struct platform_device *pdev)
395 goto error_free_dev_mem; 397 goto error_free_dev_mem;
396 } 398 }
397 399
398 indio_dev->num_channels = ARRAY_SIZE(accel_3d_channels);
399 indio_dev->dev.parent = &pdev->dev; 400 indio_dev->dev.parent = &pdev->dev;
400 indio_dev->info = &accel_3d_info; 401 indio_dev->info = &accel_3d_info;
401 indio_dev->name = name; 402 indio_dev->name = name;
diff --git a/drivers/iio/common/cros_ec_sensors/cros_ec_sensors.c b/drivers/iio/common/cros_ec_sensors/cros_ec_sensors.c
index d6c372bb433b..c17596f7ed2c 100644
--- a/drivers/iio/common/cros_ec_sensors/cros_ec_sensors.c
+++ b/drivers/iio/common/cros_ec_sensors/cros_ec_sensors.c
@@ -61,7 +61,7 @@ static int cros_ec_sensors_read(struct iio_dev *indio_dev,
61 ret = st->core.read_ec_sensors_data(indio_dev, 1 << idx, &data); 61 ret = st->core.read_ec_sensors_data(indio_dev, 1 << idx, &data);
62 if (ret < 0) 62 if (ret < 0)
63 break; 63 break;
64 64 ret = IIO_VAL_INT;
65 *val = data; 65 *val = data;
66 break; 66 break;
67 case IIO_CHAN_INFO_CALIBBIAS: 67 case IIO_CHAN_INFO_CALIBBIAS:
@@ -76,7 +76,7 @@ static int cros_ec_sensors_read(struct iio_dev *indio_dev,
76 for (i = CROS_EC_SENSOR_X; i < CROS_EC_SENSOR_MAX_AXIS; i++) 76 for (i = CROS_EC_SENSOR_X; i < CROS_EC_SENSOR_MAX_AXIS; i++)
77 st->core.calib[i] = 77 st->core.calib[i] =
78 st->core.resp->sensor_offset.offset[i]; 78 st->core.resp->sensor_offset.offset[i];
79 79 ret = IIO_VAL_INT;
80 *val = st->core.calib[idx]; 80 *val = st->core.calib[idx];
81 break; 81 break;
82 case IIO_CHAN_INFO_SCALE: 82 case IIO_CHAN_INFO_SCALE:
diff --git a/drivers/iio/common/hid-sensors/hid-sensor-attributes.c b/drivers/iio/common/hid-sensors/hid-sensor-attributes.c
index 7afdac42ed42..01e02b9926d4 100644
--- a/drivers/iio/common/hid-sensors/hid-sensor-attributes.c
+++ b/drivers/iio/common/hid-sensors/hid-sensor-attributes.c
@@ -379,6 +379,8 @@ int hid_sensor_parse_common_attributes(struct hid_sensor_hub_device *hsdev,
379{ 379{
380 380
381 struct hid_sensor_hub_attribute_info timestamp; 381 struct hid_sensor_hub_attribute_info timestamp;
382 s32 value;
383 int ret;
382 384
383 hid_sensor_get_reporting_interval(hsdev, usage_id, st); 385 hid_sensor_get_reporting_interval(hsdev, usage_id, st);
384 386
@@ -417,6 +419,14 @@ int hid_sensor_parse_common_attributes(struct hid_sensor_hub_device *hsdev,
417 st->sensitivity.index, st->sensitivity.report_id, 419 st->sensitivity.index, st->sensitivity.report_id,
418 timestamp.index, timestamp.report_id); 420 timestamp.index, timestamp.report_id);
419 421
422 ret = sensor_hub_get_feature(hsdev,
423 st->power_state.report_id,
424 st->power_state.index, sizeof(value), &value);
425 if (ret < 0)
426 return ret;
427 if (value < 0)
428 return -EINVAL;
429
420 return 0; 430 return 0;
421} 431}
422EXPORT_SYMBOL(hid_sensor_parse_common_attributes); 432EXPORT_SYMBOL(hid_sensor_parse_common_attributes);
diff --git a/drivers/iio/gyro/bmg160_core.c b/drivers/iio/gyro/bmg160_core.c
index f7fcfa886f72..821919dd245b 100644
--- a/drivers/iio/gyro/bmg160_core.c
+++ b/drivers/iio/gyro/bmg160_core.c
@@ -27,6 +27,7 @@
27#include <linux/iio/trigger_consumer.h> 27#include <linux/iio/trigger_consumer.h>
28#include <linux/iio/triggered_buffer.h> 28#include <linux/iio/triggered_buffer.h>
29#include <linux/regmap.h> 29#include <linux/regmap.h>
30#include <linux/delay.h>
30#include "bmg160.h" 31#include "bmg160.h"
31 32
32#define BMG160_IRQ_NAME "bmg160_event" 33#define BMG160_IRQ_NAME "bmg160_event"
@@ -52,6 +53,9 @@
52#define BMG160_DEF_BW 100 53#define BMG160_DEF_BW 100
53#define BMG160_REG_PMU_BW_RES BIT(7) 54#define BMG160_REG_PMU_BW_RES BIT(7)
54 55
56#define BMG160_GYRO_REG_RESET 0x14
57#define BMG160_GYRO_RESET_VAL 0xb6
58
55#define BMG160_REG_INT_MAP_0 0x17 59#define BMG160_REG_INT_MAP_0 0x17
56#define BMG160_INT_MAP_0_BIT_ANY BIT(1) 60#define BMG160_INT_MAP_0_BIT_ANY BIT(1)
57 61
@@ -236,6 +240,14 @@ static int bmg160_chip_init(struct bmg160_data *data)
236 int ret; 240 int ret;
237 unsigned int val; 241 unsigned int val;
238 242
243 /*
244 * Reset chip to get it in a known good state. A delay of 30ms after
245 * reset is required according to the datasheet.
246 */
247 regmap_write(data->regmap, BMG160_GYRO_REG_RESET,
248 BMG160_GYRO_RESET_VAL);
249 usleep_range(30000, 30700);
250
239 ret = regmap_read(data->regmap, BMG160_REG_CHIP_ID, &val); 251 ret = regmap_read(data->regmap, BMG160_REG_CHIP_ID, &val);
240 if (ret < 0) { 252 if (ret < 0) {
241 dev_err(dev, "Error reading reg_chip_id\n"); 253 dev_err(dev, "Error reading reg_chip_id\n");
diff --git a/drivers/iio/industrialio-core.c b/drivers/iio/industrialio-core.c
index d18ded45bedd..3ff91e02fee3 100644
--- a/drivers/iio/industrialio-core.c
+++ b/drivers/iio/industrialio-core.c
@@ -610,10 +610,9 @@ static ssize_t __iio_format_value(char *buf, size_t len, unsigned int type,
610 tmp0 = (int)div_s64_rem(tmp, 1000000000, &tmp1); 610 tmp0 = (int)div_s64_rem(tmp, 1000000000, &tmp1);
611 return snprintf(buf, len, "%d.%09u", tmp0, abs(tmp1)); 611 return snprintf(buf, len, "%d.%09u", tmp0, abs(tmp1));
612 case IIO_VAL_FRACTIONAL_LOG2: 612 case IIO_VAL_FRACTIONAL_LOG2:
613 tmp = (s64)vals[0] * 1000000000LL >> vals[1]; 613 tmp = shift_right((s64)vals[0] * 1000000000LL, vals[1]);
614 tmp1 = do_div(tmp, 1000000000LL); 614 tmp0 = (int)div_s64_rem(tmp, 1000000000LL, &tmp1);
615 tmp0 = tmp; 615 return snprintf(buf, len, "%d.%09u", tmp0, abs(tmp1));
616 return snprintf(buf, len, "%d.%09u", tmp0, tmp1);
617 case IIO_VAL_INT_MULTIPLE: 616 case IIO_VAL_INT_MULTIPLE:
618 { 617 {
619 int i; 618 int i;
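
The industrialio-core hunk fixes IIO_VAL_FRACTIONAL_LOG2 for negative readings: the old code right-shifted a possibly negative s64 and then split it with do_div(), which only handles unsigned dividends; the fix uses the sign-preserving shift_right() plus the signed div_s64_rem(). A small stand-alone C demonstration of the corrected split (plain `>>` here stands in for shift_right(), which the kernel provides because shifting a negative value is implementation-defined in C):

    #include <stdio.h>
    #include <stdint.h>
    #include <inttypes.h>

    /* Format val * 2^-shift as a decimal string the way the fixed hunk
     * does: scale to nanos, arithmetic-shift, then split with *signed*
     * division so negative readings keep their sign and fraction. */
    static void format_fractional_log2(int32_t val, int shift)
    {
            int64_t tmp = ((int64_t)val * 1000000000LL) >> shift;
            int64_t integer = tmp / 1000000000LL;   /* signed divide */
            int64_t frac    = tmp % 1000000000LL;   /* keeps tmp's sign */

            printf("%" PRId64 ".%09" PRId64 "\n",
                   integer, frac < 0 ? -frac : frac);
    }

    int main(void)
    {
            format_fractional_log2(3, 1);   /* prints 1.500000000 */
            format_fractional_log2(-3, 1);  /* prints -1.500000000; the old
                                             * do_div() path mangled this,
                                             * do_div() being unsigned-only */
            return 0;
    }
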
diff --git a/drivers/iio/pressure/st_pressure_core.c b/drivers/iio/pressure/st_pressure_core.c
index 5f2680855552..fd0edca0e656 100644
--- a/drivers/iio/pressure/st_pressure_core.c
+++ b/drivers/iio/pressure/st_pressure_core.c
@@ -457,6 +457,7 @@ static const struct st_sensor_settings st_press_sensors_settings[] = {
457 .addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR, 457 .addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR,
458 }, 458 },
459 .multi_read_bit = true, 459 .multi_read_bit = true,
460 .bootime = 2,
460 }, 461 },
461}; 462};
462 463
diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
index 91cbe86b25c8..fcbed35e95a8 100644
--- a/drivers/infiniband/ulp/isert/ib_isert.c
+++ b/drivers/infiniband/ulp/isert/ib_isert.c
@@ -817,6 +817,7 @@ isert_post_recvm(struct isert_conn *isert_conn, u32 count)
817 rx_wr->sg_list = &rx_desc->rx_sg; 817 rx_wr->sg_list = &rx_desc->rx_sg;
818 rx_wr->num_sge = 1; 818 rx_wr->num_sge = 1;
819 rx_wr->next = rx_wr + 1; 819 rx_wr->next = rx_wr + 1;
820 rx_desc->in_use = false;
820 } 821 }
821 rx_wr--; 822 rx_wr--;
822 rx_wr->next = NULL; /* mark end of work requests list */ 823 rx_wr->next = NULL; /* mark end of work requests list */
@@ -835,6 +836,15 @@ isert_post_recv(struct isert_conn *isert_conn, struct iser_rx_desc *rx_desc)
835 struct ib_recv_wr *rx_wr_failed, rx_wr; 836 struct ib_recv_wr *rx_wr_failed, rx_wr;
836 int ret; 837 int ret;
837 838
839 if (!rx_desc->in_use) {
840 /*
841 * if the descriptor is not in-use we already reposted it
842 * for recv, so just silently return
843 */
844 return 0;
845 }
846
847 rx_desc->in_use = false;
838 rx_wr.wr_cqe = &rx_desc->rx_cqe; 848 rx_wr.wr_cqe = &rx_desc->rx_cqe;
839 rx_wr.sg_list = &rx_desc->rx_sg; 849 rx_wr.sg_list = &rx_desc->rx_sg;
840 rx_wr.num_sge = 1; 850 rx_wr.num_sge = 1;
@@ -1397,6 +1407,8 @@ isert_recv_done(struct ib_cq *cq, struct ib_wc *wc)
1397 return; 1407 return;
1398 } 1408 }
1399 1409
1410 rx_desc->in_use = true;
1411
1400 ib_dma_sync_single_for_cpu(ib_dev, rx_desc->dma_addr, 1412 ib_dma_sync_single_for_cpu(ib_dev, rx_desc->dma_addr,
1401 ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE); 1413 ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
1402 1414
@@ -1659,10 +1671,23 @@ isert_rdma_write_done(struct ib_cq *cq, struct ib_wc *wc)
1659 ret = isert_check_pi_status(cmd, isert_cmd->rw.sig->sig_mr); 1671 ret = isert_check_pi_status(cmd, isert_cmd->rw.sig->sig_mr);
1660 isert_rdma_rw_ctx_destroy(isert_cmd, isert_conn); 1672 isert_rdma_rw_ctx_destroy(isert_cmd, isert_conn);
1661 1673
1662 if (ret) 1674 if (ret) {
1663 transport_send_check_condition_and_sense(cmd, cmd->pi_err, 0); 1675 /*
1664 else 1676 * transport_generic_request_failure() expects to have
1665 isert_put_response(isert_conn->conn, isert_cmd->iscsi_cmd); 1677 * plus two references to handle queue-full, so re-add
1678 * one here as target-core will have already dropped
1679 * it after the first isert_put_datain() callback.
1680 */
1681 kref_get(&cmd->cmd_kref);
1682 transport_generic_request_failure(cmd, cmd->pi_err);
1683 } else {
1684 /*
1685 * XXX: isert_put_response() failure is not retried.
1686 */
1687 ret = isert_put_response(isert_conn->conn, isert_cmd->iscsi_cmd);
1688 if (ret)
1689 pr_warn_ratelimited("isert_put_response() ret: %d\n", ret);
1690 }
1666} 1691}
1667 1692
1668static void 1693static void
@@ -1699,13 +1724,15 @@ isert_rdma_read_done(struct ib_cq *cq, struct ib_wc *wc)
1699 cmd->i_state = ISTATE_RECEIVED_LAST_DATAOUT; 1724 cmd->i_state = ISTATE_RECEIVED_LAST_DATAOUT;
1700 spin_unlock_bh(&cmd->istate_lock); 1725 spin_unlock_bh(&cmd->istate_lock);
1701 1726
1702 if (ret) { 1727 /*
1703 target_put_sess_cmd(se_cmd); 1728 * transport_generic_request_failure() will drop the extra
1704 transport_send_check_condition_and_sense(se_cmd, 1729 * se_cmd->cmd_kref reference after T10-PI error, and handle
1705 se_cmd->pi_err, 0); 1730 * any non-zero ->queue_status() callback error retries.
1706 } else { 1731 */
1732 if (ret)
1733 transport_generic_request_failure(se_cmd, se_cmd->pi_err);
1734 else
1707 target_execute_cmd(se_cmd); 1735 target_execute_cmd(se_cmd);
1708 }
1709} 1736}
1710 1737
1711static void 1738static void
@@ -2171,26 +2198,28 @@ isert_put_datain(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
2171 chain_wr = &isert_cmd->tx_desc.send_wr; 2198 chain_wr = &isert_cmd->tx_desc.send_wr;
2172 } 2199 }
2173 2200
2174 isert_rdma_rw_ctx_post(isert_cmd, isert_conn, cqe, chain_wr); 2201 rc = isert_rdma_rw_ctx_post(isert_cmd, isert_conn, cqe, chain_wr);
2175 isert_dbg("Cmd: %p posted RDMA_WRITE for iSER Data READ\n", isert_cmd); 2202 isert_dbg("Cmd: %p posted RDMA_WRITE for iSER Data READ rc: %d\n",
2176 return 1; 2203 isert_cmd, rc);
2204 return rc;
2177} 2205}
2178 2206
2179static int 2207static int
2180isert_get_dataout(struct iscsi_conn *conn, struct iscsi_cmd *cmd, bool recovery) 2208isert_get_dataout(struct iscsi_conn *conn, struct iscsi_cmd *cmd, bool recovery)
2181{ 2209{
2182 struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd); 2210 struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
2211 int ret;
2183 2212
2184 isert_dbg("Cmd: %p RDMA_READ data_length: %u write_data_done: %u\n", 2213 isert_dbg("Cmd: %p RDMA_READ data_length: %u write_data_done: %u\n",
2185 isert_cmd, cmd->se_cmd.data_length, cmd->write_data_done); 2214 isert_cmd, cmd->se_cmd.data_length, cmd->write_data_done);
2186 2215
2187 isert_cmd->tx_desc.tx_cqe.done = isert_rdma_read_done; 2216 isert_cmd->tx_desc.tx_cqe.done = isert_rdma_read_done;
2188 isert_rdma_rw_ctx_post(isert_cmd, conn->context, 2217 ret = isert_rdma_rw_ctx_post(isert_cmd, conn->context,
2189 &isert_cmd->tx_desc.tx_cqe, NULL); 2218 &isert_cmd->tx_desc.tx_cqe, NULL);
2190 2219
2191 isert_dbg("Cmd: %p posted RDMA_READ memory for ISER Data WRITE\n", 2220 isert_dbg("Cmd: %p posted RDMA_READ memory for ISER Data WRITE rc: %d\n",
2192 isert_cmd); 2221 isert_cmd, ret);
2193 return 0; 2222 return ret;
2194} 2223}
2195 2224
2196static int 2225static int
diff --git a/drivers/infiniband/ulp/isert/ib_isert.h b/drivers/infiniband/ulp/isert/ib_isert.h
index c02ada57d7f5..87d994de8c91 100644
--- a/drivers/infiniband/ulp/isert/ib_isert.h
+++ b/drivers/infiniband/ulp/isert/ib_isert.h
@@ -60,7 +60,7 @@
60 60
61#define ISER_RX_PAD_SIZE (ISCSI_DEF_MAX_RECV_SEG_LEN + 4096 - \ 61#define ISER_RX_PAD_SIZE (ISCSI_DEF_MAX_RECV_SEG_LEN + 4096 - \
62 (ISER_RX_PAYLOAD_SIZE + sizeof(u64) + sizeof(struct ib_sge) + \ 62 (ISER_RX_PAYLOAD_SIZE + sizeof(u64) + sizeof(struct ib_sge) + \
63 sizeof(struct ib_cqe))) 63 sizeof(struct ib_cqe) + sizeof(bool)))
64 64
65#define ISCSI_ISER_SG_TABLESIZE 256 65#define ISCSI_ISER_SG_TABLESIZE 256
66 66
@@ -85,6 +85,7 @@ struct iser_rx_desc {
85 u64 dma_addr; 85 u64 dma_addr;
86 struct ib_sge rx_sg; 86 struct ib_sge rx_sg;
87 struct ib_cqe rx_cqe; 87 struct ib_cqe rx_cqe;
88 bool in_use;
88 char pad[ISER_RX_PAD_SIZE]; 89 char pad[ISER_RX_PAD_SIZE];
89} __packed; 90} __packed;
90 91
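
The in_use flag threaded through ib_isert.c/h above prevents the same receive descriptor from being posted twice: the completion handler marks it busy, the normal path reposts it (clearing the flag), and a later queue-full retry finds the flag clear and becomes a silent no-op. A toy userspace model of that life cycle (all names illustrative):

    #include <stdbool.h>
    #include <stdio.h>

    struct rx_desc {
            bool in_use;
    };

    static void recv_completion(struct rx_desc *d)
    {
            d->in_use = true;       /* owned by the consumer from here on */
    }

    static int post_recv(struct rx_desc *d)
    {
            if (!d->in_use)
                    return 0;       /* already reposted: nothing to do */
            d->in_use = false;
            printf("descriptor posted to the receive queue\n");
            return 0;
    }

    int main(void)
    {
            struct rx_desc d = { .in_use = false };

            recv_completion(&d);
            post_recv(&d);  /* real post */
            post_recv(&d);  /* retry path: silently skipped */
            return 0;
    }
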
diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
index 155fcb3b6230..153b1ee13e03 100644
--- a/drivers/input/joystick/xpad.c
+++ b/drivers/input/joystick/xpad.c
@@ -202,6 +202,7 @@ static const struct xpad_device {
202 { 0x1430, 0x8888, "TX6500+ Dance Pad (first generation)", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX }, 202 { 0x1430, 0x8888, "TX6500+ Dance Pad (first generation)", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX },
203 { 0x146b, 0x0601, "BigBen Interactive XBOX 360 Controller", 0, XTYPE_XBOX360 }, 203 { 0x146b, 0x0601, "BigBen Interactive XBOX 360 Controller", 0, XTYPE_XBOX360 },
204 { 0x1532, 0x0037, "Razer Sabertooth", 0, XTYPE_XBOX360 }, 204 { 0x1532, 0x0037, "Razer Sabertooth", 0, XTYPE_XBOX360 },
205 { 0x1532, 0x0a03, "Razer Wildcat", 0, XTYPE_XBOXONE },
205 { 0x15e4, 0x3f00, "Power A Mini Pro Elite", 0, XTYPE_XBOX360 }, 206 { 0x15e4, 0x3f00, "Power A Mini Pro Elite", 0, XTYPE_XBOX360 },
206 { 0x15e4, 0x3f0a, "Xbox Airflo wired controller", 0, XTYPE_XBOX360 }, 207 { 0x15e4, 0x3f0a, "Xbox Airflo wired controller", 0, XTYPE_XBOX360 },
207 { 0x15e4, 0x3f10, "Batarang Xbox 360 controller", 0, XTYPE_XBOX360 }, 208 { 0x15e4, 0x3f10, "Batarang Xbox 360 controller", 0, XTYPE_XBOX360 },
@@ -326,6 +327,7 @@ static struct usb_device_id xpad_table[] = {
326 XPAD_XBOX360_VENDOR(0x1430), /* RedOctane X-Box 360 controllers */ 327 XPAD_XBOX360_VENDOR(0x1430), /* RedOctane X-Box 360 controllers */
327 XPAD_XBOX360_VENDOR(0x146b), /* BigBen Interactive Controllers */ 328 XPAD_XBOX360_VENDOR(0x146b), /* BigBen Interactive Controllers */
328 XPAD_XBOX360_VENDOR(0x1532), /* Razer Sabertooth */ 329 XPAD_XBOX360_VENDOR(0x1532), /* Razer Sabertooth */
330 XPAD_XBOXONE_VENDOR(0x1532), /* Razer Wildcat */
329 XPAD_XBOX360_VENDOR(0x15e4), /* Numark X-Box 360 controllers */ 331 XPAD_XBOX360_VENDOR(0x15e4), /* Numark X-Box 360 controllers */
330 XPAD_XBOX360_VENDOR(0x162e), /* Joytech X-Box 360 controllers */ 332 XPAD_XBOX360_VENDOR(0x162e), /* Joytech X-Box 360 controllers */
331 XPAD_XBOX360_VENDOR(0x1689), /* Razer Onza */ 333 XPAD_XBOX360_VENDOR(0x1689), /* Razer Onza */
diff --git a/drivers/irqchip/irq-imx-gpcv2.c b/drivers/irqchip/irq-imx-gpcv2.c
index 15af9a9753e5..2d203b422129 100644
--- a/drivers/irqchip/irq-imx-gpcv2.c
+++ b/drivers/irqchip/irq-imx-gpcv2.c
@@ -230,6 +230,8 @@ static int __init imx_gpcv2_irqchip_init(struct device_node *node,
230 return -ENOMEM; 230 return -ENOMEM;
231 } 231 }
232 232
233 raw_spin_lock_init(&cd->rlock);
234
233 cd->gpc_base = of_iomap(node, 0); 235 cd->gpc_base = of_iomap(node, 0);
234 if (!cd->gpc_base) { 236 if (!cd->gpc_base) {
235 pr_err("fsl-gpcv2: unable to map gpc registers\n"); 237 pr_err("fsl-gpcv2: unable to map gpc registers\n");
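
The one-line imx-gpcv2 fix is a reminder of a whole bug class: a lock inside a shared object must be initialised before any code path can take it, i.e. before the object is handed to a framework that may invoke callbacks immediately. A minimal kernel-context sketch of the safe ordering (struct and function names are illustrative):

    #include <linux/slab.h>
    #include <linux/spinlock.h>

    struct gpcv2_irqchip_data {
            raw_spinlock_t  rlock;
            void __iomem    *gpc_base;
    };

    /* Fully initialise the object -- lock included -- before anything can
     * reach it; once registered with the irq core, callbacks may take
     * rlock at any moment. */
    static struct gpcv2_irqchip_data *gpcv2_data_alloc(void)
    {
            struct gpcv2_irqchip_data *cd;

            cd = kzalloc(sizeof(*cd), GFP_KERNEL);
            if (!cd)
                    return NULL;

            raw_spin_lock_init(&cd->rlock);
            return cd;
    }
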
diff --git a/drivers/md/dm-cache-metadata.c b/drivers/md/dm-cache-metadata.c
index e4c2c1a1e993..6735c8d6a445 100644
--- a/drivers/md/dm-cache-metadata.c
+++ b/drivers/md/dm-cache-metadata.c
@@ -932,7 +932,7 @@ static int blocks_are_clean_separate_dirty(struct dm_cache_metadata *cmd,
932 *result = true; 932 *result = true;
933 933
934 r = dm_bitset_cursor_begin(&cmd->dirty_info, cmd->dirty_root, 934 r = dm_bitset_cursor_begin(&cmd->dirty_info, cmd->dirty_root,
935 from_cblock(begin), &cmd->dirty_cursor); 935 from_cblock(cmd->cache_blocks), &cmd->dirty_cursor);
936 if (r) { 936 if (r) {
937 DMERR("%s: dm_bitset_cursor_begin for dirty failed", __func__); 937 DMERR("%s: dm_bitset_cursor_begin for dirty failed", __func__);
938 return r; 938 return r;
@@ -959,14 +959,16 @@ static int blocks_are_clean_separate_dirty(struct dm_cache_metadata *cmd,
959 return 0; 959 return 0;
960 } 960 }
961 961
962 begin = to_cblock(from_cblock(begin) + 1);
963 if (begin == end)
964 break;
965
962 r = dm_bitset_cursor_next(&cmd->dirty_cursor); 966 r = dm_bitset_cursor_next(&cmd->dirty_cursor);
963 if (r) { 967 if (r) {
964 DMERR("%s: dm_bitset_cursor_next for dirty failed", __func__); 968 DMERR("%s: dm_bitset_cursor_next for dirty failed", __func__);
965 dm_bitset_cursor_end(&cmd->dirty_cursor); 969 dm_bitset_cursor_end(&cmd->dirty_cursor);
966 return r; 970 return r;
967 } 971 }
968
969 begin = to_cblock(from_cblock(begin) + 1);
970 } 972 }
971 973
972 dm_bitset_cursor_end(&cmd->dirty_cursor); 974 dm_bitset_cursor_end(&cmd->dirty_cursor);
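
Two things change in the dm-cache hunk: the dirty-bitset cursor is primed with the total number of cache blocks rather than the starting block, and the loop now advances the block counter and checks for the end *before* stepping the cursor, so dm_bitset_cursor_next() is never called past the final bit. A toy model of that iteration order (cursor type and names invented for illustration):

    #include <stdio.h>

    /* Toy cursor over nr items; next() fails if pushed past the last one,
     * which is exactly what the reordered loop avoids. */
    struct cursor {
            unsigned pos, nr;
    };

    static int cursor_next(struct cursor *c)
    {
            if (c->pos + 1 >= c->nr)
                    return -1;      /* stepping past the end is an error */
            c->pos++;
            return 0;
    }

    int main(void)
    {
            struct cursor c = { .pos = 0, .nr = 4 };
            unsigned begin = 0, end = 4;

            for (;;) {
                    printf("visit block %u\n", begin);

                    begin++;
                    if (begin == end)
                            break;  /* done: never step past the last bit */

                    if (cursor_next(&c) < 0) {
                            fprintf(stderr, "cursor_next failed\n");
                            return 1;
                    }
            }
            return 0;
    }
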
diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
index f8564d63982f..1e217ba84d09 100644
--- a/drivers/md/dm-raid.c
+++ b/drivers/md/dm-raid.c
@@ -3726,7 +3726,7 @@ static int raid_preresume(struct dm_target *ti)
3726 return r; 3726 return r;
3727 3727
3728 /* Resize bitmap to adjust to changed region size (aka MD bitmap chunksize) */ 3728 /* Resize bitmap to adjust to changed region size (aka MD bitmap chunksize) */
3729 if (test_bit(RT_FLAG_RS_BITMAP_LOADED, &rs->runtime_flags) && 3729 if (test_bit(RT_FLAG_RS_BITMAP_LOADED, &rs->runtime_flags) && mddev->bitmap &&
3730 mddev->bitmap_info.chunksize != to_bytes(rs->requested_bitmap_chunk_sectors)) { 3730 mddev->bitmap_info.chunksize != to_bytes(rs->requested_bitmap_chunk_sectors)) {
3731 r = bitmap_resize(mddev->bitmap, mddev->dev_sectors, 3731 r = bitmap_resize(mddev->bitmap, mddev->dev_sectors,
3732 to_bytes(rs->requested_bitmap_chunk_sectors), 0); 3732 to_bytes(rs->requested_bitmap_chunk_sectors), 0);
diff --git a/drivers/md/dm-rq.c b/drivers/md/dm-rq.c
index 28955b94d2b2..0b081d170087 100644
--- a/drivers/md/dm-rq.c
+++ b/drivers/md/dm-rq.c
@@ -755,6 +755,7 @@ static int dm_mq_queue_rq(struct blk_mq_hw_ctx *hctx,
755 /* Undo dm_start_request() before requeuing */ 755 /* Undo dm_start_request() before requeuing */
756 rq_end_stats(md, rq); 756 rq_end_stats(md, rq);
757 rq_completed(md, rq_data_dir(rq), false); 757 rq_completed(md, rq_data_dir(rq), false);
758 blk_mq_delay_run_hw_queue(hctx, 100/*ms*/);
758 return BLK_MQ_RQ_QUEUE_BUSY; 759 return BLK_MQ_RQ_QUEUE_BUSY;
759 } 760 }
760 761
diff --git a/drivers/md/dm-verity-fec.c b/drivers/md/dm-verity-fec.c
index 0f0eb8a3d922..78f36012eaca 100644
--- a/drivers/md/dm-verity-fec.c
+++ b/drivers/md/dm-verity-fec.c
@@ -146,8 +146,6 @@ static int fec_decode_bufs(struct dm_verity *v, struct dm_verity_fec_io *fio,
146 block = fec_buffer_rs_block(v, fio, n, i); 146 block = fec_buffer_rs_block(v, fio, n, i);
147 res = fec_decode_rs8(v, fio, block, &par[offset], neras); 147 res = fec_decode_rs8(v, fio, block, &par[offset], neras);
148 if (res < 0) { 148 if (res < 0) {
149 dm_bufio_release(buf);
150
151 r = res; 149 r = res;
152 goto error; 150 goto error;
153 } 151 }
@@ -172,6 +170,8 @@ static int fec_decode_bufs(struct dm_verity *v, struct dm_verity_fec_io *fio,
172done: 170done:
173 r = corrected; 171 r = corrected;
174error: 172error:
173 dm_bufio_release(buf);
174
175 if (r < 0 && neras) 175 if (r < 0 && neras)
176 DMERR_LIMIT("%s: FEC %llu: failed to correct: %d", 176 DMERR_LIMIT("%s: FEC %llu: failed to correct: %d",
177 v->data_dev->name, (unsigned long long)rsb, r); 177 v->data_dev->name, (unsigned long long)rsb, r);
@@ -269,7 +269,7 @@ static int fec_read_bufs(struct dm_verity *v, struct dm_verity_io *io,
269 &is_zero) == 0) { 269 &is_zero) == 0) {
270 /* skip known zero blocks entirely */ 270 /* skip known zero blocks entirely */
271 if (is_zero) 271 if (is_zero)
272 continue; 272 goto done;
273 273
274 /* 274 /*
275 * skip if we have already found the theoretical 275 * skip if we have already found the theoretical
@@ -439,6 +439,13 @@ int verity_fec_decode(struct dm_verity *v, struct dm_verity_io *io,
439 if (!verity_fec_is_enabled(v)) 439 if (!verity_fec_is_enabled(v))
440 return -EOPNOTSUPP; 440 return -EOPNOTSUPP;
441 441
442 if (fio->level >= DM_VERITY_FEC_MAX_RECURSION) {
443 DMWARN_LIMIT("%s: FEC: recursion too deep", v->data_dev->name);
444 return -EIO;
445 }
446
447 fio->level++;
448
442 if (type == DM_VERITY_BLOCK_TYPE_METADATA) 449 if (type == DM_VERITY_BLOCK_TYPE_METADATA)
443 block += v->data_blocks; 450 block += v->data_blocks;
444 451
@@ -470,7 +477,7 @@ int verity_fec_decode(struct dm_verity *v, struct dm_verity_io *io,
470 if (r < 0) { 477 if (r < 0) {
471 r = fec_decode_rsb(v, io, fio, rsb, offset, true); 478 r = fec_decode_rsb(v, io, fio, rsb, offset, true);
472 if (r < 0) 479 if (r < 0)
473 return r; 480 goto done;
474 } 481 }
475 482
476 if (dest) 483 if (dest)
@@ -480,6 +487,8 @@ int verity_fec_decode(struct dm_verity *v, struct dm_verity_io *io,
480 r = verity_for_bv_block(v, io, iter, fec_bv_copy); 487 r = verity_for_bv_block(v, io, iter, fec_bv_copy);
481 } 488 }
482 489
490done:
491 fio->level--;
483 return r; 492 return r;
484} 493}
485 494
@@ -520,6 +529,7 @@ void verity_fec_init_io(struct dm_verity_io *io)
520 memset(fio->bufs, 0, sizeof(fio->bufs)); 529 memset(fio->bufs, 0, sizeof(fio->bufs));
521 fio->nbufs = 0; 530 fio->nbufs = 0;
522 fio->output = NULL; 531 fio->output = NULL;
532 fio->level = 0;
523} 533}
524 534
525/* 535/*
diff --git a/drivers/md/dm-verity-fec.h b/drivers/md/dm-verity-fec.h
index 7fa0298b995e..bb31ce87a933 100644
--- a/drivers/md/dm-verity-fec.h
+++ b/drivers/md/dm-verity-fec.h
@@ -27,6 +27,9 @@
27#define DM_VERITY_FEC_BUF_MAX \ 27#define DM_VERITY_FEC_BUF_MAX \
28 (1 << (PAGE_SHIFT - DM_VERITY_FEC_BUF_RS_BITS)) 28 (1 << (PAGE_SHIFT - DM_VERITY_FEC_BUF_RS_BITS))
29 29
30/* maximum recursion level for verity_fec_decode */
31#define DM_VERITY_FEC_MAX_RECURSION 4
32
30#define DM_VERITY_OPT_FEC_DEV "use_fec_from_device" 33#define DM_VERITY_OPT_FEC_DEV "use_fec_from_device"
31#define DM_VERITY_OPT_FEC_BLOCKS "fec_blocks" 34#define DM_VERITY_OPT_FEC_BLOCKS "fec_blocks"
32#define DM_VERITY_OPT_FEC_START "fec_start" 35#define DM_VERITY_OPT_FEC_START "fec_start"
@@ -58,6 +61,7 @@ struct dm_verity_fec_io {
58 unsigned nbufs; /* number of buffers allocated */ 61 unsigned nbufs; /* number of buffers allocated */
59 u8 *output; /* buffer for corrected output */ 62 u8 *output; /* buffer for corrected output */
60 size_t output_pos; 63 size_t output_pos;
64 unsigned level; /* recursion level */
61}; 65};
62 66
63#ifdef CONFIG_DM_VERITY_FEC 67#ifdef CONFIG_DM_VERITY_FEC
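
Recovering one block with FEC can require verifying, and therefore FEC-decoding, other blocks, so verity_fec_decode() is indirectly recursive; the new per-io level counter bounds that at DM_VERITY_FEC_MAX_RECURSION and fails with -EIO instead of overflowing the kernel stack. A compact userspace sketch of the same depth guard:

    #include <stdio.h>

    #define MAX_RECURSION 4

    struct io {
            unsigned level; /* current decode depth, 0 when idle */
    };

    /* Bump the level on entry, refuse to go deeper than MAX_RECURSION,
     * and always drop the level on the way out so later decodes on the
     * same io start from a clean slate. */
    static int decode(struct io *io, int depth_needed)
    {
            int r = 0;

            if (io->level >= MAX_RECURSION) {
                    fprintf(stderr, "recursion too deep\n");
                    return -1;
            }
            io->level++;

            if (depth_needed > 0)
                    r = decode(io, depth_needed - 1);  /* dependent block */

            io->level--;
            return r;
    }

    int main(void)
    {
            struct io io = { .level = 0 };

            printf("shallow decode: %d\n", decode(&io, 2)); /* succeeds */
            printf("deep decode:    %d\n", decode(&io, 8)); /* refused at depth 4 */
            return 0;
    }
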
diff --git a/drivers/net/can/ifi_canfd/ifi_canfd.c b/drivers/net/can/ifi_canfd/ifi_canfd.c
index 138f5ae75c0b..4d1fe8d95042 100644
--- a/drivers/net/can/ifi_canfd/ifi_canfd.c
+++ b/drivers/net/can/ifi_canfd/ifi_canfd.c
@@ -557,7 +557,7 @@ static int ifi_canfd_poll(struct napi_struct *napi, int quota)
557 int work_done = 0; 557 int work_done = 0;
558 558
559 u32 stcmd = readl(priv->base + IFI_CANFD_STCMD); 559 u32 stcmd = readl(priv->base + IFI_CANFD_STCMD);
560 u32 rxstcmd = readl(priv->base + IFI_CANFD_STCMD); 560 u32 rxstcmd = readl(priv->base + IFI_CANFD_RXSTCMD);
561 u32 errctr = readl(priv->base + IFI_CANFD_ERROR_CTR); 561 u32 errctr = readl(priv->base + IFI_CANFD_ERROR_CTR);
562 562
563 /* Handle bus state changes */ 563 /* Handle bus state changes */
diff --git a/drivers/net/can/rcar/rcar_can.c b/drivers/net/can/rcar/rcar_can.c
index caed4e6960f8..11662f479e76 100644
--- a/drivers/net/can/rcar/rcar_can.c
+++ b/drivers/net/can/rcar/rcar_can.c
@@ -826,8 +826,7 @@ static int rcar_can_probe(struct platform_device *pdev)
826 826
827 devm_can_led_init(ndev); 827 devm_can_led_init(ndev);
828 828
829 dev_info(&pdev->dev, "device registered (regs @ %p, IRQ%d)\n", 829 dev_info(&pdev->dev, "device registered (IRQ%d)\n", ndev->irq);
830 priv->regs, ndev->irq);
831 830
832 return 0; 831 return 0;
833fail_candev: 832fail_candev:
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
index 65c056e2f705..450b6a9fdec7 100644
--- a/drivers/net/team/team.c
+++ b/drivers/net/team/team.c
@@ -990,7 +990,7 @@ static void team_port_disable(struct team *team,
990#define TEAM_ENC_FEATURES (NETIF_F_HW_CSUM | NETIF_F_SG | \ 990#define TEAM_ENC_FEATURES (NETIF_F_HW_CSUM | NETIF_F_SG | \
991 NETIF_F_RXCSUM | NETIF_F_ALL_TSO) 991 NETIF_F_RXCSUM | NETIF_F_ALL_TSO)
992 992
993static void ___team_compute_features(struct team *team) 993static void __team_compute_features(struct team *team)
994{ 994{
995 struct team_port *port; 995 struct team_port *port;
996 u32 vlan_features = TEAM_VLAN_FEATURES & NETIF_F_ALL_FOR_ALL; 996 u32 vlan_features = TEAM_VLAN_FEATURES & NETIF_F_ALL_FOR_ALL;
@@ -1023,16 +1023,10 @@ static void ___team_compute_features(struct team *team)
1023 team->dev->priv_flags |= IFF_XMIT_DST_RELEASE; 1023 team->dev->priv_flags |= IFF_XMIT_DST_RELEASE;
1024} 1024}
1025 1025
1026static void __team_compute_features(struct team *team)
1027{
1028 ___team_compute_features(team);
1029 netdev_change_features(team->dev);
1030}
1031
1032static void team_compute_features(struct team *team) 1026static void team_compute_features(struct team *team)
1033{ 1027{
1034 mutex_lock(&team->lock); 1028 mutex_lock(&team->lock);
1035 ___team_compute_features(team); 1029 __team_compute_features(team);
1036 mutex_unlock(&team->lock); 1030 mutex_unlock(&team->lock);
1037 netdev_change_features(team->dev); 1031 netdev_change_features(team->dev);
1038} 1032}
@@ -1641,6 +1635,7 @@ static void team_uninit(struct net_device *dev)
1641 team_notify_peers_fini(team); 1635 team_notify_peers_fini(team);
1642 team_queue_override_fini(team); 1636 team_queue_override_fini(team);
1643 mutex_unlock(&team->lock); 1637 mutex_unlock(&team->lock);
1638 netdev_change_features(dev);
1644} 1639}
1645 1640
1646static void team_destructor(struct net_device *dev) 1641static void team_destructor(struct net_device *dev)
@@ -1928,6 +1923,10 @@ static int team_add_slave(struct net_device *dev, struct net_device *port_dev)
1928 mutex_lock(&team->lock); 1923 mutex_lock(&team->lock);
1929 err = team_port_add(team, port_dev); 1924 err = team_port_add(team, port_dev);
1930 mutex_unlock(&team->lock); 1925 mutex_unlock(&team->lock);
1926
1927 if (!err)
1928 netdev_change_features(dev);
1929
1931 return err; 1930 return err;
1932} 1931}
1933 1932
@@ -1939,6 +1938,10 @@ static int team_del_slave(struct net_device *dev, struct net_device *port_dev)
1939 mutex_lock(&team->lock); 1938 mutex_lock(&team->lock);
1940 err = team_port_del(team, port_dev); 1939 err = team_port_del(team, port_dev);
1941 mutex_unlock(&team->lock); 1940 mutex_unlock(&team->lock);
1941
1942 if (!err)
1943 netdev_change_features(dev);
1944
1942 return err; 1945 return err;
1943} 1946}
1944 1947
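
The team driver hunks all enforce one rule: netdev_change_features() must not be called while team->lock is held, because it can re-enter paths that also want that lock. State is therefore mutated under the mutex and the notification fires only after it is dropped. A small pthread model of the ordering (names illustrative; the toy skips locking around the read in the notifier):

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t team_lock = PTHREAD_MUTEX_INITIALIZER;
    static int nports;

    /* Stand-in for netdev_change_features(): in the kernel this can
     * re-enter code that also wants team_lock, so it must run unlocked. */
    static void notify_features_changed(void)
    {
            printf("features recomputed for %d ports\n", nports);
    }

    static int team_add_port(void)
    {
            int err = 0;

            pthread_mutex_lock(&team_lock);
            nports++;                       /* mutate state under the lock */
            pthread_mutex_unlock(&team_lock);

            if (!err)                       /* notify only after unlocking */
                    notify_features_changed();
            return err;
    }

    int main(void)
    {
            team_add_port();
            return 0;
    }
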
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index adbed261cc8a..a3ed8115747c 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -1202,7 +1202,7 @@ static const struct usb_device_id products[] = {
1202 {QMI_FIXED_INTF(0x2357, 0x9000, 4)}, /* TP-LINK MA260 */ 1202 {QMI_FIXED_INTF(0x2357, 0x9000, 4)}, /* TP-LINK MA260 */
1203 {QMI_QUIRK_SET_DTR(0x1bc7, 0x1040, 2)}, /* Telit LE922A */ 1203 {QMI_QUIRK_SET_DTR(0x1bc7, 0x1040, 2)}, /* Telit LE922A */
1204 {QMI_FIXED_INTF(0x1bc7, 0x1200, 5)}, /* Telit LE920 */ 1204 {QMI_FIXED_INTF(0x1bc7, 0x1200, 5)}, /* Telit LE920 */
1205 {QMI_FIXED_INTF(0x1bc7, 0x1201, 2)}, /* Telit LE920 */ 1205 {QMI_QUIRK_SET_DTR(0x1bc7, 0x1201, 2)}, /* Telit LE920, LE920A4 */
1206 {QMI_FIXED_INTF(0x1c9e, 0x9b01, 3)}, /* XS Stick W100-2 from 4G Systems */ 1206 {QMI_FIXED_INTF(0x1c9e, 0x9b01, 3)}, /* XS Stick W100-2 from 4G Systems */
1207 {QMI_FIXED_INTF(0x0b3c, 0xc000, 4)}, /* Olivetti Olicard 100 */ 1207 {QMI_FIXED_INTF(0x0b3c, 0xc000, 4)}, /* Olivetti Olicard 100 */
1208 {QMI_FIXED_INTF(0x0b3c, 0xc001, 4)}, /* Olivetti Olicard 120 */ 1208 {QMI_FIXED_INTF(0x0b3c, 0xc001, 4)}, /* Olivetti Olicard 120 */
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
index 1cc945cbeaa3..79048e72c1bd 100644
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@ -1975,7 +1975,7 @@ static int __usbnet_read_cmd(struct usbnet *dev, u8 cmd, u8 reqtype,
1975 " value=0x%04x index=0x%04x size=%d\n", 1975 " value=0x%04x index=0x%04x size=%d\n",
1976 cmd, reqtype, value, index, size); 1976 cmd, reqtype, value, index, size);
1977 1977
1978 if (data) { 1978 if (size) {
1979 buf = kmalloc(size, GFP_KERNEL); 1979 buf = kmalloc(size, GFP_KERNEL);
1980 if (!buf) 1980 if (!buf)
1981 goto out; 1981 goto out;
@@ -1984,8 +1984,13 @@ static int __usbnet_read_cmd(struct usbnet *dev, u8 cmd, u8 reqtype,
1984 err = usb_control_msg(dev->udev, usb_rcvctrlpipe(dev->udev, 0), 1984 err = usb_control_msg(dev->udev, usb_rcvctrlpipe(dev->udev, 0),
1985 cmd, reqtype, value, index, buf, size, 1985 cmd, reqtype, value, index, buf, size,
1986 USB_CTRL_GET_TIMEOUT); 1986 USB_CTRL_GET_TIMEOUT);
1987 if (err > 0 && err <= size) 1987 if (err > 0 && err <= size) {
1988 memcpy(data, buf, err); 1988 if (data)
1989 memcpy(data, buf, err);
1990 else
1991 netdev_dbg(dev->net,
1992 "Huh? Data requested but thrown away.\n");
1993 }
1989 kfree(buf); 1994 kfree(buf);
1990out: 1995out:
1991 return err; 1996 return err;
@@ -2006,7 +2011,13 @@ static int __usbnet_write_cmd(struct usbnet *dev, u8 cmd, u8 reqtype,
2006 buf = kmemdup(data, size, GFP_KERNEL); 2011 buf = kmemdup(data, size, GFP_KERNEL);
2007 if (!buf) 2012 if (!buf)
2008 goto out; 2013 goto out;
2009 } 2014 } else {
2015 if (size) {
2016 WARN_ON_ONCE(1);
2017 err = -EINVAL;
2018 goto out;
2019 }
2020 }
2010 2021
2011 err = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0), 2022 err = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0),
2012 cmd, reqtype, value, index, buf, size, 2023 cmd, reqtype, value, index, buf, size,
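
The usbnet changes tighten argument handling in the control-message helpers: the read path allocates its bounce buffer based on `size` (a caller may legitimately pass a NULL `data` sink), and the write path rejects the inconsistent "non-zero length but no payload" combination up front. A stand-alone sketch of the same checks (helper names illustrative):

    #include <errno.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* Allocate a bounce buffer whenever the transfer has a length, copy
     * back only if the caller supplied somewhere to put it, and refuse a
     * write that claims a length but provides no data. */
    static int read_cmd(void *data, size_t size)
    {
            void *buf;

            if (size) {
                    buf = malloc(size);
                    if (!buf)
                            return -ENOMEM;
                    memset(buf, 0xab, size);  /* pretend the device filled it */
                    if (data)
                            memcpy(data, buf, size);
                    else
                            fprintf(stderr, "data requested but thrown away\n");
                    free(buf);
            }
            return 0;
    }

    static int write_cmd(const void *data, size_t size)
    {
            if (!data && size)
                    return -EINVAL; /* length without payload is a caller bug */
            return 0;
    }

    int main(void)
    {
            unsigned char v[4];

            printf("read:  %d\n", read_cmd(v, sizeof(v)));
            printf("write: %d\n", write_cmd(NULL, 4));  /* -EINVAL (-22) */
            return 0;
    }
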
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index b0d241d110ec..666ada6130ab 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -2240,14 +2240,8 @@ static bool virtnet_validate_features(struct virtio_device *vdev)
2240#define MIN_MTU ETH_MIN_MTU 2240#define MIN_MTU ETH_MIN_MTU
2241#define MAX_MTU ETH_MAX_MTU 2241#define MAX_MTU ETH_MAX_MTU
2242 2242
2243static int virtnet_probe(struct virtio_device *vdev) 2243static int virtnet_validate(struct virtio_device *vdev)
2244{ 2244{
2245 int i, err;
2246 struct net_device *dev;
2247 struct virtnet_info *vi;
2248 u16 max_queue_pairs;
2249 int mtu;
2250
2251 if (!vdev->config->get) { 2245 if (!vdev->config->get) {
2252 dev_err(&vdev->dev, "%s failure: config access disabled\n", 2246 dev_err(&vdev->dev, "%s failure: config access disabled\n",
2253 __func__); 2247 __func__);
@@ -2257,6 +2251,25 @@ static int virtnet_probe(struct virtio_device *vdev)
2257 if (!virtnet_validate_features(vdev)) 2251 if (!virtnet_validate_features(vdev))
2258 return -EINVAL; 2252 return -EINVAL;
2259 2253
2254 if (virtio_has_feature(vdev, VIRTIO_NET_F_MTU)) {
2255 int mtu = virtio_cread16(vdev,
2256 offsetof(struct virtio_net_config,
2257 mtu));
2258 if (mtu < MIN_MTU)
2259 __virtio_clear_bit(vdev, VIRTIO_NET_F_MTU);
2260 }
2261
2262 return 0;
2263}
2264
2265static int virtnet_probe(struct virtio_device *vdev)
2266{
2267 int i, err;
2268 struct net_device *dev;
2269 struct virtnet_info *vi;
2270 u16 max_queue_pairs;
2271 int mtu;
2272
2260 /* Find if host supports multiqueue virtio_net device */ 2273 /* Find if host supports multiqueue virtio_net device */
2261 err = virtio_cread_feature(vdev, VIRTIO_NET_F_MQ, 2274 err = virtio_cread_feature(vdev, VIRTIO_NET_F_MQ,
2262 struct virtio_net_config, 2275 struct virtio_net_config,
@@ -2372,11 +2385,20 @@ static int virtnet_probe(struct virtio_device *vdev)
2372 offsetof(struct virtio_net_config, 2385 offsetof(struct virtio_net_config,
2373 mtu)); 2386 mtu));
2374 if (mtu < dev->min_mtu) { 2387 if (mtu < dev->min_mtu) {
2375 __virtio_clear_bit(vdev, VIRTIO_NET_F_MTU); 2388 /* Should never trigger: MTU was previously validated
2376 } else { 2389 * in virtnet_validate.
2377 dev->mtu = mtu; 2390 */
2378 dev->max_mtu = mtu; 2391 dev_err(&vdev->dev, "device MTU appears to have changed "
2392 "it is now %d < %d", mtu, dev->min_mtu);
2393 goto free_stats;
2379 } 2394 }
2395
2396 dev->mtu = mtu;
2397 dev->max_mtu = mtu;
2398
2399 /* TODO: size buffers correctly in this case. */
2400 if (dev->mtu > ETH_DATA_LEN)
2401 vi->big_packets = true;
2380 } 2402 }
2381 2403
2382 if (vi->any_header_sg) 2404 if (vi->any_header_sg)
@@ -2554,6 +2576,7 @@ static struct virtio_driver virtio_net_driver = {
2554 .driver.name = KBUILD_MODNAME, 2576 .driver.name = KBUILD_MODNAME,
2555 .driver.owner = THIS_MODULE, 2577 .driver.owner = THIS_MODULE,
2556 .id_table = id_table, 2578 .id_table = id_table,
2579 .validate = virtnet_validate,
2557 .probe = virtnet_probe, 2580 .probe = virtnet_probe,
2558 .remove = virtnet_remove, 2581 .remove = virtnet_remove,
2559 .config_changed = virtnet_config_changed, 2582 .config_changed = virtnet_config_changed,
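
The virtio change is a pattern worth noting: a virtio driver's .validate callback runs before the device status is finalised, which is the only point at which a feature bit may still be cleared, so the MTU sanity check moves there and virtnet_probe() can then trust VIRTIO_NET_F_MTU. A minimal kernel-context sketch of such a callback, wired up via the .validate member of struct virtio_driver exactly as the last hunk does (the function name is illustrative):

    #include <linux/if_ether.h>
    #include <linux/virtio.h>
    #include <linux/virtio_config.h>
    #include <linux/virtio_net.h>

    /* Runs before FEATURES_OK is set, so clearing a feature bit here is
     * still legal; by probe time the remaining bits can be trusted. */
    static int example_validate(struct virtio_device *vdev)
    {
            if (virtio_has_feature(vdev, VIRTIO_NET_F_MTU)) {
                    u16 mtu = virtio_cread16(vdev,
                                    offsetof(struct virtio_net_config, mtu));

                    if (mtu < ETH_MIN_MTU)
                            __virtio_clear_bit(vdev, VIRTIO_NET_F_MTU);
            }
            return 0;
    }
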
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 9b3b57fef446..9583a5f58a1d 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -270,7 +270,7 @@ static inline int nvme_setup_discard(struct nvme_ns *ns, struct request *req,
270 memset(cmnd, 0, sizeof(*cmnd)); 270 memset(cmnd, 0, sizeof(*cmnd));
271 cmnd->dsm.opcode = nvme_cmd_dsm; 271 cmnd->dsm.opcode = nvme_cmd_dsm;
272 cmnd->dsm.nsid = cpu_to_le32(ns->ns_id); 272 cmnd->dsm.nsid = cpu_to_le32(ns->ns_id);
273 cmnd->dsm.nr = segments - 1; 273 cmnd->dsm.nr = cpu_to_le32(segments - 1);
274 cmnd->dsm.attributes = cpu_to_le32(NVME_DSMGMT_AD); 274 cmnd->dsm.attributes = cpu_to_le32(NVME_DSMGMT_AD);
275 275
276 req->special_vec.bv_page = virt_to_page(range); 276 req->special_vec.bv_page = virt_to_page(range);
diff --git a/drivers/nvme/target/admin-cmd.c b/drivers/nvme/target/admin-cmd.c
index a7bcff45f437..76450b0c55f1 100644
--- a/drivers/nvme/target/admin-cmd.c
+++ b/drivers/nvme/target/admin-cmd.c
@@ -100,7 +100,7 @@ static u16 nvmet_get_smart_log(struct nvmet_req *req,
100 u16 status; 100 u16 status;
101 101
102 WARN_ON(req == NULL || slog == NULL); 102 WARN_ON(req == NULL || slog == NULL);
103 if (req->cmd->get_log_page.nsid == 0xFFFFFFFF) 103 if (req->cmd->get_log_page.nsid == cpu_to_le32(0xFFFFFFFF))
104 status = nvmet_get_smart_log_all(req, slog); 104 status = nvmet_get_smart_log_all(req, slog);
105 else 105 else
106 status = nvmet_get_smart_log_nsid(req, slog); 106 status = nvmet_get_smart_log_nsid(req, slog);
diff --git a/drivers/nvme/target/io-cmd.c b/drivers/nvme/target/io-cmd.c
index 4195115c7e54..6b0baa9caab9 100644
--- a/drivers/nvme/target/io-cmd.c
+++ b/drivers/nvme/target/io-cmd.c
@@ -180,7 +180,7 @@ static void nvmet_execute_write_zeroes(struct nvmet_req *req)
180 180
181 sector = le64_to_cpu(write_zeroes->slba) << 181 sector = le64_to_cpu(write_zeroes->slba) <<
182 (req->ns->blksize_shift - 9); 182 (req->ns->blksize_shift - 9);
183 nr_sector = (((sector_t)le32_to_cpu(write_zeroes->length)) << 183 nr_sector = (((sector_t)le16_to_cpu(write_zeroes->length)) <<
184 (req->ns->blksize_shift - 9)) + 1; 184 (req->ns->blksize_shift - 9)) + 1;
185 185
186 if (__blkdev_issue_zeroout(req->ns->bdev, sector, nr_sector, 186 if (__blkdev_issue_zeroout(req->ns->bdev, sector, nr_sector,
@@ -230,7 +230,7 @@ int nvmet_parse_io_cmd(struct nvmet_req *req)
230 return 0; 230 return 0;
231 case nvme_cmd_dsm: 231 case nvme_cmd_dsm:
232 req->execute = nvmet_execute_dsm; 232 req->execute = nvmet_execute_dsm;
233 req->data_len = le32_to_cpu(cmd->dsm.nr + 1) * 233 req->data_len = (le32_to_cpu(cmd->dsm.nr) + 1) *
234 sizeof(struct nvme_dsm_range); 234 sizeof(struct nvme_dsm_range);
235 return 0; 235 return 0;
236 case nvme_cmd_write_zeroes: 236 case nvme_cmd_write_zeroes:
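
All three NVMe fixes above are endianness corrections on little-endian wire fields, and the dsm.nr one is subtle: le32_to_cpu(cmd->dsm.nr + 1) byte-swaps the *sum*, while (le32_to_cpu(cmd->dsm.nr) + 1) swaps the field first and then adds, and the two only agree on little-endian hosts, which is how such bugs hide. A stand-alone demonstration using the glibc equivalents:

    #include <endian.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint32_t segments = 8;
            uint32_t wire_nr = htole32(segments - 1); /* LE on-wire field */

            uint32_t wrong = le32toh(wire_nr + 1);    /* swaps the sum: bogus on BE */
            uint32_t right = le32toh(wire_nr) + 1;    /* swap first, then add */

            printf("right = %u (always 8), wrong = %u (8 only on LE hosts)\n",
                   right, wrong);
            return 0;
    }
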
diff --git a/drivers/pci/dwc/Kconfig b/drivers/pci/dwc/Kconfig
index dfb8a69afc28..d2d2ba5b8a68 100644
--- a/drivers/pci/dwc/Kconfig
+++ b/drivers/pci/dwc/Kconfig
@@ -89,6 +89,7 @@ config PCI_HISI
89 depends on PCI_MSI_IRQ_DOMAIN 89 depends on PCI_MSI_IRQ_DOMAIN
90 select PCIEPORTBUS 90 select PCIEPORTBUS
91 select PCIE_DW_HOST 91 select PCIE_DW_HOST
92 select PCI_HOST_COMMON
92 help 93 help
93 Say Y here if you want PCIe controller support on HiSilicon 94 Say Y here if you want PCIe controller support on HiSilicon
94 Hip05 and Hip06 SoCs 95 Hip05 and Hip06 SoCs
diff --git a/drivers/pci/dwc/pcie-artpec6.c b/drivers/pci/dwc/pcie-artpec6.c
index fcd3ef845883..6d23683c0892 100644
--- a/drivers/pci/dwc/pcie-artpec6.c
+++ b/drivers/pci/dwc/pcie-artpec6.c
@@ -234,6 +234,9 @@ static int artpec6_add_pcie_port(struct artpec6_pcie *artpec6_pcie,
234 return 0; 234 return 0;
235} 235}
236 236
237static const struct dw_pcie_ops dw_pcie_ops = {
238};
239
237static int artpec6_pcie_probe(struct platform_device *pdev) 240static int artpec6_pcie_probe(struct platform_device *pdev)
238{ 241{
239 struct device *dev = &pdev->dev; 242 struct device *dev = &pdev->dev;
@@ -252,6 +255,7 @@ static int artpec6_pcie_probe(struct platform_device *pdev)
252 return -ENOMEM; 255 return -ENOMEM;
253 256
254 pci->dev = dev; 257 pci->dev = dev;
258 pci->ops = &dw_pcie_ops;
255 259
256 artpec6_pcie->pci = pci; 260 artpec6_pcie->pci = pci;
257 261
diff --git a/drivers/pci/dwc/pcie-designware-plat.c b/drivers/pci/dwc/pcie-designware-plat.c
index b6c832ba39dd..f20d494922ab 100644
--- a/drivers/pci/dwc/pcie-designware-plat.c
+++ b/drivers/pci/dwc/pcie-designware-plat.c
@@ -86,6 +86,9 @@ static int dw_plat_add_pcie_port(struct pcie_port *pp,
86 return 0; 86 return 0;
87} 87}
88 88
89static const struct dw_pcie_ops dw_pcie_ops = {
90};
91
89static int dw_plat_pcie_probe(struct platform_device *pdev) 92static int dw_plat_pcie_probe(struct platform_device *pdev)
90{ 93{
91 struct device *dev = &pdev->dev; 94 struct device *dev = &pdev->dev;
@@ -103,6 +106,7 @@ static int dw_plat_pcie_probe(struct platform_device *pdev)
103 return -ENOMEM; 106 return -ENOMEM;
104 107
105 pci->dev = dev; 108 pci->dev = dev;
109 pci->ops = &dw_pcie_ops;
106 110
107 dw_plat_pcie->pci = pci; 111 dw_plat_pcie->pci = pci;
108 112
diff --git a/drivers/pci/host/pci-thunder-pem.c b/drivers/pci/host/pci-thunder-pem.c
index b89c373555c5..6e031b522529 100644
--- a/drivers/pci/host/pci-thunder-pem.c
+++ b/drivers/pci/host/pci-thunder-pem.c
@@ -375,7 +375,6 @@ static void thunder_pem_legacy_fw(struct acpi_pci_root *root,
375 index -= node * PEM_MAX_DOM_IN_NODE; 375 index -= node * PEM_MAX_DOM_IN_NODE;
376 res_pem->start = PEM_RES_BASE | FIELD_PREP(PEM_NODE_MASK, node) | 376 res_pem->start = PEM_RES_BASE | FIELD_PREP(PEM_NODE_MASK, node) |
377 FIELD_PREP(PEM_INDX_MASK, index); 377 FIELD_PREP(PEM_INDX_MASK, index);
378 res_pem->end = res_pem->start + SZ_16M - 1;
379 res_pem->flags = IORESOURCE_MEM; 378 res_pem->flags = IORESOURCE_MEM;
380} 379}
381 380
@@ -399,8 +398,15 @@ static int thunder_pem_acpi_init(struct pci_config_window *cfg)
399 */ 398 */
400 if (ret) { 399 if (ret) {
401 thunder_pem_legacy_fw(root, res_pem); 400 thunder_pem_legacy_fw(root, res_pem);
402 /* Reserve PEM-specific resources and PCI configuration space */ 401 /*
402 * Reserve a 64K PEM-specific resource. The full 16M range
403 * size is still required for the thunder_pem_init() call.
404 */
405 res_pem->end = res_pem->start + SZ_64K - 1;
403 thunder_pem_reserve_range(dev, root->segment, res_pem); 406 thunder_pem_reserve_range(dev, root->segment, res_pem);
407 res_pem->end = res_pem->start + SZ_16M - 1;
408
409 /* Reserve PCI configuration space as well. */
404 thunder_pem_reserve_range(dev, root->segment, &cfg->res); 410 thunder_pem_reserve_range(dev, root->segment, &cfg->res);
405 } 411 }
406 412
diff --git a/drivers/pinctrl/core.c b/drivers/pinctrl/core.c
index d69046537b75..32822b0d9cd0 100644
--- a/drivers/pinctrl/core.c
+++ b/drivers/pinctrl/core.c
@@ -2010,29 +2010,57 @@ out_err:
2010 return ERR_PTR(ret); 2010 return ERR_PTR(ret);
2011} 2011}
2012 2012
2013static int pinctrl_create_and_start(struct pinctrl_dev *pctldev) 2013static int pinctrl_claim_hogs(struct pinctrl_dev *pctldev)
2014{ 2014{
2015 pctldev->p = create_pinctrl(pctldev->dev, pctldev); 2015 pctldev->p = create_pinctrl(pctldev->dev, pctldev);
2016 if (!IS_ERR(pctldev->p)) { 2016 if (PTR_ERR(pctldev->p) == -ENODEV) {
2017 kref_get(&pctldev->p->users); 2017 dev_dbg(pctldev->dev, "no hogs found\n");
2018 pctldev->hog_default =
2019 pinctrl_lookup_state(pctldev->p, PINCTRL_STATE_DEFAULT);
2020 if (IS_ERR(pctldev->hog_default)) {
2021 dev_dbg(pctldev->dev,
2022 "failed to lookup the default state\n");
2023 } else {
2024 if (pinctrl_select_state(pctldev->p,
2025 pctldev->hog_default))
2026 dev_err(pctldev->dev,
2027 "failed to select default state\n");
2028 }
2029 2018
2030 pctldev->hog_sleep = 2019 return 0;
2031 pinctrl_lookup_state(pctldev->p, 2020 }
2032 PINCTRL_STATE_SLEEP); 2021
2033 if (IS_ERR(pctldev->hog_sleep)) 2022 if (IS_ERR(pctldev->p)) {
2034 dev_dbg(pctldev->dev, 2023 dev_err(pctldev->dev, "error claiming hogs: %li\n",
2035 "failed to lookup the sleep state\n"); 2024 PTR_ERR(pctldev->p));
2025
2026 return PTR_ERR(pctldev->p);
2027 }
2028
2029 kref_get(&pctldev->p->users);
2030 pctldev->hog_default =
2031 pinctrl_lookup_state(pctldev->p, PINCTRL_STATE_DEFAULT);
2032 if (IS_ERR(pctldev->hog_default)) {
2033 dev_dbg(pctldev->dev,
2034 "failed to lookup the default state\n");
2035 } else {
2036 if (pinctrl_select_state(pctldev->p,
2037 pctldev->hog_default))
2038 dev_err(pctldev->dev,
2039 "failed to select default state\n");
2040 }
2041
2042 pctldev->hog_sleep =
2043 pinctrl_lookup_state(pctldev->p,
2044 PINCTRL_STATE_SLEEP);
2045 if (IS_ERR(pctldev->hog_sleep))
2046 dev_dbg(pctldev->dev,
2047 "failed to lookup the sleep state\n");
2048
2049 return 0;
2050}
2051
2052int pinctrl_enable(struct pinctrl_dev *pctldev)
2053{
2054 int error;
2055
2056 error = pinctrl_claim_hogs(pctldev);
2057 if (error) {
2058 dev_err(pctldev->dev, "could not claim hogs: %i\n",
2059 error);
2060 mutex_destroy(&pctldev->mutex);
2061 kfree(pctldev);
2062
2063 return error;
2036 } 2064 }
2037 2065
2038 mutex_lock(&pinctrldev_list_mutex); 2066 mutex_lock(&pinctrldev_list_mutex);
@@ -2043,6 +2071,7 @@ static int pinctrl_create_and_start(struct pinctrl_dev *pctldev)
2043 2071
2044 return 0; 2072 return 0;
2045} 2073}
2074EXPORT_SYMBOL_GPL(pinctrl_enable);
2046 2075
2047/** 2076/**
2048 * pinctrl_register() - register a pin controller device 2077 * pinctrl_register() - register a pin controller device
@@ -2065,25 +2094,30 @@ struct pinctrl_dev *pinctrl_register(struct pinctrl_desc *pctldesc,
2065 if (IS_ERR(pctldev)) 2094 if (IS_ERR(pctldev))
2066 return pctldev; 2095 return pctldev;
2067 2096
2068 error = pinctrl_create_and_start(pctldev); 2097 error = pinctrl_enable(pctldev);
2069 if (error) { 2098 if (error)
2070 mutex_destroy(&pctldev->mutex);
2071 kfree(pctldev);
2072
2073 return ERR_PTR(error); 2099 return ERR_PTR(error);
2074 }
2075 2100
2076 return pctldev; 2101 return pctldev;
2077 2102
2078} 2103}
2079EXPORT_SYMBOL_GPL(pinctrl_register); 2104EXPORT_SYMBOL_GPL(pinctrl_register);
2080 2105
2106/**
2107 * pinctrl_register_and_init() - register and init pin controller device
2108 * @pctldesc: descriptor for this pin controller
2109 * @dev: parent device for this pin controller
2110 * @driver_data: private pin controller data for this pin controller
2111 * @pctldev: pin controller device
2112 *
2113 * Note that pinctrl_enable() still needs to be manually called after
2114 * this once the driver is ready.
2115 */
2081int pinctrl_register_and_init(struct pinctrl_desc *pctldesc, 2116int pinctrl_register_and_init(struct pinctrl_desc *pctldesc,
2082 struct device *dev, void *driver_data, 2117 struct device *dev, void *driver_data,
2083 struct pinctrl_dev **pctldev) 2118 struct pinctrl_dev **pctldev)
2084{ 2119{
2085 struct pinctrl_dev *p; 2120 struct pinctrl_dev *p;
2086 int error;
2087 2121
2088 p = pinctrl_init_controller(pctldesc, dev, driver_data); 2122 p = pinctrl_init_controller(pctldesc, dev, driver_data);
2089 if (IS_ERR(p)) 2123 if (IS_ERR(p))
@@ -2097,15 +2131,6 @@ int pinctrl_register_and_init(struct pinctrl_desc *pctldesc,
2097 */ 2131 */
2098 *pctldev = p; 2132 *pctldev = p;
2099 2133
2100 error = pinctrl_create_and_start(p);
2101 if (error) {
2102 mutex_destroy(&p->mutex);
2103 kfree(p);
2104 *pctldev = NULL;
2105
2106 return error;
2107 }
2108
2109 return 0; 2134 return 0;
2110} 2135}
2111EXPORT_SYMBOL_GPL(pinctrl_register_and_init); 2136EXPORT_SYMBOL_GPL(pinctrl_register_and_init);
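
The pinctrl core change splits registration into two phases: pinctrl_register_and_init() sets the device up (making *pctldev valid early, which drivers with self-referencing callbacks need), while pinctrl_enable() later claims the hogs and publishes the controller. The driver hunks that follow all end probe with pinctrl_enable(). A hedged sketch of the resulting probe shape (the example_* names and struct layout are illustrative):

    #include <linux/device.h>
    #include <linux/pinctrl/pinctrl.h>
    #include <linux/platform_device.h>

    struct example_pinctrl {
            struct pinctrl_desc desc;
            struct pinctrl_dev *pctl;
    };

    static int example_pinctrl_probe(struct platform_device *pdev)
    {
            struct example_pinctrl *priv;
            int ret;

            priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
            if (!priv)
                    return -ENOMEM;

            /* ... fill priv->desc with pins and ops ... */

            /* Phase 1: init. priv->pctl is valid from here on, so
             * callbacks that look it up during setup work, but nothing
             * is live yet. */
            ret = devm_pinctrl_register_and_init(&pdev->dev, &priv->desc,
                                                 priv, &priv->pctl);
            if (ret)
                    return ret;

            /* ... the rest of probe: iomaps, IRQs, DT parsing ... */

            /* Phase 2: claim hogs and publish, once probe cannot fail. */
            return pinctrl_enable(priv->pctl);
    }
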
diff --git a/drivers/pinctrl/freescale/pinctrl-imx.c b/drivers/pinctrl/freescale/pinctrl-imx.c
index a7ace9e1ad81..74bd90dfd7b1 100644
--- a/drivers/pinctrl/freescale/pinctrl-imx.c
+++ b/drivers/pinctrl/freescale/pinctrl-imx.c
@@ -790,7 +790,7 @@ int imx_pinctrl_probe(struct platform_device *pdev,
790 790
791 dev_info(&pdev->dev, "initialized IMX pinctrl driver\n"); 791 dev_info(&pdev->dev, "initialized IMX pinctrl driver\n");
792 792
793 return 0; 793 return pinctrl_enable(ipctl->pctl);
794 794
795free: 795free:
796 imx_free_resources(ipctl); 796 imx_free_resources(ipctl);
diff --git a/drivers/pinctrl/intel/pinctrl-cherryview.c b/drivers/pinctrl/intel/pinctrl-cherryview.c
index f80134e3e0b6..9ff790174906 100644
--- a/drivers/pinctrl/intel/pinctrl-cherryview.c
+++ b/drivers/pinctrl/intel/pinctrl-cherryview.c
@@ -13,6 +13,7 @@
13 * published by the Free Software Foundation. 13 * published by the Free Software Foundation.
14 */ 14 */
15 15
16#include <linux/dmi.h>
16#include <linux/kernel.h> 17#include <linux/kernel.h>
17#include <linux/module.h> 18#include <linux/module.h>
18#include <linux/init.h> 19#include <linux/init.h>
@@ -1524,10 +1525,31 @@ static void chv_gpio_irq_handler(struct irq_desc *desc)
1524 chained_irq_exit(chip, desc); 1525 chained_irq_exit(chip, desc);
1525} 1526}
1526 1527
1528/*
1529 * Certain machines seem to hardcode Linux IRQ numbers in their ACPI
1530 * tables. Since we leave GPIOs that are not capable of generating
1531 * interrupts out of the irqdomain the numbering will be different and
1532 * cause devices using the hardcoded IRQ numbers to fail. In order not to
1533 * break such machines we will only mask pins from irqdomain if the machine
1534 * is not listed below.
1535 */
1536static const struct dmi_system_id chv_no_valid_mask[] = {
1537 {
1538 /* See https://bugzilla.kernel.org/show_bug.cgi?id=194945 */
1539 .ident = "Acer Chromebook (CYAN)",
1540 .matches = {
1541 DMI_MATCH(DMI_SYS_VENDOR, "GOOGLE"),
1542 DMI_MATCH(DMI_PRODUCT_NAME, "Edgar"),
1543 DMI_MATCH(DMI_BIOS_DATE, "05/21/2016"),
1544 },
1545 }
1546};
1547
1527static int chv_gpio_probe(struct chv_pinctrl *pctrl, int irq) 1548static int chv_gpio_probe(struct chv_pinctrl *pctrl, int irq)
1528{ 1549{
1529 const struct chv_gpio_pinrange *range; 1550 const struct chv_gpio_pinrange *range;
1530 struct gpio_chip *chip = &pctrl->chip; 1551 struct gpio_chip *chip = &pctrl->chip;
1552 bool need_valid_mask = !dmi_check_system(chv_no_valid_mask);
1531 int ret, i, offset; 1553 int ret, i, offset;
1532 1554
1533 *chip = chv_gpio_chip; 1555 *chip = chv_gpio_chip;
@@ -1536,7 +1558,7 @@ static int chv_gpio_probe(struct chv_pinctrl *pctrl, int irq)
1536 chip->label = dev_name(pctrl->dev); 1558 chip->label = dev_name(pctrl->dev);
1537 chip->parent = pctrl->dev; 1559 chip->parent = pctrl->dev;
1538 chip->base = -1; 1560 chip->base = -1;
1539 chip->irq_need_valid_mask = true; 1561 chip->irq_need_valid_mask = need_valid_mask;
1540 1562
1541 ret = devm_gpiochip_add_data(pctrl->dev, chip, pctrl); 1563 ret = devm_gpiochip_add_data(pctrl->dev, chip, pctrl);
1542 if (ret) { 1564 if (ret) {
@@ -1567,7 +1589,7 @@ static int chv_gpio_probe(struct chv_pinctrl *pctrl, int irq)
1567 intsel &= CHV_PADCTRL0_INTSEL_MASK; 1589 intsel &= CHV_PADCTRL0_INTSEL_MASK;
1568 intsel >>= CHV_PADCTRL0_INTSEL_SHIFT; 1590 intsel >>= CHV_PADCTRL0_INTSEL_SHIFT;
1569 1591
1570 if (intsel >= pctrl->community->nirqs) 1592 if (need_valid_mask && intsel >= pctrl->community->nirqs)
1571 clear_bit(i, chip->irq_valid_mask); 1593 clear_bit(i, chip->irq_valid_mask);
1572 } 1594 }
1573 1595
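
dmi_check_system() returns the number of matching entries, so the cherryview probe simply opts listed machines out of the irq valid-mask handling. One detail to note: such tables must end with an empty { } sentinel for dmi_check_system() to find their end; the table added above lacks one, which a later upstream patch corrected. A sketch of the conventional shape (vendor/model strings are placeholders):

    #include <linux/dmi.h>

    static const struct dmi_system_id example_quirks[] = {
            {
                    .ident = "Some affected machine",
                    .matches = {
                            DMI_MATCH(DMI_SYS_VENDOR, "VENDOR"),
                            DMI_MATCH(DMI_PRODUCT_NAME, "MODEL"),
                    },
            },
            { }     /* sentinel: dmi_check_system() walks until it hits this */
    };

    static bool machine_needs_quirk(void)
    {
            return dmi_check_system(example_quirks) != 0;
    }
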
diff --git a/drivers/pinctrl/pinctrl-single.c b/drivers/pinctrl/pinctrl-single.c
index 8b2d45e85bae..9c267dcda094 100644
--- a/drivers/pinctrl/pinctrl-single.c
+++ b/drivers/pinctrl/pinctrl-single.c
@@ -1781,7 +1781,7 @@ static int pcs_probe(struct platform_device *pdev)
1781 dev_info(pcs->dev, "%i pins at pa %p size %u\n", 1781 dev_info(pcs->dev, "%i pins at pa %p size %u\n",
1782 pcs->desc.npins, pcs->base, pcs->size); 1782 pcs->desc.npins, pcs->base, pcs->size);
1783 1783
1784 return 0; 1784 return pinctrl_enable(pcs->pctl);
1785 1785
1786free: 1786free:
1787 pcs_free_resources(pcs); 1787 pcs_free_resources(pcs);
diff --git a/drivers/pinctrl/samsung/pinctrl-exynos.c b/drivers/pinctrl/samsung/pinctrl-exynos.c
index f9b49967f512..63e51b56a22a 100644
--- a/drivers/pinctrl/samsung/pinctrl-exynos.c
+++ b/drivers/pinctrl/samsung/pinctrl-exynos.c
@@ -1468,82 +1468,82 @@ const struct samsung_pin_ctrl exynos5420_pin_ctrl[] __initconst = {
1468 1468
1469/* pin banks of exynos5433 pin-controller - ALIVE */ 1469/* pin banks of exynos5433 pin-controller - ALIVE */
1470static const struct samsung_pin_bank_data exynos5433_pin_banks0[] __initconst = { 1470static const struct samsung_pin_bank_data exynos5433_pin_banks0[] __initconst = {
1471 EXYNOS_PIN_BANK_EINTW(8, 0x000, "gpa0", 0x00), 1471 EXYNOS5433_PIN_BANK_EINTW(8, 0x000, "gpa0", 0x00),
1472 EXYNOS_PIN_BANK_EINTW(8, 0x020, "gpa1", 0x04), 1472 EXYNOS5433_PIN_BANK_EINTW(8, 0x020, "gpa1", 0x04),
1473 EXYNOS_PIN_BANK_EINTW(8, 0x040, "gpa2", 0x08), 1473 EXYNOS5433_PIN_BANK_EINTW(8, 0x040, "gpa2", 0x08),
1474 EXYNOS_PIN_BANK_EINTW(8, 0x060, "gpa3", 0x0c), 1474 EXYNOS5433_PIN_BANK_EINTW(8, 0x060, "gpa3", 0x0c),
1475 EXYNOS_PIN_BANK_EINTW_EXT(8, 0x020, "gpf1", 0x1004, 1), 1475 EXYNOS5433_PIN_BANK_EINTW_EXT(8, 0x020, "gpf1", 0x1004, 1),
1476 EXYNOS_PIN_BANK_EINTW_EXT(4, 0x040, "gpf2", 0x1008, 1), 1476 EXYNOS5433_PIN_BANK_EINTW_EXT(4, 0x040, "gpf2", 0x1008, 1),
1477 EXYNOS_PIN_BANK_EINTW_EXT(4, 0x060, "gpf3", 0x100c, 1), 1477 EXYNOS5433_PIN_BANK_EINTW_EXT(4, 0x060, "gpf3", 0x100c, 1),
1478 EXYNOS_PIN_BANK_EINTW_EXT(8, 0x080, "gpf4", 0x1010, 1), 1478 EXYNOS5433_PIN_BANK_EINTW_EXT(8, 0x080, "gpf4", 0x1010, 1),
1479 EXYNOS_PIN_BANK_EINTW_EXT(8, 0x0a0, "gpf5", 0x1014, 1), 1479 EXYNOS5433_PIN_BANK_EINTW_EXT(8, 0x0a0, "gpf5", 0x1014, 1),
1480}; 1480};
1481 1481
1482/* pin banks of exynos5433 pin-controller - AUD */ 1482/* pin banks of exynos5433 pin-controller - AUD */
1483static const struct samsung_pin_bank_data exynos5433_pin_banks1[] __initconst = { 1483static const struct samsung_pin_bank_data exynos5433_pin_banks1[] __initconst = {
1484 EXYNOS_PIN_BANK_EINTG(7, 0x000, "gpz0", 0x00), 1484 EXYNOS5433_PIN_BANK_EINTG(7, 0x000, "gpz0", 0x00),
1485 EXYNOS_PIN_BANK_EINTG(4, 0x020, "gpz1", 0x04), 1485 EXYNOS5433_PIN_BANK_EINTG(4, 0x020, "gpz1", 0x04),
1486}; 1486};
1487 1487
1488/* pin banks of exynos5433 pin-controller - CPIF */ 1488/* pin banks of exynos5433 pin-controller - CPIF */
1489static const struct samsung_pin_bank_data exynos5433_pin_banks2[] __initconst = { 1489static const struct samsung_pin_bank_data exynos5433_pin_banks2[] __initconst = {
1490 EXYNOS_PIN_BANK_EINTG(2, 0x000, "gpv6", 0x00), 1490 EXYNOS5433_PIN_BANK_EINTG(2, 0x000, "gpv6", 0x00),
1491}; 1491};
1492 1492
1493/* pin banks of exynos5433 pin-controller - eSE */ 1493/* pin banks of exynos5433 pin-controller - eSE */
1494static const struct samsung_pin_bank_data exynos5433_pin_banks3[] __initconst = { 1494static const struct samsung_pin_bank_data exynos5433_pin_banks3[] __initconst = {
1495 EXYNOS_PIN_BANK_EINTG(3, 0x000, "gpj2", 0x00), 1495 EXYNOS5433_PIN_BANK_EINTG(3, 0x000, "gpj2", 0x00),
1496}; 1496};
1497 1497
1498/* pin banks of exynos5433 pin-controller - FINGER */ 1498/* pin banks of exynos5433 pin-controller - FINGER */
1499static const struct samsung_pin_bank_data exynos5433_pin_banks4[] __initconst = { 1499static const struct samsung_pin_bank_data exynos5433_pin_banks4[] __initconst = {
1500 EXYNOS_PIN_BANK_EINTG(4, 0x000, "gpd5", 0x00), 1500 EXYNOS5433_PIN_BANK_EINTG(4, 0x000, "gpd5", 0x00),
1501}; 1501};
1502 1502
1503/* pin banks of exynos5433 pin-controller - FSYS */ 1503/* pin banks of exynos5433 pin-controller - FSYS */
1504static const struct samsung_pin_bank_data exynos5433_pin_banks5[] __initconst = { 1504static const struct samsung_pin_bank_data exynos5433_pin_banks5[] __initconst = {
1505 EXYNOS_PIN_BANK_EINTG(6, 0x000, "gph1", 0x00), 1505 EXYNOS5433_PIN_BANK_EINTG(6, 0x000, "gph1", 0x00),
1506 EXYNOS_PIN_BANK_EINTG(7, 0x020, "gpr4", 0x04), 1506 EXYNOS5433_PIN_BANK_EINTG(7, 0x020, "gpr4", 0x04),
1507 EXYNOS_PIN_BANK_EINTG(5, 0x040, "gpr0", 0x08), 1507 EXYNOS5433_PIN_BANK_EINTG(5, 0x040, "gpr0", 0x08),
1508 EXYNOS_PIN_BANK_EINTG(8, 0x060, "gpr1", 0x0c), 1508 EXYNOS5433_PIN_BANK_EINTG(8, 0x060, "gpr1", 0x0c),
1509 EXYNOS_PIN_BANK_EINTG(2, 0x080, "gpr2", 0x10), 1509 EXYNOS5433_PIN_BANK_EINTG(2, 0x080, "gpr2", 0x10),
1510 EXYNOS_PIN_BANK_EINTG(8, 0x0a0, "gpr3", 0x14), 1510 EXYNOS5433_PIN_BANK_EINTG(8, 0x0a0, "gpr3", 0x14),
1511}; 1511};
1512 1512
1513/* pin banks of exynos5433 pin-controller - IMEM */ 1513/* pin banks of exynos5433 pin-controller - IMEM */
1514static const struct samsung_pin_bank_data exynos5433_pin_banks6[] __initconst = { 1514static const struct samsung_pin_bank_data exynos5433_pin_banks6[] __initconst = {
1515 EXYNOS_PIN_BANK_EINTG(8, 0x000, "gpf0", 0x00), 1515 EXYNOS5433_PIN_BANK_EINTG(8, 0x000, "gpf0", 0x00),
1516}; 1516};
1517 1517
1518/* pin banks of exynos5433 pin-controller - NFC */ 1518/* pin banks of exynos5433 pin-controller - NFC */
1519static const struct samsung_pin_bank_data exynos5433_pin_banks7[] __initconst = { 1519static const struct samsung_pin_bank_data exynos5433_pin_banks7[] __initconst = {
1520 EXYNOS_PIN_BANK_EINTG(3, 0x000, "gpj0", 0x00), 1520 EXYNOS5433_PIN_BANK_EINTG(3, 0x000, "gpj0", 0x00),
1521}; 1521};
1522 1522
1523/* pin banks of exynos5433 pin-controller - PERIC */ 1523/* pin banks of exynos5433 pin-controller - PERIC */
1524static const struct samsung_pin_bank_data exynos5433_pin_banks8[] __initconst = { 1524static const struct samsung_pin_bank_data exynos5433_pin_banks8[] __initconst = {
1525 EXYNOS_PIN_BANK_EINTG(6, 0x000, "gpv7", 0x00), 1525 EXYNOS5433_PIN_BANK_EINTG(6, 0x000, "gpv7", 0x00),
1526 EXYNOS_PIN_BANK_EINTG(5, 0x020, "gpb0", 0x04), 1526 EXYNOS5433_PIN_BANK_EINTG(5, 0x020, "gpb0", 0x04),
1527 EXYNOS_PIN_BANK_EINTG(8, 0x040, "gpc0", 0x08), 1527 EXYNOS5433_PIN_BANK_EINTG(8, 0x040, "gpc0", 0x08),
1528 EXYNOS_PIN_BANK_EINTG(2, 0x060, "gpc1", 0x0c), 1528 EXYNOS5433_PIN_BANK_EINTG(2, 0x060, "gpc1", 0x0c),
1529 EXYNOS_PIN_BANK_EINTG(6, 0x080, "gpc2", 0x10), 1529 EXYNOS5433_PIN_BANK_EINTG(6, 0x080, "gpc2", 0x10),
1530 EXYNOS_PIN_BANK_EINTG(8, 0x0a0, "gpc3", 0x14), 1530 EXYNOS5433_PIN_BANK_EINTG(8, 0x0a0, "gpc3", 0x14),
1531 EXYNOS_PIN_BANK_EINTG(2, 0x0c0, "gpg0", 0x18), 1531 EXYNOS5433_PIN_BANK_EINTG(2, 0x0c0, "gpg0", 0x18),
1532 EXYNOS_PIN_BANK_EINTG(4, 0x0e0, "gpd0", 0x1c), 1532 EXYNOS5433_PIN_BANK_EINTG(4, 0x0e0, "gpd0", 0x1c),
1533 EXYNOS_PIN_BANK_EINTG(6, 0x100, "gpd1", 0x20), 1533 EXYNOS5433_PIN_BANK_EINTG(6, 0x100, "gpd1", 0x20),
1534 EXYNOS_PIN_BANK_EINTG(8, 0x120, "gpd2", 0x24), 1534 EXYNOS5433_PIN_BANK_EINTG(8, 0x120, "gpd2", 0x24),
1535 EXYNOS_PIN_BANK_EINTG(5, 0x140, "gpd4", 0x28), 1535 EXYNOS5433_PIN_BANK_EINTG(5, 0x140, "gpd4", 0x28),
1536 EXYNOS_PIN_BANK_EINTG(2, 0x160, "gpd8", 0x2c), 1536 EXYNOS5433_PIN_BANK_EINTG(2, 0x160, "gpd8", 0x2c),
1537 EXYNOS_PIN_BANK_EINTG(7, 0x180, "gpd6", 0x30), 1537 EXYNOS5433_PIN_BANK_EINTG(7, 0x180, "gpd6", 0x30),
1538 EXYNOS_PIN_BANK_EINTG(3, 0x1a0, "gpd7", 0x34), 1538 EXYNOS5433_PIN_BANK_EINTG(3, 0x1a0, "gpd7", 0x34),
1539 EXYNOS_PIN_BANK_EINTG(5, 0x1c0, "gpg1", 0x38), 1539 EXYNOS5433_PIN_BANK_EINTG(5, 0x1c0, "gpg1", 0x38),
1540 EXYNOS_PIN_BANK_EINTG(2, 0x1e0, "gpg2", 0x3c), 1540 EXYNOS5433_PIN_BANK_EINTG(2, 0x1e0, "gpg2", 0x3c),
1541 EXYNOS_PIN_BANK_EINTG(8, 0x200, "gpg3", 0x40), 1541 EXYNOS5433_PIN_BANK_EINTG(8, 0x200, "gpg3", 0x40),
1542}; 1542};
1543 1543
1544/* pin banks of exynos5433 pin-controller - TOUCH */ 1544/* pin banks of exynos5433 pin-controller - TOUCH */
1545static const struct samsung_pin_bank_data exynos5433_pin_banks9[] __initconst = { 1545static const struct samsung_pin_bank_data exynos5433_pin_banks9[] __initconst = {
1546 EXYNOS_PIN_BANK_EINTG(3, 0x000, "gpj1", 0x00), 1546 EXYNOS5433_PIN_BANK_EINTG(3, 0x000, "gpj1", 0x00),
1547}; 1547};
1548 1548
1549/* 1549/*
diff --git a/drivers/pinctrl/samsung/pinctrl-exynos.h b/drivers/pinctrl/samsung/pinctrl-exynos.h
index a473092fb8d2..cd046eb7d705 100644
--- a/drivers/pinctrl/samsung/pinctrl-exynos.h
+++ b/drivers/pinctrl/samsung/pinctrl-exynos.h
@@ -79,17 +79,6 @@
79 .name = id \ 79 .name = id \
80 } 80 }
81 81
82#define EXYNOS_PIN_BANK_EINTW_EXT(pins, reg, id, offs, pctl_idx) \
83 { \
84 .type = &bank_type_alive, \
85 .pctl_offset = reg, \
86 .nr_pins = pins, \
87 .eint_type = EINT_TYPE_WKUP, \
88 .eint_offset = offs, \
89 .name = id, \
90 .pctl_res_idx = pctl_idx, \
91 } \
92
93#define EXYNOS5433_PIN_BANK_EINTG(pins, reg, id, offs) \ 82#define EXYNOS5433_PIN_BANK_EINTG(pins, reg, id, offs) \
94 { \ 83 { \
95 .type = &exynos5433_bank_type_off, \ 84 .type = &exynos5433_bank_type_off, \
diff --git a/drivers/pinctrl/sh-pfc/pinctrl.c b/drivers/pinctrl/sh-pfc/pinctrl.c
index 08150a321be6..a70157f0acf4 100644
--- a/drivers/pinctrl/sh-pfc/pinctrl.c
+++ b/drivers/pinctrl/sh-pfc/pinctrl.c
@@ -816,6 +816,13 @@ int sh_pfc_register_pinctrl(struct sh_pfc *pfc)
816 pmx->pctl_desc.pins = pmx->pins; 816 pmx->pctl_desc.pins = pmx->pins;
817 pmx->pctl_desc.npins = pfc->info->nr_pins; 817 pmx->pctl_desc.npins = pfc->info->nr_pins;
818 818
819 return devm_pinctrl_register_and_init(pfc->dev, &pmx->pctl_desc, pmx, 819 ret = devm_pinctrl_register_and_init(pfc->dev, &pmx->pctl_desc, pmx,
820 &pmx->pctl); 820 &pmx->pctl);
821 if (ret) {
822 dev_err(pfc->dev, "could not register: %i\n", ret);
823
824 return ret;
825 }
826
827 return pinctrl_enable(pmx->pctl);
821} 828}
diff --git a/drivers/pinctrl/ti/pinctrl-ti-iodelay.c b/drivers/pinctrl/ti/pinctrl-ti-iodelay.c
index 717e3404900c..362c50918c13 100644
--- a/drivers/pinctrl/ti/pinctrl-ti-iodelay.c
+++ b/drivers/pinctrl/ti/pinctrl-ti-iodelay.c
@@ -893,6 +893,8 @@ static int ti_iodelay_probe(struct platform_device *pdev)
893 893
894 platform_set_drvdata(pdev, iod); 894 platform_set_drvdata(pdev, iod);
895 895
896 return pinctrl_enable(iod->pctl);
897
896exit_out: 898exit_out:
897 of_node_put(np); 899 of_node_put(np);
898 return ret; 900 return ret;
diff --git a/drivers/pwm/pwm-lpss-pci.c b/drivers/pwm/pwm-lpss-pci.c
index 053088b9b66e..c1527cb645be 100644
--- a/drivers/pwm/pwm-lpss-pci.c
+++ b/drivers/pwm/pwm-lpss-pci.c
@@ -36,6 +36,14 @@ static const struct pwm_lpss_boardinfo pwm_lpss_bxt_info = {
36 .clk_rate = 19200000, 36 .clk_rate = 19200000,
37 .npwm = 4, 37 .npwm = 4,
38 .base_unit_bits = 22, 38 .base_unit_bits = 22,
39 .bypass = true,
40};
41
42/* Tangier */
43static const struct pwm_lpss_boardinfo pwm_lpss_tng_info = {
44 .clk_rate = 19200000,
45 .npwm = 4,
46 .base_unit_bits = 22,
39}; 47};
40 48
41static int pwm_lpss_probe_pci(struct pci_dev *pdev, 49static int pwm_lpss_probe_pci(struct pci_dev *pdev,
@@ -97,7 +105,7 @@ static const struct pci_device_id pwm_lpss_pci_ids[] = {
97 { PCI_VDEVICE(INTEL, 0x0ac8), (unsigned long)&pwm_lpss_bxt_info}, 105 { PCI_VDEVICE(INTEL, 0x0ac8), (unsigned long)&pwm_lpss_bxt_info},
98 { PCI_VDEVICE(INTEL, 0x0f08), (unsigned long)&pwm_lpss_byt_info}, 106 { PCI_VDEVICE(INTEL, 0x0f08), (unsigned long)&pwm_lpss_byt_info},
99 { PCI_VDEVICE(INTEL, 0x0f09), (unsigned long)&pwm_lpss_byt_info}, 107 { PCI_VDEVICE(INTEL, 0x0f09), (unsigned long)&pwm_lpss_byt_info},
100 { PCI_VDEVICE(INTEL, 0x11a5), (unsigned long)&pwm_lpss_bxt_info}, 108 { PCI_VDEVICE(INTEL, 0x11a5), (unsigned long)&pwm_lpss_tng_info},
101 { PCI_VDEVICE(INTEL, 0x1ac8), (unsigned long)&pwm_lpss_bxt_info}, 109 { PCI_VDEVICE(INTEL, 0x1ac8), (unsigned long)&pwm_lpss_bxt_info},
102 { PCI_VDEVICE(INTEL, 0x2288), (unsigned long)&pwm_lpss_bsw_info}, 110 { PCI_VDEVICE(INTEL, 0x2288), (unsigned long)&pwm_lpss_bsw_info},
103 { PCI_VDEVICE(INTEL, 0x2289), (unsigned long)&pwm_lpss_bsw_info}, 111 { PCI_VDEVICE(INTEL, 0x2289), (unsigned long)&pwm_lpss_bsw_info},
diff --git a/drivers/pwm/pwm-lpss-platform.c b/drivers/pwm/pwm-lpss-platform.c
index b22b6fdadb9a..5d6ed1507d29 100644
--- a/drivers/pwm/pwm-lpss-platform.c
+++ b/drivers/pwm/pwm-lpss-platform.c
@@ -37,6 +37,7 @@ static const struct pwm_lpss_boardinfo pwm_lpss_bxt_info = {
37 .clk_rate = 19200000, 37 .clk_rate = 19200000,
38 .npwm = 4, 38 .npwm = 4,
39 .base_unit_bits = 22, 39 .base_unit_bits = 22,
40 .bypass = true,
40}; 41};
41 42
42static int pwm_lpss_probe_platform(struct platform_device *pdev) 43static int pwm_lpss_probe_platform(struct platform_device *pdev)
diff --git a/drivers/pwm/pwm-lpss.c b/drivers/pwm/pwm-lpss.c
index 689d2c1cbead..8db0d40ccacd 100644
--- a/drivers/pwm/pwm-lpss.c
+++ b/drivers/pwm/pwm-lpss.c
@@ -57,7 +57,7 @@ static inline void pwm_lpss_write(const struct pwm_device *pwm, u32 value)
57 writel(value, lpwm->regs + pwm->hwpwm * PWM_SIZE + PWM); 57 writel(value, lpwm->regs + pwm->hwpwm * PWM_SIZE + PWM);
58} 58}
59 59
60static int pwm_lpss_update(struct pwm_device *pwm) 60static int pwm_lpss_wait_for_update(struct pwm_device *pwm)
61{ 61{
62 struct pwm_lpss_chip *lpwm = to_lpwm(pwm->chip); 62 struct pwm_lpss_chip *lpwm = to_lpwm(pwm->chip);
63 const void __iomem *addr = lpwm->regs + pwm->hwpwm * PWM_SIZE + PWM; 63 const void __iomem *addr = lpwm->regs + pwm->hwpwm * PWM_SIZE + PWM;
@@ -65,8 +65,6 @@ static int pwm_lpss_update(struct pwm_device *pwm)
65 u32 val; 65 u32 val;
66 int err; 66 int err;
67 67
68 pwm_lpss_write(pwm, pwm_lpss_read(pwm) | PWM_SW_UPDATE);
69
70 /* 68 /*
71 * PWM Configuration register has SW_UPDATE bit that is set when a new 69 * PWM Configuration register has SW_UPDATE bit that is set when a new
72 * configuration is written to the register. The bit is automatically 70 * configuration is written to the register. The bit is automatically
@@ -122,6 +120,12 @@ static void pwm_lpss_prepare(struct pwm_lpss_chip *lpwm, struct pwm_device *pwm,
122 pwm_lpss_write(pwm, ctrl); 120 pwm_lpss_write(pwm, ctrl);
123} 121}
124 122
123static inline void pwm_lpss_cond_enable(struct pwm_device *pwm, bool cond)
124{
125 if (cond)
126 pwm_lpss_write(pwm, pwm_lpss_read(pwm) | PWM_ENABLE);
127}
128
125static int pwm_lpss_apply(struct pwm_chip *chip, struct pwm_device *pwm, 129static int pwm_lpss_apply(struct pwm_chip *chip, struct pwm_device *pwm,
126 struct pwm_state *state) 130 struct pwm_state *state)
127{ 131{
@@ -137,18 +141,21 @@ static int pwm_lpss_apply(struct pwm_chip *chip, struct pwm_device *pwm,
137 return ret; 141 return ret;
138 } 142 }
139 pwm_lpss_prepare(lpwm, pwm, state->duty_cycle, state->period); 143 pwm_lpss_prepare(lpwm, pwm, state->duty_cycle, state->period);
140 ret = pwm_lpss_update(pwm); 144 pwm_lpss_write(pwm, pwm_lpss_read(pwm) | PWM_SW_UPDATE);
145 pwm_lpss_cond_enable(pwm, lpwm->info->bypass == false);
146 ret = pwm_lpss_wait_for_update(pwm);
141 if (ret) { 147 if (ret) {
142 pm_runtime_put(chip->dev); 148 pm_runtime_put(chip->dev);
143 return ret; 149 return ret;
144 } 150 }
145 pwm_lpss_write(pwm, pwm_lpss_read(pwm) | PWM_ENABLE); 151 pwm_lpss_cond_enable(pwm, lpwm->info->bypass == true);
146 } else { 152 } else {
147 ret = pwm_lpss_is_updating(pwm); 153 ret = pwm_lpss_is_updating(pwm);
148 if (ret) 154 if (ret)
149 return ret; 155 return ret;
150 pwm_lpss_prepare(lpwm, pwm, state->duty_cycle, state->period); 156 pwm_lpss_prepare(lpwm, pwm, state->duty_cycle, state->period);
151 return pwm_lpss_update(pwm); 157 pwm_lpss_write(pwm, pwm_lpss_read(pwm) | PWM_SW_UPDATE);
158 return pwm_lpss_wait_for_update(pwm);
152 } 159 }
153 } else if (pwm_is_enabled(pwm)) { 160 } else if (pwm_is_enabled(pwm)) {
154 pwm_lpss_write(pwm, pwm_lpss_read(pwm) & ~PWM_ENABLE); 161 pwm_lpss_write(pwm, pwm_lpss_read(pwm) & ~PWM_ENABLE);
diff --git a/drivers/pwm/pwm-lpss.h b/drivers/pwm/pwm-lpss.h
index c94cd7c2695d..98306bb02cfe 100644
--- a/drivers/pwm/pwm-lpss.h
+++ b/drivers/pwm/pwm-lpss.h
@@ -22,6 +22,7 @@ struct pwm_lpss_boardinfo {
22 unsigned long clk_rate; 22 unsigned long clk_rate;
23 unsigned int npwm; 23 unsigned int npwm;
24 unsigned long base_unit_bits; 24 unsigned long base_unit_bits;
25 bool bypass;
25}; 26};
26 27
27struct pwm_lpss_chip *pwm_lpss_probe(struct device *dev, struct resource *r, 28struct pwm_lpss_chip *pwm_lpss_probe(struct device *dev, struct resource *r,
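Taken together, the pwm-lpss hunks introduce a per-board "bypass" flag and split the old pwm_lpss_update() so that where PWM_ENABLE is written relative to the SW_UPDATE wait becomes a board property: Bay Trail/Braswell/Tangier (bypass = false) enable the channel before waiting for SW_UPDATE to clear, while Broxton (bypass = true) enables it only after the update has latched. The standalone model below reproduces just that ordering decision; the helper names stand in for register writes and are not driver API.

/* Userspace model of the enable ordering in the reworked
 * pwm_lpss_apply(); helpers are placeholders for register accesses.
 */
#include <stdbool.h>
#include <stdio.h>

static void sw_update(void)  { puts("write PWM_SW_UPDATE"); }
static void enable(void)     { puts("write PWM_ENABLE"); }
static int wait_update(void) { puts("poll until SW_UPDATE clears"); return 0; }

static int apply_from_disabled(bool bypass)
{
        sw_update();
        if (!bypass)            /* BYT/BSW/TNG: enable before the wait */
                enable();
        if (wait_update())
                return -1;
        if (bypass)             /* BXT: enable only after the update latched */
                enable();
        return 0;
}

int main(void)
{
        puts("-- bypass = false --");
        apply_from_disabled(false);
        puts("-- bypass = true --");
        apply_from_disabled(true);
        return 0;
}
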
diff --git a/drivers/pwm/pwm-rockchip.c b/drivers/pwm/pwm-rockchip.c
index ef89df1f7336..744d56197286 100644
--- a/drivers/pwm/pwm-rockchip.c
+++ b/drivers/pwm/pwm-rockchip.c
@@ -191,6 +191,28 @@ static int rockchip_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
191 return 0; 191 return 0;
192} 192}
193 193
194static int rockchip_pwm_enable(struct pwm_chip *chip,
195 struct pwm_device *pwm,
196 bool enable,
197 enum pwm_polarity polarity)
198{
199 struct rockchip_pwm_chip *pc = to_rockchip_pwm_chip(chip);
200 int ret;
201
202 if (enable) {
203 ret = clk_enable(pc->clk);
204 if (ret)
205 return ret;
206 }
207
208 pc->data->set_enable(chip, pwm, enable, polarity);
209
210 if (!enable)
211 clk_disable(pc->clk);
212
213 return 0;
214}
215
194static int rockchip_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm, 216static int rockchip_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
195 struct pwm_state *state) 217 struct pwm_state *state)
196{ 218{
@@ -207,22 +229,26 @@ static int rockchip_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
207 return ret; 229 return ret;
208 230
209 if (state->polarity != curstate.polarity && enabled) { 231 if (state->polarity != curstate.polarity && enabled) {
210 pc->data->set_enable(chip, pwm, false, state->polarity); 232 ret = rockchip_pwm_enable(chip, pwm, false, state->polarity);
233 if (ret)
234 goto out;
211 enabled = false; 235 enabled = false;
212 } 236 }
213 237
214 ret = rockchip_pwm_config(chip, pwm, state->duty_cycle, state->period); 238 ret = rockchip_pwm_config(chip, pwm, state->duty_cycle, state->period);
215 if (ret) { 239 if (ret) {
216 if (enabled != curstate.enabled) 240 if (enabled != curstate.enabled)
217 pc->data->set_enable(chip, pwm, !enabled, 241 rockchip_pwm_enable(chip, pwm, !enabled,
218 state->polarity); 242 state->polarity);
219
220 goto out; 243 goto out;
221 } 244 }
222 245
223 if (state->enabled != enabled) 246 if (state->enabled != enabled) {
224 pc->data->set_enable(chip, pwm, state->enabled, 247 ret = rockchip_pwm_enable(chip, pwm, state->enabled,
225 state->polarity); 248 state->polarity);
249 if (ret)
250 goto out;
251 }
226 252
227 /* 253 /*
228 * Update the state with the real hardware, which can differ a bit 254 * Update the state with the real hardware, which can differ a bit
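The new rockchip_pwm_enable() wrapper above exists so the PWM clock is guaranteed to be running whenever the output is switched on and is released again once it is switched off, and so that a clk_enable() failure finally reaches the caller instead of being ignored. The fragment below sketches the same clock-bracketing idiom in isolation; clk_enable()/clk_disable() are real kernel API, the struct and callback are illustrative.

/* Sketch of the clk-bracketing idiom adopted above. */
#include <linux/clk.h>
#include <linux/types.h>

struct clocked_pwm {
        struct clk *clk;
        void (*hw_set_enable)(struct clocked_pwm *pc, bool enable);
};

static int clocked_pwm_enable(struct clocked_pwm *pc, bool enable)
{
        int ret;

        if (enable) {
                ret = clk_enable(pc->clk);      /* can fail: must be checked */
                if (ret)
                        return ret;
        }

        pc->hw_set_enable(pc, enable);          /* device-specific write */

        if (!enable)
                clk_disable(pc->clk);           /* balances the earlier enable */

        return 0;
}
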
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 19125d72f322..e5a2d590a104 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -496,7 +496,7 @@ static void scsi_run_queue(struct request_queue *q)
496 scsi_starved_list_run(sdev->host); 496 scsi_starved_list_run(sdev->host);
497 497
498 if (q->mq_ops) 498 if (q->mq_ops)
499 blk_mq_start_stopped_hw_queues(q, false); 499 blk_mq_run_hw_queues(q, false);
500 else 500 else
501 blk_run_queue(q); 501 blk_run_queue(q);
502} 502}
@@ -667,7 +667,7 @@ static bool scsi_end_request(struct request *req, int error,
667 !list_empty(&sdev->host->starved_list)) 667 !list_empty(&sdev->host->starved_list))
668 kblockd_schedule_work(&sdev->requeue_work); 668 kblockd_schedule_work(&sdev->requeue_work);
669 else 669 else
670 blk_mq_start_stopped_hw_queues(q, true); 670 blk_mq_run_hw_queues(q, true);
671 } else { 671 } else {
672 unsigned long flags; 672 unsigned long flags;
673 673
@@ -1974,7 +1974,7 @@ out:
1974 case BLK_MQ_RQ_QUEUE_BUSY: 1974 case BLK_MQ_RQ_QUEUE_BUSY:
1975 if (atomic_read(&sdev->device_busy) == 0 && 1975 if (atomic_read(&sdev->device_busy) == 0 &&
1976 !scsi_device_blocked(sdev)) 1976 !scsi_device_blocked(sdev))
1977 blk_mq_delay_queue(hctx, SCSI_QUEUE_DELAY); 1977 blk_mq_delay_run_hw_queue(hctx, SCSI_QUEUE_DELAY);
1978 break; 1978 break;
1979 case BLK_MQ_RQ_QUEUE_ERROR: 1979 case BLK_MQ_RQ_QUEUE_ERROR:
1980 /* 1980 /*
diff --git a/drivers/staging/android/ashmem.c b/drivers/staging/android/ashmem.c
index 7cbad0d45b9c..6ba270e0494d 100644
--- a/drivers/staging/android/ashmem.c
+++ b/drivers/staging/android/ashmem.c
@@ -409,6 +409,7 @@ static int ashmem_mmap(struct file *file, struct vm_area_struct *vma)
409 ret = PTR_ERR(vmfile); 409 ret = PTR_ERR(vmfile);
410 goto out; 410 goto out;
411 } 411 }
412 vmfile->f_mode |= FMODE_LSEEK;
412 asma->file = vmfile; 413 asma->file = vmfile;
413 } 414 }
414 get_file(asma->file); 415 get_file(asma->file);
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
index a91802432f2f..e3f9ed3690b7 100644
--- a/drivers/target/iscsi/iscsi_target.c
+++ b/drivers/target/iscsi/iscsi_target.c
@@ -485,8 +485,7 @@ static void iscsit_get_rx_pdu(struct iscsi_conn *);
485 485
486int iscsit_queue_rsp(struct iscsi_conn *conn, struct iscsi_cmd *cmd) 486int iscsit_queue_rsp(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
487{ 487{
488 iscsit_add_cmd_to_response_queue(cmd, cmd->conn, cmd->i_state); 488 return iscsit_add_cmd_to_response_queue(cmd, cmd->conn, cmd->i_state);
489 return 0;
490} 489}
491EXPORT_SYMBOL(iscsit_queue_rsp); 490EXPORT_SYMBOL(iscsit_queue_rsp);
492 491
diff --git a/drivers/target/iscsi/iscsi_target_configfs.c b/drivers/target/iscsi/iscsi_target_configfs.c
index bf40f03755dd..344e8448869c 100644
--- a/drivers/target/iscsi/iscsi_target_configfs.c
+++ b/drivers/target/iscsi/iscsi_target_configfs.c
@@ -1398,11 +1398,10 @@ static u32 lio_sess_get_initiator_sid(
1398static int lio_queue_data_in(struct se_cmd *se_cmd) 1398static int lio_queue_data_in(struct se_cmd *se_cmd)
1399{ 1399{
1400 struct iscsi_cmd *cmd = container_of(se_cmd, struct iscsi_cmd, se_cmd); 1400 struct iscsi_cmd *cmd = container_of(se_cmd, struct iscsi_cmd, se_cmd);
1401 struct iscsi_conn *conn = cmd->conn;
1401 1402
1402 cmd->i_state = ISTATE_SEND_DATAIN; 1403 cmd->i_state = ISTATE_SEND_DATAIN;
1403 cmd->conn->conn_transport->iscsit_queue_data_in(cmd->conn, cmd); 1404 return conn->conn_transport->iscsit_queue_data_in(conn, cmd);
1404
1405 return 0;
1406} 1405}
1407 1406
1408static int lio_write_pending(struct se_cmd *se_cmd) 1407static int lio_write_pending(struct se_cmd *se_cmd)
@@ -1431,16 +1430,14 @@ static int lio_write_pending_status(struct se_cmd *se_cmd)
1431static int lio_queue_status(struct se_cmd *se_cmd) 1430static int lio_queue_status(struct se_cmd *se_cmd)
1432{ 1431{
1433 struct iscsi_cmd *cmd = container_of(se_cmd, struct iscsi_cmd, se_cmd); 1432 struct iscsi_cmd *cmd = container_of(se_cmd, struct iscsi_cmd, se_cmd);
1433 struct iscsi_conn *conn = cmd->conn;
1434 1434
1435 cmd->i_state = ISTATE_SEND_STATUS; 1435 cmd->i_state = ISTATE_SEND_STATUS;
1436 1436
1437 if (cmd->se_cmd.scsi_status || cmd->sense_reason) { 1437 if (cmd->se_cmd.scsi_status || cmd->sense_reason) {
1438 iscsit_add_cmd_to_response_queue(cmd, cmd->conn, cmd->i_state); 1438 return iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state);
1439 return 0;
1440 } 1439 }
1441 cmd->conn->conn_transport->iscsit_queue_status(cmd->conn, cmd); 1440 return conn->conn_transport->iscsit_queue_status(conn, cmd);
1442
1443 return 0;
1444} 1441}
1445 1442
1446static void lio_queue_tm_rsp(struct se_cmd *se_cmd) 1443static void lio_queue_tm_rsp(struct se_cmd *se_cmd)
diff --git a/drivers/target/iscsi/iscsi_target_parameters.c b/drivers/target/iscsi/iscsi_target_parameters.c
index e65bf78ceef3..fce627628200 100644
--- a/drivers/target/iscsi/iscsi_target_parameters.c
+++ b/drivers/target/iscsi/iscsi_target_parameters.c
@@ -782,22 +782,6 @@ static void iscsi_check_proposer_for_optional_reply(struct iscsi_param *param)
782 if (!strcmp(param->name, MAXRECVDATASEGMENTLENGTH)) 782 if (!strcmp(param->name, MAXRECVDATASEGMENTLENGTH))
783 SET_PSTATE_REPLY_OPTIONAL(param); 783 SET_PSTATE_REPLY_OPTIONAL(param);
784 /* 784 /*
785 * The GlobalSAN iSCSI Initiator for MacOSX does
786 * not respond to MaxBurstLength, FirstBurstLength,
787 * DefaultTime2Wait or DefaultTime2Retain parameter keys.
788 * So, we set them to 'reply optional' here, and assume the
789 * the defaults from iscsi_parameters.h if the initiator
790 * is not RFC compliant and the keys are not negotiated.
791 */
792 if (!strcmp(param->name, MAXBURSTLENGTH))
793 SET_PSTATE_REPLY_OPTIONAL(param);
794 if (!strcmp(param->name, FIRSTBURSTLENGTH))
795 SET_PSTATE_REPLY_OPTIONAL(param);
796 if (!strcmp(param->name, DEFAULTTIME2WAIT))
797 SET_PSTATE_REPLY_OPTIONAL(param);
798 if (!strcmp(param->name, DEFAULTTIME2RETAIN))
799 SET_PSTATE_REPLY_OPTIONAL(param);
800 /*
801 * Required for gPXE iSCSI boot client 785 * Required for gPXE iSCSI boot client
802 */ 786 */
803 if (!strcmp(param->name, MAXCONNECTIONS)) 787 if (!strcmp(param->name, MAXCONNECTIONS))
diff --git a/drivers/target/iscsi/iscsi_target_util.c b/drivers/target/iscsi/iscsi_target_util.c
index 5041a9c8bdcb..7d3e2fcc26a0 100644
--- a/drivers/target/iscsi/iscsi_target_util.c
+++ b/drivers/target/iscsi/iscsi_target_util.c
@@ -567,7 +567,7 @@ static void iscsit_remove_cmd_from_immediate_queue(
567 } 567 }
568} 568}
569 569
570void iscsit_add_cmd_to_response_queue( 570int iscsit_add_cmd_to_response_queue(
571 struct iscsi_cmd *cmd, 571 struct iscsi_cmd *cmd,
572 struct iscsi_conn *conn, 572 struct iscsi_conn *conn,
573 u8 state) 573 u8 state)
@@ -578,7 +578,7 @@ void iscsit_add_cmd_to_response_queue(
578 if (!qr) { 578 if (!qr) {
579 pr_err("Unable to allocate memory for" 579 pr_err("Unable to allocate memory for"
580 " struct iscsi_queue_req\n"); 580 " struct iscsi_queue_req\n");
581 return; 581 return -ENOMEM;
582 } 582 }
583 INIT_LIST_HEAD(&qr->qr_list); 583 INIT_LIST_HEAD(&qr->qr_list);
584 qr->cmd = cmd; 584 qr->cmd = cmd;
@@ -590,6 +590,7 @@ void iscsit_add_cmd_to_response_queue(
590 spin_unlock_bh(&conn->response_queue_lock); 590 spin_unlock_bh(&conn->response_queue_lock);
591 591
592 wake_up(&conn->queues_wq); 592 wake_up(&conn->queues_wq);
593 return 0;
593} 594}
594 595
595struct iscsi_queue_req *iscsit_get_cmd_from_response_queue(struct iscsi_conn *conn) 596struct iscsi_queue_req *iscsit_get_cmd_from_response_queue(struct iscsi_conn *conn)
@@ -737,21 +738,23 @@ void iscsit_free_cmd(struct iscsi_cmd *cmd, bool shutdown)
737{ 738{
738 struct se_cmd *se_cmd = NULL; 739 struct se_cmd *se_cmd = NULL;
739 int rc; 740 int rc;
741 bool op_scsi = false;
740 /* 742 /*
741 * Determine if a struct se_cmd is associated with 743 * Determine if a struct se_cmd is associated with
742 * this struct iscsi_cmd. 744 * this struct iscsi_cmd.
743 */ 745 */
744 switch (cmd->iscsi_opcode) { 746 switch (cmd->iscsi_opcode) {
745 case ISCSI_OP_SCSI_CMD: 747 case ISCSI_OP_SCSI_CMD:
746 se_cmd = &cmd->se_cmd; 748 op_scsi = true;
747 __iscsit_free_cmd(cmd, true, shutdown);
748 /* 749 /*
749 * Fallthrough 750 * Fallthrough
750 */ 751 */
751 case ISCSI_OP_SCSI_TMFUNC: 752 case ISCSI_OP_SCSI_TMFUNC:
752 rc = transport_generic_free_cmd(&cmd->se_cmd, shutdown); 753 se_cmd = &cmd->se_cmd;
753 if (!rc && shutdown && se_cmd && se_cmd->se_sess) { 754 __iscsit_free_cmd(cmd, op_scsi, shutdown);
754 __iscsit_free_cmd(cmd, true, shutdown); 755 rc = transport_generic_free_cmd(se_cmd, shutdown);
756 if (!rc && shutdown && se_cmd->se_sess) {
757 __iscsit_free_cmd(cmd, op_scsi, shutdown);
755 target_put_sess_cmd(se_cmd); 758 target_put_sess_cmd(se_cmd);
756 } 759 }
757 break; 760 break;
diff --git a/drivers/target/iscsi/iscsi_target_util.h b/drivers/target/iscsi/iscsi_target_util.h
index 8ff08856516a..9e4197af8708 100644
--- a/drivers/target/iscsi/iscsi_target_util.h
+++ b/drivers/target/iscsi/iscsi_target_util.h
@@ -31,7 +31,7 @@ extern int iscsit_find_cmd_for_recovery(struct iscsi_session *, struct iscsi_cmd
31 struct iscsi_conn_recovery **, itt_t); 31 struct iscsi_conn_recovery **, itt_t);
32extern void iscsit_add_cmd_to_immediate_queue(struct iscsi_cmd *, struct iscsi_conn *, u8); 32extern void iscsit_add_cmd_to_immediate_queue(struct iscsi_cmd *, struct iscsi_conn *, u8);
33extern struct iscsi_queue_req *iscsit_get_cmd_from_immediate_queue(struct iscsi_conn *); 33extern struct iscsi_queue_req *iscsit_get_cmd_from_immediate_queue(struct iscsi_conn *);
34extern void iscsit_add_cmd_to_response_queue(struct iscsi_cmd *, struct iscsi_conn *, u8); 34extern int iscsit_add_cmd_to_response_queue(struct iscsi_cmd *, struct iscsi_conn *, u8);
35extern struct iscsi_queue_req *iscsit_get_cmd_from_response_queue(struct iscsi_conn *); 35extern struct iscsi_queue_req *iscsit_get_cmd_from_response_queue(struct iscsi_conn *);
36extern void iscsit_remove_cmd_from_tx_queues(struct iscsi_cmd *, struct iscsi_conn *); 36extern void iscsit_remove_cmd_from_tx_queues(struct iscsi_cmd *, struct iscsi_conn *);
37extern bool iscsit_conn_all_queues_empty(struct iscsi_conn *); 37extern bool iscsit_conn_all_queues_empty(struct iscsi_conn *);
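The iSCSI hunks above are one mechanical conversion: iscsit_add_cmd_to_response_queue() gains an int return so its allocation failure surfaces as -ENOMEM, and iscsit_queue_rsp(), lio_queue_data_in() and lio_queue_status() forward whatever they get instead of unconditionally returning 0. The toy below models just that propagation; the names and the simulated allocator are illustrative.

/* Toy model of the void -> int conversion: an allocation failure now
 * travels up the call chain instead of being logged and dropped.
 */
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

static int add_to_response_queue(int simulate_oom)
{
        void *qr = simulate_oom ? NULL : malloc(32);

        if (!qr)
                return -ENOMEM; /* was: pr_err(...); return; */
        free(qr);
        return 0;               /* queued; tx thread woken */
}

static int queue_rsp(int simulate_oom)
{
        return add_to_response_queue(simulate_oom);     /* pass it up */
}

int main(void)
{
        printf("ok path:  %d\n", queue_rsp(0));
        printf("oom path: %d\n", queue_rsp(1));
        return 0;
}
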
diff --git a/drivers/target/target_core_alua.c b/drivers/target/target_core_alua.c
index fd7c16a7ca6e..fc4a9c303d55 100644
--- a/drivers/target/target_core_alua.c
+++ b/drivers/target/target_core_alua.c
@@ -197,8 +197,7 @@ target_emulate_report_target_port_groups(struct se_cmd *cmd)
197 /* 197 /*
198 * Set the ASYMMETRIC ACCESS State 198 * Set the ASYMMETRIC ACCESS State
199 */ 199 */
200 buf[off++] |= (atomic_read( 200 buf[off++] |= tg_pt_gp->tg_pt_gp_alua_access_state & 0xff;
201 &tg_pt_gp->tg_pt_gp_alua_access_state) & 0xff);
202 /* 201 /*
203 * Set supported ASYMMETRIC ACCESS State bits 202 * Set supported ASYMMETRIC ACCESS State bits
204 */ 203 */
@@ -710,7 +709,7 @@ target_alua_state_check(struct se_cmd *cmd)
710 709
711 spin_lock(&lun->lun_tg_pt_gp_lock); 710 spin_lock(&lun->lun_tg_pt_gp_lock);
712 tg_pt_gp = lun->lun_tg_pt_gp; 711 tg_pt_gp = lun->lun_tg_pt_gp;
713 out_alua_state = atomic_read(&tg_pt_gp->tg_pt_gp_alua_access_state); 712 out_alua_state = tg_pt_gp->tg_pt_gp_alua_access_state;
714 nonop_delay_msecs = tg_pt_gp->tg_pt_gp_nonop_delay_msecs; 713 nonop_delay_msecs = tg_pt_gp->tg_pt_gp_nonop_delay_msecs;
715 714
716 // XXX: keeps using tg_pt_gp witout reference after unlock 715 // XXX: keeps using tg_pt_gp witout reference after unlock
@@ -911,7 +910,7 @@ static int core_alua_write_tpg_metadata(
911} 910}
912 911
913/* 912/*
914 * Called with tg_pt_gp->tg_pt_gp_md_mutex held 913 * Called with tg_pt_gp->tg_pt_gp_transition_mutex held
915 */ 914 */
916static int core_alua_update_tpg_primary_metadata( 915static int core_alua_update_tpg_primary_metadata(
917 struct t10_alua_tg_pt_gp *tg_pt_gp) 916 struct t10_alua_tg_pt_gp *tg_pt_gp)
@@ -934,7 +933,7 @@ static int core_alua_update_tpg_primary_metadata(
934 "alua_access_state=0x%02x\n" 933 "alua_access_state=0x%02x\n"
935 "alua_access_status=0x%02x\n", 934 "alua_access_status=0x%02x\n",
936 tg_pt_gp->tg_pt_gp_id, 935 tg_pt_gp->tg_pt_gp_id,
937 tg_pt_gp->tg_pt_gp_alua_pending_state, 936 tg_pt_gp->tg_pt_gp_alua_access_state,
938 tg_pt_gp->tg_pt_gp_alua_access_status); 937 tg_pt_gp->tg_pt_gp_alua_access_status);
939 938
940 snprintf(path, ALUA_METADATA_PATH_LEN, 939 snprintf(path, ALUA_METADATA_PATH_LEN,
@@ -1013,93 +1012,41 @@ static void core_alua_queue_state_change_ua(struct t10_alua_tg_pt_gp *tg_pt_gp)
1013 spin_unlock(&tg_pt_gp->tg_pt_gp_lock); 1012 spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
1014} 1013}
1015 1014
1016static void core_alua_do_transition_tg_pt_work(struct work_struct *work)
1017{
1018 struct t10_alua_tg_pt_gp *tg_pt_gp = container_of(work,
1019 struct t10_alua_tg_pt_gp, tg_pt_gp_transition_work);
1020 struct se_device *dev = tg_pt_gp->tg_pt_gp_dev;
1021 bool explicit = (tg_pt_gp->tg_pt_gp_alua_access_status ==
1022 ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG);
1023
1024 /*
1025 * Update the ALUA metadata buf that has been allocated in
1026 * core_alua_do_port_transition(), this metadata will be written
1027 * to struct file.
1028 *
1029 * Note that there is the case where we do not want to update the
1030 * metadata when the saved metadata is being parsed in userspace
1031 * when setting the existing port access state and access status.
1032 *
1033 * Also note that the failure to write out the ALUA metadata to
1034 * struct file does NOT affect the actual ALUA transition.
1035 */
1036 if (tg_pt_gp->tg_pt_gp_write_metadata) {
1037 mutex_lock(&tg_pt_gp->tg_pt_gp_md_mutex);
1038 core_alua_update_tpg_primary_metadata(tg_pt_gp);
1039 mutex_unlock(&tg_pt_gp->tg_pt_gp_md_mutex);
1040 }
1041 /*
1042 * Set the current primary ALUA access state to the requested new state
1043 */
1044 atomic_set(&tg_pt_gp->tg_pt_gp_alua_access_state,
1045 tg_pt_gp->tg_pt_gp_alua_pending_state);
1046
1047 pr_debug("Successful %s ALUA transition TG PT Group: %s ID: %hu"
1048 " from primary access state %s to %s\n", (explicit) ? "explicit" :
1049 "implicit", config_item_name(&tg_pt_gp->tg_pt_gp_group.cg_item),
1050 tg_pt_gp->tg_pt_gp_id,
1051 core_alua_dump_state(tg_pt_gp->tg_pt_gp_alua_previous_state),
1052 core_alua_dump_state(tg_pt_gp->tg_pt_gp_alua_pending_state));
1053
1054 core_alua_queue_state_change_ua(tg_pt_gp);
1055
1056 spin_lock(&dev->t10_alua.tg_pt_gps_lock);
1057 atomic_dec(&tg_pt_gp->tg_pt_gp_ref_cnt);
1058 spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
1059
1060 if (tg_pt_gp->tg_pt_gp_transition_complete)
1061 complete(tg_pt_gp->tg_pt_gp_transition_complete);
1062}
1063
1064static int core_alua_do_transition_tg_pt( 1015static int core_alua_do_transition_tg_pt(
1065 struct t10_alua_tg_pt_gp *tg_pt_gp, 1016 struct t10_alua_tg_pt_gp *tg_pt_gp,
1066 int new_state, 1017 int new_state,
1067 int explicit) 1018 int explicit)
1068{ 1019{
1069 struct se_device *dev = tg_pt_gp->tg_pt_gp_dev; 1020 int prev_state;
1070 DECLARE_COMPLETION_ONSTACK(wait);
1071 1021
1022 mutex_lock(&tg_pt_gp->tg_pt_gp_transition_mutex);
1072 /* Nothing to be done here */ 1023 /* Nothing to be done here */
1073 if (atomic_read(&tg_pt_gp->tg_pt_gp_alua_access_state) == new_state) 1024 if (tg_pt_gp->tg_pt_gp_alua_access_state == new_state) {
1025 mutex_unlock(&tg_pt_gp->tg_pt_gp_transition_mutex);
1074 return 0; 1026 return 0;
1027 }
1075 1028
1076 if (explicit && new_state == ALUA_ACCESS_STATE_TRANSITION) 1029 if (explicit && new_state == ALUA_ACCESS_STATE_TRANSITION) {
1030 mutex_unlock(&tg_pt_gp->tg_pt_gp_transition_mutex);
1077 return -EAGAIN; 1031 return -EAGAIN;
1078 1032 }
1079 /*
1080 * Flush any pending transitions
1081 */
1082 if (!explicit)
1083 flush_work(&tg_pt_gp->tg_pt_gp_transition_work);
1084 1033
1085 /* 1034 /*
1086 * Save the old primary ALUA access state, and set the current state 1035 * Save the old primary ALUA access state, and set the current state
1087 * to ALUA_ACCESS_STATE_TRANSITION. 1036 * to ALUA_ACCESS_STATE_TRANSITION.
1088 */ 1037 */
1089 atomic_set(&tg_pt_gp->tg_pt_gp_alua_access_state, 1038 prev_state = tg_pt_gp->tg_pt_gp_alua_access_state;
1090 ALUA_ACCESS_STATE_TRANSITION); 1039 tg_pt_gp->tg_pt_gp_alua_access_state = ALUA_ACCESS_STATE_TRANSITION;
1091 tg_pt_gp->tg_pt_gp_alua_access_status = (explicit) ? 1040 tg_pt_gp->tg_pt_gp_alua_access_status = (explicit) ?
1092 ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG : 1041 ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG :
1093 ALUA_STATUS_ALTERED_BY_IMPLICIT_ALUA; 1042 ALUA_STATUS_ALTERED_BY_IMPLICIT_ALUA;
1094 1043
1095 core_alua_queue_state_change_ua(tg_pt_gp); 1044 core_alua_queue_state_change_ua(tg_pt_gp);
1096 1045
1097 if (new_state == ALUA_ACCESS_STATE_TRANSITION) 1046 if (new_state == ALUA_ACCESS_STATE_TRANSITION) {
1047 mutex_unlock(&tg_pt_gp->tg_pt_gp_transition_mutex);
1098 return 0; 1048 return 0;
1099 1049 }
1100 tg_pt_gp->tg_pt_gp_alua_previous_state =
1101 atomic_read(&tg_pt_gp->tg_pt_gp_alua_access_state);
1102 tg_pt_gp->tg_pt_gp_alua_pending_state = new_state;
1103 1050
1104 /* 1051 /*
1105 * Check for the optional ALUA primary state transition delay 1052 * Check for the optional ALUA primary state transition delay
@@ -1108,19 +1055,36 @@ static int core_alua_do_transition_tg_pt(
1108 msleep_interruptible(tg_pt_gp->tg_pt_gp_trans_delay_msecs); 1055 msleep_interruptible(tg_pt_gp->tg_pt_gp_trans_delay_msecs);
1109 1056
1110 /* 1057 /*
1111 * Take a reference for workqueue item 1058 * Set the current primary ALUA access state to the requested new state
1112 */ 1059 */
1113 spin_lock(&dev->t10_alua.tg_pt_gps_lock); 1060 tg_pt_gp->tg_pt_gp_alua_access_state = new_state;
1114 atomic_inc(&tg_pt_gp->tg_pt_gp_ref_cnt);
1115 spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
1116 1061
1117 schedule_work(&tg_pt_gp->tg_pt_gp_transition_work); 1062 /*
1118 if (explicit) { 1063 * Update the ALUA metadata buf that has been allocated in
1119 tg_pt_gp->tg_pt_gp_transition_complete = &wait; 1064 * core_alua_do_port_transition(), this metadata will be written
1120 wait_for_completion(&wait); 1065 * to struct file.
1121 tg_pt_gp->tg_pt_gp_transition_complete = NULL; 1066 *
1067 * Note that there is the case where we do not want to update the
1068 * metadata when the saved metadata is being parsed in userspace
1069 * when setting the existing port access state and access status.
1070 *
1071 * Also note that the failure to write out the ALUA metadata to
1072 * struct file does NOT affect the actual ALUA transition.
1073 */
1074 if (tg_pt_gp->tg_pt_gp_write_metadata) {
1075 core_alua_update_tpg_primary_metadata(tg_pt_gp);
1122 } 1076 }
1123 1077
1078 pr_debug("Successful %s ALUA transition TG PT Group: %s ID: %hu"
1079 " from primary access state %s to %s\n", (explicit) ? "explicit" :
1080 "implicit", config_item_name(&tg_pt_gp->tg_pt_gp_group.cg_item),
1081 tg_pt_gp->tg_pt_gp_id,
1082 core_alua_dump_state(prev_state),
1083 core_alua_dump_state(new_state));
1084
1085 core_alua_queue_state_change_ua(tg_pt_gp);
1086
1087 mutex_unlock(&tg_pt_gp->tg_pt_gp_transition_mutex);
1124 return 0; 1088 return 0;
1125} 1089}
1126 1090
@@ -1685,14 +1649,12 @@ struct t10_alua_tg_pt_gp *core_alua_allocate_tg_pt_gp(struct se_device *dev,
1685 } 1649 }
1686 INIT_LIST_HEAD(&tg_pt_gp->tg_pt_gp_list); 1650 INIT_LIST_HEAD(&tg_pt_gp->tg_pt_gp_list);
1687 INIT_LIST_HEAD(&tg_pt_gp->tg_pt_gp_lun_list); 1651 INIT_LIST_HEAD(&tg_pt_gp->tg_pt_gp_lun_list);
1688 mutex_init(&tg_pt_gp->tg_pt_gp_md_mutex); 1652 mutex_init(&tg_pt_gp->tg_pt_gp_transition_mutex);
1689 spin_lock_init(&tg_pt_gp->tg_pt_gp_lock); 1653 spin_lock_init(&tg_pt_gp->tg_pt_gp_lock);
1690 atomic_set(&tg_pt_gp->tg_pt_gp_ref_cnt, 0); 1654 atomic_set(&tg_pt_gp->tg_pt_gp_ref_cnt, 0);
1691 INIT_WORK(&tg_pt_gp->tg_pt_gp_transition_work,
1692 core_alua_do_transition_tg_pt_work);
1693 tg_pt_gp->tg_pt_gp_dev = dev; 1655 tg_pt_gp->tg_pt_gp_dev = dev;
1694 atomic_set(&tg_pt_gp->tg_pt_gp_alua_access_state, 1656 tg_pt_gp->tg_pt_gp_alua_access_state =
1695 ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED); 1657 ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED;
1696 /* 1658 /*
1697 * Enable both explicit and implicit ALUA support by default 1659 * Enable both explicit and implicit ALUA support by default
1698 */ 1660 */
@@ -1797,8 +1759,6 @@ void core_alua_free_tg_pt_gp(
1797 dev->t10_alua.alua_tg_pt_gps_counter--; 1759 dev->t10_alua.alua_tg_pt_gps_counter--;
1798 spin_unlock(&dev->t10_alua.tg_pt_gps_lock); 1760 spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
1799 1761
1800 flush_work(&tg_pt_gp->tg_pt_gp_transition_work);
1801
1802 /* 1762 /*
1803 * Allow a struct t10_alua_tg_pt_gp_member * referenced by 1763 * Allow a struct t10_alua_tg_pt_gp_member * referenced by
1804 * core_alua_get_tg_pt_gp_by_name() in 1764 * core_alua_get_tg_pt_gp_by_name() in
@@ -1938,8 +1898,8 @@ ssize_t core_alua_show_tg_pt_gp_info(struct se_lun *lun, char *page)
1938 "Primary Access Status: %s\nTG Port Secondary Access" 1898 "Primary Access Status: %s\nTG Port Secondary Access"
1939 " State: %s\nTG Port Secondary Access Status: %s\n", 1899 " State: %s\nTG Port Secondary Access Status: %s\n",
1940 config_item_name(tg_pt_ci), tg_pt_gp->tg_pt_gp_id, 1900 config_item_name(tg_pt_ci), tg_pt_gp->tg_pt_gp_id,
1941 core_alua_dump_state(atomic_read( 1901 core_alua_dump_state(
1942 &tg_pt_gp->tg_pt_gp_alua_access_state)), 1902 tg_pt_gp->tg_pt_gp_alua_access_state),
1943 core_alua_dump_status( 1903 core_alua_dump_status(
1944 tg_pt_gp->tg_pt_gp_alua_access_status), 1904 tg_pt_gp->tg_pt_gp_alua_access_status),
1945 atomic_read(&lun->lun_tg_pt_secondary_offline) ? 1905 atomic_read(&lun->lun_tg_pt_secondary_offline) ?
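The net effect of the target_core_alua.c hunks is that a primary ALUA transition is now a straight-line critical section under the new tg_pt_gp_transition_mutex rather than a scheduled work item with its completion, flush and refcount choreography; that is also why tg_pt_gp_alua_access_state can drop from atomic_t to a plain int (the configfs readers below follow suit) and why the previous/pending state fields disappear. The sketch reduces the new control flow to its skeleton; fields are abbreviated and the delay, metadata and unit-attention steps appear only as comments.

/* Skeleton of the synchronous transition that replaces the work item.
 * Field names abbreviated; metadata/UA/delay handling reduced to
 * comments.
 */
#include <linux/mutex.h>

struct alua_grp {
        struct mutex transition_mutex;
        int state;                      /* was atomic_t + pending/previous */
};

static int do_transition(struct alua_grp *g, int new_state)
{
        mutex_lock(&g->transition_mutex);
        if (g->state == new_state) {
                mutex_unlock(&g->transition_mutex);
                return 0;               /* nothing to be done */
        }

        /* publish the intermediate TRANSITION state, queue a UA,
         * honor the optional transition delay ... */
        g->state = new_state;           /* single writer, under the mutex */
        /* ... write metadata and queue the second UA inline, right
         * where the work item used to run ... */
        mutex_unlock(&g->transition_mutex);
        return 0;
}
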
diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c
index 38b5025e4c7a..70657fd56440 100644
--- a/drivers/target/target_core_configfs.c
+++ b/drivers/target/target_core_configfs.c
@@ -2392,7 +2392,7 @@ static ssize_t target_tg_pt_gp_alua_access_state_show(struct config_item *item,
2392 char *page) 2392 char *page)
2393{ 2393{
2394 return sprintf(page, "%d\n", 2394 return sprintf(page, "%d\n",
2395 atomic_read(&to_tg_pt_gp(item)->tg_pt_gp_alua_access_state)); 2395 to_tg_pt_gp(item)->tg_pt_gp_alua_access_state);
2396} 2396}
2397 2397
2398static ssize_t target_tg_pt_gp_alua_access_state_store(struct config_item *item, 2398static ssize_t target_tg_pt_gp_alua_access_state_store(struct config_item *item,
diff --git a/drivers/target/target_core_fabric_configfs.c b/drivers/target/target_core_fabric_configfs.c
index d8a16ca6baa5..d1e6cab8e3d3 100644
--- a/drivers/target/target_core_fabric_configfs.c
+++ b/drivers/target/target_core_fabric_configfs.c
@@ -92,6 +92,11 @@ static int target_fabric_mappedlun_link(
92 pr_err("Source se_lun->lun_se_dev does not exist\n"); 92 pr_err("Source se_lun->lun_se_dev does not exist\n");
93 return -EINVAL; 93 return -EINVAL;
94 } 94 }
95 if (lun->lun_shutdown) {
96 pr_err("Unable to create mappedlun symlink because"
97 " lun->lun_shutdown=true\n");
98 return -EINVAL;
99 }
95 se_tpg = lun->lun_tpg; 100 se_tpg = lun->lun_tpg;
96 101
97 nacl_ci = &lun_acl_ci->ci_parent->ci_group->cg_item; 102 nacl_ci = &lun_acl_ci->ci_parent->ci_group->cg_item;
diff --git a/drivers/target/target_core_tpg.c b/drivers/target/target_core_tpg.c
index 6fb191914f45..dfaef4d3b2d2 100644
--- a/drivers/target/target_core_tpg.c
+++ b/drivers/target/target_core_tpg.c
@@ -642,6 +642,8 @@ void core_tpg_remove_lun(
642 */ 642 */
643 struct se_device *dev = rcu_dereference_raw(lun->lun_se_dev); 643 struct se_device *dev = rcu_dereference_raw(lun->lun_se_dev);
644 644
645 lun->lun_shutdown = true;
646
645 core_clear_lun_from_tpg(lun, tpg); 647 core_clear_lun_from_tpg(lun, tpg);
646 /* 648 /*
647 * Wait for any active I/O references to percpu se_lun->lun_ref to 649 * Wait for any active I/O references to percpu se_lun->lun_ref to
@@ -663,6 +665,8 @@ void core_tpg_remove_lun(
663 } 665 }
664 if (!(dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE)) 666 if (!(dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE))
665 hlist_del_rcu(&lun->link); 667 hlist_del_rcu(&lun->link);
668
669 lun->lun_shutdown = false;
666 mutex_unlock(&tpg->tpg_lun_mutex); 670 mutex_unlock(&tpg->tpg_lun_mutex);
667 671
668 percpu_ref_exit(&lun->lun_ref); 672 percpu_ref_exit(&lun->lun_ref);
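The fabric-configfs and tpg hunks work as a pair: core_tpg_remove_lun() raises lun->lun_shutdown for the duration of the teardown and target_fabric_mappedlun_link() refuses to create a new mappedlun symlink while it is set, closing a race between configfs link creation and LUN removal. The toy below models the handshake; the locking is simplified relative to the driver (which uses tpg_lun_mutex), and all names are illustrative.

/* Toy model of the lun_shutdown handshake, with simplified locking. */
#include <errno.h>
#include <pthread.h>
#include <stdbool.h>

struct lun_model {
        pthread_mutex_t lock;
        bool shutting_down;
};

static int mappedlun_link(struct lun_model *lun)
{
        int ret = 0;

        pthread_mutex_lock(&lun->lock);
        if (lun->shutting_down)
                ret = -EINVAL;  /* mirrors the new configfs check */
        /* else: create the symlink; teardown is excluded meanwhile */
        pthread_mutex_unlock(&lun->lock);
        return ret;
}

static void remove_lun(struct lun_model *lun)
{
        pthread_mutex_lock(&lun->lock);
        lun->shutting_down = true;
        /* ... clear mappings, wait for I/O refs, unhash the LUN ... */
        lun->shutting_down = false;
        pthread_mutex_unlock(&lun->lock);
}
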
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index b1a3cdb29468..a0cd56ee5fe9 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -64,8 +64,9 @@ struct kmem_cache *t10_alua_lba_map_cache;
64struct kmem_cache *t10_alua_lba_map_mem_cache; 64struct kmem_cache *t10_alua_lba_map_mem_cache;
65 65
66static void transport_complete_task_attr(struct se_cmd *cmd); 66static void transport_complete_task_attr(struct se_cmd *cmd);
67static int translate_sense_reason(struct se_cmd *cmd, sense_reason_t reason);
67static void transport_handle_queue_full(struct se_cmd *cmd, 68static void transport_handle_queue_full(struct se_cmd *cmd,
68 struct se_device *dev); 69 struct se_device *dev, int err, bool write_pending);
69static int transport_put_cmd(struct se_cmd *cmd); 70static int transport_put_cmd(struct se_cmd *cmd);
70static void target_complete_ok_work(struct work_struct *work); 71static void target_complete_ok_work(struct work_struct *work);
71 72
@@ -804,7 +805,8 @@ void target_qf_do_work(struct work_struct *work)
804 805
805 if (cmd->t_state == TRANSPORT_COMPLETE_QF_WP) 806 if (cmd->t_state == TRANSPORT_COMPLETE_QF_WP)
806 transport_write_pending_qf(cmd); 807 transport_write_pending_qf(cmd);
807 else if (cmd->t_state == TRANSPORT_COMPLETE_QF_OK) 808 else if (cmd->t_state == TRANSPORT_COMPLETE_QF_OK ||
809 cmd->t_state == TRANSPORT_COMPLETE_QF_ERR)
808 transport_complete_qf(cmd); 810 transport_complete_qf(cmd);
809 } 811 }
810} 812}
@@ -1719,7 +1721,7 @@ void transport_generic_request_failure(struct se_cmd *cmd,
1719 } 1721 }
1720 trace_target_cmd_complete(cmd); 1722 trace_target_cmd_complete(cmd);
1721 ret = cmd->se_tfo->queue_status(cmd); 1723 ret = cmd->se_tfo->queue_status(cmd);
1722 if (ret == -EAGAIN || ret == -ENOMEM) 1724 if (ret)
1723 goto queue_full; 1725 goto queue_full;
1724 goto check_stop; 1726 goto check_stop;
1725 default: 1727 default:
@@ -1730,7 +1732,7 @@ void transport_generic_request_failure(struct se_cmd *cmd,
1730 } 1732 }
1731 1733
1732 ret = transport_send_check_condition_and_sense(cmd, sense_reason, 0); 1734 ret = transport_send_check_condition_and_sense(cmd, sense_reason, 0);
1733 if (ret == -EAGAIN || ret == -ENOMEM) 1735 if (ret)
1734 goto queue_full; 1736 goto queue_full;
1735 1737
1736check_stop: 1738check_stop:
@@ -1739,8 +1741,7 @@ check_stop:
1739 return; 1741 return;
1740 1742
1741queue_full: 1743queue_full:
1742 cmd->t_state = TRANSPORT_COMPLETE_QF_OK; 1744 transport_handle_queue_full(cmd, cmd->se_dev, ret, false);
1743 transport_handle_queue_full(cmd, cmd->se_dev);
1744} 1745}
1745EXPORT_SYMBOL(transport_generic_request_failure); 1746EXPORT_SYMBOL(transport_generic_request_failure);
1746 1747
@@ -1977,13 +1978,29 @@ static void transport_complete_qf(struct se_cmd *cmd)
1977 int ret = 0; 1978 int ret = 0;
1978 1979
1979 transport_complete_task_attr(cmd); 1980 transport_complete_task_attr(cmd);
1981 /*
1982 * If a fabric driver ->write_pending() or ->queue_data_in() callback
1983 * has returned neither -ENOMEM or -EAGAIN, assume it's fatal and
1984 * the same callbacks should not be retried. Return CHECK_CONDITION
1985 * if a scsi_status is not already set.
1986 *
1987 * If a fabric driver ->queue_status() has returned non zero, always
1988 * keep retrying no matter what..
1989 */
1990 if (cmd->t_state == TRANSPORT_COMPLETE_QF_ERR) {
1991 if (cmd->scsi_status)
1992 goto queue_status;
1980 1993
1981 if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) { 1994 cmd->se_cmd_flags |= SCF_EMULATED_TASK_SENSE;
1982 trace_target_cmd_complete(cmd); 1995 cmd->scsi_status = SAM_STAT_CHECK_CONDITION;
1983 ret = cmd->se_tfo->queue_status(cmd); 1996 cmd->scsi_sense_length = TRANSPORT_SENSE_BUFFER;
1984 goto out; 1997 translate_sense_reason(cmd, TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE);
1998 goto queue_status;
1985 } 1999 }
1986 2000
2001 if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE)
2002 goto queue_status;
2003
1987 switch (cmd->data_direction) { 2004 switch (cmd->data_direction) {
1988 case DMA_FROM_DEVICE: 2005 case DMA_FROM_DEVICE:
1989 if (cmd->scsi_status) 2006 if (cmd->scsi_status)
@@ -2007,19 +2024,33 @@ queue_status:
2007 break; 2024 break;
2008 } 2025 }
2009 2026
2010out:
2011 if (ret < 0) { 2027 if (ret < 0) {
2012 transport_handle_queue_full(cmd, cmd->se_dev); 2028 transport_handle_queue_full(cmd, cmd->se_dev, ret, false);
2013 return; 2029 return;
2014 } 2030 }
2015 transport_lun_remove_cmd(cmd); 2031 transport_lun_remove_cmd(cmd);
2016 transport_cmd_check_stop_to_fabric(cmd); 2032 transport_cmd_check_stop_to_fabric(cmd);
2017} 2033}
2018 2034
2019static void transport_handle_queue_full( 2035static void transport_handle_queue_full(struct se_cmd *cmd, struct se_device *dev,
2020 struct se_cmd *cmd, 2036 int err, bool write_pending)
2021 struct se_device *dev)
2022{ 2037{
2038 /*
2039 * -EAGAIN or -ENOMEM signals retry of ->write_pending() and/or
2040 * ->queue_data_in() callbacks from new process context.
2041 *
2042 * Otherwise for other errors, transport_complete_qf() will send
2043 * CHECK_CONDITION via ->queue_status() instead of attempting to
2044 * retry associated fabric driver data-transfer callbacks.
2045 */
2046 if (err == -EAGAIN || err == -ENOMEM) {
2047 cmd->t_state = (write_pending) ? TRANSPORT_COMPLETE_QF_WP :
2048 TRANSPORT_COMPLETE_QF_OK;
2049 } else {
2050 pr_warn_ratelimited("Got unknown fabric queue status: %d\n", err);
2051 cmd->t_state = TRANSPORT_COMPLETE_QF_ERR;
2052 }
2053
2023 spin_lock_irq(&dev->qf_cmd_lock); 2054 spin_lock_irq(&dev->qf_cmd_lock);
2024 list_add_tail(&cmd->se_qf_node, &cmd->se_dev->qf_cmd_list); 2055 list_add_tail(&cmd->se_qf_node, &cmd->se_dev->qf_cmd_list);
2025 atomic_inc_mb(&dev->dev_qf_count); 2056 atomic_inc_mb(&dev->dev_qf_count);
@@ -2083,7 +2114,7 @@ static void target_complete_ok_work(struct work_struct *work)
2083 WARN_ON(!cmd->scsi_status); 2114 WARN_ON(!cmd->scsi_status);
2084 ret = transport_send_check_condition_and_sense( 2115 ret = transport_send_check_condition_and_sense(
2085 cmd, 0, 1); 2116 cmd, 0, 1);
2086 if (ret == -EAGAIN || ret == -ENOMEM) 2117 if (ret)
2087 goto queue_full; 2118 goto queue_full;
2088 2119
2089 transport_lun_remove_cmd(cmd); 2120 transport_lun_remove_cmd(cmd);
@@ -2109,7 +2140,7 @@ static void target_complete_ok_work(struct work_struct *work)
2109 } else if (rc) { 2140 } else if (rc) {
2110 ret = transport_send_check_condition_and_sense(cmd, 2141 ret = transport_send_check_condition_and_sense(cmd,
2111 rc, 0); 2142 rc, 0);
2112 if (ret == -EAGAIN || ret == -ENOMEM) 2143 if (ret)
2113 goto queue_full; 2144 goto queue_full;
2114 2145
2115 transport_lun_remove_cmd(cmd); 2146 transport_lun_remove_cmd(cmd);
@@ -2134,7 +2165,7 @@ queue_rsp:
2134 if (target_read_prot_action(cmd)) { 2165 if (target_read_prot_action(cmd)) {
2135 ret = transport_send_check_condition_and_sense(cmd, 2166 ret = transport_send_check_condition_and_sense(cmd,
2136 cmd->pi_err, 0); 2167 cmd->pi_err, 0);
2137 if (ret == -EAGAIN || ret == -ENOMEM) 2168 if (ret)
2138 goto queue_full; 2169 goto queue_full;
2139 2170
2140 transport_lun_remove_cmd(cmd); 2171 transport_lun_remove_cmd(cmd);
@@ -2144,7 +2175,7 @@ queue_rsp:
2144 2175
2145 trace_target_cmd_complete(cmd); 2176 trace_target_cmd_complete(cmd);
2146 ret = cmd->se_tfo->queue_data_in(cmd); 2177 ret = cmd->se_tfo->queue_data_in(cmd);
2147 if (ret == -EAGAIN || ret == -ENOMEM) 2178 if (ret)
2148 goto queue_full; 2179 goto queue_full;
2149 break; 2180 break;
2150 case DMA_TO_DEVICE: 2181 case DMA_TO_DEVICE:
@@ -2157,7 +2188,7 @@ queue_rsp:
2157 atomic_long_add(cmd->data_length, 2188 atomic_long_add(cmd->data_length,
2158 &cmd->se_lun->lun_stats.tx_data_octets); 2189 &cmd->se_lun->lun_stats.tx_data_octets);
2159 ret = cmd->se_tfo->queue_data_in(cmd); 2190 ret = cmd->se_tfo->queue_data_in(cmd);
2160 if (ret == -EAGAIN || ret == -ENOMEM) 2191 if (ret)
2161 goto queue_full; 2192 goto queue_full;
2162 break; 2193 break;
2163 } 2194 }
@@ -2166,7 +2197,7 @@ queue_rsp:
2166queue_status: 2197queue_status:
2167 trace_target_cmd_complete(cmd); 2198 trace_target_cmd_complete(cmd);
2168 ret = cmd->se_tfo->queue_status(cmd); 2199 ret = cmd->se_tfo->queue_status(cmd);
2169 if (ret == -EAGAIN || ret == -ENOMEM) 2200 if (ret)
2170 goto queue_full; 2201 goto queue_full;
2171 break; 2202 break;
2172 default: 2203 default:
@@ -2180,8 +2211,8 @@ queue_status:
2180queue_full: 2211queue_full:
2181 pr_debug("Handling complete_ok QUEUE_FULL: se_cmd: %p," 2212 pr_debug("Handling complete_ok QUEUE_FULL: se_cmd: %p,"
2182 " data_direction: %d\n", cmd, cmd->data_direction); 2213 " data_direction: %d\n", cmd, cmd->data_direction);
2183 cmd->t_state = TRANSPORT_COMPLETE_QF_OK; 2214
2184 transport_handle_queue_full(cmd, cmd->se_dev); 2215 transport_handle_queue_full(cmd, cmd->se_dev, ret, false);
2185} 2216}
2186 2217
2187void target_free_sgl(struct scatterlist *sgl, int nents) 2218void target_free_sgl(struct scatterlist *sgl, int nents)
@@ -2449,18 +2480,14 @@ transport_generic_new_cmd(struct se_cmd *cmd)
2449 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 2480 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
2450 2481
2451 ret = cmd->se_tfo->write_pending(cmd); 2482 ret = cmd->se_tfo->write_pending(cmd);
2452 if (ret == -EAGAIN || ret == -ENOMEM) 2483 if (ret)
2453 goto queue_full; 2484 goto queue_full;
2454 2485
2455 /* fabric drivers should only return -EAGAIN or -ENOMEM as error */ 2486 return 0;
2456 WARN_ON(ret);
2457
2458 return (!ret) ? 0 : TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
2459 2487
2460queue_full: 2488queue_full:
2461 pr_debug("Handling write_pending QUEUE__FULL: se_cmd: %p\n", cmd); 2489 pr_debug("Handling write_pending QUEUE__FULL: se_cmd: %p\n", cmd);
2462 cmd->t_state = TRANSPORT_COMPLETE_QF_WP; 2490 transport_handle_queue_full(cmd, cmd->se_dev, ret, true);
2463 transport_handle_queue_full(cmd, cmd->se_dev);
2464 return 0; 2491 return 0;
2465} 2492}
2466EXPORT_SYMBOL(transport_generic_new_cmd); 2493EXPORT_SYMBOL(transport_generic_new_cmd);
@@ -2470,10 +2497,10 @@ static void transport_write_pending_qf(struct se_cmd *cmd)
2470 int ret; 2497 int ret;
2471 2498
2472 ret = cmd->se_tfo->write_pending(cmd); 2499 ret = cmd->se_tfo->write_pending(cmd);
2473 if (ret == -EAGAIN || ret == -ENOMEM) { 2500 if (ret) {
2474 pr_debug("Handling write_pending QUEUE__FULL: se_cmd: %p\n", 2501 pr_debug("Handling write_pending QUEUE__FULL: se_cmd: %p\n",
2475 cmd); 2502 cmd);
2476 transport_handle_queue_full(cmd, cmd->se_dev); 2503 transport_handle_queue_full(cmd, cmd->se_dev, ret, true);
2477 } 2504 }
2478} 2505}
2479 2506
@@ -3011,6 +3038,8 @@ static int __transport_check_aborted_status(struct se_cmd *cmd, int send_status)
3011 __releases(&cmd->t_state_lock) 3038 __releases(&cmd->t_state_lock)
3012 __acquires(&cmd->t_state_lock) 3039 __acquires(&cmd->t_state_lock)
3013{ 3040{
3041 int ret;
3042
3014 assert_spin_locked(&cmd->t_state_lock); 3043 assert_spin_locked(&cmd->t_state_lock);
3015 WARN_ON_ONCE(!irqs_disabled()); 3044 WARN_ON_ONCE(!irqs_disabled());
3016 3045
@@ -3034,7 +3063,9 @@ static int __transport_check_aborted_status(struct se_cmd *cmd, int send_status)
3034 trace_target_cmd_complete(cmd); 3063 trace_target_cmd_complete(cmd);
3035 3064
3036 spin_unlock_irq(&cmd->t_state_lock); 3065 spin_unlock_irq(&cmd->t_state_lock);
3037 cmd->se_tfo->queue_status(cmd); 3066 ret = cmd->se_tfo->queue_status(cmd);
3067 if (ret)
3068 transport_handle_queue_full(cmd, cmd->se_dev, ret, false);
3038 spin_lock_irq(&cmd->t_state_lock); 3069 spin_lock_irq(&cmd->t_state_lock);
3039 3070
3040 return 1; 3071 return 1;
@@ -3055,6 +3086,7 @@ EXPORT_SYMBOL(transport_check_aborted_status);
3055void transport_send_task_abort(struct se_cmd *cmd) 3086void transport_send_task_abort(struct se_cmd *cmd)
3056{ 3087{
3057 unsigned long flags; 3088 unsigned long flags;
3089 int ret;
3058 3090
3059 spin_lock_irqsave(&cmd->t_state_lock, flags); 3091 spin_lock_irqsave(&cmd->t_state_lock, flags);
3060 if (cmd->se_cmd_flags & (SCF_SENT_CHECK_CONDITION)) { 3092 if (cmd->se_cmd_flags & (SCF_SENT_CHECK_CONDITION)) {
@@ -3090,7 +3122,9 @@ send_abort:
3090 cmd->t_task_cdb[0], cmd->tag); 3122 cmd->t_task_cdb[0], cmd->tag);
3091 3123
3092 trace_target_cmd_complete(cmd); 3124 trace_target_cmd_complete(cmd);
3093 cmd->se_tfo->queue_status(cmd); 3125 ret = cmd->se_tfo->queue_status(cmd);
3126 if (ret)
3127 transport_handle_queue_full(cmd, cmd->se_dev, ret, false);
3094} 3128}
3095 3129
3096static void target_tmr_work(struct work_struct *work) 3130static void target_tmr_work(struct work_struct *work)
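All of the transport hunks funnel into one policy, now centralized in transport_handle_queue_full(): a fabric callback returning -EAGAIN or -ENOMEM is still treated as a transient queue-full and the same callback is retried (TRANSPORT_COMPLETE_QF_OK / _QF_WP), while any other non-zero return is classed as fatal (TRANSPORT_COMPLETE_QF_ERR) and answered with a CHECK_CONDITION through ->queue_status() rather than endless retries of the data-transfer callback. The snippet below models only that classification.

/* Model of the queue-full classification added above. */
#include <errno.h>
#include <stdio.h>

enum qf { QF_OK, QF_WP, QF_ERR };       /* mirror the three t_state values */

static enum qf classify(int err, int write_pending)
{
        if (err == -EAGAIN || err == -ENOMEM)   /* transient: retry later */
                return write_pending ? QF_WP : QF_OK;
        return QF_ERR;  /* fatal: CHECK_CONDITION via ->queue_status() */
}

int main(void)
{
        printf("%d %d %d\n",
               classify(-ENOMEM, 0),    /* QF_OK  */
               classify(-EAGAIN, 1),    /* QF_WP  */
               classify(-EIO, 0));      /* QF_ERR */
        return 0;
}
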
diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c
index c6874c38a10b..f615c3bbb73e 100644
--- a/drivers/target/target_core_user.c
+++ b/drivers/target/target_core_user.c
@@ -311,24 +311,50 @@ static void free_data_area(struct tcmu_dev *udev, struct tcmu_cmd *cmd)
311 DATA_BLOCK_BITS); 311 DATA_BLOCK_BITS);
312} 312}
313 313
314static void gather_data_area(struct tcmu_dev *udev, unsigned long *cmd_bitmap, 314static void gather_data_area(struct tcmu_dev *udev, struct tcmu_cmd *cmd,
315 struct scatterlist *data_sg, unsigned int data_nents) 315 bool bidi)
316{ 316{
317 struct se_cmd *se_cmd = cmd->se_cmd;
317 int i, block; 318 int i, block;
318 int block_remaining = 0; 319 int block_remaining = 0;
319 void *from, *to; 320 void *from, *to;
320 size_t copy_bytes, from_offset; 321 size_t copy_bytes, from_offset;
321 struct scatterlist *sg; 322 struct scatterlist *sg, *data_sg;
323 unsigned int data_nents;
324 DECLARE_BITMAP(bitmap, DATA_BLOCK_BITS);
325
326 bitmap_copy(bitmap, cmd->data_bitmap, DATA_BLOCK_BITS);
327
328 if (!bidi) {
329 data_sg = se_cmd->t_data_sg;
330 data_nents = se_cmd->t_data_nents;
331 } else {
332 uint32_t count;
333
334 /*
335 * For bidi case, the first count blocks are for Data-Out
336 * buffer blocks, and before gathering the Data-In buffer
337 * the Data-Out buffer blocks should be discarded.
338 */
339 count = DIV_ROUND_UP(se_cmd->data_length, DATA_BLOCK_SIZE);
340 while (count--) {
341 block = find_first_bit(bitmap, DATA_BLOCK_BITS);
342 clear_bit(block, bitmap);
343 }
344
345 data_sg = se_cmd->t_bidi_data_sg;
346 data_nents = se_cmd->t_bidi_data_nents;
347 }
322 348
323 for_each_sg(data_sg, sg, data_nents, i) { 349 for_each_sg(data_sg, sg, data_nents, i) {
324 int sg_remaining = sg->length; 350 int sg_remaining = sg->length;
325 to = kmap_atomic(sg_page(sg)) + sg->offset; 351 to = kmap_atomic(sg_page(sg)) + sg->offset;
326 while (sg_remaining > 0) { 352 while (sg_remaining > 0) {
327 if (block_remaining == 0) { 353 if (block_remaining == 0) {
328 block = find_first_bit(cmd_bitmap, 354 block = find_first_bit(bitmap,
329 DATA_BLOCK_BITS); 355 DATA_BLOCK_BITS);
330 block_remaining = DATA_BLOCK_SIZE; 356 block_remaining = DATA_BLOCK_SIZE;
331 clear_bit(block, cmd_bitmap); 357 clear_bit(block, bitmap);
332 } 358 }
333 copy_bytes = min_t(size_t, sg_remaining, 359 copy_bytes = min_t(size_t, sg_remaining,
334 block_remaining); 360 block_remaining);
@@ -394,6 +420,27 @@ static bool is_ring_space_avail(struct tcmu_dev *udev, size_t cmd_size, size_t d
394 return true; 420 return true;
395} 421}
396 422
423static inline size_t tcmu_cmd_get_data_length(struct tcmu_cmd *tcmu_cmd)
424{
425 struct se_cmd *se_cmd = tcmu_cmd->se_cmd;
426 size_t data_length = round_up(se_cmd->data_length, DATA_BLOCK_SIZE);
427
428 if (se_cmd->se_cmd_flags & SCF_BIDI) {
429 BUG_ON(!(se_cmd->t_bidi_data_sg && se_cmd->t_bidi_data_nents));
430 data_length += round_up(se_cmd->t_bidi_data_sg->length,
431 DATA_BLOCK_SIZE);
432 }
433
434 return data_length;
435}
436
437static inline uint32_t tcmu_cmd_get_block_cnt(struct tcmu_cmd *tcmu_cmd)
438{
439 size_t data_length = tcmu_cmd_get_data_length(tcmu_cmd);
440
441 return data_length / DATA_BLOCK_SIZE;
442}
443
397static sense_reason_t 444static sense_reason_t
398tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd) 445tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd)
399{ 446{
@@ -407,7 +454,7 @@ tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd)
407 uint32_t cmd_head; 454 uint32_t cmd_head;
408 uint64_t cdb_off; 455 uint64_t cdb_off;
409 bool copy_to_data_area; 456 bool copy_to_data_area;
410 size_t data_length; 457 size_t data_length = tcmu_cmd_get_data_length(tcmu_cmd);
411 DECLARE_BITMAP(old_bitmap, DATA_BLOCK_BITS); 458 DECLARE_BITMAP(old_bitmap, DATA_BLOCK_BITS);
412 459
413 if (test_bit(TCMU_DEV_BIT_BROKEN, &udev->flags)) 460 if (test_bit(TCMU_DEV_BIT_BROKEN, &udev->flags))
@@ -421,8 +468,7 @@ tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd)
421 * expensive to tell how many regions are freed in the bitmap 468 * expensive to tell how many regions are freed in the bitmap
422 */ 469 */
423 base_command_size = max(offsetof(struct tcmu_cmd_entry, 470 base_command_size = max(offsetof(struct tcmu_cmd_entry,
424 req.iov[se_cmd->t_bidi_data_nents + 471 req.iov[tcmu_cmd_get_block_cnt(tcmu_cmd)]),
425 se_cmd->t_data_nents]),
426 sizeof(struct tcmu_cmd_entry)); 472 sizeof(struct tcmu_cmd_entry));
427 command_size = base_command_size 473 command_size = base_command_size
428 + round_up(scsi_command_size(se_cmd->t_task_cdb), TCMU_OP_ALIGN_SIZE); 474 + round_up(scsi_command_size(se_cmd->t_task_cdb), TCMU_OP_ALIGN_SIZE);
@@ -433,11 +479,6 @@ tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd)
433 479
434 mb = udev->mb_addr; 480 mb = udev->mb_addr;
435 cmd_head = mb->cmd_head % udev->cmdr_size; /* UAM */ 481 cmd_head = mb->cmd_head % udev->cmdr_size; /* UAM */
436 data_length = se_cmd->data_length;
437 if (se_cmd->se_cmd_flags & SCF_BIDI) {
438 BUG_ON(!(se_cmd->t_bidi_data_sg && se_cmd->t_bidi_data_nents));
439 data_length += se_cmd->t_bidi_data_sg->length;
440 }
441 if ((command_size > (udev->cmdr_size / 2)) || 482 if ((command_size > (udev->cmdr_size / 2)) ||
442 data_length > udev->data_size) { 483 data_length > udev->data_size) {
443 pr_warn("TCMU: Request of size %zu/%zu is too big for %u/%zu " 484 pr_warn("TCMU: Request of size %zu/%zu is too big for %u/%zu "
@@ -511,11 +552,14 @@ tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd)
511 entry->req.iov_dif_cnt = 0; 552 entry->req.iov_dif_cnt = 0;
512 553
513 /* Handle BIDI commands */ 554 /* Handle BIDI commands */
514 iov_cnt = 0; 555 if (se_cmd->se_cmd_flags & SCF_BIDI) {
515 alloc_and_scatter_data_area(udev, se_cmd->t_bidi_data_sg, 556 iov_cnt = 0;
516 se_cmd->t_bidi_data_nents, &iov, &iov_cnt, false); 557 iov++;
517 entry->req.iov_bidi_cnt = iov_cnt; 558 alloc_and_scatter_data_area(udev, se_cmd->t_bidi_data_sg,
518 559 se_cmd->t_bidi_data_nents, &iov, &iov_cnt,
560 false);
561 entry->req.iov_bidi_cnt = iov_cnt;
562 }
519 /* cmd's data_bitmap is what changed in process */ 563 /* cmd's data_bitmap is what changed in process */
520 bitmap_xor(tcmu_cmd->data_bitmap, old_bitmap, udev->data_bitmap, 564 bitmap_xor(tcmu_cmd->data_bitmap, old_bitmap, udev->data_bitmap,
521 DATA_BLOCK_BITS); 565 DATA_BLOCK_BITS);
@@ -592,19 +636,11 @@ static void tcmu_handle_completion(struct tcmu_cmd *cmd, struct tcmu_cmd_entry *
592 se_cmd->scsi_sense_length); 636 se_cmd->scsi_sense_length);
593 free_data_area(udev, cmd); 637 free_data_area(udev, cmd);
594 } else if (se_cmd->se_cmd_flags & SCF_BIDI) { 638 } else if (se_cmd->se_cmd_flags & SCF_BIDI) {
595 DECLARE_BITMAP(bitmap, DATA_BLOCK_BITS);
596
597 /* Get Data-In buffer before clean up */ 639 /* Get Data-In buffer before clean up */
598 bitmap_copy(bitmap, cmd->data_bitmap, DATA_BLOCK_BITS); 640 gather_data_area(udev, cmd, true);
599 gather_data_area(udev, bitmap,
600 se_cmd->t_bidi_data_sg, se_cmd->t_bidi_data_nents);
601 free_data_area(udev, cmd); 641 free_data_area(udev, cmd);
602 } else if (se_cmd->data_direction == DMA_FROM_DEVICE) { 642 } else if (se_cmd->data_direction == DMA_FROM_DEVICE) {
603 DECLARE_BITMAP(bitmap, DATA_BLOCK_BITS); 643 gather_data_area(udev, cmd, false);
604
605 bitmap_copy(bitmap, cmd->data_bitmap, DATA_BLOCK_BITS);
606 gather_data_area(udev, bitmap,
607 se_cmd->t_data_sg, se_cmd->t_data_nents);
608 free_data_area(udev, cmd); 644 free_data_area(udev, cmd);
609 } else if (se_cmd->data_direction == DMA_TO_DEVICE) { 645 } else if (se_cmd->data_direction == DMA_TO_DEVICE) {
610 free_data_area(udev, cmd); 646 free_data_area(udev, cmd);
@@ -1196,11 +1232,6 @@ static ssize_t tcmu_cmd_time_out_store(struct config_item *item, const char *pag
1196 if (ret < 0) 1232 if (ret < 0)
1197 return ret; 1233 return ret;
1198 1234
1199 if (!val) {
1200 pr_err("Illegal value for cmd_time_out\n");
1201 return -EINVAL;
1202 }
1203
1204 udev->cmd_time_out = val * MSEC_PER_SEC; 1235 udev->cmd_time_out = val * MSEC_PER_SEC;
1205 return count; 1236 return count;
1206} 1237}
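In the reworked gather_data_area(), the comment about the bidi case is the heart of the change: a BIDI command's Data-Out buffer occupies the first DIV_ROUND_UP(data_length, DATA_BLOCK_SIZE) blocks recorded in the command's bitmap, so the Data-In gather first pops exactly that many bits from its private copy before walking the remaining blocks. The standalone model below reproduces the skip arithmetic with a byte-per-block bitmap; the sizes are arbitrary.

/* Model of the BIDI skip in gather_data_area(): discard the Data-Out
 * blocks from a copy of the bitmap, then gather what is left.
 */
#include <stdio.h>

#define NBLOCKS    16
#define BLOCK_SIZE 4096

static int find_first(const unsigned char *map)
{
        for (int i = 0; i < NBLOCKS; i++)
                if (map[i])
                        return i;
        return -1;
}

int main(void)
{
        unsigned char map[NBLOCKS] = { 1, 1, 1, 1, 1, 1 }; /* blocks 0..5 */
        unsigned int data_out_len = 3 * BLOCK_SIZE + 1;    /* spans 4 blocks */
        unsigned int count = (data_out_len + BLOCK_SIZE - 1) / BLOCK_SIZE;

        while (count--)                 /* skip the Data-Out blocks first */
                map[find_first(map)] = 0;

        printf("first Data-In block: %d\n", find_first(map));  /* -> 4 */
        return 0;
}
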
diff --git a/drivers/usb/gadget/function/f_tcm.c b/drivers/usb/gadget/function/f_tcm.c
index d2351139342f..a82e2bd5ea34 100644
--- a/drivers/usb/gadget/function/f_tcm.c
+++ b/drivers/usb/gadget/function/f_tcm.c
@@ -373,7 +373,7 @@ static void bot_cleanup_old_alt(struct f_uas *fu)
373 usb_ep_free_request(fu->ep_in, fu->bot_req_in); 373 usb_ep_free_request(fu->ep_in, fu->bot_req_in);
374 usb_ep_free_request(fu->ep_out, fu->bot_req_out); 374 usb_ep_free_request(fu->ep_out, fu->bot_req_out);
375 usb_ep_free_request(fu->ep_out, fu->cmd.req); 375 usb_ep_free_request(fu->ep_out, fu->cmd.req);
376 usb_ep_free_request(fu->ep_out, fu->bot_status.req); 376 usb_ep_free_request(fu->ep_in, fu->bot_status.req);
377 377
378 kfree(fu->cmd.buf); 378 kfree(fu->cmd.buf);
379 379
diff --git a/drivers/video/fbdev/efifb.c b/drivers/video/fbdev/efifb.c
index 8c4dc1e1f94f..b827a8113e26 100644
--- a/drivers/video/fbdev/efifb.c
+++ b/drivers/video/fbdev/efifb.c
@@ -10,6 +10,7 @@
10#include <linux/efi.h> 10#include <linux/efi.h>
11#include <linux/errno.h> 11#include <linux/errno.h>
12#include <linux/fb.h> 12#include <linux/fb.h>
13#include <linux/pci.h>
13#include <linux/platform_device.h> 14#include <linux/platform_device.h>
14#include <linux/screen_info.h> 15#include <linux/screen_info.h>
15#include <video/vga.h> 16#include <video/vga.h>
@@ -143,6 +144,8 @@ static struct attribute *efifb_attrs[] = {
143}; 144};
144ATTRIBUTE_GROUPS(efifb); 145ATTRIBUTE_GROUPS(efifb);
145 146
147static bool pci_dev_disabled; /* FB base matches BAR of a disabled device */
148
146static int efifb_probe(struct platform_device *dev) 149static int efifb_probe(struct platform_device *dev)
147{ 150{
148 struct fb_info *info; 151 struct fb_info *info;
@@ -152,7 +155,7 @@ static int efifb_probe(struct platform_device *dev)
152 unsigned int size_total; 155 unsigned int size_total;
153 char *option = NULL; 156 char *option = NULL;
154 157
155 if (screen_info.orig_video_isVGA != VIDEO_TYPE_EFI) 158 if (screen_info.orig_video_isVGA != VIDEO_TYPE_EFI || pci_dev_disabled)
156 return -ENODEV; 159 return -ENODEV;
157 160
158 if (fb_get_options("efifb", &option)) 161 if (fb_get_options("efifb", &option))
@@ -360,3 +363,64 @@ static struct platform_driver efifb_driver = {
360}; 363};
361 364
362builtin_platform_driver(efifb_driver); 365builtin_platform_driver(efifb_driver);
366
367#if defined(CONFIG_PCI) && !defined(CONFIG_X86)
368
369static bool pci_bar_found; /* did we find a BAR matching the efifb base? */
370
371static void claim_efifb_bar(struct pci_dev *dev, int idx)
372{
373 u16 word;
374
375 pci_bar_found = true;
376
377 pci_read_config_word(dev, PCI_COMMAND, &word);
378 if (!(word & PCI_COMMAND_MEMORY)) {
379 pci_dev_disabled = true;
380 dev_err(&dev->dev,
381 "BAR %d: assigned to efifb but device is disabled!\n",
382 idx);
383 return;
384 }
385
386 if (pci_claim_resource(dev, idx)) {
387 pci_dev_disabled = true;
388 dev_err(&dev->dev,
389 "BAR %d: failed to claim resource for efifb!\n", idx);
390 return;
391 }
392
393 dev_info(&dev->dev, "BAR %d: assigned to efifb\n", idx);
394}
395
396static void efifb_fixup_resources(struct pci_dev *dev)
397{
398 u64 base = screen_info.lfb_base;
399 u64 size = screen_info.lfb_size;
400 int i;
401
402 if (pci_bar_found || screen_info.orig_video_isVGA != VIDEO_TYPE_EFI)
403 return;
404
405 if (screen_info.capabilities & VIDEO_CAPABILITY_64BIT_BASE)
406 base |= (u64)screen_info.ext_lfb_base << 32;
407
408 if (!base)
409 return;
410
411 for (i = 0; i < PCI_STD_RESOURCE_END; i++) {
412 struct resource *res = &dev->resource[i];
413
414 if (!(res->flags & IORESOURCE_MEM))
415 continue;
416
417 if (res->start <= base && res->end >= base + size - 1) {
418 claim_efifb_bar(dev, i);
419 break;
420 }
421 }
422}
423DECLARE_PCI_FIXUP_CLASS_HEADER(PCI_ANY_ID, PCI_ANY_ID, PCI_BASE_CLASS_DISPLAY,
424 16, efifb_fixup_resources);
425
426#endif
diff --git a/drivers/video/fbdev/omap/omapfb_main.c b/drivers/video/fbdev/omap/omapfb_main.c
index 1abba07b84b3..f4cbfb3b8a09 100644
--- a/drivers/video/fbdev/omap/omapfb_main.c
+++ b/drivers/video/fbdev/omap/omapfb_main.c
@@ -1608,19 +1608,6 @@ static int omapfb_find_ctrl(struct omapfb_device *fbdev)
1608 return 0; 1608 return 0;
1609} 1609}
1610 1610
1611static void check_required_callbacks(struct omapfb_device *fbdev)
1612{
1613#define _C(x) (fbdev->ctrl->x != NULL)
1614#define _P(x) (fbdev->panel->x != NULL)
1615 BUG_ON(fbdev->ctrl == NULL || fbdev->panel == NULL);
1616 BUG_ON(!(_C(init) && _C(cleanup) && _C(get_caps) &&
1617 _C(set_update_mode) && _C(setup_plane) && _C(enable_plane) &&
1618 _P(init) && _P(cleanup) && _P(enable) && _P(disable) &&
1619 _P(get_caps)));
1620#undef _P
1621#undef _C
1622}
1623
1624/* 1611/*
1625 * Called by LDM binding to probe and attach a new device. 1612 * Called by LDM binding to probe and attach a new device.
1626 * Initialization sequence: 1613 * Initialization sequence:
@@ -1705,8 +1692,6 @@ static int omapfb_do_probe(struct platform_device *pdev,
1705 omapfb_ops.fb_mmap = omapfb_mmap; 1692 omapfb_ops.fb_mmap = omapfb_mmap;
1706 init_state++; 1693 init_state++;
1707 1694
1708 check_required_callbacks(fbdev);
1709
1710 r = planes_init(fbdev); 1695 r = planes_init(fbdev);
1711 if (r) 1696 if (r)
1712 goto cleanup; 1697 goto cleanup;
diff --git a/drivers/video/fbdev/ssd1307fb.c b/drivers/video/fbdev/ssd1307fb.c
index bd017b57c47f..f599520374dd 100644
--- a/drivers/video/fbdev/ssd1307fb.c
+++ b/drivers/video/fbdev/ssd1307fb.c
@@ -578,10 +578,14 @@ static int ssd1307fb_probe(struct i2c_client *client,
578 578
579 par->vbat_reg = devm_regulator_get_optional(&client->dev, "vbat"); 579 par->vbat_reg = devm_regulator_get_optional(&client->dev, "vbat");
580 if (IS_ERR(par->vbat_reg)) { 580 if (IS_ERR(par->vbat_reg)) {
581 dev_err(&client->dev, "failed to get VBAT regulator: %ld\n",
582 PTR_ERR(par->vbat_reg));
583 ret = PTR_ERR(par->vbat_reg); 581 ret = PTR_ERR(par->vbat_reg);
584 goto fb_alloc_error; 582 if (ret == -ENODEV) {
583 par->vbat_reg = NULL;
584 } else {
585 dev_err(&client->dev, "failed to get VBAT regulator: %d\n",
586 ret);
587 goto fb_alloc_error;
588 }
585 } 589 }
586 590
587 if (of_property_read_u32(node, "solomon,width", &par->width)) 591 if (of_property_read_u32(node, "solomon,width", &par->width))
@@ -668,10 +672,13 @@ static int ssd1307fb_probe(struct i2c_client *client,
668 udelay(4); 672 udelay(4);
669 } 673 }
670 674
671 ret = regulator_enable(par->vbat_reg); 675 if (par->vbat_reg) {
672 if (ret) { 676 ret = regulator_enable(par->vbat_reg);
673 dev_err(&client->dev, "failed to enable VBAT: %d\n", ret); 677 if (ret) {
674 goto reset_oled_error; 678 dev_err(&client->dev, "failed to enable VBAT: %d\n",
679 ret);
680 goto reset_oled_error;
681 }
675 } 682 }
676 683
677 ret = ssd1307fb_init(par); 684 ret = ssd1307fb_init(par);
@@ -710,7 +717,8 @@ panel_init_error:
 		pwm_put(par->pwm);
 	};
 regulator_enable_error:
-	regulator_disable(par->vbat_reg);
+	if (par->vbat_reg)
+		regulator_disable(par->vbat_reg);
 reset_oled_error:
 	fb_deferred_io_cleanup(info);
 fb_alloc_error:
diff --git a/drivers/video/fbdev/xen-fbfront.c b/drivers/video/fbdev/xen-fbfront.c
index d0115a7af0a9..3ee309c50b2d 100644
--- a/drivers/video/fbdev/xen-fbfront.c
+++ b/drivers/video/fbdev/xen-fbfront.c
@@ -643,7 +643,6 @@ static void xenfb_backend_changed(struct xenbus_device *dev,
 		break;
 
 	case XenbusStateInitWait:
-InitWait:
 		xenbus_switch_state(dev, XenbusStateConnected);
 		break;
 
@@ -654,7 +653,8 @@ InitWait:
 		 * get Connected twice here.
 		 */
 		if (dev->state != XenbusStateConnected)
-			goto InitWait; /* no InitWait seen yet, fudge it */
+			/* no InitWait seen yet, fudge it */
+			xenbus_switch_state(dev, XenbusStateConnected);
 
 		if (xenbus_read_unsigned(info->xbdev->otherend,
 					 "request-update", 0))
diff --git a/drivers/virtio/virtio.c b/drivers/virtio/virtio.c
index 400d70b69379..48230a5e12f2 100644
--- a/drivers/virtio/virtio.c
+++ b/drivers/virtio/virtio.c
@@ -232,6 +232,12 @@ static int virtio_dev_probe(struct device *_d)
 		if (device_features & (1ULL << i))
 			__virtio_set_bit(dev, i);
 
+	if (drv->validate) {
+		err = drv->validate(dev);
+		if (err)
+			goto err;
+	}
+
 	err = virtio_finalize_features(dev);
 	if (err)
 		goto err;
diff --git a/drivers/virtio/virtio_pci_common.c b/drivers/virtio/virtio_pci_common.c
index 590534910dc6..698d5d06fa03 100644
--- a/drivers/virtio/virtio_pci_common.c
+++ b/drivers/virtio/virtio_pci_common.c
@@ -33,8 +33,10 @@ void vp_synchronize_vectors(struct virtio_device *vdev)
 	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
 	int i;
 
-	synchronize_irq(pci_irq_vector(vp_dev->pci_dev, 0));
-	for (i = 1; i < vp_dev->msix_vectors; i++)
+	if (vp_dev->intx_enabled)
+		synchronize_irq(vp_dev->pci_dev->irq);
+
+	for (i = 0; i < vp_dev->msix_vectors; ++i)
 		synchronize_irq(pci_irq_vector(vp_dev->pci_dev, i));
 }
 
@@ -60,13 +62,16 @@ static irqreturn_t vp_config_changed(int irq, void *opaque)
 static irqreturn_t vp_vring_interrupt(int irq, void *opaque)
 {
 	struct virtio_pci_device *vp_dev = opaque;
+	struct virtio_pci_vq_info *info;
 	irqreturn_t ret = IRQ_NONE;
-	struct virtqueue *vq;
+	unsigned long flags;
 
-	list_for_each_entry(vq, &vp_dev->vdev.vqs, list) {
-		if (vq->callback && vring_interrupt(irq, vq) == IRQ_HANDLED)
+	spin_lock_irqsave(&vp_dev->lock, flags);
+	list_for_each_entry(info, &vp_dev->virtqueues, node) {
+		if (vring_interrupt(irq, info->vq) == IRQ_HANDLED)
 			ret = IRQ_HANDLED;
 	}
+	spin_unlock_irqrestore(&vp_dev->lock, flags);
 
 	return ret;
 }
@@ -97,186 +102,244 @@ static irqreturn_t vp_interrupt(int irq, void *opaque)
 	return vp_vring_interrupt(irq, opaque);
 }
 
-static void vp_remove_vqs(struct virtio_device *vdev)
+static int vp_request_msix_vectors(struct virtio_device *vdev, int nvectors,
+				   bool per_vq_vectors, struct irq_affinity *desc)
 {
 	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
-	struct virtqueue *vq, *n;
+	const char *name = dev_name(&vp_dev->vdev.dev);
+	unsigned i, v;
+	int err = -ENOMEM;
 
-	list_for_each_entry_safe(vq, n, &vdev->vqs, list) {
-		if (vp_dev->msix_vector_map) {
-			int v = vp_dev->msix_vector_map[vq->index];
+	vp_dev->msix_vectors = nvectors;
 
-			if (v != VIRTIO_MSI_NO_VECTOR)
-				free_irq(pci_irq_vector(vp_dev->pci_dev, v),
-					vq);
-		}
-		vp_dev->del_vq(vq);
+	vp_dev->msix_names = kmalloc(nvectors * sizeof *vp_dev->msix_names,
+				     GFP_KERNEL);
+	if (!vp_dev->msix_names)
+		goto error;
+	vp_dev->msix_affinity_masks
+		= kzalloc(nvectors * sizeof *vp_dev->msix_affinity_masks,
+			  GFP_KERNEL);
+	if (!vp_dev->msix_affinity_masks)
+		goto error;
+	for (i = 0; i < nvectors; ++i)
+		if (!alloc_cpumask_var(&vp_dev->msix_affinity_masks[i],
+					GFP_KERNEL))
+			goto error;
+
+	err = pci_alloc_irq_vectors_affinity(vp_dev->pci_dev, nvectors,
+					     nvectors, PCI_IRQ_MSIX |
+					     (desc ? PCI_IRQ_AFFINITY : 0),
+					     desc);
+	if (err < 0)
+		goto error;
+	vp_dev->msix_enabled = 1;
+
+	/* Set the vector used for configuration */
+	v = vp_dev->msix_used_vectors;
+	snprintf(vp_dev->msix_names[v], sizeof *vp_dev->msix_names,
+		 "%s-config", name);
+	err = request_irq(pci_irq_vector(vp_dev->pci_dev, v),
+			  vp_config_changed, 0, vp_dev->msix_names[v],
+			  vp_dev);
+	if (err)
+		goto error;
+	++vp_dev->msix_used_vectors;
+
+	v = vp_dev->config_vector(vp_dev, v);
+	/* Verify we had enough resources to assign the vector */
+	if (v == VIRTIO_MSI_NO_VECTOR) {
+		err = -EBUSY;
+		goto error;
 	}
+
+	if (!per_vq_vectors) {
+		/* Shared vector for all VQs */
+		v = vp_dev->msix_used_vectors;
+		snprintf(vp_dev->msix_names[v], sizeof *vp_dev->msix_names,
+			 "%s-virtqueues", name);
+		err = request_irq(pci_irq_vector(vp_dev->pci_dev, v),
+				  vp_vring_interrupt, 0, vp_dev->msix_names[v],
+				  vp_dev);
+		if (err)
+			goto error;
+		++vp_dev->msix_used_vectors;
+	}
+	return 0;
+error:
+	return err;
+}
+
+static struct virtqueue *vp_setup_vq(struct virtio_device *vdev, unsigned index,
+				     void (*callback)(struct virtqueue *vq),
+				     const char *name,
+				     u16 msix_vec)
+{
+	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
+	struct virtio_pci_vq_info *info = kmalloc(sizeof *info, GFP_KERNEL);
+	struct virtqueue *vq;
+	unsigned long flags;
+
+	/* fill out our structure that represents an active queue */
+	if (!info)
+		return ERR_PTR(-ENOMEM);
+
+	vq = vp_dev->setup_vq(vp_dev, info, index, callback, name,
+			      msix_vec);
+	if (IS_ERR(vq))
+		goto out_info;
+
+	info->vq = vq;
+	if (callback) {
+		spin_lock_irqsave(&vp_dev->lock, flags);
+		list_add(&info->node, &vp_dev->virtqueues);
+		spin_unlock_irqrestore(&vp_dev->lock, flags);
+	} else {
+		INIT_LIST_HEAD(&info->node);
+	}
+
+	vp_dev->vqs[index] = info;
+	return vq;
+
+out_info:
+	kfree(info);
+	return vq;
+}
+
+static void vp_del_vq(struct virtqueue *vq)
+{
+	struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev);
+	struct virtio_pci_vq_info *info = vp_dev->vqs[vq->index];
+	unsigned long flags;
+
+	spin_lock_irqsave(&vp_dev->lock, flags);
+	list_del(&info->node);
+	spin_unlock_irqrestore(&vp_dev->lock, flags);
+
+	vp_dev->del_vq(info);
+	kfree(info);
 }
 
 /* the config->del_vqs() implementation */
 void vp_del_vqs(struct virtio_device *vdev)
 {
 	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
+	struct virtqueue *vq, *n;
 	int i;
 
-	if (WARN_ON_ONCE(list_empty_careful(&vdev->vqs)))
-		return;
+	list_for_each_entry_safe(vq, n, &vdev->vqs, list) {
+		if (vp_dev->per_vq_vectors) {
+			int v = vp_dev->vqs[vq->index]->msix_vector;
 
-	vp_remove_vqs(vdev);
+			if (v != VIRTIO_MSI_NO_VECTOR) {
+				int irq = pci_irq_vector(vp_dev->pci_dev, v);
+
+				irq_set_affinity_hint(irq, NULL);
+				free_irq(irq, vq);
+			}
+		}
+		vp_del_vq(vq);
+	}
+	vp_dev->per_vq_vectors = false;
+
+	if (vp_dev->intx_enabled) {
+		free_irq(vp_dev->pci_dev->irq, vp_dev);
+		vp_dev->intx_enabled = 0;
+	}
 
-	if (vp_dev->pci_dev->msix_enabled) {
-		for (i = 0; i < vp_dev->msix_vectors; i++)
+	for (i = 0; i < vp_dev->msix_used_vectors; ++i)
+		free_irq(pci_irq_vector(vp_dev->pci_dev, i), vp_dev);
+
+	for (i = 0; i < vp_dev->msix_vectors; i++)
+		if (vp_dev->msix_affinity_masks[i])
 			free_cpumask_var(vp_dev->msix_affinity_masks[i]);
 
+	if (vp_dev->msix_enabled) {
 		/* Disable the vector used for configuration */
 		vp_dev->config_vector(vp_dev, VIRTIO_MSI_NO_VECTOR);
 
-		kfree(vp_dev->msix_affinity_masks);
-		kfree(vp_dev->msix_names);
-		kfree(vp_dev->msix_vector_map);
+		pci_free_irq_vectors(vp_dev->pci_dev);
+		vp_dev->msix_enabled = 0;
 	}
 
-	free_irq(pci_irq_vector(vp_dev->pci_dev, 0), vp_dev);
-	pci_free_irq_vectors(vp_dev->pci_dev);
+	vp_dev->msix_vectors = 0;
+	vp_dev->msix_used_vectors = 0;
+	kfree(vp_dev->msix_names);
+	vp_dev->msix_names = NULL;
+	kfree(vp_dev->msix_affinity_masks);
+	vp_dev->msix_affinity_masks = NULL;
+	kfree(vp_dev->vqs);
+	vp_dev->vqs = NULL;
 }
 
 static int vp_find_vqs_msix(struct virtio_device *vdev, unsigned nvqs,
 		struct virtqueue *vqs[], vq_callback_t *callbacks[],
-		const char * const names[], struct irq_affinity *desc)
+		const char * const names[], bool per_vq_vectors,
+		struct irq_affinity *desc)
 {
 	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
-	const char *name = dev_name(&vp_dev->vdev.dev);
-	int i, j, err = -ENOMEM, allocated_vectors, nvectors;
-	unsigned flags = PCI_IRQ_MSIX;
-	bool shared = false;
 	u16 msix_vec;
+	int i, err, nvectors, allocated_vectors;
 
-	if (desc) {
-		flags |= PCI_IRQ_AFFINITY;
-		desc->pre_vectors++; /* virtio config vector */
-	}
-
-	nvectors = 1;
-	for (i = 0; i < nvqs; i++)
-		if (callbacks[i])
-			nvectors++;
-
-	/* Try one vector per queue first. */
-	err = pci_alloc_irq_vectors_affinity(vp_dev->pci_dev, nvectors,
-			nvectors, flags, desc);
-	if (err < 0) {
-		/* Fallback to one vector for config, one shared for queues. */
-		shared = true;
-		err = pci_alloc_irq_vectors(vp_dev->pci_dev, 2, 2,
-				PCI_IRQ_MSIX);
-		if (err < 0)
-			return err;
-	}
-	if (err < 0)
-		return err;
-
-	vp_dev->msix_vectors = nvectors;
-	vp_dev->msix_names = kmalloc_array(nvectors,
-			sizeof(*vp_dev->msix_names), GFP_KERNEL);
-	if (!vp_dev->msix_names)
-		goto out_free_irq_vectors;
-
-	vp_dev->msix_affinity_masks = kcalloc(nvectors,
-			sizeof(*vp_dev->msix_affinity_masks), GFP_KERNEL);
-	if (!vp_dev->msix_affinity_masks)
-		goto out_free_msix_names;
+	vp_dev->vqs = kcalloc(nvqs, sizeof(*vp_dev->vqs), GFP_KERNEL);
+	if (!vp_dev->vqs)
+		return -ENOMEM;
 
-	for (i = 0; i < nvectors; ++i) {
-		if (!alloc_cpumask_var(&vp_dev->msix_affinity_masks[i],
-				GFP_KERNEL))
-			goto out_free_msix_affinity_masks;
+	if (per_vq_vectors) {
+		/* Best option: one for change interrupt, one per vq. */
+		nvectors = 1;
+		for (i = 0; i < nvqs; ++i)
+			if (callbacks[i])
+				++nvectors;
+	} else {
+		/* Second best: one for change, shared for all vqs. */
+		nvectors = 2;
 	}
 
-	/* Set the vector used for configuration */
-	snprintf(vp_dev->msix_names[0], sizeof(*vp_dev->msix_names),
-			"%s-config", name);
-	err = request_irq(pci_irq_vector(vp_dev->pci_dev, 0), vp_config_changed,
-			0, vp_dev->msix_names[0], vp_dev);
+	err = vp_request_msix_vectors(vdev, nvectors, per_vq_vectors,
+				      per_vq_vectors ? desc : NULL);
 	if (err)
-		goto out_free_msix_affinity_masks;
+		goto error_find;
 
-	/* Verify we had enough resources to assign the vector */
-	if (vp_dev->config_vector(vp_dev, 0) == VIRTIO_MSI_NO_VECTOR) {
-		err = -EBUSY;
-		goto out_free_config_irq;
-	}
-
-	vp_dev->msix_vector_map = kmalloc_array(nvqs,
-			sizeof(*vp_dev->msix_vector_map), GFP_KERNEL);
-	if (!vp_dev->msix_vector_map)
-		goto out_disable_config_irq;
-
-	allocated_vectors = j = 1; /* vector 0 is the config interrupt */
+	vp_dev->per_vq_vectors = per_vq_vectors;
+	allocated_vectors = vp_dev->msix_used_vectors;
 	for (i = 0; i < nvqs; ++i) {
 		if (!names[i]) {
 			vqs[i] = NULL;
 			continue;
 		}
 
-		if (callbacks[i])
-			msix_vec = allocated_vectors;
-		else
+		if (!callbacks[i])
 			msix_vec = VIRTIO_MSI_NO_VECTOR;
-
-		vqs[i] = vp_dev->setup_vq(vp_dev, i, callbacks[i], names[i],
-				msix_vec);
+		else if (vp_dev->per_vq_vectors)
+			msix_vec = allocated_vectors++;
+		else
+			msix_vec = VP_MSIX_VQ_VECTOR;
+		vqs[i] = vp_setup_vq(vdev, i, callbacks[i], names[i],
+				     msix_vec);
 		if (IS_ERR(vqs[i])) {
 			err = PTR_ERR(vqs[i]);
-			goto out_remove_vqs;
+			goto error_find;
 		}
 
-		if (msix_vec == VIRTIO_MSI_NO_VECTOR) {
-			vp_dev->msix_vector_map[i] = VIRTIO_MSI_NO_VECTOR;
+		if (!vp_dev->per_vq_vectors || msix_vec == VIRTIO_MSI_NO_VECTOR)
 			continue;
-		}
 
-		snprintf(vp_dev->msix_names[j],
-				sizeof(*vp_dev->msix_names), "%s-%s",
+		/* allocate per-vq irq if available and necessary */
+		snprintf(vp_dev->msix_names[msix_vec],
+			 sizeof *vp_dev->msix_names,
+			 "%s-%s",
 			 dev_name(&vp_dev->vdev.dev), names[i]);
 		err = request_irq(pci_irq_vector(vp_dev->pci_dev, msix_vec),
-				vring_interrupt, IRQF_SHARED,
-				vp_dev->msix_names[j], vqs[i]);
-		if (err) {
-			/* don't free this irq on error */
-			vp_dev->msix_vector_map[i] = VIRTIO_MSI_NO_VECTOR;
-			goto out_remove_vqs;
-		}
-		vp_dev->msix_vector_map[i] = msix_vec;
-		j++;
-
-		/*
-		 * Use a different vector for each queue if they are available,
-		 * else share the same vector for all VQs.
-		 */
-		if (!shared)
-			allocated_vectors++;
+				  vring_interrupt, 0,
+				  vp_dev->msix_names[msix_vec],
+				  vqs[i]);
+		if (err)
+			goto error_find;
 	}
-
 	return 0;
 
-out_remove_vqs:
-	vp_remove_vqs(vdev);
-	kfree(vp_dev->msix_vector_map);
-out_disable_config_irq:
-	vp_dev->config_vector(vp_dev, VIRTIO_MSI_NO_VECTOR);
-out_free_config_irq:
-	free_irq(pci_irq_vector(vp_dev->pci_dev, 0), vp_dev);
-out_free_msix_affinity_masks:
-	for (i = 0; i < nvectors; i++) {
-		if (vp_dev->msix_affinity_masks[i])
-			free_cpumask_var(vp_dev->msix_affinity_masks[i]);
-	}
-	kfree(vp_dev->msix_affinity_masks);
-out_free_msix_names:
-	kfree(vp_dev->msix_names);
-out_free_irq_vectors:
-	pci_free_irq_vectors(vp_dev->pci_dev);
+error_find:
+	vp_del_vqs(vdev);
 	return err;
 }
 
@@ -287,29 +350,33 @@ static int vp_find_vqs_intx(struct virtio_device *vdev, unsigned nvqs,
 	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
 	int i, err;
 
+	vp_dev->vqs = kcalloc(nvqs, sizeof(*vp_dev->vqs), GFP_KERNEL);
+	if (!vp_dev->vqs)
+		return -ENOMEM;
+
 	err = request_irq(vp_dev->pci_dev->irq, vp_interrupt, IRQF_SHARED,
 			dev_name(&vdev->dev), vp_dev);
 	if (err)
-		return err;
+		goto out_del_vqs;
 
+	vp_dev->intx_enabled = 1;
+	vp_dev->per_vq_vectors = false;
 	for (i = 0; i < nvqs; ++i) {
 		if (!names[i]) {
 			vqs[i] = NULL;
 			continue;
 		}
-		vqs[i] = vp_dev->setup_vq(vp_dev, i, callbacks[i], names[i],
-				VIRTIO_MSI_NO_VECTOR);
+		vqs[i] = vp_setup_vq(vdev, i, callbacks[i], names[i],
+				     VIRTIO_MSI_NO_VECTOR);
 		if (IS_ERR(vqs[i])) {
 			err = PTR_ERR(vqs[i]);
-			goto out_remove_vqs;
+			goto out_del_vqs;
 		}
 	}
 
 	return 0;
-
-out_remove_vqs:
-	vp_remove_vqs(vdev);
-	free_irq(pci_irq_vector(vp_dev->pci_dev, 0), vp_dev);
+out_del_vqs:
+	vp_del_vqs(vdev);
 	return err;
 }
 
@@ -320,9 +387,15 @@ int vp_find_vqs(struct virtio_device *vdev, unsigned nvqs,
 {
 	int err;
 
-	err = vp_find_vqs_msix(vdev, nvqs, vqs, callbacks, names, desc);
+	/* Try MSI-X with one vector per queue. */
+	err = vp_find_vqs_msix(vdev, nvqs, vqs, callbacks, names, true, desc);
 	if (!err)
 		return 0;
+	/* Fallback: MSI-X with one vector for config, one shared for queues. */
+	err = vp_find_vqs_msix(vdev, nvqs, vqs, callbacks, names, false, desc);
+	if (!err)
+		return 0;
+	/* Finally fall back to regular interrupts. */
 	return vp_find_vqs_intx(vdev, nvqs, vqs, callbacks, names);
 }
 
@@ -342,15 +415,16 @@ int vp_set_vq_affinity(struct virtqueue *vq, int cpu)
 {
 	struct virtio_device *vdev = vq->vdev;
 	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
+	struct virtio_pci_vq_info *info = vp_dev->vqs[vq->index];
+	struct cpumask *mask;
+	unsigned int irq;
 
 	if (!vq->callback)
 		return -EINVAL;
 
-	if (vp_dev->pci_dev->msix_enabled) {
-		int vec = vp_dev->msix_vector_map[vq->index];
-		struct cpumask *mask = vp_dev->msix_affinity_masks[vec];
-		unsigned int irq = pci_irq_vector(vp_dev->pci_dev, vec);
-
+	if (vp_dev->msix_enabled) {
+		mask = vp_dev->msix_affinity_masks[info->msix_vector];
+		irq = pci_irq_vector(vp_dev->pci_dev, info->msix_vector);
 		if (cpu == -1)
 			irq_set_affinity_hint(irq, NULL);
 		else {
@@ -365,12 +439,13 @@ int vp_set_vq_affinity(struct virtqueue *vq, int cpu)
 const struct cpumask *vp_get_vq_affinity(struct virtio_device *vdev, int index)
 {
 	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
-	unsigned int *map = vp_dev->msix_vector_map;
 
-	if (!map || map[index] == VIRTIO_MSI_NO_VECTOR)
+	if (!vp_dev->per_vq_vectors ||
+	    vp_dev->vqs[index]->msix_vector == VIRTIO_MSI_NO_VECTOR)
 		return NULL;
 
-	return pci_irq_get_affinity(vp_dev->pci_dev, map[index]);
+	return pci_irq_get_affinity(vp_dev->pci_dev,
+				    vp_dev->vqs[index]->msix_vector);
 }
 
 #ifdef CONFIG_PM_SLEEP
@@ -441,6 +516,8 @@ static int virtio_pci_probe(struct pci_dev *pci_dev,
 	vp_dev->vdev.dev.parent = &pci_dev->dev;
 	vp_dev->vdev.dev.release = virtio_pci_release_dev;
 	vp_dev->pci_dev = pci_dev;
+	INIT_LIST_HEAD(&vp_dev->virtqueues);
+	spin_lock_init(&vp_dev->lock);
 
 	/* enable the device */
 	rc = pci_enable_device(pci_dev);
diff --git a/drivers/virtio/virtio_pci_common.h b/drivers/virtio/virtio_pci_common.h
index ac8c9d788964..e96334aec1e0 100644
--- a/drivers/virtio/virtio_pci_common.h
+++ b/drivers/virtio/virtio_pci_common.h
@@ -31,6 +31,17 @@
 #include <linux/highmem.h>
 #include <linux/spinlock.h>
 
+struct virtio_pci_vq_info {
+	/* the actual virtqueue */
+	struct virtqueue *vq;
+
+	/* the list node for the virtqueues list */
+	struct list_head node;
+
+	/* MSI-X vector (or none) */
+	unsigned msix_vector;
+};
+
 /* Our device structure */
 struct virtio_pci_device {
 	struct virtio_device vdev;
@@ -64,25 +75,47 @@ struct virtio_pci_device {
 	/* the IO mapping for the PCI config space */
 	void __iomem *ioaddr;
 
+	/* a list of queues so we can dispatch IRQs */
+	spinlock_t lock;
+	struct list_head virtqueues;
+
+	/* array of all queues for house-keeping */
+	struct virtio_pci_vq_info **vqs;
+
+	/* MSI-X support */
+	int msix_enabled;
+	int intx_enabled;
 	cpumask_var_t *msix_affinity_masks;
 	/* Name strings for interrupts. This size should be enough,
 	 * and I'm too lazy to allocate each name separately. */
 	char (*msix_names)[256];
-	/* Total Number of MSI-X vectors (including per-VQ ones). */
-	int msix_vectors;
-	/* Map of per-VQ MSI-X vectors, may be NULL */
-	unsigned *msix_vector_map;
+	/* Number of available vectors */
+	unsigned msix_vectors;
+	/* Vectors allocated, excluding per-vq vectors if any */
+	unsigned msix_used_vectors;
+
+	/* Whether we have vector per vq */
+	bool per_vq_vectors;
 
 	struct virtqueue *(*setup_vq)(struct virtio_pci_device *vp_dev,
+				      struct virtio_pci_vq_info *info,
 				      unsigned idx,
 				      void (*callback)(struct virtqueue *vq),
 				      const char *name,
 				      u16 msix_vec);
-	void (*del_vq)(struct virtqueue *vq);
+	void (*del_vq)(struct virtio_pci_vq_info *info);
 
 	u16 (*config_vector)(struct virtio_pci_device *vp_dev, u16 vector);
 };
 
+/* Constants for MSI-X */
+/* Use first vector for configuration changes, second and the rest for
+ * virtqueues Thus, we need at least 2 vectors for MSI. */
+enum {
+	VP_MSIX_CONFIG_VECTOR = 0,
+	VP_MSIX_VQ_VECTOR = 1,
+};
+
 /* Convert a generic virtio device to our structure */
 static struct virtio_pci_device *to_vp_device(struct virtio_device *vdev)
 {
diff --git a/drivers/virtio/virtio_pci_legacy.c b/drivers/virtio/virtio_pci_legacy.c
index f7362c5fe18a..4bfa48fb1324 100644
--- a/drivers/virtio/virtio_pci_legacy.c
+++ b/drivers/virtio/virtio_pci_legacy.c
@@ -112,6 +112,7 @@ static u16 vp_config_vector(struct virtio_pci_device *vp_dev, u16 vector)
 }
 
 static struct virtqueue *setup_vq(struct virtio_pci_device *vp_dev,
+				  struct virtio_pci_vq_info *info,
 				  unsigned index,
 				  void (*callback)(struct virtqueue *vq),
 				  const char *name,
@@ -129,6 +130,8 @@ static struct virtqueue *setup_vq(struct virtio_pci_device *vp_dev,
 	if (!num || ioread32(vp_dev->ioaddr + VIRTIO_PCI_QUEUE_PFN))
 		return ERR_PTR(-ENOENT);
 
+	info->msix_vector = msix_vec;
+
 	/* create the vring */
 	vq = vring_create_virtqueue(index, num,
 				    VIRTIO_PCI_VRING_ALIGN, &vp_dev->vdev,
@@ -159,13 +162,14 @@ out_deactivate:
 	return ERR_PTR(err);
 }
 
-static void del_vq(struct virtqueue *vq)
+static void del_vq(struct virtio_pci_vq_info *info)
 {
+	struct virtqueue *vq = info->vq;
 	struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev);
 
 	iowrite16(vq->index, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_SEL);
 
-	if (vp_dev->pci_dev->msix_enabled) {
+	if (vp_dev->msix_enabled) {
 		iowrite16(VIRTIO_MSI_NO_VECTOR,
 			  vp_dev->ioaddr + VIRTIO_MSI_QUEUE_VECTOR);
 		/* Flush the write out to device */
diff --git a/drivers/virtio/virtio_pci_modern.c b/drivers/virtio/virtio_pci_modern.c
index 7bc3004b840e..8978f109d2d7 100644
--- a/drivers/virtio/virtio_pci_modern.c
+++ b/drivers/virtio/virtio_pci_modern.c
@@ -293,6 +293,7 @@ static u16 vp_config_vector(struct virtio_pci_device *vp_dev, u16 vector)
 }
 
 static struct virtqueue *setup_vq(struct virtio_pci_device *vp_dev,
+				  struct virtio_pci_vq_info *info,
 				  unsigned index,
 				  void (*callback)(struct virtqueue *vq),
 				  const char *name,
@@ -322,6 +323,8 @@ static struct virtqueue *setup_vq(struct virtio_pci_device *vp_dev,
 	/* get offset of notification word for this vq */
 	off = vp_ioread16(&cfg->queue_notify_off);
 
+	info->msix_vector = msix_vec;
+
 	/* create the vring */
 	vq = vring_create_virtqueue(index, num,
 				    SMP_CACHE_BYTES, &vp_dev->vdev,
@@ -405,13 +408,14 @@ static int vp_modern_find_vqs(struct virtio_device *vdev, unsigned nvqs,
 	return 0;
 }
 
-static void del_vq(struct virtqueue *vq)
+static void del_vq(struct virtio_pci_vq_info *info)
 {
+	struct virtqueue *vq = info->vq;
 	struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev);
 
 	vp_iowrite16(vq->index, &vp_dev->common->queue_select);
 
-	if (vp_dev->pci_dev->msix_enabled) {
+	if (vp_dev->msix_enabled) {
 		vp_iowrite16(VIRTIO_MSI_NO_VECTOR,
 			     &vp_dev->common->queue_msix_vector);
 		/* Flush the write out to device */
diff --git a/drivers/xen/xenbus/xenbus_dev_frontend.c b/drivers/xen/xenbus/xenbus_dev_frontend.c
index 1f4733b80c87..f3b089b7c0b6 100644
--- a/drivers/xen/xenbus/xenbus_dev_frontend.c
+++ b/drivers/xen/xenbus/xenbus_dev_frontend.c
@@ -442,8 +442,10 @@ static int xenbus_write_transaction(unsigned msg_type,
 		return xenbus_command_reply(u, XS_ERROR, "ENOENT");
 
 	rc = xenbus_dev_request_and_reply(&u->u.msg, u);
-	if (rc)
+	if (rc && trans) {
+		list_del(&trans->list);
 		kfree(trans);
+	}
 
 out:
 	return rc;
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index a18510be76c1..5e71f1ea3391 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -7910,7 +7910,6 @@ struct btrfs_retry_complete {
 static void btrfs_retry_endio_nocsum(struct bio *bio)
 {
 	struct btrfs_retry_complete *done = bio->bi_private;
-	struct inode *inode;
 	struct bio_vec *bvec;
 	int i;
 
@@ -7918,12 +7917,12 @@ static void btrfs_retry_endio_nocsum(struct bio *bio)
 		goto end;
 
 	ASSERT(bio->bi_vcnt == 1);
-	inode = bio->bi_io_vec->bv_page->mapping->host;
-	ASSERT(bio->bi_io_vec->bv_len == btrfs_inode_sectorsize(inode));
+	ASSERT(bio->bi_io_vec->bv_len == btrfs_inode_sectorsize(done->inode));
 
 	done->uptodate = 1;
 	bio_for_each_segment_all(bvec, bio, i)
-		clean_io_failure(BTRFS_I(done->inode), done->start, bvec->bv_page, 0);
+		clean_io_failure(BTRFS_I(done->inode), done->start,
+				 bvec->bv_page, 0);
 end:
 	complete(&done->done);
 	bio_put(bio);
@@ -7973,8 +7972,10 @@ next_block_or_try_again:
 
 		start += sectorsize;
 
-		if (nr_sectors--) {
+		nr_sectors--;
+		if (nr_sectors) {
 			pgoff += sectorsize;
+			ASSERT(pgoff < PAGE_SIZE);
 			goto next_block_or_try_again;
 		}
 	}
@@ -7986,9 +7987,7 @@ static void btrfs_retry_endio(struct bio *bio)
 {
 	struct btrfs_retry_complete *done = bio->bi_private;
 	struct btrfs_io_bio *io_bio = btrfs_io_bio(bio);
-	struct inode *inode;
 	struct bio_vec *bvec;
-	u64 start;
 	int uptodate;
 	int ret;
 	int i;
@@ -7998,11 +7997,8 @@ static void btrfs_retry_endio(struct bio *bio)
 
 	uptodate = 1;
 
-	start = done->start;
-
 	ASSERT(bio->bi_vcnt == 1);
-	inode = bio->bi_io_vec->bv_page->mapping->host;
-	ASSERT(bio->bi_io_vec->bv_len == btrfs_inode_sectorsize(inode));
+	ASSERT(bio->bi_io_vec->bv_len == btrfs_inode_sectorsize(done->inode));
 
 	bio_for_each_segment_all(bvec, bio, i) {
 		ret = __readpage_endio_check(done->inode, io_bio, i,
@@ -8080,8 +8076,10 @@ next:
 
 		ASSERT(nr_sectors);
 
-		if (--nr_sectors) {
+		nr_sectors--;
+		if (nr_sectors) {
 			pgoff += sectorsize;
+			ASSERT(pgoff < PAGE_SIZE);
 			goto next_block;
 		}
 	}
diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
index da687dc79cce..9530a333d302 100644
--- a/fs/btrfs/super.c
+++ b/fs/btrfs/super.c
@@ -549,16 +549,19 @@ int btrfs_parse_options(struct btrfs_fs_info *info, char *options,
 		case Opt_ssd:
 			btrfs_set_and_info(info, SSD,
 					   "use ssd allocation scheme");
+			btrfs_clear_opt(info->mount_opt, NOSSD);
 			break;
 		case Opt_ssd_spread:
 			btrfs_set_and_info(info, SSD_SPREAD,
 					   "use spread ssd allocation scheme");
 			btrfs_set_opt(info->mount_opt, SSD);
+			btrfs_clear_opt(info->mount_opt, NOSSD);
 			break;
 		case Opt_nossd:
 			btrfs_set_and_info(info, NOSSD,
 					   "not using ssd allocation scheme");
 			btrfs_clear_opt(info->mount_opt, SSD);
+			btrfs_clear_opt(info->mount_opt, SSD_SPREAD);
 			break;
 		case Opt_barrier:
 			btrfs_clear_and_info(info, NOBARRIER,
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index 73d56eef5e60..ab8a66d852f9 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -6213,7 +6213,7 @@ int btrfs_map_bio(struct btrfs_fs_info *fs_info, struct bio *bio,
 	for (dev_nr = 0; dev_nr < total_devs; dev_nr++) {
 		dev = bbio->stripes[dev_nr].dev;
 		if (!dev || !dev->bdev ||
-		    (bio_op(bio) == REQ_OP_WRITE && !dev->writeable)) {
+		    (bio_op(first_bio) == REQ_OP_WRITE && !dev->writeable)) {
 			bbio_error(bbio, first_bio, logical);
 			continue;
 		}
diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
index 8c91f37ac0eb..5e9b306bc162 100644
--- a/fs/cifs/cifsfs.c
+++ b/fs/cifs/cifsfs.c
@@ -973,6 +973,86 @@ out:
 	return rc;
 }
 
+ssize_t cifs_file_copychunk_range(unsigned int xid,
+		struct file *src_file, loff_t off,
+		struct file *dst_file, loff_t destoff,
+		size_t len, unsigned int flags)
+{
+	struct inode *src_inode = file_inode(src_file);
+	struct inode *target_inode = file_inode(dst_file);
+	struct cifsFileInfo *smb_file_src;
+	struct cifsFileInfo *smb_file_target;
+	struct cifs_tcon *src_tcon;
+	struct cifs_tcon *target_tcon;
+	ssize_t rc;
+
+	cifs_dbg(FYI, "copychunk range\n");
+
+	if (src_inode == target_inode) {
+		rc = -EINVAL;
+		goto out;
+	}
+
+	if (!src_file->private_data || !dst_file->private_data) {
+		rc = -EBADF;
+		cifs_dbg(VFS, "missing cifsFileInfo on copy range src file\n");
+		goto out;
+	}
+
+	rc = -EXDEV;
+	smb_file_target = dst_file->private_data;
+	smb_file_src = src_file->private_data;
+	src_tcon = tlink_tcon(smb_file_src->tlink);
+	target_tcon = tlink_tcon(smb_file_target->tlink);
+
+	if (src_tcon->ses != target_tcon->ses) {
+		cifs_dbg(VFS, "source and target of copy not on same server\n");
+		goto out;
+	}
+
+	/*
+	 * Note: cifs case is easier than btrfs since server responsible for
+	 * checks for proper open modes and file type and if it wants
+	 * server could even support copy of range where source = target
+	 */
+	lock_two_nondirectories(target_inode, src_inode);
+
+	cifs_dbg(FYI, "about to flush pages\n");
+	/* should we flush first and last page first */
+	truncate_inode_pages(&target_inode->i_data, 0);
+
+	if (target_tcon->ses->server->ops->copychunk_range)
+		rc = target_tcon->ses->server->ops->copychunk_range(xid,
+			smb_file_src, smb_file_target, off, len, destoff);
+	else
+		rc = -EOPNOTSUPP;
+
+	/* force revalidate of size and timestamps of target file now
+	 * that target is updated on the server
+	 */
+	CIFS_I(target_inode)->time = 0;
+	/* although unlocking in the reverse order from locking is not
+	 * strictly necessary here it is a little cleaner to be consistent
+	 */
+	unlock_two_nondirectories(src_inode, target_inode);
+
+out:
+	return rc;
+}
+
+static ssize_t cifs_copy_file_range(struct file *src_file, loff_t off,
+				struct file *dst_file, loff_t destoff,
+				size_t len, unsigned int flags)
+{
+	unsigned int xid = get_xid();
+	ssize_t rc;
+
+	rc = cifs_file_copychunk_range(xid, src_file, off, dst_file, destoff,
+					len, flags);
+	free_xid(xid);
+	return rc;
+}
+
 const struct file_operations cifs_file_ops = {
 	.read_iter = cifs_loose_read_iter,
 	.write_iter = cifs_file_write_iter,
@@ -985,6 +1065,7 @@ const struct file_operations cifs_file_ops = {
 	.splice_read = generic_file_splice_read,
 	.llseek = cifs_llseek,
 	.unlocked_ioctl = cifs_ioctl,
+	.copy_file_range = cifs_copy_file_range,
 	.clone_file_range = cifs_clone_file_range,
 	.setlease = cifs_setlease,
 	.fallocate = cifs_fallocate,
@@ -1002,6 +1083,7 @@ const struct file_operations cifs_file_strict_ops = {
 	.splice_read = generic_file_splice_read,
 	.llseek = cifs_llseek,
 	.unlocked_ioctl = cifs_ioctl,
+	.copy_file_range = cifs_copy_file_range,
 	.clone_file_range = cifs_clone_file_range,
 	.setlease = cifs_setlease,
 	.fallocate = cifs_fallocate,
@@ -1019,6 +1101,7 @@ const struct file_operations cifs_file_direct_ops = {
 	.mmap = cifs_file_mmap,
 	.splice_read = generic_file_splice_read,
 	.unlocked_ioctl = cifs_ioctl,
+	.copy_file_range = cifs_copy_file_range,
 	.clone_file_range = cifs_clone_file_range,
 	.llseek = cifs_llseek,
 	.setlease = cifs_setlease,
@@ -1036,6 +1119,7 @@ const struct file_operations cifs_file_nobrl_ops = {
 	.splice_read = generic_file_splice_read,
 	.llseek = cifs_llseek,
 	.unlocked_ioctl = cifs_ioctl,
+	.copy_file_range = cifs_copy_file_range,
 	.clone_file_range = cifs_clone_file_range,
 	.setlease = cifs_setlease,
 	.fallocate = cifs_fallocate,
@@ -1052,6 +1136,7 @@ const struct file_operations cifs_file_strict_nobrl_ops = {
 	.splice_read = generic_file_splice_read,
 	.llseek = cifs_llseek,
 	.unlocked_ioctl = cifs_ioctl,
+	.copy_file_range = cifs_copy_file_range,
 	.clone_file_range = cifs_clone_file_range,
 	.setlease = cifs_setlease,
 	.fallocate = cifs_fallocate,
@@ -1068,6 +1153,7 @@ const struct file_operations cifs_file_direct_nobrl_ops = {
 	.mmap = cifs_file_mmap,
 	.splice_read = generic_file_splice_read,
 	.unlocked_ioctl = cifs_ioctl,
+	.copy_file_range = cifs_copy_file_range,
 	.clone_file_range = cifs_clone_file_range,
 	.llseek = cifs_llseek,
 	.setlease = cifs_setlease,
@@ -1079,6 +1165,7 @@ const struct file_operations cifs_dir_ops = {
 	.release = cifs_closedir,
 	.read = generic_read_dir,
 	.unlocked_ioctl = cifs_ioctl,
+	.copy_file_range = cifs_copy_file_range,
 	.clone_file_range = cifs_clone_file_range,
 	.llseek = generic_file_llseek,
 };
diff --git a/fs/cifs/cifsfs.h b/fs/cifs/cifsfs.h
index da717fee3026..30bf89b1fd9a 100644
--- a/fs/cifs/cifsfs.h
+++ b/fs/cifs/cifsfs.h
@@ -139,6 +139,11 @@ extern ssize_t cifs_listxattr(struct dentry *, char *, size_t);
 # define cifs_listxattr NULL
 #endif
 
+extern ssize_t cifs_file_copychunk_range(unsigned int xid,
+					struct file *src_file, loff_t off,
+					struct file *dst_file, loff_t destoff,
+					size_t len, unsigned int flags);
+
 extern long cifs_ioctl(struct file *filep, unsigned int cmd, unsigned long arg);
 #ifdef CONFIG_CIFS_NFSD_EXPORT
 extern const struct export_operations cifs_export_ops;
diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
index d42dd3288647..37f5a41cc50c 100644
--- a/fs/cifs/cifsglob.h
+++ b/fs/cifs/cifsglob.h
@@ -243,6 +243,7 @@ struct smb_version_operations {
 	/* verify the message */
 	int (*check_message)(char *, unsigned int, struct TCP_Server_Info *);
 	bool (*is_oplock_break)(char *, struct TCP_Server_Info *);
+	int (*handle_cancelled_mid)(char *, struct TCP_Server_Info *);
 	void (*downgrade_oplock)(struct TCP_Server_Info *,
 					struct cifsInodeInfo *, bool);
 	/* process transaction2 response */
@@ -407,9 +408,10 @@ struct smb_version_operations {
 	char * (*create_lease_buf)(u8 *, u8);
 	/* parse lease context buffer and return oplock/epoch info */
 	__u8 (*parse_lease_buf)(void *, unsigned int *);
-	int (*clone_range)(const unsigned int, struct cifsFileInfo *src_file,
-			struct cifsFileInfo *target_file, u64 src_off, u64 len,
-			u64 dest_off);
+	ssize_t (*copychunk_range)(const unsigned int,
+			struct cifsFileInfo *src_file,
+			struct cifsFileInfo *target_file,
+			u64 src_off, u64 len, u64 dest_off);
 	int (*duplicate_extents)(const unsigned int, struct cifsFileInfo *src,
 			struct cifsFileInfo *target_file, u64 src_off, u64 len,
 			u64 dest_off);
@@ -946,7 +948,6 @@ struct cifs_tcon {
 	bool use_persistent:1; /* use persistent instead of durable handles */
 #ifdef CONFIG_CIFS_SMB2
 	bool print:1; /* set if connection to printer share */
-	bool bad_network_name:1; /* set if ret status STATUS_BAD_NETWORK_NAME */
 	__le32 capabilities;
 	__u32 share_flags;
 	__u32 maximal_access;
@@ -1343,6 +1344,7 @@ struct mid_q_entry {
 	void *callback_data; /* general purpose pointer for callback */
 	void *resp_buf; /* pointer to received SMB header */
 	int mid_state; /* wish this were enum but can not pass to wait_event */
+	unsigned int mid_flags;
 	__le16 command; /* smb command code */
 	bool large_buf:1; /* if valid response, is pointer to large buf */
 	bool multiRsp:1; /* multiple trans2 responses for one request */
@@ -1350,6 +1352,12 @@ struct mid_q_entry {
 	bool decrypted:1; /* decrypted entry */
 };
 
+struct close_cancelled_open {
+	struct cifs_fid		fid;
+	struct cifs_tcon	*tcon;
+	struct work_struct	work;
+};
+
 /* Make code in transport.c a little cleaner by moving
    update of optional stats into function below */
 #ifdef CONFIG_CIFS_STATS2
@@ -1481,6 +1489,9 @@ static inline void free_dfs_info_array(struct dfs_info3_param *param,
 #define MID_RESPONSE_MALFORMED 0x10
 #define MID_SHUTDOWN		0x20
 
+/* Flags */
+#define MID_WAIT_CANCELLED	1 /* Cancelled while waiting for response */
+
 /* Types of response buffer returned from SendReceive2 */
 #define CIFS_NO_BUFFER		0 /* Response buffer not returned */
 #define CIFS_SMALL_BUFFER	1
diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c
index 066950671929..5d21f00ae341 100644
--- a/fs/cifs/cifssmb.c
+++ b/fs/cifs/cifssmb.c
@@ -1428,6 +1428,8 @@ cifs_readv_discard(struct TCP_Server_Info *server, struct mid_q_entry *mid)
 
 	length = cifs_discard_remaining_data(server);
 	dequeue_mid(mid, rdata->result);
+	mid->resp_buf = server->smallbuf;
+	server->smallbuf = NULL;
 	return length;
 }
 
@@ -1541,6 +1543,8 @@ cifs_readv_receive(struct TCP_Server_Info *server, struct mid_q_entry *mid)
 		return cifs_readv_discard(server, mid);
 
 	dequeue_mid(mid, false);
+	mid->resp_buf = server->smallbuf;
+	server->smallbuf = NULL;
 	return length;
 }
 
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index 858698dcde3c..190a855bc844 100644
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -905,10 +905,19 @@ cifs_demultiplex_thread(void *p)
 
 		server->lstrp = jiffies;
 		if (mid_entry != NULL) {
+			if ((mid_entry->mid_flags & MID_WAIT_CANCELLED) &&
+			     mid_entry->mid_state == MID_RESPONSE_RECEIVED &&
+					server->ops->handle_cancelled_mid)
+				server->ops->handle_cancelled_mid(
+							mid_entry->resp_buf,
+							server);
+
 			if (!mid_entry->multiRsp || mid_entry->multiEnd)
 				mid_entry->callback(mid_entry);
-		} else if (!server->ops->is_oplock_break ||
-			   !server->ops->is_oplock_break(buf, server)) {
+		} else if (server->ops->is_oplock_break &&
+			   server->ops->is_oplock_break(buf, server)) {
+			cifs_dbg(FYI, "Received oplock break\n");
+		} else {
 			cifs_dbg(VFS, "No task to wake, unknown frame received! NumMids %d\n",
 				 atomic_read(&midCount));
 			cifs_dump_mem("Received Data is: ", buf,
@@ -3745,6 +3754,9 @@ try_mount_again:
 	if (IS_ERR(tcon)) {
 		rc = PTR_ERR(tcon);
 		tcon = NULL;
+		if (rc == -EACCES)
+			goto mount_fail_check;
+
 		goto remote_path_check;
 	}
 
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index aa3debbba826..21d404535739 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -2597,7 +2597,7 @@ cifs_write_from_iter(loff_t offset, size_t len, struct iov_iter *from,
 		wdata->credits = credits;
 
 		if (!wdata->cfile->invalidHandle ||
-		    !cifs_reopen_file(wdata->cfile, false))
+		    !(rc = cifs_reopen_file(wdata->cfile, false)))
 			rc = server->ops->async_writev(wdata,
 					cifs_uncached_writedata_release);
 		if (rc) {
@@ -3022,7 +3022,7 @@ cifs_send_async_read(loff_t offset, size_t len, struct cifsFileInfo *open_file,
 		rdata->credits = credits;
 
 		if (!rdata->cfile->invalidHandle ||
-		    !cifs_reopen_file(rdata->cfile, true))
+		    !(rc = cifs_reopen_file(rdata->cfile, true)))
 			rc = server->ops->async_readv(rdata);
 error:
 	if (rc) {
@@ -3617,7 +3617,7 @@ static int cifs_readpages(struct file *file, struct address_space *mapping,
 		}
 
 		if (!rdata->cfile->invalidHandle ||
-		    !cifs_reopen_file(rdata->cfile, true))
+		    !(rc = cifs_reopen_file(rdata->cfile, true)))
 			rc = server->ops->async_readv(rdata);
 		if (rc) {
 			add_credits_and_wake_if(server, rdata->credits, 0);
diff --git a/fs/cifs/ioctl.c b/fs/cifs/ioctl.c
index 001528781b6b..265c45fe4ea5 100644
--- a/fs/cifs/ioctl.c
+++ b/fs/cifs/ioctl.c
@@ -34,71 +34,14 @@
 #include "cifs_ioctl.h"
 #include <linux/btrfs.h>
 
-static int cifs_file_clone_range(unsigned int xid, struct file *src_file,
-			struct file *dst_file)
-{
-	struct inode *src_inode = file_inode(src_file);
-	struct inode *target_inode = file_inode(dst_file);
-	struct cifsFileInfo *smb_file_src;
-	struct cifsFileInfo *smb_file_target;
-	struct cifs_tcon *src_tcon;
-	struct cifs_tcon *target_tcon;
-	int rc;
-
-	cifs_dbg(FYI, "ioctl clone range\n");
-
-	if (!src_file->private_data || !dst_file->private_data) {
-		rc = -EBADF;
-		cifs_dbg(VFS, "missing cifsFileInfo on copy range src file\n");
-		goto out;
-	}
-
-	rc = -EXDEV;
-	smb_file_target = dst_file->private_data;
-	smb_file_src = src_file->private_data;
-	src_tcon = tlink_tcon(smb_file_src->tlink);
-	target_tcon = tlink_tcon(smb_file_target->tlink);
-
-	if (src_tcon->ses != target_tcon->ses) {
-		cifs_dbg(VFS, "source and target of copy not on same server\n");
-		goto out;
-	}
-
-	/*
-	 * Note: cifs case is easier than btrfs since server responsible for
-	 * checks for proper open modes and file type and if it wants
-	 * server could even support copy of range where source = target
-	 */
-	lock_two_nondirectories(target_inode, src_inode);
-
-	cifs_dbg(FYI, "about to flush pages\n");
-	/* should we flush first and last page first */
-	truncate_inode_pages(&target_inode->i_data, 0);
-
-	if (target_tcon->ses->server->ops->clone_range)
-		rc = target_tcon->ses->server->ops->clone_range(xid,
-			smb_file_src, smb_file_target, 0, src_inode->i_size, 0);
-	else
-		rc = -EOPNOTSUPP;
-
-	/* force revalidate of size and timestamps of target file now
-	   that target is updated on the server */
-	CIFS_I(target_inode)->time = 0;
-	/* although unlocking in the reverse order from locking is not
-	   strictly necessary here it is a little cleaner to be consistent */
-	unlock_two_nondirectories(src_inode, target_inode);
-out:
-	return rc;
-}
-
-static long cifs_ioctl_clone(unsigned int xid, struct file *dst_file,
+static long cifs_ioctl_copychunk(unsigned int xid, struct file *dst_file,
 			unsigned long srcfd)
 {
 	int rc;
 	struct fd src_file;
 	struct inode *src_inode;
 
-	cifs_dbg(FYI, "ioctl clone range\n");
+	cifs_dbg(FYI, "ioctl copychunk range\n");
 	/* the destination must be opened for writing */
 	if (!(dst_file->f_mode & FMODE_WRITE)) {
 		cifs_dbg(FYI, "file target not open for write\n");
@@ -129,7 +72,8 @@ static long cifs_ioctl_clone(unsigned int xid, struct file *dst_file,
 	if (S_ISDIR(src_inode->i_mode))
 		goto out_fput;
 
-	rc = cifs_file_clone_range(xid, src_file.file, dst_file);
+	rc = cifs_file_copychunk_range(xid, src_file.file, 0, dst_file, 0,
+					src_inode->i_size, 0);
 
 out_fput:
 	fdput(src_file);
@@ -251,7 +195,7 @@ long cifs_ioctl(struct file *filep, unsigned int command, unsigned long arg)
 		}
 		break;
 	case CIFS_IOC_COPYCHUNK_FILE:
-		rc = cifs_ioctl_clone(xid, filep, arg);
+		rc = cifs_ioctl_copychunk(xid, filep, arg);
 		break;
 	case CIFS_IOC_SET_INTEGRITY:
 		if (pSMBFile == NULL)
diff --git a/fs/cifs/smb2misc.c b/fs/cifs/smb2misc.c
index fd516ea8b8f8..1a04b3a5beb1 100644
--- a/fs/cifs/smb2misc.c
+++ b/fs/cifs/smb2misc.c
@@ -659,3 +659,49 @@ smb2_is_valid_oplock_break(char *buffer, struct TCP_Server_Info *server)
 	cifs_dbg(FYI, "Can not process oplock break for non-existent connection\n");
 	return false;
 }
+
+void
+smb2_cancelled_close_fid(struct work_struct *work)
+{
+	struct close_cancelled_open *cancelled = container_of(work,
+					struct close_cancelled_open, work);
+
+	cifs_dbg(VFS, "Close unmatched open\n");
+
+	SMB2_close(0, cancelled->tcon, cancelled->fid.persistent_fid,
+		   cancelled->fid.volatile_fid);
+	cifs_put_tcon(cancelled->tcon);
+	kfree(cancelled);
+}
+
+int
+smb2_handle_cancelled_mid(char *buffer, struct TCP_Server_Info *server)
+{
+	struct smb2_sync_hdr *sync_hdr = get_sync_hdr(buffer);
+	struct smb2_create_rsp *rsp = (struct smb2_create_rsp *)buffer;
+	struct cifs_tcon *tcon;
+	struct close_cancelled_open *cancelled;
+
+	if (sync_hdr->Command != SMB2_CREATE ||
+	    sync_hdr->Status != STATUS_SUCCESS)
+		return 0;
+
+	cancelled = kzalloc(sizeof(*cancelled), GFP_KERNEL);
+	if (!cancelled)
+		return -ENOMEM;
+
+	tcon = smb2_find_smb_tcon(server, sync_hdr->SessionId,
+				  sync_hdr->TreeId);
+	if (!tcon) {
+		kfree(cancelled);
+		return -ENOENT;
+	}
+
+	cancelled->fid.persistent_fid = rsp->PersistentFileId;
+	cancelled->fid.volatile_fid = rsp->VolatileFileId;
+	cancelled->tcon = tcon;
+	INIT_WORK(&cancelled->work, smb2_cancelled_close_fid);
+	queue_work(cifsiod_wq, &cancelled->work);
+
+	return 0;
+}
diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
index 0231108d9387..152e37f2ad92 100644
--- a/fs/cifs/smb2ops.c
+++ b/fs/cifs/smb2ops.c
@@ -21,6 +21,7 @@
 #include <linux/vfs.h>
 #include <linux/falloc.h>
 #include <linux/scatterlist.h>
+#include <linux/uuid.h>
 #include <crypto/aead.h>
 #include "cifsglob.h"
 #include "smb2pdu.h"
@@ -592,8 +593,8 @@ req_res_key_exit:
 	return rc;
 }
 
-static int
-smb2_clone_range(const unsigned int xid,
+static ssize_t
+smb2_copychunk_range(const unsigned int xid,
 			struct cifsFileInfo *srcfile,
 			struct cifsFileInfo *trgtfile, u64 src_off,
 			u64 len, u64 dest_off)
@@ -605,13 +606,14 @@ smb2_clone_range(const unsigned int xid,
 	struct cifs_tcon *tcon;
 	int chunks_copied = 0;
 	bool chunk_sizes_updated = false;
+	ssize_t bytes_written, total_bytes_written = 0;
 
 	pcchunk = kmalloc(sizeof(struct copychunk_ioctl), GFP_KERNEL);
 
 	if (pcchunk == NULL)
 		return -ENOMEM;
 
-	cifs_dbg(FYI, "in smb2_clone_range - about to call request res key\n");
+	cifs_dbg(FYI, "in smb2_copychunk_range - about to call request res key\n");
 	/* Request a key from the server to identify the source of the copy */
 	rc = SMB2_request_res_key(xid, tlink_tcon(srcfile->tlink),
 				srcfile->fid.persistent_fid,
@@ -669,14 +671,16 @@ smb2_clone_range(const unsigned int xid,
669 } 671 }
670 chunks_copied++; 672 chunks_copied++;
671 673
672 src_off += le32_to_cpu(retbuf->TotalBytesWritten); 674 bytes_written = le32_to_cpu(retbuf->TotalBytesWritten);
673 dest_off += le32_to_cpu(retbuf->TotalBytesWritten); 675 src_off += bytes_written;
674 len -= le32_to_cpu(retbuf->TotalBytesWritten); 676 dest_off += bytes_written;
677 len -= bytes_written;
678 total_bytes_written += bytes_written;
675 679
676 cifs_dbg(FYI, "Chunks %d PartialChunk %d Total %d\n", 680 cifs_dbg(FYI, "Chunks %d PartialChunk %d Total %zu\n",
677 le32_to_cpu(retbuf->ChunksWritten), 681 le32_to_cpu(retbuf->ChunksWritten),
678 le32_to_cpu(retbuf->ChunkBytesWritten), 682 le32_to_cpu(retbuf->ChunkBytesWritten),
679 le32_to_cpu(retbuf->TotalBytesWritten)); 683 bytes_written);
680 } else if (rc == -EINVAL) { 684 } else if (rc == -EINVAL) {
681 if (ret_data_len != sizeof(struct copychunk_ioctl_rsp)) 685 if (ret_data_len != sizeof(struct copychunk_ioctl_rsp))
682 goto cchunk_out; 686 goto cchunk_out;
@@ -713,7 +717,10 @@ smb2_clone_range(const unsigned int xid,
713cchunk_out: 717cchunk_out:
714 kfree(pcchunk); 718 kfree(pcchunk);
715 kfree(retbuf); 719 kfree(retbuf);
716 return rc; 720 if (rc)
721 return rc;
722 else
723 return total_bytes_written;
717} 724}
718 725
719static int 726static int
@@ -2322,6 +2329,7 @@ struct smb_version_operations smb20_operations = {
2322 .clear_stats = smb2_clear_stats, 2329 .clear_stats = smb2_clear_stats,
2323 .print_stats = smb2_print_stats, 2330 .print_stats = smb2_print_stats,
2324 .is_oplock_break = smb2_is_valid_oplock_break, 2331 .is_oplock_break = smb2_is_valid_oplock_break,
2332 .handle_cancelled_mid = smb2_handle_cancelled_mid,
2325 .downgrade_oplock = smb2_downgrade_oplock, 2333 .downgrade_oplock = smb2_downgrade_oplock,
2326 .need_neg = smb2_need_neg, 2334 .need_neg = smb2_need_neg,
2327 .negotiate = smb2_negotiate, 2335 .negotiate = smb2_negotiate,
@@ -2377,7 +2385,7 @@ struct smb_version_operations smb20_operations = {
2377 .set_oplock_level = smb2_set_oplock_level, 2385 .set_oplock_level = smb2_set_oplock_level,
2378 .create_lease_buf = smb2_create_lease_buf, 2386 .create_lease_buf = smb2_create_lease_buf,
2379 .parse_lease_buf = smb2_parse_lease_buf, 2387 .parse_lease_buf = smb2_parse_lease_buf,
2380 .clone_range = smb2_clone_range, 2388 .copychunk_range = smb2_copychunk_range,
2381 .wp_retry_size = smb2_wp_retry_size, 2389 .wp_retry_size = smb2_wp_retry_size,
2382 .dir_needs_close = smb2_dir_needs_close, 2390 .dir_needs_close = smb2_dir_needs_close,
2383 .get_dfs_refer = smb2_get_dfs_refer, 2391 .get_dfs_refer = smb2_get_dfs_refer,
@@ -2404,6 +2412,7 @@ struct smb_version_operations smb21_operations = {
2404 .clear_stats = smb2_clear_stats, 2412 .clear_stats = smb2_clear_stats,
2405 .print_stats = smb2_print_stats, 2413 .print_stats = smb2_print_stats,
2406 .is_oplock_break = smb2_is_valid_oplock_break, 2414 .is_oplock_break = smb2_is_valid_oplock_break,
2415 .handle_cancelled_mid = smb2_handle_cancelled_mid,
2407 .downgrade_oplock = smb2_downgrade_oplock, 2416 .downgrade_oplock = smb2_downgrade_oplock,
2408 .need_neg = smb2_need_neg, 2417 .need_neg = smb2_need_neg,
2409 .negotiate = smb2_negotiate, 2418 .negotiate = smb2_negotiate,
@@ -2459,7 +2468,7 @@ struct smb_version_operations smb21_operations = {
2459 .set_oplock_level = smb21_set_oplock_level, 2468 .set_oplock_level = smb21_set_oplock_level,
2460 .create_lease_buf = smb2_create_lease_buf, 2469 .create_lease_buf = smb2_create_lease_buf,
2461 .parse_lease_buf = smb2_parse_lease_buf, 2470 .parse_lease_buf = smb2_parse_lease_buf,
2462 .clone_range = smb2_clone_range, 2471 .copychunk_range = smb2_copychunk_range,
2463 .wp_retry_size = smb2_wp_retry_size, 2472 .wp_retry_size = smb2_wp_retry_size,
2464 .dir_needs_close = smb2_dir_needs_close, 2473 .dir_needs_close = smb2_dir_needs_close,
2465 .enum_snapshots = smb3_enum_snapshots, 2474 .enum_snapshots = smb3_enum_snapshots,
@@ -2488,6 +2497,7 @@ struct smb_version_operations smb30_operations = {
2488 .print_stats = smb2_print_stats, 2497 .print_stats = smb2_print_stats,
2489 .dump_share_caps = smb2_dump_share_caps, 2498 .dump_share_caps = smb2_dump_share_caps,
2490 .is_oplock_break = smb2_is_valid_oplock_break, 2499 .is_oplock_break = smb2_is_valid_oplock_break,
2500 .handle_cancelled_mid = smb2_handle_cancelled_mid,
2491 .downgrade_oplock = smb2_downgrade_oplock, 2501 .downgrade_oplock = smb2_downgrade_oplock,
2492 .need_neg = smb2_need_neg, 2502 .need_neg = smb2_need_neg,
2493 .negotiate = smb2_negotiate, 2503 .negotiate = smb2_negotiate,
@@ -2545,7 +2555,7 @@ struct smb_version_operations smb30_operations = {
2545 .set_oplock_level = smb3_set_oplock_level, 2555 .set_oplock_level = smb3_set_oplock_level,
2546 .create_lease_buf = smb3_create_lease_buf, 2556 .create_lease_buf = smb3_create_lease_buf,
2547 .parse_lease_buf = smb3_parse_lease_buf, 2557 .parse_lease_buf = smb3_parse_lease_buf,
2548 .clone_range = smb2_clone_range, 2558 .copychunk_range = smb2_copychunk_range,
2549 .duplicate_extents = smb2_duplicate_extents, 2559 .duplicate_extents = smb2_duplicate_extents,
2550 .validate_negotiate = smb3_validate_negotiate, 2560 .validate_negotiate = smb3_validate_negotiate,
2551 .wp_retry_size = smb2_wp_retry_size, 2561 .wp_retry_size = smb2_wp_retry_size,
@@ -2582,6 +2592,7 @@ struct smb_version_operations smb311_operations = {
2582 .print_stats = smb2_print_stats, 2592 .print_stats = smb2_print_stats,
2583 .dump_share_caps = smb2_dump_share_caps, 2593 .dump_share_caps = smb2_dump_share_caps,
2584 .is_oplock_break = smb2_is_valid_oplock_break, 2594 .is_oplock_break = smb2_is_valid_oplock_break,
2595 .handle_cancelled_mid = smb2_handle_cancelled_mid,
2585 .downgrade_oplock = smb2_downgrade_oplock, 2596 .downgrade_oplock = smb2_downgrade_oplock,
2586 .need_neg = smb2_need_neg, 2597 .need_neg = smb2_need_neg,
2587 .negotiate = smb2_negotiate, 2598 .negotiate = smb2_negotiate,
@@ -2639,7 +2650,7 @@ struct smb_version_operations smb311_operations = {
2639 .set_oplock_level = smb3_set_oplock_level, 2650 .set_oplock_level = smb3_set_oplock_level,
2640 .create_lease_buf = smb3_create_lease_buf, 2651 .create_lease_buf = smb3_create_lease_buf,
2641 .parse_lease_buf = smb3_parse_lease_buf, 2652 .parse_lease_buf = smb3_parse_lease_buf,
2642 .clone_range = smb2_clone_range, 2653 .copychunk_range = smb2_copychunk_range,
2643 .duplicate_extents = smb2_duplicate_extents, 2654 .duplicate_extents = smb2_duplicate_extents,
2644/* .validate_negotiate = smb3_validate_negotiate, */ /* not used in 3.11 */ 2655/* .validate_negotiate = smb3_validate_negotiate, */ /* not used in 3.11 */
2645 .wp_retry_size = smb2_wp_retry_size, 2656 .wp_retry_size = smb2_wp_retry_size,
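The rename from clone_range to copychunk_range also changes the return contract from an error code (int) to a byte count (ssize_t), which matches copy_file_range() semantics where short copies are legal and the caller is expected to retry. A hedged userspace sketch of the loop such a byte-count contract implies, using the copy_file_range(2) syscall (assumes Linux with a glibc wrapper for it; the helper name copy_all is invented for illustration):

#define _GNU_SOURCE
#include <unistd.h>
#include <errno.h>
#include <sys/types.h>

/* Copy len bytes, tolerating short copies; returns 0 or -errno. */
static int copy_all(int fd_in, int fd_out, size_t len)
{
	while (len > 0) {
		ssize_t n = copy_file_range(fd_in, NULL, fd_out, NULL, len, 0);
		if (n < 0)
			return -errno;
		if (n == 0)		/* unexpected EOF on the source */
			return -EIO;
		len -= (size_t)n;	/* short copy: loop for the rest */
	}
	return 0;
}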
diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
index fb75fe908225..fb0da096c2ce 100644
--- a/fs/cifs/smb2pdu.c
+++ b/fs/cifs/smb2pdu.c
@@ -563,8 +563,10 @@ SMB2_negotiate(const unsigned int xid, struct cifs_ses *ses)
 	 * but for time being this is our only auth choice so doesn't matter.
 	 * We just found a server which sets blob length to zero expecting raw.
 	 */
-	if (blob_length == 0)
+	if (blob_length == 0) {
 		cifs_dbg(FYI, "missing security blob on negprot\n");
+		server->sec_ntlmssp = true;
+	}
 
 	rc = cifs_enable_signing(server, ses->sign);
 	if (rc)
@@ -1172,9 +1174,6 @@ SMB2_tcon(const unsigned int xid, struct cifs_ses *ses, const char *tree,
 	else
 		return -EIO;
 
-	if (tcon && tcon->bad_network_name)
-		return -ENOENT;
-
 	unc_path = kmalloc(MAX_SHARENAME_LENGTH * 2, GFP_KERNEL);
 	if (unc_path == NULL)
 		return -ENOMEM;
@@ -1186,6 +1185,10 @@ SMB2_tcon(const unsigned int xid, struct cifs_ses *ses, const char *tree,
 		return -EINVAL;
 	}
 
+	/* SMB2 TREE_CONNECT request must be called with TreeId == 0 */
+	if (tcon)
+		tcon->tid = 0;
+
 	rc = small_smb2_init(SMB2_TREE_CONNECT, tcon, (void **) &req);
 	if (rc) {
 		kfree(unc_path);
@@ -1274,8 +1277,6 @@ tcon_exit:
 tcon_error_exit:
 	if (rsp->hdr.sync_hdr.Status == STATUS_BAD_NETWORK_NAME) {
 		cifs_dbg(VFS, "BAD_NETWORK_NAME: %s\n", tree);
-		if (tcon)
-			tcon->bad_network_name = true;
 	}
 	goto tcon_exit;
 }
@@ -2178,6 +2179,9 @@ void smb2_reconnect_server(struct work_struct *work)
 	struct cifs_tcon *tcon, *tcon2;
 	struct list_head tmp_list;
 	int tcon_exist = false;
+	int rc;
+	int resched = false;
+
 
 	/* Prevent simultaneous reconnects that can corrupt tcon->rlist list */
 	mutex_lock(&server->reconnect_mutex);
@@ -2205,13 +2209,18 @@ void smb2_reconnect_server(struct work_struct *work)
 	spin_unlock(&cifs_tcp_ses_lock);
 
 	list_for_each_entry_safe(tcon, tcon2, &tmp_list, rlist) {
-		if (!smb2_reconnect(SMB2_INTERNAL_CMD, tcon))
+		rc = smb2_reconnect(SMB2_INTERNAL_CMD, tcon);
+		if (!rc)
 			cifs_reopen_persistent_handles(tcon);
+		else
+			resched = true;
 		list_del_init(&tcon->rlist);
 		cifs_put_tcon(tcon);
 	}
 
 	cifs_dbg(FYI, "Reconnecting tcons finished\n");
+	if (resched)
+		queue_delayed_work(cifsiod_wq, &server->reconnect, 2 * HZ);
 	mutex_unlock(&server->reconnect_mutex);
 
 	/* now we can safely release srv struct */
diff --git a/fs/cifs/smb2proto.h b/fs/cifs/smb2proto.h
index 69e35873b1de..6853454fc871 100644
--- a/fs/cifs/smb2proto.h
+++ b/fs/cifs/smb2proto.h
@@ -48,6 +48,10 @@ extern struct mid_q_entry *smb2_setup_request(struct cifs_ses *ses,
 			      struct smb_rqst *rqst);
 extern struct mid_q_entry *smb2_setup_async_request(
 			struct TCP_Server_Info *server, struct smb_rqst *rqst);
+extern struct cifs_ses *smb2_find_smb_ses(struct TCP_Server_Info *server,
+					   __u64 ses_id);
+extern struct cifs_tcon *smb2_find_smb_tcon(struct TCP_Server_Info *server,
+					     __u64 ses_id, __u32 tid);
 extern int smb2_calc_signature(struct smb_rqst *rqst,
 				struct TCP_Server_Info *server);
 extern int smb3_calc_signature(struct smb_rqst *rqst,
@@ -164,6 +168,9 @@ extern int SMB2_set_compression(const unsigned int xid, struct cifs_tcon *tcon,
 extern int SMB2_oplock_break(const unsigned int xid, struct cifs_tcon *tcon,
 			     const u64 persistent_fid, const u64 volatile_fid,
 			     const __u8 oplock_level);
+extern int smb2_handle_cancelled_mid(char *buffer,
+				     struct TCP_Server_Info *server);
+void smb2_cancelled_close_fid(struct work_struct *work);
 extern int SMB2_QFS_info(const unsigned int xid, struct cifs_tcon *tcon,
 			 u64 persistent_file_id, u64 volatile_file_id,
 			 struct kstatfs *FSData);
diff --git a/fs/cifs/smb2transport.c b/fs/cifs/smb2transport.c
index 7c3bb1bd7eed..506b67fc93d9 100644
--- a/fs/cifs/smb2transport.c
+++ b/fs/cifs/smb2transport.c
@@ -115,23 +115,70 @@ smb3_crypto_shash_allocate(struct TCP_Server_Info *server)
 	return 0;
 }
 
-struct cifs_ses *
-smb2_find_smb_ses(struct TCP_Server_Info *server, __u64 ses_id)
+static struct cifs_ses *
+smb2_find_smb_ses_unlocked(struct TCP_Server_Info *server, __u64 ses_id)
 {
 	struct cifs_ses *ses;
 
-	spin_lock(&cifs_tcp_ses_lock);
 	list_for_each_entry(ses, &server->smb_ses_list, smb_ses_list) {
 		if (ses->Suid != ses_id)
 			continue;
-		spin_unlock(&cifs_tcp_ses_lock);
 		return ses;
 	}
+
+	return NULL;
+}
+
+struct cifs_ses *
+smb2_find_smb_ses(struct TCP_Server_Info *server, __u64 ses_id)
+{
+	struct cifs_ses *ses;
+
+	spin_lock(&cifs_tcp_ses_lock);
+	ses = smb2_find_smb_ses_unlocked(server, ses_id);
 	spin_unlock(&cifs_tcp_ses_lock);
 
+	return ses;
+}
+
+static struct cifs_tcon *
+smb2_find_smb_sess_tcon_unlocked(struct cifs_ses *ses, __u32 tid)
+{
+	struct cifs_tcon *tcon;
+
+	list_for_each_entry(tcon, &ses->tcon_list, tcon_list) {
+		if (tcon->tid != tid)
+			continue;
+		++tcon->tc_count;
+		return tcon;
+	}
+
 	return NULL;
 }
 
+/*
+ * Obtain tcon corresponding to the tid in the given
+ * cifs_ses
+ */
+
+struct cifs_tcon *
+smb2_find_smb_tcon(struct TCP_Server_Info *server, __u64 ses_id, __u32 tid)
+{
+	struct cifs_ses *ses;
+	struct cifs_tcon *tcon;
+
+	spin_lock(&cifs_tcp_ses_lock);
+	ses = smb2_find_smb_ses_unlocked(server, ses_id);
+	if (!ses) {
+		spin_unlock(&cifs_tcp_ses_lock);
+		return NULL;
+	}
+	tcon = smb2_find_smb_sess_tcon_unlocked(ses, tid);
+	spin_unlock(&cifs_tcp_ses_lock);
+
+	return tcon;
+}
+
 int
 smb2_calc_signature(struct smb_rqst *rqst, struct TCP_Server_Info *server)
 {
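The refactor above splits the lookups into *_unlocked helpers so that smb2_find_smb_tcon() can resolve both the session and the tcon under a single acquisition of cifs_tcp_ses_lock, taking a reference (++tcon->tc_count) before the lock drops. A small pthread sketch of the same layering, with illustrative names not taken from the patch:

#include <pthread.h>

struct session {
	int id;
	int refcount;
	struct session *next;
};

static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
static struct session *sessions;

/* Caller must hold table_lock. */
static struct session *find_session_unlocked(int id)
{
	struct session *s;

	for (s = sessions; s; s = s->next)
		if (s->id == id)
			return s;
	return NULL;
}

/* Public lookup: take a reference before the lock is dropped. */
static struct session *find_session(int id)
{
	struct session *s;

	pthread_mutex_lock(&table_lock);
	s = find_session_unlocked(id);
	if (s)
		s->refcount++;	/* like ++tcon->tc_count above */
	pthread_mutex_unlock(&table_lock);
	return s;
}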
diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c
index 526f0533cb4e..f6e13a977fc8 100644
--- a/fs/cifs/transport.c
+++ b/fs/cifs/transport.c
@@ -752,9 +752,11 @@ cifs_send_recv(const unsigned int xid, struct cifs_ses *ses,
 
 	rc = wait_for_response(ses->server, midQ);
 	if (rc != 0) {
+		cifs_dbg(FYI, "Cancelling wait for mid %llu\n", midQ->mid);
 		send_cancel(ses->server, rqst, midQ);
 		spin_lock(&GlobalMid_Lock);
 		if (midQ->mid_state == MID_REQUEST_SUBMITTED) {
+			midQ->mid_flags |= MID_WAIT_CANCELLED;
 			midQ->callback = DeleteMidQEntry;
 			spin_unlock(&GlobalMid_Lock);
 			add_credits(ses->server, 1, optype);
diff --git a/fs/dax.c b/fs/dax.c
index de622d4282a6..85abd741253d 100644
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -373,6 +373,22 @@ restart:
 	}
 	spin_lock_irq(&mapping->tree_lock);
 
+	if (!entry) {
+		/*
+		 * We needed to drop the page_tree lock while calling
+		 * radix_tree_preload() and we didn't have an entry to
+		 * lock.  See if another thread inserted an entry at
+		 * our index during this time.
+		 */
+		entry = __radix_tree_lookup(&mapping->page_tree, index,
+				NULL, &slot);
+		if (entry) {
+			radix_tree_preload_end();
+			spin_unlock_irq(&mapping->tree_lock);
+			goto restart;
+		}
+	}
+
 	if (pmd_downgrade) {
 		radix_tree_delete(&mapping->page_tree, index);
 		mapping->nrexceptional--;
@@ -388,19 +404,12 @@ restart:
 		if (err) {
 			spin_unlock_irq(&mapping->tree_lock);
 			/*
-			 * Someone already created the entry?  This is a
-			 * normal failure when inserting PMDs in a range
-			 * that already contains PTEs.  In that case we want
-			 * to return -EEXIST immediately.
-			 */
-			if (err == -EEXIST && !(size_flag & RADIX_DAX_PMD))
-				goto restart;
-			/*
-			 * Our insertion of a DAX PMD entry failed, most
-			 * likely because it collided with a PTE sized entry
-			 * at a different index in the PMD range.  We haven't
-			 * inserted anything into the radix tree and have no
-			 * waiters to wake.
+			 * Our insertion of a DAX entry failed, most likely
+			 * because we were inserting a PMD entry and it
+			 * collided with a PTE sized entry at a different
+			 * index in the PMD range.  We haven't inserted
+			 * anything into the radix tree and have no waiters to
+			 * wake.
 			 */
 			return ERR_PTR(err);
 		}
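The DAX fix closes a classic window: the tree lock is dropped around radix_tree_preload() (which may sleep), so another thread can insert at the same index in the meantime, and the slot must be re-checked after the lock is retaken. The same drop-allocate-relock-recheck pattern in a self-contained pthread sketch (names invented for illustration):

#include <pthread.h>
#include <stdlib.h>

static pthread_mutex_t tree_lock = PTHREAD_MUTEX_INITIALIZER;
static void *slot;	/* stands in for the radix tree slot at "index" */

/* Return the entry for the slot, allocating one if none exists. */
static void *get_or_create_entry(void)
{
	void *fresh;

	pthread_mutex_lock(&tree_lock);
	if (slot) {
		void *e = slot;
		pthread_mutex_unlock(&tree_lock);
		return e;
	}
	/* Allocation may block, so it cannot happen under the lock. */
	pthread_mutex_unlock(&tree_lock);

	fresh = malloc(64);
	if (!fresh)
		return NULL;

	pthread_mutex_lock(&tree_lock);
	if (slot) {
		/* Lost the race: someone inserted while we allocated. */
		free(fresh);
		fresh = slot;	/* use the winner's entry instead */
	} else {
		slot = fresh;
	}
	pthread_mutex_unlock(&tree_lock);
	return fresh;
}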
diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
index f493af666591..fb69ee2388db 100644
--- a/fs/ext4/ext4.h
+++ b/fs/ext4/ext4.h
@@ -2466,6 +2466,7 @@ extern int ext4_setattr(struct dentry *, struct iattr *);
 extern int ext4_getattr(const struct path *, struct kstat *, u32, unsigned int);
 extern void ext4_evict_inode(struct inode *);
 extern void ext4_clear_inode(struct inode *);
+extern int ext4_file_getattr(const struct path *, struct kstat *, u32, unsigned int);
 extern int ext4_sync_inode(handle_t *, struct inode *);
 extern void ext4_dirty_inode(struct inode *, int);
 extern int ext4_change_inode_journal_flag(struct inode *, int);
diff --git a/fs/ext4/file.c b/fs/ext4/file.c
index 8210c1f43556..cefa9835f275 100644
--- a/fs/ext4/file.c
+++ b/fs/ext4/file.c
@@ -744,7 +744,7 @@ const struct file_operations ext4_file_operations = {
 
 const struct inode_operations ext4_file_inode_operations = {
 	.setattr	= ext4_setattr,
-	.getattr	= ext4_getattr,
+	.getattr	= ext4_file_getattr,
 	.listxattr	= ext4_listxattr,
 	.get_acl	= ext4_get_acl,
 	.set_acl	= ext4_set_acl,
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index 4247d8d25687..b9ffa9f4191f 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -5390,11 +5390,46 @@ err_out:
 int ext4_getattr(const struct path *path, struct kstat *stat,
 		 u32 request_mask, unsigned int query_flags)
 {
-	struct inode *inode;
-	unsigned long long delalloc_blocks;
+	struct inode *inode = d_inode(path->dentry);
+	struct ext4_inode *raw_inode;
+	struct ext4_inode_info *ei = EXT4_I(inode);
+	unsigned int flags;
+
+	if (EXT4_FITS_IN_INODE(raw_inode, ei, i_crtime)) {
+		stat->result_mask |= STATX_BTIME;
+		stat->btime.tv_sec = ei->i_crtime.tv_sec;
+		stat->btime.tv_nsec = ei->i_crtime.tv_nsec;
+	}
+
+	flags = ei->i_flags & EXT4_FL_USER_VISIBLE;
+	if (flags & EXT4_APPEND_FL)
+		stat->attributes |= STATX_ATTR_APPEND;
+	if (flags & EXT4_COMPR_FL)
+		stat->attributes |= STATX_ATTR_COMPRESSED;
+	if (flags & EXT4_ENCRYPT_FL)
+		stat->attributes |= STATX_ATTR_ENCRYPTED;
+	if (flags & EXT4_IMMUTABLE_FL)
+		stat->attributes |= STATX_ATTR_IMMUTABLE;
+	if (flags & EXT4_NODUMP_FL)
+		stat->attributes |= STATX_ATTR_NODUMP;
 
-	inode = d_inode(path->dentry);
+	stat->attributes_mask |= (STATX_ATTR_APPEND |
+				  STATX_ATTR_COMPRESSED |
+				  STATX_ATTR_ENCRYPTED |
+				  STATX_ATTR_IMMUTABLE |
+				  STATX_ATTR_NODUMP);
+
 	generic_fillattr(inode, stat);
+	return 0;
+}
+
+int ext4_file_getattr(const struct path *path, struct kstat *stat,
+		      u32 request_mask, unsigned int query_flags)
+{
+	struct inode *inode = d_inode(path->dentry);
+	u64 delalloc_blocks;
+
+	ext4_getattr(path, stat, request_mask, query_flags);
 
 	/*
 	 * If there is inline data in the inode, the inode will normally not
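With this split, ext4 reports creation time and attribute flags through statx while the delayed-allocation block fixup stays in the regular-file path. From userspace, the new fields surface like this (a sketch assuming a glibc recent enough to provide the statx(2) wrapper):

#define _GNU_SOURCE
#include <fcntl.h>
#include <sys/stat.h>
#include <stdio.h>

int main(int argc, char **argv)
{
	struct statx stx;
	const char *path = argc > 1 ? argv[1] : ".";

	if (statx(AT_FDCWD, path, AT_SYMLINK_NOFOLLOW, STATX_BTIME, &stx)) {
		perror("statx");
		return 1;
	}
	if (stx.stx_mask & STATX_BTIME)	/* only if the fs filled it in */
		printf("btime: %lld\n", (long long)stx.stx_btime.tv_sec);
	if (stx.stx_attributes_mask & STATX_ATTR_IMMUTABLE)
		printf("immutable: %s\n",
		       (stx.stx_attributes & STATX_ATTR_IMMUTABLE) ? "yes" : "no");
	return 0;
}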
diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
index 6ad612c576fc..07e5e1405771 100644
--- a/fs/ext4/namei.c
+++ b/fs/ext4/namei.c
@@ -3912,6 +3912,7 @@ const struct inode_operations ext4_dir_inode_operations = {
 	.tmpfile	= ext4_tmpfile,
 	.rename		= ext4_rename2,
 	.setattr	= ext4_setattr,
+	.getattr	= ext4_getattr,
 	.listxattr	= ext4_listxattr,
 	.get_acl	= ext4_get_acl,
 	.set_acl	= ext4_set_acl,
@@ -3920,6 +3921,7 @@ const struct inode_operations ext4_dir_inode_operations = {
 
 const struct inode_operations ext4_special_inode_operations = {
 	.setattr	= ext4_setattr,
+	.getattr	= ext4_getattr,
 	.listxattr	= ext4_listxattr,
 	.get_acl	= ext4_get_acl,
 	.set_acl	= ext4_set_acl,
diff --git a/fs/ext4/symlink.c b/fs/ext4/symlink.c
index 73b184d161fc..5c8fc53cb0e5 100644
--- a/fs/ext4/symlink.c
+++ b/fs/ext4/symlink.c
@@ -85,17 +85,20 @@ errout:
 const struct inode_operations ext4_encrypted_symlink_inode_operations = {
 	.get_link	= ext4_encrypted_get_link,
 	.setattr	= ext4_setattr,
+	.getattr	= ext4_getattr,
 	.listxattr	= ext4_listxattr,
 };
 
 const struct inode_operations ext4_symlink_inode_operations = {
 	.get_link	= page_get_link,
 	.setattr	= ext4_setattr,
+	.getattr	= ext4_getattr,
 	.listxattr	= ext4_listxattr,
 };
 
 const struct inode_operations ext4_fast_symlink_inode_operations = {
 	.get_link	= simple_get_link,
 	.setattr	= ext4_setattr,
+	.getattr	= ext4_getattr,
 	.listxattr	= ext4_listxattr,
 };
diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
index 7163fe014b57..dde861387a40 100644
--- a/fs/hugetlbfs/inode.c
+++ b/fs/hugetlbfs/inode.c
@@ -136,17 +136,26 @@ static int hugetlbfs_file_mmap(struct file *file, struct vm_area_struct *vma)
 	vma->vm_flags |= VM_HUGETLB | VM_DONTEXPAND;
 	vma->vm_ops = &hugetlb_vm_ops;
 
+	/*
+	 * Offset passed to mmap (before page shift) could have been
+	 * negative when represented as a (l)off_t.
+	 */
+	if (((loff_t)vma->vm_pgoff << PAGE_SHIFT) < 0)
+		return -EINVAL;
+
 	if (vma->vm_pgoff & (~huge_page_mask(h) >> PAGE_SHIFT))
 		return -EINVAL;
 
 	vma_len = (loff_t)(vma->vm_end - vma->vm_start);
+	len = vma_len + ((loff_t)vma->vm_pgoff << PAGE_SHIFT);
+	/* check for overflow */
+	if (len < vma_len)
+		return -EINVAL;
 
 	inode_lock(inode);
 	file_accessed(file);
 
 	ret = -ENOMEM;
-	len = vma_len + ((loff_t)vma->vm_pgoff << PAGE_SHIFT);
-
 	if (hugetlb_reserve_pages(inode,
 				vma->vm_pgoff >> huge_page_order(h),
 				len >> huge_page_shift(h), vma,
@@ -155,7 +164,7 @@ static int hugetlbfs_file_mmap(struct file *file, struct vm_area_struct *vma)
 
 	ret = 0;
 	if (vma->vm_flags & VM_WRITE && inode->i_size < len)
-		inode->i_size = len;
+		i_size_write(inode, len);
 out:
 	inode_unlock(inode);
 
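The hugetlbfs checks guard two distinct failure modes: a pgoff whose byte offset no longer fits in loff_t once shifted, and the addition offset + length wrapping. The same checks in a self-contained form (a sketch with uint64_t standing in for the loff_t arithmetic, since the kernel leans on -fno-strict-overflow for the signed version):

#include <stdbool.h>
#include <stdint.h>

#define PAGE_SHIFT 12

static bool mmap_range_valid(uint64_t pgoff, uint64_t vma_len)
{
	uint64_t off, len;

	/* The shifted byte offset must still fit in a signed loff_t. */
	if (pgoff > ((uint64_t)INT64_MAX >> PAGE_SHIFT))
		return false;
	off = pgoff << PAGE_SHIFT;

	/* Unsigned addition wraps; a wrapped sum is smaller than an addend. */
	len = vma_len + off;
	if (len < vma_len)
		return false;

	return len <= INT64_MAX;
}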
diff --git a/fs/orangefs/super.c b/fs/orangefs/super.c
index 67c24351a67f..cd261c8de53a 100644
--- a/fs/orangefs/super.c
+++ b/fs/orangefs/super.c
@@ -263,8 +263,13 @@ int orangefs_remount(struct orangefs_sb_info_s *orangefs_sb)
 		if (!new_op)
 			return -ENOMEM;
 		new_op->upcall.req.features.features = 0;
-		ret = service_operation(new_op, "orangefs_features", 0);
-		orangefs_features = new_op->downcall.resp.features.features;
+		ret = service_operation(new_op, "orangefs_features",
+		    ORANGEFS_OP_PRIORITY | ORANGEFS_OP_NO_MUTEX);
+		if (!ret)
+			orangefs_features =
+			    new_op->downcall.resp.features.features;
+		else
+			orangefs_features = 0;
 		op_release(new_op);
 	} else {
 		orangefs_features = 0;
diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c
index 8f91ec66baa3..d04ea4349909 100644
--- a/fs/proc/proc_sysctl.c
+++ b/fs/proc/proc_sysctl.c
@@ -1074,6 +1074,7 @@ static int sysctl_check_table(const char *path, struct ctl_table *table)
 
 	if ((table->proc_handler == proc_dostring) ||
 	    (table->proc_handler == proc_dointvec) ||
+	    (table->proc_handler == proc_douintvec) ||
 	    (table->proc_handler == proc_dointvec_minmax) ||
 	    (table->proc_handler == proc_dointvec_jiffies) ||
 	    (table->proc_handler == proc_dointvec_userhz_jiffies) ||
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index f08bd31c1081..312578089544 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -900,7 +900,14 @@ static inline void clear_soft_dirty(struct vm_area_struct *vma,
 static inline void clear_soft_dirty_pmd(struct vm_area_struct *vma,
 		unsigned long addr, pmd_t *pmdp)
 {
-	pmd_t pmd = pmdp_huge_get_and_clear(vma->vm_mm, addr, pmdp);
+	pmd_t pmd = *pmdp;
+
+	/* See comment in change_huge_pmd() */
+	pmdp_invalidate(vma, addr, pmdp);
+	if (pmd_dirty(*pmdp))
+		pmd = pmd_mkdirty(pmd);
+	if (pmd_young(*pmdp))
+		pmd = pmd_mkyoung(pmd);
 
 	pmd = pmd_wrprotect(pmd);
 	pmd = pmd_clear_soft_dirty(pmd);
diff --git a/fs/stat.c b/fs/stat.c
index fa0be59340cc..c6c963b2546b 100644
--- a/fs/stat.c
+++ b/fs/stat.c
@@ -130,9 +130,13 @@ EXPORT_SYMBOL(vfs_getattr);
 int vfs_statx_fd(unsigned int fd, struct kstat *stat,
 		 u32 request_mask, unsigned int query_flags)
 {
-	struct fd f = fdget_raw(fd);
+	struct fd f;
 	int error = -EBADF;
 
+	if (query_flags & ~KSTAT_QUERY_FLAGS)
+		return -EINVAL;
+
+	f = fdget_raw(fd);
 	if (f.file) {
 		error = vfs_getattr(&f.file->f_path, stat,
 				    request_mask, query_flags);
@@ -155,9 +159,6 @@ EXPORT_SYMBOL(vfs_statx_fd);
  * Additionally, the use of AT_SYMLINK_NOFOLLOW in flags will prevent a symlink
  * at the given name from being referenced.
  *
- * The caller must have preset stat->request_mask as for vfs_getattr().  The
- * flags are also used to load up stat->query_flags.
- *
  * 0 will be returned on success, and a -ve error code if unsuccessful.
  */
 int vfs_statx(int dfd, const char __user *filename, int flags,
@@ -509,46 +510,38 @@ SYSCALL_DEFINE4(fstatat64, int, dfd, const char __user *, filename,
 }
 #endif /* __ARCH_WANT_STAT64 || __ARCH_WANT_COMPAT_STAT64 */
 
-static inline int __put_timestamp(struct timespec *kts,
-				  struct statx_timestamp __user *uts)
-{
-	return (__put_user(kts->tv_sec, &uts->tv_sec) ||
-		__put_user(kts->tv_nsec, &uts->tv_nsec) ||
-		__put_user(0, &uts->__reserved));
-}
-
-/*
- * Set the statx results.
- */
-static long statx_set_result(struct kstat *stat, struct statx __user *buffer)
+static noinline_for_stack int
+cp_statx(const struct kstat *stat, struct statx __user *buffer)
 {
-	uid_t uid = from_kuid_munged(current_user_ns(), stat->uid);
-	gid_t gid = from_kgid_munged(current_user_ns(), stat->gid);
-
-	if (__put_user(stat->result_mask, &buffer->stx_mask) ||
-	    __put_user(stat->mode, &buffer->stx_mode) ||
-	    __clear_user(&buffer->__spare0, sizeof(buffer->__spare0)) ||
-	    __put_user(stat->nlink, &buffer->stx_nlink) ||
-	    __put_user(uid, &buffer->stx_uid) ||
-	    __put_user(gid, &buffer->stx_gid) ||
-	    __put_user(stat->attributes, &buffer->stx_attributes) ||
-	    __put_user(stat->blksize, &buffer->stx_blksize) ||
-	    __put_user(MAJOR(stat->rdev), &buffer->stx_rdev_major) ||
-	    __put_user(MINOR(stat->rdev), &buffer->stx_rdev_minor) ||
-	    __put_user(MAJOR(stat->dev), &buffer->stx_dev_major) ||
-	    __put_user(MINOR(stat->dev), &buffer->stx_dev_minor) ||
-	    __put_timestamp(&stat->atime, &buffer->stx_atime) ||
-	    __put_timestamp(&stat->btime, &buffer->stx_btime) ||
-	    __put_timestamp(&stat->ctime, &buffer->stx_ctime) ||
-	    __put_timestamp(&stat->mtime, &buffer->stx_mtime) ||
-	    __put_user(stat->ino, &buffer->stx_ino) ||
-	    __put_user(stat->size, &buffer->stx_size) ||
-	    __put_user(stat->blocks, &buffer->stx_blocks) ||
-	    __clear_user(&buffer->__spare1, sizeof(buffer->__spare1)) ||
-	    __clear_user(&buffer->__spare2, sizeof(buffer->__spare2)))
-		return -EFAULT;
-
-	return 0;
+	struct statx tmp;
+
+	memset(&tmp, 0, sizeof(tmp));
+
+	tmp.stx_mask = stat->result_mask;
+	tmp.stx_blksize = stat->blksize;
+	tmp.stx_attributes = stat->attributes;
+	tmp.stx_nlink = stat->nlink;
+	tmp.stx_uid = from_kuid_munged(current_user_ns(), stat->uid);
+	tmp.stx_gid = from_kgid_munged(current_user_ns(), stat->gid);
+	tmp.stx_mode = stat->mode;
+	tmp.stx_ino = stat->ino;
+	tmp.stx_size = stat->size;
+	tmp.stx_blocks = stat->blocks;
+	tmp.stx_attributes_mask = stat->attributes_mask;
+	tmp.stx_atime.tv_sec = stat->atime.tv_sec;
+	tmp.stx_atime.tv_nsec = stat->atime.tv_nsec;
+	tmp.stx_btime.tv_sec = stat->btime.tv_sec;
+	tmp.stx_btime.tv_nsec = stat->btime.tv_nsec;
+	tmp.stx_ctime.tv_sec = stat->ctime.tv_sec;
+	tmp.stx_ctime.tv_nsec = stat->ctime.tv_nsec;
+	tmp.stx_mtime.tv_sec = stat->mtime.tv_sec;
+	tmp.stx_mtime.tv_nsec = stat->mtime.tv_nsec;
+	tmp.stx_rdev_major = MAJOR(stat->rdev);
+	tmp.stx_rdev_minor = MINOR(stat->rdev);
+	tmp.stx_dev_major = MAJOR(stat->dev);
+	tmp.stx_dev_minor = MINOR(stat->dev);
+
+	return copy_to_user(buffer, &tmp, sizeof(tmp)) ? -EFAULT : 0;
 }
 
 /**
@@ -570,10 +563,10 @@ SYSCALL_DEFINE5(statx,
 	struct kstat stat;
 	int error;
 
+	if (mask & STATX__RESERVED)
+		return -EINVAL;
 	if ((flags & AT_STATX_SYNC_TYPE) == AT_STATX_SYNC_TYPE)
 		return -EINVAL;
-	if (!access_ok(VERIFY_WRITE, buffer, sizeof(*buffer)))
-		return -EFAULT;
 
 	if (filename)
 		error = vfs_statx(dfd, filename, flags, &stat, mask);
@@ -581,7 +574,8 @@ SYSCALL_DEFINE5(statx,
 		error = vfs_statx_fd(dfd, &stat, mask, flags);
 	if (error)
 		return error;
-	return statx_set_result(&stat, buffer);
+
+	return cp_statx(&stat, buffer);
 }
 
 /* Caller is here responsible for sufficient locking (ie. inode->i_lock) */
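cp_statx() replaces the chain of __put_user() calls with one memset-initialized struct statx and a single copy_to_user(), which removes the access_ok() pre-check from the syscall and, more importantly, guarantees that padding and __spare fields reach userspace as zeroes rather than stack garbage. The same initialize-then-copy discipline in a plain C sketch (illustrative struct, not the statx layout):

#include <string.h>
#include <unistd.h>

struct reply {
	unsigned int mask;
	unsigned short mode;	/* compiler padding typically follows here */
	unsigned long long size;
	unsigned long long reserved[2];
};

static int send_reply(int fd, unsigned int mask, unsigned short mode,
		      unsigned long long size)
{
	struct reply r;

	memset(&r, 0, sizeof(r));	/* zero padding and reserved fields */
	r.mask = mask;
	r.mode = mode;
	r.size = size;
	/* one bulk copy instead of a field-by-field sequence */
	return write(fd, &r, sizeof(r)) == (ssize_t)sizeof(r) ? 0 : -1;
}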
diff --git a/fs/sysfs/file.c b/fs/sysfs/file.c
index b803213d1307..39c75a86c67f 100644
--- a/fs/sysfs/file.c
+++ b/fs/sysfs/file.c
@@ -108,7 +108,7 @@ static ssize_t sysfs_kf_read(struct kernfs_open_file *of, char *buf,
 {
 	const struct sysfs_ops *ops = sysfs_file_ops(of->kn);
 	struct kobject *kobj = of->kn->parent->priv;
-	size_t len;
+	ssize_t len;
 
 	/*
 	 * If buf != of->prealloc_buf, we don't know how
@@ -117,13 +117,15 @@ static ssize_t sysfs_kf_read(struct kernfs_open_file *of, char *buf,
 	if (WARN_ON_ONCE(buf != of->prealloc_buf))
 		return 0;
 	len = ops->show(kobj, of->kn->priv, buf);
+	if (len < 0)
+		return len;
 	if (pos) {
 		if (len <= pos)
 			return 0;
 		len -= pos;
 		memmove(buf, buf + pos, len);
 	}
-	return min(count, len);
+	return min_t(ssize_t, count, len);
 }
 
 /* kernfs write callback for regular sysfs files */
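The sysfs fix works because a negative ->show() return stored in a size_t becomes a huge unsigned value, so min(count, len) happily returns count and the error is silently lost; declaring len as ssize_t and comparing with min_t(ssize_t, ...) keeps the sign. A small demonstration of the trap:

#include <stdio.h>
#include <sys/types.h>

int main(void)
{
	size_t count = 4096;
	size_t len = (size_t)-5;	/* an -EIO style error, mis-stored */

	/* Unsigned comparison: (size_t)-5 is huge, so count "wins". */
	printf("%zu\n", count < len ? count : len);	/* prints 4096 */

	ssize_t slen = -5;
	/* Signed comparison preserves the error so it can be returned. */
	printf("%zd\n", (ssize_t)count < slen ? (ssize_t)count : slen);
	return 0;
}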
diff --git a/fs/userfaultfd.c b/fs/userfaultfd.c
index 1d227b0fcf49..f7555fc25877 100644
--- a/fs/userfaultfd.c
+++ b/fs/userfaultfd.c
@@ -1756,7 +1756,7 @@ static void userfaultfd_show_fdinfo(struct seq_file *m, struct file *f)
 	 *	protocols: aa:... bb:...
 	 */
 	seq_printf(m, "pending:\t%lu\ntotal:\t%lu\nAPI:\t%Lx:%x:%Lx\n",
-		   pending, total, UFFD_API, UFFD_API_FEATURES,
+		   pending, total, UFFD_API, ctx->features,
 		   UFFD_API_IOCTLS|UFFD_API_RANGE_IOCTLS);
 }
 #endif
diff --git a/fs/xfs/libxfs/xfs_dir2_priv.h b/fs/xfs/libxfs/xfs_dir2_priv.h
index eb00bc133bca..39f8604f764e 100644
--- a/fs/xfs/libxfs/xfs_dir2_priv.h
+++ b/fs/xfs/libxfs/xfs_dir2_priv.h
@@ -125,8 +125,7 @@ extern int xfs_dir2_sf_create(struct xfs_da_args *args, xfs_ino_t pino);
 extern int xfs_dir2_sf_lookup(struct xfs_da_args *args);
 extern int xfs_dir2_sf_removename(struct xfs_da_args *args);
 extern int xfs_dir2_sf_replace(struct xfs_da_args *args);
-extern int xfs_dir2_sf_verify(struct xfs_mount *mp, struct xfs_dir2_sf_hdr *sfp,
-		int size);
+extern int xfs_dir2_sf_verify(struct xfs_inode *ip);
 
 /* xfs_dir2_readdir.c */
 extern int xfs_readdir(struct xfs_inode *dp, struct dir_context *ctx,
diff --git a/fs/xfs/libxfs/xfs_dir2_sf.c b/fs/xfs/libxfs/xfs_dir2_sf.c
index 96b45cd6c63f..e84af093b2ab 100644
--- a/fs/xfs/libxfs/xfs_dir2_sf.c
+++ b/fs/xfs/libxfs/xfs_dir2_sf.c
@@ -632,36 +632,49 @@ xfs_dir2_sf_check(
 /* Verify the consistency of an inline directory. */
 int
 xfs_dir2_sf_verify(
-	struct xfs_mount		*mp,
-	struct xfs_dir2_sf_hdr		*sfp,
-	int				size)
+	struct xfs_inode		*ip)
 {
+	struct xfs_mount		*mp = ip->i_mount;
+	struct xfs_dir2_sf_hdr		*sfp;
 	struct xfs_dir2_sf_entry	*sfep;
 	struct xfs_dir2_sf_entry	*next_sfep;
 	char				*endp;
 	const struct xfs_dir_ops	*dops;
+	struct xfs_ifork		*ifp;
 	xfs_ino_t			ino;
 	int				i;
 	int				i8count;
 	int				offset;
+	int				size;
+	int				error;
 	__uint8_t			filetype;
 
+	ASSERT(ip->i_d.di_format == XFS_DINODE_FMT_LOCAL);
+	/*
+	 * xfs_iread calls us before xfs_setup_inode sets up ip->d_ops,
+	 * so we can only trust the mountpoint to have the right pointer.
+	 */
 	dops = xfs_dir_get_ops(mp, NULL);
 
+	ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK);
+	sfp = (struct xfs_dir2_sf_hdr *)ifp->if_u1.if_data;
+	size = ifp->if_bytes;
+
 	/*
 	 * Give up if the directory is way too short.
 	 */
-	XFS_WANT_CORRUPTED_RETURN(mp, size >
-			offsetof(struct xfs_dir2_sf_hdr, parent));
-	XFS_WANT_CORRUPTED_RETURN(mp, size >=
-			xfs_dir2_sf_hdr_size(sfp->i8count));
+	if (size <= offsetof(struct xfs_dir2_sf_hdr, parent) ||
+	    size < xfs_dir2_sf_hdr_size(sfp->i8count))
+		return -EFSCORRUPTED;
 
 	endp = (char *)sfp + size;
 
 	/* Check .. entry */
 	ino = dops->sf_get_parent_ino(sfp);
 	i8count = ino > XFS_DIR2_MAX_SHORT_INUM;
-	XFS_WANT_CORRUPTED_RETURN(mp, !xfs_dir_ino_validate(mp, ino));
+	error = xfs_dir_ino_validate(mp, ino);
+	if (error)
+		return error;
 	offset = dops->data_first_offset;
 
 	/* Check all reported entries */
@@ -672,12 +685,12 @@ xfs_dir2_sf_verify(
 		 * Check the fixed-offset parts of the structure are
 		 * within the data buffer.
 		 */
-		XFS_WANT_CORRUPTED_RETURN(mp,
-				((char *)sfep + sizeof(*sfep)) < endp);
+		if (((char *)sfep + sizeof(*sfep)) >= endp)
+			return -EFSCORRUPTED;
 
 		/* Don't allow names with known bad length. */
-		XFS_WANT_CORRUPTED_RETURN(mp, sfep->namelen > 0);
-		XFS_WANT_CORRUPTED_RETURN(mp, sfep->namelen < MAXNAMELEN);
+		if (sfep->namelen == 0)
+			return -EFSCORRUPTED;
 
 		/*
 		 * Check that the variable-length part of the structure is
@@ -685,33 +698,39 @@ xfs_dir2_sf_verify(
 		 * name component, so nextentry is an acceptable test.
 		 */
 		next_sfep = dops->sf_nextentry(sfp, sfep);
-		XFS_WANT_CORRUPTED_RETURN(mp, endp >= (char *)next_sfep);
+		if (endp < (char *)next_sfep)
+			return -EFSCORRUPTED;
 
 		/* Check that the offsets always increase. */
-		XFS_WANT_CORRUPTED_RETURN(mp,
-				xfs_dir2_sf_get_offset(sfep) >= offset);
+		if (xfs_dir2_sf_get_offset(sfep) < offset)
+			return -EFSCORRUPTED;
 
 		/* Check the inode number. */
 		ino = dops->sf_get_ino(sfp, sfep);
 		i8count += ino > XFS_DIR2_MAX_SHORT_INUM;
-		XFS_WANT_CORRUPTED_RETURN(mp, !xfs_dir_ino_validate(mp, ino));
+		error = xfs_dir_ino_validate(mp, ino);
+		if (error)
+			return error;
 
 		/* Check the file type. */
 		filetype = dops->sf_get_ftype(sfep);
-		XFS_WANT_CORRUPTED_RETURN(mp, filetype < XFS_DIR3_FT_MAX);
+		if (filetype >= XFS_DIR3_FT_MAX)
+			return -EFSCORRUPTED;
 
 		offset = xfs_dir2_sf_get_offset(sfep) +
 				dops->data_entsize(sfep->namelen);
 
 		sfep = next_sfep;
 	}
-	XFS_WANT_CORRUPTED_RETURN(mp, i8count == sfp->i8count);
-	XFS_WANT_CORRUPTED_RETURN(mp, (void *)sfep == (void *)endp);
+	if (i8count != sfp->i8count)
+		return -EFSCORRUPTED;
+	if ((void *)sfep != (void *)endp)
+		return -EFSCORRUPTED;
 
 	/* Make sure this whole thing ought to be in local format. */
-	XFS_WANT_CORRUPTED_RETURN(mp, offset +
-	       (sfp->count + 2) * (uint)sizeof(xfs_dir2_leaf_entry_t) +
-	       (uint)sizeof(xfs_dir2_block_tail_t) <= mp->m_dir_geo->blksize);
+	if (offset + (sfp->count + 2) * (uint)sizeof(xfs_dir2_leaf_entry_t) +
+	    (uint)sizeof(xfs_dir2_block_tail_t) > mp->m_dir_geo->blksize)
+		return -EFSCORRUPTED;
 
 	return 0;
 }
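xfs_dir2_sf_verify() now treats the inline directory as untrusted input, checking each variable-length entry against the fork's end pointer before dereferencing it and returning -EFSCORRUPTED instead of asserting. The shape of such a walk over length-prefixed records, in a simplified self-contained sketch (a toy record format, not the xfs layout):

#include <stddef.h>

#define EFSCORRUPTED 117

/* Toy records: one length byte, then that many payload bytes. */
static int verify_records(const unsigned char *buf, size_t size)
{
	const unsigned char *p = buf;
	const unsigned char *end = buf + size;

	while (p < end) {
		size_t namelen = *p;		/* header byte is in bounds */

		if (namelen == 0)		/* known-bad length */
			return -EFSCORRUPTED;
		if ((size_t)(end - p) < 1 + namelen)
			return -EFSCORRUPTED;	/* record overruns the fork */
		p += 1 + namelen;
	}
	return 0;	/* the loop can only exit with p == end */
}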
diff --git a/fs/xfs/libxfs/xfs_inode_fork.c b/fs/xfs/libxfs/xfs_inode_fork.c
index 9653e964eda4..8a37efe04de3 100644
--- a/fs/xfs/libxfs/xfs_inode_fork.c
+++ b/fs/xfs/libxfs/xfs_inode_fork.c
@@ -212,6 +212,16 @@ xfs_iformat_fork(
 	if (error)
 		return error;
 
+	/* Check inline dir contents. */
+	if (S_ISDIR(VFS_I(ip)->i_mode) &&
+	    dip->di_format == XFS_DINODE_FMT_LOCAL) {
+		error = xfs_dir2_sf_verify(ip);
+		if (error) {
+			xfs_idestroy_fork(ip, XFS_DATA_FORK);
+			return error;
+		}
+	}
+
 	if (xfs_is_reflink_inode(ip)) {
 		ASSERT(ip->i_cowfp == NULL);
 		xfs_ifork_init_cow(ip);
@@ -322,8 +332,6 @@ xfs_iformat_local(
 	int			whichfork,
 	int			size)
 {
-	int			error;
-
 	/*
 	 * If the size is unreasonable, then something
 	 * is wrong and we just bail out rather than crash in
@@ -339,14 +347,6 @@ xfs_iformat_local(
 		return -EFSCORRUPTED;
 	}
 
-	if (S_ISDIR(VFS_I(ip)->i_mode) && whichfork == XFS_DATA_FORK) {
-		error = xfs_dir2_sf_verify(ip->i_mount,
-				(struct xfs_dir2_sf_hdr *)XFS_DFORK_DPTR(dip),
-				size);
-		if (error)
-			return error;
-	}
-
 	xfs_init_local_fork(ip, whichfork, XFS_DFORK_PTR(dip, whichfork), size);
 	return 0;
 }
@@ -867,7 +867,7 @@ xfs_iextents_copy(
  * In these cases, the format always takes precedence, because the
  * format indicates the current state of the fork.
  */
-int
+void
 xfs_iflush_fork(
 	xfs_inode_t		*ip,
 	xfs_dinode_t		*dip,
@@ -877,7 +877,6 @@ xfs_iflush_fork(
 	char			*cp;
 	xfs_ifork_t		*ifp;
 	xfs_mount_t		*mp;
-	int			error;
 	static const short	brootflag[2] =
 		{ XFS_ILOG_DBROOT, XFS_ILOG_ABROOT };
 	static const short	dataflag[2] =
@@ -886,7 +885,7 @@ xfs_iflush_fork(
 		{ XFS_ILOG_DEXT, XFS_ILOG_AEXT };
 
 	if (!iip)
-		return 0;
+		return;
 	ifp = XFS_IFORK_PTR(ip, whichfork);
 	/*
 	 * This can happen if we gave up in iformat in an error path,
@@ -894,19 +893,12 @@ xfs_iflush_fork(
 	 */
 	if (!ifp) {
 		ASSERT(whichfork == XFS_ATTR_FORK);
-		return 0;
+		return;
 	}
 	cp = XFS_DFORK_PTR(dip, whichfork);
 	mp = ip->i_mount;
 	switch (XFS_IFORK_FORMAT(ip, whichfork)) {
 	case XFS_DINODE_FMT_LOCAL:
-		if (S_ISDIR(VFS_I(ip)->i_mode) && whichfork == XFS_DATA_FORK) {
-			error = xfs_dir2_sf_verify(mp,
-				(struct xfs_dir2_sf_hdr *)ifp->if_u1.if_data,
-				ifp->if_bytes);
-			if (error)
-				return error;
-		}
 		if ((iip->ili_fields & dataflag[whichfork]) &&
 		    (ifp->if_bytes > 0)) {
 			ASSERT(ifp->if_u1.if_data != NULL);
@@ -959,7 +951,6 @@ xfs_iflush_fork(
 		ASSERT(0);
 		break;
 	}
-	return 0;
 }
 
 /*
965/* 956/*
diff --git a/fs/xfs/libxfs/xfs_inode_fork.h b/fs/xfs/libxfs/xfs_inode_fork.h
index 132dc59fdde6..7fb8365326d1 100644
--- a/fs/xfs/libxfs/xfs_inode_fork.h
+++ b/fs/xfs/libxfs/xfs_inode_fork.h
@@ -140,7 +140,7 @@ typedef struct xfs_ifork {
 struct xfs_ifork *xfs_iext_state_to_fork(struct xfs_inode *ip, int state);
 
 int		xfs_iformat_fork(struct xfs_inode *, struct xfs_dinode *);
-int		xfs_iflush_fork(struct xfs_inode *, struct xfs_dinode *,
+void		xfs_iflush_fork(struct xfs_inode *, struct xfs_dinode *,
 				struct xfs_inode_log_item *, int);
 void		xfs_idestroy_fork(struct xfs_inode *, int);
 void		xfs_idata_realloc(struct xfs_inode *, int, int);
diff --git a/fs/xfs/xfs_bmap_util.c b/fs/xfs/xfs_bmap_util.c
index 8b75dcea5966..828532ce0adc 100644
--- a/fs/xfs/xfs_bmap_util.c
+++ b/fs/xfs/xfs_bmap_util.c
@@ -1311,8 +1311,16 @@ xfs_free_file_space(
 	/*
 	 * Now that we've unmap all full blocks we'll have to zero out any
 	 * partial block at the beginning and/or end.  xfs_zero_range is
-	 * smart enough to skip any holes, including those we just created.
+	 * smart enough to skip any holes, including those we just created,
+	 * but we must take care not to zero beyond EOF and enlarge i_size.
 	 */
+
+	if (offset >= XFS_ISIZE(ip))
+		return 0;
+
+	if (offset + len > XFS_ISIZE(ip))
+		len = XFS_ISIZE(ip) - offset;
+
 	return xfs_zero_range(ip, offset, len, NULL);
 }
 
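The xfs_free_file_space() tail now clamps the zeroing range so a hole punched past EOF cannot extend i_size. The clamp itself is two comparisons, shown standalone below (int64_t standing in for xfs_off_t; assumes offset + len does not overflow):

#include <stdint.h>

/* Clamp [offset, offset + len) against isize; returns bytes to zero. */
static int64_t clamp_zero_range(int64_t offset, int64_t len, int64_t isize)
{
	if (offset >= isize)
		return 0;		/* entirely beyond EOF: nothing to do */
	if (offset + len > isize)
		len = isize - offset;	/* trim the part past EOF */
	return len;
}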
diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c
index c7fe2c2123ab..7605d8396596 100644
--- a/fs/xfs/xfs_inode.c
+++ b/fs/xfs/xfs_inode.c
@@ -50,6 +50,7 @@
 #include "xfs_log.h"
 #include "xfs_bmap_btree.h"
 #include "xfs_reflink.h"
+#include "xfs_dir2_priv.h"
 
 kmem_zone_t *xfs_inode_zone;
 
@@ -3475,7 +3476,6 @@ xfs_iflush_int(
 	struct xfs_inode_log_item *iip = ip->i_itemp;
 	struct xfs_dinode	*dip;
 	struct xfs_mount	*mp = ip->i_mount;
-	int			error;
 
 	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED));
 	ASSERT(xfs_isiflocked(ip));
@@ -3547,6 +3547,12 @@ xfs_iflush_int(
 	if (ip->i_d.di_version < 3)
 		ip->i_d.di_flushiter++;
 
+	/* Check the inline directory data. */
+	if (S_ISDIR(VFS_I(ip)->i_mode) &&
+	    ip->i_d.di_format == XFS_DINODE_FMT_LOCAL &&
+	    xfs_dir2_sf_verify(ip))
+		goto corrupt_out;
+
 	/*
 	 * Copy the dirty parts of the inode into the on-disk inode. We always
 	 * copy out the core of the inode, because if the inode is dirty at all
@@ -3558,14 +3564,9 @@ xfs_iflush_int(
 	if (ip->i_d.di_flushiter == DI_MAX_FLUSH)
 		ip->i_d.di_flushiter = 0;
 
-	error = xfs_iflush_fork(ip, dip, iip, XFS_DATA_FORK);
-	if (error)
-		return error;
-	if (XFS_IFORK_Q(ip)) {
-		error = xfs_iflush_fork(ip, dip, iip, XFS_ATTR_FORK);
-		if (error)
-			return error;
-	}
+	xfs_iflush_fork(ip, dip, iip, XFS_DATA_FORK);
+	if (XFS_IFORK_Q(ip))
+		xfs_iflush_fork(ip, dip, iip, XFS_ATTR_FORK);
 	xfs_inobp_check(mp, bp);
 
 	/*
diff --git a/fs/xfs/xfs_iops.c b/fs/xfs/xfs_iops.c
index 229cc6a6d8ef..ebfc13350f9a 100644
--- a/fs/xfs/xfs_iops.c
+++ b/fs/xfs/xfs_iops.c
@@ -516,6 +516,20 @@ xfs_vn_getattr(
 	stat->blocks =
 		XFS_FSB_TO_BB(mp, ip->i_d.di_nblocks + ip->i_delayed_blks);
 
+	if (ip->i_d.di_version == 3) {
+		if (request_mask & STATX_BTIME) {
+			stat->result_mask |= STATX_BTIME;
+			stat->btime.tv_sec = ip->i_d.di_crtime.t_sec;
+			stat->btime.tv_nsec = ip->i_d.di_crtime.t_nsec;
+		}
+	}
+
+	if (ip->i_d.di_flags & XFS_DIFLAG_IMMUTABLE)
+		stat->attributes |= STATX_ATTR_IMMUTABLE;
+	if (ip->i_d.di_flags & XFS_DIFLAG_APPEND)
+		stat->attributes |= STATX_ATTR_APPEND;
+	if (ip->i_d.di_flags & XFS_DIFLAG_NODUMP)
+		stat->attributes |= STATX_ATTR_NODUMP;
 
 	switch (inode->i_mode & S_IFMT) {
 	case S_IFBLK:
diff --git a/fs/xfs/xfs_itable.c b/fs/xfs/xfs_itable.c
index 2a6d9b1558e0..26d67ce3c18d 100644
--- a/fs/xfs/xfs_itable.c
+++ b/fs/xfs/xfs_itable.c
@@ -583,7 +583,7 @@ xfs_inumbers(
 		return error;
 
 	bcount = MIN(left, (int)(PAGE_SIZE / sizeof(*buffer)));
-	buffer = kmem_alloc(bcount * sizeof(*buffer), KM_SLEEP);
+	buffer = kmem_zalloc(bcount * sizeof(*buffer), KM_SLEEP);
 	do {
 		struct xfs_inobt_rec_incore	r;
 		int				stat;
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index 7cdfe167074f..143db9c523e2 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -261,9 +261,9 @@
  */
 #ifndef RO_AFTER_INIT_DATA
 #define RO_AFTER_INIT_DATA						\
-	__start_ro_after_init = .;					\
+	VMLINUX_SYMBOL(__start_ro_after_init) = .;			\
 	*(.data..ro_after_init)						\
-	__end_ro_after_init = .;
+	VMLINUX_SYMBOL(__end_ro_after_init) = .;
 #endif
 
 /*
diff --git a/include/kvm/arm_vgic.h b/include/kvm/arm_vgic.h
index b72dd2ad5f44..c0b3d999c266 100644
--- a/include/kvm/arm_vgic.h
+++ b/include/kvm/arm_vgic.h
@@ -295,6 +295,7 @@ void kvm_vgic_vcpu_early_init(struct kvm_vcpu *vcpu);
 void kvm_vgic_vcpu_destroy(struct kvm_vcpu *vcpu);
 int kvm_vgic_map_resources(struct kvm *kvm);
 int kvm_vgic_hyp_init(void);
+void kvm_vgic_init_cpu_hardware(void);
 
 int kvm_vgic_inject_irq(struct kvm *kvm, int cpuid, unsigned int intid,
 			bool level);
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index b296a9006117..9382c5da7a2e 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -51,6 +51,7 @@ struct blk_mq_hw_ctx {
 
 	atomic_t		nr_active;
 
+	struct delayed_work	delayed_run_work;
 	struct delayed_work	delay_work;
 
 	struct hlist_node	cpuhp_dead;
@@ -238,6 +239,7 @@ void blk_mq_stop_hw_queues(struct request_queue *q);
 void blk_mq_start_hw_queues(struct request_queue *q);
 void blk_mq_start_stopped_hw_queue(struct blk_mq_hw_ctx *hctx, bool async);
 void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async);
+void blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs);
 void blk_mq_run_hw_queues(struct request_queue *q, bool async);
 void blk_mq_delay_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs);
 void blk_mq_tagset_busy_iter(struct blk_mq_tag_set *tagset,
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 5a7da607ca04..7548f332121a 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -610,7 +610,6 @@ struct request_queue {
 #define QUEUE_FLAG_FLUSH_NQ    25	/* flush not queueuable */
 #define QUEUE_FLAG_DAX         26	/* device supports DAX */
 #define QUEUE_FLAG_STATS       27	/* track rq completion times */
-#define QUEUE_FLAG_RESTART     28	/* queue needs restart at completion */
 
 #define QUEUE_FLAG_DEFAULT	((1 << QUEUE_FLAG_IO_STAT) |		\
 				 (1 << QUEUE_FLAG_STACKABLE) |		\
diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
index f6b43fbb141c..af9c86e958bd 100644
--- a/include/linux/cgroup.h
+++ b/include/linux/cgroup.h
@@ -570,6 +570,25 @@ static inline void pr_cont_cgroup_path(struct cgroup *cgrp)
 	pr_cont_kernfs_path(cgrp->kn);
 }
 
+static inline void cgroup_init_kthreadd(void)
+{
+	/*
+	 * kthreadd is inherited by all kthreads, keep it in the root so
+	 * that the new kthreads are guaranteed to stay in the root until
+	 * initialization is finished.
+	 */
+	current->no_cgroup_migration = 1;
+}
+
+static inline void cgroup_kthread_ready(void)
+{
+	/*
+	 * This kthread finished initialization.  The creator should have
+	 * set PF_NO_SETAFFINITY if this kthread should stay in the root.
+	 */
+	current->no_cgroup_migration = 0;
+}
+
 #else /* !CONFIG_CGROUPS */
 
 struct cgroup_subsys_state;
@@ -590,6 +609,8 @@ static inline void cgroup_free(struct task_struct *p) {}
 
 static inline int cgroup_init_early(void) { return 0; }
 static inline int cgroup_init(void) { return 0; }
+static inline void cgroup_init_kthreadd(void) {}
+static inline void cgroup_kthread_ready(void) {}
 
 static inline bool task_under_cgroup_hierarchy(struct task_struct *task,
 					       struct cgroup *ancestor)
diff --git a/include/linux/elevator.h b/include/linux/elevator.h
index aebecc4ed088..22d39e8d4de1 100644
--- a/include/linux/elevator.h
+++ b/include/linux/elevator.h
@@ -211,7 +211,7 @@ extern ssize_t elv_iosched_show(struct request_queue *, char *);
 extern ssize_t elv_iosched_store(struct request_queue *, const char *, size_t);
 
 extern int elevator_init(struct request_queue *, char *);
-extern void elevator_exit(struct elevator_queue *);
+extern void elevator_exit(struct request_queue *, struct elevator_queue *);
 extern int elevator_change(struct request_queue *, const char *);
 extern bool elv_bio_merge_ok(struct request *, struct bio *);
 extern struct elevator_queue *elevator_alloc(struct request_queue *,
diff --git a/include/linux/irqchip/arm-gic.h b/include/linux/irqchip/arm-gic.h
index eafc965b3eb8..dc30f3d057eb 100644
--- a/include/linux/irqchip/arm-gic.h
+++ b/include/linux/irqchip/arm-gic.h
@@ -96,6 +96,9 @@
 #define GICH_MISR_EOI			(1 << 0)
 #define GICH_MISR_U			(1 << 1)
 
+#define GICV_PMR_PRIORITY_SHIFT		3
+#define GICV_PMR_PRIORITY_MASK		(0x1f << GICV_PMR_PRIORITY_SHIFT)
+
 #ifndef __ASSEMBLY__
 
 #include <linux/irqdomain.h>
diff --git a/include/linux/mmu_notifier.h b/include/linux/mmu_notifier.h
index 51891fb0d3ce..c91b3bcd158f 100644
--- a/include/linux/mmu_notifier.h
+++ b/include/linux/mmu_notifier.h
@@ -394,18 +394,6 @@ static inline void mmu_notifier_mm_destroy(struct mm_struct *mm)
 	___pud;								\
 })
 
-#define pmdp_huge_get_and_clear_notify(__mm, __haddr, __pmd)		\
-({									\
-	unsigned long ___haddr = __haddr & HPAGE_PMD_MASK;		\
-	pmd_t ___pmd;							\
-									\
-	___pmd = pmdp_huge_get_and_clear(__mm, __haddr, __pmd);		\
-	mmu_notifier_invalidate_range(__mm, ___haddr,			\
-				      ___haddr + HPAGE_PMD_SIZE);	\
-									\
-	___pmd;								\
-})
-
 /*
  * set_pte_at_notify() sets the pte _after_ running the notifier.
  * This is safe to start by updating the secondary MMUs, because the primary MMU
@@ -489,7 +477,6 @@ static inline void mmu_notifier_mm_destroy(struct mm_struct *mm)
 #define ptep_clear_flush_notify ptep_clear_flush
 #define pmdp_huge_clear_flush_notify pmdp_huge_clear_flush
 #define pudp_huge_clear_flush_notify pudp_huge_clear_flush
-#define pmdp_huge_get_and_clear_notify pmdp_huge_get_and_clear
 #define set_pte_at_notify set_pte_at
 
 #endif /* CONFIG_MMU_NOTIFIER */
diff --git a/include/linux/nvme.h b/include/linux/nvme.h
index c43d435d4225..9061780b141f 100644
--- a/include/linux/nvme.h
+++ b/include/linux/nvme.h
@@ -64,26 +64,26 @@ enum {
  * RDMA_QPTYPE field
  */
 enum {
-	NVMF_RDMA_QPTYPE_CONNECTED	= 0, /* Reliable Connected */
-	NVMF_RDMA_QPTYPE_DATAGRAM	= 1, /* Reliable Datagram */
+	NVMF_RDMA_QPTYPE_CONNECTED	= 1, /* Reliable Connected */
+	NVMF_RDMA_QPTYPE_DATAGRAM	= 2, /* Reliable Datagram */
 };
 
 /* RDMA QP Service Type codes for Discovery Log Page entry TSAS
  * RDMA_QPTYPE field
  */
 enum {
-	NVMF_RDMA_PRTYPE_NOT_SPECIFIED	= 0, /* No Provider Specified */
-	NVMF_RDMA_PRTYPE_IB		= 1, /* InfiniBand */
-	NVMF_RDMA_PRTYPE_ROCE		= 2, /* InfiniBand RoCE */
-	NVMF_RDMA_PRTYPE_ROCEV2		= 3, /* InfiniBand RoCEV2 */
-	NVMF_RDMA_PRTYPE_IWARP		= 4, /* IWARP */
+	NVMF_RDMA_PRTYPE_NOT_SPECIFIED	= 1, /* No Provider Specified */
+	NVMF_RDMA_PRTYPE_IB		= 2, /* InfiniBand */
+	NVMF_RDMA_PRTYPE_ROCE		= 3, /* InfiniBand RoCE */
+	NVMF_RDMA_PRTYPE_ROCEV2		= 4, /* InfiniBand RoCEV2 */
+	NVMF_RDMA_PRTYPE_IWARP		= 5, /* IWARP */
 };
 
 /* RDMA Connection Management Service Type codes for Discovery Log Page
  * entry TSAS RDMA_CMS field
  */
 enum {
-	NVMF_RDMA_CMS_RDMA_CM	= 0, /* Sockets based enpoint addressing */
+	NVMF_RDMA_CMS_RDMA_CM	= 1, /* Sockets based endpoint addressing */
 };
 
 #define NVMF_AQ_DEPTH		32
diff --git a/include/linux/pinctrl/pinctrl.h b/include/linux/pinctrl/pinctrl.h
index 8ce2d87a238b..5e45385c5bdc 100644
--- a/include/linux/pinctrl/pinctrl.h
+++ b/include/linux/pinctrl/pinctrl.h
@@ -145,8 +145,9 @@ struct pinctrl_desc {
 extern int pinctrl_register_and_init(struct pinctrl_desc *pctldesc,
 				     struct device *dev, void *driver_data,
 				     struct pinctrl_dev **pctldev);
+extern int pinctrl_enable(struct pinctrl_dev *pctldev);
 
-/* Please use pinctrl_register_and_init() instead */
+/* Please use pinctrl_register_and_init() and pinctrl_enable() instead */
 extern struct pinctrl_dev *pinctrl_register(struct pinctrl_desc *pctldesc,
 					    struct device *dev, void *driver_data);
 
diff --git a/include/linux/sched.h b/include/linux/sched.h
index d67eee84fd43..4cf9a59a4d08 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -604,6 +604,10 @@ struct task_struct {
 #ifdef CONFIG_COMPAT_BRK
 	unsigned			brk_randomized:1;
 #endif
+#ifdef CONFIG_CGROUPS
+	/* disallow userland-initiated cgroup migration */
+	unsigned			no_cgroup_migration:1;
+#endif
 
 	unsigned long			atomic_flags; /* Flags requiring atomic access. */
 
diff --git a/include/linux/stat.h b/include/linux/stat.h
index c76e524fb34b..64b6b3aece21 100644
--- a/include/linux/stat.h
+++ b/include/linux/stat.h
@@ -26,6 +26,7 @@ struct kstat {
 	unsigned int	nlink;
 	uint32_t	blksize;	/* Preferred I/O size */
 	u64		attributes;
+	u64		attributes_mask;
 #define KSTAT_ATTR_FS_IOC_FLAGS				\
 	(STATX_ATTR_COMPRESSED |			\
 	 STATX_ATTR_IMMUTABLE |				\
diff --git a/include/linux/uio.h b/include/linux/uio.h
index 804e34c6f981..f2d36a3d3005 100644
--- a/include/linux/uio.h
+++ b/include/linux/uio.h
@@ -39,7 +39,10 @@ struct iov_iter {
 	};
 	union {
 		unsigned long nr_segs;
-		int idx;
+		struct {
+			int idx;
+			int start_idx;
+		};
 	};
 };
 
@@ -81,6 +84,7 @@ unsigned long iov_shorten(struct iovec *iov, unsigned long nr_segs, size_t to);
 size_t iov_iter_copy_from_user_atomic(struct page *page,
 		struct iov_iter *i, unsigned long offset, size_t bytes);
 void iov_iter_advance(struct iov_iter *i, size_t bytes);
+void iov_iter_revert(struct iov_iter *i, size_t bytes);
 int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes);
 size_t iov_iter_single_seg_count(const struct iov_iter *i);
 size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
diff --git a/include/linux/virtio.h b/include/linux/virtio.h
index 04b0d3f95043..7edfbdb55a99 100644
--- a/include/linux/virtio.h
+++ b/include/linux/virtio.h
@@ -167,6 +167,7 @@ struct virtio_driver {
 	unsigned int feature_table_size;
 	const unsigned int *feature_table_legacy;
 	unsigned int feature_table_size_legacy;
+	int (*validate)(struct virtio_device *dev);
 	int (*probe)(struct virtio_device *dev);
 	void (*scan)(struct virtio_device *dev);
 	void (*remove)(struct virtio_device *dev);
diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
index 4b784b6e21c0..ccfad0e9c2cd 100644
--- a/include/target/target_core_base.h
+++ b/include/target/target_core_base.h
@@ -117,6 +117,7 @@ enum transport_state_table {
 	TRANSPORT_ISTATE_PROCESSING = 11,
 	TRANSPORT_COMPLETE_QF_WP = 18,
 	TRANSPORT_COMPLETE_QF_OK = 19,
+	TRANSPORT_COMPLETE_QF_ERR = 20,
 };
 
 /* Used for struct se_cmd->se_cmd_flags */
@@ -279,8 +280,6 @@ struct t10_alua_tg_pt_gp {
 	u16	tg_pt_gp_id;
 	int	tg_pt_gp_valid_id;
 	int	tg_pt_gp_alua_supported_states;
-	int	tg_pt_gp_alua_pending_state;
-	int	tg_pt_gp_alua_previous_state;
 	int	tg_pt_gp_alua_access_status;
 	int	tg_pt_gp_alua_access_type;
 	int	tg_pt_gp_nonop_delay_msecs;
@@ -289,18 +288,16 @@ struct t10_alua_tg_pt_gp {
 	int	tg_pt_gp_pref;
 	int	tg_pt_gp_write_metadata;
 	u32	tg_pt_gp_members;
-	atomic_t tg_pt_gp_alua_access_state;
+	int	tg_pt_gp_alua_access_state;
 	atomic_t tg_pt_gp_ref_cnt;
 	spinlock_t tg_pt_gp_lock;
-	struct mutex tg_pt_gp_md_mutex;
+	struct mutex tg_pt_gp_transition_mutex;
 	struct se_device *tg_pt_gp_dev;
 	struct config_group tg_pt_gp_group;
 	struct list_head tg_pt_gp_list;
 	struct list_head tg_pt_gp_lun_list;
 	struct se_lun *tg_pt_gp_alua_lun;
 	struct se_node_acl *tg_pt_gp_alua_nacl;
-	struct work_struct tg_pt_gp_transition_work;
-	struct completion *tg_pt_gp_transition_complete;
 };
 
 struct t10_vpd {
@@ -705,6 +702,7 @@ struct se_lun {
 	u64			unpacked_lun;
 #define SE_LUN_LINK_MAGIC			0xffff7771
 	u32			lun_link_magic;
+	bool			lun_shutdown;
 	bool			lun_access_ro;
 	u32			lun_index;
 
diff --git a/include/uapi/linux/Kbuild b/include/uapi/linux/Kbuild
index dd9820b1c779..f8d9fed17ba9 100644
--- a/include/uapi/linux/Kbuild
+++ b/include/uapi/linux/Kbuild
@@ -445,6 +445,7 @@ header-y += unistd.h
 header-y += unix_diag.h
 header-y += usbdevice_fs.h
 header-y += usbip.h
+header-y += userio.h
 header-y += utime.h
 header-y += utsname.h
 header-y += uuid.h
diff --git a/include/uapi/linux/stat.h b/include/uapi/linux/stat.h
index 51a6b86e3700..d538897b8e08 100644
--- a/include/uapi/linux/stat.h
+++ b/include/uapi/linux/stat.h
@@ -114,7 +114,7 @@ struct statx {
 	__u64	stx_ino;	/* Inode number */
 	__u64	stx_size;	/* File size */
 	__u64	stx_blocks;	/* Number of 512-byte blocks allocated */
-	__u64	__spare1[1];
+	__u64	stx_attributes_mask; /* Mask to show what's supported in stx_attributes */
 	/* 0x40 */
 	struct statx_timestamp	stx_atime;	/* Last access time */
 	struct statx_timestamp	stx_btime;	/* File creation time */
@@ -152,9 +152,10 @@ struct statx {
 #define STATX_BASIC_STATS	0x000007ffU	/* The stuff in the normal stat struct */
 #define STATX_BTIME		0x00000800U	/* Want/got stx_btime */
 #define STATX_ALL		0x00000fffU	/* All currently supported flags */
+#define STATX__RESERVED		0x80000000U	/* Reserved for future struct statx expansion */
 
 /*
- * Attributes to be found in stx_attributes
+ * Attributes to be found in stx_attributes and masked in stx_attributes_mask.
  *
  * These give information about the features or the state of a file that might
  * be of use to ordinary userspace programs such as GUIs or ls rather than
diff --git a/include/uapi/linux/virtio_pci.h b/include/uapi/linux/virtio_pci.h
index 15b4385a2be1..90007a1abcab 100644
--- a/include/uapi/linux/virtio_pci.h
+++ b/include/uapi/linux/virtio_pci.h
@@ -79,7 +79,7 @@
  * configuration space */
 #define VIRTIO_PCI_CONFIG_OFF(msix_enabled)	((msix_enabled) ? 24 : 20)
 /* Deprecated: please use VIRTIO_PCI_CONFIG_OFF instead */
-#define VIRTIO_PCI_CONFIG(dev)	VIRTIO_PCI_CONFIG_OFF((dev)->pci_dev->msix_enabled)
+#define VIRTIO_PCI_CONFIG(dev)	VIRTIO_PCI_CONFIG_OFF((dev)->msix_enabled)
 
 /* Virtio ABI version, this must match exactly */
 #define VIRTIO_PCI_ABI_VERSION	0
diff --git a/kernel/audit.c b/kernel/audit.c
index d54bf5932374..dc202d582aa1 100644
--- a/kernel/audit.c
+++ b/kernel/audit.c
@@ -160,7 +160,6 @@ static LIST_HEAD(audit_freelist);
 
 /* queue msgs to send via kauditd_task */
 static struct sk_buff_head audit_queue;
-static void kauditd_hold_skb(struct sk_buff *skb);
 /* queue msgs due to temporary unicast send problems */
 static struct sk_buff_head audit_retry_queue;
 /* queue msgs waiting for new auditd connection */
@@ -454,30 +453,6 @@ static void auditd_set(int pid, u32 portid, struct net *net)
 }
 
 /**
- * auditd_reset - Disconnect the auditd connection
- *
- * Description:
- * Break the auditd/kauditd connection and move all the queued records into the
- * hold queue in case auditd reconnects.
- */
-static void auditd_reset(void)
-{
-	struct sk_buff *skb;
-
-	/* if it isn't already broken, break the connection */
-	rcu_read_lock();
-	if (auditd_conn.pid)
-		auditd_set(0, 0, NULL);
-	rcu_read_unlock();
-
-	/* flush all of the main and retry queues to the hold queue */
-	while ((skb = skb_dequeue(&audit_retry_queue)))
-		kauditd_hold_skb(skb);
-	while ((skb = skb_dequeue(&audit_queue)))
-		kauditd_hold_skb(skb);
-}
-
-/**
  * kauditd_print_skb - Print the audit record to the ring buffer
  * @skb: audit record
  *
@@ -505,9 +480,6 @@ static void kauditd_rehold_skb(struct sk_buff *skb)
 {
 	/* put the record back in the queue at the same place */
 	skb_queue_head(&audit_hold_queue, skb);
-
-	/* fail the auditd connection */
-	auditd_reset();
 }
 
 /**
@@ -544,9 +516,6 @@ static void kauditd_hold_skb(struct sk_buff *skb)
 	/* we have no other options - drop the message */
 	audit_log_lost("kauditd hold queue overflow");
 	kfree_skb(skb);
-
-	/* fail the auditd connection */
-	auditd_reset();
 }
 
 /**
@@ -567,6 +536,30 @@ static void kauditd_retry_skb(struct sk_buff *skb)
 }
 
 /**
+ * auditd_reset - Disconnect the auditd connection
+ *
+ * Description:
+ * Break the auditd/kauditd connection and move all the queued records into the
+ * hold queue in case auditd reconnects.
+ */
+static void auditd_reset(void)
+{
+	struct sk_buff *skb;
+
+	/* if it isn't already broken, break the connection */
+	rcu_read_lock();
+	if (auditd_conn.pid)
+		auditd_set(0, 0, NULL);
+	rcu_read_unlock();
+
+	/* flush all of the main and retry queues to the hold queue */
+	while ((skb = skb_dequeue(&audit_retry_queue)))
+		kauditd_hold_skb(skb);
+	while ((skb = skb_dequeue(&audit_queue)))
+		kauditd_hold_skb(skb);
+}
+
+/**
  * auditd_send_unicast_skb - Send a record via unicast to auditd
  * @skb: audit record
  *
@@ -758,6 +751,7 @@ static int kauditd_thread(void *dummy)
 					NULL, kauditd_rehold_skb);
 		if (rc < 0) {
 			sk = NULL;
+			auditd_reset();
 			goto main_queue;
 		}
 
@@ -767,6 +761,7 @@ static int kauditd_thread(void *dummy)
 					NULL, kauditd_hold_skb);
 		if (rc < 0) {
 			sk = NULL;
+			auditd_reset();
 			goto main_queue;
 		}
 
@@ -775,16 +770,18 @@ main_queue:
 		 * unicast, dump failed record sends to the retry queue; if
 		 * sk == NULL due to previous failures we will just do the
 		 * multicast send and move the record to the retry queue */
-		kauditd_send_queue(sk, portid, &audit_queue, 1,
-				   kauditd_send_multicast_skb,
-				   kauditd_retry_skb);
+		rc = kauditd_send_queue(sk, portid, &audit_queue, 1,
+					kauditd_send_multicast_skb,
+					kauditd_retry_skb);
+		if (sk == NULL || rc < 0)
+			auditd_reset();
+		sk = NULL;
 
 		/* drop our netns reference, no auditd sends past this line */
 		if (net) {
 			put_net(net);
 			net = NULL;
 		}
-		sk = NULL;
 
 		/* we have processed all the queues so wake everyone */
 		wake_up(&audit_backlog_wait);
diff --git a/kernel/audit.h b/kernel/audit.h
index 0f1cf6d1878a..0d87f8ab8778 100644
--- a/kernel/audit.h
+++ b/kernel/audit.h
@@ -333,13 +333,7 @@ extern u32 audit_sig_sid;
333extern int audit_filter(int msgtype, unsigned int listtype); 333extern int audit_filter(int msgtype, unsigned int listtype);
334 334
335#ifdef CONFIG_AUDITSYSCALL 335#ifdef CONFIG_AUDITSYSCALL
336extern int __audit_signal_info(int sig, struct task_struct *t); 336extern int audit_signal_info(int sig, struct task_struct *t);
337static inline int audit_signal_info(int sig, struct task_struct *t)
338{
339 if (auditd_test_task(t) || (audit_signals && !audit_dummy_context()))
340 return __audit_signal_info(sig, t);
341 return 0;
342}
343extern void audit_filter_inodes(struct task_struct *, struct audit_context *); 337extern void audit_filter_inodes(struct task_struct *, struct audit_context *);
344extern struct list_head *audit_killed_trees(void); 338extern struct list_head *audit_killed_trees(void);
345#else 339#else
diff --git a/kernel/auditsc.c b/kernel/auditsc.c
index e59ffc7fc522..1c2333155893 100644
--- a/kernel/auditsc.c
+++ b/kernel/auditsc.c
@@ -2249,26 +2249,27 @@ void __audit_ptrace(struct task_struct *t)
  * If the audit subsystem is being terminated, record the task (pid)
  * and uid that is doing that.
  */
-int __audit_signal_info(int sig, struct task_struct *t)
+int audit_signal_info(int sig, struct task_struct *t)
 {
 	struct audit_aux_data_pids *axp;
 	struct task_struct *tsk = current;
 	struct audit_context *ctx = tsk->audit_context;
 	kuid_t uid = current_uid(), t_uid = task_uid(t);
 
-	if (auditd_test_task(t)) {
-		if (sig == SIGTERM || sig == SIGHUP || sig == SIGUSR1 || sig == SIGUSR2) {
-			audit_sig_pid = task_tgid_nr(tsk);
-			if (uid_valid(tsk->loginuid))
-				audit_sig_uid = tsk->loginuid;
-			else
-				audit_sig_uid = uid;
-			security_task_getsecid(tsk, &audit_sig_sid);
-		}
-		if (!audit_signals || audit_dummy_context())
-			return 0;
+	if (auditd_test_task(t) &&
+	    (sig == SIGTERM || sig == SIGHUP ||
+	     sig == SIGUSR1 || sig == SIGUSR2)) {
+		audit_sig_pid = task_tgid_nr(tsk);
+		if (uid_valid(tsk->loginuid))
+			audit_sig_uid = tsk->loginuid;
+		else
+			audit_sig_uid = uid;
+		security_task_getsecid(tsk, &audit_sig_sid);
 	}
 
+	if (!audit_signals || audit_dummy_context())
+		return 0;
+
 	/* optimize the common case by putting first signal recipient directly
 	 * in audit_context */
 	if (!ctx->target_pid) {
diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
index f45827e205d3..b4f1cb0c5ac7 100644
--- a/kernel/bpf/core.c
+++ b/kernel/bpf/core.c
@@ -1162,12 +1162,12 @@ out:
 	LD_ABS_W: /* BPF_R0 = ntohl(*(u32 *) (skb->data + imm32)) */
 		off = IMM;
 load_word:
-		/* BPF_LD + BPD_ABS and BPF_LD + BPF_IND insns are
-		 * only appearing in the programs where ctx ==
-		 * skb. All programs keep 'ctx' in regs[BPF_REG_CTX]
-		 * == BPF_R6, bpf_convert_filter() saves it in BPF_R6,
-		 * internal BPF verifier will check that BPF_R6 ==
-		 * ctx.
+		/* BPF_LD + BPD_ABS and BPF_LD + BPF_IND insns are only
+		 * appearing in the programs where ctx == skb
+		 * (see may_access_skb() in the verifier). All programs
+		 * keep 'ctx' in regs[BPF_REG_CTX] == BPF_R6,
+		 * bpf_convert_filter() saves it in BPF_R6, internal BPF
+		 * verifier will check that BPF_R6 == ctx.
 		 *
 		 * BPF_ABS and BPF_IND are wrappers of function calls,
 		 * so they scratch BPF_R1-BPF_R5 registers, preserve
diff --git a/kernel/cgroup/cgroup-v1.c b/kernel/cgroup/cgroup-v1.c
index 1dc22f6b49f5..12e19f0636ea 100644
--- a/kernel/cgroup/cgroup-v1.c
+++ b/kernel/cgroup/cgroup-v1.c
@@ -1146,7 +1146,7 @@ struct dentry *cgroup1_mount(struct file_system_type *fs_type, int flags,
 	 * path is super cold.  Let's just sleep a bit and retry.
 	 */
 	pinned_sb = kernfs_pin_sb(root->kf_root, NULL);
-	if (IS_ERR(pinned_sb) ||
+	if (IS_ERR_OR_NULL(pinned_sb) ||
 	    !percpu_ref_tryget_live(&root->cgrp.self.refcnt)) {
 		mutex_unlock(&cgroup_mutex);
 		if (!IS_ERR_OR_NULL(pinned_sb))
diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c
index 48851327a15e..687f5e0194ef 100644
--- a/kernel/cgroup/cgroup.c
+++ b/kernel/cgroup/cgroup.c
@@ -2425,11 +2425,12 @@ ssize_t __cgroup_procs_write(struct kernfs_open_file *of, char *buf,
 		tsk = tsk->group_leader;
 
 	/*
-	 * Workqueue threads may acquire PF_NO_SETAFFINITY and become
-	 * trapped in a cpuset, or RT worker may be born in a cgroup
-	 * with no rt_runtime allocated.  Just say no.
+	 * kthreads may acquire PF_NO_SETAFFINITY during initialization.
+	 * If userland migrates such a kthread to a non-root cgroup, it can
+	 * become trapped in a cpuset, or RT kthread may be born in a
+	 * cgroup with no rt_runtime allocated.  Just say no.
 	 */
-	if (tsk == kthreadd_task || (tsk->flags & PF_NO_SETAFFINITY)) {
+	if (tsk->no_cgroup_migration || (tsk->flags & PF_NO_SETAFFINITY)) {
 		ret = -EINVAL;
 		goto out_unlock_rcu;
 	}
diff --git a/kernel/irq/affinity.c b/kernel/irq/affinity.c
index 4544b115f5eb..d052947fe785 100644
--- a/kernel/irq/affinity.c
+++ b/kernel/irq/affinity.c
@@ -59,7 +59,7 @@ static int get_nodes_in_cpumask(const struct cpumask *mask, nodemask_t *nodemsk)
 struct cpumask *
 irq_create_affinity_masks(int nvecs, const struct irq_affinity *affd)
 {
-	int n, nodes, vecs_per_node, cpus_per_vec, extra_vecs, curvec;
+	int n, nodes, cpus_per_vec, extra_vecs, curvec;
 	int affv = nvecs - affd->pre_vectors - affd->post_vectors;
 	int last_affv = affv + affd->pre_vectors;
 	nodemask_t nodemsk = NODE_MASK_NONE;
@@ -94,19 +94,21 @@ irq_create_affinity_masks(int nvecs, const struct irq_affinity *affd)
 		goto done;
 	}
 
-	/* Spread the vectors per node */
-	vecs_per_node = affv / nodes;
-	/* Account for rounding errors */
-	extra_vecs = affv - (nodes * vecs_per_node);
-
 	for_each_node_mask(n, nodemsk) {
-		int ncpus, v, vecs_to_assign = vecs_per_node;
+		int ncpus, v, vecs_to_assign, vecs_per_node;
+
+		/* Spread the vectors per node */
+		vecs_per_node = (affv - curvec) / nodes;
 
 		/* Get the cpus on this node which are in the mask */
 		cpumask_and(nmsk, cpu_online_mask, cpumask_of_node(n));
 
 		/* Calculate the number of cpus per vector */
 		ncpus = cpumask_weight(nmsk);
+		vecs_to_assign = min(vecs_per_node, ncpus);
+
+		/* Account for rounding errors */
+		extra_vecs = ncpus - vecs_to_assign * (ncpus / vecs_to_assign);
 
 		for (v = 0; curvec < last_affv && v < vecs_to_assign;
 		     curvec++, v++) {
@@ -115,14 +117,14 @@ irq_create_affinity_masks(int nvecs, const struct irq_affinity *affd)
 
 			/* Account for extra vectors to compensate rounding errors */
 			if (extra_vecs) {
 				cpus_per_vec++;
-				if (!--extra_vecs)
-					vecs_per_node++;
+				--extra_vecs;
 			}
 			irq_spread_init_one(masks + curvec, nmsk, cpus_per_vec);
 		}
 
 		if (curvec >= last_affv)
 			break;
+		--nodes;
 	}
 
 done:
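[Worked example for the rewritten spreading loop, assuming pre_vectors == 0 and enough online CPUs per node: with affv = 10 vectors over nodes = 3, the first node gets (10 - 0) / 3 = 3 vectors, the second (10 - 3) / 2 = 3, and the last (10 - 6) / 1 = 4. The remainder thus lands on the later nodes instead of being patched in through the old extra_vecs/vecs_per_node++ adjustment; extra_vecs now only balances leftover CPUs across the vectors within one node.]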
diff --git a/kernel/kthread.c b/kernel/kthread.c
index 2f26adea0f84..26db528c1d88 100644
--- a/kernel/kthread.c
+++ b/kernel/kthread.c
@@ -20,6 +20,7 @@
 #include <linux/freezer.h>
 #include <linux/ptrace.h>
 #include <linux/uaccess.h>
+#include <linux/cgroup.h>
 #include <trace/events/sched.h>
 
 static DEFINE_SPINLOCK(kthread_create_lock);
@@ -225,6 +226,7 @@ static int kthread(void *_create)
 
 	ret = -EINTR;
 	if (!test_bit(KTHREAD_SHOULD_STOP, &self->flags)) {
+		cgroup_kthread_ready();
 		__kthread_parkme(self);
 		ret = threadfn(data);
 	}
@@ -538,6 +540,7 @@ int kthreadd(void *unused)
 	set_mems_allowed(node_states[N_MEMORY]);
 
 	current->flags |= PF_NOFREEZE;
+	cgroup_init_kthreadd();
 
 	for (;;) {
 		set_current_state(TASK_INTERRUPTIBLE);
diff --git a/kernel/ptrace.c b/kernel/ptrace.c
index 0af928712174..266ddcc1d8bb 100644
--- a/kernel/ptrace.c
+++ b/kernel/ptrace.c
@@ -184,11 +184,17 @@ static void ptrace_unfreeze_traced(struct task_struct *task)
 
 	WARN_ON(!task->ptrace || task->parent != current);
 
+	/*
+	 * PTRACE_LISTEN can allow ptrace_trap_notify to wake us up remotely.
+	 * Recheck state under the lock to close this race.
+	 */
 	spin_lock_irq(&task->sighand->siglock);
-	if (__fatal_signal_pending(task))
-		wake_up_state(task, __TASK_TRACED);
-	else
-		task->state = TASK_TRACED;
+	if (task->state == __TASK_TRACED) {
+		if (__fatal_signal_pending(task))
+			wake_up_state(task, __TASK_TRACED);
+		else
+			task->state = TASK_TRACED;
+	}
 	spin_unlock_irq(&task->sighand->siglock);
 }
 
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index acf0a5a06da7..8c8714fcb53c 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -2133,9 +2133,12 @@ static int do_proc_douintvec_conv(bool *negp, unsigned long *lvalp,
2133 if (write) { 2133 if (write) {
2134 if (*negp) 2134 if (*negp)
2135 return -EINVAL; 2135 return -EINVAL;
2136 if (*lvalp > UINT_MAX)
2137 return -EINVAL;
2136 *valp = *lvalp; 2138 *valp = *lvalp;
2137 } else { 2139 } else {
2138 unsigned int val = *valp; 2140 unsigned int val = *valp;
2141 *negp = false;
2139 *lvalp = (unsigned long)val; 2142 *lvalp = (unsigned long)val;
2140 } 2143 }
2141 return 0; 2144 return 0;
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 96fc3c043ad6..54e7a90db848 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -4826,9 +4826,9 @@ static __init int test_ringbuffer(void)
 		rb_data[cpu].cnt = cpu;
 		rb_threads[cpu] = kthread_create(rb_test, &rb_data[cpu],
 						 "rbtester/%d", cpu);
-		if (WARN_ON(!rb_threads[cpu])) {
+		if (WARN_ON(IS_ERR(rb_threads[cpu]))) {
 			pr_cont("FAILED\n");
-			ret = -1;
+			ret = PTR_ERR(rb_threads[cpu]);
 			goto out_free;
 		}
 
@@ -4838,9 +4838,9 @@ static __init int test_ringbuffer(void)
 
 	/* Now create the rb hammer! */
 	rb_hammer = kthread_run(rb_hammer_test, NULL, "rbhammer");
-	if (WARN_ON(!rb_hammer)) {
+	if (WARN_ON(IS_ERR(rb_hammer))) {
 		pr_cont("FAILED\n");
-		ret = -1;
+		ret = PTR_ERR(rb_hammer);
 		goto out_free;
 	}
 
diff --git a/lib/iov_iter.c b/lib/iov_iter.c
index e68604ae3ced..60abc44385b7 100644
--- a/lib/iov_iter.c
+++ b/lib/iov_iter.c
@@ -786,6 +786,68 @@ void iov_iter_advance(struct iov_iter *i, size_t size)
 }
 EXPORT_SYMBOL(iov_iter_advance);
 
+void iov_iter_revert(struct iov_iter *i, size_t unroll)
+{
+	if (!unroll)
+		return;
+	i->count += unroll;
+	if (unlikely(i->type & ITER_PIPE)) {
+		struct pipe_inode_info *pipe = i->pipe;
+		int idx = i->idx;
+		size_t off = i->iov_offset;
+		while (1) {
+			size_t n = off - pipe->bufs[idx].offset;
+			if (unroll < n) {
+				off -= (n - unroll);
+				break;
+			}
+			unroll -= n;
+			if (!unroll && idx == i->start_idx) {
+				off = 0;
+				break;
+			}
+			if (!idx--)
+				idx = pipe->buffers - 1;
+			off = pipe->bufs[idx].offset + pipe->bufs[idx].len;
+		}
+		i->iov_offset = off;
+		i->idx = idx;
+		pipe_truncate(i);
+		return;
+	}
+	if (unroll <= i->iov_offset) {
+		i->iov_offset -= unroll;
+		return;
+	}
+	unroll -= i->iov_offset;
+	if (i->type & ITER_BVEC) {
+		const struct bio_vec *bvec = i->bvec;
+		while (1) {
+			size_t n = (--bvec)->bv_len;
+			i->nr_segs++;
+			if (unroll <= n) {
+				i->bvec = bvec;
+				i->iov_offset = n - unroll;
+				return;
+			}
+			unroll -= n;
+		}
+	} else { /* same logics for iovec and kvec */
+		const struct iovec *iov = i->iov;
+		while (1) {
+			size_t n = (--iov)->iov_len;
+			i->nr_segs++;
+			if (unroll <= n) {
+				i->iov = iov;
+				i->iov_offset = n - unroll;
+				return;
+			}
+			unroll -= n;
+		}
+	}
+}
+EXPORT_SYMBOL(iov_iter_revert);
+
 /*
  * Return the count of just the current iov_iter segment.
  */
@@ -839,6 +901,7 @@ void iov_iter_pipe(struct iov_iter *i, int direction,
 	i->idx = (pipe->curbuf + pipe->nrbufs) & (pipe->buffers - 1);
 	i->iov_offset = 0;
 	i->count = count;
+	i->start_idx = i->idx;
 }
 EXPORT_SYMBOL(iov_iter_pipe);
 
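[Note on the new iov_iter_revert(): it lets a caller that advanced the iterator and then hit a failure roll back to where it started. A minimal sketch of that pattern, where do_copy() stands in for whatever copy step advanced the iterator and is not a real API:

	size_t wanted = iov_iter_count(iter);
	size_t copied = do_copy(iter, buf, wanted);	/* advances iter as it copies */
	if (copied && error_detected)
		iov_iter_revert(iter, copied);		/* back to the pre-copy position */
]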
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 1ebc93e179f3..f3c4f9d22821 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -240,18 +240,18 @@ static ssize_t defrag_store(struct kobject *kobj,
 		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
 		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags);
 		set_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
-	} else if (!memcmp("defer", buf,
-		   min(sizeof("defer")-1, count))) {
-		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
-		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
-		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags);
-		set_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags);
 	} else if (!memcmp("defer+madvise", buf,
 		   min(sizeof("defer+madvise")-1, count))) {
 		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
 		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags);
 		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags);
 		set_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
+	} else if (!memcmp("defer", buf,
+		   min(sizeof("defer")-1, count))) {
+		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
+		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
+		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags);
+		set_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags);
 	} else if (!memcmp("madvise", buf,
 		   min(sizeof("madvise")-1, count))) {
 		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
@@ -1568,8 +1568,7 @@ bool madvise_free_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
 		deactivate_page(page);
 
 	if (pmd_young(orig_pmd) || pmd_dirty(orig_pmd)) {
-		orig_pmd = pmdp_huge_get_and_clear_full(tlb->mm, addr, pmd,
-			tlb->fullmm);
+		pmdp_invalidate(vma, addr, pmd);
 		orig_pmd = pmd_mkold(orig_pmd);
 		orig_pmd = pmd_mkclean(orig_pmd);
 
@@ -1724,37 +1723,69 @@ int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
 {
 	struct mm_struct *mm = vma->vm_mm;
 	spinlock_t *ptl;
-	int ret = 0;
+	pmd_t entry;
+	bool preserve_write;
+	int ret;
 
 	ptl = __pmd_trans_huge_lock(pmd, vma);
-	if (ptl) {
-		pmd_t entry;
-		bool preserve_write = prot_numa && pmd_write(*pmd);
-		ret = 1;
+	if (!ptl)
+		return 0;
 
-		/*
-		 * Avoid trapping faults against the zero page. The read-only
-		 * data is likely to be read-cached on the local CPU and
-		 * local/remote hits to the zero page are not interesting.
-		 */
-		if (prot_numa && is_huge_zero_pmd(*pmd)) {
-			spin_unlock(ptl);
-			return ret;
-		}
+	preserve_write = prot_numa && pmd_write(*pmd);
+	ret = 1;
 
-		if (!prot_numa || !pmd_protnone(*pmd)) {
-			entry = pmdp_huge_get_and_clear_notify(mm, addr, pmd);
-			entry = pmd_modify(entry, newprot);
-			if (preserve_write)
-				entry = pmd_mk_savedwrite(entry);
-			ret = HPAGE_PMD_NR;
-			set_pmd_at(mm, addr, pmd, entry);
-			BUG_ON(vma_is_anonymous(vma) && !preserve_write &&
-					pmd_write(entry));
-		}
-		spin_unlock(ptl);
-	}
+	/*
+	 * Avoid trapping faults against the zero page. The read-only
+	 * data is likely to be read-cached on the local CPU and
+	 * local/remote hits to the zero page are not interesting.
+	 */
+	if (prot_numa && is_huge_zero_pmd(*pmd))
+		goto unlock;
+
+	if (prot_numa && pmd_protnone(*pmd))
+		goto unlock;
+
+	/*
+	 * In case prot_numa, we are under down_read(mmap_sem). It's critical
+	 * to not clear pmd intermittently to avoid race with MADV_DONTNEED
+	 * which is also under down_read(mmap_sem):
+	 *
+	 *	CPU0:				CPU1:
+	 *				change_huge_pmd(prot_numa=1)
+	 *				 pmdp_huge_get_and_clear_notify()
+	 * madvise_dontneed()
+	 *  zap_pmd_range()
+	 *   pmd_trans_huge(*pmd) == 0 (without ptl)
+	 *   // skip the pmd
+	 *				 set_pmd_at();
+	 *				 // pmd is re-established
+	 *
+	 * The race makes MADV_DONTNEED miss the huge pmd and don't clear it
+	 * which may break userspace.
+	 *
+	 * pmdp_invalidate() is required to make sure we don't miss
+	 * dirty/young flags set by hardware.
+	 */
+	entry = *pmd;
+	pmdp_invalidate(vma, addr, pmd);
 
+	/*
+	 * Recover dirty/young flags.  It relies on pmdp_invalidate to not
+	 * corrupt them.
+	 */
+	if (pmd_dirty(*pmd))
+		entry = pmd_mkdirty(entry);
+	if (pmd_young(*pmd))
+		entry = pmd_mkyoung(entry);
+
+	entry = pmd_modify(entry, newprot);
+	if (preserve_write)
+		entry = pmd_mk_savedwrite(entry);
+	ret = HPAGE_PMD_NR;
+	set_pmd_at(mm, addr, pmd, entry);
+	BUG_ON(vma_is_anonymous(vma) && !preserve_write && pmd_write(entry));
+unlock:
+	spin_unlock(ptl);
 	return ret;
 }
 
diff --git a/mm/internal.h b/mm/internal.h
index ccfc2a2969f4..266efaeaa370 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -481,6 +481,13 @@ unsigned long reclaim_clean_pages_from_list(struct zone *zone,
481enum ttu_flags; 481enum ttu_flags;
482struct tlbflush_unmap_batch; 482struct tlbflush_unmap_batch;
483 483
484
485/*
486 * only for MM internal work items which do not depend on
487 * any allocations or locks which might depend on allocations
488 */
489extern struct workqueue_struct *mm_percpu_wq;
490
484#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH 491#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
485void try_to_unmap_flush(void); 492void try_to_unmap_flush(void);
486void try_to_unmap_flush_dirty(void); 493void try_to_unmap_flush_dirty(void);
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 75b2745bac41..37d0b334bfe9 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -1529,7 +1529,6 @@ COMPAT_SYSCALL_DEFINE5(get_mempolicy, int __user *, policy,
 COMPAT_SYSCALL_DEFINE3(set_mempolicy, int, mode, compat_ulong_t __user *, nmask,
 		       compat_ulong_t, maxnode)
 {
-	long err = 0;
 	unsigned long __user *nm = NULL;
 	unsigned long nr_bits, alloc_size;
 	DECLARE_BITMAP(bm, MAX_NUMNODES);
@@ -1538,14 +1537,13 @@ COMPAT_SYSCALL_DEFINE3(set_mempolicy, int, mode, compat_ulong_t __user *, nmask,
 	alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
 
 	if (nmask) {
-		err = compat_get_bitmap(bm, nmask, nr_bits);
+		if (compat_get_bitmap(bm, nmask, nr_bits))
+			return -EFAULT;
 		nm = compat_alloc_user_space(alloc_size);
-		err |= copy_to_user(nm, bm, alloc_size);
+		if (copy_to_user(nm, bm, alloc_size))
+			return -EFAULT;
 	}
 
-	if (err)
-		return -EFAULT;
-
 	return sys_set_mempolicy(mode, nm, nr_bits+1);
 }
 
@@ -1553,7 +1551,6 @@ COMPAT_SYSCALL_DEFINE6(mbind, compat_ulong_t, start, compat_ulong_t, len,
 		       compat_ulong_t, mode, compat_ulong_t __user *, nmask,
 		       compat_ulong_t, maxnode, compat_ulong_t, flags)
 {
-	long err = 0;
 	unsigned long __user *nm = NULL;
 	unsigned long nr_bits, alloc_size;
 	nodemask_t bm;
@@ -1562,14 +1559,13 @@ COMPAT_SYSCALL_DEFINE6(mbind, compat_ulong_t, start, compat_ulong_t, len,
 	alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
 
 	if (nmask) {
-		err = compat_get_bitmap(nodes_addr(bm), nmask, nr_bits);
+		if (compat_get_bitmap(nodes_addr(bm), nmask, nr_bits))
+			return -EFAULT;
 		nm = compat_alloc_user_space(alloc_size);
-		err |= copy_to_user(nm, nodes_addr(bm), alloc_size);
+		if (copy_to_user(nm, nodes_addr(bm), alloc_size))
+			return -EFAULT;
 	}
 
-	if (err)
-		return -EFAULT;
-
 	return sys_mbind(start, len, mode, nm, nr_bits+1, flags);
 }
 
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 6cbde310abed..f3d603cef2c0 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -2373,6 +2373,13 @@ void drain_all_pages(struct zone *zone)
 	 */
 	static cpumask_t cpus_with_pcps;
 
+	/*
+	 * Make sure nobody triggers this path before mm_percpu_wq is fully
+	 * initialized.
+	 */
+	if (WARN_ON_ONCE(!mm_percpu_wq))
+		return;
+
 	/* Workqueues cannot recurse */
 	if (current->flags & PF_WQ_WORKER)
 		return;
@@ -2422,7 +2429,7 @@ void drain_all_pages(struct zone *zone)
 	for_each_cpu(cpu, &cpus_with_pcps) {
 		struct work_struct *work = per_cpu_ptr(&pcpu_drain, cpu);
 		INIT_WORK(work, drain_local_pages_wq);
-		schedule_work_on(cpu, work);
+		queue_work_on(cpu, mm_percpu_wq, work);
 	}
 	for_each_cpu(cpu, &cpus_with_pcps)
 		flush_work(per_cpu_ptr(&pcpu_drain, cpu));
@@ -4519,13 +4526,13 @@ void show_free_areas(unsigned int filter, nodemask_t *nodemask)
 			K(node_page_state(pgdat, NR_FILE_MAPPED)),
 			K(node_page_state(pgdat, NR_FILE_DIRTY)),
 			K(node_page_state(pgdat, NR_WRITEBACK)),
+			K(node_page_state(pgdat, NR_SHMEM)),
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 			K(node_page_state(pgdat, NR_SHMEM_THPS) * HPAGE_PMD_NR),
 			K(node_page_state(pgdat, NR_SHMEM_PMDMAPPED)
 					* HPAGE_PMD_NR),
 			K(node_page_state(pgdat, NR_ANON_THPS) * HPAGE_PMD_NR),
 #endif
-			K(node_page_state(pgdat, NR_SHMEM)),
 			K(node_page_state(pgdat, NR_WRITEBACK_TEMP)),
 			K(node_page_state(pgdat, NR_UNSTABLE_NFS)),
 			node_page_state(pgdat, NR_PAGES_SCANNED),
diff --git a/mm/page_vma_mapped.c b/mm/page_vma_mapped.c
index c4c9def8ffea..de9c40d7304a 100644
--- a/mm/page_vma_mapped.c
+++ b/mm/page_vma_mapped.c
@@ -111,12 +111,8 @@ bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw)
 	if (pvmw->pmd && !pvmw->pte)
 		return not_found(pvmw);
 
-	/* Only for THP, seek to next pte entry makes sense */
-	if (pvmw->pte) {
-		if (!PageTransHuge(pvmw->page) || PageHuge(pvmw->page))
-			return not_found(pvmw);
+	if (pvmw->pte)
 		goto next_pte;
-	}
 
 	if (unlikely(PageHuge(pvmw->page))) {
 		/* when pud is not present, pte will be NULL */
@@ -165,9 +161,14 @@ restart:
 	while (1) {
 		if (check_pte(pvmw))
 			return true;
-next_pte:	do {
+next_pte:
+		/* Seek to next pte only makes sense for THP */
+		if (!PageTransHuge(pvmw->page) || PageHuge(pvmw->page))
+			return not_found(pvmw);
+		do {
 			pvmw->address += PAGE_SIZE;
-			if (pvmw->address >=
+			if (pvmw->address >= pvmw->vma->vm_end ||
+			    pvmw->address >=
 					__vma_address(pvmw->page, pvmw->vma) +
 					hpage_nr_pages(pvmw->page) * PAGE_SIZE)
 				return not_found(pvmw);
diff --git a/mm/swap.c b/mm/swap.c
index c4910f14f957..5dabf444d724 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -670,30 +670,19 @@ static void lru_add_drain_per_cpu(struct work_struct *dummy)
 
 static DEFINE_PER_CPU(struct work_struct, lru_add_drain_work);
 
-/*
- * lru_add_drain_wq is used to do lru_add_drain_all() from a WQ_MEM_RECLAIM
- * workqueue, aiding in getting memory freed.
- */
-static struct workqueue_struct *lru_add_drain_wq;
-
-static int __init lru_init(void)
-{
-	lru_add_drain_wq = alloc_workqueue("lru-add-drain", WQ_MEM_RECLAIM, 0);
-
-	if (WARN(!lru_add_drain_wq,
-		"Failed to create workqueue lru_add_drain_wq"))
-		return -ENOMEM;
-
-	return 0;
-}
-early_initcall(lru_init);
-
 void lru_add_drain_all(void)
 {
 	static DEFINE_MUTEX(lock);
 	static struct cpumask has_work;
 	int cpu;
 
+	/*
+	 * Make sure nobody triggers this path before mm_percpu_wq is fully
+	 * initialized.
+	 */
+	if (WARN_ON(!mm_percpu_wq))
+		return;
+
 	mutex_lock(&lock);
 	get_online_cpus();
 	cpumask_clear(&has_work);
@@ -707,7 +696,7 @@ void lru_add_drain_all(void)
 		    pagevec_count(&per_cpu(lru_deactivate_pvecs, cpu)) ||
 		    need_activate_page_drain(cpu)) {
 			INIT_WORK(work, lru_add_drain_per_cpu);
-			queue_work_on(cpu, lru_add_drain_wq, work);
+			queue_work_on(cpu, mm_percpu_wq, work);
 			cpumask_set_cpu(cpu, &has_work);
 		}
 	}
diff --git a/mm/swap_cgroup.c b/mm/swap_cgroup.c
index 310ac0b8f974..ac6318a064d3 100644
--- a/mm/swap_cgroup.c
+++ b/mm/swap_cgroup.c
@@ -201,6 +201,8 @@ void swap_cgroup_swapoff(int type)
201 struct page *page = map[i]; 201 struct page *page = map[i];
202 if (page) 202 if (page)
203 __free_page(page); 203 __free_page(page);
204 if (!(i % SWAP_CLUSTER_MAX))
205 cond_resched();
204 } 206 }
205 vfree(map); 207 vfree(map);
206 } 208 }
diff --git a/mm/vmstat.c b/mm/vmstat.c
index 89f95396ec46..809025ed97ea 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -1552,7 +1552,6 @@ static const struct file_operations proc_vmstat_file_operations = {
 #endif /* CONFIG_PROC_FS */
 
 #ifdef CONFIG_SMP
-static struct workqueue_struct *vmstat_wq;
 static DEFINE_PER_CPU(struct delayed_work, vmstat_work);
 int sysctl_stat_interval __read_mostly = HZ;
 
@@ -1623,7 +1622,7 @@ static void vmstat_update(struct work_struct *w)
 		 * to occur in the future. Keep on running the
 		 * update worker thread.
 		 */
-		queue_delayed_work_on(smp_processor_id(), vmstat_wq,
+		queue_delayed_work_on(smp_processor_id(), mm_percpu_wq,
 			this_cpu_ptr(&vmstat_work),
 			round_jiffies_relative(sysctl_stat_interval));
 	}
@@ -1702,7 +1701,7 @@ static void vmstat_shepherd(struct work_struct *w)
 		struct delayed_work *dw = &per_cpu(vmstat_work, cpu);
 
 		if (!delayed_work_pending(dw) && need_update(cpu))
-			queue_delayed_work_on(cpu, vmstat_wq, dw, 0);
+			queue_delayed_work_on(cpu, mm_percpu_wq, dw, 0);
 	}
 	put_online_cpus();
 
@@ -1718,7 +1717,6 @@ static void __init start_shepherd_timer(void)
 		INIT_DEFERRABLE_WORK(per_cpu_ptr(&vmstat_work, cpu),
 				     vmstat_update);
 
-	vmstat_wq = alloc_workqueue("vmstat", WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
 	schedule_delayed_work(&shepherd,
 		round_jiffies_relative(sysctl_stat_interval));
 }
@@ -1764,11 +1762,16 @@ static int vmstat_cpu_dead(unsigned int cpu)
 
 #endif
 
+struct workqueue_struct *mm_percpu_wq;
+
 void __init init_mm_internals(void)
 {
-#ifdef CONFIG_SMP
-	int ret;
+	int ret __maybe_unused;
 
+	mm_percpu_wq = alloc_workqueue("mm_percpu_wq",
+				       WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
+
+#ifdef CONFIG_SMP
 	ret = cpuhp_setup_state_nocalls(CPUHP_MM_VMSTAT_DEAD, "mm/vmstat:dead",
 					NULL, vmstat_cpu_dead);
 	if (ret < 0)
diff --git a/mm/z3fold.c b/mm/z3fold.c
index f9492bccfd79..54f63c4a809a 100644
--- a/mm/z3fold.c
+++ b/mm/z3fold.c
@@ -185,6 +185,12 @@ static inline void z3fold_page_lock(struct z3fold_header *zhdr)
 	spin_lock(&zhdr->page_lock);
 }
 
+/* Try to lock a z3fold page */
+static inline int z3fold_page_trylock(struct z3fold_header *zhdr)
+{
+	return spin_trylock(&zhdr->page_lock);
+}
+
 /* Unlock a z3fold page */
 static inline void z3fold_page_unlock(struct z3fold_header *zhdr)
 {
@@ -385,7 +391,7 @@ static int z3fold_alloc(struct z3fold_pool *pool, size_t size, gfp_t gfp,
 			spin_lock(&pool->lock);
 			zhdr = list_first_entry_or_null(&pool->unbuddied[i],
 						struct z3fold_header, buddy);
-			if (!zhdr) {
+			if (!zhdr || !z3fold_page_trylock(zhdr)) {
 				spin_unlock(&pool->lock);
 				continue;
 			}
@@ -394,7 +400,6 @@ static int z3fold_alloc(struct z3fold_pool *pool, size_t size, gfp_t gfp,
394 spin_unlock(&pool->lock); 400 spin_unlock(&pool->lock);
395 401
396 page = virt_to_page(zhdr); 402 page = virt_to_page(zhdr);
397 z3fold_page_lock(zhdr);
398 if (zhdr->first_chunks == 0) { 403 if (zhdr->first_chunks == 0) {
399 if (zhdr->middle_chunks != 0 && 404 if (zhdr->middle_chunks != 0 &&
400 chunks >= zhdr->start_middle) 405 chunks >= zhdr->start_middle)
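The z3fold change above closes a race: the old code dropped pool->lock before taking the page lock, leaving a window for a concurrent free. Taking the page lock via trylock while pool->lock is still held, and skipping contended entries, avoids both the race and a lock-order inversion. A sketch of the idiom with hypothetical types:

#include <linux/list.h>
#include <linux/spinlock.h>

struct item {
	spinlock_t lock;
	struct list_head node;
};

/* Returns a locked item, or NULL; busy items are simply skipped. */
static struct item *pick_item(spinlock_t *list_lock, struct list_head *head)
{
	struct item *it;

	spin_lock(list_lock);
	list_for_each_entry(it, head, node) {
		if (spin_trylock(&it->lock)) {
			list_del_init(&it->node);
			spin_unlock(list_lock);
			return it;
		}
	}
	spin_unlock(list_lock);
	return NULL;
}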
diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
index b7ee9c34dbd6..d41edd28298b 100644
--- a/mm/zsmalloc.c
+++ b/mm/zsmalloc.c
@@ -276,7 +276,7 @@ struct zs_pool {
276struct zspage { 276struct zspage {
277 struct { 277 struct {
278 unsigned int fullness:FULLNESS_BITS; 278 unsigned int fullness:FULLNESS_BITS;
279 unsigned int class:CLASS_BITS; 279 unsigned int class:CLASS_BITS + 1;
280 unsigned int isolated:ISOLATED_BITS; 280 unsigned int isolated:ISOLATED_BITS;
281 unsigned int magic:MAGIC_VAL_BITS; 281 unsigned int magic:MAGIC_VAL_BITS;
282 }; 282 };
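The one-line zsmalloc fix widens the class bitfield by one bit so the largest size-class index still fits; the previous width silently truncated it. Where the maximum stored value is known at build time, a compile-time assertion catches this class of bug. A sketch with hypothetical EX_* constants:

#include <linux/bug.h>

#define EX_CLASS_BITS	8
#define EX_MAX_CLASS	283	/* hypothetical largest index */

struct ex_meta {
	unsigned int fullness:2;
	unsigned int class:EX_CLASS_BITS + 1;	/* widened as in the hunk */
	unsigned int isolated:3;
};

static inline void ex_meta_check(void)
{
	/* Fails the build if the field cannot hold the largest index. */
	BUILD_BUG_ON(EX_MAX_CLASS >= (1 << (EX_CLASS_BITS + 1)));
}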
diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c
index ea71513fca21..90f49a194249 100644
--- a/net/bridge/br_device.c
+++ b/net/bridge/br_device.c
@@ -119,6 +119,15 @@ static int br_dev_init(struct net_device *dev)
119 return err; 119 return err;
120} 120}
121 121
122static void br_dev_uninit(struct net_device *dev)
123{
124 struct net_bridge *br = netdev_priv(dev);
125
126 br_multicast_uninit_stats(br);
127 br_vlan_flush(br);
128 free_percpu(br->stats);
129}
130
122static int br_dev_open(struct net_device *dev) 131static int br_dev_open(struct net_device *dev)
123{ 132{
124 struct net_bridge *br = netdev_priv(dev); 133 struct net_bridge *br = netdev_priv(dev);
@@ -332,6 +341,7 @@ static const struct net_device_ops br_netdev_ops = {
332 .ndo_open = br_dev_open, 341 .ndo_open = br_dev_open,
333 .ndo_stop = br_dev_stop, 342 .ndo_stop = br_dev_stop,
334 .ndo_init = br_dev_init, 343 .ndo_init = br_dev_init,
344 .ndo_uninit = br_dev_uninit,
335 .ndo_start_xmit = br_dev_xmit, 345 .ndo_start_xmit = br_dev_xmit,
336 .ndo_get_stats64 = br_get_stats64, 346 .ndo_get_stats64 = br_get_stats64,
337 .ndo_set_mac_address = br_set_mac_address, 347 .ndo_set_mac_address = br_set_mac_address,
@@ -356,14 +366,6 @@ static const struct net_device_ops br_netdev_ops = {
356 .ndo_features_check = passthru_features_check, 366 .ndo_features_check = passthru_features_check,
357}; 367};
358 368
359static void br_dev_free(struct net_device *dev)
360{
361 struct net_bridge *br = netdev_priv(dev);
362
363 free_percpu(br->stats);
364 free_netdev(dev);
365}
366
367static struct device_type br_type = { 369static struct device_type br_type = {
368 .name = "bridge", 370 .name = "bridge",
369}; 371};
@@ -376,7 +378,7 @@ void br_dev_setup(struct net_device *dev)
376 ether_setup(dev); 378 ether_setup(dev);
377 379
378 dev->netdev_ops = &br_netdev_ops; 380 dev->netdev_ops = &br_netdev_ops;
379 dev->destructor = br_dev_free; 381 dev->destructor = free_netdev;
380 dev->ethtool_ops = &br_ethtool_ops; 382 dev->ethtool_ops = &br_ethtool_ops;
381 SET_NETDEV_DEVTYPE(dev, &br_type); 383 SET_NETDEV_DEVTYPE(dev, &br_type);
382 dev->priv_flags = IFF_EBRIDGE | IFF_NO_QUEUE; 384 dev->priv_flags = IFF_EBRIDGE | IFF_NO_QUEUE;
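The bridge hunks above move all teardown (multicast stats, VLANs, the per-cpu stats) out of an ad-hoc destructor and into .ndo_uninit, which the core invokes from unregister_netdevice(); the destructor then collapses to plain free_netdev. A sketch of the init/uninit pairing with an illustrative example_ driver:

#include <linux/netdevice.h>
#include <linux/percpu.h>

struct example_priv {
	struct pcpu_sw_netstats __percpu *stats;
};

static int example_init(struct net_device *dev)
{
	struct example_priv *p = netdev_priv(dev);

	p->stats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
	return p->stats ? 0 : -ENOMEM;
}

/* Runs on unregister_netdevice(): undo everything ndo_init set up. */
static void example_uninit(struct net_device *dev)
{
	struct example_priv *p = netdev_priv(dev);

	free_percpu(p->stats);
}

static const struct net_device_ops example_ops = {
	.ndo_init	= example_init,
	.ndo_uninit	= example_uninit,
};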
diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c
index 6eb52d422dd9..6d273ca0bf7c 100644
--- a/net/bridge/br_if.c
+++ b/net/bridge/br_if.c
@@ -312,7 +312,6 @@ void br_dev_delete(struct net_device *dev, struct list_head *head)
312 312
313 br_fdb_delete_by_port(br, NULL, 0, 1); 313 br_fdb_delete_by_port(br, NULL, 0, 1);
314 314
315 br_vlan_flush(br);
316 br_multicast_dev_del(br); 315 br_multicast_dev_del(br);
317 cancel_delayed_work_sync(&br->gc_work); 316 cancel_delayed_work_sync(&br->gc_work);
318 317
diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
index b760f2620abf..faa7261a992f 100644
--- a/net/bridge/br_multicast.c
+++ b/net/bridge/br_multicast.c
@@ -2031,8 +2031,6 @@ void br_multicast_dev_del(struct net_bridge *br)
2031 2031
2032out: 2032out:
2033 spin_unlock_bh(&br->multicast_lock); 2033 spin_unlock_bh(&br->multicast_lock);
2034
2035 free_percpu(br->mcast_stats);
2036} 2034}
2037 2035
2038int br_multicast_set_router(struct net_bridge *br, unsigned long val) 2036int br_multicast_set_router(struct net_bridge *br, unsigned long val)
@@ -2531,6 +2529,11 @@ int br_multicast_init_stats(struct net_bridge *br)
2531 return 0; 2529 return 0;
2532} 2530}
2533 2531
2532void br_multicast_uninit_stats(struct net_bridge *br)
2533{
2534 free_percpu(br->mcast_stats);
2535}
2536
2534static void mcast_stats_add_dir(u64 *dst, u64 *src) 2537static void mcast_stats_add_dir(u64 *dst, u64 *src)
2535{ 2538{
2536 dst[BR_MCAST_DIR_RX] += src[BR_MCAST_DIR_RX]; 2539 dst[BR_MCAST_DIR_RX] += src[BR_MCAST_DIR_RX];
diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c
index e6dea5cd6bd6..650986473577 100644
--- a/net/bridge/br_netlink.c
+++ b/net/bridge/br_netlink.c
@@ -1165,11 +1165,14 @@ static int br_dev_newlink(struct net *src_net, struct net_device *dev,
1165 spin_unlock_bh(&br->lock); 1165 spin_unlock_bh(&br->lock);
1166 } 1166 }
1167 1167
1168 err = br_changelink(dev, tb, data); 1168 err = register_netdevice(dev);
1169 if (err) 1169 if (err)
1170 return err; 1170 return err;
1171 1171
1172 return register_netdevice(dev); 1172 err = br_changelink(dev, tb, data);
1173 if (err)
1174 unregister_netdevice(dev);
1175 return err;
1173} 1176}
1174 1177
1175static size_t br_get_size(const struct net_device *brdev) 1178static size_t br_get_size(const struct net_device *brdev)
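The br_dev_newlink() reorder above establishes the safe pattern for rtnl_link_ops: register the device first, only then apply netlink attributes, and unwind with unregister_netdevice() if configuration fails. As a standalone sketch (example_changelink is hypothetical):

#include <linux/netdevice.h>
#include <net/netlink.h>

static int example_changelink(struct net_device *dev,
			      struct nlattr *tb[], struct nlattr *data[]);

static int example_newlink(struct net *src_net, struct net_device *dev,
			   struct nlattr *tb[], struct nlattr *data[])
{
	int err;

	err = register_netdevice(dev);	/* device fully set up first */
	if (err)
		return err;

	err = example_changelink(dev, tb, data);
	if (err)
		unregister_netdevice(dev);	/* unwind on failure */
	return err;
}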
diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
index 61368186edea..0d177280aa84 100644
--- a/net/bridge/br_private.h
+++ b/net/bridge/br_private.h
@@ -620,6 +620,7 @@ void br_rtr_notify(struct net_device *dev, struct net_bridge_port *port,
620void br_multicast_count(struct net_bridge *br, const struct net_bridge_port *p, 620void br_multicast_count(struct net_bridge *br, const struct net_bridge_port *p,
621 const struct sk_buff *skb, u8 type, u8 dir); 621 const struct sk_buff *skb, u8 type, u8 dir);
622int br_multicast_init_stats(struct net_bridge *br); 622int br_multicast_init_stats(struct net_bridge *br);
623void br_multicast_uninit_stats(struct net_bridge *br);
623void br_multicast_get_stats(const struct net_bridge *br, 624void br_multicast_get_stats(const struct net_bridge *br,
624 const struct net_bridge_port *p, 625 const struct net_bridge_port *p,
625 struct br_mcast_stats *dest); 626 struct br_mcast_stats *dest);
@@ -760,6 +761,10 @@ static inline int br_multicast_init_stats(struct net_bridge *br)
760 return 0; 761 return 0;
761} 762}
762 763
764static inline void br_multicast_uninit_stats(struct net_bridge *br)
765{
766}
767
763static inline int br_multicast_igmp_type(const struct sk_buff *skb) 768static inline int br_multicast_igmp_type(const struct sk_buff *skb)
764{ 769{
765 return 0; 770 return 0;
diff --git a/net/core/datagram.c b/net/core/datagram.c
index 4608aa245410..15ef99469cfe 100644
--- a/net/core/datagram.c
+++ b/net/core/datagram.c
@@ -402,7 +402,7 @@ int skb_copy_datagram_iter(const struct sk_buff *skb, int offset,
402 struct iov_iter *to, int len) 402 struct iov_iter *to, int len)
403{ 403{
404 int start = skb_headlen(skb); 404 int start = skb_headlen(skb);
405 int i, copy = start - offset; 405 int i, copy = start - offset, start_off = offset, n;
406 struct sk_buff *frag_iter; 406 struct sk_buff *frag_iter;
407 407
408 trace_skb_copy_datagram_iovec(skb, len); 408 trace_skb_copy_datagram_iovec(skb, len);
@@ -411,11 +411,12 @@ int skb_copy_datagram_iter(const struct sk_buff *skb, int offset,
411 if (copy > 0) { 411 if (copy > 0) {
412 if (copy > len) 412 if (copy > len)
413 copy = len; 413 copy = len;
414 if (copy_to_iter(skb->data + offset, copy, to) != copy) 414 n = copy_to_iter(skb->data + offset, copy, to);
415 offset += n;
416 if (n != copy)
415 goto short_copy; 417 goto short_copy;
416 if ((len -= copy) == 0) 418 if ((len -= copy) == 0)
417 return 0; 419 return 0;
418 offset += copy;
419 } 420 }
420 421
421 /* Copy paged appendix. Hmm... why does this look so complicated? */ 422 /* Copy paged appendix. Hmm... why does this look so complicated? */
@@ -429,13 +430,14 @@ int skb_copy_datagram_iter(const struct sk_buff *skb, int offset,
429 if ((copy = end - offset) > 0) { 430 if ((copy = end - offset) > 0) {
430 if (copy > len) 431 if (copy > len)
431 copy = len; 432 copy = len;
432 if (copy_page_to_iter(skb_frag_page(frag), 433 n = copy_page_to_iter(skb_frag_page(frag),
433 frag->page_offset + offset - 434 frag->page_offset + offset -
434 start, copy, to) != copy) 435 start, copy, to);
436 offset += n;
437 if (n != copy)
435 goto short_copy; 438 goto short_copy;
436 if (!(len -= copy)) 439 if (!(len -= copy))
437 return 0; 440 return 0;
438 offset += copy;
439 } 441 }
440 start = end; 442 start = end;
441 } 443 }
@@ -467,6 +469,7 @@ int skb_copy_datagram_iter(const struct sk_buff *skb, int offset,
467 */ 469 */
468 470
469fault: 471fault:
472 iov_iter_revert(to, offset - start_off);
470 return -EFAULT; 473 return -EFAULT;
471 474
472short_copy: 475short_copy:
@@ -617,7 +620,7 @@ static int skb_copy_and_csum_datagram(const struct sk_buff *skb, int offset,
617 __wsum *csump) 620 __wsum *csump)
618{ 621{
619 int start = skb_headlen(skb); 622 int start = skb_headlen(skb);
620 int i, copy = start - offset; 623 int i, copy = start - offset, start_off = offset;
621 struct sk_buff *frag_iter; 624 struct sk_buff *frag_iter;
622 int pos = 0; 625 int pos = 0;
623 int n; 626 int n;
@@ -627,11 +630,11 @@ static int skb_copy_and_csum_datagram(const struct sk_buff *skb, int offset,
627 if (copy > len) 630 if (copy > len)
628 copy = len; 631 copy = len;
629 n = csum_and_copy_to_iter(skb->data + offset, copy, csump, to); 632 n = csum_and_copy_to_iter(skb->data + offset, copy, csump, to);
633 offset += n;
630 if (n != copy) 634 if (n != copy)
631 goto fault; 635 goto fault;
632 if ((len -= copy) == 0) 636 if ((len -= copy) == 0)
633 return 0; 637 return 0;
634 offset += copy;
635 pos = copy; 638 pos = copy;
636 } 639 }
637 640
@@ -653,12 +656,12 @@ static int skb_copy_and_csum_datagram(const struct sk_buff *skb, int offset,
653 offset - start, copy, 656 offset - start, copy,
654 &csum2, to); 657 &csum2, to);
655 kunmap(page); 658 kunmap(page);
659 offset += n;
656 if (n != copy) 660 if (n != copy)
657 goto fault; 661 goto fault;
658 *csump = csum_block_add(*csump, csum2, pos); 662 *csump = csum_block_add(*csump, csum2, pos);
659 if (!(len -= copy)) 663 if (!(len -= copy))
660 return 0; 664 return 0;
661 offset += copy;
662 pos += copy; 665 pos += copy;
663 } 666 }
664 start = end; 667 start = end;
@@ -691,6 +694,7 @@ static int skb_copy_and_csum_datagram(const struct sk_buff *skb, int offset,
691 return 0; 694 return 0;
692 695
693fault: 696fault:
697 iov_iter_revert(to, offset - start_off);
694 return -EFAULT; 698 return -EFAULT;
695} 699}
696 700
@@ -775,6 +779,7 @@ int skb_copy_and_csum_datagram_msg(struct sk_buff *skb,
775 } 779 }
776 return 0; 780 return 0;
777csum_error: 781csum_error:
782 iov_iter_revert(&msg->msg_iter, chunk);
778 return -EINVAL; 783 return -EINVAL;
779fault: 784fault:
780 return -EFAULT; 785 return -EFAULT;
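The datagram changes above make the copy helpers advance offset by however many bytes copy_to_iter()/copy_page_to_iter() actually consumed, so the fault and csum_error paths can call iov_iter_revert() and hand the caller an untouched iterator — important for MSG_PEEK and for retries. The idiom in isolation:

#include <linux/uio.h>

/* Copy all of buf, or restore the iterator and report a fault. */
static int copy_all_or_revert(struct iov_iter *to, const void *buf,
			      size_t len)
{
	size_t done = copy_to_iter(buf, len, to);

	if (done != len) {
		iov_iter_revert(to, done);	/* undo the partial copy */
		return -EFAULT;
	}
	return 0;
}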
diff --git a/net/core/dev.c b/net/core/dev.c
index ef9fe60ee294..5d33e2baab2b 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -6754,7 +6754,6 @@ int dev_change_xdp_fd(struct net_device *dev, int fd, u32 flags)
6754 6754
6755 return err; 6755 return err;
6756} 6756}
6757EXPORT_SYMBOL(dev_change_xdp_fd);
6758 6757
6759/** 6758/**
6760 * dev_new_index - allocate an ifindex 6759 * dev_new_index - allocate an ifindex
diff --git a/net/ipv4/netfilter/ipt_CLUSTERIP.c b/net/ipv4/netfilter/ipt_CLUSTERIP.c
index fcbdc0c49b0e..038f293c2376 100644
--- a/net/ipv4/netfilter/ipt_CLUSTERIP.c
+++ b/net/ipv4/netfilter/ipt_CLUSTERIP.c
@@ -462,7 +462,7 @@ static void clusterip_tg_destroy(const struct xt_tgdtor_param *par)
462 462
463 clusterip_config_put(cipinfo->config); 463 clusterip_config_put(cipinfo->config);
464 464
465 nf_ct_netns_get(par->net, par->family); 465 nf_ct_netns_put(par->net, par->family);
466} 466}
467 467
468#ifdef CONFIG_COMPAT 468#ifdef CONFIG_COMPAT
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 94f0b5b50e0d..04843ae77b9e 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -2322,6 +2322,7 @@ int tcp_disconnect(struct sock *sk, int flags)
2322 tcp_init_send_head(sk); 2322 tcp_init_send_head(sk);
2323 memset(&tp->rx_opt, 0, sizeof(tp->rx_opt)); 2323 memset(&tp->rx_opt, 0, sizeof(tp->rx_opt));
2324 __sk_dst_reset(sk); 2324 __sk_dst_reset(sk);
2325 tcp_saved_syn_free(tp);
2325 2326
2326 /* Clean up fastopen related fields */ 2327 /* Clean up fastopen related fields */
2327 tcp_free_fastopen_req(tp); 2328 tcp_free_fastopen_req(tp);
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 31f2765ef851..a5838858c362 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -1935,6 +1935,7 @@ void tcp_enter_loss(struct sock *sk)
1935 struct tcp_sock *tp = tcp_sk(sk); 1935 struct tcp_sock *tp = tcp_sk(sk);
1936 struct net *net = sock_net(sk); 1936 struct net *net = sock_net(sk);
1937 struct sk_buff *skb; 1937 struct sk_buff *skb;
1938 bool new_recovery = icsk->icsk_ca_state < TCP_CA_Recovery;
1938 bool is_reneg; /* is receiver reneging on SACKs? */ 1939 bool is_reneg; /* is receiver reneging on SACKs? */
1939 bool mark_lost; 1940 bool mark_lost;
1940 1941
@@ -1994,15 +1995,18 @@ void tcp_enter_loss(struct sock *sk)
1994 tp->high_seq = tp->snd_nxt; 1995 tp->high_seq = tp->snd_nxt;
1995 tcp_ecn_queue_cwr(tp); 1996 tcp_ecn_queue_cwr(tp);
1996 1997
1997 /* F-RTO RFC5682 sec 3.1 step 1 mandates to disable F-RTO 1998 /* F-RTO RFC5682 sec 3.1 step 1: retransmit SND.UNA if no previous
1998 * if a previous recovery is underway, otherwise it may incorrectly 1999 * loss recovery is underway except recurring timeout(s) on
1999 * call a timeout spurious if some previously retransmitted packets 2000 * the same SND.UNA (sec 3.2). Disable F-RTO on path MTU probing
2000 * are s/acked (sec 3.2). We do not apply that restriction since 2001 *
2001 * retransmitted skbs are permanently tagged with TCPCB_EVER_RETRANS 2002 * In theory F-RTO can be used repeatedly during loss recovery.
2002 * so FLAG_ORIG_SACK_ACKED is always correct. But we do disable F-RTO 2003 * In practice this interacts badly with broken middle-boxes that
2003 * on PTMU discovery to avoid sending new data. 2004 * falsely raise the receive window, which results in repeated
2005 * timeouts and stop-and-go behavior.
2004 */ 2006 */
2005 tp->frto = sysctl_tcp_frto && !inet_csk(sk)->icsk_mtup.probe_size; 2007 tp->frto = sysctl_tcp_frto &&
2008 (new_recovery || icsk->icsk_retransmits) &&
2009 !inet_csk(sk)->icsk_mtup.probe_size;
2006} 2010}
2007 2011
2008/* If ACK arrived pointing to a remembered SACK, it means that our 2012/* If ACK arrived pointing to a remembered SACK, it means that our
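Restated as a standalone predicate (field names illustrative), the new F-RTO gate enables the algorithm only for a fresh loss episode or a recurring timeout, and never while path-MTU probing is sending new data:

#include <linux/types.h>

static bool frto_allowed(bool sysctl_frto, bool new_recovery,
			 u8 icsk_retransmits, bool mtu_probing)
{
	return sysctl_frto &&
	       (new_recovery || icsk_retransmits) &&
	       !mtu_probing;
}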
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 0e807a83c1bc..ffc9274b2706 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -2995,6 +2995,8 @@ void tcp_send_active_reset(struct sock *sk, gfp_t priority)
2995{ 2995{
2996 struct sk_buff *skb; 2996 struct sk_buff *skb;
2997 2997
2998 TCP_INC_STATS(sock_net(sk), TCP_MIB_OUTRSTS);
2999
2998 /* NOTE: No TCP options attached and we never retransmit this. */ 3000 /* NOTE: No TCP options attached and we never retransmit this. */
2999 skb = alloc_skb(MAX_TCP_HEADER, priority); 3001 skb = alloc_skb(MAX_TCP_HEADER, priority);
3000 if (!skb) { 3002 if (!skb) {
@@ -3010,8 +3012,6 @@ void tcp_send_active_reset(struct sock *sk, gfp_t priority)
3010 /* Send it off. */ 3012 /* Send it off. */
3011 if (tcp_transmit_skb(sk, skb, 0, priority)) 3013 if (tcp_transmit_skb(sk, skb, 0, priority))
3012 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTFAILED); 3014 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTFAILED);
3013
3014 TCP_INC_STATS(sock_net(sk), TCP_MIB_OUTRSTS);
3015} 3015}
3016 3016
3017/* Send a crossed SYN-ACK during socket establishment. 3017/* Send a crossed SYN-ACK during socket establishment.
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index b330c2abcb24..d6da0fe5acca 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -3646,14 +3646,19 @@ restart:
3646 INIT_LIST_HEAD(&del_list); 3646 INIT_LIST_HEAD(&del_list);
3647 list_for_each_entry_safe(ifa, tmp, &idev->addr_list, if_list) { 3647 list_for_each_entry_safe(ifa, tmp, &idev->addr_list, if_list) {
3648 struct rt6_info *rt = NULL; 3648 struct rt6_info *rt = NULL;
3649 bool keep;
3649 3650
3650 addrconf_del_dad_work(ifa); 3651 addrconf_del_dad_work(ifa);
3651 3652
3653 keep = keep_addr && (ifa->flags & IFA_F_PERMANENT) &&
3654 !addr_is_local(&ifa->addr);
3655 if (!keep)
3656 list_move(&ifa->if_list, &del_list);
3657
3652 write_unlock_bh(&idev->lock); 3658 write_unlock_bh(&idev->lock);
3653 spin_lock_bh(&ifa->lock); 3659 spin_lock_bh(&ifa->lock);
3654 3660
3655 if (keep_addr && (ifa->flags & IFA_F_PERMANENT) && 3661 if (keep) {
3656 !addr_is_local(&ifa->addr)) {
3657 /* set state to skip the notifier below */ 3662 /* set state to skip the notifier below */
3658 state = INET6_IFADDR_STATE_DEAD; 3663 state = INET6_IFADDR_STATE_DEAD;
3659 ifa->state = 0; 3664 ifa->state = 0;
@@ -3665,8 +3670,6 @@ restart:
3665 } else { 3670 } else {
3666 state = ifa->state; 3671 state = ifa->state;
3667 ifa->state = INET6_IFADDR_STATE_DEAD; 3672 ifa->state = INET6_IFADDR_STATE_DEAD;
3668
3669 list_move(&ifa->if_list, &del_list);
3670 } 3673 }
3671 3674
3672 spin_unlock_bh(&ifa->lock); 3675 spin_unlock_bh(&ifa->lock);
diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c
index 861b255a2d51..32ea0f3d868c 100644
--- a/net/l2tp/l2tp_ppp.c
+++ b/net/l2tp/l2tp_ppp.c
@@ -1383,8 +1383,6 @@ static int pppol2tp_setsockopt(struct socket *sock, int level, int optname,
1383 } else 1383 } else
1384 err = pppol2tp_session_setsockopt(sk, session, optname, val); 1384 err = pppol2tp_session_setsockopt(sk, session, optname, val);
1385 1385
1386 err = 0;
1387
1388end_put_sess: 1386end_put_sess:
1389 sock_put(sk); 1387 sock_put(sk);
1390end: 1388end:
@@ -1507,8 +1505,13 @@ static int pppol2tp_getsockopt(struct socket *sock, int level, int optname,
1507 1505
1508 err = pppol2tp_tunnel_getsockopt(sk, tunnel, optname, &val); 1506 err = pppol2tp_tunnel_getsockopt(sk, tunnel, optname, &val);
1509 sock_put(ps->tunnel_sock); 1507 sock_put(ps->tunnel_sock);
1510 } else 1508 if (err)
1509 goto end_put_sess;
1510 } else {
1511 err = pppol2tp_session_getsockopt(sk, session, optname, &val); 1511 err = pppol2tp_session_getsockopt(sk, session, optname, &val);
1512 if (err)
1513 goto end_put_sess;
1514 }
1512 1515
1513 err = -EFAULT; 1516 err = -EFAULT;
1514 if (put_user(len, optlen)) 1517 if (put_user(len, optlen))
diff --git a/net/netfilter/nf_conntrack_expect.c b/net/netfilter/nf_conntrack_expect.c
index cb29e598605f..a5ca5e426bae 100644
--- a/net/netfilter/nf_conntrack_expect.c
+++ b/net/netfilter/nf_conntrack_expect.c
@@ -57,7 +57,7 @@ void nf_ct_unlink_expect_report(struct nf_conntrack_expect *exp,
57 hlist_del_rcu(&exp->hnode); 57 hlist_del_rcu(&exp->hnode);
58 net->ct.expect_count--; 58 net->ct.expect_count--;
59 59
60 hlist_del(&exp->lnode); 60 hlist_del_rcu(&exp->lnode);
61 master_help->expecting[exp->class]--; 61 master_help->expecting[exp->class]--;
62 62
63 nf_ct_expect_event_report(IPEXP_DESTROY, exp, portid, report); 63 nf_ct_expect_event_report(IPEXP_DESTROY, exp, portid, report);
@@ -363,7 +363,7 @@ static void nf_ct_expect_insert(struct nf_conntrack_expect *exp)
363 /* two references : one for hash insert, one for the timer */ 363 /* two references : one for hash insert, one for the timer */
364 refcount_add(2, &exp->use); 364 refcount_add(2, &exp->use);
365 365
366 hlist_add_head(&exp->lnode, &master_help->expectations); 366 hlist_add_head_rcu(&exp->lnode, &master_help->expectations);
367 master_help->expecting[exp->class]++; 367 master_help->expecting[exp->class]++;
368 368
369 hlist_add_head_rcu(&exp->hnode, &nf_ct_expect_hash[h]); 369 hlist_add_head_rcu(&exp->hnode, &nf_ct_expect_hash[h]);
diff --git a/net/netfilter/nf_conntrack_helper.c b/net/netfilter/nf_conntrack_helper.c
index 6dc44d9b4190..4eeb3418366a 100644
--- a/net/netfilter/nf_conntrack_helper.c
+++ b/net/netfilter/nf_conntrack_helper.c
@@ -158,16 +158,25 @@ nf_conntrack_helper_try_module_get(const char *name, u16 l3num, u8 protonum)
158{ 158{
159 struct nf_conntrack_helper *h; 159 struct nf_conntrack_helper *h;
160 160
161 rcu_read_lock();
162
161 h = __nf_conntrack_helper_find(name, l3num, protonum); 163 h = __nf_conntrack_helper_find(name, l3num, protonum);
162#ifdef CONFIG_MODULES 164#ifdef CONFIG_MODULES
163 if (h == NULL) { 165 if (h == NULL) {
164 if (request_module("nfct-helper-%s", name) == 0) 166 rcu_read_unlock();
167 if (request_module("nfct-helper-%s", name) == 0) {
168 rcu_read_lock();
165 h = __nf_conntrack_helper_find(name, l3num, protonum); 169 h = __nf_conntrack_helper_find(name, l3num, protonum);
170 } else {
171 return h;
172 }
166 } 173 }
167#endif 174#endif
168 if (h != NULL && !try_module_get(h->me)) 175 if (h != NULL && !try_module_get(h->me))
169 h = NULL; 176 h = NULL;
170 177
178 rcu_read_unlock();
179
171 return h; 180 return h;
172} 181}
173EXPORT_SYMBOL_GPL(nf_conntrack_helper_try_module_get); 182EXPORT_SYMBOL_GPL(nf_conntrack_helper_try_module_get);
@@ -311,38 +320,36 @@ void nf_ct_helper_expectfn_unregister(struct nf_ct_helper_expectfn *n)
311} 320}
312EXPORT_SYMBOL_GPL(nf_ct_helper_expectfn_unregister); 321EXPORT_SYMBOL_GPL(nf_ct_helper_expectfn_unregister);
313 322
323/* Caller should hold the rcu lock */
314struct nf_ct_helper_expectfn * 324struct nf_ct_helper_expectfn *
315nf_ct_helper_expectfn_find_by_name(const char *name) 325nf_ct_helper_expectfn_find_by_name(const char *name)
316{ 326{
317 struct nf_ct_helper_expectfn *cur; 327 struct nf_ct_helper_expectfn *cur;
318 bool found = false; 328 bool found = false;
319 329
320 rcu_read_lock();
321 list_for_each_entry_rcu(cur, &nf_ct_helper_expectfn_list, head) { 330 list_for_each_entry_rcu(cur, &nf_ct_helper_expectfn_list, head) {
322 if (!strcmp(cur->name, name)) { 331 if (!strcmp(cur->name, name)) {
323 found = true; 332 found = true;
324 break; 333 break;
325 } 334 }
326 } 335 }
327 rcu_read_unlock();
328 return found ? cur : NULL; 336 return found ? cur : NULL;
329} 337}
330EXPORT_SYMBOL_GPL(nf_ct_helper_expectfn_find_by_name); 338EXPORT_SYMBOL_GPL(nf_ct_helper_expectfn_find_by_name);
331 339
340/* Caller should hold the rcu lock */
332struct nf_ct_helper_expectfn * 341struct nf_ct_helper_expectfn *
333nf_ct_helper_expectfn_find_by_symbol(const void *symbol) 342nf_ct_helper_expectfn_find_by_symbol(const void *symbol)
334{ 343{
335 struct nf_ct_helper_expectfn *cur; 344 struct nf_ct_helper_expectfn *cur;
336 bool found = false; 345 bool found = false;
337 346
338 rcu_read_lock();
339 list_for_each_entry_rcu(cur, &nf_ct_helper_expectfn_list, head) { 347 list_for_each_entry_rcu(cur, &nf_ct_helper_expectfn_list, head) {
340 if (cur->expectfn == symbol) { 348 if (cur->expectfn == symbol) {
341 found = true; 349 found = true;
342 break; 350 break;
343 } 351 }
344 } 352 }
345 rcu_read_unlock();
346 return found ? cur : NULL; 353 return found ? cur : NULL;
347} 354}
348EXPORT_SYMBOL_GPL(nf_ct_helper_expectfn_find_by_symbol); 355EXPORT_SYMBOL_GPL(nf_ct_helper_expectfn_find_by_symbol);
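Both helper-lookup paths now follow the same rule: __nf_conntrack_helper_find() walks an RCU list, so the caller must hold rcu_read_lock(), but the lock has to be dropped around request_module(), which may sleep. The shape of that pattern, with hypothetical lookup()/take_ref() helpers standing in for the find and try_module_get() steps:

#include <linux/kmod.h>
#include <linux/rcupdate.h>

struct obj;
static struct obj *lookup(const char *name);	/* RCU list walk */
static bool take_ref(struct obj *o);		/* e.g. try_module_get() */

static struct obj *get_obj(const char *name)
{
	struct obj *o;

	rcu_read_lock();
	o = lookup(name);
	if (!o) {
		rcu_read_unlock();		/* request_module() sleeps */
		if (request_module("obj-%s", name) != 0)
			return NULL;
		rcu_read_lock();
		o = lookup(name);
	}
	if (o && !take_ref(o))
		o = NULL;
	rcu_read_unlock();
	return o;
}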
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
index ace824ab2e03..aafd25dff8c0 100644
--- a/net/netfilter/nf_conntrack_netlink.c
+++ b/net/netfilter/nf_conntrack_netlink.c
@@ -1490,11 +1490,16 @@ static int ctnetlink_change_helper(struct nf_conn *ct,
1490 * treat the second attempt as a no-op instead of returning 1490 * treat the second attempt as a no-op instead of returning
1491 * an error. 1491 * an error.
1492 */ 1492 */
1493 if (help && help->helper && 1493 err = -EBUSY;
1494 !strcmp(help->helper->name, helpname)) 1494 if (help) {
1495 return 0; 1495 rcu_read_lock();
1496 else 1496 helper = rcu_dereference(help->helper);
1497 return -EBUSY; 1497 if (helper && !strcmp(helper->name, helpname))
1498 err = 0;
1499 rcu_read_unlock();
1500 }
1501
1502 return err;
1498 } 1503 }
1499 1504
1500 if (!strcmp(helpname, "")) { 1505 if (!strcmp(helpname, "")) {
@@ -1932,9 +1937,9 @@ static int ctnetlink_new_conntrack(struct net *net, struct sock *ctnl,
1932 1937
1933 err = 0; 1938 err = 0;
1934 if (test_bit(IPS_EXPECTED_BIT, &ct->status)) 1939 if (test_bit(IPS_EXPECTED_BIT, &ct->status))
1935 events = IPCT_RELATED; 1940 events = 1 << IPCT_RELATED;
1936 else 1941 else
1937 events = IPCT_NEW; 1942 events = 1 << IPCT_NEW;
1938 1943
1939 if (cda[CTA_LABELS] && 1944 if (cda[CTA_LABELS] &&
1940 ctnetlink_attach_labels(ct, cda) == 0) 1945 ctnetlink_attach_labels(ct, cda) == 0)
@@ -2679,8 +2684,8 @@ ctnetlink_exp_dump_table(struct sk_buff *skb, struct netlink_callback *cb)
2679 last = (struct nf_conntrack_expect *)cb->args[1]; 2684 last = (struct nf_conntrack_expect *)cb->args[1];
2680 for (; cb->args[0] < nf_ct_expect_hsize; cb->args[0]++) { 2685 for (; cb->args[0] < nf_ct_expect_hsize; cb->args[0]++) {
2681restart: 2686restart:
2682 hlist_for_each_entry(exp, &nf_ct_expect_hash[cb->args[0]], 2687 hlist_for_each_entry_rcu(exp, &nf_ct_expect_hash[cb->args[0]],
2683 hnode) { 2688 hnode) {
2684 if (l3proto && exp->tuple.src.l3num != l3proto) 2689 if (l3proto && exp->tuple.src.l3num != l3proto)
2685 continue; 2690 continue;
2686 2691
@@ -2731,7 +2736,7 @@ ctnetlink_exp_ct_dump_table(struct sk_buff *skb, struct netlink_callback *cb)
2731 rcu_read_lock(); 2736 rcu_read_lock();
2732 last = (struct nf_conntrack_expect *)cb->args[1]; 2737 last = (struct nf_conntrack_expect *)cb->args[1];
2733restart: 2738restart:
2734 hlist_for_each_entry(exp, &help->expectations, lnode) { 2739 hlist_for_each_entry_rcu(exp, &help->expectations, lnode) {
2735 if (l3proto && exp->tuple.src.l3num != l3proto) 2740 if (l3proto && exp->tuple.src.l3num != l3proto)
2736 continue; 2741 continue;
2737 if (cb->args[1]) { 2742 if (cb->args[1]) {
@@ -2793,6 +2798,12 @@ static int ctnetlink_dump_exp_ct(struct net *net, struct sock *ctnl,
2793 return -ENOENT; 2798 return -ENOENT;
2794 2799
2795 ct = nf_ct_tuplehash_to_ctrack(h); 2800 ct = nf_ct_tuplehash_to_ctrack(h);
2801 /* No expectations are linked to this conntrack entry. */
2802 if (!nfct_help(ct)) {
2803 nf_ct_put(ct);
2804 return 0;
2805 }
2806
2796 c.data = ct; 2807 c.data = ct;
2797 2808
2798 err = netlink_dump_start(ctnl, skb, nlh, &c); 2809 err = netlink_dump_start(ctnl, skb, nlh, &c);
@@ -3138,23 +3149,27 @@ ctnetlink_create_expect(struct net *net,
3138 return -ENOENT; 3149 return -ENOENT;
3139 ct = nf_ct_tuplehash_to_ctrack(h); 3150 ct = nf_ct_tuplehash_to_ctrack(h);
3140 3151
3152 rcu_read_lock();
3141 if (cda[CTA_EXPECT_HELP_NAME]) { 3153 if (cda[CTA_EXPECT_HELP_NAME]) {
3142 const char *helpname = nla_data(cda[CTA_EXPECT_HELP_NAME]); 3154 const char *helpname = nla_data(cda[CTA_EXPECT_HELP_NAME]);
3143 3155
3144 helper = __nf_conntrack_helper_find(helpname, u3, 3156 helper = __nf_conntrack_helper_find(helpname, u3,
3145 nf_ct_protonum(ct)); 3157 nf_ct_protonum(ct));
3146 if (helper == NULL) { 3158 if (helper == NULL) {
3159 rcu_read_unlock();
3147#ifdef CONFIG_MODULES 3160#ifdef CONFIG_MODULES
3148 if (request_module("nfct-helper-%s", helpname) < 0) { 3161 if (request_module("nfct-helper-%s", helpname) < 0) {
3149 err = -EOPNOTSUPP; 3162 err = -EOPNOTSUPP;
3150 goto err_ct; 3163 goto err_ct;
3151 } 3164 }
3165 rcu_read_lock();
3152 helper = __nf_conntrack_helper_find(helpname, u3, 3166 helper = __nf_conntrack_helper_find(helpname, u3,
3153 nf_ct_protonum(ct)); 3167 nf_ct_protonum(ct));
3154 if (helper) { 3168 if (helper) {
3155 err = -EAGAIN; 3169 err = -EAGAIN;
3156 goto err_ct; 3170 goto err_rcu;
3157 } 3171 }
3172 rcu_read_unlock();
3158#endif 3173#endif
3159 err = -EOPNOTSUPP; 3174 err = -EOPNOTSUPP;
3160 goto err_ct; 3175 goto err_ct;
@@ -3164,11 +3179,13 @@ ctnetlink_create_expect(struct net *net,
3164 exp = ctnetlink_alloc_expect(cda, ct, helper, &tuple, &mask); 3179 exp = ctnetlink_alloc_expect(cda, ct, helper, &tuple, &mask);
3165 if (IS_ERR(exp)) { 3180 if (IS_ERR(exp)) {
3166 err = PTR_ERR(exp); 3181 err = PTR_ERR(exp);
3167 goto err_ct; 3182 goto err_rcu;
3168 } 3183 }
3169 3184
3170 err = nf_ct_expect_related_report(exp, portid, report); 3185 err = nf_ct_expect_related_report(exp, portid, report);
3171 nf_ct_expect_put(exp); 3186 nf_ct_expect_put(exp);
3187err_rcu:
3188 rcu_read_unlock();
3172err_ct: 3189err_ct:
3173 nf_ct_put(ct); 3190 nf_ct_put(ct);
3174 return err; 3191 return err;
diff --git a/net/netfilter/nf_nat_redirect.c b/net/netfilter/nf_nat_redirect.c
index d43869879fcf..86067560a318 100644
--- a/net/netfilter/nf_nat_redirect.c
+++ b/net/netfilter/nf_nat_redirect.c
@@ -101,11 +101,13 @@ nf_nat_redirect_ipv6(struct sk_buff *skb, const struct nf_nat_range *range,
101 rcu_read_lock(); 101 rcu_read_lock();
102 idev = __in6_dev_get(skb->dev); 102 idev = __in6_dev_get(skb->dev);
103 if (idev != NULL) { 103 if (idev != NULL) {
104 read_lock_bh(&idev->lock);
104 list_for_each_entry(ifa, &idev->addr_list, if_list) { 105 list_for_each_entry(ifa, &idev->addr_list, if_list) {
105 newdst = ifa->addr; 106 newdst = ifa->addr;
106 addr = true; 107 addr = true;
107 break; 108 break;
108 } 109 }
110 read_unlock_bh(&idev->lock);
109 } 111 }
110 rcu_read_unlock(); 112 rcu_read_unlock();
111 113
diff --git a/net/netfilter/nft_hash.c b/net/netfilter/nft_hash.c
index a6a4633725bb..52a5079a91a3 100644
--- a/net/netfilter/nft_hash.c
+++ b/net/netfilter/nft_hash.c
@@ -21,6 +21,7 @@ struct nft_jhash {
21 enum nft_registers sreg:8; 21 enum nft_registers sreg:8;
22 enum nft_registers dreg:8; 22 enum nft_registers dreg:8;
23 u8 len; 23 u8 len;
24 bool autogen_seed:1;
24 u32 modulus; 25 u32 modulus;
25 u32 seed; 26 u32 seed;
26 u32 offset; 27 u32 offset;
@@ -102,10 +103,12 @@ static int nft_jhash_init(const struct nft_ctx *ctx,
102 if (priv->offset + priv->modulus - 1 < priv->offset) 103 if (priv->offset + priv->modulus - 1 < priv->offset)
103 return -EOVERFLOW; 104 return -EOVERFLOW;
104 105
105 if (tb[NFTA_HASH_SEED]) 106 if (tb[NFTA_HASH_SEED]) {
106 priv->seed = ntohl(nla_get_be32(tb[NFTA_HASH_SEED])); 107 priv->seed = ntohl(nla_get_be32(tb[NFTA_HASH_SEED]));
107 else 108 } else {
109 priv->autogen_seed = true;
108 get_random_bytes(&priv->seed, sizeof(priv->seed)); 110 get_random_bytes(&priv->seed, sizeof(priv->seed));
111 }
109 112
110 return nft_validate_register_load(priv->sreg, len) && 113 return nft_validate_register_load(priv->sreg, len) &&
111 nft_validate_register_store(ctx, priv->dreg, NULL, 114 nft_validate_register_store(ctx, priv->dreg, NULL,
@@ -151,7 +154,8 @@ static int nft_jhash_dump(struct sk_buff *skb,
151 goto nla_put_failure; 154 goto nla_put_failure;
152 if (nla_put_be32(skb, NFTA_HASH_MODULUS, htonl(priv->modulus))) 155 if (nla_put_be32(skb, NFTA_HASH_MODULUS, htonl(priv->modulus)))
153 goto nla_put_failure; 156 goto nla_put_failure;
154 if (nla_put_be32(skb, NFTA_HASH_SEED, htonl(priv->seed))) 157 if (!priv->autogen_seed &&
158 nla_put_be32(skb, NFTA_HASH_SEED, htonl(priv->seed)))
155 goto nla_put_failure; 159 goto nla_put_failure;
156 if (priv->offset != 0) 160 if (priv->offset != 0)
157 if (nla_put_be32(skb, NFTA_HASH_OFFSET, htonl(priv->offset))) 161 if (nla_put_be32(skb, NFTA_HASH_OFFSET, htonl(priv->offset)))
diff --git a/net/netfilter/xt_TCPMSS.c b/net/netfilter/xt_TCPMSS.c
index 27241a767f17..c64aca611ac5 100644
--- a/net/netfilter/xt_TCPMSS.c
+++ b/net/netfilter/xt_TCPMSS.c
@@ -104,7 +104,7 @@ tcpmss_mangle_packet(struct sk_buff *skb,
104 tcph = (struct tcphdr *)(skb_network_header(skb) + tcphoff); 104 tcph = (struct tcphdr *)(skb_network_header(skb) + tcphoff);
105 tcp_hdrlen = tcph->doff * 4; 105 tcp_hdrlen = tcph->doff * 4;
106 106
107 if (len < tcp_hdrlen) 107 if (len < tcp_hdrlen || tcp_hdrlen < sizeof(struct tcphdr))
108 return -1; 108 return -1;
109 109
110 if (info->mss == XT_TCPMSS_CLAMP_PMTU) { 110 if (info->mss == XT_TCPMSS_CLAMP_PMTU) {
@@ -152,6 +152,10 @@ tcpmss_mangle_packet(struct sk_buff *skb,
152 if (len > tcp_hdrlen) 152 if (len > tcp_hdrlen)
153 return 0; 153 return 0;
154 154
155 /* tcph->doff has 4 bits, do not wrap it to 0 */
156 if (tcp_hdrlen >= 15 * 4)
157 return 0;
158
155 /* 159 /*
156 * MSS Option not found ?! add it.. 160 * MSS Option not found ?! add it..
157 */ 161 */
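The guard above follows from the TCP header layout: doff is a 4-bit count of 32-bit words, so a header is at most 15 * 4 = 60 bytes, and appending a 4-byte MSS option to an already-maximal header would wrap doff back toward 0. A sketch of both bounds checks:

#include <linux/tcp.h>

/* doff holds 5..15 32-bit words; reject anything that can't grow. */
static bool tcp_room_for_option(const struct tcphdr *th,
				unsigned int opt_len)
{
	unsigned int hdrlen = th->doff * 4;

	if (hdrlen < sizeof(struct tcphdr))	/* malformed: doff < 5 */
		return false;
	return hdrlen + opt_len <= 15 * 4;	/* would not wrap doff */
}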
diff --git a/net/netfilter/xt_TPROXY.c b/net/netfilter/xt_TPROXY.c
index 80cb7babeb64..df7f1df00330 100644
--- a/net/netfilter/xt_TPROXY.c
+++ b/net/netfilter/xt_TPROXY.c
@@ -393,7 +393,8 @@ tproxy_laddr6(struct sk_buff *skb, const struct in6_addr *user_laddr,
393 393
394 rcu_read_lock(); 394 rcu_read_lock();
395 indev = __in6_dev_get(skb->dev); 395 indev = __in6_dev_get(skb->dev);
396 if (indev) 396 if (indev) {
397 read_lock_bh(&indev->lock);
397 list_for_each_entry(ifa, &indev->addr_list, if_list) { 398 list_for_each_entry(ifa, &indev->addr_list, if_list) {
398 if (ifa->flags & (IFA_F_TENTATIVE | IFA_F_DEPRECATED)) 399 if (ifa->flags & (IFA_F_TENTATIVE | IFA_F_DEPRECATED))
399 continue; 400 continue;
@@ -401,6 +402,8 @@ tproxy_laddr6(struct sk_buff *skb, const struct in6_addr *user_laddr,
401 laddr = &ifa->addr; 402 laddr = &ifa->addr;
402 break; 403 break;
403 } 404 }
405 read_unlock_bh(&indev->lock);
406 }
404 rcu_read_unlock(); 407 rcu_read_unlock();
405 408
406 return laddr ? laddr : daddr; 409 return laddr ? laddr : daddr;
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 3e64d23e098c..52a2c55f6d9e 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -794,7 +794,7 @@ static void attach_default_qdiscs(struct net_device *dev)
794 } 794 }
795 } 795 }
796#ifdef CONFIG_NET_SCHED 796#ifdef CONFIG_NET_SCHED
797 if (dev->qdisc) 797 if (dev->qdisc != &noop_qdisc)
798 qdisc_hash_add(dev->qdisc, false); 798 qdisc_hash_add(dev->qdisc, false);
799#endif 799#endif
800} 800}
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 8e56df8d175d..f16c8d97b7f3 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -7174,6 +7174,9 @@ int sctp_inet_listen(struct socket *sock, int backlog)
7174 if (sock->state != SS_UNCONNECTED) 7174 if (sock->state != SS_UNCONNECTED)
7175 goto out; 7175 goto out;
7176 7176
7177 if (!sctp_sstate(sk, LISTENING) && !sctp_sstate(sk, CLOSED))
7178 goto out;
7179
7177 /* If backlog is zero, disable listening. */ 7180 /* If backlog is zero, disable listening. */
7178 if (!backlog) { 7181 if (!backlog) {
7179 if (sctp_sstate(sk, CLOSED)) 7182 if (sctp_sstate(sk, CLOSED))
diff --git a/samples/statx/test-statx.c b/samples/statx/test-statx.c
index 8571d766331d..d4d77b09412c 100644
--- a/samples/statx/test-statx.c
+++ b/samples/statx/test-statx.c
@@ -141,8 +141,8 @@ static void dump_statx(struct statx *stx)
141 if (stx->stx_mask & STATX_BTIME) 141 if (stx->stx_mask & STATX_BTIME)
142 print_time(" Birth: ", &stx->stx_btime); 142 print_time(" Birth: ", &stx->stx_btime);
143 143
144 if (stx->stx_attributes) { 144 if (stx->stx_attributes_mask) {
145 unsigned char bits; 145 unsigned char bits, mbits;
146 int loop, byte; 146 int loop, byte;
147 147
148 static char attr_representation[64 + 1] = 148 static char attr_representation[64 + 1] =
@@ -160,14 +160,18 @@ static void dump_statx(struct statx *stx)
160 printf("Attributes: %016llx (", stx->stx_attributes); 160 printf("Attributes: %016llx (", stx->stx_attributes);
161 for (byte = 64 - 8; byte >= 0; byte -= 8) { 161 for (byte = 64 - 8; byte >= 0; byte -= 8) {
162 bits = stx->stx_attributes >> byte; 162 bits = stx->stx_attributes >> byte;
163 mbits = stx->stx_attributes_mask >> byte;
163 for (loop = 7; loop >= 0; loop--) { 164 for (loop = 7; loop >= 0; loop--) {
164 int bit = byte + loop; 165 int bit = byte + loop;
165 166
166 if (bits & 0x80) 167 if (!(mbits & 0x80))
168 putchar('.'); /* Not supported */
169 else if (bits & 0x80)
167 putchar(attr_representation[63 - bit]); 170 putchar(attr_representation[63 - bit]);
168 else 171 else
169 putchar('-'); 172 putchar('-'); /* Not set */
170 bits <<= 1; 173 bits <<= 1;
174 mbits <<= 1;
171 } 175 }
172 if (byte) 176 if (byte)
173 putchar(' '); 177 putchar(' ');
diff --git a/tools/perf/util/annotate.c b/tools/perf/util/annotate.c
index 273f21fa32b5..7aa57225cbf7 100644
--- a/tools/perf/util/annotate.c
+++ b/tools/perf/util/annotate.c
@@ -130,6 +130,12 @@ static struct arch architectures[] = {
130 .name = "powerpc", 130 .name = "powerpc",
131 .init = powerpc__annotate_init, 131 .init = powerpc__annotate_init,
132 }, 132 },
133 {
134 .name = "s390",
135 .objdump = {
136 .comment_char = '#',
137 },
138 },
133}; 139};
134 140
135static void ins__delete(struct ins_operands *ops) 141static void ins__delete(struct ins_operands *ops)
diff --git a/tools/power/cpupower/utils/helpers/cpuid.c b/tools/power/cpupower/utils/helpers/cpuid.c
index 93b0aa74ca03..39c2c7d067bb 100644
--- a/tools/power/cpupower/utils/helpers/cpuid.c
+++ b/tools/power/cpupower/utils/helpers/cpuid.c
@@ -156,6 +156,7 @@ out:
156 */ 156 */
157 case 0x2C: /* Westmere EP - Gulftown */ 157 case 0x2C: /* Westmere EP - Gulftown */
158 cpu_info->caps |= CPUPOWER_CAP_HAS_TURBO_RATIO; 158 cpu_info->caps |= CPUPOWER_CAP_HAS_TURBO_RATIO;
159 break;
159 case 0x2A: /* SNB */ 160 case 0x2A: /* SNB */
160 case 0x2D: /* SNB Xeon */ 161 case 0x2D: /* SNB Xeon */
161 case 0x3A: /* IVB */ 162 case 0x3A: /* IVB */
diff --git a/tools/power/x86/turbostat/turbostat.8 b/tools/power/x86/turbostat/turbostat.8
index fedca3285326..ccf2a69365cc 100644
--- a/tools/power/x86/turbostat/turbostat.8
+++ b/tools/power/x86/turbostat/turbostat.8
@@ -100,6 +100,8 @@ The system configuration dump (if --quiet is not used) is followed by statistics
100\fBCPU%c1, CPU%c3, CPU%c6, CPU%c7\fP show the percentage residency in hardware core idle states. These numbers are from hardware residency counters. 100\fBCPU%c1, CPU%c3, CPU%c6, CPU%c7\fP show the percentage residency in hardware core idle states. These numbers are from hardware residency counters.
101\fBCoreTmp\fP Degrees Celsius reported by the per-core Digital Thermal Sensor. 101\fBCoreTmp\fP Degrees Celsius reported by the per-core Digital Thermal Sensor.
102\fBPkgTmp\fP Degrees Celsius reported by the per-package Package Thermal Monitor. 102\fBPkgTmp\fP Degrees Celsius reported by the per-package Package Thermal Monitor.
103\fBGFX%rc6\fP The percentage of time the GPU is in the "render C6" state, rc6, during the measurement interval. From /sys/class/drm/card0/power/rc6_residency_ms.
104\fBGFXMHz\fP Instantaneous snapshot of what sysfs presents at the end of the measurement interval. From /sys/class/graphics/fb0/device/drm/card0/gt_cur_freq_mhz.
103\fBPkg%pc2, Pkg%pc3, Pkg%pc6, Pkg%pc7\fP percentage residency in hardware package idle states. These numbers are from hardware residency counters. 105\fBPkg%pc2, Pkg%pc3, Pkg%pc6, Pkg%pc7\fP percentage residency in hardware package idle states. These numbers are from hardware residency counters.
104\fBPkgWatt\fP Watts consumed by the whole package. 106\fBPkgWatt\fP Watts consumed by the whole package.
105\fBCorWatt\fP Watts consumed by the core part of the package. 107\fBCorWatt\fP Watts consumed by the core part of the package.
diff --git a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c
index 828dccd3f01e..b11294730771 100644
--- a/tools/power/x86/turbostat/turbostat.c
+++ b/tools/power/x86/turbostat/turbostat.c
@@ -1142,7 +1142,7 @@ delta_thread(struct thread_data *new, struct thread_data *old,
1142 * it is possible for mperf's non-halted cycles + idle states 1142 * it is possible for mperf's non-halted cycles + idle states
1143 * to exceed TSC's all cycles: show c1 = 0% in that case. 1143 * to exceed TSC's all cycles: show c1 = 0% in that case.
1144 */ 1144 */
1145 if ((old->mperf + core_delta->c3 + core_delta->c6 + core_delta->c7) > old->tsc) 1145 if ((old->mperf + core_delta->c3 + core_delta->c6 + core_delta->c7) > (old->tsc * tsc_tweak))
1146 old->c1 = 0; 1146 old->c1 = 0;
1147 else { 1147 else {
1148 /* normal case, derive c1 */ 1148 /* normal case, derive c1 */
@@ -2485,8 +2485,10 @@ int snapshot_gfx_mhz(void)
2485 2485
2486 if (fp == NULL) 2486 if (fp == NULL)
2487 fp = fopen_or_die("/sys/class/graphics/fb0/device/drm/card0/gt_cur_freq_mhz", "r"); 2487 fp = fopen_or_die("/sys/class/graphics/fb0/device/drm/card0/gt_cur_freq_mhz", "r");
2488 else 2488 else {
2489 rewind(fp); 2489 rewind(fp);
2490 fflush(fp);
2491 }
2490 2492
2491 retval = fscanf(fp, "%d", &gfx_cur_mhz); 2493 retval = fscanf(fp, "%d", &gfx_cur_mhz);
2492 if (retval != 1) 2494 if (retval != 1)
@@ -3111,7 +3113,7 @@ int print_hwp(struct thread_data *t, struct core_data *c, struct pkg_data *p)
3111 return 0; 3113 return 0;
3112 3114
3113 fprintf(outf, "cpu%d: MSR_HWP_CAPABILITIES: 0x%08llx " 3115 fprintf(outf, "cpu%d: MSR_HWP_CAPABILITIES: 0x%08llx "
3114 "(high 0x%x guar 0x%x eff 0x%x low 0x%x)\n", 3116 "(high %d guar %d eff %d low %d)\n",
3115 cpu, msr, 3117 cpu, msr,
3116 (unsigned int)HWP_HIGHEST_PERF(msr), 3118 (unsigned int)HWP_HIGHEST_PERF(msr),
3117 (unsigned int)HWP_GUARANTEED_PERF(msr), 3119 (unsigned int)HWP_GUARANTEED_PERF(msr),
@@ -3122,7 +3124,7 @@ int print_hwp(struct thread_data *t, struct core_data *c, struct pkg_data *p)
3122 return 0; 3124 return 0;
3123 3125
3124 fprintf(outf, "cpu%d: MSR_HWP_REQUEST: 0x%08llx " 3126 fprintf(outf, "cpu%d: MSR_HWP_REQUEST: 0x%08llx "
3125 "(min 0x%x max 0x%x des 0x%x epp 0x%x window 0x%x pkg 0x%x)\n", 3127 "(min %d max %d des %d epp 0x%x window 0x%x pkg 0x%x)\n",
3126 cpu, msr, 3128 cpu, msr,
3127 (unsigned int)(((msr) >> 0) & 0xff), 3129 (unsigned int)(((msr) >> 0) & 0xff),
3128 (unsigned int)(((msr) >> 8) & 0xff), 3130 (unsigned int)(((msr) >> 8) & 0xff),
@@ -3136,7 +3138,7 @@ int print_hwp(struct thread_data *t, struct core_data *c, struct pkg_data *p)
3136 return 0; 3138 return 0;
3137 3139
3138 fprintf(outf, "cpu%d: MSR_HWP_REQUEST_PKG: 0x%08llx " 3140 fprintf(outf, "cpu%d: MSR_HWP_REQUEST_PKG: 0x%08llx "
3139 "(min 0x%x max 0x%x des 0x%x epp 0x%x window 0x%x)\n", 3141 "(min %d max %d des %d epp 0x%x window 0x%x)\n",
3140 cpu, msr, 3142 cpu, msr,
3141 (unsigned int)(((msr) >> 0) & 0xff), 3143 (unsigned int)(((msr) >> 0) & 0xff),
3142 (unsigned int)(((msr) >> 8) & 0xff), 3144 (unsigned int)(((msr) >> 8) & 0xff),
@@ -3353,17 +3355,19 @@ void rapl_probe(unsigned int family, unsigned int model)
3353 case INTEL_FAM6_SKYLAKE_DESKTOP: /* SKL */ 3355 case INTEL_FAM6_SKYLAKE_DESKTOP: /* SKL */
3354 case INTEL_FAM6_KABYLAKE_MOBILE: /* KBL */ 3356 case INTEL_FAM6_KABYLAKE_MOBILE: /* KBL */
3355 case INTEL_FAM6_KABYLAKE_DESKTOP: /* KBL */ 3357 case INTEL_FAM6_KABYLAKE_DESKTOP: /* KBL */
3356 do_rapl = RAPL_PKG | RAPL_DRAM | RAPL_DRAM_PERF_STATUS | RAPL_PKG_PERF_STATUS | RAPL_PKG_POWER_INFO; 3358 do_rapl = RAPL_PKG | RAPL_CORES | RAPL_CORE_POLICY | RAPL_DRAM | RAPL_DRAM_PERF_STATUS | RAPL_PKG_PERF_STATUS | RAPL_GFX | RAPL_PKG_POWER_INFO;
3357 BIC_PRESENT(BIC_PKG__); 3359 BIC_PRESENT(BIC_PKG__);
3358 BIC_PRESENT(BIC_RAM__); 3360 BIC_PRESENT(BIC_RAM__);
3359 if (rapl_joules) { 3361 if (rapl_joules) {
3360 BIC_PRESENT(BIC_Pkg_J); 3362 BIC_PRESENT(BIC_Pkg_J);
3361 BIC_PRESENT(BIC_Cor_J); 3363 BIC_PRESENT(BIC_Cor_J);
3362 BIC_PRESENT(BIC_RAM_J); 3364 BIC_PRESENT(BIC_RAM_J);
3365 BIC_PRESENT(BIC_GFX_J);
3363 } else { 3366 } else {
3364 BIC_PRESENT(BIC_PkgWatt); 3367 BIC_PRESENT(BIC_PkgWatt);
3365 BIC_PRESENT(BIC_CorWatt); 3368 BIC_PRESENT(BIC_CorWatt);
3366 BIC_PRESENT(BIC_RAMWatt); 3369 BIC_PRESENT(BIC_RAMWatt);
3370 BIC_PRESENT(BIC_GFXWatt);
3367 } 3371 }
3368 break; 3372 break;
3369 case INTEL_FAM6_HASWELL_X: /* HSX */ 3373 case INTEL_FAM6_HASWELL_X: /* HSX */
@@ -3478,7 +3482,7 @@ void perf_limit_reasons_probe(unsigned int family, unsigned int model)
3478int print_thermal(struct thread_data *t, struct core_data *c, struct pkg_data *p) 3482int print_thermal(struct thread_data *t, struct core_data *c, struct pkg_data *p)
3479{ 3483{
3480 unsigned long long msr; 3484 unsigned long long msr;
3481 unsigned int dts; 3485 unsigned int dts, dts2;
3482 int cpu; 3486 int cpu;
3483 3487
3484 if (!(do_dts || do_ptm)) 3488 if (!(do_dts || do_ptm))
@@ -3503,7 +3507,6 @@ int print_thermal(struct thread_data *t, struct core_data *c, struct pkg_data *p
3503 fprintf(outf, "cpu%d: MSR_IA32_PACKAGE_THERM_STATUS: 0x%08llx (%d C)\n", 3507 fprintf(outf, "cpu%d: MSR_IA32_PACKAGE_THERM_STATUS: 0x%08llx (%d C)\n",
3504 cpu, msr, tcc_activation_temp - dts); 3508 cpu, msr, tcc_activation_temp - dts);
3505 3509
3506#ifdef THERM_DEBUG
3507 if (get_msr(cpu, MSR_IA32_PACKAGE_THERM_INTERRUPT, &msr)) 3510 if (get_msr(cpu, MSR_IA32_PACKAGE_THERM_INTERRUPT, &msr))
3508 return 0; 3511 return 0;
3509 3512
@@ -3511,11 +3514,10 @@ int print_thermal(struct thread_data *t, struct core_data *c, struct pkg_data *p
3511 dts2 = (msr >> 8) & 0x7F; 3514 dts2 = (msr >> 8) & 0x7F;
3512 fprintf(outf, "cpu%d: MSR_IA32_PACKAGE_THERM_INTERRUPT: 0x%08llx (%d C, %d C)\n", 3515 fprintf(outf, "cpu%d: MSR_IA32_PACKAGE_THERM_INTERRUPT: 0x%08llx (%d C, %d C)\n",
3513 cpu, msr, tcc_activation_temp - dts, tcc_activation_temp - dts2); 3516 cpu, msr, tcc_activation_temp - dts, tcc_activation_temp - dts2);
3514#endif
3515 } 3517 }
3516 3518
3517 3519
3518 if (do_dts) { 3520 if (do_dts && debug) {
3519 unsigned int resolution; 3521 unsigned int resolution;
3520 3522
3521 if (get_msr(cpu, MSR_IA32_THERM_STATUS, &msr)) 3523 if (get_msr(cpu, MSR_IA32_THERM_STATUS, &msr))
@@ -3526,7 +3528,6 @@ int print_thermal(struct thread_data *t, struct core_data *c, struct pkg_data *p
3526 fprintf(outf, "cpu%d: MSR_IA32_THERM_STATUS: 0x%08llx (%d C +/- %d)\n", 3528 fprintf(outf, "cpu%d: MSR_IA32_THERM_STATUS: 0x%08llx (%d C +/- %d)\n",
3527 cpu, msr, tcc_activation_temp - dts, resolution); 3529 cpu, msr, tcc_activation_temp - dts, resolution);
3528 3530
3529#ifdef THERM_DEBUG
3530 if (get_msr(cpu, MSR_IA32_THERM_INTERRUPT, &msr)) 3531 if (get_msr(cpu, MSR_IA32_THERM_INTERRUPT, &msr))
3531 return 0; 3532 return 0;
3532 3533
@@ -3534,7 +3535,6 @@ int print_thermal(struct thread_data *t, struct core_data *c, struct pkg_data *p
3534 dts2 = (msr >> 8) & 0x7F; 3535 dts2 = (msr >> 8) & 0x7F;
3535 fprintf(outf, "cpu%d: MSR_IA32_THERM_INTERRUPT: 0x%08llx (%d C, %d C)\n", 3536 fprintf(outf, "cpu%d: MSR_IA32_THERM_INTERRUPT: 0x%08llx (%d C, %d C)\n",
3536 cpu, msr, tcc_activation_temp - dts, tcc_activation_temp - dts2); 3537 cpu, msr, tcc_activation_temp - dts, tcc_activation_temp - dts2);
3537#endif
3538 } 3538 }
3539 3539
3540 return 0; 3540 return 0;
@@ -4578,7 +4578,7 @@ int get_and_dump_counters(void)
4578} 4578}
4579 4579
4580void print_version() { 4580void print_version() {
4581 fprintf(outf, "turbostat version 17.02.24" 4581 fprintf(outf, "turbostat version 17.04.12"
4582 " - Len Brown <lenb@kernel.org>\n"); 4582 " - Len Brown <lenb@kernel.org>\n");
4583} 4583}
4584 4584
diff --git a/tools/testing/selftests/powerpc/Makefile b/tools/testing/selftests/powerpc/Makefile
index 1c5d0575802e..bf13fc2297aa 100644
--- a/tools/testing/selftests/powerpc/Makefile
+++ b/tools/testing/selftests/powerpc/Makefile
@@ -34,34 +34,34 @@ endif
34all: $(SUB_DIRS) 34all: $(SUB_DIRS)
35 35
36$(SUB_DIRS): 36$(SUB_DIRS):
37 BUILD_TARGET=$$OUTPUT/$@; mkdir -p $$BUILD_TARGET; $(MAKE) OUTPUT=$$BUILD_TARGET -k -C $@ all 37 BUILD_TARGET=$(OUTPUT)/$@; mkdir -p $$BUILD_TARGET; $(MAKE) OUTPUT=$$BUILD_TARGET -k -C $@ all
38 38
39include ../lib.mk 39include ../lib.mk
40 40
41override define RUN_TESTS 41override define RUN_TESTS
42 @for TARGET in $(SUB_DIRS); do \ 42 @for TARGET in $(SUB_DIRS); do \
43 BUILD_TARGET=$$OUTPUT/$$TARGET; \ 43 BUILD_TARGET=$(OUTPUT)/$$TARGET; \
44 $(MAKE) OUTPUT=$$BUILD_TARGET -C $$TARGET run_tests;\ 44 $(MAKE) OUTPUT=$$BUILD_TARGET -C $$TARGET run_tests;\
45 done; 45 done;
46endef 46endef
47 47
48override define INSTALL_RULE 48override define INSTALL_RULE
49 @for TARGET in $(SUB_DIRS); do \ 49 @for TARGET in $(SUB_DIRS); do \
50 BUILD_TARGET=$$OUTPUT/$$TARGET; \ 50 BUILD_TARGET=$(OUTPUT)/$$TARGET; \
51 $(MAKE) OUTPUT=$$BUILD_TARGET -C $$TARGET install;\ 51 $(MAKE) OUTPUT=$$BUILD_TARGET -C $$TARGET install;\
52 done; 52 done;
53endef 53endef
54 54
55override define EMIT_TESTS 55override define EMIT_TESTS
56 @for TARGET in $(SUB_DIRS); do \ 56 @for TARGET in $(SUB_DIRS); do \
57 BUILD_TARGET=$$OUTPUT/$$TARGET; \ 57 BUILD_TARGET=$(OUTPUT)/$$TARGET; \
58 $(MAKE) OUTPUT=$$BUILD_TARGET -s -C $$TARGET emit_tests;\ 58 $(MAKE) OUTPUT=$$BUILD_TARGET -s -C $$TARGET emit_tests;\
59 done; 59 done;
60endef 60endef
61 61
62clean: 62clean:
63 @for TARGET in $(SUB_DIRS); do \ 63 @for TARGET in $(SUB_DIRS); do \
64 BUILD_TARGET=$$OUTPUT/$$TARGET; \ 64 BUILD_TARGET=$(OUTPUT)/$$TARGET; \
65 $(MAKE) OUTPUT=$$BUILD_TARGET -C $$TARGET clean; \ 65 $(MAKE) OUTPUT=$$BUILD_TARGET -C $$TARGET clean; \
66 done; 66 done;
67 rm -f tags 67 rm -f tags
diff --git a/virt/kvm/arm/vgic/vgic-init.c b/virt/kvm/arm/vgic/vgic-init.c
index 276139a24e6f..702f8108608d 100644
--- a/virt/kvm/arm/vgic/vgic-init.c
+++ b/virt/kvm/arm/vgic/vgic-init.c
@@ -392,6 +392,25 @@ static irqreturn_t vgic_maintenance_handler(int irq, void *data)
392} 392}
393 393
394/** 394/**
395 * kvm_vgic_init_cpu_hardware - initialize the GIC VE hardware
396 *
397 * For a specific CPU, initialize the GIC VE hardware.
398 */
399void kvm_vgic_init_cpu_hardware(void)
400{
401 BUG_ON(preemptible());
402
403 /*
404 * We want to make sure the list registers start out clear so that we
405 * only have to program the used registers.
406 */
407 if (kvm_vgic_global_state.type == VGIC_V2)
408 vgic_v2_init_lrs();
409 else
410 kvm_call_hyp(__vgic_v3_init_lrs);
411}
412
413/**
395 * kvm_vgic_hyp_init: populates the kvm_vgic_global_state variable 414 * kvm_vgic_hyp_init: populates the kvm_vgic_global_state variable
396 * according to the host GIC model. Accordingly calls either 415 * according to the host GIC model. Accordingly calls either
397 * vgic_v2/v3_probe which registers the KVM_DEVICE that can be 416 * vgic_v2/v3_probe which registers the KVM_DEVICE that can be
diff --git a/virt/kvm/arm/vgic/vgic-mmio-v2.c b/virt/kvm/arm/vgic/vgic-mmio-v2.c
index a3ad7ff95c9b..0a4283ed9aa7 100644
--- a/virt/kvm/arm/vgic/vgic-mmio-v2.c
+++ b/virt/kvm/arm/vgic/vgic-mmio-v2.c
@@ -229,7 +229,15 @@ static unsigned long vgic_mmio_read_vcpuif(struct kvm_vcpu *vcpu,
229 val = vmcr.ctlr; 229 val = vmcr.ctlr;
230 break; 230 break;
231 case GIC_CPU_PRIMASK: 231 case GIC_CPU_PRIMASK:
232 val = vmcr.pmr; 232 /*
233 * Our KVM_DEV_TYPE_ARM_VGIC_V2 device ABI exports the
234 * PMR field as GICH_VMCR.VMPriMask rather than
235 * GICC_PMR.Priority, so we expose the upper five bits of
236 * priority mask to userspace using the lower bits in the
237 * unsigned long.
238 */
239 val = (vmcr.pmr & GICV_PMR_PRIORITY_MASK) >>
240 GICV_PMR_PRIORITY_SHIFT;
233 break; 241 break;
234 case GIC_CPU_BINPOINT: 242 case GIC_CPU_BINPOINT:
235 val = vmcr.bpr; 243 val = vmcr.bpr;
@@ -262,7 +270,15 @@ static void vgic_mmio_write_vcpuif(struct kvm_vcpu *vcpu,
262 vmcr.ctlr = val; 270 vmcr.ctlr = val;
263 break; 271 break;
264 case GIC_CPU_PRIMASK: 272 case GIC_CPU_PRIMASK:
265 vmcr.pmr = val; 273 /*
274 * Our KVM_DEV_TYPE_ARM_VGIC_V2 device ABI exports the
275 * PMR field as GICH_VMCR.VMPriMask rather than
276 * GICC_PMR.Priority, so we expose the upper five bits of
277 * priority mask to userspace using the lower bits in the
278 * unsigned long.
279 */
280 vmcr.pmr = (val << GICV_PMR_PRIORITY_SHIFT) &
281 GICV_PMR_PRIORITY_MASK;
266 break; 282 break;
267 case GIC_CPU_BINPOINT: 283 case GIC_CPU_BINPOINT:
268 vmcr.bpr = val; 284 vmcr.bpr = val;
diff --git a/virt/kvm/arm/vgic/vgic-v2.c b/virt/kvm/arm/vgic/vgic-v2.c
index b834ecdf3225..b637d9c7afe3 100644
--- a/virt/kvm/arm/vgic/vgic-v2.c
+++ b/virt/kvm/arm/vgic/vgic-v2.c
@@ -36,6 +36,21 @@ static unsigned long *u64_to_bitmask(u64 *val)
36 return (unsigned long *)val; 36 return (unsigned long *)val;
37} 37}
38 38
39static inline void vgic_v2_write_lr(int lr, u32 val)
40{
41 void __iomem *base = kvm_vgic_global_state.vctrl_base;
42
43 writel_relaxed(val, base + GICH_LR0 + (lr * 4));
44}
45
46void vgic_v2_init_lrs(void)
47{
48 int i;
49
50 for (i = 0; i < kvm_vgic_global_state.nr_lr; i++)
51 vgic_v2_write_lr(i, 0);
52}
53
39void vgic_v2_process_maintenance(struct kvm_vcpu *vcpu) 54void vgic_v2_process_maintenance(struct kvm_vcpu *vcpu)
40{ 55{
41 struct vgic_v2_cpu_if *cpuif = &vcpu->arch.vgic_cpu.vgic_v2; 56 struct vgic_v2_cpu_if *cpuif = &vcpu->arch.vgic_cpu.vgic_v2;
@@ -191,8 +206,8 @@ void vgic_v2_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcrp)
191 GICH_VMCR_ALIAS_BINPOINT_MASK; 206 GICH_VMCR_ALIAS_BINPOINT_MASK;
192 vmcr |= (vmcrp->bpr << GICH_VMCR_BINPOINT_SHIFT) & 207 vmcr |= (vmcrp->bpr << GICH_VMCR_BINPOINT_SHIFT) &
193 GICH_VMCR_BINPOINT_MASK; 208 GICH_VMCR_BINPOINT_MASK;
194 vmcr |= (vmcrp->pmr << GICH_VMCR_PRIMASK_SHIFT) & 209 vmcr |= ((vmcrp->pmr >> GICV_PMR_PRIORITY_SHIFT) <<
195 GICH_VMCR_PRIMASK_MASK; 210 GICH_VMCR_PRIMASK_SHIFT) & GICH_VMCR_PRIMASK_MASK;
196 211
197 vcpu->arch.vgic_cpu.vgic_v2.vgic_vmcr = vmcr; 212 vcpu->arch.vgic_cpu.vgic_v2.vgic_vmcr = vmcr;
198} 213}
@@ -207,8 +222,8 @@ void vgic_v2_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcrp)
207 GICH_VMCR_ALIAS_BINPOINT_SHIFT; 222 GICH_VMCR_ALIAS_BINPOINT_SHIFT;
208 vmcrp->bpr = (vmcr & GICH_VMCR_BINPOINT_MASK) >> 223 vmcrp->bpr = (vmcr & GICH_VMCR_BINPOINT_MASK) >>
209 GICH_VMCR_BINPOINT_SHIFT; 224 GICH_VMCR_BINPOINT_SHIFT;
210 vmcrp->pmr = (vmcr & GICH_VMCR_PRIMASK_MASK) >> 225 vmcrp->pmr = ((vmcr & GICH_VMCR_PRIMASK_MASK) >>
211 GICH_VMCR_PRIMASK_SHIFT; 226 GICH_VMCR_PRIMASK_SHIFT) << GICV_PMR_PRIORITY_SHIFT;
212} 227}
213 228
214void vgic_v2_enable(struct kvm_vcpu *vcpu) 229void vgic_v2_enable(struct kvm_vcpu *vcpu)
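The vgic_v2 accessors above keep struct vgic_vmcr in GICC_PMR format (eight-bit priority) while GICH_VMCR.VMPriMask stores only the upper five bits, so every get/set shifts by GICV_PMR_PRIORITY_SHIFT. The conversion in isolation, assuming the usual GICv2 layout of shift 3 and mask 0xf8 (EX_* names are illustrative):

#include <linux/bitops.h>
#include <linux/types.h>

#define EX_PMR_SHIFT	3
#define EX_PMR_MASK	GENMASK(7, 3)	/* upper five priority bits */

/* 8-bit GICC_PMR priority -> 5-bit VMPriMask register field */
static inline u32 pmr_to_field(u32 pmr)
{
	return (pmr & EX_PMR_MASK) >> EX_PMR_SHIFT;
}

/* 5-bit register field -> 8-bit GICC_PMR priority */
static inline u32 field_to_pmr(u32 field)
{
	return (field << EX_PMR_SHIFT) & EX_PMR_MASK;
}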
diff --git a/virt/kvm/arm/vgic/vgic.h b/virt/kvm/arm/vgic/vgic.h
index db28f7cadab2..6cf557e9f718 100644
--- a/virt/kvm/arm/vgic/vgic.h
+++ b/virt/kvm/arm/vgic/vgic.h
@@ -81,11 +81,18 @@ static inline bool irq_is_pending(struct vgic_irq *irq)
81 return irq->pending_latch || irq->line_level; 81 return irq->pending_latch || irq->line_level;
82} 82}
83 83
84/*
85 * This struct provides an intermediate representation of the fields contained
86 * in the GICH_VMCR and ICH_VMCR registers, such that code exporting the GIC
87 * state to userspace can generate either GICv2 or GICv3 CPU interface
88 * registers regardless of the hardware backed GIC used.
89 */
84struct vgic_vmcr { 90struct vgic_vmcr {
85 u32 ctlr; 91 u32 ctlr;
86 u32 abpr; 92 u32 abpr;
87 u32 bpr; 93 u32 bpr;
88 u32 pmr; 94 u32 pmr; /* Priority mask field in the GICC_PMR and
95 * ICC_PMR_EL1 priority field format */
89 /* Below member variable are valid only for GICv3 */ 96 /* Below member variable are valid only for GICv3 */
90 u32 grpen0; 97 u32 grpen0;
91 u32 grpen1; 98 u32 grpen1;
@@ -130,6 +137,8 @@ int vgic_v2_map_resources(struct kvm *kvm);
130int vgic_register_dist_iodev(struct kvm *kvm, gpa_t dist_base_address, 137int vgic_register_dist_iodev(struct kvm *kvm, gpa_t dist_base_address,
131 enum vgic_type); 138 enum vgic_type);
132 139
140void vgic_v2_init_lrs(void);
141
133static inline void vgic_get_irq_kref(struct vgic_irq *irq) 142static inline void vgic_get_irq_kref(struct vgic_irq *irq)
134{ 143{
135 if (irq->intid < VGIC_MIN_LPI) 144 if (irq->intid < VGIC_MIN_LPI)