-rw-r--r--arch/arm64/net/bpf_jit_comp.c5
-rw-r--r--arch/s390/Kconfig3
-rw-r--r--arch/s390/configs/default_defconfig39
-rw-r--r--arch/s390/configs/gcov_defconfig28
-rw-r--r--arch/s390/configs/performance_defconfig27
-rw-r--r--arch/s390/configs/zfcpdump_defconfig6
-rw-r--r--arch/s390/crypto/Makefile3
-rw-r--r--arch/s390/crypto/arch_random.c1
-rw-r--r--arch/s390/defconfig8
-rw-r--r--arch/s390/include/asm/Kbuild2
-rw-r--r--arch/s390/include/asm/debug.h3
-rw-r--r--arch/s390/include/asm/device.h10
-rw-r--r--arch/s390/include/asm/dis.h2
-rw-r--r--arch/s390/include/asm/elf.h32
-rw-r--r--arch/s390/include/asm/fb.h12
-rw-r--r--arch/s390/include/asm/io.h4
-rw-r--r--arch/s390/include/asm/kprobes.h20
-rw-r--r--arch/s390/include/asm/mmu_context.h4
-rw-r--r--arch/s390/include/asm/page.h3
-rw-r--r--arch/s390/include/asm/pci.h15
-rw-r--r--arch/s390/include/asm/pci_insn.h2
-rw-r--r--arch/s390/include/asm/pgalloc.h25
-rw-r--r--arch/s390/include/asm/pgtable.h105
-rw-r--r--arch/s390/include/asm/processor.h4
-rw-r--r--arch/s390/include/asm/ptrace.h54
-rw-r--r--arch/s390/include/asm/sigp.h2
-rw-r--r--arch/s390/include/asm/sysinfo.h2
-rw-r--r--arch/s390/include/asm/thread_info.h1
-rw-r--r--arch/s390/include/asm/tlb.h15
-rw-r--r--arch/s390/kernel/debug.c8
-rw-r--r--arch/s390/kernel/dumpstack.c33
-rw-r--r--arch/s390/kernel/entry.S57
-rw-r--r--arch/s390/kernel/ftrace.c4
-rw-r--r--arch/s390/kernel/ipl.c7
-rw-r--r--arch/s390/kernel/perf_cpum_sf.c10
-rw-r--r--arch/s390/kernel/perf_event.c3
-rw-r--r--arch/s390/kernel/ptrace.c15
-rw-r--r--arch/s390/kernel/smp.c3
-rw-r--r--arch/s390/kernel/traps.c1
-rw-r--r--arch/s390/kernel/uprobes.c12
-rw-r--r--arch/s390/kernel/vdso.c91
-rw-r--r--arch/s390/kernel/vmlinux.lds.S8
-rw-r--r--arch/s390/kernel/vtime.c14
-rw-r--r--arch/s390/kvm/gaccess.c22
-rw-r--r--arch/s390/kvm/gaccess.h4
-rw-r--r--arch/s390/kvm/guestdbg.c6
-rw-r--r--arch/s390/kvm/priv.c8
-rw-r--r--arch/s390/lib/probes.c1
-rw-r--r--arch/s390/lib/uaccess.c4
-rw-r--r--arch/s390/mm/dump_pagetables.c23
-rw-r--r--arch/s390/mm/fault.c2
-rw-r--r--arch/s390/mm/gmap.c11
-rw-r--r--arch/s390/mm/gup.c33
-rw-r--r--arch/s390/mm/hugetlbpage.c30
-rw-r--r--arch/s390/mm/init.c6
-rw-r--r--arch/s390/mm/mmap.c4
-rw-r--r--arch/s390/mm/pageattr.c30
-rw-r--r--arch/s390/mm/pgalloc.c57
-rw-r--r--arch/s390/mm/pgtable.c6
-rw-r--r--arch/s390/mm/vmem.c44
-rw-r--r--arch/s390/pci/pci.c173
-rw-r--r--arch/s390/pci/pci_clp.c77
-rw-r--r--arch/s390/pci/pci_dma.c4
-rw-r--r--arch/s390/pci/pci_event.c14
-rw-r--r--arch/s390/pci/pci_insn.c10
-rw-r--r--arch/s390/tools/gen_facilities.c2
-rw-r--r--drivers/crypto/Kconfig15
-rw-r--r--drivers/edac/amd64_edac.c40
-rw-r--r--drivers/firmware/efi/efi-pstore.c12
-rw-r--r--drivers/net/dsa/mv88e6xxx/chip.c3
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c13
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c12
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/main.c10
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c9
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_fs.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_main.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c9
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_core.c25
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_core.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/ipoib.c11
-rw-r--r--drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c2
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c2
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic.h4
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c34
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h1
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c3
-rw-r--r--drivers/net/ethernet/qualcomm/qca_spi.c10
-rw-r--r--drivers/net/ethernet/sfc/nic.h8
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_main.c2
-rw-r--r--drivers/net/ethernet/sun/ldmvsw.c4
-rw-r--r--drivers/net/ethernet/ti/netcp_core.c6
-rw-r--r--drivers/net/ethernet/ti/netcp_ethss.c1
-rw-r--r--drivers/net/irda/irda-usb.c2
-rw-r--r--drivers/net/macvlan.c7
-rw-r--r--drivers/net/phy/mdio-mux.c11
-rw-r--r--drivers/net/phy/mdio_bus.c6
-rw-r--r--drivers/net/usb/ch9200.c4
-rw-r--r--drivers/net/vmxnet3/vmxnet3_drv.c5
-rw-r--r--drivers/net/vrf.c3
-rw-r--r--drivers/net/xen-netfront.c3
-rw-r--r--drivers/s390/block/Kconfig7
-rw-r--r--drivers/s390/block/Makefile3
-rw-r--r--drivers/s390/block/dasd.c76
-rw-r--r--drivers/s390/block/dasd_devmap.c75
-rw-r--r--drivers/s390/block/scm_blk.c262
-rw-r--r--drivers/s390/block/scm_blk.h60
-rw-r--r--drivers/s390/block/scm_blk_cluster.c255
-rw-r--r--drivers/s390/cio/ccwgroup.c4
-rw-r--r--drivers/s390/cio/css.c49
-rw-r--r--drivers/s390/cio/device.c42
-rw-r--r--drivers/s390/cio/qdio_debug.h2
-rw-r--r--drivers/s390/cio/vfio_ccw_drv.c58
-rw-r--r--drivers/s390/cio/vfio_ccw_ops.c12
-rw-r--r--drivers/s390/crypto/ap_bus.c59
-rw-r--r--drivers/s390/crypto/ap_card.c9
-rw-r--r--drivers/s390/crypto/ap_queue.c9
-rw-r--r--drivers/s390/crypto/pkey_api.c6
-rw-r--r--drivers/s390/crypto/zcrypt_api.c12
-rw-r--r--drivers/s390/crypto/zcrypt_cca_key.h115
-rw-r--r--drivers/s390/crypto/zcrypt_msgtype6.c4
-rw-r--r--drivers/s390/net/qeth_core.h4
-rw-r--r--drivers/s390/net/qeth_core_main.c21
-rw-r--r--drivers/s390/net/qeth_core_sys.c24
-rw-r--r--drivers/s390/net/qeth_l2.h2
-rw-r--r--drivers/s390/net/qeth_l2_main.c26
-rw-r--r--drivers/s390/net/qeth_l2_sys.c8
-rw-r--r--drivers/s390/net/qeth_l3_main.c8
-rw-r--r--drivers/s390/virtio/virtio_ccw.c2
-rw-r--r--drivers/soc/ti/knav_dma.c2
-rw-r--r--drivers/tty/hvc/Kconfig2
-rw-r--r--fs/cifs/cifsacl.c30
-rw-r--r--fs/cifs/cifsglob.h2
-rw-r--r--fs/cifs/cifsproto.h3
-rw-r--r--fs/cifs/cifssmb.c11
-rw-r--r--fs/cifs/file.c2
-rw-r--r--fs/cifs/inode.c3
-rw-r--r--fs/cifs/smb2pdu.c21
-rw-r--r--fs/cifs/transport.c4
-rw-r--r--fs/cifs/xattr.c6
-rw-r--r--include/linux/bpf_verifier.h4
-rw-r--r--include/linux/mlx5/fs.h4
-rw-r--r--include/linux/netdevice.h8
-rw-r--r--include/uapi/linux/bpf.h8
-rw-r--r--include/uapi/linux/if_link.h13
-rw-r--r--kernel/bpf/syscall.c5
-rw-r--r--kernel/bpf/verifier.c133
-rw-r--r--net/core/dev.c57
-rw-r--r--net/core/rtnetlink.c45
-rw-r--r--net/core/sock.c20
-rw-r--r--net/dccp/ipv6.c6
-rw-r--r--net/ipv4/tcp_input.c9
-rw-r--r--net/ipv6/addrconf.c5
-rw-r--r--net/ipv6/tcp_ipv6.c2
-rw-r--r--net/packet/af_packet.c14
-rw-r--r--net/sched/sch_api.c6
-rw-r--r--net/sctp/ipv6.c46
-rw-r--r--net/tipc/socket.c38
-rw-r--r--samples/bpf/cookie_uid_helper_example.c4
-rw-r--r--samples/bpf/offwaketime_user.c1
-rw-r--r--samples/bpf/sampleip_user.c1
-rw-r--r--samples/bpf/trace_event_user.c1
-rw-r--r--samples/bpf/tracex2_user.c1
-rw-r--r--samples/bpf/xdp1_user.c9
-rw-r--r--samples/bpf/xdp_tx_iptunnel_user.c8
-rw-r--r--tools/build/feature/test-bpf.c1
-rw-r--r--tools/include/uapi/linux/bpf.h11
-rw-r--r--tools/lib/bpf/bpf.c22
-rw-r--r--tools/lib/bpf/bpf.h4
-rw-r--r--tools/testing/selftests/bpf/Makefile6
-rw-r--r--tools/testing/selftests/bpf/include/uapi/linux/types.h6
-rw-r--r--tools/testing/selftests/bpf/test_align.c453
173 files changed, 2303 insertions, 1513 deletions
diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c
index c6e53580aefe..71f930501ade 100644
--- a/arch/arm64/net/bpf_jit_comp.c
+++ b/arch/arm64/net/bpf_jit_comp.c
@@ -253,8 +253,9 @@ static int emit_bpf_tail_call(struct jit_ctx *ctx)
 	 */
 	off = offsetof(struct bpf_array, ptrs);
 	emit_a64_mov_i64(tmp, off, ctx);
-	emit(A64_LDR64(tmp, r2, tmp), ctx);
-	emit(A64_LDR64(prg, tmp, r3), ctx);
+	emit(A64_ADD(1, tmp, r2, tmp), ctx);
+	emit(A64_LSL(1, prg, r3, 3), ctx);
+	emit(A64_LDR64(prg, tmp, prg), ctx);
 	emit(A64_CBZ(1, prg, jmp_offset), ctx);
 
 	/* goto *(prog->bpf_func + prologue_size); */
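The new instruction sequence makes the JIT compute the tail-call target the same way the interpreter does: add the offset of the ptrs[] array to the map pointer, scale the index by the pointer size, then load the program pointer. A minimal user-space sketch of that address computation, with illustrative type names rather than the kernel's real struct bpf_array layout:

#include <stdint.h>
#include <stddef.h>

struct bpf_prog;                            /* opaque for this sketch */

struct bpf_array_like {                     /* stand-in for struct bpf_array */
	struct bpf_prog *ptrs[1];
};

/* Equivalent of the emitted A64_ADD + A64_LSL + A64_LDR64 sequence:
 * base = map + offsetof(struct bpf_array, ptrs); prg = *(base + (index << 3)) */
static struct bpf_prog *tail_call_lookup(struct bpf_array_like *map, uint64_t index)
{
	uint8_t *base = (uint8_t *)map + offsetof(struct bpf_array_like, ptrs);

	return *(struct bpf_prog **)(base + (index << 3));
}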
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index e161fafb495b..bb11f9f30c8d 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -64,6 +64,7 @@ config ARCH_SUPPORTS_UPROBES
 
 config S390
 	def_bool y
+	select ARCH_BINFMT_ELF_STATE
 	select ARCH_HAS_DEVMEM_IS_ALLOWED
 	select ARCH_HAS_ELF_RANDOMIZE
 	select ARCH_HAS_GCOV_PROFILE_ALL
@@ -184,7 +185,7 @@ config SCHED_OMIT_FRAME_POINTER
 
 config PGTABLE_LEVELS
 	int
-	default 4
+	default 5
 
 source "init/Kconfig"
 
diff --git a/arch/s390/configs/default_defconfig b/arch/s390/configs/default_defconfig
index a5039fa89314..282072206df7 100644
--- a/arch/s390/configs/default_defconfig
+++ b/arch/s390/configs/default_defconfig
@@ -30,6 +30,7 @@ CONFIG_USER_NS=y
30CONFIG_SCHED_AUTOGROUP=y 30CONFIG_SCHED_AUTOGROUP=y
31CONFIG_BLK_DEV_INITRD=y 31CONFIG_BLK_DEV_INITRD=y
32CONFIG_EXPERT=y 32CONFIG_EXPERT=y
33# CONFIG_SYSFS_SYSCALL is not set
33CONFIG_BPF_SYSCALL=y 34CONFIG_BPF_SYSCALL=y
34CONFIG_USERFAULTFD=y 35CONFIG_USERFAULTFD=y
35# CONFIG_COMPAT_BRK is not set 36# CONFIG_COMPAT_BRK is not set
@@ -44,7 +45,10 @@ CONFIG_MODULE_UNLOAD=y
44CONFIG_MODULE_FORCE_UNLOAD=y 45CONFIG_MODULE_FORCE_UNLOAD=y
45CONFIG_MODVERSIONS=y 46CONFIG_MODVERSIONS=y
46CONFIG_MODULE_SRCVERSION_ALL=y 47CONFIG_MODULE_SRCVERSION_ALL=y
48CONFIG_BLK_DEV_INTEGRITY=y
47CONFIG_BLK_DEV_THROTTLING=y 49CONFIG_BLK_DEV_THROTTLING=y
50CONFIG_BLK_WBT=y
51CONFIG_BLK_WBT_SQ=y
48CONFIG_PARTITION_ADVANCED=y 52CONFIG_PARTITION_ADVANCED=y
49CONFIG_IBM_PARTITION=y 53CONFIG_IBM_PARTITION=y
50CONFIG_BSD_DISKLABEL=y 54CONFIG_BSD_DISKLABEL=y
@@ -90,6 +94,8 @@ CONFIG_UNIX=y
90CONFIG_UNIX_DIAG=m 94CONFIG_UNIX_DIAG=m
91CONFIG_XFRM_USER=m 95CONFIG_XFRM_USER=m
92CONFIG_NET_KEY=m 96CONFIG_NET_KEY=m
97CONFIG_SMC=m
98CONFIG_SMC_DIAG=m
93CONFIG_INET=y 99CONFIG_INET=y
94CONFIG_IP_MULTICAST=y 100CONFIG_IP_MULTICAST=y
95CONFIG_IP_ADVANCED_ROUTER=y 101CONFIG_IP_ADVANCED_ROUTER=y
@@ -359,6 +365,7 @@ CONFIG_NET_ACT_SIMP=m
359CONFIG_NET_ACT_SKBEDIT=m 365CONFIG_NET_ACT_SKBEDIT=m
360CONFIG_NET_ACT_CSUM=m 366CONFIG_NET_ACT_CSUM=m
361CONFIG_DNS_RESOLVER=y 367CONFIG_DNS_RESOLVER=y
368CONFIG_NETLINK_DIAG=m
362CONFIG_CGROUP_NET_PRIO=y 369CONFIG_CGROUP_NET_PRIO=y
363CONFIG_BPF_JIT=y 370CONFIG_BPF_JIT=y
364CONFIG_NET_PKTGEN=m 371CONFIG_NET_PKTGEN=m
@@ -367,16 +374,19 @@ CONFIG_DEVTMPFS=y
367CONFIG_DMA_CMA=y 374CONFIG_DMA_CMA=y
368CONFIG_CMA_SIZE_MBYTES=0 375CONFIG_CMA_SIZE_MBYTES=0
369CONFIG_CONNECTOR=y 376CONFIG_CONNECTOR=y
377CONFIG_ZRAM=m
370CONFIG_BLK_DEV_LOOP=m 378CONFIG_BLK_DEV_LOOP=m
371CONFIG_BLK_DEV_CRYPTOLOOP=m 379CONFIG_BLK_DEV_CRYPTOLOOP=m
380CONFIG_BLK_DEV_DRBD=m
372CONFIG_BLK_DEV_NBD=m 381CONFIG_BLK_DEV_NBD=m
373CONFIG_BLK_DEV_OSD=m 382CONFIG_BLK_DEV_OSD=m
374CONFIG_BLK_DEV_RAM=y 383CONFIG_BLK_DEV_RAM=y
375CONFIG_BLK_DEV_RAM_SIZE=32768 384CONFIG_BLK_DEV_RAM_SIZE=32768
376CONFIG_CDROM_PKTCDVD=m 385CONFIG_BLK_DEV_RAM_DAX=y
377CONFIG_ATA_OVER_ETH=m
378CONFIG_VIRTIO_BLK=y 386CONFIG_VIRTIO_BLK=y
387CONFIG_BLK_DEV_RBD=m
379CONFIG_ENCLOSURE_SERVICES=m 388CONFIG_ENCLOSURE_SERVICES=m
389CONFIG_GENWQE=m
380CONFIG_RAID_ATTRS=m 390CONFIG_RAID_ATTRS=m
381CONFIG_SCSI=y 391CONFIG_SCSI=y
382CONFIG_BLK_DEV_SD=y 392CONFIG_BLK_DEV_SD=y
@@ -442,6 +452,8 @@ CONFIG_NLMON=m
442# CONFIG_NET_VENDOR_INTEL is not set 452# CONFIG_NET_VENDOR_INTEL is not set
443# CONFIG_NET_VENDOR_MARVELL is not set 453# CONFIG_NET_VENDOR_MARVELL is not set
444CONFIG_MLX4_EN=m 454CONFIG_MLX4_EN=m
455CONFIG_MLX5_CORE=m
456CONFIG_MLX5_CORE_EN=y
445# CONFIG_NET_VENDOR_NATSEMI is not set 457# CONFIG_NET_VENDOR_NATSEMI is not set
446CONFIG_PPP=m 458CONFIG_PPP=m
447CONFIG_PPP_BSDCOMP=m 459CONFIG_PPP_BSDCOMP=m
@@ -452,7 +464,6 @@ CONFIG_PPTP=m
452CONFIG_PPPOL2TP=m 464CONFIG_PPPOL2TP=m
453CONFIG_PPP_ASYNC=m 465CONFIG_PPP_ASYNC=m
454CONFIG_PPP_SYNC_TTY=m 466CONFIG_PPP_SYNC_TTY=m
455# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
456# CONFIG_INPUT_KEYBOARD is not set 467# CONFIG_INPUT_KEYBOARD is not set
457# CONFIG_INPUT_MOUSE is not set 468# CONFIG_INPUT_MOUSE is not set
458# CONFIG_SERIO is not set 469# CONFIG_SERIO is not set
@@ -471,6 +482,7 @@ CONFIG_DIAG288_WATCHDOG=m
471CONFIG_INFINIBAND=m 482CONFIG_INFINIBAND=m
472CONFIG_INFINIBAND_USER_ACCESS=m 483CONFIG_INFINIBAND_USER_ACCESS=m
473CONFIG_MLX4_INFINIBAND=m 484CONFIG_MLX4_INFINIBAND=m
485CONFIG_MLX5_INFINIBAND=m
474CONFIG_VIRTIO_BALLOON=m 486CONFIG_VIRTIO_BALLOON=m
475CONFIG_EXT4_FS=y 487CONFIG_EXT4_FS=y
476CONFIG_EXT4_FS_POSIX_ACL=y 488CONFIG_EXT4_FS_POSIX_ACL=y
@@ -487,12 +499,18 @@ CONFIG_XFS_POSIX_ACL=y
487CONFIG_XFS_RT=y 499CONFIG_XFS_RT=y
488CONFIG_XFS_DEBUG=y 500CONFIG_XFS_DEBUG=y
489CONFIG_GFS2_FS=m 501CONFIG_GFS2_FS=m
502CONFIG_GFS2_FS_LOCKING_DLM=y
490CONFIG_OCFS2_FS=m 503CONFIG_OCFS2_FS=m
491CONFIG_BTRFS_FS=y 504CONFIG_BTRFS_FS=y
492CONFIG_BTRFS_FS_POSIX_ACL=y 505CONFIG_BTRFS_FS_POSIX_ACL=y
506CONFIG_BTRFS_DEBUG=y
493CONFIG_NILFS2_FS=m 507CONFIG_NILFS2_FS=m
508CONFIG_FS_DAX=y
509CONFIG_EXPORTFS_BLOCK_OPS=y
494CONFIG_FANOTIFY=y 510CONFIG_FANOTIFY=y
511CONFIG_FANOTIFY_ACCESS_PERMISSIONS=y
495CONFIG_QUOTA_NETLINK_INTERFACE=y 512CONFIG_QUOTA_NETLINK_INTERFACE=y
513CONFIG_QUOTA_DEBUG=y
496CONFIG_QFMT_V1=m 514CONFIG_QFMT_V1=m
497CONFIG_QFMT_V2=m 515CONFIG_QFMT_V2=m
498CONFIG_AUTOFS4_FS=m 516CONFIG_AUTOFS4_FS=m
@@ -558,6 +576,7 @@ CONFIG_HEADERS_CHECK=y
558CONFIG_DEBUG_SECTION_MISMATCH=y 576CONFIG_DEBUG_SECTION_MISMATCH=y
559CONFIG_MAGIC_SYSRQ=y 577CONFIG_MAGIC_SYSRQ=y
560CONFIG_DEBUG_PAGEALLOC=y 578CONFIG_DEBUG_PAGEALLOC=y
579CONFIG_DEBUG_RODATA_TEST=y
561CONFIG_DEBUG_OBJECTS=y 580CONFIG_DEBUG_OBJECTS=y
562CONFIG_DEBUG_OBJECTS_SELFTEST=y 581CONFIG_DEBUG_OBJECTS_SELFTEST=y
563CONFIG_DEBUG_OBJECTS_FREE=y 582CONFIG_DEBUG_OBJECTS_FREE=y
@@ -580,7 +599,6 @@ CONFIG_DETECT_HUNG_TASK=y
580CONFIG_WQ_WATCHDOG=y 599CONFIG_WQ_WATCHDOG=y
581CONFIG_PANIC_ON_OOPS=y 600CONFIG_PANIC_ON_OOPS=y
582CONFIG_DEBUG_TIMEKEEPING=y 601CONFIG_DEBUG_TIMEKEEPING=y
583CONFIG_TIMER_STATS=y
584CONFIG_DEBUG_RT_MUTEXES=y 602CONFIG_DEBUG_RT_MUTEXES=y
585CONFIG_DEBUG_WW_MUTEX_SLOWPATH=y 603CONFIG_DEBUG_WW_MUTEX_SLOWPATH=y
586CONFIG_PROVE_LOCKING=y 604CONFIG_PROVE_LOCKING=y
@@ -595,6 +613,7 @@ CONFIG_RCU_TORTURE_TEST=m
595CONFIG_RCU_CPU_STALL_TIMEOUT=300 613CONFIG_RCU_CPU_STALL_TIMEOUT=300
596CONFIG_NOTIFIER_ERROR_INJECTION=m 614CONFIG_NOTIFIER_ERROR_INJECTION=m
597CONFIG_PM_NOTIFIER_ERROR_INJECT=m 615CONFIG_PM_NOTIFIER_ERROR_INJECT=m
616CONFIG_NETDEV_NOTIFIER_ERROR_INJECT=m
598CONFIG_FAULT_INJECTION=y 617CONFIG_FAULT_INJECTION=y
599CONFIG_FAILSLAB=y 618CONFIG_FAILSLAB=y
600CONFIG_FAIL_PAGE_ALLOC=y 619CONFIG_FAIL_PAGE_ALLOC=y
@@ -616,13 +635,12 @@ CONFIG_HIST_TRIGGERS=y
616CONFIG_TRACE_ENUM_MAP_FILE=y 635CONFIG_TRACE_ENUM_MAP_FILE=y
617CONFIG_LKDTM=m 636CONFIG_LKDTM=m
618CONFIG_TEST_LIST_SORT=y 637CONFIG_TEST_LIST_SORT=y
638CONFIG_TEST_SORT=y
619CONFIG_KPROBES_SANITY_TEST=y 639CONFIG_KPROBES_SANITY_TEST=y
620CONFIG_RBTREE_TEST=y 640CONFIG_RBTREE_TEST=y
621CONFIG_INTERVAL_TREE_TEST=m 641CONFIG_INTERVAL_TREE_TEST=m
622CONFIG_PERCPU_TEST=m 642CONFIG_PERCPU_TEST=m
623CONFIG_ATOMIC64_SELFTEST=y 643CONFIG_ATOMIC64_SELFTEST=y
624CONFIG_TEST_STRING_HELPERS=y
625CONFIG_TEST_KSTRTOX=y
626CONFIG_DMA_API_DEBUG=y 644CONFIG_DMA_API_DEBUG=y
627CONFIG_TEST_BPF=m 645CONFIG_TEST_BPF=m
628CONFIG_BUG_ON_DATA_CORRUPTION=y 646CONFIG_BUG_ON_DATA_CORRUPTION=y
@@ -630,6 +648,7 @@ CONFIG_S390_PTDUMP=y
630CONFIG_ENCRYPTED_KEYS=m 648CONFIG_ENCRYPTED_KEYS=m
631CONFIG_SECURITY=y 649CONFIG_SECURITY=y
632CONFIG_SECURITY_NETWORK=y 650CONFIG_SECURITY_NETWORK=y
651CONFIG_HARDENED_USERCOPY=y
633CONFIG_SECURITY_SELINUX=y 652CONFIG_SECURITY_SELINUX=y
634CONFIG_SECURITY_SELINUX_BOOTPARAM=y 653CONFIG_SECURITY_SELINUX_BOOTPARAM=y
635CONFIG_SECURITY_SELINUX_BOOTPARAM_VALUE=0 654CONFIG_SECURITY_SELINUX_BOOTPARAM_VALUE=0
@@ -640,7 +659,9 @@ CONFIG_CRYPTO_RSA=m
640CONFIG_CRYPTO_DH=m 659CONFIG_CRYPTO_DH=m
641CONFIG_CRYPTO_ECDH=m 660CONFIG_CRYPTO_ECDH=m
642CONFIG_CRYPTO_USER=m 661CONFIG_CRYPTO_USER=m
662CONFIG_CRYPTO_PCRYPT=m
643CONFIG_CRYPTO_CRYPTD=m 663CONFIG_CRYPTO_CRYPTD=m
664CONFIG_CRYPTO_MCRYPTD=m
644CONFIG_CRYPTO_TEST=m 665CONFIG_CRYPTO_TEST=m
645CONFIG_CRYPTO_CCM=m 666CONFIG_CRYPTO_CCM=m
646CONFIG_CRYPTO_GCM=m 667CONFIG_CRYPTO_GCM=m
@@ -648,6 +669,7 @@ CONFIG_CRYPTO_CHACHA20POLY1305=m
648CONFIG_CRYPTO_LRW=m 669CONFIG_CRYPTO_LRW=m
649CONFIG_CRYPTO_PCBC=m 670CONFIG_CRYPTO_PCBC=m
650CONFIG_CRYPTO_KEYWRAP=m 671CONFIG_CRYPTO_KEYWRAP=m
672CONFIG_CRYPTO_CMAC=m
651CONFIG_CRYPTO_XCBC=m 673CONFIG_CRYPTO_XCBC=m
652CONFIG_CRYPTO_VMAC=m 674CONFIG_CRYPTO_VMAC=m
653CONFIG_CRYPTO_CRC32=m 675CONFIG_CRYPTO_CRC32=m
@@ -657,8 +679,10 @@ CONFIG_CRYPTO_RMD160=m
657CONFIG_CRYPTO_RMD256=m 679CONFIG_CRYPTO_RMD256=m
658CONFIG_CRYPTO_RMD320=m 680CONFIG_CRYPTO_RMD320=m
659CONFIG_CRYPTO_SHA512=m 681CONFIG_CRYPTO_SHA512=m
682CONFIG_CRYPTO_SHA3=m
660CONFIG_CRYPTO_TGR192=m 683CONFIG_CRYPTO_TGR192=m
661CONFIG_CRYPTO_WP512=m 684CONFIG_CRYPTO_WP512=m
685CONFIG_CRYPTO_AES_TI=m
662CONFIG_CRYPTO_ANUBIS=m 686CONFIG_CRYPTO_ANUBIS=m
663CONFIG_CRYPTO_BLOWFISH=m 687CONFIG_CRYPTO_BLOWFISH=m
664CONFIG_CRYPTO_CAMELLIA=m 688CONFIG_CRYPTO_CAMELLIA=m
@@ -674,6 +698,7 @@ CONFIG_CRYPTO_TWOFISH=m
674CONFIG_CRYPTO_842=m 698CONFIG_CRYPTO_842=m
675CONFIG_CRYPTO_LZ4=m 699CONFIG_CRYPTO_LZ4=m
676CONFIG_CRYPTO_LZ4HC=m 700CONFIG_CRYPTO_LZ4HC=m
701CONFIG_CRYPTO_ANSI_CPRNG=m
677CONFIG_CRYPTO_USER_API_HASH=m 702CONFIG_CRYPTO_USER_API_HASH=m
678CONFIG_CRYPTO_USER_API_SKCIPHER=m 703CONFIG_CRYPTO_USER_API_SKCIPHER=m
679CONFIG_CRYPTO_USER_API_RNG=m 704CONFIG_CRYPTO_USER_API_RNG=m
@@ -685,6 +710,7 @@ CONFIG_CRYPTO_SHA256_S390=m
685CONFIG_CRYPTO_SHA512_S390=m 710CONFIG_CRYPTO_SHA512_S390=m
686CONFIG_CRYPTO_DES_S390=m 711CONFIG_CRYPTO_DES_S390=m
687CONFIG_CRYPTO_AES_S390=m 712CONFIG_CRYPTO_AES_S390=m
713CONFIG_CRYPTO_PAES_S390=m
688CONFIG_CRYPTO_GHASH_S390=m 714CONFIG_CRYPTO_GHASH_S390=m
689CONFIG_CRYPTO_CRC32_S390=y 715CONFIG_CRYPTO_CRC32_S390=y
690CONFIG_ASYMMETRIC_KEY_TYPE=y 716CONFIG_ASYMMETRIC_KEY_TYPE=y
@@ -692,6 +718,7 @@ CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE=m
692CONFIG_X509_CERTIFICATE_PARSER=m 718CONFIG_X509_CERTIFICATE_PARSER=m
693CONFIG_CRC7=m 719CONFIG_CRC7=m
694CONFIG_CRC8=m 720CONFIG_CRC8=m
721CONFIG_RANDOM32_SELFTEST=y
695CONFIG_CORDIC=m 722CONFIG_CORDIC=m
696CONFIG_CMM=m 723CONFIG_CMM=m
697CONFIG_APPLDATA_BASE=y 724CONFIG_APPLDATA_BASE=y
diff --git a/arch/s390/configs/gcov_defconfig b/arch/s390/configs/gcov_defconfig
index 83970b5afb2b..3c6b78189fbc 100644
--- a/arch/s390/configs/gcov_defconfig
+++ b/arch/s390/configs/gcov_defconfig
@@ -31,6 +31,7 @@ CONFIG_USER_NS=y
31CONFIG_SCHED_AUTOGROUP=y 31CONFIG_SCHED_AUTOGROUP=y
32CONFIG_BLK_DEV_INITRD=y 32CONFIG_BLK_DEV_INITRD=y
33CONFIG_EXPERT=y 33CONFIG_EXPERT=y
34# CONFIG_SYSFS_SYSCALL is not set
34CONFIG_BPF_SYSCALL=y 35CONFIG_BPF_SYSCALL=y
35CONFIG_USERFAULTFD=y 36CONFIG_USERFAULTFD=y
36# CONFIG_COMPAT_BRK is not set 37# CONFIG_COMPAT_BRK is not set
@@ -46,7 +47,10 @@ CONFIG_MODULE_UNLOAD=y
46CONFIG_MODULE_FORCE_UNLOAD=y 47CONFIG_MODULE_FORCE_UNLOAD=y
47CONFIG_MODVERSIONS=y 48CONFIG_MODVERSIONS=y
48CONFIG_MODULE_SRCVERSION_ALL=y 49CONFIG_MODULE_SRCVERSION_ALL=y
50CONFIG_BLK_DEV_INTEGRITY=y
49CONFIG_BLK_DEV_THROTTLING=y 51CONFIG_BLK_DEV_THROTTLING=y
52CONFIG_BLK_WBT=y
53CONFIG_BLK_WBT_SQ=y
50CONFIG_PARTITION_ADVANCED=y 54CONFIG_PARTITION_ADVANCED=y
51CONFIG_IBM_PARTITION=y 55CONFIG_IBM_PARTITION=y
52CONFIG_BSD_DISKLABEL=y 56CONFIG_BSD_DISKLABEL=y
@@ -88,6 +92,8 @@ CONFIG_UNIX=y
88CONFIG_UNIX_DIAG=m 92CONFIG_UNIX_DIAG=m
89CONFIG_XFRM_USER=m 93CONFIG_XFRM_USER=m
90CONFIG_NET_KEY=m 94CONFIG_NET_KEY=m
95CONFIG_SMC=m
96CONFIG_SMC_DIAG=m
91CONFIG_INET=y 97CONFIG_INET=y
92CONFIG_IP_MULTICAST=y 98CONFIG_IP_MULTICAST=y
93CONFIG_IP_ADVANCED_ROUTER=y 99CONFIG_IP_ADVANCED_ROUTER=y
@@ -356,6 +362,7 @@ CONFIG_NET_ACT_SIMP=m
356CONFIG_NET_ACT_SKBEDIT=m 362CONFIG_NET_ACT_SKBEDIT=m
357CONFIG_NET_ACT_CSUM=m 363CONFIG_NET_ACT_CSUM=m
358CONFIG_DNS_RESOLVER=y 364CONFIG_DNS_RESOLVER=y
365CONFIG_NETLINK_DIAG=m
359CONFIG_CGROUP_NET_PRIO=y 366CONFIG_CGROUP_NET_PRIO=y
360CONFIG_BPF_JIT=y 367CONFIG_BPF_JIT=y
361CONFIG_NET_PKTGEN=m 368CONFIG_NET_PKTGEN=m
@@ -364,16 +371,18 @@ CONFIG_DEVTMPFS=y
364CONFIG_DMA_CMA=y 371CONFIG_DMA_CMA=y
365CONFIG_CMA_SIZE_MBYTES=0 372CONFIG_CMA_SIZE_MBYTES=0
366CONFIG_CONNECTOR=y 373CONFIG_CONNECTOR=y
374CONFIG_ZRAM=m
367CONFIG_BLK_DEV_LOOP=m 375CONFIG_BLK_DEV_LOOP=m
368CONFIG_BLK_DEV_CRYPTOLOOP=m 376CONFIG_BLK_DEV_CRYPTOLOOP=m
377CONFIG_BLK_DEV_DRBD=m
369CONFIG_BLK_DEV_NBD=m 378CONFIG_BLK_DEV_NBD=m
370CONFIG_BLK_DEV_OSD=m 379CONFIG_BLK_DEV_OSD=m
371CONFIG_BLK_DEV_RAM=y 380CONFIG_BLK_DEV_RAM=y
372CONFIG_BLK_DEV_RAM_SIZE=32768 381CONFIG_BLK_DEV_RAM_SIZE=32768
373CONFIG_CDROM_PKTCDVD=m 382CONFIG_BLK_DEV_RAM_DAX=y
374CONFIG_ATA_OVER_ETH=m
375CONFIG_VIRTIO_BLK=y 383CONFIG_VIRTIO_BLK=y
376CONFIG_ENCLOSURE_SERVICES=m 384CONFIG_ENCLOSURE_SERVICES=m
385CONFIG_GENWQE=m
377CONFIG_RAID_ATTRS=m 386CONFIG_RAID_ATTRS=m
378CONFIG_SCSI=y 387CONFIG_SCSI=y
379CONFIG_BLK_DEV_SD=y 388CONFIG_BLK_DEV_SD=y
@@ -439,6 +448,8 @@ CONFIG_NLMON=m
439# CONFIG_NET_VENDOR_INTEL is not set 448# CONFIG_NET_VENDOR_INTEL is not set
440# CONFIG_NET_VENDOR_MARVELL is not set 449# CONFIG_NET_VENDOR_MARVELL is not set
441CONFIG_MLX4_EN=m 450CONFIG_MLX4_EN=m
451CONFIG_MLX5_CORE=m
452CONFIG_MLX5_CORE_EN=y
442# CONFIG_NET_VENDOR_NATSEMI is not set 453# CONFIG_NET_VENDOR_NATSEMI is not set
443CONFIG_PPP=m 454CONFIG_PPP=m
444CONFIG_PPP_BSDCOMP=m 455CONFIG_PPP_BSDCOMP=m
@@ -449,7 +460,6 @@ CONFIG_PPTP=m
449CONFIG_PPPOL2TP=m 460CONFIG_PPPOL2TP=m
450CONFIG_PPP_ASYNC=m 461CONFIG_PPP_ASYNC=m
451CONFIG_PPP_SYNC_TTY=m 462CONFIG_PPP_SYNC_TTY=m
452# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
453# CONFIG_INPUT_KEYBOARD is not set 463# CONFIG_INPUT_KEYBOARD is not set
454# CONFIG_INPUT_MOUSE is not set 464# CONFIG_INPUT_MOUSE is not set
455# CONFIG_SERIO is not set 465# CONFIG_SERIO is not set
@@ -468,6 +478,7 @@ CONFIG_DIAG288_WATCHDOG=m
468CONFIG_INFINIBAND=m 478CONFIG_INFINIBAND=m
469CONFIG_INFINIBAND_USER_ACCESS=m 479CONFIG_INFINIBAND_USER_ACCESS=m
470CONFIG_MLX4_INFINIBAND=m 480CONFIG_MLX4_INFINIBAND=m
481CONFIG_MLX5_INFINIBAND=m
471CONFIG_VIRTIO_BALLOON=m 482CONFIG_VIRTIO_BALLOON=m
472CONFIG_EXT4_FS=y 483CONFIG_EXT4_FS=y
473CONFIG_EXT4_FS_POSIX_ACL=y 484CONFIG_EXT4_FS_POSIX_ACL=y
@@ -483,11 +494,15 @@ CONFIG_XFS_QUOTA=y
483CONFIG_XFS_POSIX_ACL=y 494CONFIG_XFS_POSIX_ACL=y
484CONFIG_XFS_RT=y 495CONFIG_XFS_RT=y
485CONFIG_GFS2_FS=m 496CONFIG_GFS2_FS=m
497CONFIG_GFS2_FS_LOCKING_DLM=y
486CONFIG_OCFS2_FS=m 498CONFIG_OCFS2_FS=m
487CONFIG_BTRFS_FS=y 499CONFIG_BTRFS_FS=y
488CONFIG_BTRFS_FS_POSIX_ACL=y 500CONFIG_BTRFS_FS_POSIX_ACL=y
489CONFIG_NILFS2_FS=m 501CONFIG_NILFS2_FS=m
502CONFIG_FS_DAX=y
503CONFIG_EXPORTFS_BLOCK_OPS=y
490CONFIG_FANOTIFY=y 504CONFIG_FANOTIFY=y
505CONFIG_FANOTIFY_ACCESS_PERMISSIONS=y
491CONFIG_QUOTA_NETLINK_INTERFACE=y 506CONFIG_QUOTA_NETLINK_INTERFACE=y
492CONFIG_QFMT_V1=m 507CONFIG_QFMT_V1=m
493CONFIG_QFMT_V2=m 508CONFIG_QFMT_V2=m
@@ -553,7 +568,6 @@ CONFIG_UNUSED_SYMBOLS=y
553CONFIG_MAGIC_SYSRQ=y 568CONFIG_MAGIC_SYSRQ=y
554CONFIG_DEBUG_MEMORY_INIT=y 569CONFIG_DEBUG_MEMORY_INIT=y
555CONFIG_PANIC_ON_OOPS=y 570CONFIG_PANIC_ON_OOPS=y
556CONFIG_TIMER_STATS=y
557CONFIG_RCU_TORTURE_TEST=m 571CONFIG_RCU_TORTURE_TEST=m
558CONFIG_RCU_CPU_STALL_TIMEOUT=60 572CONFIG_RCU_CPU_STALL_TIMEOUT=60
559CONFIG_LATENCYTOP=y 573CONFIG_LATENCYTOP=y
@@ -576,6 +590,7 @@ CONFIG_BIG_KEYS=y
576CONFIG_ENCRYPTED_KEYS=m 590CONFIG_ENCRYPTED_KEYS=m
577CONFIG_SECURITY=y 591CONFIG_SECURITY=y
578CONFIG_SECURITY_NETWORK=y 592CONFIG_SECURITY_NETWORK=y
593CONFIG_HARDENED_USERCOPY=y
579CONFIG_SECURITY_SELINUX=y 594CONFIG_SECURITY_SELINUX=y
580CONFIG_SECURITY_SELINUX_BOOTPARAM=y 595CONFIG_SECURITY_SELINUX_BOOTPARAM=y
581CONFIG_SECURITY_SELINUX_BOOTPARAM_VALUE=0 596CONFIG_SECURITY_SELINUX_BOOTPARAM_VALUE=0
@@ -599,6 +614,7 @@ CONFIG_CRYPTO_CHACHA20POLY1305=m
599CONFIG_CRYPTO_LRW=m 614CONFIG_CRYPTO_LRW=m
600CONFIG_CRYPTO_PCBC=m 615CONFIG_CRYPTO_PCBC=m
601CONFIG_CRYPTO_KEYWRAP=m 616CONFIG_CRYPTO_KEYWRAP=m
617CONFIG_CRYPTO_CMAC=m
602CONFIG_CRYPTO_XCBC=m 618CONFIG_CRYPTO_XCBC=m
603CONFIG_CRYPTO_VMAC=m 619CONFIG_CRYPTO_VMAC=m
604CONFIG_CRYPTO_CRC32=m 620CONFIG_CRYPTO_CRC32=m
@@ -611,6 +627,7 @@ CONFIG_CRYPTO_SHA512=m
611CONFIG_CRYPTO_SHA3=m 627CONFIG_CRYPTO_SHA3=m
612CONFIG_CRYPTO_TGR192=m 628CONFIG_CRYPTO_TGR192=m
613CONFIG_CRYPTO_WP512=m 629CONFIG_CRYPTO_WP512=m
630CONFIG_CRYPTO_AES_TI=m
614CONFIG_CRYPTO_ANUBIS=m 631CONFIG_CRYPTO_ANUBIS=m
615CONFIG_CRYPTO_BLOWFISH=m 632CONFIG_CRYPTO_BLOWFISH=m
616CONFIG_CRYPTO_CAMELLIA=m 633CONFIG_CRYPTO_CAMELLIA=m
@@ -626,16 +643,19 @@ CONFIG_CRYPTO_TWOFISH=m
626CONFIG_CRYPTO_842=m 643CONFIG_CRYPTO_842=m
627CONFIG_CRYPTO_LZ4=m 644CONFIG_CRYPTO_LZ4=m
628CONFIG_CRYPTO_LZ4HC=m 645CONFIG_CRYPTO_LZ4HC=m
646CONFIG_CRYPTO_ANSI_CPRNG=m
629CONFIG_CRYPTO_USER_API_HASH=m 647CONFIG_CRYPTO_USER_API_HASH=m
630CONFIG_CRYPTO_USER_API_SKCIPHER=m 648CONFIG_CRYPTO_USER_API_SKCIPHER=m
631CONFIG_CRYPTO_USER_API_RNG=m 649CONFIG_CRYPTO_USER_API_RNG=m
632CONFIG_CRYPTO_USER_API_AEAD=m 650CONFIG_CRYPTO_USER_API_AEAD=m
633CONFIG_ZCRYPT=m 651CONFIG_ZCRYPT=m
652CONFIG_PKEY=m
634CONFIG_CRYPTO_SHA1_S390=m 653CONFIG_CRYPTO_SHA1_S390=m
635CONFIG_CRYPTO_SHA256_S390=m 654CONFIG_CRYPTO_SHA256_S390=m
636CONFIG_CRYPTO_SHA512_S390=m 655CONFIG_CRYPTO_SHA512_S390=m
637CONFIG_CRYPTO_DES_S390=m 656CONFIG_CRYPTO_DES_S390=m
638CONFIG_CRYPTO_AES_S390=m 657CONFIG_CRYPTO_AES_S390=m
658CONFIG_CRYPTO_PAES_S390=m
639CONFIG_CRYPTO_GHASH_S390=m 659CONFIG_CRYPTO_GHASH_S390=m
640CONFIG_CRYPTO_CRC32_S390=y 660CONFIG_CRYPTO_CRC32_S390=y
641CONFIG_CRC7=m 661CONFIG_CRC7=m
diff --git a/arch/s390/configs/performance_defconfig b/arch/s390/configs/performance_defconfig
index fbc6542aaf59..653d72bcc007 100644
--- a/arch/s390/configs/performance_defconfig
+++ b/arch/s390/configs/performance_defconfig
@@ -31,6 +31,7 @@ CONFIG_USER_NS=y
31CONFIG_SCHED_AUTOGROUP=y 31CONFIG_SCHED_AUTOGROUP=y
32CONFIG_BLK_DEV_INITRD=y 32CONFIG_BLK_DEV_INITRD=y
33CONFIG_EXPERT=y 33CONFIG_EXPERT=y
34# CONFIG_SYSFS_SYSCALL is not set
34CONFIG_BPF_SYSCALL=y 35CONFIG_BPF_SYSCALL=y
35CONFIG_USERFAULTFD=y 36CONFIG_USERFAULTFD=y
36# CONFIG_COMPAT_BRK is not set 37# CONFIG_COMPAT_BRK is not set
@@ -44,7 +45,10 @@ CONFIG_MODULE_UNLOAD=y
44CONFIG_MODULE_FORCE_UNLOAD=y 45CONFIG_MODULE_FORCE_UNLOAD=y
45CONFIG_MODVERSIONS=y 46CONFIG_MODVERSIONS=y
46CONFIG_MODULE_SRCVERSION_ALL=y 47CONFIG_MODULE_SRCVERSION_ALL=y
48CONFIG_BLK_DEV_INTEGRITY=y
47CONFIG_BLK_DEV_THROTTLING=y 49CONFIG_BLK_DEV_THROTTLING=y
50CONFIG_BLK_WBT=y
51CONFIG_BLK_WBT_SQ=y
48CONFIG_PARTITION_ADVANCED=y 52CONFIG_PARTITION_ADVANCED=y
49CONFIG_IBM_PARTITION=y 53CONFIG_IBM_PARTITION=y
50CONFIG_BSD_DISKLABEL=y 54CONFIG_BSD_DISKLABEL=y
@@ -86,6 +90,8 @@ CONFIG_UNIX=y
86CONFIG_UNIX_DIAG=m 90CONFIG_UNIX_DIAG=m
87CONFIG_XFRM_USER=m 91CONFIG_XFRM_USER=m
88CONFIG_NET_KEY=m 92CONFIG_NET_KEY=m
93CONFIG_SMC=m
94CONFIG_SMC_DIAG=m
89CONFIG_INET=y 95CONFIG_INET=y
90CONFIG_IP_MULTICAST=y 96CONFIG_IP_MULTICAST=y
91CONFIG_IP_ADVANCED_ROUTER=y 97CONFIG_IP_ADVANCED_ROUTER=y
@@ -354,6 +360,7 @@ CONFIG_NET_ACT_SIMP=m
354CONFIG_NET_ACT_SKBEDIT=m 360CONFIG_NET_ACT_SKBEDIT=m
355CONFIG_NET_ACT_CSUM=m 361CONFIG_NET_ACT_CSUM=m
356CONFIG_DNS_RESOLVER=y 362CONFIG_DNS_RESOLVER=y
363CONFIG_NETLINK_DIAG=m
357CONFIG_CGROUP_NET_PRIO=y 364CONFIG_CGROUP_NET_PRIO=y
358CONFIG_BPF_JIT=y 365CONFIG_BPF_JIT=y
359CONFIG_NET_PKTGEN=m 366CONFIG_NET_PKTGEN=m
@@ -362,16 +369,18 @@ CONFIG_DEVTMPFS=y
362CONFIG_DMA_CMA=y 369CONFIG_DMA_CMA=y
363CONFIG_CMA_SIZE_MBYTES=0 370CONFIG_CMA_SIZE_MBYTES=0
364CONFIG_CONNECTOR=y 371CONFIG_CONNECTOR=y
372CONFIG_ZRAM=m
365CONFIG_BLK_DEV_LOOP=m 373CONFIG_BLK_DEV_LOOP=m
366CONFIG_BLK_DEV_CRYPTOLOOP=m 374CONFIG_BLK_DEV_CRYPTOLOOP=m
375CONFIG_BLK_DEV_DRBD=m
367CONFIG_BLK_DEV_NBD=m 376CONFIG_BLK_DEV_NBD=m
368CONFIG_BLK_DEV_OSD=m 377CONFIG_BLK_DEV_OSD=m
369CONFIG_BLK_DEV_RAM=y 378CONFIG_BLK_DEV_RAM=y
370CONFIG_BLK_DEV_RAM_SIZE=32768 379CONFIG_BLK_DEV_RAM_SIZE=32768
371CONFIG_CDROM_PKTCDVD=m 380CONFIG_BLK_DEV_RAM_DAX=y
372CONFIG_ATA_OVER_ETH=m
373CONFIG_VIRTIO_BLK=y 381CONFIG_VIRTIO_BLK=y
374CONFIG_ENCLOSURE_SERVICES=m 382CONFIG_ENCLOSURE_SERVICES=m
383CONFIG_GENWQE=m
375CONFIG_RAID_ATTRS=m 384CONFIG_RAID_ATTRS=m
376CONFIG_SCSI=y 385CONFIG_SCSI=y
377CONFIG_BLK_DEV_SD=y 386CONFIG_BLK_DEV_SD=y
@@ -437,6 +446,8 @@ CONFIG_NLMON=m
437# CONFIG_NET_VENDOR_INTEL is not set 446# CONFIG_NET_VENDOR_INTEL is not set
438# CONFIG_NET_VENDOR_MARVELL is not set 447# CONFIG_NET_VENDOR_MARVELL is not set
439CONFIG_MLX4_EN=m 448CONFIG_MLX4_EN=m
449CONFIG_MLX5_CORE=m
450CONFIG_MLX5_CORE_EN=y
440# CONFIG_NET_VENDOR_NATSEMI is not set 451# CONFIG_NET_VENDOR_NATSEMI is not set
441CONFIG_PPP=m 452CONFIG_PPP=m
442CONFIG_PPP_BSDCOMP=m 453CONFIG_PPP_BSDCOMP=m
@@ -447,7 +458,6 @@ CONFIG_PPTP=m
447CONFIG_PPPOL2TP=m 458CONFIG_PPPOL2TP=m
448CONFIG_PPP_ASYNC=m 459CONFIG_PPP_ASYNC=m
449CONFIG_PPP_SYNC_TTY=m 460CONFIG_PPP_SYNC_TTY=m
450# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
451# CONFIG_INPUT_KEYBOARD is not set 461# CONFIG_INPUT_KEYBOARD is not set
452# CONFIG_INPUT_MOUSE is not set 462# CONFIG_INPUT_MOUSE is not set
453# CONFIG_SERIO is not set 463# CONFIG_SERIO is not set
@@ -466,6 +476,7 @@ CONFIG_DIAG288_WATCHDOG=m
466CONFIG_INFINIBAND=m 476CONFIG_INFINIBAND=m
467CONFIG_INFINIBAND_USER_ACCESS=m 477CONFIG_INFINIBAND_USER_ACCESS=m
468CONFIG_MLX4_INFINIBAND=m 478CONFIG_MLX4_INFINIBAND=m
479CONFIG_MLX5_INFINIBAND=m
469CONFIG_VIRTIO_BALLOON=m 480CONFIG_VIRTIO_BALLOON=m
470CONFIG_EXT4_FS=y 481CONFIG_EXT4_FS=y
471CONFIG_EXT4_FS_POSIX_ACL=y 482CONFIG_EXT4_FS_POSIX_ACL=y
@@ -481,11 +492,15 @@ CONFIG_XFS_QUOTA=y
481CONFIG_XFS_POSIX_ACL=y 492CONFIG_XFS_POSIX_ACL=y
482CONFIG_XFS_RT=y 493CONFIG_XFS_RT=y
483CONFIG_GFS2_FS=m 494CONFIG_GFS2_FS=m
495CONFIG_GFS2_FS_LOCKING_DLM=y
484CONFIG_OCFS2_FS=m 496CONFIG_OCFS2_FS=m
485CONFIG_BTRFS_FS=y 497CONFIG_BTRFS_FS=y
486CONFIG_BTRFS_FS_POSIX_ACL=y 498CONFIG_BTRFS_FS_POSIX_ACL=y
487CONFIG_NILFS2_FS=m 499CONFIG_NILFS2_FS=m
500CONFIG_FS_DAX=y
501CONFIG_EXPORTFS_BLOCK_OPS=y
488CONFIG_FANOTIFY=y 502CONFIG_FANOTIFY=y
503CONFIG_FANOTIFY_ACCESS_PERMISSIONS=y
489CONFIG_QUOTA_NETLINK_INTERFACE=y 504CONFIG_QUOTA_NETLINK_INTERFACE=y
490CONFIG_QFMT_V1=m 505CONFIG_QFMT_V1=m
491CONFIG_QFMT_V2=m 506CONFIG_QFMT_V2=m
@@ -551,7 +566,6 @@ CONFIG_UNUSED_SYMBOLS=y
551CONFIG_MAGIC_SYSRQ=y 566CONFIG_MAGIC_SYSRQ=y
552CONFIG_DEBUG_MEMORY_INIT=y 567CONFIG_DEBUG_MEMORY_INIT=y
553CONFIG_PANIC_ON_OOPS=y 568CONFIG_PANIC_ON_OOPS=y
554CONFIG_TIMER_STATS=y
555CONFIG_RCU_TORTURE_TEST=m 569CONFIG_RCU_TORTURE_TEST=m
556CONFIG_RCU_CPU_STALL_TIMEOUT=60 570CONFIG_RCU_CPU_STALL_TIMEOUT=60
557CONFIG_LATENCYTOP=y 571CONFIG_LATENCYTOP=y
@@ -574,6 +588,7 @@ CONFIG_BIG_KEYS=y
574CONFIG_ENCRYPTED_KEYS=m 588CONFIG_ENCRYPTED_KEYS=m
575CONFIG_SECURITY=y 589CONFIG_SECURITY=y
576CONFIG_SECURITY_NETWORK=y 590CONFIG_SECURITY_NETWORK=y
591CONFIG_HARDENED_USERCOPY=y
577CONFIG_SECURITY_SELINUX=y 592CONFIG_SECURITY_SELINUX=y
578CONFIG_SECURITY_SELINUX_BOOTPARAM=y 593CONFIG_SECURITY_SELINUX_BOOTPARAM=y
579CONFIG_SECURITY_SELINUX_BOOTPARAM_VALUE=0 594CONFIG_SECURITY_SELINUX_BOOTPARAM_VALUE=0
@@ -597,6 +612,7 @@ CONFIG_CRYPTO_CHACHA20POLY1305=m
597CONFIG_CRYPTO_LRW=m 612CONFIG_CRYPTO_LRW=m
598CONFIG_CRYPTO_PCBC=m 613CONFIG_CRYPTO_PCBC=m
599CONFIG_CRYPTO_KEYWRAP=m 614CONFIG_CRYPTO_KEYWRAP=m
615CONFIG_CRYPTO_CMAC=m
600CONFIG_CRYPTO_XCBC=m 616CONFIG_CRYPTO_XCBC=m
601CONFIG_CRYPTO_VMAC=m 617CONFIG_CRYPTO_VMAC=m
602CONFIG_CRYPTO_CRC32=m 618CONFIG_CRYPTO_CRC32=m
@@ -609,6 +625,7 @@ CONFIG_CRYPTO_SHA512=m
609CONFIG_CRYPTO_SHA3=m 625CONFIG_CRYPTO_SHA3=m
610CONFIG_CRYPTO_TGR192=m 626CONFIG_CRYPTO_TGR192=m
611CONFIG_CRYPTO_WP512=m 627CONFIG_CRYPTO_WP512=m
628CONFIG_CRYPTO_AES_TI=m
612CONFIG_CRYPTO_ANUBIS=m 629CONFIG_CRYPTO_ANUBIS=m
613CONFIG_CRYPTO_BLOWFISH=m 630CONFIG_CRYPTO_BLOWFISH=m
614CONFIG_CRYPTO_CAMELLIA=m 631CONFIG_CRYPTO_CAMELLIA=m
@@ -624,6 +641,7 @@ CONFIG_CRYPTO_TWOFISH=m
624CONFIG_CRYPTO_842=m 641CONFIG_CRYPTO_842=m
625CONFIG_CRYPTO_LZ4=m 642CONFIG_CRYPTO_LZ4=m
626CONFIG_CRYPTO_LZ4HC=m 643CONFIG_CRYPTO_LZ4HC=m
644CONFIG_CRYPTO_ANSI_CPRNG=m
627CONFIG_CRYPTO_USER_API_HASH=m 645CONFIG_CRYPTO_USER_API_HASH=m
628CONFIG_CRYPTO_USER_API_SKCIPHER=m 646CONFIG_CRYPTO_USER_API_SKCIPHER=m
629CONFIG_CRYPTO_USER_API_RNG=m 647CONFIG_CRYPTO_USER_API_RNG=m
@@ -635,6 +653,7 @@ CONFIG_CRYPTO_SHA256_S390=m
635CONFIG_CRYPTO_SHA512_S390=m 653CONFIG_CRYPTO_SHA512_S390=m
636CONFIG_CRYPTO_DES_S390=m 654CONFIG_CRYPTO_DES_S390=m
637CONFIG_CRYPTO_AES_S390=m 655CONFIG_CRYPTO_AES_S390=m
656CONFIG_CRYPTO_PAES_S390=m
638CONFIG_CRYPTO_GHASH_S390=m 657CONFIG_CRYPTO_GHASH_S390=m
639CONFIG_CRYPTO_CRC32_S390=y 658CONFIG_CRYPTO_CRC32_S390=y
640CONFIG_CRC7=m 659CONFIG_CRC7=m
diff --git a/arch/s390/configs/zfcpdump_defconfig b/arch/s390/configs/zfcpdump_defconfig
index e23d97c13735..afa46a7406ea 100644
--- a/arch/s390/configs/zfcpdump_defconfig
+++ b/arch/s390/configs/zfcpdump_defconfig
@@ -12,8 +12,10 @@ CONFIG_TUNE_ZEC12=y
12CONFIG_NR_CPUS=2 12CONFIG_NR_CPUS=2
13# CONFIG_HOTPLUG_CPU is not set 13# CONFIG_HOTPLUG_CPU is not set
14CONFIG_HZ_100=y 14CONFIG_HZ_100=y
15# CONFIG_ARCH_RANDOM is not set
15# CONFIG_COMPACTION is not set 16# CONFIG_COMPACTION is not set
16# CONFIG_MIGRATION is not set 17# CONFIG_MIGRATION is not set
18# CONFIG_BOUNCE is not set
17# CONFIG_CHECK_STACK is not set 19# CONFIG_CHECK_STACK is not set
18# CONFIG_CHSC_SCH is not set 20# CONFIG_CHSC_SCH is not set
19# CONFIG_SCM_BUS is not set 21# CONFIG_SCM_BUS is not set
@@ -36,11 +38,11 @@ CONFIG_SCSI_CONSTANTS=y
36CONFIG_SCSI_LOGGING=y 38CONFIG_SCSI_LOGGING=y
37CONFIG_SCSI_FC_ATTRS=y 39CONFIG_SCSI_FC_ATTRS=y
38CONFIG_ZFCP=y 40CONFIG_ZFCP=y
39# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
40# CONFIG_INPUT_KEYBOARD is not set 41# CONFIG_INPUT_KEYBOARD is not set
41# CONFIG_INPUT_MOUSE is not set 42# CONFIG_INPUT_MOUSE is not set
42# CONFIG_SERIO is not set 43# CONFIG_SERIO is not set
43# CONFIG_HVC_IUCV is not set 44# CONFIG_HVC_IUCV is not set
45# CONFIG_HW_RANDOM_S390 is not set
44CONFIG_RAW_DRIVER=y 46CONFIG_RAW_DRIVER=y
45# CONFIG_SCLP_ASYNC is not set 47# CONFIG_SCLP_ASYNC is not set
46# CONFIG_HMC_DRV is not set 48# CONFIG_HMC_DRV is not set
@@ -54,9 +56,9 @@ CONFIG_RAW_DRIVER=y
54# CONFIG_INOTIFY_USER is not set 56# CONFIG_INOTIFY_USER is not set
55CONFIG_CONFIGFS_FS=y 57CONFIG_CONFIGFS_FS=y
56# CONFIG_MISC_FILESYSTEMS is not set 58# CONFIG_MISC_FILESYSTEMS is not set
59# CONFIG_NETWORK_FILESYSTEMS is not set
57CONFIG_PRINTK_TIME=y 60CONFIG_PRINTK_TIME=y
58CONFIG_DEBUG_INFO=y 61CONFIG_DEBUG_INFO=y
59CONFIG_DEBUG_FS=y
60CONFIG_DEBUG_KERNEL=y 62CONFIG_DEBUG_KERNEL=y
61CONFIG_PANIC_ON_OOPS=y 63CONFIG_PANIC_ON_OOPS=y
62# CONFIG_SCHED_DEBUG is not set 64# CONFIG_SCHED_DEBUG is not set
diff --git a/arch/s390/crypto/Makefile b/arch/s390/crypto/Makefile
index 678d9863e3f0..ad4bd777768d 100644
--- a/arch/s390/crypto/Makefile
+++ b/arch/s390/crypto/Makefile
@@ -6,7 +6,8 @@ obj-$(CONFIG_CRYPTO_SHA1_S390) += sha1_s390.o sha_common.o
 obj-$(CONFIG_CRYPTO_SHA256_S390) += sha256_s390.o sha_common.o
 obj-$(CONFIG_CRYPTO_SHA512_S390) += sha512_s390.o sha_common.o
 obj-$(CONFIG_CRYPTO_DES_S390) += des_s390.o
-obj-$(CONFIG_CRYPTO_AES_S390) += aes_s390.o paes_s390.o
+obj-$(CONFIG_CRYPTO_AES_S390) += aes_s390.o
+obj-$(CONFIG_CRYPTO_PAES_S390) += paes_s390.o
 obj-$(CONFIG_S390_PRNG) += prng.o
 obj-$(CONFIG_CRYPTO_GHASH_S390) += ghash_s390.o
 obj-$(CONFIG_CRYPTO_CRC32_S390) += crc32-vx_s390.o
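Splitting paes_s390.o out of the AES rule means the protected-key cipher is only built when its own option is enabled. The matching Kconfig entry lives in drivers/crypto/Kconfig (also touched in this series); it looks roughly like the fragment below, with the exact dependency list taken on trust rather than from this hunk:

config CRYPTO_PAES_S390
	tristate "PAES cipher algorithms"
	depends on S390
	depends on ZCRYPT
	depends on PKEY
	select CRYPTO_ALGAPI
	select CRYPTO_BLKCIPHER
	help
	  Protected-key AES: the effective keys never appear in the clear
	  in memory.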
diff --git a/arch/s390/crypto/arch_random.c b/arch/s390/crypto/arch_random.c
index 9317b3e645e2..36aefc07d10c 100644
--- a/arch/s390/crypto/arch_random.c
+++ b/arch/s390/crypto/arch_random.c
@@ -12,6 +12,7 @@
 
 #include <linux/kernel.h>
 #include <linux/atomic.h>
+#include <linux/random.h>
 #include <linux/static_key.h>
 #include <asm/cpacf.h>
 
diff --git a/arch/s390/defconfig b/arch/s390/defconfig
index 97189dbaf34b..20244a38c886 100644
--- a/arch/s390/defconfig
+++ b/arch/s390/defconfig
@@ -28,6 +28,7 @@ CONFIG_NAMESPACES=y
28CONFIG_USER_NS=y 28CONFIG_USER_NS=y
29CONFIG_BLK_DEV_INITRD=y 29CONFIG_BLK_DEV_INITRD=y
30CONFIG_EXPERT=y 30CONFIG_EXPERT=y
31# CONFIG_SYSFS_SYSCALL is not set
31CONFIG_BPF_SYSCALL=y 32CONFIG_BPF_SYSCALL=y
32CONFIG_USERFAULTFD=y 33CONFIG_USERFAULTFD=y
33# CONFIG_COMPAT_BRK is not set 34# CONFIG_COMPAT_BRK is not set
@@ -108,7 +109,6 @@ CONFIG_ZFCP=y
108CONFIG_SCSI_VIRTIO=y 109CONFIG_SCSI_VIRTIO=y
109CONFIG_MD=y 110CONFIG_MD=y
110CONFIG_MD_LINEAR=m 111CONFIG_MD_LINEAR=m
111CONFIG_MD_RAID0=m
112CONFIG_MD_MULTIPATH=m 112CONFIG_MD_MULTIPATH=m
113CONFIG_BLK_DEV_DM=y 113CONFIG_BLK_DEV_DM=y
114CONFIG_DM_CRYPT=m 114CONFIG_DM_CRYPT=m
@@ -131,6 +131,7 @@ CONFIG_TUN=m
131CONFIG_VIRTIO_NET=y 131CONFIG_VIRTIO_NET=y
132# CONFIG_NET_VENDOR_ALACRITECH is not set 132# CONFIG_NET_VENDOR_ALACRITECH is not set
133# CONFIG_NET_VENDOR_SOLARFLARE is not set 133# CONFIG_NET_VENDOR_SOLARFLARE is not set
134# CONFIG_NET_VENDOR_SYNOPSYS is not set
134# CONFIG_INPUT is not set 135# CONFIG_INPUT is not set
135# CONFIG_SERIO is not set 136# CONFIG_SERIO is not set
136CONFIG_DEVKMEM=y 137CONFIG_DEVKMEM=y
@@ -162,7 +163,6 @@ CONFIG_MAGIC_SYSRQ=y
162CONFIG_DEBUG_PAGEALLOC=y 163CONFIG_DEBUG_PAGEALLOC=y
163CONFIG_DETECT_HUNG_TASK=y 164CONFIG_DETECT_HUNG_TASK=y
164CONFIG_PANIC_ON_OOPS=y 165CONFIG_PANIC_ON_OOPS=y
165CONFIG_TIMER_STATS=y
166CONFIG_DEBUG_RT_MUTEXES=y 166CONFIG_DEBUG_RT_MUTEXES=y
167CONFIG_PROVE_LOCKING=y 167CONFIG_PROVE_LOCKING=y
168CONFIG_LOCK_STAT=y 168CONFIG_LOCK_STAT=y
@@ -172,14 +172,12 @@ CONFIG_DEBUG_LIST=y
172CONFIG_DEBUG_SG=y 172CONFIG_DEBUG_SG=y
173CONFIG_DEBUG_NOTIFIERS=y 173CONFIG_DEBUG_NOTIFIERS=y
174CONFIG_RCU_CPU_STALL_TIMEOUT=60 174CONFIG_RCU_CPU_STALL_TIMEOUT=60
175CONFIG_RCU_TRACE=y
176CONFIG_LATENCYTOP=y 175CONFIG_LATENCYTOP=y
177CONFIG_SCHED_TRACER=y 176CONFIG_SCHED_TRACER=y
178CONFIG_FTRACE_SYSCALLS=y 177CONFIG_FTRACE_SYSCALLS=y
179CONFIG_TRACER_SNAPSHOT_PER_CPU_SWAP=y 178CONFIG_TRACER_SNAPSHOT_PER_CPU_SWAP=y
180CONFIG_STACK_TRACER=y 179CONFIG_STACK_TRACER=y
181CONFIG_BLK_DEV_IO_TRACE=y 180CONFIG_BLK_DEV_IO_TRACE=y
182CONFIG_UPROBE_EVENTS=y
183CONFIG_FUNCTION_PROFILER=y 181CONFIG_FUNCTION_PROFILER=y
184CONFIG_TRACE_ENUM_MAP_FILE=y 182CONFIG_TRACE_ENUM_MAP_FILE=y
185CONFIG_KPROBES_SANITY_TEST=y 183CONFIG_KPROBES_SANITY_TEST=y
@@ -190,7 +188,6 @@ CONFIG_CRYPTO_CCM=m
190CONFIG_CRYPTO_GCM=m 188CONFIG_CRYPTO_GCM=m
191CONFIG_CRYPTO_CBC=y 189CONFIG_CRYPTO_CBC=y
192CONFIG_CRYPTO_CTS=m 190CONFIG_CRYPTO_CTS=m
193CONFIG_CRYPTO_ECB=m
194CONFIG_CRYPTO_LRW=m 191CONFIG_CRYPTO_LRW=m
195CONFIG_CRYPTO_PCBC=m 192CONFIG_CRYPTO_PCBC=m
196CONFIG_CRYPTO_XTS=m 193CONFIG_CRYPTO_XTS=m
@@ -230,6 +227,7 @@ CONFIG_CRYPTO_USER_API_SKCIPHER=m
230CONFIG_CRYPTO_USER_API_RNG=m 227CONFIG_CRYPTO_USER_API_RNG=m
231CONFIG_ZCRYPT=m 228CONFIG_ZCRYPT=m
232CONFIG_PKEY=m 229CONFIG_PKEY=m
230CONFIG_CRYPTO_PAES_S390=m
233CONFIG_CRYPTO_SHA1_S390=m 231CONFIG_CRYPTO_SHA1_S390=m
234CONFIG_CRYPTO_SHA256_S390=m 232CONFIG_CRYPTO_SHA256_S390=m
235CONFIG_CRYPTO_SHA512_S390=m 233CONFIG_CRYPTO_SHA512_S390=m
diff --git a/arch/s390/include/asm/Kbuild b/arch/s390/include/asm/Kbuild
index 45092b12f54f..b3c88479feba 100644
--- a/arch/s390/include/asm/Kbuild
+++ b/arch/s390/include/asm/Kbuild
@@ -1,10 +1,12 @@
 generic-y += asm-offsets.h
 generic-y += cacheflush.h
 generic-y += clkdev.h
+generic-y += device.h
 generic-y += dma-contiguous.h
 generic-y += div64.h
 generic-y += emergency-restart.h
 generic-y += export.h
+generic-y += fb.h
 generic-y += irq_regs.h
 generic-y += irq_work.h
 generic-y += kmap_types.h
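With device.h and fb.h listed as generic-y, the arch-private copies removed below are no longer needed; Kbuild generates trivial wrappers under arch/s390/include/generated/asm/ instead. The generated file is essentially a one-liner (content assumed, shown for orientation):

/* arch/s390/include/generated/asm/device.h -- generated by Kbuild */
#include <asm-generic/device.h>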
diff --git a/arch/s390/include/asm/debug.h b/arch/s390/include/asm/debug.h
index 0206c8052328..df7b54ea956d 100644
--- a/arch/s390/include/asm/debug.h
+++ b/arch/s390/include/asm/debug.h
@@ -10,6 +10,7 @@
 #include <linux/spinlock.h>
 #include <linux/kernel.h>
 #include <linux/time.h>
+#include <linux/refcount.h>
 #include <uapi/asm/debug.h>
 
 #define DEBUG_MAX_LEVEL 6  /* debug levels range from 0 to 6 */
@@ -31,7 +32,7 @@ struct debug_view;
 typedef struct debug_info {
 	struct debug_info* next;
 	struct debug_info* prev;
-	atomic_t ref_count;
+	refcount_t ref_count;
 	spinlock_t lock;
 	int level;
 	int nr_areas;
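Converting ref_count from atomic_t to refcount_t buys overflow/underflow checking; the users in arch/s390/kernel/debug.c (changed later in this series) move from the atomic_* calls to the refcount API. A minimal sketch of the usual conversion pattern, not the exact debug.c hunk:

#include <linux/refcount.h>
#include <linux/slab.h>

struct obj {
	refcount_t ref_count;
};

static void obj_init(struct obj *o)
{
	refcount_set(&o->ref_count, 1);            /* was atomic_set(&.., 1) */
}

static void obj_get(struct obj *o)
{
	refcount_inc(&o->ref_count);               /* was atomic_inc() */
}

static void obj_put(struct obj *o)
{
	if (refcount_dec_and_test(&o->ref_count))  /* was atomic_dec_and_test() */
		kfree(o);
}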
diff --git a/arch/s390/include/asm/device.h b/arch/s390/include/asm/device.h
deleted file mode 100644
index 5203fc87f080..000000000000
--- a/arch/s390/include/asm/device.h
+++ /dev/null
@@ -1,10 +0,0 @@
-/*
- * Arch specific extensions to struct device
- *
- * This file is released under the GPLv2
- */
-struct dev_archdata {
-};
-
-struct pdev_archdata {
-};
diff --git a/arch/s390/include/asm/dis.h b/arch/s390/include/asm/dis.h
index 60323c21938b..37f617dfbede 100644
--- a/arch/s390/include/asm/dis.h
+++ b/arch/s390/include/asm/dis.h
@@ -40,6 +40,8 @@ static inline int insn_length(unsigned char code)
 	return ((((int) code + 64) >> 7) + 1) << 1;
 }
 
+struct pt_regs;
+
 void show_code(struct pt_regs *regs);
 void print_fn_code(unsigned char *code, unsigned long len);
 int insn_to_mnemonic(unsigned char *instruction, char *buf, unsigned int len);
diff --git a/arch/s390/include/asm/elf.h b/arch/s390/include/asm/elf.h
index e8f623041769..ec024c08dabe 100644
--- a/arch/s390/include/asm/elf.h
+++ b/arch/s390/include/asm/elf.h
@@ -117,6 +117,9 @@
 #define ELF_DATA	ELFDATA2MSB
 #define ELF_ARCH	EM_S390
 
+/* s390 specific phdr types */
+#define PT_S390_PGSTE	0x70000000
+
 /*
  * ELF register definitions..
  */
@@ -151,6 +154,35 @@ extern unsigned int vdso_enabled;
 	 && (x)->e_ident[EI_CLASS] == ELF_CLASS)
 #define compat_start_thread	start_thread31
 
+struct arch_elf_state {
+	int rc;
+};
+
+#define INIT_ARCH_ELF_STATE { .rc = 0 }
+
+#define arch_check_elf(ehdr, interp, interp_ehdr, state) (0)
+#ifdef CONFIG_PGSTE
+#define arch_elf_pt_proc(ehdr, phdr, elf, interp, state)	\
+({								\
+	struct arch_elf_state *_state = state;			\
+	if ((phdr)->p_type == PT_S390_PGSTE &&			\
+	    !page_table_allocate_pgste &&			\
+	    !test_thread_flag(TIF_PGSTE) &&			\
+	    !current->mm->context.alloc_pgste) {		\
+		set_thread_flag(TIF_PGSTE);			\
+		set_pt_regs_flag(task_pt_regs(current),		\
+				 PIF_SYSCALL_RESTART);		\
+		_state->rc = -EAGAIN;				\
+	}							\
+	_state->rc;						\
+})
+#else
+#define arch_elf_pt_proc(ehdr, phdr, elf, interp, state)	\
+({								\
+	(state)->rc;						\
+})
+#endif
+
 /* For SVR4/S390 the function pointer to be registered with `atexit` is
    passed in R14. */
 #define ELF_PLAT_INIT(_r, load_addr) \
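arch_elf_pt_proc() is invoked by the generic ELF loader for every program header of the binary and of its interpreter. The first PT_S390_PGSTE header it sees sets TIF_PGSTE and restarts the execve, so the new mm is created with PGSTE-capable page tables even when the vm.allocate_pgste sysctl is off. A binary gets such a header from its link step; an illustrative (not authoritative) GNU ld PHDRS fragment declaring the s390-specific type 0x70000000:

PHDRS
{
	headers PT_PHDR PHDRS;
	text    PT_LOAD FILEHDR PHDRS;
	data    PT_LOAD;
	pgste   0x70000000;            /* PT_S390_PGSTE, empty marker segment */
}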
diff --git a/arch/s390/include/asm/fb.h b/arch/s390/include/asm/fb.h
deleted file mode 100644
index c7df38030992..000000000000
--- a/arch/s390/include/asm/fb.h
+++ /dev/null
@@ -1,12 +0,0 @@
-#ifndef _ASM_FB_H_
-#define _ASM_FB_H_
-#include <linux/fb.h>
-
-#define fb_pgprotect(...) do {} while (0)
-
-static inline int fb_is_primary_device(struct fb_info *info)
-{
-	return 0;
-}
-
-#endif /* _ASM_FB_H_ */
diff --git a/arch/s390/include/asm/io.h b/arch/s390/include/asm/io.h
index 437e9af96688..904e4b3af95d 100644
--- a/arch/s390/include/asm/io.h
+++ b/arch/s390/include/asm/io.h
@@ -25,8 +25,6 @@ void unxlate_dev_mem_ptr(phys_addr_t phys, void *addr);
 
 #define IO_SPACE_LIMIT 0
 
-#ifdef CONFIG_PCI
-
 #define ioremap_nocache(addr, size)	ioremap(addr, size)
 #define ioremap_wc			ioremap_nocache
 #define ioremap_wt			ioremap_nocache
@@ -49,6 +47,8 @@ static inline void ioport_unmap(void __iomem *p)
 {
 }
 
+#ifdef CONFIG_PCI
+
 /*
  * s390 needs a private implementation of pci_iomap since ioremap with its
  * offset parameter isn't sufficient. That's because BAR spaces are not
diff --git a/arch/s390/include/asm/kprobes.h b/arch/s390/include/asm/kprobes.h
index 1293c4066cfc..28792ef82c83 100644
--- a/arch/s390/include/asm/kprobes.h
+++ b/arch/s390/include/asm/kprobes.h
@@ -27,12 +27,21 @@
  * 2005-Dec	Used as a template for s390 by Mike Grundy
  *		<grundym@us.ibm.com>
  */
+#include <linux/types.h>
 #include <asm-generic/kprobes.h>
 
 #define BREAKPOINT_INSTRUCTION	0x0002
 
+#define FIXUP_PSW_NORMAL	0x08
+#define FIXUP_BRANCH_NOT_TAKEN	0x04
+#define FIXUP_RETURN_REGISTER	0x02
+#define FIXUP_NOT_REQUIRED	0x01
+
+int probe_is_prohibited_opcode(u16 *insn);
+int probe_get_fixup_type(u16 *insn);
+int probe_is_insn_relative_long(u16 *insn);
+
 #ifdef CONFIG_KPROBES
-#include <linux/types.h>
 #include <linux/ptrace.h>
 #include <linux/percpu.h>
 #include <linux/sched/task_stack.h>
@@ -56,11 +65,6 @@ typedef u16 kprobe_opcode_t;
 
 #define KPROBE_SWAP_INST	0x10
 
-#define FIXUP_PSW_NORMAL	0x08
-#define FIXUP_BRANCH_NOT_TAKEN	0x04
-#define FIXUP_RETURN_REGISTER	0x02
-#define FIXUP_NOT_REQUIRED	0x01
-
 /* Architecture specific copy of original instruction */
 struct arch_specific_insn {
 	/* copy of original instruction */
@@ -90,10 +94,6 @@ int kprobe_fault_handler(struct pt_regs *regs, int trapnr);
 int kprobe_exceptions_notify(struct notifier_block *self,
 			     unsigned long val, void *data);
 
-int probe_is_prohibited_opcode(u16 *insn);
-int probe_get_fixup_type(u16 *insn);
-int probe_is_insn_relative_long(u16 *insn);
-
 #define flush_insn_slot(p)	do { } while (0)
 
 #endif /* CONFIG_KPROBES */
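Moving the FIXUP_* constants and the probe_*() declarations out of the CONFIG_KPROBES block lets arch/s390/lib/probes.c and the uprobes code use them in kernels built without kprobes. A small sketch of the typical caller shape (hypothetical helper, not a quote from this series):

#include <linux/errno.h>
#include <asm/kprobes.h>

static int analyze_insn_example(u16 *insn)
{
	/* reject opcodes that must not be probed, as the uprobes code does */
	if (probe_is_prohibited_opcode(insn))
		return -EINVAL;
	return 0;
}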
diff --git a/arch/s390/include/asm/mmu_context.h b/arch/s390/include/asm/mmu_context.h
index 8712e11bead4..4541ac44b35f 100644
--- a/arch/s390/include/asm/mmu_context.h
+++ b/arch/s390/include/asm/mmu_context.h
@@ -25,7 +25,9 @@ static inline int init_new_context(struct task_struct *tsk,
 	mm->context.gmap_asce = 0;
 	mm->context.flush_mm = 0;
 #ifdef CONFIG_PGSTE
-	mm->context.alloc_pgste = page_table_allocate_pgste;
+	mm->context.alloc_pgste = page_table_allocate_pgste ||
+		test_thread_flag(TIF_PGSTE) ||
+		current->mm->context.alloc_pgste;
 	mm->context.has_pgste = 0;
 	mm->context.use_skey = 0;
 	mm->context.use_cmma = 0;
diff --git a/arch/s390/include/asm/page.h b/arch/s390/include/asm/page.h
index 69b8a41fca84..624deaa44230 100644
--- a/arch/s390/include/asm/page.h
+++ b/arch/s390/include/asm/page.h
@@ -74,6 +74,7 @@ typedef struct { unsigned long pgste; } pgste_t;
 typedef struct { unsigned long pte; } pte_t;
 typedef struct { unsigned long pmd; } pmd_t;
 typedef struct { unsigned long pud; } pud_t;
+typedef struct { unsigned long p4d; } p4d_t;
 typedef struct { unsigned long pgd; } pgd_t;
 typedef pte_t *pgtable_t;
 
@@ -82,12 +83,14 @@ typedef pte_t *pgtable_t;
 #define pte_val(x)	((x).pte)
 #define pmd_val(x)	((x).pmd)
 #define pud_val(x)	((x).pud)
+#define p4d_val(x)	((x).p4d)
 #define pgd_val(x)	((x).pgd)
 
 #define __pgste(x)	((pgste_t) { (x) } )
 #define __pte(x)	((pte_t) { (x) } )
 #define __pmd(x)	((pmd_t) { (x) } )
 #define __pud(x)	((pud_t) { (x) } )
+#define __p4d(x)	((p4d_t) { (x) } )
 #define __pgd(x)	((pgd_t) { (x) } )
 #define __pgprot(x)	((pgprot_t) { (x) } )
 
diff --git a/arch/s390/include/asm/pci.h b/arch/s390/include/asm/pci.h
index 4e3186649578..f36b4b726057 100644
--- a/arch/s390/include/asm/pci.h
+++ b/arch/s390/include/asm/pci.h
@@ -70,11 +70,10 @@ struct zpci_fmb {
 } __packed __aligned(128);
 
 enum zpci_state {
-	ZPCI_FN_STATE_RESERVED,
-	ZPCI_FN_STATE_STANDBY,
-	ZPCI_FN_STATE_CONFIGURED,
-	ZPCI_FN_STATE_ONLINE,
-	NR_ZPCI_FN_STATES,
+	ZPCI_FN_STATE_STANDBY = 0,
+	ZPCI_FN_STATE_CONFIGURED = 1,
+	ZPCI_FN_STATE_RESERVED = 2,
+	ZPCI_FN_STATE_ONLINE = 3,
 };
 
 struct zpci_bar_struct {
@@ -109,7 +108,7 @@ struct zpci_dev {
 	u64		msi_addr;	/* MSI address */
 	unsigned int	max_msi;	/* maximum number of MSI's */
 	struct airq_iv *aibv;		/* adapter interrupt bit vector */
-	unsigned int	aisb;		/* number of the summary bit */
+	unsigned long	aisb;		/* number of the summary bit */
 
 	/* DMA stuff */
 	unsigned long	*dma_table;
@@ -159,11 +158,12 @@ extern const struct attribute_group *zpci_attr_groups[];
 ----------------------------------------------------------------------------- */
 /* Base stuff */
 int zpci_create_device(struct zpci_dev *);
+void zpci_remove_device(struct zpci_dev *zdev);
 int zpci_enable_device(struct zpci_dev *);
 int zpci_disable_device(struct zpci_dev *);
-void zpci_stop_device(struct zpci_dev *);
 int zpci_register_ioat(struct zpci_dev *, u8, u64, u64, u64);
 int zpci_unregister_ioat(struct zpci_dev *, u8);
+void zpci_remove_reserved_devices(void);
 
 /* CLP */
 int clp_scan_pci_devices(void);
@@ -172,6 +172,7 @@ int clp_rescan_pci_devices_simple(void);
 int clp_add_pci_device(u32, u32, int);
 int clp_enable_fh(struct zpci_dev *, u8);
 int clp_disable_fh(struct zpci_dev *);
+int clp_get_state(u32 fid, enum zpci_state *state);
 
 #ifdef CONFIG_PCI
 /* Error handling and recovery */
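clp_get_state() gives the hotplug and error-recovery paths in arch/s390/pci/ a way to re-query a function's state from firmware, and the enum now mirrors the architected state values. A hedged sketch of a caller; the helper name is hypothetical, only clp_get_state() and enum zpci_state come from the header above:

#include <linux/types.h>
#include <asm/pci.h>

static bool zpci_fn_is_reserved(u32 fid)
{
	enum zpci_state state;

	if (clp_get_state(fid, &state))
		return false;          /* CLP query failed; assume still present */
	return state == ZPCI_FN_STATE_RESERVED;
}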
diff --git a/arch/s390/include/asm/pci_insn.h b/arch/s390/include/asm/pci_insn.h
index 649eb62c52b3..34abcf275799 100644
--- a/arch/s390/include/asm/pci_insn.h
+++ b/arch/s390/include/asm/pci_insn.h
@@ -76,7 +76,7 @@ struct zpci_fib {
 	u32 gd;
 } __packed __aligned(8);
 
-int zpci_mod_fc(u64 req, struct zpci_fib *fib);
+u8 zpci_mod_fc(u64 req, struct zpci_fib *fib, u8 *status);
 int zpci_refresh_trans(u64 fn, u64 addr, u64 range);
 int zpci_load(u64 *data, u64 req, u64 offset);
 int zpci_store(u64 data, u64 req, u64 offset);
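zpci_mod_fc() now returns the condition code directly and passes the status byte back through a pointer, so callers can log both and map them to an errno themselves. Sketch of the adjusted call pattern (caller shape assumed, not verbatim from this series):

#include <linux/errno.h>
#include <linux/printk.h>
#include <asm/pci_insn.h>

static int zpci_mod_fc_checked(u64 req, struct zpci_fib *fib)
{
	u8 status;
	u8 cc = zpci_mod_fc(req, fib, &status);

	if (cc)
		pr_err("modify PCI FC failed: cc %u, status %u\n", cc, status);
	return cc ? -EIO : 0;
}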
diff --git a/arch/s390/include/asm/pgalloc.h b/arch/s390/include/asm/pgalloc.h
index 166f703dad7c..bb0ff1bb0c4a 100644
--- a/arch/s390/include/asm/pgalloc.h
+++ b/arch/s390/include/asm/pgalloc.h
@@ -51,12 +51,24 @@ static inline unsigned long pgd_entry_type(struct mm_struct *mm)
 		return _SEGMENT_ENTRY_EMPTY;
 	if (mm->context.asce_limit <= (1UL << 42))
 		return _REGION3_ENTRY_EMPTY;
-	return _REGION2_ENTRY_EMPTY;
+	if (mm->context.asce_limit <= (1UL << 53))
+		return _REGION2_ENTRY_EMPTY;
+	return _REGION1_ENTRY_EMPTY;
 }
 
-int crst_table_upgrade(struct mm_struct *);
+int crst_table_upgrade(struct mm_struct *mm, unsigned long limit);
 void crst_table_downgrade(struct mm_struct *);
 
+static inline p4d_t *p4d_alloc_one(struct mm_struct *mm, unsigned long address)
+{
+	unsigned long *table = crst_table_alloc(mm);
+
+	if (table)
+		crst_table_init(table, _REGION2_ENTRY_EMPTY);
+	return (p4d_t *) table;
+}
+#define p4d_free(mm, p4d) crst_table_free(mm, (unsigned long *) p4d)
+
 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long address)
 {
 	unsigned long *table = crst_table_alloc(mm);
@@ -86,9 +98,14 @@ static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
 	crst_table_free(mm, (unsigned long *) pmd);
 }
 
-static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
+static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, p4d_t *p4d)
+{
+	pgd_val(*pgd) = _REGION1_ENTRY | __pa(p4d);
+}
+
+static inline void p4d_populate(struct mm_struct *mm, p4d_t *p4d, pud_t *pud)
 {
-	pgd_val(*pgd) = _REGION2_ENTRY | __pa(pud);
+	p4d_val(*p4d) = _REGION2_ENTRY | __pa(pud);
 }
 
 static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
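With the p4d level in place, generic mm code walks pgd -> p4d -> pud -> pmd -> pte on s390 too; for address spaces below the new limits the extra levels are folded and behave as before. For orientation, the canonical five-level lookup using the generic accessors (standard kernel pattern, not code from this patch):

#include <linux/mm.h>

static pte_t *walk_to_pte(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd = pgd_offset(mm, addr);
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;

	if (pgd_none(*pgd) || pgd_bad(*pgd))
		return NULL;
	p4d = p4d_offset(pgd, addr);
	if (p4d_none(*p4d) || p4d_bad(*p4d))
		return NULL;
	pud = pud_offset(p4d, addr);
	if (pud_none(*pud) || pud_bad(*pud))
		return NULL;
	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd) || pmd_bad(*pmd))
		return NULL;
	return pte_offset_map(pmd, addr);	/* caller unmaps when done */
}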
diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
index e6e3b887bee3..57057fb1cc07 100644
--- a/arch/s390/include/asm/pgtable.h
+++ b/arch/s390/include/asm/pgtable.h
@@ -24,7 +24,6 @@
  * the S390 page table tree.
  */
 #ifndef __ASSEMBLY__
-#include <asm-generic/5level-fixup.h>
 #include <linux/sched.h>
 #include <linux/mm_types.h>
 #include <linux/page-flags.h>
@@ -87,12 +86,15 @@ extern unsigned long zero_page_mask;
  */
 #define PMD_SHIFT	20
 #define PUD_SHIFT	31
-#define PGDIR_SHIFT	42
+#define P4D_SHIFT	42
+#define PGDIR_SHIFT	53
 
 #define PMD_SIZE	(1UL << PMD_SHIFT)
 #define PMD_MASK	(~(PMD_SIZE-1))
 #define PUD_SIZE	(1UL << PUD_SHIFT)
 #define PUD_MASK	(~(PUD_SIZE-1))
+#define P4D_SIZE	(1UL << P4D_SHIFT)
+#define P4D_MASK	(~(P4D_SIZE-1))
 #define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
 #define PGDIR_MASK	(~(PGDIR_SIZE-1))
 
@@ -105,6 +107,7 @@ extern unsigned long zero_page_mask;
 #define PTRS_PER_PTE	256
 #define PTRS_PER_PMD	2048
 #define PTRS_PER_PUD	2048
+#define PTRS_PER_P4D	2048
 #define PTRS_PER_PGD	2048
 
 #define FIRST_USER_ADDRESS  0UL
@@ -115,6 +118,8 @@ extern unsigned long zero_page_mask;
 	printk("%s:%d: bad pmd %p.\n", __FILE__, __LINE__, (void *) pmd_val(e))
 #define pud_ERROR(e) \
 	printk("%s:%d: bad pud %p.\n", __FILE__, __LINE__, (void *) pud_val(e))
+#define p4d_ERROR(e) \
+	printk("%s:%d: bad p4d %p.\n", __FILE__, __LINE__, (void *) p4d_val(e))
 #define pgd_ERROR(e) \
 	printk("%s:%d: bad pgd %p.\n", __FILE__, __LINE__, (void *) pgd_val(e))
 
@@ -296,8 +301,6 @@ static inline int is_module_addr(void *addr)
 #define _REGION3_ENTRY_EMPTY	(_REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_INVALID)
 
 #define _REGION3_ENTRY_ORIGIN_LARGE ~0x7fffffffUL /* large page address	     */
-#define _REGION3_ENTRY_ORIGIN  ~0x7ffUL/* region third table origin	     */
-
 #define _REGION3_ENTRY_DIRTY	0x2000	/* SW region dirty bit */
 #define _REGION3_ENTRY_YOUNG	0x1000	/* SW region young bit */
 #define _REGION3_ENTRY_LARGE	0x0400	/* RTTE-format control, large page  */
@@ -310,8 +313,8 @@ static inline int is_module_addr(void *addr)
 #define _REGION3_ENTRY_SOFT_DIRTY 0x0000 /* SW region soft dirty bit */
 #endif
 
-#define _REGION_ENTRY_BITS	 0xfffffffffffff227UL
-#define _REGION_ENTRY_BITS_LARGE 0xffffffff8000fe27UL
+#define _REGION_ENTRY_BITS	 0xfffffffffffff22fUL
+#define _REGION_ENTRY_BITS_LARGE 0xffffffff8000fe2fUL
 
 /* Bits in the segment table entry */
 #define _SEGMENT_ENTRY_BITS	0xfffffffffffffe33UL
@@ -560,18 +563,23 @@ static inline void crdte(unsigned long old, unsigned long new,
 }
 
 /*
- * pgd/pmd/pte query functions
+ * pgd/p4d/pud/pmd/pte query functions
  */
+static inline int pgd_folded(pgd_t pgd)
+{
+	return (pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R1;
+}
+
 static inline int pgd_present(pgd_t pgd)
 {
-	if ((pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R2)
+	if (pgd_folded(pgd))
 		return 1;
 	return (pgd_val(pgd) & _REGION_ENTRY_ORIGIN) != 0UL;
570} 578}
571 579
572static inline int pgd_none(pgd_t pgd) 580static inline int pgd_none(pgd_t pgd)
573{ 581{
574 if ((pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R2) 582 if (pgd_folded(pgd))
575 return 0; 583 return 0;
576 return (pgd_val(pgd) & _REGION_ENTRY_INVALID) != 0UL; 584 return (pgd_val(pgd) & _REGION_ENTRY_INVALID) != 0UL;
577} 585}
@@ -589,16 +597,48 @@ static inline int pgd_bad(pgd_t pgd)
589 return (pgd_val(pgd) & mask) != 0; 597 return (pgd_val(pgd) & mask) != 0;
590} 598}
591 599
600static inline int p4d_folded(p4d_t p4d)
601{
602 return (p4d_val(p4d) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R2;
603}
604
605static inline int p4d_present(p4d_t p4d)
606{
607 if (p4d_folded(p4d))
608 return 1;
609 return (p4d_val(p4d) & _REGION_ENTRY_ORIGIN) != 0UL;
610}
611
612static inline int p4d_none(p4d_t p4d)
613{
614 if (p4d_folded(p4d))
615 return 0;
616 return p4d_val(p4d) == _REGION2_ENTRY_EMPTY;
617}
618
619static inline unsigned long p4d_pfn(p4d_t p4d)
620{
621 unsigned long origin_mask;
622
623 origin_mask = _REGION_ENTRY_ORIGIN;
624 return (p4d_val(p4d) & origin_mask) >> PAGE_SHIFT;
625}
626
627static inline int pud_folded(pud_t pud)
628{
629 return (pud_val(pud) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R3;
630}
631
592static inline int pud_present(pud_t pud) 632static inline int pud_present(pud_t pud)
593{ 633{
594 if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R3) 634 if (pud_folded(pud))
595 return 1; 635 return 1;
596 return (pud_val(pud) & _REGION_ENTRY_ORIGIN) != 0UL; 636 return (pud_val(pud) & _REGION_ENTRY_ORIGIN) != 0UL;
597} 637}
598 638
599static inline int pud_none(pud_t pud) 639static inline int pud_none(pud_t pud)
600{ 640{
601 if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R3) 641 if (pud_folded(pud))
602 return 0; 642 return 0;
603 return pud_val(pud) == _REGION3_ENTRY_EMPTY; 643 return pud_val(pud) == _REGION3_ENTRY_EMPTY;
604} 644}
@@ -614,7 +654,7 @@ static inline unsigned long pud_pfn(pud_t pud)
614{ 654{
615 unsigned long origin_mask; 655 unsigned long origin_mask;
616 656
617 origin_mask = _REGION3_ENTRY_ORIGIN; 657 origin_mask = _REGION_ENTRY_ORIGIN;
618 if (pud_large(pud)) 658 if (pud_large(pud))
619 origin_mask = _REGION3_ENTRY_ORIGIN_LARGE; 659 origin_mask = _REGION3_ENTRY_ORIGIN_LARGE;
620 return (pud_val(pud) & origin_mask) >> PAGE_SHIFT; 660 return (pud_val(pud) & origin_mask) >> PAGE_SHIFT;
@@ -641,6 +681,13 @@ static inline int pud_bad(pud_t pud)
641 return (pud_val(pud) & ~_REGION_ENTRY_BITS) != 0; 681 return (pud_val(pud) & ~_REGION_ENTRY_BITS) != 0;
642} 682}
643 683
684static inline int p4d_bad(p4d_t p4d)
685{
686 if ((p4d_val(p4d) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R2)
687 return pud_bad(__pud(p4d_val(p4d)));
688 return (p4d_val(p4d) & ~_REGION_ENTRY_BITS) != 0;
689}
690
644static inline int pmd_present(pmd_t pmd) 691static inline int pmd_present(pmd_t pmd)
645{ 692{
646 return pmd_val(pmd) != _SEGMENT_ENTRY_EMPTY; 693 return pmd_val(pmd) != _SEGMENT_ENTRY_EMPTY;
@@ -794,8 +841,14 @@ static inline int pte_unused(pte_t pte)
794 841
795static inline void pgd_clear(pgd_t *pgd) 842static inline void pgd_clear(pgd_t *pgd)
796{ 843{
797 if ((pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R2) 844 if ((pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R1)
798 pgd_val(*pgd) = _REGION2_ENTRY_EMPTY; 845 pgd_val(*pgd) = _REGION1_ENTRY_EMPTY;
846}
847
848static inline void p4d_clear(p4d_t *p4d)
849{
850 if ((p4d_val(*p4d) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R2)
851 p4d_val(*p4d) = _REGION2_ENTRY_EMPTY;
799} 852}
800 853
801static inline void pud_clear(pud_t *pud) 854static inline void pud_clear(pud_t *pud)
@@ -1089,6 +1142,7 @@ static inline pte_t mk_pte(struct page *page, pgprot_t pgprot)
1089} 1142}
1090 1143
1091#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1)) 1144#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
1145#define p4d_index(address) (((address) >> P4D_SHIFT) & (PTRS_PER_P4D-1))
1092#define pud_index(address) (((address) >> PUD_SHIFT) & (PTRS_PER_PUD-1)) 1146#define pud_index(address) (((address) >> PUD_SHIFT) & (PTRS_PER_PUD-1))
1093#define pmd_index(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1)) 1147#define pmd_index(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))
1094#define pte_index(address) (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE-1)) 1148#define pte_index(address) (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE-1))
@@ -1098,19 +1152,31 @@ static inline pte_t mk_pte(struct page *page, pgprot_t pgprot)
1098 1152
1099#define pmd_deref(pmd) (pmd_val(pmd) & _SEGMENT_ENTRY_ORIGIN) 1153#define pmd_deref(pmd) (pmd_val(pmd) & _SEGMENT_ENTRY_ORIGIN)
1100#define pud_deref(pud) (pud_val(pud) & _REGION_ENTRY_ORIGIN) 1154#define pud_deref(pud) (pud_val(pud) & _REGION_ENTRY_ORIGIN)
 1155#define p4d_deref(p4d) (p4d_val(p4d) & _REGION_ENTRY_ORIGIN)
1101#define pgd_deref(pgd) (pgd_val(pgd) & _REGION_ENTRY_ORIGIN) 1156#define pgd_deref(pgd) (pgd_val(pgd) & _REGION_ENTRY_ORIGIN)
1102 1157
1103static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address) 1158static inline p4d_t *p4d_offset(pgd_t *pgd, unsigned long address)
1104{ 1159{
1105 pud_t *pud = (pud_t *) pgd; 1160 p4d_t *p4d = (p4d_t *) pgd;
1106 if ((pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R2) 1161
1107 pud = (pud_t *) pgd_deref(*pgd); 1162 if ((pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R1)
1108 return pud + pud_index(address); 1163 p4d = (p4d_t *) pgd_deref(*pgd);
1164 return p4d + p4d_index(address);
1165}
1166
1167static inline pud_t *pud_offset(p4d_t *p4d, unsigned long address)
1168{
1169 pud_t *pud = (pud_t *) p4d;
1170
1171 if ((p4d_val(*p4d) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R2)
1172 pud = (pud_t *) p4d_deref(*p4d);
1173 return pud + pud_index(address);
1109} 1174}
1110 1175
1111static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address) 1176static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
1112{ 1177{
1113 pmd_t *pmd = (pmd_t *) pud; 1178 pmd_t *pmd = (pmd_t *) pud;
1179
1114 if ((pud_val(*pud) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3) 1180 if ((pud_val(*pud) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3)
1115 pmd = (pmd_t *) pud_deref(*pud); 1181 pmd = (pmd_t *) pud_deref(*pud);
1116 return pmd + pmd_index(address); 1182 return pmd + pmd_index(address);
@@ -1122,6 +1188,7 @@ static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
1122 1188
1123#define pmd_page(pmd) pfn_to_page(pmd_pfn(pmd)) 1189#define pmd_page(pmd) pfn_to_page(pmd_pfn(pmd))
1124#define pud_page(pud) pfn_to_page(pud_pfn(pud)) 1190#define pud_page(pud) pfn_to_page(pud_pfn(pud))
 1191#define p4d_page(p4d) pfn_to_page(p4d_pfn(p4d))
1125 1192
1126/* Find an entry in the lowest level page table.. */ 1193/* Find an entry in the lowest level page table.. */
1127#define pte_offset(pmd, addr) ((pte_t *) pmd_deref(*(pmd)) + pte_index(addr)) 1194#define pte_offset(pmd, addr) ((pte_t *) pmd_deref(*(pmd)) + pte_index(addr))
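[editor's note] With the p4d level inserted between pgd and pud, p4d_offset() and pud_offset() above return their argument unchanged while the corresponding table is still folded (entry type below R1/R2) and only dereference once a real higher-level table is attached, so the same walk works for three-, four- and five-level page tables. A sketch of such a walk, assuming the usual s390 page-table helpers are in scope:

/*
 * Sketch only: software walk down the new pgd -> p4d -> pud -> pmd -> pte
 * chain. On an mm with folded upper levels the offset helpers simply
 * return the entry they were given.
 */
static pte_t *walk_to_pte(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd = pgd_offset(mm, addr);
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;

	if (pgd_none(*pgd))
		return NULL;
	p4d = p4d_offset(pgd, addr);
	if (p4d_none(*p4d))
		return NULL;
	pud = pud_offset(p4d, addr);
	if (pud_none(*pud))
		return NULL;
	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd) || pmd_large(*pmd))	/* no pte level behind a large pmd */
		return NULL;
	return pte_offset(pmd, addr);
}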
diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h
index 5b1b247dfbca..72c7b88f8d2c 100644
--- a/arch/s390/include/asm/processor.h
+++ b/arch/s390/include/asm/processor.h
@@ -94,11 +94,11 @@ extern void execve_tail(void);
94 */ 94 */
95 95
96#define TASK_SIZE_OF(tsk) (test_tsk_thread_flag(tsk, TIF_31BIT) ? \ 96#define TASK_SIZE_OF(tsk) (test_tsk_thread_flag(tsk, TIF_31BIT) ? \
97 (1UL << 31) : (1UL << 53)) 97 (1UL << 31) : -PAGE_SIZE)
98#define TASK_UNMAPPED_BASE (test_thread_flag(TIF_31BIT) ? \ 98#define TASK_UNMAPPED_BASE (test_thread_flag(TIF_31BIT) ? \
99 (1UL << 30) : (1UL << 41)) 99 (1UL << 30) : (1UL << 41))
100#define TASK_SIZE TASK_SIZE_OF(current) 100#define TASK_SIZE TASK_SIZE_OF(current)
101#define TASK_SIZE_MAX (1UL << 53) 101#define TASK_SIZE_MAX (-PAGE_SIZE)
102 102
103#define STACK_TOP (test_thread_flag(TIF_31BIT) ? \ 103#define STACK_TOP (test_thread_flag(TIF_31BIT) ? \
104 (1UL << 31) : (1UL << 42)) 104 (1UL << 31) : (1UL << 42))
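[editor's note] TASK_SIZE_MAX and the 64-bit TASK_SIZE_OF() case now evaluate to -PAGE_SIZE instead of the hard-coded 1UL << 53, i.e. the highest page-aligned unsigned long value, so the limit no longer caps a five-level address space. A quick userspace check of that arithmetic, assuming 4 KB pages:

/* Worked check, userspace illustration with a 4 KB page size assumed. */
#include <stdio.h>

#define PAGE_SIZE 4096UL

int main(void)
{
	unsigned long task_size_max = -PAGE_SIZE;	/* wraps to ~0UL & ~(PAGE_SIZE - 1) */

	printf("%#lx\n", task_size_max);	/* 0xfffffffffffff000 on 64-bit */
	return 0;
}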
diff --git a/arch/s390/include/asm/ptrace.h b/arch/s390/include/asm/ptrace.h
index 99bc456cc26a..853b01245c20 100644
--- a/arch/s390/include/asm/ptrace.h
+++ b/arch/s390/include/asm/ptrace.h
@@ -11,9 +11,11 @@
11 11
12#define PIF_SYSCALL 0 /* inside a system call */ 12#define PIF_SYSCALL 0 /* inside a system call */
13#define PIF_PER_TRAP 1 /* deliver sigtrap on return to user */ 13#define PIF_PER_TRAP 1 /* deliver sigtrap on return to user */
14#define PIF_SYSCALL_RESTART 2 /* restart the current system call */
14 15
15#define _PIF_SYSCALL _BITUL(PIF_SYSCALL) 16#define _PIF_SYSCALL _BITUL(PIF_SYSCALL)
16#define _PIF_PER_TRAP _BITUL(PIF_PER_TRAP) 17#define _PIF_PER_TRAP _BITUL(PIF_PER_TRAP)
18#define _PIF_SYSCALL_RESTART _BITUL(PIF_SYSCALL_RESTART)
17 19
18#ifndef __ASSEMBLY__ 20#ifndef __ASSEMBLY__
19 21
@@ -24,38 +26,38 @@
24 PSW_MASK_PSTATE | PSW_ASC_PRIMARY) 26 PSW_MASK_PSTATE | PSW_ASC_PRIMARY)
25 27
26struct psw_bits { 28struct psw_bits {
27 unsigned long : 1; 29 unsigned long : 1;
28 unsigned long r : 1; /* PER-Mask */ 30 unsigned long per : 1; /* PER-Mask */
29 unsigned long : 3; 31 unsigned long : 3;
30 unsigned long t : 1; /* DAT Mode */ 32 unsigned long dat : 1; /* DAT Mode */
31 unsigned long i : 1; /* Input/Output Mask */ 33 unsigned long io : 1; /* Input/Output Mask */
32 unsigned long e : 1; /* External Mask */ 34 unsigned long ext : 1; /* External Mask */
33 unsigned long key : 4; /* PSW Key */ 35 unsigned long key : 4; /* PSW Key */
34 unsigned long : 1; 36 unsigned long : 1;
35 unsigned long m : 1; /* Machine-Check Mask */ 37 unsigned long mcheck : 1; /* Machine-Check Mask */
36 unsigned long w : 1; /* Wait State */ 38 unsigned long wait : 1; /* Wait State */
37 unsigned long p : 1; /* Problem State */ 39 unsigned long pstate : 1; /* Problem State */
38 unsigned long as : 2; /* Address Space Control */ 40 unsigned long as : 2; /* Address Space Control */
39 unsigned long cc : 2; /* Condition Code */ 41 unsigned long cc : 2; /* Condition Code */
40 unsigned long pm : 4; /* Program Mask */ 42 unsigned long pm : 4; /* Program Mask */
41 unsigned long ri : 1; /* Runtime Instrumentation */ 43 unsigned long ri : 1; /* Runtime Instrumentation */
42 unsigned long : 6; 44 unsigned long : 6;
43 unsigned long eaba : 2; /* Addressing Mode */ 45 unsigned long eaba : 2; /* Addressing Mode */
44 unsigned long : 31; 46 unsigned long : 31;
45 unsigned long ia : 64; /* Instruction Address */ 47 unsigned long ia : 64; /* Instruction Address */
46}; 48};
47 49
48enum { 50enum {
49 PSW_AMODE_24BIT = 0, 51 PSW_BITS_AMODE_24BIT = 0,
50 PSW_AMODE_31BIT = 1, 52 PSW_BITS_AMODE_31BIT = 1,
51 PSW_AMODE_64BIT = 3 53 PSW_BITS_AMODE_64BIT = 3
52}; 54};
53 55
54enum { 56enum {
55 PSW_AS_PRIMARY = 0, 57 PSW_BITS_AS_PRIMARY = 0,
56 PSW_AS_ACCREG = 1, 58 PSW_BITS_AS_ACCREG = 1,
57 PSW_AS_SECONDARY = 2, 59 PSW_BITS_AS_SECONDARY = 2,
58 PSW_AS_HOME = 3 60 PSW_BITS_AS_HOME = 3
59}; 61};
60 62
61#define psw_bits(__psw) (*({ \ 63#define psw_bits(__psw) (*({ \
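[editor's note] Spelling the psw_bits fields out (per, dat, io, ext, mcheck, wait, pstate) and prefixing the enums with PSW_BITS_ makes call sites self-describing; the dumpstack, uprobes and KVM hunks later in this diff are converted accordingly. A small sketch of the resulting call-site style, assuming the usual ptrace definitions are in scope:

/* Sketch only: how a test reads after the rename. */
static bool psw_is_64bit_user(struct pt_regs *regs)
{
	return psw_bits(regs->psw).pstate &&			/* problem state */
	       psw_bits(regs->psw).eaba == PSW_BITS_AMODE_64BIT &&
	       psw_bits(regs->psw).as == PSW_BITS_AS_PRIMARY;
}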
diff --git a/arch/s390/include/asm/sigp.h b/arch/s390/include/asm/sigp.h
index 72df5f2de6b0..020a8814d511 100644
--- a/arch/s390/include/asm/sigp.h
+++ b/arch/s390/include/asm/sigp.h
@@ -59,7 +59,7 @@ static inline int __pcpu_sigp(u16 addr, u8 order, unsigned long parm,
59 int cc; 59 int cc;
60 60
61 cc = ____pcpu_sigp(addr, order, parm, &_status); 61 cc = ____pcpu_sigp(addr, order, parm, &_status);
62 if (status && cc == 1) 62 if (status && cc == SIGP_CC_STATUS_STORED)
63 *status = _status; 63 *status = _status;
64 return cc; 64 return cc;
65} 65}
diff --git a/arch/s390/include/asm/sysinfo.h b/arch/s390/include/asm/sysinfo.h
index 73bff45ced55..e784bed6ed7f 100644
--- a/arch/s390/include/asm/sysinfo.h
+++ b/arch/s390/include/asm/sysinfo.h
@@ -146,7 +146,7 @@ extern int topology_max_mnest;
146 * Returns the maximum nesting level supported by the cpu topology code. 146 * Returns the maximum nesting level supported by the cpu topology code.
147 * The current maximum level is 4 which is the drawer level. 147 * The current maximum level is 4 which is the drawer level.
148 */ 148 */
149static inline int topology_mnest_limit(void) 149static inline unsigned char topology_mnest_limit(void)
150{ 150{
151 return min(topology_max_mnest, 4); 151 return min(topology_max_mnest, 4);
152} 152}
diff --git a/arch/s390/include/asm/thread_info.h b/arch/s390/include/asm/thread_info.h
index 0b3ee083a665..1aecf432c48d 100644
--- a/arch/s390/include/asm/thread_info.h
+++ b/arch/s390/include/asm/thread_info.h
@@ -58,6 +58,7 @@ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
58#define TIF_UPROBE 3 /* breakpointed or single-stepping */ 58#define TIF_UPROBE 3 /* breakpointed or single-stepping */
59#define TIF_GUARDED_STORAGE 4 /* load guarded storage control block */ 59#define TIF_GUARDED_STORAGE 4 /* load guarded storage control block */
60#define TIF_PATCH_PENDING 5 /* pending live patching update */ 60#define TIF_PATCH_PENDING 5 /* pending live patching update */
61#define TIF_PGSTE 6 /* New mm's will use 4K page tables */
61 62
62#define TIF_31BIT 16 /* 32bit process */ 63#define TIF_31BIT 16 /* 32bit process */
63#define TIF_MEMDIE 17 /* is terminating due to OOM killer */ 64#define TIF_MEMDIE 17 /* is terminating due to OOM killer */
diff --git a/arch/s390/include/asm/tlb.h b/arch/s390/include/asm/tlb.h
index 853b2a3d8dee..7317b3108a88 100644
--- a/arch/s390/include/asm/tlb.h
+++ b/arch/s390/include/asm/tlb.h
@@ -137,6 +137,21 @@ static inline void pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
137} 137}
138 138
139/* 139/*
 140 * p4d_free_tlb frees a p4d table and clears the CRSTE for the
141 * region second table entry from the tlb.
142 * If the mm uses a four level page table the single p4d is freed
143 * as the pgd. p4d_free_tlb checks the asce_limit against 8PB
144 * to avoid the double free of the p4d in this case.
145 */
146static inline void p4d_free_tlb(struct mmu_gather *tlb, p4d_t *p4d,
147 unsigned long address)
148{
149 if (tlb->mm->context.asce_limit <= (1UL << 53))
150 return;
151 tlb_remove_table(tlb, p4d);
152}
153
154/*
140 * pud_free_tlb frees a pud table and clears the CRSTE for the 155 * pud_free_tlb frees a pud table and clears the CRSTE for the
141 * region third table entry from the tlb. 156 * region third table entry from the tlb.
142 * If the mm uses a three level page table the single pud is freed 157 * If the mm uses a three level page table the single pud is freed
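[editor's note] p4d_free_tlb() only queues the table for deferred freeing when the address space really has five levels; at or below an asce_limit of 2^53 the p4d is the pgd itself and freeing it here would be a double free, just as pud_free_tlb() below guards the folded three-level case. A tiny sketch of the folding thresholds implied by these checks and by pgd_entry_type() in the pgalloc.h hunk:

/*
 * Sketch only: which upper levels are folded for a given asce_limit,
 * as implied by the p4d_free_tlb()/pud_free_tlb() checks.
 */
static bool p4d_level_folded(unsigned long asce_limit)
{
	return asce_limit <= (1UL << 53);	/* four levels or fewer: p4d == pgd */
}

static bool pud_level_folded(unsigned long asce_limit)
{
	return asce_limit <= (1UL << 42);	/* three levels or fewer: pud is the top table */
}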
diff --git a/arch/s390/kernel/debug.c b/arch/s390/kernel/debug.c
index 530226b6cb19..86b3e74f569e 100644
--- a/arch/s390/kernel/debug.c
+++ b/arch/s390/kernel/debug.c
@@ -277,7 +277,7 @@ debug_info_alloc(const char *name, int pages_per_area, int nr_areas,
277 memset(rc->views, 0, DEBUG_MAX_VIEWS * sizeof(struct debug_view *)); 277 memset(rc->views, 0, DEBUG_MAX_VIEWS * sizeof(struct debug_view *));
278 memset(rc->debugfs_entries, 0 ,DEBUG_MAX_VIEWS * 278 memset(rc->debugfs_entries, 0 ,DEBUG_MAX_VIEWS *
279 sizeof(struct dentry*)); 279 sizeof(struct dentry*));
280 atomic_set(&(rc->ref_count), 0); 280 refcount_set(&(rc->ref_count), 0);
281 281
282 return rc; 282 return rc;
283 283
@@ -361,7 +361,7 @@ debug_info_create(const char *name, int pages_per_area, int nr_areas,
361 debug_area_last = rc; 361 debug_area_last = rc;
362 rc->next = NULL; 362 rc->next = NULL;
363 363
364 debug_info_get(rc); 364 refcount_set(&rc->ref_count, 1);
365out: 365out:
366 return rc; 366 return rc;
367} 367}
@@ -416,7 +416,7 @@ static void
416debug_info_get(debug_info_t * db_info) 416debug_info_get(debug_info_t * db_info)
417{ 417{
418 if (db_info) 418 if (db_info)
419 atomic_inc(&db_info->ref_count); 419 refcount_inc(&db_info->ref_count);
420} 420}
421 421
422/* 422/*
@@ -431,7 +431,7 @@ debug_info_put(debug_info_t *db_info)
431 431
432 if (!db_info) 432 if (!db_info)
433 return; 433 return;
434 if (atomic_dec_and_test(&db_info->ref_count)) { 434 if (refcount_dec_and_test(&db_info->ref_count)) {
435 for (i = 0; i < DEBUG_MAX_VIEWS; i++) { 435 for (i = 0; i < DEBUG_MAX_VIEWS; i++) {
436 if (!db_info->views[i]) 436 if (!db_info->views[i])
437 continue; 437 continue;
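[editor's note] The ref_count conversion replaces atomic_t with refcount_t, which saturates rather than wrapping on overflow/underflow while keeping the same get/put lifetime pattern: set to 1 at creation, inc on get, free on the final dec_and_test. A minimal sketch of that pattern on a hypothetical object (not debug_info_t itself):

/* Minimal sketch of the refcount_t lifetime pattern used above. */
#include <linux/refcount.h>
#include <linux/slab.h>

struct obj {
	refcount_t ref;
};

static struct obj *obj_create(void)
{
	struct obj *o = kzalloc(sizeof(*o), GFP_KERNEL);

	if (o)
		refcount_set(&o->ref, 1);	/* creator holds the first reference */
	return o;
}

static void obj_get(struct obj *o)
{
	refcount_inc(&o->ref);
}

static void obj_put(struct obj *o)
{
	if (refcount_dec_and_test(&o->ref))	/* true only for the last put */
		kfree(o);
}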
diff --git a/arch/s390/kernel/dumpstack.c b/arch/s390/kernel/dumpstack.c
index 829e1c53005c..dab78babfab6 100644
--- a/arch/s390/kernel/dumpstack.c
+++ b/arch/s390/kernel/dumpstack.c
@@ -98,8 +98,10 @@ static int show_address(void *data, unsigned long address, int reliable)
98 return 0; 98 return 0;
99} 99}
100 100
101static void show_trace(struct task_struct *task, unsigned long sp) 101void show_stack(struct task_struct *task, unsigned long *stack)
102{ 102{
103 unsigned long sp = (unsigned long) stack;
104
103 if (!sp) 105 if (!sp)
104 sp = task ? task->thread.ksp : current_stack_pointer(); 106 sp = task ? task->thread.ksp : current_stack_pointer();
105 printk("Call Trace:\n"); 107 printk("Call Trace:\n");
@@ -109,29 +111,6 @@ static void show_trace(struct task_struct *task, unsigned long sp)
109 debug_show_held_locks(task); 111 debug_show_held_locks(task);
110} 112}
111 113
112void show_stack(struct task_struct *task, unsigned long *sp)
113{
114 unsigned long *stack;
115 int i;
116
117 stack = sp;
118 if (!stack) {
119 if (!task)
120 stack = (unsigned long *)current_stack_pointer();
121 else
122 stack = (unsigned long *)task->thread.ksp;
123 }
124 printk(KERN_DEFAULT "Stack:\n");
125 for (i = 0; i < 20; i++) {
126 if (((addr_t) stack & (THREAD_SIZE-1)) == 0)
127 break;
128 if (i % 4 == 0)
129 printk(KERN_DEFAULT " ");
130 pr_cont("%016lx%c", *stack++, i % 4 == 3 ? '\n' : ' ');
131 }
132 show_trace(task, (unsigned long)sp);
133}
134
135static void show_last_breaking_event(struct pt_regs *regs) 114static void show_last_breaking_event(struct pt_regs *regs)
136{ 115{
137 printk("Last Breaking-Event-Address:\n"); 116 printk("Last Breaking-Event-Address:\n");
@@ -149,8 +128,8 @@ void show_registers(struct pt_regs *regs)
149 pr_cont(" (%pSR)", (void *)regs->psw.addr); 128 pr_cont(" (%pSR)", (void *)regs->psw.addr);
150 pr_cont("\n"); 129 pr_cont("\n");
151 printk(" R:%x T:%x IO:%x EX:%x Key:%x M:%x W:%x " 130 printk(" R:%x T:%x IO:%x EX:%x Key:%x M:%x W:%x "
152 "P:%x AS:%x CC:%x PM:%x", psw->r, psw->t, psw->i, psw->e, 131 "P:%x AS:%x CC:%x PM:%x", psw->per, psw->dat, psw->io, psw->ext,
153 psw->key, psw->m, psw->w, psw->p, psw->as, psw->cc, psw->pm); 132 psw->key, psw->mcheck, psw->wait, psw->pstate, psw->as, psw->cc, psw->pm);
154 pr_cont(" RI:%x EA:%x\n", psw->ri, psw->eaba); 133 pr_cont(" RI:%x EA:%x\n", psw->ri, psw->eaba);
155 printk("%s GPRS: %016lx %016lx %016lx %016lx\n", mode, 134 printk("%s GPRS: %016lx %016lx %016lx %016lx\n", mode,
156 regs->gprs[0], regs->gprs[1], regs->gprs[2], regs->gprs[3]); 135 regs->gprs[0], regs->gprs[1], regs->gprs[2], regs->gprs[3]);
@@ -169,7 +148,7 @@ void show_regs(struct pt_regs *regs)
169 show_registers(regs); 148 show_registers(regs);
170 /* Show stack backtrace if pt_regs is from kernel mode */ 149 /* Show stack backtrace if pt_regs is from kernel mode */
171 if (!user_mode(regs)) 150 if (!user_mode(regs))
172 show_trace(NULL, regs->gprs[15]); 151 show_stack(NULL, (unsigned long *) regs->gprs[15]);
173 show_last_breaking_event(regs); 152 show_last_breaking_event(regs);
174} 153}
175 154
diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
index 9b48196ebf40..21900e1cee9c 100644
--- a/arch/s390/kernel/entry.S
+++ b/arch/s390/kernel/entry.S
@@ -52,7 +52,7 @@ _TIF_TRACE = (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SECCOMP | \
52 _TIF_SYSCALL_TRACEPOINT) 52 _TIF_SYSCALL_TRACEPOINT)
53_CIF_WORK = (_CIF_MCCK_PENDING | _CIF_ASCE_PRIMARY | \ 53_CIF_WORK = (_CIF_MCCK_PENDING | _CIF_ASCE_PRIMARY | \
54 _CIF_ASCE_SECONDARY | _CIF_FPU) 54 _CIF_ASCE_SECONDARY | _CIF_FPU)
55_PIF_WORK = (_PIF_PER_TRAP) 55_PIF_WORK = (_PIF_PER_TRAP | _PIF_SYSCALL_RESTART)
56 56
57#define BASED(name) name-cleanup_critical(%r13) 57#define BASED(name) name-cleanup_critical(%r13)
58 58
@@ -232,12 +232,17 @@ ENTRY(sie64a)
232 lctlg %c1,%c1,__LC_USER_ASCE # load primary asce 232 lctlg %c1,%c1,__LC_USER_ASCE # load primary asce
233.Lsie_done: 233.Lsie_done:
234# some program checks are suppressing. C code (e.g. do_protection_exception) 234# some program checks are suppressing. C code (e.g. do_protection_exception)
235# will rewind the PSW by the ILC, which is 4 bytes in case of SIE. Other 235# will rewind the PSW by the ILC, which is often 4 bytes in case of SIE. There
236# instructions between sie64a and .Lsie_done should not cause program 236# are some corner cases (e.g. runtime instrumentation) where ILC is unpredictable.
237# interrupts. So lets use a nop (47 00 00 00) as a landing pad. 237# Other instructions between sie64a and .Lsie_done should not cause program
238# interrupts. So lets use 3 nops as a landing pad for all possible rewinds.
238# See also .Lcleanup_sie 239# See also .Lcleanup_sie
239.Lrewind_pad: 240.Lrewind_pad6:
240 nop 0 241 nopr 7
242.Lrewind_pad4:
243 nopr 7
244.Lrewind_pad2:
245 nopr 7
241 .globl sie_exit 246 .globl sie_exit
242sie_exit: 247sie_exit:
243 lg %r14,__SF_EMPTY+8(%r15) # load guest register save area 248 lg %r14,__SF_EMPTY+8(%r15) # load guest register save area
@@ -250,7 +255,9 @@ sie_exit:
250 stg %r14,__SF_EMPTY+16(%r15) # set exit reason code 255 stg %r14,__SF_EMPTY+16(%r15) # set exit reason code
251 j sie_exit 256 j sie_exit
252 257
253 EX_TABLE(.Lrewind_pad,.Lsie_fault) 258 EX_TABLE(.Lrewind_pad6,.Lsie_fault)
259 EX_TABLE(.Lrewind_pad4,.Lsie_fault)
260 EX_TABLE(.Lrewind_pad2,.Lsie_fault)
254 EX_TABLE(sie_exit,.Lsie_fault) 261 EX_TABLE(sie_exit,.Lsie_fault)
255EXPORT_SYMBOL(sie64a) 262EXPORT_SYMBOL(sie64a)
256EXPORT_SYMBOL(sie_exit) 263EXPORT_SYMBOL(sie_exit)
@@ -313,6 +320,7 @@ ENTRY(system_call)
313 lg %r14,__LC_VDSO_PER_CPU 320 lg %r14,__LC_VDSO_PER_CPU
314 lmg %r0,%r10,__PT_R0(%r11) 321 lmg %r0,%r10,__PT_R0(%r11)
315 mvc __LC_RETURN_PSW(16),__PT_PSW(%r11) 322 mvc __LC_RETURN_PSW(16),__PT_PSW(%r11)
323.Lsysc_exit_timer:
316 stpt __LC_EXIT_TIMER 324 stpt __LC_EXIT_TIMER
317 mvc __VDSO_ECTG_BASE(16,%r14),__LC_EXIT_TIMER 325 mvc __VDSO_ECTG_BASE(16,%r14),__LC_EXIT_TIMER
318 lmg %r11,%r15,__PT_R11(%r11) 326 lmg %r11,%r15,__PT_R11(%r11)
@@ -327,6 +335,8 @@ ENTRY(system_call)
327 jo .Lsysc_mcck_pending 335 jo .Lsysc_mcck_pending
328 TSTMSK __TI_flags(%r12),_TIF_NEED_RESCHED 336 TSTMSK __TI_flags(%r12),_TIF_NEED_RESCHED
329 jo .Lsysc_reschedule 337 jo .Lsysc_reschedule
338 TSTMSK __PT_FLAGS(%r11),_PIF_SYSCALL_RESTART
339 jo .Lsysc_syscall_restart
330#ifdef CONFIG_UPROBES 340#ifdef CONFIG_UPROBES
331 TSTMSK __TI_flags(%r12),_TIF_UPROBE 341 TSTMSK __TI_flags(%r12),_TIF_UPROBE
332 jo .Lsysc_uprobe_notify 342 jo .Lsysc_uprobe_notify
@@ -340,6 +350,8 @@ ENTRY(system_call)
340 jo .Lsysc_patch_pending # handle live patching just before 350 jo .Lsysc_patch_pending # handle live patching just before
341 # signals and possible syscall restart 351 # signals and possible syscall restart
342#endif 352#endif
353 TSTMSK __PT_FLAGS(%r11),_PIF_SYSCALL_RESTART
354 jo .Lsysc_syscall_restart
343 TSTMSK __TI_flags(%r12),_TIF_SIGPENDING 355 TSTMSK __TI_flags(%r12),_TIF_SIGPENDING
344 jo .Lsysc_sigpending 356 jo .Lsysc_sigpending
345 TSTMSK __TI_flags(%r12),_TIF_NOTIFY_RESUME 357 TSTMSK __TI_flags(%r12),_TIF_NOTIFY_RESUME
@@ -441,6 +453,15 @@ ENTRY(system_call)
441 jg do_per_trap 453 jg do_per_trap
442 454
443# 455#
456# _PIF_SYSCALL_RESTART is set, repeat the current system call
457#
458.Lsysc_syscall_restart:
459 ni __PT_FLAGS+7(%r11),255-_PIF_SYSCALL_RESTART
460 lmg %r1,%r7,__PT_R1(%r11) # load svc arguments
461 lg %r2,__PT_ORIG_GPR2(%r11)
462 j .Lsysc_do_svc
463
464#
444# call tracehook_report_syscall_entry/tracehook_report_syscall_exit before 465# call tracehook_report_syscall_entry/tracehook_report_syscall_exit before
445# and after the system call 466# and after the system call
446# 467#
@@ -624,6 +645,7 @@ ENTRY(io_int_handler)
624 lg %r14,__LC_VDSO_PER_CPU 645 lg %r14,__LC_VDSO_PER_CPU
625 lmg %r0,%r10,__PT_R0(%r11) 646 lmg %r0,%r10,__PT_R0(%r11)
626 mvc __LC_RETURN_PSW(16),__PT_PSW(%r11) 647 mvc __LC_RETURN_PSW(16),__PT_PSW(%r11)
648.Lio_exit_timer:
627 stpt __LC_EXIT_TIMER 649 stpt __LC_EXIT_TIMER
628 mvc __VDSO_ECTG_BASE(16,%r14),__LC_EXIT_TIMER 650 mvc __VDSO_ECTG_BASE(16,%r14),__LC_EXIT_TIMER
629 lmg %r11,%r15,__PT_R11(%r11) 651 lmg %r11,%r15,__PT_R11(%r11)
@@ -873,9 +895,7 @@ ENTRY(save_fpu_regs)
873 oi __LC_CPU_FLAGS+7,_CIF_FPU 895 oi __LC_CPU_FLAGS+7,_CIF_FPU
874 br %r14 896 br %r14
875.Lsave_fpu_regs_end: 897.Lsave_fpu_regs_end:
876#if IS_ENABLED(CONFIG_KVM)
877EXPORT_SYMBOL(save_fpu_regs) 898EXPORT_SYMBOL(save_fpu_regs)
878#endif
879 899
880/* 900/*
881 * Load floating-point controls and floating-point or vector registers. 901 * Load floating-point controls and floating-point or vector registers.
@@ -1181,15 +1201,23 @@ cleanup_critical:
1181 br %r14 1201 br %r14
1182 1202
1183.Lcleanup_sysc_restore: 1203.Lcleanup_sysc_restore:
1204 # check if stpt has been executed
1184 clg %r9,BASED(.Lcleanup_sysc_restore_insn) 1205 clg %r9,BASED(.Lcleanup_sysc_restore_insn)
1206 jh 0f
1207 mvc __LC_EXIT_TIMER(8),__LC_ASYNC_ENTER_TIMER
1208 cghi %r11,__LC_SAVE_AREA_ASYNC
1185 je 0f 1209 je 0f
1210 mvc __LC_EXIT_TIMER(8),__LC_MCCK_ENTER_TIMER
12110: clg %r9,BASED(.Lcleanup_sysc_restore_insn+8)
1212 je 1f
1186 lg %r9,24(%r11) # get saved pointer to pt_regs 1213 lg %r9,24(%r11) # get saved pointer to pt_regs
1187 mvc __LC_RETURN_PSW(16),__PT_PSW(%r9) 1214 mvc __LC_RETURN_PSW(16),__PT_PSW(%r9)
1188 mvc 0(64,%r11),__PT_R8(%r9) 1215 mvc 0(64,%r11),__PT_R8(%r9)
1189 lmg %r0,%r7,__PT_R0(%r9) 1216 lmg %r0,%r7,__PT_R0(%r9)
11900: lmg %r8,%r9,__LC_RETURN_PSW 12171: lmg %r8,%r9,__LC_RETURN_PSW
1191 br %r14 1218 br %r14
1192.Lcleanup_sysc_restore_insn: 1219.Lcleanup_sysc_restore_insn:
1220 .quad .Lsysc_exit_timer
1193 .quad .Lsysc_done - 4 1221 .quad .Lsysc_done - 4
1194 1222
1195.Lcleanup_io_tif: 1223.Lcleanup_io_tif:
@@ -1197,15 +1225,20 @@ cleanup_critical:
1197 br %r14 1225 br %r14
1198 1226
1199.Lcleanup_io_restore: 1227.Lcleanup_io_restore:
1228 # check if stpt has been executed
1200 clg %r9,BASED(.Lcleanup_io_restore_insn) 1229 clg %r9,BASED(.Lcleanup_io_restore_insn)
1201 je 0f 1230 jh 0f
1231 mvc __LC_EXIT_TIMER(8),__LC_MCCK_ENTER_TIMER
12320: clg %r9,BASED(.Lcleanup_io_restore_insn+8)
1233 je 1f
1202 lg %r9,24(%r11) # get saved r11 pointer to pt_regs 1234 lg %r9,24(%r11) # get saved r11 pointer to pt_regs
1203 mvc __LC_RETURN_PSW(16),__PT_PSW(%r9) 1235 mvc __LC_RETURN_PSW(16),__PT_PSW(%r9)
1204 mvc 0(64,%r11),__PT_R8(%r9) 1236 mvc 0(64,%r11),__PT_R8(%r9)
1205 lmg %r0,%r7,__PT_R0(%r9) 1237 lmg %r0,%r7,__PT_R0(%r9)
12060: lmg %r8,%r9,__LC_RETURN_PSW 12381: lmg %r8,%r9,__LC_RETURN_PSW
1207 br %r14 1239 br %r14
1208.Lcleanup_io_restore_insn: 1240.Lcleanup_io_restore_insn:
1241 .quad .Lio_exit_timer
1209 .quad .Lio_done - 4 1242 .quad .Lio_done - 4
1210 1243
1211.Lcleanup_idle: 1244.Lcleanup_idle:
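[editor's note] The new _PIF_SYSCALL_RESTART work bit lets kernel code ask the system-call exit path to run the current system call again: .Lsysc_syscall_restart clears the bit, reloads the saved arguments including __PT_ORIG_GPR2, and branches back to .Lsysc_do_svc. A hedged sketch of arming it from C; set_pt_regs_flag() and test_pt_regs_flag() are assumed to exist as counterparts of the clear_pt_regs_flag() used elsewhere in this diff:

/*
 * Sketch only: request that the interrupted system call be re-executed on
 * the way back to user space. The entry.S hunk above does the actual
 * re-dispatch; the helpers used here are assumptions.
 */
static void request_syscall_restart(struct pt_regs *regs)
{
	if (test_pt_regs_flag(regs, PIF_SYSCALL))	/* only while inside a syscall */
		set_pt_regs_flag(regs, PIF_SYSCALL_RESTART);
}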
diff --git a/arch/s390/kernel/ftrace.c b/arch/s390/kernel/ftrace.c
index 27477f34cc0a..d03a6d12c4bd 100644
--- a/arch/s390/kernel/ftrace.c
+++ b/arch/s390/kernel/ftrace.c
@@ -173,6 +173,8 @@ int __init ftrace_dyn_arch_init(void)
173 return 0; 173 return 0;
174} 174}
175 175
176#ifdef CONFIG_MODULES
177
176static int __init ftrace_plt_init(void) 178static int __init ftrace_plt_init(void)
177{ 179{
178 unsigned int *ip; 180 unsigned int *ip;
@@ -191,6 +193,8 @@ static int __init ftrace_plt_init(void)
191} 193}
192device_initcall(ftrace_plt_init); 194device_initcall(ftrace_plt_init);
193 195
196#endif /* CONFIG_MODULES */
197
194#ifdef CONFIG_FUNCTION_GRAPH_TRACER 198#ifdef CONFIG_FUNCTION_GRAPH_TRACER
195/* 199/*
196 * Hook the return address and push it in the stack of return addresses 200 * Hook the return address and push it in the stack of return addresses
diff --git a/arch/s390/kernel/ipl.c b/arch/s390/kernel/ipl.c
index e545ffe5155a..8e622bb52f7a 100644
--- a/arch/s390/kernel/ipl.c
+++ b/arch/s390/kernel/ipl.c
@@ -564,8 +564,6 @@ static struct kset *ipl_kset;
564 564
565static void __ipl_run(void *unused) 565static void __ipl_run(void *unused)
566{ 566{
567 if (MACHINE_IS_LPAR && ipl_info.type == IPL_TYPE_CCW)
568 diag308(DIAG308_LOAD_NORMAL_DUMP, NULL);
569 diag308(DIAG308_LOAD_CLEAR, NULL); 567 diag308(DIAG308_LOAD_CLEAR, NULL);
570 if (MACHINE_IS_VM) 568 if (MACHINE_IS_VM)
571 __cpcmd("IPL", NULL, 0, NULL); 569 __cpcmd("IPL", NULL, 0, NULL);
@@ -1088,10 +1086,7 @@ static void __reipl_run(void *unused)
1088 break; 1086 break;
1089 case REIPL_METHOD_CCW_DIAG: 1087 case REIPL_METHOD_CCW_DIAG:
1090 diag308(DIAG308_SET, reipl_block_ccw); 1088 diag308(DIAG308_SET, reipl_block_ccw);
1091 if (MACHINE_IS_LPAR) 1089 diag308(DIAG308_LOAD_CLEAR, NULL);
1092 diag308(DIAG308_LOAD_NORMAL_DUMP, NULL);
1093 else
1094 diag308(DIAG308_LOAD_CLEAR, NULL);
1095 break; 1090 break;
1096 case REIPL_METHOD_FCP_RW_DIAG: 1091 case REIPL_METHOD_FCP_RW_DIAG:
1097 diag308(DIAG308_SET, reipl_block_fcp); 1092 diag308(DIAG308_SET, reipl_block_fcp);
diff --git a/arch/s390/kernel/perf_cpum_sf.c b/arch/s390/kernel/perf_cpum_sf.c
index ca960d0370d5..0c82f7903fc7 100644
--- a/arch/s390/kernel/perf_cpum_sf.c
+++ b/arch/s390/kernel/perf_cpum_sf.c
@@ -995,11 +995,11 @@ static int perf_push_sample(struct perf_event *event, struct sf_raw_sample *sfr)
995 regs.int_parm = CPU_MF_INT_SF_PRA; 995 regs.int_parm = CPU_MF_INT_SF_PRA;
996 sde_regs = (struct perf_sf_sde_regs *) &regs.int_parm_long; 996 sde_regs = (struct perf_sf_sde_regs *) &regs.int_parm_long;
997 997
998 psw_bits(regs.psw).ia = sfr->basic.ia; 998 psw_bits(regs.psw).ia = sfr->basic.ia;
999 psw_bits(regs.psw).t = sfr->basic.T; 999 psw_bits(regs.psw).dat = sfr->basic.T;
1000 psw_bits(regs.psw).w = sfr->basic.W; 1000 psw_bits(regs.psw).wait = sfr->basic.W;
 1001 psw_bits(regs.psw).p = sfr->basic.P; 1001 psw_bits(regs.psw).pstate = sfr->basic.P;
1002 psw_bits(regs.psw).as = sfr->basic.AS; 1002 psw_bits(regs.psw).as = sfr->basic.AS;
1003 1003
1004 /* 1004 /*
1005 * Use the hardware provided configuration level to decide if the 1005 * Use the hardware provided configuration level to decide if the
diff --git a/arch/s390/kernel/perf_event.c b/arch/s390/kernel/perf_event.c
index 955a7b6fa0a4..93a386f4a3b5 100644
--- a/arch/s390/kernel/perf_event.c
+++ b/arch/s390/kernel/perf_event.c
@@ -245,6 +245,5 @@ ssize_t cpumf_events_sysfs_show(struct device *dev,
245 struct perf_pmu_events_attr *pmu_attr; 245 struct perf_pmu_events_attr *pmu_attr;
246 246
247 pmu_attr = container_of(attr, struct perf_pmu_events_attr, attr); 247 pmu_attr = container_of(attr, struct perf_pmu_events_attr, attr);
248 return sprintf(page, "event=0x%04llx,name=%s\n", 248 return sprintf(page, "event=0x%04llx\n", pmu_attr->id);
249 pmu_attr->id, attr->attr.name);
250} 249}
diff --git a/arch/s390/kernel/ptrace.c b/arch/s390/kernel/ptrace.c
index 488c5bb8dc77..252ed61a128b 100644
--- a/arch/s390/kernel/ptrace.c
+++ b/arch/s390/kernel/ptrace.c
@@ -1160,6 +1160,8 @@ static int s390_gs_cb_get(struct task_struct *target,
1160 return -ENODEV; 1160 return -ENODEV;
1161 if (!data) 1161 if (!data)
1162 return -ENODATA; 1162 return -ENODATA;
1163 if (target == current)
1164 save_gs_cb(data);
1163 return user_regset_copyout(&pos, &count, &kbuf, &ubuf, 1165 return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
1164 data, 0, sizeof(struct gs_cb)); 1166 data, 0, sizeof(struct gs_cb));
1165} 1167}
@@ -1170,6 +1172,7 @@ static int s390_gs_cb_set(struct task_struct *target,
1170 const void *kbuf, const void __user *ubuf) 1172 const void *kbuf, const void __user *ubuf)
1171{ 1173{
1172 struct gs_cb *data = target->thread.gs_cb; 1174 struct gs_cb *data = target->thread.gs_cb;
1175 int rc;
1173 1176
1174 if (!MACHINE_HAS_GS) 1177 if (!MACHINE_HAS_GS)
1175 return -ENODEV; 1178 return -ENODEV;
@@ -1177,10 +1180,18 @@ static int s390_gs_cb_set(struct task_struct *target,
1177 data = kzalloc(sizeof(*data), GFP_KERNEL); 1180 data = kzalloc(sizeof(*data), GFP_KERNEL);
1178 if (!data) 1181 if (!data)
1179 return -ENOMEM; 1182 return -ENOMEM;
1183 data->gsd = 25;
1180 target->thread.gs_cb = data; 1184 target->thread.gs_cb = data;
1185 if (target == current)
1186 __ctl_set_bit(2, 4);
1187 } else if (target == current) {
1188 save_gs_cb(data);
1181 } 1189 }
1182 return user_regset_copyin(&pos, &count, &kbuf, &ubuf, 1190 rc = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1183 data, 0, sizeof(struct gs_cb)); 1191 data, 0, sizeof(struct gs_cb));
1192 if (target == current)
1193 restore_gs_cb(data);
1194 return rc;
1184} 1195}
1185 1196
1186static int s390_gs_bc_get(struct task_struct *target, 1197static int s390_gs_bc_get(struct task_struct *target,
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index 363000a77ffc..1020a11a24e5 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -26,6 +26,7 @@
26#include <linux/err.h> 26#include <linux/err.h>
27#include <linux/spinlock.h> 27#include <linux/spinlock.h>
28#include <linux/kernel_stat.h> 28#include <linux/kernel_stat.h>
29#include <linux/kmemleak.h>
29#include <linux/delay.h> 30#include <linux/delay.h>
30#include <linux/interrupt.h> 31#include <linux/interrupt.h>
31#include <linux/irqflags.h> 32#include <linux/irqflags.h>
@@ -207,6 +208,8 @@ static int pcpu_alloc_lowcore(struct pcpu *pcpu, int cpu)
207 kmem_cache_alloc(pcpu_mcesa_cache, GFP_KERNEL); 208 kmem_cache_alloc(pcpu_mcesa_cache, GFP_KERNEL);
208 if (!mcesa_origin) 209 if (!mcesa_origin)
209 goto out; 210 goto out;
211 /* The pointer is stored with mcesa_bits ORed in */
212 kmemleak_not_leak((void *) mcesa_origin);
210 mcesa_bits = MACHINE_HAS_GS ? 11 : 0; 213 mcesa_bits = MACHINE_HAS_GS ? 11 : 0;
211 } 214 }
212 } else { 215 } else {
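[editor's note] The machine-check extended save area pointer is stored with flag bits ORed into its low bits, so kmemleak can no longer see a plain pointer to the allocation and would report it as a leak; kmemleak_not_leak() marks the object as intentionally unreferenced. A minimal sketch of the same pattern on a made-up allocation (TAG_BITS is invented for the illustration):

/*
 * Sketch only: the real pointer is kept exclusively in tagged form, so the
 * allocation is explicitly excluded from kmemleak scanning.
 */
#include <linux/kmemleak.h>
#include <linux/slab.h>

#define TAG_BITS 0x3UL	/* hypothetical flag bits stored in the pointer */

static unsigned long alloc_tagged(size_t size, unsigned long tag)
{
	void *p = kzalloc(size, GFP_KERNEL);

	if (!p)
		return 0;
	kmemleak_not_leak(p);			/* pointer will only exist in tagged form */
	return (unsigned long) p | (tag & TAG_BITS);
}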
diff --git a/arch/s390/kernel/traps.c b/arch/s390/kernel/traps.c
index f787b9d8f54c..442e5423ce3d 100644
--- a/arch/s390/kernel/traps.c
+++ b/arch/s390/kernel/traps.c
@@ -21,6 +21,7 @@
21#include <linux/mm.h> 21#include <linux/mm.h>
22#include <linux/slab.h> 22#include <linux/slab.h>
23#include <linux/uaccess.h> 23#include <linux/uaccess.h>
24#include <linux/cpu.h>
24#include <asm/fpu/api.h> 25#include <asm/fpu/api.h>
25#include "entry.h" 26#include "entry.h"
26 27
diff --git a/arch/s390/kernel/uprobes.c b/arch/s390/kernel/uprobes.c
index 314e0ee3016a..d94baa8db507 100644
--- a/arch/s390/kernel/uprobes.c
+++ b/arch/s390/kernel/uprobes.c
@@ -27,12 +27,12 @@ int arch_uprobe_analyze_insn(struct arch_uprobe *auprobe, struct mm_struct *mm,
27 27
28int arch_uprobe_pre_xol(struct arch_uprobe *auprobe, struct pt_regs *regs) 28int arch_uprobe_pre_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
29{ 29{
30 if (psw_bits(regs->psw).eaba == PSW_AMODE_24BIT) 30 if (psw_bits(regs->psw).eaba == PSW_BITS_AMODE_24BIT)
31 return -EINVAL; 31 return -EINVAL;
32 if (!is_compat_task() && psw_bits(regs->psw).eaba == PSW_AMODE_31BIT) 32 if (!is_compat_task() && psw_bits(regs->psw).eaba == PSW_BITS_AMODE_31BIT)
33 return -EINVAL; 33 return -EINVAL;
34 clear_pt_regs_flag(regs, PIF_PER_TRAP); 34 clear_pt_regs_flag(regs, PIF_PER_TRAP);
35 auprobe->saved_per = psw_bits(regs->psw).r; 35 auprobe->saved_per = psw_bits(regs->psw).per;
36 auprobe->saved_int_code = regs->int_code; 36 auprobe->saved_int_code = regs->int_code;
37 regs->int_code = UPROBE_TRAP_NR; 37 regs->int_code = UPROBE_TRAP_NR;
38 regs->psw.addr = current->utask->xol_vaddr; 38 regs->psw.addr = current->utask->xol_vaddr;
@@ -81,7 +81,7 @@ int arch_uprobe_post_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
81 81
82 clear_tsk_thread_flag(current, TIF_UPROBE_SINGLESTEP); 82 clear_tsk_thread_flag(current, TIF_UPROBE_SINGLESTEP);
83 update_cr_regs(current); 83 update_cr_regs(current);
84 psw_bits(regs->psw).r = auprobe->saved_per; 84 psw_bits(regs->psw).per = auprobe->saved_per;
85 regs->int_code = auprobe->saved_int_code; 85 regs->int_code = auprobe->saved_int_code;
86 86
87 if (fixup & FIXUP_PSW_NORMAL) 87 if (fixup & FIXUP_PSW_NORMAL)
@@ -372,8 +372,8 @@ static void handle_insn_ril(struct arch_uprobe *auprobe, struct pt_regs *regs)
372 372
373bool arch_uprobe_skip_sstep(struct arch_uprobe *auprobe, struct pt_regs *regs) 373bool arch_uprobe_skip_sstep(struct arch_uprobe *auprobe, struct pt_regs *regs)
374{ 374{
375 if ((psw_bits(regs->psw).eaba == PSW_AMODE_24BIT) || 375 if ((psw_bits(regs->psw).eaba == PSW_BITS_AMODE_24BIT) ||
376 ((psw_bits(regs->psw).eaba == PSW_AMODE_31BIT) && 376 ((psw_bits(regs->psw).eaba == PSW_BITS_AMODE_31BIT) &&
377 !is_compat_task())) { 377 !is_compat_task())) {
378 regs->psw.addr = __rewind_psw(regs->psw, UPROBE_SWBP_INSN_SIZE); 378 regs->psw.addr = __rewind_psw(regs->psw, UPROBE_SWBP_INSN_SIZE);
379 do_report_trap(regs, SIGILL, ILL_ILLADR, NULL); 379 do_report_trap(regs, SIGILL, ILL_ILLADR, NULL);
diff --git a/arch/s390/kernel/vdso.c b/arch/s390/kernel/vdso.c
index 10516ae3b55e..b89d19f6f2ab 100644
--- a/arch/s390/kernel/vdso.c
+++ b/arch/s390/kernel/vdso.c
@@ -50,6 +50,56 @@ static struct page **vdso64_pagelist;
50 */ 50 */
51unsigned int __read_mostly vdso_enabled = 1; 51unsigned int __read_mostly vdso_enabled = 1;
52 52
53static int vdso_fault(const struct vm_special_mapping *sm,
54 struct vm_area_struct *vma, struct vm_fault *vmf)
55{
56 struct page **vdso_pagelist;
57 unsigned long vdso_pages;
58
59 vdso_pagelist = vdso64_pagelist;
60 vdso_pages = vdso64_pages;
61#ifdef CONFIG_COMPAT
62 if (is_compat_task()) {
63 vdso_pagelist = vdso32_pagelist;
64 vdso_pages = vdso32_pages;
65 }
66#endif
67
68 if (vmf->pgoff >= vdso_pages)
69 return VM_FAULT_SIGBUS;
70
71 vmf->page = vdso_pagelist[vmf->pgoff];
72 get_page(vmf->page);
73 return 0;
74}
75
76static int vdso_mremap(const struct vm_special_mapping *sm,
77 struct vm_area_struct *vma)
78{
79 unsigned long vdso_pages;
80
81 vdso_pages = vdso64_pages;
82#ifdef CONFIG_COMPAT
83 if (is_compat_task())
84 vdso_pages = vdso32_pages;
85#endif
86
87 if ((vdso_pages << PAGE_SHIFT) != vma->vm_end - vma->vm_start)
88 return -EINVAL;
89
90 if (WARN_ON_ONCE(current->mm != vma->vm_mm))
91 return -EFAULT;
92
93 current->mm->context.vdso_base = vma->vm_start;
94 return 0;
95}
96
97static const struct vm_special_mapping vdso_mapping = {
98 .name = "[vdso]",
99 .fault = vdso_fault,
100 .mremap = vdso_mremap,
101};
102
53static int __init vdso_setup(char *s) 103static int __init vdso_setup(char *s)
54{ 104{
55 unsigned long val; 105 unsigned long val;
@@ -181,7 +231,7 @@ static void vdso_init_cr5(void)
181int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp) 231int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
182{ 232{
183 struct mm_struct *mm = current->mm; 233 struct mm_struct *mm = current->mm;
184 struct page **vdso_pagelist; 234 struct vm_area_struct *vma;
185 unsigned long vdso_pages; 235 unsigned long vdso_pages;
186 unsigned long vdso_base; 236 unsigned long vdso_base;
187 int rc; 237 int rc;
@@ -194,13 +244,10 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
194 if (!uses_interp) 244 if (!uses_interp)
195 return 0; 245 return 0;
196 246
197 vdso_pagelist = vdso64_pagelist;
198 vdso_pages = vdso64_pages; 247 vdso_pages = vdso64_pages;
199#ifdef CONFIG_COMPAT 248#ifdef CONFIG_COMPAT
200 if (is_compat_task()) { 249 if (is_compat_task())
201 vdso_pagelist = vdso32_pagelist;
202 vdso_pages = vdso32_pages; 250 vdso_pages = vdso32_pages;
203 }
204#endif 251#endif
205 /* 252 /*
206 * vDSO has a problem and was disabled, just don't "enable" it for 253 * vDSO has a problem and was disabled, just don't "enable" it for
@@ -209,8 +256,6 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
209 if (vdso_pages == 0) 256 if (vdso_pages == 0)
210 return 0; 257 return 0;
211 258
212 current->mm->context.vdso_base = 0;
213
214 /* 259 /*
215 * pick a base address for the vDSO in process space. We try to put 260 * pick a base address for the vDSO in process space. We try to put
216 * it at vdso_base which is the "natural" base for it, but we might 261 * it at vdso_base which is the "natural" base for it, but we might
@@ -225,13 +270,6 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
225 } 270 }
226 271
227 /* 272 /*
228 * Put vDSO base into mm struct. We need to do this before calling
229 * install_special_mapping or the perf counter mmap tracking code
230 * will fail to recognise it as a vDSO (since arch_vma_name fails).
231 */
232 current->mm->context.vdso_base = vdso_base;
233
234 /*
235 * our vma flags don't have VM_WRITE so by default, the process 273 * our vma flags don't have VM_WRITE so by default, the process
236 * isn't allowed to write those pages. 274 * isn't allowed to write those pages.
237 * gdb can break that with ptrace interface, and thus trigger COW 275 * gdb can break that with ptrace interface, and thus trigger COW
@@ -241,24 +279,23 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
241 * It's fine to use that for setting breakpoints in the vDSO code 279 * It's fine to use that for setting breakpoints in the vDSO code
242 * pages though. 280 * pages though.
243 */ 281 */
244 rc = install_special_mapping(mm, vdso_base, vdso_pages << PAGE_SHIFT, 282 vma = _install_special_mapping(mm, vdso_base, vdso_pages << PAGE_SHIFT,
245 VM_READ|VM_EXEC| 283 VM_READ|VM_EXEC|
246 VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC, 284 VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
247 vdso_pagelist); 285 &vdso_mapping);
248 if (rc) 286 if (IS_ERR(vma)) {
249 current->mm->context.vdso_base = 0; 287 rc = PTR_ERR(vma);
288 goto out_up;
289 }
290
291 current->mm->context.vdso_base = vdso_base;
292 rc = 0;
293
250out_up: 294out_up:
251 up_write(&mm->mmap_sem); 295 up_write(&mm->mmap_sem);
252 return rc; 296 return rc;
253} 297}
254 298
255const char *arch_vma_name(struct vm_area_struct *vma)
256{
257 if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso_base)
258 return "[vdso]";
259 return NULL;
260}
261
262static int __init vdso_init(void) 299static int __init vdso_init(void)
263{ 300{
264 int i; 301 int i;
diff --git a/arch/s390/kernel/vmlinux.lds.S b/arch/s390/kernel/vmlinux.lds.S
index 72307f108c40..6e2c42bd1c3b 100644
--- a/arch/s390/kernel/vmlinux.lds.S
+++ b/arch/s390/kernel/vmlinux.lds.S
@@ -31,8 +31,14 @@ SECTIONS
31{ 31{
32 . = 0x00000000; 32 . = 0x00000000;
33 .text : { 33 .text : {
34 _text = .; /* Text and read-only data */ 34 /* Text and read-only data */
35 HEAD_TEXT 35 HEAD_TEXT
36 /*
37 * E.g. perf doesn't like symbols starting at address zero,
38 * therefore skip the initial PSW and channel program located
39 * at address zero and let _text start at 0x200.
40 */
41 _text = 0x200;
36 TEXT_TEXT 42 TEXT_TEXT
37 SCHED_TEXT 43 SCHED_TEXT
38 CPUIDLE_TEXT 44 CPUIDLE_TEXT
diff --git a/arch/s390/kernel/vtime.c b/arch/s390/kernel/vtime.c
index 072d84ba42a3..dd7178fbb4f3 100644
--- a/arch/s390/kernel/vtime.c
+++ b/arch/s390/kernel/vtime.c
@@ -110,11 +110,10 @@ static inline u64 scale_vtime(u64 vtime)
110 return vtime; 110 return vtime;
111} 111}
112 112
113static void account_system_index_scaled(struct task_struct *p, 113static void account_system_index_scaled(struct task_struct *p, u64 cputime,
114 u64 cputime, u64 scaled,
115 enum cpu_usage_stat index) 114 enum cpu_usage_stat index)
116{ 115{
117 p->stimescaled += cputime_to_nsecs(scaled); 116 p->stimescaled += cputime_to_nsecs(scale_vtime(cputime));
118 account_system_index_time(p, cputime_to_nsecs(cputime), index); 117 account_system_index_time(p, cputime_to_nsecs(cputime), index);
119} 118}
120 119
@@ -176,14 +175,11 @@ static int do_account_vtime(struct task_struct *tsk)
176 } 175 }
177 176
178 if (system) 177 if (system)
179 account_system_index_scaled(tsk, system, scale_vtime(system), 178 account_system_index_scaled(tsk, system, CPUTIME_SYSTEM);
180 CPUTIME_SYSTEM);
181 if (hardirq) 179 if (hardirq)
182 account_system_index_scaled(tsk, hardirq, scale_vtime(hardirq), 180 account_system_index_scaled(tsk, hardirq, CPUTIME_IRQ);
183 CPUTIME_IRQ);
184 if (softirq) 181 if (softirq)
185 account_system_index_scaled(tsk, softirq, scale_vtime(softirq), 182 account_system_index_scaled(tsk, softirq, CPUTIME_SOFTIRQ);
186 CPUTIME_SOFTIRQ);
187 183
188 steal = S390_lowcore.steal_timer; 184 steal = S390_lowcore.steal_timer;
189 if ((s64) steal > 0) { 185 if ((s64) steal > 0) {
diff --git a/arch/s390/kvm/gaccess.c b/arch/s390/kvm/gaccess.c
index 9da243d94cc3..e0f7d5fc7efd 100644
--- a/arch/s390/kvm/gaccess.c
+++ b/arch/s390/kvm/gaccess.c
@@ -551,26 +551,26 @@ static int get_vcpu_asce(struct kvm_vcpu *vcpu, union asce *asce,
551 int rc; 551 int rc;
552 struct psw_bits psw = psw_bits(vcpu->arch.sie_block->gpsw); 552 struct psw_bits psw = psw_bits(vcpu->arch.sie_block->gpsw);
553 553
554 if (!psw.t) { 554 if (!psw.dat) {
555 asce->val = 0; 555 asce->val = 0;
556 asce->r = 1; 556 asce->r = 1;
557 return 0; 557 return 0;
558 } 558 }
559 559
560 if (mode == GACC_IFETCH) 560 if ((mode == GACC_IFETCH) && (psw.as != PSW_BITS_AS_HOME))
561 psw.as = psw.as == PSW_AS_HOME ? PSW_AS_HOME : PSW_AS_PRIMARY; 561 psw.as = PSW_BITS_AS_PRIMARY;
562 562
563 switch (psw.as) { 563 switch (psw.as) {
564 case PSW_AS_PRIMARY: 564 case PSW_BITS_AS_PRIMARY:
565 asce->val = vcpu->arch.sie_block->gcr[1]; 565 asce->val = vcpu->arch.sie_block->gcr[1];
566 return 0; 566 return 0;
567 case PSW_AS_SECONDARY: 567 case PSW_BITS_AS_SECONDARY:
568 asce->val = vcpu->arch.sie_block->gcr[7]; 568 asce->val = vcpu->arch.sie_block->gcr[7];
569 return 0; 569 return 0;
570 case PSW_AS_HOME: 570 case PSW_BITS_AS_HOME:
571 asce->val = vcpu->arch.sie_block->gcr[13]; 571 asce->val = vcpu->arch.sie_block->gcr[13];
572 return 0; 572 return 0;
573 case PSW_AS_ACCREG: 573 case PSW_BITS_AS_ACCREG:
574 rc = ar_translation(vcpu, asce, ar, mode); 574 rc = ar_translation(vcpu, asce, ar, mode);
575 if (rc > 0) 575 if (rc > 0)
576 return trans_exc(vcpu, rc, ga, ar, mode, PROT_TYPE_ALC); 576 return trans_exc(vcpu, rc, ga, ar, mode, PROT_TYPE_ALC);
@@ -771,7 +771,7 @@ static int low_address_protection_enabled(struct kvm_vcpu *vcpu,
771 771
772 if (!ctlreg0.lap) 772 if (!ctlreg0.lap)
773 return 0; 773 return 0;
774 if (psw_bits(*psw).t && asce.p) 774 if (psw_bits(*psw).dat && asce.p)
775 return 0; 775 return 0;
776 return 1; 776 return 1;
777} 777}
@@ -790,7 +790,7 @@ static int guest_page_range(struct kvm_vcpu *vcpu, unsigned long ga, u8 ar,
790 return trans_exc(vcpu, PGM_PROTECTION, ga, ar, mode, 790 return trans_exc(vcpu, PGM_PROTECTION, ga, ar, mode,
791 PROT_TYPE_LA); 791 PROT_TYPE_LA);
792 ga &= PAGE_MASK; 792 ga &= PAGE_MASK;
793 if (psw_bits(*psw).t) { 793 if (psw_bits(*psw).dat) {
794 rc = guest_translate(vcpu, ga, pages, asce, mode); 794 rc = guest_translate(vcpu, ga, pages, asce, mode);
795 if (rc < 0) 795 if (rc < 0)
796 return rc; 796 return rc;
@@ -831,7 +831,7 @@ int access_guest(struct kvm_vcpu *vcpu, unsigned long ga, u8 ar, void *data,
831 pages = vmalloc(nr_pages * sizeof(unsigned long)); 831 pages = vmalloc(nr_pages * sizeof(unsigned long));
832 if (!pages) 832 if (!pages)
833 return -ENOMEM; 833 return -ENOMEM;
834 need_ipte_lock = psw_bits(*psw).t && !asce.r; 834 need_ipte_lock = psw_bits(*psw).dat && !asce.r;
835 if (need_ipte_lock) 835 if (need_ipte_lock)
836 ipte_lock(vcpu); 836 ipte_lock(vcpu);
837 rc = guest_page_range(vcpu, ga, ar, pages, nr_pages, asce, mode); 837 rc = guest_page_range(vcpu, ga, ar, pages, nr_pages, asce, mode);
@@ -899,7 +899,7 @@ int guest_translate_address(struct kvm_vcpu *vcpu, unsigned long gva, u8 ar,
899 mode, PROT_TYPE_LA); 899 mode, PROT_TYPE_LA);
900 } 900 }
901 901
902 if (psw_bits(*psw).t && !asce.r) { /* Use DAT? */ 902 if (psw_bits(*psw).dat && !asce.r) { /* Use DAT? */
903 rc = guest_translate(vcpu, gva, gpa, asce, mode); 903 rc = guest_translate(vcpu, gva, gpa, asce, mode);
904 if (rc > 0) 904 if (rc > 0)
905 return trans_exc(vcpu, rc, gva, 0, mode, PROT_TYPE_DAT); 905 return trans_exc(vcpu, rc, gva, 0, mode, PROT_TYPE_DAT);
diff --git a/arch/s390/kvm/gaccess.h b/arch/s390/kvm/gaccess.h
index 7ce47fd36f28..bec42b852246 100644
--- a/arch/s390/kvm/gaccess.h
+++ b/arch/s390/kvm/gaccess.h
@@ -57,9 +57,9 @@ static inline unsigned long kvm_s390_logical_to_effective(struct kvm_vcpu *vcpu,
57{ 57{
58 psw_t *psw = &vcpu->arch.sie_block->gpsw; 58 psw_t *psw = &vcpu->arch.sie_block->gpsw;
59 59
60 if (psw_bits(*psw).eaba == PSW_AMODE_64BIT) 60 if (psw_bits(*psw).eaba == PSW_BITS_AMODE_64BIT)
61 return ga; 61 return ga;
62 if (psw_bits(*psw).eaba == PSW_AMODE_31BIT) 62 if (psw_bits(*psw).eaba == PSW_BITS_AMODE_31BIT)
63 return ga & ((1UL << 31) - 1); 63 return ga & ((1UL << 31) - 1);
64 return ga & ((1UL << 24) - 1); 64 return ga & ((1UL << 24) - 1);
65} 65}
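[editor's note] kvm_s390_logical_to_effective() truncates a guest logical address to the width of the current addressing mode, now selected via the PSW_BITS_AMODE_* names. A quick userspace illustration of the masking for the three modes:

/*
 * Illustration only (userspace): the masking performed by
 * kvm_s390_logical_to_effective() for 64-, 31- and 24-bit modes.
 */
#include <stdio.h>

static unsigned long to_effective(unsigned long ga, int amode_bits)
{
	if (amode_bits == 64)
		return ga;
	if (amode_bits == 31)
		return ga & ((1UL << 31) - 1);
	return ga & ((1UL << 24) - 1);	/* 24-bit mode */
}

int main(void)
{
	printf("%#lx\n", to_effective(0x123456789abcUL, 31));	/* 0x56789abc */
	return 0;
}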
diff --git a/arch/s390/kvm/guestdbg.c b/arch/s390/kvm/guestdbg.c
index 23d9a4e12da1..c2e0ddc1356e 100644
--- a/arch/s390/kvm/guestdbg.c
+++ b/arch/s390/kvm/guestdbg.c
@@ -613,15 +613,15 @@ int kvm_s390_handle_per_event(struct kvm_vcpu *vcpu)
613 * instruction. Check primary and home space-switch-event 613 * instruction. Check primary and home space-switch-event
614 * controls. (theoretically home -> home produced no event) 614 * controls. (theoretically home -> home produced no event)
615 */ 615 */
616 if (((new_as == PSW_AS_HOME) ^ old_as_is_home(vcpu)) && 616 if (((new_as == PSW_BITS_AS_HOME) ^ old_as_is_home(vcpu)) &&
617 (pssec(vcpu) || hssec(vcpu))) 617 (pssec(vcpu) || hssec(vcpu)))
618 vcpu->arch.sie_block->iprcc = PGM_SPACE_SWITCH; 618 vcpu->arch.sie_block->iprcc = PGM_SPACE_SWITCH;
619 619
620 /* 620 /*
621 * PT, PTI, PR, PC instruction operate on primary AS only. Check 621 * PT, PTI, PR, PC instruction operate on primary AS only. Check
622 * if the primary-space-switch-event control was or got set. 622 * if the primary-space-switch-event control was or got set.
623 */ 623 */
624 if (new_as == PSW_AS_PRIMARY && !old_as_is_home(vcpu) && 624 if (new_as == PSW_BITS_AS_PRIMARY && !old_as_is_home(vcpu) &&
625 (pssec(vcpu) || old_ssec(vcpu))) 625 (pssec(vcpu) || old_ssec(vcpu)))
626 vcpu->arch.sie_block->iprcc = PGM_SPACE_SWITCH; 626 vcpu->arch.sie_block->iprcc = PGM_SPACE_SWITCH;
627 } 627 }
diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c
index c03106c428cf..e53292a89257 100644
--- a/arch/s390/kvm/priv.c
+++ b/arch/s390/kvm/priv.c
@@ -361,7 +361,7 @@ static int handle_sske(struct kvm_vcpu *vcpu)
361 } 361 }
362 } 362 }
363 if (m3 & SSKE_MB) { 363 if (m3 & SSKE_MB) {
364 if (psw_bits(vcpu->arch.sie_block->gpsw).eaba == PSW_AMODE_64BIT) 364 if (psw_bits(vcpu->arch.sie_block->gpsw).eaba == PSW_BITS_AMODE_64BIT)
365 vcpu->run->s.regs.gprs[reg2] &= ~PAGE_MASK; 365 vcpu->run->s.regs.gprs[reg2] &= ~PAGE_MASK;
366 else 366 else
367 vcpu->run->s.regs.gprs[reg2] &= ~0xfffff000UL; 367 vcpu->run->s.regs.gprs[reg2] &= ~0xfffff000UL;
@@ -374,7 +374,7 @@ static int handle_sske(struct kvm_vcpu *vcpu)
374static int handle_ipte_interlock(struct kvm_vcpu *vcpu) 374static int handle_ipte_interlock(struct kvm_vcpu *vcpu)
375{ 375{
376 vcpu->stat.instruction_ipte_interlock++; 376 vcpu->stat.instruction_ipte_interlock++;
377 if (psw_bits(vcpu->arch.sie_block->gpsw).p) 377 if (psw_bits(vcpu->arch.sie_block->gpsw).pstate)
378 return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); 378 return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
379 wait_event(vcpu->kvm->arch.ipte_wq, !ipte_lock_held(vcpu)); 379 wait_event(vcpu->kvm->arch.ipte_wq, !ipte_lock_held(vcpu));
380 kvm_s390_retry_instr(vcpu); 380 kvm_s390_retry_instr(vcpu);
@@ -901,7 +901,7 @@ static int handle_pfmf(struct kvm_vcpu *vcpu)
901 /* only support 2G frame size if EDAT2 is available and we are 901 /* only support 2G frame size if EDAT2 is available and we are
902 not in 24-bit addressing mode */ 902 not in 24-bit addressing mode */
903 if (!test_kvm_facility(vcpu->kvm, 78) || 903 if (!test_kvm_facility(vcpu->kvm, 78) ||
904 psw_bits(vcpu->arch.sie_block->gpsw).eaba == PSW_AMODE_24BIT) 904 psw_bits(vcpu->arch.sie_block->gpsw).eaba == PSW_BITS_AMODE_24BIT)
905 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); 905 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
906 end = (start + (1UL << 31)) & ~((1UL << 31) - 1); 906 end = (start + (1UL << 31)) & ~((1UL << 31) - 1);
907 break; 907 break;
@@ -938,7 +938,7 @@ static int handle_pfmf(struct kvm_vcpu *vcpu)
938 start += PAGE_SIZE; 938 start += PAGE_SIZE;
939 } 939 }
940 if (vcpu->run->s.regs.gprs[reg1] & PFMF_FSC) { 940 if (vcpu->run->s.regs.gprs[reg1] & PFMF_FSC) {
941 if (psw_bits(vcpu->arch.sie_block->gpsw).eaba == PSW_AMODE_64BIT) { 941 if (psw_bits(vcpu->arch.sie_block->gpsw).eaba == PSW_BITS_AMODE_64BIT) {
942 vcpu->run->s.regs.gprs[reg2] = end; 942 vcpu->run->s.regs.gprs[reg2] = end;
943 } else { 943 } else {
944 vcpu->run->s.regs.gprs[reg2] &= ~0xffffffffUL; 944 vcpu->run->s.regs.gprs[reg2] &= ~0xffffffffUL;
diff --git a/arch/s390/lib/probes.c b/arch/s390/lib/probes.c
index ae90e1ae3607..1963ddbf4ab3 100644
--- a/arch/s390/lib/probes.c
+++ b/arch/s390/lib/probes.c
@@ -4,6 +4,7 @@
4 * Copyright IBM Corp. 2014 4 * Copyright IBM Corp. 2014
5 */ 5 */
6 6
7#include <linux/errno.h>
7#include <asm/kprobes.h> 8#include <asm/kprobes.h>
8#include <asm/dis.h> 9#include <asm/dis.h>
9 10
diff --git a/arch/s390/lib/uaccess.c b/arch/s390/lib/uaccess.c
index 1e5bb2b86c42..b3bd3f23b8e8 100644
--- a/arch/s390/lib/uaccess.c
+++ b/arch/s390/lib/uaccess.c
@@ -337,8 +337,8 @@ long __strncpy_from_user(char *dst, const char __user *src, long size)
337 return 0; 337 return 0;
338 done = 0; 338 done = 0;
339 do { 339 do {
340 offset = (size_t)src & ~PAGE_MASK; 340 offset = (size_t)src & (L1_CACHE_BYTES - 1);
341 len = min(size - done, PAGE_SIZE - offset); 341 len = min(size - done, L1_CACHE_BYTES - offset);
342 if (copy_from_user(dst, src, len)) 342 if (copy_from_user(dst, src, len))
343 return -EFAULT; 343 return -EFAULT;
344 len_str = strnlen(dst, len); 344 len_str = strnlen(dst, len);
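
The uaccess.c hunk switches __strncpy_from_user() from page-sized chunks to cache-line-sized chunks. A compilable sketch of the same copy-until-NUL loop, with memcpy() standing in for copy_from_user() and a made-up CHUNK constant in place of L1_CACHE_BYTES:

/* Sketch only: memcpy() plays the role of copy_from_user(),
 * CHUNK the role of L1_CACHE_BYTES. */
#include <stddef.h>
#include <stdio.h>
#include <string.h>

#define CHUNK 256UL

static long copy_string_chunked(char *dst, const char *src, long size)
{
	size_t offset, len, len_str;
	long done = 0;

	do {
		/* never cross a CHUNK boundary in a single copy */
		offset = (size_t)src & (CHUNK - 1);
		len = (size_t)(size - done) < CHUNK - offset ?
		      (size_t)(size - done) : CHUNK - offset;
		memcpy(dst, src, len);
		len_str = strnlen(dst, len);
		done += len_str;
		src += len_str;
		dst += len_str;
	} while (len_str == len && done < size);
	return done;
}

int main(void)
{
	char src[256] = "hello, chunked world";
	char dst[64];
	long n = copy_string_chunked(dst, src, (long)sizeof(dst) - 1);

	dst[n] = '\0';
	printf("copied %ld bytes: %s\n", n, dst);
	return 0;
}
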
diff --git a/arch/s390/mm/dump_pagetables.c b/arch/s390/mm/dump_pagetables.c
index 1b553d847140..049c3c455b32 100644
--- a/arch/s390/mm/dump_pagetables.c
+++ b/arch/s390/mm/dump_pagetables.c
@@ -149,7 +149,7 @@ static void walk_pmd_level(struct seq_file *m, struct pg_state *st,
149} 149}
150 150
151static void walk_pud_level(struct seq_file *m, struct pg_state *st, 151static void walk_pud_level(struct seq_file *m, struct pg_state *st,
152 pgd_t *pgd, unsigned long addr) 152 p4d_t *p4d, unsigned long addr)
153{ 153{
154 unsigned int prot; 154 unsigned int prot;
155 pud_t *pud; 155 pud_t *pud;
@@ -157,7 +157,7 @@ static void walk_pud_level(struct seq_file *m, struct pg_state *st,
157 157
158 for (i = 0; i < PTRS_PER_PUD && addr < max_addr; i++) { 158 for (i = 0; i < PTRS_PER_PUD && addr < max_addr; i++) {
159 st->current_address = addr; 159 st->current_address = addr;
160 pud = pud_offset(pgd, addr); 160 pud = pud_offset(p4d, addr);
161 if (!pud_none(*pud)) 161 if (!pud_none(*pud))
162 if (pud_large(*pud)) { 162 if (pud_large(*pud)) {
163 prot = pud_val(*pud) & 163 prot = pud_val(*pud) &
@@ -172,6 +172,23 @@ static void walk_pud_level(struct seq_file *m, struct pg_state *st,
172 } 172 }
173} 173}
174 174
175static void walk_p4d_level(struct seq_file *m, struct pg_state *st,
176 pgd_t *pgd, unsigned long addr)
177{
178 p4d_t *p4d;
179 int i;
180
181 for (i = 0; i < PTRS_PER_P4D && addr < max_addr; i++) {
182 st->current_address = addr;
183 p4d = p4d_offset(pgd, addr);
184 if (!p4d_none(*p4d))
185 walk_pud_level(m, st, p4d, addr);
186 else
187 note_page(m, st, _PAGE_INVALID, 2);
188 addr += P4D_SIZE;
189 }
190}
191
175static void walk_pgd_level(struct seq_file *m) 192static void walk_pgd_level(struct seq_file *m)
176{ 193{
177 unsigned long addr = 0; 194 unsigned long addr = 0;
@@ -184,7 +201,7 @@ static void walk_pgd_level(struct seq_file *m)
184 st.current_address = addr; 201 st.current_address = addr;
185 pgd = pgd_offset_k(addr); 202 pgd = pgd_offset_k(addr);
186 if (!pgd_none(*pgd)) 203 if (!pgd_none(*pgd))
187 walk_pud_level(m, &st, pgd, addr); 204 walk_p4d_level(m, &st, pgd, addr);
188 else 205 else
189 note_page(m, &st, _PAGE_INVALID, 1); 206 note_page(m, &st, _PAGE_INVALID, 1);
190 addr += PGDIR_SIZE; 207 addr += PGDIR_SIZE;
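
The dump_pagetables.c change inserts a p4d level between pgd and pud; the same pattern repeats in the gmap, gup, hugetlb, pageattr, pgtable and vmem hunks further down. A toy, self-contained model of the resulting five-level descent; the 9-bits-per-level split is invented for the example and is not the s390 region/segment layout. Each real walker bails out at a level when the entry is empty (pXd_none()).

/* Toy model of the pgd -> p4d -> pud -> pmd -> pte walk order. */
#include <stdio.h>

#define LEVELS 5
static const char *name[LEVELS] = { "pgd", "p4d", "pud", "pmd", "pte" };
static const int shift[LEVELS]  = { 48, 39, 30, 21, 12 };	/* toy split */

static void walk(unsigned long addr)
{
	for (int lvl = 0; lvl < LEVELS; lvl++) {
		unsigned long idx = (addr >> shift[lvl]) & 0x1ff;
		printf("%s index %lu\n", name[lvl], idx);
	}
}

int main(void)
{
	walk(0x0000700000201000UL);
	return 0;
}
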
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
index 5845d3028ffc..14f25798b001 100644
--- a/arch/s390/mm/fault.c
+++ b/arch/s390/mm/fault.c
@@ -130,7 +130,7 @@ static int bad_address(void *p)
130 130
131static void dump_pagetable(unsigned long asce, unsigned long address) 131static void dump_pagetable(unsigned long asce, unsigned long address)
132{ 132{
133 unsigned long *table = __va(asce & PAGE_MASK); 133 unsigned long *table = __va(asce & _ASCE_ORIGIN);
134 134
135 pr_alert("AS:%016lx ", asce); 135 pr_alert("AS:%016lx ", asce);
136 switch (asce & _ASCE_TYPE_MASK) { 136 switch (asce & _ASCE_TYPE_MASK) {
diff --git a/arch/s390/mm/gmap.c b/arch/s390/mm/gmap.c
index 7f6db1e6c048..4fb3d3cdb370 100644
--- a/arch/s390/mm/gmap.c
+++ b/arch/s390/mm/gmap.c
@@ -125,7 +125,7 @@ static void gmap_radix_tree_free(struct radix_tree_root *root)
125 struct radix_tree_iter iter; 125 struct radix_tree_iter iter;
126 unsigned long indices[16]; 126 unsigned long indices[16];
127 unsigned long index; 127 unsigned long index;
128 void **slot; 128 void __rcu **slot;
129 int i, nr; 129 int i, nr;
130 130
131 /* A radix tree is freed by deleting all of its entries */ 131 /* A radix tree is freed by deleting all of its entries */
@@ -150,7 +150,7 @@ static void gmap_rmap_radix_tree_free(struct radix_tree_root *root)
150 struct radix_tree_iter iter; 150 struct radix_tree_iter iter;
151 unsigned long indices[16]; 151 unsigned long indices[16];
152 unsigned long index; 152 unsigned long index;
153 void **slot; 153 void __rcu **slot;
154 int i, nr; 154 int i, nr;
155 155
156 /* A radix tree is freed by deleting all of its entries */ 156 /* A radix tree is freed by deleting all of its entries */
@@ -537,6 +537,7 @@ int __gmap_link(struct gmap *gmap, unsigned long gaddr, unsigned long vmaddr)
537 unsigned long *table; 537 unsigned long *table;
538 spinlock_t *ptl; 538 spinlock_t *ptl;
539 pgd_t *pgd; 539 pgd_t *pgd;
540 p4d_t *p4d;
540 pud_t *pud; 541 pud_t *pud;
541 pmd_t *pmd; 542 pmd_t *pmd;
542 int rc; 543 int rc;
@@ -573,7 +574,9 @@ int __gmap_link(struct gmap *gmap, unsigned long gaddr, unsigned long vmaddr)
573 mm = gmap->mm; 574 mm = gmap->mm;
574 pgd = pgd_offset(mm, vmaddr); 575 pgd = pgd_offset(mm, vmaddr);
575 VM_BUG_ON(pgd_none(*pgd)); 576 VM_BUG_ON(pgd_none(*pgd));
576 pud = pud_offset(pgd, vmaddr); 577 p4d = p4d_offset(pgd, vmaddr);
578 VM_BUG_ON(p4d_none(*p4d));
579 pud = pud_offset(p4d, vmaddr);
577 VM_BUG_ON(pud_none(*pud)); 580 VM_BUG_ON(pud_none(*pud));
578 /* large puds cannot yet be handled */ 581 /* large puds cannot yet be handled */
579 if (pud_large(*pud)) 582 if (pud_large(*pud))
@@ -1008,7 +1011,7 @@ EXPORT_SYMBOL_GPL(gmap_read_table);
1008static inline void gmap_insert_rmap(struct gmap *sg, unsigned long vmaddr, 1011static inline void gmap_insert_rmap(struct gmap *sg, unsigned long vmaddr,
1009 struct gmap_rmap *rmap) 1012 struct gmap_rmap *rmap)
1010{ 1013{
1011 void **slot; 1014 void __rcu **slot;
1012 1015
1013 BUG_ON(!gmap_is_shadow(sg)); 1016 BUG_ON(!gmap_is_shadow(sg));
1014 slot = radix_tree_lookup_slot(&sg->host_to_rmap, vmaddr >> PAGE_SHIFT); 1017 slot = radix_tree_lookup_slot(&sg->host_to_rmap, vmaddr >> PAGE_SHIFT);
diff --git a/arch/s390/mm/gup.c b/arch/s390/mm/gup.c
index b7b779c40a5b..8ecc25e760fa 100644
--- a/arch/s390/mm/gup.c
+++ b/arch/s390/mm/gup.c
@@ -166,15 +166,15 @@ static int gup_huge_pud(pud_t *pudp, pud_t pud, unsigned long addr,
166 return 1; 166 return 1;
167} 167}
168 168
169static inline int gup_pud_range(pgd_t *pgdp, pgd_t pgd, unsigned long addr, 169static inline int gup_pud_range(p4d_t *p4dp, p4d_t p4d, unsigned long addr,
170 unsigned long end, int write, struct page **pages, int *nr) 170 unsigned long end, int write, struct page **pages, int *nr)
171{ 171{
172 unsigned long next; 172 unsigned long next;
173 pud_t *pudp, pud; 173 pud_t *pudp, pud;
174 174
175 pudp = (pud_t *) pgdp; 175 pudp = (pud_t *) p4dp;
176 if ((pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R2) 176 if ((p4d_val(p4d) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R2)
177 pudp = (pud_t *) pgd_deref(pgd); 177 pudp = (pud_t *) p4d_deref(p4d);
178 pudp += pud_index(addr); 178 pudp += pud_index(addr);
179 do { 179 do {
180 pud = *pudp; 180 pud = *pudp;
@@ -194,6 +194,29 @@ static inline int gup_pud_range(pgd_t *pgdp, pgd_t pgd, unsigned long addr,
194 return 1; 194 return 1;
195} 195}
196 196
197static inline int gup_p4d_range(pgd_t *pgdp, pgd_t pgd, unsigned long addr,
198 unsigned long end, int write, struct page **pages, int *nr)
199{
200 unsigned long next;
201 p4d_t *p4dp, p4d;
202
203 p4dp = (p4d_t *) pgdp;
204 if ((pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R1)
205 p4dp = (p4d_t *) pgd_deref(pgd);
206 p4dp += p4d_index(addr);
207 do {
208 p4d = *p4dp;
209 barrier();
210 next = p4d_addr_end(addr, end);
211 if (p4d_none(p4d))
212 return 0;
213 if (!gup_pud_range(p4dp, p4d, addr, next, write, pages, nr))
214 return 0;
215 } while (p4dp++, addr = next, addr != end);
216
217 return 1;
218}
219
197/* 220/*
198 * Like get_user_pages_fast() except its IRQ-safe in that it won't fall 221 * Like get_user_pages_fast() except its IRQ-safe in that it won't fall
199 * back to the regular GUP. 222 * back to the regular GUP.
@@ -228,7 +251,7 @@ int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
228 next = pgd_addr_end(addr, end); 251 next = pgd_addr_end(addr, end);
229 if (pgd_none(pgd)) 252 if (pgd_none(pgd))
230 break; 253 break;
231 if (!gup_pud_range(pgdp, pgd, addr, next, write, pages, &nr)) 254 if (!gup_p4d_range(pgdp, pgd, addr, next, write, pages, &nr))
232 break; 255 break;
233 } while (pgdp++, addr = next, addr != end); 256 } while (pgdp++, addr = next, addr != end);
234 local_irq_restore(flags); 257 local_irq_restore(flags);
diff --git a/arch/s390/mm/hugetlbpage.c b/arch/s390/mm/hugetlbpage.c
index 9b4050caa4e9..d3a5e39756f6 100644
--- a/arch/s390/mm/hugetlbpage.c
+++ b/arch/s390/mm/hugetlbpage.c
@@ -162,16 +162,20 @@ pte_t *huge_pte_alloc(struct mm_struct *mm,
162 unsigned long addr, unsigned long sz) 162 unsigned long addr, unsigned long sz)
163{ 163{
164 pgd_t *pgdp; 164 pgd_t *pgdp;
165 p4d_t *p4dp;
165 pud_t *pudp; 166 pud_t *pudp;
166 pmd_t *pmdp = NULL; 167 pmd_t *pmdp = NULL;
167 168
168 pgdp = pgd_offset(mm, addr); 169 pgdp = pgd_offset(mm, addr);
169 pudp = pud_alloc(mm, pgdp, addr); 170 p4dp = p4d_alloc(mm, pgdp, addr);
170 if (pudp) { 171 if (p4dp) {
171 if (sz == PUD_SIZE) 172 pudp = pud_alloc(mm, p4dp, addr);
172 return (pte_t *) pudp; 173 if (pudp) {
173 else if (sz == PMD_SIZE) 174 if (sz == PUD_SIZE)
174 pmdp = pmd_alloc(mm, pudp, addr); 175 return (pte_t *) pudp;
176 else if (sz == PMD_SIZE)
177 pmdp = pmd_alloc(mm, pudp, addr);
178 }
175 } 179 }
176 return (pte_t *) pmdp; 180 return (pte_t *) pmdp;
177} 181}
@@ -179,16 +183,20 @@ pte_t *huge_pte_alloc(struct mm_struct *mm,
179pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr) 183pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
180{ 184{
181 pgd_t *pgdp; 185 pgd_t *pgdp;
186 p4d_t *p4dp;
182 pud_t *pudp; 187 pud_t *pudp;
183 pmd_t *pmdp = NULL; 188 pmd_t *pmdp = NULL;
184 189
185 pgdp = pgd_offset(mm, addr); 190 pgdp = pgd_offset(mm, addr);
186 if (pgd_present(*pgdp)) { 191 if (pgd_present(*pgdp)) {
187 pudp = pud_offset(pgdp, addr); 192 p4dp = p4d_offset(pgdp, addr);
188 if (pud_present(*pudp)) { 193 if (p4d_present(*p4dp)) {
189 if (pud_large(*pudp)) 194 pudp = pud_offset(p4dp, addr);
190 return (pte_t *) pudp; 195 if (pud_present(*pudp)) {
191 pmdp = pmd_offset(pudp, addr); 196 if (pud_large(*pudp))
197 return (pte_t *) pudp;
198 pmdp = pmd_offset(pudp, addr);
199 }
192 } 200 }
193 } 201 }
194 return (pte_t *) pmdp; 202 return (pte_t *) pmdp;
diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c
index ee6a1d3d4983..3348e60dd8ad 100644
--- a/arch/s390/mm/init.c
+++ b/arch/s390/mm/init.c
@@ -81,6 +81,7 @@ void __init paging_init(void)
81{ 81{
82 unsigned long max_zone_pfns[MAX_NR_ZONES]; 82 unsigned long max_zone_pfns[MAX_NR_ZONES];
83 unsigned long pgd_type, asce_bits; 83 unsigned long pgd_type, asce_bits;
84 psw_t psw;
84 85
85 init_mm.pgd = swapper_pg_dir; 86 init_mm.pgd = swapper_pg_dir;
86 if (VMALLOC_END > (1UL << 42)) { 87 if (VMALLOC_END > (1UL << 42)) {
@@ -100,7 +101,10 @@ void __init paging_init(void)
100 __ctl_load(S390_lowcore.kernel_asce, 1, 1); 101 __ctl_load(S390_lowcore.kernel_asce, 1, 1);
101 __ctl_load(S390_lowcore.kernel_asce, 7, 7); 102 __ctl_load(S390_lowcore.kernel_asce, 7, 7);
102 __ctl_load(S390_lowcore.kernel_asce, 13, 13); 103 __ctl_load(S390_lowcore.kernel_asce, 13, 13);
103 __arch_local_irq_stosm(0x04); 104 psw.mask = __extract_psw();
105 psw_bits(psw).dat = 1;
106 psw_bits(psw).as = PSW_BITS_AS_HOME;
107 __load_psw_mask(psw.mask);
104 108
105 sparse_memory_present_with_active_regions(MAX_NUMNODES); 109 sparse_memory_present_with_active_regions(MAX_NUMNODES);
106 sparse_init(); 110 sparse_init();
diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c
index b017daed6887..8c5f284044ef 100644
--- a/arch/s390/mm/mmap.c
+++ b/arch/s390/mm/mmap.c
@@ -120,7 +120,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
120 120
121check_asce_limit: 121check_asce_limit:
122 if (addr + len > current->mm->context.asce_limit) { 122 if (addr + len > current->mm->context.asce_limit) {
123 rc = crst_table_upgrade(mm); 123 rc = crst_table_upgrade(mm, addr + len);
124 if (rc) 124 if (rc)
125 return (unsigned long) rc; 125 return (unsigned long) rc;
126 } 126 }
@@ -184,7 +184,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
184 184
185check_asce_limit: 185check_asce_limit:
186 if (addr + len > current->mm->context.asce_limit) { 186 if (addr + len > current->mm->context.asce_limit) {
187 rc = crst_table_upgrade(mm); 187 rc = crst_table_upgrade(mm, addr + len);
188 if (rc) 188 if (rc)
189 return (unsigned long) rc; 189 return (unsigned long) rc;
190 } 190 }
diff --git a/arch/s390/mm/pageattr.c b/arch/s390/mm/pageattr.c
index 49e721f3645e..180481589246 100644
--- a/arch/s390/mm/pageattr.c
+++ b/arch/s390/mm/pageattr.c
@@ -229,14 +229,14 @@ static void modify_pud_page(pud_t *pudp, unsigned long addr,
229 pgt_set((unsigned long *)pudp, pud_val(new), addr, CRDTE_DTT_REGION3); 229 pgt_set((unsigned long *)pudp, pud_val(new), addr, CRDTE_DTT_REGION3);
230} 230}
231 231
232static int walk_pud_level(pgd_t *pgd, unsigned long addr, unsigned long end, 232static int walk_pud_level(p4d_t *p4d, unsigned long addr, unsigned long end,
233 unsigned long flags) 233 unsigned long flags)
234{ 234{
235 unsigned long next; 235 unsigned long next;
236 pud_t *pudp; 236 pud_t *pudp;
237 int rc = 0; 237 int rc = 0;
238 238
239 pudp = pud_offset(pgd, addr); 239 pudp = pud_offset(p4d, addr);
240 do { 240 do {
241 if (pud_none(*pudp)) 241 if (pud_none(*pudp))
242 return -EINVAL; 242 return -EINVAL;
@@ -259,6 +259,26 @@ static int walk_pud_level(pgd_t *pgd, unsigned long addr, unsigned long end,
259 return rc; 259 return rc;
260} 260}
261 261
262static int walk_p4d_level(pgd_t *pgd, unsigned long addr, unsigned long end,
263 unsigned long flags)
264{
265 unsigned long next;
266 p4d_t *p4dp;
267 int rc = 0;
268
269 p4dp = p4d_offset(pgd, addr);
270 do {
271 if (p4d_none(*p4dp))
272 return -EINVAL;
273 next = p4d_addr_end(addr, end);
274 rc = walk_pud_level(p4dp, addr, next, flags);
275 p4dp++;
276 addr = next;
277 cond_resched();
278 } while (addr < end && !rc);
279 return rc;
280}
281
262static DEFINE_MUTEX(cpa_mutex); 282static DEFINE_MUTEX(cpa_mutex);
263 283
264static int change_page_attr(unsigned long addr, unsigned long end, 284static int change_page_attr(unsigned long addr, unsigned long end,
@@ -278,7 +298,7 @@ static int change_page_attr(unsigned long addr, unsigned long end,
278 if (pgd_none(*pgdp)) 298 if (pgd_none(*pgdp))
279 break; 299 break;
280 next = pgd_addr_end(addr, end); 300 next = pgd_addr_end(addr, end);
281 rc = walk_pud_level(pgdp, addr, next, flags); 301 rc = walk_p4d_level(pgdp, addr, next, flags);
282 if (rc) 302 if (rc)
283 break; 303 break;
284 cond_resched(); 304 cond_resched();
@@ -319,6 +339,7 @@ void __kernel_map_pages(struct page *page, int numpages, int enable)
319 unsigned long address; 339 unsigned long address;
320 int nr, i, j; 340 int nr, i, j;
321 pgd_t *pgd; 341 pgd_t *pgd;
342 p4d_t *p4d;
322 pud_t *pud; 343 pud_t *pud;
323 pmd_t *pmd; 344 pmd_t *pmd;
324 pte_t *pte; 345 pte_t *pte;
@@ -326,7 +347,8 @@ void __kernel_map_pages(struct page *page, int numpages, int enable)
326 for (i = 0; i < numpages;) { 347 for (i = 0; i < numpages;) {
327 address = page_to_phys(page + i); 348 address = page_to_phys(page + i);
328 pgd = pgd_offset_k(address); 349 pgd = pgd_offset_k(address);
329 pud = pud_offset(pgd, address); 350 p4d = p4d_offset(pgd, address);
351 pud = pud_offset(p4d, address);
330 pmd = pmd_offset(pud, address); 352 pmd = pmd_offset(pud, address);
331 pte = pte_offset_kernel(pmd, address); 353 pte = pte_offset_kernel(pmd, address);
332 nr = (unsigned long)pte >> ilog2(sizeof(long)); 354 nr = (unsigned long)pte >> ilog2(sizeof(long));
diff --git a/arch/s390/mm/pgalloc.c b/arch/s390/mm/pgalloc.c
index f502cbe657af..18918e394ce4 100644
--- a/arch/s390/mm/pgalloc.c
+++ b/arch/s390/mm/pgalloc.c
@@ -76,29 +76,46 @@ static void __crst_table_upgrade(void *arg)
76 __tlb_flush_local(); 76 __tlb_flush_local();
77} 77}
78 78
79int crst_table_upgrade(struct mm_struct *mm) 79int crst_table_upgrade(struct mm_struct *mm, unsigned long end)
80{ 80{
81 unsigned long *table, *pgd; 81 unsigned long *table, *pgd;
82 int rc, notify;
82 83
83 /* upgrade should only happen from 3 to 4 levels */ 84 /* upgrade should only happen from 3 to 4, 3 to 5, or 4 to 5 levels */
84 BUG_ON(mm->context.asce_limit != (1UL << 42)); 85 BUG_ON(mm->context.asce_limit < (1UL << 42));
85 86 if (end >= TASK_SIZE_MAX)
86 table = crst_table_alloc(mm);
87 if (!table)
88 return -ENOMEM; 87 return -ENOMEM;
89 88 rc = 0;
90 spin_lock_bh(&mm->page_table_lock); 89 notify = 0;
91 pgd = (unsigned long *) mm->pgd; 90 while (mm->context.asce_limit < end) {
92 crst_table_init(table, _REGION2_ENTRY_EMPTY); 91 table = crst_table_alloc(mm);
93 pgd_populate(mm, (pgd_t *) table, (pud_t *) pgd); 92 if (!table) {
94 mm->pgd = (pgd_t *) table; 93 rc = -ENOMEM;
95 mm->context.asce_limit = 1UL << 53; 94 break;
96 mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH | 95 }
97 _ASCE_USER_BITS | _ASCE_TYPE_REGION2; 96 spin_lock_bh(&mm->page_table_lock);
98 spin_unlock_bh(&mm->page_table_lock); 97 pgd = (unsigned long *) mm->pgd;
99 98 if (mm->context.asce_limit == (1UL << 42)) {
100 on_each_cpu(__crst_table_upgrade, mm, 0); 99 crst_table_init(table, _REGION2_ENTRY_EMPTY);
101 return 0; 100 p4d_populate(mm, (p4d_t *) table, (pud_t *) pgd);
101 mm->pgd = (pgd_t *) table;
102 mm->context.asce_limit = 1UL << 53;
103 mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
104 _ASCE_USER_BITS | _ASCE_TYPE_REGION2;
105 } else {
106 crst_table_init(table, _REGION1_ENTRY_EMPTY);
107 pgd_populate(mm, (pgd_t *) table, (p4d_t *) pgd);
108 mm->pgd = (pgd_t *) table;
109 mm->context.asce_limit = -PAGE_SIZE;
110 mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
111 _ASCE_USER_BITS | _ASCE_TYPE_REGION1;
112 }
113 notify = 1;
114 spin_unlock_bh(&mm->page_table_lock);
115 }
116 if (notify)
117 on_each_cpu(__crst_table_upgrade, mm, 0);
118 return rc;
102} 119}
103 120
104void crst_table_downgrade(struct mm_struct *mm) 121void crst_table_downgrade(struct mm_struct *mm)
@@ -274,7 +291,7 @@ static void __tlb_remove_table(void *_table)
274 struct page *page = pfn_to_page(__pa(table) >> PAGE_SHIFT); 291 struct page *page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
275 292
276 switch (mask) { 293 switch (mask) {
277 case 0: /* pmd or pud */ 294 case 0: /* pmd, pud, or p4d */
278 free_pages((unsigned long) table, 2); 295 free_pages((unsigned long) table, 2);
279 break; 296 break;
280 case 1: /* lower 2K of a 4K page table */ 297 case 1: /* lower 2K of a 4K page table */
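
crst_table_upgrade() now grows the address space one level per loop iteration until the limit covers the requested end, moving from asce_limit 1UL << 42 (three levels) to 1UL << 53 (four) to -PAGE_SIZE (five). A toy model of just that loop, using only the limit values visible in the hunk; everything else is illustrative.

#include <stdio.h>

int main(void)
{
	unsigned long limit = 1UL << 42;	/* three-level start        */
	unsigned long end   = 1UL << 60;	/* hypothetical mapping end */
	int levels = 3;

	while (limit < end) {
		limit = (limit == (1UL << 42)) ? (1UL << 53)
					       : ~0xfffUL; /* -PAGE_SIZE */
		levels++;
		printf("upgraded to %d levels, limit=%#lx\n", levels, limit);
	}
	return 0;
}
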
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
index 947b66a5cdba..d4d409ba206b 100644
--- a/arch/s390/mm/pgtable.c
+++ b/arch/s390/mm/pgtable.c
@@ -610,6 +610,7 @@ bool test_and_clear_guest_dirty(struct mm_struct *mm, unsigned long addr)
610{ 610{
611 spinlock_t *ptl; 611 spinlock_t *ptl;
612 pgd_t *pgd; 612 pgd_t *pgd;
613 p4d_t *p4d;
613 pud_t *pud; 614 pud_t *pud;
614 pmd_t *pmd; 615 pmd_t *pmd;
615 pgste_t pgste; 616 pgste_t pgste;
@@ -618,7 +619,10 @@ bool test_and_clear_guest_dirty(struct mm_struct *mm, unsigned long addr)
618 bool dirty; 619 bool dirty;
619 620
620 pgd = pgd_offset(mm, addr); 621 pgd = pgd_offset(mm, addr);
621 pud = pud_alloc(mm, pgd, addr); 622 p4d = p4d_alloc(mm, pgd, addr);
623 if (!p4d)
624 return false;
625 pud = pud_alloc(mm, p4d, addr);
622 if (!pud) 626 if (!pud)
623 return false; 627 return false;
624 pmd = pmd_alloc(mm, pud, addr); 628 pmd = pmd_alloc(mm, pud, addr);
diff --git a/arch/s390/mm/vmem.c b/arch/s390/mm/vmem.c
index c33c94b4be60..d8398962a723 100644
--- a/arch/s390/mm/vmem.c
+++ b/arch/s390/mm/vmem.c
@@ -38,6 +38,17 @@ static void __ref *vmem_alloc_pages(unsigned int order)
38 return (void *) memblock_alloc(size, size); 38 return (void *) memblock_alloc(size, size);
39} 39}
40 40
41static inline p4d_t *vmem_p4d_alloc(void)
42{
43 p4d_t *p4d = NULL;
44
45 p4d = vmem_alloc_pages(2);
46 if (!p4d)
47 return NULL;
48 clear_table((unsigned long *) p4d, _REGION2_ENTRY_EMPTY, PAGE_SIZE * 4);
49 return p4d;
50}
51
41static inline pud_t *vmem_pud_alloc(void) 52static inline pud_t *vmem_pud_alloc(void)
42{ 53{
43 pud_t *pud = NULL; 54 pud_t *pud = NULL;
@@ -85,6 +96,7 @@ static int vmem_add_mem(unsigned long start, unsigned long size)
85 unsigned long end = start + size; 96 unsigned long end = start + size;
86 unsigned long address = start; 97 unsigned long address = start;
87 pgd_t *pg_dir; 98 pgd_t *pg_dir;
99 p4d_t *p4_dir;
88 pud_t *pu_dir; 100 pud_t *pu_dir;
89 pmd_t *pm_dir; 101 pmd_t *pm_dir;
90 pte_t *pt_dir; 102 pte_t *pt_dir;
@@ -102,12 +114,19 @@ static int vmem_add_mem(unsigned long start, unsigned long size)
102 while (address < end) { 114 while (address < end) {
103 pg_dir = pgd_offset_k(address); 115 pg_dir = pgd_offset_k(address);
104 if (pgd_none(*pg_dir)) { 116 if (pgd_none(*pg_dir)) {
117 p4_dir = vmem_p4d_alloc();
118 if (!p4_dir)
119 goto out;
120 pgd_populate(&init_mm, pg_dir, p4_dir);
121 }
122 p4_dir = p4d_offset(pg_dir, address);
123 if (p4d_none(*p4_dir)) {
105 pu_dir = vmem_pud_alloc(); 124 pu_dir = vmem_pud_alloc();
106 if (!pu_dir) 125 if (!pu_dir)
107 goto out; 126 goto out;
108 pgd_populate(&init_mm, pg_dir, pu_dir); 127 p4d_populate(&init_mm, p4_dir, pu_dir);
109 } 128 }
110 pu_dir = pud_offset(pg_dir, address); 129 pu_dir = pud_offset(p4_dir, address);
111 if (MACHINE_HAS_EDAT2 && pud_none(*pu_dir) && address && 130 if (MACHINE_HAS_EDAT2 && pud_none(*pu_dir) && address &&
112 !(address & ~PUD_MASK) && (address + PUD_SIZE <= end) && 131 !(address & ~PUD_MASK) && (address + PUD_SIZE <= end) &&
113 !debug_pagealloc_enabled()) { 132 !debug_pagealloc_enabled()) {
@@ -161,6 +180,7 @@ static void vmem_remove_range(unsigned long start, unsigned long size)
161 unsigned long end = start + size; 180 unsigned long end = start + size;
162 unsigned long address = start; 181 unsigned long address = start;
163 pgd_t *pg_dir; 182 pgd_t *pg_dir;
183 p4d_t *p4_dir;
164 pud_t *pu_dir; 184 pud_t *pu_dir;
165 pmd_t *pm_dir; 185 pmd_t *pm_dir;
166 pte_t *pt_dir; 186 pte_t *pt_dir;
@@ -172,7 +192,12 @@ static void vmem_remove_range(unsigned long start, unsigned long size)
172 address += PGDIR_SIZE; 192 address += PGDIR_SIZE;
173 continue; 193 continue;
174 } 194 }
175 pu_dir = pud_offset(pg_dir, address); 195 p4_dir = p4d_offset(pg_dir, address);
196 if (p4d_none(*p4_dir)) {
197 address += P4D_SIZE;
198 continue;
199 }
200 pu_dir = pud_offset(p4_dir, address);
176 if (pud_none(*pu_dir)) { 201 if (pud_none(*pu_dir)) {
177 address += PUD_SIZE; 202 address += PUD_SIZE;
178 continue; 203 continue;
@@ -213,6 +238,7 @@ int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
213 unsigned long pgt_prot, sgt_prot; 238 unsigned long pgt_prot, sgt_prot;
214 unsigned long address = start; 239 unsigned long address = start;
215 pgd_t *pg_dir; 240 pgd_t *pg_dir;
241 p4d_t *p4_dir;
216 pud_t *pu_dir; 242 pud_t *pu_dir;
217 pmd_t *pm_dir; 243 pmd_t *pm_dir;
218 pte_t *pt_dir; 244 pte_t *pt_dir;
@@ -227,13 +253,21 @@ int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
227 for (address = start; address < end;) { 253 for (address = start; address < end;) {
228 pg_dir = pgd_offset_k(address); 254 pg_dir = pgd_offset_k(address);
229 if (pgd_none(*pg_dir)) { 255 if (pgd_none(*pg_dir)) {
256 p4_dir = vmem_p4d_alloc();
257 if (!p4_dir)
258 goto out;
259 pgd_populate(&init_mm, pg_dir, p4_dir);
260 }
261
262 p4_dir = p4d_offset(pg_dir, address);
263 if (p4d_none(*p4_dir)) {
230 pu_dir = vmem_pud_alloc(); 264 pu_dir = vmem_pud_alloc();
231 if (!pu_dir) 265 if (!pu_dir)
232 goto out; 266 goto out;
233 pgd_populate(&init_mm, pg_dir, pu_dir); 267 p4d_populate(&init_mm, p4_dir, pu_dir);
234 } 268 }
235 269
236 pu_dir = pud_offset(pg_dir, address); 270 pu_dir = pud_offset(p4_dir, address);
237 if (pud_none(*pu_dir)) { 271 if (pud_none(*pu_dir)) {
238 pm_dir = vmem_pmd_alloc(); 272 pm_dir = vmem_pmd_alloc();
239 if (!pm_dir) 273 if (!pm_dir)
diff --git a/arch/s390/pci/pci.c b/arch/s390/pci/pci.c
index 8051df109db3..7b30af5da222 100644
--- a/arch/s390/pci/pci.c
+++ b/arch/s390/pci/pci.c
@@ -86,6 +86,25 @@ struct zpci_dev *get_zdev_by_fid(u32 fid)
86 return zdev; 86 return zdev;
87} 87}
88 88
89void zpci_remove_reserved_devices(void)
90{
91 struct zpci_dev *tmp, *zdev;
92 enum zpci_state state;
93 LIST_HEAD(remove);
94
95 spin_lock(&zpci_list_lock);
96 list_for_each_entry_safe(zdev, tmp, &zpci_list, entry) {
97 if (zdev->state == ZPCI_FN_STATE_STANDBY &&
98 !clp_get_state(zdev->fid, &state) &&
99 state == ZPCI_FN_STATE_RESERVED)
100 list_move_tail(&zdev->entry, &remove);
101 }
102 spin_unlock(&zpci_list_lock);
103
104 list_for_each_entry_safe(zdev, tmp, &remove, entry)
105 zpci_remove_device(zdev);
106}
107
89static struct zpci_dev *get_zdev_by_bus(struct pci_bus *bus) 108static struct zpci_dev *get_zdev_by_bus(struct pci_bus *bus)
90{ 109{
91 return (bus && bus->sysdata) ? (struct zpci_dev *) bus->sysdata : NULL; 110 return (bus && bus->sysdata) ? (struct zpci_dev *) bus->sysdata : NULL;
@@ -108,6 +127,7 @@ static int zpci_set_airq(struct zpci_dev *zdev)
108{ 127{
109 u64 req = ZPCI_CREATE_REQ(zdev->fh, 0, ZPCI_MOD_FC_REG_INT); 128 u64 req = ZPCI_CREATE_REQ(zdev->fh, 0, ZPCI_MOD_FC_REG_INT);
110 struct zpci_fib fib = {0}; 129 struct zpci_fib fib = {0};
130 u8 status;
111 131
112 fib.isc = PCI_ISC; 132 fib.isc = PCI_ISC;
113 fib.sum = 1; /* enable summary notifications */ 133 fib.sum = 1; /* enable summary notifications */
@@ -117,60 +137,58 @@ static int zpci_set_airq(struct zpci_dev *zdev)
117 fib.aisb = (unsigned long) zpci_aisb_iv->vector + (zdev->aisb/64)*8; 137 fib.aisb = (unsigned long) zpci_aisb_iv->vector + (zdev->aisb/64)*8;
118 fib.aisbo = zdev->aisb & 63; 138 fib.aisbo = zdev->aisb & 63;
119 139
120 return zpci_mod_fc(req, &fib); 140 return zpci_mod_fc(req, &fib, &status) ? -EIO : 0;
121} 141}
122 142
123struct mod_pci_args { 143/* Modify PCI: Unregister adapter interruptions */
124 u64 base; 144static int zpci_clear_airq(struct zpci_dev *zdev)
125 u64 limit;
126 u64 iota;
127 u64 fmb_addr;
128};
129
130static int mod_pci(struct zpci_dev *zdev, int fn, u8 dmaas, struct mod_pci_args *args)
131{ 145{
132 u64 req = ZPCI_CREATE_REQ(zdev->fh, dmaas, fn); 146 u64 req = ZPCI_CREATE_REQ(zdev->fh, 0, ZPCI_MOD_FC_DEREG_INT);
133 struct zpci_fib fib = {0}; 147 struct zpci_fib fib = {0};
148 u8 cc, status;
134 149
135 fib.pba = args->base; 150 cc = zpci_mod_fc(req, &fib, &status);
136 fib.pal = args->limit; 151 if (cc == 3 || (cc == 1 && status == 24))
137 fib.iota = args->iota; 152 /* Function already gone or IRQs already deregistered. */
138 fib.fmb_addr = args->fmb_addr; 153 cc = 0;
139 154
140 return zpci_mod_fc(req, &fib); 155 return cc ? -EIO : 0;
141} 156}
142 157
143/* Modify PCI: Register I/O address translation parameters */ 158/* Modify PCI: Register I/O address translation parameters */
144int zpci_register_ioat(struct zpci_dev *zdev, u8 dmaas, 159int zpci_register_ioat(struct zpci_dev *zdev, u8 dmaas,
145 u64 base, u64 limit, u64 iota) 160 u64 base, u64 limit, u64 iota)
146{ 161{
147 struct mod_pci_args args = { base, limit, iota, 0 }; 162 u64 req = ZPCI_CREATE_REQ(zdev->fh, dmaas, ZPCI_MOD_FC_REG_IOAT);
163 struct zpci_fib fib = {0};
164 u8 status;
148 165
149 WARN_ON_ONCE(iota & 0x3fff); 166 WARN_ON_ONCE(iota & 0x3fff);
150 args.iota |= ZPCI_IOTA_RTTO_FLAG; 167 fib.pba = base;
151 return mod_pci(zdev, ZPCI_MOD_FC_REG_IOAT, dmaas, &args); 168 fib.pal = limit;
169 fib.iota = iota | ZPCI_IOTA_RTTO_FLAG;
170 return zpci_mod_fc(req, &fib, &status) ? -EIO : 0;
152} 171}
153 172
154/* Modify PCI: Unregister I/O address translation parameters */ 173/* Modify PCI: Unregister I/O address translation parameters */
155int zpci_unregister_ioat(struct zpci_dev *zdev, u8 dmaas) 174int zpci_unregister_ioat(struct zpci_dev *zdev, u8 dmaas)
156{ 175{
157 struct mod_pci_args args = { 0, 0, 0, 0 }; 176 u64 req = ZPCI_CREATE_REQ(zdev->fh, dmaas, ZPCI_MOD_FC_DEREG_IOAT);
158 177 struct zpci_fib fib = {0};
159 return mod_pci(zdev, ZPCI_MOD_FC_DEREG_IOAT, dmaas, &args); 178 u8 cc, status;
160}
161
162/* Modify PCI: Unregister adapter interruptions */
163static int zpci_clear_airq(struct zpci_dev *zdev)
164{
165 struct mod_pci_args args = { 0, 0, 0, 0 };
166 179
167 return mod_pci(zdev, ZPCI_MOD_FC_DEREG_INT, 0, &args); 180 cc = zpci_mod_fc(req, &fib, &status);
181 if (cc == 3) /* Function already gone. */
182 cc = 0;
183 return cc ? -EIO : 0;
168} 184}
169 185
170/* Modify PCI: Set PCI function measurement parameters */ 186/* Modify PCI: Set PCI function measurement parameters */
171int zpci_fmb_enable_device(struct zpci_dev *zdev) 187int zpci_fmb_enable_device(struct zpci_dev *zdev)
172{ 188{
173 struct mod_pci_args args = { 0, 0, 0, 0 }; 189 u64 req = ZPCI_CREATE_REQ(zdev->fh, 0, ZPCI_MOD_FC_SET_MEASURE);
190 struct zpci_fib fib = {0};
191 u8 cc, status;
174 192
175 if (zdev->fmb || sizeof(*zdev->fmb) < zdev->fmb_length) 193 if (zdev->fmb || sizeof(*zdev->fmb) < zdev->fmb_length)
176 return -EINVAL; 194 return -EINVAL;
@@ -185,25 +203,35 @@ int zpci_fmb_enable_device(struct zpci_dev *zdev)
185 atomic64_set(&zdev->mapped_pages, 0); 203 atomic64_set(&zdev->mapped_pages, 0);
186 atomic64_set(&zdev->unmapped_pages, 0); 204 atomic64_set(&zdev->unmapped_pages, 0);
187 205
188 args.fmb_addr = virt_to_phys(zdev->fmb); 206 fib.fmb_addr = virt_to_phys(zdev->fmb);
189 return mod_pci(zdev, ZPCI_MOD_FC_SET_MEASURE, 0, &args); 207 cc = zpci_mod_fc(req, &fib, &status);
208 if (cc) {
209 kmem_cache_free(zdev_fmb_cache, zdev->fmb);
210 zdev->fmb = NULL;
211 }
212 return cc ? -EIO : 0;
190} 213}
191 214
192/* Modify PCI: Disable PCI function measurement */ 215/* Modify PCI: Disable PCI function measurement */
193int zpci_fmb_disable_device(struct zpci_dev *zdev) 216int zpci_fmb_disable_device(struct zpci_dev *zdev)
194{ 217{
195 struct mod_pci_args args = { 0, 0, 0, 0 }; 218 u64 req = ZPCI_CREATE_REQ(zdev->fh, 0, ZPCI_MOD_FC_SET_MEASURE);
196 int rc; 219 struct zpci_fib fib = {0};
220 u8 cc, status;
197 221
198 if (!zdev->fmb) 222 if (!zdev->fmb)
199 return -EINVAL; 223 return -EINVAL;
200 224
201 /* Function measurement is disabled if fmb address is zero */ 225 /* Function measurement is disabled if fmb address is zero */
202 rc = mod_pci(zdev, ZPCI_MOD_FC_SET_MEASURE, 0, &args); 226 cc = zpci_mod_fc(req, &fib, &status);
227 if (cc == 3) /* Function already gone. */
228 cc = 0;
203 229
204 kmem_cache_free(zdev_fmb_cache, zdev->fmb); 230 if (!cc) {
205 zdev->fmb = NULL; 231 kmem_cache_free(zdev_fmb_cache, zdev->fmb);
206 return rc; 232 zdev->fmb = NULL;
233 }
234 return cc ? -EIO : 0;
207} 235}
208 236
209static int zpci_cfg_load(struct zpci_dev *zdev, int offset, u32 *val, u8 len) 237static int zpci_cfg_load(struct zpci_dev *zdev, int offset, u32 *val, u8 len)
@@ -372,22 +400,21 @@ int arch_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
372 struct msi_msg msg; 400 struct msi_msg msg;
373 int rc, irq; 401 int rc, irq;
374 402
403 zdev->aisb = -1UL;
375 if (type == PCI_CAP_ID_MSI && nvec > 1) 404 if (type == PCI_CAP_ID_MSI && nvec > 1)
376 return 1; 405 return 1;
377 msi_vecs = min_t(unsigned int, nvec, zdev->max_msi); 406 msi_vecs = min_t(unsigned int, nvec, zdev->max_msi);
378 407
379 /* Allocate adapter summary indicator bit */ 408 /* Allocate adapter summary indicator bit */
380 rc = -EIO;
381 aisb = airq_iv_alloc_bit(zpci_aisb_iv); 409 aisb = airq_iv_alloc_bit(zpci_aisb_iv);
382 if (aisb == -1UL) 410 if (aisb == -1UL)
383 goto out; 411 return -EIO;
384 zdev->aisb = aisb; 412 zdev->aisb = aisb;
385 413
386 /* Create adapter interrupt vector */ 414 /* Create adapter interrupt vector */
387 rc = -ENOMEM;
388 zdev->aibv = airq_iv_create(msi_vecs, AIRQ_IV_DATA | AIRQ_IV_BITLOCK); 415 zdev->aibv = airq_iv_create(msi_vecs, AIRQ_IV_DATA | AIRQ_IV_BITLOCK);
389 if (!zdev->aibv) 416 if (!zdev->aibv)
390 goto out_si; 417 return -ENOMEM;
391 418
392 /* Wire up shortcut pointer */ 419 /* Wire up shortcut pointer */
393 zpci_aibv[aisb] = zdev->aibv; 420 zpci_aibv[aisb] = zdev->aibv;
@@ -398,10 +425,10 @@ int arch_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
398 rc = -EIO; 425 rc = -EIO;
399 irq = irq_alloc_desc(0); /* Alloc irq on node 0 */ 426 irq = irq_alloc_desc(0); /* Alloc irq on node 0 */
400 if (irq < 0) 427 if (irq < 0)
401 goto out_msi; 428 return -ENOMEM;
402 rc = irq_set_msi_desc(irq, msi); 429 rc = irq_set_msi_desc(irq, msi);
403 if (rc) 430 if (rc)
404 goto out_msi; 431 return rc;
405 irq_set_chip_and_handler(irq, &zpci_irq_chip, 432 irq_set_chip_and_handler(irq, &zpci_irq_chip,
406 handle_simple_irq); 433 handle_simple_irq);
407 msg.data = hwirq; 434 msg.data = hwirq;
@@ -415,27 +442,9 @@ int arch_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
415 /* Enable adapter interrupts */ 442 /* Enable adapter interrupts */
416 rc = zpci_set_airq(zdev); 443 rc = zpci_set_airq(zdev);
417 if (rc) 444 if (rc)
418 goto out_msi; 445 return rc;
419 446
420 return (msi_vecs == nvec) ? 0 : msi_vecs; 447 return (msi_vecs == nvec) ? 0 : msi_vecs;
421
422out_msi:
423 for_each_pci_msi_entry(msi, pdev) {
424 if (hwirq-- == 0)
425 break;
426 irq_set_msi_desc(msi->irq, NULL);
427 irq_free_desc(msi->irq);
428 msi->msg.address_lo = 0;
429 msi->msg.address_hi = 0;
430 msi->msg.data = 0;
431 msi->irq = 0;
432 }
433 zpci_aibv[aisb] = NULL;
434 airq_iv_release(zdev->aibv);
435out_si:
436 airq_iv_free_bit(zpci_aisb_iv, aisb);
437out:
438 return rc;
439} 448}
440 449
441void arch_teardown_msi_irqs(struct pci_dev *pdev) 450void arch_teardown_msi_irqs(struct pci_dev *pdev)
@@ -451,6 +460,8 @@ void arch_teardown_msi_irqs(struct pci_dev *pdev)
451 460
452 /* Release MSI interrupts */ 461 /* Release MSI interrupts */
453 for_each_pci_msi_entry(msi, pdev) { 462 for_each_pci_msi_entry(msi, pdev) {
463 if (!msi->irq)
464 continue;
454 if (msi->msi_attrib.is_msix) 465 if (msi->msi_attrib.is_msix)
455 __pci_msix_desc_mask_irq(msi, 1); 466 __pci_msix_desc_mask_irq(msi, 1);
456 else 467 else
@@ -463,9 +474,15 @@ void arch_teardown_msi_irqs(struct pci_dev *pdev)
463 msi->irq = 0; 474 msi->irq = 0;
464 } 475 }
465 476
466 zpci_aibv[zdev->aisb] = NULL; 477 if (zdev->aisb != -1UL) {
467 airq_iv_release(zdev->aibv); 478 zpci_aibv[zdev->aisb] = NULL;
468 airq_iv_free_bit(zpci_aisb_iv, zdev->aisb); 479 airq_iv_free_bit(zpci_aisb_iv, zdev->aisb);
480 zdev->aisb = -1UL;
481 }
482 if (zdev->aibv) {
483 airq_iv_release(zdev->aibv);
484 zdev->aibv = NULL;
485 }
469} 486}
470 487
471static void zpci_map_resources(struct pci_dev *pdev) 488static void zpci_map_resources(struct pci_dev *pdev)
@@ -719,6 +736,16 @@ static int zpci_alloc_domain(struct zpci_dev *zdev)
719{ 736{
720 if (zpci_unique_uid) { 737 if (zpci_unique_uid) {
721 zdev->domain = (u16) zdev->uid; 738 zdev->domain = (u16) zdev->uid;
739 if (zdev->domain >= ZPCI_NR_DEVICES)
740 return 0;
741
742 spin_lock(&zpci_domain_lock);
743 if (test_bit(zdev->domain, zpci_domain)) {
744 spin_unlock(&zpci_domain_lock);
745 return -EEXIST;
746 }
747 set_bit(zdev->domain, zpci_domain);
748 spin_unlock(&zpci_domain_lock);
722 return 0; 749 return 0;
723 } 750 }
724 751
@@ -735,7 +762,7 @@ static int zpci_alloc_domain(struct zpci_dev *zdev)
735 762
736static void zpci_free_domain(struct zpci_dev *zdev) 763static void zpci_free_domain(struct zpci_dev *zdev)
737{ 764{
738 if (zpci_unique_uid) 765 if (zdev->domain >= ZPCI_NR_DEVICES)
739 return; 766 return;
740 767
741 spin_lock(&zpci_domain_lock); 768 spin_lock(&zpci_domain_lock);
@@ -755,6 +782,7 @@ void pcibios_remove_bus(struct pci_bus *bus)
755 list_del(&zdev->entry); 782 list_del(&zdev->entry);
756 spin_unlock(&zpci_list_lock); 783 spin_unlock(&zpci_list_lock);
757 784
785 zpci_dbg(3, "rem fid:%x\n", zdev->fid);
758 kfree(zdev); 786 kfree(zdev);
759} 787}
760 788
@@ -847,15 +875,14 @@ out:
847 return rc; 875 return rc;
848} 876}
849 877
850void zpci_stop_device(struct zpci_dev *zdev) 878void zpci_remove_device(struct zpci_dev *zdev)
851{ 879{
852 zpci_dma_exit_device(zdev); 880 if (!zdev->bus)
853 /* 881 return;
854 * Note: SCLP disables fh via set-pci-fn so don't 882
855 * do that here. 883 pci_stop_root_bus(zdev->bus);
856 */ 884 pci_remove_root_bus(zdev->bus);
857} 885}
858EXPORT_SYMBOL_GPL(zpci_stop_device);
859 886
860int zpci_report_error(struct pci_dev *pdev, 887int zpci_report_error(struct pci_dev *pdev,
861 struct zpci_report_error_header *report) 888 struct zpci_report_error_header *report)
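
With mod_pci() removed, the pci.c callers above now fill a zpci_fib themselves and call zpci_mod_fc(), which returns the raw condition code and hands back the status byte so that "function already gone" (cc 3) and "IRQs already deregistered" (cc 1, status 24) can be tolerated. A standalone sketch of that caller-side pattern with a stubbed helper; the stub and its types are invented so the example compiles on its own, the real zpci_mod_fc() issues the mpcifc instruction.

#include <stdint.h>
#include <errno.h>

struct fib_stub { uint64_t isc; };

/* stand-in for zpci_mod_fc(): pretend the operation succeeded */
static uint8_t stub_mod_fc(uint64_t req, struct fib_stub *fib, uint8_t *status)
{
	(void)req; (void)fib;
	*status = 0;
	return 0;
}

static int clear_airq_like(uint64_t req)
{
	struct fib_stub fib = {0};
	uint8_t cc, status;

	cc = stub_mod_fc(req, &fib, &status);
	if (cc == 3 || (cc == 1 && status == 24))
		cc = 0;	/* already gone / already deregistered */
	return cc ? -EIO : 0;
}

int main(void)
{
	return clear_airq_like(0) ? 1 : 0;
}
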
diff --git a/arch/s390/pci/pci_clp.c b/arch/s390/pci/pci_clp.c
index 1c3332ac1957..bd534b4d40e3 100644
--- a/arch/s390/pci/pci_clp.c
+++ b/arch/s390/pci/pci_clp.c
@@ -193,12 +193,12 @@ out:
193int clp_add_pci_device(u32 fid, u32 fh, int configured) 193int clp_add_pci_device(u32 fid, u32 fh, int configured)
194{ 194{
195 struct zpci_dev *zdev; 195 struct zpci_dev *zdev;
196 int rc; 196 int rc = -ENOMEM;
197 197
198 zpci_dbg(3, "add fid:%x, fh:%x, c:%d\n", fid, fh, configured); 198 zpci_dbg(3, "add fid:%x, fh:%x, c:%d\n", fid, fh, configured);
199 zdev = kzalloc(sizeof(*zdev), GFP_KERNEL); 199 zdev = kzalloc(sizeof(*zdev), GFP_KERNEL);
200 if (!zdev) 200 if (!zdev)
201 return -ENOMEM; 201 goto error;
202 202
203 zdev->fh = fh; 203 zdev->fh = fh;
204 zdev->fid = fid; 204 zdev->fid = fid;
@@ -219,6 +219,7 @@ int clp_add_pci_device(u32 fid, u32 fh, int configured)
219 return 0; 219 return 0;
220 220
221error: 221error:
222 zpci_dbg(0, "add fid:%x, rc:%d\n", fid, rc);
222 kfree(zdev); 223 kfree(zdev);
223 return rc; 224 return rc;
224} 225}
@@ -295,8 +296,8 @@ int clp_disable_fh(struct zpci_dev *zdev)
295 return rc; 296 return rc;
296} 297}
297 298
298static int clp_list_pci(struct clp_req_rsp_list_pci *rrb, 299static int clp_list_pci(struct clp_req_rsp_list_pci *rrb, void *data,
299 void (*cb)(struct clp_fh_list_entry *entry)) 300 void (*cb)(struct clp_fh_list_entry *, void *))
300{ 301{
301 u64 resume_token = 0; 302 u64 resume_token = 0;
302 int entries, i, rc; 303 int entries, i, rc;
@@ -327,21 +328,13 @@ static int clp_list_pci(struct clp_req_rsp_list_pci *rrb,
327 328
328 resume_token = rrb->response.resume_token; 329 resume_token = rrb->response.resume_token;
329 for (i = 0; i < entries; i++) 330 for (i = 0; i < entries; i++)
330 cb(&rrb->response.fh_list[i]); 331 cb(&rrb->response.fh_list[i], data);
331 } while (resume_token); 332 } while (resume_token);
332out: 333out:
333 return rc; 334 return rc;
334} 335}
335 336
336static void __clp_add(struct clp_fh_list_entry *entry) 337static void __clp_add(struct clp_fh_list_entry *entry, void *data)
337{
338 if (!entry->vendor_id)
339 return;
340
341 clp_add_pci_device(entry->fid, entry->fh, entry->config_state);
342}
343
344static void __clp_rescan(struct clp_fh_list_entry *entry)
345{ 338{
346 struct zpci_dev *zdev; 339 struct zpci_dev *zdev;
347 340
@@ -349,22 +342,11 @@ static void __clp_rescan(struct clp_fh_list_entry *entry)
349 return; 342 return;
350 343
351 zdev = get_zdev_by_fid(entry->fid); 344 zdev = get_zdev_by_fid(entry->fid);
352 if (!zdev) { 345 if (!zdev)
353 clp_add_pci_device(entry->fid, entry->fh, entry->config_state); 346 clp_add_pci_device(entry->fid, entry->fh, entry->config_state);
354 return;
355 }
356
357 if (!entry->config_state) {
358 /*
359 * The handle is already disabled, that means no iota/irq freeing via
360 * the firmware interfaces anymore. Need to free resources manually
361 * (DMA memory, debug, sysfs)...
362 */
363 zpci_stop_device(zdev);
364 }
365} 347}
366 348
367static void __clp_update(struct clp_fh_list_entry *entry) 349static void __clp_update(struct clp_fh_list_entry *entry, void *data)
368{ 350{
369 struct zpci_dev *zdev; 351 struct zpci_dev *zdev;
370 352
@@ -387,7 +369,7 @@ int clp_scan_pci_devices(void)
387 if (!rrb) 369 if (!rrb)
388 return -ENOMEM; 370 return -ENOMEM;
389 371
390 rc = clp_list_pci(rrb, __clp_add); 372 rc = clp_list_pci(rrb, NULL, __clp_add);
391 373
392 clp_free_block(rrb); 374 clp_free_block(rrb);
393 return rc; 375 return rc;
@@ -398,11 +380,13 @@ int clp_rescan_pci_devices(void)
398 struct clp_req_rsp_list_pci *rrb; 380 struct clp_req_rsp_list_pci *rrb;
399 int rc; 381 int rc;
400 382
383 zpci_remove_reserved_devices();
384
401 rrb = clp_alloc_block(GFP_KERNEL); 385 rrb = clp_alloc_block(GFP_KERNEL);
402 if (!rrb) 386 if (!rrb)
403 return -ENOMEM; 387 return -ENOMEM;
404 388
405 rc = clp_list_pci(rrb, __clp_rescan); 389 rc = clp_list_pci(rrb, NULL, __clp_add);
406 390
407 clp_free_block(rrb); 391 clp_free_block(rrb);
408 return rc; 392 return rc;
@@ -417,7 +401,40 @@ int clp_rescan_pci_devices_simple(void)
417 if (!rrb) 401 if (!rrb)
418 return -ENOMEM; 402 return -ENOMEM;
419 403
420 rc = clp_list_pci(rrb, __clp_update); 404 rc = clp_list_pci(rrb, NULL, __clp_update);
405
406 clp_free_block(rrb);
407 return rc;
408}
409
410struct clp_state_data {
411 u32 fid;
412 enum zpci_state state;
413};
414
415static void __clp_get_state(struct clp_fh_list_entry *entry, void *data)
416{
417 struct clp_state_data *sd = data;
418
419 if (entry->fid != sd->fid)
420 return;
421
422 sd->state = entry->config_state;
423}
424
425int clp_get_state(u32 fid, enum zpci_state *state)
426{
427 struct clp_req_rsp_list_pci *rrb;
428 struct clp_state_data sd = {fid, ZPCI_FN_STATE_RESERVED};
429 int rc;
430
431 rrb = clp_alloc_block(GFP_KERNEL);
432 if (!rrb)
433 return -ENOMEM;
434
435 rc = clp_list_pci(rrb, &sd, __clp_get_state);
436 if (!rc)
437 *state = sd.state;
421 438
422 clp_free_block(rrb); 439 clp_free_block(rrb);
423 return rc; 440 return rc;
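
clp_list_pci() now threads an opaque data pointer to every callback, which is what lets the new clp_get_state() collect a per-fid result without globals. A self-contained sketch of that callback-plus-cookie pattern; the entry layout and the in-memory table are invented for the example.

#include <stdio.h>

struct entry { unsigned int fid; int state; };
struct state_query { unsigned int fid; int state; };

static void get_state_cb(struct entry *e, void *data)
{
	struct state_query *q = data;

	if (e->fid == q->fid)
		q->state = e->state;
}

/* stand-in for the list iterator: forwards the cookie to each callback */
static void list_entries(void (*cb)(struct entry *, void *), void *data)
{
	struct entry table[] = { { 0x11, 1 }, { 0x22, 0 } };

	for (unsigned int i = 0; i < 2; i++)
		cb(&table[i], data);
}

int main(void)
{
	struct state_query q = { .fid = 0x22, .state = -1 };

	list_entries(get_state_cb, &q);
	printf("fid %#x -> state %d\n", q.fid, q.state);
	return 0;
}
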
diff --git a/arch/s390/pci/pci_dma.c b/arch/s390/pci/pci_dma.c
index 9081a57fa340..8eb1cc341dab 100644
--- a/arch/s390/pci/pci_dma.c
+++ b/arch/s390/pci/pci_dma.c
@@ -601,7 +601,9 @@ void zpci_dma_exit_device(struct zpci_dev *zdev)
601 */ 601 */
602 WARN_ON(zdev->s390_domain); 602 WARN_ON(zdev->s390_domain);
603 603
604 zpci_unregister_ioat(zdev, 0); 604 if (zpci_unregister_ioat(zdev, 0))
605 return;
606
605 dma_cleanup_tables(zdev->dma_table); 607 dma_cleanup_tables(zdev->dma_table);
606 zdev->dma_table = NULL; 608 zdev->dma_table = NULL;
607 vfree(zdev->iommu_bitmap); 609 vfree(zdev->iommu_bitmap);
diff --git a/arch/s390/pci/pci_event.c b/arch/s390/pci/pci_event.c
index c2b27ad8e94d..0bbc04af4418 100644
--- a/arch/s390/pci/pci_event.c
+++ b/arch/s390/pci/pci_event.c
@@ -74,6 +74,7 @@ static void __zpci_event_availability(struct zpci_ccdf_avail *ccdf)
74{ 74{
75 struct zpci_dev *zdev = get_zdev_by_fid(ccdf->fid); 75 struct zpci_dev *zdev = get_zdev_by_fid(ccdf->fid);
76 struct pci_dev *pdev = NULL; 76 struct pci_dev *pdev = NULL;
77 enum zpci_state state;
77 int ret; 78 int ret;
78 79
79 if (zdev) 80 if (zdev)
@@ -108,6 +109,8 @@ static void __zpci_event_availability(struct zpci_ccdf_avail *ccdf)
108 clp_add_pci_device(ccdf->fid, ccdf->fh, 0); 109 clp_add_pci_device(ccdf->fid, ccdf->fh, 0);
109 break; 110 break;
110 case 0x0303: /* Deconfiguration requested */ 111 case 0x0303: /* Deconfiguration requested */
112 if (!zdev)
113 break;
111 if (pdev) 114 if (pdev)
112 pci_stop_and_remove_bus_device_locked(pdev); 115 pci_stop_and_remove_bus_device_locked(pdev);
113 116
@@ -121,7 +124,9 @@ static void __zpci_event_availability(struct zpci_ccdf_avail *ccdf)
121 zdev->state = ZPCI_FN_STATE_STANDBY; 124 zdev->state = ZPCI_FN_STATE_STANDBY;
122 125
123 break; 126 break;
124 case 0x0304: /* Configured -> Standby */ 127 case 0x0304: /* Configured -> Standby|Reserved */
128 if (!zdev)
129 break;
125 if (pdev) { 130 if (pdev) {
126 /* Give the driver a hint that the function is 131 /* Give the driver a hint that the function is
127 * already unusable. */ 132 * already unusable. */
@@ -132,6 +137,10 @@ static void __zpci_event_availability(struct zpci_ccdf_avail *ccdf)
132 zdev->fh = ccdf->fh; 137 zdev->fh = ccdf->fh;
133 zpci_disable_device(zdev); 138 zpci_disable_device(zdev);
134 zdev->state = ZPCI_FN_STATE_STANDBY; 139 zdev->state = ZPCI_FN_STATE_STANDBY;
140 if (!clp_get_state(ccdf->fid, &state) &&
141 state == ZPCI_FN_STATE_RESERVED) {
142 zpci_remove_device(zdev);
143 }
135 break; 144 break;
136 case 0x0306: /* 0x308 or 0x302 for multiple devices */ 145 case 0x0306: /* 0x308 or 0x302 for multiple devices */
137 clp_rescan_pci_devices(); 146 clp_rescan_pci_devices();
@@ -139,8 +148,7 @@ static void __zpci_event_availability(struct zpci_ccdf_avail *ccdf)
139 case 0x0308: /* Standby -> Reserved */ 148 case 0x0308: /* Standby -> Reserved */
140 if (!zdev) 149 if (!zdev)
141 break; 150 break;
142 pci_stop_root_bus(zdev->bus); 151 zpci_remove_device(zdev);
143 pci_remove_root_bus(zdev->bus);
144 break; 152 break;
145 default: 153 default:
146 break; 154 break;
diff --git a/arch/s390/pci/pci_insn.c b/arch/s390/pci/pci_insn.c
index fa8d7d4b9751..ea34086c8674 100644
--- a/arch/s390/pci/pci_insn.c
+++ b/arch/s390/pci/pci_insn.c
@@ -40,20 +40,20 @@ static inline u8 __mpcifc(u64 req, struct zpci_fib *fib, u8 *status)
40 return cc; 40 return cc;
41} 41}
42 42
43int zpci_mod_fc(u64 req, struct zpci_fib *fib) 43u8 zpci_mod_fc(u64 req, struct zpci_fib *fib, u8 *status)
44{ 44{
45 u8 cc, status; 45 u8 cc;
46 46
47 do { 47 do {
48 cc = __mpcifc(req, fib, &status); 48 cc = __mpcifc(req, fib, status);
49 if (cc == 2) 49 if (cc == 2)
50 msleep(ZPCI_INSN_BUSY_DELAY); 50 msleep(ZPCI_INSN_BUSY_DELAY);
51 } while (cc == 2); 51 } while (cc == 2);
52 52
53 if (cc) 53 if (cc)
54 zpci_err_insn(cc, status, req, 0); 54 zpci_err_insn(cc, *status, req, 0);
55 55
56 return (cc) ? -EIO : 0; 56 return cc;
57} 57}
58 58
59/* Refresh PCI Translations */ 59/* Refresh PCI Translations */
diff --git a/arch/s390/tools/gen_facilities.c b/arch/s390/tools/gen_facilities.c
index be63fbd699fd..025ea20fc4b4 100644
--- a/arch/s390/tools/gen_facilities.c
+++ b/arch/s390/tools/gen_facilities.c
@@ -34,8 +34,6 @@ static struct facility_def facility_defs[] = {
34 18, /* long displacement facility */ 34 18, /* long displacement facility */
35#endif 35#endif
36#ifdef CONFIG_HAVE_MARCH_Z9_109_FEATURES 36#ifdef CONFIG_HAVE_MARCH_Z9_109_FEATURES
37 7, /* stfle */
38 17, /* message security assist */
39 21, /* extended-immediate facility */ 37 21, /* extended-immediate facility */
40 25, /* store clock fast */ 38 25, /* store clock fast */
41#endif 39#endif
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index fb1e60f5002e..9c7951bb05ac 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -89,6 +89,20 @@ config PKEY
89 requires to have at least one CEX card in coprocessor mode 89 requires to have at least one CEX card in coprocessor mode
90 available at runtime. 90 available at runtime.
91 91
92config CRYPTO_PAES_S390
93 tristate "PAES cipher algorithms"
94 depends on S390
95 depends on ZCRYPT
96 depends on PKEY
97 select CRYPTO_ALGAPI
98 select CRYPTO_BLKCIPHER
99 help
100 This is the s390 hardware accelerated implementation of the
101 AES cipher algorithms for use with protected key.
102
103 Select this option if you want to use the paes cipher
104 for example to use protected key encrypted devices.
105
92config CRYPTO_SHA1_S390 106config CRYPTO_SHA1_S390
93 tristate "SHA1 digest algorithm" 107 tristate "SHA1 digest algorithm"
94 depends on S390 108 depends on S390
@@ -137,7 +151,6 @@ config CRYPTO_AES_S390
137 depends on S390 151 depends on S390
138 select CRYPTO_ALGAPI 152 select CRYPTO_ALGAPI
139 select CRYPTO_BLKCIPHER 153 select CRYPTO_BLKCIPHER
140 select PKEY
141 help 154 help
142 This is the s390 hardware accelerated implementation of the 155 This is the s390 hardware accelerated implementation of the
143 AES cipher algorithms (FIPS-197). 156 AES cipher algorithms (FIPS-197).
diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
index 82dab1692264..3aea55698165 100644
--- a/drivers/edac/amd64_edac.c
+++ b/drivers/edac/amd64_edac.c
@@ -782,24 +782,26 @@ static void debug_dump_dramcfg_low(struct amd64_pvt *pvt, u32 dclr, int chan)
782 782
783static void debug_display_dimm_sizes_df(struct amd64_pvt *pvt, u8 ctrl) 783static void debug_display_dimm_sizes_df(struct amd64_pvt *pvt, u8 ctrl)
784{ 784{
785 u32 *dcsb = ctrl ? pvt->csels[1].csbases : pvt->csels[0].csbases; 785 int dimm, size0, size1, cs0, cs1;
786 int dimm, size0, size1;
787 786
788 edac_printk(KERN_DEBUG, EDAC_MC, "UMC%d chip selects:\n", ctrl); 787 edac_printk(KERN_DEBUG, EDAC_MC, "UMC%d chip selects:\n", ctrl);
789 788
790 for (dimm = 0; dimm < 4; dimm++) { 789 for (dimm = 0; dimm < 4; dimm++) {
791 size0 = 0; 790 size0 = 0;
791 cs0 = dimm * 2;
792 792
793 if (dcsb[dimm*2] & DCSB_CS_ENABLE) 793 if (csrow_enabled(cs0, ctrl, pvt))
794 size0 = pvt->ops->dbam_to_cs(pvt, ctrl, 0, dimm); 794 size0 = pvt->ops->dbam_to_cs(pvt, ctrl, 0, cs0);
795 795
796 size1 = 0; 796 size1 = 0;
797 if (dcsb[dimm*2 + 1] & DCSB_CS_ENABLE) 797 cs1 = dimm * 2 + 1;
798 size1 = pvt->ops->dbam_to_cs(pvt, ctrl, 0, dimm); 798
799 if (csrow_enabled(cs1, ctrl, pvt))
800 size1 = pvt->ops->dbam_to_cs(pvt, ctrl, 0, cs1);
799 801
800 amd64_info(EDAC_MC ": %d: %5dMB %d: %5dMB\n", 802 amd64_info(EDAC_MC ": %d: %5dMB %d: %5dMB\n",
801 dimm * 2, size0, 803 cs0, size0,
802 dimm * 2 + 1, size1); 804 cs1, size1);
803 } 805 }
804} 806}
805 807
@@ -2756,26 +2758,22 @@ skip:
2756 * encompasses 2758 * encompasses
2757 * 2759 *
2758 */ 2760 */
2759static u32 get_csrow_nr_pages(struct amd64_pvt *pvt, u8 dct, int csrow_nr) 2761static u32 get_csrow_nr_pages(struct amd64_pvt *pvt, u8 dct, int csrow_nr_orig)
2760{ 2762{
2761 u32 cs_mode, nr_pages;
2762 u32 dbam = dct ? pvt->dbam1 : pvt->dbam0; 2763 u32 dbam = dct ? pvt->dbam1 : pvt->dbam0;
2764 int csrow_nr = csrow_nr_orig;
2765 u32 cs_mode, nr_pages;
2763 2766
2767 if (!pvt->umc)
2768 csrow_nr >>= 1;
2764 2769
2765 /* 2770 cs_mode = DBAM_DIMM(csrow_nr, dbam);
2766 * The math on this doesn't look right on the surface because x/2*4 can
2767 * be simplified to x*2 but this expression makes use of the fact that
2768 * it is integral math where 1/2=0. This intermediate value becomes the
2769 * number of bits to shift the DBAM register to extract the proper CSROW
2770 * field.
2771 */
2772 cs_mode = DBAM_DIMM(csrow_nr / 2, dbam);
2773 2771
2774 nr_pages = pvt->ops->dbam_to_cs(pvt, dct, cs_mode, (csrow_nr / 2)) 2772 nr_pages = pvt->ops->dbam_to_cs(pvt, dct, cs_mode, csrow_nr);
2775 << (20 - PAGE_SHIFT); 2773 nr_pages <<= 20 - PAGE_SHIFT;
2776 2774
2777 edac_dbg(0, "csrow: %d, channel: %d, DBAM idx: %d\n", 2775 edac_dbg(0, "csrow: %d, channel: %d, DBAM idx: %d\n",
2778 csrow_nr, dct, cs_mode); 2776 csrow_nr_orig, dct, cs_mode);
2779 edac_dbg(0, "nr_pages/channel: %u\n", nr_pages); 2777 edac_dbg(0, "nr_pages/channel: %u\n", nr_pages);
2780 2778
2781 return nr_pages; 2779 return nr_pages;
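
In get_csrow_nr_pages(), dbam_to_cs() reports the chip-select size in megabytes, so the shift by (20 - PAGE_SHIFT) converts megabytes to pages: with 4 KiB pages that is a shift by 8, i.e. x256. A one-screen check of that arithmetic (values are examples only):

#include <stdio.h>

int main(void)
{
	unsigned int page_shift = 12;	/* 4 KiB pages               */
	unsigned int size_mb = 2048;	/* e.g. a 2 GiB chip select  */
	unsigned int nr_pages = size_mb << (20 - page_shift);

	printf("%u MB -> %u pages\n", size_mb, nr_pages);
	return 0;
}
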
diff --git a/drivers/firmware/efi/efi-pstore.c b/drivers/firmware/efi/efi-pstore.c
index ed3137c1ceb0..ab3a951a17e6 100644
--- a/drivers/firmware/efi/efi-pstore.c
+++ b/drivers/firmware/efi/efi-pstore.c
@@ -155,19 +155,14 @@ static int efi_pstore_scan_sysfs_exit(struct efivar_entry *pos,
155 * efi_pstore_sysfs_entry_iter 155 * efi_pstore_sysfs_entry_iter
156 * 156 *
157 * @record: pstore record to pass to callback 157 * @record: pstore record to pass to callback
158 * @pos: entry to begin iterating from
159 * 158 *
160 * You MUST call efivar_enter_iter_begin() before this function, and 159 * You MUST call efivar_enter_iter_begin() before this function, and
161 * efivar_entry_iter_end() afterwards. 160 * efivar_entry_iter_end() afterwards.
162 * 161 *
163 * It is possible to begin iteration from an arbitrary entry within
164 * the list by passing @pos. @pos is updated on return to point to
165 * the next entry of the last one passed to efi_pstore_read_func().
166 * To begin iterating from the beginning of the list @pos must be %NULL.
167 */ 162 */
168static int efi_pstore_sysfs_entry_iter(struct pstore_record *record, 163static int efi_pstore_sysfs_entry_iter(struct pstore_record *record)
169 struct efivar_entry **pos)
170{ 164{
165 struct efivar_entry **pos = (struct efivar_entry **)&record->psi->data;
171 struct efivar_entry *entry, *n; 166 struct efivar_entry *entry, *n;
172 struct list_head *head = &efivar_sysfs_list; 167 struct list_head *head = &efivar_sysfs_list;
173 int size = 0; 168 int size = 0;
@@ -218,7 +213,6 @@ static int efi_pstore_sysfs_entry_iter(struct pstore_record *record,
218 */ 213 */
219static ssize_t efi_pstore_read(struct pstore_record *record) 214static ssize_t efi_pstore_read(struct pstore_record *record)
220{ 215{
221 struct efivar_entry *entry = (struct efivar_entry *)record->psi->data;
222 ssize_t size; 216 ssize_t size;
223 217
224 record->buf = kzalloc(EFIVARS_DATA_SIZE_MAX, GFP_KERNEL); 218 record->buf = kzalloc(EFIVARS_DATA_SIZE_MAX, GFP_KERNEL);
@@ -229,7 +223,7 @@ static ssize_t efi_pstore_read(struct pstore_record *record)
229 size = -EINTR; 223 size = -EINTR;
230 goto out; 224 goto out;
231 } 225 }
232 size = efi_pstore_sysfs_entry_iter(record, &entry); 226 size = efi_pstore_sysfs_entry_iter(record);
233 efivar_entry_iter_end(); 227 efivar_entry_iter_end();
234 228
235out: 229out:
diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
index 19581d783d8e..d034d8cd7d22 100644
--- a/drivers/net/dsa/mv88e6xxx/chip.c
+++ b/drivers/net/dsa/mv88e6xxx/chip.c
@@ -849,6 +849,9 @@ static uint64_t _mv88e6xxx_get_ethtool_stat(struct mv88e6xxx_chip *chip,
849 mv88e6xxx_g1_stats_read(chip, reg, &low); 849 mv88e6xxx_g1_stats_read(chip, reg, &low);
850 if (s->sizeof_stat == 8) 850 if (s->sizeof_stat == 8)
851 mv88e6xxx_g1_stats_read(chip, reg + 1, &high); 851 mv88e6xxx_g1_stats_read(chip, reg + 1, &high);
852 break;
853 default:
854 return UINT64_MAX;
852 } 855 }
853 value = (((u64)high) << 16) | low; 856 value = (((u64)high) << 16) | low;
854 return value; 857 return value;
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c
index 4ee15ff06a44..faeb4935ef3e 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c
@@ -200,29 +200,18 @@ err_exit:
200static int hw_atl_a0_hw_offload_set(struct aq_hw_s *self, 200static int hw_atl_a0_hw_offload_set(struct aq_hw_s *self,
201 struct aq_nic_cfg_s *aq_nic_cfg) 201 struct aq_nic_cfg_s *aq_nic_cfg)
202{ 202{
203 int err = 0;
204
205 /* TX checksums offloads*/ 203 /* TX checksums offloads*/
206 tpo_ipv4header_crc_offload_en_set(self, 1); 204 tpo_ipv4header_crc_offload_en_set(self, 1);
207 tpo_tcp_udp_crc_offload_en_set(self, 1); 205 tpo_tcp_udp_crc_offload_en_set(self, 1);
208 if (err < 0)
209 goto err_exit;
210 206
211 /* RX checksums offloads*/ 207 /* RX checksums offloads*/
212 rpo_ipv4header_crc_offload_en_set(self, 1); 208 rpo_ipv4header_crc_offload_en_set(self, 1);
213 rpo_tcp_udp_crc_offload_en_set(self, 1); 209 rpo_tcp_udp_crc_offload_en_set(self, 1);
214 if (err < 0)
215 goto err_exit;
216 210
217 /* LSO offloads*/ 211 /* LSO offloads*/
218 tdm_large_send_offload_en_set(self, 0xFFFFFFFFU); 212 tdm_large_send_offload_en_set(self, 0xFFFFFFFFU);
219 if (err < 0)
220 goto err_exit;
221
222 err = aq_hw_err_from_flags(self);
223 213
224err_exit: 214 return aq_hw_err_from_flags(self);
225 return err;
226} 215}
227 216
228static int hw_atl_a0_hw_init_tx_path(struct aq_hw_s *self) 217static int hw_atl_a0_hw_init_tx_path(struct aq_hw_s *self)
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
index 42150708191d..1bceb7358e5c 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
@@ -200,25 +200,18 @@ err_exit:
200static int hw_atl_b0_hw_offload_set(struct aq_hw_s *self, 200static int hw_atl_b0_hw_offload_set(struct aq_hw_s *self,
201 struct aq_nic_cfg_s *aq_nic_cfg) 201 struct aq_nic_cfg_s *aq_nic_cfg)
202{ 202{
203 int err = 0;
204 unsigned int i; 203 unsigned int i;
205 204
206 /* TX checksums offloads*/ 205 /* TX checksums offloads*/
207 tpo_ipv4header_crc_offload_en_set(self, 1); 206 tpo_ipv4header_crc_offload_en_set(self, 1);
208 tpo_tcp_udp_crc_offload_en_set(self, 1); 207 tpo_tcp_udp_crc_offload_en_set(self, 1);
209 if (err < 0)
210 goto err_exit;
211 208
212 /* RX checksums offloads*/ 209 /* RX checksums offloads*/
213 rpo_ipv4header_crc_offload_en_set(self, 1); 210 rpo_ipv4header_crc_offload_en_set(self, 1);
214 rpo_tcp_udp_crc_offload_en_set(self, 1); 211 rpo_tcp_udp_crc_offload_en_set(self, 1);
215 if (err < 0)
216 goto err_exit;
217 212
218 /* LSO offloads*/ 213 /* LSO offloads*/
219 tdm_large_send_offload_en_set(self, 0xFFFFFFFFU); 214 tdm_large_send_offload_en_set(self, 0xFFFFFFFFU);
220 if (err < 0)
221 goto err_exit;
222 215
223/* LRO offloads */ 216/* LRO offloads */
224 { 217 {
@@ -245,10 +238,7 @@ static int hw_atl_b0_hw_offload_set(struct aq_hw_s *self,
245 238
246 rpo_lro_en_set(self, aq_nic_cfg->is_lro ? 0xFFFFFFFFU : 0U); 239 rpo_lro_en_set(self, aq_nic_cfg->is_lro ? 0xFFFFFFFFU : 0U);
247 } 240 }
248 err = aq_hw_err_from_flags(self); 241 return aq_hw_err_from_flags(self);
249
250err_exit:
251 return err;
252} 242}
253 243
254static int hw_atl_b0_hw_init_tx_path(struct aq_hw_s *self) 244static int hw_atl_b0_hw_init_tx_path(struct aq_hw_s *self)
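
In both hw_atl_a0 and hw_atl_b0 the removed err checks were dead code: the register helpers return void, so err stayed 0 and the goto err_exit branches could never fire. The rewrite drops the variable and returns aq_hw_err_from_flags() directly. A compilable sketch of the resulting shape, with placeholder helper names:

#include <stdio.h>

/* Placeholder register writers; like the hw_atl helpers they return void,
 * so there is no per-call error code to check. */
static void enable_tx_csum(void) { }
static void enable_rx_csum(void) { }
static void enable_lso(void)     { }

/* Hardware error state is collected out-of-band and read back once. */
static int hw_err_from_flags(void) { return 0; }

/* After the cleanup: no dead "if (err < 0) goto err_exit" branches,
 * just configure and report the accumulated hardware error state. */
static int offload_set(void)
{
	enable_tx_csum();
	enable_rx_csum();
	enable_lso();
	return hw_err_from_flags();
}

int main(void)
{
	printf("offload_set() = %d\n", offload_set());
	return 0;
}
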
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index 703205475524..83aab1e4c8c8 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -2862,12 +2862,10 @@ static void mlx4_enable_msi_x(struct mlx4_dev *dev)
2862 int port = 0; 2862 int port = 0;
2863 2863
2864 if (msi_x) { 2864 if (msi_x) {
2865 int nreq = dev->caps.num_ports * num_online_cpus() + 1; 2865 int nreq = min3(dev->caps.num_ports *
2866 2866 (int)num_online_cpus() + 1,
2867 nreq = min_t(int, dev->caps.num_eqs - dev->caps.reserved_eqs, 2867 dev->caps.num_eqs - dev->caps.reserved_eqs,
2868 nreq); 2868 MAX_MSIX);
2869 if (nreq > MAX_MSIX)
2870 nreq = MAX_MSIX;
2871 2869
2872 entries = kcalloc(nreq, sizeof *entries, GFP_KERNEL); 2870 entries = kcalloc(nreq, sizeof *entries, GFP_KERNEL);
2873 if (!entries) 2871 if (!entries)
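
The three-step clamp above (compute nreq, min_t() against available EQs, then cap at MAX_MSIX) collapses into one min3() call. A standalone sketch of that clamping, with a local min3 macro standing in for the kernel's and illustrative numbers only:

#include <stdio.h>

/* Local stand-ins for the kernel's min()/min3() helpers. */
#define min(a, b)     ((a) < (b) ? (a) : (b))
#define min3(a, b, c) min(min((a), (b)), (c))

#define MAX_MSIX 128	/* illustrative cap, not necessarily the mlx4 value */

int main(void)
{
	int num_ports = 2, online_cpus = 32;
	int num_eqs = 64, reserved_eqs = 4;

	/* One expression replaces compute + min_t() + explicit cap. */
	int nreq = min3(num_ports * online_cpus + 1,
			num_eqs - reserved_eqs,
			MAX_MSIX);

	printf("requesting %d MSI-X vectors\n", nreq);
	return 0;
}
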
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index 0099a3e397bc..2fd044b23875 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -1003,7 +1003,7 @@ int mlx5e_create_direct_tirs(struct mlx5e_priv *priv);
1003void mlx5e_destroy_direct_tirs(struct mlx5e_priv *priv); 1003void mlx5e_destroy_direct_tirs(struct mlx5e_priv *priv);
1004void mlx5e_destroy_rqt(struct mlx5e_priv *priv, struct mlx5e_rqt *rqt); 1004void mlx5e_destroy_rqt(struct mlx5e_priv *priv, struct mlx5e_rqt *rqt);
1005 1005
1006int mlx5e_create_ttc_table(struct mlx5e_priv *priv, u32 underlay_qpn); 1006int mlx5e_create_ttc_table(struct mlx5e_priv *priv);
1007void mlx5e_destroy_ttc_table(struct mlx5e_priv *priv); 1007void mlx5e_destroy_ttc_table(struct mlx5e_priv *priv);
1008 1008
1009int mlx5e_create_tis(struct mlx5_core_dev *mdev, int tc, 1009int mlx5e_create_tis(struct mlx5_core_dev *mdev, int tc,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
index ce7b09d72ff6..8209affa75c3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@ -794,7 +794,6 @@ static void get_supported(u32 eth_proto_cap,
794 ptys2ethtool_supported_port(link_ksettings, eth_proto_cap); 794 ptys2ethtool_supported_port(link_ksettings, eth_proto_cap);
795 ptys2ethtool_supported_link(supported, eth_proto_cap); 795 ptys2ethtool_supported_link(supported, eth_proto_cap);
796 ethtool_link_ksettings_add_link_mode(link_ksettings, supported, Pause); 796 ethtool_link_ksettings_add_link_mode(link_ksettings, supported, Pause);
797 ethtool_link_ksettings_add_link_mode(link_ksettings, supported, Asym_Pause);
798} 797}
799 798
800static void get_advertising(u32 eth_proto_cap, u8 tx_pause, 799static void get_advertising(u32 eth_proto_cap, u8 tx_pause,
@@ -804,7 +803,7 @@ static void get_advertising(u32 eth_proto_cap, u8 tx_pause,
804 unsigned long *advertising = link_ksettings->link_modes.advertising; 803 unsigned long *advertising = link_ksettings->link_modes.advertising;
805 804
806 ptys2ethtool_adver_link(advertising, eth_proto_cap); 805 ptys2ethtool_adver_link(advertising, eth_proto_cap);
807 if (tx_pause) 806 if (rx_pause)
808 ethtool_link_ksettings_add_link_mode(link_ksettings, advertising, Pause); 807 ethtool_link_ksettings_add_link_mode(link_ksettings, advertising, Pause);
809 if (tx_pause ^ rx_pause) 808 if (tx_pause ^ rx_pause)
810 ethtool_link_ksettings_add_link_mode(link_ksettings, advertising, Asym_Pause); 809 ethtool_link_ksettings_add_link_mode(link_ksettings, advertising, Asym_Pause);
@@ -849,6 +848,8 @@ static int mlx5e_get_link_ksettings(struct net_device *netdev,
849 struct mlx5e_priv *priv = netdev_priv(netdev); 848 struct mlx5e_priv *priv = netdev_priv(netdev);
850 struct mlx5_core_dev *mdev = priv->mdev; 849 struct mlx5_core_dev *mdev = priv->mdev;
851 u32 out[MLX5_ST_SZ_DW(ptys_reg)] = {0}; 850 u32 out[MLX5_ST_SZ_DW(ptys_reg)] = {0};
851 u32 rx_pause = 0;
852 u32 tx_pause = 0;
852 u32 eth_proto_cap; 853 u32 eth_proto_cap;
853 u32 eth_proto_admin; 854 u32 eth_proto_admin;
854 u32 eth_proto_lp; 855 u32 eth_proto_lp;
@@ -871,11 +872,13 @@ static int mlx5e_get_link_ksettings(struct net_device *netdev,
871 an_disable_admin = MLX5_GET(ptys_reg, out, an_disable_admin); 872 an_disable_admin = MLX5_GET(ptys_reg, out, an_disable_admin);
872 an_status = MLX5_GET(ptys_reg, out, an_status); 873 an_status = MLX5_GET(ptys_reg, out, an_status);
873 874
875 mlx5_query_port_pause(mdev, &rx_pause, &tx_pause);
876
874 ethtool_link_ksettings_zero_link_mode(link_ksettings, supported); 877 ethtool_link_ksettings_zero_link_mode(link_ksettings, supported);
875 ethtool_link_ksettings_zero_link_mode(link_ksettings, advertising); 878 ethtool_link_ksettings_zero_link_mode(link_ksettings, advertising);
876 879
877 get_supported(eth_proto_cap, link_ksettings); 880 get_supported(eth_proto_cap, link_ksettings);
878 get_advertising(eth_proto_admin, 0, 0, link_ksettings); 881 get_advertising(eth_proto_admin, tx_pause, rx_pause, link_ksettings);
879 get_speed_duplex(netdev, eth_proto_oper, link_ksettings); 882 get_speed_duplex(netdev, eth_proto_oper, link_ksettings);
880 883
881 eth_proto_oper = eth_proto_oper ? eth_proto_oper : eth_proto_cap; 884 eth_proto_oper = eth_proto_oper ? eth_proto_oper : eth_proto_cap;
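
The en_ethtool.c hunks query the real pause configuration and map it onto the advertised link modes: Pause follows rx_pause, and Asym_Pause is advertised when rx and tx disagree, instead of the previous hard-coded zeros (and the unconditional Asym_Pause in get_supported). A small sketch of that mapping with plain booleans, no mlx5 or ethtool structures:

#include <stdbool.h>
#include <stdio.h>

struct adv {
	bool pause;       /* symmetric pause advertised */
	bool asym_pause;  /* asymmetric pause advertised */
};

/* Mirror of the fixed get_advertising() logic: Pause tracks rx_pause,
 * Asym_Pause is set only when the two directions differ. */
static struct adv advertise_pause(bool rx_pause, bool tx_pause)
{
	struct adv a = {
		.pause      = rx_pause,
		.asym_pause = rx_pause != tx_pause,
	};
	return a;
}

int main(void)
{
	struct adv a = advertise_pause(true, false);
	printf("Pause=%d Asym_Pause=%d\n", a.pause, a.asym_pause);
	return 0;
}
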
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
index 576d6787b484..53ed58320a24 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
@@ -800,7 +800,7 @@ void mlx5e_destroy_ttc_table(struct mlx5e_priv *priv)
800 mlx5e_destroy_flow_table(&ttc->ft); 800 mlx5e_destroy_flow_table(&ttc->ft);
801} 801}
802 802
803int mlx5e_create_ttc_table(struct mlx5e_priv *priv, u32 underlay_qpn) 803int mlx5e_create_ttc_table(struct mlx5e_priv *priv)
804{ 804{
805 struct mlx5e_ttc_table *ttc = &priv->fs.ttc; 805 struct mlx5e_ttc_table *ttc = &priv->fs.ttc;
806 struct mlx5_flow_table_attr ft_attr = {}; 806 struct mlx5_flow_table_attr ft_attr = {};
@@ -810,7 +810,6 @@ int mlx5e_create_ttc_table(struct mlx5e_priv *priv, u32 underlay_qpn)
810 ft_attr.max_fte = MLX5E_TTC_TABLE_SIZE; 810 ft_attr.max_fte = MLX5E_TTC_TABLE_SIZE;
811 ft_attr.level = MLX5E_TTC_FT_LEVEL; 811 ft_attr.level = MLX5E_TTC_FT_LEVEL;
812 ft_attr.prio = MLX5E_NIC_PRIO; 812 ft_attr.prio = MLX5E_NIC_PRIO;
813 ft_attr.underlay_qpn = underlay_qpn;
814 813
815 ft->t = mlx5_create_flow_table(priv->fs.ns, &ft_attr); 814 ft->t = mlx5_create_flow_table(priv->fs.ns, &ft_attr);
816 if (IS_ERR(ft->t)) { 815 if (IS_ERR(ft->t)) {
@@ -1147,7 +1146,7 @@ int mlx5e_create_flow_steering(struct mlx5e_priv *priv)
1147 priv->netdev->hw_features &= ~NETIF_F_NTUPLE; 1146 priv->netdev->hw_features &= ~NETIF_F_NTUPLE;
1148 } 1147 }
1149 1148
1150 err = mlx5e_create_ttc_table(priv, 0); 1149 err = mlx5e_create_ttc_table(priv);
1151 if (err) { 1150 if (err) {
1152 netdev_err(priv->netdev, "Failed to create ttc table, err=%d\n", 1151 netdev_err(priv->netdev, "Failed to create ttc table, err=%d\n",
1153 err); 1152 err);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index a61b71b6fff3..41cd22a223dc 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -2976,7 +2976,7 @@ static int mlx5e_setup_tc(struct net_device *netdev, u8 tc)
2976 new_channels.params = priv->channels.params; 2976 new_channels.params = priv->channels.params;
2977 new_channels.params.num_tc = tc ? tc : 1; 2977 new_channels.params.num_tc = tc ? tc : 1;
2978 2978
2979 if (test_bit(MLX5E_STATE_OPENED, &priv->state)) { 2979 if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
2980 priv->channels.params = new_channels.params; 2980 priv->channels.params = new_channels.params;
2981 goto out; 2981 goto out;
2982 } 2982 }
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
index 19e3d2fc2099..fcec7bedd3cd 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
@@ -40,28 +40,25 @@
40#include "eswitch.h" 40#include "eswitch.h"
41 41
42int mlx5_cmd_update_root_ft(struct mlx5_core_dev *dev, 42int mlx5_cmd_update_root_ft(struct mlx5_core_dev *dev,
43 struct mlx5_flow_table *ft) 43 struct mlx5_flow_table *ft, u32 underlay_qpn)
44{ 44{
45 u32 in[MLX5_ST_SZ_DW(set_flow_table_root_in)] = {0}; 45 u32 in[MLX5_ST_SZ_DW(set_flow_table_root_in)] = {0};
46 u32 out[MLX5_ST_SZ_DW(set_flow_table_root_out)] = {0}; 46 u32 out[MLX5_ST_SZ_DW(set_flow_table_root_out)] = {0};
47 47
48 if ((MLX5_CAP_GEN(dev, port_type) == MLX5_CAP_PORT_TYPE_IB) && 48 if ((MLX5_CAP_GEN(dev, port_type) == MLX5_CAP_PORT_TYPE_IB) &&
49 ft->underlay_qpn == 0) 49 underlay_qpn == 0)
50 return 0; 50 return 0;
51 51
52 MLX5_SET(set_flow_table_root_in, in, opcode, 52 MLX5_SET(set_flow_table_root_in, in, opcode,
53 MLX5_CMD_OP_SET_FLOW_TABLE_ROOT); 53 MLX5_CMD_OP_SET_FLOW_TABLE_ROOT);
54 MLX5_SET(set_flow_table_root_in, in, table_type, ft->type); 54 MLX5_SET(set_flow_table_root_in, in, table_type, ft->type);
55 MLX5_SET(set_flow_table_root_in, in, table_id, ft->id); 55 MLX5_SET(set_flow_table_root_in, in, table_id, ft->id);
56 MLX5_SET(set_flow_table_root_in, in, underlay_qpn, underlay_qpn);
56 if (ft->vport) { 57 if (ft->vport) {
57 MLX5_SET(set_flow_table_root_in, in, vport_number, ft->vport); 58 MLX5_SET(set_flow_table_root_in, in, vport_number, ft->vport);
58 MLX5_SET(set_flow_table_root_in, in, other_vport, 1); 59 MLX5_SET(set_flow_table_root_in, in, other_vport, 1);
59 } 60 }
60 61
61 if ((MLX5_CAP_GEN(dev, port_type) == MLX5_CAP_PORT_TYPE_IB) &&
62 ft->underlay_qpn != 0)
63 MLX5_SET(set_flow_table_root_in, in, underlay_qpn, ft->underlay_qpn);
64
65 return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); 62 return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
66} 63}
67 64
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h
index 8fad80688536..0f98a7cf4877 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h
@@ -71,7 +71,8 @@ int mlx5_cmd_delete_fte(struct mlx5_core_dev *dev,
71 unsigned int index); 71 unsigned int index);
72 72
73int mlx5_cmd_update_root_ft(struct mlx5_core_dev *dev, 73int mlx5_cmd_update_root_ft(struct mlx5_core_dev *dev,
74 struct mlx5_flow_table *ft); 74 struct mlx5_flow_table *ft,
75 u32 underlay_qpn);
75 76
76int mlx5_cmd_fc_alloc(struct mlx5_core_dev *dev, u16 *id); 77int mlx5_cmd_fc_alloc(struct mlx5_core_dev *dev, u16 *id);
77int mlx5_cmd_fc_free(struct mlx5_core_dev *dev, u16 id); 78int mlx5_cmd_fc_free(struct mlx5_core_dev *dev, u16 id);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
index b8a176503d38..0e487e8ca634 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
@@ -650,7 +650,7 @@ static int update_root_ft_create(struct mlx5_flow_table *ft, struct fs_prio
650 if (ft->level >= min_level) 650 if (ft->level >= min_level)
651 return 0; 651 return 0;
652 652
653 err = mlx5_cmd_update_root_ft(root->dev, ft); 653 err = mlx5_cmd_update_root_ft(root->dev, ft, root->underlay_qpn);
654 if (err) 654 if (err)
655 mlx5_core_warn(root->dev, "Update root flow table of id=%u failed\n", 655 mlx5_core_warn(root->dev, "Update root flow table of id=%u failed\n",
656 ft->id); 656 ft->id);
@@ -818,8 +818,6 @@ static struct mlx5_flow_table *__mlx5_create_flow_table(struct mlx5_flow_namespa
818 goto unlock_root; 818 goto unlock_root;
819 } 819 }
820 820
821 ft->underlay_qpn = ft_attr->underlay_qpn;
822
823 tree_init_node(&ft->node, 1, del_flow_table); 821 tree_init_node(&ft->node, 1, del_flow_table);
824 log_table_sz = ft->max_fte ? ilog2(ft->max_fte) : 0; 822 log_table_sz = ft->max_fte ? ilog2(ft->max_fte) : 0;
825 next_ft = find_next_chained_ft(fs_prio); 823 next_ft = find_next_chained_ft(fs_prio);
@@ -1489,7 +1487,8 @@ static int update_root_ft_destroy(struct mlx5_flow_table *ft)
1489 1487
1490 new_root_ft = find_next_ft(ft); 1488 new_root_ft = find_next_ft(ft);
1491 if (new_root_ft) { 1489 if (new_root_ft) {
1492 int err = mlx5_cmd_update_root_ft(root->dev, new_root_ft); 1490 int err = mlx5_cmd_update_root_ft(root->dev, new_root_ft,
1491 root->underlay_qpn);
1493 1492
1494 if (err) { 1493 if (err) {
1495 mlx5_core_warn(root->dev, "Update root flow table of id=%u failed\n", 1494 mlx5_core_warn(root->dev, "Update root flow table of id=%u failed\n",
@@ -2062,3 +2061,21 @@ err:
2062 mlx5_cleanup_fs(dev); 2061 mlx5_cleanup_fs(dev);
2063 return err; 2062 return err;
2064} 2063}
2064
2065int mlx5_fs_add_rx_underlay_qpn(struct mlx5_core_dev *dev, u32 underlay_qpn)
2066{
2067 struct mlx5_flow_root_namespace *root = dev->priv.steering->root_ns;
2068
2069 root->underlay_qpn = underlay_qpn;
2070 return 0;
2071}
2072EXPORT_SYMBOL(mlx5_fs_add_rx_underlay_qpn);
2073
2074int mlx5_fs_remove_rx_underlay_qpn(struct mlx5_core_dev *dev, u32 underlay_qpn)
2075{
2076 struct mlx5_flow_root_namespace *root = dev->priv.steering->root_ns;
2077
2078 root->underlay_qpn = 0;
2079 return 0;
2080}
2081EXPORT_SYMBOL(mlx5_fs_remove_rx_underlay_qpn);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
index 81eafc7b9dd9..990acee6fb09 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
@@ -118,7 +118,6 @@ struct mlx5_flow_table {
118 /* FWD rules that point on this flow table */ 118 /* FWD rules that point on this flow table */
119 struct list_head fwd_rules; 119 struct list_head fwd_rules;
120 u32 flags; 120 u32 flags;
121 u32 underlay_qpn;
122}; 121};
123 122
124struct mlx5_fc_cache { 123struct mlx5_fc_cache {
@@ -195,6 +194,7 @@ struct mlx5_flow_root_namespace {
195 struct mlx5_flow_table *root_ft; 194 struct mlx5_flow_table *root_ft;
196 /* Should be held when chaining flow tables */ 195 /* Should be held when chaining flow tables */
197 struct mutex chain_lock; 196 struct mutex chain_lock;
197 u32 underlay_qpn;
198}; 198};
199 199
200int mlx5_init_fc_stats(struct mlx5_core_dev *dev); 200int mlx5_init_fc_stats(struct mlx5_core_dev *dev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib.c
index 019c230da498..cc1858752e70 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib.c
@@ -66,6 +66,10 @@ static void mlx5i_init(struct mlx5_core_dev *mdev,
66 66
67 mlx5e_build_nic_params(mdev, &priv->channels.params, profile->max_nch(mdev)); 67 mlx5e_build_nic_params(mdev, &priv->channels.params, profile->max_nch(mdev));
68 68
69 /* Override RQ params as IPoIB supports only LINKED LIST RQ for now */
70 mlx5e_set_rq_type_params(mdev, &priv->channels.params, MLX5_WQ_TYPE_LINKED_LIST);
71 priv->channels.params.lro_en = false;
72
69 mutex_init(&priv->state_lock); 73 mutex_init(&priv->state_lock);
70 74
71 netdev->hw_features |= NETIF_F_SG; 75 netdev->hw_features |= NETIF_F_SG;
@@ -156,6 +160,8 @@ out:
156 160
157static void mlx5i_destroy_underlay_qp(struct mlx5_core_dev *mdev, struct mlx5_core_qp *qp) 161static void mlx5i_destroy_underlay_qp(struct mlx5_core_dev *mdev, struct mlx5_core_qp *qp)
158{ 162{
163 mlx5_fs_remove_rx_underlay_qpn(mdev, qp->qpn);
164
159 mlx5_core_destroy_qp(mdev, qp); 165 mlx5_core_destroy_qp(mdev, qp);
160} 166}
161 167
@@ -170,6 +176,8 @@ static int mlx5i_init_tx(struct mlx5e_priv *priv)
170 return err; 176 return err;
171 } 177 }
172 178
179 mlx5_fs_add_rx_underlay_qpn(priv->mdev, ipriv->qp.qpn);
180
173 err = mlx5e_create_tis(priv->mdev, 0 /* tc */, ipriv->qp.qpn, &priv->tisn[0]); 181 err = mlx5e_create_tis(priv->mdev, 0 /* tc */, ipriv->qp.qpn, &priv->tisn[0]);
174 if (err) { 182 if (err) {
175 mlx5_core_warn(priv->mdev, "create tis failed, %d\n", err); 183 mlx5_core_warn(priv->mdev, "create tis failed, %d\n", err);
@@ -189,7 +197,6 @@ static void mlx5i_cleanup_tx(struct mlx5e_priv *priv)
189 197
190static int mlx5i_create_flow_steering(struct mlx5e_priv *priv) 198static int mlx5i_create_flow_steering(struct mlx5e_priv *priv)
191{ 199{
192 struct mlx5i_priv *ipriv = priv->ppriv;
193 int err; 200 int err;
194 201
195 priv->fs.ns = mlx5_get_flow_namespace(priv->mdev, 202 priv->fs.ns = mlx5_get_flow_namespace(priv->mdev,
@@ -205,7 +212,7 @@ static int mlx5i_create_flow_steering(struct mlx5e_priv *priv)
205 priv->netdev->hw_features &= ~NETIF_F_NTUPLE; 212 priv->netdev->hw_features &= ~NETIF_F_NTUPLE;
206 } 213 }
207 214
208 err = mlx5e_create_ttc_table(priv, ipriv->qp.qpn); 215 err = mlx5e_create_ttc_table(priv);
209 if (err) { 216 if (err) {
210 netdev_err(priv->netdev, "Failed to create ttc table, err=%d\n", 217 netdev_err(priv->netdev, "Failed to create ttc table, err=%d\n",
211 err); 218 err);
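
Taken together, the mlx5 hunks move the IPoIB underlay QPN off the individual flow table and onto the root flow-steering namespace: mlx5e_create_ttc_table() loses its underlay_qpn argument, mlx5_cmd_update_root_ft() now receives the value explicitly, and IPoIB registers/unregisters it through the new mlx5_fs_add_rx_underlay_qpn()/mlx5_fs_remove_rx_underlay_qpn() helpers. A toy sketch of that ownership change; every type and name below is a simplified stand-in, not the mlx5 API:

#include <stdint.h>
#include <stdio.h>

/* Before: every table carried its own underlay QPN.
 * After:  the root namespace owns it and hands it to the firmware
 *         command whenever the root table changes. */
struct root_namespace {
	uint32_t root_table_id;
	uint32_t underlay_qpn;	/* 0 means "not set" */
};

static void cmd_update_root_ft(uint32_t table_id, uint32_t underlay_qpn)
{
	printf("SET_FLOW_TABLE_ROOT table=%u underlay_qpn=%u\n",
	       table_id, underlay_qpn);
}

static void add_rx_underlay_qpn(struct root_namespace *root, uint32_t qpn)
{
	root->underlay_qpn = qpn;
}

static void remove_rx_underlay_qpn(struct root_namespace *root)
{
	root->underlay_qpn = 0;
}

int main(void)
{
	struct root_namespace root = { .root_table_id = 7 };

	add_rx_underlay_qpn(&root, 0x123);		/* QP brought up */
	cmd_update_root_ft(root.root_table_id, root.underlay_qpn);
	remove_rx_underlay_qpn(&root);			/* QP torn down */
	return 0;
}
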
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c
index b8d5270359cd..e30676515529 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c
@@ -247,7 +247,7 @@ nx_fw_cmd_set_mtu(struct netxen_adapter *adapter, int mtu)
247 cmd.req.arg3 = 0; 247 cmd.req.arg3 = 0;
248 248
249 if (recv_ctx->state == NX_HOST_CTX_STATE_ACTIVE) 249 if (recv_ctx->state == NX_HOST_CTX_STATE_ACTIVE)
250 netxen_issue_cmd(adapter, &cmd); 250 rcode = netxen_issue_cmd(adapter, &cmd);
251 251
252 if (rcode != NX_RCODE_SUCCESS) 252 if (rcode != NX_RCODE_SUCCESS)
253 return -EIO; 253 return -EIO;
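
The netxen one-liner stores the command's return code into rcode; previously rcode kept its initial value, so the != NX_RCODE_SUCCESS check below never reflected the command's outcome. A tiny sketch of the pattern, with invented names and codes:

#include <stdio.h>

#define RCODE_SUCCESS 0

/* Stand-in for a firmware command that reports success or failure. */
static int issue_cmd(int should_fail)
{
	return should_fail ? 5 : RCODE_SUCCESS;
}

static int set_mtu(int ctx_active, int should_fail)
{
	int rcode = RCODE_SUCCESS;

	/* The fix: capture the return value so the check below tests the
	 * command's actual result, not the initializer. */
	if (ctx_active)
		rcode = issue_cmd(should_fail);

	if (rcode != RCODE_SUCCESS)
		return -5;	/* -EIO in the driver */

	return 0;
}

int main(void)
{
	printf("%d\n", set_mtu(1, 1));
	printf("%d\n", set_mtu(1, 0));
	return 0;
}
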
diff --git a/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c b/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c
index 67200c5498ab..0a8fde629991 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c
@@ -983,7 +983,7 @@ void qed_set_rfs_mode_disable(struct qed_hwfn *p_hwfn,
983 memset(&camline, 0, sizeof(union gft_cam_line_union)); 983 memset(&camline, 0, sizeof(union gft_cam_line_union));
984 qed_wr(p_hwfn, p_ptt, PRS_REG_GFT_CAM + CAM_LINE_SIZE * pf_id, 984 qed_wr(p_hwfn, p_ptt, PRS_REG_GFT_CAM + CAM_LINE_SIZE * pf_id,
985 camline.cam_line_mapped.camline); 985 camline.cam_line_mapped.camline);
986 memset(&ramline, 0, sizeof(union gft_cam_line_union)); 986 memset(&ramline, 0, sizeof(ramline));
987 987
988 for (i = 0; i < RAM_LINE_SIZE / REG_SIZE; i++) { 988 for (i = 0; i < RAM_LINE_SIZE / REG_SIZE; i++) {
989 u32 hw_addr = PRS_REG_GFT_PROFILE_MASK_RAM; 989 u32 hw_addr = PRS_REG_GFT_PROFILE_MASK_RAM;
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
index 49bad00a0f8f..7245b1072518 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
@@ -37,8 +37,8 @@
37 37
38#define _QLCNIC_LINUX_MAJOR 5 38#define _QLCNIC_LINUX_MAJOR 5
39#define _QLCNIC_LINUX_MINOR 3 39#define _QLCNIC_LINUX_MINOR 3
40#define _QLCNIC_LINUX_SUBVERSION 65 40#define _QLCNIC_LINUX_SUBVERSION 66
41#define QLCNIC_LINUX_VERSIONID "5.3.65" 41#define QLCNIC_LINUX_VERSIONID "5.3.66"
42#define QLCNIC_DRV_IDC_VER 0x01 42#define QLCNIC_DRV_IDC_VER 0x01
43#define QLCNIC_DRIVER_VERSION ((_QLCNIC_LINUX_MAJOR << 16) |\ 43#define QLCNIC_DRIVER_VERSION ((_QLCNIC_LINUX_MAJOR << 16) |\
44 (_QLCNIC_LINUX_MINOR << 8) | (_QLCNIC_LINUX_SUBVERSION)) 44 (_QLCNIC_LINUX_MINOR << 8) | (_QLCNIC_LINUX_SUBVERSION))
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
index 718bf58a7da6..4fb68797630e 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
@@ -3168,6 +3168,40 @@ int qlcnic_83xx_flash_read32(struct qlcnic_adapter *adapter, u32 flash_addr,
3168 return 0; 3168 return 0;
3169} 3169}
3170 3170
3171void qlcnic_83xx_get_port_type(struct qlcnic_adapter *adapter)
3172{
3173 struct qlcnic_hardware_context *ahw = adapter->ahw;
3174 struct qlcnic_cmd_args cmd;
3175 u32 config;
3176 int err;
3177
3178 err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_LINK_STATUS);
3179 if (err)
3180 return;
3181
3182 err = qlcnic_issue_cmd(adapter, &cmd);
3183 if (err) {
3184 dev_info(&adapter->pdev->dev,
3185 "Get Link Status Command failed: 0x%x\n", err);
3186 goto out;
3187 } else {
3188 config = cmd.rsp.arg[3];
3189
3190 switch (QLC_83XX_SFP_MODULE_TYPE(config)) {
3191 case QLC_83XX_MODULE_FIBRE_1000BASE_SX:
3192 case QLC_83XX_MODULE_FIBRE_1000BASE_LX:
3193 case QLC_83XX_MODULE_FIBRE_1000BASE_CX:
3194 case QLC_83XX_MODULE_TP_1000BASE_T:
3195 ahw->port_type = QLCNIC_GBE;
3196 break;
3197 default:
3198 ahw->port_type = QLCNIC_XGBE;
3199 }
3200 }
3201out:
3202 qlcnic_free_mbx_args(&cmd);
3203}
3204
3171int qlcnic_83xx_test_link(struct qlcnic_adapter *adapter) 3205int qlcnic_83xx_test_link(struct qlcnic_adapter *adapter)
3172{ 3206{
3173 u8 pci_func; 3207 u8 pci_func;
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h
index 3dfe8e27b51c..b75a81246856 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h
@@ -637,6 +637,7 @@ void qlcnic_83xx_get_pauseparam(struct qlcnic_adapter *,
637int qlcnic_83xx_set_pauseparam(struct qlcnic_adapter *, 637int qlcnic_83xx_set_pauseparam(struct qlcnic_adapter *,
638 struct ethtool_pauseparam *); 638 struct ethtool_pauseparam *);
639int qlcnic_83xx_test_link(struct qlcnic_adapter *); 639int qlcnic_83xx_test_link(struct qlcnic_adapter *);
640void qlcnic_83xx_get_port_type(struct qlcnic_adapter *adapter);
640int qlcnic_83xx_reg_test(struct qlcnic_adapter *); 641int qlcnic_83xx_reg_test(struct qlcnic_adapter *);
641int qlcnic_83xx_get_regs_len(struct qlcnic_adapter *); 642int qlcnic_83xx_get_regs_len(struct qlcnic_adapter *);
642int qlcnic_83xx_get_registers(struct qlcnic_adapter *, u32 *); 643int qlcnic_83xx_get_registers(struct qlcnic_adapter *, u32 *);
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
index 9a869c15d8bf..7f7deeaf1cf0 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
@@ -486,6 +486,9 @@ static int qlcnic_set_link_ksettings(struct net_device *dev,
486 u32 ret = 0; 486 u32 ret = 0;
487 struct qlcnic_adapter *adapter = netdev_priv(dev); 487 struct qlcnic_adapter *adapter = netdev_priv(dev);
488 488
489 if (qlcnic_83xx_check(adapter))
490 qlcnic_83xx_get_port_type(adapter);
491
489 if (adapter->ahw->port_type != QLCNIC_GBE) 492 if (adapter->ahw->port_type != QLCNIC_GBE)
490 return -EOPNOTSUPP; 493 return -EOPNOTSUPP;
491 494
diff --git a/drivers/net/ethernet/qualcomm/qca_spi.c b/drivers/net/ethernet/qualcomm/qca_spi.c
index 513e6c74e199..24ca7df15d07 100644
--- a/drivers/net/ethernet/qualcomm/qca_spi.c
+++ b/drivers/net/ethernet/qualcomm/qca_spi.c
@@ -296,8 +296,9 @@ qcaspi_receive(struct qcaspi *qca)
296 296
297 /* Allocate rx SKB if we don't have one available. */ 297 /* Allocate rx SKB if we don't have one available. */
298 if (!qca->rx_skb) { 298 if (!qca->rx_skb) {
299 qca->rx_skb = netdev_alloc_skb(net_dev, 299 qca->rx_skb = netdev_alloc_skb_ip_align(net_dev,
300 net_dev->mtu + VLAN_ETH_HLEN); 300 net_dev->mtu +
301 VLAN_ETH_HLEN);
301 if (!qca->rx_skb) { 302 if (!qca->rx_skb) {
302 netdev_dbg(net_dev, "out of RX resources\n"); 303 netdev_dbg(net_dev, "out of RX resources\n");
303 qca->stats.out_of_mem++; 304 qca->stats.out_of_mem++;
@@ -377,7 +378,7 @@ qcaspi_receive(struct qcaspi *qca)
377 qca->rx_skb, qca->rx_skb->dev); 378 qca->rx_skb, qca->rx_skb->dev);
378 qca->rx_skb->ip_summed = CHECKSUM_UNNECESSARY; 379 qca->rx_skb->ip_summed = CHECKSUM_UNNECESSARY;
379 netif_rx_ni(qca->rx_skb); 380 netif_rx_ni(qca->rx_skb);
380 qca->rx_skb = netdev_alloc_skb(net_dev, 381 qca->rx_skb = netdev_alloc_skb_ip_align(net_dev,
381 net_dev->mtu + VLAN_ETH_HLEN); 382 net_dev->mtu + VLAN_ETH_HLEN);
382 if (!qca->rx_skb) { 383 if (!qca->rx_skb) {
383 netdev_dbg(net_dev, "out of RX resources\n"); 384 netdev_dbg(net_dev, "out of RX resources\n");
@@ -759,7 +760,8 @@ qcaspi_netdev_init(struct net_device *dev)
759 if (!qca->rx_buffer) 760 if (!qca->rx_buffer)
760 return -ENOBUFS; 761 return -ENOBUFS;
761 762
762 qca->rx_skb = netdev_alloc_skb(dev, qca->net_dev->mtu + VLAN_ETH_HLEN); 763 qca->rx_skb = netdev_alloc_skb_ip_align(dev, qca->net_dev->mtu +
764 VLAN_ETH_HLEN);
763 if (!qca->rx_skb) { 765 if (!qca->rx_skb) {
764 kfree(qca->rx_buffer); 766 kfree(qca->rx_buffer);
765 netdev_info(qca->net_dev, "Failed to allocate RX sk_buff.\n"); 767 netdev_info(qca->net_dev, "Failed to allocate RX sk_buff.\n");
diff --git a/drivers/net/ethernet/sfc/nic.h b/drivers/net/ethernet/sfc/nic.h
index 7b916aa21bde..4d7fb8af880d 100644
--- a/drivers/net/ethernet/sfc/nic.h
+++ b/drivers/net/ethernet/sfc/nic.h
@@ -18,8 +18,12 @@
18#include "mcdi.h" 18#include "mcdi.h"
19 19
20enum { 20enum {
21 EFX_REV_SIENA_A0 = 0, 21 /* Revisions 0-2 were Falcon A0, A1 and B0 respectively.
22 EFX_REV_HUNT_A0 = 1, 22 * They are not supported by this driver but these revision numbers
23 * form part of the ethtool API for register dumping.
24 */
25 EFX_REV_SIENA_A0 = 3,
26 EFX_REV_HUNT_A0 = 4,
23}; 27};
24 28
25static inline int efx_nic_rev(struct efx_nic *efx) 29static inline int efx_nic_rev(struct efx_nic *efx)
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index cd8c60132390..a74c481401c4 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -3725,7 +3725,7 @@ static void sysfs_display_ring(void *head, int size, int extend_desc,
3725 ep++; 3725 ep++;
3726 } else { 3726 } else {
3727 seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n", 3727 seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
3728 i, (unsigned int)virt_to_phys(ep), 3728 i, (unsigned int)virt_to_phys(p),
3729 le32_to_cpu(p->des0), le32_to_cpu(p->des1), 3729 le32_to_cpu(p->des0), le32_to_cpu(p->des1),
3730 le32_to_cpu(p->des2), le32_to_cpu(p->des3)); 3730 le32_to_cpu(p->des2), le32_to_cpu(p->des3));
3731 p++; 3731 p++;
diff --git a/drivers/net/ethernet/sun/ldmvsw.c b/drivers/net/ethernet/sun/ldmvsw.c
index 5a90fed06260..5b56c24b6ed2 100644
--- a/drivers/net/ethernet/sun/ldmvsw.c
+++ b/drivers/net/ethernet/sun/ldmvsw.c
@@ -411,13 +411,14 @@ static int vsw_port_remove(struct vio_dev *vdev)
411 411
412 if (port) { 412 if (port) {
413 del_timer_sync(&port->vio.timer); 413 del_timer_sync(&port->vio.timer);
414 del_timer_sync(&port->clean_timer);
414 415
415 napi_disable(&port->napi); 416 napi_disable(&port->napi);
417 unregister_netdev(port->dev);
416 418
417 list_del_rcu(&port->list); 419 list_del_rcu(&port->list);
418 420
419 synchronize_rcu(); 421 synchronize_rcu();
420 del_timer_sync(&port->clean_timer);
421 spin_lock_irqsave(&port->vp->lock, flags); 422 spin_lock_irqsave(&port->vp->lock, flags);
422 sunvnet_port_rm_txq_common(port); 423 sunvnet_port_rm_txq_common(port);
423 spin_unlock_irqrestore(&port->vp->lock, flags); 424 spin_unlock_irqrestore(&port->vp->lock, flags);
@@ -427,7 +428,6 @@ static int vsw_port_remove(struct vio_dev *vdev)
427 428
428 dev_set_drvdata(&vdev->dev, NULL); 429 dev_set_drvdata(&vdev->dev, NULL);
429 430
430 unregister_netdev(port->dev);
431 free_netdev(port->dev); 431 free_netdev(port->dev);
432 } 432 }
433 433
diff --git a/drivers/net/ethernet/ti/netcp_core.c b/drivers/net/ethernet/ti/netcp_core.c
index 729a7da90b5b..e6222e535019 100644
--- a/drivers/net/ethernet/ti/netcp_core.c
+++ b/drivers/net/ethernet/ti/netcp_core.c
@@ -1353,9 +1353,10 @@ int netcp_txpipe_open(struct netcp_tx_pipe *tx_pipe)
1353 1353
1354 tx_pipe->dma_channel = knav_dma_open_channel(dev, 1354 tx_pipe->dma_channel = knav_dma_open_channel(dev,
1355 tx_pipe->dma_chan_name, &config); 1355 tx_pipe->dma_chan_name, &config);
1356 if (IS_ERR_OR_NULL(tx_pipe->dma_channel)) { 1356 if (IS_ERR(tx_pipe->dma_channel)) {
1357 dev_err(dev, "failed opening tx chan(%s)\n", 1357 dev_err(dev, "failed opening tx chan(%s)\n",
1358 tx_pipe->dma_chan_name); 1358 tx_pipe->dma_chan_name);
1359 ret = PTR_ERR(tx_pipe->dma_channel);
1359 goto err; 1360 goto err;
1360 } 1361 }
1361 1362
@@ -1673,9 +1674,10 @@ static int netcp_setup_navigator_resources(struct net_device *ndev)
1673 1674
1674 netcp->rx_channel = knav_dma_open_channel(netcp->netcp_device->device, 1675 netcp->rx_channel = knav_dma_open_channel(netcp->netcp_device->device,
1675 netcp->dma_chan_name, &config); 1676 netcp->dma_chan_name, &config);
1676 if (IS_ERR_OR_NULL(netcp->rx_channel)) { 1677 if (IS_ERR(netcp->rx_channel)) {
1677 dev_err(netcp->ndev_dev, "failed opening rx chan(%s\n", 1678 dev_err(netcp->ndev_dev, "failed opening rx chan(%s\n",
1678 netcp->dma_chan_name); 1679 netcp->dma_chan_name);
1680 ret = PTR_ERR(netcp->rx_channel);
1679 goto fail; 1681 goto fail;
1680 } 1682 }
1681 1683
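
knav_dma_open_channel() reports failure with an ERR_PTR-encoded pointer rather than NULL, so the checks switch from IS_ERR_OR_NULL() to IS_ERR() and the error code is propagated with PTR_ERR() instead of a generic failure. A standalone sketch of the ERR_PTR convention with minimal local definitions, simplified from the kernel's err.h:

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

/* Minimal userspace stand-ins for the kernel's ERR_PTR helpers. */
#define MAX_ERRNO 4095
static inline void *ERR_PTR(long err) { return (void *)err; }
static inline long PTR_ERR(const void *p) { return (long)p; }
static inline int IS_ERR(const void *p)
{
	return (uintptr_t)p >= (uintptr_t)-MAX_ERRNO;
}

/* Pretend channel-open helper: encodes -ENODEV on failure. */
static void *open_channel(int fail)
{
	static int channel;
	return fail ? ERR_PTR(-ENODEV) : (void *)&channel;
}

int main(void)
{
	void *chan = open_channel(1);

	if (IS_ERR(chan)) {
		/* Propagate the real error instead of a catch-all code. */
		printf("open failed: %ld\n", PTR_ERR(chan));
		return 1;
	}
	return 0;
}
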
diff --git a/drivers/net/ethernet/ti/netcp_ethss.c b/drivers/net/ethernet/ti/netcp_ethss.c
index 897176fc5043..dd92950a4615 100644
--- a/drivers/net/ethernet/ti/netcp_ethss.c
+++ b/drivers/net/ethernet/ti/netcp_ethss.c
@@ -2651,7 +2651,6 @@ static int gbe_hwtstamp_set(struct gbe_intf *gbe_intf, struct ifreq *ifr)
2651 case HWTSTAMP_FILTER_NONE: 2651 case HWTSTAMP_FILTER_NONE:
2652 cpts_rx_enable(cpts, 0); 2652 cpts_rx_enable(cpts, 0);
2653 break; 2653 break;
2654 case HWTSTAMP_FILTER_ALL:
2655 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: 2654 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
2656 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: 2655 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
2657 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: 2656 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
diff --git a/drivers/net/irda/irda-usb.c b/drivers/net/irda/irda-usb.c
index 8716b8c07feb..6f3c805f7211 100644
--- a/drivers/net/irda/irda-usb.c
+++ b/drivers/net/irda/irda-usb.c
@@ -1077,7 +1077,7 @@ static int stir421x_patch_device(struct irda_usb_cb *self)
1077 * are "42101001.sb" or "42101002.sb" 1077 * are "42101001.sb" or "42101002.sb"
1078 */ 1078 */
1079 sprintf(stir421x_fw_name, "4210%4X.sb", 1079 sprintf(stir421x_fw_name, "4210%4X.sb",
1080 self->usbdev->descriptor.bcdDevice); 1080 le16_to_cpu(self->usbdev->descriptor.bcdDevice));
1081 ret = request_firmware(&fw, stir421x_fw_name, &self->usbdev->dev); 1081 ret = request_firmware(&fw, stir421x_fw_name, &self->usbdev->dev);
1082 if (ret < 0) 1082 if (ret < 0)
1083 return ret; 1083 return ret;
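
USB descriptor fields such as bcdDevice are little-endian on the wire, so they must pass through le16_to_cpu() before being formatted; otherwise the firmware file name comes out wrong on big-endian hosts (the ch9200 hunk further down fixes the same class of bug for idVendor/idProduct). A host-side sketch of the conversion, using a local helper in place of the kernel's le16_to_cpu:

#include <stdint.h>
#include <stdio.h>

/* Simplified little-endian-to-host conversion; the kernel provides
 * le16_to_cpu() for this. */
static uint16_t my_le16_to_cpu(const uint8_t raw[2])
{
	return (uint16_t)(raw[0] | (raw[1] << 8));
}

int main(void)
{
	/* bcdDevice 0x1002 as it appears on the wire (LSB first). */
	const uint8_t wire[2] = { 0x02, 0x10 };
	char fw_name[16];

	snprintf(fw_name, sizeof(fw_name), "4210%4X.sb",
		 (unsigned int)my_le16_to_cpu(wire));
	printf("%s\n", fw_name);	/* "42101002.sb" on any endianness */
	return 0;
}
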
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index b34eaaae03fd..346ad2ff3998 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -789,10 +789,12 @@ static int macvlan_change_mtu(struct net_device *dev, int new_mtu)
789 */ 789 */
790static struct lock_class_key macvlan_netdev_addr_lock_key; 790static struct lock_class_key macvlan_netdev_addr_lock_key;
791 791
792#define ALWAYS_ON_FEATURES \ 792#define ALWAYS_ON_OFFLOADS \
793 (NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_GSO_SOFTWARE | NETIF_F_LLTX | \ 793 (NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_GSO_SOFTWARE | \
794 NETIF_F_GSO_ROBUST) 794 NETIF_F_GSO_ROBUST)
795 795
796#define ALWAYS_ON_FEATURES (ALWAYS_ON_OFFLOADS | NETIF_F_LLTX)
797
796#define MACVLAN_FEATURES \ 798#define MACVLAN_FEATURES \
797 (NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST | \ 799 (NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST | \
798 NETIF_F_GSO | NETIF_F_TSO | NETIF_F_UFO | NETIF_F_LRO | \ 800 NETIF_F_GSO | NETIF_F_TSO | NETIF_F_UFO | NETIF_F_LRO | \
@@ -827,6 +829,7 @@ static int macvlan_init(struct net_device *dev)
827 dev->features |= ALWAYS_ON_FEATURES; 829 dev->features |= ALWAYS_ON_FEATURES;
828 dev->hw_features |= NETIF_F_LRO; 830 dev->hw_features |= NETIF_F_LRO;
829 dev->vlan_features = lowerdev->vlan_features & MACVLAN_FEATURES; 831 dev->vlan_features = lowerdev->vlan_features & MACVLAN_FEATURES;
832 dev->vlan_features |= ALWAYS_ON_OFFLOADS;
830 dev->gso_max_size = lowerdev->gso_max_size; 833 dev->gso_max_size = lowerdev->gso_max_size;
831 dev->gso_max_segs = lowerdev->gso_max_segs; 834 dev->gso_max_segs = lowerdev->gso_max_segs;
832 dev->hard_header_len = lowerdev->hard_header_len; 835 dev->hard_header_len = lowerdev->hard_header_len;
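
The macro split keeps NETIF_F_LLTX in the always-on set for the macvlan device itself while adding only the checksum/GSO offloads (ALWAYS_ON_OFFLOADS) to vlan_features, since LLTX is a device-local property that stacked VLANs should not inherit. A bit-flag sketch of that composition; the flag values here are made up:

#include <stdio.h>

/* Made-up feature bits, standing in for NETIF_F_* flags. */
#define F_SG     (1u << 0)
#define F_CSUM   (1u << 1)
#define F_GSO    (1u << 2)
#define F_LLTX   (1u << 3)	/* lockless TX: device-local, not inheritable */

#define ALWAYS_ON_OFFLOADS (F_SG | F_CSUM | F_GSO)
#define ALWAYS_ON_FEATURES (ALWAYS_ON_OFFLOADS | F_LLTX)

int main(void)
{
	unsigned int features = 0, vlan_features = 0;

	features |= ALWAYS_ON_FEATURES;	     /* the device itself gets LLTX */
	vlan_features |= ALWAYS_ON_OFFLOADS; /* stacked VLANs get offloads only */

	printf("features=0x%x vlan_features=0x%x\n", features, vlan_features);
	return 0;
}
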
diff --git a/drivers/net/phy/mdio-mux.c b/drivers/net/phy/mdio-mux.c
index 963838d4fac1..599ce24c514f 100644
--- a/drivers/net/phy/mdio-mux.c
+++ b/drivers/net/phy/mdio-mux.c
@@ -122,10 +122,9 @@ int mdio_mux_init(struct device *dev,
122 pb = devm_kzalloc(dev, sizeof(*pb), GFP_KERNEL); 122 pb = devm_kzalloc(dev, sizeof(*pb), GFP_KERNEL);
123 if (pb == NULL) { 123 if (pb == NULL) {
124 ret_val = -ENOMEM; 124 ret_val = -ENOMEM;
125 goto err_parent_bus; 125 goto err_pb_kz;
126 } 126 }
127 127
128
129 pb->switch_data = data; 128 pb->switch_data = data;
130 pb->switch_fn = switch_fn; 129 pb->switch_fn = switch_fn;
131 pb->current_child = -1; 130 pb->current_child = -1;
@@ -154,6 +153,7 @@ int mdio_mux_init(struct device *dev,
154 cb->mii_bus = mdiobus_alloc(); 153 cb->mii_bus = mdiobus_alloc();
155 if (!cb->mii_bus) { 154 if (!cb->mii_bus) {
156 ret_val = -ENOMEM; 155 ret_val = -ENOMEM;
156 devm_kfree(dev, cb);
157 of_node_put(child_bus_node); 157 of_node_put(child_bus_node);
158 break; 158 break;
159 } 159 }
@@ -170,7 +170,6 @@ int mdio_mux_init(struct device *dev,
170 mdiobus_free(cb->mii_bus); 170 mdiobus_free(cb->mii_bus);
171 devm_kfree(dev, cb); 171 devm_kfree(dev, cb);
172 } else { 172 } else {
173 of_node_get(child_bus_node);
174 cb->next = pb->children; 173 cb->next = pb->children;
175 pb->children = cb; 174 pb->children = cb;
176 } 175 }
@@ -181,9 +180,11 @@ int mdio_mux_init(struct device *dev,
181 return 0; 180 return 0;
182 } 181 }
183 182
183 devm_kfree(dev, pb);
184err_pb_kz:
184 /* balance the reference of_mdio_find_bus() took */ 185 /* balance the reference of_mdio_find_bus() took */
185 put_device(&pb->mii_bus->dev); 186 if (!mux_bus)
186 187 put_device(&parent_bus->dev);
187err_parent_bus: 188err_parent_bus:
188 of_node_put(parent_bus_node); 189 of_node_put(parent_bus_node);
189 return ret_val; 190 return ret_val;
diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c
index a898e5c4ef1b..8e73f5f36e71 100644
--- a/drivers/net/phy/mdio_bus.c
+++ b/drivers/net/phy/mdio_bus.c
@@ -364,9 +364,6 @@ int __mdiobus_register(struct mii_bus *bus, struct module *owner)
364 364
365 mutex_init(&bus->mdio_lock); 365 mutex_init(&bus->mdio_lock);
366 366
367 if (bus->reset)
368 bus->reset(bus);
369
370 /* de-assert bus level PHY GPIO resets */ 367 /* de-assert bus level PHY GPIO resets */
371 if (bus->num_reset_gpios > 0) { 368 if (bus->num_reset_gpios > 0) {
372 bus->reset_gpiod = devm_kcalloc(&bus->dev, 369 bus->reset_gpiod = devm_kcalloc(&bus->dev,
@@ -396,6 +393,9 @@ int __mdiobus_register(struct mii_bus *bus, struct module *owner)
396 } 393 }
397 } 394 }
398 395
396 if (bus->reset)
397 bus->reset(bus);
398
399 for (i = 0; i < PHY_MAX_ADDR; i++) { 399 for (i = 0; i < PHY_MAX_ADDR; i++) {
400 if ((bus->phy_mask & (1 << i)) == 0) { 400 if ((bus->phy_mask & (1 << i)) == 0) {
401 struct phy_device *phydev; 401 struct phy_device *phydev;
diff --git a/drivers/net/usb/ch9200.c b/drivers/net/usb/ch9200.c
index c4f1c363e24b..9df3c1ffff35 100644
--- a/drivers/net/usb/ch9200.c
+++ b/drivers/net/usb/ch9200.c
@@ -310,8 +310,8 @@ static int get_mac_address(struct usbnet *dev, unsigned char *data)
310 int rd_mac_len = 0; 310 int rd_mac_len = 0;
311 311
312 netdev_dbg(dev->net, "get_mac_address:\n\tusbnet VID:%0x PID:%0x\n", 312 netdev_dbg(dev->net, "get_mac_address:\n\tusbnet VID:%0x PID:%0x\n",
313 dev->udev->descriptor.idVendor, 313 le16_to_cpu(dev->udev->descriptor.idVendor),
314 dev->udev->descriptor.idProduct); 314 le16_to_cpu(dev->udev->descriptor.idProduct));
315 315
316 memset(mac_addr, 0, sizeof(mac_addr)); 316 memset(mac_addr, 0, sizeof(mac_addr));
317 rd_mac_len = control_read(dev, REQUEST_READ, 0, 317 rd_mac_len = control_read(dev, REQUEST_READ, 0,
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
index 25bc764ae7dc..d1c7029ded7c 100644
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -2962,6 +2962,11 @@ vmxnet3_force_close(struct vmxnet3_adapter *adapter)
2962 /* we need to enable NAPI, otherwise dev_close will deadlock */ 2962 /* we need to enable NAPI, otherwise dev_close will deadlock */
2963 for (i = 0; i < adapter->num_rx_queues; i++) 2963 for (i = 0; i < adapter->num_rx_queues; i++)
2964 napi_enable(&adapter->rx_queue[i].napi); 2964 napi_enable(&adapter->rx_queue[i].napi);
2965 /*
2966 * Need to clear the quiesce bit to ensure that vmxnet3_close
2967 * can quiesce the device properly
2968 */
2969 clear_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state);
2965 dev_close(adapter->netdev); 2970 dev_close(adapter->netdev);
2966} 2971}
2967 2972
diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c
index ceda5861da78..db882493875c 100644
--- a/drivers/net/vrf.c
+++ b/drivers/net/vrf.c
@@ -989,6 +989,7 @@ static u32 vrf_fib_table(const struct net_device *dev)
989 989
990static int vrf_rcv_finish(struct net *net, struct sock *sk, struct sk_buff *skb) 990static int vrf_rcv_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
991{ 991{
992 kfree_skb(skb);
992 return 0; 993 return 0;
993} 994}
994 995
@@ -998,7 +999,7 @@ static struct sk_buff *vrf_rcv_nfhook(u8 pf, unsigned int hook,
998{ 999{
999 struct net *net = dev_net(dev); 1000 struct net *net = dev_net(dev);
1000 1001
1001 if (NF_HOOK(pf, hook, net, NULL, skb, dev, NULL, vrf_rcv_finish) < 0) 1002 if (nf_hook(pf, hook, net, NULL, skb, dev, NULL, vrf_rcv_finish) != 1)
1002 skb = NULL; /* kfree_skb(skb) handled by nf code */ 1003 skb = NULL; /* kfree_skb(skb) handled by nf code */
1003 1004
1004 return skb; 1005 return skb;
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index 6ffc482550c1..7b61adb6270c 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -1934,8 +1934,7 @@ abort_transaction_no_dev_fatal:
1934 xennet_disconnect_backend(info); 1934 xennet_disconnect_backend(info);
1935 xennet_destroy_queues(info); 1935 xennet_destroy_queues(info);
1936 out: 1936 out:
1937 unregister_netdev(info->netdev); 1937 device_unregister(&dev->dev);
1938 xennet_free_netdev(info->netdev);
1939 return err; 1938 return err;
1940} 1939}
1941 1940
diff --git a/drivers/s390/block/Kconfig b/drivers/s390/block/Kconfig
index 0acb8c2f9475..31f014b57bfc 100644
--- a/drivers/s390/block/Kconfig
+++ b/drivers/s390/block/Kconfig
@@ -82,10 +82,3 @@ config SCM_BLOCK
82 82
83 To compile this driver as a module, choose M here: the 83 To compile this driver as a module, choose M here: the
84 module will be called scm_block. 84 module will be called scm_block.
85
86config SCM_BLOCK_CLUSTER_WRITE
87 def_bool y
88 prompt "SCM force cluster writes"
89 depends on SCM_BLOCK
90 help
91 Force writes to Storage Class Memory (SCM) to be in done in clusters.
diff --git a/drivers/s390/block/Makefile b/drivers/s390/block/Makefile
index c2f4e673e031..b64e2b32c753 100644
--- a/drivers/s390/block/Makefile
+++ b/drivers/s390/block/Makefile
@@ -19,7 +19,4 @@ obj-$(CONFIG_BLK_DEV_XPRAM) += xpram.o
19obj-$(CONFIG_DCSSBLK) += dcssblk.o 19obj-$(CONFIG_DCSSBLK) += dcssblk.o
20 20
21scm_block-objs := scm_drv.o scm_blk.o 21scm_block-objs := scm_drv.o scm_blk.o
22ifdef CONFIG_SCM_BLOCK_CLUSTER_WRITE
23scm_block-objs += scm_blk_cluster.o
24endif
25obj-$(CONFIG_SCM_BLOCK) += scm_block.o 22obj-$(CONFIG_SCM_BLOCK) += scm_block.o
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index 6fb3fd5efc11..c72ac57940f4 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -1965,8 +1965,12 @@ static int __dasd_device_is_unusable(struct dasd_device *device,
1965{ 1965{
1966 int mask = ~(DASD_STOPPED_DC_WAIT | DASD_UNRESUMED_PM); 1966 int mask = ~(DASD_STOPPED_DC_WAIT | DASD_UNRESUMED_PM);
1967 1967
1968 if (test_bit(DASD_FLAG_OFFLINE, &device->flags)) { 1968 if (test_bit(DASD_FLAG_OFFLINE, &device->flags) &&
1969 /* dasd is being set offline. */ 1969 !test_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) {
1970 /*
1971 * dasd is being set offline
1972 * but it is no safe offline where we have to allow I/O
1973 */
1970 return 1; 1974 return 1;
1971 } 1975 }
1972 if (device->stopped) { 1976 if (device->stopped) {
@@ -3562,57 +3566,69 @@ int dasd_generic_set_offline(struct ccw_device *cdev)
3562 else 3566 else
3563 pr_warn("%s: The DASD cannot be set offline while it is in use\n", 3567 pr_warn("%s: The DASD cannot be set offline while it is in use\n",
3564 dev_name(&cdev->dev)); 3568 dev_name(&cdev->dev));
3565 clear_bit(DASD_FLAG_OFFLINE, &device->flags); 3569 rc = -EBUSY;
3566 goto out_busy; 3570 goto out_err;
3567 } 3571 }
3568 } 3572 }
3569 3573
3570 if (test_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) { 3574 /*
3571 /* 3575 * Test if the offline processing is already running and exit if so.
3572 * safe offline already running 3576 * If a safe offline is being processed this could only be a normal
3573 * could only be called by normal offline so safe_offline flag 3577 * offline that should be able to overtake the safe offline and
3574 * needs to be removed to run normal offline and kill all I/O 3578 * cancel any I/O we do not want to wait for any longer
3575 */ 3579 */
3576 if (test_and_set_bit(DASD_FLAG_OFFLINE, &device->flags)) 3580 if (test_bit(DASD_FLAG_OFFLINE, &device->flags)) {
3577 /* Already doing normal offline processing */ 3581 if (test_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) {
3578 goto out_busy; 3582 clear_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING,
3579 else 3583 &device->flags);
3580 clear_bit(DASD_FLAG_SAFE_OFFLINE, &device->flags); 3584 } else {
3581 } else { 3585 rc = -EBUSY;
3582 if (test_bit(DASD_FLAG_OFFLINE, &device->flags)) 3586 goto out_err;
3583 /* Already doing offline processing */ 3587 }
3584 goto out_busy;
3585 } 3588 }
3586
3587 set_bit(DASD_FLAG_OFFLINE, &device->flags); 3589 set_bit(DASD_FLAG_OFFLINE, &device->flags);
3588 spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
3589 3590
3590 /* 3591 /*
3591 * if safe_offline called set safe_offline_running flag and 3592 * if safe_offline is called set safe_offline_running flag and
3592 * clear safe_offline so that a call to normal offline 3593 * clear safe_offline so that a call to normal offline
3593 * can overrun safe_offline processing 3594 * can overrun safe_offline processing
3594 */ 3595 */
3595 if (test_and_clear_bit(DASD_FLAG_SAFE_OFFLINE, &device->flags) && 3596 if (test_and_clear_bit(DASD_FLAG_SAFE_OFFLINE, &device->flags) &&
3596 !test_and_set_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) { 3597 !test_and_set_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) {
3598 /* need to unlock here to wait for outstanding I/O */
3599 spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
3597 /* 3600 /*
3598 * If we want to set the device safe offline all IO operations 3601 * If we want to set the device safe offline all IO operations
3599 * should be finished before continuing the offline process 3602 * should be finished before continuing the offline process
3600 * so sync bdev first and then wait for our queues to become 3603 * so sync bdev first and then wait for our queues to become
3601 * empty 3604 * empty
3602 */ 3605 */
3603 /* sync blockdev and partitions */
3604 if (device->block) { 3606 if (device->block) {
3605 rc = fsync_bdev(device->block->bdev); 3607 rc = fsync_bdev(device->block->bdev);
3606 if (rc != 0) 3608 if (rc != 0)
3607 goto interrupted; 3609 goto interrupted;
3608 } 3610 }
3609 /* schedule device tasklet and wait for completion */
3610 dasd_schedule_device_bh(device); 3611 dasd_schedule_device_bh(device);
3611 rc = wait_event_interruptible(shutdown_waitq, 3612 rc = wait_event_interruptible(shutdown_waitq,
3612 _wait_for_empty_queues(device)); 3613 _wait_for_empty_queues(device));
3613 if (rc != 0) 3614 if (rc != 0)
3614 goto interrupted; 3615 goto interrupted;
3616
3617 /*
3618 * check if a normal offline process overtook the offline
3619 * processing in this case simply do nothing beside returning
3620 * that we got interrupted
3621 * otherwise mark safe offline as not running any longer and
3622 * continue with normal offline
3623 */
3624 spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
3625 if (!test_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) {
3626 rc = -ERESTARTSYS;
3627 goto out_err;
3628 }
3629 clear_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags);
3615 } 3630 }
3631 spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
3616 3632
3617 dasd_set_target_state(device, DASD_STATE_NEW); 3633 dasd_set_target_state(device, DASD_STATE_NEW);
3618 /* dasd_delete_device destroys the device reference. */ 3634 /* dasd_delete_device destroys the device reference. */
@@ -3624,22 +3640,18 @@ int dasd_generic_set_offline(struct ccw_device *cdev)
3624 */ 3640 */
3625 if (block) 3641 if (block)
3626 dasd_free_block(block); 3642 dasd_free_block(block);
3643
3627 return 0; 3644 return 0;
3628 3645
3629interrupted: 3646interrupted:
3630 /* interrupted by signal */ 3647 /* interrupted by signal */
3631 clear_bit(DASD_FLAG_SAFE_OFFLINE, &device->flags); 3648 spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
3632 clear_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags); 3649 clear_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags);
3633 clear_bit(DASD_FLAG_OFFLINE, &device->flags); 3650 clear_bit(DASD_FLAG_OFFLINE, &device->flags);
3634 dasd_put_device(device); 3651out_err:
3635
3636 return rc;
3637
3638out_busy:
3639 dasd_put_device(device); 3652 dasd_put_device(device);
3640 spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags); 3653 spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
3641 3654 return rc;
3642 return -EBUSY;
3643} 3655}
3644EXPORT_SYMBOL_GPL(dasd_generic_set_offline); 3656EXPORT_SYMBOL_GPL(dasd_generic_set_offline);
3645 3657
diff --git a/drivers/s390/block/dasd_devmap.c b/drivers/s390/block/dasd_devmap.c
index 1164b51d09f3..7c7351276d2e 100644
--- a/drivers/s390/block/dasd_devmap.c
+++ b/drivers/s390/block/dasd_devmap.c
@@ -315,45 +315,58 @@ static int __init dasd_parse_range(const char *range)
315 char *features_str = NULL; 315 char *features_str = NULL;
316 char *from_str = NULL; 316 char *from_str = NULL;
317 char *to_str = NULL; 317 char *to_str = NULL;
318 size_t len = strlen(range) + 1; 318 int rc = 0;
319 char tmp[len]; 319 char *tmp;
320 320
321 strlcpy(tmp, range, len); 321 tmp = kstrdup(range, GFP_KERNEL);
322 if (!tmp)
323 return -ENOMEM;
322 324
323 if (dasd_evaluate_range_param(tmp, &from_str, &to_str, &features_str)) 325 if (dasd_evaluate_range_param(tmp, &from_str, &to_str, &features_str)) {
324 goto out_err; 326 rc = -EINVAL;
327 goto out;
328 }
325 329
326 if (dasd_busid(from_str, &from_id0, &from_id1, &from)) 330 if (dasd_busid(from_str, &from_id0, &from_id1, &from)) {
327 goto out_err; 331 rc = -EINVAL;
332 goto out;
333 }
328 334
329 to = from; 335 to = from;
330 to_id0 = from_id0; 336 to_id0 = from_id0;
331 to_id1 = from_id1; 337 to_id1 = from_id1;
332 if (to_str) { 338 if (to_str) {
333 if (dasd_busid(to_str, &to_id0, &to_id1, &to)) 339 if (dasd_busid(to_str, &to_id0, &to_id1, &to)) {
334 goto out_err; 340 rc = -EINVAL;
341 goto out;
342 }
335 if (from_id0 != to_id0 || from_id1 != to_id1 || from > to) { 343 if (from_id0 != to_id0 || from_id1 != to_id1 || from > to) {
336 pr_err("%s is not a valid device range\n", range); 344 pr_err("%s is not a valid device range\n", range);
337 goto out_err; 345 rc = -EINVAL;
346 goto out;
338 } 347 }
339 } 348 }
340 349
341 features = dasd_feature_list(features_str); 350 features = dasd_feature_list(features_str);
342 if (features < 0) 351 if (features < 0) {
343 goto out_err; 352 rc = -EINVAL;
353 goto out;
354 }
344 /* each device in dasd= parameter should be set initially online */ 355 /* each device in dasd= parameter should be set initially online */
345 features |= DASD_FEATURE_INITIAL_ONLINE; 356 features |= DASD_FEATURE_INITIAL_ONLINE;
346 while (from <= to) { 357 while (from <= to) {
347 sprintf(bus_id, "%01x.%01x.%04x", from_id0, from_id1, from++); 358 sprintf(bus_id, "%01x.%01x.%04x", from_id0, from_id1, from++);
348 devmap = dasd_add_busid(bus_id, features); 359 devmap = dasd_add_busid(bus_id, features);
349 if (IS_ERR(devmap)) 360 if (IS_ERR(devmap)) {
350 return PTR_ERR(devmap); 361 rc = PTR_ERR(devmap);
362 goto out;
363 }
351 } 364 }
352 365
353 return 0; 366out:
367 kfree(tmp);
354 368
355out_err: 369 return rc;
356 return -EINVAL;
357} 370}
358 371
359/* 372/*
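
dasd_parse_range() previously copied the module parameter into a variable-length array on the stack (char tmp[len]); the rewrite duplicates it with kstrdup(), checks the allocation, and funnels every failure through a single out: label that frees the copy. A userspace sketch of the same allocate/parse/free shape using strdup(); the parsing itself is elided:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Parse a "from(-to)" style range; only the allocate/parse/free shape
 * of the fixed function is shown, not the real bus-id parsing. */
static int parse_range(const char *range)
{
	char *tmp, *dash;
	int rc = 0;

	tmp = strdup(range);	/* kstrdup() in the kernel version */
	if (!tmp)
		return -ENOMEM;

	dash = strchr(tmp, '-');
	if (dash)
		*dash = '\0';

	if (tmp[0] == '\0') {	/* trivially invalid input */
		rc = -EINVAL;
		goto out;
	}
	printf("from=%s to=%s\n", tmp, dash ? dash + 1 : tmp);
out:
	free(tmp);		/* single exit frees the duplicate */
	return rc;
}

int main(void)
{
	return parse_range("0.0.1000-0.0.10ff") ? 1 : 0;
}
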
@@ -735,13 +748,22 @@ static ssize_t
735dasd_ro_show(struct device *dev, struct device_attribute *attr, char *buf) 748dasd_ro_show(struct device *dev, struct device_attribute *attr, char *buf)
736{ 749{
737 struct dasd_devmap *devmap; 750 struct dasd_devmap *devmap;
738 int ro_flag; 751 struct dasd_device *device;
752 int ro_flag = 0;
739 753
740 devmap = dasd_find_busid(dev_name(dev)); 754 devmap = dasd_find_busid(dev_name(dev));
741 if (!IS_ERR(devmap)) 755 if (IS_ERR(devmap))
742 ro_flag = (devmap->features & DASD_FEATURE_READONLY) != 0; 756 goto out;
743 else 757
744 ro_flag = (DASD_FEATURE_DEFAULT & DASD_FEATURE_READONLY) != 0; 758 ro_flag = !!(devmap->features & DASD_FEATURE_READONLY);
759
760 spin_lock(&dasd_devmap_lock);
761 device = devmap->device;
762 if (device)
763 ro_flag |= test_bit(DASD_FLAG_DEVICE_RO, &device->flags);
764 spin_unlock(&dasd_devmap_lock);
765
766out:
745 return snprintf(buf, PAGE_SIZE, ro_flag ? "1\n" : "0\n"); 767 return snprintf(buf, PAGE_SIZE, ro_flag ? "1\n" : "0\n");
746} 768}
747 769
@@ -764,7 +786,7 @@ dasd_ro_store(struct device *dev, struct device_attribute *attr,
764 786
765 device = dasd_device_from_cdev(cdev); 787 device = dasd_device_from_cdev(cdev);
766 if (IS_ERR(device)) 788 if (IS_ERR(device))
767 return PTR_ERR(device); 789 return count;
768 790
769 spin_lock_irqsave(get_ccwdev_lock(cdev), flags); 791 spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
770 val = val || test_bit(DASD_FLAG_DEVICE_RO, &device->flags); 792 val = val || test_bit(DASD_FLAG_DEVICE_RO, &device->flags);
@@ -928,11 +950,14 @@ dasd_safe_offline_store(struct device *dev, struct device_attribute *attr,
928{ 950{
929 struct ccw_device *cdev = to_ccwdev(dev); 951 struct ccw_device *cdev = to_ccwdev(dev);
930 struct dasd_device *device; 952 struct dasd_device *device;
953 unsigned long flags;
931 int rc; 954 int rc;
932 955
933 device = dasd_device_from_cdev(cdev); 956 spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
957 device = dasd_device_from_cdev_locked(cdev);
934 if (IS_ERR(device)) { 958 if (IS_ERR(device)) {
935 rc = PTR_ERR(device); 959 rc = PTR_ERR(device);
960 spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
936 goto out; 961 goto out;
937 } 962 }
938 963
@@ -940,12 +965,14 @@ dasd_safe_offline_store(struct device *dev, struct device_attribute *attr,
940 test_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) { 965 test_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) {
941 /* Already doing offline processing */ 966 /* Already doing offline processing */
942 dasd_put_device(device); 967 dasd_put_device(device);
968 spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
943 rc = -EBUSY; 969 rc = -EBUSY;
944 goto out; 970 goto out;
945 } 971 }
946 972
947 set_bit(DASD_FLAG_SAFE_OFFLINE, &device->flags); 973 set_bit(DASD_FLAG_SAFE_OFFLINE, &device->flags);
948 dasd_put_device(device); 974 dasd_put_device(device);
975 spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
949 976
950 rc = ccw_device_set_offline(cdev); 977 rc = ccw_device_set_offline(cdev);
951 978
diff --git a/drivers/s390/block/scm_blk.c b/drivers/s390/block/scm_blk.c
index 152de6817875..725f912fab41 100644
--- a/drivers/s390/block/scm_blk.c
+++ b/drivers/s390/block/scm_blk.c
@@ -13,6 +13,7 @@
13#include <linux/mempool.h> 13#include <linux/mempool.h>
14#include <linux/module.h> 14#include <linux/module.h>
15#include <linux/blkdev.h> 15#include <linux/blkdev.h>
16#include <linux/blk-mq.h>
16#include <linux/genhd.h> 17#include <linux/genhd.h>
17#include <linux/slab.h> 18#include <linux/slab.h>
18#include <linux/list.h> 19#include <linux/list.h>
@@ -42,7 +43,6 @@ static void __scm_free_rq(struct scm_request *scmrq)
42 struct aob_rq_header *aobrq = to_aobrq(scmrq); 43 struct aob_rq_header *aobrq = to_aobrq(scmrq);
43 44
44 free_page((unsigned long) scmrq->aob); 45 free_page((unsigned long) scmrq->aob);
45 __scm_free_rq_cluster(scmrq);
46 kfree(scmrq->request); 46 kfree(scmrq->request);
47 kfree(aobrq); 47 kfree(aobrq);
48} 48}
@@ -82,9 +82,6 @@ static int __scm_alloc_rq(void)
82 if (!scmrq->request) 82 if (!scmrq->request)
83 goto free; 83 goto free;
84 84
85 if (__scm_alloc_rq_cluster(scmrq))
86 goto free;
87
88 INIT_LIST_HEAD(&scmrq->list); 85 INIT_LIST_HEAD(&scmrq->list);
89 spin_lock_irq(&list_lock); 86 spin_lock_irq(&list_lock);
90 list_add(&scmrq->list, &inactive_requests); 87 list_add(&scmrq->list, &inactive_requests);
@@ -114,13 +111,13 @@ static struct scm_request *scm_request_fetch(void)
114{ 111{
115 struct scm_request *scmrq = NULL; 112 struct scm_request *scmrq = NULL;
116 113
117 spin_lock(&list_lock); 114 spin_lock_irq(&list_lock);
118 if (list_empty(&inactive_requests)) 115 if (list_empty(&inactive_requests))
119 goto out; 116 goto out;
120 scmrq = list_first_entry(&inactive_requests, struct scm_request, list); 117 scmrq = list_first_entry(&inactive_requests, struct scm_request, list);
121 list_del(&scmrq->list); 118 list_del(&scmrq->list);
122out: 119out:
123 spin_unlock(&list_lock); 120 spin_unlock_irq(&list_lock);
124 return scmrq; 121 return scmrq;
125} 122}
126 123
@@ -234,130 +231,123 @@ static inline void scm_request_init(struct scm_blk_dev *bdev,
234 scmrq->error = 0; 231 scmrq->error = 0;
235 /* We don't use all msbs - place aidaws at the end of the aob page. */ 232 /* We don't use all msbs - place aidaws at the end of the aob page. */
236 scmrq->next_aidaw = (void *) &aob->msb[nr_requests_per_io]; 233 scmrq->next_aidaw = (void *) &aob->msb[nr_requests_per_io];
237 scm_request_cluster_init(scmrq);
238} 234}
239 235
240static void scm_ensure_queue_restart(struct scm_blk_dev *bdev) 236static void scm_request_requeue(struct scm_request *scmrq)
241{
242 if (atomic_read(&bdev->queued_reqs)) {
243 /* Queue restart is triggered by the next interrupt. */
244 return;
245 }
246 blk_delay_queue(bdev->rq, SCM_QUEUE_DELAY);
247}
248
249void scm_request_requeue(struct scm_request *scmrq)
250{ 237{
251 struct scm_blk_dev *bdev = scmrq->bdev; 238 struct scm_blk_dev *bdev = scmrq->bdev;
252 int i; 239 int i;
253 240
254 scm_release_cluster(scmrq);
255 for (i = 0; i < nr_requests_per_io && scmrq->request[i]; i++) 241 for (i = 0; i < nr_requests_per_io && scmrq->request[i]; i++)
256 blk_requeue_request(bdev->rq, scmrq->request[i]); 242 blk_mq_requeue_request(scmrq->request[i], false);
257 243
258 atomic_dec(&bdev->queued_reqs); 244 atomic_dec(&bdev->queued_reqs);
259 scm_request_done(scmrq); 245 scm_request_done(scmrq);
260 scm_ensure_queue_restart(bdev); 246 blk_mq_kick_requeue_list(bdev->rq);
261} 247}
262 248
263void scm_request_finish(struct scm_request *scmrq) 249static void scm_request_finish(struct scm_request *scmrq)
264{ 250{
265 struct scm_blk_dev *bdev = scmrq->bdev; 251 struct scm_blk_dev *bdev = scmrq->bdev;
266 int i; 252 int i;
267 253
268 scm_release_cluster(scmrq); 254 for (i = 0; i < nr_requests_per_io && scmrq->request[i]; i++) {
269 for (i = 0; i < nr_requests_per_io && scmrq->request[i]; i++) 255 if (scmrq->error)
270 blk_end_request_all(scmrq->request[i], scmrq->error); 256 blk_mq_end_request(scmrq->request[i], scmrq->error);
257 else
258 blk_mq_complete_request(scmrq->request[i]);
259 }
271 260
272 atomic_dec(&bdev->queued_reqs); 261 atomic_dec(&bdev->queued_reqs);
273 scm_request_done(scmrq); 262 scm_request_done(scmrq);
274} 263}
275 264
276static int scm_request_start(struct scm_request *scmrq) 265static void scm_request_start(struct scm_request *scmrq)
277{ 266{
278 struct scm_blk_dev *bdev = scmrq->bdev; 267 struct scm_blk_dev *bdev = scmrq->bdev;
279 int ret;
280 268
281 atomic_inc(&bdev->queued_reqs); 269 atomic_inc(&bdev->queued_reqs);
282 if (!scmrq->aob->request.msb_count) { 270 if (eadm_start_aob(scmrq->aob)) {
283 scm_request_requeue(scmrq);
284 return -EINVAL;
285 }
286
287 ret = eadm_start_aob(scmrq->aob);
288 if (ret) {
289 SCM_LOG(5, "no subchannel"); 271 SCM_LOG(5, "no subchannel");
290 scm_request_requeue(scmrq); 272 scm_request_requeue(scmrq);
291 } 273 }
292 return ret;
293} 274}
294 275
295static void scm_blk_request(struct request_queue *rq) 276struct scm_queue {
277 struct scm_request *scmrq;
278 spinlock_t lock;
279};
280
281static int scm_blk_request(struct blk_mq_hw_ctx *hctx,
282 const struct blk_mq_queue_data *qd)
296{ 283{
297 struct scm_device *scmdev = rq->queuedata; 284 struct scm_device *scmdev = hctx->queue->queuedata;
298 struct scm_blk_dev *bdev = dev_get_drvdata(&scmdev->dev); 285 struct scm_blk_dev *bdev = dev_get_drvdata(&scmdev->dev);
299 struct scm_request *scmrq = NULL; 286 struct scm_queue *sq = hctx->driver_data;
300 struct request *req; 287 struct request *req = qd->rq;
288 struct scm_request *scmrq;
301 289
302 while ((req = blk_peek_request(rq))) { 290 spin_lock(&sq->lock);
303 if (!scm_permit_request(bdev, req)) 291 if (!scm_permit_request(bdev, req)) {
304 goto out; 292 spin_unlock(&sq->lock);
293 return BLK_MQ_RQ_QUEUE_BUSY;
294 }
305 295
296 scmrq = sq->scmrq;
297 if (!scmrq) {
298 scmrq = scm_request_fetch();
306 if (!scmrq) { 299 if (!scmrq) {
307 scmrq = scm_request_fetch(); 300 SCM_LOG(5, "no request");
308 if (!scmrq) { 301 spin_unlock(&sq->lock);
309 SCM_LOG(5, "no request"); 302 return BLK_MQ_RQ_QUEUE_BUSY;
310 goto out;
311 }
312 scm_request_init(bdev, scmrq);
313 } 303 }
314 scm_request_set(scmrq, req); 304 scm_request_init(bdev, scmrq);
305 sq->scmrq = scmrq;
306 }
307 scm_request_set(scmrq, req);
315 308
316 if (!scm_reserve_cluster(scmrq)) { 309 if (scm_request_prepare(scmrq)) {
317 SCM_LOG(5, "cluster busy"); 310 SCM_LOG(5, "aidaw alloc failed");
318 scm_request_set(scmrq, NULL); 311 scm_request_set(scmrq, NULL);
319 if (scmrq->aob->request.msb_count)
320 goto out;
321 312
322 scm_request_done(scmrq); 313 if (scmrq->aob->request.msb_count)
323 return; 314 scm_request_start(scmrq);
324 }
325 315
326 if (scm_need_cluster_request(scmrq)) { 316 sq->scmrq = NULL;
327 if (scmrq->aob->request.msb_count) { 317 spin_unlock(&sq->lock);
328 /* Start cluster requests separately. */ 318 return BLK_MQ_RQ_QUEUE_BUSY;
329 scm_request_set(scmrq, NULL); 319 }
330 if (scm_request_start(scmrq)) 320 blk_mq_start_request(req);
331 return;
332 } else {
333 atomic_inc(&bdev->queued_reqs);
334 blk_start_request(req);
335 scm_initiate_cluster_request(scmrq);
336 }
337 scmrq = NULL;
338 continue;
339 }
340 321
341 if (scm_request_prepare(scmrq)) { 322 if (qd->last || scmrq->aob->request.msb_count == nr_requests_per_io) {
342 SCM_LOG(5, "aidaw alloc failed"); 323 scm_request_start(scmrq);
343 scm_request_set(scmrq, NULL); 324 sq->scmrq = NULL;
344 goto out; 325 }
345 } 326 spin_unlock(&sq->lock);
346 blk_start_request(req); 327 return BLK_MQ_RQ_QUEUE_OK;
328}
347 329
348 if (scmrq->aob->request.msb_count < nr_requests_per_io) 330static int scm_blk_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
349 continue; 331 unsigned int idx)
332{
333 struct scm_queue *qd = kzalloc(sizeof(*qd), GFP_KERNEL);
350 334
351 if (scm_request_start(scmrq)) 335 if (!qd)
352 return; 336 return -ENOMEM;
353 337
354 scmrq = NULL; 338 spin_lock_init(&qd->lock);
355 } 339 hctx->driver_data = qd;
356out: 340
357 if (scmrq) 341 return 0;
358 scm_request_start(scmrq); 342}
359 else 343
360 scm_ensure_queue_restart(bdev); 344static void scm_blk_exit_hctx(struct blk_mq_hw_ctx *hctx, unsigned int idx)
345{
346 struct scm_queue *qd = hctx->driver_data;
347
348 WARN_ON(qd->scmrq);
349 kfree(hctx->driver_data);
350 hctx->driver_data = NULL;
361} 351}
362 352
363static void __scmrq_log_error(struct scm_request *scmrq) 353static void __scmrq_log_error(struct scm_request *scmrq)
@@ -377,21 +367,6 @@ static void __scmrq_log_error(struct scm_request *scmrq)
377 scmrq->error); 367 scmrq->error);
378} 368}
379 369
380void scm_blk_irq(struct scm_device *scmdev, void *data, int error)
381{
382 struct scm_request *scmrq = data;
383 struct scm_blk_dev *bdev = scmrq->bdev;
384
385 scmrq->error = error;
386 if (error)
387 __scmrq_log_error(scmrq);
388
389 spin_lock(&bdev->lock);
390 list_add_tail(&scmrq->list, &bdev->finished_requests);
391 spin_unlock(&bdev->lock);
392 tasklet_hi_schedule(&bdev->tasklet);
393}
394
395static void scm_blk_handle_error(struct scm_request *scmrq) 370static void scm_blk_handle_error(struct scm_request *scmrq)
396{ 371{
397 struct scm_blk_dev *bdev = scmrq->bdev; 372 struct scm_blk_dev *bdev = scmrq->bdev;
@@ -419,54 +394,46 @@ restart:
419 return; 394 return;
420 395
421requeue: 396requeue:
422 spin_lock_irqsave(&bdev->rq_lock, flags);
423 scm_request_requeue(scmrq); 397 scm_request_requeue(scmrq);
424 spin_unlock_irqrestore(&bdev->rq_lock, flags);
425} 398}
426 399
427static void scm_blk_tasklet(struct scm_blk_dev *bdev) 400void scm_blk_irq(struct scm_device *scmdev, void *data, int error)
428{ 401{
429 struct scm_request *scmrq; 402 struct scm_request *scmrq = data;
430 unsigned long flags;
431
432 spin_lock_irqsave(&bdev->lock, flags);
433 while (!list_empty(&bdev->finished_requests)) {
434 scmrq = list_first_entry(&bdev->finished_requests,
435 struct scm_request, list);
436 list_del(&scmrq->list);
437 spin_unlock_irqrestore(&bdev->lock, flags);
438 403
439 if (scmrq->error && scmrq->retries-- > 0) { 404 scmrq->error = error;
405 if (error) {
406 __scmrq_log_error(scmrq);
407 if (scmrq->retries-- > 0) {
440 scm_blk_handle_error(scmrq); 408 scm_blk_handle_error(scmrq);
441 409 return;
442 /* Request restarted or requeued, handle next. */
443 spin_lock_irqsave(&bdev->lock, flags);
444 continue;
445 } 410 }
411 }
446 412
447 if (scm_test_cluster_request(scmrq)) { 413 scm_request_finish(scmrq);
448 scm_cluster_request_irq(scmrq); 414}
449 spin_lock_irqsave(&bdev->lock, flags);
450 continue;
451 }
452 415
453 scm_request_finish(scmrq); 416static void scm_blk_request_done(struct request *req)
454 spin_lock_irqsave(&bdev->lock, flags); 417{
455 } 418 blk_mq_end_request(req, 0);
456 spin_unlock_irqrestore(&bdev->lock, flags);
457 /* Look out for more requests. */
458 blk_run_queue(bdev->rq);
459} 419}
460 420
461static const struct block_device_operations scm_blk_devops = { 421static const struct block_device_operations scm_blk_devops = {
462 .owner = THIS_MODULE, 422 .owner = THIS_MODULE,
463}; 423};
464 424
425static const struct blk_mq_ops scm_mq_ops = {
426 .queue_rq = scm_blk_request,
427 .complete = scm_blk_request_done,
428 .init_hctx = scm_blk_init_hctx,
429 .exit_hctx = scm_blk_exit_hctx,
430};
431
465int scm_blk_dev_setup(struct scm_blk_dev *bdev, struct scm_device *scmdev) 432int scm_blk_dev_setup(struct scm_blk_dev *bdev, struct scm_device *scmdev)
466{ 433{
467 struct request_queue *rq;
468 int len, ret = -ENOMEM;
469 unsigned int devindex, nr_max_blk; 434 unsigned int devindex, nr_max_blk;
435 struct request_queue *rq;
436 int len, ret;
470 437
471 devindex = atomic_inc_return(&nr_devices) - 1; 438 devindex = atomic_inc_return(&nr_devices) - 1;
472 /* scma..scmz + scmaa..scmzz */ 439 /* scma..scmz + scmaa..scmzz */
@@ -477,18 +444,23 @@ int scm_blk_dev_setup(struct scm_blk_dev *bdev, struct scm_device *scmdev)
477 444
478 bdev->scmdev = scmdev; 445 bdev->scmdev = scmdev;
479 bdev->state = SCM_OPER; 446 bdev->state = SCM_OPER;
480 spin_lock_init(&bdev->rq_lock);
481 spin_lock_init(&bdev->lock); 447 spin_lock_init(&bdev->lock);
482 INIT_LIST_HEAD(&bdev->finished_requests);
483 atomic_set(&bdev->queued_reqs, 0); 448 atomic_set(&bdev->queued_reqs, 0);
484 tasklet_init(&bdev->tasklet,
485 (void (*)(unsigned long)) scm_blk_tasklet,
486 (unsigned long) bdev);
487 449
488 rq = blk_init_queue(scm_blk_request, &bdev->rq_lock); 450 bdev->tag_set.ops = &scm_mq_ops;
489 if (!rq) 451 bdev->tag_set.nr_hw_queues = nr_requests;
452 bdev->tag_set.queue_depth = nr_requests_per_io * nr_requests;
453 bdev->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
454
455 ret = blk_mq_alloc_tag_set(&bdev->tag_set);
456 if (ret)
490 goto out; 457 goto out;
491 458
459 rq = blk_mq_init_queue(&bdev->tag_set);
460 if (IS_ERR(rq)) {
461 ret = PTR_ERR(rq);
462 goto out_tag;
463 }
492 bdev->rq = rq; 464 bdev->rq = rq;
493 nr_max_blk = min(scmdev->nr_max_block, 465 nr_max_blk = min(scmdev->nr_max_block,
494 (unsigned int) (PAGE_SIZE / sizeof(struct aidaw))); 466 (unsigned int) (PAGE_SIZE / sizeof(struct aidaw)));
@@ -498,12 +470,12 @@ int scm_blk_dev_setup(struct scm_blk_dev *bdev, struct scm_device *scmdev)
498 blk_queue_max_segments(rq, nr_max_blk); 470 blk_queue_max_segments(rq, nr_max_blk);
499 queue_flag_set_unlocked(QUEUE_FLAG_NONROT, rq); 471 queue_flag_set_unlocked(QUEUE_FLAG_NONROT, rq);
500 queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, rq); 472 queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, rq);
501 scm_blk_dev_cluster_setup(bdev);
502 473
503 bdev->gendisk = alloc_disk(SCM_NR_PARTS); 474 bdev->gendisk = alloc_disk(SCM_NR_PARTS);
504 if (!bdev->gendisk) 475 if (!bdev->gendisk) {
476 ret = -ENOMEM;
505 goto out_queue; 477 goto out_queue;
506 478 }
507 rq->queuedata = scmdev; 479 rq->queuedata = scmdev;
508 bdev->gendisk->private_data = scmdev; 480 bdev->gendisk->private_data = scmdev;
509 bdev->gendisk->fops = &scm_blk_devops; 481 bdev->gendisk->fops = &scm_blk_devops;
@@ -528,6 +500,8 @@ int scm_blk_dev_setup(struct scm_blk_dev *bdev, struct scm_device *scmdev)
528 500
529out_queue: 501out_queue:
530 blk_cleanup_queue(rq); 502 blk_cleanup_queue(rq);
503out_tag:
504 blk_mq_free_tag_set(&bdev->tag_set);
531out: 505out:
532 atomic_dec(&nr_devices); 506 atomic_dec(&nr_devices);
533 return ret; 507 return ret;
@@ -535,9 +509,9 @@ out:
535 509
536void scm_blk_dev_cleanup(struct scm_blk_dev *bdev) 510void scm_blk_dev_cleanup(struct scm_blk_dev *bdev)
537{ 511{
538 tasklet_kill(&bdev->tasklet);
539 del_gendisk(bdev->gendisk); 512 del_gendisk(bdev->gendisk);
540 blk_cleanup_queue(bdev->gendisk->queue); 513 blk_cleanup_queue(bdev->gendisk->queue);
514 blk_mq_free_tag_set(&bdev->tag_set);
541 put_disk(bdev->gendisk); 515 put_disk(bdev->gendisk);
542} 516}
543 517
@@ -558,7 +532,7 @@ static bool __init scm_blk_params_valid(void)
558 if (!nr_requests_per_io || nr_requests_per_io > 64) 532 if (!nr_requests_per_io || nr_requests_per_io > 64)
559 return false; 533 return false;
560 534
561 return scm_cluster_size_valid(); 535 return true;
562} 536}
563 537
564static int __init scm_blk_init(void) 538static int __init scm_blk_init(void)
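/*
 * Aside, not part of the patch: a minimal sketch of the blk-mq
 * registration pattern that the scm_blk.c hunks above adopt, using
 * hypothetical names (my_*).  It mirrors the API level seen in this
 * diff, where ->queue_rq() returns BLK_MQ_RQ_QUEUE_OK/BUSY and the
 * request queue is built from a driver-owned tag set; it is a sketch,
 * not the driver's actual code.
 */
#include <linux/blk-mq.h>
#include <linux/blkdev.h>
#include <linux/err.h>

struct my_dev {				/* hypothetical device state, assumed zero-initialized */
	struct blk_mq_tag_set tag_set;
	struct request_queue *rq;
};

static int my_queue_rq(struct blk_mq_hw_ctx *hctx,
		       const struct blk_mq_queue_data *qd)
{
	struct request *req = qd->rq;

	if (!my_hw_ready(hctx->driver_data))	/* hypothetical readiness check */
		return BLK_MQ_RQ_QUEUE_BUSY;	/* block core will retry later */

	blk_mq_start_request(req);
	my_submit(req);				/* hypothetical submit path */
	return BLK_MQ_RQ_QUEUE_OK;
}

static const struct blk_mq_ops my_mq_ops = {
	.queue_rq = my_queue_rq,
};

static int my_setup_queue(struct my_dev *dev)
{
	int ret;

	dev->tag_set.ops = &my_mq_ops;
	dev->tag_set.nr_hw_queues = 1;
	dev->tag_set.queue_depth = 64;
	dev->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;

	ret = blk_mq_alloc_tag_set(&dev->tag_set);
	if (ret)
		return ret;

	dev->rq = blk_mq_init_queue(&dev->tag_set);
	if (IS_ERR(dev->rq)) {
		blk_mq_free_tag_set(&dev->tag_set);
		return PTR_ERR(dev->rq);
	}
	return 0;
}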
diff --git a/drivers/s390/block/scm_blk.h b/drivers/s390/block/scm_blk.h
index 09218cdc5129..242d17a91920 100644
--- a/drivers/s390/block/scm_blk.h
+++ b/drivers/s390/block/scm_blk.h
@@ -4,6 +4,7 @@
4#include <linux/interrupt.h> 4#include <linux/interrupt.h>
5#include <linux/spinlock.h> 5#include <linux/spinlock.h>
6#include <linux/blkdev.h> 6#include <linux/blkdev.h>
7#include <linux/blk-mq.h>
7#include <linux/genhd.h> 8#include <linux/genhd.h>
8#include <linux/list.h> 9#include <linux/list.h>
9 10
@@ -14,18 +15,14 @@
14#define SCM_QUEUE_DELAY 5 15#define SCM_QUEUE_DELAY 5
15 16
16struct scm_blk_dev { 17struct scm_blk_dev {
17 struct tasklet_struct tasklet;
18 struct request_queue *rq; 18 struct request_queue *rq;
19 struct gendisk *gendisk; 19 struct gendisk *gendisk;
20 struct blk_mq_tag_set tag_set;
20 struct scm_device *scmdev; 21 struct scm_device *scmdev;
21 spinlock_t rq_lock; /* guard the request queue */ 22 spinlock_t lock;
22 spinlock_t lock; /* guard the rest of the blockdev */
23 atomic_t queued_reqs; 23 atomic_t queued_reqs;
24 enum {SCM_OPER, SCM_WR_PROHIBIT} state; 24 enum {SCM_OPER, SCM_WR_PROHIBIT} state;
25 struct list_head finished_requests; 25 struct list_head finished_requests;
26#ifdef CONFIG_SCM_BLOCK_CLUSTER_WRITE
27 struct list_head cluster_list;
28#endif
29}; 26};
30 27
31struct scm_request { 28struct scm_request {
@@ -36,13 +33,6 @@ struct scm_request {
36 struct list_head list; 33 struct list_head list;
37 u8 retries; 34 u8 retries;
38 int error; 35 int error;
39#ifdef CONFIG_SCM_BLOCK_CLUSTER_WRITE
40 struct {
41 enum {CLUSTER_NONE, CLUSTER_READ, CLUSTER_WRITE} state;
42 struct list_head list;
43 void **buf;
44 } cluster;
45#endif
46}; 36};
47 37
48#define to_aobrq(rq) container_of((void *) rq, struct aob_rq_header, data) 38#define to_aobrq(rq) container_of((void *) rq, struct aob_rq_header, data)
@@ -52,55 +42,11 @@ void scm_blk_dev_cleanup(struct scm_blk_dev *);
52void scm_blk_set_available(struct scm_blk_dev *); 42void scm_blk_set_available(struct scm_blk_dev *);
53void scm_blk_irq(struct scm_device *, void *, int); 43void scm_blk_irq(struct scm_device *, void *, int);
54 44
55void scm_request_finish(struct scm_request *);
56void scm_request_requeue(struct scm_request *);
57
58struct aidaw *scm_aidaw_fetch(struct scm_request *scmrq, unsigned int bytes); 45struct aidaw *scm_aidaw_fetch(struct scm_request *scmrq, unsigned int bytes);
59 46
60int scm_drv_init(void); 47int scm_drv_init(void);
61void scm_drv_cleanup(void); 48void scm_drv_cleanup(void);
62 49
63#ifdef CONFIG_SCM_BLOCK_CLUSTER_WRITE
64void __scm_free_rq_cluster(struct scm_request *);
65int __scm_alloc_rq_cluster(struct scm_request *);
66void scm_request_cluster_init(struct scm_request *);
67bool scm_reserve_cluster(struct scm_request *);
68void scm_release_cluster(struct scm_request *);
69void scm_blk_dev_cluster_setup(struct scm_blk_dev *);
70bool scm_need_cluster_request(struct scm_request *);
71void scm_initiate_cluster_request(struct scm_request *);
72void scm_cluster_request_irq(struct scm_request *);
73bool scm_test_cluster_request(struct scm_request *);
74bool scm_cluster_size_valid(void);
75#else /* CONFIG_SCM_BLOCK_CLUSTER_WRITE */
76static inline void __scm_free_rq_cluster(struct scm_request *scmrq) {}
77static inline int __scm_alloc_rq_cluster(struct scm_request *scmrq)
78{
79 return 0;
80}
81static inline void scm_request_cluster_init(struct scm_request *scmrq) {}
82static inline bool scm_reserve_cluster(struct scm_request *scmrq)
83{
84 return true;
85}
86static inline void scm_release_cluster(struct scm_request *scmrq) {}
87static inline void scm_blk_dev_cluster_setup(struct scm_blk_dev *bdev) {}
88static inline bool scm_need_cluster_request(struct scm_request *scmrq)
89{
90 return false;
91}
92static inline void scm_initiate_cluster_request(struct scm_request *scmrq) {}
93static inline void scm_cluster_request_irq(struct scm_request *scmrq) {}
94static inline bool scm_test_cluster_request(struct scm_request *scmrq)
95{
96 return false;
97}
98static inline bool scm_cluster_size_valid(void)
99{
100 return true;
101}
102#endif /* CONFIG_SCM_BLOCK_CLUSTER_WRITE */
103
104extern debug_info_t *scm_debug; 50extern debug_info_t *scm_debug;
105 51
106#define SCM_LOG(imp, txt) do { \ 52#define SCM_LOG(imp, txt) do { \
diff --git a/drivers/s390/block/scm_blk_cluster.c b/drivers/s390/block/scm_blk_cluster.c
deleted file mode 100644
index 7497ddde2dd6..000000000000
--- a/drivers/s390/block/scm_blk_cluster.c
+++ /dev/null
@@ -1,255 +0,0 @@
1/*
2 * Block driver for s390 storage class memory.
3 *
4 * Copyright IBM Corp. 2012
5 * Author(s): Sebastian Ott <sebott@linux.vnet.ibm.com>
6 */
7
8#include <linux/spinlock.h>
9#include <linux/module.h>
10#include <linux/blkdev.h>
11#include <linux/genhd.h>
12#include <linux/slab.h>
13#include <linux/list.h>
14#include <asm/eadm.h>
15#include "scm_blk.h"
16
17static unsigned int write_cluster_size = 64;
18module_param(write_cluster_size, uint, S_IRUGO);
19MODULE_PARM_DESC(write_cluster_size,
20 "Number of pages used for contiguous writes.");
21
22#define CLUSTER_SIZE (write_cluster_size * PAGE_SIZE)
23
24void __scm_free_rq_cluster(struct scm_request *scmrq)
25{
26 int i;
27
28 if (!scmrq->cluster.buf)
29 return;
30
31 for (i = 0; i < 2 * write_cluster_size; i++)
32 free_page((unsigned long) scmrq->cluster.buf[i]);
33
34 kfree(scmrq->cluster.buf);
35}
36
37int __scm_alloc_rq_cluster(struct scm_request *scmrq)
38{
39 int i;
40
41 scmrq->cluster.buf = kzalloc(sizeof(void *) * 2 * write_cluster_size,
42 GFP_KERNEL);
43 if (!scmrq->cluster.buf)
44 return -ENOMEM;
45
46 for (i = 0; i < 2 * write_cluster_size; i++) {
47 scmrq->cluster.buf[i] = (void *) get_zeroed_page(GFP_DMA);
48 if (!scmrq->cluster.buf[i])
49 return -ENOMEM;
50 }
51 INIT_LIST_HEAD(&scmrq->cluster.list);
52 return 0;
53}
54
55void scm_request_cluster_init(struct scm_request *scmrq)
56{
57 scmrq->cluster.state = CLUSTER_NONE;
58}
59
60static bool clusters_intersect(struct request *A, struct request *B)
61{
62 unsigned long firstA, lastA, firstB, lastB;
63
64 firstA = ((u64) blk_rq_pos(A) << 9) / CLUSTER_SIZE;
65 lastA = (((u64) blk_rq_pos(A) << 9) +
66 blk_rq_bytes(A) - 1) / CLUSTER_SIZE;
67
68 firstB = ((u64) blk_rq_pos(B) << 9) / CLUSTER_SIZE;
69 lastB = (((u64) blk_rq_pos(B) << 9) +
70 blk_rq_bytes(B) - 1) / CLUSTER_SIZE;
71
72 return (firstB <= lastA && firstA <= lastB);
73}
74
75bool scm_reserve_cluster(struct scm_request *scmrq)
76{
77 struct request *req = scmrq->request[scmrq->aob->request.msb_count];
78 struct scm_blk_dev *bdev = scmrq->bdev;
79 struct scm_request *iter;
80 int pos, add = 1;
81
82 if (write_cluster_size == 0)
83 return true;
84
85 spin_lock(&bdev->lock);
86 list_for_each_entry(iter, &bdev->cluster_list, cluster.list) {
87 if (iter == scmrq) {
88 /*
89 * We don't have to use clusters_intersect here, since
90 * cluster requests are always started separately.
91 */
92 add = 0;
93 continue;
94 }
95 for (pos = 0; pos < iter->aob->request.msb_count; pos++) {
96 if (clusters_intersect(req, iter->request[pos]) &&
97 (rq_data_dir(req) == WRITE ||
98 rq_data_dir(iter->request[pos]) == WRITE)) {
99 spin_unlock(&bdev->lock);
100 return false;
101 }
102 }
103 }
104 if (add)
105 list_add(&scmrq->cluster.list, &bdev->cluster_list);
106 spin_unlock(&bdev->lock);
107
108 return true;
109}
110
111void scm_release_cluster(struct scm_request *scmrq)
112{
113 struct scm_blk_dev *bdev = scmrq->bdev;
114 unsigned long flags;
115
116 if (write_cluster_size == 0)
117 return;
118
119 spin_lock_irqsave(&bdev->lock, flags);
120 list_del(&scmrq->cluster.list);
121 spin_unlock_irqrestore(&bdev->lock, flags);
122}
123
124void scm_blk_dev_cluster_setup(struct scm_blk_dev *bdev)
125{
126 INIT_LIST_HEAD(&bdev->cluster_list);
127 blk_queue_io_opt(bdev->rq, CLUSTER_SIZE);
128}
129
130static int scm_prepare_cluster_request(struct scm_request *scmrq)
131{
132 struct scm_blk_dev *bdev = scmrq->bdev;
133 struct scm_device *scmdev = bdev->gendisk->private_data;
134 struct request *req = scmrq->request[0];
135 struct msb *msb = &scmrq->aob->msb[0];
136 struct req_iterator iter;
137 struct aidaw *aidaw;
138 struct bio_vec bv;
139 int i = 0;
140 u64 addr;
141
142 switch (scmrq->cluster.state) {
143 case CLUSTER_NONE:
144 scmrq->cluster.state = CLUSTER_READ;
145 /* fall through */
146 case CLUSTER_READ:
147 msb->bs = MSB_BS_4K;
148 msb->oc = MSB_OC_READ;
149 msb->flags = MSB_FLAG_IDA;
150 msb->blk_count = write_cluster_size;
151
152 addr = scmdev->address + ((u64) blk_rq_pos(req) << 9);
153 msb->scm_addr = round_down(addr, CLUSTER_SIZE);
154
155 if (msb->scm_addr !=
156 round_down(addr + (u64) blk_rq_bytes(req) - 1,
157 CLUSTER_SIZE))
158 msb->blk_count = 2 * write_cluster_size;
159
160 aidaw = scm_aidaw_fetch(scmrq, msb->blk_count * PAGE_SIZE);
161 if (!aidaw)
162 return -ENOMEM;
163
164 scmrq->aob->request.msb_count = 1;
165 msb->data_addr = (u64) aidaw;
166 for (i = 0; i < msb->blk_count; i++) {
167 aidaw->data_addr = (u64) scmrq->cluster.buf[i];
168 aidaw++;
169 }
170
171 break;
172 case CLUSTER_WRITE:
173 aidaw = (void *) msb->data_addr;
174 msb->oc = MSB_OC_WRITE;
175
176 for (addr = msb->scm_addr;
177 addr < scmdev->address + ((u64) blk_rq_pos(req) << 9);
178 addr += PAGE_SIZE) {
179 aidaw->data_addr = (u64) scmrq->cluster.buf[i];
180 aidaw++;
181 i++;
182 }
183 rq_for_each_segment(bv, req, iter) {
184 aidaw->data_addr = (u64) page_address(bv.bv_page);
185 aidaw++;
186 i++;
187 }
188 for (; i < msb->blk_count; i++) {
189 aidaw->data_addr = (u64) scmrq->cluster.buf[i];
190 aidaw++;
191 }
192 break;
193 }
194 return 0;
195}
196
197bool scm_need_cluster_request(struct scm_request *scmrq)
198{
199 int pos = scmrq->aob->request.msb_count;
200
201 if (rq_data_dir(scmrq->request[pos]) == READ)
202 return false;
203
204 return blk_rq_bytes(scmrq->request[pos]) < CLUSTER_SIZE;
205}
206
207/* Called with queue lock held. */
208void scm_initiate_cluster_request(struct scm_request *scmrq)
209{
210 if (scm_prepare_cluster_request(scmrq))
211 goto requeue;
212 if (eadm_start_aob(scmrq->aob))
213 goto requeue;
214 return;
215requeue:
216 scm_request_requeue(scmrq);
217}
218
219bool scm_test_cluster_request(struct scm_request *scmrq)
220{
221 return scmrq->cluster.state != CLUSTER_NONE;
222}
223
224void scm_cluster_request_irq(struct scm_request *scmrq)
225{
226 struct scm_blk_dev *bdev = scmrq->bdev;
227 unsigned long flags;
228
229 switch (scmrq->cluster.state) {
230 case CLUSTER_NONE:
231 BUG();
232 break;
233 case CLUSTER_READ:
234 if (scmrq->error) {
235 scm_request_finish(scmrq);
236 break;
237 }
238 scmrq->cluster.state = CLUSTER_WRITE;
239 spin_lock_irqsave(&bdev->rq_lock, flags);
240 scm_initiate_cluster_request(scmrq);
241 spin_unlock_irqrestore(&bdev->rq_lock, flags);
242 break;
243 case CLUSTER_WRITE:
244 scm_request_finish(scmrq);
245 break;
246 }
247}
248
249bool scm_cluster_size_valid(void)
250{
251 if (write_cluster_size == 1 || write_cluster_size > 128)
252 return false;
253
254 return !(write_cluster_size & (write_cluster_size - 1));
255}
diff --git a/drivers/s390/cio/ccwgroup.c b/drivers/s390/cio/ccwgroup.c
index e443b0d0b236..34b9ad6b3143 100644
--- a/drivers/s390/cio/ccwgroup.c
+++ b/drivers/s390/cio/ccwgroup.c
@@ -35,7 +35,7 @@ static struct bus_type ccwgroup_bus_type;
35static void __ccwgroup_remove_symlinks(struct ccwgroup_device *gdev) 35static void __ccwgroup_remove_symlinks(struct ccwgroup_device *gdev)
36{ 36{
37 int i; 37 int i;
38 char str[8]; 38 char str[16];
39 39
40 for (i = 0; i < gdev->count; i++) { 40 for (i = 0; i < gdev->count; i++) {
41 sprintf(str, "cdev%d", i); 41 sprintf(str, "cdev%d", i);
@@ -238,7 +238,7 @@ static void ccwgroup_release(struct device *dev)
238 238
239static int __ccwgroup_create_symlinks(struct ccwgroup_device *gdev) 239static int __ccwgroup_create_symlinks(struct ccwgroup_device *gdev)
240{ 240{
241 char str[8]; 241 char str[16];
242 int i, rc; 242 int i, rc;
243 243
244 for (i = 0; i < gdev->count; i++) { 244 for (i = 0; i < gdev->count; i++) {
diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c
index e2aa944eb566..d3e504c3c362 100644
--- a/drivers/s390/cio/css.c
+++ b/drivers/s390/cio/css.c
@@ -296,6 +296,51 @@ static const struct attribute_group *default_subch_attr_groups[] = {
296 NULL, 296 NULL,
297}; 297};
298 298
299static ssize_t chpids_show(struct device *dev,
300 struct device_attribute *attr,
301 char *buf)
302{
303 struct subchannel *sch = to_subchannel(dev);
304 struct chsc_ssd_info *ssd = &sch->ssd_info;
305 ssize_t ret = 0;
306 int mask;
307 int chp;
308
309 for (chp = 0; chp < 8; chp++) {
310 mask = 0x80 >> chp;
311 if (ssd->path_mask & mask)
312 ret += sprintf(buf + ret, "%02x ", ssd->chpid[chp].id);
313 else
314 ret += sprintf(buf + ret, "00 ");
315 }
316 ret += sprintf(buf + ret, "\n");
317 return ret;
318}
319static DEVICE_ATTR(chpids, 0444, chpids_show, NULL);
320
321static ssize_t pimpampom_show(struct device *dev,
322 struct device_attribute *attr,
323 char *buf)
324{
325 struct subchannel *sch = to_subchannel(dev);
326 struct pmcw *pmcw = &sch->schib.pmcw;
327
328 return sprintf(buf, "%02x %02x %02x\n",
329 pmcw->pim, pmcw->pam, pmcw->pom);
330}
331static DEVICE_ATTR(pimpampom, 0444, pimpampom_show, NULL);
332
333static struct attribute *io_subchannel_type_attrs[] = {
334 &dev_attr_chpids.attr,
335 &dev_attr_pimpampom.attr,
336 NULL,
337};
338ATTRIBUTE_GROUPS(io_subchannel_type);
339
340static const struct device_type io_subchannel_type = {
341 .groups = io_subchannel_type_groups,
342};
343
299int css_register_subchannel(struct subchannel *sch) 344int css_register_subchannel(struct subchannel *sch)
300{ 345{
301 int ret; 346 int ret;
@@ -304,6 +349,10 @@ int css_register_subchannel(struct subchannel *sch)
304 sch->dev.parent = &channel_subsystems[0]->device; 349 sch->dev.parent = &channel_subsystems[0]->device;
305 sch->dev.bus = &css_bus_type; 350 sch->dev.bus = &css_bus_type;
306 sch->dev.groups = default_subch_attr_groups; 351 sch->dev.groups = default_subch_attr_groups;
352
353 if (sch->st == SUBCHANNEL_TYPE_IO)
354 sch->dev.type = &io_subchannel_type;
355
307 /* 356 /*
308 * We don't want to generate uevents for I/O subchannels that don't 357 * We don't want to generate uevents for I/O subchannels that don't
309 * have a working ccw device behind them since they will be 358 * have a working ccw device behind them since they will be
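/*
 * Aside, not part of the patch: skeleton of the sysfs pattern the css.c
 * hunk above introduces (and whose open-coded variants device.c and
 * vfio_ccw_drv.c drop below), using hypothetical names.  Attributes
 * hung off a struct device_type are created and removed by the driver
 * core, so the probe/remove paths need no sysfs_create_group() calls.
 */
#include <linux/device.h>
#include <linux/sysfs.h>

static ssize_t foo_show(struct device *dev, struct device_attribute *attr,
			char *buf)
{
	return sprintf(buf, "%d\n", 42);	/* placeholder value */
}
static DEVICE_ATTR(foo, 0444, foo_show, NULL);

static struct attribute *my_type_attrs[] = {
	&dev_attr_foo.attr,
	NULL,
};
ATTRIBUTE_GROUPS(my_type);		/* emits my_type_groups[] */

static const struct device_type my_type = {
	.groups = my_type_groups,
};

/*
 * Before device_register() / device_add():
 *	dev->type = &my_type;
 */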
diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c
index b8006ea9099c..7be01a58b44f 100644
--- a/drivers/s390/cio/device.c
+++ b/drivers/s390/cio/device.c
@@ -208,44 +208,6 @@ int __init io_subchannel_init(void)
208 208
209/************************ device handling **************************/ 209/************************ device handling **************************/
210 210
211/*
212 * A ccw_device has some interfaces in sysfs in addition to the
213 * standard ones.
214 * The following entries are designed to export the information which
215 * resided in 2.4 in /proc/subchannels. Subchannel and device number
216 * are obvious, so they don't have an entry :)
217 * TODO: Split chpids and pimpampom up? Where is "in use" in the tree?
218 */
219static ssize_t
220chpids_show (struct device * dev, struct device_attribute *attr, char * buf)
221{
222 struct subchannel *sch = to_subchannel(dev);
223 struct chsc_ssd_info *ssd = &sch->ssd_info;
224 ssize_t ret = 0;
225 int chp;
226 int mask;
227
228 for (chp = 0; chp < 8; chp++) {
229 mask = 0x80 >> chp;
230 if (ssd->path_mask & mask)
231 ret += sprintf(buf + ret, "%02x ", ssd->chpid[chp].id);
232 else
233 ret += sprintf(buf + ret, "00 ");
234 }
235 ret += sprintf (buf+ret, "\n");
236 return min((ssize_t)PAGE_SIZE, ret);
237}
238
239static ssize_t
240pimpampom_show (struct device * dev, struct device_attribute *attr, char * buf)
241{
242 struct subchannel *sch = to_subchannel(dev);
243 struct pmcw *pmcw = &sch->schib.pmcw;
244
245 return sprintf (buf, "%02x %02x %02x\n",
246 pmcw->pim, pmcw->pam, pmcw->pom);
247}
248
249static ssize_t 211static ssize_t
250devtype_show (struct device *dev, struct device_attribute *attr, char *buf) 212devtype_show (struct device *dev, struct device_attribute *attr, char *buf)
251{ 213{
@@ -636,8 +598,6 @@ static ssize_t vpm_show(struct device *dev, struct device_attribute *attr,
636 return sprintf(buf, "%02x\n", sch->vpm); 598 return sprintf(buf, "%02x\n", sch->vpm);
637} 599}
638 600
639static DEVICE_ATTR(chpids, 0444, chpids_show, NULL);
640static DEVICE_ATTR(pimpampom, 0444, pimpampom_show, NULL);
641static DEVICE_ATTR(devtype, 0444, devtype_show, NULL); 601static DEVICE_ATTR(devtype, 0444, devtype_show, NULL);
642static DEVICE_ATTR(cutype, 0444, cutype_show, NULL); 602static DEVICE_ATTR(cutype, 0444, cutype_show, NULL);
643static DEVICE_ATTR(modalias, 0444, modalias_show, NULL); 603static DEVICE_ATTR(modalias, 0444, modalias_show, NULL);
@@ -647,8 +607,6 @@ static DEVICE_ATTR(logging, 0200, NULL, initiate_logging);
647static DEVICE_ATTR(vpm, 0444, vpm_show, NULL); 607static DEVICE_ATTR(vpm, 0444, vpm_show, NULL);
648 608
649static struct attribute *io_subchannel_attrs[] = { 609static struct attribute *io_subchannel_attrs[] = {
650 &dev_attr_chpids.attr,
651 &dev_attr_pimpampom.attr,
652 &dev_attr_logging.attr, 610 &dev_attr_logging.attr,
653 &dev_attr_vpm.attr, 611 &dev_attr_vpm.attr,
654 NULL, 612 NULL,
diff --git a/drivers/s390/cio/qdio_debug.h b/drivers/s390/cio/qdio_debug.h
index f33ce8577619..1d595d17bf11 100644
--- a/drivers/s390/cio/qdio_debug.h
+++ b/drivers/s390/cio/qdio_debug.h
@@ -11,7 +11,7 @@
11#include "qdio.h" 11#include "qdio.h"
12 12
13/* that gives us 15 characters in the text event views */ 13/* that gives us 15 characters in the text event views */
14#define QDIO_DBF_LEN 16 14#define QDIO_DBF_LEN 32
15 15
16extern debug_info_t *qdio_dbf_setup; 16extern debug_info_t *qdio_dbf_setup;
17extern debug_info_t *qdio_dbf_error; 17extern debug_info_t *qdio_dbf_error;
diff --git a/drivers/s390/cio/vfio_ccw_drv.c b/drivers/s390/cio/vfio_ccw_drv.c
index e90dd43d2a55..a25367ebaa89 100644
--- a/drivers/s390/cio/vfio_ccw_drv.c
+++ b/drivers/s390/cio/vfio_ccw_drv.c
@@ -90,54 +90,6 @@ static void vfio_ccw_sch_io_todo(struct work_struct *work)
90} 90}
91 91
92/* 92/*
93 * Sysfs interfaces
94 */
95static ssize_t chpids_show(struct device *dev,
96 struct device_attribute *attr,
97 char *buf)
98{
99 struct subchannel *sch = to_subchannel(dev);
100 struct chsc_ssd_info *ssd = &sch->ssd_info;
101 ssize_t ret = 0;
102 int chp;
103 int mask;
104
105 for (chp = 0; chp < 8; chp++) {
106 mask = 0x80 >> chp;
107 if (ssd->path_mask & mask)
108 ret += sprintf(buf + ret, "%02x ", ssd->chpid[chp].id);
109 else
110 ret += sprintf(buf + ret, "00 ");
111 }
112 ret += sprintf(buf+ret, "\n");
113 return ret;
114}
115
116static ssize_t pimpampom_show(struct device *dev,
117 struct device_attribute *attr,
118 char *buf)
119{
120 struct subchannel *sch = to_subchannel(dev);
121 struct pmcw *pmcw = &sch->schib.pmcw;
122
123 return sprintf(buf, "%02x %02x %02x\n",
124 pmcw->pim, pmcw->pam, pmcw->pom);
125}
126
127static DEVICE_ATTR(chpids, 0444, chpids_show, NULL);
128static DEVICE_ATTR(pimpampom, 0444, pimpampom_show, NULL);
129
130static struct attribute *vfio_subchannel_attrs[] = {
131 &dev_attr_chpids.attr,
132 &dev_attr_pimpampom.attr,
133 NULL,
134};
135
136static struct attribute_group vfio_subchannel_attr_group = {
137 .attrs = vfio_subchannel_attrs,
138};
139
140/*
141 * Css driver callbacks 93 * Css driver callbacks
142 */ 94 */
143static void vfio_ccw_sch_irq(struct subchannel *sch) 95static void vfio_ccw_sch_irq(struct subchannel *sch)
@@ -174,13 +126,9 @@ static int vfio_ccw_sch_probe(struct subchannel *sch)
174 if (ret) 126 if (ret)
175 goto out_free; 127 goto out_free;
176 128
177 ret = sysfs_create_group(&sch->dev.kobj, &vfio_subchannel_attr_group);
178 if (ret)
179 goto out_disable;
180
181 ret = vfio_ccw_mdev_reg(sch); 129 ret = vfio_ccw_mdev_reg(sch);
182 if (ret) 130 if (ret)
183 goto out_rm_group; 131 goto out_disable;
184 132
185 INIT_WORK(&private->io_work, vfio_ccw_sch_io_todo); 133 INIT_WORK(&private->io_work, vfio_ccw_sch_io_todo);
186 atomic_set(&private->avail, 1); 134 atomic_set(&private->avail, 1);
@@ -188,8 +136,6 @@ static int vfio_ccw_sch_probe(struct subchannel *sch)
188 136
189 return 0; 137 return 0;
190 138
191out_rm_group:
192 sysfs_remove_group(&sch->dev.kobj, &vfio_subchannel_attr_group);
193out_disable: 139out_disable:
194 cio_disable_subchannel(sch); 140 cio_disable_subchannel(sch);
195out_free: 141out_free:
@@ -206,8 +152,6 @@ static int vfio_ccw_sch_remove(struct subchannel *sch)
206 152
207 vfio_ccw_mdev_unreg(sch); 153 vfio_ccw_mdev_unreg(sch);
208 154
209 sysfs_remove_group(&sch->dev.kobj, &vfio_subchannel_attr_group);
210
211 dev_set_drvdata(&sch->dev, NULL); 155 dev_set_drvdata(&sch->dev, NULL);
212 156
213 kfree(private); 157 kfree(private);
diff --git a/drivers/s390/cio/vfio_ccw_ops.c b/drivers/s390/cio/vfio_ccw_ops.c
index e72abbc18ee3..a66a317f3e4f 100644
--- a/drivers/s390/cio/vfio_ccw_ops.c
+++ b/drivers/s390/cio/vfio_ccw_ops.c
@@ -70,14 +70,14 @@ static ssize_t name_show(struct kobject *kobj, struct device *dev, char *buf)
70{ 70{
71 return sprintf(buf, "I/O subchannel (Non-QDIO)\n"); 71 return sprintf(buf, "I/O subchannel (Non-QDIO)\n");
72} 72}
73MDEV_TYPE_ATTR_RO(name); 73static MDEV_TYPE_ATTR_RO(name);
74 74
75static ssize_t device_api_show(struct kobject *kobj, struct device *dev, 75static ssize_t device_api_show(struct kobject *kobj, struct device *dev,
76 char *buf) 76 char *buf)
77{ 77{
78 return sprintf(buf, "%s\n", VFIO_DEVICE_API_CCW_STRING); 78 return sprintf(buf, "%s\n", VFIO_DEVICE_API_CCW_STRING);
79} 79}
80MDEV_TYPE_ATTR_RO(device_api); 80static MDEV_TYPE_ATTR_RO(device_api);
81 81
82static ssize_t available_instances_show(struct kobject *kobj, 82static ssize_t available_instances_show(struct kobject *kobj,
83 struct device *dev, char *buf) 83 struct device *dev, char *buf)
@@ -86,7 +86,7 @@ static ssize_t available_instances_show(struct kobject *kobj,
86 86
87 return sprintf(buf, "%d\n", atomic_read(&private->avail)); 87 return sprintf(buf, "%d\n", atomic_read(&private->avail));
88} 88}
89MDEV_TYPE_ATTR_RO(available_instances); 89static MDEV_TYPE_ATTR_RO(available_instances);
90 90
91static struct attribute *mdev_types_attrs[] = { 91static struct attribute *mdev_types_attrs[] = {
92 &mdev_type_attr_name.attr, 92 &mdev_type_attr_name.attr,
@@ -100,7 +100,7 @@ static struct attribute_group mdev_type_group = {
100 .attrs = mdev_types_attrs, 100 .attrs = mdev_types_attrs,
101}; 101};
102 102
103struct attribute_group *mdev_type_groups[] = { 103static struct attribute_group *mdev_type_groups[] = {
104 &mdev_type_group, 104 &mdev_type_group,
105 NULL, 105 NULL,
106}; 106};
@@ -152,7 +152,7 @@ static int vfio_ccw_mdev_open(struct mdev_device *mdev)
152 &events, &private->nb); 152 &events, &private->nb);
153} 153}
154 154
155void vfio_ccw_mdev_release(struct mdev_device *mdev) 155static void vfio_ccw_mdev_release(struct mdev_device *mdev)
156{ 156{
157 struct vfio_ccw_private *private = 157 struct vfio_ccw_private *private =
158 dev_get_drvdata(mdev_parent_dev(mdev)); 158 dev_get_drvdata(mdev_parent_dev(mdev));
@@ -233,7 +233,7 @@ static int vfio_ccw_mdev_get_region_info(struct vfio_region_info *info,
233 } 233 }
234} 234}
235 235
236int vfio_ccw_mdev_get_irq_info(struct vfio_irq_info *info) 236static int vfio_ccw_mdev_get_irq_info(struct vfio_irq_info *info)
237{ 237{
238 if (info->index != VFIO_CCW_IO_IRQ_INDEX) 238 if (info->index != VFIO_CCW_IO_IRQ_INDEX)
239 return -EINVAL; 239 return -EINVAL;
diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c
index 9be4596d8a08..6dee598979e7 100644
--- a/drivers/s390/crypto/ap_bus.c
+++ b/drivers/s390/crypto/ap_bus.c
@@ -668,10 +668,28 @@ static int ap_device_probe(struct device *dev)
668 struct ap_driver *ap_drv = to_ap_drv(dev->driver); 668 struct ap_driver *ap_drv = to_ap_drv(dev->driver);
669 int rc; 669 int rc;
670 670
671 /* Add queue/card to list of active queues/cards */
672 spin_lock_bh(&ap_list_lock);
673 if (is_card_dev(dev))
674 list_add(&to_ap_card(dev)->list, &ap_card_list);
675 else
676 list_add(&to_ap_queue(dev)->list,
677 &to_ap_queue(dev)->card->queues);
678 spin_unlock_bh(&ap_list_lock);
679
671 ap_dev->drv = ap_drv; 680 ap_dev->drv = ap_drv;
672 rc = ap_drv->probe ? ap_drv->probe(ap_dev) : -ENODEV; 681 rc = ap_drv->probe ? ap_drv->probe(ap_dev) : -ENODEV;
673 if (rc) 682
683 if (rc) {
684 spin_lock_bh(&ap_list_lock);
685 if (is_card_dev(dev))
686 list_del_init(&to_ap_card(dev)->list);
687 else
688 list_del_init(&to_ap_queue(dev)->list);
689 spin_unlock_bh(&ap_list_lock);
674 ap_dev->drv = NULL; 690 ap_dev->drv = NULL;
691 }
692
675 return rc; 693 return rc;
676} 694}
677 695
@@ -680,14 +698,17 @@ static int ap_device_remove(struct device *dev)
680 struct ap_device *ap_dev = to_ap_dev(dev); 698 struct ap_device *ap_dev = to_ap_dev(dev);
681 struct ap_driver *ap_drv = ap_dev->drv; 699 struct ap_driver *ap_drv = ap_dev->drv;
682 700
701 if (ap_drv->remove)
702 ap_drv->remove(ap_dev);
703
704 /* Remove queue/card from list of active queues/cards */
683 spin_lock_bh(&ap_list_lock); 705 spin_lock_bh(&ap_list_lock);
684 if (is_card_dev(dev)) 706 if (is_card_dev(dev))
685 list_del_init(&to_ap_card(dev)->list); 707 list_del_init(&to_ap_card(dev)->list);
686 else 708 else
687 list_del_init(&to_ap_queue(dev)->list); 709 list_del_init(&to_ap_queue(dev)->list);
688 spin_unlock_bh(&ap_list_lock); 710 spin_unlock_bh(&ap_list_lock);
689 if (ap_drv->remove) 711
690 ap_drv->remove(ap_dev);
691 return 0; 712 return 0;
692} 713}
693 714
@@ -745,7 +766,7 @@ static ssize_t ap_domain_store(struct bus_type *bus,
745 ap_domain_index = domain; 766 ap_domain_index = domain;
746 spin_unlock_bh(&ap_domain_lock); 767 spin_unlock_bh(&ap_domain_lock);
747 768
748 AP_DBF(DBF_DEBUG, "store new default domain=%d\n", domain); 769 AP_DBF(DBF_DEBUG, "stored new default domain=%d\n", domain);
749 770
750 return count; 771 return count;
751} 772}
@@ -931,6 +952,7 @@ static int ap_select_domain(void)
931 } 952 }
932 if (best_domain >= 0){ 953 if (best_domain >= 0){
933 ap_domain_index = best_domain; 954 ap_domain_index = best_domain;
955 AP_DBF(DBF_DEBUG, "new ap_domain_index=%d\n", ap_domain_index);
934 spin_unlock_bh(&ap_domain_lock); 956 spin_unlock_bh(&ap_domain_lock);
935 return 0; 957 return 0;
936 } 958 }
@@ -967,7 +989,7 @@ static void ap_scan_bus(struct work_struct *unused)
967 ap_qid_t qid; 989 ap_qid_t qid;
968 int depth = 0, type = 0; 990 int depth = 0, type = 0;
969 unsigned int functions = 0; 991 unsigned int functions = 0;
970 int rc, id, dom, borked, domains; 992 int rc, id, dom, borked, domains, defdomdevs = 0;
971 993
972 AP_DBF(DBF_DEBUG, "ap_scan_bus running\n"); 994 AP_DBF(DBF_DEBUG, "ap_scan_bus running\n");
973 995
@@ -1031,6 +1053,8 @@ static void ap_scan_bus(struct work_struct *unused)
1031 put_device(dev); 1053 put_device(dev);
1032 if (!borked) { 1054 if (!borked) {
1033 domains++; 1055 domains++;
1056 if (dom == ap_domain_index)
1057 defdomdevs++;
1034 continue; 1058 continue;
1035 } 1059 }
1036 } 1060 }
@@ -1056,10 +1080,6 @@ static void ap_scan_bus(struct work_struct *unused)
1056 } 1080 }
1057 /* get it and thus adjust reference counter */ 1081 /* get it and thus adjust reference counter */
1058 get_device(&ac->ap_dev.device); 1082 get_device(&ac->ap_dev.device);
1059 /* Add card device to card list */
1060 spin_lock_bh(&ap_list_lock);
1061 list_add(&ac->list, &ap_card_list);
1062 spin_unlock_bh(&ap_list_lock);
1063 } 1083 }
1064 /* now create the new queue device */ 1084 /* now create the new queue device */
1065 aq = ap_queue_create(qid, type); 1085 aq = ap_queue_create(qid, type);
@@ -1070,10 +1090,6 @@ static void ap_scan_bus(struct work_struct *unused)
1070 aq->ap_dev.device.parent = &ac->ap_dev.device; 1090 aq->ap_dev.device.parent = &ac->ap_dev.device;
1071 dev_set_name(&aq->ap_dev.device, 1091 dev_set_name(&aq->ap_dev.device,
1072 "%02x.%04x", id, dom); 1092 "%02x.%04x", id, dom);
1073 /* Add queue device to card queue list */
1074 spin_lock_bh(&ap_list_lock);
1075 list_add(&aq->list, &ac->queues);
1076 spin_unlock_bh(&ap_list_lock);
1077 /* Start with a device reset */ 1093 /* Start with a device reset */
1078 spin_lock_bh(&aq->lock); 1094 spin_lock_bh(&aq->lock);
1079 ap_wait(ap_sm_event(aq, AP_EVENT_POLL)); 1095 ap_wait(ap_sm_event(aq, AP_EVENT_POLL));
@@ -1081,13 +1097,12 @@ static void ap_scan_bus(struct work_struct *unused)
1081 /* Register device */ 1097 /* Register device */
1082 rc = device_register(&aq->ap_dev.device); 1098 rc = device_register(&aq->ap_dev.device);
1083 if (rc) { 1099 if (rc) {
1084 spin_lock_bh(&ap_list_lock);
1085 list_del_init(&aq->list);
1086 spin_unlock_bh(&ap_list_lock);
1087 put_device(&aq->ap_dev.device); 1100 put_device(&aq->ap_dev.device);
1088 continue; 1101 continue;
1089 } 1102 }
1090 domains++; 1103 domains++;
1104 if (dom == ap_domain_index)
1105 defdomdevs++;
1091 } /* end domain loop */ 1106 } /* end domain loop */
1092 if (ac) { 1107 if (ac) {
1093 /* remove card dev if there are no queue devices */ 1108 /* remove card dev if there are no queue devices */
@@ -1096,6 +1111,11 @@ static void ap_scan_bus(struct work_struct *unused)
1096 put_device(&ac->ap_dev.device); 1111 put_device(&ac->ap_dev.device);
1097 } 1112 }
1098 } /* end device loop */ 1113 } /* end device loop */
1114
1115 if (defdomdevs < 1)
1116 AP_DBF(DBF_INFO, "no queue device with default domain %d available\n",
1117 ap_domain_index);
1118
1099out: 1119out:
1100 mod_timer(&ap_config_timer, jiffies + ap_config_time * HZ); 1120 mod_timer(&ap_config_timer, jiffies + ap_config_time * HZ);
1101} 1121}
@@ -1164,14 +1184,14 @@ int __init ap_module_init(void)
1164 ap_init_configuration(); 1184 ap_init_configuration();
1165 1185
1166 if (ap_configuration) 1186 if (ap_configuration)
1167 max_domain_id = ap_max_domain_id ? : (AP_DOMAINS - 1); 1187 max_domain_id =
1188 ap_max_domain_id ? ap_max_domain_id : AP_DOMAINS - 1;
1168 else 1189 else
1169 max_domain_id = 15; 1190 max_domain_id = 15;
1170 if (ap_domain_index < -1 || ap_domain_index > max_domain_id) { 1191 if (ap_domain_index < -1 || ap_domain_index > max_domain_id) {
1171 pr_warn("%d is not a valid cryptographic domain\n", 1192 pr_warn("%d is not a valid cryptographic domain\n",
1172 ap_domain_index); 1193 ap_domain_index);
1173 rc = -EINVAL; 1194 ap_domain_index = -1;
1174 goto out_free;
1175 } 1195 }
1176 /* In resume callback we need to know if the user had set the domain. 1196 /* In resume callback we need to know if the user had set the domain.
1177 * If so, we can not just reset it. 1197 * If so, we can not just reset it.
@@ -1244,7 +1264,6 @@ out:
1244 unregister_reset_call(&ap_reset_call); 1264 unregister_reset_call(&ap_reset_call);
1245 if (ap_using_interrupts()) 1265 if (ap_using_interrupts())
1246 unregister_adapter_interrupt(&ap_airq); 1266 unregister_adapter_interrupt(&ap_airq);
1247out_free:
1248 kfree(ap_configuration); 1267 kfree(ap_configuration);
1249 return rc; 1268 return rc;
1250} 1269}
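/*
 * Aside, not part of the patch: the probe/remove ordering the ap_bus.c
 * hunks above move to, reduced to a skeleton with hypothetical names.
 * The device goes onto the bus's bookkeeping list before ->probe() so
 * the driver can already see it, comes off again if probe fails, and
 * on removal comes off only after ->remove() has run.
 */
static int my_bus_probe(struct device *dev)
{
	int rc;

	my_list_add(dev);		/* hypothetical list helpers, taken under a lock */
	rc = my_drv_probe(dev);		/* hypothetical driver callback */
	if (rc)
		my_list_del(dev);
	return rc;
}

static int my_bus_remove(struct device *dev)
{
	my_drv_remove(dev);		/* hypothetical driver callback */
	my_list_del(dev);
	return 0;
}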
diff --git a/drivers/s390/crypto/ap_card.c b/drivers/s390/crypto/ap_card.c
index cfa161ccc74e..836efac96813 100644
--- a/drivers/s390/crypto/ap_card.c
+++ b/drivers/s390/crypto/ap_card.c
@@ -160,7 +160,14 @@ static struct device_type ap_card_type = {
160 160
161static void ap_card_device_release(struct device *dev) 161static void ap_card_device_release(struct device *dev)
162{ 162{
163 kfree(to_ap_card(dev)); 163 struct ap_card *ac = to_ap_card(dev);
164
165 if (!list_empty(&ac->list)) {
166 spin_lock_bh(&ap_list_lock);
167 list_del_init(&ac->list);
168 spin_unlock_bh(&ap_list_lock);
169 }
170 kfree(ac);
164} 171}
165 172
166struct ap_card *ap_card_create(int id, int queue_depth, int device_type, 173struct ap_card *ap_card_create(int id, int queue_depth, int device_type,
diff --git a/drivers/s390/crypto/ap_queue.c b/drivers/s390/crypto/ap_queue.c
index 480c58a63769..0f1a5d02acb0 100644
--- a/drivers/s390/crypto/ap_queue.c
+++ b/drivers/s390/crypto/ap_queue.c
@@ -584,7 +584,14 @@ static struct device_type ap_queue_type = {
584 584
585static void ap_queue_device_release(struct device *dev) 585static void ap_queue_device_release(struct device *dev)
586{ 586{
587 kfree(to_ap_queue(dev)); 587 struct ap_queue *aq = to_ap_queue(dev);
588
589 if (!list_empty(&aq->list)) {
590 spin_lock_bh(&ap_list_lock);
591 list_del_init(&aq->list);
592 spin_unlock_bh(&ap_list_lock);
593 }
594 kfree(aq);
588} 595}
589 596
590struct ap_queue *ap_queue_create(ap_qid_t qid, int device_type) 597struct ap_queue *ap_queue_create(ap_qid_t qid, int device_type)
diff --git a/drivers/s390/crypto/pkey_api.c b/drivers/s390/crypto/pkey_api.c
index ea86da8c75f9..f61fa47135a6 100644
--- a/drivers/s390/crypto/pkey_api.c
+++ b/drivers/s390/crypto/pkey_api.c
@@ -178,9 +178,9 @@ static inline void prep_xcrb(struct ica_xcRB *pxcrb,
178 pxcrb->user_defined = (cardnr == 0xFFFF ? AUTOSELECT : cardnr); 178 pxcrb->user_defined = (cardnr == 0xFFFF ? AUTOSELECT : cardnr);
179 pxcrb->request_control_blk_length = 179 pxcrb->request_control_blk_length =
180 preqcblk->cprb_len + preqcblk->req_parml; 180 preqcblk->cprb_len + preqcblk->req_parml;
181 pxcrb->request_control_blk_addr = (void *) preqcblk; 181 pxcrb->request_control_blk_addr = (void __user *) preqcblk;
182 pxcrb->reply_control_blk_length = preqcblk->rpl_msgbl; 182 pxcrb->reply_control_blk_length = preqcblk->rpl_msgbl;
183 pxcrb->reply_control_blk_addr = (void *) prepcblk; 183 pxcrb->reply_control_blk_addr = (void __user *) prepcblk;
184} 184}
185 185
186/* 186/*
@@ -1194,7 +1194,7 @@ static struct miscdevice pkey_dev = {
1194/* 1194/*
1195 * Module init 1195 * Module init
1196 */ 1196 */
1197int __init pkey_init(void) 1197static int __init pkey_init(void)
1198{ 1198{
1199 cpacf_mask_t pckmo_functions; 1199 cpacf_mask_t pckmo_functions;
1200 1200
diff --git a/drivers/s390/crypto/zcrypt_api.c b/drivers/s390/crypto/zcrypt_api.c
index 93015f85d4a6..b1c27e28859b 100644
--- a/drivers/s390/crypto/zcrypt_api.c
+++ b/drivers/s390/crypto/zcrypt_api.c
@@ -821,8 +821,10 @@ static long zcrypt_unlocked_ioctl(struct file *filp, unsigned int cmd,
821 do { 821 do {
822 rc = zcrypt_rsa_modexpo(&mex); 822 rc = zcrypt_rsa_modexpo(&mex);
823 } while (rc == -EAGAIN); 823 } while (rc == -EAGAIN);
824 if (rc) 824 if (rc) {
825 ZCRYPT_DBF(DBF_DEBUG, "ioctl ICARSAMODEXPO rc=%d", rc);
825 return rc; 826 return rc;
827 }
826 return put_user(mex.outputdatalength, &umex->outputdatalength); 828 return put_user(mex.outputdatalength, &umex->outputdatalength);
827 } 829 }
828 case ICARSACRT: { 830 case ICARSACRT: {
@@ -838,8 +840,10 @@ static long zcrypt_unlocked_ioctl(struct file *filp, unsigned int cmd,
838 do { 840 do {
839 rc = zcrypt_rsa_crt(&crt); 841 rc = zcrypt_rsa_crt(&crt);
840 } while (rc == -EAGAIN); 842 } while (rc == -EAGAIN);
841 if (rc) 843 if (rc) {
844 ZCRYPT_DBF(DBF_DEBUG, "ioctl ICARSACRT rc=%d", rc);
842 return rc; 845 return rc;
846 }
843 return put_user(crt.outputdatalength, &ucrt->outputdatalength); 847 return put_user(crt.outputdatalength, &ucrt->outputdatalength);
844 } 848 }
845 case ZSECSENDCPRB: { 849 case ZSECSENDCPRB: {
@@ -855,6 +859,8 @@ static long zcrypt_unlocked_ioctl(struct file *filp, unsigned int cmd,
855 do { 859 do {
856 rc = zcrypt_send_cprb(&xcRB); 860 rc = zcrypt_send_cprb(&xcRB);
857 } while (rc == -EAGAIN); 861 } while (rc == -EAGAIN);
862 if (rc)
863 ZCRYPT_DBF(DBF_DEBUG, "ioctl ZSENDCPRB rc=%d", rc);
858 if (copy_to_user(uxcRB, &xcRB, sizeof(xcRB))) 864 if (copy_to_user(uxcRB, &xcRB, sizeof(xcRB)))
859 return -EFAULT; 865 return -EFAULT;
860 return rc; 866 return rc;
@@ -872,6 +878,8 @@ static long zcrypt_unlocked_ioctl(struct file *filp, unsigned int cmd,
872 do { 878 do {
873 rc = zcrypt_send_ep11_cprb(&xcrb); 879 rc = zcrypt_send_ep11_cprb(&xcrb);
874 } while (rc == -EAGAIN); 880 } while (rc == -EAGAIN);
881 if (rc)
882 ZCRYPT_DBF(DBF_DEBUG, "ioctl ZSENDEP11CPRB rc=%d", rc);
875 if (copy_to_user(uxcrb, &xcrb, sizeof(xcrb))) 883 if (copy_to_user(uxcrb, &xcrb, sizeof(xcrb)))
876 return -EFAULT; 884 return -EFAULT;
877 return rc; 885 return rc;
diff --git a/drivers/s390/crypto/zcrypt_cca_key.h b/drivers/s390/crypto/zcrypt_cca_key.h
index ca0cdbe46368..12cff6262566 100644
--- a/drivers/s390/crypto/zcrypt_cca_key.h
+++ b/drivers/s390/crypto/zcrypt_cca_key.h
@@ -48,26 +48,6 @@ struct cca_token_hdr {
48 48
49#define CCA_TKN_HDR_ID_EXT 0x1E 49#define CCA_TKN_HDR_ID_EXT 0x1E
50 50
51/**
52 * mapping for the cca private ME section
53 */
54struct cca_private_ext_ME_sec {
55 unsigned char section_identifier;
56 unsigned char version;
57 unsigned short section_length;
58 unsigned char private_key_hash[20];
59 unsigned char reserved1[4];
60 unsigned char key_format;
61 unsigned char reserved2;
62 unsigned char key_name_hash[20];
63 unsigned char key_use_flags[4];
64 unsigned char reserved3[6];
65 unsigned char reserved4[24];
66 unsigned char confounder[24];
67 unsigned char exponent[128];
68 unsigned char modulus[128];
69} __attribute__((packed));
70
71#define CCA_PVT_USAGE_ALL 0x80 51#define CCA_PVT_USAGE_ALL 0x80
72 52
73/** 53/**
@@ -124,77 +104,6 @@ struct cca_pvt_ext_CRT_sec {
124#define CCA_PVT_EXT_CRT_SEC_FMT_CL 0x40 104#define CCA_PVT_EXT_CRT_SEC_FMT_CL 0x40
125 105
126/** 106/**
127 * Set up private key fields of a type6 MEX message.
128 * Note that all numerics in the key token are big-endian,
129 * while the entries in the key block header are little-endian.
130 *
131 * @mex: pointer to user input data
132 * @p: pointer to memory area for the key
133 *
134 * Returns the size of the key area or -EFAULT
135 */
136static inline int zcrypt_type6_mex_key_de(struct ica_rsa_modexpo *mex,
137 void *p, int big_endian)
138{
139 static struct cca_token_hdr static_pvt_me_hdr = {
140 .token_identifier = 0x1E,
141 .token_length = 0x0183,
142 };
143 static struct cca_private_ext_ME_sec static_pvt_me_sec = {
144 .section_identifier = 0x02,
145 .section_length = 0x016C,
146 .key_use_flags = {0x80,0x00,0x00,0x00},
147 };
148 static struct cca_public_sec static_pub_me_sec = {
149 .section_identifier = 0x04,
150 .section_length = 0x000F,
151 .exponent_len = 0x0003,
152 };
153 static char pk_exponent[3] = { 0x01, 0x00, 0x01 };
154 struct {
155 struct T6_keyBlock_hdr t6_hdr;
156 struct cca_token_hdr pvtMeHdr;
157 struct cca_private_ext_ME_sec pvtMeSec;
158 struct cca_public_sec pubMeSec;
159 char exponent[3];
160 } __attribute__((packed)) *key = p;
161 unsigned char *temp;
162
163 memset(key, 0, sizeof(*key));
164
165 if (big_endian) {
166 key->t6_hdr.blen = cpu_to_be16(0x189);
167 key->t6_hdr.ulen = cpu_to_be16(0x189 - 2);
168 } else {
169 key->t6_hdr.blen = cpu_to_le16(0x189);
170 key->t6_hdr.ulen = cpu_to_le16(0x189 - 2);
171 }
172 key->pvtMeHdr = static_pvt_me_hdr;
173 key->pvtMeSec = static_pvt_me_sec;
174 key->pubMeSec = static_pub_me_sec;
175 /*
176 * In a private key, the modulus doesn't appear in the public
177 * section. So, an arbitrary public exponent of 0x010001 will be
178 * used.
179 */
180 memcpy(key->exponent, pk_exponent, 3);
181
182 /* key parameter block */
183 temp = key->pvtMeSec.exponent +
184 sizeof(key->pvtMeSec.exponent) - mex->inputdatalength;
185 if (copy_from_user(temp, mex->b_key, mex->inputdatalength))
186 return -EFAULT;
187
188 /* modulus */
189 temp = key->pvtMeSec.modulus +
190 sizeof(key->pvtMeSec.modulus) - mex->inputdatalength;
191 if (copy_from_user(temp, mex->n_modulus, mex->inputdatalength))
192 return -EFAULT;
193 key->pubMeSec.modulus_bit_len = 8 * mex->inputdatalength;
194 return sizeof(*key);
195}
196
197/**
198 * Set up private key fields of a type6 MEX message. The _pad variant 107 * Set up private key fields of a type6 MEX message. The _pad variant
199 * strips leading zeroes from the b_key. 108 * strips leading zeroes from the b_key.
200 * Note that all numerics in the key token are big-endian, 109 * Note that all numerics in the key token are big-endian,
@@ -205,8 +114,7 @@ static inline int zcrypt_type6_mex_key_de(struct ica_rsa_modexpo *mex,
205 * 114 *
206 * Returns the size of the key area or -EFAULT 115 * Returns the size of the key area or -EFAULT
207 */ 116 */
208static inline int zcrypt_type6_mex_key_en(struct ica_rsa_modexpo *mex, 117static inline int zcrypt_type6_mex_key_en(struct ica_rsa_modexpo *mex, void *p)
209 void *p, int big_endian)
210{ 118{
211 static struct cca_token_hdr static_pub_hdr = { 119 static struct cca_token_hdr static_pub_hdr = {
212 .token_identifier = 0x1E, 120 .token_identifier = 0x1E,
@@ -251,13 +159,8 @@ static inline int zcrypt_type6_mex_key_en(struct ica_rsa_modexpo *mex,
251 2*mex->inputdatalength - i; 159 2*mex->inputdatalength - i;
252 key->pubHdr.token_length = 160 key->pubHdr.token_length =
253 key->pubSec.section_length + sizeof(key->pubHdr); 161 key->pubSec.section_length + sizeof(key->pubHdr);
254 if (big_endian) { 162 key->t6_hdr.ulen = key->pubHdr.token_length + 4;
255 key->t6_hdr.ulen = cpu_to_be16(key->pubHdr.token_length + 4); 163 key->t6_hdr.blen = key->pubHdr.token_length + 6;
256 key->t6_hdr.blen = cpu_to_be16(key->pubHdr.token_length + 6);
257 } else {
258 key->t6_hdr.ulen = cpu_to_le16(key->pubHdr.token_length + 4);
259 key->t6_hdr.blen = cpu_to_le16(key->pubHdr.token_length + 6);
260 }
261 return sizeof(*key) + 2*mex->inputdatalength - i; 164 return sizeof(*key) + 2*mex->inputdatalength - i;
262} 165}
263 166
@@ -271,8 +174,7 @@ static inline int zcrypt_type6_mex_key_en(struct ica_rsa_modexpo *mex,
271 * 174 *
272 * Returns the size of the key area or -EFAULT 175 * Returns the size of the key area or -EFAULT
273 */ 176 */
274static inline int zcrypt_type6_crt_key(struct ica_rsa_modexpo_crt *crt, 177static inline int zcrypt_type6_crt_key(struct ica_rsa_modexpo_crt *crt, void *p)
275 void *p, int big_endian)
276{ 178{
277 static struct cca_public_sec static_cca_pub_sec = { 179 static struct cca_public_sec static_cca_pub_sec = {
278 .section_identifier = 4, 180 .section_identifier = 4,
@@ -298,13 +200,8 @@ static inline int zcrypt_type6_crt_key(struct ica_rsa_modexpo_crt *crt,
298 size = sizeof(*key) + key_len + sizeof(*pub) + 3; 200 size = sizeof(*key) + key_len + sizeof(*pub) + 3;
299 201
300 /* parameter block.key block */ 202 /* parameter block.key block */
301 if (big_endian) { 203 key->t6_hdr.blen = size;
302 key->t6_hdr.blen = cpu_to_be16(size); 204 key->t6_hdr.ulen = size - 2;
303 key->t6_hdr.ulen = cpu_to_be16(size - 2);
304 } else {
305 key->t6_hdr.blen = cpu_to_le16(size);
306 key->t6_hdr.ulen = cpu_to_le16(size - 2);
307 }
308 205
309 /* key token header */ 206 /* key token header */
310 key->token.token_identifier = CCA_TKN_HDR_ID_EXT; 207 key->token.token_identifier = CCA_TKN_HDR_ID_EXT;
diff --git a/drivers/s390/crypto/zcrypt_msgtype6.c b/drivers/s390/crypto/zcrypt_msgtype6.c
index e5563ffeb839..4fddb4319481 100644
--- a/drivers/s390/crypto/zcrypt_msgtype6.c
+++ b/drivers/s390/crypto/zcrypt_msgtype6.c
@@ -291,7 +291,7 @@ static int ICAMEX_msg_to_type6MEX_msgX(struct zcrypt_queue *zq,
291 return -EFAULT; 291 return -EFAULT;
292 292
293 /* Set up key which is located after the variable length text. */ 293 /* Set up key which is located after the variable length text. */
294 size = zcrypt_type6_mex_key_en(mex, msg->text+mex->inputdatalength, 1); 294 size = zcrypt_type6_mex_key_en(mex, msg->text+mex->inputdatalength);
295 if (size < 0) 295 if (size < 0)
296 return size; 296 return size;
297 size += sizeof(*msg) + mex->inputdatalength; 297 size += sizeof(*msg) + mex->inputdatalength;
@@ -353,7 +353,7 @@ static int ICACRT_msg_to_type6CRT_msgX(struct zcrypt_queue *zq,
353 return -EFAULT; 353 return -EFAULT;
354 354
355 /* Set up key which is located after the variable length text. */ 355 /* Set up key which is located after the variable length text. */
356 size = zcrypt_type6_crt_key(crt, msg->text + crt->inputdatalength, 1); 356 size = zcrypt_type6_crt_key(crt, msg->text + crt->inputdatalength);
357 if (size < 0) 357 if (size < 0)
358 return size; 358 return size;
359 size += sizeof(*msg) + crt->inputdatalength; /* total size of msg */ 359 size += sizeof(*msg) + crt->inputdatalength; /* total size of msg */
diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h
index f6aa21176d89..30bc6105aac3 100644
--- a/drivers/s390/net/qeth_core.h
+++ b/drivers/s390/net/qeth_core.h
@@ -701,6 +701,7 @@ enum qeth_discipline_id {
701}; 701};
702 702
703struct qeth_discipline { 703struct qeth_discipline {
704 const struct device_type *devtype;
704 void (*start_poll)(struct ccw_device *, int, unsigned long); 705 void (*start_poll)(struct ccw_device *, int, unsigned long);
705 qdio_handler_t *input_handler; 706 qdio_handler_t *input_handler;
706 qdio_handler_t *output_handler; 707 qdio_handler_t *output_handler;
@@ -875,6 +876,9 @@ extern struct qeth_discipline qeth_l2_discipline;
875extern struct qeth_discipline qeth_l3_discipline; 876extern struct qeth_discipline qeth_l3_discipline;
876extern const struct attribute_group *qeth_generic_attr_groups[]; 877extern const struct attribute_group *qeth_generic_attr_groups[];
877extern const struct attribute_group *qeth_osn_attr_groups[]; 878extern const struct attribute_group *qeth_osn_attr_groups[];
879extern const struct attribute_group qeth_device_attr_group;
880extern const struct attribute_group qeth_device_blkt_group;
881extern const struct device_type qeth_generic_devtype;
878extern struct workqueue_struct *qeth_wq; 882extern struct workqueue_struct *qeth_wq;
879 883
880int qeth_card_hw_is_reachable(struct qeth_card *); 884int qeth_card_hw_is_reachable(struct qeth_card *);
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index 38114a8d56e0..fc6d85f2b38d 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -5530,10 +5530,12 @@ void qeth_core_free_discipline(struct qeth_card *card)
 	card->discipline = NULL;
 }
 
-static const struct device_type qeth_generic_devtype = {
+const struct device_type qeth_generic_devtype = {
 	.name = "qeth_generic",
 	.groups = qeth_generic_attr_groups,
 };
+EXPORT_SYMBOL_GPL(qeth_generic_devtype);
+
 static const struct device_type qeth_osn_devtype = {
 	.name = "qeth_osn",
 	.groups = qeth_osn_attr_groups,
@@ -5659,23 +5661,22 @@ static int qeth_core_probe_device(struct ccwgroup_device *gdev)
 		goto err_card;
 	}
 
-	if (card->info.type == QETH_CARD_TYPE_OSN)
-		gdev->dev.type = &qeth_osn_devtype;
-	else
-		gdev->dev.type = &qeth_generic_devtype;
-
 	switch (card->info.type) {
 	case QETH_CARD_TYPE_OSN:
 	case QETH_CARD_TYPE_OSM:
 		rc = qeth_core_load_discipline(card, QETH_DISCIPLINE_LAYER2);
 		if (rc)
 			goto err_card;
+
+		gdev->dev.type = (card->info.type != QETH_CARD_TYPE_OSN)
+					? card->discipline->devtype
+					: &qeth_osn_devtype;
 		rc = card->discipline->setup(card->gdev);
 		if (rc)
 			goto err_disc;
-	case QETH_CARD_TYPE_OSD:
-	case QETH_CARD_TYPE_OSX:
+		break;
 	default:
+		gdev->dev.type = &qeth_generic_devtype;
 		break;
 	}
 
@@ -5731,8 +5732,10 @@ static int qeth_core_set_online(struct ccwgroup_device *gdev)
 		if (rc)
 			goto err;
 		rc = card->discipline->setup(card->gdev);
-		if (rc)
+		if (rc) {
+			qeth_core_free_discipline(card);
 			goto err;
+		}
 	}
 	rc = card->discipline->set_online(gdev);
 err:
diff --git a/drivers/s390/net/qeth_core_sys.c b/drivers/s390/net/qeth_core_sys.c
index 75b29fd2fcf4..db6a285d41e0 100644
--- a/drivers/s390/net/qeth_core_sys.c
+++ b/drivers/s390/net/qeth_core_sys.c
@@ -413,12 +413,16 @@ static ssize_t qeth_dev_layer2_store(struct device *dev,
 
 	if (card->options.layer2 == newdis)
 		goto out;
-	else {
-		card->info.mac_bits = 0;
-		if (card->discipline) {
-			card->discipline->remove(card->gdev);
-			qeth_core_free_discipline(card);
-		}
+	if (card->info.type == QETH_CARD_TYPE_OSM) {
+		/* fixed layer, can't switch */
+		rc = -EOPNOTSUPP;
+		goto out;
+	}
+
+	card->info.mac_bits = 0;
+	if (card->discipline) {
+		card->discipline->remove(card->gdev);
+		qeth_core_free_discipline(card);
 	}
 
 	rc = qeth_core_load_discipline(card, newdis);
@@ -426,6 +430,8 @@ static ssize_t qeth_dev_layer2_store(struct device *dev,
 		goto out;
 
 	rc = card->discipline->setup(card->gdev);
+	if (rc)
+		qeth_core_free_discipline(card);
 out:
 	mutex_unlock(&card->discipline_mutex);
 	return rc ? rc : count;
@@ -703,10 +709,11 @@ static struct attribute *qeth_blkt_device_attrs[] = {
 	&dev_attr_inter_jumbo.attr,
 	NULL,
 };
-static struct attribute_group qeth_device_blkt_group = {
+const struct attribute_group qeth_device_blkt_group = {
 	.name = "blkt",
 	.attrs = qeth_blkt_device_attrs,
 };
+EXPORT_SYMBOL_GPL(qeth_device_blkt_group);
 
 static struct attribute *qeth_device_attrs[] = {
 	&dev_attr_state.attr,
@@ -726,9 +733,10 @@ static struct attribute *qeth_device_attrs[] = {
 	&dev_attr_switch_attrs.attr,
 	NULL,
 };
-static struct attribute_group qeth_device_attr_group = {
+const struct attribute_group qeth_device_attr_group = {
 	.attrs = qeth_device_attrs,
 };
+EXPORT_SYMBOL_GPL(qeth_device_attr_group);
 
 const struct attribute_group *qeth_generic_attr_groups[] = {
 	&qeth_device_attr_group,
diff --git a/drivers/s390/net/qeth_l2.h b/drivers/s390/net/qeth_l2.h
index 29d9fb3890ad..0d59f9a45ea9 100644
--- a/drivers/s390/net/qeth_l2.h
+++ b/drivers/s390/net/qeth_l2.h
@@ -8,6 +8,8 @@
 
 #include "qeth_core.h"
 
+extern const struct attribute_group *qeth_l2_attr_groups[];
+
 int qeth_l2_create_device_attributes(struct device *);
 void qeth_l2_remove_device_attributes(struct device *);
 void qeth_l2_setup_bridgeport_attrs(struct qeth_card *card);
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
index 1b07f382d74c..bd2df62a5cdf 100644
--- a/drivers/s390/net/qeth_l2_main.c
+++ b/drivers/s390/net/qeth_l2_main.c
@@ -880,11 +880,21 @@ static int qeth_l2_stop(struct net_device *dev)
 	return 0;
 }
 
+static const struct device_type qeth_l2_devtype = {
+	.name = "qeth_layer2",
+	.groups = qeth_l2_attr_groups,
+};
+
 static int qeth_l2_probe_device(struct ccwgroup_device *gdev)
 {
 	struct qeth_card *card = dev_get_drvdata(&gdev->dev);
+	int rc;
 
-	qeth_l2_create_device_attributes(&gdev->dev);
+	if (gdev->dev.type == &qeth_generic_devtype) {
+		rc = qeth_l2_create_device_attributes(&gdev->dev);
+		if (rc)
+			return rc;
+	}
 	INIT_LIST_HEAD(&card->vid_list);
 	hash_init(card->mac_htable);
 	card->options.layer2 = 1;
@@ -896,7 +906,8 @@ static void qeth_l2_remove_device(struct ccwgroup_device *cgdev)
 {
 	struct qeth_card *card = dev_get_drvdata(&cgdev->dev);
 
-	qeth_l2_remove_device_attributes(&cgdev->dev);
+	if (cgdev->dev.type == &qeth_generic_devtype)
+		qeth_l2_remove_device_attributes(&cgdev->dev);
 	qeth_set_allowed_threads(card, 0, 1);
 	wait_event(card->wait_q, qeth_threads_running(card, 0xffffffff) == 0);
 
@@ -954,7 +965,6 @@ static int qeth_l2_setup_netdev(struct qeth_card *card)
 	case QETH_CARD_TYPE_OSN:
 		card->dev = alloc_netdev(0, "osn%d", NET_NAME_UNKNOWN,
 					 ether_setup);
-		card->dev->flags |= IFF_NOARP;
 		break;
 	default:
 		card->dev = alloc_etherdev(0);
@@ -969,9 +979,12 @@ static int qeth_l2_setup_netdev(struct qeth_card *card)
 	card->dev->min_mtu = 64;
 	card->dev->max_mtu = ETH_MAX_MTU;
 	card->dev->netdev_ops = &qeth_l2_netdev_ops;
-	card->dev->ethtool_ops =
-		(card->info.type != QETH_CARD_TYPE_OSN) ?
-		&qeth_l2_ethtool_ops : &qeth_l2_osn_ops;
+	if (card->info.type == QETH_CARD_TYPE_OSN) {
+		card->dev->ethtool_ops = &qeth_l2_osn_ops;
+		card->dev->flags |= IFF_NOARP;
+	} else {
+		card->dev->ethtool_ops = &qeth_l2_ethtool_ops;
+	}
 	card->dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
 	if (card->info.type == QETH_CARD_TYPE_OSD && !card->info.guestlan) {
 		card->dev->hw_features = NETIF_F_SG;
@@ -1269,6 +1282,7 @@ static int qeth_l2_control_event(struct qeth_card *card,
 }
 
 struct qeth_discipline qeth_l2_discipline = {
+	.devtype = &qeth_l2_devtype,
 	.start_poll = qeth_qdio_start_poll,
 	.input_handler = (qdio_handler_t *) qeth_qdio_input_handler,
 	.output_handler = (qdio_handler_t *) qeth_qdio_output_handler,
diff --git a/drivers/s390/net/qeth_l2_sys.c b/drivers/s390/net/qeth_l2_sys.c
index 687972356d6b..9696baa49e2d 100644
--- a/drivers/s390/net/qeth_l2_sys.c
+++ b/drivers/s390/net/qeth_l2_sys.c
@@ -269,3 +269,11 @@ void qeth_l2_setup_bridgeport_attrs(struct qeth_card *card)
 	} else
 		qeth_bridgeport_an_set(card, 0);
 }
+
+const struct attribute_group *qeth_l2_attr_groups[] = {
+	&qeth_device_attr_group,
+	&qeth_device_blkt_group,
+	/* l2 specific, see l2_{create,remove}_device_attributes(): */
+	&qeth_l2_bridgeport_attr_group,
+	NULL,
+};
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index 6e0354ef4b86..d8df1e635163 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -3039,8 +3039,13 @@ static int qeth_l3_setup_netdev(struct qeth_card *card)
 static int qeth_l3_probe_device(struct ccwgroup_device *gdev)
 {
 	struct qeth_card *card = dev_get_drvdata(&gdev->dev);
+	int rc;
 
-	qeth_l3_create_device_attributes(&gdev->dev);
+	rc = qeth_l3_create_device_attributes(&gdev->dev);
+	if (rc)
+		return rc;
+	hash_init(card->ip_htable);
+	hash_init(card->ip_mc_htable);
 	card->options.layer2 = 0;
 	card->info.hwtrap = 0;
 	return 0;
@@ -3306,6 +3311,7 @@ static int qeth_l3_control_event(struct qeth_card *card,
 }
 
 struct qeth_discipline qeth_l3_discipline = {
+	.devtype = &qeth_generic_devtype,
 	.start_poll = qeth_qdio_start_poll,
 	.input_handler = (qdio_handler_t *) qeth_qdio_input_handler,
 	.output_handler = (qdio_handler_t *) qeth_qdio_output_handler,
diff --git a/drivers/s390/virtio/virtio_ccw.c b/drivers/s390/virtio/virtio_ccw.c
index 2a76ea78a0bf..b18fe2014cf2 100644
--- a/drivers/s390/virtio/virtio_ccw.c
+++ b/drivers/s390/virtio/virtio_ccw.c
@@ -87,7 +87,7 @@ struct vq_info_block {
 } __packed;
 
 struct virtio_feature_desc {
-	__u32 features;
+	__le32 features;
 	__u8 index;
 } __packed;
 
diff --git a/drivers/soc/ti/knav_dma.c b/drivers/soc/ti/knav_dma.c
index ecebe2eecc3a..026182d3b27c 100644
--- a/drivers/soc/ti/knav_dma.c
+++ b/drivers/soc/ti/knav_dma.c
@@ -413,7 +413,7 @@ static int of_channel_match_helper(struct device_node *np, const char *name,
  * @name: slave channel name
  * @config: dma configuration parameters
  *
- * Returns pointer to appropriate DMA channel on success or NULL.
+ * Returns pointer to appropriate DMA channel on success or error.
  */
 void *knav_dma_open_channel(struct device *dev, const char *name,
 			    struct knav_dma_cfg *config)
diff --git a/drivers/tty/hvc/Kconfig b/drivers/tty/hvc/Kconfig
index 574da15fe618..b8d5ea0ae26b 100644
--- a/drivers/tty/hvc/Kconfig
+++ b/drivers/tty/hvc/Kconfig
@@ -44,7 +44,7 @@ config HVC_RTAS
 
 config HVC_IUCV
 	bool "z/VM IUCV Hypervisor console support (VM only)"
-	depends on S390
+	depends on S390 && NET
 	select HVC_DRIVER
 	select IUCV
 	default y
diff --git a/fs/cifs/cifsacl.c b/fs/cifs/cifsacl.c
index 15bac390dff9..b98436f5c7c7 100644
--- a/fs/cifs/cifsacl.c
+++ b/fs/cifs/cifsacl.c
@@ -1135,20 +1135,19 @@ cifs_acl_to_fattr(struct cifs_sb_info *cifs_sb, struct cifs_fattr *fattr,
 	u32 acllen = 0;
 	int rc = 0;
 	struct tcon_link *tlink = cifs_sb_tlink(cifs_sb);
-	struct cifs_tcon *tcon;
+	struct smb_version_operations *ops;
 
 	cifs_dbg(NOISY, "converting ACL to mode for %s\n", path);
 
 	if (IS_ERR(tlink))
 		return PTR_ERR(tlink);
-	tcon = tlink_tcon(tlink);
 
-	if (pfid && (tcon->ses->server->ops->get_acl_by_fid))
-		pntsd = tcon->ses->server->ops->get_acl_by_fid(cifs_sb, pfid,
-							       &acllen);
-	else if (tcon->ses->server->ops->get_acl)
-		pntsd = tcon->ses->server->ops->get_acl(cifs_sb, inode, path,
-							&acllen);
+	ops = tlink_tcon(tlink)->ses->server->ops;
+
+	if (pfid && (ops->get_acl_by_fid))
+		pntsd = ops->get_acl_by_fid(cifs_sb, pfid, &acllen);
+	else if (ops->get_acl)
+		pntsd = ops->get_acl(cifs_sb, inode, path, &acllen);
 	else {
 		cifs_put_tlink(tlink);
 		return -EOPNOTSUPP;
@@ -1181,23 +1180,23 @@ id_mode_to_cifs_acl(struct inode *inode, const char *path, __u64 nmode,
 	struct cifs_ntsd *pnntsd = NULL; /* modified acl to be sent to server */
 	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
 	struct tcon_link *tlink = cifs_sb_tlink(cifs_sb);
-	struct cifs_tcon *tcon;
+	struct smb_version_operations *ops;
 
 	if (IS_ERR(tlink))
 		return PTR_ERR(tlink);
-	tcon = tlink_tcon(tlink);
+
+	ops = tlink_tcon(tlink)->ses->server->ops;
 
 	cifs_dbg(NOISY, "set ACL from mode for %s\n", path);
 
 	/* Get the security descriptor */
 
-	if (tcon->ses->server->ops->get_acl == NULL) {
+	if (ops->get_acl == NULL) {
 		cifs_put_tlink(tlink);
 		return -EOPNOTSUPP;
 	}
 
-	pntsd = tcon->ses->server->ops->get_acl(cifs_sb, inode, path,
-						&secdesclen);
+	pntsd = ops->get_acl(cifs_sb, inode, path, &secdesclen);
 	if (IS_ERR(pntsd)) {
 		rc = PTR_ERR(pntsd);
 		cifs_dbg(VFS, "%s: error %d getting sec desc\n", __func__, rc);
@@ -1224,13 +1223,12 @@ id_mode_to_cifs_acl(struct inode *inode, const char *path, __u64 nmode,
 
 	cifs_dbg(NOISY, "build_sec_desc rc: %d\n", rc);
 
-	if (tcon->ses->server->ops->set_acl == NULL)
+	if (ops->set_acl == NULL)
 		rc = -EOPNOTSUPP;
 
 	if (!rc) {
 		/* Set the security descriptor */
-		rc = tcon->ses->server->ops->set_acl(pnntsd, secdesclen, inode,
-						     path, aclflag);
+		rc = ops->set_acl(pnntsd, secdesclen, inode, path, aclflag);
 		cifs_dbg(NOISY, "set_cifs_acl rc: %d\n", rc);
 	}
 	cifs_put_tlink(tlink);
diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
index 8be55be70faf..bcc7d9acad64 100644
--- a/fs/cifs/cifsglob.h
+++ b/fs/cifs/cifsglob.h
@@ -418,7 +418,7 @@ struct smb_version_operations {
 	int (*validate_negotiate)(const unsigned int, struct cifs_tcon *);
 	ssize_t (*query_all_EAs)(const unsigned int, struct cifs_tcon *,
 			const unsigned char *, const unsigned char *, char *,
-			size_t, const struct nls_table *, int);
+			size_t, struct cifs_sb_info *);
 	int (*set_EA)(const unsigned int, struct cifs_tcon *, const char *,
 			const char *, const void *, const __u16,
 			const struct nls_table *, int);
diff --git a/fs/cifs/cifsproto.h b/fs/cifs/cifsproto.h
index e49958c3f8bb..6eb3147132e3 100644
--- a/fs/cifs/cifsproto.h
+++ b/fs/cifs/cifsproto.h
@@ -480,8 +480,7 @@ extern int CIFSSMBCopy(unsigned int xid,
 extern ssize_t CIFSSMBQAllEAs(const unsigned int xid, struct cifs_tcon *tcon,
 			const unsigned char *searchName,
 			const unsigned char *ea_name, char *EAData,
-			size_t bufsize, const struct nls_table *nls_codepage,
-			int remap_special_chars);
+			size_t bufsize, struct cifs_sb_info *cifs_sb);
 extern int CIFSSMBSetEA(const unsigned int xid, struct cifs_tcon *tcon,
 			const char *fileName, const char *ea_name,
 			const void *ea_value, const __u16 ea_value_len,
diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c
index 4c01b3f9abf0..fbb0d4cbda41 100644
--- a/fs/cifs/cifssmb.c
+++ b/fs/cifs/cifssmb.c
@@ -697,9 +697,7 @@ cifs_echo_callback(struct mid_q_entry *mid)
 {
 	struct TCP_Server_Info *server = mid->callback_data;
 
-	mutex_lock(&server->srv_mutex);
 	DeleteMidQEntry(mid);
-	mutex_unlock(&server->srv_mutex);
 	add_credits(server, 1, CIFS_ECHO_OP);
 }
 
@@ -1599,9 +1597,7 @@ cifs_readv_callback(struct mid_q_entry *mid)
 	}
 
 	queue_work(cifsiod_wq, &rdata->work);
-	mutex_lock(&server->srv_mutex);
 	DeleteMidQEntry(mid);
-	mutex_unlock(&server->srv_mutex);
 	add_credits(server, 1, 0);
 }
 
@@ -2058,7 +2054,6 @@ cifs_writev_callback(struct mid_q_entry *mid)
 {
 	struct cifs_writedata *wdata = mid->callback_data;
 	struct cifs_tcon *tcon = tlink_tcon(wdata->cfile->tlink);
-	struct TCP_Server_Info *server = tcon->ses->server;
 	unsigned int written;
 	WRITE_RSP *smb = (WRITE_RSP *)mid->resp_buf;
 
@@ -2095,9 +2090,7 @@ cifs_writev_callback(struct mid_q_entry *mid)
 	}
 
 	queue_work(cifsiod_wq, &wdata->work);
-	mutex_lock(&server->srv_mutex);
 	DeleteMidQEntry(mid);
-	mutex_unlock(&server->srv_mutex);
 	add_credits(tcon->ses->server, 1, 0);
 }
 
@@ -6076,11 +6069,13 @@ ssize_t
 CIFSSMBQAllEAs(const unsigned int xid, struct cifs_tcon *tcon,
 	       const unsigned char *searchName, const unsigned char *ea_name,
 	       char *EAData, size_t buf_size,
-	       const struct nls_table *nls_codepage, int remap)
+	       struct cifs_sb_info *cifs_sb)
 {
 	/* BB assumes one setup word */
 	TRANSACTION2_QPI_REQ *pSMB = NULL;
 	TRANSACTION2_QPI_RSP *pSMBr = NULL;
+	int remap = cifs_remap(cifs_sb);
+	struct nls_table *nls_codepage = cifs_sb->local_nls;
 	int rc = 0;
 	int bytes_returned;
 	int list_len;
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index 6ef78ad838e6..0fd081bd2a2f 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -582,7 +582,7 @@ cifs_relock_file(struct cifsFileInfo *cfile)
 	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
 	int rc = 0;
 
-	down_read(&cinode->lock_sem);
+	down_read_nested(&cinode->lock_sem, SINGLE_DEPTH_NESTING);
 	if (cinode->can_cache_brlcks) {
 		/* can cache locks - no need to relock */
 		up_read(&cinode->lock_sem);
diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
index c3b2fa0b2ec8..4d1fcd76d022 100644
--- a/fs/cifs/inode.c
+++ b/fs/cifs/inode.c
@@ -563,8 +563,7 @@ static int cifs_sfu_mode(struct cifs_fattr *fattr, const unsigned char *path,
 
 	rc = tcon->ses->server->ops->query_all_EAs(xid, tcon, path,
 			"SETFILEBITS", ea_value, 4 /* size of buf */,
-			cifs_sb->local_nls,
-			cifs_remap(cifs_sb));
+			cifs_sb);
 	cifs_put_tlink(tlink);
 	if (rc < 0)
 		return (int)rc;
diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
index 48ff7703b919..e4afdaae743f 100644
--- a/fs/cifs/smb2pdu.c
+++ b/fs/cifs/smb2pdu.c
@@ -1240,15 +1240,19 @@ SMB2_tcon(const unsigned int xid, struct cifs_ses *ses, const char *tree,
 		goto tcon_exit;
 	}
 
-	if (rsp->ShareType & SMB2_SHARE_TYPE_DISK)
+	switch (rsp->ShareType) {
+	case SMB2_SHARE_TYPE_DISK:
 		cifs_dbg(FYI, "connection to disk share\n");
-	else if (rsp->ShareType & SMB2_SHARE_TYPE_PIPE) {
+		break;
+	case SMB2_SHARE_TYPE_PIPE:
 		tcon->ipc = true;
 		cifs_dbg(FYI, "connection to pipe share\n");
-	} else if (rsp->ShareType & SMB2_SHARE_TYPE_PRINT) {
-		tcon->print = true;
+		break;
+	case SMB2_SHARE_TYPE_PRINT:
+		tcon->ipc = true;
 		cifs_dbg(FYI, "connection to printer\n");
-	} else {
+		break;
+	default:
 		cifs_dbg(VFS, "unknown share type %d\n", rsp->ShareType);
 		rc = -EOPNOTSUPP;
 		goto tcon_error_exit;
@@ -2173,9 +2177,7 @@ smb2_echo_callback(struct mid_q_entry *mid)
 	if (mid->mid_state == MID_RESPONSE_RECEIVED)
 		credits_received = le16_to_cpu(rsp->hdr.sync_hdr.CreditRequest);
 
-	mutex_lock(&server->srv_mutex);
 	DeleteMidQEntry(mid);
-	mutex_unlock(&server->srv_mutex);
 	add_credits(server, credits_received, CIFS_ECHO_OP);
 }
 
@@ -2433,9 +2435,7 @@ smb2_readv_callback(struct mid_q_entry *mid)
 		cifs_stats_fail_inc(tcon, SMB2_READ_HE);
 
 	queue_work(cifsiod_wq, &rdata->work);
-	mutex_lock(&server->srv_mutex);
 	DeleteMidQEntry(mid);
-	mutex_unlock(&server->srv_mutex);
 	add_credits(server, credits_received, 0);
 }
 
@@ -2594,7 +2594,6 @@ smb2_writev_callback(struct mid_q_entry *mid)
 {
 	struct cifs_writedata *wdata = mid->callback_data;
 	struct cifs_tcon *tcon = tlink_tcon(wdata->cfile->tlink);
-	struct TCP_Server_Info *server = tcon->ses->server;
 	unsigned int written;
 	struct smb2_write_rsp *rsp = (struct smb2_write_rsp *)mid->resp_buf;
 	unsigned int credits_received = 1;
@@ -2634,9 +2633,7 @@ smb2_writev_callback(struct mid_q_entry *mid)
 		cifs_stats_fail_inc(tcon, SMB2_WRITE_HE);
 
 	queue_work(cifsiod_wq, &wdata->work);
-	mutex_lock(&server->srv_mutex);
 	DeleteMidQEntry(mid);
-	mutex_unlock(&server->srv_mutex);
 	add_credits(tcon->ses->server, credits_received, 0);
 }
 
diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c
index 4d64b5b8fc9c..47a125ece11e 100644
--- a/fs/cifs/transport.c
+++ b/fs/cifs/transport.c
@@ -94,7 +94,7 @@ DeleteMidQEntry(struct mid_q_entry *midEntry)
 	now = jiffies;
 	/* commands taking longer than one second are indications that
 	   something is wrong, unless it is quite a slow link or server */
-	if ((now - midEntry->when_alloc) > HZ) {
+	if (time_after(now, midEntry->when_alloc + HZ)) {
 		if ((cifsFYI & CIFS_TIMER) && (midEntry->command != command)) {
 			pr_debug(" CIFS slow rsp: cmd %d mid %llu",
 				 midEntry->command, midEntry->mid);
@@ -613,9 +613,7 @@ cifs_sync_mid_result(struct mid_q_entry *mid, struct TCP_Server_Info *server)
 	}
 	spin_unlock(&GlobalMid_Lock);
 
-	mutex_lock(&server->srv_mutex);
 	DeleteMidQEntry(mid);
-	mutex_unlock(&server->srv_mutex);
 	return rc;
 }
 
diff --git a/fs/cifs/xattr.c b/fs/cifs/xattr.c
index 20af5187ba63..3cb5c9e2d4e7 100644
--- a/fs/cifs/xattr.c
+++ b/fs/cifs/xattr.c
@@ -235,8 +235,7 @@ static int cifs_xattr_get(const struct xattr_handler *handler,
 
 		if (pTcon->ses->server->ops->query_all_EAs)
 			rc = pTcon->ses->server->ops->query_all_EAs(xid, pTcon,
-				full_path, name, value, size,
-				cifs_sb->local_nls, cifs_remap(cifs_sb));
+				full_path, name, value, size, cifs_sb);
 		break;
 
 	case XATTR_CIFS_ACL: {
@@ -336,8 +335,7 @@ ssize_t cifs_listxattr(struct dentry *direntry, char *data, size_t buf_size)
 
 	if (pTcon->ses->server->ops->query_all_EAs)
 		rc = pTcon->ses->server->ops->query_all_EAs(xid, pTcon,
-				full_path, NULL, data, buf_size,
-				cifs_sb->local_nls, cifs_remap(cifs_sb));
+				full_path, NULL, data, buf_size, cifs_sb);
list_ea_exit:
 	kfree(full_path);
 	free_xid(xid);
diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h
index 5efb4db44e1e..d5093b52b485 100644
--- a/include/linux/bpf_verifier.h
+++ b/include/linux/bpf_verifier.h
@@ -40,6 +40,9 @@ struct bpf_reg_state {
 	 */
 	s64 min_value;
 	u64 max_value;
+	u32 min_align;
+	u32 aux_off;
+	u32 aux_off_align;
 };
 
 enum bpf_stack_slot_type {
@@ -87,6 +90,7 @@ struct bpf_verifier_env {
 	struct bpf_prog *prog;		/* eBPF program being verified */
 	struct bpf_verifier_stack_elem *head; /* stack of verifier states to be processed */
 	int stack_size;			/* number of states to be processed */
+	bool strict_alignment;		/* perform strict pointer alignment checks */
 	struct bpf_verifier_state cur_state; /* current verifier state */
 	struct bpf_verifier_state_list **explored_states; /* search pruning optimization */
 	const struct bpf_ext_analyzer_ops *analyzer_ops; /* external analyzer ops */
diff --git a/include/linux/mlx5/fs.h b/include/linux/mlx5/fs.h
index 1b166d2e19c5..b25e7baa273e 100644
--- a/include/linux/mlx5/fs.h
+++ b/include/linux/mlx5/fs.h
@@ -109,7 +109,6 @@ struct mlx5_flow_table_attr {
 	int max_fte;
 	u32 level;
 	u32 flags;
-	u32 underlay_qpn;
 };
 
 struct mlx5_flow_table *
@@ -167,4 +166,7 @@ struct mlx5_fc *mlx5_fc_create(struct mlx5_core_dev *dev, bool aging);
 void mlx5_fc_destroy(struct mlx5_core_dev *dev, struct mlx5_fc *counter);
 void mlx5_fc_query_cached(struct mlx5_fc *counter,
 			  u64 *bytes, u64 *packets, u64 *lastuse);
+int mlx5_fs_add_rx_underlay_qpn(struct mlx5_core_dev *dev, u32 underlay_qpn);
+int mlx5_fs_remove_rx_underlay_qpn(struct mlx5_core_dev *dev, u32 underlay_qpn);
+
 #endif
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 9c23bd2efb56..3f39d27decf4 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -3296,11 +3296,15 @@ int dev_get_phys_port_id(struct net_device *dev,
 int dev_get_phys_port_name(struct net_device *dev,
 			   char *name, size_t len);
 int dev_change_proto_down(struct net_device *dev, bool proto_down);
-int dev_change_xdp_fd(struct net_device *dev, struct netlink_ext_ack *extack,
-		      int fd, u32 flags);
 struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev);
 struct sk_buff *dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
 				    struct netdev_queue *txq, int *ret);
+
+typedef int (*xdp_op_t)(struct net_device *dev, struct netdev_xdp *xdp);
+int dev_change_xdp_fd(struct net_device *dev, struct netlink_ext_ack *extack,
+		      int fd, u32 flags);
+bool __dev_xdp_attached(struct net_device *dev, xdp_op_t xdp_op);
+
 int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb);
 int dev_forward_skb(struct net_device *dev, struct sk_buff *skb);
 bool is_skb_forwardable(const struct net_device *dev,
diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h
index 945a1f5f63c5..94dfa9def355 100644
--- a/include/uapi/linux/bpf.h
+++ b/include/uapi/linux/bpf.h
@@ -132,6 +132,13 @@ enum bpf_attach_type {
  */
 #define BPF_F_ALLOW_OVERRIDE	(1U << 0)
 
+/* If BPF_F_STRICT_ALIGNMENT is used in BPF_PROG_LOAD command, the
+ * verifier will perform strict alignment checking as if the kernel
+ * has been built with CONFIG_EFFICIENT_UNALIGNED_ACCESS not set,
+ * and NET_IP_ALIGN defined to 2.
+ */
+#define BPF_F_STRICT_ALIGNMENT	(1U << 0)
+
 #define BPF_PSEUDO_MAP_FD	1
 
 /* flags for BPF_MAP_UPDATE_ELEM command */
@@ -177,6 +184,7 @@ union bpf_attr {
 		__u32		log_size;	/* size of user buffer */
 		__aligned_u64	log_buf;	/* user supplied buffer */
 		__u32		kern_version;	/* checked when prog_type=kprobe */
+		__u32		prog_flags;
 	};
 
 	struct { /* anonymous struct used by BPF_OBJ_* commands */
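
For context, a minimal user-space sketch of how the new prog_flags field and the BPF_F_STRICT_ALIGNMENT flag above might be used when loading a program through the bpf(2) syscall. The helper name bpf_prog_load_strict and the error handling are illustrative assumptions, not part of this series:

#include <linux/bpf.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

/* Hypothetical helper: request strict alignment checking at load time. */
static int bpf_prog_load_strict(enum bpf_prog_type type,
				const struct bpf_insn *insns,
				unsigned int insn_cnt, const char *license)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.prog_type = type;
	attr.insns = (__u64)(unsigned long)insns;
	attr.insn_cnt = insn_cnt;
	attr.license = (__u64)(unsigned long)license;
	attr.prog_flags = BPF_F_STRICT_ALIGNMENT;	/* new in this series */

	/* Any other prog_flags bit is rejected with -EINVAL by the
	 * bpf_prog_load() change further below. */
	return syscall(__NR_bpf, BPF_PROG_LOAD, &attr, sizeof(attr));
}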
diff --git a/include/uapi/linux/if_link.h b/include/uapi/linux/if_link.h
index 8e56ac70e0d1..15ac20382aba 100644
--- a/include/uapi/linux/if_link.h
+++ b/include/uapi/linux/if_link.h
@@ -888,9 +888,18 @@ enum {
 /* XDP section */
 
 #define XDP_FLAGS_UPDATE_IF_NOEXIST	(1U << 0)
-#define XDP_FLAGS_SKB_MODE		(2U << 0)
+#define XDP_FLAGS_SKB_MODE		(1U << 1)
+#define XDP_FLAGS_DRV_MODE		(1U << 2)
 #define XDP_FLAGS_MASK			(XDP_FLAGS_UPDATE_IF_NOEXIST | \
-					 XDP_FLAGS_SKB_MODE)
+					 XDP_FLAGS_SKB_MODE | \
+					 XDP_FLAGS_DRV_MODE)
+
+/* These are stored into IFLA_XDP_ATTACHED on dump. */
+enum {
+	XDP_ATTACHED_NONE = 0,
+	XDP_ATTACHED_DRV,
+	XDP_ATTACHED_SKB,
+};
 
 enum {
 	IFLA_XDP_UNSPEC,
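
A small illustrative check mirroring the do_setlink() validation added later in this series: the XDP flag bits are now distinct, and generic (SKB) mode and native driver (DRV) mode are mutually exclusive. The helper xdp_flags_ok is a hypothetical user-space example that assumes a uapi header already containing the new XDP_FLAGS_DRV_MODE define:

#include <stdbool.h>
#include <linux/if_link.h>

/* Hypothetical user-space validation mirroring the kernel's checks. */
static bool xdp_flags_ok(unsigned int flags)
{
	if (flags & ~XDP_FLAGS_MASK)		/* unknown bits */
		return false;
	if ((flags & XDP_FLAGS_SKB_MODE) &&
	    (flags & XDP_FLAGS_DRV_MODE))	/* modes are exclusive */
		return false;
	return true;
}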
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index fd2411fd6914..265a0d854e33 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -783,7 +783,7 @@ struct bpf_prog *bpf_prog_get_type(u32 ufd, enum bpf_prog_type type)
 EXPORT_SYMBOL_GPL(bpf_prog_get_type);
 
 /* last field in 'union bpf_attr' used by this command */
-#define BPF_PROG_LOAD_LAST_FIELD kern_version
+#define BPF_PROG_LOAD_LAST_FIELD prog_flags
 
 static int bpf_prog_load(union bpf_attr *attr)
 {
@@ -796,6 +796,9 @@ static int bpf_prog_load(union bpf_attr *attr)
 	if (CHECK_ATTR(BPF_PROG_LOAD))
 		return -EINVAL;
 
+	if (attr->prog_flags & ~BPF_F_STRICT_ALIGNMENT)
+		return -EINVAL;
+
 	/* copy eBPF program license from user space */
 	if (strncpy_from_user(license, u64_to_user_ptr(attr->license),
 			      sizeof(license) - 1) < 0)
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index c5b56c92f8e2..39f2dcbc4cbc 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -241,6 +241,12 @@ static void print_verifier_state(struct bpf_verifier_state *state)
 		if (reg->max_value != BPF_REGISTER_MAX_RANGE)
 			verbose(",max_value=%llu",
 				(unsigned long long)reg->max_value);
+		if (reg->min_align)
+			verbose(",min_align=%u", reg->min_align);
+		if (reg->aux_off)
+			verbose(",aux_off=%u", reg->aux_off);
+		if (reg->aux_off_align)
+			verbose(",aux_off_align=%u", reg->aux_off_align);
 	}
 	for (i = 0; i < MAX_BPF_STACK; i += BPF_REG_SIZE) {
 		if (state->stack_slot_type[i] == STACK_SPILL)
@@ -466,6 +472,9 @@ static void init_reg_state(struct bpf_reg_state *regs)
 		regs[i].imm = 0;
 		regs[i].min_value = BPF_REGISTER_MIN_RANGE;
 		regs[i].max_value = BPF_REGISTER_MAX_RANGE;
+		regs[i].min_align = 0;
+		regs[i].aux_off = 0;
+		regs[i].aux_off_align = 0;
 	}
 
 	/* frame pointer */
@@ -492,6 +501,7 @@ static void reset_reg_range_values(struct bpf_reg_state *regs, u32 regno)
 {
 	regs[regno].min_value = BPF_REGISTER_MIN_RANGE;
 	regs[regno].max_value = BPF_REGISTER_MAX_RANGE;
+	regs[regno].min_align = 0;
 }
 
 static void mark_reg_unknown_value_and_range(struct bpf_reg_state *regs,
@@ -779,17 +789,33 @@ static bool is_pointer_value(struct bpf_verifier_env *env, int regno)
 }
 
 static int check_pkt_ptr_alignment(const struct bpf_reg_state *reg,
-				   int off, int size)
+				   int off, int size, bool strict)
 {
-	if (reg->id && size != 1) {
-		verbose("Unknown alignment. Only byte-sized access allowed in packet access.\n");
-		return -EACCES;
+	int ip_align;
+	int reg_off;
+
+	/* Byte size accesses are always allowed. */
+	if (!strict || size == 1)
+		return 0;
+
+	reg_off = reg->off;
+	if (reg->id) {
+		if (reg->aux_off_align % size) {
+			verbose("Packet access is only %u byte aligned, %d byte access not allowed\n",
+				reg->aux_off_align, size);
+			return -EACCES;
+		}
+		reg_off += reg->aux_off;
 	}
 
-	/* skb->data is NET_IP_ALIGN-ed */
-	if ((NET_IP_ALIGN + reg->off + off) % size != 0) {
+	/* skb->data is NET_IP_ALIGN-ed, but for strict alignment checking
+	 * we force this to 2 which is universally what architectures use
+	 * when they don't set CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS.
+	 */
+	ip_align = strict ? 2 : NET_IP_ALIGN;
+	if ((ip_align + reg_off + off) % size != 0) {
 		verbose("misaligned packet access off %d+%d+%d size %d\n",
-			NET_IP_ALIGN, reg->off, off, size);
+			ip_align, reg_off, off, size);
 		return -EACCES;
 	}
 
@@ -797,9 +823,9 @@ static int check_pkt_ptr_alignment(const struct bpf_reg_state *reg,
 }
 
 static int check_val_ptr_alignment(const struct bpf_reg_state *reg,
-				   int size)
+				   int size, bool strict)
 {
-	if (size != 1) {
+	if (strict && size != 1) {
 		verbose("Unknown alignment. Only byte-sized access allowed in value access.\n");
 		return -EACCES;
 	}
@@ -807,16 +833,20 @@ static int check_val_ptr_alignment(const struct bpf_reg_state *reg,
 	return 0;
 }
 
-static int check_ptr_alignment(const struct bpf_reg_state *reg,
+static int check_ptr_alignment(struct bpf_verifier_env *env,
+			       const struct bpf_reg_state *reg,
 			       int off, int size)
 {
+	bool strict = env->strict_alignment;
+
+	if (!IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS))
+		strict = true;
+
 	switch (reg->type) {
 	case PTR_TO_PACKET:
-		return IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) ? 0 :
-		       check_pkt_ptr_alignment(reg, off, size);
+		return check_pkt_ptr_alignment(reg, off, size, strict);
 	case PTR_TO_MAP_VALUE_ADJ:
-		return IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) ? 0 :
-		       check_val_ptr_alignment(reg, size);
+		return check_val_ptr_alignment(reg, size, strict);
 	default:
 		if (off % size != 0) {
 			verbose("misaligned access off %d size %d\n",
@@ -849,7 +879,7 @@ static int check_mem_access(struct bpf_verifier_env *env, u32 regno, int off,
 	if (size < 0)
 		return size;
 
-	err = check_ptr_alignment(reg, off, size);
+	err = check_ptr_alignment(env, reg, off, size);
 	if (err)
 		return err;
 
@@ -883,6 +913,8 @@ static int check_mem_access(struct bpf_verifier_env *env, u32 regno, int off,
 						   value_regno);
 			/* note that reg.[id|off|range] == 0 */
 			state->regs[value_regno].type = reg_type;
+			state->regs[value_regno].aux_off = 0;
+			state->regs[value_regno].aux_off_align = 0;
 		}
 
 	} else if (reg->type == FRAME_PTR || reg->type == PTR_TO_STACK) {
@@ -1455,6 +1487,8 @@ add_imm:
 		 */
 		dst_reg->off += imm;
 	} else {
+		bool had_id;
+
 		if (src_reg->type == PTR_TO_PACKET) {
 			/* R6=pkt(id=0,off=0,r=62) R7=imm22; r7 += r6 */
 			tmp_reg = *dst_reg;  /* save r7 state */
@@ -1488,14 +1522,23 @@ add_imm:
 				src_reg->imm);
 			return -EACCES;
 		}
+
+		had_id = (dst_reg->id != 0);
+
 		/* dst_reg stays as pkt_ptr type and since some positive
 		 * integer value was added to the pointer, increment its 'id'
 		 */
 		dst_reg->id = ++env->id_gen;
 
-		/* something was added to pkt_ptr, set range and off to zero */
+		/* something was added to pkt_ptr, set range to zero */
+		dst_reg->aux_off += dst_reg->off;
 		dst_reg->off = 0;
 		dst_reg->range = 0;
+		if (had_id)
+			dst_reg->aux_off_align = min(dst_reg->aux_off_align,
+						     src_reg->min_align);
+		else
+			dst_reg->aux_off_align = src_reg->min_align;
 	}
 	return 0;
 }
@@ -1669,6 +1712,13 @@ static void check_reg_overflow(struct bpf_reg_state *reg)
 		reg->min_value = BPF_REGISTER_MIN_RANGE;
 }
 
+static u32 calc_align(u32 imm)
+{
+	if (!imm)
+		return 1U << 31;
+	return imm - ((imm - 1) & imm);
+}
+
 static void adjust_reg_min_max_vals(struct bpf_verifier_env *env,
 				    struct bpf_insn *insn)
 {
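
As a side note, a standalone sketch of what the new calc_align() helper computes: the lowest set bit of a constant, i.e. the largest power-of-two alignment that adding this constant can preserve, with 0 mapped to 1U << 31 (effectively unconstrained). The test values below are illustrative only:

#include <assert.h>
#include <stdint.h>

/* Mirror of the verifier helper added above. */
static uint32_t calc_align(uint32_t imm)
{
	if (!imm)
		return 1U << 31;
	return imm - ((imm - 1) & imm);		/* isolate lowest set bit */
}

int main(void)
{
	assert(calc_align(12) == 4);	/* 0b1100 -> 4-byte alignment */
	assert(calc_align(64) == 64);
	assert(calc_align(7) == 1);	/* odd constant gives no alignment */
	assert(calc_align(0) == (1U << 31));
	return 0;
}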
@@ -1676,8 +1726,10 @@ static void adjust_reg_min_max_vals(struct bpf_verifier_env *env,
 	s64 min_val = BPF_REGISTER_MIN_RANGE;
 	u64 max_val = BPF_REGISTER_MAX_RANGE;
 	u8 opcode = BPF_OP(insn->code);
+	u32 dst_align, src_align;
 
 	dst_reg = &regs[insn->dst_reg];
+	src_align = 0;
 	if (BPF_SRC(insn->code) == BPF_X) {
 		check_reg_overflow(&regs[insn->src_reg]);
 		min_val = regs[insn->src_reg].min_value;
@@ -1693,12 +1745,18 @@ static void adjust_reg_min_max_vals(struct bpf_verifier_env *env,
 		    regs[insn->src_reg].type != UNKNOWN_VALUE) {
 			min_val = BPF_REGISTER_MIN_RANGE;
 			max_val = BPF_REGISTER_MAX_RANGE;
+			src_align = 0;
+		} else {
+			src_align = regs[insn->src_reg].min_align;
 		}
 	} else if (insn->imm < BPF_REGISTER_MAX_RANGE &&
 		   (s64)insn->imm > BPF_REGISTER_MIN_RANGE) {
 		min_val = max_val = insn->imm;
+		src_align = calc_align(insn->imm);
 	}
 
+	dst_align = dst_reg->min_align;
+
 	/* We don't know anything about what was done to this register, mark it
 	 * as unknown.
 	 */
@@ -1723,18 +1781,21 @@ static void adjust_reg_min_max_vals(struct bpf_verifier_env *env,
 			dst_reg->min_value += min_val;
 		if (dst_reg->max_value != BPF_REGISTER_MAX_RANGE)
 			dst_reg->max_value += max_val;
+		dst_reg->min_align = min(src_align, dst_align);
 		break;
 	case BPF_SUB:
 		if (dst_reg->min_value != BPF_REGISTER_MIN_RANGE)
 			dst_reg->min_value -= min_val;
 		if (dst_reg->max_value != BPF_REGISTER_MAX_RANGE)
 			dst_reg->max_value -= max_val;
+		dst_reg->min_align = min(src_align, dst_align);
 		break;
 	case BPF_MUL:
 		if (dst_reg->min_value != BPF_REGISTER_MIN_RANGE)
 			dst_reg->min_value *= min_val;
 		if (dst_reg->max_value != BPF_REGISTER_MAX_RANGE)
 			dst_reg->max_value *= max_val;
+		dst_reg->min_align = max(src_align, dst_align);
 		break;
 	case BPF_AND:
 		/* Disallow AND'ing of negative numbers, ain't nobody got time
@@ -1746,17 +1807,23 @@ static void adjust_reg_min_max_vals(struct bpf_verifier_env *env,
 		else
 			dst_reg->min_value = 0;
 		dst_reg->max_value = max_val;
+		dst_reg->min_align = max(src_align, dst_align);
 		break;
 	case BPF_LSH:
 		/* Gotta have special overflow logic here, if we're shifting
 		 * more than MAX_RANGE then just assume we have an invalid
 		 * range.
 		 */
-		if (min_val > ilog2(BPF_REGISTER_MAX_RANGE))
+		if (min_val > ilog2(BPF_REGISTER_MAX_RANGE)) {
 			dst_reg->min_value = BPF_REGISTER_MIN_RANGE;
-		else if (dst_reg->min_value != BPF_REGISTER_MIN_RANGE)
-			dst_reg->min_value <<= min_val;
-
+			dst_reg->min_align = 1;
+		} else {
+			if (dst_reg->min_value != BPF_REGISTER_MIN_RANGE)
+				dst_reg->min_value <<= min_val;
+			if (!dst_reg->min_align)
+				dst_reg->min_align = 1;
+			dst_reg->min_align <<= min_val;
+		}
 		if (max_val > ilog2(BPF_REGISTER_MAX_RANGE))
 			dst_reg->max_value = BPF_REGISTER_MAX_RANGE;
 		else if (dst_reg->max_value != BPF_REGISTER_MAX_RANGE)
@@ -1766,11 +1833,19 @@ static void adjust_reg_min_max_vals(struct bpf_verifier_env *env,
 		/* RSH by a negative number is undefined, and the BPF_RSH is an
 		 * unsigned shift, so make the appropriate casts.
 		 */
-		if (min_val < 0 || dst_reg->min_value < 0)
+		if (min_val < 0 || dst_reg->min_value < 0) {
 			dst_reg->min_value = BPF_REGISTER_MIN_RANGE;
-		else
+		} else {
 			dst_reg->min_value =
 				(u64)(dst_reg->min_value) >> min_val;
+		}
+		if (min_val < 0) {
+			dst_reg->min_align = 1;
+		} else {
+			dst_reg->min_align >>= (u64) min_val;
+			if (!dst_reg->min_align)
+				dst_reg->min_align = 1;
+		}
 		if (dst_reg->max_value != BPF_REGISTER_MAX_RANGE)
 			dst_reg->max_value >>= max_val;
 		break;
@@ -1872,6 +1947,7 @@ static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn)
 			regs[insn->dst_reg].imm = insn->imm;
 			regs[insn->dst_reg].max_value = insn->imm;
 			regs[insn->dst_reg].min_value = insn->imm;
+			regs[insn->dst_reg].min_align = calc_align(insn->imm);
 		}
 
 	} else if (opcode > BPF_END) {
@@ -2856,8 +2932,12 @@ static int do_check(struct bpf_verifier_env *env)
 			goto process_bpf_exit;
 		}
 
-		if (log_level && do_print_state) {
-			verbose("\nfrom %d to %d:", prev_insn_idx, insn_idx);
+		if (log_level > 1 || (log_level && do_print_state)) {
+			if (log_level > 1)
+				verbose("%d:", insn_idx);
+			else
+				verbose("\nfrom %d to %d:",
+					prev_insn_idx, insn_idx);
 			print_verifier_state(&env->cur_state);
 			do_print_state = false;
 		}
@@ -3494,6 +3574,10 @@ int bpf_check(struct bpf_prog **prog, union bpf_attr *attr)
 	} else {
 		log_level = 0;
 	}
+	if (attr->prog_flags & BPF_F_STRICT_ALIGNMENT)
+		env->strict_alignment = true;
+	else
+		env->strict_alignment = false;
 
 	ret = replace_map_fd_with_map_ptr(env);
 	if (ret < 0)
@@ -3599,6 +3683,7 @@ int bpf_analyzer(struct bpf_prog *prog, const struct bpf_ext_analyzer_ops *ops,
 	mutex_lock(&bpf_verifier_lock);
 
 	log_level = 0;
+	env->strict_alignment = false;
 
 	env->explored_states = kcalloc(env->prog->len,
 				       sizeof(struct bpf_verifier_state_list *),
diff --git a/net/core/dev.c b/net/core/dev.c
index 96cf83da0d66..fca407b4a6ea 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -6852,6 +6852,32 @@ int dev_change_proto_down(struct net_device *dev, bool proto_down)
 }
 EXPORT_SYMBOL(dev_change_proto_down);
 
+bool __dev_xdp_attached(struct net_device *dev, xdp_op_t xdp_op)
+{
+	struct netdev_xdp xdp;
+
+	memset(&xdp, 0, sizeof(xdp));
+	xdp.command = XDP_QUERY_PROG;
+
+	/* Query must always succeed. */
+	WARN_ON(xdp_op(dev, &xdp) < 0);
+	return xdp.prog_attached;
+}
+
+static int dev_xdp_install(struct net_device *dev, xdp_op_t xdp_op,
+			   struct netlink_ext_ack *extack,
+			   struct bpf_prog *prog)
+{
+	struct netdev_xdp xdp;
+
+	memset(&xdp, 0, sizeof(xdp));
+	xdp.command = XDP_SETUP_PROG;
+	xdp.extack = extack;
+	xdp.prog = prog;
+
+	return xdp_op(dev, &xdp);
+}
+
 /**
  * dev_change_xdp_fd - set or clear a bpf program for a device rx path
  * @dev: device
@@ -6864,41 +6890,34 @@ EXPORT_SYMBOL(dev_change_proto_down);
 int dev_change_xdp_fd(struct net_device *dev, struct netlink_ext_ack *extack,
 		      int fd, u32 flags)
 {
-	int (*xdp_op)(struct net_device *dev, struct netdev_xdp *xdp);
 	const struct net_device_ops *ops = dev->netdev_ops;
 	struct bpf_prog *prog = NULL;
-	struct netdev_xdp xdp;
+	xdp_op_t xdp_op, xdp_chk;
 	int err;
 
 	ASSERT_RTNL();
 
-	xdp_op = ops->ndo_xdp;
+	xdp_op = xdp_chk = ops->ndo_xdp;
+	if (!xdp_op && (flags & XDP_FLAGS_DRV_MODE))
+		return -EOPNOTSUPP;
 	if (!xdp_op || (flags & XDP_FLAGS_SKB_MODE))
 		xdp_op = generic_xdp_install;
+	if (xdp_op == xdp_chk)
+		xdp_chk = generic_xdp_install;
 
 	if (fd >= 0) {
-		if (flags & XDP_FLAGS_UPDATE_IF_NOEXIST) {
-			memset(&xdp, 0, sizeof(xdp));
-			xdp.command = XDP_QUERY_PROG;
-
-			err = xdp_op(dev, &xdp);
-			if (err < 0)
-				return err;
-			if (xdp.prog_attached)
-				return -EBUSY;
-		}
+		if (xdp_chk && __dev_xdp_attached(dev, xdp_chk))
+			return -EEXIST;
+		if ((flags & XDP_FLAGS_UPDATE_IF_NOEXIST) &&
+		    __dev_xdp_attached(dev, xdp_op))
+			return -EBUSY;
 
 		prog = bpf_prog_get_type(fd, BPF_PROG_TYPE_XDP);
 		if (IS_ERR(prog))
 			return PTR_ERR(prog);
 	}
 
-	memset(&xdp, 0, sizeof(xdp));
-	xdp.command = XDP_SETUP_PROG;
-	xdp.extack = extack;
-	xdp.prog = prog;
-
-	err = xdp_op(dev, &xdp);
+	err = dev_xdp_install(dev, xdp_op, extack, prog);
 	if (err < 0 && prog)
 		bpf_prog_put(prog);
 
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index bcb0f610ee42..d7f82c3450b1 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -899,8 +899,7 @@ static size_t rtnl_port_size(const struct net_device *dev,
 static size_t rtnl_xdp_size(void)
 {
 	size_t xdp_size = nla_total_size(0) +	/* nest IFLA_XDP */
-			  nla_total_size(1) +	/* XDP_ATTACHED */
-			  nla_total_size(4);	/* XDP_FLAGS */
+			  nla_total_size(1);	/* XDP_ATTACHED */
 
 	return xdp_size;
 }
@@ -1247,37 +1246,34 @@ static int rtnl_fill_link_ifmap(struct sk_buff *skb, struct net_device *dev)
 	return 0;
 }
 
+static u8 rtnl_xdp_attached_mode(struct net_device *dev)
+{
+	const struct net_device_ops *ops = dev->netdev_ops;
+
+	ASSERT_RTNL();
+
+	if (rcu_access_pointer(dev->xdp_prog))
+		return XDP_ATTACHED_SKB;
+	if (ops->ndo_xdp && __dev_xdp_attached(dev, ops->ndo_xdp))
+		return XDP_ATTACHED_DRV;
+
+	return XDP_ATTACHED_NONE;
+}
+
 static int rtnl_xdp_fill(struct sk_buff *skb, struct net_device *dev)
 {
 	struct nlattr *xdp;
-	u32 xdp_flags = 0;
-	u8 val = 0;
 	int err;
 
 	xdp = nla_nest_start(skb, IFLA_XDP);
 	if (!xdp)
 		return -EMSGSIZE;
-	if (rcu_access_pointer(dev->xdp_prog)) {
-		xdp_flags = XDP_FLAGS_SKB_MODE;
-		val = 1;
-	} else if (dev->netdev_ops->ndo_xdp) {
-		struct netdev_xdp xdp_op = {};
-
-		xdp_op.command = XDP_QUERY_PROG;
-		err = dev->netdev_ops->ndo_xdp(dev, &xdp_op);
-		if (err)
-			goto err_cancel;
-		val = xdp_op.prog_attached;
-	}
-	err = nla_put_u8(skb, IFLA_XDP_ATTACHED, val);
+
+	err = nla_put_u8(skb, IFLA_XDP_ATTACHED,
+			 rtnl_xdp_attached_mode(dev));
 	if (err)
 		goto err_cancel;
 
-	if (xdp_flags) {
-		err = nla_put_u32(skb, IFLA_XDP_FLAGS, xdp_flags);
-		if (err)
-			goto err_cancel;
-	}
 	nla_nest_end(skb, xdp);
 	return 0;
 
@@ -2199,6 +2195,11 @@ static int do_setlink(const struct sk_buff *skb,
 				err = -EINVAL;
 				goto errout;
 			}
+			if ((xdp_flags & XDP_FLAGS_SKB_MODE) &&
+			    (xdp_flags & XDP_FLAGS_DRV_MODE)) {
+				err = -EINVAL;
+				goto errout;
+			}
 		}
 
 		if (xdp[IFLA_XDP_FD]) {
diff --git a/net/core/sock.c b/net/core/sock.c
index 79c6aee6af9b..e43e71d7856b 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -1803,28 +1803,24 @@ EXPORT_SYMBOL(skb_set_owner_w);
1803 * delay queue. We want to allow the owner socket to send more 1803 * delay queue. We want to allow the owner socket to send more
1804 * packets, as if they were already TX completed by a typical driver. 1804 * packets, as if they were already TX completed by a typical driver.
1805 * But we also want to keep skb->sk set because some packet schedulers 1805 * But we also want to keep skb->sk set because some packet schedulers
1806 * rely on it (sch_fq for example). So we set skb->truesize to a small 1806 * rely on it (sch_fq for example).
1807 * amount (1) and decrease sk_wmem_alloc accordingly.
1808 */ 1807 */
1809void skb_orphan_partial(struct sk_buff *skb) 1808void skb_orphan_partial(struct sk_buff *skb)
1810{ 1809{
1811 /* If this skb is a TCP pure ACK or already went here, 1810 if (skb_is_tcp_pure_ack(skb))
1812 * we have nothing to do. 2 is already a very small truesize.
1813 */
1814 if (skb->truesize <= 2)
1815 return; 1811 return;
1816 1812
1817 /* TCP stack sets skb->ooo_okay based on sk_wmem_alloc,
1818 * so we do not completely orphan skb, but transfert all
1819 * accounted bytes but one, to avoid unexpected reorders.
1820 */
1821 if (skb->destructor == sock_wfree 1813 if (skb->destructor == sock_wfree
1822#ifdef CONFIG_INET 1814#ifdef CONFIG_INET
1823 || skb->destructor == tcp_wfree 1815 || skb->destructor == tcp_wfree
1824#endif 1816#endif
1825 ) { 1817 ) {
1826 atomic_sub(skb->truesize - 1, &skb->sk->sk_wmem_alloc); 1818 struct sock *sk = skb->sk;
1827 skb->truesize = 1; 1819
1820 if (atomic_inc_not_zero(&sk->sk_refcnt)) {
1821 atomic_sub(skb->truesize, &sk->sk_wmem_alloc);
1822 skb->destructor = sock_efree;
1823 }
1828 } else { 1824 } else {
1829 skb_orphan(skb); 1825 skb_orphan(skb);
1830 } 1826 }
diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c
index 840f14aaa016..992621172220 100644
--- a/net/dccp/ipv6.c
+++ b/net/dccp/ipv6.c
@@ -426,6 +426,9 @@ static struct sock *dccp_v6_request_recv_sock(const struct sock *sk,
426 newsk->sk_backlog_rcv = dccp_v4_do_rcv; 426 newsk->sk_backlog_rcv = dccp_v4_do_rcv;
427 newnp->pktoptions = NULL; 427 newnp->pktoptions = NULL;
428 newnp->opt = NULL; 428 newnp->opt = NULL;
429 newnp->ipv6_mc_list = NULL;
430 newnp->ipv6_ac_list = NULL;
431 newnp->ipv6_fl_list = NULL;
429 newnp->mcast_oif = inet6_iif(skb); 432 newnp->mcast_oif = inet6_iif(skb);
430 newnp->mcast_hops = ipv6_hdr(skb)->hop_limit; 433 newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
431 434
@@ -490,6 +493,9 @@ static struct sock *dccp_v6_request_recv_sock(const struct sock *sk,
490 /* Clone RX bits */ 493 /* Clone RX bits */
491 newnp->rxopt.all = np->rxopt.all; 494 newnp->rxopt.all = np->rxopt.all;
492 495
496 newnp->ipv6_mc_list = NULL;
497 newnp->ipv6_ac_list = NULL;
498 newnp->ipv6_fl_list = NULL;
493 newnp->pktoptions = NULL; 499 newnp->pktoptions = NULL;
494 newnp->opt = NULL; 500 newnp->opt = NULL;
495 newnp->mcast_oif = inet6_iif(skb); 501 newnp->mcast_oif = inet6_iif(skb);
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 5a3ad09e2786..06e2dbc2b4a2 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -1179,13 +1179,14 @@ static int tcp_match_skb_to_sack(struct sock *sk, struct sk_buff *skb,
1179 */ 1179 */
1180 if (pkt_len > mss) { 1180 if (pkt_len > mss) {
1181 unsigned int new_len = (pkt_len / mss) * mss; 1181 unsigned int new_len = (pkt_len / mss) * mss;
1182 if (!in_sack && new_len < pkt_len) { 1182 if (!in_sack && new_len < pkt_len)
1183 new_len += mss; 1183 new_len += mss;
1184 if (new_len >= skb->len)
1185 return 0;
1186 }
1187 pkt_len = new_len; 1184 pkt_len = new_len;
1188 } 1185 }
1186
1187 if (pkt_len >= skb->len && !in_sack)
1188 return 0;
1189
1189 err = tcp_fragment(sk, skb, pkt_len, mss, GFP_ATOMIC); 1190 err = tcp_fragment(sk, skb, pkt_len, mss, GFP_ATOMIC);
1190 if (err < 0) 1191 if (err < 0)
1191 return err; 1192 return err;
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 8d297a79b568..6a4fb1e629fb 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -1022,7 +1022,10 @@ ipv6_add_addr(struct inet6_dev *idev, const struct in6_addr *addr,
1022 INIT_HLIST_NODE(&ifa->addr_lst); 1022 INIT_HLIST_NODE(&ifa->addr_lst);
1023 ifa->scope = scope; 1023 ifa->scope = scope;
1024 ifa->prefix_len = pfxlen; 1024 ifa->prefix_len = pfxlen;
1025 ifa->flags = flags | IFA_F_TENTATIVE; 1025 ifa->flags = flags;
1026 /* No need to add the TENTATIVE flag for addresses with NODAD */
1027 if (!(flags & IFA_F_NODAD))
1028 ifa->flags |= IFA_F_TENTATIVE;
1026 ifa->valid_lft = valid_lft; 1029 ifa->valid_lft = valid_lft;
1027 ifa->prefered_lft = prefered_lft; 1030 ifa->prefered_lft = prefered_lft;
1028 ifa->cstamp = ifa->tstamp = jiffies; 1031 ifa->cstamp = ifa->tstamp = jiffies;
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 7a8237acd210..4f4310a36a04 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -1062,6 +1062,7 @@ static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff *
1062 newtp->af_specific = &tcp_sock_ipv6_mapped_specific; 1062 newtp->af_specific = &tcp_sock_ipv6_mapped_specific;
1063#endif 1063#endif
1064 1064
1065 newnp->ipv6_mc_list = NULL;
1065 newnp->ipv6_ac_list = NULL; 1066 newnp->ipv6_ac_list = NULL;
1066 newnp->ipv6_fl_list = NULL; 1067 newnp->ipv6_fl_list = NULL;
1067 newnp->pktoptions = NULL; 1068 newnp->pktoptions = NULL;
@@ -1131,6 +1132,7 @@ static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff *
1131 First: no IPv4 options. 1132 First: no IPv4 options.
1132 */ 1133 */
1133 newinet->inet_opt = NULL; 1134 newinet->inet_opt = NULL;
1135 newnp->ipv6_mc_list = NULL;
1134 newnp->ipv6_ac_list = NULL; 1136 newnp->ipv6_ac_list = NULL;
1135 newnp->ipv6_fl_list = NULL; 1137 newnp->ipv6_fl_list = NULL;
1136 1138
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index f4001763134d..e3eeed19cc7a 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -2658,13 +2658,6 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
2658 dev = dev_get_by_index(sock_net(&po->sk), saddr->sll_ifindex); 2658 dev = dev_get_by_index(sock_net(&po->sk), saddr->sll_ifindex);
2659 } 2659 }
2660 2660
2661 sockc.tsflags = po->sk.sk_tsflags;
2662 if (msg->msg_controllen) {
2663 err = sock_cmsg_send(&po->sk, msg, &sockc);
2664 if (unlikely(err))
2665 goto out;
2666 }
2667
2668 err = -ENXIO; 2661 err = -ENXIO;
2669 if (unlikely(dev == NULL)) 2662 if (unlikely(dev == NULL))
2670 goto out; 2663 goto out;
@@ -2672,6 +2665,13 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
2672 if (unlikely(!(dev->flags & IFF_UP))) 2665 if (unlikely(!(dev->flags & IFF_UP)))
2673 goto out_put; 2666 goto out_put;
2674 2667
2668 sockc.tsflags = po->sk.sk_tsflags;
2669 if (msg->msg_controllen) {
2670 err = sock_cmsg_send(&po->sk, msg, &sockc);
2671 if (unlikely(err))
2672 goto out_put;
2673 }
2674
2675 if (po->sk.sk_socket->type == SOCK_RAW) 2675 if (po->sk.sk_socket->type == SOCK_RAW)
2676 reserve = dev->hard_header_len; 2676 reserve = dev->hard_header_len;
2677 size_max = po->tx_ring.frame_size 2677 size_max = po->tx_ring.frame_size
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index bbe57d57b67f..e88342fde1bc 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -1831,6 +1831,12 @@ static int tc_dump_tclass_root(struct Qdisc *root, struct sk_buff *skb,
1831 if (!qdisc_dev(root)) 1831 if (!qdisc_dev(root))
1832 return 0; 1832 return 0;
1833 1833
1834 if (tcm->tcm_parent) {
1835 q = qdisc_match_from_root(root, TC_H_MAJ(tcm->tcm_parent));
1836 if (q && tc_dump_tclass_qdisc(q, skb, tcm, cb, t_p, s_t) < 0)
1837 return -1;
1838 return 0;
1839 }
1834 hash_for_each(qdisc_dev(root)->qdisc_hash, b, q, hash) { 1840 hash_for_each(qdisc_dev(root)->qdisc_hash, b, q, hash) {
1835 if (tc_dump_tclass_qdisc(q, skb, tcm, cb, t_p, s_t) < 0) 1841 if (tc_dump_tclass_qdisc(q, skb, tcm, cb, t_p, s_t) < 0)
1836 return -1; 1842 return -1;
diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
index 961ee59f696a..142b70e959af 100644
--- a/net/sctp/ipv6.c
+++ b/net/sctp/ipv6.c
@@ -240,12 +240,10 @@ static void sctp_v6_get_dst(struct sctp_transport *t, union sctp_addr *saddr,
240 struct sctp_bind_addr *bp; 240 struct sctp_bind_addr *bp;
241 struct ipv6_pinfo *np = inet6_sk(sk); 241 struct ipv6_pinfo *np = inet6_sk(sk);
242 struct sctp_sockaddr_entry *laddr; 242 struct sctp_sockaddr_entry *laddr;
243 union sctp_addr *baddr = NULL;
244 union sctp_addr *daddr = &t->ipaddr; 243 union sctp_addr *daddr = &t->ipaddr;
245 union sctp_addr dst_saddr; 244 union sctp_addr dst_saddr;
246 struct in6_addr *final_p, final; 245 struct in6_addr *final_p, final;
247 __u8 matchlen = 0; 246 __u8 matchlen = 0;
248 __u8 bmatchlen;
249 sctp_scope_t scope; 247 sctp_scope_t scope;
250 248
251 memset(fl6, 0, sizeof(struct flowi6)); 249 memset(fl6, 0, sizeof(struct flowi6));
@@ -312,23 +310,37 @@ static void sctp_v6_get_dst(struct sctp_transport *t, union sctp_addr *saddr,
312 */ 310 */
313 rcu_read_lock(); 311 rcu_read_lock();
314 list_for_each_entry_rcu(laddr, &bp->address_list, list) { 312 list_for_each_entry_rcu(laddr, &bp->address_list, list) {
315 if (!laddr->valid) 313 struct dst_entry *bdst;
314 __u8 bmatchlen;
315
316 if (!laddr->valid ||
317 laddr->state != SCTP_ADDR_SRC ||
318 laddr->a.sa.sa_family != AF_INET6 ||
319 scope > sctp_scope(&laddr->a))
316 continue; 320 continue;
317 if ((laddr->state == SCTP_ADDR_SRC) && 321
318 (laddr->a.sa.sa_family == AF_INET6) && 322 fl6->saddr = laddr->a.v6.sin6_addr;
319 (scope <= sctp_scope(&laddr->a))) { 323 fl6->fl6_sport = laddr->a.v6.sin6_port;
320 bmatchlen = sctp_v6_addr_match_len(daddr, &laddr->a);
321 if (!baddr || (matchlen < bmatchlen)) {
322 baddr = &laddr->a;
323 matchlen = bmatchlen;
324 }
325 }
326 }
327 if (baddr) {
328 fl6->saddr = baddr->v6.sin6_addr;
329 fl6->fl6_sport = baddr->v6.sin6_port;
330 final_p = fl6_update_dst(fl6, rcu_dereference(np->opt), &final); 324 final_p = fl6_update_dst(fl6, rcu_dereference(np->opt), &final);
331 dst = ip6_dst_lookup_flow(sk, fl6, final_p); 325 bdst = ip6_dst_lookup_flow(sk, fl6, final_p);
326
327 if (!IS_ERR(bdst) &&
328 ipv6_chk_addr(dev_net(bdst->dev),
329 &laddr->a.v6.sin6_addr, bdst->dev, 1)) {
330 if (!IS_ERR_OR_NULL(dst))
331 dst_release(dst);
332 dst = bdst;
333 break;
334 }
335
336 bmatchlen = sctp_v6_addr_match_len(daddr, &laddr->a);
337 if (matchlen > bmatchlen)
338 continue;
339
340 if (!IS_ERR_OR_NULL(dst))
341 dst_release(dst);
342 dst = bdst;
343 matchlen = bmatchlen;
332 } 344 }
333 rcu_read_unlock(); 345 rcu_read_unlock();
334 346
diff --git a/net/tipc/socket.c b/net/tipc/socket.c
index 0d4f2f455a7c..1b92b72e812f 100644
--- a/net/tipc/socket.c
+++ b/net/tipc/socket.c
@@ -362,25 +362,25 @@ static int tipc_sk_sock_err(struct socket *sock, long *timeout)
362 return 0; 362 return 0;
363} 363}
364 364
365#define tipc_wait_for_cond(sock_, timeout_, condition_) \ 365#define tipc_wait_for_cond(sock_, timeo_, condition_) \
366({ \ 366({ \
367 int rc_ = 0; \ 367 struct sock *sk_; \
368 int done_ = 0; \ 368 int rc_; \
369 \ 369 \
370 while (!(condition_) && !done_) { \ 370 while ((rc_ = !(condition_))) { \
371 struct sock *sk_ = sock->sk; \ 371 DEFINE_WAIT_FUNC(wait_, woken_wake_function); \
372 DEFINE_WAIT_FUNC(wait_, woken_wake_function); \ 372 sk_ = (sock_)->sk; \
373 \ 373 rc_ = tipc_sk_sock_err((sock_), timeo_); \
374 rc_ = tipc_sk_sock_err(sock_, timeout_); \ 374 if (rc_) \
375 if (rc_) \ 375 break; \
376 break; \ 376 prepare_to_wait(sk_sleep(sk_), &wait_, TASK_INTERRUPTIBLE); \
377 prepare_to_wait(sk_sleep(sk_), &wait_, \ 377 release_sock(sk_); \
378 TASK_INTERRUPTIBLE); \ 378 *(timeo_) = wait_woken(&wait_, TASK_INTERRUPTIBLE, *(timeo_)); \
379 done_ = sk_wait_event(sk_, timeout_, \ 379 sched_annotate_sleep(); \
380 (condition_), &wait_); \ 380 lock_sock(sk_); \
381 remove_wait_queue(sk_sleep(sk_), &wait_); \ 381 remove_wait_queue(sk_sleep(sk_), &wait_); \
382 } \ 382 } \
383 rc_; \ 383 rc_; \
384}) 384})
385 385
386/** 386/**
diff --git a/samples/bpf/cookie_uid_helper_example.c b/samples/bpf/cookie_uid_helper_example.c
index b08ab4e88929..9d751e209f31 100644
--- a/samples/bpf/cookie_uid_helper_example.c
+++ b/samples/bpf/cookie_uid_helper_example.c
@@ -306,7 +306,9 @@ int main(int argc, char *argv[])
306 prog_attach_iptables(argv[2]); 306 prog_attach_iptables(argv[2]);
307 if (cfg_test_traffic) { 307 if (cfg_test_traffic) {
308 if (signal(SIGINT, finish) == SIG_ERR) 308 if (signal(SIGINT, finish) == SIG_ERR)
309 error(1, errno, "register handler failed"); 309 error(1, errno, "register SIGINT handler failed");
310 if (signal(SIGTERM, finish) == SIG_ERR)
311 error(1, errno, "register SIGTERM handler failed");
310 while (!test_finish) { 312 while (!test_finish) {
311 print_table(); 313 print_table();
312 printf("\n"); 314 printf("\n");
diff --git a/samples/bpf/offwaketime_user.c b/samples/bpf/offwaketime_user.c
index 9cce2a66bd66..512f87a5fd20 100644
--- a/samples/bpf/offwaketime_user.c
+++ b/samples/bpf/offwaketime_user.c
@@ -100,6 +100,7 @@ int main(int argc, char **argv)
100 setrlimit(RLIMIT_MEMLOCK, &r); 100 setrlimit(RLIMIT_MEMLOCK, &r);
101 101
102 signal(SIGINT, int_exit); 102 signal(SIGINT, int_exit);
103 signal(SIGTERM, int_exit);
103 104
104 if (load_kallsyms()) { 105 if (load_kallsyms()) {
105 printf("failed to process /proc/kallsyms\n"); 106 printf("failed to process /proc/kallsyms\n");
diff --git a/samples/bpf/sampleip_user.c b/samples/bpf/sampleip_user.c
index be59d7dcbdde..4ed690b907ff 100644
--- a/samples/bpf/sampleip_user.c
+++ b/samples/bpf/sampleip_user.c
@@ -180,6 +180,7 @@ int main(int argc, char **argv)
180 return 1; 180 return 1;
181 } 181 }
182 signal(SIGINT, int_exit); 182 signal(SIGINT, int_exit);
183 signal(SIGTERM, int_exit);
183 184
184 /* do sampling */ 185 /* do sampling */
185 printf("Sampling at %d Hertz for %d seconds. Ctrl-C also ends.\n", 186 printf("Sampling at %d Hertz for %d seconds. Ctrl-C also ends.\n",
diff --git a/samples/bpf/trace_event_user.c b/samples/bpf/trace_event_user.c
index 0c5561d193a4..fa4336423da5 100644
--- a/samples/bpf/trace_event_user.c
+++ b/samples/bpf/trace_event_user.c
@@ -192,6 +192,7 @@ int main(int argc, char **argv)
192 setrlimit(RLIMIT_MEMLOCK, &r); 192 setrlimit(RLIMIT_MEMLOCK, &r);
193 193
194 signal(SIGINT, int_exit); 194 signal(SIGINT, int_exit);
195 signal(SIGTERM, int_exit);
195 196
196 if (load_kallsyms()) { 197 if (load_kallsyms()) {
197 printf("failed to process /proc/kallsyms\n"); 198 printf("failed to process /proc/kallsyms\n");
diff --git a/samples/bpf/tracex2_user.c b/samples/bpf/tracex2_user.c
index 7fee0f1ba9a3..7321a3f253c9 100644
--- a/samples/bpf/tracex2_user.c
+++ b/samples/bpf/tracex2_user.c
@@ -127,6 +127,7 @@ int main(int ac, char **argv)
127 } 127 }
128 128
129 signal(SIGINT, int_exit); 129 signal(SIGINT, int_exit);
130 signal(SIGTERM, int_exit);
130 131
131 /* start 'ping' in the background to have some kfree_skb events */ 132 /* start 'ping' in the background to have some kfree_skb events */
132 f = popen("ping -c5 localhost", "r"); 133 f = popen("ping -c5 localhost", "r");
diff --git a/samples/bpf/xdp1_user.c b/samples/bpf/xdp1_user.c
index 378850c70eb8..2431c0321b71 100644
--- a/samples/bpf/xdp1_user.c
+++ b/samples/bpf/xdp1_user.c
@@ -62,13 +62,14 @@ static void usage(const char *prog)
62 fprintf(stderr, 62 fprintf(stderr,
63 "usage: %s [OPTS] IFINDEX\n\n" 63 "usage: %s [OPTS] IFINDEX\n\n"
64 "OPTS:\n" 64 "OPTS:\n"
65 " -S use skb-mode\n", 65 " -S use skb-mode\n"
66 " -N enforce native mode\n",
66 prog); 67 prog);
67} 68}
68 69
69int main(int argc, char **argv) 70int main(int argc, char **argv)
70{ 71{
71 const char *optstr = "S"; 72 const char *optstr = "SN";
72 char filename[256]; 73 char filename[256];
73 int opt; 74 int opt;
74 75
@@ -77,6 +78,9 @@ int main(int argc, char **argv)
77 case 'S': 78 case 'S':
78 xdp_flags |= XDP_FLAGS_SKB_MODE; 79 xdp_flags |= XDP_FLAGS_SKB_MODE;
79 break; 80 break;
81 case 'N':
82 xdp_flags |= XDP_FLAGS_DRV_MODE;
83 break;
80 default: 84 default:
81 usage(basename(argv[0])); 85 usage(basename(argv[0]));
82 return 1; 86 return 1;
@@ -102,6 +106,7 @@ int main(int argc, char **argv)
102 } 106 }
103 107
104 signal(SIGINT, int_exit); 108 signal(SIGINT, int_exit);
109 signal(SIGTERM, int_exit);
105 110
106 if (set_link_xdp_fd(ifindex, prog_fd[0], xdp_flags) < 0) { 111 if (set_link_xdp_fd(ifindex, prog_fd[0], xdp_flags) < 0) {
107 printf("link set xdp fd failed\n"); 112 printf("link set xdp fd failed\n");
diff --git a/samples/bpf/xdp_tx_iptunnel_user.c b/samples/bpf/xdp_tx_iptunnel_user.c
index 92b8bde9337c..715cd12eaca5 100644
--- a/samples/bpf/xdp_tx_iptunnel_user.c
+++ b/samples/bpf/xdp_tx_iptunnel_user.c
@@ -79,6 +79,8 @@ static void usage(const char *cmd)
79 printf(" -m <dest-MAC> Used in sending the IP Tunneled pkt\n"); 79 printf(" -m <dest-MAC> Used in sending the IP Tunneled pkt\n");
80 printf(" -T <stop-after-X-seconds> Default: 0 (forever)\n"); 80 printf(" -T <stop-after-X-seconds> Default: 0 (forever)\n");
81 printf(" -P <IP-Protocol> Default is TCP\n"); 81 printf(" -P <IP-Protocol> Default is TCP\n");
82 printf(" -S use skb-mode\n");
83 printf(" -N enforce native mode\n");
82 printf(" -h Display this help\n"); 84 printf(" -h Display this help\n");
83} 85}
84 86
@@ -138,7 +140,7 @@ int main(int argc, char **argv)
138{ 140{
139 unsigned char opt_flags[256] = {}; 141 unsigned char opt_flags[256] = {};
140 unsigned int kill_after_s = 0; 142 unsigned int kill_after_s = 0;
141 const char *optstr = "i:a:p:s:d:m:T:P:Sh"; 143 const char *optstr = "i:a:p:s:d:m:T:P:SNh";
142 int min_port = 0, max_port = 0; 144 int min_port = 0, max_port = 0;
143 struct iptnl_info tnl = {}; 145 struct iptnl_info tnl = {};
144 struct rlimit r = {RLIM_INFINITY, RLIM_INFINITY}; 146 struct rlimit r = {RLIM_INFINITY, RLIM_INFINITY};
@@ -206,6 +208,9 @@ int main(int argc, char **argv)
206 case 'S': 208 case 'S':
207 xdp_flags |= XDP_FLAGS_SKB_MODE; 209 xdp_flags |= XDP_FLAGS_SKB_MODE;
208 break; 210 break;
211 case 'N':
212 xdp_flags |= XDP_FLAGS_DRV_MODE;
213 break;
209 default: 214 default:
210 usage(argv[0]); 215 usage(argv[0]);
211 return 1; 216 return 1;
@@ -239,6 +244,7 @@ int main(int argc, char **argv)
239 } 244 }
240 245
241 signal(SIGINT, int_exit); 246 signal(SIGINT, int_exit);
247 signal(SIGTERM, int_exit);
242 248
243 while (min_port <= max_port) { 249 while (min_port <= max_port) {
244 vip.dport = htons(min_port++); 250 vip.dport = htons(min_port++);
diff --git a/tools/build/feature/test-bpf.c b/tools/build/feature/test-bpf.c
index ebc6dceddb58..7598361ef1f1 100644
--- a/tools/build/feature/test-bpf.c
+++ b/tools/build/feature/test-bpf.c
@@ -29,6 +29,7 @@ int main(void)
29 attr.log_size = 0; 29 attr.log_size = 0;
30 attr.log_level = 0; 30 attr.log_level = 0;
31 attr.kern_version = 0; 31 attr.kern_version = 0;
32 attr.prog_flags = 0;
32 33
33 /* 34 /*
34 * Test existence of __NR_bpf and BPF_PROG_LOAD. 35 * Test existence of __NR_bpf and BPF_PROG_LOAD.
diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h
index e553529929f6..94dfa9def355 100644
--- a/tools/include/uapi/linux/bpf.h
+++ b/tools/include/uapi/linux/bpf.h
@@ -132,6 +132,13 @@ enum bpf_attach_type {
132 */ 132 */
133#define BPF_F_ALLOW_OVERRIDE (1U << 0) 133#define BPF_F_ALLOW_OVERRIDE (1U << 0)
134 134
135/* If BPF_F_STRICT_ALIGNMENT is used in BPF_PROG_LOAD command, the
136 * verifier will perform strict alignment checking as if the kernel
137 * has been built with CONFIG_EFFICIENT_UNALIGNED_ACCESS not set,
138 * and NET_IP_ALIGN defined to 2.
139 */
140#define BPF_F_STRICT_ALIGNMENT (1U << 0)
141
135#define BPF_PSEUDO_MAP_FD 1 142#define BPF_PSEUDO_MAP_FD 1
136 143
137/* flags for BPF_MAP_UPDATE_ELEM command */ 144/* flags for BPF_MAP_UPDATE_ELEM command */
@@ -177,6 +184,7 @@ union bpf_attr {
177 __u32 log_size; /* size of user buffer */ 184 __u32 log_size; /* size of user buffer */
178 __aligned_u64 log_buf; /* user supplied buffer */ 185 __aligned_u64 log_buf; /* user supplied buffer */
179 __u32 kern_version; /* checked when prog_type=kprobe */ 186 __u32 kern_version; /* checked when prog_type=kprobe */
187 __u32 prog_flags;
180 }; 188 };
181 189
182 struct { /* anonymous struct used by BPF_OBJ_* commands */ 190 struct { /* anonymous struct used by BPF_OBJ_* commands */
@@ -481,8 +489,7 @@ union bpf_attr {
481 * u32 bpf_get_socket_uid(skb) 489 * u32 bpf_get_socket_uid(skb)
482 * Get the owner uid of the socket stored inside sk_buff. 490 * Get the owner uid of the socket stored inside sk_buff.
483 * @skb: pointer to skb 491 * @skb: pointer to skb
484 * Return: uid of the socket owner on success or 0 if the socket pointer 492 * Return: uid of the socket owner on success or overflowuid if failed.
485 * inside sk_buff is NULL
486 */ 493 */
487#define __BPF_FUNC_MAPPER(FN) \ 494#define __BPF_FUNC_MAPPER(FN) \
488 FN(unspec), \ 495 FN(unspec), \
diff --git a/tools/lib/bpf/bpf.c b/tools/lib/bpf/bpf.c
index 4fe444b8092e..6e178987af8e 100644
--- a/tools/lib/bpf/bpf.c
+++ b/tools/lib/bpf/bpf.c
@@ -117,6 +117,28 @@ int bpf_load_program(enum bpf_prog_type type, const struct bpf_insn *insns,
117 return sys_bpf(BPF_PROG_LOAD, &attr, sizeof(attr)); 117 return sys_bpf(BPF_PROG_LOAD, &attr, sizeof(attr));
118} 118}
119 119
120int bpf_verify_program(enum bpf_prog_type type, const struct bpf_insn *insns,
121 size_t insns_cnt, int strict_alignment,
122 const char *license, __u32 kern_version,
123 char *log_buf, size_t log_buf_sz)
124{
125 union bpf_attr attr;
126
127 bzero(&attr, sizeof(attr));
128 attr.prog_type = type;
129 attr.insn_cnt = (__u32)insns_cnt;
130 attr.insns = ptr_to_u64(insns);
131 attr.license = ptr_to_u64(license);
132 attr.log_buf = ptr_to_u64(log_buf);
133 attr.log_size = log_buf_sz;
134 attr.log_level = 2;
135 log_buf[0] = 0;
136 attr.kern_version = kern_version;
137 attr.prog_flags = strict_alignment ? BPF_F_STRICT_ALIGNMENT : 0;
138
139 return sys_bpf(BPF_PROG_LOAD, &attr, sizeof(attr));
140}
141
120int bpf_map_update_elem(int fd, const void *key, const void *value, 142int bpf_map_update_elem(int fd, const void *key, const void *value,
121 __u64 flags) 143 __u64 flags)
122{ 144{
diff --git a/tools/lib/bpf/bpf.h b/tools/lib/bpf/bpf.h
index edb4daeff7a5..972bd8333eb7 100644
--- a/tools/lib/bpf/bpf.h
+++ b/tools/lib/bpf/bpf.h
@@ -35,6 +35,10 @@ int bpf_load_program(enum bpf_prog_type type, const struct bpf_insn *insns,
35 size_t insns_cnt, const char *license, 35 size_t insns_cnt, const char *license,
36 __u32 kern_version, char *log_buf, 36 __u32 kern_version, char *log_buf,
37 size_t log_buf_sz); 37 size_t log_buf_sz);
38int bpf_verify_program(enum bpf_prog_type type, const struct bpf_insn *insns,
39 size_t insns_cnt, int strict_alignment,
40 const char *license, __u32 kern_version,
41 char *log_buf, size_t log_buf_sz);
38 42
39int bpf_map_update_elem(int fd, const void *key, const void *value, 43int bpf_map_update_elem(int fd, const void *key, const void *value,
40 __u64 flags); 44 __u64 flags);
diff --git a/tools/testing/selftests/bpf/Makefile b/tools/testing/selftests/bpf/Makefile
index 91edd0566237..f389b02d43a0 100644
--- a/tools/testing/selftests/bpf/Makefile
+++ b/tools/testing/selftests/bpf/Makefile
@@ -11,7 +11,8 @@ endif
11CFLAGS += -Wall -O2 -I$(APIDIR) -I$(LIBDIR) -I$(GENDIR) $(GENFLAGS) -I../../../include 11CFLAGS += -Wall -O2 -I$(APIDIR) -I$(LIBDIR) -I$(GENDIR) $(GENFLAGS) -I../../../include
12LDLIBS += -lcap -lelf 12LDLIBS += -lcap -lelf
13 13
14TEST_GEN_PROGS = test_verifier test_tag test_maps test_lru_map test_lpm_map test_progs 14TEST_GEN_PROGS = test_verifier test_tag test_maps test_lru_map test_lpm_map test_progs \
15 test_align
15 16
16TEST_GEN_FILES = test_pkt_access.o test_xdp.o test_l4lb.o test_tcp_estats.o 17TEST_GEN_FILES = test_pkt_access.o test_xdp.o test_l4lb.o test_tcp_estats.o
17 18
@@ -34,6 +35,7 @@ $(BPFOBJ): force
34CLANG ?= clang 35CLANG ?= clang
35 36
36%.o: %.c 37%.o: %.c
37 $(CLANG) -I. -I../../../include/uapi -I../../../../samples/bpf/ \ 38 $(CLANG) -I. -I./include/uapi -I../../../include/uapi \
39 -I../../../../samples/bpf/ \
38 -Wno-compare-distinct-pointer-types \ 40 -Wno-compare-distinct-pointer-types \
39 -O2 -target bpf -c $< -o $@ 41 -O2 -target bpf -c $< -o $@
diff --git a/tools/testing/selftests/bpf/include/uapi/linux/types.h b/tools/testing/selftests/bpf/include/uapi/linux/types.h
new file mode 100644
index 000000000000..fbd16a7554af
--- /dev/null
+++ b/tools/testing/selftests/bpf/include/uapi/linux/types.h
@@ -0,0 +1,6 @@
1#ifndef _UAPI_LINUX_TYPES_H
2#define _UAPI_LINUX_TYPES_H
3
4#include <asm-generic/int-ll64.h>
5
6#endif /* _UAPI_LINUX_TYPES_H */
diff --git a/tools/testing/selftests/bpf/test_align.c b/tools/testing/selftests/bpf/test_align.c
new file mode 100644
index 000000000000..9644d4e069de
--- /dev/null
+++ b/tools/testing/selftests/bpf/test_align.c
@@ -0,0 +1,453 @@
1#include <asm/types.h>
2#include <linux/types.h>
3#include <stdint.h>
4#include <stdio.h>
5#include <stdlib.h>
6#include <unistd.h>
7#include <errno.h>
8#include <string.h>
9#include <stddef.h>
10#include <stdbool.h>
11
12#include <linux/unistd.h>
13#include <linux/filter.h>
14#include <linux/bpf_perf_event.h>
15#include <linux/bpf.h>
16
17#include <bpf/bpf.h>
18
19#include "../../../include/linux/filter.h"
20
21#ifndef ARRAY_SIZE
22# define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
23#endif
24
25#define MAX_INSNS 512
26#define MAX_MATCHES 16
27
28struct bpf_align_test {
29 const char *descr;
30 struct bpf_insn insns[MAX_INSNS];
31 enum {
32 UNDEF,
33 ACCEPT,
34 REJECT
35 } result;
36 enum bpf_prog_type prog_type;
37 const char *matches[MAX_MATCHES];
38};
39
40static struct bpf_align_test tests[] = {
41 {
42 .descr = "mov",
43 .insns = {
44 BPF_MOV64_IMM(BPF_REG_3, 2),
45 BPF_MOV64_IMM(BPF_REG_3, 4),
46 BPF_MOV64_IMM(BPF_REG_3, 8),
47 BPF_MOV64_IMM(BPF_REG_3, 16),
48 BPF_MOV64_IMM(BPF_REG_3, 32),
49 BPF_MOV64_IMM(BPF_REG_0, 0),
50 BPF_EXIT_INSN(),
51 },
52 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
53 .matches = {
54 "1: R1=ctx R3=imm2,min_value=2,max_value=2,min_align=2 R10=fp",
55 "2: R1=ctx R3=imm4,min_value=4,max_value=4,min_align=4 R10=fp",
56 "3: R1=ctx R3=imm8,min_value=8,max_value=8,min_align=8 R10=fp",
57 "4: R1=ctx R3=imm16,min_value=16,max_value=16,min_align=16 R10=fp",
58 "5: R1=ctx R3=imm32,min_value=32,max_value=32,min_align=32 R10=fp",
59 },
60 },
61 {
62 .descr = "shift",
63 .insns = {
64 BPF_MOV64_IMM(BPF_REG_3, 1),
65 BPF_ALU64_IMM(BPF_LSH, BPF_REG_3, 1),
66 BPF_ALU64_IMM(BPF_LSH, BPF_REG_3, 1),
67 BPF_ALU64_IMM(BPF_LSH, BPF_REG_3, 1),
68 BPF_ALU64_IMM(BPF_LSH, BPF_REG_3, 1),
69 BPF_ALU64_IMM(BPF_RSH, BPF_REG_3, 4),
70 BPF_MOV64_IMM(BPF_REG_4, 32),
71 BPF_ALU64_IMM(BPF_RSH, BPF_REG_4, 1),
72 BPF_ALU64_IMM(BPF_RSH, BPF_REG_4, 1),
73 BPF_ALU64_IMM(BPF_RSH, BPF_REG_4, 1),
74 BPF_ALU64_IMM(BPF_RSH, BPF_REG_4, 1),
75 BPF_MOV64_IMM(BPF_REG_0, 0),
76 BPF_EXIT_INSN(),
77 },
78 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
79 .matches = {
80 "1: R1=ctx R3=imm1,min_value=1,max_value=1,min_align=1 R10=fp",
81 "2: R1=ctx R3=imm2,min_value=2,max_value=2,min_align=2 R10=fp",
82 "3: R1=ctx R3=imm4,min_value=4,max_value=4,min_align=4 R10=fp",
83 "4: R1=ctx R3=imm8,min_value=8,max_value=8,min_align=8 R10=fp",
84 "5: R1=ctx R3=imm16,min_value=16,max_value=16,min_align=16 R10=fp",
85 "6: R1=ctx R3=imm1,min_value=1,max_value=1,min_align=1 R10=fp",
86 "7: R1=ctx R3=imm1,min_value=1,max_value=1,min_align=1 R4=imm32,min_value=32,max_value=32,min_align=32 R10=fp",
87 "8: R1=ctx R3=imm1,min_value=1,max_value=1,min_align=1 R4=imm16,min_value=16,max_value=16,min_align=16 R10=fp",
88 "9: R1=ctx R3=imm1,min_value=1,max_value=1,min_align=1 R4=imm8,min_value=8,max_value=8,min_align=8 R10=fp",
89 "10: R1=ctx R3=imm1,min_value=1,max_value=1,min_align=1 R4=imm4,min_value=4,max_value=4,min_align=4 R10=fp",
90 "11: R1=ctx R3=imm1,min_value=1,max_value=1,min_align=1 R4=imm2,min_value=2,max_value=2,min_align=2 R10=fp",
91 },
92 },
93 {
94 .descr = "addsub",
95 .insns = {
96 BPF_MOV64_IMM(BPF_REG_3, 4),
97 BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 4),
98 BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 2),
99 BPF_MOV64_IMM(BPF_REG_4, 8),
100 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4),
101 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 2),
102 BPF_MOV64_IMM(BPF_REG_0, 0),
103 BPF_EXIT_INSN(),
104 },
105 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
106 .matches = {
107 "1: R1=ctx R3=imm4,min_value=4,max_value=4,min_align=4 R10=fp",
108 "2: R1=ctx R3=imm8,min_value=8,max_value=8,min_align=4 R10=fp",
109 "3: R1=ctx R3=imm10,min_value=10,max_value=10,min_align=2 R10=fp",
110 "4: R1=ctx R3=imm10,min_value=10,max_value=10,min_align=2 R4=imm8,min_value=8,max_value=8,min_align=8 R10=fp",
111 "5: R1=ctx R3=imm10,min_value=10,max_value=10,min_align=2 R4=imm12,min_value=12,max_value=12,min_align=4 R10=fp",
112 "6: R1=ctx R3=imm10,min_value=10,max_value=10,min_align=2 R4=imm14,min_value=14,max_value=14,min_align=2 R10=fp",
113 },
114 },
115 {
116 .descr = "mul",
117 .insns = {
118 BPF_MOV64_IMM(BPF_REG_3, 7),
119 BPF_ALU64_IMM(BPF_MUL, BPF_REG_3, 1),
120 BPF_ALU64_IMM(BPF_MUL, BPF_REG_3, 2),
121 BPF_ALU64_IMM(BPF_MUL, BPF_REG_3, 4),
122 BPF_MOV64_IMM(BPF_REG_0, 0),
123 BPF_EXIT_INSN(),
124 },
125 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
126 .matches = {
127 "1: R1=ctx R3=imm7,min_value=7,max_value=7,min_align=1 R10=fp",
128 "2: R1=ctx R3=imm7,min_value=7,max_value=7,min_align=1 R10=fp",
129 "3: R1=ctx R3=imm14,min_value=14,max_value=14,min_align=2 R10=fp",
130 "4: R1=ctx R3=imm56,min_value=56,max_value=56,min_align=4 R10=fp",
131 },
132 },
133
134#define PREP_PKT_POINTERS \
135 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, \
136 offsetof(struct __sk_buff, data)), \
137 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, \
138 offsetof(struct __sk_buff, data_end))
139
140#define LOAD_UNKNOWN(DST_REG) \
141 PREP_PKT_POINTERS, \
142 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), \
143 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), \
144 BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 1), \
145 BPF_EXIT_INSN(), \
146 BPF_LDX_MEM(BPF_B, DST_REG, BPF_REG_2, 0)
147
148 {
149 .descr = "unknown shift",
150 .insns = {
151 LOAD_UNKNOWN(BPF_REG_3),
152 BPF_ALU64_IMM(BPF_LSH, BPF_REG_3, 1),
153 BPF_ALU64_IMM(BPF_LSH, BPF_REG_3, 1),
154 BPF_ALU64_IMM(BPF_LSH, BPF_REG_3, 1),
155 BPF_ALU64_IMM(BPF_LSH, BPF_REG_3, 1),
156 LOAD_UNKNOWN(BPF_REG_4),
157 BPF_ALU64_IMM(BPF_LSH, BPF_REG_4, 5),
158 BPF_ALU64_IMM(BPF_RSH, BPF_REG_4, 1),
159 BPF_ALU64_IMM(BPF_RSH, BPF_REG_4, 1),
160 BPF_ALU64_IMM(BPF_RSH, BPF_REG_4, 1),
161 BPF_ALU64_IMM(BPF_RSH, BPF_REG_4, 1),
162 BPF_MOV64_IMM(BPF_REG_0, 0),
163 BPF_EXIT_INSN(),
164 },
165 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
166 .matches = {
167 "7: R0=pkt(id=0,off=8,r=8) R1=ctx R2=pkt(id=0,off=0,r=8) R3=inv56 R10=fp",
168 "8: R0=pkt(id=0,off=8,r=8) R1=ctx R2=pkt(id=0,off=0,r=8) R3=inv55,min_align=2 R10=fp",
169 "9: R0=pkt(id=0,off=8,r=8) R1=ctx R2=pkt(id=0,off=0,r=8) R3=inv54,min_align=4 R10=fp",
170 "10: R0=pkt(id=0,off=8,r=8) R1=ctx R2=pkt(id=0,off=0,r=8) R3=inv53,min_align=8 R10=fp",
171 "11: R0=pkt(id=0,off=8,r=8) R1=ctx R2=pkt(id=0,off=0,r=8) R3=inv52,min_align=16 R10=fp",
172 "18: R0=pkt(id=0,off=8,r=8) R1=ctx R2=pkt(id=0,off=0,r=8) R3=pkt_end R4=inv56 R10=fp",
173 "19: R0=pkt(id=0,off=8,r=8) R1=ctx R2=pkt(id=0,off=0,r=8) R3=pkt_end R4=inv51,min_align=32 R10=fp",
174 "20: R0=pkt(id=0,off=8,r=8) R1=ctx R2=pkt(id=0,off=0,r=8) R3=pkt_end R4=inv52,min_align=16 R10=fp",
175 "21: R0=pkt(id=0,off=8,r=8) R1=ctx R2=pkt(id=0,off=0,r=8) R3=pkt_end R4=inv53,min_align=8 R10=fp",
176 "22: R0=pkt(id=0,off=8,r=8) R1=ctx R2=pkt(id=0,off=0,r=8) R3=pkt_end R4=inv54,min_align=4 R10=fp",
177 "23: R0=pkt(id=0,off=8,r=8) R1=ctx R2=pkt(id=0,off=0,r=8) R3=pkt_end R4=inv55,min_align=2 R10=fp",
178 },
179 },
180 {
181 .descr = "unknown mul",
182 .insns = {
183 LOAD_UNKNOWN(BPF_REG_3),
184 BPF_MOV64_REG(BPF_REG_4, BPF_REG_3),
185 BPF_ALU64_IMM(BPF_MUL, BPF_REG_4, 1),
186 BPF_MOV64_REG(BPF_REG_4, BPF_REG_3),
187 BPF_ALU64_IMM(BPF_MUL, BPF_REG_4, 2),
188 BPF_MOV64_REG(BPF_REG_4, BPF_REG_3),
189 BPF_ALU64_IMM(BPF_MUL, BPF_REG_4, 4),
190 BPF_MOV64_REG(BPF_REG_4, BPF_REG_3),
191 BPF_ALU64_IMM(BPF_MUL, BPF_REG_4, 8),
192 BPF_ALU64_IMM(BPF_MUL, BPF_REG_4, 2),
193 BPF_MOV64_IMM(BPF_REG_0, 0),
194 BPF_EXIT_INSN(),
195 },
196 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
197 .matches = {
198 "7: R0=pkt(id=0,off=8,r=8) R1=ctx R2=pkt(id=0,off=0,r=8) R3=inv56 R10=fp",
199 "8: R0=pkt(id=0,off=8,r=8) R1=ctx R2=pkt(id=0,off=0,r=8) R3=inv56 R4=inv56 R10=fp",
200 "9: R0=pkt(id=0,off=8,r=8) R1=ctx R2=pkt(id=0,off=0,r=8) R3=inv56 R4=inv55,min_align=1 R10=fp",
201 "10: R0=pkt(id=0,off=8,r=8) R1=ctx R2=pkt(id=0,off=0,r=8) R3=inv56 R4=inv56 R10=fp",
202 "11: R0=pkt(id=0,off=8,r=8) R1=ctx R2=pkt(id=0,off=0,r=8) R3=inv56 R4=inv54,min_align=2 R10=fp",
203 "12: R0=pkt(id=0,off=8,r=8) R1=ctx R2=pkt(id=0,off=0,r=8) R3=inv56 R4=inv56 R10=fp",
204 "13: R0=pkt(id=0,off=8,r=8) R1=ctx R2=pkt(id=0,off=0,r=8) R3=inv56 R4=inv53,min_align=4 R10=fp",
205 "14: R0=pkt(id=0,off=8,r=8) R1=ctx R2=pkt(id=0,off=0,r=8) R3=inv56 R4=inv56 R10=fp",
206 "15: R0=pkt(id=0,off=8,r=8) R1=ctx R2=pkt(id=0,off=0,r=8) R3=inv56 R4=inv52,min_align=8 R10=fp",
207 "16: R0=pkt(id=0,off=8,r=8) R1=ctx R2=pkt(id=0,off=0,r=8) R3=inv56 R4=inv50,min_align=8 R10=fp"
208 },
209 },
210 {
211 .descr = "packet const offset",
212 .insns = {
213 PREP_PKT_POINTERS,
214 BPF_MOV64_REG(BPF_REG_5, BPF_REG_2),
215
216 BPF_MOV64_IMM(BPF_REG_0, 0),
217
218 /* Skip over ethernet header. */
219 BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 14),
220 BPF_MOV64_REG(BPF_REG_4, BPF_REG_5),
221 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4),
222 BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_4, 1),
223 BPF_EXIT_INSN(),
224
225 BPF_LDX_MEM(BPF_B, BPF_REG_4, BPF_REG_5, 0),
226 BPF_LDX_MEM(BPF_B, BPF_REG_4, BPF_REG_5, 1),
227 BPF_LDX_MEM(BPF_B, BPF_REG_4, BPF_REG_5, 2),
228 BPF_LDX_MEM(BPF_B, BPF_REG_4, BPF_REG_5, 3),
229 BPF_LDX_MEM(BPF_H, BPF_REG_4, BPF_REG_5, 0),
230 BPF_LDX_MEM(BPF_H, BPF_REG_4, BPF_REG_5, 2),
231 BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_5, 0),
232
233 BPF_MOV64_IMM(BPF_REG_0, 0),
234 BPF_EXIT_INSN(),
235 },
236 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
237 .matches = {
238 "4: R0=imm0,min_value=0,max_value=0,min_align=2147483648 R1=ctx R2=pkt(id=0,off=0,r=0) R3=pkt_end R5=pkt(id=0,off=0,r=0) R10=fp",
239 "5: R0=imm0,min_value=0,max_value=0,min_align=2147483648 R1=ctx R2=pkt(id=0,off=0,r=0) R3=pkt_end R5=pkt(id=0,off=14,r=0) R10=fp",
240 "6: R0=imm0,min_value=0,max_value=0,min_align=2147483648 R1=ctx R2=pkt(id=0,off=0,r=0) R3=pkt_end R4=pkt(id=0,off=14,r=0) R5=pkt(id=0,off=14,r=0) R10=fp",
241 "10: R0=imm0,min_value=0,max_value=0,min_align=2147483648 R1=ctx R2=pkt(id=0,off=0,r=18) R3=pkt_end R4=inv56 R5=pkt(id=0,off=14,r=18) R10=fp",
242 "14: R0=imm0,min_value=0,max_value=0,min_align=2147483648 R1=ctx R2=pkt(id=0,off=0,r=18) R3=pkt_end R4=inv48 R5=pkt(id=0,off=14,r=18) R10=fp",
243 "15: R0=imm0,min_value=0,max_value=0,min_align=2147483648 R1=ctx R2=pkt(id=0,off=0,r=18) R3=pkt_end R4=inv48 R5=pkt(id=0,off=14,r=18) R10=fp",
244 },
245 },
246 {
247 .descr = "packet variable offset",
248 .insns = {
249 LOAD_UNKNOWN(BPF_REG_6),
250 BPF_ALU64_IMM(BPF_LSH, BPF_REG_6, 2),
251
252 /* First, add a constant to the R5 packet pointer,
253 * then a variable with a known alignment.
254 */
255 BPF_MOV64_REG(BPF_REG_5, BPF_REG_2),
256 BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 14),
257 BPF_ALU64_REG(BPF_ADD, BPF_REG_5, BPF_REG_6),
258 BPF_MOV64_REG(BPF_REG_4, BPF_REG_5),
259 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4),
260 BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_4, 1),
261 BPF_EXIT_INSN(),
262 BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_5, 0),
263
264 /* Now, test in the other direction. Adding first
265 * the variable offset to R5, then the constant.
266 */
267 BPF_MOV64_REG(BPF_REG_5, BPF_REG_2),
268 BPF_ALU64_REG(BPF_ADD, BPF_REG_5, BPF_REG_6),
269 BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 14),
270 BPF_MOV64_REG(BPF_REG_4, BPF_REG_5),
271 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4),
272 BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_4, 1),
273 BPF_EXIT_INSN(),
274 BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_5, 0),
275
276 /* Test multiple accumulations of unknown values
277 * into a packet pointer.
278 */
279 BPF_MOV64_REG(BPF_REG_5, BPF_REG_2),
280 BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 14),
281 BPF_ALU64_REG(BPF_ADD, BPF_REG_5, BPF_REG_6),
282 BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 4),
283 BPF_ALU64_REG(BPF_ADD, BPF_REG_5, BPF_REG_6),
284 BPF_MOV64_REG(BPF_REG_4, BPF_REG_5),
285 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4),
286 BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_4, 1),
287 BPF_EXIT_INSN(),
288 BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_5, 0),
289
290 BPF_MOV64_IMM(BPF_REG_0, 0),
291 BPF_EXIT_INSN(),
292 },
293 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
294 .matches = {
295 /* Calculated offset in R6 has unknown value, but known
296 * alignment of 4.
297 */
298 "8: R0=pkt(id=0,off=8,r=8) R1=ctx R2=pkt(id=0,off=0,r=8) R3=pkt_end R6=inv54,min_align=4 R10=fp",
299
300 /* Offset is added to packet pointer R5, resulting in known
301 * auxiliary alignment and offset.
302 */
303 "11: R0=pkt(id=0,off=8,r=8) R1=ctx R2=pkt(id=0,off=0,r=8) R3=pkt_end R5=pkt(id=1,off=0,r=0),aux_off=14,aux_off_align=4 R6=inv54,min_align=4 R10=fp",
304
305 /* At the time the word size load is performed from R5,
306 * it's total offset is NET_IP_ALIGN + reg->off (0) +
307 * reg->aux_off (14) which is 16. Then the variable
308 * offset is considered using reg->aux_off_align which
309 * is 4 and meets the load's requirements.
310 */
311 "15: R0=pkt(id=0,off=8,r=8) R1=ctx R2=pkt(id=0,off=0,r=8) R3=pkt_end R4=pkt(id=1,off=4,r=4),aux_off=14,aux_off_align=4 R5=pkt(id=1,off=0,r=4),aux_off=14,aux_off_align=4 R6=inv54,min_align=4 R10=fp",
312
313
314 /* Variable offset is added to R5 packet pointer,
315 * resulting in auxiliary alignment of 4.
316 */
317 "18: R0=pkt(id=0,off=8,r=8) R1=ctx R2=pkt(id=0,off=0,r=8) R3=pkt_end R4=inv,aux_off=14,aux_off_align=4 R5=pkt(id=2,off=0,r=0),aux_off_align=4 R6=inv54,min_align=4 R10=fp",
318
319 /* Constant offset is added to R5, resulting in
320 * reg->off of 14.
321 */
322 "19: R0=pkt(id=0,off=8,r=8) R1=ctx R2=pkt(id=0,off=0,r=8) R3=pkt_end R4=inv,aux_off=14,aux_off_align=4 R5=pkt(id=2,off=14,r=0),aux_off_align=4 R6=inv54,min_align=4 R10=fp",
323
324 /* At the time the word size load is performed from R5,
325 * it's total offset is NET_IP_ALIGN + reg->off (14) which
326 * is 16. Then the variable offset is considered using
327 * reg->aux_off_align which is 4 and meets the load's
328 * requirements.
329 */
330 "23: R0=pkt(id=0,off=8,r=8) R1=ctx R2=pkt(id=0,off=0,r=8) R3=pkt_end R4=pkt(id=2,off=18,r=18),aux_off_align=4 R5=pkt(id=2,off=14,r=18),aux_off_align=4 R6=inv54,min_align=4 R10=fp",
331
332 /* Constant offset is added to R5 packet pointer,
333 * resulting in reg->off value of 14.
334 */
335 "26: R0=pkt(id=0,off=8,r=8) R1=ctx R2=pkt(id=0,off=0,r=8) R3=pkt_end R4=inv,aux_off_align=4 R5=pkt(id=0,off=14,r=8) R6=inv54,min_align=4 R10=fp",
336 /* Variable offset is added to R5, resulting in an
337 * auxiliary offset of 14, and an auxiliary alignment of 4.
338 */
339 "27: R0=pkt(id=0,off=8,r=8) R1=ctx R2=pkt(id=0,off=0,r=8) R3=pkt_end R4=inv,aux_off_align=4 R5=pkt(id=3,off=0,r=0),aux_off=14,aux_off_align=4 R6=inv54,min_align=4 R10=fp",
340 /* Constant is added to R5 again, setting reg->off to 4. */
341 "28: R0=pkt(id=0,off=8,r=8) R1=ctx R2=pkt(id=0,off=0,r=8) R3=pkt_end R4=inv,aux_off_align=4 R5=pkt(id=3,off=4,r=0),aux_off=14,aux_off_align=4 R6=inv54,min_align=4 R10=fp",
342 /* And once more we add a variable, which causes an accumulation
343 * of reg->off into reg->aux_off_align, with resulting value of
344 * 18. The auxiliary alignment stays at 4.
345 */
346 "29: R0=pkt(id=0,off=8,r=8) R1=ctx R2=pkt(id=0,off=0,r=8) R3=pkt_end R4=inv,aux_off_align=4 R5=pkt(id=4,off=0,r=0),aux_off=18,aux_off_align=4 R6=inv54,min_align=4 R10=fp",
347 /* At the time the word size load is performed from R5,
348 * it's total offset is NET_IP_ALIGN + reg->off (0) +
349 * reg->aux_off (18) which is 20. Then the variable offset
350 * is considered using reg->aux_off_align which is 4 and meets
351 * the load's requirements.
352 */
353 "33: R0=pkt(id=0,off=8,r=8) R1=ctx R2=pkt(id=0,off=0,r=8) R3=pkt_end R4=pkt(id=4,off=4,r=4),aux_off=18,aux_off_align=4 R5=pkt(id=4,off=0,r=4),aux_off=18,aux_off_align=4 R6=inv54,min_align=4 R10=fp",
354 },
355 },
356};
357
358static int probe_filter_length(const struct bpf_insn *fp)
359{
360 int len;
361
362 for (len = MAX_INSNS - 1; len > 0; --len)
363 if (fp[len].code != 0 || fp[len].imm != 0)
364 break;
365 return len + 1;
366}
367
368static char bpf_vlog[32768];
369
370static int do_test_single(struct bpf_align_test *test)
371{
372 struct bpf_insn *prog = test->insns;
373 int prog_type = test->prog_type;
374 int prog_len, i;
375 int fd_prog;
376 int ret;
377
378 prog_len = probe_filter_length(prog);
379 fd_prog = bpf_verify_program(prog_type ? : BPF_PROG_TYPE_SOCKET_FILTER,
380 prog, prog_len, 1, "GPL", 0,
381 bpf_vlog, sizeof(bpf_vlog));
382 if (fd_prog < 0) {
383 printf("Failed to load program.\n");
384 printf("%s", bpf_vlog);
385 ret = 1;
386 } else {
387 ret = 0;
388 for (i = 0; i < MAX_MATCHES; i++) {
389 const char *t, *m = test->matches[i];
390
391 if (!m)
392 break;
393 t = strstr(bpf_vlog, m);
394 if (!t) {
395 printf("Failed to find match: %s\n", m);
396 ret = 1;
397 printf("%s", bpf_vlog);
398 break;
399 }
400 }
401 close(fd_prog);
402 }
403 return ret;
404}
405
406static int do_test(unsigned int from, unsigned int to)
407{
408 int all_pass = 0;
409 int all_fail = 0;
410 unsigned int i;
411
412 for (i = from; i < to; i++) {
413 struct bpf_align_test *test = &tests[i];
414 int fail;
415
416 printf("Test %3d: %s ... ",
417 i, test->descr);
418 fail = do_test_single(test);
419 if (fail) {
420 all_fail++;
421 printf("FAIL\n");
422 } else {
423 all_pass++;
424 printf("PASS\n");
425 }
426 }
427 printf("Results: %d pass %d fail\n",
428 all_pass, all_fail);
429 return 0;
430}
431
432int main(int argc, char **argv)
433{
434 unsigned int from = 0, to = ARRAY_SIZE(tests);
435
436 if (argc == 3) {
437 unsigned int l = atoi(argv[argc - 2]);
438 unsigned int u = atoi(argv[argc - 1]);
439
440 if (l < to && u < to) {
441 from = l;
442 to = u + 1;
443 }
444 } else if (argc == 2) {
445 unsigned int t = atoi(argv[argc - 1]);
446
447 if (t < to) {
448 from = t;
449 to = t + 1;
450 }
451 }
452 return do_test(from, to);
453}