-rw-r--r--Documentation/dontdiff3
-rw-r--r--Documentation/filesystems/Locking2
-rw-r--r--Documentation/filesystems/vfs.txt2
-rw-r--r--Makefile41
-rw-r--r--arch/alpha/Kconfig1
-rw-r--r--arch/arm/Kconfig1
-rw-r--r--arch/arm/include/asm/syscall.h5
-rw-r--r--arch/blackfin/Kconfig12
-rw-r--r--arch/blackfin/include/asm/bfin_crc.h125
-rw-r--r--arch/blackfin/include/asm/dma.h2
-rw-r--r--arch/blackfin/include/asm/portmux.h10
-rw-r--r--arch/blackfin/kernel/irqchip.c39
-rw-r--r--arch/blackfin/mach-bf518/boards/ezbrd.c87
-rw-r--r--arch/blackfin/mach-bf518/boards/tcm-bf518.c64
-rw-r--r--arch/blackfin/mach-bf527/boards/ad7160eval.c71
-rw-r--r--arch/blackfin/mach-bf527/boards/cm_bf527.c91
-rw-r--r--arch/blackfin/mach-bf527/boards/ezbrd.c76
-rw-r--r--arch/blackfin/mach-bf527/boards/ezkit.c139
-rw-r--r--arch/blackfin/mach-bf527/boards/tll6527m.c91
-rw-r--r--arch/blackfin/mach-bf533/boards/H8606.c46
-rw-r--r--arch/blackfin/mach-bf533/boards/blackstamp.c44
-rw-r--r--arch/blackfin/mach-bf533/boards/cm_bf533.c54
-rw-r--r--arch/blackfin/mach-bf533/boards/ezkit.c56
-rw-r--r--arch/blackfin/mach-bf533/boards/ip0x.c26
-rw-r--r--arch/blackfin/mach-bf533/boards/stamp.c118
-rw-r--r--arch/blackfin/mach-bf537/boards/cm_bf537e.c82
-rw-r--r--arch/blackfin/mach-bf537/boards/cm_bf537u.c70
-rw-r--r--arch/blackfin/mach-bf537/boards/dnp5370.c32
-rw-r--r--arch/blackfin/mach-bf537/boards/minotaur.c50
-rw-r--r--arch/blackfin/mach-bf537/boards/pnav10.c55
-rw-r--r--arch/blackfin/mach-bf537/boards/stamp.c425
-rw-r--r--arch/blackfin/mach-bf537/boards/tcm_bf537.c70
-rw-r--r--arch/blackfin/mach-bf538/boards/ezkit.c62
-rw-r--r--arch/blackfin/mach-bf548/boards/cm_bf548.c78
-rw-r--r--arch/blackfin/mach-bf548/boards/ezkit.c136
-rw-r--r--arch/blackfin/mach-bf548/include/mach/defBF544.h30
-rw-r--r--arch/blackfin/mach-bf548/include/mach/defBF547.h30
-rw-r--r--arch/blackfin/mach-bf561/boards/acvilon.c28
-rw-r--r--arch/blackfin/mach-bf561/boards/cm_bf561.c56
-rw-r--r--arch/blackfin/mach-bf561/boards/ezkit.c74
-rw-r--r--arch/blackfin/mach-bf561/boards/tepla.c8
-rw-r--r--arch/blackfin/mach-bf609/boards/ezkit.c131
-rw-r--r--arch/blackfin/mach-bf609/clock.c18
-rw-r--r--arch/blackfin/mach-bf609/pm.c2
-rw-r--r--arch/ia64/Kconfig1
-rw-r--r--arch/mips/include/asm/syscall.h7
-rw-r--r--arch/mips/kernel/ptrace.c2
-rw-r--r--arch/mn10300/include/asm/highmem.h4
-rw-r--r--arch/parisc/Kconfig1
-rw-r--r--arch/powerpc/Kconfig1
-rw-r--r--arch/powerpc/configs/ppc6xx_defconfig1
-rw-r--r--arch/powerpc/configs/ps3_defconfig1
-rw-r--r--arch/powerpc/kernel/setup_64.c3
-rw-r--r--arch/s390/Kconfig1
-rw-r--r--arch/s390/configs/default_defconfig1
-rw-r--r--arch/s390/include/asm/syscall.h7
-rw-r--r--arch/sh/Kconfig1
-rw-r--r--arch/sh/configs/rsk7203_defconfig1
-rw-r--r--arch/sparc/Kconfig1
-rw-r--r--arch/um/Kconfig.common1
-rw-r--r--arch/x86/Kconfig1
-rw-r--r--arch/x86/Makefile2
-rw-r--r--arch/x86/include/asm/kvm_host.h2
-rw-r--r--arch/x86/include/asm/syscall.h10
-rw-r--r--arch/x86/kvm/cpuid.c2
-rw-r--r--arch/x86/kvm/cpuid.h8
-rw-r--r--arch/x86/kvm/mmu.c38
-rw-r--r--arch/x86/kvm/mmu.h44
-rw-r--r--arch/x86/kvm/paging_tmpl.h2
-rw-r--r--arch/x86/kvm/vmx.c11
-rw-r--r--arch/x86/kvm/x86.c10
-rw-r--r--arch/xtensa/configs/iss_defconfig1
-rw-r--r--arch/xtensa/configs/s6105_defconfig1
-rw-r--r--block/blk-map.c2
-rw-r--r--drivers/block/drbd/drbd_receiver.c12
-rw-r--r--drivers/block/nbd.c48
-rw-r--r--drivers/block/nvme-core.c684
-rw-r--r--drivers/block/nvme-scsi.c43
-rw-r--r--drivers/char/hw_random/bcm2835-rng.c10
-rw-r--r--drivers/char/virtio_console.c4
-rw-r--r--drivers/infiniband/ulp/isert/ib_isert.c828
-rw-r--r--drivers/infiniband/ulp/isert/ib_isert.h38
-rw-r--r--drivers/infiniband/ulp/srpt/ib_srpt.c27
-rw-r--r--drivers/md/bitmap.c1
-rw-r--r--drivers/md/md.c65
-rw-r--r--drivers/md/md.h1
-rw-r--r--drivers/md/raid1.c17
-rw-r--r--drivers/md/raid5.c28
-rw-r--r--drivers/media/dvb-frontends/drx39xyj/Kconfig2
-rw-r--r--drivers/media/dvb-frontends/lgdt3305.c1
-rw-r--r--drivers/media/dvb-frontends/m88rs2000.c8
-rw-r--r--drivers/media/platform/ti-vpe/vpe.c45
-rw-r--r--drivers/media/rc/img-ir/img-ir-hw.c15
-rw-r--r--drivers/media/rc/img-ir/img-ir-nec.c27
-rw-r--r--drivers/media/rc/ir-nec-decoder.c5
-rw-r--r--drivers/media/rc/keymaps/rc-tivo.c86
-rw-r--r--drivers/media/rc/rc-main.c98
-rw-r--r--drivers/media/tuners/r820t.c3
-rw-r--r--drivers/media/tuners/tuner-xc2028.c1
-rw-r--r--drivers/media/usb/dvb-usb-v2/rtl28xxu.c2
-rw-r--r--drivers/media/usb/gspca/jpeg.h4
-rw-r--r--drivers/media/usb/stk1160/stk1160-ac97.c2
-rw-r--r--drivers/net/bonding/bond_main.c1
-rw-r--r--drivers/net/ethernet/8390/apne.c4
-rw-r--r--drivers/net/ethernet/allwinner/sun4i-emac.c30
-rw-r--r--drivers/net/ethernet/broadcom/bnx2.c2
-rw-r--r--drivers/net/ethernet/cadence/Kconfig6
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/l2t.c4
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_hw.c2
-rw-r--r--drivers/net/ethernet/emulex/benet/be.h1
-rw-r--r--drivers/net/ethernet/emulex/benet/be_main.c17
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000_hw.c124
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000_main.c11
-rw-r--r--drivers/net/ethernet/intel/e1000e/netdev.c10
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_txrx.c10
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40evf_main.c20
-rw-r--r--drivers/net/ethernet/intel/igb/igb.h16
-rw-r--r--drivers/net/ethernet/intel/igb/igb_main.c13
-rw-r--r--drivers/net/ethernet/intel/igb/igb_ptp.c14
-rw-r--r--drivers/net/ethernet/intel/igbvf/netdev.c16
-rw-r--r--drivers/net/ethernet/intel/ixgb/ixgb_main.c10
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe.h1
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_main.c28
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ixgbevf.h1
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c20
-rw-r--r--drivers/net/ethernet/marvell/mvneta.c41
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/main.c181
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mlx4.h1
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c21
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c3
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c2
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c33
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c10
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c31
-rw-r--r--drivers/net/ethernet/ti/cpsw.c17
-rw-r--r--drivers/net/hyperv/hyperv_net.h1
-rw-r--r--drivers/net/hyperv/netvsc.c2
-rw-r--r--drivers/net/hyperv/netvsc_drv.c30
-rw-r--r--drivers/net/hyperv/rndis_filter.c12
-rw-r--r--drivers/net/ieee802154/at86rf230.c10
-rw-r--r--drivers/net/ntb_netdev.c27
-rw-r--r--drivers/net/phy/phy.c6
-rw-r--r--drivers/net/usb/r8152.c48
-rw-r--r--drivers/net/vxlan.c4
-rw-r--r--drivers/net/wan/cosa.c4
-rw-r--r--drivers/net/wireless/ath/ath9k/ar5008_phy.c4
-rw-r--r--drivers/net/wireless/ath/ath9k/beacon.c7
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_txrx.c5
-rw-r--r--drivers/net/wireless/ath/ath9k/init.c2
-rw-r--r--drivers/net/wireless/b43/phy_n.c14
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_core.c6
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_debugfs.c35
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_mgmt.c8
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_sdio.c5
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_sdio_ops.c6
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_usb.c26
-rw-r--r--drivers/net/wireless/rtlwifi/btcoexist/halbtcoutsrc.c10
-rw-r--r--drivers/net/xen-netfront.c2
-rw-r--r--drivers/ntb/ntb_hw.c192
-rw-r--r--drivers/ntb/ntb_hw.h8
-rw-r--r--drivers/ntb/ntb_transport.c20
-rw-r--r--drivers/remoteproc/da8xx_remoteproc.c16
-rw-r--r--drivers/remoteproc/ste_modem_rproc.c4
-rw-r--r--drivers/scsi/Kconfig3
-rw-r--r--drivers/scsi/iscsi_tcp.c2
-rw-r--r--drivers/scsi/iscsi_tcp.h2
-rw-r--r--drivers/scsi/qla2xxx/tcm_qla2xxx.c18
-rw-r--r--drivers/scsi/scsi.c9
-rw-r--r--drivers/scsi/scsi_pm.c128
-rw-r--r--drivers/scsi/scsi_priv.h2
-rw-r--r--drivers/scsi/scsi_scan.c2
-rw-r--r--drivers/scsi/sd.c1
-rw-r--r--drivers/scsi/sym53c8xx_2/sym_hipd.c4
-rw-r--r--drivers/staging/lustre/lnet/klnds/socklnd/socklnd_lib-linux.c64
-rw-r--r--drivers/staging/lustre/lustre/libcfs/linux/linux-tcpip.c24
-rw-r--r--drivers/staging/lustre/lustre/llite/symlink.c23
-rw-r--r--drivers/staging/media/msi3101/msi001.c2
-rw-r--r--drivers/staging/media/msi3101/sdr-msi3101.c15
-rw-r--r--drivers/staging/usbip/stub_dev.c8
-rw-r--r--drivers/staging/usbip/usbip_common.c25
-rw-r--r--drivers/staging/usbip/usbip_common.h1
-rw-r--r--drivers/staging/usbip/vhci_hcd.c4
-rw-r--r--drivers/staging/usbip/vhci_sysfs.c6
-rw-r--r--drivers/target/iscsi/iscsi_target.c33
-rw-r--r--drivers/target/iscsi/iscsi_target_configfs.c14
-rw-r--r--drivers/target/iscsi/iscsi_target_core.h7
-rw-r--r--drivers/target/iscsi/iscsi_target_login.c4
-rw-r--r--drivers/target/iscsi/iscsi_target_nego.c2
-rw-r--r--drivers/target/iscsi/iscsi_target_tpg.c21
-rw-r--r--drivers/target/iscsi/iscsi_target_tpg.h1
-rw-r--r--drivers/target/iscsi/iscsi_target_util.c4
-rw-r--r--drivers/target/iscsi/iscsi_target_util.h1
-rw-r--r--drivers/target/loopback/tcm_loop.c12
-rw-r--r--drivers/target/sbp/sbp_target.c8
-rw-r--r--drivers/target/target_core_alua.c95
-rw-r--r--drivers/target/target_core_configfs.c4
-rw-r--r--drivers/target/target_core_file.c40
-rw-r--r--drivers/target/target_core_iblock.c5
-rw-r--r--drivers/target/target_core_rd.c14
-rw-r--r--drivers/target/target_core_sbc.c178
-rw-r--r--drivers/target/target_core_spc.c49
-rw-r--r--drivers/target/target_core_tmr.c23
-rw-r--r--drivers/target/target_core_transport.c92
-rw-r--r--drivers/target/tcm_fc/tcm_fc.h13
-rw-r--r--drivers/target/tcm_fc/tfc_cmd.c5
-rw-r--r--drivers/target/tcm_fc/tfc_conf.c76
-rw-r--r--drivers/target/tcm_fc/tfc_sess.c10
-rw-r--r--drivers/tty/tty_audit.c3
-rw-r--r--drivers/usb/gadget/tcm_usb_gadget.c8
-rw-r--r--drivers/vhost/net.c14
-rw-r--r--drivers/vhost/scsi.c9
-rw-r--r--fs/aio.c120
-rw-r--r--fs/bio.c10
-rw-r--r--fs/block_dev.c2
-rw-r--r--fs/btrfs/async-thread.c2
-rw-r--r--fs/btrfs/backref.c33
-rw-r--r--fs/btrfs/ctree.c94
-rw-r--r--fs/btrfs/ctree.h13
-rw-r--r--fs/btrfs/disk-io.c23
-rw-r--r--fs/btrfs/extent-tree.c35
-rw-r--r--fs/btrfs/extent_io.c8
-rw-r--r--fs/btrfs/extent_io.h1
-rw-r--r--fs/btrfs/file.c21
-rw-r--r--fs/btrfs/inode-map.c14
-rw-r--r--fs/btrfs/inode.c36
-rw-r--r--fs/btrfs/ioctl.c35
-rw-r--r--fs/btrfs/relocation.c21
-rw-r--r--fs/btrfs/scrub.c108
-rw-r--r--fs/btrfs/send.c117
-rw-r--r--fs/btrfs/super.c22
-rw-r--r--fs/btrfs/transaction.c48
-rw-r--r--fs/btrfs/transaction.h3
-rw-r--r--fs/btrfs/volumes.c35
-rw-r--r--fs/buffer.c6
-rw-r--r--fs/cachefiles/bind.c1
-rw-r--r--fs/cachefiles/namei.c3
-rw-r--r--fs/ceph/file.c12
-rw-r--r--fs/ceph/ioctl.c3
-rw-r--r--fs/cifs/cifsfs.c1
-rw-r--r--fs/cifs/file.c128
-rw-r--r--fs/dlm/lowcomms.c2
-rw-r--r--fs/exec.c2
-rw-r--r--fs/ext4/file.c2
-rw-r--r--fs/file.c11
-rw-r--r--fs/file_table.c43
-rw-r--r--fs/fuse/dev.c14
-rw-r--r--fs/fuse/file.c5
-rw-r--r--fs/mount.h5
-rw-r--r--fs/namei.c67
-rw-r--r--fs/namespace.c56
-rw-r--r--fs/ncpfs/inode.c50
-rw-r--r--fs/ncpfs/ncp_fs_sb.h6
-rw-r--r--fs/ncpfs/sock.c4
-rw-r--r--fs/ntfs/inode.c2
-rw-r--r--fs/ocfs2/cluster/tcp.c64
-rw-r--r--fs/ocfs2/cluster/tcp_internal.h2
-rw-r--r--fs/ocfs2/file.c9
-rw-r--r--fs/open.c68
-rw-r--r--fs/pipe.c133
-rw-r--r--fs/pnode.c198
-rw-r--r--fs/pnode.h3
-rw-r--r--fs/proc/base.c36
-rw-r--r--fs/proc/namespaces.c14
-rw-r--r--fs/proc/self.c2
-rw-r--r--fs/proc_namespace.c1
-rw-r--r--fs/splice.c126
-rw-r--r--fs/udf/file.c2
-rw-r--r--fs/xfs/xfs_file.c13
-rw-r--r--fs/xfs/xfs_ioctl.c28
-rw-r--r--include/asm-generic/cmpxchg-local.h3
-rw-r--r--include/asm-generic/syscall.h4
-rw-r--r--include/linux/audit.h14
-rw-r--r--include/linux/bio.h5
-rw-r--r--include/linux/blkdev.h4
-rw-r--r--include/linux/buffer_head.h4
-rw-r--r--include/linux/compiler-clang.h12
-rw-r--r--include/linux/compiler.h7
-rw-r--r--include/linux/fdtable.h2
-rw-r--r--include/linux/filter.h1
-rw-r--r--include/linux/fs.h97
-rw-r--r--include/linux/ftrace_event.h22
-rw-r--r--include/linux/mm.h1
-rw-r--r--include/linux/mm_types.h3
-rw-r--r--include/linux/mod_devicetable.h5
-rw-r--r--include/linux/mount.h3
-rw-r--r--include/linux/nbd.h3
-rw-r--r--include/linux/netfilter/nf_conntrack_proto_gre.h1
-rw-r--r--include/linux/ntb.h19
-rw-r--r--include/linux/nvme.h21
-rw-r--r--include/linux/pipe_fs_i.h19
-rw-r--r--include/linux/sched.h20
-rw-r--r--include/linux/slab.h11
-rw-r--r--include/linux/sunrpc/svcsock.h2
-rw-r--r--include/linux/syscalls.h8
-rw-r--r--include/linux/tracepoint.h49
-rw-r--r--include/linux/uio.h52
-rw-r--r--include/media/rc-core.h8
-rw-r--r--include/net/9p/client.h6
-rw-r--r--include/net/9p/transport.h3
-rw-r--r--include/net/dst.h14
-rw-r--r--include/net/inet6_connection_sock.h2
-rw-r--r--include/net/inet_connection_sock.h2
-rw-r--r--include/net/ip.h13
-rw-r--r--include/net/ip6_route.h5
-rw-r--r--include/net/ip_tunnels.h2
-rw-r--r--include/net/ipv6.h2
-rw-r--r--include/net/netfilter/nf_tables_core.h10
-rw-r--r--include/net/sctp/sctp.h2
-rw-r--r--include/net/sctp/structs.h14
-rw-r--r--include/net/sock.h2
-rw-r--r--include/net/xfrm.h6
-rw-r--r--include/target/iscsi/iscsi_transport.h2
-rw-r--r--include/target/target_core_backend.h2
-rw-r--r--include/target/target_core_base.h33
-rw-r--r--include/target/target_core_fabric.h6
-rw-r--r--include/trace/events/syscalls.h3
-rw-r--r--include/trace/ftrace.h15
-rw-r--r--include/uapi/linux/audit.h3
-rw-r--r--include/uapi/linux/capability.h4
-rw-r--r--include/uapi/linux/nvme.h1
-rw-r--r--include/uapi/linux/v4l2-common.h2
-rw-r--r--init/Kconfig5
-rw-r--r--kernel/audit.c27
-rw-r--r--kernel/audit.h6
-rw-r--r--kernel/auditfilter.c33
-rw-r--r--kernel/auditsc.c133
-rw-r--r--kernel/futex.c32
-rw-r--r--kernel/relay.c4
-rw-r--r--kernel/seccomp.c21
-rw-r--r--kernel/trace/trace.c10
-rw-r--r--kernel/trace/trace_events.c55
-rw-r--r--kernel/trace/trace_events_trigger.c2
-rw-r--r--kernel/trace/trace_export.c6
-rw-r--r--kernel/trace/trace_kprobe.c21
-rw-r--r--kernel/trace/trace_output.c2
-rw-r--r--kernel/trace/trace_uprobe.c20
-rw-r--r--kernel/tracepoint.c516
-rw-r--r--kernel/user_namespace.c11
-rw-r--r--lib/Kconfig9
-rw-r--r--lib/Kconfig.debug10
-rw-r--r--lib/Makefile1
-rw-r--r--lib/audit.c15
-rw-r--r--lib/compat_audit.c50
-rw-r--r--mm/Makefile3
-rw-r--r--mm/filemap.c344
-rw-r--r--mm/iov_iter.c224
-rw-r--r--mm/process_vm_access.c250
-rw-r--r--mm/shmem.c79
-rw-r--r--mm/slab.c183
-rw-r--r--mm/slob.c10
-rw-r--r--mm/slub.c5
-rw-r--r--mm/util.c48
-rw-r--r--net/9p/client.c25
-rw-r--r--net/9p/trans_fd.c110
-rw-r--r--net/9p/trans_rdma.c26
-rw-r--r--net/9p/trans_virtio.c3
-rw-r--r--net/atm/clip.c2
-rw-r--r--net/atm/lec.c10
-rw-r--r--net/atm/mpc.c6
-rw-r--r--net/atm/raw.c2
-rw-r--r--net/atm/signaling.c2
-rw-r--r--net/ax25/ax25_in.c2
-rw-r--r--net/bluetooth/l2cap_sock.c6
-rw-r--r--net/bluetooth/rfcomm/core.c4
-rw-r--r--net/bluetooth/rfcomm/sock.c4
-rw-r--r--net/bluetooth/sco.c2
-rw-r--r--net/bridge/br_input.c2
-rw-r--r--net/bridge/br_vlan.c7
-rw-r--r--net/caif/caif_socket.c4
-rw-r--r--net/ceph/messenger.c2
-rw-r--r--net/core/dev.c2
-rw-r--r--net/core/dst.c15
-rw-r--r--net/core/filter.c9
-rw-r--r--net/core/pktgen.c8
-rw-r--r--net/core/skbuff.c16
-rw-r--r--net/core/sock.c4
-rw-r--r--net/dccp/input.c2
-rw-r--r--net/dccp/minisocks.c2
-rw-r--r--net/dccp/output.c2
-rw-r--r--net/decnet/dn_nsp_in.c4
-rw-r--r--net/decnet/dn_route.c16
-rw-r--r--net/ipv4/ip_gre.c2
-rw-r--r--net/ipv4/ip_output.c16
-rw-r--r--net/ipv4/ip_tunnel.c2
-rw-r--r--net/ipv4/ip_tunnel_core.c4
-rw-r--r--net/ipv4/ip_vti.c2
-rw-r--r--net/ipv4/ping.c15
-rw-r--r--net/ipv4/route.c6
-rw-r--r--net/ipv4/tcp_input.c10
-rw-r--r--net/ipv4/tcp_ipv4.c2
-rw-r--r--net/ipv4/tcp_minisocks.c2
-rw-r--r--net/ipv4/tcp_output.c2
-rw-r--r--net/ipv4/xfrm4_output.c2
-rw-r--r--net/ipv6/inet6_connection_sock.c3
-rw-r--r--net/ipv6/ip6_gre.c10
-rw-r--r--net/ipv6/ip6_output.c2
-rw-r--r--net/ipv6/route.c19
-rw-r--r--net/ipv6/sit.c5
-rw-r--r--net/ipv6/tcp_ipv6.c2
-rw-r--r--net/ipv6/xfrm6_output.c2
-rw-r--r--net/iucv/af_iucv.c4
-rw-r--r--net/key/af_key.c2
-rw-r--r--net/l2tp/l2tp_core.c4
-rw-r--r--net/l2tp/l2tp_ip.c2
-rw-r--r--net/l2tp/l2tp_ppp.c4
-rw-r--r--net/netfilter/nf_conntrack_core.c1
-rw-r--r--net/netfilter/nf_conntrack_pptp.c20
-rw-r--r--net/netfilter/nf_conntrack_proto_gre.c3
-rw-r--r--net/netfilter/nf_tables_core.c3
-rw-r--r--net/netfilter/nft_cmp.c2
-rw-r--r--net/netlink/af_netlink.c4
-rw-r--r--net/netrom/af_netrom.c2
-rw-r--r--net/nfc/llcp_core.c2
-rw-r--r--net/openvswitch/vport-gre.c2
-rw-r--r--net/packet/af_packet.c6
-rw-r--r--net/phonet/pep-gprs.c4
-rw-r--r--net/phonet/pep.c8
-rw-r--r--net/rds/tcp.h4
-rw-r--r--net/rds/tcp_listen.c6
-rw-r--r--net/rds/tcp_recv.c8
-rw-r--r--net/rose/af_rose.c2
-rw-r--r--net/rxrpc/ar-input.c6
-rw-r--r--net/rxrpc/ar-internal.h2
-rw-r--r--net/sctp/associola.c82
-rw-r--r--net/sctp/protocol.c2
-rw-r--r--net/sctp/sm_statefuns.c2
-rw-r--r--net/sctp/socket.c14
-rw-r--r--net/sctp/ulpevent.c8
-rw-r--r--net/sctp/ulpqueue.c4
-rw-r--r--net/sunrpc/svcsock.c12
-rw-r--r--net/sunrpc/xprtsock.c8
-rw-r--r--net/tipc/server.c4
-rw-r--r--net/tipc/socket.c6
-rw-r--r--net/unix/af_unix.c6
-rw-r--r--net/vmw_vsock/vmci_transport_notify.c2
-rw-r--r--net/vmw_vsock/vmci_transport_notify_qstate.c4
-rw-r--r--net/x25/af_x25.c2
-rw-r--r--net/x25/x25_in.c2
-rw-r--r--net/xfrm/xfrm_policy.c2
-rw-r--r--scripts/Makefile.build12
-rw-r--r--scripts/bootgraph.pl42
-rw-r--r--scripts/coccinelle/api/ptr_ret.cocci14
-rw-r--r--scripts/coccinelle/misc/memcpy-assign.cocci103
-rwxr-xr-xscripts/mkcompile_h2
-rwxr-xr-xscripts/objdiff141
-rwxr-xr-xscripts/tags.sh9
-rw-r--r--security/integrity/evm/evm_crypto.c2
-rw-r--r--security/integrity/evm/evm_main.c2
-rw-r--r--security/integrity/integrity_audit.c2
-rw-r--r--security/lsm_audit.c11
-rw-r--r--security/tomoyo/realpath.c4
-rw-r--r--virt/kvm/ioapic.c25
452 files changed, 6970 insertions, 5898 deletions
diff --git a/Documentation/dontdiff b/Documentation/dontdiff
index b89a739a3276..9de9813d0ec5 100644
--- a/Documentation/dontdiff
+++ b/Documentation/dontdiff
@@ -1,5 +1,6 @@
 *.a
 *.aux
+*.bc
 *.bin
 *.bz2
 *.cis
@@ -21,6 +22,7 @@
 *.i
 *.jpeg
 *.ko
+*.ll
 *.log
 *.lst
 *.lzma
@@ -35,6 +37,7 @@
 *.out
 *.patch
 *.pdf
+*.plist
 *.png
 *.pot
 *.ps
diff --git a/Documentation/filesystems/Locking b/Documentation/filesystems/Locking
index efca5c1bbb10..eba790134253 100644
--- a/Documentation/filesystems/Locking
+++ b/Documentation/filesystems/Locking
@@ -202,7 +202,7 @@ prototypes:
 					unsigned long *);
 	int (*migratepage)(struct address_space *, struct page *, struct page *);
 	int (*launder_page)(struct page *);
-	int (*is_partially_uptodate)(struct page *, read_descriptor_t *, unsigned long);
+	int (*is_partially_uptodate)(struct page *, unsigned long, unsigned long);
 	int (*error_remove_page)(struct address_space *, struct page *);
 	int (*swap_activate)(struct file *);
 	int (*swap_deactivate)(struct file *);
diff --git a/Documentation/filesystems/vfs.txt b/Documentation/filesystems/vfs.txt
index 94eb86287bcb..617f6d70c077 100644
--- a/Documentation/filesystems/vfs.txt
+++ b/Documentation/filesystems/vfs.txt
@@ -596,7 +596,7 @@ struct address_space_operations {
 	/* migrate the contents of a page to the specified target */
 	int (*migratepage) (struct page *, struct page *);
 	int (*launder_page) (struct page *);
-	int (*is_partially_uptodate) (struct page *, read_descriptor_t *,
+	int (*is_partially_uptodate) (struct page *, unsigned long,
 					unsigned long);
 	void (*is_dirty_writeback) (struct page *, bool *, bool *);
 	int (*error_remove_page) (struct mapping *mapping, struct page *page);
diff --git a/Makefile b/Makefile
index cf3e07516a04..60ccbfe750a2 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 3
-PATCHLEVEL = 14
+PATCHLEVEL = 15
 SUBLEVEL = 0
-EXTRAVERSION =
+EXTRAVERSION = -rc1
 NAME = Shuffling Zombie Juror
 
 # *DOCUMENTATION*
@@ -248,6 +248,11 @@ HOSTCXX = g++
 HOSTCFLAGS = -Wall -Wmissing-prototypes -Wstrict-prototypes -O2 -fomit-frame-pointer
 HOSTCXXFLAGS = -O2
 
+ifeq ($(shell $(HOSTCC) -v 2>&1 | grep -c "clang version"), 1)
+HOSTCFLAGS += -Wno-unused-value -Wno-unused-parameter \
+		-Wno-missing-field-initializers -fno-delete-null-pointer-checks
+endif
+
 # Decide whether to build built-in, modular, or both.
 # Normally, just do built-in.
 
@@ -324,6 +329,14 @@ endif
 
 export quiet Q KBUILD_VERBOSE
 
+ifneq ($(CC),)
+ifeq ($(shell $(CC) -v 2>&1 | grep -c "clang version"), 1)
+COMPILER := clang
+else
+COMPILER := gcc
+endif
+export COMPILER
+endif
 
 # Look for make include files relative to root of kernel src
 MAKEFLAGS += --include-dir=$(srctree)
@@ -383,7 +396,7 @@ KBUILD_CFLAGS := -Wall -Wundef -Wstrict-prototypes -Wno-trigraphs \
 		   -fno-strict-aliasing -fno-common \
 		   -Werror-implicit-function-declaration \
 		   -Wno-format-security \
-		   -fno-delete-null-pointer-checks
+		   $(call cc-option,-fno-delete-null-pointer-checks,)
 KBUILD_AFLAGS_KERNEL :=
 KBUILD_CFLAGS_KERNEL :=
 KBUILD_AFLAGS := -D__ASSEMBLY__
@@ -415,8 +428,9 @@ export MODVERDIR := $(if $(KBUILD_EXTMOD),$(firstword $(KBUILD_EXTMOD))/).tmp_ve
 
 # Files to ignore in find ... statements
 
-RCS_FIND_IGNORE := \( -name SCCS -o -name BitKeeper -o -name .svn -o -name CVS \
-		-o -name .pc -o -name .hg -o -name .git \) -prune -o
+export RCS_FIND_IGNORE := \( -name SCCS -o -name BitKeeper -o -name .svn -o \
+			-name CVS -o -name .pc -o -name .hg -o -name .git \) \
+			-prune -o
 export RCS_TAR_IGNORE := --exclude SCCS --exclude BitKeeper --exclude .svn \
 			--exclude CVS --exclude .pc --exclude .hg --exclude .git
 
@@ -623,9 +637,24 @@ endif
 endif
 KBUILD_CFLAGS += $(stackp-flag)
 
+ifeq ($(COMPILER),clang)
+KBUILD_CPPFLAGS += $(call cc-option,-Qunused-arguments,)
+KBUILD_CPPFLAGS += $(call cc-option,-Wno-unknown-warning-option,)
+KBUILD_CFLAGS += $(call cc-disable-warning, unused-variable)
+KBUILD_CFLAGS += $(call cc-disable-warning, format-invalid-specifier)
+KBUILD_CFLAGS += $(call cc-disable-warning, gnu)
+# Quiet clang warning: comparison of unsigned expression < 0 is always false
+KBUILD_CFLAGS += $(call cc-disable-warning, tautological-compare)
+# CLANG uses a _MergedGlobals as optimization, but this breaks modpost, as the
+# source of a reference will be _MergedGlobals and not on of the whitelisted names.
+# See modpost pattern 2
+KBUILD_CFLAGS += $(call cc-option, -mno-global-merge,)
+else
+
 # This warning generated too much noise in a regular build.
 # Use make W=1 to enable this warning (see scripts/Makefile.build)
 KBUILD_CFLAGS += $(call cc-disable-warning, unused-but-set-variable)
+endif
 
 ifdef CONFIG_FRAME_POINTER
 KBUILD_CFLAGS += -fno-omit-frame-pointer -fno-optimize-sibling-calls
@@ -1075,7 +1104,7 @@ CLEAN_DIRS += $(MODVERDIR)
 
 # Directories & files removed with 'make mrproper'
 MRPROPER_DIRS += include/config usr/include include/generated \
-		arch/*/include/generated
+		arch/*/include/generated .tmp_objdiff
 MRPROPER_FILES += .config .config.old .version .old_version $(version_h) \
 		Module.symvers tags TAGS cscope* GPATH GTAGS GRTAGS GSYMS \
 		signing_key.priv signing_key.x509 x509.genkey \
diff --git a/arch/alpha/Kconfig b/arch/alpha/Kconfig
index f6c6b345388c..b7ff9a318c31 100644
--- a/arch/alpha/Kconfig
+++ b/arch/alpha/Kconfig
@@ -22,6 +22,7 @@ config ALPHA
 	select GENERIC_SMP_IDLE_THREAD
 	select GENERIC_STRNCPY_FROM_USER
 	select GENERIC_STRNLEN_USER
+	select HAVE_ARCH_AUDITSYSCALL
 	select HAVE_MOD_ARCH_SPECIFIC
 	select MODULES_USE_ELF_RELA
 	select ODD_RT_SIGACTION
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 5db05f6a0412..ab438cb5af55 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -24,6 +24,7 @@ config ARM
 	select GENERIC_STRNCPY_FROM_USER
 	select GENERIC_STRNLEN_USER
 	select HARDIRQS_SW_RESEND
+	select HAVE_ARCH_AUDITSYSCALL if (AEABI && !OABI_COMPAT)
 	select HAVE_ARCH_JUMP_LABEL if !XIP_KERNEL
 	select HAVE_ARCH_KGDB
 	select HAVE_ARCH_SECCOMP_FILTER if (AEABI && !OABI_COMPAT)
diff --git a/arch/arm/include/asm/syscall.h b/arch/arm/include/asm/syscall.h
index 73ddd7239b33..4651f6999b7d 100644
--- a/arch/arm/include/asm/syscall.h
+++ b/arch/arm/include/asm/syscall.h
@@ -7,7 +7,7 @@
 #ifndef _ASM_ARM_SYSCALL_H
 #define _ASM_ARM_SYSCALL_H
 
-#include <linux/audit.h> /* for AUDIT_ARCH_* */
+#include <uapi/linux/audit.h> /* for AUDIT_ARCH_* */
 #include <linux/elf.h> /* for ELF_EM */
 #include <linux/err.h>
 #include <linux/sched.h>
@@ -103,8 +103,7 @@ static inline void syscall_set_arguments(struct task_struct *task,
 	memcpy(&regs->ARM_r0 + i, args, n * sizeof(args[0]));
 }
 
-static inline int syscall_get_arch(struct task_struct *task,
-				   struct pt_regs *regs)
+static inline int syscall_get_arch(void)
 {
 	/* ARM tasks don't change audit architectures on the fly. */
 	return AUDIT_ARCH_ARM;
diff --git a/arch/blackfin/Kconfig b/arch/blackfin/Kconfig
index 9ceccef9c649..f81e7b989fff 100644
--- a/arch/blackfin/Kconfig
+++ b/arch/blackfin/Kconfig
@@ -34,6 +34,7 @@ config BLACKFIN
 	select ARCH_WANT_IPC_PARSE_VERSION
 	select GENERIC_ATOMIC64
 	select GENERIC_IRQ_PROBE
+	select GENERIC_IRQ_SHOW
 	select HAVE_NMI_WATCHDOG if NMI_WATCHDOG
 	select GENERIC_SMP_IDLE_THREAD
 	select ARCH_USES_GETTIMEOFFSET if !GENERIC_CLOCKEVENTS
@@ -51,9 +52,6 @@ config GENERIC_BUG
 config ZONE_DMA
 	def_bool y
 
-config GENERIC_GPIO
-	def_bool y
-
 config FORCE_MAX_ZONEORDER
 	int
 	default "14"
@@ -870,14 +868,6 @@ config SYS_BFIN_SPINLOCK_L1
 	  If enabled, sys_bfin_spinlock function is linked
 	  into L1 instruction memory. (less latency)
 
-config IP_CHECKSUM_L1
-	bool "Locate IP Checksum function in L1 Memory"
-	default n
-	depends on !SMP
-	help
-	  If enabled, the IP Checksum function is linked
-	  into L1 instruction memory. (less latency)
-
 config CACHELINE_ALIGNED_L1
 	bool "Locate cacheline_aligned data to L1 Data Memory"
 	default y if !BF54x
diff --git a/arch/blackfin/include/asm/bfin_crc.h b/arch/blackfin/include/asm/bfin_crc.h
deleted file mode 100644
index 75cef4dc85a1..000000000000
--- a/arch/blackfin/include/asm/bfin_crc.h
+++ /dev/null
@@ -1,125 +0,0 @@
-/*
- * bfin_crc.h - interface to Blackfin CRC controllers
- *
- * Copyright 2012 Analog Devices Inc.
- *
- * Licensed under the GPL-2 or later.
- */
-
-#ifndef __BFIN_CRC_H__
-#define __BFIN_CRC_H__
-
-/* Function driver which use hardware crc must initialize the structure */
-struct crc_info {
-	/* Input data address */
-	unsigned char *in_addr;
-	/* Output data address */
-	unsigned char *out_addr;
-	/* Input or output bytes */
-	unsigned long datasize;
-	union {
-		/* CRC to compare with that of input buffer */
-		unsigned long crc_compare;
-		/* Value to compare with input data */
-		unsigned long val_verify;
-		/* Value to fill */
-		unsigned long val_fill;
-	};
-	/* Value to program the 32b CRC Polynomial */
-	unsigned long crc_poly;
-	union {
-		/* CRC calculated from the input data */
-		unsigned long crc_result;
-		/* First failed position to verify input data */
-		unsigned long pos_verify;
-	};
-	/* CRC mirror flags */
-	unsigned int bitmirr:1;
-	unsigned int bytmirr:1;
-	unsigned int w16swp:1;
-	unsigned int fdsel:1;
-	unsigned int rsltmirr:1;
-	unsigned int polymirr:1;
-	unsigned int cmpmirr:1;
-};
-
-/* Userspace interface */
-#define CRC_IOC_MAGIC		'C'
-#define CRC_IOC_CALC_CRC	_IOWR('C', 0x01, unsigned int)
-#define CRC_IOC_MEMCPY_CRC	_IOWR('C', 0x02, unsigned int)
-#define CRC_IOC_VERIFY_VAL	_IOWR('C', 0x03, unsigned int)
-#define CRC_IOC_FILL_VAL	_IOWR('C', 0x04, unsigned int)
-
-
-#ifdef __KERNEL__
-
-#include <linux/types.h>
-#include <linux/spinlock.h>
-#include <linux/miscdevice.h>
-
-struct crc_register {
-	u32 control;
-	u32 datacnt;
-	u32 datacntrld;
-	u32 __pad_1[2];
-	u32 compare;
-	u32 fillval;
-	u32 datafifo;
-	u32 intren;
-	u32 intrenset;
-	u32 intrenclr;
-	u32 poly;
-	u32 __pad_2[4];
-	u32 status;
-	u32 datacntcap;
-	u32 __pad_3;
-	u32 result;
-	u32 curresult;
-	u32 __pad_4[3];
-	u32 revid;
-};
-
-/* CRC_STATUS Masks */
-#define CMPERR		0x00000002	/* Compare error */
-#define DCNTEXP		0x00000010	/* datacnt register expired */
-#define IBR		0x00010000	/* Input buffer ready */
-#define OBR		0x00020000	/* Output buffer ready */
-#define IRR		0x00040000	/* Immediate result readt */
-#define LUTDONE		0x00080000	/* Look-up table generation done */
-#define FSTAT		0x00700000	/* FIFO status */
-#define MAX_FIFO	4		/* Max fifo size */
-
-/* CRC_CONTROL Masks */
-#define BLKEN		0x00000001	/* Block enable */
-#define OPMODE		0x000000F0	/* Operation mode */
-#define OPMODE_OFFSET	4		/* Operation mode mask offset*/
-#define MODE_DMACPY_CRC	1		/* MTM CRC compute and compare */
-#define MODE_DATA_FILL	2		/* MTM data fill */
-#define MODE_CALC_CRC	3		/* MSM CRC compute and compare */
-#define MODE_DATA_VERIFY	4	/* MSM data verify */
-#define AUTOCLRZ	0x00000100	/* Auto clear to zero */
-#define AUTOCLRF	0x00000200	/* Auto clear to one */
-#define OBRSTALL	0x00001000	/* Stall on output buffer ready */
-#define IRRSTALL	0x00002000	/* Stall on immediate result ready */
-#define BITMIRR		0x00010000	/* Mirror bits within each byte of 32-bit input data */
-#define BITMIRR_OFFSET	16		/* Mirror bits offset */
-#define BYTMIRR		0x00020000	/* Mirror bytes of 32-bit input data */
-#define BYTMIRR_OFFSET	17		/* Mirror bytes offset */
-#define W16SWP		0x00040000	/* Mirror uppper and lower 16-bit word of 32-bit input data */
-#define W16SWP_OFFSET	18		/* Mirror 16-bit word offset */
-#define FDSEL		0x00080000	/* FIFO is written after input data is mirrored */
-#define FDSEL_OFFSET	19		/* Mirror FIFO offset */
-#define RSLTMIRR	0x00100000	/* CRC result registers are mirrored. */
-#define RSLTMIRR_OFFSET	20		/* Mirror CRC result offset. */
-#define POLYMIRR	0x00200000	/* CRC poly register is mirrored. */
-#define POLYMIRR_OFFSET	21		/* Mirror CRC poly offset. */
-#define CMPMIRR		0x00400000	/* CRC compare register is mirrored. */
-#define CMPMIRR_OFFSET	22		/* Mirror CRC compare offset. */
-
-/* CRC_INTREN Masks */
-#define CMPERRI		0x02		/* CRC_ERROR_INTR */
-#define DCNTEXPI	0x10		/* CRC_STATUS_INTR */
-
-#endif
-
-#endif
diff --git a/arch/blackfin/include/asm/dma.h b/arch/blackfin/include/asm/dma.h
index 40e9c2bbc6e3..8d1e4c2d2c36 100644
--- a/arch/blackfin/include/asm/dma.h
+++ b/arch/blackfin/include/asm/dma.h
@@ -316,8 +316,6 @@ static inline void disable_dma(unsigned int channel)
 }
 static inline void enable_dma(unsigned int channel)
 {
-	dma_ch[channel].regs->curr_x_count = 0;
-	dma_ch[channel].regs->curr_y_count = 0;
 	dma_ch[channel].regs->cfg |= DMAEN;
 }
 int set_dma_callback(unsigned int channel, irq_handler_t callback, void *data);
diff --git a/arch/blackfin/include/asm/portmux.h b/arch/blackfin/include/asm/portmux.h
index 7aa20436e799..c8f0939419be 100644
--- a/arch/blackfin/include/asm/portmux.h
+++ b/arch/blackfin/include/asm/portmux.h
@@ -18,16 +18,14 @@
 #define P_DONTCARE 0x1000
 
 #ifdef CONFIG_PINCTRL
-#include <asm/irq_handler.h>
+int bfin_internal_set_wake(unsigned int irq, unsigned int state);
 
 #define gpio_pint_regs bfin_pint_regs
 #define adi_internal_set_wake bfin_internal_set_wake
 
-#define peripheral_request(per, label) 0
+#define peripheral_request(per, label) (0)
 #define peripheral_free(per)
-#define peripheral_request_list(per, label) \
-	(pdev ? (IS_ERR(devm_pinctrl_get_select_default(&pdev->dev)) \
-		? -EINVAL : 0) : 0)
+#define peripheral_request_list(per, label) (0)
 #define peripheral_free_list(per)
 #else
 int peripheral_request(unsigned short per, const char *label);
@@ -39,7 +37,7 @@ void peripheral_free_list(const unsigned short per[]);
 #include <linux/err.h>
 #include <linux/pinctrl/pinctrl.h>
 #include <mach/portmux.h>
-#include <linux/gpio.h>
+#include <mach/gpio.h>
 
 #ifndef P_SPORT2_TFS
 #define P_SPORT2_TFS P_UNDEF
diff --git a/arch/blackfin/kernel/irqchip.c b/arch/blackfin/kernel/irqchip.c
index ff3d747154ac..0ba25764b8c0 100644
--- a/arch/blackfin/kernel/irqchip.c
+++ b/arch/blackfin/kernel/irqchip.c
@@ -11,6 +11,7 @@
 #include <linux/kallsyms.h>
 #include <linux/interrupt.h>
 #include <linux/irq.h>
+#include <linux/seq_file.h>
 #include <asm/irq_handler.h>
 #include <asm/trace.h>
 #include <asm/pda.h>
@@ -33,37 +34,15 @@ static struct irq_desc bad_irq_desc = {
 #endif
 
 #ifdef CONFIG_PROC_FS
-int show_interrupts(struct seq_file *p, void *v)
+int arch_show_interrupts(struct seq_file *p, int prec)
 {
-	int i = *(loff_t *) v, j;
-	struct irqaction *action;
-	unsigned long flags;
-
-	if (i < NR_IRQS) {
-		struct irq_desc *desc = irq_to_desc(i);
-
-		raw_spin_lock_irqsave(&desc->lock, flags);
-		action = desc->action;
-		if (!action)
-			goto skip;
-		seq_printf(p, "%3d: ", i);
-		for_each_online_cpu(j)
-			seq_printf(p, "%10u ", kstat_irqs_cpu(i, j));
-		seq_printf(p, " %8s", irq_desc_get_chip(desc)->name);
-		seq_printf(p, " %s", action->name);
-		for (action = action->next; action; action = action->next)
-			seq_printf(p, " %s", action->name);
-
-		seq_putc(p, '\n');
- skip:
-		raw_spin_unlock_irqrestore(&desc->lock, flags);
-	} else if (i == NR_IRQS) {
-		seq_printf(p, "NMI: ");
-		for_each_online_cpu(j)
-			seq_printf(p, "%10u ", cpu_pda[j].__nmi_count);
-		seq_printf(p, " CORE Non Maskable Interrupt\n");
-		seq_printf(p, "Err: %10u\n", atomic_read(&irq_err_count));
-	}
+	int j;
+
+	seq_printf(p, "%*s: ", prec, "NMI");
+	for_each_online_cpu(j)
+		seq_printf(p, "%10u ", cpu_pda[j].__nmi_count);
+	seq_printf(p, " CORE Non Maskable Interrupt\n");
+	seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
 	return 0;
 }
 #endif
diff --git a/arch/blackfin/mach-bf518/boards/ezbrd.c b/arch/blackfin/mach-bf518/boards/ezbrd.c
index f8047ca3b339..d022112927c2 100644
--- a/arch/blackfin/mach-bf518/boards/ezbrd.c
+++ b/arch/blackfin/mach-bf518/boards/ezbrd.c
@@ -36,7 +36,7 @@ const char bfin_board_name[] = "ADI BF518F-EZBRD";
  * Driver needs to know address, irq and flag pin.
  */
 
-#if defined(CONFIG_MTD_PHYSMAP) || defined(CONFIG_MTD_PHYSMAP_MODULE)
+#if IS_ENABLED(CONFIG_MTD_PHYSMAP)
 static struct mtd_partition ezbrd_partitions[] = {
 	{
 		.name = "bootloader(nor)",
@@ -61,7 +61,7 @@ static struct physmap_flash_data ezbrd_flash_data = {
 
 static struct resource ezbrd_flash_resource = {
 	.start = 0x20000000,
-#if defined(CONFIG_SPI_BFIN5XX) || defined(CONFIG_SPI_BFIN5XX_MODULE)
+#if IS_ENABLED(CONFIG_SPI_BFIN5XX)
 	.end = 0x202fffff,
 #else
 	.end = 0x203fffff,
@@ -80,14 +80,14 @@ static struct platform_device ezbrd_flash_device = {
 };
 #endif
 
-#if defined(CONFIG_RTC_DRV_BFIN) || defined(CONFIG_RTC_DRV_BFIN_MODULE)
+#if IS_ENABLED(CONFIG_RTC_DRV_BFIN)
 static struct platform_device rtc_device = {
 	.name = "rtc-bfin",
 	.id = -1,
 };
 #endif
 
-#if defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE)
+#if IS_ENABLED(CONFIG_BFIN_MAC)
 #include <linux/bfin_mac.h>
 static const unsigned short bfin_mac_peripherals[] = {
 	P_MII0_ETxD0,
@@ -105,7 +105,7 @@ static const unsigned short bfin_mac_peripherals[] = {
 
 static struct bfin_phydev_platform_data bfin_phydev_data[] = {
 	{
-#if defined(CONFIG_NET_DSA_KSZ8893M) || defined(CONFIG_NET_DSA_KSZ8893M_MODULE)
+#if IS_ENABLED(CONFIG_NET_DSA_KSZ8893M)
 		.addr = 3,
 #else
 		.addr = 1,
@@ -119,7 +119,7 @@ static struct bfin_mii_bus_platform_data bfin_mii_bus_data = {
 	.phydev_data = bfin_phydev_data,
 	.phy_mode = PHY_INTERFACE_MODE_MII,
 	.mac_peripherals = bfin_mac_peripherals,
-#if defined(CONFIG_NET_DSA_KSZ8893M) || defined(CONFIG_NET_DSA_KSZ8893M_MODULE)
+#if IS_ENABLED(CONFIG_NET_DSA_KSZ8893M)
 	.phy_mask = 0xfff7, /* Only probe the port phy connect to the on chip MAC */
 #endif
 	.vlan1_mask = 1,
@@ -140,7 +140,7 @@ static struct platform_device bfin_mac_device = {
 	}
 };
 
-#if defined(CONFIG_NET_DSA_KSZ8893M) || defined(CONFIG_NET_DSA_KSZ8893M_MODULE)
+#if IS_ENABLED(CONFIG_NET_DSA_KSZ8893M)
 static struct dsa_chip_data ksz8893m_switch_chip_data = {
 	.mii_bus = &bfin_mii_bus.dev,
 	.port_names = {
@@ -165,8 +165,7 @@ static struct platform_device ksz8893m_switch_device = {
 #endif
 #endif
 
-#if defined(CONFIG_MTD_M25P80) \
-	|| defined(CONFIG_MTD_M25P80_MODULE)
+#if IS_ENABLED(CONFIG_MTD_M25P80)
 static struct mtd_partition bfin_spi_flash_partitions[] = {
 	{
 		.name = "bootloader(spi)",
@@ -193,13 +192,13 @@ static struct bfin5xx_spi_chip spi_flash_chip_info = {
 };
 #endif
 
-#if defined(CONFIG_MMC_SPI) || defined(CONFIG_MMC_SPI_MODULE)
+#if IS_ENABLED(CONFIG_MMC_SPI)
 static struct bfin5xx_spi_chip mmc_spi_chip_info = {
 	.enable_dma = 0,
 };
 #endif
 
-#if defined(CONFIG_TOUCHSCREEN_AD7877) || defined(CONFIG_TOUCHSCREEN_AD7877_MODULE)
+#if IS_ENABLED(CONFIG_TOUCHSCREEN_AD7877)
 static const struct ad7877_platform_data bfin_ad7877_ts_info = {
 	.model = 7877,
 	.vref_delay_usecs = 50, /* internal, no capacitor */
@@ -216,8 +215,7 @@ static const struct ad7877_platform_data bfin_ad7877_ts_info = {
 #endif
 
 static struct spi_board_info bfin_spi_board_info[] __initdata = {
-#if defined(CONFIG_MTD_M25P80) \
-	|| defined(CONFIG_MTD_M25P80_MODULE)
+#if IS_ENABLED(CONFIG_MTD_M25P80)
 	{
 		/* the modalias must be the same as spi device driver name */
 		.modalias = "m25p80", /* Name of spi_driver for this device */
@@ -230,9 +228,8 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
 	},
 #endif
 
-#if defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE)
-#if defined(CONFIG_NET_DSA_KSZ8893M) \
-	|| defined(CONFIG_NET_DSA_KSZ8893M_MODULE)
+#if IS_ENABLED(CONFIG_BFIN_MAC)
+#if IS_ENABLED(CONFIG_NET_DSA_KSZ8893M)
 	{
 		.modalias = "ksz8893m",
 		.max_speed_hz = 5000000,
@@ -244,7 +241,7 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
 #endif
 #endif
 
-#if defined(CONFIG_MMC_SPI) || defined(CONFIG_MMC_SPI_MODULE)
+#if IS_ENABLED(CONFIG_MMC_SPI)
 	{
 		.modalias = "mmc_spi",
 		.max_speed_hz = 25000000, /* max spi clock (SCK) speed in HZ */
@@ -254,7 +251,7 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
 		.mode = SPI_MODE_3,
 	},
 #endif
-#if defined(CONFIG_TOUCHSCREEN_AD7877) || defined(CONFIG_TOUCHSCREEN_AD7877_MODULE)
+#if IS_ENABLED(CONFIG_TOUCHSCREEN_AD7877)
 	{
 		.modalias = "ad7877",
 		.platform_data = &bfin_ad7877_ts_info,
@@ -264,7 +261,7 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
 		.chip_select = 2,
 	},
 #endif
-#if defined(CONFIG_SND_SOC_WM8731) || defined(CONFIG_SND_SOC_WM8731_MODULE) \
+#if IS_ENABLED(CONFIG_SND_SOC_WM8731) \
 	&& defined(CONFIG_SND_SOC_WM8731_SPI)
 	{
 		.modalias = "wm8731",
@@ -274,7 +271,7 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
 		.mode = SPI_MODE_0,
 	},
 #endif
-#if defined(CONFIG_SPI_SPIDEV) || defined(CONFIG_SPI_SPIDEV_MODULE)
+#if IS_ENABLED(CONFIG_SPI_SPIDEV)
 	{
 		.modalias = "spidev",
 		.max_speed_hz = 3125000, /* max spi clock (SCK) speed in HZ */
@@ -282,7 +279,7 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
 		.chip_select = 1,
 	},
 #endif
-#if defined(CONFIG_FB_BFIN_LQ035Q1) || defined(CONFIG_FB_BFIN_LQ035Q1_MODULE)
+#if IS_ENABLED(CONFIG_FB_BFIN_LQ035Q1)
 	{
 		.modalias = "bfin-lq035q1-spi",
 		.max_speed_hz = 20000000, /* max spi clock (SCK) speed in HZ */
@@ -294,7 +291,7 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
 };
 
 /* SPI controller data */
-#if defined(CONFIG_SPI_BFIN5XX) || defined(CONFIG_SPI_BFIN5XX_MODULE)
+#if IS_ENABLED(CONFIG_SPI_BFIN5XX)
 /* SPI (0) */
 static struct bfin5xx_spi_master bfin_spi0_info = {
 	.num_chipselect = 6,
@@ -366,7 +363,7 @@ static struct platform_device bfin_spi1_device = {
 };
 #endif /* spi master and devices */
 
-#if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE)
+#if IS_ENABLED(CONFIG_SERIAL_BFIN)
 #ifdef CONFIG_SERIAL_BFIN_UART0
 static struct resource bfin_uart0_resources[] = {
 	{
@@ -465,7 +462,7 @@ static struct platform_device bfin_uart1_device = {
 #endif
 #endif
 
-#if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE)
+#if IS_ENABLED(CONFIG_BFIN_SIR)
 #ifdef CONFIG_BFIN_SIR0
 static struct resource bfin_sir0_resources[] = {
 	{
@@ -520,7 +517,7 @@ static struct platform_device bfin_sir1_device = {
 #endif
 #endif
 
-#if defined(CONFIG_SND_BF5XX_I2S) || defined(CONFIG_SND_BF5XX_I2S_MODULE)
+#if IS_ENABLED(CONFIG_SND_BF5XX_I2S)
 static struct platform_device bfin_i2s = {
 	.name = "bfin-i2s",
 	.id = CONFIG_SND_BF5XX_SPORT_NUM,
@@ -528,7 +525,7 @@ static struct platform_device bfin_i2s = {
 };
 #endif
 
-#if defined(CONFIG_I2C_BLACKFIN_TWI) || defined(CONFIG_I2C_BLACKFIN_TWI_MODULE)
+#if IS_ENABLED(CONFIG_I2C_BLACKFIN_TWI)
 static const u16 bfin_twi0_pins[] = {P_TWI0_SCL, P_TWI0_SDA, 0};
 
 static struct resource bfin_twi0_resource[] = {
@@ -556,25 +553,25 @@ static struct platform_device i2c_bfin_twi_device = {
 #endif
 
 static struct i2c_board_info __initdata bfin_i2c_board_info[] = {
-#if defined(CONFIG_BFIN_TWI_LCD) || defined(CONFIG_BFIN_TWI_LCD_MODULE)
+#if IS_ENABLED(CONFIG_BFIN_TWI_LCD)
 	{
 		I2C_BOARD_INFO("pcf8574_lcd", 0x22),
 	},
 #endif
-#if defined(CONFIG_INPUT_PCF8574) || defined(CONFIG_INPUT_PCF8574_MODULE)
+#if IS_ENABLED(CONFIG_INPUT_PCF8574)
 	{
 		I2C_BOARD_INFO("pcf8574_keypad", 0x27),
 		.irq = IRQ_PF8,
 	},
 #endif
-#if defined(CONFIG_SND_SOC_SSM2602) || defined(CONFIG_SND_SOC_SSM2602_MODULE)
+#if IS_ENABLED(CONFIG_SND_SOC_SSM2602)
 	{
 		I2C_BOARD_INFO("ssm2602", 0x1b),
 	},
 #endif
 };
 
-#if defined(CONFIG_SERIAL_BFIN_SPORT) || defined(CONFIG_SERIAL_BFIN_SPORT_MODULE)
+#if IS_ENABLED(CONFIG_SERIAL_BFIN_SPORT)
 #ifdef CONFIG_SERIAL_BFIN_SPORT0_UART
 static struct resource bfin_sport0_uart_resources[] = {
 	{
@@ -645,7 +642,7 @@ static struct platform_device bfin_sport1_uart_device = {
 #endif
 #endif
 
-#if defined(CONFIG_KEYBOARD_GPIO) || defined(CONFIG_KEYBOARD_GPIO_MODULE)
+#if IS_ENABLED(CONFIG_KEYBOARD_GPIO)
 #include <linux/input.h>
 #include <linux/gpio_keys.h>
 
@@ -667,7 +664,7 @@ static struct platform_device bfin_device_gpiokeys = {
 };
 #endif
 
-#if defined(CONFIG_SDH_BFIN) || defined(CONFIG_SDH_BFIN_MODULE)
+#if IS_ENABLED(CONFIG_SDH_BFIN)
 
 static struct bfin_sd_host bfin_sdh_data = {
 	.dma_chan = CH_RSI,
@@ -710,24 +707,24 @@ static struct platform_device *stamp_devices[] __initdata = {
 
 	&bfin_dpmc,
 
-#if defined(CONFIG_RTC_DRV_BFIN) || defined(CONFIG_RTC_DRV_BFIN_MODULE)
+#if IS_ENABLED(CONFIG_RTC_DRV_BFIN)
 	&rtc_device,
 #endif
 
-#if defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE)
+#if IS_ENABLED(CONFIG_BFIN_MAC)
 	&bfin_mii_bus,
 	&bfin_mac_device,
-#if defined(CONFIG_NET_DSA_KSZ8893M) || defined(CONFIG_NET_DSA_KSZ8893M_MODULE)
+#if IS_ENABLED(CONFIG_NET_DSA_KSZ8893M)
 	&ksz8893m_switch_device,
 #endif
 #endif
 
-#if defined(CONFIG_SPI_BFIN5XX) || defined(CONFIG_SPI_BFIN5XX_MODULE)
+#if IS_ENABLED(CONFIG_SPI_BFIN5XX)
 	&bfin_spi0_device,
 	&bfin_spi1_device,
 #endif
 
-#if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE)
+#if IS_ENABLED(CONFIG_SERIAL_BFIN)
 #ifdef CONFIG_SERIAL_BFIN_UART0
 	&bfin_uart0_device,
 #endif
@@ -736,7 +733,7 @@ static struct platform_device *stamp_devices[] __initdata = {
 #endif
 #endif
 
-#if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE)
+#if IS_ENABLED(CONFIG_BFIN_SIR)
 #ifdef CONFIG_BFIN_SIR0
 	&bfin_sir0_device,
 #endif
@@ -745,15 +742,15 @@ static struct platform_device *stamp_devices[] __initdata = {
 #endif
 #endif
 
-#if defined(CONFIG_I2C_BLACKFIN_TWI) || defined(CONFIG_I2C_BLACKFIN_TWI_MODULE)
+#if IS_ENABLED(CONFIG_I2C_BLACKFIN_TWI)
 	&i2c_bfin_twi_device,
 #endif
 
-#if defined(CONFIG_SND_BF5XX_I2S) || defined(CONFIG_SND_BF5XX_I2S_MODULE)
+#if IS_ENABLED(CONFIG_SND_BF5XX_I2S)
 	&bfin_i2s,
 #endif
 
-#if defined(CONFIG_SERIAL_BFIN_SPORT) || defined(CONFIG_SERIAL_BFIN_SPORT_MODULE)
+#if IS_ENABLED(CONFIG_SERIAL_BFIN_SPORT)
 #ifdef CONFIG_SERIAL_BFIN_SPORT0_UART
 	&bfin_sport0_uart_device,
 #endif
@@ -762,15 +759,15 @@ static struct platform_device *stamp_devices[] __initdata = {
 #endif
 #endif
 
-#if defined(CONFIG_KEYBOARD_GPIO) || defined(CONFIG_KEYBOARD_GPIO_MODULE)
+#if IS_ENABLED(CONFIG_KEYBOARD_GPIO)
 	&bfin_device_gpiokeys,
 #endif
 
-#if defined(CONFIG_SDH_BFIN) || defined(CONFIG_SDH_BFIN_MODULE)
+#if IS_ENABLED(CONFIG_SDH_BFIN)
 	&bf51x_sdh_device,
 #endif
 
-#if defined(CONFIG_MTD_PHYSMAP) || defined(CONFIG_MTD_PHYSMAP_MODULE)
+#if IS_ENABLED(CONFIG_MTD_PHYSMAP)
 	&ezbrd_flash_device,
 #endif
 };
@@ -784,7 +781,7 @@ static int __init ezbrd_init(void)
 	spi_register_board_info(bfin_spi_board_info, ARRAY_SIZE(bfin_spi_board_info));
 	/* setup BF518-EZBRD GPIO pin PG11 to AMS2, PG15 to AMS3. */
 	peripheral_request(P_AMS2, "ParaFlash");
-#if !defined(CONFIG_SPI_BFIN5XX) && !defined(CONFIG_SPI_BFIN5XX_MODULE)
+#if !IS_ENABLED(CONFIG_SPI_BFIN5XX)
 	peripheral_request(P_AMS3, "ParaFlash");
 #endif
 	return 0;
diff --git a/arch/blackfin/mach-bf518/boards/tcm-bf518.c b/arch/blackfin/mach-bf518/boards/tcm-bf518.c
index 0bedc737566b..240d5cb1f02c 100644
--- a/arch/blackfin/mach-bf518/boards/tcm-bf518.c
+++ b/arch/blackfin/mach-bf518/boards/tcm-bf518.c
@@ -36,7 +36,7 @@ const char bfin_board_name[] = "Bluetechnix TCM-BF518";
  * Driver needs to know address, irq and flag pin.
  */
 
-#if defined(CONFIG_MTD_PHYSMAP) || defined(CONFIG_MTD_PHYSMAP_MODULE)
+#if IS_ENABLED(CONFIG_MTD_PHYSMAP)
 static struct mtd_partition tcm_partitions[] = {
 	{
 		.name = "bootloader(nor)",
@@ -73,14 +73,14 @@ static struct platform_device tcm_flash_device = {
 };
 #endif
 
-#if defined(CONFIG_RTC_DRV_BFIN) || defined(CONFIG_RTC_DRV_BFIN_MODULE)
+#if IS_ENABLED(CONFIG_RTC_DRV_BFIN)
 static struct platform_device rtc_device = {
 	.name = "rtc-bfin",
 	.id = -1,
 };
 #endif
 
-#if defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE)
+#if IS_ENABLED(CONFIG_BFIN_MAC)
 #include <linux/bfin_mac.h>
 static const unsigned short bfin_mac_peripherals[] = P_MII0;
 
@@ -113,8 +113,7 @@ static struct platform_device bfin_mac_device = {
 };
 #endif
 
-#if defined(CONFIG_MTD_M25P80) \
-	|| defined(CONFIG_MTD_M25P80_MODULE)
+#if IS_ENABLED(CONFIG_MTD_M25P80)
 static struct mtd_partition bfin_spi_flash_partitions[] = {
 	{
 		.name = "bootloader(spi)",
@@ -141,13 +140,13 @@ static struct bfin5xx_spi_chip spi_flash_chip_info = {
 };
 #endif
 
-#if defined(CONFIG_MMC_SPI) || defined(CONFIG_MMC_SPI_MODULE)
+#if IS_ENABLED(CONFIG_MMC_SPI)
 static struct bfin5xx_spi_chip mmc_spi_chip_info = {
 	.enable_dma = 0,
 };
 #endif
 
-#if defined(CONFIG_TOUCHSCREEN_AD7877) || defined(CONFIG_TOUCHSCREEN_AD7877_MODULE)
+#if IS_ENABLED(CONFIG_TOUCHSCREEN_AD7877)
 static const struct ad7877_platform_data bfin_ad7877_ts_info = {
 	.model = 7877,
 	.vref_delay_usecs = 50, /* internal, no capacitor */
@@ -164,8 +163,7 @@ static const struct ad7877_platform_data bfin_ad7877_ts_info = {
 #endif
 
 static struct spi_board_info bfin_spi_board_info[] __initdata = {
-#if defined(CONFIG_MTD_M25P80) \
-	|| defined(CONFIG_MTD_M25P80_MODULE)
+#if IS_ENABLED(CONFIG_MTD_M25P80)
 	{
 		/* the modalias must be the same as spi device driver name */
 		.modalias = "m25p80", /* Name of spi_driver for this device */
@@ -178,7 +176,7 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
178 }, 176 },
179#endif 177#endif
180 178
181#if defined(CONFIG_MMC_SPI) || defined(CONFIG_MMC_SPI_MODULE) 179#if IS_ENABLED(CONFIG_MMC_SPI)
182 { 180 {
183 .modalias = "mmc_spi", 181 .modalias = "mmc_spi",
184 .max_speed_hz = 20000000, /* max spi clock (SCK) speed in HZ */ 182 .max_speed_hz = 20000000, /* max spi clock (SCK) speed in HZ */
@@ -188,7 +186,7 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
188 .mode = SPI_MODE_3, 186 .mode = SPI_MODE_3,
189 }, 187 },
190#endif 188#endif
191#if defined(CONFIG_TOUCHSCREEN_AD7877) || defined(CONFIG_TOUCHSCREEN_AD7877_MODULE) 189#if IS_ENABLED(CONFIG_TOUCHSCREEN_AD7877)
192 { 190 {
193 .modalias = "ad7877", 191 .modalias = "ad7877",
194 .platform_data = &bfin_ad7877_ts_info, 192 .platform_data = &bfin_ad7877_ts_info,
@@ -198,7 +196,7 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
198 .chip_select = 2, 196 .chip_select = 2,
199 }, 197 },
200#endif 198#endif
201#if defined(CONFIG_SND_SOC_WM8731) || defined(CONFIG_SND_SOC_WM8731_MODULE) \ 199#if IS_ENABLED(CONFIG_SND_SOC_WM8731) \
202 && defined(CONFIG_SND_SOC_WM8731_SPI) 200 && defined(CONFIG_SND_SOC_WM8731_SPI)
203 { 201 {
204 .modalias = "wm8731", 202 .modalias = "wm8731",
@@ -208,7 +206,7 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
208 .mode = SPI_MODE_0, 206 .mode = SPI_MODE_0,
209 }, 207 },
210#endif 208#endif
211#if defined(CONFIG_SPI_SPIDEV) || defined(CONFIG_SPI_SPIDEV_MODULE) 209#if IS_ENABLED(CONFIG_SPI_SPIDEV)
212 { 210 {
213 .modalias = "spidev", 211 .modalias = "spidev",
214 .max_speed_hz = 3125000, /* max spi clock (SCK) speed in HZ */ 212 .max_speed_hz = 3125000, /* max spi clock (SCK) speed in HZ */
@@ -216,7 +214,7 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
216 .chip_select = 1, 214 .chip_select = 1,
217 }, 215 },
218#endif 216#endif
219#if defined(CONFIG_FB_BFIN_LQ035Q1) || defined(CONFIG_FB_BFIN_LQ035Q1_MODULE) 217#if IS_ENABLED(CONFIG_FB_BFIN_LQ035Q1)
220 { 218 {
221 .modalias = "bfin-lq035q1-spi", 219 .modalias = "bfin-lq035q1-spi",
222 .max_speed_hz = 20000000, /* max spi clock (SCK) speed in HZ */ 220 .max_speed_hz = 20000000, /* max spi clock (SCK) speed in HZ */
@@ -228,7 +226,7 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
228}; 226};
229 227
230/* SPI controller data */ 228/* SPI controller data */
231#if defined(CONFIG_SPI_BFIN5XX) || defined(CONFIG_SPI_BFIN5XX_MODULE) 229#if IS_ENABLED(CONFIG_SPI_BFIN5XX)
232/* SPI (0) */ 230/* SPI (0) */
233static struct bfin5xx_spi_master bfin_spi0_info = { 231static struct bfin5xx_spi_master bfin_spi0_info = {
234 .num_chipselect = 6, 232 .num_chipselect = 6,
@@ -300,7 +298,7 @@ static struct platform_device bfin_spi1_device = {
300}; 298};
301#endif /* spi master and devices */ 299#endif /* spi master and devices */
302 300
303#if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE) 301#if IS_ENABLED(CONFIG_SERIAL_BFIN)
304#ifdef CONFIG_SERIAL_BFIN_UART0 302#ifdef CONFIG_SERIAL_BFIN_UART0
305static struct resource bfin_uart0_resources[] = { 303static struct resource bfin_uart0_resources[] = {
306 { 304 {
@@ -399,7 +397,7 @@ static struct platform_device bfin_uart1_device = {
399#endif 397#endif
400#endif 398#endif
401 399
402#if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE) 400#if IS_ENABLED(CONFIG_BFIN_SIR)
403#ifdef CONFIG_BFIN_SIR0 401#ifdef CONFIG_BFIN_SIR0
404static struct resource bfin_sir0_resources[] = { 402static struct resource bfin_sir0_resources[] = {
405 { 403 {
@@ -454,7 +452,7 @@ static struct platform_device bfin_sir1_device = {
454#endif 452#endif
455#endif 453#endif
456 454
457#if defined(CONFIG_I2C_BLACKFIN_TWI) || defined(CONFIG_I2C_BLACKFIN_TWI_MODULE) 455#if IS_ENABLED(CONFIG_I2C_BLACKFIN_TWI)
458static const u16 bfin_twi0_pins[] = {P_TWI0_SCL, P_TWI0_SDA, 0}; 456static const u16 bfin_twi0_pins[] = {P_TWI0_SCL, P_TWI0_SDA, 0};
459 457
460static struct resource bfin_twi0_resource[] = { 458static struct resource bfin_twi0_resource[] = {
@@ -482,12 +480,12 @@ static struct platform_device i2c_bfin_twi_device = {
482#endif 480#endif
483 481
484static struct i2c_board_info __initdata bfin_i2c_board_info[] = { 482static struct i2c_board_info __initdata bfin_i2c_board_info[] = {
485#if defined(CONFIG_BFIN_TWI_LCD) || defined(CONFIG_BFIN_TWI_LCD_MODULE) 483#if IS_ENABLED(CONFIG_BFIN_TWI_LCD)
486 { 484 {
487 I2C_BOARD_INFO("pcf8574_lcd", 0x22), 485 I2C_BOARD_INFO("pcf8574_lcd", 0x22),
488 }, 486 },
489#endif 487#endif
490#if defined(CONFIG_INPUT_PCF8574) || defined(CONFIG_INPUT_PCF8574_MODULE) 488#if IS_ENABLED(CONFIG_INPUT_PCF8574)
491 { 489 {
492 I2C_BOARD_INFO("pcf8574_keypad", 0x27), 490 I2C_BOARD_INFO("pcf8574_keypad", 0x27),
493 .irq = IRQ_PF8, 491 .irq = IRQ_PF8,
@@ -495,7 +493,7 @@ static struct i2c_board_info __initdata bfin_i2c_board_info[] = {
495#endif 493#endif
496}; 494};
497 495
498#if defined(CONFIG_SERIAL_BFIN_SPORT) || defined(CONFIG_SERIAL_BFIN_SPORT_MODULE) 496#if IS_ENABLED(CONFIG_SERIAL_BFIN_SPORT)
499#ifdef CONFIG_SERIAL_BFIN_SPORT0_UART 497#ifdef CONFIG_SERIAL_BFIN_SPORT0_UART
500static struct resource bfin_sport0_uart_resources[] = { 498static struct resource bfin_sport0_uart_resources[] = {
501 { 499 {
@@ -566,7 +564,7 @@ static struct platform_device bfin_sport1_uart_device = {
566#endif 564#endif
567#endif 565#endif
568 566
569#if defined(CONFIG_KEYBOARD_GPIO) || defined(CONFIG_KEYBOARD_GPIO_MODULE) 567#if IS_ENABLED(CONFIG_KEYBOARD_GPIO)
570#include <linux/input.h> 568#include <linux/input.h>
571#include <linux/gpio_keys.h> 569#include <linux/gpio_keys.h>
572 570
@@ -588,7 +586,7 @@ static struct platform_device bfin_device_gpiokeys = {
588}; 586};
589#endif 587#endif
590 588
591#if defined(CONFIG_SDH_BFIN) || defined(CONFIG_SDH_BFIN_MODULE) 589#if IS_ENABLED(CONFIG_SDH_BFIN)
592 590
593static struct bfin_sd_host bfin_sdh_data = { 591static struct bfin_sd_host bfin_sdh_data = {
594 .dma_chan = CH_RSI, 592 .dma_chan = CH_RSI,
@@ -631,21 +629,21 @@ static struct platform_device *tcm_devices[] __initdata = {
631 629
632 &bfin_dpmc, 630 &bfin_dpmc,
633 631
634#if defined(CONFIG_RTC_DRV_BFIN) || defined(CONFIG_RTC_DRV_BFIN_MODULE) 632#if IS_ENABLED(CONFIG_RTC_DRV_BFIN)
635 &rtc_device, 633 &rtc_device,
636#endif 634#endif
637 635
638#if defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE) 636#if IS_ENABLED(CONFIG_BFIN_MAC)
639 &bfin_mii_bus, 637 &bfin_mii_bus,
640 &bfin_mac_device, 638 &bfin_mac_device,
641#endif 639#endif
642 640
643#if defined(CONFIG_SPI_BFIN5XX) || defined(CONFIG_SPI_BFIN5XX_MODULE) 641#if IS_ENABLED(CONFIG_SPI_BFIN5XX)
644 &bfin_spi0_device, 642 &bfin_spi0_device,
645 &bfin_spi1_device, 643 &bfin_spi1_device,
646#endif 644#endif
647 645
648#if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE) 646#if IS_ENABLED(CONFIG_SERIAL_BFIN)
649#ifdef CONFIG_SERIAL_BFIN_UART0 647#ifdef CONFIG_SERIAL_BFIN_UART0
650 &bfin_uart0_device, 648 &bfin_uart0_device,
651#endif 649#endif
@@ -654,7 +652,7 @@ static struct platform_device *tcm_devices[] __initdata = {
654#endif 652#endif
655#endif 653#endif
656 654
657#if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE) 655#if IS_ENABLED(CONFIG_BFIN_SIR)
658#ifdef CONFIG_BFIN_SIR0 656#ifdef CONFIG_BFIN_SIR0
659 &bfin_sir0_device, 657 &bfin_sir0_device,
660#endif 658#endif
@@ -663,11 +661,11 @@ static struct platform_device *tcm_devices[] __initdata = {
663#endif 661#endif
664#endif 662#endif
665 663
666#if defined(CONFIG_I2C_BLACKFIN_TWI) || defined(CONFIG_I2C_BLACKFIN_TWI_MODULE) 664#if IS_ENABLED(CONFIG_I2C_BLACKFIN_TWI)
667 &i2c_bfin_twi_device, 665 &i2c_bfin_twi_device,
668#endif 666#endif
669 667
670#if defined(CONFIG_SERIAL_BFIN_SPORT) || defined(CONFIG_SERIAL_BFIN_SPORT_MODULE) 668#if IS_ENABLED(CONFIG_SERIAL_BFIN_SPORT)
671#ifdef CONFIG_SERIAL_BFIN_SPORT0_UART 669#ifdef CONFIG_SERIAL_BFIN_SPORT0_UART
672 &bfin_sport0_uart_device, 670 &bfin_sport0_uart_device,
673#endif 671#endif
@@ -676,15 +674,15 @@ static struct platform_device *tcm_devices[] __initdata = {
676#endif 674#endif
677#endif 675#endif
678 676
679#if defined(CONFIG_KEYBOARD_GPIO) || defined(CONFIG_KEYBOARD_GPIO_MODULE) 677#if IS_ENABLED(CONFIG_KEYBOARD_GPIO)
680 &bfin_device_gpiokeys, 678 &bfin_device_gpiokeys,
681#endif 679#endif
682 680
683#if defined(CONFIG_SDH_BFIN) || defined(CONFIG_SDH_BFIN_MODULE) 681#if IS_ENABLED(CONFIG_SDH_BFIN)
684 &bf51x_sdh_device, 682 &bf51x_sdh_device,
685#endif 683#endif
686 684
687#if defined(CONFIG_MTD_PHYSMAP) || defined(CONFIG_MTD_PHYSMAP_MODULE) 685#if IS_ENABLED(CONFIG_MTD_PHYSMAP)
688 &tcm_flash_device, 686 &tcm_flash_device,
689#endif 687#endif
690}; 688};
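
Editor's note: one pattern in this file is worth calling out. Where a guard depends on two symbols (the WM8731 hunks above), only the tristate symbol is converted, giving `#if IS_ENABLED(CONFIG_SND_SOC_WM8731) && defined(CONFIG_SND_SOC_WM8731_SPI)`. That works because IS_ENABLED() expands to a literal 0 or 1, which the preprocessor can combine freely with ordinary defined() tests; CONFIG_SND_SOC_WM8731_SPI is presumably a plain bool helper with no _MODULE counterpart, so keeping defined() for it is harmless. A contrived illustration (CONFIG_EXAMPLE_* are invented symbols, not from the patch):

/* Editor's sketch: IS_ENABLED() mixed with defined() in one condition. */
#if IS_ENABLED(CONFIG_EXAMPLE_CODEC) && defined(CONFIG_EXAMPLE_CODEC_SPI)
#define EXAMPLE_CODEC_ON_SPI 1
#else
#define EXAMPLE_CODEC_ON_SPI 0
#endif
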
diff --git a/arch/blackfin/mach-bf527/boards/ad7160eval.c b/arch/blackfin/mach-bf527/boards/ad7160eval.c
index 1e7be62fccb6..9501bd8d9cd1 100644
--- a/arch/blackfin/mach-bf527/boards/ad7160eval.c
+++ b/arch/blackfin/mach-bf527/boards/ad7160eval.c
@@ -37,7 +37,7 @@ const char bfin_board_name[] = "ADI BF527-AD7160EVAL";
37 * Driver needs to know address, irq and flag pin. 37 * Driver needs to know address, irq and flag pin.
38 */ 38 */
39 39
40#if defined(CONFIG_USB_MUSB_HDRC) || defined(CONFIG_USB_MUSB_HDRC_MODULE) 40#if IS_ENABLED(CONFIG_USB_MUSB_HDRC)
41static struct resource musb_resources[] = { 41static struct resource musb_resources[] = {
42 [0] = { 42 [0] = {
43 .start = 0xffc03800, 43 .start = 0xffc03800,
@@ -97,7 +97,7 @@ static struct platform_device musb_device = {
97}; 97};
98#endif 98#endif
99 99
100#if defined(CONFIG_FB_BFIN_RA158Z) || defined(CONFIG_FB_BFIN_RA158Z_MODULE) 100#if IS_ENABLED(CONFIG_FB_BFIN_RA158Z)
101static struct resource bf52x_ra158z_resources[] = { 101static struct resource bf52x_ra158z_resources[] = {
102 { 102 {
103 .start = IRQ_PPI_ERROR, 103 .start = IRQ_PPI_ERROR,
@@ -114,7 +114,7 @@ static struct platform_device bf52x_ra158z_device = {
114}; 114};
115#endif 115#endif
116 116
117#if defined(CONFIG_MTD_PHYSMAP) || defined(CONFIG_MTD_PHYSMAP_MODULE) 117#if IS_ENABLED(CONFIG_MTD_PHYSMAP)
118static struct mtd_partition ad7160eval_partitions[] = { 118static struct mtd_partition ad7160eval_partitions[] = {
119 { 119 {
120 .name = "bootloader(nor)", 120 .name = "bootloader(nor)",
@@ -154,7 +154,7 @@ static struct platform_device ad7160eval_flash_device = {
154}; 154};
155#endif 155#endif
156 156
157#if defined(CONFIG_MTD_NAND_BF5XX) || defined(CONFIG_MTD_NAND_BF5XX_MODULE) 157#if IS_ENABLED(CONFIG_MTD_NAND_BF5XX)
158static struct mtd_partition partition_info[] = { 158static struct mtd_partition partition_info[] = {
159 { 159 {
160 .name = "linux kernel(nand)", 160 .name = "linux kernel(nand)",
@@ -200,14 +200,14 @@ static struct platform_device bf5xx_nand_device = {
200}; 200};
201#endif 201#endif
202 202
203#if defined(CONFIG_RTC_DRV_BFIN) || defined(CONFIG_RTC_DRV_BFIN_MODULE) 203#if IS_ENABLED(CONFIG_RTC_DRV_BFIN)
204static struct platform_device rtc_device = { 204static struct platform_device rtc_device = {
205 .name = "rtc-bfin", 205 .name = "rtc-bfin",
206 .id = -1, 206 .id = -1,
207}; 207};
208#endif 208#endif
209 209
210#if defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE) 210#if IS_ENABLED(CONFIG_BFIN_MAC)
211#include <linux/bfin_mac.h> 211#include <linux/bfin_mac.h>
212static const unsigned short bfin_mac_peripherals[] = P_RMII0; 212static const unsigned short bfin_mac_peripherals[] = P_RMII0;
213 213
@@ -241,8 +241,7 @@ static struct platform_device bfin_mac_device = {
 #endif
 
 
-#if defined(CONFIG_MTD_M25P80) \
-	|| defined(CONFIG_MTD_M25P80_MODULE)
+#if IS_ENABLED(CONFIG_MTD_M25P80)
 static struct mtd_partition bfin_spi_flash_partitions[] = {
 	{
 		.name = "bootloader(spi)",
@@ -269,13 +268,13 @@ static struct bfin5xx_spi_chip spi_flash_chip_info = {
269}; 268};
270#endif 269#endif
271 270
272#if defined(CONFIG_MMC_SPI) || defined(CONFIG_MMC_SPI_MODULE) 271#if IS_ENABLED(CONFIG_MMC_SPI)
273static struct bfin5xx_spi_chip mmc_spi_chip_info = { 272static struct bfin5xx_spi_chip mmc_spi_chip_info = {
274 .enable_dma = 0, 273 .enable_dma = 0,
275}; 274};
276#endif 275#endif
277 276
278#if defined(CONFIG_SND_BF5XX_I2S) || defined(CONFIG_SND_BF5XX_I2S_MODULE) 277#if IS_ENABLED(CONFIG_SND_BF5XX_I2S)
279static struct platform_device bfin_i2s = { 278static struct platform_device bfin_i2s = {
280 .name = "bfin-i2s", 279 .name = "bfin-i2s",
281 .id = CONFIG_SND_BF5XX_SPORT_NUM, 280 .id = CONFIG_SND_BF5XX_SPORT_NUM,
@@ -284,8 +283,7 @@ static struct platform_device bfin_i2s = {
 #endif
 
 static struct spi_board_info bfin_spi_board_info[] __initdata = {
-#if defined(CONFIG_MTD_M25P80) \
-	|| defined(CONFIG_MTD_M25P80_MODULE)
+#if IS_ENABLED(CONFIG_MTD_M25P80)
 	{
 		/* the modalias must be the same as spi device driver name */
 		.modalias = "m25p80", /* Name of spi_driver for this device */
@@ -297,8 +295,7 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
 		.mode = SPI_MODE_3,
 	},
 #endif
-#if defined(CONFIG_SND_BF5XX_SOC_AD183X) \
-	|| defined(CONFIG_SND_BF5XX_SOC_AD183X_MODULE)
+#if IS_ENABLED(CONFIG_SND_BF5XX_SOC_AD183X)
 	{
 		.modalias = "ad183x",
 		.max_speed_hz = 3125000, /* max spi clock (SCK) speed in HZ */
@@ -306,7 +303,7 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
306 .chip_select = 4, 303 .chip_select = 4,
307 }, 304 },
308#endif 305#endif
309#if defined(CONFIG_MMC_SPI) || defined(CONFIG_MMC_SPI_MODULE) 306#if IS_ENABLED(CONFIG_MMC_SPI)
310 { 307 {
311 .modalias = "mmc_spi", 308 .modalias = "mmc_spi",
312 .max_speed_hz = 30000000, /* max spi clock (SCK) speed in HZ */ 309 .max_speed_hz = 30000000, /* max spi clock (SCK) speed in HZ */
@@ -316,7 +313,7 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
316 .mode = SPI_MODE_3, 313 .mode = SPI_MODE_3,
317 }, 314 },
318#endif 315#endif
319#if defined(CONFIG_SPI_SPIDEV) || defined(CONFIG_SPI_SPIDEV_MODULE) 316#if IS_ENABLED(CONFIG_SPI_SPIDEV)
320 { 317 {
321 .modalias = "spidev", 318 .modalias = "spidev",
322 .max_speed_hz = 3125000, /* max spi clock (SCK) speed in HZ */ 319 .max_speed_hz = 3125000, /* max spi clock (SCK) speed in HZ */
@@ -326,7 +323,7 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
326#endif 323#endif
327}; 324};
328 325
329#if defined(CONFIG_SPI_BFIN5XX) || defined(CONFIG_SPI_BFIN5XX_MODULE) 326#if IS_ENABLED(CONFIG_SPI_BFIN5XX)
330/* SPI controller data */ 327/* SPI controller data */
331static struct bfin5xx_spi_master bfin_spi0_info = { 328static struct bfin5xx_spi_master bfin_spi0_info = {
332 .num_chipselect = MAX_CTRL_CS + MAX_BLACKFIN_GPIOS, 329 .num_chipselect = MAX_CTRL_CS + MAX_BLACKFIN_GPIOS,
@@ -364,7 +361,7 @@ static struct platform_device bfin_spi0_device = {
364}; 361};
365#endif /* spi master and devices */ 362#endif /* spi master and devices */
366 363
367#if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE) 364#if IS_ENABLED(CONFIG_SERIAL_BFIN)
368#ifdef CONFIG_SERIAL_BFIN_UART0 365#ifdef CONFIG_SERIAL_BFIN_UART0
369static struct resource bfin_uart0_resources[] = { 366static struct resource bfin_uart0_resources[] = {
370 { 367 {
@@ -475,7 +472,7 @@ static struct platform_device bfin_uart1_device = {
475#endif 472#endif
476#endif 473#endif
477 474
478#if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE) 475#if IS_ENABLED(CONFIG_BFIN_SIR)
479#ifdef CONFIG_BFIN_SIR0 476#ifdef CONFIG_BFIN_SIR0
480static struct resource bfin_sir0_resources[] = { 477static struct resource bfin_sir0_resources[] = {
481 { 478 {
@@ -530,7 +527,7 @@ static struct platform_device bfin_sir1_device = {
530#endif 527#endif
531#endif 528#endif
532 529
533#if defined(CONFIG_TOUCHSCREEN_AD7160) || defined(CONFIG_TOUCHSCREEN_AD7160_MODULE) 530#if IS_ENABLED(CONFIG_TOUCHSCREEN_AD7160)
534#include <linux/input/ad7160.h> 531#include <linux/input/ad7160.h>
535static const struct ad7160_platform_data bfin_ad7160_ts_info = { 532static const struct ad7160_platform_data bfin_ad7160_ts_info = {
536 .sensor_x_res = 854, 533 .sensor_x_res = 854,
@@ -560,7 +557,7 @@ static const struct ad7160_platform_data bfin_ad7160_ts_info = {
560}; 557};
561#endif 558#endif
562 559
563#if defined(CONFIG_I2C_BLACKFIN_TWI) || defined(CONFIG_I2C_BLACKFIN_TWI_MODULE) 560#if IS_ENABLED(CONFIG_I2C_BLACKFIN_TWI)
564static const u16 bfin_twi0_pins[] = {P_TWI0_SCL, P_TWI0_SDA, 0}; 561static const u16 bfin_twi0_pins[] = {P_TWI0_SCL, P_TWI0_SDA, 0};
565 562
566static struct resource bfin_twi0_resource[] = { 563static struct resource bfin_twi0_resource[] = {
@@ -588,7 +585,7 @@ static struct platform_device i2c_bfin_twi_device = {
588#endif 585#endif
589 586
590static struct i2c_board_info __initdata bfin_i2c_board_info[] = { 587static struct i2c_board_info __initdata bfin_i2c_board_info[] = {
591#if defined(CONFIG_TOUCHSCREEN_AD7160) || defined(CONFIG_TOUCHSCREEN_AD7160_MODULE) 588#if IS_ENABLED(CONFIG_TOUCHSCREEN_AD7160)
592 { 589 {
593 I2C_BOARD_INFO("ad7160", 0x33), 590 I2C_BOARD_INFO("ad7160", 0x33),
594 .irq = IRQ_PH1, 591 .irq = IRQ_PH1,
@@ -597,7 +594,7 @@ static struct i2c_board_info __initdata bfin_i2c_board_info[] = {
597#endif 594#endif
598}; 595};
599 596
600#if defined(CONFIG_SERIAL_BFIN_SPORT) || defined(CONFIG_SERIAL_BFIN_SPORT_MODULE) 597#if IS_ENABLED(CONFIG_SERIAL_BFIN_SPORT)
601#ifdef CONFIG_SERIAL_BFIN_SPORT0_UART 598#ifdef CONFIG_SERIAL_BFIN_SPORT0_UART
602static struct resource bfin_sport0_uart_resources[] = { 599static struct resource bfin_sport0_uart_resources[] = {
603 { 600 {
@@ -668,7 +665,7 @@ static struct platform_device bfin_sport1_uart_device = {
668#endif 665#endif
669#endif 666#endif
670 667
671#if defined(CONFIG_INPUT_BFIN_ROTARY) || defined(CONFIG_INPUT_BFIN_ROTARY_MODULE) 668#if IS_ENABLED(CONFIG_INPUT_BFIN_ROTARY)
672#include <asm/bfin_rotary.h> 669#include <asm/bfin_rotary.h>
673 670
674static struct bfin_rotary_platform_data bfin_rotary_data = { 671static struct bfin_rotary_platform_data bfin_rotary_data = {
@@ -725,28 +722,28 @@ static struct platform_device *stamp_devices[] __initdata = {
725 722
726 &bfin_dpmc, 723 &bfin_dpmc,
727 724
728#if defined(CONFIG_MTD_NAND_BF5XX) || defined(CONFIG_MTD_NAND_BF5XX_MODULE) 725#if IS_ENABLED(CONFIG_MTD_NAND_BF5XX)
729 &bf5xx_nand_device, 726 &bf5xx_nand_device,
730#endif 727#endif
731 728
732#if defined(CONFIG_RTC_DRV_BFIN) || defined(CONFIG_RTC_DRV_BFIN_MODULE) 729#if IS_ENABLED(CONFIG_RTC_DRV_BFIN)
733 &rtc_device, 730 &rtc_device,
734#endif 731#endif
735 732
736#if defined(CONFIG_USB_MUSB_HDRC) || defined(CONFIG_USB_MUSB_HDRC_MODULE) 733#if IS_ENABLED(CONFIG_USB_MUSB_HDRC)
737 &musb_device, 734 &musb_device,
738#endif 735#endif
739 736
740#if defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE) 737#if IS_ENABLED(CONFIG_BFIN_MAC)
741 &bfin_mii_bus, 738 &bfin_mii_bus,
742 &bfin_mac_device, 739 &bfin_mac_device,
743#endif 740#endif
744 741
745#if defined(CONFIG_SPI_BFIN5XX) || defined(CONFIG_SPI_BFIN5XX_MODULE) 742#if IS_ENABLED(CONFIG_SPI_BFIN5XX)
746 &bfin_spi0_device, 743 &bfin_spi0_device,
747#endif 744#endif
748 745
749#if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE) 746#if IS_ENABLED(CONFIG_SERIAL_BFIN)
750#ifdef CONFIG_SERIAL_BFIN_UART0 747#ifdef CONFIG_SERIAL_BFIN_UART0
751 &bfin_uart0_device, 748 &bfin_uart0_device,
752#endif 749#endif
@@ -755,11 +752,11 @@ static struct platform_device *stamp_devices[] __initdata = {
755#endif 752#endif
756#endif 753#endif
757 754
758#if defined(CONFIG_FB_BFIN_RA158Z) || defined(CONFIG_FB_BFIN_RA158Z_MODULE) 755#if IS_ENABLED(CONFIG_FB_BFIN_RA158Z)
759 &bf52x_ra158z_device, 756 &bf52x_ra158z_device,
760#endif 757#endif
761 758
762#if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE) 759#if IS_ENABLED(CONFIG_BFIN_SIR)
763#ifdef CONFIG_BFIN_SIR0 760#ifdef CONFIG_BFIN_SIR0
764 &bfin_sir0_device, 761 &bfin_sir0_device,
765#endif 762#endif
@@ -768,11 +765,11 @@ static struct platform_device *stamp_devices[] __initdata = {
768#endif 765#endif
769#endif 766#endif
770 767
771#if defined(CONFIG_I2C_BLACKFIN_TWI) || defined(CONFIG_I2C_BLACKFIN_TWI_MODULE) 768#if IS_ENABLED(CONFIG_I2C_BLACKFIN_TWI)
772 &i2c_bfin_twi_device, 769 &i2c_bfin_twi_device,
773#endif 770#endif
774 771
775#if defined(CONFIG_SERIAL_BFIN_SPORT) || defined(CONFIG_SERIAL_BFIN_SPORT_MODULE) 772#if IS_ENABLED(CONFIG_SERIAL_BFIN_SPORT)
776#ifdef CONFIG_SERIAL_BFIN_SPORT0_UART 773#ifdef CONFIG_SERIAL_BFIN_SPORT0_UART
777 &bfin_sport0_uart_device, 774 &bfin_sport0_uart_device,
778#endif 775#endif
@@ -781,15 +778,15 @@ static struct platform_device *stamp_devices[] __initdata = {
781#endif 778#endif
782#endif 779#endif
783 780
784#if defined(CONFIG_INPUT_BFIN_ROTARY) || defined(CONFIG_INPUT_BFIN_ROTARY_MODULE) 781#if IS_ENABLED(CONFIG_INPUT_BFIN_ROTARY)
785 &bfin_rotary_device, 782 &bfin_rotary_device,
786#endif 783#endif
787 784
788#if defined(CONFIG_MTD_PHYSMAP) || defined(CONFIG_MTD_PHYSMAP_MODULE) 785#if IS_ENABLED(CONFIG_MTD_PHYSMAP)
789 &ad7160eval_flash_device, 786 &ad7160eval_flash_device,
790#endif 787#endif
791 788
792#if defined(CONFIG_SND_BF5XX_I2S) || defined(CONFIG_SND_BF5XX_I2S_MODULE) 789#if IS_ENABLED(CONFIG_SND_BF5XX_I2S)
793 &bfin_i2s, 790 &bfin_i2s,
794#endif 791#endif
795}; 792};
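
Editor's note: for context on how these guarded spi_board_info entries are consumed, the board init functions (e.g. ezbrd_init earlier in this diff) hand the whole array to spi_register_board_info() unconditionally, so only the entries need config guards. A hedged, self-contained sketch of that shape follows; the mmc_spi field values, bus/chip-select numbers and function names are placeholders, not taken from any of these boards.

/* Editor's sketch: optional SPI slaves guarded per entry, one registration. */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/spi/spi.h>

static struct spi_board_info demo_spi_board_info[] __initdata = {
#if IS_ENABLED(CONFIG_MMC_SPI)
	{
		.modalias	= "mmc_spi",
		.max_speed_hz	= 20000000,	/* placeholder SCK rate */
		.bus_num	= 0,		/* placeholder controller */
		.chip_select	= 5,		/* placeholder CS */
		.mode		= SPI_MODE_3,
	},
#endif
};

static int __init demo_board_init(void)
{
	/* A zero-length array is fine, so the call itself needs no guard. */
	return spi_register_board_info(demo_spi_board_info,
				       ARRAY_SIZE(demo_spi_board_info));
}
arch_initcall(demo_board_init);
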
diff --git a/arch/blackfin/mach-bf527/boards/cm_bf527.c b/arch/blackfin/mach-bf527/boards/cm_bf527.c
index 413d0132b66f..b1004b35db36 100644
--- a/arch/blackfin/mach-bf527/boards/cm_bf527.c
+++ b/arch/blackfin/mach-bf527/boards/cm_bf527.c
@@ -37,7 +37,7 @@ const char bfin_board_name[] = "Bluetechnix CM-BF527";
37 * Driver needs to know address, irq and flag pin. 37 * Driver needs to know address, irq and flag pin.
38 */ 38 */
39 39
40#if defined(CONFIG_USB_ISP1760_HCD) || defined(CONFIG_USB_ISP1760_HCD_MODULE) 40#if IS_ENABLED(CONFIG_USB_ISP1760_HCD)
41#include <linux/usb/isp1760.h> 41#include <linux/usb/isp1760.h>
42static struct resource bfin_isp1760_resources[] = { 42static struct resource bfin_isp1760_resources[] = {
43 [0] = { 43 [0] = {
@@ -72,7 +72,7 @@ static struct platform_device bfin_isp1760_device = {
72}; 72};
73#endif 73#endif
74 74
75#if defined(CONFIG_USB_MUSB_HDRC) || defined(CONFIG_USB_MUSB_HDRC_MODULE) 75#if IS_ENABLED(CONFIG_USB_MUSB_HDRC)
76static struct resource musb_resources[] = { 76static struct resource musb_resources[] = {
77 [0] = { 77 [0] = {
78 .start = 0xffc03800, 78 .start = 0xffc03800,
@@ -134,7 +134,7 @@ static struct platform_device musb_device = {
134}; 134};
135#endif 135#endif
136 136
137#if defined(CONFIG_MTD_NAND_BF5XX) || defined(CONFIG_MTD_NAND_BF5XX_MODULE) 137#if IS_ENABLED(CONFIG_MTD_NAND_BF5XX)
138static struct mtd_partition partition_info[] = { 138static struct mtd_partition partition_info[] = {
139 { 139 {
140 .name = "linux kernel(nand)", 140 .name = "linux kernel(nand)",
@@ -180,7 +180,7 @@ static struct platform_device bf5xx_nand_device = {
180}; 180};
181#endif 181#endif
182 182
183#if defined(CONFIG_BFIN_CFPCMCIA) || defined(CONFIG_BFIN_CFPCMCIA_MODULE) 183#if IS_ENABLED(CONFIG_BFIN_CFPCMCIA)
184static struct resource bfin_pcmcia_cf_resources[] = { 184static struct resource bfin_pcmcia_cf_resources[] = {
185 { 185 {
186 .start = 0x20310000, /* IO PORT */ 186 .start = 0x20310000, /* IO PORT */
@@ -209,14 +209,14 @@ static struct platform_device bfin_pcmcia_cf_device = {
209}; 209};
210#endif 210#endif
211 211
212#if defined(CONFIG_RTC_DRV_BFIN) || defined(CONFIG_RTC_DRV_BFIN_MODULE) 212#if IS_ENABLED(CONFIG_RTC_DRV_BFIN)
213static struct platform_device rtc_device = { 213static struct platform_device rtc_device = {
214 .name = "rtc-bfin", 214 .name = "rtc-bfin",
215 .id = -1, 215 .id = -1,
216}; 216};
217#endif 217#endif
218 218
219#if defined(CONFIG_SMC91X) || defined(CONFIG_SMC91X_MODULE) 219#if IS_ENABLED(CONFIG_SMC91X)
220#include <linux/smc91x.h> 220#include <linux/smc91x.h>
221 221
222static struct smc91x_platdata smc91x_info = { 222static struct smc91x_platdata smc91x_info = {
@@ -249,7 +249,7 @@ static struct platform_device smc91x_device = {
249}; 249};
250#endif 250#endif
251 251
252#if defined(CONFIG_DM9000) || defined(CONFIG_DM9000_MODULE) 252#if IS_ENABLED(CONFIG_DM9000)
253static struct resource dm9000_resources[] = { 253static struct resource dm9000_resources[] = {
254 [0] = { 254 [0] = {
255 .start = 0x203FB800, 255 .start = 0x203FB800,
@@ -276,7 +276,7 @@ static struct platform_device dm9000_device = {
276}; 276};
277#endif 277#endif
278 278
279#if defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE) 279#if IS_ENABLED(CONFIG_BFIN_MAC)
280#include <linux/bfin_mac.h> 280#include <linux/bfin_mac.h>
281static const unsigned short bfin_mac_peripherals[] = P_RMII0; 281static const unsigned short bfin_mac_peripherals[] = P_RMII0;
282 282
@@ -309,7 +309,7 @@ static struct platform_device bfin_mac_device = {
309}; 309};
310#endif 310#endif
311 311
312#if defined(CONFIG_USB_NET2272) || defined(CONFIG_USB_NET2272_MODULE) 312#if IS_ENABLED(CONFIG_USB_NET2272)
313static struct resource net2272_bfin_resources[] = { 313static struct resource net2272_bfin_resources[] = {
314 { 314 {
315 .start = 0x20300000, 315 .start = 0x20300000,
@@ -330,8 +330,7 @@ static struct platform_device net2272_bfin_device = {
 };
 #endif
 
-#if defined(CONFIG_MTD_M25P80) \
-	|| defined(CONFIG_MTD_M25P80_MODULE)
+#if IS_ENABLED(CONFIG_MTD_M25P80)
 static struct mtd_partition bfin_spi_flash_partitions[] = {
 	{
 		.name = "bootloader(spi)",
@@ -358,13 +357,13 @@ static struct bfin5xx_spi_chip spi_flash_chip_info = {
358}; 357};
359#endif 358#endif
360 359
361#if defined(CONFIG_MMC_SPI) || defined(CONFIG_MMC_SPI_MODULE) 360#if IS_ENABLED(CONFIG_MMC_SPI)
362static struct bfin5xx_spi_chip mmc_spi_chip_info = { 361static struct bfin5xx_spi_chip mmc_spi_chip_info = {
363 .enable_dma = 0, 362 .enable_dma = 0,
364}; 363};
365#endif 364#endif
366 365
367#if defined(CONFIG_TOUCHSCREEN_AD7877) || defined(CONFIG_TOUCHSCREEN_AD7877_MODULE) 366#if IS_ENABLED(CONFIG_TOUCHSCREEN_AD7877)
368static const struct ad7877_platform_data bfin_ad7877_ts_info = { 367static const struct ad7877_platform_data bfin_ad7877_ts_info = {
369 .model = 7877, 368 .model = 7877,
370 .vref_delay_usecs = 50, /* internal, no capacitor */ 369 .vref_delay_usecs = 50, /* internal, no capacitor */
@@ -381,8 +380,7 @@ static const struct ad7877_platform_data bfin_ad7877_ts_info = {
 #endif
 
 static struct spi_board_info bfin_spi_board_info[] __initdata = {
-#if defined(CONFIG_MTD_M25P80) \
-	|| defined(CONFIG_MTD_M25P80_MODULE)
+#if IS_ENABLED(CONFIG_MTD_M25P80)
 	{
 		/* the modalias must be the same as spi device driver name */
 		.modalias = "m25p80", /* Name of spi_driver for this device */
@@ -395,8 +393,7 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
 	},
 #endif
 
-#if defined(CONFIG_SND_BF5XX_SOC_AD183X) \
-	|| defined(CONFIG_SND_BF5XX_SOC_AD183X_MODULE)
+#if IS_ENABLED(CONFIG_SND_BF5XX_SOC_AD183X)
 	{
 		.modalias = "ad183x",
 		.max_speed_hz = 3125000, /* max spi clock (SCK) speed in HZ */
@@ -404,7 +401,7 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
404 .chip_select = 4, 401 .chip_select = 4,
405 }, 402 },
406#endif 403#endif
407#if defined(CONFIG_MMC_SPI) || defined(CONFIG_MMC_SPI_MODULE) 404#if IS_ENABLED(CONFIG_MMC_SPI)
408 { 405 {
409 .modalias = "mmc_spi", 406 .modalias = "mmc_spi",
410 .max_speed_hz = 20000000, /* max spi clock (SCK) speed in HZ */ 407 .max_speed_hz = 20000000, /* max spi clock (SCK) speed in HZ */
@@ -414,7 +411,7 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
414 .mode = SPI_MODE_3, 411 .mode = SPI_MODE_3,
415 }, 412 },
416#endif 413#endif
417#if defined(CONFIG_TOUCHSCREEN_AD7877) || defined(CONFIG_TOUCHSCREEN_AD7877_MODULE) 414#if IS_ENABLED(CONFIG_TOUCHSCREEN_AD7877)
418 { 415 {
419 .modalias = "ad7877", 416 .modalias = "ad7877",
420 .platform_data = &bfin_ad7877_ts_info, 417 .platform_data = &bfin_ad7877_ts_info,
@@ -424,7 +421,7 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
424 .chip_select = 2, 421 .chip_select = 2,
425 }, 422 },
426#endif 423#endif
427#if defined(CONFIG_SND_SOC_WM8731) || defined(CONFIG_SND_SOC_WM8731_MODULE) \ 424#if IS_ENABLED(CONFIG_SND_SOC_WM8731) \
428 && defined(CONFIG_SND_SOC_WM8731_SPI) 425 && defined(CONFIG_SND_SOC_WM8731_SPI)
429 { 426 {
430 .modalias = "wm8731", 427 .modalias = "wm8731",
@@ -434,7 +431,7 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
434 .mode = SPI_MODE_0, 431 .mode = SPI_MODE_0,
435 }, 432 },
436#endif 433#endif
437#if defined(CONFIG_SPI_SPIDEV) || defined(CONFIG_SPI_SPIDEV_MODULE) 434#if IS_ENABLED(CONFIG_SPI_SPIDEV)
438 { 435 {
439 .modalias = "spidev", 436 .modalias = "spidev",
440 .max_speed_hz = 3125000, /* max spi clock (SCK) speed in HZ */ 437 .max_speed_hz = 3125000, /* max spi clock (SCK) speed in HZ */
@@ -444,7 +441,7 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
444#endif 441#endif
445}; 442};
446 443
447#if defined(CONFIG_SPI_BFIN5XX) || defined(CONFIG_SPI_BFIN5XX_MODULE) 444#if IS_ENABLED(CONFIG_SPI_BFIN5XX)
448/* SPI controller data */ 445/* SPI controller data */
449static struct bfin5xx_spi_master bfin_spi0_info = { 446static struct bfin5xx_spi_master bfin_spi0_info = {
450 .num_chipselect = 8, 447 .num_chipselect = 8,
@@ -482,7 +479,7 @@ static struct platform_device bfin_spi0_device = {
482}; 479};
483#endif /* spi master and devices */ 480#endif /* spi master and devices */
484 481
485#if defined(CONFIG_MTD_GPIO_ADDR) || defined(CONFIG_MTD_GPIO_ADDR_MODULE) 482#if IS_ENABLED(CONFIG_MTD_GPIO_ADDR)
486static struct mtd_partition cm_partitions[] = { 483static struct mtd_partition cm_partitions[] = {
487 { 484 {
488 .name = "bootloader(nor)", 485 .name = "bootloader(nor)",
@@ -531,7 +528,7 @@ static struct platform_device cm_flash_device = {
531}; 528};
532#endif 529#endif
533 530
534#if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE) 531#if IS_ENABLED(CONFIG_SERIAL_BFIN)
535#ifdef CONFIG_SERIAL_BFIN_UART0 532#ifdef CONFIG_SERIAL_BFIN_UART0
536static struct resource bfin_uart0_resources[] = { 533static struct resource bfin_uart0_resources[] = {
537 { 534 {
@@ -642,7 +639,7 @@ static struct platform_device bfin_uart1_device = {
642#endif 639#endif
643#endif 640#endif
644 641
645#if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE) 642#if IS_ENABLED(CONFIG_BFIN_SIR)
646#ifdef CONFIG_BFIN_SIR0 643#ifdef CONFIG_BFIN_SIR0
647static struct resource bfin_sir0_resources[] = { 644static struct resource bfin_sir0_resources[] = {
648 { 645 {
@@ -697,7 +694,7 @@ static struct platform_device bfin_sir1_device = {
697#endif 694#endif
698#endif 695#endif
699 696
700#if defined(CONFIG_I2C_BLACKFIN_TWI) || defined(CONFIG_I2C_BLACKFIN_TWI_MODULE) 697#if IS_ENABLED(CONFIG_I2C_BLACKFIN_TWI)
701static const u16 bfin_twi0_pins[] = {P_TWI0_SCL, P_TWI0_SDA, 0}; 698static const u16 bfin_twi0_pins[] = {P_TWI0_SCL, P_TWI0_SDA, 0};
702 699
703static struct resource bfin_twi0_resource[] = { 700static struct resource bfin_twi0_resource[] = {
@@ -725,25 +722,25 @@ static struct platform_device i2c_bfin_twi_device = {
725#endif 722#endif
726 723
727static struct i2c_board_info __initdata bfin_i2c_board_info[] = { 724static struct i2c_board_info __initdata bfin_i2c_board_info[] = {
728#if defined(CONFIG_BFIN_TWI_LCD) || defined(CONFIG_BFIN_TWI_LCD_MODULE) 725#if IS_ENABLED(CONFIG_BFIN_TWI_LCD)
729 { 726 {
730 I2C_BOARD_INFO("pcf8574_lcd", 0x22), 727 I2C_BOARD_INFO("pcf8574_lcd", 0x22),
731 }, 728 },
732#endif 729#endif
733#if defined(CONFIG_INPUT_PCF8574) || defined(CONFIG_INPUT_PCF8574_MODULE) 730#if IS_ENABLED(CONFIG_INPUT_PCF8574)
734 { 731 {
735 I2C_BOARD_INFO("pcf8574_keypad", 0x27), 732 I2C_BOARD_INFO("pcf8574_keypad", 0x27),
736 .irq = IRQ_PF8, 733 .irq = IRQ_PF8,
737 }, 734 },
738#endif 735#endif
739#if defined(CONFIG_FB_BFIN_7393) || defined(CONFIG_FB_BFIN_7393_MODULE) 736#if IS_ENABLED(CONFIG_FB_BFIN_7393)
740 { 737 {
741 I2C_BOARD_INFO("bfin-adv7393", 0x2B), 738 I2C_BOARD_INFO("bfin-adv7393", 0x2B),
742 }, 739 },
743#endif 740#endif
744}; 741};
745 742
746#if defined(CONFIG_SERIAL_BFIN_SPORT) || defined(CONFIG_SERIAL_BFIN_SPORT_MODULE) 743#if IS_ENABLED(CONFIG_SERIAL_BFIN_SPORT)
747#ifdef CONFIG_SERIAL_BFIN_SPORT0_UART 744#ifdef CONFIG_SERIAL_BFIN_SPORT0_UART
748static struct resource bfin_sport0_uart_resources[] = { 745static struct resource bfin_sport0_uart_resources[] = {
749 { 746 {
@@ -814,7 +811,7 @@ static struct platform_device bfin_sport1_uart_device = {
814#endif 811#endif
815#endif 812#endif
816 813
817#if defined(CONFIG_KEYBOARD_GPIO) || defined(CONFIG_KEYBOARD_GPIO_MODULE) 814#if IS_ENABLED(CONFIG_KEYBOARD_GPIO)
818#include <linux/input.h> 815#include <linux/input.h>
819#include <linux/gpio_keys.h> 816#include <linux/gpio_keys.h>
820 817
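
Editor's note: the KEYBOARD_GPIO guard above only shows the two includes; behind it each board defines a gpio_keys button table and a "gpio-keys" platform device. A rough sketch of that structure for orientation; the GPIO number, keycode and variable names are invented, not this board's values.

/* Editor's sketch: minimal gpio-keys platform device (placeholder wiring). */
#include <linux/gpio_keys.h>
#include <linux/input.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>

static struct gpio_keys_button demo_buttons[] = {
	{
		.gpio		= 2,		/* placeholder GPIO */
		.code		= KEY_ENTER,	/* placeholder keycode */
		.desc		= "demo button",
		.active_low	= 1,
	},
};

static struct gpio_keys_platform_data demo_buttons_data = {
	.buttons	= demo_buttons,
	.nbuttons	= ARRAY_SIZE(demo_buttons),
};

/* In the real board files this device is listed in the *_devices[] table. */
static struct platform_device demo_device_gpiokeys = {
	.name	= "gpio-keys",
	.id	= -1,
	.dev	= {
		.platform_data = &demo_buttons_data,
	},
};
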
@@ -861,48 +858,48 @@ static struct platform_device *cmbf527_devices[] __initdata = {
861 858
862 &bfin_dpmc, 859 &bfin_dpmc,
863 860
864#if defined(CONFIG_MTD_NAND_BF5XX) || defined(CONFIG_MTD_NAND_BF5XX_MODULE) 861#if IS_ENABLED(CONFIG_MTD_NAND_BF5XX)
865 &bf5xx_nand_device, 862 &bf5xx_nand_device,
866#endif 863#endif
867 864
868#if defined(CONFIG_BFIN_CFPCMCIA) || defined(CONFIG_BFIN_CFPCMCIA_MODULE) 865#if IS_ENABLED(CONFIG_BFIN_CFPCMCIA)
869 &bfin_pcmcia_cf_device, 866 &bfin_pcmcia_cf_device,
870#endif 867#endif
871 868
872#if defined(CONFIG_RTC_DRV_BFIN) || defined(CONFIG_RTC_DRV_BFIN_MODULE) 869#if IS_ENABLED(CONFIG_RTC_DRV_BFIN)
873 &rtc_device, 870 &rtc_device,
874#endif 871#endif
875 872
876#if defined(CONFIG_USB_ISP1760_HCD) || defined(CONFIG_USB_ISP1760_HCD_MODULE) 873#if IS_ENABLED(CONFIG_USB_ISP1760_HCD)
877 &bfin_isp1760_device, 874 &bfin_isp1760_device,
878#endif 875#endif
879 876
880#if defined(CONFIG_USB_MUSB_HDRC) || defined(CONFIG_USB_MUSB_HDRC_MODULE) 877#if IS_ENABLED(CONFIG_USB_MUSB_HDRC)
881 &musb_device, 878 &musb_device,
882#endif 879#endif
883 880
884#if defined(CONFIG_SMC91X) || defined(CONFIG_SMC91X_MODULE) 881#if IS_ENABLED(CONFIG_SMC91X)
885 &smc91x_device, 882 &smc91x_device,
886#endif 883#endif
887 884
888#if defined(CONFIG_DM9000) || defined(CONFIG_DM9000_MODULE) 885#if IS_ENABLED(CONFIG_DM9000)
889 &dm9000_device, 886 &dm9000_device,
890#endif 887#endif
891 888
892#if defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE) 889#if IS_ENABLED(CONFIG_BFIN_MAC)
893 &bfin_mii_bus, 890 &bfin_mii_bus,
894 &bfin_mac_device, 891 &bfin_mac_device,
895#endif 892#endif
896 893
897#if defined(CONFIG_USB_NET2272) || defined(CONFIG_USB_NET2272_MODULE) 894#if IS_ENABLED(CONFIG_USB_NET2272)
898 &net2272_bfin_device, 895 &net2272_bfin_device,
899#endif 896#endif
900 897
901#if defined(CONFIG_SPI_BFIN5XX) || defined(CONFIG_SPI_BFIN5XX_MODULE) 898#if IS_ENABLED(CONFIG_SPI_BFIN5XX)
902 &bfin_spi0_device, 899 &bfin_spi0_device,
903#endif 900#endif
904 901
905#if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE) 902#if IS_ENABLED(CONFIG_SERIAL_BFIN)
906#ifdef CONFIG_SERIAL_BFIN_UART0 903#ifdef CONFIG_SERIAL_BFIN_UART0
907 &bfin_uart0_device, 904 &bfin_uart0_device,
908#endif 905#endif
@@ -911,7 +908,7 @@ static struct platform_device *cmbf527_devices[] __initdata = {
911#endif 908#endif
912#endif 909#endif
913 910
914#if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE) 911#if IS_ENABLED(CONFIG_BFIN_SIR)
915#ifdef CONFIG_BFIN_SIR0 912#ifdef CONFIG_BFIN_SIR0
916 &bfin_sir0_device, 913 &bfin_sir0_device,
917#endif 914#endif
@@ -920,11 +917,11 @@ static struct platform_device *cmbf527_devices[] __initdata = {
920#endif 917#endif
921#endif 918#endif
922 919
923#if defined(CONFIG_I2C_BLACKFIN_TWI) || defined(CONFIG_I2C_BLACKFIN_TWI_MODULE) 920#if IS_ENABLED(CONFIG_I2C_BLACKFIN_TWI)
924 &i2c_bfin_twi_device, 921 &i2c_bfin_twi_device,
925#endif 922#endif
926 923
927#if defined(CONFIG_SERIAL_BFIN_SPORT) || defined(CONFIG_SERIAL_BFIN_SPORT_MODULE) 924#if IS_ENABLED(CONFIG_SERIAL_BFIN_SPORT)
928#ifdef CONFIG_SERIAL_BFIN_SPORT0_UART 925#ifdef CONFIG_SERIAL_BFIN_SPORT0_UART
929 &bfin_sport0_uart_device, 926 &bfin_sport0_uart_device,
930#endif 927#endif
@@ -933,11 +930,11 @@ static struct platform_device *cmbf527_devices[] __initdata = {
933#endif 930#endif
934#endif 931#endif
935 932
936#if defined(CONFIG_KEYBOARD_GPIO) || defined(CONFIG_KEYBOARD_GPIO_MODULE) 933#if IS_ENABLED(CONFIG_KEYBOARD_GPIO)
937 &bfin_device_gpiokeys, 934 &bfin_device_gpiokeys,
938#endif 935#endif
939 936
940#if defined(CONFIG_MTD_GPIO_ADDR) || defined(CONFIG_MTD_GPIO_ADDR_MODULE) 937#if IS_ENABLED(CONFIG_MTD_GPIO_ADDR)
941 &cm_flash_device, 938 &cm_flash_device,
942#endif 939#endif
943}; 940};
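
Editor's note: each board file ends with a table like cmbf527_devices[] above, where every optional entry carries the same IS_ENABLED() guard as its definition. A hedged sketch of how such a table reaches the driver core; the rtc-bfin device mirrors the hunks above, while the table name, init function and initcall level are illustrative assumptions.

/* Editor's sketch: config-guarded device table handed to the driver core. */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>

#if IS_ENABLED(CONFIG_RTC_DRV_BFIN)
static struct platform_device demo_rtc_device = {
	.name	= "rtc-bfin",
	.id	= -1,
};
#endif

static struct platform_device *demo_devices[] __initdata = {
#if IS_ENABLED(CONFIG_RTC_DRV_BFIN)
	&demo_rtc_device,
#endif
};

static int __init demo_init(void)
{
	/* Registers whatever subset of devices the config left in the table. */
	return platform_add_devices(demo_devices, ARRAY_SIZE(demo_devices));
}
arch_initcall(demo_init);
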
diff --git a/arch/blackfin/mach-bf527/boards/ezbrd.c b/arch/blackfin/mach-bf527/boards/ezbrd.c
index 50bda79194e5..a3a572352769 100644
--- a/arch/blackfin/mach-bf527/boards/ezbrd.c
+++ b/arch/blackfin/mach-bf527/boards/ezbrd.c
@@ -36,7 +36,7 @@ const char bfin_board_name[] = "ADI BF526-EZBRD";
36 * Driver needs to know address, irq and flag pin. 36 * Driver needs to know address, irq and flag pin.
37 */ 37 */
38 38
39#if defined(CONFIG_USB_MUSB_HDRC) || defined(CONFIG_USB_MUSB_HDRC_MODULE) 39#if IS_ENABLED(CONFIG_USB_MUSB_HDRC)
40static struct resource musb_resources[] = { 40static struct resource musb_resources[] = {
41 [0] = { 41 [0] = {
42 .start = 0xffc03800, 42 .start = 0xffc03800,
@@ -98,7 +98,7 @@ static struct platform_device musb_device = {
98}; 98};
99#endif 99#endif
100 100
101#if defined(CONFIG_MTD_PHYSMAP) || defined(CONFIG_MTD_PHYSMAP_MODULE) 101#if IS_ENABLED(CONFIG_MTD_PHYSMAP)
102static struct mtd_partition ezbrd_partitions[] = { 102static struct mtd_partition ezbrd_partitions[] = {
103 { 103 {
104 .name = "bootloader(nor)", 104 .name = "bootloader(nor)",
@@ -138,7 +138,7 @@ static struct platform_device ezbrd_flash_device = {
138}; 138};
139#endif 139#endif
140 140
141#if defined(CONFIG_MTD_NAND_BF5XX) || defined(CONFIG_MTD_NAND_BF5XX_MODULE) 141#if IS_ENABLED(CONFIG_MTD_NAND_BF5XX)
142static struct mtd_partition partition_info[] = { 142static struct mtd_partition partition_info[] = {
143 { 143 {
144 .name = "bootloader(nand)", 144 .name = "bootloader(nand)",
@@ -188,7 +188,7 @@ static struct platform_device bf5xx_nand_device = {
188}; 188};
189#endif 189#endif
190 190
191#if defined(CONFIG_RTC_DRV_BFIN) || defined(CONFIG_RTC_DRV_BFIN_MODULE) 191#if IS_ENABLED(CONFIG_RTC_DRV_BFIN)
192static struct platform_device rtc_device = { 192static struct platform_device rtc_device = {
193 .name = "rtc-bfin", 193 .name = "rtc-bfin",
194 .id = -1, 194 .id = -1,
@@ -196,7 +196,7 @@ static struct platform_device rtc_device = {
196#endif 196#endif
197 197
198 198
199#if defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE) 199#if IS_ENABLED(CONFIG_BFIN_MAC)
200#include <linux/bfin_mac.h> 200#include <linux/bfin_mac.h>
201static const unsigned short bfin_mac_peripherals[] = P_RMII0; 201static const unsigned short bfin_mac_peripherals[] = P_RMII0;
202 202
@@ -229,8 +229,7 @@ static struct platform_device bfin_mac_device = {
 };
 #endif
 
-#if defined(CONFIG_MTD_M25P80) \
-	|| defined(CONFIG_MTD_M25P80_MODULE)
+#if IS_ENABLED(CONFIG_MTD_M25P80)
 static struct mtd_partition bfin_spi_flash_partitions[] = {
 	{
 		.name = "bootloader(spi)",
@@ -257,13 +256,13 @@ static struct bfin5xx_spi_chip spi_flash_chip_info = {
257}; 256};
258#endif 257#endif
259 258
260#if defined(CONFIG_MMC_SPI) || defined(CONFIG_MMC_SPI_MODULE) 259#if IS_ENABLED(CONFIG_MMC_SPI)
261static struct bfin5xx_spi_chip mmc_spi_chip_info = { 260static struct bfin5xx_spi_chip mmc_spi_chip_info = {
262 .enable_dma = 0, 261 .enable_dma = 0,
263}; 262};
264#endif 263#endif
265 264
266#if defined(CONFIG_TOUCHSCREEN_AD7877) || defined(CONFIG_TOUCHSCREEN_AD7877_MODULE) 265#if IS_ENABLED(CONFIG_TOUCHSCREEN_AD7877)
267static const struct ad7877_platform_data bfin_ad7877_ts_info = { 266static const struct ad7877_platform_data bfin_ad7877_ts_info = {
268 .model = 7877, 267 .model = 7877,
269 .vref_delay_usecs = 50, /* internal, no capacitor */ 268 .vref_delay_usecs = 50, /* internal, no capacitor */
@@ -279,7 +278,7 @@ static const struct ad7877_platform_data bfin_ad7877_ts_info = {
279}; 278};
280#endif 279#endif
281 280
282#if defined(CONFIG_TOUCHSCREEN_AD7879) || defined(CONFIG_TOUCHSCREEN_AD7879_MODULE) 281#if IS_ENABLED(CONFIG_TOUCHSCREEN_AD7879)
283#include <linux/spi/ad7879.h> 282#include <linux/spi/ad7879.h>
284static const struct ad7879_platform_data bfin_ad7879_ts_info = { 283static const struct ad7879_platform_data bfin_ad7879_ts_info = {
285 .model = 7879, /* Model = AD7879 */ 284 .model = 7879, /* Model = AD7879 */
@@ -297,8 +296,7 @@ static const struct ad7879_platform_data bfin_ad7879_ts_info = {
 #endif
 
 static struct spi_board_info bfin_spi_board_info[] __initdata = {
-#if defined(CONFIG_MTD_M25P80) \
-	|| defined(CONFIG_MTD_M25P80_MODULE)
+#if IS_ENABLED(CONFIG_MTD_M25P80)
 	{
 		/* the modalias must be the same as spi device driver name */
 		.modalias = "m25p80", /* Name of spi_driver for this device */
@@ -311,7 +309,7 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
311 }, 309 },
312#endif 310#endif
313 311
314#if defined(CONFIG_MMC_SPI) || defined(CONFIG_MMC_SPI_MODULE) 312#if IS_ENABLED(CONFIG_MMC_SPI)
315 { 313 {
316 .modalias = "mmc_spi", 314 .modalias = "mmc_spi",
317 .max_speed_hz = 25000000, /* max spi clock (SCK) speed in HZ */ 315 .max_speed_hz = 25000000, /* max spi clock (SCK) speed in HZ */
@@ -321,7 +319,7 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
321 .mode = SPI_MODE_3, 319 .mode = SPI_MODE_3,
322 }, 320 },
323#endif 321#endif
324#if defined(CONFIG_TOUCHSCREEN_AD7877) || defined(CONFIG_TOUCHSCREEN_AD7877_MODULE) 322#if IS_ENABLED(CONFIG_TOUCHSCREEN_AD7877)
325 { 323 {
326 .modalias = "ad7877", 324 .modalias = "ad7877",
327 .platform_data = &bfin_ad7877_ts_info, 325 .platform_data = &bfin_ad7877_ts_info,
@@ -331,7 +329,7 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
331 .chip_select = 2, 329 .chip_select = 2,
332 }, 330 },
333#endif 331#endif
334#if defined(CONFIG_TOUCHSCREEN_AD7879_SPI) || defined(CONFIG_TOUCHSCREEN_AD7879_SPI_MODULE) 332#if IS_ENABLED(CONFIG_TOUCHSCREEN_AD7879_SPI)
335 { 333 {
336 .modalias = "ad7879", 334 .modalias = "ad7879",
337 .platform_data = &bfin_ad7879_ts_info, 335 .platform_data = &bfin_ad7879_ts_info,
@@ -342,7 +340,7 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
342 .mode = SPI_CPHA | SPI_CPOL, 340 .mode = SPI_CPHA | SPI_CPOL,
343 }, 341 },
344#endif 342#endif
345#if defined(CONFIG_SND_SOC_WM8731) || defined(CONFIG_SND_SOC_WM8731_MODULE) \ 343#if IS_ENABLED(CONFIG_SND_SOC_WM8731) \
346 && defined(CONFIG_SND_SOC_WM8731_SPI) 344 && defined(CONFIG_SND_SOC_WM8731_SPI)
347 { 345 {
348 .modalias = "wm8731", 346 .modalias = "wm8731",
@@ -352,7 +350,7 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
352 .mode = SPI_MODE_0, 350 .mode = SPI_MODE_0,
353 }, 351 },
354#endif 352#endif
355#if defined(CONFIG_SPI_SPIDEV) || defined(CONFIG_SPI_SPIDEV_MODULE) 353#if IS_ENABLED(CONFIG_SPI_SPIDEV)
356 { 354 {
357 .modalias = "spidev", 355 .modalias = "spidev",
358 .max_speed_hz = 3125000, /* max spi clock (SCK) speed in HZ */ 356 .max_speed_hz = 3125000, /* max spi clock (SCK) speed in HZ */
@@ -360,7 +358,7 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
360 .chip_select = 1, 358 .chip_select = 1,
361 }, 359 },
362#endif 360#endif
363#if defined(CONFIG_FB_BFIN_LQ035Q1) || defined(CONFIG_FB_BFIN_LQ035Q1_MODULE) 361#if IS_ENABLED(CONFIG_FB_BFIN_LQ035Q1)
364 { 362 {
365 .modalias = "bfin-lq035q1-spi", 363 .modalias = "bfin-lq035q1-spi",
366 .max_speed_hz = 20000000, /* max spi clock (SCK) speed in HZ */ 364 .max_speed_hz = 20000000, /* max spi clock (SCK) speed in HZ */
@@ -371,7 +369,7 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
371#endif 369#endif
372}; 370};
373 371
374#if defined(CONFIG_SPI_BFIN5XX) || defined(CONFIG_SPI_BFIN5XX_MODULE) 372#if IS_ENABLED(CONFIG_SPI_BFIN5XX)
375/* SPI controller data */ 373/* SPI controller data */
376static struct bfin5xx_spi_master bfin_spi0_info = { 374static struct bfin5xx_spi_master bfin_spi0_info = {
377 .num_chipselect = 8, 375 .num_chipselect = 8,
@@ -409,7 +407,7 @@ static struct platform_device bfin_spi0_device = {
409}; 407};
410#endif /* spi master and devices */ 408#endif /* spi master and devices */
411 409
412#if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE) 410#if IS_ENABLED(CONFIG_SERIAL_BFIN)
413#ifdef CONFIG_SERIAL_BFIN_UART0 411#ifdef CONFIG_SERIAL_BFIN_UART0
414static struct resource bfin_uart0_resources[] = { 412static struct resource bfin_uart0_resources[] = {
415 { 413 {
@@ -520,7 +518,7 @@ static struct platform_device bfin_uart1_device = {
520#endif 518#endif
521#endif 519#endif
522 520
523#if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE) 521#if IS_ENABLED(CONFIG_BFIN_SIR)
524#ifdef CONFIG_BFIN_SIR0 522#ifdef CONFIG_BFIN_SIR0
525static struct resource bfin_sir0_resources[] = { 523static struct resource bfin_sir0_resources[] = {
526 { 524 {
@@ -575,7 +573,7 @@ static struct platform_device bfin_sir1_device = {
575#endif 573#endif
576#endif 574#endif
577 575
578#if defined(CONFIG_I2C_BLACKFIN_TWI) || defined(CONFIG_I2C_BLACKFIN_TWI_MODULE) 576#if IS_ENABLED(CONFIG_I2C_BLACKFIN_TWI)
579static const u16 bfin_twi0_pins[] = {P_TWI0_SCL, P_TWI0_SDA, 0}; 577static const u16 bfin_twi0_pins[] = {P_TWI0_SCL, P_TWI0_SDA, 0};
580 578
581static struct resource bfin_twi0_resource[] = { 579static struct resource bfin_twi0_resource[] = {
@@ -603,12 +601,12 @@ static struct platform_device i2c_bfin_twi_device = {
603#endif 601#endif
604 602
605static struct i2c_board_info __initdata bfin_i2c_board_info[] = { 603static struct i2c_board_info __initdata bfin_i2c_board_info[] = {
606#if defined(CONFIG_BFIN_TWI_LCD) || defined(CONFIG_BFIN_TWI_LCD_MODULE) 604#if IS_ENABLED(CONFIG_BFIN_TWI_LCD)
607 { 605 {
608 I2C_BOARD_INFO("pcf8574_lcd", 0x22), 606 I2C_BOARD_INFO("pcf8574_lcd", 0x22),
609 }, 607 },
610#endif 608#endif
611#if defined(CONFIG_INPUT_PCF8574) || defined(CONFIG_INPUT_PCF8574_MODULE) 609#if IS_ENABLED(CONFIG_INPUT_PCF8574)
612 { 610 {
613 I2C_BOARD_INFO("pcf8574_keypad", 0x27), 611 I2C_BOARD_INFO("pcf8574_keypad", 0x27),
614 .irq = IRQ_PF8, 612 .irq = IRQ_PF8,
@@ -616,7 +614,7 @@ static struct i2c_board_info __initdata bfin_i2c_board_info[] = {
616#endif 614#endif
617}; 615};
618 616
619#if defined(CONFIG_SERIAL_BFIN_SPORT) || defined(CONFIG_SERIAL_BFIN_SPORT_MODULE) 617#if IS_ENABLED(CONFIG_SERIAL_BFIN_SPORT)
620#ifdef CONFIG_SERIAL_BFIN_SPORT0_UART 618#ifdef CONFIG_SERIAL_BFIN_SPORT0_UART
621static struct resource bfin_sport0_uart_resources[] = { 619static struct resource bfin_sport0_uart_resources[] = {
622 { 620 {
@@ -687,7 +685,7 @@ static struct platform_device bfin_sport1_uart_device = {
687#endif 685#endif
688#endif 686#endif
689 687
690#if defined(CONFIG_KEYBOARD_GPIO) || defined(CONFIG_KEYBOARD_GPIO_MODULE) 688#if IS_ENABLED(CONFIG_KEYBOARD_GPIO)
691#include <linux/input.h> 689#include <linux/input.h>
692#include <linux/gpio_keys.h> 690#include <linux/gpio_keys.h>
693 691
@@ -731,7 +729,7 @@ static struct platform_device bfin_dpmc = {
731 }, 729 },
732}; 730};
733 731
734#if defined(CONFIG_FB_BFIN_LQ035Q1) || defined(CONFIG_FB_BFIN_LQ035Q1_MODULE) 732#if IS_ENABLED(CONFIG_FB_BFIN_LQ035Q1)
735#include <asm/bfin-lq035q1.h> 733#include <asm/bfin-lq035q1.h>
736 734
737static struct bfin_lq035q1fb_disp_info bfin_lq035q1_data = { 735static struct bfin_lq035q1fb_disp_info bfin_lq035q1_data = {
@@ -764,28 +762,28 @@ static struct platform_device *stamp_devices[] __initdata = {
764 762
765 &bfin_dpmc, 763 &bfin_dpmc,
766 764
767#if defined(CONFIG_MTD_NAND_BF5XX) || defined(CONFIG_MTD_NAND_BF5XX_MODULE) 765#if IS_ENABLED(CONFIG_MTD_NAND_BF5XX)
768 &bf5xx_nand_device, 766 &bf5xx_nand_device,
769#endif 767#endif
770 768
771#if defined(CONFIG_RTC_DRV_BFIN) || defined(CONFIG_RTC_DRV_BFIN_MODULE) 769#if IS_ENABLED(CONFIG_RTC_DRV_BFIN)
772 &rtc_device, 770 &rtc_device,
773#endif 771#endif
774 772
775#if defined(CONFIG_USB_MUSB_HDRC) || defined(CONFIG_USB_MUSB_HDRC_MODULE) 773#if IS_ENABLED(CONFIG_USB_MUSB_HDRC)
776 &musb_device, 774 &musb_device,
777#endif 775#endif
778 776
779#if defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE) 777#if IS_ENABLED(CONFIG_BFIN_MAC)
780 &bfin_mii_bus, 778 &bfin_mii_bus,
781 &bfin_mac_device, 779 &bfin_mac_device,
782#endif 780#endif
783 781
784#if defined(CONFIG_SPI_BFIN5XX) || defined(CONFIG_SPI_BFIN5XX_MODULE) 782#if IS_ENABLED(CONFIG_SPI_BFIN5XX)
785 &bfin_spi0_device, 783 &bfin_spi0_device,
786#endif 784#endif
787 785
788#if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE) 786#if IS_ENABLED(CONFIG_SERIAL_BFIN)
789#ifdef CONFIG_SERIAL_BFIN_UART0 787#ifdef CONFIG_SERIAL_BFIN_UART0
790 &bfin_uart0_device, 788 &bfin_uart0_device,
791#endif 789#endif
@@ -794,11 +792,11 @@ static struct platform_device *stamp_devices[] __initdata = {
794#endif 792#endif
795#endif 793#endif
796 794
797#if defined(CONFIG_FB_BFIN_LQ035Q1) || defined(CONFIG_FB_BFIN_LQ035Q1_MODULE) 795#if IS_ENABLED(CONFIG_FB_BFIN_LQ035Q1)
798 &bfin_lq035q1_device, 796 &bfin_lq035q1_device,
799#endif 797#endif
800 798
801#if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE) 799#if IS_ENABLED(CONFIG_BFIN_SIR)
802#ifdef CONFIG_BFIN_SIR0 800#ifdef CONFIG_BFIN_SIR0
803 &bfin_sir0_device, 801 &bfin_sir0_device,
804#endif 802#endif
@@ -807,11 +805,11 @@ static struct platform_device *stamp_devices[] __initdata = {
807#endif 805#endif
808#endif 806#endif
809 807
810#if defined(CONFIG_I2C_BLACKFIN_TWI) || defined(CONFIG_I2C_BLACKFIN_TWI_MODULE) 808#if IS_ENABLED(CONFIG_I2C_BLACKFIN_TWI)
811 &i2c_bfin_twi_device, 809 &i2c_bfin_twi_device,
812#endif 810#endif
813 811
814#if defined(CONFIG_SERIAL_BFIN_SPORT) || defined(CONFIG_SERIAL_BFIN_SPORT_MODULE) 812#if IS_ENABLED(CONFIG_SERIAL_BFIN_SPORT)
815#ifdef CONFIG_SERIAL_BFIN_SPORT0_UART 813#ifdef CONFIG_SERIAL_BFIN_SPORT0_UART
816 &bfin_sport0_uart_device, 814 &bfin_sport0_uart_device,
817#endif 815#endif
@@ -820,11 +818,11 @@ static struct platform_device *stamp_devices[] __initdata = {
820#endif 818#endif
821#endif 819#endif
822 820
823#if defined(CONFIG_KEYBOARD_GPIO) || defined(CONFIG_KEYBOARD_GPIO_MODULE) 821#if IS_ENABLED(CONFIG_KEYBOARD_GPIO)
824 &bfin_device_gpiokeys, 822 &bfin_device_gpiokeys,
825#endif 823#endif
826 824
827#if defined(CONFIG_MTD_PHYSMAP) || defined(CONFIG_MTD_PHYSMAP_MODULE) 825#if IS_ENABLED(CONFIG_MTD_PHYSMAP)
828 &ezbrd_flash_device, 826 &ezbrd_flash_device,
829#endif 827#endif
830}; 828};
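
Editor's note: the same split applies on the I2C side — entries in bfin_i2c_board_info[] are guarded individually and the table is then registered once. A hedged sketch; the two PCF8574 entries echo the hunks above, but the adapter number, the dropped irq field and the init function are assumptions.

/* Editor's sketch: guarded I2C board info, registered once at init time. */
#include <linux/i2c.h>
#include <linux/init.h>
#include <linux/kernel.h>

static struct i2c_board_info __initdata demo_i2c_board_info[] = {
#if IS_ENABLED(CONFIG_BFIN_TWI_LCD)
	{
		I2C_BOARD_INFO("pcf8574_lcd", 0x22),
	},
#endif
#if IS_ENABLED(CONFIG_INPUT_PCF8574)
	{
		I2C_BOARD_INFO("pcf8574_keypad", 0x27),
		/* the real boards also set .irq (IRQ_PF8 in the hunks above) */
	},
#endif
};

static int __init demo_i2c_init(void)
{
	/* TWI0 assumed to be i2c adapter 0 on these boards. */
	return i2c_register_board_info(0, demo_i2c_board_info,
				       ARRAY_SIZE(demo_i2c_board_info));
}
arch_initcall(demo_i2c_init);
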
diff --git a/arch/blackfin/mach-bf527/boards/ezkit.c b/arch/blackfin/mach-bf527/boards/ezkit.c
index d0a0c5e527cd..d64f565dc2a0 100644
--- a/arch/blackfin/mach-bf527/boards/ezkit.c
+++ b/arch/blackfin/mach-bf527/boards/ezkit.c
@@ -42,7 +42,7 @@ const char bfin_board_name[] = "ADI BF527-EZKIT";
42 * Driver needs to know address, irq and flag pin. 42 * Driver needs to know address, irq and flag pin.
43 */ 43 */
44 44
45#if defined(CONFIG_USB_ISP1760_HCD) || defined(CONFIG_USB_ISP1760_HCD_MODULE) 45#if IS_ENABLED(CONFIG_USB_ISP1760_HCD)
46#include <linux/usb/isp1760.h> 46#include <linux/usb/isp1760.h>
47static struct resource bfin_isp1760_resources[] = { 47static struct resource bfin_isp1760_resources[] = {
48 [0] = { 48 [0] = {
@@ -77,7 +77,7 @@ static struct platform_device bfin_isp1760_device = {
77}; 77};
78#endif 78#endif
79 79
80#if defined(CONFIG_USB_MUSB_HDRC) || defined(CONFIG_USB_MUSB_HDRC_MODULE) 80#if IS_ENABLED(CONFIG_USB_MUSB_HDRC)
81static struct resource musb_resources[] = { 81static struct resource musb_resources[] = {
82 [0] = { 82 [0] = {
83 .start = 0xffc03800, 83 .start = 0xffc03800,
@@ -139,7 +139,7 @@ static struct platform_device musb_device = {
139}; 139};
140#endif 140#endif
141 141
142#if defined(CONFIG_FB_BFIN_T350MCQB) || defined(CONFIG_FB_BFIN_T350MCQB_MODULE) 142#if IS_ENABLED(CONFIG_FB_BFIN_T350MCQB)
143 143
144static struct resource bf52x_t350mcqb_resources[] = { 144static struct resource bf52x_t350mcqb_resources[] = {
145 { 145 {
@@ -157,7 +157,7 @@ static struct platform_device bf52x_t350mcqb_device = {
157}; 157};
158#endif 158#endif
159 159
160#if defined(CONFIG_FB_BFIN_LQ035Q1) || defined(CONFIG_FB_BFIN_LQ035Q1_MODULE) 160#if IS_ENABLED(CONFIG_FB_BFIN_LQ035Q1)
161#include <asm/bfin-lq035q1.h> 161#include <asm/bfin-lq035q1.h>
162 162
163static struct bfin_lq035q1fb_disp_info bfin_lq035q1_data = { 163static struct bfin_lq035q1fb_disp_info bfin_lq035q1_data = {
@@ -184,7 +184,7 @@ static struct platform_device bfin_lq035q1_device = {
184}; 184};
185#endif 185#endif
186 186
187#if defined(CONFIG_MTD_PHYSMAP) || defined(CONFIG_MTD_PHYSMAP_MODULE) 187#if IS_ENABLED(CONFIG_MTD_PHYSMAP)
188static struct mtd_partition ezkit_partitions[] = { 188static struct mtd_partition ezkit_partitions[] = {
189 { 189 {
190 .name = "bootloader(nor)", 190 .name = "bootloader(nor)",
@@ -224,7 +224,7 @@ static struct platform_device ezkit_flash_device = {
224}; 224};
225#endif 225#endif
226 226
227#if defined(CONFIG_MTD_NAND_BF5XX) || defined(CONFIG_MTD_NAND_BF5XX_MODULE) 227#if IS_ENABLED(CONFIG_MTD_NAND_BF5XX)
228static struct mtd_partition partition_info[] = { 228static struct mtd_partition partition_info[] = {
229 { 229 {
230 .name = "bootloader(nand)", 230 .name = "bootloader(nand)",
@@ -274,7 +274,7 @@ static struct platform_device bf5xx_nand_device = {
274}; 274};
275#endif 275#endif
276 276
277#if defined(CONFIG_BFIN_CFPCMCIA) || defined(CONFIG_BFIN_CFPCMCIA_MODULE) 277#if IS_ENABLED(CONFIG_BFIN_CFPCMCIA)
278static struct resource bfin_pcmcia_cf_resources[] = { 278static struct resource bfin_pcmcia_cf_resources[] = {
279 { 279 {
280 .start = 0x20310000, /* IO PORT */ 280 .start = 0x20310000, /* IO PORT */
@@ -303,14 +303,14 @@ static struct platform_device bfin_pcmcia_cf_device = {
303}; 303};
304#endif 304#endif
305 305
306#if defined(CONFIG_RTC_DRV_BFIN) || defined(CONFIG_RTC_DRV_BFIN_MODULE) 306#if IS_ENABLED(CONFIG_RTC_DRV_BFIN)
307static struct platform_device rtc_device = { 307static struct platform_device rtc_device = {
308 .name = "rtc-bfin", 308 .name = "rtc-bfin",
309 .id = -1, 309 .id = -1,
310}; 310};
311#endif 311#endif
312 312
313#if defined(CONFIG_SMC91X) || defined(CONFIG_SMC91X_MODULE) 313#if IS_ENABLED(CONFIG_SMC91X)
314#include <linux/smc91x.h> 314#include <linux/smc91x.h>
315 315
316static struct smc91x_platdata smc91x_info = { 316static struct smc91x_platdata smc91x_info = {
@@ -343,7 +343,7 @@ static struct platform_device smc91x_device = {
343}; 343};
344#endif 344#endif
345 345
346#if defined(CONFIG_DM9000) || defined(CONFIG_DM9000_MODULE) 346#if IS_ENABLED(CONFIG_DM9000)
347static struct resource dm9000_resources[] = { 347static struct resource dm9000_resources[] = {
348 [0] = { 348 [0] = {
349 .start = 0x203FB800, 349 .start = 0x203FB800,
@@ -370,7 +370,7 @@ static struct platform_device dm9000_device = {
370}; 370};
371#endif 371#endif
372 372
373#if defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE) 373#if IS_ENABLED(CONFIG_BFIN_MAC)
374#include <linux/bfin_mac.h> 374#include <linux/bfin_mac.h>
375static const unsigned short bfin_mac_peripherals[] = P_RMII0; 375static const unsigned short bfin_mac_peripherals[] = P_RMII0;
376 376
@@ -403,7 +403,7 @@ static struct platform_device bfin_mac_device = {
403}; 403};
404#endif 404#endif
405 405
406#if defined(CONFIG_USB_NET2272) || defined(CONFIG_USB_NET2272_MODULE) 406#if IS_ENABLED(CONFIG_USB_NET2272)
407static struct resource net2272_bfin_resources[] = { 407static struct resource net2272_bfin_resources[] = {
408 { 408 {
409 .start = 0x20300000, 409 .start = 0x20300000,
@@ -427,8 +427,7 @@ static struct platform_device net2272_bfin_device = {
427}; 427};
428#endif 428#endif
429 429
430#if defined(CONFIG_MTD_M25P80) \ 430#if IS_ENABLED(CONFIG_MTD_M25P80)
431 || defined(CONFIG_MTD_M25P80_MODULE)
432static struct mtd_partition bfin_spi_flash_partitions[] = { 431static struct mtd_partition bfin_spi_flash_partitions[] = {
433 { 432 {
434 .name = "bootloader(spi)", 433 .name = "bootloader(spi)",
@@ -455,13 +454,13 @@ static struct bfin5xx_spi_chip spi_flash_chip_info = {
455}; 454};
456#endif 455#endif
457 456
458#if defined(CONFIG_MMC_SPI) || defined(CONFIG_MMC_SPI_MODULE) 457#if IS_ENABLED(CONFIG_MMC_SPI)
459static struct bfin5xx_spi_chip mmc_spi_chip_info = { 458static struct bfin5xx_spi_chip mmc_spi_chip_info = {
460 .enable_dma = 0, 459 .enable_dma = 0,
461}; 460};
462#endif 461#endif
463 462
464#if defined(CONFIG_TOUCHSCREEN_AD7877) || defined(CONFIG_TOUCHSCREEN_AD7877_MODULE) 463#if IS_ENABLED(CONFIG_TOUCHSCREEN_AD7877)
465static const struct ad7877_platform_data bfin_ad7877_ts_info = { 464static const struct ad7877_platform_data bfin_ad7877_ts_info = {
466 .model = 7877, 465 .model = 7877,
467 .vref_delay_usecs = 50, /* internal, no capacitor */ 466 .vref_delay_usecs = 50, /* internal, no capacitor */
@@ -477,7 +476,7 @@ static const struct ad7877_platform_data bfin_ad7877_ts_info = {
477}; 476};
478#endif 477#endif
479 478
480#if defined(CONFIG_TOUCHSCREEN_AD7879) || defined(CONFIG_TOUCHSCREEN_AD7879_MODULE) 479#if IS_ENABLED(CONFIG_TOUCHSCREEN_AD7879)
481#include <linux/spi/ad7879.h> 480#include <linux/spi/ad7879.h>
482static const struct ad7879_platform_data bfin_ad7879_ts_info = { 481static const struct ad7879_platform_data bfin_ad7879_ts_info = {
483 .model = 7879, /* Model = AD7879 */ 482 .model = 7879, /* Model = AD7879 */
@@ -493,7 +492,7 @@ static const struct ad7879_platform_data bfin_ad7879_ts_info = {
493}; 492};
494#endif 493#endif
495 494
496#if defined(CONFIG_SND_BF5XX_I2S) || defined(CONFIG_SND_BF5XX_I2S_MODULE) 495#if IS_ENABLED(CONFIG_SND_BF5XX_I2S)
497 496
498static const u16 bfin_snd_pin[][7] = { 497static const u16 bfin_snd_pin[][7] = {
499 {P_SPORT0_DTPRI, P_SPORT0_TSCLK, P_SPORT0_RFS, 498 {P_SPORT0_DTPRI, P_SPORT0_TSCLK, P_SPORT0_RFS,
@@ -541,21 +540,21 @@ static struct resource bfin_snd_resources[][4] = {
541}; 540};
542#endif 541#endif
543 542
544#if defined(CONFIG_SND_BF5XX_I2S) || defined(CONFIG_SND_BF5XX_I2S_MODULE) 543#if IS_ENABLED(CONFIG_SND_BF5XX_I2S)
545static struct platform_device bfin_i2s_pcm = { 544static struct platform_device bfin_i2s_pcm = {
546 .name = "bfin-i2s-pcm-audio", 545 .name = "bfin-i2s-pcm-audio",
547 .id = -1, 546 .id = -1,
548}; 547};
549#endif 548#endif
550 549
551#if defined(CONFIG_SND_BF5XX_AC97) || defined(CONFIG_SND_BF5XX_AC97_MODULE) 550#if IS_ENABLED(CONFIG_SND_BF5XX_AC97)
552static struct platform_device bfin_ac97_pcm = { 551static struct platform_device bfin_ac97_pcm = {
553 .name = "bfin-ac97-pcm-audio", 552 .name = "bfin-ac97-pcm-audio",
554 .id = -1, 553 .id = -1,
555}; 554};
556#endif 555#endif
557 556
558#if defined(CONFIG_SND_BF5XX_I2S) || defined(CONFIG_SND_BF5XX_I2S_MODULE) 557#if IS_ENABLED(CONFIG_SND_BF5XX_I2S)
559static struct platform_device bfin_i2s = { 558static struct platform_device bfin_i2s = {
560 .name = "bfin-i2s", 559 .name = "bfin-i2s",
561 .id = CONFIG_SND_BF5XX_SPORT_NUM, 560 .id = CONFIG_SND_BF5XX_SPORT_NUM,
@@ -567,8 +566,7 @@ static struct platform_device bfin_i2s = {
567}; 566};
568#endif 567#endif
569 568
570#if defined(CONFIG_SND_BF5XX_SOC_AD1836) \ 569#if IS_ENABLED(CONFIG_SND_BF5XX_SOC_AD1836)
571 || defined(CONFIG_SND_BF5XX_SOC_AD1836_MODULE)
572static const char * const ad1836_link[] = { 570static const char * const ad1836_link[] = {
573 "bfin-i2s.0", 571 "bfin-i2s.0",
574 "spi0.4", 572 "spi0.4",
@@ -583,8 +581,7 @@ static struct platform_device bfin_ad1836_machine = {
583#endif 581#endif
584 582
585static struct spi_board_info bfin_spi_board_info[] __initdata = { 583static struct spi_board_info bfin_spi_board_info[] __initdata = {
586#if defined(CONFIG_MTD_M25P80) \ 584#if IS_ENABLED(CONFIG_MTD_M25P80)
587 || defined(CONFIG_MTD_M25P80_MODULE)
588 { 585 {
589 /* the modalias must be the same as spi device driver name */ 586 /* the modalias must be the same as spi device driver name */
590 .modalias = "m25p80", /* Name of spi_driver for this device */ 587 .modalias = "m25p80", /* Name of spi_driver for this device */
@@ -597,8 +594,7 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
597 }, 594 },
598#endif 595#endif
599 596
600#if defined(CONFIG_SND_BF5XX_SOC_AD183X) \ 597#if IS_ENABLED(CONFIG_SND_BF5XX_SOC_AD183X)
601 || defined(CONFIG_SND_BF5XX_SOC_AD183X_MODULE)
602 { 598 {
603 .modalias = "ad183x", 599 .modalias = "ad183x",
604 .max_speed_hz = 3125000, /* max spi clock (SCK) speed in HZ */ 600 .max_speed_hz = 3125000, /* max spi clock (SCK) speed in HZ */
@@ -608,7 +604,7 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
608 .mode = SPI_MODE_3, 604 .mode = SPI_MODE_3,
609 }, 605 },
610#endif 606#endif
611#if defined(CONFIG_MMC_SPI) || defined(CONFIG_MMC_SPI_MODULE) 607#if IS_ENABLED(CONFIG_MMC_SPI)
612 { 608 {
613 .modalias = "mmc_spi", 609 .modalias = "mmc_spi",
614 .max_speed_hz = 20000000, /* max spi clock (SCK) speed in HZ */ 610 .max_speed_hz = 20000000, /* max spi clock (SCK) speed in HZ */
@@ -619,7 +615,7 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
619 }, 615 },
620#endif 616#endif
621 617
622#if defined(CONFIG_TOUCHSCREEN_AD7877) || defined(CONFIG_TOUCHSCREEN_AD7877_MODULE) 618#if IS_ENABLED(CONFIG_TOUCHSCREEN_AD7877)
623 { 619 {
624 .modalias = "ad7877", 620 .modalias = "ad7877",
625 .platform_data = &bfin_ad7877_ts_info, 621 .platform_data = &bfin_ad7877_ts_info,
@@ -629,7 +625,7 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
629 .chip_select = 2, 625 .chip_select = 2,
630 }, 626 },
631#endif 627#endif
632#if defined(CONFIG_TOUCHSCREEN_AD7879_SPI) || defined(CONFIG_TOUCHSCREEN_AD7879_SPI_MODULE) 628#if IS_ENABLED(CONFIG_TOUCHSCREEN_AD7879_SPI)
633 { 629 {
634 .modalias = "ad7879", 630 .modalias = "ad7879",
635 .platform_data = &bfin_ad7879_ts_info, 631 .platform_data = &bfin_ad7879_ts_info,
@@ -640,7 +636,7 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
640 .mode = SPI_CPHA | SPI_CPOL, 636 .mode = SPI_CPHA | SPI_CPOL,
641 }, 637 },
642#endif 638#endif
643#if defined(CONFIG_SPI_SPIDEV) || defined(CONFIG_SPI_SPIDEV_MODULE) 639#if IS_ENABLED(CONFIG_SPI_SPIDEV)
644 { 640 {
645 .modalias = "spidev", 641 .modalias = "spidev",
646 .max_speed_hz = 3125000, /* max spi clock (SCK) speed in HZ */ 642 .max_speed_hz = 3125000, /* max spi clock (SCK) speed in HZ */
@@ -648,7 +644,7 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
648 .chip_select = 1, 644 .chip_select = 1,
649 }, 645 },
650#endif 646#endif
651#if defined(CONFIG_FB_BFIN_LQ035Q1) || defined(CONFIG_FB_BFIN_LQ035Q1_MODULE) 647#if IS_ENABLED(CONFIG_FB_BFIN_LQ035Q1)
652 { 648 {
653 .modalias = "bfin-lq035q1-spi", 649 .modalias = "bfin-lq035q1-spi",
654 .max_speed_hz = 20000000, /* max spi clock (SCK) speed in HZ */ 650 .max_speed_hz = 20000000, /* max spi clock (SCK) speed in HZ */
@@ -659,7 +655,7 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
659#endif 655#endif
660}; 656};
661 657
662#if defined(CONFIG_SPI_BFIN5XX) || defined(CONFIG_SPI_BFIN5XX_MODULE) 658#if IS_ENABLED(CONFIG_SPI_BFIN5XX)
663/* SPI controller data */ 659/* SPI controller data */
664static struct bfin5xx_spi_master bfin_spi0_info = { 660static struct bfin5xx_spi_master bfin_spi0_info = {
665 .num_chipselect = 8, 661 .num_chipselect = 8,
@@ -697,7 +693,7 @@ static struct platform_device bfin_spi0_device = {
697}; 693};
698#endif /* spi master and devices */ 694#endif /* spi master and devices */
699 695
700#if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE) 696#if IS_ENABLED(CONFIG_SERIAL_BFIN)
701#ifdef CONFIG_SERIAL_BFIN_UART0 697#ifdef CONFIG_SERIAL_BFIN_UART0
702static struct resource bfin_uart0_resources[] = { 698static struct resource bfin_uart0_resources[] = {
703 { 699 {
@@ -808,7 +804,7 @@ static struct platform_device bfin_uart1_device = {
808#endif 804#endif
809#endif 805#endif
810 806
811#if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE) 807#if IS_ENABLED(CONFIG_BFIN_SIR)
812#ifdef CONFIG_BFIN_SIR0 808#ifdef CONFIG_BFIN_SIR0
813static struct resource bfin_sir0_resources[] = { 809static struct resource bfin_sir0_resources[] = {
814 { 810 {
@@ -863,7 +859,7 @@ static struct platform_device bfin_sir1_device = {
863#endif 859#endif
864#endif 860#endif
865 861
866#if defined(CONFIG_I2C_BLACKFIN_TWI) || defined(CONFIG_I2C_BLACKFIN_TWI_MODULE) 862#if IS_ENABLED(CONFIG_I2C_BLACKFIN_TWI)
867static const u16 bfin_twi0_pins[] = {P_TWI0_SCL, P_TWI0_SDA, 0}; 863static const u16 bfin_twi0_pins[] = {P_TWI0_SCL, P_TWI0_SDA, 0};
868 864
869static struct resource bfin_twi0_resource[] = { 865static struct resource bfin_twi0_resource[] = {
@@ -890,7 +886,7 @@ static struct platform_device i2c_bfin_twi_device = {
890}; 886};
891#endif 887#endif
892 888
893#if defined(CONFIG_PMIC_ADP5520) || defined(CONFIG_PMIC_ADP5520_MODULE) 889#if IS_ENABLED(CONFIG_PMIC_ADP5520)
894#include <linux/mfd/adp5520.h> 890#include <linux/mfd/adp5520.h>
895 891
896 /* 892 /*
@@ -956,54 +952,54 @@ static struct adp5520_platform_data adp5520_pdev_data = {
956#endif 952#endif
957 953
958static struct i2c_board_info __initdata bfin_i2c_board_info[] = { 954static struct i2c_board_info __initdata bfin_i2c_board_info[] = {
959#if defined(CONFIG_BFIN_TWI_LCD) || defined(CONFIG_BFIN_TWI_LCD_MODULE) 955#if IS_ENABLED(CONFIG_BFIN_TWI_LCD)
960 { 956 {
961 I2C_BOARD_INFO("pcf8574_lcd", 0x22), 957 I2C_BOARD_INFO("pcf8574_lcd", 0x22),
962 }, 958 },
963#endif 959#endif
964#if defined(CONFIG_INPUT_PCF8574) || defined(CONFIG_INPUT_PCF8574_MODULE) 960#if IS_ENABLED(CONFIG_INPUT_PCF8574)
965 { 961 {
966 I2C_BOARD_INFO("pcf8574_keypad", 0x27), 962 I2C_BOARD_INFO("pcf8574_keypad", 0x27),
967 .irq = IRQ_PF8, 963 .irq = IRQ_PF8,
968 }, 964 },
969#endif 965#endif
970#if defined(CONFIG_FB_BFIN_7393) || defined(CONFIG_FB_BFIN_7393_MODULE) 966#if IS_ENABLED(CONFIG_FB_BFIN_7393)
971 { 967 {
972 I2C_BOARD_INFO("bfin-adv7393", 0x2B), 968 I2C_BOARD_INFO("bfin-adv7393", 0x2B),
973 }, 969 },
974#endif 970#endif
975#if defined(CONFIG_TOUCHSCREEN_AD7879_I2C) || defined(CONFIG_TOUCHSCREEN_AD7879_I2C_MODULE) 971#if IS_ENABLED(CONFIG_TOUCHSCREEN_AD7879_I2C)
976 { 972 {
977 I2C_BOARD_INFO("ad7879", 0x2C), 973 I2C_BOARD_INFO("ad7879", 0x2C),
978 .irq = IRQ_PF8, 974 .irq = IRQ_PF8,
979 .platform_data = (void *)&bfin_ad7879_ts_info, 975 .platform_data = (void *)&bfin_ad7879_ts_info,
980 }, 976 },
981#endif 977#endif
982#if defined(CONFIG_PMIC_ADP5520) || defined(CONFIG_PMIC_ADP5520_MODULE) 978#if IS_ENABLED(CONFIG_PMIC_ADP5520)
983 { 979 {
984 I2C_BOARD_INFO("pmic-adp5520", 0x32), 980 I2C_BOARD_INFO("pmic-adp5520", 0x32),
985 .irq = IRQ_PF9, 981 .irq = IRQ_PF9,
986 .platform_data = (void *)&adp5520_pdev_data, 982 .platform_data = (void *)&adp5520_pdev_data,
987 }, 983 },
988#endif 984#endif
989#if defined(CONFIG_SND_SOC_SSM2602) || defined(CONFIG_SND_SOC_SSM2602_MODULE) 985#if IS_ENABLED(CONFIG_SND_SOC_SSM2602)
990 { 986 {
991 I2C_BOARD_INFO("ssm2602", 0x1b), 987 I2C_BOARD_INFO("ssm2602", 0x1b),
992 }, 988 },
993#endif 989#endif
994#if defined(CONFIG_BFIN_TWI_LCD) || defined(CONFIG_BFIN_TWI_LCD_MODULE) 990#if IS_ENABLED(CONFIG_BFIN_TWI_LCD)
995 { 991 {
996 I2C_BOARD_INFO("ad5252", 0x2f), 992 I2C_BOARD_INFO("ad5252", 0x2f),
997 }, 993 },
998#endif 994#endif
999#if defined(CONFIG_SND_SOC_ADAU1373) || defined(CONFIG_SND_SOC_ADAU1373_MODULE) 995#if IS_ENABLED(CONFIG_SND_SOC_ADAU1373)
1000 { 996 {
1001 I2C_BOARD_INFO("adau1373", 0x1A), 997 I2C_BOARD_INFO("adau1373", 0x1A),
1002 }, 998 },
1003#endif 999#endif
1004}; 1000};
1005 1001
1006#if defined(CONFIG_SERIAL_BFIN_SPORT) || defined(CONFIG_SERIAL_BFIN_SPORT_MODULE) 1002#if IS_ENABLED(CONFIG_SERIAL_BFIN_SPORT)
1007#ifdef CONFIG_SERIAL_BFIN_SPORT0_UART 1003#ifdef CONFIG_SERIAL_BFIN_SPORT0_UART
1008static struct resource bfin_sport0_uart_resources[] = { 1004static struct resource bfin_sport0_uart_resources[] = {
1009 { 1005 {
@@ -1074,7 +1070,7 @@ static struct platform_device bfin_sport1_uart_device = {
1074#endif 1070#endif
1075#endif 1071#endif
1076 1072
1077#if defined(CONFIG_KEYBOARD_GPIO) || defined(CONFIG_KEYBOARD_GPIO_MODULE) 1073#if IS_ENABLED(CONFIG_KEYBOARD_GPIO)
1078#include <linux/gpio_keys.h> 1074#include <linux/gpio_keys.h>
1079 1075
1080static struct gpio_keys_button bfin_gpio_keys_table[] = { 1076static struct gpio_keys_button bfin_gpio_keys_table[] = {
@@ -1095,7 +1091,7 @@ static struct platform_device bfin_device_gpiokeys = {
1095}; 1091};
1096#endif 1092#endif
1097 1093
1098#if defined(CONFIG_INPUT_BFIN_ROTARY) || defined(CONFIG_INPUT_BFIN_ROTARY_MODULE) 1094#if IS_ENABLED(CONFIG_INPUT_BFIN_ROTARY)
1099#include <asm/bfin_rotary.h> 1095#include <asm/bfin_rotary.h>
1100 1096
1101static struct bfin_rotary_platform_data bfin_rotary_data = { 1097static struct bfin_rotary_platform_data bfin_rotary_data = {
@@ -1153,56 +1149,56 @@ static struct platform_device *stamp_devices[] __initdata = {
1153 1149
1154 &bfin_dpmc, 1150 &bfin_dpmc,
1155 1151
1156#if defined(CONFIG_MTD_NAND_BF5XX) || defined(CONFIG_MTD_NAND_BF5XX_MODULE) 1152#if IS_ENABLED(CONFIG_MTD_NAND_BF5XX)
1157 &bf5xx_nand_device, 1153 &bf5xx_nand_device,
1158#endif 1154#endif
1159 1155
1160#if defined(CONFIG_BFIN_CFPCMCIA) || defined(CONFIG_BFIN_CFPCMCIA_MODULE) 1156#if IS_ENABLED(CONFIG_BFIN_CFPCMCIA)
1161 &bfin_pcmcia_cf_device, 1157 &bfin_pcmcia_cf_device,
1162#endif 1158#endif
1163 1159
1164#if defined(CONFIG_RTC_DRV_BFIN) || defined(CONFIG_RTC_DRV_BFIN_MODULE) 1160#if IS_ENABLED(CONFIG_RTC_DRV_BFIN)
1165 &rtc_device, 1161 &rtc_device,
1166#endif 1162#endif
1167 1163
1168#if defined(CONFIG_USB_ISP1760_HCD) || defined(CONFIG_USB_ISP1760_HCD_MODULE) 1164#if IS_ENABLED(CONFIG_USB_ISP1760_HCD)
1169 &bfin_isp1760_device, 1165 &bfin_isp1760_device,
1170#endif 1166#endif
1171 1167
1172#if defined(CONFIG_USB_MUSB_HDRC) || defined(CONFIG_USB_MUSB_HDRC_MODULE) 1168#if IS_ENABLED(CONFIG_USB_MUSB_HDRC)
1173 &musb_device, 1169 &musb_device,
1174#endif 1170#endif
1175 1171
1176#if defined(CONFIG_SMC91X) || defined(CONFIG_SMC91X_MODULE) 1172#if IS_ENABLED(CONFIG_SMC91X)
1177 &smc91x_device, 1173 &smc91x_device,
1178#endif 1174#endif
1179 1175
1180#if defined(CONFIG_DM9000) || defined(CONFIG_DM9000_MODULE) 1176#if IS_ENABLED(CONFIG_DM9000)
1181 &dm9000_device, 1177 &dm9000_device,
1182#endif 1178#endif
1183 1179
1184#if defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE) 1180#if IS_ENABLED(CONFIG_BFIN_MAC)
1185 &bfin_mii_bus, 1181 &bfin_mii_bus,
1186 &bfin_mac_device, 1182 &bfin_mac_device,
1187#endif 1183#endif
1188 1184
1189#if defined(CONFIG_USB_NET2272) || defined(CONFIG_USB_NET2272_MODULE) 1185#if IS_ENABLED(CONFIG_USB_NET2272)
1190 &net2272_bfin_device, 1186 &net2272_bfin_device,
1191#endif 1187#endif
1192 1188
1193#if defined(CONFIG_SPI_BFIN5XX) || defined(CONFIG_SPI_BFIN5XX_MODULE) 1189#if IS_ENABLED(CONFIG_SPI_BFIN5XX)
1194 &bfin_spi0_device, 1190 &bfin_spi0_device,
1195#endif 1191#endif
1196 1192
1197#if defined(CONFIG_FB_BFIN_T350MCQB) || defined(CONFIG_FB_BFIN_T350MCQB_MODULE) 1193#if IS_ENABLED(CONFIG_FB_BFIN_T350MCQB)
1198 &bf52x_t350mcqb_device, 1194 &bf52x_t350mcqb_device,
1199#endif 1195#endif
1200 1196
1201#if defined(CONFIG_FB_BFIN_LQ035Q1) || defined(CONFIG_FB_BFIN_LQ035Q1_MODULE) 1197#if IS_ENABLED(CONFIG_FB_BFIN_LQ035Q1)
1202 &bfin_lq035q1_device, 1198 &bfin_lq035q1_device,
1203#endif 1199#endif
1204 1200
1205#if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE) 1201#if IS_ENABLED(CONFIG_SERIAL_BFIN)
1206#ifdef CONFIG_SERIAL_BFIN_UART0 1202#ifdef CONFIG_SERIAL_BFIN_UART0
1207 &bfin_uart0_device, 1203 &bfin_uart0_device,
1208#endif 1204#endif
@@ -1211,7 +1207,7 @@ static struct platform_device *stamp_devices[] __initdata = {
1211#endif 1207#endif
1212#endif 1208#endif
1213 1209
1214#if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE) 1210#if IS_ENABLED(CONFIG_BFIN_SIR)
1215#ifdef CONFIG_BFIN_SIR0 1211#ifdef CONFIG_BFIN_SIR0
1216 &bfin_sir0_device, 1212 &bfin_sir0_device,
1217#endif 1213#endif
@@ -1220,11 +1216,11 @@ static struct platform_device *stamp_devices[] __initdata = {
1220#endif 1216#endif
1221#endif 1217#endif
1222 1218
1223#if defined(CONFIG_I2C_BLACKFIN_TWI) || defined(CONFIG_I2C_BLACKFIN_TWI_MODULE) 1219#if IS_ENABLED(CONFIG_I2C_BLACKFIN_TWI)
1224 &i2c_bfin_twi_device, 1220 &i2c_bfin_twi_device,
1225#endif 1221#endif
1226 1222
1227#if defined(CONFIG_SERIAL_BFIN_SPORT) || defined(CONFIG_SERIAL_BFIN_SPORT_MODULE) 1223#if IS_ENABLED(CONFIG_SERIAL_BFIN_SPORT)
1228#ifdef CONFIG_SERIAL_BFIN_SPORT0_UART 1224#ifdef CONFIG_SERIAL_BFIN_SPORT0_UART
1229 &bfin_sport0_uart_device, 1225 &bfin_sport0_uart_device,
1230#endif 1226#endif
@@ -1233,32 +1229,31 @@ static struct platform_device *stamp_devices[] __initdata = {
1233#endif 1229#endif
1234#endif 1230#endif
1235 1231
1236#if defined(CONFIG_KEYBOARD_GPIO) || defined(CONFIG_KEYBOARD_GPIO_MODULE) 1232#if IS_ENABLED(CONFIG_KEYBOARD_GPIO)
1237 &bfin_device_gpiokeys, 1233 &bfin_device_gpiokeys,
1238#endif 1234#endif
1239 1235
1240#if defined(CONFIG_INPUT_BFIN_ROTARY) || defined(CONFIG_INPUT_BFIN_ROTARY_MODULE) 1236#if IS_ENABLED(CONFIG_INPUT_BFIN_ROTARY)
1241 &bfin_rotary_device, 1237 &bfin_rotary_device,
1242#endif 1238#endif
1243 1239
1244#if defined(CONFIG_MTD_PHYSMAP) || defined(CONFIG_MTD_PHYSMAP_MODULE) 1240#if IS_ENABLED(CONFIG_MTD_PHYSMAP)
1245 &ezkit_flash_device, 1241 &ezkit_flash_device,
1246#endif 1242#endif
1247 1243
1248#if defined(CONFIG_SND_BF5XX_I2S) || defined(CONFIG_SND_BF5XX_I2S_MODULE) 1244#if IS_ENABLED(CONFIG_SND_BF5XX_I2S)
1249 &bfin_i2s_pcm, 1245 &bfin_i2s_pcm,
1250#endif 1246#endif
1251 1247
1252#if defined(CONFIG_SND_BF5XX_AC97) || defined(CONFIG_SND_BF5XX_AC97_MODULE) 1248#if IS_ENABLED(CONFIG_SND_BF5XX_AC97)
1253 &bfin_ac97_pcm, 1249 &bfin_ac97_pcm,
1254#endif 1250#endif
1255 1251
1256#if defined(CONFIG_SND_BF5XX_I2S) || defined(CONFIG_SND_BF5XX_I2S_MODULE) 1252#if IS_ENABLED(CONFIG_SND_BF5XX_I2S)
1257 &bfin_i2s, 1253 &bfin_i2s,
1258#endif 1254#endif
1259 1255
1260#if defined(CONFIG_SND_BF5XX_SOC_AD1836) || \ 1256#if IS_ENABLED(CONFIG_SND_BF5XX_SOC_AD1836)
1261 defined(CONFIG_SND_BF5XX_SOC_AD1836_MODULE)
1262 &bfin_ad1836_machine, 1257 &bfin_ad1836_machine,
1263#endif 1258#endif
1264}; 1259};
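
Every hunk in this series applies the same substitution: the long-hand guard "#if defined(CONFIG_FOO) || defined(CONFIG_FOO_MODULE)" becomes "#if IS_ENABLED(CONFIG_FOO)". IS_ENABLED() comes from include/linux/kconfig.h and evaluates to 1 when the option is either built-in ('y') or modular ('m'), so the board files behave exactly as before. The sketch below is a simplified, self-contained re-derivation of that preprocessor trick for illustration only — the MY_* helper names and the hand-written CONFIG_DM9000_MODULE define simulate Kconfig output and are not the kernel header's actual code.

/*
 * Simplified, standalone illustration of the IS_ENABLED() idea.
 * NOT the kernel's implementation; include/linux/kconfig.h reaches the
 * same result with its own helpers.  Kconfig turns "CONFIG_DM9000=y"
 * into "#define CONFIG_DM9000 1" and "CONFIG_DM9000=m" into
 * "#define CONFIG_DM9000_MODULE 1", so one test covering either
 * spelling handles built-in and modular builds alike.
 */
#include <stdio.h>

#define CONFIG_DM9000_MODULE 1			/* simulate CONFIG_DM9000=m */

/*
 * If `val` expands to 1, ARG_PLACEHOLDER_##val becomes ARG_PLACEHOLDER_1,
 * which expands to "0," and shifts a literal 1 into the second argument
 * slot; if the symbol is undefined, nothing shifts and the 0 default is
 * taken.  (Relies on the same empty-__VA_ARGS__ leniency the kernel uses;
 * accepted by GCC/Clang and by C23.)
 */
#define ARG_PLACEHOLDER_1	0,
#define take_second_arg(ignored, val, ...)	val
#define is_defined_step2(arg1_or_junk)	take_second_arg(arg1_or_junk 1, 0)
#define is_defined_step1(val)		is_defined_step2(ARG_PLACEHOLDER_##val)
#define is_defined(x)			is_defined_step1(x)

#define MY_IS_BUILTIN(option)	is_defined(option)
#define MY_IS_MODULE(option)	is_defined(option##_MODULE)
#define MY_IS_ENABLED(option)	(MY_IS_BUILTIN(option) || MY_IS_MODULE(option))

int main(void)
{
	/* Usable as a plain C expression... */
	printf("CONFIG_DM9000: builtin=%d module=%d enabled=%d\n",
	       MY_IS_BUILTIN(CONFIG_DM9000),
	       MY_IS_MODULE(CONFIG_DM9000),
	       MY_IS_ENABLED(CONFIG_DM9000));

	/* ...and inside #if, which is how the board files use it. */
#if MY_IS_ENABLED(CONFIG_DM9000)
	printf("dm9000_device would be registered\n");
#endif
	return 0;
}

Built with gcc -Wall, this should print "CONFIG_DM9000: builtin=0 module=1 enabled=1" and compile the #if branch in — exactly the case the old two-symbol test was written to catch. Note that the inner #ifdef CONFIG_SERIAL_BFIN_UART0-style guards in these files are left untouched: those are plain bool options with no _MODULE counterpart, so a bare #ifdef is already sufficient for them.
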
diff --git a/arch/blackfin/mach-bf527/boards/tll6527m.c b/arch/blackfin/mach-bf527/boards/tll6527m.c
index 1509c5a8a3ff..a0f5856a5ff8 100644
--- a/arch/blackfin/mach-bf527/boards/tll6527m.c
+++ b/arch/blackfin/mach-bf527/boards/tll6527m.c
@@ -28,8 +28,7 @@
28#include <asm/portmux.h> 28#include <asm/portmux.h>
29#include <asm/dpmc.h> 29#include <asm/dpmc.h>
30 30
31#if defined(CONFIG_TOUCHSCREEN_AD7879) \ 31#if IS_ENABLED(CONFIG_TOUCHSCREEN_AD7879)
32 || defined(CONFIG_TOUCHSCREEN_AD7879_MODULE)
33#include <linux/spi/ad7879.h> 32#include <linux/spi/ad7879.h>
34#define LCD_BACKLIGHT_GPIO 0x40 33#define LCD_BACKLIGHT_GPIO 0x40
35/* TLL6527M uses TLL7UIQ35 / ADI LCD EZ Extender. AD7879 AUX GPIO is used for 34/* TLL6527M uses TLL7UIQ35 / ADI LCD EZ Extender. AD7879 AUX GPIO is used for
@@ -45,7 +44,7 @@ const char bfin_board_name[] = "TLL6527M";
45 * Driver needs to know address, irq and flag pin. 44 * Driver needs to know address, irq and flag pin.
46 */ 45 */
47 46
48#if defined(CONFIG_USB_MUSB_HDRC) || defined(CONFIG_USB_MUSB_HDRC_MODULE) 47#if IS_ENABLED(CONFIG_USB_MUSB_HDRC)
49static struct resource musb_resources[] = { 48static struct resource musb_resources[] = {
50 [0] = { 49 [0] = {
51 .start = 0xffc03800, 50 .start = 0xffc03800,
@@ -104,7 +103,7 @@ static struct platform_device musb_device = {
104}; 103};
105#endif 104#endif
106 105
107#if defined(CONFIG_FB_BFIN_LQ035Q1) || defined(CONFIG_FB_BFIN_LQ035Q1_MODULE) 106#if IS_ENABLED(CONFIG_FB_BFIN_LQ035Q1)
108#include <asm/bfin-lq035q1.h> 107#include <asm/bfin-lq035q1.h>
109 108
110static struct bfin_lq035q1fb_disp_info bfin_lq035q1_data = { 109static struct bfin_lq035q1fb_disp_info bfin_lq035q1_data = {
@@ -133,7 +132,7 @@ static struct platform_device bfin_lq035q1_device = {
133}; 132};
134#endif 133#endif
135 134
136#if defined(CONFIG_MTD_GPIO_ADDR) || defined(CONFIG_MTD_GPIO_ADDR_MODULE) 135#if IS_ENABLED(CONFIG_MTD_GPIO_ADDR)
137static struct mtd_partition tll6527m_partitions[] = { 136static struct mtd_partition tll6527m_partitions[] = {
138 { 137 {
139 .name = "bootloader(nor)", 138 .name = "bootloader(nor)",
@@ -182,7 +181,7 @@ static struct platform_device tll6527m_flash_device = {
182}; 181};
183#endif 182#endif
184 183
185#if defined(CONFIG_GPIO_DECODER) || defined(CONFIG_GPIO_DECODER_MODULE) 184#if IS_ENABLED(CONFIG_GPIO_DECODER)
186/* An SN74LVC138A 3:8 decoder chip has been used to generate 7 augmented 185/* An SN74LVC138A 3:8 decoder chip has been used to generate 7 augmented
187 * outputs used as SPI CS lines for all SPI SLAVE devices on TLL6527v1-0. 186 * outputs used as SPI CS lines for all SPI SLAVE devices on TLL6527v1-0.
188 * EXP_GPIO_SPISEL_BASE is the base number for the expanded outputs being 187 * EXP_GPIO_SPISEL_BASE is the base number for the expanded outputs being
@@ -215,7 +214,7 @@ static struct platform_device spi_decoded_gpio = {
215 214
216#endif 215#endif
217 216
218#if defined(CONFIG_INPUT_ADXL34X) || defined(CONFIG_INPUT_ADXL34X_MODULE) 217#if IS_ENABLED(CONFIG_INPUT_ADXL34X)
219#include <linux/input/adxl34x.h> 218#include <linux/input/adxl34x.h>
220static const struct adxl34x_platform_data adxl345_info = { 219static const struct adxl34x_platform_data adxl345_info = {
221 .x_axis_offset = 0, 220 .x_axis_offset = 0,
@@ -250,14 +249,14 @@ static const struct adxl34x_platform_data adxl345_info = {
250}; 249};
251#endif 250#endif
252 251
253#if defined(CONFIG_RTC_DRV_BFIN) || defined(CONFIG_RTC_DRV_BFIN_MODULE) 252#if IS_ENABLED(CONFIG_RTC_DRV_BFIN)
254static struct platform_device rtc_device = { 253static struct platform_device rtc_device = {
255 .name = "rtc-bfin", 254 .name = "rtc-bfin",
256 .id = -1, 255 .id = -1,
257}; 256};
258#endif 257#endif
259 258
260#if defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE) 259#if IS_ENABLED(CONFIG_BFIN_MAC)
261#include <linux/bfin_mac.h> 260#include <linux/bfin_mac.h>
262static const unsigned short bfin_mac_peripherals[] = P_RMII0; 261static const unsigned short bfin_mac_peripherals[] = P_RMII0;
263 262
@@ -290,8 +289,7 @@ static struct platform_device bfin_mac_device = {
290}; 289};
291#endif 290#endif
292 291
293#if defined(CONFIG_MTD_M25P80) \ 292#if IS_ENABLED(CONFIG_MTD_M25P80)
294 || defined(CONFIG_MTD_M25P80_MODULE)
295static struct mtd_partition bfin_spi_flash_partitions[] = { 293static struct mtd_partition bfin_spi_flash_partitions[] = {
296 { 294 {
297 .name = "bootloader(spi)", 295 .name = "bootloader(spi)",
@@ -318,14 +316,13 @@ static struct bfin5xx_spi_chip spi_flash_chip_info = {
318}; 316};
319#endif 317#endif
320 318
321#if defined(CONFIG_MMC_SPI) || defined(CONFIG_MMC_SPI_MODULE) 319#if IS_ENABLED(CONFIG_MMC_SPI)
322static struct bfin5xx_spi_chip mmc_spi_chip_info = { 320static struct bfin5xx_spi_chip mmc_spi_chip_info = {
323 .enable_dma = 0, 321 .enable_dma = 0,
324}; 322};
325#endif 323#endif
326 324
327#if defined(CONFIG_TOUCHSCREEN_AD7879) \ 325#if IS_ENABLED(CONFIG_TOUCHSCREEN_AD7879)
328 || defined(CONFIG_TOUCHSCREEN_AD7879_MODULE)
329static const struct ad7879_platform_data bfin_ad7879_ts_info = { 326static const struct ad7879_platform_data bfin_ad7879_ts_info = {
330 .model = 7879, /* Model = AD7879 */ 327 .model = 7879, /* Model = AD7879 */
331 .x_plate_ohms = 620, /* 620 Ohm from the touch datasheet */ 328 .x_plate_ohms = 620, /* 620 Ohm from the touch datasheet */
@@ -343,7 +340,7 @@ static const struct ad7879_platform_data bfin_ad7879_ts_info = {
343}; 340};
344#endif 341#endif
345 342
346#if defined(CONFIG_SND_BF5XX_I2S) || defined(CONFIG_SND_BF5XX_I2S_MODULE) 343#if IS_ENABLED(CONFIG_SND_BF5XX_I2S)
347static struct platform_device bfin_i2s = { 344static struct platform_device bfin_i2s = {
348 .name = "bfin-i2s", 345 .name = "bfin-i2s",
349 .id = CONFIG_SND_BF5XX_SPORT_NUM, 346 .id = CONFIG_SND_BF5XX_SPORT_NUM,
@@ -351,7 +348,7 @@ static struct platform_device bfin_i2s = {
351}; 348};
352#endif 349#endif
353 350
354#if defined(CONFIG_GPIO_MCP23S08) || defined(CONFIG_GPIO_MCP23S08_MODULE) 351#if IS_ENABLED(CONFIG_GPIO_MCP23S08)
355#include <linux/spi/mcp23s08.h> 352#include <linux/spi/mcp23s08.h>
356static const struct mcp23s08_platform_data bfin_mcp23s08_sys_gpio_info = { 353static const struct mcp23s08_platform_data bfin_mcp23s08_sys_gpio_info = {
357 .chip[0].is_present = true, 354 .chip[0].is_present = true,
@@ -364,8 +361,7 @@ static const struct mcp23s08_platform_data bfin_mcp23s08_usr_gpio_info = {
364#endif 361#endif
365 362
366static struct spi_board_info bfin_spi_board_info[] __initdata = { 363static struct spi_board_info bfin_spi_board_info[] __initdata = {
367#if defined(CONFIG_MTD_M25P80) \ 364#if IS_ENABLED(CONFIG_MTD_M25P80)
368 || defined(CONFIG_MTD_M25P80_MODULE)
369 { 365 {
370 /* the modalias must be the same as spi device driver name */ 366 /* the modalias must be the same as spi device driver name */
371 .modalias = "m25p80", /* Name of spi_driver for this device */ 367 .modalias = "m25p80", /* Name of spi_driver for this device */
@@ -381,7 +377,7 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
381 }, 377 },
382#endif 378#endif
383 379
384#if defined(CONFIG_MMC_SPI) || defined(CONFIG_MMC_SPI_MODULE) 380#if IS_ENABLED(CONFIG_MMC_SPI)
385 { 381 {
386 .modalias = "mmc_spi", 382 .modalias = "mmc_spi",
387/* 383/*
@@ -396,8 +392,7 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
396 .mode = SPI_MODE_0, 392 .mode = SPI_MODE_0,
397 }, 393 },
398#endif 394#endif
399#if defined(CONFIG_TOUCHSCREEN_AD7879_SPI) \ 395#if IS_ENABLED(CONFIG_TOUCHSCREEN_AD7879_SPI)
400 || defined(CONFIG_TOUCHSCREEN_AD7879_SPI_MODULE)
401 { 396 {
402 .modalias = "ad7879", 397 .modalias = "ad7879",
403 .platform_data = &bfin_ad7879_ts_info, 398 .platform_data = &bfin_ad7879_ts_info,
@@ -409,7 +404,7 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
409 .mode = SPI_CPHA | SPI_CPOL, 404 .mode = SPI_CPHA | SPI_CPOL,
410 }, 405 },
411#endif 406#endif
412#if defined(CONFIG_SPI_SPIDEV) || defined(CONFIG_SPI_SPIDEV_MODULE) 407#if IS_ENABLED(CONFIG_SPI_SPIDEV)
413 { 408 {
414 .modalias = "spidev", 409 .modalias = "spidev",
415 .max_speed_hz = 10000000, 410 .max_speed_hz = 10000000,
@@ -419,7 +414,7 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
419 .mode = SPI_CPHA | SPI_CPOL, 414 .mode = SPI_CPHA | SPI_CPOL,
420 }, 415 },
421#endif 416#endif
422#if defined(CONFIG_FB_BFIN_LQ035Q1) || defined(CONFIG_FB_BFIN_LQ035Q1_MODULE) 417#if IS_ENABLED(CONFIG_FB_BFIN_LQ035Q1)
423 { 418 {
424 .modalias = "bfin-lq035q1-spi", 419 .modalias = "bfin-lq035q1-spi",
425 .max_speed_hz = 20000000, 420 .max_speed_hz = 20000000,
@@ -428,7 +423,7 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
428 .mode = SPI_CPHA | SPI_CPOL, 423 .mode = SPI_CPHA | SPI_CPOL,
429 }, 424 },
430#endif 425#endif
431#if defined(CONFIG_GPIO_MCP23S08) || defined(CONFIG_GPIO_MCP23S08_MODULE) 426#if IS_ENABLED(CONFIG_GPIO_MCP23S08)
432 { 427 {
433 .modalias = "mcp23s08", 428 .modalias = "mcp23s08",
434 .platform_data = &bfin_mcp23s08_sys_gpio_info, 429 .platform_data = &bfin_mcp23s08_sys_gpio_info,
@@ -448,7 +443,7 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
448#endif 443#endif
449}; 444};
450 445
451#if defined(CONFIG_SPI_BFIN5XX) || defined(CONFIG_SPI_BFIN5XX_MODULE) 446#if IS_ENABLED(CONFIG_SPI_BFIN5XX)
452/* SPI controller data */ 447/* SPI controller data */
453static struct bfin5xx_spi_master bfin_spi0_info = { 448static struct bfin5xx_spi_master bfin_spi0_info = {
454 .num_chipselect = EXP_GPIO_SPISEL_BASE + 8 + MAX_CTRL_CS, 449 .num_chipselect = EXP_GPIO_SPISEL_BASE + 8 + MAX_CTRL_CS,
@@ -487,7 +482,7 @@ static struct platform_device bfin_spi0_device = {
487}; 482};
488#endif /* spi master and devices */ 483#endif /* spi master and devices */
489 484
490#if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE) 485#if IS_ENABLED(CONFIG_SERIAL_BFIN)
491#ifdef CONFIG_SERIAL_BFIN_UART0 486#ifdef CONFIG_SERIAL_BFIN_UART0
492static struct resource bfin_uart0_resources[] = { 487static struct resource bfin_uart0_resources[] = {
493 { 488 {
@@ -600,7 +595,7 @@ static struct platform_device bfin_uart1_device = {
600#endif 595#endif
601#endif 596#endif
602 597
603#if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE) 598#if IS_ENABLED(CONFIG_BFIN_SIR)
604#ifdef CONFIG_BFIN_SIR0 599#ifdef CONFIG_BFIN_SIR0
605static struct resource bfin_sir0_resources[] = { 600static struct resource bfin_sir0_resources[] = {
606 { 601 {
@@ -655,7 +650,7 @@ static struct platform_device bfin_sir1_device = {
655#endif 650#endif
656#endif 651#endif
657 652
658#if defined(CONFIG_I2C_BLACKFIN_TWI) || defined(CONFIG_I2C_BLACKFIN_TWI_MODULE) 653#if IS_ENABLED(CONFIG_I2C_BLACKFIN_TWI)
659static const u16 bfin_twi0_pins[] = {P_TWI0_SCL, P_TWI0_SDA, 0}; 654static const u16 bfin_twi0_pins[] = {P_TWI0_SCL, P_TWI0_SDA, 0};
660 655
661static struct resource bfin_twi0_resource[] = { 656static struct resource bfin_twi0_resource[] = {
@@ -683,26 +678,25 @@ static struct platform_device i2c_bfin_twi_device = {
683#endif 678#endif
684 679
685static struct i2c_board_info __initdata bfin_i2c_board_info[] = { 680static struct i2c_board_info __initdata bfin_i2c_board_info[] = {
686#if defined(CONFIG_BFIN_TWI_LCD) || defined(CONFIG_BFIN_TWI_LCD_MODULE) 681#if IS_ENABLED(CONFIG_BFIN_TWI_LCD)
687 { 682 {
688 I2C_BOARD_INFO("pcf8574_lcd", 0x22), 683 I2C_BOARD_INFO("pcf8574_lcd", 0x22),
689 }, 684 },
690#endif 685#endif
691 686
692#if defined(CONFIG_FB_BFIN_7393) || defined(CONFIG_FB_BFIN_7393_MODULE) 687#if IS_ENABLED(CONFIG_FB_BFIN_7393)
693 { 688 {
694 I2C_BOARD_INFO("bfin-adv7393", 0x2B), 689 I2C_BOARD_INFO("bfin-adv7393", 0x2B),
695 }, 690 },
696#endif 691#endif
697#if defined(CONFIG_TOUCHSCREEN_AD7879_I2C) \ 692#if IS_ENABLED(CONFIG_TOUCHSCREEN_AD7879_I2C)
698 || defined(CONFIG_TOUCHSCREEN_AD7879_I2C_MODULE)
699 { 693 {
700 I2C_BOARD_INFO("ad7879", 0x2C), 694 I2C_BOARD_INFO("ad7879", 0x2C),
701 .irq = IRQ_PH14, 695 .irq = IRQ_PH14,
702 .platform_data = (void *)&bfin_ad7879_ts_info, 696 .platform_data = (void *)&bfin_ad7879_ts_info,
703 }, 697 },
704#endif 698#endif
705#if defined(CONFIG_SND_SOC_SSM2602) || defined(CONFIG_SND_SOC_SSM2602_MODULE) 699#if IS_ENABLED(CONFIG_SND_SOC_SSM2602)
706 { 700 {
707 I2C_BOARD_INFO("ssm2602", 0x1b), 701 I2C_BOARD_INFO("ssm2602", 0x1b),
708 }, 702 },
@@ -714,8 +708,7 @@ static struct i2c_board_info __initdata bfin_i2c_board_info[] = {
714 { 708 {
715 I2C_BOARD_INFO("ltc3576", 0x09), 709 I2C_BOARD_INFO("ltc3576", 0x09),
716 }, 710 },
717#if defined(CONFIG_INPUT_ADXL34X_I2C) \ 711#if IS_ENABLED(CONFIG_INPUT_ADXL34X_I2C)
718 || defined(CONFIG_INPUT_ADXL34X_I2C_MODULE)
719 { 712 {
720 I2C_BOARD_INFO("adxl34x", 0x53), 713 I2C_BOARD_INFO("adxl34x", 0x53),
721 .irq = IRQ_PH13, 714 .irq = IRQ_PH13,
@@ -724,8 +717,7 @@ static struct i2c_board_info __initdata bfin_i2c_board_info[] = {
724#endif 717#endif
725}; 718};
726 719
727#if defined(CONFIG_SERIAL_BFIN_SPORT) \ 720#if IS_ENABLED(CONFIG_SERIAL_BFIN_SPORT)
728 || defined(CONFIG_SERIAL_BFIN_SPORT_MODULE)
729#ifdef CONFIG_SERIAL_BFIN_SPORT0_UART 721#ifdef CONFIG_SERIAL_BFIN_SPORT0_UART
730static struct resource bfin_sport0_uart_resources[] = { 722static struct resource bfin_sport0_uart_resources[] = {
731 { 723 {
@@ -823,28 +815,28 @@ static struct platform_device *tll6527m_devices[] __initdata = {
823 815
824 &bfin_dpmc, 816 &bfin_dpmc,
825 817
826#if defined(CONFIG_RTC_DRV_BFIN) || defined(CONFIG_RTC_DRV_BFIN_MODULE) 818#if IS_ENABLED(CONFIG_RTC_DRV_BFIN)
827 &rtc_device, 819 &rtc_device,
828#endif 820#endif
829 821
830#if defined(CONFIG_USB_MUSB_HDRC) || defined(CONFIG_USB_MUSB_HDRC_MODULE) 822#if IS_ENABLED(CONFIG_USB_MUSB_HDRC)
831 &musb_device, 823 &musb_device,
832#endif 824#endif
833 825
834#if defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE) 826#if IS_ENABLED(CONFIG_BFIN_MAC)
835 &bfin_mii_bus, 827 &bfin_mii_bus,
836 &bfin_mac_device, 828 &bfin_mac_device,
837#endif 829#endif
838 830
839#if defined(CONFIG_SPI_BFIN5XX) || defined(CONFIG_SPI_BFIN5XX_MODULE) 831#if IS_ENABLED(CONFIG_SPI_BFIN5XX)
840 &bfin_spi0_device, 832 &bfin_spi0_device,
841#endif 833#endif
842 834
843#if defined(CONFIG_FB_BFIN_LQ035Q1) || defined(CONFIG_FB_BFIN_LQ035Q1_MODULE) 835#if IS_ENABLED(CONFIG_FB_BFIN_LQ035Q1)
844 &bfin_lq035q1_device, 836 &bfin_lq035q1_device,
845#endif 837#endif
846 838
847#if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE) 839#if IS_ENABLED(CONFIG_SERIAL_BFIN)
848#ifdef CONFIG_SERIAL_BFIN_UART0 840#ifdef CONFIG_SERIAL_BFIN_UART0
849 &bfin_uart0_device, 841 &bfin_uart0_device,
850#endif 842#endif
@@ -853,7 +845,7 @@ static struct platform_device *tll6527m_devices[] __initdata = {
853#endif 845#endif
854#endif 846#endif
855 847
856#if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE) 848#if IS_ENABLED(CONFIG_BFIN_SIR)
857#ifdef CONFIG_BFIN_SIR0 849#ifdef CONFIG_BFIN_SIR0
858 &bfin_sir0_device, 850 &bfin_sir0_device,
859#endif 851#endif
@@ -862,12 +854,11 @@ static struct platform_device *tll6527m_devices[] __initdata = {
862#endif 854#endif
863#endif 855#endif
864 856
865#if defined(CONFIG_I2C_BLACKFIN_TWI) || defined(CONFIG_I2C_BLACKFIN_TWI_MODULE) 857#if IS_ENABLED(CONFIG_I2C_BLACKFIN_TWI)
866 &i2c_bfin_twi_device, 858 &i2c_bfin_twi_device,
867#endif 859#endif
868 860
869#if defined(CONFIG_SERIAL_BFIN_SPORT) \ 861#if IS_ENABLED(CONFIG_SERIAL_BFIN_SPORT)
870 || defined(CONFIG_SERIAL_BFIN_SPORT_MODULE)
871#ifdef CONFIG_SERIAL_BFIN_SPORT0_UART 862#ifdef CONFIG_SERIAL_BFIN_SPORT0_UART
872 &bfin_sport0_uart_device, 863 &bfin_sport0_uart_device,
873#endif 864#endif
@@ -876,15 +867,15 @@ static struct platform_device *tll6527m_devices[] __initdata = {
876#endif 867#endif
877#endif 868#endif
878 869
879#if defined(CONFIG_MTD_GPIO_ADDR) || defined(CONFIG_MTD_GPIO_ADDR_MODULE) 870#if IS_ENABLED(CONFIG_MTD_GPIO_ADDR)
880 &tll6527m_flash_device, 871 &tll6527m_flash_device,
881#endif 872#endif
882 873
883#if defined(CONFIG_SND_BF5XX_I2S) || defined(CONFIG_SND_BF5XX_I2S_MODULE) 874#if IS_ENABLED(CONFIG_SND_BF5XX_I2S)
884 &bfin_i2s, 875 &bfin_i2s,
885#endif 876#endif
886 877
887#if defined(CONFIG_GPIO_DECODER) || defined(CONFIG_GPIO_DECODER_MODULE) 878#if IS_ENABLED(CONFIG_GPIO_DECODER)
888 &spi_decoded_gpio, 879 &spi_decoded_gpio,
889#endif 880#endif
890}; 881};
diff --git a/arch/blackfin/mach-bf533/boards/H8606.c b/arch/blackfin/mach-bf533/boards/H8606.c
index 6cb7b3ed9b3d..01300f40db15 100644
--- a/arch/blackfin/mach-bf533/boards/H8606.c
+++ b/arch/blackfin/mach-bf533/boards/H8606.c
@@ -14,7 +14,7 @@
14#include <linux/mtd/partitions.h> 14#include <linux/mtd/partitions.h>
15#include <linux/spi/spi.h> 15#include <linux/spi/spi.h>
16#include <linux/spi/flash.h> 16#include <linux/spi/flash.h>
17#if defined(CONFIG_USB_ISP1362_HCD) || defined(CONFIG_USB_ISP1362_HCD_MODULE) 17#if IS_ENABLED(CONFIG_USB_ISP1362_HCD)
18#include <linux/usb/isp1362.h> 18#include <linux/usb/isp1362.h>
19#endif 19#endif
20#include <linux/irq.h> 20#include <linux/irq.h>
@@ -29,7 +29,7 @@
29 */ 29 */
30const char bfin_board_name[] = "HV Sistemas H8606"; 30const char bfin_board_name[] = "HV Sistemas H8606";
31 31
32#if defined(CONFIG_RTC_DRV_BFIN) || defined(CONFIG_RTC_DRV_BFIN_MODULE) 32#if IS_ENABLED(CONFIG_RTC_DRV_BFIN)
33static struct platform_device rtc_device = { 33static struct platform_device rtc_device = {
34 .name = "rtc-bfin", 34 .name = "rtc-bfin",
35 .id = -1, 35 .id = -1,
@@ -39,7 +39,7 @@ static struct platform_device rtc_device = {
39/* 39/*
40* Driver needs to know address, irq and flag pin. 40* Driver needs to know address, irq and flag pin.
41 */ 41 */
42 #if defined(CONFIG_DM9000) || defined(CONFIG_DM9000_MODULE) 42#if IS_ENABLED(CONFIG_DM9000)
43static struct resource dm9000_resources[] = { 43static struct resource dm9000_resources[] = {
44 [0] = { 44 [0] = {
45 .start = 0x20300000, 45 .start = 0x20300000,
@@ -67,7 +67,7 @@ static struct platform_device dm9000_device = {
67}; 67};
68#endif 68#endif
69 69
70#if defined(CONFIG_SMC91X) || defined(CONFIG_SMC91X_MODULE) 70#if IS_ENABLED(CONFIG_SMC91X)
71#include <linux/smc91x.h> 71#include <linux/smc91x.h>
72 72
73static struct smc91x_platdata smc91x_info = { 73static struct smc91x_platdata smc91x_info = {
@@ -104,7 +104,7 @@ static struct platform_device smc91x_device = {
104}; 104};
105#endif 105#endif
106 106
107#if defined(CONFIG_USB_NET2272) || defined(CONFIG_USB_NET2272_MODULE) 107#if IS_ENABLED(CONFIG_USB_NET2272)
108static struct resource net2272_bfin_resources[] = { 108static struct resource net2272_bfin_resources[] = {
109 { 109 {
110 .start = 0x20300000, 110 .start = 0x20300000,
@@ -125,10 +125,10 @@ static struct platform_device net2272_bfin_device = {
125}; 125};
126#endif 126#endif
127 127
128#if defined(CONFIG_SPI_BFIN5XX) || defined(CONFIG_SPI_BFIN5XX_MODULE) 128#if IS_ENABLED(CONFIG_SPI_BFIN5XX)
129/* all SPI peripherals info goes here */ 129/* all SPI peripherals info goes here */
130 130
131#if defined(CONFIG_MTD_M25P80) || defined(CONFIG_MTD_M25P80_MODULE) 131#if IS_ENABLED(CONFIG_MTD_M25P80)
132static struct mtd_partition bfin_spi_flash_partitions[] = { 132static struct mtd_partition bfin_spi_flash_partitions[] = {
133 { 133 {
134 .name = "bootloader (spi)", 134 .name = "bootloader (spi)",
@@ -166,7 +166,7 @@ static struct bfin5xx_spi_chip spi_flash_chip_info = {
166/* Notice: for blackfin, the speed_hz is the value of register 166/* Notice: for blackfin, the speed_hz is the value of register
167 * SPI_BAUD, not the real baudrate */ 167 * SPI_BAUD, not the real baudrate */
168static struct spi_board_info bfin_spi_board_info[] __initdata = { 168static struct spi_board_info bfin_spi_board_info[] __initdata = {
169#if defined(CONFIG_MTD_M25P80) || defined(CONFIG_MTD_M25P80_MODULE) 169#if IS_ENABLED(CONFIG_MTD_M25P80)
170 { 170 {
171 /* the modalias must be the same as spi device driver name */ 171 /* the modalias must be the same as spi device driver name */
172 .modalias = "m25p80", /* Name of spi_driver for this device */ 172 .modalias = "m25p80", /* Name of spi_driver for this device */
@@ -180,7 +180,7 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
180 }, 180 },
181#endif 181#endif
182 182
183#if defined(CONFIG_SND_BF5XX_SOC_AD183X) || defined(CONFIG_SND_BF5XX_SOC_AD183X_MODULE) 183#if IS_ENABLED(CONFIG_SND_BF5XX_SOC_AD183X)
184 { 184 {
185 .modalias = "ad183x", 185 .modalias = "ad183x",
186 .max_speed_hz = 16, 186 .max_speed_hz = 16,
@@ -229,7 +229,7 @@ static struct platform_device bfin_spi0_device = {
229}; 229};
230#endif /* spi master and devices */ 230#endif /* spi master and devices */
231 231
232#if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE) 232#if IS_ENABLED(CONFIG_SERIAL_BFIN)
233#ifdef CONFIG_SERIAL_BFIN_UART0 233#ifdef CONFIG_SERIAL_BFIN_UART0
234static struct resource bfin_uart0_resources[] = { 234static struct resource bfin_uart0_resources[] = {
235 { 235 {
@@ -280,7 +280,7 @@ static struct platform_device bfin_uart0_device = {
280#endif 280#endif
281#endif 281#endif
282 282
283#if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE) 283#if IS_ENABLED(CONFIG_BFIN_SIR)
284#ifdef CONFIG_BFIN_SIR0 284#ifdef CONFIG_BFIN_SIR0
285static struct resource bfin_sir0_resources[] = { 285static struct resource bfin_sir0_resources[] = {
286 { 286 {
@@ -309,7 +309,7 @@ static struct platform_device bfin_sir0_device = {
309#endif 309#endif
310#endif 310#endif
311 311
312#if defined(CONFIG_SERIAL_8250) || defined(CONFIG_SERIAL_8250_MODULE) 312#if IS_ENABLED(CONFIG_SERIAL_8250)
313 313
314#include <linux/serial_8250.h> 314#include <linux/serial_8250.h>
315#include <linux/serial.h> 315#include <linux/serial.h>
@@ -353,7 +353,7 @@ static struct platform_device serial8250_device = {
353 353
354#endif 354#endif
355 355
356#if defined(CONFIG_KEYBOARD_OPENCORES) || defined(CONFIG_KEYBOARD_OPENCORES_MODULE) 356#if IS_ENABLED(CONFIG_KEYBOARD_OPENCORES)
357 357
358/* 358/*
359 * Configuration for one OpenCores keyboard controller in FPGA at address 0x20200030, 359 * Configuration for one OpenCores keyboard controller in FPGA at address 0x20200030,
@@ -382,43 +382,43 @@ static struct platform_device opencores_kbd_device = {
382#endif 382#endif
383 383
384static struct platform_device *h8606_devices[] __initdata = { 384static struct platform_device *h8606_devices[] __initdata = {
385#if defined(CONFIG_RTC_DRV_BFIN) || defined(CONFIG_RTC_DRV_BFIN_MODULE) 385#if IS_ENABLED(CONFIG_RTC_DRV_BFIN)
386 &rtc_device, 386 &rtc_device,
387#endif 387#endif
388 388
389#if defined(CONFIG_DM9000) || defined(CONFIG_DM9000_MODULE) 389#if IS_ENABLED(CONFIG_DM9000)
390 &dm9000_device, 390 &dm9000_device,
391#endif 391#endif
392 392
393#if defined(CONFIG_SMC91X) || defined(CONFIG_SMC91X_MODULE) 393#if IS_ENABLED(CONFIG_SMC91X)
394 &smc91x_device, 394 &smc91x_device,
395#endif 395#endif
396 396
397#if defined(CONFIG_USB_NET2272) || defined(CONFIG_USB_NET2272_MODULE) 397#if IS_ENABLED(CONFIG_USB_NET2272)
398 &net2272_bfin_device, 398 &net2272_bfin_device,
399#endif 399#endif
400 400
401#if defined(CONFIG_SPI_BFIN5XX) || defined(CONFIG_SPI_BFIN5XX_MODULE) 401#if IS_ENABLED(CONFIG_SPI_BFIN5XX)
402 &bfin_spi0_device, 402 &bfin_spi0_device,
403#endif 403#endif
404 404
405#if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE) 405#if IS_ENABLED(CONFIG_SERIAL_BFIN)
406#ifdef CONFIG_SERIAL_BFIN_UART0 406#ifdef CONFIG_SERIAL_BFIN_UART0
407 &bfin_uart0_device, 407 &bfin_uart0_device,
408#endif 408#endif
409#endif 409#endif
410 410
411#if defined(CONFIG_SERIAL_8250) || defined(CONFIG_SERIAL_8250_MODULE) 411#if IS_ENABLED(CONFIG_SERIAL_8250)
412 &serial8250_device, 412 &serial8250_device,
413#endif 413#endif
414 414
415#if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE) 415#if IS_ENABLED(CONFIG_BFIN_SIR)
416#ifdef CONFIG_BFIN_SIR0 416#ifdef CONFIG_BFIN_SIR0
417 &bfin_sir0_device, 417 &bfin_sir0_device,
418#endif 418#endif
419#endif 419#endif
420 420
421#if defined(CONFIG_KEYBOARD_OPENCORES) || defined(CONFIG_KEYBOARD_OPENCORES_MODULE) 421#if IS_ENABLED(CONFIG_KEYBOARD_OPENCORES)
422 &opencores_kbd_device, 422 &opencores_kbd_device,
423#endif 423#endif
424}; 424};
@@ -428,7 +428,7 @@ static int __init H8606_init(void)
428 printk(KERN_INFO "HV Sistemas H8606 board support by http://www.hvsistemas.com\n"); 428 printk(KERN_INFO "HV Sistemas H8606 board support by http://www.hvsistemas.com\n");
429 printk(KERN_INFO "%s(): registering device resources\n", __func__); 429 printk(KERN_INFO "%s(): registering device resources\n", __func__);
430 platform_add_devices(h8606_devices, ARRAY_SIZE(h8606_devices)); 430 platform_add_devices(h8606_devices, ARRAY_SIZE(h8606_devices));
431#if defined(CONFIG_SPI_BFIN5XX) || defined(CONFIG_SPI_BFIN5XX_MODULE) 431#if IS_ENABLED(CONFIG_SPI_BFIN5XX)
432 spi_register_board_info(bfin_spi_board_info, ARRAY_SIZE(bfin_spi_board_info)); 432 spi_register_board_info(bfin_spi_board_info, ARRAY_SIZE(bfin_spi_board_info));
433#endif 433#endif
434 return 0; 434 return 0;
diff --git a/arch/blackfin/mach-bf533/boards/blackstamp.c b/arch/blackfin/mach-bf533/boards/blackstamp.c
index de44a3765e59..63b0e4fe760c 100644
--- a/arch/blackfin/mach-bf533/boards/blackstamp.c
+++ b/arch/blackfin/mach-bf533/boards/blackstamp.c
@@ -31,7 +31,7 @@
31 */ 31 */
32const char bfin_board_name[] = "BlackStamp"; 32const char bfin_board_name[] = "BlackStamp";
33 33
34#if defined(CONFIG_RTC_DRV_BFIN) || defined(CONFIG_RTC_DRV_BFIN_MODULE) 34#if IS_ENABLED(CONFIG_RTC_DRV_BFIN)
35static struct platform_device rtc_device = { 35static struct platform_device rtc_device = {
36 .name = "rtc-bfin", 36 .name = "rtc-bfin",
37 .id = -1, 37 .id = -1,
@@ -41,7 +41,7 @@ static struct platform_device rtc_device = {
41/* 41/*
42 * Driver needs to know address, irq and flag pin. 42 * Driver needs to know address, irq and flag pin.
43 */ 43 */
44#if defined(CONFIG_SMC91X) || defined(CONFIG_SMC91X_MODULE) 44#if IS_ENABLED(CONFIG_SMC91X)
45#include <linux/smc91x.h> 45#include <linux/smc91x.h>
46 46
47static struct smc91x_platdata smc91x_info = { 47static struct smc91x_platdata smc91x_info = {
@@ -74,7 +74,7 @@ static struct platform_device smc91x_device = {
74}; 74};
75#endif 75#endif
76 76
77#if defined(CONFIG_MTD_M25P80) || defined(CONFIG_MTD_M25P80_MODULE) 77#if IS_ENABLED(CONFIG_MTD_M25P80)
78static struct mtd_partition bfin_spi_flash_partitions[] = { 78static struct mtd_partition bfin_spi_flash_partitions[] = {
79 { 79 {
80 .name = "bootloader(spi)", 80 .name = "bootloader(spi)",
@@ -105,14 +105,14 @@ static struct bfin5xx_spi_chip spi_flash_chip_info = {
105}; 105};
106#endif 106#endif
107 107
108#if defined(CONFIG_MMC_SPI) || defined(CONFIG_MMC_SPI_MODULE) 108#if IS_ENABLED(CONFIG_MMC_SPI)
109static struct bfin5xx_spi_chip mmc_spi_chip_info = { 109static struct bfin5xx_spi_chip mmc_spi_chip_info = {
110 .enable_dma = 0, 110 .enable_dma = 0,
111}; 111};
112#endif 112#endif
113 113
114static struct spi_board_info bfin_spi_board_info[] __initdata = { 114static struct spi_board_info bfin_spi_board_info[] __initdata = {
115#if defined(CONFIG_MTD_M25P80) || defined(CONFIG_MTD_M25P80_MODULE) 115#if IS_ENABLED(CONFIG_MTD_M25P80)
116 { 116 {
117 /* the modalias must be the same as spi device driver name */ 117 /* the modalias must be the same as spi device driver name */
118 .modalias = "m25p80", /* Name of spi_driver for this device */ 118 .modalias = "m25p80", /* Name of spi_driver for this device */
@@ -125,7 +125,7 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
125 }, 125 },
126#endif 126#endif
127 127
128#if defined(CONFIG_MMC_SPI) || defined(CONFIG_MMC_SPI_MODULE) 128#if IS_ENABLED(CONFIG_MMC_SPI)
129 { 129 {
130 .modalias = "mmc_spi", 130 .modalias = "mmc_spi",
131 .max_speed_hz = 20000000, /* max spi clock (SCK) speed in HZ */ 131 .max_speed_hz = 20000000, /* max spi clock (SCK) speed in HZ */
@@ -136,7 +136,7 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
136 }, 136 },
137#endif 137#endif
138 138
139#if defined(CONFIG_SPI_SPIDEV) || defined(CONFIG_SPI_SPIDEV_MODULE) 139#if IS_ENABLED(CONFIG_SPI_SPIDEV)
140 { 140 {
141 .modalias = "spidev", 141 .modalias = "spidev",
142 .max_speed_hz = 3125000, /* max spi clock (SCK) speed in HZ */ 142 .max_speed_hz = 3125000, /* max spi clock (SCK) speed in HZ */
@@ -146,7 +146,7 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
146#endif 146#endif
147}; 147};
148 148
149#if defined(CONFIG_SPI_BFIN5XX) || defined(CONFIG_SPI_BFIN5XX_MODULE) 149#if IS_ENABLED(CONFIG_SPI_BFIN5XX)
150/* SPI (0) */ 150/* SPI (0) */
151static struct resource bfin_spi0_resource[] = { 151static struct resource bfin_spi0_resource[] = {
152 [0] = { 152 [0] = {
@@ -184,7 +184,7 @@ static struct platform_device bfin_spi0_device = {
184}; 184};
185#endif /* spi master and devices */ 185#endif /* spi master and devices */
186 186
187#if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE) 187#if IS_ENABLED(CONFIG_SERIAL_BFIN)
188#ifdef CONFIG_SERIAL_BFIN_UART0 188#ifdef CONFIG_SERIAL_BFIN_UART0
189static struct resource bfin_uart0_resources[] = { 189static struct resource bfin_uart0_resources[] = {
190 { 190 {
@@ -235,7 +235,7 @@ static struct platform_device bfin_uart0_device = {
235#endif 235#endif
236#endif 236#endif
237 237
238#if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE) 238#if IS_ENABLED(CONFIG_BFIN_SIR)
239#ifdef CONFIG_BFIN_SIR0 239#ifdef CONFIG_BFIN_SIR0
240static struct resource bfin_sir0_resources[] = { 240static struct resource bfin_sir0_resources[] = {
241 { 241 {
@@ -264,7 +264,7 @@ static struct platform_device bfin_sir0_device = {
264#endif 264#endif
265#endif 265#endif
266 266
267#if defined(CONFIG_SERIAL_BFIN_SPORT) || defined(CONFIG_SERIAL_BFIN_SPORT_MODULE) 267#if IS_ENABLED(CONFIG_SERIAL_BFIN_SPORT)
268#ifdef CONFIG_SERIAL_BFIN_SPORT0_UART 268#ifdef CONFIG_SERIAL_BFIN_SPORT0_UART
269static struct resource bfin_sport0_uart_resources[] = { 269static struct resource bfin_sport0_uart_resources[] = {
270 { 270 {
@@ -335,7 +335,7 @@ static struct platform_device bfin_sport1_uart_device = {
335#endif 335#endif
336#endif 336#endif
337 337
338#if defined(CONFIG_KEYBOARD_GPIO) || defined(CONFIG_KEYBOARD_GPIO_MODULE) 338#if IS_ENABLED(CONFIG_KEYBOARD_GPIO)
339#include <linux/input.h> 339#include <linux/input.h>
340#include <linux/gpio_keys.h> 340#include <linux/gpio_keys.h>
341 341
@@ -358,7 +358,7 @@ static struct platform_device bfin_device_gpiokeys = {
358}; 358};
359#endif 359#endif
360 360
361#if defined(CONFIG_I2C_GPIO) || defined(CONFIG_I2C_GPIO_MODULE) 361#if IS_ENABLED(CONFIG_I2C_GPIO)
362#include <linux/i2c-gpio.h> 362#include <linux/i2c-gpio.h>
363 363
364static struct i2c_gpio_platform_data i2c_gpio_data = { 364static struct i2c_gpio_platform_data i2c_gpio_data = {
@@ -413,32 +413,32 @@ static struct platform_device *stamp_devices[] __initdata = {
413 413
414 &bfin_dpmc, 414 &bfin_dpmc,
415 415
416#if defined(CONFIG_RTC_DRV_BFIN) || defined(CONFIG_RTC_DRV_BFIN_MODULE) 416#if IS_ENABLED(CONFIG_RTC_DRV_BFIN)
417 &rtc_device, 417 &rtc_device,
418#endif 418#endif
419 419
420#if defined(CONFIG_SMC91X) || defined(CONFIG_SMC91X_MODULE) 420#if IS_ENABLED(CONFIG_SMC91X)
421 &smc91x_device, 421 &smc91x_device,
422#endif 422#endif
423 423
424 424
425#if defined(CONFIG_SPI_BFIN5XX) || defined(CONFIG_SPI_BFIN5XX_MODULE) 425#if IS_ENABLED(CONFIG_SPI_BFIN5XX)
426 &bfin_spi0_device, 426 &bfin_spi0_device,
427#endif 427#endif
428 428
429#if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE) 429#if IS_ENABLED(CONFIG_SERIAL_BFIN)
430#ifdef CONFIG_SERIAL_BFIN_UART0 430#ifdef CONFIG_SERIAL_BFIN_UART0
431 &bfin_uart0_device, 431 &bfin_uart0_device,
432#endif 432#endif
433#endif 433#endif
434 434
435#if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE) 435#if IS_ENABLED(CONFIG_BFIN_SIR)
436#ifdef CONFIG_BFIN_SIR0 436#ifdef CONFIG_BFIN_SIR0
437 &bfin_sir0_device, 437 &bfin_sir0_device,
438#endif 438#endif
439#endif 439#endif
440 440
441#if defined(CONFIG_SERIAL_BFIN_SPORT) || defined(CONFIG_SERIAL_BFIN_SPORT_MODULE) 441#if IS_ENABLED(CONFIG_SERIAL_BFIN_SPORT)
442#ifdef CONFIG_SERIAL_BFIN_SPORT0_UART 442#ifdef CONFIG_SERIAL_BFIN_SPORT0_UART
443 &bfin_sport0_uart_device, 443 &bfin_sport0_uart_device,
444#endif 444#endif
@@ -447,11 +447,11 @@ static struct platform_device *stamp_devices[] __initdata = {
447#endif 447#endif
448#endif 448#endif
449 449
450#if defined(CONFIG_KEYBOARD_GPIO) || defined(CONFIG_KEYBOARD_GPIO_MODULE) 450#if IS_ENABLED(CONFIG_KEYBOARD_GPIO)
451 &bfin_device_gpiokeys, 451 &bfin_device_gpiokeys,
452#endif 452#endif
453 453
454#if defined(CONFIG_I2C_GPIO) || defined(CONFIG_I2C_GPIO_MODULE) 454#if IS_ENABLED(CONFIG_I2C_GPIO)
455 &i2c_gpio_device, 455 &i2c_gpio_device,
456#endif 456#endif
457}; 457};
@@ -469,7 +469,7 @@ static int __init blackstamp_init(void)
469 if (ret < 0) 469 if (ret < 0)
470 return ret; 470 return ret;
471 471
472#if defined(CONFIG_SMC91X) || defined(CONFIG_SMC91X_MODULE) 472#if IS_ENABLED(CONFIG_SMC91X)
473 /* 473 /*
474 * setup BF533_STAMP CPLD to route AMS3 to Ethernet MAC. 474 * setup BF533_STAMP CPLD to route AMS3 to Ethernet MAC.
475 * the bfin-async-map driver takes care of flipping between 475 * the bfin-async-map driver takes care of flipping between
diff --git a/arch/blackfin/mach-bf533/boards/cm_bf533.c b/arch/blackfin/mach-bf533/boards/cm_bf533.c
index fe47e048c4e6..4ef2fb0e48d5 100644
--- a/arch/blackfin/mach-bf533/boards/cm_bf533.c
+++ b/arch/blackfin/mach-bf533/boards/cm_bf533.c
@@ -15,7 +15,7 @@
15#include <linux/spi/spi.h> 15#include <linux/spi/spi.h>
16#include <linux/spi/flash.h> 16#include <linux/spi/flash.h>
17#include <linux/spi/mmc_spi.h> 17#include <linux/spi/mmc_spi.h>
18#if defined(CONFIG_USB_ISP1362_HCD) || defined(CONFIG_USB_ISP1362_HCD_MODULE) 18#if IS_ENABLED(CONFIG_USB_ISP1362_HCD)
19#include <linux/usb/isp1362.h> 19#include <linux/usb/isp1362.h>
20#endif 20#endif
21#include <linux/irq.h> 21#include <linux/irq.h>
@@ -29,9 +29,9 @@
29 */ 29 */
 const char bfin_board_name[] = "Bluetechnix CM BF533";

-#if defined(CONFIG_SPI_BFIN5XX) || defined(CONFIG_SPI_BFIN5XX_MODULE)
+#if IS_ENABLED(CONFIG_SPI_BFIN5XX)
 /* all SPI peripherals info goes here */
-#if defined(CONFIG_MTD_M25P80) || defined(CONFIG_MTD_M25P80_MODULE)
+#if IS_ENABLED(CONFIG_MTD_M25P80)
 static struct mtd_partition bfin_spi_flash_partitions[] = {
  {
  .name = "bootloader(spi)",
@@ -62,14 +62,14 @@ static struct bfin5xx_spi_chip spi_flash_chip_info = {
 };
 #endif

-#if defined(CONFIG_MMC_SPI) || defined(CONFIG_MMC_SPI_MODULE)
+#if IS_ENABLED(CONFIG_MMC_SPI)
 static struct bfin5xx_spi_chip mmc_spi_chip_info = {
  .enable_dma = 0,
 };
 #endif

 static struct spi_board_info bfin_spi_board_info[] __initdata = {
-#if defined(CONFIG_MTD_M25P80) || defined(CONFIG_MTD_M25P80_MODULE)
+#if IS_ENABLED(CONFIG_MTD_M25P80)
  {
  /* the modalias must be the same as spi device driver name */
  .modalias = "m25p80", /* Name of spi_driver for this device */
@@ -82,7 +82,7 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
  },
 #endif

-#if defined(CONFIG_SND_BF5XX_SOC_AD183X) || defined(CONFIG_SND_BF5XX_SOC_AD183X_MODULE)
+#if IS_ENABLED(CONFIG_SND_BF5XX_SOC_AD183X)
  {
  .modalias = "ad183x",
  .max_speed_hz = 3125000, /* max spi clock (SCK) speed in HZ */
@@ -91,7 +91,7 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
  },
 #endif

-#if defined(CONFIG_MMC_SPI) || defined(CONFIG_MMC_SPI_MODULE)
+#if IS_ENABLED(CONFIG_MMC_SPI)
  {
  .modalias = "mmc_spi",
  .max_speed_hz = 20000000, /* max spi clock (SCK) speed in HZ */
@@ -140,14 +140,14 @@ static struct platform_device bfin_spi0_device = {
 };
 #endif /* spi master and devices */

-#if defined(CONFIG_RTC_DRV_BFIN) || defined(CONFIG_RTC_DRV_BFIN_MODULE)
+#if IS_ENABLED(CONFIG_RTC_DRV_BFIN)
 static struct platform_device rtc_device = {
  .name = "rtc-bfin",
  .id = -1,
 };
 #endif

-#if defined(CONFIG_SMC91X) || defined(CONFIG_SMC91X_MODULE)
+#if IS_ENABLED(CONFIG_SMC91X)
 #include <linux/smc91x.h>

 static struct smc91x_platdata smc91x_info = {
@@ -178,7 +178,7 @@ static struct platform_device smc91x_device = {
 };
 #endif

-#if defined(CONFIG_SMSC911X) || defined(CONFIG_SMSC911X_MODULE)
+#if IS_ENABLED(CONFIG_SMSC911X)
 #include <linux/smsc911x.h>

 static struct resource smsc911x_resources[] = {
@@ -212,7 +212,7 @@ static struct platform_device smsc911x_device = {
 };
 #endif

-#if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE)
+#if IS_ENABLED(CONFIG_SERIAL_BFIN)
 #ifdef CONFIG_SERIAL_BFIN_UART0
 static struct resource bfin_uart0_resources[] = {
  {
@@ -263,7 +263,7 @@ static struct platform_device bfin_uart0_device = {
 #endif
 #endif

-#if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE)
+#if IS_ENABLED(CONFIG_BFIN_SIR)
 #ifdef CONFIG_BFIN_SIR0
 static struct resource bfin_sir0_resources[] = {
  {
@@ -292,7 +292,7 @@ static struct platform_device bfin_sir0_device = {
 #endif
 #endif

-#if defined(CONFIG_SERIAL_BFIN_SPORT) || defined(CONFIG_SERIAL_BFIN_SPORT_MODULE)
+#if IS_ENABLED(CONFIG_SERIAL_BFIN_SPORT)
 #ifdef CONFIG_SERIAL_BFIN_SPORT0_UART
 static struct resource bfin_sport0_uart_resources[] = {
  {
@@ -363,7 +363,7 @@ static struct platform_device bfin_sport1_uart_device = {
 #endif
 #endif

-#if defined(CONFIG_USB_ISP1362_HCD) || defined(CONFIG_USB_ISP1362_HCD_MODULE)
+#if IS_ENABLED(CONFIG_USB_ISP1362_HCD)
 static struct resource isp1362_hcd_resources[] = {
  {
  .start = 0x20308000,
@@ -403,7 +403,7 @@ static struct platform_device isp1362_hcd_device = {
 #endif


-#if defined(CONFIG_USB_NET2272) || defined(CONFIG_USB_NET2272_MODULE)
+#if IS_ENABLED(CONFIG_USB_NET2272)
 static struct resource net2272_bfin_resources[] = {
  {
  .start = 0x20300000,
@@ -426,7 +426,7 @@ static struct platform_device net2272_bfin_device = {



-#if defined(CONFIG_MTD_PHYSMAP) || defined(CONFIG_MTD_PHYSMAP_MODULE)
+#if IS_ENABLED(CONFIG_MTD_PHYSMAP)
 static struct mtd_partition para_partitions[] = {
  {
  .name = "bootloader(nor)",
@@ -495,19 +495,19 @@ static struct platform_device *cm_bf533_devices[] __initdata = {

  &bfin_dpmc,

-#if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE)
+#if IS_ENABLED(CONFIG_SERIAL_BFIN)
 #ifdef CONFIG_SERIAL_BFIN_UART0
  &bfin_uart0_device,
 #endif
 #endif

-#if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE)
+#if IS_ENABLED(CONFIG_BFIN_SIR)
 #ifdef CONFIG_BFIN_SIR0
  &bfin_sir0_device,
 #endif
 #endif

-#if defined(CONFIG_SERIAL_BFIN_SPORT) || defined(CONFIG_SERIAL_BFIN_SPORT_MODULE)
+#if IS_ENABLED(CONFIG_SERIAL_BFIN_SPORT)
 #ifdef CONFIG_SERIAL_BFIN_SPORT0_UART
  &bfin_sport0_uart_device,
 #endif
@@ -516,31 +516,31 @@ static struct platform_device *cm_bf533_devices[] __initdata = {
 #endif
 #endif

-#if defined(CONFIG_RTC_DRV_BFIN) || defined(CONFIG_RTC_DRV_BFIN_MODULE)
+#if IS_ENABLED(CONFIG_RTC_DRV_BFIN)
  &rtc_device,
 #endif

-#if defined(CONFIG_USB_ISP1362_HCD) || defined(CONFIG_USB_ISP1362_HCD_MODULE)
+#if IS_ENABLED(CONFIG_USB_ISP1362_HCD)
  &isp1362_hcd_device,
 #endif

-#if defined(CONFIG_SMC91X) || defined(CONFIG_SMC91X_MODULE)
+#if IS_ENABLED(CONFIG_SMC91X)
  &smc91x_device,
 #endif

-#if defined(CONFIG_SMSC911X) || defined(CONFIG_SMSC911X_MODULE)
+#if IS_ENABLED(CONFIG_SMSC911X)
  &smsc911x_device,
 #endif

-#if defined(CONFIG_USB_NET2272) || defined(CONFIG_USB_NET2272_MODULE)
+#if IS_ENABLED(CONFIG_USB_NET2272)
  &net2272_bfin_device,
 #endif

-#if defined(CONFIG_SPI_BFIN5XX) || defined(CONFIG_SPI_BFIN5XX_MODULE)
+#if IS_ENABLED(CONFIG_SPI_BFIN5XX)
  &bfin_spi0_device,
 #endif

-#if defined(CONFIG_MTD_PHYSMAP) || defined(CONFIG_MTD_PHYSMAP_MODULE)
+#if IS_ENABLED(CONFIG_MTD_PHYSMAP)
  &para_flash_device,
 #endif
 };
@@ -549,7 +549,7 @@ static int __init cm_bf533_init(void)
 {
  printk(KERN_INFO "%s(): registering device resources\n", __func__);
  platform_add_devices(cm_bf533_devices, ARRAY_SIZE(cm_bf533_devices));
-#if defined(CONFIG_SPI_BFIN5XX) || defined(CONFIG_SPI_BFIN5XX_MODULE)
+#if IS_ENABLED(CONFIG_SPI_BFIN5XX)
  spi_register_board_info(bfin_spi_board_info, ARRAY_SIZE(bfin_spi_board_info));
 #endif
  return 0;
diff --git a/arch/blackfin/mach-bf533/boards/ezkit.c b/arch/blackfin/mach-bf533/boards/ezkit.c
index 90fb0d14b147..3625e9eaa8a8 100644
--- a/arch/blackfin/mach-bf533/boards/ezkit.c
+++ b/arch/blackfin/mach-bf533/boards/ezkit.c
@@ -14,7 +14,7 @@
 #include <linux/mtd/physmap.h>
 #include <linux/spi/spi.h>
 #include <linux/spi/flash.h>
-#if defined(CONFIG_USB_ISP1362_HCD) || defined(CONFIG_USB_ISP1362_HCD_MODULE)
+#if IS_ENABLED(CONFIG_USB_ISP1362_HCD)
 #include <linux/usb/isp1362.h>
 #endif
 #include <linux/irq.h>
@@ -29,7 +29,7 @@
  */
 const char bfin_board_name[] = "ADI BF533-EZKIT";

-#if defined(CONFIG_RTC_DRV_BFIN) || defined(CONFIG_RTC_DRV_BFIN_MODULE)
+#if IS_ENABLED(CONFIG_RTC_DRV_BFIN)
 static struct platform_device rtc_device = {
  .name = "rtc-bfin",
  .id = -1,
@@ -40,7 +40,7 @@ static struct platform_device rtc_device = {
  * USB-LAN EzExtender board
  * Driver needs to know address, irq and flag pin.
  */
-#if defined(CONFIG_SMC91X) || defined(CONFIG_SMC91X_MODULE)
+#if IS_ENABLED(CONFIG_SMC91X)
 #include <linux/smc91x.h>

 static struct smc91x_platdata smc91x_info = {
@@ -72,7 +72,7 @@ static struct platform_device smc91x_device = {
 };
 #endif

-#if defined(CONFIG_MTD_PHYSMAP) || defined(CONFIG_MTD_PHYSMAP_MODULE)
+#if IS_ENABLED(CONFIG_MTD_PHYSMAP)
 static struct mtd_partition ezkit_partitions_a[] = {
  {
  .name = "bootloader(nor a)",
@@ -138,7 +138,7 @@ static struct platform_device ezkit_flash_device_b = {
 };
 #endif

-#if defined(CONFIG_MTD_PLATRAM) || defined(CONFIG_MTD_PLATRAM_MODULE)
+#if IS_ENABLED(CONFIG_MTD_PLATRAM)
 static struct platdata_mtd_ram sram_data_a = {
  .mapname = "Flash A SRAM",
  .bankwidth = 2,
@@ -182,7 +182,7 @@ static struct platform_device sram_device_b = {
 };
 #endif

-#if defined(CONFIG_MTD_M25P80) || defined(CONFIG_MTD_M25P80_MODULE)
+#if IS_ENABLED(CONFIG_MTD_M25P80)
 static struct mtd_partition bfin_spi_flash_partitions[] = {
  {
  .name = "bootloader(spi)",
@@ -214,7 +214,7 @@ static struct bfin5xx_spi_chip spi_flash_chip_info = {
 #endif

 static struct spi_board_info bfin_spi_board_info[] __initdata = {
-#if defined(CONFIG_MTD_M25P80) || defined(CONFIG_MTD_M25P80_MODULE)
+#if IS_ENABLED(CONFIG_MTD_M25P80)
  {
  /* the modalias must be the same as spi device driver name */
  .modalias = "m25p80", /* Name of spi_driver for this device */
@@ -227,7 +227,7 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
  },
 #endif

-#if defined(CONFIG_SND_BF5XX_SOC_AD183X) || defined(CONFIG_SND_BF5XX_SOC_AD183X_MODULE)
+#if IS_ENABLED(CONFIG_SND_BF5XX_SOC_AD183X)
  {
  .modalias = "ad183x",
  .max_speed_hz = 3125000, /* max spi clock (SCK) speed in HZ */
@@ -235,7 +235,7 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
  .chip_select = 4,
  },
 #endif
-#if defined(CONFIG_SPI_SPIDEV) || defined(CONFIG_SPI_SPIDEV_MODULE)
+#if IS_ENABLED(CONFIG_SPI_SPIDEV)
  {
  .modalias = "spidev",
  .max_speed_hz = 3125000, /* max spi clock (SCK) speed in HZ */
@@ -245,7 +245,7 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
 #endif
 };

-#if defined(CONFIG_SPI_BFIN5XX) || defined(CONFIG_SPI_BFIN5XX_MODULE)
+#if IS_ENABLED(CONFIG_SPI_BFIN5XX)
 /* SPI (0) */
 static struct resource bfin_spi0_resource[] = {
  [0] = {
@@ -283,7 +283,7 @@ static struct platform_device bfin_spi0_device = {
 };
 #endif /* spi master and devices */

-#if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE)
+#if IS_ENABLED(CONFIG_SERIAL_BFIN)
 #ifdef CONFIG_SERIAL_BFIN_UART0
 static struct resource bfin_uart0_resources[] = {
  {
@@ -334,7 +334,7 @@ static struct platform_device bfin_uart0_device = {
 #endif
 #endif

-#if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE)
+#if IS_ENABLED(CONFIG_BFIN_SIR)
 #ifdef CONFIG_BFIN_SIR0
 static struct resource bfin_sir0_resources[] = {
  {
@@ -363,7 +363,7 @@ static struct platform_device bfin_sir0_device = {
 #endif
 #endif

-#if defined(CONFIG_KEYBOARD_GPIO) || defined(CONFIG_KEYBOARD_GPIO_MODULE)
+#if IS_ENABLED(CONFIG_KEYBOARD_GPIO)
 #include <linux/input.h>
 #include <linux/gpio_keys.h>

@@ -387,7 +387,7 @@ static struct platform_device bfin_device_gpiokeys = {
 };
 #endif

-#if defined(CONFIG_I2C_GPIO) || defined(CONFIG_I2C_GPIO_MODULE)
+#if IS_ENABLED(CONFIG_I2C_GPIO)
 #include <linux/i2c-gpio.h>

 static struct i2c_gpio_platform_data i2c_gpio_data = {
@@ -435,14 +435,14 @@ static struct platform_device bfin_dpmc = {
 };

 static struct i2c_board_info __initdata bfin_i2c_board_info[] = {
-#if defined(CONFIG_FB_BFIN_7393) || defined(CONFIG_FB_BFIN_7393_MODULE)
+#if IS_ENABLED(CONFIG_FB_BFIN_7393)
  {
  I2C_BOARD_INFO("bfin-adv7393", 0x2B),
  },
 #endif
 };

-#if defined(CONFIG_SND_BF5XX_I2S) || defined(CONFIG_SND_BF5XX_I2S_MODULE)
+#if IS_ENABLED(CONFIG_SND_BF5XX_I2S)
 static struct platform_device bfin_i2s = {
  .name = "bfin-i2s",
  .id = CONFIG_SND_BF5XX_SPORT_NUM,
@@ -450,7 +450,7 @@ static struct platform_device bfin_i2s = {
 };
 #endif

-#if defined(CONFIG_SND_BF5XX_AC97) || defined(CONFIG_SND_BF5XX_AC97_MODULE)
+#if IS_ENABLED(CONFIG_SND_BF5XX_AC97)
 static struct platform_device bfin_ac97 = {
  .name = "bfin-ac97",
  .id = CONFIG_SND_BF5XX_SPORT_NUM,
@@ -462,53 +462,53 @@ static struct platform_device *ezkit_devices[] __initdata = {

  &bfin_dpmc,

-#if defined(CONFIG_MTD_PHYSMAP) || defined(CONFIG_MTD_PHYSMAP_MODULE)
+#if IS_ENABLED(CONFIG_MTD_PHYSMAP)
  &ezkit_flash_device_a,
  &ezkit_flash_device_b,
 #endif

-#if defined(CONFIG_MTD_PLATRAM) || defined(CONFIG_MTD_PLATRAM_MODULE)
+#if IS_ENABLED(CONFIG_MTD_PLATRAM)
  &sram_device_a,
  &sram_device_b,
 #endif

-#if defined(CONFIG_SMC91X) || defined(CONFIG_SMC91X_MODULE)
+#if IS_ENABLED(CONFIG_SMC91X)
  &smc91x_device,
 #endif

-#if defined(CONFIG_SPI_BFIN5XX) || defined(CONFIG_SPI_BFIN5XX_MODULE)
+#if IS_ENABLED(CONFIG_SPI_BFIN5XX)
  &bfin_spi0_device,
 #endif

-#if defined(CONFIG_RTC_DRV_BFIN) || defined(CONFIG_RTC_DRV_BFIN_MODULE)
+#if IS_ENABLED(CONFIG_RTC_DRV_BFIN)
  &rtc_device,
 #endif

-#if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE)
+#if IS_ENABLED(CONFIG_SERIAL_BFIN)
 #ifdef CONFIG_SERIAL_BFIN_UART0
  &bfin_uart0_device,
 #endif
 #endif

-#if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE)
+#if IS_ENABLED(CONFIG_BFIN_SIR)
 #ifdef CONFIG_BFIN_SIR0
  &bfin_sir0_device,
 #endif
 #endif

-#if defined(CONFIG_KEYBOARD_GPIO) || defined(CONFIG_KEYBOARD_GPIO_MODULE)
+#if IS_ENABLED(CONFIG_KEYBOARD_GPIO)
  &bfin_device_gpiokeys,
 #endif

-#if defined(CONFIG_I2C_GPIO) || defined(CONFIG_I2C_GPIO_MODULE)
+#if IS_ENABLED(CONFIG_I2C_GPIO)
  &i2c_gpio_device,
 #endif

-#if defined(CONFIG_SND_BF5XX_I2S) || defined(CONFIG_SND_BF5XX_I2S_MODULE)
+#if IS_ENABLED(CONFIG_SND_BF5XX_I2S)
  &bfin_i2s,
 #endif

-#if defined(CONFIG_SND_BF5XX_AC97) || defined(CONFIG_SND_BF5XX_AC97_MODULE)
+#if IS_ENABLED(CONFIG_SND_BF5XX_AC97)
  &bfin_ac97,
 #endif
 };
diff --git a/arch/blackfin/mach-bf533/boards/ip0x.c b/arch/blackfin/mach-bf533/boards/ip0x.c
index e303dae4e2d9..39c8e8547b82 100644
--- a/arch/blackfin/mach-bf533/boards/ip0x.c
+++ b/arch/blackfin/mach-bf533/boards/ip0x.c
@@ -15,7 +15,7 @@
 #include <linux/mtd/partitions.h>
 #include <linux/spi/spi.h>
 #include <linux/spi/flash.h>
-#if defined(CONFIG_USB_ISP1362_HCD) || defined(CONFIG_USB_ISP1362_HCD_MODULE)
+#if IS_ENABLED(CONFIG_USB_ISP1362_HCD)
 #include <linux/usb/isp1362.h>
 #endif
 #include <asm/irq.h>
@@ -32,7 +32,7 @@ const char bfin_board_name[] = "IP04/IP08";
  * Driver needs to know address, irq and flag pin.
  */
 #if defined(CONFIG_BFIN532_IP0X)
-#if defined(CONFIG_DM9000) || defined(CONFIG_DM9000_MODULE)
+#if IS_ENABLED(CONFIG_DM9000)

 #include <linux/dm9000.h>

@@ -104,10 +104,10 @@ static struct platform_device dm9000_device2 = {
 #endif


-#if defined(CONFIG_SPI_BFIN5XX) || defined(CONFIG_SPI_BFIN5XX_MODULE)
+#if IS_ENABLED(CONFIG_SPI_BFIN5XX)
 /* all SPI peripherals info goes here */

-#if defined(CONFIG_MMC_SPI) || defined(CONFIG_MMC_SPI_MODULE)
+#if IS_ENABLED(CONFIG_MMC_SPI)
 static struct bfin5xx_spi_chip mmc_spi_chip_info = {
  .enable_dma = 0, /* if 1 - block!!! */
 };
@@ -116,7 +116,7 @@ static struct bfin5xx_spi_chip mmc_spi_chip_info = {
 /* Notice: for blackfin, the speed_hz is the value of register
  * SPI_BAUD, not the real baudrate */
 static struct spi_board_info bfin_spi_board_info[] __initdata = {
-#if defined(CONFIG_MMC_SPI) || defined(CONFIG_MMC_SPI_MODULE)
+#if IS_ENABLED(CONFIG_MMC_SPI)
  {
  .modalias = "mmc_spi",
  .max_speed_hz = 2,
@@ -142,7 +142,7 @@ static struct platform_device spi_bfin_master_device = {
 };
 #endif /* spi master and devices */

-#if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE)
+#if IS_ENABLED(CONFIG_SERIAL_BFIN)
 #ifdef CONFIG_SERIAL_BFIN_UART0
 static struct resource bfin_uart0_resources[] = {
  {
@@ -193,7 +193,7 @@ static struct platform_device bfin_uart0_device = {
 #endif
 #endif

-#if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE)
+#if IS_ENABLED(CONFIG_BFIN_SIR)
 #ifdef CONFIG_BFIN_SIR0
 static struct resource bfin_sir0_resources[] = {
  {
@@ -222,7 +222,7 @@ static struct platform_device bfin_sir0_device = {
 #endif
 #endif

-#if defined(CONFIG_USB_ISP1362_HCD) || defined(CONFIG_USB_ISP1362_HCD_MODULE)
+#if IS_ENABLED(CONFIG_USB_ISP1362_HCD)
 static struct resource isp1362_hcd_resources[] = {
  {
  .start = 0x20300000,
@@ -264,29 +264,29 @@ static struct platform_device isp1362_hcd_device = {

 static struct platform_device *ip0x_devices[] __initdata = {
 #if defined(CONFIG_BFIN532_IP0X)
-#if defined(CONFIG_DM9000) || defined(CONFIG_DM9000_MODULE)
+#if IS_ENABLED(CONFIG_DM9000)
  &dm9000_device1,
  &dm9000_device2,
 #endif
 #endif

-#if defined(CONFIG_SPI_BFIN5XX) || defined(CONFIG_SPI_BFIN5XX_MODULE)
+#if IS_ENABLED(CONFIG_SPI_BFIN5XX)
  &spi_bfin_master_device,
 #endif

-#if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE)
+#if IS_ENABLED(CONFIG_SERIAL_BFIN)
 #ifdef CONFIG_SERIAL_BFIN_UART0
  &bfin_uart0_device,
 #endif
 #endif

-#if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE)
+#if IS_ENABLED(CONFIG_BFIN_SIR)
 #ifdef CONFIG_BFIN_SIR0
  &bfin_sir0_device,
 #endif
 #endif

-#if defined(CONFIG_USB_ISP1362_HCD) || defined(CONFIG_USB_ISP1362_HCD_MODULE)
+#if IS_ENABLED(CONFIG_USB_ISP1362_HCD)
  &isp1362_hcd_device,
 #endif
 };
diff --git a/arch/blackfin/mach-bf533/boards/stamp.c b/arch/blackfin/mach-bf533/boards/stamp.c
index 4da70c47cc05..d0989290f54c 100644
--- a/arch/blackfin/mach-bf533/boards/stamp.c
+++ b/arch/blackfin/mach-bf533/boards/stamp.c
@@ -14,7 +14,7 @@
 #include <linux/spi/spi.h>
 #include <linux/spi/flash.h>
 #include <linux/spi/mmc_spi.h>
-#if defined(CONFIG_USB_ISP1362_HCD) || defined(CONFIG_USB_ISP1362_HCD_MODULE)
+#if IS_ENABLED(CONFIG_USB_ISP1362_HCD)
 #include <linux/usb/isp1362.h>
 #endif
 #include <linux/irq.h>
@@ -30,7 +30,7 @@
  */
 const char bfin_board_name[] = "ADI BF533-STAMP";

-#if defined(CONFIG_RTC_DRV_BFIN) || defined(CONFIG_RTC_DRV_BFIN_MODULE)
+#if IS_ENABLED(CONFIG_RTC_DRV_BFIN)
 static struct platform_device rtc_device = {
  .name = "rtc-bfin",
  .id = -1,
@@ -40,7 +40,7 @@ static struct platform_device rtc_device = {
 /*
  * Driver needs to know address, irq and flag pin.
  */
-#if defined(CONFIG_SMC91X) || defined(CONFIG_SMC91X_MODULE)
+#if IS_ENABLED(CONFIG_SMC91X)
 #include <linux/smc91x.h>

 static struct smc91x_platdata smc91x_info = {
@@ -73,7 +73,7 @@ static struct platform_device smc91x_device = {
 };
 #endif

-#if defined(CONFIG_USB_NET2272) || defined(CONFIG_USB_NET2272_MODULE)
+#if IS_ENABLED(CONFIG_USB_NET2272)
 static struct resource net2272_bfin_resources[] = {
  {
  .start = 0x20300000,
@@ -97,7 +97,7 @@ static struct platform_device net2272_bfin_device = {
 };
 #endif

-#if defined(CONFIG_MTD_BFIN_ASYNC) || defined(CONFIG_MTD_BFIN_ASYNC_MODULE)
+#if IS_ENABLED(CONFIG_MTD_BFIN_ASYNC)
 static struct mtd_partition stamp_partitions[] = {
  {
  .name = "bootloader(nor)",
@@ -147,7 +147,7 @@ static struct platform_device stamp_flash_device = {
 };
 #endif

-#if defined(CONFIG_MTD_M25P80) || defined(CONFIG_MTD_M25P80_MODULE)
+#if IS_ENABLED(CONFIG_MTD_M25P80)
 static struct mtd_partition bfin_spi_flash_partitions[] = {
  {
  .name = "bootloader(spi)",
@@ -178,7 +178,7 @@ static struct bfin5xx_spi_chip spi_flash_chip_info = {
 };
 #endif

-#if defined(CONFIG_MMC_SPI) || defined(CONFIG_MMC_SPI_MODULE)
+#if IS_ENABLED(CONFIG_MMC_SPI)
 #define MMC_SPI_CARD_DETECT_INT IRQ_PF5
 static int bfin_mmc_spi_init(struct device *dev,
  irqreturn_t (*detect_int)(int, void *), void *data)
@@ -206,7 +206,7 @@ static struct bfin5xx_spi_chip mmc_spi_chip_info = {
 #endif

 static struct spi_board_info bfin_spi_board_info[] __initdata = {
-#if defined(CONFIG_MTD_M25P80) || defined(CONFIG_MTD_M25P80_MODULE)
+#if IS_ENABLED(CONFIG_MTD_M25P80)
  {
  /* the modalias must be the same as spi device driver name */
  .modalias = "m25p80", /* Name of spi_driver for this device */
@@ -219,8 +219,7 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
  },
 #endif

-#if defined(CONFIG_SND_BF5XX_SOC_AD1836) || \
- defined(CONFIG_SND_BF5XX_SOC_AD1836_MODULE)
+#if IS_ENABLED(CONFIG_SND_BF5XX_SOC_AD1836)
  {
  .modalias = "ad1836",
  .max_speed_hz = 3125000, /* max spi clock (SCK) speed in HZ */
@@ -231,7 +230,7 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
  },
 #endif

-#if defined(CONFIG_SPI_SPIDEV) || defined(CONFIG_SPI_SPIDEV_MODULE)
+#if IS_ENABLED(CONFIG_SPI_SPIDEV)
  {
  .modalias = "spidev",
  .max_speed_hz = 3125000, /* max spi clock (SCK) speed in HZ */
@@ -239,7 +238,7 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
  .chip_select = 1,
  },
 #endif
-#if defined(CONFIG_MMC_SPI) || defined(CONFIG_MMC_SPI_MODULE)
+#if IS_ENABLED(CONFIG_MMC_SPI)
  {
  .modalias = "mmc_spi",
  .max_speed_hz = 20000000, /* max spi clock (SCK) speed in HZ */
@@ -252,7 +251,7 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
 #endif
 };

-#if defined(CONFIG_SPI_BFIN5XX) || defined(CONFIG_SPI_BFIN5XX_MODULE)
+#if IS_ENABLED(CONFIG_SPI_BFIN5XX)
 /* SPI (0) */
 static struct resource bfin_spi0_resource[] = {
  [0] = {
@@ -290,7 +289,7 @@ static struct platform_device bfin_spi0_device = {
 };
 #endif /* spi master and devices */

-#if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE)
+#if IS_ENABLED(CONFIG_SERIAL_BFIN)
 #ifdef CONFIG_SERIAL_BFIN_UART0
 static struct resource bfin_uart0_resources[] = {
  {
@@ -341,7 +340,7 @@ static struct platform_device bfin_uart0_device = {
 #endif
 #endif

-#if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE)
+#if IS_ENABLED(CONFIG_BFIN_SIR)
 #ifdef CONFIG_BFIN_SIR0
 static struct resource bfin_sir0_resources[] = {
  {
@@ -370,8 +369,7 @@ static struct platform_device bfin_sir0_device = {
 #endif
 #endif

-#if defined(CONFIG_SERIAL_BFIN_SPORT) || \
- defined(CONFIG_SERIAL_BFIN_SPORT_MODULE)
+#if IS_ENABLED(CONFIG_SERIAL_BFIN_SPORT)
 #ifdef CONFIG_SERIAL_BFIN_SPORT0_UART
 static struct resource bfin_sport0_uart_resources[] = {
  {
@@ -442,7 +440,7 @@ static struct platform_device bfin_sport1_uart_device = {
 #endif
 #endif

-#if defined(CONFIG_BFIN_SPORT) || defined(CONFIG_BFIN_SPORT_MODULE)
+#if IS_ENABLED(CONFIG_BFIN_SPORT)
 static struct resource bfin_sport0_resources[] = {
  {
  .start = SPORT0_TCR1,
@@ -486,7 +484,7 @@ static struct platform_device bfin_sport0_device = {
 };
 #endif

-#if defined(CONFIG_KEYBOARD_GPIO) || defined(CONFIG_KEYBOARD_GPIO_MODULE)
+#if IS_ENABLED(CONFIG_KEYBOARD_GPIO)
 #include <linux/input.h>
 #include <linux/gpio_keys.h>

@@ -509,7 +507,7 @@ static struct platform_device bfin_device_gpiokeys = {
 };
 #endif

-#if defined(CONFIG_I2C_GPIO) || defined(CONFIG_I2C_GPIO_MODULE)
+#if IS_ENABLED(CONFIG_I2C_GPIO)
 #include <linux/i2c-gpio.h>

 static struct i2c_gpio_platform_data i2c_gpio_data = {
@@ -530,29 +528,29 @@ static struct platform_device i2c_gpio_device = {
 #endif

 static struct i2c_board_info __initdata bfin_i2c_board_info[] = {
-#if defined(CONFIG_JOYSTICK_AD7142) || defined(CONFIG_JOYSTICK_AD7142_MODULE)
+#if IS_ENABLED(CONFIG_JOYSTICK_AD7142)
  {
  I2C_BOARD_INFO("ad7142_joystick", 0x2C),
  .irq = 39,
  },
 #endif
-#if defined(CONFIG_BFIN_TWI_LCD) || defined(CONFIG_BFIN_TWI_LCD_MODULE)
+#if IS_ENABLED(CONFIG_BFIN_TWI_LCD)
  {
  I2C_BOARD_INFO("pcf8574_lcd", 0x22),
  },
 #endif
-#if defined(CONFIG_INPUT_PCF8574) || defined(CONFIG_INPUT_PCF8574_MODULE)
+#if IS_ENABLED(CONFIG_INPUT_PCF8574)
  {
  I2C_BOARD_INFO("pcf8574_keypad", 0x27),
  .irq = 39,
  },
 #endif
-#if defined(CONFIG_FB_BFIN_7393) || defined(CONFIG_FB_BFIN_7393_MODULE)
+#if IS_ENABLED(CONFIG_FB_BFIN_7393)
  {
  I2C_BOARD_INFO("bfin-adv7393", 0x2B),
  },
 #endif
-#if defined(CONFIG_BFIN_TWI_LCD) || defined(CONFIG_BFIN_TWI_LCD_MODULE)
+#if IS_ENABLED(CONFIG_BFIN_TWI_LCD)
  {
  I2C_BOARD_INFO("ad5252", 0x2f),
  },
@@ -586,9 +584,8 @@ static struct platform_device bfin_dpmc = {
  },
 };

-#if defined(CONFIG_SND_BF5XX_I2S) || defined(CONFIG_SND_BF5XX_I2S_MODULE) || \
- defined(CONFIG_SND_BF5XX_AC97) || \
- defined(CONFIG_SND_BF5XX_AC97_MODULE)
+#if IS_ENABLED(CONFIG_SND_BF5XX_I2S) || \
+ IS_ENABLED(CONFIG_SND_BF5XX_AC97)

 #include <asm/bfin_sport.h>

@@ -640,22 +637,21 @@ static struct resource bfin_snd_resources[][4] = {
 };
 #endif

-#if defined(CONFIG_SND_BF5XX_I2S) || defined(CONFIG_SND_BF5XX_I2S_MODULE)
+#if IS_ENABLED(CONFIG_SND_BF5XX_I2S)
 static struct platform_device bfin_i2s_pcm = {
  .name = "bfin-i2s-pcm-audio",
  .id = -1,
 };
 #endif

-#if defined(CONFIG_SND_BF5XX_AC97) || defined(CONFIG_SND_BF5XX_AC97_MODULE)
+#if IS_ENABLED(CONFIG_SND_BF5XX_AC97)
 static struct platform_device bfin_ac97_pcm = {
  .name = "bfin-ac97-pcm-audio",
  .id = -1,
 };
 #endif

-#if defined(CONFIG_SND_BF5XX_SOC_AD1836) \
- || defined(CONFIG_SND_BF5XX_SOC_AD1836_MODULE)
+#if IS_ENABLED(CONFIG_SND_BF5XX_SOC_AD1836)
 static const char * const ad1836_link[] = {
  "bfin-i2s.0",
  "spi0.4",
@@ -669,8 +665,7 @@ static struct platform_device bfin_ad1836_machine = {
 };
 #endif

-#if defined(CONFIG_SND_BF5XX_SOC_AD73311) || \
- defined(CONFIG_SND_BF5XX_SOC_AD73311_MODULE)
+#if IS_ENABLED(CONFIG_SND_BF5XX_SOC_AD73311)
 static const unsigned ad73311_gpio[] = {
  GPIO_PF4,
 };
@@ -684,22 +679,21 @@ static struct platform_device bfin_ad73311_machine = {
 };
 #endif

-#if defined(CONFIG_SND_SOC_AD73311) || defined(CONFIG_SND_SOC_AD73311_MODULE)
+#if IS_ENABLED(CONFIG_SND_SOC_AD73311)
 static struct platform_device bfin_ad73311_codec_device = {
  .name = "ad73311",
  .id = -1,
 };
 #endif

-#if defined(CONFIG_SND_SOC_AD74111) || defined(CONFIG_SND_SOC_AD74111_MODULE)
+#if IS_ENABLED(CONFIG_SND_SOC_AD74111)
 static struct platform_device bfin_ad74111_codec_device = {
  .name = "ad74111",
  .id = -1,
 };
 #endif

-#if defined(CONFIG_SND_BF5XX_SOC_I2S) || \
- defined(CONFIG_SND_BF5XX_SOC_I2S_MODULE)
+#if IS_ENABLED(CONFIG_SND_BF5XX_SOC_I2S)
 static struct platform_device bfin_i2s = {
  .name = "bfin-i2s",
  .id = CONFIG_SND_BF5XX_SPORT_NUM,
@@ -712,8 +706,7 @@ static struct platform_device bfin_i2s = {
 };
 #endif

-#if defined(CONFIG_SND_BF5XX_SOC_AC97) || \
- defined(CONFIG_SND_BF5XX_SOC_AC97_MODULE)
+#if IS_ENABLED(CONFIG_SND_BF5XX_SOC_AC97)
 static struct platform_device bfin_ac97 = {
  .name = "bfin-ac97",
  .id = CONFIG_SND_BF5XX_SPORT_NUM,
@@ -730,36 +723,35 @@ static struct platform_device *stamp_devices[] __initdata = {

  &bfin_dpmc,

-#if defined(CONFIG_RTC_DRV_BFIN) || defined(CONFIG_RTC_DRV_BFIN_MODULE)
+#if IS_ENABLED(CONFIG_RTC_DRV_BFIN)
  &rtc_device,
 #endif

-#if defined(CONFIG_SMC91X) || defined(CONFIG_SMC91X_MODULE)
+#if IS_ENABLED(CONFIG_SMC91X)
  &smc91x_device,
 #endif

-#if defined(CONFIG_USB_NET2272) || defined(CONFIG_USB_NET2272_MODULE)
+#if IS_ENABLED(CONFIG_USB_NET2272)
  &net2272_bfin_device,
 #endif

-#if defined(CONFIG_SPI_BFIN5XX) || defined(CONFIG_SPI_BFIN5XX_MODULE)
+#if IS_ENABLED(CONFIG_SPI_BFIN5XX)
  &bfin_spi0_device,
 #endif

-#if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE)
+#if IS_ENABLED(CONFIG_SERIAL_BFIN)
 #ifdef CONFIG_SERIAL_BFIN_UART0
  &bfin_uart0_device,
 #endif
 #endif

-#if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE)
+#if IS_ENABLED(CONFIG_BFIN_SIR)
 #ifdef CONFIG_BFIN_SIR0
  &bfin_sir0_device,
 #endif
 #endif

-#if defined(CONFIG_SERIAL_BFIN_SPORT) || \
- defined(CONFIG_SERIAL_BFIN_SPORT_MODULE)
+#if IS_ENABLED(CONFIG_SERIAL_BFIN_SPORT)
 #ifdef CONFIG_SERIAL_BFIN_SPORT0_UART
  &bfin_sport0_uart_device,
 #endif
@@ -768,58 +760,54 @@ static struct platform_device *stamp_devices[] __initdata = {
 #endif
 #endif

-#if defined(CONFIG_KEYBOARD_GPIO) || defined(CONFIG_KEYBOARD_GPIO_MODULE)
+#if IS_ENABLED(CONFIG_KEYBOARD_GPIO)
  &bfin_device_gpiokeys,
 #endif

-#if defined(CONFIG_I2C_GPIO) || defined(CONFIG_I2C_GPIO_MODULE)
+#if IS_ENABLED(CONFIG_I2C_GPIO)
  &i2c_gpio_device,
 #endif

-#if defined(CONFIG_MTD_BFIN_ASYNC) || defined(CONFIG_MTD_BFIN_ASYNC_MODULE)
+#if IS_ENABLED(CONFIG_MTD_BFIN_ASYNC)
  &stamp_flash_device,
 #endif

-#if defined(CONFIG_SND_BF5XX_I2S) || defined(CONFIG_SND_BF5XX_I2S_MODULE)
+#if IS_ENABLED(CONFIG_SND_BF5XX_I2S)
  &bfin_i2s_pcm,
 #endif

-#if defined(CONFIG_SND_BF5XX_AC97) || defined(CONFIG_SND_BF5XX_AC97_MODULE)
+#if IS_ENABLED(CONFIG_SND_BF5XX_AC97)
  &bfin_ac97_pcm,
 #endif

-#if defined(CONFIG_SND_BF5XX_SOC_AD1836) || \
- defined(CONFIG_SND_BF5XX_SOC_AD1836_MODULE)
+#if IS_ENABLED(CONFIG_SND_BF5XX_SOC_AD1836)
  &bfin_ad1836_machine,
 #endif

-#if defined(CONFIG_SND_BF5XX_SOC_AD73311) || \
- defined(CONFIG_SND_BF5XX_SOC_AD73311_MODULE)
+#if IS_ENABLED(CONFIG_SND_BF5XX_SOC_AD73311)
  &bfin_ad73311_machine,
 #endif

-#if defined(CONFIG_SND_SOC_AD73311) || defined(CONFIG_SND_SOC_AD73311_MODULE)
+#if IS_ENABLED(CONFIG_SND_SOC_AD73311)
  &bfin_ad73311_codec_device,
 #endif

-#if defined(CONFIG_SND_SOC_AD74111) || defined(CONFIG_SND_SOC_AD74111_MODULE)
+#if IS_ENABLED(CONFIG_SND_SOC_AD74111)
  &bfin_ad74111_codec_device,
 #endif

-#if defined(CONFIG_SND_BF5XX_SOC_I2S) || \
- defined(CONFIG_SND_BF5XX_SOC_I2S_MODULE)
+#if IS_ENABLED(CONFIG_SND_BF5XX_SOC_I2S)
  &bfin_i2s,
 #endif

-#if defined(CONFIG_SND_BF5XX_SOC_AC97) || \
- defined(CONFIG_SND_BF5XX_SOC_AC97_MODULE)
+#if IS_ENABLED(CONFIG_SND_BF5XX_SOC_AC97)
  &bfin_ac97,
 #endif
 };

 static int __init net2272_init(void)
 {
-#if defined(CONFIG_USB_NET2272) || defined(CONFIG_USB_NET2272_MODULE)
+#if IS_ENABLED(CONFIG_USB_NET2272)
  int ret;

  /* Set PF0 to 0, PF1 to 1 make /AMS3 work properly */
@@ -865,7 +853,7 @@ static int __init stamp_init(void)
  if (ret < 0)
  return ret;

-#if defined(CONFIG_SMC91X) || defined(CONFIG_SMC91X_MODULE)
+#if IS_ENABLED(CONFIG_SMC91X)
  /*
  * setup BF533_STAMP CPLD to route AMS3 to Ethernet MAC.
  * the bfin-async-map driver takes care of flipping between
diff --git a/arch/blackfin/mach-bf537/boards/cm_bf537e.c b/arch/blackfin/mach-bf537/boards/cm_bf537e.c
index 85e4fc9f9c22..c65c6dbda3da 100644
--- a/arch/blackfin/mach-bf537/boards/cm_bf537e.c
+++ b/arch/blackfin/mach-bf537/boards/cm_bf537e.c
@@ -16,7 +16,7 @@
 #include <linux/mtd/physmap.h>
 #include <linux/spi/spi.h>
 #include <linux/spi/flash.h>
-#if defined(CONFIG_USB_ISP1362_HCD) || defined(CONFIG_USB_ISP1362_HCD_MODULE)
+#if IS_ENABLED(CONFIG_USB_ISP1362_HCD)
 #include <linux/usb/isp1362.h>
 #endif
 #include <linux/ata_platform.h>
@@ -32,10 +32,10 @@
  */
 const char bfin_board_name[] = "Bluetechnix CM BF537E";

-#if defined(CONFIG_SPI_BFIN5XX) || defined(CONFIG_SPI_BFIN5XX_MODULE)
+#if IS_ENABLED(CONFIG_SPI_BFIN5XX)
 /* all SPI peripherals info goes here */

-#if defined(CONFIG_MTD_M25P80) || defined(CONFIG_MTD_M25P80_MODULE)
+#if IS_ENABLED(CONFIG_MTD_M25P80)
 static struct mtd_partition bfin_spi_flash_partitions[] = {
  {
  .name = "bootloader(spi)",
@@ -66,14 +66,14 @@ static struct bfin5xx_spi_chip spi_flash_chip_info = {
 };
 #endif

-#if defined(CONFIG_MMC_SPI) || defined(CONFIG_MMC_SPI_MODULE)
+#if IS_ENABLED(CONFIG_MMC_SPI)
 static struct bfin5xx_spi_chip mmc_spi_chip_info = {
  .enable_dma = 0,
 };
 #endif

 static struct spi_board_info bfin_spi_board_info[] __initdata = {
-#if defined(CONFIG_MTD_M25P80) || defined(CONFIG_MTD_M25P80_MODULE)
+#if IS_ENABLED(CONFIG_MTD_M25P80)
  {
  /* the modalias must be the same as spi device driver name */
  .modalias = "m25p80", /* Name of spi_driver for this device */
@@ -86,7 +86,7 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
  },
 #endif

-#if defined(CONFIG_SND_BF5XX_SOC_AD183X) || defined(CONFIG_SND_BF5XX_SOC_AD183X_MODULE)
+#if IS_ENABLED(CONFIG_SND_BF5XX_SOC_AD183X)
  {
  .modalias = "ad183x",
  .max_speed_hz = 3125000, /* max spi clock (SCK) speed in HZ */
@@ -95,7 +95,7 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
  },
 #endif

-#if defined(CONFIG_MMC_SPI) || defined(CONFIG_MMC_SPI_MODULE)
+#if IS_ENABLED(CONFIG_MMC_SPI)
  {
  .modalias = "mmc_spi",
  .max_speed_hz = 20000000, /* max spi clock (SCK) speed in HZ */
@@ -144,7 +144,7 @@ static struct platform_device bfin_spi0_device = {
 };
 #endif /* spi master and devices */

-#if defined(CONFIG_SPI_BFIN_SPORT) || defined(CONFIG_SPI_BFIN_SPORT_MODULE)
+#if IS_ENABLED(CONFIG_SPI_BFIN_SPORT)

 /* SPORT SPI controller data */
 static struct bfin5xx_spi_master bfin_sport_spi0_info = {
@@ -209,20 +209,20 @@ static struct platform_device bfin_sport_spi1_device = {

 #endif /* sport spi master and devices */

-#if defined(CONFIG_RTC_DRV_BFIN) || defined(CONFIG_RTC_DRV_BFIN_MODULE)
+#if IS_ENABLED(CONFIG_RTC_DRV_BFIN)
 static struct platform_device rtc_device = {
  .name = "rtc-bfin",
  .id = -1,
 };
 #endif

-#if defined(CONFIG_FB_HITACHI_TX09) || defined(CONFIG_FB_HITACHI_TX09_MODULE)
+#if IS_ENABLED(CONFIG_FB_HITACHI_TX09)
 static struct platform_device hitachi_fb_device = {
  .name = "hitachi-tx09",
 };
 #endif

-#if defined(CONFIG_SMC91X) || defined(CONFIG_SMC91X_MODULE)
+#if IS_ENABLED(CONFIG_SMC91X)
 #include <linux/smc91x.h>

 static struct smc91x_platdata smc91x_info = {
@@ -254,7 +254,7 @@ static struct platform_device smc91x_device = {
 };
 #endif

-#if defined(CONFIG_USB_ISP1362_HCD) || defined(CONFIG_USB_ISP1362_HCD_MODULE)
+#if IS_ENABLED(CONFIG_USB_ISP1362_HCD)
 static struct resource isp1362_hcd_resources[] = {
  {
  .start = 0x20308000,
@@ -293,7 +293,7 @@ static struct platform_device isp1362_hcd_device = {
 };
 #endif

-#if defined(CONFIG_USB_NET2272) || defined(CONFIG_USB_NET2272_MODULE)
+#if IS_ENABLED(CONFIG_USB_NET2272)
 static struct resource net2272_bfin_resources[] = {
  {
  .start = 0x20300000,
@@ -314,7 +314,7 @@ static struct platform_device net2272_bfin_device = {
 };
 #endif

-#if defined(CONFIG_MTD_GPIO_ADDR) || defined(CONFIG_MTD_GPIO_ADDR_MODULE)
+#if IS_ENABLED(CONFIG_MTD_GPIO_ADDR)
 static struct mtd_partition cm_partitions[] = {
  {
  .name = "bootloader(nor)",
@@ -363,7 +363,7 @@ static struct platform_device cm_flash_device = {
 };
 #endif

-#if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE)
+#if IS_ENABLED(CONFIG_SERIAL_BFIN)
 #ifdef CONFIG_SERIAL_BFIN_UART0
 static struct resource bfin_uart0_resources[] = {
  {
@@ -498,7 +498,7 @@ static struct platform_device bfin_uart1_device = {
 #endif
 #endif

-#if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE)
+#if IS_ENABLED(CONFIG_BFIN_SIR)
 #ifdef CONFIG_BFIN_SIR0
 static struct resource bfin_sir0_resources[] = {
  {
@@ -551,7 +551,7 @@ static struct platform_device bfin_sir1_device = {
 #endif
 #endif

-#if defined(CONFIG_I2C_BLACKFIN_TWI) || defined(CONFIG_I2C_BLACKFIN_TWI_MODULE)
+#if IS_ENABLED(CONFIG_I2C_BLACKFIN_TWI)
 static const u16 bfin_twi0_pins[] = {P_TWI0_SCL, P_TWI0_SDA, 0};

 static struct resource bfin_twi0_resource[] = {
@@ -578,14 +578,14 @@ static struct platform_device i2c_bfin_twi_device = {
 };
 #endif

-#if defined(CONFIG_SERIAL_BFIN_SPORT) || defined(CONFIG_SERIAL_BFIN_SPORT_MODULE) \
-|| defined(CONFIG_BFIN_SPORT) || defined(CONFIG_BFIN_SPORT_MODULE)
+#if IS_ENABLED(CONFIG_SERIAL_BFIN_SPORT) \
+|| IS_ENABLED(CONFIG_BFIN_SPORT)
 unsigned short bfin_sport0_peripherals[] = {
  P_SPORT0_TFS, P_SPORT0_DTPRI, P_SPORT0_TSCLK, P_SPORT0_RFS,
  P_SPORT0_DRPRI, P_SPORT0_RSCLK, P_SPORT0_DRSEC, P_SPORT0_DTSEC, 0
 };
 #endif
-#if defined(CONFIG_SERIAL_BFIN_SPORT) || defined(CONFIG_SERIAL_BFIN_SPORT_MODULE)
+#if IS_ENABLED(CONFIG_SERIAL_BFIN_SPORT)
 #ifdef CONFIG_SERIAL_BFIN_SPORT0_UART
 static struct resource bfin_sport0_uart_resources[] = {
  {
@@ -650,7 +650,7 @@ static struct platform_device bfin_sport1_uart_device = {
 };
 #endif
 #endif
-#if defined(CONFIG_BFIN_SPORT) || defined(CONFIG_BFIN_SPORT_MODULE)
+#if IS_ENABLED(CONFIG_BFIN_SPORT)
 static struct resource bfin_sport0_resources[] = {
  {
  .start = SPORT0_TCR1,
@@ -694,7 +694,7 @@ static struct platform_device bfin_sport0_device = {
 };
 #endif

-#if defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE)
+#if IS_ENABLED(CONFIG_BFIN_MAC)
 #include <linux/bfin_mac.h>
 static const unsigned short bfin_mac_peripherals[] = P_MII0;

@@ -727,7 +727,7 @@ static struct platform_device bfin_mac_device = {
 };
 #endif

-#if defined(CONFIG_PATA_PLATFORM) || defined(CONFIG_PATA_PLATFORM_MODULE)
+#if IS_ENABLED(CONFIG_PATA_PLATFORM)
 #define PATA_INT IRQ_PF14

 static struct pata_platform_info bfin_pata_platform_data = {
@@ -795,19 +795,19 @@ static struct platform_device *cm_bf537e_devices[] __initdata = {

  &bfin_dpmc,

-#if defined(CONFIG_BFIN_SPORT) || defined(CONFIG_BFIN_SPORT_MODULE)
+#if IS_ENABLED(CONFIG_BFIN_SPORT)
  &bfin_sport0_device,
 #endif

-#if defined(CONFIG_FB_HITACHI_TX09) || defined(CONFIG_FB_HITACHI_TX09_MODULE)
+#if IS_ENABLED(CONFIG_FB_HITACHI_TX09)
  &hitachi_fb_device,
 #endif

-#if defined(CONFIG_RTC_DRV_BFIN) || defined(CONFIG_RTC_DRV_BFIN_MODULE)
+#if IS_ENABLED(CONFIG_RTC_DRV_BFIN)
  &rtc_device,
 #endif

-#if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE)
+#if IS_ENABLED(CONFIG_SERIAL_BFIN)
 #ifdef CONFIG_SERIAL_BFIN_UART0
  &bfin_uart0_device,
 #endif
@@ -816,7 +816,7 @@ static struct platform_device *cm_bf537e_devices[] __initdata = {
 #endif
 #endif

-#if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE)
+#if IS_ENABLED(CONFIG_BFIN_SIR)
 #ifdef CONFIG_BFIN_SIR0
  &bfin_sir0_device,
 #endif
@@ -825,11 +825,11 @@ static struct platform_device *cm_bf537e_devices[] __initdata = {
 #endif
 #endif

-#if defined(CONFIG_I2C_BLACKFIN_TWI) || defined(CONFIG_I2C_BLACKFIN_TWI_MODULE)
+#if IS_ENABLED(CONFIG_I2C_BLACKFIN_TWI)
  &i2c_bfin_twi_device,
 #endif

-#if defined(CONFIG_SERIAL_BFIN_SPORT) || defined(CONFIG_SERIAL_BFIN_SPORT_MODULE)
+#if IS_ENABLED(CONFIG_SERIAL_BFIN_SPORT)
 #ifdef CONFIG_SERIAL_BFIN_SPORT0_UART
  &bfin_sport0_uart_device,
 #endif
@@ -838,44 +838,44 @@ static struct platform_device *cm_bf537e_devices[] __initdata = {
 #endif
 #endif

-#if defined(CONFIG_USB_ISP1362_HCD) || defined(CONFIG_USB_ISP1362_HCD_MODULE)
+#if IS_ENABLED(CONFIG_USB_ISP1362_HCD)
  &isp1362_hcd_device,
 #endif

-#if defined(CONFIG_SMC91X) || defined(CONFIG_SMC91X_MODULE)
+#if IS_ENABLED(CONFIG_SMC91X)
  &smc91x_device,
 #endif

-#if defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE)
+#if IS_ENABLED(CONFIG_BFIN_MAC)
  &bfin_mii_bus,
  &bfin_mac_device,
 #endif

-#if defined(CONFIG_USB_NET2272) || defined(CONFIG_USB_NET2272_MODULE)
+#if IS_ENABLED(CONFIG_USB_NET2272)
  &net2272_bfin_device,
 #endif

-#if defined(CONFIG_SPI_BFIN5XX) || defined(CONFIG_SPI_BFIN5XX_MODULE)
+#if IS_ENABLED(CONFIG_SPI_BFIN5XX)
  &bfin_spi0_device,
 #endif

-#if defined(CONFIG_SPI_BFIN_SPORT) || defined(CONFIG_SPI_BFIN_SPORT_MODULE)
+#if IS_ENABLED(CONFIG_SPI_BFIN_SPORT)
  &bfin_sport_spi0_device,
  &bfin_sport_spi1_device,
 #endif

-#if defined(CONFIG_PATA_PLATFORM) || defined(CONFIG_PATA_PLATFORM_MODULE)
+#if IS_ENABLED(CONFIG_PATA_PLATFORM)
  &bfin_pata_device,
 #endif

-#if defined(CONFIG_MTD_GPIO_ADDR) || defined(CONFIG_MTD_GPIO_ADDR_MODULE)
+#if IS_ENABLED(CONFIG_MTD_GPIO_ADDR)
  &cm_flash_device,
 #endif
 };

 static int __init net2272_init(void)
 {
-#if defined(CONFIG_USB_NET2272) || defined(CONFIG_USB_NET2272_MODULE)
+#if IS_ENABLED(CONFIG_USB_NET2272)
  int ret;

  ret = gpio_request(GPIO_PG14, "net2272");
@@ -895,11 +895,11 @@ static int __init cm_bf537e_init(void)
 {
  printk(KERN_INFO "%s(): registering device resources\n", __func__);
  platform_add_devices(cm_bf537e_devices, ARRAY_SIZE(cm_bf537e_devices));
-#if defined(CONFIG_SPI_BFIN5XX) || defined(CONFIG_SPI_BFIN5XX_MODULE)
+#if IS_ENABLED(CONFIG_SPI_BFIN5XX)
  spi_register_board_info(bfin_spi_board_info, ARRAY_SIZE(bfin_spi_board_info));
 #endif

-#if defined(CONFIG_PATA_PLATFORM) || defined(CONFIG_PATA_PLATFORM_MODULE)
+#if IS_ENABLED(CONFIG_PATA_PLATFORM)
  irq_set_status_flags(PATA_INT, IRQ_NOAUTOEN);
 #endif

diff --git a/arch/blackfin/mach-bf537/boards/cm_bf537u.c b/arch/blackfin/mach-bf537/boards/cm_bf537u.c
index 0143d8bef909..af58454b4bff 100644
--- a/arch/blackfin/mach-bf537/boards/cm_bf537u.c
+++ b/arch/blackfin/mach-bf537/boards/cm_bf537u.c
@@ -16,7 +16,7 @@
 #include <linux/mtd/physmap.h>
 #include <linux/spi/spi.h>
 #include <linux/spi/flash.h>
-#if defined(CONFIG_USB_ISP1362_HCD) || defined(CONFIG_USB_ISP1362_HCD_MODULE)
+#if IS_ENABLED(CONFIG_USB_ISP1362_HCD)
 #include <linux/usb/isp1362.h>
 #endif
 #include <linux/ata_platform.h>
@@ -32,10 +32,10 @@
  */
 const char bfin_board_name[] = "Bluetechnix CM BF537U";

-#if defined(CONFIG_SPI_BFIN5XX) || defined(CONFIG_SPI_BFIN5XX_MODULE)
+#if IS_ENABLED(CONFIG_SPI_BFIN5XX)
 /* all SPI peripherals info goes here */

-#if defined(CONFIG_MTD_M25P80) || defined(CONFIG_MTD_M25P80_MODULE)
+#if IS_ENABLED(CONFIG_MTD_M25P80)
 static struct mtd_partition bfin_spi_flash_partitions[] = {
  {
  .name = "bootloader(spi)",
@@ -66,14 +66,14 @@ static struct bfin5xx_spi_chip spi_flash_chip_info = {
 };
 #endif

-#if defined(CONFIG_MMC_SPI) || defined(CONFIG_MMC_SPI_MODULE)
+#if IS_ENABLED(CONFIG_MMC_SPI)
 static struct bfin5xx_spi_chip mmc_spi_chip_info = {
  .enable_dma = 0,
 };
 #endif

 static struct spi_board_info bfin_spi_board_info[] __initdata = {
-#if defined(CONFIG_MTD_M25P80) || defined(CONFIG_MTD_M25P80_MODULE)
+#if IS_ENABLED(CONFIG_MTD_M25P80)
  {
  /* the modalias must be the same as spi device driver name */
  .modalias = "m25p80", /* Name of spi_driver for this device */
@@ -86,7 +86,7 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
  },
 #endif

-#if defined(CONFIG_SND_BF5XX_SOC_AD183X) || defined(CONFIG_SND_BF5XX_SOC_AD183X_MODULE)
+#if IS_ENABLED(CONFIG_SND_BF5XX_SOC_AD183X)
  {
  .modalias = "ad183x",
  .max_speed_hz = 3125000, /* max spi clock (SCK) speed in HZ */
@@ -95,7 +95,7 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
  },
 #endif

-#if defined(CONFIG_MMC_SPI) || defined(CONFIG_MMC_SPI_MODULE)
+#if IS_ENABLED(CONFIG_MMC_SPI)
  {
  .modalias = "mmc_spi",
  .max_speed_hz = 20000000, /* max spi clock (SCK) speed in HZ */
@@ -144,20 +144,20 @@ static struct platform_device bfin_spi0_device = {
 };
 #endif /* spi master and devices */

-#if defined(CONFIG_RTC_DRV_BFIN) || defined(CONFIG_RTC_DRV_BFIN_MODULE)
+#if IS_ENABLED(CONFIG_RTC_DRV_BFIN)
 static struct platform_device rtc_device = {
  .name = "rtc-bfin",
  .id = -1,
 };
 #endif

-#if defined(CONFIG_FB_HITACHI_TX09) || defined(CONFIG_FB_HITACHI_TX09_MODULE)
+#if IS_ENABLED(CONFIG_FB_HITACHI_TX09)
 static struct platform_device hitachi_fb_device = {
  .name = "hitachi-tx09",
 };
 #endif

-#if defined(CONFIG_SMC91X) || defined(CONFIG_SMC91X_MODULE)
+#if IS_ENABLED(CONFIG_SMC91X)
 #include <linux/smc91x.h>

 static struct smc91x_platdata smc91x_info = {
@@ -189,7 +189,7 @@ static struct platform_device smc91x_device = {
 };
 #endif

-#if defined(CONFIG_USB_ISP1362_HCD) || defined(CONFIG_USB_ISP1362_HCD_MODULE)
+#if IS_ENABLED(CONFIG_USB_ISP1362_HCD)
 static struct resource isp1362_hcd_resources[] = {
  {
  .start = 0x20308000,
@@ -228,7 +228,7 @@ static struct platform_device isp1362_hcd_device = {
 };
 #endif

-#if defined(CONFIG_USB_NET2272) || defined(CONFIG_USB_NET2272_MODULE)
+#if IS_ENABLED(CONFIG_USB_NET2272)
 static struct resource net2272_bfin_resources[] = {
  {
  .start = 0x20200000,
@@ -249,7 +249,7 @@ static struct platform_device net2272_bfin_device = {
 };
 #endif

-#if defined(CONFIG_MTD_GPIO_ADDR) || defined(CONFIG_MTD_GPIO_ADDR_MODULE)
+#if IS_ENABLED(CONFIG_MTD_GPIO_ADDR)
 static struct mtd_partition cm_partitions[] = {
  {
  .name = "bootloader(nor)",
@@ -298,7 +298,7 @@ static struct platform_device cm_flash_device = {
 };
 #endif

-#if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE)
+#if IS_ENABLED(CONFIG_SERIAL_BFIN)
 #ifdef CONFIG_SERIAL_BFIN_UART0
 static struct resource bfin_uart0_resources[] = {
  {
@@ -397,7 +397,7 @@ static struct platform_device bfin_uart1_device = {
 #endif
 #endif

-#if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE)
+#if IS_ENABLED(CONFIG_BFIN_SIR)
 #ifdef CONFIG_BFIN_SIR0
 static struct resource bfin_sir0_resources[] = {
  {
@@ -450,7 +450,7 @@ static struct platform_device bfin_sir1_device = {
 #endif
 #endif

-#if defined(CONFIG_I2C_BLACKFIN_TWI) || defined(CONFIG_I2C_BLACKFIN_TWI_MODULE)
+#if IS_ENABLED(CONFIG_I2C_BLACKFIN_TWI)
 static const u16 bfin_twi0_pins[] = {P_TWI0_SCL, P_TWI0_SDA, 0};

 static struct resource bfin_twi0_resource[] = {
@@ -477,7 +477,7 @@ static struct platform_device i2c_bfin_twi_device = {
 };
 #endif

-#if defined(CONFIG_SERIAL_BFIN_SPORT) || defined(CONFIG_SERIAL_BFIN_SPORT_MODULE)
+#if IS_ENABLED(CONFIG_SERIAL_BFIN_SPORT)
 #ifdef CONFIG_SERIAL_BFIN_SPORT0_UART
 static struct resource bfin_sport0_uart_resources[] = {
  {
@@ -548,7 +548,7 @@ static struct platform_device bfin_sport1_uart_device = {
 #endif
 #endif

-#if defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE)
+#if IS_ENABLED(CONFIG_BFIN_MAC)
552#include <linux/bfin_mac.h> 552#include <linux/bfin_mac.h>
553static const unsigned short bfin_mac_peripherals[] = P_MII0; 553static const unsigned short bfin_mac_peripherals[] = P_MII0;
554 554
@@ -581,7 +581,7 @@ static struct platform_device bfin_mac_device = {
581}; 581};
582#endif 582#endif
583 583
584#if defined(CONFIG_PATA_PLATFORM) || defined(CONFIG_PATA_PLATFORM_MODULE) 584#if IS_ENABLED(CONFIG_PATA_PLATFORM)
585#define PATA_INT IRQ_PF14 585#define PATA_INT IRQ_PF14
586 586
587static struct pata_platform_info bfin_pata_platform_data = { 587static struct pata_platform_info bfin_pata_platform_data = {
@@ -649,15 +649,15 @@ static struct platform_device *cm_bf537u_devices[] __initdata = {
649 649
650 &bfin_dpmc, 650 &bfin_dpmc,
651 651
652#if defined(CONFIG_FB_HITACHI_TX09) || defined(CONFIG_FB_HITACHI_TX09_MODULE) 652#if IS_ENABLED(CONFIG_FB_HITACHI_TX09)
653 &hitachi_fb_device, 653 &hitachi_fb_device,
654#endif 654#endif
655 655
656#if defined(CONFIG_RTC_DRV_BFIN) || defined(CONFIG_RTC_DRV_BFIN_MODULE) 656#if IS_ENABLED(CONFIG_RTC_DRV_BFIN)
657 &rtc_device, 657 &rtc_device,
658#endif 658#endif
659 659
660#if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE) 660#if IS_ENABLED(CONFIG_SERIAL_BFIN)
661#ifdef CONFIG_SERIAL_BFIN_UART0 661#ifdef CONFIG_SERIAL_BFIN_UART0
662 &bfin_uart0_device, 662 &bfin_uart0_device,
663#endif 663#endif
@@ -666,7 +666,7 @@ static struct platform_device *cm_bf537u_devices[] __initdata = {
666#endif 666#endif
667#endif 667#endif
668 668
669#if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE) 669#if IS_ENABLED(CONFIG_BFIN_SIR)
670#ifdef CONFIG_BFIN_SIR0 670#ifdef CONFIG_BFIN_SIR0
671 &bfin_sir0_device, 671 &bfin_sir0_device,
672#endif 672#endif
@@ -675,11 +675,11 @@ static struct platform_device *cm_bf537u_devices[] __initdata = {
675#endif 675#endif
676#endif 676#endif
677 677
678#if defined(CONFIG_I2C_BLACKFIN_TWI) || defined(CONFIG_I2C_BLACKFIN_TWI_MODULE) 678#if IS_ENABLED(CONFIG_I2C_BLACKFIN_TWI)
679 &i2c_bfin_twi_device, 679 &i2c_bfin_twi_device,
680#endif 680#endif
681 681
682#if defined(CONFIG_SERIAL_BFIN_SPORT) || defined(CONFIG_SERIAL_BFIN_SPORT_MODULE) 682#if IS_ENABLED(CONFIG_SERIAL_BFIN_SPORT)
683#ifdef CONFIG_SERIAL_BFIN_SPORT0_UART 683#ifdef CONFIG_SERIAL_BFIN_SPORT0_UART
684 &bfin_sport0_uart_device, 684 &bfin_sport0_uart_device,
685#endif 685#endif
@@ -688,39 +688,39 @@ static struct platform_device *cm_bf537u_devices[] __initdata = {
688#endif 688#endif
689#endif 689#endif
690 690
691#if defined(CONFIG_USB_ISP1362_HCD) || defined(CONFIG_USB_ISP1362_HCD_MODULE) 691#if IS_ENABLED(CONFIG_USB_ISP1362_HCD)
692 &isp1362_hcd_device, 692 &isp1362_hcd_device,
693#endif 693#endif
694 694
695#if defined(CONFIG_SMC91X) || defined(CONFIG_SMC91X_MODULE) 695#if IS_ENABLED(CONFIG_SMC91X)
696 &smc91x_device, 696 &smc91x_device,
697#endif 697#endif
698 698
699#if defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE) 699#if IS_ENABLED(CONFIG_BFIN_MAC)
700 &bfin_mii_bus, 700 &bfin_mii_bus,
701 &bfin_mac_device, 701 &bfin_mac_device,
702#endif 702#endif
703 703
704#if defined(CONFIG_USB_NET2272) || defined(CONFIG_USB_NET2272_MODULE) 704#if IS_ENABLED(CONFIG_USB_NET2272)
705 &net2272_bfin_device, 705 &net2272_bfin_device,
706#endif 706#endif
707 707
708#if defined(CONFIG_SPI_BFIN5XX) || defined(CONFIG_SPI_BFIN5XX_MODULE) 708#if IS_ENABLED(CONFIG_SPI_BFIN5XX)
709 &bfin_spi0_device, 709 &bfin_spi0_device,
710#endif 710#endif
711 711
712#if defined(CONFIG_PATA_PLATFORM) || defined(CONFIG_PATA_PLATFORM_MODULE) 712#if IS_ENABLED(CONFIG_PATA_PLATFORM)
713 &bfin_pata_device, 713 &bfin_pata_device,
714#endif 714#endif
715 715
716#if defined(CONFIG_MTD_GPIO_ADDR) || defined(CONFIG_MTD_GPIO_ADDR_MODULE) 716#if IS_ENABLED(CONFIG_MTD_GPIO_ADDR)
717 &cm_flash_device, 717 &cm_flash_device,
718#endif 718#endif
719}; 719};
720 720
721static int __init net2272_init(void) 721static int __init net2272_init(void)
722{ 722{
723#if defined(CONFIG_USB_NET2272) || defined(CONFIG_USB_NET2272_MODULE) 723#if IS_ENABLED(CONFIG_USB_NET2272)
724 int ret; 724 int ret;
725 725
726 ret = gpio_request(GPIO_PH15, driver_name); 726 ret = gpio_request(GPIO_PH15, driver_name);
@@ -752,11 +752,11 @@ static int __init cm_bf537u_init(void)
752{ 752{
753 printk(KERN_INFO "%s(): registering device resources\n", __func__); 753 printk(KERN_INFO "%s(): registering device resources\n", __func__);
754 platform_add_devices(cm_bf537u_devices, ARRAY_SIZE(cm_bf537u_devices)); 754 platform_add_devices(cm_bf537u_devices, ARRAY_SIZE(cm_bf537u_devices));
755#if defined(CONFIG_SPI_BFIN5XX) || defined(CONFIG_SPI_BFIN5XX_MODULE) 755#if IS_ENABLED(CONFIG_SPI_BFIN5XX)
756 spi_register_board_info(bfin_spi_board_info, ARRAY_SIZE(bfin_spi_board_info)); 756 spi_register_board_info(bfin_spi_board_info, ARRAY_SIZE(bfin_spi_board_info));
757#endif 757#endif
758 758
759#if defined(CONFIG_PATA_PLATFORM) || defined(CONFIG_PATA_PLATFORM_MODULE) 759#if IS_ENABLED(CONFIG_PATA_PLATFORM)
760 irq_set_status_flags(PATA_INT, IRQ_NOAUTOEN); 760 irq_set_status_flags(PATA_INT, IRQ_NOAUTOEN);
761#endif 761#endif
762 762
diff --git a/arch/blackfin/mach-bf537/boards/dnp5370.c b/arch/blackfin/mach-bf537/boards/dnp5370.c
index 8bbf0a23fd49..e79b3b810c39 100644
--- a/arch/blackfin/mach-bf537/boards/dnp5370.c
+++ b/arch/blackfin/mach-bf537/boards/dnp5370.c
@@ -41,14 +41,14 @@ const char bfin_board_name[] = "DNP/5370";
41#define FLASH_MAC 0x202f0000 41#define FLASH_MAC 0x202f0000
42#define CONFIG_MTD_PHYSMAP_LEN 0x300000 42#define CONFIG_MTD_PHYSMAP_LEN 0x300000
43 43
44#if defined(CONFIG_RTC_DRV_BFIN) || defined(CONFIG_RTC_DRV_BFIN_MODULE) 44#if IS_ENABLED(CONFIG_RTC_DRV_BFIN)
45static struct platform_device rtc_device = { 45static struct platform_device rtc_device = {
46 .name = "rtc-bfin", 46 .name = "rtc-bfin",
47 .id = -1, 47 .id = -1,
48}; 48};
49#endif 49#endif
50 50
51#if defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE) 51#if IS_ENABLED(CONFIG_BFIN_MAC)
52#include <linux/bfin_mac.h> 52#include <linux/bfin_mac.h>
53static const unsigned short bfin_mac_peripherals[] = P_RMII0; 53static const unsigned short bfin_mac_peripherals[] = P_RMII0;
54 54
@@ -81,7 +81,7 @@ static struct platform_device bfin_mac_device = {
81}; 81};
82#endif 82#endif
83 83
84#if defined(CONFIG_MTD_PHYSMAP) || defined(CONFIG_MTD_PHYSMAP_MODULE) 84#if IS_ENABLED(CONFIG_MTD_PHYSMAP)
85static struct mtd_partition asmb_flash_partitions[] = { 85static struct mtd_partition asmb_flash_partitions[] = {
86 { 86 {
87 .name = "bootloader(nor)", 87 .name = "bootloader(nor)",
@@ -125,9 +125,9 @@ static struct platform_device asmb_flash_device = {
125}; 125};
126#endif 126#endif
127 127
128#if defined(CONFIG_SPI_BFIN5XX) || defined(CONFIG_SPI_BFIN5XX_MODULE) 128#if IS_ENABLED(CONFIG_SPI_BFIN5XX)
129 129
130#if defined(CONFIG_MMC_SPI) || defined(CONFIG_MMC_SPI_MODULE) 130#if IS_ENABLED(CONFIG_MMC_SPI)
131 131
132static struct bfin5xx_spi_chip mmc_spi_chip_info = { 132static struct bfin5xx_spi_chip mmc_spi_chip_info = {
133 .enable_dma = 0, /* use no dma transfer with this chip*/ 133 .enable_dma = 0, /* use no dma transfer with this chip*/
@@ -135,7 +135,7 @@ static struct bfin5xx_spi_chip mmc_spi_chip_info = {
135 135
136#endif 136#endif
137 137
138#if defined(CONFIG_MTD_DATAFLASH) || defined(CONFIG_MTD_DATAFLASH_MODULE) 138#if IS_ENABLED(CONFIG_MTD_DATAFLASH)
139/* This mapping is for at45db642 it has 1056 page size, 139/* This mapping is for at45db642 it has 1056 page size,
140 * partition size and offset should be page aligned 140 * partition size and offset should be page aligned
141 */ 141 */
@@ -166,7 +166,7 @@ static struct bfin5xx_spi_chip spi_dataflash_chip_info = {
166 166
167static struct spi_board_info bfin_spi_board_info[] __initdata = { 167static struct spi_board_info bfin_spi_board_info[] __initdata = {
168/* SD/MMC card reader at SPI bus */ 168/* SD/MMC card reader at SPI bus */
169#if defined(CONFIG_MMC_SPI) || defined(CONFIG_MMC_SPI_MODULE) 169#if IS_ENABLED(CONFIG_MMC_SPI)
170 { 170 {
171 .modalias = "mmc_spi", 171 .modalias = "mmc_spi",
172 .max_speed_hz = 20000000, 172 .max_speed_hz = 20000000,
@@ -178,7 +178,7 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
178#endif 178#endif
179 179
180/* 8 Megabyte Atmel NOR flash chip at SPI bus */ 180/* 8 Megabyte Atmel NOR flash chip at SPI bus */
181#if defined(CONFIG_MTD_DATAFLASH) || defined(CONFIG_MTD_DATAFLASH_MODULE) 181#if IS_ENABLED(CONFIG_MTD_DATAFLASH)
182 { 182 {
183 .modalias = "mtd_dataflash", 183 .modalias = "mtd_dataflash",
184 .max_speed_hz = 16700000, 184 .max_speed_hz = 16700000,
@@ -228,7 +228,7 @@ static struct platform_device spi_bfin_master_device = {
228}; 228};
229#endif 229#endif
230 230
231#if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE) 231#if IS_ENABLED(CONFIG_SERIAL_BFIN)
232#ifdef CONFIG_SERIAL_BFIN_UART0 232#ifdef CONFIG_SERIAL_BFIN_UART0
233static struct resource bfin_uart0_resources[] = { 233static struct resource bfin_uart0_resources[] = {
234 { 234 {
@@ -328,7 +328,7 @@ static struct platform_device bfin_uart1_device = {
328#endif 328#endif
329#endif 329#endif
330 330
331#if defined(CONFIG_I2C_BLACKFIN_TWI) || defined(CONFIG_I2C_BLACKFIN_TWI_MODULE) 331#if IS_ENABLED(CONFIG_I2C_BLACKFIN_TWI)
332static const u16 bfin_twi0_pins[] = {P_TWI0_SCL, P_TWI0_SDA, 0}; 332static const u16 bfin_twi0_pins[] = {P_TWI0_SCL, P_TWI0_SDA, 0};
333 333
334static struct resource bfin_twi0_resource[] = { 334static struct resource bfin_twi0_resource[] = {
@@ -357,7 +357,7 @@ static struct platform_device i2c_bfin_twi_device = {
357 357
358static struct platform_device *dnp5370_devices[] __initdata = { 358static struct platform_device *dnp5370_devices[] __initdata = {
359 359
360#if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE) 360#if IS_ENABLED(CONFIG_SERIAL_BFIN)
361#ifdef CONFIG_SERIAL_BFIN_UART0 361#ifdef CONFIG_SERIAL_BFIN_UART0
362 &bfin_uart0_device, 362 &bfin_uart0_device,
363#endif 363#endif
@@ -366,24 +366,24 @@ static struct platform_device *dnp5370_devices[] __initdata = {
366#endif 366#endif
367#endif 367#endif
368 368
369#if defined(CONFIG_MTD_PHYSMAP) || defined(CONFIG_MTD_PHYSMAP_MODULE) 369#if IS_ENABLED(CONFIG_MTD_PHYSMAP)
370 &asmb_flash_device, 370 &asmb_flash_device,
371#endif 371#endif
372 372
373#if defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE) 373#if IS_ENABLED(CONFIG_BFIN_MAC)
374 &bfin_mii_bus, 374 &bfin_mii_bus,
375 &bfin_mac_device, 375 &bfin_mac_device,
376#endif 376#endif
377 377
378#if defined(CONFIG_SPI_BFIN5XX) || defined(CONFIG_SPI_BFIN5XX_MODULE) 378#if IS_ENABLED(CONFIG_SPI_BFIN5XX)
379 &spi_bfin_master_device, 379 &spi_bfin_master_device,
380#endif 380#endif
381 381
382#if defined(CONFIG_I2C_BLACKFIN_TWI) || defined(CONFIG_I2C_BLACKFIN_TWI_MODULE) 382#if IS_ENABLED(CONFIG_I2C_BLACKFIN_TWI)
383 &i2c_bfin_twi_device, 383 &i2c_bfin_twi_device,
384#endif 384#endif
385 385
386#if defined(CONFIG_RTC_DRV_BFIN) || defined(CONFIG_RTC_DRV_BFIN_MODULE) 386#if IS_ENABLED(CONFIG_RTC_DRV_BFIN)
387 &rtc_device, 387 &rtc_device,
388#endif 388#endif
389 389
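
Because IS_ENABLED() expands to a constant 0 or 1, it is not limited to preprocessor guards; it can also be used in ordinary C conditionals, where the compiler discards the dead branch while still type-checking it. A small sketch under that assumption, with CONFIG_FOO again standing in for a real option:

#include <linux/kconfig.h>
#include <linux/printk.h>

/* CONFIG_FOO is a placeholder; pr_info() just demonstrates which branch runs. */
static void foo_report(void)
{
	if (IS_ENABLED(CONFIG_FOO))
		pr_info("foo: support compiled in (built-in or module)\n");
	else
		pr_info("foo: support disabled\n");
}
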
diff --git a/arch/blackfin/mach-bf537/boards/minotaur.c b/arch/blackfin/mach-bf537/boards/minotaur.c
index a10f90e444bc..dd7bda07bf90 100644
--- a/arch/blackfin/mach-bf537/boards/minotaur.c
+++ b/arch/blackfin/mach-bf537/boards/minotaur.c
@@ -13,7 +13,7 @@
13#include <linux/mtd/partitions.h> 13#include <linux/mtd/partitions.h>
14#include <linux/spi/spi.h> 14#include <linux/spi/spi.h>
15#include <linux/spi/flash.h> 15#include <linux/spi/flash.h>
16#if defined(CONFIG_USB_ISP1362_HCD) || defined(CONFIG_USB_ISP1362_HCD_MODULE) 16#if IS_ENABLED(CONFIG_USB_ISP1362_HCD)
17#include <linux/usb/isp1362.h> 17#include <linux/usb/isp1362.h>
18#endif 18#endif
19#include <linux/ata_platform.h> 19#include <linux/ata_platform.h>
@@ -31,7 +31,7 @@
31 */ 31 */
32const char bfin_board_name[] = "CamSig Minotaur BF537"; 32const char bfin_board_name[] = "CamSig Minotaur BF537";
33 33
34#if defined(CONFIG_BFIN_CFPCMCIA) || defined(CONFIG_BFIN_CFPCMCIA_MODULE) 34#if IS_ENABLED(CONFIG_BFIN_CFPCMCIA)
35static struct resource bfin_pcmcia_cf_resources[] = { 35static struct resource bfin_pcmcia_cf_resources[] = {
36 { 36 {
37 .start = 0x20310000, /* IO PORT */ 37 .start = 0x20310000, /* IO PORT */
@@ -60,14 +60,14 @@ static struct platform_device bfin_pcmcia_cf_device = {
60}; 60};
61#endif 61#endif
62 62
63#if defined(CONFIG_RTC_DRV_BFIN) || defined(CONFIG_RTC_DRV_BFIN_MODULE) 63#if IS_ENABLED(CONFIG_RTC_DRV_BFIN)
64static struct platform_device rtc_device = { 64static struct platform_device rtc_device = {
65 .name = "rtc-bfin", 65 .name = "rtc-bfin",
66 .id = -1, 66 .id = -1,
67}; 67};
68#endif 68#endif
69 69
70#if defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE) 70#if IS_ENABLED(CONFIG_BFIN_MAC)
71#include <linux/bfin_mac.h> 71#include <linux/bfin_mac.h>
72static const unsigned short bfin_mac_peripherals[] = P_MII0; 72static const unsigned short bfin_mac_peripherals[] = P_MII0;
73 73
@@ -100,7 +100,7 @@ static struct platform_device bfin_mac_device = {
100}; 100};
101#endif 101#endif
102 102
103#if defined(CONFIG_USB_NET2272) || defined(CONFIG_USB_NET2272_MODULE) 103#if IS_ENABLED(CONFIG_USB_NET2272)
104static struct resource net2272_bfin_resources[] = { 104static struct resource net2272_bfin_resources[] = {
105 { 105 {
106 .start = 0x20300000, 106 .start = 0x20300000,
@@ -121,11 +121,10 @@ static struct platform_device net2272_bfin_device = {
121}; 121};
122#endif 122#endif
123 123
124#if defined(CONFIG_SPI_BFIN5XX) || defined(CONFIG_SPI_BFIN5XX_MODULE) 124#if IS_ENABLED(CONFIG_SPI_BFIN5XX)
125/* all SPI peripherals info goes here */ 125/* all SPI peripherals info goes here */
126 126
127#if defined(CONFIG_MTD_M25P80) \ 127#if IS_ENABLED(CONFIG_MTD_M25P80)
128 || defined(CONFIG_MTD_M25P80_MODULE)
129 128
130/* Partition sizes */ 129/* Partition sizes */
131#define FLASH_SIZE 0x00400000 130#define FLASH_SIZE 0x00400000
@@ -162,15 +161,14 @@ static struct bfin5xx_spi_chip spi_flash_chip_info = {
162}; 161};
163#endif 162#endif
164 163
165#if defined(CONFIG_MMC_SPI) || defined(CONFIG_MMC_SPI_MODULE) 164#if IS_ENABLED(CONFIG_MMC_SPI)
166static struct bfin5xx_spi_chip mmc_spi_chip_info = { 165static struct bfin5xx_spi_chip mmc_spi_chip_info = {
167 .enable_dma = 0, 166 .enable_dma = 0,
168}; 167};
169#endif 168#endif
170 169
171static struct spi_board_info bfin_spi_board_info[] __initdata = { 170static struct spi_board_info bfin_spi_board_info[] __initdata = {
172#if defined(CONFIG_MTD_M25P80) \ 171#if IS_ENABLED(CONFIG_MTD_M25P80)
173 || defined(CONFIG_MTD_M25P80_MODULE)
174 { 172 {
175 /* the modalias must be the same as spi device driver name */ 173 /* the modalias must be the same as spi device driver name */
176 .modalias = "m25p80", /* Name of spi_driver for this device */ 174 .modalias = "m25p80", /* Name of spi_driver for this device */
@@ -183,7 +181,7 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
183 }, 181 },
184#endif 182#endif
185 183
186#if defined(CONFIG_MMC_SPI) || defined(CONFIG_MMC_SPI_MODULE) 184#if IS_ENABLED(CONFIG_MMC_SPI)
187 { 185 {
188 .modalias = "mmc_spi", 186 .modalias = "mmc_spi",
189 .max_speed_hz = 5000000, /* max spi clock (SCK) speed in HZ */ 187 .max_speed_hz = 5000000, /* max spi clock (SCK) speed in HZ */
@@ -231,7 +229,7 @@ static struct platform_device bfin_spi0_device = {
231}; 229};
232#endif /* spi master and devices */ 230#endif /* spi master and devices */
233 231
234#if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE) 232#if IS_ENABLED(CONFIG_SERIAL_BFIN)
235#ifdef CONFIG_SERIAL_BFIN_UART0 233#ifdef CONFIG_SERIAL_BFIN_UART0
236static struct resource bfin_uart0_resources[] = { 234static struct resource bfin_uart0_resources[] = {
237 { 235 {
@@ -330,7 +328,7 @@ static struct platform_device bfin_uart1_device = {
330#endif 328#endif
331#endif 329#endif
332 330
333#if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE) 331#if IS_ENABLED(CONFIG_BFIN_SIR)
334#ifdef CONFIG_BFIN_SIR0 332#ifdef CONFIG_BFIN_SIR0
335static struct resource bfin_sir0_resources[] = { 333static struct resource bfin_sir0_resources[] = {
336 { 334 {
@@ -385,7 +383,7 @@ static struct platform_device bfin_sir1_device = {
385#endif 383#endif
386#endif 384#endif
387 385
388#if defined(CONFIG_I2C_BLACKFIN_TWI) || defined(CONFIG_I2C_BLACKFIN_TWI_MODULE) 386#if IS_ENABLED(CONFIG_I2C_BLACKFIN_TWI)
389static const u16 bfin_twi0_pins[] = {P_TWI0_SCL, P_TWI0_SDA, 0}; 387static const u16 bfin_twi0_pins[] = {P_TWI0_SCL, P_TWI0_SDA, 0};
390 388
391static struct resource bfin_twi0_resource[] = { 389static struct resource bfin_twi0_resource[] = {
@@ -412,7 +410,7 @@ static struct platform_device i2c_bfin_twi_device = {
412}; 410};
413#endif 411#endif
414 412
415#if defined(CONFIG_SERIAL_BFIN_SPORT) || defined(CONFIG_SERIAL_BFIN_SPORT_MODULE) 413#if IS_ENABLED(CONFIG_SERIAL_BFIN_SPORT)
416#ifdef CONFIG_SERIAL_BFIN_SPORT0_UART 414#ifdef CONFIG_SERIAL_BFIN_SPORT0_UART
417static struct resource bfin_sport0_uart_resources[] = { 415static struct resource bfin_sport0_uart_resources[] = {
418 { 416 {
@@ -484,28 +482,28 @@ static struct platform_device bfin_sport1_uart_device = {
484#endif 482#endif
485 483
486static struct platform_device *minotaur_devices[] __initdata = { 484static struct platform_device *minotaur_devices[] __initdata = {
487#if defined(CONFIG_BFIN_CFPCMCIA) || defined(CONFIG_BFIN_CFPCMCIA_MODULE) 485#if IS_ENABLED(CONFIG_BFIN_CFPCMCIA)
488 &bfin_pcmcia_cf_device, 486 &bfin_pcmcia_cf_device,
489#endif 487#endif
490 488
491#if defined(CONFIG_RTC_DRV_BFIN) || defined(CONFIG_RTC_DRV_BFIN_MODULE) 489#if IS_ENABLED(CONFIG_RTC_DRV_BFIN)
492 &rtc_device, 490 &rtc_device,
493#endif 491#endif
494 492
495#if defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE) 493#if IS_ENABLED(CONFIG_BFIN_MAC)
496 &bfin_mii_bus, 494 &bfin_mii_bus,
497 &bfin_mac_device, 495 &bfin_mac_device,
498#endif 496#endif
499 497
500#if defined(CONFIG_USB_NET2272) || defined(CONFIG_USB_NET2272_MODULE) 498#if IS_ENABLED(CONFIG_USB_NET2272)
501 &net2272_bfin_device, 499 &net2272_bfin_device,
502#endif 500#endif
503 501
504#if defined(CONFIG_SPI_BFIN5XX) || defined(CONFIG_SPI_BFIN5XX_MODULE) 502#if IS_ENABLED(CONFIG_SPI_BFIN5XX)
505 &bfin_spi0_device, 503 &bfin_spi0_device,
506#endif 504#endif
507 505
508#if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE) 506#if IS_ENABLED(CONFIG_SERIAL_BFIN)
509#ifdef CONFIG_SERIAL_BFIN_UART0 507#ifdef CONFIG_SERIAL_BFIN_UART0
510 &bfin_uart0_device, 508 &bfin_uart0_device,
511#endif 509#endif
@@ -514,7 +512,7 @@ static struct platform_device *minotaur_devices[] __initdata = {
514#endif 512#endif
515#endif 513#endif
516 514
517#if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE) 515#if IS_ENABLED(CONFIG_BFIN_SIR)
518#ifdef CONFIG_BFIN_SIR0 516#ifdef CONFIG_BFIN_SIR0
519 &bfin_sir0_device, 517 &bfin_sir0_device,
520#endif 518#endif
@@ -523,11 +521,11 @@ static struct platform_device *minotaur_devices[] __initdata = {
523#endif 521#endif
524#endif 522#endif
525 523
526#if defined(CONFIG_I2C_BLACKFIN_TWI) || defined(CONFIG_I2C_BLACKFIN_TWI_MODULE) 524#if IS_ENABLED(CONFIG_I2C_BLACKFIN_TWI)
527 &i2c_bfin_twi_device, 525 &i2c_bfin_twi_device,
528#endif 526#endif
529 527
530#if defined(CONFIG_SERIAL_BFIN_SPORT) || defined(CONFIG_SERIAL_BFIN_SPORT_MODULE) 528#if IS_ENABLED(CONFIG_SERIAL_BFIN_SPORT)
531#ifdef CONFIG_SERIAL_BFIN_SPORT0_UART 529#ifdef CONFIG_SERIAL_BFIN_SPORT0_UART
532 &bfin_sport0_uart_device, 530 &bfin_sport0_uart_device,
533#endif 531#endif
@@ -542,7 +540,7 @@ static int __init minotaur_init(void)
542{ 540{
543 printk(KERN_INFO "%s(): registering device resources\n", __func__); 541 printk(KERN_INFO "%s(): registering device resources\n", __func__);
544 platform_add_devices(minotaur_devices, ARRAY_SIZE(minotaur_devices)); 542 platform_add_devices(minotaur_devices, ARRAY_SIZE(minotaur_devices));
545#if defined(CONFIG_SPI_BFIN5XX) || defined(CONFIG_SPI_BFIN5XX_MODULE) 543#if IS_ENABLED(CONFIG_SPI_BFIN5XX)
546 spi_register_board_info(bfin_spi_board_info, 544 spi_register_board_info(bfin_spi_board_info,
547 ARRAY_SIZE(bfin_spi_board_info)); 545 ARRAY_SIZE(bfin_spi_board_info));
548#endif 546#endif
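
The minotaur.c hunks above (and the pnav10.c hunks below) also fold the two-line continuation form, #if defined(CONFIG_MTD_M25P80) \ || defined(CONFIG_MTD_M25P80_MODULE), into a single IS_ENABLED() line. Where code needs to distinguish built-in from modular rather than treat them alike, kconfig.h also provides IS_BUILTIN() and IS_MODULE(); a brief sketch with placeholder names:

#include <linux/kconfig.h>

/*
 * IS_BUILTIN(CONFIG_FOO) -> 1 only when CONFIG_FOO=y
 * IS_MODULE(CONFIG_FOO)  -> 1 only when CONFIG_FOO=m
 * IS_ENABLED(CONFIG_FOO) -> 1 when either of the above is 1
 * CONFIG_FOO and FOO_BUILD are placeholders for illustration.
 */
#if IS_BUILTIN(CONFIG_FOO)
#define FOO_BUILD "built-in"
#elif IS_MODULE(CONFIG_FOO)
#define FOO_BUILD "module"
#else
#define FOO_BUILD "disabled"
#endif
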
diff --git a/arch/blackfin/mach-bf537/boards/pnav10.c b/arch/blackfin/mach-bf537/boards/pnav10.c
index 6b395510405b..06a50ddb54c0 100644
--- a/arch/blackfin/mach-bf537/boards/pnav10.c
+++ b/arch/blackfin/mach-bf537/boards/pnav10.c
@@ -30,7 +30,7 @@ const char bfin_board_name[] = "ADI PNAV-1.0";
30 * Driver needs to know address, irq and flag pin. 30 * Driver needs to know address, irq and flag pin.
31 */ 31 */
32 32
33#if defined(CONFIG_BFIN_CFPCMCIA) || defined(CONFIG_BFIN_CFPCMCIA_MODULE) 33#if IS_ENABLED(CONFIG_BFIN_CFPCMCIA)
34static struct resource bfin_pcmcia_cf_resources[] = { 34static struct resource bfin_pcmcia_cf_resources[] = {
35 { 35 {
36 .start = 0x20310000, /* IO PORT */ 36 .start = 0x20310000, /* IO PORT */
@@ -59,14 +59,14 @@ static struct platform_device bfin_pcmcia_cf_device = {
59}; 59};
60#endif 60#endif
61 61
62#if defined(CONFIG_RTC_DRV_BFIN) || defined(CONFIG_RTC_DRV_BFIN_MODULE) 62#if IS_ENABLED(CONFIG_RTC_DRV_BFIN)
63static struct platform_device rtc_device = { 63static struct platform_device rtc_device = {
64 .name = "rtc-bfin", 64 .name = "rtc-bfin",
65 .id = -1, 65 .id = -1,
66}; 66};
67#endif 67#endif
68 68
69#if defined(CONFIG_SMC91X) || defined(CONFIG_SMC91X_MODULE) 69#if IS_ENABLED(CONFIG_SMC91X)
70#include <linux/smc91x.h> 70#include <linux/smc91x.h>
71 71
72static struct smc91x_platdata smc91x_info = { 72static struct smc91x_platdata smc91x_info = {
@@ -99,7 +99,7 @@ static struct platform_device smc91x_device = {
99}; 99};
100#endif 100#endif
101 101
102#if defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE) 102#if IS_ENABLED(CONFIG_BFIN_MAC)
103#include <linux/bfin_mac.h> 103#include <linux/bfin_mac.h>
104static const unsigned short bfin_mac_peripherals[] = P_RMII0; 104static const unsigned short bfin_mac_peripherals[] = P_RMII0;
105 105
@@ -132,7 +132,7 @@ static struct platform_device bfin_mac_device = {
132}; 132};
133#endif 133#endif
134 134
135#if defined(CONFIG_USB_NET2272) || defined(CONFIG_USB_NET2272_MODULE) 135#if IS_ENABLED(CONFIG_USB_NET2272)
136static struct resource net2272_bfin_resources[] = { 136static struct resource net2272_bfin_resources[] = {
137 { 137 {
138 .start = 0x20300000, 138 .start = 0x20300000,
@@ -153,11 +153,10 @@ static struct platform_device net2272_bfin_device = {
153}; 153};
154#endif 154#endif
155 155
156#if defined(CONFIG_SPI_BFIN5XX) || defined(CONFIG_SPI_BFIN5XX_MODULE) 156#if IS_ENABLED(CONFIG_SPI_BFIN5XX)
157/* all SPI peripherals info goes here */ 157/* all SPI peripherals info goes here */
158 158
159#if defined(CONFIG_MTD_M25P80) \ 159#if IS_ENABLED(CONFIG_MTD_M25P80)
160 || defined(CONFIG_MTD_M25P80_MODULE)
161static struct mtd_partition bfin_spi_flash_partitions[] = { 160static struct mtd_partition bfin_spi_flash_partitions[] = {
162 { 161 {
163 .name = "bootloader(spi)", 162 .name = "bootloader(spi)",
@@ -188,13 +187,13 @@ static struct bfin5xx_spi_chip spi_flash_chip_info = {
188}; 187};
189#endif 188#endif
190 189
191#if defined(CONFIG_MMC_SPI) || defined(CONFIG_MMC_SPI_MODULE) 190#if IS_ENABLED(CONFIG_MMC_SPI)
192static struct bfin5xx_spi_chip mmc_spi_chip_info = { 191static struct bfin5xx_spi_chip mmc_spi_chip_info = {
193 .enable_dma = 0, 192 .enable_dma = 0,
194}; 193};
195#endif 194#endif
196 195
197#if defined(CONFIG_TOUCHSCREEN_AD7877) || defined(CONFIG_TOUCHSCREEN_AD7877_MODULE) 196#if IS_ENABLED(CONFIG_TOUCHSCREEN_AD7877)
198static const struct ad7877_platform_data bfin_ad7877_ts_info = { 197static const struct ad7877_platform_data bfin_ad7877_ts_info = {
199 .model = 7877, 198 .model = 7877,
200 .vref_delay_usecs = 50, /* internal, no capacitor */ 199 .vref_delay_usecs = 50, /* internal, no capacitor */
@@ -211,8 +210,7 @@ static const struct ad7877_platform_data bfin_ad7877_ts_info = {
211#endif 210#endif
212 211
213static struct spi_board_info bfin_spi_board_info[] __initdata = { 212static struct spi_board_info bfin_spi_board_info[] __initdata = {
214#if defined(CONFIG_MTD_M25P80) \ 213#if IS_ENABLED(CONFIG_MTD_M25P80)
215 || defined(CONFIG_MTD_M25P80_MODULE)
216 { 214 {
217 /* the modalias must be the same as spi device driver name */ 215 /* the modalias must be the same as spi device driver name */
218 .modalias = "m25p80", /* Name of spi_driver for this device */ 216 .modalias = "m25p80", /* Name of spi_driver for this device */
@@ -225,8 +223,7 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
225 }, 223 },
226#endif 224#endif
227 225
228#if defined(CONFIG_SND_BF5XX_SOC_AD183X) \ 226#if IS_ENABLED(CONFIG_SND_BF5XX_SOC_AD183X)
229 || defined(CONFIG_SND_BF5XX_SOC_AD183X_MODULE)
230 { 227 {
231 .modalias = "ad183x", 228 .modalias = "ad183x",
232 .max_speed_hz = 3125000, /* max spi clock (SCK) speed in HZ */ 229 .max_speed_hz = 3125000, /* max spi clock (SCK) speed in HZ */
@@ -234,7 +231,7 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
234 .chip_select = 4, 231 .chip_select = 4,
235 }, 232 },
236#endif 233#endif
237#if defined(CONFIG_MMC_SPI) || defined(CONFIG_MMC_SPI_MODULE) 234#if IS_ENABLED(CONFIG_MMC_SPI)
238 { 235 {
239 .modalias = "mmc_spi", 236 .modalias = "mmc_spi",
240 .max_speed_hz = 25000000, /* max spi clock (SCK) speed in HZ */ 237 .max_speed_hz = 25000000, /* max spi clock (SCK) speed in HZ */
@@ -244,7 +241,7 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
244 .mode = SPI_MODE_3, 241 .mode = SPI_MODE_3,
245 }, 242 },
246#endif 243#endif
247#if defined(CONFIG_TOUCHSCREEN_AD7877) || defined(CONFIG_TOUCHSCREEN_AD7877_MODULE) 244#if IS_ENABLED(CONFIG_TOUCHSCREEN_AD7877)
248{ 245{
249 .modalias = "ad7877", 246 .modalias = "ad7877",
250 .platform_data = &bfin_ad7877_ts_info, 247 .platform_data = &bfin_ad7877_ts_info,
@@ -294,13 +291,13 @@ static struct platform_device bfin_spi0_device = {
294}; 291};
295#endif /* spi master and devices */ 292#endif /* spi master and devices */
296 293
297#if defined(CONFIG_FB_BF537_LQ035) || defined(CONFIG_FB_BF537_LQ035_MODULE) 294#if IS_ENABLED(CONFIG_FB_BF537_LQ035)
298static struct platform_device bfin_fb_device = { 295static struct platform_device bfin_fb_device = {
299 .name = "bf537-lq035", 296 .name = "bf537-lq035",
300}; 297};
301#endif 298#endif
302 299
303#if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE) 300#if IS_ENABLED(CONFIG_SERIAL_BFIN)
304#ifdef CONFIG_SERIAL_BFIN_UART0 301#ifdef CONFIG_SERIAL_BFIN_UART0
305static struct resource bfin_uart0_resources[] = { 302static struct resource bfin_uart0_resources[] = {
306 { 303 {
@@ -399,7 +396,7 @@ static struct platform_device bfin_uart1_device = {
399#endif 396#endif
400#endif 397#endif
401 398
402#if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE) 399#if IS_ENABLED(CONFIG_BFIN_SIR)
403#ifdef CONFIG_BFIN_SIR0 400#ifdef CONFIG_BFIN_SIR0
404static struct resource bfin_sir0_resources[] = { 401static struct resource bfin_sir0_resources[] = {
405 { 402 {
@@ -455,36 +452,36 @@ static struct platform_device bfin_sir1_device = {
455#endif 452#endif
456 453
457static struct platform_device *stamp_devices[] __initdata = { 454static struct platform_device *stamp_devices[] __initdata = {
458#if defined(CONFIG_BFIN_CFPCMCIA) || defined(CONFIG_BFIN_CFPCMCIA_MODULE) 455#if IS_ENABLED(CONFIG_BFIN_CFPCMCIA)
459 &bfin_pcmcia_cf_device, 456 &bfin_pcmcia_cf_device,
460#endif 457#endif
461 458
462#if defined(CONFIG_RTC_DRV_BFIN) || defined(CONFIG_RTC_DRV_BFIN_MODULE) 459#if IS_ENABLED(CONFIG_RTC_DRV_BFIN)
463 &rtc_device, 460 &rtc_device,
464#endif 461#endif
465 462
466#if defined(CONFIG_SMC91X) || defined(CONFIG_SMC91X_MODULE) 463#if IS_ENABLED(CONFIG_SMC91X)
467 &smc91x_device, 464 &smc91x_device,
468#endif 465#endif
469 466
470#if defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE) 467#if IS_ENABLED(CONFIG_BFIN_MAC)
471 &bfin_mii_bus, 468 &bfin_mii_bus,
472 &bfin_mac_device, 469 &bfin_mac_device,
473#endif 470#endif
474 471
475#if defined(CONFIG_USB_NET2272) || defined(CONFIG_USB_NET2272_MODULE) 472#if IS_ENABLED(CONFIG_USB_NET2272)
476 &net2272_bfin_device, 473 &net2272_bfin_device,
477#endif 474#endif
478 475
479#if defined(CONFIG_SPI_BFIN5XX) || defined(CONFIG_SPI_BFIN5XX_MODULE) 476#if IS_ENABLED(CONFIG_SPI_BFIN5XX)
480 &bfin_spi0_device, 477 &bfin_spi0_device,
481#endif 478#endif
482 479
483#if defined(CONFIG_FB_BF537_LQ035) || defined(CONFIG_FB_BF537_LQ035_MODULE) 480#if IS_ENABLED(CONFIG_FB_BF537_LQ035)
484 &bfin_fb_device, 481 &bfin_fb_device,
485#endif 482#endif
486 483
487#if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE) 484#if IS_ENABLED(CONFIG_SERIAL_BFIN)
488#ifdef CONFIG_SERIAL_BFIN_UART0 485#ifdef CONFIG_SERIAL_BFIN_UART0
489 &bfin_uart0_device, 486 &bfin_uart0_device,
490#endif 487#endif
@@ -493,7 +490,7 @@ static struct platform_device *stamp_devices[] __initdata = {
493#endif 490#endif
494#endif 491#endif
495 492
496#if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE) 493#if IS_ENABLED(CONFIG_BFIN_SIR)
497#ifdef CONFIG_BFIN_SIR0 494#ifdef CONFIG_BFIN_SIR0
498 &bfin_sir0_device, 495 &bfin_sir0_device,
499#endif 496#endif
@@ -507,7 +504,7 @@ static int __init pnav_init(void)
507{ 504{
508 printk(KERN_INFO "%s(): registering device resources\n", __func__); 505 printk(KERN_INFO "%s(): registering device resources\n", __func__);
509 platform_add_devices(stamp_devices, ARRAY_SIZE(stamp_devices)); 506 platform_add_devices(stamp_devices, ARRAY_SIZE(stamp_devices));
510#if defined(CONFIG_SPI_BFIN5XX) || defined(CONFIG_SPI_BFIN5XX_MODULE) 507#if IS_ENABLED(CONFIG_SPI_BFIN5XX)
511 spi_register_board_info(bfin_spi_board_info, 508 spi_register_board_info(bfin_spi_board_info,
512 ARRAY_SIZE(bfin_spi_board_info)); 509 ARRAY_SIZE(bfin_spi_board_info));
513#endif 510#endif
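
The stamp.c changes that follow combine the IS_ENABLED() conversion with symbol renames visible in the hunks themselves (for example CONFIG_AD2S120X becomes CONFIG_AD2S1200, with the matching modalias and platform-data names renamed alongside it, and CONFIG_AD7314 becomes CONFIG_SENSORS_AD7314). The guard-plus-registration pattern the board files use is unchanged; a minimal, self-contained sketch of that pattern with placeholder names (example-dev, example_board_init) rather than entries from this diff:

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/kconfig.h>
#include <linux/spi/spi.h>

#if IS_ENABLED(CONFIG_SPI_BFIN5XX)
/* Placeholder SPI slave entry; the modalias must match the target spi_driver. */
static struct spi_board_info example_spi_board_info[] __initdata = {
	{
		.modalias     = "example-dev",
		.max_speed_hz = 1000000,	/* max SPI clock (SCK) in Hz */
		.bus_num      = 0,
		.chip_select  = 1,
	},
};
#endif

static int __init example_board_init(void)
{
#if IS_ENABLED(CONFIG_SPI_BFIN5XX)
	spi_register_board_info(example_spi_board_info,
				ARRAY_SIZE(example_spi_board_info));
#endif
	return 0;
}
arch_initcall(example_board_init);
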
diff --git a/arch/blackfin/mach-bf537/boards/stamp.c b/arch/blackfin/mach-bf537/boards/stamp.c
index 44fd1d4682ac..de19b8a56007 100644
--- a/arch/blackfin/mach-bf537/boards/stamp.c
+++ b/arch/blackfin/mach-bf537/boards/stamp.c
@@ -18,7 +18,7 @@
18#include <linux/mtd/physmap.h> 18#include <linux/mtd/physmap.h>
19#include <linux/spi/spi.h> 19#include <linux/spi/spi.h>
20#include <linux/spi/flash.h> 20#include <linux/spi/flash.h>
21#if defined(CONFIG_USB_ISP1362_HCD) || defined(CONFIG_USB_ISP1362_HCD_MODULE) 21#if IS_ENABLED(CONFIG_USB_ISP1362_HCD)
22#include <linux/usb/isp1362.h> 22#include <linux/usb/isp1362.h>
23#endif 23#endif
24#include <linux/i2c.h> 24#include <linux/i2c.h>
@@ -53,7 +53,7 @@ const char bfin_board_name[] = "ADI BF537-STAMP";
53 * Driver needs to know address, irq and flag pin. 53 * Driver needs to know address, irq and flag pin.
54 */ 54 */
55 55
56#if defined(CONFIG_USB_ISP1760_HCD) || defined(CONFIG_USB_ISP1760_HCD_MODULE) 56#if IS_ENABLED(CONFIG_USB_ISP1760_HCD)
57#include <linux/usb/isp1760.h> 57#include <linux/usb/isp1760.h>
58static struct resource bfin_isp1760_resources[] = { 58static struct resource bfin_isp1760_resources[] = {
59 [0] = { 59 [0] = {
@@ -88,7 +88,7 @@ static struct platform_device bfin_isp1760_device = {
88}; 88};
89#endif 89#endif
90 90
91#if defined(CONFIG_KEYBOARD_GPIO) || defined(CONFIG_KEYBOARD_GPIO_MODULE) 91#if IS_ENABLED(CONFIG_KEYBOARD_GPIO)
92#include <linux/gpio_keys.h> 92#include <linux/gpio_keys.h>
93 93
94static struct gpio_keys_button bfin_gpio_keys_table[] = { 94static struct gpio_keys_button bfin_gpio_keys_table[] = {
@@ -111,7 +111,7 @@ static struct platform_device bfin_device_gpiokeys = {
111}; 111};
112#endif 112#endif
113 113
114#if defined(CONFIG_BFIN_CFPCMCIA) || defined(CONFIG_BFIN_CFPCMCIA_MODULE) 114#if IS_ENABLED(CONFIG_BFIN_CFPCMCIA)
115static struct resource bfin_pcmcia_cf_resources[] = { 115static struct resource bfin_pcmcia_cf_resources[] = {
116 { 116 {
117 .start = 0x20310000, /* IO PORT */ 117 .start = 0x20310000, /* IO PORT */
@@ -140,14 +140,14 @@ static struct platform_device bfin_pcmcia_cf_device = {
140}; 140};
141#endif 141#endif
142 142
143#if defined(CONFIG_RTC_DRV_BFIN) || defined(CONFIG_RTC_DRV_BFIN_MODULE) 143#if IS_ENABLED(CONFIG_RTC_DRV_BFIN)
144static struct platform_device rtc_device = { 144static struct platform_device rtc_device = {
145 .name = "rtc-bfin", 145 .name = "rtc-bfin",
146 .id = -1, 146 .id = -1,
147}; 147};
148#endif 148#endif
149 149
150#if defined(CONFIG_SMC91X) || defined(CONFIG_SMC91X_MODULE) 150#if IS_ENABLED(CONFIG_SMC91X)
151#include <linux/smc91x.h> 151#include <linux/smc91x.h>
152 152
153static struct smc91x_platdata smc91x_info = { 153static struct smc91x_platdata smc91x_info = {
@@ -180,7 +180,7 @@ static struct platform_device smc91x_device = {
180}; 180};
181#endif 181#endif
182 182
183#if defined(CONFIG_DM9000) || defined(CONFIG_DM9000_MODULE) 183#if IS_ENABLED(CONFIG_DM9000)
184static struct resource dm9000_resources[] = { 184static struct resource dm9000_resources[] = {
185 [0] = { 185 [0] = {
186 .start = 0x203FB800, 186 .start = 0x203FB800,
@@ -207,7 +207,7 @@ static struct platform_device dm9000_device = {
207}; 207};
208#endif 208#endif
209 209
210#if defined(CONFIG_USB_SL811_HCD) || defined(CONFIG_USB_SL811_HCD_MODULE) 210#if IS_ENABLED(CONFIG_USB_SL811_HCD)
211static struct resource sl811_hcd_resources[] = { 211static struct resource sl811_hcd_resources[] = {
212 { 212 {
213 .start = 0x20340000, 213 .start = 0x20340000,
@@ -251,7 +251,7 @@ static struct platform_device sl811_hcd_device = {
251}; 251};
252#endif 252#endif
253 253
254#if defined(CONFIG_USB_ISP1362_HCD) || defined(CONFIG_USB_ISP1362_HCD_MODULE) 254#if IS_ENABLED(CONFIG_USB_ISP1362_HCD)
255static struct resource isp1362_hcd_resources[] = { 255static struct resource isp1362_hcd_resources[] = {
256 { 256 {
257 .start = 0x20360000, 257 .start = 0x20360000,
@@ -290,7 +290,7 @@ static struct platform_device isp1362_hcd_device = {
290}; 290};
291#endif 291#endif
292 292
293#if defined(CONFIG_CAN_BFIN) || defined(CONFIG_CAN_BFIN_MODULE) 293#if IS_ENABLED(CONFIG_CAN_BFIN)
294static unsigned short bfin_can_peripherals[] = { 294static unsigned short bfin_can_peripherals[] = {
295 P_CAN0_RX, P_CAN0_TX, 0 295 P_CAN0_RX, P_CAN0_TX, 0
296}; 296};
@@ -328,7 +328,7 @@ static struct platform_device bfin_can_device = {
328}; 328};
329#endif 329#endif
330 330
331#if defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE) 331#if IS_ENABLED(CONFIG_BFIN_MAC)
332#include <linux/bfin_mac.h> 332#include <linux/bfin_mac.h>
333static const unsigned short bfin_mac_peripherals[] = P_MII0; 333static const unsigned short bfin_mac_peripherals[] = P_MII0;
334 334
@@ -361,7 +361,7 @@ static struct platform_device bfin_mac_device = {
361}; 361};
362#endif 362#endif
363 363
364#if defined(CONFIG_USB_NET2272) || defined(CONFIG_USB_NET2272_MODULE) 364#if IS_ENABLED(CONFIG_USB_NET2272)
365static struct resource net2272_bfin_resources[] = { 365static struct resource net2272_bfin_resources[] = {
366 { 366 {
367 .start = 0x20300000, 367 .start = 0x20300000,
@@ -385,7 +385,7 @@ static struct platform_device net2272_bfin_device = {
385}; 385};
386#endif 386#endif
387 387
388#if defined(CONFIG_MTD_NAND_PLATFORM) || defined(CONFIG_MTD_NAND_PLATFORM_MODULE) 388#if IS_ENABLED(CONFIG_MTD_NAND_PLATFORM)
389const char *part_probes[] = { "cmdlinepart", "RedBoot", NULL }; 389const char *part_probes[] = { "cmdlinepart", "RedBoot", NULL };
390 390
391static struct mtd_partition bfin_plat_nand_partitions[] = { 391static struct mtd_partition bfin_plat_nand_partitions[] = {
@@ -461,7 +461,7 @@ static void bfin_plat_nand_init(void)
461static void bfin_plat_nand_init(void) {} 461static void bfin_plat_nand_init(void) {}
462#endif 462#endif
463 463
464#if defined(CONFIG_MTD_PHYSMAP) || defined(CONFIG_MTD_PHYSMAP_MODULE) 464#if IS_ENABLED(CONFIG_MTD_PHYSMAP)
465static struct mtd_partition stamp_partitions[] = { 465static struct mtd_partition stamp_partitions[] = {
466 { 466 {
467 .name = "bootloader(nor)", 467 .name = "bootloader(nor)",
@@ -509,8 +509,7 @@ static struct platform_device stamp_flash_device = {
509}; 509};
510#endif 510#endif
511 511
512#if defined(CONFIG_MTD_M25P80) \ 512#if IS_ENABLED(CONFIG_MTD_M25P80)
513 || defined(CONFIG_MTD_M25P80_MODULE)
514static struct mtd_partition bfin_spi_flash_partitions[] = { 513static struct mtd_partition bfin_spi_flash_partitions[] = {
515 { 514 {
516 .name = "bootloader(spi)", 515 .name = "bootloader(spi)",
@@ -541,7 +540,7 @@ static struct bfin5xx_spi_chip spi_flash_chip_info = {
541}; 540};
542#endif 541#endif
543 542
544#if defined(CONFIG_INPUT_AD714X_SPI) || defined(CONFIG_INPUT_AD714X_SPI_MODULE) 543#if IS_ENABLED(CONFIG_INPUT_AD714X_SPI)
545#include <linux/input/ad714x.h> 544#include <linux/input/ad714x.h>
546 545
547static struct ad714x_slider_plat ad7147_spi_slider_plat[] = { 546static struct ad714x_slider_plat ad7147_spi_slider_plat[] = {
@@ -602,7 +601,7 @@ static struct ad714x_platform_data ad7147_spi_platform_data = {
602}; 601};
603#endif 602#endif
604 603
605#if defined(CONFIG_INPUT_AD714X_I2C) || defined(CONFIG_INPUT_AD714X_I2C_MODULE) 604#if IS_ENABLED(CONFIG_INPUT_AD714X_I2C)
606#include <linux/input/ad714x.h> 605#include <linux/input/ad714x.h>
607static struct ad714x_button_plat ad7142_i2c_button_plat[] = { 606static struct ad714x_button_plat ad7142_i2c_button_plat[] = {
608 { 607 {
@@ -649,24 +648,24 @@ static struct ad714x_platform_data ad7142_i2c_platform_data = {
649}; 648};
650#endif 649#endif
651 650
652#if defined(CONFIG_AD2S90) || defined(CONFIG_AD2S90_MODULE) 651#if IS_ENABLED(CONFIG_AD2S90)
653static struct bfin5xx_spi_chip ad2s90_spi_chip_info = { 652static struct bfin5xx_spi_chip ad2s90_spi_chip_info = {
654 .enable_dma = 0, 653 .enable_dma = 0,
655}; 654};
656#endif 655#endif
657 656
658#if defined(CONFIG_AD2S120X) || defined(CONFIG_AD2S120X_MODULE) 657#if IS_ENABLED(CONFIG_AD2S1200)
659static unsigned short ad2s120x_platform_data[] = { 658static unsigned short ad2s1200_platform_data[] = {
660 /* used as SAMPLE and RDVEL */ 659 /* used as SAMPLE and RDVEL */
661 GPIO_PF5, GPIO_PF6, 0 660 GPIO_PF5, GPIO_PF6, 0
662}; 661};
663 662
664static struct bfin5xx_spi_chip ad2s120x_spi_chip_info = { 663static struct bfin5xx_spi_chip ad2s1200_spi_chip_info = {
665 .enable_dma = 0, 664 .enable_dma = 0,
666}; 665};
667#endif 666#endif
668 667
669#if defined(CONFIG_AD2S1210) || defined(CONFIG_AD2S1210_MODULE) 668#if IS_ENABLED(CONFIG_AD2S1210)
670static unsigned short ad2s1210_platform_data[] = { 669static unsigned short ad2s1210_platform_data[] = {
671 /* use as SAMPLE, A0, A1 */ 670 /* use as SAMPLE, A0, A1 */
672 GPIO_PF7, GPIO_PF8, GPIO_PF9, 671 GPIO_PF7, GPIO_PF8, GPIO_PF9,
@@ -682,13 +681,13 @@ static struct bfin5xx_spi_chip ad2s1210_spi_chip_info = {
682}; 681};
683#endif 682#endif
684 683
685#if defined(CONFIG_AD7314) || defined(CONFIG_AD7314_MODULE) 684#if IS_ENABLED(CONFIG_SENSORS_AD7314)
686static struct bfin5xx_spi_chip ad7314_spi_chip_info = { 685static struct bfin5xx_spi_chip ad7314_spi_chip_info = {
687 .enable_dma = 0, 686 .enable_dma = 0,
688}; 687};
689#endif 688#endif
690 689
691#if defined(CONFIG_AD7816) || defined(CONFIG_AD7816_MODULE) 690#if IS_ENABLED(CONFIG_AD7816)
692static unsigned short ad7816_platform_data[] = { 691static unsigned short ad7816_platform_data[] = {
693 GPIO_PF4, /* rdwr_pin */ 692 GPIO_PF4, /* rdwr_pin */
694 GPIO_PF5, /* convert_pin */ 693 GPIO_PF5, /* convert_pin */
@@ -701,7 +700,7 @@ static struct bfin5xx_spi_chip ad7816_spi_chip_info = {
701}; 700};
702#endif 701#endif
703 702
704#if defined(CONFIG_ADT7310) || defined(CONFIG_ADT7310_MODULE) 703#if IS_ENABLED(CONFIG_ADT7310)
705static unsigned long adt7310_platform_data[3] = { 704static unsigned long adt7310_platform_data[3] = {
706/* INT bound temperature alarm event. line 1 */ 705/* INT bound temperature alarm event. line 1 */
707 IRQ_PG4, IRQF_TRIGGER_LOW, 706 IRQ_PG4, IRQF_TRIGGER_LOW,
@@ -714,14 +713,14 @@ static struct bfin5xx_spi_chip adt7310_spi_chip_info = {
714}; 713};
715#endif 714#endif
716 715
717#if defined(CONFIG_AD7298) || defined(CONFIG_AD7298_MODULE) 716#if IS_ENABLED(CONFIG_AD7298)
718static unsigned short ad7298_platform_data[] = { 717static unsigned short ad7298_platform_data[] = {
719 GPIO_PF7, /* busy_pin */ 718 GPIO_PF7, /* busy_pin */
720 0, 719 0,
721}; 720};
722#endif 721#endif
723 722
724#if defined(CONFIG_ADT7316_SPI) || defined(CONFIG_ADT7316_SPI_MODULE) 723#if IS_ENABLED(CONFIG_ADT7316_SPI)
725static unsigned long adt7316_spi_data[2] = { 724static unsigned long adt7316_spi_data[2] = {
726 IRQF_TRIGGER_LOW, /* interrupt flags */ 725 IRQF_TRIGGER_LOW, /* interrupt flags */
727 GPIO_PF7, /* ldac_pin, 0 means DAC/LDAC registers control DAC update */ 726 GPIO_PF7, /* ldac_pin, 0 means DAC/LDAC registers control DAC update */
@@ -732,7 +731,7 @@ static struct bfin5xx_spi_chip adt7316_spi_chip_info = {
732}; 731};
733#endif 732#endif
734 733
735#if defined(CONFIG_MMC_SPI) || defined(CONFIG_MMC_SPI_MODULE) 734#if IS_ENABLED(CONFIG_MMC_SPI)
736#define MMC_SPI_CARD_DETECT_INT IRQ_PF5 735#define MMC_SPI_CARD_DETECT_INT IRQ_PF5
737 736
738static int bfin_mmc_spi_init(struct device *dev, 737static int bfin_mmc_spi_init(struct device *dev,
@@ -759,7 +758,7 @@ static struct bfin5xx_spi_chip mmc_spi_chip_info = {
759}; 758};
760#endif 759#endif
761 760
762#if defined(CONFIG_TOUCHSCREEN_AD7877) || defined(CONFIG_TOUCHSCREEN_AD7877_MODULE) 761#if IS_ENABLED(CONFIG_TOUCHSCREEN_AD7877)
763#include <linux/spi/ad7877.h> 762#include <linux/spi/ad7877.h>
764static const struct ad7877_platform_data bfin_ad7877_ts_info = { 763static const struct ad7877_platform_data bfin_ad7877_ts_info = {
765 .model = 7877, 764 .model = 7877,
@@ -776,7 +775,7 @@ static const struct ad7877_platform_data bfin_ad7877_ts_info = {
776}; 775};
777#endif 776#endif
778 777
779#if defined(CONFIG_TOUCHSCREEN_AD7879) || defined(CONFIG_TOUCHSCREEN_AD7879_MODULE) 778#if IS_ENABLED(CONFIG_TOUCHSCREEN_AD7879)
780#include <linux/spi/ad7879.h> 779#include <linux/spi/ad7879.h>
781static const struct ad7879_platform_data bfin_ad7879_ts_info = { 780static const struct ad7879_platform_data bfin_ad7879_ts_info = {
782 .model = 7879, /* Model = AD7879 */ 781 .model = 7879, /* Model = AD7879 */
@@ -793,7 +792,7 @@ static const struct ad7879_platform_data bfin_ad7879_ts_info = {
793}; 792};
794#endif 793#endif
795 794
796#if defined(CONFIG_INPUT_ADXL34X) || defined(CONFIG_INPUT_ADXL34X_MODULE) 795#if IS_ENABLED(CONFIG_INPUT_ADXL34X)
797#include <linux/input/adxl34x.h> 796#include <linux/input/adxl34x.h>
798static const struct adxl34x_platform_data adxl34x_info = { 797static const struct adxl34x_platform_data adxl34x_info = {
799 .x_axis_offset = 0, 798 .x_axis_offset = 0,
@@ -832,13 +831,13 @@ static const struct adxl34x_platform_data adxl34x_info = {
832}; 831};
833#endif 832#endif
834 833
835#if defined(CONFIG_ENC28J60) || defined(CONFIG_ENC28J60_MODULE) 834#if IS_ENABLED(CONFIG_ENC28J60)
836static struct bfin5xx_spi_chip enc28j60_spi_chip_info = { 835static struct bfin5xx_spi_chip enc28j60_spi_chip_info = {
837 .enable_dma = 1, 836 .enable_dma = 1,
838}; 837};
839#endif 838#endif
840 839
841#if defined(CONFIG_ADF702X) || defined(CONFIG_ADF702X_MODULE) 840#if IS_ENABLED(CONFIG_ADF702X)
842#include <linux/spi/adf702x.h> 841#include <linux/spi/adf702x.h>
843#define TXREG 0x0160A470 842#define TXREG 0x0160A470
844static const u32 adf7021_regs[] = { 843static const u32 adf7021_regs[] = {
@@ -880,7 +879,7 @@ static inline void adf702x_mac_init(void)
880static inline void adf702x_mac_init(void) {} 879static inline void adf702x_mac_init(void) {}
881#endif 880#endif
882 881
883#if defined(CONFIG_TOUCHSCREEN_ADS7846) || defined(CONFIG_TOUCHSCREEN_ADS7846_MODULE) 882#if IS_ENABLED(CONFIG_TOUCHSCREEN_ADS7846)
884#include <linux/spi/ads7846.h> 883#include <linux/spi/ads7846.h>
885static int ads7873_get_pendown_state(void) 884static int ads7873_get_pendown_state(void)
886{ 885{
@@ -899,8 +898,7 @@ static struct ads7846_platform_data __initdata ad7873_pdata = {
899}; 898};
900#endif 899#endif
901 900
902#if defined(CONFIG_MTD_DATAFLASH) \ 901#if IS_ENABLED(CONFIG_MTD_DATAFLASH)
903 || defined(CONFIG_MTD_DATAFLASH_MODULE)
904 902
905static struct mtd_partition bfin_spi_dataflash_partitions[] = { 903static struct mtd_partition bfin_spi_dataflash_partitions[] = {
906 { 904 {
@@ -931,15 +929,14 @@ static struct bfin5xx_spi_chip data_flash_chip_info = {
931}; 929};
932#endif 930#endif
933 931
934#if defined(CONFIG_AD7476) || defined(CONFIG_AD7476_MODULE) 932#if IS_ENABLED(CONFIG_AD7476)
935static struct bfin5xx_spi_chip spi_ad7476_chip_info = { 933static struct bfin5xx_spi_chip spi_ad7476_chip_info = {
936 .enable_dma = 0, /* use dma transfer with this chip*/ 934 .enable_dma = 0, /* use dma transfer with this chip*/
937}; 935};
938#endif 936#endif
939 937
940static struct spi_board_info bfin_spi_board_info[] __initdata = { 938static struct spi_board_info bfin_spi_board_info[] __initdata = {
941#if defined(CONFIG_MTD_M25P80) \ 939#if IS_ENABLED(CONFIG_MTD_M25P80)
942 || defined(CONFIG_MTD_M25P80_MODULE)
943 { 940 {
944 /* the modalias must be the same as spi device driver name */ 941 /* the modalias must be the same as spi device driver name */
945 .modalias = "m25p80", /* Name of spi_driver for this device */ 942 .modalias = "m25p80", /* Name of spi_driver for this device */
@@ -951,8 +948,7 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
951 .mode = SPI_MODE_3, 948 .mode = SPI_MODE_3,
952 }, 949 },
953#endif 950#endif
954#if defined(CONFIG_MTD_DATAFLASH) \ 951#if IS_ENABLED(CONFIG_MTD_DATAFLASH)
955 || defined(CONFIG_MTD_DATAFLASH_MODULE)
956 { /* DataFlash chip */ 952 { /* DataFlash chip */
957 .modalias = "mtd_dataflash", 953 .modalias = "mtd_dataflash",
958 .max_speed_hz = 33250000, /* max spi clock (SCK) speed in HZ */ 954 .max_speed_hz = 33250000, /* max spi clock (SCK) speed in HZ */
@@ -964,8 +960,7 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
964 }, 960 },
965#endif 961#endif
966 962
967#if defined(CONFIG_SND_BF5XX_SOC_AD1836) \ 963#if IS_ENABLED(CONFIG_SND_BF5XX_SOC_AD1836)
968 || defined(CONFIG_SND_BF5XX_SOC_AD1836_MODULE)
969 { 964 {
970 .modalias = "ad1836", 965 .modalias = "ad1836",
971 .max_speed_hz = 3125000, /* max spi clock (SCK) speed in HZ */ 966 .max_speed_hz = 3125000, /* max spi clock (SCK) speed in HZ */
@@ -986,7 +981,7 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
986 }, 981 },
987#endif 982#endif
988 983
989#if defined(CONFIG_SND_SOC_ADAV80X) || defined(CONFIG_SND_SOC_ADV80X_MODULE) 984#if IS_ENABLED(CONFIG_SND_SOC_ADAV80X)
990 { 985 {
991 .modalias = "adav801", 986 .modalias = "adav801",
992 .max_speed_hz = 3125000, /* max spi clock (SCK) speed in HZ */ 987 .max_speed_hz = 3125000, /* max spi clock (SCK) speed in HZ */
@@ -996,7 +991,7 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
996 }, 991 },
997#endif 992#endif
998 993
999#if defined(CONFIG_INPUT_AD714X_SPI) || defined(CONFIG_INPUT_AD714X_SPI_MODULE) 994#if IS_ENABLED(CONFIG_INPUT_AD714X_SPI)
1000 { 995 {
1001 .modalias = "ad714x_captouch", 996 .modalias = "ad714x_captouch",
1002 .max_speed_hz = 1000000, /* max spi clock (SCK) speed in HZ */ 997 .max_speed_hz = 1000000, /* max spi clock (SCK) speed in HZ */
@@ -1008,7 +1003,7 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
1008 }, 1003 },
1009#endif 1004#endif
1010 1005
1011#if defined(CONFIG_AD2S90) || defined(CONFIG_AD2S90_MODULE) 1006#if IS_ENABLED(CONFIG_AD2S90)
1012 { 1007 {
1013 .modalias = "ad2s90", 1008 .modalias = "ad2s90",
1014 .bus_num = 0, 1009 .bus_num = 0,
@@ -1019,17 +1014,17 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
1019 }, 1014 },
1020#endif 1015#endif
1021 1016
1022#if defined(CONFIG_AD2S120X) || defined(CONFIG_AD2S120X_MODULE) 1017#if IS_ENABLED(CONFIG_AD2S1200)
1023 { 1018 {
1024 .modalias = "ad2s120x", 1019 .modalias = "ad2s1200",
1025 .bus_num = 0, 1020 .bus_num = 0,
1026 .chip_select = 4, /* CS, change it for your board */ 1021 .chip_select = 4, /* CS, change it for your board */
1027 .platform_data = ad2s120x_platform_data, 1022 .platform_data = ad2s1200_platform_data,
1028 .controller_data = &ad2s120x_spi_chip_info, 1023 .controller_data = &ad2s1200_spi_chip_info,
1029 }, 1024 },
1030#endif 1025#endif
1031 1026
1032#if defined(CONFIG_AD2S1210) || defined(CONFIG_AD2S1210_MODULE) 1027#if IS_ENABLED(CONFIG_AD2S1210)
1033 { 1028 {
1034 .modalias = "ad2s1210", 1029 .modalias = "ad2s1210",
1035 .max_speed_hz = 8192000, 1030 .max_speed_hz = 8192000,
@@ -1040,7 +1035,7 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
1040 }, 1035 },
1041#endif 1036#endif
1042 1037
1043#if defined(CONFIG_AD7314) || defined(CONFIG_AD7314_MODULE) 1038#if IS_ENABLED(CONFIG_SENSORS_AD7314)
1044 { 1039 {
1045 .modalias = "ad7314", 1040 .modalias = "ad7314",
1046 .max_speed_hz = 1000000, 1041 .max_speed_hz = 1000000,
@@ -1051,7 +1046,7 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
1051 }, 1046 },
1052#endif 1047#endif
1053 1048
1054#if defined(CONFIG_AD7816) || defined(CONFIG_AD7816_MODULE) 1049#if IS_ENABLED(CONFIG_AD7816)
1055 { 1050 {
1056 .modalias = "ad7818", 1051 .modalias = "ad7818",
1057 .max_speed_hz = 1000000, 1052 .max_speed_hz = 1000000,
@@ -1063,7 +1058,7 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
1063 }, 1058 },
1064#endif 1059#endif
1065 1060
1066#if defined(CONFIG_ADT7310) || defined(CONFIG_ADT7310_MODULE) 1061#if IS_ENABLED(CONFIG_ADT7310)
1067 { 1062 {
1068 .modalias = "adt7310", 1063 .modalias = "adt7310",
1069 .max_speed_hz = 1000000, 1064 .max_speed_hz = 1000000,
@@ -1076,7 +1071,7 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
1076 }, 1071 },
1077#endif 1072#endif
1078 1073
1079#if defined(CONFIG_AD7298) || defined(CONFIG_AD7298_MODULE) 1074#if IS_ENABLED(CONFIG_AD7298)
1080 { 1075 {
1081 .modalias = "ad7298", 1076 .modalias = "ad7298",
1082 .max_speed_hz = 1000000, 1077 .max_speed_hz = 1000000,
@@ -1087,7 +1082,7 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
1087 }, 1082 },
1088#endif 1083#endif
1089 1084
1090#if defined(CONFIG_ADT7316_SPI) || defined(CONFIG_ADT7316_SPI_MODULE) 1085#if IS_ENABLED(CONFIG_ADT7316_SPI)
1091 { 1086 {
1092 .modalias = "adt7316", 1087 .modalias = "adt7316",
1093 .max_speed_hz = 1000000, 1088 .max_speed_hz = 1000000,
@@ -1100,7 +1095,7 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
1100 }, 1095 },
1101#endif 1096#endif
1102 1097
1103#if defined(CONFIG_MMC_SPI) || defined(CONFIG_MMC_SPI_MODULE) 1098#if IS_ENABLED(CONFIG_MMC_SPI)
1104 { 1099 {
1105 .modalias = "mmc_spi", 1100 .modalias = "mmc_spi",
1106 .max_speed_hz = 20000000, /* max spi clock (SCK) speed in HZ */ 1101 .max_speed_hz = 20000000, /* max spi clock (SCK) speed in HZ */
@@ -1111,7 +1106,7 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
1111 .mode = SPI_MODE_3, 1106 .mode = SPI_MODE_3,
1112 }, 1107 },
1113#endif 1108#endif
1114#if defined(CONFIG_TOUCHSCREEN_AD7877) || defined(CONFIG_TOUCHSCREEN_AD7877_MODULE) 1109#if IS_ENABLED(CONFIG_TOUCHSCREEN_AD7877)
1115 { 1110 {
1116 .modalias = "ad7877", 1111 .modalias = "ad7877",
1117 .platform_data = &bfin_ad7877_ts_info, 1112 .platform_data = &bfin_ad7877_ts_info,
@@ -1121,7 +1116,7 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
1121 .chip_select = 1, 1116 .chip_select = 1,
1122 }, 1117 },
1123#endif 1118#endif
1124#if defined(CONFIG_TOUCHSCREEN_AD7879_SPI) || defined(CONFIG_TOUCHSCREEN_AD7879_SPI_MODULE) 1119#if IS_ENABLED(CONFIG_TOUCHSCREEN_AD7879_SPI)
1125 { 1120 {
1126 .modalias = "ad7879", 1121 .modalias = "ad7879",
1127 .platform_data = &bfin_ad7879_ts_info, 1122 .platform_data = &bfin_ad7879_ts_info,
@@ -1132,7 +1127,7 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
1132 .mode = SPI_CPHA | SPI_CPOL, 1127 .mode = SPI_CPHA | SPI_CPOL,
1133 }, 1128 },
1134#endif 1129#endif
1135#if defined(CONFIG_SPI_SPIDEV) || defined(CONFIG_SPI_SPIDEV_MODULE) 1130#if IS_ENABLED(CONFIG_SPI_SPIDEV)
1136 { 1131 {
1137 .modalias = "spidev", 1132 .modalias = "spidev",
1138 .max_speed_hz = 3125000, /* max spi clock (SCK) speed in HZ */ 1133 .max_speed_hz = 3125000, /* max spi clock (SCK) speed in HZ */
@@ -1140,7 +1135,7 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
1140 .chip_select = 1, 1135 .chip_select = 1,
1141 }, 1136 },
1142#endif 1137#endif
1143#if defined(CONFIG_FB_BFIN_LQ035Q1) || defined(CONFIG_FB_BFIN_LQ035Q1_MODULE) 1138#if IS_ENABLED(CONFIG_FB_BFIN_LQ035Q1)
1144 { 1139 {
1145 .modalias = "bfin-lq035q1-spi", 1140 .modalias = "bfin-lq035q1-spi",
1146 .max_speed_hz = 20000000, /* max spi clock (SCK) speed in HZ */ 1141 .max_speed_hz = 20000000, /* max spi clock (SCK) speed in HZ */
@@ -1149,7 +1144,7 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
1149 .mode = SPI_CPHA | SPI_CPOL, 1144 .mode = SPI_CPHA | SPI_CPOL,
1150 }, 1145 },
1151#endif 1146#endif
1152#if defined(CONFIG_ENC28J60) || defined(CONFIG_ENC28J60_MODULE) 1147#if IS_ENABLED(CONFIG_ENC28J60)
1153 { 1148 {
1154 .modalias = "enc28j60", 1149 .modalias = "enc28j60",
1155 .max_speed_hz = 20000000, /* max spi clock (SCK) speed in HZ */ 1150 .max_speed_hz = 20000000, /* max spi clock (SCK) speed in HZ */
@@ -1160,7 +1155,7 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
1160 .mode = SPI_MODE_0, 1155 .mode = SPI_MODE_0,
1161 }, 1156 },
1162#endif 1157#endif
1163#if defined(CONFIG_INPUT_ADXL34X_SPI) || defined(CONFIG_INPUT_ADXL34X_SPI_MODULE) 1158#if IS_ENABLED(CONFIG_INPUT_ADXL34X_SPI)
1164 { 1159 {
1165 .modalias = "adxl34x", 1160 .modalias = "adxl34x",
1166 .platform_data = &adxl34x_info, 1161 .platform_data = &adxl34x_info,
@@ -1171,7 +1166,7 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
1171 .mode = SPI_MODE_3, 1166 .mode = SPI_MODE_3,
1172 }, 1167 },
1173#endif 1168#endif
1174#if defined(CONFIG_ADF702X) || defined(CONFIG_ADF702X_MODULE) 1169#if IS_ENABLED(CONFIG_ADF702X)
1175 { 1170 {
1176 .modalias = "adf702x", 1171 .modalias = "adf702x",
1177 .max_speed_hz = 16000000, /* max spi clock (SCK) speed in HZ */ 1172 .max_speed_hz = 16000000, /* max spi clock (SCK) speed in HZ */
@@ -1181,7 +1176,7 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
1181 .mode = SPI_MODE_0, 1176 .mode = SPI_MODE_0,
1182 }, 1177 },
1183#endif 1178#endif
1184#if defined(CONFIG_TOUCHSCREEN_ADS7846) || defined(CONFIG_TOUCHSCREEN_ADS7846_MODULE) 1179#if IS_ENABLED(CONFIG_TOUCHSCREEN_ADS7846)
1185 { 1180 {
1186 .modalias = "ads7846", 1181 .modalias = "ads7846",
1187 .max_speed_hz = 2000000, /* max spi clock (SCK) speed in HZ */ 1182 .max_speed_hz = 2000000, /* max spi clock (SCK) speed in HZ */
@@ -1192,8 +1187,7 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
1192 .mode = SPI_MODE_0, 1187 .mode = SPI_MODE_0,
1193 }, 1188 },
1194#endif 1189#endif
1195#if defined(CONFIG_AD7476) \ 1190#if IS_ENABLED(CONFIG_AD7476)
1196 || defined(CONFIG_AD7476_MODULE)
1197 { 1191 {
1198 .modalias = "ad7476", /* Name of spi_driver for this device */ 1192 .modalias = "ad7476", /* Name of spi_driver for this device */
1199 .max_speed_hz = 6250000, /* max spi clock (SCK) speed in HZ */ 1193 .max_speed_hz = 6250000, /* max spi clock (SCK) speed in HZ */
@@ -1204,8 +1198,7 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
1204 .mode = SPI_MODE_3, 1198 .mode = SPI_MODE_3,
1205 }, 1199 },
1206#endif 1200#endif
1207#if defined(CONFIG_ADE7753) \ 1201#if IS_ENABLED(CONFIG_ADE7753)
1208 || defined(CONFIG_ADE7753_MODULE)
1209 { 1202 {
1210 .modalias = "ade7753", 1203 .modalias = "ade7753",
1211 .max_speed_hz = 1000000, /* max spi clock (SCK) speed in HZ */ 1204 .max_speed_hz = 1000000, /* max spi clock (SCK) speed in HZ */
@@ -1215,8 +1208,7 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
1215 .mode = SPI_MODE_1, 1208 .mode = SPI_MODE_1,
1216 }, 1209 },
1217#endif 1210#endif
1218#if defined(CONFIG_ADE7754) \ 1211#if IS_ENABLED(CONFIG_ADE7754)
1219 || defined(CONFIG_ADE7754_MODULE)
1220 { 1212 {
1221 .modalias = "ade7754", 1213 .modalias = "ade7754",
1222 .max_speed_hz = 1000000, /* max spi clock (SCK) speed in HZ */ 1214 .max_speed_hz = 1000000, /* max spi clock (SCK) speed in HZ */
@@ -1226,8 +1218,7 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
1226 .mode = SPI_MODE_1, 1218 .mode = SPI_MODE_1,
1227 }, 1219 },
1228#endif 1220#endif
1229#if defined(CONFIG_ADE7758) \ 1221#if IS_ENABLED(CONFIG_ADE7758)
1230 || defined(CONFIG_ADE7758_MODULE)
1231 { 1222 {
1232 .modalias = "ade7758", 1223 .modalias = "ade7758",
1233 .max_speed_hz = 1000000, /* max spi clock (SCK) speed in HZ */ 1224 .max_speed_hz = 1000000, /* max spi clock (SCK) speed in HZ */
@@ -1237,8 +1228,7 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
1237 .mode = SPI_MODE_1, 1228 .mode = SPI_MODE_1,
1238 }, 1229 },
1239#endif 1230#endif
1240#if defined(CONFIG_ADE7759) \ 1231#if IS_ENABLED(CONFIG_ADE7759)
1241 || defined(CONFIG_ADE7759_MODULE)
1242 { 1232 {
1243 .modalias = "ade7759", 1233 .modalias = "ade7759",
1244 .max_speed_hz = 1000000, /* max spi clock (SCK) speed in HZ */ 1234 .max_speed_hz = 1000000, /* max spi clock (SCK) speed in HZ */
@@ -1248,8 +1238,7 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
1248 .mode = SPI_MODE_1, 1238 .mode = SPI_MODE_1,
1249 }, 1239 },
1250#endif 1240#endif
1251#if defined(CONFIG_ADE7854_SPI) \ 1241#if IS_ENABLED(CONFIG_ADE7854_SPI)
1252 || defined(CONFIG_ADE7854_SPI_MODULE)
1253 { 1242 {
1254 .modalias = "ade7854", 1243 .modalias = "ade7854",
1255 .max_speed_hz = 1000000, /* max spi clock (SCK) speed in HZ */ 1244 .max_speed_hz = 1000000, /* max spi clock (SCK) speed in HZ */
@@ -1259,8 +1248,7 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
1259 .mode = SPI_MODE_3, 1248 .mode = SPI_MODE_3,
1260 }, 1249 },
1261#endif 1250#endif
1262#if defined(CONFIG_ADIS16060) \ 1251#if IS_ENABLED(CONFIG_ADIS16060)
1263 || defined(CONFIG_ADIS16060_MODULE)
1264 { 1252 {
1265 .modalias = "adis16060_r", 1253 .modalias = "adis16060_r",
1266 .max_speed_hz = 2900000, /* max spi clock (SCK) speed in HZ */ 1254 .max_speed_hz = 2900000, /* max spi clock (SCK) speed in HZ */
@@ -1278,8 +1266,7 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
1278 .mode = SPI_MODE_1, 1266 .mode = SPI_MODE_1,
1279 }, 1267 },
1280#endif 1268#endif
1281#if defined(CONFIG_ADIS16130) \ 1269#if IS_ENABLED(CONFIG_ADIS16130)
1282 || defined(CONFIG_ADIS16130_MODULE)
1283 { 1270 {
1284 .modalias = "adis16130", 1271 .modalias = "adis16130",
1285 .max_speed_hz = 1000000, /* max spi clock (SCK) speed in HZ */ 1272 .max_speed_hz = 1000000, /* max spi clock (SCK) speed in HZ */
@@ -1289,8 +1276,7 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
1289 .mode = SPI_MODE_3, 1276 .mode = SPI_MODE_3,
1290 }, 1277 },
1291#endif 1278#endif
1292#if defined(CONFIG_ADIS16201) \ 1279#if IS_ENABLED(CONFIG_ADIS16201)
1293 || defined(CONFIG_ADIS16201_MODULE)
1294 { 1280 {
1295 .modalias = "adis16201", 1281 .modalias = "adis16201",
1296 .max_speed_hz = 1000000, /* max spi clock (SCK) speed in HZ */ 1282 .max_speed_hz = 1000000, /* max spi clock (SCK) speed in HZ */
@@ -1301,8 +1287,7 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
1301 .irq = IRQ_PF4, 1287 .irq = IRQ_PF4,
1302 }, 1288 },
1303#endif 1289#endif
1304#if defined(CONFIG_ADIS16203) \ 1290#if IS_ENABLED(CONFIG_ADIS16203)
1305 || defined(CONFIG_ADIS16203_MODULE)
1306 { 1291 {
1307 .modalias = "adis16203", 1292 .modalias = "adis16203",
1308 .max_speed_hz = 1000000, /* max spi clock (SCK) speed in HZ */ 1293 .max_speed_hz = 1000000, /* max spi clock (SCK) speed in HZ */
@@ -1313,8 +1298,7 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
1313 .irq = IRQ_PF4, 1298 .irq = IRQ_PF4,
1314 }, 1299 },
1315#endif 1300#endif
1316#if defined(CONFIG_ADIS16204) \ 1301#if IS_ENABLED(CONFIG_ADIS16204)
1317 || defined(CONFIG_ADIS16204_MODULE)
1318 { 1302 {
1319 .modalias = "adis16204", 1303 .modalias = "adis16204",
1320 .max_speed_hz = 1000000, /* max spi clock (SCK) speed in HZ */ 1304 .max_speed_hz = 1000000, /* max spi clock (SCK) speed in HZ */
@@ -1325,8 +1309,7 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
1325 .irq = IRQ_PF4, 1309 .irq = IRQ_PF4,
1326 }, 1310 },
1327#endif 1311#endif
1328#if defined(CONFIG_ADIS16209) \ 1312#if IS_ENABLED(CONFIG_ADIS16209)
1329 || defined(CONFIG_ADIS16209_MODULE)
1330 { 1313 {
1331 .modalias = "adis16209", 1314 .modalias = "adis16209",
1332 .max_speed_hz = 1000000, /* max spi clock (SCK) speed in HZ */ 1315 .max_speed_hz = 1000000, /* max spi clock (SCK) speed in HZ */
@@ -1337,8 +1320,7 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
1337 .irq = IRQ_PF4, 1320 .irq = IRQ_PF4,
1338 }, 1321 },
1339#endif 1322#endif
1340#if defined(CONFIG_ADIS16220) \ 1323#if IS_ENABLED(CONFIG_ADIS16220)
1341 || defined(CONFIG_ADIS16220_MODULE)
1342 { 1324 {
1343 .modalias = "adis16220", 1325 .modalias = "adis16220",
1344 .max_speed_hz = 2000000, /* max spi clock (SCK) speed in HZ */ 1326 .max_speed_hz = 2000000, /* max spi clock (SCK) speed in HZ */
@@ -1349,8 +1331,7 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
1349 .irq = IRQ_PF4, 1331 .irq = IRQ_PF4,
1350 }, 1332 },
1351#endif 1333#endif
1352#if defined(CONFIG_ADIS16240) \ 1334#if IS_ENABLED(CONFIG_ADIS16240)
1353 || defined(CONFIG_ADIS16240_MODULE)
1354 { 1335 {
1355 .modalias = "adis16240", 1336 .modalias = "adis16240",
1356 .max_speed_hz = 1500000, /* max spi clock (SCK) speed in HZ */ 1337 .max_speed_hz = 1500000, /* max spi clock (SCK) speed in HZ */
@@ -1361,8 +1342,7 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
1361 .irq = IRQ_PF4, 1342 .irq = IRQ_PF4,
1362 }, 1343 },
1363#endif 1344#endif
1364#if defined(CONFIG_ADIS16260) \ 1345#if IS_ENABLED(CONFIG_ADIS16260)
1365 || defined(CONFIG_ADIS16260_MODULE)
1366 { 1346 {
1367 .modalias = "adis16260", 1347 .modalias = "adis16260",
1368 .max_speed_hz = 1500000, /* max spi clock (SCK) speed in HZ */ 1348 .max_speed_hz = 1500000, /* max spi clock (SCK) speed in HZ */
@@ -1373,8 +1353,7 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
1373 .irq = IRQ_PF4, 1353 .irq = IRQ_PF4,
1374 }, 1354 },
1375#endif 1355#endif
1376#if defined(CONFIG_ADIS16261) \ 1356#if IS_ENABLED(CONFIG_ADIS16261)
1377 || defined(CONFIG_ADIS16261_MODULE)
1378 { 1357 {
1379 .modalias = "adis16261", 1358 .modalias = "adis16261",
1380 .max_speed_hz = 2500000, /* max spi clock (SCK) speed in HZ */ 1359 .max_speed_hz = 2500000, /* max spi clock (SCK) speed in HZ */
@@ -1384,8 +1363,7 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
1384 .mode = SPI_MODE_3, 1363 .mode = SPI_MODE_3,
1385 }, 1364 },
1386#endif 1365#endif
1387#if defined(CONFIG_ADIS16300) \ 1366#if IS_ENABLED(CONFIG_ADIS16300)
1388 || defined(CONFIG_ADIS16300_MODULE)
1389 { 1367 {
1390 .modalias = "adis16300", 1368 .modalias = "adis16300",
1391 .max_speed_hz = 1000000, /* max spi clock (SCK) speed in HZ */ 1369 .max_speed_hz = 1000000, /* max spi clock (SCK) speed in HZ */
@@ -1396,8 +1374,7 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
1396 .irq = IRQ_PF4, 1374 .irq = IRQ_PF4,
1397 }, 1375 },
1398#endif 1376#endif
1399#if defined(CONFIG_ADIS16350) \ 1377#if IS_ENABLED(CONFIG_ADIS16350)
1400 || defined(CONFIG_ADIS16350_MODULE)
1401 { 1378 {
1402 .modalias = "adis16364", 1379 .modalias = "adis16364",
1403 .max_speed_hz = 1000000, /* max spi clock (SCK) speed in HZ */ 1380 .max_speed_hz = 1000000, /* max spi clock (SCK) speed in HZ */
@@ -1408,8 +1385,7 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
1408 .irq = IRQ_PF4, 1385 .irq = IRQ_PF4,
1409 }, 1386 },
1410#endif 1387#endif
1411#if defined(CONFIG_ADIS16400) \ 1388#if IS_ENABLED(CONFIG_ADIS16400)
1412 || defined(CONFIG_ADIS16400_MODULE)
1413 { 1389 {
1414 .modalias = "adis16400", 1390 .modalias = "adis16400",
1415 .max_speed_hz = 1000000, /* max spi clock (SCK) speed in HZ */ 1391 .max_speed_hz = 1000000, /* max spi clock (SCK) speed in HZ */
@@ -1421,7 +1397,7 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
1421#endif 1397#endif
1422}; 1398};
1423 1399
1424#if defined(CONFIG_SPI_BFIN5XX) || defined(CONFIG_SPI_BFIN5XX_MODULE) 1400#if IS_ENABLED(CONFIG_SPI_BFIN5XX)
1425/* SPI controller data */ 1401/* SPI controller data */
1426static struct bfin5xx_spi_master bfin_spi0_info = { 1402static struct bfin5xx_spi_master bfin_spi0_info = {
1427 .num_chipselect = MAX_CTRL_CS + MAX_BLACKFIN_GPIOS, 1403 .num_chipselect = MAX_CTRL_CS + MAX_BLACKFIN_GPIOS,
@@ -1459,7 +1435,7 @@ static struct platform_device bfin_spi0_device = {
1459}; 1435};
1460#endif /* spi master and devices */ 1436#endif /* spi master and devices */
1461 1437
1462#if defined(CONFIG_SPI_BFIN_SPORT) || defined(CONFIG_SPI_BFIN_SPORT_MODULE) 1438#if IS_ENABLED(CONFIG_SPI_BFIN_SPORT)
1463 1439
1464/* SPORT SPI controller data */ 1440/* SPORT SPI controller data */
1465static struct bfin5xx_spi_master bfin_sport_spi0_info = { 1441static struct bfin5xx_spi_master bfin_sport_spi0_info = {
@@ -1524,13 +1500,13 @@ static struct platform_device bfin_sport_spi1_device = {
1524 1500
1525#endif /* sport spi master and devices */ 1501#endif /* sport spi master and devices */
1526 1502
1527#if defined(CONFIG_FB_BF537_LQ035) || defined(CONFIG_FB_BF537_LQ035_MODULE) 1503#if IS_ENABLED(CONFIG_FB_BF537_LQ035)
1528static struct platform_device bfin_fb_device = { 1504static struct platform_device bfin_fb_device = {
1529 .name = "bf537_lq035", 1505 .name = "bf537_lq035",
1530}; 1506};
1531#endif 1507#endif
1532 1508
1533#if defined(CONFIG_FB_BFIN_LQ035Q1) || defined(CONFIG_FB_BFIN_LQ035Q1_MODULE) 1509#if IS_ENABLED(CONFIG_FB_BFIN_LQ035Q1)
1534#include <asm/bfin-lq035q1.h> 1510#include <asm/bfin-lq035q1.h>
1535 1511
1536static struct bfin_lq035q1fb_disp_info bfin_lq035q1_data = { 1512static struct bfin_lq035q1fb_disp_info bfin_lq035q1_data = {
@@ -1559,8 +1535,7 @@ static struct platform_device bfin_lq035q1_device = {
1559}; 1535};
1560#endif 1536#endif
1561 1537
1562#if defined(CONFIG_VIDEO_BLACKFIN_CAPTURE) \ 1538#if IS_ENABLED(CONFIG_VIDEO_BLACKFIN_CAPTURE)
1563 || defined(CONFIG_VIDEO_BLACKFIN_CAPTURE_MODULE)
1564#include <linux/videodev2.h> 1539#include <linux/videodev2.h>
1565#include <media/blackfin/bfin_capture.h> 1540#include <media/blackfin/bfin_capture.h>
1566#include <media/blackfin/ppi.h> 1541#include <media/blackfin/ppi.h>
@@ -1580,8 +1555,7 @@ static const struct ppi_info ppi_info = {
1580 .pin_req = ppi_req, 1555 .pin_req = ppi_req,
1581}; 1556};
1582 1557
1583#if defined(CONFIG_VIDEO_VS6624) \ 1558#if IS_ENABLED(CONFIG_VIDEO_VS6624)
1584 || defined(CONFIG_VIDEO_VS6624_MODULE)
1585static struct v4l2_input vs6624_inputs[] = { 1559static struct v4l2_input vs6624_inputs[] = {
1586 { 1560 {
1587 .index = 0, 1561 .index = 0,
@@ -1624,7 +1598,7 @@ static struct platform_device bfin_capture_device = {
1624}; 1598};
1625#endif 1599#endif
1626 1600
1627#if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE) 1601#if IS_ENABLED(CONFIG_SERIAL_BFIN)
1628#ifdef CONFIG_SERIAL_BFIN_UART0 1602#ifdef CONFIG_SERIAL_BFIN_UART0
1629static struct resource bfin_uart0_resources[] = { 1603static struct resource bfin_uart0_resources[] = {
1630 { 1604 {
@@ -1735,7 +1709,7 @@ static struct platform_device bfin_uart1_device = {
1735#endif 1709#endif
1736#endif 1710#endif
1737 1711
1738#if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE) 1712#if IS_ENABLED(CONFIG_BFIN_SIR)
1739#ifdef CONFIG_BFIN_SIR0 1713#ifdef CONFIG_BFIN_SIR0
1740static struct resource bfin_sir0_resources[] = { 1714static struct resource bfin_sir0_resources[] = {
1741 { 1715 {
@@ -1790,7 +1764,7 @@ static struct platform_device bfin_sir1_device = {
1790#endif 1764#endif
1791#endif 1765#endif
1792 1766
1793#if defined(CONFIG_I2C_BLACKFIN_TWI) || defined(CONFIG_I2C_BLACKFIN_TWI_MODULE) 1767#if IS_ENABLED(CONFIG_I2C_BLACKFIN_TWI)
1794static const u16 bfin_twi0_pins[] = {P_TWI0_SCL, P_TWI0_SDA, 0}; 1768static const u16 bfin_twi0_pins[] = {P_TWI0_SCL, P_TWI0_SDA, 0};
1795 1769
1796static struct resource bfin_twi0_resource[] = { 1770static struct resource bfin_twi0_resource[] = {
@@ -1817,7 +1791,7 @@ static struct platform_device i2c_bfin_twi_device = {
1817}; 1791};
1818#endif 1792#endif
1819 1793
1820#if defined(CONFIG_KEYBOARD_ADP5588) || defined(CONFIG_KEYBOARD_ADP5588_MODULE) 1794#if IS_ENABLED(CONFIG_KEYBOARD_ADP5588)
1821static const unsigned short adp5588_keymap[ADP5588_KEYMAPSIZE] = { 1795static const unsigned short adp5588_keymap[ADP5588_KEYMAPSIZE] = {
1822 [0] = KEY_GRAVE, 1796 [0] = KEY_GRAVE,
1823 [1] = KEY_1, 1797 [1] = KEY_1,
@@ -1902,7 +1876,7 @@ static struct adp5588_kpad_platform_data adp5588_kpad_data = {
1902}; 1876};
1903#endif 1877#endif
1904 1878
1905#if defined(CONFIG_PMIC_ADP5520) || defined(CONFIG_PMIC_ADP5520_MODULE) 1879#if IS_ENABLED(CONFIG_PMIC_ADP5520)
1906#include <linux/mfd/adp5520.h> 1880#include <linux/mfd/adp5520.h>
1907 1881
1908 /* 1882 /*
@@ -2013,14 +1987,14 @@ static struct adp5520_platform_data adp5520_pdev_data = {
2013 1987
2014#endif 1988#endif
2015 1989
2016#if defined(CONFIG_GPIO_ADP5588) || defined(CONFIG_GPIO_ADP5588_MODULE) 1990#if IS_ENABLED(CONFIG_GPIO_ADP5588)
2017static struct adp5588_gpio_platform_data adp5588_gpio_data = { 1991static struct adp5588_gpio_platform_data adp5588_gpio_data = {
2018 .gpio_start = 50, 1992 .gpio_start = 50,
2019 .pullup_dis_mask = 0, 1993 .pullup_dis_mask = 0,
2020}; 1994};
2021#endif 1995#endif
2022 1996
2023#if defined(CONFIG_BACKLIGHT_ADP8870) || defined(CONFIG_BACKLIGHT_ADP8870_MODULE) 1997#if IS_ENABLED(CONFIG_BACKLIGHT_ADP8870)
2024#include <linux/i2c/adp8870.h> 1998#include <linux/i2c/adp8870.h>
2025static struct led_info adp8870_leds[] = { 1999static struct led_info adp8870_leds[] = {
2026 { 2000 {
@@ -2072,7 +2046,7 @@ static struct adp8870_backlight_platform_data adp8870_pdata = {
2072}; 2046};
2073#endif 2047#endif
2074 2048
2075#if defined(CONFIG_BACKLIGHT_ADP8860) || defined(CONFIG_BACKLIGHT_ADP8860_MODULE) 2049#if IS_ENABLED(CONFIG_BACKLIGHT_ADP8860)
2076#include <linux/i2c/adp8860.h> 2050#include <linux/i2c/adp8860.h>
2077static struct led_info adp8860_leds[] = { 2051static struct led_info adp8860_leds[] = {
2078 { 2052 {
@@ -2114,7 +2088,7 @@ static struct adp8860_backlight_platform_data adp8860_pdata = {
2114}; 2088};
2115#endif 2089#endif
2116 2090
2117#if defined(CONFIG_REGULATOR_AD5398) || defined(CONFIG_REGULATOR_AD5398_MODULE) 2091#if IS_ENABLED(CONFIG_REGULATOR_AD5398)
2118static struct regulator_consumer_supply ad5398_consumer = { 2092static struct regulator_consumer_supply ad5398_consumer = {
2119 .supply = "current", 2093 .supply = "current",
2120}; 2094};
@@ -2129,8 +2103,7 @@ static struct regulator_init_data ad5398_regulator_data = {
2129 .consumer_supplies = &ad5398_consumer, 2103 .consumer_supplies = &ad5398_consumer,
2130}; 2104};
2131 2105
2132#if defined(CONFIG_REGULATOR_VIRTUAL_CONSUMER) || \ 2106#if IS_ENABLED(CONFIG_REGULATOR_VIRTUAL_CONSUMER)
2133 defined(CONFIG_REGULATOR_VIRTUAL_CONSUMER_MODULE)
2134static struct platform_device ad5398_virt_consumer_device = { 2107static struct platform_device ad5398_virt_consumer_device = {
2135 .name = "reg-virt-consumer", 2108 .name = "reg-virt-consumer",
2136 .id = 0, 2109 .id = 0,
@@ -2139,8 +2112,7 @@ static struct platform_device ad5398_virt_consumer_device = {
2139 }, 2112 },
2140}; 2113};
2141#endif 2114#endif
2142#if defined(CONFIG_REGULATOR_USERSPACE_CONSUMER) || \ 2115#if IS_ENABLED(CONFIG_REGULATOR_USERSPACE_CONSUMER)
2143 defined(CONFIG_REGULATOR_USERSPACE_CONSUMER_MODULE)
2144static struct regulator_bulk_data ad5398_bulk_data = { 2116static struct regulator_bulk_data ad5398_bulk_data = {
2145 .supply = "current", 2117 .supply = "current",
2146}; 2118};
@@ -2161,14 +2133,14 @@ static struct platform_device ad5398_userspace_consumer_device = {
2161#endif 2133#endif
2162#endif 2134#endif
2163 2135
2164#if defined(CONFIG_ADT7410) || defined(CONFIG_ADT7410_MODULE) 2136#if IS_ENABLED(CONFIG_ADT7410)
2165/* INT bound temperature alarm event. line 1 */ 2137/* INT bound temperature alarm event. line 1 */
2166static unsigned long adt7410_platform_data[2] = { 2138static unsigned long adt7410_platform_data[2] = {
2167 IRQ_PG4, IRQF_TRIGGER_LOW, 2139 IRQ_PG4, IRQF_TRIGGER_LOW,
2168}; 2140};
2169#endif 2141#endif
2170 2142
2171#if defined(CONFIG_ADT7316_I2C) || defined(CONFIG_ADT7316_I2C_MODULE) 2143#if IS_ENABLED(CONFIG_ADT7316_I2C)
2172/* INT bound temperature alarm event. line 1 */ 2144/* INT bound temperature alarm event. line 1 */
2173static unsigned long adt7316_i2c_data[2] = { 2145static unsigned long adt7316_i2c_data[2] = {
2174 IRQF_TRIGGER_LOW, /* interrupt flags */ 2146 IRQF_TRIGGER_LOW, /* interrupt flags */
@@ -2183,13 +2155,13 @@ static struct i2c_board_info __initdata bfin_i2c_board_info[] = {
2183 }, 2155 },
2184#endif 2156#endif
2185 2157
2186#if defined(CONFIG_SND_SOC_ADAV80X) || defined(CONFIG_SND_SOC_ADAV80X_MODULE) 2158#if IS_ENABLED(CONFIG_SND_SOC_ADAV80X)
2187 { 2159 {
2188 I2C_BOARD_INFO("adav803", 0x10), 2160 I2C_BOARD_INFO("adav803", 0x10),
2189 }, 2161 },
2190#endif 2162#endif
2191 2163
2192#if defined(CONFIG_INPUT_AD714X_I2C) || defined(CONFIG_INPUT_AD714X_I2C_MODULE) 2164#if IS_ENABLED(CONFIG_INPUT_AD714X_I2C)
2193 { 2165 {
2194 I2C_BOARD_INFO("ad7142_captouch", 0x2C), 2166 I2C_BOARD_INFO("ad7142_captouch", 0x2C),
2195 .irq = IRQ_PG5, 2167 .irq = IRQ_PG5,
@@ -2197,39 +2169,39 @@ static struct i2c_board_info __initdata bfin_i2c_board_info[] = {
2197 }, 2169 },
2198#endif 2170#endif
2199 2171
2200#if defined(CONFIG_AD7150) || defined(CONFIG_AD7150_MODULE) 2172#if IS_ENABLED(CONFIG_AD7150)
2201 { 2173 {
2202 I2C_BOARD_INFO("ad7150", 0x48), 2174 I2C_BOARD_INFO("ad7150", 0x48),
2203 .irq = IRQ_PG5, /* fixme: use real interrupt number */ 2175 .irq = IRQ_PG5, /* fixme: use real interrupt number */
2204 }, 2176 },
2205#endif 2177#endif
2206 2178
2207#if defined(CONFIG_AD7152) || defined(CONFIG_AD7152_MODULE) 2179#if IS_ENABLED(CONFIG_AD7152)
2208 { 2180 {
2209 I2C_BOARD_INFO("ad7152", 0x48), 2181 I2C_BOARD_INFO("ad7152", 0x48),
2210 }, 2182 },
2211#endif 2183#endif
2212 2184
2213#if defined(CONFIG_AD774X) || defined(CONFIG_AD774X_MODULE) 2185#if IS_ENABLED(CONFIG_AD774X)
2214 { 2186 {
2215 I2C_BOARD_INFO("ad774x", 0x48), 2187 I2C_BOARD_INFO("ad774x", 0x48),
2216 }, 2188 },
2217#endif 2189#endif
2218 2190
2219#if defined(CONFIG_ADE7854_I2C) || defined(CONFIG_ADE7854_I2C_MODULE) 2191#if IS_ENABLED(CONFIG_ADE7854_I2C)
2220 { 2192 {
2221 I2C_BOARD_INFO("ade7854", 0x38), 2193 I2C_BOARD_INFO("ade7854", 0x38),
2222 }, 2194 },
2223#endif 2195#endif
2224 2196
2225#if defined(CONFIG_ADT75) || defined(CONFIG_ADT75_MODULE) 2197#if IS_ENABLED(CONFIG_SENSORS_LM75)
2226 { 2198 {
2227 I2C_BOARD_INFO("adt75", 0x9), 2199 I2C_BOARD_INFO("adt75", 0x9),
2228 .irq = IRQ_PG5, 2200 .irq = IRQ_PG5,
2229 }, 2201 },
2230#endif 2202#endif
2231 2203
2232#if defined(CONFIG_ADT7410) || defined(CONFIG_ADT7410_MODULE) 2204#if IS_ENABLED(CONFIG_ADT7410)
2233 { 2205 {
2234 I2C_BOARD_INFO("adt7410", 0x48), 2206 I2C_BOARD_INFO("adt7410", 0x48),
2235 /* CT critical temperature event. line 0 */ 2207 /* CT critical temperature event. line 0 */
@@ -2238,14 +2210,14 @@ static struct i2c_board_info __initdata bfin_i2c_board_info[] = {
2238 }, 2210 },
2239#endif 2211#endif
2240 2212
2241#if defined(CONFIG_AD7291) || defined(CONFIG_AD7291_MODULE) 2213#if IS_ENABLED(CONFIG_AD7291)
2242 { 2214 {
2243 I2C_BOARD_INFO("ad7291", 0x20), 2215 I2C_BOARD_INFO("ad7291", 0x20),
2244 .irq = IRQ_PG5, 2216 .irq = IRQ_PG5,
2245 }, 2217 },
2246#endif 2218#endif
2247 2219
2248#if defined(CONFIG_ADT7316_I2C) || defined(CONFIG_ADT7316_I2C_MODULE) 2220#if IS_ENABLED(CONFIG_ADT7316_I2C)
2249 { 2221 {
2250 I2C_BOARD_INFO("adt7316", 0x48), 2222 I2C_BOARD_INFO("adt7316", 0x48),
2251 .irq = IRQ_PG6, 2223 .irq = IRQ_PG6,
@@ -2253,128 +2225,128 @@ static struct i2c_board_info __initdata bfin_i2c_board_info[] = {
2253 }, 2225 },
2254#endif 2226#endif
2255 2227
2256#if defined(CONFIG_BFIN_TWI_LCD) || defined(CONFIG_BFIN_TWI_LCD_MODULE) 2228#if IS_ENABLED(CONFIG_BFIN_TWI_LCD)
2257 { 2229 {
2258 I2C_BOARD_INFO("pcf8574_lcd", 0x22), 2230 I2C_BOARD_INFO("pcf8574_lcd", 0x22),
2259 }, 2231 },
2260#endif 2232#endif
2261#if defined(CONFIG_INPUT_PCF8574) || defined(CONFIG_INPUT_PCF8574_MODULE) 2233#if IS_ENABLED(CONFIG_INPUT_PCF8574)
2262 { 2234 {
2263 I2C_BOARD_INFO("pcf8574_keypad", 0x27), 2235 I2C_BOARD_INFO("pcf8574_keypad", 0x27),
2264 .irq = IRQ_PG6, 2236 .irq = IRQ_PG6,
2265 }, 2237 },
2266#endif 2238#endif
2267#if defined(CONFIG_TOUCHSCREEN_AD7879_I2C) || defined(CONFIG_TOUCHSCREEN_AD7879_I2C_MODULE) 2239#if IS_ENABLED(CONFIG_TOUCHSCREEN_AD7879_I2C)
2268 { 2240 {
2269 I2C_BOARD_INFO("ad7879", 0x2F), 2241 I2C_BOARD_INFO("ad7879", 0x2F),
2270 .irq = IRQ_PG5, 2242 .irq = IRQ_PG5,
2271 .platform_data = (void *)&bfin_ad7879_ts_info, 2243 .platform_data = (void *)&bfin_ad7879_ts_info,
2272 }, 2244 },
2273#endif 2245#endif
2274#if defined(CONFIG_KEYBOARD_ADP5588) || defined(CONFIG_KEYBOARD_ADP5588_MODULE) 2246#if IS_ENABLED(CONFIG_KEYBOARD_ADP5588)
2275 { 2247 {
2276 I2C_BOARD_INFO("adp5588-keys", 0x34), 2248 I2C_BOARD_INFO("adp5588-keys", 0x34),
2277 .irq = IRQ_PG0, 2249 .irq = IRQ_PG0,
2278 .platform_data = (void *)&adp5588_kpad_data, 2250 .platform_data = (void *)&adp5588_kpad_data,
2279 }, 2251 },
2280#endif 2252#endif
2281#if defined(CONFIG_PMIC_ADP5520) || defined(CONFIG_PMIC_ADP5520_MODULE) 2253#if IS_ENABLED(CONFIG_PMIC_ADP5520)
2282 { 2254 {
2283 I2C_BOARD_INFO("pmic-adp5520", 0x32), 2255 I2C_BOARD_INFO("pmic-adp5520", 0x32),
2284 .irq = IRQ_PG0, 2256 .irq = IRQ_PG0,
2285 .platform_data = (void *)&adp5520_pdev_data, 2257 .platform_data = (void *)&adp5520_pdev_data,
2286 }, 2258 },
2287#endif 2259#endif
2288#if defined(CONFIG_INPUT_ADXL34X_I2C) || defined(CONFIG_INPUT_ADXL34X_I2C_MODULE) 2260#if IS_ENABLED(CONFIG_INPUT_ADXL34X_I2C)
2289 { 2261 {
2290 I2C_BOARD_INFO("adxl34x", 0x53), 2262 I2C_BOARD_INFO("adxl34x", 0x53),
2291 .irq = IRQ_PG3, 2263 .irq = IRQ_PG3,
2292 .platform_data = (void *)&adxl34x_info, 2264 .platform_data = (void *)&adxl34x_info,
2293 }, 2265 },
2294#endif 2266#endif
2295#if defined(CONFIG_GPIO_ADP5588) || defined(CONFIG_GPIO_ADP5588_MODULE) 2267#if IS_ENABLED(CONFIG_GPIO_ADP5588)
2296 { 2268 {
2297 I2C_BOARD_INFO("adp5588-gpio", 0x34), 2269 I2C_BOARD_INFO("adp5588-gpio", 0x34),
2298 .platform_data = (void *)&adp5588_gpio_data, 2270 .platform_data = (void *)&adp5588_gpio_data,
2299 }, 2271 },
2300#endif 2272#endif
2301#if defined(CONFIG_FB_BFIN_7393) || defined(CONFIG_FB_BFIN_7393_MODULE) 2273#if IS_ENABLED(CONFIG_FB_BFIN_7393)
2302 { 2274 {
2303 I2C_BOARD_INFO("bfin-adv7393", 0x2B), 2275 I2C_BOARD_INFO("bfin-adv7393", 0x2B),
2304 }, 2276 },
2305#endif 2277#endif
2306#if defined(CONFIG_FB_BF537_LQ035) || defined(CONFIG_FB_BF537_LQ035_MODULE) 2278#if IS_ENABLED(CONFIG_FB_BF537_LQ035)
2307 { 2279 {
2308 I2C_BOARD_INFO("bf537-lq035-ad5280", 0x2F), 2280 I2C_BOARD_INFO("bf537-lq035-ad5280", 0x2F),
2309 }, 2281 },
2310#endif 2282#endif
2311#if defined(CONFIG_BACKLIGHT_ADP8870) || defined(CONFIG_BACKLIGHT_ADP8870_MODULE) 2283#if IS_ENABLED(CONFIG_BACKLIGHT_ADP8870)
2312 { 2284 {
2313 I2C_BOARD_INFO("adp8870", 0x2B), 2285 I2C_BOARD_INFO("adp8870", 0x2B),
2314 .platform_data = (void *)&adp8870_pdata, 2286 .platform_data = (void *)&adp8870_pdata,
2315 }, 2287 },
2316#endif 2288#endif
2317#if defined(CONFIG_SND_SOC_ADAU1371) || defined(CONFIG_SND_SOC_ADAU1371_MODULE) 2289#if IS_ENABLED(CONFIG_SND_SOC_ADAU1371)
2318 { 2290 {
2319 I2C_BOARD_INFO("adau1371", 0x1A), 2291 I2C_BOARD_INFO("adau1371", 0x1A),
2320 }, 2292 },
2321#endif 2293#endif
2322#if defined(CONFIG_SND_SOC_ADAU1761) || defined(CONFIG_SND_SOC_ADAU1761_MODULE) 2294#if IS_ENABLED(CONFIG_SND_SOC_ADAU1761)
2323 { 2295 {
2324 I2C_BOARD_INFO("adau1761", 0x38), 2296 I2C_BOARD_INFO("adau1761", 0x38),
2325 }, 2297 },
2326#endif 2298#endif
2327#if defined(CONFIG_SND_SOC_ADAU1361) || defined(CONFIG_SND_SOC_ADAU1361_MODULE) 2299#if IS_ENABLED(CONFIG_SND_SOC_ADAU1361)
2328 { 2300 {
2329 I2C_BOARD_INFO("adau1361", 0x38), 2301 I2C_BOARD_INFO("adau1361", 0x38),
2330 }, 2302 },
2331#endif 2303#endif
2332#if defined(CONFIG_SND_SOC_ADAU1701) || defined(CONFIG_SND_SOC_ADAU1701_MODULE) 2304#if IS_ENABLED(CONFIG_SND_SOC_ADAU1701)
2333 { 2305 {
2334 I2C_BOARD_INFO("adau1701", 0x34), 2306 I2C_BOARD_INFO("adau1701", 0x34),
2335 }, 2307 },
2336#endif 2308#endif
2337#if defined(CONFIG_AD525X_DPOT) || defined(CONFIG_AD525X_DPOT_MODULE) 2309#if IS_ENABLED(CONFIG_AD525X_DPOT)
2338 { 2310 {
2339 I2C_BOARD_INFO("ad5258", 0x18), 2311 I2C_BOARD_INFO("ad5258", 0x18),
2340 }, 2312 },
2341#endif 2313#endif
2342#if defined(CONFIG_SND_SOC_SSM2602) || defined(CONFIG_SND_SOC_SSM2602_MODULE) 2314#if IS_ENABLED(CONFIG_SND_SOC_SSM2602)
2343 { 2315 {
2344 I2C_BOARD_INFO("ssm2602", 0x1b), 2316 I2C_BOARD_INFO("ssm2602", 0x1b),
2345 }, 2317 },
2346#endif 2318#endif
2347#if defined(CONFIG_REGULATOR_AD5398) || defined(CONFIG_REGULATOR_AD5398_MODULE) 2319#if IS_ENABLED(CONFIG_REGULATOR_AD5398)
2348 { 2320 {
2349 I2C_BOARD_INFO("ad5398", 0xC), 2321 I2C_BOARD_INFO("ad5398", 0xC),
2350 .platform_data = (void *)&ad5398_regulator_data, 2322 .platform_data = (void *)&ad5398_regulator_data,
2351 }, 2323 },
2352#endif 2324#endif
2353#if defined(CONFIG_BACKLIGHT_ADP8860) || defined(CONFIG_BACKLIGHT_ADP8860_MODULE) 2325#if IS_ENABLED(CONFIG_BACKLIGHT_ADP8860)
2354 { 2326 {
2355 I2C_BOARD_INFO("adp8860", 0x2A), 2327 I2C_BOARD_INFO("adp8860", 0x2A),
2356 .platform_data = (void *)&adp8860_pdata, 2328 .platform_data = (void *)&adp8860_pdata,
2357 }, 2329 },
2358#endif 2330#endif
2359#if defined(CONFIG_SND_SOC_ADAU1373) || defined(CONFIG_SND_SOC_ADAU1373_MODULE) 2331#if IS_ENABLED(CONFIG_SND_SOC_ADAU1373)
2360 { 2332 {
2361 I2C_BOARD_INFO("adau1373", 0x1A), 2333 I2C_BOARD_INFO("adau1373", 0x1A),
2362 }, 2334 },
2363#endif 2335#endif
2364#if defined(CONFIG_BFIN_TWI_LCD) || defined(CONFIG_BFIN_TWI_LCD_MODULE) 2336#if IS_ENABLED(CONFIG_BFIN_TWI_LCD)
2365 { 2337 {
2366 I2C_BOARD_INFO("ad5252", 0x2e), 2338 I2C_BOARD_INFO("ad5252", 0x2e),
2367 }, 2339 },
2368#endif 2340#endif
2369}; 2341};
2370#if defined(CONFIG_SERIAL_BFIN_SPORT) || defined(CONFIG_SERIAL_BFIN_SPORT_MODULE) \ 2342#if IS_ENABLED(CONFIG_SERIAL_BFIN_SPORT) \
2371|| defined(CONFIG_BFIN_SPORT) || defined(CONFIG_BFIN_SPORT_MODULE) 2343|| IS_ENABLED(CONFIG_BFIN_SPORT)
2372unsigned short bfin_sport0_peripherals[] = { 2344unsigned short bfin_sport0_peripherals[] = {
2373 P_SPORT0_TFS, P_SPORT0_DTPRI, P_SPORT0_TSCLK, P_SPORT0_RFS, 2345 P_SPORT0_TFS, P_SPORT0_DTPRI, P_SPORT0_TSCLK, P_SPORT0_RFS,
2374 P_SPORT0_DRPRI, P_SPORT0_RSCLK, P_SPORT0_DRSEC, P_SPORT0_DTSEC, 0 2346 P_SPORT0_DRPRI, P_SPORT0_RSCLK, P_SPORT0_DRSEC, P_SPORT0_DTSEC, 0
2375}; 2347};
2376#endif 2348#endif
2377#if defined(CONFIG_SERIAL_BFIN_SPORT) || defined(CONFIG_SERIAL_BFIN_SPORT_MODULE) 2349#if IS_ENABLED(CONFIG_SERIAL_BFIN_SPORT)
2378#ifdef CONFIG_SERIAL_BFIN_SPORT0_UART 2350#ifdef CONFIG_SERIAL_BFIN_SPORT0_UART
2379static struct resource bfin_sport0_uart_resources[] = { 2351static struct resource bfin_sport0_uart_resources[] = {
2380 { 2352 {
@@ -2439,7 +2411,7 @@ static struct platform_device bfin_sport1_uart_device = {
2439}; 2411};
2440#endif 2412#endif
2441#endif 2413#endif
2442#if defined(CONFIG_BFIN_SPORT) || defined(CONFIG_BFIN_SPORT_MODULE) 2414#if IS_ENABLED(CONFIG_BFIN_SPORT)
2443static struct resource bfin_sport0_resources[] = { 2415static struct resource bfin_sport0_resources[] = {
2444 { 2416 {
2445 .start = SPORT0_TCR1, 2417 .start = SPORT0_TCR1,
@@ -2482,7 +2454,7 @@ static struct platform_device bfin_sport0_device = {
2482 }, 2454 },
2483}; 2455};
2484#endif 2456#endif
2485#if defined(CONFIG_PATA_PLATFORM) || defined(CONFIG_PATA_PLATFORM_MODULE) 2457#if IS_ENABLED(CONFIG_PATA_PLATFORM)
2486#define CF_IDE_NAND_CARD_USE_HDD_INTERFACE 2458#define CF_IDE_NAND_CARD_USE_HDD_INTERFACE
2487/* #define CF_IDE_NAND_CARD_USE_CF_IN_COMMON_MEMORY_MODE */ 2459/* #define CF_IDE_NAND_CARD_USE_CF_IN_COMMON_MEMORY_MODE */
2488 2460
@@ -2569,8 +2541,8 @@ static struct platform_device bfin_dpmc = {
2569 }, 2541 },
2570}; 2542};
2571 2543
2572#if defined(CONFIG_SND_BF5XX_I2S) || defined(CONFIG_SND_BF5XX_I2S_MODULE) || \ 2544#if IS_ENABLED(CONFIG_SND_BF5XX_I2S) || \
2573 defined(CONFIG_SND_BF5XX_AC97) || defined(CONFIG_SND_BF5XX_AC97_MODULE) 2545 IS_ENABLED(CONFIG_SND_BF5XX_AC97)
2574 2546
2575#define SPORT_REQ(x) \ 2547#define SPORT_REQ(x) \
2576 [x] = {P_SPORT##x##_TFS, P_SPORT##x##_DTPRI, P_SPORT##x##_TSCLK, \ 2548 [x] = {P_SPORT##x##_TFS, P_SPORT##x##_DTPRI, P_SPORT##x##_TSCLK, \
@@ -2620,22 +2592,21 @@ static struct resource bfin_snd_resources[][4] = {
2620}; 2592};
2621#endif 2593#endif
2622 2594
2623#if defined(CONFIG_SND_BF5XX_I2S) || defined(CONFIG_SND_BF5XX_I2S_MODULE) 2595#if IS_ENABLED(CONFIG_SND_BF5XX_I2S)
2624static struct platform_device bfin_i2s_pcm = { 2596static struct platform_device bfin_i2s_pcm = {
2625 .name = "bfin-i2s-pcm-audio", 2597 .name = "bfin-i2s-pcm-audio",
2626 .id = -1, 2598 .id = -1,
2627}; 2599};
2628#endif 2600#endif
2629 2601
2630#if defined(CONFIG_SND_BF5XX_AC97) || defined(CONFIG_SND_BF5XX_AC97_MODULE) 2602#if IS_ENABLED(CONFIG_SND_BF5XX_AC97)
2631static struct platform_device bfin_ac97_pcm = { 2603static struct platform_device bfin_ac97_pcm = {
2632 .name = "bfin-ac97-pcm-audio", 2604 .name = "bfin-ac97-pcm-audio",
2633 .id = -1, 2605 .id = -1,
2634}; 2606};
2635#endif 2607#endif
2636 2608
2637#if defined(CONFIG_SND_BF5XX_SOC_AD1836) \ 2609#if IS_ENABLED(CONFIG_SND_BF5XX_SOC_AD1836)
2638 || defined(CONFIG_SND_BF5XX_SOC_AD1836_MODULE)
2639static const char * const ad1836_link[] = { 2610static const char * const ad1836_link[] = {
2640 "bfin-i2s.0", 2611 "bfin-i2s.0",
2641 "spi0.4", 2612 "spi0.4",
@@ -2649,8 +2620,7 @@ static struct platform_device bfin_ad1836_machine = {
2649}; 2620};
2650#endif 2621#endif
2651 2622
2652#if defined(CONFIG_SND_BF5XX_SOC_AD73311) || \ 2623#if IS_ENABLED(CONFIG_SND_BF5XX_SOC_AD73311)
2653 defined(CONFIG_SND_BF5XX_SOC_AD73311_MODULE)
2654static const unsigned ad73311_gpio[] = { 2624static const unsigned ad73311_gpio[] = {
2655 GPIO_PF4, 2625 GPIO_PF4,
2656}; 2626};
@@ -2664,22 +2634,21 @@ static struct platform_device bfin_ad73311_machine = {
2664}; 2634};
2665#endif 2635#endif
2666 2636
2667#if defined(CONFIG_SND_SOC_AD73311) || defined(CONFIG_SND_SOC_AD73311_MODULE) 2637#if IS_ENABLED(CONFIG_SND_SOC_AD73311)
2668static struct platform_device bfin_ad73311_codec_device = { 2638static struct platform_device bfin_ad73311_codec_device = {
2669 .name = "ad73311", 2639 .name = "ad73311",
2670 .id = -1, 2640 .id = -1,
2671}; 2641};
2672#endif 2642#endif
2673 2643
2674#if defined(CONFIG_SND_SOC_BFIN_EVAL_ADAV80X) || \ 2644#if IS_ENABLED(CONFIG_SND_SOC_BFIN_EVAL_ADAV80X)
2675 defined(CONFIG_SND_SOC_BFIN_EVAL_ADAV80X_MODULE)
2676static struct platform_device bfin_eval_adav801_device = { 2645static struct platform_device bfin_eval_adav801_device = {
2677 .name = "bfin-eval-adav801", 2646 .name = "bfin-eval-adav801",
2678 .id = -1, 2647 .id = -1,
2679}; 2648};
2680#endif 2649#endif
2681 2650
2682#if defined(CONFIG_SND_BF5XX_SOC_I2S) || defined(CONFIG_SND_BF5XX_SOC_I2S_MODULE) 2651#if IS_ENABLED(CONFIG_SND_BF5XX_SOC_I2S)
2683static struct platform_device bfin_i2s = { 2652static struct platform_device bfin_i2s = {
2684 .name = "bfin-i2s", 2653 .name = "bfin-i2s",
2685 .id = CONFIG_SND_BF5XX_SPORT_NUM, 2654 .id = CONFIG_SND_BF5XX_SPORT_NUM,
@@ -2691,7 +2660,7 @@ static struct platform_device bfin_i2s = {
2691}; 2660};
2692#endif 2661#endif
2693 2662
2694#if defined(CONFIG_SND_BF5XX_SOC_AC97) || defined(CONFIG_SND_BF5XX_SOC_AC97_MODULE) 2663#if IS_ENABLED(CONFIG_SND_BF5XX_SOC_AC97)
2695static struct platform_device bfin_ac97 = { 2664static struct platform_device bfin_ac97 = {
2696 .name = "bfin-ac97", 2665 .name = "bfin-ac97",
2697 .id = CONFIG_SND_BF5XX_SPORT_NUM, 2666 .id = CONFIG_SND_BF5XX_SPORT_NUM,
@@ -2703,7 +2672,7 @@ static struct platform_device bfin_ac97 = {
2703}; 2672};
2704#endif 2673#endif
2705 2674
2706#if defined(CONFIG_REGULATOR_FIXED_VOLTAGE) || defined(CONFIG_REGULATOR_FIXED_VOLTAGE_MODULE) 2675#if IS_ENABLED(CONFIG_REGULATOR_FIXED_VOLTAGE)
2707#define REGULATOR_ADP122 "adp122" 2676#define REGULATOR_ADP122 "adp122"
2708#define REGULATOR_ADP122_UV 2500000 2677#define REGULATOR_ADP122_UV 2500000
2709 2678
@@ -2741,8 +2710,7 @@ static struct platform_device adp_switch_device = {
2741 }, 2710 },
2742}; 2711};
2743 2712
2744#if defined(CONFIG_REGULATOR_USERSPACE_CONSUMER) || \ 2713#if IS_ENABLED(CONFIG_REGULATOR_USERSPACE_CONSUMER)
2745 defined(CONFIG_REGULATOR_USERSPACE_CONSUMER_MODULE)
2746static struct regulator_bulk_data adp122_bulk_data = { 2714static struct regulator_bulk_data adp122_bulk_data = {
2747 .supply = REGULATOR_ADP122, 2715 .supply = REGULATOR_ADP122,
2748}; 2716};
@@ -2763,8 +2731,7 @@ static struct platform_device adp122_userspace_consumer_device = {
2763#endif 2731#endif
2764#endif 2732#endif
2765 2733
2766#if defined(CONFIG_IIO_GPIO_TRIGGER) || \ 2734#if IS_ENABLED(CONFIG_IIO_GPIO_TRIGGER)
2767 defined(CONFIG_IIO_GPIO_TRIGGER_MODULE)
2768 2735
2769static struct resource iio_gpio_trigger_resources[] = { 2736static struct resource iio_gpio_trigger_resources[] = {
2770 [0] = { 2737 [0] = {
@@ -2781,15 +2748,13 @@ static struct platform_device iio_gpio_trigger = {
2781}; 2748};
2782#endif 2749#endif
2783 2750
2784#if defined(CONFIG_SND_SOC_BFIN_EVAL_ADAU1373) || \ 2751#if IS_ENABLED(CONFIG_SND_SOC_BFIN_EVAL_ADAU1373)
2785 defined(CONFIG_SND_SOC_BFIN_EVAL_ADAU1373_MODULE)
2786static struct platform_device bf5xx_adau1373_device = { 2752static struct platform_device bf5xx_adau1373_device = {
2787 .name = "bfin-eval-adau1373", 2753 .name = "bfin-eval-adau1373",
2788}; 2754};
2789#endif 2755#endif
2790 2756
2791#if defined(CONFIG_SND_SOC_BFIN_EVAL_ADAU1701) || \ 2757#if IS_ENABLED(CONFIG_SND_SOC_BFIN_EVAL_ADAU1701)
2792 defined(CONFIG_SND_SOC_BFIN_EVAL_ADAU1701_MODULE)
2793static struct platform_device bf5xx_adau1701_device = { 2758static struct platform_device bf5xx_adau1701_device = {
2794 .name = "bfin-eval-adau1701", 2759 .name = "bfin-eval-adau1701",
2795}; 2760};
@@ -2798,73 +2763,72 @@ static struct platform_device bf5xx_adau1701_device = {
2798static struct platform_device *stamp_devices[] __initdata = { 2763static struct platform_device *stamp_devices[] __initdata = {
2799 2764
2800 &bfin_dpmc, 2765 &bfin_dpmc,
2801#if defined(CONFIG_BFIN_SPORT) || defined(CONFIG_BFIN_SPORT_MODULE) 2766#if IS_ENABLED(CONFIG_BFIN_SPORT)
2802 &bfin_sport0_device, 2767 &bfin_sport0_device,
2803#endif 2768#endif
2804#if defined(CONFIG_BFIN_CFPCMCIA) || defined(CONFIG_BFIN_CFPCMCIA_MODULE) 2769#if IS_ENABLED(CONFIG_BFIN_CFPCMCIA)
2805 &bfin_pcmcia_cf_device, 2770 &bfin_pcmcia_cf_device,
2806#endif 2771#endif
2807 2772
2808#if defined(CONFIG_RTC_DRV_BFIN) || defined(CONFIG_RTC_DRV_BFIN_MODULE) 2773#if IS_ENABLED(CONFIG_RTC_DRV_BFIN)
2809 &rtc_device, 2774 &rtc_device,
2810#endif 2775#endif
2811 2776
2812#if defined(CONFIG_USB_SL811_HCD) || defined(CONFIG_USB_SL811_HCD_MODULE) 2777#if IS_ENABLED(CONFIG_USB_SL811_HCD)
2813 &sl811_hcd_device, 2778 &sl811_hcd_device,
2814#endif 2779#endif
2815 2780
2816#if defined(CONFIG_USB_ISP1362_HCD) || defined(CONFIG_USB_ISP1362_HCD_MODULE) 2781#if IS_ENABLED(CONFIG_USB_ISP1362_HCD)
2817 &isp1362_hcd_device, 2782 &isp1362_hcd_device,
2818#endif 2783#endif
2819 2784
2820#if defined(CONFIG_USB_ISP1760_HCD) || defined(CONFIG_USB_ISP1760_HCD_MODULE) 2785#if IS_ENABLED(CONFIG_USB_ISP1760_HCD)
2821 &bfin_isp1760_device, 2786 &bfin_isp1760_device,
2822#endif 2787#endif
2823 2788
2824#if defined(CONFIG_SMC91X) || defined(CONFIG_SMC91X_MODULE) 2789#if IS_ENABLED(CONFIG_SMC91X)
2825 &smc91x_device, 2790 &smc91x_device,
2826#endif 2791#endif
2827 2792
2828#if defined(CONFIG_DM9000) || defined(CONFIG_DM9000_MODULE) 2793#if IS_ENABLED(CONFIG_DM9000)
2829 &dm9000_device, 2794 &dm9000_device,
2830#endif 2795#endif
2831 2796
2832#if defined(CONFIG_CAN_BFIN) || defined(CONFIG_CAN_BFIN_MODULE) 2797#if IS_ENABLED(CONFIG_CAN_BFIN)
2833 &bfin_can_device, 2798 &bfin_can_device,
2834#endif 2799#endif
2835 2800
2836#if defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE) 2801#if IS_ENABLED(CONFIG_BFIN_MAC)
2837 &bfin_mii_bus, 2802 &bfin_mii_bus,
2838 &bfin_mac_device, 2803 &bfin_mac_device,
2839#endif 2804#endif
2840 2805
2841#if defined(CONFIG_USB_NET2272) || defined(CONFIG_USB_NET2272_MODULE) 2806#if IS_ENABLED(CONFIG_USB_NET2272)
2842 &net2272_bfin_device, 2807 &net2272_bfin_device,
2843#endif 2808#endif
2844 2809
2845#if defined(CONFIG_SPI_BFIN5XX) || defined(CONFIG_SPI_BFIN5XX_MODULE) 2810#if IS_ENABLED(CONFIG_SPI_BFIN5XX)
2846 &bfin_spi0_device, 2811 &bfin_spi0_device,
2847#endif 2812#endif
2848 2813
2849#if defined(CONFIG_SPI_BFIN_SPORT) || defined(CONFIG_SPI_BFIN_SPORT_MODULE) 2814#if IS_ENABLED(CONFIG_SPI_BFIN_SPORT)
2850 &bfin_sport_spi0_device, 2815 &bfin_sport_spi0_device,
2851 &bfin_sport_spi1_device, 2816 &bfin_sport_spi1_device,
2852#endif 2817#endif
2853 2818
2854#if defined(CONFIG_FB_BF537_LQ035) || defined(CONFIG_FB_BF537_LQ035_MODULE) 2819#if IS_ENABLED(CONFIG_FB_BF537_LQ035)
2855 &bfin_fb_device, 2820 &bfin_fb_device,
2856#endif 2821#endif
2857 2822
2858#if defined(CONFIG_FB_BFIN_LQ035Q1) || defined(CONFIG_FB_BFIN_LQ035Q1_MODULE) 2823#if IS_ENABLED(CONFIG_FB_BFIN_LQ035Q1)
2859 &bfin_lq035q1_device, 2824 &bfin_lq035q1_device,
2860#endif 2825#endif
2861 2826
2862#if defined(CONFIG_VIDEO_BLACKFIN_CAPTURE) \ 2827#if IS_ENABLED(CONFIG_VIDEO_BLACKFIN_CAPTURE)
2863 || defined(CONFIG_VIDEO_BLACKFIN_CAPTURE_MODULE)
2864 &bfin_capture_device, 2828 &bfin_capture_device,
2865#endif 2829#endif
2866 2830
2867#if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE) 2831#if IS_ENABLED(CONFIG_SERIAL_BFIN)
2868#ifdef CONFIG_SERIAL_BFIN_UART0 2832#ifdef CONFIG_SERIAL_BFIN_UART0
2869 &bfin_uart0_device, 2833 &bfin_uart0_device,
2870#endif 2834#endif
@@ -2873,7 +2837,7 @@ static struct platform_device *stamp_devices[] __initdata = {
2873#endif 2837#endif
2874#endif 2838#endif
2875 2839
2876#if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE) 2840#if IS_ENABLED(CONFIG_BFIN_SIR)
2877#ifdef CONFIG_BFIN_SIR0 2841#ifdef CONFIG_BFIN_SIR0
2878 &bfin_sir0_device, 2842 &bfin_sir0_device,
2879#endif 2843#endif
@@ -2882,11 +2846,11 @@ static struct platform_device *stamp_devices[] __initdata = {
2882#endif 2846#endif
2883#endif 2847#endif
2884 2848
2885#if defined(CONFIG_I2C_BLACKFIN_TWI) || defined(CONFIG_I2C_BLACKFIN_TWI_MODULE) 2849#if IS_ENABLED(CONFIG_I2C_BLACKFIN_TWI)
2886 &i2c_bfin_twi_device, 2850 &i2c_bfin_twi_device,
2887#endif 2851#endif
2888 2852
2889#if defined(CONFIG_SERIAL_BFIN_SPORT) || defined(CONFIG_SERIAL_BFIN_SPORT_MODULE) 2853#if IS_ENABLED(CONFIG_SERIAL_BFIN_SPORT)
2890#ifdef CONFIG_SERIAL_BFIN_SPORT0_UART 2854#ifdef CONFIG_SERIAL_BFIN_SPORT0_UART
2891 &bfin_sport0_uart_device, 2855 &bfin_sport0_uart_device,
2892#endif 2856#endif
@@ -2895,95 +2859,86 @@ static struct platform_device *stamp_devices[] __initdata = {
2895#endif 2859#endif
2896#endif 2860#endif
2897 2861
2898#if defined(CONFIG_PATA_PLATFORM) || defined(CONFIG_PATA_PLATFORM_MODULE) 2862#if IS_ENABLED(CONFIG_PATA_PLATFORM)
2899 &bfin_pata_device, 2863 &bfin_pata_device,
2900#endif 2864#endif
2901 2865
2902#if defined(CONFIG_KEYBOARD_GPIO) || defined(CONFIG_KEYBOARD_GPIO_MODULE) 2866#if IS_ENABLED(CONFIG_KEYBOARD_GPIO)
2903 &bfin_device_gpiokeys, 2867 &bfin_device_gpiokeys,
2904#endif 2868#endif
2905 2869
2906#if defined(CONFIG_MTD_NAND_PLATFORM) || defined(CONFIG_MTD_NAND_PLATFORM_MODULE) 2870#if IS_ENABLED(CONFIG_MTD_NAND_PLATFORM)
2907 &bfin_async_nand_device, 2871 &bfin_async_nand_device,
2908#endif 2872#endif
2909 2873
2910#if defined(CONFIG_MTD_PHYSMAP) || defined(CONFIG_MTD_PHYSMAP_MODULE) 2874#if IS_ENABLED(CONFIG_MTD_PHYSMAP)
2911 &stamp_flash_device, 2875 &stamp_flash_device,
2912#endif 2876#endif
2913 2877
2914#if defined(CONFIG_SND_BF5XX_I2S) || defined(CONFIG_SND_BF5XX_I2S_MODULE) 2878#if IS_ENABLED(CONFIG_SND_BF5XX_I2S)
2915 &bfin_i2s_pcm, 2879 &bfin_i2s_pcm,
2916#endif 2880#endif
2917 2881
2918#if defined(CONFIG_SND_BF5XX_AC97) || defined(CONFIG_SND_BF5XX_AC97_MODULE) 2882#if IS_ENABLED(CONFIG_SND_BF5XX_AC97)
2919 &bfin_ac97_pcm, 2883 &bfin_ac97_pcm,
2920#endif 2884#endif
2921 2885
2922#if defined(CONFIG_SND_BF5XX_SOC_AD1836) || \ 2886#if IS_ENABLED(CONFIG_SND_BF5XX_SOC_AD1836)
2923 defined(CONFIG_SND_BF5XX_SOC_AD1836_MODULE)
2924 &bfin_ad1836_machine, 2887 &bfin_ad1836_machine,
2925#endif 2888#endif
2926 2889
2927#if defined(CONFIG_SND_BF5XX_SOC_AD73311) || \ 2890#if IS_ENABLED(CONFIG_SND_BF5XX_SOC_AD73311)
2928 defined(CONFIG_SND_BF5XX_SOC_AD73311_MODULE)
2929 &bfin_ad73311_machine, 2891 &bfin_ad73311_machine,
2930#endif 2892#endif
2931 2893
2932#if defined(CONFIG_SND_SOC_AD73311) || defined(CONFIG_SND_SOC_AD73311_MODULE) 2894#if IS_ENABLED(CONFIG_SND_SOC_AD73311)
2933 &bfin_ad73311_codec_device, 2895 &bfin_ad73311_codec_device,
2934#endif 2896#endif
2935 2897
2936#if defined(CONFIG_SND_BF5XX_SOC_I2S) || defined(CONFIG_SND_BF5XX_SOC_I2S_MODULE) 2898#if IS_ENABLED(CONFIG_SND_BF5XX_SOC_I2S)
2937 &bfin_i2s, 2899 &bfin_i2s,
2938#endif 2900#endif
2939 2901
2940#if defined(CONFIG_SND_BF5XX_SOC_AC97) || defined(CONFIG_SND_BF5XX_SOC_AC97_MODULE) 2902#if IS_ENABLED(CONFIG_SND_BF5XX_SOC_AC97)
2941 &bfin_ac97, 2903 &bfin_ac97,
2942#endif 2904#endif
2943 2905
2944#if defined(CONFIG_REGULATOR_AD5398) || defined(CONFIG_REGULATOR_AD5398_MODULE) 2906#if IS_ENABLED(CONFIG_REGULATOR_AD5398)
2945#if defined(CONFIG_REGULATOR_VIRTUAL_CONSUMER) || \ 2907#if IS_ENABLED(CONFIG_REGULATOR_VIRTUAL_CONSUMER)
2946 defined(CONFIG_REGULATOR_VIRTUAL_CONSUMER_MODULE)
2947 &ad5398_virt_consumer_device, 2908 &ad5398_virt_consumer_device,
2948#endif 2909#endif
2949#if defined(CONFIG_REGULATOR_USERSPACE_CONSUMER) || \ 2910#if IS_ENABLED(CONFIG_REGULATOR_USERSPACE_CONSUMER)
2950 defined(CONFIG_REGULATOR_USERSPACE_CONSUMER_MODULE)
2951 &ad5398_userspace_consumer_device, 2911 &ad5398_userspace_consumer_device,
2952#endif 2912#endif
2953#endif 2913#endif
2954 2914
2955#if defined(CONFIG_REGULATOR_FIXED_VOLTAGE) || defined(CONFIG_REGULATOR_FIXED_VOLTAGE_MODULE) 2915#if IS_ENABLED(CONFIG_REGULATOR_FIXED_VOLTAGE)
2956 &adp_switch_device, 2916 &adp_switch_device,
2957#if defined(CONFIG_REGULATOR_USERSPACE_CONSUMER) || \ 2917#if IS_ENABLED(CONFIG_REGULATOR_USERSPACE_CONSUMER)
2958 defined(CONFIG_REGULATOR_USERSPACE_CONSUMER_MODULE)
2959 &adp122_userspace_consumer_device, 2918 &adp122_userspace_consumer_device,
2960#endif 2919#endif
2961#endif 2920#endif
2962 2921
2963#if defined(CONFIG_IIO_GPIO_TRIGGER) || \ 2922#if IS_ENABLED(CONFIG_IIO_GPIO_TRIGGER)
2964 defined(CONFIG_IIO_GPIO_TRIGGER_MODULE)
2965 &iio_gpio_trigger, 2923 &iio_gpio_trigger,
2966#endif 2924#endif
2967 2925
2968#if defined(CONFIG_SND_SOC_BFIN_EVAL_ADAU1373) || \ 2926#if IS_ENABLED(CONFIG_SND_SOC_BFIN_EVAL_ADAU1373)
2969 defined(CONFIG_SND_SOC_BFIN_EVAL_ADAU1373_MODULE)
2970 &bf5xx_adau1373_device, 2927 &bf5xx_adau1373_device,
2971#endif 2928#endif
2972 2929
2973#if defined(CONFIG_SND_SOC_BFIN_EVAL_ADAU1701) || \ 2930#if IS_ENABLED(CONFIG_SND_SOC_BFIN_EVAL_ADAU1701)
2974 defined(CONFIG_SND_SOC_BFIN_EVAL_ADAU1701_MODULE)
2975 &bf5xx_adau1701_device, 2931 &bf5xx_adau1701_device,
2976#endif 2932#endif
2977 2933
2978#if defined(CONFIG_SND_SOC_BFIN_EVAL_ADAV80X) || \ 2934#if IS_ENABLED(CONFIG_SND_SOC_BFIN_EVAL_ADAV80X)
2979 defined(CONFIG_SND_SOC_BFIN_EVAL_ADAV80X_MODULE)
2980 &bfin_eval_adav801_device, 2935 &bfin_eval_adav801_device,
2981#endif 2936#endif
2982}; 2937};
2983 2938
2984static int __init net2272_init(void) 2939static int __init net2272_init(void)
2985{ 2940{
2986#if defined(CONFIG_USB_NET2272) || defined(CONFIG_USB_NET2272_MODULE) 2941#if IS_ENABLED(CONFIG_USB_NET2272)
2987 int ret; 2942 int ret;
2988 2943
2989 ret = gpio_request(GPIO_PF6, "net2272"); 2944 ret = gpio_request(GPIO_PF6, "net2272");
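The hunks above all apply the same substitution: the open-coded pair defined(CONFIG_FOO) || defined(CONFIG_FOO_MODULE) collapses into a single IS_ENABLED(CONFIG_FOO) test. As a rough sketch of why the two forms are equivalent — simplified and renamed here for illustration, not copied verbatim from this kernel's include/linux/kconfig.h — Kconfig emits CONFIG_FOO=1 for built-in options and CONFIG_FOO_MODULE=1 for modular ones, and the placeholder trick below folds either case into a constant 1:

/*
 * Simplified sketch of the IS_ENABLED() machinery (illustrative only; the
 * real helper lives in include/linux/kconfig.h and is pulled in
 * automatically by kbuild).
 */
#define __ARG_PLACEHOLDER_1 0,
#define __take_second_arg(__ignored, val, ...) val

#define __is_defined(x)      ___is_defined(x)
#define ___is_defined(val)   ____is_defined(__ARG_PLACEHOLDER_##val)
#define ____is_defined(junk) __take_second_arg(junk 1, 0)

/* 1 if CONFIG_FOO is 'y' or 'm', 0 otherwise */
#define IS_ENABLED(option) (__is_defined(option) || __is_defined(option##_MODULE))

/* So the following two guards select the same code: */
#if defined(CONFIG_MMC_SPI) || defined(CONFIG_MMC_SPI_MODULE)
/* ... */
#endif
#if IS_ENABLED(CONFIG_MMC_SPI)
/* ... */
#endif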
diff --git a/arch/blackfin/mach-bf537/boards/tcm_bf537.c b/arch/blackfin/mach-bf537/boards/tcm_bf537.c
index e285c3675286..a0211225748d 100644
--- a/arch/blackfin/mach-bf537/boards/tcm_bf537.c
+++ b/arch/blackfin/mach-bf537/boards/tcm_bf537.c
@@ -16,7 +16,7 @@
16#include <linux/mtd/physmap.h> 16#include <linux/mtd/physmap.h>
17#include <linux/spi/spi.h> 17#include <linux/spi/spi.h>
18#include <linux/spi/flash.h> 18#include <linux/spi/flash.h>
19#if defined(CONFIG_USB_ISP1362_HCD) || defined(CONFIG_USB_ISP1362_HCD_MODULE) 19#if IS_ENABLED(CONFIG_USB_ISP1362_HCD)
20#include <linux/usb/isp1362.h> 20#include <linux/usb/isp1362.h>
21#endif 21#endif
22#include <linux/ata_platform.h> 22#include <linux/ata_platform.h>
@@ -32,10 +32,10 @@
32 */ 32 */
33const char bfin_board_name[] = "Bluetechnix TCM BF537"; 33const char bfin_board_name[] = "Bluetechnix TCM BF537";
34 34
35#if defined(CONFIG_SPI_BFIN5XX) || defined(CONFIG_SPI_BFIN5XX_MODULE) 35#if IS_ENABLED(CONFIG_SPI_BFIN5XX)
36/* all SPI peripherals info goes here */ 36/* all SPI peripherals info goes here */
37 37
38#if defined(CONFIG_MTD_M25P80) || defined(CONFIG_MTD_M25P80_MODULE) 38#if IS_ENABLED(CONFIG_MTD_M25P80)
39static struct mtd_partition bfin_spi_flash_partitions[] = { 39static struct mtd_partition bfin_spi_flash_partitions[] = {
40 { 40 {
41 .name = "bootloader(spi)", 41 .name = "bootloader(spi)",
@@ -66,14 +66,14 @@ static struct bfin5xx_spi_chip spi_flash_chip_info = {
66}; 66};
67#endif 67#endif
68 68
69#if defined(CONFIG_MMC_SPI) || defined(CONFIG_MMC_SPI_MODULE) 69#if IS_ENABLED(CONFIG_MMC_SPI)
70static struct bfin5xx_spi_chip mmc_spi_chip_info = { 70static struct bfin5xx_spi_chip mmc_spi_chip_info = {
71 .enable_dma = 0, 71 .enable_dma = 0,
72}; 72};
73#endif 73#endif
74 74
75static struct spi_board_info bfin_spi_board_info[] __initdata = { 75static struct spi_board_info bfin_spi_board_info[] __initdata = {
76#if defined(CONFIG_MTD_M25P80) || defined(CONFIG_MTD_M25P80_MODULE) 76#if IS_ENABLED(CONFIG_MTD_M25P80)
77 { 77 {
78 /* the modalias must be the same as spi device driver name */ 78 /* the modalias must be the same as spi device driver name */
79 .modalias = "m25p80", /* Name of spi_driver for this device */ 79 .modalias = "m25p80", /* Name of spi_driver for this device */
@@ -86,7 +86,7 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
86 }, 86 },
87#endif 87#endif
88 88
89#if defined(CONFIG_SND_BF5XX_SOC_AD183X) || defined(CONFIG_SND_BF5XX_SOC_AD183X_MODULE) 89#if IS_ENABLED(CONFIG_SND_BF5XX_SOC_AD183X)
90 { 90 {
91 .modalias = "ad183x", 91 .modalias = "ad183x",
92 .max_speed_hz = 3125000, /* max spi clock (SCK) speed in HZ */ 92 .max_speed_hz = 3125000, /* max spi clock (SCK) speed in HZ */
@@ -95,7 +95,7 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
95 }, 95 },
96#endif 96#endif
97 97
98#if defined(CONFIG_MMC_SPI) || defined(CONFIG_MMC_SPI_MODULE) 98#if IS_ENABLED(CONFIG_MMC_SPI)
99 { 99 {
100 .modalias = "mmc_spi", 100 .modalias = "mmc_spi",
101 .max_speed_hz = 25000000, /* max spi clock (SCK) speed in HZ */ 101 .max_speed_hz = 25000000, /* max spi clock (SCK) speed in HZ */
@@ -144,20 +144,20 @@ static struct platform_device bfin_spi0_device = {
144}; 144};
145#endif /* spi master and devices */ 145#endif /* spi master and devices */
146 146
147#if defined(CONFIG_RTC_DRV_BFIN) || defined(CONFIG_RTC_DRV_BFIN_MODULE) 147#if IS_ENABLED(CONFIG_RTC_DRV_BFIN)
148static struct platform_device rtc_device = { 148static struct platform_device rtc_device = {
149 .name = "rtc-bfin", 149 .name = "rtc-bfin",
150 .id = -1, 150 .id = -1,
151}; 151};
152#endif 152#endif
153 153
154#if defined(CONFIG_FB_HITACHI_TX09) || defined(CONFIG_FB_HITACHI_TX09_MODULE) 154#if IS_ENABLED(CONFIG_FB_HITACHI_TX09)
155static struct platform_device hitachi_fb_device = { 155static struct platform_device hitachi_fb_device = {
156 .name = "hitachi-tx09", 156 .name = "hitachi-tx09",
157}; 157};
158#endif 158#endif
159 159
160#if defined(CONFIG_SMC91X) || defined(CONFIG_SMC91X_MODULE) 160#if IS_ENABLED(CONFIG_SMC91X)
161#include <linux/smc91x.h> 161#include <linux/smc91x.h>
162 162
163static struct smc91x_platdata smc91x_info = { 163static struct smc91x_platdata smc91x_info = {
@@ -189,7 +189,7 @@ static struct platform_device smc91x_device = {
189}; 189};
190#endif 190#endif
191 191
192#if defined(CONFIG_USB_ISP1362_HCD) || defined(CONFIG_USB_ISP1362_HCD_MODULE) 192#if IS_ENABLED(CONFIG_USB_ISP1362_HCD)
193static struct resource isp1362_hcd_resources[] = { 193static struct resource isp1362_hcd_resources[] = {
194 { 194 {
195 .start = 0x20308000, 195 .start = 0x20308000,
@@ -228,7 +228,7 @@ static struct platform_device isp1362_hcd_device = {
228}; 228};
229#endif 229#endif
230 230
231#if defined(CONFIG_USB_NET2272) || defined(CONFIG_USB_NET2272_MODULE) 231#if IS_ENABLED(CONFIG_USB_NET2272)
232static struct resource net2272_bfin_resources[] = { 232static struct resource net2272_bfin_resources[] = {
233 { 233 {
234 .start = 0x20300000, 234 .start = 0x20300000,
@@ -249,7 +249,7 @@ static struct platform_device net2272_bfin_device = {
249}; 249};
250#endif 250#endif
251 251
252#if defined(CONFIG_MTD_GPIO_ADDR) || defined(CONFIG_MTD_GPIO_ADDR_MODULE) 252#if IS_ENABLED(CONFIG_MTD_GPIO_ADDR)
253static struct mtd_partition cm_partitions[] = { 253static struct mtd_partition cm_partitions[] = {
254 { 254 {
255 .name = "bootloader(nor)", 255 .name = "bootloader(nor)",
@@ -298,7 +298,7 @@ static struct platform_device cm_flash_device = {
298}; 298};
299#endif 299#endif
300 300
301#if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE) 301#if IS_ENABLED(CONFIG_SERIAL_BFIN)
302#ifdef CONFIG_SERIAL_BFIN_UART0 302#ifdef CONFIG_SERIAL_BFIN_UART0
303static struct resource bfin_uart0_resources[] = { 303static struct resource bfin_uart0_resources[] = {
304 { 304 {
@@ -397,7 +397,7 @@ static struct platform_device bfin_uart1_device = {
397#endif 397#endif
398#endif 398#endif
399 399
400#if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE) 400#if IS_ENABLED(CONFIG_BFIN_SIR)
401#ifdef CONFIG_BFIN_SIR0 401#ifdef CONFIG_BFIN_SIR0
402static struct resource bfin_sir0_resources[] = { 402static struct resource bfin_sir0_resources[] = {
403 { 403 {
@@ -452,7 +452,7 @@ static struct platform_device bfin_sir1_device = {
452#endif 452#endif
453#endif 453#endif
454 454
455#if defined(CONFIG_I2C_BLACKFIN_TWI) || defined(CONFIG_I2C_BLACKFIN_TWI_MODULE) 455#if IS_ENABLED(CONFIG_I2C_BLACKFIN_TWI)
456static const u16 bfin_twi0_pins[] = {P_TWI0_SCL, P_TWI0_SDA, 0}; 456static const u16 bfin_twi0_pins[] = {P_TWI0_SCL, P_TWI0_SDA, 0};
457 457
458static struct resource bfin_twi0_resource[] = { 458static struct resource bfin_twi0_resource[] = {
@@ -479,7 +479,7 @@ static struct platform_device i2c_bfin_twi_device = {
479}; 479};
480#endif 480#endif
481 481
482#if defined(CONFIG_SERIAL_BFIN_SPORT) || defined(CONFIG_SERIAL_BFIN_SPORT_MODULE) 482#if IS_ENABLED(CONFIG_SERIAL_BFIN_SPORT)
483#ifdef CONFIG_SERIAL_BFIN_SPORT0_UART 483#ifdef CONFIG_SERIAL_BFIN_SPORT0_UART
484static struct resource bfin_sport0_uart_resources[] = { 484static struct resource bfin_sport0_uart_resources[] = {
485 { 485 {
@@ -550,7 +550,7 @@ static struct platform_device bfin_sport1_uart_device = {
550#endif 550#endif
551#endif 551#endif
552 552
553#if defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE) 553#if IS_ENABLED(CONFIG_BFIN_MAC)
554#include <linux/bfin_mac.h> 554#include <linux/bfin_mac.h>
555static const unsigned short bfin_mac_peripherals[] = P_MII0; 555static const unsigned short bfin_mac_peripherals[] = P_MII0;
556 556
@@ -583,7 +583,7 @@ static struct platform_device bfin_mac_device = {
583}; 583};
584#endif 584#endif
585 585
586#if defined(CONFIG_PATA_PLATFORM) || defined(CONFIG_PATA_PLATFORM_MODULE) 586#if IS_ENABLED(CONFIG_PATA_PLATFORM)
587#define PATA_INT IRQ_PF14 587#define PATA_INT IRQ_PF14
588 588
589static struct pata_platform_info bfin_pata_platform_data = { 589static struct pata_platform_info bfin_pata_platform_data = {
@@ -651,15 +651,15 @@ static struct platform_device *cm_bf537_devices[] __initdata = {
651 651
652 &bfin_dpmc, 652 &bfin_dpmc,
653 653
654#if defined(CONFIG_FB_HITACHI_TX09) || defined(CONFIG_FB_HITACHI_TX09_MODULE) 654#if IS_ENABLED(CONFIG_FB_HITACHI_TX09)
655 &hitachi_fb_device, 655 &hitachi_fb_device,
656#endif 656#endif
657 657
658#if defined(CONFIG_RTC_DRV_BFIN) || defined(CONFIG_RTC_DRV_BFIN_MODULE) 658#if IS_ENABLED(CONFIG_RTC_DRV_BFIN)
659 &rtc_device, 659 &rtc_device,
660#endif 660#endif
661 661
662#if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE) 662#if IS_ENABLED(CONFIG_SERIAL_BFIN)
663#ifdef CONFIG_SERIAL_BFIN_UART0 663#ifdef CONFIG_SERIAL_BFIN_UART0
664 &bfin_uart0_device, 664 &bfin_uart0_device,
665#endif 665#endif
@@ -668,7 +668,7 @@ static struct platform_device *cm_bf537_devices[] __initdata = {
668#endif 668#endif
669#endif 669#endif
670 670
671#if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE) 671#if IS_ENABLED(CONFIG_BFIN_SIR)
672#ifdef CONFIG_BFIN_SIR0 672#ifdef CONFIG_BFIN_SIR0
673 &bfin_sir0_device, 673 &bfin_sir0_device,
674#endif 674#endif
@@ -677,11 +677,11 @@ static struct platform_device *cm_bf537_devices[] __initdata = {
677#endif 677#endif
678#endif 678#endif
679 679
680#if defined(CONFIG_I2C_BLACKFIN_TWI) || defined(CONFIG_I2C_BLACKFIN_TWI_MODULE) 680#if IS_ENABLED(CONFIG_I2C_BLACKFIN_TWI)
681 &i2c_bfin_twi_device, 681 &i2c_bfin_twi_device,
682#endif 682#endif
683 683
684#if defined(CONFIG_SERIAL_BFIN_SPORT) || defined(CONFIG_SERIAL_BFIN_SPORT_MODULE) 684#if IS_ENABLED(CONFIG_SERIAL_BFIN_SPORT)
685#ifdef CONFIG_SERIAL_BFIN_SPORT0_UART 685#ifdef CONFIG_SERIAL_BFIN_SPORT0_UART
686 &bfin_sport0_uart_device, 686 &bfin_sport0_uart_device,
687#endif 687#endif
@@ -690,39 +690,39 @@ static struct platform_device *cm_bf537_devices[] __initdata = {
690#endif 690#endif
691#endif 691#endif
692 692
693#if defined(CONFIG_USB_ISP1362_HCD) || defined(CONFIG_USB_ISP1362_HCD_MODULE) 693#if IS_ENABLED(CONFIG_USB_ISP1362_HCD)
694 &isp1362_hcd_device, 694 &isp1362_hcd_device,
695#endif 695#endif
696 696
697#if defined(CONFIG_SMC91X) || defined(CONFIG_SMC91X_MODULE) 697#if IS_ENABLED(CONFIG_SMC91X)
698 &smc91x_device, 698 &smc91x_device,
699#endif 699#endif
700 700
701#if defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE) 701#if IS_ENABLED(CONFIG_BFIN_MAC)
702 &bfin_mii_bus, 702 &bfin_mii_bus,
703 &bfin_mac_device, 703 &bfin_mac_device,
704#endif 704#endif
705 705
706#if defined(CONFIG_USB_NET2272) || defined(CONFIG_USB_NET2272_MODULE) 706#if IS_ENABLED(CONFIG_USB_NET2272)
707 &net2272_bfin_device, 707 &net2272_bfin_device,
708#endif 708#endif
709 709
710#if defined(CONFIG_SPI_BFIN5XX) || defined(CONFIG_SPI_BFIN5XX_MODULE) 710#if IS_ENABLED(CONFIG_SPI_BFIN5XX)
711 &bfin_spi0_device, 711 &bfin_spi0_device,
712#endif 712#endif
713 713
714#if defined(CONFIG_PATA_PLATFORM) || defined(CONFIG_PATA_PLATFORM_MODULE) 714#if IS_ENABLED(CONFIG_PATA_PLATFORM)
715 &bfin_pata_device, 715 &bfin_pata_device,
716#endif 716#endif
717 717
718#if defined(CONFIG_MTD_GPIO_ADDR) || defined(CONFIG_MTD_GPIO_ADDR_MODULE) 718#if IS_ENABLED(CONFIG_MTD_GPIO_ADDR)
719 &cm_flash_device, 719 &cm_flash_device,
720#endif 720#endif
721}; 721};
722 722
723static int __init net2272_init(void) 723static int __init net2272_init(void)
724{ 724{
725#if defined(CONFIG_USB_NET2272) || defined(CONFIG_USB_NET2272_MODULE) 725#if IS_ENABLED(CONFIG_USB_NET2272)
726 int ret; 726 int ret;
727 727
728 ret = gpio_request(GPIO_PG14, "net2272"); 728 ret = gpio_request(GPIO_PG14, "net2272");
@@ -742,11 +742,11 @@ static int __init tcm_bf537_init(void)
742{ 742{
743 printk(KERN_INFO "%s(): registering device resources\n", __func__); 743 printk(KERN_INFO "%s(): registering device resources\n", __func__);
744 platform_add_devices(cm_bf537_devices, ARRAY_SIZE(cm_bf537_devices)); 744 platform_add_devices(cm_bf537_devices, ARRAY_SIZE(cm_bf537_devices));
745#if defined(CONFIG_SPI_BFIN5XX) || defined(CONFIG_SPI_BFIN5XX_MODULE) 745#if IS_ENABLED(CONFIG_SPI_BFIN5XX)
746 spi_register_board_info(bfin_spi_board_info, ARRAY_SIZE(bfin_spi_board_info)); 746 spi_register_board_info(bfin_spi_board_info, ARRAY_SIZE(bfin_spi_board_info));
747#endif 747#endif
748 748
749#if defined(CONFIG_PATA_PLATFORM) || defined(CONFIG_PATA_PLATFORM_MODULE) 749#if IS_ENABLED(CONFIG_PATA_PLATFORM)
750 irq_set_status_flags(PATA_INT, IRQ_NOAUTOEN); 750 irq_set_status_flags(PATA_INT, IRQ_NOAUTOEN);
751#endif 751#endif
752 752
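
For readers unfamiliar with the helper this patch substitutes in: IS_ENABLED() (from include/linux/kconfig.h) evaluates to 1 when the named Kconfig symbol is selected either built-in (=y) or as a module (=m), which is why every two-term preprocessor guard above collapses to a single test. A minimal sketch of the before/after pattern, using CONFIG_SPI_BFIN5XX and the bfin_spi0_device registration only as an illustrative example drawn from the hunks above, not as the complete board code:

    /* before: both the built-in and the modular symbol had to be spelled out */
    #if defined(CONFIG_SPI_BFIN5XX) || defined(CONFIG_SPI_BFIN5XX_MODULE)
    static struct platform_device bfin_spi0_device = { /* ... */ };
    #endif

    /* after: IS_ENABLED() covers both CONFIG_FOO=y and CONFIG_FOO=m in one check */
    #if IS_ENABLED(CONFIG_SPI_BFIN5XX)
    static struct platform_device bfin_spi0_device = { /* ... */ };
    #endif

The behaviour is unchanged; the conversion only shortens the guards and keeps them consistent across the board files touched below.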
diff --git a/arch/blackfin/mach-bf538/boards/ezkit.c b/arch/blackfin/mach-bf538/boards/ezkit.c
index 755f0dc12010..ae2fcbb00119 100644
--- a/arch/blackfin/mach-bf538/boards/ezkit.c
+++ b/arch/blackfin/mach-bf538/boards/ezkit.c
@@ -33,14 +33,14 @@ const char bfin_board_name[] = "ADI BF538-EZKIT";
33 */ 33 */
34 34
35 35
36#if defined(CONFIG_RTC_DRV_BFIN) || defined(CONFIG_RTC_DRV_BFIN_MODULE) 36#if IS_ENABLED(CONFIG_RTC_DRV_BFIN)
37static struct platform_device rtc_device = { 37static struct platform_device rtc_device = {
38 .name = "rtc-bfin", 38 .name = "rtc-bfin",
39 .id = -1, 39 .id = -1,
40}; 40};
41#endif /* CONFIG_RTC_DRV_BFIN */ 41#endif /* CONFIG_RTC_DRV_BFIN */
42 42
43#if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE) 43#if IS_ENABLED(CONFIG_SERIAL_BFIN)
44#ifdef CONFIG_SERIAL_BFIN_UART0 44#ifdef CONFIG_SERIAL_BFIN_UART0
45static struct resource bfin_uart0_resources[] = { 45static struct resource bfin_uart0_resources[] = {
46 { 46 {
@@ -199,7 +199,7 @@ static struct platform_device bfin_uart2_device = {
199#endif /* CONFIG_SERIAL_BFIN_UART2 */ 199#endif /* CONFIG_SERIAL_BFIN_UART2 */
200#endif /* CONFIG_SERIAL_BFIN */ 200#endif /* CONFIG_SERIAL_BFIN */
201 201
202#if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE) 202#if IS_ENABLED(CONFIG_BFIN_SIR)
203#ifdef CONFIG_BFIN_SIR0 203#ifdef CONFIG_BFIN_SIR0
204static struct resource bfin_sir0_resources[] = { 204static struct resource bfin_sir0_resources[] = {
205 { 205 {
@@ -277,7 +277,7 @@ static struct platform_device bfin_sir2_device = {
277#endif /* CONFIG_BFIN_SIR2 */ 277#endif /* CONFIG_BFIN_SIR2 */
278#endif /* CONFIG_BFIN_SIR */ 278#endif /* CONFIG_BFIN_SIR */
279 279
280#if defined(CONFIG_SERIAL_BFIN_SPORT) || defined(CONFIG_SERIAL_BFIN_SPORT_MODULE) 280#if IS_ENABLED(CONFIG_SERIAL_BFIN_SPORT)
281#ifdef CONFIG_SERIAL_BFIN_SPORT0_UART 281#ifdef CONFIG_SERIAL_BFIN_SPORT0_UART
282static struct resource bfin_sport0_uart_resources[] = { 282static struct resource bfin_sport0_uart_resources[] = {
283 { 283 {
@@ -416,7 +416,7 @@ static struct platform_device bfin_sport3_uart_device = {
416#endif /* CONFIG_SERIAL_BFIN_SPORT3_UART */ 416#endif /* CONFIG_SERIAL_BFIN_SPORT3_UART */
417#endif /* CONFIG_SERIAL_BFIN_SPORT */ 417#endif /* CONFIG_SERIAL_BFIN_SPORT */
418 418
419#if defined(CONFIG_CAN_BFIN) || defined(CONFIG_CAN_BFIN_MODULE) 419#if IS_ENABLED(CONFIG_CAN_BFIN)
420static unsigned short bfin_can_peripherals[] = { 420static unsigned short bfin_can_peripherals[] = {
421 P_CAN0_RX, P_CAN0_TX, 0 421 P_CAN0_RX, P_CAN0_TX, 0
422}; 422};
@@ -458,7 +458,7 @@ static struct platform_device bfin_can_device = {
458 * USB-LAN EzExtender board 458 * USB-LAN EzExtender board
459 * Driver needs to know address, irq and flag pin. 459 * Driver needs to know address, irq and flag pin.
460 */ 460 */
461#if defined(CONFIG_SMC91X) || defined(CONFIG_SMC91X_MODULE) 461#if IS_ENABLED(CONFIG_SMC91X)
462#include <linux/smc91x.h> 462#include <linux/smc91x.h>
463 463
464static struct smc91x_platdata smc91x_info = { 464static struct smc91x_platdata smc91x_info = {
@@ -490,10 +490,9 @@ static struct platform_device smc91x_device = {
490}; 490};
491#endif /* CONFIG_SMC91X */ 491#endif /* CONFIG_SMC91X */
492 492
493#if defined(CONFIG_SPI_BFIN5XX) || defined(CONFIG_SPI_BFIN5XX_MODULE) 493#if IS_ENABLED(CONFIG_SPI_BFIN5XX)
494/* all SPI peripherals info goes here */ 494/* all SPI peripherals info goes here */
495#if defined(CONFIG_MTD_M25P80) \ 495#if IS_ENABLED(CONFIG_MTD_M25P80)
496 || defined(CONFIG_MTD_M25P80_MODULE)
497/* SPI flash chip (m25p16) */ 496/* SPI flash chip (m25p16) */
498static struct mtd_partition bfin_spi_flash_partitions[] = { 497static struct mtd_partition bfin_spi_flash_partitions[] = {
499 { 498 {
@@ -521,7 +520,7 @@ static struct bfin5xx_spi_chip spi_flash_chip_info = {
521#endif /* CONFIG_MTD_M25P80 */ 520#endif /* CONFIG_MTD_M25P80 */
522#endif /* CONFIG_SPI_BFIN5XX */ 521#endif /* CONFIG_SPI_BFIN5XX */
523 522
524#if defined(CONFIG_TOUCHSCREEN_AD7879) || defined(CONFIG_TOUCHSCREEN_AD7879_MODULE) 523#if IS_ENABLED(CONFIG_TOUCHSCREEN_AD7879)
525#include <linux/spi/ad7879.h> 524#include <linux/spi/ad7879.h>
526static const struct ad7879_platform_data bfin_ad7879_ts_info = { 525static const struct ad7879_platform_data bfin_ad7879_ts_info = {
527 .model = 7879, /* Model = AD7879 */ 526 .model = 7879, /* Model = AD7879 */
@@ -538,7 +537,7 @@ static const struct ad7879_platform_data bfin_ad7879_ts_info = {
538}; 537};
539#endif /* CONFIG_TOUCHSCREEN_AD7879 */ 538#endif /* CONFIG_TOUCHSCREEN_AD7879 */
540 539
541#if defined(CONFIG_FB_BFIN_LQ035Q1) || defined(CONFIG_FB_BFIN_LQ035Q1_MODULE) 540#if IS_ENABLED(CONFIG_FB_BFIN_LQ035Q1)
542#include <asm/bfin-lq035q1.h> 541#include <asm/bfin-lq035q1.h>
543 542
544static struct bfin_lq035q1fb_disp_info bfin_lq035q1_data = { 543static struct bfin_lq035q1fb_disp_info bfin_lq035q1_data = {
@@ -568,8 +567,7 @@ static struct platform_device bfin_lq035q1_device = {
568#endif /* CONFIG_FB_BFIN_LQ035Q1 */ 567#endif /* CONFIG_FB_BFIN_LQ035Q1 */
569 568
570static struct spi_board_info bf538_spi_board_info[] __initdata = { 569static struct spi_board_info bf538_spi_board_info[] __initdata = {
571#if defined(CONFIG_MTD_M25P80) \ 570#if IS_ENABLED(CONFIG_MTD_M25P80)
572 || defined(CONFIG_MTD_M25P80_MODULE)
573 { 571 {
574 /* the modalias must be the same as spi device driver name */ 572 /* the modalias must be the same as spi device driver name */
575 .modalias = "m25p80", /* Name of spi_driver for this device */ 573 .modalias = "m25p80", /* Name of spi_driver for this device */
@@ -581,7 +579,7 @@ static struct spi_board_info bf538_spi_board_info[] __initdata = {
581 .mode = SPI_MODE_3, 579 .mode = SPI_MODE_3,
582 }, 580 },
583#endif /* CONFIG_MTD_M25P80 */ 581#endif /* CONFIG_MTD_M25P80 */
584#if defined(CONFIG_TOUCHSCREEN_AD7879_SPI) || defined(CONFIG_TOUCHSCREEN_AD7879_SPI_MODULE) 582#if IS_ENABLED(CONFIG_TOUCHSCREEN_AD7879_SPI)
585 { 583 {
586 .modalias = "ad7879", 584 .modalias = "ad7879",
587 .platform_data = &bfin_ad7879_ts_info, 585 .platform_data = &bfin_ad7879_ts_info,
@@ -592,7 +590,7 @@ static struct spi_board_info bf538_spi_board_info[] __initdata = {
592 .mode = SPI_CPHA | SPI_CPOL, 590 .mode = SPI_CPHA | SPI_CPOL,
593 }, 591 },
594#endif /* CONFIG_TOUCHSCREEN_AD7879_SPI */ 592#endif /* CONFIG_TOUCHSCREEN_AD7879_SPI */
595#if defined(CONFIG_FB_BFIN_LQ035Q1) || defined(CONFIG_FB_BFIN_LQ035Q1_MODULE) 593#if IS_ENABLED(CONFIG_FB_BFIN_LQ035Q1)
596 { 594 {
597 .modalias = "bfin-lq035q1-spi", 595 .modalias = "bfin-lq035q1-spi",
598 .max_speed_hz = 20000000, /* max spi clock (SCK) speed in HZ */ 596 .max_speed_hz = 20000000, /* max spi clock (SCK) speed in HZ */
@@ -601,7 +599,7 @@ static struct spi_board_info bf538_spi_board_info[] __initdata = {
601 .mode = SPI_CPHA | SPI_CPOL, 599 .mode = SPI_CPHA | SPI_CPOL,
602 }, 600 },
603#endif /* CONFIG_FB_BFIN_LQ035Q1 */ 601#endif /* CONFIG_FB_BFIN_LQ035Q1 */
604#if defined(CONFIG_SPI_SPIDEV) || defined(CONFIG_SPI_SPIDEV_MODULE) 602#if IS_ENABLED(CONFIG_SPI_SPIDEV)
605 { 603 {
606 .modalias = "spidev", 604 .modalias = "spidev",
607 .max_speed_hz = 3125000, /* max spi clock (SCK) speed in HZ */ 605 .max_speed_hz = 3125000, /* max spi clock (SCK) speed in HZ */
@@ -717,7 +715,7 @@ static struct platform_device bf538_spi_master2 = {
717 }, 715 },
718}; 716};
719 717
720#if defined(CONFIG_I2C_BLACKFIN_TWI) || defined(CONFIG_I2C_BLACKFIN_TWI_MODULE) 718#if IS_ENABLED(CONFIG_I2C_BLACKFIN_TWI)
721static const u16 bfin_twi0_pins[] = {P_TWI0_SCL, P_TWI0_SDA, 0}; 719static const u16 bfin_twi0_pins[] = {P_TWI0_SCL, P_TWI0_SDA, 0};
722 720
723static struct resource bfin_twi0_resource[] = { 721static struct resource bfin_twi0_resource[] = {
@@ -766,7 +764,7 @@ static struct platform_device i2c_bfin_twi1_device = {
766}; 764};
767#endif /* CONFIG_I2C_BLACKFIN_TWI */ 765#endif /* CONFIG_I2C_BLACKFIN_TWI */
768 766
769#if defined(CONFIG_KEYBOARD_GPIO) || defined(CONFIG_KEYBOARD_GPIO_MODULE) 767#if IS_ENABLED(CONFIG_KEYBOARD_GPIO)
770#include <linux/gpio_keys.h> 768#include <linux/gpio_keys.h>
771 769
772static struct gpio_keys_button bfin_gpio_keys_table[] = { 770static struct gpio_keys_button bfin_gpio_keys_table[] = {
@@ -814,7 +812,7 @@ static struct platform_device bfin_dpmc = {
814 }, 812 },
815}; 813};
816 814
817#if defined(CONFIG_MTD_PHYSMAP) || defined(CONFIG_MTD_PHYSMAP_MODULE) 815#if IS_ENABLED(CONFIG_MTD_PHYSMAP)
818static struct mtd_partition ezkit_partitions[] = { 816static struct mtd_partition ezkit_partitions[] = {
819 { 817 {
820 .name = "bootloader(nor)", 818 .name = "bootloader(nor)",
@@ -839,7 +837,7 @@ static struct physmap_flash_data ezkit_flash_data = {
839 837
840static struct resource ezkit_flash_resource = { 838static struct resource ezkit_flash_resource = {
841 .start = 0x20000000, 839 .start = 0x20000000,
842#if defined(CONFIG_SMC91X) || defined(CONFIG_SMC91X_MODULE) 840#if IS_ENABLED(CONFIG_SMC91X)
843 .end = 0x202fffff, 841 .end = 0x202fffff,
844#else 842#else
845 .end = 0x203fffff, 843 .end = 0x203fffff,
@@ -862,11 +860,11 @@ static struct platform_device *cm_bf538_devices[] __initdata = {
862 860
863 &bfin_dpmc, 861 &bfin_dpmc,
864 862
865#if defined(CONFIG_RTC_DRV_BFIN) || defined(CONFIG_RTC_DRV_BFIN_MODULE) 863#if IS_ENABLED(CONFIG_RTC_DRV_BFIN)
866 &rtc_device, 864 &rtc_device,
867#endif 865#endif
868 866
869#if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE) 867#if IS_ENABLED(CONFIG_SERIAL_BFIN)
870#ifdef CONFIG_SERIAL_BFIN_UART0 868#ifdef CONFIG_SERIAL_BFIN_UART0
871 &bfin_uart0_device, 869 &bfin_uart0_device,
872#endif 870#endif
@@ -878,18 +876,18 @@ static struct platform_device *cm_bf538_devices[] __initdata = {
878#endif 876#endif
879#endif 877#endif
880 878
881#if defined(CONFIG_SPI_BFIN5XX) || defined(CONFIG_SPI_BFIN5XX_MODULE) 879#if IS_ENABLED(CONFIG_SPI_BFIN5XX)
882 &bf538_spi_master0, 880 &bf538_spi_master0,
883 &bf538_spi_master1, 881 &bf538_spi_master1,
884 &bf538_spi_master2, 882 &bf538_spi_master2,
885#endif 883#endif
886 884
887#if defined(CONFIG_I2C_BLACKFIN_TWI) || defined(CONFIG_I2C_BLACKFIN_TWI_MODULE) 885#if IS_ENABLED(CONFIG_I2C_BLACKFIN_TWI)
888 &i2c_bfin_twi0_device, 886 &i2c_bfin_twi0_device,
889 &i2c_bfin_twi1_device, 887 &i2c_bfin_twi1_device,
890#endif 888#endif
891 889
892#if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE) 890#if IS_ENABLED(CONFIG_BFIN_SIR)
893#ifdef CONFIG_BFIN_SIR0 891#ifdef CONFIG_BFIN_SIR0
894 &bfin_sir0_device, 892 &bfin_sir0_device,
895#endif 893#endif
@@ -901,7 +899,7 @@ static struct platform_device *cm_bf538_devices[] __initdata = {
901#endif 899#endif
902#endif 900#endif
903 901
904#if defined(CONFIG_SERIAL_BFIN_SPORT) || defined(CONFIG_SERIAL_BFIN_SPORT_MODULE) 902#if IS_ENABLED(CONFIG_SERIAL_BFIN_SPORT)
905#ifdef CONFIG_SERIAL_BFIN_SPORT0_UART 903#ifdef CONFIG_SERIAL_BFIN_SPORT0_UART
906 &bfin_sport0_uart_device, 904 &bfin_sport0_uart_device,
907#endif 905#endif
@@ -916,23 +914,23 @@ static struct platform_device *cm_bf538_devices[] __initdata = {
916#endif 914#endif
917#endif 915#endif
918 916
919#if defined(CONFIG_CAN_BFIN) || defined(CONFIG_CAN_BFIN_MODULE) 917#if IS_ENABLED(CONFIG_CAN_BFIN)
920 &bfin_can_device, 918 &bfin_can_device,
921#endif 919#endif
922 920
923#if defined(CONFIG_SMC91X) || defined(CONFIG_SMC91X_MODULE) 921#if IS_ENABLED(CONFIG_SMC91X)
924 &smc91x_device, 922 &smc91x_device,
925#endif 923#endif
926 924
927#if defined(CONFIG_FB_BFIN_LQ035Q1) || defined(CONFIG_FB_BFIN_LQ035Q1_MODULE) 925#if IS_ENABLED(CONFIG_FB_BFIN_LQ035Q1)
928 &bfin_lq035q1_device, 926 &bfin_lq035q1_device,
929#endif 927#endif
930 928
931#if defined(CONFIG_KEYBOARD_GPIO) || defined(CONFIG_KEYBOARD_GPIO_MODULE) 929#if IS_ENABLED(CONFIG_KEYBOARD_GPIO)
932 &bfin_device_gpiokeys, 930 &bfin_device_gpiokeys,
933#endif 931#endif
934 932
935#if defined(CONFIG_MTD_PHYSMAP) || defined(CONFIG_MTD_PHYSMAP_MODULE) 933#if IS_ENABLED(CONFIG_MTD_PHYSMAP)
936 &ezkit_flash_device, 934 &ezkit_flash_device,
937#endif 935#endif
938}; 936};
@@ -942,7 +940,7 @@ static int __init ezkit_init(void)
942 printk(KERN_INFO "%s(): registering device resources\n", __func__); 940 printk(KERN_INFO "%s(): registering device resources\n", __func__);
943 platform_add_devices(cm_bf538_devices, ARRAY_SIZE(cm_bf538_devices)); 941 platform_add_devices(cm_bf538_devices, ARRAY_SIZE(cm_bf538_devices));
944 942
945#if defined(CONFIG_SPI_BFIN5XX) || defined(CONFIG_SPI_BFIN5XX_MODULE) 943#if IS_ENABLED(CONFIG_SPI_BFIN5XX)
946 spi_register_board_info(bf538_spi_board_info, 944 spi_register_board_info(bf538_spi_board_info,
947 ARRAY_SIZE(bf538_spi_board_info)); 945 ARRAY_SIZE(bf538_spi_board_info));
948#endif 946#endif
diff --git a/arch/blackfin/mach-bf548/boards/cm_bf548.c b/arch/blackfin/mach-bf548/boards/cm_bf548.c
index e92543362f35..6d5ffdead067 100644
--- a/arch/blackfin/mach-bf548/boards/cm_bf548.c
+++ b/arch/blackfin/mach-bf548/boards/cm_bf548.c
@@ -37,7 +37,7 @@ const char bfin_board_name[] = "Bluetechnix CM-BF548";
37 * Driver needs to know address, irq and flag pin. 37 * Driver needs to know address, irq and flag pin.
38 */ 38 */
39 39
40#if defined(CONFIG_FB_BF54X_LQ043) || defined(CONFIG_FB_BF54X_LQ043_MODULE) 40#if IS_ENABLED(CONFIG_FB_BF54X_LQ043)
41 41
42#include <mach/bf54x-lq043.h> 42#include <mach/bf54x-lq043.h>
43 43
@@ -69,7 +69,7 @@ static struct platform_device bf54x_lq043_device = {
69}; 69};
70#endif 70#endif
71 71
72#if defined(CONFIG_KEYBOARD_BFIN) || defined(CONFIG_KEYBOARD_BFIN_MODULE) 72#if IS_ENABLED(CONFIG_KEYBOARD_BFIN)
73static unsigned int bf548_keymap[] = { 73static unsigned int bf548_keymap[] = {
74 KEYVAL(0, 0, KEY_ENTER), 74 KEYVAL(0, 0, KEY_ENTER),
75 KEYVAL(0, 1, KEY_HELP), 75 KEYVAL(0, 1, KEY_HELP),
@@ -119,14 +119,14 @@ static struct platform_device bf54x_kpad_device = {
119}; 119};
120#endif 120#endif
121 121
122#if defined(CONFIG_RTC_DRV_BFIN) || defined(CONFIG_RTC_DRV_BFIN_MODULE) 122#if IS_ENABLED(CONFIG_RTC_DRV_BFIN)
123static struct platform_device rtc_device = { 123static struct platform_device rtc_device = {
124 .name = "rtc-bfin", 124 .name = "rtc-bfin",
125 .id = -1, 125 .id = -1,
126}; 126};
127#endif 127#endif
128 128
129#if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE) 129#if IS_ENABLED(CONFIG_SERIAL_BFIN)
130#ifdef CONFIG_SERIAL_BFIN_UART0 130#ifdef CONFIG_SERIAL_BFIN_UART0
131static struct resource bfin_uart0_resources[] = { 131static struct resource bfin_uart0_resources[] = {
132 { 132 {
@@ -353,7 +353,7 @@ static struct platform_device bfin_uart3_device = {
353#endif 353#endif
354#endif 354#endif
355 355
356#if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE) 356#if IS_ENABLED(CONFIG_BFIN_SIR)
357#ifdef CONFIG_BFIN_SIR0 357#ifdef CONFIG_BFIN_SIR0
358static struct resource bfin_sir0_resources[] = { 358static struct resource bfin_sir0_resources[] = {
359 { 359 {
@@ -456,7 +456,7 @@ static struct platform_device bfin_sir3_device = {
456#endif 456#endif
457#endif 457#endif
458 458
459#if defined(CONFIG_SMSC911X) || defined(CONFIG_SMSC911X_MODULE) 459#if IS_ENABLED(CONFIG_SMSC911X)
460#include <linux/smsc911x.h> 460#include <linux/smsc911x.h>
461 461
462static struct resource smsc911x_resources[] = { 462static struct resource smsc911x_resources[] = {
@@ -491,7 +491,7 @@ static struct platform_device smsc911x_device = {
491}; 491};
492#endif 492#endif
493 493
494#if defined(CONFIG_USB_MUSB_HDRC) || defined(CONFIG_USB_MUSB_HDRC_MODULE) 494#if IS_ENABLED(CONFIG_USB_MUSB_HDRC)
495static struct resource musb_resources[] = { 495static struct resource musb_resources[] = {
496 [0] = { 496 [0] = {
497 .start = 0xFFC03C00, 497 .start = 0xFFC03C00,
@@ -553,7 +553,7 @@ static struct platform_device musb_device = {
553}; 553};
554#endif 554#endif
555 555
556#if defined(CONFIG_SERIAL_BFIN_SPORT) || defined(CONFIG_SERIAL_BFIN_SPORT_MODULE) 556#if IS_ENABLED(CONFIG_SERIAL_BFIN_SPORT)
557#ifdef CONFIG_SERIAL_BFIN_SPORT0_UART 557#ifdef CONFIG_SERIAL_BFIN_SPORT0_UART
558static struct resource bfin_sport0_uart_resources[] = { 558static struct resource bfin_sport0_uart_resources[] = {
559 { 559 {
@@ -692,7 +692,7 @@ static struct platform_device bfin_sport3_uart_device = {
692#endif 692#endif
693#endif 693#endif
694 694
695#if defined(CONFIG_PATA_BF54X) || defined(CONFIG_PATA_BF54X_MODULE) 695#if IS_ENABLED(CONFIG_PATA_BF54X)
696static struct resource bfin_atapi_resources[] = { 696static struct resource bfin_atapi_resources[] = {
697 { 697 {
698 .start = 0xFFC03800, 698 .start = 0xFFC03800,
@@ -714,7 +714,7 @@ static struct platform_device bfin_atapi_device = {
714}; 714};
715#endif 715#endif
716 716
717#if defined(CONFIG_MTD_NAND_BF5XX) || defined(CONFIG_MTD_NAND_BF5XX_MODULE) 717#if IS_ENABLED(CONFIG_MTD_NAND_BF5XX)
718static struct mtd_partition partition_info[] = { 718static struct mtd_partition partition_info[] = {
719 { 719 {
720 .name = "linux kernel(nand)", 720 .name = "linux kernel(nand)",
@@ -760,7 +760,7 @@ static struct platform_device bf5xx_nand_device = {
760}; 760};
761#endif 761#endif
762 762
763#if defined(CONFIG_SDH_BFIN) || defined(CONFIG_SDH_BFIN_MODULE) 763#if IS_ENABLED(CONFIG_SDH_BFIN)
764static struct bfin_sd_host bfin_sdh_data = { 764static struct bfin_sd_host bfin_sdh_data = {
765 .dma_chan = CH_SDH, 765 .dma_chan = CH_SDH,
766 .irq_int0 = IRQ_SDH_MASK0, 766 .irq_int0 = IRQ_SDH_MASK0,
@@ -776,7 +776,7 @@ static struct platform_device bf54x_sdh_device = {
776}; 776};
777#endif 777#endif
778 778
779#if defined(CONFIG_CAN_BFIN) || defined(CONFIG_CAN_BFIN_MODULE) 779#if IS_ENABLED(CONFIG_CAN_BFIN)
780static unsigned short bfin_can_peripherals[] = { 780static unsigned short bfin_can_peripherals[] = {
781 P_CAN0_RX, P_CAN0_TX, 0 781 P_CAN0_RX, P_CAN0_TX, 0
782}; 782};
@@ -814,7 +814,7 @@ static struct platform_device bfin_can_device = {
814}; 814};
815#endif 815#endif
816 816
817#if defined(CONFIG_MTD_PHYSMAP) || defined(CONFIG_MTD_PHYSMAP_MODULE) 817#if IS_ENABLED(CONFIG_MTD_PHYSMAP)
818static struct mtd_partition para_partitions[] = { 818static struct mtd_partition para_partitions[] = {
819 { 819 {
820 .name = "bootloader(nor)", 820 .name = "bootloader(nor)",
@@ -854,10 +854,9 @@ static struct platform_device para_flash_device = {
854}; 854};
855#endif 855#endif
856 856
857#if defined(CONFIG_SPI_BFIN5XX) || defined(CONFIG_SPI_BFIN5XX_MODULE) 857#if IS_ENABLED(CONFIG_SPI_BFIN5XX)
858/* all SPI peripherals info goes here */ 858/* all SPI peripherals info goes here */
859#if defined(CONFIG_MTD_M25P80) \ 859#if IS_ENABLED(CONFIG_MTD_M25P80)
860 || defined(CONFIG_MTD_M25P80_MODULE)
861/* SPI flash chip (m25p16) */ 860/* SPI flash chip (m25p16) */
862static struct mtd_partition bfin_spi_flash_partitions[] = { 861static struct mtd_partition bfin_spi_flash_partitions[] = {
863 { 862 {
@@ -884,7 +883,7 @@ static struct bfin5xx_spi_chip spi_flash_chip_info = {
884}; 883};
885#endif 884#endif
886 885
887#if defined(CONFIG_TOUCHSCREEN_AD7877) || defined(CONFIG_TOUCHSCREEN_AD7877_MODULE) 886#if IS_ENABLED(CONFIG_TOUCHSCREEN_AD7877)
888static const struct ad7877_platform_data bfin_ad7877_ts_info = { 887static const struct ad7877_platform_data bfin_ad7877_ts_info = {
889 .model = 7877, 888 .model = 7877,
890 .vref_delay_usecs = 50, /* internal, no capacitor */ 889 .vref_delay_usecs = 50, /* internal, no capacitor */
@@ -901,8 +900,7 @@ static const struct ad7877_platform_data bfin_ad7877_ts_info = {
901#endif 900#endif
902 901
903static struct spi_board_info bf54x_spi_board_info[] __initdata = { 902static struct spi_board_info bf54x_spi_board_info[] __initdata = {
904#if defined(CONFIG_MTD_M25P80) \ 903#if IS_ENABLED(CONFIG_MTD_M25P80)
905 || defined(CONFIG_MTD_M25P80_MODULE)
906 { 904 {
907 /* the modalias must be the same as spi device driver name */ 905 /* the modalias must be the same as spi device driver name */
908 .modalias = "m25p80", /* Name of spi_driver for this device */ 906 .modalias = "m25p80", /* Name of spi_driver for this device */
@@ -914,7 +912,7 @@ static struct spi_board_info bf54x_spi_board_info[] __initdata = {
914 .mode = SPI_MODE_3, 912 .mode = SPI_MODE_3,
915 }, 913 },
916#endif 914#endif
917#if defined(CONFIG_TOUCHSCREEN_AD7877) || defined(CONFIG_TOUCHSCREEN_AD7877_MODULE) 915#if IS_ENABLED(CONFIG_TOUCHSCREEN_AD7877)
918{ 916{
919 .modalias = "ad7877", 917 .modalias = "ad7877",
920 .platform_data = &bfin_ad7877_ts_info, 918 .platform_data = &bfin_ad7877_ts_info,
@@ -924,7 +922,7 @@ static struct spi_board_info bf54x_spi_board_info[] __initdata = {
924 .chip_select = 2, 922 .chip_select = 2,
925}, 923},
926#endif 924#endif
927#if defined(CONFIG_SPI_SPIDEV) || defined(CONFIG_SPI_SPIDEV_MODULE) 925#if IS_ENABLED(CONFIG_SPI_SPIDEV)
928 { 926 {
929 .modalias = "spidev", 927 .modalias = "spidev",
930 .max_speed_hz = 3125000, /* max spi clock (SCK) speed in HZ */ 928 .max_speed_hz = 3125000, /* max spi clock (SCK) speed in HZ */
@@ -1006,7 +1004,7 @@ static struct platform_device bf54x_spi_master1 = {
1006}; 1004};
1007#endif /* spi master and devices */ 1005#endif /* spi master and devices */
1008 1006
1009#if defined(CONFIG_I2C_BLACKFIN_TWI) || defined(CONFIG_I2C_BLACKFIN_TWI_MODULE) 1007#if IS_ENABLED(CONFIG_I2C_BLACKFIN_TWI)
1010static const u16 bfin_twi0_pins[] = {P_TWI0_SCL, P_TWI0_SDA, 0}; 1008static const u16 bfin_twi0_pins[] = {P_TWI0_SCL, P_TWI0_SDA, 0};
1011 1009
1012static struct resource bfin_twi0_resource[] = { 1010static struct resource bfin_twi0_resource[] = {
@@ -1060,7 +1058,7 @@ static struct platform_device i2c_bfin_twi1_device = {
1060#endif 1058#endif
1061#endif 1059#endif
1062 1060
1063#if defined(CONFIG_KEYBOARD_GPIO) || defined(CONFIG_KEYBOARD_GPIO_MODULE) 1061#if IS_ENABLED(CONFIG_KEYBOARD_GPIO)
1064#include <linux/gpio_keys.h> 1062#include <linux/gpio_keys.h>
1065 1063
1066static struct gpio_keys_button bfin_gpio_keys_table[] = { 1064static struct gpio_keys_button bfin_gpio_keys_table[] = {
@@ -1112,11 +1110,11 @@ static struct platform_device *cm_bf548_devices[] __initdata = {
1112 1110
1113 &bfin_dpmc, 1111 &bfin_dpmc,
1114 1112
1115#if defined(CONFIG_RTC_DRV_BFIN) || defined(CONFIG_RTC_DRV_BFIN_MODULE) 1113#if IS_ENABLED(CONFIG_RTC_DRV_BFIN)
1116 &rtc_device, 1114 &rtc_device,
1117#endif 1115#endif
1118 1116
1119#if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE) 1117#if IS_ENABLED(CONFIG_SERIAL_BFIN)
1120#ifdef CONFIG_SERIAL_BFIN_UART0 1118#ifdef CONFIG_SERIAL_BFIN_UART0
1121 &bfin_uart0_device, 1119 &bfin_uart0_device,
1122#endif 1120#endif
@@ -1131,7 +1129,7 @@ static struct platform_device *cm_bf548_devices[] __initdata = {
1131#endif 1129#endif
1132#endif 1130#endif
1133 1131
1134#if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE) 1132#if IS_ENABLED(CONFIG_BFIN_SIR)
1135#ifdef CONFIG_BFIN_SIR0 1133#ifdef CONFIG_BFIN_SIR0
1136 &bfin_sir0_device, 1134 &bfin_sir0_device,
1137#endif 1135#endif
@@ -1146,19 +1144,19 @@ static struct platform_device *cm_bf548_devices[] __initdata = {
1146#endif 1144#endif
1147#endif 1145#endif
1148 1146
1149#if defined(CONFIG_FB_BF54X_LQ043) || defined(CONFIG_FB_BF54X_LQ043_MODULE) 1147#if IS_ENABLED(CONFIG_FB_BF54X_LQ043)
1150 &bf54x_lq043_device, 1148 &bf54x_lq043_device,
1151#endif 1149#endif
1152 1150
1153#if defined(CONFIG_SMSC911X) || defined(CONFIG_SMSC911X_MODULE) 1151#if IS_ENABLED(CONFIG_SMSC911X)
1154 &smsc911x_device, 1152 &smsc911x_device,
1155#endif 1153#endif
1156 1154
1157#if defined(CONFIG_USB_MUSB_HDRC) || defined(CONFIG_USB_MUSB_HDRC_MODULE) 1155#if IS_ENABLED(CONFIG_USB_MUSB_HDRC)
1158 &musb_device, 1156 &musb_device,
1159#endif 1157#endif
1160 1158
1161#if defined(CONFIG_SERIAL_BFIN_SPORT) || defined(CONFIG_SERIAL_BFIN_SPORT_MODULE) 1159#if IS_ENABLED(CONFIG_SERIAL_BFIN_SPORT)
1162#ifdef CONFIG_SERIAL_BFIN_SPORT0_UART 1160#ifdef CONFIG_SERIAL_BFIN_SPORT0_UART
1163 &bfin_sport0_uart_device, 1161 &bfin_sport0_uart_device,
1164#endif 1162#endif
@@ -1173,43 +1171,43 @@ static struct platform_device *cm_bf548_devices[] __initdata = {
1173#endif 1171#endif
1174#endif 1172#endif
1175 1173
1176#if defined(CONFIG_PATA_BF54X) || defined(CONFIG_PATA_BF54X_MODULE) 1174#if IS_ENABLED(CONFIG_PATA_BF54X)
1177 &bfin_atapi_device, 1175 &bfin_atapi_device,
1178#endif 1176#endif
1179 1177
1180#if defined(CONFIG_MTD_NAND_BF5XX) || defined(CONFIG_MTD_NAND_BF5XX_MODULE) 1178#if IS_ENABLED(CONFIG_MTD_NAND_BF5XX)
1181 &bf5xx_nand_device, 1179 &bf5xx_nand_device,
1182#endif 1180#endif
1183 1181
1184#if defined(CONFIG_SDH_BFIN) || defined(CONFIG_SDH_BFIN_MODULE) 1182#if IS_ENABLED(CONFIG_SDH_BFIN)
1185 &bf54x_sdh_device, 1183 &bf54x_sdh_device,
1186#endif 1184#endif
1187 1185
1188#if defined(CONFIG_SPI_BFIN5XX) || defined(CONFIG_SPI_BFIN5XX_MODULE) 1186#if IS_ENABLED(CONFIG_SPI_BFIN5XX)
1189 &bf54x_spi_master0, 1187 &bf54x_spi_master0,
1190 &bf54x_spi_master1, 1188 &bf54x_spi_master1,
1191#endif 1189#endif
1192 1190
1193#if defined(CONFIG_KEYBOARD_BFIN) || defined(CONFIG_KEYBOARD_BFIN_MODULE) 1191#if IS_ENABLED(CONFIG_KEYBOARD_BFIN)
1194 &bf54x_kpad_device, 1192 &bf54x_kpad_device,
1195#endif 1193#endif
1196 1194
1197#if defined(CONFIG_I2C_BLACKFIN_TWI) || defined(CONFIG_I2C_BLACKFIN_TWI_MODULE) 1195#if IS_ENABLED(CONFIG_I2C_BLACKFIN_TWI)
1198 &i2c_bfin_twi0_device, 1196 &i2c_bfin_twi0_device,
1199#if !defined(CONFIG_BF542) 1197#if !defined(CONFIG_BF542)
1200 &i2c_bfin_twi1_device, 1198 &i2c_bfin_twi1_device,
1201#endif 1199#endif
1202#endif 1200#endif
1203 1201
1204#if defined(CONFIG_KEYBOARD_GPIO) || defined(CONFIG_KEYBOARD_GPIO_MODULE) 1202#if IS_ENABLED(CONFIG_KEYBOARD_GPIO)
1205 &bfin_device_gpiokeys, 1203 &bfin_device_gpiokeys,
1206#endif 1204#endif
1207 1205
1208#if defined(CONFIG_MTD_PHYSMAP) || defined(CONFIG_MTD_PHYSMAP_MODULE) 1206#if IS_ENABLED(CONFIG_MTD_PHYSMAP)
1209 &para_flash_device, 1207 &para_flash_device,
1210#endif 1208#endif
1211 1209
1212#if defined(CONFIG_CAN_BFIN) || defined(CONFIG_CAN_BFIN_MODULE) 1210#if IS_ENABLED(CONFIG_CAN_BFIN)
1213 &bfin_can_device, 1211 &bfin_can_device,
1214#endif 1212#endif
1215 1213
@@ -1220,7 +1218,7 @@ static int __init cm_bf548_init(void)
1220 printk(KERN_INFO "%s(): registering device resources\n", __func__); 1218 printk(KERN_INFO "%s(): registering device resources\n", __func__);
1221 platform_add_devices(cm_bf548_devices, ARRAY_SIZE(cm_bf548_devices)); 1219 platform_add_devices(cm_bf548_devices, ARRAY_SIZE(cm_bf548_devices));
1222 1220
1223#if defined(CONFIG_SPI_BFIN5XX) || defined(CONFIG_SPI_BFIN5XX_MODULE) 1221#if IS_ENABLED(CONFIG_SPI_BFIN5XX)
1224 spi_register_board_info(bf54x_spi_board_info, 1222 spi_register_board_info(bf54x_spi_board_info,
1225 ARRAY_SIZE(bf54x_spi_board_info)); 1223 ARRAY_SIZE(bf54x_spi_board_info));
1226#endif 1224#endif
diff --git a/arch/blackfin/mach-bf548/boards/ezkit.c b/arch/blackfin/mach-bf548/boards/ezkit.c
index d495000b81a0..90138e6112c1 100644
--- a/arch/blackfin/mach-bf548/boards/ezkit.c
+++ b/arch/blackfin/mach-bf548/boards/ezkit.c
@@ -41,7 +41,7 @@ const char bfin_board_name[] = "ADI BF548-EZKIT";
41 * Driver needs to know address, irq and flag pin. 41 * Driver needs to know address, irq and flag pin.
42 */ 42 */
43 43
44#if defined(CONFIG_USB_ISP1760_HCD) || defined(CONFIG_USB_ISP1760_HCD_MODULE) 44#if IS_ENABLED(CONFIG_USB_ISP1760_HCD)
45#include <linux/usb/isp1760.h> 45#include <linux/usb/isp1760.h>
46static struct resource bfin_isp1760_resources[] = { 46static struct resource bfin_isp1760_resources[] = {
47 [0] = { 47 [0] = {
@@ -76,7 +76,7 @@ static struct platform_device bfin_isp1760_device = {
76}; 76};
77#endif 77#endif
78 78
79#if defined(CONFIG_FB_BF54X_LQ043) || defined(CONFIG_FB_BF54X_LQ043_MODULE) 79#if IS_ENABLED(CONFIG_FB_BF54X_LQ043)
80 80
81#include <mach/bf54x-lq043.h> 81#include <mach/bf54x-lq043.h>
82 82
@@ -108,7 +108,7 @@ static struct platform_device bf54x_lq043_device = {
108}; 108};
109#endif 109#endif
110 110
111#if defined(CONFIG_KEYBOARD_BFIN) || defined(CONFIG_KEYBOARD_BFIN_MODULE) 111#if IS_ENABLED(CONFIG_KEYBOARD_BFIN)
112static const unsigned int bf548_keymap[] = { 112static const unsigned int bf548_keymap[] = {
113 KEYVAL(0, 0, KEY_ENTER), 113 KEYVAL(0, 0, KEY_ENTER),
114 KEYVAL(0, 1, KEY_HELP), 114 KEYVAL(0, 1, KEY_HELP),
@@ -158,7 +158,7 @@ static struct platform_device bf54x_kpad_device = {
158}; 158};
159#endif 159#endif
160 160
161#if defined(CONFIG_INPUT_BFIN_ROTARY) || defined(CONFIG_INPUT_BFIN_ROTARY_MODULE) 161#if IS_ENABLED(CONFIG_INPUT_BFIN_ROTARY)
162#include <asm/bfin_rotary.h> 162#include <asm/bfin_rotary.h>
163 163
164static struct bfin_rotary_platform_data bfin_rotary_data = { 164static struct bfin_rotary_platform_data bfin_rotary_data = {
@@ -190,7 +190,7 @@ static struct platform_device bfin_rotary_device = {
190}; 190};
191#endif 191#endif
192 192
193#if defined(CONFIG_INPUT_ADXL34X) || defined(CONFIG_INPUT_ADXL34X_MODULE) 193#if IS_ENABLED(CONFIG_INPUT_ADXL34X)
194#include <linux/input/adxl34x.h> 194#include <linux/input/adxl34x.h>
195static const struct adxl34x_platform_data adxl34x_info = { 195static const struct adxl34x_platform_data adxl34x_info = {
196 .x_axis_offset = 0, 196 .x_axis_offset = 0,
@@ -229,14 +229,14 @@ static const struct adxl34x_platform_data adxl34x_info = {
229}; 229};
230#endif 230#endif
231 231
232#if defined(CONFIG_RTC_DRV_BFIN) || defined(CONFIG_RTC_DRV_BFIN_MODULE) 232#if IS_ENABLED(CONFIG_RTC_DRV_BFIN)
233static struct platform_device rtc_device = { 233static struct platform_device rtc_device = {
234 .name = "rtc-bfin", 234 .name = "rtc-bfin",
235 .id = -1, 235 .id = -1,
236}; 236};
237#endif 237#endif
238 238
239#if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE) 239#if IS_ENABLED(CONFIG_SERIAL_BFIN)
240#ifdef CONFIG_SERIAL_BFIN_UART0 240#ifdef CONFIG_SERIAL_BFIN_UART0
241static struct resource bfin_uart0_resources[] = { 241static struct resource bfin_uart0_resources[] = {
242 { 242 {
@@ -491,7 +491,7 @@ static struct platform_device bfin_uart3_device = {
491#endif 491#endif
492#endif 492#endif
493 493
494#if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE) 494#if IS_ENABLED(CONFIG_BFIN_SIR)
495#ifdef CONFIG_BFIN_SIR0 495#ifdef CONFIG_BFIN_SIR0
496static struct resource bfin_sir0_resources[] = { 496static struct resource bfin_sir0_resources[] = {
497 { 497 {
@@ -594,7 +594,7 @@ static struct platform_device bfin_sir3_device = {
594#endif 594#endif
595#endif 595#endif
596 596
597#if defined(CONFIG_SMSC911X) || defined(CONFIG_SMSC911X_MODULE) 597#if IS_ENABLED(CONFIG_SMSC911X)
598#include <linux/smsc911x.h> 598#include <linux/smsc911x.h>
599 599
600static struct resource smsc911x_resources[] = { 600static struct resource smsc911x_resources[] = {
@@ -629,7 +629,7 @@ static struct platform_device smsc911x_device = {
629}; 629};
630#endif 630#endif
631 631
632#if defined(CONFIG_USB_MUSB_HDRC) || defined(CONFIG_USB_MUSB_HDRC_MODULE) 632#if IS_ENABLED(CONFIG_USB_MUSB_HDRC)
633static struct resource musb_resources[] = { 633static struct resource musb_resources[] = {
634 [0] = { 634 [0] = {
635 .start = 0xFFC03C00, 635 .start = 0xFFC03C00,
@@ -691,7 +691,7 @@ static struct platform_device musb_device = {
691}; 691};
692#endif 692#endif
693 693
694#if defined(CONFIG_SERIAL_BFIN_SPORT) || defined(CONFIG_SERIAL_BFIN_SPORT_MODULE) 694#if IS_ENABLED(CONFIG_SERIAL_BFIN_SPORT)
695#ifdef CONFIG_SERIAL_BFIN_SPORT0_UART 695#ifdef CONFIG_SERIAL_BFIN_SPORT0_UART
696static struct resource bfin_sport0_uart_resources[] = { 696static struct resource bfin_sport0_uart_resources[] = {
697 { 697 {
@@ -830,7 +830,7 @@ static struct platform_device bfin_sport3_uart_device = {
830#endif 830#endif
831#endif 831#endif
832 832
833#if defined(CONFIG_CAN_BFIN) || defined(CONFIG_CAN_BFIN_MODULE) 833#if IS_ENABLED(CONFIG_CAN_BFIN)
834 834
835static unsigned short bfin_can0_peripherals[] = { 835static unsigned short bfin_can0_peripherals[] = {
836 P_CAN0_RX, P_CAN0_TX, 0 836 P_CAN0_RX, P_CAN0_TX, 0
@@ -908,7 +908,7 @@ static struct platform_device bfin_can1_device = {
908 908
909#endif 909#endif
910 910
911#if defined(CONFIG_PATA_BF54X) || defined(CONFIG_PATA_BF54X_MODULE) 911#if IS_ENABLED(CONFIG_PATA_BF54X)
912static struct resource bfin_atapi_resources[] = { 912static struct resource bfin_atapi_resources[] = {
913 { 913 {
914 .start = 0xFFC03800, 914 .start = 0xFFC03800,
@@ -930,7 +930,7 @@ static struct platform_device bfin_atapi_device = {
930}; 930};
931#endif 931#endif
932 932
933#if defined(CONFIG_MTD_NAND_BF5XX) || defined(CONFIG_MTD_NAND_BF5XX_MODULE) 933#if IS_ENABLED(CONFIG_MTD_NAND_BF5XX)
934static struct mtd_partition partition_info[] = { 934static struct mtd_partition partition_info[] = {
935 { 935 {
936 .name = "bootloader(nand)", 936 .name = "bootloader(nand)",
@@ -980,7 +980,7 @@ static struct platform_device bf5xx_nand_device = {
980}; 980};
981#endif 981#endif
982 982
983#if defined(CONFIG_SDH_BFIN) || defined(CONFIG_SDH_BFIN_MODULE) 983#if IS_ENABLED(CONFIG_SDH_BFIN)
984 984
985static struct bfin_sd_host bfin_sdh_data = { 985static struct bfin_sd_host bfin_sdh_data = {
986 .dma_chan = CH_SDH, 986 .dma_chan = CH_SDH,
@@ -997,7 +997,7 @@ static struct platform_device bf54x_sdh_device = {
997}; 997};
998#endif 998#endif
999 999
1000#if defined(CONFIG_MTD_PHYSMAP) || defined(CONFIG_MTD_PHYSMAP_MODULE) 1000#if IS_ENABLED(CONFIG_MTD_PHYSMAP)
1001static struct mtd_partition ezkit_partitions[] = { 1001static struct mtd_partition ezkit_partitions[] = {
1002 { 1002 {
1003 .name = "bootloader(nor)", 1003 .name = "bootloader(nor)",
@@ -1045,8 +1045,7 @@ static struct platform_device ezkit_flash_device = {
1045}; 1045};
1046#endif 1046#endif
1047 1047
1048#if defined(CONFIG_MTD_M25P80) \ 1048#if IS_ENABLED(CONFIG_MTD_M25P80)
1049 || defined(CONFIG_MTD_M25P80_MODULE)
1050/* SPI flash chip (m25p16) */ 1049/* SPI flash chip (m25p16) */
1051static struct mtd_partition bfin_spi_flash_partitions[] = { 1050static struct mtd_partition bfin_spi_flash_partitions[] = {
1052 { 1051 {
@@ -1073,7 +1072,7 @@ static struct bfin5xx_spi_chip spi_flash_chip_info = {
1073}; 1072};
1074#endif 1073#endif
1075 1074
1076#if defined(CONFIG_TOUCHSCREEN_AD7877) || defined(CONFIG_TOUCHSCREEN_AD7877_MODULE) 1075#if IS_ENABLED(CONFIG_TOUCHSCREEN_AD7877)
1077static const struct ad7877_platform_data bfin_ad7877_ts_info = { 1076static const struct ad7877_platform_data bfin_ad7877_ts_info = {
1078 .model = 7877, 1077 .model = 7877,
1079 .vref_delay_usecs = 50, /* internal, no capacitor */ 1078 .vref_delay_usecs = 50, /* internal, no capacitor */
@@ -1495,8 +1494,7 @@ static struct platform_device bfin_gpj_device = {
1495#endif 1494#endif
1496 1495
1497static struct spi_board_info bfin_spi_board_info[] __initdata = { 1496static struct spi_board_info bfin_spi_board_info[] __initdata = {
1498#if defined(CONFIG_MTD_M25P80) \ 1497#if IS_ENABLED(CONFIG_MTD_M25P80)
1499 || defined(CONFIG_MTD_M25P80_MODULE)
1500 { 1498 {
1501 /* the modalias must be the same as spi device driver name */ 1499 /* the modalias must be the same as spi device driver name */
1502 .modalias = "m25p80", /* Name of spi_driver for this device */ 1500 .modalias = "m25p80", /* Name of spi_driver for this device */
@@ -1508,8 +1506,7 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
1508 .mode = SPI_MODE_3, 1506 .mode = SPI_MODE_3,
1509 }, 1507 },
1510#endif 1508#endif
1511#if defined(CONFIG_SND_BF5XX_SOC_AD183X) \ 1509#if IS_ENABLED(CONFIG_SND_BF5XX_SOC_AD183X)
1512 || defined(CONFIG_SND_BF5XX_SOC_AD183X_MODULE)
1513 { 1510 {
1514 .modalias = "ad183x", 1511 .modalias = "ad183x",
1515 .max_speed_hz = 3125000, /* max spi clock (SCK) speed in HZ */ 1512 .max_speed_hz = 3125000, /* max spi clock (SCK) speed in HZ */
@@ -1517,7 +1514,7 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
1517 .chip_select = MAX_CTRL_CS + GPIO_PG6, /* SPI_SSEL2 */ 1514 .chip_select = MAX_CTRL_CS + GPIO_PG6, /* SPI_SSEL2 */
1518 }, 1515 },
1519#endif 1516#endif
1520#if defined(CONFIG_TOUCHSCREEN_AD7877) || defined(CONFIG_TOUCHSCREEN_AD7877_MODULE) 1517#if IS_ENABLED(CONFIG_TOUCHSCREEN_AD7877)
1521 { 1518 {
1522 .modalias = "ad7877", 1519 .modalias = "ad7877",
1523 .platform_data = &bfin_ad7877_ts_info, 1520 .platform_data = &bfin_ad7877_ts_info,
@@ -1527,7 +1524,7 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
1527 .chip_select = MAX_CTRL_CS + GPIO_PE5, /* SPI_SSEL2 */ 1524 .chip_select = MAX_CTRL_CS + GPIO_PE5, /* SPI_SSEL2 */
1528 }, 1525 },
1529#endif 1526#endif
1530#if defined(CONFIG_SPI_SPIDEV) || defined(CONFIG_SPI_SPIDEV_MODULE) 1527#if IS_ENABLED(CONFIG_SPI_SPIDEV)
1531 { 1528 {
1532 .modalias = "spidev", 1529 .modalias = "spidev",
1533 .max_speed_hz = 3125000, /* max spi clock (SCK) speed in HZ */ 1530 .max_speed_hz = 3125000, /* max spi clock (SCK) speed in HZ */
@@ -1535,7 +1532,7 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
1535 .chip_select = MAX_CTRL_CS + GPIO_PE4, /* SPI_SSEL1 */ 1532 .chip_select = MAX_CTRL_CS + GPIO_PE4, /* SPI_SSEL1 */
1536 }, 1533 },
1537#endif 1534#endif
1538#if defined(CONFIG_INPUT_ADXL34X_SPI) || defined(CONFIG_INPUT_ADXL34X_SPI_MODULE) 1535#if IS_ENABLED(CONFIG_INPUT_ADXL34X_SPI)
1539 { 1536 {
1540 .modalias = "adxl34x", 1537 .modalias = "adxl34x",
1541 .platform_data = &adxl34x_info, 1538 .platform_data = &adxl34x_info,
@@ -1547,7 +1544,7 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
1547 }, 1544 },
1548#endif 1545#endif
1549}; 1546};
1550#if defined(CONFIG_SPI_BFIN5XX) || defined(CONFIG_SPI_BFIN5XX_MODULE) 1547#if IS_ENABLED(CONFIG_SPI_BFIN5XX)
1551/* SPI (0) */ 1548/* SPI (0) */
1552static struct resource bfin_spi0_resource[] = { 1549static struct resource bfin_spi0_resource[] = {
1553 [0] = { 1550 [0] = {
@@ -1620,8 +1617,7 @@ static struct platform_device bf54x_spi_master1 = {
1620}; 1617};
1621#endif /* spi master and devices */ 1618#endif /* spi master and devices */
1622 1619
1623#if defined(CONFIG_VIDEO_BLACKFIN_CAPTURE) \ 1620#if IS_ENABLED(CONFIG_VIDEO_BLACKFIN_CAPTURE)
1624 || defined(CONFIG_VIDEO_BLACKFIN_CAPTURE_MODULE)
1625#include <linux/videodev2.h> 1621#include <linux/videodev2.h>
1626#include <media/blackfin/bfin_capture.h> 1622#include <media/blackfin/bfin_capture.h>
1627#include <media/blackfin/ppi.h> 1623#include <media/blackfin/ppi.h>
@@ -1641,8 +1637,7 @@ static const struct ppi_info ppi_info = {
1641 .pin_req = ppi_req, 1637 .pin_req = ppi_req,
1642}; 1638};
1643 1639
1644#if defined(CONFIG_VIDEO_VS6624) \ 1640#if IS_ENABLED(CONFIG_VIDEO_VS6624)
1645 || defined(CONFIG_VIDEO_VS6624_MODULE)
1646static struct v4l2_input vs6624_inputs[] = { 1641static struct v4l2_input vs6624_inputs[] = {
1647 { 1642 {
1648 .index = 0, 1643 .index = 0,
@@ -1687,7 +1682,7 @@ static struct platform_device bfin_capture_device = {
1687}; 1682};
1688#endif 1683#endif
1689 1684
1690#if defined(CONFIG_I2C_BLACKFIN_TWI) || defined(CONFIG_I2C_BLACKFIN_TWI_MODULE) 1685#if IS_ENABLED(CONFIG_I2C_BLACKFIN_TWI)
1691static const u16 bfin_twi0_pins[] = {P_TWI0_SCL, P_TWI0_SDA, 0}; 1686static const u16 bfin_twi0_pins[] = {P_TWI0_SCL, P_TWI0_SDA, 0};
1692 1687
1693static struct resource bfin_twi0_resource[] = { 1688static struct resource bfin_twi0_resource[] = {
@@ -1742,7 +1737,7 @@ static struct platform_device i2c_bfin_twi1_device = {
1742#endif 1737#endif
1743 1738
1744static struct i2c_board_info __initdata bfin_i2c_board_info0[] = { 1739static struct i2c_board_info __initdata bfin_i2c_board_info0[] = {
1745#if defined(CONFIG_SND_SOC_SSM2602) || defined(CONFIG_SND_SOC_SSM2602_MODULE) 1740#if IS_ENABLED(CONFIG_SND_SOC_SSM2602)
1746 { 1741 {
1747 I2C_BOARD_INFO("ssm2602", 0x1b), 1742 I2C_BOARD_INFO("ssm2602", 0x1b),
1748 }, 1743 },
@@ -1751,25 +1746,25 @@ static struct i2c_board_info __initdata bfin_i2c_board_info0[] = {
1751 1746
1752#if !defined(CONFIG_BF542) /* The BF542 only has 1 TWI */ 1747#if !defined(CONFIG_BF542) /* The BF542 only has 1 TWI */
1753static struct i2c_board_info __initdata bfin_i2c_board_info1[] = { 1748static struct i2c_board_info __initdata bfin_i2c_board_info1[] = {
1754#if defined(CONFIG_BFIN_TWI_LCD) || defined(CONFIG_BFIN_TWI_LCD_MODULE) 1749#if IS_ENABLED(CONFIG_BFIN_TWI_LCD)
1755 { 1750 {
1756 I2C_BOARD_INFO("pcf8574_lcd", 0x22), 1751 I2C_BOARD_INFO("pcf8574_lcd", 0x22),
1757 }, 1752 },
1758#endif 1753#endif
1759#if defined(CONFIG_INPUT_PCF8574) || defined(CONFIG_INPUT_PCF8574_MODULE) 1754#if IS_ENABLED(CONFIG_INPUT_PCF8574)
1760 { 1755 {
1761 I2C_BOARD_INFO("pcf8574_keypad", 0x27), 1756 I2C_BOARD_INFO("pcf8574_keypad", 0x27),
1762 .irq = 212, 1757 .irq = 212,
1763 }, 1758 },
1764#endif 1759#endif
1765#if defined(CONFIG_INPUT_ADXL34X_I2C) || defined(CONFIG_INPUT_ADXL34X_I2C_MODULE) 1760#if IS_ENABLED(CONFIG_INPUT_ADXL34X_I2C)
1766 { 1761 {
1767 I2C_BOARD_INFO("adxl34x", 0x53), 1762 I2C_BOARD_INFO("adxl34x", 0x53),
1768 .irq = IRQ_PC5, 1763 .irq = IRQ_PC5,
1769 .platform_data = (void *)&adxl34x_info, 1764 .platform_data = (void *)&adxl34x_info,
1770 }, 1765 },
1771#endif 1766#endif
1772#if defined(CONFIG_BFIN_TWI_LCD) || defined(CONFIG_BFIN_TWI_LCD_MODULE) 1767#if IS_ENABLED(CONFIG_BFIN_TWI_LCD)
1773 { 1768 {
1774 I2C_BOARD_INFO("ad5252", 0x2f), 1769 I2C_BOARD_INFO("ad5252", 0x2f),
1775 }, 1770 },
@@ -1777,7 +1772,7 @@ static struct i2c_board_info __initdata bfin_i2c_board_info1[] = {
1777}; 1772};
1778#endif 1773#endif
1779 1774
1780#if defined(CONFIG_KEYBOARD_GPIO) || defined(CONFIG_KEYBOARD_GPIO_MODULE) 1775#if IS_ENABLED(CONFIG_KEYBOARD_GPIO)
1781#include <linux/gpio_keys.h> 1776#include <linux/gpio_keys.h>
1782 1777
1783static struct gpio_keys_button bfin_gpio_keys_table[] = { 1778static struct gpio_keys_button bfin_gpio_keys_table[] = {
@@ -1828,8 +1823,8 @@ static struct platform_device bfin_dpmc = {
1828 }, 1823 },
1829}; 1824};
1830 1825
1831#if defined(CONFIG_SND_BF5XX_I2S) || defined(CONFIG_SND_BF5XX_I2S_MODULE) || \ 1826#if IS_ENABLED(CONFIG_SND_BF5XX_I2S) || \
1832 defined(CONFIG_SND_BF5XX_AC97) || defined(CONFIG_SND_BF5XX_AC97_MODULE) 1827 IS_ENABLED(CONFIG_SND_BF5XX_AC97)
1833 1828
1834#define SPORT_REQ(x) \ 1829#define SPORT_REQ(x) \
1835 [x] = {P_SPORT##x##_TFS, P_SPORT##x##_DTPRI, P_SPORT##x##_TSCLK, \ 1830 [x] = {P_SPORT##x##_TFS, P_SPORT##x##_DTPRI, P_SPORT##x##_TSCLK, \
@@ -1889,35 +1884,35 @@ static struct resource bfin_snd_resources[][4] = {
1889}; 1884};
1890#endif 1885#endif
1891 1886
1892#if defined(CONFIG_SND_BF5XX_I2S) || defined(CONFIG_SND_BF5XX_I2S_MODULE) 1887#if IS_ENABLED(CONFIG_SND_BF5XX_I2S)
1893static struct platform_device bfin_i2s_pcm = { 1888static struct platform_device bfin_i2s_pcm = {
1894 .name = "bfin-i2s-pcm-audio", 1889 .name = "bfin-i2s-pcm-audio",
1895 .id = -1, 1890 .id = -1,
1896}; 1891};
1897#endif 1892#endif
1898 1893
1899#if defined(CONFIG_SND_BF5XX_AC97) || defined(CONFIG_SND_BF5XX_AC97_MODULE) 1894#if IS_ENABLED(CONFIG_SND_BF5XX_AC97)
1900static struct platform_device bfin_ac97_pcm = { 1895static struct platform_device bfin_ac97_pcm = {
1901 .name = "bfin-ac97-pcm-audio", 1896 .name = "bfin-ac97-pcm-audio",
1902 .id = -1, 1897 .id = -1,
1903}; 1898};
1904#endif 1899#endif
1905 1900
1906#if defined(CONFIG_SND_BF5XX_SOC_AD73311) || defined(CONFIG_SND_BF5XX_SOC_AD73311_MODULE) 1901#if IS_ENABLED(CONFIG_SND_BF5XX_SOC_AD73311)
1907static struct platform_device bfin_ad73311_codec_device = { 1902static struct platform_device bfin_ad73311_codec_device = {
1908 .name = "ad73311", 1903 .name = "ad73311",
1909 .id = -1, 1904 .id = -1,
1910}; 1905};
1911#endif 1906#endif
1912 1907
1913#if defined(CONFIG_SND_BF5XX_SOC_AD1980) || defined(CONFIG_SND_BF5XX_SOC_AD1980_MODULE) 1908#if IS_ENABLED(CONFIG_SND_BF5XX_SOC_AD1980)
1914static struct platform_device bfin_ad1980_codec_device = { 1909static struct platform_device bfin_ad1980_codec_device = {
1915 .name = "ad1980", 1910 .name = "ad1980",
1916 .id = -1, 1911 .id = -1,
1917}; 1912};
1918#endif 1913#endif
1919 1914
1920#if defined(CONFIG_SND_BF5XX_SOC_I2S) || defined(CONFIG_SND_BF5XX_SOC_I2S_MODULE) 1915#if IS_ENABLED(CONFIG_SND_BF5XX_SOC_I2S)
1921static struct platform_device bfin_i2s = { 1916static struct platform_device bfin_i2s = {
1922 .name = "bfin-i2s", 1917 .name = "bfin-i2s",
1923 .id = CONFIG_SND_BF5XX_SPORT_NUM, 1918 .id = CONFIG_SND_BF5XX_SPORT_NUM,
@@ -1929,7 +1924,7 @@ static struct platform_device bfin_i2s = {
1929}; 1924};
1930#endif 1925#endif
1931 1926
1932#if defined(CONFIG_SND_BF5XX_SOC_AC97) || defined(CONFIG_SND_BF5XX_SOC_AC97_MODULE) 1927#if IS_ENABLED(CONFIG_SND_BF5XX_SOC_AC97)
1933static struct platform_device bfin_ac97 = { 1928static struct platform_device bfin_ac97 = {
1934 .name = "bfin-ac97", 1929 .name = "bfin-ac97",
1935 .id = CONFIG_SND_BF5XX_SPORT_NUM, 1930 .id = CONFIG_SND_BF5XX_SPORT_NUM,
@@ -1962,11 +1957,11 @@ static struct platform_device *ezkit_devices[] __initdata = {
1962 &bfin_gpj_device, 1957 &bfin_gpj_device,
1963#endif 1958#endif
1964 1959
1965#if defined(CONFIG_RTC_DRV_BFIN) || defined(CONFIG_RTC_DRV_BFIN_MODULE) 1960#if IS_ENABLED(CONFIG_RTC_DRV_BFIN)
1966 &rtc_device, 1961 &rtc_device,
1967#endif 1962#endif
1968 1963
1969#if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE) 1964#if IS_ENABLED(CONFIG_SERIAL_BFIN)
1970#ifdef CONFIG_SERIAL_BFIN_UART0 1965#ifdef CONFIG_SERIAL_BFIN_UART0
1971 &bfin_uart0_device, 1966 &bfin_uart0_device,
1972#endif 1967#endif
@@ -1981,7 +1976,7 @@ static struct platform_device *ezkit_devices[] __initdata = {
1981#endif 1976#endif
1982#endif 1977#endif
1983 1978
1984#if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE) 1979#if IS_ENABLED(CONFIG_BFIN_SIR)
1985#ifdef CONFIG_BFIN_SIR0 1980#ifdef CONFIG_BFIN_SIR0
1986 &bfin_sir0_device, 1981 &bfin_sir0_device,
1987#endif 1982#endif
@@ -1996,23 +1991,23 @@ static struct platform_device *ezkit_devices[] __initdata = {
1996#endif 1991#endif
1997#endif 1992#endif
1998 1993
1999#if defined(CONFIG_FB_BF54X_LQ043) || defined(CONFIG_FB_BF54X_LQ043_MODULE) 1994#if IS_ENABLED(CONFIG_FB_BF54X_LQ043)
2000 &bf54x_lq043_device, 1995 &bf54x_lq043_device,
2001#endif 1996#endif
2002 1997
2003#if defined(CONFIG_SMSC911X) || defined(CONFIG_SMSC911X_MODULE) 1998#if IS_ENABLED(CONFIG_SMSC911X)
2004 &smsc911x_device, 1999 &smsc911x_device,
2005#endif 2000#endif
2006 2001
2007#if defined(CONFIG_USB_MUSB_HDRC) || defined(CONFIG_USB_MUSB_HDRC_MODULE) 2002#if IS_ENABLED(CONFIG_USB_MUSB_HDRC)
2008 &musb_device, 2003 &musb_device,
2009#endif 2004#endif
2010 2005
2011#if defined(CONFIG_USB_ISP1760_HCD) || defined(CONFIG_USB_ISP1760_HCD_MODULE) 2006#if IS_ENABLED(CONFIG_USB_ISP1760_HCD)
2012 &bfin_isp1760_device, 2007 &bfin_isp1760_device,
2013#endif 2008#endif
2014 2009
2015#if defined(CONFIG_SERIAL_BFIN_SPORT) || defined(CONFIG_SERIAL_BFIN_SPORT_MODULE) 2010#if IS_ENABLED(CONFIG_SERIAL_BFIN_SPORT)
2016#ifdef CONFIG_SERIAL_BFIN_SPORT0_UART 2011#ifdef CONFIG_SERIAL_BFIN_SPORT0_UART
2017 &bfin_sport0_uart_device, 2012 &bfin_sport0_uart_device,
2018#endif 2013#endif
@@ -2027,72 +2022,71 @@ static struct platform_device *ezkit_devices[] __initdata = {
2027#endif 2022#endif
2028#endif 2023#endif
2029 2024
2030#if defined(CONFIG_CAN_BFIN) || defined(CONFIG_CAN_BFIN_MODULE) 2025#if IS_ENABLED(CONFIG_CAN_BFIN)
2031 &bfin_can0_device, 2026 &bfin_can0_device,
2032 &bfin_can1_device, 2027 &bfin_can1_device,
2033#endif 2028#endif
2034 2029
2035#if defined(CONFIG_PATA_BF54X) || defined(CONFIG_PATA_BF54X_MODULE) 2030#if IS_ENABLED(CONFIG_PATA_BF54X)
2036 &bfin_atapi_device, 2031 &bfin_atapi_device,
2037#endif 2032#endif
2038 2033
2039#if defined(CONFIG_MTD_NAND_BF5XX) || defined(CONFIG_MTD_NAND_BF5XX_MODULE) 2034#if IS_ENABLED(CONFIG_MTD_NAND_BF5XX)
2040 &bf5xx_nand_device, 2035 &bf5xx_nand_device,
2041#endif 2036#endif
2042 2037
2043#if defined(CONFIG_SDH_BFIN) || defined(CONFIG_SDH_BFIN_MODULE) 2038#if IS_ENABLED(CONFIG_SDH_BFIN)
2044 &bf54x_sdh_device, 2039 &bf54x_sdh_device,
2045#endif 2040#endif
2046 2041
2047#if defined(CONFIG_SPI_BFIN5XX) || defined(CONFIG_SPI_BFIN5XX_MODULE) 2042#if IS_ENABLED(CONFIG_SPI_BFIN5XX)
2048 &bf54x_spi_master0, 2043 &bf54x_spi_master0,
2049 &bf54x_spi_master1, 2044 &bf54x_spi_master1,
2050#endif 2045#endif
2051#if defined(CONFIG_VIDEO_BLACKFIN_CAPTURE) \ 2046#if IS_ENABLED(CONFIG_VIDEO_BLACKFIN_CAPTURE)
2052 || defined(CONFIG_VIDEO_BLACKFIN_CAPTURE_MODULE)
2053 &bfin_capture_device, 2047 &bfin_capture_device,
2054#endif 2048#endif
2055 2049
2056#if defined(CONFIG_KEYBOARD_BFIN) || defined(CONFIG_KEYBOARD_BFIN_MODULE) 2050#if IS_ENABLED(CONFIG_KEYBOARD_BFIN)
2057 &bf54x_kpad_device, 2051 &bf54x_kpad_device,
2058#endif 2052#endif
2059 2053
2060#if defined(CONFIG_INPUT_BFIN_ROTARY) || defined(CONFIG_INPUT_BFIN_ROTARY_MODULE) 2054#if IS_ENABLED(CONFIG_INPUT_BFIN_ROTARY)
2061 &bfin_rotary_device, 2055 &bfin_rotary_device,
2062#endif 2056#endif
2063 2057
2064#if defined(CONFIG_I2C_BLACKFIN_TWI) || defined(CONFIG_I2C_BLACKFIN_TWI_MODULE) 2058#if IS_ENABLED(CONFIG_I2C_BLACKFIN_TWI)
2065 &i2c_bfin_twi0_device, 2059 &i2c_bfin_twi0_device,
2066#if !defined(CONFIG_BF542) 2060#if !defined(CONFIG_BF542)
2067 &i2c_bfin_twi1_device, 2061 &i2c_bfin_twi1_device,
2068#endif 2062#endif
2069#endif 2063#endif
2070 2064
2071#if defined(CONFIG_KEYBOARD_GPIO) || defined(CONFIG_KEYBOARD_GPIO_MODULE) 2065#if IS_ENABLED(CONFIG_KEYBOARD_GPIO)
2072 &bfin_device_gpiokeys, 2066 &bfin_device_gpiokeys,
2073#endif 2067#endif
2074 2068
2075#if defined(CONFIG_MTD_PHYSMAP) || defined(CONFIG_MTD_PHYSMAP_MODULE) 2069#if IS_ENABLED(CONFIG_MTD_PHYSMAP)
2076 &ezkit_flash_device, 2070 &ezkit_flash_device,
2077#endif 2071#endif
2078 2072
2079#if defined(CONFIG_SND_BF5XX_I2S) || defined(CONFIG_SND_BF5XX_I2S_MODULE) 2073#if IS_ENABLED(CONFIG_SND_BF5XX_I2S)
2080 &bfin_i2s_pcm, 2074 &bfin_i2s_pcm,
2081#endif 2075#endif
2082 2076
2083#if defined(CONFIG_SND_BF5XX_AC97) || defined(CONFIG_SND_BF5XX_AC97_MODULE) 2077#if IS_ENABLED(CONFIG_SND_BF5XX_AC97)
2084 &bfin_ac97_pcm, 2078 &bfin_ac97_pcm,
2085#endif 2079#endif
2086 2080
2087#if defined(CONFIG_SND_BF5XX_SOC_AD1980) || defined(CONFIG_SND_BF5XX_SOC_AD1980_MODULE) 2081#if IS_ENABLED(CONFIG_SND_BF5XX_SOC_AD1980)
2088 &bfin_ad1980_codec_device, 2082 &bfin_ad1980_codec_device,
2089#endif 2083#endif
2090 2084
2091#if defined(CONFIG_SND_BF5XX_I2S) || defined(CONFIG_SND_BF5XX_I2S_MODULE) 2085#if IS_ENABLED(CONFIG_SND_BF5XX_I2S)
2092 &bfin_i2s, 2086 &bfin_i2s,
2093#endif 2087#endif
2094 2088
2095#if defined(CONFIG_SND_BF5XX_AC97) || defined(CONFIG_SND_BF5XX_AC97_MODULE) 2089#if IS_ENABLED(CONFIG_SND_BF5XX_AC97)
2096 &bfin_ac97, 2090 &bfin_ac97,
2097#endif 2091#endif
2098}; 2092};
diff --git a/arch/blackfin/mach-bf548/include/mach/defBF544.h b/arch/blackfin/mach-bf548/include/mach/defBF544.h
index 329b2c58228b..018ebfc27f5a 100644
--- a/arch/blackfin/mach-bf548/include/mach/defBF544.h
+++ b/arch/blackfin/mach-bf548/include/mach/defBF544.h
@@ -601,36 +601,6 @@
601#define GU_TRANS 0xff00 /* Transparent Color - G/U Component */ 601#define GU_TRANS 0xff00 /* Transparent Color - G/U Component */
602#define BV_TRANS 0xff0000 /* Transparent Color - B/V Component */ 602#define BV_TRANS 0xff0000 /* Transparent Color - B/V Component */
603 603
604/* Bit masks for HOST_CONTROL */
605
606#define HOST_EN 0x1 /* Host Enable */
607#define HOST_END 0x2 /* Host Endianess */
608#define DATA_SIZE 0x4 /* Data Size */
609#define HOST_RST 0x8 /* Host Reset */
610#define HRDY_OVR 0x20 /* Host Ready Override */
611#define INT_MODE 0x40 /* Interrupt Mode */
612#define BT_EN 0x80 /* Bus Timeout Enable */
613#define EHW 0x100 /* Enable Host Write */
614#define EHR 0x200 /* Enable Host Read */
615#define BDR 0x400 /* Burst DMA Requests */
616
617/* Bit masks for HOST_STATUS */
618
619#define DMA_READY 0x1 /* DMA Ready */
620#define FIFOFULL 0x2 /* FIFO Full */
621#define FIFOEMPTY 0x4 /* FIFO Empty */
622#define DMA_COMPLETE 0x8 /* DMA Complete */
623#define HSHK 0x10 /* Host Handshake */
624#define HSTIMEOUT 0x20 /* Host Timeout */
625#define HIRQ 0x40 /* Host Interrupt Request */
626#define ALLOW_CNFG 0x80 /* Allow New Configuration */
627#define DMA_DIR 0x100 /* DMA Direction */
628#define BTE 0x200 /* Bus Timeout Enabled */
629
630/* Bit masks for HOST_TIMEOUT */
631
632#define COUNT_TIMEOUT 0x7ff /* Host Timeout count */
633
634/* Bit masks for TIMER_ENABLE1 */ 604/* Bit masks for TIMER_ENABLE1 */
635 605
636#define TIMEN8 0x1 /* Timer 8 Enable */ 606#define TIMEN8 0x1 /* Timer 8 Enable */
diff --git a/arch/blackfin/mach-bf548/include/mach/defBF547.h b/arch/blackfin/mach-bf548/include/mach/defBF547.h
index e18de212ba1a..d55dcc0f5324 100644
--- a/arch/blackfin/mach-bf548/include/mach/defBF547.h
+++ b/arch/blackfin/mach-bf548/include/mach/defBF547.h
@@ -581,36 +581,6 @@
581#define GU_TRANS 0xff00 /* Transparent Color - G/U Component */ 581#define GU_TRANS 0xff00 /* Transparent Color - G/U Component */
582#define BV_TRANS 0xff0000 /* Transparent Color - B/V Component */ 582#define BV_TRANS 0xff0000 /* Transparent Color - B/V Component */
583 583
584/* Bit masks for HOST_CONTROL */
585
586#define HOST_EN 0x1 /* Host Enable */
587#define HOST_END 0x2 /* Host Endianess */
588#define DATA_SIZE 0x4 /* Data Size */
589#define HOST_RST 0x8 /* Host Reset */
590#define HRDY_OVR 0x20 /* Host Ready Override */
591#define INT_MODE 0x40 /* Interrupt Mode */
592#define BT_EN 0x80 /* Bus Timeout Enable */
593#define EHW 0x100 /* Enable Host Write */
594#define EHR 0x200 /* Enable Host Read */
595#define BDR 0x400 /* Burst DMA Requests */
596
597/* Bit masks for HOST_STATUS */
598
599#define DMA_READY 0x1 /* DMA Ready */
600#define FIFOFULL 0x2 /* FIFO Full */
601#define FIFOEMPTY 0x4 /* FIFO Empty */
602#define DMA_COMPLETE 0x8 /* DMA Complete */
603#define HSHK 0x10 /* Host Handshake */
604#define HSTIMEOUT 0x20 /* Host Timeout */
605#define HIRQ 0x40 /* Host Interrupt Request */
606#define ALLOW_CNFG 0x80 /* Allow New Configuration */
607#define DMA_DIR 0x100 /* DMA Direction */
608#define BTE 0x200 /* Bus Timeout Enabled */
609
610/* Bit masks for HOST_TIMEOUT */
611
612#define COUNT_TIMEOUT 0x7ff /* Host Timeout count */
613
614/* Bit masks for KPAD_CTL */ 584/* Bit masks for KPAD_CTL */
615 585
616#define KPAD_EN 0x1 /* Keypad Enable */ 586#define KPAD_EN 0x1 /* Keypad Enable */
diff --git a/arch/blackfin/mach-bf561/boards/acvilon.c b/arch/blackfin/mach-bf561/boards/acvilon.c
index 0b74218fdd3a..430b16d5ccb1 100644
--- a/arch/blackfin/mach-bf561/boards/acvilon.c
+++ b/arch/blackfin/mach-bf561/boards/acvilon.c
@@ -60,7 +60,7 @@
60 */ 60 */
61const char bfin_board_name[] = "Acvilon board"; 61const char bfin_board_name[] = "Acvilon board";
62 62
63#if defined(CONFIG_USB_ISP1760_HCD) || defined(CONFIG_USB_ISP1760_HCD_MODULE) 63#if IS_ENABLED(CONFIG_USB_ISP1760_HCD)
64#include <linux/usb/isp1760.h> 64#include <linux/usb/isp1760.h>
65static struct resource bfin_isp1760_resources[] = { 65static struct resource bfin_isp1760_resources[] = {
66 [0] = { 66 [0] = {
@@ -137,7 +137,7 @@ static struct i2c_board_info acvilon_i2c_devs[] __initdata = {
137 }, 137 },
138}; 138};
139 139
140#if defined(CONFIG_MTD_PLATRAM) || defined(CONFIG_MTD_PLATRAM_MODULE) 140#if IS_ENABLED(CONFIG_MTD_PLATRAM)
141static struct platdata_mtd_ram mtd_ram_data = { 141static struct platdata_mtd_ram mtd_ram_data = {
142 .mapname = "rootfs(RAM)", 142 .mapname = "rootfs(RAM)",
143 .bankwidth = 4, 143 .bankwidth = 4,
@@ -160,7 +160,7 @@ static struct platform_device mtd_ram_device = {
160}; 160};
161#endif 161#endif
162 162
163#if defined(CONFIG_SMSC911X) || defined(CONFIG_SMSC911X_MODULE) 163#if IS_ENABLED(CONFIG_SMSC911X)
164#include <linux/smsc911x.h> 164#include <linux/smsc911x.h>
165static struct resource smsc911x_resources[] = { 165static struct resource smsc911x_resources[] = {
166 { 166 {
@@ -194,7 +194,7 @@ static struct platform_device smsc911x_device = {
194}; 194};
195#endif 195#endif
196 196
197#if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE) 197#if IS_ENABLED(CONFIG_SERIAL_BFIN)
198#ifdef CONFIG_SERIAL_BFIN_UART0 198#ifdef CONFIG_SERIAL_BFIN_UART0
199static struct resource bfin_uart0_resources[] = { 199static struct resource bfin_uart0_resources[] = {
200 { 200 {
@@ -246,7 +246,7 @@ static struct platform_device bfin_uart0_device = {
246#endif 246#endif
247#endif 247#endif
248 248
249#if defined(CONFIG_MTD_NAND_PLATFORM) || defined(CONFIG_MTD_NAND_PLATFORM_MODULE) 249#if IS_ENABLED(CONFIG_MTD_NAND_PLATFORM)
250 250
251static struct mtd_partition bfin_plat_nand_partitions[] = { 251static struct mtd_partition bfin_plat_nand_partitions[] = {
252 { 252 {
@@ -323,7 +323,7 @@ static void bfin_plat_nand_init(void)
323} 323}
324#endif 324#endif
325 325
326#if defined(CONFIG_MTD_DATAFLASH) || defined(CONFIG_MTD_DATAFLASH_MODULE) 326#if IS_ENABLED(CONFIG_MTD_DATAFLASH)
327static struct mtd_partition bfin_spi_dataflash_partitions[] = { 327static struct mtd_partition bfin_spi_dataflash_partitions[] = {
328 { 328 {
329 .name = "bootloader", 329 .name = "bootloader",
@@ -369,7 +369,7 @@ static struct bfin5xx_spi_chip data_flash_chip_info = {
369}; 369};
370#endif 370#endif
371 371
372#if defined(CONFIG_SPI_BFIN5XX) || defined(CONFIG_SPI_BFIN5XX_MODULE) 372#if IS_ENABLED(CONFIG_SPI_BFIN5XX)
373/* SPI (0) */ 373/* SPI (0) */
374static struct resource bfin_spi0_resource[] = { 374static struct resource bfin_spi0_resource[] = {
375 [0] = { 375 [0] = {
@@ -408,7 +408,7 @@ static struct platform_device bfin_spi0_device = {
408#endif 408#endif
409 409
410static struct spi_board_info bfin_spi_board_info[] __initdata = { 410static struct spi_board_info bfin_spi_board_info[] __initdata = {
411#if defined(CONFIG_SPI_SPIDEV) || defined(CONFIG_SPI_SPIDEV_MODULE) 411#if IS_ENABLED(CONFIG_SPI_SPIDEV)
412 { 412 {
413 .modalias = "spidev", 413 .modalias = "spidev",
414 .max_speed_hz = 3125000, /* max spi clock (SCK) speed in HZ */ 414 .max_speed_hz = 3125000, /* max spi clock (SCK) speed in HZ */
@@ -416,7 +416,7 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
416 .chip_select = 3, 416 .chip_select = 3,
417 }, 417 },
418#endif 418#endif
419#if defined(CONFIG_MTD_DATAFLASH) || defined(CONFIG_MTD_DATAFLASH_MODULE) 419#if IS_ENABLED(CONFIG_MTD_DATAFLASH)
420 { /* DataFlash chip */ 420 { /* DataFlash chip */
421 .modalias = "mtd_dataflash", 421 .modalias = "mtd_dataflash",
422 .max_speed_hz = 33250000, /* max spi clock (SCK) speed in HZ */ 422 .max_speed_hz = 33250000, /* max spi clock (SCK) speed in HZ */
@@ -472,11 +472,11 @@ static struct platform_device bfin_dpmc = {
472static struct platform_device *acvilon_devices[] __initdata = { 472static struct platform_device *acvilon_devices[] __initdata = {
473 &bfin_dpmc, 473 &bfin_dpmc,
474 474
475#if defined(CONFIG_SPI_BFIN5XX) || defined(CONFIG_SPI_BFIN5XX_MODULE) 475#if IS_ENABLED(CONFIG_SPI_BFIN5XX)
476 &bfin_spi0_device, 476 &bfin_spi0_device,
477#endif 477#endif
478 478
479#if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE) 479#if IS_ENABLED(CONFIG_SERIAL_BFIN)
480#ifdef CONFIG_SERIAL_BFIN_UART0 480#ifdef CONFIG_SERIAL_BFIN_UART0
481 &bfin_uart0_device, 481 &bfin_uart0_device,
482#endif 482#endif
@@ -484,17 +484,17 @@ static struct platform_device *acvilon_devices[] __initdata = {
484 484
485 &bfin_gpios_device, 485 &bfin_gpios_device,
486 486
487#if defined(CONFIG_SMSC911X) || defined(CONFIG_SMSC911X_MODULE) 487#if IS_ENABLED(CONFIG_SMSC911X)
488 &smsc911x_device, 488 &smsc911x_device,
489#endif 489#endif
490 490
491 &bfin_i2c_pca_device, 491 &bfin_i2c_pca_device,
492 492
493#if defined(CONFIG_MTD_NAND_PLATFORM) || defined(CONFIG_MTD_NAND_PLATFORM_MODULE) 493#if IS_ENABLED(CONFIG_MTD_NAND_PLATFORM)
494 &bfin_async_nand_device, 494 &bfin_async_nand_device,
495#endif 495#endif
496 496
497#if defined(CONFIG_MTD_PLATRAM) || defined(CONFIG_MTD_PLATRAM_MODULE) 497#if IS_ENABLED(CONFIG_MTD_PLATRAM)
498 &mtd_ram_device, 498 &mtd_ram_device,
499#endif 499#endif
500 500
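
The board-file hunks above (and the ones that follow for cm_bf561, the BF561/BF609 EZ-KITs and tepla) all make the same substitution: the two-clause `defined(CONFIG_FOO) || defined(CONFIG_FOO_MODULE)` test becomes a single `IS_ENABLED(CONFIG_FOO)`. That helper comes from <linux/kconfig.h> and already covers both the built-in (=y) and modular (=m) cases, which is why the `_MODULE` half of every test can be dropped. Below is a condensed sketch of the helpers, paraphrased from the header rather than copied verbatim, together with the before/after equivalence; it is illustration only, not part of the patch.

```c
/* Paraphrased from include/linux/kconfig.h -- not a verbatim copy. */
#define __ARG_PLACEHOLDER_1 0,
#define config_enabled(cfg)            _config_enabled(cfg)
#define _config_enabled(value)         __config_enabled(__ARG_PLACEHOLDER_##value)
#define __config_enabled(arg1_or_junk) ___config_enabled(arg1_or_junk 1, 0)
#define ___config_enabled(__ignored, val, ...) val

#define IS_BUILTIN(option) config_enabled(option)           /* CONFIG_FOO=y */
#define IS_MODULE(option)  config_enabled(option##_MODULE)  /* CONFIG_FOO=m */
#define IS_ENABLED(option) (IS_BUILTIN(option) || IS_MODULE(option))

/* The two forms seen throughout this patch are therefore equivalent: */
#if defined(CONFIG_SMSC911X) || defined(CONFIG_SMSC911X_MODULE)   /* before */
#endif
#if IS_ENABLED(CONFIG_SMSC911X)                                   /* after  */
#endif
```
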
diff --git a/arch/blackfin/mach-bf561/boards/cm_bf561.c b/arch/blackfin/mach-bf561/boards/cm_bf561.c
index d81450f635df..9f777df4cacc 100644
--- a/arch/blackfin/mach-bf561/boards/cm_bf561.c
+++ b/arch/blackfin/mach-bf561/boards/cm_bf561.c
@@ -13,7 +13,7 @@
13#include <linux/mtd/partitions.h> 13#include <linux/mtd/partitions.h>
14#include <linux/spi/spi.h> 14#include <linux/spi/spi.h>
15#include <linux/spi/flash.h> 15#include <linux/spi/flash.h>
16#if defined(CONFIG_USB_ISP1362_HCD) || defined(CONFIG_USB_ISP1362_HCD_MODULE) 16#if IS_ENABLED(CONFIG_USB_ISP1362_HCD)
17#include <linux/usb/isp1362.h> 17#include <linux/usb/isp1362.h>
18#endif 18#endif
19#include <linux/ata_platform.h> 19#include <linux/ata_platform.h>
@@ -29,10 +29,10 @@
29 */ 29 */
30const char bfin_board_name[] = "Bluetechnix CM BF561"; 30const char bfin_board_name[] = "Bluetechnix CM BF561";
31 31
32#if defined(CONFIG_SPI_BFIN5XX) || defined(CONFIG_SPI_BFIN5XX_MODULE) 32#if IS_ENABLED(CONFIG_SPI_BFIN5XX)
33/* all SPI peripherals info goes here */ 33/* all SPI peripherals info goes here */
34 34
35#if defined(CONFIG_MTD_M25P80) || defined(CONFIG_MTD_M25P80_MODULE) 35#if IS_ENABLED(CONFIG_MTD_M25P80)
36static struct mtd_partition bfin_spi_flash_partitions[] = { 36static struct mtd_partition bfin_spi_flash_partitions[] = {
37 { 37 {
38 .name = "bootloader(spi)", 38 .name = "bootloader(spi)",
@@ -64,7 +64,7 @@ static struct bfin5xx_spi_chip spi_flash_chip_info = {
64#endif 64#endif
65 65
66static struct spi_board_info bfin_spi_board_info[] __initdata = { 66static struct spi_board_info bfin_spi_board_info[] __initdata = {
67#if defined(CONFIG_MTD_M25P80) || defined(CONFIG_MTD_M25P80_MODULE) 67#if IS_ENABLED(CONFIG_MTD_M25P80)
68 { 68 {
69 /* the modalias must be the same as spi device driver name */ 69 /* the modalias must be the same as spi device driver name */
70 .modalias = "m25p80", /* Name of spi_driver for this device */ 70 .modalias = "m25p80", /* Name of spi_driver for this device */
@@ -77,7 +77,7 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
77 }, 77 },
78#endif 78#endif
79 79
80#if defined(CONFIG_SND_BF5XX_SOC_AD183X) || defined(CONFIG_SND_BF5XX_SOC_AD183X_MODULE) 80#if IS_ENABLED(CONFIG_SND_BF5XX_SOC_AD183X)
81 { 81 {
82 .modalias = "ad183x", 82 .modalias = "ad183x",
83 .max_speed_hz = 3125000, /* max spi clock (SCK) speed in HZ */ 83 .max_speed_hz = 3125000, /* max spi clock (SCK) speed in HZ */
@@ -85,7 +85,7 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
85 .chip_select = 4, 85 .chip_select = 4,
86 }, 86 },
87#endif 87#endif
88#if defined(CONFIG_MMC_SPI) || defined(CONFIG_MMC_SPI_MODULE) 88#if IS_ENABLED(CONFIG_MMC_SPI)
89 { 89 {
90 .modalias = "mmc_spi", 90 .modalias = "mmc_spi",
91 .max_speed_hz = 20000000, /* max spi clock (SCK) speed in HZ */ 91 .max_speed_hz = 20000000, /* max spi clock (SCK) speed in HZ */
@@ -134,14 +134,14 @@ static struct platform_device bfin_spi0_device = {
134#endif /* spi master and devices */ 134#endif /* spi master and devices */
135 135
136 136
137#if defined(CONFIG_FB_HITACHI_TX09) || defined(CONFIG_FB_HITACHI_TX09_MODULE) 137#if IS_ENABLED(CONFIG_FB_HITACHI_TX09)
138static struct platform_device hitachi_fb_device = { 138static struct platform_device hitachi_fb_device = {
139 .name = "hitachi-tx09", 139 .name = "hitachi-tx09",
140}; 140};
141#endif 141#endif
142 142
143 143
144#if defined(CONFIG_SMC91X) || defined(CONFIG_SMC91X_MODULE) 144#if IS_ENABLED(CONFIG_SMC91X)
145#include <linux/smc91x.h> 145#include <linux/smc91x.h>
146 146
147static struct smc91x_platdata smc91x_info = { 147static struct smc91x_platdata smc91x_info = {
@@ -173,7 +173,7 @@ static struct platform_device smc91x_device = {
173}; 173};
174#endif 174#endif
175 175
176#if defined(CONFIG_SMSC911X) || defined(CONFIG_SMSC911X_MODULE) 176#if IS_ENABLED(CONFIG_SMSC911X)
177#include <linux/smsc911x.h> 177#include <linux/smsc911x.h>
178 178
179static struct resource smsc911x_resources[] = { 179static struct resource smsc911x_resources[] = {
@@ -208,7 +208,7 @@ static struct platform_device smsc911x_device = {
208}; 208};
209#endif 209#endif
210 210
211#if defined(CONFIG_USB_NET2272) || defined(CONFIG_USB_NET2272_MODULE) 211#if IS_ENABLED(CONFIG_USB_NET2272)
212static struct resource net2272_bfin_resources[] = { 212static struct resource net2272_bfin_resources[] = {
213 { 213 {
214 .start = 0x24000000, 214 .start = 0x24000000,
@@ -229,7 +229,7 @@ static struct platform_device net2272_bfin_device = {
229}; 229};
230#endif 230#endif
231 231
232#if defined(CONFIG_USB_ISP1362_HCD) || defined(CONFIG_USB_ISP1362_HCD_MODULE) 232#if IS_ENABLED(CONFIG_USB_ISP1362_HCD)
233static struct resource isp1362_hcd_resources[] = { 233static struct resource isp1362_hcd_resources[] = {
234 { 234 {
235 .start = 0x24008000, 235 .start = 0x24008000,
@@ -268,7 +268,7 @@ static struct platform_device isp1362_hcd_device = {
268}; 268};
269#endif 269#endif
270 270
271#if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE) 271#if IS_ENABLED(CONFIG_SERIAL_BFIN)
272#ifdef CONFIG_SERIAL_BFIN_UART0 272#ifdef CONFIG_SERIAL_BFIN_UART0
273static struct resource bfin_uart0_resources[] = { 273static struct resource bfin_uart0_resources[] = {
274 { 274 {
@@ -319,7 +319,7 @@ static struct platform_device bfin_uart0_device = {
319#endif 319#endif
320#endif 320#endif
321 321
322#if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE) 322#if IS_ENABLED(CONFIG_BFIN_SIR)
323#ifdef CONFIG_BFIN_SIR0 323#ifdef CONFIG_BFIN_SIR0
324static struct resource bfin_sir0_resources[] = { 324static struct resource bfin_sir0_resources[] = {
325 { 325 {
@@ -348,7 +348,7 @@ static struct platform_device bfin_sir0_device = {
348#endif 348#endif
349#endif 349#endif
350 350
351#if defined(CONFIG_PATA_PLATFORM) || defined(CONFIG_PATA_PLATFORM_MODULE) 351#if IS_ENABLED(CONFIG_PATA_PLATFORM)
352#define PATA_INT IRQ_PF46 352#define PATA_INT IRQ_PF46
353 353
354static struct pata_platform_info bfin_pata_platform_data = { 354static struct pata_platform_info bfin_pata_platform_data = {
@@ -385,7 +385,7 @@ static struct platform_device bfin_pata_device = {
385}; 385};
386#endif 386#endif
387 387
388#if defined(CONFIG_MTD_PHYSMAP) || defined(CONFIG_MTD_PHYSMAP_MODULE) 388#if IS_ENABLED(CONFIG_MTD_PHYSMAP)
389static struct mtd_partition para_partitions[] = { 389static struct mtd_partition para_partitions[] = {
390 { 390 {
391 .name = "bootloader(nor)", 391 .name = "bootloader(nor)",
@@ -456,54 +456,54 @@ static struct platform_device *cm_bf561_devices[] __initdata = {
456 456
457 &bfin_dpmc, 457 &bfin_dpmc,
458 458
459#if defined(CONFIG_FB_HITACHI_TX09) || defined(CONFIG_FB_HITACHI_TX09_MODULE) 459#if IS_ENABLED(CONFIG_FB_HITACHI_TX09)
460 &hitachi_fb_device, 460 &hitachi_fb_device,
461#endif 461#endif
462 462
463#if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE) 463#if IS_ENABLED(CONFIG_SERIAL_BFIN)
464#ifdef CONFIG_SERIAL_BFIN_UART0 464#ifdef CONFIG_SERIAL_BFIN_UART0
465 &bfin_uart0_device, 465 &bfin_uart0_device,
466#endif 466#endif
467#endif 467#endif
468 468
469#if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE) 469#if IS_ENABLED(CONFIG_BFIN_SIR)
470#ifdef CONFIG_BFIN_SIR0 470#ifdef CONFIG_BFIN_SIR0
471 &bfin_sir0_device, 471 &bfin_sir0_device,
472#endif 472#endif
473#endif 473#endif
474 474
475#if defined(CONFIG_USB_ISP1362_HCD) || defined(CONFIG_USB_ISP1362_HCD_MODULE) 475#if IS_ENABLED(CONFIG_USB_ISP1362_HCD)
476 &isp1362_hcd_device, 476 &isp1362_hcd_device,
477#endif 477#endif
478 478
479#if defined(CONFIG_SMC91X) || defined(CONFIG_SMC91X_MODULE) 479#if IS_ENABLED(CONFIG_SMC91X)
480 &smc91x_device, 480 &smc91x_device,
481#endif 481#endif
482 482
483#if defined(CONFIG_SMSC911X) || defined(CONFIG_SMSC911X_MODULE) 483#if IS_ENABLED(CONFIG_SMSC911X)
484 &smsc911x_device, 484 &smsc911x_device,
485#endif 485#endif
486 486
487#if defined(CONFIG_USB_NET2272) || defined(CONFIG_USB_NET2272_MODULE) 487#if IS_ENABLED(CONFIG_USB_NET2272)
488 &net2272_bfin_device, 488 &net2272_bfin_device,
489#endif 489#endif
490 490
491#if defined(CONFIG_SPI_BFIN5XX) || defined(CONFIG_SPI_BFIN5XX_MODULE) 491#if IS_ENABLED(CONFIG_SPI_BFIN5XX)
492 &bfin_spi0_device, 492 &bfin_spi0_device,
493#endif 493#endif
494 494
495#if defined(CONFIG_PATA_PLATFORM) || defined(CONFIG_PATA_PLATFORM_MODULE) 495#if IS_ENABLED(CONFIG_PATA_PLATFORM)
496 &bfin_pata_device, 496 &bfin_pata_device,
497#endif 497#endif
498 498
499#if defined(CONFIG_MTD_PHYSMAP) || defined(CONFIG_MTD_PHYSMAP_MODULE) 499#if IS_ENABLED(CONFIG_MTD_PHYSMAP)
500 &para_flash_device, 500 &para_flash_device,
501#endif 501#endif
502}; 502};
503 503
504static int __init net2272_init(void) 504static int __init net2272_init(void)
505{ 505{
506#if defined(CONFIG_USB_NET2272) || defined(CONFIG_USB_NET2272_MODULE) 506#if IS_ENABLED(CONFIG_USB_NET2272)
507 int ret; 507 int ret;
508 508
509 ret = gpio_request(GPIO_PF46, "net2272"); 509 ret = gpio_request(GPIO_PF46, "net2272");
@@ -523,11 +523,11 @@ static int __init cm_bf561_init(void)
523{ 523{
524 printk(KERN_INFO "%s(): registering device resources\n", __func__); 524 printk(KERN_INFO "%s(): registering device resources\n", __func__);
525 platform_add_devices(cm_bf561_devices, ARRAY_SIZE(cm_bf561_devices)); 525 platform_add_devices(cm_bf561_devices, ARRAY_SIZE(cm_bf561_devices));
526#if defined(CONFIG_SPI_BFIN5XX) || defined(CONFIG_SPI_BFIN5XX_MODULE) 526#if IS_ENABLED(CONFIG_SPI_BFIN5XX)
527 spi_register_board_info(bfin_spi_board_info, ARRAY_SIZE(bfin_spi_board_info)); 527 spi_register_board_info(bfin_spi_board_info, ARRAY_SIZE(bfin_spi_board_info));
528#endif 528#endif
529 529
530#if defined(CONFIG_PATA_PLATFORM) || defined(CONFIG_PATA_PLATFORM_MODULE) 530#if IS_ENABLED(CONFIG_PATA_PLATFORM)
531 irq_set_status_flags(PATA_INT, IRQ_NOAUTOEN); 531 irq_set_status_flags(PATA_INT, IRQ_NOAUTOEN);
532#endif 532#endif
533 533
diff --git a/arch/blackfin/mach-bf561/boards/ezkit.c b/arch/blackfin/mach-bf561/boards/ezkit.c
index 92938e79b9e3..88dee43e7abe 100644
--- a/arch/blackfin/mach-bf561/boards/ezkit.c
+++ b/arch/blackfin/mach-bf561/boards/ezkit.c
@@ -25,7 +25,7 @@
25 */ 25 */
26const char bfin_board_name[] = "ADI BF561-EZKIT"; 26const char bfin_board_name[] = "ADI BF561-EZKIT";
27 27
28#if defined(CONFIG_USB_ISP1760_HCD) || defined(CONFIG_USB_ISP1760_HCD_MODULE) 28#if IS_ENABLED(CONFIG_USB_ISP1760_HCD)
29#include <linux/usb/isp1760.h> 29#include <linux/usb/isp1760.h>
30static struct resource bfin_isp1760_resources[] = { 30static struct resource bfin_isp1760_resources[] = {
31 [0] = { 31 [0] = {
@@ -60,7 +60,7 @@ static struct platform_device bfin_isp1760_device = {
60}; 60};
61#endif 61#endif
62 62
63#if defined(CONFIG_USB_ISP1362_HCD) || defined(CONFIG_USB_ISP1362_HCD_MODULE) 63#if IS_ENABLED(CONFIG_USB_ISP1362_HCD)
64#include <linux/usb/isp1362.h> 64#include <linux/usb/isp1362.h>
65 65
66static struct resource isp1362_hcd_resources[] = { 66static struct resource isp1362_hcd_resources[] = {
@@ -101,7 +101,7 @@ static struct platform_device isp1362_hcd_device = {
101}; 101};
102#endif 102#endif
103 103
104#if defined(CONFIG_USB_NET2272) || defined(CONFIG_USB_NET2272_MODULE) 104#if IS_ENABLED(CONFIG_USB_NET2272)
105static struct resource net2272_bfin_resources[] = { 105static struct resource net2272_bfin_resources[] = {
106 { 106 {
107 .start = 0x2C000000, 107 .start = 0x2C000000,
@@ -129,7 +129,7 @@ static struct platform_device net2272_bfin_device = {
129 * USB-LAN EzExtender board 129 * USB-LAN EzExtender board
130 * Driver needs to know address, irq and flag pin. 130 * Driver needs to know address, irq and flag pin.
131 */ 131 */
132#if defined(CONFIG_SMC91X) || defined(CONFIG_SMC91X_MODULE) 132#if IS_ENABLED(CONFIG_SMC91X)
133#include <linux/smc91x.h> 133#include <linux/smc91x.h>
134 134
135static struct smc91x_platdata smc91x_info = { 135static struct smc91x_platdata smc91x_info = {
@@ -163,7 +163,7 @@ static struct platform_device smc91x_device = {
163}; 163};
164#endif 164#endif
165 165
166#if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE) 166#if IS_ENABLED(CONFIG_SERIAL_BFIN)
167#ifdef CONFIG_SERIAL_BFIN_UART0 167#ifdef CONFIG_SERIAL_BFIN_UART0
168static struct resource bfin_uart0_resources[] = { 168static struct resource bfin_uart0_resources[] = {
169 { 169 {
@@ -214,7 +214,7 @@ static struct platform_device bfin_uart0_device = {
214#endif 214#endif
215#endif 215#endif
216 216
217#if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE) 217#if IS_ENABLED(CONFIG_BFIN_SIR)
218#ifdef CONFIG_BFIN_SIR0 218#ifdef CONFIG_BFIN_SIR0
219static struct resource bfin_sir0_resources[] = { 219static struct resource bfin_sir0_resources[] = {
220 { 220 {
@@ -243,7 +243,7 @@ static struct platform_device bfin_sir0_device = {
243#endif 243#endif
244#endif 244#endif
245 245
246#if defined(CONFIG_MTD_PHYSMAP) || defined(CONFIG_MTD_PHYSMAP_MODULE) 246#if IS_ENABLED(CONFIG_MTD_PHYSMAP)
247static struct mtd_partition ezkit_partitions[] = { 247static struct mtd_partition ezkit_partitions[] = {
248 { 248 {
249 .name = "bootloader(nor)", 249 .name = "bootloader(nor)",
@@ -291,7 +291,7 @@ static struct platform_device ezkit_flash_device = {
291}; 291};
292#endif 292#endif
293 293
294#if defined(CONFIG_SPI_BFIN5XX) || defined(CONFIG_SPI_BFIN5XX_MODULE) 294#if IS_ENABLED(CONFIG_SPI_BFIN5XX)
295/* SPI (0) */ 295/* SPI (0) */
296static struct resource bfin_spi0_resource[] = { 296static struct resource bfin_spi0_resource[] = {
297 [0] = { 297 [0] = {
@@ -330,8 +330,7 @@ static struct platform_device bfin_spi0_device = {
330#endif 330#endif
331 331
332static struct spi_board_info bfin_spi_board_info[] __initdata = { 332static struct spi_board_info bfin_spi_board_info[] __initdata = {
333#if defined(CONFIG_SND_BF5XX_SOC_AD183X) \ 333#if IS_ENABLED(CONFIG_SND_BF5XX_SOC_AD183X)
334 || defined(CONFIG_SND_BF5XX_SOC_AD183X_MODULE)
335 { 334 {
336 .modalias = "ad183x", 335 .modalias = "ad183x",
337 .max_speed_hz = 3125000, /* max spi clock (SCK) speed in HZ */ 336 .max_speed_hz = 3125000, /* max spi clock (SCK) speed in HZ */
@@ -341,7 +340,7 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
341 .mode = SPI_MODE_3, 340 .mode = SPI_MODE_3,
342 }, 341 },
343#endif 342#endif
344#if defined(CONFIG_SPI_SPIDEV) || defined(CONFIG_SPI_SPIDEV_MODULE) 343#if IS_ENABLED(CONFIG_SPI_SPIDEV)
345 { 344 {
346 .modalias = "spidev", 345 .modalias = "spidev",
347 .max_speed_hz = 3125000, /* max spi clock (SCK) speed in HZ */ 346 .max_speed_hz = 3125000, /* max spi clock (SCK) speed in HZ */
@@ -351,7 +350,7 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
351#endif 350#endif
352}; 351};
353 352
354#if defined(CONFIG_KEYBOARD_GPIO) || defined(CONFIG_KEYBOARD_GPIO_MODULE) 353#if IS_ENABLED(CONFIG_KEYBOARD_GPIO)
355#include <linux/input.h> 354#include <linux/input.h>
356#include <linux/gpio_keys.h> 355#include <linux/gpio_keys.h>
357 356
@@ -375,7 +374,7 @@ static struct platform_device bfin_device_gpiokeys = {
375}; 374};
376#endif 375#endif
377 376
378#if defined(CONFIG_I2C_GPIO) || defined(CONFIG_I2C_GPIO_MODULE) 377#if IS_ENABLED(CONFIG_I2C_GPIO)
379#include <linux/i2c-gpio.h> 378#include <linux/i2c-gpio.h>
380 379
381static struct i2c_gpio_platform_data i2c_gpio_data = { 380static struct i2c_gpio_platform_data i2c_gpio_data = {
@@ -422,8 +421,7 @@ static struct platform_device bfin_dpmc = {
422 }, 421 },
423}; 422};
424 423
425#if defined(CONFIG_VIDEO_BLACKFIN_CAPTURE) \ 424#if IS_ENABLED(CONFIG_VIDEO_BLACKFIN_CAPTURE)
426 || defined(CONFIG_VIDEO_BLACKFIN_CAPTURE_MODULE)
427#include <linux/videodev2.h> 425#include <linux/videodev2.h>
428#include <media/blackfin/bfin_capture.h> 426#include <media/blackfin/bfin_capture.h>
429#include <media/blackfin/ppi.h> 427#include <media/blackfin/ppi.h>
@@ -443,8 +441,7 @@ static const struct ppi_info ppi_info = {
443 .pin_req = ppi_req, 441 .pin_req = ppi_req,
444}; 442};
445 443
446#if defined(CONFIG_VIDEO_ADV7183) \ 444#if IS_ENABLED(CONFIG_VIDEO_ADV7183)
447 || defined(CONFIG_VIDEO_ADV7183_MODULE)
448#include <media/adv7183.h> 445#include <media/adv7183.h>
449static struct v4l2_input adv7183_inputs[] = { 446static struct v4l2_input adv7183_inputs[] = {
450 { 447 {
@@ -515,7 +512,7 @@ static struct platform_device bfin_capture_device = {
515}; 512};
516#endif 513#endif
517 514
518#if defined(CONFIG_SND_BF5XX_I2S) || defined(CONFIG_SND_BF5XX_I2S_MODULE) 515#if IS_ENABLED(CONFIG_SND_BF5XX_I2S)
519static struct platform_device bfin_i2s = { 516static struct platform_device bfin_i2s = {
520 .name = "bfin-i2s", 517 .name = "bfin-i2s",
521 .id = CONFIG_SND_BF5XX_SPORT_NUM, 518 .id = CONFIG_SND_BF5XX_SPORT_NUM,
@@ -523,7 +520,7 @@ static struct platform_device bfin_i2s = {
523}; 520};
524#endif 521#endif
525 522
526#if defined(CONFIG_SND_BF5XX_AC97) || defined(CONFIG_SND_BF5XX_AC97_MODULE) 523#if IS_ENABLED(CONFIG_SND_BF5XX_AC97)
527static struct platform_device bfin_ac97 = { 524static struct platform_device bfin_ac97 = {
528 .name = "bfin-ac97", 525 .name = "bfin-ac97",
529 .id = CONFIG_SND_BF5XX_SPORT_NUM, 526 .id = CONFIG_SND_BF5XX_SPORT_NUM,
@@ -531,8 +528,7 @@ static struct platform_device bfin_ac97 = {
531}; 528};
532#endif 529#endif
533 530
534#if defined(CONFIG_SND_BF5XX_SOC_AD1836) \ 531#if IS_ENABLED(CONFIG_SND_BF5XX_SOC_AD1836)
535 || defined(CONFIG_SND_BF5XX_SOC_AD1836_MODULE)
536static const char * const ad1836_link[] = { 532static const char * const ad1836_link[] = {
537 "bfin-i2s.0", 533 "bfin-i2s.0",
538 "spi0.4", 534 "spi0.4",
@@ -550,72 +546,70 @@ static struct platform_device *ezkit_devices[] __initdata = {
550 546
551 &bfin_dpmc, 547 &bfin_dpmc,
552 548
553#if defined(CONFIG_SMC91X) || defined(CONFIG_SMC91X_MODULE) 549#if IS_ENABLED(CONFIG_SMC91X)
554 &smc91x_device, 550 &smc91x_device,
555#endif 551#endif
556 552
557#if defined(CONFIG_USB_NET2272) || defined(CONFIG_USB_NET2272_MODULE) 553#if IS_ENABLED(CONFIG_USB_NET2272)
558 &net2272_bfin_device, 554 &net2272_bfin_device,
559#endif 555#endif
560 556
561#if defined(CONFIG_USB_ISP1760_HCD) || defined(CONFIG_USB_ISP1760_HCD_MODULE) 557#if IS_ENABLED(CONFIG_USB_ISP1760_HCD)
562 &bfin_isp1760_device, 558 &bfin_isp1760_device,
563#endif 559#endif
564 560
565#if defined(CONFIG_SPI_BFIN5XX) || defined(CONFIG_SPI_BFIN5XX_MODULE) 561#if IS_ENABLED(CONFIG_SPI_BFIN5XX)
566 &bfin_spi0_device, 562 &bfin_spi0_device,
567#endif 563#endif
568 564
569#if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE) 565#if IS_ENABLED(CONFIG_SERIAL_BFIN)
570#ifdef CONFIG_SERIAL_BFIN_UART0 566#ifdef CONFIG_SERIAL_BFIN_UART0
571 &bfin_uart0_device, 567 &bfin_uart0_device,
572#endif 568#endif
573#endif 569#endif
574 570
575#if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE) 571#if IS_ENABLED(CONFIG_BFIN_SIR)
576#ifdef CONFIG_BFIN_SIR0 572#ifdef CONFIG_BFIN_SIR0
577 &bfin_sir0_device, 573 &bfin_sir0_device,
578#endif 574#endif
579#endif 575#endif
580 576
581#if defined(CONFIG_KEYBOARD_GPIO) || defined(CONFIG_KEYBOARD_GPIO_MODULE) 577#if IS_ENABLED(CONFIG_KEYBOARD_GPIO)
582 &bfin_device_gpiokeys, 578 &bfin_device_gpiokeys,
583#endif 579#endif
584 580
585#if defined(CONFIG_I2C_GPIO) || defined(CONFIG_I2C_GPIO_MODULE) 581#if IS_ENABLED(CONFIG_I2C_GPIO)
586 &i2c_gpio_device, 582 &i2c_gpio_device,
587#endif 583#endif
588 584
589#if defined(CONFIG_USB_ISP1362_HCD) || defined(CONFIG_USB_ISP1362_HCD_MODULE) 585#if IS_ENABLED(CONFIG_USB_ISP1362_HCD)
590 &isp1362_hcd_device, 586 &isp1362_hcd_device,
591#endif 587#endif
592 588
593#if defined(CONFIG_MTD_PHYSMAP) || defined(CONFIG_MTD_PHYSMAP_MODULE) 589#if IS_ENABLED(CONFIG_MTD_PHYSMAP)
594 &ezkit_flash_device, 590 &ezkit_flash_device,
595#endif 591#endif
596 592
597#if defined(CONFIG_VIDEO_BLACKFIN_CAPTURE) \ 593#if IS_ENABLED(CONFIG_VIDEO_BLACKFIN_CAPTURE)
598 || defined(CONFIG_VIDEO_BLACKFIN_CAPTURE_MODULE)
599 &bfin_capture_device, 594 &bfin_capture_device,
600#endif 595#endif
601 596
602#if defined(CONFIG_SND_BF5XX_I2S) || defined(CONFIG_SND_BF5XX_I2S_MODULE) 597#if IS_ENABLED(CONFIG_SND_BF5XX_I2S)
603 &bfin_i2s, 598 &bfin_i2s,
604#endif 599#endif
605 600
606#if defined(CONFIG_SND_BF5XX_AC97) || defined(CONFIG_SND_BF5XX_AC97_MODULE) 601#if IS_ENABLED(CONFIG_SND_BF5XX_AC97)
607 &bfin_ac97, 602 &bfin_ac97,
608#endif 603#endif
609 604
610#if defined(CONFIG_SND_BF5XX_SOC_AD1836) || \ 605#if IS_ENABLED(CONFIG_SND_BF5XX_SOC_AD1836)
611 defined(CONFIG_SND_BF5XX_SOC_AD1836_MODULE)
612 &bfin_ad1836_machine, 606 &bfin_ad1836_machine,
613#endif 607#endif
614}; 608};
615 609
616static int __init net2272_init(void) 610static int __init net2272_init(void)
617{ 611{
618#if defined(CONFIG_USB_NET2272) || defined(CONFIG_USB_NET2272_MODULE) 612#if IS_ENABLED(CONFIG_USB_NET2272)
619 int ret; 613 int ret;
620 614
621 ret = gpio_request(GPIO_PF11, "net2272"); 615 ret = gpio_request(GPIO_PF11, "net2272");
@@ -641,12 +635,12 @@ static int __init ezkit_init(void)
641 if (ret < 0) 635 if (ret < 0)
642 return ret; 636 return ret;
643 637
644#if defined(CONFIG_SMC91X) || defined(CONFIG_SMC91X_MODULE) 638#if IS_ENABLED(CONFIG_SMC91X)
645 bfin_write_FIO0_DIR(bfin_read_FIO0_DIR() | (1 << 12)); 639 bfin_write_FIO0_DIR(bfin_read_FIO0_DIR() | (1 << 12));
646 SSYNC(); 640 SSYNC();
647#endif 641#endif
648 642
649#if defined(CONFIG_SND_BF5XX_SOC_AD183X) || defined(CONFIG_SND_BF5XX_SOC_AD183X_MODULE) 643#if IS_ENABLED(CONFIG_SND_BF5XX_SOC_AD183X)
650 bfin_write_FIO0_DIR(bfin_read_FIO0_DIR() | (1 << 15)); 644 bfin_write_FIO0_DIR(bfin_read_FIO0_DIR() | (1 << 15));
651 bfin_write_FIO0_FLAG_S(1 << 15); 645 bfin_write_FIO0_FLAG_S(1 << 15);
652 SSYNC(); 646 SSYNC();
diff --git a/arch/blackfin/mach-bf561/boards/tepla.c b/arch/blackfin/mach-bf561/boards/tepla.c
index 1a57bc986aad..f87b8cc0cd4c 100644
--- a/arch/blackfin/mach-bf561/boards/tepla.c
+++ b/arch/blackfin/mach-bf561/boards/tepla.c
@@ -42,7 +42,7 @@ static struct platform_device smc91x_device = {
42 .resource = smc91x_resources, 42 .resource = smc91x_resources,
43}; 43};
44 44
45#if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE) 45#if IS_ENABLED(CONFIG_SERIAL_BFIN)
46#ifdef CONFIG_SERIAL_BFIN_UART0 46#ifdef CONFIG_SERIAL_BFIN_UART0
47static struct resource bfin_uart0_resources[] = { 47static struct resource bfin_uart0_resources[] = {
48 { 48 {
@@ -93,7 +93,7 @@ static struct platform_device bfin_uart0_device = {
93#endif 93#endif
94#endif 94#endif
95 95
96#if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE) 96#if IS_ENABLED(CONFIG_BFIN_SIR)
97#ifdef CONFIG_BFIN_SIR0 97#ifdef CONFIG_BFIN_SIR0
98static struct resource bfin_sir0_resources[] = { 98static struct resource bfin_sir0_resources[] = {
99 { 99 {
@@ -125,13 +125,13 @@ static struct platform_device bfin_sir0_device = {
125static struct platform_device *tepla_devices[] __initdata = { 125static struct platform_device *tepla_devices[] __initdata = {
126 &smc91x_device, 126 &smc91x_device,
127 127
128#if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE) 128#if IS_ENABLED(CONFIG_SERIAL_BFIN)
129#ifdef CONFIG_SERIAL_BFIN_UART0 129#ifdef CONFIG_SERIAL_BFIN_UART0
130 &bfin_uart0_device, 130 &bfin_uart0_device,
131#endif 131#endif
132#endif 132#endif
133 133
134#if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE) 134#if IS_ENABLED(CONFIG_BFIN_SIR)
135#ifdef CONFIG_BFIN_SIR0 135#ifdef CONFIG_BFIN_SIR0
136 &bfin_sir0_device, 136 &bfin_sir0_device,
137#endif 137#endif
diff --git a/arch/blackfin/mach-bf609/boards/ezkit.c b/arch/blackfin/mach-bf609/boards/ezkit.c
index 8de8bc690b36..943f7e95ec15 100644
--- a/arch/blackfin/mach-bf609/boards/ezkit.c
+++ b/arch/blackfin/mach-bf609/boards/ezkit.c
@@ -39,7 +39,7 @@ const char bfin_board_name[] = "ADI BF609-EZKIT";
39 * Driver needs to know address, irq and flag pin. 39 * Driver needs to know address, irq and flag pin.
40 */ 40 */
41 41
42#if defined(CONFIG_USB_ISP1760_HCD) || defined(CONFIG_USB_ISP1760_HCD_MODULE) 42#if IS_ENABLED(CONFIG_USB_ISP1760_HCD)
43#include <linux/usb/isp1760.h> 43#include <linux/usb/isp1760.h>
44static struct resource bfin_isp1760_resources[] = { 44static struct resource bfin_isp1760_resources[] = {
45 [0] = { 45 [0] = {
@@ -74,7 +74,7 @@ static struct platform_device bfin_isp1760_device = {
74}; 74};
75#endif 75#endif
76 76
77#if defined(CONFIG_INPUT_BFIN_ROTARY) || defined(CONFIG_INPUT_BFIN_ROTARY_MODULE) 77#if IS_ENABLED(CONFIG_INPUT_BFIN_ROTARY)
78#include <asm/bfin_rotary.h> 78#include <asm/bfin_rotary.h>
79 79
80static struct bfin_rotary_platform_data bfin_rotary_data = { 80static struct bfin_rotary_platform_data bfin_rotary_data = {
@@ -105,7 +105,7 @@ static struct platform_device bfin_rotary_device = {
105}; 105};
106#endif 106#endif
107 107
108#if defined(CONFIG_STMMAC_ETH) || defined(CONFIG_STMMAC_ETH_MODULE) 108#if IS_ENABLED(CONFIG_STMMAC_ETH)
109#include <linux/stmmac.h> 109#include <linux/stmmac.h>
110#include <linux/phy.h> 110#include <linux/phy.h>
111 111
@@ -159,7 +159,7 @@ static struct platform_device bfin_eth_device = {
159}; 159};
160#endif 160#endif
161 161
162#if defined(CONFIG_INPUT_ADXL34X) || defined(CONFIG_INPUT_ADXL34X_MODULE) 162#if IS_ENABLED(CONFIG_INPUT_ADXL34X)
163#include <linux/input/adxl34x.h> 163#include <linux/input/adxl34x.h>
164static const struct adxl34x_platform_data adxl34x_info = { 164static const struct adxl34x_platform_data adxl34x_info = {
165 .x_axis_offset = 0, 165 .x_axis_offset = 0,
@@ -198,14 +198,14 @@ static const struct adxl34x_platform_data adxl34x_info = {
198}; 198};
199#endif 199#endif
200 200
201#if defined(CONFIG_RTC_DRV_BFIN) || defined(CONFIG_RTC_DRV_BFIN_MODULE) 201#if IS_ENABLED(CONFIG_RTC_DRV_BFIN)
202static struct platform_device rtc_device = { 202static struct platform_device rtc_device = {
203 .name = "rtc-bfin", 203 .name = "rtc-bfin",
204 .id = -1, 204 .id = -1,
205}; 205};
206#endif 206#endif
207 207
208#if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE) 208#if IS_ENABLED(CONFIG_SERIAL_BFIN)
209#ifdef CONFIG_SERIAL_BFIN_UART0 209#ifdef CONFIG_SERIAL_BFIN_UART0
210static struct resource bfin_uart0_resources[] = { 210static struct resource bfin_uart0_resources[] = {
211 { 211 {
@@ -355,7 +355,7 @@ static struct platform_device bfin_uart1_device = {
355#endif 355#endif
356#endif 356#endif
357 357
358#if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE) 358#if IS_ENABLED(CONFIG_BFIN_SIR)
359#ifdef CONFIG_BFIN_SIR0 359#ifdef CONFIG_BFIN_SIR0
360static struct resource bfin_sir0_resources[] = { 360static struct resource bfin_sir0_resources[] = {
361 { 361 {
@@ -408,7 +408,7 @@ static struct platform_device bfin_sir1_device = {
408#endif 408#endif
409#endif 409#endif
410 410
411#if defined(CONFIG_USB_MUSB_HDRC) || defined(CONFIG_USB_MUSB_HDRC_MODULE) 411#if IS_ENABLED(CONFIG_USB_MUSB_HDRC)
412static struct resource musb_resources[] = { 412static struct resource musb_resources[] = {
413 [0] = { 413 [0] = {
414 .start = 0xFFCC1000, 414 .start = 0xFFCC1000,
@@ -464,7 +464,7 @@ static struct platform_device musb_device = {
464}; 464};
465#endif 465#endif
466 466
467#if defined(CONFIG_SERIAL_BFIN_SPORT) || defined(CONFIG_SERIAL_BFIN_SPORT_MODULE) 467#if IS_ENABLED(CONFIG_SERIAL_BFIN_SPORT)
468#ifdef CONFIG_SERIAL_BFIN_SPORT0_UART 468#ifdef CONFIG_SERIAL_BFIN_SPORT0_UART
469static struct resource bfin_sport0_uart_resources[] = { 469static struct resource bfin_sport0_uart_resources[] = {
470 { 470 {
@@ -569,7 +569,7 @@ static struct platform_device bfin_sport2_uart_device = {
569#endif 569#endif
570#endif 570#endif
571 571
572#if defined(CONFIG_CAN_BFIN) || defined(CONFIG_CAN_BFIN_MODULE) 572#if IS_ENABLED(CONFIG_CAN_BFIN)
573 573
574static unsigned short bfin_can0_peripherals[] = { 574static unsigned short bfin_can0_peripherals[] = {
575 P_CAN0_RX, P_CAN0_TX, 0 575 P_CAN0_RX, P_CAN0_TX, 0
@@ -610,7 +610,7 @@ static struct platform_device bfin_can0_device = {
610 610
611#endif 611#endif
612 612
613#if defined(CONFIG_MTD_NAND_BF5XX) || defined(CONFIG_MTD_NAND_BF5XX_MODULE) 613#if IS_ENABLED(CONFIG_MTD_NAND_BF5XX)
614static struct mtd_partition partition_info[] = { 614static struct mtd_partition partition_info[] = {
615 { 615 {
616 .name = "bootloader(nand)", 616 .name = "bootloader(nand)",
@@ -660,7 +660,7 @@ static struct platform_device bfin_nand_device = {
660}; 660};
661#endif 661#endif
662 662
663#if defined(CONFIG_SDH_BFIN) || defined(CONFIG_SDH_BFIN_MODULE) 663#if IS_ENABLED(CONFIG_SDH_BFIN)
664 664
665static struct bfin_sd_host bfin_sdh_data = { 665static struct bfin_sd_host bfin_sdh_data = {
666 .dma_chan = CH_RSI, 666 .dma_chan = CH_RSI,
@@ -677,7 +677,7 @@ static struct platform_device bfin_sdh_device = {
677}; 677};
678#endif 678#endif
679 679
680#if defined(CONFIG_MTD_PHYSMAP) || defined(CONFIG_MTD_PHYSMAP_MODULE) 680#if IS_ENABLED(CONFIG_MTD_PHYSMAP)
681static struct mtd_partition ezkit_partitions[] = { 681static struct mtd_partition ezkit_partitions[] = {
682 { 682 {
683 .name = "bootloader(nor)", 683 .name = "bootloader(nor)",
@@ -741,8 +741,7 @@ static struct platform_device ezkit_flash_device = {
741}; 741};
742#endif 742#endif
743 743
744#if defined(CONFIG_MTD_M25P80) \ 744#if IS_ENABLED(CONFIG_MTD_M25P80)
745 || defined(CONFIG_MTD_M25P80_MODULE)
746/* SPI flash chip (w25q32) */ 745/* SPI flash chip (w25q32) */
747static struct mtd_partition bfin_spi_flash_partitions[] = { 746static struct mtd_partition bfin_spi_flash_partitions[] = {
748 { 747 {
@@ -773,21 +772,20 @@ static struct bfin_spi3_chip spi_flash_chip_info = {
773}; 772};
774#endif 773#endif
775 774
776#if defined(CONFIG_SPI_SPIDEV) || defined(CONFIG_SPI_SPIDEV_MODULE) 775#if IS_ENABLED(CONFIG_SPI_SPIDEV)
777static struct bfin_spi3_chip spidev_chip_info = { 776static struct bfin_spi3_chip spidev_chip_info = {
778 .enable_dma = true, 777 .enable_dma = true,
779}; 778};
780#endif 779#endif
781 780
782#if defined(CONFIG_SND_BF5XX_I2S) || defined(CONFIG_SND_BF5XX_I2S_MODULE) 781#if IS_ENABLED(CONFIG_SND_BF5XX_I2S)
783static struct platform_device bfin_i2s_pcm = { 782static struct platform_device bfin_i2s_pcm = {
784 .name = "bfin-i2s-pcm-audio", 783 .name = "bfin-i2s-pcm-audio",
785 .id = -1, 784 .id = -1,
786}; 785};
787#endif 786#endif
788 787
789#if defined(CONFIG_SND_BF6XX_SOC_I2S) || \ 788#if IS_ENABLED(CONFIG_SND_BF6XX_SOC_I2S)
790 defined(CONFIG_SND_BF6XX_SOC_I2S_MODULE)
791#include <asm/bfin_sport3.h> 789#include <asm/bfin_sport3.h>
792static struct resource bfin_snd_resources[] = { 790static struct resource bfin_snd_resources[] = {
793 { 791 {
@@ -841,8 +839,7 @@ static struct platform_device bfin_i2s = {
841}; 839};
842#endif 840#endif
843 841
844#if defined(CONFIG_SND_BF5XX_SOC_AD1836) \ 842#if IS_ENABLED(CONFIG_SND_BF5XX_SOC_AD1836)
845 || defined(CONFIG_SND_BF5XX_SOC_AD1836_MODULE)
846static const char * const ad1836_link[] = { 843static const char * const ad1836_link[] = {
847 "bfin-i2s.0", 844 "bfin-i2s.0",
848 "spi0.76", 845 "spi0.76",
@@ -856,14 +853,13 @@ static struct platform_device bfin_ad1836_machine = {
856}; 853};
857#endif 854#endif
858 855
859#if defined(CONFIG_SND_SOC_BFIN_EVAL_ADAU1X61) || \ 856#if IS_ENABLED(CONFIG_SND_SOC_BFIN_EVAL_ADAU1X61)
860 defined(CONFIG_SND_SOC_BFIN_EVAL_ADAU1X61_MODULE)
861static struct platform_device adau1761_device = { 857static struct platform_device adau1761_device = {
862 .name = "bfin-eval-adau1x61", 858 .name = "bfin-eval-adau1x61",
863}; 859};
864#endif 860#endif
865 861
866#if defined(CONFIG_SND_SOC_ADAU1761) || defined(CONFIG_SND_SOC_ADAU1761_MODULE) 862#if IS_ENABLED(CONFIG_SND_SOC_ADAU1761)
867#include <sound/adau17x1.h> 863#include <sound/adau17x1.h>
868static struct adau1761_platform_data adau1761_info = { 864static struct adau1761_platform_data adau1761_info = {
869 .lineout_mode = ADAU1761_OUTPUT_MODE_LINE, 865 .lineout_mode = ADAU1761_OUTPUT_MODE_LINE,
@@ -871,8 +867,7 @@ static struct adau1761_platform_data adau1761_info = {
871}; 867};
872#endif 868#endif
873 869
874#if defined(CONFIG_VIDEO_BLACKFIN_CAPTURE) \ 870#if IS_ENABLED(CONFIG_VIDEO_BLACKFIN_CAPTURE)
875 || defined(CONFIG_VIDEO_BLACKFIN_CAPTURE_MODULE)
876#include <linux/videodev2.h> 871#include <linux/videodev2.h>
877#include <media/blackfin/bfin_capture.h> 872#include <media/blackfin/bfin_capture.h>
878#include <media/blackfin/ppi.h> 873#include <media/blackfin/ppi.h>
@@ -882,7 +877,7 @@ static const unsigned short ppi_req[] = {
882 P_PPI0_D4, P_PPI0_D5, P_PPI0_D6, P_PPI0_D7, 877 P_PPI0_D4, P_PPI0_D5, P_PPI0_D6, P_PPI0_D7,
883 P_PPI0_D8, P_PPI0_D9, P_PPI0_D10, P_PPI0_D11, 878 P_PPI0_D8, P_PPI0_D9, P_PPI0_D10, P_PPI0_D11,
884 P_PPI0_D12, P_PPI0_D13, P_PPI0_D14, P_PPI0_D15, 879 P_PPI0_D12, P_PPI0_D13, P_PPI0_D14, P_PPI0_D15,
885#if !defined(CONFIG_VIDEO_VS6624) && !defined(CONFIG_VIDEO_VS6624_MODULE) 880#if !IS_ENABLED(CONFIG_VIDEO_VS6624)
886 P_PPI0_D16, P_PPI0_D17, P_PPI0_D18, P_PPI0_D19, 881 P_PPI0_D16, P_PPI0_D17, P_PPI0_D18, P_PPI0_D19,
887 P_PPI0_D20, P_PPI0_D21, P_PPI0_D22, P_PPI0_D23, 882 P_PPI0_D20, P_PPI0_D21, P_PPI0_D22, P_PPI0_D23,
888#endif 883#endif
@@ -898,8 +893,7 @@ static const struct ppi_info ppi_info = {
898 .pin_req = ppi_req, 893 .pin_req = ppi_req,
899}; 894};
900 895
901#if defined(CONFIG_VIDEO_VS6624) \ 896#if IS_ENABLED(CONFIG_VIDEO_VS6624)
902 || defined(CONFIG_VIDEO_VS6624_MODULE)
903static struct v4l2_input vs6624_inputs[] = { 897static struct v4l2_input vs6624_inputs[] = {
904 { 898 {
905 .index = 0, 899 .index = 0,
@@ -936,8 +930,7 @@ static struct bfin_capture_config bfin_capture_data = {
936}; 930};
937#endif 931#endif
938 932
939#if defined(CONFIG_VIDEO_ADV7842) \ 933#if IS_ENABLED(CONFIG_VIDEO_ADV7842)
940 || defined(CONFIG_VIDEO_ADV7842_MODULE)
941#include <media/adv7842.h> 934#include <media/adv7842.h>
942 935
943static struct v4l2_input adv7842_inputs[] = { 936static struct v4l2_input adv7842_inputs[] = {
@@ -1067,8 +1060,7 @@ static struct platform_device bfin_capture_device = {
1067}; 1060};
1068#endif 1061#endif
1069 1062
1070#if defined(CONFIG_VIDEO_BLACKFIN_DISPLAY) \ 1063#if IS_ENABLED(CONFIG_VIDEO_BLACKFIN_DISPLAY)
1071 || defined(CONFIG_VIDEO_BLACKFIN_DISPLAY_MODULE)
1072#include <linux/videodev2.h> 1064#include <linux/videodev2.h>
1073#include <media/blackfin/bfin_display.h> 1065#include <media/blackfin/bfin_display.h>
1074#include <media/blackfin/ppi.h> 1066#include <media/blackfin/ppi.h>
@@ -1090,8 +1082,7 @@ static const struct ppi_info ppi_info = {
1090 .pin_req = ppi_req_disp, 1082 .pin_req = ppi_req_disp,
1091}; 1083};
1092 1084
1093#if defined(CONFIG_VIDEO_ADV7511) \ 1085#if IS_ENABLED(CONFIG_VIDEO_ADV7511)
1094 || defined(CONFIG_VIDEO_ADV7511_MODULE)
1095#include <media/adv7511.h> 1086#include <media/adv7511.h>
1096 1087
1097static struct v4l2_output adv7511_outputs[] = { 1088static struct v4l2_output adv7511_outputs[] = {
@@ -1313,7 +1304,7 @@ static struct platform_device bfin_crypto_crc_device = {
1313}; 1304};
1314#endif 1305#endif
1315 1306
1316#if defined(CONFIG_TOUCHSCREEN_AD7877) || defined(CONFIG_TOUCHSCREEN_AD7877_MODULE) 1307#if IS_ENABLED(CONFIG_TOUCHSCREEN_AD7877)
1317static const struct ad7877_platform_data bfin_ad7877_ts_info = { 1308static const struct ad7877_platform_data bfin_ad7877_ts_info = {
1318 .model = 7877, 1309 .model = 7877,
1319 .vref_delay_usecs = 50, /* internal, no capacitor */ 1310 .vref_delay_usecs = 50, /* internal, no capacitor */
@@ -1679,7 +1670,7 @@ static struct platform_device bfin_gpg_device = {
1679 1670
1680#endif 1671#endif
1681 1672
1682#if defined(CONFIG_KEYBOARD_GPIO) || defined(CONFIG_KEYBOARD_GPIO_MODULE) 1673#if IS_ENABLED(CONFIG_KEYBOARD_GPIO)
1683#include <linux/input.h> 1674#include <linux/input.h>
1684#include <linux/gpio_keys.h> 1675#include <linux/gpio_keys.h>
1685 1676
@@ -1702,8 +1693,7 @@ static struct platform_device bfin_device_gpiokeys = {
1702#endif 1693#endif
1703 1694
1704static struct spi_board_info bfin_spi_board_info[] __initdata = { 1695static struct spi_board_info bfin_spi_board_info[] __initdata = {
1705#if defined(CONFIG_MTD_M25P80) \ 1696#if IS_ENABLED(CONFIG_MTD_M25P80)
1706 || defined(CONFIG_MTD_M25P80_MODULE)
1707 { 1697 {
1708 /* the modalias must be the same as spi device driver name */ 1698 /* the modalias must be the same as spi device driver name */
1709 .modalias = "m25p80", /* Name of spi_driver for this device */ 1699 .modalias = "m25p80", /* Name of spi_driver for this device */
@@ -1715,7 +1705,7 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
1715 .mode = SPI_MODE_3, 1705 .mode = SPI_MODE_3,
1716 }, 1706 },
1717#endif 1707#endif
1718#if defined(CONFIG_TOUCHSCREEN_AD7877) || defined(CONFIG_TOUCHSCREEN_AD7877_MODULE) 1708#if IS_ENABLED(CONFIG_TOUCHSCREEN_AD7877)
1719 { 1709 {
1720 .modalias = "ad7877", 1710 .modalias = "ad7877",
1721 .platform_data = &bfin_ad7877_ts_info, 1711 .platform_data = &bfin_ad7877_ts_info,
@@ -1725,7 +1715,7 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
1725 .chip_select = MAX_CTRL_CS + GPIO_PC15, /* SPI_SSEL4 */ 1715 .chip_select = MAX_CTRL_CS + GPIO_PC15, /* SPI_SSEL4 */
1726 }, 1716 },
1727#endif 1717#endif
1728#if defined(CONFIG_SPI_SPIDEV) || defined(CONFIG_SPI_SPIDEV_MODULE) 1718#if IS_ENABLED(CONFIG_SPI_SPIDEV)
1729 { 1719 {
1730 .modalias = "spidev", 1720 .modalias = "spidev",
1731 .max_speed_hz = 3125000, /* max spi clock (SCK) speed in HZ */ 1721 .max_speed_hz = 3125000, /* max spi clock (SCK) speed in HZ */
@@ -1734,7 +1724,7 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
1734 .controller_data = &spidev_chip_info, 1724 .controller_data = &spidev_chip_info,
1735 }, 1725 },
1736#endif 1726#endif
1737#if defined(CONFIG_INPUT_ADXL34X_SPI) || defined(CONFIG_INPUT_ADXL34X_SPI_MODULE) 1727#if IS_ENABLED(CONFIG_INPUT_ADXL34X_SPI)
1738 { 1728 {
1739 .modalias = "adxl34x", 1729 .modalias = "adxl34x",
1740 .platform_data = &adxl34x_info, 1730 .platform_data = &adxl34x_info,
@@ -1818,7 +1808,7 @@ static struct platform_device bf60x_spi_master1 = {
1818}; 1808};
1819#endif /* spi master and devices */ 1809#endif /* spi master and devices */
1820 1810
1821#if defined(CONFIG_I2C_BLACKFIN_TWI) || defined(CONFIG_I2C_BLACKFIN_TWI_MODULE) 1811#if IS_ENABLED(CONFIG_I2C_BLACKFIN_TWI)
1822static const u16 bfin_twi0_pins[] = {P_TWI0_SCL, P_TWI0_SDA, 0}; 1812static const u16 bfin_twi0_pins[] = {P_TWI0_SCL, P_TWI0_SDA, 0};
1823 1813
1824static struct resource bfin_twi0_resource[] = { 1814static struct resource bfin_twi0_resource[] = {
@@ -1871,20 +1861,20 @@ static struct platform_device i2c_bfin_twi1_device = {
1871#endif 1861#endif
1872 1862
1873static struct i2c_board_info __initdata bfin_i2c_board_info0[] = { 1863static struct i2c_board_info __initdata bfin_i2c_board_info0[] = {
1874#if defined(CONFIG_INPUT_ADXL34X_I2C) || defined(CONFIG_INPUT_ADXL34X_I2C_MODULE) 1864#if IS_ENABLED(CONFIG_INPUT_ADXL34X_I2C)
1875 { 1865 {
1876 I2C_BOARD_INFO("adxl34x", 0x53), 1866 I2C_BOARD_INFO("adxl34x", 0x53),
1877 .irq = IRQ_PC5, 1867 .irq = IRQ_PC5,
1878 .platform_data = (void *)&adxl34x_info, 1868 .platform_data = (void *)&adxl34x_info,
1879 }, 1869 },
1880#endif 1870#endif
1881#if defined(CONFIG_SND_SOC_ADAU1761) || defined(CONFIG_SND_SOC_ADAU1761_MODULE) 1871#if IS_ENABLED(CONFIG_SND_SOC_ADAU1761)
1882 { 1872 {
1883 I2C_BOARD_INFO("adau1761", 0x38), 1873 I2C_BOARD_INFO("adau1761", 0x38),
1884 .platform_data = (void *)&adau1761_info 1874 .platform_data = (void *)&adau1761_info
1885 }, 1875 },
1886#endif 1876#endif
1887#if defined(CONFIG_SND_SOC_SSM2602) || defined(CONFIG_SND_SOC_SSM2602_MODULE) 1877#if IS_ENABLED(CONFIG_SND_SOC_SSM2602)
1888 { 1878 {
1889 I2C_BOARD_INFO("ssm2602", 0x1b), 1879 I2C_BOARD_INFO("ssm2602", 0x1b),
1890 }, 1880 },
@@ -1942,11 +1932,11 @@ static struct platform_device *ezkit_devices[] __initdata = {
1942 &bfin_gpg_device, 1932 &bfin_gpg_device,
1943#endif 1933#endif
1944 1934
1945#if defined(CONFIG_RTC_DRV_BFIN) || defined(CONFIG_RTC_DRV_BFIN_MODULE) 1935#if IS_ENABLED(CONFIG_RTC_DRV_BFIN)
1946 &rtc_device, 1936 &rtc_device,
1947#endif 1937#endif
1948 1938
1949#if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE) 1939#if IS_ENABLED(CONFIG_SERIAL_BFIN)
1950#ifdef CONFIG_SERIAL_BFIN_UART0 1940#ifdef CONFIG_SERIAL_BFIN_UART0
1951 &bfin_uart0_device, 1941 &bfin_uart0_device,
1952#endif 1942#endif
@@ -1955,7 +1945,7 @@ static struct platform_device *ezkit_devices[] __initdata = {
1955#endif 1945#endif
1956#endif 1946#endif
1957 1947
1958#if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE) 1948#if IS_ENABLED(CONFIG_BFIN_SIR)
1959#ifdef CONFIG_BFIN_SIR0 1949#ifdef CONFIG_BFIN_SIR0
1960 &bfin_sir0_device, 1950 &bfin_sir0_device,
1961#endif 1951#endif
@@ -1964,19 +1954,19 @@ static struct platform_device *ezkit_devices[] __initdata = {
1964#endif 1954#endif
1965#endif 1955#endif
1966 1956
1967#if defined(CONFIG_STMMAC_ETH) || defined(CONFIG_STMMAC_ETH_MODULE) 1957#if IS_ENABLED(CONFIG_STMMAC_ETH)
1968 &bfin_eth_device, 1958 &bfin_eth_device,
1969#endif 1959#endif
1970 1960
1971#if defined(CONFIG_USB_MUSB_HDRC) || defined(CONFIG_USB_MUSB_HDRC_MODULE) 1961#if IS_ENABLED(CONFIG_USB_MUSB_HDRC)
1972 &musb_device, 1962 &musb_device,
1973#endif 1963#endif
1974 1964
1975#if defined(CONFIG_USB_ISP1760_HCD) || defined(CONFIG_USB_ISP1760_HCD_MODULE) 1965#if IS_ENABLED(CONFIG_USB_ISP1760_HCD)
1976 &bfin_isp1760_device, 1966 &bfin_isp1760_device,
1977#endif 1967#endif
1978 1968
1979#if defined(CONFIG_SERIAL_BFIN_SPORT) || defined(CONFIG_SERIAL_BFIN_SPORT_MODULE) 1969#if IS_ENABLED(CONFIG_SERIAL_BFIN_SPORT)
1980#ifdef CONFIG_SERIAL_BFIN_SPORT0_UART 1970#ifdef CONFIG_SERIAL_BFIN_SPORT0_UART
1981 &bfin_sport0_uart_device, 1971 &bfin_sport0_uart_device,
1982#endif 1972#endif
@@ -1988,15 +1978,15 @@ static struct platform_device *ezkit_devices[] __initdata = {
1988#endif 1978#endif
1989#endif 1979#endif
1990 1980
1991#if defined(CONFIG_CAN_BFIN) || defined(CONFIG_CAN_BFIN_MODULE) 1981#if IS_ENABLED(CONFIG_CAN_BFIN)
1992 &bfin_can0_device, 1982 &bfin_can0_device,
1993#endif 1983#endif
1994 1984
1995#if defined(CONFIG_MTD_NAND_BF5XX) || defined(CONFIG_MTD_NAND_BF5XX_MODULE) 1985#if IS_ENABLED(CONFIG_MTD_NAND_BF5XX)
1996 &bfin_nand_device, 1986 &bfin_nand_device,
1997#endif 1987#endif
1998 1988
1999#if defined(CONFIG_SDH_BFIN) || defined(CONFIG_SDH_BFIN_MODULE) 1989#if IS_ENABLED(CONFIG_SDH_BFIN)
2000 &bfin_sdh_device, 1990 &bfin_sdh_device,
2001#endif 1991#endif
2002 1992
@@ -2005,11 +1995,11 @@ static struct platform_device *ezkit_devices[] __initdata = {
2005 &bf60x_spi_master1, 1995 &bf60x_spi_master1,
2006#endif 1996#endif
2007 1997
2008#if defined(CONFIG_INPUT_BFIN_ROTARY) || defined(CONFIG_INPUT_BFIN_ROTARY_MODULE) 1998#if IS_ENABLED(CONFIG_INPUT_BFIN_ROTARY)
2009 &bfin_rotary_device, 1999 &bfin_rotary_device,
2010#endif 2000#endif
2011 2001
2012#if defined(CONFIG_I2C_BLACKFIN_TWI) || defined(CONFIG_I2C_BLACKFIN_TWI_MODULE) 2002#if IS_ENABLED(CONFIG_I2C_BLACKFIN_TWI)
2013 &i2c_bfin_twi0_device, 2003 &i2c_bfin_twi0_device,
2014#if !defined(CONFIG_BF542) 2004#if !defined(CONFIG_BF542)
2015 &i2c_bfin_twi1_device, 2005 &i2c_bfin_twi1_device,
@@ -2024,34 +2014,29 @@ static struct platform_device *ezkit_devices[] __initdata = {
2024 &bfin_crypto_crc_device, 2014 &bfin_crypto_crc_device,
2025#endif 2015#endif
2026 2016
2027#if defined(CONFIG_KEYBOARD_GPIO) || defined(CONFIG_KEYBOARD_GPIO_MODULE) 2017#if IS_ENABLED(CONFIG_KEYBOARD_GPIO)
2028 &bfin_device_gpiokeys, 2018 &bfin_device_gpiokeys,
2029#endif 2019#endif
2030 2020
2031#if defined(CONFIG_MTD_PHYSMAP) || defined(CONFIG_MTD_PHYSMAP_MODULE) 2021#if IS_ENABLED(CONFIG_MTD_PHYSMAP)
2032 &ezkit_flash_device, 2022 &ezkit_flash_device,
2033#endif 2023#endif
2034#if defined(CONFIG_SND_BF5XX_I2S) || defined(CONFIG_SND_BF5XX_I2S_MODULE) 2024#if IS_ENABLED(CONFIG_SND_BF5XX_I2S)
2035 &bfin_i2s_pcm, 2025 &bfin_i2s_pcm,
2036#endif 2026#endif
2037#if defined(CONFIG_SND_BF6XX_SOC_I2S) || \ 2027#if IS_ENABLED(CONFIG_SND_BF6XX_SOC_I2S)
2038 defined(CONFIG_SND_BF6XX_SOC_I2S_MODULE)
2039 &bfin_i2s, 2028 &bfin_i2s,
2040#endif 2029#endif
2041#if defined(CONFIG_SND_BF5XX_SOC_AD1836) || \ 2030#if IS_ENABLED(CONFIG_SND_BF5XX_SOC_AD1836)
2042 defined(CONFIG_SND_BF5XX_SOC_AD1836_MODULE)
2043 &bfin_ad1836_machine, 2031 &bfin_ad1836_machine,
2044#endif 2032#endif
2045#if defined(CONFIG_SND_SOC_BFIN_EVAL_ADAU1X61) || \ 2033#if IS_ENABLED(CONFIG_SND_SOC_BFIN_EVAL_ADAU1X61)
2046 defined(CONFIG_SND_SOC_BFIN_EVAL_ADAU1X61_MODULE)
2047 &adau1761_device, 2034 &adau1761_device,
2048#endif 2035#endif
2049#if defined(CONFIG_VIDEO_BLACKFIN_CAPTURE) \ 2036#if IS_ENABLED(CONFIG_VIDEO_BLACKFIN_CAPTURE)
2050 || defined(CONFIG_VIDEO_BLACKFIN_CAPTURE_MODULE)
2051 &bfin_capture_device, 2037 &bfin_capture_device,
2052#endif 2038#endif
2053#if defined(CONFIG_VIDEO_BLACKFIN_DISPLAY) \ 2039#if IS_ENABLED(CONFIG_VIDEO_BLACKFIN_DISPLAY)
2054 || defined(CONFIG_VIDEO_BLACKFIN_DISPLAY_MODULE)
2055 &bfin_display_device, 2040 &bfin_display_device,
2056#endif 2041#endif
2057 2042
@@ -2075,9 +2060,9 @@ static struct pinctrl_map __initdata bfin_pinmux_map[] = {
2075 PIN_MAP_MUX_GROUP_DEFAULT("physmap-flash.0", "pinctrl-adi2.0", NULL, "smc0"), 2060 PIN_MAP_MUX_GROUP_DEFAULT("physmap-flash.0", "pinctrl-adi2.0", NULL, "smc0"),
2076 PIN_MAP_MUX_GROUP_DEFAULT("bf609_nl8048.2", "pinctrl-adi2.0", NULL, "ppi2_16b"), 2061 PIN_MAP_MUX_GROUP_DEFAULT("bf609_nl8048.2", "pinctrl-adi2.0", NULL, "ppi2_16b"),
2077 PIN_MAP_MUX_GROUP_DEFAULT("bfin_display.0", "pinctrl-adi2.0", NULL, "ppi0_16b"), 2062 PIN_MAP_MUX_GROUP_DEFAULT("bfin_display.0", "pinctrl-adi2.0", NULL, "ppi0_16b"),
2078#if defined(CONFIG_VIDEO_MT9M114) || defined(CONFIG_VIDEO_MT9M114_MODULE) 2063#if IS_ENABLED(CONFIG_VIDEO_MT9M114)
2079 PIN_MAP_MUX_GROUP_DEFAULT("bfin_capture.0", "pinctrl-adi2.0", NULL, "ppi0_8b"), 2064 PIN_MAP_MUX_GROUP_DEFAULT("bfin_capture.0", "pinctrl-adi2.0", NULL, "ppi0_8b"),
2080#elif defined(CONFIG_VIDEO_VS6624) || defined(CONFIG_VIDEO_VS6624_MODULE) 2065#elif IS_ENABLED(CONFIG_VIDEO_VS6624)
2081 PIN_MAP_MUX_GROUP_DEFAULT("bfin_capture.0", "pinctrl-adi2.0", NULL, "ppi0_16b"), 2066 PIN_MAP_MUX_GROUP_DEFAULT("bfin_capture.0", "pinctrl-adi2.0", NULL, "ppi0_16b"),
2082#else 2067#else
2083 PIN_MAP_MUX_GROUP_DEFAULT("bfin_capture.0", "pinctrl-adi2.0", NULL, "ppi0_24b"), 2068 PIN_MAP_MUX_GROUP_DEFAULT("bfin_capture.0", "pinctrl-adi2.0", NULL, "ppi0_24b"),
diff --git a/arch/blackfin/mach-bf609/clock.c b/arch/blackfin/mach-bf609/clock.c
index 13644ed25489..56200f37cfc8 100644
--- a/arch/blackfin/mach-bf609/clock.c
+++ b/arch/blackfin/mach-bf609/clock.c
@@ -73,24 +73,6 @@ static void clk_reg_write_mask(u32 reg, uint32_t val, uint32_t mask)
73 bfin_write32(reg, val2); 73 bfin_write32(reg, val2);
74} 74}
75 75
76static void clk_reg_set_bits(u32 reg, uint32_t mask)
77{
78 u32 val;
79
80 val = bfin_read32(reg);
81 val |= mask;
82 bfin_write32(reg, val);
83}
84
85static void clk_reg_clear_bits(u32 reg, uint32_t mask)
86{
87 u32 val;
88
89 val = bfin_read32(reg);
90 val &= ~mask;
91 bfin_write32(reg, val);
92}
93
94int wait_for_pll_align(void) 76int wait_for_pll_align(void)
95{ 77{
96 int i = 10000; 78 int i = 10000;
diff --git a/arch/blackfin/mach-bf609/pm.c b/arch/blackfin/mach-bf609/pm.c
index ad505d9db4a8..0cdd6955c7be 100644
--- a/arch/blackfin/mach-bf609/pm.c
+++ b/arch/blackfin/mach-bf609/pm.c
@@ -210,7 +210,7 @@ void bf609_cpu_pm_enter(suspend_state_t state)
210 210
211#ifdef CONFIG_PM_BFIN_WAKE_PB15 211#ifdef CONFIG_PM_BFIN_WAKE_PB15
212 wakeup |= PB15WE; 212 wakeup |= PB15WE;
213# if CONFIG_PM_BFIN_WAKE_PA15_POL 213# if CONFIG_PM_BFIN_WAKE_PB15_POL
214 wakeup_pol |= PB15WE; 214 wakeup_pol |= PB15WE;
215# endif 215# endif
216#endif 216#endif
diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
index 1325c3bc58e1..12c3afee0f6f 100644
--- a/arch/ia64/Kconfig
+++ b/arch/ia64/Kconfig
@@ -45,6 +45,7 @@ config IA64
45 select HAVE_MOD_ARCH_SPECIFIC 45 select HAVE_MOD_ARCH_SPECIFIC
46 select MODULES_USE_ELF_RELA 46 select MODULES_USE_ELF_RELA
47 select ARCH_USE_CMPXCHG_LOCKREF 47 select ARCH_USE_CMPXCHG_LOCKREF
48 select HAVE_ARCH_AUDITSYSCALL
48 default y 49 default y
49 help 50 help
50 The Itanium Processor Family is Intel's 64-bit successor to 51 The Itanium Processor Family is Intel's 64-bit successor to
diff --git a/arch/mips/include/asm/syscall.h b/arch/mips/include/asm/syscall.h
index 6c488c85d791..c6e9cd2bca8d 100644
--- a/arch/mips/include/asm/syscall.h
+++ b/arch/mips/include/asm/syscall.h
@@ -14,7 +14,7 @@
14#define __ASM_MIPS_SYSCALL_H 14#define __ASM_MIPS_SYSCALL_H
15 15
16#include <linux/compiler.h> 16#include <linux/compiler.h>
17#include <linux/audit.h> 17#include <uapi/linux/audit.h>
18#include <linux/elf-em.h> 18#include <linux/elf-em.h>
19#include <linux/kernel.h> 19#include <linux/kernel.h>
20#include <linux/sched.h> 20#include <linux/sched.h>
@@ -127,12 +127,11 @@ extern const unsigned long sys_call_table[];
127extern const unsigned long sys32_call_table[]; 127extern const unsigned long sys32_call_table[];
128extern const unsigned long sysn32_call_table[]; 128extern const unsigned long sysn32_call_table[];
129 129
130static inline int syscall_get_arch(struct task_struct *task, 130static inline int syscall_get_arch(void)
131 struct pt_regs *regs)
132{ 131{
133 int arch = EM_MIPS; 132 int arch = EM_MIPS;
134#ifdef CONFIG_64BIT 133#ifdef CONFIG_64BIT
135 if (!test_tsk_thread_flag(task, TIF_32BIT_REGS)) 134 if (!test_thread_flag(TIF_32BIT_REGS))
136 arch |= __AUDIT_ARCH_64BIT; 135 arch |= __AUDIT_ARCH_64BIT;
137#endif 136#endif
138#if defined(__LITTLE_ENDIAN) 137#if defined(__LITTLE_ENDIAN)
diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c
index 7271e5a83081..71f85f427034 100644
--- a/arch/mips/kernel/ptrace.c
+++ b/arch/mips/kernel/ptrace.c
@@ -649,7 +649,7 @@ asmlinkage long syscall_trace_enter(struct pt_regs *regs, long syscall)
649 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT))) 649 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
650 trace_sys_enter(regs, regs->regs[2]); 650 trace_sys_enter(regs, regs->regs[2]);
651 651
652 audit_syscall_entry(syscall_get_arch(current, regs), 652 audit_syscall_entry(syscall_get_arch(),
653 syscall, 653 syscall,
654 regs->regs[4], regs->regs[5], 654 regs->regs[4], regs->regs[5],
655 regs->regs[6], regs->regs[7]); 655 regs->regs[6], regs->regs[7]);
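
With the MIPS conversion above, syscall_get_arch() no longer takes a task/regs pair; it reports the syscall ABI of the current task only, which is all the audit entry path shown in ptrace.c needs. A minimal sketch of how a caller might use the argument-free form follows; the helper name is invented for illustration, while syscall_get_arch() and __AUDIT_ARCH_64BIT are the real identifiers.

```c
#include <linux/types.h>	/* bool */
#include <linux/audit.h>	/* __AUDIT_ARCH_64BIT (via uapi/linux/audit.h) */
#include <asm/syscall.h>	/* syscall_get_arch() */

/* Hypothetical helper: does the current task use a 64-bit syscall ABI? */
static inline bool current_abi_is_64bit(void)
{
	return (syscall_get_arch() & __AUDIT_ARCH_64BIT) != 0;
}
```
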
diff --git a/arch/mn10300/include/asm/highmem.h b/arch/mn10300/include/asm/highmem.h
index 7c137cd8aa37..2fbbe4d920aa 100644
--- a/arch/mn10300/include/asm/highmem.h
+++ b/arch/mn10300/include/asm/highmem.h
@@ -70,7 +70,7 @@ static inline void kunmap(struct page *page)
70 * be used in IRQ contexts, so in some (very limited) cases we need 70 * be used in IRQ contexts, so in some (very limited) cases we need
71 * it. 71 * it.
72 */ 72 */
73static inline unsigned long kmap_atomic(struct page *page) 73static inline void *kmap_atomic(struct page *page)
74{ 74{
75 unsigned long vaddr; 75 unsigned long vaddr;
76 int idx, type; 76 int idx, type;
@@ -89,7 +89,7 @@ static inline unsigned long kmap_atomic(struct page *page)
89 set_pte(kmap_pte - idx, mk_pte(page, kmap_prot)); 89 set_pte(kmap_pte - idx, mk_pte(page, kmap_prot));
90 local_flush_tlb_one(vaddr); 90 local_flush_tlb_one(vaddr);
91 91
92 return vaddr; 92 return (void *)vaddr;
93} 93}
94 94
95static inline void __kunmap_atomic(unsigned long vaddr) 95static inline void __kunmap_atomic(unsigned long vaddr)
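
The mn10300 hunk above brings kmap_atomic() in line with the generic API, which returns void * so callers can dereference the mapping directly and hand the same pointer back when unmapping. A minimal sketch of a typical caller under that assumption follows; kmap_atomic()/kunmap_atomic() are the real interfaces, while copy_into_page() is an invented name used only for illustration.

```c
#include <linux/highmem.h>
#include <linux/string.h>

/* Hypothetical caller: copy a buffer into a (possibly highmem) page. */
static void copy_into_page(struct page *page, const void *src, size_t len)
{
	void *vaddr = kmap_atomic(page);	/* void *, per the fix above */

	memcpy(vaddr, src, len);
	kunmap_atomic(vaddr);
}
```
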
diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig
index bb2a8ec440e7..1faefed32749 100644
--- a/arch/parisc/Kconfig
+++ b/arch/parisc/Kconfig
@@ -28,6 +28,7 @@ config PARISC
28 select CLONE_BACKWARDS 28 select CLONE_BACKWARDS
29 select TTY # Needed for pdc_cons.c 29 select TTY # Needed for pdc_cons.c
30 select HAVE_DEBUG_STACKOVERFLOW 30 select HAVE_DEBUG_STACKOVERFLOW
31 select HAVE_ARCH_AUDITSYSCALL
31 32
32 help 33 help
33 The PA-RISC microprocessor is designed by Hewlett-Packard and used 34 The PA-RISC microprocessor is designed by Hewlett-Packard and used
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 6c03a94991ad..e0998997943b 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -144,6 +144,7 @@ config PPC
144 select HAVE_DEBUG_STACKOVERFLOW 144 select HAVE_DEBUG_STACKOVERFLOW
145 select HAVE_IRQ_EXIT_ON_IRQ_STACK 145 select HAVE_IRQ_EXIT_ON_IRQ_STACK
146 select ARCH_USE_CMPXCHG_LOCKREF if PPC64 146 select ARCH_USE_CMPXCHG_LOCKREF if PPC64
147 select HAVE_ARCH_AUDITSYSCALL
147 148
148config GENERIC_CSUM 149config GENERIC_CSUM
149 def_bool CPU_LITTLE_ENDIAN 150 def_bool CPU_LITTLE_ENDIAN
diff --git a/arch/powerpc/configs/ppc6xx_defconfig b/arch/powerpc/configs/ppc6xx_defconfig
index c2353bf059fd..175a8b99c196 100644
--- a/arch/powerpc/configs/ppc6xx_defconfig
+++ b/arch/powerpc/configs/ppc6xx_defconfig
@@ -1244,7 +1244,6 @@ CONFIG_DEBUG_SPINLOCK_SLEEP=y
1244CONFIG_DEBUG_HIGHMEM=y 1244CONFIG_DEBUG_HIGHMEM=y
1245CONFIG_DEBUG_INFO=y 1245CONFIG_DEBUG_INFO=y
1246CONFIG_DEBUG_VM=y 1246CONFIG_DEBUG_VM=y
1247CONFIG_DEBUG_WRITECOUNT=y
1248CONFIG_DEBUG_LIST=y 1247CONFIG_DEBUG_LIST=y
1249CONFIG_DEBUG_SG=y 1248CONFIG_DEBUG_SG=y
1250# CONFIG_RCU_CPU_STALL_DETECTOR is not set 1249# CONFIG_RCU_CPU_STALL_DETECTOR is not set
diff --git a/arch/powerpc/configs/ps3_defconfig b/arch/powerpc/configs/ps3_defconfig
index 139a8308070c..fdee37fab81c 100644
--- a/arch/powerpc/configs/ps3_defconfig
+++ b/arch/powerpc/configs/ps3_defconfig
@@ -174,7 +174,6 @@ CONFIG_DETECT_HUNG_TASK=y
174CONFIG_PROVE_LOCKING=y 174CONFIG_PROVE_LOCKING=y
175CONFIG_DEBUG_LOCKDEP=y 175CONFIG_DEBUG_LOCKDEP=y
176CONFIG_DEBUG_INFO=y 176CONFIG_DEBUG_INFO=y
177CONFIG_DEBUG_WRITECOUNT=y
178CONFIG_DEBUG_MEMORY_INIT=y 177CONFIG_DEBUG_MEMORY_INIT=y
179CONFIG_DEBUG_LIST=y 178CONFIG_DEBUG_LIST=y
180CONFIG_RCU_CPU_STALL_TIMEOUT=60 179CONFIG_RCU_CPU_STALL_TIMEOUT=60
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index 3d7a50a08f5e..fbe24377eda3 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -201,7 +201,8 @@ static void cpu_ready_for_interrupts(void)
201 get_paca()->kernel_msr = MSR_KERNEL; 201 get_paca()->kernel_msr = MSR_KERNEL;
202 202
203 /* Enable AIL if supported */ 203 /* Enable AIL if supported */
204 if (cpu_has_feature(CPU_FTR_ARCH_207S)) { 204 if (cpu_has_feature(CPU_FTR_HVMODE) &&
205 cpu_has_feature(CPU_FTR_ARCH_207S)) {
205 unsigned long lpcr = mfspr(SPRN_LPCR); 206 unsigned long lpcr = mfspr(SPRN_LPCR);
206 mtspr(SPRN_LPCR, lpcr | LPCR_AIL_3); 207 mtspr(SPRN_LPCR, lpcr | LPCR_AIL_3);
207 } 208 }
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index 346d21678ffd..d68fe34799b0 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -103,6 +103,7 @@ config S390
103 select GENERIC_SMP_IDLE_THREAD 103 select GENERIC_SMP_IDLE_THREAD
104 select GENERIC_TIME_VSYSCALL 104 select GENERIC_TIME_VSYSCALL
105 select HAVE_ALIGNED_STRUCT_PAGE if SLUB 105 select HAVE_ALIGNED_STRUCT_PAGE if SLUB
106 select HAVE_ARCH_AUDITSYSCALL
106 select HAVE_ARCH_JUMP_LABEL if !MARCH_G5 107 select HAVE_ARCH_JUMP_LABEL if !MARCH_G5
107 select HAVE_ARCH_SECCOMP_FILTER 108 select HAVE_ARCH_SECCOMP_FILTER
108 select HAVE_ARCH_TRACEHOOK 109 select HAVE_ARCH_TRACEHOOK
diff --git a/arch/s390/configs/default_defconfig b/arch/s390/configs/default_defconfig
index ddaae2f5c913..8df022c43af7 100644
--- a/arch/s390/configs/default_defconfig
+++ b/arch/s390/configs/default_defconfig
@@ -581,7 +581,6 @@ CONFIG_LOCK_STAT=y
581CONFIG_DEBUG_LOCKDEP=y 581CONFIG_DEBUG_LOCKDEP=y
582CONFIG_DEBUG_ATOMIC_SLEEP=y 582CONFIG_DEBUG_ATOMIC_SLEEP=y
583CONFIG_DEBUG_LOCKING_API_SELFTESTS=y 583CONFIG_DEBUG_LOCKING_API_SELFTESTS=y
584CONFIG_DEBUG_WRITECOUNT=y
585CONFIG_DEBUG_LIST=y 584CONFIG_DEBUG_LIST=y
586CONFIG_DEBUG_SG=y 585CONFIG_DEBUG_SG=y
587CONFIG_DEBUG_NOTIFIERS=y 586CONFIG_DEBUG_NOTIFIERS=y
diff --git a/arch/s390/include/asm/syscall.h b/arch/s390/include/asm/syscall.h
index cd29d2f4e4f3..777687055e7b 100644
--- a/arch/s390/include/asm/syscall.h
+++ b/arch/s390/include/asm/syscall.h
@@ -12,7 +12,7 @@
12#ifndef _ASM_SYSCALL_H 12#ifndef _ASM_SYSCALL_H
13#define _ASM_SYSCALL_H 1 13#define _ASM_SYSCALL_H 1
14 14
15#include <linux/audit.h> 15#include <uapi/linux/audit.h>
16#include <linux/sched.h> 16#include <linux/sched.h>
17#include <linux/err.h> 17#include <linux/err.h>
18#include <asm/ptrace.h> 18#include <asm/ptrace.h>
@@ -89,11 +89,10 @@ static inline void syscall_set_arguments(struct task_struct *task,
89 regs->orig_gpr2 = args[0]; 89 regs->orig_gpr2 = args[0];
90} 90}
91 91
92static inline int syscall_get_arch(struct task_struct *task, 92static inline int syscall_get_arch(void)
93 struct pt_regs *regs)
94{ 93{
95#ifdef CONFIG_COMPAT 94#ifdef CONFIG_COMPAT
96 if (test_tsk_thread_flag(task, TIF_31BIT)) 95 if (test_tsk_thread_flag(current, TIF_31BIT))
97 return AUDIT_ARCH_S390; 96 return AUDIT_ARCH_S390;
98#endif 97#endif
99 return sizeof(long) == 8 ? AUDIT_ARCH_S390X : AUDIT_ARCH_S390; 98 return sizeof(long) == 8 ? AUDIT_ARCH_S390X : AUDIT_ARCH_S390;
diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig
index ba55e939a820..834b67c4db5a 100644
--- a/arch/sh/Kconfig
+++ b/arch/sh/Kconfig
@@ -42,6 +42,7 @@ config SUPERH
42 select MODULES_USE_ELF_RELA 42 select MODULES_USE_ELF_RELA
43 select OLD_SIGSUSPEND 43 select OLD_SIGSUSPEND
44 select OLD_SIGACTION 44 select OLD_SIGACTION
45 select HAVE_ARCH_AUDITSYSCALL
45 help 46 help
46 The SuperH is a RISC processor targeted for use in embedded systems 47 The SuperH is a RISC processor targeted for use in embedded systems
47 and consumer electronics; it was also used in the Sega Dreamcast 48 and consumer electronics; it was also used in the Sega Dreamcast
diff --git a/arch/sh/configs/rsk7203_defconfig b/arch/sh/configs/rsk7203_defconfig
index 4e5229b0c5bb..47236573db83 100644
--- a/arch/sh/configs/rsk7203_defconfig
+++ b/arch/sh/configs/rsk7203_defconfig
@@ -128,7 +128,6 @@ CONFIG_DEBUG_MUTEXES=y
128CONFIG_DEBUG_SPINLOCK_SLEEP=y 128CONFIG_DEBUG_SPINLOCK_SLEEP=y
129CONFIG_DEBUG_INFO=y 129CONFIG_DEBUG_INFO=y
130CONFIG_DEBUG_VM=y 130CONFIG_DEBUG_VM=y
131CONFIG_DEBUG_WRITECOUNT=y
132CONFIG_DEBUG_LIST=y 131CONFIG_DEBUG_LIST=y
133CONFIG_DEBUG_SG=y 132CONFIG_DEBUG_SG=y
134CONFIG_FRAME_POINTER=y 133CONFIG_FRAME_POINTER=y
diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
index 7d8b7e94b93b..29f2e988c56a 100644
--- a/arch/sparc/Kconfig
+++ b/arch/sparc/Kconfig
@@ -77,6 +77,7 @@ config SPARC64
77 select ARCH_HAVE_NMI_SAFE_CMPXCHG 77 select ARCH_HAVE_NMI_SAFE_CMPXCHG
78 select HAVE_C_RECORDMCOUNT 78 select HAVE_C_RECORDMCOUNT
79 select NO_BOOTMEM 79 select NO_BOOTMEM
80 select HAVE_ARCH_AUDITSYSCALL
80 81
81config ARCH_DEFCONFIG 82config ARCH_DEFCONFIG
82 string 83 string
diff --git a/arch/um/Kconfig.common b/arch/um/Kconfig.common
index 21ca44c4f6d5..6915d28cf118 100644
--- a/arch/um/Kconfig.common
+++ b/arch/um/Kconfig.common
@@ -1,6 +1,7 @@
1config UML 1config UML
2 bool 2 bool
3 default y 3 default y
4 select HAVE_ARCH_AUDITSYSCALL
4 select HAVE_UID16 5 select HAVE_UID16
5 select GENERIC_IRQ_SHOW 6 select GENERIC_IRQ_SHOW
6 select GENERIC_CPU_DEVICES 7 select GENERIC_CPU_DEVICES
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 5b8ec0f53b57..25d2c6f7325e 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -129,6 +129,7 @@ config X86
129 select HAVE_IRQ_EXIT_ON_IRQ_STACK if X86_64 129 select HAVE_IRQ_EXIT_ON_IRQ_STACK if X86_64
130 select HAVE_CC_STACKPROTECTOR 130 select HAVE_CC_STACKPROTECTOR
131 select GENERIC_CPU_AUTOPROBE 131 select GENERIC_CPU_AUTOPROBE
132 select HAVE_ARCH_AUDITSYSCALL
132 133
133config INSTRUCTION_DECODER 134config INSTRUCTION_DECODER
134 def_bool y 135 def_bool y
diff --git a/arch/x86/Makefile b/arch/x86/Makefile
index 0db89106854e..d1b7c377a234 100644
--- a/arch/x86/Makefile
+++ b/arch/x86/Makefile
@@ -108,7 +108,7 @@ else
108 108
109 # this works around some issues with generating unwind tables in older gccs 109 # this works around some issues with generating unwind tables in older gccs
110 # newer gccs do it by default 110 # newer gccs do it by default
111 KBUILD_CFLAGS += -maccumulate-outgoing-args 111 KBUILD_CFLAGS += $(call cc-option,-maccumulate-outgoing-args)
112endif 112endif
113 113
114# Make sure compiler does not have buggy stack-protector support. 114# Make sure compiler does not have buggy stack-protector support.
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index fcaf9c961265..7de069afb382 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -60,7 +60,7 @@
60 | X86_CR4_PSE | X86_CR4_PAE | X86_CR4_MCE \ 60 | X86_CR4_PSE | X86_CR4_PAE | X86_CR4_MCE \
61 | X86_CR4_PGE | X86_CR4_PCE | X86_CR4_OSFXSR | X86_CR4_PCIDE \ 61 | X86_CR4_PGE | X86_CR4_PCE | X86_CR4_OSFXSR | X86_CR4_PCIDE \
62 | X86_CR4_OSXSAVE | X86_CR4_SMEP | X86_CR4_FSGSBASE \ 62 | X86_CR4_OSXSAVE | X86_CR4_SMEP | X86_CR4_FSGSBASE \
63 | X86_CR4_OSXMMEXCPT | X86_CR4_VMXE)) 63 | X86_CR4_OSXMMEXCPT | X86_CR4_VMXE | X86_CR4_SMAP))
64 64
65#define CR8_RESERVED_BITS (~(unsigned long)X86_CR8_TPR) 65#define CR8_RESERVED_BITS (~(unsigned long)X86_CR8_TPR)
66 66
diff --git a/arch/x86/include/asm/syscall.h b/arch/x86/include/asm/syscall.h
index aea284b41312..d6a756ae04c8 100644
--- a/arch/x86/include/asm/syscall.h
+++ b/arch/x86/include/asm/syscall.h
@@ -13,7 +13,7 @@
13#ifndef _ASM_X86_SYSCALL_H 13#ifndef _ASM_X86_SYSCALL_H
14#define _ASM_X86_SYSCALL_H 14#define _ASM_X86_SYSCALL_H
15 15
16#include <linux/audit.h> 16#include <uapi/linux/audit.h>
17#include <linux/sched.h> 17#include <linux/sched.h>
18#include <linux/err.h> 18#include <linux/err.h>
19#include <asm/asm-offsets.h> /* For NR_syscalls */ 19#include <asm/asm-offsets.h> /* For NR_syscalls */
@@ -91,8 +91,7 @@ static inline void syscall_set_arguments(struct task_struct *task,
91 memcpy(&regs->bx + i, args, n * sizeof(args[0])); 91 memcpy(&regs->bx + i, args, n * sizeof(args[0]));
92} 92}
93 93
94static inline int syscall_get_arch(struct task_struct *task, 94static inline int syscall_get_arch(void)
95 struct pt_regs *regs)
96{ 95{
97 return AUDIT_ARCH_I386; 96 return AUDIT_ARCH_I386;
98} 97}
@@ -221,8 +220,7 @@ static inline void syscall_set_arguments(struct task_struct *task,
221 } 220 }
222} 221}
223 222
224static inline int syscall_get_arch(struct task_struct *task, 223static inline int syscall_get_arch(void)
225 struct pt_regs *regs)
226{ 224{
227#ifdef CONFIG_IA32_EMULATION 225#ifdef CONFIG_IA32_EMULATION
228 /* 226 /*
@@ -234,7 +232,7 @@ static inline int syscall_get_arch(struct task_struct *task,
234 * 232 *
235 * x32 tasks should be considered AUDIT_ARCH_X86_64. 233 * x32 tasks should be considered AUDIT_ARCH_X86_64.
236 */ 234 */
237 if (task_thread_info(task)->status & TS_COMPAT) 235 if (task_thread_info(current)->status & TS_COMPAT)
238 return AUDIT_ARCH_I386; 236 return AUDIT_ARCH_I386;
239#endif 237#endif
240 /* Both x32 and x86_64 are considered "64-bit". */ 238 /* Both x32 and x86_64 are considered "64-bit". */
diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
index bea60671ef8a..f47a104a749c 100644
--- a/arch/x86/kvm/cpuid.c
+++ b/arch/x86/kvm/cpuid.c
@@ -308,7 +308,7 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
308 const u32 kvm_supported_word9_x86_features = 308 const u32 kvm_supported_word9_x86_features =
309 F(FSGSBASE) | F(BMI1) | F(HLE) | F(AVX2) | F(SMEP) | 309 F(FSGSBASE) | F(BMI1) | F(HLE) | F(AVX2) | F(SMEP) |
310 F(BMI2) | F(ERMS) | f_invpcid | F(RTM) | f_mpx | F(RDSEED) | 310 F(BMI2) | F(ERMS) | f_invpcid | F(RTM) | f_mpx | F(RDSEED) |
311 F(ADX); 311 F(ADX) | F(SMAP);
312 312
313 /* all calls to cpuid_count() should be made on the same cpu */ 313 /* all calls to cpuid_count() should be made on the same cpu */
314 get_cpu(); 314 get_cpu();
diff --git a/arch/x86/kvm/cpuid.h b/arch/x86/kvm/cpuid.h
index a2a1bb7ed8c1..eeecbed26ac7 100644
--- a/arch/x86/kvm/cpuid.h
+++ b/arch/x86/kvm/cpuid.h
@@ -48,6 +48,14 @@ static inline bool guest_cpuid_has_smep(struct kvm_vcpu *vcpu)
48 return best && (best->ebx & bit(X86_FEATURE_SMEP)); 48 return best && (best->ebx & bit(X86_FEATURE_SMEP));
49} 49}
50 50
51static inline bool guest_cpuid_has_smap(struct kvm_vcpu *vcpu)
52{
53 struct kvm_cpuid_entry2 *best;
54
55 best = kvm_find_cpuid_entry(vcpu, 7, 0);
56 return best && (best->ebx & bit(X86_FEATURE_SMAP));
57}
58
51static inline bool guest_cpuid_has_fsgsbase(struct kvm_vcpu *vcpu) 59static inline bool guest_cpuid_has_fsgsbase(struct kvm_vcpu *vcpu)
52{ 60{
53 struct kvm_cpuid_entry2 *best; 61 struct kvm_cpuid_entry2 *best;
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index f5704d9e5ddc..813d31038b93 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -3601,20 +3601,27 @@ static void reset_rsvds_bits_mask_ept(struct kvm_vcpu *vcpu,
3601 } 3601 }
3602} 3602}
3603 3603
3604static void update_permission_bitmask(struct kvm_vcpu *vcpu, 3604void update_permission_bitmask(struct kvm_vcpu *vcpu,
3605 struct kvm_mmu *mmu, bool ept) 3605 struct kvm_mmu *mmu, bool ept)
3606{ 3606{
3607 unsigned bit, byte, pfec; 3607 unsigned bit, byte, pfec;
3608 u8 map; 3608 u8 map;
3609 bool fault, x, w, u, wf, uf, ff, smep; 3609 bool fault, x, w, u, wf, uf, ff, smapf, cr4_smap, cr4_smep, smap = 0;
3610 3610
3611 smep = kvm_read_cr4_bits(vcpu, X86_CR4_SMEP); 3611 cr4_smep = kvm_read_cr4_bits(vcpu, X86_CR4_SMEP);
3612 cr4_smap = kvm_read_cr4_bits(vcpu, X86_CR4_SMAP);
3612 for (byte = 0; byte < ARRAY_SIZE(mmu->permissions); ++byte) { 3613 for (byte = 0; byte < ARRAY_SIZE(mmu->permissions); ++byte) {
3613 pfec = byte << 1; 3614 pfec = byte << 1;
3614 map = 0; 3615 map = 0;
3615 wf = pfec & PFERR_WRITE_MASK; 3616 wf = pfec & PFERR_WRITE_MASK;
3616 uf = pfec & PFERR_USER_MASK; 3617 uf = pfec & PFERR_USER_MASK;
3617 ff = pfec & PFERR_FETCH_MASK; 3618 ff = pfec & PFERR_FETCH_MASK;
3619 /*
3620 * PFERR_RSVD_MASK bit is set in PFEC if the access is not
3621 * subject to SMAP restrictions, and cleared otherwise. The
3622 * bit is only meaningful if the SMAP bit is set in CR4.
3623 */
3624 smapf = !(pfec & PFERR_RSVD_MASK);
3618 for (bit = 0; bit < 8; ++bit) { 3625 for (bit = 0; bit < 8; ++bit) {
3619 x = bit & ACC_EXEC_MASK; 3626 x = bit & ACC_EXEC_MASK;
3620 w = bit & ACC_WRITE_MASK; 3627 w = bit & ACC_WRITE_MASK;
@@ -3626,12 +3633,33 @@ static void update_permission_bitmask(struct kvm_vcpu *vcpu,
3626 /* Allow supervisor writes if !cr0.wp */ 3633 /* Allow supervisor writes if !cr0.wp */
3627 w |= !is_write_protection(vcpu) && !uf; 3634 w |= !is_write_protection(vcpu) && !uf;
3628 /* Disallow supervisor fetches of user code if cr4.smep */ 3635 /* Disallow supervisor fetches of user code if cr4.smep */
3629 x &= !(smep && u && !uf); 3636 x &= !(cr4_smep && u && !uf);
3637
3638 /*
3639 * SMAP:kernel-mode data accesses from user-mode
3640 * mappings should fault. A fault is considered
3641 * as a SMAP violation if all of the following
3642 * conditions are true:
3643 * - X86_CR4_SMAP is set in CR4
3644 * - A user page is accessed
3645 * - Page fault in kernel mode
3646 * - if CPL = 3 or X86_EFLAGS_AC is clear
3647 *
3648 * Here, we cover the first three conditions.
3649 * The fourth is computed dynamically in
3650 * permission_fault() and is in smapf.
3651 *
3652 * Also, SMAP does not affect instruction
3653 * fetches; add the !ff check here to make it
3654 * clearer.
3655 */
3656 smap = cr4_smap && u && !uf && !ff;
3630 } else 3657 } else
3631 /* Not really needed: no U/S accesses on ept */ 3658 /* Not really needed: no U/S accesses on ept */
3632 u = 1; 3659 u = 1;
3633 3660
3634 fault = (ff && !x) || (uf && !u) || (wf && !w); 3661 fault = (ff && !x) || (uf && !u) || (wf && !w) ||
3662 (smapf && smap);
3635 map |= fault << bit; 3663 map |= fault << bit;
3636 } 3664 }
3637 mmu->permissions[byte] = map; 3665 mmu->permissions[byte] = map;
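
The net effect of the loop above is easier to see in isolation. Below is a standalone userspace sketch of the per-entry fault decision (illustrative only; the ACC_*/PFERR_* bit positions and the helper name faults() are assumptions meant to mirror, not reproduce, the kernel definitions):

#include <stdbool.h>
#include <stdio.h>

/* pte access bits, assumed to mirror KVM's ACC_*_MASK values */
#define ACC_EXEC        1U
#define ACC_WRITE       2U
#define ACC_USER        4U

/* page-fault error-code bits, assumed to mirror PFERR_*_MASK */
#define PFERR_WRITE     (1U << 1)
#define PFERR_USER      (1U << 2)
#define PFERR_RSVD      (1U << 3)       /* reused here as "SMAP check overridden" */
#define PFERR_FETCH     (1U << 4)

static bool faults(unsigned int pte, unsigned int pfec,
		   bool cr0_wp, bool cr4_smep, bool cr4_smap)
{
	bool x = pte & ACC_EXEC, w = pte & ACC_WRITE, u = pte & ACC_USER;
	bool wf = pfec & PFERR_WRITE, uf = pfec & PFERR_USER;
	bool ff = pfec & PFERR_FETCH;
	bool smapf = !(pfec & PFERR_RSVD);      /* override bit not set */
	bool smap;

	w |= !cr0_wp && !uf;                    /* kernel may write if !CR0.WP */
	x &= !(cr4_smep && u && !uf);           /* SMEP: no kernel fetch from a user page */
	smap = cr4_smap && u && !uf && !ff;     /* SMAP candidate access */

	return (ff && !x) || (uf && !u) || (wf && !w) || (smapf && smap);
}

int main(void)
{
	/* kernel read of a user data page, CR4.SMAP=1, no EFLAGS.AC override: faults (1) */
	printf("%d\n", faults(ACC_USER | ACC_WRITE, 0, true, true, true));
	/* same access with the PFERR_RSVD override bit set: allowed (0) */
	printf("%d\n", faults(ACC_USER | ACC_WRITE, PFERR_RSVD, true, true, true));
	return 0;
}

Precomputing this table whenever CR0.WP, CR4.SMEP or CR4.SMAP changes keeps the hot path in permission_fault() down to a single table lookup, which the mmu.h hunk below relies on.
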
diff --git a/arch/x86/kvm/mmu.h b/arch/x86/kvm/mmu.h
index 292615274358..3842e70bdb7c 100644
--- a/arch/x86/kvm/mmu.h
+++ b/arch/x86/kvm/mmu.h
@@ -44,11 +44,17 @@
44#define PT_DIRECTORY_LEVEL 2 44#define PT_DIRECTORY_LEVEL 2
45#define PT_PAGE_TABLE_LEVEL 1 45#define PT_PAGE_TABLE_LEVEL 1
46 46
47#define PFERR_PRESENT_MASK (1U << 0) 47#define PFERR_PRESENT_BIT 0
48#define PFERR_WRITE_MASK (1U << 1) 48#define PFERR_WRITE_BIT 1
49#define PFERR_USER_MASK (1U << 2) 49#define PFERR_USER_BIT 2
50#define PFERR_RSVD_MASK (1U << 3) 50#define PFERR_RSVD_BIT 3
51#define PFERR_FETCH_MASK (1U << 4) 51#define PFERR_FETCH_BIT 4
52
53#define PFERR_PRESENT_MASK (1U << PFERR_PRESENT_BIT)
54#define PFERR_WRITE_MASK (1U << PFERR_WRITE_BIT)
55#define PFERR_USER_MASK (1U << PFERR_USER_BIT)
56#define PFERR_RSVD_MASK (1U << PFERR_RSVD_BIT)
57#define PFERR_FETCH_MASK (1U << PFERR_FETCH_BIT)
52 58
53int kvm_mmu_get_spte_hierarchy(struct kvm_vcpu *vcpu, u64 addr, u64 sptes[4]); 59int kvm_mmu_get_spte_hierarchy(struct kvm_vcpu *vcpu, u64 addr, u64 sptes[4]);
54void kvm_mmu_set_mmio_spte_mask(u64 mmio_mask); 60void kvm_mmu_set_mmio_spte_mask(u64 mmio_mask);
@@ -73,6 +79,8 @@ int handle_mmio_page_fault_common(struct kvm_vcpu *vcpu, u64 addr, bool direct);
73void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *context); 79void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *context);
74void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *context, 80void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *context,
75 bool execonly); 81 bool execonly);
82void update_permission_bitmask(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
83 bool ept);
76 84
77static inline unsigned int kvm_mmu_available_pages(struct kvm *kvm) 85static inline unsigned int kvm_mmu_available_pages(struct kvm *kvm)
78{ 86{
@@ -110,10 +118,30 @@ static inline bool is_write_protection(struct kvm_vcpu *vcpu)
110 * Will a fault with a given page-fault error code (pfec) cause a permission 118 * Will a fault with a given page-fault error code (pfec) cause a permission
111 * fault with the given access (in ACC_* format)? 119 * fault with the given access (in ACC_* format)?
112 */ 120 */
113static inline bool permission_fault(struct kvm_mmu *mmu, unsigned pte_access, 121static inline bool permission_fault(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
114 unsigned pfec) 122 unsigned pte_access, unsigned pfec)
115{ 123{
116 return (mmu->permissions[pfec >> 1] >> pte_access) & 1; 124 int cpl = kvm_x86_ops->get_cpl(vcpu);
125 unsigned long rflags = kvm_x86_ops->get_rflags(vcpu);
126
127 /*
128 * If CPL < 3, SMAP protections are disabled if EFLAGS.AC = 1.
129 *
130 * If CPL = 3, SMAP applies to all supervisor-mode data accesses
131 * (these are implicit supervisor accesses) regardless of the value
132 * of EFLAGS.AC.
133 *
134 * This computes (cpl < 3) && (rflags & X86_EFLAGS_AC), leaving
135 * the result in X86_EFLAGS_AC. We then insert it in place of
136 * the PFERR_RSVD_MASK bit; this bit will always be zero in pfec,
137 * but it will be one in the index if SMAP checks are being overridden.
138 * It is important to keep this branchless.
139 */
140 unsigned long smap = (cpl - 3) & (rflags & X86_EFLAGS_AC);
141 int index = (pfec >> 1) +
142 (smap >> (X86_EFLAGS_AC_BIT - PFERR_RSVD_BIT + 1));
143
144 return (mmu->permissions[index] >> pte_access) & 1;
117} 145}
118 146
119void kvm_mmu_invalidate_zap_all_pages(struct kvm *kvm); 147void kvm_mmu_invalidate_zap_all_pages(struct kvm *kvm);
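
The branchless index trick above is worth working through once with concrete numbers. A userspace sketch (illustrative only; the bit positions X86_EFLAGS_AC = bit 18 and PFERR_RSVD_BIT = 3 are assumed to match the kernel headers):

#include <stdio.h>

#define X86_EFLAGS_AC_BIT       18
#define X86_EFLAGS_AC           (1UL << X86_EFLAGS_AC_BIT)
#define PFERR_RSVD_BIT          3

static int smap_index(int cpl, unsigned long rflags, unsigned int pfec)
{
	/*
	 * cpl - 3 is negative for CPL 0-2, so after the implicit widening
	 * to unsigned long its high bits (including bit 18) are all set;
	 * for CPL 3 it is zero.  smap therefore keeps EFLAGS.AC only when
	 * CPL < 3.
	 */
	unsigned long smap = (cpl - 3) & (rflags & X86_EFLAGS_AC);

	/*
	 * Shift the AC bit (bit 18) down so it lands on the slot that
	 * PFERR_RSVD occupies after the "pfec >> 1" scaling, i.e. bit 2
	 * of the index.
	 */
	return (pfec >> 1) + (smap >> (X86_EFLAGS_AC_BIT - PFERR_RSVD_BIT + 1));
}

int main(void)
{
	unsigned int pfec = 1U << 1;    /* plain write fault */

	printf("CPL0, AC=1: %d\n", smap_index(0, X86_EFLAGS_AC, pfec)); /* 5: RSVD slot set */
	printf("CPL3, AC=1: %d\n", smap_index(3, X86_EFLAGS_AC, pfec)); /* 1 */
	printf("CPL0, AC=0: %d\n", smap_index(0, 0, pfec));             /* 1 */
	return 0;
}

With CPL < 3 and EFLAGS.AC set, bit 2 of the index is set, selecting the permissions[] byte that was computed with smapf == false, i.e. the SMAP check is suppressed; in every other case the index is simply pfec >> 1 as before, so the lookup stays a single shift, add and array access.
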
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index b1e6c1bf68d3..123efd3ec29f 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -353,7 +353,7 @@ retry_walk:
353 walker->ptes[walker->level - 1] = pte; 353 walker->ptes[walker->level - 1] = pte;
354 } while (!is_last_gpte(mmu, walker->level, pte)); 354 } while (!is_last_gpte(mmu, walker->level, pte));
355 355
356 if (unlikely(permission_fault(mmu, pte_access, access))) { 356 if (unlikely(permission_fault(vcpu, mmu, pte_access, access))) {
357 errcode |= PFERR_PRESENT_MASK; 357 errcode |= PFERR_PRESENT_MASK;
358 goto error; 358 goto error;
359 } 359 }
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 1320e0f8e611..1f68c5831924 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -3484,13 +3484,14 @@ static int vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
3484 hw_cr4 &= ~X86_CR4_PAE; 3484 hw_cr4 &= ~X86_CR4_PAE;
3485 hw_cr4 |= X86_CR4_PSE; 3485 hw_cr4 |= X86_CR4_PSE;
3486 /* 3486 /*
3487 * SMEP is disabled if CPU is in non-paging mode in 3487 * SMEP/SMAP is disabled if CPU is in non-paging mode
3488 * hardware. However KVM always uses paging mode to 3488 * in hardware. However KVM always uses paging mode to
3489 * emulate guest non-paging mode with TDP. 3489 * emulate guest non-paging mode with TDP.
3490 * To emulate this behavior, SMEP needs to be manually 3490 * To emulate this behavior, SMEP/SMAP needs to be
3491 * disabled when guest switches to non-paging mode. 3491 * manually disabled when guest switches to non-paging
3492 * mode.
3492 */ 3493 */
3493 hw_cr4 &= ~X86_CR4_SMEP; 3494 hw_cr4 &= ~(X86_CR4_SMEP | X86_CR4_SMAP);
3494 } else if (!(cr4 & X86_CR4_PAE)) { 3495 } else if (!(cr4 & X86_CR4_PAE)) {
3495 hw_cr4 &= ~X86_CR4_PAE; 3496 hw_cr4 &= ~X86_CR4_PAE;
3496 } 3497 }
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 9d1b5cd4d34c..8b8fc0b792ba 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -652,6 +652,9 @@ int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
652 if (!guest_cpuid_has_smep(vcpu) && (cr4 & X86_CR4_SMEP)) 652 if (!guest_cpuid_has_smep(vcpu) && (cr4 & X86_CR4_SMEP))
653 return 1; 653 return 1;
654 654
655 if (!guest_cpuid_has_smap(vcpu) && (cr4 & X86_CR4_SMAP))
656 return 1;
657
655 if (!guest_cpuid_has_fsgsbase(vcpu) && (cr4 & X86_CR4_FSGSBASE)) 658 if (!guest_cpuid_has_fsgsbase(vcpu) && (cr4 & X86_CR4_FSGSBASE))
656 return 1; 659 return 1;
657 660
@@ -680,6 +683,9 @@ int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
680 (!(cr4 & X86_CR4_PCIDE) && (old_cr4 & X86_CR4_PCIDE))) 683 (!(cr4 & X86_CR4_PCIDE) && (old_cr4 & X86_CR4_PCIDE)))
681 kvm_mmu_reset_context(vcpu); 684 kvm_mmu_reset_context(vcpu);
682 685
686 if ((cr4 ^ old_cr4) & X86_CR4_SMAP)
687 update_permission_bitmask(vcpu, vcpu->arch.walk_mmu, false);
688
683 if ((cr4 ^ old_cr4) & X86_CR4_OSXSAVE) 689 if ((cr4 ^ old_cr4) & X86_CR4_OSXSAVE)
684 kvm_update_cpuid(vcpu); 690 kvm_update_cpuid(vcpu);
685 691
@@ -1117,7 +1123,6 @@ static inline u64 get_kernel_ns(void)
1117{ 1123{
1118 struct timespec ts; 1124 struct timespec ts;
1119 1125
1120 WARN_ON(preemptible());
1121 ktime_get_ts(&ts); 1126 ktime_get_ts(&ts);
1122 monotonic_to_bootbased(&ts); 1127 monotonic_to_bootbased(&ts);
1123 return timespec_to_ns(&ts); 1128 return timespec_to_ns(&ts);
@@ -4164,7 +4169,8 @@ static int vcpu_mmio_gva_to_gpa(struct kvm_vcpu *vcpu, unsigned long gva,
4164 | (write ? PFERR_WRITE_MASK : 0); 4169 | (write ? PFERR_WRITE_MASK : 0);
4165 4170
4166 if (vcpu_match_mmio_gva(vcpu, gva) 4171 if (vcpu_match_mmio_gva(vcpu, gva)
4167 && !permission_fault(vcpu->arch.walk_mmu, vcpu->arch.access, access)) { 4172 && !permission_fault(vcpu, vcpu->arch.walk_mmu,
4173 vcpu->arch.access, access)) {
4168 *gpa = vcpu->arch.mmio_gfn << PAGE_SHIFT | 4174 *gpa = vcpu->arch.mmio_gfn << PAGE_SHIFT |
4169 (gva & (PAGE_SIZE - 1)); 4175 (gva & (PAGE_SIZE - 1));
4170 trace_vcpu_match_mmio(gva, *gpa, write, false); 4176 trace_vcpu_match_mmio(gva, *gpa, write, false);
diff --git a/arch/xtensa/configs/iss_defconfig b/arch/xtensa/configs/iss_defconfig
index d57d917ff240..1493c68352d1 100644
--- a/arch/xtensa/configs/iss_defconfig
+++ b/arch/xtensa/configs/iss_defconfig
@@ -627,7 +627,6 @@ CONFIG_SCHED_DEBUG=y
627# CONFIG_DEBUG_KOBJECT is not set 627# CONFIG_DEBUG_KOBJECT is not set
628# CONFIG_DEBUG_INFO is not set 628# CONFIG_DEBUG_INFO is not set
629# CONFIG_DEBUG_VM is not set 629# CONFIG_DEBUG_VM is not set
630# CONFIG_DEBUG_WRITECOUNT is not set
631# CONFIG_DEBUG_MEMORY_INIT is not set 630# CONFIG_DEBUG_MEMORY_INIT is not set
632# CONFIG_DEBUG_LIST is not set 631# CONFIG_DEBUG_LIST is not set
633# CONFIG_DEBUG_SG is not set 632# CONFIG_DEBUG_SG is not set
diff --git a/arch/xtensa/configs/s6105_defconfig b/arch/xtensa/configs/s6105_defconfig
index 583c2b0974ca..12a492ab6d17 100644
--- a/arch/xtensa/configs/s6105_defconfig
+++ b/arch/xtensa/configs/s6105_defconfig
@@ -569,7 +569,6 @@ CONFIG_DEBUG_SPINLOCK_SLEEP=y
569# CONFIG_DEBUG_INFO is not set 569# CONFIG_DEBUG_INFO is not set
570# CONFIG_DEBUG_VM is not set 570# CONFIG_DEBUG_VM is not set
571CONFIG_DEBUG_NOMMU_REGIONS=y 571CONFIG_DEBUG_NOMMU_REGIONS=y
572# CONFIG_DEBUG_WRITECOUNT is not set
573# CONFIG_DEBUG_MEMORY_INIT is not set 572# CONFIG_DEBUG_MEMORY_INIT is not set
574# CONFIG_DEBUG_LIST is not set 573# CONFIG_DEBUG_LIST is not set
575# CONFIG_DEBUG_SG is not set 574# CONFIG_DEBUG_SG is not set
diff --git a/block/blk-map.c b/block/blk-map.c
index cca6356d216d..f7b22bc21518 100644
--- a/block/blk-map.c
+++ b/block/blk-map.c
@@ -188,7 +188,7 @@ EXPORT_SYMBOL(blk_rq_map_user);
188 * unmapping. 188 * unmapping.
189 */ 189 */
190int blk_rq_map_user_iov(struct request_queue *q, struct request *rq, 190int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
191 struct rq_map_data *map_data, struct sg_iovec *iov, 191 struct rq_map_data *map_data, const struct sg_iovec *iov,
192 int iov_count, unsigned int len, gfp_t gfp_mask) 192 int iov_count, unsigned int len, gfp_t gfp_mask)
193{ 193{
194 struct bio *bio; 194 struct bio *bio;
diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
index 18c76e84d540..68e3992e8838 100644
--- a/drivers/block/drbd/drbd_receiver.c
+++ b/drivers/block/drbd/drbd_receiver.c
@@ -469,24 +469,14 @@ static void drbd_wait_ee_list_empty(struct drbd_device *device,
469 469
470static int drbd_recv_short(struct socket *sock, void *buf, size_t size, int flags) 470static int drbd_recv_short(struct socket *sock, void *buf, size_t size, int flags)
471{ 471{
472 mm_segment_t oldfs;
473 struct kvec iov = { 472 struct kvec iov = {
474 .iov_base = buf, 473 .iov_base = buf,
475 .iov_len = size, 474 .iov_len = size,
476 }; 475 };
477 struct msghdr msg = { 476 struct msghdr msg = {
478 .msg_iovlen = 1,
479 .msg_iov = (struct iovec *)&iov,
480 .msg_flags = (flags ? flags : MSG_WAITALL | MSG_NOSIGNAL) 477 .msg_flags = (flags ? flags : MSG_WAITALL | MSG_NOSIGNAL)
481 }; 478 };
482 int rv; 479 return kernel_recvmsg(sock, &msg, &iov, 1, size, msg.msg_flags);
483
484 oldfs = get_fs();
485 set_fs(KERNEL_DS);
486 rv = sock_recvmsg(sock, &msg, size, msg.msg_flags);
487 set_fs(oldfs);
488
489 return rv;
490} 480}
491 481
492static int drbd_recv(struct drbd_connection *connection, void *buf, size_t size) 482static int drbd_recv(struct drbd_connection *connection, void *buf, size_t size)
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index 55298db36b2d..3a70ea2f7cd6 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -630,37 +630,29 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd,
630 } 630 }
631 631
632 case NBD_CLEAR_SOCK: { 632 case NBD_CLEAR_SOCK: {
633 struct file *file; 633 struct socket *sock = nbd->sock;
634
635 nbd->sock = NULL; 634 nbd->sock = NULL;
636 file = nbd->file;
637 nbd->file = NULL;
638 nbd_clear_que(nbd); 635 nbd_clear_que(nbd);
639 BUG_ON(!list_empty(&nbd->queue_head)); 636 BUG_ON(!list_empty(&nbd->queue_head));
640 BUG_ON(!list_empty(&nbd->waiting_queue)); 637 BUG_ON(!list_empty(&nbd->waiting_queue));
641 kill_bdev(bdev); 638 kill_bdev(bdev);
642 if (file) 639 if (sock)
643 fput(file); 640 sockfd_put(sock);
644 return 0; 641 return 0;
645 } 642 }
646 643
647 case NBD_SET_SOCK: { 644 case NBD_SET_SOCK: {
648 struct file *file; 645 struct socket *sock;
649 if (nbd->file) 646 int err;
647 if (nbd->sock)
650 return -EBUSY; 648 return -EBUSY;
651 file = fget(arg); 649 sock = sockfd_lookup(arg, &err);
652 if (file) { 650 if (sock) {
653 struct inode *inode = file_inode(file); 651 nbd->sock = sock;
654 if (S_ISSOCK(inode->i_mode)) { 652 if (max_part > 0)
655 nbd->file = file; 653 bdev->bd_invalidated = 1;
656 nbd->sock = SOCKET_I(inode); 654 nbd->disconnect = 0; /* we're connected now */
657 if (max_part > 0) 655 return 0;
658 bdev->bd_invalidated = 1;
659 nbd->disconnect = 0; /* we're connected now */
660 return 0;
661 } else {
662 fput(file);
663 }
664 } 656 }
665 return -EINVAL; 657 return -EINVAL;
666 } 658 }
@@ -697,12 +689,12 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd,
697 689
698 case NBD_DO_IT: { 690 case NBD_DO_IT: {
699 struct task_struct *thread; 691 struct task_struct *thread;
700 struct file *file; 692 struct socket *sock;
701 int error; 693 int error;
702 694
703 if (nbd->pid) 695 if (nbd->pid)
704 return -EBUSY; 696 return -EBUSY;
705 if (!nbd->file) 697 if (!nbd->sock)
706 return -EINVAL; 698 return -EINVAL;
707 699
708 mutex_unlock(&nbd->tx_lock); 700 mutex_unlock(&nbd->tx_lock);
@@ -731,15 +723,15 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd,
731 if (error) 723 if (error)
732 return error; 724 return error;
733 sock_shutdown(nbd, 0); 725 sock_shutdown(nbd, 0);
734 file = nbd->file; 726 sock = nbd->sock;
735 nbd->file = NULL; 727 nbd->sock = NULL;
736 nbd_clear_que(nbd); 728 nbd_clear_que(nbd);
737 dev_warn(disk_to_dev(nbd->disk), "queue cleared\n"); 729 dev_warn(disk_to_dev(nbd->disk), "queue cleared\n");
738 kill_bdev(bdev); 730 kill_bdev(bdev);
739 queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, nbd->disk->queue); 731 queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, nbd->disk->queue);
740 set_device_ro(bdev, false); 732 set_device_ro(bdev, false);
741 if (file) 733 if (sock)
742 fput(file); 734 sockfd_put(sock);
743 nbd->flags = 0; 735 nbd->flags = 0;
744 nbd->bytesize = 0; 736 nbd->bytesize = 0;
745 bdev->bd_inode->i_size = 0; 737 bdev->bd_inode->i_size = 0;
@@ -875,9 +867,7 @@ static int __init nbd_init(void)
875 867
876 for (i = 0; i < nbds_max; i++) { 868 for (i = 0; i < nbds_max; i++) {
877 struct gendisk *disk = nbd_dev[i].disk; 869 struct gendisk *disk = nbd_dev[i].disk;
878 nbd_dev[i].file = NULL;
879 nbd_dev[i].magic = NBD_MAGIC; 870 nbd_dev[i].magic = NBD_MAGIC;
880 nbd_dev[i].flags = 0;
881 INIT_LIST_HEAD(&nbd_dev[i].waiting_queue); 871 INIT_LIST_HEAD(&nbd_dev[i].waiting_queue);
882 spin_lock_init(&nbd_dev[i].queue_lock); 872 spin_lock_init(&nbd_dev[i].queue_lock);
883 INIT_LIST_HEAD(&nbd_dev[i].queue_head); 873 INIT_LIST_HEAD(&nbd_dev[i].queue_head);
diff --git a/drivers/block/nvme-core.c b/drivers/block/nvme-core.c
index da085ff10d25..7c64fa756cce 100644
--- a/drivers/block/nvme-core.c
+++ b/drivers/block/nvme-core.c
@@ -1,6 +1,6 @@
1/* 1/*
2 * NVM Express device driver 2 * NVM Express device driver
3 * Copyright (c) 2011, Intel Corporation. 3 * Copyright (c) 2011-2014, Intel Corporation.
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify it 5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License, 6 * under the terms and conditions of the GNU General Public License,
@@ -20,10 +20,12 @@
20#include <linux/bio.h> 20#include <linux/bio.h>
21#include <linux/bitops.h> 21#include <linux/bitops.h>
22#include <linux/blkdev.h> 22#include <linux/blkdev.h>
23#include <linux/cpu.h>
23#include <linux/delay.h> 24#include <linux/delay.h>
24#include <linux/errno.h> 25#include <linux/errno.h>
25#include <linux/fs.h> 26#include <linux/fs.h>
26#include <linux/genhd.h> 27#include <linux/genhd.h>
28#include <linux/hdreg.h>
27#include <linux/idr.h> 29#include <linux/idr.h>
28#include <linux/init.h> 30#include <linux/init.h>
29#include <linux/interrupt.h> 31#include <linux/interrupt.h>
@@ -35,6 +37,7 @@
35#include <linux/module.h> 37#include <linux/module.h>
36#include <linux/moduleparam.h> 38#include <linux/moduleparam.h>
37#include <linux/pci.h> 39#include <linux/pci.h>
40#include <linux/percpu.h>
38#include <linux/poison.h> 41#include <linux/poison.h>
39#include <linux/ptrace.h> 42#include <linux/ptrace.h>
40#include <linux/sched.h> 43#include <linux/sched.h>
@@ -47,6 +50,11 @@
47#define SQ_SIZE(depth) (depth * sizeof(struct nvme_command)) 50#define SQ_SIZE(depth) (depth * sizeof(struct nvme_command))
48#define CQ_SIZE(depth) (depth * sizeof(struct nvme_completion)) 51#define CQ_SIZE(depth) (depth * sizeof(struct nvme_completion))
49#define ADMIN_TIMEOUT (60 * HZ) 52#define ADMIN_TIMEOUT (60 * HZ)
53#define IOD_TIMEOUT (4 * NVME_IO_TIMEOUT)
54
55unsigned char io_timeout = 30;
56module_param(io_timeout, byte, 0644);
57MODULE_PARM_DESC(io_timeout, "timeout in seconds for I/O");
50 58
51static int nvme_major; 59static int nvme_major;
52module_param(nvme_major, int, 0); 60module_param(nvme_major, int, 0);
@@ -58,6 +66,7 @@ static DEFINE_SPINLOCK(dev_list_lock);
58static LIST_HEAD(dev_list); 66static LIST_HEAD(dev_list);
59static struct task_struct *nvme_thread; 67static struct task_struct *nvme_thread;
60static struct workqueue_struct *nvme_workq; 68static struct workqueue_struct *nvme_workq;
69static wait_queue_head_t nvme_kthread_wait;
61 70
62static void nvme_reset_failed_dev(struct work_struct *ws); 71static void nvme_reset_failed_dev(struct work_struct *ws);
63 72
@@ -74,6 +83,7 @@ struct async_cmd_info {
74 * commands and one for I/O commands). 83 * commands and one for I/O commands).
75 */ 84 */
76struct nvme_queue { 85struct nvme_queue {
86 struct rcu_head r_head;
77 struct device *q_dmadev; 87 struct device *q_dmadev;
78 struct nvme_dev *dev; 88 struct nvme_dev *dev;
79 char irqname[24]; /* nvme4294967295-65535\0 */ 89 char irqname[24]; /* nvme4294967295-65535\0 */
@@ -85,6 +95,7 @@ struct nvme_queue {
85 wait_queue_head_t sq_full; 95 wait_queue_head_t sq_full;
86 wait_queue_t sq_cong_wait; 96 wait_queue_t sq_cong_wait;
87 struct bio_list sq_cong; 97 struct bio_list sq_cong;
98 struct list_head iod_bio;
88 u32 __iomem *q_db; 99 u32 __iomem *q_db;
89 u16 q_depth; 100 u16 q_depth;
90 u16 cq_vector; 101 u16 cq_vector;
@@ -95,6 +106,7 @@ struct nvme_queue {
95 u8 cq_phase; 106 u8 cq_phase;
96 u8 cqe_seen; 107 u8 cqe_seen;
97 u8 q_suspended; 108 u8 q_suspended;
109 cpumask_var_t cpu_mask;
98 struct async_cmd_info cmdinfo; 110 struct async_cmd_info cmdinfo;
99 unsigned long cmdid_data[]; 111 unsigned long cmdid_data[];
100}; 112};
@@ -118,7 +130,7 @@ static inline void _nvme_check_size(void)
118 BUILD_BUG_ON(sizeof(struct nvme_smart_log) != 512); 130 BUILD_BUG_ON(sizeof(struct nvme_smart_log) != 512);
119} 131}
120 132
121typedef void (*nvme_completion_fn)(struct nvme_dev *, void *, 133typedef void (*nvme_completion_fn)(struct nvme_queue *, void *,
122 struct nvme_completion *); 134 struct nvme_completion *);
123 135
124struct nvme_cmd_info { 136struct nvme_cmd_info {
@@ -190,7 +202,7 @@ static int alloc_cmdid_killable(struct nvme_queue *nvmeq, void *ctx,
190#define CMD_CTX_FLUSH (0x318 + CMD_CTX_BASE) 202#define CMD_CTX_FLUSH (0x318 + CMD_CTX_BASE)
191#define CMD_CTX_ABORT (0x31C + CMD_CTX_BASE) 203#define CMD_CTX_ABORT (0x31C + CMD_CTX_BASE)
192 204
193static void special_completion(struct nvme_dev *dev, void *ctx, 205static void special_completion(struct nvme_queue *nvmeq, void *ctx,
194 struct nvme_completion *cqe) 206 struct nvme_completion *cqe)
195{ 207{
196 if (ctx == CMD_CTX_CANCELLED) 208 if (ctx == CMD_CTX_CANCELLED)
@@ -198,26 +210,26 @@ static void special_completion(struct nvme_dev *dev, void *ctx,
198 if (ctx == CMD_CTX_FLUSH) 210 if (ctx == CMD_CTX_FLUSH)
199 return; 211 return;
200 if (ctx == CMD_CTX_ABORT) { 212 if (ctx == CMD_CTX_ABORT) {
201 ++dev->abort_limit; 213 ++nvmeq->dev->abort_limit;
202 return; 214 return;
203 } 215 }
204 if (ctx == CMD_CTX_COMPLETED) { 216 if (ctx == CMD_CTX_COMPLETED) {
205 dev_warn(&dev->pci_dev->dev, 217 dev_warn(nvmeq->q_dmadev,
206 "completed id %d twice on queue %d\n", 218 "completed id %d twice on queue %d\n",
207 cqe->command_id, le16_to_cpup(&cqe->sq_id)); 219 cqe->command_id, le16_to_cpup(&cqe->sq_id));
208 return; 220 return;
209 } 221 }
210 if (ctx == CMD_CTX_INVALID) { 222 if (ctx == CMD_CTX_INVALID) {
211 dev_warn(&dev->pci_dev->dev, 223 dev_warn(nvmeq->q_dmadev,
212 "invalid id %d completed on queue %d\n", 224 "invalid id %d completed on queue %d\n",
213 cqe->command_id, le16_to_cpup(&cqe->sq_id)); 225 cqe->command_id, le16_to_cpup(&cqe->sq_id));
214 return; 226 return;
215 } 227 }
216 228
217 dev_warn(&dev->pci_dev->dev, "Unknown special completion %p\n", ctx); 229 dev_warn(nvmeq->q_dmadev, "Unknown special completion %p\n", ctx);
218} 230}
219 231
220static void async_completion(struct nvme_dev *dev, void *ctx, 232static void async_completion(struct nvme_queue *nvmeq, void *ctx,
221 struct nvme_completion *cqe) 233 struct nvme_completion *cqe)
222{ 234{
223 struct async_cmd_info *cmdinfo = ctx; 235 struct async_cmd_info *cmdinfo = ctx;
@@ -262,14 +274,34 @@ static void *cancel_cmdid(struct nvme_queue *nvmeq, int cmdid,
262 return ctx; 274 return ctx;
263} 275}
264 276
265struct nvme_queue *get_nvmeq(struct nvme_dev *dev) 277static struct nvme_queue *raw_nvmeq(struct nvme_dev *dev, int qid)
278{
279 return rcu_dereference_raw(dev->queues[qid]);
280}
281
282static struct nvme_queue *get_nvmeq(struct nvme_dev *dev) __acquires(RCU)
283{
284 unsigned queue_id = get_cpu_var(*dev->io_queue);
285 rcu_read_lock();
286 return rcu_dereference(dev->queues[queue_id]);
287}
288
289static void put_nvmeq(struct nvme_queue *nvmeq) __releases(RCU)
266{ 290{
267 return dev->queues[get_cpu() + 1]; 291 rcu_read_unlock();
292 put_cpu_var(nvmeq->dev->io_queue);
268} 293}
269 294
270void put_nvmeq(struct nvme_queue *nvmeq) 295static struct nvme_queue *lock_nvmeq(struct nvme_dev *dev, int q_idx)
296 __acquires(RCU)
271{ 297{
272 put_cpu(); 298 rcu_read_lock();
299 return rcu_dereference(dev->queues[q_idx]);
300}
301
302static void unlock_nvmeq(struct nvme_queue *nvmeq) __releases(RCU)
303{
304 rcu_read_unlock();
273} 305}
274 306
275/** 307/**
@@ -284,6 +316,10 @@ static int nvme_submit_cmd(struct nvme_queue *nvmeq, struct nvme_command *cmd)
284 unsigned long flags; 316 unsigned long flags;
285 u16 tail; 317 u16 tail;
286 spin_lock_irqsave(&nvmeq->q_lock, flags); 318 spin_lock_irqsave(&nvmeq->q_lock, flags);
319 if (nvmeq->q_suspended) {
320 spin_unlock_irqrestore(&nvmeq->q_lock, flags);
321 return -EBUSY;
322 }
287 tail = nvmeq->sq_tail; 323 tail = nvmeq->sq_tail;
288 memcpy(&nvmeq->sq_cmds[tail], cmd, sizeof(*cmd)); 324 memcpy(&nvmeq->sq_cmds[tail], cmd, sizeof(*cmd));
289 if (++tail == nvmeq->q_depth) 325 if (++tail == nvmeq->q_depth)
@@ -323,6 +359,7 @@ nvme_alloc_iod(unsigned nseg, unsigned nbytes, gfp_t gfp)
323 iod->npages = -1; 359 iod->npages = -1;
324 iod->length = nbytes; 360 iod->length = nbytes;
325 iod->nents = 0; 361 iod->nents = 0;
362 iod->first_dma = 0ULL;
326 iod->start_time = jiffies; 363 iod->start_time = jiffies;
327 } 364 }
328 365
@@ -371,19 +408,31 @@ static void nvme_end_io_acct(struct bio *bio, unsigned long start_time)
371 part_stat_unlock(); 408 part_stat_unlock();
372} 409}
373 410
374static void bio_completion(struct nvme_dev *dev, void *ctx, 411static void bio_completion(struct nvme_queue *nvmeq, void *ctx,
375 struct nvme_completion *cqe) 412 struct nvme_completion *cqe)
376{ 413{
377 struct nvme_iod *iod = ctx; 414 struct nvme_iod *iod = ctx;
378 struct bio *bio = iod->private; 415 struct bio *bio = iod->private;
379 u16 status = le16_to_cpup(&cqe->status) >> 1; 416 u16 status = le16_to_cpup(&cqe->status) >> 1;
380 417
418 if (unlikely(status)) {
419 if (!(status & NVME_SC_DNR ||
420 bio->bi_rw & REQ_FAILFAST_MASK) &&
421 (jiffies - iod->start_time) < IOD_TIMEOUT) {
422 if (!waitqueue_active(&nvmeq->sq_full))
423 add_wait_queue(&nvmeq->sq_full,
424 &nvmeq->sq_cong_wait);
425 list_add_tail(&iod->node, &nvmeq->iod_bio);
426 wake_up(&nvmeq->sq_full);
427 return;
428 }
429 }
381 if (iod->nents) { 430 if (iod->nents) {
382 dma_unmap_sg(&dev->pci_dev->dev, iod->sg, iod->nents, 431 dma_unmap_sg(nvmeq->q_dmadev, iod->sg, iod->nents,
383 bio_data_dir(bio) ? DMA_TO_DEVICE : DMA_FROM_DEVICE); 432 bio_data_dir(bio) ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
384 nvme_end_io_acct(bio, iod->start_time); 433 nvme_end_io_acct(bio, iod->start_time);
385 } 434 }
386 nvme_free_iod(dev, iod); 435 nvme_free_iod(nvmeq->dev, iod);
387 if (status) 436 if (status)
388 bio_endio(bio, -EIO); 437 bio_endio(bio, -EIO);
389 else 438 else
@@ -391,8 +440,8 @@ static void bio_completion(struct nvme_dev *dev, void *ctx,
391} 440}
392 441
393/* length is in bytes. gfp flags indicates whether we may sleep. */ 442/* length is in bytes. gfp flags indicates whether we may sleep. */
394int nvme_setup_prps(struct nvme_dev *dev, struct nvme_common_command *cmd, 443int nvme_setup_prps(struct nvme_dev *dev, struct nvme_iod *iod, int total_len,
395 struct nvme_iod *iod, int total_len, gfp_t gfp) 444 gfp_t gfp)
396{ 445{
397 struct dma_pool *pool; 446 struct dma_pool *pool;
398 int length = total_len; 447 int length = total_len;
@@ -405,7 +454,6 @@ int nvme_setup_prps(struct nvme_dev *dev, struct nvme_common_command *cmd,
405 dma_addr_t prp_dma; 454 dma_addr_t prp_dma;
406 int nprps, i; 455 int nprps, i;
407 456
408 cmd->prp1 = cpu_to_le64(dma_addr);
409 length -= (PAGE_SIZE - offset); 457 length -= (PAGE_SIZE - offset);
410 if (length <= 0) 458 if (length <= 0)
411 return total_len; 459 return total_len;
@@ -420,7 +468,7 @@ int nvme_setup_prps(struct nvme_dev *dev, struct nvme_common_command *cmd,
420 } 468 }
421 469
422 if (length <= PAGE_SIZE) { 470 if (length <= PAGE_SIZE) {
423 cmd->prp2 = cpu_to_le64(dma_addr); 471 iod->first_dma = dma_addr;
424 return total_len; 472 return total_len;
425 } 473 }
426 474
@@ -435,13 +483,12 @@ int nvme_setup_prps(struct nvme_dev *dev, struct nvme_common_command *cmd,
435 483
436 prp_list = dma_pool_alloc(pool, gfp, &prp_dma); 484 prp_list = dma_pool_alloc(pool, gfp, &prp_dma);
437 if (!prp_list) { 485 if (!prp_list) {
438 cmd->prp2 = cpu_to_le64(dma_addr); 486 iod->first_dma = dma_addr;
439 iod->npages = -1; 487 iod->npages = -1;
440 return (total_len - length) + PAGE_SIZE; 488 return (total_len - length) + PAGE_SIZE;
441 } 489 }
442 list[0] = prp_list; 490 list[0] = prp_list;
443 iod->first_dma = prp_dma; 491 iod->first_dma = prp_dma;
444 cmd->prp2 = cpu_to_le64(prp_dma);
445 i = 0; 492 i = 0;
446 for (;;) { 493 for (;;) {
447 if (i == PAGE_SIZE / 8) { 494 if (i == PAGE_SIZE / 8) {
@@ -480,10 +527,11 @@ static int nvme_split_and_submit(struct bio *bio, struct nvme_queue *nvmeq,
480 527
481 bio_chain(split, bio); 528 bio_chain(split, bio);
482 529
483 if (bio_list_empty(&nvmeq->sq_cong)) 530 if (!waitqueue_active(&nvmeq->sq_full))
484 add_wait_queue(&nvmeq->sq_full, &nvmeq->sq_cong_wait); 531 add_wait_queue(&nvmeq->sq_full, &nvmeq->sq_cong_wait);
485 bio_list_add(&nvmeq->sq_cong, split); 532 bio_list_add(&nvmeq->sq_cong, split);
486 bio_list_add(&nvmeq->sq_cong, bio); 533 bio_list_add(&nvmeq->sq_cong, bio);
534 wake_up(&nvmeq->sq_full);
487 535
488 return 0; 536 return 0;
489} 537}
@@ -536,25 +584,13 @@ static int nvme_map_bio(struct nvme_queue *nvmeq, struct nvme_iod *iod,
536 return length; 584 return length;
537} 585}
538 586
539/*
540 * We reuse the small pool to allocate the 16-byte range here as it is not
541 * worth having a special pool for these or additional cases to handle freeing
542 * the iod.
543 */
544static int nvme_submit_discard(struct nvme_queue *nvmeq, struct nvme_ns *ns, 587static int nvme_submit_discard(struct nvme_queue *nvmeq, struct nvme_ns *ns,
545 struct bio *bio, struct nvme_iod *iod, int cmdid) 588 struct bio *bio, struct nvme_iod *iod, int cmdid)
546{ 589{
547 struct nvme_dsm_range *range; 590 struct nvme_dsm_range *range =
591 (struct nvme_dsm_range *)iod_list(iod)[0];
548 struct nvme_command *cmnd = &nvmeq->sq_cmds[nvmeq->sq_tail]; 592 struct nvme_command *cmnd = &nvmeq->sq_cmds[nvmeq->sq_tail];
549 593
550 range = dma_pool_alloc(nvmeq->dev->prp_small_pool, GFP_ATOMIC,
551 &iod->first_dma);
552 if (!range)
553 return -ENOMEM;
554
555 iod_list(iod)[0] = (__le64 *)range;
556 iod->npages = 0;
557
558 range->cattr = cpu_to_le32(0); 594 range->cattr = cpu_to_le32(0);
559 range->nlb = cpu_to_le32(bio->bi_iter.bi_size >> ns->lba_shift); 595 range->nlb = cpu_to_le32(bio->bi_iter.bi_size >> ns->lba_shift);
560 range->slba = cpu_to_le64(nvme_block_nr(ns, bio->bi_iter.bi_sector)); 596 range->slba = cpu_to_le64(nvme_block_nr(ns, bio->bi_iter.bi_sector));
@@ -601,44 +637,22 @@ int nvme_submit_flush_data(struct nvme_queue *nvmeq, struct nvme_ns *ns)
601 return nvme_submit_flush(nvmeq, ns, cmdid); 637 return nvme_submit_flush(nvmeq, ns, cmdid);
602} 638}
603 639
604/* 640static int nvme_submit_iod(struct nvme_queue *nvmeq, struct nvme_iod *iod)
605 * Called with local interrupts disabled and the q_lock held. May not sleep.
606 */
607static int nvme_submit_bio_queue(struct nvme_queue *nvmeq, struct nvme_ns *ns,
608 struct bio *bio)
609{ 641{
642 struct bio *bio = iod->private;
643 struct nvme_ns *ns = bio->bi_bdev->bd_disk->private_data;
610 struct nvme_command *cmnd; 644 struct nvme_command *cmnd;
611 struct nvme_iod *iod; 645 int cmdid;
612 enum dma_data_direction dma_dir;
613 int cmdid, length, result;
614 u16 control; 646 u16 control;
615 u32 dsmgmt; 647 u32 dsmgmt;
616 int psegs = bio_phys_segments(ns->queue, bio);
617
618 if ((bio->bi_rw & REQ_FLUSH) && psegs) {
619 result = nvme_submit_flush_data(nvmeq, ns);
620 if (result)
621 return result;
622 }
623 648
624 result = -ENOMEM;
625 iod = nvme_alloc_iod(psegs, bio->bi_iter.bi_size, GFP_ATOMIC);
626 if (!iod)
627 goto nomem;
628 iod->private = bio;
629
630 result = -EBUSY;
631 cmdid = alloc_cmdid(nvmeq, iod, bio_completion, NVME_IO_TIMEOUT); 649 cmdid = alloc_cmdid(nvmeq, iod, bio_completion, NVME_IO_TIMEOUT);
632 if (unlikely(cmdid < 0)) 650 if (unlikely(cmdid < 0))
633 goto free_iod; 651 return cmdid;
634 652
635 if (bio->bi_rw & REQ_DISCARD) { 653 if (bio->bi_rw & REQ_DISCARD)
636 result = nvme_submit_discard(nvmeq, ns, bio, iod, cmdid); 654 return nvme_submit_discard(nvmeq, ns, bio, iod, cmdid);
637 if (result) 655 if ((bio->bi_rw & REQ_FLUSH) && !iod->nents)
638 goto free_cmdid;
639 return result;
640 }
641 if ((bio->bi_rw & REQ_FLUSH) && !psegs)
642 return nvme_submit_flush(nvmeq, ns, cmdid); 656 return nvme_submit_flush(nvmeq, ns, cmdid);
643 657
644 control = 0; 658 control = 0;
@@ -652,42 +666,85 @@ static int nvme_submit_bio_queue(struct nvme_queue *nvmeq, struct nvme_ns *ns,
652 dsmgmt |= NVME_RW_DSM_FREQ_PREFETCH; 666 dsmgmt |= NVME_RW_DSM_FREQ_PREFETCH;
653 667
654 cmnd = &nvmeq->sq_cmds[nvmeq->sq_tail]; 668 cmnd = &nvmeq->sq_cmds[nvmeq->sq_tail];
655
656 memset(cmnd, 0, sizeof(*cmnd)); 669 memset(cmnd, 0, sizeof(*cmnd));
657 if (bio_data_dir(bio)) {
658 cmnd->rw.opcode = nvme_cmd_write;
659 dma_dir = DMA_TO_DEVICE;
660 } else {
661 cmnd->rw.opcode = nvme_cmd_read;
662 dma_dir = DMA_FROM_DEVICE;
663 }
664
665 result = nvme_map_bio(nvmeq, iod, bio, dma_dir, psegs);
666 if (result <= 0)
667 goto free_cmdid;
668 length = result;
669 670
671 cmnd->rw.opcode = bio_data_dir(bio) ? nvme_cmd_write : nvme_cmd_read;
670 cmnd->rw.command_id = cmdid; 672 cmnd->rw.command_id = cmdid;
671 cmnd->rw.nsid = cpu_to_le32(ns->ns_id); 673 cmnd->rw.nsid = cpu_to_le32(ns->ns_id);
672 length = nvme_setup_prps(nvmeq->dev, &cmnd->common, iod, length, 674 cmnd->rw.prp1 = cpu_to_le64(sg_dma_address(iod->sg));
673 GFP_ATOMIC); 675 cmnd->rw.prp2 = cpu_to_le64(iod->first_dma);
674 cmnd->rw.slba = cpu_to_le64(nvme_block_nr(ns, bio->bi_iter.bi_sector)); 676 cmnd->rw.slba = cpu_to_le64(nvme_block_nr(ns, bio->bi_iter.bi_sector));
675 cmnd->rw.length = cpu_to_le16((length >> ns->lba_shift) - 1); 677 cmnd->rw.length =
678 cpu_to_le16((bio->bi_iter.bi_size >> ns->lba_shift) - 1);
676 cmnd->rw.control = cpu_to_le16(control); 679 cmnd->rw.control = cpu_to_le16(control);
677 cmnd->rw.dsmgmt = cpu_to_le32(dsmgmt); 680 cmnd->rw.dsmgmt = cpu_to_le32(dsmgmt);
678 681
679 nvme_start_io_acct(bio);
680 if (++nvmeq->sq_tail == nvmeq->q_depth) 682 if (++nvmeq->sq_tail == nvmeq->q_depth)
681 nvmeq->sq_tail = 0; 683 nvmeq->sq_tail = 0;
682 writel(nvmeq->sq_tail, nvmeq->q_db); 684 writel(nvmeq->sq_tail, nvmeq->q_db);
683 685
684 return 0; 686 return 0;
687}
688
689/*
690 * Called with local interrupts disabled and the q_lock held. May not sleep.
691 */
692static int nvme_submit_bio_queue(struct nvme_queue *nvmeq, struct nvme_ns *ns,
693 struct bio *bio)
694{
695 struct nvme_iod *iod;
696 int psegs = bio_phys_segments(ns->queue, bio);
697 int result;
698
699 if ((bio->bi_rw & REQ_FLUSH) && psegs) {
700 result = nvme_submit_flush_data(nvmeq, ns);
701 if (result)
702 return result;
703 }
704
705 iod = nvme_alloc_iod(psegs, bio->bi_iter.bi_size, GFP_ATOMIC);
706 if (!iod)
707 return -ENOMEM;
708
709 iod->private = bio;
710 if (bio->bi_rw & REQ_DISCARD) {
711 void *range;
712 /*
713 * We reuse the small pool to allocate the 16-byte range here
714 * as it is not worth having a special pool for these or
715 * additional cases to handle freeing the iod.
716 */
717 range = dma_pool_alloc(nvmeq->dev->prp_small_pool,
718 GFP_ATOMIC,
719 &iod->first_dma);
720 if (!range) {
721 result = -ENOMEM;
722 goto free_iod;
723 }
724 iod_list(iod)[0] = (__le64 *)range;
725 iod->npages = 0;
726 } else if (psegs) {
727 result = nvme_map_bio(nvmeq, iod, bio,
728 bio_data_dir(bio) ? DMA_TO_DEVICE : DMA_FROM_DEVICE,
729 psegs);
730 if (result <= 0)
731 goto free_iod;
732 if (nvme_setup_prps(nvmeq->dev, iod, result, GFP_ATOMIC) !=
733 result) {
734 result = -ENOMEM;
735 goto free_iod;
736 }
737 nvme_start_io_acct(bio);
738 }
739 if (unlikely(nvme_submit_iod(nvmeq, iod))) {
740 if (!waitqueue_active(&nvmeq->sq_full))
741 add_wait_queue(&nvmeq->sq_full, &nvmeq->sq_cong_wait);
742 list_add_tail(&iod->node, &nvmeq->iod_bio);
743 }
744 return 0;
685 745
686 free_cmdid:
687 free_cmdid(nvmeq, cmdid, NULL);
688 free_iod: 746 free_iod:
689 nvme_free_iod(nvmeq->dev, iod); 747 nvme_free_iod(nvmeq->dev, iod);
690 nomem:
691 return result; 748 return result;
692} 749}
693 750
@@ -711,7 +768,7 @@ static int nvme_process_cq(struct nvme_queue *nvmeq)
711 } 768 }
712 769
713 ctx = free_cmdid(nvmeq, cqe.command_id, &fn); 770 ctx = free_cmdid(nvmeq, cqe.command_id, &fn);
714 fn(nvmeq->dev, ctx, &cqe); 771 fn(nvmeq, ctx, &cqe);
715 } 772 }
716 773
717 /* If the controller ignores the cq head doorbell and continuously 774 /* If the controller ignores the cq head doorbell and continuously
@@ -747,7 +804,7 @@ static void nvme_make_request(struct request_queue *q, struct bio *bio)
747 if (!nvmeq->q_suspended && bio_list_empty(&nvmeq->sq_cong)) 804 if (!nvmeq->q_suspended && bio_list_empty(&nvmeq->sq_cong))
748 result = nvme_submit_bio_queue(nvmeq, ns, bio); 805 result = nvme_submit_bio_queue(nvmeq, ns, bio);
749 if (unlikely(result)) { 806 if (unlikely(result)) {
750 if (bio_list_empty(&nvmeq->sq_cong)) 807 if (!waitqueue_active(&nvmeq->sq_full))
751 add_wait_queue(&nvmeq->sq_full, &nvmeq->sq_cong_wait); 808 add_wait_queue(&nvmeq->sq_full, &nvmeq->sq_cong_wait);
752 bio_list_add(&nvmeq->sq_cong, bio); 809 bio_list_add(&nvmeq->sq_cong, bio);
753 } 810 }
@@ -791,7 +848,7 @@ struct sync_cmd_info {
791 int status; 848 int status;
792}; 849};
793 850
794static void sync_completion(struct nvme_dev *dev, void *ctx, 851static void sync_completion(struct nvme_queue *nvmeq, void *ctx,
795 struct nvme_completion *cqe) 852 struct nvme_completion *cqe)
796{ 853{
797 struct sync_cmd_info *cmdinfo = ctx; 854 struct sync_cmd_info *cmdinfo = ctx;
@@ -804,27 +861,46 @@ static void sync_completion(struct nvme_dev *dev, void *ctx,
804 * Returns 0 on success. If the result is negative, it's a Linux error code; 861 * Returns 0 on success. If the result is negative, it's a Linux error code;
805 * if the result is positive, it's an NVM Express status code 862 * if the result is positive, it's an NVM Express status code
806 */ 863 */
807int nvme_submit_sync_cmd(struct nvme_queue *nvmeq, struct nvme_command *cmd, 864static int nvme_submit_sync_cmd(struct nvme_dev *dev, int q_idx,
865 struct nvme_command *cmd,
808 u32 *result, unsigned timeout) 866 u32 *result, unsigned timeout)
809{ 867{
810 int cmdid; 868 int cmdid, ret;
811 struct sync_cmd_info cmdinfo; 869 struct sync_cmd_info cmdinfo;
870 struct nvme_queue *nvmeq;
871
872 nvmeq = lock_nvmeq(dev, q_idx);
873 if (!nvmeq) {
874 unlock_nvmeq(nvmeq);
875 return -ENODEV;
876 }
812 877
813 cmdinfo.task = current; 878 cmdinfo.task = current;
814 cmdinfo.status = -EINTR; 879 cmdinfo.status = -EINTR;
815 880
816 cmdid = alloc_cmdid_killable(nvmeq, &cmdinfo, sync_completion, 881 cmdid = alloc_cmdid(nvmeq, &cmdinfo, sync_completion, timeout);
817 timeout); 882 if (cmdid < 0) {
818 if (cmdid < 0) 883 unlock_nvmeq(nvmeq);
819 return cmdid; 884 return cmdid;
885 }
820 cmd->common.command_id = cmdid; 886 cmd->common.command_id = cmdid;
821 887
822 set_current_state(TASK_KILLABLE); 888 set_current_state(TASK_KILLABLE);
823 nvme_submit_cmd(nvmeq, cmd); 889 ret = nvme_submit_cmd(nvmeq, cmd);
890 if (ret) {
891 free_cmdid(nvmeq, cmdid, NULL);
892 unlock_nvmeq(nvmeq);
893 set_current_state(TASK_RUNNING);
894 return ret;
895 }
896 unlock_nvmeq(nvmeq);
824 schedule_timeout(timeout); 897 schedule_timeout(timeout);
825 898
826 if (cmdinfo.status == -EINTR) { 899 if (cmdinfo.status == -EINTR) {
827 nvme_abort_command(nvmeq, cmdid); 900 nvmeq = lock_nvmeq(dev, q_idx);
901 if (nvmeq)
902 nvme_abort_command(nvmeq, cmdid);
903 unlock_nvmeq(nvmeq);
828 return -EINTR; 904 return -EINTR;
829 } 905 }
830 906
@@ -845,20 +921,26 @@ static int nvme_submit_async_cmd(struct nvme_queue *nvmeq,
845 return cmdid; 921 return cmdid;
846 cmdinfo->status = -EINTR; 922 cmdinfo->status = -EINTR;
847 cmd->common.command_id = cmdid; 923 cmd->common.command_id = cmdid;
848 nvme_submit_cmd(nvmeq, cmd); 924 return nvme_submit_cmd(nvmeq, cmd);
849 return 0;
850} 925}
851 926
852int nvme_submit_admin_cmd(struct nvme_dev *dev, struct nvme_command *cmd, 927int nvme_submit_admin_cmd(struct nvme_dev *dev, struct nvme_command *cmd,
853 u32 *result) 928 u32 *result)
854{ 929{
855 return nvme_submit_sync_cmd(dev->queues[0], cmd, result, ADMIN_TIMEOUT); 930 return nvme_submit_sync_cmd(dev, 0, cmd, result, ADMIN_TIMEOUT);
931}
932
933int nvme_submit_io_cmd(struct nvme_dev *dev, struct nvme_command *cmd,
934 u32 *result)
935{
936 return nvme_submit_sync_cmd(dev, smp_processor_id() + 1, cmd, result,
937 NVME_IO_TIMEOUT);
856} 938}
857 939
858static int nvme_submit_admin_cmd_async(struct nvme_dev *dev, 940static int nvme_submit_admin_cmd_async(struct nvme_dev *dev,
859 struct nvme_command *cmd, struct async_cmd_info *cmdinfo) 941 struct nvme_command *cmd, struct async_cmd_info *cmdinfo)
860{ 942{
861 return nvme_submit_async_cmd(dev->queues[0], cmd, cmdinfo, 943 return nvme_submit_async_cmd(raw_nvmeq(dev, 0), cmd, cmdinfo,
862 ADMIN_TIMEOUT); 944 ADMIN_TIMEOUT);
863} 945}
864 946
@@ -985,6 +1067,7 @@ static void nvme_abort_cmd(int cmdid, struct nvme_queue *nvmeq)
985 struct nvme_command cmd; 1067 struct nvme_command cmd;
986 struct nvme_dev *dev = nvmeq->dev; 1068 struct nvme_dev *dev = nvmeq->dev;
987 struct nvme_cmd_info *info = nvme_cmd_info(nvmeq); 1069 struct nvme_cmd_info *info = nvme_cmd_info(nvmeq);
1070 struct nvme_queue *adminq;
988 1071
989 if (!nvmeq->qid || info[cmdid].aborted) { 1072 if (!nvmeq->qid || info[cmdid].aborted) {
990 if (work_busy(&dev->reset_work)) 1073 if (work_busy(&dev->reset_work))
@@ -1001,7 +1084,8 @@ static void nvme_abort_cmd(int cmdid, struct nvme_queue *nvmeq)
1001 if (!dev->abort_limit) 1084 if (!dev->abort_limit)
1002 return; 1085 return;
1003 1086
1004 a_cmdid = alloc_cmdid(dev->queues[0], CMD_CTX_ABORT, special_completion, 1087 adminq = rcu_dereference(dev->queues[0]);
1088 a_cmdid = alloc_cmdid(adminq, CMD_CTX_ABORT, special_completion,
1005 ADMIN_TIMEOUT); 1089 ADMIN_TIMEOUT);
1006 if (a_cmdid < 0) 1090 if (a_cmdid < 0)
1007 return; 1091 return;
@@ -1018,7 +1102,7 @@ static void nvme_abort_cmd(int cmdid, struct nvme_queue *nvmeq)
1018 1102
1019 dev_warn(nvmeq->q_dmadev, "Aborting I/O %d QID %d\n", cmdid, 1103 dev_warn(nvmeq->q_dmadev, "Aborting I/O %d QID %d\n", cmdid,
1020 nvmeq->qid); 1104 nvmeq->qid);
1021 nvme_submit_cmd(dev->queues[0], &cmd); 1105 nvme_submit_cmd(adminq, &cmd);
1022} 1106}
1023 1107
1024/** 1108/**
@@ -1051,23 +1135,38 @@ static void nvme_cancel_ios(struct nvme_queue *nvmeq, bool timeout)
1051 dev_warn(nvmeq->q_dmadev, "Cancelling I/O %d QID %d\n", cmdid, 1135 dev_warn(nvmeq->q_dmadev, "Cancelling I/O %d QID %d\n", cmdid,
1052 nvmeq->qid); 1136 nvmeq->qid);
1053 ctx = cancel_cmdid(nvmeq, cmdid, &fn); 1137 ctx = cancel_cmdid(nvmeq, cmdid, &fn);
1054 fn(nvmeq->dev, ctx, &cqe); 1138 fn(nvmeq, ctx, &cqe);
1055 } 1139 }
1056} 1140}
1057 1141
1058static void nvme_free_queue(struct nvme_queue *nvmeq) 1142static void nvme_free_queue(struct rcu_head *r)
1059{ 1143{
1144 struct nvme_queue *nvmeq = container_of(r, struct nvme_queue, r_head);
1145
1060 spin_lock_irq(&nvmeq->q_lock); 1146 spin_lock_irq(&nvmeq->q_lock);
1061 while (bio_list_peek(&nvmeq->sq_cong)) { 1147 while (bio_list_peek(&nvmeq->sq_cong)) {
1062 struct bio *bio = bio_list_pop(&nvmeq->sq_cong); 1148 struct bio *bio = bio_list_pop(&nvmeq->sq_cong);
1063 bio_endio(bio, -EIO); 1149 bio_endio(bio, -EIO);
1064 } 1150 }
1151 while (!list_empty(&nvmeq->iod_bio)) {
1152 static struct nvme_completion cqe = {
1153 .status = cpu_to_le16(
1154 (NVME_SC_ABORT_REQ | NVME_SC_DNR) << 1),
1155 };
1156 struct nvme_iod *iod = list_first_entry(&nvmeq->iod_bio,
1157 struct nvme_iod,
1158 node);
1159 list_del(&iod->node);
1160 bio_completion(nvmeq, iod, &cqe);
1161 }
1065 spin_unlock_irq(&nvmeq->q_lock); 1162 spin_unlock_irq(&nvmeq->q_lock);
1066 1163
1067 dma_free_coherent(nvmeq->q_dmadev, CQ_SIZE(nvmeq->q_depth), 1164 dma_free_coherent(nvmeq->q_dmadev, CQ_SIZE(nvmeq->q_depth),
1068 (void *)nvmeq->cqes, nvmeq->cq_dma_addr); 1165 (void *)nvmeq->cqes, nvmeq->cq_dma_addr);
1069 dma_free_coherent(nvmeq->q_dmadev, SQ_SIZE(nvmeq->q_depth), 1166 dma_free_coherent(nvmeq->q_dmadev, SQ_SIZE(nvmeq->q_depth),
1070 nvmeq->sq_cmds, nvmeq->sq_dma_addr); 1167 nvmeq->sq_cmds, nvmeq->sq_dma_addr);
1168 if (nvmeq->qid)
1169 free_cpumask_var(nvmeq->cpu_mask);
1071 kfree(nvmeq); 1170 kfree(nvmeq);
1072} 1171}
1073 1172
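
The synthetic completion used to fail leftover iods shifts the status code left by one because bit 0 of the 16-bit completion status word is the phase tag; the code itself sits in bits 15:1, with DNR in the top bit. A stand-alone sketch of that packing (the NVME_SC_* values are assumed to match this tree):

#include <stdio.h>
#include <stdint.h>

#define NVME_SC_ABORT_REQ 0x7           /* assumed to match the driver's value */
#define NVME_SC_DNR       0x4000        /* "do not retry" */

int main(void)
{
        /* bit 0 of the completion status word is the phase tag,
         * so status codes are packed into bits 15:1 */
        uint16_t status = (NVME_SC_ABORT_REQ | NVME_SC_DNR) << 1;

        printf("status word 0x%04x: phase=%u code=0x%03x dnr=%u\n",
               status, status & 1, (status >> 1) & 0xff,
               !!(status & (NVME_SC_DNR << 1)));
        return 0;
}
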
@@ -1076,9 +1175,10 @@ static void nvme_free_queues(struct nvme_dev *dev, int lowest)
1076 int i; 1175 int i;
1077 1176
1078 for (i = dev->queue_count - 1; i >= lowest; i--) { 1177 for (i = dev->queue_count - 1; i >= lowest; i--) {
1079 nvme_free_queue(dev->queues[i]); 1178 struct nvme_queue *nvmeq = raw_nvmeq(dev, i);
1179 rcu_assign_pointer(dev->queues[i], NULL);
1180 call_rcu(&nvmeq->r_head, nvme_free_queue);
1080 dev->queue_count--; 1181 dev->queue_count--;
1081 dev->queues[i] = NULL;
1082 } 1182 }
1083} 1183}
1084 1184
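
From here on dev->queues[] is RCU-managed: readers take rcu_read_lock() and rcu_dereference() the slot, while teardown unpublishes it with rcu_assign_pointer(..., NULL) and defers the free past a grace period (call_rcu() above, with an rcu_barrier() at remove time). Roughly the same pattern in user space with liburcu (link with -lurcu), using synchronize_rcu() for brevity; the queue struct is invented:

#include <urcu.h>       /* userspace RCU: rcu_read_lock(), synchronize_rcu(), ... */
#include <stdio.h>
#include <stdlib.h>

struct queue { int qid; };

static struct queue *queues[4];

static void publish(int i, int qid)
{
        struct queue *q = malloc(sizeof(*q));

        q->qid = qid;
        rcu_assign_pointer(queues[i], q);       /* readers may see it from now on */
}

static int read_qid(int i)
{
        int qid = -1;

        rcu_read_lock();
        struct queue *q = rcu_dereference(queues[i]);
        if (q)
                qid = q->qid;
        rcu_read_unlock();
        return qid;
}

static void retire(int i)
{
        struct queue *q = queues[i];    /* writer side, serialized elsewhere */

        rcu_assign_pointer(queues[i], NULL);    /* unpublish first ... */
        synchronize_rcu();                      /* ... wait out current readers ... */
        free(q);                                /* ... then it is safe to free */
}

int main(void)
{
        rcu_register_thread();
        publish(1, 1);
        printf("qid %d\n", read_qid(1));
        retire(1);
        rcu_unregister_thread();
        return 0;
}
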
@@ -1098,6 +1198,7 @@ static int nvme_suspend_queue(struct nvme_queue *nvmeq)
1098 return 1; 1198 return 1;
1099 } 1199 }
1100 nvmeq->q_suspended = 1; 1200 nvmeq->q_suspended = 1;
1201 nvmeq->dev->online_queues--;
1101 spin_unlock_irq(&nvmeq->q_lock); 1202 spin_unlock_irq(&nvmeq->q_lock);
1102 1203
1103 irq_set_affinity_hint(vector, NULL); 1204 irq_set_affinity_hint(vector, NULL);
@@ -1116,7 +1217,7 @@ static void nvme_clear_queue(struct nvme_queue *nvmeq)
1116 1217
1117static void nvme_disable_queue(struct nvme_dev *dev, int qid) 1218static void nvme_disable_queue(struct nvme_dev *dev, int qid)
1118{ 1219{
1119 struct nvme_queue *nvmeq = dev->queues[qid]; 1220 struct nvme_queue *nvmeq = raw_nvmeq(dev, qid);
1120 1221
1121 if (!nvmeq) 1222 if (!nvmeq)
1122 return; 1223 return;
@@ -1152,6 +1253,9 @@ static struct nvme_queue *nvme_alloc_queue(struct nvme_dev *dev, int qid,
1152 if (!nvmeq->sq_cmds) 1253 if (!nvmeq->sq_cmds)
1153 goto free_cqdma; 1254 goto free_cqdma;
1154 1255
1256 if (qid && !zalloc_cpumask_var(&nvmeq->cpu_mask, GFP_KERNEL))
1257 goto free_sqdma;
1258
1155 nvmeq->q_dmadev = dmadev; 1259 nvmeq->q_dmadev = dmadev;
1156 nvmeq->dev = dev; 1260 nvmeq->dev = dev;
1157 snprintf(nvmeq->irqname, sizeof(nvmeq->irqname), "nvme%dq%d", 1261 snprintf(nvmeq->irqname, sizeof(nvmeq->irqname), "nvme%dq%d",
@@ -1162,15 +1266,20 @@ static struct nvme_queue *nvme_alloc_queue(struct nvme_dev *dev, int qid,
1162 init_waitqueue_head(&nvmeq->sq_full); 1266 init_waitqueue_head(&nvmeq->sq_full);
1163 init_waitqueue_entry(&nvmeq->sq_cong_wait, nvme_thread); 1267 init_waitqueue_entry(&nvmeq->sq_cong_wait, nvme_thread);
1164 bio_list_init(&nvmeq->sq_cong); 1268 bio_list_init(&nvmeq->sq_cong);
1269 INIT_LIST_HEAD(&nvmeq->iod_bio);
1165 nvmeq->q_db = &dev->dbs[qid * 2 * dev->db_stride]; 1270 nvmeq->q_db = &dev->dbs[qid * 2 * dev->db_stride];
1166 nvmeq->q_depth = depth; 1271 nvmeq->q_depth = depth;
1167 nvmeq->cq_vector = vector; 1272 nvmeq->cq_vector = vector;
1168 nvmeq->qid = qid; 1273 nvmeq->qid = qid;
1169 nvmeq->q_suspended = 1; 1274 nvmeq->q_suspended = 1;
1170 dev->queue_count++; 1275 dev->queue_count++;
1276 rcu_assign_pointer(dev->queues[qid], nvmeq);
1171 1277
1172 return nvmeq; 1278 return nvmeq;
1173 1279
1280 free_sqdma:
1281 dma_free_coherent(dmadev, SQ_SIZE(depth), (void *)nvmeq->sq_cmds,
1282 nvmeq->sq_dma_addr);
1174 free_cqdma: 1283 free_cqdma:
1175 dma_free_coherent(dmadev, CQ_SIZE(depth), (void *)nvmeq->cqes, 1284 dma_free_coherent(dmadev, CQ_SIZE(depth), (void *)nvmeq->cqes,
1176 nvmeq->cq_dma_addr); 1285 nvmeq->cq_dma_addr);
@@ -1203,6 +1312,7 @@ static void nvme_init_queue(struct nvme_queue *nvmeq, u16 qid)
1203 memset((void *)nvmeq->cqes, 0, CQ_SIZE(nvmeq->q_depth)); 1312 memset((void *)nvmeq->cqes, 0, CQ_SIZE(nvmeq->q_depth));
1204 nvme_cancel_ios(nvmeq, false); 1313 nvme_cancel_ios(nvmeq, false);
1205 nvmeq->q_suspended = 0; 1314 nvmeq->q_suspended = 0;
1315 dev->online_queues++;
1206} 1316}
1207 1317
1208static int nvme_create_queue(struct nvme_queue *nvmeq, int qid) 1318static int nvme_create_queue(struct nvme_queue *nvmeq, int qid)
@@ -1311,12 +1421,11 @@ static int nvme_configure_admin_queue(struct nvme_dev *dev)
1311 if (result < 0) 1421 if (result < 0)
1312 return result; 1422 return result;
1313 1423
1314 nvmeq = dev->queues[0]; 1424 nvmeq = raw_nvmeq(dev, 0);
1315 if (!nvmeq) { 1425 if (!nvmeq) {
1316 nvmeq = nvme_alloc_queue(dev, 0, 64, 0); 1426 nvmeq = nvme_alloc_queue(dev, 0, 64, 0);
1317 if (!nvmeq) 1427 if (!nvmeq)
1318 return -ENOMEM; 1428 return -ENOMEM;
1319 dev->queues[0] = nvmeq;
1320 } 1429 }
1321 1430
1322 aqa = nvmeq->q_depth - 1; 1431 aqa = nvmeq->q_depth - 1;
@@ -1418,7 +1527,6 @@ void nvme_unmap_user_pages(struct nvme_dev *dev, int write,
1418static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio) 1527static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
1419{ 1528{
1420 struct nvme_dev *dev = ns->dev; 1529 struct nvme_dev *dev = ns->dev;
1421 struct nvme_queue *nvmeq;
1422 struct nvme_user_io io; 1530 struct nvme_user_io io;
1423 struct nvme_command c; 1531 struct nvme_command c;
1424 unsigned length, meta_len; 1532 unsigned length, meta_len;
@@ -1492,22 +1600,14 @@ static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
1492 c.rw.metadata = cpu_to_le64(meta_dma_addr); 1600 c.rw.metadata = cpu_to_le64(meta_dma_addr);
1493 } 1601 }
1494 1602
1495 length = nvme_setup_prps(dev, &c.common, iod, length, GFP_KERNEL); 1603 length = nvme_setup_prps(dev, iod, length, GFP_KERNEL);
1604 c.rw.prp1 = cpu_to_le64(sg_dma_address(iod->sg));
1605 c.rw.prp2 = cpu_to_le64(iod->first_dma);
1496 1606
1497 nvmeq = get_nvmeq(dev);
1498 /*
1499 * Since nvme_submit_sync_cmd sleeps, we can't keep preemption
1500 * disabled. We may be preempted at any point, and be rescheduled
1501 * to a different CPU. That will cause cacheline bouncing, but no
1502 * additional races since q_lock already protects against other CPUs.
1503 */
1504 put_nvmeq(nvmeq);
1505 if (length != (io.nblocks + 1) << ns->lba_shift) 1607 if (length != (io.nblocks + 1) << ns->lba_shift)
1506 status = -ENOMEM; 1608 status = -ENOMEM;
1507 else if (!nvmeq || nvmeq->q_suspended)
1508 status = -EBUSY;
1509 else 1609 else
1510 status = nvme_submit_sync_cmd(nvmeq, &c, NULL, NVME_IO_TIMEOUT); 1610 status = nvme_submit_io_cmd(dev, &c, NULL);
1511 1611
1512 if (meta_len) { 1612 if (meta_len) {
1513 if (status == NVME_SC_SUCCESS && !(io.opcode & 1)) { 1613 if (status == NVME_SC_SUCCESS && !(io.opcode & 1)) {
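
nvme_setup_prps() now records the PRP state in the iod, and callers copy sg_dma_address(iod->sg) and iod->first_dma into prp1/prp2 themselves. For reference, what those two fields mean, sketched for a physically contiguous buffer (real code walks the scatterlist and allocates PRP lists from a DMA pool):

#include <stdio.h>
#include <stdint.h>

#define PAGE_SIZE 4096ULL

static void show_prps(uint64_t dma_addr, uint64_t len)
{
        uint64_t prp1 = dma_addr;
        uint64_t offset = dma_addr & (PAGE_SIZE - 1);
        uint64_t first = PAGE_SIZE - offset;    /* bytes reachable via PRP1 */

        printf("PRP1 = 0x%llx\n", (unsigned long long)prp1);
        if (len <= first)
                printf("PRP2 unused\n");
        else if (len <= first + PAGE_SIZE)
                printf("PRP2 = 0x%llx (second page)\n",
                       (unsigned long long)((dma_addr & ~(PAGE_SIZE - 1)) + PAGE_SIZE));
        else
                printf("PRP2 points to a PRP list of %llu page entries\n",
                       (unsigned long long)((len - first + PAGE_SIZE - 1) / PAGE_SIZE));
}

int main(void)
{
        show_prps(0x1000200, 512);      /* fits in the first page */
        show_prps(0x1000200, 8192);     /* spans three pages -> PRP list */
        return 0;
}
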
@@ -1572,8 +1672,9 @@ static int nvme_user_admin_cmd(struct nvme_dev *dev,
1572 length); 1672 length);
1573 if (IS_ERR(iod)) 1673 if (IS_ERR(iod))
1574 return PTR_ERR(iod); 1674 return PTR_ERR(iod);
1575 length = nvme_setup_prps(dev, &c.common, iod, length, 1675 length = nvme_setup_prps(dev, iod, length, GFP_KERNEL);
1576 GFP_KERNEL); 1676 c.common.prp1 = cpu_to_le64(sg_dma_address(iod->sg));
1677 c.common.prp2 = cpu_to_le64(iod->first_dma);
1577 } 1678 }
1578 1679
1579 timeout = cmd.timeout_ms ? msecs_to_jiffies(cmd.timeout_ms) : 1680 timeout = cmd.timeout_ms ? msecs_to_jiffies(cmd.timeout_ms) :
@@ -1581,8 +1682,7 @@ static int nvme_user_admin_cmd(struct nvme_dev *dev,
1581 if (length != cmd.data_len) 1682 if (length != cmd.data_len)
1582 status = -ENOMEM; 1683 status = -ENOMEM;
1583 else 1684 else
1584 status = nvme_submit_sync_cmd(dev->queues[0], &c, &cmd.result, 1685 status = nvme_submit_sync_cmd(dev, 0, &c, &cmd.result, timeout);
1585 timeout);
1586 1686
1587 if (cmd.data_len) { 1687 if (cmd.data_len) {
1588 nvme_unmap_user_pages(dev, cmd.opcode & 1, iod); 1688 nvme_unmap_user_pages(dev, cmd.opcode & 1, iod);
@@ -1653,25 +1753,51 @@ static void nvme_release(struct gendisk *disk, fmode_t mode)
1653 kref_put(&dev->kref, nvme_free_dev); 1753 kref_put(&dev->kref, nvme_free_dev);
1654} 1754}
1655 1755
1756static int nvme_getgeo(struct block_device *bd, struct hd_geometry *geo)
1757{
1758 /* some standard values */
1759 geo->heads = 1 << 6;
1760 geo->sectors = 1 << 5;
1761 geo->cylinders = get_capacity(bd->bd_disk) >> 11;
1762 return 0;
1763}
1764
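
nvme_getgeo() fabricates a CHS geometry: with 64 heads and 32 sectors per track a cylinder covers 64 * 32 = 2048 sectors, so shifting the capacity (in 512-byte sectors) right by 11 gives the cylinder count. A quick arithmetic check with a made-up capacity:

#include <stdio.h>

int main(void)
{
        unsigned long long capacity = 468862128ULL;    /* 512-byte sectors, ~240 GB */
        unsigned heads = 1 << 6;                        /* 64 */
        unsigned sectors = 1 << 5;                      /* 32 per track */
        unsigned long long cylinders = capacity >> 11;  /* / (64 * 32) */

        printf("C/H/S = %llu/%u/%u -> %llu sectors addressable\n",
               cylinders, heads, sectors,
               cylinders * heads * sectors);
        return 0;
}
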
1656static const struct block_device_operations nvme_fops = { 1765static const struct block_device_operations nvme_fops = {
1657 .owner = THIS_MODULE, 1766 .owner = THIS_MODULE,
1658 .ioctl = nvme_ioctl, 1767 .ioctl = nvme_ioctl,
1659 .compat_ioctl = nvme_compat_ioctl, 1768 .compat_ioctl = nvme_compat_ioctl,
1660 .open = nvme_open, 1769 .open = nvme_open,
1661 .release = nvme_release, 1770 .release = nvme_release,
1771 .getgeo = nvme_getgeo,
1662}; 1772};
1663 1773
1774static void nvme_resubmit_iods(struct nvme_queue *nvmeq)
1775{
1776 struct nvme_iod *iod, *next;
1777
1778 list_for_each_entry_safe(iod, next, &nvmeq->iod_bio, node) {
1779 if (unlikely(nvme_submit_iod(nvmeq, iod)))
1780 break;
1781 list_del(&iod->node);
1782 if (bio_list_empty(&nvmeq->sq_cong) &&
1783 list_empty(&nvmeq->iod_bio))
1784 remove_wait_queue(&nvmeq->sq_full,
1785 &nvmeq->sq_cong_wait);
1786 }
1787}
1788
1664static void nvme_resubmit_bios(struct nvme_queue *nvmeq) 1789static void nvme_resubmit_bios(struct nvme_queue *nvmeq)
1665{ 1790{
1666 while (bio_list_peek(&nvmeq->sq_cong)) { 1791 while (bio_list_peek(&nvmeq->sq_cong)) {
1667 struct bio *bio = bio_list_pop(&nvmeq->sq_cong); 1792 struct bio *bio = bio_list_pop(&nvmeq->sq_cong);
1668 struct nvme_ns *ns = bio->bi_bdev->bd_disk->private_data; 1793 struct nvme_ns *ns = bio->bi_bdev->bd_disk->private_data;
1669 1794
1670 if (bio_list_empty(&nvmeq->sq_cong)) 1795 if (bio_list_empty(&nvmeq->sq_cong) &&
1796 list_empty(&nvmeq->iod_bio))
1671 remove_wait_queue(&nvmeq->sq_full, 1797 remove_wait_queue(&nvmeq->sq_full,
1672 &nvmeq->sq_cong_wait); 1798 &nvmeq->sq_cong_wait);
1673 if (nvme_submit_bio_queue(nvmeq, ns, bio)) { 1799 if (nvme_submit_bio_queue(nvmeq, ns, bio)) {
1674 if (bio_list_empty(&nvmeq->sq_cong)) 1800 if (!waitqueue_active(&nvmeq->sq_full))
1675 add_wait_queue(&nvmeq->sq_full, 1801 add_wait_queue(&nvmeq->sq_full,
1676 &nvmeq->sq_cong_wait); 1802 &nvmeq->sq_cong_wait);
1677 bio_list_add_head(&nvmeq->sq_cong, bio); 1803 bio_list_add_head(&nvmeq->sq_cong, bio);
@@ -1700,8 +1826,10 @@ static int nvme_kthread(void *data)
1700 queue_work(nvme_workq, &dev->reset_work); 1826 queue_work(nvme_workq, &dev->reset_work);
1701 continue; 1827 continue;
1702 } 1828 }
1829 rcu_read_lock();
1703 for (i = 0; i < dev->queue_count; i++) { 1830 for (i = 0; i < dev->queue_count; i++) {
1704 struct nvme_queue *nvmeq = dev->queues[i]; 1831 struct nvme_queue *nvmeq =
1832 rcu_dereference(dev->queues[i]);
1705 if (!nvmeq) 1833 if (!nvmeq)
1706 continue; 1834 continue;
1707 spin_lock_irq(&nvmeq->q_lock); 1835 spin_lock_irq(&nvmeq->q_lock);
@@ -1710,9 +1838,11 @@ static int nvme_kthread(void *data)
1710 nvme_process_cq(nvmeq); 1838 nvme_process_cq(nvmeq);
1711 nvme_cancel_ios(nvmeq, true); 1839 nvme_cancel_ios(nvmeq, true);
1712 nvme_resubmit_bios(nvmeq); 1840 nvme_resubmit_bios(nvmeq);
1841 nvme_resubmit_iods(nvmeq);
1713 unlock: 1842 unlock:
1714 spin_unlock_irq(&nvmeq->q_lock); 1843 spin_unlock_irq(&nvmeq->q_lock);
1715 } 1844 }
1845 rcu_read_unlock();
1716 } 1846 }
1717 spin_unlock(&dev_list_lock); 1847 spin_unlock(&dev_list_lock);
1718 schedule_timeout(round_jiffies_relative(HZ)); 1848 schedule_timeout(round_jiffies_relative(HZ));
@@ -1787,6 +1917,143 @@ static struct nvme_ns *nvme_alloc_ns(struct nvme_dev *dev, unsigned nsid,
1787 return NULL; 1917 return NULL;
1788} 1918}
1789 1919
1920static int nvme_find_closest_node(int node)
1921{
1922 int n, val, min_val = INT_MAX, best_node = node;
1923
1924 for_each_online_node(n) {
1925 if (n == node)
1926 continue;
1927 val = node_distance(node, n);
1928 if (val < min_val) {
1929 min_val = val;
1930 best_node = n;
1931 }
1932 }
1933 return best_node;
1934}
1935
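
nvme_find_closest_node() is a linear scan of node_distance() for the nearest other NUMA node. The same selection in user space over a made-up SLIT-style distance table:

#include <stdio.h>
#include <limits.h>

#define NR_NODES 4

/* distances in the style of an ACPI SLIT table: 10 = local */
static const int node_distance[NR_NODES][NR_NODES] = {
        { 10, 16, 32, 32 },
        { 16, 10, 32, 32 },
        { 32, 32, 10, 16 },
        { 32, 32, 16, 10 },
};

static int find_closest_node(int node)
{
        int best_node = node, min_val = INT_MAX;

        for (int n = 0; n < NR_NODES; n++) {
                if (n == node)
                        continue;
                if (node_distance[node][n] < min_val) {
                        min_val = node_distance[node][n];
                        best_node = n;
                }
        }
        return best_node;
}

int main(void)
{
        for (int node = 0; node < NR_NODES; node++)
                printf("closest to node %d: node %d\n",
                       node, find_closest_node(node));
        return 0;
}
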
1936static void nvme_set_queue_cpus(cpumask_t *qmask, struct nvme_queue *nvmeq,
1937 int count)
1938{
1939 int cpu;
1940 for_each_cpu(cpu, qmask) {
1941 if (cpumask_weight(nvmeq->cpu_mask) >= count)
1942 break;
1943 if (!cpumask_test_and_set_cpu(cpu, nvmeq->cpu_mask))
1944 *per_cpu_ptr(nvmeq->dev->io_queue, cpu) = nvmeq->qid;
1945 }
1946}
1947
1948static void nvme_add_cpus(cpumask_t *mask, const cpumask_t *unassigned_cpus,
1949 const cpumask_t *new_mask, struct nvme_queue *nvmeq, int cpus_per_queue)
1950{
1951 int next_cpu;
1952 for_each_cpu(next_cpu, new_mask) {
1953 cpumask_or(mask, mask, get_cpu_mask(next_cpu));
1954 cpumask_or(mask, mask, topology_thread_cpumask(next_cpu));
1955 cpumask_and(mask, mask, unassigned_cpus);
1956 nvme_set_queue_cpus(mask, nvmeq, cpus_per_queue);
1957 }
1958}
1959
1960static void nvme_create_io_queues(struct nvme_dev *dev)
1961{
1962 unsigned i, max;
1963
1964 max = min(dev->max_qid, num_online_cpus());
1965 for (i = dev->queue_count; i <= max; i++)
1966 if (!nvme_alloc_queue(dev, i, dev->q_depth, i - 1))
1967 break;
1968
1969 max = min(dev->queue_count - 1, num_online_cpus());
1970 for (i = dev->online_queues; i <= max; i++)
1971 if (nvme_create_queue(raw_nvmeq(dev, i), i))
1972 break;
1973}
1974
1975/*
1976 * If there are fewer queues than online cpus, this will try to optimally
1977 * assign a queue to multiple cpus by grouping cpus that are "close" together:
1978 * thread siblings, core, socket, closest node, then whatever else is
1979 * available.
1980 */
1981static void nvme_assign_io_queues(struct nvme_dev *dev)
1982{
1983 unsigned cpu, cpus_per_queue, queues, remainder, i;
1984 cpumask_var_t unassigned_cpus;
1985
1986 nvme_create_io_queues(dev);
1987
1988 queues = min(dev->online_queues - 1, num_online_cpus());
1989 if (!queues)
1990 return;
1991
1992 cpus_per_queue = num_online_cpus() / queues;
1993 remainder = queues - (num_online_cpus() - queues * cpus_per_queue);
1994
1995 if (!alloc_cpumask_var(&unassigned_cpus, GFP_KERNEL))
1996 return;
1997
1998 cpumask_copy(unassigned_cpus, cpu_online_mask);
1999 cpu = cpumask_first(unassigned_cpus);
2000 for (i = 1; i <= queues; i++) {
2001 struct nvme_queue *nvmeq = lock_nvmeq(dev, i);
2002 cpumask_t mask;
2003
2004 cpumask_clear(nvmeq->cpu_mask);
2005 if (!cpumask_weight(unassigned_cpus)) {
2006 unlock_nvmeq(nvmeq);
2007 break;
2008 }
2009
2010 mask = *get_cpu_mask(cpu);
2011 nvme_set_queue_cpus(&mask, nvmeq, cpus_per_queue);
2012 if (cpus_weight(mask) < cpus_per_queue)
2013 nvme_add_cpus(&mask, unassigned_cpus,
2014 topology_thread_cpumask(cpu),
2015 nvmeq, cpus_per_queue);
2016 if (cpus_weight(mask) < cpus_per_queue)
2017 nvme_add_cpus(&mask, unassigned_cpus,
2018 topology_core_cpumask(cpu),
2019 nvmeq, cpus_per_queue);
2020 if (cpus_weight(mask) < cpus_per_queue)
2021 nvme_add_cpus(&mask, unassigned_cpus,
2022 cpumask_of_node(cpu_to_node(cpu)),
2023 nvmeq, cpus_per_queue);
2024 if (cpus_weight(mask) < cpus_per_queue)
2025 nvme_add_cpus(&mask, unassigned_cpus,
2026 cpumask_of_node(
2027 nvme_find_closest_node(
2028 cpu_to_node(cpu))),
2029 nvmeq, cpus_per_queue);
2030 if (cpus_weight(mask) < cpus_per_queue)
2031 nvme_add_cpus(&mask, unassigned_cpus,
2032 unassigned_cpus,
2033 nvmeq, cpus_per_queue);
2034
2035 WARN(cpumask_weight(nvmeq->cpu_mask) != cpus_per_queue,
2036 "nvme%d qid:%d mis-matched queue-to-cpu assignment\n",
2037 dev->instance, i);
2038
2039 irq_set_affinity_hint(dev->entry[nvmeq->cq_vector].vector,
2040 nvmeq->cpu_mask);
2041 cpumask_andnot(unassigned_cpus, unassigned_cpus,
2042 nvmeq->cpu_mask);
2043 cpu = cpumask_next(cpu, unassigned_cpus);
2044 if (remainder && !--remainder)
2045 cpus_per_queue++;
2046 unlock_nvmeq(nvmeq);
2047 }
2048 WARN(cpumask_weight(unassigned_cpus), "nvme%d unassigned online cpus\n",
2049 dev->instance);
2050 i = 0;
2051 cpumask_andnot(unassigned_cpus, cpu_possible_mask, cpu_online_mask);
2052 for_each_cpu(cpu, unassigned_cpus)
2053 *per_cpu_ptr(dev->io_queue, cpu) = (i++ % queues) + 1;
2054 free_cpumask_var(unassigned_cpus);
2055}
2056
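
The arithmetic in nvme_assign_io_queues() splits C online CPUs over Q queues so that the first Q - (C mod Q) queues get floor(C/Q) CPUs and the remaining queues get one more; the "if (remainder && !--remainder)" bump is what switches over. The bookkeeping in isolation:

#include <stdio.h>

int main(void)
{
        unsigned cpus = 14, queues = 4;         /* example sizes */
        unsigned cpus_per_queue = cpus / queues;
        unsigned remainder = queues - (cpus - queues * cpus_per_queue);
        unsigned total = 0;

        for (unsigned q = 1; q <= queues; q++) {
                printf("qid %u gets %u cpus\n", q, cpus_per_queue);
                total += cpus_per_queue;
                if (remainder && !--remainder)
                        cpus_per_queue++;       /* later queues absorb the leftover CPUs */
        }
        printf("total %u of %u cpus assigned\n", total, cpus);
        return 0;
}
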
1790static int set_queue_count(struct nvme_dev *dev, int count) 2057static int set_queue_count(struct nvme_dev *dev, int count)
1791{ 2058{
1792 int status; 2059 int status;
@@ -1805,13 +2072,26 @@ static size_t db_bar_size(struct nvme_dev *dev, unsigned nr_io_queues)
1805 return 4096 + ((nr_io_queues + 1) * 8 * dev->db_stride); 2072 return 4096 + ((nr_io_queues + 1) * 8 * dev->db_stride);
1806} 2073}
1807 2074
2075static int nvme_cpu_notify(struct notifier_block *self,
2076 unsigned long action, void *hcpu)
2077{
2078 struct nvme_dev *dev = container_of(self, struct nvme_dev, nb);
2079 switch (action) {
2080 case CPU_ONLINE:
2081 case CPU_DEAD:
2082 nvme_assign_io_queues(dev);
2083 break;
2084 }
2085 return NOTIFY_OK;
2086}
2087
1808static int nvme_setup_io_queues(struct nvme_dev *dev) 2088static int nvme_setup_io_queues(struct nvme_dev *dev)
1809{ 2089{
1810 struct nvme_queue *adminq = dev->queues[0]; 2090 struct nvme_queue *adminq = raw_nvmeq(dev, 0);
1811 struct pci_dev *pdev = dev->pci_dev; 2091 struct pci_dev *pdev = dev->pci_dev;
1812 int result, cpu, i, vecs, nr_io_queues, size, q_depth; 2092 int result, i, vecs, nr_io_queues, size;
1813 2093
1814 nr_io_queues = num_online_cpus(); 2094 nr_io_queues = num_possible_cpus();
1815 result = set_queue_count(dev, nr_io_queues); 2095 result = set_queue_count(dev, nr_io_queues);
1816 if (result < 0) 2096 if (result < 0)
1817 return result; 2097 return result;
@@ -1830,7 +2110,7 @@ static int nvme_setup_io_queues(struct nvme_dev *dev)
1830 size = db_bar_size(dev, nr_io_queues); 2110 size = db_bar_size(dev, nr_io_queues);
1831 } while (1); 2111 } while (1);
1832 dev->dbs = ((void __iomem *)dev->bar) + 4096; 2112 dev->dbs = ((void __iomem *)dev->bar) + 4096;
1833 dev->queues[0]->q_db = dev->dbs; 2113 adminq->q_db = dev->dbs;
1834 } 2114 }
1835 2115
1836 /* Deregister the admin queue's interrupt */ 2116 /* Deregister the admin queue's interrupt */
@@ -1856,6 +2136,7 @@ static int nvme_setup_io_queues(struct nvme_dev *dev)
1856 * number of interrupts. 2136 * number of interrupts.
1857 */ 2137 */
1858 nr_io_queues = vecs; 2138 nr_io_queues = vecs;
2139 dev->max_qid = nr_io_queues;
1859 2140
1860 result = queue_request_irq(dev, adminq, adminq->irqname); 2141 result = queue_request_irq(dev, adminq, adminq->irqname);
1861 if (result) { 2142 if (result) {
@@ -1864,49 +2145,13 @@ static int nvme_setup_io_queues(struct nvme_dev *dev)
1864 } 2145 }
1865 2146
1866 /* Free previously allocated queues that are no longer usable */ 2147 /* Free previously allocated queues that are no longer usable */
1867 spin_lock(&dev_list_lock); 2148 nvme_free_queues(dev, nr_io_queues + 1);
1868 for (i = dev->queue_count - 1; i > nr_io_queues; i--) { 2149 nvme_assign_io_queues(dev);
1869 struct nvme_queue *nvmeq = dev->queues[i];
1870
1871 spin_lock_irq(&nvmeq->q_lock);
1872 nvme_cancel_ios(nvmeq, false);
1873 spin_unlock_irq(&nvmeq->q_lock);
1874
1875 nvme_free_queue(nvmeq);
1876 dev->queue_count--;
1877 dev->queues[i] = NULL;
1878 }
1879 spin_unlock(&dev_list_lock);
1880
1881 cpu = cpumask_first(cpu_online_mask);
1882 for (i = 0; i < nr_io_queues; i++) {
1883 irq_set_affinity_hint(dev->entry[i].vector, get_cpu_mask(cpu));
1884 cpu = cpumask_next(cpu, cpu_online_mask);
1885 }
1886
1887 q_depth = min_t(int, NVME_CAP_MQES(readq(&dev->bar->cap)) + 1,
1888 NVME_Q_DEPTH);
1889 for (i = dev->queue_count - 1; i < nr_io_queues; i++) {
1890 dev->queues[i + 1] = nvme_alloc_queue(dev, i + 1, q_depth, i);
1891 if (!dev->queues[i + 1]) {
1892 result = -ENOMEM;
1893 goto free_queues;
1894 }
1895 }
1896
1897 for (; i < num_possible_cpus(); i++) {
1898 int target = i % rounddown_pow_of_two(dev->queue_count - 1);
1899 dev->queues[i + 1] = dev->queues[target + 1];
1900 }
1901 2150
1902 for (i = 1; i < dev->queue_count; i++) { 2151 dev->nb.notifier_call = &nvme_cpu_notify;
1903 result = nvme_create_queue(dev->queues[i], i); 2152 result = register_hotcpu_notifier(&dev->nb);
1904 if (result) { 2153 if (result)
1905 for (--i; i > 0; i--) 2154 goto free_queues;
1906 nvme_disable_queue(dev, i);
1907 goto free_queues;
1908 }
1909 }
1910 2155
1911 return 0; 2156 return 0;
1912 2157
@@ -1985,6 +2230,7 @@ static int nvme_dev_add(struct nvme_dev *dev)
1985 2230
1986static int nvme_dev_map(struct nvme_dev *dev) 2231static int nvme_dev_map(struct nvme_dev *dev)
1987{ 2232{
2233 u64 cap;
1988 int bars, result = -ENOMEM; 2234 int bars, result = -ENOMEM;
1989 struct pci_dev *pdev = dev->pci_dev; 2235 struct pci_dev *pdev = dev->pci_dev;
1990 2236
@@ -2008,7 +2254,9 @@ static int nvme_dev_map(struct nvme_dev *dev)
2008 result = -ENODEV; 2254 result = -ENODEV;
2009 goto unmap; 2255 goto unmap;
2010 } 2256 }
2011 dev->db_stride = 1 << NVME_CAP_STRIDE(readq(&dev->bar->cap)); 2257 cap = readq(&dev->bar->cap);
2258 dev->q_depth = min_t(int, NVME_CAP_MQES(cap) + 1, NVME_Q_DEPTH);
2259 dev->db_stride = 1 << NVME_CAP_STRIDE(cap);
2012 dev->dbs = ((void __iomem *)dev->bar) + 4096; 2260 dev->dbs = ((void __iomem *)dev->bar) + 4096;
2013 2261
2014 return 0; 2262 return 0;
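
nvme_dev_map() now reads CAP once and derives both the queue depth (CAP.MQES is zero-based, hence the +1) and the doorbell stride. A stand-alone decode of an invented CAP value, plus the matching doorbell-BAR size that db_bar_size() computes; the field positions follow the NVMe spec and the macro names mirror the driver's:

#include <stdio.h>
#include <stdint.h>

#define NVME_CAP_MQES(cap)      ((cap) & 0xffff)
#define NVME_CAP_STRIDE(cap)    (((cap) >> 32) & 0xf)

int main(void)
{
        uint64_t cap = 0x0000002028010fffULL;           /* invented controller CAP */
        unsigned q_depth = NVME_CAP_MQES(cap) + 1;      /* MQES is zero-based */
        unsigned stride = 1 << NVME_CAP_STRIDE(cap);    /* doorbell spacing, in dwords */
        unsigned nr_io_queues = 31;

        /* 4 KB of registers, then one (SQ tail, CQ head) doorbell pair per queue,
         * admin queue included; each pair is two 32-bit doorbells scaled by the stride */
        size_t bar = 4096 + (size_t)(nr_io_queues + 1) * 8 * stride;

        printf("q_depth=%u stride=%u doorbell BAR=%zu bytes\n",
               q_depth, stride, bar);
        return 0;
}
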
@@ -2164,7 +2412,7 @@ static void nvme_disable_io_queues(struct nvme_dev *dev)
2164 atomic_set(&dq.refcount, 0); 2412 atomic_set(&dq.refcount, 0);
2165 dq.worker = &worker; 2413 dq.worker = &worker;
2166 for (i = dev->queue_count - 1; i > 0; i--) { 2414 for (i = dev->queue_count - 1; i > 0; i--) {
2167 struct nvme_queue *nvmeq = dev->queues[i]; 2415 struct nvme_queue *nvmeq = raw_nvmeq(dev, i);
2168 2416
2169 if (nvme_suspend_queue(nvmeq)) 2417 if (nvme_suspend_queue(nvmeq))
2170 continue; 2418 continue;
@@ -2177,19 +2425,38 @@ static void nvme_disable_io_queues(struct nvme_dev *dev)
2177 kthread_stop(kworker_task); 2425 kthread_stop(kworker_task);
2178} 2426}
2179 2427
2428/*
2429* Remove the node from the device list and check
2430* for whether or not we need to stop the nvme_thread.
2431*/
2432static void nvme_dev_list_remove(struct nvme_dev *dev)
2433{
2434 struct task_struct *tmp = NULL;
2435
2436 spin_lock(&dev_list_lock);
2437 list_del_init(&dev->node);
2438 if (list_empty(&dev_list) && !IS_ERR_OR_NULL(nvme_thread)) {
2439 tmp = nvme_thread;
2440 nvme_thread = NULL;
2441 }
2442 spin_unlock(&dev_list_lock);
2443
2444 if (tmp)
2445 kthread_stop(tmp);
2446}
2447
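
nvme_dev_list_remove() and the start-up side added to nvme_dev_start() further down make the shared nvme kthread a first-device-starts / last-device-stops resource. A rough pthread analogue of that pattern (it skips the wait_event_killable() handshake the driver uses for racing probes):

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t dev_list_lock = PTHREAD_MUTEX_INITIALIZER;
static int dev_count;
static pthread_t worker;
static atomic_bool worker_stop;

static void *worker_fn(void *arg)
{
        (void)arg;
        while (!atomic_load(&worker_stop))
                usleep(100 * 1000);     /* poll all registered devices here */
        return NULL;
}

static void device_add(void)
{
        bool start = false;

        pthread_mutex_lock(&dev_list_lock);
        if (dev_count++ == 0)
                start = true;           /* first device brings the worker up */
        pthread_mutex_unlock(&dev_list_lock);

        if (start)
                pthread_create(&worker, NULL, worker_fn, NULL);
}

static void device_remove(void)
{
        bool stop = false;

        pthread_mutex_lock(&dev_list_lock);
        if (--dev_count == 0)
                stop = true;            /* last device takes the worker down */
        pthread_mutex_unlock(&dev_list_lock);

        if (stop) {
                atomic_store(&worker_stop, true);
                pthread_join(worker, NULL);
        }
}

int main(void)
{
        device_add();
        device_add();
        device_remove();
        device_remove();
        printf("worker stopped with the last device\n");
        return 0;
}
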
2180static void nvme_dev_shutdown(struct nvme_dev *dev) 2448static void nvme_dev_shutdown(struct nvme_dev *dev)
2181{ 2449{
2182 int i; 2450 int i;
2183 2451
2184 dev->initialized = 0; 2452 dev->initialized = 0;
2453 unregister_hotcpu_notifier(&dev->nb);
2185 2454
2186 spin_lock(&dev_list_lock); 2455 nvme_dev_list_remove(dev);
2187 list_del_init(&dev->node);
2188 spin_unlock(&dev_list_lock);
2189 2456
2190 if (!dev->bar || (dev->bar && readl(&dev->bar->csts) == -1)) { 2457 if (!dev->bar || (dev->bar && readl(&dev->bar->csts) == -1)) {
2191 for (i = dev->queue_count - 1; i >= 0; i--) { 2458 for (i = dev->queue_count - 1; i >= 0; i--) {
2192 struct nvme_queue *nvmeq = dev->queues[i]; 2459 struct nvme_queue *nvmeq = raw_nvmeq(dev, i);
2193 nvme_suspend_queue(nvmeq); 2460 nvme_suspend_queue(nvmeq);
2194 nvme_clear_queue(nvmeq); 2461 nvme_clear_queue(nvmeq);
2195 } 2462 }
@@ -2282,6 +2549,7 @@ static void nvme_free_dev(struct kref *kref)
2282 struct nvme_dev *dev = container_of(kref, struct nvme_dev, kref); 2549 struct nvme_dev *dev = container_of(kref, struct nvme_dev, kref);
2283 2550
2284 nvme_free_namespaces(dev); 2551 nvme_free_namespaces(dev);
2552 free_percpu(dev->io_queue);
2285 kfree(dev->queues); 2553 kfree(dev->queues);
2286 kfree(dev->entry); 2554 kfree(dev->entry);
2287 kfree(dev); 2555 kfree(dev);
@@ -2325,6 +2593,7 @@ static const struct file_operations nvme_dev_fops = {
2325static int nvme_dev_start(struct nvme_dev *dev) 2593static int nvme_dev_start(struct nvme_dev *dev)
2326{ 2594{
2327 int result; 2595 int result;
2596 bool start_thread = false;
2328 2597
2329 result = nvme_dev_map(dev); 2598 result = nvme_dev_map(dev);
2330 if (result) 2599 if (result)
@@ -2335,9 +2604,24 @@ static int nvme_dev_start(struct nvme_dev *dev)
2335 goto unmap; 2604 goto unmap;
2336 2605
2337 spin_lock(&dev_list_lock); 2606 spin_lock(&dev_list_lock);
2607 if (list_empty(&dev_list) && IS_ERR_OR_NULL(nvme_thread)) {
2608 start_thread = true;
2609 nvme_thread = NULL;
2610 }
2338 list_add(&dev->node, &dev_list); 2611 list_add(&dev->node, &dev_list);
2339 spin_unlock(&dev_list_lock); 2612 spin_unlock(&dev_list_lock);
2340 2613
2614 if (start_thread) {
2615 nvme_thread = kthread_run(nvme_kthread, NULL, "nvme");
2616 wake_up(&nvme_kthread_wait);
2617 } else
2618 wait_event_killable(nvme_kthread_wait, nvme_thread);
2619
2620 if (IS_ERR_OR_NULL(nvme_thread)) {
2621 result = nvme_thread ? PTR_ERR(nvme_thread) : -EINTR;
2622 goto disable;
2623 }
2624
2341 result = nvme_setup_io_queues(dev); 2625 result = nvme_setup_io_queues(dev);
2342 if (result && result != -EBUSY) 2626 if (result && result != -EBUSY)
2343 goto disable; 2627 goto disable;
@@ -2346,9 +2630,7 @@ static int nvme_dev_start(struct nvme_dev *dev)
2346 2630
2347 disable: 2631 disable:
2348 nvme_disable_queue(dev, 0); 2632 nvme_disable_queue(dev, 0);
2349 spin_lock(&dev_list_lock); 2633 nvme_dev_list_remove(dev);
2350 list_del_init(&dev->node);
2351 spin_unlock(&dev_list_lock);
2352 unmap: 2634 unmap:
2353 nvme_dev_unmap(dev); 2635 nvme_dev_unmap(dev);
2354 return result; 2636 return result;
@@ -2367,18 +2649,10 @@ static int nvme_remove_dead_ctrl(void *arg)
2367 2649
2368static void nvme_remove_disks(struct work_struct *ws) 2650static void nvme_remove_disks(struct work_struct *ws)
2369{ 2651{
2370 int i;
2371 struct nvme_dev *dev = container_of(ws, struct nvme_dev, reset_work); 2652 struct nvme_dev *dev = container_of(ws, struct nvme_dev, reset_work);
2372 2653
2373 nvme_dev_remove(dev); 2654 nvme_dev_remove(dev);
2374 spin_lock(&dev_list_lock); 2655 nvme_free_queues(dev, 1);
2375 for (i = dev->queue_count - 1; i > 0; i--) {
2376 BUG_ON(!dev->queues[i] || !dev->queues[i]->q_suspended);
2377 nvme_free_queue(dev->queues[i]);
2378 dev->queue_count--;
2379 dev->queues[i] = NULL;
2380 }
2381 spin_unlock(&dev_list_lock);
2382} 2656}
2383 2657
2384static int nvme_dev_resume(struct nvme_dev *dev) 2658static int nvme_dev_resume(struct nvme_dev *dev)
@@ -2441,6 +2715,9 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
2441 GFP_KERNEL); 2715 GFP_KERNEL);
2442 if (!dev->queues) 2716 if (!dev->queues)
2443 goto free; 2717 goto free;
2718 dev->io_queue = alloc_percpu(unsigned short);
2719 if (!dev->io_queue)
2720 goto free;
2444 2721
2445 INIT_LIST_HEAD(&dev->namespaces); 2722 INIT_LIST_HEAD(&dev->namespaces);
2446 dev->reset_workfn = nvme_reset_failed_dev; 2723 dev->reset_workfn = nvme_reset_failed_dev;
@@ -2455,6 +2732,7 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
2455 if (result) 2732 if (result)
2456 goto release; 2733 goto release;
2457 2734
2735 kref_init(&dev->kref);
2458 result = nvme_dev_start(dev); 2736 result = nvme_dev_start(dev);
2459 if (result) { 2737 if (result) {
2460 if (result == -EBUSY) 2738 if (result == -EBUSY)
@@ -2462,7 +2740,6 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
2462 goto release_pools; 2740 goto release_pools;
2463 } 2741 }
2464 2742
2465 kref_init(&dev->kref);
2466 result = nvme_dev_add(dev); 2743 result = nvme_dev_add(dev);
2467 if (result) 2744 if (result)
2468 goto shutdown; 2745 goto shutdown;
@@ -2491,6 +2768,7 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
2491 release: 2768 release:
2492 nvme_release_instance(dev); 2769 nvme_release_instance(dev);
2493 free: 2770 free:
2771 free_percpu(dev->io_queue);
2494 kfree(dev->queues); 2772 kfree(dev->queues);
2495 kfree(dev->entry); 2773 kfree(dev->entry);
2496 kfree(dev); 2774 kfree(dev);
@@ -2517,6 +2795,7 @@ static void nvme_remove(struct pci_dev *pdev)
2517 nvme_dev_remove(dev); 2795 nvme_dev_remove(dev);
2518 nvme_dev_shutdown(dev); 2796 nvme_dev_shutdown(dev);
2519 nvme_free_queues(dev, 0); 2797 nvme_free_queues(dev, 0);
2798 rcu_barrier();
2520 nvme_release_instance(dev); 2799 nvme_release_instance(dev);
2521 nvme_release_prp_pools(dev); 2800 nvme_release_prp_pools(dev);
2522 kref_put(&dev->kref, nvme_free_dev); 2801 kref_put(&dev->kref, nvme_free_dev);
@@ -2529,6 +2808,7 @@ static void nvme_remove(struct pci_dev *pdev)
2529#define nvme_slot_reset NULL 2808#define nvme_slot_reset NULL
2530#define nvme_error_resume NULL 2809#define nvme_error_resume NULL
2531 2810
2811#ifdef CONFIG_PM_SLEEP
2532static int nvme_suspend(struct device *dev) 2812static int nvme_suspend(struct device *dev)
2533{ 2813{
2534 struct pci_dev *pdev = to_pci_dev(dev); 2814 struct pci_dev *pdev = to_pci_dev(dev);
@@ -2549,6 +2829,7 @@ static int nvme_resume(struct device *dev)
2549 } 2829 }
2550 return 0; 2830 return 0;
2551} 2831}
2832#endif
2552 2833
2553static SIMPLE_DEV_PM_OPS(nvme_dev_pm_ops, nvme_suspend, nvme_resume); 2834static SIMPLE_DEV_PM_OPS(nvme_dev_pm_ops, nvme_suspend, nvme_resume);
2554 2835
@@ -2563,7 +2844,7 @@ static const struct pci_error_handlers nvme_err_handler = {
2563/* Move to pci_ids.h later */ 2844/* Move to pci_ids.h later */
2564#define PCI_CLASS_STORAGE_EXPRESS 0x010802 2845#define PCI_CLASS_STORAGE_EXPRESS 0x010802
2565 2846
2566static DEFINE_PCI_DEVICE_TABLE(nvme_id_table) = { 2847static const struct pci_device_id nvme_id_table[] = {
2567 { PCI_DEVICE_CLASS(PCI_CLASS_STORAGE_EXPRESS, 0xffffff) }, 2848 { PCI_DEVICE_CLASS(PCI_CLASS_STORAGE_EXPRESS, 0xffffff) },
2568 { 0, } 2849 { 0, }
2569}; 2850};
@@ -2585,14 +2866,11 @@ static int __init nvme_init(void)
2585{ 2866{
2586 int result; 2867 int result;
2587 2868
2588 nvme_thread = kthread_run(nvme_kthread, NULL, "nvme"); 2869 init_waitqueue_head(&nvme_kthread_wait);
2589 if (IS_ERR(nvme_thread))
2590 return PTR_ERR(nvme_thread);
2591 2870
2592 result = -ENOMEM;
2593 nvme_workq = create_singlethread_workqueue("nvme"); 2871 nvme_workq = create_singlethread_workqueue("nvme");
2594 if (!nvme_workq) 2872 if (!nvme_workq)
2595 goto kill_kthread; 2873 return -ENOMEM;
2596 2874
2597 result = register_blkdev(nvme_major, "nvme"); 2875 result = register_blkdev(nvme_major, "nvme");
2598 if (result < 0) 2876 if (result < 0)
@@ -2609,8 +2887,6 @@ static int __init nvme_init(void)
2609 unregister_blkdev(nvme_major, "nvme"); 2887 unregister_blkdev(nvme_major, "nvme");
2610 kill_workq: 2888 kill_workq:
2611 destroy_workqueue(nvme_workq); 2889 destroy_workqueue(nvme_workq);
2612 kill_kthread:
2613 kthread_stop(nvme_thread);
2614 return result; 2890 return result;
2615} 2891}
2616 2892
@@ -2619,11 +2895,11 @@ static void __exit nvme_exit(void)
2619 pci_unregister_driver(&nvme_driver); 2895 pci_unregister_driver(&nvme_driver);
2620 unregister_blkdev(nvme_major, "nvme"); 2896 unregister_blkdev(nvme_major, "nvme");
2621 destroy_workqueue(nvme_workq); 2897 destroy_workqueue(nvme_workq);
2622 kthread_stop(nvme_thread); 2898 BUG_ON(nvme_thread && !IS_ERR(nvme_thread));
2623} 2899}
2624 2900
2625MODULE_AUTHOR("Matthew Wilcox <willy@linux.intel.com>"); 2901MODULE_AUTHOR("Matthew Wilcox <willy@linux.intel.com>");
2626MODULE_LICENSE("GPL"); 2902MODULE_LICENSE("GPL");
2627MODULE_VERSION("0.8"); 2903MODULE_VERSION("0.9");
2628module_init(nvme_init); 2904module_init(nvme_init);
2629module_exit(nvme_exit); 2905module_exit(nvme_exit);
diff --git a/drivers/block/nvme-scsi.c b/drivers/block/nvme-scsi.c
index 4a0ceb64e269..2c3f5be06da1 100644
--- a/drivers/block/nvme-scsi.c
+++ b/drivers/block/nvme-scsi.c
@@ -1562,13 +1562,14 @@ static int nvme_trans_send_fw_cmd(struct nvme_ns *ns, struct sg_io_hdr *hdr,
1562 res = PTR_ERR(iod); 1562 res = PTR_ERR(iod);
1563 goto out; 1563 goto out;
1564 } 1564 }
1565 length = nvme_setup_prps(dev, &c.common, iod, tot_len, 1565 length = nvme_setup_prps(dev, iod, tot_len, GFP_KERNEL);
1566 GFP_KERNEL);
1567 if (length != tot_len) { 1566 if (length != tot_len) {
1568 res = -ENOMEM; 1567 res = -ENOMEM;
1569 goto out_unmap; 1568 goto out_unmap;
1570 } 1569 }
1571 1570
1571 c.dlfw.prp1 = cpu_to_le64(sg_dma_address(iod->sg));
1572 c.dlfw.prp2 = cpu_to_le64(iod->first_dma);
1572 c.dlfw.numd = cpu_to_le32((tot_len/BYTES_TO_DWORDS) - 1); 1573 c.dlfw.numd = cpu_to_le32((tot_len/BYTES_TO_DWORDS) - 1);
1573 c.dlfw.offset = cpu_to_le32(offset/BYTES_TO_DWORDS); 1574 c.dlfw.offset = cpu_to_le32(offset/BYTES_TO_DWORDS);
1574 } else if (opcode == nvme_admin_activate_fw) { 1575 } else if (opcode == nvme_admin_activate_fw) {
@@ -2033,7 +2034,6 @@ static int nvme_trans_do_nvme_io(struct nvme_ns *ns, struct sg_io_hdr *hdr,
2033 int res = SNTI_TRANSLATION_SUCCESS; 2034 int res = SNTI_TRANSLATION_SUCCESS;
2034 int nvme_sc; 2035 int nvme_sc;
2035 struct nvme_dev *dev = ns->dev; 2036 struct nvme_dev *dev = ns->dev;
2036 struct nvme_queue *nvmeq;
2037 u32 num_cmds; 2037 u32 num_cmds;
2038 struct nvme_iod *iod; 2038 struct nvme_iod *iod;
2039 u64 unit_len; 2039 u64 unit_len;
@@ -2045,7 +2045,7 @@ static int nvme_trans_do_nvme_io(struct nvme_ns *ns, struct sg_io_hdr *hdr,
2045 struct nvme_command c; 2045 struct nvme_command c;
2046 u8 opcode = (is_write ? nvme_cmd_write : nvme_cmd_read); 2046 u8 opcode = (is_write ? nvme_cmd_write : nvme_cmd_read);
2047 u16 control; 2047 u16 control;
2048 u32 max_blocks = nvme_block_nr(ns, dev->max_hw_sectors); 2048 u32 max_blocks = queue_max_hw_sectors(ns->queue);
2049 2049
2050 num_cmds = nvme_trans_io_get_num_cmds(hdr, cdb_info, max_blocks); 2050 num_cmds = nvme_trans_io_get_num_cmds(hdr, cdb_info, max_blocks);
2051 2051
@@ -2093,8 +2093,7 @@ static int nvme_trans_do_nvme_io(struct nvme_ns *ns, struct sg_io_hdr *hdr,
2093 res = PTR_ERR(iod); 2093 res = PTR_ERR(iod);
2094 goto out; 2094 goto out;
2095 } 2095 }
2096 retcode = nvme_setup_prps(dev, &c.common, iod, unit_len, 2096 retcode = nvme_setup_prps(dev, iod, unit_len, GFP_KERNEL);
2097 GFP_KERNEL);
2098 if (retcode != unit_len) { 2097 if (retcode != unit_len) {
2099 nvme_unmap_user_pages(dev, 2098 nvme_unmap_user_pages(dev,
2100 (is_write) ? DMA_TO_DEVICE : DMA_FROM_DEVICE, 2099 (is_write) ? DMA_TO_DEVICE : DMA_FROM_DEVICE,
@@ -2103,21 +2102,12 @@ static int nvme_trans_do_nvme_io(struct nvme_ns *ns, struct sg_io_hdr *hdr,
2103 res = -ENOMEM; 2102 res = -ENOMEM;
2104 goto out; 2103 goto out;
2105 } 2104 }
2105 c.rw.prp1 = cpu_to_le64(sg_dma_address(iod->sg));
2106 c.rw.prp2 = cpu_to_le64(iod->first_dma);
2106 2107
2107 nvme_offset += unit_num_blocks; 2108 nvme_offset += unit_num_blocks;
2108 2109
2109 nvmeq = get_nvmeq(dev); 2110 nvme_sc = nvme_submit_io_cmd(dev, &c, NULL);
2110 /*
2111 * Since nvme_submit_sync_cmd sleeps, we can't keep
2112 * preemption disabled. We may be preempted at any
2113 * point, and be rescheduled to a different CPU. That
2114 * will cause cacheline bouncing, but no additional
2115 * races since q_lock already protects against other
2116 * CPUs.
2117 */
2118 put_nvmeq(nvmeq);
2119 nvme_sc = nvme_submit_sync_cmd(nvmeq, &c, NULL,
2120 NVME_IO_TIMEOUT);
2121 if (nvme_sc != NVME_SC_SUCCESS) { 2111 if (nvme_sc != NVME_SC_SUCCESS) {
2122 nvme_unmap_user_pages(dev, 2112 nvme_unmap_user_pages(dev,
2123 (is_write) ? DMA_TO_DEVICE : DMA_FROM_DEVICE, 2113 (is_write) ? DMA_TO_DEVICE : DMA_FROM_DEVICE,
@@ -2644,7 +2634,6 @@ static int nvme_trans_start_stop(struct nvme_ns *ns, struct sg_io_hdr *hdr,
2644{ 2634{
2645 int res = SNTI_TRANSLATION_SUCCESS; 2635 int res = SNTI_TRANSLATION_SUCCESS;
2646 int nvme_sc; 2636 int nvme_sc;
2647 struct nvme_queue *nvmeq;
2648 struct nvme_command c; 2637 struct nvme_command c;
2649 u8 immed, pcmod, pc, no_flush, start; 2638 u8 immed, pcmod, pc, no_flush, start;
2650 2639
@@ -2671,10 +2660,7 @@ static int nvme_trans_start_stop(struct nvme_ns *ns, struct sg_io_hdr *hdr,
2671 c.common.opcode = nvme_cmd_flush; 2660 c.common.opcode = nvme_cmd_flush;
2672 c.common.nsid = cpu_to_le32(ns->ns_id); 2661 c.common.nsid = cpu_to_le32(ns->ns_id);
2673 2662
2674 nvmeq = get_nvmeq(ns->dev); 2663 nvme_sc = nvme_submit_io_cmd(ns->dev, &c, NULL);
2675 put_nvmeq(nvmeq);
2676 nvme_sc = nvme_submit_sync_cmd(nvmeq, &c, NULL, NVME_IO_TIMEOUT);
2677
2678 res = nvme_trans_status_code(hdr, nvme_sc); 2664 res = nvme_trans_status_code(hdr, nvme_sc);
2679 if (res) 2665 if (res)
2680 goto out; 2666 goto out;
@@ -2697,15 +2683,12 @@ static int nvme_trans_synchronize_cache(struct nvme_ns *ns,
2697 int res = SNTI_TRANSLATION_SUCCESS; 2683 int res = SNTI_TRANSLATION_SUCCESS;
2698 int nvme_sc; 2684 int nvme_sc;
2699 struct nvme_command c; 2685 struct nvme_command c;
2700 struct nvme_queue *nvmeq;
2701 2686
2702 memset(&c, 0, sizeof(c)); 2687 memset(&c, 0, sizeof(c));
2703 c.common.opcode = nvme_cmd_flush; 2688 c.common.opcode = nvme_cmd_flush;
2704 c.common.nsid = cpu_to_le32(ns->ns_id); 2689 c.common.nsid = cpu_to_le32(ns->ns_id);
2705 2690
2706 nvmeq = get_nvmeq(ns->dev); 2691 nvme_sc = nvme_submit_io_cmd(ns->dev, &c, NULL);
2707 put_nvmeq(nvmeq);
2708 nvme_sc = nvme_submit_sync_cmd(nvmeq, &c, NULL, NVME_IO_TIMEOUT);
2709 2692
2710 res = nvme_trans_status_code(hdr, nvme_sc); 2693 res = nvme_trans_status_code(hdr, nvme_sc);
2711 if (res) 2694 if (res)
@@ -2872,7 +2855,6 @@ static int nvme_trans_unmap(struct nvme_ns *ns, struct sg_io_hdr *hdr,
2872 struct nvme_dev *dev = ns->dev; 2855 struct nvme_dev *dev = ns->dev;
2873 struct scsi_unmap_parm_list *plist; 2856 struct scsi_unmap_parm_list *plist;
2874 struct nvme_dsm_range *range; 2857 struct nvme_dsm_range *range;
2875 struct nvme_queue *nvmeq;
2876 struct nvme_command c; 2858 struct nvme_command c;
2877 int i, nvme_sc, res = -ENOMEM; 2859 int i, nvme_sc, res = -ENOMEM;
2878 u16 ndesc, list_len; 2860 u16 ndesc, list_len;
@@ -2914,10 +2896,7 @@ static int nvme_trans_unmap(struct nvme_ns *ns, struct sg_io_hdr *hdr,
2914 c.dsm.nr = cpu_to_le32(ndesc - 1); 2896 c.dsm.nr = cpu_to_le32(ndesc - 1);
2915 c.dsm.attributes = cpu_to_le32(NVME_DSMGMT_AD); 2897 c.dsm.attributes = cpu_to_le32(NVME_DSMGMT_AD);
2916 2898
2917 nvmeq = get_nvmeq(dev); 2899 nvme_sc = nvme_submit_io_cmd(dev, &c, NULL);
2918 put_nvmeq(nvmeq);
2919
2920 nvme_sc = nvme_submit_sync_cmd(nvmeq, &c, NULL, NVME_IO_TIMEOUT);
2921 res = nvme_trans_status_code(hdr, nvme_sc); 2900 res = nvme_trans_status_code(hdr, nvme_sc);
2922 2901
2923 dma_free_coherent(&dev->pci_dev->dev, ndesc * sizeof(*range), 2902 dma_free_coherent(&dev->pci_dev->dev, ndesc * sizeof(*range),
diff --git a/drivers/char/hw_random/bcm2835-rng.c b/drivers/char/hw_random/bcm2835-rng.c
index 8c3b255e629a..e900961cdd2e 100644
--- a/drivers/char/hw_random/bcm2835-rng.c
+++ b/drivers/char/hw_random/bcm2835-rng.c
@@ -61,18 +61,18 @@ static int bcm2835_rng_probe(struct platform_device *pdev)
61 } 61 }
62 bcm2835_rng_ops.priv = (unsigned long)rng_base; 62 bcm2835_rng_ops.priv = (unsigned long)rng_base;
63 63
64 /* set warm-up count & enable */
65 __raw_writel(RNG_WARMUP_COUNT, rng_base + RNG_STATUS);
66 __raw_writel(RNG_RBGEN, rng_base + RNG_CTRL);
67
64 /* register driver */ 68 /* register driver */
65 err = hwrng_register(&bcm2835_rng_ops); 69 err = hwrng_register(&bcm2835_rng_ops);
66 if (err) { 70 if (err) {
67 dev_err(dev, "hwrng registration failed\n"); 71 dev_err(dev, "hwrng registration failed\n");
68 iounmap(rng_base); 72 iounmap(rng_base);
69 } else { 73 } else
70 dev_info(dev, "hwrng registered\n"); 74 dev_info(dev, "hwrng registered\n");
71 75
72 /* set warm-up count & enable */
73 __raw_writel(RNG_WARMUP_COUNT, rng_base + RNG_STATUS);
74 __raw_writel(RNG_RBGEN, rng_base + RNG_CTRL);
75 }
76 return err; 76 return err;
77} 77}
78 78
diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
index 6928d094451d..60aafb8a1f2e 100644
--- a/drivers/char/virtio_console.c
+++ b/drivers/char/virtio_console.c
@@ -901,9 +901,9 @@ static int pipe_to_sg(struct pipe_inode_info *pipe, struct pipe_buffer *buf,
901 if (len + offset > PAGE_SIZE) 901 if (len + offset > PAGE_SIZE)
902 len = PAGE_SIZE - offset; 902 len = PAGE_SIZE - offset;
903 903
904 src = buf->ops->map(pipe, buf, 1); 904 src = kmap_atomic(buf->page);
905 memcpy(page_address(page) + offset, src + buf->offset, len); 905 memcpy(page_address(page) + offset, src + buf->offset, len);
906 buf->ops->unmap(pipe, buf, src); 906 kunmap_atomic(src);
907 907
908 sg_set_page(&(sgl->sg[sgl->n]), page, len, offset); 908 sg_set_page(&(sgl->sg[sgl->n]), page, len, offset);
909 } 909 }
diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
index 8ee228e9ab5a..c98fdb185931 100644
--- a/drivers/infiniband/ulp/isert/ib_isert.c
+++ b/drivers/infiniband/ulp/isert/ib_isert.c
@@ -51,6 +51,8 @@ isert_unreg_rdma(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn);
51static int 51static int
52isert_reg_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd, 52isert_reg_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
53 struct isert_rdma_wr *wr); 53 struct isert_rdma_wr *wr);
54static int
55isert_put_response(struct iscsi_conn *conn, struct iscsi_cmd *cmd);
54 56
55static void 57static void
56isert_qp_event_callback(struct ib_event *e, void *context) 58isert_qp_event_callback(struct ib_event *e, void *context)
@@ -87,7 +89,8 @@ isert_query_device(struct ib_device *ib_dev, struct ib_device_attr *devattr)
87} 89}
88 90
89static int 91static int
90isert_conn_setup_qp(struct isert_conn *isert_conn, struct rdma_cm_id *cma_id) 92isert_conn_setup_qp(struct isert_conn *isert_conn, struct rdma_cm_id *cma_id,
93 u8 protection)
91{ 94{
92 struct isert_device *device = isert_conn->conn_device; 95 struct isert_device *device = isert_conn->conn_device;
93 struct ib_qp_init_attr attr; 96 struct ib_qp_init_attr attr;
@@ -119,6 +122,8 @@ isert_conn_setup_qp(struct isert_conn *isert_conn, struct rdma_cm_id *cma_id)
119 attr.cap.max_recv_sge = 1; 122 attr.cap.max_recv_sge = 1;
120 attr.sq_sig_type = IB_SIGNAL_REQ_WR; 123 attr.sq_sig_type = IB_SIGNAL_REQ_WR;
121 attr.qp_type = IB_QPT_RC; 124 attr.qp_type = IB_QPT_RC;
125 if (protection)
126 attr.create_flags |= IB_QP_CREATE_SIGNATURE_EN;
122 127
123 pr_debug("isert_conn_setup_qp cma_id->device: %p\n", 128 pr_debug("isert_conn_setup_qp cma_id->device: %p\n",
124 cma_id->device); 129 cma_id->device);
@@ -226,7 +231,8 @@ isert_create_device_ib_res(struct isert_device *device)
226 return ret; 231 return ret;
227 232
228 /* asign function handlers */ 233 /* asign function handlers */
229 if (dev_attr->device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS) { 234 if (dev_attr->device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS &&
235 dev_attr->device_cap_flags & IB_DEVICE_SIGNATURE_HANDOVER) {
230 device->use_fastreg = 1; 236 device->use_fastreg = 1;
231 device->reg_rdma_mem = isert_reg_rdma; 237 device->reg_rdma_mem = isert_reg_rdma;
232 device->unreg_rdma_mem = isert_unreg_rdma; 238 device->unreg_rdma_mem = isert_unreg_rdma;
@@ -236,13 +242,18 @@ isert_create_device_ib_res(struct isert_device *device)
236 device->unreg_rdma_mem = isert_unmap_cmd; 242 device->unreg_rdma_mem = isert_unmap_cmd;
237 } 243 }
238 244
245 /* Check signature cap */
246 device->pi_capable = dev_attr->device_cap_flags &
247 IB_DEVICE_SIGNATURE_HANDOVER ? true : false;
248
239 device->cqs_used = min_t(int, num_online_cpus(), 249 device->cqs_used = min_t(int, num_online_cpus(),
240 device->ib_device->num_comp_vectors); 250 device->ib_device->num_comp_vectors);
241 device->cqs_used = min(ISERT_MAX_CQ, device->cqs_used); 251 device->cqs_used = min(ISERT_MAX_CQ, device->cqs_used);
242 pr_debug("Using %d CQs, device %s supports %d vectors support " 252 pr_debug("Using %d CQs, device %s supports %d vectors support "
243 "Fast registration %d\n", 253 "Fast registration %d pi_capable %d\n",
244 device->cqs_used, device->ib_device->name, 254 device->cqs_used, device->ib_device->name,
245 device->ib_device->num_comp_vectors, device->use_fastreg); 255 device->ib_device->num_comp_vectors, device->use_fastreg,
256 device->pi_capable);
246 device->cq_desc = kzalloc(sizeof(struct isert_cq_desc) * 257 device->cq_desc = kzalloc(sizeof(struct isert_cq_desc) *
247 device->cqs_used, GFP_KERNEL); 258 device->cqs_used, GFP_KERNEL);
248 if (!device->cq_desc) { 259 if (!device->cq_desc) {
@@ -395,6 +406,12 @@ isert_conn_free_fastreg_pool(struct isert_conn *isert_conn)
395 list_del(&fr_desc->list); 406 list_del(&fr_desc->list);
396 ib_free_fast_reg_page_list(fr_desc->data_frpl); 407 ib_free_fast_reg_page_list(fr_desc->data_frpl);
397 ib_dereg_mr(fr_desc->data_mr); 408 ib_dereg_mr(fr_desc->data_mr);
409 if (fr_desc->pi_ctx) {
410 ib_free_fast_reg_page_list(fr_desc->pi_ctx->prot_frpl);
411 ib_dereg_mr(fr_desc->pi_ctx->prot_mr);
412 ib_destroy_mr(fr_desc->pi_ctx->sig_mr);
413 kfree(fr_desc->pi_ctx);
414 }
398 kfree(fr_desc); 415 kfree(fr_desc);
399 ++i; 416 ++i;
400 } 417 }
@@ -406,8 +423,10 @@ isert_conn_free_fastreg_pool(struct isert_conn *isert_conn)
406 423
407static int 424static int
408isert_create_fr_desc(struct ib_device *ib_device, struct ib_pd *pd, 425isert_create_fr_desc(struct ib_device *ib_device, struct ib_pd *pd,
409 struct fast_reg_descriptor *fr_desc) 426 struct fast_reg_descriptor *fr_desc, u8 protection)
410{ 427{
428 int ret;
429
411 fr_desc->data_frpl = ib_alloc_fast_reg_page_list(ib_device, 430 fr_desc->data_frpl = ib_alloc_fast_reg_page_list(ib_device,
412 ISCSI_ISER_SG_TABLESIZE); 431 ISCSI_ISER_SG_TABLESIZE);
413 if (IS_ERR(fr_desc->data_frpl)) { 432 if (IS_ERR(fr_desc->data_frpl)) {
@@ -420,27 +439,88 @@ isert_create_fr_desc(struct ib_device *ib_device, struct ib_pd *pd,
420 if (IS_ERR(fr_desc->data_mr)) { 439 if (IS_ERR(fr_desc->data_mr)) {
421 pr_err("Failed to allocate data frmr err=%ld\n", 440 pr_err("Failed to allocate data frmr err=%ld\n",
422 PTR_ERR(fr_desc->data_mr)); 441 PTR_ERR(fr_desc->data_mr));
423 ib_free_fast_reg_page_list(fr_desc->data_frpl); 442 ret = PTR_ERR(fr_desc->data_mr);
424 return PTR_ERR(fr_desc->data_mr); 443 goto err_data_frpl;
425 } 444 }
426 pr_debug("Create fr_desc %p page_list %p\n", 445 pr_debug("Create fr_desc %p page_list %p\n",
427 fr_desc, fr_desc->data_frpl->page_list); 446 fr_desc, fr_desc->data_frpl->page_list);
447 fr_desc->ind |= ISERT_DATA_KEY_VALID;
448
449 if (protection) {
450 struct ib_mr_init_attr mr_init_attr = {0};
451 struct pi_context *pi_ctx;
452
453 fr_desc->pi_ctx = kzalloc(sizeof(*fr_desc->pi_ctx), GFP_KERNEL);
454 if (!fr_desc->pi_ctx) {
455 pr_err("Failed to allocate pi context\n");
456 ret = -ENOMEM;
457 goto err_data_mr;
458 }
459 pi_ctx = fr_desc->pi_ctx;
460
461 pi_ctx->prot_frpl = ib_alloc_fast_reg_page_list(ib_device,
462 ISCSI_ISER_SG_TABLESIZE);
463 if (IS_ERR(pi_ctx->prot_frpl)) {
464 pr_err("Failed to allocate prot frpl err=%ld\n",
465 PTR_ERR(pi_ctx->prot_frpl));
466 ret = PTR_ERR(pi_ctx->prot_frpl);
467 goto err_pi_ctx;
468 }
428 469
429 fr_desc->valid = true; 470 pi_ctx->prot_mr = ib_alloc_fast_reg_mr(pd, ISCSI_ISER_SG_TABLESIZE);
471 if (IS_ERR(pi_ctx->prot_mr)) {
472 pr_err("Failed to allocate prot frmr err=%ld\n",
473 PTR_ERR(pi_ctx->prot_mr));
474 ret = PTR_ERR(pi_ctx->prot_mr);
475 goto err_prot_frpl;
476 }
477 fr_desc->ind |= ISERT_PROT_KEY_VALID;
478
479 mr_init_attr.max_reg_descriptors = 2;
480 mr_init_attr.flags |= IB_MR_SIGNATURE_EN;
481 pi_ctx->sig_mr = ib_create_mr(pd, &mr_init_attr);
482 if (IS_ERR(pi_ctx->sig_mr)) {
483 pr_err("Failed to allocate signature enabled mr err=%ld\n",
484 PTR_ERR(pi_ctx->sig_mr));
485 ret = PTR_ERR(pi_ctx->sig_mr);
486 goto err_prot_mr;
487 }
488 fr_desc->ind |= ISERT_SIG_KEY_VALID;
489 }
490 fr_desc->ind &= ~ISERT_PROTECTED;
430 491
431 return 0; 492 return 0;
493err_prot_mr:
494 ib_dereg_mr(fr_desc->pi_ctx->prot_mr);
495err_prot_frpl:
496 ib_free_fast_reg_page_list(fr_desc->pi_ctx->prot_frpl);
497err_pi_ctx:
498 kfree(fr_desc->pi_ctx);
499err_data_mr:
500 ib_dereg_mr(fr_desc->data_mr);
501err_data_frpl:
502 ib_free_fast_reg_page_list(fr_desc->data_frpl);
503
504 return ret;
432} 505}
433 506
434static int 507static int
435isert_conn_create_fastreg_pool(struct isert_conn *isert_conn) 508isert_conn_create_fastreg_pool(struct isert_conn *isert_conn, u8 pi_support)
436{ 509{
437 struct fast_reg_descriptor *fr_desc; 510 struct fast_reg_descriptor *fr_desc;
438 struct isert_device *device = isert_conn->conn_device; 511 struct isert_device *device = isert_conn->conn_device;
439 int i, ret; 512 struct se_session *se_sess = isert_conn->conn->sess->se_sess;
513 struct se_node_acl *se_nacl = se_sess->se_node_acl;
514 int i, ret, tag_num;
515 /*
516 * Setup the number of FRMRs based upon the number of tags
517 * available to session in iscsi_target_locate_portal().
518 */
519 tag_num = max_t(u32, ISCSIT_MIN_TAGS, se_nacl->queue_depth);
520 tag_num = (tag_num * 2) + ISCSIT_EXTRA_TAGS;
440 521
441 INIT_LIST_HEAD(&isert_conn->conn_fr_pool);
442 isert_conn->conn_fr_pool_size = 0; 522 isert_conn->conn_fr_pool_size = 0;
443 for (i = 0; i < ISCSI_DEF_XMIT_CMDS_MAX; i++) { 523 for (i = 0; i < tag_num; i++) {
444 fr_desc = kzalloc(sizeof(*fr_desc), GFP_KERNEL); 524 fr_desc = kzalloc(sizeof(*fr_desc), GFP_KERNEL);
445 if (!fr_desc) { 525 if (!fr_desc) {
446 pr_err("Failed to allocate fast_reg descriptor\n"); 526 pr_err("Failed to allocate fast_reg descriptor\n");
@@ -449,7 +529,8 @@ isert_conn_create_fastreg_pool(struct isert_conn *isert_conn)
449 } 529 }
450 530
451 ret = isert_create_fr_desc(device->ib_device, 531 ret = isert_create_fr_desc(device->ib_device,
452 isert_conn->conn_pd, fr_desc); 532 isert_conn->conn_pd, fr_desc,
533 pi_support);
453 if (ret) { 534 if (ret) {
454 pr_err("Failed to create fastreg descriptor err=%d\n", 535 pr_err("Failed to create fastreg descriptor err=%d\n",
455 ret); 536 ret);
@@ -480,6 +561,7 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
480 struct isert_device *device; 561 struct isert_device *device;
481 struct ib_device *ib_dev = cma_id->device; 562 struct ib_device *ib_dev = cma_id->device;
482 int ret = 0; 563 int ret = 0;
564 u8 pi_support = np->tpg_np->tpg->tpg_attrib.t10_pi;
483 565
484 pr_debug("Entering isert_connect_request cma_id: %p, context: %p\n", 566 pr_debug("Entering isert_connect_request cma_id: %p, context: %p\n",
485 cma_id, cma_id->context); 567 cma_id, cma_id->context);
@@ -498,6 +580,7 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
498 kref_get(&isert_conn->conn_kref); 580 kref_get(&isert_conn->conn_kref);
499 mutex_init(&isert_conn->conn_mutex); 581 mutex_init(&isert_conn->conn_mutex);
500 spin_lock_init(&isert_conn->conn_lock); 582 spin_lock_init(&isert_conn->conn_lock);
583 INIT_LIST_HEAD(&isert_conn->conn_fr_pool);
501 584
502 cma_id->context = isert_conn; 585 cma_id->context = isert_conn;
503 isert_conn->conn_cm_id = cma_id; 586 isert_conn->conn_cm_id = cma_id;
@@ -569,16 +652,13 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
569 goto out_mr; 652 goto out_mr;
570 } 653 }
571 654
572 if (device->use_fastreg) { 655 if (pi_support && !device->pi_capable) {
573 ret = isert_conn_create_fastreg_pool(isert_conn); 656 pr_err("Protection information requested but not supported\n");
574 if (ret) { 657 ret = -EINVAL;
575 pr_err("Conn: %p failed to create fastreg pool\n", 658 goto out_mr;
576 isert_conn);
577 goto out_fastreg;
578 }
579 } 659 }
580 660
581 ret = isert_conn_setup_qp(isert_conn, cma_id); 661 ret = isert_conn_setup_qp(isert_conn, cma_id, pi_support);
582 if (ret) 662 if (ret)
583 goto out_conn_dev; 663 goto out_conn_dev;
584 664
@@ -591,9 +671,6 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
591 return 0; 671 return 0;
592 672
593out_conn_dev: 673out_conn_dev:
594 if (device->use_fastreg)
595 isert_conn_free_fastreg_pool(isert_conn);
596out_fastreg:
597 ib_dereg_mr(isert_conn->conn_mr); 674 ib_dereg_mr(isert_conn->conn_mr);
598out_mr: 675out_mr:
599 ib_dealloc_pd(isert_conn->conn_pd); 676 ib_dealloc_pd(isert_conn->conn_pd);
@@ -967,6 +1044,18 @@ isert_put_login_tx(struct iscsi_conn *conn, struct iscsi_login *login,
967 } 1044 }
968 if (!login->login_failed) { 1045 if (!login->login_failed) {
969 if (login->login_complete) { 1046 if (login->login_complete) {
1047 if (isert_conn->conn_device->use_fastreg) {
1048 u8 pi_support = login->np->tpg_np->tpg->tpg_attrib.t10_pi;
1049
1050 ret = isert_conn_create_fastreg_pool(isert_conn,
1051 pi_support);
1052 if (ret) {
1053 pr_err("Conn: %p failed to create"
1054 " fastreg pool\n", isert_conn);
1055 return ret;
1056 }
1057 }
1058
970 ret = isert_alloc_rx_descriptors(isert_conn); 1059 ret = isert_alloc_rx_descriptors(isert_conn);
971 if (ret) 1060 if (ret)
972 return ret; 1061 return ret;
@@ -1392,19 +1481,60 @@ isert_rx_completion(struct iser_rx_desc *desc, struct isert_conn *isert_conn,
1392 } 1481 }
1393} 1482}
1394 1483
1484static int
1485isert_map_data_buf(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
1486 struct scatterlist *sg, u32 nents, u32 length, u32 offset,
1487 enum iser_ib_op_code op, struct isert_data_buf *data)
1488{
1489 struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
1490
1491 data->dma_dir = op == ISER_IB_RDMA_WRITE ?
1492 DMA_TO_DEVICE : DMA_FROM_DEVICE;
1493
1494 data->len = length - offset;
1495 data->offset = offset;
1496 data->sg_off = data->offset / PAGE_SIZE;
1497
1498 data->sg = &sg[data->sg_off];
1499 data->nents = min_t(unsigned int, nents - data->sg_off,
1500 ISCSI_ISER_SG_TABLESIZE);
1501 data->len = min_t(unsigned int, data->len, ISCSI_ISER_SG_TABLESIZE *
1502 PAGE_SIZE);
1503
1504 data->dma_nents = ib_dma_map_sg(ib_dev, data->sg, data->nents,
1505 data->dma_dir);
1506 if (unlikely(!data->dma_nents)) {
1507 pr_err("Cmd: unable to dma map SGs %p\n", sg);
1508 return -EINVAL;
1509 }
1510
1511 pr_debug("Mapped cmd: %p count: %u sg: %p sg_nents: %u rdma_len %d\n",
1512 isert_cmd, data->dma_nents, data->sg, data->nents, data->len);
1513
1514 return 0;
1515}
1516
1517static void
1518isert_unmap_data_buf(struct isert_conn *isert_conn, struct isert_data_buf *data)
1519{
1520 struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
1521
1522 ib_dma_unmap_sg(ib_dev, data->sg, data->nents, data->dma_dir);
1523 memset(data, 0, sizeof(*data));
1524}
1525
1526
1527
1395static void 1528static void
1396isert_unmap_cmd(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn) 1529isert_unmap_cmd(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn)
1397{ 1530{
1398 struct isert_rdma_wr *wr = &isert_cmd->rdma_wr; 1531 struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
1399 struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
1400 1532
1401 pr_debug("isert_unmap_cmd: %p\n", isert_cmd); 1533 pr_debug("isert_unmap_cmd: %p\n", isert_cmd);
1402 if (wr->sge) { 1534
1535 if (wr->data.sg) {
1403 pr_debug("isert_unmap_cmd: %p unmap_sg op\n", isert_cmd); 1536 pr_debug("isert_unmap_cmd: %p unmap_sg op\n", isert_cmd);
1404 ib_dma_unmap_sg(ib_dev, wr->sge, wr->num_sge, 1537 isert_unmap_data_buf(isert_conn, &wr->data);
1405 (wr->iser_ib_op == ISER_IB_RDMA_WRITE) ?
1406 DMA_TO_DEVICE : DMA_FROM_DEVICE);
1407 wr->sge = NULL;
1408 } 1538 }
1409 1539
1410 if (wr->send_wr) { 1540 if (wr->send_wr) {
@@ -1424,7 +1554,6 @@ static void
1424isert_unreg_rdma(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn) 1554isert_unreg_rdma(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn)
1425{ 1555{
1426 struct isert_rdma_wr *wr = &isert_cmd->rdma_wr; 1556 struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
1427 struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
1428 LIST_HEAD(unmap_list); 1557 LIST_HEAD(unmap_list);
1429 1558
1430 pr_debug("unreg_fastreg_cmd: %p\n", isert_cmd); 1559 pr_debug("unreg_fastreg_cmd: %p\n", isert_cmd);
@@ -1432,18 +1561,19 @@ isert_unreg_rdma(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn)
1432 if (wr->fr_desc) { 1561 if (wr->fr_desc) {
1433 pr_debug("unreg_fastreg_cmd: %p free fr_desc %p\n", 1562 pr_debug("unreg_fastreg_cmd: %p free fr_desc %p\n",
1434 isert_cmd, wr->fr_desc); 1563 isert_cmd, wr->fr_desc);
1564 if (wr->fr_desc->ind & ISERT_PROTECTED) {
1565 isert_unmap_data_buf(isert_conn, &wr->prot);
1566 wr->fr_desc->ind &= ~ISERT_PROTECTED;
1567 }
1435 spin_lock_bh(&isert_conn->conn_lock); 1568 spin_lock_bh(&isert_conn->conn_lock);
1436 list_add_tail(&wr->fr_desc->list, &isert_conn->conn_fr_pool); 1569 list_add_tail(&wr->fr_desc->list, &isert_conn->conn_fr_pool);
1437 spin_unlock_bh(&isert_conn->conn_lock); 1570 spin_unlock_bh(&isert_conn->conn_lock);
1438 wr->fr_desc = NULL; 1571 wr->fr_desc = NULL;
1439 } 1572 }
1440 1573
1441 if (wr->sge) { 1574 if (wr->data.sg) {
1442 pr_debug("unreg_fastreg_cmd: %p unmap_sg op\n", isert_cmd); 1575 pr_debug("unreg_fastreg_cmd: %p unmap_sg op\n", isert_cmd);
1443 ib_dma_unmap_sg(ib_dev, wr->sge, wr->num_sge, 1576 isert_unmap_data_buf(isert_conn, &wr->data);
1444 (wr->iser_ib_op == ISER_IB_RDMA_WRITE) ?
1445 DMA_TO_DEVICE : DMA_FROM_DEVICE);
1446 wr->sge = NULL;
1447 } 1577 }
1448 1578
1449 wr->ib_sge = NULL; 1579 wr->ib_sge = NULL;
@@ -1451,7 +1581,7 @@ isert_unreg_rdma(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn)
1451} 1581}
1452 1582
1453static void 1583static void
1454isert_put_cmd(struct isert_cmd *isert_cmd) 1584isert_put_cmd(struct isert_cmd *isert_cmd, bool comp_err)
1455{ 1585{
1456 struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd; 1586 struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
1457 struct isert_conn *isert_conn = isert_cmd->conn; 1587 struct isert_conn *isert_conn = isert_cmd->conn;
@@ -1467,8 +1597,21 @@ isert_put_cmd(struct isert_cmd *isert_cmd)
1467 list_del_init(&cmd->i_conn_node); 1597 list_del_init(&cmd->i_conn_node);
1468 spin_unlock_bh(&conn->cmd_lock); 1598 spin_unlock_bh(&conn->cmd_lock);
1469 1599
1470 if (cmd->data_direction == DMA_TO_DEVICE) 1600 if (cmd->data_direction == DMA_TO_DEVICE) {
1471 iscsit_stop_dataout_timer(cmd); 1601 iscsit_stop_dataout_timer(cmd);
1602 /*
1603 * Check for special case during comp_err where
1604 * WRITE_PENDING has been handed off from core,
1605 * but requires an extra target_put_sess_cmd()
1606 * before transport_generic_free_cmd() below.
1607 */
1608 if (comp_err &&
1609 cmd->se_cmd.t_state == TRANSPORT_WRITE_PENDING) {
1610 struct se_cmd *se_cmd = &cmd->se_cmd;
1611
1612 target_put_sess_cmd(se_cmd->se_sess, se_cmd);
1613 }
1614 }
1472 1615
1473 device->unreg_rdma_mem(isert_cmd, isert_conn); 1616 device->unreg_rdma_mem(isert_cmd, isert_conn);
1474 transport_generic_free_cmd(&cmd->se_cmd, 0); 1617 transport_generic_free_cmd(&cmd->se_cmd, 0);
@@ -1523,7 +1666,7 @@ isert_unmap_tx_desc(struct iser_tx_desc *tx_desc, struct ib_device *ib_dev)
1523 1666
1524static void 1667static void
1525isert_completion_put(struct iser_tx_desc *tx_desc, struct isert_cmd *isert_cmd, 1668isert_completion_put(struct iser_tx_desc *tx_desc, struct isert_cmd *isert_cmd,
1526 struct ib_device *ib_dev) 1669 struct ib_device *ib_dev, bool comp_err)
1527{ 1670{
1528 if (isert_cmd->pdu_buf_dma != 0) { 1671 if (isert_cmd->pdu_buf_dma != 0) {
1529 pr_debug("Calling ib_dma_unmap_single for isert_cmd->pdu_buf_dma\n"); 1672 pr_debug("Calling ib_dma_unmap_single for isert_cmd->pdu_buf_dma\n");
@@ -1533,7 +1676,77 @@ isert_completion_put(struct iser_tx_desc *tx_desc, struct isert_cmd *isert_cmd,
1533 } 1676 }
1534 1677
1535 isert_unmap_tx_desc(tx_desc, ib_dev); 1678 isert_unmap_tx_desc(tx_desc, ib_dev);
1536 isert_put_cmd(isert_cmd); 1679 isert_put_cmd(isert_cmd, comp_err);
1680}
1681
1682static int
1683isert_check_pi_status(struct se_cmd *se_cmd, struct ib_mr *sig_mr)
1684{
1685 struct ib_mr_status mr_status;
1686 int ret;
1687
1688 ret = ib_check_mr_status(sig_mr, IB_MR_CHECK_SIG_STATUS, &mr_status);
1689 if (ret) {
1690 pr_err("ib_check_mr_status failed, ret %d\n", ret);
1691 goto fail_mr_status;
1692 }
1693
1694 if (mr_status.fail_status & IB_MR_CHECK_SIG_STATUS) {
1695 u64 sec_offset_err;
1696 u32 block_size = se_cmd->se_dev->dev_attrib.block_size + 8;
1697
1698 switch (mr_status.sig_err.err_type) {
1699 case IB_SIG_BAD_GUARD:
1700 se_cmd->pi_err = TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED;
1701 break;
1702 case IB_SIG_BAD_REFTAG:
1703 se_cmd->pi_err = TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED;
1704 break;
1705 case IB_SIG_BAD_APPTAG:
1706 se_cmd->pi_err = TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED;
1707 break;
1708 }
1709 sec_offset_err = mr_status.sig_err.sig_err_offset;
1710 do_div(sec_offset_err, block_size);
1711 se_cmd->bad_sector = sec_offset_err + se_cmd->t_task_lba;
1712
1713 pr_err("isert: PI error found type %d at sector 0x%llx "
1714 "expected 0x%x vs actual 0x%x\n",
1715 mr_status.sig_err.err_type,
1716 (unsigned long long)se_cmd->bad_sector,
1717 mr_status.sig_err.expected,
1718 mr_status.sig_err.actual);
1719 ret = 1;
1720 }
1721
1722fail_mr_status:
1723 return ret;
1724}
1725
1726static void
1727isert_completion_rdma_write(struct iser_tx_desc *tx_desc,
1728 struct isert_cmd *isert_cmd)
1729{
1730 struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
1731 struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
1732 struct se_cmd *se_cmd = &cmd->se_cmd;
1733 struct isert_conn *isert_conn = isert_cmd->conn;
1734 struct isert_device *device = isert_conn->conn_device;
1735 int ret = 0;
1736
1737 if (wr->fr_desc && wr->fr_desc->ind & ISERT_PROTECTED) {
1738 ret = isert_check_pi_status(se_cmd,
1739 wr->fr_desc->pi_ctx->sig_mr);
1740 wr->fr_desc->ind &= ~ISERT_PROTECTED;
1741 }
1742
1743 device->unreg_rdma_mem(isert_cmd, isert_conn);
1744 wr->send_wr_num = 0;
1745 if (ret)
1746 transport_send_check_condition_and_sense(se_cmd,
1747 se_cmd->pi_err, 0);
1748 else
1749 isert_put_response(isert_conn->conn, cmd);
1537} 1750}
1538 1751
1539static void 1752static void
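
The isert_check_pi_status() addition above turns a signature error reported by the HCA into a TCM sense code plus a bad LBA: the error's byte offset on the wire is divided by the protected block size (the logical block size plus the 8-byte DIF tuple) and added to the command's starting LBA. The following standalone userspace sketch only models that arithmetic with made-up values; it is not driver code.

/* Userspace model of the bad-sector calculation in isert_check_pi_status().
 * Each protected block on the wire is block_size data bytes plus an
 * 8-byte DIF tuple.  Illustrative only.
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t block_size = 512;                          /* logical block size */
	uint64_t err_offset = 5 * (block_size + 8) + 100;   /* byte where PI check failed */
	uint64_t lba_base   = 1000;                         /* command's starting LBA */

	/* divide the byte offset by the protected block size, then rebase on the LBA */
	uint64_t bad_sector = err_offset / (block_size + 8) + lba_base;

	printf("PI error at byte %llu -> bad sector %llu\n",
	       (unsigned long long)err_offset, (unsigned long long)bad_sector);
	return 0;
}
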
@@ -1545,10 +1758,17 @@ isert_completion_rdma_read(struct iser_tx_desc *tx_desc,
1545 struct se_cmd *se_cmd = &cmd->se_cmd; 1758 struct se_cmd *se_cmd = &cmd->se_cmd;
1546 struct isert_conn *isert_conn = isert_cmd->conn; 1759 struct isert_conn *isert_conn = isert_cmd->conn;
1547 struct isert_device *device = isert_conn->conn_device; 1760 struct isert_device *device = isert_conn->conn_device;
1761 int ret = 0;
1762
1763 if (wr->fr_desc && wr->fr_desc->ind & ISERT_PROTECTED) {
1764 ret = isert_check_pi_status(se_cmd,
1765 wr->fr_desc->pi_ctx->sig_mr);
1766 wr->fr_desc->ind &= ~ISERT_PROTECTED;
1767 }
1548 1768
1549 iscsit_stop_dataout_timer(cmd); 1769 iscsit_stop_dataout_timer(cmd);
1550 device->unreg_rdma_mem(isert_cmd, isert_conn); 1770 device->unreg_rdma_mem(isert_cmd, isert_conn);
1551 cmd->write_data_done = wr->cur_rdma_length; 1771 cmd->write_data_done = wr->data.len;
1552 wr->send_wr_num = 0; 1772 wr->send_wr_num = 0;
1553 1773
1554 pr_debug("Cmd: %p RDMA_READ comp calling execute_cmd\n", isert_cmd); 1774 pr_debug("Cmd: %p RDMA_READ comp calling execute_cmd\n", isert_cmd);
@@ -1557,7 +1777,11 @@ isert_completion_rdma_read(struct iser_tx_desc *tx_desc,
1557 cmd->i_state = ISTATE_RECEIVED_LAST_DATAOUT; 1777 cmd->i_state = ISTATE_RECEIVED_LAST_DATAOUT;
1558 spin_unlock_bh(&cmd->istate_lock); 1778 spin_unlock_bh(&cmd->istate_lock);
1559 1779
1560 target_execute_cmd(se_cmd); 1780 if (ret)
1781 transport_send_check_condition_and_sense(se_cmd,
1782 se_cmd->pi_err, 0);
1783 else
1784 target_execute_cmd(se_cmd);
1561} 1785}
1562 1786
1563static void 1787static void
@@ -1577,14 +1801,14 @@ isert_do_control_comp(struct work_struct *work)
1577 iscsit_tmr_post_handler(cmd, cmd->conn); 1801 iscsit_tmr_post_handler(cmd, cmd->conn);
1578 1802
1579 cmd->i_state = ISTATE_SENT_STATUS; 1803 cmd->i_state = ISTATE_SENT_STATUS;
1580 isert_completion_put(&isert_cmd->tx_desc, isert_cmd, ib_dev); 1804 isert_completion_put(&isert_cmd->tx_desc, isert_cmd, ib_dev, false);
1581 break; 1805 break;
1582 case ISTATE_SEND_REJECT: 1806 case ISTATE_SEND_REJECT:
1583 pr_debug("Got isert_do_control_comp ISTATE_SEND_REJECT: >>>\n"); 1807 pr_debug("Got isert_do_control_comp ISTATE_SEND_REJECT: >>>\n");
1584 atomic_dec(&isert_conn->post_send_buf_count); 1808 atomic_dec(&isert_conn->post_send_buf_count);
1585 1809
1586 cmd->i_state = ISTATE_SENT_STATUS; 1810 cmd->i_state = ISTATE_SENT_STATUS;
1587 isert_completion_put(&isert_cmd->tx_desc, isert_cmd, ib_dev); 1811 isert_completion_put(&isert_cmd->tx_desc, isert_cmd, ib_dev, false);
1588 break; 1812 break;
1589 case ISTATE_SEND_LOGOUTRSP: 1813 case ISTATE_SEND_LOGOUTRSP:
1590 pr_debug("Calling iscsit_logout_post_handler >>>>>>>>>>>>>>\n"); 1814 pr_debug("Calling iscsit_logout_post_handler >>>>>>>>>>>>>>\n");
@@ -1598,7 +1822,7 @@ isert_do_control_comp(struct work_struct *work)
1598 case ISTATE_SEND_TEXTRSP: 1822 case ISTATE_SEND_TEXTRSP:
1599 atomic_dec(&isert_conn->post_send_buf_count); 1823 atomic_dec(&isert_conn->post_send_buf_count);
1600 cmd->i_state = ISTATE_SENT_STATUS; 1824 cmd->i_state = ISTATE_SENT_STATUS;
1601 isert_completion_put(&isert_cmd->tx_desc, isert_cmd, ib_dev); 1825 isert_completion_put(&isert_cmd->tx_desc, isert_cmd, ib_dev, false);
1602 break; 1826 break;
1603 default: 1827 default:
1604 pr_err("Unknown do_control_comp i_state %d\n", cmd->i_state); 1828 pr_err("Unknown do_control_comp i_state %d\n", cmd->i_state);
@@ -1626,10 +1850,21 @@ isert_response_completion(struct iser_tx_desc *tx_desc,
1626 queue_work(isert_comp_wq, &isert_cmd->comp_work); 1850 queue_work(isert_comp_wq, &isert_cmd->comp_work);
1627 return; 1851 return;
1628 } 1852 }
1629 atomic_sub(wr->send_wr_num + 1, &isert_conn->post_send_buf_count); 1853
1854 /**
1855 * If send_wr_num is 0, the RDMA completion has
1856 * already been handled and cleared, so only the
1857 * response post needs to be decremented. Otherwise
1858 * the response post is already included in
1859 * send_wr_num, so subtract the whole count.
1860 **/
1861 if (wr->send_wr_num)
1862 atomic_sub(wr->send_wr_num, &isert_conn->post_send_buf_count);
1863 else
1864 atomic_dec(&isert_conn->post_send_buf_count);
1630 1865
1631 cmd->i_state = ISTATE_SENT_STATUS; 1866 cmd->i_state = ISTATE_SENT_STATUS;
1632 isert_completion_put(tx_desc, isert_cmd, ib_dev); 1867 isert_completion_put(tx_desc, isert_cmd, ib_dev, false);
1633} 1868}
1634 1869
1635static void 1870static void
@@ -1658,8 +1893,9 @@ __isert_send_completion(struct iser_tx_desc *tx_desc,
1658 isert_conn, ib_dev); 1893 isert_conn, ib_dev);
1659 break; 1894 break;
1660 case ISER_IB_RDMA_WRITE: 1895 case ISER_IB_RDMA_WRITE:
1661 pr_err("isert_send_completion: Got ISER_IB_RDMA_WRITE\n"); 1896 pr_debug("isert_send_completion: Got ISER_IB_RDMA_WRITE\n");
1662 dump_stack(); 1897 atomic_sub(wr->send_wr_num, &isert_conn->post_send_buf_count);
1898 isert_completion_rdma_write(tx_desc, isert_cmd);
1663 break; 1899 break;
1664 case ISER_IB_RDMA_READ: 1900 case ISER_IB_RDMA_READ:
1665 pr_debug("isert_send_completion: Got ISER_IB_RDMA_READ:\n"); 1901 pr_debug("isert_send_completion: Got ISER_IB_RDMA_READ:\n");
@@ -1709,8 +1945,20 @@ isert_cq_drain_comp_llist(struct isert_conn *isert_conn, struct ib_device *ib_de
1709 llnode = llist_next(llnode); 1945 llnode = llist_next(llnode);
1710 wr = &t->isert_cmd->rdma_wr; 1946 wr = &t->isert_cmd->rdma_wr;
1711 1947
1712 atomic_sub(wr->send_wr_num + 1, &isert_conn->post_send_buf_count); 1948 /**
1713 isert_completion_put(t, t->isert_cmd, ib_dev); 1949 * If send_wr_num is 0, the RDMA completion has
1950 * already been handled and cleared, so only the
1951 * response post needs to be decremented. Otherwise
1952 * the response post is already included in
1953 * send_wr_num, so subtract the whole count.
1954 **/
1955 if (wr->send_wr_num)
1956 atomic_sub(wr->send_wr_num,
1957 &isert_conn->post_send_buf_count);
1958 else
1959 atomic_dec(&isert_conn->post_send_buf_count);
1960
1961 isert_completion_put(t, t->isert_cmd, ib_dev, true);
1714 } 1962 }
1715} 1963}
1716 1964
@@ -1728,15 +1976,27 @@ isert_cq_tx_comp_err(struct iser_tx_desc *tx_desc, struct isert_conn *isert_conn
1728 llnode = llist_next(llnode); 1976 llnode = llist_next(llnode);
1729 wr = &t->isert_cmd->rdma_wr; 1977 wr = &t->isert_cmd->rdma_wr;
1730 1978
1731 atomic_sub(wr->send_wr_num + 1, &isert_conn->post_send_buf_count); 1979 /**
1732 isert_completion_put(t, t->isert_cmd, ib_dev); 1980 * If send_wr_num is 0, the RDMA completion has
1981 * already been handled and cleared, so only the
1982 * response post needs to be decremented. Otherwise
1983 * the response post is already included in
1984 * send_wr_num, so subtract the whole count.
1985 **/
1986 if (wr->send_wr_num)
1987 atomic_sub(wr->send_wr_num,
1988 &isert_conn->post_send_buf_count);
1989 else
1990 atomic_dec(&isert_conn->post_send_buf_count);
1991
1992 isert_completion_put(t, t->isert_cmd, ib_dev, true);
1733 } 1993 }
1734 tx_desc->comp_llnode_batch = NULL; 1994 tx_desc->comp_llnode_batch = NULL;
1735 1995
1736 if (!isert_cmd) 1996 if (!isert_cmd)
1737 isert_unmap_tx_desc(tx_desc, ib_dev); 1997 isert_unmap_tx_desc(tx_desc, ib_dev);
1738 else 1998 else
1739 isert_completion_put(tx_desc, isert_cmd, ib_dev); 1999 isert_completion_put(tx_desc, isert_cmd, ib_dev, true);
1740} 2000}
1741 2001
1742static void 2002static void
@@ -1918,6 +2178,36 @@ isert_put_response(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
1918 return isert_post_response(isert_conn, isert_cmd); 2178 return isert_post_response(isert_conn, isert_cmd);
1919} 2179}
1920 2180
2181static void
2182isert_aborted_task(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
2183{
2184 struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
2185 struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
2186 struct isert_device *device = isert_conn->conn_device;
2187
2188 spin_lock_bh(&conn->cmd_lock);
2189 if (!list_empty(&cmd->i_conn_node))
2190 list_del_init(&cmd->i_conn_node);
2191 spin_unlock_bh(&conn->cmd_lock);
2192
2193 if (cmd->data_direction == DMA_TO_DEVICE)
2194 iscsit_stop_dataout_timer(cmd);
2195
2196 device->unreg_rdma_mem(isert_cmd, isert_conn);
2197}
2198
2199static enum target_prot_op
2200isert_get_sup_prot_ops(struct iscsi_conn *conn)
2201{
2202 struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
2203 struct isert_device *device = isert_conn->conn_device;
2204
2205 if (device->pi_capable)
2206 return TARGET_PROT_ALL;
2207
2208 return TARGET_PROT_NORMAL;
2209}
2210
1921static int 2211static int
1922isert_put_nopin(struct iscsi_cmd *cmd, struct iscsi_conn *conn, 2212isert_put_nopin(struct iscsi_cmd *cmd, struct iscsi_conn *conn,
1923 bool nopout_response) 2213 bool nopout_response)
@@ -2099,54 +2389,39 @@ isert_map_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
2099 struct se_cmd *se_cmd = &cmd->se_cmd; 2389 struct se_cmd *se_cmd = &cmd->se_cmd;
2100 struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd); 2390 struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
2101 struct isert_conn *isert_conn = (struct isert_conn *)conn->context; 2391 struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
2102 struct ib_device *ib_dev = isert_conn->conn_cm_id->device; 2392 struct isert_data_buf *data = &wr->data;
2103 struct ib_send_wr *send_wr; 2393 struct ib_send_wr *send_wr;
2104 struct ib_sge *ib_sge; 2394 struct ib_sge *ib_sge;
2105 struct scatterlist *sg_start; 2395 u32 offset, data_len, data_left, rdma_write_max, va_offset = 0;
2106 u32 sg_off = 0, sg_nents; 2396 int ret = 0, i, ib_sge_cnt;
2107 u32 offset = 0, data_len, data_left, rdma_write_max, va_offset = 0;
2108 int ret = 0, count, i, ib_sge_cnt;
2109 2397
2110 if (wr->iser_ib_op == ISER_IB_RDMA_WRITE) { 2398 isert_cmd->tx_desc.isert_cmd = isert_cmd;
2111 data_left = se_cmd->data_length;
2112 } else {
2113 sg_off = cmd->write_data_done / PAGE_SIZE;
2114 data_left = se_cmd->data_length - cmd->write_data_done;
2115 offset = cmd->write_data_done;
2116 isert_cmd->tx_desc.isert_cmd = isert_cmd;
2117 }
2118 2399
2119 sg_start = &cmd->se_cmd.t_data_sg[sg_off]; 2400 offset = wr->iser_ib_op == ISER_IB_RDMA_READ ? cmd->write_data_done : 0;
2120 sg_nents = se_cmd->t_data_nents - sg_off; 2401 ret = isert_map_data_buf(isert_conn, isert_cmd, se_cmd->t_data_sg,
2402 se_cmd->t_data_nents, se_cmd->data_length,
2403 offset, wr->iser_ib_op, &wr->data);
2404 if (ret)
2405 return ret;
2121 2406
2122 count = ib_dma_map_sg(ib_dev, sg_start, sg_nents, 2407 data_left = data->len;
2123 (wr->iser_ib_op == ISER_IB_RDMA_WRITE) ? 2408 offset = data->offset;
2124 DMA_TO_DEVICE : DMA_FROM_DEVICE);
2125 if (unlikely(!count)) {
2126 pr_err("Cmd: %p unrable to map SGs\n", isert_cmd);
2127 return -EINVAL;
2128 }
2129 wr->sge = sg_start;
2130 wr->num_sge = sg_nents;
2131 wr->cur_rdma_length = data_left;
2132 pr_debug("Mapped cmd: %p count: %u sg: %p sg_nents: %u rdma_len %d\n",
2133 isert_cmd, count, sg_start, sg_nents, data_left);
2134 2409
2135 ib_sge = kzalloc(sizeof(struct ib_sge) * sg_nents, GFP_KERNEL); 2410 ib_sge = kzalloc(sizeof(struct ib_sge) * data->nents, GFP_KERNEL);
2136 if (!ib_sge) { 2411 if (!ib_sge) {
2137 pr_warn("Unable to allocate ib_sge\n"); 2412 pr_warn("Unable to allocate ib_sge\n");
2138 ret = -ENOMEM; 2413 ret = -ENOMEM;
2139 goto unmap_sg; 2414 goto unmap_cmd;
2140 } 2415 }
2141 wr->ib_sge = ib_sge; 2416 wr->ib_sge = ib_sge;
2142 2417
2143 wr->send_wr_num = DIV_ROUND_UP(sg_nents, isert_conn->max_sge); 2418 wr->send_wr_num = DIV_ROUND_UP(data->nents, isert_conn->max_sge);
2144 wr->send_wr = kzalloc(sizeof(struct ib_send_wr) * wr->send_wr_num, 2419 wr->send_wr = kzalloc(sizeof(struct ib_send_wr) * wr->send_wr_num,
2145 GFP_KERNEL); 2420 GFP_KERNEL);
2146 if (!wr->send_wr) { 2421 if (!wr->send_wr) {
2147 pr_debug("Unable to allocate wr->send_wr\n"); 2422 pr_debug("Unable to allocate wr->send_wr\n");
2148 ret = -ENOMEM; 2423 ret = -ENOMEM;
2149 goto unmap_sg; 2424 goto unmap_cmd;
2150 } 2425 }
2151 2426
2152 wr->isert_cmd = isert_cmd; 2427 wr->isert_cmd = isert_cmd;
@@ -2185,10 +2460,9 @@ isert_map_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
2185 } 2460 }
2186 2461
2187 return 0; 2462 return 0;
2188unmap_sg: 2463unmap_cmd:
2189 ib_dma_unmap_sg(ib_dev, sg_start, sg_nents, 2464 isert_unmap_data_buf(isert_conn, data);
2190 (wr->iser_ib_op == ISER_IB_RDMA_WRITE) ? 2465
2191 DMA_TO_DEVICE : DMA_FROM_DEVICE);
2192 return ret; 2466 return ret;
2193} 2467}
2194 2468
@@ -2232,49 +2506,70 @@ isert_map_fr_pagelist(struct ib_device *ib_dev,
2232} 2506}
2233 2507
2234static int 2508static int
2235isert_fast_reg_mr(struct fast_reg_descriptor *fr_desc, 2509isert_fast_reg_mr(struct isert_conn *isert_conn,
2236 struct isert_conn *isert_conn, struct scatterlist *sg_start, 2510 struct fast_reg_descriptor *fr_desc,
2237 struct ib_sge *ib_sge, u32 sg_nents, u32 offset, 2511 struct isert_data_buf *mem,
2238 unsigned int data_len) 2512 enum isert_indicator ind,
2513 struct ib_sge *sge)
2239{ 2514{
2240 struct ib_device *ib_dev = isert_conn->conn_cm_id->device; 2515 struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
2516 struct ib_mr *mr;
2517 struct ib_fast_reg_page_list *frpl;
2241 struct ib_send_wr fr_wr, inv_wr; 2518 struct ib_send_wr fr_wr, inv_wr;
2242 struct ib_send_wr *bad_wr, *wr = NULL; 2519 struct ib_send_wr *bad_wr, *wr = NULL;
2243 int ret, pagelist_len; 2520 int ret, pagelist_len;
2244 u32 page_off; 2521 u32 page_off;
2245 u8 key; 2522 u8 key;
2246 2523
2247 sg_nents = min_t(unsigned int, sg_nents, ISCSI_ISER_SG_TABLESIZE); 2524 if (mem->dma_nents == 1) {
2248 page_off = offset % PAGE_SIZE; 2525 sge->lkey = isert_conn->conn_mr->lkey;
2526 sge->addr = ib_sg_dma_address(ib_dev, &mem->sg[0]);
2527 sge->length = ib_sg_dma_len(ib_dev, &mem->sg[0]);
2528 pr_debug("%s:%d sge: addr: 0x%llx length: %u lkey: %x\n",
2529 __func__, __LINE__, sge->addr, sge->length,
2530 sge->lkey);
2531 return 0;
2532 }
2533
2534 if (ind == ISERT_DATA_KEY_VALID) {
2535 /* Registering data buffer */
2536 mr = fr_desc->data_mr;
2537 frpl = fr_desc->data_frpl;
2538 } else {
2539 /* Registering protection buffer */
2540 mr = fr_desc->pi_ctx->prot_mr;
2541 frpl = fr_desc->pi_ctx->prot_frpl;
2542 }
2543
2544 page_off = mem->offset % PAGE_SIZE;
2249 2545
2250 pr_debug("Use fr_desc %p sg_nents %d offset %u\n", 2546 pr_debug("Use fr_desc %p sg_nents %d offset %u\n",
2251 fr_desc, sg_nents, offset); 2547 fr_desc, mem->nents, mem->offset);
2252 2548
2253 pagelist_len = isert_map_fr_pagelist(ib_dev, sg_start, sg_nents, 2549 pagelist_len = isert_map_fr_pagelist(ib_dev, mem->sg, mem->nents,
2254 &fr_desc->data_frpl->page_list[0]); 2550 &frpl->page_list[0]);
2255 2551
2256 if (!fr_desc->valid) { 2552 if (!(fr_desc->ind & ISERT_DATA_KEY_VALID)) {
2257 memset(&inv_wr, 0, sizeof(inv_wr)); 2553 memset(&inv_wr, 0, sizeof(inv_wr));
2258 inv_wr.wr_id = ISER_FASTREG_LI_WRID; 2554 inv_wr.wr_id = ISER_FASTREG_LI_WRID;
2259 inv_wr.opcode = IB_WR_LOCAL_INV; 2555 inv_wr.opcode = IB_WR_LOCAL_INV;
2260 inv_wr.ex.invalidate_rkey = fr_desc->data_mr->rkey; 2556 inv_wr.ex.invalidate_rkey = mr->rkey;
2261 wr = &inv_wr; 2557 wr = &inv_wr;
2262 /* Bump the key */ 2558 /* Bump the key */
2263 key = (u8)(fr_desc->data_mr->rkey & 0x000000FF); 2559 key = (u8)(mr->rkey & 0x000000FF);
2264 ib_update_fast_reg_key(fr_desc->data_mr, ++key); 2560 ib_update_fast_reg_key(mr, ++key);
2265 } 2561 }
2266 2562
2267 /* Prepare FASTREG WR */ 2563 /* Prepare FASTREG WR */
2268 memset(&fr_wr, 0, sizeof(fr_wr)); 2564 memset(&fr_wr, 0, sizeof(fr_wr));
2269 fr_wr.wr_id = ISER_FASTREG_LI_WRID; 2565 fr_wr.wr_id = ISER_FASTREG_LI_WRID;
2270 fr_wr.opcode = IB_WR_FAST_REG_MR; 2566 fr_wr.opcode = IB_WR_FAST_REG_MR;
2271 fr_wr.wr.fast_reg.iova_start = 2567 fr_wr.wr.fast_reg.iova_start = frpl->page_list[0] + page_off;
2272 fr_desc->data_frpl->page_list[0] + page_off; 2568 fr_wr.wr.fast_reg.page_list = frpl;
2273 fr_wr.wr.fast_reg.page_list = fr_desc->data_frpl;
2274 fr_wr.wr.fast_reg.page_list_len = pagelist_len; 2569 fr_wr.wr.fast_reg.page_list_len = pagelist_len;
2275 fr_wr.wr.fast_reg.page_shift = PAGE_SHIFT; 2570 fr_wr.wr.fast_reg.page_shift = PAGE_SHIFT;
2276 fr_wr.wr.fast_reg.length = data_len; 2571 fr_wr.wr.fast_reg.length = mem->len;
2277 fr_wr.wr.fast_reg.rkey = fr_desc->data_mr->rkey; 2572 fr_wr.wr.fast_reg.rkey = mr->rkey;
2278 fr_wr.wr.fast_reg.access_flags = IB_ACCESS_LOCAL_WRITE; 2573 fr_wr.wr.fast_reg.access_flags = IB_ACCESS_LOCAL_WRITE;
2279 2574
2280 if (!wr) 2575 if (!wr)
@@ -2287,15 +2582,157 @@ isert_fast_reg_mr(struct fast_reg_descriptor *fr_desc,
2287 pr_err("fast registration failed, ret:%d\n", ret); 2582 pr_err("fast registration failed, ret:%d\n", ret);
2288 return ret; 2583 return ret;
2289 } 2584 }
2290 fr_desc->valid = false; 2585 fr_desc->ind &= ~ind;
2586
2587 sge->lkey = mr->lkey;
2588 sge->addr = frpl->page_list[0] + page_off;
2589 sge->length = mem->len;
2590
2591 pr_debug("%s:%d sge: addr: 0x%llx length: %u lkey: %x\n",
2592 __func__, __LINE__, sge->addr, sge->length,
2593 sge->lkey);
2594
2595 return ret;
2596}
2597
2598static inline enum ib_t10_dif_type
2599se2ib_prot_type(enum target_prot_type prot_type)
2600{
2601 switch (prot_type) {
2602 case TARGET_DIF_TYPE0_PROT:
2603 return IB_T10DIF_NONE;
2604 case TARGET_DIF_TYPE1_PROT:
2605 return IB_T10DIF_TYPE1;
2606 case TARGET_DIF_TYPE2_PROT:
2607 return IB_T10DIF_TYPE2;
2608 case TARGET_DIF_TYPE3_PROT:
2609 return IB_T10DIF_TYPE3;
2610 default:
2611 return IB_T10DIF_NONE;
2612 }
2613}
2291 2614
2292 ib_sge->lkey = fr_desc->data_mr->lkey; 2615static int
2293 ib_sge->addr = fr_desc->data_frpl->page_list[0] + page_off; 2616isert_set_sig_attrs(struct se_cmd *se_cmd, struct ib_sig_attrs *sig_attrs)
2294 ib_sge->length = data_len; 2617{
2618 enum ib_t10_dif_type ib_prot_type = se2ib_prot_type(se_cmd->prot_type);
2619
2620 sig_attrs->mem.sig_type = IB_SIG_TYPE_T10_DIF;
2621 sig_attrs->wire.sig_type = IB_SIG_TYPE_T10_DIF;
2622 sig_attrs->mem.sig.dif.pi_interval =
2623 se_cmd->se_dev->dev_attrib.block_size;
2624 sig_attrs->wire.sig.dif.pi_interval =
2625 se_cmd->se_dev->dev_attrib.block_size;
2626
2627 switch (se_cmd->prot_op) {
2628 case TARGET_PROT_DIN_INSERT:
2629 case TARGET_PROT_DOUT_STRIP:
2630 sig_attrs->mem.sig.dif.type = IB_T10DIF_NONE;
2631 sig_attrs->wire.sig.dif.type = ib_prot_type;
2632 sig_attrs->wire.sig.dif.bg_type = IB_T10DIF_CRC;
2633 sig_attrs->wire.sig.dif.ref_tag = se_cmd->reftag_seed;
2634 break;
2635 case TARGET_PROT_DOUT_INSERT:
2636 case TARGET_PROT_DIN_STRIP:
2637 sig_attrs->mem.sig.dif.type = ib_prot_type;
2638 sig_attrs->mem.sig.dif.bg_type = IB_T10DIF_CRC;
2639 sig_attrs->mem.sig.dif.ref_tag = se_cmd->reftag_seed;
2640 sig_attrs->wire.sig.dif.type = IB_T10DIF_NONE;
2641 break;
2642 case TARGET_PROT_DIN_PASS:
2643 case TARGET_PROT_DOUT_PASS:
2644 sig_attrs->mem.sig.dif.type = ib_prot_type;
2645 sig_attrs->mem.sig.dif.bg_type = IB_T10DIF_CRC;
2646 sig_attrs->mem.sig.dif.ref_tag = se_cmd->reftag_seed;
2647 sig_attrs->wire.sig.dif.type = ib_prot_type;
2648 sig_attrs->wire.sig.dif.bg_type = IB_T10DIF_CRC;
2649 sig_attrs->wire.sig.dif.ref_tag = se_cmd->reftag_seed;
2650 break;
2651 default:
2652 pr_err("Unsupported PI operation %d\n", se_cmd->prot_op);
2653 return -EINVAL;
2654 }
2295 2655
2296 pr_debug("RDMA ib_sge: addr: 0x%16llx length: %u lkey: %08x\n", 2656 return 0;
2297 ib_sge->addr, ib_sge->length, ib_sge->lkey); 2657}
2658
2659static inline u8
2660isert_set_prot_checks(u8 prot_checks)
2661{
2662 return (prot_checks & TARGET_DIF_CHECK_GUARD ? 0xc0 : 0) |
2663 (prot_checks & TARGET_DIF_CHECK_REFTAG ? 0x30 : 0) |
2664 (prot_checks & TARGET_DIF_CHECK_REFTAG ? 0x0f : 0);
2665}
2666
2667static int
2668isert_reg_sig_mr(struct isert_conn *isert_conn, struct se_cmd *se_cmd,
2669 struct fast_reg_descriptor *fr_desc,
2670 struct ib_sge *data_sge, struct ib_sge *prot_sge,
2671 struct ib_sge *sig_sge)
2672{
2673 struct ib_send_wr sig_wr, inv_wr;
2674 struct ib_send_wr *bad_wr, *wr = NULL;
2675 struct pi_context *pi_ctx = fr_desc->pi_ctx;
2676 struct ib_sig_attrs sig_attrs;
2677 int ret;
2678 u32 key;
2679
2680 memset(&sig_attrs, 0, sizeof(sig_attrs));
2681 ret = isert_set_sig_attrs(se_cmd, &sig_attrs);
2682 if (ret)
2683 goto err;
2684
2685 sig_attrs.check_mask = isert_set_prot_checks(se_cmd->prot_checks);
2686
2687 if (!(fr_desc->ind & ISERT_SIG_KEY_VALID)) {
2688 memset(&inv_wr, 0, sizeof(inv_wr));
2689 inv_wr.opcode = IB_WR_LOCAL_INV;
2690 inv_wr.wr_id = ISER_FASTREG_LI_WRID;
2691 inv_wr.ex.invalidate_rkey = pi_ctx->sig_mr->rkey;
2692 wr = &inv_wr;
2693 /* Bump the key */
2694 key = (u8)(pi_ctx->sig_mr->rkey & 0x000000FF);
2695 ib_update_fast_reg_key(pi_ctx->sig_mr, ++key);
2696 }
2697
2698 memset(&sig_wr, 0, sizeof(sig_wr));
2699 sig_wr.opcode = IB_WR_REG_SIG_MR;
2700 sig_wr.wr_id = ISER_FASTREG_LI_WRID;
2701 sig_wr.sg_list = data_sge;
2702 sig_wr.num_sge = 1;
2703 sig_wr.wr.sig_handover.access_flags = IB_ACCESS_LOCAL_WRITE;
2704 sig_wr.wr.sig_handover.sig_attrs = &sig_attrs;
2705 sig_wr.wr.sig_handover.sig_mr = pi_ctx->sig_mr;
2706 if (se_cmd->t_prot_sg)
2707 sig_wr.wr.sig_handover.prot = prot_sge;
2708
2709 if (!wr)
2710 wr = &sig_wr;
2711 else
2712 wr->next = &sig_wr;
2713
2714 ret = ib_post_send(isert_conn->conn_qp, wr, &bad_wr);
2715 if (ret) {
2716 pr_err("fast registration failed, ret:%d\n", ret);
2717 goto err;
2718 }
2719 fr_desc->ind &= ~ISERT_SIG_KEY_VALID;
2720
2721 sig_sge->lkey = pi_ctx->sig_mr->lkey;
2722 sig_sge->addr = 0;
2723 sig_sge->length = se_cmd->data_length;
2724 if (se_cmd->prot_op != TARGET_PROT_DIN_STRIP &&
2725 se_cmd->prot_op != TARGET_PROT_DOUT_INSERT)
2726 /*
2727 * We have protection guards on the wire
2728 * so we need to set a larger transfer
2729 */
2730 sig_sge->length += se_cmd->prot_length;
2298 2731
2732 pr_debug("sig_sge: addr: 0x%llx length: %u lkey: %x\n",
2733 sig_sge->addr, sig_sge->length,
2734 sig_sge->lkey);
2735err:
2299 return ret; 2736 return ret;
2300} 2737}
2301 2738
@@ -2305,62 +2742,82 @@ isert_reg_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
2305{ 2742{
2306 struct se_cmd *se_cmd = &cmd->se_cmd; 2743 struct se_cmd *se_cmd = &cmd->se_cmd;
2307 struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd); 2744 struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
2308 struct isert_conn *isert_conn = (struct isert_conn *)conn->context; 2745 struct isert_conn *isert_conn = conn->context;
2309 struct ib_device *ib_dev = isert_conn->conn_cm_id->device; 2746 struct ib_sge data_sge;
2310 struct ib_send_wr *send_wr; 2747 struct ib_send_wr *send_wr;
2311 struct ib_sge *ib_sge; 2748 struct fast_reg_descriptor *fr_desc = NULL;
2312 struct scatterlist *sg_start; 2749 u32 offset;
2313 struct fast_reg_descriptor *fr_desc; 2750 int ret = 0;
2314 u32 sg_off = 0, sg_nents;
2315 u32 offset = 0, data_len, data_left, rdma_write_max;
2316 int ret = 0, count;
2317 unsigned long flags; 2751 unsigned long flags;
2318 2752
2319 if (wr->iser_ib_op == ISER_IB_RDMA_WRITE) { 2753 isert_cmd->tx_desc.isert_cmd = isert_cmd;
2320 data_left = se_cmd->data_length;
2321 } else {
2322 offset = cmd->write_data_done;
2323 sg_off = offset / PAGE_SIZE;
2324 data_left = se_cmd->data_length - cmd->write_data_done;
2325 isert_cmd->tx_desc.isert_cmd = isert_cmd;
2326 }
2327 2754
2328 sg_start = &cmd->se_cmd.t_data_sg[sg_off]; 2755 offset = wr->iser_ib_op == ISER_IB_RDMA_READ ? cmd->write_data_done : 0;
2329 sg_nents = se_cmd->t_data_nents - sg_off; 2756 ret = isert_map_data_buf(isert_conn, isert_cmd, se_cmd->t_data_sg,
2757 se_cmd->t_data_nents, se_cmd->data_length,
2758 offset, wr->iser_ib_op, &wr->data);
2759 if (ret)
2760 return ret;
2330 2761
2331 count = ib_dma_map_sg(ib_dev, sg_start, sg_nents, 2762 if (wr->data.dma_nents != 1 ||
2332 (wr->iser_ib_op == ISER_IB_RDMA_WRITE) ? 2763 se_cmd->prot_op != TARGET_PROT_NORMAL) {
2333 DMA_TO_DEVICE : DMA_FROM_DEVICE); 2764 spin_lock_irqsave(&isert_conn->conn_lock, flags);
2334 if (unlikely(!count)) { 2765 fr_desc = list_first_entry(&isert_conn->conn_fr_pool,
2335 pr_err("Cmd: %p unrable to map SGs\n", isert_cmd); 2766 struct fast_reg_descriptor, list);
2336 return -EINVAL; 2767 list_del(&fr_desc->list);
2768 spin_unlock_irqrestore(&isert_conn->conn_lock, flags);
2769 wr->fr_desc = fr_desc;
2337 } 2770 }
2338 wr->sge = sg_start;
2339 wr->num_sge = sg_nents;
2340 pr_debug("Mapped cmd: %p count: %u sg: %p sg_nents: %u rdma_len %d\n",
2341 isert_cmd, count, sg_start, sg_nents, data_left);
2342 2771
2343 memset(&wr->s_ib_sge, 0, sizeof(*ib_sge)); 2772 ret = isert_fast_reg_mr(isert_conn, fr_desc, &wr->data,
2344 ib_sge = &wr->s_ib_sge; 2773 ISERT_DATA_KEY_VALID, &data_sge);
2345 wr->ib_sge = ib_sge; 2774 if (ret)
2775 goto unmap_cmd;
2776
2777 if (se_cmd->prot_op != TARGET_PROT_NORMAL) {
2778 struct ib_sge prot_sge, sig_sge;
2779
2780 if (se_cmd->t_prot_sg) {
2781 ret = isert_map_data_buf(isert_conn, isert_cmd,
2782 se_cmd->t_prot_sg,
2783 se_cmd->t_prot_nents,
2784 se_cmd->prot_length,
2785 0, wr->iser_ib_op, &wr->prot);
2786 if (ret)
2787 goto unmap_cmd;
2788
2789 ret = isert_fast_reg_mr(isert_conn, fr_desc, &wr->prot,
2790 ISERT_PROT_KEY_VALID, &prot_sge);
2791 if (ret)
2792 goto unmap_prot_cmd;
2793 }
2794
2795 ret = isert_reg_sig_mr(isert_conn, se_cmd, fr_desc,
2796 &data_sge, &prot_sge, &sig_sge);
2797 if (ret)
2798 goto unmap_prot_cmd;
2346 2799
2800 fr_desc->ind |= ISERT_PROTECTED;
2801 memcpy(&wr->s_ib_sge, &sig_sge, sizeof(sig_sge));
2802 } else
2803 memcpy(&wr->s_ib_sge, &data_sge, sizeof(data_sge));
2804
2805 wr->ib_sge = &wr->s_ib_sge;
2347 wr->send_wr_num = 1; 2806 wr->send_wr_num = 1;
2348 memset(&wr->s_send_wr, 0, sizeof(*send_wr)); 2807 memset(&wr->s_send_wr, 0, sizeof(*send_wr));
2349 wr->send_wr = &wr->s_send_wr; 2808 wr->send_wr = &wr->s_send_wr;
2350
2351 wr->isert_cmd = isert_cmd; 2809 wr->isert_cmd = isert_cmd;
2352 rdma_write_max = ISCSI_ISER_SG_TABLESIZE * PAGE_SIZE;
2353 2810
2354 send_wr = &isert_cmd->rdma_wr.s_send_wr; 2811 send_wr = &isert_cmd->rdma_wr.s_send_wr;
2355 send_wr->sg_list = ib_sge; 2812 send_wr->sg_list = &wr->s_ib_sge;
2356 send_wr->num_sge = 1; 2813 send_wr->num_sge = 1;
2357 send_wr->wr_id = (unsigned long)&isert_cmd->tx_desc; 2814 send_wr->wr_id = (unsigned long)&isert_cmd->tx_desc;
2358 if (wr->iser_ib_op == ISER_IB_RDMA_WRITE) { 2815 if (wr->iser_ib_op == ISER_IB_RDMA_WRITE) {
2359 send_wr->opcode = IB_WR_RDMA_WRITE; 2816 send_wr->opcode = IB_WR_RDMA_WRITE;
2360 send_wr->wr.rdma.remote_addr = isert_cmd->read_va; 2817 send_wr->wr.rdma.remote_addr = isert_cmd->read_va;
2361 send_wr->wr.rdma.rkey = isert_cmd->read_stag; 2818 send_wr->wr.rdma.rkey = isert_cmd->read_stag;
2362 send_wr->send_flags = 0; 2819 send_wr->send_flags = se_cmd->prot_op == TARGET_PROT_NORMAL ?
2363 send_wr->next = &isert_cmd->tx_desc.send_wr; 2820 0 : IB_SEND_SIGNALED;
2364 } else { 2821 } else {
2365 send_wr->opcode = IB_WR_RDMA_READ; 2822 send_wr->opcode = IB_WR_RDMA_READ;
2366 send_wr->wr.rdma.remote_addr = isert_cmd->write_va; 2823 send_wr->wr.rdma.remote_addr = isert_cmd->write_va;
@@ -2368,37 +2825,18 @@ isert_reg_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
2368 send_wr->send_flags = IB_SEND_SIGNALED; 2825 send_wr->send_flags = IB_SEND_SIGNALED;
2369 } 2826 }
2370 2827
2371 data_len = min(data_left, rdma_write_max); 2828 return 0;
2372 wr->cur_rdma_length = data_len; 2829unmap_prot_cmd:
2373 2830 if (se_cmd->t_prot_sg)
2374 /* if there is a single dma entry, dma mr is sufficient */ 2831 isert_unmap_data_buf(isert_conn, &wr->prot);
2375 if (count == 1) { 2832unmap_cmd:
2376 ib_sge->addr = ib_sg_dma_address(ib_dev, &sg_start[0]); 2833 if (fr_desc) {
2377 ib_sge->length = ib_sg_dma_len(ib_dev, &sg_start[0]);
2378 ib_sge->lkey = isert_conn->conn_mr->lkey;
2379 wr->fr_desc = NULL;
2380 } else {
2381 spin_lock_irqsave(&isert_conn->conn_lock, flags); 2834 spin_lock_irqsave(&isert_conn->conn_lock, flags);
2382 fr_desc = list_first_entry(&isert_conn->conn_fr_pool, 2835 list_add_tail(&fr_desc->list, &isert_conn->conn_fr_pool);
2383 struct fast_reg_descriptor, list);
2384 list_del(&fr_desc->list);
2385 spin_unlock_irqrestore(&isert_conn->conn_lock, flags); 2836 spin_unlock_irqrestore(&isert_conn->conn_lock, flags);
2386 wr->fr_desc = fr_desc;
2387
2388 ret = isert_fast_reg_mr(fr_desc, isert_conn, sg_start,
2389 ib_sge, sg_nents, offset, data_len);
2390 if (ret) {
2391 list_add_tail(&fr_desc->list, &isert_conn->conn_fr_pool);
2392 goto unmap_sg;
2393 }
2394 } 2837 }
2838 isert_unmap_data_buf(isert_conn, &wr->data);
2395 2839
2396 return 0;
2397
2398unmap_sg:
2399 ib_dma_unmap_sg(ib_dev, sg_start, sg_nents,
2400 (wr->iser_ib_op == ISER_IB_RDMA_WRITE) ?
2401 DMA_TO_DEVICE : DMA_FROM_DEVICE);
2402 return ret; 2840 return ret;
2403} 2841}
2404 2842
@@ -2422,25 +2860,35 @@ isert_put_datain(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
2422 return rc; 2860 return rc;
2423 } 2861 }
2424 2862
2425 /* 2863 if (se_cmd->prot_op == TARGET_PROT_NORMAL) {
2426 * Build isert_conn->tx_desc for iSCSI response PDU and attach 2864 /*
2427 */ 2865 * Build isert_conn->tx_desc for iSCSI response PDU and attach
2428 isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc); 2866 */
2429 iscsit_build_rsp_pdu(cmd, conn, true, (struct iscsi_scsi_rsp *) 2867 isert_create_send_desc(isert_conn, isert_cmd,
2430 &isert_cmd->tx_desc.iscsi_header); 2868 &isert_cmd->tx_desc);
2431 isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc); 2869 iscsit_build_rsp_pdu(cmd, conn, true, (struct iscsi_scsi_rsp *)
2432 isert_init_send_wr(isert_conn, isert_cmd, 2870 &isert_cmd->tx_desc.iscsi_header);
2433 &isert_cmd->tx_desc.send_wr, true); 2871 isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
2872 isert_init_send_wr(isert_conn, isert_cmd,
2873 &isert_cmd->tx_desc.send_wr, true);
2874 isert_cmd->rdma_wr.s_send_wr.next = &isert_cmd->tx_desc.send_wr;
2875 wr->send_wr_num += 1;
2876 }
2434 2877
2435 atomic_add(wr->send_wr_num + 1, &isert_conn->post_send_buf_count); 2878 atomic_add(wr->send_wr_num, &isert_conn->post_send_buf_count);
2436 2879
2437 rc = ib_post_send(isert_conn->conn_qp, wr->send_wr, &wr_failed); 2880 rc = ib_post_send(isert_conn->conn_qp, wr->send_wr, &wr_failed);
2438 if (rc) { 2881 if (rc) {
2439 pr_warn("ib_post_send() failed for IB_WR_RDMA_WRITE\n"); 2882 pr_warn("ib_post_send() failed for IB_WR_RDMA_WRITE\n");
2440 atomic_sub(wr->send_wr_num + 1, &isert_conn->post_send_buf_count); 2883 atomic_sub(wr->send_wr_num, &isert_conn->post_send_buf_count);
2441 } 2884 }
2442 pr_debug("Cmd: %p posted RDMA_WRITE + Response for iSER Data READ\n", 2885
2443 isert_cmd); 2886 if (se_cmd->prot_op == TARGET_PROT_NORMAL)
2887 pr_debug("Cmd: %p posted RDMA_WRITE + Response for iSER Data "
2888 "READ\n", isert_cmd);
2889 else
2890 pr_debug("Cmd: %p posted RDMA_WRITE for iSER Data READ\n",
2891 isert_cmd);
2444 2892
2445 return 1; 2893 return 1;
2446} 2894}
@@ -2815,6 +3263,8 @@ static struct iscsit_transport iser_target_transport = {
2815 .iscsit_get_dataout = isert_get_dataout, 3263 .iscsit_get_dataout = isert_get_dataout,
2816 .iscsit_queue_data_in = isert_put_datain, 3264 .iscsit_queue_data_in = isert_put_datain,
2817 .iscsit_queue_status = isert_put_response, 3265 .iscsit_queue_status = isert_put_response,
3266 .iscsit_aborted_task = isert_aborted_task,
3267 .iscsit_get_sup_prot_ops = isert_get_sup_prot_ops,
2818}; 3268};
2819 3269
2820static int __init isert_init(void) 3270static int __init isert_init(void)
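
Several hunks above replace the old unconditional "send_wr_num + 1" bookkeeping with a conditional decrement of post_send_buf_count. The standalone sketch below models that rule outside the kernel; struct conn_model and struct wr_model are invented stand-ins for isert_conn and isert_rdma_wr, not real driver types.

/* Minimal userspace model of the new isert completion accounting rule.
 * The real driver uses atomic_t counters; plain ints are used here.
 */
#include <stdio.h>

struct conn_model {
	int post_send_buf_count;   /* models isert_conn->post_send_buf_count */
};

struct wr_model {
	int send_wr_num;           /* 0 once the RDMA completion cleared it */
};

static void response_completion(struct conn_model *conn, struct wr_model *wr)
{
	if (wr->send_wr_num)
		/* the response post is already folded into send_wr_num */
		conn->post_send_buf_count -= wr->send_wr_num;
	else
		/* RDMA completion already ran; only the response post remains */
		conn->post_send_buf_count -= 1;
}

int main(void)
{
	struct conn_model conn = { .post_send_buf_count = 3 };
	struct wr_model a = { .send_wr_num = 2 };  /* response counted with the RDMA WRs */
	struct wr_model b = { .send_wr_num = 0 };  /* RDMA completion already handled */

	response_completion(&conn, &a);
	response_completion(&conn, &b);
	printf("remaining posts: %d\n", conn.post_send_buf_count);
	return 0;
}
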
diff --git a/drivers/infiniband/ulp/isert/ib_isert.h b/drivers/infiniband/ulp/isert/ib_isert.h
index f6ae7f5dd408..4c072ae34c01 100644
--- a/drivers/infiniband/ulp/isert/ib_isert.h
+++ b/drivers/infiniband/ulp/isert/ib_isert.h
@@ -50,11 +50,35 @@ struct iser_tx_desc {
50 struct ib_send_wr send_wr; 50 struct ib_send_wr send_wr;
51} __packed; 51} __packed;
52 52
53enum isert_indicator {
54 ISERT_PROTECTED = 1 << 0,
55 ISERT_DATA_KEY_VALID = 1 << 1,
56 ISERT_PROT_KEY_VALID = 1 << 2,
57 ISERT_SIG_KEY_VALID = 1 << 3,
58};
59
60struct pi_context {
61 struct ib_mr *prot_mr;
62 struct ib_fast_reg_page_list *prot_frpl;
63 struct ib_mr *sig_mr;
64};
65
53struct fast_reg_descriptor { 66struct fast_reg_descriptor {
54 struct list_head list; 67 struct list_head list;
55 struct ib_mr *data_mr; 68 struct ib_mr *data_mr;
56 struct ib_fast_reg_page_list *data_frpl; 69 struct ib_fast_reg_page_list *data_frpl;
57 bool valid; 70 u8 ind;
71 struct pi_context *pi_ctx;
72};
73
74struct isert_data_buf {
75 struct scatterlist *sg;
76 int nents;
77 u32 sg_off;
78 u32 len; /* cur_rdma_length */
79 u32 offset;
80 unsigned int dma_nents;
81 enum dma_data_direction dma_dir;
58}; 82};
59 83
60struct isert_rdma_wr { 84struct isert_rdma_wr {
@@ -63,12 +87,11 @@ struct isert_rdma_wr {
63 enum iser_ib_op_code iser_ib_op; 87 enum iser_ib_op_code iser_ib_op;
64 struct ib_sge *ib_sge; 88 struct ib_sge *ib_sge;
65 struct ib_sge s_ib_sge; 89 struct ib_sge s_ib_sge;
66 int num_sge;
67 struct scatterlist *sge;
68 int send_wr_num; 90 int send_wr_num;
69 struct ib_send_wr *send_wr; 91 struct ib_send_wr *send_wr;
70 struct ib_send_wr s_send_wr; 92 struct ib_send_wr s_send_wr;
71 u32 cur_rdma_length; 93 struct isert_data_buf data;
94 struct isert_data_buf prot;
72 struct fast_reg_descriptor *fr_desc; 95 struct fast_reg_descriptor *fr_desc;
73}; 96};
74 97
@@ -141,6 +164,7 @@ struct isert_cq_desc {
141 164
142struct isert_device { 165struct isert_device {
143 int use_fastreg; 166 int use_fastreg;
167 bool pi_capable;
144 int cqs_used; 168 int cqs_used;
145 int refcount; 169 int refcount;
146 int cq_active_qps[ISERT_MAX_CQ]; 170 int cq_active_qps[ISERT_MAX_CQ];
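
The header change above replaces the descriptor's single "bool valid" with the "ind" bitmask so that data, protection and signature keys can be tracked independently, alongside the ISERT_PROTECTED marker. The userspace sketch below models how such a mask is consumed; the enum and function names are invented for illustration and mirror, rather than reproduce, the driver logic.

/* Userspace sketch of a per-descriptor key-validity bitmask.
 * Illustrative only; not the fast_reg_descriptor implementation.
 */
#include <stdio.h>

enum ind_flags {
	IND_PROTECTED      = 1 << 0,
	IND_DATA_KEY_VALID = 1 << 1,
	IND_PROT_KEY_VALID = 1 << 2,
	IND_SIG_KEY_VALID  = 1 << 3,
};

static unsigned consume_key(unsigned ind, unsigned key)
{
	if (!(ind & key))
		printf("key 0x%x stale: invalidate and bump the rkey first\n", key);
	/* a completed fast registration leaves the key needing invalidation next time */
	return ind & ~key;
}

int main(void)
{
	unsigned ind = IND_DATA_KEY_VALID | IND_PROT_KEY_VALID | IND_SIG_KEY_VALID;

	ind = consume_key(ind, IND_DATA_KEY_VALID);  /* register the data buffer */
	ind = consume_key(ind, IND_SIG_KEY_VALID);   /* register the signature MR */
	ind |= IND_PROTECTED;                        /* command now carries PI */
	printf("descriptor flags: 0x%x\n", ind);
	return 0;
}
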
diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c
index 0e537d8d0e47..fe09f2788b15 100644
--- a/drivers/infiniband/ulp/srpt/ib_srpt.c
+++ b/drivers/infiniband/ulp/srpt/ib_srpt.c
@@ -1078,6 +1078,7 @@ static void srpt_unmap_sg_to_ib_sge(struct srpt_rdma_ch *ch,
1078static int srpt_map_sg_to_ib_sge(struct srpt_rdma_ch *ch, 1078static int srpt_map_sg_to_ib_sge(struct srpt_rdma_ch *ch,
1079 struct srpt_send_ioctx *ioctx) 1079 struct srpt_send_ioctx *ioctx)
1080{ 1080{
1081 struct ib_device *dev = ch->sport->sdev->device;
1081 struct se_cmd *cmd; 1082 struct se_cmd *cmd;
1082 struct scatterlist *sg, *sg_orig; 1083 struct scatterlist *sg, *sg_orig;
1083 int sg_cnt; 1084 int sg_cnt;
@@ -1124,7 +1125,7 @@ static int srpt_map_sg_to_ib_sge(struct srpt_rdma_ch *ch,
1124 1125
1125 db = ioctx->rbufs; 1126 db = ioctx->rbufs;
1126 tsize = cmd->data_length; 1127 tsize = cmd->data_length;
1127 dma_len = sg_dma_len(&sg[0]); 1128 dma_len = ib_sg_dma_len(dev, &sg[0]);
1128 riu = ioctx->rdma_ius; 1129 riu = ioctx->rdma_ius;
1129 1130
1130 /* 1131 /*
@@ -1155,7 +1156,8 @@ static int srpt_map_sg_to_ib_sge(struct srpt_rdma_ch *ch,
1155 ++j; 1156 ++j;
1156 if (j < count) { 1157 if (j < count) {
1157 sg = sg_next(sg); 1158 sg = sg_next(sg);
1158 dma_len = sg_dma_len(sg); 1159 dma_len = ib_sg_dma_len(
1160 dev, sg);
1159 } 1161 }
1160 } 1162 }
1161 } else { 1163 } else {
@@ -1192,8 +1194,8 @@ static int srpt_map_sg_to_ib_sge(struct srpt_rdma_ch *ch,
1192 tsize = cmd->data_length; 1194 tsize = cmd->data_length;
1193 riu = ioctx->rdma_ius; 1195 riu = ioctx->rdma_ius;
1194 sg = sg_orig; 1196 sg = sg_orig;
1195 dma_len = sg_dma_len(&sg[0]); 1197 dma_len = ib_sg_dma_len(dev, &sg[0]);
1196 dma_addr = sg_dma_address(&sg[0]); 1198 dma_addr = ib_sg_dma_address(dev, &sg[0]);
1197 1199
1198 /* this second loop is really mapped sg_addres to rdma_iu->ib_sge */ 1200 /* this second loop is really mapped sg_addres to rdma_iu->ib_sge */
1199 for (i = 0, j = 0; 1201 for (i = 0, j = 0;
@@ -1216,8 +1218,10 @@ static int srpt_map_sg_to_ib_sge(struct srpt_rdma_ch *ch,
1216 ++j; 1218 ++j;
1217 if (j < count) { 1219 if (j < count) {
1218 sg = sg_next(sg); 1220 sg = sg_next(sg);
1219 dma_len = sg_dma_len(sg); 1221 dma_len = ib_sg_dma_len(
1220 dma_addr = sg_dma_address(sg); 1222 dev, sg);
1223 dma_addr = ib_sg_dma_address(
1224 dev, sg);
1221 } 1225 }
1222 } 1226 }
1223 } else { 1227 } else {
@@ -2580,7 +2584,7 @@ static int srpt_cm_req_recv(struct ib_cm_id *cm_id,
2580 goto destroy_ib; 2584 goto destroy_ib;
2581 } 2585 }
2582 2586
2583 ch->sess = transport_init_session(); 2587 ch->sess = transport_init_session(TARGET_PROT_NORMAL);
2584 if (IS_ERR(ch->sess)) { 2588 if (IS_ERR(ch->sess)) {
2585 rej->reason = __constant_cpu_to_be32( 2589 rej->reason = __constant_cpu_to_be32(
2586 SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES); 2590 SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
@@ -3081,6 +3085,14 @@ static void srpt_queue_tm_rsp(struct se_cmd *cmd)
3081 srpt_queue_response(cmd); 3085 srpt_queue_response(cmd);
3082} 3086}
3083 3087
3088static void srpt_aborted_task(struct se_cmd *cmd)
3089{
3090 struct srpt_send_ioctx *ioctx = container_of(cmd,
3091 struct srpt_send_ioctx, cmd);
3092
3093 srpt_unmap_sg_to_ib_sge(ioctx->ch, ioctx);
3094}
3095
3084static int srpt_queue_status(struct se_cmd *cmd) 3096static int srpt_queue_status(struct se_cmd *cmd)
3085{ 3097{
3086 struct srpt_send_ioctx *ioctx; 3098 struct srpt_send_ioctx *ioctx;
@@ -3928,6 +3940,7 @@ static struct target_core_fabric_ops srpt_template = {
3928 .queue_data_in = srpt_queue_data_in, 3940 .queue_data_in = srpt_queue_data_in,
3929 .queue_status = srpt_queue_status, 3941 .queue_status = srpt_queue_status,
3930 .queue_tm_rsp = srpt_queue_tm_rsp, 3942 .queue_tm_rsp = srpt_queue_tm_rsp,
3943 .aborted_task = srpt_aborted_task,
3931 /* 3944 /*
3932 * Setup function pointers for generic logic in 3945 * Setup function pointers for generic logic in
3933 * target_core_fabric_configfs.c 3946 * target_core_fabric_configfs.c
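
The srpt hunks swap the generic sg_dma_len()/sg_dma_address() helpers for the ib_sg_dma_*() accessors, presumably so that RDMA providers which override the DMA mapping operations report the addresses and lengths the HCA actually needs. The sketch below is a userspace caricature of that indirection with invented struct names; it is not how the verbs layer is implemented.

/* Userspace caricature of a device-aware scatterlist length accessor.
 * Illustrative only.
 */
#include <stdio.h>
#include <stdint.h>

struct sg_entry {
	uint64_t dma_address;
	uint32_t dma_length;
};

struct dev_dma_ops {
	uint32_t (*sg_length)(const struct sg_entry *sg);
};

struct ib_dev_model {
	const struct dev_dma_ops *dma_ops;   /* NULL => use the raw field */
};

/* mirrors the idea behind ib_sg_dma_len(): prefer the device override */
static uint32_t dev_sg_len(const struct ib_dev_model *dev, const struct sg_entry *sg)
{
	return dev->dma_ops ? dev->dma_ops->sg_length(sg) : sg->dma_length;
}

static uint32_t quirk_len(const struct sg_entry *sg)
{
	return sg->dma_length / 2;   /* pretend the provider remaps buffers */
}

static const struct dev_dma_ops quirk_ops = { .sg_length = quirk_len };

int main(void)
{
	struct sg_entry sg = { .dma_address = 0x1000, .dma_length = 4096 };
	struct ib_dev_model plain  = { .dma_ops = NULL };
	struct ib_dev_model quirky = { .dma_ops = &quirk_ops };

	printf("raw field: %u, accessor (plain): %u, accessor (override): %u\n",
	       (unsigned)sg.dma_length,
	       (unsigned)dev_sg_len(&plain, &sg),
	       (unsigned)dev_sg_len(&quirky, &sg));
	return 0;
}
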
diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
index 4195a01b1535..9a8e66ae04f5 100644
--- a/drivers/md/bitmap.c
+++ b/drivers/md/bitmap.c
@@ -1988,7 +1988,6 @@ location_store(struct mddev *mddev, const char *buf, size_t len)
1988 if (mddev->bitmap_info.file) { 1988 if (mddev->bitmap_info.file) {
1989 struct file *f = mddev->bitmap_info.file; 1989 struct file *f = mddev->bitmap_info.file;
1990 mddev->bitmap_info.file = NULL; 1990 mddev->bitmap_info.file = NULL;
1991 restore_bitmap_write_access(f);
1992 fput(f); 1991 fput(f);
1993 } 1992 }
1994 } else { 1993 } else {
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 4ad5cc4e63e8..8fda38d23e38 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -5181,32 +5181,6 @@ static int restart_array(struct mddev *mddev)
5181 return 0; 5181 return 0;
5182} 5182}
5183 5183
5184/* similar to deny_write_access, but accounts for our holding a reference
5185 * to the file ourselves */
5186static int deny_bitmap_write_access(struct file * file)
5187{
5188 struct inode *inode = file->f_mapping->host;
5189
5190 spin_lock(&inode->i_lock);
5191 if (atomic_read(&inode->i_writecount) > 1) {
5192 spin_unlock(&inode->i_lock);
5193 return -ETXTBSY;
5194 }
5195 atomic_set(&inode->i_writecount, -1);
5196 spin_unlock(&inode->i_lock);
5197
5198 return 0;
5199}
5200
5201void restore_bitmap_write_access(struct file *file)
5202{
5203 struct inode *inode = file->f_mapping->host;
5204
5205 spin_lock(&inode->i_lock);
5206 atomic_set(&inode->i_writecount, 1);
5207 spin_unlock(&inode->i_lock);
5208}
5209
5210static void md_clean(struct mddev *mddev) 5184static void md_clean(struct mddev *mddev)
5211{ 5185{
5212 mddev->array_sectors = 0; 5186 mddev->array_sectors = 0;
@@ -5427,7 +5401,6 @@ static int do_md_stop(struct mddev * mddev, int mode,
5427 5401
5428 bitmap_destroy(mddev); 5402 bitmap_destroy(mddev);
5429 if (mddev->bitmap_info.file) { 5403 if (mddev->bitmap_info.file) {
5430 restore_bitmap_write_access(mddev->bitmap_info.file);
5431 fput(mddev->bitmap_info.file); 5404 fput(mddev->bitmap_info.file);
5432 mddev->bitmap_info.file = NULL; 5405 mddev->bitmap_info.file = NULL;
5433 } 5406 }
@@ -5979,7 +5952,7 @@ abort_export:
5979 5952
5980static int set_bitmap_file(struct mddev *mddev, int fd) 5953static int set_bitmap_file(struct mddev *mddev, int fd)
5981{ 5954{
5982 int err; 5955 int err = 0;
5983 5956
5984 if (mddev->pers) { 5957 if (mddev->pers) {
5985 if (!mddev->pers->quiesce) 5958 if (!mddev->pers->quiesce)
@@ -5991,6 +5964,7 @@ static int set_bitmap_file(struct mddev *mddev, int fd)
5991 5964
5992 5965
5993 if (fd >= 0) { 5966 if (fd >= 0) {
5967 struct inode *inode;
5994 if (mddev->bitmap) 5968 if (mddev->bitmap)
5995 return -EEXIST; /* cannot add when bitmap is present */ 5969 return -EEXIST; /* cannot add when bitmap is present */
5996 mddev->bitmap_info.file = fget(fd); 5970 mddev->bitmap_info.file = fget(fd);
@@ -6001,10 +5975,21 @@ static int set_bitmap_file(struct mddev *mddev, int fd)
6001 return -EBADF; 5975 return -EBADF;
6002 } 5976 }
6003 5977
6004 err = deny_bitmap_write_access(mddev->bitmap_info.file); 5978 inode = mddev->bitmap_info.file->f_mapping->host;
6005 if (err) { 5979 if (!S_ISREG(inode->i_mode)) {
5980 printk(KERN_ERR "%s: error: bitmap file must be a regular file\n",
5981 mdname(mddev));
5982 err = -EBADF;
5983 } else if (!(mddev->bitmap_info.file->f_mode & FMODE_WRITE)) {
5984 printk(KERN_ERR "%s: error: bitmap file must be open for write\n",
5985 mdname(mddev));
5986 err = -EBADF;
5987 } else if (atomic_read(&inode->i_writecount) != 1) {
6006 printk(KERN_ERR "%s: error: bitmap file is already in use\n", 5988 printk(KERN_ERR "%s: error: bitmap file is already in use\n",
6007 mdname(mddev)); 5989 mdname(mddev));
5990 err = -EBUSY;
5991 }
5992 if (err) {
6008 fput(mddev->bitmap_info.file); 5993 fput(mddev->bitmap_info.file);
6009 mddev->bitmap_info.file = NULL; 5994 mddev->bitmap_info.file = NULL;
6010 return err; 5995 return err;
@@ -6027,10 +6012,8 @@ static int set_bitmap_file(struct mddev *mddev, int fd)
6027 mddev->pers->quiesce(mddev, 0); 6012 mddev->pers->quiesce(mddev, 0);
6028 } 6013 }
6029 if (fd < 0) { 6014 if (fd < 0) {
6030 if (mddev->bitmap_info.file) { 6015 if (mddev->bitmap_info.file)
6031 restore_bitmap_write_access(mddev->bitmap_info.file);
6032 fput(mddev->bitmap_info.file); 6016 fput(mddev->bitmap_info.file);
6033 }
6034 mddev->bitmap_info.file = NULL; 6017 mddev->bitmap_info.file = NULL;
6035 } 6018 }
6036 6019
@@ -7182,11 +7165,14 @@ static int md_seq_open(struct inode *inode, struct file *file)
7182 return error; 7165 return error;
7183} 7166}
7184 7167
7168static int md_unloading;
7185static unsigned int mdstat_poll(struct file *filp, poll_table *wait) 7169static unsigned int mdstat_poll(struct file *filp, poll_table *wait)
7186{ 7170{
7187 struct seq_file *seq = filp->private_data; 7171 struct seq_file *seq = filp->private_data;
7188 int mask; 7172 int mask;
7189 7173
7174 if (md_unloading)
7175 return POLLIN|POLLRDNORM|POLLERR|POLLPRI;
7190 poll_wait(filp, &md_event_waiters, wait); 7176 poll_wait(filp, &md_event_waiters, wait);
7191 7177
7192 /* always allow read */ 7178 /* always allow read */
@@ -8672,6 +8658,7 @@ static __exit void md_exit(void)
8672{ 8658{
8673 struct mddev *mddev; 8659 struct mddev *mddev;
8674 struct list_head *tmp; 8660 struct list_head *tmp;
8661 int delay = 1;
8675 8662
8676 blk_unregister_region(MKDEV(MD_MAJOR,0), 1U << MINORBITS); 8663 blk_unregister_region(MKDEV(MD_MAJOR,0), 1U << MINORBITS);
8677 blk_unregister_region(MKDEV(mdp_major,0), 1U << MINORBITS); 8664 blk_unregister_region(MKDEV(mdp_major,0), 1U << MINORBITS);
@@ -8680,7 +8667,19 @@ static __exit void md_exit(void)
8680 unregister_blkdev(mdp_major, "mdp"); 8667 unregister_blkdev(mdp_major, "mdp");
8681 unregister_reboot_notifier(&md_notifier); 8668 unregister_reboot_notifier(&md_notifier);
8682 unregister_sysctl_table(raid_table_header); 8669 unregister_sysctl_table(raid_table_header);
8670
8671 /* We cannot unload the modules while some process is
8672 * waiting for us in select() or poll() - wake them up
8673 */
8674 md_unloading = 1;
8675 while (waitqueue_active(&md_event_waiters)) {
8676 /* not safe to leave yet */
8677 wake_up(&md_event_waiters);
8678 msleep(delay);
8679 delay += delay;
8680 }
8683 remove_proc_entry("mdstat", NULL); 8681 remove_proc_entry("mdstat", NULL);
8682
8684 for_each_mddev(mddev, tmp) { 8683 for_each_mddev(mddev, tmp) {
8685 export_array(mddev); 8684 export_array(mddev);
8686 mddev->hold_active = 0; 8685 mddev->hold_active = 0;
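
In place of deny_bitmap_write_access()/restore_bitmap_write_access(), set_bitmap_file() now validates the bitmap file up front: it must be a regular file, opened for write, and have no other writers (i_writecount == 1). A small userspace model of that check order is sketched below; struct file_model is a stand-in, not a kernel structure.

/* Userspace model of the new bitmap-file checks in set_bitmap_file().
 * Illustrative only.
 */
#include <stdio.h>
#include <errno.h>
#include <stdbool.h>

struct file_model {
	bool is_regular;         /* models S_ISREG(inode->i_mode) */
	bool opened_for_write;   /* models f_mode & FMODE_WRITE */
	int  writecount;         /* models inode->i_writecount */
};

static int check_bitmap_file(const struct file_model *f)
{
	if (!f->is_regular)
		return -EBADF;   /* bitmap file must be a regular file */
	if (!f->opened_for_write)
		return -EBADF;   /* bitmap file must be open for write */
	if (f->writecount != 1)
		return -EBUSY;   /* bitmap file is already in use elsewhere */
	return 0;
}

int main(void)
{
	struct file_model ok   = { true, true, 1 };
	struct file_model busy = { true, true, 2 };

	printf("ok: %d, busy: %d\n", check_bitmap_file(&ok), check_bitmap_file(&busy));
	return 0;
}
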
diff --git a/drivers/md/md.h b/drivers/md/md.h
index 07bba96de260..a49d991f3fe1 100644
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -605,7 +605,6 @@ extern int md_check_no_bitmap(struct mddev *mddev);
605extern int md_integrity_register(struct mddev *mddev); 605extern int md_integrity_register(struct mddev *mddev);
606extern void md_integrity_add_rdev(struct md_rdev *rdev, struct mddev *mddev); 606extern void md_integrity_add_rdev(struct md_rdev *rdev, struct mddev *mddev);
607extern int strict_strtoul_scaled(const char *cp, unsigned long *res, int scale); 607extern int strict_strtoul_scaled(const char *cp, unsigned long *res, int scale);
608extern void restore_bitmap_write_access(struct file *file);
609 608
610extern void mddev_init(struct mddev *mddev); 609extern void mddev_init(struct mddev *mddev);
611extern int md_run(struct mddev *mddev); 610extern int md_run(struct mddev *mddev);
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 4a6ca1cb2e78..56e24c072b62 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -97,6 +97,7 @@ static void * r1buf_pool_alloc(gfp_t gfp_flags, void *data)
97 struct pool_info *pi = data; 97 struct pool_info *pi = data;
98 struct r1bio *r1_bio; 98 struct r1bio *r1_bio;
99 struct bio *bio; 99 struct bio *bio;
100 int need_pages;
100 int i, j; 101 int i, j;
101 102
102 r1_bio = r1bio_pool_alloc(gfp_flags, pi); 103 r1_bio = r1bio_pool_alloc(gfp_flags, pi);
@@ -119,15 +120,15 @@ static void * r1buf_pool_alloc(gfp_t gfp_flags, void *data)
119 * RESYNC_PAGES for each bio. 120 * RESYNC_PAGES for each bio.
120 */ 121 */
121 if (test_bit(MD_RECOVERY_REQUESTED, &pi->mddev->recovery)) 122 if (test_bit(MD_RECOVERY_REQUESTED, &pi->mddev->recovery))
122 j = pi->raid_disks; 123 need_pages = pi->raid_disks;
123 else 124 else
124 j = 1; 125 need_pages = 1;
125 while(j--) { 126 for (j = 0; j < need_pages; j++) {
126 bio = r1_bio->bios[j]; 127 bio = r1_bio->bios[j];
127 bio->bi_vcnt = RESYNC_PAGES; 128 bio->bi_vcnt = RESYNC_PAGES;
128 129
129 if (bio_alloc_pages(bio, gfp_flags)) 130 if (bio_alloc_pages(bio, gfp_flags))
130 goto out_free_bio; 131 goto out_free_pages;
131 } 132 }
132 /* If not user-requests, copy the page pointers to all bios */ 133 /* If not user-requests, copy the page pointers to all bios */
133 if (!test_bit(MD_RECOVERY_REQUESTED, &pi->mddev->recovery)) { 134 if (!test_bit(MD_RECOVERY_REQUESTED, &pi->mddev->recovery)) {
@@ -141,6 +142,14 @@ static void * r1buf_pool_alloc(gfp_t gfp_flags, void *data)
141 142
142 return r1_bio; 143 return r1_bio;
143 144
145out_free_pages:
146 while (--j >= 0) {
147 struct bio_vec *bv;
148
149 bio_for_each_segment_all(bv, r1_bio->bios[j], i)
150 __free_page(bv->bv_page);
151 }
152
144out_free_bio: 153out_free_bio:
145 while (++j < pi->raid_disks) 154 while (++j < pi->raid_disks)
146 bio_put(r1_bio->bios[j]); 155 bio_put(r1_bio->bios[j]);
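
The raid1 hunk converts the resync-page allocation into a counted loop with a new out_free_pages label, so that a mid-loop failure frees only the pages that were actually allocated before falling through to the existing bio cleanup. The generic C sketch below shows the same unwind pattern with plain malloc()/free(); it is illustrative only.

/* Generic model of the counted-allocation unwind pattern. */
#include <stdio.h>
#include <stdlib.h>

#define NBUF 4

int main(void)
{
	void *buf[NBUF] = { NULL };
	int need = NBUF, j;

	for (j = 0; j < need; j++) {
		buf[j] = malloc(4096);
		if (!buf[j])
			goto out_free_pages;   /* mirrors the new label */
	}
	printf("all %d buffers allocated\n", need);
	for (j = 0; j < need; j++)
		free(buf[j]);
	return 0;

out_free_pages:
	fprintf(stderr, "allocation failed at index %d\n", j);
	while (--j >= 0)                       /* free only what was allocated */
		free(buf[j]);
	return 1;
}
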
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 16f5c21963db..25247a852912 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -679,14 +679,9 @@ get_active_stripe(struct r5conf *conf, sector_t sector,
679 init_stripe(sh, sector, previous); 679 init_stripe(sh, sector, previous);
680 atomic_inc(&sh->count); 680 atomic_inc(&sh->count);
681 } 681 }
682 } else { 682 } else if (!atomic_inc_not_zero(&sh->count)) {
683 spin_lock(&conf->device_lock); 683 spin_lock(&conf->device_lock);
684 if (atomic_read(&sh->count)) { 684 if (!atomic_read(&sh->count)) {
685 BUG_ON(!list_empty(&sh->lru)
686 && !test_bit(STRIPE_EXPANDING, &sh->state)
687 && !test_bit(STRIPE_ON_UNPLUG_LIST, &sh->state)
688 );
689 } else {
690 if (!test_bit(STRIPE_HANDLE, &sh->state)) 685 if (!test_bit(STRIPE_HANDLE, &sh->state))
691 atomic_inc(&conf->active_stripes); 686 atomic_inc(&conf->active_stripes);
692 BUG_ON(list_empty(&sh->lru) && 687 BUG_ON(list_empty(&sh->lru) &&
@@ -4552,6 +4547,8 @@ static void make_request(struct mddev *mddev, struct bio * bi)
4552 struct stripe_head *sh; 4547 struct stripe_head *sh;
4553 const int rw = bio_data_dir(bi); 4548 const int rw = bio_data_dir(bi);
4554 int remaining; 4549 int remaining;
4550 DEFINE_WAIT(w);
4551 bool do_prepare;
4555 4552
4556 if (unlikely(bi->bi_rw & REQ_FLUSH)) { 4553 if (unlikely(bi->bi_rw & REQ_FLUSH)) {
4557 md_flush_request(mddev, bi); 4554 md_flush_request(mddev, bi);
@@ -4575,15 +4572,18 @@ static void make_request(struct mddev *mddev, struct bio * bi)
4575 bi->bi_next = NULL; 4572 bi->bi_next = NULL;
 	bi->bi_phys_segments = 1;	/* over-loaded to count active stripes */
 
+	prepare_to_wait(&conf->wait_for_overlap, &w, TASK_UNINTERRUPTIBLE);
 	for (;logical_sector < last_sector; logical_sector += STRIPE_SECTORS) {
-		DEFINE_WAIT(w);
 		int previous;
 		int seq;
 
+		do_prepare = false;
 	retry:
 		seq = read_seqcount_begin(&conf->gen_lock);
 		previous = 0;
-		prepare_to_wait(&conf->wait_for_overlap, &w, TASK_UNINTERRUPTIBLE);
+		if (do_prepare)
+			prepare_to_wait(&conf->wait_for_overlap, &w,
+				TASK_UNINTERRUPTIBLE);
 		if (unlikely(conf->reshape_progress != MaxSector)) {
 			/* spinlock is needed as reshape_progress may be
 			 * 64bit on a 32bit platform, and so it might be
@@ -4604,6 +4604,7 @@ static void make_request(struct mddev *mddev, struct bio * bi)
 			    : logical_sector >= conf->reshape_safe) {
 				spin_unlock_irq(&conf->device_lock);
 				schedule();
+				do_prepare = true;
 				goto retry;
 			}
 		}
@@ -4640,6 +4641,7 @@ static void make_request(struct mddev *mddev, struct bio * bi)
 			if (must_retry) {
 				release_stripe(sh);
 				schedule();
+				do_prepare = true;
 				goto retry;
 			}
 		}
@@ -4663,8 +4665,10 @@ static void make_request(struct mddev *mddev, struct bio * bi)
 				prepare_to_wait(&conf->wait_for_overlap,
 						&w, TASK_INTERRUPTIBLE);
 				if (logical_sector >= mddev->suspend_lo &&
-				    logical_sector < mddev->suspend_hi)
+				    logical_sector < mddev->suspend_hi) {
 					schedule();
+					do_prepare = true;
+				}
 				goto retry;
 			}
 
@@ -4677,9 +4681,9 @@ static void make_request(struct mddev *mddev, struct bio * bi)
 				md_wakeup_thread(mddev->thread);
 				release_stripe(sh);
 				schedule();
+				do_prepare = true;
 				goto retry;
 			}
-			finish_wait(&conf->wait_for_overlap, &w);
 			set_bit(STRIPE_HANDLE, &sh->state);
 			clear_bit(STRIPE_DELAYED, &sh->state);
 			if ((bi->bi_rw & REQ_SYNC) &&
@@ -4689,10 +4693,10 @@ static void make_request(struct mddev *mddev, struct bio * bi)
 		} else {
 			/* cannot get stripe for read-ahead, just give-up */
 			clear_bit(BIO_UPTODATE, &bi->bi_flags);
-			finish_wait(&conf->wait_for_overlap, &w);
 			break;
 		}
 	}
+	finish_wait(&conf->wait_for_overlap, &w);
 
 	remaining = raid5_dec_bi_active_stripes(bi);
 	if (remaining == 0) {
diff --git a/drivers/media/dvb-frontends/drx39xyj/Kconfig b/drivers/media/dvb-frontends/drx39xyj/Kconfig
index 15628eb5cf0c..6c2ccb6a506b 100644
--- a/drivers/media/dvb-frontends/drx39xyj/Kconfig
+++ b/drivers/media/dvb-frontends/drx39xyj/Kconfig
@@ -1,7 +1,7 @@
 config DVB_DRX39XYJ
 	tristate "Micronas DRX-J demodulator"
 	depends on DVB_CORE && I2C
-	default m if DVB_FE_CUSTOMISE
+	default m if !MEDIA_SUBDRV_AUTOSELECT
 	help
 	  An ATSC 8VSB and QAM64/256 tuner module. Say Y when you want
 	  to support this frontend.
diff --git a/drivers/media/dvb-frontends/lgdt3305.c b/drivers/media/dvb-frontends/lgdt3305.c
index 1d2c47378cf8..92c891a571ab 100644
--- a/drivers/media/dvb-frontends/lgdt3305.c
+++ b/drivers/media/dvb-frontends/lgdt3305.c
@@ -1176,6 +1176,7 @@ static struct dvb_frontend_ops lgdt3304_ops = {
 	},
 	.i2c_gate_ctrl        = lgdt3305_i2c_gate_ctrl,
 	.init                 = lgdt3305_init,
+	.sleep                = lgdt3305_sleep,
 	.set_frontend         = lgdt3304_set_parameters,
 	.get_frontend         = lgdt3305_get_frontend,
 	.get_tune_settings    = lgdt3305_get_tune_settings,
diff --git a/drivers/media/dvb-frontends/m88rs2000.c b/drivers/media/dvb-frontends/m88rs2000.c
index 32cffca14d0b..d63bc9c13dce 100644
--- a/drivers/media/dvb-frontends/m88rs2000.c
+++ b/drivers/media/dvb-frontends/m88rs2000.c
@@ -297,7 +297,7 @@ struct inittab {
 	u8 val;
 };
 
-struct inittab m88rs2000_setup[] = {
+static struct inittab m88rs2000_setup[] = {
 	{DEMOD_WRITE, 0x9a, 0x30},
 	{DEMOD_WRITE, 0x00, 0x01},
 	{WRITE_DELAY, 0x19, 0x00},
@@ -315,7 +315,7 @@ struct inittab m88rs2000_setup[] = {
 	{0xff, 0xaa, 0xff}
 };
 
-struct inittab m88rs2000_shutdown[] = {
+static struct inittab m88rs2000_shutdown[] = {
 	{DEMOD_WRITE, 0x9a, 0x30},
 	{DEMOD_WRITE, 0xb0, 0x00},
 	{DEMOD_WRITE, 0xf1, 0x89},
@@ -325,7 +325,7 @@ struct inittab m88rs2000_shutdown[] = {
 	{0xff, 0xaa, 0xff}
 };
 
-struct inittab fe_reset[] = {
+static struct inittab fe_reset[] = {
 	{DEMOD_WRITE, 0x00, 0x01},
 	{DEMOD_WRITE, 0x20, 0x81},
 	{DEMOD_WRITE, 0x21, 0x80},
@@ -363,7 +363,7 @@ struct inittab fe_reset[] = {
 	{0xff, 0xaa, 0xff}
 };
 
-struct inittab fe_trigger[] = {
+static struct inittab fe_trigger[] = {
 	{DEMOD_WRITE, 0x97, 0x04},
 	{DEMOD_WRITE, 0x99, 0x77},
 	{DEMOD_WRITE, 0x9b, 0x64},
diff --git a/drivers/media/platform/ti-vpe/vpe.c b/drivers/media/platform/ti-vpe/vpe.c
index 7a77a5b7a075..5c421886d97c 100644
--- a/drivers/media/platform/ti-vpe/vpe.c
+++ b/drivers/media/platform/ti-vpe/vpe.c
@@ -49,8 +49,8 @@
 #define VPE_MODULE_NAME "vpe"
 
 /* minimum and maximum frame sizes */
-#define MIN_W		128
-#define MIN_H		128
+#define MIN_W		32
+#define MIN_H		32
 #define MAX_W		1920
 #define MAX_H		1080
 
@@ -887,6 +887,9 @@ static int job_ready(void *priv)
 	if (v4l2_m2m_num_src_bufs_ready(ctx->m2m_ctx) < needed)
 		return 0;
 
+	if (v4l2_m2m_num_dst_bufs_ready(ctx->m2m_ctx) < needed)
+		return 0;
+
 	return 1;
 }
 
@@ -1277,18 +1280,17 @@ static irqreturn_t vpe_irq(int irq_vpe, void *data)
 	s_buf = &s_vb->v4l2_buf;
 	d_buf = &d_vb->v4l2_buf;
 
+	d_buf->flags = s_buf->flags;
+
 	d_buf->timestamp = s_buf->timestamp;
-	d_buf->flags &= ~V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
-	d_buf->flags |= s_buf->flags & V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
-	if (s_buf->flags & V4L2_BUF_FLAG_TIMECODE) {
-		d_buf->flags |= V4L2_BUF_FLAG_TIMECODE;
+	if (s_buf->flags & V4L2_BUF_FLAG_TIMECODE)
 		d_buf->timecode = s_buf->timecode;
-	}
+
 	d_buf->sequence = ctx->sequence;
-	d_buf->field = ctx->field;
 
 	d_q_data = &ctx->q_data[Q_DATA_DST];
 	if (d_q_data->flags & Q_DATA_INTERLACED) {
+		d_buf->field = ctx->field;
 		if (ctx->field == V4L2_FIELD_BOTTOM) {
 			ctx->sequence++;
 			ctx->field = V4L2_FIELD_TOP;
@@ -1297,6 +1299,7 @@ static irqreturn_t vpe_irq(int irq_vpe, void *data)
 			ctx->field = V4L2_FIELD_BOTTOM;
 		}
 	} else {
+		d_buf->field = V4L2_FIELD_NONE;
 		ctx->sequence++;
 	}
 
@@ -1335,8 +1338,9 @@ static int vpe_querycap(struct file *file, void *priv,
 {
 	strncpy(cap->driver, VPE_MODULE_NAME, sizeof(cap->driver) - 1);
 	strncpy(cap->card, VPE_MODULE_NAME, sizeof(cap->card) - 1);
-	strlcpy(cap->bus_info, VPE_MODULE_NAME, sizeof(cap->bus_info));
-	cap->device_caps = V4L2_CAP_VIDEO_M2M | V4L2_CAP_STREAMING;
+	snprintf(cap->bus_info, sizeof(cap->bus_info), "platform:%s",
+		VPE_MODULE_NAME);
+	cap->device_caps = V4L2_CAP_VIDEO_M2M_MPLANE | V4L2_CAP_STREAMING;
 	cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
 	return 0;
 }
@@ -1476,6 +1480,7 @@ static int __vpe_try_fmt(struct vpe_ctx *ctx, struct v4l2_format *f,
 		}
 	}
 
+	memset(pix->reserved, 0, sizeof(pix->reserved));
 	for (i = 0; i < pix->num_planes; i++) {
 		plane_fmt = &pix->plane_fmt[i];
 		depth = fmt->vpdma_fmt[i]->depth;
@@ -1487,6 +1492,8 @@ static int __vpe_try_fmt(struct vpe_ctx *ctx, struct v4l2_format *f,
 
 		plane_fmt->sizeimage =
 				(pix->height * pix->width * depth) >> 3;
+
+		memset(plane_fmt->reserved, 0, sizeof(plane_fmt->reserved));
 	}
 
 	return 0;
@@ -1717,6 +1724,16 @@ static int vpe_buf_prepare(struct vb2_buffer *vb)
 	q_data = get_q_data(ctx, vb->vb2_queue->type);
 	num_planes = q_data->fmt->coplanar ? 2 : 1;
 
+	if (vb->vb2_queue->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
+		if (!(q_data->flags & Q_DATA_INTERLACED)) {
+			vb->v4l2_buf.field = V4L2_FIELD_NONE;
+		} else {
+			if (vb->v4l2_buf.field != V4L2_FIELD_TOP &&
+					vb->v4l2_buf.field != V4L2_FIELD_BOTTOM)
+				return -EINVAL;
+		}
+	}
+
 	for (i = 0; i < num_planes; i++) {
 		if (vb2_plane_size(vb, i) < q_data->sizeimage[i]) {
 			vpe_err(ctx->dev,
@@ -1866,9 +1883,11 @@ static int vpe_open(struct file *file)
 	s_q_data->fmt = &vpe_formats[2];
 	s_q_data->width = 1920;
 	s_q_data->height = 1080;
-	s_q_data->sizeimage[VPE_LUMA] = (s_q_data->width * s_q_data->height *
-			s_q_data->fmt->vpdma_fmt[VPE_LUMA]->depth) >> 3;
-	s_q_data->colorspace = V4L2_COLORSPACE_SMPTE170M;
+	s_q_data->bytesperline[VPE_LUMA] = (s_q_data->width *
+			s_q_data->fmt->vpdma_fmt[VPE_LUMA]->depth) >> 3;
+	s_q_data->sizeimage[VPE_LUMA] = (s_q_data->bytesperline[VPE_LUMA] *
+			s_q_data->height);
+	s_q_data->colorspace = V4L2_COLORSPACE_REC709;
 	s_q_data->field = V4L2_FIELD_NONE;
 	s_q_data->c_rect.left = 0;
 	s_q_data->c_rect.top = 0;
@@ -2002,7 +2021,7 @@ static struct video_device vpe_videodev = {
 	.fops		= &vpe_fops,
 	.ioctl_ops	= &vpe_ioctl_ops,
 	.minor		= -1,
-	.release	= video_device_release,
+	.release	= video_device_release_empty,
 	.vfl_dir	= VFL_DIR_M2M,
 };
 
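The vpe_open() hunk above now derives the default plane size from an explicit bytesperline value instead of folding width, height and depth into one expression. A rough standalone check of that arithmetic (plain C, not driver code; the 1920x1080 size mirrors the defaults set in the hunk, and the 8-bit luma depth is an assumption about the chosen format):

#include <stdio.h>

/* Mirrors the driver's math: bytes per line from width and bit depth,
 * then plane size from bytes per line and height. */
static unsigned int bytesperline(unsigned int width, unsigned int depth_bits)
{
    return (width * depth_bits) >> 3;
}

int main(void)
{
    unsigned int width = 1920, height = 1080, depth = 8; /* assumed luma plane */
    unsigned int bpl = bytesperline(width, depth);
    unsigned int sizeimage = bpl * height;

    /* Expect bytesperline=1920, sizeimage=2073600 for the values above */
    printf("bytesperline=%u sizeimage=%u\n", bpl, sizeimage);
    return 0;
}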
diff --git a/drivers/media/rc/img-ir/img-ir-hw.c b/drivers/media/rc/img-ir/img-ir-hw.c
index 579a52b3edce..0127dd257a57 100644
--- a/drivers/media/rc/img-ir/img-ir-hw.c
+++ b/drivers/media/rc/img-ir/img-ir-hw.c
@@ -504,6 +504,18 @@ unlock:
 	return ret;
 }
 
+static int img_ir_set_normal_filter(struct rc_dev *dev,
+				    struct rc_scancode_filter *sc_filter)
+{
+	return img_ir_set_filter(dev, RC_FILTER_NORMAL, sc_filter);
+}
+
+static int img_ir_set_wakeup_filter(struct rc_dev *dev,
+				    struct rc_scancode_filter *sc_filter)
+{
+	return img_ir_set_filter(dev, RC_FILTER_WAKEUP, sc_filter);
+}
+
 /**
  * img_ir_set_decoder() - Set the current decoder.
  * @priv: IR private data.
@@ -986,7 +998,8 @@ int img_ir_probe_hw(struct img_ir_priv *priv)
 	rdev->map_name = RC_MAP_EMPTY;
 	rc_set_allowed_protocols(rdev, img_ir_allowed_protos(priv));
 	rdev->input_name = "IMG Infrared Decoder";
-	rdev->s_filter = img_ir_set_filter;
+	rdev->s_filter = img_ir_set_normal_filter;
+	rdev->s_wakeup_filter = img_ir_set_wakeup_filter;
 
 	/* Register hardware decoder */
 	error = rc_register_device(rdev);
diff --git a/drivers/media/rc/img-ir/img-ir-nec.c b/drivers/media/rc/img-ir/img-ir-nec.c
index e7a731bc3a9b..751d9d945269 100644
--- a/drivers/media/rc/img-ir/img-ir-nec.c
+++ b/drivers/media/rc/img-ir/img-ir-nec.c
@@ -5,6 +5,7 @@
  */
 
 #include "img-ir-hw.h"
+#include <linux/bitrev.h>
 
 /* Convert NEC data to a scancode */
 static int img_ir_nec_scancode(int len, u64 raw, int *scancode, u64 protocols)
@@ -22,11 +23,11 @@ static int img_ir_nec_scancode(int len, u64 raw, int *scancode, u64 protocols)
 	data_inv = (raw >> 24) & 0xff;
 	if ((data_inv ^ data) != 0xff) {
 		/* 32-bit NEC (used by Apple and TiVo remotes) */
-		/* scan encoding: aaAAddDD */
-		*scancode = addr_inv << 24 |
-			    addr     << 16 |
-			    data_inv <<  8 |
-			    data;
+		/* scan encoding: as transmitted, MSBit = first received bit */
+		*scancode = bitrev8(addr)     << 24 |
+			    bitrev8(addr_inv) << 16 |
+			    bitrev8(data)     <<  8 |
+			    bitrev8(data_inv);
 	} else if ((addr_inv ^ addr) != 0xff) {
 		/* Extended NEC */
 		/* scan encoding: AAaaDD */
@@ -54,13 +55,15 @@ static int img_ir_nec_filter(const struct rc_scancode_filter *in,
 
 	if ((in->data | in->mask) & 0xff000000) {
 		/* 32-bit NEC (used by Apple and TiVo remotes) */
-		/* scan encoding: aaAAddDD */
-		addr_inv   = (in->data >> 24) & 0xff;
-		addr_inv_m = (in->mask >> 24) & 0xff;
-		addr       = (in->data >> 16) & 0xff;
-		addr_m     = (in->mask >> 16) & 0xff;
-		data_inv   = (in->data >>  8) & 0xff;
-		data_inv_m = (in->mask >>  8) & 0xff;
+		/* scan encoding: as transmitted, MSBit = first received bit */
+		addr       = bitrev8(in->data >> 24);
+		addr_m     = bitrev8(in->mask >> 24);
+		addr_inv   = bitrev8(in->data >> 16);
+		addr_inv_m = bitrev8(in->mask >> 16);
+		data       = bitrev8(in->data >>  8);
+		data_m     = bitrev8(in->mask >>  8);
+		data_inv   = bitrev8(in->data >>  0);
+		data_inv_m = bitrev8(in->mask >>  0);
 	} else if ((in->data | in->mask) & 0x00ff0000) {
 		/* Extended NEC */
 		/* scan encoding AAaaDD */
diff --git a/drivers/media/rc/ir-nec-decoder.c b/drivers/media/rc/ir-nec-decoder.c
index 9de1791d2494..35c42e5e270b 100644
--- a/drivers/media/rc/ir-nec-decoder.c
+++ b/drivers/media/rc/ir-nec-decoder.c
@@ -172,10 +172,7 @@ static int ir_nec_decode(struct rc_dev *dev, struct ir_raw_event ev)
 	if (send_32bits) {
 		/* NEC transport, but modified protocol, used by at
 		 * least Apple and TiVo remotes */
-		scancode = not_address << 24 |
-			   address     << 16 |
-			   not_command <<  8 |
-			   command;
+		scancode = data->bits;
 		IR_dprintk(1, "NEC (modified) scancode 0x%08x\n", scancode);
 	} else if ((address ^ not_address) != 0xff) {
 		/* Extended NEC */
diff --git a/drivers/media/rc/keymaps/rc-tivo.c b/drivers/media/rc/keymaps/rc-tivo.c
index 5cc1b456e329..454e06295692 100644
--- a/drivers/media/rc/keymaps/rc-tivo.c
+++ b/drivers/media/rc/keymaps/rc-tivo.c
@@ -15,62 +15,62 @@
  * Initial mapping is for the TiVo remote included in the Nero LiquidTV bundle,
  * which also ships with a TiVo-branded IR transceiver, supported by the mceusb
  * driver. Note that the remote uses an NEC-ish protocol, but instead of having
- * a command/not_command pair, it has a vendor ID of 0x3085, but some keys, the
+ * a command/not_command pair, it has a vendor ID of 0xa10c, but some keys, the
  * NEC extended checksums do pass, so the table presently has the intended
  * values and the checksum-passed versions for those keys.
  */
 static struct rc_map_table tivo[] = {
-	{ 0x3085f009, KEY_MEDIA },	/* TiVo Button */
-	{ 0x3085e010, KEY_POWER2 },	/* TV Power */
-	{ 0x3085e011, KEY_TV },		/* Live TV/Swap */
-	{ 0x3085c034, KEY_VIDEO_NEXT },	/* TV Input */
-	{ 0x3085e013, KEY_INFO },
-	{ 0x3085a05f, KEY_CYCLEWINDOWS }, /* Window */
+	{ 0xa10c900f, KEY_MEDIA },	/* TiVo Button */
+	{ 0xa10c0807, KEY_POWER2 },	/* TV Power */
+	{ 0xa10c8807, KEY_TV },		/* Live TV/Swap */
+	{ 0xa10c2c03, KEY_VIDEO_NEXT },	/* TV Input */
+	{ 0xa10cc807, KEY_INFO },
+	{ 0xa10cfa05, KEY_CYCLEWINDOWS }, /* Window */
 	{ 0x0085305f, KEY_CYCLEWINDOWS },
-	{ 0x3085c036, KEY_EPG },	/* Guide */
+	{ 0xa10c6c03, KEY_EPG },	/* Guide */
 
-	{ 0x3085e014, KEY_UP },
-	{ 0x3085e016, KEY_DOWN },
-	{ 0x3085e017, KEY_LEFT },
-	{ 0x3085e015, KEY_RIGHT },
+	{ 0xa10c2807, KEY_UP },
+	{ 0xa10c6807, KEY_DOWN },
+	{ 0xa10ce807, KEY_LEFT },
+	{ 0xa10ca807, KEY_RIGHT },
 
-	{ 0x3085e018, KEY_SCROLLDOWN },	/* Red Thumbs Down */
-	{ 0x3085e019, KEY_SELECT },
-	{ 0x3085e01a, KEY_SCROLLUP },	/* Green Thumbs Up */
+	{ 0xa10c1807, KEY_SCROLLDOWN },	/* Red Thumbs Down */
+	{ 0xa10c9807, KEY_SELECT },
+	{ 0xa10c5807, KEY_SCROLLUP },	/* Green Thumbs Up */
 
-	{ 0x3085e01c, KEY_VOLUMEUP },
-	{ 0x3085e01d, KEY_VOLUMEDOWN },
-	{ 0x3085e01b, KEY_MUTE },
-	{ 0x3085d020, KEY_RECORD },
-	{ 0x3085e01e, KEY_CHANNELUP },
-	{ 0x3085e01f, KEY_CHANNELDOWN },
+	{ 0xa10c3807, KEY_VOLUMEUP },
+	{ 0xa10cb807, KEY_VOLUMEDOWN },
+	{ 0xa10cd807, KEY_MUTE },
+	{ 0xa10c040b, KEY_RECORD },
+	{ 0xa10c7807, KEY_CHANNELUP },
+	{ 0xa10cf807, KEY_CHANNELDOWN },
 	{ 0x0085301f, KEY_CHANNELDOWN },
 
-	{ 0x3085d021, KEY_PLAY },
-	{ 0x3085d023, KEY_PAUSE },
-	{ 0x3085d025, KEY_SLOW },
-	{ 0x3085d022, KEY_REWIND },
-	{ 0x3085d024, KEY_FASTFORWARD },
-	{ 0x3085d026, KEY_PREVIOUS },
-	{ 0x3085d027, KEY_NEXT },	/* ->| */
+	{ 0xa10c840b, KEY_PLAY },
+	{ 0xa10cc40b, KEY_PAUSE },
+	{ 0xa10ca40b, KEY_SLOW },
+	{ 0xa10c440b, KEY_REWIND },
+	{ 0xa10c240b, KEY_FASTFORWARD },
+	{ 0xa10c640b, KEY_PREVIOUS },
+	{ 0xa10ce40b, KEY_NEXT },	/* ->| */
 
-	{ 0x3085b044, KEY_ZOOM },	/* Aspect */
-	{ 0x3085b048, KEY_STOP },
-	{ 0x3085b04a, KEY_DVD },	/* DVD Menu */
+	{ 0xa10c220d, KEY_ZOOM },	/* Aspect */
+	{ 0xa10c120d, KEY_STOP },
+	{ 0xa10c520d, KEY_DVD },	/* DVD Menu */
 
-	{ 0x3085d028, KEY_NUMERIC_1 },
-	{ 0x3085d029, KEY_NUMERIC_2 },
-	{ 0x3085d02a, KEY_NUMERIC_3 },
-	{ 0x3085d02b, KEY_NUMERIC_4 },
-	{ 0x3085d02c, KEY_NUMERIC_5 },
-	{ 0x3085d02d, KEY_NUMERIC_6 },
-	{ 0x3085d02e, KEY_NUMERIC_7 },
-	{ 0x3085d02f, KEY_NUMERIC_8 },
+	{ 0xa10c140b, KEY_NUMERIC_1 },
+	{ 0xa10c940b, KEY_NUMERIC_2 },
+	{ 0xa10c540b, KEY_NUMERIC_3 },
+	{ 0xa10cd40b, KEY_NUMERIC_4 },
+	{ 0xa10c340b, KEY_NUMERIC_5 },
+	{ 0xa10cb40b, KEY_NUMERIC_6 },
+	{ 0xa10c740b, KEY_NUMERIC_7 },
+	{ 0xa10cf40b, KEY_NUMERIC_8 },
 	{ 0x0085302f, KEY_NUMERIC_8 },
-	{ 0x3085c030, KEY_NUMERIC_9 },
-	{ 0x3085c031, KEY_NUMERIC_0 },
-	{ 0x3085c033, KEY_ENTER },
-	{ 0x3085c032, KEY_CLEAR },
+	{ 0xa10c0c03, KEY_NUMERIC_9 },
+	{ 0xa10c8c03, KEY_NUMERIC_0 },
+	{ 0xa10ccc03, KEY_ENTER },
+	{ 0xa10c4c03, KEY_CLEAR },
 };
 
 static struct rc_map_list tivo_map = {
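The NEC changes above switch the 32-bit scancode layout from the old aaAAddDD packing to the bytes as transmitted, with each byte bit-reversed (the kernel uses bitrev8() from <linux/bitrev.h>). A standalone sketch of that conversion, with a local bit-reversal helper standing in for the kernel one; the TiVo-button value checks out against the keymap update above (old 0x3085f009 becomes new 0xa10c900f):

#include <stdio.h>
#include <stdint.h>

/* Local stand-in for the kernel's bitrev8(): reverse the bits of one byte. */
static uint8_t bitrev8(uint8_t x)
{
    uint8_t r = 0;
    for (int i = 0; i < 8; i++)
        if (x & (1u << i))
            r |= 1u << (7 - i);
    return r;
}

/* Convert an old-style aaAAddDD 32-bit NEC scancode to the new
 * "as transmitted, MSBit = first received bit" encoding. */
static uint32_t nec32_old_to_new(uint32_t old)
{
    uint8_t addr_inv = old >> 24, addr = old >> 16;
    uint8_t data_inv = old >> 8, data = old;

    return (uint32_t)bitrev8(addr) << 24 | (uint32_t)bitrev8(addr_inv) << 16 |
           (uint32_t)bitrev8(data) << 8 | bitrev8(data_inv);
}

int main(void)
{
    /* TiVo button: old table had 0x3085f009, new table has 0xa10c900f */
    printf("0x%08x\n", nec32_old_to_new(0x3085f009));
    return 0;
}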
diff --git a/drivers/media/rc/rc-main.c b/drivers/media/rc/rc-main.c
index 99697aae92ff..970b93d6f399 100644
--- a/drivers/media/rc/rc-main.c
+++ b/drivers/media/rc/rc-main.c
@@ -633,19 +633,13 @@ EXPORT_SYMBOL_GPL(rc_repeat);
 static void ir_do_keydown(struct rc_dev *dev, int scancode,
 			  u32 keycode, u8 toggle)
 {
-	struct rc_scancode_filter *filter;
-	bool new_event = !dev->keypressed ||
-			 dev->last_scancode != scancode ||
-			 dev->last_toggle != toggle;
+	bool new_event = (!dev->keypressed ||
+			  dev->last_scancode != scancode ||
+			  dev->last_toggle != toggle);
 
 	if (new_event && dev->keypressed)
 		ir_do_keyup(dev, false);
 
-	/* Generic scancode filtering */
-	filter = &dev->scancode_filters[RC_FILTER_NORMAL];
-	if (filter->mask && ((scancode ^ filter->data) & filter->mask))
-		return;
-
 	input_event(dev->input_dev, EV_MSC, MSC_SCAN, scancode);
 
 	if (new_event && keycode != KEY_RESERVED) {
@@ -923,6 +917,7 @@ static ssize_t store_protocols(struct device *device,
 	int rc, i, count = 0;
 	ssize_t ret;
 	int (*change_protocol)(struct rc_dev *dev, u64 *rc_type);
+	int (*set_filter)(struct rc_dev *dev, struct rc_scancode_filter *filter);
 	struct rc_scancode_filter local_filter, *filter;
 
 	/* Device is being removed */
@@ -1007,24 +1002,23 @@ static ssize_t store_protocols(struct device *device,
 	 * Fall back to clearing the filter.
 	 */
 	filter = &dev->scancode_filters[fattr->type];
-	if (old_type != type && filter->mask) {
+	set_filter = (fattr->type == RC_FILTER_NORMAL)
+		? dev->s_filter : dev->s_wakeup_filter;
+
+	if (set_filter && old_type != type && filter->mask) {
 		local_filter = *filter;
 		if (!type) {
 			/* no protocol => clear filter */
 			ret = -1;
-		} else if (!dev->s_filter) {
-			/* generic filtering => accept any filter */
-			ret = 0;
 		} else {
 			/* hardware filtering => try setting, otherwise clear */
-			ret = dev->s_filter(dev, fattr->type, &local_filter);
+			ret = set_filter(dev, &local_filter);
 		}
 		if (ret < 0) {
 			/* clear the filter */
 			local_filter.data = 0;
 			local_filter.mask = 0;
-			if (dev->s_filter)
-				dev->s_filter(dev, fattr->type, &local_filter);
+			set_filter(dev, &local_filter);
 		}
 
 		/* commit the new filter */
@@ -1068,7 +1062,10 @@ static ssize_t show_filter(struct device *device,
 		return -EINVAL;
 
 	mutex_lock(&dev->lock);
-	if (fattr->mask)
+	if ((fattr->type == RC_FILTER_NORMAL && !dev->s_filter) ||
+	    (fattr->type == RC_FILTER_WAKEUP && !dev->s_wakeup_filter))
+		val = 0;
+	else if (fattr->mask)
 		val = dev->scancode_filters[fattr->type].mask;
 	else
 		val = dev->scancode_filters[fattr->type].data;
@@ -1106,6 +1103,7 @@ static ssize_t store_filter(struct device *device,
 	struct rc_scancode_filter local_filter, *filter;
 	int ret;
 	unsigned long val;
+	int (*set_filter)(struct rc_dev *dev, struct rc_scancode_filter *filter);
 
 	/* Device is being removed */
 	if (!dev)
@@ -1115,9 +1113,11 @@ static ssize_t store_filter(struct device *device,
 	if (ret < 0)
 		return ret;
 
-	/* Scancode filter not supported (but still accept 0) */
-	if (!dev->s_filter && fattr->type != RC_FILTER_NORMAL)
-		return val ? -EINVAL : count;
+	/* Can the scancode filter be set? */
+	set_filter = (fattr->type == RC_FILTER_NORMAL) ? dev->s_filter :
+		     dev->s_wakeup_filter;
+	if (!set_filter)
+		return -EINVAL;
 
 	mutex_lock(&dev->lock);
 
@@ -1128,16 +1128,16 @@ static ssize_t store_filter(struct device *device,
 		local_filter.mask = val;
 	else
 		local_filter.data = val;
+
 	if (!dev->enabled_protocols[fattr->type] && local_filter.mask) {
 		/* refuse to set a filter unless a protocol is enabled */
 		ret = -EINVAL;
 		goto unlock;
 	}
-	if (dev->s_filter) {
-		ret = dev->s_filter(dev, fattr->type, &local_filter);
+
+	ret = set_filter(dev, &local_filter);
 	if (ret < 0)
 		goto unlock;
-	}
 
 	/* Success, commit the new filter */
 	*filter = local_filter;
@@ -1189,27 +1189,45 @@ static RC_FILTER_ATTR(wakeup_filter, S_IRUGO|S_IWUSR,
 static RC_FILTER_ATTR(wakeup_filter_mask, S_IRUGO|S_IWUSR,
 		      show_filter, store_filter, RC_FILTER_WAKEUP, true);
 
-static struct attribute *rc_dev_attrs[] = {
+static struct attribute *rc_dev_protocol_attrs[] = {
 	&dev_attr_protocols.attr.attr,
+	NULL,
+};
+
+static struct attribute_group rc_dev_protocol_attr_grp = {
+	.attrs = rc_dev_protocol_attrs,
+};
+
+static struct attribute *rc_dev_wakeup_protocol_attrs[] = {
 	&dev_attr_wakeup_protocols.attr.attr,
+	NULL,
+};
+
+static struct attribute_group rc_dev_wakeup_protocol_attr_grp = {
+	.attrs = rc_dev_wakeup_protocol_attrs,
+};
+
+static struct attribute *rc_dev_filter_attrs[] = {
 	&dev_attr_filter.attr.attr,
 	&dev_attr_filter_mask.attr.attr,
-	&dev_attr_wakeup_filter.attr.attr,
-	&dev_attr_wakeup_filter_mask.attr.attr,
 	NULL,
 };
 
-static struct attribute_group rc_dev_attr_grp = {
-	.attrs = rc_dev_attrs,
+static struct attribute_group rc_dev_filter_attr_grp = {
+	.attrs = rc_dev_filter_attrs,
 };
 
-static const struct attribute_group *rc_dev_attr_groups[] = {
-	&rc_dev_attr_grp,
-	NULL
+static struct attribute *rc_dev_wakeup_filter_attrs[] = {
+	&dev_attr_wakeup_filter.attr.attr,
+	&dev_attr_wakeup_filter_mask.attr.attr,
+	NULL,
+};
+
+static struct attribute_group rc_dev_wakeup_filter_attr_grp = {
+	.attrs = rc_dev_wakeup_filter_attrs,
 };
 
 static struct device_type rc_dev_type = {
-	.groups		= rc_dev_attr_groups,
 	.release	= rc_dev_release,
 	.uevent		= rc_dev_uevent,
 };
@@ -1266,7 +1284,7 @@ int rc_register_device(struct rc_dev *dev)
 	static bool raw_init = false; /* raw decoders loaded? */
 	struct rc_map *rc_map;
 	const char *path;
-	int rc, devno;
+	int rc, devno, attr = 0;
 
 	if (!dev || !dev->map_name)
 		return -EINVAL;
@@ -1294,6 +1312,16 @@ int rc_register_device(struct rc_dev *dev)
 			return -ENOMEM;
 	} while (test_and_set_bit(devno, ir_core_dev_number));
 
+	dev->dev.groups = dev->sysfs_groups;
+	dev->sysfs_groups[attr++] = &rc_dev_protocol_attr_grp;
+	if (dev->s_filter)
+		dev->sysfs_groups[attr++] = &rc_dev_filter_attr_grp;
+	if (dev->s_wakeup_filter)
+		dev->sysfs_groups[attr++] = &rc_dev_wakeup_filter_attr_grp;
+	if (dev->change_wakeup_protocol)
+		dev->sysfs_groups[attr++] = &rc_dev_wakeup_protocol_attr_grp;
+	dev->sysfs_groups[attr++] = NULL;
+
 	/*
 	 * Take the lock here, as the device sysfs node will appear
 	 * when device_add() is called, which may trigger an ir-keytable udev
diff --git a/drivers/media/tuners/r820t.c b/drivers/media/tuners/r820t.c
index 319adc4f0561..96ccfebce7ca 100644
--- a/drivers/media/tuners/r820t.c
+++ b/drivers/media/tuners/r820t.c
@@ -1468,7 +1468,8 @@ static int r820t_imr_prepare(struct r820t_priv *priv)
 static int r820t_multi_read(struct r820t_priv *priv)
 {
 	int rc, i;
-	u8 data[2], min = 0, max = 255, sum = 0;
+	u16 sum = 0;
+	u8 data[2], min = 255, max = 0;
 
 	usleep_range(5000, 6000);
 
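The r820t fix above is the classic running-min/max initialization bug: a minimum scan has to start from the largest possible value (and the maximum from the smallest), and an 8-bit accumulator can overflow when several byte-sized readings are summed. A minimal standalone illustration of the corrected pattern (not driver code):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
    uint8_t samples[] = { 40, 200, 120, 90 };
    uint8_t min = 255, max = 0; /* start min high and max low, as in the fix */
    uint16_t sum = 0;           /* wide enough that four u8 values cannot wrap */

    for (unsigned i = 0; i < sizeof(samples); i++) {
        uint8_t v = samples[i];
        sum += v;
        if (v < min)
            min = v;
        if (v > max)
            max = v;
    }
    /* prints min=40 max=200 sum=450; a u8 sum would have wrapped to 194 */
    printf("min=%u max=%u sum=%u\n", min, max, sum);
    return 0;
}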
diff --git a/drivers/media/tuners/tuner-xc2028.c b/drivers/media/tuners/tuner-xc2028.c
index 76a816511f2f..6ef93ee1fdcb 100644
--- a/drivers/media/tuners/tuner-xc2028.c
+++ b/drivers/media/tuners/tuner-xc2028.c
@@ -1107,6 +1107,7 @@ static int generic_set_freq(struct dvb_frontend *fe, u32 freq /* in HZ */,
 			offset += 200000;
 		}
 #endif
+		break;
 	default:
 		tuner_err("Unsupported tuner type %d.\n", new_type);
 		break;
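The one-line xc2028 change above adds a break so a successfully handled tuner type no longer falls through into the default: branch and gets reported as unsupported. A tiny standalone demonstration of the same fallthrough pattern (hypothetical values, not the driver's types):

#include <stdio.h>

int main(void)
{
    int type = 1;
    int err = 0;

    switch (type) {
    case 1:
        /* work for the supported type goes here */
        break; /* without this break, execution falls into "default"
                * and flags a supported type as an error */
    default:
        err = 1;
        printf("Unsupported type %d\n", type);
        break;
    }

    printf("err=%d\n", err); /* prints err=0 with the break in place */
    return 0;
}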
diff --git a/drivers/media/usb/dvb-usb-v2/rtl28xxu.c b/drivers/media/usb/dvb-usb-v2/rtl28xxu.c
index c83c16cece01..61d196e8b3ab 100644
--- a/drivers/media/usb/dvb-usb-v2/rtl28xxu.c
+++ b/drivers/media/usb/dvb-usb-v2/rtl28xxu.c
@@ -1503,8 +1503,6 @@ static const struct usb_device_id rtl28xxu_id_table[] = {
 	/* RTL2832P devices: */
 	{ DVB_USB_DEVICE(USB_VID_HANFTEK, 0x0131,
 		&rtl2832u_props, "Astrometa DVB-T2", NULL) },
-	{ DVB_USB_DEVICE(USB_VID_KYE, 0x707f,
-		&rtl2832u_props, "Genius TVGo DVB-T03", NULL) },
 	{ }
 };
 MODULE_DEVICE_TABLE(usb, rtl28xxu_id_table);
diff --git a/drivers/media/usb/gspca/jpeg.h b/drivers/media/usb/gspca/jpeg.h
index ab54910418b4..0aa2b671faa4 100644
--- a/drivers/media/usb/gspca/jpeg.h
+++ b/drivers/media/usb/gspca/jpeg.h
@@ -154,7 +154,9 @@ static void jpeg_set_qual(u8 *jpeg_hdr,
 {
 	int i, sc;
 
-	if (quality < 50)
+	if (quality <= 0)
+		sc = 5000;
+	else if (quality < 50)
 		sc = 5000 / quality;
 	else
 		sc = 200 - quality * 2;
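The jpeg_set_qual() guard above avoids a divide-by-zero when the requested quality is 0 (or negative) by clamping to the maximum libjpeg-style scale factor of 5000. A standalone version of just that scale-factor computation:

#include <stdio.h>

/* Quality (nominally 1..100) to scale factor, with the <=0 clamp from the fix. */
static int jpeg_scale_factor(int quality)
{
    if (quality <= 0)
        return 5000;
    else if (quality < 50)
        return 5000 / quality;
    else
        return 200 - quality * 2;
}

int main(void)
{
    printf("q=0 -> %d, q=25 -> %d, q=75 -> %d\n",
           jpeg_scale_factor(0), jpeg_scale_factor(25), jpeg_scale_factor(75));
    /* prints: q=0 -> 5000, q=25 -> 200, q=75 -> 50 */
    return 0;
}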
diff --git a/drivers/media/usb/stk1160/stk1160-ac97.c b/drivers/media/usb/stk1160/stk1160-ac97.c
index c46c8be89602..2dd308f9541f 100644
--- a/drivers/media/usb/stk1160/stk1160-ac97.c
+++ b/drivers/media/usb/stk1160/stk1160-ac97.c
@@ -108,7 +108,7 @@ int stk1160_ac97_register(struct stk1160 *dev)
 		 "stk1160-mixer");
 	snprintf(card->longname, sizeof(card->longname),
 		 "stk1160 ac97 codec mixer control");
-	strncpy(card->driver, dev->dev->driver->name, sizeof(card->driver));
+	strlcpy(card->driver, dev->dev->driver->name, sizeof(card->driver));
 
 	rc = snd_ac97_bus(card, 0, &stk1160_ac97_ops, NULL, &ac97_bus);
 	if (rc)
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index d9f85464b362..69aff72c8957 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -4492,6 +4492,7 @@ static int __init bonding_init(void)
 out:
 	return res;
 err:
+	bond_destroy_debugfs();
 	bond_netlink_fini();
 err_link:
 	unregister_pernet_subsys(&bond_net_ops);
diff --git a/drivers/net/ethernet/8390/apne.c b/drivers/net/ethernet/8390/apne.c
index 30104b60da85..c56ac9ebc08f 100644
--- a/drivers/net/ethernet/8390/apne.c
+++ b/drivers/net/ethernet/8390/apne.c
@@ -560,9 +560,7 @@ static struct net_device *apne_dev;
 static int __init apne_module_init(void)
 {
 	apne_dev = apne_probe(-1);
-	if (IS_ERR(apne_dev))
-		return PTR_ERR(apne_dev);
-	return 0;
+	return PTR_ERR_OR_ZERO(apne_dev);
 }
 
 static void __exit apne_module_exit(void)
diff --git a/drivers/net/ethernet/allwinner/sun4i-emac.c b/drivers/net/ethernet/allwinner/sun4i-emac.c
index fcaeeb8a4929..28460676b8ca 100644
--- a/drivers/net/ethernet/allwinner/sun4i-emac.c
+++ b/drivers/net/ethernet/allwinner/sun4i-emac.c
@@ -268,15 +268,6 @@ static unsigned int emac_setup(struct net_device *ndev)
 	writel(reg_val | EMAC_TX_MODE_ABORTED_FRAME_EN,
 		db->membase + EMAC_TX_MODE_REG);
 
-	/* set up RX */
-	reg_val = readl(db->membase + EMAC_RX_CTL_REG);
-
-	writel(reg_val | EMAC_RX_CTL_PASS_LEN_OOR_EN |
-		EMAC_RX_CTL_ACCEPT_UNICAST_EN | EMAC_RX_CTL_DA_FILTER_EN |
-		EMAC_RX_CTL_ACCEPT_MULTICAST_EN |
-		EMAC_RX_CTL_ACCEPT_BROADCAST_EN,
-		db->membase + EMAC_RX_CTL_REG);
-
 	/* set MAC */
 	/* set MAC CTL0 */
 	reg_val = readl(db->membase + EMAC_MAC_CTL0_REG);
@@ -309,6 +300,26 @@ static unsigned int emac_setup(struct net_device *ndev)
 	return 0;
 }
 
+static void emac_set_rx_mode(struct net_device *ndev)
+{
+	struct emac_board_info *db = netdev_priv(ndev);
+	unsigned int reg_val;
+
+	/* set up RX */
+	reg_val = readl(db->membase + EMAC_RX_CTL_REG);
+
+	if (ndev->flags & IFF_PROMISC)
+		reg_val |= EMAC_RX_CTL_PASS_ALL_EN;
+	else
+		reg_val &= ~EMAC_RX_CTL_PASS_ALL_EN;
+
+	writel(reg_val | EMAC_RX_CTL_PASS_LEN_OOR_EN |
+		EMAC_RX_CTL_ACCEPT_UNICAST_EN | EMAC_RX_CTL_DA_FILTER_EN |
+		EMAC_RX_CTL_ACCEPT_MULTICAST_EN |
+		EMAC_RX_CTL_ACCEPT_BROADCAST_EN,
+		db->membase + EMAC_RX_CTL_REG);
+}
+
 static unsigned int emac_powerup(struct net_device *ndev)
 {
 	struct emac_board_info *db = netdev_priv(ndev);
@@ -782,6 +793,7 @@ static const struct net_device_ops emac_netdev_ops = {
 	.ndo_stop		= emac_stop,
 	.ndo_start_xmit		= emac_start_xmit,
 	.ndo_tx_timeout		= emac_timeout,
+	.ndo_set_rx_mode	= emac_set_rx_mode,
 	.ndo_do_ioctl		= emac_ioctl,
 	.ndo_change_mtu		= eth_change_mtu,
 	.ndo_validate_addr	= eth_validate_addr,
diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c
index a8efb18e42fa..0ab83708b6a1 100644
--- a/drivers/net/ethernet/broadcom/bnx2.c
+++ b/drivers/net/ethernet/broadcom/bnx2.c
@@ -8627,6 +8627,7 @@ bnx2_remove_one(struct pci_dev *pdev)
 	pci_disable_device(pdev);
 }
 
+#ifdef CONFIG_PM_SLEEP
 static int
 bnx2_suspend(struct device *device)
 {
@@ -8665,7 +8666,6 @@ bnx2_resume(struct device *device)
 	return 0;
 }
 
-#ifdef CONFIG_PM_SLEEP
 static SIMPLE_DEV_PM_OPS(bnx2_pm_ops, bnx2_suspend, bnx2_resume);
 #define BNX2_PM_OPS (&bnx2_pm_ops)
 
diff --git a/drivers/net/ethernet/cadence/Kconfig b/drivers/net/ethernet/cadence/Kconfig
index 751d5c7b312d..7e49c43b7af3 100644
--- a/drivers/net/ethernet/cadence/Kconfig
+++ b/drivers/net/ethernet/cadence/Kconfig
@@ -4,7 +4,7 @@
 
 config NET_CADENCE
 	bool "Cadence devices"
-	depends on HAS_IOMEM
+	depends on HAS_IOMEM && (ARM || AVR32 || COMPILE_TEST)
 	default y
 	---help---
 	  If you have a network (Ethernet) card belonging to this class, say Y.
@@ -22,7 +22,7 @@ if NET_CADENCE
 
 config ARM_AT91_ETHER
 	tristate "AT91RM9200 Ethernet support"
-	depends on HAS_DMA
+	depends on HAS_DMA && (ARCH_AT91RM9200 || COMPILE_TEST)
 	select MACB
 	---help---
 	  If you wish to compile a kernel for the AT91RM9200 and enable
@@ -30,7 +30,7 @@ config ARM_AT91_ETHER
 
 config MACB
 	tristate "Cadence MACB/GEM support"
-	depends on HAS_DMA
+	depends on HAS_DMA && (PLATFORM_AT32AP || ARCH_AT91 || ARCH_PICOXCELL || ARCH_ZYNQ || COMPILE_TEST)
 	select PHYLIB
 	---help---
 	  The Cadence MACB ethernet interface is found on many Atmel AT32 and
diff --git a/drivers/net/ethernet/chelsio/cxgb4/l2t.c b/drivers/net/ethernet/chelsio/cxgb4/l2t.c
index 81e8402a74b4..8a96572fdde0 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/l2t.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/l2t.c
@@ -154,7 +154,7 @@ static int write_l2e(struct adapter *adap, struct l2t_entry *e, int sync)
 	req->params = htons(L2T_W_PORT(e->lport) | L2T_W_NOREPLY(!sync));
 	req->l2t_idx = htons(e->idx);
 	req->vlan = htons(e->vlan);
-	if (e->neigh)
+	if (e->neigh && !(e->neigh->dev->flags & IFF_LOOPBACK))
 		memcpy(e->dmac, e->neigh->ha, sizeof(e->dmac));
 	memcpy(req->dst_mac, e->dmac, sizeof(req->dst_mac));
 
@@ -394,6 +394,8 @@ struct l2t_entry *cxgb4_l2t_get(struct l2t_data *d, struct neighbour *neigh,
 	if (e) {
 		spin_lock(&e->lock);	/* avoid race with t4_l2t_free */
 		e->state = L2T_STATE_RESOLVING;
+		if (neigh->dev->flags & IFF_LOOPBACK)
+			memcpy(e->dmac, physdev->dev_addr, sizeof(e->dmac));
 		memcpy(e->addr, addr, addr_len);
 		e->ifindex = ifidx;
 		e->hash = hash;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
index fb2fe65903c2..bba67681aeaa 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
@@ -682,7 +682,7 @@ enum {
 	SF_RD_ID        = 0x9f,       /* read ID */
 	SF_ERASE_SECTOR = 0xd8,       /* erase sector */
 
-	FW_MAX_SIZE = 512 * 1024,
+	FW_MAX_SIZE = 16 * SF_SEC_SIZE,
 };
 
 /**
diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h
index 8ccaa2520dc3..97db5a7179df 100644
--- a/drivers/net/ethernet/emulex/benet/be.h
+++ b/drivers/net/ethernet/emulex/benet/be.h
@@ -374,6 +374,7 @@ enum vf_state {
 #define BE_FLAGS_NAPI_ENABLED			(1 << 9)
 #define BE_FLAGS_QNQ_ASYNC_EVT_RCVD		(1 << 11)
 #define BE_FLAGS_VXLAN_OFFLOADS			(1 << 12)
+#define BE_FLAGS_SETUP_DONE			(1 << 13)
 
 #define BE_UC_PMAC_COUNT			30
 #define BE_VF_UC_PMAC_COUNT			2
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index 3e6df47b6973..a18645407d21 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -2033,11 +2033,13 @@ static void be_tx_compl_clean(struct be_adapter *adapter)
 	bool dummy_wrb;
 	int i, pending_txqs;
 
-	/* Wait for a max of 200ms for all the tx-completions to arrive. */
+	/* Stop polling for compls when HW has been silent for 10ms */
 	do {
 		pending_txqs = adapter->num_tx_qs;
 
 		for_all_tx_queues(adapter, txo, i) {
+			cmpl = 0;
+			num_wrbs = 0;
 			txq = &txo->q;
 			while ((txcp = be_tx_compl_get(&txo->cq))) {
 				end_idx =
@@ -2050,14 +2052,13 @@ static void be_tx_compl_clean(struct be_adapter *adapter)
 			if (cmpl) {
 				be_cq_notify(adapter, txo->cq.id, false, cmpl);
 				atomic_sub(num_wrbs, &txq->used);
-				cmpl = 0;
-				num_wrbs = 0;
+				timeo = 0;
 			}
 			if (atomic_read(&txq->used) == 0)
 				pending_txqs--;
 		}
 
-		if (pending_txqs == 0 || ++timeo > 200)
+		if (pending_txqs == 0 || ++timeo > 10 || be_hw_error(adapter))
 			break;
 
 		mdelay(1);
@@ -2725,6 +2726,12 @@ static int be_close(struct net_device *netdev)
 	struct be_eq_obj *eqo;
 	int i;
 
+	/* This protection is needed as be_close() may be called even when the
+	 * adapter is in cleared state (after eeh perm failure)
+	 */
+	if (!(adapter->flags & BE_FLAGS_SETUP_DONE))
+		return 0;
+
 	be_roce_dev_close(adapter);
 
 	if (adapter->flags & BE_FLAGS_NAPI_ENABLED) {
@@ -3055,6 +3062,7 @@ static int be_clear(struct be_adapter *adapter)
 	be_clear_queues(adapter);
 
 	be_msix_disable(adapter);
+	adapter->flags &= ~BE_FLAGS_SETUP_DONE;
 	return 0;
 }
 
@@ -3559,6 +3567,7 @@ static int be_setup(struct be_adapter *adapter)
 	adapter->phy.fc_autoneg = 1;
 
 	be_schedule_worker(adapter);
+	adapter->flags |= BE_FLAGS_SETUP_DONE;
 	return 0;
 err:
 	be_clear(adapter);
diff --git a/drivers/net/ethernet/intel/e1000/e1000_hw.c b/drivers/net/ethernet/intel/e1000/e1000_hw.c
index 2879b9631e15..c1d3fdb296a0 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_hw.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_hw.c
@@ -115,8 +115,6 @@ static DEFINE_SPINLOCK(e1000_phy_lock);
  */
 static s32 e1000_set_phy_type(struct e1000_hw *hw)
 {
-	e_dbg("e1000_set_phy_type");
-
 	if (hw->mac_type == e1000_undefined)
 		return -E1000_ERR_PHY_TYPE;
 
@@ -159,8 +157,6 @@ static void e1000_phy_init_script(struct e1000_hw *hw)
 	u32 ret_val;
 	u16 phy_saved_data;
 
-	e_dbg("e1000_phy_init_script");
-
 	if (hw->phy_init_script) {
 		msleep(20);
 
@@ -253,8 +249,6 @@ static void e1000_phy_init_script(struct e1000_hw *hw)
  */
 s32 e1000_set_mac_type(struct e1000_hw *hw)
 {
-	e_dbg("e1000_set_mac_type");
-
 	switch (hw->device_id) {
 	case E1000_DEV_ID_82542:
 		switch (hw->revision_id) {
@@ -365,8 +359,6 @@ void e1000_set_media_type(struct e1000_hw *hw)
 {
 	u32 status;
 
-	e_dbg("e1000_set_media_type");
-
 	if (hw->mac_type != e1000_82543) {
 		/* tbi_compatibility is only valid on 82543 */
 		hw->tbi_compatibility_en = false;
@@ -415,8 +407,6 @@ s32 e1000_reset_hw(struct e1000_hw *hw)
 	u32 led_ctrl;
 	s32 ret_val;
 
-	e_dbg("e1000_reset_hw");
-
 	/* For 82542 (rev 2.0), disable MWI before issuing a device reset */
 	if (hw->mac_type == e1000_82542_rev2_0) {
 		e_dbg("Disabling MWI on 82542 rev 2.0\n");
@@ -566,8 +556,6 @@ s32 e1000_init_hw(struct e1000_hw *hw)
 	u32 mta_size;
 	u32 ctrl_ext;
 
-	e_dbg("e1000_init_hw");
-
 	/* Initialize Identification LED */
 	ret_val = e1000_id_led_init(hw);
 	if (ret_val) {
@@ -683,8 +671,6 @@ static s32 e1000_adjust_serdes_amplitude(struct e1000_hw *hw)
 	u16 eeprom_data;
 	s32 ret_val;
 
-	e_dbg("e1000_adjust_serdes_amplitude");
-
 	if (hw->media_type != e1000_media_type_internal_serdes)
 		return E1000_SUCCESS;
 
@@ -730,8 +716,6 @@ s32 e1000_setup_link(struct e1000_hw *hw)
 	s32 ret_val;
 	u16 eeprom_data;
 
-	e_dbg("e1000_setup_link");
-
 	/* Read and store word 0x0F of the EEPROM. This word contains bits
 	 * that determine the hardware's default PAUSE (flow control) mode,
 	 * a bit that determines whether the HW defaults to enabling or
@@ -848,8 +832,6 @@ static s32 e1000_setup_fiber_serdes_link(struct e1000_hw *hw)
 	u32 signal = 0;
 	s32 ret_val;
 
-	e_dbg("e1000_setup_fiber_serdes_link");
-
 	/* On adapters with a MAC newer than 82544, SWDP 1 will be
 	 * set when the optics detect a signal. On older adapters, it will be
 	 * cleared when there is a signal. This applies to fiber media only.
@@ -1051,8 +1033,6 @@ static s32 e1000_copper_link_preconfig(struct e1000_hw *hw)
 	s32 ret_val;
 	u16 phy_data;
 
-	e_dbg("e1000_copper_link_preconfig");
-
 	ctrl = er32(CTRL);
 	/* With 82543, we need to force speed and duplex on the MAC equal to
 	 * what the PHY speed and duplex configuration is. In addition, we need
@@ -1112,8 +1092,6 @@ static s32 e1000_copper_link_igp_setup(struct e1000_hw *hw)
 	s32 ret_val;
 	u16 phy_data;
 
-	e_dbg("e1000_copper_link_igp_setup");
-
 	if (hw->phy_reset_disable)
 		return E1000_SUCCESS;
 
@@ -1254,8 +1232,6 @@ static s32 e1000_copper_link_mgp_setup(struct e1000_hw *hw)
 	s32 ret_val;
 	u16 phy_data;
 
-	e_dbg("e1000_copper_link_mgp_setup");
-
 	if (hw->phy_reset_disable)
 		return E1000_SUCCESS;
 
@@ -1362,8 +1338,6 @@ static s32 e1000_copper_link_autoneg(struct e1000_hw *hw)
 	s32 ret_val;
 	u16 phy_data;
 
-	e_dbg("e1000_copper_link_autoneg");
-
 	/* Perform some bounds checking on the hw->autoneg_advertised
 	 * parameter. If this variable is zero, then set it to the default.
 	 */
@@ -1432,7 +1406,6 @@ static s32 e1000_copper_link_autoneg(struct e1000_hw *hw)
 static s32 e1000_copper_link_postconfig(struct e1000_hw *hw)
 {
 	s32 ret_val;
-	e_dbg("e1000_copper_link_postconfig");
 
 	if ((hw->mac_type >= e1000_82544) && (hw->mac_type != e1000_ce4100)) {
 		e1000_config_collision_dist(hw);
@@ -1473,8 +1446,6 @@ static s32 e1000_setup_copper_link(struct e1000_hw *hw)
 	u16 i;
 	u16 phy_data;
 
-	e_dbg("e1000_setup_copper_link");
-
 	/* Check if it is a valid PHY and set PHY mode if necessary. */
 	ret_val = e1000_copper_link_preconfig(hw);
 	if (ret_val)
@@ -1554,8 +1525,6 @@ s32 e1000_phy_setup_autoneg(struct e1000_hw *hw)
 	u16 mii_autoneg_adv_reg;
 	u16 mii_1000t_ctrl_reg;
 
-	e_dbg("e1000_phy_setup_autoneg");
-
 	/* Read the MII Auto-Neg Advertisement Register (Address 4). */
 	ret_val = e1000_read_phy_reg(hw, PHY_AUTONEG_ADV, &mii_autoneg_adv_reg);
 	if (ret_val)
@@ -1707,8 +1676,6 @@ static s32 e1000_phy_force_speed_duplex(struct e1000_hw *hw)
 	u16 phy_data;
 	u16 i;
 
-	e_dbg("e1000_phy_force_speed_duplex");
-
 	/* Turn off Flow control if we are forcing speed and duplex. */
 	hw->fc = E1000_FC_NONE;
 
@@ -1939,8 +1906,6 @@ void e1000_config_collision_dist(struct e1000_hw *hw)
 {
 	u32 tctl, coll_dist;
 
-	e_dbg("e1000_config_collision_dist");
-
 	if (hw->mac_type < e1000_82543)
 		coll_dist = E1000_COLLISION_DISTANCE_82542;
 	else
@@ -1970,8 +1935,6 @@ static s32 e1000_config_mac_to_phy(struct e1000_hw *hw)
 	s32 ret_val;
 	u16 phy_data;
 
-	e_dbg("e1000_config_mac_to_phy");
-
 	/* 82544 or newer MAC, Auto Speed Detection takes care of
 	 * MAC speed/duplex configuration.
 	 */
@@ -2049,8 +2012,6 @@ s32 e1000_force_mac_fc(struct e1000_hw *hw)
 {
 	u32 ctrl;
 
-	e_dbg("e1000_force_mac_fc");
-
 	/* Get the current configuration of the Device Control Register */
 	ctrl = er32(CTRL);
 
@@ -2120,8 +2081,6 @@ static s32 e1000_config_fc_after_link_up(struct e1000_hw *hw)
 	u16 speed;
 	u16 duplex;
 
-	e_dbg("e1000_config_fc_after_link_up");
-
 	/* Check for the case where we have fiber media and auto-neg failed
 	 * so we had to force link. In this case, we need to force the
 	 * configuration of the MAC to match the "fc" parameter.
@@ -2337,8 +2296,6 @@ static s32 e1000_check_for_serdes_link_generic(struct e1000_hw *hw)
 	u32 status;
 	s32 ret_val = E1000_SUCCESS;
 
-	e_dbg("e1000_check_for_serdes_link_generic");
-
 	ctrl = er32(CTRL);
 	status = er32(STATUS);
 	rxcw = er32(RXCW);
@@ -2449,8 +2406,6 @@ s32 e1000_check_for_link(struct e1000_hw *hw)
 	s32 ret_val;
 	u16 phy_data;
 
-	e_dbg("e1000_check_for_link");
-
 	ctrl = er32(CTRL);
 	status = er32(STATUS);
 
@@ -2632,8 +2587,6 @@ s32 e1000_get_speed_and_duplex(struct e1000_hw *hw, u16 *speed, u16 *duplex)
 	s32 ret_val;
 	u16 phy_data;
 
-	e_dbg("e1000_get_speed_and_duplex");
-
 	if (hw->mac_type >= e1000_82543) {
 		status = er32(STATUS);
 		if (status & E1000_STATUS_SPEED_1000) {
@@ -2699,7 +2652,6 @@ static s32 e1000_wait_autoneg(struct e1000_hw *hw)
 	u16 i;
 	u16 phy_data;
 
-	e_dbg("e1000_wait_autoneg");
 	e_dbg("Waiting for Auto-Neg to complete.\n");
 
 	/* We will wait for autoneg to complete or 4.5 seconds to expire. */
@@ -2866,8 +2818,6 @@ s32 e1000_read_phy_reg(struct e1000_hw *hw, u32 reg_addr, u16 *phy_data)
 	u32 ret_val;
 	unsigned long flags;
 
-	e_dbg("e1000_read_phy_reg");
-
 	spin_lock_irqsave(&e1000_phy_lock, flags);
 
 	if ((hw->phy_type == e1000_phy_igp) &&
@@ -2894,8 +2844,6 @@ static s32 e1000_read_phy_reg_ex(struct e1000_hw *hw, u32 reg_addr,
 	u32 mdic = 0;
 	const u32 phy_addr = (hw->mac_type == e1000_ce4100) ? hw->phy_addr : 1;
 
-	e_dbg("e1000_read_phy_reg_ex");
-
 	if (reg_addr > MAX_PHY_REG_ADDRESS) {
 		e_dbg("PHY Address %d is out of range\n", reg_addr);
 		return -E1000_ERR_PARAM;
@@ -3008,8 +2956,6 @@ s32 e1000_write_phy_reg(struct e1000_hw *hw, u32 reg_addr, u16 phy_data)
 	u32 ret_val;
 	unsigned long flags;
 
-	e_dbg("e1000_write_phy_reg");
-
 	spin_lock_irqsave(&e1000_phy_lock, flags);
 
 	if ((hw->phy_type == e1000_phy_igp) &&
@@ -3036,8 +2982,6 @@ static s32 e1000_write_phy_reg_ex(struct e1000_hw *hw, u32 reg_addr,
 	u32 mdic = 0;
 	const u32 phy_addr = (hw->mac_type == e1000_ce4100) ? hw->phy_addr : 1;
 
-	e_dbg("e1000_write_phy_reg_ex");
-
 	if (reg_addr > MAX_PHY_REG_ADDRESS) {
 		e_dbg("PHY Address %d is out of range\n", reg_addr);
 		return -E1000_ERR_PARAM;
@@ -3129,8 +3073,6 @@ s32 e1000_phy_hw_reset(struct e1000_hw *hw)
 	u32 ctrl, ctrl_ext;
 	u32 led_ctrl;
 
-	e_dbg("e1000_phy_hw_reset");
-
 	e_dbg("Resetting Phy...\n");
 
 	if (hw->mac_type > e1000_82543) {
@@ -3189,8 +3131,6 @@ s32 e1000_phy_reset(struct e1000_hw *hw)
 	s32 ret_val;
 	u16 phy_data;
 
-	e_dbg("e1000_phy_reset");
-
 	switch (hw->phy_type) {
 	case e1000_phy_igp:
 		ret_val = e1000_phy_hw_reset(hw);
@@ -3229,8 +3169,6 @@ static s32 e1000_detect_gig_phy(struct e1000_hw *hw)
 	u16 phy_id_high, phy_id_low;
 	bool match = false;
 
-	e_dbg("e1000_detect_gig_phy");
-
 	if (hw->phy_id != 0)
 		return E1000_SUCCESS;
 
@@ -3301,7 +3239,6 @@ static s32 e1000_detect_gig_phy(struct e1000_hw *hw)
 static s32 e1000_phy_reset_dsp(struct e1000_hw *hw)
 {
 	s32 ret_val;
-	e_dbg("e1000_phy_reset_dsp");
 
 	do {
 		ret_val = e1000_write_phy_reg(hw, 29, 0x001d);
@@ -3333,8 +3270,6 @@ static s32 e1000_phy_igp_get_info(struct e1000_hw *hw,
 	u16 phy_data, min_length, max_length, average;
 	e1000_rev_polarity polarity;
 
-	e_dbg("e1000_phy_igp_get_info");
-
 	/* The downshift status is checked only once, after link is established,
 	 * and it stored in the hw->speed_downgraded parameter.
 	 */
@@ -3414,8 +3349,6 @@ static s32 e1000_phy_m88_get_info(struct e1000_hw *hw,
 	u16 phy_data;
 	e1000_rev_polarity polarity;
 
-	e_dbg("e1000_phy_m88_get_info");
-
 	/* The downshift status is checked only once, after link is established,
 	 * and it stored in the hw->speed_downgraded parameter.
 	 */
@@ -3487,8 +3420,6 @@ s32 e1000_phy_get_info(struct e1000_hw *hw, struct e1000_phy_info *phy_info)
 	s32 ret_val;
 	u16 phy_data;
 
-	e_dbg("e1000_phy_get_info");
-
 	phy_info->cable_length = e1000_cable_length_undefined;
 	phy_info->extended_10bt_distance = e1000_10bt_ext_dist_enable_undefined;
 	phy_info->cable_polarity = e1000_rev_polarity_undefined;
@@ -3527,8 +3458,6 @@ s32 e1000_phy_get_info(struct e1000_hw *hw, struct e1000_phy_info *phy_info)
 
 s32 e1000_validate_mdi_setting(struct e1000_hw *hw)
 {
-	e_dbg("e1000_validate_mdi_settings");
-
 	if (!hw->autoneg && (hw->mdix == 0 || hw->mdix == 3)) {
 		e_dbg("Invalid MDI setting detected\n");
 		hw->mdix = 1;
@@ -3551,8 +3480,6 @@ s32 e1000_init_eeprom_params(struct e1000_hw *hw)
3551 s32 ret_val = E1000_SUCCESS; 3480 s32 ret_val = E1000_SUCCESS;
3552 u16 eeprom_size; 3481 u16 eeprom_size;
3553 3482
3554 e_dbg("e1000_init_eeprom_params");
3555
3556 switch (hw->mac_type) { 3483 switch (hw->mac_type) {
3557 case e1000_82542_rev2_0: 3484 case e1000_82542_rev2_0:
3558 case e1000_82542_rev2_1: 3485 case e1000_82542_rev2_1:
@@ -3770,8 +3697,6 @@ static s32 e1000_acquire_eeprom(struct e1000_hw *hw)
3770 struct e1000_eeprom_info *eeprom = &hw->eeprom; 3697 struct e1000_eeprom_info *eeprom = &hw->eeprom;
3771 u32 eecd, i = 0; 3698 u32 eecd, i = 0;
3772 3699
3773 e_dbg("e1000_acquire_eeprom");
3774
3775 eecd = er32(EECD); 3700 eecd = er32(EECD);
3776 3701
3777 /* Request EEPROM Access */ 3702 /* Request EEPROM Access */
@@ -3871,8 +3796,6 @@ static void e1000_release_eeprom(struct e1000_hw *hw)
3871{ 3796{
3872 u32 eecd; 3797 u32 eecd;
3873 3798
3874 e_dbg("e1000_release_eeprom");
3875
3876 eecd = er32(EECD); 3799 eecd = er32(EECD);
3877 3800
3878 if (hw->eeprom.type == e1000_eeprom_spi) { 3801 if (hw->eeprom.type == e1000_eeprom_spi) {
@@ -3920,8 +3843,6 @@ static s32 e1000_spi_eeprom_ready(struct e1000_hw *hw)
3920 u16 retry_count = 0; 3843 u16 retry_count = 0;
3921 u8 spi_stat_reg; 3844 u8 spi_stat_reg;
3922 3845
3923 e_dbg("e1000_spi_eeprom_ready");
3924
3925 /* Read "Status Register" repeatedly until the LSB is cleared. The 3846 /* Read "Status Register" repeatedly until the LSB is cleared. The
3926 * EEPROM will signal that the command has been completed by clearing 3847 * EEPROM will signal that the command has been completed by clearing
3927 * bit 0 of the internal status register. If it's not cleared within 3848 * bit 0 of the internal status register. If it's not cleared within
@@ -3974,8 +3895,6 @@ static s32 e1000_do_read_eeprom(struct e1000_hw *hw, u16 offset, u16 words,
3974 struct e1000_eeprom_info *eeprom = &hw->eeprom; 3895 struct e1000_eeprom_info *eeprom = &hw->eeprom;
3975 u32 i = 0; 3896 u32 i = 0;
3976 3897
3977 e_dbg("e1000_read_eeprom");
3978
3979 if (hw->mac_type == e1000_ce4100) { 3898 if (hw->mac_type == e1000_ce4100) {
3980 GBE_CONFIG_FLASH_READ(GBE_CONFIG_BASE_VIRT, offset, words, 3899 GBE_CONFIG_FLASH_READ(GBE_CONFIG_BASE_VIRT, offset, words,
3981 data); 3900 data);
@@ -4076,8 +3995,6 @@ s32 e1000_validate_eeprom_checksum(struct e1000_hw *hw)
4076 u16 checksum = 0; 3995 u16 checksum = 0;
4077 u16 i, eeprom_data; 3996 u16 i, eeprom_data;
4078 3997
4079 e_dbg("e1000_validate_eeprom_checksum");
4080
4081 for (i = 0; i < (EEPROM_CHECKSUM_REG + 1); i++) { 3998 for (i = 0; i < (EEPROM_CHECKSUM_REG + 1); i++) {
4082 if (e1000_read_eeprom(hw, i, 1, &eeprom_data) < 0) { 3999 if (e1000_read_eeprom(hw, i, 1, &eeprom_data) < 0) {
4083 e_dbg("EEPROM Read Error\n"); 4000 e_dbg("EEPROM Read Error\n");
@@ -4112,8 +4029,6 @@ s32 e1000_update_eeprom_checksum(struct e1000_hw *hw)
4112 u16 checksum = 0; 4029 u16 checksum = 0;
4113 u16 i, eeprom_data; 4030 u16 i, eeprom_data;
4114 4031
4115 e_dbg("e1000_update_eeprom_checksum");
4116
4117 for (i = 0; i < EEPROM_CHECKSUM_REG; i++) { 4032 for (i = 0; i < EEPROM_CHECKSUM_REG; i++) {
4118 if (e1000_read_eeprom(hw, i, 1, &eeprom_data) < 0) { 4033 if (e1000_read_eeprom(hw, i, 1, &eeprom_data) < 0) {
4119 e_dbg("EEPROM Read Error\n"); 4034 e_dbg("EEPROM Read Error\n");
@@ -4154,8 +4069,6 @@ static s32 e1000_do_write_eeprom(struct e1000_hw *hw, u16 offset, u16 words,
4154 struct e1000_eeprom_info *eeprom = &hw->eeprom; 4069 struct e1000_eeprom_info *eeprom = &hw->eeprom;
4155 s32 status = 0; 4070 s32 status = 0;
4156 4071
4157 e_dbg("e1000_write_eeprom");
4158
4159 if (hw->mac_type == e1000_ce4100) { 4072 if (hw->mac_type == e1000_ce4100) {
4160 GBE_CONFIG_FLASH_WRITE(GBE_CONFIG_BASE_VIRT, offset, words, 4073 GBE_CONFIG_FLASH_WRITE(GBE_CONFIG_BASE_VIRT, offset, words,
4161 data); 4074 data);
@@ -4205,8 +4118,6 @@ static s32 e1000_write_eeprom_spi(struct e1000_hw *hw, u16 offset, u16 words,
4205 struct e1000_eeprom_info *eeprom = &hw->eeprom; 4118 struct e1000_eeprom_info *eeprom = &hw->eeprom;
4206 u16 widx = 0; 4119 u16 widx = 0;
4207 4120
4208 e_dbg("e1000_write_eeprom_spi");
4209
4210 while (widx < words) { 4121 while (widx < words) {
4211 u8 write_opcode = EEPROM_WRITE_OPCODE_SPI; 4122 u8 write_opcode = EEPROM_WRITE_OPCODE_SPI;
4212 4123
@@ -4274,8 +4185,6 @@ static s32 e1000_write_eeprom_microwire(struct e1000_hw *hw, u16 offset,
4274 u16 words_written = 0; 4185 u16 words_written = 0;
4275 u16 i = 0; 4186 u16 i = 0;
4276 4187
4277 e_dbg("e1000_write_eeprom_microwire");
4278
4279 /* Send the write enable command to the EEPROM (3-bit opcode plus 4188 /* Send the write enable command to the EEPROM (3-bit opcode plus
4280 * 6/8-bit dummy address beginning with 11). It's less work to include 4189 * 6/8-bit dummy address beginning with 11). It's less work to include
4281 * the 11 of the dummy address as part of the opcode than it is to shift 4190 * the 11 of the dummy address as part of the opcode than it is to shift
@@ -4354,8 +4263,6 @@ s32 e1000_read_mac_addr(struct e1000_hw *hw)
4354 u16 offset; 4263 u16 offset;
4355 u16 eeprom_data, i; 4264 u16 eeprom_data, i;
4356 4265
4357 e_dbg("e1000_read_mac_addr");
4358
4359 for (i = 0; i < NODE_ADDRESS_SIZE; i += 2) { 4266 for (i = 0; i < NODE_ADDRESS_SIZE; i += 2) {
4360 offset = i >> 1; 4267 offset = i >> 1;
4361 if (e1000_read_eeprom(hw, offset, 1, &eeprom_data) < 0) { 4268 if (e1000_read_eeprom(hw, offset, 1, &eeprom_data) < 0) {
@@ -4394,8 +4301,6 @@ static void e1000_init_rx_addrs(struct e1000_hw *hw)
4394 u32 i; 4301 u32 i;
4395 u32 rar_num; 4302 u32 rar_num;
4396 4303
4397 e_dbg("e1000_init_rx_addrs");
4398
4399 /* Setup the receive address. */ 4304 /* Setup the receive address. */
4400 e_dbg("Programming MAC Address into RAR[0]\n"); 4305 e_dbg("Programming MAC Address into RAR[0]\n");
4401 4306
@@ -4553,8 +4458,6 @@ static s32 e1000_id_led_init(struct e1000_hw *hw)
4553 u16 eeprom_data, i, temp; 4458 u16 eeprom_data, i, temp;
4554 const u16 led_mask = 0x0F; 4459 const u16 led_mask = 0x0F;
4555 4460
4556 e_dbg("e1000_id_led_init");
4557
4558 if (hw->mac_type < e1000_82540) { 4461 if (hw->mac_type < e1000_82540) {
4559 /* Nothing to do */ 4462 /* Nothing to do */
4560 return E1000_SUCCESS; 4463 return E1000_SUCCESS;
@@ -4626,8 +4529,6 @@ s32 e1000_setup_led(struct e1000_hw *hw)
4626 u32 ledctl; 4529 u32 ledctl;
4627 s32 ret_val = E1000_SUCCESS; 4530 s32 ret_val = E1000_SUCCESS;
4628 4531
4629 e_dbg("e1000_setup_led");
4630
4631 switch (hw->mac_type) { 4532 switch (hw->mac_type) {
4632 case e1000_82542_rev2_0: 4533 case e1000_82542_rev2_0:
4633 case e1000_82542_rev2_1: 4534 case e1000_82542_rev2_1:
@@ -4678,8 +4579,6 @@ s32 e1000_cleanup_led(struct e1000_hw *hw)
4678{ 4579{
4679 s32 ret_val = E1000_SUCCESS; 4580 s32 ret_val = E1000_SUCCESS;
4680 4581
4681 e_dbg("e1000_cleanup_led");
4682
4683 switch (hw->mac_type) { 4582 switch (hw->mac_type) {
4684 case e1000_82542_rev2_0: 4583 case e1000_82542_rev2_0:
4685 case e1000_82542_rev2_1: 4584 case e1000_82542_rev2_1:
@@ -4714,8 +4613,6 @@ s32 e1000_led_on(struct e1000_hw *hw)
4714{ 4613{
4715 u32 ctrl = er32(CTRL); 4614 u32 ctrl = er32(CTRL);
4716 4615
4717 e_dbg("e1000_led_on");
4718
4719 switch (hw->mac_type) { 4616 switch (hw->mac_type) {
4720 case e1000_82542_rev2_0: 4617 case e1000_82542_rev2_0:
4721 case e1000_82542_rev2_1: 4618 case e1000_82542_rev2_1:
@@ -4760,8 +4657,6 @@ s32 e1000_led_off(struct e1000_hw *hw)
4760{ 4657{
4761 u32 ctrl = er32(CTRL); 4658 u32 ctrl = er32(CTRL);
4762 4659
4763 e_dbg("e1000_led_off");
4764
4765 switch (hw->mac_type) { 4660 switch (hw->mac_type) {
4766 case e1000_82542_rev2_0: 4661 case e1000_82542_rev2_0:
4767 case e1000_82542_rev2_1: 4662 case e1000_82542_rev2_1:
@@ -4889,8 +4784,6 @@ static void e1000_clear_hw_cntrs(struct e1000_hw *hw)
4889 */ 4784 */
4890void e1000_reset_adaptive(struct e1000_hw *hw) 4785void e1000_reset_adaptive(struct e1000_hw *hw)
4891{ 4786{
4892 e_dbg("e1000_reset_adaptive");
4893
4894 if (hw->adaptive_ifs) { 4787 if (hw->adaptive_ifs) {
4895 if (!hw->ifs_params_forced) { 4788 if (!hw->ifs_params_forced) {
4896 hw->current_ifs_val = 0; 4789 hw->current_ifs_val = 0;
@@ -4917,8 +4810,6 @@ void e1000_reset_adaptive(struct e1000_hw *hw)
4917 */ 4810 */
4918void e1000_update_adaptive(struct e1000_hw *hw) 4811void e1000_update_adaptive(struct e1000_hw *hw)
4919{ 4812{
4920 e_dbg("e1000_update_adaptive");
4921
4922 if (hw->adaptive_ifs) { 4813 if (hw->adaptive_ifs) {
4923 if ((hw->collision_delta *hw->ifs_ratio) > hw->tx_packet_delta) { 4814 if ((hw->collision_delta *hw->ifs_ratio) > hw->tx_packet_delta) {
4924 if (hw->tx_packet_delta > MIN_NUM_XMITS) { 4815 if (hw->tx_packet_delta > MIN_NUM_XMITS) {
@@ -5114,8 +5005,6 @@ static s32 e1000_get_cable_length(struct e1000_hw *hw, u16 *min_length,
5114 u16 i, phy_data; 5005 u16 i, phy_data;
5115 u16 cable_length; 5006 u16 cable_length;
5116 5007
5117 e_dbg("e1000_get_cable_length");
5118
5119 *min_length = *max_length = 0; 5008 *min_length = *max_length = 0;
5120 5009
5121 /* Use old method for Phy older than IGP */ 5010 /* Use old method for Phy older than IGP */
@@ -5231,8 +5120,6 @@ static s32 e1000_check_polarity(struct e1000_hw *hw,
5231 s32 ret_val; 5120 s32 ret_val;
5232 u16 phy_data; 5121 u16 phy_data;
5233 5122
5234 e_dbg("e1000_check_polarity");
5235
5236 if (hw->phy_type == e1000_phy_m88) { 5123 if (hw->phy_type == e1000_phy_m88) {
5237 /* return the Polarity bit in the Status register. */ 5124 /* return the Polarity bit in the Status register. */
5238 ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS, 5125 ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS,
@@ -5299,8 +5186,6 @@ static s32 e1000_check_downshift(struct e1000_hw *hw)
5299 s32 ret_val; 5186 s32 ret_val;
5300 u16 phy_data; 5187 u16 phy_data;
5301 5188
5302 e_dbg("e1000_check_downshift");
5303
5304 if (hw->phy_type == e1000_phy_igp) { 5189 if (hw->phy_type == e1000_phy_igp) {
5305 ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_LINK_HEALTH, 5190 ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_LINK_HEALTH,
5306 &phy_data); 5191 &phy_data);
@@ -5411,8 +5296,6 @@ static s32 e1000_config_dsp_after_link_change(struct e1000_hw *hw, bool link_up)
5411 s32 ret_val; 5296 s32 ret_val;
5412 u16 phy_data, phy_saved_data, speed, duplex, i; 5297 u16 phy_data, phy_saved_data, speed, duplex, i;
5413 5298
5414 e_dbg("e1000_config_dsp_after_link_change");
5415
5416 if (hw->phy_type != e1000_phy_igp) 5299 if (hw->phy_type != e1000_phy_igp)
5417 return E1000_SUCCESS; 5300 return E1000_SUCCESS;
5418 5301
@@ -5546,8 +5429,6 @@ static s32 e1000_set_phy_mode(struct e1000_hw *hw)
5546 s32 ret_val; 5429 s32 ret_val;
5547 u16 eeprom_data; 5430 u16 eeprom_data;
5548 5431
5549 e_dbg("e1000_set_phy_mode");
5550
5551 if ((hw->mac_type == e1000_82545_rev_3) && 5432 if ((hw->mac_type == e1000_82545_rev_3) &&
5552 (hw->media_type == e1000_media_type_copper)) { 5433 (hw->media_type == e1000_media_type_copper)) {
5553 ret_val = 5434 ret_val =
@@ -5594,7 +5475,6 @@ static s32 e1000_set_d3_lplu_state(struct e1000_hw *hw, bool active)
5594{ 5475{
5595 s32 ret_val; 5476 s32 ret_val;
5596 u16 phy_data; 5477 u16 phy_data;
5597 e_dbg("e1000_set_d3_lplu_state");
5598 5478
5599 if (hw->phy_type != e1000_phy_igp) 5479 if (hw->phy_type != e1000_phy_igp)
5600 return E1000_SUCCESS; 5480 return E1000_SUCCESS;
@@ -5699,8 +5579,6 @@ static s32 e1000_set_vco_speed(struct e1000_hw *hw)
5699 u16 default_page = 0; 5579 u16 default_page = 0;
5700 u16 phy_data; 5580 u16 phy_data;
5701 5581
5702 e_dbg("e1000_set_vco_speed");
5703
5704 switch (hw->mac_type) { 5582 switch (hw->mac_type) {
5705 case e1000_82545_rev_3: 5583 case e1000_82545_rev_3:
5706 case e1000_82546_rev_3: 5584 case e1000_82546_rev_3:
@@ -5872,7 +5750,6 @@ static s32 e1000_polarity_reversal_workaround(struct e1000_hw *hw)
5872 */ 5750 */
5873static s32 e1000_get_auto_rd_done(struct e1000_hw *hw) 5751static s32 e1000_get_auto_rd_done(struct e1000_hw *hw)
5874{ 5752{
5875 e_dbg("e1000_get_auto_rd_done");
5876 msleep(5); 5753 msleep(5);
5877 return E1000_SUCCESS; 5754 return E1000_SUCCESS;
5878} 5755}
@@ -5887,7 +5764,6 @@ static s32 e1000_get_auto_rd_done(struct e1000_hw *hw)
5887 */ 5764 */
5888static s32 e1000_get_phy_cfg_done(struct e1000_hw *hw) 5765static s32 e1000_get_phy_cfg_done(struct e1000_hw *hw)
5889{ 5766{
5890 e_dbg("e1000_get_phy_cfg_done");
5891 msleep(10); 5767 msleep(10);
5892 return E1000_SUCCESS; 5768 return E1000_SUCCESS;
5893} 5769}
diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c
index 46e6544ed1b7..27058dfe418b 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_main.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_main.c
@@ -2682,14 +2682,13 @@ static int e1000_tso(struct e1000_adapter *adapter,
2682 u32 cmd_length = 0; 2682 u32 cmd_length = 0;
2683 u16 ipcse = 0, tucse, mss; 2683 u16 ipcse = 0, tucse, mss;
2684 u8 ipcss, ipcso, tucss, tucso, hdr_len; 2684 u8 ipcss, ipcso, tucss, tucso, hdr_len;
2685 int err;
2686 2685
2687 if (skb_is_gso(skb)) { 2686 if (skb_is_gso(skb)) {
2688 if (skb_header_cloned(skb)) { 2687 int err;
2689 err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC); 2688
2690 if (err) 2689 err = skb_cow_head(skb, 0);
2691 return err; 2690 if (err < 0)
2692 } 2691 return err;
2693 2692
2694 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); 2693 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
2695 mss = skb_shinfo(skb)->gso_size; 2694 mss = skb_shinfo(skb)->gso_size;
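
The e1000 hunk above is the first of several identical conversions in this series: the open-coded skb_header_cloned()/pskb_expand_head() test in the TSO paths is replaced by skb_cow_head(). A minimal sketch of the two forms, assuming only <linux/skbuff.h>; the helper names are illustrative, and the drivers actually inline the check in their *_tso() routines:

	#include <linux/skbuff.h>

	/* Old form: expand the header area only when it is shared with a clone. */
	static int tso_make_headers_writable_old(struct sk_buff *skb)
	{
		if (skb_header_cloned(skb)) {
			int err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);

			if (err)
				return err;
		}
		return 0;
	}

	/* New form: skb_cow_head() performs the same clone check internally;
	 * the second argument is the extra headroom required (none here).
	 */
	static int tso_make_headers_writable_new(struct sk_buff *skb)
	{
		int err = skb_cow_head(skb, 0);

		return err < 0 ? err : 0;
	}
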
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index dce377b59b2c..d50c91e50528 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -5100,16 +5100,14 @@ static int e1000_tso(struct e1000_ring *tx_ring, struct sk_buff *skb)
5100 u32 cmd_length = 0; 5100 u32 cmd_length = 0;
5101 u16 ipcse = 0, mss; 5101 u16 ipcse = 0, mss;
5102 u8 ipcss, ipcso, tucss, tucso, hdr_len; 5102 u8 ipcss, ipcso, tucss, tucso, hdr_len;
5103 int err;
5103 5104
5104 if (!skb_is_gso(skb)) 5105 if (!skb_is_gso(skb))
5105 return 0; 5106 return 0;
5106 5107
5107 if (skb_header_cloned(skb)) { 5108 err = skb_cow_head(skb, 0);
5108 int err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC); 5109 if (err < 0)
5109 5110 return err;
5110 if (err)
5111 return err;
5112 }
5113 5111
5114 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); 5112 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
5115 mss = skb_shinfo(skb)->gso_size; 5113 mss = skb_shinfo(skb)->gso_size;
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
index 53be5f44d015..b9f50f40abe1 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
@@ -1114,20 +1114,18 @@ static int i40e_tso(struct i40e_ring *tx_ring, struct sk_buff *skb,
1114 u64 *cd_type_cmd_tso_mss, u32 *cd_tunneling) 1114 u64 *cd_type_cmd_tso_mss, u32 *cd_tunneling)
1115{ 1115{
1116 u32 cd_cmd, cd_tso_len, cd_mss; 1116 u32 cd_cmd, cd_tso_len, cd_mss;
1117 struct ipv6hdr *ipv6h;
1117 struct tcphdr *tcph; 1118 struct tcphdr *tcph;
1118 struct iphdr *iph; 1119 struct iphdr *iph;
1119 u32 l4len; 1120 u32 l4len;
1120 int err; 1121 int err;
1121 struct ipv6hdr *ipv6h;
1122 1122
1123 if (!skb_is_gso(skb)) 1123 if (!skb_is_gso(skb))
1124 return 0; 1124 return 0;
1125 1125
1126 if (skb_header_cloned(skb)) { 1126 err = skb_cow_head(skb, 0);
1127 err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC); 1127 if (err < 0)
1128 if (err) 1128 return err;
1129 return err;
1130 }
1131 1129
1132 if (protocol == htons(ETH_P_IP)) { 1130 if (protocol == htons(ETH_P_IP)) {
1133 iph = skb->encapsulation ? inner_ip_hdr(skb) : ip_hdr(skb); 1131 iph = skb->encapsulation ? inner_ip_hdr(skb) : ip_hdr(skb);
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_main.c b/drivers/net/ethernet/intel/i40evf/i40evf_main.c
index e35e66ffa782..2797548fde0d 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf_main.c
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_main.c
@@ -1412,6 +1412,14 @@ restart_watchdog:
1412 schedule_work(&adapter->adminq_task); 1412 schedule_work(&adapter->adminq_task);
1413} 1413}
1414 1414
1415/**
1416 * next_queue - increment to next available queue
1417 * @adapter: board private structure
1418 * @j: queue counter
1419 *
1420 * Helper function for RSS programming to increment through available
1421 * queues. Returns the next queue value.
1422 **/
1415static int next_queue(struct i40evf_adapter *adapter, int j) 1423static int next_queue(struct i40evf_adapter *adapter, int j)
1416{ 1424{
1417 j += 1; 1425 j += 1;
@@ -1451,10 +1459,14 @@ static void i40evf_configure_rss(struct i40evf_adapter *adapter)
1451 /* Populate the LUT with max no. of queues in round robin fashion */ 1459 /* Populate the LUT with max no. of queues in round robin fashion */
1452 j = adapter->vsi_res->num_queue_pairs; 1460 j = adapter->vsi_res->num_queue_pairs;
1453 for (i = 0; i <= I40E_VFQF_HLUT_MAX_INDEX; i++) { 1461 for (i = 0; i <= I40E_VFQF_HLUT_MAX_INDEX; i++) {
1454 lut = next_queue(adapter, j); 1462 j = next_queue(adapter, j);
1455 lut |= next_queue(adapter, j) << 8; 1463 lut = j;
1456 lut |= next_queue(adapter, j) << 16; 1464 j = next_queue(adapter, j);
1457 lut |= next_queue(adapter, j) << 24; 1465 lut |= j << 8;
1466 j = next_queue(adapter, j);
1467 lut |= j << 16;
1468 j = next_queue(adapter, j);
1469 lut |= j << 24;
1458 wr32(hw, I40E_VFQF_HLUT(i), lut); 1470 wr32(hw, I40E_VFQF_HLUT(i), lut);
1459 } 1471 }
1460 i40e_flush(hw); 1472 i40e_flush(hw);
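
The i40evf hunks above also fix the RSS LUT fill: the old code never advanced j between calls, so all four byte lanes of every I40E_VFQF_HLUT register received the same queue index. A sketch of the corrected round-robin fill under the same wrap rule as next_queue(); the helper name is illustrative, while the register macros are the ones used in the patch:

	/* Pack four successive queue indices into each 32-bit HLUT register,
	 * wrapping over num_queue_pairs the way next_queue() does.
	 */
	static void i40evf_fill_rss_lut_sketch(struct i40e_hw *hw, int num_queue_pairs)
	{
		int i, lane, j = num_queue_pairs;
		u32 lut;

		for (i = 0; i <= I40E_VFQF_HLUT_MAX_INDEX; i++) {
			lut = 0;
			for (lane = 0; lane < 4; lane++) {
				j = (j + 1 >= num_queue_pairs) ? 0 : j + 1;
				lut |= (u32)j << (8 * lane);
			}
			wr32(hw, I40E_VFQF_HLUT(i), lut);
		}
	}
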
diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h
index 7fbe1e925143..27130065d92a 100644
--- a/drivers/net/ethernet/intel/igb/igb.h
+++ b/drivers/net/ethernet/intel/igb/igb.h
@@ -241,7 +241,6 @@ struct igb_ring {
241 struct igb_tx_buffer *tx_buffer_info; 241 struct igb_tx_buffer *tx_buffer_info;
242 struct igb_rx_buffer *rx_buffer_info; 242 struct igb_rx_buffer *rx_buffer_info;
243 }; 243 };
244 unsigned long last_rx_timestamp;
245 void *desc; /* descriptor ring memory */ 244 void *desc; /* descriptor ring memory */
246 unsigned long flags; /* ring specific flags */ 245 unsigned long flags; /* ring specific flags */
247 void __iomem *tail; /* pointer to ring tail register */ 246 void __iomem *tail; /* pointer to ring tail register */
@@ -437,6 +436,7 @@ struct igb_adapter {
437 struct hwtstamp_config tstamp_config; 436 struct hwtstamp_config tstamp_config;
438 unsigned long ptp_tx_start; 437 unsigned long ptp_tx_start;
439 unsigned long last_rx_ptp_check; 438 unsigned long last_rx_ptp_check;
439 unsigned long last_rx_timestamp;
440 spinlock_t tmreg_lock; 440 spinlock_t tmreg_lock;
441 struct cyclecounter cc; 441 struct cyclecounter cc;
442 struct timecounter tc; 442 struct timecounter tc;
@@ -533,20 +533,6 @@ void igb_ptp_rx_hang(struct igb_adapter *adapter);
533void igb_ptp_rx_rgtstamp(struct igb_q_vector *q_vector, struct sk_buff *skb); 533void igb_ptp_rx_rgtstamp(struct igb_q_vector *q_vector, struct sk_buff *skb);
534void igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector, unsigned char *va, 534void igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector, unsigned char *va,
535 struct sk_buff *skb); 535 struct sk_buff *skb);
536static inline void igb_ptp_rx_hwtstamp(struct igb_ring *rx_ring,
537 union e1000_adv_rx_desc *rx_desc,
538 struct sk_buff *skb)
539{
540 if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TS) &&
541 !igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP))
542 igb_ptp_rx_rgtstamp(rx_ring->q_vector, skb);
543
544 /* Update the last_rx_timestamp timer in order to enable watchdog check
545 * for error case of latched timestamp on a dropped packet.
546 */
547 rx_ring->last_rx_timestamp = jiffies;
548}
549
550int igb_ptp_set_ts_config(struct net_device *netdev, struct ifreq *ifr); 536int igb_ptp_set_ts_config(struct net_device *netdev, struct ifreq *ifr);
551int igb_ptp_get_ts_config(struct net_device *netdev, struct ifreq *ifr); 537int igb_ptp_get_ts_config(struct net_device *netdev, struct ifreq *ifr);
552#ifdef CONFIG_IGB_HWMON 538#ifdef CONFIG_IGB_HWMON
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index 30198185d19a..fb98d4602f9d 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -4605,6 +4605,7 @@ static int igb_tso(struct igb_ring *tx_ring,
4605 struct sk_buff *skb = first->skb; 4605 struct sk_buff *skb = first->skb;
4606 u32 vlan_macip_lens, type_tucmd; 4606 u32 vlan_macip_lens, type_tucmd;
4607 u32 mss_l4len_idx, l4len; 4607 u32 mss_l4len_idx, l4len;
4608 int err;
4608 4609
4609 if (skb->ip_summed != CHECKSUM_PARTIAL) 4610 if (skb->ip_summed != CHECKSUM_PARTIAL)
4610 return 0; 4611 return 0;
@@ -4612,11 +4613,9 @@ static int igb_tso(struct igb_ring *tx_ring,
4612 if (!skb_is_gso(skb)) 4613 if (!skb_is_gso(skb))
4613 return 0; 4614 return 0;
4614 4615
4615 if (skb_header_cloned(skb)) { 4616 err = skb_cow_head(skb, 0);
4616 int err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC); 4617 if (err < 0)
4617 if (err) 4618 return err;
4618 return err;
4619 }
4620 4619
4621 /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */ 4620 /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
4622 type_tucmd = E1000_ADVTXD_TUCMD_L4T_TCP; 4621 type_tucmd = E1000_ADVTXD_TUCMD_L4T_TCP;
@@ -6955,7 +6954,9 @@ static void igb_process_skb_fields(struct igb_ring *rx_ring,
6955 6954
6956 igb_rx_checksum(rx_ring, rx_desc, skb); 6955 igb_rx_checksum(rx_ring, rx_desc, skb);
6957 6956
6958 igb_ptp_rx_hwtstamp(rx_ring, rx_desc, skb); 6957 if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TS) &&
6958 !igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP))
6959 igb_ptp_rx_rgtstamp(rx_ring->q_vector, skb);
6959 6960
6960 if ((dev->features & NETIF_F_HW_VLAN_CTAG_RX) && 6961 if ((dev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
6961 igb_test_staterr(rx_desc, E1000_RXD_STAT_VP)) { 6962 igb_test_staterr(rx_desc, E1000_RXD_STAT_VP)) {
diff --git a/drivers/net/ethernet/intel/igb/igb_ptp.c b/drivers/net/ethernet/intel/igb/igb_ptp.c
index 2cca8fd5e574..9209d652e1c9 100644
--- a/drivers/net/ethernet/intel/igb/igb_ptp.c
+++ b/drivers/net/ethernet/intel/igb/igb_ptp.c
@@ -427,10 +427,8 @@ static void igb_ptp_overflow_check(struct work_struct *work)
427void igb_ptp_rx_hang(struct igb_adapter *adapter) 427void igb_ptp_rx_hang(struct igb_adapter *adapter)
428{ 428{
429 struct e1000_hw *hw = &adapter->hw; 429 struct e1000_hw *hw = &adapter->hw;
430 struct igb_ring *rx_ring;
431 u32 tsyncrxctl = rd32(E1000_TSYNCRXCTL); 430 u32 tsyncrxctl = rd32(E1000_TSYNCRXCTL);
432 unsigned long rx_event; 431 unsigned long rx_event;
433 int n;
434 432
435 if (hw->mac.type != e1000_82576) 433 if (hw->mac.type != e1000_82576)
436 return; 434 return;
@@ -445,11 +443,8 @@ void igb_ptp_rx_hang(struct igb_adapter *adapter)
445 443
446 /* Determine the most recent watchdog or rx_timestamp event */ 444 /* Determine the most recent watchdog or rx_timestamp event */
447 rx_event = adapter->last_rx_ptp_check; 445 rx_event = adapter->last_rx_ptp_check;
448 for (n = 0; n < adapter->num_rx_queues; n++) { 446 if (time_after(adapter->last_rx_timestamp, rx_event))
449 rx_ring = adapter->rx_ring[n]; 447 rx_event = adapter->last_rx_timestamp;
450 if (time_after(rx_ring->last_rx_timestamp, rx_event))
451 rx_event = rx_ring->last_rx_timestamp;
452 }
453 448
454 /* Only need to read the high RXSTMP register to clear the lock */ 449 /* Only need to read the high RXSTMP register to clear the lock */
455 if (time_is_before_jiffies(rx_event + 5 * HZ)) { 450 if (time_is_before_jiffies(rx_event + 5 * HZ)) {
@@ -540,6 +535,11 @@ void igb_ptp_rx_rgtstamp(struct igb_q_vector *q_vector,
540 regval |= (u64)rd32(E1000_RXSTMPH) << 32; 535 regval |= (u64)rd32(E1000_RXSTMPH) << 32;
541 536
542 igb_ptp_systim_to_hwtstamp(adapter, skb_hwtstamps(skb), regval); 537 igb_ptp_systim_to_hwtstamp(adapter, skb_hwtstamps(skb), regval);
538
539 /* Update the last_rx_timestamp timer in order to enable watchdog check
540 * for error case of latched timestamp on a dropped packet.
541 */
542 adapter->last_rx_timestamp = jiffies;
543} 543}
544 544
545/** 545/**
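
The igb changes above move the Rx timestamp watchdog bookkeeping from the ring to the adapter: igb_ptp_rx_rgtstamp() now records jiffies in adapter->last_rx_timestamp, so igb_ptp_rx_hang() no longer has to walk every Rx ring. A sketch of the resulting staleness test with the driver's 5 second window; the helper is illustrative, not the driver code:

	#include <linux/jiffies.h>
	#include <linux/types.h>

	/* True when no Rx timestamp event has been seen for more than 5 s. */
	static bool rx_tstamp_stale(unsigned long last_rx_ptp_check,
				    unsigned long last_rx_timestamp)
	{
		unsigned long rx_event = last_rx_ptp_check;

		if (time_after(last_rx_timestamp, rx_event))
			rx_event = last_rx_timestamp;

		return time_is_before_jiffies(rx_event + 5 * HZ);
	}
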
diff --git a/drivers/net/ethernet/intel/igbvf/netdev.c b/drivers/net/ethernet/intel/igbvf/netdev.c
index b7ab03a2f28f..d608599e123a 100644
--- a/drivers/net/ethernet/intel/igbvf/netdev.c
+++ b/drivers/net/ethernet/intel/igbvf/netdev.c
@@ -1910,20 +1910,18 @@ static int igbvf_tso(struct igbvf_adapter *adapter,
1910 struct sk_buff *skb, u32 tx_flags, u8 *hdr_len) 1910 struct sk_buff *skb, u32 tx_flags, u8 *hdr_len)
1911{ 1911{
1912 struct e1000_adv_tx_context_desc *context_desc; 1912 struct e1000_adv_tx_context_desc *context_desc;
1913 unsigned int i;
1914 int err;
1915 struct igbvf_buffer *buffer_info; 1913 struct igbvf_buffer *buffer_info;
1916 u32 info = 0, tu_cmd = 0; 1914 u32 info = 0, tu_cmd = 0;
1917 u32 mss_l4len_idx, l4len; 1915 u32 mss_l4len_idx, l4len;
1916 unsigned int i;
1917 int err;
1918
1918 *hdr_len = 0; 1919 *hdr_len = 0;
1919 1920
1920 if (skb_header_cloned(skb)) { 1921 err = skb_cow_head(skb, 0);
1921 err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC); 1922 if (err < 0) {
1922 if (err) { 1923 dev_err(&adapter->pdev->dev, "igbvf_tso returning an error\n");
1923 dev_err(&adapter->pdev->dev, 1924 return err;
1924 "igbvf_tso returning an error\n");
1925 return err;
1926 }
1927 } 1925 }
1928 1926
1929 l4len = tcp_hdrlen(skb); 1927 l4len = tcp_hdrlen(skb);
diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_main.c b/drivers/net/ethernet/intel/ixgb/ixgb_main.c
index f42c201f727f..60801273915c 100644
--- a/drivers/net/ethernet/intel/ixgb/ixgb_main.c
+++ b/drivers/net/ethernet/intel/ixgb/ixgb_main.c
@@ -1220,17 +1220,15 @@ ixgb_tso(struct ixgb_adapter *adapter, struct sk_buff *skb)
1220 unsigned int i; 1220 unsigned int i;
1221 u8 ipcss, ipcso, tucss, tucso, hdr_len; 1221 u8 ipcss, ipcso, tucss, tucso, hdr_len;
1222 u16 ipcse, tucse, mss; 1222 u16 ipcse, tucse, mss;
1223 int err;
1224 1223
1225 if (likely(skb_is_gso(skb))) { 1224 if (likely(skb_is_gso(skb))) {
1226 struct ixgb_buffer *buffer_info; 1225 struct ixgb_buffer *buffer_info;
1227 struct iphdr *iph; 1226 struct iphdr *iph;
1227 int err;
1228 1228
1229 if (skb_header_cloned(skb)) { 1229 err = skb_cow_head(skb, 0);
1230 err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC); 1230 if (err < 0)
1231 if (err) 1231 return err;
1232 return err;
1233 }
1234 1232
1235 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); 1233 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
1236 mss = skb_shinfo(skb)->gso_size; 1234 mss = skb_shinfo(skb)->gso_size;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
index 55c53a1cbb62..1a12c1dd7a27 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
@@ -811,6 +811,7 @@ enum ixgbe_state_t {
811 __IXGBE_DISABLED, 811 __IXGBE_DISABLED,
812 __IXGBE_REMOVING, 812 __IXGBE_REMOVING,
813 __IXGBE_SERVICE_SCHED, 813 __IXGBE_SERVICE_SCHED,
814 __IXGBE_SERVICE_INITED,
814 __IXGBE_IN_SFP_INIT, 815 __IXGBE_IN_SFP_INIT,
815 __IXGBE_PTP_RUNNING, 816 __IXGBE_PTP_RUNNING,
816 __IXGBE_PTP_TX_IN_PROGRESS, 817 __IXGBE_PTP_TX_IN_PROGRESS,
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 8436c651b735..c4c526b7f99f 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -297,7 +297,8 @@ static void ixgbe_remove_adapter(struct ixgbe_hw *hw)
297 return; 297 return;
298 hw->hw_addr = NULL; 298 hw->hw_addr = NULL;
299 e_dev_err("Adapter removed\n"); 299 e_dev_err("Adapter removed\n");
300 ixgbe_service_event_schedule(adapter); 300 if (test_bit(__IXGBE_SERVICE_INITED, &adapter->state))
301 ixgbe_service_event_schedule(adapter);
301} 302}
302 303
303void ixgbe_check_remove(struct ixgbe_hw *hw, u32 reg) 304void ixgbe_check_remove(struct ixgbe_hw *hw, u32 reg)
@@ -6509,6 +6510,7 @@ static int ixgbe_tso(struct ixgbe_ring *tx_ring,
6509 struct sk_buff *skb = first->skb; 6510 struct sk_buff *skb = first->skb;
6510 u32 vlan_macip_lens, type_tucmd; 6511 u32 vlan_macip_lens, type_tucmd;
6511 u32 mss_l4len_idx, l4len; 6512 u32 mss_l4len_idx, l4len;
6513 int err;
6512 6514
6513 if (skb->ip_summed != CHECKSUM_PARTIAL) 6515 if (skb->ip_summed != CHECKSUM_PARTIAL)
6514 return 0; 6516 return 0;
@@ -6516,11 +6518,9 @@ static int ixgbe_tso(struct ixgbe_ring *tx_ring,
6516 if (!skb_is_gso(skb)) 6518 if (!skb_is_gso(skb))
6517 return 0; 6519 return 0;
6518 6520
6519 if (skb_header_cloned(skb)) { 6521 err = skb_cow_head(skb, 0);
6520 int err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC); 6522 if (err < 0)
6521 if (err) 6523 return err;
6522 return err;
6523 }
6524 6524
6525 /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */ 6525 /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
6526 type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP; 6526 type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP;
@@ -7077,8 +7077,8 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
7077 IXGBE_TX_FLAGS_VLAN_PRIO_SHIFT; 7077 IXGBE_TX_FLAGS_VLAN_PRIO_SHIFT;
7078 if (tx_flags & IXGBE_TX_FLAGS_SW_VLAN) { 7078 if (tx_flags & IXGBE_TX_FLAGS_SW_VLAN) {
7079 struct vlan_ethhdr *vhdr; 7079 struct vlan_ethhdr *vhdr;
7080 if (skb_header_cloned(skb) && 7080
7081 pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) 7081 if (skb_cow_head(skb, 0))
7082 goto out_drop; 7082 goto out_drop;
7083 vhdr = (struct vlan_ethhdr *)skb->data; 7083 vhdr = (struct vlan_ethhdr *)skb->data;
7084 vhdr->h_vlan_TCI = htons(tx_flags >> 7084 vhdr->h_vlan_TCI = htons(tx_flags >>
@@ -8023,6 +8023,10 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
8023 /* EEPROM */ 8023 /* EEPROM */
8024 memcpy(&hw->eeprom.ops, ii->eeprom_ops, sizeof(hw->eeprom.ops)); 8024 memcpy(&hw->eeprom.ops, ii->eeprom_ops, sizeof(hw->eeprom.ops));
8025 eec = IXGBE_READ_REG(hw, IXGBE_EEC); 8025 eec = IXGBE_READ_REG(hw, IXGBE_EEC);
8026 if (ixgbe_removed(hw->hw_addr)) {
8027 err = -EIO;
8028 goto err_ioremap;
8029 }
8026 /* If EEPROM is valid (bit 8 = 1), use default otherwise use bit bang */ 8030 /* If EEPROM is valid (bit 8 = 1), use default otherwise use bit bang */
8027 if (!(eec & (1 << 8))) 8031 if (!(eec & (1 << 8)))
8028 hw->eeprom.ops.read = &ixgbe_read_eeprom_bit_bang_generic; 8032 hw->eeprom.ops.read = &ixgbe_read_eeprom_bit_bang_generic;
@@ -8185,7 +8189,12 @@ skip_sriov:
8185 setup_timer(&adapter->service_timer, &ixgbe_service_timer, 8189 setup_timer(&adapter->service_timer, &ixgbe_service_timer,
8186 (unsigned long) adapter); 8190 (unsigned long) adapter);
8187 8191
8192 if (ixgbe_removed(hw->hw_addr)) {
8193 err = -EIO;
8194 goto err_sw_init;
8195 }
8188 INIT_WORK(&adapter->service_task, ixgbe_service_task); 8196 INIT_WORK(&adapter->service_task, ixgbe_service_task);
8197 set_bit(__IXGBE_SERVICE_INITED, &adapter->state);
8189 clear_bit(__IXGBE_SERVICE_SCHED, &adapter->state); 8198 clear_bit(__IXGBE_SERVICE_SCHED, &adapter->state);
8190 8199
8191 err = ixgbe_init_interrupt_scheme(adapter); 8200 err = ixgbe_init_interrupt_scheme(adapter);
@@ -8494,6 +8503,9 @@ static pci_ers_result_t ixgbe_io_error_detected(struct pci_dev *pdev,
8494 8503
8495skip_bad_vf_detection: 8504skip_bad_vf_detection:
8496#endif /* CONFIG_PCI_IOV */ 8505#endif /* CONFIG_PCI_IOV */
8506 if (!test_bit(__IXGBE_SERVICE_INITED, &adapter->state))
8507 return PCI_ERS_RESULT_DISCONNECT;
8508
8497 rtnl_lock(); 8509 rtnl_lock();
8498 netif_device_detach(netdev); 8510 netif_device_detach(netdev);
8499 8511
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
index e7e7d695816b..a0a1de9ce238 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
@@ -421,6 +421,7 @@ enum ixbgevf_state_t {
421 __IXGBEVF_DOWN, 421 __IXGBEVF_DOWN,
422 __IXGBEVF_DISABLED, 422 __IXGBEVF_DISABLED,
423 __IXGBEVF_REMOVING, 423 __IXGBEVF_REMOVING,
424 __IXGBEVF_WORK_INIT,
424}; 425};
425 426
426struct ixgbevf_cb { 427struct ixgbevf_cb {
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index 4ba139b2d25a..d0799e8e31e4 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -107,7 +107,8 @@ static void ixgbevf_remove_adapter(struct ixgbe_hw *hw)
107 return; 107 return;
108 hw->hw_addr = NULL; 108 hw->hw_addr = NULL;
109 dev_err(&adapter->pdev->dev, "Adapter removed\n"); 109 dev_err(&adapter->pdev->dev, "Adapter removed\n");
110 schedule_work(&adapter->watchdog_task); 110 if (test_bit(__IXGBEVF_WORK_INIT, &adapter->state))
111 schedule_work(&adapter->watchdog_task);
111} 112}
112 113
113static void ixgbevf_check_remove(struct ixgbe_hw *hw, u32 reg) 114static void ixgbevf_check_remove(struct ixgbe_hw *hw, u32 reg)
@@ -2838,6 +2839,7 @@ static int ixgbevf_tso(struct ixgbevf_ring *tx_ring,
2838 struct sk_buff *skb = first->skb; 2839 struct sk_buff *skb = first->skb;
2839 u32 vlan_macip_lens, type_tucmd; 2840 u32 vlan_macip_lens, type_tucmd;
2840 u32 mss_l4len_idx, l4len; 2841 u32 mss_l4len_idx, l4len;
2842 int err;
2841 2843
2842 if (skb->ip_summed != CHECKSUM_PARTIAL) 2844 if (skb->ip_summed != CHECKSUM_PARTIAL)
2843 return 0; 2845 return 0;
@@ -2845,11 +2847,9 @@ static int ixgbevf_tso(struct ixgbevf_ring *tx_ring,
2845 if (!skb_is_gso(skb)) 2847 if (!skb_is_gso(skb))
2846 return 0; 2848 return 0;
2847 2849
2848 if (skb_header_cloned(skb)) { 2850 err = skb_cow_head(skb, 0);
2849 int err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC); 2851 if (err < 0)
2850 if (err) 2852 return err;
2851 return err;
2852 }
2853 2853
2854 /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */ 2854 /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
2855 type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP; 2855 type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP;
@@ -3573,8 +3573,13 @@ static int ixgbevf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
3573 adapter->watchdog_timer.function = ixgbevf_watchdog; 3573 adapter->watchdog_timer.function = ixgbevf_watchdog;
3574 adapter->watchdog_timer.data = (unsigned long)adapter; 3574 adapter->watchdog_timer.data = (unsigned long)adapter;
3575 3575
3576 if (IXGBE_REMOVED(hw->hw_addr)) {
3577 err = -EIO;
3578 goto err_sw_init;
3579 }
3576 INIT_WORK(&adapter->reset_task, ixgbevf_reset_task); 3580 INIT_WORK(&adapter->reset_task, ixgbevf_reset_task);
3577 INIT_WORK(&adapter->watchdog_task, ixgbevf_watchdog_task); 3581 INIT_WORK(&adapter->watchdog_task, ixgbevf_watchdog_task);
3582 set_bit(__IXGBEVF_WORK_INIT, &adapter->state);
3578 3583
3579 err = ixgbevf_init_interrupt_scheme(adapter); 3584 err = ixgbevf_init_interrupt_scheme(adapter);
3580 if (err) 3585 if (err)
@@ -3667,6 +3672,9 @@ static pci_ers_result_t ixgbevf_io_error_detected(struct pci_dev *pdev,
3667 struct net_device *netdev = pci_get_drvdata(pdev); 3672 struct net_device *netdev = pci_get_drvdata(pdev);
3668 struct ixgbevf_adapter *adapter = netdev_priv(netdev); 3673 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3669 3674
3675 if (!test_bit(__IXGBEVF_WORK_INIT, &adapter->state))
3676 return PCI_ERS_RESULT_DISCONNECT;
3677
3670 rtnl_lock(); 3678 rtnl_lock();
3671 netif_device_detach(netdev); 3679 netif_device_detach(netdev);
3672 3680
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index d04b1c3c9b85..b248bcbdae63 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -89,9 +89,8 @@
89#define MVNETA_TX_IN_PRGRS BIT(1) 89#define MVNETA_TX_IN_PRGRS BIT(1)
90#define MVNETA_TX_FIFO_EMPTY BIT(8) 90#define MVNETA_TX_FIFO_EMPTY BIT(8)
91#define MVNETA_RX_MIN_FRAME_SIZE 0x247c 91#define MVNETA_RX_MIN_FRAME_SIZE 0x247c
92#define MVNETA_SERDES_CFG 0x24A0 92#define MVNETA_SGMII_SERDES_CFG 0x24A0
93#define MVNETA_SGMII_SERDES_PROTO 0x0cc7 93#define MVNETA_SGMII_SERDES_PROTO 0x0cc7
94#define MVNETA_RGMII_SERDES_PROTO 0x0667
95#define MVNETA_TYPE_PRIO 0x24bc 94#define MVNETA_TYPE_PRIO 0x24bc
96#define MVNETA_FORCE_UNI BIT(21) 95#define MVNETA_FORCE_UNI BIT(21)
97#define MVNETA_TXQ_CMD_1 0x24e4 96#define MVNETA_TXQ_CMD_1 0x24e4
@@ -712,6 +711,35 @@ static void mvneta_rxq_bm_disable(struct mvneta_port *pp,
712 mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val); 711 mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val);
713} 712}
714 713
714
715
716/* Sets the RGMII Enable bit (RGMIIEn) in port MAC control register */
717static void mvneta_gmac_rgmii_set(struct mvneta_port *pp, int enable)
718{
719 u32 val;
720
721 val = mvreg_read(pp, MVNETA_GMAC_CTRL_2);
722
723 if (enable)
724 val |= MVNETA_GMAC2_PORT_RGMII;
725 else
726 val &= ~MVNETA_GMAC2_PORT_RGMII;
727
728 mvreg_write(pp, MVNETA_GMAC_CTRL_2, val);
729}
730
731/* Config SGMII port */
732static void mvneta_port_sgmii_config(struct mvneta_port *pp)
733{
734 u32 val;
735
736 val = mvreg_read(pp, MVNETA_GMAC_CTRL_2);
737 val |= MVNETA_GMAC2_PCS_ENABLE;
738 mvreg_write(pp, MVNETA_GMAC_CTRL_2, val);
739
740 mvreg_write(pp, MVNETA_SGMII_SERDES_CFG, MVNETA_SGMII_SERDES_PROTO);
741}
742
715/* Start the Ethernet port RX and TX activity */ 743/* Start the Ethernet port RX and TX activity */
716static void mvneta_port_up(struct mvneta_port *pp) 744static void mvneta_port_up(struct mvneta_port *pp)
717{ 745{
@@ -2729,15 +2757,12 @@ static void mvneta_port_power_up(struct mvneta_port *pp, int phy_mode)
2729 mvreg_write(pp, MVNETA_UNIT_INTR_CAUSE, 0); 2757 mvreg_write(pp, MVNETA_UNIT_INTR_CAUSE, 0);
2730 2758
2731 if (phy_mode == PHY_INTERFACE_MODE_SGMII) 2759 if (phy_mode == PHY_INTERFACE_MODE_SGMII)
2732 mvreg_write(pp, MVNETA_SERDES_CFG, MVNETA_SGMII_SERDES_PROTO); 2760 mvneta_port_sgmii_config(pp);
2733 else
2734 mvreg_write(pp, MVNETA_SERDES_CFG, MVNETA_RGMII_SERDES_PROTO);
2735 2761
2736 val = mvreg_read(pp, MVNETA_GMAC_CTRL_2); 2762 mvneta_gmac_rgmii_set(pp, 1);
2737
2738 val |= MVNETA_GMAC2_PCS_ENABLE | MVNETA_GMAC2_PORT_RGMII;
2739 2763
2740 /* Cancel Port Reset */ 2764 /* Cancel Port Reset */
2765 val = mvreg_read(pp, MVNETA_GMAC_CTRL_2);
2741 val &= ~MVNETA_GMAC2_PORT_RESET; 2766 val &= ~MVNETA_GMAC2_PORT_RESET;
2742 mvreg_write(pp, MVNETA_GMAC_CTRL_2, val); 2767 mvreg_write(pp, MVNETA_GMAC_CTRL_2, val);
2743 2768
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index f0ae95f66ceb..cef267e24f9c 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -2301,13 +2301,8 @@ static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data)
2301 /* Allow large DMA segments, up to the firmware limit of 1 GB */ 2301 /* Allow large DMA segments, up to the firmware limit of 1 GB */
2302 dma_set_max_seg_size(&pdev->dev, 1024 * 1024 * 1024); 2302 dma_set_max_seg_size(&pdev->dev, 1024 * 1024 * 1024);
2303 2303
2304 priv = kzalloc(sizeof(*priv), GFP_KERNEL); 2304 dev = pci_get_drvdata(pdev);
2305 if (!priv) { 2305 priv = mlx4_priv(dev);
2306 err = -ENOMEM;
2307 goto err_release_regions;
2308 }
2309
2310 dev = &priv->dev;
2311 dev->pdev = pdev; 2306 dev->pdev = pdev;
2312 INIT_LIST_HEAD(&priv->ctx_list); 2307 INIT_LIST_HEAD(&priv->ctx_list);
2313 spin_lock_init(&priv->ctx_lock); 2308 spin_lock_init(&priv->ctx_lock);
@@ -2374,10 +2369,10 @@ static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data)
2374 } else { 2369 } else {
2375 atomic_inc(&pf_loading); 2370 atomic_inc(&pf_loading);
2376 err = pci_enable_sriov(pdev, total_vfs); 2371 err = pci_enable_sriov(pdev, total_vfs);
2377 atomic_dec(&pf_loading);
2378 if (err) { 2372 if (err) {
2379 mlx4_err(dev, "Failed to enable SR-IOV, continuing without SR-IOV (err = %d).\n", 2373 mlx4_err(dev, "Failed to enable SR-IOV, continuing without SR-IOV (err = %d).\n",
2380 err); 2374 err);
2375 atomic_dec(&pf_loading);
2381 err = 0; 2376 err = 0;
2382 } else { 2377 } else {
2383 mlx4_warn(dev, "Running in master mode\n"); 2378 mlx4_warn(dev, "Running in master mode\n");
@@ -2535,8 +2530,10 @@ slave_start:
2535 mlx4_sense_init(dev); 2530 mlx4_sense_init(dev);
2536 mlx4_start_sense(dev); 2531 mlx4_start_sense(dev);
2537 2532
2538 priv->pci_dev_data = pci_dev_data; 2533 priv->removed = 0;
2539 pci_set_drvdata(pdev, dev); 2534
2535 if (mlx4_is_master(dev) && dev->num_vfs)
2536 atomic_dec(&pf_loading);
2540 2537
2541 return 0; 2538 return 0;
2542 2539
@@ -2588,6 +2585,9 @@ err_rel_own:
2588 if (!mlx4_is_slave(dev)) 2585 if (!mlx4_is_slave(dev))
2589 mlx4_free_ownership(dev); 2586 mlx4_free_ownership(dev);
2590 2587
2588 if (mlx4_is_master(dev) && dev->num_vfs)
2589 atomic_dec(&pf_loading);
2590
2591 kfree(priv->dev.dev_vfs); 2591 kfree(priv->dev.dev_vfs);
2592 2592
2593err_free_dev: 2593err_free_dev:
@@ -2604,85 +2604,110 @@ err_disable_pdev:
2604 2604
2605static int mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id) 2605static int mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
2606{ 2606{
2607 struct mlx4_priv *priv;
2608 struct mlx4_dev *dev;
2609
2607 printk_once(KERN_INFO "%s", mlx4_version); 2610 printk_once(KERN_INFO "%s", mlx4_version);
2608 2611
2612 priv = kzalloc(sizeof(*priv), GFP_KERNEL);
2613 if (!priv)
2614 return -ENOMEM;
2615
2616 dev = &priv->dev;
2617 pci_set_drvdata(pdev, dev);
2618 priv->pci_dev_data = id->driver_data;
2619
2609 return __mlx4_init_one(pdev, id->driver_data); 2620 return __mlx4_init_one(pdev, id->driver_data);
2610} 2621}
2611 2622
2612static void mlx4_remove_one(struct pci_dev *pdev) 2623static void __mlx4_remove_one(struct pci_dev *pdev)
2613{ 2624{
2614 struct mlx4_dev *dev = pci_get_drvdata(pdev); 2625 struct mlx4_dev *dev = pci_get_drvdata(pdev);
2615 struct mlx4_priv *priv = mlx4_priv(dev); 2626 struct mlx4_priv *priv = mlx4_priv(dev);
2627 int pci_dev_data;
2616 int p; 2628 int p;
2617 2629
2618 if (dev) { 2630 if (priv->removed)
2619 /* in SRIOV it is not allowed to unload the pf's 2631 return;
2620 * driver while there are alive vf's */
2621 if (mlx4_is_master(dev)) {
2622 if (mlx4_how_many_lives_vf(dev))
2623 printk(KERN_ERR "Removing PF when there are assigned VF's !!!\n");
2624 }
2625 mlx4_stop_sense(dev);
2626 mlx4_unregister_device(dev);
2627 2632
2628 for (p = 1; p <= dev->caps.num_ports; p++) { 2633 pci_dev_data = priv->pci_dev_data;
2629 mlx4_cleanup_port_info(&priv->port[p]);
2630 mlx4_CLOSE_PORT(dev, p);
2631 }
2632 2634
2633 if (mlx4_is_master(dev)) 2635 /* in SRIOV it is not allowed to unload the pf's
2634 mlx4_free_resource_tracker(dev, 2636 * driver while there are alive vf's */
2635 RES_TR_FREE_SLAVES_ONLY); 2637 if (mlx4_is_master(dev) && mlx4_how_many_lives_vf(dev))
2636 2638 printk(KERN_ERR "Removing PF when there are assigned VF's !!!\n");
2637 mlx4_cleanup_counters_table(dev); 2639 mlx4_stop_sense(dev);
2638 mlx4_cleanup_qp_table(dev); 2640 mlx4_unregister_device(dev);
2639 mlx4_cleanup_srq_table(dev);
2640 mlx4_cleanup_cq_table(dev);
2641 mlx4_cmd_use_polling(dev);
2642 mlx4_cleanup_eq_table(dev);
2643 mlx4_cleanup_mcg_table(dev);
2644 mlx4_cleanup_mr_table(dev);
2645 mlx4_cleanup_xrcd_table(dev);
2646 mlx4_cleanup_pd_table(dev);
2647 2641
2648 if (mlx4_is_master(dev)) 2642 for (p = 1; p <= dev->caps.num_ports; p++) {
2649 mlx4_free_resource_tracker(dev, 2643 mlx4_cleanup_port_info(&priv->port[p]);
2650 RES_TR_FREE_STRUCTS_ONLY); 2644 mlx4_CLOSE_PORT(dev, p);
2651 2645 }
2652 iounmap(priv->kar);
2653 mlx4_uar_free(dev, &priv->driver_uar);
2654 mlx4_cleanup_uar_table(dev);
2655 if (!mlx4_is_slave(dev))
2656 mlx4_clear_steering(dev);
2657 mlx4_free_eq_table(dev);
2658 if (mlx4_is_master(dev))
2659 mlx4_multi_func_cleanup(dev);
2660 mlx4_close_hca(dev);
2661 if (mlx4_is_slave(dev))
2662 mlx4_multi_func_cleanup(dev);
2663 mlx4_cmd_cleanup(dev);
2664
2665 if (dev->flags & MLX4_FLAG_MSI_X)
2666 pci_disable_msix(pdev);
2667 if (dev->flags & MLX4_FLAG_SRIOV) {
2668 mlx4_warn(dev, "Disabling SR-IOV\n");
2669 pci_disable_sriov(pdev);
2670 }
2671 2646
2672 if (!mlx4_is_slave(dev)) 2647 if (mlx4_is_master(dev))
2673 mlx4_free_ownership(dev); 2648 mlx4_free_resource_tracker(dev,
2649 RES_TR_FREE_SLAVES_ONLY);
2674 2650
2675 kfree(dev->caps.qp0_tunnel); 2651 mlx4_cleanup_counters_table(dev);
2676 kfree(dev->caps.qp0_proxy); 2652 mlx4_cleanup_qp_table(dev);
2677 kfree(dev->caps.qp1_tunnel); 2653 mlx4_cleanup_srq_table(dev);
2678 kfree(dev->caps.qp1_proxy); 2654 mlx4_cleanup_cq_table(dev);
2679 kfree(dev->dev_vfs); 2655 mlx4_cmd_use_polling(dev);
2656 mlx4_cleanup_eq_table(dev);
2657 mlx4_cleanup_mcg_table(dev);
2658 mlx4_cleanup_mr_table(dev);
2659 mlx4_cleanup_xrcd_table(dev);
2660 mlx4_cleanup_pd_table(dev);
2680 2661
2681 kfree(priv); 2662 if (mlx4_is_master(dev))
2682 pci_release_regions(pdev); 2663 mlx4_free_resource_tracker(dev,
2683 pci_disable_device(pdev); 2664 RES_TR_FREE_STRUCTS_ONLY);
2684 pci_set_drvdata(pdev, NULL); 2665
2666 iounmap(priv->kar);
2667 mlx4_uar_free(dev, &priv->driver_uar);
2668 mlx4_cleanup_uar_table(dev);
2669 if (!mlx4_is_slave(dev))
2670 mlx4_clear_steering(dev);
2671 mlx4_free_eq_table(dev);
2672 if (mlx4_is_master(dev))
2673 mlx4_multi_func_cleanup(dev);
2674 mlx4_close_hca(dev);
2675 if (mlx4_is_slave(dev))
2676 mlx4_multi_func_cleanup(dev);
2677 mlx4_cmd_cleanup(dev);
2678
2679 if (dev->flags & MLX4_FLAG_MSI_X)
2680 pci_disable_msix(pdev);
2681 if (dev->flags & MLX4_FLAG_SRIOV) {
2682 mlx4_warn(dev, "Disabling SR-IOV\n");
2683 pci_disable_sriov(pdev);
2684 dev->num_vfs = 0;
2685 } 2685 }
2686
2687 if (!mlx4_is_slave(dev))
2688 mlx4_free_ownership(dev);
2689
2690 kfree(dev->caps.qp0_tunnel);
2691 kfree(dev->caps.qp0_proxy);
2692 kfree(dev->caps.qp1_tunnel);
2693 kfree(dev->caps.qp1_proxy);
2694 kfree(dev->dev_vfs);
2695
2696 pci_release_regions(pdev);
2697 pci_disable_device(pdev);
2698 memset(priv, 0, sizeof(*priv));
2699 priv->pci_dev_data = pci_dev_data;
2700 priv->removed = 1;
2701}
2702
2703static void mlx4_remove_one(struct pci_dev *pdev)
2704{
2705 struct mlx4_dev *dev = pci_get_drvdata(pdev);
2706 struct mlx4_priv *priv = mlx4_priv(dev);
2707
2708 __mlx4_remove_one(pdev);
2709 kfree(priv);
2710 pci_set_drvdata(pdev, NULL);
2686} 2711}
2687 2712
2688int mlx4_restart_one(struct pci_dev *pdev) 2713int mlx4_restart_one(struct pci_dev *pdev)
@@ -2692,7 +2717,7 @@ int mlx4_restart_one(struct pci_dev *pdev)
2692 int pci_dev_data; 2717 int pci_dev_data;
2693 2718
2694 pci_dev_data = priv->pci_dev_data; 2719 pci_dev_data = priv->pci_dev_data;
2695 mlx4_remove_one(pdev); 2720 __mlx4_remove_one(pdev);
2696 return __mlx4_init_one(pdev, pci_dev_data); 2721 return __mlx4_init_one(pdev, pci_dev_data);
2697} 2722}
2698 2723
@@ -2747,7 +2772,7 @@ MODULE_DEVICE_TABLE(pci, mlx4_pci_table);
2747static pci_ers_result_t mlx4_pci_err_detected(struct pci_dev *pdev, 2772static pci_ers_result_t mlx4_pci_err_detected(struct pci_dev *pdev,
2748 pci_channel_state_t state) 2773 pci_channel_state_t state)
2749{ 2774{
2750 mlx4_remove_one(pdev); 2775 __mlx4_remove_one(pdev);
2751 2776
2752 return state == pci_channel_io_perm_failure ? 2777 return state == pci_channel_io_perm_failure ?
2753 PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_NEED_RESET; 2778 PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_NEED_RESET;
@@ -2755,11 +2780,11 @@ static pci_ers_result_t mlx4_pci_err_detected(struct pci_dev *pdev,
2755 2780
2756static pci_ers_result_t mlx4_pci_slot_reset(struct pci_dev *pdev) 2781static pci_ers_result_t mlx4_pci_slot_reset(struct pci_dev *pdev)
2757{ 2782{
2758 const struct pci_device_id *id; 2783 struct mlx4_dev *dev = pci_get_drvdata(pdev);
2759 int ret; 2784 struct mlx4_priv *priv = mlx4_priv(dev);
2785 int ret;
2760 2786
2761 id = pci_match_id(mlx4_pci_table, pdev); 2787 ret = __mlx4_init_one(pdev, priv->pci_dev_data);
2762 ret = __mlx4_init_one(pdev, id->driver_data);
2763 2788
2764 return ret ? PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_RECOVERED; 2789 return ret ? PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_RECOVERED;
2765} 2790}
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
index cf8be41abb36..f9c465101963 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
@@ -800,6 +800,7 @@ struct mlx4_priv {
800 spinlock_t ctx_lock; 800 spinlock_t ctx_lock;
801 801
802 int pci_dev_data; 802 int pci_dev_data;
803 int removed;
803 804
804 struct list_head pgdir_list; 805 struct list_head pgdir_list;
805 struct mutex pgdir_mutex; 806 struct mutex pgdir_mutex;
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
index b48737dcd3c5..ba20c721ee97 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
@@ -2139,8 +2139,6 @@ static int qlcnic_83xx_get_nic_configuration(struct qlcnic_adapter *adapter)
2139 ahw->max_mac_filters = nic_info.max_mac_filters; 2139 ahw->max_mac_filters = nic_info.max_mac_filters;
2140 ahw->max_mtu = nic_info.max_mtu; 2140 ahw->max_mtu = nic_info.max_mtu;
2141 2141
2142 adapter->max_tx_rings = ahw->max_tx_ques;
2143 adapter->max_sds_rings = ahw->max_rx_ques;
2144 /* eSwitch capability indicates vNIC mode. 2142 /* eSwitch capability indicates vNIC mode.
2145 * vNIC and SRIOV are mutually exclusive operational modes. 2143 * vNIC and SRIOV are mutually exclusive operational modes.
2146 * If SR-IOV capability is detected, SR-IOV physical function 2144 * If SR-IOV capability is detected, SR-IOV physical function
@@ -2161,6 +2159,7 @@ static int qlcnic_83xx_get_nic_configuration(struct qlcnic_adapter *adapter)
2161int qlcnic_83xx_configure_opmode(struct qlcnic_adapter *adapter) 2159int qlcnic_83xx_configure_opmode(struct qlcnic_adapter *adapter)
2162{ 2160{
2163 struct qlcnic_hardware_context *ahw = adapter->ahw; 2161 struct qlcnic_hardware_context *ahw = adapter->ahw;
2162 u16 max_sds_rings, max_tx_rings;
2164 int ret; 2163 int ret;
2165 2164
2166 ret = qlcnic_83xx_get_nic_configuration(adapter); 2165 ret = qlcnic_83xx_get_nic_configuration(adapter);
@@ -2173,18 +2172,21 @@ int qlcnic_83xx_configure_opmode(struct qlcnic_adapter *adapter)
2173 if (qlcnic_83xx_config_vnic_opmode(adapter)) 2172 if (qlcnic_83xx_config_vnic_opmode(adapter))
2174 return -EIO; 2173 return -EIO;
2175 2174
2176 adapter->max_sds_rings = QLCNIC_MAX_VNIC_SDS_RINGS; 2175 max_sds_rings = QLCNIC_MAX_VNIC_SDS_RINGS;
2177 adapter->max_tx_rings = QLCNIC_MAX_VNIC_TX_RINGS; 2176 max_tx_rings = QLCNIC_MAX_VNIC_TX_RINGS;
2178 } else if (ret == QLC_83XX_DEFAULT_OPMODE) { 2177 } else if (ret == QLC_83XX_DEFAULT_OPMODE) {
2179 ahw->nic_mode = QLCNIC_DEFAULT_MODE; 2178 ahw->nic_mode = QLCNIC_DEFAULT_MODE;
2180 adapter->nic_ops->init_driver = qlcnic_83xx_init_default_driver; 2179 adapter->nic_ops->init_driver = qlcnic_83xx_init_default_driver;
2181 ahw->idc.state_entry = qlcnic_83xx_idc_ready_state_entry; 2180 ahw->idc.state_entry = qlcnic_83xx_idc_ready_state_entry;
2182 adapter->max_sds_rings = QLCNIC_MAX_SDS_RINGS; 2181 max_sds_rings = QLCNIC_MAX_SDS_RINGS;
2183 adapter->max_tx_rings = QLCNIC_MAX_TX_RINGS; 2182 max_tx_rings = QLCNIC_MAX_TX_RINGS;
2184 } else { 2183 } else {
2185 return -EIO; 2184 return -EIO;
2186 } 2185 }
2187 2186
2187 adapter->max_sds_rings = min(ahw->max_rx_ques, max_sds_rings);
2188 adapter->max_tx_rings = min(ahw->max_tx_ques, max_tx_rings);
2189
2188 return 0; 2190 return 0;
2189} 2191}
2190 2192
@@ -2348,15 +2350,16 @@ int qlcnic_83xx_init(struct qlcnic_adapter *adapter, int pci_using_dac)
2348 goto disable_intr; 2350 goto disable_intr;
2349 } 2351 }
2350 2352
2353 INIT_DELAYED_WORK(&adapter->idc_aen_work, qlcnic_83xx_idc_aen_work);
2354
2351 err = qlcnic_83xx_setup_mbx_intr(adapter); 2355 err = qlcnic_83xx_setup_mbx_intr(adapter);
2352 if (err) 2356 if (err)
2353 goto disable_mbx_intr; 2357 goto disable_mbx_intr;
2354 2358
2355 qlcnic_83xx_clear_function_resources(adapter); 2359 qlcnic_83xx_clear_function_resources(adapter);
2356 2360 qlcnic_dcb_enable(adapter->dcb);
2357 INIT_DELAYED_WORK(&adapter->idc_aen_work, qlcnic_83xx_idc_aen_work);
2358
2359 qlcnic_83xx_initialize_nic(adapter, 1); 2361 qlcnic_83xx_initialize_nic(adapter, 1);
2362 qlcnic_dcb_get_info(adapter->dcb);
2360 2363
2361 /* Configure default, SR-IOV or Virtual NIC mode of operation */ 2364 /* Configure default, SR-IOV or Virtual NIC mode of operation */
2362 err = qlcnic_83xx_configure_opmode(adapter); 2365 err = qlcnic_83xx_configure_opmode(adapter);
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c
index 64dcbf33d8f0..c1e11f5715b0 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c
@@ -883,8 +883,6 @@ int qlcnic_82xx_get_nic_info(struct qlcnic_adapter *adapter,
883 npar_info->max_rx_ques = le16_to_cpu(nic_info->max_rx_ques); 883 npar_info->max_rx_ques = le16_to_cpu(nic_info->max_rx_ques);
884 npar_info->capabilities = le32_to_cpu(nic_info->capabilities); 884 npar_info->capabilities = le32_to_cpu(nic_info->capabilities);
885 npar_info->max_mtu = le16_to_cpu(nic_info->max_mtu); 885 npar_info->max_mtu = le16_to_cpu(nic_info->max_mtu);
886 adapter->max_tx_rings = npar_info->max_tx_ques;
887 adapter->max_sds_rings = npar_info->max_rx_ques;
888 } 886 }
889 887
890 qlcnic_free_mbx_args(&cmd); 888 qlcnic_free_mbx_args(&cmd);
@@ -1356,6 +1354,7 @@ int qlcnic_config_switch_port(struct qlcnic_adapter *adapter,
1356 arg2 &= ~BIT_3; 1354 arg2 &= ~BIT_3;
1357 break; 1355 break;
1358 case QLCNIC_ADD_VLAN: 1356 case QLCNIC_ADD_VLAN:
1357 arg1 &= ~(0x0ffff << 16);
1359 arg1 |= (BIT_2 | BIT_5); 1358 arg1 |= (BIT_2 | BIT_5);
1360 arg1 |= (esw_cfg->vlan_id << 16); 1359 arg1 |= (esw_cfg->vlan_id << 16);
1361 break; 1360 break;
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c
index 7d4f54912bad..a51fe18f09a8 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c
@@ -330,8 +330,6 @@ static int __qlcnic_dcb_attach(struct qlcnic_dcb *dcb)
330 goto out_free_cfg; 330 goto out_free_cfg;
331 } 331 }
332 332
333 qlcnic_dcb_get_info(dcb);
334
335 return 0; 333 return 0;
336out_free_cfg: 334out_free_cfg:
337 kfree(dcb->cfg); 335 kfree(dcb->cfg);
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
index 309d05640883..dbf75393f758 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
@@ -670,7 +670,7 @@ int qlcnic_setup_tss_rss_intr(struct qlcnic_adapter *adapter)
670 else 670 else
671 num_msix += adapter->drv_tx_rings; 671 num_msix += adapter->drv_tx_rings;
672 672
673 if (adapter->drv_rss_rings > 0) 673 if (adapter->drv_rss_rings > 0)
674 num_msix += adapter->drv_rss_rings; 674 num_msix += adapter->drv_rss_rings;
675 else 675 else
676 num_msix += adapter->drv_sds_rings; 676 num_msix += adapter->drv_sds_rings;
@@ -686,19 +686,15 @@ int qlcnic_setup_tss_rss_intr(struct qlcnic_adapter *adapter)
686 return -ENOMEM; 686 return -ENOMEM;
687 } 687 }
688 688
689restore:
690 for (vector = 0; vector < num_msix; vector++) 689 for (vector = 0; vector < num_msix; vector++)
691 adapter->msix_entries[vector].entry = vector; 690 adapter->msix_entries[vector].entry = vector;
692 691
692restore:
693 err = pci_enable_msix(pdev, adapter->msix_entries, num_msix); 693 err = pci_enable_msix(pdev, adapter->msix_entries, num_msix);
694 if (err == 0) { 694 if (err > 0) {
695 adapter->ahw->num_msix = num_msix; 695 if (!adapter->drv_tss_rings && !adapter->drv_rss_rings)
696 if (adapter->drv_tss_rings > 0) 696 return -ENOSPC;
697 adapter->drv_tx_rings = adapter->drv_tss_rings;
698 697
699 if (adapter->drv_rss_rings > 0)
700 adapter->drv_sds_rings = adapter->drv_rss_rings;
701 } else {
702 netdev_info(adapter->netdev, 698 netdev_info(adapter->netdev,
703 "Unable to allocate %d MSI-X vectors, Available vectors %d\n", 699 "Unable to allocate %d MSI-X vectors, Available vectors %d\n",
704 num_msix, err); 700 num_msix, err);
@@ -716,12 +712,20 @@ restore:
716 "Restoring %d Tx, %d SDS rings for total %d vectors.\n", 712 "Restoring %d Tx, %d SDS rings for total %d vectors.\n",
717 adapter->drv_tx_rings, adapter->drv_sds_rings, 713 adapter->drv_tx_rings, adapter->drv_sds_rings,
718 num_msix); 714 num_msix);
719 goto restore;
720 715
721 err = -EIO; 716 goto restore;
717 } else if (err < 0) {
718 return err;
722 } 719 }
723 720
724 return err; 721 adapter->ahw->num_msix = num_msix;
722 if (adapter->drv_tss_rings > 0)
723 adapter->drv_tx_rings = adapter->drv_tss_rings;
724
725 if (adapter->drv_rss_rings > 0)
726 adapter->drv_sds_rings = adapter->drv_rss_rings;
727
728 return 0;
725} 729}
726 730
727int qlcnic_enable_msix(struct qlcnic_adapter *adapter, u32 num_msix) 731int qlcnic_enable_msix(struct qlcnic_adapter *adapter, u32 num_msix)
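The rework above untangles the legacy pci_enable_msix() return convention: 0 means every requested vector was granted, a positive value is the number actually available (so the caller can shrink its request and retry via the restore path), and a negative value is a hard error. A compact sketch of that tri-state handling, independent of the driver's ring bookkeeping:

#include <linux/pci.h>

/*
 * Sketch only: pci_enable_msix() was later removed in favour of
 * pci_enable_msix_range()/pci_alloc_irq_vectors(), but at this point
 * the driver still has to handle its three return cases.
 */
static int enable_msix_with_retry(struct pci_dev *pdev,
				  struct msix_entry *entries, int want)
{
	int ret;

	while (want > 0) {
		ret = pci_enable_msix(pdev, entries, want);
		if (ret == 0)
			return want;	/* got everything requested */
		if (ret < 0)
			return ret;	/* hard failure */
		want = ret;		/* fewer available: retry smaller */
	}
	return -ENOSPC;
}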
@@ -2528,8 +2532,6 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
2528 goto err_out_free_hw; 2532 goto err_out_free_hw;
2529 } 2533 }
2530 2534
2531 qlcnic_dcb_enable(adapter->dcb);
2532
2533 if (qlcnic_read_mac_addr(adapter)) 2535 if (qlcnic_read_mac_addr(adapter))
2534 dev_warn(&pdev->dev, "failed to read mac addr\n"); 2536 dev_warn(&pdev->dev, "failed to read mac addr\n");
2535 2537
@@ -2549,7 +2551,10 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
2549 "Device does not support MSI interrupts\n"); 2551 "Device does not support MSI interrupts\n");
2550 2552
2551 if (qlcnic_82xx_check(adapter)) { 2553 if (qlcnic_82xx_check(adapter)) {
2554 qlcnic_dcb_enable(adapter->dcb);
2555 qlcnic_dcb_get_info(adapter->dcb);
2552 err = qlcnic_setup_intr(adapter); 2556 err = qlcnic_setup_intr(adapter);
2557
2553 if (err) { 2558 if (err) {
2554 dev_err(&pdev->dev, "Failed to setup interrupt\n"); 2559 dev_err(&pdev->dev, "Failed to setup interrupt\n");
2555 goto err_out_disable_msi; 2560 goto err_out_disable_msi;
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c
index 14f748cbf0de..280137991544 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c
@@ -461,6 +461,16 @@ static int qlcnic_pci_sriov_disable(struct qlcnic_adapter *adapter)
461{ 461{
462 struct net_device *netdev = adapter->netdev; 462 struct net_device *netdev = adapter->netdev;
463 463
464 if (pci_vfs_assigned(adapter->pdev)) {
465 netdev_err(adapter->netdev,
466 "SR-IOV VFs belonging to port %d are assigned to VMs. SR-IOV can not be disabled on this port\n",
467 adapter->portnum);
468 netdev_info(adapter->netdev,
469 "Please detach SR-IOV VFs belonging to port %d from VMs, and then try to disable SR-IOV on this port\n",
470 adapter->portnum);
471 return -EPERM;
472 }
473
464 rtnl_lock(); 474 rtnl_lock();
465 if (netif_running(netdev)) 475 if (netif_running(netdev))
466 __qlcnic_down(adapter, netdev); 476 __qlcnic_down(adapter, netdev);
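The new check above refuses to disable SR-IOV while any VF on the port is still assigned to a VM, since tearing it down would pull the device out from under the guest. A minimal sketch of the guard, with pci_disable_sriov() standing in for the driver's full teardown:

#include <linux/pci.h>

static int sriov_disable_if_safe(struct pci_dev *pdev)
{
	if (pci_vfs_assigned(pdev))
		return -EPERM;	/* detach the VFs from the VMs first */

	pci_disable_sriov(pdev);
	return 0;
}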
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
index 448d156c3d08..cd346e27f2e1 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
@@ -354,7 +354,7 @@ int qlcnic_is_valid_nic_func(struct qlcnic_adapter *adapter, u8 pci_func)
354{ 354{
355 int i; 355 int i;
356 356
357 for (i = 0; i < adapter->ahw->max_vnic_func; i++) { 357 for (i = 0; i < adapter->ahw->total_nic_func; i++) {
358 if (adapter->npars[i].pci_func == pci_func) 358 if (adapter->npars[i].pci_func == pci_func)
359 return i; 359 return i;
360 } 360 }
@@ -720,6 +720,7 @@ static ssize_t qlcnic_sysfs_read_npar_config(struct file *file,
720 struct qlcnic_adapter *adapter = dev_get_drvdata(dev); 720 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
721 struct qlcnic_npar_func_cfg *np_cfg; 721 struct qlcnic_npar_func_cfg *np_cfg;
722 struct qlcnic_info nic_info; 722 struct qlcnic_info nic_info;
723 u8 pci_func;
723 int i, ret; 724 int i, ret;
724 u32 count; 725 u32 count;
725 726
@@ -729,26 +730,28 @@ static ssize_t qlcnic_sysfs_read_npar_config(struct file *file,
729 730
730 count = size / sizeof(struct qlcnic_npar_func_cfg); 731 count = size / sizeof(struct qlcnic_npar_func_cfg);
731 for (i = 0; i < adapter->ahw->total_nic_func; i++) { 732 for (i = 0; i < adapter->ahw->total_nic_func; i++) {
732 if (qlcnic_is_valid_nic_func(adapter, i) < 0)
733 continue;
734 if (adapter->npars[i].pci_func >= count) { 733 if (adapter->npars[i].pci_func >= count) {
735 dev_dbg(dev, "%s: Total nic functions[%d], App sent function count[%d]\n", 734 dev_dbg(dev, "%s: Total nic functions[%d], App sent function count[%d]\n",
736 __func__, adapter->ahw->total_nic_func, count); 735 __func__, adapter->ahw->total_nic_func, count);
737 continue; 736 continue;
738 } 737 }
739 ret = qlcnic_get_nic_info(adapter, &nic_info, i);
740 if (ret)
741 return ret;
742 if (!adapter->npars[i].eswitch_status) 738 if (!adapter->npars[i].eswitch_status)
743 continue; 739 continue;
744 np_cfg[i].pci_func = i; 740 pci_func = adapter->npars[i].pci_func;
745 np_cfg[i].op_mode = (u8)nic_info.op_mode; 741 if (qlcnic_is_valid_nic_func(adapter, pci_func) < 0)
746 np_cfg[i].port_num = nic_info.phys_port; 742 continue;
747 np_cfg[i].fw_capab = nic_info.capabilities; 743 ret = qlcnic_get_nic_info(adapter, &nic_info, pci_func);
748 np_cfg[i].min_bw = nic_info.min_tx_bw; 744 if (ret)
749 np_cfg[i].max_bw = nic_info.max_tx_bw; 745 return ret;
750 np_cfg[i].max_tx_queues = nic_info.max_tx_ques; 746
751 np_cfg[i].max_rx_queues = nic_info.max_rx_ques; 747 np_cfg[pci_func].pci_func = pci_func;
748 np_cfg[pci_func].op_mode = (u8)nic_info.op_mode;
749 np_cfg[pci_func].port_num = nic_info.phys_port;
750 np_cfg[pci_func].fw_capab = nic_info.capabilities;
751 np_cfg[pci_func].min_bw = nic_info.min_tx_bw;
752 np_cfg[pci_func].max_bw = nic_info.max_tx_bw;
753 np_cfg[pci_func].max_tx_queues = nic_info.max_tx_ques;
754 np_cfg[pci_func].max_rx_queues = nic_info.max_rx_ques;
752 } 755 }
753 return size; 756 return size;
754} 757}
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index 5d5fec6c4eb0..36aa109416c4 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -687,7 +687,7 @@ static void cpsw_rx_handler(void *token, int len, int status)
687 687
688 cpsw_dual_emac_src_port_detect(status, priv, ndev, skb); 688 cpsw_dual_emac_src_port_detect(status, priv, ndev, skb);
689 689
690 if (unlikely(status < 0)) { 690 if (unlikely(status < 0) || unlikely(!netif_running(ndev))) {
691 /* the interface is going down, skbs are purged */ 691 /* the interface is going down, skbs are purged */
692 dev_kfree_skb_any(skb); 692 dev_kfree_skb_any(skb);
693 return; 693 return;
@@ -1201,8 +1201,7 @@ static int cpsw_ndo_open(struct net_device *ndev)
1201 for_each_slave(priv, cpsw_slave_open, priv); 1201 for_each_slave(priv, cpsw_slave_open, priv);
1202 1202
1203 /* Add default VLAN */ 1203 /* Add default VLAN */
1204 if (!priv->data.dual_emac) 1204 cpsw_add_default_vlan(priv);
1205 cpsw_add_default_vlan(priv);
1206 1205
1207 if (!cpsw_common_res_usage_state(priv)) { 1206 if (!cpsw_common_res_usage_state(priv)) {
1208 /* setup tx dma to fixed prio and zero offset */ 1207 /* setup tx dma to fixed prio and zero offset */
@@ -1253,6 +1252,12 @@ static int cpsw_ndo_open(struct net_device *ndev)
1253 cpsw_set_coalesce(ndev, &coal); 1252 cpsw_set_coalesce(ndev, &coal);
1254 } 1253 }
1255 1254
1255 napi_enable(&priv->napi);
1256 cpdma_ctlr_start(priv->dma);
1257 cpsw_intr_enable(priv);
1258 cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_RX);
1259 cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_TX);
1260
1256 prim_cpsw = cpsw_get_slave_priv(priv, 0); 1261 prim_cpsw = cpsw_get_slave_priv(priv, 0);
1257 if (prim_cpsw->irq_enabled == false) { 1262 if (prim_cpsw->irq_enabled == false) {
1258 if ((priv == prim_cpsw) || !netif_running(prim_cpsw->ndev)) { 1263 if ((priv == prim_cpsw) || !netif_running(prim_cpsw->ndev)) {
@@ -1261,12 +1266,6 @@ static int cpsw_ndo_open(struct net_device *ndev)
1261 } 1266 }
1262 } 1267 }
1263 1268
1264 napi_enable(&priv->napi);
1265 cpdma_ctlr_start(priv->dma);
1266 cpsw_intr_enable(priv);
1267 cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_RX);
1268 cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_TX);
1269
1270 if (priv->data.dual_emac) 1269 if (priv->data.dual_emac)
1271 priv->slaves[priv->emac_port].open_stat = true; 1270 priv->slaves[priv->emac_port].open_stat = true;
1272 return 0; 1271 return 0;
diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
index 13010b4dae5b..d18f711d0b0c 100644
--- a/drivers/net/hyperv/hyperv_net.h
+++ b/drivers/net/hyperv/hyperv_net.h
@@ -747,6 +747,7 @@ struct ndis_oject_header {
747#define NDIS_TCP_LARGE_SEND_OFFLOAD_IPV4 0 747#define NDIS_TCP_LARGE_SEND_OFFLOAD_IPV4 0
748#define NDIS_TCP_LARGE_SEND_OFFLOAD_IPV6 1 748#define NDIS_TCP_LARGE_SEND_OFFLOAD_IPV6 1
749 749
750#define VERSION_4_OFFLOAD_SIZE 22
750/* 751/*
751 * New offload OIDs for NDIS 6 752 * New offload OIDs for NDIS 6
752 */ 753 */
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
index daddea2654ce..f7629ecefa84 100644
--- a/drivers/net/hyperv/netvsc.c
+++ b/drivers/net/hyperv/netvsc.c
@@ -344,7 +344,7 @@ static int netvsc_connect_vsp(struct hv_device *device)
344 memset(init_packet, 0, sizeof(struct nvsp_message)); 344 memset(init_packet, 0, sizeof(struct nvsp_message));
345 345
346 if (net_device->nvsp_version <= NVSP_PROTOCOL_VERSION_4) 346 if (net_device->nvsp_version <= NVSP_PROTOCOL_VERSION_4)
347 ndis_version = 0x00050001; 347 ndis_version = 0x00060001;
348 else 348 else
349 ndis_version = 0x0006001e; 349 ndis_version = 0x0006001e;
350 350
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index 4e4cf9e0c8d7..31e55fba7cad 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -319,7 +319,9 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
319 packet = kzalloc(sizeof(struct hv_netvsc_packet) + 319 packet = kzalloc(sizeof(struct hv_netvsc_packet) +
320 (num_data_pgs * sizeof(struct hv_page_buffer)) + 320 (num_data_pgs * sizeof(struct hv_page_buffer)) +
321 sizeof(struct rndis_message) + 321 sizeof(struct rndis_message) +
322 NDIS_VLAN_PPI_SIZE, GFP_ATOMIC); 322 NDIS_VLAN_PPI_SIZE +
323 NDIS_CSUM_PPI_SIZE +
324 NDIS_LSO_PPI_SIZE, GFP_ATOMIC);
323 if (!packet) { 325 if (!packet) {
324 /* out of memory, drop packet */ 326 /* out of memory, drop packet */
325 netdev_err(net, "unable to allocate hv_netvsc_packet\n"); 327 netdev_err(net, "unable to allocate hv_netvsc_packet\n");
@@ -396,7 +398,30 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
396 csum_info->transmit.tcp_checksum = 1; 398 csum_info->transmit.tcp_checksum = 1;
397 csum_info->transmit.tcp_header_offset = hdr_offset; 399 csum_info->transmit.tcp_header_offset = hdr_offset;
398 } else if (net_trans_info & INFO_UDP) { 400 } else if (net_trans_info & INFO_UDP) {
399 csum_info->transmit.udp_checksum = 1; 401 /* UDP checksum offload is not supported on ws2008r2.
402 * Furthermore, on ws2012 and ws2012r2, there are some
403 * issues with udp checksum offload from Linux guests.
404 * (these are host issues).
405 * For now compute the checksum here.
406 */
407 struct udphdr *uh;
408 u16 udp_len;
409
410 ret = skb_cow_head(skb, 0);
411 if (ret)
412 goto drop;
413
414 uh = udp_hdr(skb);
415 udp_len = ntohs(uh->len);
416 uh->check = 0;
417 uh->check = csum_tcpudp_magic(ip_hdr(skb)->saddr,
418 ip_hdr(skb)->daddr,
419 udp_len, IPPROTO_UDP,
420 csum_partial(uh, udp_len, 0));
421 if (uh->check == 0)
422 uh->check = CSUM_MANGLED_0;
423
424 csum_info->transmit.udp_checksum = 0;
400 } 425 }
401 goto do_send; 426 goto do_send;
402 427
@@ -436,6 +461,7 @@ do_send:
436 461
437 ret = netvsc_send(net_device_ctx->device_ctx, packet); 462 ret = netvsc_send(net_device_ctx->device_ctx, packet);
438 463
464drop:
439 if (ret == 0) { 465 if (ret == 0) {
440 net->stats.tx_bytes += skb->len; 466 net->stats.tx_bytes += skb->len;
441 net->stats.tx_packets++; 467 net->stats.tx_packets++;
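The comment in the hunk above explains why the driver computes the UDP checksum itself instead of asking the host to offload it. The recipe is the standard one for IPv4/UDP: zero the checksum field, sum the datagram with csum_partial(), fold in the pseudo-header with csum_tcpudp_magic(), and map an all-zero result to CSUM_MANGLED_0 because zero means "no checksum" for UDP. A sketch, assuming the UDP header and payload are linear in the skb (the driver ensures a writable header with skb_cow_head() first):

#include <linux/skbuff.h>
#include <linux/ip.h>
#include <linux/udp.h>
#include <net/checksum.h>

static void udp4_csum_in_software(struct sk_buff *skb)
{
	struct udphdr *uh = udp_hdr(skb);
	u16 udp_len = ntohs(uh->len);

	uh->check = 0;
	uh->check = csum_tcpudp_magic(ip_hdr(skb)->saddr,
				      ip_hdr(skb)->daddr,
				      udp_len, IPPROTO_UDP,
				      csum_partial(uh, udp_len, 0));
	if (uh->check == 0)
		uh->check = CSUM_MANGLED_0;	/* 0 = "no checksum" for UDP */
}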
diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c
index 4a37e3db9e32..143a98caf618 100644
--- a/drivers/net/hyperv/rndis_filter.c
+++ b/drivers/net/hyperv/rndis_filter.c
@@ -641,6 +641,16 @@ int rndis_filter_set_offload_params(struct hv_device *hdev,
641 struct rndis_set_complete *set_complete; 641 struct rndis_set_complete *set_complete;
642 u32 extlen = sizeof(struct ndis_offload_params); 642 u32 extlen = sizeof(struct ndis_offload_params);
643 int ret, t; 643 int ret, t;
644 u32 vsp_version = nvdev->nvsp_version;
645
646 if (vsp_version <= NVSP_PROTOCOL_VERSION_4) {
647 extlen = VERSION_4_OFFLOAD_SIZE;
648 /* On NVSP_PROTOCOL_VERSION_4 and below, we do not support
649 * UDP checksum offload.
650 */
651 req_offloads->udp_ip_v4_csum = 0;
652 req_offloads->udp_ip_v6_csum = 0;
653 }
644 654
645 request = get_rndis_request(rdev, RNDIS_MSG_SET, 655 request = get_rndis_request(rdev, RNDIS_MSG_SET,
646 RNDIS_MESSAGE_SIZE(struct rndis_set_request) + extlen); 656 RNDIS_MESSAGE_SIZE(struct rndis_set_request) + extlen);
@@ -674,7 +684,7 @@ int rndis_filter_set_offload_params(struct hv_device *hdev,
674 } else { 684 } else {
675 set_complete = &request->response_msg.msg.set_complete; 685 set_complete = &request->response_msg.msg.set_complete;
676 if (set_complete->status != RNDIS_STATUS_SUCCESS) { 686 if (set_complete->status != RNDIS_STATUS_SUCCESS) {
677 netdev_err(ndev, "Fail to set MAC on host side:0x%x\n", 687 netdev_err(ndev, "Fail to set offload on host side:0x%x\n",
678 set_complete->status); 688 set_complete->status);
679 ret = -EINVAL; 689 ret = -EINVAL;
680 } 690 }
diff --git a/drivers/net/ieee802154/at86rf230.c b/drivers/net/ieee802154/at86rf230.c
index 430bb0db9bc4..e36f194673a4 100644
--- a/drivers/net/ieee802154/at86rf230.c
+++ b/drivers/net/ieee802154/at86rf230.c
@@ -365,7 +365,7 @@ __at86rf230_read_subreg(struct at86rf230_local *lp,
365 dev_vdbg(&lp->spi->dev, "buf[1] = %02x\n", buf[1]); 365 dev_vdbg(&lp->spi->dev, "buf[1] = %02x\n", buf[1]);
366 366
367 if (status == 0) 367 if (status == 0)
368 *data = buf[1]; 368 *data = (buf[1] & mask) >> shift;
369 369
370 return status; 370 return status;
371} 371}
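The one-line fix above makes the sub-register read honour the field's mask and shift instead of handing back the whole register byte. The extraction itself reduces to:

#include <linux/types.h>

/* Illustrative helper, not the driver's real accessor. */
static u8 extract_subreg(u8 reg_val, u8 mask, u8 shift)
{
	return (reg_val & mask) >> shift;
}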
@@ -1025,14 +1025,6 @@ static int at86rf230_hw_init(struct at86rf230_local *lp)
1025 return -EINVAL; 1025 return -EINVAL;
1026 } 1026 }
1027 1027
1028 rc = at86rf230_read_subreg(lp, SR_AVDD_OK, &status);
1029 if (rc)
1030 return rc;
1031 if (!status) {
1032 dev_err(&lp->spi->dev, "AVDD error\n");
1033 return -EINVAL;
1034 }
1035
1036 return 0; 1028 return 0;
1037} 1029}
1038 1030
diff --git a/drivers/net/ntb_netdev.c b/drivers/net/ntb_netdev.c
index f3cdf64997d6..63aa9d9e34c5 100644
--- a/drivers/net/ntb_netdev.c
+++ b/drivers/net/ntb_netdev.c
@@ -78,11 +78,19 @@ static void ntb_netdev_event_handler(void *data, int status)
78 netdev_dbg(ndev, "Event %x, Link %x\n", status, 78 netdev_dbg(ndev, "Event %x, Link %x\n", status,
79 ntb_transport_link_query(dev->qp)); 79 ntb_transport_link_query(dev->qp));
80 80
81 /* Currently, only link status event is supported */ 81 switch (status) {
82 if (status) 82 case NTB_LINK_DOWN:
83 netif_carrier_on(ndev);
84 else
85 netif_carrier_off(ndev); 83 netif_carrier_off(ndev);
84 break;
85 case NTB_LINK_UP:
86 if (!ntb_transport_link_query(dev->qp))
87 return;
88
89 netif_carrier_on(ndev);
90 break;
91 default:
92 netdev_warn(ndev, "Unsupported event type %d\n", status);
93 }
86} 94}
87 95
88static void ntb_netdev_rx_handler(struct ntb_transport_qp *qp, void *qp_data, 96static void ntb_netdev_rx_handler(struct ntb_transport_qp *qp, void *qp_data,
@@ -182,8 +190,10 @@ static int ntb_netdev_open(struct net_device *ndev)
182 190
183 rc = ntb_transport_rx_enqueue(dev->qp, skb, skb->data, 191 rc = ntb_transport_rx_enqueue(dev->qp, skb, skb->data,
184 ndev->mtu + ETH_HLEN); 192 ndev->mtu + ETH_HLEN);
185 if (rc == -EINVAL) 193 if (rc == -EINVAL) {
194 dev_kfree_skb(skb);
186 goto err; 195 goto err;
196 }
187 } 197 }
188 198
189 netif_carrier_off(ndev); 199 netif_carrier_off(ndev);
@@ -367,12 +377,15 @@ static void ntb_netdev_remove(struct pci_dev *pdev)
367{ 377{
368 struct net_device *ndev; 378 struct net_device *ndev;
369 struct ntb_netdev *dev; 379 struct ntb_netdev *dev;
380 bool found = false;
370 381
371 list_for_each_entry(dev, &dev_list, list) { 382 list_for_each_entry(dev, &dev_list, list) {
372 if (dev->pdev == pdev) 383 if (dev->pdev == pdev) {
384 found = true;
373 break; 385 break;
386 }
374 } 387 }
375 if (dev == NULL) 388 if (!found)
376 return; 389 return;
377 390
378 list_del(&dev->list); 391 list_del(&dev->list);
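The found flag added above fixes a classic pitfall: after list_for_each_entry() runs to completion the cursor is never NULL (it points at the structure that would contain the list head), so the old dev == NULL test could never fire. A sketch of the safe lookup pattern:

#include <linux/list.h>

struct item {
	struct list_head list;
	int key;
};

/* Returning from inside the loop is the other common way to signal a match. */
static struct item *find_item(struct list_head *head, int key)
{
	struct item *it;

	list_for_each_entry(it, head, list) {
		if (it->key == key)
			return it;	/* found: 'it' is valid */
	}
	return NULL;			/* full traversal: no match */
}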
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index 1d788f19135b..1b6d09aef427 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -756,12 +756,8 @@ void phy_state_machine(struct work_struct *work)
756 netif_carrier_on(phydev->attached_dev); 756 netif_carrier_on(phydev->attached_dev);
757 phydev->adjust_link(phydev->attached_dev); 757 phydev->adjust_link(phydev->attached_dev);
758 758
759 } else if (0 == phydev->link_timeout--) { 759 } else if (0 == phydev->link_timeout--)
760 needs_aneg = 1; 760 needs_aneg = 1;
761 /* If we have the magic_aneg bit, we try again */
762 if (phydev->drv->flags & PHY_HAS_MAGICANEG)
763 break;
764 }
765 break; 761 break;
766 case PHY_NOLINK: 762 case PHY_NOLINK:
767 err = phy_read_status(phydev); 763 err = phy_read_status(phydev);
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index 18e12a3f7fc3..3fbfb0869030 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -929,6 +929,9 @@ static int read_mii_word(struct net_device *netdev, int phy_id, int reg)
929 struct r8152 *tp = netdev_priv(netdev); 929 struct r8152 *tp = netdev_priv(netdev);
930 int ret; 930 int ret;
931 931
932 if (test_bit(RTL8152_UNPLUG, &tp->flags))
933 return -ENODEV;
934
932 if (phy_id != R8152_PHY_ID) 935 if (phy_id != R8152_PHY_ID)
933 return -EINVAL; 936 return -EINVAL;
934 937
@@ -949,6 +952,9 @@ void write_mii_word(struct net_device *netdev, int phy_id, int reg, int val)
949{ 952{
950 struct r8152 *tp = netdev_priv(netdev); 953 struct r8152 *tp = netdev_priv(netdev);
951 954
955 if (test_bit(RTL8152_UNPLUG, &tp->flags))
956 return;
957
952 if (phy_id != R8152_PHY_ID) 958 if (phy_id != R8152_PHY_ID)
953 return; 959 return;
954 960
@@ -1962,6 +1968,9 @@ static int rtl_enable(struct r8152 *tp)
1962 1968
1963static int rtl8152_enable(struct r8152 *tp) 1969static int rtl8152_enable(struct r8152 *tp)
1964{ 1970{
1971 if (test_bit(RTL8152_UNPLUG, &tp->flags))
1972 return -ENODEV;
1973
1965 set_tx_qlen(tp); 1974 set_tx_qlen(tp);
1966 rtl_set_eee_plus(tp); 1975 rtl_set_eee_plus(tp);
1967 1976
@@ -1994,6 +2003,9 @@ static void r8153_set_rx_agg(struct r8152 *tp)
1994 2003
1995static int rtl8153_enable(struct r8152 *tp) 2004static int rtl8153_enable(struct r8152 *tp)
1996{ 2005{
2006 if (test_bit(RTL8152_UNPLUG, &tp->flags))
2007 return -ENODEV;
2008
1997 set_tx_qlen(tp); 2009 set_tx_qlen(tp);
1998 rtl_set_eee_plus(tp); 2010 rtl_set_eee_plus(tp);
1999 r8153_set_rx_agg(tp); 2011 r8153_set_rx_agg(tp);
@@ -2006,6 +2018,11 @@ static void rtl8152_disable(struct r8152 *tp)
2006 u32 ocp_data; 2018 u32 ocp_data;
2007 int i; 2019 int i;
2008 2020
2021 if (test_bit(RTL8152_UNPLUG, &tp->flags)) {
2022 rtl_drop_queued_tx(tp);
2023 return;
2024 }
2025
2009 ocp_data = ocp_read_dword(tp, MCU_TYPE_PLA, PLA_RCR); 2026 ocp_data = ocp_read_dword(tp, MCU_TYPE_PLA, PLA_RCR);
2010 ocp_data &= ~RCR_ACPT_ALL; 2027 ocp_data &= ~RCR_ACPT_ALL;
2011 ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RCR, ocp_data); 2028 ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RCR, ocp_data);
@@ -2232,6 +2249,9 @@ static void r8152b_exit_oob(struct r8152 *tp)
2232 u32 ocp_data; 2249 u32 ocp_data;
2233 int i; 2250 int i;
2234 2251
2252 if (test_bit(RTL8152_UNPLUG, &tp->flags))
2253 return;
2254
2235 ocp_data = ocp_read_dword(tp, MCU_TYPE_PLA, PLA_RCR); 2255 ocp_data = ocp_read_dword(tp, MCU_TYPE_PLA, PLA_RCR);
2236 ocp_data &= ~RCR_ACPT_ALL; 2256 ocp_data &= ~RCR_ACPT_ALL;
2237 ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RCR, ocp_data); 2257 ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RCR, ocp_data);
@@ -2460,6 +2480,9 @@ static void r8153_first_init(struct r8152 *tp)
2460 u32 ocp_data; 2480 u32 ocp_data;
2461 int i; 2481 int i;
2462 2482
2483 if (test_bit(RTL8152_UNPLUG, &tp->flags))
2484 return;
2485
2463 rxdy_gated_en(tp, true); 2486 rxdy_gated_en(tp, true);
2464 r8153_teredo_off(tp); 2487 r8153_teredo_off(tp);
2465 2488
@@ -2687,6 +2710,11 @@ out:
2687 2710
2688static void rtl8152_down(struct r8152 *tp) 2711static void rtl8152_down(struct r8152 *tp)
2689{ 2712{
2713 if (test_bit(RTL8152_UNPLUG, &tp->flags)) {
2714 rtl_drop_queued_tx(tp);
2715 return;
2716 }
2717
2690 r8152_power_cut_en(tp, false); 2718 r8152_power_cut_en(tp, false);
2691 r8152b_disable_aldps(tp); 2719 r8152b_disable_aldps(tp);
2692 r8152b_enter_oob(tp); 2720 r8152b_enter_oob(tp);
@@ -2695,6 +2723,11 @@ static void rtl8152_down(struct r8152 *tp)
2695 2723
2696static void rtl8153_down(struct r8152 *tp) 2724static void rtl8153_down(struct r8152 *tp)
2697{ 2725{
2726 if (test_bit(RTL8152_UNPLUG, &tp->flags)) {
2727 rtl_drop_queued_tx(tp);
2728 return;
2729 }
2730
2698 r8153_u1u2en(tp, false); 2731 r8153_u1u2en(tp, false);
2699 r8153_power_cut_en(tp, false); 2732 r8153_power_cut_en(tp, false);
2700 r8153_disable_aldps(tp); 2733 r8153_disable_aldps(tp);
@@ -2904,6 +2937,9 @@ static void r8152b_init(struct r8152 *tp)
2904{ 2937{
2905 u32 ocp_data; 2938 u32 ocp_data;
2906 2939
2940 if (test_bit(RTL8152_UNPLUG, &tp->flags))
2941 return;
2942
2907 if (tp->version == RTL_VER_01) { 2943 if (tp->version == RTL_VER_01) {
2908 ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_LED_FEATURE); 2944 ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_LED_FEATURE);
2909 ocp_data &= ~LED_MODE_MASK; 2945 ocp_data &= ~LED_MODE_MASK;
@@ -2939,6 +2975,9 @@ static void r8153_init(struct r8152 *tp)
2939 u32 ocp_data; 2975 u32 ocp_data;
2940 int i; 2976 int i;
2941 2977
2978 if (test_bit(RTL8152_UNPLUG, &tp->flags))
2979 return;
2980
2942 r8153_u1u2en(tp, false); 2981 r8153_u1u2en(tp, false);
2943 2982
2944 for (i = 0; i < 500; i++) { 2983 for (i = 0; i < 500; i++) {
@@ -3213,6 +3252,9 @@ static int rtl8152_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd)
3213 struct mii_ioctl_data *data = if_mii(rq); 3252 struct mii_ioctl_data *data = if_mii(rq);
3214 int res; 3253 int res;
3215 3254
3255 if (test_bit(RTL8152_UNPLUG, &tp->flags))
3256 return -ENODEV;
3257
3216 res = usb_autopm_get_interface(tp->intf); 3258 res = usb_autopm_get_interface(tp->intf);
3217 if (res < 0) 3259 if (res < 0)
3218 goto out; 3260 goto out;
@@ -3293,12 +3335,18 @@ static void r8152b_get_version(struct r8152 *tp)
3293 3335
3294static void rtl8152_unload(struct r8152 *tp) 3336static void rtl8152_unload(struct r8152 *tp)
3295{ 3337{
3338 if (test_bit(RTL8152_UNPLUG, &tp->flags))
3339 return;
3340
3296 if (tp->version != RTL_VER_01) 3341 if (tp->version != RTL_VER_01)
3297 r8152_power_cut_en(tp, true); 3342 r8152_power_cut_en(tp, true);
3298} 3343}
3299 3344
3300static void rtl8153_unload(struct r8152 *tp) 3345static void rtl8153_unload(struct r8152 *tp)
3301{ 3346{
3347 if (test_bit(RTL8152_UNPLUG, &tp->flags))
3348 return;
3349
3302 r8153_power_cut_en(tp, true); 3350 r8153_power_cut_en(tp, true);
3303} 3351}
3304 3352
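All of the r8152 hunks above add the same guard: once the UNPLUG flag is set the device is gone, so routines that would otherwise poke registers return early (dropping any queued TX where needed) rather than issue USB transfers that can only time out. The pattern, with illustrative names in place of the driver's real flag and struct:

#include <linux/bitops.h>

#define UNPLUG_BIT	0

struct dev_priv {
	unsigned long flags;	/* set_bit(UNPLUG_BIT, ...) on disconnect */
};

static bool hw_is_gone(const struct dev_priv *priv)
{
	return test_bit(UNPLUG_BIT, &priv->flags);
}

static void hw_down(struct dev_priv *priv)
{
	if (hw_is_gone(priv))
		return;		/* device unplugged: skip register I/O */

	/* ... normal register writes to quiesce the hardware ... */
}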
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index c55e316373a1..82355d5d155a 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -1755,8 +1755,8 @@ int vxlan_xmit_skb(struct vxlan_sock *vs,
1755 if (err) 1755 if (err)
1756 return err; 1756 return err;
1757 1757
1758 return iptunnel_xmit(rt, skb, src, dst, IPPROTO_UDP, tos, ttl, df, 1758 return iptunnel_xmit(vs->sock->sk, rt, skb, src, dst, IPPROTO_UDP,
1759 false); 1759 tos, ttl, df, false);
1760} 1760}
1761EXPORT_SYMBOL_GPL(vxlan_xmit_skb); 1761EXPORT_SYMBOL_GPL(vxlan_xmit_skb);
1762 1762
diff --git a/drivers/net/wan/cosa.c b/drivers/net/wan/cosa.c
index 84734a805092..83c39e2858bf 100644
--- a/drivers/net/wan/cosa.c
+++ b/drivers/net/wan/cosa.c
@@ -1521,11 +1521,7 @@ static int cosa_reset_and_read_id(struct cosa_data *cosa, char *idstring)
1521 cosa_putstatus(cosa, 0); 1521 cosa_putstatus(cosa, 0);
1522 cosa_getdata8(cosa); 1522 cosa_getdata8(cosa);
1523 cosa_putstatus(cosa, SR_RST); 1523 cosa_putstatus(cosa, SR_RST);
1524#ifdef MODULE
1525 msleep(500); 1524 msleep(500);
1526#else
1527 udelay(5*100000);
1528#endif
1529 /* Disable all IRQs from the card */ 1525 /* Disable all IRQs from the card */
1530 cosa_putstatus(cosa, 0); 1526 cosa_putstatus(cosa, 0);
1531 1527
diff --git a/drivers/net/wireless/ath/ath9k/ar5008_phy.c b/drivers/net/wireless/ath/ath9k/ar5008_phy.c
index 3b3e91057a4c..00fb8badbacc 100644
--- a/drivers/net/wireless/ath/ath9k/ar5008_phy.c
+++ b/drivers/net/wireless/ath/ath9k/ar5008_phy.c
@@ -1004,11 +1004,9 @@ static bool ar5008_hw_ani_control_new(struct ath_hw *ah,
1004 case ATH9K_ANI_FIRSTEP_LEVEL:{ 1004 case ATH9K_ANI_FIRSTEP_LEVEL:{
1005 u32 level = param; 1005 u32 level = param;
1006 1006
1007 value = level * 2; 1007 value = level;
1008 REG_RMW_FIELD(ah, AR_PHY_FIND_SIG, 1008 REG_RMW_FIELD(ah, AR_PHY_FIND_SIG,
1009 AR_PHY_FIND_SIG_FIRSTEP, value); 1009 AR_PHY_FIND_SIG_FIRSTEP, value);
1010 REG_RMW_FIELD(ah, AR_PHY_FIND_SIG_LOW,
1011 AR_PHY_FIND_SIG_FIRSTEP_LOW, value);
1012 1010
1013 if (level != aniState->firstepLevel) { 1011 if (level != aniState->firstepLevel) {
1014 ath_dbg(common, ANI, 1012 ath_dbg(common, ANI,
diff --git a/drivers/net/wireless/ath/ath9k/beacon.c b/drivers/net/wireless/ath/ath9k/beacon.c
index 471e0f624e81..bd9e634879e6 100644
--- a/drivers/net/wireless/ath/ath9k/beacon.c
+++ b/drivers/net/wireless/ath/ath9k/beacon.c
@@ -312,10 +312,9 @@ static void ath9k_csa_update_vif(void *data, u8 *mac, struct ieee80211_vif *vif)
312 312
313void ath9k_csa_update(struct ath_softc *sc) 313void ath9k_csa_update(struct ath_softc *sc)
314{ 314{
315 ieee80211_iterate_active_interfaces(sc->hw, 315 ieee80211_iterate_active_interfaces_atomic(sc->hw,
316 IEEE80211_IFACE_ITER_NORMAL, 316 IEEE80211_IFACE_ITER_NORMAL,
317 ath9k_csa_update_vif, 317 ath9k_csa_update_vif, sc);
318 sc);
319} 318}
320 319
321void ath9k_beacon_tasklet(unsigned long data) 320void ath9k_beacon_tasklet(unsigned long data)
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
index e8149e3dbdd5..289f3d8924b5 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
@@ -471,8 +471,11 @@ static void ath9k_htc_tx_process(struct ath9k_htc_priv *priv,
471 if (!txok || !vif || !txs) 471 if (!txok || !vif || !txs)
472 goto send_mac80211; 472 goto send_mac80211;
473 473
474 if (txs->ts_flags & ATH9K_HTC_TXSTAT_ACK) 474 if (txs->ts_flags & ATH9K_HTC_TXSTAT_ACK) {
475 tx_info->flags |= IEEE80211_TX_STAT_ACK; 475 tx_info->flags |= IEEE80211_TX_STAT_ACK;
476 if (tx_info->flags & IEEE80211_TX_CTL_AMPDU)
477 tx_info->flags |= IEEE80211_TX_STAT_AMPDU;
478 }
476 479
477 if (txs->ts_flags & ATH9K_HTC_TXSTAT_FILT) 480 if (txs->ts_flags & ATH9K_HTC_TXSTAT_FILT)
478 tx_info->flags |= IEEE80211_TX_STAT_TX_FILTERED; 481 tx_info->flags |= IEEE80211_TX_STAT_TX_FILTERED;
diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c
index c0a4e866edca..cbbb02a6b13b 100644
--- a/drivers/net/wireless/ath/ath9k/init.c
+++ b/drivers/net/wireless/ath/ath9k/init.c
@@ -670,6 +670,7 @@ static const struct ieee80211_iface_combination if_comb[] = {
670 .num_different_channels = 1, 670 .num_different_channels = 1,
671 .beacon_int_infra_match = true, 671 .beacon_int_infra_match = true,
672 }, 672 },
673#ifdef CONFIG_ATH9K_DFS_CERTIFIED
673 { 674 {
674 .limits = if_dfs_limits, 675 .limits = if_dfs_limits,
675 .n_limits = ARRAY_SIZE(if_dfs_limits), 676 .n_limits = ARRAY_SIZE(if_dfs_limits),
@@ -679,6 +680,7 @@ static const struct ieee80211_iface_combination if_comb[] = {
679 .radar_detect_widths = BIT(NL80211_CHAN_WIDTH_20_NOHT) | 680 .radar_detect_widths = BIT(NL80211_CHAN_WIDTH_20_NOHT) |
680 BIT(NL80211_CHAN_WIDTH_20), 681 BIT(NL80211_CHAN_WIDTH_20),
681 } 682 }
683#endif
682}; 684};
683 685
684static void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw) 686static void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw)
diff --git a/drivers/net/wireless/b43/phy_n.c b/drivers/net/wireless/b43/phy_n.c
index 05ee7f10cc8f..24ccbe96e0c8 100644
--- a/drivers/net/wireless/b43/phy_n.c
+++ b/drivers/net/wireless/b43/phy_n.c
@@ -5176,22 +5176,22 @@ static void b43_nphy_channel_setup(struct b43_wldev *dev,
5176 int ch = new_channel->hw_value; 5176 int ch = new_channel->hw_value;
5177 5177
5178 u16 old_band_5ghz; 5178 u16 old_band_5ghz;
5179 u32 tmp32; 5179 u16 tmp16;
5180 5180
5181 old_band_5ghz = 5181 old_band_5ghz =
5182 b43_phy_read(dev, B43_NPHY_BANDCTL) & B43_NPHY_BANDCTL_5GHZ; 5182 b43_phy_read(dev, B43_NPHY_BANDCTL) & B43_NPHY_BANDCTL_5GHZ;
5183 if (new_channel->band == IEEE80211_BAND_5GHZ && !old_band_5ghz) { 5183 if (new_channel->band == IEEE80211_BAND_5GHZ && !old_band_5ghz) {
5184 tmp32 = b43_read32(dev, B43_MMIO_PSM_PHY_HDR); 5184 tmp16 = b43_read16(dev, B43_MMIO_PSM_PHY_HDR);
5185 b43_write32(dev, B43_MMIO_PSM_PHY_HDR, tmp32 | 4); 5185 b43_write16(dev, B43_MMIO_PSM_PHY_HDR, tmp16 | 4);
5186 b43_phy_set(dev, B43_PHY_B_BBCFG, 0xC000); 5186 b43_phy_set(dev, B43_PHY_B_BBCFG, 0xC000);
5187 b43_write32(dev, B43_MMIO_PSM_PHY_HDR, tmp32); 5187 b43_write16(dev, B43_MMIO_PSM_PHY_HDR, tmp16);
5188 b43_phy_set(dev, B43_NPHY_BANDCTL, B43_NPHY_BANDCTL_5GHZ); 5188 b43_phy_set(dev, B43_NPHY_BANDCTL, B43_NPHY_BANDCTL_5GHZ);
5189 } else if (new_channel->band == IEEE80211_BAND_2GHZ && old_band_5ghz) { 5189 } else if (new_channel->band == IEEE80211_BAND_2GHZ && old_band_5ghz) {
5190 b43_phy_mask(dev, B43_NPHY_BANDCTL, ~B43_NPHY_BANDCTL_5GHZ); 5190 b43_phy_mask(dev, B43_NPHY_BANDCTL, ~B43_NPHY_BANDCTL_5GHZ);
5191 tmp32 = b43_read32(dev, B43_MMIO_PSM_PHY_HDR); 5191 tmp16 = b43_read16(dev, B43_MMIO_PSM_PHY_HDR);
5192 b43_write32(dev, B43_MMIO_PSM_PHY_HDR, tmp32 | 4); 5192 b43_write16(dev, B43_MMIO_PSM_PHY_HDR, tmp16 | 4);
5193 b43_phy_mask(dev, B43_PHY_B_BBCFG, 0x3FFF); 5193 b43_phy_mask(dev, B43_PHY_B_BBCFG, 0x3FFF);
5194 b43_write32(dev, B43_MMIO_PSM_PHY_HDR, tmp32); 5194 b43_write16(dev, B43_MMIO_PSM_PHY_HDR, tmp16);
5195 } 5195 }
5196 5196
5197 b43_chantab_phy_upload(dev, e); 5197 b43_chantab_phy_upload(dev, e);
diff --git a/drivers/net/wireless/rsi/rsi_91x_core.c b/drivers/net/wireless/rsi/rsi_91x_core.c
index e89535e86caf..1a8d32138593 100644
--- a/drivers/net/wireless/rsi/rsi_91x_core.c
+++ b/drivers/net/wireless/rsi/rsi_91x_core.c
@@ -102,10 +102,10 @@ static u8 rsi_core_determine_hal_queue(struct rsi_common *common)
102 } 102 }
103 103
104get_queue_num: 104get_queue_num:
105 q_num = 0;
106 recontend_queue = false; 105 recontend_queue = false;
107 106
108 q_num = rsi_determine_min_weight_queue(common); 107 q_num = rsi_determine_min_weight_queue(common);
108
109 q_len = skb_queue_len(&common->tx_queue[ii]); 109 q_len = skb_queue_len(&common->tx_queue[ii]);
110 ii = q_num; 110 ii = q_num;
111 111
@@ -118,7 +118,9 @@ get_queue_num:
118 } 118 }
119 } 119 }
120 120
121 common->tx_qinfo[q_num].pkt_contended = 0; 121 if (q_num < NUM_EDCA_QUEUES)
122 common->tx_qinfo[q_num].pkt_contended = 0;
123
122 /* Adjust the back off values for all queues again */ 124 /* Adjust the back off values for all queues again */
123 recontend_queue = rsi_recalculate_weights(common); 125 recontend_queue = rsi_recalculate_weights(common);
124 126
diff --git a/drivers/net/wireless/rsi/rsi_91x_debugfs.c b/drivers/net/wireless/rsi/rsi_91x_debugfs.c
index 7e4ef4554411..c466246a323f 100644
--- a/drivers/net/wireless/rsi/rsi_91x_debugfs.c
+++ b/drivers/net/wireless/rsi/rsi_91x_debugfs.c
@@ -289,32 +289,29 @@ int rsi_init_dbgfs(struct rsi_hw *adapter)
289 const struct rsi_dbg_files *files; 289 const struct rsi_dbg_files *files;
290 290
291 dev_dbgfs = kzalloc(sizeof(*dev_dbgfs), GFP_KERNEL); 291 dev_dbgfs = kzalloc(sizeof(*dev_dbgfs), GFP_KERNEL);
292 if (!dev_dbgfs)
293 return -ENOMEM;
294
292 adapter->dfsentry = dev_dbgfs; 295 adapter->dfsentry = dev_dbgfs;
293 296
294 snprintf(devdir, sizeof(devdir), "%s", 297 snprintf(devdir, sizeof(devdir), "%s",
295 wiphy_name(adapter->hw->wiphy)); 298 wiphy_name(adapter->hw->wiphy));
296 dev_dbgfs->subdir = debugfs_create_dir(devdir, NULL);
297 299
298 if (IS_ERR(dev_dbgfs->subdir)) { 300 dev_dbgfs->subdir = debugfs_create_dir(devdir, NULL);
299 if (dev_dbgfs->subdir == ERR_PTR(-ENODEV))
300 rsi_dbg(ERR_ZONE,
301 "%s:Debugfs has not been mounted\n", __func__);
302 else
303 rsi_dbg(ERR_ZONE, "debugfs:%s not created\n", devdir);
304 301
305 adapter->dfsentry = NULL; 302 if (!dev_dbgfs->subdir) {
306 kfree(dev_dbgfs); 303 kfree(dev_dbgfs);
307 return (int)PTR_ERR(dev_dbgfs->subdir); 304 return -ENOMEM;
308 } else { 305 }
309 for (ii = 0; ii < adapter->num_debugfs_entries; ii++) { 306
310 files = &dev_debugfs_files[ii]; 307 for (ii = 0; ii < adapter->num_debugfs_entries; ii++) {
311 dev_dbgfs->rsi_files[ii] = 308 files = &dev_debugfs_files[ii];
312 debugfs_create_file(files->name, 309 dev_dbgfs->rsi_files[ii] =
313 files->perms, 310 debugfs_create_file(files->name,
314 dev_dbgfs->subdir, 311 files->perms,
315 common, 312 dev_dbgfs->subdir,
316 &files->fops); 313 common,
317 } 314 &files->fops);
318 } 315 }
319 return 0; 316 return 0;
320} 317}
diff --git a/drivers/net/wireless/rsi/rsi_91x_mgmt.c b/drivers/net/wireless/rsi/rsi_91x_mgmt.c
index 2361a6849ad7..73694295648f 100644
--- a/drivers/net/wireless/rsi/rsi_91x_mgmt.c
+++ b/drivers/net/wireless/rsi/rsi_91x_mgmt.c
@@ -738,7 +738,7 @@ int rsi_hal_load_key(struct rsi_common *common,
738 * 738 *
739 * Return: 0 on success, corresponding error code on failure. 739 * Return: 0 on success, corresponding error code on failure.
740 */ 740 */
741static u8 rsi_load_bootup_params(struct rsi_common *common) 741static int rsi_load_bootup_params(struct rsi_common *common)
742{ 742{
743 struct sk_buff *skb; 743 struct sk_buff *skb;
744 struct rsi_boot_params *boot_params; 744 struct rsi_boot_params *boot_params;
@@ -1272,6 +1272,7 @@ int rsi_mgmt_pkt_recv(struct rsi_common *common, u8 *msg)
1272{ 1272{
1273 s32 msg_len = (le16_to_cpu(*(__le16 *)&msg[0]) & 0x0fff); 1273 s32 msg_len = (le16_to_cpu(*(__le16 *)&msg[0]) & 0x0fff);
1274 u16 msg_type = (msg[2]); 1274 u16 msg_type = (msg[2]);
1275 int ret;
1275 1276
1276 rsi_dbg(FSM_ZONE, "%s: Msg Len: %d, Msg Type: %4x\n", 1277 rsi_dbg(FSM_ZONE, "%s: Msg Len: %d, Msg Type: %4x\n",
1277 __func__, msg_len, msg_type); 1278 __func__, msg_len, msg_type);
@@ -1284,8 +1285,9 @@ int rsi_mgmt_pkt_recv(struct rsi_common *common, u8 *msg)
1284 if (common->fsm_state == FSM_CARD_NOT_READY) { 1285 if (common->fsm_state == FSM_CARD_NOT_READY) {
1285 rsi_set_default_parameters(common); 1286 rsi_set_default_parameters(common);
1286 1287
1287 if (rsi_load_bootup_params(common)) 1288 ret = rsi_load_bootup_params(common);
1288 return -ENOMEM; 1289 if (ret)
1290 return ret;
1289 else 1291 else
1290 common->fsm_state = FSM_BOOT_PARAMS_SENT; 1292 common->fsm_state = FSM_BOOT_PARAMS_SENT;
1291 } else { 1293 } else {
diff --git a/drivers/net/wireless/rsi/rsi_91x_sdio.c b/drivers/net/wireless/rsi/rsi_91x_sdio.c
index 852453f386e2..2e39d38d6a9e 100644
--- a/drivers/net/wireless/rsi/rsi_91x_sdio.c
+++ b/drivers/net/wireless/rsi/rsi_91x_sdio.c
@@ -756,12 +756,13 @@ fail:
756static void rsi_disconnect(struct sdio_func *pfunction) 756static void rsi_disconnect(struct sdio_func *pfunction)
757{ 757{
758 struct rsi_hw *adapter = sdio_get_drvdata(pfunction); 758 struct rsi_hw *adapter = sdio_get_drvdata(pfunction);
759 struct rsi_91x_sdiodev *dev = 759 struct rsi_91x_sdiodev *dev;
760 (struct rsi_91x_sdiodev *)adapter->rsi_dev;
761 760
762 if (!adapter) 761 if (!adapter)
763 return; 762 return;
764 763
764 dev = (struct rsi_91x_sdiodev *)adapter->rsi_dev;
765
765 dev->write_fail = 2; 766 dev->write_fail = 2;
766 rsi_mac80211_detach(adapter); 767 rsi_mac80211_detach(adapter);
767 768
diff --git a/drivers/net/wireless/rsi/rsi_91x_sdio_ops.c b/drivers/net/wireless/rsi/rsi_91x_sdio_ops.c
index f1cb99cafed8..20d11ccfffe3 100644
--- a/drivers/net/wireless/rsi/rsi_91x_sdio_ops.c
+++ b/drivers/net/wireless/rsi/rsi_91x_sdio_ops.c
@@ -247,7 +247,7 @@ static int rsi_process_pkt(struct rsi_common *common)
247 if (!common->rx_data_pkt) { 247 if (!common->rx_data_pkt) {
248 rsi_dbg(ERR_ZONE, "%s: Failed in memory allocation\n", 248 rsi_dbg(ERR_ZONE, "%s: Failed in memory allocation\n",
249 __func__); 249 __func__);
250 return -1; 250 return -ENOMEM;
251 } 251 }
252 252
253 status = rsi_sdio_host_intf_read_pkt(adapter, 253 status = rsi_sdio_host_intf_read_pkt(adapter,
@@ -260,12 +260,10 @@ static int rsi_process_pkt(struct rsi_common *common)
260 } 260 }
261 261
262 status = rsi_read_pkt(common, rcv_pkt_len); 262 status = rsi_read_pkt(common, rcv_pkt_len);
263 kfree(common->rx_data_pkt);
264 return status;
265 263
266fail: 264fail:
267 kfree(common->rx_data_pkt); 265 kfree(common->rx_data_pkt);
268 return -1; 266 return status;
269} 267}
270 268
271/** 269/**
diff --git a/drivers/net/wireless/rsi/rsi_91x_usb.c b/drivers/net/wireless/rsi/rsi_91x_usb.c
index bb1bf96670eb..4c46e5631e2f 100644
--- a/drivers/net/wireless/rsi/rsi_91x_usb.c
+++ b/drivers/net/wireless/rsi/rsi_91x_usb.c
@@ -154,24 +154,30 @@ static int rsi_usb_reg_read(struct usb_device *usbdev,
154 u16 *value, 154 u16 *value,
155 u16 len) 155 u16 len)
156{ 156{
157 u8 temp_buf[4]; 157 u8 *buf;
158 int status = 0; 158 int status = -ENOMEM;
159
160 buf = kmalloc(0x04, GFP_KERNEL);
161 if (!buf)
162 return status;
159 163
160 status = usb_control_msg(usbdev, 164 status = usb_control_msg(usbdev,
161 usb_rcvctrlpipe(usbdev, 0), 165 usb_rcvctrlpipe(usbdev, 0),
162 USB_VENDOR_REGISTER_READ, 166 USB_VENDOR_REGISTER_READ,
163 USB_TYPE_VENDOR, 167 USB_TYPE_VENDOR,
164 ((reg & 0xffff0000) >> 16), (reg & 0xffff), 168 ((reg & 0xffff0000) >> 16), (reg & 0xffff),
165 (void *)temp_buf, 169 (void *)buf,
166 len, 170 len,
167 HZ * 5); 171 HZ * 5);
168 172
169 *value = (temp_buf[0] | (temp_buf[1] << 8)); 173 *value = (buf[0] | (buf[1] << 8));
170 if (status < 0) { 174 if (status < 0) {
171 rsi_dbg(ERR_ZONE, 175 rsi_dbg(ERR_ZONE,
172 "%s: Reg read failed with error code :%d\n", 176 "%s: Reg read failed with error code :%d\n",
173 __func__, status); 177 __func__, status);
174 } 178 }
179 kfree(buf);
180
175 return status; 181 return status;
176} 182}
177 183
@@ -190,8 +196,12 @@ static int rsi_usb_reg_write(struct usb_device *usbdev,
190 u16 value, 196 u16 value,
191 u16 len) 197 u16 len)
192{ 198{
193 u8 usb_reg_buf[4]; 199 u8 *usb_reg_buf;
194 int status = 0; 200 int status = -ENOMEM;
201
202 usb_reg_buf = kmalloc(0x04, GFP_KERNEL);
203 if (!usb_reg_buf)
204 return status;
195 205
196 usb_reg_buf[0] = (value & 0x00ff); 206 usb_reg_buf[0] = (value & 0x00ff);
197 usb_reg_buf[1] = (value & 0xff00) >> 8; 207 usb_reg_buf[1] = (value & 0xff00) >> 8;
@@ -212,6 +222,8 @@ static int rsi_usb_reg_write(struct usb_device *usbdev,
212 "%s: Reg write failed with error code :%d\n", 222 "%s: Reg write failed with error code :%d\n",
213 __func__, status); 223 __func__, status);
214 } 224 }
225 kfree(usb_reg_buf);
226
215 return status; 227 return status;
216} 228}
217 229
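Both register accessors above switch from on-stack arrays to kmalloc()'d scratch buffers: usb_control_msg() hands the buffer to the USB core for DMA, and stack memory is not DMA-safe. A sketch of the read side, with placeholder request/value/index numbers rather than the chip's real ones:

#include <linux/slab.h>
#include <linux/usb.h>

static int read_vendor_reg(struct usb_device *udev, u16 *value)
{
	u8 *buf;
	int status;

	buf = kmalloc(2, GFP_KERNEL);	/* DMA-capable scratch buffer */
	if (!buf)
		return -ENOMEM;

	status = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
				 0x01 /* placeholder vendor request */,
				 USB_TYPE_VENDOR | USB_DIR_IN,
				 0, 0, buf, 2, 5000);
	if (status >= 0)
		*value = buf[0] | (buf[1] << 8);

	kfree(buf);
	return status < 0 ? status : 0;
}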
@@ -286,7 +298,7 @@ int rsi_usb_write_register_multiple(struct rsi_hw *adapter,
286 return -ENOMEM; 298 return -ENOMEM;
287 299
288 while (count) { 300 while (count) {
289 transfer = min_t(int, count, 4096); 301 transfer = (u8)(min_t(u32, count, 4096));
290 memcpy(buf, data, transfer); 302 memcpy(buf, data, transfer);
291 status = usb_control_msg(dev->usbdev, 303 status = usb_control_msg(dev->usbdev,
292 usb_sndctrlpipe(dev->usbdev, 0), 304 usb_sndctrlpipe(dev->usbdev, 0),
diff --git a/drivers/net/wireless/rtlwifi/btcoexist/halbtcoutsrc.c b/drivers/net/wireless/rtlwifi/btcoexist/halbtcoutsrc.c
index b6722de64a31..33da3dfcfa4f 100644
--- a/drivers/net/wireless/rtlwifi/btcoexist/halbtcoutsrc.c
+++ b/drivers/net/wireless/rtlwifi/btcoexist/halbtcoutsrc.c
@@ -625,17 +625,7 @@ bool exhalbtc_initlize_variables(struct rtl_priv *adapter)
625 else 625 else
626 btcoexist->binded = true; 626 btcoexist->binded = true;
627 627
628#if (defined(CONFIG_PCI_HCI))
629 btcoexist->chip_interface = BTC_INTF_PCI;
630#elif (defined(CONFIG_USB_HCI))
631 btcoexist->chip_interface = BTC_INTF_USB;
632#elif (defined(CONFIG_SDIO_HCI))
633 btcoexist->chip_interface = BTC_INTF_SDIO;
634#elif (defined(CONFIG_GSPI_HCI))
635 btcoexist->chip_interface = BTC_INTF_GSPI;
636#else
637 btcoexist->chip_interface = BTC_INTF_UNKNOWN; 628 btcoexist->chip_interface = BTC_INTF_UNKNOWN;
638#endif
639 629
640 if (NULL == btcoexist->adapter) 630 if (NULL == btcoexist->adapter)
641 btcoexist->adapter = adapter; 631 btcoexist->adapter = adapter;
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index 057b05700f8b..158b5e639fc7 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -1291,13 +1291,13 @@ static struct net_device *xennet_create_dev(struct xenbus_device *dev)
1291 for (i = 0; i < NET_TX_RING_SIZE; i++) { 1291 for (i = 0; i < NET_TX_RING_SIZE; i++) {
1292 skb_entry_set_link(&np->tx_skbs[i], i+1); 1292 skb_entry_set_link(&np->tx_skbs[i], i+1);
1293 np->grant_tx_ref[i] = GRANT_INVALID_REF; 1293 np->grant_tx_ref[i] = GRANT_INVALID_REF;
1294 np->grant_tx_page[i] = NULL;
1294 } 1295 }
1295 1296
1296 /* Clear out rx_skbs */ 1297 /* Clear out rx_skbs */
1297 for (i = 0; i < NET_RX_RING_SIZE; i++) { 1298 for (i = 0; i < NET_RX_RING_SIZE; i++) {
1298 np->rx_skbs[i] = NULL; 1299 np->rx_skbs[i] = NULL;
1299 np->grant_rx_ref[i] = GRANT_INVALID_REF; 1300 np->grant_rx_ref[i] = GRANT_INVALID_REF;
1300 np->grant_tx_page[i] = NULL;
1301 } 1301 }
1302 1302
1303 /* A grant for every tx ring slot */ 1303 /* A grant for every tx ring slot */
diff --git a/drivers/ntb/ntb_hw.c b/drivers/ntb/ntb_hw.c
index 170e8e60cdb7..372e08c4ffef 100644
--- a/drivers/ntb/ntb_hw.c
+++ b/drivers/ntb/ntb_hw.c
@@ -91,7 +91,7 @@ static struct dentry *debugfs_dir;
91/* Translate memory window 0,1 to BAR 2,4 */ 91/* Translate memory window 0,1 to BAR 2,4 */
92#define MW_TO_BAR(mw) (mw * NTB_MAX_NUM_MW + 2) 92#define MW_TO_BAR(mw) (mw * NTB_MAX_NUM_MW + 2)
93 93
94static DEFINE_PCI_DEVICE_TABLE(ntb_pci_tbl) = { 94static const struct pci_device_id ntb_pci_tbl[] = {
95 {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_B2B_BWD)}, 95 {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_B2B_BWD)},
96 {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_B2B_JSF)}, 96 {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_B2B_JSF)},
97 {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_B2B_SNB)}, 97 {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_B2B_SNB)},
@@ -120,7 +120,8 @@ MODULE_DEVICE_TABLE(pci, ntb_pci_tbl);
120 * RETURNS: An appropriate -ERRNO error value on error, or zero for success. 120 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
121 */ 121 */
122int ntb_register_event_callback(struct ntb_device *ndev, 122int ntb_register_event_callback(struct ntb_device *ndev,
123 void (*func)(void *handle, enum ntb_hw_event event)) 123 void (*func)(void *handle,
124 enum ntb_hw_event event))
124{ 125{
125 if (ndev->event_cb) 126 if (ndev->event_cb)
126 return -EINVAL; 127 return -EINVAL;
@@ -715,9 +716,9 @@ static int ntb_xeon_setup(struct ntb_device *ndev)
715 SNB_PBAR4LMT_OFFSET); 716 SNB_PBAR4LMT_OFFSET);
716 /* HW errata on the Limit registers. They can only be 717 /* HW errata on the Limit registers. They can only be
717 * written when the base register is 4GB aligned and 718 * written when the base register is 4GB aligned and
718 * < 32bit. This should already be the case based on the 719 * < 32bit. This should already be the case based on
719 * driver defaults, but write the Limit registers first 720 * the driver defaults, but write the Limit registers
720 * just in case. 721 * first just in case.
721 */ 722 */
722 } else { 723 } else {
723 ndev->limits.max_mw = SNB_MAX_MW; 724 ndev->limits.max_mw = SNB_MAX_MW;
@@ -739,9 +740,9 @@ static int ntb_xeon_setup(struct ntb_device *ndev)
739 writeq(0, ndev->reg_base + SNB_PBAR4LMT_OFFSET); 740 writeq(0, ndev->reg_base + SNB_PBAR4LMT_OFFSET);
740 /* HW errata on the Limit registers. They can only be 741 /* HW errata on the Limit registers. They can only be
741 * written when the base register is 4GB aligned and 742 * written when the base register is 4GB aligned and
742 * < 32bit. This should already be the case based on the 743 * < 32bit. This should already be the case based on
743 * driver defaults, but write the Limit registers first 744 * the driver defaults, but write the Limit registers
744 * just in case. 745 * first just in case.
745 */ 746 */
746 } 747 }
747 748
@@ -785,7 +786,7 @@ static int ntb_xeon_setup(struct ntb_device *ndev)
785 /* B2B_XLAT_OFFSET is a 64bit register, but can 786 /* B2B_XLAT_OFFSET is a 64bit register, but can
786 * only take 32bit writes 787 * only take 32bit writes
787 */ 788 */
788 writel(SNB_MBAR01_DSD_ADDR & 0xffffffff, 789 writel(SNB_MBAR01_USD_ADDR & 0xffffffff,
789 ndev->reg_base + SNB_B2B_XLAT_OFFSETL); 790 ndev->reg_base + SNB_B2B_XLAT_OFFSETL);
790 writel(SNB_MBAR01_USD_ADDR >> 32, 791 writel(SNB_MBAR01_USD_ADDR >> 32,
791 ndev->reg_base + SNB_B2B_XLAT_OFFSETU); 792 ndev->reg_base + SNB_B2B_XLAT_OFFSETU);
@@ -803,7 +804,7 @@ static int ntb_xeon_setup(struct ntb_device *ndev)
803 ndev->conn_type = NTB_CONN_RP; 804 ndev->conn_type = NTB_CONN_RP;
804 805
805 if (xeon_errata_workaround) { 806 if (xeon_errata_workaround) {
806 dev_err(&ndev->pdev->dev, 807 dev_err(&ndev->pdev->dev,
807 "NTB-RP disabled due to hardware errata. To disregard this warning and potentially lock-up the system, add the parameter 'xeon_errata_workaround=0'.\n"); 808 "NTB-RP disabled due to hardware errata. To disregard this warning and potentially lock-up the system, add the parameter 'xeon_errata_workaround=0'.\n");
808 return -EINVAL; 809 return -EINVAL;
809 } 810 }
@@ -1079,111 +1080,131 @@ static irqreturn_t ntb_interrupt(int irq, void *dev)
1079 return IRQ_HANDLED; 1080 return IRQ_HANDLED;
1080} 1081}
1081 1082
1082static int ntb_setup_msix(struct ntb_device *ndev) 1083static int ntb_setup_snb_msix(struct ntb_device *ndev, int msix_entries)
1083{ 1084{
1084 struct pci_dev *pdev = ndev->pdev; 1085 struct pci_dev *pdev = ndev->pdev;
1085 struct msix_entry *msix; 1086 struct msix_entry *msix;
1086 int msix_entries;
1087 int rc, i; 1087 int rc, i;
1088 u16 val;
1089 1088
1090 if (!pdev->msix_cap) { 1089 if (msix_entries < ndev->limits.msix_cnt)
1091 rc = -EIO; 1090 return -ENOSPC;
1092 goto err;
1093 }
1094 1091
1095 rc = pci_read_config_word(pdev, pdev->msix_cap + PCI_MSIX_FLAGS, &val); 1092 rc = pci_enable_msix_exact(pdev, ndev->msix_entries, msix_entries);
1096 if (rc) 1093 if (rc < 0)
1097 goto err; 1094 return rc;
1098 1095
1099 msix_entries = msix_table_size(val); 1096 for (i = 0; i < msix_entries; i++) {
1100 if (msix_entries > ndev->limits.msix_cnt) { 1097 msix = &ndev->msix_entries[i];
1101 rc = -EINVAL; 1098 WARN_ON(!msix->vector);
1102 goto err; 1099
1100 if (i == msix_entries - 1) {
1101 rc = request_irq(msix->vector,
1102 xeon_event_msix_irq, 0,
1103 "ntb-event-msix", ndev);
1104 if (rc)
1105 goto err;
1106 } else {
1107 rc = request_irq(msix->vector,
1108 xeon_callback_msix_irq, 0,
1109 "ntb-callback-msix",
1110 &ndev->db_cb[i]);
1111 if (rc)
1112 goto err;
1113 }
1103 } 1114 }
1104 1115
1105 ndev->msix_entries = kmalloc(sizeof(struct msix_entry) * msix_entries, 1116 ndev->num_msix = msix_entries;
1106 GFP_KERNEL); 1117 ndev->max_cbs = msix_entries - 1;
1107 if (!ndev->msix_entries) { 1118
1108 rc = -ENOMEM; 1119 return 0;
1109 goto err; 1120
1121err:
1122 while (--i >= 0) {
1123 /* Code never reaches here for entry nr 'ndev->num_msix - 1' */
1124 msix = &ndev->msix_entries[i];
1125 free_irq(msix->vector, &ndev->db_cb[i]);
1110 } 1126 }
1111 1127
1112 for (i = 0; i < msix_entries; i++) 1128 pci_disable_msix(pdev);
1113 ndev->msix_entries[i].entry = i; 1129 ndev->num_msix = 0;
1114 1130
1115 rc = pci_enable_msix(pdev, ndev->msix_entries, msix_entries); 1131 return rc;
1116 if (rc < 0) 1132}
1117 goto err1;
1118 if (rc > 0) {
1119 /* On SNB, the link interrupt is always tied to 4th vector. If
1120 * we can't get all 4, then we can't use MSI-X.
1121 */
1122 if (ndev->hw_type != BWD_HW) {
1123 rc = -EIO;
1124 goto err1;
1125 }
1126 1133
1127 dev_warn(&pdev->dev, 1134static int ntb_setup_bwd_msix(struct ntb_device *ndev, int msix_entries)
1128 "Only %d MSI-X vectors. Limiting the number of queues to that number.\n", 1135{
1129 rc); 1136 struct pci_dev *pdev = ndev->pdev;
1130 msix_entries = rc; 1137 struct msix_entry *msix;
1138 int rc, i;
1131 1139
1132 rc = pci_enable_msix(pdev, ndev->msix_entries, msix_entries); 1140 msix_entries = pci_enable_msix_range(pdev, ndev->msix_entries,
1133 if (rc) 1141 1, msix_entries);
1134 goto err1; 1142 if (msix_entries < 0)
1135 } 1143 return msix_entries;
1136 1144
1137 for (i = 0; i < msix_entries; i++) { 1145 for (i = 0; i < msix_entries; i++) {
1138 msix = &ndev->msix_entries[i]; 1146 msix = &ndev->msix_entries[i];
1139 WARN_ON(!msix->vector); 1147 WARN_ON(!msix->vector);
1140 1148
1141 /* Use the last MSI-X vector for Link status */ 1149 rc = request_irq(msix->vector, bwd_callback_msix_irq, 0,
1142 if (ndev->hw_type == BWD_HW) { 1150 "ntb-callback-msix", &ndev->db_cb[i]);
1143 rc = request_irq(msix->vector, bwd_callback_msix_irq, 0, 1151 if (rc)
1144 "ntb-callback-msix", &ndev->db_cb[i]); 1152 goto err;
1145 if (rc)
1146 goto err2;
1147 } else {
1148 if (i == msix_entries - 1) {
1149 rc = request_irq(msix->vector,
1150 xeon_event_msix_irq, 0,
1151 "ntb-event-msix", ndev);
1152 if (rc)
1153 goto err2;
1154 } else {
1155 rc = request_irq(msix->vector,
1156 xeon_callback_msix_irq, 0,
1157 "ntb-callback-msix",
1158 &ndev->db_cb[i]);
1159 if (rc)
1160 goto err2;
1161 }
1162 }
1163 } 1153 }
1164 1154
1165 ndev->num_msix = msix_entries; 1155 ndev->num_msix = msix_entries;
1156 ndev->max_cbs = msix_entries;
1157
1158 return 0;
1159
1160err:
1161 while (--i >= 0)
1162 free_irq(msix->vector, &ndev->db_cb[i]);
1163
1164 pci_disable_msix(pdev);
1165 ndev->num_msix = 0;
1166
1167 return rc;
1168}
1169
1170static int ntb_setup_msix(struct ntb_device *ndev)
1171{
1172 struct pci_dev *pdev = ndev->pdev;
1173 int msix_entries;
1174 int rc, i;
1175
1176 msix_entries = pci_msix_vec_count(pdev);
1177 if (msix_entries < 0) {
1178 rc = msix_entries;
1179 goto err;
1180 } else if (msix_entries > ndev->limits.msix_cnt) {
1181 rc = -EINVAL;
1182 goto err;
1183 }
1184
1185 ndev->msix_entries = kmalloc(sizeof(struct msix_entry) * msix_entries,
1186 GFP_KERNEL);
1187 if (!ndev->msix_entries) {
1188 rc = -ENOMEM;
1189 goto err;
1190 }
1191
1192 for (i = 0; i < msix_entries; i++)
1193 ndev->msix_entries[i].entry = i;
1194
1166 if (ndev->hw_type == BWD_HW) 1195 if (ndev->hw_type == BWD_HW)
1167 ndev->max_cbs = msix_entries; 1196 rc = ntb_setup_bwd_msix(ndev, msix_entries);
1168 else 1197 else
1169 ndev->max_cbs = msix_entries - 1; 1198 rc = ntb_setup_snb_msix(ndev, msix_entries);
1199 if (rc)
1200 goto err1;
1170 1201
1171 return 0; 1202 return 0;
1172 1203
1173err2:
1174 while (--i >= 0) {
1175 msix = &ndev->msix_entries[i];
1176 if (ndev->hw_type != BWD_HW && i == ndev->num_msix - 1)
1177 free_irq(msix->vector, ndev);
1178 else
1179 free_irq(msix->vector, &ndev->db_cb[i]);
1180 }
1181 pci_disable_msix(pdev);
1182err1: 1204err1:
1183 kfree(ndev->msix_entries); 1205 kfree(ndev->msix_entries);
1184 dev_err(&pdev->dev, "Error allocating MSI-X interrupt\n");
1185err: 1206err:
1186 ndev->num_msix = 0; 1207 dev_err(&pdev->dev, "Error allocating MSI-X interrupt\n");
1187 return rc; 1208 return rc;
1188} 1209}
1189 1210
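The refactor above replaces the hand-rolled retry around pci_enable_msix() with the newer helpers: pci_msix_vec_count() to size the entry table, pci_enable_msix_exact() where every vector is mandatory (the SNB design ties the last vector to link events), and pci_enable_msix_range() where anything from one vector up is usable (BWD). A sketch of choosing between the two allocation helpers, with error handling trimmed:

#include <linux/pci.h>

/* 'entries' must already hold 'want' slots. */
static int alloc_msix_vectors(struct pci_dev *pdev,
			      struct msix_entry *entries,
			      int want, bool need_all)
{
	int i, got;

	for (i = 0; i < want; i++)
		entries[i].entry = i;

	if (need_all) {
		/* all-or-nothing, e.g. one fixed vector carries link events */
		got = pci_enable_msix_exact(pdev, entries, want);
		return got ? got : want;
	}

	/* accept anything from a single vector up to 'want' */
	got = pci_enable_msix_range(pdev, entries, 1, want);
	return got;		/* >0: vectors granted, <0: error */
}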
@@ -1281,6 +1302,7 @@ static void ntb_free_interrupts(struct ntb_device *ndev)
1281 free_irq(msix->vector, &ndev->db_cb[i]); 1302 free_irq(msix->vector, &ndev->db_cb[i]);
1282 } 1303 }
1283 pci_disable_msix(pdev); 1304 pci_disable_msix(pdev);
1305 kfree(ndev->msix_entries);
1284 } else { 1306 } else {
1285 free_irq(pdev->irq, ndev); 1307 free_irq(pdev->irq, ndev);
1286 1308
diff --git a/drivers/ntb/ntb_hw.h b/drivers/ntb/ntb_hw.h
index bbdb7edca10c..465517b7393e 100644
--- a/drivers/ntb/ntb_hw.h
+++ b/drivers/ntb/ntb_hw.h
@@ -45,6 +45,7 @@
45 * Contact Information: 45 * Contact Information:
46 * Jon Mason <jon.mason@intel.com> 46 * Jon Mason <jon.mason@intel.com>
47 */ 47 */
48#include <linux/ntb.h>
48 49
49#define PCI_DEVICE_ID_INTEL_NTB_B2B_JSF 0x3725 50#define PCI_DEVICE_ID_INTEL_NTB_B2B_JSF 0x3725
50#define PCI_DEVICE_ID_INTEL_NTB_PS_JSF 0x3726 51#define PCI_DEVICE_ID_INTEL_NTB_PS_JSF 0x3726
@@ -60,8 +61,6 @@
60#define PCI_DEVICE_ID_INTEL_NTB_SS_HSX 0x2F0F 61#define PCI_DEVICE_ID_INTEL_NTB_SS_HSX 0x2F0F
61#define PCI_DEVICE_ID_INTEL_NTB_B2B_BWD 0x0C4E 62#define PCI_DEVICE_ID_INTEL_NTB_B2B_BWD 0x0C4E
62 63
63#define msix_table_size(control) ((control & PCI_MSIX_FLAGS_QSIZE)+1)
64
65#ifndef readq 64#ifndef readq
66static inline u64 readq(void __iomem *addr) 65static inline u64 readq(void __iomem *addr)
67{ 66{
@@ -83,9 +82,6 @@ static inline void writeq(u64 val, void __iomem *addr)
83#define NTB_BAR_MASK ((1 << NTB_BAR_MMIO) | (1 << NTB_BAR_23) |\ 82#define NTB_BAR_MASK ((1 << NTB_BAR_MMIO) | (1 << NTB_BAR_23) |\
84 (1 << NTB_BAR_45)) 83 (1 << NTB_BAR_45))
85 84
86#define NTB_LINK_DOWN 0
87#define NTB_LINK_UP 1
88
89#define NTB_HB_TIMEOUT msecs_to_jiffies(1000) 85#define NTB_HB_TIMEOUT msecs_to_jiffies(1000)
90 86
91#define NTB_MAX_NUM_MW 2 87#define NTB_MAX_NUM_MW 2
@@ -233,7 +229,7 @@ int ntb_register_db_callback(struct ntb_device *ndev, unsigned int idx,
233 int db_num)); 229 int db_num));
234void ntb_unregister_db_callback(struct ntb_device *ndev, unsigned int idx); 230void ntb_unregister_db_callback(struct ntb_device *ndev, unsigned int idx);
235int ntb_register_event_callback(struct ntb_device *ndev, 231int ntb_register_event_callback(struct ntb_device *ndev,
236 void (*event_cb_func) (void *handle, 232 void (*event_cb_func)(void *handle,
237 enum ntb_hw_event event)); 233 enum ntb_hw_event event));
238void ntb_unregister_event_callback(struct ntb_device *ndev); 234void ntb_unregister_event_callback(struct ntb_device *ndev);
239int ntb_get_max_spads(struct ntb_device *ndev); 235int ntb_get_max_spads(struct ntb_device *ndev);
diff --git a/drivers/ntb/ntb_transport.c b/drivers/ntb/ntb_transport.c
index 3217f394d45b..9dd63b822025 100644
--- a/drivers/ntb/ntb_transport.c
+++ b/drivers/ntb/ntb_transport.c
@@ -56,7 +56,6 @@
56#include <linux/pci.h> 56#include <linux/pci.h>
57#include <linux/slab.h> 57#include <linux/slab.h>
58#include <linux/types.h> 58#include <linux/types.h>
59#include <linux/ntb.h>
60#include "ntb_hw.h" 59#include "ntb_hw.h"
61 60
62#define NTB_TRANSPORT_VERSION 3 61#define NTB_TRANSPORT_VERSION 3
@@ -107,8 +106,8 @@ struct ntb_transport_qp {
107 struct ntb_rx_info __iomem *rx_info; 106 struct ntb_rx_info __iomem *rx_info;
108 struct ntb_rx_info *remote_rx_info; 107 struct ntb_rx_info *remote_rx_info;
109 108
110 void (*tx_handler) (struct ntb_transport_qp *qp, void *qp_data, 109 void (*tx_handler)(struct ntb_transport_qp *qp, void *qp_data,
111 void *data, int len); 110 void *data, int len);
112 struct list_head tx_free_q; 111 struct list_head tx_free_q;
113 spinlock_t ntb_tx_free_q_lock; 112 spinlock_t ntb_tx_free_q_lock;
114 void __iomem *tx_mw; 113 void __iomem *tx_mw;
@@ -117,8 +116,8 @@ struct ntb_transport_qp {
117 unsigned int tx_max_entry; 116 unsigned int tx_max_entry;
118 unsigned int tx_max_frame; 117 unsigned int tx_max_frame;
119 118
120 void (*rx_handler) (struct ntb_transport_qp *qp, void *qp_data, 119 void (*rx_handler)(struct ntb_transport_qp *qp, void *qp_data,
121 void *data, int len); 120 void *data, int len);
122 struct list_head rx_pend_q; 121 struct list_head rx_pend_q;
123 struct list_head rx_free_q; 122 struct list_head rx_free_q;
124 spinlock_t ntb_rx_pend_q_lock; 123 spinlock_t ntb_rx_pend_q_lock;
@@ -129,7 +128,7 @@ struct ntb_transport_qp {
129 unsigned int rx_max_frame; 128 unsigned int rx_max_frame;
130 dma_cookie_t last_cookie; 129 dma_cookie_t last_cookie;
131 130
132 void (*event_handler) (void *data, int status); 131 void (*event_handler)(void *data, int status);
133 struct delayed_work link_work; 132 struct delayed_work link_work;
134 struct work_struct link_cleanup; 133 struct work_struct link_cleanup;
135 134
@@ -480,7 +479,7 @@ static void ntb_list_add(spinlock_t *lock, struct list_head *entry,
480} 479}
481 480
482static struct ntb_queue_entry *ntb_list_rm(spinlock_t *lock, 481static struct ntb_queue_entry *ntb_list_rm(spinlock_t *lock,
483 struct list_head *list) 482 struct list_head *list)
484{ 483{
485 struct ntb_queue_entry *entry; 484 struct ntb_queue_entry *entry;
486 unsigned long flags; 485 unsigned long flags;
@@ -839,7 +838,7 @@ static void ntb_qp_link_work(struct work_struct *work)
839} 838}
840 839
841static int ntb_transport_init_queue(struct ntb_transport *nt, 840static int ntb_transport_init_queue(struct ntb_transport *nt,
842 unsigned int qp_num) 841 unsigned int qp_num)
843{ 842{
844 struct ntb_transport_qp *qp; 843 struct ntb_transport_qp *qp;
845 unsigned int num_qps_mw, tx_size; 844 unsigned int num_qps_mw, tx_size;
@@ -1055,7 +1054,7 @@ static void ntb_async_rx(struct ntb_queue_entry *entry, void *offset,
1055 if (!chan) 1054 if (!chan)
1056 goto err; 1055 goto err;
1057 1056
1058 if (len < copy_bytes) 1057 if (len < copy_bytes)
1059 goto err_wait; 1058 goto err_wait;
1060 1059
1061 device = chan->device; 1060 device = chan->device;
@@ -1190,8 +1189,7 @@ out:
1190 return 0; 1189 return 0;
1191 1190
1192err: 1191err:
1193 ntb_list_add(&qp->ntb_rx_pend_q_lock, &entry->entry, 1192 ntb_list_add(&qp->ntb_rx_pend_q_lock, &entry->entry, &qp->rx_pend_q);
1194 &qp->rx_pend_q);
1195 /* Ensure that the data is fully copied out before clearing the flag */ 1193 /* Ensure that the data is fully copied out before clearing the flag */
1196 wmb(); 1194 wmb();
1197 hdr->flags = 0; 1195 hdr->flags = 0;
diff --git a/drivers/remoteproc/da8xx_remoteproc.c b/drivers/remoteproc/da8xx_remoteproc.c
index 129f7b997866..3841b9813109 100644
--- a/drivers/remoteproc/da8xx_remoteproc.c
+++ b/drivers/remoteproc/da8xx_remoteproc.c
@@ -201,23 +201,11 @@ static int da8xx_rproc_probe(struct platform_device *pdev)
201 } 201 }
202 202
203 bootreg_res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 203 bootreg_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
204 if (!bootreg_res) {
205 dev_err(dev,
206 "platform_get_resource(IORESOURCE_MEM, 0): NULL\n");
207 return -EADDRNOTAVAIL;
208 }
209
210 chipsig_res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
211 if (!chipsig_res) {
212 dev_err(dev,
213 "platform_get_resource(IORESOURCE_MEM, 1): NULL\n");
214 return -EADDRNOTAVAIL;
215 }
216
217 bootreg = devm_ioremap_resource(dev, bootreg_res); 204 bootreg = devm_ioremap_resource(dev, bootreg_res);
218 if (IS_ERR(bootreg)) 205 if (IS_ERR(bootreg))
219 return PTR_ERR(bootreg); 206 return PTR_ERR(bootreg);
220 207
208 chipsig_res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
221 chipsig = devm_ioremap_resource(dev, chipsig_res); 209 chipsig = devm_ioremap_resource(dev, chipsig_res);
222 if (IS_ERR(chipsig)) 210 if (IS_ERR(chipsig))
223 return PTR_ERR(chipsig); 211 return PTR_ERR(chipsig);
@@ -301,8 +289,6 @@ static int da8xx_rproc_remove(struct platform_device *pdev)
301 */ 289 */
302 disable_irq(drproc->irq); 290 disable_irq(drproc->irq);
303 291
304 devm_clk_put(dev, drproc->dsp_clk);
305
306 rproc_del(rproc); 292 rproc_del(rproc);
307 rproc_put(rproc); 293 rproc_put(rproc);
308 294
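A note on the da8xx_remoteproc probe hunk above: the explicit platform_get_resource() NULL checks can go because devm_ioremap_resource() validates the resource itself and returns an ERR_PTR (with its own error message) when the resource is missing; the remove hunk similarly drops a devm_clk_put() that devres releases automatically. A minimal sketch of the resulting probe shape, with a hypothetical foo_probe() and only the standard platform-device API assumed:

#include <linux/err.h>
#include <linux/io.h>
#include <linux/platform_device.h>

/* Hypothetical probe: no manual NULL check on the resource, because
 * devm_ioremap_resource() performs it and returns an ERR_PTR on failure. */
static int foo_probe(struct platform_device *pdev)
{
        struct resource *res;
        void __iomem *base;

        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        base = devm_ioremap_resource(&pdev->dev, res);
        if (IS_ERR(base))
                return PTR_ERR(base);

        return 0;
}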
diff --git a/drivers/remoteproc/ste_modem_rproc.c b/drivers/remoteproc/ste_modem_rproc.c
index 1ec39a4c0b3e..c4ac9104dd8e 100644
--- a/drivers/remoteproc/ste_modem_rproc.c
+++ b/drivers/remoteproc/ste_modem_rproc.c
@@ -164,7 +164,7 @@ sproc_find_loaded_rsc_table(struct rproc *rproc, const struct firmware *fw)
164} 164}
165 165
166/* STE modem firmware handler operations */ 166/* STE modem firmware handler operations */
167const struct rproc_fw_ops sproc_fw_ops = { 167static const struct rproc_fw_ops sproc_fw_ops = {
168 .load = sproc_load_segments, 168 .load = sproc_load_segments,
169 .find_rsc_table = sproc_find_rsc_table, 169 .find_rsc_table = sproc_find_rsc_table,
170 .find_loaded_rsc_table = sproc_find_loaded_rsc_table, 170 .find_loaded_rsc_table = sproc_find_loaded_rsc_table,
@@ -193,7 +193,7 @@ static void sproc_kick_callback(struct ste_modem_device *mdev, int vqid)
193 sproc_dbg(sproc, "no message was found in vqid %d\n", vqid); 193 sproc_dbg(sproc, "no message was found in vqid %d\n", vqid);
194} 194}
195 195
196struct ste_modem_dev_cb sproc_dev_cb = { 196static struct ste_modem_dev_cb sproc_dev_cb = {
197 .kick = sproc_kick_callback, 197 .kick = sproc_kick_callback,
198}; 198};
199 199
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index c8bd092fc945..02832d64d918 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -263,6 +263,9 @@ config SCSI_SCAN_ASYNC
263 You can override this choice by specifying "scsi_mod.scan=sync" 263 You can override this choice by specifying "scsi_mod.scan=sync"
264 or async on the kernel's command line. 264 or async on the kernel's command line.
265 265
266 Note that this setting also affects whether resuming from
267 system suspend will be performed asynchronously.
268
266menu "SCSI Transports" 269menu "SCSI Transports"
267 depends on SCSI 270 depends on SCSI
268 271
diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c
index bfb6d07d87f0..11854845393b 100644
--- a/drivers/scsi/iscsi_tcp.c
+++ b/drivers/scsi/iscsi_tcp.c
@@ -125,7 +125,7 @@ static inline int iscsi_sw_sk_state_check(struct sock *sk)
125 return 0; 125 return 0;
126} 126}
127 127
128static void iscsi_sw_tcp_data_ready(struct sock *sk, int flag) 128static void iscsi_sw_tcp_data_ready(struct sock *sk)
129{ 129{
130 struct iscsi_conn *conn; 130 struct iscsi_conn *conn;
131 struct iscsi_tcp_conn *tcp_conn; 131 struct iscsi_tcp_conn *tcp_conn;
diff --git a/drivers/scsi/iscsi_tcp.h b/drivers/scsi/iscsi_tcp.h
index 666fe09378fa..f42ecb238af5 100644
--- a/drivers/scsi/iscsi_tcp.h
+++ b/drivers/scsi/iscsi_tcp.h
@@ -40,7 +40,7 @@ struct iscsi_sw_tcp_conn {
40 40
41 struct iscsi_sw_tcp_send out; 41 struct iscsi_sw_tcp_send out;
42 /* old values for socket callbacks */ 42 /* old values for socket callbacks */
43 void (*old_data_ready)(struct sock *, int); 43 void (*old_data_ready)(struct sock *);
44 void (*old_state_change)(struct sock *); 44 void (*old_state_change)(struct sock *);
45 void (*old_write_space)(struct sock *); 45 void (*old_write_space)(struct sock *);
46 46
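The two iscsi_tcp hunks above, like the socklnd and iSCSI-target login changes later in this series, track one core change: sk->sk_data_ready lost its unused byte-count argument and now takes only the struct sock pointer. A minimal sketch of saving and replacing the callback under the new prototype, with hypothetical foo_* names and the usual sk_callback_lock protection:

#include <net/sock.h>

struct foo_conn {
        void (*saved_data_ready)(struct sock *sk);      /* original callback */
};

static void foo_data_ready(struct sock *sk)
{
        struct foo_conn *conn = sk->sk_user_data;

        if (!conn)
                return;         /* raced with connection teardown */
        /* wake up this connection's receive path here */
}

static void foo_save_callbacks(struct foo_conn *conn, struct sock *sk)
{
        write_lock_bh(&sk->sk_callback_lock);
        conn->saved_data_ready = sk->sk_data_ready;
        sk->sk_user_data = conn;
        sk->sk_data_ready = foo_data_ready;
        write_unlock_bh(&sk->sk_callback_lock);
}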
diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.c b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
index 788c4fe2b0c9..68fb66fdb757 100644
--- a/drivers/scsi/qla2xxx/tcm_qla2xxx.c
+++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
@@ -684,6 +684,20 @@ static void tcm_qla2xxx_queue_tm_rsp(struct se_cmd *se_cmd)
684 qlt_xmit_tm_rsp(mcmd); 684 qlt_xmit_tm_rsp(mcmd);
685} 685}
686 686
687static void tcm_qla2xxx_aborted_task(struct se_cmd *se_cmd)
688{
689 struct qla_tgt_cmd *cmd = container_of(se_cmd,
690 struct qla_tgt_cmd, se_cmd);
691 struct scsi_qla_host *vha = cmd->vha;
692 struct qla_hw_data *ha = vha->hw;
693
694 if (!cmd->sg_mapped)
695 return;
696
697 pci_unmap_sg(ha->pdev, cmd->sg, cmd->sg_cnt, cmd->dma_data_direction);
698 cmd->sg_mapped = 0;
699}
700
687/* Local pointer to allocated TCM configfs fabric module */ 701/* Local pointer to allocated TCM configfs fabric module */
688struct target_fabric_configfs *tcm_qla2xxx_fabric_configfs; 702struct target_fabric_configfs *tcm_qla2xxx_fabric_configfs;
689struct target_fabric_configfs *tcm_qla2xxx_npiv_fabric_configfs; 703struct target_fabric_configfs *tcm_qla2xxx_npiv_fabric_configfs;
@@ -1468,7 +1482,7 @@ static int tcm_qla2xxx_check_initiator_node_acl(
1468 } 1482 }
1469 se_tpg = &tpg->se_tpg; 1483 se_tpg = &tpg->se_tpg;
1470 1484
1471 se_sess = transport_init_session(); 1485 se_sess = transport_init_session(TARGET_PROT_NORMAL);
1472 if (IS_ERR(se_sess)) { 1486 if (IS_ERR(se_sess)) {
1473 pr_err("Unable to initialize struct se_session\n"); 1487 pr_err("Unable to initialize struct se_session\n");
1474 return PTR_ERR(se_sess); 1488 return PTR_ERR(se_sess);
@@ -1877,6 +1891,7 @@ static struct target_core_fabric_ops tcm_qla2xxx_ops = {
1877 .queue_data_in = tcm_qla2xxx_queue_data_in, 1891 .queue_data_in = tcm_qla2xxx_queue_data_in,
1878 .queue_status = tcm_qla2xxx_queue_status, 1892 .queue_status = tcm_qla2xxx_queue_status,
1879 .queue_tm_rsp = tcm_qla2xxx_queue_tm_rsp, 1893 .queue_tm_rsp = tcm_qla2xxx_queue_tm_rsp,
1894 .aborted_task = tcm_qla2xxx_aborted_task,
1880 /* 1895 /*
1881 * Setup function pointers for generic logic in 1896 * Setup function pointers for generic logic in
1882 * target_core_fabric_configfs.c 1897 * target_core_fabric_configfs.c
@@ -1926,6 +1941,7 @@ static struct target_core_fabric_ops tcm_qla2xxx_npiv_ops = {
1926 .queue_data_in = tcm_qla2xxx_queue_data_in, 1941 .queue_data_in = tcm_qla2xxx_queue_data_in,
1927 .queue_status = tcm_qla2xxx_queue_status, 1942 .queue_status = tcm_qla2xxx_queue_status,
1928 .queue_tm_rsp = tcm_qla2xxx_queue_tm_rsp, 1943 .queue_tm_rsp = tcm_qla2xxx_queue_tm_rsp,
1944 .aborted_task = tcm_qla2xxx_aborted_task,
1929 /* 1945 /*
1930 * Setup function pointers for generic logic in 1946 * Setup function pointers for generic logic in
1931 * target_core_fabric_configfs.c 1947 * target_core_fabric_configfs.c
diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
index c4d632c27a3e..88d46fe6bf98 100644
--- a/drivers/scsi/scsi.c
+++ b/drivers/scsi/scsi.c
@@ -91,6 +91,15 @@ EXPORT_SYMBOL(scsi_logging_level);
91ASYNC_DOMAIN(scsi_sd_probe_domain); 91ASYNC_DOMAIN(scsi_sd_probe_domain);
92EXPORT_SYMBOL(scsi_sd_probe_domain); 92EXPORT_SYMBOL(scsi_sd_probe_domain);
93 93
94/*
95 * Separate domain (from scsi_sd_probe_domain) to maximize the benefit of
96 * asynchronous system resume operations. It is marked 'exclusive' to avoid
97 * being included in the async_synchronize_full() that is invoked by
98 * dpm_resume()
99 */
100ASYNC_DOMAIN_EXCLUSIVE(scsi_sd_pm_domain);
101EXPORT_SYMBOL(scsi_sd_pm_domain);
102
94/* NB: These are exposed through /proc/scsi/scsi and form part of the ABI. 103/* NB: These are exposed through /proc/scsi/scsi and form part of the ABI.
95 * You may not alter any existing entry (although adding new ones is 104 * You may not alter any existing entry (although adding new ones is
96 * encouraged once assigned by ANSI/INCITS T10 105 * encouraged once assigned by ANSI/INCITS T10
diff --git a/drivers/scsi/scsi_pm.c b/drivers/scsi/scsi_pm.c
index 001e9ceda4c3..7454498c4091 100644
--- a/drivers/scsi/scsi_pm.c
+++ b/drivers/scsi/scsi_pm.c
@@ -18,35 +18,77 @@
18 18
19#ifdef CONFIG_PM_SLEEP 19#ifdef CONFIG_PM_SLEEP
20 20
21static int scsi_dev_type_suspend(struct device *dev, int (*cb)(struct device *)) 21static int do_scsi_suspend(struct device *dev, const struct dev_pm_ops *pm)
22{ 22{
23 return pm && pm->suspend ? pm->suspend(dev) : 0;
24}
25
26static int do_scsi_freeze(struct device *dev, const struct dev_pm_ops *pm)
27{
28 return pm && pm->freeze ? pm->freeze(dev) : 0;
29}
30
31static int do_scsi_poweroff(struct device *dev, const struct dev_pm_ops *pm)
32{
33 return pm && pm->poweroff ? pm->poweroff(dev) : 0;
34}
35
36static int do_scsi_resume(struct device *dev, const struct dev_pm_ops *pm)
37{
38 return pm && pm->resume ? pm->resume(dev) : 0;
39}
40
41static int do_scsi_thaw(struct device *dev, const struct dev_pm_ops *pm)
42{
43 return pm && pm->thaw ? pm->thaw(dev) : 0;
44}
45
46static int do_scsi_restore(struct device *dev, const struct dev_pm_ops *pm)
47{
48 return pm && pm->restore ? pm->restore(dev) : 0;
49}
50
51static int scsi_dev_type_suspend(struct device *dev,
52 int (*cb)(struct device *, const struct dev_pm_ops *))
53{
54 const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
23 int err; 55 int err;
24 56
57 /* flush pending in-flight resume operations, suspend is synchronous */
58 async_synchronize_full_domain(&scsi_sd_pm_domain);
59
25 err = scsi_device_quiesce(to_scsi_device(dev)); 60 err = scsi_device_quiesce(to_scsi_device(dev));
26 if (err == 0) { 61 if (err == 0) {
27 if (cb) { 62 err = cb(dev, pm);
28 err = cb(dev); 63 if (err)
29 if (err) 64 scsi_device_resume(to_scsi_device(dev));
30 scsi_device_resume(to_scsi_device(dev));
31 }
32 } 65 }
33 dev_dbg(dev, "scsi suspend: %d\n", err); 66 dev_dbg(dev, "scsi suspend: %d\n", err);
34 return err; 67 return err;
35} 68}
36 69
37static int scsi_dev_type_resume(struct device *dev, int (*cb)(struct device *)) 70static int scsi_dev_type_resume(struct device *dev,
71 int (*cb)(struct device *, const struct dev_pm_ops *))
38{ 72{
73 const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
39 int err = 0; 74 int err = 0;
40 75
41 if (cb) 76 err = cb(dev, pm);
42 err = cb(dev);
43 scsi_device_resume(to_scsi_device(dev)); 77 scsi_device_resume(to_scsi_device(dev));
44 dev_dbg(dev, "scsi resume: %d\n", err); 78 dev_dbg(dev, "scsi resume: %d\n", err);
79
80 if (err == 0) {
81 pm_runtime_disable(dev);
82 pm_runtime_set_active(dev);
83 pm_runtime_enable(dev);
84 }
85
45 return err; 86 return err;
46} 87}
47 88
48static int 89static int
49scsi_bus_suspend_common(struct device *dev, int (*cb)(struct device *)) 90scsi_bus_suspend_common(struct device *dev,
91 int (*cb)(struct device *, const struct dev_pm_ops *))
50{ 92{
51 int err = 0; 93 int err = 0;
52 94
@@ -66,20 +108,54 @@ scsi_bus_suspend_common(struct device *dev, int (*cb)(struct device *))
66 return err; 108 return err;
67} 109}
68 110
69static int 111static void async_sdev_resume(void *dev, async_cookie_t cookie)
70scsi_bus_resume_common(struct device *dev, int (*cb)(struct device *))
71{ 112{
72 int err = 0; 113 scsi_dev_type_resume(dev, do_scsi_resume);
114}
73 115
74 if (scsi_is_sdev_device(dev)) 116static void async_sdev_thaw(void *dev, async_cookie_t cookie)
75 err = scsi_dev_type_resume(dev, cb); 117{
118 scsi_dev_type_resume(dev, do_scsi_thaw);
119}
76 120
77 if (err == 0) { 121static void async_sdev_restore(void *dev, async_cookie_t cookie)
122{
123 scsi_dev_type_resume(dev, do_scsi_restore);
124}
125
126static int scsi_bus_resume_common(struct device *dev,
127 int (*cb)(struct device *, const struct dev_pm_ops *))
128{
129 async_func_t fn;
130
131 if (!scsi_is_sdev_device(dev))
132 fn = NULL;
133 else if (cb == do_scsi_resume)
134 fn = async_sdev_resume;
135 else if (cb == do_scsi_thaw)
136 fn = async_sdev_thaw;
137 else if (cb == do_scsi_restore)
138 fn = async_sdev_restore;
139 else
140 fn = NULL;
141
142 if (fn) {
143 async_schedule_domain(fn, dev, &scsi_sd_pm_domain);
144
145 /*
146 * If a user has disabled async probing a likely reason
147 * is due to a storage enclosure that does not inject
148 * staggered spin-ups. For safety, make resume
149 * synchronous as well in that case.
150 */
151 if (strncmp(scsi_scan_type, "async", 5) != 0)
152 async_synchronize_full_domain(&scsi_sd_pm_domain);
153 } else {
78 pm_runtime_disable(dev); 154 pm_runtime_disable(dev);
79 pm_runtime_set_active(dev); 155 pm_runtime_set_active(dev);
80 pm_runtime_enable(dev); 156 pm_runtime_enable(dev);
81 } 157 }
82 return err; 158 return 0;
83} 159}
84 160
85static int scsi_bus_prepare(struct device *dev) 161static int scsi_bus_prepare(struct device *dev)
@@ -97,38 +173,32 @@ static int scsi_bus_prepare(struct device *dev)
97 173
98static int scsi_bus_suspend(struct device *dev) 174static int scsi_bus_suspend(struct device *dev)
99{ 175{
100 const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; 176 return scsi_bus_suspend_common(dev, do_scsi_suspend);
101 return scsi_bus_suspend_common(dev, pm ? pm->suspend : NULL);
102} 177}
103 178
104static int scsi_bus_resume(struct device *dev) 179static int scsi_bus_resume(struct device *dev)
105{ 180{
106 const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; 181 return scsi_bus_resume_common(dev, do_scsi_resume);
107 return scsi_bus_resume_common(dev, pm ? pm->resume : NULL);
108} 182}
109 183
110static int scsi_bus_freeze(struct device *dev) 184static int scsi_bus_freeze(struct device *dev)
111{ 185{
112 const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; 186 return scsi_bus_suspend_common(dev, do_scsi_freeze);
113 return scsi_bus_suspend_common(dev, pm ? pm->freeze : NULL);
114} 187}
115 188
116static int scsi_bus_thaw(struct device *dev) 189static int scsi_bus_thaw(struct device *dev)
117{ 190{
118 const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; 191 return scsi_bus_resume_common(dev, do_scsi_thaw);
119 return scsi_bus_resume_common(dev, pm ? pm->thaw : NULL);
120} 192}
121 193
122static int scsi_bus_poweroff(struct device *dev) 194static int scsi_bus_poweroff(struct device *dev)
123{ 195{
124 const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; 196 return scsi_bus_suspend_common(dev, do_scsi_poweroff);
125 return scsi_bus_suspend_common(dev, pm ? pm->poweroff : NULL);
126} 197}
127 198
128static int scsi_bus_restore(struct device *dev) 199static int scsi_bus_restore(struct device *dev)
129{ 200{
130 const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; 201 return scsi_bus_resume_common(dev, do_scsi_restore);
131 return scsi_bus_resume_common(dev, pm ? pm->restore : NULL);
132} 202}
133 203
134#else /* CONFIG_PM_SLEEP */ 204#else /* CONFIG_PM_SLEEP */
diff --git a/drivers/scsi/scsi_priv.h b/drivers/scsi/scsi_priv.h
index f079a598bed4..48e5b657e79f 100644
--- a/drivers/scsi/scsi_priv.h
+++ b/drivers/scsi/scsi_priv.h
@@ -112,6 +112,7 @@ extern void scsi_exit_procfs(void);
112#endif /* CONFIG_PROC_FS */ 112#endif /* CONFIG_PROC_FS */
113 113
114/* scsi_scan.c */ 114/* scsi_scan.c */
115extern char scsi_scan_type[];
115extern int scsi_complete_async_scans(void); 116extern int scsi_complete_async_scans(void);
116extern int scsi_scan_host_selected(struct Scsi_Host *, unsigned int, 117extern int scsi_scan_host_selected(struct Scsi_Host *, unsigned int,
117 unsigned int, unsigned int, int); 118 unsigned int, unsigned int, int);
@@ -166,6 +167,7 @@ static inline int scsi_autopm_get_host(struct Scsi_Host *h) { return 0; }
166static inline void scsi_autopm_put_host(struct Scsi_Host *h) {} 167static inline void scsi_autopm_put_host(struct Scsi_Host *h) {}
167#endif /* CONFIG_PM_RUNTIME */ 168#endif /* CONFIG_PM_RUNTIME */
168 169
170extern struct async_domain scsi_sd_pm_domain;
169extern struct async_domain scsi_sd_probe_domain; 171extern struct async_domain scsi_sd_probe_domain;
170 172
171/* 173/*
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
index 27f96d5b7680..e02b3aab56ce 100644
--- a/drivers/scsi/scsi_scan.c
+++ b/drivers/scsi/scsi_scan.c
@@ -97,7 +97,7 @@ MODULE_PARM_DESC(max_luns,
97#define SCSI_SCAN_TYPE_DEFAULT "sync" 97#define SCSI_SCAN_TYPE_DEFAULT "sync"
98#endif 98#endif
99 99
100static char scsi_scan_type[6] = SCSI_SCAN_TYPE_DEFAULT; 100char scsi_scan_type[6] = SCSI_SCAN_TYPE_DEFAULT;
101 101
102module_param_string(scan, scsi_scan_type, sizeof(scsi_scan_type), S_IRUGO); 102module_param_string(scan, scsi_scan_type, sizeof(scsi_scan_type), S_IRUGO);
103MODULE_PARM_DESC(scan, "sync, async or none"); 103MODULE_PARM_DESC(scan, "sync, async or none");
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 89e6c04ac595..efcbcd182863 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -3026,6 +3026,7 @@ static int sd_remove(struct device *dev)
3026 devt = disk_devt(sdkp->disk); 3026 devt = disk_devt(sdkp->disk);
3027 scsi_autopm_get_device(sdkp->device); 3027 scsi_autopm_get_device(sdkp->device);
3028 3028
3029 async_synchronize_full_domain(&scsi_sd_pm_domain);
3029 async_synchronize_full_domain(&scsi_sd_probe_domain); 3030 async_synchronize_full_domain(&scsi_sd_probe_domain);
3030 blk_queue_prep_rq(sdkp->device->request_queue, scsi_prep_fn); 3031 blk_queue_prep_rq(sdkp->device->request_queue, scsi_prep_fn);
3031 blk_queue_unprep_rq(sdkp->device->request_queue, NULL); 3032 blk_queue_unprep_rq(sdkp->device->request_queue, NULL);
diff --git a/drivers/scsi/sym53c8xx_2/sym_hipd.c b/drivers/scsi/sym53c8xx_2/sym_hipd.c
index d92fe4037e94..6b349e301869 100644
--- a/drivers/scsi/sym53c8xx_2/sym_hipd.c
+++ b/drivers/scsi/sym53c8xx_2/sym_hipd.c
@@ -3000,7 +3000,11 @@ sym_dequeue_from_squeue(struct sym_hcb *np, int i, int target, int lun, int task
3000 if ((target == -1 || cp->target == target) && 3000 if ((target == -1 || cp->target == target) &&
3001 (lun == -1 || cp->lun == lun) && 3001 (lun == -1 || cp->lun == lun) &&
3002 (task == -1 || cp->tag == task)) { 3002 (task == -1 || cp->tag == task)) {
3003#ifdef SYM_OPT_HANDLE_DEVICE_QUEUEING
3003 sym_set_cam_status(cp->cmd, DID_SOFT_ERROR); 3004 sym_set_cam_status(cp->cmd, DID_SOFT_ERROR);
3005#else
3006 sym_set_cam_status(cp->cmd, DID_REQUEUE);
3007#endif
3004 sym_remque(&cp->link_ccbq); 3008 sym_remque(&cp->link_ccbq);
3005 sym_insque_tail(&cp->link_ccbq, &np->comp_ccbq); 3009 sym_insque_tail(&cp->link_ccbq, &np->comp_ccbq);
3006 } 3010 }
diff --git a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_lib-linux.c b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_lib-linux.c
index a54b506ba7ca..37758d1c8a68 100644
--- a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_lib-linux.c
+++ b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_lib-linux.c
@@ -99,16 +99,7 @@ ksocknal_lib_send_iov (ksock_conn_t *conn, ksock_tx_t *tx)
99 struct iovec *scratchiov = conn->ksnc_scheduler->kss_scratch_iov; 99 struct iovec *scratchiov = conn->ksnc_scheduler->kss_scratch_iov;
100 unsigned int niov = tx->tx_niov; 100 unsigned int niov = tx->tx_niov;
101#endif 101#endif
102 struct msghdr msg = { 102 struct msghdr msg = {.msg_flags = MSG_DONTWAIT};
103 .msg_name = NULL,
104 .msg_namelen = 0,
105 .msg_iov = scratchiov,
106 .msg_iovlen = niov,
107 .msg_control = NULL,
108 .msg_controllen = 0,
109 .msg_flags = MSG_DONTWAIT
110 };
111 mm_segment_t oldmm = get_fs();
112 int i; 103 int i;
113 104
114 for (nob = i = 0; i < niov; i++) { 105 for (nob = i = 0; i < niov; i++) {
@@ -120,9 +111,7 @@ ksocknal_lib_send_iov (ksock_conn_t *conn, ksock_tx_t *tx)
120 nob < tx->tx_resid) 111 nob < tx->tx_resid)
121 msg.msg_flags |= MSG_MORE; 112 msg.msg_flags |= MSG_MORE;
122 113
123 set_fs (KERNEL_DS); 114 rc = kernel_sendmsg(sock, &msg, (struct kvec *)scratchiov, niov, nob);
124 rc = sock_sendmsg(sock, &msg, nob);
125 set_fs (oldmm);
126 } 115 }
127 return rc; 116 return rc;
128} 117}
@@ -174,16 +163,7 @@ ksocknal_lib_send_kiov (ksock_conn_t *conn, ksock_tx_t *tx)
174 struct iovec *scratchiov = conn->ksnc_scheduler->kss_scratch_iov; 163 struct iovec *scratchiov = conn->ksnc_scheduler->kss_scratch_iov;
175 unsigned int niov = tx->tx_nkiov; 164 unsigned int niov = tx->tx_nkiov;
176#endif 165#endif
177 struct msghdr msg = { 166 struct msghdr msg = {.msg_flags = MSG_DONTWAIT};
178 .msg_name = NULL,
179 .msg_namelen = 0,
180 .msg_iov = scratchiov,
181 .msg_iovlen = niov,
182 .msg_control = NULL,
183 .msg_controllen = 0,
184 .msg_flags = MSG_DONTWAIT
185 };
186 mm_segment_t oldmm = get_fs();
187 int i; 167 int i;
188 168
189 for (nob = i = 0; i < niov; i++) { 169 for (nob = i = 0; i < niov; i++) {
@@ -196,9 +176,7 @@ ksocknal_lib_send_kiov (ksock_conn_t *conn, ksock_tx_t *tx)
196 nob < tx->tx_resid) 176 nob < tx->tx_resid)
197 msg.msg_flags |= MSG_MORE; 177 msg.msg_flags |= MSG_MORE;
198 178
199 set_fs (KERNEL_DS); 179 rc = kernel_sendmsg(sock, &msg, (struct kvec *)scratchiov, niov, nob);
200 rc = sock_sendmsg(sock, &msg, nob);
201 set_fs (oldmm);
202 180
203 for (i = 0; i < niov; i++) 181 for (i = 0; i < niov; i++)
204 kunmap(kiov[i].kiov_page); 182 kunmap(kiov[i].kiov_page);
@@ -237,15 +215,8 @@ ksocknal_lib_recv_iov (ksock_conn_t *conn)
237#endif 215#endif
238 struct iovec *iov = conn->ksnc_rx_iov; 216 struct iovec *iov = conn->ksnc_rx_iov;
239 struct msghdr msg = { 217 struct msghdr msg = {
240 .msg_name = NULL,
241 .msg_namelen = 0,
242 .msg_iov = scratchiov,
243 .msg_iovlen = niov,
244 .msg_control = NULL,
245 .msg_controllen = 0,
246 .msg_flags = 0 218 .msg_flags = 0
247 }; 219 };
248 mm_segment_t oldmm = get_fs();
249 int nob; 220 int nob;
250 int i; 221 int i;
251 int rc; 222 int rc;
@@ -263,10 +234,8 @@ ksocknal_lib_recv_iov (ksock_conn_t *conn)
263 } 234 }
264 LASSERT (nob <= conn->ksnc_rx_nob_wanted); 235 LASSERT (nob <= conn->ksnc_rx_nob_wanted);
265 236
266 set_fs (KERNEL_DS); 237 rc = kernel_recvmsg(conn->ksnc_sock, &msg,
267 rc = sock_recvmsg (conn->ksnc_sock, &msg, nob, MSG_DONTWAIT); 238 (struct kvec *)scratchiov, niov, nob, MSG_DONTWAIT);
268 /* NB this is just a boolean..........................^ */
269 set_fs (oldmm);
270 239
271 saved_csum = 0; 240 saved_csum = 0;
272 if (conn->ksnc_proto == &ksocknal_protocol_v2x) { 241 if (conn->ksnc_proto == &ksocknal_protocol_v2x) {
@@ -355,14 +324,8 @@ ksocknal_lib_recv_kiov (ksock_conn_t *conn)
355#endif 324#endif
356 lnet_kiov_t *kiov = conn->ksnc_rx_kiov; 325 lnet_kiov_t *kiov = conn->ksnc_rx_kiov;
357 struct msghdr msg = { 326 struct msghdr msg = {
358 .msg_name = NULL,
359 .msg_namelen = 0,
360 .msg_iov = scratchiov,
361 .msg_control = NULL,
362 .msg_controllen = 0,
363 .msg_flags = 0 327 .msg_flags = 0
364 }; 328 };
365 mm_segment_t oldmm = get_fs();
366 int nob; 329 int nob;
367 int i; 330 int i;
368 int rc; 331 int rc;
@@ -370,13 +333,14 @@ ksocknal_lib_recv_kiov (ksock_conn_t *conn)
370 void *addr; 333 void *addr;
371 int sum; 334 int sum;
372 int fragnob; 335 int fragnob;
336 int n;
373 337
374 /* NB we can't trust socket ops to either consume our iovs 338 /* NB we can't trust socket ops to either consume our iovs
375 * or leave them alone. */ 339 * or leave them alone. */
376 addr = ksocknal_lib_kiov_vmap(kiov, niov, scratchiov, pages); 340 addr = ksocknal_lib_kiov_vmap(kiov, niov, scratchiov, pages);
377 if (addr != NULL) { 341 if (addr != NULL) {
378 nob = scratchiov[0].iov_len; 342 nob = scratchiov[0].iov_len;
379 msg.msg_iovlen = 1; 343 n = 1;
380 344
381 } else { 345 } else {
382 for (nob = i = 0; i < niov; i++) { 346 for (nob = i = 0; i < niov; i++) {
@@ -384,15 +348,13 @@ ksocknal_lib_recv_kiov (ksock_conn_t *conn)
384 scratchiov[i].iov_base = kmap(kiov[i].kiov_page) + 348 scratchiov[i].iov_base = kmap(kiov[i].kiov_page) +
385 kiov[i].kiov_offset; 349 kiov[i].kiov_offset;
386 } 350 }
387 msg.msg_iovlen = niov; 351 n = niov;
388 } 352 }
389 353
390 LASSERT (nob <= conn->ksnc_rx_nob_wanted); 354 LASSERT (nob <= conn->ksnc_rx_nob_wanted);
391 355
392 set_fs (KERNEL_DS); 356 rc = kernel_recvmsg(conn->ksnc_sock, &msg,
393 rc = sock_recvmsg (conn->ksnc_sock, &msg, nob, MSG_DONTWAIT); 357 (struct kvec *)scratchiov, n, nob, MSG_DONTWAIT);
394 /* NB this is just a boolean.......................^ */
395 set_fs (oldmm);
396 358
397 if (conn->ksnc_msg.ksm_csum != 0) { 359 if (conn->ksnc_msg.ksm_csum != 0) {
398 for (i = 0, sum = rc; sum > 0; i++, sum -= fragnob) { 360 for (i = 0, sum = rc; sum > 0; i++, sum -= fragnob) {
@@ -655,7 +617,7 @@ extern void ksocknal_write_callback (ksock_conn_t *conn);
655 * socket call back in Linux 617 * socket call back in Linux
656 */ 618 */
657static void 619static void
658ksocknal_data_ready (struct sock *sk, int n) 620ksocknal_data_ready (struct sock *sk)
659{ 621{
660 ksock_conn_t *conn; 622 ksock_conn_t *conn;
661 623
@@ -666,7 +628,7 @@ ksocknal_data_ready (struct sock *sk, int n)
666 conn = sk->sk_user_data; 628 conn = sk->sk_user_data;
667 if (conn == NULL) { /* raced with ksocknal_terminate_conn */ 629 if (conn == NULL) { /* raced with ksocknal_terminate_conn */
668 LASSERT (sk->sk_data_ready != &ksocknal_data_ready); 630 LASSERT (sk->sk_data_ready != &ksocknal_data_ready);
669 sk->sk_data_ready (sk, n); 631 sk->sk_data_ready (sk);
670 } else 632 } else
671 ksocknal_read_callback(conn); 633 ksocknal_read_callback(conn);
672 634
diff --git a/drivers/staging/lustre/lustre/libcfs/linux/linux-tcpip.c b/drivers/staging/lustre/lustre/libcfs/linux/linux-tcpip.c
index e6069d78af6b..7539fe16d76f 100644
--- a/drivers/staging/lustre/lustre/libcfs/linux/linux-tcpip.c
+++ b/drivers/staging/lustre/lustre/libcfs/linux/linux-tcpip.c
@@ -265,17 +265,11 @@ libcfs_sock_write (struct socket *sock, void *buffer, int nob, int timeout)
265 * empty enough to take the whole message immediately */ 265 * empty enough to take the whole message immediately */
266 266
267 for (;;) { 267 for (;;) {
268 struct iovec iov = { 268 struct kvec iov = {
269 .iov_base = buffer, 269 .iov_base = buffer,
270 .iov_len = nob 270 .iov_len = nob
271 }; 271 };
272 struct msghdr msg = { 272 struct msghdr msg = {
273 .msg_name = NULL,
274 .msg_namelen = 0,
275 .msg_iov = &iov,
276 .msg_iovlen = 1,
277 .msg_control = NULL,
278 .msg_controllen = 0,
279 .msg_flags = (timeout == 0) ? MSG_DONTWAIT : 0 273 .msg_flags = (timeout == 0) ? MSG_DONTWAIT : 0
280 }; 274 };
281 275
@@ -297,11 +291,9 @@ libcfs_sock_write (struct socket *sock, void *buffer, int nob, int timeout)
297 } 291 }
298 } 292 }
299 293
300 set_fs (KERNEL_DS);
301 then = jiffies; 294 then = jiffies;
302 rc = sock_sendmsg (sock, &msg, iov.iov_len); 295 rc = kernel_sendmsg(sock, &msg, &iov, 1, nob);
303 ticks -= jiffies - then; 296 ticks -= jiffies - then;
304 set_fs (oldmm);
305 297
306 if (rc == nob) 298 if (rc == nob)
307 return 0; 299 return 0;
@@ -338,17 +330,11 @@ libcfs_sock_read (struct socket *sock, void *buffer, int nob, int timeout)
338 LASSERT (ticks > 0); 330 LASSERT (ticks > 0);
339 331
340 for (;;) { 332 for (;;) {
341 struct iovec iov = { 333 struct kvec iov = {
342 .iov_base = buffer, 334 .iov_base = buffer,
343 .iov_len = nob 335 .iov_len = nob
344 }; 336 };
345 struct msghdr msg = { 337 struct msghdr msg = {
346 .msg_name = NULL,
347 .msg_namelen = 0,
348 .msg_iov = &iov,
349 .msg_iovlen = 1,
350 .msg_control = NULL,
351 .msg_controllen = 0,
352 .msg_flags = 0 338 .msg_flags = 0
353 }; 339 };
354 340
@@ -367,11 +353,9 @@ libcfs_sock_read (struct socket *sock, void *buffer, int nob, int timeout)
367 return rc; 353 return rc;
368 } 354 }
369 355
370 set_fs(KERNEL_DS);
371 then = jiffies; 356 then = jiffies;
372 rc = sock_recvmsg(sock, &msg, iov.iov_len, 0); 357 rc = kernel_recvmsg(sock, &msg, &iov, 1, nob, 0);
373 ticks -= jiffies - then; 358 ticks -= jiffies - then;
374 set_fs(oldmm);
375 359
376 if (rc < 0) 360 if (rc < 0)
377 return rc; 361 return rc;
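Both lustre files above drop the set_fs(KERNEL_DS) juggling and the fully spelled-out msghdr in favour of kernel_sendmsg()/kernel_recvmsg(), which operate on kernel-space struct kvec buffers directly. A minimal sketch of the send side, using a hypothetical helper and assuming an already-connected struct socket:

#include <linux/net.h>
#include <linux/socket.h>
#include <linux/uio.h>

/* Returns bytes sent or a negative errno; no address-space switching is
 * needed because kernel_sendmsg() takes kernel kvecs. */
static int foo_sock_write(struct socket *sock, void *buf, int len)
{
        struct kvec iov = { .iov_base = buf, .iov_len = len };
        struct msghdr msg = { .msg_flags = MSG_DONTWAIT };

        return kernel_sendmsg(sock, &msg, &iov, 1, len);
}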
diff --git a/drivers/staging/lustre/lustre/llite/symlink.c b/drivers/staging/lustre/lustre/llite/symlink.c
index ab06891f7fc7..80d48b5ae247 100644
--- a/drivers/staging/lustre/lustre/llite/symlink.c
+++ b/drivers/staging/lustre/lustre/llite/symlink.c
@@ -115,27 +115,6 @@ failed:
115 return rc; 115 return rc;
116} 116}
117 117
118static int ll_readlink(struct dentry *dentry, char *buffer, int buflen)
119{
120 struct inode *inode = dentry->d_inode;
121 struct ptlrpc_request *request;
122 char *symname;
123 int rc;
124
125 CDEBUG(D_VFSTRACE, "VFS Op\n");
126
127 ll_inode_size_lock(inode);
128 rc = ll_readlink_internal(inode, &request, &symname);
129 if (rc)
130 GOTO(out, rc);
131
132 rc = vfs_readlink(dentry, buffer, buflen, symname);
133 out:
134 ptlrpc_req_finished(request);
135 ll_inode_size_unlock(inode);
136 return rc;
137}
138
139static void *ll_follow_link(struct dentry *dentry, struct nameidata *nd) 118static void *ll_follow_link(struct dentry *dentry, struct nameidata *nd)
140{ 119{
141 struct inode *inode = dentry->d_inode; 120 struct inode *inode = dentry->d_inode;
@@ -175,7 +154,7 @@ static void ll_put_link(struct dentry *dentry, struct nameidata *nd, void *cooki
175} 154}
176 155
177struct inode_operations ll_fast_symlink_inode_operations = { 156struct inode_operations ll_fast_symlink_inode_operations = {
178 .readlink = ll_readlink, 157 .readlink = generic_readlink,
179 .setattr = ll_setattr, 158 .setattr = ll_setattr,
180 .follow_link = ll_follow_link, 159 .follow_link = ll_follow_link,
181 .put_link = ll_put_link, 160 .put_link = ll_put_link,
diff --git a/drivers/staging/media/msi3101/msi001.c b/drivers/staging/media/msi3101/msi001.c
index ac43bae10102..bd0b93cb6c53 100644
--- a/drivers/staging/media/msi3101/msi001.c
+++ b/drivers/staging/media/msi3101/msi001.c
@@ -201,7 +201,7 @@ static int msi001_set_tuner(struct msi001 *s)
201 dev_dbg(&s->spi->dev, "%s: bandwidth selected=%d\n", 201 dev_dbg(&s->spi->dev, "%s: bandwidth selected=%d\n",
202 __func__, bandwidth_lut[i].freq); 202 __func__, bandwidth_lut[i].freq);
203 203
204 f_vco = (f_rf + f_if + f_if1) * lo_div; 204 f_vco = (u64) (f_rf + f_if + f_if1) * lo_div;
205 tmp64 = f_vco; 205 tmp64 = f_vco;
206 m = do_div(tmp64, F_REF * R_REF); 206 m = do_div(tmp64, F_REF * R_REF);
207 n = (unsigned int) tmp64; 207 n = (unsigned int) tmp64;
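The one-character msi001 change above is an integer-width fix: f_rf, f_if, f_if1 and lo_div are 32-bit, so without the cast the product is computed in 32 bits and can overflow before being stored in the 64-bit f_vco. Casting the sum to u64 forces a 64-bit multiplication. A tiny illustrative helper (hypothetical name; like the patch, the addition itself stays 32-bit):

#include <linux/types.h>

static u64 foo_vco_hz(u32 f_rf, u32 f_if, u32 f_if1, u32 lo_div)
{
        return (u64)(f_rf + f_if + f_if1) * lo_div;     /* 64-bit product */
}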
diff --git a/drivers/staging/media/msi3101/sdr-msi3101.c b/drivers/staging/media/msi3101/sdr-msi3101.c
index 260d1b736721..65d351f99da2 100644
--- a/drivers/staging/media/msi3101/sdr-msi3101.c
+++ b/drivers/staging/media/msi3101/sdr-msi3101.c
@@ -913,7 +913,6 @@ static int msi3101_set_usb_adc(struct msi3101_state *s)
913 913
914 /* set tuner, subdev, filters according to sampling rate */ 914 /* set tuner, subdev, filters according to sampling rate */
915 bandwidth_auto = v4l2_ctrl_find(&s->hdl, V4L2_CID_RF_TUNER_BANDWIDTH_AUTO); 915 bandwidth_auto = v4l2_ctrl_find(&s->hdl, V4L2_CID_RF_TUNER_BANDWIDTH_AUTO);
916 bandwidth = v4l2_ctrl_find(&s->hdl, V4L2_CID_RF_TUNER_BANDWIDTH);
917 if (v4l2_ctrl_g_ctrl(bandwidth_auto)) { 916 if (v4l2_ctrl_g_ctrl(bandwidth_auto)) {
918 bandwidth = v4l2_ctrl_find(&s->hdl, V4L2_CID_RF_TUNER_BANDWIDTH); 917 bandwidth = v4l2_ctrl_find(&s->hdl, V4L2_CID_RF_TUNER_BANDWIDTH);
919 v4l2_ctrl_s_ctrl(bandwidth, s->f_adc); 918 v4l2_ctrl_s_ctrl(bandwidth, s->f_adc);
@@ -1078,6 +1077,7 @@ static int msi3101_start_streaming(struct vb2_queue *vq, unsigned int count)
1078static int msi3101_stop_streaming(struct vb2_queue *vq) 1077static int msi3101_stop_streaming(struct vb2_queue *vq)
1079{ 1078{
1080 struct msi3101_state *s = vb2_get_drv_priv(vq); 1079 struct msi3101_state *s = vb2_get_drv_priv(vq);
1080 int ret;
1081 dev_dbg(&s->udev->dev, "%s:\n", __func__); 1081 dev_dbg(&s->udev->dev, "%s:\n", __func__);
1082 1082
1083 if (mutex_lock_interruptible(&s->v4l2_lock)) 1083 if (mutex_lock_interruptible(&s->v4l2_lock))
@@ -1090,17 +1090,22 @@ static int msi3101_stop_streaming(struct vb2_queue *vq)
1090 1090
1091 /* according to tests, at least 700us delay is required */ 1091 /* according to tests, at least 700us delay is required */
1092 msleep(20); 1092 msleep(20);
1093 msi3101_ctrl_msg(s, CMD_STOP_STREAMING, 0); 1093 ret = msi3101_ctrl_msg(s, CMD_STOP_STREAMING, 0);
1094 if (ret)
1095 goto err_sleep_tuner;
1094 1096
1095 /* sleep USB IF / ADC */ 1097 /* sleep USB IF / ADC */
1096 msi3101_ctrl_msg(s, CMD_WREG, 0x01000003); 1098 ret = msi3101_ctrl_msg(s, CMD_WREG, 0x01000003);
1099 if (ret)
1100 goto err_sleep_tuner;
1097 1101
1102err_sleep_tuner:
1098 /* sleep tuner */ 1103 /* sleep tuner */
1099 v4l2_subdev_call(s->v4l2_subdev, core, s_power, 0); 1104 ret = v4l2_subdev_call(s->v4l2_subdev, core, s_power, 0);
1100 1105
1101 mutex_unlock(&s->v4l2_lock); 1106 mutex_unlock(&s->v4l2_lock);
1102 1107
1103 return 0; 1108 return ret;
1104} 1109}
1105 1110
1106static struct vb2_ops msi3101_vb2_ops = { 1111static struct vb2_ops msi3101_vb2_ops = {
diff --git a/drivers/staging/usbip/stub_dev.c b/drivers/staging/usbip/stub_dev.c
index 773d8ca07a00..de692d7011a5 100644
--- a/drivers/staging/usbip/stub_dev.c
+++ b/drivers/staging/usbip/stub_dev.c
@@ -86,7 +86,6 @@ static ssize_t store_sockfd(struct device *dev, struct device_attribute *attr,
86 struct stub_device *sdev = dev_get_drvdata(dev); 86 struct stub_device *sdev = dev_get_drvdata(dev);
87 int sockfd = 0; 87 int sockfd = 0;
88 struct socket *socket; 88 struct socket *socket;
89 ssize_t err = -EINVAL;
90 int rv; 89 int rv;
91 90
92 if (!sdev) { 91 if (!sdev) {
@@ -99,6 +98,7 @@ static ssize_t store_sockfd(struct device *dev, struct device_attribute *attr,
99 return -EINVAL; 98 return -EINVAL;
100 99
101 if (sockfd != -1) { 100 if (sockfd != -1) {
101 int err;
102 dev_info(dev, "stub up\n"); 102 dev_info(dev, "stub up\n");
103 103
104 spin_lock_irq(&sdev->ud.lock); 104 spin_lock_irq(&sdev->ud.lock);
@@ -108,7 +108,7 @@ static ssize_t store_sockfd(struct device *dev, struct device_attribute *attr,
108 goto err; 108 goto err;
109 } 109 }
110 110
111 socket = sockfd_to_socket(sockfd); 111 socket = sockfd_lookup(sockfd, &err);
112 if (!socket) 112 if (!socket)
113 goto err; 113 goto err;
114 114
@@ -141,7 +141,7 @@ static ssize_t store_sockfd(struct device *dev, struct device_attribute *attr,
141 141
142err: 142err:
143 spin_unlock_irq(&sdev->ud.lock); 143 spin_unlock_irq(&sdev->ud.lock);
144 return err; 144 return -EINVAL;
145} 145}
146static DEVICE_ATTR(usbip_sockfd, S_IWUSR, NULL, store_sockfd); 146static DEVICE_ATTR(usbip_sockfd, S_IWUSR, NULL, store_sockfd);
147 147
@@ -211,7 +211,7 @@ static void stub_shutdown_connection(struct usbip_device *ud)
211 * not touch NULL socket. 211 * not touch NULL socket.
212 */ 212 */
213 if (ud->tcp_socket) { 213 if (ud->tcp_socket) {
214 fput(ud->tcp_socket->file); 214 sockfd_put(ud->tcp_socket);
215 ud->tcp_socket = NULL; 215 ud->tcp_socket = NULL;
216 } 216 }
217 217
diff --git a/drivers/staging/usbip/usbip_common.c b/drivers/staging/usbip/usbip_common.c
index 184fa70365db..facaaf003f19 100644
--- a/drivers/staging/usbip/usbip_common.c
+++ b/drivers/staging/usbip/usbip_common.c
@@ -382,31 +382,6 @@ err:
382} 382}
383EXPORT_SYMBOL_GPL(usbip_recv); 383EXPORT_SYMBOL_GPL(usbip_recv);
384 384
385struct socket *sockfd_to_socket(unsigned int sockfd)
386{
387 struct socket *socket;
388 struct file *file;
389 struct inode *inode;
390
391 file = fget(sockfd);
392 if (!file) {
393 pr_err("invalid sockfd\n");
394 return NULL;
395 }
396
397 inode = file_inode(file);
398
399 if (!inode || !S_ISSOCK(inode->i_mode)) {
400 fput(file);
401 return NULL;
402 }
403
404 socket = SOCKET_I(inode);
405
406 return socket;
407}
408EXPORT_SYMBOL_GPL(sockfd_to_socket);
409
410/* there may be more cases to tweak the flags. */ 385/* there may be more cases to tweak the flags. */
411static unsigned int tweak_transfer_flags(unsigned int flags) 386static unsigned int tweak_transfer_flags(unsigned int flags)
412{ 387{
diff --git a/drivers/staging/usbip/usbip_common.h b/drivers/staging/usbip/usbip_common.h
index 732fb636a1e5..f555d834f134 100644
--- a/drivers/staging/usbip/usbip_common.h
+++ b/drivers/staging/usbip/usbip_common.h
@@ -299,7 +299,6 @@ void usbip_dump_urb(struct urb *purb);
299void usbip_dump_header(struct usbip_header *pdu); 299void usbip_dump_header(struct usbip_header *pdu);
300 300
301int usbip_recv(struct socket *sock, void *buf, int size); 301int usbip_recv(struct socket *sock, void *buf, int size);
302struct socket *sockfd_to_socket(unsigned int sockfd);
303 302
304void usbip_pack_pdu(struct usbip_header *pdu, struct urb *urb, int cmd, 303void usbip_pack_pdu(struct usbip_header *pdu, struct urb *urb, int cmd,
305 int pack); 304 int pack);
diff --git a/drivers/staging/usbip/vhci_hcd.c b/drivers/staging/usbip/vhci_hcd.c
index 1e84577230ef..70e17551943d 100644
--- a/drivers/staging/usbip/vhci_hcd.c
+++ b/drivers/staging/usbip/vhci_hcd.c
@@ -788,7 +788,7 @@ static void vhci_shutdown_connection(struct usbip_device *ud)
788 788
789 /* active connection is closed */ 789 /* active connection is closed */
790 if (vdev->ud.tcp_socket) { 790 if (vdev->ud.tcp_socket) {
791 fput(vdev->ud.tcp_socket->file); 791 sockfd_put(vdev->ud.tcp_socket);
792 vdev->ud.tcp_socket = NULL; 792 vdev->ud.tcp_socket = NULL;
793 } 793 }
794 pr_info("release socket\n"); 794 pr_info("release socket\n");
@@ -835,7 +835,7 @@ static void vhci_device_reset(struct usbip_device *ud)
835 vdev->udev = NULL; 835 vdev->udev = NULL;
836 836
837 if (ud->tcp_socket) { 837 if (ud->tcp_socket) {
838 fput(ud->tcp_socket->file); 838 sockfd_put(ud->tcp_socket);
839 ud->tcp_socket = NULL; 839 ud->tcp_socket = NULL;
840 } 840 }
841 ud->status = VDEV_ST_NULL; 841 ud->status = VDEV_ST_NULL;
diff --git a/drivers/staging/usbip/vhci_sysfs.c b/drivers/staging/usbip/vhci_sysfs.c
index e0980324fb03..47bddcdde0a6 100644
--- a/drivers/staging/usbip/vhci_sysfs.c
+++ b/drivers/staging/usbip/vhci_sysfs.c
@@ -176,6 +176,7 @@ static ssize_t store_attach(struct device *dev, struct device_attribute *attr,
176 struct socket *socket; 176 struct socket *socket;
177 int sockfd = 0; 177 int sockfd = 0;
178 __u32 rhport = 0, devid = 0, speed = 0; 178 __u32 rhport = 0, devid = 0, speed = 0;
179 int err;
179 180
180 /* 181 /*
181 * @rhport: port number of vhci_hcd 182 * @rhport: port number of vhci_hcd
@@ -194,8 +195,7 @@ static ssize_t store_attach(struct device *dev, struct device_attribute *attr,
194 return -EINVAL; 195 return -EINVAL;
195 196
196 /* Extract socket from fd. */ 197 /* Extract socket from fd. */
197 /* The correct way to clean this up is to fput(socket->file). */ 198 socket = sockfd_lookup(sockfd, &err);
198 socket = sockfd_to_socket(sockfd);
199 if (!socket) 199 if (!socket)
200 return -EINVAL; 200 return -EINVAL;
201 201
@@ -211,7 +211,7 @@ static ssize_t store_attach(struct device *dev, struct device_attribute *attr,
211 spin_unlock(&vdev->ud.lock); 211 spin_unlock(&vdev->ud.lock);
212 spin_unlock(&the_controller->lock); 212 spin_unlock(&the_controller->lock);
213 213
214 fput(socket->file); 214 sockfd_put(socket);
215 215
216 dev_err(dev, "port %d already used\n", rhport); 216 dev_err(dev, "port %d already used\n", rhport);
217 return -EINVAL; 217 return -EINVAL;
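The usbip hunks above retire the driver-private sockfd_to_socket() (an fget() plus S_ISSOCK check, released via fput(socket->file)) in favour of the core sockfd_lookup()/sockfd_put() pair, which keeps lookup and release symmetrical. A minimal sketch of that pairing with hypothetical foo_* helpers:

#include <linux/net.h>

/* sockfd_lookup() takes a reference on the socket's file and reports
 * failure through *err (e.g. -EBADF or -ENOTSOCK). */
static int foo_attach_socket(int sockfd, struct socket **out)
{
        int err;
        struct socket *sock = sockfd_lookup(sockfd, &err);

        if (!sock)
                return err;
        *out = sock;
        return 0;
}

static void foo_detach_socket(struct socket *sock)
{
        sockfd_put(sock);       /* pairs with the sockfd_lookup() above */
}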
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
index b83ec378d04f..78cab13bbb1b 100644
--- a/drivers/target/iscsi/iscsi_target.c
+++ b/drivers/target/iscsi/iscsi_target.c
@@ -499,6 +499,23 @@ static int iscsit_queue_rsp(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
499 return 0; 499 return 0;
500} 500}
501 501
502static void iscsit_aborted_task(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
503{
504 bool scsi_cmd = (cmd->iscsi_opcode == ISCSI_OP_SCSI_CMD);
505
506 spin_lock_bh(&conn->cmd_lock);
507 if (!list_empty(&cmd->i_conn_node))
508 list_del_init(&cmd->i_conn_node);
509 spin_unlock_bh(&conn->cmd_lock);
510
511 __iscsit_free_cmd(cmd, scsi_cmd, true);
512}
513
514static enum target_prot_op iscsit_get_sup_prot_ops(struct iscsi_conn *conn)
515{
516 return TARGET_PROT_NORMAL;
517}
518
502static struct iscsit_transport iscsi_target_transport = { 519static struct iscsit_transport iscsi_target_transport = {
503 .name = "iSCSI/TCP", 520 .name = "iSCSI/TCP",
504 .transport_type = ISCSI_TCP, 521 .transport_type = ISCSI_TCP,
@@ -513,6 +530,8 @@ static struct iscsit_transport iscsi_target_transport = {
513 .iscsit_response_queue = iscsit_response_queue, 530 .iscsit_response_queue = iscsit_response_queue,
514 .iscsit_queue_data_in = iscsit_queue_rsp, 531 .iscsit_queue_data_in = iscsit_queue_rsp,
515 .iscsit_queue_status = iscsit_queue_rsp, 532 .iscsit_queue_status = iscsit_queue_rsp,
533 .iscsit_aborted_task = iscsit_aborted_task,
534 .iscsit_get_sup_prot_ops = iscsit_get_sup_prot_ops,
516}; 535};
517 536
518static int __init iscsi_target_init_module(void) 537static int __init iscsi_target_init_module(void)
@@ -1503,6 +1522,16 @@ int iscsit_setup_nop_out(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
1503{ 1522{
1504 u32 payload_length = ntoh24(hdr->dlength); 1523 u32 payload_length = ntoh24(hdr->dlength);
1505 1524
1525 if (!(hdr->flags & ISCSI_FLAG_CMD_FINAL)) {
1526		pr_err("NopOUT Flags, Left Most Bit not set, protocol error.\n");
1527 if (!cmd)
1528 return iscsit_add_reject(conn, ISCSI_REASON_PROTOCOL_ERROR,
1529 (unsigned char *)hdr);
1530
1531 return iscsit_reject_cmd(cmd, ISCSI_REASON_PROTOCOL_ERROR,
1532 (unsigned char *)hdr);
1533 }
1534
1506 if (hdr->itt == RESERVED_ITT && !(hdr->opcode & ISCSI_OP_IMMEDIATE)) { 1535 if (hdr->itt == RESERVED_ITT && !(hdr->opcode & ISCSI_OP_IMMEDIATE)) {
1507 pr_err("NOPOUT ITT is reserved, but Immediate Bit is" 1536 pr_err("NOPOUT ITT is reserved, but Immediate Bit is"
1508 " not set, protocol error.\n"); 1537 " not set, protocol error.\n");
@@ -2468,6 +2497,7 @@ static void iscsit_build_conn_drop_async_message(struct iscsi_conn *conn)
2468{ 2497{
2469 struct iscsi_cmd *cmd; 2498 struct iscsi_cmd *cmd;
2470 struct iscsi_conn *conn_p; 2499 struct iscsi_conn *conn_p;
2500 bool found = false;
2471 2501
2472 /* 2502 /*
2473	 * Only send an Asynchronous Message on connections whose network		2503	 * Only send an Asynchronous Message on connections whose network
@@ -2476,11 +2506,12 @@ static void iscsit_build_conn_drop_async_message(struct iscsi_conn *conn)
2476 list_for_each_entry(conn_p, &conn->sess->sess_conn_list, conn_list) { 2506 list_for_each_entry(conn_p, &conn->sess->sess_conn_list, conn_list) {
2477 if (conn_p->conn_state == TARG_CONN_STATE_LOGGED_IN) { 2507 if (conn_p->conn_state == TARG_CONN_STATE_LOGGED_IN) {
2478 iscsit_inc_conn_usage_count(conn_p); 2508 iscsit_inc_conn_usage_count(conn_p);
2509 found = true;
2479 break; 2510 break;
2480 } 2511 }
2481 } 2512 }
2482 2513
2483 if (!conn_p) 2514 if (!found)
2484 return; 2515 return;
2485 2516
2486 cmd = iscsit_allocate_cmd(conn_p, TASK_RUNNING); 2517 cmd = iscsit_allocate_cmd(conn_p, TASK_RUNNING);
diff --git a/drivers/target/iscsi/iscsi_target_configfs.c b/drivers/target/iscsi/iscsi_target_configfs.c
index 1c0088fe9e99..ae03f3e5de1e 100644
--- a/drivers/target/iscsi/iscsi_target_configfs.c
+++ b/drivers/target/iscsi/iscsi_target_configfs.c
@@ -1052,6 +1052,11 @@ TPG_ATTR(demo_mode_discovery, S_IRUGO | S_IWUSR);
1052 */ 1052 */
1053DEF_TPG_ATTRIB(default_erl); 1053DEF_TPG_ATTRIB(default_erl);
1054TPG_ATTR(default_erl, S_IRUGO | S_IWUSR); 1054TPG_ATTR(default_erl, S_IRUGO | S_IWUSR);
1055/*
1056 * Define iscsi_tpg_attrib_s_t10_pi
1057 */
1058DEF_TPG_ATTRIB(t10_pi);
1059TPG_ATTR(t10_pi, S_IRUGO | S_IWUSR);
1055 1060
1056static struct configfs_attribute *lio_target_tpg_attrib_attrs[] = { 1061static struct configfs_attribute *lio_target_tpg_attrib_attrs[] = {
1057 &iscsi_tpg_attrib_authentication.attr, 1062 &iscsi_tpg_attrib_authentication.attr,
@@ -1064,6 +1069,7 @@ static struct configfs_attribute *lio_target_tpg_attrib_attrs[] = {
1064 &iscsi_tpg_attrib_prod_mode_write_protect.attr, 1069 &iscsi_tpg_attrib_prod_mode_write_protect.attr,
1065 &iscsi_tpg_attrib_demo_mode_discovery.attr, 1070 &iscsi_tpg_attrib_demo_mode_discovery.attr,
1066 &iscsi_tpg_attrib_default_erl.attr, 1071 &iscsi_tpg_attrib_default_erl.attr,
1072 &iscsi_tpg_attrib_t10_pi.attr,
1067 NULL, 1073 NULL,
1068}; 1074};
1069 1075
@@ -1815,6 +1821,13 @@ static void lio_queue_tm_rsp(struct se_cmd *se_cmd)
1815 iscsit_add_cmd_to_response_queue(cmd, cmd->conn, cmd->i_state); 1821 iscsit_add_cmd_to_response_queue(cmd, cmd->conn, cmd->i_state);
1816} 1822}
1817 1823
1824static void lio_aborted_task(struct se_cmd *se_cmd)
1825{
1826 struct iscsi_cmd *cmd = container_of(se_cmd, struct iscsi_cmd, se_cmd);
1827
1828 cmd->conn->conn_transport->iscsit_aborted_task(cmd->conn, cmd);
1829}
1830
1818static char *lio_tpg_get_endpoint_wwn(struct se_portal_group *se_tpg) 1831static char *lio_tpg_get_endpoint_wwn(struct se_portal_group *se_tpg)
1819{ 1832{
1820 struct iscsi_portal_group *tpg = se_tpg->se_tpg_fabric_ptr; 1833 struct iscsi_portal_group *tpg = se_tpg->se_tpg_fabric_ptr;
@@ -1999,6 +2012,7 @@ int iscsi_target_register_configfs(void)
1999 fabric->tf_ops.queue_data_in = &lio_queue_data_in; 2012 fabric->tf_ops.queue_data_in = &lio_queue_data_in;
2000 fabric->tf_ops.queue_status = &lio_queue_status; 2013 fabric->tf_ops.queue_status = &lio_queue_status;
2001 fabric->tf_ops.queue_tm_rsp = &lio_queue_tm_rsp; 2014 fabric->tf_ops.queue_tm_rsp = &lio_queue_tm_rsp;
2015 fabric->tf_ops.aborted_task = &lio_aborted_task;
2002 /* 2016 /*
2003 * Setup function pointers for generic logic in target_core_fabric_configfs.c 2017 * Setup function pointers for generic logic in target_core_fabric_configfs.c
2004 */ 2018 */
diff --git a/drivers/target/iscsi/iscsi_target_core.h b/drivers/target/iscsi/iscsi_target_core.h
index 48f7b3bf4e8c..6960f22909ae 100644
--- a/drivers/target/iscsi/iscsi_target_core.h
+++ b/drivers/target/iscsi/iscsi_target_core.h
@@ -58,7 +58,8 @@
58#define TA_DEMO_MODE_DISCOVERY 1 58#define TA_DEMO_MODE_DISCOVERY 1
59#define TA_DEFAULT_ERL 0 59#define TA_DEFAULT_ERL 0
60#define TA_CACHE_CORE_NPS 0 60#define TA_CACHE_CORE_NPS 0
61 61/* T10 protection information disabled by default */
62#define TA_DEFAULT_T10_PI 0
62 63
63#define ISCSI_IOV_DATA_BUFFER 5 64#define ISCSI_IOV_DATA_BUFFER 5
64 65
@@ -556,7 +557,7 @@ struct iscsi_conn {
556 struct completion rx_half_close_comp; 557 struct completion rx_half_close_comp;
557 /* socket used by this connection */ 558 /* socket used by this connection */
558 struct socket *sock; 559 struct socket *sock;
559 void (*orig_data_ready)(struct sock *, int); 560 void (*orig_data_ready)(struct sock *);
560 void (*orig_state_change)(struct sock *); 561 void (*orig_state_change)(struct sock *);
561#define LOGIN_FLAGS_READ_ACTIVE 1 562#define LOGIN_FLAGS_READ_ACTIVE 1
562#define LOGIN_FLAGS_CLOSED 2 563#define LOGIN_FLAGS_CLOSED 2
@@ -765,6 +766,7 @@ struct iscsi_tpg_attrib {
765 u32 prod_mode_write_protect; 766 u32 prod_mode_write_protect;
766 u32 demo_mode_discovery; 767 u32 demo_mode_discovery;
767 u32 default_erl; 768 u32 default_erl;
769 u8 t10_pi;
768 struct iscsi_portal_group *tpg; 770 struct iscsi_portal_group *tpg;
769}; 771};
770 772
@@ -787,6 +789,7 @@ struct iscsi_np {
787 void *np_context; 789 void *np_context;
788 struct iscsit_transport *np_transport; 790 struct iscsit_transport *np_transport;
789 struct list_head np_list; 791 struct list_head np_list;
792 struct iscsi_tpg_np *tpg_np;
790} ____cacheline_aligned; 793} ____cacheline_aligned;
791 794
792struct iscsi_tpg_np { 795struct iscsi_tpg_np {
diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c
index e29279e6b577..8739b98f6f93 100644
--- a/drivers/target/iscsi/iscsi_target_login.c
+++ b/drivers/target/iscsi/iscsi_target_login.c
@@ -259,6 +259,7 @@ static int iscsi_login_zero_tsih_s1(
259{ 259{
260 struct iscsi_session *sess = NULL; 260 struct iscsi_session *sess = NULL;
261 struct iscsi_login_req *pdu = (struct iscsi_login_req *)buf; 261 struct iscsi_login_req *pdu = (struct iscsi_login_req *)buf;
262 enum target_prot_op sup_pro_ops;
262 int ret; 263 int ret;
263 264
264 sess = kzalloc(sizeof(struct iscsi_session), GFP_KERNEL); 265 sess = kzalloc(sizeof(struct iscsi_session), GFP_KERNEL);
@@ -320,8 +321,9 @@ static int iscsi_login_zero_tsih_s1(
320 kfree(sess); 321 kfree(sess);
321 return -ENOMEM; 322 return -ENOMEM;
322 } 323 }
324 sup_pro_ops = conn->conn_transport->iscsit_get_sup_prot_ops(conn);
323 325
324 sess->se_sess = transport_init_session(); 326 sess->se_sess = transport_init_session(sup_pro_ops);
325 if (IS_ERR(sess->se_sess)) { 327 if (IS_ERR(sess->se_sess)) {
326 iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR, 328 iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
327 ISCSI_LOGIN_STATUS_NO_RESOURCES); 329 ISCSI_LOGIN_STATUS_NO_RESOURCES);
diff --git a/drivers/target/iscsi/iscsi_target_nego.c b/drivers/target/iscsi/iscsi_target_nego.c
index 582ba84075ec..75b685960e80 100644
--- a/drivers/target/iscsi/iscsi_target_nego.c
+++ b/drivers/target/iscsi/iscsi_target_nego.c
@@ -375,7 +375,7 @@ static int iscsi_target_do_tx_login_io(struct iscsi_conn *conn, struct iscsi_log
375 return 0; 375 return 0;
376} 376}
377 377
378static void iscsi_target_sk_data_ready(struct sock *sk, int count) 378static void iscsi_target_sk_data_ready(struct sock *sk)
379{ 379{
380 struct iscsi_conn *conn = sk->sk_user_data; 380 struct iscsi_conn *conn = sk->sk_user_data;
381 bool rc; 381 bool rc;
diff --git a/drivers/target/iscsi/iscsi_target_tpg.c b/drivers/target/iscsi/iscsi_target_tpg.c
index 44a5471de00f..eb96b20dc09e 100644
--- a/drivers/target/iscsi/iscsi_target_tpg.c
+++ b/drivers/target/iscsi/iscsi_target_tpg.c
@@ -225,6 +225,7 @@ static void iscsit_set_default_tpg_attribs(struct iscsi_portal_group *tpg)
225 a->prod_mode_write_protect = TA_PROD_MODE_WRITE_PROTECT; 225 a->prod_mode_write_protect = TA_PROD_MODE_WRITE_PROTECT;
226 a->demo_mode_discovery = TA_DEMO_MODE_DISCOVERY; 226 a->demo_mode_discovery = TA_DEMO_MODE_DISCOVERY;
227 a->default_erl = TA_DEFAULT_ERL; 227 a->default_erl = TA_DEFAULT_ERL;
228 a->t10_pi = TA_DEFAULT_T10_PI;
228} 229}
229 230
230int iscsit_tpg_add_portal_group(struct iscsi_tiqn *tiqn, struct iscsi_portal_group *tpg) 231int iscsit_tpg_add_portal_group(struct iscsi_tiqn *tiqn, struct iscsi_portal_group *tpg)
@@ -500,6 +501,7 @@ struct iscsi_tpg_np *iscsit_tpg_add_network_portal(
500 init_completion(&tpg_np->tpg_np_comp); 501 init_completion(&tpg_np->tpg_np_comp);
501 kref_init(&tpg_np->tpg_np_kref); 502 kref_init(&tpg_np->tpg_np_kref);
502 tpg_np->tpg_np = np; 503 tpg_np->tpg_np = np;
504 np->tpg_np = tpg_np;
503 tpg_np->tpg = tpg; 505 tpg_np->tpg = tpg;
504 506
505 spin_lock(&tpg->tpg_np_lock); 507 spin_lock(&tpg->tpg_np_lock);
@@ -858,3 +860,22 @@ int iscsit_ta_default_erl(
858 860
859 return 0; 861 return 0;
860} 862}
863
864int iscsit_ta_t10_pi(
865 struct iscsi_portal_group *tpg,
866 u32 flag)
867{
868 struct iscsi_tpg_attrib *a = &tpg->tpg_attrib;
869
870 if ((flag != 0) && (flag != 1)) {
871 pr_err("Illegal value %d\n", flag);
872 return -EINVAL;
873 }
874
875 a->t10_pi = flag;
876 pr_debug("iSCSI_TPG[%hu] - T10 Protection information bit:"
877 " %s\n", tpg->tpgt, (a->t10_pi) ?
878 "ON" : "OFF");
879
880 return 0;
881}
diff --git a/drivers/target/iscsi/iscsi_target_tpg.h b/drivers/target/iscsi/iscsi_target_tpg.h
index 213c0fc7fdc9..0a182f2aa8a2 100644
--- a/drivers/target/iscsi/iscsi_target_tpg.h
+++ b/drivers/target/iscsi/iscsi_target_tpg.h
@@ -39,5 +39,6 @@ extern int iscsit_ta_demo_mode_write_protect(struct iscsi_portal_group *, u32);
39extern int iscsit_ta_prod_mode_write_protect(struct iscsi_portal_group *, u32); 39extern int iscsit_ta_prod_mode_write_protect(struct iscsi_portal_group *, u32);
40extern int iscsit_ta_demo_mode_discovery(struct iscsi_portal_group *, u32); 40extern int iscsit_ta_demo_mode_discovery(struct iscsi_portal_group *, u32);
41extern int iscsit_ta_default_erl(struct iscsi_portal_group *, u32); 41extern int iscsit_ta_default_erl(struct iscsi_portal_group *, u32);
42extern int iscsit_ta_t10_pi(struct iscsi_portal_group *, u32);
42 43
43#endif /* ISCSI_TARGET_TPG_H */ 44#endif /* ISCSI_TARGET_TPG_H */
diff --git a/drivers/target/iscsi/iscsi_target_util.c b/drivers/target/iscsi/iscsi_target_util.c
index e655b042ed18..53e157cb8c54 100644
--- a/drivers/target/iscsi/iscsi_target_util.c
+++ b/drivers/target/iscsi/iscsi_target_util.c
@@ -705,8 +705,8 @@ void iscsit_release_cmd(struct iscsi_cmd *cmd)
705} 705}
706EXPORT_SYMBOL(iscsit_release_cmd); 706EXPORT_SYMBOL(iscsit_release_cmd);
707 707
708static void __iscsit_free_cmd(struct iscsi_cmd *cmd, bool scsi_cmd, 708void __iscsit_free_cmd(struct iscsi_cmd *cmd, bool scsi_cmd,
709 bool check_queues) 709 bool check_queues)
710{ 710{
711 struct iscsi_conn *conn = cmd->conn; 711 struct iscsi_conn *conn = cmd->conn;
712 712
diff --git a/drivers/target/iscsi/iscsi_target_util.h b/drivers/target/iscsi/iscsi_target_util.h
index 561a424d1980..a68508c4fec8 100644
--- a/drivers/target/iscsi/iscsi_target_util.h
+++ b/drivers/target/iscsi/iscsi_target_util.h
@@ -30,6 +30,7 @@ extern void iscsit_remove_cmd_from_tx_queues(struct iscsi_cmd *, struct iscsi_co
30extern bool iscsit_conn_all_queues_empty(struct iscsi_conn *); 30extern bool iscsit_conn_all_queues_empty(struct iscsi_conn *);
31extern void iscsit_free_queue_reqs_for_conn(struct iscsi_conn *); 31extern void iscsit_free_queue_reqs_for_conn(struct iscsi_conn *);
32extern void iscsit_release_cmd(struct iscsi_cmd *); 32extern void iscsit_release_cmd(struct iscsi_cmd *);
33extern void __iscsit_free_cmd(struct iscsi_cmd *, bool, bool);
33extern void iscsit_free_cmd(struct iscsi_cmd *, bool); 34extern void iscsit_free_cmd(struct iscsi_cmd *, bool);
34extern int iscsit_check_session_usage_count(struct iscsi_session *); 35extern int iscsit_check_session_usage_count(struct iscsi_session *);
35extern void iscsit_dec_session_usage_count(struct iscsi_session *); 36extern void iscsit_dec_session_usage_count(struct iscsi_session *);
diff --git a/drivers/target/loopback/tcm_loop.c b/drivers/target/loopback/tcm_loop.c
index fadad7c5f635..c886ad1c39fb 100644
--- a/drivers/target/loopback/tcm_loop.c
+++ b/drivers/target/loopback/tcm_loop.c
@@ -212,6 +212,10 @@ static void tcm_loop_submission_work(struct work_struct *work)
212 se_cmd->se_cmd_flags |= SCF_BIDI; 212 se_cmd->se_cmd_flags |= SCF_BIDI;
213 213
214 } 214 }
215
216 if (!scsi_prot_sg_count(sc) && scsi_get_prot_op(sc) != SCSI_PROT_NORMAL)
217 se_cmd->prot_pto = true;
218
215 rc = target_submit_cmd_map_sgls(se_cmd, tl_nexus->se_sess, sc->cmnd, 219 rc = target_submit_cmd_map_sgls(se_cmd, tl_nexus->se_sess, sc->cmnd,
216 &tl_cmd->tl_sense_buf[0], tl_cmd->sc->device->lun, 220 &tl_cmd->tl_sense_buf[0], tl_cmd->sc->device->lun,
217 scsi_bufflen(sc), tcm_loop_sam_attr(sc), 221 scsi_bufflen(sc), tcm_loop_sam_attr(sc),
@@ -915,6 +919,11 @@ static void tcm_loop_queue_tm_rsp(struct se_cmd *se_cmd)
915 wake_up(&tl_tmr->tl_tmr_wait); 919 wake_up(&tl_tmr->tl_tmr_wait);
916} 920}
917 921
922static void tcm_loop_aborted_task(struct se_cmd *se_cmd)
923{
924 return;
925}
926
918static char *tcm_loop_dump_proto_id(struct tcm_loop_hba *tl_hba) 927static char *tcm_loop_dump_proto_id(struct tcm_loop_hba *tl_hba)
919{ 928{
920 switch (tl_hba->tl_proto_id) { 929 switch (tl_hba->tl_proto_id) {
@@ -1009,7 +1018,7 @@ static int tcm_loop_make_nexus(
1009 /* 1018 /*
1010 * Initialize the struct se_session pointer 1019 * Initialize the struct se_session pointer
1011 */ 1020 */
1012 tl_nexus->se_sess = transport_init_session(); 1021 tl_nexus->se_sess = transport_init_session(TARGET_PROT_ALL);
1013 if (IS_ERR(tl_nexus->se_sess)) { 1022 if (IS_ERR(tl_nexus->se_sess)) {
1014 ret = PTR_ERR(tl_nexus->se_sess); 1023 ret = PTR_ERR(tl_nexus->se_sess);
1015 goto out; 1024 goto out;
@@ -1483,6 +1492,7 @@ static int tcm_loop_register_configfs(void)
1483 fabric->tf_ops.queue_data_in = &tcm_loop_queue_data_in; 1492 fabric->tf_ops.queue_data_in = &tcm_loop_queue_data_in;
1484 fabric->tf_ops.queue_status = &tcm_loop_queue_status; 1493 fabric->tf_ops.queue_status = &tcm_loop_queue_status;
1485 fabric->tf_ops.queue_tm_rsp = &tcm_loop_queue_tm_rsp; 1494 fabric->tf_ops.queue_tm_rsp = &tcm_loop_queue_tm_rsp;
1495 fabric->tf_ops.aborted_task = &tcm_loop_aborted_task;
1486 1496
1487 /* 1497 /*
1488 * Setup function pointers for generic logic in target_core_fabric_configfs.c 1498 * Setup function pointers for generic logic in target_core_fabric_configfs.c
diff --git a/drivers/target/sbp/sbp_target.c b/drivers/target/sbp/sbp_target.c
index 24884cac19ce..e7e93727553c 100644
--- a/drivers/target/sbp/sbp_target.c
+++ b/drivers/target/sbp/sbp_target.c
@@ -210,7 +210,7 @@ static struct sbp_session *sbp_session_create(
210 return ERR_PTR(-ENOMEM); 210 return ERR_PTR(-ENOMEM);
211 } 211 }
212 212
213 sess->se_sess = transport_init_session(); 213 sess->se_sess = transport_init_session(TARGET_PROT_NORMAL);
214 if (IS_ERR(sess->se_sess)) { 214 if (IS_ERR(sess->se_sess)) {
215 pr_err("failed to init se_session\n"); 215 pr_err("failed to init se_session\n");
216 216
@@ -1846,6 +1846,11 @@ static void sbp_queue_tm_rsp(struct se_cmd *se_cmd)
1846{ 1846{
1847} 1847}
1848 1848
1849static void sbp_aborted_task(struct se_cmd *se_cmd)
1850{
1851 return;
1852}
1853
1849static int sbp_check_stop_free(struct se_cmd *se_cmd) 1854static int sbp_check_stop_free(struct se_cmd *se_cmd)
1850{ 1855{
1851 struct sbp_target_request *req = container_of(se_cmd, 1856 struct sbp_target_request *req = container_of(se_cmd,
@@ -2526,6 +2531,7 @@ static struct target_core_fabric_ops sbp_ops = {
2526 .queue_data_in = sbp_queue_data_in, 2531 .queue_data_in = sbp_queue_data_in,
2527 .queue_status = sbp_queue_status, 2532 .queue_status = sbp_queue_status,
2528 .queue_tm_rsp = sbp_queue_tm_rsp, 2533 .queue_tm_rsp = sbp_queue_tm_rsp,
2534 .aborted_task = sbp_aborted_task,
2529 .check_stop_free = sbp_check_stop_free, 2535 .check_stop_free = sbp_check_stop_free,
2530 2536
2531 .fabric_make_wwn = sbp_make_tport, 2537 .fabric_make_wwn = sbp_make_tport,
diff --git a/drivers/target/target_core_alua.c b/drivers/target/target_core_alua.c
index c3d9df6aaf5f..fcbe6125b73e 100644
--- a/drivers/target/target_core_alua.c
+++ b/drivers/target/target_core_alua.c
@@ -455,11 +455,26 @@ out:
455 return rc; 455 return rc;
456} 456}
457 457
458static inline int core_alua_state_nonoptimized( 458static inline void set_ascq(struct se_cmd *cmd, u8 alua_ascq)
459{
460 /*
461 * Set SCSI additional sense code (ASC) to 'LUN Not Accessible';
462 * The ALUA additional sense code qualifier (ASCQ) is determined
463 * by the ALUA primary or secondary access state..
464 */
465 pr_debug("[%s]: ALUA TG Port not available, "
466 "SenseKey: NOT_READY, ASC/ASCQ: "
467 "0x04/0x%02x\n",
468 cmd->se_tfo->get_fabric_name(), alua_ascq);
469
470 cmd->scsi_asc = 0x04;
471 cmd->scsi_ascq = alua_ascq;
472}
473
474static inline void core_alua_state_nonoptimized(
459 struct se_cmd *cmd, 475 struct se_cmd *cmd,
460 unsigned char *cdb, 476 unsigned char *cdb,
461 int nonop_delay_msecs, 477 int nonop_delay_msecs)
462 u8 *alua_ascq)
463{ 478{
464 /* 479 /*
465 * Set SCF_ALUA_NON_OPTIMIZED here, this value will be checked 480 * Set SCF_ALUA_NON_OPTIMIZED here, this value will be checked
@@ -468,13 +483,11 @@ static inline int core_alua_state_nonoptimized(
468 */ 483 */
469 cmd->se_cmd_flags |= SCF_ALUA_NON_OPTIMIZED; 484 cmd->se_cmd_flags |= SCF_ALUA_NON_OPTIMIZED;
470 cmd->alua_nonop_delay = nonop_delay_msecs; 485 cmd->alua_nonop_delay = nonop_delay_msecs;
471 return 0;
472} 486}
473 487
474static inline int core_alua_state_lba_dependent( 488static inline int core_alua_state_lba_dependent(
475 struct se_cmd *cmd, 489 struct se_cmd *cmd,
476 struct t10_alua_tg_pt_gp *tg_pt_gp, 490 struct t10_alua_tg_pt_gp *tg_pt_gp)
477 u8 *alua_ascq)
478{ 491{
479 struct se_device *dev = cmd->se_dev; 492 struct se_device *dev = cmd->se_dev;
480 u64 segment_size, segment_mult, sectors, lba; 493 u64 segment_size, segment_mult, sectors, lba;
@@ -520,7 +533,7 @@ static inline int core_alua_state_lba_dependent(
520 } 533 }
521 if (!cur_map) { 534 if (!cur_map) {
522 spin_unlock(&dev->t10_alua.lba_map_lock); 535 spin_unlock(&dev->t10_alua.lba_map_lock);
523 *alua_ascq = ASCQ_04H_ALUA_TG_PT_UNAVAILABLE; 536 set_ascq(cmd, ASCQ_04H_ALUA_TG_PT_UNAVAILABLE);
524 return 1; 537 return 1;
525 } 538 }
526 list_for_each_entry(map_mem, &cur_map->lba_map_mem_list, 539 list_for_each_entry(map_mem, &cur_map->lba_map_mem_list,
@@ -531,11 +544,11 @@ static inline int core_alua_state_lba_dependent(
531 switch(map_mem->lba_map_mem_alua_state) { 544 switch(map_mem->lba_map_mem_alua_state) {
532 case ALUA_ACCESS_STATE_STANDBY: 545 case ALUA_ACCESS_STATE_STANDBY:
533 spin_unlock(&dev->t10_alua.lba_map_lock); 546 spin_unlock(&dev->t10_alua.lba_map_lock);
534 *alua_ascq = ASCQ_04H_ALUA_TG_PT_STANDBY; 547 set_ascq(cmd, ASCQ_04H_ALUA_TG_PT_STANDBY);
535 return 1; 548 return 1;
536 case ALUA_ACCESS_STATE_UNAVAILABLE: 549 case ALUA_ACCESS_STATE_UNAVAILABLE:
537 spin_unlock(&dev->t10_alua.lba_map_lock); 550 spin_unlock(&dev->t10_alua.lba_map_lock);
538 *alua_ascq = ASCQ_04H_ALUA_TG_PT_UNAVAILABLE; 551 set_ascq(cmd, ASCQ_04H_ALUA_TG_PT_UNAVAILABLE);
539 return 1; 552 return 1;
540 default: 553 default:
541 break; 554 break;
@@ -548,8 +561,7 @@ static inline int core_alua_state_lba_dependent(
548 561
549static inline int core_alua_state_standby( 562static inline int core_alua_state_standby(
550 struct se_cmd *cmd, 563 struct se_cmd *cmd,
551 unsigned char *cdb, 564 unsigned char *cdb)
552 u8 *alua_ascq)
553{ 565{
554 /* 566 /*
555 * Allowed CDBs for ALUA_ACCESS_STATE_STANDBY as defined by 567 * Allowed CDBs for ALUA_ACCESS_STATE_STANDBY as defined by
@@ -570,7 +582,7 @@ static inline int core_alua_state_standby(
570 case MI_REPORT_TARGET_PGS: 582 case MI_REPORT_TARGET_PGS:
571 return 0; 583 return 0;
572 default: 584 default:
573 *alua_ascq = ASCQ_04H_ALUA_TG_PT_STANDBY; 585 set_ascq(cmd, ASCQ_04H_ALUA_TG_PT_STANDBY);
574 return 1; 586 return 1;
575 } 587 }
576 case MAINTENANCE_OUT: 588 case MAINTENANCE_OUT:
@@ -578,7 +590,7 @@ static inline int core_alua_state_standby(
578 case MO_SET_TARGET_PGS: 590 case MO_SET_TARGET_PGS:
579 return 0; 591 return 0;
580 default: 592 default:
581 *alua_ascq = ASCQ_04H_ALUA_TG_PT_STANDBY; 593 set_ascq(cmd, ASCQ_04H_ALUA_TG_PT_STANDBY);
582 return 1; 594 return 1;
583 } 595 }
584 case REQUEST_SENSE: 596 case REQUEST_SENSE:
@@ -588,7 +600,7 @@ static inline int core_alua_state_standby(
588 case WRITE_BUFFER: 600 case WRITE_BUFFER:
589 return 0; 601 return 0;
590 default: 602 default:
591 *alua_ascq = ASCQ_04H_ALUA_TG_PT_STANDBY; 603 set_ascq(cmd, ASCQ_04H_ALUA_TG_PT_STANDBY);
592 return 1; 604 return 1;
593 } 605 }
594 606
@@ -597,8 +609,7 @@ static inline int core_alua_state_standby(
597 609
598static inline int core_alua_state_unavailable( 610static inline int core_alua_state_unavailable(
599 struct se_cmd *cmd, 611 struct se_cmd *cmd,
600 unsigned char *cdb, 612 unsigned char *cdb)
601 u8 *alua_ascq)
602{ 613{
603 /* 614 /*
604 * Allowed CDBs for ALUA_ACCESS_STATE_UNAVAILABLE as defined by 615 * Allowed CDBs for ALUA_ACCESS_STATE_UNAVAILABLE as defined by
@@ -613,7 +624,7 @@ static inline int core_alua_state_unavailable(
613 case MI_REPORT_TARGET_PGS: 624 case MI_REPORT_TARGET_PGS:
614 return 0; 625 return 0;
615 default: 626 default:
616 *alua_ascq = ASCQ_04H_ALUA_TG_PT_UNAVAILABLE; 627 set_ascq(cmd, ASCQ_04H_ALUA_TG_PT_UNAVAILABLE);
617 return 1; 628 return 1;
618 } 629 }
619 case MAINTENANCE_OUT: 630 case MAINTENANCE_OUT:
@@ -621,7 +632,7 @@ static inline int core_alua_state_unavailable(
621 case MO_SET_TARGET_PGS: 632 case MO_SET_TARGET_PGS:
622 return 0; 633 return 0;
623 default: 634 default:
624 *alua_ascq = ASCQ_04H_ALUA_TG_PT_UNAVAILABLE; 635 set_ascq(cmd, ASCQ_04H_ALUA_TG_PT_UNAVAILABLE);
625 return 1; 636 return 1;
626 } 637 }
627 case REQUEST_SENSE: 638 case REQUEST_SENSE:
@@ -629,7 +640,7 @@ static inline int core_alua_state_unavailable(
629 case WRITE_BUFFER: 640 case WRITE_BUFFER:
630 return 0; 641 return 0;
631 default: 642 default:
632 *alua_ascq = ASCQ_04H_ALUA_TG_PT_UNAVAILABLE; 643 set_ascq(cmd, ASCQ_04H_ALUA_TG_PT_UNAVAILABLE);
633 return 1; 644 return 1;
634 } 645 }
635 646
@@ -638,8 +649,7 @@ static inline int core_alua_state_unavailable(
638 649
639static inline int core_alua_state_transition( 650static inline int core_alua_state_transition(
640 struct se_cmd *cmd, 651 struct se_cmd *cmd,
641 unsigned char *cdb, 652 unsigned char *cdb)
642 u8 *alua_ascq)
643{ 653{
644 /* 654 /*
645 * Allowed CDBs for ALUA_ACCESS_STATE_TRANSITION as defined by 655 * Allowed CDBs for ALUA_ACCESS_STATE_TRANSITION as defined by
@@ -654,7 +664,7 @@ static inline int core_alua_state_transition(
654 case MI_REPORT_TARGET_PGS: 664 case MI_REPORT_TARGET_PGS:
655 return 0; 665 return 0;
656 default: 666 default:
657 *alua_ascq = ASCQ_04H_ALUA_STATE_TRANSITION; 667 set_ascq(cmd, ASCQ_04H_ALUA_STATE_TRANSITION);
658 return 1; 668 return 1;
659 } 669 }
660 case REQUEST_SENSE: 670 case REQUEST_SENSE:
@@ -662,7 +672,7 @@ static inline int core_alua_state_transition(
662 case WRITE_BUFFER: 672 case WRITE_BUFFER:
663 return 0; 673 return 0;
664 default: 674 default:
665 *alua_ascq = ASCQ_04H_ALUA_STATE_TRANSITION; 675 set_ascq(cmd, ASCQ_04H_ALUA_STATE_TRANSITION);
666 return 1; 676 return 1;
667 } 677 }
668 678
@@ -684,8 +694,6 @@ target_alua_state_check(struct se_cmd *cmd)
684 struct t10_alua_tg_pt_gp *tg_pt_gp; 694 struct t10_alua_tg_pt_gp *tg_pt_gp;
685 struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem; 695 struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
686 int out_alua_state, nonop_delay_msecs; 696 int out_alua_state, nonop_delay_msecs;
687 u8 alua_ascq;
688 int ret;
689 697
690 if (dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE) 698 if (dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE)
691 return 0; 699 return 0;
@@ -701,9 +709,8 @@ target_alua_state_check(struct se_cmd *cmd)
701 if (atomic_read(&port->sep_tg_pt_secondary_offline)) { 709 if (atomic_read(&port->sep_tg_pt_secondary_offline)) {
702 pr_debug("ALUA: Got secondary offline status for local" 710 pr_debug("ALUA: Got secondary offline status for local"
703 " target port\n"); 711 " target port\n");
704 alua_ascq = ASCQ_04H_ALUA_OFFLINE; 712 set_ascq(cmd, ASCQ_04H_ALUA_OFFLINE);
705 ret = 1; 713 return TCM_CHECK_CONDITION_NOT_READY;
706 goto out;
707 } 714 }
708 /* 715 /*
709 * Second, obtain the struct t10_alua_tg_pt_gp_member pointer to the 716 * Second, obtain the struct t10_alua_tg_pt_gp_member pointer to the
@@ -731,20 +738,23 @@ target_alua_state_check(struct se_cmd *cmd)
731 738
732 switch (out_alua_state) { 739 switch (out_alua_state) {
733 case ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED: 740 case ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED:
734 ret = core_alua_state_nonoptimized(cmd, cdb, 741 core_alua_state_nonoptimized(cmd, cdb, nonop_delay_msecs);
735 nonop_delay_msecs, &alua_ascq);
736 break; 742 break;
737 case ALUA_ACCESS_STATE_STANDBY: 743 case ALUA_ACCESS_STATE_STANDBY:
738 ret = core_alua_state_standby(cmd, cdb, &alua_ascq); 744 if (core_alua_state_standby(cmd, cdb))
745 return TCM_CHECK_CONDITION_NOT_READY;
739 break; 746 break;
740 case ALUA_ACCESS_STATE_UNAVAILABLE: 747 case ALUA_ACCESS_STATE_UNAVAILABLE:
741 ret = core_alua_state_unavailable(cmd, cdb, &alua_ascq); 748 if (core_alua_state_unavailable(cmd, cdb))
749 return TCM_CHECK_CONDITION_NOT_READY;
742 break; 750 break;
743 case ALUA_ACCESS_STATE_TRANSITION: 751 case ALUA_ACCESS_STATE_TRANSITION:
744 ret = core_alua_state_transition(cmd, cdb, &alua_ascq); 752 if (core_alua_state_transition(cmd, cdb))
753 return TCM_CHECK_CONDITION_NOT_READY;
745 break; 754 break;
746 case ALUA_ACCESS_STATE_LBA_DEPENDENT: 755 case ALUA_ACCESS_STATE_LBA_DEPENDENT:
747 ret = core_alua_state_lba_dependent(cmd, tg_pt_gp, &alua_ascq); 756 if (core_alua_state_lba_dependent(cmd, tg_pt_gp))
757 return TCM_CHECK_CONDITION_NOT_READY;
748 break; 758 break;
749 /* 759 /*
750 * OFFLINE is a secondary ALUA target port group access state, that is 760 * OFFLINE is a secondary ALUA target port group access state, that is
@@ -757,23 +767,6 @@ target_alua_state_check(struct se_cmd *cmd)
757 return TCM_INVALID_CDB_FIELD; 767 return TCM_INVALID_CDB_FIELD;
758 } 768 }
759 769
760out:
761 if (ret > 0) {
762 /*
763 * Set SCSI additional sense code (ASC) to 'LUN Not Accessible';
764 * The ALUA additional sense code qualifier (ASCQ) is determined
765 * by the ALUA primary or secondary access state..
766 */
767 pr_debug("[%s]: ALUA TG Port not available, "
768 "SenseKey: NOT_READY, ASC/ASCQ: "
769 "0x04/0x%02x\n",
770 cmd->se_tfo->get_fabric_name(), alua_ascq);
771
772 cmd->scsi_asc = 0x04;
773 cmd->scsi_ascq = alua_ascq;
774 return TCM_CHECK_CONDITION_NOT_READY;
775 }
776
777 return 0; 770 return 0;
778} 771}
779 772
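
The target_core_alua.c hunks above fold the old alua_ascq out-parameter into set_ascq() and let each state helper signal TCM_CHECK_CONDITION_NOT_READY directly. For reference, a stand-alone user-space C sketch of the resulting sense reporting; the enum, struct and helper names are illustrative, only the ASC/ASCQ values follow the SPC-4/ALUA codes the driver uses:

#include <stdint.h>
#include <stdio.h>

enum alua_state {
    ALUA_AO,            /* active/optimized     */
    ALUA_ANO,           /* active/non-optimized */
    ALUA_STANDBY,
    ALUA_UNAVAILABLE,
    ALUA_TRANSITION,
};

struct sense {
    uint8_t key, asc, ascq;
};

/* NOT READY with ASC 0x04; the ASCQ identifies which ALUA state blocked
 * the command (SPC-4: 0x0a transition, 0x0b standby, 0x0c unavailable). */
static int alua_state_check(enum alua_state state, struct sense *out)
{
    static const uint8_t ascq[] = {
        [ALUA_STANDBY]     = 0x0b,
        [ALUA_UNAVAILABLE] = 0x0c,
        [ALUA_TRANSITION]  = 0x0a,
    };

    switch (state) {
    case ALUA_AO:
    case ALUA_ANO:          /* non-optimized still executes, just delayed */
        return 0;
    default:
        out->key  = 0x02;   /* NOT READY */
        out->asc  = 0x04;   /* logical unit not accessible */
        out->ascq = ascq[state];
        return 1;           /* caller turns this into CHECK CONDITION */
    }
}

int main(void)
{
    struct sense s;

    if (alua_state_check(ALUA_STANDBY, &s))
        printf("CHECK CONDITION, key 0x%02x asc/ascq 0x%02x/0x%02x\n",
               s.key, s.asc, s.ascq);
    return 0;
}
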
diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c
index f0e85b119692..60a9ae6df763 100644
--- a/drivers/target/target_core_configfs.c
+++ b/drivers/target/target_core_configfs.c
@@ -457,6 +457,10 @@ static int target_fabric_tf_ops_check(
457 pr_err("Missing tfo->queue_tm_rsp()\n"); 457 pr_err("Missing tfo->queue_tm_rsp()\n");
458 return -EINVAL; 458 return -EINVAL;
459 } 459 }
460 if (!tfo->aborted_task) {
461 pr_err("Missing tfo->aborted_task()\n");
462 return -EINVAL;
463 }
460 /* 464 /*
461 * We at least require tfo->fabric_make_wwn(), tfo->fabric_drop_wwn() 465 * We at least require tfo->fabric_make_wwn(), tfo->fabric_drop_wwn()
462 * tfo->fabric_make_tpg() and tfo->fabric_drop_tpg() in 466 * tfo->fabric_make_tpg() and tfo->fabric_drop_tpg() in
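
With the check added above, target_fabric_tf_ops_check() now rejects any fabric module that does not wire up aborted_task(). A minimal sketch of that contract with made-up structure and function names; a no-op callback is enough, as the tcm_loop, sbp and tcm_fc hunks elsewhere in this diff show:

#include <stdio.h>

struct cmd;                                   /* opaque command handle    */

struct fabric_ops {
    void (*queue_tm_rsp)(struct cmd *);
    void (*aborted_task)(struct cmd *);       /* newly mandatory callback */
};

static void demo_queue_tm_rsp(struct cmd *c) { (void)c; }
static void demo_aborted_task(struct cmd *c) { (void)c; } /* no-op is fine */

static int ops_check(const struct fabric_ops *ops)
{
    if (!ops->queue_tm_rsp || !ops->aborted_task) {
        fprintf(stderr, "missing mandatory fabric callback\n");
        return -1;
    }
    return 0;
}

int main(void)
{
    const struct fabric_ops ops = {
        .queue_tm_rsp = demo_queue_tm_rsp,
        .aborted_task = demo_aborted_task,
    };

    return ops_check(&ops);
}
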
diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c
index cf991a91a8a9..7d6cddaec525 100644
--- a/drivers/target/target_core_file.c
+++ b/drivers/target/target_core_file.c
@@ -854,25 +854,6 @@ static int fd_init_prot(struct se_device *dev)
854 return 0; 854 return 0;
855} 855}
856 856
857static void fd_init_format_buf(struct se_device *dev, unsigned char *buf,
858 u32 unit_size, u32 *ref_tag, u16 app_tag,
859 bool inc_reftag)
860{
861 unsigned char *p = buf;
862 int i;
863
864 for (i = 0; i < unit_size; i += dev->prot_length) {
865 *((u16 *)&p[0]) = 0xffff;
866 *((__be16 *)&p[2]) = cpu_to_be16(app_tag);
867 *((__be32 *)&p[4]) = cpu_to_be32(*ref_tag);
868
869 if (inc_reftag)
870 (*ref_tag)++;
871
872 p += dev->prot_length;
873 }
874}
875
876static int fd_format_prot(struct se_device *dev) 857static int fd_format_prot(struct se_device *dev)
877{ 858{
878 struct fd_dev *fd_dev = FD_DEV(dev); 859 struct fd_dev *fd_dev = FD_DEV(dev);
@@ -880,10 +861,8 @@ static int fd_format_prot(struct se_device *dev)
880 sector_t prot_length, prot; 861 sector_t prot_length, prot;
881 unsigned char *buf; 862 unsigned char *buf;
882 loff_t pos = 0; 863 loff_t pos = 0;
883 u32 ref_tag = 0;
884 int unit_size = FDBD_FORMAT_UNIT_SIZE * dev->dev_attrib.block_size; 864 int unit_size = FDBD_FORMAT_UNIT_SIZE * dev->dev_attrib.block_size;
885 int rc, ret = 0, size, len; 865 int rc, ret = 0, size, len;
886 bool inc_reftag = false;
887 866
888 if (!dev->dev_attrib.pi_prot_type) { 867 if (!dev->dev_attrib.pi_prot_type) {
889 pr_err("Unable to format_prot while pi_prot_type == 0\n"); 868 pr_err("Unable to format_prot while pi_prot_type == 0\n");
@@ -894,37 +873,20 @@ static int fd_format_prot(struct se_device *dev)
894 return -ENODEV; 873 return -ENODEV;
895 } 874 }
896 875
897 switch (dev->dev_attrib.pi_prot_type) {
898 case TARGET_DIF_TYPE3_PROT:
899 ref_tag = 0xffffffff;
900 break;
901 case TARGET_DIF_TYPE2_PROT:
902 case TARGET_DIF_TYPE1_PROT:
903 inc_reftag = true;
904 break;
905 default:
906 break;
907 }
908
909 buf = vzalloc(unit_size); 876 buf = vzalloc(unit_size);
910 if (!buf) { 877 if (!buf) {
911 pr_err("Unable to allocate FILEIO prot buf\n"); 878 pr_err("Unable to allocate FILEIO prot buf\n");
912 return -ENOMEM; 879 return -ENOMEM;
913 } 880 }
914
915 prot_length = (dev->transport->get_blocks(dev) + 1) * dev->prot_length; 881 prot_length = (dev->transport->get_blocks(dev) + 1) * dev->prot_length;
916 size = prot_length; 882 size = prot_length;
917 883
918 pr_debug("Using FILEIO prot_length: %llu\n", 884 pr_debug("Using FILEIO prot_length: %llu\n",
919 (unsigned long long)prot_length); 885 (unsigned long long)prot_length);
920 886
887 memset(buf, 0xff, unit_size);
921 for (prot = 0; prot < prot_length; prot += unit_size) { 888 for (prot = 0; prot < prot_length; prot += unit_size) {
922
923 fd_init_format_buf(dev, buf, unit_size, &ref_tag, 0xffff,
924 inc_reftag);
925
926 len = min(unit_size, size); 889 len = min(unit_size, size);
927
928 rc = kernel_write(prot_fd, buf, len, pos); 890 rc = kernel_write(prot_fd, buf, len, pos);
929 if (rc != len) { 891 if (rc != len) {
930 pr_err("vfs_write to prot file failed: %d\n", rc); 892 pr_err("vfs_write to prot file failed: %d\n", rc);
diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c
index 554d4f75a75a..9e0232cca92e 100644
--- a/drivers/target/target_core_iblock.c
+++ b/drivers/target/target_core_iblock.c
@@ -203,10 +203,9 @@ static void iblock_free_device(struct se_device *dev)
203 203
204 if (ib_dev->ibd_bd != NULL) 204 if (ib_dev->ibd_bd != NULL)
205 blkdev_put(ib_dev->ibd_bd, FMODE_WRITE|FMODE_READ|FMODE_EXCL); 205 blkdev_put(ib_dev->ibd_bd, FMODE_WRITE|FMODE_READ|FMODE_EXCL);
206 if (ib_dev->ibd_bio_set != NULL) { 206 if (ib_dev->ibd_bio_set != NULL)
207 bioset_integrity_free(ib_dev->ibd_bio_set);
208 bioset_free(ib_dev->ibd_bio_set); 207 bioset_free(ib_dev->ibd_bio_set);
209 } 208
210 kfree(ib_dev); 209 kfree(ib_dev);
211} 210}
212 211
diff --git a/drivers/target/target_core_rd.c b/drivers/target/target_core_rd.c
index 66a5aba5a0d9..b920db3388cd 100644
--- a/drivers/target/target_core_rd.c
+++ b/drivers/target/target_core_rd.c
@@ -242,7 +242,7 @@ static void rd_release_prot_space(struct rd_dev *rd_dev)
242 rd_dev->sg_prot_count = 0; 242 rd_dev->sg_prot_count = 0;
243} 243}
244 244
245static int rd_build_prot_space(struct rd_dev *rd_dev, int prot_length) 245static int rd_build_prot_space(struct rd_dev *rd_dev, int prot_length, int block_size)
246{ 246{
247 struct rd_dev_sg_table *sg_table; 247 struct rd_dev_sg_table *sg_table;
248 u32 total_sg_needed, sg_tables; 248 u32 total_sg_needed, sg_tables;
@@ -252,8 +252,13 @@ static int rd_build_prot_space(struct rd_dev *rd_dev, int prot_length)
252 252
253 if (rd_dev->rd_flags & RDF_NULLIO) 253 if (rd_dev->rd_flags & RDF_NULLIO)
254 return 0; 254 return 0;
255 255 /*
256 total_sg_needed = rd_dev->rd_page_count / prot_length; 256 * prot_length=8byte dif data
257 * tot sg needed = rd_page_count * (PGSZ/block_size) *
258 * (prot_length/block_size) + pad
259 * PGSZ canceled each other.
260 */
261 total_sg_needed = (rd_dev->rd_page_count * prot_length / block_size) + 1;
257 262
258 sg_tables = (total_sg_needed / max_sg_per_table) + 1; 263 sg_tables = (total_sg_needed / max_sg_per_table) + 1;
259 264
@@ -606,7 +611,8 @@ static int rd_init_prot(struct se_device *dev)
606 if (!dev->dev_attrib.pi_prot_type) 611 if (!dev->dev_attrib.pi_prot_type)
607 return 0; 612 return 0;
608 613
609 return rd_build_prot_space(rd_dev, dev->prot_length); 614 return rd_build_prot_space(rd_dev, dev->prot_length,
615 dev->dev_attrib.block_size);
610} 616}
611 617
612static void rd_free_prot(struct se_device *dev) 618static void rd_free_prot(struct se_device *dev)
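
The comment in the rd_build_prot_space() hunk above compresses a short derivation: data pages become blocks, each block needs prot_length bytes of PI, and PAGE_SIZE cancels when converting the PI bytes back into pages, leaving rd_page_count * prot_length / block_size plus one page of padding. A small stand-alone check of that arithmetic with example numbers:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    const uint64_t page_size     = 4096;   /* PAGE_SIZE                  */
    const uint64_t block_size    = 512;
    const uint64_t prot_length   = 8;      /* 8-byte DIF tuple per block */
    const uint64_t rd_page_count = 65536;  /* a 256 MiB ramdisk          */

    uint64_t blocks     = rd_page_count * page_size / block_size;
    uint64_t pi_bytes   = blocks * prot_length;
    uint64_t long_form  = pi_bytes / page_size;                     /* full derivation    */
    uint64_t short_form = rd_page_count * prot_length / block_size; /* what the code uses */

    printf("blocks=%llu, PI bytes=%llu\n",
           (unsigned long long)blocks, (unsigned long long)pi_bytes);
    printf("pages of PI: %llu (derived) == %llu (short form), plus 1 pad page\n",
           (unsigned long long)long_form, (unsigned long long)short_form);
    return 0;
}
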
diff --git a/drivers/target/target_core_sbc.c b/drivers/target/target_core_sbc.c
index 77e6531fb0a1..e0229592ec55 100644
--- a/drivers/target/target_core_sbc.c
+++ b/drivers/target/target_core_sbc.c
@@ -89,6 +89,7 @@ static sense_reason_t
89sbc_emulate_readcapacity_16(struct se_cmd *cmd) 89sbc_emulate_readcapacity_16(struct se_cmd *cmd)
90{ 90{
91 struct se_device *dev = cmd->se_dev; 91 struct se_device *dev = cmd->se_dev;
92 struct se_session *sess = cmd->se_sess;
92 unsigned char *rbuf; 93 unsigned char *rbuf;
93 unsigned char buf[32]; 94 unsigned char buf[32];
94 unsigned long long blocks = dev->transport->get_blocks(dev); 95 unsigned long long blocks = dev->transport->get_blocks(dev);
@@ -109,8 +110,10 @@ sbc_emulate_readcapacity_16(struct se_cmd *cmd)
109 /* 110 /*
110 * Set P_TYPE and PROT_EN bits for DIF support 111 * Set P_TYPE and PROT_EN bits for DIF support
111 */ 112 */
112 if (dev->dev_attrib.pi_prot_type) 113 if (sess->sup_prot_ops & (TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS)) {
113 buf[12] = (dev->dev_attrib.pi_prot_type - 1) << 1 | 0x1; 114 if (dev->dev_attrib.pi_prot_type)
115 buf[12] = (dev->dev_attrib.pi_prot_type - 1) << 1 | 0x1;
116 }
114 117
115 if (dev->transport->get_lbppbe) 118 if (dev->transport->get_lbppbe)
116 buf[13] = dev->transport->get_lbppbe(dev) & 0x0f; 119 buf[13] = dev->transport->get_lbppbe(dev) & 0x0f;
@@ -425,13 +428,14 @@ static sense_reason_t compare_and_write_callback(struct se_cmd *cmd)
425 goto out; 428 goto out;
426 } 429 }
427 430
428 write_sg = kzalloc(sizeof(struct scatterlist) * cmd->t_data_nents, 431 write_sg = kmalloc(sizeof(struct scatterlist) * cmd->t_data_nents,
429 GFP_KERNEL); 432 GFP_KERNEL);
430 if (!write_sg) { 433 if (!write_sg) {
431 pr_err("Unable to allocate compare_and_write sg\n"); 434 pr_err("Unable to allocate compare_and_write sg\n");
432 ret = TCM_OUT_OF_RESOURCES; 435 ret = TCM_OUT_OF_RESOURCES;
433 goto out; 436 goto out;
434 } 437 }
438 sg_init_table(write_sg, cmd->t_data_nents);
435 /* 439 /*
436 * Setup verify and write data payloads from total NumberLBAs. 440 * Setup verify and write data payloads from total NumberLBAs.
437 */ 441 */
@@ -569,30 +573,85 @@ sbc_compare_and_write(struct se_cmd *cmd)
569 return TCM_NO_SENSE; 573 return TCM_NO_SENSE;
570} 574}
571 575
576static int
577sbc_set_prot_op_checks(u8 protect, enum target_prot_type prot_type,
578 bool is_write, struct se_cmd *cmd)
579{
580 if (is_write) {
581 cmd->prot_op = protect ? TARGET_PROT_DOUT_PASS :
582 TARGET_PROT_DOUT_INSERT;
583 switch (protect) {
584 case 0x0:
585 case 0x3:
586 cmd->prot_checks = 0;
587 break;
588 case 0x1:
589 case 0x5:
590 cmd->prot_checks = TARGET_DIF_CHECK_GUARD;
591 if (prot_type == TARGET_DIF_TYPE1_PROT)
592 cmd->prot_checks |= TARGET_DIF_CHECK_REFTAG;
593 break;
594 case 0x2:
595 if (prot_type == TARGET_DIF_TYPE1_PROT)
596 cmd->prot_checks = TARGET_DIF_CHECK_REFTAG;
597 break;
598 case 0x4:
599 cmd->prot_checks = TARGET_DIF_CHECK_GUARD;
600 break;
601 default:
602 pr_err("Unsupported protect field %d\n", protect);
603 return -EINVAL;
604 }
605 } else {
606 cmd->prot_op = protect ? TARGET_PROT_DIN_PASS :
607 TARGET_PROT_DIN_STRIP;
608 switch (protect) {
609 case 0x0:
610 case 0x1:
611 case 0x5:
612 cmd->prot_checks = TARGET_DIF_CHECK_GUARD;
613 if (prot_type == TARGET_DIF_TYPE1_PROT)
614 cmd->prot_checks |= TARGET_DIF_CHECK_REFTAG;
615 break;
616 case 0x2:
617 if (prot_type == TARGET_DIF_TYPE1_PROT)
618 cmd->prot_checks = TARGET_DIF_CHECK_REFTAG;
619 break;
620 case 0x3:
621 cmd->prot_checks = 0;
622 break;
623 case 0x4:
624 cmd->prot_checks = TARGET_DIF_CHECK_GUARD;
625 break;
626 default:
627 pr_err("Unsupported protect field %d\n", protect);
628 return -EINVAL;
629 }
630 }
631
632 return 0;
633}
634
572static bool 635static bool
573sbc_check_prot(struct se_device *dev, struct se_cmd *cmd, unsigned char *cdb, 636sbc_check_prot(struct se_device *dev, struct se_cmd *cmd, unsigned char *cdb,
574 u32 sectors) 637 u32 sectors, bool is_write)
575{ 638{
576 if (!cmd->t_prot_sg || !cmd->t_prot_nents) 639 u8 protect = cdb[1] >> 5;
640
641 if ((!cmd->t_prot_sg || !cmd->t_prot_nents) && cmd->prot_pto)
577 return true; 642 return true;
578 643
579 switch (dev->dev_attrib.pi_prot_type) { 644 switch (dev->dev_attrib.pi_prot_type) {
580 case TARGET_DIF_TYPE3_PROT: 645 case TARGET_DIF_TYPE3_PROT:
581 if (!(cdb[1] & 0xe0))
582 return true;
583
584 cmd->reftag_seed = 0xffffffff; 646 cmd->reftag_seed = 0xffffffff;
585 break; 647 break;
586 case TARGET_DIF_TYPE2_PROT: 648 case TARGET_DIF_TYPE2_PROT:
587 if (cdb[1] & 0xe0) 649 if (protect)
588 return false; 650 return false;
589 651
590 cmd->reftag_seed = cmd->t_task_lba; 652 cmd->reftag_seed = cmd->t_task_lba;
591 break; 653 break;
592 case TARGET_DIF_TYPE1_PROT: 654 case TARGET_DIF_TYPE1_PROT:
593 if (!(cdb[1] & 0xe0))
594 return true;
595
596 cmd->reftag_seed = cmd->t_task_lba; 655 cmd->reftag_seed = cmd->t_task_lba;
597 break; 656 break;
598 case TARGET_DIF_TYPE0_PROT: 657 case TARGET_DIF_TYPE0_PROT:
@@ -600,9 +659,15 @@ sbc_check_prot(struct se_device *dev, struct se_cmd *cmd, unsigned char *cdb,
600 return true; 659 return true;
601 } 660 }
602 661
662 if (sbc_set_prot_op_checks(protect, dev->dev_attrib.pi_prot_type,
663 is_write, cmd))
664 return false;
665
603 cmd->prot_type = dev->dev_attrib.pi_prot_type; 666 cmd->prot_type = dev->dev_attrib.pi_prot_type;
604 cmd->prot_length = dev->prot_length * sectors; 667 cmd->prot_length = dev->prot_length * sectors;
605 cmd->prot_handover = PROT_SEPERATED; 668 pr_debug("%s: prot_type=%d, prot_length=%d prot_op=%d prot_checks=%d\n",
669 __func__, cmd->prot_type, cmd->prot_length,
670 cmd->prot_op, cmd->prot_checks);
606 671
607 return true; 672 return true;
608} 673}
@@ -628,7 +693,7 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
628 sectors = transport_get_sectors_10(cdb); 693 sectors = transport_get_sectors_10(cdb);
629 cmd->t_task_lba = transport_lba_32(cdb); 694 cmd->t_task_lba = transport_lba_32(cdb);
630 695
631 if (!sbc_check_prot(dev, cmd, cdb, sectors)) 696 if (!sbc_check_prot(dev, cmd, cdb, sectors, false))
632 return TCM_UNSUPPORTED_SCSI_OPCODE; 697 return TCM_UNSUPPORTED_SCSI_OPCODE;
633 698
634 cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB; 699 cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
@@ -639,7 +704,7 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
639 sectors = transport_get_sectors_12(cdb); 704 sectors = transport_get_sectors_12(cdb);
640 cmd->t_task_lba = transport_lba_32(cdb); 705 cmd->t_task_lba = transport_lba_32(cdb);
641 706
642 if (!sbc_check_prot(dev, cmd, cdb, sectors)) 707 if (!sbc_check_prot(dev, cmd, cdb, sectors, false))
643 return TCM_UNSUPPORTED_SCSI_OPCODE; 708 return TCM_UNSUPPORTED_SCSI_OPCODE;
644 709
645 cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB; 710 cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
@@ -650,7 +715,7 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
650 sectors = transport_get_sectors_16(cdb); 715 sectors = transport_get_sectors_16(cdb);
651 cmd->t_task_lba = transport_lba_64(cdb); 716 cmd->t_task_lba = transport_lba_64(cdb);
652 717
653 if (!sbc_check_prot(dev, cmd, cdb, sectors)) 718 if (!sbc_check_prot(dev, cmd, cdb, sectors, false))
654 return TCM_UNSUPPORTED_SCSI_OPCODE; 719 return TCM_UNSUPPORTED_SCSI_OPCODE;
655 720
656 cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB; 721 cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
@@ -669,7 +734,7 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
669 sectors = transport_get_sectors_10(cdb); 734 sectors = transport_get_sectors_10(cdb);
670 cmd->t_task_lba = transport_lba_32(cdb); 735 cmd->t_task_lba = transport_lba_32(cdb);
671 736
672 if (!sbc_check_prot(dev, cmd, cdb, sectors)) 737 if (!sbc_check_prot(dev, cmd, cdb, sectors, true))
673 return TCM_UNSUPPORTED_SCSI_OPCODE; 738 return TCM_UNSUPPORTED_SCSI_OPCODE;
674 739
675 if (cdb[1] & 0x8) 740 if (cdb[1] & 0x8)
@@ -682,7 +747,7 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
682 sectors = transport_get_sectors_12(cdb); 747 sectors = transport_get_sectors_12(cdb);
683 cmd->t_task_lba = transport_lba_32(cdb); 748 cmd->t_task_lba = transport_lba_32(cdb);
684 749
685 if (!sbc_check_prot(dev, cmd, cdb, sectors)) 750 if (!sbc_check_prot(dev, cmd, cdb, sectors, true))
686 return TCM_UNSUPPORTED_SCSI_OPCODE; 751 return TCM_UNSUPPORTED_SCSI_OPCODE;
687 752
688 if (cdb[1] & 0x8) 753 if (cdb[1] & 0x8)
@@ -695,7 +760,7 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
695 sectors = transport_get_sectors_16(cdb); 760 sectors = transport_get_sectors_16(cdb);
696 cmd->t_task_lba = transport_lba_64(cdb); 761 cmd->t_task_lba = transport_lba_64(cdb);
697 762
698 if (!sbc_check_prot(dev, cmd, cdb, sectors)) 763 if (!sbc_check_prot(dev, cmd, cdb, sectors, true))
699 return TCM_UNSUPPORTED_SCSI_OPCODE; 764 return TCM_UNSUPPORTED_SCSI_OPCODE;
700 765
701 if (cdb[1] & 0x8) 766 if (cdb[1] & 0x8)
@@ -1031,6 +1096,50 @@ err:
1031} 1096}
1032EXPORT_SYMBOL(sbc_execute_unmap); 1097EXPORT_SYMBOL(sbc_execute_unmap);
1033 1098
1099void
1100sbc_dif_generate(struct se_cmd *cmd)
1101{
1102 struct se_device *dev = cmd->se_dev;
1103 struct se_dif_v1_tuple *sdt;
1104 struct scatterlist *dsg, *psg = cmd->t_prot_sg;
1105 sector_t sector = cmd->t_task_lba;
1106 void *daddr, *paddr;
1107 int i, j, offset = 0;
1108
1109 for_each_sg(cmd->t_data_sg, dsg, cmd->t_data_nents, i) {
1110 daddr = kmap_atomic(sg_page(dsg)) + dsg->offset;
1111 paddr = kmap_atomic(sg_page(psg)) + psg->offset;
1112
1113 for (j = 0; j < dsg->length; j += dev->dev_attrib.block_size) {
1114
1115 if (offset >= psg->length) {
1116 kunmap_atomic(paddr);
1117 psg = sg_next(psg);
1118 paddr = kmap_atomic(sg_page(psg)) + psg->offset;
1119 offset = 0;
1120 }
1121
1122 sdt = paddr + offset;
1123 sdt->guard_tag = cpu_to_be16(crc_t10dif(daddr + j,
1124 dev->dev_attrib.block_size));
1125 if (dev->dev_attrib.pi_prot_type == TARGET_DIF_TYPE1_PROT)
1126 sdt->ref_tag = cpu_to_be32(sector & 0xffffffff);
1127 sdt->app_tag = 0;
1128
1129 pr_debug("DIF WRITE INSERT sector: %llu guard_tag: 0x%04x"
1130 " app_tag: 0x%04x ref_tag: %u\n",
1131 (unsigned long long)sector, sdt->guard_tag,
1132 sdt->app_tag, be32_to_cpu(sdt->ref_tag));
1133
1134 sector++;
1135 offset += sizeof(struct se_dif_v1_tuple);
1136 }
1137
1138 kunmap_atomic(paddr);
1139 kunmap_atomic(daddr);
1140 }
1141}
1142
1034static sense_reason_t 1143static sense_reason_t
1035sbc_dif_v1_verify(struct se_device *dev, struct se_dif_v1_tuple *sdt, 1144sbc_dif_v1_verify(struct se_device *dev, struct se_dif_v1_tuple *sdt,
1036 const void *p, sector_t sector, unsigned int ei_lba) 1145 const void *p, sector_t sector, unsigned int ei_lba)
@@ -1162,9 +1271,9 @@ sbc_dif_verify_write(struct se_cmd *cmd, sector_t start, unsigned int sectors,
1162} 1271}
1163EXPORT_SYMBOL(sbc_dif_verify_write); 1272EXPORT_SYMBOL(sbc_dif_verify_write);
1164 1273
1165sense_reason_t 1274static sense_reason_t
1166sbc_dif_verify_read(struct se_cmd *cmd, sector_t start, unsigned int sectors, 1275__sbc_dif_verify_read(struct se_cmd *cmd, sector_t start, unsigned int sectors,
1167 unsigned int ei_lba, struct scatterlist *sg, int sg_off) 1276 unsigned int ei_lba, struct scatterlist *sg, int sg_off)
1168{ 1277{
1169 struct se_device *dev = cmd->se_dev; 1278 struct se_device *dev = cmd->se_dev;
1170 struct se_dif_v1_tuple *sdt; 1279 struct se_dif_v1_tuple *sdt;
@@ -1217,8 +1326,31 @@ sbc_dif_verify_read(struct se_cmd *cmd, sector_t start, unsigned int sectors,
1217 kunmap_atomic(paddr); 1326 kunmap_atomic(paddr);
1218 kunmap_atomic(daddr); 1327 kunmap_atomic(daddr);
1219 } 1328 }
1220 sbc_dif_copy_prot(cmd, sectors, true, sg, sg_off);
1221 1329
1222 return 0; 1330 return 0;
1223} 1331}
1332
1333sense_reason_t
1334sbc_dif_read_strip(struct se_cmd *cmd)
1335{
1336 struct se_device *dev = cmd->se_dev;
1337 u32 sectors = cmd->prot_length / dev->prot_length;
1338
1339 return __sbc_dif_verify_read(cmd, cmd->t_task_lba, sectors, 0,
1340 cmd->t_prot_sg, 0);
1341}
1342
1343sense_reason_t
1344sbc_dif_verify_read(struct se_cmd *cmd, sector_t start, unsigned int sectors,
1345 unsigned int ei_lba, struct scatterlist *sg, int sg_off)
1346{
1347 sense_reason_t rc;
1348
1349 rc = __sbc_dif_verify_read(cmd, start, sectors, ei_lba, sg, sg_off);
1350 if (rc)
1351 return rc;
1352
1353 sbc_dif_copy_prot(cmd, sectors, true, sg, sg_off);
1354 return 0;
1355}
1224EXPORT_SYMBOL(sbc_dif_verify_read); 1356EXPORT_SYMBOL(sbc_dif_verify_read);
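
The new sbc_dif_generate() above is the software WRITE_INSERT path: one 8-byte tuple per logical block, guard tag = CRC-16/T10-DIF of the block data, application tag zero, and for Type 1 a reference tag seeded from the low 32 bits of the LBA, all stored big-endian. A stand-alone sketch of that layout; the bit-wise CRC below (polynomial 0x8bb7) is for illustration only, not the kernel's table-driven crc_t10dif():

#include <arpa/inet.h>   /* htons/htonl: DIF tags are stored big-endian */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define BLOCK_SIZE 512u

struct dif_tuple {           /* 8 bytes of PI per logical block   */
    uint16_t guard_tag;      /* CRC-16/T10-DIF of the block data  */
    uint16_t app_tag;
    uint32_t ref_tag;        /* low 32 bits of the LBA for Type 1 */
};

/* Plain bit-wise CRC-16/T10-DIF: polynomial 0x8bb7, init 0, no reflection. */
static uint16_t crc_t10dif(const uint8_t *buf, size_t len)
{
    uint16_t crc = 0;

    for (size_t i = 0; i < len; i++) {
        crc ^= (uint16_t)buf[i] << 8;
        for (int b = 0; b < 8; b++)
            crc = (crc & 0x8000) ? (uint16_t)((crc << 1) ^ 0x8bb7)
                                 : (uint16_t)(crc << 1);
    }
    return crc;
}

static void dif_generate(const uint8_t *data, unsigned int blocks,
                         uint64_t lba, struct dif_tuple *pi)
{
    for (unsigned int i = 0; i < blocks; i++, lba++) {
        pi[i].guard_tag = htons(crc_t10dif(data + i * BLOCK_SIZE, BLOCK_SIZE));
        pi[i].app_tag   = 0;
        pi[i].ref_tag   = htonl((uint32_t)lba);   /* Type 1 ref tag seeding */
    }
}

int main(void)
{
    uint8_t data[2 * BLOCK_SIZE];
    struct dif_tuple pi[2];

    memset(data, 0xab, sizeof(data));
    dif_generate(data, 2, 100, pi);
    printf("lba 100: guard=0x%04x ref_tag=%u\n",
           ntohs(pi[0].guard_tag), (unsigned)ntohl(pi[0].ref_tag));
    return 0;
}
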
diff --git a/drivers/target/target_core_spc.c b/drivers/target/target_core_spc.c
index 3bebc71ea033..8653666612a8 100644
--- a/drivers/target/target_core_spc.c
+++ b/drivers/target/target_core_spc.c
@@ -71,6 +71,7 @@ spc_emulate_inquiry_std(struct se_cmd *cmd, unsigned char *buf)
71{ 71{
72 struct se_lun *lun = cmd->se_lun; 72 struct se_lun *lun = cmd->se_lun;
73 struct se_device *dev = cmd->se_dev; 73 struct se_device *dev = cmd->se_dev;
74 struct se_session *sess = cmd->se_sess;
74 75
75 /* Set RMB (removable media) for tape devices */ 76 /* Set RMB (removable media) for tape devices */
76 if (dev->transport->get_device_type(dev) == TYPE_TAPE) 77 if (dev->transport->get_device_type(dev) == TYPE_TAPE)
@@ -101,10 +102,13 @@ spc_emulate_inquiry_std(struct se_cmd *cmd, unsigned char *buf)
101 if (dev->dev_attrib.emulate_3pc) 102 if (dev->dev_attrib.emulate_3pc)
102 buf[5] |= 0x8; 103 buf[5] |= 0x8;
103 /* 104 /*
104 * Set Protection (PROTECT) bit when DIF has been enabled. 105 * Set Protection (PROTECT) bit when DIF has been enabled on the
106 * device, and the transport supports VERIFY + PASS.
105 */ 107 */
106 if (dev->dev_attrib.pi_prot_type) 108 if (sess->sup_prot_ops & (TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS)) {
107 buf[5] |= 0x1; 109 if (dev->dev_attrib.pi_prot_type)
110 buf[5] |= 0x1;
111 }
108 112
109 buf[7] = 0x2; /* CmdQue=1 */ 113 buf[7] = 0x2; /* CmdQue=1 */
110 114
@@ -473,16 +477,19 @@ static sense_reason_t
473spc_emulate_evpd_86(struct se_cmd *cmd, unsigned char *buf) 477spc_emulate_evpd_86(struct se_cmd *cmd, unsigned char *buf)
474{ 478{
475 struct se_device *dev = cmd->se_dev; 479 struct se_device *dev = cmd->se_dev;
480 struct se_session *sess = cmd->se_sess;
476 481
477 buf[3] = 0x3c; 482 buf[3] = 0x3c;
478 /* 483 /*
479 * Set GRD_CHK + REF_CHK for TYPE1 protection, or GRD_CHK 484 * Set GRD_CHK + REF_CHK for TYPE1 protection, or GRD_CHK
480 * only for TYPE3 protection. 485 * only for TYPE3 protection.
481 */ 486 */
482 if (dev->dev_attrib.pi_prot_type == TARGET_DIF_TYPE1_PROT) 487 if (sess->sup_prot_ops & (TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS)) {
483 buf[4] = 0x5; 488 if (dev->dev_attrib.pi_prot_type == TARGET_DIF_TYPE1_PROT)
484 else if (dev->dev_attrib.pi_prot_type == TARGET_DIF_TYPE3_PROT) 489 buf[4] = 0x5;
485 buf[4] = 0x4; 490 else if (dev->dev_attrib.pi_prot_type == TARGET_DIF_TYPE3_PROT)
491 buf[4] = 0x4;
492 }
486 493
487 /* Set HEADSUP, ORDSUP, SIMPSUP */ 494 /* Set HEADSUP, ORDSUP, SIMPSUP */
488 buf[5] = 0x07; 495 buf[5] = 0x07;
@@ -762,7 +769,7 @@ out:
762 return ret; 769 return ret;
763} 770}
764 771
765static int spc_modesense_rwrecovery(struct se_device *dev, u8 pc, u8 *p) 772static int spc_modesense_rwrecovery(struct se_cmd *cmd, u8 pc, u8 *p)
766{ 773{
767 p[0] = 0x01; 774 p[0] = 0x01;
768 p[1] = 0x0a; 775 p[1] = 0x0a;
@@ -775,8 +782,11 @@ out:
775 return 12; 782 return 12;
776} 783}
777 784
778static int spc_modesense_control(struct se_device *dev, u8 pc, u8 *p) 785static int spc_modesense_control(struct se_cmd *cmd, u8 pc, u8 *p)
779{ 786{
787 struct se_device *dev = cmd->se_dev;
788 struct se_session *sess = cmd->se_sess;
789
780 p[0] = 0x0a; 790 p[0] = 0x0a;
781 p[1] = 0x0a; 791 p[1] = 0x0a;
782 792
@@ -868,8 +878,10 @@ static int spc_modesense_control(struct se_device *dev, u8 pc, u8 *p)
868 * type, shall not modify the contents of the LOGICAL BLOCK REFERENCE 878 * type, shall not modify the contents of the LOGICAL BLOCK REFERENCE
869 * TAG field. 879 * TAG field.
870 */ 880 */
871 if (dev->dev_attrib.pi_prot_type) 881 if (sess->sup_prot_ops & (TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS)) {
872 p[5] |= 0x80; 882 if (dev->dev_attrib.pi_prot_type)
883 p[5] |= 0x80;
884 }
873 885
874 p[8] = 0xff; 886 p[8] = 0xff;
875 p[9] = 0xff; 887 p[9] = 0xff;
@@ -879,8 +891,10 @@ out:
879 return 12; 891 return 12;
880} 892}
881 893
882static int spc_modesense_caching(struct se_device *dev, u8 pc, u8 *p) 894static int spc_modesense_caching(struct se_cmd *cmd, u8 pc, u8 *p)
883{ 895{
896 struct se_device *dev = cmd->se_dev;
897
884 p[0] = 0x08; 898 p[0] = 0x08;
885 p[1] = 0x12; 899 p[1] = 0x12;
886 900
@@ -896,7 +910,7 @@ out:
896 return 20; 910 return 20;
897} 911}
898 912
899static int spc_modesense_informational_exceptions(struct se_device *dev, u8 pc, unsigned char *p) 913static int spc_modesense_informational_exceptions(struct se_cmd *cmd, u8 pc, unsigned char *p)
900{ 914{
901 p[0] = 0x1c; 915 p[0] = 0x1c;
902 p[1] = 0x0a; 916 p[1] = 0x0a;
@@ -912,7 +926,7 @@ out:
912static struct { 926static struct {
913 uint8_t page; 927 uint8_t page;
914 uint8_t subpage; 928 uint8_t subpage;
915 int (*emulate)(struct se_device *, u8, unsigned char *); 929 int (*emulate)(struct se_cmd *, u8, unsigned char *);
916} modesense_handlers[] = { 930} modesense_handlers[] = {
917 { .page = 0x01, .subpage = 0x00, .emulate = spc_modesense_rwrecovery }, 931 { .page = 0x01, .subpage = 0x00, .emulate = spc_modesense_rwrecovery },
918 { .page = 0x08, .subpage = 0x00, .emulate = spc_modesense_caching }, 932 { .page = 0x08, .subpage = 0x00, .emulate = spc_modesense_caching },
@@ -1050,7 +1064,7 @@ static sense_reason_t spc_emulate_modesense(struct se_cmd *cmd)
1050 * the only two possibilities). 1064 * the only two possibilities).
1051 */ 1065 */
1052 if ((modesense_handlers[i].subpage & ~subpage) == 0) { 1066 if ((modesense_handlers[i].subpage & ~subpage) == 0) {
1053 ret = modesense_handlers[i].emulate(dev, pc, &buf[length]); 1067 ret = modesense_handlers[i].emulate(cmd, pc, &buf[length]);
1054 if (!ten && length + ret >= 255) 1068 if (!ten && length + ret >= 255)
1055 break; 1069 break;
1056 length += ret; 1070 length += ret;
@@ -1063,7 +1077,7 @@ static sense_reason_t spc_emulate_modesense(struct se_cmd *cmd)
1063 for (i = 0; i < ARRAY_SIZE(modesense_handlers); ++i) 1077 for (i = 0; i < ARRAY_SIZE(modesense_handlers); ++i)
1064 if (modesense_handlers[i].page == page && 1078 if (modesense_handlers[i].page == page &&
1065 modesense_handlers[i].subpage == subpage) { 1079 modesense_handlers[i].subpage == subpage) {
1066 length += modesense_handlers[i].emulate(dev, pc, &buf[length]); 1080 length += modesense_handlers[i].emulate(cmd, pc, &buf[length]);
1067 goto set_length; 1081 goto set_length;
1068 } 1082 }
1069 1083
@@ -1095,7 +1109,6 @@ set_length:
1095 1109
1096static sense_reason_t spc_emulate_modeselect(struct se_cmd *cmd) 1110static sense_reason_t spc_emulate_modeselect(struct se_cmd *cmd)
1097{ 1111{
1098 struct se_device *dev = cmd->se_dev;
1099 char *cdb = cmd->t_task_cdb; 1112 char *cdb = cmd->t_task_cdb;
1100 bool ten = cdb[0] == MODE_SELECT_10; 1113 bool ten = cdb[0] == MODE_SELECT_10;
1101 int off = ten ? 8 : 4; 1114 int off = ten ? 8 : 4;
@@ -1131,7 +1144,7 @@ static sense_reason_t spc_emulate_modeselect(struct se_cmd *cmd)
1131 if (modesense_handlers[i].page == page && 1144 if (modesense_handlers[i].page == page &&
1132 modesense_handlers[i].subpage == subpage) { 1145 modesense_handlers[i].subpage == subpage) {
1133 memset(tbuf, 0, SE_MODE_PAGE_BUF); 1146 memset(tbuf, 0, SE_MODE_PAGE_BUF);
1134 length = modesense_handlers[i].emulate(dev, 0, tbuf); 1147 length = modesense_handlers[i].emulate(cmd, 0, tbuf);
1135 goto check_contents; 1148 goto check_contents;
1136 } 1149 }
1137 1150
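
The spc.c hunks gate the INQUIRY PROTECT bit and the Extended INQUIRY (VPD 0x86) check bits on the session's DIN_PASS/DOUT_PASS support as well as the configured DIF type. A compact sketch of that reporting decision; the flag values mirror the kernel's target_prot_op and the SPC-4 bit definitions, but the function itself is illustrative:

#include <stdbool.h>
#include <stdio.h>

enum { PROT_DIN_PASS = 1 << 4, PROT_DOUT_PASS = 1 << 5 };
enum dif_type { DIF_TYPE0, DIF_TYPE1, DIF_TYPE2, DIF_TYPE3 };

/* Byte 5 of standard INQUIRY data (PROTECT is bit 0) and byte 4 of the
 * Extended INQUIRY VPD page 0x86 (GRD_CHK = 0x04, APP_CHK = 0x02,
 * REF_CHK = 0x01). */
static void report_protection(unsigned int sup_prot_ops, enum dif_type type,
                              unsigned char *inq_byte5,
                              unsigned char *evpd86_byte4)
{
    bool fabric_can_pass = sup_prot_ops & (PROT_DIN_PASS | PROT_DOUT_PASS);

    *inq_byte5 = 0;
    *evpd86_byte4 = 0;

    if (!fabric_can_pass || type == DIF_TYPE0)
        return;

    *inq_byte5 |= 0x1;               /* PROTECT                             */
    if (type == DIF_TYPE1)
        *evpd86_byte4 = 0x4 | 0x1;   /* GRD_CHK + REF_CHK                   */
    else if (type == DIF_TYPE3)
        *evpd86_byte4 = 0x4;         /* GRD_CHK only, Type 3 has no ref tag */
}

int main(void)
{
    unsigned char b5, b4;

    report_protection(PROT_DIN_PASS | PROT_DOUT_PASS, DIF_TYPE1, &b5, &b4);
    printf("INQUIRY byte 5 = 0x%02x, VPD 0x86 byte 4 = 0x%02x\n", b5, b4);
    return 0;
}
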
diff --git a/drivers/target/target_core_tmr.c b/drivers/target/target_core_tmr.c
index 70c638f730af..f7cd95e8111a 100644
--- a/drivers/target/target_core_tmr.c
+++ b/drivers/target/target_core_tmr.c
@@ -87,14 +87,17 @@ static void core_tmr_handle_tas_abort(
87 struct se_cmd *cmd, 87 struct se_cmd *cmd,
88 int tas) 88 int tas)
89{ 89{
90 bool remove = true;
90 /* 91 /*
91 * TASK ABORTED status (TAS) bit support 92 * TASK ABORTED status (TAS) bit support
92 */ 93 */
93 if ((tmr_nacl && 94 if ((tmr_nacl &&
94 (tmr_nacl == cmd->se_sess->se_node_acl)) || tas) 95 (tmr_nacl != cmd->se_sess->se_node_acl)) && tas) {
96 remove = false;
95 transport_send_task_abort(cmd); 97 transport_send_task_abort(cmd);
98 }
96 99
97 transport_cmd_finish_abort(cmd, 0); 100 transport_cmd_finish_abort(cmd, remove);
98} 101}
99 102
100static int target_check_cdb_and_preempt(struct list_head *list, 103static int target_check_cdb_and_preempt(struct list_head *list,
@@ -127,6 +130,11 @@ void core_tmr_abort_task(
127 130
128 if (dev != se_cmd->se_dev) 131 if (dev != se_cmd->se_dev)
129 continue; 132 continue;
133
134 /* skip se_cmd associated with tmr */
135 if (tmr->task_cmd == se_cmd)
136 continue;
137
130 ref_tag = se_cmd->se_tfo->get_task_tag(se_cmd); 138 ref_tag = se_cmd->se_tfo->get_task_tag(se_cmd);
131 if (tmr->ref_task_tag != ref_tag) 139 if (tmr->ref_task_tag != ref_tag)
132 continue; 140 continue;
@@ -150,18 +158,9 @@ void core_tmr_abort_task(
150 158
151 cancel_work_sync(&se_cmd->work); 159 cancel_work_sync(&se_cmd->work);
152 transport_wait_for_tasks(se_cmd); 160 transport_wait_for_tasks(se_cmd);
153 /*
154 * Now send SAM_STAT_TASK_ABORTED status for the referenced
155 * se_cmd descriptor..
156 */
157 transport_send_task_abort(se_cmd);
158 /*
159 * Also deal with possible extra acknowledge reference..
160 */
161 if (se_cmd->se_cmd_flags & SCF_ACK_KREF)
162 target_put_sess_cmd(se_sess, se_cmd);
163 161
164 target_put_sess_cmd(se_sess, se_cmd); 162 target_put_sess_cmd(se_sess, se_cmd);
163 transport_cmd_finish_abort(se_cmd, true);
165 164
166 printk("ABORT_TASK: Sending TMR_FUNCTION_COMPLETE for" 165 printk("ABORT_TASK: Sending TMR_FUNCTION_COMPLETE for"
167 " ref_tag: %d\n", ref_tag); 166 " ref_tag: %d\n", ref_tag);
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index 2956250b7225..d4b98690a736 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -235,7 +235,7 @@ void transport_subsystem_check_init(void)
235 sub_api_initialized = 1; 235 sub_api_initialized = 1;
236} 236}
237 237
238struct se_session *transport_init_session(void) 238struct se_session *transport_init_session(enum target_prot_op sup_prot_ops)
239{ 239{
240 struct se_session *se_sess; 240 struct se_session *se_sess;
241 241
@@ -251,6 +251,7 @@ struct se_session *transport_init_session(void)
251 INIT_LIST_HEAD(&se_sess->sess_wait_list); 251 INIT_LIST_HEAD(&se_sess->sess_wait_list);
252 spin_lock_init(&se_sess->sess_cmd_lock); 252 spin_lock_init(&se_sess->sess_cmd_lock);
253 kref_init(&se_sess->sess_kref); 253 kref_init(&se_sess->sess_kref);
254 se_sess->sup_prot_ops = sup_prot_ops;
254 255
255 return se_sess; 256 return se_sess;
256} 257}
@@ -288,12 +289,13 @@ int transport_alloc_session_tags(struct se_session *se_sess,
288EXPORT_SYMBOL(transport_alloc_session_tags); 289EXPORT_SYMBOL(transport_alloc_session_tags);
289 290
290struct se_session *transport_init_session_tags(unsigned int tag_num, 291struct se_session *transport_init_session_tags(unsigned int tag_num,
291 unsigned int tag_size) 292 unsigned int tag_size,
293 enum target_prot_op sup_prot_ops)
292{ 294{
293 struct se_session *se_sess; 295 struct se_session *se_sess;
294 int rc; 296 int rc;
295 297
296 se_sess = transport_init_session(); 298 se_sess = transport_init_session(sup_prot_ops);
297 if (IS_ERR(se_sess)) 299 if (IS_ERR(se_sess))
298 return se_sess; 300 return se_sess;
299 301
@@ -603,6 +605,15 @@ static void transport_lun_remove_cmd(struct se_cmd *cmd)
603 605
604void transport_cmd_finish_abort(struct se_cmd *cmd, int remove) 606void transport_cmd_finish_abort(struct se_cmd *cmd, int remove)
605{ 607{
608 if (cmd->se_cmd_flags & SCF_SE_LUN_CMD)
609 transport_lun_remove_cmd(cmd);
610 /*
611 * Allow the fabric driver to unmap any resources before
612 * releasing the descriptor via TFO->release_cmd()
613 */
614 if (remove)
615 cmd->se_tfo->aborted_task(cmd);
616
606 if (transport_cmd_check_stop_to_fabric(cmd)) 617 if (transport_cmd_check_stop_to_fabric(cmd))
607 return; 618 return;
608 if (remove) 619 if (remove)
@@ -1365,6 +1376,13 @@ int target_submit_cmd_map_sgls(struct se_cmd *se_cmd, struct se_session *se_sess
1365 target_put_sess_cmd(se_sess, se_cmd); 1376 target_put_sess_cmd(se_sess, se_cmd);
1366 return 0; 1377 return 0;
1367 } 1378 }
1379
1380 rc = target_setup_cmd_from_cdb(se_cmd, cdb);
1381 if (rc != 0) {
1382 transport_generic_request_failure(se_cmd, rc);
1383 return 0;
1384 }
1385
1368 /* 1386 /*
1369 * Save pointers for SGLs containing protection information, 1387 * Save pointers for SGLs containing protection information,
1370 * if present. 1388 * if present.
@@ -1374,11 +1392,6 @@ int target_submit_cmd_map_sgls(struct se_cmd *se_cmd, struct se_session *se_sess
1374 se_cmd->t_prot_nents = sgl_prot_count; 1392 se_cmd->t_prot_nents = sgl_prot_count;
1375 } 1393 }
1376 1394
1377 rc = target_setup_cmd_from_cdb(se_cmd, cdb);
1378 if (rc != 0) {
1379 transport_generic_request_failure(se_cmd, rc);
1380 return 0;
1381 }
1382 /* 1395 /*
1383 * When a non zero sgl_count has been passed perform SGL passthrough 1396 * When a non zero sgl_count has been passed perform SGL passthrough
1384 * mapping for pre-allocated fabric memory instead of having target 1397 * mapping for pre-allocated fabric memory instead of having target
@@ -1754,6 +1767,15 @@ void target_execute_cmd(struct se_cmd *cmd)
1754 cmd->t_state = TRANSPORT_PROCESSING; 1767 cmd->t_state = TRANSPORT_PROCESSING;
1755 cmd->transport_state |= CMD_T_ACTIVE|CMD_T_BUSY|CMD_T_SENT; 1768 cmd->transport_state |= CMD_T_ACTIVE|CMD_T_BUSY|CMD_T_SENT;
1756 spin_unlock_irq(&cmd->t_state_lock); 1769 spin_unlock_irq(&cmd->t_state_lock);
1770 /*
1771 * Perform WRITE_INSERT of PI using software emulation when backend
1772 * device has PI enabled, if the transport has not already generated
1773 * PI using hardware WRITE_INSERT offload.
1774 */
1775 if (cmd->prot_op == TARGET_PROT_DOUT_INSERT) {
1776 if (!(cmd->se_sess->sup_prot_ops & TARGET_PROT_DOUT_INSERT))
1777 sbc_dif_generate(cmd);
1778 }
1757 1779
1758 if (target_handle_task_attr(cmd)) { 1780 if (target_handle_task_attr(cmd)) {
1759 spin_lock_irq(&cmd->t_state_lock); 1781 spin_lock_irq(&cmd->t_state_lock);
@@ -1883,6 +1905,21 @@ static void transport_handle_queue_full(
1883 schedule_work(&cmd->se_dev->qf_work_queue); 1905 schedule_work(&cmd->se_dev->qf_work_queue);
1884} 1906}
1885 1907
1908static bool target_check_read_strip(struct se_cmd *cmd)
1909{
1910 sense_reason_t rc;
1911
1912 if (!(cmd->se_sess->sup_prot_ops & TARGET_PROT_DIN_STRIP)) {
1913 rc = sbc_dif_read_strip(cmd);
1914 if (rc) {
1915 cmd->pi_err = rc;
1916 return true;
1917 }
1918 }
1919
1920 return false;
1921}
1922
1886static void target_complete_ok_work(struct work_struct *work) 1923static void target_complete_ok_work(struct work_struct *work)
1887{ 1924{
1888 struct se_cmd *cmd = container_of(work, struct se_cmd, work); 1925 struct se_cmd *cmd = container_of(work, struct se_cmd, work);
@@ -1947,6 +1984,22 @@ static void target_complete_ok_work(struct work_struct *work)
1947 cmd->data_length; 1984 cmd->data_length;
1948 } 1985 }
1949 spin_unlock(&cmd->se_lun->lun_sep_lock); 1986 spin_unlock(&cmd->se_lun->lun_sep_lock);
1987 /*
1988 * Perform READ_STRIP of PI using software emulation when
1989 * backend had PI enabled, if the transport will not be
1990 * performing hardware READ_STRIP offload.
1991 */
1992 if (cmd->prot_op == TARGET_PROT_DIN_STRIP &&
1993 target_check_read_strip(cmd)) {
1994 ret = transport_send_check_condition_and_sense(cmd,
1995 cmd->pi_err, 0);
1996 if (ret == -EAGAIN || ret == -ENOMEM)
1997 goto queue_full;
1998
1999 transport_lun_remove_cmd(cmd);
2000 transport_cmd_check_stop_to_fabric(cmd);
2001 return;
2002 }
1950 2003
1951 trace_target_cmd_complete(cmd); 2004 trace_target_cmd_complete(cmd);
1952 ret = cmd->se_tfo->queue_data_in(cmd); 2005 ret = cmd->se_tfo->queue_data_in(cmd);
@@ -2039,6 +2092,10 @@ static inline void transport_free_pages(struct se_cmd *cmd)
2039 transport_free_sgl(cmd->t_bidi_data_sg, cmd->t_bidi_data_nents); 2092 transport_free_sgl(cmd->t_bidi_data_sg, cmd->t_bidi_data_nents);
2040 cmd->t_bidi_data_sg = NULL; 2093 cmd->t_bidi_data_sg = NULL;
2041 cmd->t_bidi_data_nents = 0; 2094 cmd->t_bidi_data_nents = 0;
2095
2096 transport_free_sgl(cmd->t_prot_sg, cmd->t_prot_nents);
2097 cmd->t_prot_sg = NULL;
2098 cmd->t_prot_nents = 0;
2042} 2099}
2043 2100
2044/** 2101/**
@@ -2202,6 +2259,14 @@ transport_generic_new_cmd(struct se_cmd *cmd)
2202 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 2259 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
2203 } 2260 }
2204 2261
2262 if (cmd->prot_op != TARGET_PROT_NORMAL) {
2263 ret = target_alloc_sgl(&cmd->t_prot_sg,
2264 &cmd->t_prot_nents,
2265 cmd->prot_length, true);
2266 if (ret < 0)
2267 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
2268 }
2269
2205 ret = target_alloc_sgl(&cmd->t_data_sg, &cmd->t_data_nents, 2270 ret = target_alloc_sgl(&cmd->t_data_sg, &cmd->t_data_nents,
2206 cmd->data_length, zero_flag); 2271 cmd->data_length, zero_flag);
2207 if (ret < 0) 2272 if (ret < 0)
@@ -2770,13 +2835,17 @@ int transport_check_aborted_status(struct se_cmd *cmd, int send_status)
2770 if (!(cmd->transport_state & CMD_T_ABORTED)) 2835 if (!(cmd->transport_state & CMD_T_ABORTED))
2771 return 0; 2836 return 0;
2772 2837
2773 if (!send_status || (cmd->se_cmd_flags & SCF_SENT_DELAYED_TAS)) 2838 /*
2839 * If cmd has been aborted but either no status is to be sent or it has
2840 * already been sent, just return
2841 */
2842 if (!send_status || !(cmd->se_cmd_flags & SCF_SEND_DELAYED_TAS))
2774 return 1; 2843 return 1;
2775 2844
2776 pr_debug("Sending delayed SAM_STAT_TASK_ABORTED status for CDB: 0x%02x ITT: 0x%08x\n", 2845 pr_debug("Sending delayed SAM_STAT_TASK_ABORTED status for CDB: 0x%02x ITT: 0x%08x\n",
2777 cmd->t_task_cdb[0], cmd->se_tfo->get_task_tag(cmd)); 2846 cmd->t_task_cdb[0], cmd->se_tfo->get_task_tag(cmd));
2778 2847
2779 cmd->se_cmd_flags |= SCF_SENT_DELAYED_TAS; 2848 cmd->se_cmd_flags &= ~SCF_SEND_DELAYED_TAS;
2780 cmd->scsi_status = SAM_STAT_TASK_ABORTED; 2849 cmd->scsi_status = SAM_STAT_TASK_ABORTED;
2781 trace_target_cmd_complete(cmd); 2850 trace_target_cmd_complete(cmd);
2782 cmd->se_tfo->queue_status(cmd); 2851 cmd->se_tfo->queue_status(cmd);
@@ -2790,7 +2859,7 @@ void transport_send_task_abort(struct se_cmd *cmd)
2790 unsigned long flags; 2859 unsigned long flags;
2791 2860
2792 spin_lock_irqsave(&cmd->t_state_lock, flags); 2861 spin_lock_irqsave(&cmd->t_state_lock, flags);
2793 if (cmd->se_cmd_flags & (SCF_SENT_CHECK_CONDITION | SCF_SENT_DELAYED_TAS)) { 2862 if (cmd->se_cmd_flags & (SCF_SENT_CHECK_CONDITION)) {
2794 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 2863 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
2795 return; 2864 return;
2796 } 2865 }
@@ -2805,6 +2874,7 @@ void transport_send_task_abort(struct se_cmd *cmd)
2805 if (cmd->data_direction == DMA_TO_DEVICE) { 2874 if (cmd->data_direction == DMA_TO_DEVICE) {
2806 if (cmd->se_tfo->write_pending_status(cmd) != 0) { 2875 if (cmd->se_tfo->write_pending_status(cmd) != 0) {
2807 cmd->transport_state |= CMD_T_ABORTED; 2876 cmd->transport_state |= CMD_T_ABORTED;
2877 cmd->se_cmd_flags |= SCF_SEND_DELAYED_TAS;
2808 smp_mb__after_atomic_inc(); 2878 smp_mb__after_atomic_inc();
2809 return; 2879 return;
2810 } 2880 }
diff --git a/drivers/target/tcm_fc/tcm_fc.h b/drivers/target/tcm_fc/tcm_fc.h
index 752863acecb8..a0bcfd3e7e7d 100644
--- a/drivers/target/tcm_fc/tcm_fc.h
+++ b/drivers/target/tcm_fc/tcm_fc.h
@@ -94,20 +94,19 @@ struct ft_lun {
94 */ 94 */
95struct ft_tpg { 95struct ft_tpg {
96 u32 index; 96 u32 index;
97 struct ft_lport_acl *lport_acl; 97 struct ft_lport_wwn *lport_wwn;
98 struct ft_tport *tport; /* active tport or NULL */ 98 struct ft_tport *tport; /* active tport or NULL */
99 struct list_head list; /* linkage in ft_lport_acl tpg_list */
100 struct list_head lun_list; /* head of LUNs */ 99 struct list_head lun_list; /* head of LUNs */
101 struct se_portal_group se_tpg; 100 struct se_portal_group se_tpg;
102 struct workqueue_struct *workqueue; 101 struct workqueue_struct *workqueue;
103}; 102};
104 103
105struct ft_lport_acl { 104struct ft_lport_wwn {
106 u64 wwpn; 105 u64 wwpn;
107 char name[FT_NAMELEN]; 106 char name[FT_NAMELEN];
108 struct list_head list; 107 struct list_head ft_wwn_node;
109 struct list_head tpg_list; 108 struct ft_tpg *tpg;
110 struct se_wwn fc_lport_wwn; 109 struct se_wwn se_wwn;
111}; 110};
112 111
113/* 112/*
@@ -128,7 +127,6 @@ struct ft_cmd {
128 u32 sg_cnt; /* No. of item in scatterlist */ 127 u32 sg_cnt; /* No. of item in scatterlist */
129}; 128};
130 129
131extern struct list_head ft_lport_list;
132extern struct mutex ft_lport_lock; 130extern struct mutex ft_lport_lock;
133extern struct fc4_prov ft_prov; 131extern struct fc4_prov ft_prov;
134extern struct target_fabric_configfs *ft_configfs; 132extern struct target_fabric_configfs *ft_configfs;
@@ -163,6 +161,7 @@ int ft_write_pending_status(struct se_cmd *);
163u32 ft_get_task_tag(struct se_cmd *); 161u32 ft_get_task_tag(struct se_cmd *);
164int ft_get_cmd_state(struct se_cmd *); 162int ft_get_cmd_state(struct se_cmd *);
165void ft_queue_tm_resp(struct se_cmd *); 163void ft_queue_tm_resp(struct se_cmd *);
164void ft_aborted_task(struct se_cmd *);
166 165
167/* 166/*
168 * other internal functions. 167 * other internal functions.
diff --git a/drivers/target/tcm_fc/tfc_cmd.c b/drivers/target/tcm_fc/tfc_cmd.c
index 8b2c1aaf81de..01cf37f212c3 100644
--- a/drivers/target/tcm_fc/tfc_cmd.c
+++ b/drivers/target/tcm_fc/tfc_cmd.c
@@ -426,6 +426,11 @@ void ft_queue_tm_resp(struct se_cmd *se_cmd)
426 ft_send_resp_code(cmd, code); 426 ft_send_resp_code(cmd, code);
427} 427}
428 428
429void ft_aborted_task(struct se_cmd *se_cmd)
430{
431 return;
432}
433
429static void ft_send_work(struct work_struct *work); 434static void ft_send_work(struct work_struct *work);
430 435
431/* 436/*
diff --git a/drivers/target/tcm_fc/tfc_conf.c b/drivers/target/tcm_fc/tfc_conf.c
index e879da81ad93..efdcb9663a1a 100644
--- a/drivers/target/tcm_fc/tfc_conf.c
+++ b/drivers/target/tcm_fc/tfc_conf.c
@@ -50,7 +50,7 @@
50 50
51struct target_fabric_configfs *ft_configfs; 51struct target_fabric_configfs *ft_configfs;
52 52
53LIST_HEAD(ft_lport_list); 53static LIST_HEAD(ft_wwn_list);
54DEFINE_MUTEX(ft_lport_lock); 54DEFINE_MUTEX(ft_lport_lock);
55 55
56unsigned int ft_debug_logging; 56unsigned int ft_debug_logging;
@@ -298,7 +298,7 @@ static struct se_portal_group *ft_add_tpg(
298 struct config_group *group, 298 struct config_group *group,
299 const char *name) 299 const char *name)
300{ 300{
301 struct ft_lport_acl *lacl; 301 struct ft_lport_wwn *ft_wwn;
302 struct ft_tpg *tpg; 302 struct ft_tpg *tpg;
303 struct workqueue_struct *wq; 303 struct workqueue_struct *wq;
304 unsigned long index; 304 unsigned long index;
@@ -318,12 +318,17 @@ static struct se_portal_group *ft_add_tpg(
318 if (index > UINT_MAX) 318 if (index > UINT_MAX)
319 return NULL; 319 return NULL;
320 320
321 lacl = container_of(wwn, struct ft_lport_acl, fc_lport_wwn); 321 if ((index != 1)) {
322 pr_err("Error, a single TPG=1 is used for HW port mappings\n");
323 return ERR_PTR(-ENOSYS);
324 }
325
326 ft_wwn = container_of(wwn, struct ft_lport_wwn, se_wwn);
322 tpg = kzalloc(sizeof(*tpg), GFP_KERNEL); 327 tpg = kzalloc(sizeof(*tpg), GFP_KERNEL);
323 if (!tpg) 328 if (!tpg)
324 return NULL; 329 return NULL;
325 tpg->index = index; 330 tpg->index = index;
326 tpg->lport_acl = lacl; 331 tpg->lport_wwn = ft_wwn;
327 INIT_LIST_HEAD(&tpg->lun_list); 332 INIT_LIST_HEAD(&tpg->lun_list);
328 333
329 wq = alloc_workqueue("tcm_fc", 0, 1); 334 wq = alloc_workqueue("tcm_fc", 0, 1);
@@ -342,7 +347,7 @@ static struct se_portal_group *ft_add_tpg(
342 tpg->workqueue = wq; 347 tpg->workqueue = wq;
343 348
344 mutex_lock(&ft_lport_lock); 349 mutex_lock(&ft_lport_lock);
345 list_add_tail(&tpg->list, &lacl->tpg_list); 350 ft_wwn->tpg = tpg;
346 mutex_unlock(&ft_lport_lock); 351 mutex_unlock(&ft_lport_lock);
347 352
348 return &tpg->se_tpg; 353 return &tpg->se_tpg;
@@ -351,6 +356,7 @@ static struct se_portal_group *ft_add_tpg(
351static void ft_del_tpg(struct se_portal_group *se_tpg) 356static void ft_del_tpg(struct se_portal_group *se_tpg)
352{ 357{
353 struct ft_tpg *tpg = container_of(se_tpg, struct ft_tpg, se_tpg); 358 struct ft_tpg *tpg = container_of(se_tpg, struct ft_tpg, se_tpg);
359 struct ft_lport_wwn *ft_wwn = tpg->lport_wwn;
354 360
355 pr_debug("del tpg %s\n", 361 pr_debug("del tpg %s\n",
356 config_item_name(&tpg->se_tpg.tpg_group.cg_item)); 362 config_item_name(&tpg->se_tpg.tpg_group.cg_item));
@@ -361,7 +367,7 @@ static void ft_del_tpg(struct se_portal_group *se_tpg)
361 synchronize_rcu(); 367 synchronize_rcu();
362 368
363 mutex_lock(&ft_lport_lock); 369 mutex_lock(&ft_lport_lock);
364 list_del(&tpg->list); 370 ft_wwn->tpg = NULL;
365 if (tpg->tport) { 371 if (tpg->tport) {
366 tpg->tport->tpg = NULL; 372 tpg->tport->tpg = NULL;
367 tpg->tport = NULL; 373 tpg->tport = NULL;
@@ -380,15 +386,11 @@ static void ft_del_tpg(struct se_portal_group *se_tpg)
380 */ 386 */
381struct ft_tpg *ft_lport_find_tpg(struct fc_lport *lport) 387struct ft_tpg *ft_lport_find_tpg(struct fc_lport *lport)
382{ 388{
383 struct ft_lport_acl *lacl; 389 struct ft_lport_wwn *ft_wwn;
384 struct ft_tpg *tpg;
385 390
386 list_for_each_entry(lacl, &ft_lport_list, list) { 391 list_for_each_entry(ft_wwn, &ft_wwn_list, ft_wwn_node) {
387 if (lacl->wwpn == lport->wwpn) { 392 if (ft_wwn->wwpn == lport->wwpn)
388 list_for_each_entry(tpg, &lacl->tpg_list, list) 393 return ft_wwn->tpg;
389 return tpg; /* XXX for now return first entry */
390 return NULL;
391 }
392 } 394 }
393 return NULL; 395 return NULL;
394} 396}
@@ -401,50 +403,49 @@ struct ft_tpg *ft_lport_find_tpg(struct fc_lport *lport)
401 * Add lport to allowed config. 403 * Add lport to allowed config.
402 * The name is the WWPN in lower-case ASCII, colon-separated bytes. 404 * The name is the WWPN in lower-case ASCII, colon-separated bytes.
403 */ 405 */
404static struct se_wwn *ft_add_lport( 406static struct se_wwn *ft_add_wwn(
405 struct target_fabric_configfs *tf, 407 struct target_fabric_configfs *tf,
406 struct config_group *group, 408 struct config_group *group,
407 const char *name) 409 const char *name)
408{ 410{
409 struct ft_lport_acl *lacl; 411 struct ft_lport_wwn *ft_wwn;
410 struct ft_lport_acl *old_lacl; 412 struct ft_lport_wwn *old_ft_wwn;
411 u64 wwpn; 413 u64 wwpn;
412 414
413 pr_debug("add lport %s\n", name); 415 pr_debug("add wwn %s\n", name);
414 if (ft_parse_wwn(name, &wwpn, 1) < 0) 416 if (ft_parse_wwn(name, &wwpn, 1) < 0)
415 return NULL; 417 return NULL;
416 lacl = kzalloc(sizeof(*lacl), GFP_KERNEL); 418 ft_wwn = kzalloc(sizeof(*ft_wwn), GFP_KERNEL);
417 if (!lacl) 419 if (!ft_wwn)
418 return NULL; 420 return NULL;
419 lacl->wwpn = wwpn; 421 ft_wwn->wwpn = wwpn;
420 INIT_LIST_HEAD(&lacl->tpg_list);
421 422
422 mutex_lock(&ft_lport_lock); 423 mutex_lock(&ft_lport_lock);
423 list_for_each_entry(old_lacl, &ft_lport_list, list) { 424 list_for_each_entry(old_ft_wwn, &ft_wwn_list, ft_wwn_node) {
424 if (old_lacl->wwpn == wwpn) { 425 if (old_ft_wwn->wwpn == wwpn) {
425 mutex_unlock(&ft_lport_lock); 426 mutex_unlock(&ft_lport_lock);
426 kfree(lacl); 427 kfree(ft_wwn);
427 return NULL; 428 return NULL;
428 } 429 }
429 } 430 }
430 list_add_tail(&lacl->list, &ft_lport_list); 431 list_add_tail(&ft_wwn->ft_wwn_node, &ft_wwn_list);
431 ft_format_wwn(lacl->name, sizeof(lacl->name), wwpn); 432 ft_format_wwn(ft_wwn->name, sizeof(ft_wwn->name), wwpn);
432 mutex_unlock(&ft_lport_lock); 433 mutex_unlock(&ft_lport_lock);
433 434
434 return &lacl->fc_lport_wwn; 435 return &ft_wwn->se_wwn;
435} 436}
436 437
437static void ft_del_lport(struct se_wwn *wwn) 438static void ft_del_wwn(struct se_wwn *wwn)
438{ 439{
439 struct ft_lport_acl *lacl = container_of(wwn, 440 struct ft_lport_wwn *ft_wwn = container_of(wwn,
440 struct ft_lport_acl, fc_lport_wwn); 441 struct ft_lport_wwn, se_wwn);
441 442
442 pr_debug("del lport %s\n", lacl->name); 443 pr_debug("del wwn %s\n", ft_wwn->name);
443 mutex_lock(&ft_lport_lock); 444 mutex_lock(&ft_lport_lock);
444 list_del(&lacl->list); 445 list_del(&ft_wwn->ft_wwn_node);
445 mutex_unlock(&ft_lport_lock); 446 mutex_unlock(&ft_lport_lock);
446 447
447 kfree(lacl); 448 kfree(ft_wwn);
448} 449}
449 450
450static ssize_t ft_wwn_show_attr_version( 451static ssize_t ft_wwn_show_attr_version(
@@ -471,7 +472,7 @@ static char *ft_get_fabric_wwn(struct se_portal_group *se_tpg)
471{ 472{
472 struct ft_tpg *tpg = se_tpg->se_tpg_fabric_ptr; 473 struct ft_tpg *tpg = se_tpg->se_tpg_fabric_ptr;
473 474
474 return tpg->lport_acl->name; 475 return tpg->lport_wwn->name;
475} 476}
476 477
477static u16 ft_get_tag(struct se_portal_group *se_tpg) 478static u16 ft_get_tag(struct se_portal_group *se_tpg)
@@ -536,12 +537,13 @@ static struct target_core_fabric_ops ft_fabric_ops = {
536 .queue_data_in = ft_queue_data_in, 537 .queue_data_in = ft_queue_data_in,
537 .queue_status = ft_queue_status, 538 .queue_status = ft_queue_status,
538 .queue_tm_rsp = ft_queue_tm_resp, 539 .queue_tm_rsp = ft_queue_tm_resp,
540 .aborted_task = ft_aborted_task,
539 /* 541 /*
540 * Setup function pointers for generic logic in 542 * Setup function pointers for generic logic in
541 * target_core_fabric_configfs.c 543 * target_core_fabric_configfs.c
542 */ 544 */
543 .fabric_make_wwn = &ft_add_lport, 545 .fabric_make_wwn = &ft_add_wwn,
544 .fabric_drop_wwn = &ft_del_lport, 546 .fabric_drop_wwn = &ft_del_wwn,
545 .fabric_make_tpg = &ft_add_tpg, 547 .fabric_make_tpg = &ft_add_tpg,
546 .fabric_drop_tpg = &ft_del_tpg, 548 .fabric_drop_tpg = &ft_del_tpg,
547 .fabric_post_link = NULL, 549 .fabric_post_link = NULL,
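
The tfc_conf.c changes above drop the per-WWN list of TPGs: each ft_lport_wwn now carries at most one tpg pointer, so ft_lport_find_tpg() degenerates to a list walk that returns ft_wwn->tpg on a WWPN match. Below is a rough userspace analogue of that lookup, using an ordinary singly linked list instead of the kernel's list_head and placeholder types.

    #include <stdio.h>

    struct tpg { unsigned int index; };

    /* One entry per configured WWN; at most one TPG hangs off each entry. */
    struct wwn_entry {
        unsigned long long wwpn;
        struct tpg *tpg;             /* NULL until a TPG is created */
        struct wwn_entry *next;
    };

    static struct wwn_entry *wwn_list;

    /* Counterpart of ft_lport_find_tpg(): WWPN match -> that entry's TPG. */
    static struct tpg *find_tpg(unsigned long long wwpn)
    {
        struct wwn_entry *e;

        for (e = wwn_list; e; e = e->next)
            if (e->wwpn == wwpn)
                return e->tpg;
        return NULL;
    }

    int main(void)
    {
        static struct tpg one = { .index = 1 };
        static struct wwn_entry a = { .wwpn = 0x10, .tpg = &one };
        struct tpg *t;

        wwn_list = &a;
        t = find_tpg(0x10);
        printf("found tpg index %u\n", t ? t->index : 0);
        return 0;
    }
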
diff --git a/drivers/target/tcm_fc/tfc_sess.c b/drivers/target/tcm_fc/tfc_sess.c
index ae52c08dad09..21ce50880c79 100644
--- a/drivers/target/tcm_fc/tfc_sess.c
+++ b/drivers/target/tcm_fc/tfc_sess.c
@@ -51,7 +51,7 @@ static void ft_sess_delete_all(struct ft_tport *);
51 * Lookup or allocate target local port. 51 * Lookup or allocate target local port.
52 * Caller holds ft_lport_lock. 52 * Caller holds ft_lport_lock.
53 */ 53 */
54static struct ft_tport *ft_tport_create(struct fc_lport *lport) 54static struct ft_tport *ft_tport_get(struct fc_lport *lport)
55{ 55{
56 struct ft_tpg *tpg; 56 struct ft_tpg *tpg;
57 struct ft_tport *tport; 57 struct ft_tport *tport;
@@ -68,6 +68,7 @@ static struct ft_tport *ft_tport_create(struct fc_lport *lport)
68 68
69 if (tport) { 69 if (tport) {
70 tport->tpg = tpg; 70 tport->tpg = tpg;
71 tpg->tport = tport;
71 return tport; 72 return tport;
72 } 73 }
73 74
@@ -114,7 +115,7 @@ static void ft_tport_delete(struct ft_tport *tport)
114void ft_lport_add(struct fc_lport *lport, void *arg) 115void ft_lport_add(struct fc_lport *lport, void *arg)
115{ 116{
116 mutex_lock(&ft_lport_lock); 117 mutex_lock(&ft_lport_lock);
117 ft_tport_create(lport); 118 ft_tport_get(lport);
118 mutex_unlock(&ft_lport_lock); 119 mutex_unlock(&ft_lport_lock);
119} 120}
120 121
@@ -211,7 +212,8 @@ static struct ft_sess *ft_sess_create(struct ft_tport *tport, u32 port_id,
211 return NULL; 212 return NULL;
212 213
213 sess->se_sess = transport_init_session_tags(TCM_FC_DEFAULT_TAGS, 214 sess->se_sess = transport_init_session_tags(TCM_FC_DEFAULT_TAGS,
214 sizeof(struct ft_cmd)); 215 sizeof(struct ft_cmd),
216 TARGET_PROT_NORMAL);
215 if (IS_ERR(sess->se_sess)) { 217 if (IS_ERR(sess->se_sess)) {
216 kfree(sess); 218 kfree(sess);
217 return NULL; 219 return NULL;
@@ -350,7 +352,7 @@ static int ft_prli_locked(struct fc_rport_priv *rdata, u32 spp_len,
350 struct ft_node_acl *acl; 352 struct ft_node_acl *acl;
351 u32 fcp_parm; 353 u32 fcp_parm;
352 354
353 tport = ft_tport_create(rdata->local_port); 355 tport = ft_tport_get(rdata->local_port);
354 if (!tport) 356 if (!tport)
355 goto not_target; /* not a target for this local port */ 357 goto not_target; /* not a target for this local port */
356 358
diff --git a/drivers/tty/tty_audit.c b/drivers/tty/tty_audit.c
index b0e540137e39..90ca082935f6 100644
--- a/drivers/tty/tty_audit.c
+++ b/drivers/tty/tty_audit.c
@@ -65,6 +65,7 @@ static void tty_audit_log(const char *description, int major, int minor,
65{ 65{
66 struct audit_buffer *ab; 66 struct audit_buffer *ab;
67 struct task_struct *tsk = current; 67 struct task_struct *tsk = current;
68 pid_t pid = task_pid_nr(tsk);
68 uid_t uid = from_kuid(&init_user_ns, task_uid(tsk)); 69 uid_t uid = from_kuid(&init_user_ns, task_uid(tsk));
69 uid_t loginuid = from_kuid(&init_user_ns, audit_get_loginuid(tsk)); 70 uid_t loginuid = from_kuid(&init_user_ns, audit_get_loginuid(tsk));
70 unsigned int sessionid = audit_get_sessionid(tsk); 71 unsigned int sessionid = audit_get_sessionid(tsk);
@@ -74,7 +75,7 @@ static void tty_audit_log(const char *description, int major, int minor,
74 char name[sizeof(tsk->comm)]; 75 char name[sizeof(tsk->comm)];
75 76
76 audit_log_format(ab, "%s pid=%u uid=%u auid=%u ses=%u major=%d" 77 audit_log_format(ab, "%s pid=%u uid=%u auid=%u ses=%u major=%d"
77 " minor=%d comm=", description, tsk->pid, uid, 78 " minor=%d comm=", description, pid, uid,
78 loginuid, sessionid, major, minor); 79 loginuid, sessionid, major, minor);
79 get_task_comm(name, tsk); 80 get_task_comm(name, tsk);
80 audit_log_untrustedstring(ab, name); 81 audit_log_untrustedstring(ab, name);
diff --git a/drivers/usb/gadget/tcm_usb_gadget.c b/drivers/usb/gadget/tcm_usb_gadget.c
index 460c266b8e24..f058c0368d61 100644
--- a/drivers/usb/gadget/tcm_usb_gadget.c
+++ b/drivers/usb/gadget/tcm_usb_gadget.c
@@ -1471,6 +1471,11 @@ static void usbg_queue_tm_rsp(struct se_cmd *se_cmd)
1471{ 1471{
1472} 1472}
1473 1473
1474static void usbg_aborted_task(struct se_cmd *se_cmd)
1475{
1476 return;
1477}
1478
1474static const char *usbg_check_wwn(const char *name) 1479static const char *usbg_check_wwn(const char *name)
1475{ 1480{
1476 const char *n; 1481 const char *n;
@@ -1726,7 +1731,7 @@ static int tcm_usbg_make_nexus(struct usbg_tpg *tpg, char *name)
1726 pr_err("Unable to allocate struct tcm_vhost_nexus\n"); 1731 pr_err("Unable to allocate struct tcm_vhost_nexus\n");
1727 goto err_unlock; 1732 goto err_unlock;
1728 } 1733 }
1729 tv_nexus->tvn_se_sess = transport_init_session(); 1734 tv_nexus->tvn_se_sess = transport_init_session(TARGET_PROT_NORMAL);
1730 if (IS_ERR(tv_nexus->tvn_se_sess)) 1735 if (IS_ERR(tv_nexus->tvn_se_sess))
1731 goto err_free; 1736 goto err_free;
1732 1737
@@ -1897,6 +1902,7 @@ static struct target_core_fabric_ops usbg_ops = {
1897 .queue_data_in = usbg_send_read_response, 1902 .queue_data_in = usbg_send_read_response,
1898 .queue_status = usbg_send_status_response, 1903 .queue_status = usbg_send_status_response,
1899 .queue_tm_rsp = usbg_queue_tm_rsp, 1904 .queue_tm_rsp = usbg_queue_tm_rsp,
1905 .aborted_task = usbg_aborted_task,
1900 .check_stop_free = usbg_check_stop_free, 1906 .check_stop_free = usbg_check_stop_free,
1901 1907
1902 .fabric_make_wwn = usbg_make_tport, 1908 .fabric_make_wwn = usbg_make_tport,
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index e1e22e0f01e8..be414d2b2b22 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -818,9 +818,9 @@ static int vhost_net_release(struct inode *inode, struct file *f)
818 vhost_dev_cleanup(&n->dev, false); 818 vhost_dev_cleanup(&n->dev, false);
819 vhost_net_vq_reset(n); 819 vhost_net_vq_reset(n);
820 if (tx_sock) 820 if (tx_sock)
821 fput(tx_sock->file); 821 sockfd_put(tx_sock);
822 if (rx_sock) 822 if (rx_sock)
823 fput(rx_sock->file); 823 sockfd_put(rx_sock);
824 /* Make sure no callbacks are outstanding */ 824 /* Make sure no callbacks are outstanding */
825 synchronize_rcu_bh(); 825 synchronize_rcu_bh();
826 /* We do an extra flush before freeing memory, 826 /* We do an extra flush before freeing memory,
@@ -860,7 +860,7 @@ static struct socket *get_raw_socket(int fd)
860 } 860 }
861 return sock; 861 return sock;
862err: 862err:
863 fput(sock->file); 863 sockfd_put(sock);
864 return ERR_PTR(r); 864 return ERR_PTR(r);
865} 865}
866 866
@@ -966,7 +966,7 @@ static long vhost_net_set_backend(struct vhost_net *n, unsigned index, int fd)
966 966
967 if (oldsock) { 967 if (oldsock) {
968 vhost_net_flush_vq(n, index); 968 vhost_net_flush_vq(n, index);
969 fput(oldsock->file); 969 sockfd_put(oldsock);
970 } 970 }
971 971
972 mutex_unlock(&n->dev.mutex); 972 mutex_unlock(&n->dev.mutex);
@@ -978,7 +978,7 @@ err_used:
978 if (ubufs) 978 if (ubufs)
979 vhost_net_ubuf_put_wait_and_free(ubufs); 979 vhost_net_ubuf_put_wait_and_free(ubufs);
980err_ubufs: 980err_ubufs:
981 fput(sock->file); 981 sockfd_put(sock);
982err_vq: 982err_vq:
983 mutex_unlock(&vq->mutex); 983 mutex_unlock(&vq->mutex);
984err: 984err:
@@ -1009,9 +1009,9 @@ static long vhost_net_reset_owner(struct vhost_net *n)
1009done: 1009done:
1010 mutex_unlock(&n->dev.mutex); 1010 mutex_unlock(&n->dev.mutex);
1011 if (tx_sock) 1011 if (tx_sock)
1012 fput(tx_sock->file); 1012 sockfd_put(tx_sock);
1013 if (rx_sock) 1013 if (rx_sock)
1014 fput(rx_sock->file); 1014 sockfd_put(rx_sock);
1015 return err; 1015 return err;
1016} 1016}
1017 1017
diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c
index e48d4a672580..cf50ce93975b 100644
--- a/drivers/vhost/scsi.c
+++ b/drivers/vhost/scsi.c
@@ -539,6 +539,11 @@ static void tcm_vhost_queue_tm_rsp(struct se_cmd *se_cmd)
539 return; 539 return;
540} 540}
541 541
542static void tcm_vhost_aborted_task(struct se_cmd *se_cmd)
543{
544 return;
545}
546
542static void tcm_vhost_free_evt(struct vhost_scsi *vs, struct tcm_vhost_evt *evt) 547static void tcm_vhost_free_evt(struct vhost_scsi *vs, struct tcm_vhost_evt *evt)
543{ 548{
544 vs->vs_events_nr--; 549 vs->vs_events_nr--;
@@ -1740,7 +1745,8 @@ static int tcm_vhost_make_nexus(struct tcm_vhost_tpg *tpg,
1740 */ 1745 */
1741 tv_nexus->tvn_se_sess = transport_init_session_tags( 1746 tv_nexus->tvn_se_sess = transport_init_session_tags(
1742 TCM_VHOST_DEFAULT_TAGS, 1747 TCM_VHOST_DEFAULT_TAGS,
1743 sizeof(struct tcm_vhost_cmd)); 1748 sizeof(struct tcm_vhost_cmd),
1749 TARGET_PROT_NORMAL);
1744 if (IS_ERR(tv_nexus->tvn_se_sess)) { 1750 if (IS_ERR(tv_nexus->tvn_se_sess)) {
1745 mutex_unlock(&tpg->tv_tpg_mutex); 1751 mutex_unlock(&tpg->tv_tpg_mutex);
1746 kfree(tv_nexus); 1752 kfree(tv_nexus);
@@ -2131,6 +2137,7 @@ static struct target_core_fabric_ops tcm_vhost_ops = {
2131 .queue_data_in = tcm_vhost_queue_data_in, 2137 .queue_data_in = tcm_vhost_queue_data_in,
2132 .queue_status = tcm_vhost_queue_status, 2138 .queue_status = tcm_vhost_queue_status,
2133 .queue_tm_rsp = tcm_vhost_queue_tm_rsp, 2139 .queue_tm_rsp = tcm_vhost_queue_tm_rsp,
2140 .aborted_task = tcm_vhost_aborted_task,
2134 /* 2141 /*
2135 * Setup callers for generic logic in target_core_fabric_configfs.c 2142 * Setup callers for generic logic in target_core_fabric_configfs.c
2136 */ 2143 */
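
The three fabric drivers touched above all gain an .aborted_task entry in their target_core_fabric_ops tables, even where the implementation is an empty stub for now. Here is a small userspace sketch of that style of function-pointer table with a newer, possibly optional member; the struct and field names are made up, and the caller-side NULL check is shown as one common convention (the drivers above instead install explicit stubs).

    #include <stdio.h>

    struct cmd { int tag; };

    /* A cut-down ops table in the spirit of target_core_fabric_ops. */
    struct fabric_ops {
        void (*queue_status)(struct cmd *);
        void (*aborted_task)(struct cmd *);  /* newer hook, may be a stub */
    };

    static void my_queue_status(struct cmd *c)
    {
        printf("status for tag %d\n", c->tag);
    }

    static void my_aborted_task(struct cmd *c)
    {
        /* nothing to clean up in this sketch */
        (void)c;
    }

    static const struct fabric_ops ops = {
        .queue_status = my_queue_status,
        .aborted_task = my_aborted_task,
    };

    /* Core code calls through the table, tolerating an absent hook. */
    static void core_abort(const struct fabric_ops *o, struct cmd *c)
    {
        if (o->aborted_task)
            o->aborted_task(c);
        o->queue_status(c);
    }

    int main(void)
    {
        struct cmd c = { .tag = 42 };

        core_abort(&ops, &c);
        return 0;
    }
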
diff --git a/fs/aio.c b/fs/aio.c
index 062a5f6a1448..12a3de0ee6da 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -52,7 +52,8 @@
52struct aio_ring { 52struct aio_ring {
53 unsigned id; /* kernel internal index number */ 53 unsigned id; /* kernel internal index number */
54 unsigned nr; /* number of io_events */ 54 unsigned nr; /* number of io_events */
55 unsigned head; 55 unsigned head; /* Written to by userland or under ring_lock
56 * mutex by aio_read_events_ring(). */
56 unsigned tail; 57 unsigned tail;
57 58
58 unsigned magic; 59 unsigned magic;
@@ -243,6 +244,11 @@ static void aio_free_ring(struct kioctx *ctx)
243{ 244{
244 int i; 245 int i;
245 246
247 /* Disconnect the kiotx from the ring file. This prevents future
248 * accesses to the kioctx from page migration.
249 */
250 put_aio_ring_file(ctx);
251
246 for (i = 0; i < ctx->nr_pages; i++) { 252 for (i = 0; i < ctx->nr_pages; i++) {
247 struct page *page; 253 struct page *page;
248 pr_debug("pid(%d) [%d] page->count=%d\n", current->pid, i, 254 pr_debug("pid(%d) [%d] page->count=%d\n", current->pid, i,
@@ -254,8 +260,6 @@ static void aio_free_ring(struct kioctx *ctx)
254 put_page(page); 260 put_page(page);
255 } 261 }
256 262
257 put_aio_ring_file(ctx);
258
259 if (ctx->ring_pages && ctx->ring_pages != ctx->internal_pages) { 263 if (ctx->ring_pages && ctx->ring_pages != ctx->internal_pages) {
260 kfree(ctx->ring_pages); 264 kfree(ctx->ring_pages);
261 ctx->ring_pages = NULL; 265 ctx->ring_pages = NULL;
@@ -283,29 +287,38 @@ static int aio_migratepage(struct address_space *mapping, struct page *new,
283{ 287{
284 struct kioctx *ctx; 288 struct kioctx *ctx;
285 unsigned long flags; 289 unsigned long flags;
290 pgoff_t idx;
286 int rc; 291 int rc;
287 292
288 rc = 0; 293 rc = 0;
289 294
290 /* Make sure the old page hasn't already been changed */ 295 /* mapping->private_lock here protects against the kioctx teardown. */
291 spin_lock(&mapping->private_lock); 296 spin_lock(&mapping->private_lock);
292 ctx = mapping->private_data; 297 ctx = mapping->private_data;
293 if (ctx) { 298 if (!ctx) {
294 pgoff_t idx; 299 rc = -EINVAL;
295 spin_lock_irqsave(&ctx->completion_lock, flags); 300 goto out;
296 idx = old->index; 301 }
297 if (idx < (pgoff_t)ctx->nr_pages) { 302
298 if (ctx->ring_pages[idx] != old) 303 /* The ring_lock mutex. The prevents aio_read_events() from writing
299 rc = -EAGAIN; 304 * to the ring's head, and prevents page migration from mucking in
300 } else 305 * a partially initialized kiotx.
301 rc = -EINVAL; 306 */
302 spin_unlock_irqrestore(&ctx->completion_lock, flags); 307 if (!mutex_trylock(&ctx->ring_lock)) {
308 rc = -EAGAIN;
309 goto out;
310 }
311
312 idx = old->index;
313 if (idx < (pgoff_t)ctx->nr_pages) {
314 /* Make sure the old page hasn't already been changed */
315 if (ctx->ring_pages[idx] != old)
316 rc = -EAGAIN;
303 } else 317 } else
304 rc = -EINVAL; 318 rc = -EINVAL;
305 spin_unlock(&mapping->private_lock);
306 319
307 if (rc != 0) 320 if (rc != 0)
308 return rc; 321 goto out_unlock;
309 322
310 /* Writeback must be complete */ 323 /* Writeback must be complete */
311 BUG_ON(PageWriteback(old)); 324 BUG_ON(PageWriteback(old));
@@ -314,38 +327,26 @@ static int aio_migratepage(struct address_space *mapping, struct page *new,
314 rc = migrate_page_move_mapping(mapping, new, old, NULL, mode, 1); 327 rc = migrate_page_move_mapping(mapping, new, old, NULL, mode, 1);
315 if (rc != MIGRATEPAGE_SUCCESS) { 328 if (rc != MIGRATEPAGE_SUCCESS) {
316 put_page(new); 329 put_page(new);
317 return rc; 330 goto out_unlock;
318 } 331 }
319 332
320 /* We can potentially race against kioctx teardown here. Use the 333 /* Take completion_lock to prevent other writes to the ring buffer
321 * address_space's private data lock to protect the mapping's 334 * while the old page is copied to the new. This prevents new
322 * private_data. 335 * events from being lost.
323 */ 336 */
324 spin_lock(&mapping->private_lock); 337 spin_lock_irqsave(&ctx->completion_lock, flags);
325 ctx = mapping->private_data; 338 migrate_page_copy(new, old);
326 if (ctx) { 339 BUG_ON(ctx->ring_pages[idx] != old);
327 pgoff_t idx; 340 ctx->ring_pages[idx] = new;
328 spin_lock_irqsave(&ctx->completion_lock, flags); 341 spin_unlock_irqrestore(&ctx->completion_lock, flags);
329 migrate_page_copy(new, old);
330 idx = old->index;
331 if (idx < (pgoff_t)ctx->nr_pages) {
332 /* And only do the move if things haven't changed */
333 if (ctx->ring_pages[idx] == old)
334 ctx->ring_pages[idx] = new;
335 else
336 rc = -EAGAIN;
337 } else
338 rc = -EINVAL;
339 spin_unlock_irqrestore(&ctx->completion_lock, flags);
340 } else
341 rc = -EBUSY;
342 spin_unlock(&mapping->private_lock);
343 342
344 if (rc == MIGRATEPAGE_SUCCESS) 343 /* The old page is no longer accessible. */
345 put_page(old); 344 put_page(old);
346 else
347 put_page(new);
348 345
346out_unlock:
347 mutex_unlock(&ctx->ring_lock);
348out:
349 spin_unlock(&mapping->private_lock);
349 return rc; 350 return rc;
350} 351}
351#endif 352#endif
@@ -380,7 +381,7 @@ static int aio_setup_ring(struct kioctx *ctx)
380 file = aio_private_file(ctx, nr_pages); 381 file = aio_private_file(ctx, nr_pages);
381 if (IS_ERR(file)) { 382 if (IS_ERR(file)) {
382 ctx->aio_ring_file = NULL; 383 ctx->aio_ring_file = NULL;
383 return -EAGAIN; 384 return -ENOMEM;
384 } 385 }
385 386
386 ctx->aio_ring_file = file; 387 ctx->aio_ring_file = file;
@@ -415,7 +416,7 @@ static int aio_setup_ring(struct kioctx *ctx)
415 416
416 if (unlikely(i != nr_pages)) { 417 if (unlikely(i != nr_pages)) {
417 aio_free_ring(ctx); 418 aio_free_ring(ctx);
418 return -EAGAIN; 419 return -ENOMEM;
419 } 420 }
420 421
421 ctx->mmap_size = nr_pages * PAGE_SIZE; 422 ctx->mmap_size = nr_pages * PAGE_SIZE;
@@ -429,7 +430,7 @@ static int aio_setup_ring(struct kioctx *ctx)
429 if (IS_ERR((void *)ctx->mmap_base)) { 430 if (IS_ERR((void *)ctx->mmap_base)) {
430 ctx->mmap_size = 0; 431 ctx->mmap_size = 0;
431 aio_free_ring(ctx); 432 aio_free_ring(ctx);
432 return -EAGAIN; 433 return -ENOMEM;
433 } 434 }
434 435
435 pr_debug("mmap address: 0x%08lx\n", ctx->mmap_base); 436 pr_debug("mmap address: 0x%08lx\n", ctx->mmap_base);
@@ -556,6 +557,10 @@ static int ioctx_add_table(struct kioctx *ctx, struct mm_struct *mm)
556 rcu_read_unlock(); 557 rcu_read_unlock();
557 spin_unlock(&mm->ioctx_lock); 558 spin_unlock(&mm->ioctx_lock);
558 559
560 /* While kioctx setup is in progress,
561 * we are protected from page migration
 562	 * changing ring_pages by ->ring_lock.
563 */
559 ring = kmap_atomic(ctx->ring_pages[0]); 564 ring = kmap_atomic(ctx->ring_pages[0]);
560 ring->id = ctx->id; 565 ring->id = ctx->id;
561 kunmap_atomic(ring); 566 kunmap_atomic(ring);
@@ -640,24 +645,28 @@ static struct kioctx *ioctx_alloc(unsigned nr_events)
640 645
641 ctx->max_reqs = nr_events; 646 ctx->max_reqs = nr_events;
642 647
643 if (percpu_ref_init(&ctx->users, free_ioctx_users))
644 goto err;
645
646 if (percpu_ref_init(&ctx->reqs, free_ioctx_reqs))
647 goto err;
648
649 spin_lock_init(&ctx->ctx_lock); 648 spin_lock_init(&ctx->ctx_lock);
650 spin_lock_init(&ctx->completion_lock); 649 spin_lock_init(&ctx->completion_lock);
651 mutex_init(&ctx->ring_lock); 650 mutex_init(&ctx->ring_lock);
651 /* Protect against page migration throughout kiotx setup by keeping
652 * the ring_lock mutex held until setup is complete. */
653 mutex_lock(&ctx->ring_lock);
652 init_waitqueue_head(&ctx->wait); 654 init_waitqueue_head(&ctx->wait);
653 655
654 INIT_LIST_HEAD(&ctx->active_reqs); 656 INIT_LIST_HEAD(&ctx->active_reqs);
655 657
658 if (percpu_ref_init(&ctx->users, free_ioctx_users))
659 goto err;
660
661 if (percpu_ref_init(&ctx->reqs, free_ioctx_reqs))
662 goto err;
663
656 ctx->cpu = alloc_percpu(struct kioctx_cpu); 664 ctx->cpu = alloc_percpu(struct kioctx_cpu);
657 if (!ctx->cpu) 665 if (!ctx->cpu)
658 goto err; 666 goto err;
659 667
660 if (aio_setup_ring(ctx) < 0) 668 err = aio_setup_ring(ctx);
669 if (err < 0)
661 goto err; 670 goto err;
662 671
663 atomic_set(&ctx->reqs_available, ctx->nr_events - 1); 672 atomic_set(&ctx->reqs_available, ctx->nr_events - 1);
@@ -683,6 +692,9 @@ static struct kioctx *ioctx_alloc(unsigned nr_events)
683 if (err) 692 if (err)
684 goto err_cleanup; 693 goto err_cleanup;
685 694
695 /* Release the ring_lock mutex now that all setup is complete. */
696 mutex_unlock(&ctx->ring_lock);
697
686 pr_debug("allocated ioctx %p[%ld]: mm=%p mask=0x%x\n", 698 pr_debug("allocated ioctx %p[%ld]: mm=%p mask=0x%x\n",
687 ctx, ctx->user_id, mm, ctx->nr_events); 699 ctx, ctx->user_id, mm, ctx->nr_events);
688 return ctx; 700 return ctx;
@@ -692,6 +704,7 @@ err_cleanup:
692err_ctx: 704err_ctx:
693 aio_free_ring(ctx); 705 aio_free_ring(ctx);
694err: 706err:
707 mutex_unlock(&ctx->ring_lock);
695 free_percpu(ctx->cpu); 708 free_percpu(ctx->cpu);
696 free_percpu(ctx->reqs.pcpu_count); 709 free_percpu(ctx->reqs.pcpu_count);
697 free_percpu(ctx->users.pcpu_count); 710 free_percpu(ctx->users.pcpu_count);
@@ -1024,6 +1037,7 @@ static long aio_read_events_ring(struct kioctx *ctx,
1024 1037
1025 mutex_lock(&ctx->ring_lock); 1038 mutex_lock(&ctx->ring_lock);
1026 1039
1040 /* Access to ->ring_pages here is protected by ctx->ring_lock. */
1027 ring = kmap_atomic(ctx->ring_pages[0]); 1041 ring = kmap_atomic(ctx->ring_pages[0]);
1028 head = ring->head; 1042 head = ring->head;
1029 tail = ring->tail; 1043 tail = ring->tail;
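
The fs/aio.c rework above serializes page migration against ring setup and readers by taking ctx->ring_lock with mutex_trylock() in the migration path and simply reporting -EAGAIN when it is contended, while ioctx_alloc() holds the same mutex for the whole setup. A compressed userspace analogue with pthreads is sketched below; the -EAGAIN return and the "migrate"/"setup" roles are stand-ins, not the kernel API.

    #include <errno.h>
    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t ring_lock = PTHREAD_MUTEX_INITIALIZER;
    static int ring_slot = 1;            /* stands in for ring_pages[idx] */

    /* Migration path: back off instead of blocking if setup/readers hold
     * the lock. */
    static int migrate_slot(int new_val)
    {
        if (pthread_mutex_trylock(&ring_lock) != 0)
            return -EAGAIN;              /* caller retries later */
        ring_slot = new_val;             /* safe: nobody else can touch it */
        pthread_mutex_unlock(&ring_lock);
        return 0;
    }

    /* Setup path: hold the lock across the whole initialization so the
     * half-built state above is never observed by migrate_slot(). */
    static void setup_ring(void)
    {
        pthread_mutex_lock(&ring_lock);
        ring_slot = 0;                   /* partially initialized state */
        ring_slot = 1;                   /* ...finished */
        pthread_mutex_unlock(&ring_lock);
    }

    int main(void)
    {
        setup_ring();
        printf("migrate: %d\n", migrate_slot(2));  /* 0 on success */
        return 0;
    }
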
diff --git a/fs/bio.c b/fs/bio.c
index b1bc722b89aa..6f0362b77806 100644
--- a/fs/bio.c
+++ b/fs/bio.c
@@ -1002,7 +1002,7 @@ struct bio_map_data {
1002}; 1002};
1003 1003
1004static void bio_set_map_data(struct bio_map_data *bmd, struct bio *bio, 1004static void bio_set_map_data(struct bio_map_data *bmd, struct bio *bio,
1005 struct sg_iovec *iov, int iov_count, 1005 const struct sg_iovec *iov, int iov_count,
1006 int is_our_pages) 1006 int is_our_pages)
1007{ 1007{
1008 memcpy(bmd->sgvecs, iov, sizeof(struct sg_iovec) * iov_count); 1008 memcpy(bmd->sgvecs, iov, sizeof(struct sg_iovec) * iov_count);
@@ -1022,7 +1022,7 @@ static struct bio_map_data *bio_alloc_map_data(int nr_segs,
1022 sizeof(struct sg_iovec) * iov_count, gfp_mask); 1022 sizeof(struct sg_iovec) * iov_count, gfp_mask);
1023} 1023}
1024 1024
1025static int __bio_copy_iov(struct bio *bio, struct sg_iovec *iov, int iov_count, 1025static int __bio_copy_iov(struct bio *bio, const struct sg_iovec *iov, int iov_count,
1026 int to_user, int from_user, int do_free_page) 1026 int to_user, int from_user, int do_free_page)
1027{ 1027{
1028 int ret = 0, i; 1028 int ret = 0, i;
@@ -1120,7 +1120,7 @@ EXPORT_SYMBOL(bio_uncopy_user);
1120 */ 1120 */
1121struct bio *bio_copy_user_iov(struct request_queue *q, 1121struct bio *bio_copy_user_iov(struct request_queue *q,
1122 struct rq_map_data *map_data, 1122 struct rq_map_data *map_data,
1123 struct sg_iovec *iov, int iov_count, 1123 const struct sg_iovec *iov, int iov_count,
1124 int write_to_vm, gfp_t gfp_mask) 1124 int write_to_vm, gfp_t gfp_mask)
1125{ 1125{
1126 struct bio_map_data *bmd; 1126 struct bio_map_data *bmd;
@@ -1259,7 +1259,7 @@ EXPORT_SYMBOL(bio_copy_user);
1259 1259
1260static struct bio *__bio_map_user_iov(struct request_queue *q, 1260static struct bio *__bio_map_user_iov(struct request_queue *q,
1261 struct block_device *bdev, 1261 struct block_device *bdev,
1262 struct sg_iovec *iov, int iov_count, 1262 const struct sg_iovec *iov, int iov_count,
1263 int write_to_vm, gfp_t gfp_mask) 1263 int write_to_vm, gfp_t gfp_mask)
1264{ 1264{
1265 int i, j; 1265 int i, j;
@@ -1407,7 +1407,7 @@ EXPORT_SYMBOL(bio_map_user);
1407 * device. Returns an error pointer in case of error. 1407 * device. Returns an error pointer in case of error.
1408 */ 1408 */
1409struct bio *bio_map_user_iov(struct request_queue *q, struct block_device *bdev, 1409struct bio *bio_map_user_iov(struct request_queue *q, struct block_device *bdev,
1410 struct sg_iovec *iov, int iov_count, 1410 const struct sg_iovec *iov, int iov_count,
1411 int write_to_vm, gfp_t gfp_mask) 1411 int write_to_vm, gfp_t gfp_mask)
1412{ 1412{
1413 struct bio *bio; 1413 struct bio *bio;
diff --git a/fs/block_dev.c b/fs/block_dev.c
index ba0d2b05bb78..552a8d13bc32 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -1518,7 +1518,7 @@ ssize_t blkdev_aio_write(struct kiocb *iocb, const struct iovec *iov,
1518 BUG_ON(iocb->ki_pos != pos); 1518 BUG_ON(iocb->ki_pos != pos);
1519 1519
1520 blk_start_plug(&plug); 1520 blk_start_plug(&plug);
1521 ret = __generic_file_aio_write(iocb, iov, nr_segs, &iocb->ki_pos); 1521 ret = __generic_file_aio_write(iocb, iov, nr_segs);
1522 if (ret > 0) { 1522 if (ret > 0) {
1523 ssize_t err; 1523 ssize_t err;
1524 1524
diff --git a/fs/btrfs/async-thread.c b/fs/btrfs/async-thread.c
index ecb5832c0967..5a201d81049c 100644
--- a/fs/btrfs/async-thread.c
+++ b/fs/btrfs/async-thread.c
@@ -323,6 +323,8 @@ void btrfs_destroy_workqueue(struct btrfs_workqueue *wq)
323 323
324void btrfs_workqueue_set_max(struct btrfs_workqueue *wq, int max) 324void btrfs_workqueue_set_max(struct btrfs_workqueue *wq, int max)
325{ 325{
326 if (!wq)
327 return;
326 wq->normal->max_active = max; 328 wq->normal->max_active = max;
327 if (wq->high) 329 if (wq->high)
328 wq->high->max_active = max; 330 wq->high->max_active = max;
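
btrfs_workqueue_set_max() above now simply bails out when handed a NULL queue, so callers no longer have to check before adjusting limits. The same defensive-setter shape in a standalone sketch (types invented):

    #include <stdio.h>

    struct workqueue { int max_active; };

    /* Tolerate a NULL queue instead of dereferencing it. */
    static void workqueue_set_max(struct workqueue *wq, int max)
    {
        if (!wq)
            return;
        wq->max_active = max;
    }

    int main(void)
    {
        struct workqueue wq = { .max_active = 1 };

        workqueue_set_max(NULL, 8);      /* harmless no-op */
        workqueue_set_max(&wq, 8);
        printf("max_active=%d\n", wq.max_active);
        return 0;
    }
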
diff --git a/fs/btrfs/backref.c b/fs/btrfs/backref.c
index aad7201ad11b..10db21fa0926 100644
--- a/fs/btrfs/backref.c
+++ b/fs/btrfs/backref.c
@@ -330,7 +330,10 @@ static int __resolve_indirect_ref(struct btrfs_fs_info *fs_info,
330 goto out; 330 goto out;
331 } 331 }
332 332
333 root_level = btrfs_old_root_level(root, time_seq); 333 if (path->search_commit_root)
334 root_level = btrfs_header_level(root->commit_root);
335 else
336 root_level = btrfs_old_root_level(root, time_seq);
334 337
335 if (root_level + 1 == level) { 338 if (root_level + 1 == level) {
336 srcu_read_unlock(&fs_info->subvol_srcu, index); 339 srcu_read_unlock(&fs_info->subvol_srcu, index);
@@ -1099,9 +1102,9 @@ static int btrfs_find_all_leafs(struct btrfs_trans_handle *trans,
1099 * 1102 *
1100 * returns 0 on success, < 0 on error. 1103 * returns 0 on success, < 0 on error.
1101 */ 1104 */
1102int btrfs_find_all_roots(struct btrfs_trans_handle *trans, 1105static int __btrfs_find_all_roots(struct btrfs_trans_handle *trans,
1103 struct btrfs_fs_info *fs_info, u64 bytenr, 1106 struct btrfs_fs_info *fs_info, u64 bytenr,
1104 u64 time_seq, struct ulist **roots) 1107 u64 time_seq, struct ulist **roots)
1105{ 1108{
1106 struct ulist *tmp; 1109 struct ulist *tmp;
1107 struct ulist_node *node = NULL; 1110 struct ulist_node *node = NULL;
@@ -1137,6 +1140,20 @@ int btrfs_find_all_roots(struct btrfs_trans_handle *trans,
1137 return 0; 1140 return 0;
1138} 1141}
1139 1142
1143int btrfs_find_all_roots(struct btrfs_trans_handle *trans,
1144 struct btrfs_fs_info *fs_info, u64 bytenr,
1145 u64 time_seq, struct ulist **roots)
1146{
1147 int ret;
1148
1149 if (!trans)
1150 down_read(&fs_info->commit_root_sem);
1151 ret = __btrfs_find_all_roots(trans, fs_info, bytenr, time_seq, roots);
1152 if (!trans)
1153 up_read(&fs_info->commit_root_sem);
1154 return ret;
1155}
1156
1140/* 1157/*
1141 * this makes the path point to (inum INODE_ITEM ioff) 1158 * this makes the path point to (inum INODE_ITEM ioff)
1142 */ 1159 */
@@ -1516,6 +1533,8 @@ int iterate_extent_inodes(struct btrfs_fs_info *fs_info,
1516 if (IS_ERR(trans)) 1533 if (IS_ERR(trans))
1517 return PTR_ERR(trans); 1534 return PTR_ERR(trans);
1518 btrfs_get_tree_mod_seq(fs_info, &tree_mod_seq_elem); 1535 btrfs_get_tree_mod_seq(fs_info, &tree_mod_seq_elem);
1536 } else {
1537 down_read(&fs_info->commit_root_sem);
1519 } 1538 }
1520 1539
1521 ret = btrfs_find_all_leafs(trans, fs_info, extent_item_objectid, 1540 ret = btrfs_find_all_leafs(trans, fs_info, extent_item_objectid,
@@ -1526,8 +1545,8 @@ int iterate_extent_inodes(struct btrfs_fs_info *fs_info,
1526 1545
1527 ULIST_ITER_INIT(&ref_uiter); 1546 ULIST_ITER_INIT(&ref_uiter);
1528 while (!ret && (ref_node = ulist_next(refs, &ref_uiter))) { 1547 while (!ret && (ref_node = ulist_next(refs, &ref_uiter))) {
1529 ret = btrfs_find_all_roots(trans, fs_info, ref_node->val, 1548 ret = __btrfs_find_all_roots(trans, fs_info, ref_node->val,
1530 tree_mod_seq_elem.seq, &roots); 1549 tree_mod_seq_elem.seq, &roots);
1531 if (ret) 1550 if (ret)
1532 break; 1551 break;
1533 ULIST_ITER_INIT(&root_uiter); 1552 ULIST_ITER_INIT(&root_uiter);
@@ -1549,6 +1568,8 @@ out:
1549 if (!search_commit_root) { 1568 if (!search_commit_root) {
1550 btrfs_put_tree_mod_seq(fs_info, &tree_mod_seq_elem); 1569 btrfs_put_tree_mod_seq(fs_info, &tree_mod_seq_elem);
1551 btrfs_end_transaction(trans, fs_info->extent_root); 1570 btrfs_end_transaction(trans, fs_info->extent_root);
1571 } else {
1572 up_read(&fs_info->commit_root_sem);
1552 } 1573 }
1553 1574
1554 return ret; 1575 return ret;
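
btrfs_find_all_roots() above becomes a thin wrapper: when no transaction is supplied it takes commit_root_sem for reading around the real worker (__btrfs_find_all_roots), while callers that already hold the lock call the double-underscore variant directly. A userspace rendering of that wrapper-plus-locked-inner pattern, using a pthread rwlock and placeholder names:

    #include <pthread.h>
    #include <stdio.h>

    static pthread_rwlock_t commit_root_sem = PTHREAD_RWLOCK_INITIALIZER;
    static int shared_roots = 3;         /* data the lock protects */

    /* Inner worker: assumes the caller already holds the lock (or owns a
     * transaction that keeps the data stable). */
    static int __find_all_roots(const void *trans, int *out)
    {
        (void)trans;
        *out = shared_roots;
        return 0;
    }

    /* Public wrapper: take the read lock only when there is no transaction. */
    static int find_all_roots(const void *trans, int *out)
    {
        int ret;

        if (!trans)
            pthread_rwlock_rdlock(&commit_root_sem);
        ret = __find_all_roots(trans, out);
        if (!trans)
            pthread_rwlock_unlock(&commit_root_sem);
        return ret;
    }

    int main(void)
    {
        int roots;

        find_all_roots(NULL, &roots);    /* lockless caller, wrapper locks */
        printf("roots=%d\n", roots);
        return 0;
    }
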
diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
index 88d1b1eedc9c..1bcfcdb23cf4 100644
--- a/fs/btrfs/ctree.c
+++ b/fs/btrfs/ctree.c
@@ -2769,9 +2769,13 @@ again:
2769 * the commit roots are read only 2769 * the commit roots are read only
2770 * so we always do read locks 2770 * so we always do read locks
2771 */ 2771 */
2772 if (p->need_commit_sem)
2773 down_read(&root->fs_info->commit_root_sem);
2772 b = root->commit_root; 2774 b = root->commit_root;
2773 extent_buffer_get(b); 2775 extent_buffer_get(b);
2774 level = btrfs_header_level(b); 2776 level = btrfs_header_level(b);
2777 if (p->need_commit_sem)
2778 up_read(&root->fs_info->commit_root_sem);
2775 if (!p->skip_locking) 2779 if (!p->skip_locking)
2776 btrfs_tree_read_lock(b); 2780 btrfs_tree_read_lock(b);
2777 } else { 2781 } else {
@@ -5360,7 +5364,6 @@ int btrfs_compare_trees(struct btrfs_root *left_root,
5360{ 5364{
5361 int ret; 5365 int ret;
5362 int cmp; 5366 int cmp;
5363 struct btrfs_trans_handle *trans = NULL;
5364 struct btrfs_path *left_path = NULL; 5367 struct btrfs_path *left_path = NULL;
5365 struct btrfs_path *right_path = NULL; 5368 struct btrfs_path *right_path = NULL;
5366 struct btrfs_key left_key; 5369 struct btrfs_key left_key;
@@ -5378,9 +5381,6 @@ int btrfs_compare_trees(struct btrfs_root *left_root,
5378 u64 right_blockptr; 5381 u64 right_blockptr;
5379 u64 left_gen; 5382 u64 left_gen;
5380 u64 right_gen; 5383 u64 right_gen;
5381 u64 left_start_ctransid;
5382 u64 right_start_ctransid;
5383 u64 ctransid;
5384 5384
5385 left_path = btrfs_alloc_path(); 5385 left_path = btrfs_alloc_path();
5386 if (!left_path) { 5386 if (!left_path) {
@@ -5404,21 +5404,6 @@ int btrfs_compare_trees(struct btrfs_root *left_root,
5404 right_path->search_commit_root = 1; 5404 right_path->search_commit_root = 1;
5405 right_path->skip_locking = 1; 5405 right_path->skip_locking = 1;
5406 5406
5407 spin_lock(&left_root->root_item_lock);
5408 left_start_ctransid = btrfs_root_ctransid(&left_root->root_item);
5409 spin_unlock(&left_root->root_item_lock);
5410
5411 spin_lock(&right_root->root_item_lock);
5412 right_start_ctransid = btrfs_root_ctransid(&right_root->root_item);
5413 spin_unlock(&right_root->root_item_lock);
5414
5415 trans = btrfs_join_transaction(left_root);
5416 if (IS_ERR(trans)) {
5417 ret = PTR_ERR(trans);
5418 trans = NULL;
5419 goto out;
5420 }
5421
5422 /* 5407 /*
5423 * Strategy: Go to the first items of both trees. Then do 5408 * Strategy: Go to the first items of both trees. Then do
5424 * 5409 *
@@ -5455,6 +5440,7 @@ int btrfs_compare_trees(struct btrfs_root *left_root,
5455 * the right if possible or go up and right. 5440 * the right if possible or go up and right.
5456 */ 5441 */
5457 5442
5443 down_read(&left_root->fs_info->commit_root_sem);
5458 left_level = btrfs_header_level(left_root->commit_root); 5444 left_level = btrfs_header_level(left_root->commit_root);
5459 left_root_level = left_level; 5445 left_root_level = left_level;
5460 left_path->nodes[left_level] = left_root->commit_root; 5446 left_path->nodes[left_level] = left_root->commit_root;
@@ -5464,6 +5450,7 @@ int btrfs_compare_trees(struct btrfs_root *left_root,
5464 right_root_level = right_level; 5450 right_root_level = right_level;
5465 right_path->nodes[right_level] = right_root->commit_root; 5451 right_path->nodes[right_level] = right_root->commit_root;
5466 extent_buffer_get(right_path->nodes[right_level]); 5452 extent_buffer_get(right_path->nodes[right_level]);
5453 up_read(&left_root->fs_info->commit_root_sem);
5467 5454
5468 if (left_level == 0) 5455 if (left_level == 0)
5469 btrfs_item_key_to_cpu(left_path->nodes[left_level], 5456 btrfs_item_key_to_cpu(left_path->nodes[left_level],
@@ -5482,67 +5469,6 @@ int btrfs_compare_trees(struct btrfs_root *left_root,
5482 advance_left = advance_right = 0; 5469 advance_left = advance_right = 0;
5483 5470
5484 while (1) { 5471 while (1) {
5485 /*
5486 * We need to make sure the transaction does not get committed
5487 * while we do anything on commit roots. This means, we need to
5488 * join and leave transactions for every item that we process.
5489 */
5490 if (trans && btrfs_should_end_transaction(trans, left_root)) {
5491 btrfs_release_path(left_path);
5492 btrfs_release_path(right_path);
5493
5494 ret = btrfs_end_transaction(trans, left_root);
5495 trans = NULL;
5496 if (ret < 0)
5497 goto out;
5498 }
5499 /* now rejoin the transaction */
5500 if (!trans) {
5501 trans = btrfs_join_transaction(left_root);
5502 if (IS_ERR(trans)) {
5503 ret = PTR_ERR(trans);
5504 trans = NULL;
5505 goto out;
5506 }
5507
5508 spin_lock(&left_root->root_item_lock);
5509 ctransid = btrfs_root_ctransid(&left_root->root_item);
5510 spin_unlock(&left_root->root_item_lock);
5511 if (ctransid != left_start_ctransid)
5512 left_start_ctransid = 0;
5513
5514 spin_lock(&right_root->root_item_lock);
5515 ctransid = btrfs_root_ctransid(&right_root->root_item);
5516 spin_unlock(&right_root->root_item_lock);
5517 if (ctransid != right_start_ctransid)
5518 right_start_ctransid = 0;
5519
5520 if (!left_start_ctransid || !right_start_ctransid) {
5521 WARN(1, KERN_WARNING
5522 "BTRFS: btrfs_compare_tree detected "
5523 "a change in one of the trees while "
5524 "iterating. This is probably a "
5525 "bug.\n");
5526 ret = -EIO;
5527 goto out;
5528 }
5529
5530 /*
5531 * the commit root may have changed, so start again
5532 * where we stopped
5533 */
5534 left_path->lowest_level = left_level;
5535 right_path->lowest_level = right_level;
5536 ret = btrfs_search_slot(NULL, left_root,
5537 &left_key, left_path, 0, 0);
5538 if (ret < 0)
5539 goto out;
5540 ret = btrfs_search_slot(NULL, right_root,
5541 &right_key, right_path, 0, 0);
5542 if (ret < 0)
5543 goto out;
5544 }
5545
5546 if (advance_left && !left_end_reached) { 5472 if (advance_left && !left_end_reached) {
5547 ret = tree_advance(left_root, left_path, &left_level, 5473 ret = tree_advance(left_root, left_path, &left_level,
5548 left_root_level, 5474 left_root_level,
@@ -5672,14 +5598,6 @@ out:
5672 btrfs_free_path(left_path); 5598 btrfs_free_path(left_path);
5673 btrfs_free_path(right_path); 5599 btrfs_free_path(right_path);
5674 kfree(tmp_buf); 5600 kfree(tmp_buf);
5675
5676 if (trans) {
5677 if (!ret)
5678 ret = btrfs_end_transaction(trans, left_root);
5679 else
5680 btrfs_end_transaction(trans, left_root);
5681 }
5682
5683 return ret; 5601 return ret;
5684} 5602}
5685 5603
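
With the transaction-join machinery removed, btrfs_compare_trees() (and btrfs_search_slot() via the new need_commit_sem path flag) pins the commit roots by taking commit_root_sem for reading just long enough to grab an extra reference, then drops the lock before the long walk. The sketch below shows that grab-a-reference-under-a-short-read-lock shape with a pthread rwlock and a hand-rolled refcount; it is an analogue, not the btrfs code.

    #include <pthread.h>
    #include <stdio.h>

    struct node {
        int refs;        /* a real implementation would use an atomic count */
        int level;
    };

    static pthread_rwlock_t commit_root_sem = PTHREAD_RWLOCK_INITIALIZER;
    static struct node commit_root = { .refs = 1, .level = 2 };
    static struct node *current_commit_root = &commit_root;

    /* Take the lock only long enough to pin the current commit root. */
    static struct node *grab_commit_root(void)
    {
        struct node *n;

        pthread_rwlock_rdlock(&commit_root_sem);
        n = current_commit_root;
        n->refs++;                       /* extent_buffer_get() analogue */
        pthread_rwlock_unlock(&commit_root_sem);
        return n;
    }

    static void drop_ref(struct node *n)
    {
        n->refs--;
    }

    int main(void)
    {
        struct node *root = grab_commit_root();

        /* ...a long, lock-free comparison walk would happen here... */
        printf("walking root at level %d (refs=%d)\n", root->level, root->refs);
        drop_ref(root);
        return 0;
    }
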
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index bc96c03dd259..4c48df572bd6 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -609,6 +609,7 @@ struct btrfs_path {
609 unsigned int skip_locking:1; 609 unsigned int skip_locking:1;
610 unsigned int leave_spinning:1; 610 unsigned int leave_spinning:1;
611 unsigned int search_commit_root:1; 611 unsigned int search_commit_root:1;
612 unsigned int need_commit_sem:1;
612}; 613};
613 614
614/* 615/*
@@ -986,7 +987,8 @@ struct btrfs_dev_replace_item {
986#define BTRFS_BLOCK_GROUP_RAID10 (1ULL << 6) 987#define BTRFS_BLOCK_GROUP_RAID10 (1ULL << 6)
987#define BTRFS_BLOCK_GROUP_RAID5 (1ULL << 7) 988#define BTRFS_BLOCK_GROUP_RAID5 (1ULL << 7)
988#define BTRFS_BLOCK_GROUP_RAID6 (1ULL << 8) 989#define BTRFS_BLOCK_GROUP_RAID6 (1ULL << 8)
989#define BTRFS_BLOCK_GROUP_RESERVED BTRFS_AVAIL_ALLOC_BIT_SINGLE 990#define BTRFS_BLOCK_GROUP_RESERVED (BTRFS_AVAIL_ALLOC_BIT_SINGLE | \
991 BTRFS_SPACE_INFO_GLOBAL_RSV)
990 992
991enum btrfs_raid_types { 993enum btrfs_raid_types {
992 BTRFS_RAID_RAID10, 994 BTRFS_RAID_RAID10,
@@ -1018,6 +1020,12 @@ enum btrfs_raid_types {
1018 */ 1020 */
1019#define BTRFS_AVAIL_ALLOC_BIT_SINGLE (1ULL << 48) 1021#define BTRFS_AVAIL_ALLOC_BIT_SINGLE (1ULL << 48)
1020 1022
1023/*
1024 * A fake block group type that is used to communicate global block reserve
1025 * size to userspace via the SPACE_INFO ioctl.
1026 */
1027#define BTRFS_SPACE_INFO_GLOBAL_RSV (1ULL << 49)
1028
1021#define BTRFS_EXTENDED_PROFILE_MASK (BTRFS_BLOCK_GROUP_PROFILE_MASK | \ 1029#define BTRFS_EXTENDED_PROFILE_MASK (BTRFS_BLOCK_GROUP_PROFILE_MASK | \
1022 BTRFS_AVAIL_ALLOC_BIT_SINGLE) 1030 BTRFS_AVAIL_ALLOC_BIT_SINGLE)
1023 1031
@@ -1440,7 +1448,7 @@ struct btrfs_fs_info {
1440 */ 1448 */
1441 struct mutex ordered_extent_flush_mutex; 1449 struct mutex ordered_extent_flush_mutex;
1442 1450
1443 struct rw_semaphore extent_commit_sem; 1451 struct rw_semaphore commit_root_sem;
1444 1452
1445 struct rw_semaphore cleanup_work_sem; 1453 struct rw_semaphore cleanup_work_sem;
1446 1454
@@ -1711,7 +1719,6 @@ struct btrfs_root {
1711 struct btrfs_block_rsv *block_rsv; 1719 struct btrfs_block_rsv *block_rsv;
1712 1720
1713 /* free ino cache stuff */ 1721 /* free ino cache stuff */
1714 struct mutex fs_commit_mutex;
1715 struct btrfs_free_space_ctl *free_ino_ctl; 1722 struct btrfs_free_space_ctl *free_ino_ctl;
1716 enum btrfs_caching_type cached; 1723 enum btrfs_caching_type cached;
1717 spinlock_t cache_lock; 1724 spinlock_t cache_lock;
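
The ctree.h hunk reserves a new synthetic block-group bit, BTRFS_SPACE_INFO_GLOBAL_RSV at 1ULL << 49, and folds it into BTRFS_BLOCK_GROUP_RESERVED next to BTRFS_AVAIL_ALLOC_BIT_SINGLE so the value can never collide with an on-disk type. A tiny standalone illustration of reserving and testing such high flag bits (the bit values are copied from the hunk; the validity helper is made up):

    #include <stdio.h>

    /* High bits reserved for in-memory/ioctl-only meaning, as in the hunk. */
    #define AVAIL_ALLOC_BIT_SINGLE   (1ULL << 48)
    #define SPACE_INFO_GLOBAL_RSV    (1ULL << 49)
    #define BLOCK_GROUP_RESERVED     (AVAIL_ALLOC_BIT_SINGLE | \
                                      SPACE_INFO_GLOBAL_RSV)

    /* Reject any flags value that uses a reserved bit. */
    static int flags_valid_on_disk(unsigned long long flags)
    {
        return (flags & BLOCK_GROUP_RESERVED) == 0;
    }

    int main(void)
    {
        printf("0x40 ok? %d\n", flags_valid_on_disk(0x40ULL));
        printf("global rsv ok? %d\n",
               flags_valid_on_disk(SPACE_INFO_GLOBAL_RSV));
        return 0;
    }
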
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index bd0f752b797b..029d46c2e170 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -329,6 +329,8 @@ static int verify_parent_transid(struct extent_io_tree *io_tree,
329{ 329{
330 struct extent_state *cached_state = NULL; 330 struct extent_state *cached_state = NULL;
331 int ret; 331 int ret;
332 bool need_lock = (current->journal_info ==
333 (void *)BTRFS_SEND_TRANS_STUB);
332 334
333 if (!parent_transid || btrfs_header_generation(eb) == parent_transid) 335 if (!parent_transid || btrfs_header_generation(eb) == parent_transid)
334 return 0; 336 return 0;
@@ -336,6 +338,11 @@ static int verify_parent_transid(struct extent_io_tree *io_tree,
336 if (atomic) 338 if (atomic)
337 return -EAGAIN; 339 return -EAGAIN;
338 340
341 if (need_lock) {
342 btrfs_tree_read_lock(eb);
343 btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);
344 }
345
339 lock_extent_bits(io_tree, eb->start, eb->start + eb->len - 1, 346 lock_extent_bits(io_tree, eb->start, eb->start + eb->len - 1,
340 0, &cached_state); 347 0, &cached_state);
341 if (extent_buffer_uptodate(eb) && 348 if (extent_buffer_uptodate(eb) &&
@@ -347,10 +354,21 @@ static int verify_parent_transid(struct extent_io_tree *io_tree,
347 "found %llu\n", 354 "found %llu\n",
348 eb->start, parent_transid, btrfs_header_generation(eb)); 355 eb->start, parent_transid, btrfs_header_generation(eb));
349 ret = 1; 356 ret = 1;
350 clear_extent_buffer_uptodate(eb); 357
358 /*
359 * Things reading via commit roots that don't have normal protection,
360 * like send, can have a really old block in cache that may point at a
361 * block that has been free'd and re-allocated. So don't clear uptodate
362 * if we find an eb that is under IO (dirty/writeback) because we could
363 * end up reading in the stale data and then writing it back out and
364 * making everybody very sad.
365 */
366 if (!extent_buffer_under_io(eb))
367 clear_extent_buffer_uptodate(eb);
351out: 368out:
352 unlock_extent_cached(io_tree, eb->start, eb->start + eb->len - 1, 369 unlock_extent_cached(io_tree, eb->start, eb->start + eb->len - 1,
353 &cached_state, GFP_NOFS); 370 &cached_state, GFP_NOFS);
371 btrfs_tree_read_unlock_blocking(eb);
354 return ret; 372 return ret;
355} 373}
356 374
@@ -1546,7 +1564,6 @@ int btrfs_init_fs_root(struct btrfs_root *root)
1546 root->subv_writers = writers; 1564 root->subv_writers = writers;
1547 1565
1548 btrfs_init_free_ino_ctl(root); 1566 btrfs_init_free_ino_ctl(root);
1549 mutex_init(&root->fs_commit_mutex);
1550 spin_lock_init(&root->cache_lock); 1567 spin_lock_init(&root->cache_lock);
1551 init_waitqueue_head(&root->cache_wait); 1568 init_waitqueue_head(&root->cache_wait);
1552 1569
@@ -2324,7 +2341,7 @@ int open_ctree(struct super_block *sb,
2324 mutex_init(&fs_info->transaction_kthread_mutex); 2341 mutex_init(&fs_info->transaction_kthread_mutex);
2325 mutex_init(&fs_info->cleaner_mutex); 2342 mutex_init(&fs_info->cleaner_mutex);
2326 mutex_init(&fs_info->volume_mutex); 2343 mutex_init(&fs_info->volume_mutex);
2327 init_rwsem(&fs_info->extent_commit_sem); 2344 init_rwsem(&fs_info->commit_root_sem);
2328 init_rwsem(&fs_info->cleanup_work_sem); 2345 init_rwsem(&fs_info->cleanup_work_sem);
2329 init_rwsem(&fs_info->subvol_sem); 2346 init_rwsem(&fs_info->subvol_sem);
2330 sema_init(&fs_info->uuid_tree_rescan_sem, 1); 2347 sema_init(&fs_info->uuid_tree_rescan_sem, 1);
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index c6b6a6e3e735..1306487c82cf 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -419,7 +419,7 @@ static noinline void caching_thread(struct btrfs_work *work)
419again: 419again:
420 mutex_lock(&caching_ctl->mutex); 420 mutex_lock(&caching_ctl->mutex);
421 /* need to make sure the commit_root doesn't disappear */ 421 /* need to make sure the commit_root doesn't disappear */
422 down_read(&fs_info->extent_commit_sem); 422 down_read(&fs_info->commit_root_sem);
423 423
424next: 424next:
425 ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0); 425 ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
@@ -443,10 +443,10 @@ next:
443 break; 443 break;
444 444
445 if (need_resched() || 445 if (need_resched() ||
446 rwsem_is_contended(&fs_info->extent_commit_sem)) { 446 rwsem_is_contended(&fs_info->commit_root_sem)) {
447 caching_ctl->progress = last; 447 caching_ctl->progress = last;
448 btrfs_release_path(path); 448 btrfs_release_path(path);
449 up_read(&fs_info->extent_commit_sem); 449 up_read(&fs_info->commit_root_sem);
450 mutex_unlock(&caching_ctl->mutex); 450 mutex_unlock(&caching_ctl->mutex);
451 cond_resched(); 451 cond_resched();
452 goto again; 452 goto again;
@@ -513,7 +513,7 @@ next:
513 513
514err: 514err:
515 btrfs_free_path(path); 515 btrfs_free_path(path);
516 up_read(&fs_info->extent_commit_sem); 516 up_read(&fs_info->commit_root_sem);
517 517
518 free_excluded_extents(extent_root, block_group); 518 free_excluded_extents(extent_root, block_group);
519 519
@@ -633,10 +633,10 @@ static int cache_block_group(struct btrfs_block_group_cache *cache,
633 return 0; 633 return 0;
634 } 634 }
635 635
636 down_write(&fs_info->extent_commit_sem); 636 down_write(&fs_info->commit_root_sem);
637 atomic_inc(&caching_ctl->count); 637 atomic_inc(&caching_ctl->count);
638 list_add_tail(&caching_ctl->list, &fs_info->caching_block_groups); 638 list_add_tail(&caching_ctl->list, &fs_info->caching_block_groups);
639 up_write(&fs_info->extent_commit_sem); 639 up_write(&fs_info->commit_root_sem);
640 640
641 btrfs_get_block_group(cache); 641 btrfs_get_block_group(cache);
642 642
@@ -2444,7 +2444,8 @@ static noinline int __btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
2444 spin_unlock(&locked_ref->lock); 2444 spin_unlock(&locked_ref->lock);
2445 spin_lock(&delayed_refs->lock); 2445 spin_lock(&delayed_refs->lock);
2446 spin_lock(&locked_ref->lock); 2446 spin_lock(&locked_ref->lock);
2447 if (rb_first(&locked_ref->ref_root)) { 2447 if (rb_first(&locked_ref->ref_root) ||
2448 locked_ref->extent_op) {
2448 spin_unlock(&locked_ref->lock); 2449 spin_unlock(&locked_ref->lock);
2449 spin_unlock(&delayed_refs->lock); 2450 spin_unlock(&delayed_refs->lock);
2450 continue; 2451 continue;
@@ -5470,7 +5471,7 @@ void btrfs_prepare_extent_commit(struct btrfs_trans_handle *trans,
5470 struct btrfs_block_group_cache *cache; 5471 struct btrfs_block_group_cache *cache;
5471 struct btrfs_space_info *space_info; 5472 struct btrfs_space_info *space_info;
5472 5473
5473 down_write(&fs_info->extent_commit_sem); 5474 down_write(&fs_info->commit_root_sem);
5474 5475
5475 list_for_each_entry_safe(caching_ctl, next, 5476 list_for_each_entry_safe(caching_ctl, next,
5476 &fs_info->caching_block_groups, list) { 5477 &fs_info->caching_block_groups, list) {
@@ -5489,7 +5490,7 @@ void btrfs_prepare_extent_commit(struct btrfs_trans_handle *trans,
5489 else 5490 else
5490 fs_info->pinned_extents = &fs_info->freed_extents[0]; 5491 fs_info->pinned_extents = &fs_info->freed_extents[0];
5491 5492
5492 up_write(&fs_info->extent_commit_sem); 5493 up_write(&fs_info->commit_root_sem);
5493 5494
5494 list_for_each_entry_rcu(space_info, &fs_info->space_info, list) 5495 list_for_each_entry_rcu(space_info, &fs_info->space_info, list)
5495 percpu_counter_set(&space_info->total_bytes_pinned, 0); 5496 percpu_counter_set(&space_info->total_bytes_pinned, 0);
@@ -5744,6 +5745,8 @@ static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
5744 "unable to find ref byte nr %llu parent %llu root %llu owner %llu offset %llu", 5745 "unable to find ref byte nr %llu parent %llu root %llu owner %llu offset %llu",
5745 bytenr, parent, root_objectid, owner_objectid, 5746 bytenr, parent, root_objectid, owner_objectid,
5746 owner_offset); 5747 owner_offset);
5748 btrfs_abort_transaction(trans, extent_root, ret);
5749 goto out;
5747 } else { 5750 } else {
5748 btrfs_abort_transaction(trans, extent_root, ret); 5751 btrfs_abort_transaction(trans, extent_root, ret);
5749 goto out; 5752 goto out;
@@ -8255,14 +8258,14 @@ int btrfs_free_block_groups(struct btrfs_fs_info *info)
8255 struct btrfs_caching_control *caching_ctl; 8258 struct btrfs_caching_control *caching_ctl;
8256 struct rb_node *n; 8259 struct rb_node *n;
8257 8260
8258 down_write(&info->extent_commit_sem); 8261 down_write(&info->commit_root_sem);
8259 while (!list_empty(&info->caching_block_groups)) { 8262 while (!list_empty(&info->caching_block_groups)) {
8260 caching_ctl = list_entry(info->caching_block_groups.next, 8263 caching_ctl = list_entry(info->caching_block_groups.next,
8261 struct btrfs_caching_control, list); 8264 struct btrfs_caching_control, list);
8262 list_del(&caching_ctl->list); 8265 list_del(&caching_ctl->list);
8263 put_caching_control(caching_ctl); 8266 put_caching_control(caching_ctl);
8264 } 8267 }
8265 up_write(&info->extent_commit_sem); 8268 up_write(&info->commit_root_sem);
8266 8269
8267 spin_lock(&info->block_group_cache_lock); 8270 spin_lock(&info->block_group_cache_lock);
8268 while ((n = rb_last(&info->block_group_cache_tree)) != NULL) { 8271 while ((n = rb_last(&info->block_group_cache_tree)) != NULL) {
@@ -8336,9 +8339,15 @@ static void __link_block_group(struct btrfs_space_info *space_info,
8336 struct btrfs_block_group_cache *cache) 8339 struct btrfs_block_group_cache *cache)
8337{ 8340{
8338 int index = get_block_group_index(cache); 8341 int index = get_block_group_index(cache);
8342 bool first = false;
8339 8343
8340 down_write(&space_info->groups_sem); 8344 down_write(&space_info->groups_sem);
8341 if (list_empty(&space_info->block_groups[index])) { 8345 if (list_empty(&space_info->block_groups[index]))
8346 first = true;
8347 list_add_tail(&cache->list, &space_info->block_groups[index]);
8348 up_write(&space_info->groups_sem);
8349
8350 if (first) {
8342 struct kobject *kobj = &space_info->block_group_kobjs[index]; 8351 struct kobject *kobj = &space_info->block_group_kobjs[index];
8343 int ret; 8352 int ret;
8344 8353
@@ -8350,8 +8359,6 @@ static void __link_block_group(struct btrfs_space_info *space_info,
8350 kobject_put(&space_info->kobj); 8359 kobject_put(&space_info->kobj);
8351 } 8360 }
8352 } 8361 }
8353 list_add_tail(&cache->list, &space_info->block_groups[index]);
8354 up_write(&space_info->groups_sem);
8355} 8362}
8356 8363
8357static struct btrfs_block_group_cache * 8364static struct btrfs_block_group_cache *
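
__link_block_group() above now decides whether the list was empty while still holding groups_sem, records that in a local "first" flag, adds the entry, drops the semaphore, and only then performs the slower kobject setup. A userspace sketch of doing the cheap test-and-insert under the lock and the heavyweight side effect outside it (pthread rwlock, invented types):

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    struct entry { struct entry *next; };

    static pthread_rwlock_t groups_sem = PTHREAD_RWLOCK_INITIALIZER;
    static struct entry *group_list;     /* protected by groups_sem */

    /* Expensive one-time setup, deliberately kept outside the lock. */
    static void announce_first_group(void)
    {
        printf("creating sysfs-like object for first group\n");
    }

    static void link_group(struct entry *e)
    {
        bool first;

        pthread_rwlock_wrlock(&groups_sem);
        first = (group_list == NULL);    /* decide while still locked */
        e->next = group_list;
        group_list = e;
        pthread_rwlock_unlock(&groups_sem);

        if (first)                       /* slow work after unlocking */
            announce_first_group();
    }

    int main(void)
    {
        static struct entry a, b;

        link_group(&a);                  /* triggers the one-time setup */
        link_group(&b);                  /* does not */
        return 0;
    }
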
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index ae69a00387e7..3955e475ceec 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -749,6 +749,7 @@ again:
749 * our range starts 749 * our range starts
750 */ 750 */
751 node = tree_search(tree, start); 751 node = tree_search(tree, start);
752process_node:
752 if (!node) 753 if (!node)
753 break; 754 break;
754 755
@@ -769,7 +770,10 @@ again:
769 if (start > end) 770 if (start > end)
770 break; 771 break;
771 772
772 cond_resched_lock(&tree->lock); 773 if (!cond_resched_lock(&tree->lock)) {
774 node = rb_next(node);
775 goto process_node;
776 }
773 } 777 }
774out: 778out:
775 spin_unlock(&tree->lock); 779 spin_unlock(&tree->lock);
@@ -4306,7 +4310,7 @@ static void __free_extent_buffer(struct extent_buffer *eb)
4306 kmem_cache_free(extent_buffer_cache, eb); 4310 kmem_cache_free(extent_buffer_cache, eb);
4307} 4311}
4308 4312
4309static int extent_buffer_under_io(struct extent_buffer *eb) 4313int extent_buffer_under_io(struct extent_buffer *eb)
4310{ 4314{
4311 return (atomic_read(&eb->io_pages) || 4315 return (atomic_read(&eb->io_pages) ||
4312 test_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags) || 4316 test_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags) ||
diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h
index 58b27e5ab521..c488b45237bf 100644
--- a/fs/btrfs/extent_io.h
+++ b/fs/btrfs/extent_io.h
@@ -320,6 +320,7 @@ int set_extent_buffer_dirty(struct extent_buffer *eb);
320int set_extent_buffer_uptodate(struct extent_buffer *eb); 320int set_extent_buffer_uptodate(struct extent_buffer *eb);
321int clear_extent_buffer_uptodate(struct extent_buffer *eb); 321int clear_extent_buffer_uptodate(struct extent_buffer *eb);
322int extent_buffer_uptodate(struct extent_buffer *eb); 322int extent_buffer_uptodate(struct extent_buffer *eb);
323int extent_buffer_under_io(struct extent_buffer *eb);
323int map_private_extent_buffer(struct extent_buffer *eb, unsigned long offset, 324int map_private_extent_buffer(struct extent_buffer *eb, unsigned long offset,
324 unsigned long min_len, char **map, 325 unsigned long min_len, char **map,
325 unsigned long *map_start, 326 unsigned long *map_start,
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index c660527af838..eb742c07e7a4 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -425,13 +425,8 @@ static noinline int btrfs_copy_from_user(loff_t pos, int num_pages,
425 struct page *page = prepared_pages[pg]; 425 struct page *page = prepared_pages[pg];
426 /* 426 /*
427 * Copy data from userspace to the current page 427 * Copy data from userspace to the current page
428 *
429 * Disable pagefault to avoid recursive lock since
430 * the pages are already locked
431 */ 428 */
432 pagefault_disable();
433 copied = iov_iter_copy_from_user_atomic(page, i, offset, count); 429 copied = iov_iter_copy_from_user_atomic(page, i, offset, count);
434 pagefault_enable();
435 430
436 /* Flush processor's dcache for this page */ 431 /* Flush processor's dcache for this page */
437 flush_dcache_page(page); 432 flush_dcache_page(page);
@@ -1665,7 +1660,7 @@ again:
1665static ssize_t __btrfs_direct_write(struct kiocb *iocb, 1660static ssize_t __btrfs_direct_write(struct kiocb *iocb,
1666 const struct iovec *iov, 1661 const struct iovec *iov,
1667 unsigned long nr_segs, loff_t pos, 1662 unsigned long nr_segs, loff_t pos,
1668 loff_t *ppos, size_t count, size_t ocount) 1663 size_t count, size_t ocount)
1669{ 1664{
1670 struct file *file = iocb->ki_filp; 1665 struct file *file = iocb->ki_filp;
1671 struct iov_iter i; 1666 struct iov_iter i;
@@ -1674,7 +1669,7 @@ static ssize_t __btrfs_direct_write(struct kiocb *iocb,
1674 loff_t endbyte; 1669 loff_t endbyte;
1675 int err; 1670 int err;
1676 1671
1677 written = generic_file_direct_write(iocb, iov, &nr_segs, pos, ppos, 1672 written = generic_file_direct_write(iocb, iov, &nr_segs, pos,
1678 count, ocount); 1673 count, ocount);
1679 1674
1680 if (written < 0 || written == count) 1675 if (written < 0 || written == count)
@@ -1693,7 +1688,7 @@ static ssize_t __btrfs_direct_write(struct kiocb *iocb,
1693 if (err) 1688 if (err)
1694 goto out; 1689 goto out;
1695 written += written_buffered; 1690 written += written_buffered;
1696 *ppos = pos + written_buffered; 1691 iocb->ki_pos = pos + written_buffered;
1697 invalidate_mapping_pages(file->f_mapping, pos >> PAGE_CACHE_SHIFT, 1692 invalidate_mapping_pages(file->f_mapping, pos >> PAGE_CACHE_SHIFT,
1698 endbyte >> PAGE_CACHE_SHIFT); 1693 endbyte >> PAGE_CACHE_SHIFT);
1699out: 1694out:
@@ -1725,8 +1720,8 @@ static ssize_t btrfs_file_aio_write(struct kiocb *iocb,
1725 struct file *file = iocb->ki_filp; 1720 struct file *file = iocb->ki_filp;
1726 struct inode *inode = file_inode(file); 1721 struct inode *inode = file_inode(file);
1727 struct btrfs_root *root = BTRFS_I(inode)->root; 1722 struct btrfs_root *root = BTRFS_I(inode)->root;
1728 loff_t *ppos = &iocb->ki_pos;
1729 u64 start_pos; 1723 u64 start_pos;
1724 u64 end_pos;
1730 ssize_t num_written = 0; 1725 ssize_t num_written = 0;
1731 ssize_t err = 0; 1726 ssize_t err = 0;
1732 size_t count, ocount; 1727 size_t count, ocount;
@@ -1781,7 +1776,9 @@ static ssize_t btrfs_file_aio_write(struct kiocb *iocb,
1781 1776
1782 start_pos = round_down(pos, root->sectorsize); 1777 start_pos = round_down(pos, root->sectorsize);
1783 if (start_pos > i_size_read(inode)) { 1778 if (start_pos > i_size_read(inode)) {
1784 err = btrfs_cont_expand(inode, i_size_read(inode), start_pos); 1779 /* Expand hole size to cover write data, preventing empty gap */
1780 end_pos = round_up(pos + iov->iov_len, root->sectorsize);
1781 err = btrfs_cont_expand(inode, i_size_read(inode), end_pos);
1785 if (err) { 1782 if (err) {
1786 mutex_unlock(&inode->i_mutex); 1783 mutex_unlock(&inode->i_mutex);
1787 goto out; 1784 goto out;
@@ -1793,7 +1790,7 @@ static ssize_t btrfs_file_aio_write(struct kiocb *iocb,
1793 1790
1794 if (unlikely(file->f_flags & O_DIRECT)) { 1791 if (unlikely(file->f_flags & O_DIRECT)) {
1795 num_written = __btrfs_direct_write(iocb, iov, nr_segs, 1792 num_written = __btrfs_direct_write(iocb, iov, nr_segs,
1796 pos, ppos, count, ocount); 1793 pos, count, ocount);
1797 } else { 1794 } else {
1798 struct iov_iter i; 1795 struct iov_iter i;
1799 1796
@@ -1801,7 +1798,7 @@ static ssize_t btrfs_file_aio_write(struct kiocb *iocb,
1801 1798
1802 num_written = __btrfs_buffered_write(file, &i, pos); 1799 num_written = __btrfs_buffered_write(file, &i, pos);
1803 if (num_written > 0) 1800 if (num_written > 0)
1804 *ppos = pos + num_written; 1801 iocb->ki_pos = pos + num_written;
1805 } 1802 }
1806 1803
1807 mutex_unlock(&inode->i_mutex); 1804 mutex_unlock(&inode->i_mutex);
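
The btrfs_file_aio_write() change above expands a hole past EOF up to end_pos = round_up(pos + iov->iov_len, sectorsize) instead of stopping at start_pos, so the sectors the write is about to fill are covered. A minimal standalone sketch of that boundary arithmetic, assuming a 4K sector size and made-up offsets (userspace C, not the kernel code):

    #include <stdio.h>
    #include <stdint.h>

    /* Same power-of-two rounding the kernel round_down/round_up macros do. */
    static uint64_t round_down_pow2(uint64_t x, uint64_t align)
    {
        return x & ~(align - 1);
    }

    static uint64_t round_up_pow2(uint64_t x, uint64_t align)
    {
        return (x + align - 1) & ~(align - 1);
    }

    int main(void)
    {
        uint64_t sectorsize = 4096;   /* assumed sector size */
        uint64_t i_size = 10000;      /* hypothetical current file size */
        uint64_t pos = 20000;         /* write starts beyond EOF */
        uint64_t len = 3000;          /* bytes being written */
        uint64_t start_pos = round_down_pow2(pos, sectorsize);
        uint64_t end_pos = round_up_pow2(pos + len, sectorsize);

        if (start_pos > i_size)
            /* the old code expanded the hole only to start_pos; the patch
             * expands it to end_pos so the whole write range is covered */
            printf("expand i_size %llu -> %llu (start_pos %llu)\n",
                   (unsigned long long)i_size,
                   (unsigned long long)end_pos,
                   (unsigned long long)start_pos);
        return 0;
    }

With these numbers, start_pos is 16384 and end_pos is 24576, so the hole now reaches the end of the written range instead of leaving a gap between start_pos and pos + len.
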
diff --git a/fs/btrfs/inode-map.c b/fs/btrfs/inode-map.c
index ab485e57b6fe..cc8ca193d830 100644
--- a/fs/btrfs/inode-map.c
+++ b/fs/btrfs/inode-map.c
@@ -55,7 +55,7 @@ static int caching_kthread(void *data)
55 key.type = BTRFS_INODE_ITEM_KEY; 55 key.type = BTRFS_INODE_ITEM_KEY;
56again: 56again:
57 /* need to make sure the commit_root doesn't disappear */ 57 /* need to make sure the commit_root doesn't disappear */
58 mutex_lock(&root->fs_commit_mutex); 58 down_read(&fs_info->commit_root_sem);
59 59
60 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); 60 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
61 if (ret < 0) 61 if (ret < 0)
@@ -88,7 +88,7 @@ again:
88 btrfs_item_key_to_cpu(leaf, &key, 0); 88 btrfs_item_key_to_cpu(leaf, &key, 0);
89 btrfs_release_path(path); 89 btrfs_release_path(path);
90 root->cache_progress = last; 90 root->cache_progress = last;
91 mutex_unlock(&root->fs_commit_mutex); 91 up_read(&fs_info->commit_root_sem);
92 schedule_timeout(1); 92 schedule_timeout(1);
93 goto again; 93 goto again;
94 } else 94 } else
@@ -127,7 +127,7 @@ next:
127 btrfs_unpin_free_ino(root); 127 btrfs_unpin_free_ino(root);
128out: 128out:
129 wake_up(&root->cache_wait); 129 wake_up(&root->cache_wait);
130 mutex_unlock(&root->fs_commit_mutex); 130 up_read(&fs_info->commit_root_sem);
131 131
132 btrfs_free_path(path); 132 btrfs_free_path(path);
133 133
@@ -223,11 +223,11 @@ again:
223 * or the caching work is done. 223 * or the caching work is done.
224 */ 224 */
225 225
226 mutex_lock(&root->fs_commit_mutex); 226 down_write(&root->fs_info->commit_root_sem);
227 spin_lock(&root->cache_lock); 227 spin_lock(&root->cache_lock);
228 if (root->cached == BTRFS_CACHE_FINISHED) { 228 if (root->cached == BTRFS_CACHE_FINISHED) {
229 spin_unlock(&root->cache_lock); 229 spin_unlock(&root->cache_lock);
230 mutex_unlock(&root->fs_commit_mutex); 230 up_write(&root->fs_info->commit_root_sem);
231 goto again; 231 goto again;
232 } 232 }
233 spin_unlock(&root->cache_lock); 233 spin_unlock(&root->cache_lock);
@@ -240,7 +240,7 @@ again:
240 else 240 else
241 __btrfs_add_free_space(pinned, objectid, 1); 241 __btrfs_add_free_space(pinned, objectid, 1);
242 242
243 mutex_unlock(&root->fs_commit_mutex); 243 up_write(&root->fs_info->commit_root_sem);
244 } 244 }
245} 245}
246 246
@@ -250,7 +250,7 @@ again:
250 * and others will just be dropped, because the commit root we were 250 * and others will just be dropped, because the commit root we were
251 * searching has changed. 251 * searching has changed.
252 * 252 *
253 * Must be called with root->fs_commit_mutex held 253 * Must be called with root->fs_info->commit_root_sem held
254 */ 254 */
255void btrfs_unpin_free_ino(struct btrfs_root *root) 255void btrfs_unpin_free_ino(struct btrfs_root *root)
256{ 256{
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 06e9a4152b14..5f805bc944fa 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -394,6 +394,14 @@ static noinline int compress_file_range(struct inode *inode,
394 (start > 0 || end + 1 < BTRFS_I(inode)->disk_i_size)) 394 (start > 0 || end + 1 < BTRFS_I(inode)->disk_i_size))
395 btrfs_add_inode_defrag(NULL, inode); 395 btrfs_add_inode_defrag(NULL, inode);
396 396
397 /*
 398 * skip compression for a small file range (<= blocksize) that
 399 * isn't an inline extent, since it doesn't save disk space at all.
400 */
401 if ((end - start + 1) <= blocksize &&
402 (start > 0 || end + 1 < BTRFS_I(inode)->disk_i_size))
403 goto cleanup_and_bail_uncompressed;
404
397 actual_end = min_t(u64, isize, end + 1); 405 actual_end = min_t(u64, isize, end + 1);
398again: 406again:
399 will_compress = 0; 407 will_compress = 0;
@@ -1271,6 +1279,15 @@ next_slot:
1271 disk_bytenr += cur_offset - found_key.offset; 1279 disk_bytenr += cur_offset - found_key.offset;
1272 num_bytes = min(end + 1, extent_end) - cur_offset; 1280 num_bytes = min(end + 1, extent_end) - cur_offset;
1273 /* 1281 /*
1282 * if there are pending snapshots for this root,
 1283 * we fall back to the common COW path.
1284 */
1285 if (!nolock) {
1286 err = btrfs_start_nocow_write(root);
1287 if (!err)
1288 goto out_check;
1289 }
1290 /*
1274 * force cow if csum exists in the range. 1291 * force cow if csum exists in the range.
1275 * this ensure that csum for a given extent are 1292 * this ensure that csum for a given extent are
1276 * either valid or do not exist. 1293 * either valid or do not exist.
@@ -1289,6 +1306,8 @@ next_slot:
1289out_check: 1306out_check:
1290 if (extent_end <= start) { 1307 if (extent_end <= start) {
1291 path->slots[0]++; 1308 path->slots[0]++;
1309 if (!nolock && nocow)
1310 btrfs_end_nocow_write(root);
1292 goto next_slot; 1311 goto next_slot;
1293 } 1312 }
1294 if (!nocow) { 1313 if (!nocow) {
@@ -1306,8 +1325,11 @@ out_check:
1306 ret = cow_file_range(inode, locked_page, 1325 ret = cow_file_range(inode, locked_page,
1307 cow_start, found_key.offset - 1, 1326 cow_start, found_key.offset - 1,
1308 page_started, nr_written, 1); 1327 page_started, nr_written, 1);
1309 if (ret) 1328 if (ret) {
1329 if (!nolock && nocow)
1330 btrfs_end_nocow_write(root);
1310 goto error; 1331 goto error;
1332 }
1311 cow_start = (u64)-1; 1333 cow_start = (u64)-1;
1312 } 1334 }
1313 1335
@@ -1354,8 +1376,11 @@ out_check:
1354 BTRFS_DATA_RELOC_TREE_OBJECTID) { 1376 BTRFS_DATA_RELOC_TREE_OBJECTID) {
1355 ret = btrfs_reloc_clone_csums(inode, cur_offset, 1377 ret = btrfs_reloc_clone_csums(inode, cur_offset,
1356 num_bytes); 1378 num_bytes);
1357 if (ret) 1379 if (ret) {
1380 if (!nolock && nocow)
1381 btrfs_end_nocow_write(root);
1358 goto error; 1382 goto error;
1383 }
1359 } 1384 }
1360 1385
1361 extent_clear_unlock_delalloc(inode, cur_offset, 1386 extent_clear_unlock_delalloc(inode, cur_offset,
@@ -1363,6 +1388,8 @@ out_check:
1363 locked_page, EXTENT_LOCKED | 1388 locked_page, EXTENT_LOCKED |
1364 EXTENT_DELALLOC, PAGE_UNLOCK | 1389 EXTENT_DELALLOC, PAGE_UNLOCK |
1365 PAGE_SET_PRIVATE2); 1390 PAGE_SET_PRIVATE2);
1391 if (!nolock && nocow)
1392 btrfs_end_nocow_write(root);
1366 cur_offset = extent_end; 1393 cur_offset = extent_end;
1367 if (cur_offset > end) 1394 if (cur_offset > end)
1368 break; 1395 break;
@@ -8476,19 +8503,20 @@ static int __start_delalloc_inodes(struct btrfs_root *root, int delay_iput,
8476 else 8503 else
8477 iput(inode); 8504 iput(inode);
8478 ret = -ENOMEM; 8505 ret = -ENOMEM;
8479 break; 8506 goto out;
8480 } 8507 }
8481 list_add_tail(&work->list, &works); 8508 list_add_tail(&work->list, &works);
8482 btrfs_queue_work(root->fs_info->flush_workers, 8509 btrfs_queue_work(root->fs_info->flush_workers,
8483 &work->work); 8510 &work->work);
8484 ret++; 8511 ret++;
8485 if (nr != -1 && ret >= nr) 8512 if (nr != -1 && ret >= nr)
8486 break; 8513 goto out;
8487 cond_resched(); 8514 cond_resched();
8488 spin_lock(&root->delalloc_lock); 8515 spin_lock(&root->delalloc_lock);
8489 } 8516 }
8490 spin_unlock(&root->delalloc_lock); 8517 spin_unlock(&root->delalloc_lock);
8491 8518
8519out:
8492 list_for_each_entry_safe(work, next, &works, list) { 8520 list_for_each_entry_safe(work, next, &works, list) {
8493 list_del_init(&work->list); 8521 list_del_init(&work->list);
8494 btrfs_wait_and_free_delalloc_work(work); 8522 btrfs_wait_and_free_delalloc_work(work);
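
The compress_file_range() hunk above bails out of compression when the range is at most one block and cannot become an inline extent. A small sketch of when that test fires, with illustrative sizes only (not btrfs structures):

    #include <stdio.h>
    #include <stdint.h>

    /* Mirrors the shape of the new check: a range no larger than one block
     * is not worth compressing unless it could still become an inline
     * extent (it starts at offset 0 and reaches the end of a small file). */
    static int skip_compression(uint64_t start, uint64_t end,
                                uint64_t blocksize, uint64_t disk_i_size)
    {
        return (end - start + 1) <= blocksize &&
               (start > 0 || end + 1 < disk_i_size);
    }

    int main(void)
    {
        uint64_t bs = 4096;

        /* one block in the middle of a 1 MiB file: compression is skipped */
        printf("%d\n", skip_compression(8192, 12287, bs, 1 << 20));
        /* 1 KiB file written from offset 0: may become inline, not skipped */
        printf("%d\n", skip_compression(0, 1023, bs, 1024));
        return 0;
    }

The second call keeps compression because the write starts at offset 0 and covers the whole small file, so an inline extent is still possible and compression can still pay off.
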
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index 0401397b5c92..e79ff6b90cb7 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -1472,6 +1472,7 @@ static noinline int btrfs_ioctl_resize(struct file *file,
1472 struct btrfs_trans_handle *trans; 1472 struct btrfs_trans_handle *trans;
1473 struct btrfs_device *device = NULL; 1473 struct btrfs_device *device = NULL;
1474 char *sizestr; 1474 char *sizestr;
1475 char *retptr;
1475 char *devstr = NULL; 1476 char *devstr = NULL;
1476 int ret = 0; 1477 int ret = 0;
1477 int mod = 0; 1478 int mod = 0;
@@ -1539,8 +1540,8 @@ static noinline int btrfs_ioctl_resize(struct file *file,
1539 mod = 1; 1540 mod = 1;
1540 sizestr++; 1541 sizestr++;
1541 } 1542 }
1542 new_size = memparse(sizestr, NULL); 1543 new_size = memparse(sizestr, &retptr);
1543 if (new_size == 0) { 1544 if (*retptr != '\0' || new_size == 0) {
1544 ret = -EINVAL; 1545 ret = -EINVAL;
1545 goto out_free; 1546 goto out_free;
1546 } 1547 }
@@ -3140,8 +3141,9 @@ process_slot:
3140 new_key.offset + datal, 3141 new_key.offset + datal,
3141 1); 3142 1);
3142 if (ret) { 3143 if (ret) {
3143 btrfs_abort_transaction(trans, root, 3144 if (ret != -EINVAL)
3144 ret); 3145 btrfs_abort_transaction(trans,
3146 root, ret);
3145 btrfs_end_transaction(trans, root); 3147 btrfs_end_transaction(trans, root);
3146 goto out; 3148 goto out;
3147 } 3149 }
@@ -3538,6 +3540,11 @@ static long btrfs_ioctl_space_info(struct btrfs_root *root, void __user *arg)
3538 up_read(&info->groups_sem); 3540 up_read(&info->groups_sem);
3539 } 3541 }
3540 3542
3543 /*
3544 * Global block reserve, exported as a space_info
3545 */
3546 slot_count++;
3547
3541 /* space_slots == 0 means they are asking for a count */ 3548 /* space_slots == 0 means they are asking for a count */
3542 if (space_args.space_slots == 0) { 3549 if (space_args.space_slots == 0) {
3543 space_args.total_spaces = slot_count; 3550 space_args.total_spaces = slot_count;
@@ -3596,6 +3603,21 @@ static long btrfs_ioctl_space_info(struct btrfs_root *root, void __user *arg)
3596 up_read(&info->groups_sem); 3603 up_read(&info->groups_sem);
3597 } 3604 }
3598 3605
3606 /*
3607 * Add global block reserve
3608 */
3609 if (slot_count) {
3610 struct btrfs_block_rsv *block_rsv = &root->fs_info->global_block_rsv;
3611
3612 spin_lock(&block_rsv->lock);
3613 space.total_bytes = block_rsv->size;
3614 space.used_bytes = block_rsv->size - block_rsv->reserved;
3615 spin_unlock(&block_rsv->lock);
3616 space.flags = BTRFS_SPACE_INFO_GLOBAL_RSV;
3617 memcpy(dest, &space, sizeof(space));
3618 space_args.total_spaces++;
3619 }
3620
3599 user_dest = (struct btrfs_ioctl_space_info __user *) 3621 user_dest = (struct btrfs_ioctl_space_info __user *)
3600 (arg + sizeof(struct btrfs_ioctl_space_args)); 3622 (arg + sizeof(struct btrfs_ioctl_space_args));
3601 3623
@@ -4531,9 +4553,8 @@ static long btrfs_ioctl_set_received_subvol_32(struct file *file,
4531 } 4553 }
4532 4554
4533 args64 = kmalloc(sizeof(*args64), GFP_NOFS); 4555 args64 = kmalloc(sizeof(*args64), GFP_NOFS);
4534 if (IS_ERR(args64)) { 4556 if (!args64) {
4535 ret = PTR_ERR(args64); 4557 ret = -ENOMEM;
4536 args64 = NULL;
4537 goto out; 4558 goto out;
4538 } 4559 }
4539 4560
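
btrfs_ioctl_resize() above now hands memparse() a return pointer and rejects size strings with trailing characters. A rough userspace analog using strtoull(); the suffix handling here is only a simplified stand-in for what memparse() accepts:

    #include <stdio.h>
    #include <stdlib.h>
    #include <errno.h>

    /* Parse a size string and reject anything that leaves unparsed
     * characters behind, so "12abc" fails instead of silently becoming 12. */
    static int parse_size_strict(const char *s, unsigned long long *out)
    {
        char *end;
        unsigned long long v;

        errno = 0;
        v = strtoull(s, &end, 10);
        if (errno || end == s)
            return -EINVAL;

        /* memparse also accepts K/M/G (and more); handle a few as a hint */
        switch (*end) {
        case 'k': case 'K': v <<= 10; end++; break;
        case 'm': case 'M': v <<= 20; end++; break;
        case 'g': case 'G': v <<= 30; end++; break;
        }

        if (*end != '\0' || v == 0)
            return -EINVAL;

        *out = v;
        return 0;
    }

    int main(void)
    {
        unsigned long long v;

        printf("%d\n", parse_size_strict("10G", &v));   /* 0, v = 10 GiB */
        printf("%d\n", parse_size_strict("12abc", &v)); /* -EINVAL */
        return 0;
    }

So "10G" parses to 10 GiB while "12abc" now fails with -EINVAL instead of resizing the filesystem to 12 bytes.
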
diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
index def428a25b2a..7f92ab1daa87 100644
--- a/fs/btrfs/relocation.c
+++ b/fs/btrfs/relocation.c
@@ -2317,7 +2317,6 @@ void free_reloc_roots(struct list_head *list)
2317static noinline_for_stack 2317static noinline_for_stack
2318int merge_reloc_roots(struct reloc_control *rc) 2318int merge_reloc_roots(struct reloc_control *rc)
2319{ 2319{
2320 struct btrfs_trans_handle *trans;
2321 struct btrfs_root *root; 2320 struct btrfs_root *root;
2322 struct btrfs_root *reloc_root; 2321 struct btrfs_root *reloc_root;
2323 u64 last_snap; 2322 u64 last_snap;
@@ -2375,26 +2374,6 @@ again:
2375 list_add_tail(&reloc_root->root_list, 2374 list_add_tail(&reloc_root->root_list,
2376 &reloc_roots); 2375 &reloc_roots);
2377 goto out; 2376 goto out;
2378 } else if (!ret) {
2379 /*
2380 * recover the last snapshot tranid to avoid
2381 * the space balance break NOCOW.
2382 */
2383 root = read_fs_root(rc->extent_root->fs_info,
2384 objectid);
2385 if (IS_ERR(root))
2386 continue;
2387
2388 trans = btrfs_join_transaction(root);
2389 BUG_ON(IS_ERR(trans));
2390
2391 /* Check if the fs/file tree was snapshoted or not. */
2392 if (btrfs_root_last_snapshot(&root->root_item) ==
2393 otransid - 1)
2394 btrfs_set_root_last_snapshot(&root->root_item,
2395 last_snap);
2396
2397 btrfs_end_transaction(trans, root);
2398 } 2377 }
2399 } 2378 }
2400 2379
diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
index 93e6d7172844..0be77993378e 100644
--- a/fs/btrfs/scrub.c
+++ b/fs/btrfs/scrub.c
@@ -2235,6 +2235,47 @@ behind_scrub_pages:
2235 return 0; 2235 return 0;
2236} 2236}
2237 2237
2238/*
2239 * Given a physical address, this will calculate its
2240 * logical offset. If this is a parity stripe, it will return
2241 * the leftmost data stripe's logical offset.
2242 *
2243 * Returns 0 if it is a data stripe, 1 if it is a parity stripe.
2244 */
2245static int get_raid56_logic_offset(u64 physical, int num,
2246 struct map_lookup *map, u64 *offset)
2247{
2248 int i;
2249 int j = 0;
2250 u64 stripe_nr;
2251 u64 last_offset;
2252 int stripe_index;
2253 int rot;
2254
2255 last_offset = (physical - map->stripes[num].physical) *
2256 nr_data_stripes(map);
2257 *offset = last_offset;
2258 for (i = 0; i < nr_data_stripes(map); i++) {
2259 *offset = last_offset + i * map->stripe_len;
2260
2261 stripe_nr = *offset;
2262 do_div(stripe_nr, map->stripe_len);
2263 do_div(stripe_nr, nr_data_stripes(map));
2264
2265 /* Work out the disk rotation on this stripe-set */
2266 rot = do_div(stripe_nr, map->num_stripes);
2267 /* calculate which stripe this data is located on */
2268 rot += i;
2269 stripe_index = rot % map->num_stripes;
2270 if (stripe_index == num)
2271 return 0;
2272 if (stripe_index < num)
2273 j++;
2274 }
2275 *offset = last_offset + j * map->stripe_len;
2276 return 1;
2277}
2278
2238static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx, 2279static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx,
2239 struct map_lookup *map, 2280 struct map_lookup *map,
2240 struct btrfs_device *scrub_dev, 2281 struct btrfs_device *scrub_dev,
@@ -2256,6 +2297,7 @@ static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx,
2256 u64 physical; 2297 u64 physical;
2257 u64 logical; 2298 u64 logical;
2258 u64 logic_end; 2299 u64 logic_end;
2300 u64 physical_end;
2259 u64 generation; 2301 u64 generation;
2260 int mirror_num; 2302 int mirror_num;
2261 struct reada_control *reada1; 2303 struct reada_control *reada1;
@@ -2269,16 +2311,10 @@ static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx,
2269 u64 extent_len; 2311 u64 extent_len;
2270 struct btrfs_device *extent_dev; 2312 struct btrfs_device *extent_dev;
2271 int extent_mirror_num; 2313 int extent_mirror_num;
2272 int stop_loop; 2314 int stop_loop = 0;
2273
2274 if (map->type & (BTRFS_BLOCK_GROUP_RAID5 |
2275 BTRFS_BLOCK_GROUP_RAID6)) {
2276 if (num >= nr_data_stripes(map)) {
2277 return 0;
2278 }
2279 }
2280 2315
2281 nstripes = length; 2316 nstripes = length;
2317 physical = map->stripes[num].physical;
2282 offset = 0; 2318 offset = 0;
2283 do_div(nstripes, map->stripe_len); 2319 do_div(nstripes, map->stripe_len);
2284 if (map->type & BTRFS_BLOCK_GROUP_RAID0) { 2320 if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
@@ -2296,6 +2332,11 @@ static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx,
2296 } else if (map->type & BTRFS_BLOCK_GROUP_DUP) { 2332 } else if (map->type & BTRFS_BLOCK_GROUP_DUP) {
2297 increment = map->stripe_len; 2333 increment = map->stripe_len;
2298 mirror_num = num % map->num_stripes + 1; 2334 mirror_num = num % map->num_stripes + 1;
2335 } else if (map->type & (BTRFS_BLOCK_GROUP_RAID5 |
2336 BTRFS_BLOCK_GROUP_RAID6)) {
2337 get_raid56_logic_offset(physical, num, map, &offset);
2338 increment = map->stripe_len * nr_data_stripes(map);
2339 mirror_num = 1;
2299 } else { 2340 } else {
2300 increment = map->stripe_len; 2341 increment = map->stripe_len;
2301 mirror_num = 1; 2342 mirror_num = 1;
@@ -2319,7 +2360,15 @@ static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx,
2319 * to not hold off transaction commits 2360 * to not hold off transaction commits
2320 */ 2361 */
2321 logical = base + offset; 2362 logical = base + offset;
2322 2363 physical_end = physical + nstripes * map->stripe_len;
2364 if (map->type & (BTRFS_BLOCK_GROUP_RAID5 |
2365 BTRFS_BLOCK_GROUP_RAID6)) {
2366 get_raid56_logic_offset(physical_end, num,
2367 map, &logic_end);
2368 logic_end += base;
2369 } else {
2370 logic_end = logical + increment * nstripes;
2371 }
2323 wait_event(sctx->list_wait, 2372 wait_event(sctx->list_wait,
2324 atomic_read(&sctx->bios_in_flight) == 0); 2373 atomic_read(&sctx->bios_in_flight) == 0);
2325 scrub_blocked_if_needed(fs_info); 2374 scrub_blocked_if_needed(fs_info);
@@ -2328,7 +2377,7 @@ static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx,
2328 key_start.objectid = logical; 2377 key_start.objectid = logical;
2329 key_start.type = BTRFS_EXTENT_ITEM_KEY; 2378 key_start.type = BTRFS_EXTENT_ITEM_KEY;
2330 key_start.offset = (u64)0; 2379 key_start.offset = (u64)0;
2331 key_end.objectid = base + offset + nstripes * increment; 2380 key_end.objectid = logic_end;
2332 key_end.type = BTRFS_METADATA_ITEM_KEY; 2381 key_end.type = BTRFS_METADATA_ITEM_KEY;
2333 key_end.offset = (u64)-1; 2382 key_end.offset = (u64)-1;
2334 reada1 = btrfs_reada_add(root, &key_start, &key_end); 2383 reada1 = btrfs_reada_add(root, &key_start, &key_end);
@@ -2338,7 +2387,7 @@ static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx,
2338 key_start.offset = logical; 2387 key_start.offset = logical;
2339 key_end.objectid = BTRFS_EXTENT_CSUM_OBJECTID; 2388 key_end.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
2340 key_end.type = BTRFS_EXTENT_CSUM_KEY; 2389 key_end.type = BTRFS_EXTENT_CSUM_KEY;
2341 key_end.offset = base + offset + nstripes * increment; 2390 key_end.offset = logic_end;
2342 reada2 = btrfs_reada_add(csum_root, &key_start, &key_end); 2391 reada2 = btrfs_reada_add(csum_root, &key_start, &key_end);
2343 2392
2344 if (!IS_ERR(reada1)) 2393 if (!IS_ERR(reada1))
@@ -2356,11 +2405,17 @@ static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx,
2356 /* 2405 /*
2357 * now find all extents for each stripe and scrub them 2406 * now find all extents for each stripe and scrub them
2358 */ 2407 */
2359 logical = base + offset;
2360 physical = map->stripes[num].physical;
2361 logic_end = logical + increment * nstripes;
2362 ret = 0; 2408 ret = 0;
2363 while (logical < logic_end) { 2409 while (physical < physical_end) {
2410 /* for raid56, we skip parity stripe */
2411 if (map->type & (BTRFS_BLOCK_GROUP_RAID5 |
2412 BTRFS_BLOCK_GROUP_RAID6)) {
2413 ret = get_raid56_logic_offset(physical, num,
2414 map, &logical);
2415 logical += base;
2416 if (ret)
2417 goto skip;
2418 }
2364 /* 2419 /*
2365 * canceled? 2420 * canceled?
2366 */ 2421 */
@@ -2504,15 +2559,29 @@ again:
2504 scrub_free_csums(sctx); 2559 scrub_free_csums(sctx);
2505 if (extent_logical + extent_len < 2560 if (extent_logical + extent_len <
2506 key.objectid + bytes) { 2561 key.objectid + bytes) {
2507 logical += increment; 2562 if (map->type & (BTRFS_BLOCK_GROUP_RAID5 |
2508 physical += map->stripe_len; 2563 BTRFS_BLOCK_GROUP_RAID6)) {
2509 2564 /*
2565 * loop until we find next data stripe
2566 * or we have finished all stripes.
2567 */
2568 do {
2569 physical += map->stripe_len;
2570 ret = get_raid56_logic_offset(
2571 physical, num,
2572 map, &logical);
2573 logical += base;
2574 } while (physical < physical_end && ret);
2575 } else {
2576 physical += map->stripe_len;
2577 logical += increment;
2578 }
2510 if (logical < key.objectid + bytes) { 2579 if (logical < key.objectid + bytes) {
2511 cond_resched(); 2580 cond_resched();
2512 goto again; 2581 goto again;
2513 } 2582 }
2514 2583
2515 if (logical >= logic_end) { 2584 if (physical >= physical_end) {
2516 stop_loop = 1; 2585 stop_loop = 1;
2517 break; 2586 break;
2518 } 2587 }
@@ -2521,6 +2590,7 @@ next:
2521 path->slots[0]++; 2590 path->slots[0]++;
2522 } 2591 }
2523 btrfs_release_path(path); 2592 btrfs_release_path(path);
2593skip:
2524 logical += increment; 2594 logical += increment;
2525 physical += map->stripe_len; 2595 physical += map->stripe_len;
2526 spin_lock(&sctx->stat_lock); 2596 spin_lock(&sctx->stat_lock);
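
get_raid56_logic_offset(), added above, maps a per-device physical offset to the logical offset of the data it holds and reports whether that stripe element is data or parity. A standalone model of the same arithmetic, with struct map_lookup reduced to three fields and the physical offset assumed to be relative to the start of the stripe on that device (illustrative only):

    #include <stdio.h>
    #include <stdint.h>

    /* Minimal stand-in for struct map_lookup: only what the math needs. */
    struct toy_map {
        int num_stripes;        /* devices in the stripe-set */
        int nr_data_stripes;    /* num_stripes minus parity stripes */
        uint64_t stripe_len;    /* bytes per stripe element */
    };

    /* Returns 0 and the matching logical offset when the element on device
     * 'num' holds data, 1 when it holds parity. */
    static int raid56_logic_offset(uint64_t physical, int num,
                                   const struct toy_map *map, uint64_t *offset)
    {
        uint64_t last_offset = physical * map->nr_data_stripes;
        int i, j = 0;

        for (i = 0; i < map->nr_data_stripes; i++) {
            uint64_t off = last_offset + i * map->stripe_len;
            uint64_t stripe_nr = off / map->stripe_len / map->nr_data_stripes;
            /* disk rotation of this stripe-set, then the element's device */
            int rot = (int)(stripe_nr % map->num_stripes) + i;
            int stripe_index = rot % map->num_stripes;

            if (stripe_index == num) {
                *offset = off;
                return 0;       /* data element */
            }
            if (stripe_index < num)
                j++;
        }
        *offset = last_offset + j * map->stripe_len;
        return 1;               /* parity element */
    }

    int main(void)
    {
        struct toy_map map = { .num_stripes = 3, .nr_data_stripes = 2,
                               .stripe_len = 65536 };
        uint64_t logical;
        int num;

        /* the first stripe element (physical offset 0) on each device */
        for (num = 0; num < map.num_stripes; num++) {
            int ret = raid56_logic_offset(0, num, &map, &logical);
            printf("dev %d: %s, logical %llu\n", num,
                   ret ? "parity" : "data", (unsigned long long)logical);
        }
        return 0;
    }

For a 3-device RAID5 stripe-set this reports devices 0 and 1 as data (logical 0 and 64K) and device 2 as parity, which is exactly what the new scrub loop uses to skip parity elements and advance to the next data stripe.
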
diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c
index 9b6da9d55f9a..1ac3ca98c429 100644
--- a/fs/btrfs/send.c
+++ b/fs/btrfs/send.c
@@ -493,6 +493,7 @@ static struct btrfs_path *alloc_path_for_send(void)
493 return NULL; 493 return NULL;
494 path->search_commit_root = 1; 494 path->search_commit_root = 1;
495 path->skip_locking = 1; 495 path->skip_locking = 1;
496 path->need_commit_sem = 1;
496 return path; 497 return path;
497} 498}
498 499
@@ -771,29 +772,22 @@ out:
771/* 772/*
772 * Helper function to retrieve some fields from an inode item. 773 * Helper function to retrieve some fields from an inode item.
773 */ 774 */
774static int get_inode_info(struct btrfs_root *root, 775static int __get_inode_info(struct btrfs_root *root, struct btrfs_path *path,
775 u64 ino, u64 *size, u64 *gen, 776 u64 ino, u64 *size, u64 *gen, u64 *mode, u64 *uid,
776 u64 *mode, u64 *uid, u64 *gid, 777 u64 *gid, u64 *rdev)
777 u64 *rdev)
778{ 778{
779 int ret; 779 int ret;
780 struct btrfs_inode_item *ii; 780 struct btrfs_inode_item *ii;
781 struct btrfs_key key; 781 struct btrfs_key key;
782 struct btrfs_path *path;
783
784 path = alloc_path_for_send();
785 if (!path)
786 return -ENOMEM;
787 782
788 key.objectid = ino; 783 key.objectid = ino;
789 key.type = BTRFS_INODE_ITEM_KEY; 784 key.type = BTRFS_INODE_ITEM_KEY;
790 key.offset = 0; 785 key.offset = 0;
791 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); 786 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
792 if (ret < 0)
793 goto out;
794 if (ret) { 787 if (ret) {
795 ret = -ENOENT; 788 if (ret > 0)
796 goto out; 789 ret = -ENOENT;
790 return ret;
797 } 791 }
798 792
799 ii = btrfs_item_ptr(path->nodes[0], path->slots[0], 793 ii = btrfs_item_ptr(path->nodes[0], path->slots[0],
@@ -811,7 +805,22 @@ static int get_inode_info(struct btrfs_root *root,
811 if (rdev) 805 if (rdev)
812 *rdev = btrfs_inode_rdev(path->nodes[0], ii); 806 *rdev = btrfs_inode_rdev(path->nodes[0], ii);
813 807
814out: 808 return ret;
809}
810
811static int get_inode_info(struct btrfs_root *root,
812 u64 ino, u64 *size, u64 *gen,
813 u64 *mode, u64 *uid, u64 *gid,
814 u64 *rdev)
815{
816 struct btrfs_path *path;
817 int ret;
818
819 path = alloc_path_for_send();
820 if (!path)
821 return -ENOMEM;
822 ret = __get_inode_info(root, path, ino, size, gen, mode, uid, gid,
823 rdev);
815 btrfs_free_path(path); 824 btrfs_free_path(path);
816 return ret; 825 return ret;
817} 826}
@@ -1085,6 +1094,7 @@ out:
1085struct backref_ctx { 1094struct backref_ctx {
1086 struct send_ctx *sctx; 1095 struct send_ctx *sctx;
1087 1096
1097 struct btrfs_path *path;
1088 /* number of total found references */ 1098 /* number of total found references */
1089 u64 found; 1099 u64 found;
1090 1100
@@ -1155,8 +1165,9 @@ static int __iterate_backrefs(u64 ino, u64 offset, u64 root, void *ctx_)
1155 * There are inodes that have extents that lie behind its i_size. Don't 1165 * There are inodes that have extents that lie behind its i_size. Don't
1156 * accept clones from these extents. 1166 * accept clones from these extents.
1157 */ 1167 */
1158 ret = get_inode_info(found->root, ino, &i_size, NULL, NULL, NULL, NULL, 1168 ret = __get_inode_info(found->root, bctx->path, ino, &i_size, NULL, NULL,
1159 NULL); 1169 NULL, NULL, NULL);
1170 btrfs_release_path(bctx->path);
1160 if (ret < 0) 1171 if (ret < 0)
1161 return ret; 1172 return ret;
1162 1173
@@ -1235,12 +1246,17 @@ static int find_extent_clone(struct send_ctx *sctx,
1235 if (!tmp_path) 1246 if (!tmp_path)
1236 return -ENOMEM; 1247 return -ENOMEM;
1237 1248
1249 /* We only use this path under the commit sem */
1250 tmp_path->need_commit_sem = 0;
1251
1238 backref_ctx = kmalloc(sizeof(*backref_ctx), GFP_NOFS); 1252 backref_ctx = kmalloc(sizeof(*backref_ctx), GFP_NOFS);
1239 if (!backref_ctx) { 1253 if (!backref_ctx) {
1240 ret = -ENOMEM; 1254 ret = -ENOMEM;
1241 goto out; 1255 goto out;
1242 } 1256 }
1243 1257
1258 backref_ctx->path = tmp_path;
1259
1244 if (data_offset >= ino_size) { 1260 if (data_offset >= ino_size) {
1245 /* 1261 /*
1246 * There may be extents that lie behind the file's size. 1262 * There may be extents that lie behind the file's size.
@@ -1268,8 +1284,10 @@ static int find_extent_clone(struct send_ctx *sctx,
1268 } 1284 }
1269 logical = disk_byte + btrfs_file_extent_offset(eb, fi); 1285 logical = disk_byte + btrfs_file_extent_offset(eb, fi);
1270 1286
1287 down_read(&sctx->send_root->fs_info->commit_root_sem);
1271 ret = extent_from_logical(sctx->send_root->fs_info, disk_byte, tmp_path, 1288 ret = extent_from_logical(sctx->send_root->fs_info, disk_byte, tmp_path,
1272 &found_key, &flags); 1289 &found_key, &flags);
1290 up_read(&sctx->send_root->fs_info->commit_root_sem);
1273 btrfs_release_path(tmp_path); 1291 btrfs_release_path(tmp_path);
1274 1292
1275 if (ret < 0) 1293 if (ret < 0)
@@ -4418,6 +4436,9 @@ static int send_hole(struct send_ctx *sctx, u64 end)
4418 p = fs_path_alloc(); 4436 p = fs_path_alloc();
4419 if (!p) 4437 if (!p)
4420 return -ENOMEM; 4438 return -ENOMEM;
4439 ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p);
4440 if (ret < 0)
4441 goto tlv_put_failure;
4421 memset(sctx->read_buf, 0, BTRFS_SEND_READ_SIZE); 4442 memset(sctx->read_buf, 0, BTRFS_SEND_READ_SIZE);
4422 while (offset < end) { 4443 while (offset < end) {
4423 len = min_t(u64, end - offset, BTRFS_SEND_READ_SIZE); 4444 len = min_t(u64, end - offset, BTRFS_SEND_READ_SIZE);
@@ -4425,9 +4446,6 @@ static int send_hole(struct send_ctx *sctx, u64 end)
4425 ret = begin_cmd(sctx, BTRFS_SEND_C_WRITE); 4446 ret = begin_cmd(sctx, BTRFS_SEND_C_WRITE);
4426 if (ret < 0) 4447 if (ret < 0)
4427 break; 4448 break;
4428 ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p);
4429 if (ret < 0)
4430 break;
4431 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p); 4449 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
4432 TLV_PUT_U64(sctx, BTRFS_SEND_A_FILE_OFFSET, offset); 4450 TLV_PUT_U64(sctx, BTRFS_SEND_A_FILE_OFFSET, offset);
4433 TLV_PUT(sctx, BTRFS_SEND_A_DATA, sctx->read_buf, len); 4451 TLV_PUT(sctx, BTRFS_SEND_A_DATA, sctx->read_buf, len);
@@ -4968,7 +4986,9 @@ static int finish_inode_if_needed(struct send_ctx *sctx, int at_end)
4968 4986
4969 if (S_ISREG(sctx->cur_inode_mode)) { 4987 if (S_ISREG(sctx->cur_inode_mode)) {
4970 if (need_send_hole(sctx)) { 4988 if (need_send_hole(sctx)) {
4971 if (sctx->cur_inode_last_extent == (u64)-1) { 4989 if (sctx->cur_inode_last_extent == (u64)-1 ||
4990 sctx->cur_inode_last_extent <
4991 sctx->cur_inode_size) {
4972 ret = get_last_extent(sctx, (u64)-1); 4992 ret = get_last_extent(sctx, (u64)-1);
4973 if (ret) 4993 if (ret)
4974 goto out; 4994 goto out;
@@ -5367,57 +5387,21 @@ out:
5367static int full_send_tree(struct send_ctx *sctx) 5387static int full_send_tree(struct send_ctx *sctx)
5368{ 5388{
5369 int ret; 5389 int ret;
5370 struct btrfs_trans_handle *trans = NULL;
5371 struct btrfs_root *send_root = sctx->send_root; 5390 struct btrfs_root *send_root = sctx->send_root;
5372 struct btrfs_key key; 5391 struct btrfs_key key;
5373 struct btrfs_key found_key; 5392 struct btrfs_key found_key;
5374 struct btrfs_path *path; 5393 struct btrfs_path *path;
5375 struct extent_buffer *eb; 5394 struct extent_buffer *eb;
5376 int slot; 5395 int slot;
5377 u64 start_ctransid;
5378 u64 ctransid;
5379 5396
5380 path = alloc_path_for_send(); 5397 path = alloc_path_for_send();
5381 if (!path) 5398 if (!path)
5382 return -ENOMEM; 5399 return -ENOMEM;
5383 5400
5384 spin_lock(&send_root->root_item_lock);
5385 start_ctransid = btrfs_root_ctransid(&send_root->root_item);
5386 spin_unlock(&send_root->root_item_lock);
5387
5388 key.objectid = BTRFS_FIRST_FREE_OBJECTID; 5401 key.objectid = BTRFS_FIRST_FREE_OBJECTID;
5389 key.type = BTRFS_INODE_ITEM_KEY; 5402 key.type = BTRFS_INODE_ITEM_KEY;
5390 key.offset = 0; 5403 key.offset = 0;
5391 5404
5392join_trans:
5393 /*
5394 * We need to make sure the transaction does not get committed
5395 * while we do anything on commit roots. Join a transaction to prevent
5396 * this.
5397 */
5398 trans = btrfs_join_transaction(send_root);
5399 if (IS_ERR(trans)) {
5400 ret = PTR_ERR(trans);
5401 trans = NULL;
5402 goto out;
5403 }
5404
5405 /*
5406 * Make sure the tree has not changed after re-joining. We detect this
5407 * by comparing start_ctransid and ctransid. They should always match.
5408 */
5409 spin_lock(&send_root->root_item_lock);
5410 ctransid = btrfs_root_ctransid(&send_root->root_item);
5411 spin_unlock(&send_root->root_item_lock);
5412
5413 if (ctransid != start_ctransid) {
5414 WARN(1, KERN_WARNING "BTRFS: the root that you're trying to "
5415 "send was modified in between. This is "
5416 "probably a bug.\n");
5417 ret = -EIO;
5418 goto out;
5419 }
5420
5421 ret = btrfs_search_slot_for_read(send_root, &key, path, 1, 0); 5405 ret = btrfs_search_slot_for_read(send_root, &key, path, 1, 0);
5422 if (ret < 0) 5406 if (ret < 0)
5423 goto out; 5407 goto out;
@@ -5425,19 +5409,6 @@ join_trans:
5425 goto out_finish; 5409 goto out_finish;
5426 5410
5427 while (1) { 5411 while (1) {
5428 /*
5429 * When someone want to commit while we iterate, end the
5430 * joined transaction and rejoin.
5431 */
5432 if (btrfs_should_end_transaction(trans, send_root)) {
5433 ret = btrfs_end_transaction(trans, send_root);
5434 trans = NULL;
5435 if (ret < 0)
5436 goto out;
5437 btrfs_release_path(path);
5438 goto join_trans;
5439 }
5440
5441 eb = path->nodes[0]; 5412 eb = path->nodes[0];
5442 slot = path->slots[0]; 5413 slot = path->slots[0];
5443 btrfs_item_key_to_cpu(eb, &found_key, slot); 5414 btrfs_item_key_to_cpu(eb, &found_key, slot);
@@ -5465,12 +5436,6 @@ out_finish:
5465 5436
5466out: 5437out:
5467 btrfs_free_path(path); 5438 btrfs_free_path(path);
5468 if (trans) {
5469 if (!ret)
5470 ret = btrfs_end_transaction(trans, send_root);
5471 else
5472 btrfs_end_transaction(trans, send_root);
5473 }
5474 return ret; 5439 return ret;
5475} 5440}
5476 5441
@@ -5718,7 +5683,9 @@ long btrfs_ioctl_send(struct file *mnt_file, void __user *arg_)
5718 NULL); 5683 NULL);
5719 sort_clone_roots = 1; 5684 sort_clone_roots = 1;
5720 5685
5686 current->journal_info = (void *)BTRFS_SEND_TRANS_STUB;
5721 ret = send_subvol(sctx); 5687 ret = send_subvol(sctx);
5688 current->journal_info = NULL;
5722 if (ret < 0) 5689 if (ret < 0)
5723 goto out; 5690 goto out;
5724 5691
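
The send.c refactor above splits get_inode_info() into __get_inode_info(), which searches with a caller-supplied path, plus a thin wrapper that keeps the allocate-per-call behaviour, so __iterate_backrefs() can reuse one preallocated path. A toy sketch of that wrapper/worker split (invented names and types, not the btrfs ones):

    #include <stdio.h>
    #include <stdlib.h>

    /* "path" is modelled as a plain scratch buffer here. */
    struct toy_path {
        char scratch[64];
    };

    /* Worker: does the lookup using the caller's path. */
    static int __lookup(struct toy_path *path, int ino, int *size_out)
    {
        snprintf(path->scratch, sizeof(path->scratch), "ino %d", ino);
        *size_out = ino * 4096;   /* made-up result */
        return 0;
    }

    /* Wrapper: keeps the old allocate-per-call convenience. */
    static int lookup(int ino, int *size_out)
    {
        struct toy_path *path = malloc(sizeof(*path));
        int ret;

        if (!path)
            return -1;
        ret = __lookup(path, ino, size_out);
        free(path);
        return ret;
    }

    int main(void)
    {
        struct toy_path reused;
        int size, i;

        /* hot loop: one path, reused for every call */
        for (i = 1; i <= 3; i++) {
            __lookup(&reused, i, &size);
            printf("%s -> size %d\n", reused.scratch, size);
        }

        /* one-off caller keeps the simple wrapper */
        lookup(42, &size);
        printf("ino 42 -> size %d\n", size);
        return 0;
    }
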
diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
index 9dbf42395153..5011aadacab8 100644
--- a/fs/btrfs/super.c
+++ b/fs/btrfs/super.c
@@ -66,6 +66,8 @@
66static const struct super_operations btrfs_super_ops; 66static const struct super_operations btrfs_super_ops;
67static struct file_system_type btrfs_fs_type; 67static struct file_system_type btrfs_fs_type;
68 68
69static int btrfs_remount(struct super_block *sb, int *flags, char *data);
70
69static const char *btrfs_decode_error(int errno) 71static const char *btrfs_decode_error(int errno)
70{ 72{
71 char *errstr = "unknown"; 73 char *errstr = "unknown";
@@ -1185,6 +1187,26 @@ static struct dentry *mount_subvol(const char *subvol_name, int flags,
1185 mnt = vfs_kern_mount(&btrfs_fs_type, flags, device_name, 1187 mnt = vfs_kern_mount(&btrfs_fs_type, flags, device_name,
1186 newargs); 1188 newargs);
1187 kfree(newargs); 1189 kfree(newargs);
1190
1191 if (PTR_RET(mnt) == -EBUSY) {
1192 if (flags & MS_RDONLY) {
1193 mnt = vfs_kern_mount(&btrfs_fs_type, flags & ~MS_RDONLY, device_name,
1194 newargs);
1195 } else {
1196 int r;
1197 mnt = vfs_kern_mount(&btrfs_fs_type, flags | MS_RDONLY, device_name,
1198 newargs);
1199 if (IS_ERR(mnt))
1200 return ERR_CAST(mnt);
1201
1202 r = btrfs_remount(mnt->mnt_sb, &flags, NULL);
1203 if (r < 0) {
1204 /* FIXME: release vfsmount mnt ??*/
1205 return ERR_PTR(r);
1206 }
1207 }
1208 }
1209
1188 if (IS_ERR(mnt)) 1210 if (IS_ERR(mnt))
1189 return ERR_CAST(mnt); 1211 return ERR_CAST(mnt);
1190 1212
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
index a04707f740d6..7579f6d0b854 100644
--- a/fs/btrfs/transaction.c
+++ b/fs/btrfs/transaction.c
@@ -75,10 +75,21 @@ void btrfs_put_transaction(struct btrfs_transaction *transaction)
75 } 75 }
76} 76}
77 77
78static noinline void switch_commit_root(struct btrfs_root *root) 78static noinline void switch_commit_roots(struct btrfs_transaction *trans,
79 struct btrfs_fs_info *fs_info)
79{ 80{
80 free_extent_buffer(root->commit_root); 81 struct btrfs_root *root, *tmp;
81 root->commit_root = btrfs_root_node(root); 82
83 down_write(&fs_info->commit_root_sem);
84 list_for_each_entry_safe(root, tmp, &trans->switch_commits,
85 dirty_list) {
86 list_del_init(&root->dirty_list);
87 free_extent_buffer(root->commit_root);
88 root->commit_root = btrfs_root_node(root);
89 if (is_fstree(root->objectid))
90 btrfs_unpin_free_ino(root);
91 }
92 up_write(&fs_info->commit_root_sem);
82} 93}
83 94
84static inline void extwriter_counter_inc(struct btrfs_transaction *trans, 95static inline void extwriter_counter_inc(struct btrfs_transaction *trans,
@@ -208,6 +219,7 @@ loop:
208 INIT_LIST_HEAD(&cur_trans->pending_snapshots); 219 INIT_LIST_HEAD(&cur_trans->pending_snapshots);
209 INIT_LIST_HEAD(&cur_trans->ordered_operations); 220 INIT_LIST_HEAD(&cur_trans->ordered_operations);
210 INIT_LIST_HEAD(&cur_trans->pending_chunks); 221 INIT_LIST_HEAD(&cur_trans->pending_chunks);
222 INIT_LIST_HEAD(&cur_trans->switch_commits);
211 list_add_tail(&cur_trans->list, &fs_info->trans_list); 223 list_add_tail(&cur_trans->list, &fs_info->trans_list);
212 extent_io_tree_init(&cur_trans->dirty_pages, 224 extent_io_tree_init(&cur_trans->dirty_pages,
213 fs_info->btree_inode->i_mapping); 225 fs_info->btree_inode->i_mapping);
@@ -375,7 +387,8 @@ start_transaction(struct btrfs_root *root, u64 num_items, unsigned int type,
375 if (test_bit(BTRFS_FS_STATE_ERROR, &root->fs_info->fs_state)) 387 if (test_bit(BTRFS_FS_STATE_ERROR, &root->fs_info->fs_state))
376 return ERR_PTR(-EROFS); 388 return ERR_PTR(-EROFS);
377 389
378 if (current->journal_info) { 390 if (current->journal_info &&
391 current->journal_info != (void *)BTRFS_SEND_TRANS_STUB) {
379 WARN_ON(type & TRANS_EXTWRITERS); 392 WARN_ON(type & TRANS_EXTWRITERS);
380 h = current->journal_info; 393 h = current->journal_info;
381 h->use_count++; 394 h->use_count++;
@@ -919,9 +932,6 @@ static int update_cowonly_root(struct btrfs_trans_handle *trans,
919 return ret; 932 return ret;
920 } 933 }
921 934
922 if (root != root->fs_info->extent_root)
923 switch_commit_root(root);
924
925 return 0; 935 return 0;
926} 936}
927 937
@@ -977,15 +987,16 @@ static noinline int commit_cowonly_roots(struct btrfs_trans_handle *trans,
977 list_del_init(next); 987 list_del_init(next);
978 root = list_entry(next, struct btrfs_root, dirty_list); 988 root = list_entry(next, struct btrfs_root, dirty_list);
979 989
990 if (root != fs_info->extent_root)
991 list_add_tail(&root->dirty_list,
992 &trans->transaction->switch_commits);
980 ret = update_cowonly_root(trans, root); 993 ret = update_cowonly_root(trans, root);
981 if (ret) 994 if (ret)
982 return ret; 995 return ret;
983 } 996 }
984 997
985 down_write(&fs_info->extent_commit_sem); 998 list_add_tail(&fs_info->extent_root->dirty_list,
986 switch_commit_root(fs_info->extent_root); 999 &trans->transaction->switch_commits);
987 up_write(&fs_info->extent_commit_sem);
988
989 btrfs_after_dev_replace_commit(fs_info); 1000 btrfs_after_dev_replace_commit(fs_info);
990 1001
991 return 0; 1002 return 0;
@@ -1042,11 +1053,8 @@ static noinline int commit_fs_roots(struct btrfs_trans_handle *trans,
1042 smp_wmb(); 1053 smp_wmb();
1043 1054
1044 if (root->commit_root != root->node) { 1055 if (root->commit_root != root->node) {
1045 mutex_lock(&root->fs_commit_mutex); 1056 list_add_tail(&root->dirty_list,
1046 switch_commit_root(root); 1057 &trans->transaction->switch_commits);
1047 btrfs_unpin_free_ino(root);
1048 mutex_unlock(&root->fs_commit_mutex);
1049
1050 btrfs_set_root_node(&root->root_item, 1058 btrfs_set_root_node(&root->root_item,
1051 root->node); 1059 root->node);
1052 } 1060 }
@@ -1857,11 +1865,15 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
1857 1865
1858 btrfs_set_root_node(&root->fs_info->tree_root->root_item, 1866 btrfs_set_root_node(&root->fs_info->tree_root->root_item,
1859 root->fs_info->tree_root->node); 1867 root->fs_info->tree_root->node);
1860 switch_commit_root(root->fs_info->tree_root); 1868 list_add_tail(&root->fs_info->tree_root->dirty_list,
1869 &cur_trans->switch_commits);
1861 1870
1862 btrfs_set_root_node(&root->fs_info->chunk_root->root_item, 1871 btrfs_set_root_node(&root->fs_info->chunk_root->root_item,
1863 root->fs_info->chunk_root->node); 1872 root->fs_info->chunk_root->node);
1864 switch_commit_root(root->fs_info->chunk_root); 1873 list_add_tail(&root->fs_info->chunk_root->dirty_list,
1874 &cur_trans->switch_commits);
1875
1876 switch_commit_roots(cur_trans, root->fs_info);
1865 1877
1866 assert_qgroups_uptodate(trans); 1878 assert_qgroups_uptodate(trans);
1867 update_super_roots(root); 1879 update_super_roots(root);
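
Instead of switching each root's commit_root the moment it is updated, the transaction code above queues roots on cur_trans->switch_commits and switch_commit_roots() flips them all in one pass under commit_root_sem. A toy model of that deferral, using a pthread rwlock and a hand-rolled list in place of the kernel primitives (illustrative names only):

    #include <stdio.h>
    #include <pthread.h>

    struct toy_root {
        const char *name;
        int node;          /* stands in for root->node        */
        int commit_node;   /* stands in for root->commit_root */
        struct toy_root *next_switch;
    };

    static pthread_rwlock_t commit_root_sem = PTHREAD_RWLOCK_INITIALIZER;
    static struct toy_root *switch_commits;   /* per-transaction queue */

    /* Called wherever a root's tree has been updated during the commit. */
    static void queue_for_switch(struct toy_root *root)
    {
        root->next_switch = switch_commits;
        switch_commits = root;
    }

    /* One write-side critical section publishes every new commit root. */
    static void switch_commit_roots(void)
    {
        struct toy_root *root;

        pthread_rwlock_wrlock(&commit_root_sem);
        for (root = switch_commits; root; root = root->next_switch) {
            root->commit_node = root->node;
            printf("switched %s\n", root->name);
        }
        switch_commits = NULL;
        pthread_rwlock_unlock(&commit_root_sem);
    }

    int main(void)
    {
        struct toy_root extent = { "extent", 2, 1, NULL };
        struct toy_root fs     = { "fs",     5, 4, NULL };

        queue_for_switch(&extent);
        queue_for_switch(&fs);
        switch_commit_roots();
        return 0;
    }

Readers such as send, which only walk commit roots, then take commit_root_sem for reading instead of joining a transaction, which is what makes the full_send_tree() simplification earlier in this patch possible.
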
diff --git a/fs/btrfs/transaction.h b/fs/btrfs/transaction.h
index 6ac037e9f9f0..b57b924e8e03 100644
--- a/fs/btrfs/transaction.h
+++ b/fs/btrfs/transaction.h
@@ -57,6 +57,7 @@ struct btrfs_transaction {
57 struct list_head pending_snapshots; 57 struct list_head pending_snapshots;
58 struct list_head ordered_operations; 58 struct list_head ordered_operations;
59 struct list_head pending_chunks; 59 struct list_head pending_chunks;
60 struct list_head switch_commits;
60 struct btrfs_delayed_ref_root delayed_refs; 61 struct btrfs_delayed_ref_root delayed_refs;
61 int aborted; 62 int aborted;
62}; 63};
@@ -78,6 +79,8 @@ struct btrfs_transaction {
78#define TRANS_EXTWRITERS (__TRANS_USERSPACE | __TRANS_START | \ 79#define TRANS_EXTWRITERS (__TRANS_USERSPACE | __TRANS_START | \
79 __TRANS_ATTACH) 80 __TRANS_ATTACH)
80 81
82#define BTRFS_SEND_TRANS_STUB 1
83
81struct btrfs_trans_handle { 84struct btrfs_trans_handle {
82 u64 transid; 85 u64 transid;
83 u64 bytes_reserved; 86 u64 bytes_reserved;
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index d241130a32fd..49d7fab73360 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -448,6 +448,14 @@ static void pending_bios_fn(struct btrfs_work *work)
448 run_scheduled_bios(device); 448 run_scheduled_bios(device);
449} 449}
450 450
451/*
452 * Add new device to list of registered devices
453 *
454 * Returns:
455 * 1 - first time device is seen
456 * 0 - device already known
457 * < 0 - error
458 */
451static noinline int device_list_add(const char *path, 459static noinline int device_list_add(const char *path,
452 struct btrfs_super_block *disk_super, 460 struct btrfs_super_block *disk_super,
453 u64 devid, struct btrfs_fs_devices **fs_devices_ret) 461 u64 devid, struct btrfs_fs_devices **fs_devices_ret)
@@ -455,6 +463,7 @@ static noinline int device_list_add(const char *path,
455 struct btrfs_device *device; 463 struct btrfs_device *device;
456 struct btrfs_fs_devices *fs_devices; 464 struct btrfs_fs_devices *fs_devices;
457 struct rcu_string *name; 465 struct rcu_string *name;
466 int ret = 0;
458 u64 found_transid = btrfs_super_generation(disk_super); 467 u64 found_transid = btrfs_super_generation(disk_super);
459 468
460 fs_devices = find_fsid(disk_super->fsid); 469 fs_devices = find_fsid(disk_super->fsid);
@@ -495,6 +504,7 @@ static noinline int device_list_add(const char *path,
495 fs_devices->num_devices++; 504 fs_devices->num_devices++;
496 mutex_unlock(&fs_devices->device_list_mutex); 505 mutex_unlock(&fs_devices->device_list_mutex);
497 506
507 ret = 1;
498 device->fs_devices = fs_devices; 508 device->fs_devices = fs_devices;
499 } else if (!device->name || strcmp(device->name->str, path)) { 509 } else if (!device->name || strcmp(device->name->str, path)) {
500 name = rcu_string_strdup(path, GFP_NOFS); 510 name = rcu_string_strdup(path, GFP_NOFS);
@@ -513,7 +523,8 @@ static noinline int device_list_add(const char *path,
513 fs_devices->latest_trans = found_transid; 523 fs_devices->latest_trans = found_transid;
514 } 524 }
515 *fs_devices_ret = fs_devices; 525 *fs_devices_ret = fs_devices;
516 return 0; 526
527 return ret;
517} 528}
518 529
519static struct btrfs_fs_devices *clone_fs_devices(struct btrfs_fs_devices *orig) 530static struct btrfs_fs_devices *clone_fs_devices(struct btrfs_fs_devices *orig)
@@ -910,17 +921,19 @@ int btrfs_scan_one_device(const char *path, fmode_t flags, void *holder,
910 transid = btrfs_super_generation(disk_super); 921 transid = btrfs_super_generation(disk_super);
911 total_devices = btrfs_super_num_devices(disk_super); 922 total_devices = btrfs_super_num_devices(disk_super);
912 923
913 if (disk_super->label[0]) {
914 if (disk_super->label[BTRFS_LABEL_SIZE - 1])
915 disk_super->label[BTRFS_LABEL_SIZE - 1] = '\0';
916 printk(KERN_INFO "BTRFS: device label %s ", disk_super->label);
917 } else {
918 printk(KERN_INFO "BTRFS: device fsid %pU ", disk_super->fsid);
919 }
920
921 printk(KERN_CONT "devid %llu transid %llu %s\n", devid, transid, path);
922
923 ret = device_list_add(path, disk_super, devid, fs_devices_ret); 924 ret = device_list_add(path, disk_super, devid, fs_devices_ret);
925 if (ret > 0) {
926 if (disk_super->label[0]) {
927 if (disk_super->label[BTRFS_LABEL_SIZE - 1])
928 disk_super->label[BTRFS_LABEL_SIZE - 1] = '\0';
929 printk(KERN_INFO "BTRFS: device label %s ", disk_super->label);
930 } else {
931 printk(KERN_INFO "BTRFS: device fsid %pU ", disk_super->fsid);
932 }
933
934 printk(KERN_CONT "devid %llu transid %llu %s\n", devid, transid, path);
935 ret = 0;
936 }
924 if (!ret && fs_devices_ret) 937 if (!ret && fs_devices_ret)
925 (*fs_devices_ret)->total_devices = total_devices; 938 (*fs_devices_ret)->total_devices = total_devices;
926 939
diff --git a/fs/buffer.c b/fs/buffer.c
index 8c53a2b15ecb..9ddb9fc7d923 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -2114,8 +2114,8 @@ EXPORT_SYMBOL(generic_write_end);
2114 * Returns true if all buffers which correspond to a file portion 2114 * Returns true if all buffers which correspond to a file portion
2115 * we want to read are uptodate. 2115 * we want to read are uptodate.
2116 */ 2116 */
2117int block_is_partially_uptodate(struct page *page, read_descriptor_t *desc, 2117int block_is_partially_uptodate(struct page *page, unsigned long from,
2118 unsigned long from) 2118 unsigned long count)
2119{ 2119{
2120 unsigned block_start, block_end, blocksize; 2120 unsigned block_start, block_end, blocksize;
2121 unsigned to; 2121 unsigned to;
@@ -2127,7 +2127,7 @@ int block_is_partially_uptodate(struct page *page, read_descriptor_t *desc,
2127 2127
2128 head = page_buffers(page); 2128 head = page_buffers(page);
2129 blocksize = head->b_size; 2129 blocksize = head->b_size;
2130 to = min_t(unsigned, PAGE_CACHE_SIZE - from, desc->count); 2130 to = min_t(unsigned, PAGE_CACHE_SIZE - from, count);
2131 to = from + to; 2131 to = from + to;
2132 if (from < blocksize && to > PAGE_CACHE_SIZE - blocksize) 2132 if (from < blocksize && to > PAGE_CACHE_SIZE - blocksize)
2133 return 0; 2133 return 0;
diff --git a/fs/cachefiles/bind.c b/fs/cachefiles/bind.c
index 622f4696e484..5b99bafc31d1 100644
--- a/fs/cachefiles/bind.c
+++ b/fs/cachefiles/bind.c
@@ -124,7 +124,6 @@ static int cachefiles_daemon_add_cache(struct cachefiles_cache *cache)
124 /* check parameters */ 124 /* check parameters */
125 ret = -EOPNOTSUPP; 125 ret = -EOPNOTSUPP;
126 if (!root->d_inode || 126 if (!root->d_inode ||
127 !root->d_inode->i_op ||
128 !root->d_inode->i_op->lookup || 127 !root->d_inode->i_op->lookup ||
129 !root->d_inode->i_op->mkdir || 128 !root->d_inode->i_op->mkdir ||
130 !root->d_inode->i_op->setxattr || 129 !root->d_inode->i_op->setxattr ||
diff --git a/fs/cachefiles/namei.c b/fs/cachefiles/namei.c
index 6494d9f673aa..c0a681705104 100644
--- a/fs/cachefiles/namei.c
+++ b/fs/cachefiles/namei.c
@@ -779,8 +779,7 @@ struct dentry *cachefiles_get_directory(struct cachefiles_cache *cache,
779 } 779 }
780 780
781 ret = -EPERM; 781 ret = -EPERM;
782 if (!subdir->d_inode->i_op || 782 if (!subdir->d_inode->i_op->setxattr ||
783 !subdir->d_inode->i_op->setxattr ||
784 !subdir->d_inode->i_op->getxattr || 783 !subdir->d_inode->i_op->getxattr ||
785 !subdir->d_inode->i_op->lookup || 784 !subdir->d_inode->i_op->lookup ||
786 !subdir->d_inode->i_op->mkdir || 785 !subdir->d_inode->i_op->mkdir ||
diff --git a/fs/ceph/file.c b/fs/ceph/file.c
index 66075a4ad979..39da1c2efa50 100644
--- a/fs/ceph/file.c
+++ b/fs/ceph/file.c
@@ -601,7 +601,7 @@ ceph_sync_direct_write(struct kiocb *iocb, const struct iovec *iov,
601 false); 601 false);
602 if (IS_ERR(req)) { 602 if (IS_ERR(req)) {
603 ret = PTR_ERR(req); 603 ret = PTR_ERR(req);
604 goto out; 604 break;
605 } 605 }
606 606
607 num_pages = calc_pages_for(page_align, len); 607 num_pages = calc_pages_for(page_align, len);
@@ -719,7 +719,7 @@ static ssize_t ceph_sync_write(struct kiocb *iocb, const struct iovec *iov,
719 false); 719 false);
720 if (IS_ERR(req)) { 720 if (IS_ERR(req)) {
721 ret = PTR_ERR(req); 721 ret = PTR_ERR(req);
722 goto out; 722 break;
723 } 723 }
724 724
725 /* 725 /*
@@ -972,6 +972,7 @@ retry_snap:
972 } 972 }
973 } else { 973 } else {
974 loff_t old_size = inode->i_size; 974 loff_t old_size = inode->i_size;
975 struct iov_iter from;
975 /* 976 /*
976 * No need to acquire the i_truncate_mutex. Because 977 * No need to acquire the i_truncate_mutex. Because
977 * the MDS revokes Fwb caps before sending truncate 978 * the MDS revokes Fwb caps before sending truncate
@@ -979,9 +980,10 @@ retry_snap:
979 * are pending vmtruncate. So write and vmtruncate 980 * are pending vmtruncate. So write and vmtruncate
980 * can not run at the same time 981 * can not run at the same time
981 */ 982 */
982 written = generic_file_buffered_write(iocb, iov, nr_segs, 983 iov_iter_init(&from, iov, nr_segs, count, 0);
983 pos, &iocb->ki_pos, 984 written = generic_perform_write(file, &from, pos);
984 count, 0); 985 if (likely(written >= 0))
986 iocb->ki_pos = pos + written;
985 if (inode->i_size > old_size) 987 if (inode->i_size > old_size)
986 ceph_fscache_update_objectsize(inode); 988 ceph_fscache_update_objectsize(inode);
987 mutex_unlock(&inode->i_mutex); 989 mutex_unlock(&inode->i_mutex);
diff --git a/fs/ceph/ioctl.c b/fs/ceph/ioctl.c
index efbe08289292..fdf941b44ff1 100644
--- a/fs/ceph/ioctl.c
+++ b/fs/ceph/ioctl.c
@@ -1,9 +1,8 @@
1#include <linux/ceph/ceph_debug.h>
1#include <linux/in.h> 2#include <linux/in.h>
2 3
3#include "super.h" 4#include "super.h"
4#include "mds_client.h" 5#include "mds_client.h"
5#include <linux/ceph/ceph_debug.h>
6
7#include "ioctl.h" 6#include "ioctl.h"
8 7
9 8
diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
index 2c70cbe35d39..df9c9141c099 100644
--- a/fs/cifs/cifsfs.c
+++ b/fs/cifs/cifsfs.c
@@ -850,7 +850,6 @@ const struct inode_operations cifs_file_inode_ops = {
850/* revalidate:cifs_revalidate, */ 850/* revalidate:cifs_revalidate, */
851 .setattr = cifs_setattr, 851 .setattr = cifs_setattr,
852 .getattr = cifs_getattr, /* do we need this anymore? */ 852 .getattr = cifs_getattr, /* do we need this anymore? */
853 .rename = cifs_rename,
854 .permission = cifs_permission, 853 .permission = cifs_permission,
855#ifdef CONFIG_CIFS_XATTR 854#ifdef CONFIG_CIFS_XATTR
856 .setxattr = cifs_setxattr, 855 .setxattr = cifs_setxattr,
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index 216d7e99f921..8add25538a3b 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -2579,19 +2579,32 @@ cifs_writev(struct kiocb *iocb, const struct iovec *iov,
2579 struct cifsInodeInfo *cinode = CIFS_I(inode); 2579 struct cifsInodeInfo *cinode = CIFS_I(inode);
2580 struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server; 2580 struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;
2581 ssize_t rc = -EACCES; 2581 ssize_t rc = -EACCES;
2582 loff_t lock_pos = pos; 2582 loff_t lock_pos = iocb->ki_pos;
2583 2583
2584 if (file->f_flags & O_APPEND)
2585 lock_pos = i_size_read(inode);
2586 /* 2584 /*
2587 * We need to hold the sem to be sure nobody modifies lock list 2585 * We need to hold the sem to be sure nobody modifies lock list
2588 * with a brlock that prevents writing. 2586 * with a brlock that prevents writing.
2589 */ 2587 */
2590 down_read(&cinode->lock_sem); 2588 down_read(&cinode->lock_sem);
2589 mutex_lock(&inode->i_mutex);
2590 if (file->f_flags & O_APPEND)
2591 lock_pos = i_size_read(inode);
2591 if (!cifs_find_lock_conflict(cfile, lock_pos, iov_length(iov, nr_segs), 2592 if (!cifs_find_lock_conflict(cfile, lock_pos, iov_length(iov, nr_segs),
2592 server->vals->exclusive_lock_type, NULL, 2593 server->vals->exclusive_lock_type, NULL,
2593 CIFS_WRITE_OP)) 2594 CIFS_WRITE_OP)) {
2594 rc = generic_file_aio_write(iocb, iov, nr_segs, pos); 2595 rc = __generic_file_aio_write(iocb, iov, nr_segs);
2596 mutex_unlock(&inode->i_mutex);
2597
2598 if (rc > 0) {
2599 ssize_t err;
2600
2601 err = generic_write_sync(file, iocb->ki_pos - rc, rc);
2602 if (rc < 0)
2603 rc = err;
2604 }
2605 } else {
2606 mutex_unlock(&inode->i_mutex);
2607 }
2595 up_read(&cinode->lock_sem); 2608 up_read(&cinode->lock_sem);
2596 return rc; 2609 return rc;
2597} 2610}
@@ -2727,56 +2740,27 @@ cifs_retry_async_readv(struct cifs_readdata *rdata)
2727/** 2740/**
2728 * cifs_readdata_to_iov - copy data from pages in response to an iovec 2741 * cifs_readdata_to_iov - copy data from pages in response to an iovec
2729 * @rdata: the readdata response with list of pages holding data 2742 * @rdata: the readdata response with list of pages holding data
2730 * @iov: vector in which we should copy the data 2743 * @iter: destination for our data
2731 * @nr_segs: number of segments in vector
2732 * @offset: offset into file of the first iovec
2733 * @copied: used to return the amount of data copied to the iov
2734 * 2744 *
2735 * This function copies data from a list of pages in a readdata response into 2745 * This function copies data from a list of pages in a readdata response into
2736 * an array of iovecs. It will first calculate where the data should go 2746 * an array of iovecs. It will first calculate where the data should go
2737 * based on the info in the readdata and then copy the data into that spot. 2747 * based on the info in the readdata and then copy the data into that spot.
2738 */ 2748 */
2739static ssize_t 2749static int
2740cifs_readdata_to_iov(struct cifs_readdata *rdata, const struct iovec *iov, 2750cifs_readdata_to_iov(struct cifs_readdata *rdata, struct iov_iter *iter)
2741 unsigned long nr_segs, loff_t offset, ssize_t *copied)
2742{ 2751{
2743 int rc = 0; 2752 size_t remaining = rdata->bytes;
2744 struct iov_iter ii;
2745 size_t pos = rdata->offset - offset;
2746 ssize_t remaining = rdata->bytes;
2747 unsigned char *pdata;
2748 unsigned int i; 2753 unsigned int i;
2749 2754
2750 /* set up iov_iter and advance to the correct offset */
2751 iov_iter_init(&ii, iov, nr_segs, iov_length(iov, nr_segs), 0);
2752 iov_iter_advance(&ii, pos);
2753
2754 *copied = 0;
2755 for (i = 0; i < rdata->nr_pages; i++) { 2755 for (i = 0; i < rdata->nr_pages; i++) {
2756 ssize_t copy;
2757 struct page *page = rdata->pages[i]; 2756 struct page *page = rdata->pages[i];
2758 2757 size_t copy = min_t(size_t, remaining, PAGE_SIZE);
2759 /* copy a whole page or whatever's left */ 2758 size_t written = copy_page_to_iter(page, 0, copy, iter);
2760 copy = min_t(ssize_t, remaining, PAGE_SIZE); 2759 remaining -= written;
2761 2760 if (written < copy && iov_iter_count(iter) > 0)
2762 /* ...but limit it to whatever space is left in the iov */ 2761 break;
2763 copy = min_t(ssize_t, copy, iov_iter_count(&ii));
2764
2765 /* go while there's data to be copied and no errors */
2766 if (copy && !rc) {
2767 pdata = kmap(page);
2768 rc = memcpy_toiovecend(ii.iov, pdata, ii.iov_offset,
2769 (int)copy);
2770 kunmap(page);
2771 if (!rc) {
2772 *copied += copy;
2773 remaining -= copy;
2774 iov_iter_advance(&ii, copy);
2775 }
2776 }
2777 } 2762 }
2778 2763 return remaining ? -EFAULT : 0;
2779 return rc;
2780} 2764}
2781 2765
2782static void 2766static void
@@ -2837,20 +2821,21 @@ cifs_uncached_read_into_pages(struct TCP_Server_Info *server,
2837 return total_read > 0 ? total_read : result; 2821 return total_read > 0 ? total_read : result;
2838} 2822}
2839 2823
2840static ssize_t 2824ssize_t cifs_user_readv(struct kiocb *iocb, const struct iovec *iov,
2841cifs_iovec_read(struct file *file, const struct iovec *iov, 2825 unsigned long nr_segs, loff_t pos)
2842 unsigned long nr_segs, loff_t *poffset)
2843{ 2826{
2827 struct file *file = iocb->ki_filp;
2844 ssize_t rc; 2828 ssize_t rc;
2845 size_t len, cur_len; 2829 size_t len, cur_len;
2846 ssize_t total_read = 0; 2830 ssize_t total_read = 0;
2847 loff_t offset = *poffset; 2831 loff_t offset = pos;
2848 unsigned int npages; 2832 unsigned int npages;
2849 struct cifs_sb_info *cifs_sb; 2833 struct cifs_sb_info *cifs_sb;
2850 struct cifs_tcon *tcon; 2834 struct cifs_tcon *tcon;
2851 struct cifsFileInfo *open_file; 2835 struct cifsFileInfo *open_file;
2852 struct cifs_readdata *rdata, *tmp; 2836 struct cifs_readdata *rdata, *tmp;
2853 struct list_head rdata_list; 2837 struct list_head rdata_list;
2838 struct iov_iter to;
2854 pid_t pid; 2839 pid_t pid;
2855 2840
2856 if (!nr_segs) 2841 if (!nr_segs)
@@ -2860,6 +2845,8 @@ cifs_iovec_read(struct file *file, const struct iovec *iov,
2860 if (!len) 2845 if (!len)
2861 return 0; 2846 return 0;
2862 2847
2848 iov_iter_init(&to, iov, nr_segs, len, 0);
2849
2863 INIT_LIST_HEAD(&rdata_list); 2850 INIT_LIST_HEAD(&rdata_list);
2864 cifs_sb = CIFS_SB(file->f_path.dentry->d_sb); 2851 cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
2865 open_file = file->private_data; 2852 open_file = file->private_data;
@@ -2917,55 +2904,44 @@ error:
2917 if (!list_empty(&rdata_list)) 2904 if (!list_empty(&rdata_list))
2918 rc = 0; 2905 rc = 0;
2919 2906
2907 len = iov_iter_count(&to);
2920 /* the loop below should proceed in the order of increasing offsets */ 2908 /* the loop below should proceed in the order of increasing offsets */
2921restart_loop:
2922 list_for_each_entry_safe(rdata, tmp, &rdata_list, list) { 2909 list_for_each_entry_safe(rdata, tmp, &rdata_list, list) {
2910 again:
2923 if (!rc) { 2911 if (!rc) {
2924 ssize_t copied;
2925
2926 /* FIXME: freezable sleep too? */ 2912 /* FIXME: freezable sleep too? */
2927 rc = wait_for_completion_killable(&rdata->done); 2913 rc = wait_for_completion_killable(&rdata->done);
2928 if (rc) 2914 if (rc)
2929 rc = -EINTR; 2915 rc = -EINTR;
2930 else if (rdata->result) 2916 else if (rdata->result) {
2931 rc = rdata->result; 2917 rc = rdata->result;
2932 else { 2918 /* resend call if it's a retryable error */
2933 rc = cifs_readdata_to_iov(rdata, iov, 2919 if (rc == -EAGAIN) {
2934 nr_segs, *poffset, 2920 rc = cifs_retry_async_readv(rdata);
2935 &copied); 2921 goto again;
2936 total_read += copied; 2922 }
2923 } else {
2924 rc = cifs_readdata_to_iov(rdata, &to);
2937 } 2925 }
2938 2926
2939 /* resend call if it's a retryable error */
2940 if (rc == -EAGAIN) {
2941 rc = cifs_retry_async_readv(rdata);
2942 goto restart_loop;
2943 }
2944 } 2927 }
2945 list_del_init(&rdata->list); 2928 list_del_init(&rdata->list);
2946 kref_put(&rdata->refcount, cifs_uncached_readdata_release); 2929 kref_put(&rdata->refcount, cifs_uncached_readdata_release);
2947 } 2930 }
2948 2931
2932 total_read = len - iov_iter_count(&to);
2933
2949 cifs_stats_bytes_read(tcon, total_read); 2934 cifs_stats_bytes_read(tcon, total_read);
2950 *poffset += total_read;
2951 2935
2952 /* mask nodata case */ 2936 /* mask nodata case */
2953 if (rc == -ENODATA) 2937 if (rc == -ENODATA)
2954 rc = 0; 2938 rc = 0;
2955 2939
2956 return total_read ? total_read : rc; 2940 if (total_read) {
2957} 2941 iocb->ki_pos = pos + total_read;
2958 2942 return total_read;
2959ssize_t cifs_user_readv(struct kiocb *iocb, const struct iovec *iov, 2943 }
2960 unsigned long nr_segs, loff_t pos) 2944 return rc;
2961{
2962 ssize_t read;
2963
2964 read = cifs_iovec_read(iocb->ki_filp, iov, nr_segs, &pos);
2965 if (read > 0)
2966 iocb->ki_pos = pos;
2967
2968 return read;
2969} 2945}
2970 2946
2971ssize_t 2947ssize_t
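
The cifs hunks above drop the kmap()/memcpy_toiovecend() loop and instead copy each page of the read response with copy_page_to_iter() into an iov_iter built by iov_iter_init(). A minimal sketch of that pattern, using the same iov_iter calls the new code uses; the helper name and its arguments are illustrative, not from the patch, and the caller is assumed to have clamped "remaining" to the amount of valid data, as the new cifs_readdata_to_iov() does:

	#include <linux/kernel.h>
	#include <linux/mm.h>
	#include <linux/uio.h>

	/* Copy an array of filled pages into a caller-supplied iov_iter. */
	static int example_pages_to_iter(struct page **pages, unsigned int nr_pages,
					 size_t remaining, struct iov_iter *iter)
	{
		unsigned int i;

		for (i = 0; i < nr_pages; i++) {
			/* copy a whole page or whatever is left of the data */
			size_t copy = min_t(size_t, remaining, PAGE_SIZE);
			size_t written = copy_page_to_iter(pages[i], 0, copy, iter);

			remaining -= written;
			/*
			 * A short copy while the iovec still has room means the
			 * user buffer faulted, so stop instead of retrying.
			 */
			if (written < copy && iov_iter_count(iter) > 0)
				break;
		}
		return remaining ? -EFAULT : 0;
	}
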
diff --git a/fs/dlm/lowcomms.c b/fs/dlm/lowcomms.c
index 3190ca973dd6..1e5b45359509 100644
--- a/fs/dlm/lowcomms.c
+++ b/fs/dlm/lowcomms.c
@@ -424,7 +424,7 @@ int dlm_lowcomms_addr(int nodeid, struct sockaddr_storage *addr, int len)
424} 424}
425 425
426/* Data available on socket or listen socket received a connect */ 426/* Data available on socket or listen socket received a connect */
427static void lowcomms_data_ready(struct sock *sk, int count_unused) 427static void lowcomms_data_ready(struct sock *sk)
428{ 428{
429 struct connection *con = sock2con(sk); 429 struct connection *con = sock2con(sk);
430 if (con && !test_and_set_bit(CF_READ_PENDING, &con->flags)) 430 if (con && !test_and_set_bit(CF_READ_PENDING, &con->flags))
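
This hunk, like the ncpfs and ocfs2 ones further down, adapts to the ->sk_data_ready callback losing its unused byte-count argument. A sketch of the new-style callback and of how such a callback is typically installed; every example_* name is illustrative, not from the patch:

	#include <net/sock.h>
	#include <linux/workqueue.h>

	struct example_conn {
		struct work_struct rx_work;
	};

	static struct workqueue_struct *example_wq;

	/* New-style ->sk_data_ready: the socket is the only argument. */
	static void example_data_ready(struct sock *sk)
	{
		struct example_conn *con = sk->sk_user_data;

		if (con)
			queue_work(example_wq, &con->rx_work);
	}

	/* Install the callback under sk_callback_lock, e.g. at connect time. */
	static void example_hook_socket(struct sock *sk, struct example_conn *con)
	{
		write_lock_bh(&sk->sk_callback_lock);
		sk->sk_user_data = con;
		sk->sk_data_ready = example_data_ready;
		write_unlock_bh(&sk->sk_callback_lock);
	}
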
diff --git a/fs/exec.c b/fs/exec.c
index 9e81c630dfa7..476f3ebf437e 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -813,7 +813,7 @@ EXPORT_SYMBOL(kernel_read);
813 813
814ssize_t read_code(struct file *file, unsigned long addr, loff_t pos, size_t len) 814ssize_t read_code(struct file *file, unsigned long addr, loff_t pos, size_t len)
815{ 815{
816 ssize_t res = file->f_op->read(file, (void __user *)addr, len, &pos); 816 ssize_t res = vfs_read(file, (void __user *)addr, len, &pos);
817 if (res > 0) 817 if (res > 0)
818 flush_icache_range(addr, addr + len); 818 flush_icache_range(addr, addr + len);
819 return res; 819 return res;
diff --git a/fs/ext4/file.c b/fs/ext4/file.c
index 4e508fc83dcf..ca7502d89fde 100644
--- a/fs/ext4/file.c
+++ b/fs/ext4/file.c
@@ -146,7 +146,7 @@ ext4_file_dio_write(struct kiocb *iocb, const struct iovec *iov,
146 overwrite = 1; 146 overwrite = 1;
147 } 147 }
148 148
149 ret = __generic_file_aio_write(iocb, iov, nr_segs, &iocb->ki_pos); 149 ret = __generic_file_aio_write(iocb, iov, nr_segs);
150 mutex_unlock(&inode->i_mutex); 150 mutex_unlock(&inode->i_mutex);
151 151
152 if (ret > 0) { 152 if (ret > 0) {
diff --git a/fs/file.c b/fs/file.c
index b61293badfb1..8f294cfac697 100644
--- a/fs/file.c
+++ b/fs/file.c
@@ -25,7 +25,10 @@
25 25
26int sysctl_nr_open __read_mostly = 1024*1024; 26int sysctl_nr_open __read_mostly = 1024*1024;
27int sysctl_nr_open_min = BITS_PER_LONG; 27int sysctl_nr_open_min = BITS_PER_LONG;
28int sysctl_nr_open_max = 1024 * 1024; /* raised later */ 28/* our max() is unusable in constant expressions ;-/ */
29#define __const_max(x, y) ((x) < (y) ? (x) : (y))
30int sysctl_nr_open_max = __const_max(INT_MAX, ~(size_t)0/sizeof(void *)) &
31 -BITS_PER_LONG;
29 32
30static void *alloc_fdmem(size_t size) 33static void *alloc_fdmem(size_t size)
31{ 34{
@@ -429,12 +432,6 @@ void exit_files(struct task_struct *tsk)
429 } 432 }
430} 433}
431 434
432void __init files_defer_init(void)
433{
434 sysctl_nr_open_max = min((size_t)INT_MAX, ~(size_t)0/sizeof(void *)) &
435 -BITS_PER_LONG;
436}
437
438struct files_struct init_files = { 435struct files_struct init_files = {
439 .count = ATOMIC_INIT(1), 436 .count = ATOMIC_INIT(1),
440 .fdt = &init_files.fdtab, 437 .fdt = &init_files.fdtab,
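
The fs/file.c hunk computes sysctl_nr_open_max in its initializer instead of in the removed files_defer_init(); the kernel's max() macro is built on typeof/statement expressions and so cannot appear in a constant initializer, hence the local __const_max(). A sketch of the same clamp with an illustrative variable name:

	#include <linux/kernel.h>
	#include <linux/bitops.h>

	#define __const_max(x, y) ((x) < (y) ? (x) : (y))

	/*
	 * Largest fd count that (a) fits in an int and (b) keeps an array of
	 * that many pointers addressable in size_t, rounded down to a multiple
	 * of BITS_PER_LONG since the open-fd bitmaps are stored in longs.
	 */
	static const int example_nr_open_max =
		__const_max(INT_MAX, ~(size_t)0 / sizeof(void *)) & -BITS_PER_LONG;
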
diff --git a/fs/file_table.c b/fs/file_table.c
index 01071c4d752e..a374f5033e97 100644
--- a/fs/file_table.c
+++ b/fs/file_table.c
@@ -52,7 +52,6 @@ static void file_free_rcu(struct rcu_head *head)
52static inline void file_free(struct file *f) 52static inline void file_free(struct file *f)
53{ 53{
54 percpu_counter_dec(&nr_files); 54 percpu_counter_dec(&nr_files);
55 file_check_state(f);
56 call_rcu(&f->f_u.fu_rcuhead, file_free_rcu); 55 call_rcu(&f->f_u.fu_rcuhead, file_free_rcu);
57} 56}
58 57
@@ -178,47 +177,12 @@ struct file *alloc_file(struct path *path, fmode_t mode,
178 file->f_mapping = path->dentry->d_inode->i_mapping; 177 file->f_mapping = path->dentry->d_inode->i_mapping;
179 file->f_mode = mode; 178 file->f_mode = mode;
180 file->f_op = fop; 179 file->f_op = fop;
181
182 /*
183 * These mounts don't really matter in practice
184 * for r/o bind mounts. They aren't userspace-
185 * visible. We do this for consistency, and so
186 * that we can do debugging checks at __fput()
187 */
188 if ((mode & FMODE_WRITE) && !special_file(path->dentry->d_inode->i_mode)) {
189 file_take_write(file);
190 WARN_ON(mnt_clone_write(path->mnt));
191 }
192 if ((mode & (FMODE_READ | FMODE_WRITE)) == FMODE_READ) 180 if ((mode & (FMODE_READ | FMODE_WRITE)) == FMODE_READ)
193 i_readcount_inc(path->dentry->d_inode); 181 i_readcount_inc(path->dentry->d_inode);
194 return file; 182 return file;
195} 183}
196EXPORT_SYMBOL(alloc_file); 184EXPORT_SYMBOL(alloc_file);
197 185
198/**
199 * drop_file_write_access - give up ability to write to a file
200 * @file: the file to which we will stop writing
201 *
202 * This is a central place which will give up the ability
203 * to write to @file, along with access to write through
204 * its vfsmount.
205 */
206static void drop_file_write_access(struct file *file)
207{
208 struct vfsmount *mnt = file->f_path.mnt;
209 struct dentry *dentry = file->f_path.dentry;
210 struct inode *inode = dentry->d_inode;
211
212 put_write_access(inode);
213
214 if (special_file(inode->i_mode))
215 return;
216 if (file_check_writeable(file) != 0)
217 return;
218 __mnt_drop_write(mnt);
219 file_release_write(file);
220}
221
222/* the real guts of fput() - releasing the last reference to file 186/* the real guts of fput() - releasing the last reference to file
223 */ 187 */
224static void __fput(struct file *file) 188static void __fput(struct file *file)
@@ -253,8 +217,10 @@ static void __fput(struct file *file)
253 put_pid(file->f_owner.pid); 217 put_pid(file->f_owner.pid);
254 if ((file->f_mode & (FMODE_READ | FMODE_WRITE)) == FMODE_READ) 218 if ((file->f_mode & (FMODE_READ | FMODE_WRITE)) == FMODE_READ)
255 i_readcount_dec(inode); 219 i_readcount_dec(inode);
256 if (file->f_mode & FMODE_WRITE) 220 if (file->f_mode & FMODE_WRITER) {
257 drop_file_write_access(file); 221 put_write_access(inode);
222 __mnt_drop_write(mnt);
223 }
258 file->f_path.dentry = NULL; 224 file->f_path.dentry = NULL;
259 file->f_path.mnt = NULL; 225 file->f_path.mnt = NULL;
260 file->f_inode = NULL; 226 file->f_inode = NULL;
@@ -359,6 +325,5 @@ void __init files_init(unsigned long mempages)
359 325
360 n = (mempages * (PAGE_SIZE / 1024)) / 10; 326 n = (mempages * (PAGE_SIZE / 1024)) / 10;
361 files_stat.max_files = max_t(unsigned long, n, NR_FILE); 327 files_stat.max_files = max_t(unsigned long, n, NR_FILE);
362 files_defer_init();
363 percpu_counter_init(&nr_files, 0); 328 percpu_counter_init(&nr_files, 0);
364} 329}
diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
index 0a648bb455ae..aac71ce373e4 100644
--- a/fs/fuse/dev.c
+++ b/fs/fuse/dev.c
@@ -667,15 +667,15 @@ static void fuse_copy_finish(struct fuse_copy_state *cs)
667 struct pipe_buffer *buf = cs->currbuf; 667 struct pipe_buffer *buf = cs->currbuf;
668 668
669 if (!cs->write) { 669 if (!cs->write) {
670 buf->ops->unmap(cs->pipe, buf, cs->mapaddr); 670 kunmap_atomic(cs->mapaddr);
671 } else { 671 } else {
672 kunmap(buf->page); 672 kunmap_atomic(cs->mapaddr);
673 buf->len = PAGE_SIZE - cs->len; 673 buf->len = PAGE_SIZE - cs->len;
674 } 674 }
675 cs->currbuf = NULL; 675 cs->currbuf = NULL;
676 cs->mapaddr = NULL; 676 cs->mapaddr = NULL;
677 } else if (cs->mapaddr) { 677 } else if (cs->mapaddr) {
678 kunmap(cs->pg); 678 kunmap_atomic(cs->mapaddr);
679 if (cs->write) { 679 if (cs->write) {
680 flush_dcache_page(cs->pg); 680 flush_dcache_page(cs->pg);
681 set_page_dirty_lock(cs->pg); 681 set_page_dirty_lock(cs->pg);
@@ -706,7 +706,7 @@ static int fuse_copy_fill(struct fuse_copy_state *cs)
706 706
707 BUG_ON(!cs->nr_segs); 707 BUG_ON(!cs->nr_segs);
708 cs->currbuf = buf; 708 cs->currbuf = buf;
709 cs->mapaddr = buf->ops->map(cs->pipe, buf, 0); 709 cs->mapaddr = kmap_atomic(buf->page);
710 cs->len = buf->len; 710 cs->len = buf->len;
711 cs->buf = cs->mapaddr + buf->offset; 711 cs->buf = cs->mapaddr + buf->offset;
712 cs->pipebufs++; 712 cs->pipebufs++;
@@ -726,7 +726,7 @@ static int fuse_copy_fill(struct fuse_copy_state *cs)
726 buf->len = 0; 726 buf->len = 0;
727 727
728 cs->currbuf = buf; 728 cs->currbuf = buf;
729 cs->mapaddr = kmap(page); 729 cs->mapaddr = kmap_atomic(page);
730 cs->buf = cs->mapaddr; 730 cs->buf = cs->mapaddr;
731 cs->len = PAGE_SIZE; 731 cs->len = PAGE_SIZE;
732 cs->pipebufs++; 732 cs->pipebufs++;
@@ -745,7 +745,7 @@ static int fuse_copy_fill(struct fuse_copy_state *cs)
745 return err; 745 return err;
746 BUG_ON(err != 1); 746 BUG_ON(err != 1);
747 offset = cs->addr % PAGE_SIZE; 747 offset = cs->addr % PAGE_SIZE;
748 cs->mapaddr = kmap(cs->pg); 748 cs->mapaddr = kmap_atomic(cs->pg);
749 cs->buf = cs->mapaddr + offset; 749 cs->buf = cs->mapaddr + offset;
750 cs->len = min(PAGE_SIZE - offset, cs->seglen); 750 cs->len = min(PAGE_SIZE - offset, cs->seglen);
751 cs->seglen -= cs->len; 751 cs->seglen -= cs->len;
@@ -874,7 +874,7 @@ static int fuse_try_move_page(struct fuse_copy_state *cs, struct page **pagep)
874out_fallback_unlock: 874out_fallback_unlock:
875 unlock_page(newpage); 875 unlock_page(newpage);
876out_fallback: 876out_fallback:
877 cs->mapaddr = buf->ops->map(cs->pipe, buf, 1); 877 cs->mapaddr = kmap_atomic(buf->page);
878 cs->buf = cs->mapaddr + buf->offset; 878 cs->buf = cs->mapaddr + buf->offset;
879 879
880 err = lock_request(cs->fc, cs->req); 880 err = lock_request(cs->fc, cs->req);
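
With the pipe_buf_operations ->map()/->unmap() hooks removed (see the fs/pipe.c hunks below), the fuse code above maps pipe-buffer and request pages directly. A sketch of that pattern with an illustrative helper name; nothing may sleep or fault between kmap_atomic() and kunmap_atomic():

	#include <linux/highmem.h>
	#include <linux/pipe_fs_i.h>
	#include <linux/string.h>

	/* Copy len bytes out of a (confirmed) pipe buffer into kernel memory. */
	static void example_copy_from_pipe_buf(const struct pipe_buffer *buf,
						void *dst, size_t len)
	{
		char *src = kmap_atomic(buf->page);

		memcpy(dst, src + buf->offset, len);
		kunmap_atomic(src);
	}
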
diff --git a/fs/fuse/file.c b/fs/fuse/file.c
index 48992cac714b..13f8bdec5110 100644
--- a/fs/fuse/file.c
+++ b/fs/fuse/file.c
@@ -1086,9 +1086,7 @@ static ssize_t fuse_fill_write_pages(struct fuse_req *req,
1086 if (mapping_writably_mapped(mapping)) 1086 if (mapping_writably_mapped(mapping))
1087 flush_dcache_page(page); 1087 flush_dcache_page(page);
1088 1088
1089 pagefault_disable();
1090 tmp = iov_iter_copy_from_user_atomic(page, ii, offset, bytes); 1089 tmp = iov_iter_copy_from_user_atomic(page, ii, offset, bytes);
1091 pagefault_enable();
1092 flush_dcache_page(page); 1090 flush_dcache_page(page);
1093 1091
1094 mark_page_accessed(page); 1092 mark_page_accessed(page);
@@ -1237,8 +1235,7 @@ static ssize_t fuse_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
1237 goto out; 1235 goto out;
1238 1236
1239 if (file->f_flags & O_DIRECT) { 1237 if (file->f_flags & O_DIRECT) {
1240 written = generic_file_direct_write(iocb, iov, &nr_segs, 1238 written = generic_file_direct_write(iocb, iov, &nr_segs, pos,
1241 pos, &iocb->ki_pos,
1242 count, ocount); 1239 count, ocount);
1243 if (written < 0 || written == count) 1240 if (written < 0 || written == count)
1244 goto out; 1241 goto out;
diff --git a/fs/mount.h b/fs/mount.h
index b29e42f05f34..d55297f2fa05 100644
--- a/fs/mount.h
+++ b/fs/mount.h
@@ -10,7 +10,7 @@ struct mnt_namespace {
10 struct user_namespace *user_ns; 10 struct user_namespace *user_ns;
11 u64 seq; /* Sequence number to prevent loops */ 11 u64 seq; /* Sequence number to prevent loops */
12 wait_queue_head_t poll; 12 wait_queue_head_t poll;
13 int event; 13 u64 event;
14}; 14};
15 15
16struct mnt_pcp { 16struct mnt_pcp {
@@ -104,6 +104,9 @@ struct proc_mounts {
104 struct mnt_namespace *ns; 104 struct mnt_namespace *ns;
105 struct path root; 105 struct path root;
106 int (*show)(struct seq_file *, struct vfsmount *); 106 int (*show)(struct seq_file *, struct vfsmount *);
107 void *cached_mount;
108 u64 cached_event;
109 loff_t cached_index;
107}; 110};
108 111
109#define proc_mounts(p) (container_of((p), struct proc_mounts, m)) 112#define proc_mounts(p) (container_of((p), struct proc_mounts, m))
diff --git a/fs/namei.c b/fs/namei.c
index 88339f59efb5..c6157c894fce 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -358,6 +358,7 @@ int generic_permission(struct inode *inode, int mask)
358 358
359 return -EACCES; 359 return -EACCES;
360} 360}
361EXPORT_SYMBOL(generic_permission);
361 362
362/* 363/*
363 * We _really_ want to just do "generic_permission()" without 364 * We _really_ want to just do "generic_permission()" without
@@ -455,6 +456,7 @@ int inode_permission(struct inode *inode, int mask)
455 return retval; 456 return retval;
456 return __inode_permission(inode, mask); 457 return __inode_permission(inode, mask);
457} 458}
459EXPORT_SYMBOL(inode_permission);
458 460
459/** 461/**
460 * path_get - get a reference to a path 462 * path_get - get a reference to a path
@@ -924,6 +926,7 @@ int follow_up(struct path *path)
924 path->mnt = &parent->mnt; 926 path->mnt = &parent->mnt;
925 return 1; 927 return 1;
926} 928}
929EXPORT_SYMBOL(follow_up);
927 930
928/* 931/*
929 * Perform an automount 932 * Perform an automount
@@ -1085,6 +1088,7 @@ int follow_down_one(struct path *path)
1085 } 1088 }
1086 return 0; 1089 return 0;
1087} 1090}
1091EXPORT_SYMBOL(follow_down_one);
1088 1092
1089static inline bool managed_dentry_might_block(struct dentry *dentry) 1093static inline bool managed_dentry_might_block(struct dentry *dentry)
1090{ 1094{
@@ -1223,6 +1227,7 @@ int follow_down(struct path *path)
1223 } 1227 }
1224 return 0; 1228 return 0;
1225} 1229}
1230EXPORT_SYMBOL(follow_down);
1226 1231
1227/* 1232/*
1228 * Skip to top of mountpoint pile in refwalk mode for follow_dotdot() 1233 * Skip to top of mountpoint pile in refwalk mode for follow_dotdot()
@@ -2025,6 +2030,7 @@ int kern_path(const char *name, unsigned int flags, struct path *path)
2025 *path = nd.path; 2030 *path = nd.path;
2026 return res; 2031 return res;
2027} 2032}
2033EXPORT_SYMBOL(kern_path);
2028 2034
2029/** 2035/**
2030 * vfs_path_lookup - lookup a file path relative to a dentry-vfsmount pair 2036 * vfs_path_lookup - lookup a file path relative to a dentry-vfsmount pair
@@ -2049,6 +2055,7 @@ int vfs_path_lookup(struct dentry *dentry, struct vfsmount *mnt,
2049 *path = nd.path; 2055 *path = nd.path;
2050 return err; 2056 return err;
2051} 2057}
2058EXPORT_SYMBOL(vfs_path_lookup);
2052 2059
2053/* 2060/*
2054 * Restricted form of lookup. Doesn't follow links, single-component only, 2061 * Restricted form of lookup. Doesn't follow links, single-component only,
@@ -2111,6 +2118,7 @@ struct dentry *lookup_one_len(const char *name, struct dentry *base, int len)
2111 2118
2112 return __lookup_hash(&this, base, 0); 2119 return __lookup_hash(&this, base, 0);
2113} 2120}
2121EXPORT_SYMBOL(lookup_one_len);
2114 2122
2115int user_path_at_empty(int dfd, const char __user *name, unsigned flags, 2123int user_path_at_empty(int dfd, const char __user *name, unsigned flags,
2116 struct path *path, int *empty) 2124 struct path *path, int *empty)
@@ -2135,6 +2143,7 @@ int user_path_at(int dfd, const char __user *name, unsigned flags,
2135{ 2143{
2136 return user_path_at_empty(dfd, name, flags, path, NULL); 2144 return user_path_at_empty(dfd, name, flags, path, NULL);
2137} 2145}
2146EXPORT_SYMBOL(user_path_at);
2138 2147
2139/* 2148/*
2140 * NB: most callers don't do anything directly with the reference to the 2149 * NB: most callers don't do anything directly with the reference to the
@@ -2477,6 +2486,7 @@ struct dentry *lock_rename(struct dentry *p1, struct dentry *p2)
2477 mutex_lock_nested(&p2->d_inode->i_mutex, I_MUTEX_CHILD); 2486 mutex_lock_nested(&p2->d_inode->i_mutex, I_MUTEX_CHILD);
2478 return NULL; 2487 return NULL;
2479} 2488}
2489EXPORT_SYMBOL(lock_rename);
2480 2490
2481void unlock_rename(struct dentry *p1, struct dentry *p2) 2491void unlock_rename(struct dentry *p1, struct dentry *p2)
2482{ 2492{
@@ -2486,6 +2496,7 @@ void unlock_rename(struct dentry *p1, struct dentry *p2)
2486 mutex_unlock(&p1->d_inode->i_sb->s_vfs_rename_mutex); 2496 mutex_unlock(&p1->d_inode->i_sb->s_vfs_rename_mutex);
2487 } 2497 }
2488} 2498}
2499EXPORT_SYMBOL(unlock_rename);
2489 2500
2490int vfs_create(struct inode *dir, struct dentry *dentry, umode_t mode, 2501int vfs_create(struct inode *dir, struct dentry *dentry, umode_t mode,
2491 bool want_excl) 2502 bool want_excl)
@@ -2506,6 +2517,7 @@ int vfs_create(struct inode *dir, struct dentry *dentry, umode_t mode,
2506 fsnotify_create(dir, dentry); 2517 fsnotify_create(dir, dentry);
2507 return error; 2518 return error;
2508} 2519}
2520EXPORT_SYMBOL(vfs_create);
2509 2521
2510static int may_open(struct path *path, int acc_mode, int flag) 2522static int may_open(struct path *path, int acc_mode, int flag)
2511{ 2523{
@@ -3375,6 +3387,7 @@ int vfs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, dev_t dev)
3375 fsnotify_create(dir, dentry); 3387 fsnotify_create(dir, dentry);
3376 return error; 3388 return error;
3377} 3389}
3390EXPORT_SYMBOL(vfs_mknod);
3378 3391
3379static int may_mknod(umode_t mode) 3392static int may_mknod(umode_t mode)
3380{ 3393{
@@ -3464,6 +3477,7 @@ int vfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
3464 fsnotify_mkdir(dir, dentry); 3477 fsnotify_mkdir(dir, dentry);
3465 return error; 3478 return error;
3466} 3479}
3480EXPORT_SYMBOL(vfs_mkdir);
3467 3481
3468SYSCALL_DEFINE3(mkdirat, int, dfd, const char __user *, pathname, umode_t, mode) 3482SYSCALL_DEFINE3(mkdirat, int, dfd, const char __user *, pathname, umode_t, mode)
3469{ 3483{
@@ -3518,6 +3532,7 @@ void dentry_unhash(struct dentry *dentry)
3518 __d_drop(dentry); 3532 __d_drop(dentry);
3519 spin_unlock(&dentry->d_lock); 3533 spin_unlock(&dentry->d_lock);
3520} 3534}
3535EXPORT_SYMBOL(dentry_unhash);
3521 3536
3522int vfs_rmdir(struct inode *dir, struct dentry *dentry) 3537int vfs_rmdir(struct inode *dir, struct dentry *dentry)
3523{ 3538{
@@ -3555,6 +3570,7 @@ out:
3555 d_delete(dentry); 3570 d_delete(dentry);
3556 return error; 3571 return error;
3557} 3572}
3573EXPORT_SYMBOL(vfs_rmdir);
3558 3574
3559static long do_rmdir(int dfd, const char __user *pathname) 3575static long do_rmdir(int dfd, const char __user *pathname)
3560{ 3576{
@@ -3672,6 +3688,7 @@ out:
3672 3688
3673 return error; 3689 return error;
3674} 3690}
3691EXPORT_SYMBOL(vfs_unlink);
3675 3692
3676/* 3693/*
3677 * Make sure that the actual truncation of the file will occur outside its 3694 * Make sure that the actual truncation of the file will occur outside its
@@ -3785,6 +3802,7 @@ int vfs_symlink(struct inode *dir, struct dentry *dentry, const char *oldname)
3785 fsnotify_create(dir, dentry); 3802 fsnotify_create(dir, dentry);
3786 return error; 3803 return error;
3787} 3804}
3805EXPORT_SYMBOL(vfs_symlink);
3788 3806
3789SYSCALL_DEFINE3(symlinkat, const char __user *, oldname, 3807SYSCALL_DEFINE3(symlinkat, const char __user *, oldname,
3790 int, newdfd, const char __user *, newname) 3808 int, newdfd, const char __user *, newname)
@@ -3893,6 +3911,7 @@ int vfs_link(struct dentry *old_dentry, struct inode *dir, struct dentry *new_de
3893 fsnotify_link(dir, inode, new_dentry); 3911 fsnotify_link(dir, inode, new_dentry);
3894 return error; 3912 return error;
3895} 3913}
3914EXPORT_SYMBOL(vfs_link);
3896 3915
3897/* 3916/*
3898 * Hardlinks are often used in delicate situations. We avoid 3917 * Hardlinks are often used in delicate situations. We avoid
@@ -4152,6 +4171,7 @@ out:
4152 4171
4153 return error; 4172 return error;
4154} 4173}
4174EXPORT_SYMBOL(vfs_rename);
4155 4175
4156SYSCALL_DEFINE5(renameat2, int, olddfd, const char __user *, oldname, 4176SYSCALL_DEFINE5(renameat2, int, olddfd, const char __user *, oldname,
4157 int, newdfd, const char __user *, newname, unsigned int, flags) 4177 int, newdfd, const char __user *, newname, unsigned int, flags)
@@ -4304,11 +4324,9 @@ SYSCALL_DEFINE2(rename, const char __user *, oldname, const char __user *, newna
4304 return sys_renameat2(AT_FDCWD, oldname, AT_FDCWD, newname, 0); 4324 return sys_renameat2(AT_FDCWD, oldname, AT_FDCWD, newname, 0);
4305} 4325}
4306 4326
4307int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen, const char *link) 4327int readlink_copy(char __user *buffer, int buflen, const char *link)
4308{ 4328{
4309 int len; 4329 int len = PTR_ERR(link);
4310
4311 len = PTR_ERR(link);
4312 if (IS_ERR(link)) 4330 if (IS_ERR(link))
4313 goto out; 4331 goto out;
4314 4332
@@ -4320,6 +4338,7 @@ int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen, const c
4320out: 4338out:
4321 return len; 4339 return len;
4322} 4340}
4341EXPORT_SYMBOL(readlink_copy);
4323 4342
4324/* 4343/*
4325 * A helper for ->readlink(). This should be used *ONLY* for symlinks that 4344 * A helper for ->readlink(). This should be used *ONLY* for symlinks that
@@ -4337,11 +4356,12 @@ int generic_readlink(struct dentry *dentry, char __user *buffer, int buflen)
4337 if (IS_ERR(cookie)) 4356 if (IS_ERR(cookie))
4338 return PTR_ERR(cookie); 4357 return PTR_ERR(cookie);
4339 4358
4340 res = vfs_readlink(dentry, buffer, buflen, nd_get_link(&nd)); 4359 res = readlink_copy(buffer, buflen, nd_get_link(&nd));
4341 if (dentry->d_inode->i_op->put_link) 4360 if (dentry->d_inode->i_op->put_link)
4342 dentry->d_inode->i_op->put_link(dentry, &nd, cookie); 4361 dentry->d_inode->i_op->put_link(dentry, &nd, cookie);
4343 return res; 4362 return res;
4344} 4363}
4364EXPORT_SYMBOL(generic_readlink);
4345 4365
4346/* get the link contents into pagecache */ 4366/* get the link contents into pagecache */
4347static char *page_getlink(struct dentry * dentry, struct page **ppage) 4367static char *page_getlink(struct dentry * dentry, struct page **ppage)
@@ -4361,14 +4381,14 @@ static char *page_getlink(struct dentry * dentry, struct page **ppage)
4361int page_readlink(struct dentry *dentry, char __user *buffer, int buflen) 4381int page_readlink(struct dentry *dentry, char __user *buffer, int buflen)
4362{ 4382{
4363 struct page *page = NULL; 4383 struct page *page = NULL;
4364 char *s = page_getlink(dentry, &page); 4384 int res = readlink_copy(buffer, buflen, page_getlink(dentry, &page));
4365 int res = vfs_readlink(dentry,buffer,buflen,s);
4366 if (page) { 4385 if (page) {
4367 kunmap(page); 4386 kunmap(page);
4368 page_cache_release(page); 4387 page_cache_release(page);
4369 } 4388 }
4370 return res; 4389 return res;
4371} 4390}
4391EXPORT_SYMBOL(page_readlink);
4372 4392
4373void *page_follow_link_light(struct dentry *dentry, struct nameidata *nd) 4393void *page_follow_link_light(struct dentry *dentry, struct nameidata *nd)
4374{ 4394{
@@ -4376,6 +4396,7 @@ void *page_follow_link_light(struct dentry *dentry, struct nameidata *nd)
4376 nd_set_link(nd, page_getlink(dentry, &page)); 4396 nd_set_link(nd, page_getlink(dentry, &page));
4377 return page; 4397 return page;
4378} 4398}
4399EXPORT_SYMBOL(page_follow_link_light);
4379 4400
4380void page_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie) 4401void page_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
4381{ 4402{
@@ -4386,6 +4407,7 @@ void page_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
4386 page_cache_release(page); 4407 page_cache_release(page);
4387 } 4408 }
4388} 4409}
4410EXPORT_SYMBOL(page_put_link);
4389 4411
4390/* 4412/*
4391 * The nofs argument instructs pagecache_write_begin to pass AOP_FLAG_NOFS 4413 * The nofs argument instructs pagecache_write_begin to pass AOP_FLAG_NOFS
@@ -4423,45 +4445,18 @@ retry:
4423fail: 4445fail:
4424 return err; 4446 return err;
4425} 4447}
4448EXPORT_SYMBOL(__page_symlink);
4426 4449
4427int page_symlink(struct inode *inode, const char *symname, int len) 4450int page_symlink(struct inode *inode, const char *symname, int len)
4428{ 4451{
4429 return __page_symlink(inode, symname, len, 4452 return __page_symlink(inode, symname, len,
4430 !(mapping_gfp_mask(inode->i_mapping) & __GFP_FS)); 4453 !(mapping_gfp_mask(inode->i_mapping) & __GFP_FS));
4431} 4454}
4455EXPORT_SYMBOL(page_symlink);
4432 4456
4433const struct inode_operations page_symlink_inode_operations = { 4457const struct inode_operations page_symlink_inode_operations = {
4434 .readlink = generic_readlink, 4458 .readlink = generic_readlink,
4435 .follow_link = page_follow_link_light, 4459 .follow_link = page_follow_link_light,
4436 .put_link = page_put_link, 4460 .put_link = page_put_link,
4437}; 4461};
4438
4439EXPORT_SYMBOL(user_path_at);
4440EXPORT_SYMBOL(follow_down_one);
4441EXPORT_SYMBOL(follow_down);
4442EXPORT_SYMBOL(follow_up);
4443EXPORT_SYMBOL(get_write_access); /* nfsd */
4444EXPORT_SYMBOL(lock_rename);
4445EXPORT_SYMBOL(lookup_one_len);
4446EXPORT_SYMBOL(page_follow_link_light);
4447EXPORT_SYMBOL(page_put_link);
4448EXPORT_SYMBOL(page_readlink);
4449EXPORT_SYMBOL(__page_symlink);
4450EXPORT_SYMBOL(page_symlink);
4451EXPORT_SYMBOL(page_symlink_inode_operations); 4462EXPORT_SYMBOL(page_symlink_inode_operations);
4452EXPORT_SYMBOL(kern_path);
4453EXPORT_SYMBOL(vfs_path_lookup);
4454EXPORT_SYMBOL(inode_permission);
4455EXPORT_SYMBOL(unlock_rename);
4456EXPORT_SYMBOL(vfs_create);
4457EXPORT_SYMBOL(vfs_link);
4458EXPORT_SYMBOL(vfs_mkdir);
4459EXPORT_SYMBOL(vfs_mknod);
4460EXPORT_SYMBOL(generic_permission);
4461EXPORT_SYMBOL(vfs_readlink);
4462EXPORT_SYMBOL(vfs_rename);
4463EXPORT_SYMBOL(vfs_rmdir);
4464EXPORT_SYMBOL(vfs_symlink);
4465EXPORT_SYMBOL(vfs_unlink);
4466EXPORT_SYMBOL(dentry_unhash);
4467EXPORT_SYMBOL(generic_readlink);
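
Besides moving each EXPORT_SYMBOL() next to its definition, the fs/namei.c hunks rename vfs_readlink() to readlink_copy() and drop its unused dentry argument. A sketch of how a filesystem's ->readlink() can call it for a link body it already holds in memory; the example_* name and the use of i_private are illustrative assumptions, and readlink_copy() is taken to be declared alongside the other exported VFS helpers:

	#include <linux/fs.h>
	#include <linux/dcache.h>

	static int example_readlink(struct dentry *dentry, char __user *buffer,
				    int buflen)
	{
		/* hypothetical fs keeping the NUL-terminated target in i_private */
		const char *link = dentry->d_inode->i_private;

		return readlink_copy(buffer, buflen, link);
	}
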
diff --git a/fs/namespace.c b/fs/namespace.c
index 2ffc5a2905d4..182bc41cd887 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -52,7 +52,7 @@ static int __init set_mphash_entries(char *str)
52} 52}
53__setup("mphash_entries=", set_mphash_entries); 53__setup("mphash_entries=", set_mphash_entries);
54 54
55static int event; 55static u64 event;
56static DEFINE_IDA(mnt_id_ida); 56static DEFINE_IDA(mnt_id_ida);
57static DEFINE_IDA(mnt_group_ida); 57static DEFINE_IDA(mnt_group_ida);
58static DEFINE_SPINLOCK(mnt_id_lock); 58static DEFINE_SPINLOCK(mnt_id_lock);
@@ -414,9 +414,7 @@ EXPORT_SYMBOL_GPL(mnt_clone_write);
414 */ 414 */
415int __mnt_want_write_file(struct file *file) 415int __mnt_want_write_file(struct file *file)
416{ 416{
417 struct inode *inode = file_inode(file); 417 if (!(file->f_mode & FMODE_WRITER))
418
419 if (!(file->f_mode & FMODE_WRITE) || special_file(inode->i_mode))
420 return __mnt_want_write(file->f_path.mnt); 418 return __mnt_want_write(file->f_path.mnt);
421 else 419 else
422 return mnt_clone_write(file->f_path.mnt); 420 return mnt_clone_write(file->f_path.mnt);
@@ -570,13 +568,17 @@ int sb_prepare_remount_readonly(struct super_block *sb)
570static void free_vfsmnt(struct mount *mnt) 568static void free_vfsmnt(struct mount *mnt)
571{ 569{
572 kfree(mnt->mnt_devname); 570 kfree(mnt->mnt_devname);
573 mnt_free_id(mnt);
574#ifdef CONFIG_SMP 571#ifdef CONFIG_SMP
575 free_percpu(mnt->mnt_pcp); 572 free_percpu(mnt->mnt_pcp);
576#endif 573#endif
577 kmem_cache_free(mnt_cache, mnt); 574 kmem_cache_free(mnt_cache, mnt);
578} 575}
579 576
577static void delayed_free_vfsmnt(struct rcu_head *head)
578{
579 free_vfsmnt(container_of(head, struct mount, mnt_rcu));
580}
581
580/* call under rcu_read_lock */ 582/* call under rcu_read_lock */
581bool legitimize_mnt(struct vfsmount *bastard, unsigned seq) 583bool legitimize_mnt(struct vfsmount *bastard, unsigned seq)
582{ 584{
@@ -848,6 +850,7 @@ vfs_kern_mount(struct file_system_type *type, int flags, const char *name, void
848 850
849 root = mount_fs(type, flags, name, data); 851 root = mount_fs(type, flags, name, data);
850 if (IS_ERR(root)) { 852 if (IS_ERR(root)) {
853 mnt_free_id(mnt);
851 free_vfsmnt(mnt); 854 free_vfsmnt(mnt);
852 return ERR_CAST(root); 855 return ERR_CAST(root);
853 } 856 }
@@ -885,7 +888,7 @@ static struct mount *clone_mnt(struct mount *old, struct dentry *root,
885 goto out_free; 888 goto out_free;
886 } 889 }
887 890
888 mnt->mnt.mnt_flags = old->mnt.mnt_flags & ~MNT_WRITE_HOLD; 891 mnt->mnt.mnt_flags = old->mnt.mnt_flags & ~(MNT_WRITE_HOLD|MNT_MARKED);
889 /* Don't allow unprivileged users to change mount flags */ 892 /* Don't allow unprivileged users to change mount flags */
890 if ((flag & CL_UNPRIVILEGED) && (mnt->mnt.mnt_flags & MNT_READONLY)) 893 if ((flag & CL_UNPRIVILEGED) && (mnt->mnt.mnt_flags & MNT_READONLY))
891 mnt->mnt.mnt_flags |= MNT_LOCK_READONLY; 894 mnt->mnt.mnt_flags |= MNT_LOCK_READONLY;
@@ -928,20 +931,11 @@ static struct mount *clone_mnt(struct mount *old, struct dentry *root,
928 return mnt; 931 return mnt;
929 932
930 out_free: 933 out_free:
934 mnt_free_id(mnt);
931 free_vfsmnt(mnt); 935 free_vfsmnt(mnt);
932 return ERR_PTR(err); 936 return ERR_PTR(err);
933} 937}
934 938
935static void delayed_free(struct rcu_head *head)
936{
937 struct mount *mnt = container_of(head, struct mount, mnt_rcu);
938 kfree(mnt->mnt_devname);
939#ifdef CONFIG_SMP
940 free_percpu(mnt->mnt_pcp);
941#endif
942 kmem_cache_free(mnt_cache, mnt);
943}
944
945static void mntput_no_expire(struct mount *mnt) 939static void mntput_no_expire(struct mount *mnt)
946{ 940{
947put_again: 941put_again:
@@ -991,7 +985,7 @@ put_again:
991 dput(mnt->mnt.mnt_root); 985 dput(mnt->mnt.mnt_root);
992 deactivate_super(mnt->mnt.mnt_sb); 986 deactivate_super(mnt->mnt.mnt_sb);
993 mnt_free_id(mnt); 987 mnt_free_id(mnt);
994 call_rcu(&mnt->mnt_rcu, delayed_free); 988 call_rcu(&mnt->mnt_rcu, delayed_free_vfsmnt);
995} 989}
996 990
997void mntput(struct vfsmount *mnt) 991void mntput(struct vfsmount *mnt)
@@ -1100,14 +1094,29 @@ static void *m_start(struct seq_file *m, loff_t *pos)
1100 struct proc_mounts *p = proc_mounts(m); 1094 struct proc_mounts *p = proc_mounts(m);
1101 1095
1102 down_read(&namespace_sem); 1096 down_read(&namespace_sem);
1103 return seq_list_start(&p->ns->list, *pos); 1097 if (p->cached_event == p->ns->event) {
1098 void *v = p->cached_mount;
1099 if (*pos == p->cached_index)
1100 return v;
1101 if (*pos == p->cached_index + 1) {
1102 v = seq_list_next(v, &p->ns->list, &p->cached_index);
1103 return p->cached_mount = v;
1104 }
1105 }
1106
1107 p->cached_event = p->ns->event;
1108 p->cached_mount = seq_list_start(&p->ns->list, *pos);
1109 p->cached_index = *pos;
1110 return p->cached_mount;
1104} 1111}
1105 1112
1106static void *m_next(struct seq_file *m, void *v, loff_t *pos) 1113static void *m_next(struct seq_file *m, void *v, loff_t *pos)
1107{ 1114{
1108 struct proc_mounts *p = proc_mounts(m); 1115 struct proc_mounts *p = proc_mounts(m);
1109 1116
1110 return seq_list_next(v, &p->ns->list, pos); 1117 p->cached_mount = seq_list_next(v, &p->ns->list, pos);
1118 p->cached_index = *pos;
1119 return p->cached_mount;
1111} 1120}
1112 1121
1113static void m_stop(struct seq_file *m, void *v) 1122static void m_stop(struct seq_file *m, void *v)
@@ -1661,9 +1670,9 @@ static int attach_recursive_mnt(struct mount *source_mnt,
1661 if (err) 1670 if (err)
1662 goto out; 1671 goto out;
1663 err = propagate_mnt(dest_mnt, dest_mp, source_mnt, &tree_list); 1672 err = propagate_mnt(dest_mnt, dest_mp, source_mnt, &tree_list);
1673 lock_mount_hash();
1664 if (err) 1674 if (err)
1665 goto out_cleanup_ids; 1675 goto out_cleanup_ids;
1666 lock_mount_hash();
1667 for (p = source_mnt; p; p = next_mnt(p, source_mnt)) 1676 for (p = source_mnt; p; p = next_mnt(p, source_mnt))
1668 set_mnt_shared(p); 1677 set_mnt_shared(p);
1669 } else { 1678 } else {
@@ -1690,6 +1699,11 @@ static int attach_recursive_mnt(struct mount *source_mnt,
1690 return 0; 1699 return 0;
1691 1700
1692 out_cleanup_ids: 1701 out_cleanup_ids:
1702 while (!hlist_empty(&tree_list)) {
1703 child = hlist_entry(tree_list.first, struct mount, mnt_hash);
1704 umount_tree(child, 0);
1705 }
1706 unlock_mount_hash();
1693 cleanup_group_ids(source_mnt, NULL); 1707 cleanup_group_ids(source_mnt, NULL);
1694 out: 1708 out:
1695 return err; 1709 return err;
@@ -2044,7 +2058,7 @@ static int do_add_mount(struct mount *newmnt, struct path *path, int mnt_flags)
2044 struct mount *parent; 2058 struct mount *parent;
2045 int err; 2059 int err;
2046 2060
2047 mnt_flags &= ~(MNT_SHARED | MNT_WRITE_HOLD | MNT_INTERNAL | MNT_DOOMED | MNT_SYNC_UMOUNT); 2061 mnt_flags &= ~MNT_INTERNAL_FLAGS;
2048 2062
2049 mp = lock_mount(path); 2063 mp = lock_mount(path);
2050 if (IS_ERR(mp)) 2064 if (IS_ERR(mp))
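
The m_start()/m_next() hunks above cache the last position handed out for /proc/mounts-style seq_files so a large mount table is not rescanned from the list head on every read() chunk, and invalidate that cache with the (now 64-bit) event counter. A generalized sketch of the caching, with illustrative names; the caller is assumed to hold whatever lock serializes list changes, as namespace_sem does in the real code:

	#include <linux/seq_file.h>
	#include <linux/list.h>
	#include <linux/types.h>

	struct example_cached_iter {
		struct list_head *head;	/* list being walked */
		const u64 *eventp;	/* bumped whenever the list changes */
		u64 cached_event;
		void *cached_elem;
		loff_t cached_index;
	};

	static void *example_cached_start(struct example_cached_iter *c, loff_t *pos)
	{
		if (c->cached_event == *c->eventp) {
			void *v = c->cached_elem;

			if (*pos == c->cached_index)
				return v;
			if (*pos == c->cached_index + 1) {
				v = seq_list_next(v, c->head, &c->cached_index);
				return c->cached_elem = v;
			}
		}
		/* cache miss: fall back to a linear walk and remember the result */
		c->cached_event = *c->eventp;
		c->cached_elem = seq_list_start(c->head, *pos);
		c->cached_index = *pos;
		return c->cached_elem;
	}
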
diff --git a/fs/ncpfs/inode.c b/fs/ncpfs/inode.c
index 81b4f643ecef..e31e589369a4 100644
--- a/fs/ncpfs/inode.c
+++ b/fs/ncpfs/inode.c
@@ -470,9 +470,7 @@ static int ncp_fill_super(struct super_block *sb, void *raw_data, int silent)
470{ 470{
471 struct ncp_mount_data_kernel data; 471 struct ncp_mount_data_kernel data;
472 struct ncp_server *server; 472 struct ncp_server *server;
473 struct file *ncp_filp;
474 struct inode *root_inode; 473 struct inode *root_inode;
475 struct inode *sock_inode;
476 struct socket *sock; 474 struct socket *sock;
477 int error; 475 int error;
478 int default_bufsize; 476 int default_bufsize;
@@ -541,18 +539,10 @@ static int ncp_fill_super(struct super_block *sb, void *raw_data, int silent)
541 if (!uid_valid(data.mounted_uid) || !uid_valid(data.uid) || 539 if (!uid_valid(data.mounted_uid) || !uid_valid(data.uid) ||
542 !gid_valid(data.gid)) 540 !gid_valid(data.gid))
543 goto out; 541 goto out;
544 error = -EBADF; 542 sock = sockfd_lookup(data.ncp_fd, &error);
545 ncp_filp = fget(data.ncp_fd);
546 if (!ncp_filp)
547 goto out;
548 error = -ENOTSOCK;
549 sock_inode = file_inode(ncp_filp);
550 if (!S_ISSOCK(sock_inode->i_mode))
551 goto out_fput;
552 sock = SOCKET_I(sock_inode);
553 if (!sock) 543 if (!sock)
554 goto out_fput; 544 goto out;
555 545
556 if (sock->type == SOCK_STREAM) 546 if (sock->type == SOCK_STREAM)
557 default_bufsize = 0xF000; 547 default_bufsize = 0xF000;
558 else 548 else
@@ -574,27 +564,16 @@ static int ncp_fill_super(struct super_block *sb, void *raw_data, int silent)
574 if (error) 564 if (error)
575 goto out_fput; 565 goto out_fput;
576 566
577 server->ncp_filp = ncp_filp;
578 server->ncp_sock = sock; 567 server->ncp_sock = sock;
579 568
580 if (data.info_fd != -1) { 569 if (data.info_fd != -1) {
581 struct socket *info_sock; 570 struct socket *info_sock = sockfd_lookup(data.info_fd, &error);
582
583 error = -EBADF;
584 server->info_filp = fget(data.info_fd);
585 if (!server->info_filp)
586 goto out_bdi;
587 error = -ENOTSOCK;
588 sock_inode = file_inode(server->info_filp);
589 if (!S_ISSOCK(sock_inode->i_mode))
590 goto out_fput2;
591 info_sock = SOCKET_I(sock_inode);
592 if (!info_sock) 571 if (!info_sock)
593 goto out_fput2; 572 goto out_bdi;
573 server->info_sock = info_sock;
594 error = -EBADFD; 574 error = -EBADFD;
595 if (info_sock->type != SOCK_STREAM) 575 if (info_sock->type != SOCK_STREAM)
596 goto out_fput2; 576 goto out_fput2;
597 server->info_sock = info_sock;
598 } 577 }
599 578
600/* server->lock = 0; */ 579/* server->lock = 0; */
@@ -766,17 +745,12 @@ out_nls:
766 mutex_destroy(&server->root_setup_lock); 745 mutex_destroy(&server->root_setup_lock);
767 mutex_destroy(&server->mutex); 746 mutex_destroy(&server->mutex);
768out_fput2: 747out_fput2:
769 if (server->info_filp) 748 if (server->info_sock)
770 fput(server->info_filp); 749 sockfd_put(server->info_sock);
771out_bdi: 750out_bdi:
772 bdi_destroy(&server->bdi); 751 bdi_destroy(&server->bdi);
773out_fput: 752out_fput:
774 /* 23/12/1998 Marcin Dalecki <dalecki@cs.net.pl>: 753 sockfd_put(sock);
775 *
776 * The previously used put_filp(ncp_filp); was bogus, since
777 * it doesn't perform proper unlocking.
778 */
779 fput(ncp_filp);
780out: 754out:
781 put_pid(data.wdog_pid); 755 put_pid(data.wdog_pid);
782 sb->s_fs_info = NULL; 756 sb->s_fs_info = NULL;
@@ -809,9 +783,9 @@ static void ncp_put_super(struct super_block *sb)
809 mutex_destroy(&server->root_setup_lock); 783 mutex_destroy(&server->root_setup_lock);
810 mutex_destroy(&server->mutex); 784 mutex_destroy(&server->mutex);
811 785
812 if (server->info_filp) 786 if (server->info_sock)
813 fput(server->info_filp); 787 sockfd_put(server->info_sock);
814 fput(server->ncp_filp); 788 sockfd_put(server->ncp_sock);
815 kill_pid(server->m.wdog_pid, SIGTERM, 1); 789 kill_pid(server->m.wdog_pid, SIGTERM, 1);
816 put_pid(server->m.wdog_pid); 790 put_pid(server->m.wdog_pid);
817 791
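
ncp_fill_super() now resolves its socket file descriptors with sockfd_lookup(), which bundles the fget(), the S_ISSOCK() check and the SOCKET_I() conversion, and fills in the error code itself. A sketch of the lookup/put pairing with an illustrative function name:

	#include <linux/net.h>
	#include <linux/errno.h>

	static int example_grab_socket(int fd)
	{
		int err;
		struct socket *sock = sockfd_lookup(fd, &err);

		if (!sock)
			return err;	/* sockfd_lookup() already set the error */

		/* ... use sock->type, sock->ops, etc. ... */

		sockfd_put(sock);	/* drops the file reference taken above */
		return 0;
	}
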
diff --git a/fs/ncpfs/ncp_fs_sb.h b/fs/ncpfs/ncp_fs_sb.h
index b81e97adc5a9..55e26fd80886 100644
--- a/fs/ncpfs/ncp_fs_sb.h
+++ b/fs/ncpfs/ncp_fs_sb.h
@@ -45,9 +45,7 @@ struct ncp_server {
45 45
46 __u8 name_space[NCP_NUMBER_OF_VOLUMES + 2]; 46 __u8 name_space[NCP_NUMBER_OF_VOLUMES + 2];
47 47
48 struct file *ncp_filp; /* File pointer to ncp socket */
49 struct socket *ncp_sock;/* ncp socket */ 48 struct socket *ncp_sock;/* ncp socket */
50 struct file *info_filp;
51 struct socket *info_sock; 49 struct socket *info_sock;
52 50
53 u8 sequence; 51 u8 sequence;
@@ -111,7 +109,7 @@ struct ncp_server {
111 109
112 spinlock_t requests_lock; /* Lock accesses to tx.requests, tx.creq and rcv.creq when STREAM mode */ 110 spinlock_t requests_lock; /* Lock accesses to tx.requests, tx.creq and rcv.creq when STREAM mode */
113 111
114 void (*data_ready)(struct sock* sk, int len); 112 void (*data_ready)(struct sock* sk);
115 void (*error_report)(struct sock* sk); 113 void (*error_report)(struct sock* sk);
116 void (*write_space)(struct sock* sk); /* STREAM mode only */ 114 void (*write_space)(struct sock* sk); /* STREAM mode only */
117 struct { 115 struct {
@@ -153,7 +151,7 @@ extern void ncp_tcp_tx_proc(struct work_struct *work);
153extern void ncpdgram_rcv_proc(struct work_struct *work); 151extern void ncpdgram_rcv_proc(struct work_struct *work);
154extern void ncpdgram_timeout_proc(struct work_struct *work); 152extern void ncpdgram_timeout_proc(struct work_struct *work);
155extern void ncpdgram_timeout_call(unsigned long server); 153extern void ncpdgram_timeout_call(unsigned long server);
156extern void ncp_tcp_data_ready(struct sock* sk, int len); 154extern void ncp_tcp_data_ready(struct sock* sk);
157extern void ncp_tcp_write_space(struct sock* sk); 155extern void ncp_tcp_write_space(struct sock* sk);
158extern void ncp_tcp_error_report(struct sock* sk); 156extern void ncp_tcp_error_report(struct sock* sk);
159 157
diff --git a/fs/ncpfs/sock.c b/fs/ncpfs/sock.c
index 04a69a4d8e96..471bc3d1139e 100644
--- a/fs/ncpfs/sock.c
+++ b/fs/ncpfs/sock.c
@@ -97,11 +97,11 @@ static void ncp_req_put(struct ncp_request_reply *req)
97 kfree(req); 97 kfree(req);
98} 98}
99 99
100void ncp_tcp_data_ready(struct sock *sk, int len) 100void ncp_tcp_data_ready(struct sock *sk)
101{ 101{
102 struct ncp_server *server = sk->sk_user_data; 102 struct ncp_server *server = sk->sk_user_data;
103 103
104 server->data_ready(sk, len); 104 server->data_ready(sk);
105 schedule_work(&server->rcv.tq); 105 schedule_work(&server->rcv.tq);
106} 106}
107 107
diff --git a/fs/ntfs/inode.c b/fs/ntfs/inode.c
index 9d8153ebacfb..f47af5e6e230 100644
--- a/fs/ntfs/inode.c
+++ b/fs/ntfs/inode.c
@@ -1704,8 +1704,6 @@ static int ntfs_read_locked_index_inode(struct inode *base_vi, struct inode *vi)
1704 iput(bvi); 1704 iput(bvi);
1705skip_large_index_stuff: 1705skip_large_index_stuff:
1706 /* Setup the operations for this index inode. */ 1706 /* Setup the operations for this index inode. */
1707 vi->i_op = NULL;
1708 vi->i_fop = NULL;
1709 vi->i_mapping->a_ops = &ntfs_mst_aops; 1707 vi->i_mapping->a_ops = &ntfs_mst_aops;
1710 vi->i_blocks = ni->allocated_size >> 9; 1708 vi->i_blocks = ni->allocated_size >> 9;
1711 /* 1709 /*
diff --git a/fs/ocfs2/cluster/tcp.c b/fs/ocfs2/cluster/tcp.c
index eb649d23a4de..c6b90e670389 100644
--- a/fs/ocfs2/cluster/tcp.c
+++ b/fs/ocfs2/cluster/tcp.c
@@ -137,7 +137,7 @@ static int o2net_sys_err_translations[O2NET_ERR_MAX] =
137static void o2net_sc_connect_completed(struct work_struct *work); 137static void o2net_sc_connect_completed(struct work_struct *work);
138static void o2net_rx_until_empty(struct work_struct *work); 138static void o2net_rx_until_empty(struct work_struct *work);
139static void o2net_shutdown_sc(struct work_struct *work); 139static void o2net_shutdown_sc(struct work_struct *work);
140static void o2net_listen_data_ready(struct sock *sk, int bytes); 140static void o2net_listen_data_ready(struct sock *sk);
141static void o2net_sc_send_keep_req(struct work_struct *work); 141static void o2net_sc_send_keep_req(struct work_struct *work);
142static void o2net_idle_timer(unsigned long data); 142static void o2net_idle_timer(unsigned long data);
143static void o2net_sc_postpone_idle(struct o2net_sock_container *sc); 143static void o2net_sc_postpone_idle(struct o2net_sock_container *sc);
@@ -597,9 +597,9 @@ static void o2net_set_nn_state(struct o2net_node *nn,
597} 597}
598 598
599/* see o2net_register_callbacks() */ 599/* see o2net_register_callbacks() */
600static void o2net_data_ready(struct sock *sk, int bytes) 600static void o2net_data_ready(struct sock *sk)
601{ 601{
602 void (*ready)(struct sock *sk, int bytes); 602 void (*ready)(struct sock *sk);
603 603
604 read_lock(&sk->sk_callback_lock); 604 read_lock(&sk->sk_callback_lock);
605 if (sk->sk_user_data) { 605 if (sk->sk_user_data) {
@@ -613,7 +613,7 @@ static void o2net_data_ready(struct sock *sk, int bytes)
613 } 613 }
614 read_unlock(&sk->sk_callback_lock); 614 read_unlock(&sk->sk_callback_lock);
615 615
616 ready(sk, bytes); 616 ready(sk);
617} 617}
618 618
619/* see o2net_register_callbacks() */ 619/* see o2net_register_callbacks() */
@@ -916,57 +916,30 @@ static struct o2net_msg_handler *o2net_handler_get(u32 msg_type, u32 key)
916 916
917static int o2net_recv_tcp_msg(struct socket *sock, void *data, size_t len) 917static int o2net_recv_tcp_msg(struct socket *sock, void *data, size_t len)
918{ 918{
919 int ret; 919 struct kvec vec = { .iov_len = len, .iov_base = data, };
920 mm_segment_t oldfs; 920 struct msghdr msg = { .msg_flags = MSG_DONTWAIT, };
921 struct kvec vec = { 921 return kernel_recvmsg(sock, &msg, &vec, 1, len, msg.msg_flags);
922 .iov_len = len,
923 .iov_base = data,
924 };
925 struct msghdr msg = {
926 .msg_iovlen = 1,
927 .msg_iov = (struct iovec *)&vec,
928 .msg_flags = MSG_DONTWAIT,
929 };
930
931 oldfs = get_fs();
932 set_fs(get_ds());
933 ret = sock_recvmsg(sock, &msg, len, msg.msg_flags);
934 set_fs(oldfs);
935
936 return ret;
937} 922}
938 923
939static int o2net_send_tcp_msg(struct socket *sock, struct kvec *vec, 924static int o2net_send_tcp_msg(struct socket *sock, struct kvec *vec,
940 size_t veclen, size_t total) 925 size_t veclen, size_t total)
941{ 926{
942 int ret; 927 int ret;
943 mm_segment_t oldfs; 928 struct msghdr msg;
944 struct msghdr msg = {
945 .msg_iov = (struct iovec *)vec,
946 .msg_iovlen = veclen,
947 };
948 929
949 if (sock == NULL) { 930 if (sock == NULL) {
950 ret = -EINVAL; 931 ret = -EINVAL;
951 goto out; 932 goto out;
952 } 933 }
953 934
954 oldfs = get_fs(); 935 ret = kernel_sendmsg(sock, &msg, vec, veclen, total);
955 set_fs(get_ds()); 936 if (likely(ret == total))
956 ret = sock_sendmsg(sock, &msg, total); 937 return 0;
957 set_fs(oldfs); 938 mlog(ML_ERROR, "sendmsg returned %d instead of %zu\n", ret, total);
958 if (ret != total) { 939 if (ret >= 0)
959 mlog(ML_ERROR, "sendmsg returned %d instead of %zu\n", ret, 940 ret = -EPIPE; /* should be smarter, I bet */
960 total);
961 if (ret >= 0)
962 ret = -EPIPE; /* should be smarter, I bet */
963 goto out;
964 }
965
966 ret = 0;
967out: 941out:
968 if (ret < 0) 942 mlog(0, "returning error: %d\n", ret);
969 mlog(0, "returning error: %d\n", ret);
970 return ret; 943 return ret;
971} 944}
972 945
@@ -1953,9 +1926,9 @@ static void o2net_accept_many(struct work_struct *work)
1953 cond_resched(); 1926 cond_resched();
1954} 1927}
1955 1928
1956static void o2net_listen_data_ready(struct sock *sk, int bytes) 1929static void o2net_listen_data_ready(struct sock *sk)
1957{ 1930{
1958 void (*ready)(struct sock *sk, int bytes); 1931 void (*ready)(struct sock *sk);
1959 1932
1960 read_lock(&sk->sk_callback_lock); 1933 read_lock(&sk->sk_callback_lock);
1961 ready = sk->sk_user_data; 1934 ready = sk->sk_user_data;
@@ -1978,7 +1951,6 @@ static void o2net_listen_data_ready(struct sock *sk, int bytes)
1978 */ 1951 */
1979 1952
1980 if (sk->sk_state == TCP_LISTEN) { 1953 if (sk->sk_state == TCP_LISTEN) {
1981 mlog(ML_TCP, "bytes: %d\n", bytes);
1982 queue_work(o2net_wq, &o2net_listen_work); 1954 queue_work(o2net_wq, &o2net_listen_work);
1983 } else { 1955 } else {
1984 ready = NULL; 1956 ready = NULL;
@@ -1987,7 +1959,7 @@ static void o2net_listen_data_ready(struct sock *sk, int bytes)
1987out: 1959out:
1988 read_unlock(&sk->sk_callback_lock); 1960 read_unlock(&sk->sk_callback_lock);
1989 if (ready != NULL) 1961 if (ready != NULL)
1990 ready(sk, bytes); 1962 ready(sk);
1991} 1963}
1992 1964
1993static int o2net_open_listening_sock(__be32 addr, __be16 port) 1965static int o2net_open_listening_sock(__be32 addr, __be16 port)
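
o2net_recv_tcp_msg() and o2net_send_tcp_msg() above switch to kernel_recvmsg()/kernel_sendmsg(), which take kvecs describing kernel memory, so the get_fs()/set_fs(get_ds()) address-limit juggling goes away. A sketch of both calls with illustrative names:

	#include <linux/net.h>
	#include <linux/socket.h>
	#include <linux/errno.h>
	#include <linux/uio.h>

	static int example_recv(struct socket *sock, void *data, size_t len)
	{
		struct kvec vec = { .iov_base = data, .iov_len = len };
		struct msghdr msg = { .msg_flags = MSG_DONTWAIT };

		return kernel_recvmsg(sock, &msg, &vec, 1, len, msg.msg_flags);
	}

	static int example_send_all(struct socket *sock, void *data, size_t len)
	{
		struct kvec vec = { .iov_base = data, .iov_len = len };
		struct msghdr msg = { };
		int ret = kernel_sendmsg(sock, &msg, &vec, 1, len);

		if (ret < 0)
			return ret;
		if ((size_t)ret != len)
			return -EPIPE;	/* short send treated as a broken pipe */
		return 0;
	}
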
diff --git a/fs/ocfs2/cluster/tcp_internal.h b/fs/ocfs2/cluster/tcp_internal.h
index 4cbcb65784a3..dc024367110a 100644
--- a/fs/ocfs2/cluster/tcp_internal.h
+++ b/fs/ocfs2/cluster/tcp_internal.h
@@ -165,7 +165,7 @@ struct o2net_sock_container {
165 165
166 /* original handlers for the sockets */ 166 /* original handlers for the sockets */
167 void (*sc_state_change)(struct sock *sk); 167 void (*sc_state_change)(struct sock *sk);
168 void (*sc_data_ready)(struct sock *sk, int bytes); 168 void (*sc_data_ready)(struct sock *sk);
169 169
170 u32 sc_msg_key; 170 u32 sc_msg_key;
171 u16 sc_msg_type; 171 u16 sc_msg_type;
diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c
index ff33c5ef87f2..8970dcf74de5 100644
--- a/fs/ocfs2/file.c
+++ b/fs/ocfs2/file.c
@@ -2367,15 +2367,18 @@ relock:
2367 2367
2368 if (direct_io) { 2368 if (direct_io) {
2369 written = generic_file_direct_write(iocb, iov, &nr_segs, *ppos, 2369 written = generic_file_direct_write(iocb, iov, &nr_segs, *ppos,
2370 ppos, count, ocount); 2370 count, ocount);
2371 if (written < 0) { 2371 if (written < 0) {
2372 ret = written; 2372 ret = written;
2373 goto out_dio; 2373 goto out_dio;
2374 } 2374 }
2375 } else { 2375 } else {
2376 struct iov_iter from;
2377 iov_iter_init(&from, iov, nr_segs, count, 0);
2376 current->backing_dev_info = file->f_mapping->backing_dev_info; 2378 current->backing_dev_info = file->f_mapping->backing_dev_info;
2377 written = generic_file_buffered_write(iocb, iov, nr_segs, *ppos, 2379 written = generic_perform_write(file, &from, *ppos);
2378 ppos, count, 0); 2380 if (likely(written >= 0))
2381 iocb->ki_pos = *ppos + written;
2379 current->backing_dev_info = NULL; 2382 current->backing_dev_info = NULL;
2380 } 2383 }
2381 2384
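
The buffered branch of ocfs2_file_aio_write() now feeds an iov_iter straight to generic_perform_write() and advances ki_pos itself instead of calling generic_file_buffered_write(). A condensed sketch of that tail, leaving out ocfs2's locking, O_DIRECT and error handling; it assumes generic_perform_write() is visible to filesystems, as the hunk implies, and the function name is illustrative:

	#include <linux/fs.h>
	#include <linux/aio.h>
	#include <linux/uio.h>

	static ssize_t example_buffered_write(struct kiocb *iocb,
					      const struct iovec *iov,
					      unsigned long nr_segs,
					      size_t count, loff_t pos)
	{
		struct file *file = iocb->ki_filp;
		struct iov_iter from;
		ssize_t written;

		iov_iter_init(&from, iov, nr_segs, count, 0);
		written = generic_perform_write(file, &from, pos);
		if (written >= 0)
			iocb->ki_pos = pos + written;
		return written;
	}
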
diff --git a/fs/open.c b/fs/open.c
index 631aea815def..3d30eb1fc95e 100644
--- a/fs/open.c
+++ b/fs/open.c
@@ -655,35 +655,6 @@ out:
655 return error; 655 return error;
656} 656}
657 657
658/*
659 * You have to be very careful that these write
660 * counts get cleaned up in error cases and
661 * upon __fput(). This should probably never
662 * be called outside of __dentry_open().
663 */
664static inline int __get_file_write_access(struct inode *inode,
665 struct vfsmount *mnt)
666{
667 int error;
668 error = get_write_access(inode);
669 if (error)
670 return error;
671 /*
672 * Do not take mount writer counts on
673 * special files since no writes to
674 * the mount itself will occur.
675 */
676 if (!special_file(inode->i_mode)) {
677 /*
678 * Balanced in __fput()
679 */
680 error = __mnt_want_write(mnt);
681 if (error)
682 put_write_access(inode);
683 }
684 return error;
685}
686
687int open_check_o_direct(struct file *f) 658int open_check_o_direct(struct file *f)
688{ 659{
689 /* NB: we're sure to have correct a_ops only after f_op->open */ 660 /* NB: we're sure to have correct a_ops only after f_op->open */
@@ -708,26 +679,28 @@ static int do_dentry_open(struct file *f,
708 f->f_mode = OPEN_FMODE(f->f_flags) | FMODE_LSEEK | 679 f->f_mode = OPEN_FMODE(f->f_flags) | FMODE_LSEEK |
709 FMODE_PREAD | FMODE_PWRITE; 680 FMODE_PREAD | FMODE_PWRITE;
710 681
711 if (unlikely(f->f_flags & O_PATH))
712 f->f_mode = FMODE_PATH;
713
714 path_get(&f->f_path); 682 path_get(&f->f_path);
715 inode = f->f_inode = f->f_path.dentry->d_inode; 683 inode = f->f_inode = f->f_path.dentry->d_inode;
716 if (f->f_mode & FMODE_WRITE) {
717 error = __get_file_write_access(inode, f->f_path.mnt);
718 if (error)
719 goto cleanup_file;
720 if (!special_file(inode->i_mode))
721 file_take_write(f);
722 }
723
724 f->f_mapping = inode->i_mapping; 684 f->f_mapping = inode->i_mapping;
725 685
726 if (unlikely(f->f_mode & FMODE_PATH)) { 686 if (unlikely(f->f_flags & O_PATH)) {
687 f->f_mode = FMODE_PATH;
727 f->f_op = &empty_fops; 688 f->f_op = &empty_fops;
728 return 0; 689 return 0;
729 } 690 }
730 691
692 if (f->f_mode & FMODE_WRITE && !special_file(inode->i_mode)) {
693 error = get_write_access(inode);
694 if (unlikely(error))
695 goto cleanup_file;
696 error = __mnt_want_write(f->f_path.mnt);
697 if (unlikely(error)) {
698 put_write_access(inode);
699 goto cleanup_file;
700 }
701 f->f_mode |= FMODE_WRITER;
702 }
703
731 /* POSIX.1-2008/SUSv4 Section XSI 2.9.7 */ 704 /* POSIX.1-2008/SUSv4 Section XSI 2.9.7 */
732 if (S_ISREG(inode->i_mode)) 705 if (S_ISREG(inode->i_mode))
733 f->f_mode |= FMODE_ATOMIC_POS; 706 f->f_mode |= FMODE_ATOMIC_POS;
@@ -764,18 +737,9 @@ static int do_dentry_open(struct file *f,
764 737
765cleanup_all: 738cleanup_all:
766 fops_put(f->f_op); 739 fops_put(f->f_op);
767 if (f->f_mode & FMODE_WRITE) { 740 if (f->f_mode & FMODE_WRITER) {
768 put_write_access(inode); 741 put_write_access(inode);
769 if (!special_file(inode->i_mode)) { 742 __mnt_drop_write(f->f_path.mnt);
770 /*
771 * We don't consider this a real
772 * mnt_want/drop_write() pair
773 * because it all happenend right
774 * here, so just reset the state.
775 */
776 file_reset_write(f);
777 __mnt_drop_write(f->f_path.mnt);
778 }
779 } 743 }
780cleanup_file: 744cleanup_file:
781 path_put(&f->f_path); 745 path_put(&f->f_path);
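
The fs/open.c hunk above and the fs/file_table.c hunk earlier are the two halves of the new FMODE_WRITER scheme: do_dentry_open() takes i_writecount and the mount writer count together and records that in f_mode, and __fput() releases exactly what the flag says is held. A condensed, illustrative restatement of that pairing (the real logic is inline in those two functions, and __mnt_want_write()/__mnt_drop_write() are VFS-internal helpers, not part of the public headers):

	#include <linux/fs.h>

	static int example_take_write_access(struct file *f, struct inode *inode)
	{
		int error;

		/* read-only opens and special files pin nothing */
		if (!(f->f_mode & FMODE_WRITE) || special_file(inode->i_mode))
			return 0;

		error = get_write_access(inode);		/* inode i_writecount */
		if (unlikely(error))
			return error;
		error = __mnt_want_write(f->f_path.mnt);	/* mount writer count */
		if (unlikely(error)) {
			put_write_access(inode);
			return error;
		}
		f->f_mode |= FMODE_WRITER;	/* the only record that both are held */
		return 0;
	}

	static void example_drop_write_access(struct file *f, struct inode *inode)
	{
		if (f->f_mode & FMODE_WRITER) {
			put_write_access(inode);
			__mnt_drop_write(f->f_path.mnt);
		}
	}
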
diff --git a/fs/pipe.c b/fs/pipe.c
index 78fd0d0788db..034bffac3f97 100644
--- a/fs/pipe.c
+++ b/fs/pipe.c
@@ -142,55 +142,6 @@ pipe_iov_copy_from_user(void *to, struct iovec *iov, unsigned long len,
142 return 0; 142 return 0;
143} 143}
144 144
145static int
146pipe_iov_copy_to_user(struct iovec *iov, const void *from, unsigned long len,
147 int atomic)
148{
149 unsigned long copy;
150
151 while (len > 0) {
152 while (!iov->iov_len)
153 iov++;
154 copy = min_t(unsigned long, len, iov->iov_len);
155
156 if (atomic) {
157 if (__copy_to_user_inatomic(iov->iov_base, from, copy))
158 return -EFAULT;
159 } else {
160 if (copy_to_user(iov->iov_base, from, copy))
161 return -EFAULT;
162 }
163 from += copy;
164 len -= copy;
165 iov->iov_base += copy;
166 iov->iov_len -= copy;
167 }
168 return 0;
169}
170
171/*
172 * Attempt to pre-fault in the user memory, so we can use atomic copies.
173 * Returns the number of bytes not faulted in.
174 */
175static int iov_fault_in_pages_write(struct iovec *iov, unsigned long len)
176{
177 while (!iov->iov_len)
178 iov++;
179
180 while (len > 0) {
181 unsigned long this_len;
182
183 this_len = min_t(unsigned long, len, iov->iov_len);
184 if (fault_in_pages_writeable(iov->iov_base, this_len))
185 break;
186
187 len -= this_len;
188 iov++;
189 }
190
191 return len;
192}
193
194/* 145/*
195 * Pre-fault in the user memory, so we can use atomic copies. 146 * Pre-fault in the user memory, so we can use atomic copies.
196 */ 147 */
@@ -226,52 +177,6 @@ static void anon_pipe_buf_release(struct pipe_inode_info *pipe,
226} 177}
227 178
228/** 179/**
229 * generic_pipe_buf_map - virtually map a pipe buffer
230 * @pipe: the pipe that the buffer belongs to
231 * @buf: the buffer that should be mapped
232 * @atomic: whether to use an atomic map
233 *
234 * Description:
235 * This function returns a kernel virtual address mapping for the
236 * pipe_buffer passed in @buf. If @atomic is set, an atomic map is provided
237 * and the caller has to be careful not to fault before calling
238 * the unmap function.
239 *
240 * Note that this function calls kmap_atomic() if @atomic != 0.
241 */
242void *generic_pipe_buf_map(struct pipe_inode_info *pipe,
243 struct pipe_buffer *buf, int atomic)
244{
245 if (atomic) {
246 buf->flags |= PIPE_BUF_FLAG_ATOMIC;
247 return kmap_atomic(buf->page);
248 }
249
250 return kmap(buf->page);
251}
252EXPORT_SYMBOL(generic_pipe_buf_map);
253
254/**
255 * generic_pipe_buf_unmap - unmap a previously mapped pipe buffer
256 * @pipe: the pipe that the buffer belongs to
257 * @buf: the buffer that should be unmapped
258 * @map_data: the data that the mapping function returned
259 *
260 * Description:
261 * This function undoes the mapping that ->map() provided.
262 */
263void generic_pipe_buf_unmap(struct pipe_inode_info *pipe,
264 struct pipe_buffer *buf, void *map_data)
265{
266 if (buf->flags & PIPE_BUF_FLAG_ATOMIC) {
267 buf->flags &= ~PIPE_BUF_FLAG_ATOMIC;
268 kunmap_atomic(map_data);
269 } else
270 kunmap(buf->page);
271}
272EXPORT_SYMBOL(generic_pipe_buf_unmap);
273
274/**
275 * generic_pipe_buf_steal - attempt to take ownership of a &pipe_buffer 180 * generic_pipe_buf_steal - attempt to take ownership of a &pipe_buffer
276 * @pipe: the pipe that the buffer belongs to 181 * @pipe: the pipe that the buffer belongs to
277 * @buf: the buffer to attempt to steal 182 * @buf: the buffer to attempt to steal
@@ -351,8 +256,6 @@ EXPORT_SYMBOL(generic_pipe_buf_release);
351 256
352static const struct pipe_buf_operations anon_pipe_buf_ops = { 257static const struct pipe_buf_operations anon_pipe_buf_ops = {
353 .can_merge = 1, 258 .can_merge = 1,
354 .map = generic_pipe_buf_map,
355 .unmap = generic_pipe_buf_unmap,
356 .confirm = generic_pipe_buf_confirm, 259 .confirm = generic_pipe_buf_confirm,
357 .release = anon_pipe_buf_release, 260 .release = anon_pipe_buf_release,
358 .steal = generic_pipe_buf_steal, 261 .steal = generic_pipe_buf_steal,
@@ -361,8 +264,6 @@ static const struct pipe_buf_operations anon_pipe_buf_ops = {
361 264
362static const struct pipe_buf_operations packet_pipe_buf_ops = { 265static const struct pipe_buf_operations packet_pipe_buf_ops = {
363 .can_merge = 0, 266 .can_merge = 0,
364 .map = generic_pipe_buf_map,
365 .unmap = generic_pipe_buf_unmap,
366 .confirm = generic_pipe_buf_confirm, 267 .confirm = generic_pipe_buf_confirm,
367 .release = anon_pipe_buf_release, 268 .release = anon_pipe_buf_release,
368 .steal = generic_pipe_buf_steal, 269 .steal = generic_pipe_buf_steal,
@@ -379,12 +280,15 @@ pipe_read(struct kiocb *iocb, const struct iovec *_iov,
379 ssize_t ret; 280 ssize_t ret;
380 struct iovec *iov = (struct iovec *)_iov; 281 struct iovec *iov = (struct iovec *)_iov;
381 size_t total_len; 282 size_t total_len;
283 struct iov_iter iter;
382 284
383 total_len = iov_length(iov, nr_segs); 285 total_len = iov_length(iov, nr_segs);
384 /* Null read succeeds. */ 286 /* Null read succeeds. */
385 if (unlikely(total_len == 0)) 287 if (unlikely(total_len == 0))
386 return 0; 288 return 0;
387 289
290 iov_iter_init(&iter, iov, nr_segs, total_len, 0);
291
388 do_wakeup = 0; 292 do_wakeup = 0;
389 ret = 0; 293 ret = 0;
390 __pipe_lock(pipe); 294 __pipe_lock(pipe);
@@ -394,9 +298,9 @@ pipe_read(struct kiocb *iocb, const struct iovec *_iov,
394 int curbuf = pipe->curbuf; 298 int curbuf = pipe->curbuf;
395 struct pipe_buffer *buf = pipe->bufs + curbuf; 299 struct pipe_buffer *buf = pipe->bufs + curbuf;
396 const struct pipe_buf_operations *ops = buf->ops; 300 const struct pipe_buf_operations *ops = buf->ops;
397 void *addr;
398 size_t chars = buf->len; 301 size_t chars = buf->len;
399 int error, atomic; 302 size_t written;
303 int error;
400 304
401 if (chars > total_len) 305 if (chars > total_len)
402 chars = total_len; 306 chars = total_len;
@@ -408,21 +312,10 @@ pipe_read(struct kiocb *iocb, const struct iovec *_iov,
408 break; 312 break;
409 } 313 }
410 314
411 atomic = !iov_fault_in_pages_write(iov, chars); 315 written = copy_page_to_iter(buf->page, buf->offset, chars, &iter);
412redo: 316 if (unlikely(written < chars)) {
413 addr = ops->map(pipe, buf, atomic);
414 error = pipe_iov_copy_to_user(iov, addr + buf->offset, chars, atomic);
415 ops->unmap(pipe, buf, addr);
416 if (unlikely(error)) {
417 /*
418 * Just retry with the slow path if we failed.
419 */
420 if (atomic) {
421 atomic = 0;
422 goto redo;
423 }
424 if (!ret) 317 if (!ret)
425 ret = error; 318 ret = -EFAULT;
426 break; 319 break;
427 } 320 }
428 ret += chars; 321 ret += chars;
@@ -538,10 +431,16 @@ pipe_write(struct kiocb *iocb, const struct iovec *_iov,
538 431
539 iov_fault_in_pages_read(iov, chars); 432 iov_fault_in_pages_read(iov, chars);
540redo1: 433redo1:
541 addr = ops->map(pipe, buf, atomic); 434 if (atomic)
435 addr = kmap_atomic(buf->page);
436 else
437 addr = kmap(buf->page);
542 error = pipe_iov_copy_from_user(offset + addr, iov, 438 error = pipe_iov_copy_from_user(offset + addr, iov,
543 chars, atomic); 439 chars, atomic);
544 ops->unmap(pipe, buf, addr); 440 if (atomic)
441 kunmap_atomic(addr);
442 else
443 kunmap(buf->page);
545 ret = error; 444 ret = error;
546 do_wakeup = 1; 445 do_wakeup = 1;
547 if (error) { 446 if (error) {
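The fs/pipe.c hunks above drop the per-buffer ->map()/->unmap() calls and the atomic-with-slow-path retry in pipe_read(): the read side now builds an iov_iter once and lets copy_page_to_iter() do the mapping and the fault handling. A minimal sketch of that read-side copy, assuming a ~3.15-era kernel tree (the helper name is made up for illustration):

/* Sketch: copy one pipe buffer's payload into a user-backed iov_iter. */
static ssize_t pipe_buf_to_iter(struct pipe_buffer *buf, struct iov_iter *to,
				size_t chars)
{
	size_t written = copy_page_to_iter(buf->page, buf->offset, chars, to);

	/* A short copy means the user pages faulted part-way through. */
	return written < chars ? -EFAULT : written;
}

pipe_write() keeps an explicit kmap_atomic()/kmap() pair for now, as the hunk around redo1: shows.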
diff --git a/fs/pnode.c b/fs/pnode.c
index 88396df725b4..302bf22c4a30 100644
--- a/fs/pnode.c
+++ b/fs/pnode.c
@@ -164,46 +164,94 @@ static struct mount *propagation_next(struct mount *m,
164 } 164 }
165} 165}
166 166
167/* 167static struct mount *next_group(struct mount *m, struct mount *origin)
168 * return the source mount to be used for cloning
169 *
170 * @dest the current destination mount
171 * @last_dest the last seen destination mount
172 * @last_src the last seen source mount
173 * @type return CL_SLAVE if the new mount has to be
174 * cloned as a slave.
175 */
176static struct mount *get_source(struct mount *dest,
177 struct mount *last_dest,
178 struct mount *last_src,
179 int *type)
180{ 168{
181 struct mount *p_last_src = NULL; 169 while (1) {
182 struct mount *p_last_dest = NULL; 170 while (1) {
183 171 struct mount *next;
184 while (last_dest != dest->mnt_master) { 172 if (!IS_MNT_NEW(m) && !list_empty(&m->mnt_slave_list))
185 p_last_dest = last_dest; 173 return first_slave(m);
186 p_last_src = last_src; 174 next = next_peer(m);
187 last_dest = last_dest->mnt_master; 175 if (m->mnt_group_id == origin->mnt_group_id) {
188 last_src = last_src->mnt_master; 176 if (next == origin)
177 return NULL;
178 } else if (m->mnt_slave.next != &next->mnt_slave)
179 break;
180 m = next;
181 }
182 /* m is the last peer */
183 while (1) {
184 struct mount *master = m->mnt_master;
185 if (m->mnt_slave.next != &master->mnt_slave_list)
186 return next_slave(m);
187 m = next_peer(master);
188 if (master->mnt_group_id == origin->mnt_group_id)
189 break;
190 if (master->mnt_slave.next == &m->mnt_slave)
191 break;
192 m = master;
193 }
194 if (m == origin)
195 return NULL;
189 } 196 }
197}
190 198
191 if (p_last_dest) { 199/* all accesses are serialized by namespace_sem */
192 do { 200static struct user_namespace *user_ns;
193 p_last_dest = next_peer(p_last_dest); 201static struct mount *last_dest, *last_source, *dest_master;
194 } while (IS_MNT_NEW(p_last_dest)); 202static struct mountpoint *mp;
195 /* is that a peer of the earlier? */ 203static struct hlist_head *list;
196 if (dest == p_last_dest) { 204
197 *type = CL_MAKE_SHARED; 205static int propagate_one(struct mount *m)
198 return p_last_src; 206{
207 struct mount *child;
208 int type;
209 /* skip ones added by this propagate_mnt() */
210 if (IS_MNT_NEW(m))
211 return 0;
212 /* skip if mountpoint isn't covered by it */
213 if (!is_subdir(mp->m_dentry, m->mnt.mnt_root))
214 return 0;
215 if (m->mnt_group_id == last_dest->mnt_group_id) {
216 type = CL_MAKE_SHARED;
217 } else {
218 struct mount *n, *p;
219 for (n = m; ; n = p) {
220 p = n->mnt_master;
221 if (p == dest_master || IS_MNT_MARKED(p)) {
222 while (last_dest->mnt_master != p) {
223 last_source = last_source->mnt_master;
224 last_dest = last_source->mnt_parent;
225 }
226 if (n->mnt_group_id != last_dest->mnt_group_id) {
227 last_source = last_source->mnt_master;
228 last_dest = last_source->mnt_parent;
229 }
230 break;
231 }
199 } 232 }
233 type = CL_SLAVE;
234 /* beginning of peer group among the slaves? */
235 if (IS_MNT_SHARED(m))
236 type |= CL_MAKE_SHARED;
200 } 237 }
201 /* slave of the earlier, then */ 238
202 *type = CL_SLAVE; 239 /* Notice when we are propagating across user namespaces */
203 /* beginning of peer group among the slaves? */ 240 if (m->mnt_ns->user_ns != user_ns)
204 if (IS_MNT_SHARED(dest)) 241 type |= CL_UNPRIVILEGED;
205 *type |= CL_MAKE_SHARED; 242 child = copy_tree(last_source, last_source->mnt.mnt_root, type);
206 return last_src; 243 if (IS_ERR(child))
244 return PTR_ERR(child);
245 mnt_set_mountpoint(m, mp, child);
246 last_dest = m;
247 last_source = child;
248 if (m->mnt_master != dest_master) {
249 read_seqlock_excl(&mount_lock);
250 SET_MNT_MARK(m->mnt_master);
251 read_sequnlock_excl(&mount_lock);
252 }
253 hlist_add_head(&child->mnt_hash, list);
254 return 0;
207} 255}
208 256
209/* 257/*
@@ -222,56 +270,48 @@ static struct mount *get_source(struct mount *dest,
222int propagate_mnt(struct mount *dest_mnt, struct mountpoint *dest_mp, 270int propagate_mnt(struct mount *dest_mnt, struct mountpoint *dest_mp,
223 struct mount *source_mnt, struct hlist_head *tree_list) 271 struct mount *source_mnt, struct hlist_head *tree_list)
224{ 272{
225 struct user_namespace *user_ns = current->nsproxy->mnt_ns->user_ns; 273 struct mount *m, *n;
226 struct mount *m, *child;
227 int ret = 0; 274 int ret = 0;
228 struct mount *prev_dest_mnt = dest_mnt; 275
229 struct mount *prev_src_mnt = source_mnt; 276 /*
230 HLIST_HEAD(tmp_list); 277 * we don't want to bother passing tons of arguments to
231 278 * propagate_one(); everything is serialized by namespace_sem,
232 for (m = propagation_next(dest_mnt, dest_mnt); m; 279 * so globals will do just fine.
233 m = propagation_next(m, dest_mnt)) { 280 */
234 int type; 281 user_ns = current->nsproxy->mnt_ns->user_ns;
235 struct mount *source; 282 last_dest = dest_mnt;
236 283 last_source = source_mnt;
237 if (IS_MNT_NEW(m)) 284 mp = dest_mp;
238 continue; 285 list = tree_list;
239 286 dest_master = dest_mnt->mnt_master;
240 source = get_source(m, prev_dest_mnt, prev_src_mnt, &type); 287
241 288 /* all peers of dest_mnt, except dest_mnt itself */
242 /* Notice when we are propagating across user namespaces */ 289 for (n = next_peer(dest_mnt); n != dest_mnt; n = next_peer(n)) {
243 if (m->mnt_ns->user_ns != user_ns) 290 ret = propagate_one(n);
244 type |= CL_UNPRIVILEGED; 291 if (ret)
245
246 child = copy_tree(source, source->mnt.mnt_root, type);
247 if (IS_ERR(child)) {
248 ret = PTR_ERR(child);
249 tmp_list = *tree_list;
250 tmp_list.first->pprev = &tmp_list.first;
251 INIT_HLIST_HEAD(tree_list);
252 goto out; 292 goto out;
253 } 293 }
254 294
255 if (is_subdir(dest_mp->m_dentry, m->mnt.mnt_root)) { 295 /* all slave groups */
256 mnt_set_mountpoint(m, dest_mp, child); 296 for (m = next_group(dest_mnt, dest_mnt); m;
257 hlist_add_head(&child->mnt_hash, tree_list); 297 m = next_group(m, dest_mnt)) {
258 } else { 298 /* everything in that slave group */
259 /* 299 n = m;
260 * This can happen if the parent mount was bind mounted 300 do {
261 * on some subdirectory of a shared/slave mount. 301 ret = propagate_one(n);
262 */ 302 if (ret)
263 hlist_add_head(&child->mnt_hash, &tmp_list); 303 goto out;
264 } 304 n = next_peer(n);
265 prev_dest_mnt = m; 305 } while (n != m);
266 prev_src_mnt = child;
267 } 306 }
268out: 307out:
269 lock_mount_hash(); 308 read_seqlock_excl(&mount_lock);
270 while (!hlist_empty(&tmp_list)) { 309 hlist_for_each_entry(n, tree_list, mnt_hash) {
271 child = hlist_entry(tmp_list.first, struct mount, mnt_hash); 310 m = n->mnt_parent;
272 umount_tree(child, 0); 311 if (m->mnt_master != dest_mnt->mnt_master)
312 CLEAR_MNT_MARK(m->mnt_master);
273 } 313 }
274 unlock_mount_hash(); 314 read_sequnlock_excl(&mount_lock);
275 return ret; 315 return ret;
276} 316}
277 317
diff --git a/fs/pnode.h b/fs/pnode.h
index fc28a27fa892..4a246358b031 100644
--- a/fs/pnode.h
+++ b/fs/pnode.h
@@ -16,6 +16,9 @@
16#define IS_MNT_NEW(m) (!(m)->mnt_ns) 16#define IS_MNT_NEW(m) (!(m)->mnt_ns)
17#define CLEAR_MNT_SHARED(m) ((m)->mnt.mnt_flags &= ~MNT_SHARED) 17#define CLEAR_MNT_SHARED(m) ((m)->mnt.mnt_flags &= ~MNT_SHARED)
18#define IS_MNT_UNBINDABLE(m) ((m)->mnt.mnt_flags & MNT_UNBINDABLE) 18#define IS_MNT_UNBINDABLE(m) ((m)->mnt.mnt_flags & MNT_UNBINDABLE)
19#define IS_MNT_MARKED(m) ((m)->mnt.mnt_flags & MNT_MARKED)
20#define SET_MNT_MARK(m) ((m)->mnt.mnt_flags |= MNT_MARKED)
21#define CLEAR_MNT_MARK(m) ((m)->mnt.mnt_flags &= ~MNT_MARKED)
19 22
20#define CL_EXPIRE 0x01 23#define CL_EXPIRE 0x01
21#define CL_SLAVE 0x02 24#define CL_SLAVE 0x02
diff --git a/fs/proc/base.c b/fs/proc/base.c
index 6b7087e2e8fb..2d696b0c93bf 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -200,41 +200,9 @@ static int proc_root_link(struct dentry *dentry, struct path *path)
200 return result; 200 return result;
201} 201}
202 202
203static int proc_pid_cmdline(struct task_struct *task, char * buffer) 203static int proc_pid_cmdline(struct task_struct *task, char *buffer)
204{ 204{
205 int res = 0; 205 return get_cmdline(task, buffer, PAGE_SIZE);
206 unsigned int len;
207 struct mm_struct *mm = get_task_mm(task);
208 if (!mm)
209 goto out;
210 if (!mm->arg_end)
211 goto out_mm; /* Shh! No looking before we're done */
212
213 len = mm->arg_end - mm->arg_start;
214
215 if (len > PAGE_SIZE)
216 len = PAGE_SIZE;
217
218 res = access_process_vm(task, mm->arg_start, buffer, len, 0);
219
220 // If the nul at the end of args has been overwritten, then
221 // assume application is using setproctitle(3).
222 if (res > 0 && buffer[res-1] != '\0' && len < PAGE_SIZE) {
223 len = strnlen(buffer, res);
224 if (len < res) {
225 res = len;
226 } else {
227 len = mm->env_end - mm->env_start;
228 if (len > PAGE_SIZE - res)
229 len = PAGE_SIZE - res;
230 res += access_process_vm(task, mm->env_start, buffer+res, len, 0);
231 res = strnlen(buffer, res);
232 }
233 }
234out_mm:
235 mmput(mm);
236out:
237 return res;
238} 206}
239 207
240static int proc_pid_auxv(struct task_struct *task, char *buffer) 208static int proc_pid_auxv(struct task_struct *task, char *buffer)
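proc_pid_cmdline() shrinks to a call to get_cmdline(), declared in the include/linux/mm.h hunk further down, so the setproctitle(3) heuristics live in mm code rather than in procfs. The file format seen from userspace is unchanged: NUL-separated arguments. A small runnable reader, hard-coded to the caller's own entry:

#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/proc/self/cmdline", "r");
	char buf[4096];
	size_t n, i;

	if (!f)
		return 1;
	n = fread(buf, 1, sizeof(buf), f);
	fclose(f);

	/* Arguments are separated by NUL bytes; print one per line. */
	for (i = 0; i < n; i++)
		putchar(buf[i] ? buf[i] : '\n');
	return 0;
}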
diff --git a/fs/proc/namespaces.c b/fs/proc/namespaces.c
index 9ae46b87470d..89026095f2b5 100644
--- a/fs/proc/namespaces.c
+++ b/fs/proc/namespaces.c
@@ -146,7 +146,7 @@ static int proc_ns_readlink(struct dentry *dentry, char __user *buffer, int bufl
146 struct task_struct *task; 146 struct task_struct *task;
147 void *ns; 147 void *ns;
148 char name[50]; 148 char name[50];
149 int len = -EACCES; 149 int res = -EACCES;
150 150
151 task = get_proc_task(inode); 151 task = get_proc_task(inode);
152 if (!task) 152 if (!task)
@@ -155,24 +155,18 @@ static int proc_ns_readlink(struct dentry *dentry, char __user *buffer, int bufl
155 if (!ptrace_may_access(task, PTRACE_MODE_READ)) 155 if (!ptrace_may_access(task, PTRACE_MODE_READ))
156 goto out_put_task; 156 goto out_put_task;
157 157
158 len = -ENOENT; 158 res = -ENOENT;
159 ns = ns_ops->get(task); 159 ns = ns_ops->get(task);
160 if (!ns) 160 if (!ns)
161 goto out_put_task; 161 goto out_put_task;
162 162
163 snprintf(name, sizeof(name), "%s:[%u]", ns_ops->name, ns_ops->inum(ns)); 163 snprintf(name, sizeof(name), "%s:[%u]", ns_ops->name, ns_ops->inum(ns));
164 len = strlen(name); 164 res = readlink_copy(buffer, buflen, name);
165
166 if (len > buflen)
167 len = buflen;
168 if (copy_to_user(buffer, name, len))
169 len = -EFAULT;
170
171 ns_ops->put(ns); 165 ns_ops->put(ns);
172out_put_task: 166out_put_task:
173 put_task_struct(task); 167 put_task_struct(task);
174out: 168out:
175 return len; 169 return res;
176} 170}
177 171
178static const struct inode_operations proc_ns_link_inode_operations = { 172static const struct inode_operations proc_ns_link_inode_operations = {
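proc_ns_readlink() now formats the "name:[inum]" string and hands it to readlink_copy() instead of open-coding the truncate-and-copy_to_user steps, with the result variable renamed from len to res to match. The link target format is unchanged; a short runnable check from userspace:

#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char buf[64];
	ssize_t n = readlink("/proc/self/ns/mnt", buf, sizeof(buf) - 1);

	if (n < 0)
		return 1;
	buf[n] = '\0';
	printf("%s\n", buf);	/* prints something like "mnt:[4026531840]" */
	return 0;
}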
diff --git a/fs/proc/self.c b/fs/proc/self.c
index ffeb202ec942..4348bb8907c2 100644
--- a/fs/proc/self.c
+++ b/fs/proc/self.c
@@ -16,7 +16,7 @@ static int proc_self_readlink(struct dentry *dentry, char __user *buffer,
16 if (!tgid) 16 if (!tgid)
17 return -ENOENT; 17 return -ENOENT;
18 sprintf(tmp, "%d", tgid); 18 sprintf(tmp, "%d", tgid);
19 return vfs_readlink(dentry,buffer,buflen,tmp); 19 return readlink_copy(buffer, buflen, tmp);
20} 20}
21 21
22static void *proc_self_follow_link(struct dentry *dentry, struct nameidata *nd) 22static void *proc_self_follow_link(struct dentry *dentry, struct nameidata *nd)
diff --git a/fs/proc_namespace.c b/fs/proc_namespace.c
index 7be26f03a3f5..1a81373947f3 100644
--- a/fs/proc_namespace.c
+++ b/fs/proc_namespace.c
@@ -267,6 +267,7 @@ static int mounts_open_common(struct inode *inode, struct file *file,
267 p->root = root; 267 p->root = root;
268 p->m.poll_event = ns->event; 268 p->m.poll_event = ns->event;
269 p->show = show; 269 p->show = show;
270 p->cached_event = ~0ULL;
270 271
271 return 0; 272 return 0;
272 273
diff --git a/fs/splice.c b/fs/splice.c
index 12028fa41def..9bc07d2b53cf 100644
--- a/fs/splice.c
+++ b/fs/splice.c
@@ -136,8 +136,6 @@ error:
136 136
137const struct pipe_buf_operations page_cache_pipe_buf_ops = { 137const struct pipe_buf_operations page_cache_pipe_buf_ops = {
138 .can_merge = 0, 138 .can_merge = 0,
139 .map = generic_pipe_buf_map,
140 .unmap = generic_pipe_buf_unmap,
141 .confirm = page_cache_pipe_buf_confirm, 139 .confirm = page_cache_pipe_buf_confirm,
142 .release = page_cache_pipe_buf_release, 140 .release = page_cache_pipe_buf_release,
143 .steal = page_cache_pipe_buf_steal, 141 .steal = page_cache_pipe_buf_steal,
@@ -156,8 +154,6 @@ static int user_page_pipe_buf_steal(struct pipe_inode_info *pipe,
156 154
157static const struct pipe_buf_operations user_page_pipe_buf_ops = { 155static const struct pipe_buf_operations user_page_pipe_buf_ops = {
158 .can_merge = 0, 156 .can_merge = 0,
159 .map = generic_pipe_buf_map,
160 .unmap = generic_pipe_buf_unmap,
161 .confirm = generic_pipe_buf_confirm, 157 .confirm = generic_pipe_buf_confirm,
162 .release = page_cache_pipe_buf_release, 158 .release = page_cache_pipe_buf_release,
163 .steal = user_page_pipe_buf_steal, 159 .steal = user_page_pipe_buf_steal,
@@ -547,8 +543,6 @@ EXPORT_SYMBOL(generic_file_splice_read);
547 543
548static const struct pipe_buf_operations default_pipe_buf_ops = { 544static const struct pipe_buf_operations default_pipe_buf_ops = {
549 .can_merge = 0, 545 .can_merge = 0,
550 .map = generic_pipe_buf_map,
551 .unmap = generic_pipe_buf_unmap,
552 .confirm = generic_pipe_buf_confirm, 546 .confirm = generic_pipe_buf_confirm,
553 .release = generic_pipe_buf_release, 547 .release = generic_pipe_buf_release,
554 .steal = generic_pipe_buf_steal, 548 .steal = generic_pipe_buf_steal,
@@ -564,8 +558,6 @@ static int generic_pipe_buf_nosteal(struct pipe_inode_info *pipe,
564/* Pipe buffer operations for a socket and similar. */ 558/* Pipe buffer operations for a socket and similar. */
565const struct pipe_buf_operations nosteal_pipe_buf_ops = { 559const struct pipe_buf_operations nosteal_pipe_buf_ops = {
566 .can_merge = 0, 560 .can_merge = 0,
567 .map = generic_pipe_buf_map,
568 .unmap = generic_pipe_buf_unmap,
569 .confirm = generic_pipe_buf_confirm, 561 .confirm = generic_pipe_buf_confirm,
570 .release = generic_pipe_buf_release, 562 .release = generic_pipe_buf_release,
571 .steal = generic_pipe_buf_nosteal, 563 .steal = generic_pipe_buf_nosteal,
@@ -767,13 +759,13 @@ int pipe_to_file(struct pipe_inode_info *pipe, struct pipe_buffer *buf,
767 goto out; 759 goto out;
768 760
769 if (buf->page != page) { 761 if (buf->page != page) {
770 char *src = buf->ops->map(pipe, buf, 1); 762 char *src = kmap_atomic(buf->page);
771 char *dst = kmap_atomic(page); 763 char *dst = kmap_atomic(page);
772 764
773 memcpy(dst + offset, src + buf->offset, this_len); 765 memcpy(dst + offset, src + buf->offset, this_len);
774 flush_dcache_page(page); 766 flush_dcache_page(page);
775 kunmap_atomic(dst); 767 kunmap_atomic(dst);
776 buf->ops->unmap(pipe, buf, src); 768 kunmap_atomic(src);
777 } 769 }
778 ret = pagecache_write_end(file, mapping, sd->pos, this_len, this_len, 770 ret = pagecache_write_end(file, mapping, sd->pos, this_len, this_len,
779 page, fsdata); 771 page, fsdata);
@@ -1067,9 +1059,9 @@ static int write_pipe_buf(struct pipe_inode_info *pipe, struct pipe_buffer *buf,
1067 void *data; 1059 void *data;
1068 loff_t tmp = sd->pos; 1060 loff_t tmp = sd->pos;
1069 1061
1070 data = buf->ops->map(pipe, buf, 0); 1062 data = kmap(buf->page);
1071 ret = __kernel_write(sd->u.file, data + buf->offset, sd->len, &tmp); 1063 ret = __kernel_write(sd->u.file, data + buf->offset, sd->len, &tmp);
1072 buf->ops->unmap(pipe, buf, data); 1064 kunmap(buf->page);
1073 1065
1074 return ret; 1066 return ret;
1075} 1067}
@@ -1528,116 +1520,48 @@ static int get_iovec_page_array(const struct iovec __user *iov,
1528static int pipe_to_user(struct pipe_inode_info *pipe, struct pipe_buffer *buf, 1520static int pipe_to_user(struct pipe_inode_info *pipe, struct pipe_buffer *buf,
1529 struct splice_desc *sd) 1521 struct splice_desc *sd)
1530{ 1522{
1531 char *src; 1523 int n = copy_page_to_iter(buf->page, buf->offset, sd->len, sd->u.data);
1532 int ret; 1524 return n == sd->len ? n : -EFAULT;
1533
1534 /*
1535 * See if we can use the atomic maps, by prefaulting in the
1536 * pages and doing an atomic copy
1537 */
1538 if (!fault_in_pages_writeable(sd->u.userptr, sd->len)) {
1539 src = buf->ops->map(pipe, buf, 1);
1540 ret = __copy_to_user_inatomic(sd->u.userptr, src + buf->offset,
1541 sd->len);
1542 buf->ops->unmap(pipe, buf, src);
1543 if (!ret) {
1544 ret = sd->len;
1545 goto out;
1546 }
1547 }
1548
1549 /*
1550 * No dice, use slow non-atomic map and copy
1551 */
1552 src = buf->ops->map(pipe, buf, 0);
1553
1554 ret = sd->len;
1555 if (copy_to_user(sd->u.userptr, src + buf->offset, sd->len))
1556 ret = -EFAULT;
1557
1558 buf->ops->unmap(pipe, buf, src);
1559out:
1560 if (ret > 0)
1561 sd->u.userptr += ret;
1562 return ret;
1563} 1525}
1564 1526
1565/* 1527/*
1566 * For lack of a better implementation, implement vmsplice() to userspace 1528 * For lack of a better implementation, implement vmsplice() to userspace
1567 * as a simple copy of the pipes pages to the user iov. 1529 * as a simple copy of the pipes pages to the user iov.
1568 */ 1530 */
1569static long vmsplice_to_user(struct file *file, const struct iovec __user *iov, 1531static long vmsplice_to_user(struct file *file, const struct iovec __user *uiov,
1570 unsigned long nr_segs, unsigned int flags) 1532 unsigned long nr_segs, unsigned int flags)
1571{ 1533{
1572 struct pipe_inode_info *pipe; 1534 struct pipe_inode_info *pipe;
1573 struct splice_desc sd; 1535 struct splice_desc sd;
1574 ssize_t size;
1575 int error;
1576 long ret; 1536 long ret;
1537 struct iovec iovstack[UIO_FASTIOV];
1538 struct iovec *iov = iovstack;
1539 struct iov_iter iter;
1540 ssize_t count = 0;
1577 1541
1578 pipe = get_pipe_info(file); 1542 pipe = get_pipe_info(file);
1579 if (!pipe) 1543 if (!pipe)
1580 return -EBADF; 1544 return -EBADF;
1581 1545
1582 pipe_lock(pipe); 1546 ret = rw_copy_check_uvector(READ, uiov, nr_segs,
1583 1547 ARRAY_SIZE(iovstack), iovstack, &iov);
1584 error = ret = 0; 1548 if (ret <= 0)
1585 while (nr_segs) { 1549 return ret;
1586 void __user *base;
1587 size_t len;
1588
1589 /*
1590 * Get user address base and length for this iovec.
1591 */
1592 error = get_user(base, &iov->iov_base);
1593 if (unlikely(error))
1594 break;
1595 error = get_user(len, &iov->iov_len);
1596 if (unlikely(error))
1597 break;
1598
1599 /*
1600 * Sanity check this iovec. 0 read succeeds.
1601 */
1602 if (unlikely(!len))
1603 break;
1604 if (unlikely(!base)) {
1605 error = -EFAULT;
1606 break;
1607 }
1608
1609 if (unlikely(!access_ok(VERIFY_WRITE, base, len))) {
1610 error = -EFAULT;
1611 break;
1612 }
1613
1614 sd.len = 0;
1615 sd.total_len = len;
1616 sd.flags = flags;
1617 sd.u.userptr = base;
1618 sd.pos = 0;
1619
1620 size = __splice_from_pipe(pipe, &sd, pipe_to_user);
1621 if (size < 0) {
1622 if (!ret)
1623 ret = size;
1624
1625 break;
1626 }
1627
1628 ret += size;
1629 1550
1630 if (size < len) 1551 iov_iter_init(&iter, iov, nr_segs, count, 0);
1631 break;
1632 1552
1633 nr_segs--; 1553 sd.len = 0;
1634 iov++; 1554 sd.total_len = count;
1635 } 1555 sd.flags = flags;
1556 sd.u.data = &iter;
1557 sd.pos = 0;
1636 1558
1559 pipe_lock(pipe);
1560 ret = __splice_from_pipe(pipe, &sd, pipe_to_user);
1637 pipe_unlock(pipe); 1561 pipe_unlock(pipe);
1638 1562
1639 if (!ret) 1563 if (iov != iovstack)
1640 ret = error; 1564 kfree(iov);
1641 1565
1642 return ret; 1566 return ret;
1643} 1567}
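vmsplice_to_user() loses its hand-rolled segment loop and pipe_to_user() its atomic/non-atomic copy fallback: the user iovec is validated once with rw_copy_check_uvector(), wrapped in an iov_iter, and copy_page_to_iter() handles the rest. The syscall itself behaves as before; a small runnable use of vmsplice() in the pipe-to-user direction that this path serves:

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/uio.h>
#include <unistd.h>

int main(void)
{
	int pfd[2];
	char out[64] = "";
	struct iovec iov = { .iov_base = out, .iov_len = sizeof(out) };
	const char *msg = "hello from the pipe";
	ssize_t n;

	if (pipe(pfd))
		return 1;
	write(pfd[1], msg, strlen(msg));

	/* vmsplice() on the read end copies pipe contents into the user
	 * iovec -- the kernel side of this is vmsplice_to_user() above. */
	n = vmsplice(pfd[0], &iov, 1, 0);
	if (n < 0) {
		perror("vmsplice");
		return 1;
	}
	printf("got %zd bytes: %.*s\n", n, (int)n, out);
	return 0;
}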
diff --git a/fs/udf/file.c b/fs/udf/file.c
index 1037637957c7..d2c170f8b035 100644
--- a/fs/udf/file.c
+++ b/fs/udf/file.c
@@ -171,7 +171,7 @@ static ssize_t udf_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
171 } else 171 } else
172 up_write(&iinfo->i_data_sem); 172 up_write(&iinfo->i_data_sem);
173 173
174 retval = __generic_file_aio_write(iocb, iov, nr_segs, &iocb->ki_pos); 174 retval = __generic_file_aio_write(iocb, iov, nr_segs);
175 mutex_unlock(&inode->i_mutex); 175 mutex_unlock(&inode->i_mutex);
176 176
177 if (retval > 0) { 177 if (retval > 0) {
diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c
index 003c0051b62f..79e96ce98733 100644
--- a/fs/xfs/xfs_file.c
+++ b/fs/xfs/xfs_file.c
@@ -699,7 +699,7 @@ xfs_file_dio_aio_write(
699 699
700 trace_xfs_file_direct_write(ip, count, iocb->ki_pos, 0); 700 trace_xfs_file_direct_write(ip, count, iocb->ki_pos, 0);
701 ret = generic_file_direct_write(iocb, iovp, 701 ret = generic_file_direct_write(iocb, iovp,
702 &nr_segs, pos, &iocb->ki_pos, count, ocount); 702 &nr_segs, pos, count, ocount);
703 703
704out: 704out:
705 xfs_rw_iunlock(ip, iolock); 705 xfs_rw_iunlock(ip, iolock);
@@ -715,7 +715,7 @@ xfs_file_buffered_aio_write(
715 const struct iovec *iovp, 715 const struct iovec *iovp,
716 unsigned long nr_segs, 716 unsigned long nr_segs,
717 loff_t pos, 717 loff_t pos,
718 size_t ocount) 718 size_t count)
719{ 719{
720 struct file *file = iocb->ki_filp; 720 struct file *file = iocb->ki_filp;
721 struct address_space *mapping = file->f_mapping; 721 struct address_space *mapping = file->f_mapping;
@@ -724,7 +724,7 @@ xfs_file_buffered_aio_write(
724 ssize_t ret; 724 ssize_t ret;
725 int enospc = 0; 725 int enospc = 0;
726 int iolock = XFS_IOLOCK_EXCL; 726 int iolock = XFS_IOLOCK_EXCL;
727 size_t count = ocount; 727 struct iov_iter from;
728 728
729 xfs_rw_ilock(ip, iolock); 729 xfs_rw_ilock(ip, iolock);
730 730
@@ -732,14 +732,15 @@ xfs_file_buffered_aio_write(
732 if (ret) 732 if (ret)
733 goto out; 733 goto out;
734 734
735 iov_iter_init(&from, iovp, nr_segs, count, 0);
735 /* We can write back this queue in page reclaim */ 736 /* We can write back this queue in page reclaim */
736 current->backing_dev_info = mapping->backing_dev_info; 737 current->backing_dev_info = mapping->backing_dev_info;
737 738
738write_retry: 739write_retry:
739 trace_xfs_file_buffered_write(ip, count, iocb->ki_pos, 0); 740 trace_xfs_file_buffered_write(ip, count, iocb->ki_pos, 0);
740 ret = generic_file_buffered_write(iocb, iovp, nr_segs, 741 ret = generic_perform_write(file, &from, pos);
741 pos, &iocb->ki_pos, count, 0); 742 if (likely(ret >= 0))
742 743 iocb->ki_pos = pos + ret;
743 /* 744 /*
744 * If we just got an ENOSPC, try to write back all dirty inodes to 745 * If we just got an ENOSPC, try to write back all dirty inodes to
745 * convert delalloc space to free up some of the excess reserved 746 * convert delalloc space to free up some of the excess reserved
diff --git a/fs/xfs/xfs_ioctl.c b/fs/xfs/xfs_ioctl.c
index bcfe61202115..0b18776b075e 100644
--- a/fs/xfs/xfs_ioctl.c
+++ b/fs/xfs/xfs_ioctl.c
@@ -271,32 +271,6 @@ xfs_open_by_handle(
271 return error; 271 return error;
272} 272}
273 273
274/*
275 * This is a copy from fs/namei.c:vfs_readlink(), except for removing it's
276 * unused first argument.
277 */
278STATIC int
279do_readlink(
280 char __user *buffer,
281 int buflen,
282 const char *link)
283{
284 int len;
285
286 len = PTR_ERR(link);
287 if (IS_ERR(link))
288 goto out;
289
290 len = strlen(link);
291 if (len > (unsigned) buflen)
292 len = buflen;
293 if (copy_to_user(buffer, link, len))
294 len = -EFAULT;
295 out:
296 return len;
297}
298
299
300int 274int
301xfs_readlink_by_handle( 275xfs_readlink_by_handle(
302 struct file *parfilp, 276 struct file *parfilp,
@@ -334,7 +308,7 @@ xfs_readlink_by_handle(
334 error = -xfs_readlink(XFS_I(dentry->d_inode), link); 308 error = -xfs_readlink(XFS_I(dentry->d_inode), link);
335 if (error) 309 if (error)
336 goto out_kfree; 310 goto out_kfree;
337 error = do_readlink(hreq->ohandle, olen, link); 311 error = readlink_copy(hreq->ohandle, olen, link);
338 if (error) 312 if (error)
339 goto out_kfree; 313 goto out_kfree;
340 314
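xfs_ioctl.c drops its private do_readlink() copy and calls the generic readlink_copy() (the rename from vfs_readlink() appears in the include/linux/fs.h hunk below). Its contract can be read off the code removed above; a sketch of the semantics, not the authoritative fs/namei.c body:

/* Copy a kernel-generated link string to userspace, truncated to buflen.
 * Returns the number of bytes copied, -EFAULT on fault, or the PTR_ERR
 * of a failed link. */
static int readlink_copy_sketch(char __user *buffer, int buflen, const char *link)
{
	int len = PTR_ERR(link);

	if (IS_ERR(link))
		return len;

	len = strlen(link);
	if (len > (unsigned) buflen)
		len = buflen;
	if (copy_to_user(buffer, link, len))
		return -EFAULT;
	return len;
}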
diff --git a/include/asm-generic/cmpxchg-local.h b/include/asm-generic/cmpxchg-local.h
index d8d4c898c1bb..70bef78912b7 100644
--- a/include/asm-generic/cmpxchg-local.h
+++ b/include/asm-generic/cmpxchg-local.h
@@ -4,7 +4,8 @@
4#include <linux/types.h> 4#include <linux/types.h>
5#include <linux/irqflags.h> 5#include <linux/irqflags.h>
6 6
7extern unsigned long wrong_size_cmpxchg(volatile void *ptr); 7extern unsigned long wrong_size_cmpxchg(volatile void *ptr)
8 __noreturn;
8 9
9/* 10/*
10 * Generic version of __cmpxchg_local (disables interrupts). Takes an unsigned 11 * Generic version of __cmpxchg_local (disables interrupts). Takes an unsigned
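wrong_size_cmpxchg() gains a __noreturn annotation; the function is deliberately never defined, so a cmpxchg_local() on an operand size the switch does not handle turns into a link-time error instead of silent breakage. The same idiom in a self-contained userspace sketch (names are illustrative, not the kernel's; build with -O1 or higher so the dead default branch is discarded):

#include <stdio.h>

/* Declared but never defined: any surviving reference breaks the link. */
extern unsigned long wrong_size_xchg(volatile void *ptr);

#define xchg_local(ptr, val) ({						\
	__typeof__(*(ptr)) __old;					\
	switch (sizeof(*(ptr))) {					\
	case 4:								\
	case 8:								\
		__old = __sync_lock_test_and_set((ptr), (val));		\
		break;							\
	default:	/* unhandled size: leave a dangling symbol */	\
		__old = wrong_size_xchg((volatile void *)(ptr));	\
	}								\
	__old;								\
})

int main(void)
{
	long v = 1;
	long old = xchg_local(&v, 2L);

	printf("old=%ld new=%ld\n", old, v);
	return 0;
}

With an unsupported operand size, the reference to wrong_size_xchg() survives and the link fails, which is exactly the effect wrong_size_cmpxchg() provides for the generic cmpxchg_local().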
diff --git a/include/asm-generic/syscall.h b/include/asm-generic/syscall.h
index 5b09392db673..d401e5463fb0 100644
--- a/include/asm-generic/syscall.h
+++ b/include/asm-generic/syscall.h
@@ -144,8 +144,6 @@ void syscall_set_arguments(struct task_struct *task, struct pt_regs *regs,
144 144
145/** 145/**
146 * syscall_get_arch - return the AUDIT_ARCH for the current system call 146 * syscall_get_arch - return the AUDIT_ARCH for the current system call
147 * @task: task of interest, must be in system call entry tracing
148 * @regs: task_pt_regs() of @task
149 * 147 *
150 * Returns the AUDIT_ARCH_* based on the system call convention in use. 148 * Returns the AUDIT_ARCH_* based on the system call convention in use.
151 * 149 *
@@ -155,5 +153,5 @@ void syscall_set_arguments(struct task_struct *task, struct pt_regs *regs,
155 * Architectures which permit CONFIG_HAVE_ARCH_SECCOMP_FILTER must 153 * Architectures which permit CONFIG_HAVE_ARCH_SECCOMP_FILTER must
156 * provide an implementation of this. 154 * provide an implementation of this.
157 */ 155 */
158int syscall_get_arch(struct task_struct *task, struct pt_regs *regs); 156int syscall_get_arch(void);
159#endif /* _ASM_SYSCALL_H */ 157#endif /* _ASM_SYSCALL_H */
diff --git a/include/linux/audit.h b/include/linux/audit.h
index ec1464df4c60..22cfddb75566 100644
--- a/include/linux/audit.h
+++ b/include/linux/audit.h
@@ -79,6 +79,14 @@ extern int is_audit_feature_set(int which);
79extern int __init audit_register_class(int class, unsigned *list); 79extern int __init audit_register_class(int class, unsigned *list);
80extern int audit_classify_syscall(int abi, unsigned syscall); 80extern int audit_classify_syscall(int abi, unsigned syscall);
81extern int audit_classify_arch(int arch); 81extern int audit_classify_arch(int arch);
82/* only for compat system calls */
83extern unsigned compat_write_class[];
84extern unsigned compat_read_class[];
85extern unsigned compat_dir_class[];
86extern unsigned compat_chattr_class[];
87extern unsigned compat_signal_class[];
88
89extern int __weak audit_classify_compat_syscall(int abi, unsigned syscall);
82 90
83/* audit_names->type values */ 91/* audit_names->type values */
84#define AUDIT_TYPE_UNKNOWN 0 /* we don't know yet */ 92#define AUDIT_TYPE_UNKNOWN 0 /* we don't know yet */
@@ -94,6 +102,12 @@ struct filename;
94 102
95extern void audit_log_session_info(struct audit_buffer *ab); 103extern void audit_log_session_info(struct audit_buffer *ab);
96 104
105#ifdef CONFIG_AUDIT_COMPAT_GENERIC
106#define audit_is_compat(arch) (!((arch) & __AUDIT_ARCH_64BIT))
107#else
108#define audit_is_compat(arch) false
109#endif
110
97#ifdef CONFIG_AUDITSYSCALL 111#ifdef CONFIG_AUDITSYSCALL
98/* These are defined in auditsc.c */ 112/* These are defined in auditsc.c */
99 /* Public API */ 113 /* Public API */
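Paired with the syscall_get_arch() signature change above, audit_is_compat() lets common audit code decide when to consult the compat classification tables; with CONFIG_AUDIT_COMPAT_GENERIC unset it is simply false and the weak audit_classify_compat_syscall() is never referenced. Roughly how a caller is expected to combine them (illustrative fragment, not a quote of the audit core; syscall_nr stands for the syscall number):

	int arch = syscall_get_arch();

	if (audit_is_compat(arch))
		return audit_classify_compat_syscall(arch, syscall_nr);
	return audit_classify_syscall(arch, syscall_nr);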
diff --git a/include/linux/bio.h b/include/linux/bio.h
index 5aa372a7380c..bba550826921 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -388,7 +388,7 @@ struct sg_iovec;
388struct rq_map_data; 388struct rq_map_data;
389extern struct bio *bio_map_user_iov(struct request_queue *, 389extern struct bio *bio_map_user_iov(struct request_queue *,
390 struct block_device *, 390 struct block_device *,
391 struct sg_iovec *, int, int, gfp_t); 391 const struct sg_iovec *, int, int, gfp_t);
392extern void bio_unmap_user(struct bio *); 392extern void bio_unmap_user(struct bio *);
393extern struct bio *bio_map_kern(struct request_queue *, void *, unsigned int, 393extern struct bio *bio_map_kern(struct request_queue *, void *, unsigned int,
394 gfp_t); 394 gfp_t);
@@ -414,7 +414,8 @@ extern int bio_alloc_pages(struct bio *bio, gfp_t gfp);
414extern struct bio *bio_copy_user(struct request_queue *, struct rq_map_data *, 414extern struct bio *bio_copy_user(struct request_queue *, struct rq_map_data *,
415 unsigned long, unsigned int, int, gfp_t); 415 unsigned long, unsigned int, int, gfp_t);
416extern struct bio *bio_copy_user_iov(struct request_queue *, 416extern struct bio *bio_copy_user_iov(struct request_queue *,
417 struct rq_map_data *, struct sg_iovec *, 417 struct rq_map_data *,
418 const struct sg_iovec *,
418 int, int, gfp_t); 419 int, int, gfp_t);
419extern int bio_uncopy_user(struct bio *); 420extern int bio_uncopy_user(struct bio *);
420void zero_fill_bio(struct bio *bio); 421void zero_fill_bio(struct bio *bio);
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 99617cf7dd1a..0d84981ee03f 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -835,8 +835,8 @@ extern int blk_rq_map_user(struct request_queue *, struct request *,
835extern int blk_rq_unmap_user(struct bio *); 835extern int blk_rq_unmap_user(struct bio *);
836extern int blk_rq_map_kern(struct request_queue *, struct request *, void *, unsigned int, gfp_t); 836extern int blk_rq_map_kern(struct request_queue *, struct request *, void *, unsigned int, gfp_t);
837extern int blk_rq_map_user_iov(struct request_queue *, struct request *, 837extern int blk_rq_map_user_iov(struct request_queue *, struct request *,
838 struct rq_map_data *, struct sg_iovec *, int, 838 struct rq_map_data *, const struct sg_iovec *,
839 unsigned int, gfp_t); 839 int, unsigned int, gfp_t);
840extern int blk_execute_rq(struct request_queue *, struct gendisk *, 840extern int blk_execute_rq(struct request_queue *, struct gendisk *,
841 struct request *, int); 841 struct request *, int);
842extern void blk_execute_rq_nowait(struct request_queue *, struct gendisk *, 842extern void blk_execute_rq_nowait(struct request_queue *, struct gendisk *,
diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h
index d77797a52b7b..c40302f909ce 100644
--- a/include/linux/buffer_head.h
+++ b/include/linux/buffer_head.h
@@ -210,8 +210,8 @@ int block_write_full_page(struct page *page, get_block_t *get_block,
210int block_write_full_page_endio(struct page *page, get_block_t *get_block, 210int block_write_full_page_endio(struct page *page, get_block_t *get_block,
211 struct writeback_control *wbc, bh_end_io_t *handler); 211 struct writeback_control *wbc, bh_end_io_t *handler);
212int block_read_full_page(struct page*, get_block_t*); 212int block_read_full_page(struct page*, get_block_t*);
213int block_is_partially_uptodate(struct page *page, read_descriptor_t *desc, 213int block_is_partially_uptodate(struct page *page, unsigned long from,
214 unsigned long from); 214 unsigned long count);
215int block_write_begin(struct address_space *mapping, loff_t pos, unsigned len, 215int block_write_begin(struct address_space *mapping, loff_t pos, unsigned len,
216 unsigned flags, struct page **pagep, get_block_t *get_block); 216 unsigned flags, struct page **pagep, get_block_t *get_block);
217int __block_write_begin(struct page *page, loff_t pos, unsigned len, 217int __block_write_begin(struct page *page, loff_t pos, unsigned len,
diff --git a/include/linux/compiler-clang.h b/include/linux/compiler-clang.h
new file mode 100644
index 000000000000..d1e49d52b640
--- /dev/null
+++ b/include/linux/compiler-clang.h
@@ -0,0 +1,12 @@
1#ifndef __LINUX_COMPILER_H
2#error "Please don't include <linux/compiler-clang.h> directly, include <linux/compiler.h> instead."
3#endif
4
5/* Some compiler specific definitions are overwritten here
6 * for Clang compiler
7 */
8
9#ifdef uninitialized_var
10#undef uninitialized_var
11#define uninitialized_var(x) x = *(&(x))
12#endif
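compiler-clang.h exists only to re-define uninitialized_var(): clang does not accept gcc's self-initialization form (x = x) as a warning suppression, so the clang variant goes through the address instead, x = *(&(x)). What the macro is for, in a toy program that builds warning-free at -Wall (the parse() helper is made up):

#include <stdio.h>

#define uninitialized_var(x) x = *(&(x))	/* clang form from the hunk above */

static int parse(const char *s, int *out)
{
	return sscanf(s, "%d", out) == 1;
}

int main(int argc, char **argv)
{
	int uninitialized_var(val);	/* expands to: int val = *(&(val)); */

	/* val is only read on the branch where parse() wrote it, but a
	 * compiler that cannot see through parse() may still warn here
	 * without the macro. */
	if (argc > 1 && parse(argv[1], &val))
		printf("%d\n", val);
	return 0;
}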
diff --git a/include/linux/compiler.h b/include/linux/compiler.h
index 2472740d7ab2..ee7239ea1583 100644
--- a/include/linux/compiler.h
+++ b/include/linux/compiler.h
@@ -63,6 +63,13 @@ extern void __chk_io_ptr(const volatile void __iomem *);
63# include <linux/compiler-intel.h> 63# include <linux/compiler-intel.h>
64#endif 64#endif
65 65
66/* Clang compiler defines __GNUC__. So we will overwrite implementations
67 * coming from above header files here
68 */
69#ifdef __clang__
70#include <linux/compiler-clang.h>
71#endif
72
66/* 73/*
67 * Generic compiler-dependent macros required for kernel 74 * Generic compiler-dependent macros required for kernel
68 * build go below this comment. Actual compiler/compiler version 75 * build go below this comment. Actual compiler/compiler version
diff --git a/include/linux/fdtable.h b/include/linux/fdtable.h
index 70e8e21c0a30..230f87bdf5ad 100644
--- a/include/linux/fdtable.h
+++ b/include/linux/fdtable.h
@@ -63,8 +63,6 @@ struct file_operations;
63struct vfsmount; 63struct vfsmount;
64struct dentry; 64struct dentry;
65 65
66extern void __init files_defer_init(void);
67
68#define rcu_dereference_check_fdtable(files, fdtfd) \ 66#define rcu_dereference_check_fdtable(files, fdtfd) \
69 rcu_dereference_check((fdtfd), lockdep_is_held(&(files)->file_lock)) 67 rcu_dereference_check((fdtfd), lockdep_is_held(&(files)->file_lock))
70 68
diff --git a/include/linux/filter.h b/include/linux/filter.h
index 262dcbb75ffe..024fd03e5d18 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -220,7 +220,6 @@ enum {
220 BPF_S_ANC_RXHASH, 220 BPF_S_ANC_RXHASH,
221 BPF_S_ANC_CPU, 221 BPF_S_ANC_CPU,
222 BPF_S_ANC_ALU_XOR_X, 222 BPF_S_ANC_ALU_XOR_X,
223 BPF_S_ANC_SECCOMP_LD_W,
224 BPF_S_ANC_VLAN_TAG, 223 BPF_S_ANC_VLAN_TAG,
225 BPF_S_ANC_VLAN_TAG_PRESENT, 224 BPF_S_ANC_VLAN_TAG_PRESENT,
226 BPF_S_ANC_PAY_OFFSET, 225 BPF_S_ANC_PAY_OFFSET,
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 81048f9bc783..7a9c5bca2b76 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -48,6 +48,7 @@ struct cred;
48struct swap_info_struct; 48struct swap_info_struct;
49struct seq_file; 49struct seq_file;
50struct workqueue_struct; 50struct workqueue_struct;
51struct iov_iter;
51 52
52extern void __init inode_init(void); 53extern void __init inode_init(void);
53extern void __init inode_init_early(void); 54extern void __init inode_init_early(void);
@@ -125,6 +126,8 @@ typedef void (dio_iodone_t)(struct kiocb *iocb, loff_t offset,
125 126
126/* File needs atomic accesses to f_pos */ 127/* File needs atomic accesses to f_pos */
127#define FMODE_ATOMIC_POS ((__force fmode_t)0x8000) 128#define FMODE_ATOMIC_POS ((__force fmode_t)0x8000)
129/* Write access to underlying fs */
130#define FMODE_WRITER ((__force fmode_t)0x10000)
128 131
129/* File was opened by fanotify and shouldn't generate fanotify events */ 132/* File was opened by fanotify and shouldn't generate fanotify events */
130#define FMODE_NONOTIFY ((__force fmode_t)0x1000000) 133#define FMODE_NONOTIFY ((__force fmode_t)0x1000000)
@@ -293,38 +296,6 @@ struct page;
293struct address_space; 296struct address_space;
294struct writeback_control; 297struct writeback_control;
295 298
296struct iov_iter {
297 const struct iovec *iov;
298 unsigned long nr_segs;
299 size_t iov_offset;
300 size_t count;
301};
302
303size_t iov_iter_copy_from_user_atomic(struct page *page,
304 struct iov_iter *i, unsigned long offset, size_t bytes);
305size_t iov_iter_copy_from_user(struct page *page,
306 struct iov_iter *i, unsigned long offset, size_t bytes);
307void iov_iter_advance(struct iov_iter *i, size_t bytes);
308int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes);
309size_t iov_iter_single_seg_count(const struct iov_iter *i);
310
311static inline void iov_iter_init(struct iov_iter *i,
312 const struct iovec *iov, unsigned long nr_segs,
313 size_t count, size_t written)
314{
315 i->iov = iov;
316 i->nr_segs = nr_segs;
317 i->iov_offset = 0;
318 i->count = count + written;
319
320 iov_iter_advance(i, written);
321}
322
323static inline size_t iov_iter_count(struct iov_iter *i)
324{
325 return i->count;
326}
327
328/* 299/*
329 * "descriptor" for what we're up to with a read. 300 * "descriptor" for what we're up to with a read.
330 * This allows us to use the same read code yet 301 * This allows us to use the same read code yet
@@ -383,7 +354,7 @@ struct address_space_operations {
383 int (*migratepage) (struct address_space *, 354 int (*migratepage) (struct address_space *,
384 struct page *, struct page *, enum migrate_mode); 355 struct page *, struct page *, enum migrate_mode);
385 int (*launder_page) (struct page *); 356 int (*launder_page) (struct page *);
386 int (*is_partially_uptodate) (struct page *, read_descriptor_t *, 357 int (*is_partially_uptodate) (struct page *, unsigned long,
387 unsigned long); 358 unsigned long);
388 void (*is_dirty_writeback) (struct page *, bool *, bool *); 359 void (*is_dirty_writeback) (struct page *, bool *, bool *);
389 int (*error_remove_page)(struct address_space *, struct page *); 360 int (*error_remove_page)(struct address_space *, struct page *);
@@ -770,9 +741,6 @@ static inline int ra_has_index(struct file_ra_state *ra, pgoff_t index)
770 index < ra->start + ra->size); 741 index < ra->start + ra->size);
771} 742}
772 743
773#define FILE_MNT_WRITE_TAKEN 1
774#define FILE_MNT_WRITE_RELEASED 2
775
776struct file { 744struct file {
777 union { 745 union {
778 struct llist_node fu_llist; 746 struct llist_node fu_llist;
@@ -810,9 +778,6 @@ struct file {
810 struct list_head f_tfile_llink; 778 struct list_head f_tfile_llink;
811#endif /* #ifdef CONFIG_EPOLL */ 779#endif /* #ifdef CONFIG_EPOLL */
812 struct address_space *f_mapping; 780 struct address_space *f_mapping;
813#ifdef CONFIG_DEBUG_WRITECOUNT
814 unsigned long f_mnt_write_state;
815#endif
816} __attribute__((aligned(4))); /* lest something weird decides that 2 is OK */ 781} __attribute__((aligned(4))); /* lest something weird decides that 2 is OK */
817 782
818struct file_handle { 783struct file_handle {
@@ -830,49 +795,6 @@ static inline struct file *get_file(struct file *f)
830#define fput_atomic(x) atomic_long_add_unless(&(x)->f_count, -1, 1) 795#define fput_atomic(x) atomic_long_add_unless(&(x)->f_count, -1, 1)
831#define file_count(x) atomic_long_read(&(x)->f_count) 796#define file_count(x) atomic_long_read(&(x)->f_count)
832 797
833#ifdef CONFIG_DEBUG_WRITECOUNT
834static inline void file_take_write(struct file *f)
835{
836 WARN_ON(f->f_mnt_write_state != 0);
837 f->f_mnt_write_state = FILE_MNT_WRITE_TAKEN;
838}
839static inline void file_release_write(struct file *f)
840{
841 f->f_mnt_write_state |= FILE_MNT_WRITE_RELEASED;
842}
843static inline void file_reset_write(struct file *f)
844{
845 f->f_mnt_write_state = 0;
846}
847static inline void file_check_state(struct file *f)
848{
849 /*
850 * At this point, either both or neither of these bits
851 * should be set.
852 */
853 WARN_ON(f->f_mnt_write_state == FILE_MNT_WRITE_TAKEN);
854 WARN_ON(f->f_mnt_write_state == FILE_MNT_WRITE_RELEASED);
855}
856static inline int file_check_writeable(struct file *f)
857{
858 if (f->f_mnt_write_state == FILE_MNT_WRITE_TAKEN)
859 return 0;
860 printk(KERN_WARNING "writeable file with no "
861 "mnt_want_write()\n");
862 WARN_ON(1);
863 return -EINVAL;
864}
865#else /* !CONFIG_DEBUG_WRITECOUNT */
866static inline void file_take_write(struct file *filp) {}
867static inline void file_release_write(struct file *filp) {}
868static inline void file_reset_write(struct file *filp) {}
869static inline void file_check_state(struct file *filp) {}
870static inline int file_check_writeable(struct file *filp)
871{
872 return 0;
873}
874#endif /* CONFIG_DEBUG_WRITECOUNT */
875
876#define MAX_NON_LFS ((1UL<<31) - 1) 798#define MAX_NON_LFS ((1UL<<31) - 1)
877 799
878/* Page cache limit. The filesystems should put that into their s_maxbytes 800/* Page cache limit. The filesystems should put that into their s_maxbytes
@@ -2481,16 +2403,13 @@ extern int generic_file_mmap(struct file *, struct vm_area_struct *);
2481extern int generic_file_readonly_mmap(struct file *, struct vm_area_struct *); 2403extern int generic_file_readonly_mmap(struct file *, struct vm_area_struct *);
2482extern int generic_file_remap_pages(struct vm_area_struct *, unsigned long addr, 2404extern int generic_file_remap_pages(struct vm_area_struct *, unsigned long addr,
2483 unsigned long size, pgoff_t pgoff); 2405 unsigned long size, pgoff_t pgoff);
2484extern int file_read_actor(read_descriptor_t * desc, struct page *page, unsigned long offset, unsigned long size);
2485int generic_write_checks(struct file *file, loff_t *pos, size_t *count, int isblk); 2406int generic_write_checks(struct file *file, loff_t *pos, size_t *count, int isblk);
2486extern ssize_t generic_file_aio_read(struct kiocb *, const struct iovec *, unsigned long, loff_t); 2407extern ssize_t generic_file_aio_read(struct kiocb *, const struct iovec *, unsigned long, loff_t);
2487extern ssize_t __generic_file_aio_write(struct kiocb *, const struct iovec *, unsigned long, 2408extern ssize_t __generic_file_aio_write(struct kiocb *, const struct iovec *, unsigned long);
2488 loff_t *);
2489extern ssize_t generic_file_aio_write(struct kiocb *, const struct iovec *, unsigned long, loff_t); 2409extern ssize_t generic_file_aio_write(struct kiocb *, const struct iovec *, unsigned long, loff_t);
2490extern ssize_t generic_file_direct_write(struct kiocb *, const struct iovec *, 2410extern ssize_t generic_file_direct_write(struct kiocb *, const struct iovec *,
2491 unsigned long *, loff_t, loff_t *, size_t, size_t); 2411 unsigned long *, loff_t, size_t, size_t);
2492extern ssize_t generic_file_buffered_write(struct kiocb *, const struct iovec *, 2412extern ssize_t generic_perform_write(struct file *, struct iov_iter *, loff_t);
2493 unsigned long, loff_t, loff_t *, size_t, ssize_t);
2494extern ssize_t do_sync_read(struct file *filp, char __user *buf, size_t len, loff_t *ppos); 2413extern ssize_t do_sync_read(struct file *filp, char __user *buf, size_t len, loff_t *ppos);
2495extern ssize_t do_sync_write(struct file *filp, const char __user *buf, size_t len, loff_t *ppos); 2414extern ssize_t do_sync_write(struct file *filp, const char __user *buf, size_t len, loff_t *ppos);
2496extern int generic_segment_checks(const struct iovec *iov, 2415extern int generic_segment_checks(const struct iovec *iov,
@@ -2582,7 +2501,7 @@ extern const struct file_operations generic_ro_fops;
2582 2501
2583#define special_file(m) (S_ISCHR(m)||S_ISBLK(m)||S_ISFIFO(m)||S_ISSOCK(m)) 2502#define special_file(m) (S_ISCHR(m)||S_ISBLK(m)||S_ISFIFO(m)||S_ISSOCK(m))
2584 2503
2585extern int vfs_readlink(struct dentry *, char __user *, int, const char *); 2504extern int readlink_copy(char __user *, int, const char *);
2586extern int page_readlink(struct dentry *, char __user *, int); 2505extern int page_readlink(struct dentry *, char __user *, int);
2587extern void *page_follow_link_light(struct dentry *, struct nameidata *); 2506extern void *page_follow_link_light(struct dentry *, struct nameidata *);
2588extern void page_put_link(struct dentry *, struct nameidata *, void *); 2507extern void page_put_link(struct dentry *, struct nameidata *, void *);
diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h
index cdc30111d2f8..d16da3e53bc7 100644
--- a/include/linux/ftrace_event.h
+++ b/include/linux/ftrace_event.h
@@ -7,6 +7,7 @@
7#include <linux/percpu.h> 7#include <linux/percpu.h>
8#include <linux/hardirq.h> 8#include <linux/hardirq.h>
9#include <linux/perf_event.h> 9#include <linux/perf_event.h>
10#include <linux/tracepoint.h>
10 11
11struct trace_array; 12struct trace_array;
12struct trace_buffer; 13struct trace_buffer;
@@ -232,6 +233,7 @@ enum {
232 TRACE_EVENT_FL_IGNORE_ENABLE_BIT, 233 TRACE_EVENT_FL_IGNORE_ENABLE_BIT,
233 TRACE_EVENT_FL_WAS_ENABLED_BIT, 234 TRACE_EVENT_FL_WAS_ENABLED_BIT,
234 TRACE_EVENT_FL_USE_CALL_FILTER_BIT, 235 TRACE_EVENT_FL_USE_CALL_FILTER_BIT,
236 TRACE_EVENT_FL_TRACEPOINT_BIT,
235}; 237};
236 238
237/* 239/*
@@ -244,6 +246,7 @@ enum {
244 * (used for module unloading, if a module event is enabled, 246 * (used for module unloading, if a module event is enabled,
245 * it is best to clear the buffers that used it). 247 * it is best to clear the buffers that used it).
246 * USE_CALL_FILTER - For ftrace internal events, don't use file filter 248 * USE_CALL_FILTER - For ftrace internal events, don't use file filter
249 * TRACEPOINT - Event is a tracepoint
247 */ 250 */
248enum { 251enum {
249 TRACE_EVENT_FL_FILTERED = (1 << TRACE_EVENT_FL_FILTERED_BIT), 252 TRACE_EVENT_FL_FILTERED = (1 << TRACE_EVENT_FL_FILTERED_BIT),
@@ -252,12 +255,17 @@ enum {
252 TRACE_EVENT_FL_IGNORE_ENABLE = (1 << TRACE_EVENT_FL_IGNORE_ENABLE_BIT), 255 TRACE_EVENT_FL_IGNORE_ENABLE = (1 << TRACE_EVENT_FL_IGNORE_ENABLE_BIT),
253 TRACE_EVENT_FL_WAS_ENABLED = (1 << TRACE_EVENT_FL_WAS_ENABLED_BIT), 256 TRACE_EVENT_FL_WAS_ENABLED = (1 << TRACE_EVENT_FL_WAS_ENABLED_BIT),
254 TRACE_EVENT_FL_USE_CALL_FILTER = (1 << TRACE_EVENT_FL_USE_CALL_FILTER_BIT), 257 TRACE_EVENT_FL_USE_CALL_FILTER = (1 << TRACE_EVENT_FL_USE_CALL_FILTER_BIT),
258 TRACE_EVENT_FL_TRACEPOINT = (1 << TRACE_EVENT_FL_TRACEPOINT_BIT),
255}; 259};
256 260
257struct ftrace_event_call { 261struct ftrace_event_call {
258 struct list_head list; 262 struct list_head list;
259 struct ftrace_event_class *class; 263 struct ftrace_event_class *class;
260 char *name; 264 union {
265 char *name;
266 /* Set TRACE_EVENT_FL_TRACEPOINT flag when using "tp" */
267 struct tracepoint *tp;
268 };
261 struct trace_event event; 269 struct trace_event event;
262 const char *print_fmt; 270 const char *print_fmt;
263 struct event_filter *filter; 271 struct event_filter *filter;
@@ -271,6 +279,7 @@ struct ftrace_event_call {
271 * bit 3: ftrace internal event (do not enable) 279 * bit 3: ftrace internal event (do not enable)
272 * bit 4: Event was enabled by module 280 * bit 4: Event was enabled by module
273 * bit 5: use call filter rather than file filter 281 * bit 5: use call filter rather than file filter
282 * bit 6: Event is a tracepoint
274 */ 283 */
275 int flags; /* static flags of different events */ 284 int flags; /* static flags of different events */
276 285
@@ -283,6 +292,15 @@ struct ftrace_event_call {
283#endif 292#endif
284}; 293};
285 294
295static inline const char *
296ftrace_event_name(struct ftrace_event_call *call)
297{
298 if (call->flags & TRACE_EVENT_FL_TRACEPOINT)
299 return call->tp ? call->tp->name : NULL;
300 else
301 return call->name;
302}
303
286struct trace_array; 304struct trace_array;
287struct ftrace_subsystem_dir; 305struct ftrace_subsystem_dir;
288 306
@@ -353,7 +371,7 @@ struct ftrace_event_file {
353#define __TRACE_EVENT_FLAGS(name, value) \ 371#define __TRACE_EVENT_FLAGS(name, value) \
354 static int __init trace_init_flags_##name(void) \ 372 static int __init trace_init_flags_##name(void) \
355 { \ 373 { \
356 event_##name.flags = value; \ 374 event_##name.flags |= value; \
357 return 0; \ 375 return 0; \
358 } \ 376 } \
359 early_initcall(trace_init_flags_##name); 377 early_initcall(trace_init_flags_##name);
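struct ftrace_event_call now stores either a legacy name string or a struct tracepoint pointer in a union, selected by the new TRACE_EVENT_FL_TRACEPOINT flag, and __TRACE_EVENT_FLAGS() switches from = to |= so it can no longer clobber that bit. Callers are meant to go through the new ftrace_event_name() accessor rather than dereferencing ->name; an illustrative before/after fragment:

	/* before */
	pr_info("enabling event %s\n", call->name);

	/* after: correct for both legacy calls and tracepoint-backed ones */
	pr_info("enabling event %s\n", ftrace_event_name(call));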
diff --git a/include/linux/mm.h b/include/linux/mm.h
index abc848412e3c..bf9811e1321a 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1204,6 +1204,7 @@ void account_page_writeback(struct page *page);
1204int set_page_dirty(struct page *page); 1204int set_page_dirty(struct page *page);
1205int set_page_dirty_lock(struct page *page); 1205int set_page_dirty_lock(struct page *page);
1206int clear_page_dirty_for_io(struct page *page); 1206int clear_page_dirty_for_io(struct page *page);
1207int get_cmdline(struct task_struct *task, char *buffer, int buflen);
1207 1208
1208/* Is the vma a continuation of the stack vma above it? */ 1209/* Is the vma a continuation of the stack vma above it? */
1209static inline int vma_growsdown(struct vm_area_struct *vma, unsigned long addr) 1210static inline int vma_growsdown(struct vm_area_struct *vma, unsigned long addr)
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 2b58d192ea24..8967e20cbe57 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -124,6 +124,8 @@ struct page {
124 union { 124 union {
125 struct list_head lru; /* Pageout list, eg. active_list 125 struct list_head lru; /* Pageout list, eg. active_list
126 * protected by zone->lru_lock ! 126 * protected by zone->lru_lock !
127 * Can be used as a generic list
128 * by the page owner.
127 */ 129 */
128 struct { /* slub per cpu partial pages */ 130 struct { /* slub per cpu partial pages */
129 struct page *next; /* Next partial slab */ 131 struct page *next; /* Next partial slab */
@@ -136,7 +138,6 @@ struct page {
136#endif 138#endif
137 }; 139 };
138 140
139 struct list_head list; /* slobs list of pages */
140 struct slab *slab_page; /* slab fields */ 141 struct slab *slab_page; /* slab fields */
141 struct rcu_head rcu_head; /* Used by SLAB 142 struct rcu_head rcu_head; /* Used by SLAB
142 * when destroying via RCU 143 * when destroying via RCU
diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h
index 9a165a213d93..44eeef0da186 100644
--- a/include/linux/mod_devicetable.h
+++ b/include/linux/mod_devicetable.h
@@ -556,6 +556,11 @@ struct amba_id {
556 * See documentation of "x86_match_cpu" for details. 556 * See documentation of "x86_match_cpu" for details.
557 */ 557 */
558 558
559/*
560 * MODULE_DEVICE_TABLE expects this struct to be called x86cpu_device_id.
561 * Although gcc seems to ignore this error, clang fails without this define.
562 */
563#define x86cpu_device_id x86_cpu_id
559struct x86_cpu_id { 564struct x86_cpu_id {
560 __u16 vendor; 565 __u16 vendor;
561 __u16 family; 566 __u16 family;
diff --git a/include/linux/mount.h b/include/linux/mount.h
index 371d346fa270..839bac270904 100644
--- a/include/linux/mount.h
+++ b/include/linux/mount.h
@@ -44,6 +44,8 @@ struct mnt_namespace;
44#define MNT_SHARED_MASK (MNT_UNBINDABLE) 44#define MNT_SHARED_MASK (MNT_UNBINDABLE)
45#define MNT_PROPAGATION_MASK (MNT_SHARED | MNT_UNBINDABLE) 45#define MNT_PROPAGATION_MASK (MNT_SHARED | MNT_UNBINDABLE)
46 46
47#define MNT_INTERNAL_FLAGS (MNT_SHARED | MNT_WRITE_HOLD | MNT_INTERNAL | \
48 MNT_DOOMED | MNT_SYNC_UMOUNT | MNT_MARKED)
47 49
48#define MNT_INTERNAL 0x4000 50#define MNT_INTERNAL 0x4000
49 51
@@ -51,6 +53,7 @@ struct mnt_namespace;
51#define MNT_LOCKED 0x800000 53#define MNT_LOCKED 0x800000
52#define MNT_DOOMED 0x1000000 54#define MNT_DOOMED 0x1000000
53#define MNT_SYNC_UMOUNT 0x2000000 55#define MNT_SYNC_UMOUNT 0x2000000
56#define MNT_MARKED 0x4000000
54 57
55struct vfsmount { 58struct vfsmount {
56 struct dentry *mnt_root; /* root of the mounted tree */ 59 struct dentry *mnt_root; /* root of the mounted tree */
diff --git a/include/linux/nbd.h b/include/linux/nbd.h
index ae4981ebd18e..f62f78aef4ac 100644
--- a/include/linux/nbd.h
+++ b/include/linux/nbd.h
@@ -24,8 +24,7 @@ struct request;
24struct nbd_device { 24struct nbd_device {
25 int flags; 25 int flags;
26 int harderror; /* Code of hard error */ 26 int harderror; /* Code of hard error */
27 struct socket * sock; 27 struct socket * sock; /* If == NULL, device is not ready, yet */
28 struct file * file; /* If == NULL, device is not ready, yet */
29 int magic; 28 int magic;
30 29
31 spinlock_t queue_lock; 30 spinlock_t queue_lock;
diff --git a/include/linux/netfilter/nf_conntrack_proto_gre.h b/include/linux/netfilter/nf_conntrack_proto_gre.h
index ec2ffaf418c8..df78dc2b5524 100644
--- a/include/linux/netfilter/nf_conntrack_proto_gre.h
+++ b/include/linux/netfilter/nf_conntrack_proto_gre.h
@@ -87,7 +87,6 @@ int nf_ct_gre_keymap_add(struct nf_conn *ct, enum ip_conntrack_dir dir,
87/* delete keymap entries */ 87/* delete keymap entries */
88void nf_ct_gre_keymap_destroy(struct nf_conn *ct); 88void nf_ct_gre_keymap_destroy(struct nf_conn *ct);
89 89
90void nf_ct_gre_keymap_flush(struct net *net);
91void nf_nat_need_gre(void); 90void nf_nat_need_gre(void);
92 91
93#endif /* __KERNEL__ */ 92#endif /* __KERNEL__ */
diff --git a/include/linux/ntb.h b/include/linux/ntb.h
index f6a15205853b..9ac1a62fc6f5 100644
--- a/include/linux/ntb.h
+++ b/include/linux/ntb.h
@@ -50,8 +50,13 @@ struct ntb_transport_qp;
50 50
51struct ntb_client { 51struct ntb_client {
52 struct device_driver driver; 52 struct device_driver driver;
53 int (*probe) (struct pci_dev *pdev); 53 int (*probe)(struct pci_dev *pdev);
54 void (*remove) (struct pci_dev *pdev); 54 void (*remove)(struct pci_dev *pdev);
55};
56
57enum {
58 NTB_LINK_DOWN = 0,
59 NTB_LINK_UP,
55}; 60};
56 61
57int ntb_register_client(struct ntb_client *drvr); 62int ntb_register_client(struct ntb_client *drvr);
@@ -60,11 +65,11 @@ int ntb_register_client_dev(char *device_name);
60void ntb_unregister_client_dev(char *device_name); 65void ntb_unregister_client_dev(char *device_name);
61 66
62struct ntb_queue_handlers { 67struct ntb_queue_handlers {
63 void (*rx_handler) (struct ntb_transport_qp *qp, void *qp_data, 68 void (*rx_handler)(struct ntb_transport_qp *qp, void *qp_data,
64 void *data, int len); 69 void *data, int len);
65 void (*tx_handler) (struct ntb_transport_qp *qp, void *qp_data, 70 void (*tx_handler)(struct ntb_transport_qp *qp, void *qp_data,
66 void *data, int len); 71 void *data, int len);
67 void (*event_handler) (void *data, int status); 72 void (*event_handler)(void *data, int status);
68}; 73};
69 74
70unsigned char ntb_transport_qp_num(struct ntb_transport_qp *qp); 75unsigned char ntb_transport_qp_num(struct ntb_transport_qp *qp);
diff --git a/include/linux/nvme.h b/include/linux/nvme.h
index 6b9aafed225f..a50173ca1d72 100644
--- a/include/linux/nvme.h
+++ b/include/linux/nvme.h
@@ -66,20 +66,25 @@ enum {
66 66
67#define NVME_VS(major, minor) (major << 16 | minor) 67#define NVME_VS(major, minor) (major << 16 | minor)
68 68
69#define NVME_IO_TIMEOUT (5 * HZ) 69extern unsigned char io_timeout;
70#define NVME_IO_TIMEOUT (io_timeout * HZ)
70 71
71/* 72/*
72 * Represents an NVM Express device. Each nvme_dev is a PCI function. 73 * Represents an NVM Express device. Each nvme_dev is a PCI function.
73 */ 74 */
74struct nvme_dev { 75struct nvme_dev {
75 struct list_head node; 76 struct list_head node;
76 struct nvme_queue **queues; 77 struct nvme_queue __rcu **queues;
78 unsigned short __percpu *io_queue;
77 u32 __iomem *dbs; 79 u32 __iomem *dbs;
78 struct pci_dev *pci_dev; 80 struct pci_dev *pci_dev;
79 struct dma_pool *prp_page_pool; 81 struct dma_pool *prp_page_pool;
80 struct dma_pool *prp_small_pool; 82 struct dma_pool *prp_small_pool;
81 int instance; 83 int instance;
82 int queue_count; 84 unsigned queue_count;
85 unsigned online_queues;
86 unsigned max_qid;
87 int q_depth;
83 u32 db_stride; 88 u32 db_stride;
84 u32 ctrl_config; 89 u32 ctrl_config;
85 struct msix_entry *entry; 90 struct msix_entry *entry;
@@ -89,6 +94,7 @@ struct nvme_dev {
89 struct miscdevice miscdev; 94 struct miscdevice miscdev;
90 work_func_t reset_workfn; 95 work_func_t reset_workfn;
91 struct work_struct reset_work; 96 struct work_struct reset_work;
97 struct notifier_block nb;
92 char name[12]; 98 char name[12];
93 char serial[20]; 99 char serial[20];
94 char model[40]; 100 char model[40];
@@ -131,6 +137,7 @@ struct nvme_iod {
131 int length; /* Of data, in bytes */ 137 int length; /* Of data, in bytes */
132 unsigned long start_time; 138 unsigned long start_time;
133 dma_addr_t first_dma; 139 dma_addr_t first_dma;
140 struct list_head node;
134 struct scatterlist sg[0]; 141 struct scatterlist sg[0];
135}; 142};
136 143
@@ -146,16 +153,12 @@ static inline u64 nvme_block_nr(struct nvme_ns *ns, sector_t sector)
146 */ 153 */
147void nvme_free_iod(struct nvme_dev *dev, struct nvme_iod *iod); 154void nvme_free_iod(struct nvme_dev *dev, struct nvme_iod *iod);
148 155
149int nvme_setup_prps(struct nvme_dev *dev, struct nvme_common_command *cmd, 156int nvme_setup_prps(struct nvme_dev *, struct nvme_iod *, int , gfp_t);
150 struct nvme_iod *iod, int total_len, gfp_t gfp);
151struct nvme_iod *nvme_map_user_pages(struct nvme_dev *dev, int write, 157struct nvme_iod *nvme_map_user_pages(struct nvme_dev *dev, int write,
152 unsigned long addr, unsigned length); 158 unsigned long addr, unsigned length);
153void nvme_unmap_user_pages(struct nvme_dev *dev, int write, 159void nvme_unmap_user_pages(struct nvme_dev *dev, int write,
154 struct nvme_iod *iod); 160 struct nvme_iod *iod);
155struct nvme_queue *get_nvmeq(struct nvme_dev *dev); 161int nvme_submit_io_cmd(struct nvme_dev *, struct nvme_command *, u32 *);
156void put_nvmeq(struct nvme_queue *nvmeq);
157int nvme_submit_sync_cmd(struct nvme_queue *nvmeq, struct nvme_command *cmd,
158 u32 *result, unsigned timeout);
159int nvme_submit_flush_data(struct nvme_queue *nvmeq, struct nvme_ns *ns); 162int nvme_submit_flush_data(struct nvme_queue *nvmeq, struct nvme_ns *ns);
160int nvme_submit_admin_cmd(struct nvme_dev *, struct nvme_command *, 163int nvme_submit_admin_cmd(struct nvme_dev *, struct nvme_command *,
161 u32 *result); 164 u32 *result);
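
A brief aside on the NVME_IO_TIMEOUT change above: the fixed 5-second constant becomes a multiple of the exported io_timeout variable. A minimal sketch of how the driver side would typically back such a variable with a module parameter follows; the default value, parameter name and permissions are illustrative assumptions, not taken from this diff.

#include <linux/module.h>

/* Sketch only: I/O timeout in seconds, tunable at module load time.
 * NVME_IO_TIMEOUT then expands to (io_timeout * HZ) as in the header above. */
unsigned char io_timeout = 30;
module_param(io_timeout, byte, 0644);
MODULE_PARM_DESC(io_timeout, "timeout in seconds for I/O");
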
diff --git a/include/linux/pipe_fs_i.h b/include/linux/pipe_fs_i.h
index 4d9389c79e61..eb8b8ac6df3c 100644
--- a/include/linux/pipe_fs_i.h
+++ b/include/linux/pipe_fs_i.h
@@ -83,23 +83,6 @@ struct pipe_buf_operations {
83 int can_merge; 83 int can_merge;
84 84
85 /* 85 /*
86 * ->map() returns a virtual address mapping of the pipe buffer.
87 * The last integer flag reflects whether this should be an atomic
88 * mapping or not. The atomic map is faster, however you can't take
89 * page faults before calling ->unmap() again. So if you need to eg
90 * access user data through copy_to/from_user(), then you must get
91 * a non-atomic map. ->map() uses the kmap_atomic slot for
92 * atomic maps, you have to be careful if mapping another page as
93 * source or destination for a copy.
94 */
95 void * (*map)(struct pipe_inode_info *, struct pipe_buffer *, int);
96
97 /*
98 * Undoes ->map(), finishes the virtual mapping of the pipe buffer.
99 */
100 void (*unmap)(struct pipe_inode_info *, struct pipe_buffer *, void *);
101
102 /*
103 * ->confirm() verifies that the data in the pipe buffer is there 86 * ->confirm() verifies that the data in the pipe buffer is there
104 * and that the contents are good. If the pages in the pipe belong 87 * and that the contents are good. If the pages in the pipe belong
105 * to a file system, we may need to wait for IO completion in this 88 * to a file system, we may need to wait for IO completion in this
@@ -150,8 +133,6 @@ struct pipe_inode_info *alloc_pipe_info(void);
150void free_pipe_info(struct pipe_inode_info *); 133void free_pipe_info(struct pipe_inode_info *);
151 134
152/* Generic pipe buffer ops functions */ 135/* Generic pipe buffer ops functions */
153void *generic_pipe_buf_map(struct pipe_inode_info *, struct pipe_buffer *, int);
154void generic_pipe_buf_unmap(struct pipe_inode_info *, struct pipe_buffer *, void *);
155void generic_pipe_buf_get(struct pipe_inode_info *, struct pipe_buffer *); 136void generic_pipe_buf_get(struct pipe_inode_info *, struct pipe_buffer *);
156int generic_pipe_buf_confirm(struct pipe_inode_info *, struct pipe_buffer *); 137int generic_pipe_buf_confirm(struct pipe_inode_info *, struct pipe_buffer *);
157int generic_pipe_buf_steal(struct pipe_inode_info *, struct pipe_buffer *); 138int generic_pipe_buf_steal(struct pipe_inode_info *, struct pipe_buffer *);
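
With ->map()/->unmap() and the generic_pipe_buf_map()/_unmap() helpers gone, callers that need the buffer contents are expected to map the page themselves. The fragment below is an illustrative sketch of that pattern (the function name is hypothetical; ->confirm() and error handling are omitted for brevity).

static void example_copy_from_pipe_buf(struct pipe_buffer *buf,
					char *dst, size_t len)
{
	char *src = kmap_atomic(buf->page);	/* atomic map: no sleeping, no page faults */

	memcpy(dst, src + buf->offset, len);
	kunmap_atomic(src);
}
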
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 075b3056c0c0..25f54c79f757 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1719,6 +1719,24 @@ static inline pid_t task_tgid_vnr(struct task_struct *tsk)
1719} 1719}
1720 1720
1721 1721
1722static inline int pid_alive(const struct task_struct *p);
1723static inline pid_t task_ppid_nr_ns(const struct task_struct *tsk, struct pid_namespace *ns)
1724{
1725 pid_t pid = 0;
1726
1727 rcu_read_lock();
1728 if (pid_alive(tsk))
1729 pid = task_tgid_nr_ns(rcu_dereference(tsk->real_parent), ns);
1730 rcu_read_unlock();
1731
1732 return pid;
1733}
1734
1735static inline pid_t task_ppid_nr(const struct task_struct *tsk)
1736{
1737 return task_ppid_nr_ns(tsk, &init_pid_ns);
1738}
1739
1722static inline pid_t task_pgrp_nr_ns(struct task_struct *tsk, 1740static inline pid_t task_pgrp_nr_ns(struct task_struct *tsk,
1723 struct pid_namespace *ns) 1741 struct pid_namespace *ns)
1724{ 1742{
@@ -1758,7 +1776,7 @@ static inline pid_t task_pgrp_nr(struct task_struct *tsk)
1758 * 1776 *
1759 * Return: 1 if the process is alive. 0 otherwise. 1777 * Return: 1 if the process is alive. 0 otherwise.
1760 */ 1778 */
1761static inline int pid_alive(struct task_struct *p) 1779static inline int pid_alive(const struct task_struct *p)
1762{ 1780{
1763 return p->pids[PIDTYPE_PID].pid != NULL; 1781 return p->pids[PIDTYPE_PID].pid != NULL;
1764} 1782}
diff --git a/include/linux/slab.h b/include/linux/slab.h
index 3dd389aa91c7..307bfbe62387 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -242,6 +242,17 @@ struct kmem_cache {
242#define KMALLOC_MIN_SIZE (1 << KMALLOC_SHIFT_LOW) 242#define KMALLOC_MIN_SIZE (1 << KMALLOC_SHIFT_LOW)
243#endif 243#endif
244 244
245/*
246 * This restriction comes from byte sized index implementation.
247 * Page size is normally 2^12 bytes and, in this case, if we want to use
248 * byte sized index which can represent 2^8 entries, the size of the object
249 * should be equal to or greater than 2^12 / 2^8 = 2^4 = 16.
250 * If the minimum size of kmalloc is less than 16, we use it as the minimum
251 * object size and give up on using a byte sized index.
252 */
253#define SLAB_OBJ_MIN_SIZE (KMALLOC_MIN_SIZE < 16 ? \
254 (KMALLOC_MIN_SIZE) : 16)
255
245#ifndef CONFIG_SLOB 256#ifndef CONFIG_SLOB
246extern struct kmem_cache *kmalloc_caches[KMALLOC_SHIFT_HIGH + 1]; 257extern struct kmem_cache *kmalloc_caches[KMALLOC_SHIFT_HIGH + 1];
247#ifdef CONFIG_ZONE_DMA 258#ifdef CONFIG_ZONE_DMA
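
The arithmetic in the new SLAB_OBJ_MIN_SIZE comment can be checked with a small standalone program; PAGE_SIZE and KMALLOC_MIN_SIZE below are illustrative values chosen for the sketch, not the kernel's configuration macros.

#include <stdio.h>

#define PAGE_SIZE        4096	/* 2^12, the common case */
#define KMALLOC_MIN_SIZE 8	/* example: an arch with a small minimum kmalloc */

/* A one-byte index can name at most 2^8 objects, so each object must occupy
 * at least PAGE_SIZE / 2^8 bytes for the index to cover a whole page. */
#define SLAB_OBJ_MIN_SIZE (KMALLOC_MIN_SIZE < 16 ? (KMALLOC_MIN_SIZE) : 16)

int main(void)
{
	printf("objects addressable by a byte index: %d\n", 1 << 8);
	printf("object size needed to cover a page:  %d bytes\n",
	       PAGE_SIZE / (1 << 8));
	printf("SLAB_OBJ_MIN_SIZE with KMALLOC_MIN_SIZE=%d: %d\n",
	       KMALLOC_MIN_SIZE, SLAB_OBJ_MIN_SIZE);
	return 0;
}
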
diff --git a/include/linux/sunrpc/svcsock.h b/include/linux/sunrpc/svcsock.h
index 947009ed5996..2e780134f449 100644
--- a/include/linux/sunrpc/svcsock.h
+++ b/include/linux/sunrpc/svcsock.h
@@ -22,7 +22,7 @@ struct svc_sock {
22 22
23 /* We keep the old state_change and data_ready CB's here */ 23 /* We keep the old state_change and data_ready CB's here */
24 void (*sk_ostate)(struct sock *); 24 void (*sk_ostate)(struct sock *);
25 void (*sk_odata)(struct sock *, int bytes); 25 void (*sk_odata)(struct sock *);
26 void (*sk_owspace)(struct sock *); 26 void (*sk_owspace)(struct sock *);
27 27
28 /* private TCP part */ 28 /* private TCP part */
diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
index 697ceb70a9a9..a4a0588c5397 100644
--- a/include/linux/syscalls.h
+++ b/include/linux/syscalls.h
@@ -119,8 +119,10 @@ extern struct trace_event_functions exit_syscall_print_funcs;
119 static struct syscall_metadata __syscall_meta_##sname; \ 119 static struct syscall_metadata __syscall_meta_##sname; \
120 static struct ftrace_event_call __used \ 120 static struct ftrace_event_call __used \
121 event_enter_##sname = { \ 121 event_enter_##sname = { \
122 .name = "sys_enter"#sname, \
123 .class = &event_class_syscall_enter, \ 122 .class = &event_class_syscall_enter, \
123 { \
124 .name = "sys_enter"#sname, \
125 }, \
124 .event.funcs = &enter_syscall_print_funcs, \ 126 .event.funcs = &enter_syscall_print_funcs, \
125 .data = (void *)&__syscall_meta_##sname,\ 127 .data = (void *)&__syscall_meta_##sname,\
126 .flags = TRACE_EVENT_FL_CAP_ANY, \ 128 .flags = TRACE_EVENT_FL_CAP_ANY, \
@@ -133,8 +135,10 @@ extern struct trace_event_functions exit_syscall_print_funcs;
133 static struct syscall_metadata __syscall_meta_##sname; \ 135 static struct syscall_metadata __syscall_meta_##sname; \
134 static struct ftrace_event_call __used \ 136 static struct ftrace_event_call __used \
135 event_exit_##sname = { \ 137 event_exit_##sname = { \
136 .name = "sys_exit"#sname, \
137 .class = &event_class_syscall_exit, \ 138 .class = &event_class_syscall_exit, \
139 { \
140 .name = "sys_exit"#sname, \
141 }, \
138 .event.funcs = &exit_syscall_print_funcs, \ 142 .event.funcs = &exit_syscall_print_funcs, \
139 .data = (void *)&__syscall_meta_##sname,\ 143 .data = (void *)&__syscall_meta_##sname,\
140 .flags = TRACE_EVENT_FL_CAP_ANY, \ 144 .flags = TRACE_EVENT_FL_CAP_ANY, \
diff --git a/include/linux/tracepoint.h b/include/linux/tracepoint.h
index 812b2553dfd8..9d30ee469c2a 100644
--- a/include/linux/tracepoint.h
+++ b/include/linux/tracepoint.h
@@ -6,7 +6,7 @@
6 * 6 *
7 * See Documentation/trace/tracepoints.txt. 7 * See Documentation/trace/tracepoints.txt.
8 * 8 *
9 * (C) Copyright 2008 Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca> 9 * Copyright (C) 2008-2014 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
10 * 10 *
11 * Heavily inspired from the Linux Kernel Markers. 11 * Heavily inspired from the Linux Kernel Markers.
12 * 12 *
@@ -21,6 +21,7 @@
21 21
22struct module; 22struct module;
23struct tracepoint; 23struct tracepoint;
24struct notifier_block;
24 25
25struct tracepoint_func { 26struct tracepoint_func {
26 void *func; 27 void *func;
@@ -35,31 +36,38 @@ struct tracepoint {
35 struct tracepoint_func __rcu *funcs; 36 struct tracepoint_func __rcu *funcs;
36}; 37};
37 38
38/*
39 * Connect a probe to a tracepoint.
40 * Internal API, should not be used directly.
41 */
42extern int tracepoint_probe_register(const char *name, void *probe, void *data);
43
44/*
45 * Disconnect a probe from a tracepoint.
46 * Internal API, should not be used directly.
47 */
48extern int 39extern int
49tracepoint_probe_unregister(const char *name, void *probe, void *data); 40tracepoint_probe_register(struct tracepoint *tp, void *probe, void *data);
41extern int
42tracepoint_probe_unregister(struct tracepoint *tp, void *probe, void *data);
43extern void
44for_each_kernel_tracepoint(void (*fct)(struct tracepoint *tp, void *priv),
45 void *priv);
50 46
51#ifdef CONFIG_MODULES 47#ifdef CONFIG_MODULES
52struct tp_module { 48struct tp_module {
53 struct list_head list; 49 struct list_head list;
54 unsigned int num_tracepoints; 50 struct module *mod;
55 struct tracepoint * const *tracepoints_ptrs;
56}; 51};
52
57bool trace_module_has_bad_taint(struct module *mod); 53bool trace_module_has_bad_taint(struct module *mod);
54extern int register_tracepoint_module_notifier(struct notifier_block *nb);
55extern int unregister_tracepoint_module_notifier(struct notifier_block *nb);
58#else 56#else
59static inline bool trace_module_has_bad_taint(struct module *mod) 57static inline bool trace_module_has_bad_taint(struct module *mod)
60{ 58{
61 return false; 59 return false;
62} 60}
61static inline
62int register_tracepoint_module_notifier(struct notifier_block *nb)
63{
64 return 0;
65}
66static inline
67int unregister_tracepoint_module_notifier(struct notifier_block *nb)
68{
69 return 0;
70}
63#endif /* CONFIG_MODULES */ 71#endif /* CONFIG_MODULES */
64 72
65/* 73/*
@@ -72,6 +80,11 @@ static inline void tracepoint_synchronize_unregister(void)
72 synchronize_sched(); 80 synchronize_sched();
73} 81}
74 82
83#ifdef CONFIG_HAVE_SYSCALL_TRACEPOINTS
84extern void syscall_regfunc(void);
85extern void syscall_unregfunc(void);
86#endif /* CONFIG_HAVE_SYSCALL_TRACEPOINTS */
87
75#define PARAMS(args...) args 88#define PARAMS(args...) args
76 89
77#endif /* _LINUX_TRACEPOINT_H */ 90#endif /* _LINUX_TRACEPOINT_H */
@@ -160,14 +173,14 @@ static inline void tracepoint_synchronize_unregister(void)
160 static inline int \ 173 static inline int \
161 register_trace_##name(void (*probe)(data_proto), void *data) \ 174 register_trace_##name(void (*probe)(data_proto), void *data) \
162 { \ 175 { \
163 return tracepoint_probe_register(#name, (void *)probe, \ 176 return tracepoint_probe_register(&__tracepoint_##name, \
164 data); \ 177 (void *)probe, data); \
165 } \ 178 } \
166 static inline int \ 179 static inline int \
167 unregister_trace_##name(void (*probe)(data_proto), void *data) \ 180 unregister_trace_##name(void (*probe)(data_proto), void *data) \
168 { \ 181 { \
169 return tracepoint_probe_unregister(#name, (void *)probe, \ 182 return tracepoint_probe_unregister(&__tracepoint_##name,\
170 data); \ 183 (void *)probe, data); \
171 } \ 184 } \
172 static inline void \ 185 static inline void \
173 check_trace_callback_type_##name(void (*cb)(data_proto)) \ 186 check_trace_callback_type_##name(void (*cb)(data_proto)) \
diff --git a/include/linux/uio.h b/include/linux/uio.h
index c55ce243cc09..199bcc34241b 100644
--- a/include/linux/uio.h
+++ b/include/linux/uio.h
@@ -9,14 +9,23 @@
9#ifndef __LINUX_UIO_H 9#ifndef __LINUX_UIO_H
10#define __LINUX_UIO_H 10#define __LINUX_UIO_H
11 11
12#include <linux/kernel.h>
12#include <uapi/linux/uio.h> 13#include <uapi/linux/uio.h>
13 14
15struct page;
14 16
15struct kvec { 17struct kvec {
16 void *iov_base; /* and that should *never* hold a userland pointer */ 18 void *iov_base; /* and that should *never* hold a userland pointer */
17 size_t iov_len; 19 size_t iov_len;
18}; 20};
19 21
22struct iov_iter {
23 const struct iovec *iov;
24 unsigned long nr_segs;
25 size_t iov_offset;
26 size_t count;
27};
28
20/* 29/*
21 * Total number of bytes covered by an iovec. 30 * Total number of bytes covered by an iovec.
22 * 31 *
@@ -34,8 +43,51 @@ static inline size_t iov_length(const struct iovec *iov, unsigned long nr_segs)
34 return ret; 43 return ret;
35} 44}
36 45
46static inline struct iovec iov_iter_iovec(const struct iov_iter *iter)
47{
48 return (struct iovec) {
49 .iov_base = iter->iov->iov_base + iter->iov_offset,
50 .iov_len = min(iter->count,
51 iter->iov->iov_len - iter->iov_offset),
52 };
53}
54
55#define iov_for_each(iov, iter, start) \
56 for (iter = (start); \
57 (iter).count && \
58 ((iov = iov_iter_iovec(&(iter))), 1); \
59 iov_iter_advance(&(iter), (iov).iov_len))
60
37unsigned long iov_shorten(struct iovec *iov, unsigned long nr_segs, size_t to); 61unsigned long iov_shorten(struct iovec *iov, unsigned long nr_segs, size_t to);
38 62
63size_t iov_iter_copy_from_user_atomic(struct page *page,
64 struct iov_iter *i, unsigned long offset, size_t bytes);
65size_t iov_iter_copy_from_user(struct page *page,
66 struct iov_iter *i, unsigned long offset, size_t bytes);
67void iov_iter_advance(struct iov_iter *i, size_t bytes);
68int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes);
69size_t iov_iter_single_seg_count(const struct iov_iter *i);
70size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
71 struct iov_iter *i);
72
73static inline void iov_iter_init(struct iov_iter *i,
74 const struct iovec *iov, unsigned long nr_segs,
75 size_t count, size_t written)
76{
77 i->iov = iov;
78 i->nr_segs = nr_segs;
79 i->iov_offset = 0;
80 i->count = count + written;
81
82 iov_iter_advance(i, written);
83}
84
85static inline size_t iov_iter_count(struct iov_iter *i)
86{
87 return i->count;
88}
89
39int memcpy_fromiovec(unsigned char *kdata, struct iovec *iov, int len); 90int memcpy_fromiovec(unsigned char *kdata, struct iovec *iov, int len);
40int memcpy_toiovec(struct iovec *iov, unsigned char *kdata, int len); 91int memcpy_toiovec(struct iovec *iov, unsigned char *kdata, int len);
92
41#endif 93#endif
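
The new iov_iter container, iov_iter_iovec() and iov_for_each() can be exercised outside the kernel. The sketch below mirrors the header's definitions in userspace C; iov_iter_advance() is reimplemented here only so the loop runs, as an assumption about the helper's intended behaviour rather than the kernel implementation.

#include <stdio.h>
#include <stddef.h>
#include <sys/uio.h>		/* struct iovec */

#define min(a, b) ((a) < (b) ? (a) : (b))

struct iov_iter {
	const struct iovec *iov;
	unsigned long nr_segs;
	size_t iov_offset;
	size_t count;
};

/* Same shape as the helper added to <linux/uio.h>. */
static inline struct iovec iov_iter_iovec(const struct iov_iter *iter)
{
	return (struct iovec) {
		.iov_base = (char *)iter->iov->iov_base + iter->iov_offset,
		.iov_len  = min(iter->count, iter->iov->iov_len - iter->iov_offset),
	};
}

/* Simplified stand-in for the kernel's iov_iter_advance(): walk forward by
 * 'bytes', stepping to the next segment when the current one is used up. */
static void iov_iter_advance(struct iov_iter *i, size_t bytes)
{
	while (bytes && i->count) {
		size_t seg = min(bytes, i->iov->iov_len - i->iov_offset);

		i->iov_offset += seg;
		i->count -= seg;
		bytes -= seg;
		if (i->iov_offset == i->iov->iov_len) {
			i->iov++;
			i->nr_segs--;
			i->iov_offset = 0;
		}
	}
}

#define iov_for_each(iov, iter, start)					\
	for (iter = (start);						\
	     (iter).count && ((iov = iov_iter_iovec(&(iter))), 1);	\
	     iov_iter_advance(&(iter), (iov).iov_len))

int main(void)
{
	char a[3] = "abc", b[5] = "defgh";
	struct iovec vec[2] = {
		{ .iov_base = a, .iov_len = sizeof(a) },
		{ .iov_base = b, .iov_len = sizeof(b) },
	};
	struct iov_iter start = { .iov = vec, .nr_segs = 2,
				  .iov_offset = 0, .count = sizeof(a) + sizeof(b) };
	struct iov_iter it;
	struct iovec cur;

	iov_for_each(cur, it, start)
		printf("segment of %zu bytes\n", cur.iov_len);
	return 0;
}
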
diff --git a/include/media/rc-core.h b/include/media/rc-core.h
index 0b9f890ce431..fde142e5f25a 100644
--- a/include/media/rc-core.h
+++ b/include/media/rc-core.h
@@ -60,6 +60,7 @@ enum rc_filter_type {
60/** 60/**
61 * struct rc_dev - represents a remote control device 61 * struct rc_dev - represents a remote control device
62 * @dev: driver model's view of this device 62 * @dev: driver model's view of this device
63 * @sysfs_groups: sysfs attribute groups
63 * @input_name: name of the input child device 64 * @input_name: name of the input child device
64 * @input_phys: physical path to the input child device 65 * @input_phys: physical path to the input child device
65 * @input_id: id of the input child device (struct input_id) 66 * @input_id: id of the input child device (struct input_id)
@@ -112,10 +113,12 @@ enum rc_filter_type {
112 * device doesn't interrupt host until it sees IR pulses 113 * device doesn't interrupt host until it sees IR pulses
113 * @s_learning_mode: enable wide band receiver used for learning 114 * @s_learning_mode: enable wide band receiver used for learning
114 * @s_carrier_report: enable carrier reports 115 * @s_carrier_report: enable carrier reports
115 * @s_filter: set the scancode filter of a given type 116 * @s_filter: set the scancode filter
117 * @s_wakeup_filter: set the wakeup scancode filter
116 */ 118 */
117struct rc_dev { 119struct rc_dev {
118 struct device dev; 120 struct device dev;
121 const struct attribute_group *sysfs_groups[5];
119 const char *input_name; 122 const char *input_name;
120 const char *input_phys; 123 const char *input_phys;
121 struct input_id input_id; 124 struct input_id input_id;
@@ -159,8 +162,9 @@ struct rc_dev {
159 int (*s_learning_mode)(struct rc_dev *dev, int enable); 162 int (*s_learning_mode)(struct rc_dev *dev, int enable);
160 int (*s_carrier_report) (struct rc_dev *dev, int enable); 163 int (*s_carrier_report) (struct rc_dev *dev, int enable);
161 int (*s_filter)(struct rc_dev *dev, 164 int (*s_filter)(struct rc_dev *dev,
162 enum rc_filter_type type,
163 struct rc_scancode_filter *filter); 165 struct rc_scancode_filter *filter);
166 int (*s_wakeup_filter)(struct rc_dev *dev,
167 struct rc_scancode_filter *filter);
164}; 168};
165 169
166#define to_rc_dev(d) container_of(d, struct rc_dev, dev) 170#define to_rc_dev(d) container_of(d, struct rc_dev, dev)
diff --git a/include/net/9p/client.h b/include/net/9p/client.h
index c38a005bd0cf..6fab66c5c5af 100644
--- a/include/net/9p/client.h
+++ b/include/net/9p/client.h
@@ -67,7 +67,6 @@ enum p9_trans_status {
67 * @REQ_STATUS_ALLOC: request has been allocated but not sent 67 * @REQ_STATUS_ALLOC: request has been allocated but not sent
68 * @REQ_STATUS_UNSENT: request waiting to be sent 68 * @REQ_STATUS_UNSENT: request waiting to be sent
69 * @REQ_STATUS_SENT: request sent to server 69 * @REQ_STATUS_SENT: request sent to server
70 * @REQ_STATUS_FLSH: a flush has been sent for this request
71 * @REQ_STATUS_RCVD: response received from server 70 * @REQ_STATUS_RCVD: response received from server
72 * @REQ_STATUS_FLSHD: request has been flushed 71 * @REQ_STATUS_FLSHD: request has been flushed
73 * @REQ_STATUS_ERROR: request encountered an error on the client side 72 * @REQ_STATUS_ERROR: request encountered an error on the client side
@@ -83,7 +82,6 @@ enum p9_req_status_t {
83 REQ_STATUS_ALLOC, 82 REQ_STATUS_ALLOC,
84 REQ_STATUS_UNSENT, 83 REQ_STATUS_UNSENT,
85 REQ_STATUS_SENT, 84 REQ_STATUS_SENT,
86 REQ_STATUS_FLSH,
87 REQ_STATUS_RCVD, 85 REQ_STATUS_RCVD,
88 REQ_STATUS_FLSHD, 86 REQ_STATUS_FLSHD,
89 REQ_STATUS_ERROR, 87 REQ_STATUS_ERROR,
@@ -130,7 +128,6 @@ struct p9_req_t {
130 * @proto_version: 9P protocol version to use 128 * @proto_version: 9P protocol version to use
131 * @trans_mod: module API instantiated with this client 129 * @trans_mod: module API instantiated with this client
132 * @trans: tranport instance state and API 130 * @trans: tranport instance state and API
133 * @conn: connection state information used by trans_fd
134 * @fidpool: fid handle accounting for session 131 * @fidpool: fid handle accounting for session
135 * @fidlist: List of active fid handles 132 * @fidlist: List of active fid handles
136 * @tagpool - transaction id accounting for session 133 * @tagpool - transaction id accounting for session
@@ -159,7 +156,6 @@ struct p9_client {
159 struct p9_trans_module *trans_mod; 156 struct p9_trans_module *trans_mod;
160 enum p9_trans_status status; 157 enum p9_trans_status status;
161 void *trans; 158 void *trans;
162 struct p9_conn *conn;
163 159
164 struct p9_idpool *fidpool; 160 struct p9_idpool *fidpool;
165 struct list_head fidlist; 161 struct list_head fidlist;
@@ -261,7 +257,7 @@ int p9_client_mkdir_dotl(struct p9_fid *fid, char *name, int mode,
261int p9_client_lock_dotl(struct p9_fid *fid, struct p9_flock *flock, u8 *status); 257int p9_client_lock_dotl(struct p9_fid *fid, struct p9_flock *flock, u8 *status);
262int p9_client_getlock_dotl(struct p9_fid *fid, struct p9_getlock *fl); 258int p9_client_getlock_dotl(struct p9_fid *fid, struct p9_getlock *fl);
263struct p9_req_t *p9_tag_lookup(struct p9_client *, u16); 259struct p9_req_t *p9_tag_lookup(struct p9_client *, u16);
264void p9_client_cb(struct p9_client *c, struct p9_req_t *req); 260void p9_client_cb(struct p9_client *c, struct p9_req_t *req, int status);
265 261
266int p9_parse_header(struct p9_fcall *, int32_t *, int8_t *, int16_t *, int); 262int p9_parse_header(struct p9_fcall *, int32_t *, int8_t *, int16_t *, int);
267int p9stat_read(struct p9_client *, char *, int, struct p9_wstat *); 263int p9stat_read(struct p9_client *, char *, int, struct p9_wstat *);
diff --git a/include/net/9p/transport.h b/include/net/9p/transport.h
index 9a36d9297114..d9fa68f26c41 100644
--- a/include/net/9p/transport.h
+++ b/include/net/9p/transport.h
@@ -40,6 +40,8 @@
40 * @close: member function to discard a connection on this transport 40 * @close: member function to discard a connection on this transport
41 * @request: member function to issue a request to the transport 41 * @request: member function to issue a request to the transport
42 * @cancel: member function to cancel a request (if it hasn't been sent) 42 * @cancel: member function to cancel a request (if it hasn't been sent)
 43 * @cancelled: member function to notify that a cancelled request will
 44 * not receive a reply
43 * 45 *
44 * This is the basic API for a transport module which is registered by the 46 * This is the basic API for a transport module which is registered by the
45 * transport module with the 9P core network module and used by the client 47 * transport module with the 9P core network module and used by the client
@@ -58,6 +60,7 @@ struct p9_trans_module {
58 void (*close) (struct p9_client *); 60 void (*close) (struct p9_client *);
59 int (*request) (struct p9_client *, struct p9_req_t *req); 61 int (*request) (struct p9_client *, struct p9_req_t *req);
60 int (*cancel) (struct p9_client *, struct p9_req_t *req); 62 int (*cancel) (struct p9_client *, struct p9_req_t *req);
63 int (*cancelled)(struct p9_client *, struct p9_req_t *req);
61 int (*zc_request)(struct p9_client *, struct p9_req_t *, 64 int (*zc_request)(struct p9_client *, struct p9_req_t *,
62 char *, char *, int , int, int, int); 65 char *, char *, int , int, int, int);
63}; 66};
diff --git a/include/net/dst.h b/include/net/dst.h
index 46ed958e0c6e..71c60f42be48 100644
--- a/include/net/dst.h
+++ b/include/net/dst.h
@@ -45,7 +45,7 @@ struct dst_entry {
45 void *__pad1; 45 void *__pad1;
46#endif 46#endif
47 int (*input)(struct sk_buff *); 47 int (*input)(struct sk_buff *);
48 int (*output)(struct sk_buff *); 48 int (*output)(struct sock *sk, struct sk_buff *skb);
49 49
50 unsigned short flags; 50 unsigned short flags;
51#define DST_HOST 0x0001 51#define DST_HOST 0x0001
@@ -367,7 +367,11 @@ static inline struct dst_entry *skb_dst_pop(struct sk_buff *skb)
367 return child; 367 return child;
368} 368}
369 369
370int dst_discard(struct sk_buff *skb); 370int dst_discard_sk(struct sock *sk, struct sk_buff *skb);
371static inline int dst_discard(struct sk_buff *skb)
372{
373 return dst_discard_sk(skb->sk, skb);
374}
371void *dst_alloc(struct dst_ops *ops, struct net_device *dev, int initial_ref, 375void *dst_alloc(struct dst_ops *ops, struct net_device *dev, int initial_ref,
372 int initial_obsolete, unsigned short flags); 376 int initial_obsolete, unsigned short flags);
373void __dst_free(struct dst_entry *dst); 377void __dst_free(struct dst_entry *dst);
@@ -449,9 +453,13 @@ static inline void dst_set_expires(struct dst_entry *dst, int timeout)
449} 453}
450 454
451/* Output packet to network from transport. */ 455/* Output packet to network from transport. */
456static inline int dst_output_sk(struct sock *sk, struct sk_buff *skb)
457{
458 return skb_dst(skb)->output(sk, skb);
459}
452static inline int dst_output(struct sk_buff *skb) 460static inline int dst_output(struct sk_buff *skb)
453{ 461{
454 return skb_dst(skb)->output(skb); 462 return dst_output_sk(skb->sk, skb);
455} 463}
456 464
457/* Input packet from network to transport. */ 465/* Input packet from network to transport. */
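
The dst_output() change follows the same pattern as the other networking hunks in this diff: output paths gain an explicit struct sock argument while a static inline wrapper keeps the old call sites working. A hypothetical caller adapting to the new form might look like this (illustrative only, not code from this diff):

static int example_local_out(struct sock *sk, struct sk_buff *skb)
{
	/* ... network header already set up on skb ... */
	return dst_output_sk(sk, skb);	/* previously: dst_output(skb) */
}
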
diff --git a/include/net/inet6_connection_sock.h b/include/net/inet6_connection_sock.h
index f981ba7adeed..74af137304be 100644
--- a/include/net/inet6_connection_sock.h
+++ b/include/net/inet6_connection_sock.h
@@ -40,7 +40,7 @@ void inet6_csk_reqsk_queue_hash_add(struct sock *sk, struct request_sock *req,
40 40
41void inet6_csk_addr2sockaddr(struct sock *sk, struct sockaddr *uaddr); 41void inet6_csk_addr2sockaddr(struct sock *sk, struct sockaddr *uaddr);
42 42
43int inet6_csk_xmit(struct sk_buff *skb, struct flowi *fl); 43int inet6_csk_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl);
44 44
45struct dst_entry *inet6_csk_update_pmtu(struct sock *sk, u32 mtu); 45struct dst_entry *inet6_csk_update_pmtu(struct sock *sk, u32 mtu);
46#endif /* _INET6_CONNECTION_SOCK_H */ 46#endif /* _INET6_CONNECTION_SOCK_H */
diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h
index c55aeed41ace..7a4313887568 100644
--- a/include/net/inet_connection_sock.h
+++ b/include/net/inet_connection_sock.h
@@ -36,7 +36,7 @@ struct tcp_congestion_ops;
36 * (i.e. things that depend on the address family) 36 * (i.e. things that depend on the address family)
37 */ 37 */
38struct inet_connection_sock_af_ops { 38struct inet_connection_sock_af_ops {
39 int (*queue_xmit)(struct sk_buff *skb, struct flowi *fl); 39 int (*queue_xmit)(struct sock *sk, struct sk_buff *skb, struct flowi *fl);
40 void (*send_check)(struct sock *sk, struct sk_buff *skb); 40 void (*send_check)(struct sock *sk, struct sk_buff *skb);
41 int (*rebuild_header)(struct sock *sk); 41 int (*rebuild_header)(struct sock *sk);
42 void (*sk_rx_dst_set)(struct sock *sk, const struct sk_buff *skb); 42 void (*sk_rx_dst_set)(struct sock *sk, const struct sk_buff *skb);
diff --git a/include/net/ip.h b/include/net/ip.h
index 25064c28e059..3ec2b0fb9d83 100644
--- a/include/net/ip.h
+++ b/include/net/ip.h
@@ -104,14 +104,19 @@ int ip_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt,
104 struct net_device *orig_dev); 104 struct net_device *orig_dev);
105int ip_local_deliver(struct sk_buff *skb); 105int ip_local_deliver(struct sk_buff *skb);
106int ip_mr_input(struct sk_buff *skb); 106int ip_mr_input(struct sk_buff *skb);
107int ip_output(struct sk_buff *skb); 107int ip_output(struct sock *sk, struct sk_buff *skb);
108int ip_mc_output(struct sk_buff *skb); 108int ip_mc_output(struct sock *sk, struct sk_buff *skb);
109int ip_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *)); 109int ip_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *));
110int ip_do_nat(struct sk_buff *skb); 110int ip_do_nat(struct sk_buff *skb);
111void ip_send_check(struct iphdr *ip); 111void ip_send_check(struct iphdr *ip);
112int __ip_local_out(struct sk_buff *skb); 112int __ip_local_out(struct sk_buff *skb);
113int ip_local_out(struct sk_buff *skb); 113int ip_local_out_sk(struct sock *sk, struct sk_buff *skb);
114int ip_queue_xmit(struct sk_buff *skb, struct flowi *fl); 114static inline int ip_local_out(struct sk_buff *skb)
115{
116 return ip_local_out_sk(skb->sk, skb);
117}
118
119int ip_queue_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl);
115void ip_init(void); 120void ip_init(void);
116int ip_append_data(struct sock *sk, struct flowi4 *fl4, 121int ip_append_data(struct sock *sk, struct flowi4 *fl4,
117 int getfrag(void *from, char *to, int offset, int len, 122 int getfrag(void *from, char *to, int offset, int len,
diff --git a/include/net/ip6_route.h b/include/net/ip6_route.h
index 3c3bb184eb8f..6c4f5eac98e7 100644
--- a/include/net/ip6_route.h
+++ b/include/net/ip6_route.h
@@ -32,6 +32,11 @@ struct route_info {
32#define RT6_LOOKUP_F_SRCPREF_PUBLIC 0x00000010 32#define RT6_LOOKUP_F_SRCPREF_PUBLIC 0x00000010
33#define RT6_LOOKUP_F_SRCPREF_COA 0x00000020 33#define RT6_LOOKUP_F_SRCPREF_COA 0x00000020
34 34
35/* We do not (yet ?) support IPv6 jumbograms (RFC 2675)
36 * Unlike IPv4, hdr->seg_len doesn't include the IPv6 header
37 */
38#define IP6_MAX_MTU (0xFFFF + sizeof(struct ipv6hdr))
39
35/* 40/*
36 * rt6_srcprefs2flags() and rt6_flags2srcprefs() translate 41 * rt6_srcprefs2flags() and rt6_flags2srcprefs() translate
37 * between IPV6_ADDR_PREFERENCES socket option values 42 * between IPV6_ADDR_PREFERENCES socket option values
diff --git a/include/net/ip_tunnels.h b/include/net/ip_tunnels.h
index e77c10405d51..a4daf9eb8562 100644
--- a/include/net/ip_tunnels.h
+++ b/include/net/ip_tunnels.h
@@ -153,7 +153,7 @@ static inline u8 ip_tunnel_ecn_encap(u8 tos, const struct iphdr *iph,
153} 153}
154 154
155int iptunnel_pull_header(struct sk_buff *skb, int hdr_len, __be16 inner_proto); 155int iptunnel_pull_header(struct sk_buff *skb, int hdr_len, __be16 inner_proto);
156int iptunnel_xmit(struct rtable *rt, struct sk_buff *skb, 156int iptunnel_xmit(struct sock *sk, struct rtable *rt, struct sk_buff *skb,
157 __be32 src, __be32 dst, __u8 proto, 157 __be32 src, __be32 dst, __u8 proto,
158 __u8 tos, __u8 ttl, __be16 df, bool xnet); 158 __u8 tos, __u8 ttl, __be16 df, bool xnet);
159 159
diff --git a/include/net/ipv6.h b/include/net/ipv6.h
index 4f541f11ce63..d640925bc454 100644
--- a/include/net/ipv6.h
+++ b/include/net/ipv6.h
@@ -731,7 +731,7 @@ struct dst_entry *ip6_blackhole_route(struct net *net,
731 * skb processing functions 731 * skb processing functions
732 */ 732 */
733 733
734int ip6_output(struct sk_buff *skb); 734int ip6_output(struct sock *sk, struct sk_buff *skb);
735int ip6_forward(struct sk_buff *skb); 735int ip6_forward(struct sk_buff *skb);
736int ip6_input(struct sk_buff *skb); 736int ip6_input(struct sk_buff *skb);
737int ip6_mc_input(struct sk_buff *skb); 737int ip6_mc_input(struct sk_buff *skb);
diff --git a/include/net/netfilter/nf_tables_core.h b/include/net/netfilter/nf_tables_core.h
index cf2b7ae2b9d8..a75fc8e27cd6 100644
--- a/include/net/netfilter/nf_tables_core.h
+++ b/include/net/netfilter/nf_tables_core.h
@@ -13,6 +13,16 @@ struct nft_cmp_fast_expr {
13 u8 len; 13 u8 len;
14}; 14};
15 15
16/* Calculate the mask for the nft_cmp_fast expression. On big endian the
17 * mask needs to include the *upper* bytes when interpreting that data as
18 * something smaller than the full u32, therefore a cpu_to_le32 is done.
19 */
20static inline u32 nft_cmp_fast_mask(unsigned int len)
21{
22 return cpu_to_le32(~0U >> (FIELD_SIZEOF(struct nft_cmp_fast_expr,
23 data) * BITS_PER_BYTE - len));
24}
25
16extern const struct nft_expr_ops nft_cmp_fast_ops; 26extern const struct nft_expr_ops nft_cmp_fast_ops;
17 27
18int nft_cmp_module_init(void); 28int nft_cmp_module_init(void);
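
The comment above packs the endianness reasoning rather tightly; the standalone sketch below reproduces the host-order part of the mask calculation for a 32-bit data field (userspace C, illustrative only — the real helper additionally wraps the shift in cpu_to_le32() so the bytes land correctly on big-endian CPUs).

#include <stdio.h>
#include <stdint.h>

/* The expression's data field is a u32, i.e. 32 bits wide. */
#define DATA_BITS 32

/* Host-order mask covering the low 'len' bits of the 32-bit data word,
 * mirroring the "~0U >> (32 - len)" part of nft_cmp_fast_mask(). */
static uint32_t low_bits_mask(unsigned int len)
{
	return ~0U >> (DATA_BITS - len);
}

int main(void)
{
	unsigned int len;

	for (len = 8; len <= 32; len += 8)
		printf("len=%2u  host-order mask=0x%08x\n", len, low_bits_mask(len));
	return 0;
}
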
diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h
index a3353f45ef94..8e4de46c052e 100644
--- a/include/net/sctp/sctp.h
+++ b/include/net/sctp/sctp.h
@@ -101,7 +101,7 @@ void sctp_addr_wq_mgmt(struct net *, struct sctp_sockaddr_entry *, int);
101int sctp_backlog_rcv(struct sock *sk, struct sk_buff *skb); 101int sctp_backlog_rcv(struct sock *sk, struct sk_buff *skb);
102int sctp_inet_listen(struct socket *sock, int backlog); 102int sctp_inet_listen(struct socket *sock, int backlog);
103void sctp_write_space(struct sock *sk); 103void sctp_write_space(struct sock *sk);
104void sctp_data_ready(struct sock *sk, int len); 104void sctp_data_ready(struct sock *sk);
105unsigned int sctp_poll(struct file *file, struct socket *sock, 105unsigned int sctp_poll(struct file *file, struct socket *sock,
106 poll_table *wait); 106 poll_table *wait);
107void sctp_sock_rfree(struct sk_buff *skb); 107void sctp_sock_rfree(struct sk_buff *skb);
diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h
index 6ee76c804893..d992ca3145fe 100644
--- a/include/net/sctp/structs.h
+++ b/include/net/sctp/structs.h
@@ -1653,6 +1653,17 @@ struct sctp_association {
1653 /* This is the last advertised value of rwnd over a SACK chunk. */ 1653 /* This is the last advertised value of rwnd over a SACK chunk. */
1654 __u32 a_rwnd; 1654 __u32 a_rwnd;
1655 1655
1656 /* Number of bytes by which the rwnd has slopped. The rwnd is allowed
1657 * to slop over a maximum of the association's frag_point.
1658 */
1659 __u32 rwnd_over;
1660
 1661 /* Keeps track of rwnd pressure. This happens when we have
 1662 * a window, but no receive buffer (i.e. small packets). This one
 1663 * is released slowly (1 PMTU at a time).
1664 */
1665 __u32 rwnd_press;
1666
1656 /* This is the sndbuf size in use for the association. 1667 /* This is the sndbuf size in use for the association.
1657 * This corresponds to the sndbuf size for the association, 1668 * This corresponds to the sndbuf size for the association,
1658 * as specified in the sk->sndbuf. 1669 * as specified in the sk->sndbuf.
@@ -1881,7 +1892,8 @@ void sctp_assoc_update(struct sctp_association *old,
1881__u32 sctp_association_get_next_tsn(struct sctp_association *); 1892__u32 sctp_association_get_next_tsn(struct sctp_association *);
1882 1893
1883void sctp_assoc_sync_pmtu(struct sock *, struct sctp_association *); 1894void sctp_assoc_sync_pmtu(struct sock *, struct sctp_association *);
1884void sctp_assoc_rwnd_update(struct sctp_association *, bool); 1895void sctp_assoc_rwnd_increase(struct sctp_association *, unsigned int);
1896void sctp_assoc_rwnd_decrease(struct sctp_association *, unsigned int);
1885void sctp_assoc_set_primary(struct sctp_association *, 1897void sctp_assoc_set_primary(struct sctp_association *,
1886 struct sctp_transport *); 1898 struct sctp_transport *);
1887void sctp_assoc_del_nonprimary_peers(struct sctp_association *, 1899void sctp_assoc_del_nonprimary_peers(struct sctp_association *,
diff --git a/include/net/sock.h b/include/net/sock.h
index 06a5668f05c9..8338a14e4805 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -418,7 +418,7 @@ struct sock {
418 u32 sk_classid; 418 u32 sk_classid;
419 struct cg_proto *sk_cgrp; 419 struct cg_proto *sk_cgrp;
420 void (*sk_state_change)(struct sock *sk); 420 void (*sk_state_change)(struct sock *sk);
421 void (*sk_data_ready)(struct sock *sk, int bytes); 421 void (*sk_data_ready)(struct sock *sk);
422 void (*sk_write_space)(struct sock *sk); 422 void (*sk_write_space)(struct sock *sk);
423 void (*sk_error_report)(struct sock *sk); 423 void (*sk_error_report)(struct sock *sk);
424 int (*sk_backlog_rcv)(struct sock *sk, 424 int (*sk_backlog_rcv)(struct sock *sk,
diff --git a/include/net/xfrm.h b/include/net/xfrm.h
index 32682ae47b3f..116e9c7e19cb 100644
--- a/include/net/xfrm.h
+++ b/include/net/xfrm.h
@@ -333,7 +333,7 @@ struct xfrm_state_afinfo {
333 const xfrm_address_t *saddr); 333 const xfrm_address_t *saddr);
334 int (*tmpl_sort)(struct xfrm_tmpl **dst, struct xfrm_tmpl **src, int n); 334 int (*tmpl_sort)(struct xfrm_tmpl **dst, struct xfrm_tmpl **src, int n);
335 int (*state_sort)(struct xfrm_state **dst, struct xfrm_state **src, int n); 335 int (*state_sort)(struct xfrm_state **dst, struct xfrm_state **src, int n);
336 int (*output)(struct sk_buff *skb); 336 int (*output)(struct sock *sk, struct sk_buff *skb);
337 int (*output_finish)(struct sk_buff *skb); 337 int (*output_finish)(struct sk_buff *skb);
338 int (*extract_input)(struct xfrm_state *x, 338 int (*extract_input)(struct xfrm_state *x,
339 struct sk_buff *skb); 339 struct sk_buff *skb);
@@ -1540,7 +1540,7 @@ static inline int xfrm4_rcv_spi(struct sk_buff *skb, int nexthdr, __be32 spi)
1540 1540
1541int xfrm4_extract_output(struct xfrm_state *x, struct sk_buff *skb); 1541int xfrm4_extract_output(struct xfrm_state *x, struct sk_buff *skb);
1542int xfrm4_prepare_output(struct xfrm_state *x, struct sk_buff *skb); 1542int xfrm4_prepare_output(struct xfrm_state *x, struct sk_buff *skb);
1543int xfrm4_output(struct sk_buff *skb); 1543int xfrm4_output(struct sock *sk, struct sk_buff *skb);
1544int xfrm4_output_finish(struct sk_buff *skb); 1544int xfrm4_output_finish(struct sk_buff *skb);
1545int xfrm4_rcv_cb(struct sk_buff *skb, u8 protocol, int err); 1545int xfrm4_rcv_cb(struct sk_buff *skb, u8 protocol, int err);
1546int xfrm4_protocol_register(struct xfrm4_protocol *handler, unsigned char protocol); 1546int xfrm4_protocol_register(struct xfrm4_protocol *handler, unsigned char protocol);
@@ -1565,7 +1565,7 @@ __be32 xfrm6_tunnel_alloc_spi(struct net *net, xfrm_address_t *saddr);
1565__be32 xfrm6_tunnel_spi_lookup(struct net *net, const xfrm_address_t *saddr); 1565__be32 xfrm6_tunnel_spi_lookup(struct net *net, const xfrm_address_t *saddr);
1566int xfrm6_extract_output(struct xfrm_state *x, struct sk_buff *skb); 1566int xfrm6_extract_output(struct xfrm_state *x, struct sk_buff *skb);
1567int xfrm6_prepare_output(struct xfrm_state *x, struct sk_buff *skb); 1567int xfrm6_prepare_output(struct xfrm_state *x, struct sk_buff *skb);
1568int xfrm6_output(struct sk_buff *skb); 1568int xfrm6_output(struct sock *sk, struct sk_buff *skb);
1569int xfrm6_output_finish(struct sk_buff *skb); 1569int xfrm6_output_finish(struct sk_buff *skb);
1570int xfrm6_find_1stfragopt(struct xfrm_state *x, struct sk_buff *skb, 1570int xfrm6_find_1stfragopt(struct xfrm_state *x, struct sk_buff *skb,
1571 u8 **prevhdr); 1571 u8 **prevhdr);
diff --git a/include/target/iscsi/iscsi_transport.h b/include/target/iscsi/iscsi_transport.h
index 4483fadfa68d..33b487b5da92 100644
--- a/include/target/iscsi/iscsi_transport.h
+++ b/include/target/iscsi/iscsi_transport.h
@@ -21,6 +21,8 @@ struct iscsit_transport {
21 int (*iscsit_get_dataout)(struct iscsi_conn *, struct iscsi_cmd *, bool); 21 int (*iscsit_get_dataout)(struct iscsi_conn *, struct iscsi_cmd *, bool);
22 int (*iscsit_queue_data_in)(struct iscsi_conn *, struct iscsi_cmd *); 22 int (*iscsit_queue_data_in)(struct iscsi_conn *, struct iscsi_cmd *);
23 int (*iscsit_queue_status)(struct iscsi_conn *, struct iscsi_cmd *); 23 int (*iscsit_queue_status)(struct iscsi_conn *, struct iscsi_cmd *);
24 void (*iscsit_aborted_task)(struct iscsi_conn *, struct iscsi_cmd *);
25 enum target_prot_op (*iscsit_get_sup_prot_ops)(struct iscsi_conn *);
24}; 26};
25 27
26static inline void *iscsit_priv_cmd(struct iscsi_cmd *cmd) 28static inline void *iscsit_priv_cmd(struct iscsi_cmd *cmd)
diff --git a/include/target/target_core_backend.h b/include/target/target_core_backend.h
index 7020e33e742e..3a1c1eea1fff 100644
--- a/include/target/target_core_backend.h
+++ b/include/target/target_core_backend.h
@@ -73,10 +73,12 @@ sense_reason_t sbc_execute_unmap(struct se_cmd *cmd,
73 sense_reason_t (*do_unmap_fn)(struct se_cmd *cmd, void *priv, 73 sense_reason_t (*do_unmap_fn)(struct se_cmd *cmd, void *priv,
74 sector_t lba, sector_t nolb), 74 sector_t lba, sector_t nolb),
75 void *priv); 75 void *priv);
76void sbc_dif_generate(struct se_cmd *);
76sense_reason_t sbc_dif_verify_write(struct se_cmd *, sector_t, unsigned int, 77sense_reason_t sbc_dif_verify_write(struct se_cmd *, sector_t, unsigned int,
77 unsigned int, struct scatterlist *, int); 78 unsigned int, struct scatterlist *, int);
78sense_reason_t sbc_dif_verify_read(struct se_cmd *, sector_t, unsigned int, 79sense_reason_t sbc_dif_verify_read(struct se_cmd *, sector_t, unsigned int,
79 unsigned int, struct scatterlist *, int); 80 unsigned int, struct scatterlist *, int);
81sense_reason_t sbc_dif_read_strip(struct se_cmd *);
80 82
81void transport_set_vpd_proto_id(struct t10_vpd *, unsigned char *); 83void transport_set_vpd_proto_id(struct t10_vpd *, unsigned char *);
82int transport_set_vpd_assoc(struct t10_vpd *, unsigned char *); 84int transport_set_vpd_assoc(struct t10_vpd *, unsigned char *);
diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
index 1772fadcff62..9ec9864ecf38 100644
--- a/include/target/target_core_base.h
+++ b/include/target/target_core_base.h
@@ -162,7 +162,7 @@ enum se_cmd_flags_table {
162 SCF_SENT_CHECK_CONDITION = 0x00000800, 162 SCF_SENT_CHECK_CONDITION = 0x00000800,
163 SCF_OVERFLOW_BIT = 0x00001000, 163 SCF_OVERFLOW_BIT = 0x00001000,
164 SCF_UNDERFLOW_BIT = 0x00002000, 164 SCF_UNDERFLOW_BIT = 0x00002000,
165 SCF_SENT_DELAYED_TAS = 0x00004000, 165 SCF_SEND_DELAYED_TAS = 0x00004000,
166 SCF_ALUA_NON_OPTIMIZED = 0x00008000, 166 SCF_ALUA_NON_OPTIMIZED = 0x00008000,
167 SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC = 0x00020000, 167 SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC = 0x00020000,
168 SCF_ACK_KREF = 0x00040000, 168 SCF_ACK_KREF = 0x00040000,
@@ -442,19 +442,18 @@ struct se_tmr_req {
442}; 442};
443 443
444enum target_prot_op { 444enum target_prot_op {
445 TARGET_PROT_NORMAL = 0, 445 TARGET_PROT_NORMAL = 0,
446 TARGET_PROT_DIN_INSERT, 446 TARGET_PROT_DIN_INSERT = (1 << 0),
447 TARGET_PROT_DOUT_INSERT, 447 TARGET_PROT_DOUT_INSERT = (1 << 1),
448 TARGET_PROT_DIN_STRIP, 448 TARGET_PROT_DIN_STRIP = (1 << 2),
449 TARGET_PROT_DOUT_STRIP, 449 TARGET_PROT_DOUT_STRIP = (1 << 3),
450 TARGET_PROT_DIN_PASS, 450 TARGET_PROT_DIN_PASS = (1 << 4),
451 TARGET_PROT_DOUT_PASS, 451 TARGET_PROT_DOUT_PASS = (1 << 5),
452}; 452};
453 453
454enum target_prot_ho { 454#define TARGET_PROT_ALL TARGET_PROT_DIN_INSERT | TARGET_PROT_DOUT_INSERT | \
455 PROT_SEPERATED, 455 TARGET_PROT_DIN_STRIP | TARGET_PROT_DOUT_STRIP | \
456 PROT_INTERLEAVED, 456 TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS
457};
458 457
459enum target_prot_type { 458enum target_prot_type {
460 TARGET_DIF_TYPE0_PROT, 459 TARGET_DIF_TYPE0_PROT,
@@ -463,6 +462,12 @@ enum target_prot_type {
463 TARGET_DIF_TYPE3_PROT, 462 TARGET_DIF_TYPE3_PROT,
464}; 463};
465 464
465enum target_core_dif_check {
466 TARGET_DIF_CHECK_GUARD = 0x1 << 0,
467 TARGET_DIF_CHECK_APPTAG = 0x1 << 1,
468 TARGET_DIF_CHECK_REFTAG = 0x1 << 2,
469};
470
466struct se_dif_v1_tuple { 471struct se_dif_v1_tuple {
467 __be16 guard_tag; 472 __be16 guard_tag;
468 __be16 app_tag; 473 __be16 app_tag;
@@ -556,13 +561,14 @@ struct se_cmd {
556 /* DIF related members */ 561 /* DIF related members */
557 enum target_prot_op prot_op; 562 enum target_prot_op prot_op;
558 enum target_prot_type prot_type; 563 enum target_prot_type prot_type;
564 u8 prot_checks;
559 u32 prot_length; 565 u32 prot_length;
560 u32 reftag_seed; 566 u32 reftag_seed;
561 struct scatterlist *t_prot_sg; 567 struct scatterlist *t_prot_sg;
562 unsigned int t_prot_nents; 568 unsigned int t_prot_nents;
563 enum target_prot_ho prot_handover;
564 sense_reason_t pi_err; 569 sense_reason_t pi_err;
565 sector_t bad_sector; 570 sector_t bad_sector;
571 bool prot_pto;
566}; 572};
567 573
568struct se_ua { 574struct se_ua {
@@ -603,6 +609,7 @@ struct se_node_acl {
603struct se_session { 609struct se_session {
604 unsigned sess_tearing_down:1; 610 unsigned sess_tearing_down:1;
605 u64 sess_bin_isid; 611 u64 sess_bin_isid;
612 enum target_prot_op sup_prot_ops;
606 struct se_node_acl *se_node_acl; 613 struct se_node_acl *se_node_acl;
607 struct se_portal_group *se_tpg; 614 struct se_portal_group *se_tpg;
608 void *fabric_sess_ptr; 615 void *fabric_sess_ptr;
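
Since target_prot_op values are now individual bits, the sup_prot_ops field added to struct se_session above can advertise several operations at once and be queried with simple mask tests. A standalone sketch, using a hypothetical set of supported operations:

#include <stdio.h>

/* Mirrors the reworked enum: each protection operation is a distinct bit. */
enum target_prot_op {
	TARGET_PROT_NORMAL	= 0,
	TARGET_PROT_DIN_INSERT	= (1 << 0),
	TARGET_PROT_DOUT_INSERT	= (1 << 1),
	TARGET_PROT_DIN_STRIP	= (1 << 2),
	TARGET_PROT_DOUT_STRIP	= (1 << 3),
	TARGET_PROT_DIN_PASS	= (1 << 4),
	TARGET_PROT_DOUT_PASS	= (1 << 5),
};

int main(void)
{
	/* A hypothetical session supporting INSERT and STRIP but not PASS. */
	unsigned int sup_prot_ops = TARGET_PROT_DIN_INSERT | TARGET_PROT_DOUT_INSERT |
				    TARGET_PROT_DIN_STRIP | TARGET_PROT_DOUT_STRIP;

	printf("DIN_PASS supported:  %s\n",
	       (sup_prot_ops & TARGET_PROT_DIN_PASS) ? "yes" : "no");
	printf("DIN_STRIP supported: %s\n",
	       (sup_prot_ops & TARGET_PROT_DIN_STRIP) ? "yes" : "no");
	return 0;
}
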
diff --git a/include/target/target_core_fabric.h b/include/target/target_core_fabric.h
index 0218d689b3d7..22a4e98eec80 100644
--- a/include/target/target_core_fabric.h
+++ b/include/target/target_core_fabric.h
@@ -62,6 +62,7 @@ struct target_core_fabric_ops {
62 int (*queue_data_in)(struct se_cmd *); 62 int (*queue_data_in)(struct se_cmd *);
63 int (*queue_status)(struct se_cmd *); 63 int (*queue_status)(struct se_cmd *);
64 void (*queue_tm_rsp)(struct se_cmd *); 64 void (*queue_tm_rsp)(struct se_cmd *);
65 void (*aborted_task)(struct se_cmd *);
65 /* 66 /*
66 * fabric module calls for target_core_fabric_configfs.c 67 * fabric module calls for target_core_fabric_configfs.c
67 */ 68 */
@@ -83,10 +84,11 @@ struct target_core_fabric_ops {
83 void (*fabric_drop_nodeacl)(struct se_node_acl *); 84 void (*fabric_drop_nodeacl)(struct se_node_acl *);
84}; 85};
85 86
86struct se_session *transport_init_session(void); 87struct se_session *transport_init_session(enum target_prot_op);
87int transport_alloc_session_tags(struct se_session *, unsigned int, 88int transport_alloc_session_tags(struct se_session *, unsigned int,
88 unsigned int); 89 unsigned int);
89struct se_session *transport_init_session_tags(unsigned int, unsigned int); 90struct se_session *transport_init_session_tags(unsigned int, unsigned int,
91 enum target_prot_op);
90void __transport_register_session(struct se_portal_group *, 92void __transport_register_session(struct se_portal_group *,
91 struct se_node_acl *, struct se_session *, void *); 93 struct se_node_acl *, struct se_session *, void *);
92void transport_register_session(struct se_portal_group *, 94void transport_register_session(struct se_portal_group *,
diff --git a/include/trace/events/syscalls.h b/include/trace/events/syscalls.h
index 5a4c04a75b3d..14e49c798135 100644
--- a/include/trace/events/syscalls.h
+++ b/include/trace/events/syscalls.h
@@ -13,9 +13,6 @@
13 13
14#ifdef CONFIG_HAVE_SYSCALL_TRACEPOINTS 14#ifdef CONFIG_HAVE_SYSCALL_TRACEPOINTS
15 15
16extern void syscall_regfunc(void);
17extern void syscall_unregfunc(void);
18
19TRACE_EVENT_FN(sys_enter, 16TRACE_EVENT_FN(sys_enter,
20 17
21 TP_PROTO(struct pt_regs *regs, long id), 18 TP_PROTO(struct pt_regs *regs, long id),
diff --git a/include/trace/ftrace.h b/include/trace/ftrace.h
index 8765126b328c..0a1a4f7caf09 100644
--- a/include/trace/ftrace.h
+++ b/include/trace/ftrace.h
@@ -470,10 +470,13 @@ static inline notrace int ftrace_get_offsets_##call( \
470 * }; 470 * };
471 * 471 *
472 * static struct ftrace_event_call event_<call> = { 472 * static struct ftrace_event_call event_<call> = {
473 * .name = "<call>",
474 * .class = event_class_<template>, 473 * .class = event_class_<template>,
474 * {
475 * .tp = &__tracepoint_<call>,
476 * },
475 * .event = &ftrace_event_type_<call>, 477 * .event = &ftrace_event_type_<call>,
476 * .print_fmt = print_fmt_<call>, 478 * .print_fmt = print_fmt_<call>,
479 * .flags = TRACE_EVENT_FL_TRACEPOINT,
477 * }; 480 * };
478 * // its only safe to use pointers when doing linker tricks to 481 * // its only safe to use pointers when doing linker tricks to
479 * // create an array. 482 * // create an array.
@@ -605,10 +608,13 @@ static struct ftrace_event_class __used __refdata event_class_##call = { \
605#define DEFINE_EVENT(template, call, proto, args) \ 608#define DEFINE_EVENT(template, call, proto, args) \
606 \ 609 \
607static struct ftrace_event_call __used event_##call = { \ 610static struct ftrace_event_call __used event_##call = { \
608 .name = #call, \
609 .class = &event_class_##template, \ 611 .class = &event_class_##template, \
612 { \
613 .tp = &__tracepoint_##call, \
614 }, \
610 .event.funcs = &ftrace_event_type_funcs_##template, \ 615 .event.funcs = &ftrace_event_type_funcs_##template, \
611 .print_fmt = print_fmt_##template, \ 616 .print_fmt = print_fmt_##template, \
617 .flags = TRACE_EVENT_FL_TRACEPOINT, \
612}; \ 618}; \
613static struct ftrace_event_call __used \ 619static struct ftrace_event_call __used \
614__attribute__((section("_ftrace_events"))) *__event_##call = &event_##call 620__attribute__((section("_ftrace_events"))) *__event_##call = &event_##call
@@ -619,10 +625,13 @@ __attribute__((section("_ftrace_events"))) *__event_##call = &event_##call
619static const char print_fmt_##call[] = print; \ 625static const char print_fmt_##call[] = print; \
620 \ 626 \
621static struct ftrace_event_call __used event_##call = { \ 627static struct ftrace_event_call __used event_##call = { \
622 .name = #call, \
623 .class = &event_class_##template, \ 628 .class = &event_class_##template, \
629 { \
630 .tp = &__tracepoint_##call, \
631 }, \
624 .event.funcs = &ftrace_event_type_funcs_##call, \ 632 .event.funcs = &ftrace_event_type_funcs_##call, \
625 .print_fmt = print_fmt_##call, \ 633 .print_fmt = print_fmt_##call, \
634 .flags = TRACE_EVENT_FL_TRACEPOINT, \
626}; \ 635}; \
627static struct ftrace_event_call __used \ 636static struct ftrace_event_call __used \
628__attribute__((section("_ftrace_events"))) *__event_##call = &event_##call 637__attribute__((section("_ftrace_events"))) *__event_##call = &event_##call
diff --git a/include/uapi/linux/audit.h b/include/uapi/linux/audit.h
index 2d48fe1274ca..11917f747cb4 100644
--- a/include/uapi/linux/audit.h
+++ b/include/uapi/linux/audit.h
@@ -70,7 +70,6 @@
70#define AUDIT_TTY_SET 1017 /* Set TTY auditing status */ 70#define AUDIT_TTY_SET 1017 /* Set TTY auditing status */
71#define AUDIT_SET_FEATURE 1018 /* Turn an audit feature on or off */ 71#define AUDIT_SET_FEATURE 1018 /* Turn an audit feature on or off */
72#define AUDIT_GET_FEATURE 1019 /* Get which features are enabled */ 72#define AUDIT_GET_FEATURE 1019 /* Get which features are enabled */
73#define AUDIT_FEATURE_CHANGE 1020 /* audit log listing feature changes */
74 73
75#define AUDIT_FIRST_USER_MSG 1100 /* Userspace messages mostly uninteresting to kernel */ 74#define AUDIT_FIRST_USER_MSG 1100 /* Userspace messages mostly uninteresting to kernel */
76#define AUDIT_USER_AVC 1107 /* We filter this differently */ 75#define AUDIT_USER_AVC 1107 /* We filter this differently */
@@ -109,6 +108,8 @@
109#define AUDIT_NETFILTER_PKT 1324 /* Packets traversing netfilter chains */ 108#define AUDIT_NETFILTER_PKT 1324 /* Packets traversing netfilter chains */
110#define AUDIT_NETFILTER_CFG 1325 /* Netfilter chain modifications */ 109#define AUDIT_NETFILTER_CFG 1325 /* Netfilter chain modifications */
111#define AUDIT_SECCOMP 1326 /* Secure Computing event */ 110#define AUDIT_SECCOMP 1326 /* Secure Computing event */
111#define AUDIT_PROCTITLE 1327 /* Proctitle emit event */
112#define AUDIT_FEATURE_CHANGE 1328 /* audit log listing feature changes */
112 113
113#define AUDIT_AVC 1400 /* SE Linux avc denial or grant */ 114#define AUDIT_AVC 1400 /* SE Linux avc denial or grant */
114#define AUDIT_SELINUX_ERR 1401 /* Internal SE Linux Errors */ 115#define AUDIT_SELINUX_ERR 1401 /* Internal SE Linux Errors */
diff --git a/include/uapi/linux/capability.h b/include/uapi/linux/capability.h
index ba478fa3012e..154dd6d3c8fe 100644
--- a/include/uapi/linux/capability.h
+++ b/include/uapi/linux/capability.h
@@ -308,8 +308,12 @@ struct vfs_cap_data {
308 308
309#define CAP_LEASE 28 309#define CAP_LEASE 28
310 310
311/* Allow writing the audit log via unicast netlink socket */
312
311#define CAP_AUDIT_WRITE 29 313#define CAP_AUDIT_WRITE 29
312 314
315/* Allow configuration of audit via unicast netlink socket */
316
313#define CAP_AUDIT_CONTROL 30 317#define CAP_AUDIT_CONTROL 30
314 318
315#define CAP_SETFCAP 31 319#define CAP_SETFCAP 31
diff --git a/include/uapi/linux/nvme.h b/include/uapi/linux/nvme.h
index e5ab62201119..096fe1c6f83d 100644
--- a/include/uapi/linux/nvme.h
+++ b/include/uapi/linux/nvme.h
@@ -434,6 +434,7 @@ enum {
434 NVME_SC_REFTAG_CHECK = 0x284, 434 NVME_SC_REFTAG_CHECK = 0x284,
435 NVME_SC_COMPARE_FAILED = 0x285, 435 NVME_SC_COMPARE_FAILED = 0x285,
436 NVME_SC_ACCESS_DENIED = 0x286, 436 NVME_SC_ACCESS_DENIED = 0x286,
437 NVME_SC_DNR = 0x4000,
437}; 438};
438 439
439struct nvme_completion { 440struct nvme_completion {
diff --git a/include/uapi/linux/v4l2-common.h b/include/uapi/linux/v4l2-common.h
index 270db8914c01..9bf508ad0957 100644
--- a/include/uapi/linux/v4l2-common.h
+++ b/include/uapi/linux/v4l2-common.h
@@ -29,6 +29,8 @@
29#ifndef __V4L2_COMMON__ 29#ifndef __V4L2_COMMON__
30#define __V4L2_COMMON__ 30#define __V4L2_COMMON__
31 31
32#include <linux/types.h>
33
32/* 34/*
33 * 35 *
34 * Selection interface definitions 36 * Selection interface definitions
diff --git a/init/Kconfig b/init/Kconfig
index 427ba60d638f..765018c24cf9 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -292,9 +292,12 @@ config AUDIT
292 logging of avc messages output). Does not do system-call 292 logging of avc messages output). Does not do system-call
293 auditing without CONFIG_AUDITSYSCALL. 293 auditing without CONFIG_AUDITSYSCALL.
294 294
295config HAVE_ARCH_AUDITSYSCALL
296 bool
297
295config AUDITSYSCALL 298config AUDITSYSCALL
296 bool "Enable system-call auditing support" 299 bool "Enable system-call auditing support"
297 depends on AUDIT && (X86 || PARISC || PPC || S390 || IA64 || UML || SPARC64 || SUPERH || (ARM && AEABI && !OABI_COMPAT) || ALPHA) 300 depends on AUDIT && HAVE_ARCH_AUDITSYSCALL
298 default y if SECURITY_SELINUX 301 default y if SECURITY_SELINUX
299 help 302 help
300 Enable low-overhead system-call auditing infrastructure that 303 Enable low-overhead system-call auditing infrastructure that
diff --git a/kernel/audit.c b/kernel/audit.c
index 95a20f3f52f1..7c2893602d06 100644
--- a/kernel/audit.c
+++ b/kernel/audit.c
@@ -182,7 +182,7 @@ struct audit_buffer {
182 182
183struct audit_reply { 183struct audit_reply {
184 __u32 portid; 184 __u32 portid;
185 struct net *net; 185 struct net *net;
186 struct sk_buff *skb; 186 struct sk_buff *skb;
187}; 187};
188 188
@@ -396,7 +396,7 @@ static void audit_printk_skb(struct sk_buff *skb)
396 if (printk_ratelimit()) 396 if (printk_ratelimit())
397 pr_notice("type=%d %s\n", nlh->nlmsg_type, data); 397 pr_notice("type=%d %s\n", nlh->nlmsg_type, data);
398 else 398 else
399 audit_log_lost("printk limit exceeded\n"); 399 audit_log_lost("printk limit exceeded");
400 } 400 }
401 401
402 audit_hold_skb(skb); 402 audit_hold_skb(skb);
@@ -412,7 +412,7 @@ static void kauditd_send_skb(struct sk_buff *skb)
412 BUG_ON(err != -ECONNREFUSED); /* Shouldn't happen */ 412 BUG_ON(err != -ECONNREFUSED); /* Shouldn't happen */
413 if (audit_pid) { 413 if (audit_pid) {
414 pr_err("*NO* daemon at audit_pid=%d\n", audit_pid); 414 pr_err("*NO* daemon at audit_pid=%d\n", audit_pid);
415 audit_log_lost("auditd disappeared\n"); 415 audit_log_lost("auditd disappeared");
416 audit_pid = 0; 416 audit_pid = 0;
417 audit_sock = NULL; 417 audit_sock = NULL;
418 } 418 }
@@ -607,7 +607,7 @@ static int audit_netlink_ok(struct sk_buff *skb, u16 msg_type)
607{ 607{
608 int err = 0; 608 int err = 0;
609 609
610 /* Only support the initial namespaces for now. */ 610 /* Only support initial user namespace for now. */
611 /* 611 /*
612 * We return ECONNREFUSED because it tricks userspace into thinking 612 * We return ECONNREFUSED because it tricks userspace into thinking
613 * that audit was not configured into the kernel. Lots of users 613 * that audit was not configured into the kernel. Lots of users
@@ -618,8 +618,7 @@ static int audit_netlink_ok(struct sk_buff *skb, u16 msg_type)
618 * userspace will reject all logins. This should be removed when we 618 * userspace will reject all logins. This should be removed when we
619 * support non init namespaces!! 619 * support non init namespaces!!
620 */ 620 */
621 if ((current_user_ns() != &init_user_ns) || 621 if (current_user_ns() != &init_user_ns)
622 (task_active_pid_ns(current) != &init_pid_ns))
623 return -ECONNREFUSED; 622 return -ECONNREFUSED;
624 623
625 switch (msg_type) { 624 switch (msg_type) {
@@ -639,6 +638,11 @@ static int audit_netlink_ok(struct sk_buff *skb, u16 msg_type)
639 case AUDIT_TTY_SET: 638 case AUDIT_TTY_SET:
640 case AUDIT_TRIM: 639 case AUDIT_TRIM:
641 case AUDIT_MAKE_EQUIV: 640 case AUDIT_MAKE_EQUIV:
641 /* Only support auditd and auditctl in initial pid namespace
642 * for now. */
643 if ((task_active_pid_ns(current) != &init_pid_ns))
644 return -EPERM;
645
642 if (!capable(CAP_AUDIT_CONTROL)) 646 if (!capable(CAP_AUDIT_CONTROL))
643 err = -EPERM; 647 err = -EPERM;
644 break; 648 break;
@@ -659,6 +663,7 @@ static int audit_log_common_recv_msg(struct audit_buffer **ab, u16 msg_type)
659{ 663{
660 int rc = 0; 664 int rc = 0;
661 uid_t uid = from_kuid(&init_user_ns, current_uid()); 665 uid_t uid = from_kuid(&init_user_ns, current_uid());
666 pid_t pid = task_tgid_nr(current);
662 667
663 if (!audit_enabled && msg_type != AUDIT_USER_AVC) { 668 if (!audit_enabled && msg_type != AUDIT_USER_AVC) {
664 *ab = NULL; 669 *ab = NULL;
@@ -668,7 +673,7 @@ static int audit_log_common_recv_msg(struct audit_buffer **ab, u16 msg_type)
668 *ab = audit_log_start(NULL, GFP_KERNEL, msg_type); 673 *ab = audit_log_start(NULL, GFP_KERNEL, msg_type);
669 if (unlikely(!*ab)) 674 if (unlikely(!*ab))
670 return rc; 675 return rc;
671 audit_log_format(*ab, "pid=%d uid=%u", task_tgid_vnr(current), uid); 676 audit_log_format(*ab, "pid=%d uid=%u", pid, uid);
672 audit_log_session_info(*ab); 677 audit_log_session_info(*ab);
673 audit_log_task_context(*ab); 678 audit_log_task_context(*ab);
674 679
@@ -1097,7 +1102,7 @@ static void __net_exit audit_net_exit(struct net *net)
1097 audit_sock = NULL; 1102 audit_sock = NULL;
1098 } 1103 }
1099 1104
1100 rcu_assign_pointer(aunet->nlsk, NULL); 1105 RCU_INIT_POINTER(aunet->nlsk, NULL);
1101 synchronize_net(); 1106 synchronize_net();
1102 netlink_kernel_release(sock); 1107 netlink_kernel_release(sock);
1103} 1108}
@@ -1829,11 +1834,11 @@ void audit_log_task_info(struct audit_buffer *ab, struct task_struct *tsk)
1829 spin_unlock_irq(&tsk->sighand->siglock); 1834 spin_unlock_irq(&tsk->sighand->siglock);
1830 1835
1831 audit_log_format(ab, 1836 audit_log_format(ab,
1832 " ppid=%ld pid=%d auid=%u uid=%u gid=%u" 1837 " ppid=%d pid=%d auid=%u uid=%u gid=%u"
1833 " euid=%u suid=%u fsuid=%u" 1838 " euid=%u suid=%u fsuid=%u"
1834 " egid=%u sgid=%u fsgid=%u tty=%s ses=%u", 1839 " egid=%u sgid=%u fsgid=%u tty=%s ses=%u",
1835 sys_getppid(), 1840 task_ppid_nr(tsk),
1836 tsk->pid, 1841 task_pid_nr(tsk),
1837 from_kuid(&init_user_ns, audit_get_loginuid(tsk)), 1842 from_kuid(&init_user_ns, audit_get_loginuid(tsk)),
1838 from_kuid(&init_user_ns, cred->uid), 1843 from_kuid(&init_user_ns, cred->uid),
1839 from_kgid(&init_user_ns, cred->gid), 1844 from_kgid(&init_user_ns, cred->gid),
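
The format-string change above goes together with dropping sys_getppid() and raw tsk->pid in favour of the task_*_nr() helpers, which report identifiers of the audited task itself, numbered in the initial pid namespace. For orientation, the naming convention of the helpers used throughout this series (declarations with informal comments, not the kernel's exact bodies):

        pid_t task_pid_nr(struct task_struct *t);   /* t's pid, initial pid namespace          */
        pid_t task_tgid_nr(struct task_struct *t);  /* t's thread-group id, initial namespace  */
        pid_t task_ppid_nr(struct task_struct *t);  /* tgid of t's parent, initial namespace   */
        pid_t task_pid_vnr(struct task_struct *t);  /* t's pid as seen from current's namespace */
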
diff --git a/kernel/audit.h b/kernel/audit.h
index 8df132214606..7bb65730c890 100644
--- a/kernel/audit.h
+++ b/kernel/audit.h
@@ -106,6 +106,11 @@ struct audit_names {
106 bool should_free; 106 bool should_free;
107}; 107};
108 108
109struct audit_proctitle {
110 int len; /* length of the cmdline field. */
111 char *value; /* the cmdline field */
112};
113
109/* The per-task audit context. */ 114/* The per-task audit context. */
110struct audit_context { 115struct audit_context {
111 int dummy; /* must be the first element */ 116 int dummy; /* must be the first element */
@@ -202,6 +207,7 @@ struct audit_context {
202 } execve; 207 } execve;
203 }; 208 };
204 int fds[2]; 209 int fds[2];
210 struct audit_proctitle proctitle;
205 211
206#if AUDIT_DEBUG 212#if AUDIT_DEBUG
207 int put_count; 213 int put_count;
diff --git a/kernel/auditfilter.c b/kernel/auditfilter.c
index 92062fd6cc8c..8e9bc9c3dbb7 100644
--- a/kernel/auditfilter.c
+++ b/kernel/auditfilter.c
@@ -19,6 +19,8 @@
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 */ 20 */
21 21
22#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
23
22#include <linux/kernel.h> 24#include <linux/kernel.h>
23#include <linux/audit.h> 25#include <linux/audit.h>
24#include <linux/kthread.h> 26#include <linux/kthread.h>
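
Defining pr_fmt() before the first include lets every pr_err()/pr_warn() in this file pick up a common prefix, which is why the longer printk(KERN_ERR ...) calls below can be shortened without losing their tag. The mechanism is plain macro expansion in printk.h; a simplified sketch:

        #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt     /* must come before the includes */
        #define pr_err(fmt, ...) printk(KERN_ERR pr_fmt(fmt), ##__VA_ARGS__)

        pr_err("AUDIT_POSSIBLE is deprecated\n");
        /* expands to: printk(KERN_ERR KBUILD_MODNAME ": " "AUDIT_POSSIBLE is deprecated\n"); */
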
@@ -226,7 +228,7 @@ static int audit_match_signal(struct audit_entry *entry)
226#endif 228#endif
227 229
228/* Common user-space to kernel rule translation. */ 230/* Common user-space to kernel rule translation. */
229static inline struct audit_entry *audit_to_entry_common(struct audit_rule *rule) 231static inline struct audit_entry *audit_to_entry_common(struct audit_rule_data *rule)
230{ 232{
231 unsigned listnr; 233 unsigned listnr;
232 struct audit_entry *entry; 234 struct audit_entry *entry;
@@ -249,7 +251,7 @@ static inline struct audit_entry *audit_to_entry_common(struct audit_rule *rule)
249 ; 251 ;
250 } 252 }
251 if (unlikely(rule->action == AUDIT_POSSIBLE)) { 253 if (unlikely(rule->action == AUDIT_POSSIBLE)) {
252 printk(KERN_ERR "AUDIT_POSSIBLE is deprecated\n"); 254 pr_err("AUDIT_POSSIBLE is deprecated\n");
253 goto exit_err; 255 goto exit_err;
254 } 256 }
255 if (rule->action != AUDIT_NEVER && rule->action != AUDIT_ALWAYS) 257 if (rule->action != AUDIT_NEVER && rule->action != AUDIT_ALWAYS)
@@ -403,7 +405,7 @@ static struct audit_entry *audit_data_to_entry(struct audit_rule_data *data,
403 int i; 405 int i;
404 char *str; 406 char *str;
405 407
406 entry = audit_to_entry_common((struct audit_rule *)data); 408 entry = audit_to_entry_common(data);
407 if (IS_ERR(entry)) 409 if (IS_ERR(entry))
408 goto exit_nofree; 410 goto exit_nofree;
409 411
@@ -431,6 +433,19 @@ static struct audit_entry *audit_data_to_entry(struct audit_rule_data *data,
431 f->val = 0; 433 f->val = 0;
432 } 434 }
433 435
436 if ((f->type == AUDIT_PID) || (f->type == AUDIT_PPID)) {
437 struct pid *pid;
438 rcu_read_lock();
439 pid = find_vpid(f->val);
440 if (!pid) {
441 rcu_read_unlock();
442 err = -ESRCH;
443 goto exit_free;
444 }
445 f->val = pid_nr(pid);
446 rcu_read_unlock();
447 }
448
434 err = audit_field_valid(entry, f); 449 err = audit_field_valid(entry, f);
435 if (err) 450 if (err)
436 goto exit_free; 451 goto exit_free;
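
The new block converts a pid supplied in an AUDIT_PID/AUDIT_PPID rule (interpreted in the caller's pid namespace, hence find_vpid()) into the global number the filters compare against, returning -ESRCH if no such task exists. The translate-under-RCU pattern is the standard one; a minimal sketch:

        /* Translate a namespace-local pid number into its init-namespace number.
         * Sketch only; error handling collapsed to a single -ESRCH. */
        static int vpid_to_global(pid_t vnr, pid_t *out)
        {
                struct pid *pid;
                int ret = -ESRCH;

                rcu_read_lock();
                pid = find_vpid(vnr);           /* lookup in current's pid namespace */
                if (pid) {
                        *out = pid_nr(pid);     /* number in the initial namespace */
                        ret = 0;
                }
                rcu_read_unlock();
                return ret;
        }
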
@@ -479,8 +494,8 @@ static struct audit_entry *audit_data_to_entry(struct audit_rule_data *data,
479 /* Keep currently invalid fields around in case they 494 /* Keep currently invalid fields around in case they
480 * become valid after a policy reload. */ 495 * become valid after a policy reload. */
481 if (err == -EINVAL) { 496 if (err == -EINVAL) {
482 printk(KERN_WARNING "audit rule for LSM " 497 pr_warn("audit rule for LSM \'%s\' is invalid\n",
483 "\'%s\' is invalid\n", str); 498 str);
484 err = 0; 499 err = 0;
485 } 500 }
486 if (err) { 501 if (err) {
@@ -709,8 +724,8 @@ static inline int audit_dupe_lsm_field(struct audit_field *df,
709 /* Keep currently invalid fields around in case they 724 /* Keep currently invalid fields around in case they
710 * become valid after a policy reload. */ 725 * become valid after a policy reload. */
711 if (ret == -EINVAL) { 726 if (ret == -EINVAL) {
712 printk(KERN_WARNING "audit rule for LSM \'%s\' is " 727 pr_warn("audit rule for LSM \'%s\' is invalid\n",
713 "invalid\n", df->lsm_str); 728 df->lsm_str);
714 ret = 0; 729 ret = 0;
715 } 730 }
716 731
@@ -1240,12 +1255,14 @@ static int audit_filter_user_rules(struct audit_krule *rule, int type,
1240 1255
1241 for (i = 0; i < rule->field_count; i++) { 1256 for (i = 0; i < rule->field_count; i++) {
1242 struct audit_field *f = &rule->fields[i]; 1257 struct audit_field *f = &rule->fields[i];
1258 pid_t pid;
1243 int result = 0; 1259 int result = 0;
1244 u32 sid; 1260 u32 sid;
1245 1261
1246 switch (f->type) { 1262 switch (f->type) {
1247 case AUDIT_PID: 1263 case AUDIT_PID:
1248 result = audit_comparator(task_pid_vnr(current), f->op, f->val); 1264 pid = task_pid_nr(current);
1265 result = audit_comparator(pid, f->op, f->val);
1249 break; 1266 break;
1250 case AUDIT_UID: 1267 case AUDIT_UID:
1251 result = audit_uid_comparator(current_uid(), f->op, f->uid); 1268 result = audit_uid_comparator(current_uid(), f->op, f->uid);
diff --git a/kernel/auditsc.c b/kernel/auditsc.c
index 7aef2f4b6c64..f251a5e8d17a 100644
--- a/kernel/auditsc.c
+++ b/kernel/auditsc.c
@@ -42,6 +42,8 @@
42 * and <dustin.kirkland@us.ibm.com> for LSPP certification compliance. 42 * and <dustin.kirkland@us.ibm.com> for LSPP certification compliance.
43 */ 43 */
44 44
45#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
46
45#include <linux/init.h> 47#include <linux/init.h>
46#include <asm/types.h> 48#include <asm/types.h>
47#include <linux/atomic.h> 49#include <linux/atomic.h>
@@ -68,6 +70,7 @@
68#include <linux/capability.h> 70#include <linux/capability.h>
69#include <linux/fs_struct.h> 71#include <linux/fs_struct.h>
70#include <linux/compat.h> 72#include <linux/compat.h>
73#include <linux/ctype.h>
71 74
72#include "audit.h" 75#include "audit.h"
73 76
@@ -79,6 +82,9 @@
79/* no execve audit message should be longer than this (userspace limits) */ 82/* no execve audit message should be longer than this (userspace limits) */
80#define MAX_EXECVE_AUDIT_LEN 7500 83#define MAX_EXECVE_AUDIT_LEN 7500
81 84
85/* max length to print of cmdline/proctitle value during audit */
86#define MAX_PROCTITLE_AUDIT_LEN 128
87
82/* number of audit rules */ 88/* number of audit rules */
83int audit_n_rules; 89int audit_n_rules;
84 90
@@ -451,15 +457,17 @@ static int audit_filter_rules(struct task_struct *tsk,
451 struct audit_field *f = &rule->fields[i]; 457 struct audit_field *f = &rule->fields[i];
452 struct audit_names *n; 458 struct audit_names *n;
453 int result = 0; 459 int result = 0;
460 pid_t pid;
454 461
455 switch (f->type) { 462 switch (f->type) {
456 case AUDIT_PID: 463 case AUDIT_PID:
457 result = audit_comparator(tsk->pid, f->op, f->val); 464 pid = task_pid_nr(tsk);
465 result = audit_comparator(pid, f->op, f->val);
458 break; 466 break;
459 case AUDIT_PPID: 467 case AUDIT_PPID:
460 if (ctx) { 468 if (ctx) {
461 if (!ctx->ppid) 469 if (!ctx->ppid)
462 ctx->ppid = sys_getppid(); 470 ctx->ppid = task_ppid_nr(tsk);
463 result = audit_comparator(ctx->ppid, f->op, f->val); 471 result = audit_comparator(ctx->ppid, f->op, f->val);
464 } 472 }
465 break; 473 break;
@@ -805,7 +813,8 @@ void audit_filter_inodes(struct task_struct *tsk, struct audit_context *ctx)
805 rcu_read_unlock(); 813 rcu_read_unlock();
806} 814}
807 815
808static inline struct audit_context *audit_get_context(struct task_struct *tsk, 816/* Transfer the audit context pointer to the caller, clearing it in the tsk's struct */
817static inline struct audit_context *audit_take_context(struct task_struct *tsk,
809 int return_valid, 818 int return_valid,
810 long return_code) 819 long return_code)
811{ 820{
@@ -842,6 +851,13 @@ static inline struct audit_context *audit_get_context(struct task_struct *tsk,
842 return context; 851 return context;
843} 852}
844 853
854static inline void audit_proctitle_free(struct audit_context *context)
855{
856 kfree(context->proctitle.value);
857 context->proctitle.value = NULL;
858 context->proctitle.len = 0;
859}
860
845static inline void audit_free_names(struct audit_context *context) 861static inline void audit_free_names(struct audit_context *context)
846{ 862{
847 struct audit_names *n, *next; 863 struct audit_names *n, *next;
@@ -850,16 +866,15 @@ static inline void audit_free_names(struct audit_context *context)
850 if (context->put_count + context->ino_count != context->name_count) { 866 if (context->put_count + context->ino_count != context->name_count) {
851 int i = 0; 867 int i = 0;
852 868
853 printk(KERN_ERR "%s:%d(:%d): major=%d in_syscall=%d" 869 pr_err("%s:%d(:%d): major=%d in_syscall=%d"
854 " name_count=%d put_count=%d" 870 " name_count=%d put_count=%d ino_count=%d"
855 " ino_count=%d [NOT freeing]\n", 871 " [NOT freeing]\n", __FILE__, __LINE__,
856 __FILE__, __LINE__,
857 context->serial, context->major, context->in_syscall, 872 context->serial, context->major, context->in_syscall,
858 context->name_count, context->put_count, 873 context->name_count, context->put_count,
859 context->ino_count); 874 context->ino_count);
860 list_for_each_entry(n, &context->names_list, list) { 875 list_for_each_entry(n, &context->names_list, list) {
861 printk(KERN_ERR "names[%d] = %p = %s\n", i++, 876 pr_err("names[%d] = %p = %s\n", i++, n->name,
862 n->name, n->name->name ?: "(null)"); 877 n->name->name ?: "(null)");
863 } 878 }
864 dump_stack(); 879 dump_stack();
865 return; 880 return;
@@ -955,6 +970,7 @@ static inline void audit_free_context(struct audit_context *context)
955 audit_free_aux(context); 970 audit_free_aux(context);
956 kfree(context->filterkey); 971 kfree(context->filterkey);
957 kfree(context->sockaddr); 972 kfree(context->sockaddr);
973 audit_proctitle_free(context);
958 kfree(context); 974 kfree(context);
959} 975}
960 976
@@ -1157,7 +1173,7 @@ static void audit_log_execve_info(struct audit_context *context,
1157 */ 1173 */
1158 buf = kmalloc(MAX_EXECVE_AUDIT_LEN + 1, GFP_KERNEL); 1174 buf = kmalloc(MAX_EXECVE_AUDIT_LEN + 1, GFP_KERNEL);
1159 if (!buf) { 1175 if (!buf) {
1160 audit_panic("out of memory for argv string\n"); 1176 audit_panic("out of memory for argv string");
1161 return; 1177 return;
1162 } 1178 }
1163 1179
@@ -1271,6 +1287,59 @@ static void show_special(struct audit_context *context, int *call_panic)
1271 audit_log_end(ab); 1287 audit_log_end(ab);
1272} 1288}
1273 1289
1290static inline int audit_proctitle_rtrim(char *proctitle, int len)
1291{
1292 char *end = proctitle + len - 1;
1293 while (end > proctitle && !isprint(*end))
1294 end--;
1295
1296 /* catch the case where proctitle is only 1 non-print character */
1297 len = end - proctitle + 1;
1298 len -= isprint(proctitle[len-1]) == 0;
1299 return len;
1300}
1301
1302static void audit_log_proctitle(struct task_struct *tsk,
1303 struct audit_context *context)
1304{
1305 int res;
1306 char *buf;
1307 char *msg = "(null)";
1308 int len = strlen(msg);
1309 struct audit_buffer *ab;
1310
1311 ab = audit_log_start(context, GFP_KERNEL, AUDIT_PROCTITLE);
1312 if (!ab)
1313 return; /* audit_panic or being filtered */
1314
1315 audit_log_format(ab, "proctitle=");
1316
1317 /* Not cached */
1318 if (!context->proctitle.value) {
1319 buf = kmalloc(MAX_PROCTITLE_AUDIT_LEN, GFP_KERNEL);
1320 if (!buf)
1321 goto out;
1322 /* Historically called this from procfs naming */
1323 res = get_cmdline(tsk, buf, MAX_PROCTITLE_AUDIT_LEN);
1324 if (res == 0) {
1325 kfree(buf);
1326 goto out;
1327 }
1328 res = audit_proctitle_rtrim(buf, res);
1329 if (res == 0) {
1330 kfree(buf);
1331 goto out;
1332 }
1333 context->proctitle.value = buf;
1334 context->proctitle.len = res;
1335 }
1336 msg = context->proctitle.value;
1337 len = context->proctitle.len;
1338out:
1339 audit_log_n_untrustedstring(ab, msg, len);
1340 audit_log_end(ab);
1341}
1342
1274static void audit_log_exit(struct audit_context *context, struct task_struct *tsk) 1343static void audit_log_exit(struct audit_context *context, struct task_struct *tsk)
1275{ 1344{
1276 int i, call_panic = 0; 1345 int i, call_panic = 0;
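
get_cmdline() copies the raw argv area, so the buffer that audit_log_proctitle() caches normally ends in NUL separators; audit_proctitle_rtrim() walks back over anything non-printable so the cached proctitle carries no trailing NULs (embedded separators are left for audit_log_n_untrustedstring() to encode). The trimming rule itself is plain C and can be exercised in isolation; a hypothetical standalone test, not kernel code:

        #include <assert.h>
        #include <ctype.h>
        #include <stdio.h>

        /* Same rule as audit_proctitle_rtrim(): drop trailing non-printable bytes,
         * and collapse a buffer that is nothing but such bytes to length 0. */
        static int rtrim_unprintable(const char *buf, int len)
        {
                const char *end = buf + len - 1;

                while (end > buf && !isprint((unsigned char)*end))
                        end--;
                len = end - buf + 1;
                len -= isprint((unsigned char)buf[len - 1]) == 0;
                return len;
        }

        int main(void)
        {
                char argv_area[] = { 'c', 'a', 't', '\0', '-', 'n', '\0' };

                /* "cat\0-n\0": trailing NUL dropped, embedded NUL kept */
                assert(rtrim_unprintable(argv_area, sizeof(argv_area)) == 6);
                assert(rtrim_unprintable("\0", 1) == 0);
                printf("ok\n");
                return 0;
        }
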
@@ -1388,6 +1457,8 @@ static void audit_log_exit(struct audit_context *context, struct task_struct *ts
1388 audit_log_name(context, n, NULL, i++, &call_panic); 1457 audit_log_name(context, n, NULL, i++, &call_panic);
1389 } 1458 }
1390 1459
1460 audit_log_proctitle(tsk, context);
1461
1391 /* Send end of event record to help user space know we are finished */ 1462 /* Send end of event record to help user space know we are finished */
1392 ab = audit_log_start(context, GFP_KERNEL, AUDIT_EOE); 1463 ab = audit_log_start(context, GFP_KERNEL, AUDIT_EOE);
1393 if (ab) 1464 if (ab)
@@ -1406,7 +1477,7 @@ void __audit_free(struct task_struct *tsk)
1406{ 1477{
1407 struct audit_context *context; 1478 struct audit_context *context;
1408 1479
1409 context = audit_get_context(tsk, 0, 0); 1480 context = audit_take_context(tsk, 0, 0);
1410 if (!context) 1481 if (!context)
1411 return; 1482 return;
1412 1483
@@ -1500,7 +1571,7 @@ void __audit_syscall_exit(int success, long return_code)
1500 else 1571 else
1501 success = AUDITSC_FAILURE; 1572 success = AUDITSC_FAILURE;
1502 1573
1503 context = audit_get_context(tsk, success, return_code); 1574 context = audit_take_context(tsk, success, return_code);
1504 if (!context) 1575 if (!context)
1505 return; 1576 return;
1506 1577
@@ -1550,7 +1621,7 @@ static inline void handle_one(const struct inode *inode)
1550 if (likely(put_tree_ref(context, chunk))) 1621 if (likely(put_tree_ref(context, chunk)))
1551 return; 1622 return;
1552 if (unlikely(!grow_tree_refs(context))) { 1623 if (unlikely(!grow_tree_refs(context))) {
1553 printk(KERN_WARNING "out of memory, audit has lost a tree reference\n"); 1624 pr_warn("out of memory, audit has lost a tree reference\n");
1554 audit_set_auditable(context); 1625 audit_set_auditable(context);
1555 audit_put_chunk(chunk); 1626 audit_put_chunk(chunk);
1556 unroll_tree_refs(context, p, count); 1627 unroll_tree_refs(context, p, count);
@@ -1609,8 +1680,7 @@ retry:
1609 goto retry; 1680 goto retry;
1610 } 1681 }
1611 /* too bad */ 1682 /* too bad */
1612 printk(KERN_WARNING 1683 pr_warn("out of memory, audit has lost a tree reference\n");
1613 "out of memory, audit has lost a tree reference\n");
1614 unroll_tree_refs(context, p, count); 1684 unroll_tree_refs(context, p, count);
1615 audit_set_auditable(context); 1685 audit_set_auditable(context);
1616 return; 1686 return;
@@ -1682,7 +1752,7 @@ void __audit_getname(struct filename *name)
1682 1752
1683 if (!context->in_syscall) { 1753 if (!context->in_syscall) {
1684#if AUDIT_DEBUG == 2 1754#if AUDIT_DEBUG == 2
1685 printk(KERN_ERR "%s:%d(:%d): ignoring getname(%p)\n", 1755 pr_err("%s:%d(:%d): ignoring getname(%p)\n",
1686 __FILE__, __LINE__, context->serial, name); 1756 __FILE__, __LINE__, context->serial, name);
1687 dump_stack(); 1757 dump_stack();
1688#endif 1758#endif
@@ -1721,15 +1791,15 @@ void audit_putname(struct filename *name)
1721 BUG_ON(!context); 1791 BUG_ON(!context);
1722 if (!name->aname || !context->in_syscall) { 1792 if (!name->aname || !context->in_syscall) {
1723#if AUDIT_DEBUG == 2 1793#if AUDIT_DEBUG == 2
1724 printk(KERN_ERR "%s:%d(:%d): final_putname(%p)\n", 1794 pr_err("%s:%d(:%d): final_putname(%p)\n",
1725 __FILE__, __LINE__, context->serial, name); 1795 __FILE__, __LINE__, context->serial, name);
1726 if (context->name_count) { 1796 if (context->name_count) {
1727 struct audit_names *n; 1797 struct audit_names *n;
1728 int i = 0; 1798 int i = 0;
1729 1799
1730 list_for_each_entry(n, &context->names_list, list) 1800 list_for_each_entry(n, &context->names_list, list)
1731 printk(KERN_ERR "name[%d] = %p = %s\n", i++, 1801 pr_err("name[%d] = %p = %s\n", i++, n->name,
1732 n->name, n->name->name ?: "(null)"); 1802 n->name->name ?: "(null)");
1733 } 1803 }
1734#endif 1804#endif
1735 final_putname(name); 1805 final_putname(name);
@@ -1738,9 +1808,8 @@ void audit_putname(struct filename *name)
1738 else { 1808 else {
1739 ++context->put_count; 1809 ++context->put_count;
1740 if (context->put_count > context->name_count) { 1810 if (context->put_count > context->name_count) {
1741 printk(KERN_ERR "%s:%d(:%d): major=%d" 1811 pr_err("%s:%d(:%d): major=%d in_syscall=%d putname(%p)"
1742 " in_syscall=%d putname(%p) name_count=%d" 1812 " name_count=%d put_count=%d\n",
1743 " put_count=%d\n",
1744 __FILE__, __LINE__, 1813 __FILE__, __LINE__,
1745 context->serial, context->major, 1814 context->serial, context->major,
1746 context->in_syscall, name->name, 1815 context->in_syscall, name->name,
@@ -1981,12 +2050,10 @@ static void audit_log_set_loginuid(kuid_t koldloginuid, kuid_t kloginuid,
1981 ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_LOGIN); 2050 ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_LOGIN);
1982 if (!ab) 2051 if (!ab)
1983 return; 2052 return;
1984 audit_log_format(ab, "pid=%d uid=%u" 2053 audit_log_format(ab, "pid=%d uid=%u", task_pid_nr(current), uid);
1985 " old-auid=%u new-auid=%u old-ses=%u new-ses=%u" 2054 audit_log_task_context(ab);
1986 " res=%d", 2055 audit_log_format(ab, " old-auid=%u auid=%u old-ses=%u ses=%u res=%d",
1987 current->pid, uid, 2056 oldloginuid, loginuid, oldsessionid, sessionid, !rc);
1988 oldloginuid, loginuid, oldsessionid, sessionid,
1989 !rc);
1990 audit_log_end(ab); 2057 audit_log_end(ab);
1991} 2058}
1992 2059
@@ -2208,7 +2275,7 @@ void __audit_ptrace(struct task_struct *t)
2208{ 2275{
2209 struct audit_context *context = current->audit_context; 2276 struct audit_context *context = current->audit_context;
2210 2277
2211 context->target_pid = t->pid; 2278 context->target_pid = task_pid_nr(t);
2212 context->target_auid = audit_get_loginuid(t); 2279 context->target_auid = audit_get_loginuid(t);
2213 context->target_uid = task_uid(t); 2280 context->target_uid = task_uid(t);
2214 context->target_sessionid = audit_get_sessionid(t); 2281 context->target_sessionid = audit_get_sessionid(t);
@@ -2233,7 +2300,7 @@ int __audit_signal_info(int sig, struct task_struct *t)
2233 2300
2234 if (audit_pid && t->tgid == audit_pid) { 2301 if (audit_pid && t->tgid == audit_pid) {
2235 if (sig == SIGTERM || sig == SIGHUP || sig == SIGUSR1 || sig == SIGUSR2) { 2302 if (sig == SIGTERM || sig == SIGHUP || sig == SIGUSR1 || sig == SIGUSR2) {
2236 audit_sig_pid = tsk->pid; 2303 audit_sig_pid = task_pid_nr(tsk);
2237 if (uid_valid(tsk->loginuid)) 2304 if (uid_valid(tsk->loginuid))
2238 audit_sig_uid = tsk->loginuid; 2305 audit_sig_uid = tsk->loginuid;
2239 else 2306 else
@@ -2247,7 +2314,7 @@ int __audit_signal_info(int sig, struct task_struct *t)
2247 /* optimize the common case by putting first signal recipient directly 2314 /* optimize the common case by putting first signal recipient directly
2248 * in audit_context */ 2315 * in audit_context */
2249 if (!ctx->target_pid) { 2316 if (!ctx->target_pid) {
2250 ctx->target_pid = t->tgid; 2317 ctx->target_pid = task_tgid_nr(t);
2251 ctx->target_auid = audit_get_loginuid(t); 2318 ctx->target_auid = audit_get_loginuid(t);
2252 ctx->target_uid = t_uid; 2319 ctx->target_uid = t_uid;
2253 ctx->target_sessionid = audit_get_sessionid(t); 2320 ctx->target_sessionid = audit_get_sessionid(t);
@@ -2268,7 +2335,7 @@ int __audit_signal_info(int sig, struct task_struct *t)
2268 } 2335 }
2269 BUG_ON(axp->pid_count >= AUDIT_AUX_PIDS); 2336 BUG_ON(axp->pid_count >= AUDIT_AUX_PIDS);
2270 2337
2271 axp->target_pid[axp->pid_count] = t->tgid; 2338 axp->target_pid[axp->pid_count] = task_tgid_nr(t);
2272 axp->target_auid[axp->pid_count] = audit_get_loginuid(t); 2339 axp->target_auid[axp->pid_count] = audit_get_loginuid(t);
2273 axp->target_uid[axp->pid_count] = t_uid; 2340 axp->target_uid[axp->pid_count] = t_uid;
2274 axp->target_sessionid[axp->pid_count] = audit_get_sessionid(t); 2341 axp->target_sessionid[axp->pid_count] = audit_get_sessionid(t);
@@ -2368,7 +2435,7 @@ static void audit_log_task(struct audit_buffer *ab)
2368 from_kgid(&init_user_ns, gid), 2435 from_kgid(&init_user_ns, gid),
2369 sessionid); 2436 sessionid);
2370 audit_log_task_context(ab); 2437 audit_log_task_context(ab);
2371 audit_log_format(ab, " pid=%d comm=", current->pid); 2438 audit_log_format(ab, " pid=%d comm=", task_pid_nr(current));
2372 audit_log_untrustedstring(ab, current->comm); 2439 audit_log_untrustedstring(ab, current->comm);
2373 if (mm) { 2440 if (mm) {
2374 down_read(&mm->mmap_sem); 2441 down_read(&mm->mmap_sem);
diff --git a/kernel/futex.c b/kernel/futex.c
index 6801b3751a95..5f589279e462 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -70,7 +70,10 @@
70#include "locking/rtmutex_common.h" 70#include "locking/rtmutex_common.h"
71 71
72/* 72/*
73 * Basic futex operation and ordering guarantees: 73 * READ this before attempting to hack on futexes!
74 *
75 * Basic futex operation and ordering guarantees
76 * =============================================
74 * 77 *
75 * The waiter reads the futex value in user space and calls 78 * The waiter reads the futex value in user space and calls
76 * futex_wait(). This function computes the hash bucket and acquires 79 * futex_wait(). This function computes the hash bucket and acquires
@@ -119,7 +122,7 @@
119 * sys_futex(WAIT, futex, val); 122 * sys_futex(WAIT, futex, val);
120 * futex_wait(futex, val); 123 * futex_wait(futex, val);
121 * 124 *
122 * waiters++; 125 * waiters++; (a)
123 * mb(); (A) <-- paired with -. 126 * mb(); (A) <-- paired with -.
124 * | 127 * |
125 * lock(hash_bucket(futex)); | 128 * lock(hash_bucket(futex)); |
@@ -135,14 +138,14 @@
135 * unlock(hash_bucket(futex)); 138 * unlock(hash_bucket(futex));
136 * schedule(); if (waiters) 139 * schedule(); if (waiters)
137 * lock(hash_bucket(futex)); 140 * lock(hash_bucket(futex));
138 * wake_waiters(futex); 141 * else wake_waiters(futex);
139 * unlock(hash_bucket(futex)); 142 * waiters--; (b) unlock(hash_bucket(futex));
140 * 143 *
141 * Where (A) orders the waiters increment and the futex value read -- this 144 * Where (A) orders the waiters increment and the futex value read through
142 * is guaranteed by the head counter in the hb spinlock; and where (B) 145 * atomic operations (see hb_waiters_inc) and where (B) orders the write
143 * orders the write to futex and the waiters read -- this is done by the 146 * to futex and the waiters read -- this is done by the barriers in
144 * barriers in get_futex_key_refs(), through either ihold or atomic_inc, 147 * get_futex_key_refs(), through either ihold or atomic_inc, depending on the
145 * depending on the futex type. 148 * futex type.
146 * 149 *
147 * This yields the following case (where X:=waiters, Y:=futex): 150 * This yields the following case (where X:=waiters, Y:=futex):
148 * 151 *
@@ -155,6 +158,17 @@
155 * Which guarantees that x==0 && y==0 is impossible; which translates back into 158 * Which guarantees that x==0 && y==0 is impossible; which translates back into
156 * the guarantee that we cannot both miss the futex variable change and the 159 * the guarantee that we cannot both miss the futex variable change and the
157 * enqueue. 160 * enqueue.
161 *
162 * Note that a new waiter is accounted for in (a) even when it is possible that
163 * the wait call can return error, in which case we backtrack from it in (b).
164 * Refer to the comment in queue_lock().
165 *
166 * Similarly, in order to account for waiters being requeued on another
167 * address we always increment the waiters for the destination bucket before
168 * acquiring the lock. It then decrements them again after releasing it -
169 * the code that actually moves the futex(es) between hash buckets (requeue_futex)
170 * will do the additional required waiter count housekeeping. This is done for
171 * double_lock_hb() and double_unlock_hb(), respectively.
158 */ 172 */
159 173
160#ifndef CONFIG_HAVE_FUTEX_CMPXCHG 174#ifndef CONFIG_HAVE_FUTEX_CMPXCHG
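
The expanded comment block is describing the classic store-buffering pattern: each side stores to its own variable, executes a full barrier, then loads the other side's variable, and the barriers forbid both loads observing the initial zeroes. The same guarantee can be reproduced in userspace with C11 seq_cst fences; this is an analogy only, not the futex code:

        #include <pthread.h>
        #include <stdatomic.h>
        #include <stdio.h>

        /* X := waiters, Y := futex word, both initially 0. */
        static atomic_int X, Y;
        static int r_waiter, r_waker;

        static void *waiter(void *arg)
        {
                (void)arg;
                atomic_store_explicit(&X, 1, memory_order_relaxed);        /* waiters++   */
                atomic_thread_fence(memory_order_seq_cst);                 /* mb() (A)    */
                r_waiter = atomic_load_explicit(&Y, memory_order_relaxed); /* read futex  */
                return NULL;
        }

        static void *waker(void *arg)
        {
                (void)arg;
                atomic_store_explicit(&Y, 1, memory_order_relaxed);        /* futex = val */
                atomic_thread_fence(memory_order_seq_cst);                 /* mb() (B)    */
                r_waker = atomic_load_explicit(&X, memory_order_relaxed);  /* read waiters */
                return NULL;
        }

        int main(void)
        {
                pthread_t a, b;

                pthread_create(&a, NULL, waiter, NULL);
                pthread_create(&b, NULL, waker, NULL);
                pthread_join(a, NULL);
                pthread_join(b, NULL);
                /* The fences forbid r_waiter == 0 && r_waker == 0. */
                printf("waiter saw futex=%d, waker saw waiters=%d\n", r_waiter, r_waker);
                return 0;
        }
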
diff --git a/kernel/relay.c b/kernel/relay.c
index 52d6a6f56261..5a56d3c8dc03 100644
--- a/kernel/relay.c
+++ b/kernel/relay.c
@@ -1195,8 +1195,6 @@ static void relay_pipe_buf_release(struct pipe_inode_info *pipe,
1195 1195
1196static const struct pipe_buf_operations relay_pipe_buf_ops = { 1196static const struct pipe_buf_operations relay_pipe_buf_ops = {
1197 .can_merge = 0, 1197 .can_merge = 0,
1198 .map = generic_pipe_buf_map,
1199 .unmap = generic_pipe_buf_unmap,
1200 .confirm = generic_pipe_buf_confirm, 1198 .confirm = generic_pipe_buf_confirm,
1201 .release = relay_pipe_buf_release, 1199 .release = relay_pipe_buf_release,
1202 .steal = generic_pipe_buf_steal, 1200 .steal = generic_pipe_buf_steal,
@@ -1253,7 +1251,7 @@ static ssize_t subbuf_splice_actor(struct file *in,
1253 subbuf_pages = rbuf->chan->alloc_size >> PAGE_SHIFT; 1251 subbuf_pages = rbuf->chan->alloc_size >> PAGE_SHIFT;
1254 pidx = (read_start / PAGE_SIZE) % subbuf_pages; 1252 pidx = (read_start / PAGE_SIZE) % subbuf_pages;
1255 poff = read_start & ~PAGE_MASK; 1253 poff = read_start & ~PAGE_MASK;
1256 nr_pages = min_t(unsigned int, subbuf_pages, pipe->buffers); 1254 nr_pages = min_t(unsigned int, subbuf_pages, spd.nr_pages_max);
1257 1255
1258 for (total_len = 0; spd.nr_pages < nr_pages; spd.nr_pages++) { 1256 for (total_len = 0; spd.nr_pages < nr_pages; spd.nr_pages++) {
1259 unsigned int this_len, this_end, private; 1257 unsigned int this_len, this_end, private;
diff --git a/kernel/seccomp.c b/kernel/seccomp.c
index fd609bd9d6dd..590c37925084 100644
--- a/kernel/seccomp.c
+++ b/kernel/seccomp.c
@@ -69,18 +69,17 @@ static void populate_seccomp_data(struct seccomp_data *sd)
69{ 69{
70 struct task_struct *task = current; 70 struct task_struct *task = current;
71 struct pt_regs *regs = task_pt_regs(task); 71 struct pt_regs *regs = task_pt_regs(task);
72 unsigned long args[6];
72 73
73 sd->nr = syscall_get_nr(task, regs); 74 sd->nr = syscall_get_nr(task, regs);
74 sd->arch = syscall_get_arch(task, regs); 75 sd->arch = syscall_get_arch();
75 76 syscall_get_arguments(task, regs, 0, 6, args);
76 /* Unroll syscall_get_args to help gcc on arm. */ 77 sd->args[0] = args[0];
77 syscall_get_arguments(task, regs, 0, 1, (unsigned long *) &sd->args[0]); 78 sd->args[1] = args[1];
78 syscall_get_arguments(task, regs, 1, 1, (unsigned long *) &sd->args[1]); 79 sd->args[2] = args[2];
79 syscall_get_arguments(task, regs, 2, 1, (unsigned long *) &sd->args[2]); 80 sd->args[3] = args[3];
80 syscall_get_arguments(task, regs, 3, 1, (unsigned long *) &sd->args[3]); 81 sd->args[4] = args[4];
81 syscall_get_arguments(task, regs, 4, 1, (unsigned long *) &sd->args[4]); 82 sd->args[5] = args[5];
82 syscall_get_arguments(task, regs, 5, 1, (unsigned long *) &sd->args[5]);
83
84 sd->instruction_pointer = KSTK_EIP(task); 83 sd->instruction_pointer = KSTK_EIP(task);
85} 84}
86 85
@@ -348,7 +347,7 @@ static void seccomp_send_sigsys(int syscall, int reason)
348 info.si_code = SYS_SECCOMP; 347 info.si_code = SYS_SECCOMP;
349 info.si_call_addr = (void __user *)KSTK_EIP(current); 348 info.si_call_addr = (void __user *)KSTK_EIP(current);
350 info.si_errno = reason; 349 info.si_errno = reason;
351 info.si_arch = syscall_get_arch(current, task_pt_regs(current)); 350 info.si_arch = syscall_get_arch();
352 info.si_syscall = syscall; 351 info.si_syscall = syscall;
353 force_sig_info(SIGSYS, &info, current); 352 force_sig_info(SIGSYS, &info, current);
354} 353}
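
populate_seccomp_data() fills the struct seccomp_data that a userspace-installed BPF filter later inspects, so nr, arch and args[] must land in a well-defined layout no matter how the architecture exposes them. For context, a rough userspace consumer keyed off those same fields; the constants and the AUDIT_ARCH_X86_64 check assume an x86-64 build and are illustrative only:

        #include <errno.h>
        #include <stddef.h>
        #include <stdio.h>
        #include <sys/prctl.h>
        #include <sys/syscall.h>
        #include <linux/audit.h>
        #include <linux/filter.h>
        #include <linux/seccomp.h>

        int main(void)
        {
                struct sock_filter filter[] = {
                        /* Load seccomp_data.arch and kill on a foreign ABI. */
                        BPF_STMT(BPF_LD | BPF_W | BPF_ABS, offsetof(struct seccomp_data, arch)),
                        BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, AUDIT_ARCH_X86_64, 1, 0),
                        BPF_STMT(BPF_RET | BPF_K, SECCOMP_RET_KILL),
                        /* Load seccomp_data.nr and deny one syscall with EPERM. */
                        BPF_STMT(BPF_LD | BPF_W | BPF_ABS, offsetof(struct seccomp_data, nr)),
                        BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, __NR_uname, 0, 1),
                        BPF_STMT(BPF_RET | BPF_K, SECCOMP_RET_ERRNO | (EPERM & SECCOMP_RET_DATA)),
                        BPF_STMT(BPF_RET | BPF_K, SECCOMP_RET_ALLOW),
                };
                struct sock_fprog prog = {
                        .len = sizeof(filter) / sizeof(filter[0]),
                        .filter = filter,
                };

                if (prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0) ||
                    prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog)) {
                        perror("prctl");
                        return 1;
                }
                /* From here on, uname(2) fails with EPERM in this process. */
                return 0;
        }
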
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 9be67c5e5b0f..737b0efa1a62 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -3611,6 +3611,8 @@ static const char readme_msg[] =
3611#ifdef CONFIG_TRACER_SNAPSHOT 3611#ifdef CONFIG_TRACER_SNAPSHOT
3612 "\t\t snapshot\n" 3612 "\t\t snapshot\n"
3613#endif 3613#endif
3614 "\t\t dump\n"
3615 "\t\t cpudump\n"
3614 "\t example: echo do_fault:traceoff > set_ftrace_filter\n" 3616 "\t example: echo do_fault:traceoff > set_ftrace_filter\n"
3615 "\t echo do_trap:traceoff:3 > set_ftrace_filter\n" 3617 "\t echo do_trap:traceoff:3 > set_ftrace_filter\n"
3616 "\t The first one will disable tracing every time do_fault is hit\n" 3618 "\t The first one will disable tracing every time do_fault is hit\n"
@@ -4390,8 +4392,6 @@ static void tracing_spd_release_pipe(struct splice_pipe_desc *spd,
4390 4392
4391static const struct pipe_buf_operations tracing_pipe_buf_ops = { 4393static const struct pipe_buf_operations tracing_pipe_buf_ops = {
4392 .can_merge = 0, 4394 .can_merge = 0,
4393 .map = generic_pipe_buf_map,
4394 .unmap = generic_pipe_buf_unmap,
4395 .confirm = generic_pipe_buf_confirm, 4395 .confirm = generic_pipe_buf_confirm,
4396 .release = generic_pipe_buf_release, 4396 .release = generic_pipe_buf_release,
4397 .steal = generic_pipe_buf_steal, 4397 .steal = generic_pipe_buf_steal,
@@ -4486,7 +4486,7 @@ static ssize_t tracing_splice_read_pipe(struct file *filp,
4486 trace_access_lock(iter->cpu_file); 4486 trace_access_lock(iter->cpu_file);
4487 4487
4488 /* Fill as many pages as possible. */ 4488 /* Fill as many pages as possible. */
4489 for (i = 0, rem = len; i < pipe->buffers && rem; i++) { 4489 for (i = 0, rem = len; i < spd.nr_pages_max && rem; i++) {
4490 spd.pages[i] = alloc_page(GFP_KERNEL); 4490 spd.pages[i] = alloc_page(GFP_KERNEL);
4491 if (!spd.pages[i]) 4491 if (!spd.pages[i])
4492 break; 4492 break;
@@ -5279,8 +5279,6 @@ static void buffer_pipe_buf_get(struct pipe_inode_info *pipe,
5279/* Pipe buffer operations for a buffer. */ 5279/* Pipe buffer operations for a buffer. */
5280static const struct pipe_buf_operations buffer_pipe_buf_ops = { 5280static const struct pipe_buf_operations buffer_pipe_buf_ops = {
5281 .can_merge = 0, 5281 .can_merge = 0,
5282 .map = generic_pipe_buf_map,
5283 .unmap = generic_pipe_buf_unmap,
5284 .confirm = generic_pipe_buf_confirm, 5282 .confirm = generic_pipe_buf_confirm,
5285 .release = buffer_pipe_buf_release, 5283 .release = buffer_pipe_buf_release,
5286 .steal = generic_pipe_buf_steal, 5284 .steal = generic_pipe_buf_steal,
@@ -5356,7 +5354,7 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos,
5356 trace_access_lock(iter->cpu_file); 5354 trace_access_lock(iter->cpu_file);
5357 entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file); 5355 entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);
5358 5356
5359 for (i = 0; i < pipe->buffers && len && entries; i++, len -= PAGE_SIZE) { 5357 for (i = 0; i < spd.nr_pages_max && len && entries; i++, len -= PAGE_SIZE) {
5360 struct page *page; 5358 struct page *page;
5361 int r; 5359 int r;
5362 5360
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index 83a4378dc5e0..3ddfd8f62c05 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -223,24 +223,25 @@ int ftrace_event_reg(struct ftrace_event_call *call,
223{ 223{
224 struct ftrace_event_file *file = data; 224 struct ftrace_event_file *file = data;
225 225
226 WARN_ON(!(call->flags & TRACE_EVENT_FL_TRACEPOINT));
226 switch (type) { 227 switch (type) {
227 case TRACE_REG_REGISTER: 228 case TRACE_REG_REGISTER:
228 return tracepoint_probe_register(call->name, 229 return tracepoint_probe_register(call->tp,
229 call->class->probe, 230 call->class->probe,
230 file); 231 file);
231 case TRACE_REG_UNREGISTER: 232 case TRACE_REG_UNREGISTER:
232 tracepoint_probe_unregister(call->name, 233 tracepoint_probe_unregister(call->tp,
233 call->class->probe, 234 call->class->probe,
234 file); 235 file);
235 return 0; 236 return 0;
236 237
237#ifdef CONFIG_PERF_EVENTS 238#ifdef CONFIG_PERF_EVENTS
238 case TRACE_REG_PERF_REGISTER: 239 case TRACE_REG_PERF_REGISTER:
239 return tracepoint_probe_register(call->name, 240 return tracepoint_probe_register(call->tp,
240 call->class->perf_probe, 241 call->class->perf_probe,
241 call); 242 call);
242 case TRACE_REG_PERF_UNREGISTER: 243 case TRACE_REG_PERF_UNREGISTER:
243 tracepoint_probe_unregister(call->name, 244 tracepoint_probe_unregister(call->tp,
244 call->class->perf_probe, 245 call->class->perf_probe,
245 call); 246 call);
246 return 0; 247 return 0;
@@ -352,7 +353,7 @@ static int __ftrace_event_enable_disable(struct ftrace_event_file *file,
352 if (ret) { 353 if (ret) {
353 tracing_stop_cmdline_record(); 354 tracing_stop_cmdline_record();
354 pr_info("event trace: Could not enable event " 355 pr_info("event trace: Could not enable event "
355 "%s\n", call->name); 356 "%s\n", ftrace_event_name(call));
356 break; 357 break;
357 } 358 }
358 set_bit(FTRACE_EVENT_FL_ENABLED_BIT, &file->flags); 359 set_bit(FTRACE_EVENT_FL_ENABLED_BIT, &file->flags);
@@ -481,27 +482,29 @@ __ftrace_set_clr_event_nolock(struct trace_array *tr, const char *match,
481{ 482{
482 struct ftrace_event_file *file; 483 struct ftrace_event_file *file;
483 struct ftrace_event_call *call; 484 struct ftrace_event_call *call;
485 const char *name;
484 int ret = -EINVAL; 486 int ret = -EINVAL;
485 487
486 list_for_each_entry(file, &tr->events, list) { 488 list_for_each_entry(file, &tr->events, list) {
487 489
488 call = file->event_call; 490 call = file->event_call;
491 name = ftrace_event_name(call);
489 492
490 if (!call->name || !call->class || !call->class->reg) 493 if (!name || !call->class || !call->class->reg)
491 continue; 494 continue;
492 495
493 if (call->flags & TRACE_EVENT_FL_IGNORE_ENABLE) 496 if (call->flags & TRACE_EVENT_FL_IGNORE_ENABLE)
494 continue; 497 continue;
495 498
496 if (match && 499 if (match &&
497 strcmp(match, call->name) != 0 && 500 strcmp(match, name) != 0 &&
498 strcmp(match, call->class->system) != 0) 501 strcmp(match, call->class->system) != 0)
499 continue; 502 continue;
500 503
501 if (sub && strcmp(sub, call->class->system) != 0) 504 if (sub && strcmp(sub, call->class->system) != 0)
502 continue; 505 continue;
503 506
504 if (event && strcmp(event, call->name) != 0) 507 if (event && strcmp(event, name) != 0)
505 continue; 508 continue;
506 509
507 ftrace_event_enable_disable(file, set); 510 ftrace_event_enable_disable(file, set);
@@ -699,7 +702,7 @@ static int t_show(struct seq_file *m, void *v)
699 702
700 if (strcmp(call->class->system, TRACE_SYSTEM) != 0) 703 if (strcmp(call->class->system, TRACE_SYSTEM) != 0)
701 seq_printf(m, "%s:", call->class->system); 704 seq_printf(m, "%s:", call->class->system);
702 seq_printf(m, "%s\n", call->name); 705 seq_printf(m, "%s\n", ftrace_event_name(call));
703 706
704 return 0; 707 return 0;
705} 708}
@@ -792,7 +795,7 @@ system_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
792 mutex_lock(&event_mutex); 795 mutex_lock(&event_mutex);
793 list_for_each_entry(file, &tr->events, list) { 796 list_for_each_entry(file, &tr->events, list) {
794 call = file->event_call; 797 call = file->event_call;
795 if (!call->name || !call->class || !call->class->reg) 798 if (!ftrace_event_name(call) || !call->class || !call->class->reg)
796 continue; 799 continue;
797 800
798 if (system && strcmp(call->class->system, system->name) != 0) 801 if (system && strcmp(call->class->system, system->name) != 0)
@@ -907,7 +910,7 @@ static int f_show(struct seq_file *m, void *v)
907 910
908 switch ((unsigned long)v) { 911 switch ((unsigned long)v) {
909 case FORMAT_HEADER: 912 case FORMAT_HEADER:
910 seq_printf(m, "name: %s\n", call->name); 913 seq_printf(m, "name: %s\n", ftrace_event_name(call));
911 seq_printf(m, "ID: %d\n", call->event.type); 914 seq_printf(m, "ID: %d\n", call->event.type);
912 seq_printf(m, "format:\n"); 915 seq_printf(m, "format:\n");
913 return 0; 916 return 0;
@@ -1527,6 +1530,7 @@ event_create_dir(struct dentry *parent, struct ftrace_event_file *file)
1527 struct trace_array *tr = file->tr; 1530 struct trace_array *tr = file->tr;
1528 struct list_head *head; 1531 struct list_head *head;
1529 struct dentry *d_events; 1532 struct dentry *d_events;
1533 const char *name;
1530 int ret; 1534 int ret;
1531 1535
1532 /* 1536 /*
@@ -1540,10 +1544,11 @@ event_create_dir(struct dentry *parent, struct ftrace_event_file *file)
1540 } else 1544 } else
1541 d_events = parent; 1545 d_events = parent;
1542 1546
1543 file->dir = debugfs_create_dir(call->name, d_events); 1547 name = ftrace_event_name(call);
1548 file->dir = debugfs_create_dir(name, d_events);
1544 if (!file->dir) { 1549 if (!file->dir) {
1545 pr_warning("Could not create debugfs '%s' directory\n", 1550 pr_warning("Could not create debugfs '%s' directory\n",
1546 call->name); 1551 name);
1547 return -1; 1552 return -1;
1548 } 1553 }
1549 1554
@@ -1567,7 +1572,7 @@ event_create_dir(struct dentry *parent, struct ftrace_event_file *file)
1567 ret = call->class->define_fields(call); 1572 ret = call->class->define_fields(call);
1568 if (ret < 0) { 1573 if (ret < 0) {
1569 pr_warning("Could not initialize trace point" 1574 pr_warning("Could not initialize trace point"
1570 " events/%s\n", call->name); 1575 " events/%s\n", name);
1571 return -1; 1576 return -1;
1572 } 1577 }
1573 } 1578 }
@@ -1631,15 +1636,17 @@ static void event_remove(struct ftrace_event_call *call)
1631static int event_init(struct ftrace_event_call *call) 1636static int event_init(struct ftrace_event_call *call)
1632{ 1637{
1633 int ret = 0; 1638 int ret = 0;
1639 const char *name;
1634 1640
1635 if (WARN_ON(!call->name)) 1641 name = ftrace_event_name(call);
1642 if (WARN_ON(!name))
1636 return -EINVAL; 1643 return -EINVAL;
1637 1644
1638 if (call->class->raw_init) { 1645 if (call->class->raw_init) {
1639 ret = call->class->raw_init(call); 1646 ret = call->class->raw_init(call);
1640 if (ret < 0 && ret != -ENOSYS) 1647 if (ret < 0 && ret != -ENOSYS)
1641 pr_warn("Could not initialize trace events/%s\n", 1648 pr_warn("Could not initialize trace events/%s\n",
1642 call->name); 1649 name);
1643 } 1650 }
1644 1651
1645 return ret; 1652 return ret;
@@ -1885,7 +1892,7 @@ __trace_add_event_dirs(struct trace_array *tr)
1885 ret = __trace_add_new_event(call, tr); 1892 ret = __trace_add_new_event(call, tr);
1886 if (ret < 0) 1893 if (ret < 0)
1887 pr_warning("Could not create directory for event %s\n", 1894 pr_warning("Could not create directory for event %s\n",
1888 call->name); 1895 ftrace_event_name(call));
1889 } 1896 }
1890} 1897}
1891 1898
@@ -1894,18 +1901,20 @@ find_event_file(struct trace_array *tr, const char *system, const char *event)
1894{ 1901{
1895 struct ftrace_event_file *file; 1902 struct ftrace_event_file *file;
1896 struct ftrace_event_call *call; 1903 struct ftrace_event_call *call;
1904 const char *name;
1897 1905
1898 list_for_each_entry(file, &tr->events, list) { 1906 list_for_each_entry(file, &tr->events, list) {
1899 1907
1900 call = file->event_call; 1908 call = file->event_call;
1909 name = ftrace_event_name(call);
1901 1910
1902 if (!call->name || !call->class || !call->class->reg) 1911 if (!name || !call->class || !call->class->reg)
1903 continue; 1912 continue;
1904 1913
1905 if (call->flags & TRACE_EVENT_FL_IGNORE_ENABLE) 1914 if (call->flags & TRACE_EVENT_FL_IGNORE_ENABLE)
1906 continue; 1915 continue;
1907 1916
1908 if (strcmp(event, call->name) == 0 && 1917 if (strcmp(event, name) == 0 &&
1909 strcmp(system, call->class->system) == 0) 1918 strcmp(system, call->class->system) == 0)
1910 return file; 1919 return file;
1911 } 1920 }
@@ -1973,7 +1982,7 @@ event_enable_print(struct seq_file *m, unsigned long ip,
1973 seq_printf(m, "%s:%s:%s", 1982 seq_printf(m, "%s:%s:%s",
1974 data->enable ? ENABLE_EVENT_STR : DISABLE_EVENT_STR, 1983 data->enable ? ENABLE_EVENT_STR : DISABLE_EVENT_STR,
1975 data->file->event_call->class->system, 1984 data->file->event_call->class->system,
1976 data->file->event_call->name); 1985 ftrace_event_name(data->file->event_call));
1977 1986
1978 if (data->count == -1) 1987 if (data->count == -1)
1979 seq_printf(m, ":unlimited\n"); 1988 seq_printf(m, ":unlimited\n");
@@ -2193,7 +2202,7 @@ __trace_early_add_event_dirs(struct trace_array *tr)
2193 ret = event_create_dir(tr->event_dir, file); 2202 ret = event_create_dir(tr->event_dir, file);
2194 if (ret < 0) 2203 if (ret < 0)
2195 pr_warning("Could not create directory for event %s\n", 2204 pr_warning("Could not create directory for event %s\n",
2196 file->event_call->name); 2205 ftrace_event_name(file->event_call));
2197 } 2206 }
2198} 2207}
2199 2208
@@ -2217,7 +2226,7 @@ __trace_early_add_events(struct trace_array *tr)
2217 ret = __trace_early_add_new_event(call, tr); 2226 ret = __trace_early_add_new_event(call, tr);
2218 if (ret < 0) 2227 if (ret < 0)
2219 pr_warning("Could not create early event %s\n", 2228 pr_warning("Could not create early event %s\n",
2220 call->name); 2229 ftrace_event_name(call));
2221 } 2230 }
2222} 2231}
2223 2232
@@ -2549,7 +2558,7 @@ static __init void event_trace_self_tests(void)
2549 continue; 2558 continue;
2550#endif 2559#endif
2551 2560
2552 pr_info("Testing event %s: ", call->name); 2561 pr_info("Testing event %s: ", ftrace_event_name(call));
2553 2562
2554 /* 2563 /*
2555 * If an event is already enabled, someone is using 2564 * If an event is already enabled, someone is using
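
Every call->name reference in this file becomes ftrace_event_name(call) because the name can now live either directly in the call (for the ftrace-internal events initialized in trace_export.c below) or behind the tracepoint pointer used by tracepoint-backed events, selected by TRACE_EVENT_FL_TRACEPOINT. The accessor is defined elsewhere in the series; roughly the following shape, reconstructed here as an approximation rather than quoted from this diff:

        static inline const char *ftrace_event_name(struct ftrace_event_call *call)
        {
                if (call->flags & TRACE_EVENT_FL_TRACEPOINT)
                        return call->tp ? call->tp->name : NULL;

                return call->name;
        }
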
diff --git a/kernel/trace/trace_events_trigger.c b/kernel/trace/trace_events_trigger.c
index 8efbb69b04f0..925f537f07d1 100644
--- a/kernel/trace/trace_events_trigger.c
+++ b/kernel/trace/trace_events_trigger.c
@@ -1095,7 +1095,7 @@ event_enable_trigger_print(struct seq_file *m, struct event_trigger_ops *ops,
1095 seq_printf(m, "%s:%s:%s", 1095 seq_printf(m, "%s:%s:%s",
1096 enable_data->enable ? ENABLE_EVENT_STR : DISABLE_EVENT_STR, 1096 enable_data->enable ? ENABLE_EVENT_STR : DISABLE_EVENT_STR,
1097 enable_data->file->event_call->class->system, 1097 enable_data->file->event_call->class->system,
1098 enable_data->file->event_call->name); 1098 ftrace_event_name(enable_data->file->event_call));
1099 1099
1100 if (data->count == -1) 1100 if (data->count == -1)
1101 seq_puts(m, ":unlimited"); 1101 seq_puts(m, ":unlimited");
diff --git a/kernel/trace/trace_export.c b/kernel/trace/trace_export.c
index ee0a5098ac43..d4ddde28a81a 100644
--- a/kernel/trace/trace_export.c
+++ b/kernel/trace/trace_export.c
@@ -173,9 +173,11 @@ struct ftrace_event_class __refdata event_class_ftrace_##call = { \
173}; \ 173}; \
174 \ 174 \
175struct ftrace_event_call __used event_##call = { \ 175struct ftrace_event_call __used event_##call = { \
176 .name = #call, \
177 .event.type = etype, \
178 .class = &event_class_ftrace_##call, \ 176 .class = &event_class_ftrace_##call, \
177 { \
178 .name = #call, \
179 }, \
180 .event.type = etype, \
179 .print_fmt = print, \ 181 .print_fmt = print, \
180 .flags = TRACE_EVENT_FL_IGNORE_ENABLE | TRACE_EVENT_FL_USE_CALL_FILTER, \ 182 .flags = TRACE_EVENT_FL_IGNORE_ENABLE | TRACE_EVENT_FL_USE_CALL_FILTER, \
181}; \ 183}; \
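
The initializer is reshaped because .name now sits inside an unnamed union in struct ftrace_event_call; after the designated .class initializer, the braced sub-initializer falls positionally on that union and picks its .name member. A standalone illustration of the same C pattern, using a hypothetical struct for illustration only:

        #include <stdio.h>

        /* A named member followed by an unnamed union; the union's active member
         * is chosen inside a braced sub-initializer. */
        struct event {
                int type;
                union {
                        const char *name;
                        void *tp;
                };
        };

        static struct event ev = {
                .type = 7,
                {
                        .name = "sched_switch",
                },
        };

        int main(void)
        {
                printf("%d %s\n", ev.type, ev.name);
                return 0;
        }
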
diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
index d021d21dd150..903ae28962be 100644
--- a/kernel/trace/trace_kprobe.c
+++ b/kernel/trace/trace_kprobe.c
@@ -341,7 +341,7 @@ static struct trace_kprobe *find_trace_kprobe(const char *event,
341 struct trace_kprobe *tk; 341 struct trace_kprobe *tk;
342 342
343 list_for_each_entry(tk, &probe_list, list) 343 list_for_each_entry(tk, &probe_list, list)
344 if (strcmp(tk->tp.call.name, event) == 0 && 344 if (strcmp(ftrace_event_name(&tk->tp.call), event) == 0 &&
345 strcmp(tk->tp.call.class->system, group) == 0) 345 strcmp(tk->tp.call.class->system, group) == 0)
346 return tk; 346 return tk;
347 return NULL; 347 return NULL;
@@ -516,7 +516,8 @@ static int register_trace_kprobe(struct trace_kprobe *tk)
516 mutex_lock(&probe_lock); 516 mutex_lock(&probe_lock);
517 517
518 /* Delete old (same name) event if exist */ 518 /* Delete old (same name) event if exist */
519 old_tk = find_trace_kprobe(tk->tp.call.name, tk->tp.call.class->system); 519 old_tk = find_trace_kprobe(ftrace_event_name(&tk->tp.call),
520 tk->tp.call.class->system);
520 if (old_tk) { 521 if (old_tk) {
521 ret = unregister_trace_kprobe(old_tk); 522 ret = unregister_trace_kprobe(old_tk);
522 if (ret < 0) 523 if (ret < 0)
@@ -564,7 +565,8 @@ static int trace_kprobe_module_callback(struct notifier_block *nb,
564 if (ret) 565 if (ret)
565 pr_warning("Failed to re-register probe %s on" 566 pr_warning("Failed to re-register probe %s on"
566 "%s: %d\n", 567 "%s: %d\n",
567 tk->tp.call.name, mod->name, ret); 568 ftrace_event_name(&tk->tp.call),
569 mod->name, ret);
568 } 570 }
569 } 571 }
570 mutex_unlock(&probe_lock); 572 mutex_unlock(&probe_lock);
@@ -818,7 +820,8 @@ static int probes_seq_show(struct seq_file *m, void *v)
818 int i; 820 int i;
819 821
820 seq_printf(m, "%c", trace_kprobe_is_return(tk) ? 'r' : 'p'); 822 seq_printf(m, "%c", trace_kprobe_is_return(tk) ? 'r' : 'p');
821 seq_printf(m, ":%s/%s", tk->tp.call.class->system, tk->tp.call.name); 823 seq_printf(m, ":%s/%s", tk->tp.call.class->system,
824 ftrace_event_name(&tk->tp.call));
822 825
823 if (!tk->symbol) 826 if (!tk->symbol)
824 seq_printf(m, " 0x%p", tk->rp.kp.addr); 827 seq_printf(m, " 0x%p", tk->rp.kp.addr);
@@ -876,7 +879,8 @@ static int probes_profile_seq_show(struct seq_file *m, void *v)
876{ 879{
877 struct trace_kprobe *tk = v; 880 struct trace_kprobe *tk = v;
878 881
879 seq_printf(m, " %-44s %15lu %15lu\n", tk->tp.call.name, tk->nhit, 882 seq_printf(m, " %-44s %15lu %15lu\n",
883 ftrace_event_name(&tk->tp.call), tk->nhit,
880 tk->rp.kp.nmissed); 884 tk->rp.kp.nmissed);
881 885
882 return 0; 886 return 0;
@@ -1011,7 +1015,7 @@ print_kprobe_event(struct trace_iterator *iter, int flags,
1011 field = (struct kprobe_trace_entry_head *)iter->ent; 1015 field = (struct kprobe_trace_entry_head *)iter->ent;
1012 tp = container_of(event, struct trace_probe, call.event); 1016 tp = container_of(event, struct trace_probe, call.event);
1013 1017
1014 if (!trace_seq_printf(s, "%s: (", tp->call.name)) 1018 if (!trace_seq_printf(s, "%s: (", ftrace_event_name(&tp->call)))
1015 goto partial; 1019 goto partial;
1016 1020
1017 if (!seq_print_ip_sym(s, field->ip, flags | TRACE_ITER_SYM_OFFSET)) 1021 if (!seq_print_ip_sym(s, field->ip, flags | TRACE_ITER_SYM_OFFSET))
@@ -1047,7 +1051,7 @@ print_kretprobe_event(struct trace_iterator *iter, int flags,
1047 field = (struct kretprobe_trace_entry_head *)iter->ent; 1051 field = (struct kretprobe_trace_entry_head *)iter->ent;
1048 tp = container_of(event, struct trace_probe, call.event); 1052 tp = container_of(event, struct trace_probe, call.event);
1049 1053
1050 if (!trace_seq_printf(s, "%s: (", tp->call.name)) 1054 if (!trace_seq_printf(s, "%s: (", ftrace_event_name(&tp->call)))
1051 goto partial; 1055 goto partial;
1052 1056
1053 if (!seq_print_ip_sym(s, field->ret_ip, flags | TRACE_ITER_SYM_OFFSET)) 1057 if (!seq_print_ip_sym(s, field->ret_ip, flags | TRACE_ITER_SYM_OFFSET))
@@ -1286,7 +1290,8 @@ static int register_kprobe_event(struct trace_kprobe *tk)
1286 call->data = tk; 1290 call->data = tk;
1287 ret = trace_add_event_call(call); 1291 ret = trace_add_event_call(call);
1288 if (ret) { 1292 if (ret) {
1289 pr_info("Failed to register kprobe event: %s\n", call->name); 1293 pr_info("Failed to register kprobe event: %s\n",
1294 ftrace_event_name(call));
1290 kfree(call->print_fmt); 1295 kfree(call->print_fmt);
1291 unregister_ftrace_event(&call->event); 1296 unregister_ftrace_event(&call->event);
1292 } 1297 }
diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
index ca0e79e2abaa..a436de18aa99 100644
--- a/kernel/trace/trace_output.c
+++ b/kernel/trace/trace_output.c
@@ -431,7 +431,7 @@ int ftrace_raw_output_prep(struct trace_iterator *iter,
431 } 431 }
432 432
433 trace_seq_init(p); 433 trace_seq_init(p);
434 ret = trace_seq_printf(s, "%s: ", event->name); 434 ret = trace_seq_printf(s, "%s: ", ftrace_event_name(event));
435 if (!ret) 435 if (!ret)
436 return TRACE_TYPE_PARTIAL_LINE; 436 return TRACE_TYPE_PARTIAL_LINE;
437 437
diff --git a/kernel/trace/trace_uprobe.c b/kernel/trace/trace_uprobe.c
index e4473367e7a4..930e51462dc8 100644
--- a/kernel/trace/trace_uprobe.c
+++ b/kernel/trace/trace_uprobe.c
@@ -294,7 +294,7 @@ static struct trace_uprobe *find_probe_event(const char *event, const char *grou
294 struct trace_uprobe *tu; 294 struct trace_uprobe *tu;
295 295
296 list_for_each_entry(tu, &uprobe_list, list) 296 list_for_each_entry(tu, &uprobe_list, list)
297 if (strcmp(tu->tp.call.name, event) == 0 && 297 if (strcmp(ftrace_event_name(&tu->tp.call), event) == 0 &&
298 strcmp(tu->tp.call.class->system, group) == 0) 298 strcmp(tu->tp.call.class->system, group) == 0)
299 return tu; 299 return tu;
300 300
@@ -324,7 +324,8 @@ static int register_trace_uprobe(struct trace_uprobe *tu)
324 mutex_lock(&uprobe_lock); 324 mutex_lock(&uprobe_lock);
325 325
326 /* register as an event */ 326 /* register as an event */
327 old_tu = find_probe_event(tu->tp.call.name, tu->tp.call.class->system); 327 old_tu = find_probe_event(ftrace_event_name(&tu->tp.call),
328 tu->tp.call.class->system);
328 if (old_tu) { 329 if (old_tu) {
329 /* delete old event */ 330 /* delete old event */
330 ret = unregister_trace_uprobe(old_tu); 331 ret = unregister_trace_uprobe(old_tu);
@@ -599,7 +600,8 @@ static int probes_seq_show(struct seq_file *m, void *v)
599 char c = is_ret_probe(tu) ? 'r' : 'p'; 600 char c = is_ret_probe(tu) ? 'r' : 'p';
600 int i; 601 int i;
601 602
602 seq_printf(m, "%c:%s/%s", c, tu->tp.call.class->system, tu->tp.call.name); 603 seq_printf(m, "%c:%s/%s", c, tu->tp.call.class->system,
604 ftrace_event_name(&tu->tp.call));
603 seq_printf(m, " %s:0x%p", tu->filename, (void *)tu->offset); 605 seq_printf(m, " %s:0x%p", tu->filename, (void *)tu->offset);
604 606
605 for (i = 0; i < tu->tp.nr_args; i++) 607 for (i = 0; i < tu->tp.nr_args; i++)
@@ -649,7 +651,8 @@ static int probes_profile_seq_show(struct seq_file *m, void *v)
649{ 651{
650 struct trace_uprobe *tu = v; 652 struct trace_uprobe *tu = v;
651 653
652 seq_printf(m, " %s %-44s %15lu\n", tu->filename, tu->tp.call.name, tu->nhit); 654 seq_printf(m, " %s %-44s %15lu\n", tu->filename,
655 ftrace_event_name(&tu->tp.call), tu->nhit);
653 return 0; 656 return 0;
654} 657}
655 658
@@ -844,12 +847,14 @@ print_uprobe_event(struct trace_iterator *iter, int flags, struct trace_event *e
844 tu = container_of(event, struct trace_uprobe, tp.call.event); 847 tu = container_of(event, struct trace_uprobe, tp.call.event);
845 848
846 if (is_ret_probe(tu)) { 849 if (is_ret_probe(tu)) {
847 if (!trace_seq_printf(s, "%s: (0x%lx <- 0x%lx)", tu->tp.call.name, 850 if (!trace_seq_printf(s, "%s: (0x%lx <- 0x%lx)",
851 ftrace_event_name(&tu->tp.call),
848 entry->vaddr[1], entry->vaddr[0])) 852 entry->vaddr[1], entry->vaddr[0]))
849 goto partial; 853 goto partial;
850 data = DATAOF_TRACE_ENTRY(entry, true); 854 data = DATAOF_TRACE_ENTRY(entry, true);
851 } else { 855 } else {
852 if (!trace_seq_printf(s, "%s: (0x%lx)", tu->tp.call.name, 856 if (!trace_seq_printf(s, "%s: (0x%lx)",
857 ftrace_event_name(&tu->tp.call),
853 entry->vaddr[0])) 858 entry->vaddr[0]))
854 goto partial; 859 goto partial;
855 data = DATAOF_TRACE_ENTRY(entry, false); 860 data = DATAOF_TRACE_ENTRY(entry, false);
@@ -1275,7 +1280,8 @@ static int register_uprobe_event(struct trace_uprobe *tu)
1275 ret = trace_add_event_call(call); 1280 ret = trace_add_event_call(call);
1276 1281
1277 if (ret) { 1282 if (ret) {
1278 pr_info("Failed to register uprobe event: %s\n", call->name); 1283 pr_info("Failed to register uprobe event: %s\n",
1284 ftrace_event_name(call));
1279 kfree(call->print_fmt); 1285 kfree(call->print_fmt);
1280 unregister_ftrace_event(&call->event); 1286 unregister_ftrace_event(&call->event);
1281 } 1287 }
diff --git a/kernel/tracepoint.c b/kernel/tracepoint.c
index fb0a38a26555..ac5b23cf7212 100644
--- a/kernel/tracepoint.c
+++ b/kernel/tracepoint.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (C) 2008 Mathieu Desnoyers 2 * Copyright (C) 2008-2014 Mathieu Desnoyers
3 * 3 *
4 * This program is free software; you can redistribute it and/or modify 4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by 5 * it under the terms of the GNU General Public License as published by
@@ -33,39 +33,27 @@ extern struct tracepoint * const __stop___tracepoints_ptrs[];
33/* Set to 1 to enable tracepoint debug output */ 33/* Set to 1 to enable tracepoint debug output */
34static const int tracepoint_debug; 34static const int tracepoint_debug;
35 35
36#ifdef CONFIG_MODULES
36/* 37/*
37 * Tracepoints mutex protects the builtin and module tracepoints and the hash 38 * Tracepoint module list mutex protects the local module list.
38 * table, as well as the local module list.
39 */ 39 */
40static DEFINE_MUTEX(tracepoints_mutex); 40static DEFINE_MUTEX(tracepoint_module_list_mutex);
41 41
42#ifdef CONFIG_MODULES 42/* Local list of struct tp_module */
43/* Local list of struct module */
44static LIST_HEAD(tracepoint_module_list); 43static LIST_HEAD(tracepoint_module_list);
45#endif /* CONFIG_MODULES */ 44#endif /* CONFIG_MODULES */
46 45
47/* 46/*
48 * Tracepoint hash table, containing the active tracepoints. 47 * tracepoints_mutex protects the builtin and module tracepoints.
49 * Protected by tracepoints_mutex. 48 * tracepoints_mutex nests inside tracepoint_module_list_mutex.
50 */ 49 */
51#define TRACEPOINT_HASH_BITS 6 50static DEFINE_MUTEX(tracepoints_mutex);
52#define TRACEPOINT_TABLE_SIZE (1 << TRACEPOINT_HASH_BITS)
53static struct hlist_head tracepoint_table[TRACEPOINT_TABLE_SIZE];
54 51
55/* 52/*
56 * Note about RCU : 53 * Note about RCU :
57 * It is used to delay the free of multiple probes array until a quiescent 54 * It is used to delay the free of multiple probes array until a quiescent
58 * state is reached. 55 * state is reached.
59 * Tracepoint entries modifications are protected by the tracepoints_mutex.
60 */ 56 */
61struct tracepoint_entry {
62 struct hlist_node hlist;
63 struct tracepoint_func *funcs;
64 int refcount; /* Number of times armed. 0 if disarmed. */
65 int enabled; /* Tracepoint enabled */
66 char name[0];
67};
68
69struct tp_probes { 57struct tp_probes {
70 struct rcu_head rcu; 58 struct rcu_head rcu;
71 struct tracepoint_func probes[0]; 59 struct tracepoint_func probes[0];
@@ -92,34 +80,33 @@ static inline void release_probes(struct tracepoint_func *old)
92 } 80 }
93} 81}
94 82
95static void debug_print_probes(struct tracepoint_entry *entry) 83static void debug_print_probes(struct tracepoint_func *funcs)
96{ 84{
97 int i; 85 int i;
98 86
99 if (!tracepoint_debug || !entry->funcs) 87 if (!tracepoint_debug || !funcs)
100 return; 88 return;
101 89
102 for (i = 0; entry->funcs[i].func; i++) 90 for (i = 0; funcs[i].func; i++)
103 printk(KERN_DEBUG "Probe %d : %p\n", i, entry->funcs[i].func); 91 printk(KERN_DEBUG "Probe %d : %p\n", i, funcs[i].func);
104} 92}
105 93
106static struct tracepoint_func * 94static struct tracepoint_func *func_add(struct tracepoint_func **funcs,
107tracepoint_entry_add_probe(struct tracepoint_entry *entry, 95 struct tracepoint_func *tp_func)
108 void *probe, void *data)
109{ 96{
110 int nr_probes = 0; 97 int nr_probes = 0;
111 struct tracepoint_func *old, *new; 98 struct tracepoint_func *old, *new;
112 99
113 if (WARN_ON(!probe)) 100 if (WARN_ON(!tp_func->func))
114 return ERR_PTR(-EINVAL); 101 return ERR_PTR(-EINVAL);
115 102
116 debug_print_probes(entry); 103 debug_print_probes(*funcs);
117 old = entry->funcs; 104 old = *funcs;
118 if (old) { 105 if (old) {
119 /* (N -> N+1), (N != 0, 1) probes */ 106 /* (N -> N+1), (N != 0, 1) probes */
120 for (nr_probes = 0; old[nr_probes].func; nr_probes++) 107 for (nr_probes = 0; old[nr_probes].func; nr_probes++)
121 if (old[nr_probes].func == probe && 108 if (old[nr_probes].func == tp_func->func &&
122 old[nr_probes].data == data) 109 old[nr_probes].data == tp_func->data)
123 return ERR_PTR(-EEXIST); 110 return ERR_PTR(-EEXIST);
124 } 111 }
125 /* + 2 : one for new probe, one for NULL func */ 112 /* + 2 : one for new probe, one for NULL func */
@@ -128,33 +115,30 @@ tracepoint_entry_add_probe(struct tracepoint_entry *entry,
128 return ERR_PTR(-ENOMEM); 115 return ERR_PTR(-ENOMEM);
129 if (old) 116 if (old)
130 memcpy(new, old, nr_probes * sizeof(struct tracepoint_func)); 117 memcpy(new, old, nr_probes * sizeof(struct tracepoint_func));
131 new[nr_probes].func = probe; 118 new[nr_probes] = *tp_func;
132 new[nr_probes].data = data;
133 new[nr_probes + 1].func = NULL; 119 new[nr_probes + 1].func = NULL;
134 entry->refcount = nr_probes + 1; 120 *funcs = new;
135 entry->funcs = new; 121 debug_print_probes(*funcs);
136 debug_print_probes(entry);
137 return old; 122 return old;
138} 123}
139 124
140static void * 125static void *func_remove(struct tracepoint_func **funcs,
141tracepoint_entry_remove_probe(struct tracepoint_entry *entry, 126 struct tracepoint_func *tp_func)
142 void *probe, void *data)
143{ 127{
144 int nr_probes = 0, nr_del = 0, i; 128 int nr_probes = 0, nr_del = 0, i;
145 struct tracepoint_func *old, *new; 129 struct tracepoint_func *old, *new;
146 130
147 old = entry->funcs; 131 old = *funcs;
148 132
149 if (!old) 133 if (!old)
150 return ERR_PTR(-ENOENT); 134 return ERR_PTR(-ENOENT);
151 135
152 debug_print_probes(entry); 136 debug_print_probes(*funcs);
153 /* (N -> M), (N > 1, M >= 0) probes */ 137 /* (N -> M), (N > 1, M >= 0) probes */
154 if (probe) { 138 if (tp_func->func) {
155 for (nr_probes = 0; old[nr_probes].func; nr_probes++) { 139 for (nr_probes = 0; old[nr_probes].func; nr_probes++) {
156 if (old[nr_probes].func == probe && 140 if (old[nr_probes].func == tp_func->func &&
157 old[nr_probes].data == data) 141 old[nr_probes].data == tp_func->data)
158 nr_del++; 142 nr_del++;
159 } 143 }
160 } 144 }
@@ -165,9 +149,8 @@ tracepoint_entry_remove_probe(struct tracepoint_entry *entry,
165 */ 149 */
166 if (nr_probes - nr_del == 0) { 150 if (nr_probes - nr_del == 0) {
167 /* N -> 0, (N > 1) */ 151 /* N -> 0, (N > 1) */
168 entry->funcs = NULL; 152 *funcs = NULL;
169 entry->refcount = 0; 153 debug_print_probes(*funcs);
170 debug_print_probes(entry);
171 return old; 154 return old;
172 } else { 155 } else {
173 int j = 0; 156 int j = 0;
@@ -177,91 +160,35 @@ tracepoint_entry_remove_probe(struct tracepoint_entry *entry,
177 if (new == NULL) 160 if (new == NULL)
178 return ERR_PTR(-ENOMEM); 161 return ERR_PTR(-ENOMEM);
179 for (i = 0; old[i].func; i++) 162 for (i = 0; old[i].func; i++)
180 if (old[i].func != probe || old[i].data != data) 163 if (old[i].func != tp_func->func
164 || old[i].data != tp_func->data)
181 new[j++] = old[i]; 165 new[j++] = old[i];
182 new[nr_probes - nr_del].func = NULL; 166 new[nr_probes - nr_del].func = NULL;
183 entry->refcount = nr_probes - nr_del; 167 *funcs = new;
184 entry->funcs = new;
185 } 168 }
186 debug_print_probes(entry); 169 debug_print_probes(*funcs);
187 return old; 170 return old;
188} 171}
189 172
190/* 173/*
191 * Get tracepoint if the tracepoint is present in the tracepoint hash table. 174 * Add the probe function to a tracepoint.
192 * Must be called with tracepoints_mutex held.
193 * Returns NULL if not present.
194 */ 175 */
195static struct tracepoint_entry *get_tracepoint(const char *name) 176static int tracepoint_add_func(struct tracepoint *tp,
177 struct tracepoint_func *func)
196{ 178{
197 struct hlist_head *head; 179 struct tracepoint_func *old, *tp_funcs;
198 struct tracepoint_entry *e;
199 u32 hash = jhash(name, strlen(name), 0);
200
201 head = &tracepoint_table[hash & (TRACEPOINT_TABLE_SIZE - 1)];
202 hlist_for_each_entry(e, head, hlist) {
203 if (!strcmp(name, e->name))
204 return e;
205 }
206 return NULL;
207}
208 180
209/* 181 if (tp->regfunc && !static_key_enabled(&tp->key))
210 * Add the tracepoint to the tracepoint hash table. Must be called with 182 tp->regfunc();
211 * tracepoints_mutex held.
212 */
213static struct tracepoint_entry *add_tracepoint(const char *name)
214{
215 struct hlist_head *head;
216 struct tracepoint_entry *e;
217 size_t name_len = strlen(name) + 1;
218 u32 hash = jhash(name, name_len-1, 0);
219
220 head = &tracepoint_table[hash & (TRACEPOINT_TABLE_SIZE - 1)];
221 hlist_for_each_entry(e, head, hlist) {
222 if (!strcmp(name, e->name)) {
223 printk(KERN_NOTICE
224 "tracepoint %s busy\n", name);
225 return ERR_PTR(-EEXIST); /* Already there */
226 }
227 }
228 /*
229 * Using kmalloc here to allocate a variable length element. Could
230 * cause some memory fragmentation if overused.
231 */
232 e = kmalloc(sizeof(struct tracepoint_entry) + name_len, GFP_KERNEL);
233 if (!e)
234 return ERR_PTR(-ENOMEM);
235 memcpy(&e->name[0], name, name_len);
236 e->funcs = NULL;
237 e->refcount = 0;
238 e->enabled = 0;
239 hlist_add_head(&e->hlist, head);
240 return e;
241}
242 183
243/* 184 tp_funcs = rcu_dereference_protected(tp->funcs,
244 * Remove the tracepoint from the tracepoint hash table. Must be called with 185 lockdep_is_held(&tracepoints_mutex));
245 * mutex_lock held. 186 old = func_add(&tp_funcs, func);
246 */ 187 if (IS_ERR(old)) {
247static inline void remove_tracepoint(struct tracepoint_entry *e) 188 WARN_ON_ONCE(1);
248{ 189 return PTR_ERR(old);
249 hlist_del(&e->hlist); 190 }
250 kfree(e); 191 release_probes(old);
251}
252
253/*
254 * Sets the probe callback corresponding to one tracepoint.
255 */
256static void set_tracepoint(struct tracepoint_entry **entry,
257 struct tracepoint *elem, int active)
258{
259 WARN_ON(strcmp((*entry)->name, elem->name) != 0);
260
261 if (elem->regfunc && !static_key_enabled(&elem->key) && active)
262 elem->regfunc();
263 else if (elem->unregfunc && static_key_enabled(&elem->key) && !active)
264 elem->unregfunc();
265 192
266 /* 193 /*
267 * rcu_assign_pointer has a smp_wmb() which makes sure that the new 194 * rcu_assign_pointer has a smp_wmb() which makes sure that the new
@@ -270,193 +197,90 @@ static void set_tracepoint(struct tracepoint_entry **entry,
270 * include/linux/tracepoints.h. A matching smp_read_barrier_depends() 197 * include/linux/tracepoints.h. A matching smp_read_barrier_depends()
271 * is used. 198 * is used.
272 */ 199 */
273 rcu_assign_pointer(elem->funcs, (*entry)->funcs); 200 rcu_assign_pointer(tp->funcs, tp_funcs);
274 if (active && !static_key_enabled(&elem->key)) 201 if (!static_key_enabled(&tp->key))
275 static_key_slow_inc(&elem->key); 202 static_key_slow_inc(&tp->key);
276 else if (!active && static_key_enabled(&elem->key)) 203 return 0;
277 static_key_slow_dec(&elem->key);
278} 204}
279 205
280/* 206/*
281 * Disable a tracepoint and its probe callback. 207 * Remove a probe function from a tracepoint.
282 * Note: only waiting an RCU period after setting elem->call to the empty 208 * Note: only waiting an RCU period after setting elem->call to the empty
 283 * function ensures that the original callback is not used anymore. This is 209 * function ensures that the original callback is not used anymore. This is
 284 * ensured by preempt_disable around the call site. 210 * ensured by preempt_disable around the call site.
285 */ 211 */
286static void disable_tracepoint(struct tracepoint *elem) 212static int tracepoint_remove_func(struct tracepoint *tp,
213 struct tracepoint_func *func)
287{ 214{
288 if (elem->unregfunc && static_key_enabled(&elem->key)) 215 struct tracepoint_func *old, *tp_funcs;
289 elem->unregfunc();
290
291 if (static_key_enabled(&elem->key))
292 static_key_slow_dec(&elem->key);
293 rcu_assign_pointer(elem->funcs, NULL);
294}
295 216
296/** 217 tp_funcs = rcu_dereference_protected(tp->funcs,
297 * tracepoint_update_probe_range - Update a probe range 218 lockdep_is_held(&tracepoints_mutex));
298 * @begin: beginning of the range 219 old = func_remove(&tp_funcs, func);
299 * @end: end of the range 220 if (IS_ERR(old)) {
300 * 221 WARN_ON_ONCE(1);
301 * Updates the probe callback corresponding to a range of tracepoints. 222 return PTR_ERR(old);
302 * Called with tracepoints_mutex held.
303 */
304static void tracepoint_update_probe_range(struct tracepoint * const *begin,
305 struct tracepoint * const *end)
306{
307 struct tracepoint * const *iter;
308 struct tracepoint_entry *mark_entry;
309
310 if (!begin)
311 return;
312
313 for (iter = begin; iter < end; iter++) {
314 mark_entry = get_tracepoint((*iter)->name);
315 if (mark_entry) {
316 set_tracepoint(&mark_entry, *iter,
317 !!mark_entry->refcount);
318 mark_entry->enabled = !!mark_entry->refcount;
319 } else {
320 disable_tracepoint(*iter);
321 }
322 } 223 }
323} 224 release_probes(old);
324
325#ifdef CONFIG_MODULES
326void module_update_tracepoints(void)
327{
328 struct tp_module *tp_mod;
329
330 list_for_each_entry(tp_mod, &tracepoint_module_list, list)
331 tracepoint_update_probe_range(tp_mod->tracepoints_ptrs,
332 tp_mod->tracepoints_ptrs + tp_mod->num_tracepoints);
333}
334#else /* CONFIG_MODULES */
335void module_update_tracepoints(void)
336{
337}
338#endif /* CONFIG_MODULES */
339 225
226 if (!tp_funcs) {
227 /* Removed last function */
228 if (tp->unregfunc && static_key_enabled(&tp->key))
229 tp->unregfunc();
340 230
341/* 231 if (static_key_enabled(&tp->key))
342 * Update probes, removing the faulty probes. 232 static_key_slow_dec(&tp->key);
343 * Called with tracepoints_mutex held.
344 */
345static void tracepoint_update_probes(void)
346{
347 /* Core kernel tracepoints */
348 tracepoint_update_probe_range(__start___tracepoints_ptrs,
349 __stop___tracepoints_ptrs);
350 /* tracepoints in modules. */
351 module_update_tracepoints();
352}
353
354static struct tracepoint_func *
355tracepoint_add_probe(const char *name, void *probe, void *data)
356{
357 struct tracepoint_entry *entry;
358 struct tracepoint_func *old;
359
360 entry = get_tracepoint(name);
361 if (!entry) {
362 entry = add_tracepoint(name);
363 if (IS_ERR(entry))
364 return (struct tracepoint_func *)entry;
365 } 233 }
366 old = tracepoint_entry_add_probe(entry, probe, data); 234 rcu_assign_pointer(tp->funcs, tp_funcs);
367 if (IS_ERR(old) && !entry->refcount) 235 return 0;
368 remove_tracepoint(entry);
369 return old;
370} 236}
371 237
372/** 238/**
373 * tracepoint_probe_register - Connect a probe to a tracepoint 239 * tracepoint_probe_register - Connect a probe to a tracepoint
374 * @name: tracepoint name 240 * @tp: tracepoint
375 * @probe: probe handler 241 * @probe: probe handler
376 * @data: probe private data
377 *
378 * Returns:
379 * - 0 if the probe was successfully registered, and tracepoint
380 * callsites are currently loaded for that probe,
381 * - -ENODEV if the probe was successfully registered, but no tracepoint
382 * callsite is currently loaded for that probe,
383 * - other negative error value on error.
384 *
385 * When tracepoint_probe_register() returns either 0 or -ENODEV,
386 * parameters @name, @probe, and @data may be used by the tracepoint
387 * infrastructure until the probe is unregistered.
388 * 242 *
389 * The probe address must at least be aligned on the architecture pointer size. 243 * Returns 0 if ok, error value on error.
244 * Note: if @tp is within a module, the caller is responsible for
245 * unregistering the probe before the module is gone. This can be
246 * performed either with a tracepoint module going notifier, or from
247 * within module exit functions.
390 */ 248 */
391int tracepoint_probe_register(const char *name, void *probe, void *data) 249int tracepoint_probe_register(struct tracepoint *tp, void *probe, void *data)
392{ 250{
393 struct tracepoint_func *old; 251 struct tracepoint_func tp_func;
394 struct tracepoint_entry *entry; 252 int ret;
395 int ret = 0;
396 253
397 mutex_lock(&tracepoints_mutex); 254 mutex_lock(&tracepoints_mutex);
398 old = tracepoint_add_probe(name, probe, data); 255 tp_func.func = probe;
399 if (IS_ERR(old)) { 256 tp_func.data = data;
400 mutex_unlock(&tracepoints_mutex); 257 ret = tracepoint_add_func(tp, &tp_func);
401 return PTR_ERR(old);
402 }
403 tracepoint_update_probes(); /* may update entry */
404 entry = get_tracepoint(name);
405 /* Make sure the entry was enabled */
406 if (!entry || !entry->enabled)
407 ret = -ENODEV;
408 mutex_unlock(&tracepoints_mutex); 258 mutex_unlock(&tracepoints_mutex);
409 release_probes(old);
410 return ret; 259 return ret;
411} 260}
412EXPORT_SYMBOL_GPL(tracepoint_probe_register); 261EXPORT_SYMBOL_GPL(tracepoint_probe_register);
413 262
414static struct tracepoint_func *
415tracepoint_remove_probe(const char *name, void *probe, void *data)
416{
417 struct tracepoint_entry *entry;
418 struct tracepoint_func *old;
419
420 entry = get_tracepoint(name);
421 if (!entry)
422 return ERR_PTR(-ENOENT);
423 old = tracepoint_entry_remove_probe(entry, probe, data);
424 if (IS_ERR(old))
425 return old;
426 if (!entry->refcount)
427 remove_tracepoint(entry);
428 return old;
429}
430
431/** 263/**
432 * tracepoint_probe_unregister - Disconnect a probe from a tracepoint 264 * tracepoint_probe_unregister - Disconnect a probe from a tracepoint
433 * @name: tracepoint name 265 * @tp: tracepoint
434 * @probe: probe function pointer 266 * @probe: probe function pointer
435 * @data: probe private data
436 * 267 *
437 * We do not need to call a synchronize_sched to make sure the probes have 268 * Returns 0 if ok, error value on error.
438 * finished running before doing a module unload, because the module unload
439 * itself uses stop_machine(), which insures that every preempt disabled section
440 * have finished.
441 */ 269 */
442int tracepoint_probe_unregister(const char *name, void *probe, void *data) 270int tracepoint_probe_unregister(struct tracepoint *tp, void *probe, void *data)
443{ 271{
444 struct tracepoint_func *old; 272 struct tracepoint_func tp_func;
273 int ret;
445 274
446 mutex_lock(&tracepoints_mutex); 275 mutex_lock(&tracepoints_mutex);
447 old = tracepoint_remove_probe(name, probe, data); 276 tp_func.func = probe;
448 if (IS_ERR(old)) { 277 tp_func.data = data;
449 mutex_unlock(&tracepoints_mutex); 278 ret = tracepoint_remove_func(tp, &tp_func);
450 return PTR_ERR(old);
451 }
452 tracepoint_update_probes(); /* may update entry */
453 mutex_unlock(&tracepoints_mutex); 279 mutex_unlock(&tracepoints_mutex);
454 release_probes(old); 280 return ret;
455 return 0;
456} 281}
457EXPORT_SYMBOL_GPL(tracepoint_probe_unregister); 282EXPORT_SYMBOL_GPL(tracepoint_probe_unregister);
458 283
459
460#ifdef CONFIG_MODULES 284#ifdef CONFIG_MODULES
461bool trace_module_has_bad_taint(struct module *mod) 285bool trace_module_has_bad_taint(struct module *mod)
462{ 286{
@@ -464,6 +288,74 @@ bool trace_module_has_bad_taint(struct module *mod)
464 (1 << TAINT_UNSIGNED_MODULE)); 288 (1 << TAINT_UNSIGNED_MODULE));
465} 289}
466 290
291static BLOCKING_NOTIFIER_HEAD(tracepoint_notify_list);
292
293/**
 294 * register_tracepoint_module_notifier - register tracepoint coming/going notifier
295 * @nb: notifier block
296 *
297 * Notifiers registered with this function are called on module
298 * coming/going with the tracepoint_module_list_mutex held.
299 * The notifier block callback should expect a "struct tp_module" data
300 * pointer.
301 */
302int register_tracepoint_module_notifier(struct notifier_block *nb)
303{
304 struct tp_module *tp_mod;
305 int ret;
306
307 mutex_lock(&tracepoint_module_list_mutex);
308 ret = blocking_notifier_chain_register(&tracepoint_notify_list, nb);
309 if (ret)
310 goto end;
311 list_for_each_entry(tp_mod, &tracepoint_module_list, list)
312 (void) nb->notifier_call(nb, MODULE_STATE_COMING, tp_mod);
313end:
314 mutex_unlock(&tracepoint_module_list_mutex);
315 return ret;
316}
317EXPORT_SYMBOL_GPL(register_tracepoint_module_notifier);
318
319/**
 320 * unregister_tracepoint_module_notifier - unregister tracepoint coming/going notifier
321 * @nb: notifier block
322 *
323 * The notifier block callback should expect a "struct tp_module" data
324 * pointer.
325 */
326int unregister_tracepoint_module_notifier(struct notifier_block *nb)
327{
328 struct tp_module *tp_mod;
329 int ret;
330
331 mutex_lock(&tracepoint_module_list_mutex);
332 ret = blocking_notifier_chain_unregister(&tracepoint_notify_list, nb);
333 if (ret)
334 goto end;
335 list_for_each_entry(tp_mod, &tracepoint_module_list, list)
336 (void) nb->notifier_call(nb, MODULE_STATE_GOING, tp_mod);
337end:
338 mutex_unlock(&tracepoint_module_list_mutex);
339 return ret;
340
341}
342EXPORT_SYMBOL_GPL(unregister_tracepoint_module_notifier);
343
344/*
345 * Ensure the tracer unregistered the module's probes before the module
346 * teardown is performed. Prevents leaks of probe and data pointers.
347 */
348static void tp_module_going_check_quiescent(struct tracepoint * const *begin,
349 struct tracepoint * const *end)
350{
351 struct tracepoint * const *iter;
352
353 if (!begin)
354 return;
355 for (iter = begin; iter < end; iter++)
356 WARN_ON_ONCE((*iter)->funcs);
357}
358
467static int tracepoint_module_coming(struct module *mod) 359static int tracepoint_module_coming(struct module *mod)
468{ 360{
469 struct tp_module *tp_mod; 361 struct tp_module *tp_mod;
@@ -479,36 +371,41 @@ static int tracepoint_module_coming(struct module *mod)
479 */ 371 */
480 if (trace_module_has_bad_taint(mod)) 372 if (trace_module_has_bad_taint(mod))
481 return 0; 373 return 0;
482 mutex_lock(&tracepoints_mutex); 374 mutex_lock(&tracepoint_module_list_mutex);
483 tp_mod = kmalloc(sizeof(struct tp_module), GFP_KERNEL); 375 tp_mod = kmalloc(sizeof(struct tp_module), GFP_KERNEL);
484 if (!tp_mod) { 376 if (!tp_mod) {
485 ret = -ENOMEM; 377 ret = -ENOMEM;
486 goto end; 378 goto end;
487 } 379 }
488 tp_mod->num_tracepoints = mod->num_tracepoints; 380 tp_mod->mod = mod;
489 tp_mod->tracepoints_ptrs = mod->tracepoints_ptrs;
490 list_add_tail(&tp_mod->list, &tracepoint_module_list); 381 list_add_tail(&tp_mod->list, &tracepoint_module_list);
491 tracepoint_update_probe_range(mod->tracepoints_ptrs, 382 blocking_notifier_call_chain(&tracepoint_notify_list,
492 mod->tracepoints_ptrs + mod->num_tracepoints); 383 MODULE_STATE_COMING, tp_mod);
493end: 384end:
494 mutex_unlock(&tracepoints_mutex); 385 mutex_unlock(&tracepoint_module_list_mutex);
495 return ret; 386 return ret;
496} 387}
497 388
498static int tracepoint_module_going(struct module *mod) 389static void tracepoint_module_going(struct module *mod)
499{ 390{
500 struct tp_module *pos; 391 struct tp_module *tp_mod;
501 392
502 if (!mod->num_tracepoints) 393 if (!mod->num_tracepoints)
503 return 0; 394 return;
504 395
505 mutex_lock(&tracepoints_mutex); 396 mutex_lock(&tracepoint_module_list_mutex);
506 tracepoint_update_probe_range(mod->tracepoints_ptrs, 397 list_for_each_entry(tp_mod, &tracepoint_module_list, list) {
507 mod->tracepoints_ptrs + mod->num_tracepoints); 398 if (tp_mod->mod == mod) {
508 list_for_each_entry(pos, &tracepoint_module_list, list) { 399 blocking_notifier_call_chain(&tracepoint_notify_list,
509 if (pos->tracepoints_ptrs == mod->tracepoints_ptrs) { 400 MODULE_STATE_GOING, tp_mod);
510 list_del(&pos->list); 401 list_del(&tp_mod->list);
511 kfree(pos); 402 kfree(tp_mod);
403 /*
 404 * The going notifier runs before this quiescence
 405 * check, so probes can be unregistered first.
406 */
407 tp_module_going_check_quiescent(mod->tracepoints_ptrs,
408 mod->tracepoints_ptrs + mod->num_tracepoints);
512 break; 409 break;
513 } 410 }
514 } 411 }
@@ -518,12 +415,11 @@ static int tracepoint_module_going(struct module *mod)
518 * flag on "going", in case a module taints the kernel only after being 415 * flag on "going", in case a module taints the kernel only after being
519 * loaded. 416 * loaded.
520 */ 417 */
521 mutex_unlock(&tracepoints_mutex); 418 mutex_unlock(&tracepoint_module_list_mutex);
522 return 0;
523} 419}
524 420
525int tracepoint_module_notify(struct notifier_block *self, 421static int tracepoint_module_notify(struct notifier_block *self,
526 unsigned long val, void *data) 422 unsigned long val, void *data)
527{ 423{
528 struct module *mod = data; 424 struct module *mod = data;
529 int ret = 0; 425 int ret = 0;
@@ -535,24 +431,58 @@ int tracepoint_module_notify(struct notifier_block *self,
535 case MODULE_STATE_LIVE: 431 case MODULE_STATE_LIVE:
536 break; 432 break;
537 case MODULE_STATE_GOING: 433 case MODULE_STATE_GOING:
538 ret = tracepoint_module_going(mod); 434 tracepoint_module_going(mod);
435 break;
436 case MODULE_STATE_UNFORMED:
539 break; 437 break;
540 } 438 }
541 return ret; 439 return ret;
542} 440}
543 441
544struct notifier_block tracepoint_module_nb = { 442static struct notifier_block tracepoint_module_nb = {
545 .notifier_call = tracepoint_module_notify, 443 .notifier_call = tracepoint_module_notify,
546 .priority = 0, 444 .priority = 0,
547}; 445};
548 446
549static int init_tracepoints(void) 447static __init int init_tracepoints(void)
550{ 448{
551 return register_module_notifier(&tracepoint_module_nb); 449 int ret;
450
451 ret = register_module_notifier(&tracepoint_module_nb);
452 if (ret)
453 pr_warning("Failed to register tracepoint module enter notifier\n");
454
455 return ret;
552} 456}
553__initcall(init_tracepoints); 457__initcall(init_tracepoints);
554#endif /* CONFIG_MODULES */ 458#endif /* CONFIG_MODULES */
555 459
460static void for_each_tracepoint_range(struct tracepoint * const *begin,
461 struct tracepoint * const *end,
462 void (*fct)(struct tracepoint *tp, void *priv),
463 void *priv)
464{
465 struct tracepoint * const *iter;
466
467 if (!begin)
468 return;
469 for (iter = begin; iter < end; iter++)
470 fct(*iter, priv);
471}
472
473/**
474 * for_each_kernel_tracepoint - iteration on all kernel tracepoints
475 * @fct: callback
476 * @priv: private data
477 */
478void for_each_kernel_tracepoint(void (*fct)(struct tracepoint *tp, void *priv),
479 void *priv)
480{
481 for_each_tracepoint_range(__start___tracepoints_ptrs,
482 __stop___tracepoints_ptrs, fct, priv);
483}
484EXPORT_SYMBOL_GPL(for_each_kernel_tracepoint);
485
556#ifdef CONFIG_HAVE_SYSCALL_TRACEPOINTS 486#ifdef CONFIG_HAVE_SYSCALL_TRACEPOINTS
557 487
558/* NB: reg/unreg are called while guarded with the tracepoints_mutex */ 488/* NB: reg/unreg are called while guarded with the tracepoints_mutex */
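
For context, a rough usage sketch of the reworked API from a module's point of view (not part of the patch): the tracepoint name "sample_event" and its (int value) prototype are invented for illustration, and a real probe must match the TP_PROTO() of the tracepoint it hooks.

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/tracepoint.h>

static struct tracepoint *sample_tp;

/* Hypothetical probe: void *data comes first, then the TP_PROTO arguments. */
static void sample_probe(void *data, int value)
{
	pr_info("sample_event fired, value=%d\n", value);
}

static void save_sample_tp(struct tracepoint *tp, void *priv)
{
	if (!strcmp(tp->name, "sample_event"))
		sample_tp = tp;
}

static int __init sample_tp_init(void)
{
	/* Resolve the core-kernel tracepoint descriptor by name. */
	for_each_kernel_tracepoint(save_sample_tp, NULL);
	if (!sample_tp)
		return -ENODEV;
	/* Register against the struct tracepoint itself, not a name string. */
	return tracepoint_probe_register(sample_tp, (void *)sample_probe, NULL);
}

static void __exit sample_tp_exit(void)
{
	tracepoint_probe_unregister(sample_tp, (void *)sample_probe, NULL);
	/* Wait for in-flight probe callers before this module's text goes away. */
	tracepoint_synchronize_unregister();
}

module_init(sample_tp_init);
module_exit(sample_tp_exit);
MODULE_LICENSE("GPL");
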
diff --git a/kernel/user_namespace.c b/kernel/user_namespace.c
index 0d8f6023fd8d..bf71b4b2d632 100644
--- a/kernel/user_namespace.c
+++ b/kernel/user_namespace.c
@@ -152,7 +152,7 @@ static u32 map_id_range_down(struct uid_gid_map *map, u32 id, u32 count)
152 152
153 /* Find the matching extent */ 153 /* Find the matching extent */
154 extents = map->nr_extents; 154 extents = map->nr_extents;
155 smp_read_barrier_depends(); 155 smp_rmb();
156 for (idx = 0; idx < extents; idx++) { 156 for (idx = 0; idx < extents; idx++) {
157 first = map->extent[idx].first; 157 first = map->extent[idx].first;
158 last = first + map->extent[idx].count - 1; 158 last = first + map->extent[idx].count - 1;
@@ -176,7 +176,7 @@ static u32 map_id_down(struct uid_gid_map *map, u32 id)
176 176
177 /* Find the matching extent */ 177 /* Find the matching extent */
178 extents = map->nr_extents; 178 extents = map->nr_extents;
179 smp_read_barrier_depends(); 179 smp_rmb();
180 for (idx = 0; idx < extents; idx++) { 180 for (idx = 0; idx < extents; idx++) {
181 first = map->extent[idx].first; 181 first = map->extent[idx].first;
182 last = first + map->extent[idx].count - 1; 182 last = first + map->extent[idx].count - 1;
@@ -199,7 +199,7 @@ static u32 map_id_up(struct uid_gid_map *map, u32 id)
199 199
200 /* Find the matching extent */ 200 /* Find the matching extent */
201 extents = map->nr_extents; 201 extents = map->nr_extents;
202 smp_read_barrier_depends(); 202 smp_rmb();
203 for (idx = 0; idx < extents; idx++) { 203 for (idx = 0; idx < extents; idx++) {
204 first = map->extent[idx].lower_first; 204 first = map->extent[idx].lower_first;
205 last = first + map->extent[idx].count - 1; 205 last = first + map->extent[idx].count - 1;
@@ -615,9 +615,8 @@ static ssize_t map_write(struct file *file, const char __user *buf,
615 * were written before the count of the extents. 615 * were written before the count of the extents.
616 * 616 *
 617 * To achieve this smp_wmb() is used to guarantee the write 617 * To achieve this smp_wmb() is used to guarantee the write
 618 * order and smp_read_barrier_depends() guarantees that we 618 * order and smp_rmb() guarantees that we don't have crazy
619 * don't have crazy architectures returning stale data. 619 * architectures returning stale data.
620 *
621 */ 620 */
622 mutex_lock(&id_map_mutex); 621 mutex_lock(&id_map_mutex);
623 622
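
The hunks above pair the writer's smp_wmb() in map_write() with a full smp_rmb() on the reader side, since the extent loads are ordinary loads with no data dependency on nr_extents. A toy publish/consume pair, with invented demo_* names, shows the pattern the updated comment describes:

#include <linux/kernel.h>
#include <linux/types.h>
#include <asm/barrier.h>

struct demo_map {
	unsigned int nr_items;
	u32 item[8];
};

/* Writer: fill the items, then publish the count that advertises them. */
static void demo_publish(struct demo_map *map, const u32 *src, unsigned int n)
{
	unsigned int i;

	for (i = 0; i < n; i++)
		map->item[i] = src[i];
	smp_wmb();	/* item stores become visible before the count */
	map->nr_items = n;
}

/* Reader: load the count, then scan at most that many items. */
static bool demo_contains(struct demo_map *map, u32 val)
{
	unsigned int n = map->nr_items;
	unsigned int i;

	smp_rmb();	/* pairs with smp_wmb() in demo_publish() */
	for (i = 0; i < n; i++)
		if (map->item[i] == val)
			return true;
	return false;
}
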
diff --git a/lib/Kconfig b/lib/Kconfig
index 5d4984c505f8..4771fb3f4da4 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -182,6 +182,15 @@ config AUDIT_GENERIC
182 depends on AUDIT && !AUDIT_ARCH 182 depends on AUDIT && !AUDIT_ARCH
183 default y 183 default y
184 184
185config AUDIT_ARCH_COMPAT_GENERIC
186 bool
187 default n
188
189config AUDIT_COMPAT_GENERIC
190 bool
191 depends on AUDIT_GENERIC && AUDIT_ARCH_COMPAT_GENERIC && COMPAT
192 default y
193
185config RANDOM32_SELFTEST 194config RANDOM32_SELFTEST
186 bool "PRNG perform self test on init" 195 bool "PRNG perform self test on init"
187 default n 196 default n
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index dd7f8858188a..140b66a874c1 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -1045,16 +1045,6 @@ config DEBUG_BUGVERBOSE
1045 of the BUG call as well as the EIP and oops trace. This aids 1045 of the BUG call as well as the EIP and oops trace. This aids
1046 debugging but costs about 70-100K of memory. 1046 debugging but costs about 70-100K of memory.
1047 1047
1048config DEBUG_WRITECOUNT
1049 bool "Debug filesystem writers count"
1050 depends on DEBUG_KERNEL
1051 help
1052 Enable this to catch wrong use of the writers count in struct
1053 vfsmount. This will increase the size of each file struct by
1054 32 bits.
1055
1056 If unsure, say N.
1057
1058config DEBUG_LIST 1048config DEBUG_LIST
1059 bool "Debug linked list manipulation" 1049 bool "Debug linked list manipulation"
1060 depends on DEBUG_KERNEL 1050 depends on DEBUG_KERNEL
diff --git a/lib/Makefile b/lib/Makefile
index 48140e3ba73f..0cd7b68e1382 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -96,6 +96,7 @@ obj-$(CONFIG_TEXTSEARCH_BM) += ts_bm.o
96obj-$(CONFIG_TEXTSEARCH_FSM) += ts_fsm.o 96obj-$(CONFIG_TEXTSEARCH_FSM) += ts_fsm.o
97obj-$(CONFIG_SMP) += percpu_counter.o 97obj-$(CONFIG_SMP) += percpu_counter.o
98obj-$(CONFIG_AUDIT_GENERIC) += audit.o 98obj-$(CONFIG_AUDIT_GENERIC) += audit.o
99obj-$(CONFIG_AUDIT_COMPAT_GENERIC) += compat_audit.o
99 100
100obj-$(CONFIG_SWIOTLB) += swiotlb.o 101obj-$(CONFIG_SWIOTLB) += swiotlb.o
101obj-$(CONFIG_IOMMU_HELPER) += iommu-helper.o 102obj-$(CONFIG_IOMMU_HELPER) += iommu-helper.o
diff --git a/lib/audit.c b/lib/audit.c
index 76bbed4a20e5..1d726a22565b 100644
--- a/lib/audit.c
+++ b/lib/audit.c
@@ -30,11 +30,17 @@ static unsigned signal_class[] = {
30 30
31int audit_classify_arch(int arch) 31int audit_classify_arch(int arch)
32{ 32{
33 return 0; 33 if (audit_is_compat(arch))
34 return 1;
35 else
36 return 0;
34} 37}
35 38
36int audit_classify_syscall(int abi, unsigned syscall) 39int audit_classify_syscall(int abi, unsigned syscall)
37{ 40{
41 if (audit_is_compat(abi))
42 return audit_classify_compat_syscall(abi, syscall);
43
38 switch(syscall) { 44 switch(syscall) {
39#ifdef __NR_open 45#ifdef __NR_open
40 case __NR_open: 46 case __NR_open:
@@ -57,6 +63,13 @@ int audit_classify_syscall(int abi, unsigned syscall)
57 63
58static int __init audit_classes_init(void) 64static int __init audit_classes_init(void)
59{ 65{
66#ifdef CONFIG_AUDIT_COMPAT_GENERIC
67 audit_register_class(AUDIT_CLASS_WRITE_32, compat_write_class);
68 audit_register_class(AUDIT_CLASS_READ_32, compat_read_class);
69 audit_register_class(AUDIT_CLASS_DIR_WRITE_32, compat_dir_class);
70 audit_register_class(AUDIT_CLASS_CHATTR_32, compat_chattr_class);
71 audit_register_class(AUDIT_CLASS_SIGNAL_32, compat_signal_class);
72#endif
60 audit_register_class(AUDIT_CLASS_WRITE, write_class); 73 audit_register_class(AUDIT_CLASS_WRITE, write_class);
61 audit_register_class(AUDIT_CLASS_READ, read_class); 74 audit_register_class(AUDIT_CLASS_READ, read_class);
62 audit_register_class(AUDIT_CLASS_DIR_WRITE, dir_class); 75 audit_register_class(AUDIT_CLASS_DIR_WRITE, dir_class);
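
Both classifiers above now branch on audit_is_compat(), which is supplied by <linux/audit.h> rather than by this file. A conceptual stand-in (the real helper may be spelled differently) is roughly:

#include <linux/kconfig.h>
#include <linux/audit.h>

/* Illustrative stand-in for audit_is_compat(); not the in-tree definition. */
static inline int demo_audit_is_compat(int arch)
{
	return IS_ENABLED(CONFIG_AUDIT_COMPAT_GENERIC) &&
	       !(arch & __AUDIT_ARCH_64BIT);
}

With CONFIG_AUDIT_COMPAT_GENERIC disabled the check collapses to false, so native-only architectures keep the old behaviour.
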
diff --git a/lib/compat_audit.c b/lib/compat_audit.c
new file mode 100644
index 000000000000..873f75b640ab
--- /dev/null
+++ b/lib/compat_audit.c
@@ -0,0 +1,50 @@
1#include <linux/init.h>
2#include <linux/types.h>
3#include <asm/unistd32.h>
4
5unsigned compat_dir_class[] = {
6#include <asm-generic/audit_dir_write.h>
7~0U
8};
9
10unsigned compat_read_class[] = {
11#include <asm-generic/audit_read.h>
12~0U
13};
14
15unsigned compat_write_class[] = {
16#include <asm-generic/audit_write.h>
17~0U
18};
19
20unsigned compat_chattr_class[] = {
21#include <asm-generic/audit_change_attr.h>
22~0U
23};
24
25unsigned compat_signal_class[] = {
26#include <asm-generic/audit_signal.h>
27~0U
28};
29
30int audit_classify_compat_syscall(int abi, unsigned syscall)
31{
32 switch (syscall) {
33#ifdef __NR_open
34 case __NR_open:
35 return 2;
36#endif
37#ifdef __NR_openat
38 case __NR_openat:
39 return 3;
40#endif
41#ifdef __NR_socketcall
42 case __NR_socketcall:
43 return 4;
44#endif
45 case __NR_execve:
46 return 5;
47 default:
48 return 1;
49 }
50}
diff --git a/mm/Makefile b/mm/Makefile
index 9e5aaf92197d..b484452dac57 100644
--- a/mm/Makefile
+++ b/mm/Makefile
@@ -17,7 +17,8 @@ obj-y := filemap.o mempool.o oom_kill.o fadvise.o \
17 util.o mmzone.o vmstat.o backing-dev.o \ 17 util.o mmzone.o vmstat.o backing-dev.o \
18 mm_init.o mmu_context.o percpu.o slab_common.o \ 18 mm_init.o mmu_context.o percpu.o slab_common.o \
19 compaction.o balloon_compaction.o vmacache.o \ 19 compaction.o balloon_compaction.o vmacache.o \
20 interval_tree.o list_lru.o workingset.o $(mmu-y) 20 interval_tree.o list_lru.o workingset.o \
21 iov_iter.o $(mmu-y)
21 22
22obj-y += init-mm.o 23obj-y += init-mm.o
23 24
diff --git a/mm/filemap.c b/mm/filemap.c
index 27ebc0c9571b..a82fbe4c9e8e 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -77,7 +77,7 @@
77 * ->mmap_sem 77 * ->mmap_sem
78 * ->lock_page (access_process_vm) 78 * ->lock_page (access_process_vm)
79 * 79 *
80 * ->i_mutex (generic_file_buffered_write) 80 * ->i_mutex (generic_perform_write)
81 * ->mmap_sem (fault_in_pages_readable->do_page_fault) 81 * ->mmap_sem (fault_in_pages_readable->do_page_fault)
82 * 82 *
83 * bdi->wb.list_lock 83 * bdi->wb.list_lock
@@ -1428,7 +1428,8 @@ static void shrink_readahead_size_eio(struct file *filp,
1428 * do_generic_file_read - generic file read routine 1428 * do_generic_file_read - generic file read routine
1429 * @filp: the file to read 1429 * @filp: the file to read
1430 * @ppos: current file position 1430 * @ppos: current file position
1431 * @desc: read_descriptor 1431 * @iter: data destination
1432 * @written: already copied
1432 * 1433 *
1433 * This is a generic file read routine, and uses the 1434 * This is a generic file read routine, and uses the
1434 * mapping->a_ops->readpage() function for the actual low-level stuff. 1435 * mapping->a_ops->readpage() function for the actual low-level stuff.
@@ -1436,8 +1437,8 @@ static void shrink_readahead_size_eio(struct file *filp,
1436 * This is really ugly. But the goto's actually try to clarify some 1437 * This is really ugly. But the goto's actually try to clarify some
1437 * of the logic when it comes to error handling etc. 1438 * of the logic when it comes to error handling etc.
1438 */ 1439 */
1439static void do_generic_file_read(struct file *filp, loff_t *ppos, 1440static ssize_t do_generic_file_read(struct file *filp, loff_t *ppos,
1440 read_descriptor_t *desc) 1441 struct iov_iter *iter, ssize_t written)
1441{ 1442{
1442 struct address_space *mapping = filp->f_mapping; 1443 struct address_space *mapping = filp->f_mapping;
1443 struct inode *inode = mapping->host; 1444 struct inode *inode = mapping->host;
@@ -1447,12 +1448,12 @@ static void do_generic_file_read(struct file *filp, loff_t *ppos,
1447 pgoff_t prev_index; 1448 pgoff_t prev_index;
1448 unsigned long offset; /* offset into pagecache page */ 1449 unsigned long offset; /* offset into pagecache page */
1449 unsigned int prev_offset; 1450 unsigned int prev_offset;
1450 int error; 1451 int error = 0;
1451 1452
1452 index = *ppos >> PAGE_CACHE_SHIFT; 1453 index = *ppos >> PAGE_CACHE_SHIFT;
1453 prev_index = ra->prev_pos >> PAGE_CACHE_SHIFT; 1454 prev_index = ra->prev_pos >> PAGE_CACHE_SHIFT;
1454 prev_offset = ra->prev_pos & (PAGE_CACHE_SIZE-1); 1455 prev_offset = ra->prev_pos & (PAGE_CACHE_SIZE-1);
1455 last_index = (*ppos + desc->count + PAGE_CACHE_SIZE-1) >> PAGE_CACHE_SHIFT; 1456 last_index = (*ppos + iter->count + PAGE_CACHE_SIZE-1) >> PAGE_CACHE_SHIFT;
1456 offset = *ppos & ~PAGE_CACHE_MASK; 1457 offset = *ppos & ~PAGE_CACHE_MASK;
1457 1458
1458 for (;;) { 1459 for (;;) {
@@ -1487,7 +1488,7 @@ find_page:
1487 if (!page->mapping) 1488 if (!page->mapping)
1488 goto page_not_up_to_date_locked; 1489 goto page_not_up_to_date_locked;
1489 if (!mapping->a_ops->is_partially_uptodate(page, 1490 if (!mapping->a_ops->is_partially_uptodate(page,
1490 desc, offset)) 1491 offset, iter->count))
1491 goto page_not_up_to_date_locked; 1492 goto page_not_up_to_date_locked;
1492 unlock_page(page); 1493 unlock_page(page);
1493 } 1494 }
@@ -1537,24 +1538,23 @@ page_ok:
1537 /* 1538 /*
1538 * Ok, we have the page, and it's up-to-date, so 1539 * Ok, we have the page, and it's up-to-date, so
1539 * now we can copy it to user space... 1540 * now we can copy it to user space...
1540 *
1541 * The file_read_actor routine returns how many bytes were
1542 * actually used..
1543 * NOTE! This may not be the same as how much of a user buffer
1544 * we filled up (we may be padding etc), so we can only update
1545 * "pos" here (the actor routine has to update the user buffer
1546 * pointers and the remaining count).
1547 */ 1541 */
1548 ret = file_read_actor(desc, page, offset, nr); 1542
1543 ret = copy_page_to_iter(page, offset, nr, iter);
1549 offset += ret; 1544 offset += ret;
1550 index += offset >> PAGE_CACHE_SHIFT; 1545 index += offset >> PAGE_CACHE_SHIFT;
1551 offset &= ~PAGE_CACHE_MASK; 1546 offset &= ~PAGE_CACHE_MASK;
1552 prev_offset = offset; 1547 prev_offset = offset;
1553 1548
1554 page_cache_release(page); 1549 page_cache_release(page);
1555 if (ret == nr && desc->count) 1550 written += ret;
1556 continue; 1551 if (!iov_iter_count(iter))
1557 goto out; 1552 goto out;
1553 if (ret < nr) {
1554 error = -EFAULT;
1555 goto out;
1556 }
1557 continue;
1558 1558
1559page_not_up_to_date: 1559page_not_up_to_date:
1560 /* Get exclusive access to the page ... */ 1560 /* Get exclusive access to the page ... */
@@ -1589,6 +1589,7 @@ readpage:
1589 if (unlikely(error)) { 1589 if (unlikely(error)) {
1590 if (error == AOP_TRUNCATED_PAGE) { 1590 if (error == AOP_TRUNCATED_PAGE) {
1591 page_cache_release(page); 1591 page_cache_release(page);
1592 error = 0;
1592 goto find_page; 1593 goto find_page;
1593 } 1594 }
1594 goto readpage_error; 1595 goto readpage_error;
@@ -1619,7 +1620,6 @@ readpage:
1619 1620
1620readpage_error: 1621readpage_error:
1621 /* UHHUH! A synchronous read error occurred. Report it */ 1622 /* UHHUH! A synchronous read error occurred. Report it */
1622 desc->error = error;
1623 page_cache_release(page); 1623 page_cache_release(page);
1624 goto out; 1624 goto out;
1625 1625
@@ -1630,16 +1630,17 @@ no_cached_page:
1630 */ 1630 */
1631 page = page_cache_alloc_cold(mapping); 1631 page = page_cache_alloc_cold(mapping);
1632 if (!page) { 1632 if (!page) {
1633 desc->error = -ENOMEM; 1633 error = -ENOMEM;
1634 goto out; 1634 goto out;
1635 } 1635 }
1636 error = add_to_page_cache_lru(page, mapping, 1636 error = add_to_page_cache_lru(page, mapping,
1637 index, GFP_KERNEL); 1637 index, GFP_KERNEL);
1638 if (error) { 1638 if (error) {
1639 page_cache_release(page); 1639 page_cache_release(page);
1640 if (error == -EEXIST) 1640 if (error == -EEXIST) {
1641 error = 0;
1641 goto find_page; 1642 goto find_page;
1642 desc->error = error; 1643 }
1643 goto out; 1644 goto out;
1644 } 1645 }
1645 goto readpage; 1646 goto readpage;
@@ -1652,44 +1653,7 @@ out:
1652 1653
1653 *ppos = ((loff_t)index << PAGE_CACHE_SHIFT) + offset; 1654 *ppos = ((loff_t)index << PAGE_CACHE_SHIFT) + offset;
1654 file_accessed(filp); 1655 file_accessed(filp);
1655} 1656 return written ? written : error;
1656
1657int file_read_actor(read_descriptor_t *desc, struct page *page,
1658 unsigned long offset, unsigned long size)
1659{
1660 char *kaddr;
1661 unsigned long left, count = desc->count;
1662
1663 if (size > count)
1664 size = count;
1665
1666 /*
1667 * Faults on the destination of a read are common, so do it before
1668 * taking the kmap.
1669 */
1670 if (!fault_in_pages_writeable(desc->arg.buf, size)) {
1671 kaddr = kmap_atomic(page);
1672 left = __copy_to_user_inatomic(desc->arg.buf,
1673 kaddr + offset, size);
1674 kunmap_atomic(kaddr);
1675 if (left == 0)
1676 goto success;
1677 }
1678
1679 /* Do it the slow way */
1680 kaddr = kmap(page);
1681 left = __copy_to_user(desc->arg.buf, kaddr + offset, size);
1682 kunmap(page);
1683
1684 if (left) {
1685 size -= left;
1686 desc->error = -EFAULT;
1687 }
1688success:
1689 desc->count = count - size;
1690 desc->written += size;
1691 desc->arg.buf += size;
1692 return size;
1693} 1657}
1694 1658
1695/* 1659/*
@@ -1747,14 +1711,15 @@ generic_file_aio_read(struct kiocb *iocb, const struct iovec *iov,
1747{ 1711{
1748 struct file *filp = iocb->ki_filp; 1712 struct file *filp = iocb->ki_filp;
1749 ssize_t retval; 1713 ssize_t retval;
1750 unsigned long seg = 0;
1751 size_t count; 1714 size_t count;
1752 loff_t *ppos = &iocb->ki_pos; 1715 loff_t *ppos = &iocb->ki_pos;
1716 struct iov_iter i;
1753 1717
1754 count = 0; 1718 count = 0;
1755 retval = generic_segment_checks(iov, &nr_segs, &count, VERIFY_WRITE); 1719 retval = generic_segment_checks(iov, &nr_segs, &count, VERIFY_WRITE);
1756 if (retval) 1720 if (retval)
1757 return retval; 1721 return retval;
1722 iov_iter_init(&i, iov, nr_segs, count, 0);
1758 1723
1759 /* coalesce the iovecs and go direct-to-BIO for O_DIRECT */ 1724 /* coalesce the iovecs and go direct-to-BIO for O_DIRECT */
1760 if (filp->f_flags & O_DIRECT) { 1725 if (filp->f_flags & O_DIRECT) {
@@ -1776,6 +1741,11 @@ generic_file_aio_read(struct kiocb *iocb, const struct iovec *iov,
1776 if (retval > 0) { 1741 if (retval > 0) {
1777 *ppos = pos + retval; 1742 *ppos = pos + retval;
1778 count -= retval; 1743 count -= retval;
1744 /*
1745 * If we did a short DIO read we need to skip the
1746 * section of the iov that we've already read data into.
1747 */
1748 iov_iter_advance(&i, retval);
1779 } 1749 }
1780 1750
1781 /* 1751 /*
@@ -1792,39 +1762,7 @@ generic_file_aio_read(struct kiocb *iocb, const struct iovec *iov,
1792 } 1762 }
1793 } 1763 }
1794 1764
1795 count = retval; 1765 retval = do_generic_file_read(filp, ppos, &i, retval);
1796 for (seg = 0; seg < nr_segs; seg++) {
1797 read_descriptor_t desc;
1798 loff_t offset = 0;
1799
1800 /*
1801 * If we did a short DIO read we need to skip the section of the
1802 * iov that we've already read data into.
1803 */
1804 if (count) {
1805 if (count > iov[seg].iov_len) {
1806 count -= iov[seg].iov_len;
1807 continue;
1808 }
1809 offset = count;
1810 count = 0;
1811 }
1812
1813 desc.written = 0;
1814 desc.arg.buf = iov[seg].iov_base + offset;
1815 desc.count = iov[seg].iov_len - offset;
1816 if (desc.count == 0)
1817 continue;
1818 desc.error = 0;
1819 do_generic_file_read(filp, ppos, &desc);
1820 retval += desc.written;
1821 if (desc.error) {
1822 retval = retval ?: desc.error;
1823 break;
1824 }
1825 if (desc.count > 0)
1826 break;
1827 }
1828out: 1766out:
1829 return retval; 1767 return retval;
1830} 1768}
@@ -2335,150 +2273,6 @@ struct page *read_cache_page_gfp(struct address_space *mapping,
2335} 2273}
2336EXPORT_SYMBOL(read_cache_page_gfp); 2274EXPORT_SYMBOL(read_cache_page_gfp);
2337 2275
2338static size_t __iovec_copy_from_user_inatomic(char *vaddr,
2339 const struct iovec *iov, size_t base, size_t bytes)
2340{
2341 size_t copied = 0, left = 0;
2342
2343 while (bytes) {
2344 char __user *buf = iov->iov_base + base;
2345 int copy = min(bytes, iov->iov_len - base);
2346
2347 base = 0;
2348 left = __copy_from_user_inatomic(vaddr, buf, copy);
2349 copied += copy;
2350 bytes -= copy;
2351 vaddr += copy;
2352 iov++;
2353
2354 if (unlikely(left))
2355 break;
2356 }
2357 return copied - left;
2358}
2359
2360/*
2361 * Copy as much as we can into the page and return the number of bytes which
2362 * were successfully copied. If a fault is encountered then return the number of
2363 * bytes which were copied.
2364 */
2365size_t iov_iter_copy_from_user_atomic(struct page *page,
2366 struct iov_iter *i, unsigned long offset, size_t bytes)
2367{
2368 char *kaddr;
2369 size_t copied;
2370
2371 BUG_ON(!in_atomic());
2372 kaddr = kmap_atomic(page);
2373 if (likely(i->nr_segs == 1)) {
2374 int left;
2375 char __user *buf = i->iov->iov_base + i->iov_offset;
2376 left = __copy_from_user_inatomic(kaddr + offset, buf, bytes);
2377 copied = bytes - left;
2378 } else {
2379 copied = __iovec_copy_from_user_inatomic(kaddr + offset,
2380 i->iov, i->iov_offset, bytes);
2381 }
2382 kunmap_atomic(kaddr);
2383
2384 return copied;
2385}
2386EXPORT_SYMBOL(iov_iter_copy_from_user_atomic);
2387
2388/*
2389 * This has the same sideeffects and return value as
2390 * iov_iter_copy_from_user_atomic().
2391 * The difference is that it attempts to resolve faults.
2392 * Page must not be locked.
2393 */
2394size_t iov_iter_copy_from_user(struct page *page,
2395 struct iov_iter *i, unsigned long offset, size_t bytes)
2396{
2397 char *kaddr;
2398 size_t copied;
2399
2400 kaddr = kmap(page);
2401 if (likely(i->nr_segs == 1)) {
2402 int left;
2403 char __user *buf = i->iov->iov_base + i->iov_offset;
2404 left = __copy_from_user(kaddr + offset, buf, bytes);
2405 copied = bytes - left;
2406 } else {
2407 copied = __iovec_copy_from_user_inatomic(kaddr + offset,
2408 i->iov, i->iov_offset, bytes);
2409 }
2410 kunmap(page);
2411 return copied;
2412}
2413EXPORT_SYMBOL(iov_iter_copy_from_user);
2414
2415void iov_iter_advance(struct iov_iter *i, size_t bytes)
2416{
2417 BUG_ON(i->count < bytes);
2418
2419 if (likely(i->nr_segs == 1)) {
2420 i->iov_offset += bytes;
2421 i->count -= bytes;
2422 } else {
2423 const struct iovec *iov = i->iov;
2424 size_t base = i->iov_offset;
2425 unsigned long nr_segs = i->nr_segs;
2426
2427 /*
2428 * The !iov->iov_len check ensures we skip over unlikely
2429 * zero-length segments (without overruning the iovec).
2430 */
2431 while (bytes || unlikely(i->count && !iov->iov_len)) {
2432 int copy;
2433
2434 copy = min(bytes, iov->iov_len - base);
2435 BUG_ON(!i->count || i->count < copy);
2436 i->count -= copy;
2437 bytes -= copy;
2438 base += copy;
2439 if (iov->iov_len == base) {
2440 iov++;
2441 nr_segs--;
2442 base = 0;
2443 }
2444 }
2445 i->iov = iov;
2446 i->iov_offset = base;
2447 i->nr_segs = nr_segs;
2448 }
2449}
2450EXPORT_SYMBOL(iov_iter_advance);
2451
2452/*
2453 * Fault in the first iovec of the given iov_iter, to a maximum length
2454 * of bytes. Returns 0 on success, or non-zero if the memory could not be
2455 * accessed (ie. because it is an invalid address).
2456 *
2457 * writev-intensive code may want this to prefault several iovecs -- that
2458 * would be possible (callers must not rely on the fact that _only_ the
2459 * first iovec will be faulted with the current implementation).
2460 */
2461int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes)
2462{
2463 char __user *buf = i->iov->iov_base + i->iov_offset;
2464 bytes = min(bytes, i->iov->iov_len - i->iov_offset);
2465 return fault_in_pages_readable(buf, bytes);
2466}
2467EXPORT_SYMBOL(iov_iter_fault_in_readable);
2468
2469/*
2470 * Return the count of just the current iov_iter segment.
2471 */
2472size_t iov_iter_single_seg_count(const struct iov_iter *i)
2473{
2474 const struct iovec *iov = i->iov;
2475 if (i->nr_segs == 1)
2476 return i->count;
2477 else
2478 return min(i->count, iov->iov_len - i->iov_offset);
2479}
2480EXPORT_SYMBOL(iov_iter_single_seg_count);
2481
2482/* 2276/*
2483 * Performs necessary checks before doing a write 2277 * Performs necessary checks before doing a write
2484 * 2278 *
@@ -2585,7 +2379,7 @@ EXPORT_SYMBOL(pagecache_write_end);
2585 2379
2586ssize_t 2380ssize_t
2587generic_file_direct_write(struct kiocb *iocb, const struct iovec *iov, 2381generic_file_direct_write(struct kiocb *iocb, const struct iovec *iov,
2588 unsigned long *nr_segs, loff_t pos, loff_t *ppos, 2382 unsigned long *nr_segs, loff_t pos,
2589 size_t count, size_t ocount) 2383 size_t count, size_t ocount)
2590{ 2384{
2591 struct file *file = iocb->ki_filp; 2385 struct file *file = iocb->ki_filp;
@@ -2646,7 +2440,7 @@ generic_file_direct_write(struct kiocb *iocb, const struct iovec *iov,
2646 i_size_write(inode, pos); 2440 i_size_write(inode, pos);
2647 mark_inode_dirty(inode); 2441 mark_inode_dirty(inode);
2648 } 2442 }
2649 *ppos = pos; 2443 iocb->ki_pos = pos;
2650 } 2444 }
2651out: 2445out:
2652 return written; 2446 return written;
@@ -2692,7 +2486,7 @@ found:
2692} 2486}
2693EXPORT_SYMBOL(grab_cache_page_write_begin); 2487EXPORT_SYMBOL(grab_cache_page_write_begin);
2694 2488
2695static ssize_t generic_perform_write(struct file *file, 2489ssize_t generic_perform_write(struct file *file,
2696 struct iov_iter *i, loff_t pos) 2490 struct iov_iter *i, loff_t pos)
2697{ 2491{
2698 struct address_space *mapping = file->f_mapping; 2492 struct address_space *mapping = file->f_mapping;
@@ -2742,9 +2536,7 @@ again:
2742 if (mapping_writably_mapped(mapping)) 2536 if (mapping_writably_mapped(mapping))
2743 flush_dcache_page(page); 2537 flush_dcache_page(page);
2744 2538
2745 pagefault_disable();
2746 copied = iov_iter_copy_from_user_atomic(page, i, offset, bytes); 2539 copied = iov_iter_copy_from_user_atomic(page, i, offset, bytes);
2747 pagefault_enable();
2748 flush_dcache_page(page); 2540 flush_dcache_page(page);
2749 2541
2750 mark_page_accessed(page); 2542 mark_page_accessed(page);
@@ -2782,27 +2574,7 @@ again:
2782 2574
2783 return written ? written : status; 2575 return written ? written : status;
2784} 2576}
2785 2577EXPORT_SYMBOL(generic_perform_write);
2786ssize_t
2787generic_file_buffered_write(struct kiocb *iocb, const struct iovec *iov,
2788 unsigned long nr_segs, loff_t pos, loff_t *ppos,
2789 size_t count, ssize_t written)
2790{
2791 struct file *file = iocb->ki_filp;
2792 ssize_t status;
2793 struct iov_iter i;
2794
2795 iov_iter_init(&i, iov, nr_segs, count, written);
2796 status = generic_perform_write(file, &i, pos);
2797
2798 if (likely(status >= 0)) {
2799 written += status;
2800 *ppos = pos + status;
2801 }
2802
2803 return written ? written : status;
2804}
2805EXPORT_SYMBOL(generic_file_buffered_write);
2806 2578
2807/** 2579/**
2808 * __generic_file_aio_write - write data to a file 2580 * __generic_file_aio_write - write data to a file
@@ -2824,16 +2596,18 @@ EXPORT_SYMBOL(generic_file_buffered_write);
2824 * avoid syncing under i_mutex. 2596 * avoid syncing under i_mutex.
2825 */ 2597 */
2826ssize_t __generic_file_aio_write(struct kiocb *iocb, const struct iovec *iov, 2598ssize_t __generic_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
2827 unsigned long nr_segs, loff_t *ppos) 2599 unsigned long nr_segs)
2828{ 2600{
2829 struct file *file = iocb->ki_filp; 2601 struct file *file = iocb->ki_filp;
2830 struct address_space * mapping = file->f_mapping; 2602 struct address_space * mapping = file->f_mapping;
2831 size_t ocount; /* original count */ 2603 size_t ocount; /* original count */
2832 size_t count; /* after file limit checks */ 2604 size_t count; /* after file limit checks */
2833 struct inode *inode = mapping->host; 2605 struct inode *inode = mapping->host;
2834 loff_t pos; 2606 loff_t pos = iocb->ki_pos;
2835 ssize_t written; 2607 ssize_t written = 0;
2836 ssize_t err; 2608 ssize_t err;
2609 ssize_t status;
2610 struct iov_iter from;
2837 2611
2838 ocount = 0; 2612 ocount = 0;
2839 err = generic_segment_checks(iov, &nr_segs, &ocount, VERIFY_READ); 2613 err = generic_segment_checks(iov, &nr_segs, &ocount, VERIFY_READ);
@@ -2841,12 +2615,9 @@ ssize_t __generic_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
2841 return err; 2615 return err;
2842 2616
2843 count = ocount; 2617 count = ocount;
2844 pos = *ppos;
2845 2618
2846 /* We can write back this queue in page reclaim */ 2619 /* We can write back this queue in page reclaim */
2847 current->backing_dev_info = mapping->backing_dev_info; 2620 current->backing_dev_info = mapping->backing_dev_info;
2848 written = 0;
2849
2850 err = generic_write_checks(file, &pos, &count, S_ISBLK(inode->i_mode)); 2621 err = generic_write_checks(file, &pos, &count, S_ISBLK(inode->i_mode));
2851 if (err) 2622 if (err)
2852 goto out; 2623 goto out;
@@ -2862,45 +2633,47 @@ ssize_t __generic_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
2862 if (err) 2633 if (err)
2863 goto out; 2634 goto out;
2864 2635
2636 iov_iter_init(&from, iov, nr_segs, count, 0);
2637
2865 /* coalesce the iovecs and go direct-to-BIO for O_DIRECT */ 2638 /* coalesce the iovecs and go direct-to-BIO for O_DIRECT */
2866 if (unlikely(file->f_flags & O_DIRECT)) { 2639 if (unlikely(file->f_flags & O_DIRECT)) {
2867 loff_t endbyte; 2640 loff_t endbyte;
2868 ssize_t written_buffered;
2869 2641
2870 written = generic_file_direct_write(iocb, iov, &nr_segs, pos, 2642 written = generic_file_direct_write(iocb, iov, &from.nr_segs, pos,
2871 ppos, count, ocount); 2643 count, ocount);
2872 if (written < 0 || written == count) 2644 if (written < 0 || written == count)
2873 goto out; 2645 goto out;
2646 iov_iter_advance(&from, written);
2647
2874 /* 2648 /*
2875 * direct-io write to a hole: fall through to buffered I/O 2649 * direct-io write to a hole: fall through to buffered I/O
2876 * for completing the rest of the request. 2650 * for completing the rest of the request.
2877 */ 2651 */
2878 pos += written; 2652 pos += written;
2879 count -= written; 2653 count -= written;
2880 written_buffered = generic_file_buffered_write(iocb, iov, 2654
2881 nr_segs, pos, ppos, count, 2655 status = generic_perform_write(file, &from, pos);
2882 written);
2883 /* 2656 /*
2884 * If generic_file_buffered_write() retuned a synchronous error 2657 * If generic_perform_write() returned a synchronous error
2885 * then we want to return the number of bytes which were 2658 * then we want to return the number of bytes which were
2886 * direct-written, or the error code if that was zero. Note 2659 * direct-written, or the error code if that was zero. Note
2887 * that this differs from normal direct-io semantics, which 2660 * that this differs from normal direct-io semantics, which
2888 * will return -EFOO even if some bytes were written. 2661 * will return -EFOO even if some bytes were written.
2889 */ 2662 */
2890 if (written_buffered < 0) { 2663 if (unlikely(status < 0) && !written) {
2891 err = written_buffered; 2664 err = status;
2892 goto out; 2665 goto out;
2893 } 2666 }
2894 2667 iocb->ki_pos = pos + status;
2895 /* 2668 /*
2896 * We need to ensure that the page cache pages are written to 2669 * We need to ensure that the page cache pages are written to
2897 * disk and invalidated to preserve the expected O_DIRECT 2670 * disk and invalidated to preserve the expected O_DIRECT
2898 * semantics. 2671 * semantics.
2899 */ 2672 */
2900 endbyte = pos + written_buffered - written - 1; 2673 endbyte = pos + status - 1;
2901 err = filemap_write_and_wait_range(file->f_mapping, pos, endbyte); 2674 err = filemap_write_and_wait_range(file->f_mapping, pos, endbyte);
2902 if (err == 0) { 2675 if (err == 0) {
2903 written = written_buffered; 2676 written += status;
2904 invalidate_mapping_pages(mapping, 2677 invalidate_mapping_pages(mapping,
2905 pos >> PAGE_CACHE_SHIFT, 2678 pos >> PAGE_CACHE_SHIFT,
2906 endbyte >> PAGE_CACHE_SHIFT); 2679 endbyte >> PAGE_CACHE_SHIFT);
@@ -2911,8 +2684,9 @@ ssize_t __generic_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
2911 */ 2684 */
2912 } 2685 }
2913 } else { 2686 } else {
2914 written = generic_file_buffered_write(iocb, iov, nr_segs, 2687 written = generic_perform_write(file, &from, pos);
2915 pos, ppos, count, written); 2688 if (likely(written >= 0))
2689 iocb->ki_pos = pos + written;
2916 } 2690 }
2917out: 2691out:
2918 current->backing_dev_info = NULL; 2692 current->backing_dev_info = NULL;
@@ -2941,7 +2715,7 @@ ssize_t generic_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
2941 BUG_ON(iocb->ki_pos != pos); 2715 BUG_ON(iocb->ki_pos != pos);
2942 2716
2943 mutex_lock(&inode->i_mutex); 2717 mutex_lock(&inode->i_mutex);
2944 ret = __generic_file_aio_write(iocb, iov, nr_segs, &iocb->ki_pos); 2718 ret = __generic_file_aio_write(iocb, iov, nr_segs);
2945 mutex_unlock(&inode->i_mutex); 2719 mutex_unlock(&inode->i_mutex);
2946 2720
2947 if (ret > 0) { 2721 if (ret > 0) {
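
To see the new read-side plumbing in one place, here is a hypothetical helper (the demo_* name is not from the patch) that does what do_generic_file_read() now does for a single up-to-date page: wrap the caller's iovec in an iov_iter and let copy_page_to_iter() walk the segments.

#include <linux/uio.h>
#include <linux/mm.h>
#include <linux/errno.h>

/*
 * Copy up to @len bytes from @page (starting at @offset within the page)
 * into the user buffers described by @iov.  Assumes the iovec covers at
 * least @len bytes.
 */
static ssize_t demo_copy_page_to_user(struct page *page, size_t offset,
				      const struct iovec *iov,
				      unsigned long nr_segs, size_t len)
{
	struct iov_iter i;
	size_t copied;

	iov_iter_init(&i, iov, nr_segs, len, 0);
	copied = copy_page_to_iter(page, offset, len, &i);

	/* A short copy means a fault on the user buffers part way through. */
	if (copied < len)
		return copied ? copied : -EFAULT;
	return copied;
}
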
diff --git a/mm/iov_iter.c b/mm/iov_iter.c
new file mode 100644
index 000000000000..10e46cd721de
--- /dev/null
+++ b/mm/iov_iter.c
@@ -0,0 +1,224 @@
1#include <linux/export.h>
2#include <linux/uio.h>
3#include <linux/pagemap.h>
4
5size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
6 struct iov_iter *i)
7{
8 size_t skip, copy, left, wanted;
9 const struct iovec *iov;
10 char __user *buf;
11 void *kaddr, *from;
12
13 if (unlikely(bytes > i->count))
14 bytes = i->count;
15
16 if (unlikely(!bytes))
17 return 0;
18
19 wanted = bytes;
20 iov = i->iov;
21 skip = i->iov_offset;
22 buf = iov->iov_base + skip;
23 copy = min(bytes, iov->iov_len - skip);
24
25 if (!fault_in_pages_writeable(buf, copy)) {
26 kaddr = kmap_atomic(page);
27 from = kaddr + offset;
28
29 /* first chunk, usually the only one */
30 left = __copy_to_user_inatomic(buf, from, copy);
31 copy -= left;
32 skip += copy;
33 from += copy;
34 bytes -= copy;
35
36 while (unlikely(!left && bytes)) {
37 iov++;
38 buf = iov->iov_base;
39 copy = min(bytes, iov->iov_len);
40 left = __copy_to_user_inatomic(buf, from, copy);
41 copy -= left;
42 skip = copy;
43 from += copy;
44 bytes -= copy;
45 }
46 if (likely(!bytes)) {
47 kunmap_atomic(kaddr);
48 goto done;
49 }
50 offset = from - kaddr;
51 buf += copy;
52 kunmap_atomic(kaddr);
53 copy = min(bytes, iov->iov_len - skip);
54 }
55 /* Too bad - revert to non-atomic kmap */
56 kaddr = kmap(page);
57 from = kaddr + offset;
58 left = __copy_to_user(buf, from, copy);
59 copy -= left;
60 skip += copy;
61 from += copy;
62 bytes -= copy;
63 while (unlikely(!left && bytes)) {
64 iov++;
65 buf = iov->iov_base;
66 copy = min(bytes, iov->iov_len);
67 left = __copy_to_user(buf, from, copy);
68 copy -= left;
69 skip = copy;
70 from += copy;
71 bytes -= copy;
72 }
73 kunmap(page);
74done:
75 i->count -= wanted - bytes;
76 i->nr_segs -= iov - i->iov;
77 i->iov = iov;
78 i->iov_offset = skip;
79 return wanted - bytes;
80}
81EXPORT_SYMBOL(copy_page_to_iter);
82
83static size_t __iovec_copy_from_user_inatomic(char *vaddr,
84 const struct iovec *iov, size_t base, size_t bytes)
85{
86 size_t copied = 0, left = 0;
87
88 while (bytes) {
89 char __user *buf = iov->iov_base + base;
90 int copy = min(bytes, iov->iov_len - base);
91
92 base = 0;
93 left = __copy_from_user_inatomic(vaddr, buf, copy);
94 copied += copy;
95 bytes -= copy;
96 vaddr += copy;
97 iov++;
98
99 if (unlikely(left))
100 break;
101 }
102 return copied - left;
103}
104
105/*
106 * Copy as much as we can into the page and return the number of bytes which
107 * were successfully copied. If a fault is encountered then return the number of
108 * bytes which were copied.
109 */
110size_t iov_iter_copy_from_user_atomic(struct page *page,
111 struct iov_iter *i, unsigned long offset, size_t bytes)
112{
113 char *kaddr;
114 size_t copied;
115
116 kaddr = kmap_atomic(page);
117 if (likely(i->nr_segs == 1)) {
118 int left;
119 char __user *buf = i->iov->iov_base + i->iov_offset;
120 left = __copy_from_user_inatomic(kaddr + offset, buf, bytes);
121 copied = bytes - left;
122 } else {
123 copied = __iovec_copy_from_user_inatomic(kaddr + offset,
124 i->iov, i->iov_offset, bytes);
125 }
126 kunmap_atomic(kaddr);
127
128 return copied;
129}
130EXPORT_SYMBOL(iov_iter_copy_from_user_atomic);
131
132/*
 133 * This has the same side effects and return value as
134 * iov_iter_copy_from_user_atomic().
135 * The difference is that it attempts to resolve faults.
136 * Page must not be locked.
137 */
138size_t iov_iter_copy_from_user(struct page *page,
139 struct iov_iter *i, unsigned long offset, size_t bytes)
140{
141 char *kaddr;
142 size_t copied;
143
144 kaddr = kmap(page);
145 if (likely(i->nr_segs == 1)) {
146 int left;
147 char __user *buf = i->iov->iov_base + i->iov_offset;
148 left = __copy_from_user(kaddr + offset, buf, bytes);
149 copied = bytes - left;
150 } else {
151 copied = __iovec_copy_from_user_inatomic(kaddr + offset,
152 i->iov, i->iov_offset, bytes);
153 }
154 kunmap(page);
155 return copied;
156}
157EXPORT_SYMBOL(iov_iter_copy_from_user);
158
159void iov_iter_advance(struct iov_iter *i, size_t bytes)
160{
161 BUG_ON(i->count < bytes);
162
163 if (likely(i->nr_segs == 1)) {
164 i->iov_offset += bytes;
165 i->count -= bytes;
166 } else {
167 const struct iovec *iov = i->iov;
168 size_t base = i->iov_offset;
169 unsigned long nr_segs = i->nr_segs;
170
171 /*
172 * The !iov->iov_len check ensures we skip over unlikely
 173 * zero-length segments (without overrunning the iovec).
174 */
175 while (bytes || unlikely(i->count && !iov->iov_len)) {
176 int copy;
177
178 copy = min(bytes, iov->iov_len - base);
179 BUG_ON(!i->count || i->count < copy);
180 i->count -= copy;
181 bytes -= copy;
182 base += copy;
183 if (iov->iov_len == base) {
184 iov++;
185 nr_segs--;
186 base = 0;
187 }
188 }
189 i->iov = iov;
190 i->iov_offset = base;
191 i->nr_segs = nr_segs;
192 }
193}
194EXPORT_SYMBOL(iov_iter_advance);
195
196/*
197 * Fault in the first iovec of the given iov_iter, to a maximum length
198 * of bytes. Returns 0 on success, or non-zero if the memory could not be
199 * accessed (i.e. because it is an invalid address).
200 *
201 * writev-intensive code may want this to prefault several iovecs -- that
202 * would be possible (callers must not rely on the fact that _only_ the
203 * first iovec will be faulted with the current implementation).
204 */
205int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes)
206{
207 char __user *buf = i->iov->iov_base + i->iov_offset;
208 bytes = min(bytes, i->iov->iov_len - i->iov_offset);
209 return fault_in_pages_readable(buf, bytes);
210}
211EXPORT_SYMBOL(iov_iter_fault_in_readable);
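A note on usage: write paths typically pair iov_iter_fault_in_readable() with iov_iter_copy_from_user_atomic() in a prefault-then-copy step. The sketch below condenses that pattern; the function name write_one_chunk() is made up for illustration, and page allocation, locking and the surrounding retry loop are omitted.

/*
 * Sketch only: prefault outside the atomic section, then copy with
 * page faults disabled. Callers shrink 'bytes' and retry on a short copy.
 */
static ssize_t write_one_chunk(struct page *page, struct iov_iter *i,
                               unsigned long offset, size_t bytes)
{
        size_t copied;

        if (iov_iter_fault_in_readable(i, bytes))
                return -EFAULT;         /* first iovec is not readable */

        pagefault_disable();
        copied = iov_iter_copy_from_user_atomic(page, i, offset, bytes);
        pagefault_enable();

        iov_iter_advance(i, copied);    /* may be short if the page was reclaimed */
        return copied;
}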
212
213/*
214 * Return the count of just the current iov_iter segment.
215 */
216size_t iov_iter_single_seg_count(const struct iov_iter *i)
217{
218 const struct iovec *iov = i->iov;
219 if (i->nr_segs == 1)
220 return i->count;
221 else
222 return min(i->count, iov->iov_len - i->iov_offset);
223}
224EXPORT_SYMBOL(iov_iter_single_seg_count);
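Putting the exported helpers together, a read-side consumer initializes one iterator over the caller's iovec and feeds it page-sized chunks; copy_page_to_iter() advances the iterator itself, so only the return value has to be checked. A minimal sketch (read_into_user() is an illustrative name; it assumes the page is up to date and that count does not exceed PAGE_SIZE - offset):

static ssize_t read_into_user(struct page *page, const struct iovec *iov,
                              unsigned long nr_segs, size_t count,
                              unsigned long offset)
{
        struct iov_iter iter;
        size_t copied;

        iov_iter_init(&iter, iov, nr_segs, count, 0);

        copied = copy_page_to_iter(page, offset, count, &iter);
        /* The iterator was advanced internally; check what is left. */
        if (copied < count && iov_iter_count(&iter))
                return -EFAULT;         /* genuine fault, not a short iovec */

        return copied;
}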
diff --git a/mm/process_vm_access.c b/mm/process_vm_access.c
index cb79065c19e5..8505c9262b35 100644
--- a/mm/process_vm_access.c
+++ b/mm/process_vm_access.c
@@ -23,129 +23,44 @@
23 23
24/** 24/**
25 * process_vm_rw_pages - read/write pages from task specified 25 * process_vm_rw_pages - read/write pages from task specified
26 * @task: task to read/write from 26 * @pages: array of pointers to pages we want to copy
27 * @mm: mm for task
28 * @process_pages: struct pages area that can store at least
29 * nr_pages_to_copy struct page pointers
30 * @pa: address of page in task to start copying from/to
31 * @start_offset: offset in page to start copying from/to 27 * @start_offset: offset in page to start copying from/to
32 * @len: number of bytes to copy 28 * @len: number of bytes to copy
33 * @lvec: iovec array specifying where to copy to/from 29 * @iter: where to copy to/from locally
34 * @lvec_cnt: number of elements in iovec array
35 * @lvec_current: index in iovec array we are up to
36 * @lvec_offset: offset in bytes from current iovec iov_base we are up to
37 * @vm_write: 0 means copy from, 1 means copy to 30 * @vm_write: 0 means copy from, 1 means copy to
38 * @nr_pages_to_copy: number of pages to copy
39 * @bytes_copied: returns number of bytes successfully copied
40 * Returns 0 on success, error code otherwise 31 * Returns 0 on success, error code otherwise
41 */ 32 */
42static int process_vm_rw_pages(struct task_struct *task, 33static int process_vm_rw_pages(struct page **pages,
43 struct mm_struct *mm, 34 unsigned offset,
44 struct page **process_pages, 35 size_t len,
45 unsigned long pa, 36 struct iov_iter *iter,
46 unsigned long start_offset, 37 int vm_write)
47 unsigned long len,
48 const struct iovec *lvec,
49 unsigned long lvec_cnt,
50 unsigned long *lvec_current,
51 size_t *lvec_offset,
52 int vm_write,
53 unsigned int nr_pages_to_copy,
54 ssize_t *bytes_copied)
55{ 38{
56 int pages_pinned;
57 void *target_kaddr;
58 int pgs_copied = 0;
59 int j;
60 int ret;
61 ssize_t bytes_to_copy;
62 ssize_t rc = 0;
63
64 *bytes_copied = 0;
65
66 /* Get the pages we're interested in */
67 down_read(&mm->mmap_sem);
68 pages_pinned = get_user_pages(task, mm, pa,
69 nr_pages_to_copy,
70 vm_write, 0, process_pages, NULL);
71 up_read(&mm->mmap_sem);
72
73 if (pages_pinned != nr_pages_to_copy) {
74 rc = -EFAULT;
75 goto end;
76 }
77
78 /* Do the copy for each page */ 39 /* Do the copy for each page */
79 for (pgs_copied = 0; 40 while (len && iov_iter_count(iter)) {
80 (pgs_copied < nr_pages_to_copy) && (*lvec_current < lvec_cnt); 41 struct page *page = *pages++;
81 pgs_copied++) { 42 size_t copy = PAGE_SIZE - offset;
82 /* Make sure we have a non zero length iovec */ 43 size_t copied;
83 while (*lvec_current < lvec_cnt 44
84 && lvec[*lvec_current].iov_len == 0) 45 if (copy > len)
85 (*lvec_current)++; 46 copy = len;
86 if (*lvec_current == lvec_cnt) 47
87 break; 48 if (vm_write) {
88 49 if (copy > iov_iter_count(iter))
89 /* 50 copy = iov_iter_count(iter);
90 * Will copy smallest of: 51 copied = iov_iter_copy_from_user(page, iter,
91 * - bytes remaining in page 52 offset, copy);
92 * - bytes remaining in destination iovec 53 iov_iter_advance(iter, copied);
93 */ 54 set_page_dirty_lock(page);
94 bytes_to_copy = min_t(ssize_t, PAGE_SIZE - start_offset,
95 len - *bytes_copied);
96 bytes_to_copy = min_t(ssize_t, bytes_to_copy,
97 lvec[*lvec_current].iov_len
98 - *lvec_offset);
99
100 target_kaddr = kmap(process_pages[pgs_copied]) + start_offset;
101
102 if (vm_write)
103 ret = copy_from_user(target_kaddr,
104 lvec[*lvec_current].iov_base
105 + *lvec_offset,
106 bytes_to_copy);
107 else
108 ret = copy_to_user(lvec[*lvec_current].iov_base
109 + *lvec_offset,
110 target_kaddr, bytes_to_copy);
111 kunmap(process_pages[pgs_copied]);
112 if (ret) {
113 *bytes_copied += bytes_to_copy - ret;
114 pgs_copied++;
115 rc = -EFAULT;
116 goto end;
117 }
118 *bytes_copied += bytes_to_copy;
119 *lvec_offset += bytes_to_copy;
120 if (*lvec_offset == lvec[*lvec_current].iov_len) {
121 /*
122 * Need to copy remaining part of page into the
123 * next iovec if there are any bytes left in page
124 */
125 (*lvec_current)++;
126 *lvec_offset = 0;
127 start_offset = (start_offset + bytes_to_copy)
128 % PAGE_SIZE;
129 if (start_offset)
130 pgs_copied--;
131 } else { 55 } else {
132 start_offset = 0; 56 copied = copy_page_to_iter(page, offset, copy, iter);
133 }
134 }
135
136end:
137 if (vm_write) {
138 for (j = 0; j < pages_pinned; j++) {
139 if (j < pgs_copied)
140 set_page_dirty_lock(process_pages[j]);
141 put_page(process_pages[j]);
142 } 57 }
143 } else { 58 len -= copied;
144 for (j = 0; j < pages_pinned; j++) 59 if (copied < copy && iov_iter_count(iter))
145 put_page(process_pages[j]); 60 return -EFAULT;
61 offset = 0;
146 } 62 }
147 63 return 0;
148 return rc;
149} 64}
150 65
151/* Maximum number of pages kmalloc'd to hold struct page's during copy */ 66/* Maximum number of pages kmalloc'd to hold struct page's during copy */
@@ -155,67 +70,60 @@ end:
155 * process_vm_rw_single_vec - read/write pages from task specified 70 * process_vm_rw_single_vec - read/write pages from task specified
156 * @addr: start memory address of target process 71 * @addr: start memory address of target process
157 * @len: size of area to copy to/from 72 * @len: size of area to copy to/from
158 * @lvec: iovec array specifying where to copy to/from locally 73 * @iter: where to copy to/from locally
159 * @lvec_cnt: number of elements in iovec array
160 * @lvec_current: index in iovec array we are up to
161 * @lvec_offset: offset in bytes from current iovec iov_base we are up to
162 * @process_pages: struct pages area that can store at least 74 * @process_pages: struct pages area that can store at least
163 * nr_pages_to_copy struct page pointers 75 * nr_pages_to_copy struct page pointers
164 * @mm: mm for task 76 * @mm: mm for task
165 * @task: task to read/write from 77 * @task: task to read/write from
166 * @vm_write: 0 means copy from, 1 means copy to 78 * @vm_write: 0 means copy from, 1 means copy to
167 * @bytes_copied: returns number of bytes successfully copied
168 * Returns 0 on success or on failure error code 79 * Returns 0 on success or on failure error code
169 */ 80 */
170static int process_vm_rw_single_vec(unsigned long addr, 81static int process_vm_rw_single_vec(unsigned long addr,
171 unsigned long len, 82 unsigned long len,
172 const struct iovec *lvec, 83 struct iov_iter *iter,
173 unsigned long lvec_cnt,
174 unsigned long *lvec_current,
175 size_t *lvec_offset,
176 struct page **process_pages, 84 struct page **process_pages,
177 struct mm_struct *mm, 85 struct mm_struct *mm,
178 struct task_struct *task, 86 struct task_struct *task,
179 int vm_write, 87 int vm_write)
180 ssize_t *bytes_copied)
181{ 88{
182 unsigned long pa = addr & PAGE_MASK; 89 unsigned long pa = addr & PAGE_MASK;
183 unsigned long start_offset = addr - pa; 90 unsigned long start_offset = addr - pa;
184 unsigned long nr_pages; 91 unsigned long nr_pages;
185 ssize_t bytes_copied_loop;
186 ssize_t rc = 0; 92 ssize_t rc = 0;
187 unsigned long nr_pages_copied = 0;
188 unsigned long nr_pages_to_copy;
189 unsigned long max_pages_per_loop = PVM_MAX_KMALLOC_PAGES 93 unsigned long max_pages_per_loop = PVM_MAX_KMALLOC_PAGES
190 / sizeof(struct pages *); 94 / sizeof(struct pages *);
191 95
192 *bytes_copied = 0;
193
194 /* Work out address and page range required */ 96 /* Work out address and page range required */
195 if (len == 0) 97 if (len == 0)
196 return 0; 98 return 0;
197 nr_pages = (addr + len - 1) / PAGE_SIZE - addr / PAGE_SIZE + 1; 99 nr_pages = (addr + len - 1) / PAGE_SIZE - addr / PAGE_SIZE + 1;
198 100
199 while ((nr_pages_copied < nr_pages) && (*lvec_current < lvec_cnt)) { 101 while (!rc && nr_pages && iov_iter_count(iter)) {
200 nr_pages_to_copy = min(nr_pages - nr_pages_copied, 102 int pages = min(nr_pages, max_pages_per_loop);
201 max_pages_per_loop); 103 size_t bytes;
202 104
203 rc = process_vm_rw_pages(task, mm, process_pages, pa, 105 /* Get the pages we're interested in */
204 start_offset, len, 106 down_read(&mm->mmap_sem);
205 lvec, lvec_cnt, 107 pages = get_user_pages(task, mm, pa, pages,
206 lvec_current, lvec_offset, 108 vm_write, 0, process_pages, NULL);
207 vm_write, nr_pages_to_copy, 109 up_read(&mm->mmap_sem);
208 &bytes_copied_loop);
209 start_offset = 0;
210 *bytes_copied += bytes_copied_loop;
211 110
212 if (rc < 0) { 111 if (pages <= 0)
213 return rc; 112 return -EFAULT;
214 } else { 113
215 len -= bytes_copied_loop; 114 bytes = pages * PAGE_SIZE - start_offset;
216 nr_pages_copied += nr_pages_to_copy; 115 if (bytes > len)
217 pa += nr_pages_to_copy * PAGE_SIZE; 116 bytes = len;
218 } 117
118 rc = process_vm_rw_pages(process_pages,
119 start_offset, bytes, iter,
120 vm_write);
121 len -= bytes;
122 start_offset = 0;
123 nr_pages -= pages;
124 pa += pages * PAGE_SIZE;
125 while (pages)
126 put_page(process_pages[--pages]);
219 } 127 }
220 128
221 return rc; 129 return rc;
@@ -228,8 +136,7 @@ static int process_vm_rw_single_vec(unsigned long addr,
228/** 136/**
229 * process_vm_rw_core - core of reading/writing pages from task specified 137 * process_vm_rw_core - core of reading/writing pages from task specified
230 * @pid: PID of process to read/write from/to 138 * @pid: PID of process to read/write from/to
231 * @lvec: iovec array specifying where to copy to/from locally 139 * @iter: where to copy to/from locally
232 * @liovcnt: size of lvec array
233 * @rvec: iovec array specifying where to copy to/from in the other process 140 * @rvec: iovec array specifying where to copy to/from in the other process
234 * @riovcnt: size of rvec array 141 * @riovcnt: size of rvec array
235 * @flags: currently unused 142 * @flags: currently unused
@@ -238,8 +145,7 @@ static int process_vm_rw_single_vec(unsigned long addr,
238 * return less bytes than expected if an error occurs during the copying 145 * return less bytes than expected if an error occurs during the copying
239 * process. 146 * process.
240 */ 147 */
241static ssize_t process_vm_rw_core(pid_t pid, const struct iovec *lvec, 148static ssize_t process_vm_rw_core(pid_t pid, struct iov_iter *iter,
242 unsigned long liovcnt,
243 const struct iovec *rvec, 149 const struct iovec *rvec,
244 unsigned long riovcnt, 150 unsigned long riovcnt,
245 unsigned long flags, int vm_write) 151 unsigned long flags, int vm_write)
@@ -250,13 +156,10 @@ static ssize_t process_vm_rw_core(pid_t pid, const struct iovec *lvec,
250 struct mm_struct *mm; 156 struct mm_struct *mm;
251 unsigned long i; 157 unsigned long i;
252 ssize_t rc = 0; 158 ssize_t rc = 0;
253 ssize_t bytes_copied_loop;
254 ssize_t bytes_copied = 0;
255 unsigned long nr_pages = 0; 159 unsigned long nr_pages = 0;
256 unsigned long nr_pages_iov; 160 unsigned long nr_pages_iov;
257 unsigned long iov_l_curr_idx = 0;
258 size_t iov_l_curr_offset = 0;
259 ssize_t iov_len; 161 ssize_t iov_len;
162 size_t total_len = iov_iter_count(iter);
260 163
261 /* 164 /*
262 * Work out how many pages of struct pages we're going to need 165 * Work out how many pages of struct pages we're going to need
@@ -310,24 +213,20 @@ static ssize_t process_vm_rw_core(pid_t pid, const struct iovec *lvec,
310 goto put_task_struct; 213 goto put_task_struct;
311 } 214 }
312 215
313 for (i = 0; i < riovcnt && iov_l_curr_idx < liovcnt; i++) { 216 for (i = 0; i < riovcnt && iov_iter_count(iter) && !rc; i++)
314 rc = process_vm_rw_single_vec( 217 rc = process_vm_rw_single_vec(
315 (unsigned long)rvec[i].iov_base, rvec[i].iov_len, 218 (unsigned long)rvec[i].iov_base, rvec[i].iov_len,
316 lvec, liovcnt, &iov_l_curr_idx, &iov_l_curr_offset, 219 iter, process_pages, mm, task, vm_write);
317 process_pages, mm, task, vm_write, &bytes_copied_loop); 220
318 bytes_copied += bytes_copied_loop; 221 /* copied = space before - space after */
319 if (rc != 0) { 222 total_len -= iov_iter_count(iter);
320 /* If we have managed to copy any data at all then 223
321 we return the number of bytes copied. Otherwise 224 /* If we have managed to copy any data at all then
322 we return the error code */ 225 we return the number of bytes copied. Otherwise
323 if (bytes_copied) 226 we return the error code */
324 rc = bytes_copied; 227 if (total_len)
325 goto put_mm; 228 rc = total_len;
326 }
327 }
328 229
329 rc = bytes_copied;
330put_mm:
331 mmput(mm); 230 mmput(mm);
332 231
333put_task_struct: 232put_task_struct:
@@ -363,6 +262,7 @@ static ssize_t process_vm_rw(pid_t pid,
363 struct iovec iovstack_r[UIO_FASTIOV]; 262 struct iovec iovstack_r[UIO_FASTIOV];
364 struct iovec *iov_l = iovstack_l; 263 struct iovec *iov_l = iovstack_l;
365 struct iovec *iov_r = iovstack_r; 264 struct iovec *iov_r = iovstack_r;
265 struct iov_iter iter;
366 ssize_t rc; 266 ssize_t rc;
367 267
368 if (flags != 0) 268 if (flags != 0)
@@ -378,13 +278,14 @@ static ssize_t process_vm_rw(pid_t pid,
378 if (rc <= 0) 278 if (rc <= 0)
379 goto free_iovecs; 279 goto free_iovecs;
380 280
281 iov_iter_init(&iter, iov_l, liovcnt, rc, 0);
282
381 rc = rw_copy_check_uvector(CHECK_IOVEC_ONLY, rvec, riovcnt, UIO_FASTIOV, 283 rc = rw_copy_check_uvector(CHECK_IOVEC_ONLY, rvec, riovcnt, UIO_FASTIOV,
382 iovstack_r, &iov_r); 284 iovstack_r, &iov_r);
383 if (rc <= 0) 285 if (rc <= 0)
384 goto free_iovecs; 286 goto free_iovecs;
385 287
386 rc = process_vm_rw_core(pid, iov_l, liovcnt, iov_r, riovcnt, flags, 288 rc = process_vm_rw_core(pid, &iter, iov_r, riovcnt, flags, vm_write);
387 vm_write);
388 289
389free_iovecs: 290free_iovecs:
390 if (iov_r != iovstack_r) 291 if (iov_r != iovstack_r)
@@ -424,6 +325,7 @@ compat_process_vm_rw(compat_pid_t pid,
424 struct iovec iovstack_r[UIO_FASTIOV]; 325 struct iovec iovstack_r[UIO_FASTIOV];
425 struct iovec *iov_l = iovstack_l; 326 struct iovec *iov_l = iovstack_l;
426 struct iovec *iov_r = iovstack_r; 327 struct iovec *iov_r = iovstack_r;
328 struct iov_iter iter;
427 ssize_t rc = -EFAULT; 329 ssize_t rc = -EFAULT;
428 330
429 if (flags != 0) 331 if (flags != 0)
@@ -439,14 +341,14 @@ compat_process_vm_rw(compat_pid_t pid,
439 &iov_l); 341 &iov_l);
440 if (rc <= 0) 342 if (rc <= 0)
441 goto free_iovecs; 343 goto free_iovecs;
344 iov_iter_init(&iter, iov_l, liovcnt, rc, 0);
442 rc = compat_rw_copy_check_uvector(CHECK_IOVEC_ONLY, rvec, riovcnt, 345 rc = compat_rw_copy_check_uvector(CHECK_IOVEC_ONLY, rvec, riovcnt,
443 UIO_FASTIOV, iovstack_r, 346 UIO_FASTIOV, iovstack_r,
444 &iov_r); 347 &iov_r);
445 if (rc <= 0) 348 if (rc <= 0)
446 goto free_iovecs; 349 goto free_iovecs;
447 350
448 rc = process_vm_rw_core(pid, iov_l, liovcnt, iov_r, riovcnt, flags, 351 rc = process_vm_rw_core(pid, &iter, iov_r, riovcnt, flags, vm_write);
449 vm_write);
450 352
451free_iovecs: 353free_iovecs:
452 if (iov_r != iovstack_r) 354 if (iov_r != iovstack_r)
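For reference, the userspace contract of the syscall reworked above is unchanged. A minimal process_vm_readv() caller looks like the following; the pid 1234 and the remote address are placeholders, and the call needs the usual ptrace permissions:

#define _GNU_SOURCE
#include <stdio.h>
#include <sys/uio.h>

int main(void)
{
        char buf[64];
        struct iovec local  = { .iov_base = buf, .iov_len = sizeof(buf) };
        /* Placeholder target: substitute a real pid and address. */
        struct iovec remote = { .iov_base = (void *)0x400000, .iov_len = sizeof(buf) };
        ssize_t n = process_vm_readv(1234, &local, 1, &remote, 1, 0);

        if (n < 0)
                perror("process_vm_readv");
        else
                printf("copied %zd bytes\n", n);
        return 0;
}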
diff --git a/mm/shmem.c b/mm/shmem.c
index 70273f8df586..9f70e02111c6 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -1402,13 +1402,25 @@ shmem_write_end(struct file *file, struct address_space *mapping,
1402 return copied; 1402 return copied;
1403} 1403}
1404 1404
1405static void do_shmem_file_read(struct file *filp, loff_t *ppos, read_descriptor_t *desc, read_actor_t actor) 1405static ssize_t shmem_file_aio_read(struct kiocb *iocb,
1406 const struct iovec *iov, unsigned long nr_segs, loff_t pos)
1406{ 1407{
1407 struct inode *inode = file_inode(filp); 1408 struct file *file = iocb->ki_filp;
1409 struct inode *inode = file_inode(file);
1408 struct address_space *mapping = inode->i_mapping; 1410 struct address_space *mapping = inode->i_mapping;
1409 pgoff_t index; 1411 pgoff_t index;
1410 unsigned long offset; 1412 unsigned long offset;
1411 enum sgp_type sgp = SGP_READ; 1413 enum sgp_type sgp = SGP_READ;
1414 int error = 0;
1415 ssize_t retval;
1416 size_t count;
1417 loff_t *ppos = &iocb->ki_pos;
1418 struct iov_iter iter;
1419
1420 retval = generic_segment_checks(iov, &nr_segs, &count, VERIFY_WRITE);
1421 if (retval)
1422 return retval;
1423 iov_iter_init(&iter, iov, nr_segs, count, 0);
1412 1424
1413 /* 1425 /*
1414 * Might this read be for a stacking filesystem? Then when reading 1426 * Might this read be for a stacking filesystem? Then when reading
@@ -1436,10 +1448,10 @@ static void do_shmem_file_read(struct file *filp, loff_t *ppos, read_descriptor_
1436 break; 1448 break;
1437 } 1449 }
1438 1450
1439 desc->error = shmem_getpage(inode, index, &page, sgp, NULL); 1451 error = shmem_getpage(inode, index, &page, sgp, NULL);
1440 if (desc->error) { 1452 if (error) {
1441 if (desc->error == -EINVAL) 1453 if (error == -EINVAL)
1442 desc->error = 0; 1454 error = 0;
1443 break; 1455 break;
1444 } 1456 }
1445 if (page) 1457 if (page)
@@ -1483,61 +1495,26 @@ static void do_shmem_file_read(struct file *filp, loff_t *ppos, read_descriptor_
1483 /* 1495 /*
1484 * Ok, we have the page, and it's up-to-date, so 1496 * Ok, we have the page, and it's up-to-date, so
1485 * now we can copy it to user space... 1497 * now we can copy it to user space...
1486 *
1487 * The actor routine returns how many bytes were actually used..
1488 * NOTE! This may not be the same as how much of a user buffer
1489 * we filled up (we may be padding etc), so we can only update
1490 * "pos" here (the actor routine has to update the user buffer
1491 * pointers and the remaining count).
1492 */ 1498 */
1493 ret = actor(desc, page, offset, nr); 1499 ret = copy_page_to_iter(page, offset, nr, &iter);
1500 retval += ret;
1494 offset += ret; 1501 offset += ret;
1495 index += offset >> PAGE_CACHE_SHIFT; 1502 index += offset >> PAGE_CACHE_SHIFT;
1496 offset &= ~PAGE_CACHE_MASK; 1503 offset &= ~PAGE_CACHE_MASK;
1497 1504
1498 page_cache_release(page); 1505 page_cache_release(page);
1499 if (ret != nr || !desc->count) 1506 if (!iov_iter_count(&iter))
1500 break; 1507 break;
1501 1508 if (ret < nr) {
1509 error = -EFAULT;
1510 break;
1511 }
1502 cond_resched(); 1512 cond_resched();
1503 } 1513 }
1504 1514
1505 *ppos = ((loff_t) index << PAGE_CACHE_SHIFT) + offset; 1515 *ppos = ((loff_t) index << PAGE_CACHE_SHIFT) + offset;
1506 file_accessed(filp); 1516 file_accessed(file);
1507} 1517 return retval ? retval : error;
1508
1509static ssize_t shmem_file_aio_read(struct kiocb *iocb,
1510 const struct iovec *iov, unsigned long nr_segs, loff_t pos)
1511{
1512 struct file *filp = iocb->ki_filp;
1513 ssize_t retval;
1514 unsigned long seg;
1515 size_t count;
1516 loff_t *ppos = &iocb->ki_pos;
1517
1518 retval = generic_segment_checks(iov, &nr_segs, &count, VERIFY_WRITE);
1519 if (retval)
1520 return retval;
1521
1522 for (seg = 0; seg < nr_segs; seg++) {
1523 read_descriptor_t desc;
1524
1525 desc.written = 0;
1526 desc.arg.buf = iov[seg].iov_base;
1527 desc.count = iov[seg].iov_len;
1528 if (desc.count == 0)
1529 continue;
1530 desc.error = 0;
1531 do_shmem_file_read(filp, ppos, &desc, file_read_actor);
1532 retval += desc.written;
1533 if (desc.error) {
1534 retval = retval ?: desc.error;
1535 break;
1536 }
1537 if (desc.count > 0)
1538 break;
1539 }
1540 return retval;
1541} 1518}
1542 1519
1543static ssize_t shmem_file_splice_read(struct file *in, loff_t *ppos, 1520static ssize_t shmem_file_splice_read(struct file *in, loff_t *ppos,
@@ -1576,7 +1553,7 @@ static ssize_t shmem_file_splice_read(struct file *in, loff_t *ppos,
1576 index = *ppos >> PAGE_CACHE_SHIFT; 1553 index = *ppos >> PAGE_CACHE_SHIFT;
1577 loff = *ppos & ~PAGE_CACHE_MASK; 1554 loff = *ppos & ~PAGE_CACHE_MASK;
1578 req_pages = (len + loff + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; 1555 req_pages = (len + loff + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
1579 nr_pages = min(req_pages, pipe->buffers); 1556 nr_pages = min(req_pages, spd.nr_pages_max);
1580 1557
1581 spd.nr_pages = find_get_pages_contig(mapping, index, 1558 spd.nr_pages = find_get_pages_contig(mapping, index,
1582 nr_pages, spd.pages); 1559 nr_pages, spd.pages);
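The index/offset bookkeeping kept by the read loop above is ordinary page-cache arithmetic. A standalone illustration, assuming 4 KiB pages (a PAGE_CACHE_SHIFT of 12, which is typical but architecture dependent):

#include <stdio.h>

#define PAGE_CACHE_SHIFT 12
#define PAGE_CACHE_SIZE  (1UL << PAGE_CACHE_SHIFT)
#define PAGE_CACHE_MASK  (~(PAGE_CACHE_SIZE - 1))

int main(void)
{
        unsigned long index = 3, offset = 3000, ret = 1096;

        offset += ret;                          /* 4096: end of the page  */
        index += offset >> PAGE_CACHE_SHIFT;    /* 3 + 1 = 4              */
        offset &= ~PAGE_CACHE_MASK;             /* 0: start of next page  */
        printf("index=%lu offset=%lu\n", index, offset);
        return 0;
}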
diff --git a/mm/slab.c b/mm/slab.c
index 3db4cb06e32e..388cb1ae6fbc 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -157,6 +157,17 @@
157#define ARCH_KMALLOC_FLAGS SLAB_HWCACHE_ALIGN 157#define ARCH_KMALLOC_FLAGS SLAB_HWCACHE_ALIGN
158#endif 158#endif
159 159
160#define FREELIST_BYTE_INDEX (((PAGE_SIZE >> BITS_PER_BYTE) \
161 <= SLAB_OBJ_MIN_SIZE) ? 1 : 0)
162
163#if FREELIST_BYTE_INDEX
164typedef unsigned char freelist_idx_t;
165#else
166typedef unsigned short freelist_idx_t;
167#endif
168
169#define SLAB_OBJ_MAX_NUM (1 << sizeof(freelist_idx_t) * BITS_PER_BYTE)
170
160/* 171/*
161 * true if a page was allocated from pfmemalloc reserves for network-based 172 * true if a page was allocated from pfmemalloc reserves for network-based
162 * swap 173 * swap
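The byte-versus-short decision above is plain arithmetic: a one-byte index can name at most 256 objects, so it is safe whenever no more than 256 objects can share a page. A standalone check, assuming for illustration a 4 KiB page and a SLAB_OBJ_MIN_SIZE of 16 bytes (the real value is defined elsewhere in the slab code):

#include <stdio.h>

#define BITS_PER_BYTE     8
#define PAGE_SIZE         4096UL        /* assumed for illustration */
#define SLAB_OBJ_MIN_SIZE 16UL          /* assumed for illustration */

/* Mirrors the FREELIST_BYTE_INDEX test introduced above. */
#define FREELIST_BYTE_INDEX (((PAGE_SIZE >> BITS_PER_BYTE) \
                                <= SLAB_OBJ_MIN_SIZE) ? 1 : 0)

int main(void)
{
        /* 4096 >> 8 == 16, so at most 4096/16 == 256 objects fit in a
         * page, which a single byte (values 0..255) can index. */
        printf("byte index usable: %d (max objects per page: %lu)\n",
               FREELIST_BYTE_INDEX, PAGE_SIZE / SLAB_OBJ_MIN_SIZE);
        return 0;
}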
@@ -277,8 +288,8 @@ static void kmem_cache_node_init(struct kmem_cache_node *parent)
277 * OTOH the cpuarrays can contain lots of objects, 288 * OTOH the cpuarrays can contain lots of objects,
278 * which could lock up otherwise freeable slabs. 289 * which could lock up otherwise freeable slabs.
279 */ 290 */
280#define REAPTIMEOUT_CPUC (2*HZ) 291#define REAPTIMEOUT_AC (2*HZ)
281#define REAPTIMEOUT_LIST3 (4*HZ) 292#define REAPTIMEOUT_NODE (4*HZ)
282 293
283#if STATS 294#if STATS
284#define STATS_INC_ACTIVE(x) ((x)->num_active++) 295#define STATS_INC_ACTIVE(x) ((x)->num_active++)
@@ -565,9 +576,31 @@ static inline struct array_cache *cpu_cache_get(struct kmem_cache *cachep)
565 return cachep->array[smp_processor_id()]; 576 return cachep->array[smp_processor_id()];
566} 577}
567 578
568static size_t slab_mgmt_size(size_t nr_objs, size_t align) 579static int calculate_nr_objs(size_t slab_size, size_t buffer_size,
580 size_t idx_size, size_t align)
569{ 581{
570 return ALIGN(nr_objs * sizeof(unsigned int), align); 582 int nr_objs;
583 size_t freelist_size;
584
585 /*
586 * Ignore padding for the initial guess. The padding
587 * is at most @align-1 bytes, and @buffer_size is at
588 * least @align. In the worst case, this result will
589 * be one greater than the number of objects that fit
590 * into the memory allocation when taking the padding
591 * into account.
592 */
593 nr_objs = slab_size / (buffer_size + idx_size);
594
595 /*
596 * This calculated number will be either the right
597 * amount, or one greater than what we want.
598 */
599 freelist_size = slab_size - nr_objs * buffer_size;
600 if (freelist_size < ALIGN(nr_objs * idx_size, align))
601 nr_objs--;
602
603 return nr_objs;
571} 604}
572 605
573/* 606/*
@@ -600,25 +633,9 @@ static void cache_estimate(unsigned long gfporder, size_t buffer_size,
600 nr_objs = slab_size / buffer_size; 633 nr_objs = slab_size / buffer_size;
601 634
602 } else { 635 } else {
603 /* 636 nr_objs = calculate_nr_objs(slab_size, buffer_size,
604 * Ignore padding for the initial guess. The padding 637 sizeof(freelist_idx_t), align);
605 * is at most @align-1 bytes, and @buffer_size is at 638 mgmt_size = ALIGN(nr_objs * sizeof(freelist_idx_t), align);
606 * least @align. In the worst case, this result will
607 * be one greater than the number of objects that fit
608 * into the memory allocation when taking the padding
609 * into account.
610 */
611 nr_objs = (slab_size) / (buffer_size + sizeof(unsigned int));
612
613 /*
614 * This calculated number will be either the right
615 * amount, or one greater than what we want.
616 */
617 if (slab_mgmt_size(nr_objs, align) + nr_objs*buffer_size
618 > slab_size)
619 nr_objs--;
620
621 mgmt_size = slab_mgmt_size(nr_objs, align);
622 } 639 }
623 *num = nr_objs; 640 *num = nr_objs;
624 *left_over = slab_size - nr_objs*buffer_size - mgmt_size; 641 *left_over = slab_size - nr_objs*buffer_size - mgmt_size;
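A quick numeric walk through calculate_nr_objs() above makes the guess-then-correct step concrete. The inputs are illustrative: a 4 KiB slab, 256-byte objects, a one-byte freelist index and 8-byte alignment.

#include <stdio.h>

#define ALIGN(x, a) (((x) + (a) - 1) & ~((unsigned long)(a) - 1))

/* Same arithmetic as calculate_nr_objs() above, extracted for illustration. */
static int nr_objs_estimate(unsigned long slab_size, unsigned long buffer_size,
                            unsigned long idx_size, unsigned long align)
{
        int nr_objs = slab_size / (buffer_size + idx_size);         /* 4096/257 = 15 */
        unsigned long freelist = slab_size - nr_objs * buffer_size; /* 256 */

        if (freelist < ALIGN(nr_objs * idx_size, align))            /* 256 >= 16: keep 15 */
                nr_objs--;
        return nr_objs;
}

int main(void)
{
        int num = nr_objs_estimate(4096, 256, 1, 8);                /* 15 */
        unsigned long mgmt = ALIGN(num * 1UL, 8);                   /* 16 */

        printf("objects: %d, freelist: %lu, left over: %lu\n",
               num, mgmt, 4096 - num * 256UL - mgmt);               /* 15, 16, 240 */
        return 0;
}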
@@ -1067,7 +1084,7 @@ static int init_cache_node_node(int node)
1067 1084
1068 list_for_each_entry(cachep, &slab_caches, list) { 1085 list_for_each_entry(cachep, &slab_caches, list) {
1069 /* 1086 /*
1070 * Set up the size64 kmemlist for cpu before we can 1087 * Set up the kmem_cache_node for cpu before we can
1071 * begin anything. Make sure some other cpu on this 1088 * begin anything. Make sure some other cpu on this
1072 * node has not already allocated this 1089 * node has not already allocated this
1073 */ 1090 */
@@ -1076,12 +1093,12 @@ static int init_cache_node_node(int node)
1076 if (!n) 1093 if (!n)
1077 return -ENOMEM; 1094 return -ENOMEM;
1078 kmem_cache_node_init(n); 1095 kmem_cache_node_init(n);
1079 n->next_reap = jiffies + REAPTIMEOUT_LIST3 + 1096 n->next_reap = jiffies + REAPTIMEOUT_NODE +
1080 ((unsigned long)cachep) % REAPTIMEOUT_LIST3; 1097 ((unsigned long)cachep) % REAPTIMEOUT_NODE;
1081 1098
1082 /* 1099 /*
1083 * The l3s don't come and go as CPUs come and 1100 * The kmem_cache_nodes don't come and go as CPUs
1084 * go. slab_mutex is sufficient 1101 * come and go. slab_mutex is sufficient
1085 * protection here. 1102 * protection here.
1086 */ 1103 */
1087 cachep->node[node] = n; 1104 cachep->node[node] = n;
@@ -1406,8 +1423,8 @@ static void __init set_up_node(struct kmem_cache *cachep, int index)
1406 for_each_online_node(node) { 1423 for_each_online_node(node) {
1407 cachep->node[node] = &init_kmem_cache_node[index + node]; 1424 cachep->node[node] = &init_kmem_cache_node[index + node];
1408 cachep->node[node]->next_reap = jiffies + 1425 cachep->node[node]->next_reap = jiffies +
1409 REAPTIMEOUT_LIST3 + 1426 REAPTIMEOUT_NODE +
1410 ((unsigned long)cachep) % REAPTIMEOUT_LIST3; 1427 ((unsigned long)cachep) % REAPTIMEOUT_NODE;
1411 } 1428 }
1412} 1429}
1413 1430
@@ -2010,6 +2027,10 @@ static size_t calculate_slab_order(struct kmem_cache *cachep,
2010 if (!num) 2027 if (!num)
2011 continue; 2028 continue;
2012 2029
2030 /* Can't handle number of objects more than SLAB_OBJ_MAX_NUM */
2031 if (num > SLAB_OBJ_MAX_NUM)
2032 break;
2033
2013 if (flags & CFLGS_OFF_SLAB) { 2034 if (flags & CFLGS_OFF_SLAB) {
2014 /* 2035 /*
2015 * Max number of objs-per-slab for caches which 2036 * Max number of objs-per-slab for caches which
@@ -2017,7 +2038,7 @@ static size_t calculate_slab_order(struct kmem_cache *cachep,
2017 * looping condition in cache_grow(). 2038 * looping condition in cache_grow().
2018 */ 2039 */
2019 offslab_limit = size; 2040 offslab_limit = size;
2020 offslab_limit /= sizeof(unsigned int); 2041 offslab_limit /= sizeof(freelist_idx_t);
2021 2042
2022 if (num > offslab_limit) 2043 if (num > offslab_limit)
2023 break; 2044 break;
@@ -2103,8 +2124,8 @@ static int __init_refok setup_cpu_cache(struct kmem_cache *cachep, gfp_t gfp)
2103 } 2124 }
2104 } 2125 }
2105 cachep->node[numa_mem_id()]->next_reap = 2126 cachep->node[numa_mem_id()]->next_reap =
2106 jiffies + REAPTIMEOUT_LIST3 + 2127 jiffies + REAPTIMEOUT_NODE +
2107 ((unsigned long)cachep) % REAPTIMEOUT_LIST3; 2128 ((unsigned long)cachep) % REAPTIMEOUT_NODE;
2108 2129
2109 cpu_cache_get(cachep)->avail = 0; 2130 cpu_cache_get(cachep)->avail = 0;
2110 cpu_cache_get(cachep)->limit = BOOT_CPUCACHE_ENTRIES; 2131 cpu_cache_get(cachep)->limit = BOOT_CPUCACHE_ENTRIES;
@@ -2243,7 +2264,7 @@ __kmem_cache_create (struct kmem_cache *cachep, unsigned long flags)
2243 * it too early on. Always use on-slab management when 2264 * it too early on. Always use on-slab management when
2244 * SLAB_NOLEAKTRACE to avoid recursive calls into kmemleak) 2265 * SLAB_NOLEAKTRACE to avoid recursive calls into kmemleak)
2245 */ 2266 */
2246 if ((size >= (PAGE_SIZE >> 3)) && !slab_early_init && 2267 if ((size >= (PAGE_SIZE >> 5)) && !slab_early_init &&
2247 !(flags & SLAB_NOLEAKTRACE)) 2268 !(flags & SLAB_NOLEAKTRACE))
2248 /* 2269 /*
2249 * Size is large, assume best to place the slab management obj 2270 * Size is large, assume best to place the slab management obj
@@ -2252,6 +2273,12 @@ __kmem_cache_create (struct kmem_cache *cachep, unsigned long flags)
2252 flags |= CFLGS_OFF_SLAB; 2273 flags |= CFLGS_OFF_SLAB;
2253 2274
2254 size = ALIGN(size, cachep->align); 2275 size = ALIGN(size, cachep->align);
2276 /*
2277 * We should restrict the number of objects in a slab to implement
2278 * a byte-sized index. Refer to the comment on the SLAB_OBJ_MIN_SIZE definition.
2279 */
2280 if (FREELIST_BYTE_INDEX && size < SLAB_OBJ_MIN_SIZE)
2281 size = ALIGN(SLAB_OBJ_MIN_SIZE, cachep->align);
2255 2282
2256 left_over = calculate_slab_order(cachep, size, cachep->align, flags); 2283 left_over = calculate_slab_order(cachep, size, cachep->align, flags);
2257 2284
@@ -2259,7 +2286,7 @@ __kmem_cache_create (struct kmem_cache *cachep, unsigned long flags)
2259 return -E2BIG; 2286 return -E2BIG;
2260 2287
2261 freelist_size = 2288 freelist_size =
2262 ALIGN(cachep->num * sizeof(unsigned int), cachep->align); 2289 ALIGN(cachep->num * sizeof(freelist_idx_t), cachep->align);
2263 2290
2264 /* 2291 /*
2265 * If the slab has been placed off-slab, and we have enough space then 2292 * If the slab has been placed off-slab, and we have enough space then
@@ -2272,7 +2299,7 @@ __kmem_cache_create (struct kmem_cache *cachep, unsigned long flags)
2272 2299
2273 if (flags & CFLGS_OFF_SLAB) { 2300 if (flags & CFLGS_OFF_SLAB) {
2274 /* really off slab. No need for manual alignment */ 2301 /* really off slab. No need for manual alignment */
2275 freelist_size = cachep->num * sizeof(unsigned int); 2302 freelist_size = cachep->num * sizeof(freelist_idx_t);
2276 2303
2277#ifdef CONFIG_PAGE_POISONING 2304#ifdef CONFIG_PAGE_POISONING
2278 /* If we're going to use the generic kernel_map_pages() 2305 /* If we're going to use the generic kernel_map_pages()
@@ -2300,10 +2327,10 @@ __kmem_cache_create (struct kmem_cache *cachep, unsigned long flags)
2300 if (flags & CFLGS_OFF_SLAB) { 2327 if (flags & CFLGS_OFF_SLAB) {
2301 cachep->freelist_cache = kmalloc_slab(freelist_size, 0u); 2328 cachep->freelist_cache = kmalloc_slab(freelist_size, 0u);
2302 /* 2329 /*
2303 * This is a possibility for one of the malloc_sizes caches. 2330 * This is a possibility for one of the kmalloc_{dma,}_caches.
2304 * But since we go off slab only for object size greater than 2331 * But since we go off slab only for object size greater than
2305 * PAGE_SIZE/8, and malloc_sizes gets created in ascending order, 2332 * PAGE_SIZE/8, and kmalloc_{dma,}_caches get created
2306 * this should not happen at all. 2333 * in ascending order, this should not happen at all.
2307 * But leave a BUG_ON for some lucky dude. 2334 * But leave a BUG_ON for some lucky dude.
2308 */ 2335 */
2309 BUG_ON(ZERO_OR_NULL_PTR(cachep->freelist_cache)); 2336 BUG_ON(ZERO_OR_NULL_PTR(cachep->freelist_cache));
@@ -2511,14 +2538,17 @@ int __kmem_cache_shutdown(struct kmem_cache *cachep)
2511 2538
2512/* 2539/*
2513 * Get the memory for a slab management obj. 2540 * Get the memory for a slab management obj.
2514 * For a slab cache when the slab descriptor is off-slab, slab descriptors 2541 *
2515 * always come from malloc_sizes caches. The slab descriptor cannot 2542 * For a slab cache when the slab descriptor is off-slab, the
2516 * come from the same cache which is getting created because, 2543 * slab descriptor can't come from the same cache which is being created,
2517 * when we are searching for an appropriate cache for these 2544 * because if it is the case, that means we defer the creation of
2518 * descriptors in kmem_cache_create, we search through the malloc_sizes array. 2545 * the kmalloc_{dma,}_cache of size sizeof(slab descriptor) to this point.
2519 * If we are creating a malloc_sizes cache here it would not be visible to 2546 * And we eventually call down to __kmem_cache_create(), which
2520 * kmem_find_general_cachep till the initialization is complete. 2547 * in turn looks up in the kmalloc_{dma,}_caches for the desired-size one.
2521 * Hence we cannot have freelist_cache same as the original cache. 2548 * This is a "chicken-and-egg" problem.
2549 *
2550 * So the off-slab slab descriptor shall come from the kmalloc_{dma,}_caches,
2551 * which are all initialized during kmem_cache_init().
2522 */ 2552 */
2523static void *alloc_slabmgmt(struct kmem_cache *cachep, 2553static void *alloc_slabmgmt(struct kmem_cache *cachep,
2524 struct page *page, int colour_off, 2554 struct page *page, int colour_off,
@@ -2542,9 +2572,15 @@ static void *alloc_slabmgmt(struct kmem_cache *cachep,
2542 return freelist; 2572 return freelist;
2543} 2573}
2544 2574
2545static inline unsigned int *slab_freelist(struct page *page) 2575static inline freelist_idx_t get_free_obj(struct page *page, unsigned char idx)
2546{ 2576{
2547 return (unsigned int *)(page->freelist); 2577 return ((freelist_idx_t *)page->freelist)[idx];
2578}
2579
2580static inline void set_free_obj(struct page *page,
2581 unsigned char idx, freelist_idx_t val)
2582{
2583 ((freelist_idx_t *)(page->freelist))[idx] = val;
2548} 2584}
2549 2585
2550static void cache_init_objs(struct kmem_cache *cachep, 2586static void cache_init_objs(struct kmem_cache *cachep,
@@ -2589,7 +2625,7 @@ static void cache_init_objs(struct kmem_cache *cachep,
2589 if (cachep->ctor) 2625 if (cachep->ctor)
2590 cachep->ctor(objp); 2626 cachep->ctor(objp);
2591#endif 2627#endif
2592 slab_freelist(page)[i] = i; 2628 set_free_obj(page, i, i);
2593 } 2629 }
2594} 2630}
2595 2631
@@ -2608,7 +2644,7 @@ static void *slab_get_obj(struct kmem_cache *cachep, struct page *page,
2608{ 2644{
2609 void *objp; 2645 void *objp;
2610 2646
2611 objp = index_to_obj(cachep, page, slab_freelist(page)[page->active]); 2647 objp = index_to_obj(cachep, page, get_free_obj(page, page->active));
2612 page->active++; 2648 page->active++;
2613#if DEBUG 2649#if DEBUG
2614 WARN_ON(page_to_nid(virt_to_page(objp)) != nodeid); 2650 WARN_ON(page_to_nid(virt_to_page(objp)) != nodeid);
@@ -2629,7 +2665,7 @@ static void slab_put_obj(struct kmem_cache *cachep, struct page *page,
2629 2665
2630 /* Verify double free bug */ 2666 /* Verify double free bug */
2631 for (i = page->active; i < cachep->num; i++) { 2667 for (i = page->active; i < cachep->num; i++) {
2632 if (slab_freelist(page)[i] == objnr) { 2668 if (get_free_obj(page, i) == objnr) {
2633 printk(KERN_ERR "slab: double free detected in cache " 2669 printk(KERN_ERR "slab: double free detected in cache "
2634 "'%s', objp %p\n", cachep->name, objp); 2670 "'%s', objp %p\n", cachep->name, objp);
2635 BUG(); 2671 BUG();
@@ -2637,7 +2673,7 @@ static void slab_put_obj(struct kmem_cache *cachep, struct page *page,
2637 } 2673 }
2638#endif 2674#endif
2639 page->active--; 2675 page->active--;
2640 slab_freelist(page)[page->active] = objnr; 2676 set_free_obj(page, page->active, objnr);
2641} 2677}
2642 2678
2643/* 2679/*
@@ -2886,9 +2922,9 @@ retry:
2886 /* move slabp to correct slabp list: */ 2922 /* move slabp to correct slabp list: */
2887 list_del(&page->lru); 2923 list_del(&page->lru);
2888 if (page->active == cachep->num) 2924 if (page->active == cachep->num)
2889 list_add(&page->list, &n->slabs_full); 2925 list_add(&page->lru, &n->slabs_full);
2890 else 2926 else
2891 list_add(&page->list, &n->slabs_partial); 2927 list_add(&page->lru, &n->slabs_partial);
2892 } 2928 }
2893 2929
2894must_grow: 2930must_grow:
@@ -3245,11 +3281,11 @@ slab_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid,
3245 kmemleak_alloc_recursive(ptr, cachep->object_size, 1, cachep->flags, 3281 kmemleak_alloc_recursive(ptr, cachep->object_size, 1, cachep->flags,
3246 flags); 3282 flags);
3247 3283
3248 if (likely(ptr)) 3284 if (likely(ptr)) {
3249 kmemcheck_slab_alloc(cachep, flags, ptr, cachep->object_size); 3285 kmemcheck_slab_alloc(cachep, flags, ptr, cachep->object_size);
3250 3286 if (unlikely(flags & __GFP_ZERO))
3251 if (unlikely((flags & __GFP_ZERO) && ptr)) 3287 memset(ptr, 0, cachep->object_size);
3252 memset(ptr, 0, cachep->object_size); 3288 }
3253 3289
3254 return ptr; 3290 return ptr;
3255} 3291}
@@ -3310,17 +3346,17 @@ slab_alloc(struct kmem_cache *cachep, gfp_t flags, unsigned long caller)
3310 flags); 3346 flags);
3311 prefetchw(objp); 3347 prefetchw(objp);
3312 3348
3313 if (likely(objp)) 3349 if (likely(objp)) {
3314 kmemcheck_slab_alloc(cachep, flags, objp, cachep->object_size); 3350 kmemcheck_slab_alloc(cachep, flags, objp, cachep->object_size);
3315 3351 if (unlikely(flags & __GFP_ZERO))
3316 if (unlikely((flags & __GFP_ZERO) && objp)) 3352 memset(objp, 0, cachep->object_size);
3317 memset(objp, 0, cachep->object_size); 3353 }
3318 3354
3319 return objp; 3355 return objp;
3320} 3356}
3321 3357
3322/* 3358/*
3323 * Caller needs to acquire correct kmem_list's list_lock 3359 * Caller needs to acquire correct kmem_cache_node's list_lock
3324 */ 3360 */
3325static void free_block(struct kmem_cache *cachep, void **objpp, int nr_objects, 3361static void free_block(struct kmem_cache *cachep, void **objpp, int nr_objects,
3326 int node) 3362 int node)
@@ -3574,11 +3610,6 @@ static __always_inline void *__do_kmalloc(size_t size, gfp_t flags,
3574 struct kmem_cache *cachep; 3610 struct kmem_cache *cachep;
3575 void *ret; 3611 void *ret;
3576 3612
3577 /* If you want to save a few bytes .text space: replace
3578 * __ with kmem_.
3579 * Then kmalloc uses the uninlined functions instead of the inline
3580 * functions.
3581 */
3582 cachep = kmalloc_slab(size, flags); 3613 cachep = kmalloc_slab(size, flags);
3583 if (unlikely(ZERO_OR_NULL_PTR(cachep))) 3614 if (unlikely(ZERO_OR_NULL_PTR(cachep)))
3584 return cachep; 3615 return cachep;
@@ -3670,7 +3701,7 @@ EXPORT_SYMBOL(kfree);
3670/* 3701/*
3671 * This initializes kmem_cache_node or resizes various caches for all nodes. 3702 * This initializes kmem_cache_node or resizes various caches for all nodes.
3672 */ 3703 */
3673static int alloc_kmemlist(struct kmem_cache *cachep, gfp_t gfp) 3704static int alloc_kmem_cache_node(struct kmem_cache *cachep, gfp_t gfp)
3674{ 3705{
3675 int node; 3706 int node;
3676 struct kmem_cache_node *n; 3707 struct kmem_cache_node *n;
@@ -3726,8 +3757,8 @@ static int alloc_kmemlist(struct kmem_cache *cachep, gfp_t gfp)
3726 } 3757 }
3727 3758
3728 kmem_cache_node_init(n); 3759 kmem_cache_node_init(n);
3729 n->next_reap = jiffies + REAPTIMEOUT_LIST3 + 3760 n->next_reap = jiffies + REAPTIMEOUT_NODE +
3730 ((unsigned long)cachep) % REAPTIMEOUT_LIST3; 3761 ((unsigned long)cachep) % REAPTIMEOUT_NODE;
3731 n->shared = new_shared; 3762 n->shared = new_shared;
3732 n->alien = new_alien; 3763 n->alien = new_alien;
3733 n->free_limit = (1 + nr_cpus_node(node)) * 3764 n->free_limit = (1 + nr_cpus_node(node)) *
@@ -3813,7 +3844,7 @@ static int __do_tune_cpucache(struct kmem_cache *cachep, int limit,
3813 kfree(ccold); 3844 kfree(ccold);
3814 } 3845 }
3815 kfree(new); 3846 kfree(new);
3816 return alloc_kmemlist(cachep, gfp); 3847 return alloc_kmem_cache_node(cachep, gfp);
3817} 3848}
3818 3849
3819static int do_tune_cpucache(struct kmem_cache *cachep, int limit, 3850static int do_tune_cpucache(struct kmem_cache *cachep, int limit,
@@ -3982,7 +4013,7 @@ static void cache_reap(struct work_struct *w)
3982 if (time_after(n->next_reap, jiffies)) 4013 if (time_after(n->next_reap, jiffies))
3983 goto next; 4014 goto next;
3984 4015
3985 n->next_reap = jiffies + REAPTIMEOUT_LIST3; 4016 n->next_reap = jiffies + REAPTIMEOUT_NODE;
3986 4017
3987 drain_array(searchp, n, n->shared, 0, node); 4018 drain_array(searchp, n, n->shared, 0, node);
3988 4019
@@ -4003,7 +4034,7 @@ next:
4003 next_reap_node(); 4034 next_reap_node();
4004out: 4035out:
4005 /* Set up the next iteration */ 4036 /* Set up the next iteration */
4006 schedule_delayed_work(work, round_jiffies_relative(REAPTIMEOUT_CPUC)); 4037 schedule_delayed_work(work, round_jiffies_relative(REAPTIMEOUT_AC));
4007} 4038}
4008 4039
4009#ifdef CONFIG_SLABINFO 4040#ifdef CONFIG_SLABINFO
@@ -4210,7 +4241,7 @@ static void handle_slab(unsigned long *n, struct kmem_cache *c,
4210 4241
4211 for (j = page->active; j < c->num; j++) { 4242 for (j = page->active; j < c->num; j++) {
4212 /* Skip freed item */ 4243 /* Skip freed item */
4213 if (slab_freelist(page)[j] == i) { 4244 if (get_free_obj(page, j) == i) {
4214 active = false; 4245 active = false;
4215 break; 4246 break;
4216 } 4247 }
diff --git a/mm/slob.c b/mm/slob.c
index 4bf8809dfcce..730cad45d4be 100644
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -111,13 +111,13 @@ static inline int slob_page_free(struct page *sp)
111 111
112static void set_slob_page_free(struct page *sp, struct list_head *list) 112static void set_slob_page_free(struct page *sp, struct list_head *list)
113{ 113{
114 list_add(&sp->list, list); 114 list_add(&sp->lru, list);
115 __SetPageSlobFree(sp); 115 __SetPageSlobFree(sp);
116} 116}
117 117
118static inline void clear_slob_page_free(struct page *sp) 118static inline void clear_slob_page_free(struct page *sp)
119{ 119{
120 list_del(&sp->list); 120 list_del(&sp->lru);
121 __ClearPageSlobFree(sp); 121 __ClearPageSlobFree(sp);
122} 122}
123 123
@@ -282,7 +282,7 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
282 282
283 spin_lock_irqsave(&slob_lock, flags); 283 spin_lock_irqsave(&slob_lock, flags);
284 /* Iterate through each partially free page, try to find room */ 284 /* Iterate through each partially free page, try to find room */
285 list_for_each_entry(sp, slob_list, list) { 285 list_for_each_entry(sp, slob_list, lru) {
286#ifdef CONFIG_NUMA 286#ifdef CONFIG_NUMA
287 /* 287 /*
288 * If there's a node specification, search for a partial 288 * If there's a node specification, search for a partial
@@ -296,7 +296,7 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
296 continue; 296 continue;
297 297
298 /* Attempt to alloc */ 298 /* Attempt to alloc */
299 prev = sp->list.prev; 299 prev = sp->lru.prev;
300 b = slob_page_alloc(sp, size, align); 300 b = slob_page_alloc(sp, size, align);
301 if (!b) 301 if (!b)
302 continue; 302 continue;
@@ -322,7 +322,7 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
322 spin_lock_irqsave(&slob_lock, flags); 322 spin_lock_irqsave(&slob_lock, flags);
323 sp->units = SLOB_UNITS(PAGE_SIZE); 323 sp->units = SLOB_UNITS(PAGE_SIZE);
324 sp->freelist = b; 324 sp->freelist = b;
325 INIT_LIST_HEAD(&sp->list); 325 INIT_LIST_HEAD(&sp->lru);
326 set_slob(b, SLOB_UNITS(PAGE_SIZE), b + SLOB_UNITS(PAGE_SIZE)); 326 set_slob(b, SLOB_UNITS(PAGE_SIZE), b + SLOB_UNITS(PAGE_SIZE));
327 set_slob_page_free(sp, slob_list); 327 set_slob_page_free(sp, slob_list);
328 b = slob_page_alloc(sp, size, align); 328 b = slob_page_alloc(sp, size, align);
diff --git a/mm/slub.c b/mm/slub.c
index f620bbf4054a..5e234f1f8853 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1352,11 +1352,12 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
1352 page = alloc_slab_page(alloc_gfp, node, oo); 1352 page = alloc_slab_page(alloc_gfp, node, oo);
1353 if (unlikely(!page)) { 1353 if (unlikely(!page)) {
1354 oo = s->min; 1354 oo = s->min;
1355 alloc_gfp = flags;
1355 /* 1356 /*
1356 * Allocation may have failed due to fragmentation. 1357 * Allocation may have failed due to fragmentation.
1357 * Try a lower order alloc if possible 1358 * Try a lower order alloc if possible
1358 */ 1359 */
1359 page = alloc_slab_page(flags, node, oo); 1360 page = alloc_slab_page(alloc_gfp, node, oo);
1360 1361
1361 if (page) 1362 if (page)
1362 stat(s, ORDER_FALLBACK); 1363 stat(s, ORDER_FALLBACK);
@@ -1366,7 +1367,7 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
1366 && !(s->flags & (SLAB_NOTRACK | DEBUG_DEFAULT_FLAGS))) { 1367 && !(s->flags & (SLAB_NOTRACK | DEBUG_DEFAULT_FLAGS))) {
1367 int pages = 1 << oo_order(oo); 1368 int pages = 1 << oo_order(oo);
1368 1369
1369 kmemcheck_alloc_shadow(page, oo_order(oo), flags, node); 1370 kmemcheck_alloc_shadow(page, oo_order(oo), alloc_gfp, node);
1370 1371
1371 /* 1372 /*
1372 * Objects from caches that have a constructor don't get 1373 * Objects from caches that have a constructor don't get
diff --git a/mm/util.c b/mm/util.c
index d7813e6d4cc7..f380af7ea779 100644
--- a/mm/util.c
+++ b/mm/util.c
@@ -446,6 +446,54 @@ unsigned long vm_commit_limit(void)
446 return allowed; 446 return allowed;
447} 447}
448 448
449/**
450 * get_cmdline() - copy the cmdline value to a buffer.
451 * @task: the task whose cmdline value to copy.
452 * @buffer: the buffer to copy to.
453 * @buflen: the length of the buffer. Larger cmdline values are truncated
454 * to this length.
455 * Returns the size of the cmdline field copied. Note that the copy does
456 * not guarantee an ending NULL byte.
457 */
458int get_cmdline(struct task_struct *task, char *buffer, int buflen)
459{
460 int res = 0;
461 unsigned int len;
462 struct mm_struct *mm = get_task_mm(task);
463 if (!mm)
464 goto out;
465 if (!mm->arg_end)
466 goto out_mm; /* Shh! No looking before we're done */
467
468 len = mm->arg_end - mm->arg_start;
469
470 if (len > buflen)
471 len = buflen;
472
473 res = access_process_vm(task, mm->arg_start, buffer, len, 0);
474
475 /*
476 * If the nul at the end of args has been overwritten, then
477 * assume application is using setproctitle(3).
478 */
479 if (res > 0 && buffer[res-1] != '\0' && len < buflen) {
480 len = strnlen(buffer, res);
481 if (len < res) {
482 res = len;
483 } else {
484 len = mm->env_end - mm->env_start;
485 if (len > buflen - res)
486 len = buflen - res;
487 res += access_process_vm(task, mm->env_start,
488 buffer+res, len, 0);
489 res = strnlen(buffer, res);
490 }
491 }
492out_mm:
493 mmput(mm);
494out:
495 return res;
496}
449 497
450/* Tracepoints definitions. */ 498/* Tracepoints definitions. */
451EXPORT_TRACEPOINT_SYMBOL(kmalloc); 499EXPORT_TRACEPOINT_SYMBOL(kmalloc);
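The get_cmdline() helper added above is intended for in-kernel callers that already hold a reference on the task. A minimal sketch of such a caller (the function name and buffer size are illustrative):

/* Sketch: 'task' must be a valid, referenced task_struct. */
static void print_task_cmdline(struct task_struct *task)
{
        char buf[256];
        int len = get_cmdline(task, buf, sizeof(buf) - 1);

        buf[len] = '\0';        /* get_cmdline() does not NUL-terminate */
        /* Arguments are separated by NUL bytes, so %s shows only the first. */
        pr_info("pid %d cmdline: %s\n", task->pid, buf);
}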
diff --git a/net/9p/client.c b/net/9p/client.c
index 9186550d77a6..0004cbaac4a4 100644
--- a/net/9p/client.c
+++ b/net/9p/client.c
@@ -415,9 +415,17 @@ static void p9_free_req(struct p9_client *c, struct p9_req_t *r)
415 * req: request received 415 * req: request received
416 * 416 *
417 */ 417 */
418void p9_client_cb(struct p9_client *c, struct p9_req_t *req) 418void p9_client_cb(struct p9_client *c, struct p9_req_t *req, int status)
419{ 419{
420 p9_debug(P9_DEBUG_MUX, " tag %d\n", req->tc->tag); 420 p9_debug(P9_DEBUG_MUX, " tag %d\n", req->tc->tag);
421
422 /*
423 * This barrier is needed to make sure any change made to req before
424 * the other thread wakes up will indeed be seen by the waiting side.
425 */
426 smp_wmb();
427 req->status = status;
428
421 wake_up(req->wq); 429 wake_up(req->wq);
422 p9_debug(P9_DEBUG_MUX, "wakeup: %d\n", req->tc->tag); 430 p9_debug(P9_DEBUG_MUX, "wakeup: %d\n", req->tc->tag);
423} 431}
@@ -655,16 +663,13 @@ static int p9_client_flush(struct p9_client *c, struct p9_req_t *oldreq)
655 if (IS_ERR(req)) 663 if (IS_ERR(req))
656 return PTR_ERR(req); 664 return PTR_ERR(req);
657 665
658
659 /* 666 /*
660 * if we haven't received a response for oldreq, 667 * if we haven't received a response for oldreq,
661 * remove it from the list 668 * remove it from the list
662 */ 669 */
663 if (oldreq->status == REQ_STATUS_FLSH) { 670 if (oldreq->status == REQ_STATUS_SENT)
664 spin_lock(&c->lock); 671 if (c->trans_mod->cancelled)
665 list_del(&oldreq->req_list); 672 c->trans_mod->cancelled(c, oldreq);
666 spin_unlock(&c->lock);
667 }
668 673
669 p9_free_req(c, req); 674 p9_free_req(c, req);
670 return 0; 675 return 0;
@@ -751,6 +756,12 @@ again:
751 err = wait_event_interruptible(*req->wq, 756 err = wait_event_interruptible(*req->wq,
752 req->status >= REQ_STATUS_RCVD); 757 req->status >= REQ_STATUS_RCVD);
753 758
759 /*
760 * Make sure our req is coherent with regard to updates in other
761 * threads - echoes to wmb() in the callback
762 */
763 smp_rmb();
764
754 if ((err == -ERESTARTSYS) && (c->status == Connected) 765 if ((err == -ERESTARTSYS) && (c->status == Connected)
755 && (type == P9_TFLUSH)) { 766 && (type == P9_TFLUSH)) {
756 sigpending = 1; 767 sigpending = 1;
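The two barriers added above form the usual publish/consume pairing: the completion path makes the reply visible, issues smp_wmb(), and only then stores the status that the waiter's wake-up condition tests; the waiter issues smp_rmb() after waking, before it touches the reply. In outline (a pattern sketch, not compilable 9p code; parsed_reply and use() are placeholders):

/* Completion side */
req->rc = parsed_reply;                 /* 1: publish the reply           */
smp_wmb();                              /* 2: order it before the status  */
req->status = REQ_STATUS_RCVD;          /* 3: flips the wait condition    */
wake_up(req->wq);

/* Waiting side */
wait_event_interruptible(*req->wq, req->status >= REQ_STATUS_RCVD);
smp_rmb();                              /* pairs with the smp_wmb() above */
use(req->rc);                           /* guaranteed to see the reply    */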
diff --git a/net/9p/trans_fd.c b/net/9p/trans_fd.c
index b7bd7f2961bf..80d08f6664cb 100644
--- a/net/9p/trans_fd.c
+++ b/net/9p/trans_fd.c
@@ -66,20 +66,6 @@ struct p9_fd_opts {
66 int privport; 66 int privport;
67}; 67};
68 68
69/**
70 * struct p9_trans_fd - transport state
71 * @rd: reference to file to read from
72 * @wr: reference of file to write to
73 * @conn: connection state reference
74 *
75 */
76
77struct p9_trans_fd {
78 struct file *rd;
79 struct file *wr;
80 struct p9_conn *conn;
81};
82
83/* 69/*
84 * Option Parsing (code inspired by NFS code) 70 * Option Parsing (code inspired by NFS code)
85 * - a little lazy - parse all fd-transport options 71 * - a little lazy - parse all fd-transport options
@@ -159,6 +145,20 @@ struct p9_conn {
159 unsigned long wsched; 145 unsigned long wsched;
160}; 146};
161 147
148/**
149 * struct p9_trans_fd - transport state
150 * @rd: reference to file to read from
151 * @wr: reference of file to write to
152 * @conn: connection state reference
153 *
154 */
155
156struct p9_trans_fd {
157 struct file *rd;
158 struct file *wr;
159 struct p9_conn conn;
160};
161
162static void p9_poll_workfn(struct work_struct *work); 162static void p9_poll_workfn(struct work_struct *work);
163 163
164static DEFINE_SPINLOCK(p9_poll_lock); 164static DEFINE_SPINLOCK(p9_poll_lock);
@@ -212,15 +212,9 @@ static void p9_conn_cancel(struct p9_conn *m, int err)
212 m->err = err; 212 m->err = err;
213 213
214 list_for_each_entry_safe(req, rtmp, &m->req_list, req_list) { 214 list_for_each_entry_safe(req, rtmp, &m->req_list, req_list) {
215 req->status = REQ_STATUS_ERROR;
216 if (!req->t_err)
217 req->t_err = err;
218 list_move(&req->req_list, &cancel_list); 215 list_move(&req->req_list, &cancel_list);
219 } 216 }
220 list_for_each_entry_safe(req, rtmp, &m->unsent_req_list, req_list) { 217 list_for_each_entry_safe(req, rtmp, &m->unsent_req_list, req_list) {
221 req->status = REQ_STATUS_ERROR;
222 if (!req->t_err)
223 req->t_err = err;
224 list_move(&req->req_list, &cancel_list); 218 list_move(&req->req_list, &cancel_list);
225 } 219 }
226 spin_unlock_irqrestore(&m->client->lock, flags); 220 spin_unlock_irqrestore(&m->client->lock, flags);
@@ -228,7 +222,9 @@ static void p9_conn_cancel(struct p9_conn *m, int err)
228 list_for_each_entry_safe(req, rtmp, &cancel_list, req_list) { 222 list_for_each_entry_safe(req, rtmp, &cancel_list, req_list) {
229 p9_debug(P9_DEBUG_ERROR, "call back req %p\n", req); 223 p9_debug(P9_DEBUG_ERROR, "call back req %p\n", req);
230 list_del(&req->req_list); 224 list_del(&req->req_list);
231 p9_client_cb(m->client, req); 225 if (!req->t_err)
226 req->t_err = err;
227 p9_client_cb(m->client, req, REQ_STATUS_ERROR);
232 } 228 }
233} 229}
234 230
@@ -302,6 +298,7 @@ static void p9_read_work(struct work_struct *work)
302{ 298{
303 int n, err; 299 int n, err;
304 struct p9_conn *m; 300 struct p9_conn *m;
301 int status = REQ_STATUS_ERROR;
305 302
306 m = container_of(work, struct p9_conn, rq); 303 m = container_of(work, struct p9_conn, rq);
307 304
@@ -348,8 +345,7 @@ static void p9_read_work(struct work_struct *work)
348 "mux %p pkt: size: %d bytes tag: %d\n", m, n, tag); 345 "mux %p pkt: size: %d bytes tag: %d\n", m, n, tag);
349 346
350 m->req = p9_tag_lookup(m->client, tag); 347 m->req = p9_tag_lookup(m->client, tag);
351 if (!m->req || (m->req->status != REQ_STATUS_SENT && 348 if (!m->req || (m->req->status != REQ_STATUS_SENT)) {
352 m->req->status != REQ_STATUS_FLSH)) {
353 p9_debug(P9_DEBUG_ERROR, "Unexpected packet tag %d\n", 349 p9_debug(P9_DEBUG_ERROR, "Unexpected packet tag %d\n",
354 tag); 350 tag);
355 err = -EIO; 351 err = -EIO;
@@ -375,10 +371,10 @@ static void p9_read_work(struct work_struct *work)
375 p9_debug(P9_DEBUG_TRANS, "got new packet\n"); 371 p9_debug(P9_DEBUG_TRANS, "got new packet\n");
376 spin_lock(&m->client->lock); 372 spin_lock(&m->client->lock);
377 if (m->req->status != REQ_STATUS_ERROR) 373 if (m->req->status != REQ_STATUS_ERROR)
378 m->req->status = REQ_STATUS_RCVD; 374 status = REQ_STATUS_RCVD;
379 list_del(&m->req->req_list); 375 list_del(&m->req->req_list);
380 spin_unlock(&m->client->lock); 376 spin_unlock(&m->client->lock);
381 p9_client_cb(m->client, m->req); 377 p9_client_cb(m->client, m->req, status);
382 m->rbuf = NULL; 378 m->rbuf = NULL;
383 m->rpos = 0; 379 m->rpos = 0;
384 m->rsize = 0; 380 m->rsize = 0;
@@ -573,21 +569,19 @@ p9_pollwait(struct file *filp, wait_queue_head_t *wait_address, poll_table *p)
573} 569}
574 570
575/** 571/**
576 * p9_conn_create - allocate and initialize the per-session mux data 572 * p9_conn_create - initialize the per-session mux data
577 * @client: client instance 573 * @client: client instance
578 * 574 *
579 * Note: Creates the polling task if this is the first session. 575 * Note: Creates the polling task if this is the first session.
580 */ 576 */
581 577
582static struct p9_conn *p9_conn_create(struct p9_client *client) 578static void p9_conn_create(struct p9_client *client)
583{ 579{
584 int n; 580 int n;
585 struct p9_conn *m; 581 struct p9_trans_fd *ts = client->trans;
582 struct p9_conn *m = &ts->conn;
586 583
587 p9_debug(P9_DEBUG_TRANS, "client %p msize %d\n", client, client->msize); 584 p9_debug(P9_DEBUG_TRANS, "client %p msize %d\n", client, client->msize);
588 m = kzalloc(sizeof(struct p9_conn), GFP_KERNEL);
589 if (!m)
590 return ERR_PTR(-ENOMEM);
591 585
592 INIT_LIST_HEAD(&m->mux_list); 586 INIT_LIST_HEAD(&m->mux_list);
593 m->client = client; 587 m->client = client;
@@ -609,8 +603,6 @@ static struct p9_conn *p9_conn_create(struct p9_client *client)
609 p9_debug(P9_DEBUG_TRANS, "mux %p can write\n", m); 603 p9_debug(P9_DEBUG_TRANS, "mux %p can write\n", m);
610 set_bit(Wpending, &m->wsched); 604 set_bit(Wpending, &m->wsched);
611 } 605 }
612
613 return m;
614} 606}
615 607
616/** 608/**
@@ -669,7 +661,7 @@ static int p9_fd_request(struct p9_client *client, struct p9_req_t *req)
669{ 661{
670 int n; 662 int n;
671 struct p9_trans_fd *ts = client->trans; 663 struct p9_trans_fd *ts = client->trans;
672 struct p9_conn *m = ts->conn; 664 struct p9_conn *m = &ts->conn;
673 665
674 p9_debug(P9_DEBUG_TRANS, "mux %p task %p tcall %p id %d\n", 666 p9_debug(P9_DEBUG_TRANS, "mux %p task %p tcall %p id %d\n",
675 m, current, req->tc, req->tc->id); 667 m, current, req->tc, req->tc->id);
@@ -704,14 +696,26 @@ static int p9_fd_cancel(struct p9_client *client, struct p9_req_t *req)
704 list_del(&req->req_list); 696 list_del(&req->req_list);
705 req->status = REQ_STATUS_FLSHD; 697 req->status = REQ_STATUS_FLSHD;
706 ret = 0; 698 ret = 0;
707 } else if (req->status == REQ_STATUS_SENT) 699 }
708 req->status = REQ_STATUS_FLSH;
709
710 spin_unlock(&client->lock); 700 spin_unlock(&client->lock);
711 701
712 return ret; 702 return ret;
713} 703}
714 704
705static int p9_fd_cancelled(struct p9_client *client, struct p9_req_t *req)
706{
707 p9_debug(P9_DEBUG_TRANS, "client %p req %p\n", client, req);
708
709 /* we haven't received a response for oldreq,
710 * remove it from the list.
711 */
712 spin_lock(&client->lock);
713 list_del(&req->req_list);
714 spin_unlock(&client->lock);
715
716 return 0;
717}
718
715/** 719/**
716 * parse_opts - parse mount options into p9_fd_opts structure 720 * parse_opts - parse mount options into p9_fd_opts structure
717 * @params: options string passed from mount 721 * @params: options string passed from mount
@@ -780,7 +784,7 @@ static int parse_opts(char *params, struct p9_fd_opts *opts)
780 784
781static int p9_fd_open(struct p9_client *client, int rfd, int wfd) 785static int p9_fd_open(struct p9_client *client, int rfd, int wfd)
782{ 786{
783 struct p9_trans_fd *ts = kmalloc(sizeof(struct p9_trans_fd), 787 struct p9_trans_fd *ts = kzalloc(sizeof(struct p9_trans_fd),
784 GFP_KERNEL); 788 GFP_KERNEL);
785 if (!ts) 789 if (!ts)
786 return -ENOMEM; 790 return -ENOMEM;
@@ -806,9 +810,8 @@ static int p9_socket_open(struct p9_client *client, struct socket *csocket)
806{ 810{
807 struct p9_trans_fd *p; 811 struct p9_trans_fd *p;
808 struct file *file; 812 struct file *file;
809 int ret;
810 813
811 p = kmalloc(sizeof(struct p9_trans_fd), GFP_KERNEL); 814 p = kzalloc(sizeof(struct p9_trans_fd), GFP_KERNEL);
812 if (!p) 815 if (!p)
813 return -ENOMEM; 816 return -ENOMEM;
814 817
@@ -829,20 +832,12 @@ static int p9_socket_open(struct p9_client *client, struct socket *csocket)
829 832
830 p->rd->f_flags |= O_NONBLOCK; 833 p->rd->f_flags |= O_NONBLOCK;
831 834
832 p->conn = p9_conn_create(client); 835 p9_conn_create(client);
833 if (IS_ERR(p->conn)) {
834 ret = PTR_ERR(p->conn);
835 p->conn = NULL;
836 kfree(p);
837 sockfd_put(csocket);
838 sockfd_put(csocket);
839 return ret;
840 }
841 return 0; 836 return 0;
842} 837}
843 838
844/** 839/**
845 * p9_mux_destroy - cancels all pending requests and frees mux resources 840 * p9_mux_destroy - cancels all pending requests of mux
846 * @m: mux to destroy 841 * @m: mux to destroy
847 * 842 *
848 */ 843 */
@@ -859,7 +854,6 @@ static void p9_conn_destroy(struct p9_conn *m)
859 p9_conn_cancel(m, -ECONNRESET); 854 p9_conn_cancel(m, -ECONNRESET);
860 855
861 m->client = NULL; 856 m->client = NULL;
862 kfree(m);
863} 857}
864 858
865/** 859/**
@@ -881,7 +875,7 @@ static void p9_fd_close(struct p9_client *client)
881 875
882 client->status = Disconnected; 876 client->status = Disconnected;
883 877
884 p9_conn_destroy(ts->conn); 878 p9_conn_destroy(&ts->conn);
885 879
886 if (ts->rd) 880 if (ts->rd)
887 fput(ts->rd); 881 fput(ts->rd);
@@ -1033,14 +1027,7 @@ p9_fd_create(struct p9_client *client, const char *addr, char *args)
1033 return err; 1027 return err;
1034 1028
1035 p = (struct p9_trans_fd *) client->trans; 1029 p = (struct p9_trans_fd *) client->trans;
1036 p->conn = p9_conn_create(client); 1030 p9_conn_create(client);
1037 if (IS_ERR(p->conn)) {
1038 err = PTR_ERR(p->conn);
1039 p->conn = NULL;
1040 fput(p->rd);
1041 fput(p->wr);
1042 return err;
1043 }
1044 1031
1045 return 0; 1032 return 0;
1046} 1033}
@@ -1053,6 +1040,7 @@ static struct p9_trans_module p9_tcp_trans = {
1053 .close = p9_fd_close, 1040 .close = p9_fd_close,
1054 .request = p9_fd_request, 1041 .request = p9_fd_request,
1055 .cancel = p9_fd_cancel, 1042 .cancel = p9_fd_cancel,
1043 .cancelled = p9_fd_cancelled,
1056 .owner = THIS_MODULE, 1044 .owner = THIS_MODULE,
1057}; 1045};
1058 1046
@@ -1064,6 +1052,7 @@ static struct p9_trans_module p9_unix_trans = {
1064 .close = p9_fd_close, 1052 .close = p9_fd_close,
1065 .request = p9_fd_request, 1053 .request = p9_fd_request,
1066 .cancel = p9_fd_cancel, 1054 .cancel = p9_fd_cancel,
1055 .cancelled = p9_fd_cancelled,
1067 .owner = THIS_MODULE, 1056 .owner = THIS_MODULE,
1068}; 1057};
1069 1058
@@ -1075,6 +1064,7 @@ static struct p9_trans_module p9_fd_trans = {
1075 .close = p9_fd_close, 1064 .close = p9_fd_close,
1076 .request = p9_fd_request, 1065 .request = p9_fd_request,
1077 .cancel = p9_fd_cancel, 1066 .cancel = p9_fd_cancel,
1067 .cancelled = p9_fd_cancelled,
1078 .owner = THIS_MODULE, 1068 .owner = THIS_MODULE,
1079}; 1069};
1080 1070
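
Taken together, the trans_fd.c hunks do two things: the per-session p9_conn is now embedded in struct p9_trans_fd rather than allocated separately (so p9_conn_create() can no longer fail and the error unwinding disappears), and the transport grows a second cancellation hook, ->cancelled, called once the client knows the flush for a request completed without a reply ever arriving. A minimal sketch of such a hook for a hypothetical transport, assuming the request is still linked on a list protected by client->lock, exactly as p9_fd_cancelled() above does:

    #include <net/9p/9p.h>
    #include <net/9p/client.h>
    #include <net/9p/transport.h>

    /* Hypothetical ->cancelled hook: the TFLUSH for req was answered, so no
     * response for req itself will arrive; unhook it under the client lock. */
    static int my_trans_cancelled(struct p9_client *client, struct p9_req_t *req)
    {
            spin_lock(&client->lock);
            list_del(&req->req_list);
            spin_unlock(&client->lock);
            return 0;
    }

    /* Wired up next to the existing callbacks in the p9_trans_module:
     *         .cancel    = my_trans_cancel,
     *         .cancelled = my_trans_cancelled,
     */
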
diff --git a/net/9p/trans_rdma.c b/net/9p/trans_rdma.c
index 8f68df5d2973..14ad43b5cf89 100644
--- a/net/9p/trans_rdma.c
+++ b/net/9p/trans_rdma.c
@@ -193,6 +193,8 @@ static int parse_opts(char *params, struct p9_rdma_opts *opts)
193 if (!*p) 193 if (!*p)
194 continue; 194 continue;
195 token = match_token(p, tokens, args); 195 token = match_token(p, tokens, args);
196 if (token == Opt_err)
197 continue;
196 r = match_int(&args[0], &option); 198 r = match_int(&args[0], &option);
197 if (r < 0) { 199 if (r < 0) {
198 p9_debug(P9_DEBUG_ERROR, 200 p9_debug(P9_DEBUG_ERROR,
@@ -305,8 +307,7 @@ handle_recv(struct p9_client *client, struct p9_trans_rdma *rdma,
305 } 307 }
306 308
307 req->rc = c->rc; 309 req->rc = c->rc;
308 req->status = REQ_STATUS_RCVD; 310 p9_client_cb(client, req, REQ_STATUS_RCVD);
309 p9_client_cb(client, req);
310 311
311 return; 312 return;
312 313
@@ -511,6 +512,11 @@ dont_need_post_recv:
511 goto send_error; 512 goto send_error;
512 } 513 }
513 514
515 /* Mark request as `sent' *before* we actually send it,
516 * because doing it after could erase the REQ_STATUS_RCVD
517 * status in case of a very fast reply.
518 */
519 req->status = REQ_STATUS_SENT;
514 err = ib_post_send(rdma->qp, &wr, &bad_wr); 520 err = ib_post_send(rdma->qp, &wr, &bad_wr);
515 if (err) 521 if (err)
516 goto send_error; 522 goto send_error;
@@ -520,6 +526,7 @@ dont_need_post_recv:
520 526
521 /* Handle errors that happened during or while preparing the send: */ 527 /* Handle errors that happened during or while preparing the send: */
522 send_error: 528 send_error:
529 req->status = REQ_STATUS_ERROR;
523 kfree(c); 530 kfree(c);
524 p9_debug(P9_DEBUG_ERROR, "Error %d in rdma_request()\n", err); 531 p9_debug(P9_DEBUG_ERROR, "Error %d in rdma_request()\n", err);
525 532
@@ -582,12 +589,24 @@ static struct p9_trans_rdma *alloc_rdma(struct p9_rdma_opts *opts)
582 return rdma; 589 return rdma;
583} 590}
584 591
585/* its not clear to me we can do anything after send has been posted */
586static int rdma_cancel(struct p9_client *client, struct p9_req_t *req) 592static int rdma_cancel(struct p9_client *client, struct p9_req_t *req)
587{ 593{
594 /* Nothing to do here.
595 * We will take care of it (if we have to) in rdma_cancelled()
596 */
588 return 1; 597 return 1;
589} 598}
590 599
600/* A request has been fully flushed without a reply.
601 * That means we have posted one buffer in excess.
602 */
603static int rdma_cancelled(struct p9_client *client, struct p9_req_t *req)
604{
605 struct p9_trans_rdma *rdma = client->trans;
606 atomic_inc(&rdma->excess_rc);
607 return 0;
608}
609
591/** 610/**
592 * trans_create_rdma - Transport method for creating a transport instance 611
593 * @client: client instance 612 * @client: client instance
@@ -721,6 +740,7 @@ static struct p9_trans_module p9_rdma_trans = {
721 .close = rdma_close, 740 .close = rdma_close,
722 .request = rdma_request, 741 .request = rdma_request,
723 .cancel = rdma_cancel, 742 .cancel = rdma_cancel,
743 .cancelled = rdma_cancelled,
724}; 744};
725 745
726/** 746/**
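
Two RDMA-specific details are worth spelling out. First, rdma_request() now sets REQ_STATUS_SENT before ib_post_send(), because the reply can complete on another CPU almost immediately; with the old order the status store could land after the receive completion path had already marked the request REQ_STATUS_RCVD and silently undo it. Second, rdma_cancelled() bumps excess_rc (an atomic counter this patch adds to struct p9_trans_rdma, not shown in these hunks) so the transport remembers it has one posted receive buffer with no request left to match it. A sketch of the race the reordering closes:

    /* Interleaving guarded against (hypothetical CPUs A and B):
     *
     *   A: ib_post_send(qp, &wr, &bad_wr);
     *   B: recv completion fires, reply path marks req REQ_STATUS_RCVD
     *   A: req->status = REQ_STATUS_SENT;      <-- would clobber RCVD
     *
     * Hence the corrected order used above: */
    req->status = REQ_STATUS_SENT;              /* publish state first       */
    err = ib_post_send(rdma->qp, &wr, &bad_wr);
    if (err) {
            req->status = REQ_STATUS_ERROR;     /* nothing was actually sent */
            goto send_error;
    }
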
diff --git a/net/9p/trans_virtio.c b/net/9p/trans_virtio.c
index ac2666c1d011..6940d8fe8971 100644
--- a/net/9p/trans_virtio.c
+++ b/net/9p/trans_virtio.c
@@ -164,8 +164,7 @@ static void req_done(struct virtqueue *vq)
164 p9_debug(P9_DEBUG_TRANS, ": rc %p\n", rc); 164 p9_debug(P9_DEBUG_TRANS, ": rc %p\n", rc);
165 p9_debug(P9_DEBUG_TRANS, ": lookup tag %d\n", rc->tag); 165 p9_debug(P9_DEBUG_TRANS, ": lookup tag %d\n", rc->tag);
166 req = p9_tag_lookup(chan->client, rc->tag); 166 req = p9_tag_lookup(chan->client, rc->tag);
167 req->status = REQ_STATUS_RCVD; 167 p9_client_cb(chan->client, req, REQ_STATUS_RCVD);
168 p9_client_cb(chan->client, req);
169 } 168 }
170} 169}
171 170
diff --git a/net/atm/clip.c b/net/atm/clip.c
index 8215f7cb170b..ba291ce4bdff 100644
--- a/net/atm/clip.c
+++ b/net/atm/clip.c
@@ -68,7 +68,7 @@ static int to_atmarpd(enum atmarp_ctrl_type type, int itf, __be32 ip)
68 68
69 sk = sk_atm(atmarpd); 69 sk = sk_atm(atmarpd);
70 skb_queue_tail(&sk->sk_receive_queue, skb); 70 skb_queue_tail(&sk->sk_receive_queue, skb);
71 sk->sk_data_ready(sk, skb->len); 71 sk->sk_data_ready(sk);
72 return 0; 72 return 0;
73} 73}
74 74
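
From net/atm onward, the bulk of the remaining hunks are the mechanical side of one interface change: sk_data_ready() no longer takes a byte count, so every caller passes just the socket and every custom callback drops its unused second parameter (several skb_len temporaries disappear with it). The pattern, sketched for a hypothetical protocol that queues an skb and wakes the reader:

    #include <linux/skbuff.h>
    #include <net/sock.h>

    /* Hypothetical receive-side helper under the new convention; the amount
     * of queued data is no longer passed, readers inspect the queue itself. */
    static void my_proto_deliver(struct sock *sk, struct sk_buff *skb)
    {
            if (!sock_flag(sk, SOCK_DEAD)) {
                    skb_queue_tail(&sk->sk_receive_queue, skb);
                    sk->sk_data_ready(sk);  /* was sk->sk_data_ready(sk, skb->len) */
            } else {
                    kfree_skb(skb);
            }
    }
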
diff --git a/net/atm/lec.c b/net/atm/lec.c
index 5a2f602d07e1..4c5b8ba0f84f 100644
--- a/net/atm/lec.c
+++ b/net/atm/lec.c
@@ -152,7 +152,7 @@ static void lec_handle_bridge(struct sk_buff *skb, struct net_device *dev)
152 atm_force_charge(priv->lecd, skb2->truesize); 152 atm_force_charge(priv->lecd, skb2->truesize);
153 sk = sk_atm(priv->lecd); 153 sk = sk_atm(priv->lecd);
154 skb_queue_tail(&sk->sk_receive_queue, skb2); 154 skb_queue_tail(&sk->sk_receive_queue, skb2);
155 sk->sk_data_ready(sk, skb2->len); 155 sk->sk_data_ready(sk);
156 } 156 }
157} 157}
158#endif /* defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE) */ 158#endif /* defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE) */
@@ -447,7 +447,7 @@ static int lec_atm_send(struct atm_vcc *vcc, struct sk_buff *skb)
447 atm_force_charge(priv->lecd, skb2->truesize); 447 atm_force_charge(priv->lecd, skb2->truesize);
448 sk = sk_atm(priv->lecd); 448 sk = sk_atm(priv->lecd);
449 skb_queue_tail(&sk->sk_receive_queue, skb2); 449 skb_queue_tail(&sk->sk_receive_queue, skb2);
450 sk->sk_data_ready(sk, skb2->len); 450 sk->sk_data_ready(sk);
451 } 451 }
452 } 452 }
453#endif /* defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE) */ 453#endif /* defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE) */
@@ -530,13 +530,13 @@ send_to_lecd(struct lec_priv *priv, atmlec_msg_type type,
530 atm_force_charge(priv->lecd, skb->truesize); 530 atm_force_charge(priv->lecd, skb->truesize);
531 sk = sk_atm(priv->lecd); 531 sk = sk_atm(priv->lecd);
532 skb_queue_tail(&sk->sk_receive_queue, skb); 532 skb_queue_tail(&sk->sk_receive_queue, skb);
533 sk->sk_data_ready(sk, skb->len); 533 sk->sk_data_ready(sk);
534 534
535 if (data != NULL) { 535 if (data != NULL) {
536 pr_debug("about to send %d bytes of data\n", data->len); 536 pr_debug("about to send %d bytes of data\n", data->len);
537 atm_force_charge(priv->lecd, data->truesize); 537 atm_force_charge(priv->lecd, data->truesize);
538 skb_queue_tail(&sk->sk_receive_queue, data); 538 skb_queue_tail(&sk->sk_receive_queue, data);
539 sk->sk_data_ready(sk, skb->len); 539 sk->sk_data_ready(sk);
540 } 540 }
541 541
542 return 0; 542 return 0;
@@ -616,7 +616,7 @@ static void lec_push(struct atm_vcc *vcc, struct sk_buff *skb)
616 616
617 pr_debug("%s: To daemon\n", dev->name); 617 pr_debug("%s: To daemon\n", dev->name);
618 skb_queue_tail(&sk->sk_receive_queue, skb); 618 skb_queue_tail(&sk->sk_receive_queue, skb);
619 sk->sk_data_ready(sk, skb->len); 619 sk->sk_data_ready(sk);
620 } else { /* Data frame, queue to protocol handlers */ 620 } else { /* Data frame, queue to protocol handlers */
621 struct lec_arp_table *entry; 621 struct lec_arp_table *entry;
622 unsigned char *src, *dst; 622 unsigned char *src, *dst;
diff --git a/net/atm/mpc.c b/net/atm/mpc.c
index 91dc58f1124d..e8e0e7a8a23d 100644
--- a/net/atm/mpc.c
+++ b/net/atm/mpc.c
@@ -706,7 +706,7 @@ static void mpc_push(struct atm_vcc *vcc, struct sk_buff *skb)
706 dprintk("(%s) control packet arrived\n", dev->name); 706 dprintk("(%s) control packet arrived\n", dev->name);
707 /* Pass control packets to daemon */ 707 /* Pass control packets to daemon */
708 skb_queue_tail(&sk->sk_receive_queue, skb); 708 skb_queue_tail(&sk->sk_receive_queue, skb);
709 sk->sk_data_ready(sk, skb->len); 709 sk->sk_data_ready(sk);
710 return; 710 return;
711 } 711 }
712 712
@@ -992,7 +992,7 @@ int msg_to_mpoad(struct k_message *mesg, struct mpoa_client *mpc)
992 992
993 sk = sk_atm(mpc->mpoad_vcc); 993 sk = sk_atm(mpc->mpoad_vcc);
994 skb_queue_tail(&sk->sk_receive_queue, skb); 994 skb_queue_tail(&sk->sk_receive_queue, skb);
995 sk->sk_data_ready(sk, skb->len); 995 sk->sk_data_ready(sk);
996 996
997 return 0; 997 return 0;
998} 998}
@@ -1273,7 +1273,7 @@ static void purge_egress_shortcut(struct atm_vcc *vcc, eg_cache_entry *entry)
1273 1273
1274 sk = sk_atm(vcc); 1274 sk = sk_atm(vcc);
1275 skb_queue_tail(&sk->sk_receive_queue, skb); 1275 skb_queue_tail(&sk->sk_receive_queue, skb);
1276 sk->sk_data_ready(sk, skb->len); 1276 sk->sk_data_ready(sk);
1277 dprintk("exiting\n"); 1277 dprintk("exiting\n");
1278} 1278}
1279 1279
diff --git a/net/atm/raw.c b/net/atm/raw.c
index b4f7b9ff3c74..2e17e97a7a8b 100644
--- a/net/atm/raw.c
+++ b/net/atm/raw.c
@@ -25,7 +25,7 @@ static void atm_push_raw(struct atm_vcc *vcc, struct sk_buff *skb)
25 struct sock *sk = sk_atm(vcc); 25 struct sock *sk = sk_atm(vcc);
26 26
27 skb_queue_tail(&sk->sk_receive_queue, skb); 27 skb_queue_tail(&sk->sk_receive_queue, skb);
28 sk->sk_data_ready(sk, skb->len); 28 sk->sk_data_ready(sk);
29 } 29 }
30} 30}
31 31
diff --git a/net/atm/signaling.c b/net/atm/signaling.c
index 4176887e72eb..523bce72f698 100644
--- a/net/atm/signaling.c
+++ b/net/atm/signaling.c
@@ -51,7 +51,7 @@ static void sigd_put_skb(struct sk_buff *skb)
51#endif 51#endif
52 atm_force_charge(sigd, skb->truesize); 52 atm_force_charge(sigd, skb->truesize);
53 skb_queue_tail(&sk_atm(sigd)->sk_receive_queue, skb); 53 skb_queue_tail(&sk_atm(sigd)->sk_receive_queue, skb);
54 sk_atm(sigd)->sk_data_ready(sk_atm(sigd), skb->len); 54 sk_atm(sigd)->sk_data_ready(sk_atm(sigd));
55} 55}
56 56
57static void modify_qos(struct atm_vcc *vcc, struct atmsvc_msg *msg) 57static void modify_qos(struct atm_vcc *vcc, struct atmsvc_msg *msg)
diff --git a/net/ax25/ax25_in.c b/net/ax25/ax25_in.c
index 96f4cab3a2f9..7ed8ab724819 100644
--- a/net/ax25/ax25_in.c
+++ b/net/ax25/ax25_in.c
@@ -422,7 +422,7 @@ static int ax25_rcv(struct sk_buff *skb, struct net_device *dev,
422 422
423 if (sk) { 423 if (sk) {
424 if (!sock_flag(sk, SOCK_DEAD)) 424 if (!sock_flag(sk, SOCK_DEAD))
425 sk->sk_data_ready(sk, skb->len); 425 sk->sk_data_ready(sk);
426 sock_put(sk); 426 sock_put(sk);
427 } else { 427 } else {
428free: 428free:
diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c
index f59e00c2daa9..ef5e5b04f34f 100644
--- a/net/bluetooth/l2cap_sock.c
+++ b/net/bluetooth/l2cap_sock.c
@@ -1271,7 +1271,7 @@ static void l2cap_sock_teardown_cb(struct l2cap_chan *chan, int err)
1271 1271
1272 if (parent) { 1272 if (parent) {
1273 bt_accept_unlink(sk); 1273 bt_accept_unlink(sk);
1274 parent->sk_data_ready(parent, 0); 1274 parent->sk_data_ready(parent);
1275 } else { 1275 } else {
1276 sk->sk_state_change(sk); 1276 sk->sk_state_change(sk);
1277 } 1277 }
@@ -1327,7 +1327,7 @@ static void l2cap_sock_ready_cb(struct l2cap_chan *chan)
1327 sk->sk_state_change(sk); 1327 sk->sk_state_change(sk);
1328 1328
1329 if (parent) 1329 if (parent)
1330 parent->sk_data_ready(parent, 0); 1330 parent->sk_data_ready(parent);
1331 1331
1332 release_sock(sk); 1332 release_sock(sk);
1333} 1333}
@@ -1340,7 +1340,7 @@ static void l2cap_sock_defer_cb(struct l2cap_chan *chan)
1340 1340
1341 parent = bt_sk(sk)->parent; 1341 parent = bt_sk(sk)->parent;
1342 if (parent) 1342 if (parent)
1343 parent->sk_data_ready(parent, 0); 1343 parent->sk_data_ready(parent);
1344 1344
1345 release_sock(sk); 1345 release_sock(sk);
1346} 1346}
diff --git a/net/bluetooth/rfcomm/core.c b/net/bluetooth/rfcomm/core.c
index 633cceeb943e..cf620260affa 100644
--- a/net/bluetooth/rfcomm/core.c
+++ b/net/bluetooth/rfcomm/core.c
@@ -186,9 +186,9 @@ static void rfcomm_l2state_change(struct sock *sk)
186 rfcomm_schedule(); 186 rfcomm_schedule();
187} 187}
188 188
189static void rfcomm_l2data_ready(struct sock *sk, int bytes) 189static void rfcomm_l2data_ready(struct sock *sk)
190{ 190{
191 BT_DBG("%p bytes %d", sk, bytes); 191 BT_DBG("%p", sk);
192 rfcomm_schedule(); 192 rfcomm_schedule();
193} 193}
194 194
diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c
index eabd25ab5ad9..c603a5eb4720 100644
--- a/net/bluetooth/rfcomm/sock.c
+++ b/net/bluetooth/rfcomm/sock.c
@@ -54,7 +54,7 @@ static void rfcomm_sk_data_ready(struct rfcomm_dlc *d, struct sk_buff *skb)
54 54
55 atomic_add(skb->len, &sk->sk_rmem_alloc); 55 atomic_add(skb->len, &sk->sk_rmem_alloc);
56 skb_queue_tail(&sk->sk_receive_queue, skb); 56 skb_queue_tail(&sk->sk_receive_queue, skb);
57 sk->sk_data_ready(sk, skb->len); 57 sk->sk_data_ready(sk);
58 58
59 if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf) 59 if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf)
60 rfcomm_dlc_throttle(d); 60 rfcomm_dlc_throttle(d);
@@ -84,7 +84,7 @@ static void rfcomm_sk_state_change(struct rfcomm_dlc *d, int err)
84 sock_set_flag(sk, SOCK_ZAPPED); 84 sock_set_flag(sk, SOCK_ZAPPED);
85 bt_accept_unlink(sk); 85 bt_accept_unlink(sk);
86 } 86 }
87 parent->sk_data_ready(parent, 0); 87 parent->sk_data_ready(parent);
88 } else { 88 } else {
89 if (d->state == BT_CONNECTED) 89 if (d->state == BT_CONNECTED)
90 rfcomm_session_getaddr(d->session, 90 rfcomm_session_getaddr(d->session,
diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c
index ab1e6fcca4c5..c06dbd3938e8 100644
--- a/net/bluetooth/sco.c
+++ b/net/bluetooth/sco.c
@@ -1024,7 +1024,7 @@ static void sco_conn_ready(struct sco_conn *conn)
1024 sk->sk_state = BT_CONNECTED; 1024 sk->sk_state = BT_CONNECTED;
1025 1025
1026 /* Wake up parent */ 1026 /* Wake up parent */
1027 parent->sk_data_ready(parent, 1); 1027 parent->sk_data_ready(parent);
1028 1028
1029 bh_unlock_sock(parent); 1029 bh_unlock_sock(parent);
1030 1030
diff --git a/net/bridge/br_input.c b/net/bridge/br_input.c
index d0cca3c65f01..7985deaff52f 100644
--- a/net/bridge/br_input.c
+++ b/net/bridge/br_input.c
@@ -73,7 +73,7 @@ int br_handle_frame_finish(struct sk_buff *skb)
73 goto drop; 73 goto drop;
74 74
75 if (!br_allowed_ingress(p->br, nbp_get_vlan_info(p), skb, &vid)) 75 if (!br_allowed_ingress(p->br, nbp_get_vlan_info(p), skb, &vid))
76 goto drop; 76 goto out;
77 77
78 /* insert into forwarding database after filtering to avoid spoofing */ 78 /* insert into forwarding database after filtering to avoid spoofing */
79 br = p->br; 79 br = p->br;
diff --git a/net/bridge/br_vlan.c b/net/bridge/br_vlan.c
index 91510712c7a7..4a3716102789 100644
--- a/net/bridge/br_vlan.c
+++ b/net/bridge/br_vlan.c
@@ -170,7 +170,7 @@ bool br_allowed_ingress(struct net_bridge *br, struct net_port_vlans *v,
170 * rejected. 170 * rejected.
171 */ 171 */
172 if (!v) 172 if (!v)
173 return false; 173 goto drop;
174 174
175 /* If vlan tx offload is disabled on bridge device and frame was 175 /* If vlan tx offload is disabled on bridge device and frame was
176 * sent from vlan device on the bridge device, it does not have 176 * sent from vlan device on the bridge device, it does not have
@@ -193,7 +193,7 @@ bool br_allowed_ingress(struct net_bridge *br, struct net_port_vlans *v,
193 * vlan untagged or priority-tagged traffic belongs to. 193 * vlan untagged or priority-tagged traffic belongs to.
194 */ 194 */
195 if (pvid == VLAN_N_VID) 195 if (pvid == VLAN_N_VID)
196 return false; 196 goto drop;
197 197
198 /* PVID is set on this port. Any untagged or priority-tagged 198 /* PVID is set on this port. Any untagged or priority-tagged
199 * ingress frame is considered to belong to this vlan. 199 * ingress frame is considered to belong to this vlan.
@@ -216,7 +216,8 @@ bool br_allowed_ingress(struct net_bridge *br, struct net_port_vlans *v,
216 /* Frame had a valid vlan tag. See if vlan is allowed */ 216 /* Frame had a valid vlan tag. See if vlan is allowed */
217 if (test_bit(*vid, v->vlan_bitmap)) 217 if (test_bit(*vid, v->vlan_bitmap))
218 return true; 218 return true;
219 219drop:
220 kfree_skb(skb);
220 return false; 221 return false;
221} 222}
222 223
diff --git a/net/caif/caif_socket.c b/net/caif/caif_socket.c
index d6be3edb7a43..e8437094d15f 100644
--- a/net/caif/caif_socket.c
+++ b/net/caif/caif_socket.c
@@ -124,7 +124,6 @@ static void caif_flow_ctrl(struct sock *sk, int mode)
124static int caif_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) 124static int caif_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
125{ 125{
126 int err; 126 int err;
127 int skb_len;
128 unsigned long flags; 127 unsigned long flags;
129 struct sk_buff_head *list = &sk->sk_receive_queue; 128 struct sk_buff_head *list = &sk->sk_receive_queue;
130 struct caifsock *cf_sk = container_of(sk, struct caifsock, sk); 129 struct caifsock *cf_sk = container_of(sk, struct caifsock, sk);
@@ -153,14 +152,13 @@ static int caif_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
153 * may be freed by other threads of control pulling packets 152 * may be freed by other threads of control pulling packets
154 * from the queue. 153 * from the queue.
155 */ 154 */
156 skb_len = skb->len;
157 spin_lock_irqsave(&list->lock, flags); 155 spin_lock_irqsave(&list->lock, flags);
158 if (!sock_flag(sk, SOCK_DEAD)) 156 if (!sock_flag(sk, SOCK_DEAD))
159 __skb_queue_tail(list, skb); 157 __skb_queue_tail(list, skb);
160 spin_unlock_irqrestore(&list->lock, flags); 158 spin_unlock_irqrestore(&list->lock, flags);
161 159
162 if (!sock_flag(sk, SOCK_DEAD)) 160 if (!sock_flag(sk, SOCK_DEAD))
163 sk->sk_data_ready(sk, skb_len); 161 sk->sk_data_ready(sk);
164 else 162 else
165 kfree_skb(skb); 163 kfree_skb(skb);
166 return 0; 164 return 0;
diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c
index 4f55f9ce63fa..dac7f9b98687 100644
--- a/net/ceph/messenger.c
+++ b/net/ceph/messenger.c
@@ -383,7 +383,7 @@ static void con_sock_state_closed(struct ceph_connection *con)
383 */ 383 */
384 384
385/* data available on socket, or listen socket received a connect */ 385/* data available on socket, or listen socket received a connect */
386static void ceph_sock_data_ready(struct sock *sk, int count_unused) 386static void ceph_sock_data_ready(struct sock *sk)
387{ 387{
388 struct ceph_connection *con = sk->sk_user_data; 388 struct ceph_connection *con = sk->sk_user_data;
389 if (atomic_read(&con->msgr->stopping)) { 389 if (atomic_read(&con->msgr->stopping)) {
diff --git a/net/core/dev.c b/net/core/dev.c
index 14dac0654f28..5b3042e69f85 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -2284,7 +2284,7 @@ EXPORT_SYMBOL(skb_checksum_help);
2284__be16 skb_network_protocol(struct sk_buff *skb, int *depth) 2284__be16 skb_network_protocol(struct sk_buff *skb, int *depth)
2285{ 2285{
2286 __be16 type = skb->protocol; 2286 __be16 type = skb->protocol;
2287 int vlan_depth = ETH_HLEN; 2287 int vlan_depth = skb->mac_len;
2288 2288
2289 /* Tunnel gso handlers can set protocol to ethernet. */ 2289 /* Tunnel gso handlers can set protocol to ethernet. */
2290 if (type == htons(ETH_P_TEB)) { 2290 if (type == htons(ETH_P_TEB)) {
diff --git a/net/core/dst.c b/net/core/dst.c
index ca4231ec7347..80d6286c8b62 100644
--- a/net/core/dst.c
+++ b/net/core/dst.c
@@ -142,12 +142,12 @@ loop:
142 mutex_unlock(&dst_gc_mutex); 142 mutex_unlock(&dst_gc_mutex);
143} 143}
144 144
145int dst_discard(struct sk_buff *skb) 145int dst_discard_sk(struct sock *sk, struct sk_buff *skb)
146{ 146{
147 kfree_skb(skb); 147 kfree_skb(skb);
148 return 0; 148 return 0;
149} 149}
150EXPORT_SYMBOL(dst_discard); 150EXPORT_SYMBOL(dst_discard_sk);
151 151
152const u32 dst_default_metrics[RTAX_MAX + 1] = { 152const u32 dst_default_metrics[RTAX_MAX + 1] = {
153 /* This initializer is needed to force linker to place this variable 153 /* This initializer is needed to force linker to place this variable
@@ -184,7 +184,7 @@ void *dst_alloc(struct dst_ops *ops, struct net_device *dev,
184 dst->xfrm = NULL; 184 dst->xfrm = NULL;
185#endif 185#endif
186 dst->input = dst_discard; 186 dst->input = dst_discard;
187 dst->output = dst_discard; 187 dst->output = dst_discard_sk;
188 dst->error = 0; 188 dst->error = 0;
189 dst->obsolete = initial_obsolete; 189 dst->obsolete = initial_obsolete;
190 dst->header_len = 0; 190 dst->header_len = 0;
@@ -209,8 +209,10 @@ static void ___dst_free(struct dst_entry *dst)
209 /* The first case (dev==NULL) is required, when 209 /* The first case (dev==NULL) is required, when
210 protocol module is unloaded. 210 protocol module is unloaded.
211 */ 211 */
212 if (dst->dev == NULL || !(dst->dev->flags&IFF_UP)) 212 if (dst->dev == NULL || !(dst->dev->flags&IFF_UP)) {
213 dst->input = dst->output = dst_discard; 213 dst->input = dst_discard;
214 dst->output = dst_discard_sk;
215 }
214 dst->obsolete = DST_OBSOLETE_DEAD; 216 dst->obsolete = DST_OBSOLETE_DEAD;
215} 217}
216 218
@@ -361,7 +363,8 @@ static void dst_ifdown(struct dst_entry *dst, struct net_device *dev,
361 return; 363 return;
362 364
363 if (!unregister) { 365 if (!unregister) {
364 dst->input = dst->output = dst_discard; 366 dst->input = dst_discard;
367 dst->output = dst_discard_sk;
365 } else { 368 } else {
366 dst->dev = dev_net(dst->dev)->loopback_dev; 369 dst->dev = dev_net(dst->dev)->loopback_dev;
367 dev_hold(dst->dev); 370 dev_hold(dst->dev);
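
dst.c is the core of another signature change running through this section: dst_entry->output() now receives the transmitting socket explicitly, so the discard stub gains a struct sock * (dst_discard_sk) and every assignment pairs the _sk variant on the output side with the unchanged one-argument form on the input side. A sketch of an output handler under the new prototype, with a hypothetical name:

    #include <net/dst.h>

    /* Hypothetical dst output handler; sk may be NULL when the packet is
     * not associated with a socket (forwarded or kernel-generated traffic). */
    static int my_output(struct sock *sk, struct sk_buff *skb)
    {
            kfree_skb(skb);
            return 0;
    }

    /* Installed as: dst->output = my_output;  (dst->input keeps taking only
     * the skb, which is why the input assignments stay on plain dst_discard.) */
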
diff --git a/net/core/filter.c b/net/core/filter.c
index e08b3822c72a..cd58614660cf 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -600,6 +600,9 @@ static u64 __skb_get_nlattr(u64 ctx, u64 A, u64 X, u64 r4, u64 r5)
600 if (skb_is_nonlinear(skb)) 600 if (skb_is_nonlinear(skb))
601 return 0; 601 return 0;
602 602
603 if (skb->len < sizeof(struct nlattr))
604 return 0;
605
603 if (A > skb->len - sizeof(struct nlattr)) 606 if (A > skb->len - sizeof(struct nlattr))
604 return 0; 607 return 0;
605 608
@@ -618,11 +621,14 @@ static u64 __skb_get_nlattr_nest(u64 ctx, u64 A, u64 X, u64 r4, u64 r5)
618 if (skb_is_nonlinear(skb)) 621 if (skb_is_nonlinear(skb))
619 return 0; 622 return 0;
620 623
624 if (skb->len < sizeof(struct nlattr))
625 return 0;
626
621 if (A > skb->len - sizeof(struct nlattr)) 627 if (A > skb->len - sizeof(struct nlattr))
622 return 0; 628 return 0;
623 629
624 nla = (struct nlattr *) &skb->data[A]; 630 nla = (struct nlattr *) &skb->data[A];
625 if (nla->nla_len > A - skb->len) 631 if (nla->nla_len > skb->len - A)
626 return 0; 632 return 0;
627 633
628 nla = nla_find_nested(nla, X); 634 nla = nla_find_nested(nla, X);
@@ -1737,7 +1743,6 @@ void sk_decode_filter(struct sock_filter *filt, struct sock_filter *to)
1737 [BPF_S_ANC_RXHASH] = BPF_LD|BPF_B|BPF_ABS, 1743 [BPF_S_ANC_RXHASH] = BPF_LD|BPF_B|BPF_ABS,
1738 [BPF_S_ANC_CPU] = BPF_LD|BPF_B|BPF_ABS, 1744 [BPF_S_ANC_CPU] = BPF_LD|BPF_B|BPF_ABS,
1739 [BPF_S_ANC_ALU_XOR_X] = BPF_LD|BPF_B|BPF_ABS, 1745 [BPF_S_ANC_ALU_XOR_X] = BPF_LD|BPF_B|BPF_ABS,
1740 [BPF_S_ANC_SECCOMP_LD_W] = BPF_LD|BPF_B|BPF_ABS,
1741 [BPF_S_ANC_VLAN_TAG] = BPF_LD|BPF_B|BPF_ABS, 1746 [BPF_S_ANC_VLAN_TAG] = BPF_LD|BPF_B|BPF_ABS,
1742 [BPF_S_ANC_VLAN_TAG_PRESENT] = BPF_LD|BPF_B|BPF_ABS, 1747 [BPF_S_ANC_VLAN_TAG_PRESENT] = BPF_LD|BPF_B|BPF_ABS,
1743 [BPF_S_ANC_PAY_OFFSET] = BPF_LD|BPF_B|BPF_ABS, 1748 [BPF_S_ANC_PAY_OFFSET] = BPF_LD|BPF_B|BPF_ABS,
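
The filter.c checks deserve a note: skb->len is unsigned, so when a packet is shorter than a 4-byte netlink attribute header, `skb->len - sizeof(struct nlattr)` wraps around to a huge value and the existing `A > ...` bound never rejects anything; the new up-front length test closes that hole, and the nested variant additionally gets its swapped operands (`A - skb->len`) corrected to `skb->len - A`. A small standalone C illustration of the wraparound (the demo struct only mirrors the 4-byte nlattr header):

    #include <stdio.h>
    #include <stddef.h>

    /* Same 4-byte layout as struct nlattr, only for the arithmetic demo. */
    struct demo_nlattr {
            unsigned short nla_len;
            unsigned short nla_type;
    };

    int main(void)
    {
            unsigned int skb_len = 2;               /* runt packet            */
            unsigned long long A = 1000;            /* filter-supplied offset */

            if (A > skb_len - sizeof(struct demo_nlattr))   /* wraps to ~SIZE_MAX */
                    printf("old bound rejects A\n");
            else
                    printf("old bound accepts A=%llu against a %u-byte packet\n",
                           A, skb_len);

            if (skb_len < sizeof(struct demo_nlattr))       /* the new guard */
                    printf("new guard: too short for any attribute\n");
            return 0;
    }
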
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index d068ec25db1e..0304f981f7ff 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -3338,7 +3338,9 @@ static void pktgen_xmit(struct pktgen_dev *pkt_dev)
3338 queue_map = skb_get_queue_mapping(pkt_dev->skb); 3338 queue_map = skb_get_queue_mapping(pkt_dev->skb);
3339 txq = netdev_get_tx_queue(odev, queue_map); 3339 txq = netdev_get_tx_queue(odev, queue_map);
3340 3340
3341 __netif_tx_lock_bh(txq); 3341 local_bh_disable();
3342
3343 HARD_TX_LOCK(odev, txq, smp_processor_id());
3342 3344
3343 if (unlikely(netif_xmit_frozen_or_drv_stopped(txq))) { 3345 if (unlikely(netif_xmit_frozen_or_drv_stopped(txq))) {
3344 ret = NETDEV_TX_BUSY; 3346 ret = NETDEV_TX_BUSY;
@@ -3374,7 +3376,9 @@ static void pktgen_xmit(struct pktgen_dev *pkt_dev)
3374 pkt_dev->last_ok = 0; 3376 pkt_dev->last_ok = 0;
3375 } 3377 }
3376unlock: 3378unlock:
3377 __netif_tx_unlock_bh(txq); 3379 HARD_TX_UNLOCK(odev, txq);
3380
3381 local_bh_enable();
3378 3382
3379 /* If pkt_dev->count is zero, then run forever */ 3383 /* If pkt_dev->count is zero, then run forever */
3380 if ((pkt_dev->count != 0) && (pkt_dev->sofar >= pkt_dev->count)) { 3384 if ((pkt_dev->count != 0) && (pkt_dev->sofar >= pkt_dev->count)) {
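
In pktgen the change is purely a locking one: __netif_tx_lock_bh() unconditionally took the queue's xmit lock, while HARD_TX_LOCK() is a no-op for drivers that advertise NETIF_F_LLTX and serialize internally, so pktgen now disables bottom halves itself and only locks the queue when the driver actually wants it. The resulting transmit bracket, as it appears above:

    local_bh_disable();
    HARD_TX_LOCK(odev, txq, smp_processor_id());    /* no-op for LLTX drivers */

    if (unlikely(netif_xmit_frozen_or_drv_stopped(txq))) {
            ret = NETDEV_TX_BUSY;
            goto unlock;
    }
    /* pkt_dev->skb is handed to the driver's ndo_start_xmit() here */
unlock:
    HARD_TX_UNLOCK(odev, txq);
    local_bh_enable();
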
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 30c7d35dd862..1b62343f5837 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -3458,8 +3458,6 @@ static void sock_rmem_free(struct sk_buff *skb)
3458 */ 3458 */
3459int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb) 3459int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb)
3460{ 3460{
3461 int len = skb->len;
3462
3463 if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >= 3461 if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
3464 (unsigned int)sk->sk_rcvbuf) 3462 (unsigned int)sk->sk_rcvbuf)
3465 return -ENOMEM; 3463 return -ENOMEM;
@@ -3474,7 +3472,7 @@ int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb)
3474 3472
3475 skb_queue_tail(&sk->sk_error_queue, skb); 3473 skb_queue_tail(&sk->sk_error_queue, skb);
3476 if (!sock_flag(sk, SOCK_DEAD)) 3474 if (!sock_flag(sk, SOCK_DEAD))
3477 sk->sk_data_ready(sk, len); 3475 sk->sk_data_ready(sk);
3478 return 0; 3476 return 0;
3479} 3477}
3480EXPORT_SYMBOL(sock_queue_err_skb); 3478EXPORT_SYMBOL(sock_queue_err_skb);
@@ -3937,12 +3935,14 @@ EXPORT_SYMBOL_GPL(skb_scrub_packet);
3937unsigned int skb_gso_transport_seglen(const struct sk_buff *skb) 3935unsigned int skb_gso_transport_seglen(const struct sk_buff *skb)
3938{ 3936{
3939 const struct skb_shared_info *shinfo = skb_shinfo(skb); 3937 const struct skb_shared_info *shinfo = skb_shinfo(skb);
3940 unsigned int hdr_len;
3941 3938
3942 if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))) 3939 if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)))
3943 hdr_len = tcp_hdrlen(skb); 3940 return tcp_hdrlen(skb) + shinfo->gso_size;
3944 else 3941
3945 hdr_len = sizeof(struct udphdr); 3942 /* UFO sets gso_size to the size of the fragmentation
3946 return hdr_len + shinfo->gso_size; 3943 * payload, i.e. the size of the L4 (UDP) header is already
3944 * accounted for.
3945 */
3946 return shinfo->gso_size;
3947} 3947}
3948EXPORT_SYMBOL_GPL(skb_gso_transport_seglen); 3948EXPORT_SYMBOL_GPL(skb_gso_transport_seglen);
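
skb_gso_transport_seglen() previously assumed anything that was not TCP was UDP and added sizeof(struct udphdr); the rewrite returns early for TCP and, for UFO, returns gso_size unchanged, since for fragmentation offload gso_size already describes the whole per-fragment payload and there is no further L4 header to add. A worked TCP case of what the helper reports (numbers are illustrative, not from the patch):

    /* Illustrative numbers for a 1500-byte MTU path with TCP timestamps:
     *   tcp_hdrlen(skb)   == 32    (20-byte header + 12 bytes of options)
     *   shinfo->gso_size  == 1448  (payload per segment)
     *   transport seglen  == 32 + 1448 = 1480, i.e. MTU minus the IPv4 header.
     * For a UFO skb the helper now returns gso_size as-is. */
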
diff --git a/net/core/sock.c b/net/core/sock.c
index c0fc6bdad1e3..b4fff008136f 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -428,7 +428,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
428 spin_unlock_irqrestore(&list->lock, flags); 428 spin_unlock_irqrestore(&list->lock, flags);
429 429
430 if (!sock_flag(sk, SOCK_DEAD)) 430 if (!sock_flag(sk, SOCK_DEAD))
431 sk->sk_data_ready(sk, skb_len); 431 sk->sk_data_ready(sk);
432 return 0; 432 return 0;
433} 433}
434EXPORT_SYMBOL(sock_queue_rcv_skb); 434EXPORT_SYMBOL(sock_queue_rcv_skb);
@@ -2196,7 +2196,7 @@ static void sock_def_error_report(struct sock *sk)
2196 rcu_read_unlock(); 2196 rcu_read_unlock();
2197} 2197}
2198 2198
2199static void sock_def_readable(struct sock *sk, int len) 2199static void sock_def_readable(struct sock *sk)
2200{ 2200{
2201 struct socket_wq *wq; 2201 struct socket_wq *wq;
2202 2202
diff --git a/net/dccp/input.c b/net/dccp/input.c
index 14cdafad7a90..3c8ec7d4a34e 100644
--- a/net/dccp/input.c
+++ b/net/dccp/input.c
@@ -28,7 +28,7 @@ static void dccp_enqueue_skb(struct sock *sk, struct sk_buff *skb)
28 __skb_pull(skb, dccp_hdr(skb)->dccph_doff * 4); 28 __skb_pull(skb, dccp_hdr(skb)->dccph_doff * 4);
29 __skb_queue_tail(&sk->sk_receive_queue, skb); 29 __skb_queue_tail(&sk->sk_receive_queue, skb);
30 skb_set_owner_r(skb, sk); 30 skb_set_owner_r(skb, sk);
31 sk->sk_data_ready(sk, 0); 31 sk->sk_data_ready(sk);
32} 32}
33 33
34static void dccp_fin(struct sock *sk, struct sk_buff *skb) 34static void dccp_fin(struct sock *sk, struct sk_buff *skb)
diff --git a/net/dccp/minisocks.c b/net/dccp/minisocks.c
index 9e2f78bc1553..c69eb9c4fbb8 100644
--- a/net/dccp/minisocks.c
+++ b/net/dccp/minisocks.c
@@ -237,7 +237,7 @@ int dccp_child_process(struct sock *parent, struct sock *child,
237 237
238 /* Wakeup parent, send SIGIO */ 238 /* Wakeup parent, send SIGIO */
239 if (state == DCCP_RESPOND && child->sk_state != state) 239 if (state == DCCP_RESPOND && child->sk_state != state)
240 parent->sk_data_ready(parent, 0); 240 parent->sk_data_ready(parent);
241 } else { 241 } else {
242 /* Alas, it is possible again, because we do lookup 242 /* Alas, it is possible again, because we do lookup
243 * in main socket hash table and lock on listening 243 * in main socket hash table and lock on listening
diff --git a/net/dccp/output.c b/net/dccp/output.c
index 8876078859da..0248e8a3460c 100644
--- a/net/dccp/output.c
+++ b/net/dccp/output.c
@@ -138,7 +138,7 @@ static int dccp_transmit_skb(struct sock *sk, struct sk_buff *skb)
138 138
139 DCCP_INC_STATS(DCCP_MIB_OUTSEGS); 139 DCCP_INC_STATS(DCCP_MIB_OUTSEGS);
140 140
141 err = icsk->icsk_af_ops->queue_xmit(skb, &inet->cork.fl); 141 err = icsk->icsk_af_ops->queue_xmit(sk, skb, &inet->cork.fl);
142 return net_xmit_eval(err); 142 return net_xmit_eval(err);
143 } 143 }
144 return -ENOBUFS; 144 return -ENOBUFS;
diff --git a/net/decnet/dn_nsp_in.c b/net/decnet/dn_nsp_in.c
index c344163e6ac0..fe5f01485d33 100644
--- a/net/decnet/dn_nsp_in.c
+++ b/net/decnet/dn_nsp_in.c
@@ -585,7 +585,6 @@ out:
585static __inline__ int dn_queue_skb(struct sock *sk, struct sk_buff *skb, int sig, struct sk_buff_head *queue) 585static __inline__ int dn_queue_skb(struct sock *sk, struct sk_buff *skb, int sig, struct sk_buff_head *queue)
586{ 586{
587 int err; 587 int err;
588 int skb_len;
589 588
590 /* Cast skb->rcvbuf to unsigned... It's pointless, but reduces 589 /* Cast skb->rcvbuf to unsigned... It's pointless, but reduces
591 number of warnings when compiling with -W --ANK 590 number of warnings when compiling with -W --ANK
@@ -600,12 +599,11 @@ static __inline__ int dn_queue_skb(struct sock *sk, struct sk_buff *skb, int sig
600 if (err) 599 if (err)
601 goto out; 600 goto out;
602 601
603 skb_len = skb->len;
604 skb_set_owner_r(skb, sk); 602 skb_set_owner_r(skb, sk);
605 skb_queue_tail(queue, skb); 603 skb_queue_tail(queue, skb);
606 604
607 if (!sock_flag(sk, SOCK_DEAD)) 605 if (!sock_flag(sk, SOCK_DEAD))
608 sk->sk_data_ready(sk, skb_len); 606 sk->sk_data_ready(sk);
609out: 607out:
610 return err; 608 return err;
611} 609}
diff --git a/net/decnet/dn_route.c b/net/decnet/dn_route.c
index ce0cbbfe0f43..daccc4a36d80 100644
--- a/net/decnet/dn_route.c
+++ b/net/decnet/dn_route.c
@@ -752,7 +752,7 @@ static int dn_to_neigh_output(struct sk_buff *skb)
752 return n->output(n, skb); 752 return n->output(n, skb);
753} 753}
754 754
755static int dn_output(struct sk_buff *skb) 755static int dn_output(struct sock *sk, struct sk_buff *skb)
756{ 756{
757 struct dst_entry *dst = skb_dst(skb); 757 struct dst_entry *dst = skb_dst(skb);
758 struct dn_route *rt = (struct dn_route *)dst; 758 struct dn_route *rt = (struct dn_route *)dst;
@@ -838,6 +838,18 @@ drop:
838 * Used to catch bugs. This should never normally get 838 * Used to catch bugs. This should never normally get
839 * called. 839 * called.
840 */ 840 */
841static int dn_rt_bug_sk(struct sock *sk, struct sk_buff *skb)
842{
843 struct dn_skb_cb *cb = DN_SKB_CB(skb);
844
845 net_dbg_ratelimited("dn_rt_bug: skb from:%04x to:%04x\n",
846 le16_to_cpu(cb->src), le16_to_cpu(cb->dst));
847
848 kfree_skb(skb);
849
850 return NET_RX_DROP;
851}
852
841static int dn_rt_bug(struct sk_buff *skb) 853static int dn_rt_bug(struct sk_buff *skb)
842{ 854{
843 struct dn_skb_cb *cb = DN_SKB_CB(skb); 855 struct dn_skb_cb *cb = DN_SKB_CB(skb);
@@ -1463,7 +1475,7 @@ make_route:
1463 1475
1464 rt->n = neigh; 1476 rt->n = neigh;
1465 rt->dst.lastuse = jiffies; 1477 rt->dst.lastuse = jiffies;
1466 rt->dst.output = dn_rt_bug; 1478 rt->dst.output = dn_rt_bug_sk;
1467 switch (res.type) { 1479 switch (res.type) {
1468 case RTN_UNICAST: 1480 case RTN_UNICAST:
1469 rt->dst.input = dn_forward; 1481 rt->dst.input = dn_forward;
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index ec4f762efda5..94213c891565 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -463,6 +463,7 @@ static const struct net_device_ops ipgre_netdev_ops = {
463static void ipgre_tunnel_setup(struct net_device *dev) 463static void ipgre_tunnel_setup(struct net_device *dev)
464{ 464{
465 dev->netdev_ops = &ipgre_netdev_ops; 465 dev->netdev_ops = &ipgre_netdev_ops;
466 dev->type = ARPHRD_IPGRE;
466 ip_tunnel_setup(dev, ipgre_net_id); 467 ip_tunnel_setup(dev, ipgre_net_id);
467} 468}
468 469
@@ -501,7 +502,6 @@ static int ipgre_tunnel_init(struct net_device *dev)
501 memcpy(dev->dev_addr, &iph->saddr, 4); 502 memcpy(dev->dev_addr, &iph->saddr, 4);
502 memcpy(dev->broadcast, &iph->daddr, 4); 503 memcpy(dev->broadcast, &iph->daddr, 4);
503 504
504 dev->type = ARPHRD_IPGRE;
505 dev->flags = IFF_NOARP; 505 dev->flags = IFF_NOARP;
506 dev->priv_flags &= ~IFF_XMIT_DST_RELEASE; 506 dev->priv_flags &= ~IFF_XMIT_DST_RELEASE;
507 dev->addr_len = 4; 507 dev->addr_len = 4;
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index 1a0755fea491..1cbeba5edff9 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -101,17 +101,17 @@ int __ip_local_out(struct sk_buff *skb)
101 skb_dst(skb)->dev, dst_output); 101 skb_dst(skb)->dev, dst_output);
102} 102}
103 103
104int ip_local_out(struct sk_buff *skb) 104int ip_local_out_sk(struct sock *sk, struct sk_buff *skb)
105{ 105{
106 int err; 106 int err;
107 107
108 err = __ip_local_out(skb); 108 err = __ip_local_out(skb);
109 if (likely(err == 1)) 109 if (likely(err == 1))
110 err = dst_output(skb); 110 err = dst_output_sk(sk, skb);
111 111
112 return err; 112 return err;
113} 113}
114EXPORT_SYMBOL_GPL(ip_local_out); 114EXPORT_SYMBOL_GPL(ip_local_out_sk);
115 115
116static inline int ip_select_ttl(struct inet_sock *inet, struct dst_entry *dst) 116static inline int ip_select_ttl(struct inet_sock *inet, struct dst_entry *dst)
117{ 117{
@@ -226,9 +226,8 @@ static int ip_finish_output(struct sk_buff *skb)
226 return ip_finish_output2(skb); 226 return ip_finish_output2(skb);
227} 227}
228 228
229int ip_mc_output(struct sk_buff *skb) 229int ip_mc_output(struct sock *sk, struct sk_buff *skb)
230{ 230{
231 struct sock *sk = skb->sk;
232 struct rtable *rt = skb_rtable(skb); 231 struct rtable *rt = skb_rtable(skb);
233 struct net_device *dev = rt->dst.dev; 232 struct net_device *dev = rt->dst.dev;
234 233
@@ -287,7 +286,7 @@ int ip_mc_output(struct sk_buff *skb)
287 !(IPCB(skb)->flags & IPSKB_REROUTED)); 286 !(IPCB(skb)->flags & IPSKB_REROUTED));
288} 287}
289 288
290int ip_output(struct sk_buff *skb) 289int ip_output(struct sock *sk, struct sk_buff *skb)
291{ 290{
292 struct net_device *dev = skb_dst(skb)->dev; 291 struct net_device *dev = skb_dst(skb)->dev;
293 292
@@ -315,9 +314,9 @@ static void ip_copy_addrs(struct iphdr *iph, const struct flowi4 *fl4)
315 sizeof(fl4->saddr) + sizeof(fl4->daddr)); 314 sizeof(fl4->saddr) + sizeof(fl4->daddr));
316} 315}
317 316
318int ip_queue_xmit(struct sk_buff *skb, struct flowi *fl) 317/* Note: skb->sk can be different from sk, in case of tunnels */
318int ip_queue_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl)
319{ 319{
320 struct sock *sk = skb->sk;
321 struct inet_sock *inet = inet_sk(sk); 320 struct inet_sock *inet = inet_sk(sk);
322 struct ip_options_rcu *inet_opt; 321 struct ip_options_rcu *inet_opt;
323 struct flowi4 *fl4; 322 struct flowi4 *fl4;
@@ -389,6 +388,7 @@ packet_routed:
389 ip_select_ident_more(skb, &rt->dst, sk, 388 ip_select_ident_more(skb, &rt->dst, sk,
390 (skb_shinfo(skb)->gso_segs ?: 1) - 1); 389 (skb_shinfo(skb)->gso_segs ?: 1) - 1);
391 390
391 /* TODO : should we use skb->sk here instead of sk ? */
392 skb->priority = sk->sk_priority; 392 skb->priority = sk->sk_priority;
393 skb->mark = sk->sk_mark; 393 skb->mark = sk->sk_mark;
394 394
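
ip_output.c shows the other half of the socket plumbing: ip_queue_xmit(), ip_output(), ip_mc_output() and ip_local_out_sk() all take the socket as an explicit argument rather than reading skb->sk, because (as the new comment notes) the two can legitimately differ once a tunnel transmits on behalf of another socket. A caller under the new signature, mirroring the TCP/DCCP hunks elsewhere in this section:

    /* sk is the connection's own socket; skb->sk may differ (or be NULL)
     * when the skb was built by a tunnel on some other socket's behalf. */
    err = icsk->icsk_af_ops->queue_xmit(sk, skb, &inet->cork.fl);
    return net_xmit_eval(err);      /* congestion notifications are not errors */
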
diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c
index e77381d1df9a..484d0ce27ef7 100644
--- a/net/ipv4/ip_tunnel.c
+++ b/net/ipv4/ip_tunnel.c
@@ -670,7 +670,7 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
670 return; 670 return;
671 } 671 }
672 672
673 err = iptunnel_xmit(rt, skb, fl4.saddr, fl4.daddr, protocol, 673 err = iptunnel_xmit(skb->sk, rt, skb, fl4.saddr, fl4.daddr, protocol,
674 tos, ttl, df, !net_eq(tunnel->net, dev_net(dev))); 674 tos, ttl, df, !net_eq(tunnel->net, dev_net(dev)));
675 iptunnel_xmit_stats(err, &dev->stats, dev->tstats); 675 iptunnel_xmit_stats(err, &dev->stats, dev->tstats);
676 676
diff --git a/net/ipv4/ip_tunnel_core.c b/net/ipv4/ip_tunnel_core.c
index e0c2b1d2ea4e..bcf206c79005 100644
--- a/net/ipv4/ip_tunnel_core.c
+++ b/net/ipv4/ip_tunnel_core.c
@@ -46,7 +46,7 @@
46#include <net/netns/generic.h> 46#include <net/netns/generic.h>
47#include <net/rtnetlink.h> 47#include <net/rtnetlink.h>
48 48
49int iptunnel_xmit(struct rtable *rt, struct sk_buff *skb, 49int iptunnel_xmit(struct sock *sk, struct rtable *rt, struct sk_buff *skb,
50 __be32 src, __be32 dst, __u8 proto, 50 __be32 src, __be32 dst, __u8 proto,
51 __u8 tos, __u8 ttl, __be16 df, bool xnet) 51 __u8 tos, __u8 ttl, __be16 df, bool xnet)
52{ 52{
@@ -76,7 +76,7 @@ int iptunnel_xmit(struct rtable *rt, struct sk_buff *skb,
76 iph->ttl = ttl; 76 iph->ttl = ttl;
77 __ip_select_ident(iph, &rt->dst, (skb_shinfo(skb)->gso_segs ?: 1) - 1); 77 __ip_select_ident(iph, &rt->dst, (skb_shinfo(skb)->gso_segs ?: 1) - 1);
78 78
79 err = ip_local_out(skb); 79 err = ip_local_out_sk(sk, skb);
80 if (unlikely(net_xmit_eval(err))) 80 if (unlikely(net_xmit_eval(err)))
81 pkt_len = 0; 81 pkt_len = 0;
82 return pkt_len; 82 return pkt_len;
diff --git a/net/ipv4/ip_vti.c b/net/ipv4/ip_vti.c
index 687ddef4e574..afcee51b90ed 100644
--- a/net/ipv4/ip_vti.c
+++ b/net/ipv4/ip_vti.c
@@ -337,6 +337,7 @@ static const struct net_device_ops vti_netdev_ops = {
337static void vti_tunnel_setup(struct net_device *dev) 337static void vti_tunnel_setup(struct net_device *dev)
338{ 338{
339 dev->netdev_ops = &vti_netdev_ops; 339 dev->netdev_ops = &vti_netdev_ops;
340 dev->type = ARPHRD_TUNNEL;
340 ip_tunnel_setup(dev, vti_net_id); 341 ip_tunnel_setup(dev, vti_net_id);
341} 342}
342 343
@@ -348,7 +349,6 @@ static int vti_tunnel_init(struct net_device *dev)
348 memcpy(dev->dev_addr, &iph->saddr, 4); 349 memcpy(dev->dev_addr, &iph->saddr, 4);
349 memcpy(dev->broadcast, &iph->daddr, 4); 350 memcpy(dev->broadcast, &iph->daddr, 4);
350 351
351 dev->type = ARPHRD_TUNNEL;
352 dev->hard_header_len = LL_MAX_HEADER + sizeof(struct iphdr); 352 dev->hard_header_len = LL_MAX_HEADER + sizeof(struct iphdr);
353 dev->mtu = ETH_DATA_LEN; 353 dev->mtu = ETH_DATA_LEN;
354 dev->flags = IFF_NOARP; 354 dev->flags = IFF_NOARP;
diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
index f4b19e5dde54..8210964a9f19 100644
--- a/net/ipv4/ping.c
+++ b/net/ipv4/ping.c
@@ -252,26 +252,33 @@ int ping_init_sock(struct sock *sk)
252{ 252{
253 struct net *net = sock_net(sk); 253 struct net *net = sock_net(sk);
254 kgid_t group = current_egid(); 254 kgid_t group = current_egid();
255 struct group_info *group_info = get_current_groups(); 255 struct group_info *group_info;
256 int i, j, count = group_info->ngroups; 256 int i, j, count;
257 kgid_t low, high; 257 kgid_t low, high;
258 int ret = 0;
258 259
259 inet_get_ping_group_range_net(net, &low, &high); 260 inet_get_ping_group_range_net(net, &low, &high);
260 if (gid_lte(low, group) && gid_lte(group, high)) 261 if (gid_lte(low, group) && gid_lte(group, high))
261 return 0; 262 return 0;
262 263
264 group_info = get_current_groups();
265 count = group_info->ngroups;
263 for (i = 0; i < group_info->nblocks; i++) { 266 for (i = 0; i < group_info->nblocks; i++) {
264 int cp_count = min_t(int, NGROUPS_PER_BLOCK, count); 267 int cp_count = min_t(int, NGROUPS_PER_BLOCK, count);
265 for (j = 0; j < cp_count; j++) { 268 for (j = 0; j < cp_count; j++) {
266 kgid_t gid = group_info->blocks[i][j]; 269 kgid_t gid = group_info->blocks[i][j];
267 if (gid_lte(low, gid) && gid_lte(gid, high)) 270 if (gid_lte(low, gid) && gid_lte(gid, high))
268 return 0; 271 goto out_release_group;
269 } 272 }
270 273
271 count -= cp_count; 274 count -= cp_count;
272 } 275 }
273 276
274 return -EACCES; 277 ret = -EACCES;
278
279out_release_group:
280 put_group_info(group_info);
281 return ret;
275} 282}
276EXPORT_SYMBOL_GPL(ping_init_sock); 283EXPORT_SYMBOL_GPL(ping_init_sock);
277 284
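
The ping.c hunk is a reference-count fix rather than an API change: get_current_groups() returns the task's group_info with a reference held, and the early `return 0` exits (primary gid in range, or a supplementary gid in range) never dropped it. Deferring the lookup until it is needed and routing every exit through put_group_info() restores the balance. Reduced to its skeleton (hedged sketch; the two range helpers are hypothetical stand-ins for the checks above):

    struct group_info *gi;
    int ret = 0;

    if (primary_gid_is_in_range())          /* hypothetical: no lookup yet, */
            return 0;                       /* so no reference to drop      */

    gi = get_current_groups();              /* takes a reference            */
    if (!supplementary_gid_in_range(gi))    /* hypothetical helper          */
            ret = -EACCES;
    put_group_info(gi);                     /* exactly one drop, every path */
    return ret;
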
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 34d094cadb11..1485aafcad59 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -1129,7 +1129,7 @@ static void ipv4_link_failure(struct sk_buff *skb)
1129 dst_set_expires(&rt->dst, 0); 1129 dst_set_expires(&rt->dst, 0);
1130} 1130}
1131 1131
1132static int ip_rt_bug(struct sk_buff *skb) 1132static int ip_rt_bug(struct sock *sk, struct sk_buff *skb)
1133{ 1133{
1134 pr_debug("%s: %pI4 -> %pI4, %s\n", 1134 pr_debug("%s: %pI4 -> %pI4, %s\n",
1135 __func__, &ip_hdr(skb)->saddr, &ip_hdr(skb)->daddr, 1135 __func__, &ip_hdr(skb)->saddr, &ip_hdr(skb)->daddr,
@@ -2218,7 +2218,7 @@ struct dst_entry *ipv4_blackhole_route(struct net *net, struct dst_entry *dst_or
2218 2218
2219 new->__use = 1; 2219 new->__use = 1;
2220 new->input = dst_discard; 2220 new->input = dst_discard;
2221 new->output = dst_discard; 2221 new->output = dst_discard_sk;
2222 2222
2223 new->dev = ort->dst.dev; 2223 new->dev = ort->dst.dev;
2224 if (new->dev) 2224 if (new->dev)
@@ -2357,7 +2357,7 @@ static int rt_fill_info(struct net *net, __be32 dst, __be32 src,
2357 } 2357 }
2358 } else 2358 } else
2359#endif 2359#endif
2360 if (nla_put_u32(skb, RTA_IIF, rt->rt_iif)) 2360 if (nla_put_u32(skb, RTA_IIF, skb->dev->ifindex))
2361 goto nla_put_failure; 2361 goto nla_put_failure;
2362 } 2362 }
2363 2363
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index e1661f46fd19..d6b46eb2f94c 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -4413,7 +4413,7 @@ queue_and_out:
4413 if (eaten > 0) 4413 if (eaten > 0)
4414 kfree_skb_partial(skb, fragstolen); 4414 kfree_skb_partial(skb, fragstolen);
4415 if (!sock_flag(sk, SOCK_DEAD)) 4415 if (!sock_flag(sk, SOCK_DEAD))
4416 sk->sk_data_ready(sk, 0); 4416 sk->sk_data_ready(sk);
4417 return; 4417 return;
4418 } 4418 }
4419 4419
@@ -4914,7 +4914,7 @@ static void tcp_urg(struct sock *sk, struct sk_buff *skb, const struct tcphdr *t
4914 BUG(); 4914 BUG();
4915 tp->urg_data = TCP_URG_VALID | tmp; 4915 tp->urg_data = TCP_URG_VALID | tmp;
4916 if (!sock_flag(sk, SOCK_DEAD)) 4916 if (!sock_flag(sk, SOCK_DEAD))
4917 sk->sk_data_ready(sk, 0); 4917 sk->sk_data_ready(sk);
4918 } 4918 }
4919 } 4919 }
4920} 4920}
@@ -5000,11 +5000,11 @@ static bool tcp_dma_try_early_copy(struct sock *sk, struct sk_buff *skb,
5000 (tcp_flag_word(tcp_hdr(skb)) & TCP_FLAG_PSH) || 5000 (tcp_flag_word(tcp_hdr(skb)) & TCP_FLAG_PSH) ||
5001 (atomic_read(&sk->sk_rmem_alloc) > (sk->sk_rcvbuf >> 1))) { 5001 (atomic_read(&sk->sk_rmem_alloc) > (sk->sk_rcvbuf >> 1))) {
5002 tp->ucopy.wakeup = 1; 5002 tp->ucopy.wakeup = 1;
5003 sk->sk_data_ready(sk, 0); 5003 sk->sk_data_ready(sk);
5004 } 5004 }
5005 } else if (chunk > 0) { 5005 } else if (chunk > 0) {
5006 tp->ucopy.wakeup = 1; 5006 tp->ucopy.wakeup = 1;
5007 sk->sk_data_ready(sk, 0); 5007 sk->sk_data_ready(sk);
5008 } 5008 }
5009out: 5009out:
5010 return copied_early; 5010 return copied_early;
@@ -5275,7 +5275,7 @@ no_ack:
5275#endif 5275#endif
5276 if (eaten) 5276 if (eaten)
5277 kfree_skb_partial(skb, fragstolen); 5277 kfree_skb_partial(skb, fragstolen);
5278 sk->sk_data_ready(sk, 0); 5278 sk->sk_data_ready(sk);
5279 return; 5279 return;
5280 } 5280 }
5281 } 5281 }
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 6379894ec210..438f3b95143d 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -1434,7 +1434,7 @@ static int tcp_v4_conn_req_fastopen(struct sock *sk,
1434 tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq; 1434 tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
1435 tp->syn_data_acked = 1; 1435 tp->syn_data_acked = 1;
1436 } 1436 }
1437 sk->sk_data_ready(sk, 0); 1437 sk->sk_data_ready(sk);
1438 bh_unlock_sock(child); 1438 bh_unlock_sock(child);
1439 sock_put(child); 1439 sock_put(child);
1440 WARN_ON(req->sk == NULL); 1440 WARN_ON(req->sk == NULL);
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
index ca788ada5bd3..05c1b155251d 100644
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -745,7 +745,7 @@ int tcp_child_process(struct sock *parent, struct sock *child,
745 skb->len); 745 skb->len);
746 /* Wakeup parent, send SIGIO */ 746 /* Wakeup parent, send SIGIO */
747 if (state == TCP_SYN_RECV && child->sk_state != state) 747 if (state == TCP_SYN_RECV && child->sk_state != state)
748 parent->sk_data_ready(parent, 0); 748 parent->sk_data_ready(parent);
749 } else { 749 } else {
750 /* Alas, it is possible again, because we do lookup 750 /* Alas, it is possible again, because we do lookup
751 * in main socket hash table and lock on listening 751 * in main socket hash table and lock on listening
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 699fb102e971..025e25093984 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -981,7 +981,7 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
981 TCP_ADD_STATS(sock_net(sk), TCP_MIB_OUTSEGS, 981 TCP_ADD_STATS(sock_net(sk), TCP_MIB_OUTSEGS,
982 tcp_skb_pcount(skb)); 982 tcp_skb_pcount(skb));
983 983
984 err = icsk->icsk_af_ops->queue_xmit(skb, &inet->cork.fl); 984 err = icsk->icsk_af_ops->queue_xmit(sk, skb, &inet->cork.fl);
985 if (likely(err <= 0)) 985 if (likely(err <= 0))
986 return err; 986 return err;
987 987
diff --git a/net/ipv4/xfrm4_output.c b/net/ipv4/xfrm4_output.c
index baa0f63731fd..40e701f2e1e0 100644
--- a/net/ipv4/xfrm4_output.c
+++ b/net/ipv4/xfrm4_output.c
@@ -86,7 +86,7 @@ int xfrm4_output_finish(struct sk_buff *skb)
86 return xfrm_output(skb); 86 return xfrm_output(skb);
87} 87}
88 88
89int xfrm4_output(struct sk_buff *skb) 89int xfrm4_output(struct sock *sk, struct sk_buff *skb)
90{ 90{
91 struct dst_entry *dst = skb_dst(skb); 91 struct dst_entry *dst = skb_dst(skb);
92 struct xfrm_state *x = dst->xfrm; 92 struct xfrm_state *x = dst->xfrm;
diff --git a/net/ipv6/inet6_connection_sock.c b/net/ipv6/inet6_connection_sock.c
index c9138189415a..d4ade34ab375 100644
--- a/net/ipv6/inet6_connection_sock.c
+++ b/net/ipv6/inet6_connection_sock.c
@@ -224,9 +224,8 @@ static struct dst_entry *inet6_csk_route_socket(struct sock *sk,
224 return dst; 224 return dst;
225} 225}
226 226
227int inet6_csk_xmit(struct sk_buff *skb, struct flowi *fl_unused) 227int inet6_csk_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl_unused)
228{ 228{
229 struct sock *sk = skb->sk;
230 struct ipv6_pinfo *np = inet6_sk(sk); 229 struct ipv6_pinfo *np = inet6_sk(sk);
231 struct flowi6 fl6; 230 struct flowi6 fl6;
232 struct dst_entry *dst; 231 struct dst_entry *dst;
diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
index c98338b81d30..9d921462b57f 100644
--- a/net/ipv6/ip6_gre.c
+++ b/net/ipv6/ip6_gre.c
@@ -1559,6 +1559,15 @@ static int ip6gre_changelink(struct net_device *dev, struct nlattr *tb[],
1559 return 0; 1559 return 0;
1560} 1560}
1561 1561
1562static void ip6gre_dellink(struct net_device *dev, struct list_head *head)
1563{
1564 struct net *net = dev_net(dev);
1565 struct ip6gre_net *ign = net_generic(net, ip6gre_net_id);
1566
1567 if (dev != ign->fb_tunnel_dev)
1568 unregister_netdevice_queue(dev, head);
1569}
1570
1562static size_t ip6gre_get_size(const struct net_device *dev) 1571static size_t ip6gre_get_size(const struct net_device *dev)
1563{ 1572{
1564 return 1573 return
@@ -1636,6 +1645,7 @@ static struct rtnl_link_ops ip6gre_link_ops __read_mostly = {
1636 .validate = ip6gre_tunnel_validate, 1645 .validate = ip6gre_tunnel_validate,
1637 .newlink = ip6gre_newlink, 1646 .newlink = ip6gre_newlink,
1638 .changelink = ip6gre_changelink, 1647 .changelink = ip6gre_changelink,
1648 .dellink = ip6gre_dellink,
1639 .get_size = ip6gre_get_size, 1649 .get_size = ip6gre_get_size,
1640 .fill_info = ip6gre_fill_info, 1650 .fill_info = ip6gre_fill_info,
1641}; 1651};
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index 3284d61577c0..40e7581374f7 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -132,7 +132,7 @@ static int ip6_finish_output(struct sk_buff *skb)
132 return ip6_finish_output2(skb); 132 return ip6_finish_output2(skb);
133} 133}
134 134
135int ip6_output(struct sk_buff *skb) 135int ip6_output(struct sock *sk, struct sk_buff *skb)
136{ 136{
137 struct net_device *dev = skb_dst(skb)->dev; 137 struct net_device *dev = skb_dst(skb)->dev;
138 struct inet6_dev *idev = ip6_dst_idev(skb_dst(skb)); 138 struct inet6_dev *idev = ip6_dst_idev(skb_dst(skb));
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 5015c50a5ba7..4011617cca68 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -84,9 +84,9 @@ static void ip6_dst_ifdown(struct dst_entry *,
84static int ip6_dst_gc(struct dst_ops *ops); 84static int ip6_dst_gc(struct dst_ops *ops);
85 85
86static int ip6_pkt_discard(struct sk_buff *skb); 86static int ip6_pkt_discard(struct sk_buff *skb);
87static int ip6_pkt_discard_out(struct sk_buff *skb); 87static int ip6_pkt_discard_out(struct sock *sk, struct sk_buff *skb);
88static int ip6_pkt_prohibit(struct sk_buff *skb); 88static int ip6_pkt_prohibit(struct sk_buff *skb);
89static int ip6_pkt_prohibit_out(struct sk_buff *skb); 89static int ip6_pkt_prohibit_out(struct sock *sk, struct sk_buff *skb);
90static void ip6_link_failure(struct sk_buff *skb); 90static void ip6_link_failure(struct sk_buff *skb);
91static void ip6_rt_update_pmtu(struct dst_entry *dst, struct sock *sk, 91static void ip6_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
92 struct sk_buff *skb, u32 mtu); 92 struct sk_buff *skb, u32 mtu);
@@ -290,7 +290,7 @@ static const struct rt6_info ip6_blk_hole_entry_template = {
290 .obsolete = DST_OBSOLETE_FORCE_CHK, 290 .obsolete = DST_OBSOLETE_FORCE_CHK,
291 .error = -EINVAL, 291 .error = -EINVAL,
292 .input = dst_discard, 292 .input = dst_discard,
293 .output = dst_discard, 293 .output = dst_discard_sk,
294 }, 294 },
295 .rt6i_flags = (RTF_REJECT | RTF_NONEXTHOP), 295 .rt6i_flags = (RTF_REJECT | RTF_NONEXTHOP),
296 .rt6i_protocol = RTPROT_KERNEL, 296 .rt6i_protocol = RTPROT_KERNEL,
@@ -1058,7 +1058,7 @@ struct dst_entry *ip6_blackhole_route(struct net *net, struct dst_entry *dst_ori
1058 1058
1059 new->__use = 1; 1059 new->__use = 1;
1060 new->input = dst_discard; 1060 new->input = dst_discard;
1061 new->output = dst_discard; 1061 new->output = dst_discard_sk;
1062 1062
1063 if (dst_metrics_read_only(&ort->dst)) 1063 if (dst_metrics_read_only(&ort->dst))
1064 new->_metrics = ort->dst._metrics; 1064 new->_metrics = ort->dst._metrics;
@@ -1338,7 +1338,7 @@ static unsigned int ip6_mtu(const struct dst_entry *dst)
1338 unsigned int mtu = dst_metric_raw(dst, RTAX_MTU); 1338 unsigned int mtu = dst_metric_raw(dst, RTAX_MTU);
1339 1339
1340 if (mtu) 1340 if (mtu)
1341 return mtu; 1341 goto out;
1342 1342
1343 mtu = IPV6_MIN_MTU; 1343 mtu = IPV6_MIN_MTU;
1344 1344
@@ -1348,7 +1348,8 @@ static unsigned int ip6_mtu(const struct dst_entry *dst)
1348 mtu = idev->cnf.mtu6; 1348 mtu = idev->cnf.mtu6;
1349 rcu_read_unlock(); 1349 rcu_read_unlock();
1350 1350
1351 return mtu; 1351out:
1352 return min_t(unsigned int, mtu, IP6_MAX_MTU);
1352} 1353}
1353 1354
1354static struct dst_entry *icmp6_dst_gc_list; 1355static struct dst_entry *icmp6_dst_gc_list;
@@ -1576,7 +1577,7 @@ int ip6_route_add(struct fib6_config *cfg)
1576 switch (cfg->fc_type) { 1577 switch (cfg->fc_type) {
1577 case RTN_BLACKHOLE: 1578 case RTN_BLACKHOLE:
1578 rt->dst.error = -EINVAL; 1579 rt->dst.error = -EINVAL;
1579 rt->dst.output = dst_discard; 1580 rt->dst.output = dst_discard_sk;
1580 rt->dst.input = dst_discard; 1581 rt->dst.input = dst_discard;
1581 break; 1582 break;
1582 case RTN_PROHIBIT: 1583 case RTN_PROHIBIT:
@@ -2128,7 +2129,7 @@ static int ip6_pkt_discard(struct sk_buff *skb)
2128 return ip6_pkt_drop(skb, ICMPV6_NOROUTE, IPSTATS_MIB_INNOROUTES); 2129 return ip6_pkt_drop(skb, ICMPV6_NOROUTE, IPSTATS_MIB_INNOROUTES);
2129} 2130}
2130 2131
2131static int ip6_pkt_discard_out(struct sk_buff *skb) 2132static int ip6_pkt_discard_out(struct sock *sk, struct sk_buff *skb)
2132{ 2133{
2133 skb->dev = skb_dst(skb)->dev; 2134 skb->dev = skb_dst(skb)->dev;
2134 return ip6_pkt_drop(skb, ICMPV6_NOROUTE, IPSTATS_MIB_OUTNOROUTES); 2135 return ip6_pkt_drop(skb, ICMPV6_NOROUTE, IPSTATS_MIB_OUTNOROUTES);
@@ -2139,7 +2140,7 @@ static int ip6_pkt_prohibit(struct sk_buff *skb)
2139 return ip6_pkt_drop(skb, ICMPV6_ADM_PROHIBITED, IPSTATS_MIB_INNOROUTES); 2140 return ip6_pkt_drop(skb, ICMPV6_ADM_PROHIBITED, IPSTATS_MIB_INNOROUTES);
2140} 2141}
2141 2142
2142static int ip6_pkt_prohibit_out(struct sk_buff *skb) 2143static int ip6_pkt_prohibit_out(struct sock *sk, struct sk_buff *skb)
2143{ 2144{
2144 skb->dev = skb_dst(skb)->dev; 2145 skb->dev = skb_dst(skb)->dev;
2145 return ip6_pkt_drop(skb, ICMPV6_ADM_PROHIBITED, IPSTATS_MIB_OUTNOROUTES); 2146 return ip6_pkt_drop(skb, ICMPV6_ADM_PROHIBITED, IPSTATS_MIB_OUTNOROUTES);
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
index 1693c8d885f0..8da8268d65f8 100644
--- a/net/ipv6/sit.c
+++ b/net/ipv6/sit.c
@@ -974,8 +974,9 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb,
974 goto out; 974 goto out;
975 } 975 }
976 976
977 err = iptunnel_xmit(rt, skb, fl4.saddr, fl4.daddr, IPPROTO_IPV6, tos, 977 err = iptunnel_xmit(skb->sk, rt, skb, fl4.saddr, fl4.daddr,
978 ttl, df, !net_eq(tunnel->net, dev_net(dev))); 978 IPPROTO_IPV6, tos, ttl, df,
979 !net_eq(tunnel->net, dev_net(dev)));
979 iptunnel_xmit_stats(err, &dev->stats, dev->tstats); 980 iptunnel_xmit_stats(err, &dev->stats, dev->tstats);
980 return NETDEV_TX_OK; 981 return NETDEV_TX_OK;
981 982
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 5ca56cee2dae..e289830ed6e3 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -798,7 +798,7 @@ static void tcp_v6_send_response(struct sk_buff *skb, u32 seq, u32 ack, u32 win,
798 __tcp_v6_send_check(buff, &fl6.saddr, &fl6.daddr); 798 __tcp_v6_send_check(buff, &fl6.saddr, &fl6.daddr);
799 799
800 fl6.flowi6_proto = IPPROTO_TCP; 800 fl6.flowi6_proto = IPPROTO_TCP;
801 if (rt6_need_strict(&fl6.daddr) || !oif) 801 if (rt6_need_strict(&fl6.daddr) && !oif)
802 fl6.flowi6_oif = inet6_iif(skb); 802 fl6.flowi6_oif = inet6_iif(skb);
803 else 803 else
804 fl6.flowi6_oif = oif; 804 fl6.flowi6_oif = oif;
diff --git a/net/ipv6/xfrm6_output.c b/net/ipv6/xfrm6_output.c
index 6cd625e37706..19ef329bdbf8 100644
--- a/net/ipv6/xfrm6_output.c
+++ b/net/ipv6/xfrm6_output.c
@@ -163,7 +163,7 @@ static int __xfrm6_output(struct sk_buff *skb)
163 return x->outer_mode->afinfo->output_finish(skb); 163 return x->outer_mode->afinfo->output_finish(skb);
164} 164}
165 165
166int xfrm6_output(struct sk_buff *skb) 166int xfrm6_output(struct sock *sk, struct sk_buff *skb)
167{ 167{
168 return NF_HOOK(NFPROTO_IPV6, NF_INET_POST_ROUTING, skb, NULL, 168 return NF_HOOK(NFPROTO_IPV6, NF_INET_POST_ROUTING, skb, NULL,
169 skb_dst(skb)->dev, __xfrm6_output); 169 skb_dst(skb)->dev, __xfrm6_output);
diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
index a5e03119107a..01e77b0ae075 100644
--- a/net/iucv/af_iucv.c
+++ b/net/iucv/af_iucv.c
@@ -1757,7 +1757,7 @@ static int iucv_callback_connreq(struct iucv_path *path,
1757 1757
1758 /* Wake up accept */ 1758 /* Wake up accept */
1759 nsk->sk_state = IUCV_CONNECTED; 1759 nsk->sk_state = IUCV_CONNECTED;
1760 sk->sk_data_ready(sk, 1); 1760 sk->sk_data_ready(sk);
1761 err = 0; 1761 err = 0;
1762fail: 1762fail:
1763 bh_unlock_sock(sk); 1763 bh_unlock_sock(sk);
@@ -1968,7 +1968,7 @@ static int afiucv_hs_callback_syn(struct sock *sk, struct sk_buff *skb)
1968 if (!err) { 1968 if (!err) {
1969 iucv_accept_enqueue(sk, nsk); 1969 iucv_accept_enqueue(sk, nsk);
1970 nsk->sk_state = IUCV_CONNECTED; 1970 nsk->sk_state = IUCV_CONNECTED;
1971 sk->sk_data_ready(sk, 1); 1971 sk->sk_data_ready(sk);
1972 } else 1972 } else
1973 iucv_sock_kill(nsk); 1973 iucv_sock_kill(nsk);
1974 bh_unlock_sock(sk); 1974 bh_unlock_sock(sk);
diff --git a/net/key/af_key.c b/net/key/af_key.c
index e72589a8400d..f3c83073afc4 100644
--- a/net/key/af_key.c
+++ b/net/key/af_key.c
@@ -205,7 +205,7 @@ static int pfkey_broadcast_one(struct sk_buff *skb, struct sk_buff **skb2,
205 if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf) { 205 if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf) {
206 skb_set_owner_r(*skb2, sk); 206 skb_set_owner_r(*skb2, sk);
207 skb_queue_tail(&sk->sk_receive_queue, *skb2); 207 skb_queue_tail(&sk->sk_receive_queue, *skb2);
208 sk->sk_data_ready(sk, (*skb2)->len); 208 sk->sk_data_ready(sk);
209 *skb2 = NULL; 209 *skb2 = NULL;
210 err = 0; 210 err = 0;
211 } 211 }
diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c
index 47f7a5490555..a4e37d7158dc 100644
--- a/net/l2tp/l2tp_core.c
+++ b/net/l2tp/l2tp_core.c
@@ -1131,10 +1131,10 @@ static int l2tp_xmit_core(struct l2tp_session *session, struct sk_buff *skb,
1131 skb->local_df = 1; 1131 skb->local_df = 1;
1132#if IS_ENABLED(CONFIG_IPV6) 1132#if IS_ENABLED(CONFIG_IPV6)
1133 if (tunnel->sock->sk_family == PF_INET6 && !tunnel->v4mapped) 1133 if (tunnel->sock->sk_family == PF_INET6 && !tunnel->v4mapped)
1134 error = inet6_csk_xmit(skb, NULL); 1134 error = inet6_csk_xmit(tunnel->sock, skb, NULL);
1135 else 1135 else
1136#endif 1136#endif
1137 error = ip_queue_xmit(skb, fl); 1137 error = ip_queue_xmit(tunnel->sock, skb, fl);
1138 1138
1139 /* Update stats */ 1139 /* Update stats */
1140 if (error >= 0) { 1140 if (error >= 0) {
diff --git a/net/l2tp/l2tp_ip.c b/net/l2tp/l2tp_ip.c
index 0b44d855269c..3397fe6897c0 100644
--- a/net/l2tp/l2tp_ip.c
+++ b/net/l2tp/l2tp_ip.c
@@ -487,7 +487,7 @@ static int l2tp_ip_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *m
487 487
488xmit: 488xmit:
489 /* Queue the packet to IP for output */ 489 /* Queue the packet to IP for output */
490 rc = ip_queue_xmit(skb, &inet->cork.fl); 490 rc = ip_queue_xmit(sk, skb, &inet->cork.fl);
491 rcu_read_unlock(); 491 rcu_read_unlock();
492 492
493error: 493error:
diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c
index d276e2d4a589..950909f04ee6 100644
--- a/net/l2tp/l2tp_ppp.c
+++ b/net/l2tp/l2tp_ppp.c
@@ -753,9 +753,9 @@ static int pppol2tp_connect(struct socket *sock, struct sockaddr *uservaddr,
753 session->deref = pppol2tp_session_sock_put; 753 session->deref = pppol2tp_session_sock_put;
754 754
755 /* If PMTU discovery was enabled, use the MTU that was discovered */ 755 /* If PMTU discovery was enabled, use the MTU that was discovered */
756 dst = sk_dst_get(sk); 756 dst = sk_dst_get(tunnel->sock);
757 if (dst != NULL) { 757 if (dst != NULL) {
758 u32 pmtu = dst_mtu(__sk_dst_get(sk)); 758 u32 pmtu = dst_mtu(__sk_dst_get(tunnel->sock));
759 if (pmtu != 0) 759 if (pmtu != 0)
760 session->mtu = session->mru = pmtu - 760 session->mtu = session->mru = pmtu -
761 PPPOL2TP_HEADER_OVERHEAD; 761 PPPOL2TP_HEADER_OVERHEAD;
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index 6dba48efe01e..75421f2ba8be 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -1795,6 +1795,7 @@ int nf_conntrack_init_net(struct net *net)
1795 int cpu; 1795 int cpu;
1796 1796
1797 atomic_set(&net->ct.count, 0); 1797 atomic_set(&net->ct.count, 0);
1798 seqcount_init(&net->ct.generation);
1798 1799
1799 net->ct.pcpu_lists = alloc_percpu(struct ct_pcpu); 1800 net->ct.pcpu_lists = alloc_percpu(struct ct_pcpu);
1800 if (!net->ct.pcpu_lists) 1801 if (!net->ct.pcpu_lists)
diff --git a/net/netfilter/nf_conntrack_pptp.c b/net/netfilter/nf_conntrack_pptp.c
index 7bd03decd36c..825c3e3f8305 100644
--- a/net/netfilter/nf_conntrack_pptp.c
+++ b/net/netfilter/nf_conntrack_pptp.c
@@ -605,32 +605,14 @@ static struct nf_conntrack_helper pptp __read_mostly = {
605 .expect_policy = &pptp_exp_policy, 605 .expect_policy = &pptp_exp_policy,
606}; 606};
607 607
608static void nf_conntrack_pptp_net_exit(struct net *net)
609{
610 nf_ct_gre_keymap_flush(net);
611}
612
613static struct pernet_operations nf_conntrack_pptp_net_ops = {
614 .exit = nf_conntrack_pptp_net_exit,
615};
616
617static int __init nf_conntrack_pptp_init(void) 608static int __init nf_conntrack_pptp_init(void)
618{ 609{
619 int rv; 610 return nf_conntrack_helper_register(&pptp);
620
621 rv = nf_conntrack_helper_register(&pptp);
622 if (rv < 0)
623 return rv;
624 rv = register_pernet_subsys(&nf_conntrack_pptp_net_ops);
625 if (rv < 0)
626 nf_conntrack_helper_unregister(&pptp);
627 return rv;
628} 611}
629 612
630static void __exit nf_conntrack_pptp_fini(void) 613static void __exit nf_conntrack_pptp_fini(void)
631{ 614{
632 nf_conntrack_helper_unregister(&pptp); 615 nf_conntrack_helper_unregister(&pptp);
633 unregister_pernet_subsys(&nf_conntrack_pptp_net_ops);
634} 616}
635 617
636module_init(nf_conntrack_pptp_init); 618module_init(nf_conntrack_pptp_init);
diff --git a/net/netfilter/nf_conntrack_proto_gre.c b/net/netfilter/nf_conntrack_proto_gre.c
index 9d9c0dade602..d5665739e3b1 100644
--- a/net/netfilter/nf_conntrack_proto_gre.c
+++ b/net/netfilter/nf_conntrack_proto_gre.c
@@ -66,7 +66,7 @@ static inline struct netns_proto_gre *gre_pernet(struct net *net)
66 return net_generic(net, proto_gre_net_id); 66 return net_generic(net, proto_gre_net_id);
67} 67}
68 68
69void nf_ct_gre_keymap_flush(struct net *net) 69static void nf_ct_gre_keymap_flush(struct net *net)
70{ 70{
71 struct netns_proto_gre *net_gre = gre_pernet(net); 71 struct netns_proto_gre *net_gre = gre_pernet(net);
72 struct nf_ct_gre_keymap *km, *tmp; 72 struct nf_ct_gre_keymap *km, *tmp;
@@ -78,7 +78,6 @@ void nf_ct_gre_keymap_flush(struct net *net)
78 } 78 }
79 write_unlock_bh(&net_gre->keymap_lock); 79 write_unlock_bh(&net_gre->keymap_lock);
80} 80}
81EXPORT_SYMBOL(nf_ct_gre_keymap_flush);
82 81
83static inline int gre_key_cmpfn(const struct nf_ct_gre_keymap *km, 82static inline int gre_key_cmpfn(const struct nf_ct_gre_keymap *km,
84 const struct nf_conntrack_tuple *t) 83 const struct nf_conntrack_tuple *t)
diff --git a/net/netfilter/nf_tables_core.c b/net/netfilter/nf_tables_core.c
index 90998a6ff8b9..804105391b9a 100644
--- a/net/netfilter/nf_tables_core.c
+++ b/net/netfilter/nf_tables_core.c
@@ -25,9 +25,8 @@ static void nft_cmp_fast_eval(const struct nft_expr *expr,
25 struct nft_data data[NFT_REG_MAX + 1]) 25 struct nft_data data[NFT_REG_MAX + 1])
26{ 26{
27 const struct nft_cmp_fast_expr *priv = nft_expr_priv(expr); 27 const struct nft_cmp_fast_expr *priv = nft_expr_priv(expr);
28 u32 mask; 28 u32 mask = nft_cmp_fast_mask(priv->len);
29 29
30 mask = ~0U >> (sizeof(priv->data) * BITS_PER_BYTE - priv->len);
31 if ((data[priv->sreg].data[0] & mask) == priv->data) 30 if ((data[priv->sreg].data[0] & mask) == priv->data)
32 return; 31 return;
33 data[NFT_REG_VERDICT].verdict = NFT_BREAK; 32 data[NFT_REG_VERDICT].verdict = NFT_BREAK;
diff --git a/net/netfilter/nft_cmp.c b/net/netfilter/nft_cmp.c
index 954925db414d..e2b3f51c81f1 100644
--- a/net/netfilter/nft_cmp.c
+++ b/net/netfilter/nft_cmp.c
@@ -128,7 +128,7 @@ static int nft_cmp_fast_init(const struct nft_ctx *ctx,
128 BUG_ON(err < 0); 128 BUG_ON(err < 0);
129 desc.len *= BITS_PER_BYTE; 129 desc.len *= BITS_PER_BYTE;
130 130
131 mask = ~0U >> (sizeof(priv->data) * BITS_PER_BYTE - desc.len); 131 mask = nft_cmp_fast_mask(desc.len);
132 priv->data = data.data[0] & mask; 132 priv->data = data.data[0] & mask;
133 priv->len = desc.len; 133 priv->len = desc.len;
134 return 0; 134 return 0;
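The two netfilter hunks above replace the open-coded mask computation with the nft_cmp_fast_mask() helper, whose actual definition lives in a header outside this diff. As a rough stand-alone sketch of the arithmetic being factored out (cmp_fast_mask below is a hypothetical stand-in for illustration, not the kernel helper itself):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Keep the low 'len' bits of a 32-bit field; valid for len = 1..32.
 * Mirrors the expression "~0U >> (sizeof(priv->data) * BITS_PER_BYTE - len)"
 * seen on the old side of the hunks above. */
static uint32_t cmp_fast_mask(unsigned int len)
{
        return UINT32_MAX >> (32 - len);
}

int main(void)
{
        printf("len =  8 -> %08" PRIx32 "\n", cmp_fast_mask(8));   /* 000000ff */
        printf("len = 16 -> %08" PRIx32 "\n", cmp_fast_mask(16));  /* 0000ffff */
        printf("len = 32 -> %08" PRIx32 "\n", cmp_fast_mask(32));  /* ffffffff */
        return 0;
}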
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index c2d585c4f7c5..894cda0206bb 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -1653,7 +1653,7 @@ static int __netlink_sendskb(struct sock *sk, struct sk_buff *skb)
1653 else 1653 else
1654#endif /* CONFIG_NETLINK_MMAP */ 1654#endif /* CONFIG_NETLINK_MMAP */
1655 skb_queue_tail(&sk->sk_receive_queue, skb); 1655 skb_queue_tail(&sk->sk_receive_queue, skb);
1656 sk->sk_data_ready(sk, len); 1656 sk->sk_data_ready(sk);
1657 return len; 1657 return len;
1658} 1658}
1659 1659
@@ -2394,7 +2394,7 @@ out:
2394 return err ? : copied; 2394 return err ? : copied;
2395} 2395}
2396 2396
2397static void netlink_data_ready(struct sock *sk, int len) 2397static void netlink_data_ready(struct sock *sk)
2398{ 2398{
2399 BUG(); 2399 BUG();
2400} 2400}
diff --git a/net/netrom/af_netrom.c b/net/netrom/af_netrom.c
index b74aa0755521..ede50d197e10 100644
--- a/net/netrom/af_netrom.c
+++ b/net/netrom/af_netrom.c
@@ -1011,7 +1011,7 @@ int nr_rx_frame(struct sk_buff *skb, struct net_device *dev)
1011 skb_queue_head(&sk->sk_receive_queue, skb); 1011 skb_queue_head(&sk->sk_receive_queue, skb);
1012 1012
1013 if (!sock_flag(sk, SOCK_DEAD)) 1013 if (!sock_flag(sk, SOCK_DEAD))
1014 sk->sk_data_ready(sk, skb->len); 1014 sk->sk_data_ready(sk);
1015 1015
1016 bh_unlock_sock(sk); 1016 bh_unlock_sock(sk);
1017 1017
diff --git a/net/nfc/llcp_core.c b/net/nfc/llcp_core.c
index b486f12ae243..b4671958fcf9 100644
--- a/net/nfc/llcp_core.c
+++ b/net/nfc/llcp_core.c
@@ -976,7 +976,7 @@ static void nfc_llcp_recv_connect(struct nfc_llcp_local *local,
976 new_sk->sk_state = LLCP_CONNECTED; 976 new_sk->sk_state = LLCP_CONNECTED;
977 977
978 /* Wake the listening processes */ 978 /* Wake the listening processes */
979 parent->sk_data_ready(parent, 0); 979 parent->sk_data_ready(parent);
980 980
981 /* Send CC */ 981 /* Send CC */
982 nfc_llcp_send_cc(new_sock); 982 nfc_llcp_send_cc(new_sock);
diff --git a/net/openvswitch/vport-gre.c b/net/openvswitch/vport-gre.c
index a3d6951602db..ebb6e2442554 100644
--- a/net/openvswitch/vport-gre.c
+++ b/net/openvswitch/vport-gre.c
@@ -174,7 +174,7 @@ static int gre_tnl_send(struct vport *vport, struct sk_buff *skb)
174 174
175 skb->local_df = 1; 175 skb->local_df = 1;
176 176
177 return iptunnel_xmit(rt, skb, fl.saddr, 177 return iptunnel_xmit(skb->sk, rt, skb, fl.saddr,
178 OVS_CB(skb)->tun_key->ipv4_dst, IPPROTO_GRE, 178 OVS_CB(skb)->tun_key->ipv4_dst, IPPROTO_GRE,
179 OVS_CB(skb)->tun_key->ipv4_tos, 179 OVS_CB(skb)->tun_key->ipv4_tos,
180 OVS_CB(skb)->tun_key->ipv4_ttl, df, false); 180 OVS_CB(skb)->tun_key->ipv4_ttl, df, false);
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 72e0c71fb01d..b85c67ccb797 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -1848,7 +1848,7 @@ static int packet_rcv(struct sk_buff *skb, struct net_device *dev,
1848 skb->dropcount = atomic_read(&sk->sk_drops); 1848 skb->dropcount = atomic_read(&sk->sk_drops);
1849 __skb_queue_tail(&sk->sk_receive_queue, skb); 1849 __skb_queue_tail(&sk->sk_receive_queue, skb);
1850 spin_unlock(&sk->sk_receive_queue.lock); 1850 spin_unlock(&sk->sk_receive_queue.lock);
1851 sk->sk_data_ready(sk, skb->len); 1851 sk->sk_data_ready(sk);
1852 return 0; 1852 return 0;
1853 1853
1854drop_n_acct: 1854drop_n_acct:
@@ -2054,7 +2054,7 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
2054 else 2054 else
2055 prb_clear_blk_fill_status(&po->rx_ring); 2055 prb_clear_blk_fill_status(&po->rx_ring);
2056 2056
2057 sk->sk_data_ready(sk, 0); 2057 sk->sk_data_ready(sk);
2058 2058
2059drop_n_restore: 2059drop_n_restore:
2060 if (skb_head != skb->data && skb_shared(skb)) { 2060 if (skb_head != skb->data && skb_shared(skb)) {
@@ -2069,7 +2069,7 @@ ring_is_full:
2069 po->stats.stats1.tp_drops++; 2069 po->stats.stats1.tp_drops++;
2070 spin_unlock(&sk->sk_receive_queue.lock); 2070 spin_unlock(&sk->sk_receive_queue.lock);
2071 2071
2072 sk->sk_data_ready(sk, 0); 2072 sk->sk_data_ready(sk);
2073 kfree_skb(copy_skb); 2073 kfree_skb(copy_skb);
2074 goto drop_n_restore; 2074 goto drop_n_restore;
2075} 2075}
diff --git a/net/phonet/pep-gprs.c b/net/phonet/pep-gprs.c
index a2fba7edfd1f..66dc65e7c6a1 100644
--- a/net/phonet/pep-gprs.c
+++ b/net/phonet/pep-gprs.c
@@ -37,7 +37,7 @@
37struct gprs_dev { 37struct gprs_dev {
38 struct sock *sk; 38 struct sock *sk;
39 void (*old_state_change)(struct sock *); 39 void (*old_state_change)(struct sock *);
40 void (*old_data_ready)(struct sock *, int); 40 void (*old_data_ready)(struct sock *);
41 void (*old_write_space)(struct sock *); 41 void (*old_write_space)(struct sock *);
42 42
43 struct net_device *dev; 43 struct net_device *dev;
@@ -146,7 +146,7 @@ drop:
146 return err; 146 return err;
147} 147}
148 148
149static void gprs_data_ready(struct sock *sk, int len) 149static void gprs_data_ready(struct sock *sk)
150{ 150{
151 struct gprs_dev *gp = sk->sk_user_data; 151 struct gprs_dev *gp = sk->sk_user_data;
152 struct sk_buff *skb; 152 struct sk_buff *skb;
diff --git a/net/phonet/pep.c b/net/phonet/pep.c
index e77411735de8..70a547ea5177 100644
--- a/net/phonet/pep.c
+++ b/net/phonet/pep.c
@@ -462,10 +462,9 @@ out:
462queue: 462queue:
463 skb->dev = NULL; 463 skb->dev = NULL;
464 skb_set_owner_r(skb, sk); 464 skb_set_owner_r(skb, sk);
465 err = skb->len;
466 skb_queue_tail(queue, skb); 465 skb_queue_tail(queue, skb);
467 if (!sock_flag(sk, SOCK_DEAD)) 466 if (!sock_flag(sk, SOCK_DEAD))
468 sk->sk_data_ready(sk, err); 467 sk->sk_data_ready(sk);
469 return NET_RX_SUCCESS; 468 return NET_RX_SUCCESS;
470} 469}
471 470
@@ -587,10 +586,9 @@ static int pipe_handler_do_rcv(struct sock *sk, struct sk_buff *skb)
587 pn->rx_credits--; 586 pn->rx_credits--;
588 skb->dev = NULL; 587 skb->dev = NULL;
589 skb_set_owner_r(skb, sk); 588 skb_set_owner_r(skb, sk);
590 err = skb->len;
591 skb_queue_tail(&sk->sk_receive_queue, skb); 589 skb_queue_tail(&sk->sk_receive_queue, skb);
592 if (!sock_flag(sk, SOCK_DEAD)) 590 if (!sock_flag(sk, SOCK_DEAD))
593 sk->sk_data_ready(sk, err); 591 sk->sk_data_ready(sk);
594 return NET_RX_SUCCESS; 592 return NET_RX_SUCCESS;
595 593
596 case PNS_PEP_CONNECT_RESP: 594 case PNS_PEP_CONNECT_RESP:
@@ -698,7 +696,7 @@ static int pep_do_rcv(struct sock *sk, struct sk_buff *skb)
698 skb_queue_head(&sk->sk_receive_queue, skb); 696 skb_queue_head(&sk->sk_receive_queue, skb);
699 sk_acceptq_added(sk); 697 sk_acceptq_added(sk);
700 if (!sock_flag(sk, SOCK_DEAD)) 698 if (!sock_flag(sk, SOCK_DEAD))
701 sk->sk_data_ready(sk, 0); 699 sk->sk_data_ready(sk);
702 return NET_RX_SUCCESS; 700 return NET_RX_SUCCESS;
703 701
704 case PNS_PEP_DISCONNECT_REQ: 702 case PNS_PEP_DISCONNECT_REQ:
diff --git a/net/rds/tcp.h b/net/rds/tcp.h
index 9cf2927d0021..65637491f728 100644
--- a/net/rds/tcp.h
+++ b/net/rds/tcp.h
@@ -61,12 +61,12 @@ void rds_tcp_state_change(struct sock *sk);
61/* tcp_listen.c */ 61/* tcp_listen.c */
62int rds_tcp_listen_init(void); 62int rds_tcp_listen_init(void);
63void rds_tcp_listen_stop(void); 63void rds_tcp_listen_stop(void);
64void rds_tcp_listen_data_ready(struct sock *sk, int bytes); 64void rds_tcp_listen_data_ready(struct sock *sk);
65 65
66/* tcp_recv.c */ 66/* tcp_recv.c */
67int rds_tcp_recv_init(void); 67int rds_tcp_recv_init(void);
68void rds_tcp_recv_exit(void); 68void rds_tcp_recv_exit(void);
69void rds_tcp_data_ready(struct sock *sk, int bytes); 69void rds_tcp_data_ready(struct sock *sk);
70int rds_tcp_recv(struct rds_connection *conn); 70int rds_tcp_recv(struct rds_connection *conn);
71void rds_tcp_inc_free(struct rds_incoming *inc); 71void rds_tcp_inc_free(struct rds_incoming *inc);
72int rds_tcp_inc_copy_to_user(struct rds_incoming *inc, struct iovec *iov, 72int rds_tcp_inc_copy_to_user(struct rds_incoming *inc, struct iovec *iov,
diff --git a/net/rds/tcp_listen.c b/net/rds/tcp_listen.c
index 7787537e9c2e..4e638f851185 100644
--- a/net/rds/tcp_listen.c
+++ b/net/rds/tcp_listen.c
@@ -108,9 +108,9 @@ static void rds_tcp_accept_worker(struct work_struct *work)
108 cond_resched(); 108 cond_resched();
109} 109}
110 110
111void rds_tcp_listen_data_ready(struct sock *sk, int bytes) 111void rds_tcp_listen_data_ready(struct sock *sk)
112{ 112{
113 void (*ready)(struct sock *sk, int bytes); 113 void (*ready)(struct sock *sk);
114 114
115 rdsdebug("listen data ready sk %p\n", sk); 115 rdsdebug("listen data ready sk %p\n", sk);
116 116
@@ -132,7 +132,7 @@ void rds_tcp_listen_data_ready(struct sock *sk, int bytes)
132 132
133out: 133out:
134 read_unlock(&sk->sk_callback_lock); 134 read_unlock(&sk->sk_callback_lock);
135 ready(sk, bytes); 135 ready(sk);
136} 136}
137 137
138int rds_tcp_listen_init(void) 138int rds_tcp_listen_init(void)
diff --git a/net/rds/tcp_recv.c b/net/rds/tcp_recv.c
index 4fac4f2bb9dc..9ae6e0a264ec 100644
--- a/net/rds/tcp_recv.c
+++ b/net/rds/tcp_recv.c
@@ -314,13 +314,13 @@ int rds_tcp_recv(struct rds_connection *conn)
314 return ret; 314 return ret;
315} 315}
316 316
317void rds_tcp_data_ready(struct sock *sk, int bytes) 317void rds_tcp_data_ready(struct sock *sk)
318{ 318{
319 void (*ready)(struct sock *sk, int bytes); 319 void (*ready)(struct sock *sk);
320 struct rds_connection *conn; 320 struct rds_connection *conn;
321 struct rds_tcp_connection *tc; 321 struct rds_tcp_connection *tc;
322 322
323 rdsdebug("data ready sk %p bytes %d\n", sk, bytes); 323 rdsdebug("data ready sk %p\n", sk);
324 324
325 read_lock(&sk->sk_callback_lock); 325 read_lock(&sk->sk_callback_lock);
326 conn = sk->sk_user_data; 326 conn = sk->sk_user_data;
@@ -337,7 +337,7 @@ void rds_tcp_data_ready(struct sock *sk, int bytes)
337 queue_delayed_work(rds_wq, &conn->c_recv_w, 0); 337 queue_delayed_work(rds_wq, &conn->c_recv_w, 0);
338out: 338out:
339 read_unlock(&sk->sk_callback_lock); 339 read_unlock(&sk->sk_callback_lock);
340 ready(sk, bytes); 340 ready(sk);
341} 341}
342 342
343int rds_tcp_recv_init(void) 343int rds_tcp_recv_init(void)
diff --git a/net/rose/af_rose.c b/net/rose/af_rose.c
index c2cca2ee6aef..8451c8cdc9de 100644
--- a/net/rose/af_rose.c
+++ b/net/rose/af_rose.c
@@ -1041,7 +1041,7 @@ int rose_rx_call_request(struct sk_buff *skb, struct net_device *dev, struct ros
1041 rose_start_heartbeat(make); 1041 rose_start_heartbeat(make);
1042 1042
1043 if (!sock_flag(sk, SOCK_DEAD)) 1043 if (!sock_flag(sk, SOCK_DEAD))
1044 sk->sk_data_ready(sk, skb->len); 1044 sk->sk_data_ready(sk);
1045 1045
1046 return 1; 1046 return 1;
1047} 1047}
diff --git a/net/rxrpc/ar-input.c b/net/rxrpc/ar-input.c
index 73742647c135..63b21e580de9 100644
--- a/net/rxrpc/ar-input.c
+++ b/net/rxrpc/ar-input.c
@@ -113,7 +113,7 @@ int rxrpc_queue_rcv_skb(struct rxrpc_call *call, struct sk_buff *skb,
113 spin_unlock_bh(&sk->sk_receive_queue.lock); 113 spin_unlock_bh(&sk->sk_receive_queue.lock);
114 114
115 if (!sock_flag(sk, SOCK_DEAD)) 115 if (!sock_flag(sk, SOCK_DEAD))
116 sk->sk_data_ready(sk, skb_len); 116 sk->sk_data_ready(sk);
117 } 117 }
118 skb = NULL; 118 skb = NULL;
119 } else { 119 } else {
@@ -632,14 +632,14 @@ cant_find_conn:
632 * handle data received on the local endpoint 632 * handle data received on the local endpoint
633 * - may be called in interrupt context 633 * - may be called in interrupt context
634 */ 634 */
635void rxrpc_data_ready(struct sock *sk, int count) 635void rxrpc_data_ready(struct sock *sk)
636{ 636{
637 struct rxrpc_skb_priv *sp; 637 struct rxrpc_skb_priv *sp;
638 struct rxrpc_local *local; 638 struct rxrpc_local *local;
639 struct sk_buff *skb; 639 struct sk_buff *skb;
640 int ret; 640 int ret;
641 641
642 _enter("%p, %d", sk, count); 642 _enter("%p", sk);
643 643
644 ASSERT(!irqs_disabled()); 644 ASSERT(!irqs_disabled());
645 645
diff --git a/net/rxrpc/ar-internal.h b/net/rxrpc/ar-internal.h
index c831d44b0841..ba9fd36d3f15 100644
--- a/net/rxrpc/ar-internal.h
+++ b/net/rxrpc/ar-internal.h
@@ -518,7 +518,7 @@ void rxrpc_UDP_error_handler(struct work_struct *);
518 */ 518 */
519extern const char *rxrpc_pkts[]; 519extern const char *rxrpc_pkts[];
520 520
521void rxrpc_data_ready(struct sock *, int); 521void rxrpc_data_ready(struct sock *);
522int rxrpc_queue_rcv_skb(struct rxrpc_call *, struct sk_buff *, bool, bool); 522int rxrpc_queue_rcv_skb(struct rxrpc_call *, struct sk_buff *, bool, bool);
523void rxrpc_fast_process_packet(struct rxrpc_call *, struct sk_buff *); 523void rxrpc_fast_process_packet(struct rxrpc_call *, struct sk_buff *);
524 524
diff --git a/net/sctp/associola.c b/net/sctp/associola.c
index 4f6d6f9d1274..39579c3e0d14 100644
--- a/net/sctp/associola.c
+++ b/net/sctp/associola.c
@@ -1395,35 +1395,44 @@ static inline bool sctp_peer_needs_update(struct sctp_association *asoc)
1395 return false; 1395 return false;
1396} 1396}
1397 1397
1398/* Update asoc's rwnd for the approximated state in the buffer, 1398/* Increase asoc's rwnd by len and send any window update SACK if needed. */
1399 * and check whether SACK needs to be sent. 1399void sctp_assoc_rwnd_increase(struct sctp_association *asoc, unsigned int len)
1400 */
1401void sctp_assoc_rwnd_update(struct sctp_association *asoc, bool update_peer)
1402{ 1400{
1403 int rx_count;
1404 struct sctp_chunk *sack; 1401 struct sctp_chunk *sack;
1405 struct timer_list *timer; 1402 struct timer_list *timer;
1406 1403
1407 if (asoc->ep->rcvbuf_policy) 1404 if (asoc->rwnd_over) {
1408 rx_count = atomic_read(&asoc->rmem_alloc); 1405 if (asoc->rwnd_over >= len) {
1409 else 1406 asoc->rwnd_over -= len;
1410 rx_count = atomic_read(&asoc->base.sk->sk_rmem_alloc); 1407 } else {
1408 asoc->rwnd += (len - asoc->rwnd_over);
1409 asoc->rwnd_over = 0;
1410 }
1411 } else {
1412 asoc->rwnd += len;
1413 }
1411 1414
1412 if ((asoc->base.sk->sk_rcvbuf - rx_count) > 0) 1415 /* If we had window pressure, start recovering it
1413 asoc->rwnd = (asoc->base.sk->sk_rcvbuf - rx_count) >> 1; 1416 * once our rwnd had reached the accumulated pressure
1414 else 1417 * threshold. The idea is to recover slowly, but up
1415 asoc->rwnd = 0; 1418 * to the initial advertised window.
1419 */
1420 if (asoc->rwnd_press && asoc->rwnd >= asoc->rwnd_press) {
1421 int change = min(asoc->pathmtu, asoc->rwnd_press);
1422 asoc->rwnd += change;
1423 asoc->rwnd_press -= change;
1424 }
1416 1425
1417 pr_debug("%s: asoc:%p rwnd=%u, rx_count=%d, sk_rcvbuf=%d\n", 1426 pr_debug("%s: asoc:%p rwnd increased by %d to (%u, %u) - %u\n",
1418 __func__, asoc, asoc->rwnd, rx_count, 1427 __func__, asoc, len, asoc->rwnd, asoc->rwnd_over,
1419 asoc->base.sk->sk_rcvbuf); 1428 asoc->a_rwnd);
1420 1429
1421 /* Send a window update SACK if the rwnd has increased by at least the 1430 /* Send a window update SACK if the rwnd has increased by at least the
1422 * minimum of the association's PMTU and half of the receive buffer. 1431 * minimum of the association's PMTU and half of the receive buffer.
1423 * The algorithm used is similar to the one described in 1432 * The algorithm used is similar to the one described in
1424 * Section 4.2.3.3 of RFC 1122. 1433 * Section 4.2.3.3 of RFC 1122.
1425 */ 1434 */
1426 if (update_peer && sctp_peer_needs_update(asoc)) { 1435 if (sctp_peer_needs_update(asoc)) {
1427 asoc->a_rwnd = asoc->rwnd; 1436 asoc->a_rwnd = asoc->rwnd;
1428 1437
1429 pr_debug("%s: sending window update SACK- asoc:%p rwnd:%u " 1438 pr_debug("%s: sending window update SACK- asoc:%p rwnd:%u "
@@ -1445,6 +1454,45 @@ void sctp_assoc_rwnd_update(struct sctp_association *asoc, bool update_peer)
1445 } 1454 }
1446} 1455}
1447 1456
1457/* Decrease asoc's rwnd by len. */
1458void sctp_assoc_rwnd_decrease(struct sctp_association *asoc, unsigned int len)
1459{
1460 int rx_count;
1461 int over = 0;
1462
1463 if (unlikely(!asoc->rwnd || asoc->rwnd_over))
1464 pr_debug("%s: association:%p has asoc->rwnd:%u, "
1465 "asoc->rwnd_over:%u!\n", __func__, asoc,
1466 asoc->rwnd, asoc->rwnd_over);
1467
1468 if (asoc->ep->rcvbuf_policy)
1469 rx_count = atomic_read(&asoc->rmem_alloc);
1470 else
1471 rx_count = atomic_read(&asoc->base.sk->sk_rmem_alloc);
1472
1473 /* If we've reached or overflowed our receive buffer, announce
1474 * a 0 rwnd if rwnd would still be positive. Store the
1475 * the potential pressure overflow so that the window can be restored
1476 * back to original value.
1477 */
1478 if (rx_count >= asoc->base.sk->sk_rcvbuf)
1479 over = 1;
1480
1481 if (asoc->rwnd >= len) {
1482 asoc->rwnd -= len;
1483 if (over) {
1484 asoc->rwnd_press += asoc->rwnd;
1485 asoc->rwnd = 0;
1486 }
1487 } else {
1488 asoc->rwnd_over = len - asoc->rwnd;
1489 asoc->rwnd = 0;
1490 }
1491
1492 pr_debug("%s: asoc:%p rwnd decreased by %d to (%u, %u, %u)\n",
1493 __func__, asoc, len, asoc->rwnd, asoc->rwnd_over,
1494 asoc->rwnd_press);
1495}
1448 1496
1449/* Build the bind address list for the association based on info from the 1497/* Build the bind address list for the association based on info from the
1450 * local endpoint and the remote peer. 1498 * local endpoint and the remote peer.
diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
index 4e1d0fcb028e..c09757fbf803 100644
--- a/net/sctp/protocol.c
+++ b/net/sctp/protocol.c
@@ -957,7 +957,7 @@ static inline int sctp_v4_xmit(struct sk_buff *skb,
957 957
958 SCTP_INC_STATS(sock_net(&inet->sk), SCTP_MIB_OUTSCTPPACKS); 958 SCTP_INC_STATS(sock_net(&inet->sk), SCTP_MIB_OUTSCTPPACKS);
959 959
960 return ip_queue_xmit(skb, &transport->fl); 960 return ip_queue_xmit(&inet->sk, skb, &transport->fl);
961} 961}
962 962
963static struct sctp_af sctp_af_inet; 963static struct sctp_af sctp_af_inet;
diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
index 01e002430c85..ae9fbeba40b0 100644
--- a/net/sctp/sm_statefuns.c
+++ b/net/sctp/sm_statefuns.c
@@ -6178,7 +6178,7 @@ static int sctp_eat_data(const struct sctp_association *asoc,
6178 * PMTU. In cases, such as loopback, this might be a rather 6178 * PMTU. In cases, such as loopback, this might be a rather
6179 * large spill over. 6179 * large spill over.
6180 */ 6180 */
6181 if ((!chunk->data_accepted) && (!asoc->rwnd || 6181 if ((!chunk->data_accepted) && (!asoc->rwnd || asoc->rwnd_over ||
6182 (datalen > asoc->rwnd + asoc->frag_point))) { 6182 (datalen > asoc->rwnd + asoc->frag_point))) {
6183 6183
6184 /* If this is the next TSN, consider reneging to make 6184 /* If this is the next TSN, consider reneging to make
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 5f83a6a2fa67..ff20e2dbbbc7 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -2115,6 +2115,12 @@ static int sctp_recvmsg(struct kiocb *iocb, struct sock *sk,
2115 sctp_skb_pull(skb, copied); 2115 sctp_skb_pull(skb, copied);
2116 skb_queue_head(&sk->sk_receive_queue, skb); 2116 skb_queue_head(&sk->sk_receive_queue, skb);
2117 2117
2118 /* When only partial message is copied to the user, increase
2119 * rwnd by that amount. If all the data in the skb is read,
2120 * rwnd is updated when the event is freed.
2121 */
2122 if (!sctp_ulpevent_is_notification(event))
2123 sctp_assoc_rwnd_increase(event->asoc, copied);
2118 goto out; 2124 goto out;
2119 } else if ((event->msg_flags & MSG_NOTIFICATION) || 2125 } else if ((event->msg_flags & MSG_NOTIFICATION) ||
2120 (event->msg_flags & MSG_EOR)) 2126 (event->msg_flags & MSG_EOR))
@@ -6604,6 +6610,12 @@ static void sctp_wake_up_waiters(struct sock *sk,
6604 if (asoc->ep->sndbuf_policy) 6610 if (asoc->ep->sndbuf_policy)
6605 return __sctp_write_space(asoc); 6611 return __sctp_write_space(asoc);
6606 6612
6613 /* If association goes down and is just flushing its
6614 * outq, then just normally notify others.
6615 */
6616 if (asoc->base.dead)
6617 return sctp_write_space(sk);
6618
6607 /* Accounting for the sndbuf space is per socket, so we 6619 /* Accounting for the sndbuf space is per socket, so we
6608 * need to wake up others, try to be fair and in case of 6620 * need to wake up others, try to be fair and in case of
6609 * other associations, let them have a go first instead 6621 * other associations, let them have a go first instead
@@ -6739,7 +6751,7 @@ do_nonblock:
6739 goto out; 6751 goto out;
6740} 6752}
6741 6753
6742void sctp_data_ready(struct sock *sk, int len) 6754void sctp_data_ready(struct sock *sk)
6743{ 6755{
6744 struct socket_wq *wq; 6756 struct socket_wq *wq;
6745 6757
diff --git a/net/sctp/ulpevent.c b/net/sctp/ulpevent.c
index 8d198ae03606..85c64658bd0b 100644
--- a/net/sctp/ulpevent.c
+++ b/net/sctp/ulpevent.c
@@ -989,7 +989,7 @@ static void sctp_ulpevent_receive_data(struct sctp_ulpevent *event,
989 skb = sctp_event2skb(event); 989 skb = sctp_event2skb(event);
990 /* Set the owner and charge rwnd for bytes received. */ 990 /* Set the owner and charge rwnd for bytes received. */
991 sctp_ulpevent_set_owner(event, asoc); 991 sctp_ulpevent_set_owner(event, asoc);
992 sctp_assoc_rwnd_update(asoc, false); 992 sctp_assoc_rwnd_decrease(asoc, skb_headlen(skb));
993 993
994 if (!skb->data_len) 994 if (!skb->data_len)
995 return; 995 return;
@@ -1011,7 +1011,6 @@ static void sctp_ulpevent_release_data(struct sctp_ulpevent *event)
1011{ 1011{
1012 struct sk_buff *skb, *frag; 1012 struct sk_buff *skb, *frag;
1013 unsigned int len; 1013 unsigned int len;
1014 struct sctp_association *asoc;
1015 1014
1016 /* Current stack structures assume that the rcv buffer is 1015 /* Current stack structures assume that the rcv buffer is
1017 * per socket. For UDP style sockets this is not true as 1016 * per socket. For UDP style sockets this is not true as
@@ -1036,11 +1035,8 @@ static void sctp_ulpevent_release_data(struct sctp_ulpevent *event)
1036 } 1035 }
1037 1036
1038done: 1037done:
1039 asoc = event->asoc; 1038 sctp_assoc_rwnd_increase(event->asoc, len);
1040 sctp_association_hold(asoc);
1041 sctp_ulpevent_release_owner(event); 1039 sctp_ulpevent_release_owner(event);
1042 sctp_assoc_rwnd_update(asoc, true);
1043 sctp_association_put(asoc);
1044} 1040}
1045 1041
1046static void sctp_ulpevent_release_frag_data(struct sctp_ulpevent *event) 1042static void sctp_ulpevent_release_frag_data(struct sctp_ulpevent *event)
diff --git a/net/sctp/ulpqueue.c b/net/sctp/ulpqueue.c
index 5dc94117e9d4..7144eb6a1b95 100644
--- a/net/sctp/ulpqueue.c
+++ b/net/sctp/ulpqueue.c
@@ -259,7 +259,7 @@ int sctp_ulpq_tail_event(struct sctp_ulpq *ulpq, struct sctp_ulpevent *event)
259 sctp_ulpq_clear_pd(ulpq); 259 sctp_ulpq_clear_pd(ulpq);
260 260
261 if (queue == &sk->sk_receive_queue) 261 if (queue == &sk->sk_receive_queue)
262 sk->sk_data_ready(sk, 0); 262 sk->sk_data_ready(sk);
263 return 1; 263 return 1;
264 264
265out_free: 265out_free:
@@ -1135,5 +1135,5 @@ void sctp_ulpq_abort_pd(struct sctp_ulpq *ulpq, gfp_t gfp)
1135 1135
1136 /* If there is data waiting, send it up the socket now. */ 1136 /* If there is data waiting, send it up the socket now. */
1137 if (sctp_ulpq_clear_pd(ulpq) || ev) 1137 if (sctp_ulpq_clear_pd(ulpq) || ev)
1138 sk->sk_data_ready(sk, 0); 1138 sk->sk_data_ready(sk);
1139} 1139}
diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
index d06cb8752dcd..43bcb4699d69 100644
--- a/net/sunrpc/svcsock.c
+++ b/net/sunrpc/svcsock.c
@@ -60,7 +60,7 @@
60 60
61static struct svc_sock *svc_setup_socket(struct svc_serv *, struct socket *, 61static struct svc_sock *svc_setup_socket(struct svc_serv *, struct socket *,
62 int flags); 62 int flags);
63static void svc_udp_data_ready(struct sock *, int); 63static void svc_udp_data_ready(struct sock *);
64static int svc_udp_recvfrom(struct svc_rqst *); 64static int svc_udp_recvfrom(struct svc_rqst *);
65static int svc_udp_sendto(struct svc_rqst *); 65static int svc_udp_sendto(struct svc_rqst *);
66static void svc_sock_detach(struct svc_xprt *); 66static void svc_sock_detach(struct svc_xprt *);
@@ -403,14 +403,14 @@ static void svc_sock_setbufsize(struct socket *sock, unsigned int snd,
403/* 403/*
404 * INET callback when data has been received on the socket. 404 * INET callback when data has been received on the socket.
405 */ 405 */
406static void svc_udp_data_ready(struct sock *sk, int count) 406static void svc_udp_data_ready(struct sock *sk)
407{ 407{
408 struct svc_sock *svsk = (struct svc_sock *)sk->sk_user_data; 408 struct svc_sock *svsk = (struct svc_sock *)sk->sk_user_data;
409 wait_queue_head_t *wq = sk_sleep(sk); 409 wait_queue_head_t *wq = sk_sleep(sk);
410 410
411 if (svsk) { 411 if (svsk) {
412 dprintk("svc: socket %p(inet %p), count=%d, busy=%d\n", 412 dprintk("svc: socket %p(inet %p), busy=%d\n",
413 svsk, sk, count, 413 svsk, sk,
414 test_bit(XPT_BUSY, &svsk->sk_xprt.xpt_flags)); 414 test_bit(XPT_BUSY, &svsk->sk_xprt.xpt_flags));
415 set_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags); 415 set_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags);
416 svc_xprt_enqueue(&svsk->sk_xprt); 416 svc_xprt_enqueue(&svsk->sk_xprt);
@@ -731,7 +731,7 @@ static void svc_udp_init(struct svc_sock *svsk, struct svc_serv *serv)
731 * A data_ready event on a listening socket means there's a connection 731 * A data_ready event on a listening socket means there's a connection
732 * pending. Do not use state_change as a substitute for it. 732 * pending. Do not use state_change as a substitute for it.
733 */ 733 */
734static void svc_tcp_listen_data_ready(struct sock *sk, int count_unused) 734static void svc_tcp_listen_data_ready(struct sock *sk)
735{ 735{
736 struct svc_sock *svsk = (struct svc_sock *)sk->sk_user_data; 736 struct svc_sock *svsk = (struct svc_sock *)sk->sk_user_data;
737 wait_queue_head_t *wq; 737 wait_queue_head_t *wq;
@@ -783,7 +783,7 @@ static void svc_tcp_state_change(struct sock *sk)
783 wake_up_interruptible_all(wq); 783 wake_up_interruptible_all(wq);
784} 784}
785 785
786static void svc_tcp_data_ready(struct sock *sk, int count) 786static void svc_tcp_data_ready(struct sock *sk)
787{ 787{
788 struct svc_sock *svsk = (struct svc_sock *)sk->sk_user_data; 788 struct svc_sock *svsk = (struct svc_sock *)sk->sk_user_data;
789 wait_queue_head_t *wq = sk_sleep(sk); 789 wait_queue_head_t *wq = sk_sleep(sk);
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
index 6735e1d1e9bb..25a3dcf15cae 100644
--- a/net/sunrpc/xprtsock.c
+++ b/net/sunrpc/xprtsock.c
@@ -254,7 +254,7 @@ struct sock_xprt {
254 /* 254 /*
255 * Saved socket callback addresses 255 * Saved socket callback addresses
256 */ 256 */
257 void (*old_data_ready)(struct sock *, int); 257 void (*old_data_ready)(struct sock *);
258 void (*old_state_change)(struct sock *); 258 void (*old_state_change)(struct sock *);
259 void (*old_write_space)(struct sock *); 259 void (*old_write_space)(struct sock *);
260 void (*old_error_report)(struct sock *); 260 void (*old_error_report)(struct sock *);
@@ -951,7 +951,7 @@ static int xs_local_copy_to_xdr(struct xdr_buf *xdr, struct sk_buff *skb)
951 * 951 *
952 * Currently this assumes we can read the whole reply in a single gulp. 952 * Currently this assumes we can read the whole reply in a single gulp.
953 */ 953 */
954static void xs_local_data_ready(struct sock *sk, int len) 954static void xs_local_data_ready(struct sock *sk)
955{ 955{
956 struct rpc_task *task; 956 struct rpc_task *task;
957 struct rpc_xprt *xprt; 957 struct rpc_xprt *xprt;
@@ -1014,7 +1014,7 @@ static void xs_local_data_ready(struct sock *sk, int len)
1014 * @len: how much data to read 1014 * @len: how much data to read
1015 * 1015 *
1016 */ 1016 */
1017static void xs_udp_data_ready(struct sock *sk, int len) 1017static void xs_udp_data_ready(struct sock *sk)
1018{ 1018{
1019 struct rpc_task *task; 1019 struct rpc_task *task;
1020 struct rpc_xprt *xprt; 1020 struct rpc_xprt *xprt;
@@ -1437,7 +1437,7 @@ static int xs_tcp_data_recv(read_descriptor_t *rd_desc, struct sk_buff *skb, uns
1437 * @bytes: how much data to read 1437 * @bytes: how much data to read
1438 * 1438 *
1439 */ 1439 */
1440static void xs_tcp_data_ready(struct sock *sk, int bytes) 1440static void xs_tcp_data_ready(struct sock *sk)
1441{ 1441{
1442 struct rpc_xprt *xprt; 1442 struct rpc_xprt *xprt;
1443 read_descriptor_t rd_desc; 1443 read_descriptor_t rd_desc;
diff --git a/net/tipc/server.c b/net/tipc/server.c
index 646a930eefbf..a538a02f869b 100644
--- a/net/tipc/server.c
+++ b/net/tipc/server.c
@@ -119,7 +119,7 @@ static struct tipc_conn *tipc_conn_lookup(struct tipc_server *s, int conid)
119 return con; 119 return con;
120} 120}
121 121
122static void sock_data_ready(struct sock *sk, int unused) 122static void sock_data_ready(struct sock *sk)
123{ 123{
124 struct tipc_conn *con; 124 struct tipc_conn *con;
125 125
@@ -297,7 +297,7 @@ static int tipc_accept_from_sock(struct tipc_conn *con)
297 newcon->usr_data = s->tipc_conn_new(newcon->conid); 297 newcon->usr_data = s->tipc_conn_new(newcon->conid);
298 298
299 /* Wake up receive process in case of 'SYN+' message */ 299 /* Wake up receive process in case of 'SYN+' message */
300 newsock->sk->sk_data_ready(newsock->sk, 0); 300 newsock->sk->sk_data_ready(newsock->sk);
301 return ret; 301 return ret;
302} 302}
303 303
diff --git a/net/tipc/socket.c b/net/tipc/socket.c
index adc12e227303..3c0256962f7d 100644
--- a/net/tipc/socket.c
+++ b/net/tipc/socket.c
@@ -45,7 +45,7 @@
45#define CONN_TIMEOUT_DEFAULT 8000 /* default connect timeout = 8s */ 45#define CONN_TIMEOUT_DEFAULT 8000 /* default connect timeout = 8s */
46 46
47static int backlog_rcv(struct sock *sk, struct sk_buff *skb); 47static int backlog_rcv(struct sock *sk, struct sk_buff *skb);
48static void tipc_data_ready(struct sock *sk, int len); 48static void tipc_data_ready(struct sock *sk);
49static void tipc_write_space(struct sock *sk); 49static void tipc_write_space(struct sock *sk);
50static int tipc_release(struct socket *sock); 50static int tipc_release(struct socket *sock);
51static int tipc_accept(struct socket *sock, struct socket *new_sock, int flags); 51static int tipc_accept(struct socket *sock, struct socket *new_sock, int flags);
@@ -1248,7 +1248,7 @@ static void tipc_write_space(struct sock *sk)
1248 * @sk: socket 1248 * @sk: socket
1249 * @len: the length of messages 1249 * @len: the length of messages
1250 */ 1250 */
1251static void tipc_data_ready(struct sock *sk, int len) 1251static void tipc_data_ready(struct sock *sk)
1252{ 1252{
1253 struct socket_wq *wq; 1253 struct socket_wq *wq;
1254 1254
@@ -1410,7 +1410,7 @@ static u32 filter_rcv(struct sock *sk, struct sk_buff *buf)
1410 __skb_queue_tail(&sk->sk_receive_queue, buf); 1410 __skb_queue_tail(&sk->sk_receive_queue, buf);
1411 skb_set_owner_r(buf, sk); 1411 skb_set_owner_r(buf, sk);
1412 1412
1413 sk->sk_data_ready(sk, 0); 1413 sk->sk_data_ready(sk);
1414 return TIPC_OK; 1414 return TIPC_OK;
1415} 1415}
1416 1416
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index 94404f19f9de..bb7e8ba821f4 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -1217,7 +1217,7 @@ restart:
1217 __skb_queue_tail(&other->sk_receive_queue, skb); 1217 __skb_queue_tail(&other->sk_receive_queue, skb);
1218 spin_unlock(&other->sk_receive_queue.lock); 1218 spin_unlock(&other->sk_receive_queue.lock);
1219 unix_state_unlock(other); 1219 unix_state_unlock(other);
1220 other->sk_data_ready(other, 0); 1220 other->sk_data_ready(other);
1221 sock_put(other); 1221 sock_put(other);
1222 return 0; 1222 return 0;
1223 1223
@@ -1600,7 +1600,7 @@ restart:
1600 if (max_level > unix_sk(other)->recursion_level) 1600 if (max_level > unix_sk(other)->recursion_level)
1601 unix_sk(other)->recursion_level = max_level; 1601 unix_sk(other)->recursion_level = max_level;
1602 unix_state_unlock(other); 1602 unix_state_unlock(other);
1603 other->sk_data_ready(other, len); 1603 other->sk_data_ready(other);
1604 sock_put(other); 1604 sock_put(other);
1605 scm_destroy(siocb->scm); 1605 scm_destroy(siocb->scm);
1606 return len; 1606 return len;
@@ -1706,7 +1706,7 @@ static int unix_stream_sendmsg(struct kiocb *kiocb, struct socket *sock,
1706 if (max_level > unix_sk(other)->recursion_level) 1706 if (max_level > unix_sk(other)->recursion_level)
1707 unix_sk(other)->recursion_level = max_level; 1707 unix_sk(other)->recursion_level = max_level;
1708 unix_state_unlock(other); 1708 unix_state_unlock(other);
1709 other->sk_data_ready(other, size); 1709 other->sk_data_ready(other);
1710 sent += size; 1710 sent += size;
1711 } 1711 }
1712 1712
diff --git a/net/vmw_vsock/vmci_transport_notify.c b/net/vmw_vsock/vmci_transport_notify.c
index 9a730744e7bc..9b7f207f2bee 100644
--- a/net/vmw_vsock/vmci_transport_notify.c
+++ b/net/vmw_vsock/vmci_transport_notify.c
@@ -315,7 +315,7 @@ vmci_transport_handle_wrote(struct sock *sk,
315 struct vsock_sock *vsk = vsock_sk(sk); 315 struct vsock_sock *vsk = vsock_sk(sk);
316 PKT_FIELD(vsk, sent_waiting_read) = false; 316 PKT_FIELD(vsk, sent_waiting_read) = false;
317#endif 317#endif
318 sk->sk_data_ready(sk, 0); 318 sk->sk_data_ready(sk);
319} 319}
320 320
321static void vmci_transport_notify_pkt_socket_init(struct sock *sk) 321static void vmci_transport_notify_pkt_socket_init(struct sock *sk)
diff --git a/net/vmw_vsock/vmci_transport_notify_qstate.c b/net/vmw_vsock/vmci_transport_notify_qstate.c
index 622bd7aa1016..dc9c7929a2f9 100644
--- a/net/vmw_vsock/vmci_transport_notify_qstate.c
+++ b/net/vmw_vsock/vmci_transport_notify_qstate.c
@@ -92,7 +92,7 @@ vmci_transport_handle_wrote(struct sock *sk,
92 bool bottom_half, 92 bool bottom_half,
93 struct sockaddr_vm *dst, struct sockaddr_vm *src) 93 struct sockaddr_vm *dst, struct sockaddr_vm *src)
94{ 94{
95 sk->sk_data_ready(sk, 0); 95 sk->sk_data_ready(sk);
96} 96}
97 97
98static void vsock_block_update_write_window(struct sock *sk) 98static void vsock_block_update_write_window(struct sock *sk)
@@ -290,7 +290,7 @@ vmci_transport_notify_pkt_recv_post_dequeue(
290 /* See the comment in 290 /* See the comment in
291 * vmci_transport_notify_pkt_send_post_enqueue(). 291 * vmci_transport_notify_pkt_send_post_enqueue().
292 */ 292 */
293 sk->sk_data_ready(sk, 0); 293 sk->sk_data_ready(sk);
294 } 294 }
295 295
296 return err; 296 return err;
diff --git a/net/x25/af_x25.c b/net/x25/af_x25.c
index 6177479c7de9..5ad4418ef093 100644
--- a/net/x25/af_x25.c
+++ b/net/x25/af_x25.c
@@ -1064,7 +1064,7 @@ int x25_rx_call_request(struct sk_buff *skb, struct x25_neigh *nb,
1064 x25_start_heartbeat(make); 1064 x25_start_heartbeat(make);
1065 1065
1066 if (!sock_flag(sk, SOCK_DEAD)) 1066 if (!sock_flag(sk, SOCK_DEAD))
1067 sk->sk_data_ready(sk, skb->len); 1067 sk->sk_data_ready(sk);
1068 rc = 1; 1068 rc = 1;
1069 sock_put(sk); 1069 sock_put(sk);
1070out: 1070out:
diff --git a/net/x25/x25_in.c b/net/x25/x25_in.c
index d1b0dc79bb6f..7ac50098a375 100644
--- a/net/x25/x25_in.c
+++ b/net/x25/x25_in.c
@@ -79,7 +79,7 @@ static int x25_queue_rx_frame(struct sock *sk, struct sk_buff *skb, int more)
79 skb_set_owner_r(skbn, sk); 79 skb_set_owner_r(skbn, sk);
80 skb_queue_tail(&sk->sk_receive_queue, skbn); 80 skb_queue_tail(&sk->sk_receive_queue, skbn);
81 if (!sock_flag(sk, SOCK_DEAD)) 81 if (!sock_flag(sk, SOCK_DEAD))
82 sk->sk_data_ready(sk, skbn->len); 82 sk->sk_data_ready(sk);
83 83
84 return 0; 84 return 0;
85} 85}
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index f02f511b7107..c08fbd11ceff 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -1842,7 +1842,7 @@ purge_queue:
1842 xfrm_pol_put(pol); 1842 xfrm_pol_put(pol);
1843} 1843}
1844 1844
1845static int xdst_queue_output(struct sk_buff *skb) 1845static int xdst_queue_output(struct sock *sk, struct sk_buff *skb)
1846{ 1846{
1847 unsigned long sched_next; 1847 unsigned long sched_next;
1848 struct dst_entry *dst = skb_dst(skb); 1848 struct dst_entry *dst = skb_dst(skb);
diff --git a/scripts/Makefile.build b/scripts/Makefile.build
index 9f0ee22b914f..003bc263105a 100644
--- a/scripts/Makefile.build
+++ b/scripts/Makefile.build
@@ -65,12 +65,22 @@ warning- := $(empty)
65warning-1 := -Wextra -Wunused -Wno-unused-parameter 65warning-1 := -Wextra -Wunused -Wno-unused-parameter
66warning-1 += -Wmissing-declarations 66warning-1 += -Wmissing-declarations
67warning-1 += -Wmissing-format-attribute 67warning-1 += -Wmissing-format-attribute
68warning-1 += -Wmissing-prototypes 68warning-1 += $(call cc-option, -Wmissing-prototypes)
69warning-1 += -Wold-style-definition 69warning-1 += -Wold-style-definition
70warning-1 += $(call cc-option, -Wmissing-include-dirs) 70warning-1 += $(call cc-option, -Wmissing-include-dirs)
71warning-1 += $(call cc-option, -Wunused-but-set-variable) 71warning-1 += $(call cc-option, -Wunused-but-set-variable)
72warning-1 += $(call cc-disable-warning, missing-field-initializers) 72warning-1 += $(call cc-disable-warning, missing-field-initializers)
73 73
74# Clang
75warning-1 += $(call cc-disable-warning, initializer-overrides)
76warning-1 += $(call cc-disable-warning, unused-value)
77warning-1 += $(call cc-disable-warning, format)
78warning-1 += $(call cc-disable-warning, unknown-warning-option)
79warning-1 += $(call cc-disable-warning, sign-compare)
80warning-1 += $(call cc-disable-warning, format-zero-length)
81warning-1 += $(call cc-disable-warning, uninitialized)
82warning-1 += $(call cc-option, -fcatch-undefined-behavior)
83
74warning-2 := -Waggregate-return 84warning-2 := -Waggregate-return
75warning-2 += -Wcast-align 85warning-2 += -Wcast-align
76warning-2 += -Wdisabled-optimization 86warning-2 += -Wdisabled-optimization
diff --git a/scripts/bootgraph.pl b/scripts/bootgraph.pl
index b78fca994a15..9ca667bcaee9 100644
--- a/scripts/bootgraph.pl
+++ b/scripts/bootgraph.pl
@@ -38,6 +38,31 @@
38# 38#
39 39
40use strict; 40use strict;
41use Getopt::Long;
42my $header = 0;
43
44sub help {
45 my $text = << "EOM";
46Usage:
471) dmesg | perl scripts/bootgraph.pl [OPTION] > output.svg
482) perl scripts/bootgraph.pl -h
49
50Options:
51 -header Insert kernel version and date
52EOM
53 my $std=shift;
54 if ($std == 1) {
55 print STDERR $text;
56 } else {
57 print $text;
58 }
59 exit;
60}
61
62GetOptions(
63 'h|help' =>\&help,
64 'header' =>\$header
65);
41 66
42my %start; 67my %start;
43my %end; 68my %end;
@@ -49,6 +74,11 @@ my $count = 0;
49my %pids; 74my %pids;
50my %pidctr; 75my %pidctr;
51 76
77my $headerstep = 20;
78my $xheader = 15;
79my $yheader = 25;
80my $cyheader = 0;
81
52while (<>) { 82while (<>) {
53 my $line = $_; 83 my $line = $_;
54 if ($line =~ /([0-9\.]+)\] calling ([a-zA-Z0-9\_\.]+)\+/) { 84 if ($line =~ /([0-9\.]+)\] calling ([a-zA-Z0-9\_\.]+)\+/) {
@@ -112,15 +142,23 @@ if ($count == 0) {
112 print STDERR <<END; 142 print STDERR <<END;
113No data found in the dmesg. Make sure that 'printk.time=1' and 143No data found in the dmesg. Make sure that 'printk.time=1' and
114'initcall_debug' are passed on the kernel command line. 144'initcall_debug' are passed on the kernel command line.
115Usage:
116 dmesg | perl scripts/bootgraph.pl > output.svg
117END 145END
146 help(1);
118 exit 1; 147 exit 1;
119} 148}
120 149
121print "<?xml version=\"1.0\" standalone=\"no\"?> \n"; 150print "<?xml version=\"1.0\" standalone=\"no\"?> \n";
122print "<svg width=\"2000\" height=\"100%\" version=\"1.1\" xmlns=\"http://www.w3.org/2000/svg\">\n"; 151print "<svg width=\"2000\" height=\"100%\" version=\"1.1\" xmlns=\"http://www.w3.org/2000/svg\">\n";
123 152
153
154if ($header) {
155 my $version = `uname -a`;
156 my $date = `date`;
157 print "<text transform=\"translate($xheader,$yheader)\">Kernel version: $version</text>\n";
158 $cyheader = $yheader+$headerstep;
159 print "<text transform=\"translate($xheader,$cyheader)\">Date: $date</text>\n";
160}
161
124my @styles; 162my @styles;
125 163
126$styles[0] = "fill:rgb(0,0,255);fill-opacity:0.5;stroke-width:1;stroke:rgb(0,0,0)"; 164$styles[0] = "fill:rgb(0,0,255);fill-opacity:0.5;stroke-width:1;stroke:rgb(0,0,0)";
diff --git a/scripts/coccinelle/api/ptr_ret.cocci b/scripts/coccinelle/api/ptr_ret.cocci
index e18f8402e37c..dd58dab5d411 100644
--- a/scripts/coccinelle/api/ptr_ret.cocci
+++ b/scripts/coccinelle/api/ptr_ret.cocci
@@ -7,7 +7,7 @@
7// URL: http://coccinelle.lip6.fr/ 7// URL: http://coccinelle.lip6.fr/
8// Options: --no-includes --include-headers 8// Options: --no-includes --include-headers
9// 9//
10// Keywords: ERR_PTR, PTR_ERR, PTR_RET, PTR_ERR_OR_ZERO 10// Keywords: ERR_PTR, PTR_ERR, PTR_ERR_OR_ZERO
11// Version min: 2.6.39 11// Version min: 2.6.39
12// 12//
13 13
@@ -62,35 +62,35 @@ position p3;
62p << r1.p1; 62p << r1.p1;
63@@ 63@@
64 64
65coccilib.org.print_todo(p[0], "WARNING: PTR_RET can be used") 65coccilib.org.print_todo(p[0], "WARNING: PTR_ERR_OR_ZERO can be used")
66 66
67 67
68@script:python depends on org@ 68@script:python depends on org@
69p << r2.p2; 69p << r2.p2;
70@@ 70@@
71 71
72coccilib.org.print_todo(p[0], "WARNING: PTR_RET can be used") 72coccilib.org.print_todo(p[0], "WARNING: PTR_ERR_OR_ZERO can be used")
73 73
74@script:python depends on org@ 74@script:python depends on org@
75p << r3.p3; 75p << r3.p3;
76@@ 76@@
77 77
78coccilib.org.print_todo(p[0], "WARNING: PTR_RET can be used") 78coccilib.org.print_todo(p[0], "WARNING: PTR_ERR_OR_ZERO can be used")
79 79
80@script:python depends on report@ 80@script:python depends on report@
81p << r1.p1; 81p << r1.p1;
82@@ 82@@
83 83
84coccilib.report.print_report(p[0], "WARNING: PTR_RET can be used") 84coccilib.report.print_report(p[0], "WARNING: PTR_ERR_OR_ZERO can be used")
85 85
86@script:python depends on report@ 86@script:python depends on report@
87p << r2.p2; 87p << r2.p2;
88@@ 88@@
89 89
90coccilib.report.print_report(p[0], "WARNING: PTR_RET can be used") 90coccilib.report.print_report(p[0], "WARNING: PTR_ERR_OR_ZERO can be used")
91 91
92@script:python depends on report@ 92@script:python depends on report@
93p << r3.p3; 93p << r3.p3;
94@@ 94@@
95 95
96coccilib.report.print_report(p[0], "WARNING: PTR_RET can be used") 96coccilib.report.print_report(p[0], "WARNING: PTR_ERR_OR_ZERO can be used")
diff --git a/scripts/coccinelle/misc/memcpy-assign.cocci b/scripts/coccinelle/misc/memcpy-assign.cocci
deleted file mode 100644
index afd058be497f..000000000000
--- a/scripts/coccinelle/misc/memcpy-assign.cocci
+++ /dev/null
@@ -1,103 +0,0 @@
1//
2// Replace memcpy with struct assignment.
3//
4// Confidence: High
5// Copyright: (C) 2012 Peter Senna Tschudin, INRIA/LIP6. GPLv2.
6// URL: http://coccinelle.lip6.fr/
7// Comments:
8// Options: --no-includes --include-headers
9
10virtual patch
11virtual report
12virtual context
13virtual org
14
15@r1 depends on !patch@
16identifier struct_name;
17struct struct_name to;
18struct struct_name from;
19struct struct_name *top;
20struct struct_name *fromp;
21position p;
22@@
23memcpy@p(\(&(to)\|top\), \(&(from)\|fromp\), \(sizeof(to)\|sizeof(from)\|sizeof(struct struct_name)\|sizeof(*top)\|sizeof(*fromp)\))
24
25@script:python depends on report@
26p << r1.p;
27@@
28coccilib.report.print_report(p[0],"Replace memcpy with struct assignment")
29
30@depends on context@
31position r1.p;
32@@
33*memcpy@p(...);
34
35@script:python depends on org@
36p << r1.p;
37@@
38cocci.print_main("Replace memcpy with struct assignment",p)
39
40@depends on patch@
41identifier struct_name;
42struct struct_name to;
43struct struct_name from;
44@@
45(
46-memcpy(&(to), &(from), sizeof(to));
47+to = from;
48|
49-memcpy(&(to), &(from), sizeof(from));
50+to = from;
51|
52-memcpy(&(to), &(from), sizeof(struct struct_name));
53+to = from;
54)
55
56@depends on patch@
57identifier struct_name;
58struct struct_name to;
59struct struct_name *from;
60@@
61(
62-memcpy(&(to), from, sizeof(to));
63+to = *from;
64|
65-memcpy(&(to), from, sizeof(*from));
66+to = *from;
67|
68-memcpy(&(to), from, sizeof(struct struct_name));
69+to = *from;
70)
71
72@depends on patch@
73identifier struct_name;
74struct struct_name *to;
75struct struct_name from;
76@@
77(
78-memcpy(to, &(from), sizeof(*to));
79+ *to = from;
80|
81-memcpy(to, &(from), sizeof(from));
82+ *to = from;
83|
84-memcpy(to, &(from), sizeof(struct struct_name));
85+ *to = from;
86)
87
88@depends on patch@
89identifier struct_name;
90struct struct_name *to;
91struct struct_name *from;
92@@
93(
94-memcpy(to, from, sizeof(*to));
95+ *to = *from;
96|
97-memcpy(to, from, sizeof(*from));
98+ *to = *from;
99|
100-memcpy(to, from, sizeof(struct struct_name));
101+ *to = *from;
102)
103
diff --git a/scripts/mkcompile_h b/scripts/mkcompile_h
index f221ddf69080..cfb8440cc0b2 100755
--- a/scripts/mkcompile_h
+++ b/scripts/mkcompile_h
@@ -76,7 +76,7 @@ UTS_TRUNCATE="cut -b -$UTS_LEN"
76 echo \#define LINUX_COMPILE_BY \"`echo $LINUX_COMPILE_BY | $UTS_TRUNCATE`\" 76 echo \#define LINUX_COMPILE_BY \"`echo $LINUX_COMPILE_BY | $UTS_TRUNCATE`\"
77 echo \#define LINUX_COMPILE_HOST \"`echo $LINUX_COMPILE_HOST | $UTS_TRUNCATE`\" 77 echo \#define LINUX_COMPILE_HOST \"`echo $LINUX_COMPILE_HOST | $UTS_TRUNCATE`\"
78 78
79 echo \#define LINUX_COMPILER \"`$CC -v 2>&1 | tail -n 1`\" 79 echo \#define LINUX_COMPILER \"`$CC -v 2>&1 | grep ' version '`\"
80) > .tmpcompile 80) > .tmpcompile
81 81
82# Only replace the real compile.h if the new one is different, 82# Only replace the real compile.h if the new one is different,
diff --git a/scripts/objdiff b/scripts/objdiff
new file mode 100755
index 000000000000..b3e4f10bfc3e
--- /dev/null
+++ b/scripts/objdiff
@@ -0,0 +1,141 @@
+#!/bin/bash
+
+# objdiff - a small script for validating that a commit or series of commits
+# didn't change object code.
+#
+# Copyright 2014, Jason Cooper <jason@lakedaemon.net>
+#
+# Licensed under the terms of the GNU GPL version 2
+
+# usage example:
+#
+# $ git checkout COMMIT_A
+# $ <your fancy build command here>
+# $ ./scripts/objdiff record path/to/*.o
+#
+# $ git checkout COMMIT_B
+# $ <your fancy build command here>
+# $ ./scripts/objdiff record path/to/*.o
+#
+# $ ./scripts/objdiff diff COMMIT_A COMMIT_B
+# $
+
+# And to clean up (everything is in .tmp_objdiff/*)
+# $ ./scripts/objdiff clean all
+#
+# Note: 'make mrproper' will also remove .tmp_objdiff
+
+GIT_DIR="`git rev-parse --git-dir`"
+
+if [ -d "$GIT_DIR" ]; then
+	TMPD="${GIT_DIR%git}tmp_objdiff"
+
+	[ -d "$TMPD" ] || mkdir "$TMPD"
+else
+	echo "ERROR: git directory not found."
+	exit 1
+fi
+
+usage() {
+	echo "Usage: $0 <command> <args>"
+	echo " record <list of object files>"
+	echo " diff <commitA> <commitB>"
+	echo " clean all | <commit>"
+	exit 1
+}
+
+dorecord() {
+	[ $# -eq 0 ] && usage
+
+	FILES="$*"
+
+	CMT="`git rev-parse --short HEAD`"
+
+	OBJDUMP="${CROSS_COMPILE}objdump"
+	OBJDIFFD="$TMPD/$CMT"
+
+	[ ! -d "$OBJDIFFD" ] && mkdir -p "$OBJDIFFD"
+
+	for f in $FILES; do
+		dn="${f%/*}"
+		bn="${f##*/}"
+
+		[ ! -d "$OBJDIFFD/$dn" ] && mkdir -p "$OBJDIFFD/$dn"
+
+		# remove addresses for a more clear diff
+		# http://dummdida.tumblr.com/post/60924060451/binary-diff-between-libc-from-scientificlinux-and
+		$OBJDUMP -D "$f" | sed "s/^[[:space:]]\+[0-9a-f]\+//" \
+			>"$OBJDIFFD/$dn/$bn"
+	done
+}
+
+dodiff() {
+	[ $# -ne 2 ] && [ $# -ne 0 ] && usage
+
+	if [ $# -eq 0 ]; then
+		SRC="`git rev-parse --short HEAD^`"
+		DST="`git rev-parse --short HEAD`"
+	else
+		SRC="`git rev-parse --short $1`"
+		DST="`git rev-parse --short $2`"
+	fi
+
+	DIFF="`which colordiff`"
+
+	if [ ${#DIFF} -eq 0 ] || [ ! -x "$DIFF" ]; then
+		DIFF="`which diff`"
+	fi
+
+	SRCD="$TMPD/$SRC"
+	DSTD="$TMPD/$DST"
+
+	if [ ! -d "$SRCD" ]; then
+		echo "ERROR: $SRCD doesn't exist"
+		exit 1
+	fi
+
+	if [ ! -d "$DSTD" ]; then
+		echo "ERROR: $DSTD doesn't exist"
+		exit 1
+	fi
+
+	$DIFF -Nurd $SRCD $DSTD
+}
+
+doclean() {
+	[ $# -eq 0 ] && usage
+	[ $# -gt 1 ] && usage
+
+	if [ "x$1" = "xall" ]; then
+		rm -rf $TMPD/*
+	else
+		CMT="`git rev-parse --short $1`"
+
+		if [ -d "$TMPD/$CMT" ]; then
+			rm -rf $TMPD/$CMT
+		else
+			echo "$CMT not found"
+		fi
+	fi
+}
+
+[ $# -eq 0 ] && usage
+
+case "$1" in
+	record)
+		shift
+		dorecord $*
+		;;
+	diff)
+		shift
+		dodiff $*
+		;;
+	clean)
+		shift
+		doclean $*
+		;;
+	*)
+		echo "Unrecognized command '$1'"
+		exit 1
+		;;
+esac
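
Putting the header's usage notes together, a complete cross-compiled run of the new script might look like the sketch below; the exported toolchain variables, the drivers/foo/ build target and the COMMIT_A/COMMIT_B names are illustrative placeholders, not part of the patch:

$ export ARCH=arm CROSS_COMPILE=arm-linux-gnueabi-   # assumed toolchain; dorecord() uses ${CROSS_COMPILE}objdump
$ git checkout COMMIT_A
$ make drivers/foo/                                  # any build that produces the .o files of interest
$ ./scripts/objdiff record drivers/foo/*.o           # address-stripped disassemblies land in .tmp_objdiff/<short sha>/
$
$ git checkout COMMIT_B
$ make drivers/foo/
$ ./scripts/objdiff record drivers/foo/*.o
$
$ ./scripts/objdiff diff COMMIT_A COMMIT_B           # no output means the recorded disassemblies match
$ ./scripts/objdiff clean all                        # 'make mrproper' also removes .tmp_objdiff

The diff step simply runs diff -Nurd (or colordiff, if installed) over the two recorded trees, so an empty result is the pass condition.
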
diff --git a/scripts/tags.sh b/scripts/tags.sh
index 58c455929091..f2c5b006a3d7 100755
--- a/scripts/tags.sh
+++ b/scripts/tags.sh
@@ -11,11 +11,10 @@ if [ "$KBUILD_VERBOSE" = "1" ]; then
 	set -x
 fi
 
-# This is a duplicate of RCS_FIND_IGNORE without escaped '()'
-ignore="( -name SCCS -o -name BitKeeper -o -name .svn -o \
-          -name CVS -o -name .pc -o -name .hg -o \
-          -name .git ) \
-          -prune -o"
+# RCS_FIND_IGNORE has escaped ()s -- remove them.
+ignore="$(echo "$RCS_FIND_IGNORE" | sed 's|\\||g' )"
+# tags and cscope files should also ignore MODVERSION *.mod.c files
+ignore="$ignore ( -name *.mod.c ) -prune -o"
 
 # Do not use full path if we do not use O=.. builds
 # Use make O=. {tags|cscope}
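
For reference, the sed in the new hunk only strips the backslashes that keep the parentheses safe inside the Makefile, leaving a plain find(1) expression behind; a quick sketch with an abbreviated RCS_FIND_IGNORE value (the full value, exported by the top-level Makefile, lists the same directories as the lines removed above):

$ RCS_FIND_IGNORE='\( -name SCCS -o -name .git \) -prune -o'   # abbreviated, illustrative value
$ echo "$RCS_FIND_IGNORE" | sed 's|\\||g'
( -name SCCS -o -name .git ) -prune -o

The second assignment then appends ( -name *.mod.c ) -prune -o so that the generated *.mod.c files are skipped by tags and cscope as well.
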
diff --git a/security/integrity/evm/evm_crypto.c b/security/integrity/evm/evm_crypto.c
index babd8626bf96..6b540f1822e0 100644
--- a/security/integrity/evm/evm_crypto.c
+++ b/security/integrity/evm/evm_crypto.c
@@ -139,7 +139,7 @@ static int evm_calc_hmac_or_hash(struct dentry *dentry,
 	int error;
 	int size;
 
-	if (!inode->i_op || !inode->i_op->getxattr)
+	if (!inode->i_op->getxattr)
 		return -EOPNOTSUPP;
 	desc = init_desc(type);
 	if (IS_ERR(desc))
diff --git a/security/integrity/evm/evm_main.c b/security/integrity/evm/evm_main.c
index 996092f21b64..6e0bd933b6a9 100644
--- a/security/integrity/evm/evm_main.c
+++ b/security/integrity/evm/evm_main.c
@@ -64,7 +64,7 @@ static int evm_find_protected_xattrs(struct dentry *dentry)
 	int error;
 	int count = 0;
 
-	if (!inode->i_op || !inode->i_op->getxattr)
+	if (!inode->i_op->getxattr)
 		return -EOPNOTSUPP;
 
 	for (xattr = evm_config_xattrnames; *xattr != NULL; xattr++) {
diff --git a/security/integrity/integrity_audit.c b/security/integrity/integrity_audit.c
index aab9fa5a8231..90987d15b6fe 100644
--- a/security/integrity/integrity_audit.c
+++ b/security/integrity/integrity_audit.c
@@ -40,7 +40,7 @@ void integrity_audit_msg(int audit_msgno, struct inode *inode,
 
 	ab = audit_log_start(current->audit_context, GFP_KERNEL, audit_msgno);
 	audit_log_format(ab, "pid=%d uid=%u auid=%u ses=%u",
-			 current->pid,
+			 task_pid_nr(current),
 			 from_kuid(&init_user_ns, current_cred()->uid),
 			 from_kuid(&init_user_ns, audit_get_loginuid(current)),
 			 audit_get_sessionid(current));
diff --git a/security/lsm_audit.c b/security/lsm_audit.c
index 9a62045e6282..69fdf3bc765b 100644
--- a/security/lsm_audit.c
+++ b/security/lsm_audit.c
@@ -220,7 +220,7 @@ static void dump_common_audit_data(struct audit_buffer *ab,
 	 */
 	BUILD_BUG_ON(sizeof(a->u) > sizeof(void *)*2);
 
-	audit_log_format(ab, " pid=%d comm=", tsk->pid);
+	audit_log_format(ab, " pid=%d comm=", task_pid_nr(tsk));
 	audit_log_untrustedstring(ab, tsk->comm);
 
 	switch (a->type) {
@@ -278,9 +278,12 @@ static void dump_common_audit_data(struct audit_buffer *ab,
 	}
 	case LSM_AUDIT_DATA_TASK:
 		tsk = a->u.tsk;
-		if (tsk && tsk->pid) {
-			audit_log_format(ab, " pid=%d comm=", tsk->pid);
-			audit_log_untrustedstring(ab, tsk->comm);
+		if (tsk) {
+			pid_t pid = task_pid_nr(tsk);
+			if (pid) {
+				audit_log_format(ab, " pid=%d comm=", pid);
+				audit_log_untrustedstring(ab, tsk->comm);
+			}
 		}
 		break;
 	case LSM_AUDIT_DATA_NET:
diff --git a/security/tomoyo/realpath.c b/security/tomoyo/realpath.c
index 80a09c37cac8..a3386d119425 100644
--- a/security/tomoyo/realpath.c
+++ b/security/tomoyo/realpath.c
@@ -173,7 +173,7 @@ static char *tomoyo_get_local_path(struct dentry *dentry, char * const buffer,
 		 * Use filesystem name if filesystem does not support rename()
 		 * operation.
 		 */
-		if (inode->i_op && !inode->i_op->rename)
+		if (!inode->i_op->rename)
 			goto prepend_filesystem_name;
 	}
 	/* Prepend device name. */
@@ -282,7 +282,7 @@ char *tomoyo_realpath_from_path(struct path *path)
 		 * Get local name for filesystems without rename() operation
 		 * or dentry without vfsmount.
 		 */
-		if (!path->mnt || (inode->i_op && !inode->i_op->rename))
+		if (!path->mnt || !inode->i_op->rename)
 			pos = tomoyo_get_local_path(path->dentry, buf,
 						    buf_len - 1);
 		/* Get absolute name for the rest. */
diff --git a/virt/kvm/ioapic.c b/virt/kvm/ioapic.c
index d4b601547f1f..2458a1dc2ba9 100644
--- a/virt/kvm/ioapic.c
+++ b/virt/kvm/ioapic.c
@@ -97,6 +97,14 @@ static void rtc_irq_eoi_tracking_reset(struct kvm_ioapic *ioapic)
 	bitmap_zero(ioapic->rtc_status.dest_map, KVM_MAX_VCPUS);
 }
 
+static void kvm_rtc_eoi_tracking_restore_all(struct kvm_ioapic *ioapic);
+
+static void rtc_status_pending_eoi_check_valid(struct kvm_ioapic *ioapic)
+{
+	if (WARN_ON(ioapic->rtc_status.pending_eoi < 0))
+		kvm_rtc_eoi_tracking_restore_all(ioapic);
+}
+
 static void __rtc_irq_eoi_tracking_restore_one(struct kvm_vcpu *vcpu)
 {
 	bool new_val, old_val;
@@ -120,9 +128,8 @@ static void __rtc_irq_eoi_tracking_restore_one(struct kvm_vcpu *vcpu)
 	} else {
 		__clear_bit(vcpu->vcpu_id, ioapic->rtc_status.dest_map);
 		ioapic->rtc_status.pending_eoi--;
+		rtc_status_pending_eoi_check_valid(ioapic);
 	}
-
-	WARN_ON(ioapic->rtc_status.pending_eoi < 0);
 }
 
 void kvm_rtc_eoi_tracking_restore_one(struct kvm_vcpu *vcpu)
@@ -149,10 +156,10 @@ static void kvm_rtc_eoi_tracking_restore_all(struct kvm_ioapic *ioapic)
 
 static void rtc_irq_eoi(struct kvm_ioapic *ioapic, struct kvm_vcpu *vcpu)
 {
-	if (test_and_clear_bit(vcpu->vcpu_id, ioapic->rtc_status.dest_map))
+	if (test_and_clear_bit(vcpu->vcpu_id, ioapic->rtc_status.dest_map)) {
 		--ioapic->rtc_status.pending_eoi;
-
-	WARN_ON(ioapic->rtc_status.pending_eoi < 0);
+		rtc_status_pending_eoi_check_valid(ioapic);
+	}
 }
 
 static bool rtc_irq_check_coalesced(struct kvm_ioapic *ioapic)
@@ -353,10 +360,16 @@ static int ioapic_service(struct kvm_ioapic *ioapic, int irq, bool line_status)
 	ioapic->irr &= ~(1 << irq);
 
 	if (irq == RTC_GSI && line_status) {
+		/*
+		 * pending_eoi cannot ever become negative (see
+		 * rtc_status_pending_eoi_check_valid) and the caller
+		 * ensures that it is only called if it is >= zero, namely
+		 * if rtc_irq_check_coalesced returns false).
+		 */
 		BUG_ON(ioapic->rtc_status.pending_eoi != 0);
 		ret = kvm_irq_delivery_to_apic(ioapic->kvm, NULL, &irqe,
 				ioapic->rtc_status.dest_map);
-		ioapic->rtc_status.pending_eoi = ret;
+		ioapic->rtc_status.pending_eoi = (ret < 0 ? 0 : ret);
 	} else
 		ret = kvm_irq_delivery_to_apic(ioapic->kvm, NULL, &irqe, NULL);
 