-rw-r--r--.gitignore3
-rw-r--r--Documentation/00-INDEX24
-rw-r--r--Documentation/RCU/00-INDEX2
-rw-r--r--Documentation/arm/00-INDEX14
-rw-r--r--Documentation/blackfin/00-INDEX6
-rw-r--r--Documentation/block/00-INDEX2
-rw-r--r--Documentation/devicetree/00-INDEX2
-rw-r--r--Documentation/devicetree/bindings/interrupt-controller/lsi,zevio-intc.txt18
-rw-r--r--Documentation/devicetree/bindings/net/allwinner,sun4i-emac.txt5
-rw-r--r--Documentation/devicetree/bindings/net/allwinner,sun4i-mdio.txt5
-rw-r--r--Documentation/dvb/contributors.txt2
-rw-r--r--Documentation/fb/00-INDEX6
-rw-r--r--Documentation/filesystems/00-INDEX2
-rw-r--r--Documentation/filesystems/nfs/00-INDEX4
-rw-r--r--Documentation/ide/00-INDEX2
-rw-r--r--Documentation/kernel-parameters.txt8
-rw-r--r--Documentation/laptops/00-INDEX6
-rw-r--r--Documentation/leds/00-INDEX8
-rw-r--r--Documentation/m68k/00-INDEX2
-rw-r--r--Documentation/networking/00-INDEX30
-rw-r--r--Documentation/power/00-INDEX6
-rw-r--r--Documentation/ptp/testptp.c11
-rw-r--r--Documentation/s390/00-INDEX8
-rw-r--r--Documentation/scheduler/00-INDEX2
-rw-r--r--Documentation/scsi/00-INDEX16
-rw-r--r--Documentation/serial/00-INDEX6
-rw-r--r--Documentation/spi/00-INDEX22
-rw-r--r--Documentation/timers/00-INDEX2
-rw-r--r--Documentation/virtual/kvm/00-INDEX2
-rw-r--r--Documentation/vm/00-INDEX4
-rw-r--r--Documentation/w1/masters/00-INDEX4
-rw-r--r--Documentation/w1/slaves/00-INDEX2
-rw-r--r--Documentation/x86/00-INDEX18
-rw-r--r--MAINTAINERS2
-rw-r--r--Makefile2
-rw-r--r--arch/arm/boot/dts/sun4i-a10.dtsi4
-rw-r--r--arch/arm/boot/dts/sun5i-a10s.dtsi4
-rw-r--r--arch/arm/boot/dts/sun7i-a20.dtsi4
-rw-r--r--arch/arm64/Kconfig1
-rw-r--r--arch/arm64/configs/defconfig18
-rw-r--r--arch/arm64/include/asm/atomic.h53
-rw-r--r--arch/arm64/include/asm/barrier.h2
-rw-r--r--arch/arm64/include/asm/cacheflush.h1
-rw-r--r--arch/arm64/include/asm/cmpxchg.h17
-rw-r--r--arch/arm64/include/asm/esr.h2
-rw-r--r--arch/arm64/include/asm/futex.h10
-rw-r--r--arch/arm64/include/asm/kvm_arm.h2
-rw-r--r--arch/arm64/include/asm/spinlock.h10
-rw-r--r--arch/arm64/include/asm/unistd32.h5
-rw-r--r--arch/arm64/kernel/kuser32.S6
-rw-r--r--arch/arm64/kernel/vdso.c4
-rw-r--r--arch/arm64/kernel/vdso/Makefile2
-rw-r--r--arch/arm64/kernel/vdso/gettimeofday.S7
-rw-r--r--arch/arm64/lib/bitops.S3
-rw-r--r--arch/arm64/mm/dma-mapping.c1
-rw-r--r--arch/arm64/mm/mmu.c12
-rw-r--r--arch/arm64/mm/pgd.c11
-rw-r--r--arch/ia64/include/asm/unistd.h2
-rw-r--r--arch/ia64/include/uapi/asm/unistd.h2
-rw-r--r--arch/ia64/kernel/entry.S2
-rw-r--r--arch/microblaze/include/asm/delay.h2
-rw-r--r--arch/microblaze/include/asm/io.h6
-rw-r--r--arch/microblaze/kernel/head.S2
-rw-r--r--arch/mips/alchemy/devboards/db1000.c7
-rw-r--r--arch/mips/include/asm/fpu.h2
-rw-r--r--arch/mips/include/uapi/asm/unistd.h18
-rw-r--r--arch/mips/kernel/scall32-o32.S2
-rw-r--r--arch/mips/kernel/scall64-64.S2
-rw-r--r--arch/mips/kernel/scall64-n32.S2
-rw-r--r--arch/mips/kernel/scall64-o32.S2
-rw-r--r--arch/parisc/hpux/fs.c15
-rw-r--r--arch/s390/appldata/appldata_base.c1
-rw-r--r--arch/s390/crypto/aes_s390.c65
-rw-r--r--arch/s390/crypto/des_s390.c95
-rw-r--r--arch/s390/kernel/head64.S7
-rw-r--r--arch/s390/mm/page-states.c10
-rw-r--r--arch/x86/Kconfig7
-rw-r--r--arch/x86/Kconfig.debug1
-rw-r--r--arch/x86/include/asm/amd_nb.h2
-rw-r--r--arch/x86/include/asm/pgtable.h14
-rw-r--r--arch/x86/include/asm/tlbflush.h6
-rw-r--r--arch/x86/include/asm/xen/page.h5
-rw-r--r--arch/x86/kernel/amd_nb.c2
-rw-r--r--arch/x86/kernel/cpu/amd.c5
-rw-r--r--arch/x86/kernel/cpu/intel.c10
-rw-r--r--arch/x86/kernel/cpu/microcode/amd_early.c43
-rw-r--r--arch/x86/kernel/cpu/mtrr/generic.c4
-rw-r--r--arch/x86/kernel/irq.c9
-rw-r--r--arch/x86/kernel/quirks.c37
-rw-r--r--arch/x86/mm/numa.c21
-rw-r--r--arch/x86/mm/numa_32.c2
-rw-r--r--arch/x86/mm/srat.c16
-rw-r--r--arch/x86/mm/tlb.c52
-rw-r--r--arch/x86/platform/efi/efi-bgrt.c10
-rw-r--r--arch/x86/xen/enlighten.c12
-rw-r--r--arch/x86/xen/mmu.c4
-rw-r--r--arch/x86/xen/p2m.c17
-rw-r--r--drivers/acpi/battery.c2
-rw-r--r--drivers/acpi/proc.c2
-rw-r--r--drivers/acpi/scan.c6
-rw-r--r--drivers/acpi/utils.c4
-rw-r--r--drivers/acpi/video_detect.c8
-rw-r--r--drivers/block/nvme-core.c610
-rw-r--r--drivers/block/nvme-scsi.c147
-rw-r--r--drivers/block/xen-blkback/blkback.c15
-rw-r--r--drivers/char/virtio_console.c9
-rw-r--r--drivers/cpufreq/intel_pstate.c21
-rw-r--r--drivers/edac/edac_mc_sysfs.c2
-rw-r--r--drivers/gpu/drm/ast/ast_fb.c2
-rw-r--r--drivers/gpu/drm/cirrus/cirrus_fbdev.c2
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_fb.c2
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_mode.c4
-rw-r--r--drivers/gpu/drm/radeon/r600_cs.c18
-rw-r--r--drivers/gpu/drm/radeon/radeon_drv.c3
-rw-r--r--drivers/gpu/drm/radeon/reg_srcs/r6001
-rw-r--r--drivers/gpu/drm/ttm/ttm_object.c2
-rw-r--r--drivers/gpu/drm/ttm/ttm_tt.c3
-rw-r--r--drivers/gpu/drm/vmwgfx/svga3d_reg.h24
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_context.c144
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.c7
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.h35
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c330
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c93
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_mob.c1
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_resource.c11
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_shader.c467
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_surface.c6
-rw-r--r--drivers/hwmon/da9055-hwmon.c4
-rw-r--r--drivers/hwmon/pmbus/pmbus_core.c68
-rw-r--r--drivers/irqchip/Makefile1
-rw-r--r--drivers/irqchip/irq-armada-370-xp.c4
-rw-r--r--drivers/irqchip/irq-zevio.c127
-rw-r--r--drivers/isdn/hisax/q931.c2
-rw-r--r--drivers/md/bcache/extents.c2
-rw-r--r--drivers/media/dvb-frontends/cx24117.c10
-rw-r--r--drivers/media/dvb-frontends/nxt200x.c2
-rw-r--r--drivers/media/i2c/adv7842.c2
-rw-r--r--drivers/media/i2c/s5k5baf.c30
-rw-r--r--drivers/media/pci/bt8xx/bttv-cards.c2
-rw-r--r--drivers/media/pci/bt8xx/bttv-gpio.c2
-rw-r--r--drivers/media/pci/saa7134/saa7134-cards.c2
-rw-r--r--drivers/media/platform/exynos4-is/fimc-core.c5
-rw-r--r--drivers/media/platform/exynos4-is/fimc-lite.c7
-rw-r--r--drivers/media/platform/s5p-jpeg/jpeg-core.c8
-rw-r--r--drivers/media/usb/dvb-usb-v2/af9035.c2
-rw-r--r--drivers/media/usb/dvb-usb-v2/mxl111sf-demod.c4
-rw-r--r--drivers/media/usb/dvb-usb-v2/mxl111sf-demod.h2
-rw-r--r--drivers/media/usb/dvb-usb-v2/mxl111sf-gpio.c2
-rw-r--r--drivers/media/usb/dvb-usb-v2/mxl111sf-gpio.h2
-rw-r--r--drivers/media/usb/dvb-usb-v2/mxl111sf-i2c.c2
-rw-r--r--drivers/media/usb/dvb-usb-v2/mxl111sf-i2c.h2
-rw-r--r--drivers/media/usb/dvb-usb-v2/mxl111sf-phy.c2
-rw-r--r--drivers/media/usb/dvb-usb-v2/mxl111sf-phy.h2
-rw-r--r--drivers/media/usb/dvb-usb-v2/mxl111sf-reg.h2
-rw-r--r--drivers/media/usb/dvb-usb-v2/mxl111sf-tuner.c4
-rw-r--r--drivers/media/usb/dvb-usb-v2/mxl111sf-tuner.h4
-rw-r--r--drivers/media/usb/dvb-usb-v2/mxl111sf.c6
-rw-r--r--drivers/media/usb/dvb-usb-v2/mxl111sf.h2
-rw-r--r--drivers/media/usb/hdpvr/hdpvr-core.c4
-rw-r--r--drivers/media/v4l2-core/v4l2-dv-timings.c1
-rw-r--r--drivers/media/v4l2-core/videobuf-dma-contig.c12
-rw-r--r--drivers/media/v4l2-core/videobuf-dma-sg.c10
-rw-r--r--drivers/media/v4l2-core/videobuf-vmalloc.c10
-rw-r--r--drivers/media/v4l2-core/videobuf2-core.c5
-rw-r--r--drivers/message/i2o/i2o_config.c4
-rw-r--r--drivers/misc/sgi-gru/grukdump.c11
-rw-r--r--drivers/net/bonding/bond_main.c26
-rw-r--r--drivers/net/can/Kconfig2
-rw-r--r--drivers/net/can/dev.c15
-rw-r--r--drivers/net/can/flexcan.c7
-rw-r--r--drivers/net/can/janz-ican3.c20
-rw-r--r--drivers/net/can/vcan.c9
-rw-r--r--drivers/net/ethernet/3com/3c59x.c2
-rw-r--r--drivers/net/ethernet/allwinner/sun4i-emac.c3
-rw-r--r--drivers/net/ethernet/atheros/alx/main.c1
-rw-r--r--drivers/net/ethernet/broadcom/bnx2.c2
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h2
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c12
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c6
-rw-r--r--drivers/net/ethernet/broadcom/tg3.c17
-rw-r--r--drivers/net/ethernet/ethoc.c138
-rw-r--r--drivers/net/ethernet/intel/e100.c2
-rw-r--r--drivers/net/ethernet/neterion/vxge/vxge-main.c6
-rw-r--r--drivers/net/ethernet/sfc/tx.c2
-rw-r--r--drivers/net/ethernet/ti/cpsw.c14
-rw-r--r--drivers/net/irda/Kconfig7
-rw-r--r--drivers/net/irda/Makefile1
-rw-r--r--drivers/net/irda/ep7211-sir.c70
-rw-r--r--drivers/net/phy/dp83640.c19
-rw-r--r--drivers/net/phy/mdio-sun4i.c3
-rw-r--r--drivers/net/phy/phy_device.c38
-rw-r--r--drivers/net/usb/Kconfig16
-rw-r--r--drivers/net/usb/Makefile1
-rw-r--r--drivers/net/usb/hso.c32
-rw-r--r--drivers/net/usb/qmi_wwan.c2
-rw-r--r--drivers/net/usb/r8152.c17
-rw-r--r--drivers/net/usb/sr9800.c870
-rw-r--r--drivers/net/usb/sr9800.h202
-rw-r--r--drivers/net/vxlan.c3
-rw-r--r--drivers/net/wan/dlci.c5
-rw-r--r--drivers/net/wireless/ath/ar5523/ar5523.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_eeprom.c4
-rw-r--r--drivers/net/wireless/ath/ath9k/htc.h2
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_init.c8
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_main.c63
-rw-r--r--drivers/net/wireless/ath/ath9k/hw.c5
-rw-r--r--drivers/net/wireless/ath/ath9k/init.c8
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-nvm-parse.c5
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h4
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/mac80211.c2
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/scan.c5
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/sta.c2
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/tx.c73
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/utils.c2
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/drv.c7
-rw-r--r--drivers/net/wireless/rt2x00/rt2500pci.c5
-rw-r--r--drivers/net/wireless/rt2x00/rt2500usb.c5
-rw-r--r--drivers/net/wireless/rt2x00/rt2800lib.c5
-rw-r--r--drivers/net/wireless/rtl818x/rtl8180/dev.c23
-rw-r--r--drivers/net/xen-netback/common.h6
-rw-r--r--drivers/net/xen-netback/interface.c1
-rw-r--r--drivers/net/xen-netback/netback.c16
-rw-r--r--drivers/net/xen-netfront.c5
-rw-r--r--drivers/pci/hotplug/acpiphp_glue.c55
-rw-r--r--drivers/pinctrl/core.c8
-rw-r--r--drivers/pinctrl/pinctrl-at91.c10
-rw-r--r--drivers/pinctrl/pinctrl-imx1-core.c10
-rw-r--r--drivers/pinctrl/pinctrl-tegra.c2
-rw-r--r--drivers/pinctrl/sirf/pinctrl-prima2.c2
-rw-r--r--drivers/pinctrl/vt8500/pinctrl-wmt.c15
-rw-r--r--drivers/regulator/ab3100.c4
-rw-r--r--drivers/regulator/core.c9
-rw-r--r--drivers/regulator/s2mps11.c1
-rw-r--r--drivers/s390/cio/cio.c40
-rw-r--r--drivers/s390/cio/qdio.h14
-rw-r--r--drivers/s390/cio/qdio_main.c2
-rw-r--r--drivers/staging/media/go7007/go7007-loader.c4
-rw-r--r--drivers/watchdog/Kconfig1
-rw-r--r--drivers/xen/gntdev.c13
-rw-r--r--drivers/xen/grant-table.c89
-rw-r--r--fs/btrfs/check-integrity.c4
-rw-r--r--fs/btrfs/compression.c2
-rw-r--r--fs/btrfs/disk-io.c4
-rw-r--r--fs/btrfs/extent-tree.c1
-rw-r--r--fs/btrfs/inode.c2
-rw-r--r--fs/btrfs/ioctl.c6
-rw-r--r--fs/btrfs/send.c12
-rw-r--r--fs/btrfs/super.c2
-rw-r--r--fs/buffer.c6
-rw-r--r--fs/cifs/cifsacl.c28
-rw-r--r--fs/cifs/cifsglob.h9
-rw-r--r--fs/cifs/cifsproto.h3
-rw-r--r--fs/cifs/cifssmb.c15
-rw-r--r--fs/cifs/file.c35
-rw-r--r--fs/cifs/inode.c13
-rw-r--r--fs/cifs/smb1ops.c8
-rw-r--r--fs/cifs/smb2pdu.c5
-rw-r--r--fs/cifs/smb2proto.h3
-rw-r--r--fs/cifs/xattr.c15
-rw-r--r--fs/exec.c45
-rw-r--r--fs/ext4/file.c2
-rw-r--r--fs/file.c2
-rw-r--r--fs/jfs/xattr.c14
-rw-r--r--fs/kernfs/dir.c12
-rw-r--r--fs/namei.c30
-rw-r--r--fs/nfs/dir.c5
-rw-r--r--fs/nfs/nfs3acl.c34
-rw-r--r--fs/nfs/nfs4client.c2
-rw-r--r--fs/nfs/nfs4proc.c8
-rw-r--r--fs/nfs/nfs4session.c25
-rw-r--r--fs/nfs/nfs4session.h2
-rw-r--r--fs/ntfs/file.c2
-rw-r--r--fs/ocfs2/alloc.c40
-rw-r--r--fs/ocfs2/file.c52
-rw-r--r--fs/ocfs2/localalloc.c42
-rw-r--r--fs/ocfs2/localalloc.h6
-rw-r--r--fs/ocfs2/namei.c17
-rw-r--r--fs/posix_acl.c18
-rw-r--r--fs/proc/vmcore.c26
-rw-r--r--fs/sync.c17
-rw-r--r--fs/xfs/xfs_file.c2
-rw-r--r--include/linux/binfmts.h1
-rw-r--r--include/linux/can/skb.h38
-rw-r--r--include/linux/fs.h9
-rw-r--r--include/linux/nfs_xdr.h2
-rw-r--r--include/linux/nvme.h6
-rw-r--r--include/linux/page-flags.h4
-rw-r--r--include/linux/sched.h3
-rw-r--r--include/linux/smp.h3
-rw-r--r--include/linux/vm_event_item.h4
-rw-r--r--include/linux/vmstat.h8
-rw-r--r--include/net/datalink.h2
-rw-r--r--include/net/dn.h2
-rw-r--r--include/net/dn_route.h2
-rw-r--r--include/net/ethoc.h1
-rw-r--r--include/net/ipx.h11
-rw-r--r--include/net/net_namespace.h8
-rw-r--r--include/net/netfilter/nf_conntrack.h2
-rw-r--r--include/net/netfilter/nf_tables.h9
-rw-r--r--include/net/netfilter/nft_reject.h25
-rw-r--r--include/uapi/linux/in6.h23
-rw-r--r--include/uapi/linux/nvme.h11
-rw-r--r--include/xen/grant_table.h8
-rw-r--r--init/main.c2
-rw-r--r--kernel/auditsc.c2
-rw-r--r--kernel/irq/Kconfig1
-rw-r--r--kernel/kmod.c2
-rw-r--r--lib/Kconfig.debug2
-rw-r--r--lib/Makefile1
-rw-r--r--mm/filemap.c4
-rw-r--r--mm/memory-failure.c6
-rw-r--r--mm/page-writeback.c5
-rw-r--r--mm/slub.c38
-rw-r--r--mm/swap_state.c63
-rw-r--r--mm/swapfile.c11
-rw-r--r--mm/vmstat.c4
-rw-r--r--net/9p/client.c2
-rw-r--r--net/9p/trans_virtio.c5
-rw-r--r--net/bridge/br_device.c54
-rw-r--r--net/bridge/br_fdb.c137
-rw-r--r--net/bridge/br_if.c6
-rw-r--r--net/bridge/br_input.c4
-rw-r--r--net/bridge/br_private.h13
-rw-r--r--net/bridge/br_stp_if.c2
-rw-r--r--net/bridge/br_vlan.c27
-rw-r--r--net/caif/caif_dev.c1
-rw-r--r--net/caif/cfsrvl.c1
-rw-r--r--net/can/af_can.c3
-rw-r--r--net/can/bcm.c4
-rw-r--r--net/can/raw.c1
-rw-r--r--net/ceph/messenger.c8
-rw-r--r--net/ceph/osd_client.c84
-rw-r--r--net/core/dev.c6
-rw-r--r--net/core/fib_rules.c7
-rw-r--r--net/core/netpoll.c4
-rw-r--r--net/core/rtnetlink.c2
-rw-r--r--net/core/sock.c6
-rw-r--r--net/decnet/af_decnet.c5
-rw-r--r--net/ieee802154/6lowpan.c23
-rw-r--r--net/ipv4/devinet.c3
-rw-r--r--net/ipv4/ip_tunnel.c29
-rw-r--r--net/ipv4/netfilter/Kconfig5
-rw-r--r--net/ipv4/netfilter/Makefile1
-rw-r--r--net/ipv4/netfilter/nf_nat_h323.c5
-rw-r--r--net/ipv4/netfilter/nft_reject_ipv4.c75
-rw-r--r--net/ipv4/tcp.c2
-rw-r--r--net/ipv4/tcp_input.c18
-rw-r--r--net/ipv4/tcp_output.c15
-rw-r--r--net/ipv4/udp_offload.c17
-rw-r--r--net/ipv6/icmp.c2
-rw-r--r--net/ipv6/netfilter/Kconfig5
-rw-r--r--net/ipv6/netfilter/Makefile1
-rw-r--r--net/ipv6/netfilter/nft_reject_ipv6.c76
-rw-r--r--net/ipx/af_ipx.c22
-rw-r--r--net/ipx/ipx_route.c4
-rw-r--r--net/mac80211/cfg.c44
-rw-r--r--net/mac80211/ht.c4
-rw-r--r--net/mac80211/ibss.c5
-rw-r--r--net/mac80211/iface.c27
-rw-r--r--net/mac80211/tx.c2
-rw-r--r--net/netfilter/Kconfig6
-rw-r--r--net/netfilter/Makefile1
-rw-r--r--net/netfilter/ipvs/ip_vs_conn.c8
-rw-r--r--net/netfilter/nf_conntrack_core.c55
-rw-r--r--net/netfilter/nf_synproxy_core.c5
-rw-r--r--net/netfilter/nf_tables_api.c82
-rw-r--r--net/netfilter/nf_tables_core.c6
-rw-r--r--net/netfilter/nft_ct.c16
-rw-r--r--net/netfilter/nft_log.c5
-rw-r--r--net/netfilter/nft_lookup.c1
-rw-r--r--net/netfilter/nft_queue.c4
-rw-r--r--net/netfilter/nft_rbtree.c16
-rw-r--r--net/netfilter/nft_reject.c89
-rw-r--r--net/netfilter/nft_reject_inet.c63
-rw-r--r--net/netfilter/xt_CT.c7
-rw-r--r--net/openvswitch/datapath.c23
-rw-r--r--net/openvswitch/flow_table.c88
-rw-r--r--net/openvswitch/flow_table.h2
-rw-r--r--net/sctp/ipv6.c2
-rw-r--r--net/sunrpc/svc_xprt.c6
-rw-r--r--net/wireless/core.c17
-rw-r--r--net/wireless/core.h4
-rw-r--r--net/wireless/nl80211.c32
-rw-r--r--net/wireless/nl80211.h8
-rw-r--r--net/wireless/scan.c40
-rw-r--r--net/wireless/sme.c2
-rwxr-xr-xscripts/checkpatch.pl4
-rwxr-xr-xscripts/get_maintainer.pl2
-rw-r--r--security/Kconfig2
-rw-r--r--security/selinux/nlmsgtab.c2
-rw-r--r--security/selinux/ss/services.c4
-rw-r--r--sound/pci/hda/patch_analog.c27
-rw-r--r--sound/pci/hda/patch_realtek.c12
-rw-r--r--sound/usb/Kconfig1
-rw-r--r--tools/perf/builtin-buildid-cache.c33
-rw-r--r--tools/perf/builtin-record.c10
-rw-r--r--tools/perf/design.txt1
-rw-r--r--tools/perf/perf.h4
-rw-r--r--tools/perf/tests/vmlinux-kallsyms.c10
-rw-r--r--tools/perf/util/event.c36
-rw-r--r--tools/perf/util/event.h6
-rw-r--r--tools/perf/util/include/asm/hash.h6
-rw-r--r--tools/perf/util/machine.c42
-rw-r--r--tools/perf/util/machine.h2
-rw-r--r--tools/perf/util/map.c5
-rw-r--r--tools/perf/util/map.h1
-rw-r--r--tools/perf/util/symbol-elf.c4
-rw-r--r--tools/perf/util/symbol.c65
408 files changed, 6020 insertions, 1936 deletions
diff --git a/.gitignore b/.gitignore
index 7e9932e55475..42fa0d5626a9 100644
--- a/.gitignore
+++ b/.gitignore
@@ -92,3 +92,6 @@ extra_certificates
92signing_key.priv 92signing_key.priv
93signing_key.x509 93signing_key.x509
94x509.genkey 94x509.genkey
95
96# Kconfig presets
97all.config
diff --git a/Documentation/00-INDEX b/Documentation/00-INDEX
index 38f8444bdd0e..07de7e19b4ce 100644
--- a/Documentation/00-INDEX
+++ b/Documentation/00-INDEX
@@ -29,6 +29,8 @@ DMA-ISA-LPC.txt
29 - How to do DMA with ISA (and LPC) devices. 29 - How to do DMA with ISA (and LPC) devices.
30DMA-attributes.txt 30DMA-attributes.txt
31 - listing of the various possible attributes a DMA region can have 31 - listing of the various possible attributes a DMA region can have
32dmatest.txt
33 - how to compile, configure and use the dmatest system.
32DocBook/ 34DocBook/
33 - directory with DocBook templates etc. for kernel documentation. 35 - directory with DocBook templates etc. for kernel documentation.
34EDID/ 36EDID/
@@ -77,6 +79,8 @@ arm/
77 - directory with info about Linux on the ARM architecture. 79 - directory with info about Linux on the ARM architecture.
78arm64/ 80arm64/
79 - directory with info about Linux on the 64 bit ARM architecture. 81 - directory with info about Linux on the 64 bit ARM architecture.
82assoc_array.txt
83 - generic associative array intro.
80atomic_ops.txt 84atomic_ops.txt
81 - semantics and behavior of atomic and bitmask operations. 85 - semantics and behavior of atomic and bitmask operations.
82auxdisplay/ 86auxdisplay/
@@ -87,6 +91,8 @@ bad_memory.txt
87 - how to use kernel parameters to exclude bad RAM regions. 91 - how to use kernel parameters to exclude bad RAM regions.
88basic_profiling.txt 92basic_profiling.txt
89 - basic instructions for those who wants to profile Linux kernel. 93 - basic instructions for those who wants to profile Linux kernel.
94bcache.txt
95 - Block-layer cache on fast SSDs to improve slow (raid) I/O performance.
90binfmt_misc.txt 96binfmt_misc.txt
91 - info on the kernel support for extra binary formats. 97 - info on the kernel support for extra binary formats.
92blackfin/ 98blackfin/
@@ -171,6 +177,8 @@ early-userspace/
171 - info about initramfs, klibc, and userspace early during boot. 177 - info about initramfs, klibc, and userspace early during boot.
172edac.txt 178edac.txt
173 - information on EDAC - Error Detection And Correction 179 - information on EDAC - Error Detection And Correction
180efi-stub.txt
181 - How to use the EFI boot stub to bypass GRUB or elilo on EFI systems.
174eisa.txt 182eisa.txt
175 - info on EISA bus support. 183 - info on EISA bus support.
176email-clients.txt 184email-clients.txt
@@ -195,8 +203,8 @@ futex-requeue-pi.txt
195 - info on requeueing of tasks from a non-PI futex to a PI futex 203 - info on requeueing of tasks from a non-PI futex to a PI futex
196gcov.txt 204gcov.txt
197 - use of GCC's coverage testing tool "gcov" with the Linux kernel 205 - use of GCC's coverage testing tool "gcov" with the Linux kernel
198gpio.txt 206gpio/
199 - overview of GPIO (General Purpose Input/Output) access conventions. 207 - gpio related documentation
200hid/ 208hid/
201 - directory with information on human interface devices 209 - directory with information on human interface devices
202highuid.txt 210highuid.txt
@@ -255,6 +263,8 @@ kernel-docs.txt
255 - listing of various WWW + books that document kernel internals. 263 - listing of various WWW + books that document kernel internals.
256kernel-parameters.txt 264kernel-parameters.txt
257 - summary listing of command line / boot prompt args for the kernel. 265 - summary listing of command line / boot prompt args for the kernel.
266kernel-per-CPU-kthreads.txt
267 - List of all per-CPU kthreads and how they introduce jitter.
258kmemcheck.txt 268kmemcheck.txt
259 - info on dynamic checker that detects uses of uninitialized memory. 269 - info on dynamic checker that detects uses of uninitialized memory.
260kmemleak.txt 270kmemleak.txt
@@ -299,8 +309,6 @@ memory-devices/
299 - directory with info on parts like the Texas Instruments EMIF driver 309 - directory with info on parts like the Texas Instruments EMIF driver
300memory-hotplug.txt 310memory-hotplug.txt
301 - Hotpluggable memory support, how to use and current status. 311 - Hotpluggable memory support, how to use and current status.
302memory.txt
303 - info on typical Linux memory problems.
304metag/ 312metag/
305 - directory with info about Linux on Meta architecture. 313 - directory with info about Linux on Meta architecture.
306mips/ 314mips/
@@ -311,6 +319,8 @@ mmc/
311 - directory with info about the MMC subsystem 319 - directory with info about the MMC subsystem
312mn10300/ 320mn10300/
313 - directory with info about the mn10300 architecture port 321 - directory with info about the mn10300 architecture port
322module-signing.txt
323 - Kernel module signing for increased security when loading modules.
314mtd/ 324mtd/
315 - directory with info about memory technology devices (flash) 325 - directory with info about memory technology devices (flash)
316mono.txt 326mono.txt
@@ -343,6 +353,8 @@ pcmcia/
343 - info on the Linux PCMCIA driver. 353 - info on the Linux PCMCIA driver.
344percpu-rw-semaphore.txt 354percpu-rw-semaphore.txt
345 - RCU based read-write semaphore optimized for locking for reading 355 - RCU based read-write semaphore optimized for locking for reading
356phy.txt
357 - Description of the generic PHY framework.
346pi-futex.txt 358pi-futex.txt
347 - documentation on lightweight priority inheritance futexes. 359 - documentation on lightweight priority inheritance futexes.
348pinctrl.txt 360pinctrl.txt
@@ -431,6 +443,8 @@ sysrq.txt
431 - info on the magic SysRq key. 443 - info on the magic SysRq key.
432target/ 444target/
433 - directory with info on generating TCM v4 fabric .ko modules 445 - directory with info on generating TCM v4 fabric .ko modules
446this_cpu_ops.txt
447 - List rationale behind and the way to use this_cpu operations.
434thermal/ 448thermal/
435 - directory with information on managing thermal issues (CPU/temp) 449 - directory with information on managing thermal issues (CPU/temp)
436trace/ 450trace/
@@ -469,6 +483,8 @@ wimax/
469 - directory with info about Intel Wireless Wimax Connections 483 - directory with info about Intel Wireless Wimax Connections
470workqueue.txt 484workqueue.txt
471 - information on the Concurrency Managed Workqueue implementation 485 - information on the Concurrency Managed Workqueue implementation
486ww-mutex-design.txt
487 - Intro to Mutex wait/would deadlock handling.s
472x86/x86_64/ 488x86/x86_64/
473 - directory with info on Linux support for AMD x86-64 (Hammer) machines. 489 - directory with info on Linux support for AMD x86-64 (Hammer) machines.
474xtensa/ 490xtensa/
diff --git a/Documentation/RCU/00-INDEX b/Documentation/RCU/00-INDEX
index 1d7a885761f5..fa57139f50bf 100644
--- a/Documentation/RCU/00-INDEX
+++ b/Documentation/RCU/00-INDEX
@@ -8,6 +8,8 @@ listRCU.txt
8 - Using RCU to Protect Read-Mostly Linked Lists 8 - Using RCU to Protect Read-Mostly Linked Lists
9lockdep.txt 9lockdep.txt
10 - RCU and lockdep checking 10 - RCU and lockdep checking
11lockdep-splat.txt
12 - RCU Lockdep splats explained.
11NMI-RCU.txt 13NMI-RCU.txt
12 - Using RCU to Protect Dynamic NMI Handlers 14 - Using RCU to Protect Dynamic NMI Handlers
13rcubarrier.txt 15rcubarrier.txt
diff --git a/Documentation/arm/00-INDEX b/Documentation/arm/00-INDEX
index 36420e116c90..a94090cc785d 100644
--- a/Documentation/arm/00-INDEX
+++ b/Documentation/arm/00-INDEX
@@ -4,6 +4,8 @@ Booting
4 - requirements for booting 4 - requirements for booting
5Interrupts 5Interrupts
6 - ARM Interrupt subsystem documentation 6 - ARM Interrupt subsystem documentation
7IXP4xx
8 - Intel IXP4xx Network processor.
7msm 9msm
8 - MSM specific documentation 10 - MSM specific documentation
9Netwinder 11Netwinder
@@ -24,8 +26,16 @@ SPEAr
24 - ST SPEAr platform Linux Overview 26 - ST SPEAr platform Linux Overview
25VFP/ 27VFP/
26 - Release notes for Linux Kernel Vector Floating Point support code 28 - Release notes for Linux Kernel Vector Floating Point support code
29cluster-pm-race-avoidance.txt
30 - Algorithm for CPU and Cluster setup/teardown
27empeg/ 31empeg/
28 - Ltd's Empeg MP3 Car Audio Player 32 - Ltd's Empeg MP3 Car Audio Player
33firmware.txt
34 - Secure firmware registration and calling.
35kernel_mode_neon.txt
36 - How to use NEON instructions in kernel mode
37kernel_user_helpers.txt
38 - Helper functions in kernel space made available for userspace.
29mem_alignment 39mem_alignment
30 - alignment abort handler documentation 40 - alignment abort handler documentation
31memory.txt 41memory.txt
@@ -34,3 +44,7 @@ nwfpe/
34 - NWFPE floating point emulator documentation 44 - NWFPE floating point emulator documentation
35swp_emulation 45swp_emulation
36 - SWP/SWPB emulation handler/logging description 46 - SWP/SWPB emulation handler/logging description
47tcm.txt
48 - ARM Tightly Coupled Memory
49vlocks.txt
50 - Voting locks, low-level mechanism relying on memory system atomic writes.
diff --git a/Documentation/blackfin/00-INDEX b/Documentation/blackfin/00-INDEX
index 2df0365f2dff..c54fcdd4ae9f 100644
--- a/Documentation/blackfin/00-INDEX
+++ b/Documentation/blackfin/00-INDEX
@@ -1,8 +1,10 @@
100-INDEX 100-INDEX
2 - This file 2 - This file
3 3Makefile
4 - Makefile for gptimers example file.
4bfin-gpio-notes.txt 5bfin-gpio-notes.txt
5 - Notes in developing/using bfin-gpio driver. 6 - Notes in developing/using bfin-gpio driver.
6
7bfin-spi-notes.txt 7bfin-spi-notes.txt
8 - Notes for using bfin spi bus driver. 8 - Notes for using bfin spi bus driver.
9gptimers-example.c
10 - gptimers example
diff --git a/Documentation/block/00-INDEX b/Documentation/block/00-INDEX
index 929d9904f74b..e840b47613f7 100644
--- a/Documentation/block/00-INDEX
+++ b/Documentation/block/00-INDEX
@@ -14,6 +14,8 @@ deadline-iosched.txt
14 - Deadline IO scheduler tunables 14 - Deadline IO scheduler tunables
15ioprio.txt 15ioprio.txt
16 - Block io priorities (in CFQ scheduler) 16 - Block io priorities (in CFQ scheduler)
17null_blk.txt
18 - Null block for block-layer benchmarking.
17queue-sysfs.txt 19queue-sysfs.txt
18 - Queue's sysfs entries 20 - Queue's sysfs entries
19request.txt 21request.txt
diff --git a/Documentation/devicetree/00-INDEX b/Documentation/devicetree/00-INDEX
index b78f691fd847..8c4102c6a5e7 100644
--- a/Documentation/devicetree/00-INDEX
+++ b/Documentation/devicetree/00-INDEX
@@ -8,3 +8,5 @@ https://lists.ozlabs.org/listinfo/devicetree-discuss
8 - this file 8 - this file
9booting-without-of.txt 9booting-without-of.txt
10 - Booting Linux without Open Firmware, describes history and format of device trees. 10 - Booting Linux without Open Firmware, describes history and format of device trees.
11usage-model.txt
12 - How Linux uses DT and what DT aims to solve.
\ No newline at end of file
diff --git a/Documentation/devicetree/bindings/interrupt-controller/lsi,zevio-intc.txt b/Documentation/devicetree/bindings/interrupt-controller/lsi,zevio-intc.txt
new file mode 100644
index 000000000000..aee38e7c13e7
--- /dev/null
+++ b/Documentation/devicetree/bindings/interrupt-controller/lsi,zevio-intc.txt
@@ -0,0 +1,18 @@
1TI-NSPIRE interrupt controller
2
3Required properties:
4- compatible: Compatible property value should be "lsi,zevio-intc".
5
6- reg: Physical base address of the controller and length of memory mapped
7 region.
8
9- interrupt-controller : Identifies the node as an interrupt controller
10
11Example:
12
13interrupt-controller {
14 compatible = "lsi,zevio-intc";
15 interrupt-controller;
16 reg = <0xDC000000 0x1000>;
17 #interrupt-cells = <1>;
18};
diff --git a/Documentation/devicetree/bindings/net/allwinner,sun4i-emac.txt b/Documentation/devicetree/bindings/net/allwinner,sun4i-emac.txt
index b90bfcd138ff..863d5b8155c7 100644
--- a/Documentation/devicetree/bindings/net/allwinner,sun4i-emac.txt
+++ b/Documentation/devicetree/bindings/net/allwinner,sun4i-emac.txt
@@ -1,7 +1,8 @@
1* Allwinner EMAC ethernet controller 1* Allwinner EMAC ethernet controller
2 2
3Required properties: 3Required properties:
4- compatible: should be "allwinner,sun4i-emac". 4- compatible: should be "allwinner,sun4i-a10-emac" (Deprecated:
5 "allwinner,sun4i-emac")
5- reg: address and length of the register set for the device. 6- reg: address and length of the register set for the device.
6- interrupts: interrupt for the device 7- interrupts: interrupt for the device
7- phy: A phandle to a phy node defining the PHY address (as the reg 8- phy: A phandle to a phy node defining the PHY address (as the reg
@@ -14,7 +15,7 @@ Optional properties:
14Example: 15Example:
15 16
16emac: ethernet@01c0b000 { 17emac: ethernet@01c0b000 {
17 compatible = "allwinner,sun4i-emac"; 18 compatible = "allwinner,sun4i-a10-emac";
18 reg = <0x01c0b000 0x1000>; 19 reg = <0x01c0b000 0x1000>;
19 interrupts = <55>; 20 interrupts = <55>;
20 clocks = <&ahb_gates 17>; 21 clocks = <&ahb_gates 17>;
diff --git a/Documentation/devicetree/bindings/net/allwinner,sun4i-mdio.txt b/Documentation/devicetree/bindings/net/allwinner,sun4i-mdio.txt
index 00b9f9a3ec1d..4ec56413779d 100644
--- a/Documentation/devicetree/bindings/net/allwinner,sun4i-mdio.txt
+++ b/Documentation/devicetree/bindings/net/allwinner,sun4i-mdio.txt
@@ -1,7 +1,8 @@
1* Allwinner A10 MDIO Ethernet Controller interface 1* Allwinner A10 MDIO Ethernet Controller interface
2 2
3Required properties: 3Required properties:
4- compatible: should be "allwinner,sun4i-mdio". 4- compatible: should be "allwinner,sun4i-a10-mdio"
5 (Deprecated: "allwinner,sun4i-mdio").
5- reg: address and length of the register set for the device. 6- reg: address and length of the register set for the device.
6 7
7Optional properties: 8Optional properties:
@@ -9,7 +10,7 @@ Optional properties:
9 10
10Example at the SoC level: 11Example at the SoC level:
11mdio@01c0b080 { 12mdio@01c0b080 {
12 compatible = "allwinner,sun4i-mdio"; 13 compatible = "allwinner,sun4i-a10-mdio";
13 reg = <0x01c0b080 0x14>; 14 reg = <0x01c0b080 0x14>;
14 #address-cells = <1>; 15 #address-cells = <1>;
15 #size-cells = <0>; 16 #size-cells = <0>;
diff --git a/Documentation/dvb/contributors.txt b/Documentation/dvb/contributors.txt
index 47c30098dab6..731a009723c7 100644
--- a/Documentation/dvb/contributors.txt
+++ b/Documentation/dvb/contributors.txt
@@ -78,7 +78,7 @@ Peter Beutner <p.beutner@gmx.net>
78Wilson Michaels <wilsonmichaels@earthlink.net> 78Wilson Michaels <wilsonmichaels@earthlink.net>
79 for the lgdt330x frontend driver, and various bugfixes 79 for the lgdt330x frontend driver, and various bugfixes
80 80
81Michael Krufky <mkrufky@m1k.net> 81Michael Krufky <mkrufky@linuxtv.org>
82 for maintaining v4l/dvb inter-tree dependencies 82 for maintaining v4l/dvb inter-tree dependencies
83 83
84Taylor Jacob <rtjacob@earthlink.net> 84Taylor Jacob <rtjacob@earthlink.net>
diff --git a/Documentation/fb/00-INDEX b/Documentation/fb/00-INDEX
index 30a70542e823..fe85e7c5907a 100644
--- a/Documentation/fb/00-INDEX
+++ b/Documentation/fb/00-INDEX
@@ -5,6 +5,8 @@ please mail me.
5 5
600-INDEX 600-INDEX
7 - this file. 7 - this file.
8api.txt
9 - The frame buffer API between applications and buffer devices.
8arkfb.txt 10arkfb.txt
9 - info on the fbdev driver for ARK Logic chips. 11 - info on the fbdev driver for ARK Logic chips.
10aty128fb.txt 12aty128fb.txt
@@ -51,12 +53,16 @@ sh7760fb.txt
51 - info on the SH7760/SH7763 integrated LCDC Framebuffer driver. 53 - info on the SH7760/SH7763 integrated LCDC Framebuffer driver.
52sisfb.txt 54sisfb.txt
53 - info on the framebuffer device driver for various SiS chips. 55 - info on the framebuffer device driver for various SiS chips.
56sm501.txt
57 - info on the framebuffer device driver for sm501 videoframebuffer.
54sstfb.txt 58sstfb.txt
55 - info on the frame buffer driver for 3dfx' Voodoo Graphics boards. 59 - info on the frame buffer driver for 3dfx' Voodoo Graphics boards.
56tgafb.txt 60tgafb.txt
57 - info on the TGA (DECChip 21030) frame buffer driver. 61 - info on the TGA (DECChip 21030) frame buffer driver.
58tridentfb.txt 62tridentfb.txt
59 info on the framebuffer driver for some Trident chip based cards. 63 info on the framebuffer driver for some Trident chip based cards.
64udlfb.txt
65 - Driver for DisplayLink USB 2.0 chips.
60uvesafb.txt 66uvesafb.txt
61 - info on the userspace VESA (VBE2+ compliant) frame buffer device. 67 - info on the userspace VESA (VBE2+ compliant) frame buffer device.
62vesafb.txt 68vesafb.txt
diff --git a/Documentation/filesystems/00-INDEX b/Documentation/filesystems/00-INDEX
index 632211cbdd56..ac28149aede4 100644
--- a/Documentation/filesystems/00-INDEX
+++ b/Documentation/filesystems/00-INDEX
@@ -2,6 +2,8 @@
2 - this file (info on some of the filesystems supported by linux). 2 - this file (info on some of the filesystems supported by linux).
3Locking 3Locking
4 - info on locking rules as they pertain to Linux VFS. 4 - info on locking rules as they pertain to Linux VFS.
5Makefile
6 - Makefile for building the filsystems-part of DocBook.
59p.txt 79p.txt
6 - 9p (v9fs) is an implementation of the Plan 9 remote fs protocol. 8 - 9p (v9fs) is an implementation of the Plan 9 remote fs protocol.
7adfs.txt 9adfs.txt
diff --git a/Documentation/filesystems/nfs/00-INDEX b/Documentation/filesystems/nfs/00-INDEX
index 66eb6c8c5334..53f3b596ac0d 100644
--- a/Documentation/filesystems/nfs/00-INDEX
+++ b/Documentation/filesystems/nfs/00-INDEX
@@ -12,6 +12,8 @@ nfs41-server.txt
12 - info on the Linux server implementation of NFSv4 minor version 1. 12 - info on the Linux server implementation of NFSv4 minor version 1.
13nfs-rdma.txt 13nfs-rdma.txt
14 - how to install and setup the Linux NFS/RDMA client and server software 14 - how to install and setup the Linux NFS/RDMA client and server software
15nfsd-admin-interfaces.txt
16 - Administrative interfaces for nfsd.
15nfsroot.txt 17nfsroot.txt
16 - short guide on setting up a diskless box with NFS root filesystem. 18 - short guide on setting up a diskless box with NFS root filesystem.
17pnfs.txt 19pnfs.txt
@@ -20,5 +22,5 @@ rpc-cache.txt
20 - introduction to the caching mechanisms in the sunrpc layer. 22 - introduction to the caching mechanisms in the sunrpc layer.
21idmapper.txt 23idmapper.txt
22 - information for configuring request-keys to be used by idmapper 24 - information for configuring request-keys to be used by idmapper
23knfsd-rpcgss.txt 25rpc-server-gss.txt
24 - Information on GSS authentication support in the NFS Server 26 - Information on GSS authentication support in the NFS Server
diff --git a/Documentation/ide/00-INDEX b/Documentation/ide/00-INDEX
index d6b778842b75..22f98ca79539 100644
--- a/Documentation/ide/00-INDEX
+++ b/Documentation/ide/00-INDEX
@@ -10,3 +10,5 @@ ide-tape.txt
10 - info on the IDE ATAPI streaming tape driver 10 - info on the IDE ATAPI streaming tape driver
11ide.txt 11ide.txt
12 - important info for users of ATA devices (IDE/EIDE disks and CD-ROMS). 12 - important info for users of ATA devices (IDE/EIDE disks and CD-ROMS).
13warm-plug-howto.txt
14 - using sysfs to remove and add IDE devices.
\ No newline at end of file
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index 8f441dab0396..7116fda7077f 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -1726,16 +1726,16 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
1726 option description. 1726 option description.
1727 1727
1728 memmap=nn[KMG]@ss[KMG] 1728 memmap=nn[KMG]@ss[KMG]
1729 [KNL] Force usage of a specific region of memory 1729 [KNL] Force usage of a specific region of memory.
1730 Region of memory to be used, from ss to ss+nn. 1730 Region of memory to be used is from ss to ss+nn.
1731 1731
1732 memmap=nn[KMG]#ss[KMG] 1732 memmap=nn[KMG]#ss[KMG]
1733 [KNL,ACPI] Mark specific memory as ACPI data. 1733 [KNL,ACPI] Mark specific memory as ACPI data.
1734 Region of memory to be used, from ss to ss+nn. 1734 Region of memory to be marked is from ss to ss+nn.
1735 1735
1736 memmap=nn[KMG]$ss[KMG] 1736 memmap=nn[KMG]$ss[KMG]
1737 [KNL,ACPI] Mark specific memory as reserved. 1737 [KNL,ACPI] Mark specific memory as reserved.
1738 Region of memory to be used, from ss to ss+nn. 1738 Region of memory to be reserved is from ss to ss+nn.
1739 Example: Exclude memory from 0x18690000-0x1869ffff 1739 Example: Exclude memory from 0x18690000-0x1869ffff
1740 memmap=64K$0x18690000 1740 memmap=64K$0x18690000
1741 or 1741 or
diff --git a/Documentation/laptops/00-INDEX b/Documentation/laptops/00-INDEX
index fa688538e757..d13b9a9a9e00 100644
--- a/Documentation/laptops/00-INDEX
+++ b/Documentation/laptops/00-INDEX
@@ -1,13 +1,15 @@
100-INDEX 100-INDEX
2 - This file 2 - This file
3acer-wmi.txt 3Makefile
4 - information on the Acer Laptop WMI Extras driver. 4 - Makefile for building dslm example program.
5asus-laptop.txt 5asus-laptop.txt
6 - information on the Asus Laptop Extras driver. 6 - information on the Asus Laptop Extras driver.
7disk-shock-protection.txt 7disk-shock-protection.txt
8 - information on hard disk shock protection. 8 - information on hard disk shock protection.
9dslm.c 9dslm.c
10 - Simple Disk Sleep Monitor program 10 - Simple Disk Sleep Monitor program
11hpfall.c
12 - (HP) laptop accelerometer program for disk protection.
11laptop-mode.txt 13laptop-mode.txt
12 - how to conserve battery power using laptop-mode. 14 - how to conserve battery power using laptop-mode.
13sony-laptop.txt 15sony-laptop.txt
diff --git a/Documentation/leds/00-INDEX b/Documentation/leds/00-INDEX
index 1ecd1596633e..b4ef1f34e25f 100644
--- a/Documentation/leds/00-INDEX
+++ b/Documentation/leds/00-INDEX
@@ -1,3 +1,7 @@
100-INDEX
2 - This file
3leds-blinkm.txt
4 - Driver for BlinkM LED-devices.
1leds-class.txt 5leds-class.txt
2 - documents LED handling under Linux. 6 - documents LED handling under Linux.
3leds-lp3944.txt 7leds-lp3944.txt
@@ -12,3 +16,7 @@ leds-lp55xx.txt
12 - description about lp55xx common driver. 16 - description about lp55xx common driver.
13leds-lm3556.txt 17leds-lm3556.txt
14 - notes on how to use the leds-lm3556 driver. 18 - notes on how to use the leds-lm3556 driver.
19ledtrig-oneshot.txt
20 - One-shot LED trigger for both sporadic and dense events.
21ledtrig-transient.txt
22 - LED Transient Trigger, one shot timer activation.
diff --git a/Documentation/m68k/00-INDEX b/Documentation/m68k/00-INDEX
index a014e9f00765..2be8c6b00e74 100644
--- a/Documentation/m68k/00-INDEX
+++ b/Documentation/m68k/00-INDEX
@@ -1,5 +1,7 @@
100-INDEX 100-INDEX
2 - this file 2 - this file
3README.buddha
4 - Amiga Buddha and Catweasel IDE Driver
3kernel-options.txt 5kernel-options.txt
4 - command line options for Linux/m68k 6 - command line options for Linux/m68k
5 7
diff --git a/Documentation/networking/00-INDEX b/Documentation/networking/00-INDEX
index f11580f8719a..557b6ef70c26 100644
--- a/Documentation/networking/00-INDEX
+++ b/Documentation/networking/00-INDEX
@@ -6,8 +6,14 @@
6 - information on the 3Com Etherlink III Series Ethernet cards. 6 - information on the 3Com Etherlink III Series Ethernet cards.
76pack.txt 76pack.txt
8 - info on the 6pack protocol, an alternative to KISS for AX.25 8 - info on the 6pack protocol, an alternative to KISS for AX.25
9DLINK.txt 9LICENSE.qla3xxx
10 - info on the D-Link DE-600/DE-620 parallel port pocket adapters 10 - GPLv2 for QLogic Linux Networking HBA Driver
11LICENSE.qlge
12 - GPLv2 for QLogic Linux qlge NIC Driver
13LICENSE.qlcnic
14 - GPLv2 for QLogic Linux qlcnic NIC Driver
15Makefile
16 - Makefile for docsrc.
11PLIP.txt 17PLIP.txt
12 - PLIP: The Parallel Line Internet Protocol device driver 18 - PLIP: The Parallel Line Internet Protocol device driver
13README.ipw2100 19README.ipw2100
@@ -17,7 +23,7 @@ README.ipw2200
17README.sb1000 23README.sb1000
18 - info on General Instrument/NextLevel SURFboard1000 cable modem. 24 - info on General Instrument/NextLevel SURFboard1000 cable modem.
19alias.txt 25alias.txt
20 - info on using alias network devices 26 - info on using alias network devices.
21arcnet-hardware.txt 27arcnet-hardware.txt
22 - tons of info on ARCnet, hubs, jumper settings for ARCnet cards, etc. 28 - tons of info on ARCnet, hubs, jumper settings for ARCnet cards, etc.
23arcnet.txt 29arcnet.txt
@@ -80,7 +86,7 @@ framerelay.txt
80 - info on using Frame Relay/Data Link Connection Identifier (DLCI). 86 - info on using Frame Relay/Data Link Connection Identifier (DLCI).
81gen_stats.txt 87gen_stats.txt
82 - Generic networking statistics for netlink users. 88 - Generic networking statistics for netlink users.
83generic_hdlc.txt 89generic-hdlc.txt
84 - The generic High Level Data Link Control (HDLC) layer. 90 - The generic High Level Data Link Control (HDLC) layer.
85generic_netlink.txt 91generic_netlink.txt
86 - info on Generic Netlink 92 - info on Generic Netlink
@@ -88,6 +94,8 @@ gianfar.txt
88 - Gianfar Ethernet Driver. 94 - Gianfar Ethernet Driver.
89i40e.txt 95i40e.txt
90 - README for the Intel Ethernet Controller XL710 Driver (i40e). 96 - README for the Intel Ethernet Controller XL710 Driver (i40e).
97i40evf.txt
98 - Short note on the Driver for the Intel(R) XL710 X710 Virtual Function
91ieee802154.txt 99ieee802154.txt
92 - Linux IEEE 802.15.4 implementation, API and drivers 100 - Linux IEEE 802.15.4 implementation, API and drivers
93igb.txt 101igb.txt
@@ -102,6 +110,8 @@ ipddp.txt
102 - AppleTalk-IP Decapsulation and AppleTalk-IP Encapsulation 110 - AppleTalk-IP Decapsulation and AppleTalk-IP Encapsulation
103iphase.txt 111iphase.txt
104 - Interphase PCI ATM (i)Chip IA Linux driver info. 112 - Interphase PCI ATM (i)Chip IA Linux driver info.
113ipsec.txt
114 - Note on not compressing IPSec payload and resulting failed policy check.
105ipv6.txt 115ipv6.txt
106 - Options to the ipv6 kernel module. 116 - Options to the ipv6 kernel module.
107ipvs-sysctl.txt 117ipvs-sysctl.txt
@@ -120,6 +130,8 @@ lapb-module.txt
120 - programming information of the LAPB module. 130 - programming information of the LAPB module.
121ltpc.txt 131ltpc.txt
122 - the Apple or Farallon LocalTalk PC card driver 132 - the Apple or Farallon LocalTalk PC card driver
133mac80211-auth-assoc-deauth.txt
134 - authentication and association / deauth-disassoc with max80211
123mac80211-injection.txt 135mac80211-injection.txt
124 - HOWTO use packet injection with mac80211 136 - HOWTO use packet injection with mac80211
125multiqueue.txt 137multiqueue.txt
@@ -134,6 +146,10 @@ netdevices.txt
134 - info on network device driver functions exported to the kernel. 146 - info on network device driver functions exported to the kernel.
135netif-msg.txt 147netif-msg.txt
136 - Design of the network interface message level setting (NETIF_MSG_*). 148 - Design of the network interface message level setting (NETIF_MSG_*).
149netlink_mmap.txt
150 - memory mapped I/O with netlink
151nf_conntrack-sysctl.txt
152 - list of netfilter-sysctl knobs.
137nfc.txt 153nfc.txt
138 - The Linux Near Field Communication (NFS) subsystem. 154 - The Linux Near Field Communication (NFS) subsystem.
139openvswitch.txt 155openvswitch.txt
@@ -176,7 +192,7 @@ skfp.txt
176 - SysKonnect FDDI (SK-5xxx, Compaq Netelligent) driver info. 192 - SysKonnect FDDI (SK-5xxx, Compaq Netelligent) driver info.
177smc9.txt 193smc9.txt
178 - the driver for SMC's 9000 series of Ethernet cards 194 - the driver for SMC's 9000 series of Ethernet cards
179spider-net.txt 195spider_net.txt
180 - README for the Spidernet Driver (as found in PS3 / Cell BE). 196 - README for the Spidernet Driver (as found in PS3 / Cell BE).
181stmmac.txt 197stmmac.txt
182 - README for the STMicro Synopsys Ethernet driver. 198 - README for the STMicro Synopsys Ethernet driver.
@@ -188,6 +204,8 @@ tcp.txt
188 - short blurb on how TCP output takes place. 204 - short blurb on how TCP output takes place.
189tcp-thin.txt 205tcp-thin.txt
190 - kernel tuning options for low rate 'thin' TCP streams. 206 - kernel tuning options for low rate 'thin' TCP streams.
207team.txt
208 - pointer to information for ethernet teaming devices.
191tlan.txt 209tlan.txt
192 - ThunderLAN (Compaq Netelligent 10/100, Olicom OC-2xxx) driver info. 210 - ThunderLAN (Compaq Netelligent 10/100, Olicom OC-2xxx) driver info.
193tproxy.txt 211tproxy.txt
@@ -200,6 +218,8 @@ vortex.txt
200 - info on using 3Com Vortex (3c590, 3c592, 3c595, 3c597) Ethernet cards. 218 - info on using 3Com Vortex (3c590, 3c592, 3c595, 3c597) Ethernet cards.
201vxge.txt 219vxge.txt
202 - README for the Neterion X3100 PCIe Server Adapter. 220 - README for the Neterion X3100 PCIe Server Adapter.
221vxlan.txt
222 - Virtual extensible LAN overview
203x25.txt 223x25.txt
204 - general info on X.25 development. 224 - general info on X.25 development.
205x25-iface.txt 225x25-iface.txt
diff --git a/Documentation/power/00-INDEX b/Documentation/power/00-INDEX
index a4d682f54231..ad04cc8097ed 100644
--- a/Documentation/power/00-INDEX
+++ b/Documentation/power/00-INDEX
@@ -4,6 +4,8 @@ apm-acpi.txt
4 - basic info about the APM and ACPI support. 4 - basic info about the APM and ACPI support.
5basic-pm-debugging.txt 5basic-pm-debugging.txt
6 - Debugging suspend and resume 6 - Debugging suspend and resume
7charger-manager.txt
8 - Battery charger management.
7devices.txt 9devices.txt
8 - How drivers interact with system-wide power management 10 - How drivers interact with system-wide power management
9drivers-testing.txt 11drivers-testing.txt
@@ -22,6 +24,8 @@ pm_qos_interface.txt
22 - info on Linux PM Quality of Service interface 24 - info on Linux PM Quality of Service interface
23power_supply_class.txt 25power_supply_class.txt
24 - Tells userspace about battery, UPS, AC or DC power supply properties 26 - Tells userspace about battery, UPS, AC or DC power supply properties
27runtime_pm.txt
28 - Power management framework for I/O devices.
25s2ram.txt 29s2ram.txt
26 - How to get suspend to ram working (and debug it when it isn't) 30 - How to get suspend to ram working (and debug it when it isn't)
27states.txt 31states.txt
@@ -38,7 +42,5 @@ tricks.txt
38 - How to trick software suspend (to disk) into working when it isn't 42 - How to trick software suspend (to disk) into working when it isn't
39userland-swsusp.txt 43userland-swsusp.txt
40 - Experimental implementation of software suspend in userspace 44 - Experimental implementation of software suspend in userspace
41video_extension.txt
42 - ACPI video extensions
43video.txt 45video.txt
44 - Video issues during resume from suspend 46 - Video issues during resume from suspend
diff --git a/Documentation/ptp/testptp.c b/Documentation/ptp/testptp.c
index a74d0a84d329..4aba0436da65 100644
--- a/Documentation/ptp/testptp.c
+++ b/Documentation/ptp/testptp.c
@@ -117,6 +117,7 @@ static void usage(char *progname)
117 " -f val adjust the ptp clock frequency by 'val' ppb\n" 117 " -f val adjust the ptp clock frequency by 'val' ppb\n"
118 " -g get the ptp clock time\n" 118 " -g get the ptp clock time\n"
119 " -h prints this message\n" 119 " -h prints this message\n"
120 " -i val index for event/trigger\n"
120 " -k val measure the time offset between system and phc clock\n" 121 " -k val measure the time offset between system and phc clock\n"
121 " for 'val' times (Maximum 25)\n" 122 " for 'val' times (Maximum 25)\n"
122 " -p val enable output with a period of 'val' nanoseconds\n" 123 " -p val enable output with a period of 'val' nanoseconds\n"
@@ -154,6 +155,7 @@ int main(int argc, char *argv[])
154 int capabilities = 0; 155 int capabilities = 0;
155 int extts = 0; 156 int extts = 0;
156 int gettime = 0; 157 int gettime = 0;
158 int index = 0;
157 int oneshot = 0; 159 int oneshot = 0;
158 int pct_offset = 0; 160 int pct_offset = 0;
159 int n_samples = 0; 161 int n_samples = 0;
@@ -167,7 +169,7 @@ int main(int argc, char *argv[])
167 169
168 progname = strrchr(argv[0], '/'); 170 progname = strrchr(argv[0], '/');
169 progname = progname ? 1+progname : argv[0]; 171 progname = progname ? 1+progname : argv[0];
170 while (EOF != (c = getopt(argc, argv, "a:A:cd:e:f:ghk:p:P:sSt:v"))) { 172 while (EOF != (c = getopt(argc, argv, "a:A:cd:e:f:ghi:k:p:P:sSt:v"))) {
171 switch (c) { 173 switch (c) {
172 case 'a': 174 case 'a':
173 oneshot = atoi(optarg); 175 oneshot = atoi(optarg);
@@ -190,6 +192,9 @@ int main(int argc, char *argv[])
190 case 'g': 192 case 'g':
191 gettime = 1; 193 gettime = 1;
192 break; 194 break;
195 case 'i':
196 index = atoi(optarg);
197 break;
193 case 'k': 198 case 'k':
194 pct_offset = 1; 199 pct_offset = 1;
195 n_samples = atoi(optarg); 200 n_samples = atoi(optarg);
@@ -301,7 +306,7 @@ int main(int argc, char *argv[])
301 306
302 if (extts) { 307 if (extts) {
303 memset(&extts_request, 0, sizeof(extts_request)); 308 memset(&extts_request, 0, sizeof(extts_request));
304 extts_request.index = 0; 309 extts_request.index = index;
305 extts_request.flags = PTP_ENABLE_FEATURE; 310 extts_request.flags = PTP_ENABLE_FEATURE;
306 if (ioctl(fd, PTP_EXTTS_REQUEST, &extts_request)) { 311 if (ioctl(fd, PTP_EXTTS_REQUEST, &extts_request)) {
307 perror("PTP_EXTTS_REQUEST"); 312 perror("PTP_EXTTS_REQUEST");
@@ -375,7 +380,7 @@ int main(int argc, char *argv[])
375 return -1; 380 return -1;
376 } 381 }
377 memset(&perout_request, 0, sizeof(perout_request)); 382 memset(&perout_request, 0, sizeof(perout_request));
378 perout_request.index = 0; 383 perout_request.index = index;
379 perout_request.start.sec = ts.tv_sec + 2; 384 perout_request.start.sec = ts.tv_sec + 2;
380 perout_request.start.nsec = 0; 385 perout_request.start.nsec = 0;
381 perout_request.period.sec = 0; 386 perout_request.period.sec = 0;
diff --git a/Documentation/s390/00-INDEX b/Documentation/s390/00-INDEX
index 3a2b96302ecc..10c874ebdfe5 100644
--- a/Documentation/s390/00-INDEX
+++ b/Documentation/s390/00-INDEX
@@ -16,11 +16,13 @@ Debugging390.txt
16 - hints for debugging on s390 systems. 16 - hints for debugging on s390 systems.
17driver-model.txt 17driver-model.txt
18 - information on s390 devices and the driver model. 18 - information on s390 devices and the driver model.
19kvm.txt
20 - ioctl calls to /dev/kvm on s390.
19monreader.txt 21monreader.txt
20 - information on accessing the z/VM monitor stream from Linux. 22 - information on accessing the z/VM monitor stream from Linux.
23qeth.txt
24 - HiperSockets Bridge Port Support.
21s390dbf.txt 25s390dbf.txt
22 - information on using the s390 debug feature. 26 - information on using the s390 debug feature.
23TAPE 27zfcpdump.txt
24 - information on the driver for channel-attached tapes.
25zfcpdump
26 - information on the s390 SCSI dump tool. 28 - information on the s390 SCSI dump tool.
diff --git a/Documentation/scheduler/00-INDEX b/Documentation/scheduler/00-INDEX
index 46702e4f89c9..eccf7ad2e7f9 100644
--- a/Documentation/scheduler/00-INDEX
+++ b/Documentation/scheduler/00-INDEX
@@ -2,6 +2,8 @@
2 - this file. 2 - this file.
3sched-arch.txt 3sched-arch.txt
4 - CPU Scheduler implementation hints for architecture specific code. 4 - CPU Scheduler implementation hints for architecture specific code.
5sched-bwc.txt
6 - CFS bandwidth control overview.
5sched-design-CFS.txt 7sched-design-CFS.txt
6 - goals, design and implementation of the Completely Fair Scheduler. 8 - goals, design and implementation of the Completely Fair Scheduler.
7sched-domains.txt 9sched-domains.txt
diff --git a/Documentation/scsi/00-INDEX b/Documentation/scsi/00-INDEX
index 2044be565d93..c4b978a72f78 100644
--- a/Documentation/scsi/00-INDEX
+++ b/Documentation/scsi/00-INDEX
@@ -36,6 +36,8 @@ NinjaSCSI.txt
36 - info on WorkBiT NinjaSCSI-32/32Bi driver 36 - info on WorkBiT NinjaSCSI-32/32Bi driver
37aacraid.txt 37aacraid.txt
38 - Driver supporting Adaptec RAID controllers 38 - Driver supporting Adaptec RAID controllers
39advansys.txt
40 - List of Advansys Host Adapters
39aha152x.txt 41aha152x.txt
40 - info on driver for Adaptec AHA152x based adapters 42 - info on driver for Adaptec AHA152x based adapters
41aic79xx.txt 43aic79xx.txt
@@ -44,6 +46,12 @@ aic7xxx.txt
44 - info on driver for Adaptec controllers 46 - info on driver for Adaptec controllers
45arcmsr_spec.txt 47arcmsr_spec.txt
46 - ARECA FIRMWARE SPEC (for IOP331 adapter) 48 - ARECA FIRMWARE SPEC (for IOP331 adapter)
49bfa.txt
50 - Brocade FC/FCOE adapter driver.
51bnx2fc.txt
52 - FCoE hardware offload for Broadcom network interfaces.
53cxgb3i.txt
54 - Chelsio iSCSI Linux Driver
47dc395x.txt 55dc395x.txt
48 - README file for the dc395x SCSI driver 56 - README file for the dc395x SCSI driver
49dpti.txt 57dpti.txt
@@ -52,18 +60,24 @@ dtc3x80.txt
52 - info on driver for DTC 2x80 based adapters 60 - info on driver for DTC 2x80 based adapters
53g_NCR5380.txt 61g_NCR5380.txt
54 - info on driver for NCR5380 and NCR53c400 based adapters 62 - info on driver for NCR5380 and NCR53c400 based adapters
63hpsa.txt
64 - HP Smart Array Controller SCSI driver.
55hptiop.txt 65hptiop.txt
56 - HIGHPOINT ROCKETRAID 3xxx RAID DRIVER 66 - HIGHPOINT ROCKETRAID 3xxx RAID DRIVER
57in2000.txt 67in2000.txt
58 - info on in2000 driver 68 - info on in2000 driver
59libsas.txt 69libsas.txt
60 - Serial Attached SCSI management layer. 70 - Serial Attached SCSI management layer.
71link_power_management_policy.txt
72 - Link power management options.
61lpfc.txt 73lpfc.txt
62 - LPFC driver release notes 74 - LPFC driver release notes
63megaraid.txt 75megaraid.txt
64 - Common Management Module, shared code handling ioctls for LSI drivers 76 - Common Management Module, shared code handling ioctls for LSI drivers
65ncr53c8xx.txt 77ncr53c8xx.txt
66 - info on driver for NCR53c8xx based adapters 78 - info on driver for NCR53c8xx based adapters
79osd.txt
80 Object-Based Storage Device, command set introduction.
67osst.txt 81osst.txt
68 - info on driver for OnStream SC-x0 SCSI tape 82 - info on driver for OnStream SC-x0 SCSI tape
69ppa.txt 83ppa.txt
@@ -74,6 +88,8 @@ scsi-changer.txt
74 - README for the SCSI media changer driver 88 - README for the SCSI media changer driver
75scsi-generic.txt 89scsi-generic.txt
76 - info on the sg driver for generic (non-disk/CD/tape) SCSI devices. 90 - info on the sg driver for generic (non-disk/CD/tape) SCSI devices.
91scsi-parameters.txt
92 - List of SCSI-parameters to pass to the kernel at module load-time.
77scsi.txt 93scsi.txt
78 - short blurb on using SCSI support as a module. 94 - short blurb on using SCSI support as a module.
79scsi_mid_low_api.txt 95scsi_mid_low_api.txt
diff --git a/Documentation/serial/00-INDEX b/Documentation/serial/00-INDEX
index 1f1b22fbd739..f9c6b5ed03e7 100644
--- a/Documentation/serial/00-INDEX
+++ b/Documentation/serial/00-INDEX
@@ -4,10 +4,12 @@ README.cycladesZ
4 - info on Cyclades-Z firmware loading. 4 - info on Cyclades-Z firmware loading.
5digiepca.txt 5digiepca.txt
6 - info on Digi Intl. {PC,PCI,EISA}Xx and Xem series cards. 6 - info on Digi Intl. {PC,PCI,EISA}Xx and Xem series cards.
7hayes-esp.txt 7driver
8 - info on using the Hayes ESP serial driver. 8 - intro to the low level serial driver.
9moxa-smartio 9moxa-smartio
10 - file with info on installing/using Moxa multiport serial driver. 10 - file with info on installing/using Moxa multiport serial driver.
11n_gsm.txt
12 - GSM 0710 tty multiplexer howto.
11riscom8.txt 13riscom8.txt
12 - notes on using the RISCom/8 multi-port serial driver. 14 - notes on using the RISCom/8 multi-port serial driver.
13rocket.txt 15rocket.txt
diff --git a/Documentation/spi/00-INDEX b/Documentation/spi/00-INDEX
new file mode 100644
index 000000000000..a128fa835512
--- /dev/null
+++ b/Documentation/spi/00-INDEX
@@ -0,0 +1,22 @@
100-INDEX
2 - this file.
3Makefile
4 - Makefile for the example sourcefiles.
5butterfly
6 - AVR Butterfly SPI driver overview and pin configuration.
7ep93xx_spi
8 - Basic EP93xx SPI driver configuration.
9pxa2xx
10 - PXA2xx SPI master controller build by spi_message fifo wq
11spidev
12 - Intro to the userspace API for spi devices
13spidev_fdx.c
14 - spidev example file
15spi-lm70llp
16 - Connecting an LM70-LLP sensor to the kernel via the SPI subsys.
17spi-sc18is602
18 - NXP SC18IS602/603 I2C-bus to SPI bridge
19spi-summary
20 - (Linux) SPI overview. If unsure about SPI or SPI in Linux, start here.
21spidev_test.c
22 - SPI testing utility.
diff --git a/Documentation/timers/00-INDEX b/Documentation/timers/00-INDEX
index ef2ccbf77fa2..6d042dc1cce0 100644
--- a/Documentation/timers/00-INDEX
+++ b/Documentation/timers/00-INDEX
@@ -8,6 +8,8 @@ hpet_example.c
8 - sample hpet timer test program 8 - sample hpet timer test program
9hrtimers.txt 9hrtimers.txt
10 - subsystem for high-resolution kernel timers 10 - subsystem for high-resolution kernel timers
11Makefile
12 - Build and link hpet_example
11NO_HZ.txt 13NO_HZ.txt
12 - Summary of the different methods for the scheduler clock-interrupts management. 14 - Summary of the different methods for the scheduler clock-interrupts management.
13timers-howto.txt 15timers-howto.txt
diff --git a/Documentation/virtual/kvm/00-INDEX b/Documentation/virtual/kvm/00-INDEX
index 641ec9220179..fee9f2bf9c64 100644
--- a/Documentation/virtual/kvm/00-INDEX
+++ b/Documentation/virtual/kvm/00-INDEX
@@ -20,5 +20,7 @@ ppc-pv.txt
20 - the paravirtualization interface on PowerPC. 20 - the paravirtualization interface on PowerPC.
21review-checklist.txt 21review-checklist.txt
22 - review checklist for KVM patches. 22 - review checklist for KVM patches.
23s390-diag.txt
24 - Diagnose hypercall description (for IBM S/390)
23timekeeping.txt 25timekeeping.txt
24 - timekeeping virtualization for x86-based architectures. 26 - timekeeping virtualization for x86-based architectures.
diff --git a/Documentation/vm/00-INDEX b/Documentation/vm/00-INDEX
index a39d06680e1c..081c49777abb 100644
--- a/Documentation/vm/00-INDEX
+++ b/Documentation/vm/00-INDEX
@@ -16,8 +16,6 @@ hwpoison.txt
16 - explains what hwpoison is 16 - explains what hwpoison is
17ksm.txt 17ksm.txt
18 - how to use the Kernel Samepage Merging feature. 18 - how to use the Kernel Samepage Merging feature.
19locking
20 - info on how locking and synchronization is done in the Linux vm code.
21numa 19numa
22 - information about NUMA specific code in the Linux vm. 20 - information about NUMA specific code in the Linux vm.
23numa_memory_policy.txt 21numa_memory_policy.txt
@@ -32,6 +30,8 @@ slub.txt
32 - a short users guide for SLUB. 30 - a short users guide for SLUB.
33soft-dirty.txt 31soft-dirty.txt
34 - short explanation for soft-dirty PTEs 32 - short explanation for soft-dirty PTEs
33split_page_table_lock
34 - Separate per-table lock to improve scalability of the old page_table_lock.
35transhuge.txt 35transhuge.txt
36 - Transparent Hugepage Support, alternative way of using hugepages. 36 - Transparent Hugepage Support, alternative way of using hugepages.
37unevictable-lru.txt 37unevictable-lru.txt
diff --git a/Documentation/w1/masters/00-INDEX b/Documentation/w1/masters/00-INDEX
index d63fa024ac05..8330cf9325f0 100644
--- a/Documentation/w1/masters/00-INDEX
+++ b/Documentation/w1/masters/00-INDEX
@@ -4,7 +4,9 @@ ds2482
4 - The Maxim/Dallas Semiconductor DS2482 provides 1-wire busses. 4 - The Maxim/Dallas Semiconductor DS2482 provides 1-wire busses.
5ds2490 5ds2490
6 - The Maxim/Dallas Semiconductor DS2490 builds USB <-> W1 bridges. 6 - The Maxim/Dallas Semiconductor DS2490 builds USB <-> W1 bridges.
7mxc_w1 7mxc-w1
8 - W1 master controller driver found on Freescale MX2/MX3 SoCs 8 - W1 master controller driver found on Freescale MX2/MX3 SoCs
9omap-hdq
10 - HDQ/1-wire module of TI OMAP 2430/3430.
9w1-gpio 11w1-gpio
10 - GPIO 1-wire bus master driver. 12 - GPIO 1-wire bus master driver.
diff --git a/Documentation/w1/slaves/00-INDEX b/Documentation/w1/slaves/00-INDEX
index 75613c9ac4db..6e18c70c3474 100644
--- a/Documentation/w1/slaves/00-INDEX
+++ b/Documentation/w1/slaves/00-INDEX
@@ -4,3 +4,5 @@ w1_therm
4 - The Maxim/Dallas Semiconductor ds18*20 temperature sensor. 4 - The Maxim/Dallas Semiconductor ds18*20 temperature sensor.
5w1_ds2423 5w1_ds2423
6 - The Maxim/Dallas Semiconductor ds2423 counter device. 6 - The Maxim/Dallas Semiconductor ds2423 counter device.
7w1_ds28e04
8 - The Maxim/Dallas Semiconductor ds28e04 eeprom.
diff --git a/Documentation/x86/00-INDEX b/Documentation/x86/00-INDEX
index f37b46d34861..692264456f0f 100644
--- a/Documentation/x86/00-INDEX
+++ b/Documentation/x86/00-INDEX
@@ -1,6 +1,20 @@
100-INDEX 100-INDEX
2 - this file 2 - this file
3mtrr.txt 3boot.txt
4 - how to use x86 Memory Type Range Registers to increase performance 4 - List of boot protocol versions
5early-microcode.txt
6 - How to load microcode from an initrd-CPIO archive early to fix CPU issues.
7earlyprintk.txt
8 - Using earlyprintk with a USB2 debug port key.
9entry_64.txt
10 - Describe (some of the) kernel entry points for x86.
5exception-tables.txt 11exception-tables.txt
6 - why and how Linux kernel uses exception tables on x86 12 - why and how Linux kernel uses exception tables on x86
13mtrr.txt
14 - how to use x86 Memory Type Range Registers to increase performance
15pat.txt
16 - Page Attribute Table intro and API
17usb-legacy-support.txt
18 - how to fix/avoid quirks when using emulated PS/2 mouse/keyboard.
19zero-page.txt
20 - layout of the first page of memory.
diff --git a/MAINTAINERS b/MAINTAINERS
index b2cf5cfb4d29..091b50edaf35 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -7196,7 +7196,7 @@ S: Maintained
7196F: drivers/net/ethernet/rdc/r6040.c 7196F: drivers/net/ethernet/rdc/r6040.c
7197 7197
7198RDS - RELIABLE DATAGRAM SOCKETS 7198RDS - RELIABLE DATAGRAM SOCKETS
7199M: Venkat Venkatsubra <venkat.x.venkatsubra@oracle.com> 7199M: Chien Yen <chien.yen@oracle.com>
7200L: rds-devel@oss.oracle.com (moderated for non-subscribers) 7200L: rds-devel@oss.oracle.com (moderated for non-subscribers)
7201S: Supported 7201S: Supported
7202F: net/rds/ 7202F: net/rds/
diff --git a/Makefile b/Makefile
index 606ef7c4a544..933e1def6baf 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
1VERSION = 3 1VERSION = 3
2PATCHLEVEL = 14 2PATCHLEVEL = 14
3SUBLEVEL = 0 3SUBLEVEL = 0
4EXTRAVERSION = -rc1 4EXTRAVERSION = -rc2
5NAME = Shuffling Zombie Juror 5NAME = Shuffling Zombie Juror
6 6
7# *DOCUMENTATION* 7# *DOCUMENTATION*
diff --git a/arch/arm/boot/dts/sun4i-a10.dtsi b/arch/arm/boot/dts/sun4i-a10.dtsi
index 040bb0eba152..10666ca8aee1 100644
--- a/arch/arm/boot/dts/sun4i-a10.dtsi
+++ b/arch/arm/boot/dts/sun4i-a10.dtsi
@@ -315,7 +315,7 @@
315 ranges; 315 ranges;
316 316
317 emac: ethernet@01c0b000 { 317 emac: ethernet@01c0b000 {
318 compatible = "allwinner,sun4i-emac"; 318 compatible = "allwinner,sun4i-a10-emac";
319 reg = <0x01c0b000 0x1000>; 319 reg = <0x01c0b000 0x1000>;
320 interrupts = <55>; 320 interrupts = <55>;
321 clocks = <&ahb_gates 17>; 321 clocks = <&ahb_gates 17>;
@@ -323,7 +323,7 @@
323 }; 323 };
324 324
325 mdio@01c0b080 { 325 mdio@01c0b080 {
326 compatible = "allwinner,sun4i-mdio"; 326 compatible = "allwinner,sun4i-a10-mdio";
327 reg = <0x01c0b080 0x14>; 327 reg = <0x01c0b080 0x14>;
328 status = "disabled"; 328 status = "disabled";
329 #address-cells = <1>; 329 #address-cells = <1>;
diff --git a/arch/arm/boot/dts/sun5i-a10s.dtsi b/arch/arm/boot/dts/sun5i-a10s.dtsi
index ea16054857a4..64961595e8d6 100644
--- a/arch/arm/boot/dts/sun5i-a10s.dtsi
+++ b/arch/arm/boot/dts/sun5i-a10s.dtsi
@@ -278,7 +278,7 @@
278 ranges; 278 ranges;
279 279
280 emac: ethernet@01c0b000 { 280 emac: ethernet@01c0b000 {
281 compatible = "allwinner,sun4i-emac"; 281 compatible = "allwinner,sun4i-a10-emac";
282 reg = <0x01c0b000 0x1000>; 282 reg = <0x01c0b000 0x1000>;
283 interrupts = <55>; 283 interrupts = <55>;
284 clocks = <&ahb_gates 17>; 284 clocks = <&ahb_gates 17>;
@@ -286,7 +286,7 @@
286 }; 286 };
287 287
288 mdio@01c0b080 { 288 mdio@01c0b080 {
289 compatible = "allwinner,sun4i-mdio"; 289 compatible = "allwinner,sun4i-a10-mdio";
290 reg = <0x01c0b080 0x14>; 290 reg = <0x01c0b080 0x14>;
291 status = "disabled"; 291 status = "disabled";
292 #address-cells = <1>; 292 #address-cells = <1>;
diff --git a/arch/arm/boot/dts/sun7i-a20.dtsi b/arch/arm/boot/dts/sun7i-a20.dtsi
index 119f066f0d98..9ff09484847b 100644
--- a/arch/arm/boot/dts/sun7i-a20.dtsi
+++ b/arch/arm/boot/dts/sun7i-a20.dtsi
@@ -340,7 +340,7 @@
340 ranges; 340 ranges;
341 341
342 emac: ethernet@01c0b000 { 342 emac: ethernet@01c0b000 {
343 compatible = "allwinner,sun4i-emac"; 343 compatible = "allwinner,sun4i-a10-emac";
344 reg = <0x01c0b000 0x1000>; 344 reg = <0x01c0b000 0x1000>;
345 interrupts = <0 55 4>; 345 interrupts = <0 55 4>;
346 clocks = <&ahb_gates 17>; 346 clocks = <&ahb_gates 17>;
@@ -348,7 +348,7 @@
348 }; 348 };
349 349
350 mdio@01c0b080 { 350 mdio@01c0b080 {
351 compatible = "allwinner,sun4i-mdio"; 351 compatible = "allwinner,sun4i-a10-mdio";
352 reg = <0x01c0b080 0x14>; 352 reg = <0x01c0b080 0x14>;
353 status = "disabled"; 353 status = "disabled";
354 #address-cells = <1>; 354 #address-cells = <1>;
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index dd4327f09ba4..27bbcfc7202a 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -36,6 +36,7 @@ config ARM64
36 select HAVE_GENERIC_DMA_COHERENT 36 select HAVE_GENERIC_DMA_COHERENT
37 select HAVE_HW_BREAKPOINT if PERF_EVENTS 37 select HAVE_HW_BREAKPOINT if PERF_EVENTS
38 select HAVE_MEMBLOCK 38 select HAVE_MEMBLOCK
39 select HAVE_PATA_PLATFORM
39 select HAVE_PERF_EVENTS 40 select HAVE_PERF_EVENTS
40 select IRQ_DOMAIN 41 select IRQ_DOMAIN
41 select MODULES_USE_ELF_RELA 42 select MODULES_USE_ELF_RELA
diff --git a/arch/arm64/configs/defconfig b/arch/arm64/configs/defconfig
index 84139be62ae6..7959dd0ca5d5 100644
--- a/arch/arm64/configs/defconfig
+++ b/arch/arm64/configs/defconfig
@@ -1,4 +1,3 @@
1CONFIG_EXPERIMENTAL=y
2# CONFIG_LOCALVERSION_AUTO is not set 1# CONFIG_LOCALVERSION_AUTO is not set
3# CONFIG_SWAP is not set 2# CONFIG_SWAP is not set
4CONFIG_SYSVIPC=y 3CONFIG_SYSVIPC=y
@@ -19,6 +18,7 @@ CONFIG_BLK_DEV_INITRD=y
19CONFIG_KALLSYMS_ALL=y 18CONFIG_KALLSYMS_ALL=y
20# CONFIG_COMPAT_BRK is not set 19# CONFIG_COMPAT_BRK is not set
21CONFIG_PROFILING=y 20CONFIG_PROFILING=y
21CONFIG_JUMP_LABEL=y
22CONFIG_MODULES=y 22CONFIG_MODULES=y
23CONFIG_MODULE_UNLOAD=y 23CONFIG_MODULE_UNLOAD=y
24# CONFIG_BLK_DEV_BSG is not set 24# CONFIG_BLK_DEV_BSG is not set
@@ -27,6 +27,7 @@ CONFIG_ARCH_VEXPRESS=y
27CONFIG_ARCH_XGENE=y 27CONFIG_ARCH_XGENE=y
28CONFIG_SMP=y 28CONFIG_SMP=y
29CONFIG_PREEMPT=y 29CONFIG_PREEMPT=y
30CONFIG_CMA=y
30CONFIG_CMDLINE="console=ttyAMA0" 31CONFIG_CMDLINE="console=ttyAMA0"
31# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set 32# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
32CONFIG_COMPAT=y 33CONFIG_COMPAT=y
@@ -42,14 +43,17 @@ CONFIG_IP_PNP_BOOTP=y
42# CONFIG_WIRELESS is not set 43# CONFIG_WIRELESS is not set
43CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" 44CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
44CONFIG_DEVTMPFS=y 45CONFIG_DEVTMPFS=y
45CONFIG_BLK_DEV=y 46CONFIG_DMA_CMA=y
46CONFIG_SCSI=y 47CONFIG_SCSI=y
47# CONFIG_SCSI_PROC_FS is not set 48# CONFIG_SCSI_PROC_FS is not set
48CONFIG_BLK_DEV_SD=y 49CONFIG_BLK_DEV_SD=y
49# CONFIG_SCSI_LOWLEVEL is not set 50# CONFIG_SCSI_LOWLEVEL is not set
51CONFIG_ATA=y
52CONFIG_PATA_PLATFORM=y
53CONFIG_PATA_OF_PLATFORM=y
50CONFIG_NETDEVICES=y 54CONFIG_NETDEVICES=y
51CONFIG_MII=y
52CONFIG_SMC91X=y 55CONFIG_SMC91X=y
56CONFIG_SMSC911X=y
53# CONFIG_WLAN is not set 57# CONFIG_WLAN is not set
54CONFIG_INPUT_EVDEV=y 58CONFIG_INPUT_EVDEV=y
55# CONFIG_SERIO_I8042 is not set 59# CONFIG_SERIO_I8042 is not set
@@ -62,13 +66,19 @@ CONFIG_SERIAL_AMBA_PL011=y
62CONFIG_SERIAL_AMBA_PL011_CONSOLE=y 66CONFIG_SERIAL_AMBA_PL011_CONSOLE=y
63# CONFIG_HW_RANDOM is not set 67# CONFIG_HW_RANDOM is not set
64# CONFIG_HWMON is not set 68# CONFIG_HWMON is not set
69CONFIG_REGULATOR=y
70CONFIG_REGULATOR_FIXED_VOLTAGE=y
65CONFIG_FB=y 71CONFIG_FB=y
66# CONFIG_VGA_CONSOLE is not set 72# CONFIG_VGA_CONSOLE is not set
67CONFIG_FRAMEBUFFER_CONSOLE=y 73CONFIG_FRAMEBUFFER_CONSOLE=y
68CONFIG_LOGO=y 74CONFIG_LOGO=y
69# CONFIG_LOGO_LINUX_MONO is not set 75# CONFIG_LOGO_LINUX_MONO is not set
70# CONFIG_LOGO_LINUX_VGA16 is not set 76# CONFIG_LOGO_LINUX_VGA16 is not set
71# CONFIG_USB_SUPPORT is not set 77CONFIG_USB=y
78CONFIG_USB_ISP1760_HCD=y
79CONFIG_USB_STORAGE=y
80CONFIG_MMC=y
81CONFIG_MMC_ARMMMCI=y
72# CONFIG_IOMMU_SUPPORT is not set 82# CONFIG_IOMMU_SUPPORT is not set
73CONFIG_EXT2_FS=y 83CONFIG_EXT2_FS=y
74CONFIG_EXT3_FS=y 84CONFIG_EXT3_FS=y
diff --git a/arch/arm64/include/asm/atomic.h b/arch/arm64/include/asm/atomic.h
index 01de5aaa3edc..0237f0867e37 100644
--- a/arch/arm64/include/asm/atomic.h
+++ b/arch/arm64/include/asm/atomic.h
@@ -54,8 +54,7 @@ static inline void atomic_add(int i, atomic_t *v)
54" stxr %w1, %w0, %2\n" 54" stxr %w1, %w0, %2\n"
55" cbnz %w1, 1b" 55" cbnz %w1, 1b"
56 : "=&r" (result), "=&r" (tmp), "+Q" (v->counter) 56 : "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
57 : "Ir" (i) 57 : "Ir" (i));
58 : "cc");
59} 58}
60 59
61static inline int atomic_add_return(int i, atomic_t *v) 60static inline int atomic_add_return(int i, atomic_t *v)
@@ -64,14 +63,15 @@ static inline int atomic_add_return(int i, atomic_t *v)
64 int result; 63 int result;
65 64
66 asm volatile("// atomic_add_return\n" 65 asm volatile("// atomic_add_return\n"
67"1: ldaxr %w0, %2\n" 66"1: ldxr %w0, %2\n"
68" add %w0, %w0, %w3\n" 67" add %w0, %w0, %w3\n"
69" stlxr %w1, %w0, %2\n" 68" stlxr %w1, %w0, %2\n"
70" cbnz %w1, 1b" 69" cbnz %w1, 1b"
71 : "=&r" (result), "=&r" (tmp), "+Q" (v->counter) 70 : "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
72 : "Ir" (i) 71 : "Ir" (i)
73 : "cc", "memory"); 72 : "memory");
74 73
74 smp_mb();
75 return result; 75 return result;
76} 76}
77 77
@@ -86,8 +86,7 @@ static inline void atomic_sub(int i, atomic_t *v)
86" stxr %w1, %w0, %2\n" 86" stxr %w1, %w0, %2\n"
87" cbnz %w1, 1b" 87" cbnz %w1, 1b"
88 : "=&r" (result), "=&r" (tmp), "+Q" (v->counter) 88 : "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
89 : "Ir" (i) 89 : "Ir" (i));
90 : "cc");
91} 90}
92 91
93static inline int atomic_sub_return(int i, atomic_t *v) 92static inline int atomic_sub_return(int i, atomic_t *v)
@@ -96,14 +95,15 @@ static inline int atomic_sub_return(int i, atomic_t *v)
96 int result; 95 int result;
97 96
98 asm volatile("// atomic_sub_return\n" 97 asm volatile("// atomic_sub_return\n"
99"1: ldaxr %w0, %2\n" 98"1: ldxr %w0, %2\n"
100" sub %w0, %w0, %w3\n" 99" sub %w0, %w0, %w3\n"
101" stlxr %w1, %w0, %2\n" 100" stlxr %w1, %w0, %2\n"
102" cbnz %w1, 1b" 101" cbnz %w1, 1b"
103 : "=&r" (result), "=&r" (tmp), "+Q" (v->counter) 102 : "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
104 : "Ir" (i) 103 : "Ir" (i)
105 : "cc", "memory"); 104 : "memory");
106 105
106 smp_mb();
107 return result; 107 return result;
108} 108}
109 109
@@ -112,17 +112,20 @@ static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
112 unsigned long tmp; 112 unsigned long tmp;
113 int oldval; 113 int oldval;
114 114
115 smp_mb();
116
115 asm volatile("// atomic_cmpxchg\n" 117 asm volatile("// atomic_cmpxchg\n"
116"1: ldaxr %w1, %2\n" 118"1: ldxr %w1, %2\n"
117" cmp %w1, %w3\n" 119" cmp %w1, %w3\n"
118" b.ne 2f\n" 120" b.ne 2f\n"
119" stlxr %w0, %w4, %2\n" 121" stxr %w0, %w4, %2\n"
120" cbnz %w0, 1b\n" 122" cbnz %w0, 1b\n"
121"2:" 123"2:"
122 : "=&r" (tmp), "=&r" (oldval), "+Q" (ptr->counter) 124 : "=&r" (tmp), "=&r" (oldval), "+Q" (ptr->counter)
123 : "Ir" (old), "r" (new) 125 : "Ir" (old), "r" (new)
124 : "cc", "memory"); 126 : "cc");
125 127
128 smp_mb();
126 return oldval; 129 return oldval;
127} 130}
128 131
@@ -173,8 +176,7 @@ static inline void atomic64_add(u64 i, atomic64_t *v)
173" stxr %w1, %0, %2\n" 176" stxr %w1, %0, %2\n"
174" cbnz %w1, 1b" 177" cbnz %w1, 1b"
175 : "=&r" (result), "=&r" (tmp), "+Q" (v->counter) 178 : "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
176 : "Ir" (i) 179 : "Ir" (i));
177 : "cc");
178} 180}
179 181
180static inline long atomic64_add_return(long i, atomic64_t *v) 182static inline long atomic64_add_return(long i, atomic64_t *v)
@@ -183,14 +185,15 @@ static inline long atomic64_add_return(long i, atomic64_t *v)
183 unsigned long tmp; 185 unsigned long tmp;
184 186
185 asm volatile("// atomic64_add_return\n" 187 asm volatile("// atomic64_add_return\n"
186"1: ldaxr %0, %2\n" 188"1: ldxr %0, %2\n"
187" add %0, %0, %3\n" 189" add %0, %0, %3\n"
188" stlxr %w1, %0, %2\n" 190" stlxr %w1, %0, %2\n"
189" cbnz %w1, 1b" 191" cbnz %w1, 1b"
190 : "=&r" (result), "=&r" (tmp), "+Q" (v->counter) 192 : "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
191 : "Ir" (i) 193 : "Ir" (i)
192 : "cc", "memory"); 194 : "memory");
193 195
196 smp_mb();
194 return result; 197 return result;
195} 198}
196 199
@@ -205,8 +208,7 @@ static inline void atomic64_sub(u64 i, atomic64_t *v)
205" stxr %w1, %0, %2\n" 208" stxr %w1, %0, %2\n"
206" cbnz %w1, 1b" 209" cbnz %w1, 1b"
207 : "=&r" (result), "=&r" (tmp), "+Q" (v->counter) 210 : "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
208 : "Ir" (i) 211 : "Ir" (i));
209 : "cc");
210} 212}
211 213
212static inline long atomic64_sub_return(long i, atomic64_t *v) 214static inline long atomic64_sub_return(long i, atomic64_t *v)
@@ -215,14 +217,15 @@ static inline long atomic64_sub_return(long i, atomic64_t *v)
215 unsigned long tmp; 217 unsigned long tmp;
216 218
217 asm volatile("// atomic64_sub_return\n" 219 asm volatile("// atomic64_sub_return\n"
218"1: ldaxr %0, %2\n" 220"1: ldxr %0, %2\n"
219" sub %0, %0, %3\n" 221" sub %0, %0, %3\n"
220" stlxr %w1, %0, %2\n" 222" stlxr %w1, %0, %2\n"
221" cbnz %w1, 1b" 223" cbnz %w1, 1b"
222 : "=&r" (result), "=&r" (tmp), "+Q" (v->counter) 224 : "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
223 : "Ir" (i) 225 : "Ir" (i)
224 : "cc", "memory"); 226 : "memory");
225 227
228 smp_mb();
226 return result; 229 return result;
227} 230}
228 231
@@ -231,17 +234,20 @@ static inline long atomic64_cmpxchg(atomic64_t *ptr, long old, long new)
231 long oldval; 234 long oldval;
232 unsigned long res; 235 unsigned long res;
233 236
237 smp_mb();
238
234 asm volatile("// atomic64_cmpxchg\n" 239 asm volatile("// atomic64_cmpxchg\n"
235"1: ldaxr %1, %2\n" 240"1: ldxr %1, %2\n"
236" cmp %1, %3\n" 241" cmp %1, %3\n"
237" b.ne 2f\n" 242" b.ne 2f\n"
238" stlxr %w0, %4, %2\n" 243" stxr %w0, %4, %2\n"
239" cbnz %w0, 1b\n" 244" cbnz %w0, 1b\n"
240"2:" 245"2:"
241 : "=&r" (res), "=&r" (oldval), "+Q" (ptr->counter) 246 : "=&r" (res), "=&r" (oldval), "+Q" (ptr->counter)
242 : "Ir" (old), "r" (new) 247 : "Ir" (old), "r" (new)
243 : "cc", "memory"); 248 : "cc");
244 249
250 smp_mb();
245 return oldval; 251 return oldval;
246} 252}
247 253
@@ -253,11 +259,12 @@ static inline long atomic64_dec_if_positive(atomic64_t *v)
253 unsigned long tmp; 259 unsigned long tmp;
254 260
255 asm volatile("// atomic64_dec_if_positive\n" 261 asm volatile("// atomic64_dec_if_positive\n"
256"1: ldaxr %0, %2\n" 262"1: ldxr %0, %2\n"
257" subs %0, %0, #1\n" 263" subs %0, %0, #1\n"
258" b.mi 2f\n" 264" b.mi 2f\n"
259" stlxr %w1, %0, %2\n" 265" stlxr %w1, %0, %2\n"
260" cbnz %w1, 1b\n" 266" cbnz %w1, 1b\n"
267" dmb ish\n"
261"2:" 268"2:"
262 : "=&r" (result), "=&r" (tmp), "+Q" (v->counter) 269 : "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
263 : 270 :
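
The changes in this file share one pattern: exclusive loads drop their acquire semantics (ldaxr becomes ldxr), value-returning operations keep the store-release (stlxr) and gain an explicit smp_mb() afterwards so they stay fully ordered, and the void add/sub variants, which have no ordering requirement, shed the barriers and clobbers they never needed. A rough userspace analogue of the value-returning case, assuming C11 atomics rather than the kernel's primitives (the function name is invented for illustration, not taken from the patch):

/*
 * Illustrative userspace sketch, not kernel code: a value-returning
 * atomic add built from a relaxed RMW plus an explicit full fence,
 * loosely mirroring the ldxr/stlxr + smp_mb() pattern above.
 */
#include <stdatomic.h>
#include <stdio.h>

static int my_atomic_add_return(int i, _Atomic int *v)
{
	/* relaxed RMW: provides atomicity but no ordering by itself */
	int old = atomic_fetch_add_explicit(v, i, memory_order_relaxed);

	/* full fence afterwards makes the whole operation fully ordered */
	atomic_thread_fence(memory_order_seq_cst);
	return old + i;	/* return the new value, like atomic_add_return() */
}

int main(void)
{
	_Atomic int counter = 0;

	printf("%d\n", my_atomic_add_return(5, &counter));	/* prints 5 */
	return 0;
}
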
diff --git a/arch/arm64/include/asm/barrier.h b/arch/arm64/include/asm/barrier.h
index 78e20ba8806b..409ca370cfe2 100644
--- a/arch/arm64/include/asm/barrier.h
+++ b/arch/arm64/include/asm/barrier.h
@@ -25,7 +25,7 @@
25#define wfi() asm volatile("wfi" : : : "memory") 25#define wfi() asm volatile("wfi" : : : "memory")
26 26
27#define isb() asm volatile("isb" : : : "memory") 27#define isb() asm volatile("isb" : : : "memory")
28#define dsb() asm volatile("dsb sy" : : : "memory") 28#define dsb(opt) asm volatile("dsb sy" : : : "memory")
29 29
30#define mb() dsb() 30#define mb() dsb()
31#define rmb() asm volatile("dsb ld" : : : "memory") 31#define rmb() asm volatile("dsb ld" : : : "memory")
diff --git a/arch/arm64/include/asm/cacheflush.h b/arch/arm64/include/asm/cacheflush.h
index fea9ee327206..889324981aa4 100644
--- a/arch/arm64/include/asm/cacheflush.h
+++ b/arch/arm64/include/asm/cacheflush.h
@@ -116,6 +116,7 @@ extern void flush_dcache_page(struct page *);
116static inline void __flush_icache_all(void) 116static inline void __flush_icache_all(void)
117{ 117{
118 asm("ic ialluis"); 118 asm("ic ialluis");
119 dsb();
119} 120}
120 121
121#define flush_dcache_mmap_lock(mapping) \ 122#define flush_dcache_mmap_lock(mapping) \
diff --git a/arch/arm64/include/asm/cmpxchg.h b/arch/arm64/include/asm/cmpxchg.h
index 56166d7f4a25..57c0fa7bf711 100644
--- a/arch/arm64/include/asm/cmpxchg.h
+++ b/arch/arm64/include/asm/cmpxchg.h
@@ -29,44 +29,45 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size
29 switch (size) { 29 switch (size) {
30 case 1: 30 case 1:
31 asm volatile("// __xchg1\n" 31 asm volatile("// __xchg1\n"
32 "1: ldaxrb %w0, %2\n" 32 "1: ldxrb %w0, %2\n"
33 " stlxrb %w1, %w3, %2\n" 33 " stlxrb %w1, %w3, %2\n"
34 " cbnz %w1, 1b\n" 34 " cbnz %w1, 1b\n"
35 : "=&r" (ret), "=&r" (tmp), "+Q" (*(u8 *)ptr) 35 : "=&r" (ret), "=&r" (tmp), "+Q" (*(u8 *)ptr)
36 : "r" (x) 36 : "r" (x)
37 : "cc", "memory"); 37 : "memory");
38 break; 38 break;
39 case 2: 39 case 2:
40 asm volatile("// __xchg2\n" 40 asm volatile("// __xchg2\n"
41 "1: ldaxrh %w0, %2\n" 41 "1: ldxrh %w0, %2\n"
42 " stlxrh %w1, %w3, %2\n" 42 " stlxrh %w1, %w3, %2\n"
43 " cbnz %w1, 1b\n" 43 " cbnz %w1, 1b\n"
44 : "=&r" (ret), "=&r" (tmp), "+Q" (*(u16 *)ptr) 44 : "=&r" (ret), "=&r" (tmp), "+Q" (*(u16 *)ptr)
45 : "r" (x) 45 : "r" (x)
46 : "cc", "memory"); 46 : "memory");
47 break; 47 break;
48 case 4: 48 case 4:
49 asm volatile("// __xchg4\n" 49 asm volatile("// __xchg4\n"
50 "1: ldaxr %w0, %2\n" 50 "1: ldxr %w0, %2\n"
51 " stlxr %w1, %w3, %2\n" 51 " stlxr %w1, %w3, %2\n"
52 " cbnz %w1, 1b\n" 52 " cbnz %w1, 1b\n"
53 : "=&r" (ret), "=&r" (tmp), "+Q" (*(u32 *)ptr) 53 : "=&r" (ret), "=&r" (tmp), "+Q" (*(u32 *)ptr)
54 : "r" (x) 54 : "r" (x)
55 : "cc", "memory"); 55 : "memory");
56 break; 56 break;
57 case 8: 57 case 8:
58 asm volatile("// __xchg8\n" 58 asm volatile("// __xchg8\n"
59 "1: ldaxr %0, %2\n" 59 "1: ldxr %0, %2\n"
60 " stlxr %w1, %3, %2\n" 60 " stlxr %w1, %3, %2\n"
61 " cbnz %w1, 1b\n" 61 " cbnz %w1, 1b\n"
62 : "=&r" (ret), "=&r" (tmp), "+Q" (*(u64 *)ptr) 62 : "=&r" (ret), "=&r" (tmp), "+Q" (*(u64 *)ptr)
63 : "r" (x) 63 : "r" (x)
64 : "cc", "memory"); 64 : "memory");
65 break; 65 break;
66 default: 66 default:
67 BUILD_BUG(); 67 BUILD_BUG();
68 } 68 }
69 69
70 smp_mb();
70 return ret; 71 return ret;
71} 72}
72 73
diff --git a/arch/arm64/include/asm/esr.h b/arch/arm64/include/asm/esr.h
index 78834123a32e..c4a7f940b387 100644
--- a/arch/arm64/include/asm/esr.h
+++ b/arch/arm64/include/asm/esr.h
@@ -42,7 +42,7 @@
42#define ESR_EL1_EC_SP_ALIGN (0x26) 42#define ESR_EL1_EC_SP_ALIGN (0x26)
43#define ESR_EL1_EC_FP_EXC32 (0x28) 43#define ESR_EL1_EC_FP_EXC32 (0x28)
44#define ESR_EL1_EC_FP_EXC64 (0x2C) 44#define ESR_EL1_EC_FP_EXC64 (0x2C)
45#define ESR_EL1_EC_SERRROR (0x2F) 45#define ESR_EL1_EC_SERROR (0x2F)
46#define ESR_EL1_EC_BREAKPT_EL0 (0x30) 46#define ESR_EL1_EC_BREAKPT_EL0 (0x30)
47#define ESR_EL1_EC_BREAKPT_EL1 (0x31) 47#define ESR_EL1_EC_BREAKPT_EL1 (0x31)
48#define ESR_EL1_EC_SOFTSTP_EL0 (0x32) 48#define ESR_EL1_EC_SOFTSTP_EL0 (0x32)
diff --git a/arch/arm64/include/asm/futex.h b/arch/arm64/include/asm/futex.h
index 78cc3aba5d69..5f750dc96e0f 100644
--- a/arch/arm64/include/asm/futex.h
+++ b/arch/arm64/include/asm/futex.h
@@ -24,10 +24,11 @@
24 24
25#define __futex_atomic_op(insn, ret, oldval, uaddr, tmp, oparg) \ 25#define __futex_atomic_op(insn, ret, oldval, uaddr, tmp, oparg) \
26 asm volatile( \ 26 asm volatile( \
27"1: ldaxr %w1, %2\n" \ 27"1: ldxr %w1, %2\n" \
28 insn "\n" \ 28 insn "\n" \
29"2: stlxr %w3, %w0, %2\n" \ 29"2: stlxr %w3, %w0, %2\n" \
30" cbnz %w3, 1b\n" \ 30" cbnz %w3, 1b\n" \
31" dmb ish\n" \
31"3:\n" \ 32"3:\n" \
32" .pushsection .fixup,\"ax\"\n" \ 33" .pushsection .fixup,\"ax\"\n" \
33" .align 2\n" \ 34" .align 2\n" \
@@ -40,7 +41,7 @@
40" .popsection\n" \ 41" .popsection\n" \
41 : "=&r" (ret), "=&r" (oldval), "+Q" (*uaddr), "=&r" (tmp) \ 42 : "=&r" (ret), "=&r" (oldval), "+Q" (*uaddr), "=&r" (tmp) \
42 : "r" (oparg), "Ir" (-EFAULT) \ 43 : "r" (oparg), "Ir" (-EFAULT) \
43 : "cc", "memory") 44 : "memory")
44 45
45static inline int 46static inline int
46futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr) 47futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
@@ -111,11 +112,12 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
111 return -EFAULT; 112 return -EFAULT;
112 113
113 asm volatile("// futex_atomic_cmpxchg_inatomic\n" 114 asm volatile("// futex_atomic_cmpxchg_inatomic\n"
114"1: ldaxr %w1, %2\n" 115"1: ldxr %w1, %2\n"
115" sub %w3, %w1, %w4\n" 116" sub %w3, %w1, %w4\n"
116" cbnz %w3, 3f\n" 117" cbnz %w3, 3f\n"
117"2: stlxr %w3, %w5, %2\n" 118"2: stlxr %w3, %w5, %2\n"
118" cbnz %w3, 1b\n" 119" cbnz %w3, 1b\n"
120" dmb ish\n"
119"3:\n" 121"3:\n"
120" .pushsection .fixup,\"ax\"\n" 122" .pushsection .fixup,\"ax\"\n"
121"4: mov %w0, %w6\n" 123"4: mov %w0, %w6\n"
@@ -127,7 +129,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
127" .popsection\n" 129" .popsection\n"
128 : "+r" (ret), "=&r" (val), "+Q" (*uaddr), "=&r" (tmp) 130 : "+r" (ret), "=&r" (val), "+Q" (*uaddr), "=&r" (tmp)
129 : "r" (oldval), "r" (newval), "Ir" (-EFAULT) 131 : "r" (oldval), "r" (newval), "Ir" (-EFAULT)
130 : "cc", "memory"); 132 : "memory");
131 133
132 *uval = val; 134 *uval = val;
133 return ret; 135 return ret;
diff --git a/arch/arm64/include/asm/kvm_arm.h b/arch/arm64/include/asm/kvm_arm.h
index c98ef4771c73..0eb398655378 100644
--- a/arch/arm64/include/asm/kvm_arm.h
+++ b/arch/arm64/include/asm/kvm_arm.h
@@ -231,7 +231,7 @@
231#define ESR_EL2_EC_SP_ALIGN (0x26) 231#define ESR_EL2_EC_SP_ALIGN (0x26)
232#define ESR_EL2_EC_FP_EXC32 (0x28) 232#define ESR_EL2_EC_FP_EXC32 (0x28)
233#define ESR_EL2_EC_FP_EXC64 (0x2C) 233#define ESR_EL2_EC_FP_EXC64 (0x2C)
234#define ESR_EL2_EC_SERRROR (0x2F) 234#define ESR_EL2_EC_SERROR (0x2F)
235#define ESR_EL2_EC_BREAKPT (0x30) 235#define ESR_EL2_EC_BREAKPT (0x30)
236#define ESR_EL2_EC_BREAKPT_HYP (0x31) 236#define ESR_EL2_EC_BREAKPT_HYP (0x31)
237#define ESR_EL2_EC_SOFTSTP (0x32) 237#define ESR_EL2_EC_SOFTSTP (0x32)
diff --git a/arch/arm64/include/asm/spinlock.h b/arch/arm64/include/asm/spinlock.h
index 3d5cf064d7a1..c45b7b1b7197 100644
--- a/arch/arm64/include/asm/spinlock.h
+++ b/arch/arm64/include/asm/spinlock.h
@@ -132,7 +132,7 @@ static inline void arch_write_lock(arch_rwlock_t *rw)
132 " cbnz %w0, 2b\n" 132 " cbnz %w0, 2b\n"
133 : "=&r" (tmp), "+Q" (rw->lock) 133 : "=&r" (tmp), "+Q" (rw->lock)
134 : "r" (0x80000000) 134 : "r" (0x80000000)
135 : "cc", "memory"); 135 : "memory");
136} 136}
137 137
138static inline int arch_write_trylock(arch_rwlock_t *rw) 138static inline int arch_write_trylock(arch_rwlock_t *rw)
@@ -146,7 +146,7 @@ static inline int arch_write_trylock(arch_rwlock_t *rw)
146 "1:\n" 146 "1:\n"
147 : "=&r" (tmp), "+Q" (rw->lock) 147 : "=&r" (tmp), "+Q" (rw->lock)
148 : "r" (0x80000000) 148 : "r" (0x80000000)
149 : "cc", "memory"); 149 : "memory");
150 150
151 return !tmp; 151 return !tmp;
152} 152}
@@ -187,7 +187,7 @@ static inline void arch_read_lock(arch_rwlock_t *rw)
187 " cbnz %w1, 2b\n" 187 " cbnz %w1, 2b\n"
188 : "=&r" (tmp), "=&r" (tmp2), "+Q" (rw->lock) 188 : "=&r" (tmp), "=&r" (tmp2), "+Q" (rw->lock)
189 : 189 :
190 : "cc", "memory"); 190 : "memory");
191} 191}
192 192
193static inline void arch_read_unlock(arch_rwlock_t *rw) 193static inline void arch_read_unlock(arch_rwlock_t *rw)
@@ -201,7 +201,7 @@ static inline void arch_read_unlock(arch_rwlock_t *rw)
201 " cbnz %w1, 1b\n" 201 " cbnz %w1, 1b\n"
202 : "=&r" (tmp), "=&r" (tmp2), "+Q" (rw->lock) 202 : "=&r" (tmp), "=&r" (tmp2), "+Q" (rw->lock)
203 : 203 :
204 : "cc", "memory"); 204 : "memory");
205} 205}
206 206
207static inline int arch_read_trylock(arch_rwlock_t *rw) 207static inline int arch_read_trylock(arch_rwlock_t *rw)
@@ -216,7 +216,7 @@ static inline int arch_read_trylock(arch_rwlock_t *rw)
216 "1:\n" 216 "1:\n"
217 : "=&r" (tmp), "+r" (tmp2), "+Q" (rw->lock) 217 : "=&r" (tmp), "+r" (tmp2), "+Q" (rw->lock)
218 : 218 :
219 : "cc", "memory"); 219 : "memory");
220 220
221 return !tmp2; 221 return !tmp2;
222} 222}
diff --git a/arch/arm64/include/asm/unistd32.h b/arch/arm64/include/asm/unistd32.h
index 58125bf008d3..bb8eb8a78e67 100644
--- a/arch/arm64/include/asm/unistd32.h
+++ b/arch/arm64/include/asm/unistd32.h
@@ -399,7 +399,10 @@ __SYSCALL(374, compat_sys_sendmmsg)
399__SYSCALL(375, sys_setns) 399__SYSCALL(375, sys_setns)
400__SYSCALL(376, compat_sys_process_vm_readv) 400__SYSCALL(376, compat_sys_process_vm_readv)
401__SYSCALL(377, compat_sys_process_vm_writev) 401__SYSCALL(377, compat_sys_process_vm_writev)
402__SYSCALL(378, sys_ni_syscall) /* 378 for kcmp */ 402__SYSCALL(378, sys_kcmp)
403__SYSCALL(379, sys_finit_module)
404__SYSCALL(380, sys_sched_setattr)
405__SYSCALL(381, sys_sched_getattr)
403 406
404#define __NR_compat_syscalls 379 407#define __NR_compat_syscalls 379
405 408
diff --git a/arch/arm64/kernel/kuser32.S b/arch/arm64/kernel/kuser32.S
index 63c48ffdf230..7787208e8cc6 100644
--- a/arch/arm64/kernel/kuser32.S
+++ b/arch/arm64/kernel/kuser32.S
@@ -38,12 +38,13 @@ __kuser_cmpxchg64: // 0xffff0f60
38 .inst 0xe92d00f0 // push {r4, r5, r6, r7} 38 .inst 0xe92d00f0 // push {r4, r5, r6, r7}
39 .inst 0xe1c040d0 // ldrd r4, r5, [r0] 39 .inst 0xe1c040d0 // ldrd r4, r5, [r0]
40 .inst 0xe1c160d0 // ldrd r6, r7, [r1] 40 .inst 0xe1c160d0 // ldrd r6, r7, [r1]
41 .inst 0xe1b20e9f // 1: ldaexd r0, r1, [r2] 41 .inst 0xe1b20f9f // 1: ldrexd r0, r1, [r2]
42 .inst 0xe0303004 // eors r3, r0, r4 42 .inst 0xe0303004 // eors r3, r0, r4
43 .inst 0x00313005 // eoreqs r3, r1, r5 43 .inst 0x00313005 // eoreqs r3, r1, r5
44 .inst 0x01a23e96 // stlexdeq r3, r6, [r2] 44 .inst 0x01a23e96 // stlexdeq r3, r6, [r2]
45 .inst 0x03330001 // teqeq r3, #1 45 .inst 0x03330001 // teqeq r3, #1
46 .inst 0x0afffff9 // beq 1b 46 .inst 0x0afffff9 // beq 1b
47 .inst 0xf57ff05b // dmb ish
47 .inst 0xe2730000 // rsbs r0, r3, #0 48 .inst 0xe2730000 // rsbs r0, r3, #0
48 .inst 0xe8bd00f0 // pop {r4, r5, r6, r7} 49 .inst 0xe8bd00f0 // pop {r4, r5, r6, r7}
49 .inst 0xe12fff1e // bx lr 50 .inst 0xe12fff1e // bx lr
@@ -55,11 +56,12 @@ __kuser_memory_barrier: // 0xffff0fa0
55 56
56 .align 5 57 .align 5
57__kuser_cmpxchg: // 0xffff0fc0 58__kuser_cmpxchg: // 0xffff0fc0
58 .inst 0xe1923e9f // 1: ldaex r3, [r2] 59 .inst 0xe1923f9f // 1: ldrex r3, [r2]
59 .inst 0xe0533000 // subs r3, r3, r0 60 .inst 0xe0533000 // subs r3, r3, r0
60 .inst 0x01823e91 // stlexeq r3, r1, [r2] 61 .inst 0x01823e91 // stlexeq r3, r1, [r2]
61 .inst 0x03330001 // teqeq r3, #1 62 .inst 0x03330001 // teqeq r3, #1
62 .inst 0x0afffffa // beq 1b 63 .inst 0x0afffffa // beq 1b
64 .inst 0xf57ff05b // dmb ish
63 .inst 0xe2730000 // rsbs r0, r3, #0 65 .inst 0xe2730000 // rsbs r0, r3, #0
64 .inst 0xe12fff1e // bx lr 66 .inst 0xe12fff1e // bx lr
65 67
diff --git a/arch/arm64/kernel/vdso.c b/arch/arm64/kernel/vdso.c
index 65d40cf6945a..a7149cae1615 100644
--- a/arch/arm64/kernel/vdso.c
+++ b/arch/arm64/kernel/vdso.c
@@ -238,6 +238,8 @@ void update_vsyscall(struct timekeeper *tk)
238 vdso_data->use_syscall = use_syscall; 238 vdso_data->use_syscall = use_syscall;
239 vdso_data->xtime_coarse_sec = xtime_coarse.tv_sec; 239 vdso_data->xtime_coarse_sec = xtime_coarse.tv_sec;
240 vdso_data->xtime_coarse_nsec = xtime_coarse.tv_nsec; 240 vdso_data->xtime_coarse_nsec = xtime_coarse.tv_nsec;
241 vdso_data->wtm_clock_sec = tk->wall_to_monotonic.tv_sec;
242 vdso_data->wtm_clock_nsec = tk->wall_to_monotonic.tv_nsec;
241 243
242 if (!use_syscall) { 244 if (!use_syscall) {
243 vdso_data->cs_cycle_last = tk->clock->cycle_last; 245 vdso_data->cs_cycle_last = tk->clock->cycle_last;
@@ -245,8 +247,6 @@ void update_vsyscall(struct timekeeper *tk)
245 vdso_data->xtime_clock_nsec = tk->xtime_nsec; 247 vdso_data->xtime_clock_nsec = tk->xtime_nsec;
246 vdso_data->cs_mult = tk->mult; 248 vdso_data->cs_mult = tk->mult;
247 vdso_data->cs_shift = tk->shift; 249 vdso_data->cs_shift = tk->shift;
248 vdso_data->wtm_clock_sec = tk->wall_to_monotonic.tv_sec;
249 vdso_data->wtm_clock_nsec = tk->wall_to_monotonic.tv_nsec;
250 } 250 }
251 251
252 smp_wmb(); 252 smp_wmb();
diff --git a/arch/arm64/kernel/vdso/Makefile b/arch/arm64/kernel/vdso/Makefile
index d8064af42e62..6d20b7d162d8 100644
--- a/arch/arm64/kernel/vdso/Makefile
+++ b/arch/arm64/kernel/vdso/Makefile
@@ -48,7 +48,7 @@ $(obj-vdso): %.o: %.S
48 48
49# Actual build commands 49# Actual build commands
50quiet_cmd_vdsold = VDSOL $@ 50quiet_cmd_vdsold = VDSOL $@
51 cmd_vdsold = $(CC) $(c_flags) -Wl,-T $^ -o $@ 51 cmd_vdsold = $(CC) $(c_flags) -Wl,-n -Wl,-T $^ -o $@
52quiet_cmd_vdsoas = VDSOA $@ 52quiet_cmd_vdsoas = VDSOA $@
53 cmd_vdsoas = $(CC) $(a_flags) -c -o $@ $< 53 cmd_vdsoas = $(CC) $(a_flags) -c -o $@ $<
54 54
diff --git a/arch/arm64/kernel/vdso/gettimeofday.S b/arch/arm64/kernel/vdso/gettimeofday.S
index f0a6d10b5211..fe652ffd34c2 100644
--- a/arch/arm64/kernel/vdso/gettimeofday.S
+++ b/arch/arm64/kernel/vdso/gettimeofday.S
@@ -103,6 +103,8 @@ ENTRY(__kernel_clock_gettime)
103 bl __do_get_tspec 103 bl __do_get_tspec
104 seqcnt_check w9, 1b 104 seqcnt_check w9, 1b
105 105
106 mov x30, x2
107
106 cmp w0, #CLOCK_MONOTONIC 108 cmp w0, #CLOCK_MONOTONIC
107 b.ne 6f 109 b.ne 6f
108 110
@@ -118,6 +120,9 @@ ENTRY(__kernel_clock_gettime)
118 ccmp w0, #CLOCK_MONOTONIC_COARSE, #0x4, ne 120 ccmp w0, #CLOCK_MONOTONIC_COARSE, #0x4, ne
119 b.ne 8f 121 b.ne 8f
120 122
123 /* xtime_coarse_nsec is already right-shifted */
124 mov x12, #0
125
121 /* Get coarse timespec. */ 126 /* Get coarse timespec. */
122 adr vdso_data, _vdso_data 127 adr vdso_data, _vdso_data
1233: seqcnt_acquire 1283: seqcnt_acquire
@@ -156,7 +161,7 @@ ENTRY(__kernel_clock_gettime)
156 lsr x11, x11, x12 161 lsr x11, x11, x12
157 stp x10, x11, [x1, #TSPEC_TV_SEC] 162 stp x10, x11, [x1, #TSPEC_TV_SEC]
158 mov x0, xzr 163 mov x0, xzr
159 ret x2 164 ret
1607: 1657:
161 mov x30, x2 166 mov x30, x2
1628: /* Syscall fallback. */ 1678: /* Syscall fallback. */
diff --git a/arch/arm64/lib/bitops.S b/arch/arm64/lib/bitops.S
index e5db797790d3..7dac371cc9a2 100644
--- a/arch/arm64/lib/bitops.S
+++ b/arch/arm64/lib/bitops.S
@@ -46,11 +46,12 @@ ENTRY( \name )
46 mov x2, #1 46 mov x2, #1
47 add x1, x1, x0, lsr #3 // Get word offset 47 add x1, x1, x0, lsr #3 // Get word offset
48 lsl x4, x2, x3 // Create mask 48 lsl x4, x2, x3 // Create mask
491: ldaxr x2, [x1] 491: ldxr x2, [x1]
50 lsr x0, x2, x3 // Save old value of bit 50 lsr x0, x2, x3 // Save old value of bit
51 \instr x2, x2, x4 // toggle bit 51 \instr x2, x2, x4 // toggle bit
52 stlxr w5, x2, [x1] 52 stlxr w5, x2, [x1]
53 cbnz w5, 1b 53 cbnz w5, 1b
54 dmb ish
54 and x0, x0, #1 55 and x0, x0, #1
553: ret 563: ret
56ENDPROC(\name ) 57ENDPROC(\name )
diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c
index 45b5ab54c9ee..fbd76785c5db 100644
--- a/arch/arm64/mm/dma-mapping.c
+++ b/arch/arm64/mm/dma-mapping.c
@@ -45,6 +45,7 @@ static void *arm64_swiotlb_alloc_coherent(struct device *dev, size_t size,
45 if (IS_ENABLED(CONFIG_DMA_CMA)) { 45 if (IS_ENABLED(CONFIG_DMA_CMA)) {
46 struct page *page; 46 struct page *page;
47 47
48 size = PAGE_ALIGN(size);
48 page = dma_alloc_from_contiguous(dev, size >> PAGE_SHIFT, 49 page = dma_alloc_from_contiguous(dev, size >> PAGE_SHIFT,
49 get_order(size)); 50 get_order(size));
50 if (!page) 51 if (!page)
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index f557ebbe7013..f8dc7e8fce6f 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -203,10 +203,18 @@ static void __init alloc_init_pmd(pud_t *pud, unsigned long addr,
203 do { 203 do {
204 next = pmd_addr_end(addr, end); 204 next = pmd_addr_end(addr, end);
205 /* try section mapping first */ 205 /* try section mapping first */
206 if (((addr | next | phys) & ~SECTION_MASK) == 0) 206 if (((addr | next | phys) & ~SECTION_MASK) == 0) {
207 pmd_t old_pmd =*pmd;
207 set_pmd(pmd, __pmd(phys | prot_sect_kernel)); 208 set_pmd(pmd, __pmd(phys | prot_sect_kernel));
208 else 209 /*
210 * Check for previous table entries created during
211 * boot (__create_page_tables) and flush them.
212 */
213 if (!pmd_none(old_pmd))
214 flush_tlb_all();
215 } else {
209 alloc_init_pte(pmd, addr, next, __phys_to_pfn(phys)); 216 alloc_init_pte(pmd, addr, next, __phys_to_pfn(phys));
217 }
210 phys += next - addr; 218 phys += next - addr;
211 } while (pmd++, addr = next, addr != end); 219 } while (pmd++, addr = next, addr != end);
212} 220}
diff --git a/arch/arm64/mm/pgd.c b/arch/arm64/mm/pgd.c
index 7083cdada657..62c6101df260 100644
--- a/arch/arm64/mm/pgd.c
+++ b/arch/arm64/mm/pgd.c
@@ -32,17 +32,10 @@
32 32
33pgd_t *pgd_alloc(struct mm_struct *mm) 33pgd_t *pgd_alloc(struct mm_struct *mm)
34{ 34{
35 pgd_t *new_pgd;
36
37 if (PGD_SIZE == PAGE_SIZE) 35 if (PGD_SIZE == PAGE_SIZE)
38 new_pgd = (pgd_t *)get_zeroed_page(GFP_KERNEL); 36 return (pgd_t *)get_zeroed_page(GFP_KERNEL);
39 else 37 else
40 new_pgd = kzalloc(PGD_SIZE, GFP_KERNEL); 38 return kzalloc(PGD_SIZE, GFP_KERNEL);
41
42 if (!new_pgd)
43 return NULL;
44
45 return new_pgd;
46} 39}
47 40
48void pgd_free(struct mm_struct *mm, pgd_t *pgd) 41void pgd_free(struct mm_struct *mm, pgd_t *pgd)
diff --git a/arch/ia64/include/asm/unistd.h b/arch/ia64/include/asm/unistd.h
index afd45e0d552e..ae763d8bf55a 100644
--- a/arch/ia64/include/asm/unistd.h
+++ b/arch/ia64/include/asm/unistd.h
@@ -11,7 +11,7 @@
11 11
12 12
13 13
14#define NR_syscalls 312 /* length of syscall table */ 14#define NR_syscalls 314 /* length of syscall table */
15 15
16/* 16/*
17 * The following defines stop scripts/checksyscalls.sh from complaining about 17 * The following defines stop scripts/checksyscalls.sh from complaining about
diff --git a/arch/ia64/include/uapi/asm/unistd.h b/arch/ia64/include/uapi/asm/unistd.h
index 34fd6fe46da1..715e85f858de 100644
--- a/arch/ia64/include/uapi/asm/unistd.h
+++ b/arch/ia64/include/uapi/asm/unistd.h
@@ -325,5 +325,7 @@
325#define __NR_process_vm_writev 1333 325#define __NR_process_vm_writev 1333
326#define __NR_accept4 1334 326#define __NR_accept4 1334
327#define __NR_finit_module 1335 327#define __NR_finit_module 1335
328#define __NR_sched_setattr 1336
329#define __NR_sched_getattr 1337
328 330
329#endif /* _UAPI_ASM_IA64_UNISTD_H */ 331#endif /* _UAPI_ASM_IA64_UNISTD_H */
diff --git a/arch/ia64/kernel/entry.S b/arch/ia64/kernel/entry.S
index ddea607f948a..fa8d61a312a7 100644
--- a/arch/ia64/kernel/entry.S
+++ b/arch/ia64/kernel/entry.S
@@ -1773,6 +1773,8 @@ sys_call_table:
1773 data8 sys_process_vm_writev 1773 data8 sys_process_vm_writev
1774 data8 sys_accept4 1774 data8 sys_accept4
1775 data8 sys_finit_module // 1335 1775 data8 sys_finit_module // 1335
1776 data8 sys_sched_setattr
1777 data8 sys_sched_getattr
1776 1778
1777 .org sys_call_table + 8*NR_syscalls // guard against failures to increase NR_syscalls 1779 .org sys_call_table + 8*NR_syscalls // guard against failures to increase NR_syscalls
1778#endif /* __IA64_ASM_PARAVIRTUALIZED_NATIVE */ 1780#endif /* __IA64_ASM_PARAVIRTUALIZED_NATIVE */
diff --git a/arch/microblaze/include/asm/delay.h b/arch/microblaze/include/asm/delay.h
index 05b7d39e4391..66fc24c24238 100644
--- a/arch/microblaze/include/asm/delay.h
+++ b/arch/microblaze/include/asm/delay.h
@@ -13,6 +13,8 @@
13#ifndef _ASM_MICROBLAZE_DELAY_H 13#ifndef _ASM_MICROBLAZE_DELAY_H
14#define _ASM_MICROBLAZE_DELAY_H 14#define _ASM_MICROBLAZE_DELAY_H
15 15
16#include <linux/param.h>
17
16extern inline void __delay(unsigned long loops) 18extern inline void __delay(unsigned long loops)
17{ 19{
18 asm volatile ("# __delay \n\t" \ 20 asm volatile ("# __delay \n\t" \
diff --git a/arch/microblaze/include/asm/io.h b/arch/microblaze/include/asm/io.h
index a2cea7206077..3fbb7f1db3bc 100644
--- a/arch/microblaze/include/asm/io.h
+++ b/arch/microblaze/include/asm/io.h
@@ -89,6 +89,11 @@ static inline unsigned int readl(const volatile void __iomem *addr)
89{ 89{
90 return le32_to_cpu(*(volatile unsigned int __force *)addr); 90 return le32_to_cpu(*(volatile unsigned int __force *)addr);
91} 91}
92#define readq readq
93static inline u64 readq(const volatile void __iomem *addr)
94{
95 return le64_to_cpu(__raw_readq(addr));
96}
92static inline void writeb(unsigned char v, volatile void __iomem *addr) 97static inline void writeb(unsigned char v, volatile void __iomem *addr)
93{ 98{
94 *(volatile unsigned char __force *)addr = v; 99 *(volatile unsigned char __force *)addr = v;
@@ -101,6 +106,7 @@ static inline void writel(unsigned int v, volatile void __iomem *addr)
101{ 106{
102 *(volatile unsigned int __force *)addr = cpu_to_le32(v); 107 *(volatile unsigned int __force *)addr = cpu_to_le32(v);
103} 108}
109#define writeq(b, addr) __raw_writeq(cpu_to_le64(b), addr)
104 110
105/* ioread and iowrite variants. thease are for now same as __raw_ 111/* ioread and iowrite variants. thease are for now same as __raw_
106 * variants of accessors. we might check for endianess in the feature 112 * variants of accessors. we might check for endianess in the feature
diff --git a/arch/microblaze/kernel/head.S b/arch/microblaze/kernel/head.S
index b7fb0438458c..17645b2e2f07 100644
--- a/arch/microblaze/kernel/head.S
+++ b/arch/microblaze/kernel/head.S
@@ -66,7 +66,7 @@ real_start:
66 mts rmsr, r0 66 mts rmsr, r0
67/* Disable stack protection from bootloader */ 67/* Disable stack protection from bootloader */
68 mts rslr, r0 68 mts rslr, r0
69 addi r8, r0, 0xFFFFFFF 69 addi r8, r0, 0xFFFFFFFF
70 mts rshr, r8 70 mts rshr, r8
71/* 71/*
72 * According to Xilinx, msrclr instruction behaves like 'mfs rX,rpc' 72 * According to Xilinx, msrclr instruction behaves like 'mfs rX,rpc'
diff --git a/arch/mips/alchemy/devboards/db1000.c b/arch/mips/alchemy/devboards/db1000.c
index 11f3ad20321c..5483906e0f86 100644
--- a/arch/mips/alchemy/devboards/db1000.c
+++ b/arch/mips/alchemy/devboards/db1000.c
@@ -534,13 +534,10 @@ static int __init db1000_dev_init(void)
534 s0 = AU1100_GPIO1_INT; 534 s0 = AU1100_GPIO1_INT;
535 s1 = AU1100_GPIO4_INT; 535 s1 = AU1100_GPIO4_INT;
536 536
537 gpio_request(19, "sd0_cd");
538 gpio_request(20, "sd1_cd");
537 gpio_direction_input(19); /* sd0 cd# */ 539 gpio_direction_input(19); /* sd0 cd# */
538 gpio_direction_input(20); /* sd1 cd# */ 540 gpio_direction_input(20); /* sd1 cd# */
539 gpio_direction_input(21); /* touch pendown# */
540 gpio_direction_input(207); /* SPI MISO */
541 gpio_direction_output(208, 0); /* SPI MOSI */
542 gpio_direction_output(209, 1); /* SPI SCK */
543 gpio_direction_output(210, 1); /* SPI CS# */
544 541
545 /* spi_gpio on SSI0 pins */ 542 /* spi_gpio on SSI0 pins */
546 pfc = __raw_readl((void __iomem *)SYS_PINFUNC); 543 pfc = __raw_readl((void __iomem *)SYS_PINFUNC);
diff --git a/arch/mips/include/asm/fpu.h b/arch/mips/include/asm/fpu.h
index cfe092fc720d..6b9749540edf 100644
--- a/arch/mips/include/asm/fpu.h
+++ b/arch/mips/include/asm/fpu.h
@@ -74,6 +74,8 @@ static inline int __enable_fpu(enum fpu_mode mode)
74 default: 74 default:
75 BUG(); 75 BUG();
76 } 76 }
77
78 return SIGFPE;
77} 79}
78 80
79#define __disable_fpu() \ 81#define __disable_fpu() \
diff --git a/arch/mips/include/uapi/asm/unistd.h b/arch/mips/include/uapi/asm/unistd.h
index 1dee279f9665..d6e154a9e6a5 100644
--- a/arch/mips/include/uapi/asm/unistd.h
+++ b/arch/mips/include/uapi/asm/unistd.h
@@ -369,16 +369,18 @@
369#define __NR_process_vm_writev (__NR_Linux + 346) 369#define __NR_process_vm_writev (__NR_Linux + 346)
370#define __NR_kcmp (__NR_Linux + 347) 370#define __NR_kcmp (__NR_Linux + 347)
371#define __NR_finit_module (__NR_Linux + 348) 371#define __NR_finit_module (__NR_Linux + 348)
372#define __NR_sched_setattr (__NR_Linux + 349)
373#define __NR_sched_getattr (__NR_Linux + 350)
372 374
373/* 375/*
374 * Offset of the last Linux o32 flavoured syscall 376 * Offset of the last Linux o32 flavoured syscall
375 */ 377 */
376#define __NR_Linux_syscalls 348 378#define __NR_Linux_syscalls 350
377 379
378#endif /* _MIPS_SIM == _MIPS_SIM_ABI32 */ 380#endif /* _MIPS_SIM == _MIPS_SIM_ABI32 */
379 381
380#define __NR_O32_Linux 4000 382#define __NR_O32_Linux 4000
381#define __NR_O32_Linux_syscalls 348 383#define __NR_O32_Linux_syscalls 350
382 384
383#if _MIPS_SIM == _MIPS_SIM_ABI64 385#if _MIPS_SIM == _MIPS_SIM_ABI64
384 386
@@ -695,16 +697,18 @@
695#define __NR_kcmp (__NR_Linux + 306) 697#define __NR_kcmp (__NR_Linux + 306)
696#define __NR_finit_module (__NR_Linux + 307) 698#define __NR_finit_module (__NR_Linux + 307)
697#define __NR_getdents64 (__NR_Linux + 308) 699#define __NR_getdents64 (__NR_Linux + 308)
700#define __NR_sched_setattr (__NR_Linux + 309)
701#define __NR_sched_getattr (__NR_Linux + 310)
698 702
699/* 703/*
700 * Offset of the last Linux 64-bit flavoured syscall 704 * Offset of the last Linux 64-bit flavoured syscall
701 */ 705 */
702#define __NR_Linux_syscalls 308 706#define __NR_Linux_syscalls 310
703 707
704#endif /* _MIPS_SIM == _MIPS_SIM_ABI64 */ 708#endif /* _MIPS_SIM == _MIPS_SIM_ABI64 */
705 709
706#define __NR_64_Linux 5000 710#define __NR_64_Linux 5000
707#define __NR_64_Linux_syscalls 308 711#define __NR_64_Linux_syscalls 310
708 712
709#if _MIPS_SIM == _MIPS_SIM_NABI32 713#if _MIPS_SIM == _MIPS_SIM_NABI32
710 714
@@ -1025,15 +1029,17 @@
1025#define __NR_process_vm_writev (__NR_Linux + 310) 1029#define __NR_process_vm_writev (__NR_Linux + 310)
1026#define __NR_kcmp (__NR_Linux + 311) 1030#define __NR_kcmp (__NR_Linux + 311)
1027#define __NR_finit_module (__NR_Linux + 312) 1031#define __NR_finit_module (__NR_Linux + 312)
1032#define __NR_sched_setattr (__NR_Linux + 313)
1033#define __NR_sched_getattr (__NR_Linux + 314)
1028 1034
1029/* 1035/*
1030 * Offset of the last N32 flavoured syscall 1036 * Offset of the last N32 flavoured syscall
1031 */ 1037 */
1032#define __NR_Linux_syscalls 312 1038#define __NR_Linux_syscalls 314
1033 1039
1034#endif /* _MIPS_SIM == _MIPS_SIM_NABI32 */ 1040#endif /* _MIPS_SIM == _MIPS_SIM_NABI32 */
1035 1041
1036#define __NR_N32_Linux 6000 1042#define __NR_N32_Linux 6000
1037#define __NR_N32_Linux_syscalls 312 1043#define __NR_N32_Linux_syscalls 314
1038 1044
1039#endif /* _UAPI_ASM_UNISTD_H */ 1045#endif /* _UAPI_ASM_UNISTD_H */
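
Each MIPS ABI numbers its syscalls from a fixed base (4000 for o32, 5000 for 64-bit, 6000 for n32), so the new entries land at 4349/4350, 5309/5310 and 6313/6314 respectively, matching the comments added to the call tables below. A small illustrative program that derives those numbers, purely as a sanity check and not part of the patch:

/* Hedged sketch: how the new MIPS syscall numbers fall out per ABI. */
#include <stdio.h>

int main(void)
{
	const struct { const char *abi; int base, setattr_off; } abis[] = {
		{ "o32", 4000, 349 },
		{ "64",  5000, 309 },
		{ "n32", 6000, 313 },
	};

	for (unsigned i = 0; i < sizeof(abis) / sizeof(abis[0]); i++)
		printf("%-3s: sched_setattr=%d sched_getattr=%d\n",
		       abis[i].abi,
		       abis[i].base + abis[i].setattr_off,
		       abis[i].base + abis[i].setattr_off + 1);
	return 0;
}
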
diff --git a/arch/mips/kernel/scall32-o32.S b/arch/mips/kernel/scall32-o32.S
index e8e541b40d86..a5b14f48e1af 100644
--- a/arch/mips/kernel/scall32-o32.S
+++ b/arch/mips/kernel/scall32-o32.S
@@ -563,3 +563,5 @@ EXPORT(sys_call_table)
563 PTR sys_process_vm_writev 563 PTR sys_process_vm_writev
564 PTR sys_kcmp 564 PTR sys_kcmp
565 PTR sys_finit_module 565 PTR sys_finit_module
566 PTR sys_sched_setattr
567 PTR sys_sched_getattr /* 4350 */
diff --git a/arch/mips/kernel/scall64-64.S b/arch/mips/kernel/scall64-64.S
index 57e3742fec59..b56e254beb15 100644
--- a/arch/mips/kernel/scall64-64.S
+++ b/arch/mips/kernel/scall64-64.S
@@ -425,4 +425,6 @@ EXPORT(sys_call_table)
425 PTR sys_kcmp 425 PTR sys_kcmp
426 PTR sys_finit_module 426 PTR sys_finit_module
427 PTR sys_getdents64 427 PTR sys_getdents64
428 PTR sys_sched_setattr
429 PTR sys_sched_getattr /* 5310 */
428 .size sys_call_table,.-sys_call_table 430 .size sys_call_table,.-sys_call_table
diff --git a/arch/mips/kernel/scall64-n32.S b/arch/mips/kernel/scall64-n32.S
index 2f48f5934399..f7e5b72cf481 100644
--- a/arch/mips/kernel/scall64-n32.S
+++ b/arch/mips/kernel/scall64-n32.S
@@ -418,4 +418,6 @@ EXPORT(sysn32_call_table)
418 PTR compat_sys_process_vm_writev /* 6310 */ 418 PTR compat_sys_process_vm_writev /* 6310 */
419 PTR sys_kcmp 419 PTR sys_kcmp
420 PTR sys_finit_module 420 PTR sys_finit_module
421 PTR sys_sched_setattr
422 PTR sys_sched_getattr
421 .size sysn32_call_table,.-sysn32_call_table 423 .size sysn32_call_table,.-sysn32_call_table
diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S
index f1acdb429f4f..6788727d91af 100644
--- a/arch/mips/kernel/scall64-o32.S
+++ b/arch/mips/kernel/scall64-o32.S
@@ -541,4 +541,6 @@ EXPORT(sys32_call_table)
541 PTR compat_sys_process_vm_writev 541 PTR compat_sys_process_vm_writev
542 PTR sys_kcmp 542 PTR sys_kcmp
543 PTR sys_finit_module 543 PTR sys_finit_module
544 PTR sys_sched_setattr
545 PTR sys_sched_getattr /* 4350 */
544 .size sys32_call_table,.-sys32_call_table 546 .size sys32_call_table,.-sys32_call_table
diff --git a/arch/parisc/hpux/fs.c b/arch/parisc/hpux/fs.c
index 88d0962de65a..2bedafea3d94 100644
--- a/arch/parisc/hpux/fs.c
+++ b/arch/parisc/hpux/fs.c
@@ -33,22 +33,9 @@
33 33
34int hpux_execve(struct pt_regs *regs) 34int hpux_execve(struct pt_regs *regs)
35{ 35{
36 int error; 36 return do_execve(getname((const char __user *) regs->gr[26]),
37 struct filename *filename;
38
39 filename = getname((const char __user *) regs->gr[26]);
40 error = PTR_ERR(filename);
41 if (IS_ERR(filename))
42 goto out;
43
44 error = do_execve(filename->name,
45 (const char __user *const __user *) regs->gr[25], 37 (const char __user *const __user *) regs->gr[25],
46 (const char __user *const __user *) regs->gr[24]); 38 (const char __user *const __user *) regs->gr[24]);
47
48 putname(filename);
49
50out:
51 return error;
52} 39}
53 40
54struct hpux_dirent { 41struct hpux_dirent {
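
hpux_execve() can now hand the struct filename returned by getname() straight to do_execve(), so the manual error check and putname() disappear. The removed lines relied on the kernel's error-pointer convention, in which a small negative errno value is encoded in the pointer itself; a minimal userspace sketch of that idiom, assuming an invented lookup function rather than getname(), is:

/* Illustrative sketch of the ERR_PTR/IS_ERR/PTR_ERR idiom, not kernel code. */
#include <errno.h>
#include <stdio.h>

#define MAX_ERRNO 4095

static inline void *ERR_PTR(long error) { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
	/* errors occupy the top MAX_ERRNO addresses of the pointer range */
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

static void *lookup_name(int ok)
{
	static char dummy[] = "file";

	return ok ? (void *)dummy : ERR_PTR(-ENOENT);
}

int main(void)
{
	void *name = lookup_name(0);

	if (IS_ERR(name))
		printf("error: %ld\n", PTR_ERR(name));	/* prints error: -2 */
	return 0;
}
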
diff --git a/arch/s390/appldata/appldata_base.c b/arch/s390/appldata/appldata_base.c
index 4c4a1cef5208..47c8630c93cd 100644
--- a/arch/s390/appldata/appldata_base.c
+++ b/arch/s390/appldata/appldata_base.c
@@ -529,6 +529,7 @@ static int __init appldata_init(void)
529{ 529{
530 int rc; 530 int rc;
531 531
532 init_virt_timer(&appldata_timer);
532 appldata_timer.function = appldata_timer_function; 533 appldata_timer.function = appldata_timer_function;
533 appldata_timer.data = (unsigned long) &appldata_work; 534 appldata_timer.data = (unsigned long) &appldata_work;
534 535
diff --git a/arch/s390/crypto/aes_s390.c b/arch/s390/crypto/aes_s390.c
index b3feabd39f31..cf3c0089bef2 100644
--- a/arch/s390/crypto/aes_s390.c
+++ b/arch/s390/crypto/aes_s390.c
@@ -25,6 +25,7 @@
25#include <linux/err.h> 25#include <linux/err.h>
26#include <linux/module.h> 26#include <linux/module.h>
27#include <linux/init.h> 27#include <linux/init.h>
28#include <linux/spinlock.h>
28#include "crypt_s390.h" 29#include "crypt_s390.h"
29 30
30#define AES_KEYLEN_128 1 31#define AES_KEYLEN_128 1
@@ -32,6 +33,7 @@
32#define AES_KEYLEN_256 4 33#define AES_KEYLEN_256 4
33 34
34static u8 *ctrblk; 35static u8 *ctrblk;
36static DEFINE_SPINLOCK(ctrblk_lock);
35static char keylen_flag; 37static char keylen_flag;
36 38
37struct s390_aes_ctx { 39struct s390_aes_ctx {
@@ -758,43 +760,67 @@ static int ctr_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
758 return aes_set_key(tfm, in_key, key_len); 760 return aes_set_key(tfm, in_key, key_len);
759} 761}
760 762
763static unsigned int __ctrblk_init(u8 *ctrptr, unsigned int nbytes)
764{
765 unsigned int i, n;
766
767 /* only use complete blocks, max. PAGE_SIZE */
768 n = (nbytes > PAGE_SIZE) ? PAGE_SIZE : nbytes & ~(AES_BLOCK_SIZE - 1);
769 for (i = AES_BLOCK_SIZE; i < n; i += AES_BLOCK_SIZE) {
770 memcpy(ctrptr + i, ctrptr + i - AES_BLOCK_SIZE,
771 AES_BLOCK_SIZE);
772 crypto_inc(ctrptr + i, AES_BLOCK_SIZE);
773 }
774 return n;
775}
776
761static int ctr_aes_crypt(struct blkcipher_desc *desc, long func, 777static int ctr_aes_crypt(struct blkcipher_desc *desc, long func,
762 struct s390_aes_ctx *sctx, struct blkcipher_walk *walk) 778 struct s390_aes_ctx *sctx, struct blkcipher_walk *walk)
763{ 779{
764 int ret = blkcipher_walk_virt_block(desc, walk, AES_BLOCK_SIZE); 780 int ret = blkcipher_walk_virt_block(desc, walk, AES_BLOCK_SIZE);
765 unsigned int i, n, nbytes; 781 unsigned int n, nbytes;
766 u8 buf[AES_BLOCK_SIZE]; 782 u8 buf[AES_BLOCK_SIZE], ctrbuf[AES_BLOCK_SIZE];
767 u8 *out, *in; 783 u8 *out, *in, *ctrptr = ctrbuf;
768 784
769 if (!walk->nbytes) 785 if (!walk->nbytes)
770 return ret; 786 return ret;
771 787
772 memcpy(ctrblk, walk->iv, AES_BLOCK_SIZE); 788 if (spin_trylock(&ctrblk_lock))
789 ctrptr = ctrblk;
790
791 memcpy(ctrptr, walk->iv, AES_BLOCK_SIZE);
773 while ((nbytes = walk->nbytes) >= AES_BLOCK_SIZE) { 792 while ((nbytes = walk->nbytes) >= AES_BLOCK_SIZE) {
774 out = walk->dst.virt.addr; 793 out = walk->dst.virt.addr;
775 in = walk->src.virt.addr; 794 in = walk->src.virt.addr;
776 while (nbytes >= AES_BLOCK_SIZE) { 795 while (nbytes >= AES_BLOCK_SIZE) {
777 /* only use complete blocks, max. PAGE_SIZE */ 796 if (ctrptr == ctrblk)
778 n = (nbytes > PAGE_SIZE) ? PAGE_SIZE : 797 n = __ctrblk_init(ctrptr, nbytes);
779 nbytes & ~(AES_BLOCK_SIZE - 1); 798 else
780 for (i = AES_BLOCK_SIZE; i < n; i += AES_BLOCK_SIZE) { 799 n = AES_BLOCK_SIZE;
781 memcpy(ctrblk + i, ctrblk + i - AES_BLOCK_SIZE, 800 ret = crypt_s390_kmctr(func, sctx->key, out, in,
782 AES_BLOCK_SIZE); 801 n, ctrptr);
783 crypto_inc(ctrblk + i, AES_BLOCK_SIZE); 802 if (ret < 0 || ret != n) {
784 } 803 if (ctrptr == ctrblk)
785 ret = crypt_s390_kmctr(func, sctx->key, out, in, n, ctrblk); 804 spin_unlock(&ctrblk_lock);
786 if (ret < 0 || ret != n)
787 return -EIO; 805 return -EIO;
806 }
788 if (n > AES_BLOCK_SIZE) 807 if (n > AES_BLOCK_SIZE)
789 memcpy(ctrblk, ctrblk + n - AES_BLOCK_SIZE, 808 memcpy(ctrptr, ctrptr + n - AES_BLOCK_SIZE,
790 AES_BLOCK_SIZE); 809 AES_BLOCK_SIZE);
791 crypto_inc(ctrblk, AES_BLOCK_SIZE); 810 crypto_inc(ctrptr, AES_BLOCK_SIZE);
792 out += n; 811 out += n;
793 in += n; 812 in += n;
794 nbytes -= n; 813 nbytes -= n;
795 } 814 }
796 ret = blkcipher_walk_done(desc, walk, nbytes); 815 ret = blkcipher_walk_done(desc, walk, nbytes);
797 } 816 }
817 if (ctrptr == ctrblk) {
818 if (nbytes)
819 memcpy(ctrbuf, ctrptr, AES_BLOCK_SIZE);
820 else
821 memcpy(walk->iv, ctrptr, AES_BLOCK_SIZE);
822 spin_unlock(&ctrblk_lock);
823 }
798 /* 824 /*
799 * final block may be < AES_BLOCK_SIZE, copy only nbytes 825 * final block may be < AES_BLOCK_SIZE, copy only nbytes
800 */ 826 */
@@ -802,14 +828,15 @@ static int ctr_aes_crypt(struct blkcipher_desc *desc, long func,
802 out = walk->dst.virt.addr; 828 out = walk->dst.virt.addr;
803 in = walk->src.virt.addr; 829 in = walk->src.virt.addr;
804 ret = crypt_s390_kmctr(func, sctx->key, buf, in, 830 ret = crypt_s390_kmctr(func, sctx->key, buf, in,
805 AES_BLOCK_SIZE, ctrblk); 831 AES_BLOCK_SIZE, ctrbuf);
806 if (ret < 0 || ret != AES_BLOCK_SIZE) 832 if (ret < 0 || ret != AES_BLOCK_SIZE)
807 return -EIO; 833 return -EIO;
808 memcpy(out, buf, nbytes); 834 memcpy(out, buf, nbytes);
809 crypto_inc(ctrblk, AES_BLOCK_SIZE); 835 crypto_inc(ctrbuf, AES_BLOCK_SIZE);
810 ret = blkcipher_walk_done(desc, walk, 0); 836 ret = blkcipher_walk_done(desc, walk, 0);
837 memcpy(walk->iv, ctrbuf, AES_BLOCK_SIZE);
811 } 838 }
812 memcpy(walk->iv, ctrblk, AES_BLOCK_SIZE); 839
813 return ret; 840 return ret;
814} 841}
815 842
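
The CTR rework lets several requests run concurrently: the shared, page-sized counter block (ctrblk) is now taken with spin_trylock(), and a caller that loses the race falls back to a private single-block buffer on its own stack instead of serialising on the lock. A hedged userspace sketch of that trylock-with-fallback idea, using pthreads and invented buffer names rather than the kernel's spinlock and ctrblk:

/*
 * Illustrative userspace sketch, not the kernel code: take a shared
 * fast-path buffer with trylock, otherwise fall back to a small
 * private buffer so the caller never has to wait.
 */
#include <pthread.h>
#include <stdio.h>
#include <string.h>

static pthread_mutex_t shared_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned char shared_buf[4096];		/* stand-in for ctrblk */

static void process(const unsigned char *iv, size_t ivlen)
{
	unsigned char local_buf[16];		/* stand-in for ctrbuf[] */
	unsigned char *buf = local_buf;
	size_t buflen = sizeof(local_buf);
	int have_shared = (pthread_mutex_trylock(&shared_lock) == 0);

	if (have_shared) {
		buf = shared_buf;		/* fast path: big shared buffer */
		buflen = sizeof(shared_buf);
	}

	memcpy(buf, iv, ivlen < buflen ? ivlen : buflen);
	/* ... do the bulk work with buf/buflen ... */

	if (have_shared)
		pthread_mutex_unlock(&shared_lock);

	printf("used %s buffer (%zu bytes)\n",
	       have_shared ? "shared" : "private", buflen);
}

int main(void)
{
	unsigned char iv[16] = { 0 };

	process(iv, sizeof(iv));
	return 0;
}
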
diff --git a/arch/s390/crypto/des_s390.c b/arch/s390/crypto/des_s390.c
index 200f2a1b599d..0a5aac8a9412 100644
--- a/arch/s390/crypto/des_s390.c
+++ b/arch/s390/crypto/des_s390.c
@@ -25,6 +25,7 @@
25#define DES3_KEY_SIZE (3 * DES_KEY_SIZE) 25#define DES3_KEY_SIZE (3 * DES_KEY_SIZE)
26 26
27static u8 *ctrblk; 27static u8 *ctrblk;
28static DEFINE_SPINLOCK(ctrblk_lock);
28 29
29struct s390_des_ctx { 30struct s390_des_ctx {
30 u8 iv[DES_BLOCK_SIZE]; 31 u8 iv[DES_BLOCK_SIZE];
@@ -105,29 +106,35 @@ static int ecb_desall_crypt(struct blkcipher_desc *desc, long func,
105} 106}
106 107
107static int cbc_desall_crypt(struct blkcipher_desc *desc, long func, 108static int cbc_desall_crypt(struct blkcipher_desc *desc, long func,
108 u8 *iv, struct blkcipher_walk *walk) 109 struct blkcipher_walk *walk)
109{ 110{
111 struct s390_des_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
110 int ret = blkcipher_walk_virt(desc, walk); 112 int ret = blkcipher_walk_virt(desc, walk);
111 unsigned int nbytes = walk->nbytes; 113 unsigned int nbytes = walk->nbytes;
114 struct {
115 u8 iv[DES_BLOCK_SIZE];
116 u8 key[DES3_KEY_SIZE];
117 } param;
112 118
113 if (!nbytes) 119 if (!nbytes)
114 goto out; 120 goto out;
115 121
116 memcpy(iv, walk->iv, DES_BLOCK_SIZE); 122 memcpy(param.iv, walk->iv, DES_BLOCK_SIZE);
123 memcpy(param.key, ctx->key, DES3_KEY_SIZE);
117 do { 124 do {
118 /* only use complete blocks */ 125 /* only use complete blocks */
119 unsigned int n = nbytes & ~(DES_BLOCK_SIZE - 1); 126 unsigned int n = nbytes & ~(DES_BLOCK_SIZE - 1);
120 u8 *out = walk->dst.virt.addr; 127 u8 *out = walk->dst.virt.addr;
121 u8 *in = walk->src.virt.addr; 128 u8 *in = walk->src.virt.addr;
122 129
123 ret = crypt_s390_kmc(func, iv, out, in, n); 130 ret = crypt_s390_kmc(func, &param, out, in, n);
124 if (ret < 0 || ret != n) 131 if (ret < 0 || ret != n)
125 return -EIO; 132 return -EIO;
126 133
127 nbytes &= DES_BLOCK_SIZE - 1; 134 nbytes &= DES_BLOCK_SIZE - 1;
128 ret = blkcipher_walk_done(desc, walk, nbytes); 135 ret = blkcipher_walk_done(desc, walk, nbytes);
129 } while ((nbytes = walk->nbytes)); 136 } while ((nbytes = walk->nbytes));
130 memcpy(walk->iv, iv, DES_BLOCK_SIZE); 137 memcpy(walk->iv, param.iv, DES_BLOCK_SIZE);
131 138
132out: 139out:
133 return ret; 140 return ret;
@@ -179,22 +186,20 @@ static int cbc_des_encrypt(struct blkcipher_desc *desc,
179 struct scatterlist *dst, struct scatterlist *src, 186 struct scatterlist *dst, struct scatterlist *src,
180 unsigned int nbytes) 187 unsigned int nbytes)
181{ 188{
182 struct s390_des_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
183 struct blkcipher_walk walk; 189 struct blkcipher_walk walk;
184 190
185 blkcipher_walk_init(&walk, dst, src, nbytes); 191 blkcipher_walk_init(&walk, dst, src, nbytes);
186 return cbc_desall_crypt(desc, KMC_DEA_ENCRYPT, ctx->iv, &walk); 192 return cbc_desall_crypt(desc, KMC_DEA_ENCRYPT, &walk);
187} 193}
188 194
189static int cbc_des_decrypt(struct blkcipher_desc *desc, 195static int cbc_des_decrypt(struct blkcipher_desc *desc,
190 struct scatterlist *dst, struct scatterlist *src, 196 struct scatterlist *dst, struct scatterlist *src,
191 unsigned int nbytes) 197 unsigned int nbytes)
192{ 198{
193 struct s390_des_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
194 struct blkcipher_walk walk; 199 struct blkcipher_walk walk;
195 200
196 blkcipher_walk_init(&walk, dst, src, nbytes); 201 blkcipher_walk_init(&walk, dst, src, nbytes);
197 return cbc_desall_crypt(desc, KMC_DEA_DECRYPT, ctx->iv, &walk); 202 return cbc_desall_crypt(desc, KMC_DEA_DECRYPT, &walk);
198} 203}
199 204
200static struct crypto_alg cbc_des_alg = { 205static struct crypto_alg cbc_des_alg = {
@@ -327,22 +332,20 @@ static int cbc_des3_encrypt(struct blkcipher_desc *desc,
327 struct scatterlist *dst, struct scatterlist *src, 332 struct scatterlist *dst, struct scatterlist *src,
328 unsigned int nbytes) 333 unsigned int nbytes)
329{ 334{
330 struct s390_des_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
331 struct blkcipher_walk walk; 335 struct blkcipher_walk walk;
332 336
333 blkcipher_walk_init(&walk, dst, src, nbytes); 337 blkcipher_walk_init(&walk, dst, src, nbytes);
334 return cbc_desall_crypt(desc, KMC_TDEA_192_ENCRYPT, ctx->iv, &walk); 338 return cbc_desall_crypt(desc, KMC_TDEA_192_ENCRYPT, &walk);
335} 339}
336 340
337static int cbc_des3_decrypt(struct blkcipher_desc *desc, 341static int cbc_des3_decrypt(struct blkcipher_desc *desc,
338 struct scatterlist *dst, struct scatterlist *src, 342 struct scatterlist *dst, struct scatterlist *src,
339 unsigned int nbytes) 343 unsigned int nbytes)
340{ 344{
341 struct s390_des_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
342 struct blkcipher_walk walk; 345 struct blkcipher_walk walk;
343 346
344 blkcipher_walk_init(&walk, dst, src, nbytes); 347 blkcipher_walk_init(&walk, dst, src, nbytes);
345 return cbc_desall_crypt(desc, KMC_TDEA_192_DECRYPT, ctx->iv, &walk); 348 return cbc_desall_crypt(desc, KMC_TDEA_192_DECRYPT, &walk);
346} 349}
347 350
348static struct crypto_alg cbc_des3_alg = { 351static struct crypto_alg cbc_des3_alg = {
@@ -366,54 +369,80 @@ static struct crypto_alg cbc_des3_alg = {
366 } 369 }
367}; 370};
368 371
372static unsigned int __ctrblk_init(u8 *ctrptr, unsigned int nbytes)
373{
374 unsigned int i, n;
375
376 /* align to block size, max. PAGE_SIZE */
377 n = (nbytes > PAGE_SIZE) ? PAGE_SIZE : nbytes & ~(DES_BLOCK_SIZE - 1);
378 for (i = DES_BLOCK_SIZE; i < n; i += DES_BLOCK_SIZE) {
379 memcpy(ctrptr + i, ctrptr + i - DES_BLOCK_SIZE, DES_BLOCK_SIZE);
380 crypto_inc(ctrptr + i, DES_BLOCK_SIZE);
381 }
382 return n;
383}
384
369static int ctr_desall_crypt(struct blkcipher_desc *desc, long func, 385static int ctr_desall_crypt(struct blkcipher_desc *desc, long func,
370 struct s390_des_ctx *ctx, struct blkcipher_walk *walk) 386 struct s390_des_ctx *ctx,
387 struct blkcipher_walk *walk)
371{ 388{
372 int ret = blkcipher_walk_virt_block(desc, walk, DES_BLOCK_SIZE); 389 int ret = blkcipher_walk_virt_block(desc, walk, DES_BLOCK_SIZE);
373 unsigned int i, n, nbytes; 390 unsigned int n, nbytes;
374 u8 buf[DES_BLOCK_SIZE]; 391 u8 buf[DES_BLOCK_SIZE], ctrbuf[DES_BLOCK_SIZE];
375 u8 *out, *in; 392 u8 *out, *in, *ctrptr = ctrbuf;
393
394 if (!walk->nbytes)
395 return ret;
376 396
377 memcpy(ctrblk, walk->iv, DES_BLOCK_SIZE); 397 if (spin_trylock(&ctrblk_lock))
398 ctrptr = ctrblk;
399
400 memcpy(ctrptr, walk->iv, DES_BLOCK_SIZE);
378 while ((nbytes = walk->nbytes) >= DES_BLOCK_SIZE) { 401 while ((nbytes = walk->nbytes) >= DES_BLOCK_SIZE) {
379 out = walk->dst.virt.addr; 402 out = walk->dst.virt.addr;
380 in = walk->src.virt.addr; 403 in = walk->src.virt.addr;
381 while (nbytes >= DES_BLOCK_SIZE) { 404 while (nbytes >= DES_BLOCK_SIZE) {
382 /* align to block size, max. PAGE_SIZE */ 405 if (ctrptr == ctrblk)
383 n = (nbytes > PAGE_SIZE) ? PAGE_SIZE : 406 n = __ctrblk_init(ctrptr, nbytes);
384 nbytes & ~(DES_BLOCK_SIZE - 1); 407 else
385 for (i = DES_BLOCK_SIZE; i < n; i += DES_BLOCK_SIZE) { 408 n = DES_BLOCK_SIZE;
386 memcpy(ctrblk + i, ctrblk + i - DES_BLOCK_SIZE, 409 ret = crypt_s390_kmctr(func, ctx->key, out, in,
387 DES_BLOCK_SIZE); 410 n, ctrptr);
388 crypto_inc(ctrblk + i, DES_BLOCK_SIZE); 411 if (ret < 0 || ret != n) {
389 } 412 if (ctrptr == ctrblk)
390 ret = crypt_s390_kmctr(func, ctx->key, out, in, n, ctrblk); 413 spin_unlock(&ctrblk_lock);
391 if (ret < 0 || ret != n)
392 return -EIO; 414 return -EIO;
415 }
393 if (n > DES_BLOCK_SIZE) 416 if (n > DES_BLOCK_SIZE)
394 memcpy(ctrblk, ctrblk + n - DES_BLOCK_SIZE, 417 memcpy(ctrptr, ctrptr + n - DES_BLOCK_SIZE,
395 DES_BLOCK_SIZE); 418 DES_BLOCK_SIZE);
396 crypto_inc(ctrblk, DES_BLOCK_SIZE); 419 crypto_inc(ctrptr, DES_BLOCK_SIZE);
397 out += n; 420 out += n;
398 in += n; 421 in += n;
399 nbytes -= n; 422 nbytes -= n;
400 } 423 }
401 ret = blkcipher_walk_done(desc, walk, nbytes); 424 ret = blkcipher_walk_done(desc, walk, nbytes);
402 } 425 }
403 426 if (ctrptr == ctrblk) {
427 if (nbytes)
428 memcpy(ctrbuf, ctrptr, DES_BLOCK_SIZE);
429 else
430 memcpy(walk->iv, ctrptr, DES_BLOCK_SIZE);
431 spin_unlock(&ctrblk_lock);
432 }
404 /* final block may be < DES_BLOCK_SIZE, copy only nbytes */ 433 /* final block may be < DES_BLOCK_SIZE, copy only nbytes */
405 if (nbytes) { 434 if (nbytes) {
406 out = walk->dst.virt.addr; 435 out = walk->dst.virt.addr;
407 in = walk->src.virt.addr; 436 in = walk->src.virt.addr;
408 ret = crypt_s390_kmctr(func, ctx->key, buf, in, 437 ret = crypt_s390_kmctr(func, ctx->key, buf, in,
409 DES_BLOCK_SIZE, ctrblk); 438 DES_BLOCK_SIZE, ctrbuf);
410 if (ret < 0 || ret != DES_BLOCK_SIZE) 439 if (ret < 0 || ret != DES_BLOCK_SIZE)
411 return -EIO; 440 return -EIO;
412 memcpy(out, buf, nbytes); 441 memcpy(out, buf, nbytes);
413 crypto_inc(ctrblk, DES_BLOCK_SIZE); 442 crypto_inc(ctrbuf, DES_BLOCK_SIZE);
414 ret = blkcipher_walk_done(desc, walk, 0); 443 ret = blkcipher_walk_done(desc, walk, 0);
444 memcpy(walk->iv, ctrbuf, DES_BLOCK_SIZE);
415 } 445 }
416 memcpy(walk->iv, ctrblk, DES_BLOCK_SIZE);
417 return ret; 446 return ret;
418} 447}
419 448
diff --git a/arch/s390/kernel/head64.S b/arch/s390/kernel/head64.S
index b9e25ae2579c..d7c00507568a 100644
--- a/arch/s390/kernel/head64.S
+++ b/arch/s390/kernel/head64.S
@@ -59,7 +59,7 @@ ENTRY(startup_continue)
59 .quad 0 # cr12: tracing off 59 .quad 0 # cr12: tracing off
60 .quad 0 # cr13: home space segment table 60 .quad 0 # cr13: home space segment table
61 .quad 0xc0000000 # cr14: machine check handling off 61 .quad 0xc0000000 # cr14: machine check handling off
62 .quad 0 # cr15: linkage stack operations 62 .quad .Llinkage_stack # cr15: linkage stack operations
63.Lpcmsk:.quad 0x0000000180000000 63.Lpcmsk:.quad 0x0000000180000000
64.L4malign:.quad 0xffffffffffc00000 64.L4malign:.quad 0xffffffffffc00000
65.Lscan2g:.quad 0x80000000 + 0x20000 - 8 # 2GB + 128K - 8 65.Lscan2g:.quad 0x80000000 + 0x20000 - 8 # 2GB + 128K - 8
@@ -67,12 +67,15 @@ ENTRY(startup_continue)
67.Lparmaddr: 67.Lparmaddr:
68 .quad PARMAREA 68 .quad PARMAREA
69 .align 64 69 .align 64
70.Lduct: .long 0,0,0,0,.Lduald,0,0,0 70.Lduct: .long 0,.Laste,.Laste,0,.Lduald,0,0,0
71 .long 0,0,0,0,0,0,0,0 71 .long 0,0,0,0,0,0,0,0
72.Laste: .quad 0,0xffffffffffffffff,0,0,0,0,0,0
72 .align 128 73 .align 128
73.Lduald:.rept 8 74.Lduald:.rept 8
74 .long 0x80000000,0,0,0 # invalid access-list entries 75 .long 0x80000000,0,0,0 # invalid access-list entries
75 .endr 76 .endr
77.Llinkage_stack:
78 .long 0,0,0x89000000,0,0,0,0x8a000000,0
76 79
77ENTRY(_ehead) 80ENTRY(_ehead)
78 81
diff --git a/arch/s390/mm/page-states.c b/arch/s390/mm/page-states.c
index a90d45e9dfb0..27c50f4d90cb 100644
--- a/arch/s390/mm/page-states.c
+++ b/arch/s390/mm/page-states.c
@@ -12,6 +12,8 @@
12#include <linux/mm.h> 12#include <linux/mm.h>
13#include <linux/gfp.h> 13#include <linux/gfp.h>
14#include <linux/init.h> 14#include <linux/init.h>
15#include <asm/setup.h>
16#include <asm/ipl.h>
15 17
16#define ESSA_SET_STABLE 1 18#define ESSA_SET_STABLE 1
17#define ESSA_SET_UNUSED 2 19#define ESSA_SET_UNUSED 2
@@ -41,6 +43,14 @@ void __init cmma_init(void)
41 43
42 if (!cmma_flag) 44 if (!cmma_flag)
43 return; 45 return;
46 /*
47 * Disable CMM for dump, otherwise the tprot based memory
48 * detection can fail because of unstable pages.
49 */
50 if (OLDMEM_BASE || ipl_info.type == IPL_TYPE_FCP_DUMP) {
51 cmma_flag = 0;
52 return;
53 }
44 asm volatile( 54 asm volatile(
45 " .insn rrf,0xb9ab0000,%1,%1,0,0\n" 55 " .insn rrf,0xb9ab0000,%1,%1,0,0\n"
46 "0: la %0,0\n" 56 "0: la %0,0\n"
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 940e50ebfafa..0af5250d914f 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -444,6 +444,7 @@ config X86_INTEL_MID
444 bool "Intel MID platform support" 444 bool "Intel MID platform support"
445 depends on X86_32 445 depends on X86_32
446 depends on X86_EXTENDED_PLATFORM 446 depends on X86_EXTENDED_PLATFORM
447 depends on X86_PLATFORM_DEVICES
447 depends on PCI 448 depends on PCI
448 depends on PCI_GOANY 449 depends on PCI_GOANY
449 depends on X86_IO_APIC 450 depends on X86_IO_APIC
@@ -1051,9 +1052,9 @@ config MICROCODE_INTEL
 1051 This option enables microcode patch loading support for Intel 1052 This option enables microcode patch loading support for Intel
1052 processors. 1053 processors.
1053 1054
1054 For latest news and information on obtaining all the required 1055 For the current Intel microcode data package go to
1055 Intel ingredients for this driver, check: 1056 <https://downloadcenter.intel.com> and search for
1056 <http://www.urbanmyth.org/microcode/>. 1057 'Linux Processor Microcode Data File'.
1057 1058
1058config MICROCODE_AMD 1059config MICROCODE_AMD
1059 bool "AMD microcode loading support" 1060 bool "AMD microcode loading support"
diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug
index 0f3621ed1db6..321a52ccf63a 100644
--- a/arch/x86/Kconfig.debug
+++ b/arch/x86/Kconfig.debug
@@ -184,6 +184,7 @@ config HAVE_MMIOTRACE_SUPPORT
184config X86_DECODER_SELFTEST 184config X86_DECODER_SELFTEST
185 bool "x86 instruction decoder selftest" 185 bool "x86 instruction decoder selftest"
186 depends on DEBUG_KERNEL && KPROBES 186 depends on DEBUG_KERNEL && KPROBES
187 depends on !COMPILE_TEST
187 ---help--- 188 ---help---
188 Perform x86 instruction decoder selftests at build time. 189 Perform x86 instruction decoder selftests at build time.
189 This option is useful for checking the sanity of x86 instruction 190 This option is useful for checking the sanity of x86 instruction
diff --git a/arch/x86/include/asm/amd_nb.h b/arch/x86/include/asm/amd_nb.h
index a54ee1d054d9..aaac3b2fb746 100644
--- a/arch/x86/include/asm/amd_nb.h
+++ b/arch/x86/include/asm/amd_nb.h
@@ -19,7 +19,7 @@ extern int amd_cache_northbridges(void);
19extern void amd_flush_garts(void); 19extern void amd_flush_garts(void);
20extern int amd_numa_init(void); 20extern int amd_numa_init(void);
21extern int amd_get_subcaches(int); 21extern int amd_get_subcaches(int);
22extern int amd_set_subcaches(int, int); 22extern int amd_set_subcaches(int, unsigned long);
23 23
24struct amd_l3_cache { 24struct amd_l3_cache {
25 unsigned indices; 25 unsigned indices;
diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
index bbc8b12fa443..5ad38ad07890 100644
--- a/arch/x86/include/asm/pgtable.h
+++ b/arch/x86/include/asm/pgtable.h
@@ -445,10 +445,20 @@ static inline int pte_same(pte_t a, pte_t b)
445 return a.pte == b.pte; 445 return a.pte == b.pte;
446} 446}
447 447
448static inline int pteval_present(pteval_t pteval)
449{
450 /*
451 * Yes Linus, _PAGE_PROTNONE == _PAGE_NUMA. Expressing it this
452 * way clearly states that the intent is that protnone and numa
453 * hinting ptes are considered present for the purposes of
454 * pagetable operations like zapping, protection changes, gup etc.
455 */
456 return pteval & (_PAGE_PRESENT | _PAGE_PROTNONE | _PAGE_NUMA);
457}
458
448static inline int pte_present(pte_t a) 459static inline int pte_present(pte_t a)
449{ 460{
450 return pte_flags(a) & (_PAGE_PRESENT | _PAGE_PROTNONE | 461 return pteval_present(pte_flags(a));
451 _PAGE_NUMA);
452} 462}
453 463
454#define pte_accessible pte_accessible 464#define pte_accessible pte_accessible
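
pteval_present() folds the three "present-ish" bits into one helper so that pte_present() here and the Xen pte conversion further down treat protnone and NUMA-hinting ptes as present. A standalone sketch (the bit values are illustrative stand-ins, not the real pgtable_types.h definitions; as the patch comment notes, _PAGE_PROTNONE and _PAGE_NUMA are the same bit):

#include <stdbool.h>
#include <stdint.h>

typedef uint64_t pteval_t;

/* Illustrative bit assignments only. */
#define _PAGE_PRESENT	(1ULL << 0)
#define _PAGE_PROTNONE	(1ULL << 8)
#define _PAGE_NUMA	_PAGE_PROTNONE	/* shared bit, per the patch comment */

/* Present for the purposes of zapping, protection changes, gup etc. */
static bool pteval_present(pteval_t pteval)
{
	return pteval & (_PAGE_PRESENT | _PAGE_PROTNONE | _PAGE_NUMA);
}

static bool pte_present(pteval_t pte_flags)
{
	return pteval_present(pte_flags);
}
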
diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h
index e6d90babc245..04905bfc508b 100644
--- a/arch/x86/include/asm/tlbflush.h
+++ b/arch/x86/include/asm/tlbflush.h
@@ -62,7 +62,7 @@ static inline void __flush_tlb_all(void)
62 62
63static inline void __flush_tlb_one(unsigned long addr) 63static inline void __flush_tlb_one(unsigned long addr)
64{ 64{
65 count_vm_event(NR_TLB_LOCAL_FLUSH_ONE); 65 count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ONE);
66 __flush_tlb_single(addr); 66 __flush_tlb_single(addr);
67} 67}
68 68
@@ -93,13 +93,13 @@ static inline void __flush_tlb_one(unsigned long addr)
93 */ 93 */
94static inline void __flush_tlb_up(void) 94static inline void __flush_tlb_up(void)
95{ 95{
96 count_vm_event(NR_TLB_LOCAL_FLUSH_ALL); 96 count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
97 __flush_tlb(); 97 __flush_tlb();
98} 98}
99 99
100static inline void flush_tlb_all(void) 100static inline void flush_tlb_all(void)
101{ 101{
102 count_vm_event(NR_TLB_LOCAL_FLUSH_ALL); 102 count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
103 __flush_tlb_all(); 103 __flush_tlb_all();
104} 104}
105 105
diff --git a/arch/x86/include/asm/xen/page.h b/arch/x86/include/asm/xen/page.h
index 787e1bb5aafc..3e276eb23d1b 100644
--- a/arch/x86/include/asm/xen/page.h
+++ b/arch/x86/include/asm/xen/page.h
@@ -52,8 +52,7 @@ extern unsigned long set_phys_range_identity(unsigned long pfn_s,
52extern int m2p_add_override(unsigned long mfn, struct page *page, 52extern int m2p_add_override(unsigned long mfn, struct page *page,
53 struct gnttab_map_grant_ref *kmap_op); 53 struct gnttab_map_grant_ref *kmap_op);
54extern int m2p_remove_override(struct page *page, 54extern int m2p_remove_override(struct page *page,
55 struct gnttab_map_grant_ref *kmap_op, 55 struct gnttab_map_grant_ref *kmap_op);
56 unsigned long mfn);
57extern struct page *m2p_find_override(unsigned long mfn); 56extern struct page *m2p_find_override(unsigned long mfn);
58extern unsigned long m2p_find_override_pfn(unsigned long mfn, unsigned long pfn); 57extern unsigned long m2p_find_override_pfn(unsigned long mfn, unsigned long pfn);
59 58
@@ -122,7 +121,7 @@ static inline unsigned long mfn_to_pfn(unsigned long mfn)
122 pfn = m2p_find_override_pfn(mfn, ~0); 121 pfn = m2p_find_override_pfn(mfn, ~0);
123 } 122 }
124 123
125 /* 124 /*
126 * pfn is ~0 if there are no entries in the m2p for mfn or if the 125 * pfn is ~0 if there are no entries in the m2p for mfn or if the
127 * entry doesn't map back to the mfn and m2p_override doesn't have a 126 * entry doesn't map back to the mfn and m2p_override doesn't have a
128 * valid entry for it. 127 * valid entry for it.
diff --git a/arch/x86/kernel/amd_nb.c b/arch/x86/kernel/amd_nb.c
index 59554dca96ec..dec8de4e1663 100644
--- a/arch/x86/kernel/amd_nb.c
+++ b/arch/x86/kernel/amd_nb.c
@@ -179,7 +179,7 @@ int amd_get_subcaches(int cpu)
179 return (mask >> (4 * cuid)) & 0xf; 179 return (mask >> (4 * cuid)) & 0xf;
180} 180}
181 181
182int amd_set_subcaches(int cpu, int mask) 182int amd_set_subcaches(int cpu, unsigned long mask)
183{ 183{
184 static unsigned int reset, ban; 184 static unsigned int reset, ban;
185 struct amd_northbridge *nb = node_to_amd_nb(amd_get_nb_id(cpu)); 185 struct amd_northbridge *nb = node_to_amd_nb(amd_get_nb_id(cpu));
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index d3153e281d72..c67ffa686064 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -767,10 +767,7 @@ static unsigned int amd_size_cache(struct cpuinfo_x86 *c, unsigned int size)
767 767
768static void cpu_set_tlb_flushall_shift(struct cpuinfo_x86 *c) 768static void cpu_set_tlb_flushall_shift(struct cpuinfo_x86 *c)
769{ 769{
770 tlb_flushall_shift = 5; 770 tlb_flushall_shift = 6;
771
772 if (c->x86 <= 0x11)
773 tlb_flushall_shift = 4;
774} 771}
775 772
776static void cpu_detect_tlb_amd(struct cpuinfo_x86 *c) 773static void cpu_detect_tlb_amd(struct cpuinfo_x86 *c)
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
index 3db61c644e44..5cd9bfabd645 100644
--- a/arch/x86/kernel/cpu/intel.c
+++ b/arch/x86/kernel/cpu/intel.c
@@ -640,21 +640,17 @@ static void intel_tlb_flushall_shift_set(struct cpuinfo_x86 *c)
640 case 0x61d: /* six-core 45 nm xeon "Dunnington" */ 640 case 0x61d: /* six-core 45 nm xeon "Dunnington" */
641 tlb_flushall_shift = -1; 641 tlb_flushall_shift = -1;
642 break; 642 break;
643 case 0x63a: /* Ivybridge */
644 tlb_flushall_shift = 2;
645 break;
643 case 0x61a: /* 45 nm nehalem, "Bloomfield" */ 646 case 0x61a: /* 45 nm nehalem, "Bloomfield" */
644 case 0x61e: /* 45 nm nehalem, "Lynnfield" */ 647 case 0x61e: /* 45 nm nehalem, "Lynnfield" */
645 case 0x625: /* 32 nm nehalem, "Clarkdale" */ 648 case 0x625: /* 32 nm nehalem, "Clarkdale" */
646 case 0x62c: /* 32 nm nehalem, "Gulftown" */ 649 case 0x62c: /* 32 nm nehalem, "Gulftown" */
647 case 0x62e: /* 45 nm nehalem-ex, "Beckton" */ 650 case 0x62e: /* 45 nm nehalem-ex, "Beckton" */
648 case 0x62f: /* 32 nm Xeon E7 */ 651 case 0x62f: /* 32 nm Xeon E7 */
649 tlb_flushall_shift = 6;
650 break;
651 case 0x62a: /* SandyBridge */ 652 case 0x62a: /* SandyBridge */
652 case 0x62d: /* SandyBridge, "Romely-EP" */ 653 case 0x62d: /* SandyBridge, "Romely-EP" */
653 tlb_flushall_shift = 5;
654 break;
655 case 0x63a: /* Ivybridge */
656 tlb_flushall_shift = 1;
657 break;
658 default: 654 default:
659 tlb_flushall_shift = 6; 655 tlb_flushall_shift = 6;
660 } 656 }
diff --git a/arch/x86/kernel/cpu/microcode/amd_early.c b/arch/x86/kernel/cpu/microcode/amd_early.c
index 8384c0fa206f..617a9e284245 100644
--- a/arch/x86/kernel/cpu/microcode/amd_early.c
+++ b/arch/x86/kernel/cpu/microcode/amd_early.c
@@ -285,6 +285,15 @@ static void __init collect_cpu_sig_on_bsp(void *arg)
285 285
286 uci->cpu_sig.sig = cpuid_eax(0x00000001); 286 uci->cpu_sig.sig = cpuid_eax(0x00000001);
287} 287}
288
289static void __init get_bsp_sig(void)
290{
291 unsigned int bsp = boot_cpu_data.cpu_index;
292 struct ucode_cpu_info *uci = ucode_cpu_info + bsp;
293
294 if (!uci->cpu_sig.sig)
295 smp_call_function_single(bsp, collect_cpu_sig_on_bsp, NULL, 1);
296}
288#else 297#else
289void load_ucode_amd_ap(void) 298void load_ucode_amd_ap(void)
290{ 299{
@@ -337,31 +346,37 @@ void load_ucode_amd_ap(void)
337 346
338int __init save_microcode_in_initrd_amd(void) 347int __init save_microcode_in_initrd_amd(void)
339{ 348{
349 unsigned long cont;
340 enum ucode_state ret; 350 enum ucode_state ret;
341 u32 eax; 351 u32 eax;
342 352
343#ifdef CONFIG_X86_32 353 if (!container)
344 unsigned int bsp = boot_cpu_data.cpu_index; 354 return -EINVAL;
345 struct ucode_cpu_info *uci = ucode_cpu_info + bsp;
346
347 if (!uci->cpu_sig.sig)
348 smp_call_function_single(bsp, collect_cpu_sig_on_bsp, NULL, 1);
349 355
356#ifdef CONFIG_X86_32
357 get_bsp_sig();
358 cont = (unsigned long)container;
359#else
350 /* 360 /*
351 * Take into account the fact that the ramdisk might get relocated 361 * We need the physical address of the container for both bitness since
352 * and therefore we need to recompute the container's position in 362 * boot_params.hdr.ramdisk_image is a physical address.
353 * virtual memory space.
354 */ 363 */
355 container = (u8 *)(__va((u32)relocated_ramdisk) + 364 cont = __pa(container);
356 ((u32)container - boot_params.hdr.ramdisk_image));
357#endif 365#endif
366
367 /*
368 * Take into account the fact that the ramdisk might get relocated and
369 * therefore we need to recompute the container's position in virtual
370 * memory space.
371 */
372 if (relocated_ramdisk)
373 container = (u8 *)(__va(relocated_ramdisk) +
374 (cont - boot_params.hdr.ramdisk_image));
375
358 if (ucode_new_rev) 376 if (ucode_new_rev)
359 pr_info("microcode: updated early to new patch_level=0x%08x\n", 377 pr_info("microcode: updated early to new patch_level=0x%08x\n",
360 ucode_new_rev); 378 ucode_new_rev);
361 379
362 if (!container)
363 return -EINVAL;
364
365 eax = cpuid_eax(0x00000001); 380 eax = cpuid_eax(0x00000001);
366 eax = ((eax >> 8) & 0xf) + ((eax >> 20) & 0xff); 381 eax = ((eax >> 8) & 0xf) + ((eax >> 20) & 0xff);
367 382
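
The save_microcode_in_initrd_amd() rework keeps the container's offset as a physical-address delta (cont - boot_params.hdr.ramdisk_image) and only rebases the pointer when the ramdisk was actually relocated. A userspace sketch of that pointer arithmetic (the struct and field names are stand-ins for boot_params and the early-boot globals, not the kernel's own):

#include <stdint.h>

struct early_boot {
	uintptr_t ramdisk_image;	/* original physical load address */
	uintptr_t relocated_ramdisk;	/* new physical address, 0 if not moved */
	uint8_t *ramdisk_virt;		/* virtual mapping of the relocated ramdisk */
};

/* Rebase a pointer into the ramdisk after relocation: the container's
 * offset inside the image is a physical delta, so it can be applied to
 * the new virtual base unchanged. */
static uint8_t *rebase_container(const struct early_boot *eb,
				 uintptr_t container_phys, uint8_t *container)
{
	uintptr_t offset = container_phys - eb->ramdisk_image;

	if (!eb->relocated_ramdisk)
		return container;	/* nothing moved, keep the old pointer */
	return eb->ramdisk_virt + offset;
}
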
diff --git a/arch/x86/kernel/cpu/mtrr/generic.c b/arch/x86/kernel/cpu/mtrr/generic.c
index ce2d0a2c3e4f..0e25a1bc5ab5 100644
--- a/arch/x86/kernel/cpu/mtrr/generic.c
+++ b/arch/x86/kernel/cpu/mtrr/generic.c
@@ -683,7 +683,7 @@ static void prepare_set(void) __acquires(set_atomicity_lock)
683 } 683 }
684 684
685 /* Flush all TLBs via a mov %cr3, %reg; mov %reg, %cr3 */ 685 /* Flush all TLBs via a mov %cr3, %reg; mov %reg, %cr3 */
686 count_vm_event(NR_TLB_LOCAL_FLUSH_ALL); 686 count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
687 __flush_tlb(); 687 __flush_tlb();
688 688
689 /* Save MTRR state */ 689 /* Save MTRR state */
@@ -697,7 +697,7 @@ static void prepare_set(void) __acquires(set_atomicity_lock)
697static void post_set(void) __releases(set_atomicity_lock) 697static void post_set(void) __releases(set_atomicity_lock)
698{ 698{
699 /* Flush TLBs (no need to flush caches - they are disabled) */ 699 /* Flush TLBs (no need to flush caches - they are disabled) */
700 count_vm_event(NR_TLB_LOCAL_FLUSH_ALL); 700 count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
701 __flush_tlb(); 701 __flush_tlb();
702 702
703 /* Intel (P6) standard MTRRs */ 703 /* Intel (P6) standard MTRRs */
diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
index dbb60878b744..d99f31d9a750 100644
--- a/arch/x86/kernel/irq.c
+++ b/arch/x86/kernel/irq.c
@@ -266,6 +266,14 @@ __visible void smp_trace_x86_platform_ipi(struct pt_regs *regs)
266EXPORT_SYMBOL_GPL(vector_used_by_percpu_irq); 266EXPORT_SYMBOL_GPL(vector_used_by_percpu_irq);
267 267
268#ifdef CONFIG_HOTPLUG_CPU 268#ifdef CONFIG_HOTPLUG_CPU
269
270/* These two declarations are only used in check_irq_vectors_for_cpu_disable()
271 * below, which is protected by stop_machine(). Putting them on the stack
272 * results in a stack frame overflow. Dynamically allocating could result in a
273 * failure so declare these two cpumasks as global.
274 */
275static struct cpumask affinity_new, online_new;
276
269/* 277/*
270 * This cpu is going to be removed and its vectors migrated to the remaining 278 * This cpu is going to be removed and its vectors migrated to the remaining
271 * online cpus. Check to see if there are enough vectors in the remaining cpus. 279 * online cpus. Check to see if there are enough vectors in the remaining cpus.
@@ -277,7 +285,6 @@ int check_irq_vectors_for_cpu_disable(void)
277 unsigned int this_cpu, vector, this_count, count; 285 unsigned int this_cpu, vector, this_count, count;
278 struct irq_desc *desc; 286 struct irq_desc *desc;
279 struct irq_data *data; 287 struct irq_data *data;
280 struct cpumask affinity_new, online_new;
281 288
282 this_cpu = smp_processor_id(); 289 this_cpu = smp_processor_id();
283 cpumask_copy(&online_new, cpu_online_mask); 290 cpumask_copy(&online_new, cpu_online_mask);
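
Moving affinity_new and online_new off the stack matters because struct cpumask scales with CONFIG_NR_CPUS. A quick illustration of the cost (8192 CPUs is an assumed example, not a value taken from this patch):

#include <stdio.h>

int main(void)
{
	const unsigned int nr_cpus = 8192;		/* example config value */
	const unsigned int mask_bytes = nr_cpus / 8;	/* one bit per CPU */

	/* Two on-stack masks at this size would consume 2 KiB of a kernel
	 * stack that is typically only 8 or 16 KiB in total. */
	printf("one cpumask: %u bytes, two: %u bytes\n",
	       mask_bytes, 2 * mask_bytes);
	return 0;
}
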
diff --git a/arch/x86/kernel/quirks.c b/arch/x86/kernel/quirks.c
index 04ee1e2e4c02..7c6acd4b8995 100644
--- a/arch/x86/kernel/quirks.c
+++ b/arch/x86/kernel/quirks.c
@@ -571,3 +571,40 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F5,
571 quirk_amd_nb_node); 571 quirk_amd_nb_node);
572 572
573#endif 573#endif
574
575#ifdef CONFIG_PCI
576/*
577 * Processor does not ensure DRAM scrub read/write sequence
 578 * is atomic wrt accesses to the CC6 save state area. Therefore
 579 * if a concurrent scrub read/write access is to the same address
580 * the entry may appear as if it is not written. This quirk
581 * applies to Fam16h models 00h-0Fh
582 *
583 * See "Revision Guide" for AMD F16h models 00h-0fh,
584 * document 51810 rev. 3.04, Nov 2013
585 */
586static void amd_disable_seq_and_redirect_scrub(struct pci_dev *dev)
587{
588 u32 val;
589
590 /*
591 * Suggested workaround:
592 * set D18F3x58[4:0] = 00h and set D18F3x5C[0] = 0b
593 */
594 pci_read_config_dword(dev, 0x58, &val);
595 if (val & 0x1F) {
596 val &= ~(0x1F);
597 pci_write_config_dword(dev, 0x58, val);
598 }
599
600 pci_read_config_dword(dev, 0x5C, &val);
601 if (val & BIT(0)) {
602 val &= ~BIT(0);
603 pci_write_config_dword(dev, 0x5c, val);
604 }
605}
606
607DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_NB_F3,
608 amd_disable_seq_and_redirect_scrub);
609
610#endif
diff --git a/arch/x86/mm/numa.c b/arch/x86/mm/numa.c
index 81b2750f3666..27aa0455fab3 100644
--- a/arch/x86/mm/numa.c
+++ b/arch/x86/mm/numa.c
@@ -493,14 +493,6 @@ static int __init numa_register_memblks(struct numa_meminfo *mi)
493 struct numa_memblk *mb = &mi->blk[i]; 493 struct numa_memblk *mb = &mi->blk[i];
494 memblock_set_node(mb->start, mb->end - mb->start, 494 memblock_set_node(mb->start, mb->end - mb->start,
495 &memblock.memory, mb->nid); 495 &memblock.memory, mb->nid);
496
497 /*
498 * At this time, all memory regions reserved by memblock are
499 * used by the kernel. Set the nid in memblock.reserved will
500 * mark out all the nodes the kernel resides in.
501 */
502 memblock_set_node(mb->start, mb->end - mb->start,
503 &memblock.reserved, mb->nid);
504 } 496 }
505 497
506 /* 498 /*
@@ -565,10 +557,21 @@ static void __init numa_init_array(void)
565static void __init numa_clear_kernel_node_hotplug(void) 557static void __init numa_clear_kernel_node_hotplug(void)
566{ 558{
567 int i, nid; 559 int i, nid;
568 nodemask_t numa_kernel_nodes; 560 nodemask_t numa_kernel_nodes = NODE_MASK_NONE;
569 unsigned long start, end; 561 unsigned long start, end;
570 struct memblock_type *type = &memblock.reserved; 562 struct memblock_type *type = &memblock.reserved;
571 563
564 /*
565 * At this time, all memory regions reserved by memblock are
566 * used by the kernel. Set the nid in memblock.reserved will
567 * mark out all the nodes the kernel resides in.
568 */
569 for (i = 0; i < numa_meminfo.nr_blks; i++) {
570 struct numa_memblk *mb = &numa_meminfo.blk[i];
571 memblock_set_node(mb->start, mb->end - mb->start,
572 &memblock.reserved, mb->nid);
573 }
574
572 /* Mark all kernel nodes. */ 575 /* Mark all kernel nodes. */
573 for (i = 0; i < type->cnt; i++) 576 for (i = 0; i < type->cnt; i++)
574 node_set(type->regions[i].nid, numa_kernel_nodes); 577 node_set(type->regions[i].nid, numa_kernel_nodes);
diff --git a/arch/x86/mm/numa_32.c b/arch/x86/mm/numa_32.c
index 0342d27ca798..47b6436e41c2 100644
--- a/arch/x86/mm/numa_32.c
+++ b/arch/x86/mm/numa_32.c
@@ -52,6 +52,8 @@ void memory_present(int nid, unsigned long start, unsigned long end)
52 nid, start, end); 52 nid, start, end);
53 printk(KERN_DEBUG " Setting physnode_map array to node %d for pfns:\n", nid); 53 printk(KERN_DEBUG " Setting physnode_map array to node %d for pfns:\n", nid);
54 printk(KERN_DEBUG " "); 54 printk(KERN_DEBUG " ");
55 start = round_down(start, PAGES_PER_SECTION);
56 end = round_up(end, PAGES_PER_SECTION);
55 for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION) { 57 for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION) {
56 physnode_map[pfn / PAGES_PER_SECTION] = nid; 58 physnode_map[pfn / PAGES_PER_SECTION] = nid;
57 printk(KERN_CONT "%lx ", pfn); 59 printk(KERN_CONT "%lx ", pfn);
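
Rounding start and end to PAGES_PER_SECTION ensures physnode_map[] gets an entry for every section the node touches, not only the fully covered ones. A small sketch of the rounding (the section size here is an assumed example, not the configured value):

#include <stdio.h>

#define PAGES_PER_SECTION (1UL << 14)	/* example value */

static unsigned long round_down_pfn(unsigned long pfn)
{
	return pfn & ~(PAGES_PER_SECTION - 1);
}

static unsigned long round_up_pfn(unsigned long pfn)
{
	return (pfn + PAGES_PER_SECTION - 1) & ~(PAGES_PER_SECTION - 1);
}

int main(void)
{
	unsigned long start = 0x12345, end = 0x2f000;	/* arbitrary pfns */

	/* Without the rounding, the loop "for (pfn = start; pfn < end;
	 * pfn += PAGES_PER_SECTION)" can step past the last, partially
	 * covered section and leave its physnode_map entry unset. */
	printf("start %#lx -> %#lx, end %#lx -> %#lx\n",
	       start, round_down_pfn(start), end, round_up_pfn(end));
	return 0;
}
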
diff --git a/arch/x86/mm/srat.c b/arch/x86/mm/srat.c
index 1a25187e151e..1953e9c9391a 100644
--- a/arch/x86/mm/srat.c
+++ b/arch/x86/mm/srat.c
@@ -42,15 +42,25 @@ static __init inline int srat_disabled(void)
42 return acpi_numa < 0; 42 return acpi_numa < 0;
43} 43}
44 44
45/* Callback for SLIT parsing */ 45/*
46 * Callback for SLIT parsing. pxm_to_node() returns NUMA_NO_NODE for
47 * I/O localities since SRAT does not list them. I/O localities are
48 * not supported at this point.
49 */
46void __init acpi_numa_slit_init(struct acpi_table_slit *slit) 50void __init acpi_numa_slit_init(struct acpi_table_slit *slit)
47{ 51{
48 int i, j; 52 int i, j;
49 53
50 for (i = 0; i < slit->locality_count; i++) 54 for (i = 0; i < slit->locality_count; i++) {
51 for (j = 0; j < slit->locality_count; j++) 55 if (pxm_to_node(i) == NUMA_NO_NODE)
56 continue;
57 for (j = 0; j < slit->locality_count; j++) {
58 if (pxm_to_node(j) == NUMA_NO_NODE)
59 continue;
52 numa_set_distance(pxm_to_node(i), pxm_to_node(j), 60 numa_set_distance(pxm_to_node(i), pxm_to_node(j),
53 slit->entry[slit->locality_count * i + j]); 61 slit->entry[slit->locality_count * i + j]);
62 }
63 }
54} 64}
55 65
56/* Callback for Proximity Domain -> x2APIC mapping */ 66/* Callback for Proximity Domain -> x2APIC mapping */
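
The SLIT callback now skips localities that have no node mapping (pxm_to_node() returning NUMA_NO_NODE, e.g. I/O localities), so numa_set_distance() is never called with an invalid node. A self-contained sketch with toy stand-ins for pxm_to_node() and numa_set_distance():

#include <stdio.h>
#include <stdint.h>

#define NUMA_NO_NODE (-1)

static int pxm_to_node(unsigned int pxm)
{
	return pxm < 2 ? (int)pxm : NUMA_NO_NODE;	/* toy mapping */
}

static void numa_set_distance(int from, int to, int distance)
{
	printf("node %d -> node %d: %d\n", from, to, distance);
}

static void slit_parse(const uint8_t *entry, unsigned int locality_count)
{
	for (unsigned int i = 0; i < locality_count; i++) {
		if (pxm_to_node(i) == NUMA_NO_NODE)
			continue;
		for (unsigned int j = 0; j < locality_count; j++) {
			if (pxm_to_node(j) == NUMA_NO_NODE)
				continue;
			numa_set_distance(pxm_to_node(i), pxm_to_node(j),
					  entry[locality_count * i + j]);
		}
	}
}

int main(void)
{
	const uint8_t slit[9] = { 10, 20, 255, 20, 10, 255, 255, 255, 10 };

	slit_parse(slit, 3);	/* locality 2 is skipped entirely */
	return 0;
}
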
diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
index ae699b3bbac8..dd8dda167a24 100644
--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
@@ -103,7 +103,7 @@ static void flush_tlb_func(void *info)
103 if (f->flush_mm != this_cpu_read(cpu_tlbstate.active_mm)) 103 if (f->flush_mm != this_cpu_read(cpu_tlbstate.active_mm))
104 return; 104 return;
105 105
106 count_vm_event(NR_TLB_REMOTE_FLUSH_RECEIVED); 106 count_vm_tlb_event(NR_TLB_REMOTE_FLUSH_RECEIVED);
107 if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_OK) { 107 if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_OK) {
108 if (f->flush_end == TLB_FLUSH_ALL) 108 if (f->flush_end == TLB_FLUSH_ALL)
109 local_flush_tlb(); 109 local_flush_tlb();
@@ -131,7 +131,7 @@ void native_flush_tlb_others(const struct cpumask *cpumask,
131 info.flush_start = start; 131 info.flush_start = start;
132 info.flush_end = end; 132 info.flush_end = end;
133 133
134 count_vm_event(NR_TLB_REMOTE_FLUSH); 134 count_vm_tlb_event(NR_TLB_REMOTE_FLUSH);
135 if (is_uv_system()) { 135 if (is_uv_system()) {
136 unsigned int cpu; 136 unsigned int cpu;
137 137
@@ -151,44 +151,19 @@ void flush_tlb_current_task(void)
151 151
152 preempt_disable(); 152 preempt_disable();
153 153
154 count_vm_event(NR_TLB_LOCAL_FLUSH_ALL); 154 count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
155 local_flush_tlb(); 155 local_flush_tlb();
156 if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids) 156 if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids)
157 flush_tlb_others(mm_cpumask(mm), mm, 0UL, TLB_FLUSH_ALL); 157 flush_tlb_others(mm_cpumask(mm), mm, 0UL, TLB_FLUSH_ALL);
158 preempt_enable(); 158 preempt_enable();
159} 159}
160 160
161/*
162 * It can find out the THP large page, or
163 * HUGETLB page in tlb_flush when THP disabled
164 */
165static inline unsigned long has_large_page(struct mm_struct *mm,
166 unsigned long start, unsigned long end)
167{
168 pgd_t *pgd;
169 pud_t *pud;
170 pmd_t *pmd;
171 unsigned long addr = ALIGN(start, HPAGE_SIZE);
172 for (; addr < end; addr += HPAGE_SIZE) {
173 pgd = pgd_offset(mm, addr);
174 if (likely(!pgd_none(*pgd))) {
175 pud = pud_offset(pgd, addr);
176 if (likely(!pud_none(*pud))) {
177 pmd = pmd_offset(pud, addr);
178 if (likely(!pmd_none(*pmd)))
179 if (pmd_large(*pmd))
180 return addr;
181 }
182 }
183 }
184 return 0;
185}
186
187void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start, 161void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
188 unsigned long end, unsigned long vmflag) 162 unsigned long end, unsigned long vmflag)
189{ 163{
190 unsigned long addr; 164 unsigned long addr;
191 unsigned act_entries, tlb_entries = 0; 165 unsigned act_entries, tlb_entries = 0;
166 unsigned long nr_base_pages;
192 167
193 preempt_disable(); 168 preempt_disable();
194 if (current->active_mm != mm) 169 if (current->active_mm != mm)
@@ -210,21 +185,20 @@ void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
210 tlb_entries = tlb_lli_4k[ENTRIES]; 185 tlb_entries = tlb_lli_4k[ENTRIES];
211 else 186 else
212 tlb_entries = tlb_lld_4k[ENTRIES]; 187 tlb_entries = tlb_lld_4k[ENTRIES];
188
 213 /* Assume all TLB entries were occupied by this task */ 189 /* Assume all TLB entries were occupied by this task */
214 act_entries = mm->total_vm > tlb_entries ? tlb_entries : mm->total_vm; 190 act_entries = tlb_entries >> tlb_flushall_shift;
191 act_entries = mm->total_vm > act_entries ? act_entries : mm->total_vm;
192 nr_base_pages = (end - start) >> PAGE_SHIFT;
215 193
216 /* tlb_flushall_shift is on balance point, details in commit log */ 194 /* tlb_flushall_shift is on balance point, details in commit log */
217 if ((end - start) >> PAGE_SHIFT > act_entries >> tlb_flushall_shift) { 195 if (nr_base_pages > act_entries) {
218 count_vm_event(NR_TLB_LOCAL_FLUSH_ALL); 196 count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
219 local_flush_tlb(); 197 local_flush_tlb();
220 } else { 198 } else {
221 if (has_large_page(mm, start, end)) {
222 local_flush_tlb();
223 goto flush_all;
224 }
225 /* flush range by one by one 'invlpg' */ 199 /* flush range by one by one 'invlpg' */
226 for (addr = start; addr < end; addr += PAGE_SIZE) { 200 for (addr = start; addr < end; addr += PAGE_SIZE) {
227 count_vm_event(NR_TLB_LOCAL_FLUSH_ONE); 201 count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ONE);
228 __flush_tlb_single(addr); 202 __flush_tlb_single(addr);
229 } 203 }
230 204
@@ -262,7 +236,7 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long start)
262 236
263static void do_flush_tlb_all(void *info) 237static void do_flush_tlb_all(void *info)
264{ 238{
265 count_vm_event(NR_TLB_REMOTE_FLUSH_RECEIVED); 239 count_vm_tlb_event(NR_TLB_REMOTE_FLUSH_RECEIVED);
266 __flush_tlb_all(); 240 __flush_tlb_all();
267 if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_LAZY) 241 if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_LAZY)
268 leave_mm(smp_processor_id()); 242 leave_mm(smp_processor_id());
@@ -270,7 +244,7 @@ static void do_flush_tlb_all(void *info)
270 244
271void flush_tlb_all(void) 245void flush_tlb_all(void)
272{ 246{
273 count_vm_event(NR_TLB_REMOTE_FLUSH); 247 count_vm_tlb_event(NR_TLB_REMOTE_FLUSH);
274 on_each_cpu(do_flush_tlb_all, NULL, 1); 248 on_each_cpu(do_flush_tlb_all, NULL, 1);
275} 249}
276 250
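
With has_large_page() removed, flush_tlb_mm_range() decides between a full flush and per-page invlpg purely from the page count: the range is flushed page by page only if it covers no more pages than tlb_entries >> tlb_flushall_shift, capped by the task's total_vm. A sketch of that decision (the TLB size and shift are example numbers, not values from this patch):

#include <stdbool.h>
#include <stdio.h>

#define TLB_ENTRIES		64	/* example 4K dTLB size */
#define TLB_FLUSHALL_SHIFT	6	/* e.g. the new AMD value */

static bool use_full_flush(unsigned long nr_base_pages,
			   unsigned long total_vm)
{
	unsigned long act_entries = TLB_ENTRIES >> TLB_FLUSHALL_SHIFT;

	if (total_vm < act_entries)
		act_entries = total_vm;
	return nr_base_pages > act_entries;
}

int main(void)
{
	printf("flush 1 page:   %s\n", use_full_flush(1, 4096) ? "full" : "invlpg");
	printf("flush 64 pages: %s\n", use_full_flush(64, 4096) ? "full" : "invlpg");
	return 0;
}
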
diff --git a/arch/x86/platform/efi/efi-bgrt.c b/arch/x86/platform/efi/efi-bgrt.c
index 7145ec63c520..4df9591eadad 100644
--- a/arch/x86/platform/efi/efi-bgrt.c
+++ b/arch/x86/platform/efi/efi-bgrt.c
@@ -49,7 +49,8 @@ void __init efi_bgrt_init(void)
49 49
50 image = efi_lookup_mapped_addr(bgrt_tab->image_address); 50 image = efi_lookup_mapped_addr(bgrt_tab->image_address);
51 if (!image) { 51 if (!image) {
52 image = ioremap(bgrt_tab->image_address, sizeof(bmp_header)); 52 image = early_memremap(bgrt_tab->image_address,
53 sizeof(bmp_header));
53 ioremapped = true; 54 ioremapped = true;
54 if (!image) 55 if (!image)
55 return; 56 return;
@@ -57,7 +58,7 @@ void __init efi_bgrt_init(void)
57 58
58 memcpy_fromio(&bmp_header, image, sizeof(bmp_header)); 59 memcpy_fromio(&bmp_header, image, sizeof(bmp_header));
59 if (ioremapped) 60 if (ioremapped)
60 iounmap(image); 61 early_iounmap(image, sizeof(bmp_header));
61 bgrt_image_size = bmp_header.size; 62 bgrt_image_size = bmp_header.size;
62 63
63 bgrt_image = kmalloc(bgrt_image_size, GFP_KERNEL); 64 bgrt_image = kmalloc(bgrt_image_size, GFP_KERNEL);
@@ -65,7 +66,8 @@ void __init efi_bgrt_init(void)
65 return; 66 return;
66 67
67 if (ioremapped) { 68 if (ioremapped) {
68 image = ioremap(bgrt_tab->image_address, bmp_header.size); 69 image = early_memremap(bgrt_tab->image_address,
70 bmp_header.size);
69 if (!image) { 71 if (!image) {
70 kfree(bgrt_image); 72 kfree(bgrt_image);
71 bgrt_image = NULL; 73 bgrt_image = NULL;
@@ -75,5 +77,5 @@ void __init efi_bgrt_init(void)
75 77
76 memcpy_fromio(bgrt_image, image, bgrt_image_size); 78 memcpy_fromio(bgrt_image, image, bgrt_image_size);
77 if (ioremapped) 79 if (ioremapped)
78 iounmap(image); 80 early_iounmap(image, bmp_header.size);
79} 81}
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index a4d7b647867f..201d09a7c46b 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -1473,6 +1473,18 @@ static void xen_pvh_set_cr_flags(int cpu)
1473 * X86_CR0_TS, X86_CR0_PE, X86_CR0_ET are set by Xen for HVM guests 1473 * X86_CR0_TS, X86_CR0_PE, X86_CR0_ET are set by Xen for HVM guests
 1474 * (with which PVH shares codepaths), while X86_CR0_PG is for PVH. */ 1474 * (with which PVH shares codepaths), while X86_CR0_PG is for PVH. */
1475 write_cr0(read_cr0() | X86_CR0_MP | X86_CR0_NE | X86_CR0_WP | X86_CR0_AM); 1475 write_cr0(read_cr0() | X86_CR0_MP | X86_CR0_NE | X86_CR0_WP | X86_CR0_AM);
1476
1477 if (!cpu)
1478 return;
1479 /*
1480 * For BSP, PSE PGE are set in probe_page_size_mask(), for APs
1481 * set them here. For all, OSFXSR OSXMMEXCPT are set in fpu_init.
1482 */
1483 if (cpu_has_pse)
1484 set_in_cr4(X86_CR4_PSE);
1485
1486 if (cpu_has_pge)
1487 set_in_cr4(X86_CR4_PGE);
1476} 1488}
1477 1489
1478/* 1490/*
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index 2423ef04ffea..256282e7888b 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -365,7 +365,7 @@ void xen_ptep_modify_prot_commit(struct mm_struct *mm, unsigned long addr,
365/* Assume pteval_t is equivalent to all the other *val_t types. */ 365/* Assume pteval_t is equivalent to all the other *val_t types. */
366static pteval_t pte_mfn_to_pfn(pteval_t val) 366static pteval_t pte_mfn_to_pfn(pteval_t val)
367{ 367{
368 if (val & _PAGE_PRESENT) { 368 if (pteval_present(val)) {
369 unsigned long mfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT; 369 unsigned long mfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT;
370 unsigned long pfn = mfn_to_pfn(mfn); 370 unsigned long pfn = mfn_to_pfn(mfn);
371 371
@@ -381,7 +381,7 @@ static pteval_t pte_mfn_to_pfn(pteval_t val)
381 381
382static pteval_t pte_pfn_to_mfn(pteval_t val) 382static pteval_t pte_pfn_to_mfn(pteval_t val)
383{ 383{
384 if (val & _PAGE_PRESENT) { 384 if (pteval_present(val)) {
385 unsigned long pfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT; 385 unsigned long pfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT;
386 pteval_t flags = val & PTE_FLAGS_MASK; 386 pteval_t flags = val & PTE_FLAGS_MASK;
387 unsigned long mfn; 387 unsigned long mfn;
diff --git a/arch/x86/xen/p2m.c b/arch/x86/xen/p2m.c
index 8009acbe41e4..696c694986d0 100644
--- a/arch/x86/xen/p2m.c
+++ b/arch/x86/xen/p2m.c
@@ -899,6 +899,13 @@ int m2p_add_override(unsigned long mfn, struct page *page,
899 "m2p_add_override: pfn %lx not mapped", pfn)) 899 "m2p_add_override: pfn %lx not mapped", pfn))
900 return -EINVAL; 900 return -EINVAL;
901 } 901 }
902 WARN_ON(PagePrivate(page));
903 SetPagePrivate(page);
904 set_page_private(page, mfn);
905 page->index = pfn_to_mfn(pfn);
906
907 if (unlikely(!set_phys_to_machine(pfn, FOREIGN_FRAME(mfn))))
908 return -ENOMEM;
902 909
903 if (kmap_op != NULL) { 910 if (kmap_op != NULL) {
904 if (!PageHighMem(page)) { 911 if (!PageHighMem(page)) {
@@ -937,16 +944,19 @@ int m2p_add_override(unsigned long mfn, struct page *page,
937} 944}
938EXPORT_SYMBOL_GPL(m2p_add_override); 945EXPORT_SYMBOL_GPL(m2p_add_override);
939int m2p_remove_override(struct page *page, 946int m2p_remove_override(struct page *page,
940 struct gnttab_map_grant_ref *kmap_op, 947 struct gnttab_map_grant_ref *kmap_op)
941 unsigned long mfn)
942{ 948{
943 unsigned long flags; 949 unsigned long flags;
950 unsigned long mfn;
944 unsigned long pfn; 951 unsigned long pfn;
945 unsigned long uninitialized_var(address); 952 unsigned long uninitialized_var(address);
946 unsigned level; 953 unsigned level;
947 pte_t *ptep = NULL; 954 pte_t *ptep = NULL;
948 955
949 pfn = page_to_pfn(page); 956 pfn = page_to_pfn(page);
957 mfn = get_phys_to_machine(pfn);
958 if (mfn == INVALID_P2M_ENTRY || !(mfn & FOREIGN_FRAME_BIT))
959 return -EINVAL;
950 960
951 if (!PageHighMem(page)) { 961 if (!PageHighMem(page)) {
952 address = (unsigned long)__va(pfn << PAGE_SHIFT); 962 address = (unsigned long)__va(pfn << PAGE_SHIFT);
@@ -960,7 +970,10 @@ int m2p_remove_override(struct page *page,
960 spin_lock_irqsave(&m2p_override_lock, flags); 970 spin_lock_irqsave(&m2p_override_lock, flags);
961 list_del(&page->lru); 971 list_del(&page->lru);
962 spin_unlock_irqrestore(&m2p_override_lock, flags); 972 spin_unlock_irqrestore(&m2p_override_lock, flags);
973 WARN_ON(!PagePrivate(page));
974 ClearPagePrivate(page);
963 975
976 set_phys_to_machine(pfn, page->index);
964 if (kmap_op != NULL) { 977 if (kmap_op != NULL) {
965 if (!PageHighMem(page)) { 978 if (!PageHighMem(page)) {
966 struct multicall_space mcs; 979 struct multicall_space mcs;
diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c
index 470e7542bf31..018a42883706 100644
--- a/drivers/acpi/battery.c
+++ b/drivers/acpi/battery.c
@@ -549,7 +549,7 @@ static ssize_t acpi_battery_alarm_store(struct device *dev,
549{ 549{
550 unsigned long x; 550 unsigned long x;
551 struct acpi_battery *battery = to_acpi_battery(dev_get_drvdata(dev)); 551 struct acpi_battery *battery = to_acpi_battery(dev_get_drvdata(dev));
552 if (sscanf(buf, "%ld\n", &x) == 1) 552 if (sscanf(buf, "%lu\n", &x) == 1)
553 battery->alarm = x/1000; 553 battery->alarm = x/1000;
554 if (acpi_battery_present(battery)) 554 if (acpi_battery_present(battery))
555 acpi_battery_set_alarm(battery); 555 acpi_battery_set_alarm(battery);
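
battery->alarm is parsed into an unsigned long, so the matching conversion is %lu; %ld expects a pointer to signed long and would also accept a leading minus sign. A tiny userspace illustration:

#include <stdio.h>

int main(void)
{
	unsigned long x = 0;

	/* "%lu" matches the unsigned long destination; "%ld" would be a
	 * type mismatch and would happily parse "-12000". */
	if (sscanf("12000\n", "%lu\n", &x) == 1)
		printf("raw %lu, stored %lu\n", x, x / 1000);
	return 0;
}
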
diff --git a/drivers/acpi/proc.c b/drivers/acpi/proc.c
index 50fe34ffe932..75c28eae8860 100644
--- a/drivers/acpi/proc.c
+++ b/drivers/acpi/proc.c
@@ -60,7 +60,7 @@ acpi_system_wakeup_device_seq_show(struct seq_file *seq, void *offset)
60 seq_printf(seq, "%c%-8s %s:%s\n", 60 seq_printf(seq, "%c%-8s %s:%s\n",
61 dev->wakeup.flags.run_wake ? '*' : ' ', 61 dev->wakeup.flags.run_wake ? '*' : ' ',
62 (device_may_wakeup(&dev->dev) || 62 (device_may_wakeup(&dev->dev) ||
63 (ldev && device_may_wakeup(ldev))) ? 63 device_may_wakeup(ldev)) ?
64 "enabled" : "disabled", 64 "enabled" : "disabled",
65 ldev->bus ? ldev->bus->name : 65 ldev->bus ? ldev->bus->name :
66 "no-bus", dev_name(ldev)); 66 "no-bus", dev_name(ldev));
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index 7384158c7f87..57b053f424d1 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -484,7 +484,6 @@ static void acpi_device_hotplug(void *data, u32 src)
484static void acpi_hotplug_notify_cb(acpi_handle handle, u32 type, void *data) 484static void acpi_hotplug_notify_cb(acpi_handle handle, u32 type, void *data)
485{ 485{
486 u32 ost_code = ACPI_OST_SC_NON_SPECIFIC_FAILURE; 486 u32 ost_code = ACPI_OST_SC_NON_SPECIFIC_FAILURE;
487 struct acpi_scan_handler *handler = data;
488 struct acpi_device *adev; 487 struct acpi_device *adev;
489 acpi_status status; 488 acpi_status status;
490 489
@@ -500,7 +499,10 @@ static void acpi_hotplug_notify_cb(acpi_handle handle, u32 type, void *data)
500 break; 499 break;
501 case ACPI_NOTIFY_EJECT_REQUEST: 500 case ACPI_NOTIFY_EJECT_REQUEST:
502 acpi_handle_debug(handle, "ACPI_NOTIFY_EJECT_REQUEST event\n"); 501 acpi_handle_debug(handle, "ACPI_NOTIFY_EJECT_REQUEST event\n");
503 if (!handler->hotplug.enabled) { 502 if (!adev->handler)
503 goto err_out;
504
505 if (!adev->handler->hotplug.enabled) {
504 acpi_handle_err(handle, "Eject disabled\n"); 506 acpi_handle_err(handle, "Eject disabled\n");
505 ost_code = ACPI_OST_SC_EJECT_NOT_SUPPORTED; 507 ost_code = ACPI_OST_SC_EJECT_NOT_SUPPORTED;
506 goto err_out; 508 goto err_out;
diff --git a/drivers/acpi/utils.c b/drivers/acpi/utils.c
index 0347a37eb438..85e3b612bdc0 100644
--- a/drivers/acpi/utils.c
+++ b/drivers/acpi/utils.c
@@ -99,10 +99,6 @@ acpi_extract_package(union acpi_object *package,
99 99
100 union acpi_object *element = &(package->package.elements[i]); 100 union acpi_object *element = &(package->package.elements[i]);
101 101
102 if (!element) {
103 return AE_BAD_DATA;
104 }
105
106 switch (element->type) { 102 switch (element->type) {
107 103
108 case ACPI_TYPE_INTEGER: 104 case ACPI_TYPE_INTEGER:
diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c
index f0447d3daf2c..a697b77b8865 100644
--- a/drivers/acpi/video_detect.c
+++ b/drivers/acpi/video_detect.c
@@ -170,6 +170,14 @@ static struct dmi_system_id video_detect_dmi_table[] = {
170 }, 170 },
171 { 171 {
172 .callback = video_detect_force_vendor, 172 .callback = video_detect_force_vendor,
173 .ident = "HP EliteBook Revolve 810",
174 .matches = {
175 DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
176 DMI_MATCH(DMI_PRODUCT_NAME, "HP EliteBook Revolve 810 G1"),
177 },
178 },
179 {
180 .callback = video_detect_force_vendor,
173 .ident = "Lenovo Yoga 13", 181 .ident = "Lenovo Yoga 13",
174 .matches = { 182 .matches = {
175 DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), 183 DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
diff --git a/drivers/block/nvme-core.c b/drivers/block/nvme-core.c
index 1f14ac403945..51824d1f23ea 100644
--- a/drivers/block/nvme-core.c
+++ b/drivers/block/nvme-core.c
@@ -46,7 +46,6 @@
46#define NVME_Q_DEPTH 1024 46#define NVME_Q_DEPTH 1024
47#define SQ_SIZE(depth) (depth * sizeof(struct nvme_command)) 47#define SQ_SIZE(depth) (depth * sizeof(struct nvme_command))
48#define CQ_SIZE(depth) (depth * sizeof(struct nvme_completion)) 48#define CQ_SIZE(depth) (depth * sizeof(struct nvme_completion))
49#define NVME_MINORS 64
50#define ADMIN_TIMEOUT (60 * HZ) 49#define ADMIN_TIMEOUT (60 * HZ)
51 50
52static int nvme_major; 51static int nvme_major;
@@ -58,6 +57,17 @@ module_param(use_threaded_interrupts, int, 0);
58static DEFINE_SPINLOCK(dev_list_lock); 57static DEFINE_SPINLOCK(dev_list_lock);
59static LIST_HEAD(dev_list); 58static LIST_HEAD(dev_list);
60static struct task_struct *nvme_thread; 59static struct task_struct *nvme_thread;
60static struct workqueue_struct *nvme_workq;
61
62static void nvme_reset_failed_dev(struct work_struct *ws);
63
64struct async_cmd_info {
65 struct kthread_work work;
66 struct kthread_worker *worker;
67 u32 result;
68 int status;
69 void *ctx;
70};
61 71
62/* 72/*
63 * An NVM Express queue. Each device has at least two (one for admin 73 * An NVM Express queue. Each device has at least two (one for admin
@@ -66,6 +76,7 @@ static struct task_struct *nvme_thread;
66struct nvme_queue { 76struct nvme_queue {
67 struct device *q_dmadev; 77 struct device *q_dmadev;
68 struct nvme_dev *dev; 78 struct nvme_dev *dev;
79 char irqname[24]; /* nvme4294967295-65535\0 */
69 spinlock_t q_lock; 80 spinlock_t q_lock;
70 struct nvme_command *sq_cmds; 81 struct nvme_command *sq_cmds;
71 volatile struct nvme_completion *cqes; 82 volatile struct nvme_completion *cqes;
@@ -80,9 +91,11 @@ struct nvme_queue {
80 u16 sq_head; 91 u16 sq_head;
81 u16 sq_tail; 92 u16 sq_tail;
82 u16 cq_head; 93 u16 cq_head;
94 u16 qid;
83 u8 cq_phase; 95 u8 cq_phase;
84 u8 cqe_seen; 96 u8 cqe_seen;
85 u8 q_suspended; 97 u8 q_suspended;
98 struct async_cmd_info cmdinfo;
86 unsigned long cmdid_data[]; 99 unsigned long cmdid_data[];
87}; 100};
88 101
@@ -97,6 +110,7 @@ static inline void _nvme_check_size(void)
97 BUILD_BUG_ON(sizeof(struct nvme_delete_queue) != 64); 110 BUILD_BUG_ON(sizeof(struct nvme_delete_queue) != 64);
98 BUILD_BUG_ON(sizeof(struct nvme_features) != 64); 111 BUILD_BUG_ON(sizeof(struct nvme_features) != 64);
99 BUILD_BUG_ON(sizeof(struct nvme_format_cmd) != 64); 112 BUILD_BUG_ON(sizeof(struct nvme_format_cmd) != 64);
113 BUILD_BUG_ON(sizeof(struct nvme_abort_cmd) != 64);
100 BUILD_BUG_ON(sizeof(struct nvme_command) != 64); 114 BUILD_BUG_ON(sizeof(struct nvme_command) != 64);
101 BUILD_BUG_ON(sizeof(struct nvme_id_ctrl) != 4096); 115 BUILD_BUG_ON(sizeof(struct nvme_id_ctrl) != 4096);
102 BUILD_BUG_ON(sizeof(struct nvme_id_ns) != 4096); 116 BUILD_BUG_ON(sizeof(struct nvme_id_ns) != 4096);
@@ -111,6 +125,7 @@ struct nvme_cmd_info {
111 nvme_completion_fn fn; 125 nvme_completion_fn fn;
112 void *ctx; 126 void *ctx;
113 unsigned long timeout; 127 unsigned long timeout;
128 int aborted;
114}; 129};
115 130
116static struct nvme_cmd_info *nvme_cmd_info(struct nvme_queue *nvmeq) 131static struct nvme_cmd_info *nvme_cmd_info(struct nvme_queue *nvmeq)
@@ -154,6 +169,7 @@ static int alloc_cmdid(struct nvme_queue *nvmeq, void *ctx,
154 info[cmdid].fn = handler; 169 info[cmdid].fn = handler;
155 info[cmdid].ctx = ctx; 170 info[cmdid].ctx = ctx;
156 info[cmdid].timeout = jiffies + timeout; 171 info[cmdid].timeout = jiffies + timeout;
172 info[cmdid].aborted = 0;
157 return cmdid; 173 return cmdid;
158} 174}
159 175
@@ -172,6 +188,7 @@ static int alloc_cmdid_killable(struct nvme_queue *nvmeq, void *ctx,
172#define CMD_CTX_COMPLETED (0x310 + CMD_CTX_BASE) 188#define CMD_CTX_COMPLETED (0x310 + CMD_CTX_BASE)
173#define CMD_CTX_INVALID (0x314 + CMD_CTX_BASE) 189#define CMD_CTX_INVALID (0x314 + CMD_CTX_BASE)
174#define CMD_CTX_FLUSH (0x318 + CMD_CTX_BASE) 190#define CMD_CTX_FLUSH (0x318 + CMD_CTX_BASE)
191#define CMD_CTX_ABORT (0x31C + CMD_CTX_BASE)
175 192
176static void special_completion(struct nvme_dev *dev, void *ctx, 193static void special_completion(struct nvme_dev *dev, void *ctx,
177 struct nvme_completion *cqe) 194 struct nvme_completion *cqe)
@@ -180,6 +197,10 @@ static void special_completion(struct nvme_dev *dev, void *ctx,
180 return; 197 return;
181 if (ctx == CMD_CTX_FLUSH) 198 if (ctx == CMD_CTX_FLUSH)
182 return; 199 return;
200 if (ctx == CMD_CTX_ABORT) {
201 ++dev->abort_limit;
202 return;
203 }
183 if (ctx == CMD_CTX_COMPLETED) { 204 if (ctx == CMD_CTX_COMPLETED) {
184 dev_warn(&dev->pci_dev->dev, 205 dev_warn(&dev->pci_dev->dev,
185 "completed id %d twice on queue %d\n", 206 "completed id %d twice on queue %d\n",
@@ -196,6 +217,15 @@ static void special_completion(struct nvme_dev *dev, void *ctx,
196 dev_warn(&dev->pci_dev->dev, "Unknown special completion %p\n", ctx); 217 dev_warn(&dev->pci_dev->dev, "Unknown special completion %p\n", ctx);
197} 218}
198 219
220static void async_completion(struct nvme_dev *dev, void *ctx,
221 struct nvme_completion *cqe)
222{
223 struct async_cmd_info *cmdinfo = ctx;
224 cmdinfo->result = le32_to_cpup(&cqe->result);
225 cmdinfo->status = le16_to_cpup(&cqe->status) >> 1;
226 queue_kthread_work(cmdinfo->worker, &cmdinfo->work);
227}
228
199/* 229/*
200 * Called with local interrupts disabled and the q_lock held. May not sleep. 230 * Called with local interrupts disabled and the q_lock held. May not sleep.
201 */ 231 */
@@ -693,7 +723,7 @@ static int nvme_process_cq(struct nvme_queue *nvmeq)
693 if (head == nvmeq->cq_head && phase == nvmeq->cq_phase) 723 if (head == nvmeq->cq_head && phase == nvmeq->cq_phase)
694 return 0; 724 return 0;
695 725
696 writel(head, nvmeq->q_db + (1 << nvmeq->dev->db_stride)); 726 writel(head, nvmeq->q_db + nvmeq->dev->db_stride);
697 nvmeq->cq_head = head; 727 nvmeq->cq_head = head;
698 nvmeq->cq_phase = phase; 728 nvmeq->cq_phase = phase;
699 729
@@ -804,12 +834,34 @@ int nvme_submit_sync_cmd(struct nvme_queue *nvmeq, struct nvme_command *cmd,
804 return cmdinfo.status; 834 return cmdinfo.status;
805} 835}
806 836
837static int nvme_submit_async_cmd(struct nvme_queue *nvmeq,
838 struct nvme_command *cmd,
839 struct async_cmd_info *cmdinfo, unsigned timeout)
840{
841 int cmdid;
842
843 cmdid = alloc_cmdid_killable(nvmeq, cmdinfo, async_completion, timeout);
844 if (cmdid < 0)
845 return cmdid;
846 cmdinfo->status = -EINTR;
847 cmd->common.command_id = cmdid;
848 nvme_submit_cmd(nvmeq, cmd);
849 return 0;
850}
851
807int nvme_submit_admin_cmd(struct nvme_dev *dev, struct nvme_command *cmd, 852int nvme_submit_admin_cmd(struct nvme_dev *dev, struct nvme_command *cmd,
808 u32 *result) 853 u32 *result)
809{ 854{
810 return nvme_submit_sync_cmd(dev->queues[0], cmd, result, ADMIN_TIMEOUT); 855 return nvme_submit_sync_cmd(dev->queues[0], cmd, result, ADMIN_TIMEOUT);
811} 856}
812 857
858static int nvme_submit_admin_cmd_async(struct nvme_dev *dev,
859 struct nvme_command *cmd, struct async_cmd_info *cmdinfo)
860{
861 return nvme_submit_async_cmd(dev->queues[0], cmd, cmdinfo,
862 ADMIN_TIMEOUT);
863}
864
813static int adapter_delete_queue(struct nvme_dev *dev, u8 opcode, u16 id) 865static int adapter_delete_queue(struct nvme_dev *dev, u8 opcode, u16 id)
814{ 866{
815 int status; 867 int status;
@@ -920,6 +972,56 @@ int nvme_set_features(struct nvme_dev *dev, unsigned fid, unsigned dword11,
920} 972}
921 973
922/** 974/**
975 * nvme_abort_cmd - Attempt aborting a command
976 * @cmdid: Command id of a timed out IO
 977 * @nvmeq: The queue with the timed out IO
978 *
979 * Schedule controller reset if the command was already aborted once before and
980 * still hasn't been returned to the driver, or if this is the admin queue.
981 */
982static void nvme_abort_cmd(int cmdid, struct nvme_queue *nvmeq)
983{
984 int a_cmdid;
985 struct nvme_command cmd;
986 struct nvme_dev *dev = nvmeq->dev;
987 struct nvme_cmd_info *info = nvme_cmd_info(nvmeq);
988
989 if (!nvmeq->qid || info[cmdid].aborted) {
990 if (work_busy(&dev->reset_work))
991 return;
992 list_del_init(&dev->node);
993 dev_warn(&dev->pci_dev->dev,
994 "I/O %d QID %d timeout, reset controller\n", cmdid,
995 nvmeq->qid);
996 PREPARE_WORK(&dev->reset_work, nvme_reset_failed_dev);
997 queue_work(nvme_workq, &dev->reset_work);
998 return;
999 }
1000
1001 if (!dev->abort_limit)
1002 return;
1003
1004 a_cmdid = alloc_cmdid(dev->queues[0], CMD_CTX_ABORT, special_completion,
1005 ADMIN_TIMEOUT);
1006 if (a_cmdid < 0)
1007 return;
1008
1009 memset(&cmd, 0, sizeof(cmd));
1010 cmd.abort.opcode = nvme_admin_abort_cmd;
1011 cmd.abort.cid = cmdid;
1012 cmd.abort.sqid = cpu_to_le16(nvmeq->qid);
1013 cmd.abort.command_id = a_cmdid;
1014
1015 --dev->abort_limit;
1016 info[cmdid].aborted = 1;
1017 info[cmdid].timeout = jiffies + ADMIN_TIMEOUT;
1018
1019 dev_warn(nvmeq->q_dmadev, "Aborting I/O %d QID %d\n", cmdid,
1020 nvmeq->qid);
1021 nvme_submit_cmd(dev->queues[0], &cmd);
1022}
1023
1024/**
923 * nvme_cancel_ios - Cancel outstanding I/Os 1025 * nvme_cancel_ios - Cancel outstanding I/Os
924 * @queue: The queue to cancel I/Os on 1026 * @queue: The queue to cancel I/Os on
925 * @timeout: True to only cancel I/Os which have timed out 1027 * @timeout: True to only cancel I/Os which have timed out
@@ -942,7 +1044,12 @@ static void nvme_cancel_ios(struct nvme_queue *nvmeq, bool timeout)
942 continue; 1044 continue;
943 if (info[cmdid].ctx == CMD_CTX_CANCELLED) 1045 if (info[cmdid].ctx == CMD_CTX_CANCELLED)
944 continue; 1046 continue;
945 dev_warn(nvmeq->q_dmadev, "Cancelling I/O %d\n", cmdid); 1047 if (timeout && nvmeq->dev->initialized) {
1048 nvme_abort_cmd(cmdid, nvmeq);
1049 continue;
1050 }
1051 dev_warn(nvmeq->q_dmadev, "Cancelling I/O %d QID %d\n", cmdid,
1052 nvmeq->qid);
946 ctx = cancel_cmdid(nvmeq, cmdid, &fn); 1053 ctx = cancel_cmdid(nvmeq, cmdid, &fn);
947 fn(nvmeq->dev, ctx, &cqe); 1054 fn(nvmeq->dev, ctx, &cqe);
948 } 1055 }
@@ -964,26 +1071,31 @@ static void nvme_free_queue(struct nvme_queue *nvmeq)
964 kfree(nvmeq); 1071 kfree(nvmeq);
965} 1072}
966 1073
967static void nvme_free_queues(struct nvme_dev *dev) 1074static void nvme_free_queues(struct nvme_dev *dev, int lowest)
968{ 1075{
969 int i; 1076 int i;
970 1077
971 for (i = dev->queue_count - 1; i >= 0; i--) { 1078 for (i = dev->queue_count - 1; i >= lowest; i--) {
972 nvme_free_queue(dev->queues[i]); 1079 nvme_free_queue(dev->queues[i]);
973 dev->queue_count--; 1080 dev->queue_count--;
974 dev->queues[i] = NULL; 1081 dev->queues[i] = NULL;
975 } 1082 }
976} 1083}
977 1084
978static void nvme_disable_queue(struct nvme_dev *dev, int qid) 1085/**
1086 * nvme_suspend_queue - put queue into suspended state
1087 * @nvmeq - queue to suspend
1088 *
1089 * Returns 1 if already suspended, 0 otherwise.
1090 */
1091static int nvme_suspend_queue(struct nvme_queue *nvmeq)
979{ 1092{
980 struct nvme_queue *nvmeq = dev->queues[qid]; 1093 int vector = nvmeq->dev->entry[nvmeq->cq_vector].vector;
981 int vector = dev->entry[nvmeq->cq_vector].vector;
982 1094
983 spin_lock_irq(&nvmeq->q_lock); 1095 spin_lock_irq(&nvmeq->q_lock);
984 if (nvmeq->q_suspended) { 1096 if (nvmeq->q_suspended) {
985 spin_unlock_irq(&nvmeq->q_lock); 1097 spin_unlock_irq(&nvmeq->q_lock);
986 return; 1098 return 1;
987 } 1099 }
988 nvmeq->q_suspended = 1; 1100 nvmeq->q_suspended = 1;
989 spin_unlock_irq(&nvmeq->q_lock); 1101 spin_unlock_irq(&nvmeq->q_lock);
@@ -991,18 +1103,35 @@ static void nvme_disable_queue(struct nvme_dev *dev, int qid)
991 irq_set_affinity_hint(vector, NULL); 1103 irq_set_affinity_hint(vector, NULL);
992 free_irq(vector, nvmeq); 1104 free_irq(vector, nvmeq);
993 1105
994 /* Don't tell the adapter to delete the admin queue */ 1106 return 0;
995 if (qid) { 1107}
996 adapter_delete_sq(dev, qid);
997 adapter_delete_cq(dev, qid);
998 }
999 1108
1109static void nvme_clear_queue(struct nvme_queue *nvmeq)
1110{
1000 spin_lock_irq(&nvmeq->q_lock); 1111 spin_lock_irq(&nvmeq->q_lock);
1001 nvme_process_cq(nvmeq); 1112 nvme_process_cq(nvmeq);
1002 nvme_cancel_ios(nvmeq, false); 1113 nvme_cancel_ios(nvmeq, false);
1003 spin_unlock_irq(&nvmeq->q_lock); 1114 spin_unlock_irq(&nvmeq->q_lock);
1004} 1115}
1005 1116
1117static void nvme_disable_queue(struct nvme_dev *dev, int qid)
1118{
1119 struct nvme_queue *nvmeq = dev->queues[qid];
1120
1121 if (!nvmeq)
1122 return;
1123 if (nvme_suspend_queue(nvmeq))
1124 return;
1125
1126 /* Don't tell the adapter to delete the admin queue.
1127 * Don't tell a removed adapter to delete IO queues. */
1128 if (qid && readl(&dev->bar->csts) != -1) {
1129 adapter_delete_sq(dev, qid);
1130 adapter_delete_cq(dev, qid);
1131 }
1132 nvme_clear_queue(nvmeq);
1133}
1134
1006static struct nvme_queue *nvme_alloc_queue(struct nvme_dev *dev, int qid, 1135static struct nvme_queue *nvme_alloc_queue(struct nvme_dev *dev, int qid,
1007 int depth, int vector) 1136 int depth, int vector)
1008{ 1137{
@@ -1025,15 +1154,18 @@ static struct nvme_queue *nvme_alloc_queue(struct nvme_dev *dev, int qid,
1025 1154
1026 nvmeq->q_dmadev = dmadev; 1155 nvmeq->q_dmadev = dmadev;
1027 nvmeq->dev = dev; 1156 nvmeq->dev = dev;
1157 snprintf(nvmeq->irqname, sizeof(nvmeq->irqname), "nvme%dq%d",
1158 dev->instance, qid);
1028 spin_lock_init(&nvmeq->q_lock); 1159 spin_lock_init(&nvmeq->q_lock);
1029 nvmeq->cq_head = 0; 1160 nvmeq->cq_head = 0;
1030 nvmeq->cq_phase = 1; 1161 nvmeq->cq_phase = 1;
1031 init_waitqueue_head(&nvmeq->sq_full); 1162 init_waitqueue_head(&nvmeq->sq_full);
1032 init_waitqueue_entry(&nvmeq->sq_cong_wait, nvme_thread); 1163 init_waitqueue_entry(&nvmeq->sq_cong_wait, nvme_thread);
1033 bio_list_init(&nvmeq->sq_cong); 1164 bio_list_init(&nvmeq->sq_cong);
1034 nvmeq->q_db = &dev->dbs[qid << (dev->db_stride + 1)]; 1165 nvmeq->q_db = &dev->dbs[qid * 2 * dev->db_stride];
1035 nvmeq->q_depth = depth; 1166 nvmeq->q_depth = depth;
1036 nvmeq->cq_vector = vector; 1167 nvmeq->cq_vector = vector;
1168 nvmeq->qid = qid;
1037 nvmeq->q_suspended = 1; 1169 nvmeq->q_suspended = 1;
1038 dev->queue_count++; 1170 dev->queue_count++;
1039 1171
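
This hunk, together with the matching changes in nvme_init_queue() and db_bar_size(), switches db_stride from storing the raw CAP.DSTRD exponent to storing the stride itself (1 << DSTRD, counted in 32-bit doorbell slots), so offsets become plain multiplications. A sketch of the resulting layout, assuming the same dbs/db_stride fields as above:

	/* Each queue owns two consecutive stride-sized doorbell slots. */
	u32 __iomem *sq_db = &dev->dbs[(2 * qid) * dev->db_stride];	/* SQ tail */
	u32 __iomem *cq_db = &dev->dbs[(2 * qid + 1) * dev->db_stride];	/* CQ head */

	/* and the mapping must cover all of them: */
	size_t bar_len = 4096 + (nr_io_queues + 1) * 2 * sizeof(u32) * dev->db_stride;
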
@@ -1052,11 +1184,10 @@ static int queue_request_irq(struct nvme_dev *dev, struct nvme_queue *nvmeq,
1052{ 1184{
1053 if (use_threaded_interrupts) 1185 if (use_threaded_interrupts)
1054 return request_threaded_irq(dev->entry[nvmeq->cq_vector].vector, 1186 return request_threaded_irq(dev->entry[nvmeq->cq_vector].vector,
1055 nvme_irq_check, nvme_irq, 1187 nvme_irq_check, nvme_irq, IRQF_SHARED,
1056 IRQF_DISABLED | IRQF_SHARED,
1057 name, nvmeq); 1188 name, nvmeq);
1058 return request_irq(dev->entry[nvmeq->cq_vector].vector, nvme_irq, 1189 return request_irq(dev->entry[nvmeq->cq_vector].vector, nvme_irq,
1059 IRQF_DISABLED | IRQF_SHARED, name, nvmeq); 1190 IRQF_SHARED, name, nvmeq);
1060} 1191}
1061 1192
1062static void nvme_init_queue(struct nvme_queue *nvmeq, u16 qid) 1193static void nvme_init_queue(struct nvme_queue *nvmeq, u16 qid)
@@ -1067,7 +1198,7 @@ static void nvme_init_queue(struct nvme_queue *nvmeq, u16 qid)
1067 nvmeq->sq_tail = 0; 1198 nvmeq->sq_tail = 0;
1068 nvmeq->cq_head = 0; 1199 nvmeq->cq_head = 0;
1069 nvmeq->cq_phase = 1; 1200 nvmeq->cq_phase = 1;
1070 nvmeq->q_db = &dev->dbs[qid << (dev->db_stride + 1)]; 1201 nvmeq->q_db = &dev->dbs[qid * 2 * dev->db_stride];
1071 memset(nvmeq->cmdid_data, 0, extra); 1202 memset(nvmeq->cmdid_data, 0, extra);
1072 memset((void *)nvmeq->cqes, 0, CQ_SIZE(nvmeq->q_depth)); 1203 memset((void *)nvmeq->cqes, 0, CQ_SIZE(nvmeq->q_depth));
1073 nvme_cancel_ios(nvmeq, false); 1204 nvme_cancel_ios(nvmeq, false);
@@ -1087,13 +1218,13 @@ static int nvme_create_queue(struct nvme_queue *nvmeq, int qid)
1087 if (result < 0) 1218 if (result < 0)
1088 goto release_cq; 1219 goto release_cq;
1089 1220
1090 result = queue_request_irq(dev, nvmeq, "nvme"); 1221 result = queue_request_irq(dev, nvmeq, nvmeq->irqname);
1091 if (result < 0) 1222 if (result < 0)
1092 goto release_sq; 1223 goto release_sq;
1093 1224
1094 spin_lock(&nvmeq->q_lock); 1225 spin_lock_irq(&nvmeq->q_lock);
1095 nvme_init_queue(nvmeq, qid); 1226 nvme_init_queue(nvmeq, qid);
1096 spin_unlock(&nvmeq->q_lock); 1227 spin_unlock_irq(&nvmeq->q_lock);
1097 1228
1098 return result; 1229 return result;
1099 1230
@@ -1205,13 +1336,13 @@ static int nvme_configure_admin_queue(struct nvme_dev *dev)
1205 if (result) 1336 if (result)
1206 return result; 1337 return result;
1207 1338
1208 result = queue_request_irq(dev, nvmeq, "nvme admin"); 1339 result = queue_request_irq(dev, nvmeq, nvmeq->irqname);
1209 if (result) 1340 if (result)
1210 return result; 1341 return result;
1211 1342
1212 spin_lock(&nvmeq->q_lock); 1343 spin_lock_irq(&nvmeq->q_lock);
1213 nvme_init_queue(nvmeq, 0); 1344 nvme_init_queue(nvmeq, 0);
1214 spin_unlock(&nvmeq->q_lock); 1345 spin_unlock_irq(&nvmeq->q_lock);
1215 return result; 1346 return result;
1216} 1347}
1217 1348
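
The spin_lock() to spin_lock_irq() conversions in the two hunks above are needed because q_lock is also taken from hard-irq context by the completion handler; a process-context holder that leaves interrupts enabled can deadlock against its own CPU. A simplified paraphrase of the two sides (the real handler also tracks cqe_seen so it can return IRQ_NONE on spurious interrupts):

	static irqreturn_t nvme_irq(int irq, void *data)	/* hard-irq context */
	{
		struct nvme_queue *nvmeq = data;

		spin_lock(&nvmeq->q_lock);	/* interrupts already disabled here */
		nvme_process_cq(nvmeq);
		spin_unlock(&nvmeq->q_lock);
		return IRQ_HANDLED;
	}

	/* so process context must disable interrupts while holding the lock: */
	spin_lock_irq(&nvmeq->q_lock);
	nvme_init_queue(nvmeq, qid);
	spin_unlock_irq(&nvmeq->q_lock);
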
@@ -1487,10 +1618,47 @@ static int nvme_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd,
1487 } 1618 }
1488} 1619}
1489 1620
1621#ifdef CONFIG_COMPAT
1622static int nvme_compat_ioctl(struct block_device *bdev, fmode_t mode,
1623 unsigned int cmd, unsigned long arg)
1624{
1625 struct nvme_ns *ns = bdev->bd_disk->private_data;
1626
1627 switch (cmd) {
1628 case SG_IO:
1629 return nvme_sg_io32(ns, arg);
1630 }
1631 return nvme_ioctl(bdev, mode, cmd, arg);
1632}
1633#else
1634#define nvme_compat_ioctl NULL
1635#endif
1636
1637static int nvme_open(struct block_device *bdev, fmode_t mode)
1638{
1639 struct nvme_ns *ns = bdev->bd_disk->private_data;
1640 struct nvme_dev *dev = ns->dev;
1641
1642 kref_get(&dev->kref);
1643 return 0;
1644}
1645
1646static void nvme_free_dev(struct kref *kref);
1647
1648static void nvme_release(struct gendisk *disk, fmode_t mode)
1649{
1650 struct nvme_ns *ns = disk->private_data;
1651 struct nvme_dev *dev = ns->dev;
1652
1653 kref_put(&dev->kref, nvme_free_dev);
1654}
1655
1490static const struct block_device_operations nvme_fops = { 1656static const struct block_device_operations nvme_fops = {
1491 .owner = THIS_MODULE, 1657 .owner = THIS_MODULE,
1492 .ioctl = nvme_ioctl, 1658 .ioctl = nvme_ioctl,
1493 .compat_ioctl = nvme_ioctl, 1659 .compat_ioctl = nvme_compat_ioctl,
1660 .open = nvme_open,
1661 .release = nvme_release,
1494}; 1662};
1495 1663
1496static void nvme_resubmit_bios(struct nvme_queue *nvmeq) 1664static void nvme_resubmit_bios(struct nvme_queue *nvmeq)
@@ -1514,13 +1682,25 @@ static void nvme_resubmit_bios(struct nvme_queue *nvmeq)
1514 1682
1515static int nvme_kthread(void *data) 1683static int nvme_kthread(void *data)
1516{ 1684{
1517 struct nvme_dev *dev; 1685 struct nvme_dev *dev, *next;
1518 1686
1519 while (!kthread_should_stop()) { 1687 while (!kthread_should_stop()) {
1520 set_current_state(TASK_INTERRUPTIBLE); 1688 set_current_state(TASK_INTERRUPTIBLE);
1521 spin_lock(&dev_list_lock); 1689 spin_lock(&dev_list_lock);
1522 list_for_each_entry(dev, &dev_list, node) { 1690 list_for_each_entry_safe(dev, next, &dev_list, node) {
1523 int i; 1691 int i;
1692 if (readl(&dev->bar->csts) & NVME_CSTS_CFS &&
1693 dev->initialized) {
1694 if (work_busy(&dev->reset_work))
1695 continue;
1696 list_del_init(&dev->node);
1697 dev_warn(&dev->pci_dev->dev,
1698 "Failed status, reset controller\n");
1699 PREPARE_WORK(&dev->reset_work,
1700 nvme_reset_failed_dev);
1701 queue_work(nvme_workq, &dev->reset_work);
1702 continue;
1703 }
1524 for (i = 0; i < dev->queue_count; i++) { 1704 for (i = 0; i < dev->queue_count; i++) {
1525 struct nvme_queue *nvmeq = dev->queues[i]; 1705 struct nvme_queue *nvmeq = dev->queues[i];
1526 if (!nvmeq) 1706 if (!nvmeq)
@@ -1541,33 +1721,6 @@ static int nvme_kthread(void *data)
1541 return 0; 1721 return 0;
1542} 1722}
1543 1723
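
The kthread loop above now doubles as a watchdog: when CSTS.CFS (Controller Fatal Status) is set on an initialized device and no reset is already in flight, the device is dropped from the polled list and nvme_reset_failed_dev() is queued on the driver workqueue. Condensed into one illustrative helper (the driver open-codes this inside the loop and also takes dev_list_lock around the list removal):

	static void nvme_check_fatal(struct nvme_dev *dev)
	{
		if (!(readl(&dev->bar->csts) & NVME_CSTS_CFS) || !dev->initialized)
			return;
		if (work_busy(&dev->reset_work))
			return;		/* a reset is already queued or running */
		dev_warn(&dev->pci_dev->dev, "Failed status, reset controller\n");
		PREPARE_WORK(&dev->reset_work, nvme_reset_failed_dev);
		queue_work(nvme_workq, &dev->reset_work);
	}
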
1544static DEFINE_IDA(nvme_index_ida);
1545
1546static int nvme_get_ns_idx(void)
1547{
1548 int index, error;
1549
1550 do {
1551 if (!ida_pre_get(&nvme_index_ida, GFP_KERNEL))
1552 return -1;
1553
1554 spin_lock(&dev_list_lock);
1555 error = ida_get_new(&nvme_index_ida, &index);
1556 spin_unlock(&dev_list_lock);
1557 } while (error == -EAGAIN);
1558
1559 if (error)
1560 index = -1;
1561 return index;
1562}
1563
1564static void nvme_put_ns_idx(int index)
1565{
1566 spin_lock(&dev_list_lock);
1567 ida_remove(&nvme_index_ida, index);
1568 spin_unlock(&dev_list_lock);
1569}
1570
1571static void nvme_config_discard(struct nvme_ns *ns) 1724static void nvme_config_discard(struct nvme_ns *ns)
1572{ 1725{
1573 u32 logical_block_size = queue_logical_block_size(ns->queue); 1726 u32 logical_block_size = queue_logical_block_size(ns->queue);
@@ -1601,7 +1754,7 @@ static struct nvme_ns *nvme_alloc_ns(struct nvme_dev *dev, unsigned nsid,
1601 ns->dev = dev; 1754 ns->dev = dev;
1602 ns->queue->queuedata = ns; 1755 ns->queue->queuedata = ns;
1603 1756
1604 disk = alloc_disk(NVME_MINORS); 1757 disk = alloc_disk(0);
1605 if (!disk) 1758 if (!disk)
1606 goto out_free_queue; 1759 goto out_free_queue;
1607 ns->ns_id = nsid; 1760 ns->ns_id = nsid;
@@ -1614,12 +1767,12 @@ static struct nvme_ns *nvme_alloc_ns(struct nvme_dev *dev, unsigned nsid,
1614 blk_queue_max_hw_sectors(ns->queue, dev->max_hw_sectors); 1767 blk_queue_max_hw_sectors(ns->queue, dev->max_hw_sectors);
1615 1768
1616 disk->major = nvme_major; 1769 disk->major = nvme_major;
1617 disk->minors = NVME_MINORS; 1770 disk->first_minor = 0;
1618 disk->first_minor = NVME_MINORS * nvme_get_ns_idx();
1619 disk->fops = &nvme_fops; 1771 disk->fops = &nvme_fops;
1620 disk->private_data = ns; 1772 disk->private_data = ns;
1621 disk->queue = ns->queue; 1773 disk->queue = ns->queue;
1622 disk->driverfs_dev = &dev->pci_dev->dev; 1774 disk->driverfs_dev = &dev->pci_dev->dev;
1775 disk->flags = GENHD_FL_EXT_DEVT;
1623 sprintf(disk->disk_name, "nvme%dn%d", dev->instance, nsid); 1776 sprintf(disk->disk_name, "nvme%dn%d", dev->instance, nsid);
1624 set_capacity(disk, le64_to_cpup(&id->nsze) << (ns->lba_shift - 9)); 1777 set_capacity(disk, le64_to_cpup(&id->nsze) << (ns->lba_shift - 9));
1625 1778
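
Allocating the gendisk with alloc_disk(0) and setting GENHD_FL_EXT_DEVT means minor numbers are handed out dynamically when the disk is registered, which is what lets the ida-based nvme_get_ns_idx()/nvme_put_ns_idx() helpers be deleted above. The pattern, condensed (dev->instance and nsid as in the hunk):

	struct gendisk *disk = alloc_disk(0);		/* no preallocated minor range */
	if (!disk)
		goto out_free_queue;
	disk->major       = nvme_major;
	disk->first_minor = 0;
	disk->flags       = GENHD_FL_EXT_DEVT;		/* dev_t assigned at add_disk() */
	disk->fops        = &nvme_fops;
	sprintf(disk->disk_name, "nvme%dn%d", dev->instance, nsid);
	add_disk(disk);
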
@@ -1635,15 +1788,6 @@ static struct nvme_ns *nvme_alloc_ns(struct nvme_dev *dev, unsigned nsid,
1635 return NULL; 1788 return NULL;
1636} 1789}
1637 1790
1638static void nvme_ns_free(struct nvme_ns *ns)
1639{
1640 int index = ns->disk->first_minor / NVME_MINORS;
1641 put_disk(ns->disk);
1642 nvme_put_ns_idx(index);
1643 blk_cleanup_queue(ns->queue);
1644 kfree(ns);
1645}
1646
1647static int set_queue_count(struct nvme_dev *dev, int count) 1791static int set_queue_count(struct nvme_dev *dev, int count)
1648{ 1792{
1649 int status; 1793 int status;
@@ -1659,11 +1803,12 @@ static int set_queue_count(struct nvme_dev *dev, int count)
1659 1803
1660static size_t db_bar_size(struct nvme_dev *dev, unsigned nr_io_queues) 1804static size_t db_bar_size(struct nvme_dev *dev, unsigned nr_io_queues)
1661{ 1805{
1662 return 4096 + ((nr_io_queues + 1) << (dev->db_stride + 3)); 1806 return 4096 + ((nr_io_queues + 1) * 8 * dev->db_stride);
1663} 1807}
1664 1808
1665static int nvme_setup_io_queues(struct nvme_dev *dev) 1809static int nvme_setup_io_queues(struct nvme_dev *dev)
1666{ 1810{
1811 struct nvme_queue *adminq = dev->queues[0];
1667 struct pci_dev *pdev = dev->pci_dev; 1812 struct pci_dev *pdev = dev->pci_dev;
1668 int result, cpu, i, vecs, nr_io_queues, size, q_depth; 1813 int result, cpu, i, vecs, nr_io_queues, size, q_depth;
1669 1814
@@ -1690,7 +1835,7 @@ static int nvme_setup_io_queues(struct nvme_dev *dev)
1690 } 1835 }
1691 1836
1692 /* Deregister the admin queue's interrupt */ 1837 /* Deregister the admin queue's interrupt */
1693 free_irq(dev->entry[0].vector, dev->queues[0]); 1838 free_irq(dev->entry[0].vector, adminq);
1694 1839
1695 vecs = nr_io_queues; 1840 vecs = nr_io_queues;
1696 for (i = 0; i < vecs; i++) 1841 for (i = 0; i < vecs; i++)
@@ -1728,9 +1873,9 @@ static int nvme_setup_io_queues(struct nvme_dev *dev)
1728 */ 1873 */
1729 nr_io_queues = vecs; 1874 nr_io_queues = vecs;
1730 1875
1731 result = queue_request_irq(dev, dev->queues[0], "nvme admin"); 1876 result = queue_request_irq(dev, adminq, adminq->irqname);
1732 if (result) { 1877 if (result) {
1733 dev->queues[0]->q_suspended = 1; 1878 adminq->q_suspended = 1;
1734 goto free_queues; 1879 goto free_queues;
1735 } 1880 }
1736 1881
@@ -1739,9 +1884,9 @@ static int nvme_setup_io_queues(struct nvme_dev *dev)
1739 for (i = dev->queue_count - 1; i > nr_io_queues; i--) { 1884 for (i = dev->queue_count - 1; i > nr_io_queues; i--) {
1740 struct nvme_queue *nvmeq = dev->queues[i]; 1885 struct nvme_queue *nvmeq = dev->queues[i];
1741 1886
1742 spin_lock(&nvmeq->q_lock); 1887 spin_lock_irq(&nvmeq->q_lock);
1743 nvme_cancel_ios(nvmeq, false); 1888 nvme_cancel_ios(nvmeq, false);
1744 spin_unlock(&nvmeq->q_lock); 1889 spin_unlock_irq(&nvmeq->q_lock);
1745 1890
1746 nvme_free_queue(nvmeq); 1891 nvme_free_queue(nvmeq);
1747 dev->queue_count--; 1892 dev->queue_count--;
@@ -1782,7 +1927,7 @@ static int nvme_setup_io_queues(struct nvme_dev *dev)
1782 return 0; 1927 return 0;
1783 1928
1784 free_queues: 1929 free_queues:
1785 nvme_free_queues(dev); 1930 nvme_free_queues(dev, 1);
1786 return result; 1931 return result;
1787} 1932}
1788 1933
@@ -1794,6 +1939,7 @@ static int nvme_setup_io_queues(struct nvme_dev *dev)
1794 */ 1939 */
1795static int nvme_dev_add(struct nvme_dev *dev) 1940static int nvme_dev_add(struct nvme_dev *dev)
1796{ 1941{
1942 struct pci_dev *pdev = dev->pci_dev;
1797 int res; 1943 int res;
1798 unsigned nn, i; 1944 unsigned nn, i;
1799 struct nvme_ns *ns; 1945 struct nvme_ns *ns;
@@ -1803,8 +1949,7 @@ static int nvme_dev_add(struct nvme_dev *dev)
1803 dma_addr_t dma_addr; 1949 dma_addr_t dma_addr;
1804 int shift = NVME_CAP_MPSMIN(readq(&dev->bar->cap)) + 12; 1950 int shift = NVME_CAP_MPSMIN(readq(&dev->bar->cap)) + 12;
1805 1951
1806 mem = dma_alloc_coherent(&dev->pci_dev->dev, 8192, &dma_addr, 1952 mem = dma_alloc_coherent(&pdev->dev, 8192, &dma_addr, GFP_KERNEL);
1807 GFP_KERNEL);
1808 if (!mem) 1953 if (!mem)
1809 return -ENOMEM; 1954 return -ENOMEM;
1810 1955
@@ -1817,13 +1962,14 @@ static int nvme_dev_add(struct nvme_dev *dev)
1817 ctrl = mem; 1962 ctrl = mem;
1818 nn = le32_to_cpup(&ctrl->nn); 1963 nn = le32_to_cpup(&ctrl->nn);
1819 dev->oncs = le16_to_cpup(&ctrl->oncs); 1964 dev->oncs = le16_to_cpup(&ctrl->oncs);
1965 dev->abort_limit = ctrl->acl + 1;
1820 memcpy(dev->serial, ctrl->sn, sizeof(ctrl->sn)); 1966 memcpy(dev->serial, ctrl->sn, sizeof(ctrl->sn));
1821 memcpy(dev->model, ctrl->mn, sizeof(ctrl->mn)); 1967 memcpy(dev->model, ctrl->mn, sizeof(ctrl->mn));
1822 memcpy(dev->firmware_rev, ctrl->fr, sizeof(ctrl->fr)); 1968 memcpy(dev->firmware_rev, ctrl->fr, sizeof(ctrl->fr));
1823 if (ctrl->mdts) 1969 if (ctrl->mdts)
1824 dev->max_hw_sectors = 1 << (ctrl->mdts + shift - 9); 1970 dev->max_hw_sectors = 1 << (ctrl->mdts + shift - 9);
1825 if ((dev->pci_dev->vendor == PCI_VENDOR_ID_INTEL) && 1971 if ((pdev->vendor == PCI_VENDOR_ID_INTEL) &&
1826 (dev->pci_dev->device == 0x0953) && ctrl->vs[3]) 1972 (pdev->device == 0x0953) && ctrl->vs[3])
1827 dev->stripe_size = 1 << (ctrl->vs[3] + shift); 1973 dev->stripe_size = 1 << (ctrl->vs[3] + shift);
1828 1974
1829 id_ns = mem; 1975 id_ns = mem;
@@ -1871,16 +2017,21 @@ static int nvme_dev_map(struct nvme_dev *dev)
1871 dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32))) 2017 dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)))
1872 goto disable; 2018 goto disable;
1873 2019
1874 pci_set_drvdata(pdev, dev);
1875 dev->bar = ioremap(pci_resource_start(pdev, 0), 8192); 2020 dev->bar = ioremap(pci_resource_start(pdev, 0), 8192);
1876 if (!dev->bar) 2021 if (!dev->bar)
1877 goto disable; 2022 goto disable;
1878 2023 if (readl(&dev->bar->csts) == -1) {
1879 dev->db_stride = NVME_CAP_STRIDE(readq(&dev->bar->cap)); 2024 result = -ENODEV;
2025 goto unmap;
2026 }
2027 dev->db_stride = 1 << NVME_CAP_STRIDE(readq(&dev->bar->cap));
1880 dev->dbs = ((void __iomem *)dev->bar) + 4096; 2028 dev->dbs = ((void __iomem *)dev->bar) + 4096;
1881 2029
1882 return 0; 2030 return 0;
1883 2031
2032 unmap:
2033 iounmap(dev->bar);
2034 dev->bar = NULL;
1884 disable: 2035 disable:
1885 pci_release_regions(pdev); 2036 pci_release_regions(pdev);
1886 disable_pci: 2037 disable_pci:
@@ -1898,37 +2049,183 @@ static void nvme_dev_unmap(struct nvme_dev *dev)
1898 if (dev->bar) { 2049 if (dev->bar) {
1899 iounmap(dev->bar); 2050 iounmap(dev->bar);
1900 dev->bar = NULL; 2051 dev->bar = NULL;
2052 pci_release_regions(dev->pci_dev);
1901 } 2053 }
1902 2054
1903 pci_release_regions(dev->pci_dev);
1904 if (pci_is_enabled(dev->pci_dev)) 2055 if (pci_is_enabled(dev->pci_dev))
1905 pci_disable_device(dev->pci_dev); 2056 pci_disable_device(dev->pci_dev);
1906} 2057}
1907 2058
2059struct nvme_delq_ctx {
2060 struct task_struct *waiter;
2061 struct kthread_worker *worker;
2062 atomic_t refcount;
2063};
2064
2065static void nvme_wait_dq(struct nvme_delq_ctx *dq, struct nvme_dev *dev)
2066{
2067 dq->waiter = current;
2068 mb();
2069
2070 for (;;) {
2071 set_current_state(TASK_KILLABLE);
2072 if (!atomic_read(&dq->refcount))
2073 break;
2074 if (!schedule_timeout(ADMIN_TIMEOUT) ||
2075 fatal_signal_pending(current)) {
2076 set_current_state(TASK_RUNNING);
2077
2078 nvme_disable_ctrl(dev, readq(&dev->bar->cap));
2079 nvme_disable_queue(dev, 0);
2080
2081 send_sig(SIGKILL, dq->worker->task, 1);
2082 flush_kthread_worker(dq->worker);
2083 return;
2084 }
2085 }
2086 set_current_state(TASK_RUNNING);
2087}
2088
2089static void nvme_put_dq(struct nvme_delq_ctx *dq)
2090{
2091 atomic_dec(&dq->refcount);
2092 if (dq->waiter)
2093 wake_up_process(dq->waiter);
2094}
2095
2096static struct nvme_delq_ctx *nvme_get_dq(struct nvme_delq_ctx *dq)
2097{
2098 atomic_inc(&dq->refcount);
2099 return dq;
2100}
2101
2102static void nvme_del_queue_end(struct nvme_queue *nvmeq)
2103{
2104 struct nvme_delq_ctx *dq = nvmeq->cmdinfo.ctx;
2105
2106 nvme_clear_queue(nvmeq);
2107 nvme_put_dq(dq);
2108}
2109
2110static int adapter_async_del_queue(struct nvme_queue *nvmeq, u8 opcode,
2111 kthread_work_func_t fn)
2112{
2113 struct nvme_command c;
2114
2115 memset(&c, 0, sizeof(c));
2116 c.delete_queue.opcode = opcode;
2117 c.delete_queue.qid = cpu_to_le16(nvmeq->qid);
2118
2119 init_kthread_work(&nvmeq->cmdinfo.work, fn);
2120 return nvme_submit_admin_cmd_async(nvmeq->dev, &c, &nvmeq->cmdinfo);
2121}
2122
2123static void nvme_del_cq_work_handler(struct kthread_work *work)
2124{
2125 struct nvme_queue *nvmeq = container_of(work, struct nvme_queue,
2126 cmdinfo.work);
2127 nvme_del_queue_end(nvmeq);
2128}
2129
2130static int nvme_delete_cq(struct nvme_queue *nvmeq)
2131{
2132 return adapter_async_del_queue(nvmeq, nvme_admin_delete_cq,
2133 nvme_del_cq_work_handler);
2134}
2135
2136static void nvme_del_sq_work_handler(struct kthread_work *work)
2137{
2138 struct nvme_queue *nvmeq = container_of(work, struct nvme_queue,
2139 cmdinfo.work);
2140 int status = nvmeq->cmdinfo.status;
2141
2142 if (!status)
2143 status = nvme_delete_cq(nvmeq);
2144 if (status)
2145 nvme_del_queue_end(nvmeq);
2146}
2147
2148static int nvme_delete_sq(struct nvme_queue *nvmeq)
2149{
2150 return adapter_async_del_queue(nvmeq, nvme_admin_delete_sq,
2151 nvme_del_sq_work_handler);
2152}
2153
2154static void nvme_del_queue_start(struct kthread_work *work)
2155{
2156 struct nvme_queue *nvmeq = container_of(work, struct nvme_queue,
2157 cmdinfo.work);
2158 allow_signal(SIGKILL);
2159 if (nvme_delete_sq(nvmeq))
2160 nvme_del_queue_end(nvmeq);
2161}
2162
2163static void nvme_disable_io_queues(struct nvme_dev *dev)
2164{
2165 int i;
2166 DEFINE_KTHREAD_WORKER_ONSTACK(worker);
2167 struct nvme_delq_ctx dq;
2168 struct task_struct *kworker_task = kthread_run(kthread_worker_fn,
2169 &worker, "nvme%d", dev->instance);
2170
2171 if (IS_ERR(kworker_task)) {
2172 dev_err(&dev->pci_dev->dev,
2173 "Failed to create queue del task\n");
2174 for (i = dev->queue_count - 1; i > 0; i--)
2175 nvme_disable_queue(dev, i);
2176 return;
2177 }
2178
2179 dq.waiter = NULL;
2180 atomic_set(&dq.refcount, 0);
2181 dq.worker = &worker;
2182 for (i = dev->queue_count - 1; i > 0; i--) {
2183 struct nvme_queue *nvmeq = dev->queues[i];
2184
2185 if (nvme_suspend_queue(nvmeq))
2186 continue;
2187 nvmeq->cmdinfo.ctx = nvme_get_dq(&dq);
2188 nvmeq->cmdinfo.worker = dq.worker;
2189 init_kthread_work(&nvmeq->cmdinfo.work, nvme_del_queue_start);
2190 queue_kthread_work(dq.worker, &nvmeq->cmdinfo.work);
2191 }
2192 nvme_wait_dq(&dq, dev);
2193 kthread_stop(kworker_task);
2194}
2195
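
nvme_disable_io_queues() tears the IO queues down in parallel: every queue that is not already suspended gets a delete-SQ admin command submitted asynchronously, its completion kicks a kthread_work item that submits the delete-CQ, and an atomic refcount plus a TASK_KILLABLE waiter (nvme_wait_dq()) lets the caller wait for the whole batch, with a forced teardown if the controller stops answering within ADMIN_TIMEOUT. The kthread_worker machinery it builds on looks roughly like this generic sketch (hypothetical names, API spellings as used in this tree):

	#include <linux/kthread.h>

	struct item {
		struct kthread_work work;
		int id;
	};

	static void item_fn(struct kthread_work *work)
	{
		struct item *it = container_of(work, struct item, work);

		pr_info("handling item %d\n", it->id);
	}

	static void run_batch(void)
	{
		DEFINE_KTHREAD_WORKER_ONSTACK(worker);
		struct task_struct *task;
		struct item items[4];
		int i;

		task = kthread_run(kthread_worker_fn, &worker, "batch-worker");
		if (IS_ERR(task))
			return;

		for (i = 0; i < ARRAY_SIZE(items); i++) {
			items[i].id = i;
			init_kthread_work(&items[i].work, item_fn);
			queue_kthread_work(&worker, &items[i].work);
		}

		flush_kthread_worker(&worker);	/* wait for everything queued so far */
		kthread_stop(task);
	}
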
1908static void nvme_dev_shutdown(struct nvme_dev *dev) 2196static void nvme_dev_shutdown(struct nvme_dev *dev)
1909{ 2197{
1910 int i; 2198 int i;
1911 2199
1912 for (i = dev->queue_count - 1; i >= 0; i--) 2200 dev->initialized = 0;
1913 nvme_disable_queue(dev, i);
1914 2201
1915 spin_lock(&dev_list_lock); 2202 spin_lock(&dev_list_lock);
1916 list_del_init(&dev->node); 2203 list_del_init(&dev->node);
1917 spin_unlock(&dev_list_lock); 2204 spin_unlock(&dev_list_lock);
1918 2205
1919 if (dev->bar) 2206 if (!dev->bar || (dev->bar && readl(&dev->bar->csts) == -1)) {
2207 for (i = dev->queue_count - 1; i >= 0; i--) {
2208 struct nvme_queue *nvmeq = dev->queues[i];
2209 nvme_suspend_queue(nvmeq);
2210 nvme_clear_queue(nvmeq);
2211 }
2212 } else {
2213 nvme_disable_io_queues(dev);
1920 nvme_shutdown_ctrl(dev); 2214 nvme_shutdown_ctrl(dev);
2215 nvme_disable_queue(dev, 0);
2216 }
1921 nvme_dev_unmap(dev); 2217 nvme_dev_unmap(dev);
1922} 2218}
1923 2219
1924static void nvme_dev_remove(struct nvme_dev *dev) 2220static void nvme_dev_remove(struct nvme_dev *dev)
1925{ 2221{
1926 struct nvme_ns *ns, *next; 2222 struct nvme_ns *ns;
1927 2223
1928 list_for_each_entry_safe(ns, next, &dev->namespaces, list) { 2224 list_for_each_entry(ns, &dev->namespaces, list) {
1929 list_del(&ns->list); 2225 if (ns->disk->flags & GENHD_FL_UP)
1930 del_gendisk(ns->disk); 2226 del_gendisk(ns->disk);
1931 nvme_ns_free(ns); 2227 if (!blk_queue_dying(ns->queue))
2228 blk_cleanup_queue(ns->queue);
1932 } 2229 }
1933} 2230}
1934 2231
@@ -1985,14 +2282,22 @@ static void nvme_release_instance(struct nvme_dev *dev)
1985 spin_unlock(&dev_list_lock); 2282 spin_unlock(&dev_list_lock);
1986} 2283}
1987 2284
2285static void nvme_free_namespaces(struct nvme_dev *dev)
2286{
2287 struct nvme_ns *ns, *next;
2288
2289 list_for_each_entry_safe(ns, next, &dev->namespaces, list) {
2290 list_del(&ns->list);
2291 put_disk(ns->disk);
2292 kfree(ns);
2293 }
2294}
2295
1988static void nvme_free_dev(struct kref *kref) 2296static void nvme_free_dev(struct kref *kref)
1989{ 2297{
1990 struct nvme_dev *dev = container_of(kref, struct nvme_dev, kref); 2298 struct nvme_dev *dev = container_of(kref, struct nvme_dev, kref);
1991 nvme_dev_remove(dev); 2299
1992 nvme_dev_shutdown(dev); 2300 nvme_free_namespaces(dev);
1993 nvme_free_queues(dev);
1994 nvme_release_instance(dev);
1995 nvme_release_prp_pools(dev);
1996 kfree(dev->queues); 2301 kfree(dev->queues);
1997 kfree(dev->entry); 2302 kfree(dev->entry);
1998 kfree(dev); 2303 kfree(dev);
@@ -2056,6 +2361,7 @@ static int nvme_dev_start(struct nvme_dev *dev)
2056 return result; 2361 return result;
2057 2362
2058 disable: 2363 disable:
2364 nvme_disable_queue(dev, 0);
2059 spin_lock(&dev_list_lock); 2365 spin_lock(&dev_list_lock);
2060 list_del_init(&dev->node); 2366 list_del_init(&dev->node);
2061 spin_unlock(&dev_list_lock); 2367 spin_unlock(&dev_list_lock);
@@ -2064,6 +2370,71 @@ static int nvme_dev_start(struct nvme_dev *dev)
2064 return result; 2370 return result;
2065} 2371}
2066 2372
2373static int nvme_remove_dead_ctrl(void *arg)
2374{
2375 struct nvme_dev *dev = (struct nvme_dev *)arg;
2376 struct pci_dev *pdev = dev->pci_dev;
2377
2378 if (pci_get_drvdata(pdev))
2379 pci_stop_and_remove_bus_device(pdev);
2380 kref_put(&dev->kref, nvme_free_dev);
2381 return 0;
2382}
2383
2384static void nvme_remove_disks(struct work_struct *ws)
2385{
2386 int i;
2387 struct nvme_dev *dev = container_of(ws, struct nvme_dev, reset_work);
2388
2389 nvme_dev_remove(dev);
2390 spin_lock(&dev_list_lock);
2391 for (i = dev->queue_count - 1; i > 0; i--) {
2392 BUG_ON(!dev->queues[i] || !dev->queues[i]->q_suspended);
2393 nvme_free_queue(dev->queues[i]);
2394 dev->queue_count--;
2395 dev->queues[i] = NULL;
2396 }
2397 spin_unlock(&dev_list_lock);
2398}
2399
2400static int nvme_dev_resume(struct nvme_dev *dev)
2401{
2402 int ret;
2403
2404 ret = nvme_dev_start(dev);
2405 if (ret && ret != -EBUSY)
2406 return ret;
2407 if (ret == -EBUSY) {
2408 spin_lock(&dev_list_lock);
2409 PREPARE_WORK(&dev->reset_work, nvme_remove_disks);
2410 queue_work(nvme_workq, &dev->reset_work);
2411 spin_unlock(&dev_list_lock);
2412 }
2413 dev->initialized = 1;
2414 return 0;
2415}
2416
2417static void nvme_dev_reset(struct nvme_dev *dev)
2418{
2419 nvme_dev_shutdown(dev);
2420 if (nvme_dev_resume(dev)) {
2421 dev_err(&dev->pci_dev->dev, "Device failed to resume\n");
2422 kref_get(&dev->kref);
2423 if (IS_ERR(kthread_run(nvme_remove_dead_ctrl, dev, "nvme%d",
2424 dev->instance))) {
2425 dev_err(&dev->pci_dev->dev,
2426 "Failed to start controller remove task\n");
2427 kref_put(&dev->kref, nvme_free_dev);
2428 }
2429 }
2430}
2431
2432static void nvme_reset_failed_dev(struct work_struct *ws)
2433{
2434 struct nvme_dev *dev = container_of(ws, struct nvme_dev, reset_work);
2435 nvme_dev_reset(dev);
2436}
2437
2067static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id) 2438static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
2068{ 2439{
2069 int result = -ENOMEM; 2440 int result = -ENOMEM;
@@ -2082,8 +2453,9 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
2082 goto free; 2453 goto free;
2083 2454
2084 INIT_LIST_HEAD(&dev->namespaces); 2455 INIT_LIST_HEAD(&dev->namespaces);
2456 INIT_WORK(&dev->reset_work, nvme_reset_failed_dev);
2085 dev->pci_dev = pdev; 2457 dev->pci_dev = pdev;
2086 2458 pci_set_drvdata(pdev, dev);
2087 result = nvme_set_instance(dev); 2459 result = nvme_set_instance(dev);
2088 if (result) 2460 if (result)
2089 goto free; 2461 goto free;
@@ -2099,6 +2471,7 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
2099 goto release_pools; 2471 goto release_pools;
2100 } 2472 }
2101 2473
2474 kref_init(&dev->kref);
2102 result = nvme_dev_add(dev); 2475 result = nvme_dev_add(dev);
2103 if (result) 2476 if (result)
2104 goto shutdown; 2477 goto shutdown;
@@ -2113,15 +2486,16 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
2113 if (result) 2486 if (result)
2114 goto remove; 2487 goto remove;
2115 2488
2116 kref_init(&dev->kref); 2489 dev->initialized = 1;
2117 return 0; 2490 return 0;
2118 2491
2119 remove: 2492 remove:
2120 nvme_dev_remove(dev); 2493 nvme_dev_remove(dev);
2494 nvme_free_namespaces(dev);
2121 shutdown: 2495 shutdown:
2122 nvme_dev_shutdown(dev); 2496 nvme_dev_shutdown(dev);
2123 release_pools: 2497 release_pools:
2124 nvme_free_queues(dev); 2498 nvme_free_queues(dev, 0);
2125 nvme_release_prp_pools(dev); 2499 nvme_release_prp_pools(dev);
2126 release: 2500 release:
2127 nvme_release_instance(dev); 2501 nvme_release_instance(dev);
@@ -2132,10 +2506,28 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
2132 return result; 2506 return result;
2133} 2507}
2134 2508
2509static void nvme_shutdown(struct pci_dev *pdev)
2510{
2511 struct nvme_dev *dev = pci_get_drvdata(pdev);
2512 nvme_dev_shutdown(dev);
2513}
2514
2135static void nvme_remove(struct pci_dev *pdev) 2515static void nvme_remove(struct pci_dev *pdev)
2136{ 2516{
2137 struct nvme_dev *dev = pci_get_drvdata(pdev); 2517 struct nvme_dev *dev = pci_get_drvdata(pdev);
2518
2519 spin_lock(&dev_list_lock);
2520 list_del_init(&dev->node);
2521 spin_unlock(&dev_list_lock);
2522
2523 pci_set_drvdata(pdev, NULL);
2524 flush_work(&dev->reset_work);
2138 misc_deregister(&dev->miscdev); 2525 misc_deregister(&dev->miscdev);
2526 nvme_dev_remove(dev);
2527 nvme_dev_shutdown(dev);
2528 nvme_free_queues(dev, 0);
2529 nvme_release_instance(dev);
2530 nvme_release_prp_pools(dev);
2139 kref_put(&dev->kref, nvme_free_dev); 2531 kref_put(&dev->kref, nvme_free_dev);
2140} 2532}
2141 2533
@@ -2159,13 +2551,12 @@ static int nvme_resume(struct device *dev)
2159{ 2551{
2160 struct pci_dev *pdev = to_pci_dev(dev); 2552 struct pci_dev *pdev = to_pci_dev(dev);
2161 struct nvme_dev *ndev = pci_get_drvdata(pdev); 2553 struct nvme_dev *ndev = pci_get_drvdata(pdev);
2162 int ret;
2163 2554
2164 ret = nvme_dev_start(ndev); 2555 if (nvme_dev_resume(ndev) && !work_busy(&ndev->reset_work)) {
2165 /* XXX: should remove gendisks if resume fails */ 2556 PREPARE_WORK(&ndev->reset_work, nvme_reset_failed_dev);
2166 if (ret) 2557 queue_work(nvme_workq, &ndev->reset_work);
2167 nvme_free_queues(ndev); 2558 }
2168 return ret; 2559 return 0;
2169} 2560}
2170 2561
2171static SIMPLE_DEV_PM_OPS(nvme_dev_pm_ops, nvme_suspend, nvme_resume); 2562static SIMPLE_DEV_PM_OPS(nvme_dev_pm_ops, nvme_suspend, nvme_resume);
@@ -2192,6 +2583,7 @@ static struct pci_driver nvme_driver = {
2192 .id_table = nvme_id_table, 2583 .id_table = nvme_id_table,
2193 .probe = nvme_probe, 2584 .probe = nvme_probe,
2194 .remove = nvme_remove, 2585 .remove = nvme_remove,
2586 .shutdown = nvme_shutdown,
2195 .driver = { 2587 .driver = {
2196 .pm = &nvme_dev_pm_ops, 2588 .pm = &nvme_dev_pm_ops,
2197 }, 2589 },
@@ -2206,9 +2598,14 @@ static int __init nvme_init(void)
2206 if (IS_ERR(nvme_thread)) 2598 if (IS_ERR(nvme_thread))
2207 return PTR_ERR(nvme_thread); 2599 return PTR_ERR(nvme_thread);
2208 2600
2601 result = -ENOMEM;
2602 nvme_workq = create_singlethread_workqueue("nvme");
2603 if (!nvme_workq)
2604 goto kill_kthread;
2605
2209 result = register_blkdev(nvme_major, "nvme"); 2606 result = register_blkdev(nvme_major, "nvme");
2210 if (result < 0) 2607 if (result < 0)
2211 goto kill_kthread; 2608 goto kill_workq;
2212 else if (result > 0) 2609 else if (result > 0)
2213 nvme_major = result; 2610 nvme_major = result;
2214 2611
@@ -2219,6 +2616,8 @@ static int __init nvme_init(void)
2219 2616
2220 unregister_blkdev: 2617 unregister_blkdev:
2221 unregister_blkdev(nvme_major, "nvme"); 2618 unregister_blkdev(nvme_major, "nvme");
2619 kill_workq:
2620 destroy_workqueue(nvme_workq);
2222 kill_kthread: 2621 kill_kthread:
2223 kthread_stop(nvme_thread); 2622 kthread_stop(nvme_thread);
2224 return result; 2623 return result;
@@ -2228,6 +2627,7 @@ static void __exit nvme_exit(void)
2228{ 2627{
2229 pci_unregister_driver(&nvme_driver); 2628 pci_unregister_driver(&nvme_driver);
2230 unregister_blkdev(nvme_major, "nvme"); 2629 unregister_blkdev(nvme_major, "nvme");
2630 destroy_workqueue(nvme_workq);
2231 kthread_stop(nvme_thread); 2631 kthread_stop(nvme_thread);
2232} 2632}
2233 2633
diff --git a/drivers/block/nvme-scsi.c b/drivers/block/nvme-scsi.c
index 4a4ff4eb8e23..4a0ceb64e269 100644
--- a/drivers/block/nvme-scsi.c
+++ b/drivers/block/nvme-scsi.c
@@ -25,6 +25,7 @@
25#include <linux/bio.h> 25#include <linux/bio.h>
26#include <linux/bitops.h> 26#include <linux/bitops.h>
27#include <linux/blkdev.h> 27#include <linux/blkdev.h>
28#include <linux/compat.h>
28#include <linux/delay.h> 29#include <linux/delay.h>
29#include <linux/errno.h> 30#include <linux/errno.h>
30#include <linux/fs.h> 31#include <linux/fs.h>
@@ -3038,6 +3039,152 @@ int nvme_sg_io(struct nvme_ns *ns, struct sg_io_hdr __user *u_hdr)
3038 return retcode; 3039 return retcode;
3039} 3040}
3040 3041
3042#ifdef CONFIG_COMPAT
3043typedef struct sg_io_hdr32 {
3044 compat_int_t interface_id; /* [i] 'S' for SCSI generic (required) */
3045 compat_int_t dxfer_direction; /* [i] data transfer direction */
3046 unsigned char cmd_len; /* [i] SCSI command length ( <= 16 bytes) */
3047 unsigned char mx_sb_len; /* [i] max length to write to sbp */
3048 unsigned short iovec_count; /* [i] 0 implies no scatter gather */
3049 compat_uint_t dxfer_len; /* [i] byte count of data transfer */
3050 compat_uint_t dxferp; /* [i], [*io] points to data transfer memory
3051 or scatter gather list */
3052 compat_uptr_t cmdp; /* [i], [*i] points to command to perform */
3053 compat_uptr_t sbp; /* [i], [*o] points to sense_buffer memory */
3054 compat_uint_t timeout; /* [i] MAX_UINT->no timeout (unit: millisec) */
3055 compat_uint_t flags; /* [i] 0 -> default, see SG_FLAG... */
3056 compat_int_t pack_id; /* [i->o] unused internally (normally) */
3057 compat_uptr_t usr_ptr; /* [i->o] unused internally */
3058 unsigned char status; /* [o] scsi status */
3059 unsigned char masked_status; /* [o] shifted, masked scsi status */
3060 unsigned char msg_status; /* [o] messaging level data (optional) */
3061 unsigned char sb_len_wr; /* [o] byte count actually written to sbp */
3062 unsigned short host_status; /* [o] errors from host adapter */
3063 unsigned short driver_status; /* [o] errors from software driver */
3064 compat_int_t resid; /* [o] dxfer_len - actual_transferred */
3065 compat_uint_t duration; /* [o] time taken by cmd (unit: millisec) */
3066 compat_uint_t info; /* [o] auxiliary information */
3067} sg_io_hdr32_t; /* 64 bytes long (on sparc32) */
3068
3069typedef struct sg_iovec32 {
3070 compat_uint_t iov_base;
3071 compat_uint_t iov_len;
3072} sg_iovec32_t;
3073
3074static int sg_build_iovec(sg_io_hdr_t __user *sgio, void __user *dxferp, u16 iovec_count)
3075{
3076 sg_iovec_t __user *iov = (sg_iovec_t __user *) (sgio + 1);
3077 sg_iovec32_t __user *iov32 = dxferp;
3078 int i;
3079
3080 for (i = 0; i < iovec_count; i++) {
3081 u32 base, len;
3082
3083 if (get_user(base, &iov32[i].iov_base) ||
3084 get_user(len, &iov32[i].iov_len) ||
3085 put_user(compat_ptr(base), &iov[i].iov_base) ||
3086 put_user(len, &iov[i].iov_len))
3087 return -EFAULT;
3088 }
3089
3090 if (put_user(iov, &sgio->dxferp))
3091 return -EFAULT;
3092 return 0;
3093}
3094
3095int nvme_sg_io32(struct nvme_ns *ns, unsigned long arg)
3096{
3097 sg_io_hdr32_t __user *sgio32 = (sg_io_hdr32_t __user *)arg;
3098 sg_io_hdr_t __user *sgio;
3099 u16 iovec_count;
3100 u32 data;
3101 void __user *dxferp;
3102 int err;
3103 int interface_id;
3104
3105 if (get_user(interface_id, &sgio32->interface_id))
3106 return -EFAULT;
3107 if (interface_id != 'S')
3108 return -EINVAL;
3109
3110 if (get_user(iovec_count, &sgio32->iovec_count))
3111 return -EFAULT;
3112
3113 {
3114 void __user *top = compat_alloc_user_space(0);
3115 void __user *new = compat_alloc_user_space(sizeof(sg_io_hdr_t) +
3116 (iovec_count * sizeof(sg_iovec_t)));
3117 if (new > top)
3118 return -EINVAL;
3119
3120 sgio = new;
3121 }
3122
3123 /* Ok, now construct. */
3124 if (copy_in_user(&sgio->interface_id, &sgio32->interface_id,
3125 (2 * sizeof(int)) +
3126 (2 * sizeof(unsigned char)) +
3127 (1 * sizeof(unsigned short)) +
3128 (1 * sizeof(unsigned int))))
3129 return -EFAULT;
3130
3131 if (get_user(data, &sgio32->dxferp))
3132 return -EFAULT;
3133 dxferp = compat_ptr(data);
3134 if (iovec_count) {
3135 if (sg_build_iovec(sgio, dxferp, iovec_count))
3136 return -EFAULT;
3137 } else {
3138 if (put_user(dxferp, &sgio->dxferp))
3139 return -EFAULT;
3140 }
3141
3142 {
3143 unsigned char __user *cmdp;
3144 unsigned char __user *sbp;
3145
3146 if (get_user(data, &sgio32->cmdp))
3147 return -EFAULT;
3148 cmdp = compat_ptr(data);
3149
3150 if (get_user(data, &sgio32->sbp))
3151 return -EFAULT;
3152 sbp = compat_ptr(data);
3153
3154 if (put_user(cmdp, &sgio->cmdp) ||
3155 put_user(sbp, &sgio->sbp))
3156 return -EFAULT;
3157 }
3158
3159 if (copy_in_user(&sgio->timeout, &sgio32->timeout,
3160 3 * sizeof(int)))
3161 return -EFAULT;
3162
3163 if (get_user(data, &sgio32->usr_ptr))
3164 return -EFAULT;
3165 if (put_user(compat_ptr(data), &sgio->usr_ptr))
3166 return -EFAULT;
3167
3168 err = nvme_sg_io(ns, sgio);
3169 if (err >= 0) {
3170 void __user *datap;
3171
3172 if (copy_in_user(&sgio32->pack_id, &sgio->pack_id,
3173 sizeof(int)) ||
3174 get_user(datap, &sgio->usr_ptr) ||
3175 put_user((u32)(unsigned long)datap,
3176 &sgio32->usr_ptr) ||
3177 copy_in_user(&sgio32->status, &sgio->status,
3178 (4 * sizeof(unsigned char)) +
3179 (2 * sizeof(unsigned short)) +
3180 (3 * sizeof(int))))
3181 err = -EFAULT;
3182 }
3183
3184 return err;
3185}
3186#endif
3187
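
nvme_sg_io32() rebuilds a native sg_io_hdr on the compat user stack (compat_alloc_user_space()) because the 32-bit layout stores pointers as 32-bit integers; each embedded pointer must be widened with compat_ptr() before nvme_sg_io() can use it. The core fix-up, reduced to a helper with an illustrative name:

	#include <linux/compat.h>
	#include <linux/uaccess.h>

	/* Read a 32-bit user pointer and widen it to a native user pointer. */
	static int get_compat_uptr(void __user **dst, compat_uptr_t __user *src)
	{
		compat_uptr_t p32;

		if (get_user(p32, src))
			return -EFAULT;
		*dst = compat_ptr(p32);
		return 0;
	}
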
3041int nvme_sg_get_version_num(int __user *ip) 3188int nvme_sg_get_version_num(int __user *ip)
3042{ 3189{
3043 return put_user(sg_version_num, ip); 3190 return put_user(sg_version_num, ip);
diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c
index da18046d0e07..4b97b86da926 100644
--- a/drivers/block/xen-blkback/blkback.c
+++ b/drivers/block/xen-blkback/blkback.c
@@ -285,7 +285,8 @@ static void free_persistent_gnts(struct xen_blkif *blkif, struct rb_root *root,
285 285
286 if (++segs_to_unmap == BLKIF_MAX_SEGMENTS_PER_REQUEST || 286 if (++segs_to_unmap == BLKIF_MAX_SEGMENTS_PER_REQUEST ||
287 !rb_next(&persistent_gnt->node)) { 287 !rb_next(&persistent_gnt->node)) {
288 ret = gnttab_unmap_refs(unmap, pages, segs_to_unmap); 288 ret = gnttab_unmap_refs(unmap, NULL, pages,
289 segs_to_unmap);
289 BUG_ON(ret); 290 BUG_ON(ret);
290 put_free_pages(blkif, pages, segs_to_unmap); 291 put_free_pages(blkif, pages, segs_to_unmap);
291 segs_to_unmap = 0; 292 segs_to_unmap = 0;
@@ -320,7 +321,8 @@ static void unmap_purged_grants(struct work_struct *work)
320 pages[segs_to_unmap] = persistent_gnt->page; 321 pages[segs_to_unmap] = persistent_gnt->page;
321 322
322 if (++segs_to_unmap == BLKIF_MAX_SEGMENTS_PER_REQUEST) { 323 if (++segs_to_unmap == BLKIF_MAX_SEGMENTS_PER_REQUEST) {
323 ret = gnttab_unmap_refs(unmap, pages, segs_to_unmap); 324 ret = gnttab_unmap_refs(unmap, NULL, pages,
325 segs_to_unmap);
324 BUG_ON(ret); 326 BUG_ON(ret);
325 put_free_pages(blkif, pages, segs_to_unmap); 327 put_free_pages(blkif, pages, segs_to_unmap);
326 segs_to_unmap = 0; 328 segs_to_unmap = 0;
@@ -328,7 +330,7 @@ static void unmap_purged_grants(struct work_struct *work)
328 kfree(persistent_gnt); 330 kfree(persistent_gnt);
329 } 331 }
330 if (segs_to_unmap > 0) { 332 if (segs_to_unmap > 0) {
331 ret = gnttab_unmap_refs(unmap, pages, segs_to_unmap); 333 ret = gnttab_unmap_refs(unmap, NULL, pages, segs_to_unmap);
332 BUG_ON(ret); 334 BUG_ON(ret);
333 put_free_pages(blkif, pages, segs_to_unmap); 335 put_free_pages(blkif, pages, segs_to_unmap);
334 } 336 }
@@ -668,14 +670,15 @@ static void xen_blkbk_unmap(struct xen_blkif *blkif,
668 GNTMAP_host_map, pages[i]->handle); 670 GNTMAP_host_map, pages[i]->handle);
669 pages[i]->handle = BLKBACK_INVALID_HANDLE; 671 pages[i]->handle = BLKBACK_INVALID_HANDLE;
670 if (++invcount == BLKIF_MAX_SEGMENTS_PER_REQUEST) { 672 if (++invcount == BLKIF_MAX_SEGMENTS_PER_REQUEST) {
671 ret = gnttab_unmap_refs(unmap, unmap_pages, invcount); 673 ret = gnttab_unmap_refs(unmap, NULL, unmap_pages,
674 invcount);
672 BUG_ON(ret); 675 BUG_ON(ret);
673 put_free_pages(blkif, unmap_pages, invcount); 676 put_free_pages(blkif, unmap_pages, invcount);
674 invcount = 0; 677 invcount = 0;
675 } 678 }
676 } 679 }
677 if (invcount) { 680 if (invcount) {
678 ret = gnttab_unmap_refs(unmap, unmap_pages, invcount); 681 ret = gnttab_unmap_refs(unmap, NULL, unmap_pages, invcount);
679 BUG_ON(ret); 682 BUG_ON(ret);
680 put_free_pages(blkif, unmap_pages, invcount); 683 put_free_pages(blkif, unmap_pages, invcount);
681 } 684 }
@@ -737,7 +740,7 @@ again:
737 } 740 }
738 741
739 if (segs_to_map) { 742 if (segs_to_map) {
740 ret = gnttab_map_refs(map, pages_to_gnt, segs_to_map); 743 ret = gnttab_map_refs(map, NULL, pages_to_gnt, segs_to_map);
741 BUG_ON(ret); 744 BUG_ON(ret);
742 } 745 }
743 746
diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
index feea87cc6b8f..6928d094451d 100644
--- a/drivers/char/virtio_console.c
+++ b/drivers/char/virtio_console.c
@@ -890,12 +890,10 @@ static int pipe_to_sg(struct pipe_inode_info *pipe, struct pipe_buffer *buf,
890 } else { 890 } else {
891 /* Failback to copying a page */ 891 /* Failback to copying a page */
892 struct page *page = alloc_page(GFP_KERNEL); 892 struct page *page = alloc_page(GFP_KERNEL);
893 char *src = buf->ops->map(pipe, buf, 1); 893 char *src;
894 char *dst;
895 894
896 if (!page) 895 if (!page)
897 return -ENOMEM; 896 return -ENOMEM;
898 dst = kmap(page);
899 897
900 offset = sd->pos & ~PAGE_MASK; 898 offset = sd->pos & ~PAGE_MASK;
901 899
@@ -903,9 +901,8 @@ static int pipe_to_sg(struct pipe_inode_info *pipe, struct pipe_buffer *buf,
903 if (len + offset > PAGE_SIZE) 901 if (len + offset > PAGE_SIZE)
904 len = PAGE_SIZE - offset; 902 len = PAGE_SIZE - offset;
905 903
906 memcpy(dst + offset, src + buf->offset, len); 904 src = buf->ops->map(pipe, buf, 1);
907 905 memcpy(page_address(page) + offset, src + buf->offset, len);
908 kunmap(page);
909 buf->ops->unmap(pipe, buf, src); 906 buf->ops->unmap(pipe, buf, src);
910 907
911 sg_set_page(&(sgl->sg[sgl->n]), page, len, offset); 908 sg_set_page(&(sgl->sg[sgl->n]), page, len, offset);
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index 7e257b233602..79606f473f48 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -57,6 +57,7 @@ struct sample {
57 int32_t core_pct_busy; 57 int32_t core_pct_busy;
58 u64 aperf; 58 u64 aperf;
59 u64 mperf; 59 u64 mperf;
60 unsigned long long tsc;
60 int freq; 61 int freq;
61}; 62};
62 63
@@ -96,6 +97,7 @@ struct cpudata {
96 97
97 u64 prev_aperf; 98 u64 prev_aperf;
98 u64 prev_mperf; 99 u64 prev_mperf;
100 unsigned long long prev_tsc;
99 int sample_ptr; 101 int sample_ptr;
100 struct sample samples[SAMPLE_COUNT]; 102 struct sample samples[SAMPLE_COUNT];
101}; 103};
@@ -548,30 +550,41 @@ static inline void intel_pstate_calc_busy(struct cpudata *cpu,
548 struct sample *sample) 550 struct sample *sample)
549{ 551{
550 u64 core_pct; 552 u64 core_pct;
551 core_pct = div64_u64(int_tofp(sample->aperf * 100), 553 u64 c0_pct;
552 sample->mperf);
553 sample->freq = fp_toint(cpu->pstate.max_pstate * core_pct * 1000);
554 554
555 sample->core_pct_busy = core_pct; 555 core_pct = div64_u64(sample->aperf * 100, sample->mperf);
556
557 c0_pct = div64_u64(sample->mperf * 100, sample->tsc);
558 sample->freq = fp_toint(
559 mul_fp(int_tofp(cpu->pstate.max_pstate),
560 int_tofp(core_pct * 1000)));
561
562 sample->core_pct_busy = mul_fp(int_tofp(core_pct),
563 div_fp(int_tofp(c0_pct + 1), int_tofp(100)));
556} 564}
557 565
558static inline void intel_pstate_sample(struct cpudata *cpu) 566static inline void intel_pstate_sample(struct cpudata *cpu)
559{ 567{
560 u64 aperf, mperf; 568 u64 aperf, mperf;
569 unsigned long long tsc;
561 570
562 rdmsrl(MSR_IA32_APERF, aperf); 571 rdmsrl(MSR_IA32_APERF, aperf);
563 rdmsrl(MSR_IA32_MPERF, mperf); 572 rdmsrl(MSR_IA32_MPERF, mperf);
573 tsc = native_read_tsc();
564 574
565 cpu->sample_ptr = (cpu->sample_ptr + 1) % SAMPLE_COUNT; 575 cpu->sample_ptr = (cpu->sample_ptr + 1) % SAMPLE_COUNT;
566 cpu->samples[cpu->sample_ptr].aperf = aperf; 576 cpu->samples[cpu->sample_ptr].aperf = aperf;
567 cpu->samples[cpu->sample_ptr].mperf = mperf; 577 cpu->samples[cpu->sample_ptr].mperf = mperf;
578 cpu->samples[cpu->sample_ptr].tsc = tsc;
568 cpu->samples[cpu->sample_ptr].aperf -= cpu->prev_aperf; 579 cpu->samples[cpu->sample_ptr].aperf -= cpu->prev_aperf;
569 cpu->samples[cpu->sample_ptr].mperf -= cpu->prev_mperf; 580 cpu->samples[cpu->sample_ptr].mperf -= cpu->prev_mperf;
581 cpu->samples[cpu->sample_ptr].tsc -= cpu->prev_tsc;
570 582
571 intel_pstate_calc_busy(cpu, &cpu->samples[cpu->sample_ptr]); 583 intel_pstate_calc_busy(cpu, &cpu->samples[cpu->sample_ptr]);
572 584
573 cpu->prev_aperf = aperf; 585 cpu->prev_aperf = aperf;
574 cpu->prev_mperf = mperf; 586 cpu->prev_mperf = mperf;
587 cpu->prev_tsc = tsc;
575} 588}
576 589
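
The sampling change records a TSC delta next to APERF/MPERF so that core_pct_busy now scales the APERF/MPERF performance ratio by C0 residency (MPERF/TSC) instead of treating the core as busy for the whole interval. Stripped of the fixed-point conversions (div64_u64 is from <linux/math64.h>), the computation is roughly:

	/* delivered vs. guaranteed performance while the core was not idle (%) */
	u64 core_pct = div64_u64(sample->aperf * 100, sample->mperf);
	/* fraction of the sample interval actually spent executing (%) */
	u64 c0_pct   = div64_u64(sample->mperf * 100, sample->tsc);
	/* busy value handed on to the P-state selection */
	u64 busy_pct = core_pct * (c0_pct + 1) / 100;
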
577static inline void intel_pstate_set_sample_time(struct cpudata *cpu) 590static inline void intel_pstate_set_sample_time(struct cpudata *cpu)
diff --git a/drivers/edac/edac_mc_sysfs.c b/drivers/edac/edac_mc_sysfs.c
index 51c0362acf5c..8ec1747b1c39 100644
--- a/drivers/edac/edac_mc_sysfs.c
+++ b/drivers/edac/edac_mc_sysfs.c
@@ -61,7 +61,7 @@ static int edac_set_poll_msec(const char *val, struct kernel_param *kp)
61 ret = kstrtol(val, 0, &l); 61 ret = kstrtol(val, 0, &l);
62 if (ret) 62 if (ret)
63 return ret; 63 return ret;
64 if ((int)l != l) 64 if (!l || ((int)l != l))
65 return -EINVAL; 65 return -EINVAL;
66 *((int *)kp->arg) = l; 66 *((int *)kp->arg) = l;
67 67
diff --git a/drivers/gpu/drm/ast/ast_fb.c b/drivers/gpu/drm/ast/ast_fb.c
index 3f65dd6676b2..a28640f47c27 100644
--- a/drivers/gpu/drm/ast/ast_fb.c
+++ b/drivers/gpu/drm/ast/ast_fb.c
@@ -65,7 +65,7 @@ static void ast_dirty_update(struct ast_fbdev *afbdev,
65 * then the BO is being moved and we should 65 * then the BO is being moved and we should
66 * store up the damage until later. 66 * store up the damage until later.
67 */ 67 */
68 if (!drm_can_sleep()) 68 if (drm_can_sleep())
69 ret = ast_bo_reserve(bo, true); 69 ret = ast_bo_reserve(bo, true);
70 if (ret) { 70 if (ret) {
71 if (ret != -EBUSY) 71 if (ret != -EBUSY)
diff --git a/drivers/gpu/drm/cirrus/cirrus_fbdev.c b/drivers/gpu/drm/cirrus/cirrus_fbdev.c
index 2fd4a92162cb..32bbba0a787b 100644
--- a/drivers/gpu/drm/cirrus/cirrus_fbdev.c
+++ b/drivers/gpu/drm/cirrus/cirrus_fbdev.c
@@ -39,7 +39,7 @@ static void cirrus_dirty_update(struct cirrus_fbdev *afbdev,
39 * then the BO is being moved and we should 39 * then the BO is being moved and we should
40 * store up the damage until later. 40 * store up the damage until later.
41 */ 41 */
42 if (!drm_can_sleep()) 42 if (drm_can_sleep())
43 ret = cirrus_bo_reserve(bo, true); 43 ret = cirrus_bo_reserve(bo, true);
44 if (ret) { 44 if (ret) {
45 if (ret != -EBUSY) 45 if (ret != -EBUSY)
diff --git a/drivers/gpu/drm/mgag200/mgag200_fb.c b/drivers/gpu/drm/mgag200/mgag200_fb.c
index f9adc27ef32a..13b7dd83faa9 100644
--- a/drivers/gpu/drm/mgag200/mgag200_fb.c
+++ b/drivers/gpu/drm/mgag200/mgag200_fb.c
@@ -41,7 +41,7 @@ static void mga_dirty_update(struct mga_fbdev *mfbdev,
41 * then the BO is being moved and we should 41 * then the BO is being moved and we should
42 * store up the damage until later. 42 * store up the damage until later.
43 */ 43 */
44 if (!drm_can_sleep()) 44 if (drm_can_sleep())
45 ret = mgag200_bo_reserve(bo, true); 45 ret = mgag200_bo_reserve(bo, true);
46 if (ret) { 46 if (ret) {
47 if (ret != -EBUSY) 47 if (ret != -EBUSY)
diff --git a/drivers/gpu/drm/mgag200/mgag200_mode.c b/drivers/gpu/drm/mgag200/mgag200_mode.c
index b8583f275e80..968374776db9 100644
--- a/drivers/gpu/drm/mgag200/mgag200_mode.c
+++ b/drivers/gpu/drm/mgag200/mgag200_mode.c
@@ -1519,11 +1519,11 @@ static int mga_vga_mode_valid(struct drm_connector *connector,
1519 (mga_vga_calculate_mode_bandwidth(mode, bpp) 1519 (mga_vga_calculate_mode_bandwidth(mode, bpp)
1520 > (32700 * 1024))) { 1520 > (32700 * 1024))) {
1521 return MODE_BANDWIDTH; 1521 return MODE_BANDWIDTH;
1522 } else if (mode->type == G200_EH && 1522 } else if (mdev->type == G200_EH &&
1523 (mga_vga_calculate_mode_bandwidth(mode, bpp) 1523 (mga_vga_calculate_mode_bandwidth(mode, bpp)
1524 > (37500 * 1024))) { 1524 > (37500 * 1024))) {
1525 return MODE_BANDWIDTH; 1525 return MODE_BANDWIDTH;
1526 } else if (mode->type == G200_ER && 1526 } else if (mdev->type == G200_ER &&
1527 (mga_vga_calculate_mode_bandwidth(mode, 1527 (mga_vga_calculate_mode_bandwidth(mode,
1528 bpp) > (55000 * 1024))) { 1528 bpp) > (55000 * 1024))) {
1529 return MODE_BANDWIDTH; 1529 return MODE_BANDWIDTH;
diff --git a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c
index 7b399dc5fd54..2812c7d1ae6f 100644
--- a/drivers/gpu/drm/radeon/r600_cs.c
+++ b/drivers/gpu/drm/radeon/r600_cs.c
@@ -1007,8 +1007,22 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
1007 case R_008C64_SQ_VSTMP_RING_SIZE: 1007 case R_008C64_SQ_VSTMP_RING_SIZE:
1008 case R_0288C8_SQ_GS_VERT_ITEMSIZE: 1008 case R_0288C8_SQ_GS_VERT_ITEMSIZE:
1009 /* get value to populate the IB don't remove */ 1009 /* get value to populate the IB don't remove */
1010 tmp =radeon_get_ib_value(p, idx); 1010 /*tmp =radeon_get_ib_value(p, idx);
1011 ib[idx] = 0; 1011 ib[idx] = 0;*/
1012 break;
1013 case SQ_ESGS_RING_BASE:
1014 case SQ_GSVS_RING_BASE:
1015 case SQ_ESTMP_RING_BASE:
1016 case SQ_GSTMP_RING_BASE:
1017 case SQ_PSTMP_RING_BASE:
1018 case SQ_VSTMP_RING_BASE:
1019 r = radeon_cs_packet_next_reloc(p, &reloc, 0);
1020 if (r) {
1021 dev_warn(p->dev, "bad SET_CONTEXT_REG "
1022 "0x%04X\n", reg);
1023 return -EINVAL;
1024 }
1025 ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
1012 break; 1026 break;
1013 case SQ_CONFIG: 1027 case SQ_CONFIG:
1014 track->sq_config = radeon_get_ib_value(p, idx); 1028 track->sq_config = radeon_get_ib_value(p, idx);
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index ec8c388eec17..84a1bbb75f91 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -78,9 +78,10 @@
78 * 2.34.0 - Add CIK tiling mode array query 78 * 2.34.0 - Add CIK tiling mode array query
79 * 2.35.0 - Add CIK macrotile mode array query 79 * 2.35.0 - Add CIK macrotile mode array query
80 * 2.36.0 - Fix CIK DCE tiling setup 80 * 2.36.0 - Fix CIK DCE tiling setup
81 * 2.37.0 - allow GS ring setup on r6xx/r7xx
81 */ 82 */
82#define KMS_DRIVER_MAJOR 2 83#define KMS_DRIVER_MAJOR 2
83#define KMS_DRIVER_MINOR 36 84#define KMS_DRIVER_MINOR 37
84#define KMS_DRIVER_PATCHLEVEL 0 85#define KMS_DRIVER_PATCHLEVEL 0
85int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags); 86int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags);
86int radeon_driver_unload_kms(struct drm_device *dev); 87int radeon_driver_unload_kms(struct drm_device *dev);
diff --git a/drivers/gpu/drm/radeon/reg_srcs/r600 b/drivers/gpu/drm/radeon/reg_srcs/r600
index 20bfbda7b3f1..ec0c6829c1dc 100644
--- a/drivers/gpu/drm/radeon/reg_srcs/r600
+++ b/drivers/gpu/drm/radeon/reg_srcs/r600
@@ -18,6 +18,7 @@ r600 0x9400
180x00028A3C VGT_GROUP_VECT_1_FMT_CNTL 180x00028A3C VGT_GROUP_VECT_1_FMT_CNTL
190x00028A40 VGT_GS_MODE 190x00028A40 VGT_GS_MODE
200x00028A6C VGT_GS_OUT_PRIM_TYPE 200x00028A6C VGT_GS_OUT_PRIM_TYPE
210x00028B38 VGT_GS_MAX_VERT_OUT
210x000088C8 VGT_GS_PER_ES 220x000088C8 VGT_GS_PER_ES
220x000088E8 VGT_GS_PER_VS 230x000088E8 VGT_GS_PER_VS
230x000088D4 VGT_GS_VERTEX_REUSE 240x000088D4 VGT_GS_VERTEX_REUSE
diff --git a/drivers/gpu/drm/ttm/ttm_object.c b/drivers/gpu/drm/ttm/ttm_object.c
index 37079859afc8..53b51c4e671a 100644
--- a/drivers/gpu/drm/ttm/ttm_object.c
+++ b/drivers/gpu/drm/ttm/ttm_object.c
@@ -292,7 +292,7 @@ int ttm_ref_object_add(struct ttm_object_file *tfile,
292 292
293 if (ret == 0) { 293 if (ret == 0) {
294 ref = drm_hash_entry(hash, struct ttm_ref_object, hash); 294 ref = drm_hash_entry(hash, struct ttm_ref_object, hash);
295 if (!kref_get_unless_zero(&ref->kref)) { 295 if (kref_get_unless_zero(&ref->kref)) {
296 rcu_read_unlock(); 296 rcu_read_unlock();
297 break; 297 break;
298 } 298 }
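
The one-character change above restores the intended semantics: kref_get_unless_zero() returns non-zero when it successfully takes a reference (the count was not already zero), so the RCU lookup may only keep the object when the call succeeds. The usual idiom, with lookup() standing in for the hash walk:

	rcu_read_lock();
	ref = lookup(tfile, key);			/* object may be concurrently freed */
	if (ref && kref_get_unless_zero(&ref->kref)) {
		rcu_read_unlock();
		return ref;				/* a reference is now held */
	}
	rcu_read_unlock();
	return NULL;					/* object was on its way out */
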
diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c
index 9af99084b344..75f319090043 100644
--- a/drivers/gpu/drm/ttm/ttm_tt.c
+++ b/drivers/gpu/drm/ttm/ttm_tt.c
@@ -380,6 +380,9 @@ static void ttm_tt_clear_mapping(struct ttm_tt *ttm)
380 pgoff_t i; 380 pgoff_t i;
381 struct page **page = ttm->pages; 381 struct page **page = ttm->pages;
382 382
383 if (ttm->page_flags & TTM_PAGE_FLAG_SG)
384 return;
385
383 for (i = 0; i < ttm->num_pages; ++i) { 386 for (i = 0; i < ttm->num_pages; ++i) {
384 (*page)->mapping = NULL; 387 (*page)->mapping = NULL;
385 (*page++)->index = 0; 388 (*page++)->index = 0;
diff --git a/drivers/gpu/drm/vmwgfx/svga3d_reg.h b/drivers/gpu/drm/vmwgfx/svga3d_reg.h
index d95335cb90bd..b645647b7776 100644
--- a/drivers/gpu/drm/vmwgfx/svga3d_reg.h
+++ b/drivers/gpu/drm/vmwgfx/svga3d_reg.h
@@ -2583,4 +2583,28 @@ typedef union {
2583 float f; 2583 float f;
2584} SVGA3dDevCapResult; 2584} SVGA3dDevCapResult;
2585 2585
2586typedef enum {
2587 SVGA3DCAPS_RECORD_UNKNOWN = 0,
2588 SVGA3DCAPS_RECORD_DEVCAPS_MIN = 0x100,
2589 SVGA3DCAPS_RECORD_DEVCAPS = 0x100,
2590 SVGA3DCAPS_RECORD_DEVCAPS_MAX = 0x1ff,
2591} SVGA3dCapsRecordType;
2592
2593typedef
2594struct SVGA3dCapsRecordHeader {
2595 uint32 length;
2596 SVGA3dCapsRecordType type;
2597}
2598SVGA3dCapsRecordHeader;
2599
2600typedef
2601struct SVGA3dCapsRecord {
2602 SVGA3dCapsRecordHeader header;
2603 uint32 data[1];
2604}
2605SVGA3dCapsRecord;
2606
2607
2608typedef uint32 SVGA3dCapPair[2];
2609
2586#endif /* _SVGA3D_REG_H_ */ 2610#endif /* _SVGA3D_REG_H_ */
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_context.c b/drivers/gpu/drm/vmwgfx/vmwgfx_context.c
index 82c41daebc0e..9426c53fb483 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_context.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_context.c
@@ -37,7 +37,7 @@ struct vmw_user_context {
37 37
38 38
39 39
40typedef int (*vmw_scrub_func)(struct vmw_ctx_bindinfo *); 40typedef int (*vmw_scrub_func)(struct vmw_ctx_bindinfo *, bool);
41 41
42static void vmw_user_context_free(struct vmw_resource *res); 42static void vmw_user_context_free(struct vmw_resource *res);
43static struct vmw_resource * 43static struct vmw_resource *
@@ -50,9 +50,11 @@ static int vmw_gb_context_unbind(struct vmw_resource *res,
50 bool readback, 50 bool readback,
51 struct ttm_validate_buffer *val_buf); 51 struct ttm_validate_buffer *val_buf);
52static int vmw_gb_context_destroy(struct vmw_resource *res); 52static int vmw_gb_context_destroy(struct vmw_resource *res);
53static int vmw_context_scrub_shader(struct vmw_ctx_bindinfo *bi); 53static int vmw_context_scrub_shader(struct vmw_ctx_bindinfo *bi, bool rebind);
54static int vmw_context_scrub_render_target(struct vmw_ctx_bindinfo *bi); 54static int vmw_context_scrub_render_target(struct vmw_ctx_bindinfo *bi,
55static int vmw_context_scrub_texture(struct vmw_ctx_bindinfo *bi); 55 bool rebind);
56static int vmw_context_scrub_texture(struct vmw_ctx_bindinfo *bi, bool rebind);
57static void vmw_context_binding_state_scrub(struct vmw_ctx_binding_state *cbs);
56static void vmw_context_binding_state_kill(struct vmw_ctx_binding_state *cbs); 58static void vmw_context_binding_state_kill(struct vmw_ctx_binding_state *cbs);
57static uint64_t vmw_user_context_size; 59static uint64_t vmw_user_context_size;
58 60
@@ -111,10 +113,14 @@ static void vmw_hw_context_destroy(struct vmw_resource *res)
111 113
112 if (res->func->destroy == vmw_gb_context_destroy) { 114 if (res->func->destroy == vmw_gb_context_destroy) {
113 mutex_lock(&dev_priv->cmdbuf_mutex); 115 mutex_lock(&dev_priv->cmdbuf_mutex);
116 mutex_lock(&dev_priv->binding_mutex);
117 (void) vmw_context_binding_state_kill
118 (&container_of(res, struct vmw_user_context, res)->cbs);
114 (void) vmw_gb_context_destroy(res); 119 (void) vmw_gb_context_destroy(res);
115 if (dev_priv->pinned_bo != NULL && 120 if (dev_priv->pinned_bo != NULL &&
116 !dev_priv->query_cid_valid) 121 !dev_priv->query_cid_valid)
117 __vmw_execbuf_release_pinned_bo(dev_priv, NULL); 122 __vmw_execbuf_release_pinned_bo(dev_priv, NULL);
123 mutex_unlock(&dev_priv->binding_mutex);
118 mutex_unlock(&dev_priv->cmdbuf_mutex); 124 mutex_unlock(&dev_priv->cmdbuf_mutex);
119 return; 125 return;
120 } 126 }
@@ -328,7 +334,7 @@ static int vmw_gb_context_unbind(struct vmw_resource *res,
328 BUG_ON(bo->mem.mem_type != VMW_PL_MOB); 334 BUG_ON(bo->mem.mem_type != VMW_PL_MOB);
329 335
330 mutex_lock(&dev_priv->binding_mutex); 336 mutex_lock(&dev_priv->binding_mutex);
331 vmw_context_binding_state_kill(&uctx->cbs); 337 vmw_context_binding_state_scrub(&uctx->cbs);
332 338
333 submit_size = sizeof(*cmd2) + (readback ? sizeof(*cmd1) : 0); 339 submit_size = sizeof(*cmd2) + (readback ? sizeof(*cmd1) : 0);
334 340
@@ -378,10 +384,6 @@ static int vmw_gb_context_destroy(struct vmw_resource *res)
378 SVGA3dCmdHeader header; 384 SVGA3dCmdHeader header;
379 SVGA3dCmdDestroyGBContext body; 385 SVGA3dCmdDestroyGBContext body;
380 } *cmd; 386 } *cmd;
381 struct vmw_user_context *uctx =
382 container_of(res, struct vmw_user_context, res);
383
384 BUG_ON(!list_empty(&uctx->cbs.list));
385 387
386 if (likely(res->id == -1)) 388 if (likely(res->id == -1))
387 return 0; 389 return 0;
@@ -528,8 +530,9 @@ out_unlock:
528 * vmw_context_scrub_shader - scrub a shader binding from a context. 530 * vmw_context_scrub_shader - scrub a shader binding from a context.
529 * 531 *
530 * @bi: single binding information. 532 * @bi: single binding information.
533 * @rebind: Whether to issue a bind instead of scrub command.
531 */ 534 */
532static int vmw_context_scrub_shader(struct vmw_ctx_bindinfo *bi) 535static int vmw_context_scrub_shader(struct vmw_ctx_bindinfo *bi, bool rebind)
533{ 536{
534 struct vmw_private *dev_priv = bi->ctx->dev_priv; 537 struct vmw_private *dev_priv = bi->ctx->dev_priv;
535 struct { 538 struct {
@@ -548,7 +551,8 @@ static int vmw_context_scrub_shader(struct vmw_ctx_bindinfo *bi)
548 cmd->header.size = sizeof(cmd->body); 551 cmd->header.size = sizeof(cmd->body);
549 cmd->body.cid = bi->ctx->id; 552 cmd->body.cid = bi->ctx->id;
550 cmd->body.type = bi->i1.shader_type; 553 cmd->body.type = bi->i1.shader_type;
551 cmd->body.shid = SVGA3D_INVALID_ID; 554 cmd->body.shid =
555 cpu_to_le32((rebind) ? bi->res->id : SVGA3D_INVALID_ID);
552 vmw_fifo_commit(dev_priv, sizeof(*cmd)); 556 vmw_fifo_commit(dev_priv, sizeof(*cmd));
553 557
554 return 0; 558 return 0;
@@ -559,8 +563,10 @@ static int vmw_context_scrub_shader(struct vmw_ctx_bindinfo *bi)
559 * from a context. 563 * from a context.
560 * 564 *
561 * @bi: single binding information. 565 * @bi: single binding information.
566 * @rebind: Whether to issue a bind instead of scrub command.
562 */ 567 */
563static int vmw_context_scrub_render_target(struct vmw_ctx_bindinfo *bi) 568static int vmw_context_scrub_render_target(struct vmw_ctx_bindinfo *bi,
569 bool rebind)
564{ 570{
565 struct vmw_private *dev_priv = bi->ctx->dev_priv; 571 struct vmw_private *dev_priv = bi->ctx->dev_priv;
566 struct { 572 struct {
@@ -579,7 +585,8 @@ static int vmw_context_scrub_render_target(struct vmw_ctx_bindinfo *bi)
579 cmd->header.size = sizeof(cmd->body); 585 cmd->header.size = sizeof(cmd->body);
580 cmd->body.cid = bi->ctx->id; 586 cmd->body.cid = bi->ctx->id;
581 cmd->body.type = bi->i1.rt_type; 587 cmd->body.type = bi->i1.rt_type;
582 cmd->body.target.sid = SVGA3D_INVALID_ID; 588 cmd->body.target.sid =
589 cpu_to_le32((rebind) ? bi->res->id : SVGA3D_INVALID_ID);
583 cmd->body.target.face = 0; 590 cmd->body.target.face = 0;
584 cmd->body.target.mipmap = 0; 591 cmd->body.target.mipmap = 0;
585 vmw_fifo_commit(dev_priv, sizeof(*cmd)); 592 vmw_fifo_commit(dev_priv, sizeof(*cmd));
@@ -591,11 +598,13 @@ static int vmw_context_scrub_render_target(struct vmw_ctx_bindinfo *bi)
591 * vmw_context_scrub_texture - scrub a texture binding from a context. 598 * vmw_context_scrub_texture - scrub a texture binding from a context.
592 * 599 *
593 * @bi: single binding information. 600 * @bi: single binding information.
601 * @rebind: Whether to issue a bind instead of a scrub command.
594 * 602 *
595 * TODO: Possibly complement this function with a function that takes 603 * TODO: Possibly complement this function with a function that takes
596 * a list of texture bindings and combines them to a single command. 604 * a list of texture bindings and combines them to a single command.
597 */ 605 */
598static int vmw_context_scrub_texture(struct vmw_ctx_bindinfo *bi) 606static int vmw_context_scrub_texture(struct vmw_ctx_bindinfo *bi,
607 bool rebind)
599{ 608{
600 struct vmw_private *dev_priv = bi->ctx->dev_priv; 609 struct vmw_private *dev_priv = bi->ctx->dev_priv;
601 struct { 610 struct {
@@ -619,7 +628,8 @@ static int vmw_context_scrub_texture(struct vmw_ctx_bindinfo *bi)
619 cmd->body.c.cid = bi->ctx->id; 628 cmd->body.c.cid = bi->ctx->id;
620 cmd->body.s1.stage = bi->i1.texture_stage; 629 cmd->body.s1.stage = bi->i1.texture_stage;
621 cmd->body.s1.name = SVGA3D_TS_BIND_TEXTURE; 630 cmd->body.s1.name = SVGA3D_TS_BIND_TEXTURE;
622 cmd->body.s1.value = (uint32) SVGA3D_INVALID_ID; 631 cmd->body.s1.value =
632 cpu_to_le32((rebind) ? bi->res->id : SVGA3D_INVALID_ID);
623 vmw_fifo_commit(dev_priv, sizeof(*cmd)); 633 vmw_fifo_commit(dev_priv, sizeof(*cmd));
624 634
625 return 0; 635 return 0;
@@ -692,6 +702,7 @@ int vmw_context_binding_add(struct vmw_ctx_binding_state *cbs,
692 vmw_context_binding_drop(loc); 702 vmw_context_binding_drop(loc);
693 703
694 loc->bi = *bi; 704 loc->bi = *bi;
705 loc->bi.scrubbed = false;
695 list_add_tail(&loc->ctx_list, &cbs->list); 706 list_add_tail(&loc->ctx_list, &cbs->list);
696 INIT_LIST_HEAD(&loc->res_list); 707 INIT_LIST_HEAD(&loc->res_list);
697 708
@@ -727,12 +738,11 @@ static void vmw_context_binding_transfer(struct vmw_ctx_binding_state *cbs,
727 if (loc->bi.ctx != NULL) 738 if (loc->bi.ctx != NULL)
728 vmw_context_binding_drop(loc); 739 vmw_context_binding_drop(loc);
729 740
730 loc->bi = *bi; 741 if (bi->res != NULL) {
731 list_add_tail(&loc->ctx_list, &cbs->list); 742 loc->bi = *bi;
732 if (bi->res != NULL) 743 list_add_tail(&loc->ctx_list, &cbs->list);
733 list_add_tail(&loc->res_list, &bi->res->binding_head); 744 list_add_tail(&loc->res_list, &bi->res->binding_head);
734 else 745 }
735 INIT_LIST_HEAD(&loc->res_list);
736} 746}
737 747
738/** 748/**
@@ -746,7 +756,10 @@ static void vmw_context_binding_transfer(struct vmw_ctx_binding_state *cbs,
746 */ 756 */
747static void vmw_context_binding_kill(struct vmw_ctx_binding *cb) 757static void vmw_context_binding_kill(struct vmw_ctx_binding *cb)
748{ 758{
749 (void) vmw_scrub_funcs[cb->bi.bt](&cb->bi); 759 if (!cb->bi.scrubbed) {
760 (void) vmw_scrub_funcs[cb->bi.bt](&cb->bi, false);
761 cb->bi.scrubbed = true;
762 }
750 vmw_context_binding_drop(cb); 763 vmw_context_binding_drop(cb);
751} 764}
752 765
@@ -768,6 +781,27 @@ static void vmw_context_binding_state_kill(struct vmw_ctx_binding_state *cbs)
768} 781}
769 782
770/** 783/**
784 * vmw_context_binding_state_scrub - Scrub all bindings associated with a
785 * struct vmw_ctx_binding state structure.
786 *
787 * @cbs: Pointer to the context binding state tracker.
788 *
789 * Emits commands to scrub all bindings associated with the
790 * context binding state tracker.
791 */
792static void vmw_context_binding_state_scrub(struct vmw_ctx_binding_state *cbs)
793{
794 struct vmw_ctx_binding *entry;
795
796 list_for_each_entry(entry, &cbs->list, ctx_list) {
797 if (!entry->bi.scrubbed) {
798 (void) vmw_scrub_funcs[entry->bi.bt](&entry->bi, false);
799 entry->bi.scrubbed = true;
800 }
801 }
802}
803
804/**
771 * vmw_context_binding_res_list_kill - Kill all bindings on a 805 * vmw_context_binding_res_list_kill - Kill all bindings on a
772 * resource binding list 806 * resource binding list
773 * 807 *
@@ -785,6 +819,27 @@ void vmw_context_binding_res_list_kill(struct list_head *head)
785} 819}
786 820
787/** 821/**
822 * vmw_context_binding_res_list_scrub - Scrub all bindings on a
823 * resource binding list
824 *
825 * @head: list head of resource binding list
826 *
827 * Scrub all bindings associated with a specific resource. Typically
828 * called before the resource is evicted.
829 */
830void vmw_context_binding_res_list_scrub(struct list_head *head)
831{
832 struct vmw_ctx_binding *entry;
833
834 list_for_each_entry(entry, head, res_list) {
835 if (!entry->bi.scrubbed) {
836 (void) vmw_scrub_funcs[entry->bi.bt](&entry->bi, false);
837 entry->bi.scrubbed = true;
838 }
839 }
840}
841
842/**
788 * vmw_context_binding_state_transfer - Commit staged binding info 843 * vmw_context_binding_state_transfer - Commit staged binding info
789 * 844 *
790 * @ctx: Pointer to context to commit the staged binding info to. 845 * @ctx: Pointer to context to commit the staged binding info to.
@@ -803,3 +858,50 @@ void vmw_context_binding_state_transfer(struct vmw_resource *ctx,
803 list_for_each_entry_safe(entry, next, &from->list, ctx_list) 858 list_for_each_entry_safe(entry, next, &from->list, ctx_list)
804 vmw_context_binding_transfer(&uctx->cbs, &entry->bi); 859 vmw_context_binding_transfer(&uctx->cbs, &entry->bi);
805} 860}
861
862/**
863 * vmw_context_rebind_all - Rebind all scrubbed bindings of a context
864 *
865 * @ctx: The context resource
866 *
867 * Walks through the context binding list and rebinds all scrubbed
868 * resources.
869 */
870int vmw_context_rebind_all(struct vmw_resource *ctx)
871{
872 struct vmw_ctx_binding *entry;
873 struct vmw_user_context *uctx =
874 container_of(ctx, struct vmw_user_context, res);
875 struct vmw_ctx_binding_state *cbs = &uctx->cbs;
876 int ret;
877
878 list_for_each_entry(entry, &cbs->list, ctx_list) {
879 if (likely(!entry->bi.scrubbed))
880 continue;
881
882 if (WARN_ON(entry->bi.res == NULL || entry->bi.res->id ==
883 SVGA3D_INVALID_ID))
884 continue;
885
886 ret = vmw_scrub_funcs[entry->bi.bt](&entry->bi, true);
887 if (unlikely(ret != 0))
888 return ret;
889
890 entry->bi.scrubbed = false;
891 }
892
893 return 0;
894}
895
896/**
897 * vmw_context_binding_list - Return a list of context bindings
898 *
899 * @ctx: The context resource
900 *
901 * Returns the current list of bindings of the given context. Note that
902 * this list becomes stale as soon as the dev_priv::binding_mutex is unlocked.
903 */
904struct list_head *vmw_context_binding_list(struct vmw_resource *ctx)
905{
906 return &(container_of(ctx, struct vmw_user_context, res)->cbs.list);
907}
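Note on the vmwgfx_context.c hunks above: each entry in vmw_scrub_funcs now takes a second bool argument, so the same callback can either clear a binding point on the device (write SVGA3D_INVALID_ID) or re-establish it (write the resource id again), and a per-binding scrubbed flag records which of the two states the device is currently in. The stand-alone sketch below only illustrates that callback shape; the struct, function and macro names are stand-ins, not the driver's API.

#include <stdbool.h>
#include <stdint.h>

#define EXAMPLE_INVALID_ID 0xffffffffu	/* stand-in for SVGA3D_INVALID_ID */

struct example_binding {
	uint32_t res_id;	/* device id of the currently bound resource */
	bool scrubbed;		/* true once the device-side binding was cleared */
};

/* One callback serves both directions: rebind == false emits the invalid id
 * (scrub), rebind == true emits the real resource id again (rebind). */
static int example_scrub_or_rebind(struct example_binding *b, bool rebind)
{
	uint32_t id = rebind ? b->res_id : EXAMPLE_INVALID_ID;

	/* The real driver would write "id" into a FIFO command here. */
	(void)id;
	b->scrubbed = !rebind;
	return 0;
}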
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index 9893328f8fdc..3bdc0adc656d 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -941,6 +941,7 @@ static void vmw_postclose(struct drm_device *dev,
941 drm_master_put(&vmw_fp->locked_master); 941 drm_master_put(&vmw_fp->locked_master);
942 } 942 }
943 943
944 vmw_compat_shader_man_destroy(vmw_fp->shman);
944 ttm_object_file_release(&vmw_fp->tfile); 945 ttm_object_file_release(&vmw_fp->tfile);
945 kfree(vmw_fp); 946 kfree(vmw_fp);
946} 947}
@@ -960,11 +961,17 @@ static int vmw_driver_open(struct drm_device *dev, struct drm_file *file_priv)
960 if (unlikely(vmw_fp->tfile == NULL)) 961 if (unlikely(vmw_fp->tfile == NULL))
961 goto out_no_tfile; 962 goto out_no_tfile;
962 963
964 vmw_fp->shman = vmw_compat_shader_man_create(dev_priv);
965 if (IS_ERR(vmw_fp->shman))
966 goto out_no_shman;
967
963 file_priv->driver_priv = vmw_fp; 968 file_priv->driver_priv = vmw_fp;
964 dev_priv->bdev.dev_mapping = dev->dev_mapping; 969 dev_priv->bdev.dev_mapping = dev->dev_mapping;
965 970
966 return 0; 971 return 0;
967 972
973out_no_shman:
974 ttm_object_file_release(&vmw_fp->tfile);
968out_no_tfile: 975out_no_tfile:
969 kfree(vmw_fp); 976 kfree(vmw_fp);
970 return ret; 977 return ret;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index 554e7fa33082..ecaa302a6154 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -75,10 +75,14 @@
75#define VMW_RES_FENCE ttm_driver_type3 75#define VMW_RES_FENCE ttm_driver_type3
76#define VMW_RES_SHADER ttm_driver_type4 76#define VMW_RES_SHADER ttm_driver_type4
77 77
78struct vmw_compat_shader_manager;
79
78struct vmw_fpriv { 80struct vmw_fpriv {
79 struct drm_master *locked_master; 81 struct drm_master *locked_master;
80 struct ttm_object_file *tfile; 82 struct ttm_object_file *tfile;
81 struct list_head fence_events; 83 struct list_head fence_events;
84 bool gb_aware;
85 struct vmw_compat_shader_manager *shman;
82}; 86};
83 87
84struct vmw_dma_buffer { 88struct vmw_dma_buffer {
@@ -272,6 +276,7 @@ struct vmw_ctx_bindinfo {
272 struct vmw_resource *ctx; 276 struct vmw_resource *ctx;
273 struct vmw_resource *res; 277 struct vmw_resource *res;
274 enum vmw_ctx_binding_type bt; 278 enum vmw_ctx_binding_type bt;
279 bool scrubbed;
275 union { 280 union {
276 SVGA3dShaderType shader_type; 281 SVGA3dShaderType shader_type;
277 SVGA3dRenderTargetType rt_type; 282 SVGA3dRenderTargetType rt_type;
@@ -318,7 +323,7 @@ struct vmw_sw_context{
318 struct drm_open_hash res_ht; 323 struct drm_open_hash res_ht;
319 bool res_ht_initialized; 324 bool res_ht_initialized;
320 bool kernel; /**< is the call made from the kernel */ 325 bool kernel; /**< is the call made from the kernel */
321 struct ttm_object_file *tfile; 326 struct vmw_fpriv *fp;
322 struct list_head validate_nodes; 327 struct list_head validate_nodes;
323 struct vmw_relocation relocs[VMWGFX_MAX_RELOCATIONS]; 328 struct vmw_relocation relocs[VMWGFX_MAX_RELOCATIONS];
324 uint32_t cur_reloc; 329 uint32_t cur_reloc;
@@ -336,6 +341,7 @@ struct vmw_sw_context{
336 bool needs_post_query_barrier; 341 bool needs_post_query_barrier;
337 struct vmw_resource *error_resource; 342 struct vmw_resource *error_resource;
338 struct vmw_ctx_binding_state staged_bindings; 343 struct vmw_ctx_binding_state staged_bindings;
344 struct list_head staged_shaders;
339}; 345};
340 346
341struct vmw_legacy_display; 347struct vmw_legacy_display;
@@ -569,6 +575,8 @@ struct vmw_user_resource_conv;
569 575
570extern void vmw_resource_unreference(struct vmw_resource **p_res); 576extern void vmw_resource_unreference(struct vmw_resource **p_res);
571extern struct vmw_resource *vmw_resource_reference(struct vmw_resource *res); 577extern struct vmw_resource *vmw_resource_reference(struct vmw_resource *res);
578extern struct vmw_resource *
579vmw_resource_reference_unless_doomed(struct vmw_resource *res);
572extern int vmw_resource_validate(struct vmw_resource *res); 580extern int vmw_resource_validate(struct vmw_resource *res);
573extern int vmw_resource_reserve(struct vmw_resource *res, bool no_backup); 581extern int vmw_resource_reserve(struct vmw_resource *res, bool no_backup);
574extern bool vmw_resource_needs_backup(const struct vmw_resource *res); 582extern bool vmw_resource_needs_backup(const struct vmw_resource *res);
@@ -957,6 +965,9 @@ extern void
957vmw_context_binding_state_transfer(struct vmw_resource *res, 965vmw_context_binding_state_transfer(struct vmw_resource *res,
958 struct vmw_ctx_binding_state *cbs); 966 struct vmw_ctx_binding_state *cbs);
959extern void vmw_context_binding_res_list_kill(struct list_head *head); 967extern void vmw_context_binding_res_list_kill(struct list_head *head);
968extern void vmw_context_binding_res_list_scrub(struct list_head *head);
969extern int vmw_context_rebind_all(struct vmw_resource *ctx);
970extern struct list_head *vmw_context_binding_list(struct vmw_resource *ctx);
960 971
961/* 972/*
962 * Surface management - vmwgfx_surface.c 973 * Surface management - vmwgfx_surface.c
@@ -991,6 +1002,28 @@ extern int vmw_shader_define_ioctl(struct drm_device *dev, void *data,
991 struct drm_file *file_priv); 1002 struct drm_file *file_priv);
992extern int vmw_shader_destroy_ioctl(struct drm_device *dev, void *data, 1003extern int vmw_shader_destroy_ioctl(struct drm_device *dev, void *data,
993 struct drm_file *file_priv); 1004 struct drm_file *file_priv);
1005extern int vmw_compat_shader_lookup(struct vmw_compat_shader_manager *man,
1006 SVGA3dShaderType shader_type,
1007 u32 *user_key);
1008extern void vmw_compat_shaders_commit(struct vmw_compat_shader_manager *man,
1009 struct list_head *list);
1010extern void vmw_compat_shaders_revert(struct vmw_compat_shader_manager *man,
1011 struct list_head *list);
1012extern int vmw_compat_shader_remove(struct vmw_compat_shader_manager *man,
1013 u32 user_key,
1014 SVGA3dShaderType shader_type,
1015 struct list_head *list);
1016extern int vmw_compat_shader_add(struct vmw_compat_shader_manager *man,
1017 u32 user_key, const void *bytecode,
1018 SVGA3dShaderType shader_type,
1019 size_t size,
1020 struct ttm_object_file *tfile,
1021 struct list_head *list);
1022extern struct vmw_compat_shader_manager *
1023vmw_compat_shader_man_create(struct vmw_private *dev_priv);
1024extern void
1025vmw_compat_shader_man_destroy(struct vmw_compat_shader_manager *man);
1026
994 1027
995/** 1028/**
996 * Inline helper functions 1029 * Inline helper functions
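Note on the new vmwgfx_drv.h declarations above: the compat shader manager stages shader additions and removals on a caller-provided list, which is later either committed or reverted depending on whether the submission went through. A minimal usage sketch follows, assuming the declarations above, the usual vmwgfx headers, and the SVGA3D_SHADERTYPE_VS enumerator; it is not driver code and omits locking and most error handling.

static int example_stage_and_commit(struct vmw_compat_shader_manager *man,
				    struct ttm_object_file *tfile,
				    const void *bytecode, size_t size)
{
	struct list_head staged;	/* per-submission staging list */
	int ret;

	INIT_LIST_HEAD(&staged);

	/* Stage a shader addition, keyed by (user_key, shader_type). */
	ret = vmw_compat_shader_add(man, 0 /* user_key */, bytecode,
				    SVGA3D_SHADERTYPE_VS, size, tfile,
				    &staged);
	if (ret)
		goto out_revert;

	/* ... validate and submit the command stream here ... */

	vmw_compat_shaders_commit(man, &staged);	/* submission succeeded */
	return 0;

out_revert:
	vmw_compat_shaders_revert(man, &staged);	/* submission failed */
	return ret;
}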
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index 7a5f1eb55c5a..269b85cc875a 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -114,8 +114,10 @@ static void vmw_resource_list_unreserve(struct list_head *list,
114 * persistent context binding tracker. 114 * persistent context binding tracker.
115 */ 115 */
116 if (unlikely(val->staged_bindings)) { 116 if (unlikely(val->staged_bindings)) {
117 vmw_context_binding_state_transfer 117 if (!backoff) {
118 (val->res, val->staged_bindings); 118 vmw_context_binding_state_transfer
119 (val->res, val->staged_bindings);
120 }
119 kfree(val->staged_bindings); 121 kfree(val->staged_bindings);
120 val->staged_bindings = NULL; 122 val->staged_bindings = NULL;
121 } 123 }
@@ -178,6 +180,44 @@ static int vmw_resource_val_add(struct vmw_sw_context *sw_context,
178} 180}
179 181
180/** 182/**
183 * vmw_resource_context_res_add - Put resources previously bound to a context on
184 * the validation list
185 *
186 * @dev_priv: Pointer to a device private structure
187 * @sw_context: Pointer to a software context used for this command submission
188 * @ctx: Pointer to the context resource
189 *
190 * This function puts all resources that were previously bound to @ctx on
191 * the resource validation list. This is part of the context state re-emission.
192 */
193static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
194 struct vmw_sw_context *sw_context,
195 struct vmw_resource *ctx)
196{
197 struct list_head *binding_list;
198 struct vmw_ctx_binding *entry;
199 int ret = 0;
200 struct vmw_resource *res;
201
202 mutex_lock(&dev_priv->binding_mutex);
203 binding_list = vmw_context_binding_list(ctx);
204
205 list_for_each_entry(entry, binding_list, ctx_list) {
206 res = vmw_resource_reference_unless_doomed(entry->bi.res);
207 if (unlikely(res == NULL))
208 continue;
209
210 ret = vmw_resource_val_add(sw_context, entry->bi.res, NULL);
211 vmw_resource_unreference(&res);
212 if (unlikely(ret != 0))
213 break;
214 }
215
216 mutex_unlock(&dev_priv->binding_mutex);
217 return ret;
218}
219
220/**
181 * vmw_resource_relocation_add - Add a relocation to the relocation list 221 * vmw_resource_relocation_add - Add a relocation to the relocation list
182 * 222 *
183 * @list: Pointer to head of relocation list. 223 * @list: Pointer to head of relocation list.
@@ -233,8 +273,12 @@ static void vmw_resource_relocations_apply(uint32_t *cb,
233{ 273{
234 struct vmw_resource_relocation *rel; 274 struct vmw_resource_relocation *rel;
235 275
236 list_for_each_entry(rel, list, head) 276 list_for_each_entry(rel, list, head) {
237 cb[rel->offset] = rel->res->id; 277 if (likely(rel->res != NULL))
278 cb[rel->offset] = rel->res->id;
279 else
280 cb[rel->offset] = SVGA_3D_CMD_NOP;
281 }
238} 282}
239 283
240static int vmw_cmd_invalid(struct vmw_private *dev_priv, 284static int vmw_cmd_invalid(struct vmw_private *dev_priv,
@@ -379,22 +423,27 @@ static int vmw_resources_validate(struct vmw_sw_context *sw_context)
379} 423}
380 424
381/** 425/**
382 * vmw_cmd_res_check - Check that a resource is present and if so, put it 426 * vmw_cmd_compat_res_check - Check that a resource is present and if so, put it
383 * on the resource validate list unless it's already there. 427 * on the resource validate list unless it's already there.
384 * 428 *
385 * @dev_priv: Pointer to a device private structure. 429 * @dev_priv: Pointer to a device private structure.
386 * @sw_context: Pointer to the software context. 430 * @sw_context: Pointer to the software context.
387 * @res_type: Resource type. 431 * @res_type: Resource type.
388 * @converter: User-space visible type specific information. 432 * @converter: User-space visible type specific information.
389 * @id: Pointer to the location in the command buffer currently being 433 * @id: user-space resource id handle.
434 * @id_loc: Pointer to the location in the command buffer currently being
390 * parsed from where the user-space resource id handle is located. 435 * parsed from where the user-space resource id handle is located.
436 * @p_val: Pointer to pointer to resource validation node. Populated
437 * on exit.
391 */ 438 */
392static int vmw_cmd_res_check(struct vmw_private *dev_priv, 439static int
393 struct vmw_sw_context *sw_context, 440vmw_cmd_compat_res_check(struct vmw_private *dev_priv,
394 enum vmw_res_type res_type, 441 struct vmw_sw_context *sw_context,
395 const struct vmw_user_resource_conv *converter, 442 enum vmw_res_type res_type,
396 uint32_t *id, 443 const struct vmw_user_resource_conv *converter,
397 struct vmw_resource_val_node **p_val) 444 uint32_t id,
445 uint32_t *id_loc,
446 struct vmw_resource_val_node **p_val)
398{ 447{
399 struct vmw_res_cache_entry *rcache = 448 struct vmw_res_cache_entry *rcache =
400 &sw_context->res_cache[res_type]; 449 &sw_context->res_cache[res_type];
@@ -402,7 +451,7 @@ static int vmw_cmd_res_check(struct vmw_private *dev_priv,
402 struct vmw_resource_val_node *node; 451 struct vmw_resource_val_node *node;
403 int ret; 452 int ret;
404 453
405 if (*id == SVGA3D_INVALID_ID) { 454 if (id == SVGA3D_INVALID_ID) {
406 if (p_val) 455 if (p_val)
407 *p_val = NULL; 456 *p_val = NULL;
408 if (res_type == vmw_res_context) { 457 if (res_type == vmw_res_context) {
@@ -417,7 +466,7 @@ static int vmw_cmd_res_check(struct vmw_private *dev_priv,
417 * resource 466 * resource
418 */ 467 */
419 468
420 if (likely(rcache->valid && *id == rcache->handle)) { 469 if (likely(rcache->valid && id == rcache->handle)) {
421 const struct vmw_resource *res = rcache->res; 470 const struct vmw_resource *res = rcache->res;
422 471
423 rcache->node->first_usage = false; 472 rcache->node->first_usage = false;
@@ -426,28 +475,28 @@ static int vmw_cmd_res_check(struct vmw_private *dev_priv,
426 475
427 return vmw_resource_relocation_add 476 return vmw_resource_relocation_add
428 (&sw_context->res_relocations, res, 477 (&sw_context->res_relocations, res,
429 id - sw_context->buf_start); 478 id_loc - sw_context->buf_start);
430 } 479 }
431 480
432 ret = vmw_user_resource_lookup_handle(dev_priv, 481 ret = vmw_user_resource_lookup_handle(dev_priv,
433 sw_context->tfile, 482 sw_context->fp->tfile,
434 *id, 483 id,
435 converter, 484 converter,
436 &res); 485 &res);
437 if (unlikely(ret != 0)) { 486 if (unlikely(ret != 0)) {
438 DRM_ERROR("Could not find or use resource 0x%08x.\n", 487 DRM_ERROR("Could not find or use resource 0x%08x.\n",
439 (unsigned) *id); 488 (unsigned) id);
440 dump_stack(); 489 dump_stack();
441 return ret; 490 return ret;
442 } 491 }
443 492
444 rcache->valid = true; 493 rcache->valid = true;
445 rcache->res = res; 494 rcache->res = res;
446 rcache->handle = *id; 495 rcache->handle = id;
447 496
448 ret = vmw_resource_relocation_add(&sw_context->res_relocations, 497 ret = vmw_resource_relocation_add(&sw_context->res_relocations,
449 res, 498 res,
450 id - sw_context->buf_start); 499 id_loc - sw_context->buf_start);
451 if (unlikely(ret != 0)) 500 if (unlikely(ret != 0))
452 goto out_no_reloc; 501 goto out_no_reloc;
453 502
@@ -459,7 +508,11 @@ static int vmw_cmd_res_check(struct vmw_private *dev_priv,
459 if (p_val) 508 if (p_val)
460 *p_val = node; 509 *p_val = node;
461 510
462 if (node->first_usage && res_type == vmw_res_context) { 511 if (dev_priv->has_mob && node->first_usage &&
512 res_type == vmw_res_context) {
513 ret = vmw_resource_context_res_add(dev_priv, sw_context, res);
514 if (unlikely(ret != 0))
515 goto out_no_reloc;
463 node->staged_bindings = 516 node->staged_bindings =
464 kzalloc(sizeof(*node->staged_bindings), GFP_KERNEL); 517 kzalloc(sizeof(*node->staged_bindings), GFP_KERNEL);
465 if (node->staged_bindings == NULL) { 518 if (node->staged_bindings == NULL) {
@@ -481,6 +534,59 @@ out_no_reloc:
481} 534}
482 535
483/** 536/**
537 * vmw_cmd_res_check - Check that a resource is present and if so, put it
538 * on the resource validate list unless it's already there.
539 *
540 * @dev_priv: Pointer to a device private structure.
541 * @sw_context: Pointer to the software context.
542 * @res_type: Resource type.
543 * @converter: User-space visible type specific information.
544 * @id_loc: Pointer to the location in the command buffer currently being
545 * parsed from where the user-space resource id handle is located.
546 * @p_val: Pointer to pointer to resource validation node. Populated
547 * on exit.
548 */
549static int
550vmw_cmd_res_check(struct vmw_private *dev_priv,
551 struct vmw_sw_context *sw_context,
552 enum vmw_res_type res_type,
553 const struct vmw_user_resource_conv *converter,
554 uint32_t *id_loc,
555 struct vmw_resource_val_node **p_val)
556{
557 return vmw_cmd_compat_res_check(dev_priv, sw_context, res_type,
558 converter, *id_loc, id_loc, p_val);
559}
560
561/**
562 * vmw_rebind_contexts - Rebind all resources previously bound to
563 * referenced contexts.
564 *
565 * @sw_context: Pointer to the software context.
566 *
567 * Rebind context binding points that have been scrubbed because of eviction.
568 */
569static int vmw_rebind_contexts(struct vmw_sw_context *sw_context)
570{
571 struct vmw_resource_val_node *val;
572 int ret;
573
574 list_for_each_entry(val, &sw_context->resource_list, head) {
575 if (likely(!val->staged_bindings))
576 continue;
577
578 ret = vmw_context_rebind_all(val->res);
579 if (unlikely(ret != 0)) {
580 if (ret != -ERESTARTSYS)
581 DRM_ERROR("Failed to rebind context.\n");
582 return ret;
583 }
584 }
585
586 return 0;
587}
588
589/**
484 * vmw_cmd_cid_check - Check a command header for valid context information. 590 * vmw_cmd_cid_check - Check a command header for valid context information.
485 * 591 *
486 * @dev_priv: Pointer to a device private structure. 592 * @dev_priv: Pointer to a device private structure.
@@ -767,7 +873,7 @@ static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
767 struct vmw_relocation *reloc; 873 struct vmw_relocation *reloc;
768 int ret; 874 int ret;
769 875
770 ret = vmw_user_dmabuf_lookup(sw_context->tfile, handle, &vmw_bo); 876 ret = vmw_user_dmabuf_lookup(sw_context->fp->tfile, handle, &vmw_bo);
771 if (unlikely(ret != 0)) { 877 if (unlikely(ret != 0)) {
772 DRM_ERROR("Could not find or use MOB buffer.\n"); 878 DRM_ERROR("Could not find or use MOB buffer.\n");
773 return -EINVAL; 879 return -EINVAL;
@@ -828,7 +934,7 @@ static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
828 struct vmw_relocation *reloc; 934 struct vmw_relocation *reloc;
829 int ret; 935 int ret;
830 936
831 ret = vmw_user_dmabuf_lookup(sw_context->tfile, handle, &vmw_bo); 937 ret = vmw_user_dmabuf_lookup(sw_context->fp->tfile, handle, &vmw_bo);
832 if (unlikely(ret != 0)) { 938 if (unlikely(ret != 0)) {
833 DRM_ERROR("Could not find or use GMR region.\n"); 939 DRM_ERROR("Could not find or use GMR region.\n");
834 return -EINVAL; 940 return -EINVAL;
@@ -1127,7 +1233,8 @@ static int vmw_cmd_dma(struct vmw_private *dev_priv,
1127 1233
1128 srf = vmw_res_to_srf(sw_context->res_cache[vmw_res_surface].res); 1234 srf = vmw_res_to_srf(sw_context->res_cache[vmw_res_surface].res);
1129 1235
1130 vmw_kms_cursor_snoop(srf, sw_context->tfile, &vmw_bo->base, header); 1236 vmw_kms_cursor_snoop(srf, sw_context->fp->tfile, &vmw_bo->base,
1237 header);
1131 1238
1132out_no_surface: 1239out_no_surface:
1133 vmw_dmabuf_unreference(&vmw_bo); 1240 vmw_dmabuf_unreference(&vmw_bo);
@@ -1478,6 +1585,98 @@ static int vmw_cmd_invalidate_gb_surface(struct vmw_private *dev_priv,
1478 &cmd->body.sid, NULL); 1585 &cmd->body.sid, NULL);
1479} 1586}
1480 1587
1588
1589/**
1590 * vmw_cmd_shader_define - Validate an SVGA_3D_CMD_SHADER_DEFINE
1591 * command
1592 *
1593 * @dev_priv: Pointer to a device private struct.
1594 * @sw_context: The software context being used for this batch.
1595 * @header: Pointer to the command header in the command stream.
1596 */
1597static int vmw_cmd_shader_define(struct vmw_private *dev_priv,
1598 struct vmw_sw_context *sw_context,
1599 SVGA3dCmdHeader *header)
1600{
1601 struct vmw_shader_define_cmd {
1602 SVGA3dCmdHeader header;
1603 SVGA3dCmdDefineShader body;
1604 } *cmd;
1605 int ret;
1606 size_t size;
1607
1608 cmd = container_of(header, struct vmw_shader_define_cmd,
1609 header);
1610
1611 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
1612 user_context_converter, &cmd->body.cid,
1613 NULL);
1614 if (unlikely(ret != 0))
1615 return ret;
1616
1617 if (unlikely(!dev_priv->has_mob))
1618 return 0;
1619
1620 size = cmd->header.size - sizeof(cmd->body);
1621 ret = vmw_compat_shader_add(sw_context->fp->shman,
1622 cmd->body.shid, cmd + 1,
1623 cmd->body.type, size,
1624 sw_context->fp->tfile,
1625 &sw_context->staged_shaders);
1626 if (unlikely(ret != 0))
1627 return ret;
1628
1629 return vmw_resource_relocation_add(&sw_context->res_relocations,
1630 NULL, &cmd->header.id -
1631 sw_context->buf_start);
1632
1633 return 0;
1634}
1635
1636/**
1637 * vmw_cmd_shader_destroy - Validate an SVGA_3D_CMD_SHADER_DESTROY
1638 * command
1639 *
1640 * @dev_priv: Pointer to a device private struct.
1641 * @sw_context: The software context being used for this batch.
1642 * @header: Pointer to the command header in the command stream.
1643 */
1644static int vmw_cmd_shader_destroy(struct vmw_private *dev_priv,
1645 struct vmw_sw_context *sw_context,
1646 SVGA3dCmdHeader *header)
1647{
1648 struct vmw_shader_destroy_cmd {
1649 SVGA3dCmdHeader header;
1650 SVGA3dCmdDestroyShader body;
1651 } *cmd;
1652 int ret;
1653
1654 cmd = container_of(header, struct vmw_shader_destroy_cmd,
1655 header);
1656
1657 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
1658 user_context_converter, &cmd->body.cid,
1659 NULL);
1660 if (unlikely(ret != 0))
1661 return ret;
1662
1663 if (unlikely(!dev_priv->has_mob))
1664 return 0;
1665
1666 ret = vmw_compat_shader_remove(sw_context->fp->shman,
1667 cmd->body.shid,
1668 cmd->body.type,
1669 &sw_context->staged_shaders);
1670 if (unlikely(ret != 0))
1671 return ret;
1672
1673 return vmw_resource_relocation_add(&sw_context->res_relocations,
1674 NULL, &cmd->header.id -
1675 sw_context->buf_start);
1676
1677 return 0;
1678}
1679
1481/** 1680/**
1482 * vmw_cmd_set_shader - Validate an SVGA_3D_CMD_SET_SHADER 1681 * vmw_cmd_set_shader - Validate an SVGA_3D_CMD_SET_SHADER
1483 * command 1682 * command
@@ -1509,10 +1708,18 @@ static int vmw_cmd_set_shader(struct vmw_private *dev_priv,
1509 if (dev_priv->has_mob) { 1708 if (dev_priv->has_mob) {
1510 struct vmw_ctx_bindinfo bi; 1709 struct vmw_ctx_bindinfo bi;
1511 struct vmw_resource_val_node *res_node; 1710 struct vmw_resource_val_node *res_node;
1512 1711 u32 shid = cmd->body.shid;
1513 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_shader, 1712
1514 user_shader_converter, 1713 if (shid != SVGA3D_INVALID_ID)
1515 &cmd->body.shid, &res_node); 1714 (void) vmw_compat_shader_lookup(sw_context->fp->shman,
1715 cmd->body.type,
1716 &shid);
1717
1718 ret = vmw_cmd_compat_res_check(dev_priv, sw_context,
1719 vmw_res_shader,
1720 user_shader_converter,
1721 shid,
1722 &cmd->body.shid, &res_node);
1516 if (unlikely(ret != 0)) 1723 if (unlikely(ret != 0))
1517 return ret; 1724 return ret;
1518 1725
@@ -1527,6 +1734,39 @@ static int vmw_cmd_set_shader(struct vmw_private *dev_priv,
1527} 1734}
1528 1735
1529/** 1736/**
1737 * vmw_cmd_set_shader_const - Validate an SVGA_3D_CMD_SET_SHADER_CONST
1738 * command
1739 *
1740 * @dev_priv: Pointer to a device private struct.
1741 * @sw_context: The software context being used for this batch.
1742 * @header: Pointer to the command header in the command stream.
1743 */
1744static int vmw_cmd_set_shader_const(struct vmw_private *dev_priv,
1745 struct vmw_sw_context *sw_context,
1746 SVGA3dCmdHeader *header)
1747{
1748 struct vmw_set_shader_const_cmd {
1749 SVGA3dCmdHeader header;
1750 SVGA3dCmdSetShaderConst body;
1751 } *cmd;
1752 int ret;
1753
1754 cmd = container_of(header, struct vmw_set_shader_const_cmd,
1755 header);
1756
1757 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
1758 user_context_converter, &cmd->body.cid,
1759 NULL);
1760 if (unlikely(ret != 0))
1761 return ret;
1762
1763 if (dev_priv->has_mob)
1764 header->id = SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE;
1765
1766 return 0;
1767}
1768
1769/**
1530 * vmw_cmd_bind_gb_shader - Validate an SVGA_3D_CMD_BIND_GB_SHADER 1770 * vmw_cmd_bind_gb_shader - Validate an SVGA_3D_CMD_BIND_GB_SHADER
1531 * command 1771 * command
1532 * 1772 *
@@ -1634,14 +1874,14 @@ static const struct vmw_cmd_entry const vmw_cmd_entries[SVGA_3D_CMD_MAX] = {
1634 true, false, false), 1874 true, false, false),
1635 VMW_CMD_DEF(SVGA_3D_CMD_PRESENT, &vmw_cmd_present_check, 1875 VMW_CMD_DEF(SVGA_3D_CMD_PRESENT, &vmw_cmd_present_check,
1636 false, false, false), 1876 false, false, false),
1637 VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DEFINE, &vmw_cmd_cid_check, 1877 VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DEFINE, &vmw_cmd_shader_define,
1638 true, true, false), 1878 true, false, false),
1639 VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DESTROY, &vmw_cmd_cid_check, 1879 VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DESTROY, &vmw_cmd_shader_destroy,
1640 true, true, false), 1880 true, false, false),
1641 VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER, &vmw_cmd_set_shader, 1881 VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER, &vmw_cmd_set_shader,
1642 true, false, false), 1882 true, false, false),
1643 VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER_CONST, &vmw_cmd_cid_check, 1883 VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER_CONST, &vmw_cmd_set_shader_const,
1644 true, true, false), 1884 true, false, false),
1645 VMW_CMD_DEF(SVGA_3D_CMD_DRAW_PRIMITIVES, &vmw_cmd_draw, 1885 VMW_CMD_DEF(SVGA_3D_CMD_DRAW_PRIMITIVES, &vmw_cmd_draw,
1646 true, false, false), 1886 true, false, false),
1647 VMW_CMD_DEF(SVGA_3D_CMD_SETSCISSORRECT, &vmw_cmd_cid_check, 1887 VMW_CMD_DEF(SVGA_3D_CMD_SETSCISSORRECT, &vmw_cmd_cid_check,
@@ -2171,7 +2411,7 @@ int vmw_execbuf_process(struct drm_file *file_priv,
2171 } else 2411 } else
2172 sw_context->kernel = true; 2412 sw_context->kernel = true;
2173 2413
2174 sw_context->tfile = vmw_fpriv(file_priv)->tfile; 2414 sw_context->fp = vmw_fpriv(file_priv);
2175 sw_context->cur_reloc = 0; 2415 sw_context->cur_reloc = 0;
2176 sw_context->cur_val_buf = 0; 2416 sw_context->cur_val_buf = 0;
2177 sw_context->fence_flags = 0; 2417 sw_context->fence_flags = 0;
@@ -2188,16 +2428,17 @@ int vmw_execbuf_process(struct drm_file *file_priv,
2188 goto out_unlock; 2428 goto out_unlock;
2189 sw_context->res_ht_initialized = true; 2429 sw_context->res_ht_initialized = true;
2190 } 2430 }
2431 INIT_LIST_HEAD(&sw_context->staged_shaders);
2191 2432
2192 INIT_LIST_HEAD(&resource_list); 2433 INIT_LIST_HEAD(&resource_list);
2193 ret = vmw_cmd_check_all(dev_priv, sw_context, kernel_commands, 2434 ret = vmw_cmd_check_all(dev_priv, sw_context, kernel_commands,
2194 command_size); 2435 command_size);
2195 if (unlikely(ret != 0)) 2436 if (unlikely(ret != 0))
2196 goto out_err; 2437 goto out_err_nores;
2197 2438
2198 ret = vmw_resources_reserve(sw_context); 2439 ret = vmw_resources_reserve(sw_context);
2199 if (unlikely(ret != 0)) 2440 if (unlikely(ret != 0))
2200 goto out_err; 2441 goto out_err_nores;
2201 2442
2202 ret = ttm_eu_reserve_buffers(&ticket, &sw_context->validate_nodes); 2443 ret = ttm_eu_reserve_buffers(&ticket, &sw_context->validate_nodes);
2203 if (unlikely(ret != 0)) 2444 if (unlikely(ret != 0))
@@ -2225,6 +2466,12 @@ int vmw_execbuf_process(struct drm_file *file_priv,
2225 goto out_err; 2466 goto out_err;
2226 } 2467 }
2227 2468
2469 if (dev_priv->has_mob) {
2470 ret = vmw_rebind_contexts(sw_context);
2471 if (unlikely(ret != 0))
2472 goto out_err;
2473 }
2474
2228 cmd = vmw_fifo_reserve(dev_priv, command_size); 2475 cmd = vmw_fifo_reserve(dev_priv, command_size);
2229 if (unlikely(cmd == NULL)) { 2476 if (unlikely(cmd == NULL)) {
2230 DRM_ERROR("Failed reserving fifo space for commands.\n"); 2477 DRM_ERROR("Failed reserving fifo space for commands.\n");
@@ -2276,6 +2523,8 @@ int vmw_execbuf_process(struct drm_file *file_priv,
2276 } 2523 }
2277 2524
2278 list_splice_init(&sw_context->resource_list, &resource_list); 2525 list_splice_init(&sw_context->resource_list, &resource_list);
2526 vmw_compat_shaders_commit(sw_context->fp->shman,
2527 &sw_context->staged_shaders);
2279 mutex_unlock(&dev_priv->cmdbuf_mutex); 2528 mutex_unlock(&dev_priv->cmdbuf_mutex);
2280 2529
2281 /* 2530 /*
@@ -2289,10 +2538,11 @@ int vmw_execbuf_process(struct drm_file *file_priv,
2289out_unlock_binding: 2538out_unlock_binding:
2290 mutex_unlock(&dev_priv->binding_mutex); 2539 mutex_unlock(&dev_priv->binding_mutex);
2291out_err: 2540out_err:
2292 vmw_resource_relocations_free(&sw_context->res_relocations);
2293 vmw_free_relocations(sw_context);
2294 ttm_eu_backoff_reservation(&ticket, &sw_context->validate_nodes); 2541 ttm_eu_backoff_reservation(&ticket, &sw_context->validate_nodes);
2542out_err_nores:
2295 vmw_resource_list_unreserve(&sw_context->resource_list, true); 2543 vmw_resource_list_unreserve(&sw_context->resource_list, true);
2544 vmw_resource_relocations_free(&sw_context->res_relocations);
2545 vmw_free_relocations(sw_context);
2296 vmw_clear_validations(sw_context); 2546 vmw_clear_validations(sw_context);
2297 if (unlikely(dev_priv->pinned_bo != NULL && 2547 if (unlikely(dev_priv->pinned_bo != NULL &&
2298 !dev_priv->query_cid_valid)) 2548 !dev_priv->query_cid_valid))
@@ -2301,6 +2551,8 @@ out_unlock:
2301 list_splice_init(&sw_context->resource_list, &resource_list); 2551 list_splice_init(&sw_context->resource_list, &resource_list);
2302 error_resource = sw_context->error_resource; 2552 error_resource = sw_context->error_resource;
2303 sw_context->error_resource = NULL; 2553 sw_context->error_resource = NULL;
2554 vmw_compat_shaders_revert(sw_context->fp->shman,
2555 &sw_context->staged_shaders);
2304 mutex_unlock(&dev_priv->cmdbuf_mutex); 2556 mutex_unlock(&dev_priv->cmdbuf_mutex);
2305 2557
2306 /* 2558 /*
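Note on the vmwgfx_execbuf.c hunks above: a resource relocation may now carry a NULL resource, in which case vmw_resource_relocations_apply() patches the already-copied command header into a NOP instead of writing a device id; this is how the legacy SHADER_DEFINE/SHADER_DESTROY commands handled above get dropped from the stream when the device uses guest-backed objects. The stand-alone sketch below only mirrors that patching step; the type, field and NOP values are illustrative, not the driver's.

#include <stddef.h>
#include <stdint.h>

#define EXAMPLE_CMD_NOP 0x0u	/* stand-in for the device's NOP command id */

struct example_reloc {
	size_t offset;		/* dword index into the copied command buffer */
	const uint32_t *res_id;	/* NULL means "turn this command into a NOP" */
};

static void example_apply_relocs(uint32_t *cb, const struct example_reloc *rel,
				 size_t n)
{
	size_t i;

	for (i = 0; i < n; ++i) {
		if (rel[i].res_id != NULL)
			cb[rel[i].offset] = *rel[i].res_id;	/* patch in the id */
		else
			cb[rel[i].offset] = EXAMPLE_CMD_NOP;	/* drop the command */
	}
}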
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
index 116c49736763..f9881f9e62bd 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
@@ -29,12 +29,18 @@
29#include <drm/vmwgfx_drm.h> 29#include <drm/vmwgfx_drm.h>
30#include "vmwgfx_kms.h" 30#include "vmwgfx_kms.h"
31 31
32struct svga_3d_compat_cap {
33 SVGA3dCapsRecordHeader header;
34 SVGA3dCapPair pairs[SVGA3D_DEVCAP_MAX];
35};
36
32int vmw_getparam_ioctl(struct drm_device *dev, void *data, 37int vmw_getparam_ioctl(struct drm_device *dev, void *data,
33 struct drm_file *file_priv) 38 struct drm_file *file_priv)
34{ 39{
35 struct vmw_private *dev_priv = vmw_priv(dev); 40 struct vmw_private *dev_priv = vmw_priv(dev);
36 struct drm_vmw_getparam_arg *param = 41 struct drm_vmw_getparam_arg *param =
37 (struct drm_vmw_getparam_arg *)data; 42 (struct drm_vmw_getparam_arg *)data;
43 struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
38 44
39 switch (param->param) { 45 switch (param->param) {
40 case DRM_VMW_PARAM_NUM_STREAMS: 46 case DRM_VMW_PARAM_NUM_STREAMS:
@@ -60,6 +66,11 @@ int vmw_getparam_ioctl(struct drm_device *dev, void *data,
60 __le32 __iomem *fifo_mem = dev_priv->mmio_virt; 66 __le32 __iomem *fifo_mem = dev_priv->mmio_virt;
61 const struct vmw_fifo_state *fifo = &dev_priv->fifo; 67 const struct vmw_fifo_state *fifo = &dev_priv->fifo;
62 68
69 if ((dev_priv->capabilities & SVGA_CAP_GBOBJECTS)) {
70 param->value = SVGA3D_HWVERSION_WS8_B1;
71 break;
72 }
73
63 param->value = 74 param->value =
64 ioread32(fifo_mem + 75 ioread32(fifo_mem +
65 ((fifo->capabilities & 76 ((fifo->capabilities &
@@ -69,17 +80,26 @@ int vmw_getparam_ioctl(struct drm_device *dev, void *data,
69 break; 80 break;
70 } 81 }
71 case DRM_VMW_PARAM_MAX_SURF_MEMORY: 82 case DRM_VMW_PARAM_MAX_SURF_MEMORY:
72 param->value = dev_priv->memory_size; 83 if ((dev_priv->capabilities & SVGA_CAP_GBOBJECTS) &&
84 !vmw_fp->gb_aware)
85 param->value = dev_priv->max_mob_pages * PAGE_SIZE / 2;
86 else
87 param->value = dev_priv->memory_size;
73 break; 88 break;
74 case DRM_VMW_PARAM_3D_CAPS_SIZE: 89 case DRM_VMW_PARAM_3D_CAPS_SIZE:
75 if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS) 90 if ((dev_priv->capabilities & SVGA_CAP_GBOBJECTS) &&
76 param->value = SVGA3D_DEVCAP_MAX; 91 vmw_fp->gb_aware)
92 param->value = SVGA3D_DEVCAP_MAX * sizeof(uint32_t);
93 else if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS)
94 param->value = sizeof(struct svga_3d_compat_cap) +
95 sizeof(uint32_t);
77 else 96 else
78 param->value = (SVGA_FIFO_3D_CAPS_LAST - 97 param->value = (SVGA_FIFO_3D_CAPS_LAST -
79 SVGA_FIFO_3D_CAPS + 1); 98 SVGA_FIFO_3D_CAPS + 1) *
80 param->value *= sizeof(uint32_t); 99 sizeof(uint32_t);
81 break; 100 break;
82 case DRM_VMW_PARAM_MAX_MOB_MEMORY: 101 case DRM_VMW_PARAM_MAX_MOB_MEMORY:
102 vmw_fp->gb_aware = true;
83 param->value = dev_priv->max_mob_pages * PAGE_SIZE; 103 param->value = dev_priv->max_mob_pages * PAGE_SIZE;
84 break; 104 break;
85 default: 105 default:
@@ -91,6 +111,38 @@ int vmw_getparam_ioctl(struct drm_device *dev, void *data,
91 return 0; 111 return 0;
92} 112}
93 113
114static int vmw_fill_compat_cap(struct vmw_private *dev_priv, void *bounce,
115 size_t size)
116{
117 struct svga_3d_compat_cap *compat_cap =
118 (struct svga_3d_compat_cap *) bounce;
119 unsigned int i;
120 size_t pair_offset = offsetof(struct svga_3d_compat_cap, pairs);
121 unsigned int max_size;
122
123 if (size < pair_offset)
124 return -EINVAL;
125
126 max_size = (size - pair_offset) / sizeof(SVGA3dCapPair);
127
128 if (max_size > SVGA3D_DEVCAP_MAX)
129 max_size = SVGA3D_DEVCAP_MAX;
130
131 compat_cap->header.length =
132 (pair_offset + max_size * sizeof(SVGA3dCapPair)) / sizeof(u32);
133 compat_cap->header.type = SVGA3DCAPS_RECORD_DEVCAPS;
134
135 mutex_lock(&dev_priv->hw_mutex);
136 for (i = 0; i < max_size; ++i) {
137 vmw_write(dev_priv, SVGA_REG_DEV_CAP, i);
138 compat_cap->pairs[i][0] = i;
139 compat_cap->pairs[i][1] = vmw_read(dev_priv, SVGA_REG_DEV_CAP);
140 }
141 mutex_unlock(&dev_priv->hw_mutex);
142
143 return 0;
144}
145
94 146
95int vmw_get_cap_3d_ioctl(struct drm_device *dev, void *data, 147int vmw_get_cap_3d_ioctl(struct drm_device *dev, void *data,
96 struct drm_file *file_priv) 148 struct drm_file *file_priv)
@@ -104,41 +156,49 @@ int vmw_get_cap_3d_ioctl(struct drm_device *dev, void *data,
104 void *bounce; 156 void *bounce;
105 int ret; 157 int ret;
106 bool gb_objects = !!(dev_priv->capabilities & SVGA_CAP_GBOBJECTS); 158 bool gb_objects = !!(dev_priv->capabilities & SVGA_CAP_GBOBJECTS);
159 struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
107 160
108 if (unlikely(arg->pad64 != 0)) { 161 if (unlikely(arg->pad64 != 0)) {
109 DRM_ERROR("Illegal GET_3D_CAP argument.\n"); 162 DRM_ERROR("Illegal GET_3D_CAP argument.\n");
110 return -EINVAL; 163 return -EINVAL;
111 } 164 }
112 165
113 if (gb_objects) 166 if (gb_objects && vmw_fp->gb_aware)
114 size = SVGA3D_DEVCAP_MAX; 167 size = SVGA3D_DEVCAP_MAX * sizeof(uint32_t);
168 else if (gb_objects)
169 size = sizeof(struct svga_3d_compat_cap) + sizeof(uint32_t);
115 else 170 else
116 size = (SVGA_FIFO_3D_CAPS_LAST - SVGA_FIFO_3D_CAPS + 1); 171 size = (SVGA_FIFO_3D_CAPS_LAST - SVGA_FIFO_3D_CAPS + 1) *
117 172 sizeof(uint32_t);
118 size *= sizeof(uint32_t);
119 173
120 if (arg->max_size < size) 174 if (arg->max_size < size)
121 size = arg->max_size; 175 size = arg->max_size;
122 176
123 bounce = vmalloc(size); 177 bounce = vzalloc(size);
124 if (unlikely(bounce == NULL)) { 178 if (unlikely(bounce == NULL)) {
125 DRM_ERROR("Failed to allocate bounce buffer for 3D caps.\n"); 179 DRM_ERROR("Failed to allocate bounce buffer for 3D caps.\n");
126 return -ENOMEM; 180 return -ENOMEM;
127 } 181 }
128 182
129 if (gb_objects) { 183 if (gb_objects && vmw_fp->gb_aware) {
130 int i; 184 int i, num;
131 uint32_t *bounce32 = (uint32_t *) bounce; 185 uint32_t *bounce32 = (uint32_t *) bounce;
132 186
187 num = size / sizeof(uint32_t);
188 if (num > SVGA3D_DEVCAP_MAX)
189 num = SVGA3D_DEVCAP_MAX;
190
133 mutex_lock(&dev_priv->hw_mutex); 191 mutex_lock(&dev_priv->hw_mutex);
134 for (i = 0; i < SVGA3D_DEVCAP_MAX; ++i) { 192 for (i = 0; i < num; ++i) {
135 vmw_write(dev_priv, SVGA_REG_DEV_CAP, i); 193 vmw_write(dev_priv, SVGA_REG_DEV_CAP, i);
136 *bounce32++ = vmw_read(dev_priv, SVGA_REG_DEV_CAP); 194 *bounce32++ = vmw_read(dev_priv, SVGA_REG_DEV_CAP);
137 } 195 }
138 mutex_unlock(&dev_priv->hw_mutex); 196 mutex_unlock(&dev_priv->hw_mutex);
139 197 } else if (gb_objects) {
198 ret = vmw_fill_compat_cap(dev_priv, bounce, size);
199 if (unlikely(ret != 0))
200 goto out_err;
140 } else { 201 } else {
141
142 fifo_mem = dev_priv->mmio_virt; 202 fifo_mem = dev_priv->mmio_virt;
143 memcpy_fromio(bounce, &fifo_mem[SVGA_FIFO_3D_CAPS], size); 203 memcpy_fromio(bounce, &fifo_mem[SVGA_FIFO_3D_CAPS], size);
144 } 204 }
@@ -146,6 +206,7 @@ int vmw_get_cap_3d_ioctl(struct drm_device *dev, void *data,
146 ret = copy_to_user(buffer, bounce, size); 206 ret = copy_to_user(buffer, bounce, size);
147 if (ret) 207 if (ret)
148 ret = -EFAULT; 208 ret = -EFAULT;
209out_err:
149 vfree(bounce); 210 vfree(bounce);
150 211
151 if (unlikely(ret != 0)) 212 if (unlikely(ret != 0))
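Note on the vmwgfx_ioctl.c hunks above: the size reported for the 3D capabilities now depends on whether the client has announced itself as guest-backed-object aware (it does so by querying DRM_VMW_PARAM_MAX_MOB_MEMORY); non-aware clients on guest-backed hardware instead get a compat caps record. The small userspace sketch below just recomputes the three size cases with illustrative constants; the real values come from the SVGA headers.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define EXAMPLE_DEVCAP_MAX	 260	/* stand-in for SVGA3D_DEVCAP_MAX */
#define EXAMPLE_FIFO_CAPS_DWORDS 1024	/* stand-in for the legacy FIFO caps range */

struct example_cap_pair { uint32_t cap, value; };

struct example_compat_cap {	/* loosely mirrors svga_3d_compat_cap above */
	uint32_t header_length;
	uint32_t header_type;
	struct example_cap_pair pairs[EXAMPLE_DEVCAP_MAX];
};

int main(void)
{
	size_t gb_aware = EXAMPLE_DEVCAP_MAX * sizeof(uint32_t);
	size_t gb_compat = sizeof(struct example_compat_cap) + sizeof(uint32_t);
	size_t legacy = EXAMPLE_FIFO_CAPS_DWORDS * sizeof(uint32_t);

	printf("gb-aware: %zu bytes, gb-compat: %zu bytes, legacy: %zu bytes\n",
	       gb_aware, gb_compat, legacy);
	return 0;
}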
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c b/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c
index 4910e7b81811..d4a5a19cb8c3 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c
@@ -134,6 +134,7 @@ static int vmw_setup_otable_base(struct vmw_private *dev_priv,
134 cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd)); 134 cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
135 if (unlikely(cmd == NULL)) { 135 if (unlikely(cmd == NULL)) {
136 DRM_ERROR("Failed reserving FIFO space for OTable setup.\n"); 136 DRM_ERROR("Failed reserving FIFO space for OTable setup.\n");
137 ret = -ENOMEM;
137 goto out_no_fifo; 138 goto out_no_fifo;
138 } 139 }
139 140
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
index 6fdd82d42f65..2aa4bc6a4d60 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -88,6 +88,11 @@ struct vmw_resource *vmw_resource_reference(struct vmw_resource *res)
88 return res; 88 return res;
89} 89}
90 90
91struct vmw_resource *
92vmw_resource_reference_unless_doomed(struct vmw_resource *res)
93{
94 return kref_get_unless_zero(&res->kref) ? res : NULL;
95}
91 96
92/** 97/**
93 * vmw_resource_release_id - release a resource id to the id manager. 98 * vmw_resource_release_id - release a resource id to the id manager.
@@ -136,8 +141,12 @@ static void vmw_resource_release(struct kref *kref)
136 vmw_dmabuf_unreference(&res->backup); 141 vmw_dmabuf_unreference(&res->backup);
137 } 142 }
138 143
139 if (likely(res->hw_destroy != NULL)) 144 if (likely(res->hw_destroy != NULL)) {
140 res->hw_destroy(res); 145 res->hw_destroy(res);
146 mutex_lock(&dev_priv->binding_mutex);
147 vmw_context_binding_res_list_kill(&res->binding_head);
148 mutex_unlock(&dev_priv->binding_mutex);
149 }
141 150
142 id = res->id; 151 id = res->id;
143 if (res->res_free != NULL) 152 if (res->res_free != NULL)
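Note on the vmwgfx_resource.c hunk above: vmw_resource_reference_unless_doomed() is built on kref_get_unless_zero(), which refuses to take a reference once the refcount has already dropped to zero, so a resource that is mid-destruction is never resurrected while walking a context's binding list. A generic sketch of the same idiom, with an illustrative struct name:

#include <linux/kref.h>

struct example_obj {
	struct kref kref;
};

/* Returns the object with an extra reference, or NULL if it is already
 * being torn down (its refcount has reached zero). */
static struct example_obj *example_get_unless_doomed(struct example_obj *obj)
{
	return kref_get_unless_zero(&obj->kref) ? obj : NULL;
}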
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
index 1457ec4b7125..217d941b8176 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
@@ -29,6 +29,8 @@
29#include "vmwgfx_resource_priv.h" 29#include "vmwgfx_resource_priv.h"
30#include "ttm/ttm_placement.h" 30#include "ttm/ttm_placement.h"
31 31
32#define VMW_COMPAT_SHADER_HT_ORDER 12
33
32struct vmw_shader { 34struct vmw_shader {
33 struct vmw_resource res; 35 struct vmw_resource res;
34 SVGA3dShaderType type; 36 SVGA3dShaderType type;
@@ -40,6 +42,50 @@ struct vmw_user_shader {
40 struct vmw_shader shader; 42 struct vmw_shader shader;
41}; 43};
42 44
45/**
46 * enum vmw_compat_shader_state - Staging state for compat shaders
47 */
48enum vmw_compat_shader_state {
49 VMW_COMPAT_COMMITED,
50 VMW_COMPAT_ADD,
51 VMW_COMPAT_DEL
52};
53
54/**
55 * struct vmw_compat_shader - Metadata for compat shaders.
56 *
57 * @handle: The TTM handle of the guest backed shader.
58 * @tfile: The struct ttm_object_file the guest backed shader is registered
59 * with.
60 * @hash: Hash item for lookup.
61 * @head: List head for staging lists or the compat shader manager list.
62 * @state: Staging state.
63 *
64 * The structure is protected by the cmdbuf lock.
65 */
66struct vmw_compat_shader {
67 u32 handle;
68 struct ttm_object_file *tfile;
69 struct drm_hash_item hash;
70 struct list_head head;
71 enum vmw_compat_shader_state state;
72};
73
74/**
75 * struct vmw_compat_shader_manager - Compat shader manager.
76 *
77 * @shaders: Hash table containing staged and committed compat shaders
78 * @list: List of committed shaders.
79 * @dev_priv: Pointer to a device private structure.
80 *
81 * @shaders and @list are protected by the cmdbuf mutex for now.
82 */
83struct vmw_compat_shader_manager {
84 struct drm_open_hash shaders;
85 struct list_head list;
86 struct vmw_private *dev_priv;
87};
88
43static void vmw_user_shader_free(struct vmw_resource *res); 89static void vmw_user_shader_free(struct vmw_resource *res);
44static struct vmw_resource * 90static struct vmw_resource *
45vmw_user_shader_base_to_res(struct ttm_base_object *base); 91vmw_user_shader_base_to_res(struct ttm_base_object *base);
@@ -258,7 +304,7 @@ static int vmw_gb_shader_destroy(struct vmw_resource *res)
258 return 0; 304 return 0;
259 305
260 mutex_lock(&dev_priv->binding_mutex); 306 mutex_lock(&dev_priv->binding_mutex);
261 vmw_context_binding_res_list_kill(&res->binding_head); 307 vmw_context_binding_res_list_scrub(&res->binding_head);
262 308
263 cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd)); 309 cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
264 if (unlikely(cmd == NULL)) { 310 if (unlikely(cmd == NULL)) {
@@ -325,13 +371,81 @@ int vmw_shader_destroy_ioctl(struct drm_device *dev, void *data,
325 TTM_REF_USAGE); 371 TTM_REF_USAGE);
326} 372}
327 373
374int vmw_shader_alloc(struct vmw_private *dev_priv,
375 struct vmw_dma_buffer *buffer,
376 size_t shader_size,
377 size_t offset,
378 SVGA3dShaderType shader_type,
379 struct ttm_object_file *tfile,
380 u32 *handle)
381{
382 struct vmw_user_shader *ushader;
383 struct vmw_resource *res, *tmp;
384 int ret;
385
386 /*
387 * Approximate idr memory usage with 128 bytes. It will be limited
388 * by maximum number of shaders anyway.
389 */
390 if (unlikely(vmw_user_shader_size == 0))
391 vmw_user_shader_size =
392 ttm_round_pot(sizeof(struct vmw_user_shader)) + 128;
393
394 ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
395 vmw_user_shader_size,
396 false, true);
397 if (unlikely(ret != 0)) {
398 if (ret != -ERESTARTSYS)
399 DRM_ERROR("Out of graphics memory for shader "
400 "creation.\n");
401 goto out;
402 }
403
404 ushader = kzalloc(sizeof(*ushader), GFP_KERNEL);
405 if (unlikely(ushader == NULL)) {
406 ttm_mem_global_free(vmw_mem_glob(dev_priv),
407 vmw_user_shader_size);
408 ret = -ENOMEM;
409 goto out;
410 }
411
412 res = &ushader->shader.res;
413 ushader->base.shareable = false;
414 ushader->base.tfile = NULL;
415
416 /*
417 * From here on, the destructor takes over resource freeing.
418 */
419
420 ret = vmw_gb_shader_init(dev_priv, res, shader_size,
421 offset, shader_type, buffer,
422 vmw_user_shader_free);
423 if (unlikely(ret != 0))
424 goto out;
425
426 tmp = vmw_resource_reference(res);
427 ret = ttm_base_object_init(tfile, &ushader->base, false,
428 VMW_RES_SHADER,
429 &vmw_user_shader_base_release, NULL);
430
431 if (unlikely(ret != 0)) {
432 vmw_resource_unreference(&tmp);
433 goto out_err;
434 }
435
436 if (handle)
437 *handle = ushader->base.hash.key;
438out_err:
439 vmw_resource_unreference(&res);
440out:
441 return ret;
442}
443
444
328int vmw_shader_define_ioctl(struct drm_device *dev, void *data, 445int vmw_shader_define_ioctl(struct drm_device *dev, void *data,
329 struct drm_file *file_priv) 446 struct drm_file *file_priv)
330{ 447{
331 struct vmw_private *dev_priv = vmw_priv(dev); 448 struct vmw_private *dev_priv = vmw_priv(dev);
332 struct vmw_user_shader *ushader;
333 struct vmw_resource *res;
334 struct vmw_resource *tmp;
335 struct drm_vmw_shader_create_arg *arg = 449 struct drm_vmw_shader_create_arg *arg =
336 (struct drm_vmw_shader_create_arg *)data; 450 (struct drm_vmw_shader_create_arg *)data;
337 struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; 451 struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
@@ -373,69 +487,324 @@ int vmw_shader_define_ioctl(struct drm_device *dev, void *data,
373 goto out_bad_arg; 487 goto out_bad_arg;
374 } 488 }
375 489
376 /* 490 ret = ttm_read_lock(&vmaster->lock, true);
377 * Approximate idr memory usage with 128 bytes. It will be limited 491 if (unlikely(ret != 0))
378 * by maximum number_of shaders anyway. 492 goto out_bad_arg;
379 */
380 493
381 if (unlikely(vmw_user_shader_size == 0)) 494 ret = vmw_shader_alloc(dev_priv, buffer, arg->size, arg->offset,
382 vmw_user_shader_size = ttm_round_pot(sizeof(*ushader)) 495 shader_type, tfile, &arg->shader_handle);
383 + 128;
384 496
385 ret = ttm_read_lock(&vmaster->lock, true); 497 ttm_read_unlock(&vmaster->lock);
498out_bad_arg:
499 vmw_dmabuf_unreference(&buffer);
500 return ret;
501}
502
503/**
504 * vmw_compat_shader_lookup - Look up a compat shader
505 *
506 * @man: Pointer to the compat shader manager.
507 * @shader_type: The shader type, that combined with the user_key identifies
508 * the shader.
509 * @user_key: On entry, this should be a pointer to the user_key.
510 * On successful exit, it will contain the guest-backed shader's TTM handle.
511 *
512 * Returns 0 on success. Non-zero on failure, in which case the value pointed
513 * to by @user_key is unmodified.
514 */
515int vmw_compat_shader_lookup(struct vmw_compat_shader_manager *man,
516 SVGA3dShaderType shader_type,
517 u32 *user_key)
518{
519 struct drm_hash_item *hash;
520 int ret;
521 unsigned long key = *user_key | (shader_type << 24);
522
523 ret = drm_ht_find_item(&man->shaders, key, &hash);
386 if (unlikely(ret != 0)) 524 if (unlikely(ret != 0))
387 return ret; 525 return ret;
388 526
389 ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv), 527 *user_key = drm_hash_entry(hash, struct vmw_compat_shader,
390 vmw_user_shader_size, 528 hash)->handle;
391 false, true); 529
392 if (unlikely(ret != 0)) { 530 return 0;
393 if (ret != -ERESTARTSYS) 531}
394 DRM_ERROR("Out of graphics memory for shader" 532
395 " creation.\n"); 533/**
396 goto out_unlock; 534 * vmw_compat_shader_free - Free a compat shader.
535 *
536 * @man: Pointer to the compat shader manager.
537 * @entry: Pointer to a struct vmw_compat_shader.
538 *
539 * Frees a struct vmw_compat_shader entry and drops its reference to the
540 * guest backed shader.
541 */
542static void vmw_compat_shader_free(struct vmw_compat_shader_manager *man,
543 struct vmw_compat_shader *entry)
544{
545 list_del(&entry->head);
546 WARN_ON(drm_ht_remove_item(&man->shaders, &entry->hash));
547 WARN_ON(ttm_ref_object_base_unref(entry->tfile, entry->handle,
548 TTM_REF_USAGE));
549 kfree(entry);
550}
551
552/**
553 * vmw_compat_shaders_commit - Commit a list of compat shader actions.
554 *
555 * @man: Pointer to the compat shader manager.
556 * @list: Caller's list of compat shader actions.
557 *
558 * This function commits a list of compat shader additions or removals.
559 * It is typically called when the execbuf ioctl call triggering these
560 * actions has committed the fifo contents to the device.
561 */
562void vmw_compat_shaders_commit(struct vmw_compat_shader_manager *man,
563 struct list_head *list)
564{
565 struct vmw_compat_shader *entry, *next;
566
567 list_for_each_entry_safe(entry, next, list, head) {
568 list_del(&entry->head);
569 switch (entry->state) {
570 case VMW_COMPAT_ADD:
571 entry->state = VMW_COMPAT_COMMITED;
572 list_add_tail(&entry->head, &man->list);
573 break;
574 case VMW_COMPAT_DEL:
575 ttm_ref_object_base_unref(entry->tfile, entry->handle,
576 TTM_REF_USAGE);
577 kfree(entry);
578 break;
579 default:
580 BUG();
581 break;
582 }
397 } 583 }
584}
398 585
399 ushader = kzalloc(sizeof(*ushader), GFP_KERNEL); 586/**
400 if (unlikely(ushader == NULL)) { 587 * vmw_compat_shaders_revert - Revert a list of compat shader actions
401 ttm_mem_global_free(vmw_mem_glob(dev_priv), 588 *
402 vmw_user_shader_size); 589 * @man: Pointer to the compat shader manager.
403 ret = -ENOMEM; 590 * @list: Caller's list of compat shader actions.
404 goto out_unlock; 591 *
592 * This function reverts a list of compat shader additions or removals.
593 * It is typically called when the execbuf ioctl call triggering these
594 * actions failed for some reason, and the command stream was never
595 * submitted.
596 */
597void vmw_compat_shaders_revert(struct vmw_compat_shader_manager *man,
598 struct list_head *list)
599{
600 struct vmw_compat_shader *entry, *next;
601 int ret;
602
603 list_for_each_entry_safe(entry, next, list, head) {
604 switch (entry->state) {
605 case VMW_COMPAT_ADD:
606 vmw_compat_shader_free(man, entry);
607 break;
608 case VMW_COMPAT_DEL:
609 ret = drm_ht_insert_item(&man->shaders, &entry->hash);
610 list_del(&entry->head);
611 list_add_tail(&entry->head, &man->list);
612 entry->state = VMW_COMPAT_COMMITED;
613 break;
614 default:
615 BUG();
616 break;
617 }
405 } 618 }
619}
406 620
407 res = &ushader->shader.res; 621/**
408 ushader->base.shareable = false; 622 * vmw_compat_shader_remove - Stage a compat shader for removal.
409 ushader->base.tfile = NULL; 623 *
624 * @man: Pointer to the compat shader manager
625 * @user_key: The key that is used to identify the shader. The key is
626 * unique to the shader type.
627 * @shader_type: Shader type.
628 * @list: Caller's list of staged shader actions.
629 *
630 * This function stages a compat shader for removal and removes the key from
631 * the shader manager's hash table. If the shader was previously only staged
632 * for addition it is completely removed (but the execbuf code may keep a
633 * reference if it was bound to a context between addition and removal). If
634 * it was previously committed to the manager, it is staged for removal.
635 */
636int vmw_compat_shader_remove(struct vmw_compat_shader_manager *man,
637 u32 user_key, SVGA3dShaderType shader_type,
638 struct list_head *list)
639{
640 struct vmw_compat_shader *entry;
641 struct drm_hash_item *hash;
642 int ret;
410 643
411 /* 644 ret = drm_ht_find_item(&man->shaders, user_key | (shader_type << 24),
412 * From here on, the destructor takes over resource freeing. 645 &hash);
413 */ 646 if (likely(ret != 0))
647 return -EINVAL;
414 648
415 ret = vmw_gb_shader_init(dev_priv, res, arg->size, 649 entry = drm_hash_entry(hash, struct vmw_compat_shader, hash);
416 arg->offset, shader_type, buffer, 650
417 vmw_user_shader_free); 651 switch (entry->state) {
652 case VMW_COMPAT_ADD:
653 vmw_compat_shader_free(man, entry);
654 break;
655 case VMW_COMPAT_COMMITED:
656 (void) drm_ht_remove_item(&man->shaders, &entry->hash);
657 list_del(&entry->head);
658 entry->state = VMW_COMPAT_DEL;
659 list_add_tail(&entry->head, list);
660 break;
661 default:
662 BUG();
663 break;
664 }
665
666 return 0;
667}
668
669/**
670 * vmw_compat_shader_add - Create a compat shader and add the
671 * key to the manager
672 *
673 * @man: Pointer to the compat shader manager
674 * @user_key: The key that is used to identify the shader. The key is
675 * unique to the shader type.
676 * @bytecode: Pointer to the bytecode of the shader.
677 * @shader_type: Shader type.
678 * @tfile: Pointer to a struct ttm_object_file that the guest-backed shader is
679 * to be created with.
680 * @list: Caller's list of staged shader actions.
681 *
682 * Note that only the key is added to the shader manager's hash table.
683 * The shader is not yet added to the shader manager's list of shaders.
684 */
685int vmw_compat_shader_add(struct vmw_compat_shader_manager *man,
686 u32 user_key, const void *bytecode,
687 SVGA3dShaderType shader_type,
688 size_t size,
689 struct ttm_object_file *tfile,
690 struct list_head *list)
691{
692 struct vmw_dma_buffer *buf;
693 struct ttm_bo_kmap_obj map;
694 bool is_iomem;
695 struct vmw_compat_shader *compat;
696 u32 handle;
697 int ret;
698
699 if (user_key > ((1 << 24) - 1) || (unsigned) shader_type > 16)
700 return -EINVAL;
701
702 /* Allocate and pin a DMA buffer */
703 buf = kzalloc(sizeof(*buf), GFP_KERNEL);
704 if (unlikely(buf == NULL))
705 return -ENOMEM;
706
707 ret = vmw_dmabuf_init(man->dev_priv, buf, size, &vmw_sys_ne_placement,
708 true, vmw_dmabuf_bo_free);
418 if (unlikely(ret != 0)) 709 if (unlikely(ret != 0))
419 goto out_unlock; 710 goto out;
420 711
421 tmp = vmw_resource_reference(res); 712 ret = ttm_bo_reserve(&buf->base, false, true, false, NULL);
422 ret = ttm_base_object_init(tfile, &ushader->base, false, 713 if (unlikely(ret != 0))
423 VMW_RES_SHADER, 714 goto no_reserve;
424 &vmw_user_shader_base_release, NULL);
425 715
716 /* Map and copy shader bytecode. */
717 ret = ttm_bo_kmap(&buf->base, 0, PAGE_ALIGN(size) >> PAGE_SHIFT,
718 &map);
426 if (unlikely(ret != 0)) { 719 if (unlikely(ret != 0)) {
427 vmw_resource_unreference(&tmp); 720 ttm_bo_unreserve(&buf->base);
428 goto out_err; 721 goto no_reserve;
429 } 722 }
430 723
431 arg->shader_handle = ushader->base.hash.key; 724 memcpy(ttm_kmap_obj_virtual(&map, &is_iomem), bytecode, size);
432out_err: 725 WARN_ON(is_iomem);
433 vmw_resource_unreference(&res); 726
434out_unlock: 727 ttm_bo_kunmap(&map);
435 ttm_read_unlock(&vmaster->lock); 728 ret = ttm_bo_validate(&buf->base, &vmw_sys_placement, false, true);
436out_bad_arg: 729 WARN_ON(ret != 0);
437 vmw_dmabuf_unreference(&buffer); 730 ttm_bo_unreserve(&buf->base);
731
732 /* Create a guest-backed shader container backed by the dma buffer */
733 ret = vmw_shader_alloc(man->dev_priv, buf, size, 0, shader_type,
734 tfile, &handle);
735 vmw_dmabuf_unreference(&buf);
736 if (unlikely(ret != 0))
737 goto no_reserve;
738 /*
739 * Create a compat shader structure and stage it for insertion
740 * in the manager
741 */
742 compat = kzalloc(sizeof(*compat), GFP_KERNEL);
743 if (compat == NULL)
744 goto no_compat;
745
746 compat->hash.key = user_key | (shader_type << 24);
747 ret = drm_ht_insert_item(&man->shaders, &compat->hash);
748 if (unlikely(ret != 0))
749 goto out_invalid_key;
750
751 compat->state = VMW_COMPAT_ADD;
752 compat->handle = handle;
753 compat->tfile = tfile;
754 list_add_tail(&compat->head, list);
438 755
756 return 0;
757
758out_invalid_key:
759 kfree(compat);
760no_compat:
761 ttm_ref_object_base_unref(tfile, handle, TTM_REF_USAGE);
762no_reserve:
763out:
439 return ret; 764 return ret;
765}
766
767/**
768 * vmw_compat_shader_man_create - Create a compat shader manager
769 *
770 * @dev_priv: Pointer to a device private structure.
771 *
772 * Typically done at file open time. If successful, returns a pointer to a
773 * compat shader manager. Otherwise returns an error pointer.
774 */
775struct vmw_compat_shader_manager *
776vmw_compat_shader_man_create(struct vmw_private *dev_priv)
777{
778 struct vmw_compat_shader_manager *man;
779 int ret;
780
781 man = kzalloc(sizeof(*man), GFP_KERNEL);
782
783 man->dev_priv = dev_priv;
784 INIT_LIST_HEAD(&man->list);
785 ret = drm_ht_create(&man->shaders, VMW_COMPAT_SHADER_HT_ORDER);
786 if (ret == 0)
787 return man;
788
789 kfree(man);
790 return ERR_PTR(ret);
791}
792
793/**
794 * vmw_compat_shader_man_destroy - Destroy a compat shader manager
795 *
796 * @man: Pointer to the shader manager to destroy.
797 *
798 * Typically done at file close time.
799 */
800void vmw_compat_shader_man_destroy(struct vmw_compat_shader_manager *man)
801{
802 struct vmw_compat_shader *entry, *next;
803
804 mutex_lock(&man->dev_priv->cmdbuf_mutex);
805 list_for_each_entry_safe(entry, next, &man->list, head)
806 vmw_compat_shader_free(man, entry);
440 807
808 mutex_unlock(&man->dev_priv->cmdbuf_mutex);
809 kfree(man);
441} 810}
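
The functions above form a transactional interface: shader additions and removals are first staged on a caller-provided list, then either made permanent with vmw_compat_shaders_commit() once the triggering execbuf call has committed its FIFO contents, or rolled back with vmw_compat_shaders_revert() if submission never happened. A minimal sketch of that intended calling pattern follows; it is illustrative only. The helper example_submit_fifo_commands(), the SVGA3D_SHADERTYPE_PS constant and the key/bytecode arguments are assumptions made for the example, not code from this patch.

/*
 * Illustrative sketch only -- not part of this patch.  Shows the staged
 * compat-shader lifecycle around a command submission, using the functions
 * added above.  example_submit_fifo_commands() is a hypothetical stand-in
 * for the execbuf path that reserves and commits FIFO space.
 */
static int example_compat_shader_usage(struct vmw_private *dev_priv,
				       struct ttm_object_file *tfile,
				       struct vmw_compat_shader_manager *man,
				       u32 key, const void *bytecode,
				       size_t size)
{
	LIST_HEAD(staged);	/* caller-owned list of staged actions */
	int ret;

	/* Stage creation of a guest-backed shader keyed by (key, type). */
	ret = vmw_compat_shader_add(man, key, bytecode, SVGA3D_SHADERTYPE_PS,
				    size, tfile, &staged);
	if (unlikely(ret != 0))
		return ret;

	ret = example_submit_fifo_commands(dev_priv);	/* hypothetical */
	if (unlikely(ret != 0)) {
		/* Command stream never reached the device: undo staging. */
		vmw_compat_shaders_revert(man, &staged);
		return ret;
	}

	/* FIFO contents committed to the device: make the actions permanent. */
	vmw_compat_shaders_commit(man, &staged);
	return 0;
}
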
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
index 979da1c246a5..82468d902915 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
@@ -908,8 +908,8 @@ int vmw_surface_reference_ioctl(struct drm_device *dev, void *data,
908 rep->size_addr; 908 rep->size_addr;
909 909
910 if (user_sizes) 910 if (user_sizes)
911 ret = copy_to_user(user_sizes, srf->sizes, 911 ret = copy_to_user(user_sizes, &srf->base_size,
912 srf->num_sizes * sizeof(*srf->sizes)); 912 sizeof(srf->base_size));
913 if (unlikely(ret != 0)) { 913 if (unlikely(ret != 0)) {
914 DRM_ERROR("copy_to_user failed %p %u\n", 914 DRM_ERROR("copy_to_user failed %p %u\n",
915 user_sizes, srf->num_sizes); 915 user_sizes, srf->num_sizes);
@@ -1111,7 +1111,7 @@ static int vmw_gb_surface_destroy(struct vmw_resource *res)
1111 return 0; 1111 return 0;
1112 1112
1113 mutex_lock(&dev_priv->binding_mutex); 1113 mutex_lock(&dev_priv->binding_mutex);
1114 vmw_context_binding_res_list_kill(&res->binding_head); 1114 vmw_context_binding_res_list_scrub(&res->binding_head);
1115 1115
1116 cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd)); 1116 cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
1117 if (unlikely(cmd == NULL)) { 1117 if (unlikely(cmd == NULL)) {
diff --git a/drivers/hwmon/da9055-hwmon.c b/drivers/hwmon/da9055-hwmon.c
index 029ecabc4380..73b3865f1207 100644
--- a/drivers/hwmon/da9055-hwmon.c
+++ b/drivers/hwmon/da9055-hwmon.c
@@ -278,10 +278,6 @@ static int da9055_hwmon_probe(struct platform_device *pdev)
278 if (hwmon_irq < 0) 278 if (hwmon_irq < 0)
279 return hwmon_irq; 279 return hwmon_irq;
280 280
281 hwmon_irq = regmap_irq_get_virq(hwmon->da9055->irq_data, hwmon_irq);
282 if (hwmon_irq < 0)
283 return hwmon_irq;
284
285 ret = devm_request_threaded_irq(&pdev->dev, hwmon_irq, 281 ret = devm_request_threaded_irq(&pdev->dev, hwmon_irq,
286 NULL, da9055_auxadc_irq, 282 NULL, da9055_auxadc_irq,
287 IRQF_TRIGGER_HIGH | IRQF_ONESHOT, 283 IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
diff --git a/drivers/hwmon/pmbus/pmbus_core.c b/drivers/hwmon/pmbus/pmbus_core.c
index 3cbf66e9d861..291d11fe93e7 100644
--- a/drivers/hwmon/pmbus/pmbus_core.c
+++ b/drivers/hwmon/pmbus/pmbus_core.c
@@ -90,7 +90,8 @@ struct pmbus_data {
90 90
91 u32 flags; /* from platform data */ 91 u32 flags; /* from platform data */
92 92
93 int exponent; /* linear mode: exponent for output voltages */ 93 int exponent[PMBUS_PAGES];
94 /* linear mode: exponent for output voltages */
94 95
95 const struct pmbus_driver_info *info; 96 const struct pmbus_driver_info *info;
96 97
@@ -410,7 +411,7 @@ static long pmbus_reg2data_linear(struct pmbus_data *data,
410 long val; 411 long val;
411 412
412 if (sensor->class == PSC_VOLTAGE_OUT) { /* LINEAR16 */ 413 if (sensor->class == PSC_VOLTAGE_OUT) { /* LINEAR16 */
413 exponent = data->exponent; 414 exponent = data->exponent[sensor->page];
414 mantissa = (u16) sensor->data; 415 mantissa = (u16) sensor->data;
415 } else { /* LINEAR11 */ 416 } else { /* LINEAR11 */
416 exponent = ((s16)sensor->data) >> 11; 417 exponent = ((s16)sensor->data) >> 11;
@@ -516,7 +517,7 @@ static long pmbus_reg2data(struct pmbus_data *data, struct pmbus_sensor *sensor)
516#define MIN_MANTISSA (511 * 1000) 517#define MIN_MANTISSA (511 * 1000)
517 518
518static u16 pmbus_data2reg_linear(struct pmbus_data *data, 519static u16 pmbus_data2reg_linear(struct pmbus_data *data,
519 enum pmbus_sensor_classes class, long val) 520 struct pmbus_sensor *sensor, long val)
520{ 521{
521 s16 exponent = 0, mantissa; 522 s16 exponent = 0, mantissa;
522 bool negative = false; 523 bool negative = false;
@@ -525,7 +526,7 @@ static u16 pmbus_data2reg_linear(struct pmbus_data *data,
525 if (val == 0) 526 if (val == 0)
526 return 0; 527 return 0;
527 528
528 if (class == PSC_VOLTAGE_OUT) { 529 if (sensor->class == PSC_VOLTAGE_OUT) {
529 /* LINEAR16 does not support negative voltages */ 530 /* LINEAR16 does not support negative voltages */
530 if (val < 0) 531 if (val < 0)
531 return 0; 532 return 0;
@@ -534,10 +535,10 @@ static u16 pmbus_data2reg_linear(struct pmbus_data *data,
534 * For a static exponent, we don't have a choice 535 * For a static exponent, we don't have a choice
535 * but to adjust the value to it. 536 * but to adjust the value to it.
536 */ 537 */
537 if (data->exponent < 0) 538 if (data->exponent[sensor->page] < 0)
538 val <<= -data->exponent; 539 val <<= -data->exponent[sensor->page];
539 else 540 else
540 val >>= data->exponent; 541 val >>= data->exponent[sensor->page];
541 val = DIV_ROUND_CLOSEST(val, 1000); 542 val = DIV_ROUND_CLOSEST(val, 1000);
542 return val & 0xffff; 543 return val & 0xffff;
543 } 544 }
@@ -548,14 +549,14 @@ static u16 pmbus_data2reg_linear(struct pmbus_data *data,
548 } 549 }
549 550
550 /* Power is in uW. Convert to mW before converting. */ 551 /* Power is in uW. Convert to mW before converting. */
551 if (class == PSC_POWER) 552 if (sensor->class == PSC_POWER)
552 val = DIV_ROUND_CLOSEST(val, 1000L); 553 val = DIV_ROUND_CLOSEST(val, 1000L);
553 554
554 /* 555 /*
555 * For simplicity, convert fan data to milli-units 556 * For simplicity, convert fan data to milli-units
556 * before calculating the exponent. 557 * before calculating the exponent.
557 */ 558 */
558 if (class == PSC_FAN) 559 if (sensor->class == PSC_FAN)
559 val = val * 1000; 560 val = val * 1000;
560 561
561 /* Reduce large mantissa until it fits into 10 bit */ 562 /* Reduce large mantissa until it fits into 10 bit */
@@ -585,22 +586,22 @@ static u16 pmbus_data2reg_linear(struct pmbus_data *data,
585} 586}
586 587
587static u16 pmbus_data2reg_direct(struct pmbus_data *data, 588static u16 pmbus_data2reg_direct(struct pmbus_data *data,
588 enum pmbus_sensor_classes class, long val) 589 struct pmbus_sensor *sensor, long val)
589{ 590{
590 long m, b, R; 591 long m, b, R;
591 592
592 m = data->info->m[class]; 593 m = data->info->m[sensor->class];
593 b = data->info->b[class]; 594 b = data->info->b[sensor->class];
594 R = data->info->R[class]; 595 R = data->info->R[sensor->class];
595 596
596 /* Power is in uW. Adjust R and b. */ 597 /* Power is in uW. Adjust R and b. */
597 if (class == PSC_POWER) { 598 if (sensor->class == PSC_POWER) {
598 R -= 3; 599 R -= 3;
599 b *= 1000; 600 b *= 1000;
600 } 601 }
601 602
602 /* Calculate Y = (m * X + b) * 10^R */ 603 /* Calculate Y = (m * X + b) * 10^R */
603 if (class != PSC_FAN) { 604 if (sensor->class != PSC_FAN) {
604 R -= 3; /* Adjust R and b for data in milli-units */ 605 R -= 3; /* Adjust R and b for data in milli-units */
605 b *= 1000; 606 b *= 1000;
606 } 607 }
@@ -619,7 +620,7 @@ static u16 pmbus_data2reg_direct(struct pmbus_data *data,
619} 620}
620 621
621static u16 pmbus_data2reg_vid(struct pmbus_data *data, 622static u16 pmbus_data2reg_vid(struct pmbus_data *data,
622 enum pmbus_sensor_classes class, long val) 623 struct pmbus_sensor *sensor, long val)
623{ 624{
624 val = clamp_val(val, 500, 1600); 625 val = clamp_val(val, 500, 1600);
625 626
@@ -627,20 +628,20 @@ static u16 pmbus_data2reg_vid(struct pmbus_data *data,
627} 628}
628 629
629static u16 pmbus_data2reg(struct pmbus_data *data, 630static u16 pmbus_data2reg(struct pmbus_data *data,
630 enum pmbus_sensor_classes class, long val) 631 struct pmbus_sensor *sensor, long val)
631{ 632{
632 u16 regval; 633 u16 regval;
633 634
634 switch (data->info->format[class]) { 635 switch (data->info->format[sensor->class]) {
635 case direct: 636 case direct:
636 regval = pmbus_data2reg_direct(data, class, val); 637 regval = pmbus_data2reg_direct(data, sensor, val);
637 break; 638 break;
638 case vid: 639 case vid:
639 regval = pmbus_data2reg_vid(data, class, val); 640 regval = pmbus_data2reg_vid(data, sensor, val);
640 break; 641 break;
641 case linear: 642 case linear:
642 default: 643 default:
643 regval = pmbus_data2reg_linear(data, class, val); 644 regval = pmbus_data2reg_linear(data, sensor, val);
644 break; 645 break;
645 } 646 }
646 return regval; 647 return regval;
@@ -746,7 +747,7 @@ static ssize_t pmbus_set_sensor(struct device *dev,
746 return -EINVAL; 747 return -EINVAL;
747 748
748 mutex_lock(&data->update_lock); 749 mutex_lock(&data->update_lock);
749 regval = pmbus_data2reg(data, sensor->class, val); 750 regval = pmbus_data2reg(data, sensor, val);
750 ret = _pmbus_write_word_data(client, sensor->page, sensor->reg, regval); 751 ret = _pmbus_write_word_data(client, sensor->page, sensor->reg, regval);
751 if (ret < 0) 752 if (ret < 0)
752 rv = ret; 753 rv = ret;
@@ -1643,12 +1644,13 @@ static int pmbus_find_attributes(struct i2c_client *client,
1643 * This function is called for all chips. 1644 * This function is called for all chips.
1644 */ 1645 */
1645static int pmbus_identify_common(struct i2c_client *client, 1646static int pmbus_identify_common(struct i2c_client *client,
1646 struct pmbus_data *data) 1647 struct pmbus_data *data, int page)
1647{ 1648{
1648 int vout_mode = -1; 1649 int vout_mode = -1;
1649 1650
1650 if (pmbus_check_byte_register(client, 0, PMBUS_VOUT_MODE)) 1651 if (pmbus_check_byte_register(client, page, PMBUS_VOUT_MODE))
1651 vout_mode = _pmbus_read_byte_data(client, 0, PMBUS_VOUT_MODE); 1652 vout_mode = _pmbus_read_byte_data(client, page,
1653 PMBUS_VOUT_MODE);
1652 if (vout_mode >= 0 && vout_mode != 0xff) { 1654 if (vout_mode >= 0 && vout_mode != 0xff) {
1653 /* 1655 /*
1654 * Not all chips support the VOUT_MODE command, 1656 * Not all chips support the VOUT_MODE command,
@@ -1659,7 +1661,7 @@ static int pmbus_identify_common(struct i2c_client *client,
1659 if (data->info->format[PSC_VOLTAGE_OUT] != linear) 1661 if (data->info->format[PSC_VOLTAGE_OUT] != linear)
1660 return -ENODEV; 1662 return -ENODEV;
1661 1663
1662 data->exponent = ((s8)(vout_mode << 3)) >> 3; 1664 data->exponent[page] = ((s8)(vout_mode << 3)) >> 3;
1663 break; 1665 break;
1664 case 1: /* VID mode */ 1666 case 1: /* VID mode */
1665 if (data->info->format[PSC_VOLTAGE_OUT] != vid) 1667 if (data->info->format[PSC_VOLTAGE_OUT] != vid)
@@ -1674,7 +1676,7 @@ static int pmbus_identify_common(struct i2c_client *client,
1674 } 1676 }
1675 } 1677 }
1676 1678
1677 pmbus_clear_fault_page(client, 0); 1679 pmbus_clear_fault_page(client, page);
1678 return 0; 1680 return 0;
1679} 1681}
1680 1682
@@ -1682,7 +1684,7 @@ static int pmbus_init_common(struct i2c_client *client, struct pmbus_data *data,
1682 struct pmbus_driver_info *info) 1684 struct pmbus_driver_info *info)
1683{ 1685{
1684 struct device *dev = &client->dev; 1686 struct device *dev = &client->dev;
1685 int ret; 1687 int page, ret;
1686 1688
1687 /* 1689 /*
1688 * Some PMBus chips don't support PMBUS_STATUS_BYTE, so try 1690 * Some PMBus chips don't support PMBUS_STATUS_BYTE, so try
@@ -1715,10 +1717,12 @@ static int pmbus_init_common(struct i2c_client *client, struct pmbus_data *data,
1715 return -ENODEV; 1717 return -ENODEV;
1716 } 1718 }
1717 1719
1718 ret = pmbus_identify_common(client, data); 1720 for (page = 0; page < info->pages; page++) {
1719 if (ret < 0) { 1721 ret = pmbus_identify_common(client, data, page);
1720 dev_err(dev, "Failed to identify chip capabilities\n"); 1722 if (ret < 0) {
1721 return ret; 1723 dev_err(dev, "Failed to identify chip capabilities\n");
1724 return ret;
1725 }
1722 } 1726 }
1723 return 0; 1727 return 0;
1724} 1728}
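
The pmbus_core.c changes above turn the single cached LINEAR16 exponent into a per-page array and run pmbus_identify_common() once per page, since a multi-page chip can report a different VOUT_MODE for each rail. A minimal user-space sketch of the exponent handling follows; the VOUT_MODE and mantissa values are hypothetical, and the conversion is shown in volts rather than through the driver's milli-unit arithmetic.

/* Illustrative only: sign-extending the 5-bit VOUT_MODE parameter into a
 * per-page exponent and applying it to a LINEAR16 reading.  The register
 * values are made up; the shift trick matches the hunk above. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint8_t vout_mode = 0x13;	/* linear mode, 5-bit parameter = -13 */
	int8_t exponent = ((int8_t)(vout_mode << 3)) >> 3;	/* sign-extend */
	uint16_t mantissa = 0x9999;	/* hypothetical READ_VOUT value */
	double volts = (exponent >= 0) ?
		(double)mantissa * (1 << exponent) :
		(double)mantissa / (1 << -exponent);

	printf("exponent=%d, vout=%.3f V\n", exponent, volts);	/* -13, ~4.800 */
	return 0;
}
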
diff --git a/drivers/irqchip/Makefile b/drivers/irqchip/Makefile
index 86b484cb3ec2..5194afb39e78 100644
--- a/drivers/irqchip/Makefile
+++ b/drivers/irqchip/Makefile
@@ -21,6 +21,7 @@ obj-$(CONFIG_SIRF_IRQ) += irq-sirfsoc.o
21obj-$(CONFIG_RENESAS_INTC_IRQPIN) += irq-renesas-intc-irqpin.o 21obj-$(CONFIG_RENESAS_INTC_IRQPIN) += irq-renesas-intc-irqpin.o
22obj-$(CONFIG_RENESAS_IRQC) += irq-renesas-irqc.o 22obj-$(CONFIG_RENESAS_IRQC) += irq-renesas-irqc.o
23obj-$(CONFIG_VERSATILE_FPGA_IRQ) += irq-versatile-fpga.o 23obj-$(CONFIG_VERSATILE_FPGA_IRQ) += irq-versatile-fpga.o
24obj-$(CONFIG_ARCH_NSPIRE) += irq-zevio.o
24obj-$(CONFIG_ARCH_VT8500) += irq-vt8500.o 25obj-$(CONFIG_ARCH_VT8500) += irq-vt8500.o
25obj-$(CONFIG_TB10X_IRQC) += irq-tb10x.o 26obj-$(CONFIG_TB10X_IRQC) += irq-tb10x.o
26obj-$(CONFIG_XTENSA) += irq-xtensa-pic.o 27obj-$(CONFIG_XTENSA) += irq-xtensa-pic.o
diff --git a/drivers/irqchip/irq-armada-370-xp.c b/drivers/irqchip/irq-armada-370-xp.c
index 9300bc32784e..540956465ed2 100644
--- a/drivers/irqchip/irq-armada-370-xp.c
+++ b/drivers/irqchip/irq-armada-370-xp.c
@@ -381,7 +381,7 @@ armada_370_xp_handle_irq(struct pt_regs *regs)
381 ARMADA_370_XP_IN_DRBEL_CAUSE_OFFS) 381 ARMADA_370_XP_IN_DRBEL_CAUSE_OFFS)
382 & PCI_MSI_DOORBELL_MASK; 382 & PCI_MSI_DOORBELL_MASK;
383 383
384 writel(~PCI_MSI_DOORBELL_MASK, per_cpu_int_base + 384 writel(~msimask, per_cpu_int_base +
385 ARMADA_370_XP_IN_DRBEL_CAUSE_OFFS); 385 ARMADA_370_XP_IN_DRBEL_CAUSE_OFFS);
386 386
387 for (msinr = PCI_MSI_DOORBELL_START; 387 for (msinr = PCI_MSI_DOORBELL_START;
@@ -407,7 +407,7 @@ armada_370_xp_handle_irq(struct pt_regs *regs)
407 ARMADA_370_XP_IN_DRBEL_CAUSE_OFFS) 407 ARMADA_370_XP_IN_DRBEL_CAUSE_OFFS)
408 & IPI_DOORBELL_MASK; 408 & IPI_DOORBELL_MASK;
409 409
410 writel(~IPI_DOORBELL_MASK, per_cpu_int_base + 410 writel(~ipimask, per_cpu_int_base +
411 ARMADA_370_XP_IN_DRBEL_CAUSE_OFFS); 411 ARMADA_370_XP_IN_DRBEL_CAUSE_OFFS);
412 412
413 /* Handle all pending doorbells */ 413 /* Handle all pending doorbells */
diff --git a/drivers/irqchip/irq-zevio.c b/drivers/irqchip/irq-zevio.c
new file mode 100644
index 000000000000..8ed04c4a43ee
--- /dev/null
+++ b/drivers/irqchip/irq-zevio.c
@@ -0,0 +1,127 @@
1/*
2 * linux/drivers/irqchip/irq-zevio.c
3 *
4 * Copyright (C) 2013 Daniel Tang <tangrs@tangrs.id.au>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2, as
8 * published by the Free Software Foundation.
9 *
10 */
11
12#include <linux/io.h>
13#include <linux/irq.h>
14#include <linux/of.h>
15#include <linux/of_address.h>
16#include <linux/of_irq.h>
17
18#include <asm/mach/irq.h>
19#include <asm/exception.h>
20
21#include "irqchip.h"
22
23#define IO_STATUS 0x000
24#define IO_RAW_STATUS 0x004
25#define IO_ENABLE 0x008
26#define IO_DISABLE 0x00C
27#define IO_CURRENT 0x020
28#define IO_RESET 0x028
29#define IO_MAX_PRIOTY 0x02C
30
31#define IO_IRQ_BASE 0x000
32#define IO_FIQ_BASE 0x100
33
34#define IO_INVERT_SEL 0x200
35#define IO_STICKY_SEL 0x204
36#define IO_PRIORITY_SEL 0x300
37
38#define MAX_INTRS 32
39#define FIQ_START MAX_INTRS
40
41static struct irq_domain *zevio_irq_domain;
42static void __iomem *zevio_irq_io;
43
44static void zevio_irq_ack(struct irq_data *irqd)
45{
46 struct irq_chip_generic *gc = irq_data_get_irq_chip_data(irqd);
47 struct irq_chip_regs *regs =
48 &container_of(irqd->chip, struct irq_chip_type, chip)->regs;
49
50 readl(gc->reg_base + regs->ack);
51}
52
53static asmlinkage void __exception_irq_entry zevio_handle_irq(struct pt_regs *regs)
54{
55 int irqnr;
56
57 while (readl(zevio_irq_io + IO_STATUS)) {
58 irqnr = readl(zevio_irq_io + IO_CURRENT);
59 irqnr = irq_find_mapping(zevio_irq_domain, irqnr);
60 handle_IRQ(irqnr, regs);
61 };
62}
63
64static void __init zevio_init_irq_base(void __iomem *base)
65{
66 /* Disable all interrupts */
67 writel(~0, base + IO_DISABLE);
68
69 /* Accept interrupts of all priorities */
70 writel(0xF, base + IO_MAX_PRIOTY);
71
72 /* Reset existing interrupts */
73 readl(base + IO_RESET);
74}
75
76static int __init zevio_of_init(struct device_node *node,
77 struct device_node *parent)
78{
79 unsigned int clr = IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN;
80 struct irq_chip_generic *gc;
81 int ret;
82
83 if (WARN_ON(zevio_irq_io || zevio_irq_domain))
84 return -EBUSY;
85
86 zevio_irq_io = of_iomap(node, 0);
87 BUG_ON(!zevio_irq_io);
88
89 /* Do not invert interrupt status bits */
90 writel(~0, zevio_irq_io + IO_INVERT_SEL);
91
92 /* Disable sticky interrupts */
93 writel(0, zevio_irq_io + IO_STICKY_SEL);
94
95 /* We don't use IRQ priorities. Set each IRQ to highest priority. */
96 memset_io(zevio_irq_io + IO_PRIORITY_SEL, 0, MAX_INTRS * sizeof(u32));
97
98 /* Init IRQ and FIQ */
99 zevio_init_irq_base(zevio_irq_io + IO_IRQ_BASE);
100 zevio_init_irq_base(zevio_irq_io + IO_FIQ_BASE);
101
102 zevio_irq_domain = irq_domain_add_linear(node, MAX_INTRS,
103 &irq_generic_chip_ops, NULL);
104 BUG_ON(!zevio_irq_domain);
105
106 ret = irq_alloc_domain_generic_chips(zevio_irq_domain, MAX_INTRS, 1,
107 "zevio_intc", handle_level_irq,
108 clr, 0, IRQ_GC_INIT_MASK_CACHE);
109 BUG_ON(ret);
110
111 gc = irq_get_domain_generic_chip(zevio_irq_domain, 0);
112 gc->reg_base = zevio_irq_io;
113 gc->chip_types[0].chip.irq_ack = zevio_irq_ack;
114 gc->chip_types[0].chip.irq_mask = irq_gc_mask_disable_reg;
115 gc->chip_types[0].chip.irq_unmask = irq_gc_unmask_enable_reg;
116 gc->chip_types[0].regs.mask = IO_IRQ_BASE + IO_ENABLE;
117 gc->chip_types[0].regs.enable = IO_IRQ_BASE + IO_ENABLE;
118 gc->chip_types[0].regs.disable = IO_IRQ_BASE + IO_DISABLE;
119 gc->chip_types[0].regs.ack = IO_IRQ_BASE + IO_RESET;
120
121 set_handle_irq(zevio_handle_irq);
122
123 pr_info("TI-NSPIRE classic IRQ controller\n");
124 return 0;
125}
126
127IRQCHIP_DECLARE(zevio_irq, "lsi,zevio-intc", zevio_of_init);
diff --git a/drivers/isdn/hisax/q931.c b/drivers/isdn/hisax/q931.c
index af1b020a81f1..b420f8bd862e 100644
--- a/drivers/isdn/hisax/q931.c
+++ b/drivers/isdn/hisax/q931.c
@@ -810,7 +810,7 @@ prfeatureind(char *dest, u_char *p)
810 dp += sprintf(dp, " octet 3 "); 810 dp += sprintf(dp, " octet 3 ");
811 dp += prbits(dp, *p, 8, 8); 811 dp += prbits(dp, *p, 8, 8);
812 *dp++ = '\n'; 812 *dp++ = '\n';
813 if (!(*p++ & 80)) { 813 if (!(*p++ & 0x80)) {
814 dp += sprintf(dp, " octet 4 "); 814 dp += sprintf(dp, " octet 4 ");
815 dp += prbits(dp, *p++, 8, 8); 815 dp += prbits(dp, *p++, 8, 8);
816 *dp++ = '\n'; 816 *dp++ = '\n';
diff --git a/drivers/md/bcache/extents.c b/drivers/md/bcache/extents.c
index c3ead586dc27..416d1a3e028e 100644
--- a/drivers/md/bcache/extents.c
+++ b/drivers/md/bcache/extents.c
@@ -194,7 +194,7 @@ err:
194 mutex_unlock(&b->c->bucket_lock); 194 mutex_unlock(&b->c->bucket_lock);
195 bch_extent_to_text(buf, sizeof(buf), k); 195 bch_extent_to_text(buf, sizeof(buf), k);
196 btree_bug(b, 196 btree_bug(b,
197"inconsistent btree pointer %s: bucket %li pin %i prio %i gen %i last_gc %i mark %llu gc_gen %i", 197"inconsistent btree pointer %s: bucket %zi pin %i prio %i gen %i last_gc %i mark %llu gc_gen %i",
198 buf, PTR_BUCKET_NR(b->c, k, i), atomic_read(&g->pin), 198 buf, PTR_BUCKET_NR(b->c, k, i), atomic_read(&g->pin),
199 g->prio, g->gen, g->last_gc, GC_MARK(g), g->gc_gen); 199 g->prio, g->gen, g->last_gc, GC_MARK(g), g->gc_gen);
200 return true; 200 return true;
diff --git a/drivers/media/dvb-frontends/cx24117.c b/drivers/media/dvb-frontends/cx24117.c
index 68f768a5422d..a6c3c9e2e897 100644
--- a/drivers/media/dvb-frontends/cx24117.c
+++ b/drivers/media/dvb-frontends/cx24117.c
@@ -1176,7 +1176,7 @@ struct dvb_frontend *cx24117_attach(const struct cx24117_config *config,
1176 1176
1177 switch (demod) { 1177 switch (demod) {
1178 case 0: 1178 case 0:
1179 dev_err(&state->priv->i2c->dev, 1179 dev_err(&i2c->dev,
1180 "%s: Error attaching frontend %d\n", 1180 "%s: Error attaching frontend %d\n",
1181 KBUILD_MODNAME, demod); 1181 KBUILD_MODNAME, demod);
1182 goto error1; 1182 goto error1;
@@ -1200,12 +1200,6 @@ struct dvb_frontend *cx24117_attach(const struct cx24117_config *config,
1200 state->demod = demod - 1; 1200 state->demod = demod - 1;
1201 state->priv = priv; 1201 state->priv = priv;
1202 1202
1203 /* test i2c bus for ack */
1204 if (demod == 0) {
1205 if (cx24117_readreg(state, 0x00) < 0)
1206 goto error3;
1207 }
1208
1209 dev_info(&state->priv->i2c->dev, 1203 dev_info(&state->priv->i2c->dev,
1210 "%s: Attaching frontend %d\n", 1204 "%s: Attaching frontend %d\n",
1211 KBUILD_MODNAME, state->demod); 1205 KBUILD_MODNAME, state->demod);
@@ -1216,8 +1210,6 @@ struct dvb_frontend *cx24117_attach(const struct cx24117_config *config,
1216 state->frontend.demodulator_priv = state; 1210 state->frontend.demodulator_priv = state;
1217 return &state->frontend; 1211 return &state->frontend;
1218 1212
1219error3:
1220 kfree(state);
1221error2: 1213error2:
1222 cx24117_release_priv(priv); 1214 cx24117_release_priv(priv);
1223error1: 1215error1:
diff --git a/drivers/media/dvb-frontends/nxt200x.c b/drivers/media/dvb-frontends/nxt200x.c
index 4bf057544607..8a8e1ecb762d 100644
--- a/drivers/media/dvb-frontends/nxt200x.c
+++ b/drivers/media/dvb-frontends/nxt200x.c
@@ -2,7 +2,7 @@
2 * Support for NXT2002 and NXT2004 - VSB/QAM 2 * Support for NXT2002 and NXT2004 - VSB/QAM
3 * 3 *
4 * Copyright (C) 2005 Kirk Lapray <kirk.lapray@gmail.com> 4 * Copyright (C) 2005 Kirk Lapray <kirk.lapray@gmail.com>
5 * Copyright (C) 2006 Michael Krufky <mkrufky@m1k.net> 5 * Copyright (C) 2006-2014 Michael Krufky <mkrufky@linuxtv.org>
6 * based on nxt2002 by Taylor Jacob <rtjacob@earthlink.net> 6 * based on nxt2002 by Taylor Jacob <rtjacob@earthlink.net>
7 * and nxt2004 by Jean-Francois Thibert <jeanfrancois@sagetv.com> 7 * and nxt2004 by Jean-Francois Thibert <jeanfrancois@sagetv.com>
8 * 8 *
diff --git a/drivers/media/i2c/adv7842.c b/drivers/media/i2c/adv7842.c
index 1effc21e1cdd..9bbd6656fb8f 100644
--- a/drivers/media/i2c/adv7842.c
+++ b/drivers/media/i2c/adv7842.c
@@ -2554,7 +2554,7 @@ static int adv7842_core_init(struct v4l2_subdev *sd)
2554 sdp_write_and_or(sd, 0xdd, 0xf0, pdata->sdp_free_run_force | 2554 sdp_write_and_or(sd, 0xdd, 0xf0, pdata->sdp_free_run_force |
2555 (pdata->sdp_free_run_cbar_en << 1) | 2555 (pdata->sdp_free_run_cbar_en << 1) |
2556 (pdata->sdp_free_run_man_col_en << 2) | 2556 (pdata->sdp_free_run_man_col_en << 2) |
2557 (pdata->sdp_free_run_force << 3)); 2557 (pdata->sdp_free_run_auto << 3));
2558 2558
2559 /* TODO from platform data */ 2559 /* TODO from platform data */
2560 cp_write(sd, 0x69, 0x14); /* Enable CP CSC */ 2560 cp_write(sd, 0x69, 0x14); /* Enable CP CSC */
diff --git a/drivers/media/i2c/s5k5baf.c b/drivers/media/i2c/s5k5baf.c
index 4b8381111cbd..77e10e0fd8d6 100644
--- a/drivers/media/i2c/s5k5baf.c
+++ b/drivers/media/i2c/s5k5baf.c
@@ -478,25 +478,33 @@ static void s5k5baf_write_arr_seq(struct s5k5baf *state, u16 addr,
478 u16 count, const u16 *seq) 478 u16 count, const u16 *seq)
479{ 479{
480 struct i2c_client *c = v4l2_get_subdevdata(&state->sd); 480 struct i2c_client *c = v4l2_get_subdevdata(&state->sd);
481 __be16 buf[count + 1]; 481 __be16 buf[65];
482 int ret, n;
483 482
484 s5k5baf_i2c_write(state, REG_CMDWR_ADDR, addr); 483 s5k5baf_i2c_write(state, REG_CMDWR_ADDR, addr);
485 if (state->error) 484 if (state->error)
486 return; 485 return;
487 486
487 v4l2_dbg(3, debug, c, "i2c_write_seq(count=%d): %*ph\n", count,
488 min(2 * count, 64), seq);
489
488 buf[0] = __constant_cpu_to_be16(REG_CMD_BUF); 490 buf[0] = __constant_cpu_to_be16(REG_CMD_BUF);
489 for (n = 1; n <= count; ++n)
490 buf[n] = cpu_to_be16(*seq++);
491 491
492 n *= 2; 492 while (count > 0) {
493 ret = i2c_master_send(c, (char *)buf, n); 493 int n = min_t(int, count, ARRAY_SIZE(buf) - 1);
494 v4l2_dbg(3, debug, c, "i2c_write_seq(count=%d): %*ph\n", count, 494 int ret, i;
495 min(2 * count, 64), seq - count);
496 495
497 if (ret != n) { 496 for (i = 1; i <= n; ++i)
498 v4l2_err(c, "i2c_write_seq: error during transfer (%d)\n", ret); 497 buf[i] = cpu_to_be16(*seq++);
499 state->error = ret; 498
499 i *= 2;
500 ret = i2c_master_send(c, (char *)buf, i);
501 if (ret != i) {
502 v4l2_err(c, "i2c_write_seq: error during transfer (%d)\n", ret);
503 state->error = ret;
504 break;
505 }
506
507 count -= n;
500 } 508 }
501} 509}
502 510
diff --git a/drivers/media/pci/bt8xx/bttv-cards.c b/drivers/media/pci/bt8xx/bttv-cards.c
index d85cb0ace4dc..6662b495b22c 100644
--- a/drivers/media/pci/bt8xx/bttv-cards.c
+++ b/drivers/media/pci/bt8xx/bttv-cards.c
@@ -2426,7 +2426,7 @@ struct tvcard bttv_tvcards[] = {
2426 }, 2426 },
2427 /* ---- card 0x87---------------------------------- */ 2427 /* ---- card 0x87---------------------------------- */
2428 [BTTV_BOARD_DVICO_FUSIONHDTV_5_LITE] = { 2428 [BTTV_BOARD_DVICO_FUSIONHDTV_5_LITE] = {
2429 /* Michael Krufky <mkrufky@m1k.net> */ 2429 /* Michael Krufky <mkrufky@linuxtv.org> */
2430 .name = "DViCO FusionHDTV 5 Lite", 2430 .name = "DViCO FusionHDTV 5 Lite",
2431 .tuner_type = TUNER_LG_TDVS_H06XF, /* TDVS-H064F */ 2431 .tuner_type = TUNER_LG_TDVS_H06XF, /* TDVS-H064F */
2432 .tuner_addr = ADDR_UNSET, 2432 .tuner_addr = ADDR_UNSET,
diff --git a/drivers/media/pci/bt8xx/bttv-gpio.c b/drivers/media/pci/bt8xx/bttv-gpio.c
index 922e8233fd0b..3f364b7062b9 100644
--- a/drivers/media/pci/bt8xx/bttv-gpio.c
+++ b/drivers/media/pci/bt8xx/bttv-gpio.c
@@ -98,7 +98,7 @@ int bttv_sub_add_device(struct bttv_core *core, char *name)
98 98
99 err = device_register(&sub->dev); 99 err = device_register(&sub->dev);
100 if (0 != err) { 100 if (0 != err) {
101 kfree(sub); 101 put_device(&sub->dev);
102 return err; 102 return err;
103 } 103 }
104 pr_info("%d: add subdevice \"%s\"\n", core->nr, dev_name(&sub->dev)); 104 pr_info("%d: add subdevice \"%s\"\n", core->nr, dev_name(&sub->dev));
diff --git a/drivers/media/pci/saa7134/saa7134-cards.c b/drivers/media/pci/saa7134/saa7134-cards.c
index d45e7f6ff332..c9b2350e92c8 100644
--- a/drivers/media/pci/saa7134/saa7134-cards.c
+++ b/drivers/media/pci/saa7134/saa7134-cards.c
@@ -2590,7 +2590,7 @@ struct saa7134_board saa7134_boards[] = {
2590 }}, 2590 }},
2591 }, 2591 },
2592 [SAA7134_BOARD_AVERMEDIA_AVERTVHD_A180] = { 2592 [SAA7134_BOARD_AVERMEDIA_AVERTVHD_A180] = {
2593 /* Michael Krufky <mkrufky@m1k.net> 2593 /* Michael Krufky <mkrufky@linuxtv.org>
2594 * Uses Alps Electric TDHU2, containing NXT2004 ATSC Decoder 2594 * Uses Alps Electric TDHU2, containing NXT2004 ATSC Decoder
2595 * AFAIK, there is no analog demod, thus, 2595 * AFAIK, there is no analog demod, thus,
2596 * no support for analog television. 2596 * no support for analog television.
diff --git a/drivers/media/platform/exynos4-is/fimc-core.c b/drivers/media/platform/exynos4-is/fimc-core.c
index a7dfd07e8389..da2fc86cc524 100644
--- a/drivers/media/platform/exynos4-is/fimc-core.c
+++ b/drivers/media/platform/exynos4-is/fimc-core.c
@@ -1027,7 +1027,8 @@ static int fimc_probe(struct platform_device *pdev)
1027 return 0; 1027 return 0;
1028 1028
1029err_gclk: 1029err_gclk:
1030 clk_disable(fimc->clock[CLK_GATE]); 1030 if (!pm_runtime_enabled(dev))
1031 clk_disable(fimc->clock[CLK_GATE]);
1031err_sd: 1032err_sd:
1032 fimc_unregister_capture_subdev(fimc); 1033 fimc_unregister_capture_subdev(fimc);
1033err_sclk: 1034err_sclk:
@@ -1036,6 +1037,7 @@ err_sclk:
1036 return ret; 1037 return ret;
1037} 1038}
1038 1039
1040#ifdef CONFIG_PM_RUNTIME
1039static int fimc_runtime_resume(struct device *dev) 1041static int fimc_runtime_resume(struct device *dev)
1040{ 1042{
1041 struct fimc_dev *fimc = dev_get_drvdata(dev); 1043 struct fimc_dev *fimc = dev_get_drvdata(dev);
@@ -1068,6 +1070,7 @@ static int fimc_runtime_suspend(struct device *dev)
1068 dbg("fimc%d: state: 0x%lx", fimc->id, fimc->state); 1070 dbg("fimc%d: state: 0x%lx", fimc->id, fimc->state);
1069 return ret; 1071 return ret;
1070} 1072}
1073#endif
1071 1074
1072#ifdef CONFIG_PM_SLEEP 1075#ifdef CONFIG_PM_SLEEP
1073static int fimc_resume(struct device *dev) 1076static int fimc_resume(struct device *dev)
diff --git a/drivers/media/platform/exynos4-is/fimc-lite.c b/drivers/media/platform/exynos4-is/fimc-lite.c
index 1234734bccf4..779ec3cd259d 100644
--- a/drivers/media/platform/exynos4-is/fimc-lite.c
+++ b/drivers/media/platform/exynos4-is/fimc-lite.c
@@ -1563,7 +1563,7 @@ static int fimc_lite_probe(struct platform_device *pdev)
1563 if (!pm_runtime_enabled(dev)) { 1563 if (!pm_runtime_enabled(dev)) {
1564 ret = clk_enable(fimc->clock); 1564 ret = clk_enable(fimc->clock);
1565 if (ret < 0) 1565 if (ret < 0)
1566 goto err_clk_put; 1566 goto err_sd;
1567 } 1567 }
1568 1568
1569 fimc->alloc_ctx = vb2_dma_contig_init_ctx(dev); 1569 fimc->alloc_ctx = vb2_dma_contig_init_ctx(dev);
@@ -1579,7 +1579,8 @@ static int fimc_lite_probe(struct platform_device *pdev)
1579 return 0; 1579 return 0;
1580 1580
1581err_clk_dis: 1581err_clk_dis:
1582 clk_disable(fimc->clock); 1582 if (!pm_runtime_enabled(dev))
1583 clk_disable(fimc->clock);
1583err_sd: 1584err_sd:
1584 fimc_lite_unregister_capture_subdev(fimc); 1585 fimc_lite_unregister_capture_subdev(fimc);
1585err_clk_put: 1586err_clk_put:
@@ -1587,6 +1588,7 @@ err_clk_put:
1587 return ret; 1588 return ret;
1588} 1589}
1589 1590
1591#ifdef CONFIG_PM_RUNTIME
1590static int fimc_lite_runtime_resume(struct device *dev) 1592static int fimc_lite_runtime_resume(struct device *dev)
1591{ 1593{
1592 struct fimc_lite *fimc = dev_get_drvdata(dev); 1594 struct fimc_lite *fimc = dev_get_drvdata(dev);
@@ -1602,6 +1604,7 @@ static int fimc_lite_runtime_suspend(struct device *dev)
1602 clk_disable(fimc->clock); 1604 clk_disable(fimc->clock);
1603 return 0; 1605 return 0;
1604} 1606}
1607#endif
1605 1608
1606#ifdef CONFIG_PM_SLEEP 1609#ifdef CONFIG_PM_SLEEP
1607static int fimc_lite_resume(struct device *dev) 1610static int fimc_lite_resume(struct device *dev)
diff --git a/drivers/media/platform/s5p-jpeg/jpeg-core.c b/drivers/media/platform/s5p-jpeg/jpeg-core.c
index a1c78c870b68..7d68d0b9966a 100644
--- a/drivers/media/platform/s5p-jpeg/jpeg-core.c
+++ b/drivers/media/platform/s5p-jpeg/jpeg-core.c
@@ -175,7 +175,7 @@ static struct s5p_jpeg_fmt sjpeg_formats[] = {
175 { 175 {
176 .name = "YUV 4:2:0 planar, Y/CbCr", 176 .name = "YUV 4:2:0 planar, Y/CbCr",
177 .fourcc = V4L2_PIX_FMT_NV12, 177 .fourcc = V4L2_PIX_FMT_NV12,
178 .depth = 16, 178 .depth = 12,
179 .colplanes = 2, 179 .colplanes = 2,
180 .h_align = 1, 180 .h_align = 1,
181 .v_align = 1, 181 .v_align = 1,
@@ -188,10 +188,10 @@ static struct s5p_jpeg_fmt sjpeg_formats[] = {
188 { 188 {
189 .name = "YUV 4:2:0 planar, Y/CbCr", 189 .name = "YUV 4:2:0 planar, Y/CbCr",
190 .fourcc = V4L2_PIX_FMT_NV12, 190 .fourcc = V4L2_PIX_FMT_NV12,
191 .depth = 16, 191 .depth = 12,
192 .colplanes = 4, 192 .colplanes = 2,
193 .h_align = 4, 193 .h_align = 4,
194 .v_align = 1, 194 .v_align = 4,
195 .flags = SJPEG_FMT_FLAG_ENC_OUTPUT | 195 .flags = SJPEG_FMT_FLAG_ENC_OUTPUT |
196 SJPEG_FMT_FLAG_DEC_CAPTURE | 196 SJPEG_FMT_FLAG_DEC_CAPTURE |
197 SJPEG_FMT_FLAG_S5P | 197 SJPEG_FMT_FLAG_S5P |
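
The corrected NV12 entries follow from the 4:2:0 arithmetic: each pixel carries 8 bits of luma, and the interleaved CbCr plane holds two 8-bit samples per 2x2 block, i.e. 4 bits per pixel on average, so a two-plane 4:2:0 buffer averages 12 bits per pixel rather than 16. Likewise NV12 consists of exactly two planes (Y plus interleaved CbCr), hence colplanes is 2.
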
diff --git a/drivers/media/usb/dvb-usb-v2/af9035.c b/drivers/media/usb/dvb-usb-v2/af9035.c
index 8f9b2cea88f0..8ede8ea762e6 100644
--- a/drivers/media/usb/dvb-usb-v2/af9035.c
+++ b/drivers/media/usb/dvb-usb-v2/af9035.c
@@ -1539,6 +1539,8 @@ static const struct usb_device_id af9035_id_table[] = {
1539 &af9035_props, "TerraTec Cinergy T Stick Dual RC (rev. 2)", NULL) }, 1539 &af9035_props, "TerraTec Cinergy T Stick Dual RC (rev. 2)", NULL) },
1540 { DVB_USB_DEVICE(USB_VID_LEADTEK, 0x6a05, 1540 { DVB_USB_DEVICE(USB_VID_LEADTEK, 0x6a05,
1541 &af9035_props, "Leadtek WinFast DTV Dongle Dual", NULL) }, 1541 &af9035_props, "Leadtek WinFast DTV Dongle Dual", NULL) },
1542 { DVB_USB_DEVICE(USB_VID_HAUPPAUGE, 0xf900,
1543 &af9035_props, "Hauppauge WinTV-MiniStick 2", NULL) },
1542 { } 1544 { }
1543}; 1545};
1544MODULE_DEVICE_TABLE(usb, af9035_id_table); 1546MODULE_DEVICE_TABLE(usb, af9035_id_table);
diff --git a/drivers/media/usb/dvb-usb-v2/mxl111sf-demod.c b/drivers/media/usb/dvb-usb-v2/mxl111sf-demod.c
index d83df4bb72d3..0a98d04c53e4 100644
--- a/drivers/media/usb/dvb-usb-v2/mxl111sf-demod.c
+++ b/drivers/media/usb/dvb-usb-v2/mxl111sf-demod.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * mxl111sf-demod.c - driver for the MaxLinear MXL111SF DVB-T demodulator 2 * mxl111sf-demod.c - driver for the MaxLinear MXL111SF DVB-T demodulator
3 * 3 *
4 * Copyright (C) 2010 Michael Krufky <mkrufky@kernellabs.com> 4 * Copyright (C) 2010-2014 Michael Krufky <mkrufky@linuxtv.org>
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify 6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by 7 * it under the terms of the GNU General Public License as published by
@@ -601,7 +601,7 @@ struct dvb_frontend *mxl111sf_demod_attach(struct mxl111sf_state *mxl_state,
601EXPORT_SYMBOL_GPL(mxl111sf_demod_attach); 601EXPORT_SYMBOL_GPL(mxl111sf_demod_attach);
602 602
603MODULE_DESCRIPTION("MaxLinear MxL111SF DVB-T demodulator driver"); 603MODULE_DESCRIPTION("MaxLinear MxL111SF DVB-T demodulator driver");
604MODULE_AUTHOR("Michael Krufky <mkrufky@kernellabs.com>"); 604MODULE_AUTHOR("Michael Krufky <mkrufky@linuxtv.org>");
605MODULE_LICENSE("GPL"); 605MODULE_LICENSE("GPL");
606MODULE_VERSION("0.1"); 606MODULE_VERSION("0.1");
607 607
diff --git a/drivers/media/usb/dvb-usb-v2/mxl111sf-demod.h b/drivers/media/usb/dvb-usb-v2/mxl111sf-demod.h
index 3f3f8bfd190b..2d4530f5be54 100644
--- a/drivers/media/usb/dvb-usb-v2/mxl111sf-demod.h
+++ b/drivers/media/usb/dvb-usb-v2/mxl111sf-demod.h
@@ -1,7 +1,7 @@
1/* 1/*
2 * mxl111sf-demod.h - driver for the MaxLinear MXL111SF DVB-T demodulator 2 * mxl111sf-demod.h - driver for the MaxLinear MXL111SF DVB-T demodulator
3 * 3 *
4 * Copyright (C) 2010 Michael Krufky <mkrufky@kernellabs.com> 4 * Copyright (C) 2010-2014 Michael Krufky <mkrufky@linuxtv.org>
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify 6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by 7 * it under the terms of the GNU General Public License as published by
diff --git a/drivers/media/usb/dvb-usb-v2/mxl111sf-gpio.c b/drivers/media/usb/dvb-usb-v2/mxl111sf-gpio.c
index e4121cb8f5ef..a619410adde4 100644
--- a/drivers/media/usb/dvb-usb-v2/mxl111sf-gpio.c
+++ b/drivers/media/usb/dvb-usb-v2/mxl111sf-gpio.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * mxl111sf-gpio.c - driver for the MaxLinear MXL111SF 2 * mxl111sf-gpio.c - driver for the MaxLinear MXL111SF
3 * 3 *
4 * Copyright (C) 2010 Michael Krufky <mkrufky@kernellabs.com> 4 * Copyright (C) 2010-2014 Michael Krufky <mkrufky@linuxtv.org>
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify 6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by 7 * it under the terms of the GNU General Public License as published by
diff --git a/drivers/media/usb/dvb-usb-v2/mxl111sf-gpio.h b/drivers/media/usb/dvb-usb-v2/mxl111sf-gpio.h
index 0220f54299a5..b85a5772d771 100644
--- a/drivers/media/usb/dvb-usb-v2/mxl111sf-gpio.h
+++ b/drivers/media/usb/dvb-usb-v2/mxl111sf-gpio.h
@@ -1,7 +1,7 @@
1/* 1/*
2 * mxl111sf-gpio.h - driver for the MaxLinear MXL111SF 2 * mxl111sf-gpio.h - driver for the MaxLinear MXL111SF
3 * 3 *
4 * Copyright (C) 2010 Michael Krufky <mkrufky@kernellabs.com> 4 * Copyright (C) 2010-2014 Michael Krufky <mkrufky@linuxtv.org>
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify 6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by 7 * it under the terms of the GNU General Public License as published by
diff --git a/drivers/media/usb/dvb-usb-v2/mxl111sf-i2c.c b/drivers/media/usb/dvb-usb-v2/mxl111sf-i2c.c
index 34434557ef65..a101d06eb143 100644
--- a/drivers/media/usb/dvb-usb-v2/mxl111sf-i2c.c
+++ b/drivers/media/usb/dvb-usb-v2/mxl111sf-i2c.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * mxl111sf-i2c.c - driver for the MaxLinear MXL111SF 2 * mxl111sf-i2c.c - driver for the MaxLinear MXL111SF
3 * 3 *
4 * Copyright (C) 2010 Michael Krufky <mkrufky@kernellabs.com> 4 * Copyright (C) 2010-2014 Michael Krufky <mkrufky@linuxtv.org>
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify 6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by 7 * it under the terms of the GNU General Public License as published by
diff --git a/drivers/media/usb/dvb-usb-v2/mxl111sf-i2c.h b/drivers/media/usb/dvb-usb-v2/mxl111sf-i2c.h
index a57a45ffb9e4..465762145ad2 100644
--- a/drivers/media/usb/dvb-usb-v2/mxl111sf-i2c.h
+++ b/drivers/media/usb/dvb-usb-v2/mxl111sf-i2c.h
@@ -1,7 +1,7 @@
1/* 1/*
2 * mxl111sf-i2c.h - driver for the MaxLinear MXL111SF 2 * mxl111sf-i2c.h - driver for the MaxLinear MXL111SF
3 * 3 *
4 * Copyright (C) 2010 Michael Krufky <mkrufky@kernellabs.com> 4 * Copyright (C) 2010-2014 Michael Krufky <mkrufky@linuxtv.org>
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify 6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by 7 * it under the terms of the GNU General Public License as published by
diff --git a/drivers/media/usb/dvb-usb-v2/mxl111sf-phy.c b/drivers/media/usb/dvb-usb-v2/mxl111sf-phy.c
index b741b3a7a325..f6b348024bec 100644
--- a/drivers/media/usb/dvb-usb-v2/mxl111sf-phy.c
+++ b/drivers/media/usb/dvb-usb-v2/mxl111sf-phy.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * mxl111sf-phy.c - driver for the MaxLinear MXL111SF 2 * mxl111sf-phy.c - driver for the MaxLinear MXL111SF
3 * 3 *
4 * Copyright (C) 2010 Michael Krufky <mkrufky@kernellabs.com> 4 * Copyright (C) 2010-2014 Michael Krufky <mkrufky@linuxtv.org>
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify 6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by 7 * it under the terms of the GNU General Public License as published by
diff --git a/drivers/media/usb/dvb-usb-v2/mxl111sf-phy.h b/drivers/media/usb/dvb-usb-v2/mxl111sf-phy.h
index f0756071d347..0643738de7de 100644
--- a/drivers/media/usb/dvb-usb-v2/mxl111sf-phy.h
+++ b/drivers/media/usb/dvb-usb-v2/mxl111sf-phy.h
@@ -1,7 +1,7 @@
1/* 1/*
2 * mxl111sf-phy.h - driver for the MaxLinear MXL111SF 2 * mxl111sf-phy.h - driver for the MaxLinear MXL111SF
3 * 3 *
4 * Copyright (C) 2010 Michael Krufky <mkrufky@kernellabs.com> 4 * Copyright (C) 2010-2014 Michael Krufky <mkrufky@linuxtv.org>
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify 6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by 7 * it under the terms of the GNU General Public License as published by
diff --git a/drivers/media/usb/dvb-usb-v2/mxl111sf-reg.h b/drivers/media/usb/dvb-usb-v2/mxl111sf-reg.h
index 17831b0fb9db..89bf115e927e 100644
--- a/drivers/media/usb/dvb-usb-v2/mxl111sf-reg.h
+++ b/drivers/media/usb/dvb-usb-v2/mxl111sf-reg.h
@@ -1,7 +1,7 @@
1/* 1/*
2 * mxl111sf-reg.h - driver for the MaxLinear MXL111SF 2 * mxl111sf-reg.h - driver for the MaxLinear MXL111SF
3 * 3 *
4 * Copyright (C) 2010 Michael Krufky <mkrufky@kernellabs.com> 4 * Copyright (C) 2010-2014 Michael Krufky <mkrufky@linuxtv.org>
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify 6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by 7 * it under the terms of the GNU General Public License as published by
diff --git a/drivers/media/usb/dvb-usb-v2/mxl111sf-tuner.c b/drivers/media/usb/dvb-usb-v2/mxl111sf-tuner.c
index 879c529640f7..a8d2c7053674 100644
--- a/drivers/media/usb/dvb-usb-v2/mxl111sf-tuner.c
+++ b/drivers/media/usb/dvb-usb-v2/mxl111sf-tuner.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * mxl111sf-tuner.c - driver for the MaxLinear MXL111SF CMOS tuner 2 * mxl111sf-tuner.c - driver for the MaxLinear MXL111SF CMOS tuner
3 * 3 *
4 * Copyright (C) 2010 Michael Krufky <mkrufky@kernellabs.com> 4 * Copyright (C) 2010-2014 Michael Krufky <mkrufky@linuxtv.org>
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify 6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by 7 * it under the terms of the GNU General Public License as published by
@@ -512,7 +512,7 @@ struct dvb_frontend *mxl111sf_tuner_attach(struct dvb_frontend *fe,
512EXPORT_SYMBOL_GPL(mxl111sf_tuner_attach); 512EXPORT_SYMBOL_GPL(mxl111sf_tuner_attach);
513 513
514MODULE_DESCRIPTION("MaxLinear MxL111SF CMOS tuner driver"); 514MODULE_DESCRIPTION("MaxLinear MxL111SF CMOS tuner driver");
515MODULE_AUTHOR("Michael Krufky <mkrufky@kernellabs.com>"); 515MODULE_AUTHOR("Michael Krufky <mkrufky@linuxtv.org>");
516MODULE_LICENSE("GPL"); 516MODULE_LICENSE("GPL");
517MODULE_VERSION("0.1"); 517MODULE_VERSION("0.1");
518 518
diff --git a/drivers/media/usb/dvb-usb-v2/mxl111sf-tuner.h b/drivers/media/usb/dvb-usb-v2/mxl111sf-tuner.h
index 90f583e5d6a6..2046db22519e 100644
--- a/drivers/media/usb/dvb-usb-v2/mxl111sf-tuner.h
+++ b/drivers/media/usb/dvb-usb-v2/mxl111sf-tuner.h
@@ -1,7 +1,7 @@
1/* 1/*
2 * mxl111sf-tuner.h - driver for the MaxLinear MXL111SF CMOS tuner 2 * mxl111sf-tuner.h - driver for the MaxLinear MXL111SF CMOS tuner
3 * 3 *
4 * Copyright (C) 2010 Michael Krufky <mkrufky@kernellabs.com> 4 * Copyright (C) 2010-2014 Michael Krufky <mkrufky@linuxtv.org>
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify 6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by 7 * it under the terms of the GNU General Public License as published by
@@ -68,7 +68,7 @@ struct dvb_frontend *mxl111sf_tuner_attach(struct dvb_frontend *fe,
68#else 68#else
69static inline 69static inline
70struct dvb_frontend *mxl111sf_tuner_attach(struct dvb_frontend *fe, 70struct dvb_frontend *mxl111sf_tuner_attach(struct dvb_frontend *fe,
71 struct mxl111sf_state *mxl_state 71 struct mxl111sf_state *mxl_state,
72 struct mxl111sf_tuner_config *cfg) 72 struct mxl111sf_tuner_config *cfg)
73{ 73{
74 printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__); 74 printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
diff --git a/drivers/media/usb/dvb-usb-v2/mxl111sf.c b/drivers/media/usb/dvb-usb-v2/mxl111sf.c
index 08240e498451..c7304fa8ab73 100644
--- a/drivers/media/usb/dvb-usb-v2/mxl111sf.c
+++ b/drivers/media/usb/dvb-usb-v2/mxl111sf.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (C) 2010 Michael Krufky (mkrufky@kernellabs.com) 2 * Copyright (C) 2010-2014 Michael Krufky (mkrufky@linuxtv.org)
3 * 3 *
4 * This program is free software; you can redistribute it and/or modify it 4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms of the GNU General Public License as published by the Free 5 * under the terms of the GNU General Public License as published by the Free
@@ -105,7 +105,7 @@ int mxl111sf_read_reg(struct mxl111sf_state *state, u8 addr, u8 *data)
105 ret = -EINVAL; 105 ret = -EINVAL;
106 } 106 }
107 107
108 pr_debug("R: (0x%02x, 0x%02x)\n", addr, *data); 108 pr_debug("R: (0x%02x, 0x%02x)\n", addr, buf[1]);
109fail: 109fail:
110 return ret; 110 return ret;
111} 111}
@@ -1421,7 +1421,7 @@ static struct usb_driver mxl111sf_usb_driver = {
1421 1421
1422module_usb_driver(mxl111sf_usb_driver); 1422module_usb_driver(mxl111sf_usb_driver);
1423 1423
1424MODULE_AUTHOR("Michael Krufky <mkrufky@kernellabs.com>"); 1424MODULE_AUTHOR("Michael Krufky <mkrufky@linuxtv.org>");
1425MODULE_DESCRIPTION("Driver for MaxLinear MxL111SF"); 1425MODULE_DESCRIPTION("Driver for MaxLinear MxL111SF");
1426MODULE_VERSION("1.0"); 1426MODULE_VERSION("1.0");
1427MODULE_LICENSE("GPL"); 1427MODULE_LICENSE("GPL");
diff --git a/drivers/media/usb/dvb-usb-v2/mxl111sf.h b/drivers/media/usb/dvb-usb-v2/mxl111sf.h
index 9816de86e48c..8516c011b7cc 100644
--- a/drivers/media/usb/dvb-usb-v2/mxl111sf.h
+++ b/drivers/media/usb/dvb-usb-v2/mxl111sf.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (C) 2010 Michael Krufky (mkrufky@kernellabs.com) 2 * Copyright (C) 2010-2014 Michael Krufky (mkrufky@linuxtv.org)
3 * 3 *
4 * This program is free software; you can redistribute it and/or modify it 4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms of the GNU General Public License as published by the Free 5 * under the terms of the GNU General Public License as published by the Free
diff --git a/drivers/media/usb/hdpvr/hdpvr-core.c b/drivers/media/usb/hdpvr/hdpvr-core.c
index 2f0c89cbac76..c5638964c3f2 100644
--- a/drivers/media/usb/hdpvr/hdpvr-core.c
+++ b/drivers/media/usb/hdpvr/hdpvr-core.c
@@ -198,7 +198,6 @@ static int device_authorization(struct hdpvr_device *dev)
198 hex_dump_to_buffer(response, 8, 16, 1, print_buf, 5*buf_size+1, 0); 198 hex_dump_to_buffer(response, 8, 16, 1, print_buf, 5*buf_size+1, 0);
199 v4l2_dbg(MSG_INFO, hdpvr_debug, &dev->v4l2_dev, " response: %s\n", 199 v4l2_dbg(MSG_INFO, hdpvr_debug, &dev->v4l2_dev, " response: %s\n",
200 print_buf); 200 print_buf);
201 kfree(print_buf);
202#endif 201#endif
203 202
204 msleep(100); 203 msleep(100);
@@ -214,6 +213,9 @@ static int device_authorization(struct hdpvr_device *dev)
214 retval = ret != 8; 213 retval = ret != 8;
215unlock: 214unlock:
216 mutex_unlock(&dev->usbc_mutex); 215 mutex_unlock(&dev->usbc_mutex);
216#ifdef HDPVR_DEBUG
217 kfree(print_buf);
218#endif
217 return retval; 219 return retval;
218} 220}
219 221
diff --git a/drivers/media/v4l2-core/v4l2-dv-timings.c b/drivers/media/v4l2-core/v4l2-dv-timings.c
index ee52b9f4a944..f7902fe8a526 100644
--- a/drivers/media/v4l2-core/v4l2-dv-timings.c
+++ b/drivers/media/v4l2-core/v4l2-dv-timings.c
@@ -515,6 +515,7 @@ bool v4l2_detect_gtf(unsigned frame_height,
515 aspect.denominator = 9; 515 aspect.denominator = 9;
516 } 516 }
517 image_width = ((image_height * aspect.numerator) / aspect.denominator); 517 image_width = ((image_height * aspect.numerator) / aspect.denominator);
518 image_width = (image_width + GTF_CELL_GRAN/2) & ~(GTF_CELL_GRAN - 1);
518 519
519 /* Horizontal */ 520 /* Horizontal */
520 if (default_gtf) 521 if (default_gtf)
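
Assuming GTF_CELL_GRAN is the usual GTF character-cell width of 8, the added line rounds the aspect-derived width to the nearest multiple of the cell granularity instead of leaving a truncated value: a 480-line 16:9 mode, for instance, yields image_width = 480 * 16 / 9 = 853, which the new line turns into (853 + 4) & ~7 = 856, presumably so that the computed width lines up with the 8-pixel granularity GTF modes use.
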
diff --git a/drivers/media/v4l2-core/videobuf-dma-contig.c b/drivers/media/v4l2-core/videobuf-dma-contig.c
index 65411adcd0ea..7e6b209b7002 100644
--- a/drivers/media/v4l2-core/videobuf-dma-contig.c
+++ b/drivers/media/v4l2-core/videobuf-dma-contig.c
@@ -66,14 +66,11 @@ static void __videobuf_dc_free(struct device *dev,
66static void videobuf_vm_open(struct vm_area_struct *vma) 66static void videobuf_vm_open(struct vm_area_struct *vma)
67{ 67{
68 struct videobuf_mapping *map = vma->vm_private_data; 68 struct videobuf_mapping *map = vma->vm_private_data;
69 struct videobuf_queue *q = map->q;
70 69
71 dev_dbg(q->dev, "vm_open %p [count=%u,vma=%08lx-%08lx]\n", 70 dev_dbg(map->q->dev, "vm_open %p [count=%u,vma=%08lx-%08lx]\n",
72 map, map->count, vma->vm_start, vma->vm_end); 71 map, map->count, vma->vm_start, vma->vm_end);
73 72
74 videobuf_queue_lock(q);
75 map->count++; 73 map->count++;
76 videobuf_queue_unlock(q);
77} 74}
78 75
79static void videobuf_vm_close(struct vm_area_struct *vma) 76static void videobuf_vm_close(struct vm_area_struct *vma)
@@ -85,11 +82,12 @@ static void videobuf_vm_close(struct vm_area_struct *vma)
85 dev_dbg(q->dev, "vm_close %p [count=%u,vma=%08lx-%08lx]\n", 82 dev_dbg(q->dev, "vm_close %p [count=%u,vma=%08lx-%08lx]\n",
86 map, map->count, vma->vm_start, vma->vm_end); 83 map, map->count, vma->vm_start, vma->vm_end);
87 84
88 videobuf_queue_lock(q); 85 map->count--;
89 if (!--map->count) { 86 if (0 == map->count) {
90 struct videobuf_dma_contig_memory *mem; 87 struct videobuf_dma_contig_memory *mem;
91 88
92 dev_dbg(q->dev, "munmap %p q=%p\n", map, q); 89 dev_dbg(q->dev, "munmap %p q=%p\n", map, q);
90 videobuf_queue_lock(q);
93 91
94 /* We need first to cancel streams, before unmapping */ 92 /* We need first to cancel streams, before unmapping */
95 if (q->streaming) 93 if (q->streaming)
@@ -128,8 +126,8 @@ static void videobuf_vm_close(struct vm_area_struct *vma)
128 126
129 kfree(map); 127 kfree(map);
130 128
129 videobuf_queue_unlock(q);
131 } 130 }
132 videobuf_queue_unlock(q);
133} 131}
134 132
135static const struct vm_operations_struct videobuf_vm_ops = { 133static const struct vm_operations_struct videobuf_vm_ops = {
diff --git a/drivers/media/v4l2-core/videobuf-dma-sg.c b/drivers/media/v4l2-core/videobuf-dma-sg.c
index 9db674ccdc68..828e7c10bd70 100644
--- a/drivers/media/v4l2-core/videobuf-dma-sg.c
+++ b/drivers/media/v4l2-core/videobuf-dma-sg.c
@@ -338,14 +338,11 @@ EXPORT_SYMBOL_GPL(videobuf_dma_free);
338static void videobuf_vm_open(struct vm_area_struct *vma) 338static void videobuf_vm_open(struct vm_area_struct *vma)
339{ 339{
340 struct videobuf_mapping *map = vma->vm_private_data; 340 struct videobuf_mapping *map = vma->vm_private_data;
341 struct videobuf_queue *q = map->q;
342 341
343 dprintk(2, "vm_open %p [count=%d,vma=%08lx-%08lx]\n", map, 342 dprintk(2, "vm_open %p [count=%d,vma=%08lx-%08lx]\n", map,
344 map->count, vma->vm_start, vma->vm_end); 343 map->count, vma->vm_start, vma->vm_end);
345 344
346 videobuf_queue_lock(q);
347 map->count++; 345 map->count++;
348 videobuf_queue_unlock(q);
349} 346}
350 347
351static void videobuf_vm_close(struct vm_area_struct *vma) 348static void videobuf_vm_close(struct vm_area_struct *vma)
@@ -358,9 +355,10 @@ static void videobuf_vm_close(struct vm_area_struct *vma)
358 dprintk(2, "vm_close %p [count=%d,vma=%08lx-%08lx]\n", map, 355 dprintk(2, "vm_close %p [count=%d,vma=%08lx-%08lx]\n", map,
359 map->count, vma->vm_start, vma->vm_end); 356 map->count, vma->vm_start, vma->vm_end);
360 357
361 videobuf_queue_lock(q); 358 map->count--;
362 if (!--map->count) { 359 if (0 == map->count) {
363 dprintk(1, "munmap %p q=%p\n", map, q); 360 dprintk(1, "munmap %p q=%p\n", map, q);
361 videobuf_queue_lock(q);
364 for (i = 0; i < VIDEO_MAX_FRAME; i++) { 362 for (i = 0; i < VIDEO_MAX_FRAME; i++) {
365 if (NULL == q->bufs[i]) 363 if (NULL == q->bufs[i])
366 continue; 364 continue;
@@ -376,9 +374,9 @@ static void videobuf_vm_close(struct vm_area_struct *vma)
376 q->bufs[i]->baddr = 0; 374 q->bufs[i]->baddr = 0;
377 q->ops->buf_release(q, q->bufs[i]); 375 q->ops->buf_release(q, q->bufs[i]);
378 } 376 }
377 videobuf_queue_unlock(q);
379 kfree(map); 378 kfree(map);
380 } 379 }
381 videobuf_queue_unlock(q);
382 return; 380 return;
383} 381}
384 382
diff --git a/drivers/media/v4l2-core/videobuf-vmalloc.c b/drivers/media/v4l2-core/videobuf-vmalloc.c
index 1365c651c177..2ff7fcc77b11 100644
--- a/drivers/media/v4l2-core/videobuf-vmalloc.c
+++ b/drivers/media/v4l2-core/videobuf-vmalloc.c
@@ -54,14 +54,11 @@ MODULE_LICENSE("GPL");
54static void videobuf_vm_open(struct vm_area_struct *vma) 54static void videobuf_vm_open(struct vm_area_struct *vma)
55{ 55{
56 struct videobuf_mapping *map = vma->vm_private_data; 56 struct videobuf_mapping *map = vma->vm_private_data;
57 struct videobuf_queue *q = map->q;
58 57
59 dprintk(2, "vm_open %p [count=%u,vma=%08lx-%08lx]\n", map, 58 dprintk(2, "vm_open %p [count=%u,vma=%08lx-%08lx]\n", map,
60 map->count, vma->vm_start, vma->vm_end); 59 map->count, vma->vm_start, vma->vm_end);
61 60
62 videobuf_queue_lock(q);
63 map->count++; 61 map->count++;
64 videobuf_queue_unlock(q);
65} 62}
66 63
67static void videobuf_vm_close(struct vm_area_struct *vma) 64static void videobuf_vm_close(struct vm_area_struct *vma)
@@ -73,11 +70,12 @@ static void videobuf_vm_close(struct vm_area_struct *vma)
73 dprintk(2, "vm_close %p [count=%u,vma=%08lx-%08lx]\n", map, 70 dprintk(2, "vm_close %p [count=%u,vma=%08lx-%08lx]\n", map,
74 map->count, vma->vm_start, vma->vm_end); 71 map->count, vma->vm_start, vma->vm_end);
75 72
76 videobuf_queue_lock(q); 73 map->count--;
77 if (!--map->count) { 74 if (0 == map->count) {
78 struct videobuf_vmalloc_memory *mem; 75 struct videobuf_vmalloc_memory *mem;
79 76
80 dprintk(1, "munmap %p q=%p\n", map, q); 77 dprintk(1, "munmap %p q=%p\n", map, q);
78 videobuf_queue_lock(q);
81 79
82 /* We need first to cancel streams, before unmapping */ 80 /* We need first to cancel streams, before unmapping */
83 if (q->streaming) 81 if (q->streaming)
@@ -116,8 +114,8 @@ static void videobuf_vm_close(struct vm_area_struct *vma)
116 114
117 kfree(map); 115 kfree(map);
118 116
117 videobuf_queue_unlock(q);
119 } 118 }
120 videobuf_queue_unlock(q);
121 119
122 return; 120 return;
123} 121}
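
The three videobuf hunks above share one pattern: vm_open() no longer takes the queue lock around the map->count increment, and vm_close() only takes it once the last mapping is gone and the buffers actually have to be torn down. A minimal sketch of the resulting vm_close() shape, reconstructed from the hunks rather than copied from the file:

static void videobuf_vm_close(struct vm_area_struct *vma)
{
	struct videobuf_mapping *map = vma->vm_private_data;
	struct videobuf_queue *q = map->q;

	map->count--;				/* the refcount itself is no longer under the queue lock */
	if (0 == map->count) {
		videobuf_queue_lock(q);		/* lock only around the real teardown */
		/* ... cancel streaming / release buffers as in the hunks above ... */
		videobuf_queue_unlock(q);
		kfree(map);
	}
}
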
diff --git a/drivers/media/v4l2-core/videobuf2-core.c b/drivers/media/v4l2-core/videobuf2-core.c
index 5a5fb7f09b7b..a127925c9d61 100644
--- a/drivers/media/v4l2-core/videobuf2-core.c
+++ b/drivers/media/v4l2-core/videobuf2-core.c
@@ -1776,6 +1776,11 @@ static int vb2_internal_streamon(struct vb2_queue *q, enum v4l2_buf_type type)
1776 return 0; 1776 return 0;
1777 } 1777 }
1778 1778
1779 if (!q->num_buffers) {
1780 dprintk(1, "streamon: no buffers have been allocated\n");
1781 return -EINVAL;
1782 }
1783
1779 /* 1784 /*
1780 * If any buffers were queued before streamon, 1785 * If any buffers were queued before streamon,
1781 * we can now pass them to driver for processing. 1786 * we can now pass them to driver for processing.
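
The videobuf2-core hunk makes streamon fail early when no buffers exist yet. In userspace terms the expected call order becomes the following; this is a hedged illustration of the effect, not code from the patch:

	/* buffers must be allocated before streaming is started */
	ioctl(fd, VIDIOC_REQBUFS, &req);	/* allocate buffers first                 */
	ioctl(fd, VIDIOC_STREAMON, &type);	/* otherwise this now fails with EINVAL   */
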
diff --git a/drivers/message/i2o/i2o_config.c b/drivers/message/i2o/i2o_config.c
index a60c188c2bd9..04bd3b6de401 100644
--- a/drivers/message/i2o/i2o_config.c
+++ b/drivers/message/i2o/i2o_config.c
@@ -754,19 +754,19 @@ static long i2o_cfg_compat_ioctl(struct file *file, unsigned cmd,
754 unsigned long arg) 754 unsigned long arg)
755{ 755{
756 int ret; 756 int ret;
757 mutex_lock(&i2o_cfg_mutex);
758 switch (cmd) { 757 switch (cmd) {
759 case I2OGETIOPS: 758 case I2OGETIOPS:
760 ret = i2o_cfg_ioctl(file, cmd, arg); 759 ret = i2o_cfg_ioctl(file, cmd, arg);
761 break; 760 break;
762 case I2OPASSTHRU32: 761 case I2OPASSTHRU32:
762 mutex_lock(&i2o_cfg_mutex);
763 ret = i2o_cfg_passthru32(file, cmd, arg); 763 ret = i2o_cfg_passthru32(file, cmd, arg);
764 mutex_unlock(&i2o_cfg_mutex);
764 break; 765 break;
765 default: 766 default:
766 ret = -ENOIOCTLCMD; 767 ret = -ENOIOCTLCMD;
767 break; 768 break;
768 } 769 }
769 mutex_unlock(&i2o_cfg_mutex);
770 return ret; 770 return ret;
771} 771}
772 772
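
The i2o_config hunk narrows the lock scope: instead of wrapping the whole compat-ioctl switch, i2o_cfg_mutex is now taken only around the I2OPASSTHRU32 case. The resulting function shape, pieced together from the hunk:

static long i2o_cfg_compat_ioctl(struct file *file, unsigned cmd,
				 unsigned long arg)
{
	int ret;

	switch (cmd) {
	case I2OGETIOPS:
		ret = i2o_cfg_ioctl(file, cmd, arg);	/* runs without the global mutex */
		break;
	case I2OPASSTHRU32:
		mutex_lock(&i2o_cfg_mutex);		/* lock only the passthru path */
		ret = i2o_cfg_passthru32(file, cmd, arg);
		mutex_unlock(&i2o_cfg_mutex);
		break;
	default:
		ret = -ENOIOCTLCMD;
		break;
	}
	return ret;
}
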
diff --git a/drivers/misc/sgi-gru/grukdump.c b/drivers/misc/sgi-gru/grukdump.c
index 9b2062d17327..2bef3f76032a 100644
--- a/drivers/misc/sgi-gru/grukdump.c
+++ b/drivers/misc/sgi-gru/grukdump.c
@@ -139,8 +139,11 @@ static int gru_dump_context(struct gru_state *gru, int ctxnum,
139 139
140 ubuf += sizeof(hdr); 140 ubuf += sizeof(hdr);
141 ubufcch = ubuf; 141 ubufcch = ubuf;
142 if (gru_user_copy_handle(&ubuf, cch)) 142 if (gru_user_copy_handle(&ubuf, cch)) {
143 goto fail; 143 if (cch_locked)
144 unlock_cch_handle(cch);
145 return -EFAULT;
146 }
144 if (cch_locked) 147 if (cch_locked)
145 ubufcch->delresp = 0; 148 ubufcch->delresp = 0;
146 bytes = sizeof(hdr) + GRU_CACHE_LINE_BYTES; 149 bytes = sizeof(hdr) + GRU_CACHE_LINE_BYTES;
@@ -179,10 +182,6 @@ static int gru_dump_context(struct gru_state *gru, int ctxnum,
179 ret = -EFAULT; 182 ret = -EFAULT;
180 183
181 return ret ? ret : bytes; 184 return ret ? ret : bytes;
182
183fail:
184 unlock_cch_handle(cch);
185 return -EFAULT;
186} 185}
187 186
188int gru_dump_chiplet_request(unsigned long arg) 187int gru_dump_chiplet_request(unsigned long arg)
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 4c08018d7333..71ba18efa15b 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -1270,9 +1270,13 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
1270 1270
1271 if (slave_ops->ndo_set_mac_address == NULL) { 1271 if (slave_ops->ndo_set_mac_address == NULL) {
1272 if (!bond_has_slaves(bond)) { 1272 if (!bond_has_slaves(bond)) {
1273 pr_warning("%s: Warning: The first slave device specified does not support setting the MAC address. Setting fail_over_mac to active.", 1273 pr_warn("%s: Warning: The first slave device specified does not support setting the MAC address.\n",
1274 bond_dev->name); 1274 bond_dev->name);
1275 bond->params.fail_over_mac = BOND_FOM_ACTIVE; 1275 if (bond->params.mode == BOND_MODE_ACTIVEBACKUP) {
1276 bond->params.fail_over_mac = BOND_FOM_ACTIVE;
1277 pr_warn("%s: Setting fail_over_mac to active for active-backup mode.\n",
1278 bond_dev->name);
1279 }
1276 } else if (bond->params.fail_over_mac != BOND_FOM_ACTIVE) { 1280 } else if (bond->params.fail_over_mac != BOND_FOM_ACTIVE) {
1277 pr_err("%s: Error: The slave device specified does not support setting the MAC address, but fail_over_mac is not set to active.\n", 1281 pr_err("%s: Error: The slave device specified does not support setting the MAC address, but fail_over_mac is not set to active.\n",
1278 bond_dev->name); 1282 bond_dev->name);
@@ -1315,7 +1319,8 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
1315 */ 1319 */
1316 memcpy(new_slave->perm_hwaddr, slave_dev->dev_addr, ETH_ALEN); 1320 memcpy(new_slave->perm_hwaddr, slave_dev->dev_addr, ETH_ALEN);
1317 1321
1318 if (!bond->params.fail_over_mac) { 1322 if (!bond->params.fail_over_mac ||
1323 bond->params.mode != BOND_MODE_ACTIVEBACKUP) {
1319 /* 1324 /*
1320 * Set slave to master's mac address. The application already 1325 * Set slave to master's mac address. The application already
1321 * set the master's mac address to that of the first slave 1326 * set the master's mac address to that of the first slave
@@ -1505,7 +1510,6 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
1505 slave_dev->npinfo = bond->dev->npinfo; 1510 slave_dev->npinfo = bond->dev->npinfo;
1506 if (slave_dev->npinfo) { 1511 if (slave_dev->npinfo) {
1507 if (slave_enable_netpoll(new_slave)) { 1512 if (slave_enable_netpoll(new_slave)) {
1508 read_unlock(&bond->lock);
1509 pr_info("Error, %s: master_dev is using netpoll, " 1513 pr_info("Error, %s: master_dev is using netpoll, "
1510 "but new slave device does not support netpoll.\n", 1514 "but new slave device does not support netpoll.\n",
1511 bond_dev->name); 1515 bond_dev->name);
@@ -1579,7 +1583,8 @@ err_close:
1579 dev_close(slave_dev); 1583 dev_close(slave_dev);
1580 1584
1581err_restore_mac: 1585err_restore_mac:
1582 if (!bond->params.fail_over_mac) { 1586 if (!bond->params.fail_over_mac ||
1587 bond->params.mode != BOND_MODE_ACTIVEBACKUP) {
1583 /* XXX TODO - fom follow mode needs to change master's 1588 /* XXX TODO - fom follow mode needs to change master's
1584 * MAC if this slave's MAC is in use by the bond, or at 1589 * MAC if this slave's MAC is in use by the bond, or at
1585 * least print a warning. 1590 * least print a warning.
@@ -1672,7 +1677,8 @@ static int __bond_release_one(struct net_device *bond_dev,
1672 1677
1673 bond->current_arp_slave = NULL; 1678 bond->current_arp_slave = NULL;
1674 1679
1675 if (!all && !bond->params.fail_over_mac) { 1680 if (!all && (!bond->params.fail_over_mac ||
1681 bond->params.mode != BOND_MODE_ACTIVEBACKUP)) {
1676 if (ether_addr_equal_64bits(bond_dev->dev_addr, slave->perm_hwaddr) && 1682 if (ether_addr_equal_64bits(bond_dev->dev_addr, slave->perm_hwaddr) &&
1677 bond_has_slaves(bond)) 1683 bond_has_slaves(bond))
1678 pr_warn("%s: Warning: the permanent HWaddr of %s - %pM - is still in use by %s. Set the HWaddr of %s to a different address to avoid conflicts.\n", 1684 pr_warn("%s: Warning: the permanent HWaddr of %s - %pM - is still in use by %s. Set the HWaddr of %s to a different address to avoid conflicts.\n",
@@ -1769,7 +1775,8 @@ static int __bond_release_one(struct net_device *bond_dev,
1769 /* close slave before restoring its mac address */ 1775 /* close slave before restoring its mac address */
1770 dev_close(slave_dev); 1776 dev_close(slave_dev);
1771 1777
1772 if (bond->params.fail_over_mac != BOND_FOM_ACTIVE) { 1778 if (bond->params.fail_over_mac != BOND_FOM_ACTIVE ||
1779 bond->params.mode != BOND_MODE_ACTIVEBACKUP) {
1773 /* restore original ("permanent") mac address */ 1780 /* restore original ("permanent") mac address */
1774 memcpy(addr.sa_data, slave->perm_hwaddr, ETH_ALEN); 1781 memcpy(addr.sa_data, slave->perm_hwaddr, ETH_ALEN);
1775 addr.sa_family = slave_dev->type; 1782 addr.sa_family = slave_dev->type;
@@ -3431,7 +3438,8 @@ static int bond_set_mac_address(struct net_device *bond_dev, void *addr)
3431 /* If fail_over_mac is enabled, do nothing and return success. 3438 /* If fail_over_mac is enabled, do nothing and return success.
3432 * Returning an error causes ifenslave to fail. 3439 * Returning an error causes ifenslave to fail.
3433 */ 3440 */
3434 if (bond->params.fail_over_mac) 3441 if (bond->params.fail_over_mac &&
3442 bond->params.mode == BOND_MODE_ACTIVEBACKUP)
3435 return 0; 3443 return 0;
3436 3444
3437 if (!is_valid_ether_addr(sa->sa_data)) 3445 if (!is_valid_ether_addr(sa->sa_data))
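
Every bonding hunk above adds the same qualifier: fail_over_mac only affects MAC handling when the bond runs in active-backup mode. The repeated condition can be summarised by a predicate of roughly this form; this is a hypothetical helper shown only to restate the check, not something the patch adds:

static bool bond_fom_applies(const struct bonding *bond)
{
	/* fail_over_mac semantics are only honoured in active-backup mode */
	return bond->params.fail_over_mac &&
	       bond->params.mode == BOND_MODE_ACTIVEBACKUP;
}

The enslave and release paths then copy or restore the slave MAC when this predicate is false, and bond_set_mac_address() short-circuits when it is true, as the hunks show.
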
diff --git a/drivers/net/can/Kconfig b/drivers/net/can/Kconfig
index d447b881bbde..9e7d95dae2c7 100644
--- a/drivers/net/can/Kconfig
+++ b/drivers/net/can/Kconfig
@@ -104,7 +104,7 @@ config CAN_JANZ_ICAN3
104 104
105config CAN_FLEXCAN 105config CAN_FLEXCAN
106 tristate "Support for Freescale FLEXCAN based chips" 106 tristate "Support for Freescale FLEXCAN based chips"
107 depends on (ARM && CPU_LITTLE_ENDIAN) || PPC 107 depends on ARM || PPC
108 ---help--- 108 ---help---
109 Say Y here if you want support for Freescale FlexCAN. 109
110 110
diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c
index 13a909822e25..fc59bc6f040b 100644
--- a/drivers/net/can/dev.c
+++ b/drivers/net/can/dev.c
@@ -323,19 +323,10 @@ void can_put_echo_skb(struct sk_buff *skb, struct net_device *dev,
323 } 323 }
324 324
325 if (!priv->echo_skb[idx]) { 325 if (!priv->echo_skb[idx]) {
326 struct sock *srcsk = skb->sk;
327 326
328 if (atomic_read(&skb->users) != 1) { 327 skb = can_create_echo_skb(skb);
329 struct sk_buff *old_skb = skb; 328 if (!skb)
330 329 return;
331 skb = skb_clone(old_skb, GFP_ATOMIC);
332 kfree_skb(old_skb);
333 if (!skb)
334 return;
335 } else
336 skb_orphan(skb);
337
338 skb->sk = srcsk;
339 330
340 /* make settings for echo to reduce code in irq context */ 331 /* make settings for echo to reduce code in irq context */
341 skb->protocol = htons(ETH_P_CAN); 332 skb->protocol = htons(ETH_P_CAN);
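
The block removed from can_put_echo_skb() (and again from janz-ican3 and vcan further down) moves into the new can_create_echo_skb() helper from <linux/can/skb.h>. Based on the deleted lines, the helper presumably does roughly the following; this is a reconstruction from the hunk, not the actual header:

static inline struct sk_buff *can_create_echo_skb(struct sk_buff *skb)
{
	struct sock *srcsk = skb->sk;

	if (atomic_read(&skb->users) != 1) {
		struct sk_buff *old_skb = skb;

		skb = skb_clone(old_skb, GFP_ATOMIC);	/* take a private copy for the echo */
		kfree_skb(old_skb);
		if (!skb)
			return NULL;
	} else {
		skb_orphan(skb);			/* drop the socket reference */
	}

	skb->sk = srcsk;				/* keep the originating socket for echo matching */
	return skb;
}
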
diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c
index aaed97bee471..320bef2dba42 100644
--- a/drivers/net/can/flexcan.c
+++ b/drivers/net/can/flexcan.c
@@ -235,9 +235,12 @@ static const struct can_bittiming_const flexcan_bittiming_const = {
235}; 235};
236 236
237/* 237/*
238 * Abstract off the read/write for arm versus ppc. 238 * Abstract off the read/write for arm versus ppc. This
239 * assumes that PPC uses big-endian registers and everything
240 * else uses little-endian registers, independent of CPU
241 * endianess.
239 */ 242 */
240#if defined(__BIG_ENDIAN) 243#if defined(CONFIG_PPC)
241static inline u32 flexcan_read(void __iomem *addr) 244static inline u32 flexcan_read(void __iomem *addr)
242{ 245{
243 return in_be32(addr); 246 return in_be32(addr);
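
The flexcan hunk switches the accessor selection from __BIG_ENDIAN to CONFIG_PPC, so a big-endian ARM kernel keeps using little-endian register accessors. Only the PPC read side is visible in the hunk; the full pair presumably looks like this (the #else branch and the write helpers are assumed, not shown in the diff):

#if defined(CONFIG_PPC)
static inline u32 flexcan_read(void __iomem *addr)
{
	return in_be32(addr);		/* FlexCAN registers are big-endian on PPC */
}

static inline void flexcan_write(u32 val, void __iomem *addr)
{
	out_be32(addr, val);
}
#else
static inline u32 flexcan_read(void __iomem *addr)
{
	return readl(addr);		/* little-endian accessors everywhere else */
}

static inline void flexcan_write(u32 val, void __iomem *addr)
{
	writel(val, addr);
}
#endif
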
diff --git a/drivers/net/can/janz-ican3.c b/drivers/net/can/janz-ican3.c
index e24e6690d672..71594e5676fd 100644
--- a/drivers/net/can/janz-ican3.c
+++ b/drivers/net/can/janz-ican3.c
@@ -18,6 +18,7 @@
18#include <linux/netdevice.h> 18#include <linux/netdevice.h>
19#include <linux/can.h> 19#include <linux/can.h>
20#include <linux/can/dev.h> 20#include <linux/can/dev.h>
21#include <linux/can/skb.h>
21#include <linux/can/error.h> 22#include <linux/can/error.h>
22 23
23#include <linux/mfd/janz.h> 24#include <linux/mfd/janz.h>
@@ -1133,20 +1134,9 @@ static void ican3_handle_message(struct ican3_dev *mod, struct ican3_msg *msg)
1133 */ 1134 */
1134static void ican3_put_echo_skb(struct ican3_dev *mod, struct sk_buff *skb) 1135static void ican3_put_echo_skb(struct ican3_dev *mod, struct sk_buff *skb)
1135{ 1136{
1136 struct sock *srcsk = skb->sk; 1137 skb = can_create_echo_skb(skb);
1137 1138 if (!skb)
1138 if (atomic_read(&skb->users) != 1) { 1139 return;
1139 struct sk_buff *old_skb = skb;
1140
1141 skb = skb_clone(old_skb, GFP_ATOMIC);
1142 kfree_skb(old_skb);
1143 if (!skb)
1144 return;
1145 } else {
1146 skb_orphan(skb);
1147 }
1148
1149 skb->sk = srcsk;
1150 1140
1151 /* save this skb for tx interrupt echo handling */ 1141 /* save this skb for tx interrupt echo handling */
1152 skb_queue_tail(&mod->echoq, skb); 1142 skb_queue_tail(&mod->echoq, skb);
@@ -1322,7 +1312,7 @@ static int ican3_napi(struct napi_struct *napi, int budget)
1322 1312
1323 /* process all communication messages */ 1313 /* process all communication messages */
1324 while (true) { 1314 while (true) {
1325 struct ican3_msg msg; 1315 struct ican3_msg uninitialized_var(msg);
1326 ret = ican3_recv_msg(mod, &msg); 1316 ret = ican3_recv_msg(mod, &msg);
1327 if (ret) 1317 if (ret)
1328 break; 1318 break;
diff --git a/drivers/net/can/vcan.c b/drivers/net/can/vcan.c
index 0a2a5ee79a17..4e94057ef5cf 100644
--- a/drivers/net/can/vcan.c
+++ b/drivers/net/can/vcan.c
@@ -46,6 +46,7 @@
46#include <linux/if_ether.h> 46#include <linux/if_ether.h>
47#include <linux/can.h> 47#include <linux/can.h>
48#include <linux/can/dev.h> 48#include <linux/can/dev.h>
49#include <linux/can/skb.h>
49#include <linux/slab.h> 50#include <linux/slab.h>
50#include <net/rtnetlink.h> 51#include <net/rtnetlink.h>
51 52
@@ -109,25 +110,23 @@ static netdev_tx_t vcan_tx(struct sk_buff *skb, struct net_device *dev)
109 stats->rx_packets++; 110 stats->rx_packets++;
110 stats->rx_bytes += cfd->len; 111 stats->rx_bytes += cfd->len;
111 } 112 }
112 kfree_skb(skb); 113 consume_skb(skb);
113 return NETDEV_TX_OK; 114 return NETDEV_TX_OK;
114 } 115 }
115 116
116 /* perform standard echo handling for CAN network interfaces */ 117 /* perform standard echo handling for CAN network interfaces */
117 118
118 if (loop) { 119 if (loop) {
119 struct sock *srcsk = skb->sk;
120 120
121 skb = skb_share_check(skb, GFP_ATOMIC); 121 skb = can_create_echo_skb(skb);
122 if (!skb) 122 if (!skb)
123 return NETDEV_TX_OK; 123 return NETDEV_TX_OK;
124 124
125 /* receive with packet counting */ 125 /* receive with packet counting */
126 skb->sk = srcsk;
127 vcan_rx(skb, dev); 126 vcan_rx(skb, dev);
128 } else { 127 } else {
129 /* no looped packets => no counting */ 128 /* no looped packets => no counting */
130 kfree_skb(skb); 129 consume_skb(skb);
131 } 130 }
132 return NETDEV_TX_OK; 131 return NETDEV_TX_OK;
133} 132}
diff --git a/drivers/net/ethernet/3com/3c59x.c b/drivers/net/ethernet/3com/3c59x.c
index 0f4241c6e97e..238ccea965c8 100644
--- a/drivers/net/ethernet/3com/3c59x.c
+++ b/drivers/net/ethernet/3com/3c59x.c
@@ -3294,7 +3294,6 @@ static int __init vortex_init(void)
3294 3294
3295static void __exit vortex_eisa_cleanup(void) 3295static void __exit vortex_eisa_cleanup(void)
3296{ 3296{
3297 struct vortex_private *vp;
3298 void __iomem *ioaddr; 3297 void __iomem *ioaddr;
3299 3298
3300#ifdef CONFIG_EISA 3299#ifdef CONFIG_EISA
@@ -3303,7 +3302,6 @@ static void __exit vortex_eisa_cleanup(void)
3303#endif 3302#endif
3304 3303
3305 if (compaq_net_device) { 3304 if (compaq_net_device) {
3306 vp = netdev_priv(compaq_net_device);
3307 ioaddr = ioport_map(compaq_net_device->base_addr, 3305 ioaddr = ioport_map(compaq_net_device->base_addr,
3308 VORTEX_TOTAL_SIZE); 3306 VORTEX_TOTAL_SIZE);
3309 3307
diff --git a/drivers/net/ethernet/allwinner/sun4i-emac.c b/drivers/net/ethernet/allwinner/sun4i-emac.c
index 0cc21437478c..511f6eecd58b 100644
--- a/drivers/net/ethernet/allwinner/sun4i-emac.c
+++ b/drivers/net/ethernet/allwinner/sun4i-emac.c
@@ -929,6 +929,9 @@ static int emac_resume(struct platform_device *dev)
929} 929}
930 930
931static const struct of_device_id emac_of_match[] = { 931static const struct of_device_id emac_of_match[] = {
932 {.compatible = "allwinner,sun4i-a10-emac",},
933
934 /* Deprecated */
932 {.compatible = "allwinner,sun4i-emac",}, 935 {.compatible = "allwinner,sun4i-emac",},
933 {}, 936 {},
934}; 937};
diff --git a/drivers/net/ethernet/atheros/alx/main.c b/drivers/net/ethernet/atheros/alx/main.c
index e92ffd6e1c15..2e45f6ec1bf0 100644
--- a/drivers/net/ethernet/atheros/alx/main.c
+++ b/drivers/net/ethernet/atheros/alx/main.c
@@ -1292,6 +1292,7 @@ static int alx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1292 alx = netdev_priv(netdev); 1292 alx = netdev_priv(netdev);
1293 spin_lock_init(&alx->hw.mdio_lock); 1293 spin_lock_init(&alx->hw.mdio_lock);
1294 spin_lock_init(&alx->irq_lock); 1294 spin_lock_init(&alx->irq_lock);
1295 spin_lock_init(&alx->stats_lock);
1295 alx->dev = netdev; 1296 alx->dev = netdev;
1296 alx->hw.pdev = pdev; 1297 alx->hw.pdev = pdev;
1297 alx->msg_enable = NETIF_MSG_LINK | NETIF_MSG_HW | NETIF_MSG_IFUP | 1298 alx->msg_enable = NETIF_MSG_LINK | NETIF_MSG_HW | NETIF_MSG_IFUP |
diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c
index 9d2dedadf2df..cda25ac45b47 100644
--- a/drivers/net/ethernet/broadcom/bnx2.c
+++ b/drivers/net/ethernet/broadcom/bnx2.c
@@ -85,7 +85,7 @@ MODULE_FIRMWARE(FW_RV2P_FILE_09_Ax);
85 85
86static int disable_msi = 0; 86static int disable_msi = 0;
87 87
88module_param(disable_msi, int, 0); 88module_param(disable_msi, int, S_IRUGO);
89MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)"); 89MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
90 90
91typedef enum { 91typedef enum {
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
index 17d1689aec6b..bfc58d488bb5 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
@@ -936,7 +936,7 @@ static inline int bnx2x_func_start(struct bnx2x *bp)
936 else /* CHIP_IS_E1X */ 936 else /* CHIP_IS_E1X */
937 start_params->network_cos_mode = FW_WRR; 937 start_params->network_cos_mode = FW_WRR;
938 938
939 start_params->gre_tunnel_mode = IPGRE_TUNNEL; 939 start_params->gre_tunnel_mode = L2GRE_TUNNEL;
940 start_params->gre_tunnel_rss = GRE_INNER_HEADERS_RSS; 940 start_params->gre_tunnel_rss = GRE_INNER_HEADERS_RSS;
941 941
942 return bnx2x_func_state_change(bp, &func_params); 942 return bnx2x_func_state_change(bp, &func_params);
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index c9c445e7b4a5..7d4382286457 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -95,29 +95,29 @@ MODULE_FIRMWARE(FW_FILE_NAME_E1H);
95MODULE_FIRMWARE(FW_FILE_NAME_E2); 95MODULE_FIRMWARE(FW_FILE_NAME_E2);
96 96
97int bnx2x_num_queues; 97int bnx2x_num_queues;
98module_param_named(num_queues, bnx2x_num_queues, int, 0); 98module_param_named(num_queues, bnx2x_num_queues, int, S_IRUGO);
99MODULE_PARM_DESC(num_queues, 99MODULE_PARM_DESC(num_queues,
100 " Set number of queues (default is as a number of CPUs)"); 100 " Set number of queues (default is as a number of CPUs)");
101 101
102static int disable_tpa; 102static int disable_tpa;
103module_param(disable_tpa, int, 0); 103module_param(disable_tpa, int, S_IRUGO);
104MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature"); 104MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature");
105 105
106static int int_mode; 106static int int_mode;
107module_param(int_mode, int, 0); 107module_param(int_mode, int, S_IRUGO);
108MODULE_PARM_DESC(int_mode, " Force interrupt mode other than MSI-X " 108MODULE_PARM_DESC(int_mode, " Force interrupt mode other than MSI-X "
109 "(1 INT#x; 2 MSI)"); 109 "(1 INT#x; 2 MSI)");
110 110
111static int dropless_fc; 111static int dropless_fc;
112module_param(dropless_fc, int, 0); 112module_param(dropless_fc, int, S_IRUGO);
113MODULE_PARM_DESC(dropless_fc, " Pause on exhausted host ring"); 113MODULE_PARM_DESC(dropless_fc, " Pause on exhausted host ring");
114 114
115static int mrrs = -1; 115static int mrrs = -1;
116module_param(mrrs, int, 0); 116module_param(mrrs, int, S_IRUGO);
117MODULE_PARM_DESC(mrrs, " Force Max Read Req Size (0..3) (for debug)"); 117MODULE_PARM_DESC(mrrs, " Force Max Read Req Size (0..3) (for debug)");
118 118
119static int debug; 119static int debug;
120module_param(debug, int, 0); 120module_param(debug, int, S_IRUGO);
121MODULE_PARM_DESC(debug, " Default debug msglevel"); 121MODULE_PARM_DESC(debug, " Default debug msglevel");
122 122
123struct workqueue_struct *bnx2x_wq; 123struct workqueue_struct *bnx2x_wq;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
index aec5ef2ed7ce..e42f48df6e94 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
@@ -1446,12 +1446,12 @@ static void bnx2x_vf_igu_reset(struct bnx2x *bp, struct bnx2x_virtf *vf)
1446 if (vf->cfg_flags & VF_CFG_INT_SIMD) 1446 if (vf->cfg_flags & VF_CFG_INT_SIMD)
1447 val |= IGU_VF_CONF_SINGLE_ISR_EN; 1447 val |= IGU_VF_CONF_SINGLE_ISR_EN;
1448 val &= ~IGU_VF_CONF_PARENT_MASK; 1448 val &= ~IGU_VF_CONF_PARENT_MASK;
1449 val |= BP_FUNC(bp) << IGU_VF_CONF_PARENT_SHIFT; /* parent PF */ 1449 val |= (BP_ABS_FUNC(bp) >> 1) << IGU_VF_CONF_PARENT_SHIFT;
1450 REG_WR(bp, IGU_REG_VF_CONFIGURATION, val); 1450 REG_WR(bp, IGU_REG_VF_CONFIGURATION, val);
1451 1451
1452 DP(BNX2X_MSG_IOV, 1452 DP(BNX2X_MSG_IOV,
1453 "value in IGU_REG_VF_CONFIGURATION of vf %d after write %x\n", 1453 "value in IGU_REG_VF_CONFIGURATION of vf %d after write is 0x%08x\n",
1454 vf->abs_vfid, REG_RD(bp, IGU_REG_VF_CONFIGURATION)); 1454 vf->abs_vfid, val);
1455 1455
1456 bnx2x_pretend_func(bp, BP_ABS_FUNC(bp)); 1456 bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
1457 1457
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index e2ca03e23dc1..3167ed6593b0 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -2609,13 +2609,14 @@ static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
2609 2609
2610 tg3_writephy(tp, MII_CTRL1000, phy9_orig); 2610 tg3_writephy(tp, MII_CTRL1000, phy9_orig);
2611 2611
2612 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) { 2612 err = tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32);
2613 reg32 &= ~0x3000; 2613 if (err)
2614 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32); 2614 return err;
2615 } else if (!err)
2616 err = -EBUSY;
2617 2615
2618 return err; 2616 reg32 &= ~0x3000;
2617 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2618
2619 return 0;
2619} 2620}
2620 2621
2621static void tg3_carrier_off(struct tg3 *tp) 2622static void tg3_carrier_off(struct tg3 *tp)
@@ -14113,12 +14114,12 @@ static int tg3_change_mtu(struct net_device *dev, int new_mtu)
14113 14114
14114 tg3_netif_stop(tp); 14115 tg3_netif_stop(tp);
14115 14116
14117 tg3_set_mtu(dev, tp, new_mtu);
14118
14116 tg3_full_lock(tp, 1); 14119 tg3_full_lock(tp, 1);
14117 14120
14118 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); 14121 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
14119 14122
14120 tg3_set_mtu(dev, tp, new_mtu);
14121
14122 /* Reset PHY, otherwise the read DMA engine will be in a mode that 14123 /* Reset PHY, otherwise the read DMA engine will be in a mode that
14123 * breaks all requests to 256 bytes. 14124 * breaks all requests to 256 bytes.
14124 */ 14125 */
diff --git a/drivers/net/ethernet/ethoc.c b/drivers/net/ethernet/ethoc.c
index 4de8cfd149cf..55e0fa03dc90 100644
--- a/drivers/net/ethernet/ethoc.c
+++ b/drivers/net/ethernet/ethoc.c
@@ -13,6 +13,7 @@
13 13
14#include <linux/dma-mapping.h> 14#include <linux/dma-mapping.h>
15#include <linux/etherdevice.h> 15#include <linux/etherdevice.h>
16#include <linux/clk.h>
16#include <linux/crc32.h> 17#include <linux/crc32.h>
17#include <linux/interrupt.h> 18#include <linux/interrupt.h>
18#include <linux/io.h> 19#include <linux/io.h>
@@ -51,6 +52,7 @@ MODULE_PARM_DESC(buffer_size, "DMA buffer allocation size");
51#define ETH_HASH0 0x48 52#define ETH_HASH0 0x48
52#define ETH_HASH1 0x4c 53#define ETH_HASH1 0x4c
53#define ETH_TXCTRL 0x50 54#define ETH_TXCTRL 0x50
55#define ETH_END 0x54
54 56
55/* mode register */ 57/* mode register */
56#define MODER_RXEN (1 << 0) /* receive enable */ 58#define MODER_RXEN (1 << 0) /* receive enable */
@@ -179,6 +181,7 @@ MODULE_PARM_DESC(buffer_size, "DMA buffer allocation size");
179 * @membase: pointer to buffer memory region 181 * @membase: pointer to buffer memory region
180 * @dma_alloc: dma allocated buffer size 182 * @dma_alloc: dma allocated buffer size
181 * @io_region_size: I/O memory region size 183 * @io_region_size: I/O memory region size
184 * @num_bd: number of buffer descriptors
182 * @num_tx: number of send buffers 185 * @num_tx: number of send buffers
183 * @cur_tx: last send buffer written 186 * @cur_tx: last send buffer written
184 * @dty_tx: last buffer actually sent 187 * @dty_tx: last buffer actually sent
@@ -199,6 +202,7 @@ struct ethoc {
199 int dma_alloc; 202 int dma_alloc;
200 resource_size_t io_region_size; 203 resource_size_t io_region_size;
201 204
205 unsigned int num_bd;
202 unsigned int num_tx; 206 unsigned int num_tx;
203 unsigned int cur_tx; 207 unsigned int cur_tx;
204 unsigned int dty_tx; 208 unsigned int dty_tx;
@@ -216,6 +220,7 @@ struct ethoc {
216 220
217 struct phy_device *phy; 221 struct phy_device *phy;
218 struct mii_bus *mdio; 222 struct mii_bus *mdio;
223 struct clk *clk;
219 s8 phy_id; 224 s8 phy_id;
220}; 225};
221 226
@@ -688,6 +693,11 @@ static int ethoc_mdio_probe(struct net_device *dev)
688 } 693 }
689 694
690 priv->phy = phy; 695 priv->phy = phy;
696 phy->advertising &= ~(ADVERTISED_1000baseT_Full |
697 ADVERTISED_1000baseT_Half);
698 phy->supported &= ~(SUPPORTED_1000baseT_Full |
699 SUPPORTED_1000baseT_Half);
700
691 return 0; 701 return 0;
692} 702}
693 703
@@ -890,6 +900,102 @@ out:
890 return NETDEV_TX_OK; 900 return NETDEV_TX_OK;
891} 901}
892 902
903static int ethoc_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
904{
905 struct ethoc *priv = netdev_priv(dev);
906 struct phy_device *phydev = priv->phy;
907
908 if (!phydev)
909 return -EOPNOTSUPP;
910
911 return phy_ethtool_gset(phydev, cmd);
912}
913
914static int ethoc_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
915{
916 struct ethoc *priv = netdev_priv(dev);
917 struct phy_device *phydev = priv->phy;
918
919 if (!phydev)
920 return -EOPNOTSUPP;
921
922 return phy_ethtool_sset(phydev, cmd);
923}
924
925static int ethoc_get_regs_len(struct net_device *netdev)
926{
927 return ETH_END;
928}
929
930static void ethoc_get_regs(struct net_device *dev, struct ethtool_regs *regs,
931 void *p)
932{
933 struct ethoc *priv = netdev_priv(dev);
934 u32 *regs_buff = p;
935 unsigned i;
936
937 regs->version = 0;
938 for (i = 0; i < ETH_END / sizeof(u32); ++i)
939 regs_buff[i] = ethoc_read(priv, i * sizeof(u32));
940}
941
942static void ethoc_get_ringparam(struct net_device *dev,
943 struct ethtool_ringparam *ring)
944{
945 struct ethoc *priv = netdev_priv(dev);
946
947 ring->rx_max_pending = priv->num_bd - 1;
948 ring->rx_mini_max_pending = 0;
949 ring->rx_jumbo_max_pending = 0;
950 ring->tx_max_pending = priv->num_bd - 1;
951
952 ring->rx_pending = priv->num_rx;
953 ring->rx_mini_pending = 0;
954 ring->rx_jumbo_pending = 0;
955 ring->tx_pending = priv->num_tx;
956}
957
958static int ethoc_set_ringparam(struct net_device *dev,
959 struct ethtool_ringparam *ring)
960{
961 struct ethoc *priv = netdev_priv(dev);
962
963 if (ring->tx_pending < 1 || ring->rx_pending < 1 ||
964 ring->tx_pending + ring->rx_pending > priv->num_bd)
965 return -EINVAL;
966 if (ring->rx_mini_pending || ring->rx_jumbo_pending)
967 return -EINVAL;
968
969 if (netif_running(dev)) {
970 netif_tx_disable(dev);
971 ethoc_disable_rx_and_tx(priv);
972 ethoc_disable_irq(priv, INT_MASK_TX | INT_MASK_RX);
973 synchronize_irq(dev->irq);
974 }
975
976 priv->num_tx = rounddown_pow_of_two(ring->tx_pending);
977 priv->num_rx = ring->rx_pending;
978 ethoc_init_ring(priv, dev->mem_start);
979
980 if (netif_running(dev)) {
981 ethoc_enable_irq(priv, INT_MASK_TX | INT_MASK_RX);
982 ethoc_enable_rx_and_tx(priv);
983 netif_wake_queue(dev);
984 }
985 return 0;
986}
987
988const struct ethtool_ops ethoc_ethtool_ops = {
989 .get_settings = ethoc_get_settings,
990 .set_settings = ethoc_set_settings,
991 .get_regs_len = ethoc_get_regs_len,
992 .get_regs = ethoc_get_regs,
993 .get_link = ethtool_op_get_link,
994 .get_ringparam = ethoc_get_ringparam,
995 .set_ringparam = ethoc_set_ringparam,
996 .get_ts_info = ethtool_op_get_ts_info,
997};
998
893static const struct net_device_ops ethoc_netdev_ops = { 999static const struct net_device_ops ethoc_netdev_ops = {
894 .ndo_open = ethoc_open, 1000 .ndo_open = ethoc_open,
895 .ndo_stop = ethoc_stop, 1001 .ndo_stop = ethoc_stop,
@@ -917,6 +1023,8 @@ static int ethoc_probe(struct platform_device *pdev)
917 int num_bd; 1023 int num_bd;
918 int ret = 0; 1024 int ret = 0;
919 bool random_mac = false; 1025 bool random_mac = false;
1026 struct ethoc_platform_data *pdata = dev_get_platdata(&pdev->dev);
1027 u32 eth_clkfreq = pdata ? pdata->eth_clkfreq : 0;
920 1028
921 /* allocate networking device */ 1029 /* allocate networking device */
922 netdev = alloc_etherdev(sizeof(struct ethoc)); 1030 netdev = alloc_etherdev(sizeof(struct ethoc));
@@ -1016,6 +1124,7 @@ static int ethoc_probe(struct platform_device *pdev)
1016 ret = -ENODEV; 1124 ret = -ENODEV;
1017 goto error; 1125 goto error;
1018 } 1126 }
1127 priv->num_bd = num_bd;
1019 /* num_tx must be a power of two */ 1128 /* num_tx must be a power of two */
1020 priv->num_tx = rounddown_pow_of_two(num_bd >> 1); 1129 priv->num_tx = rounddown_pow_of_two(num_bd >> 1);
1021 priv->num_rx = num_bd - priv->num_tx; 1130 priv->num_rx = num_bd - priv->num_tx;
@@ -1030,8 +1139,7 @@ static int ethoc_probe(struct platform_device *pdev)
1030 } 1139 }
1031 1140
1032 /* Allow the platform setup code to pass in a MAC address. */ 1141 /* Allow the platform setup code to pass in a MAC address. */
1033 if (dev_get_platdata(&pdev->dev)) { 1142 if (pdata) {
1034 struct ethoc_platform_data *pdata = dev_get_platdata(&pdev->dev);
1035 memcpy(netdev->dev_addr, pdata->hwaddr, IFHWADDRLEN); 1143 memcpy(netdev->dev_addr, pdata->hwaddr, IFHWADDRLEN);
1036 priv->phy_id = pdata->phy_id; 1144 priv->phy_id = pdata->phy_id;
1037 } else { 1145 } else {
@@ -1069,6 +1177,27 @@ static int ethoc_probe(struct platform_device *pdev)
1069 if (random_mac) 1177 if (random_mac)
1070 netdev->addr_assign_type = NET_ADDR_RANDOM; 1178 netdev->addr_assign_type = NET_ADDR_RANDOM;
1071 1179
1180 /* Allow the platform setup code to adjust MII management bus clock. */
1181 if (!eth_clkfreq) {
1182 struct clk *clk = devm_clk_get(&pdev->dev, NULL);
1183
1184 if (!IS_ERR(clk)) {
1185 priv->clk = clk;
1186 clk_prepare_enable(clk);
1187 eth_clkfreq = clk_get_rate(clk);
1188 }
1189 }
1190 if (eth_clkfreq) {
1191 u32 clkdiv = MIIMODER_CLKDIV(eth_clkfreq / 2500000 + 1);
1192
1193 if (!clkdiv)
1194 clkdiv = 2;
1195 dev_dbg(&pdev->dev, "setting MII clkdiv to %u\n", clkdiv);
1196 ethoc_write(priv, MIIMODER,
1197 (ethoc_read(priv, MIIMODER) & MIIMODER_NOPRE) |
1198 clkdiv);
1199 }
1200
1072 /* register MII bus */ 1201 /* register MII bus */
1073 priv->mdio = mdiobus_alloc(); 1202 priv->mdio = mdiobus_alloc();
1074 if (!priv->mdio) { 1203 if (!priv->mdio) {
@@ -1111,6 +1240,7 @@ static int ethoc_probe(struct platform_device *pdev)
1111 netdev->netdev_ops = &ethoc_netdev_ops; 1240 netdev->netdev_ops = &ethoc_netdev_ops;
1112 netdev->watchdog_timeo = ETHOC_TIMEOUT; 1241 netdev->watchdog_timeo = ETHOC_TIMEOUT;
1113 netdev->features |= 0; 1242 netdev->features |= 0;
1243 netdev->ethtool_ops = &ethoc_ethtool_ops;
1114 1244
1115 /* setup NAPI */ 1245 /* setup NAPI */
1116 netif_napi_add(netdev, &priv->napi, ethoc_poll, 64); 1246 netif_napi_add(netdev, &priv->napi, ethoc_poll, 64);
@@ -1133,6 +1263,8 @@ free_mdio:
1133 kfree(priv->mdio->irq); 1263 kfree(priv->mdio->irq);
1134 mdiobus_free(priv->mdio); 1264 mdiobus_free(priv->mdio);
1135free: 1265free:
1266 if (priv->clk)
1267 clk_disable_unprepare(priv->clk);
1136 free_netdev(netdev); 1268 free_netdev(netdev);
1137out: 1269out:
1138 return ret; 1270 return ret;
@@ -1157,6 +1289,8 @@ static int ethoc_remove(struct platform_device *pdev)
1157 kfree(priv->mdio->irq); 1289 kfree(priv->mdio->irq);
1158 mdiobus_free(priv->mdio); 1290 mdiobus_free(priv->mdio);
1159 } 1291 }
1292 if (priv->clk)
1293 clk_disable_unprepare(priv->clk);
1160 unregister_netdev(netdev); 1294 unregister_netdev(netdev);
1161 free_netdev(netdev); 1295 free_netdev(netdev);
1162 } 1296 }
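
In the ethoc probe hunk, the MII management clock divider is derived from the bus clock as clkdiv = MIIMODER_CLKDIV(eth_clkfreq / 2500000 + 1), keeping the MDC frequency below roughly 2.5 MHz. As a worked example (assuming MIIMODER_CLKDIV() only masks the value): with a 50 MHz bus clock, 50000000 / 2500000 + 1 = 21, giving MDC of about 50 MHz / 21, or roughly 2.4 MHz. The divider is then written into MIIMODER while preserving the MIIMODER_NOPRE bit, as the hunk shows.
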
diff --git a/drivers/net/ethernet/intel/e100.c b/drivers/net/ethernet/intel/e100.c
index cbaba4442d4b..bf7a01ef9a57 100644
--- a/drivers/net/ethernet/intel/e100.c
+++ b/drivers/net/ethernet/intel/e100.c
@@ -3034,7 +3034,7 @@ static void __e100_shutdown(struct pci_dev *pdev, bool *enable_wake)
3034 *enable_wake = false; 3034 *enable_wake = false;
3035 } 3035 }
3036 3036
3037 pci_disable_device(pdev); 3037 pci_clear_master(pdev);
3038} 3038}
3039 3039
3040static int __e100_power_off(struct pci_dev *pdev, bool wake) 3040static int __e100_power_off(struct pci_dev *pdev, bool wake)
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-main.c b/drivers/net/ethernet/neterion/vxge/vxge-main.c
index 1ded50ca1600..e46e8698e630 100644
--- a/drivers/net/ethernet/neterion/vxge/vxge-main.c
+++ b/drivers/net/ethernet/neterion/vxge/vxge-main.c
@@ -726,9 +726,6 @@ static int vxge_learn_mac(struct vxgedev *vdev, u8 *mac_header)
726 int vpath_idx = 0; 726 int vpath_idx = 0;
727 enum vxge_hw_status status = VXGE_HW_OK; 727 enum vxge_hw_status status = VXGE_HW_OK;
728 struct vxge_vpath *vpath = NULL; 728 struct vxge_vpath *vpath = NULL;
729 struct __vxge_hw_device *hldev;
730
731 hldev = pci_get_drvdata(vdev->pdev);
732 729
733 mac_address = (u8 *)&mac_addr; 730 mac_address = (u8 *)&mac_addr;
734 memcpy(mac_address, mac_header, ETH_ALEN); 731 memcpy(mac_address, mac_header, ETH_ALEN);
@@ -2443,9 +2440,6 @@ static void vxge_rem_msix_isr(struct vxgedev *vdev)
2443 2440
2444static void vxge_rem_isr(struct vxgedev *vdev) 2441static void vxge_rem_isr(struct vxgedev *vdev)
2445{ 2442{
2446 struct __vxge_hw_device *hldev;
2447 hldev = pci_get_drvdata(vdev->pdev);
2448
2449#ifdef CONFIG_PCI_MSI 2443#ifdef CONFIG_PCI_MSI
2450 if (vdev->config.intr_type == MSI_X) { 2444 if (vdev->config.intr_type == MSI_X) {
2451 vxge_rem_msix_isr(vdev); 2445 vxge_rem_msix_isr(vdev);
diff --git a/drivers/net/ethernet/sfc/tx.c b/drivers/net/ethernet/sfc/tx.c
index c49d1fb16965..75d11fa4eb0a 100644
--- a/drivers/net/ethernet/sfc/tx.c
+++ b/drivers/net/ethernet/sfc/tx.c
@@ -429,7 +429,9 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
429 } 429 }
430 430
431 /* Transfer ownership of the skb to the final buffer */ 431 /* Transfer ownership of the skb to the final buffer */
432#ifdef EFX_USE_PIO
432finish_packet: 433finish_packet:
434#endif
433 buffer->skb = skb; 435 buffer->skb = skb;
434 buffer->flags = EFX_TX_BUF_SKB | dma_flags; 436 buffer->flags = EFX_TX_BUF_SKB | dma_flags;
435 437
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index bde63e3af96f..1d860ce914ed 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -1878,8 +1878,18 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data,
1878 mdio_node = of_find_node_by_phandle(be32_to_cpup(parp)); 1878 mdio_node = of_find_node_by_phandle(be32_to_cpup(parp));
1879 phyid = be32_to_cpup(parp+1); 1879 phyid = be32_to_cpup(parp+1);
1880 mdio = of_find_device_by_node(mdio_node); 1880 mdio = of_find_device_by_node(mdio_node);
1881 snprintf(slave_data->phy_id, sizeof(slave_data->phy_id), 1881
1882 PHY_ID_FMT, mdio->name, phyid); 1882 if (strncmp(mdio->name, "gpio", 4) == 0) {
1883 /* GPIO bitbang MDIO driver attached */
1884 struct mii_bus *bus = dev_get_drvdata(&mdio->dev);
1885
1886 snprintf(slave_data->phy_id, sizeof(slave_data->phy_id),
1887 PHY_ID_FMT, bus->id, phyid);
1888 } else {
1889 /* davinci MDIO driver attached */
1890 snprintf(slave_data->phy_id, sizeof(slave_data->phy_id),
1891 PHY_ID_FMT, mdio->name, phyid);
1892 }
1883 1893
1884 mac_addr = of_get_mac_address(slave_node); 1894 mac_addr = of_get_mac_address(slave_node);
1885 if (mac_addr) 1895 if (mac_addr)
diff --git a/drivers/net/irda/Kconfig b/drivers/net/irda/Kconfig
index 2dc82f1d2e70..3da44d5d9149 100644
--- a/drivers/net/irda/Kconfig
+++ b/drivers/net/irda/Kconfig
@@ -210,13 +210,6 @@ config KINGSUN_DONGLE
210 To compile it as a module, choose M here: the module will be called 210 To compile it as a module, choose M here: the module will be called
211 kingsun-sir. 211 kingsun-sir.
212 212
213config EP7211_DONGLE
214 tristate "Cirrus Logic clps711x I/R support"
215 depends on IRTTY_SIR && ARCH_CLPS711X && IRDA
216 help
217 Say Y here if you want to build support for the Cirrus logic
218 EP7211 chipset's infrared module.
219
220config KSDAZZLE_DONGLE 213config KSDAZZLE_DONGLE
221 tristate "KingSun Dazzle IrDA-USB dongle" 214 tristate "KingSun Dazzle IrDA-USB dongle"
222 depends on IRDA && USB 215 depends on IRDA && USB
diff --git a/drivers/net/irda/Makefile b/drivers/net/irda/Makefile
index dfc64537f62f..be8ab5b9a4a2 100644
--- a/drivers/net/irda/Makefile
+++ b/drivers/net/irda/Makefile
@@ -35,7 +35,6 @@ obj-$(CONFIG_MCP2120_DONGLE) += mcp2120-sir.o
35obj-$(CONFIG_ACT200L_DONGLE) += act200l-sir.o 35obj-$(CONFIG_ACT200L_DONGLE) += act200l-sir.o
36obj-$(CONFIG_MA600_DONGLE) += ma600-sir.o 36obj-$(CONFIG_MA600_DONGLE) += ma600-sir.o
37obj-$(CONFIG_TOIM3232_DONGLE) += toim3232-sir.o 37obj-$(CONFIG_TOIM3232_DONGLE) += toim3232-sir.o
38obj-$(CONFIG_EP7211_DONGLE) += ep7211-sir.o
39obj-$(CONFIG_KINGSUN_DONGLE) += kingsun-sir.o 38obj-$(CONFIG_KINGSUN_DONGLE) += kingsun-sir.o
40obj-$(CONFIG_KSDAZZLE_DONGLE) += ksdazzle-sir.o 39obj-$(CONFIG_KSDAZZLE_DONGLE) += ksdazzle-sir.o
41obj-$(CONFIG_KS959_DONGLE) += ks959-sir.o 40obj-$(CONFIG_KS959_DONGLE) += ks959-sir.o
diff --git a/drivers/net/irda/ep7211-sir.c b/drivers/net/irda/ep7211-sir.c
deleted file mode 100644
index 5fe1f4dd3369..000000000000
--- a/drivers/net/irda/ep7211-sir.c
+++ /dev/null
@@ -1,70 +0,0 @@
1/*
2 * IR port driver for the Cirrus Logic CLPS711X processors
3 *
4 * Copyright 2001, Blue Mug Inc. All rights reserved.
5 * Copyright 2007, Samuel Ortiz <samuel@sortiz.org>
6 */
7
8#include <linux/module.h>
9#include <linux/platform_device.h>
10
11#include <mach/hardware.h>
12
13#include "sir-dev.h"
14
15static int clps711x_dongle_open(struct sir_dev *dev)
16{
17 unsigned int syscon;
18
19 /* Turn on the SIR encoder. */
20 syscon = clps_readl(SYSCON1);
21 syscon |= SYSCON1_SIREN;
22 clps_writel(syscon, SYSCON1);
23
24 return 0;
25}
26
27static int clps711x_dongle_close(struct sir_dev *dev)
28{
29 unsigned int syscon;
30
31 /* Turn off the SIR encoder. */
32 syscon = clps_readl(SYSCON1);
33 syscon &= ~SYSCON1_SIREN;
34 clps_writel(syscon, SYSCON1);
35
36 return 0;
37}
38
39static struct dongle_driver clps711x_dongle = {
40 .owner = THIS_MODULE,
41 .driver_name = "EP7211 IR driver",
42 .type = IRDA_EP7211_DONGLE,
43 .open = clps711x_dongle_open,
44 .close = clps711x_dongle_close,
45};
46
47static int clps711x_sir_probe(struct platform_device *pdev)
48{
49 return irda_register_dongle(&clps711x_dongle);
50}
51
52static int clps711x_sir_remove(struct platform_device *pdev)
53{
54 return irda_unregister_dongle(&clps711x_dongle);
55}
56
57static struct platform_driver clps711x_sir_driver = {
58 .driver = {
59 .name = "sir-clps711x",
60 .owner = THIS_MODULE,
61 },
62 .probe = clps711x_sir_probe,
63 .remove = clps711x_sir_remove,
64};
65module_platform_driver(clps711x_sir_driver);
66
67MODULE_AUTHOR("Samuel Ortiz <samuel@sortiz.org>");
68MODULE_DESCRIPTION("EP7211 IR dongle driver");
69MODULE_LICENSE("GPL");
70MODULE_ALIAS("irda-dongle-13"); /* IRDA_EP7211_DONGLE */
diff --git a/drivers/net/phy/dp83640.c b/drivers/net/phy/dp83640.c
index 547725fa8671..9414fa272160 100644
--- a/drivers/net/phy/dp83640.c
+++ b/drivers/net/phy/dp83640.c
@@ -437,7 +437,10 @@ static int ptp_dp83640_enable(struct ptp_clock_info *ptp,
437 if (on) { 437 if (on) {
438 gpio_num = gpio_tab[EXTTS0_GPIO + index]; 438 gpio_num = gpio_tab[EXTTS0_GPIO + index];
439 evnt |= (gpio_num & EVNT_GPIO_MASK) << EVNT_GPIO_SHIFT; 439 evnt |= (gpio_num & EVNT_GPIO_MASK) << EVNT_GPIO_SHIFT;
440 evnt |= EVNT_RISE; 440 if (rq->extts.flags & PTP_FALLING_EDGE)
441 evnt |= EVNT_FALL;
442 else
443 evnt |= EVNT_RISE;
441 } 444 }
442 ext_write(0, phydev, PAGE5, PTP_EVNT, evnt); 445 ext_write(0, phydev, PAGE5, PTP_EVNT, evnt);
443 return 0; 446 return 0;
@@ -1058,6 +1061,13 @@ static void dp83640_remove(struct phy_device *phydev)
1058 kfree(dp83640); 1061 kfree(dp83640);
1059} 1062}
1060 1063
1064static int dp83640_config_init(struct phy_device *phydev)
1065{
1066 enable_status_frames(phydev, true);
1067 ext_write(0, phydev, PAGE4, PTP_CTL, PTP_ENABLE);
1068 return 0;
1069}
1070
1061static int dp83640_ack_interrupt(struct phy_device *phydev) 1071static int dp83640_ack_interrupt(struct phy_device *phydev)
1062{ 1072{
1063 int err = phy_read(phydev, MII_DP83640_MISR); 1073 int err = phy_read(phydev, MII_DP83640_MISR);
@@ -1195,11 +1205,6 @@ static int dp83640_hwtstamp(struct phy_device *phydev, struct ifreq *ifr)
1195 1205
1196 mutex_lock(&dp83640->clock->extreg_lock); 1206 mutex_lock(&dp83640->clock->extreg_lock);
1197 1207
1198 if (dp83640->hwts_tx_en || dp83640->hwts_rx_en) {
1199 enable_status_frames(phydev, true);
1200 ext_write(0, phydev, PAGE4, PTP_CTL, PTP_ENABLE);
1201 }
1202
1203 ext_write(0, phydev, PAGE5, PTP_TXCFG0, txcfg0); 1208 ext_write(0, phydev, PAGE5, PTP_TXCFG0, txcfg0);
1204 ext_write(0, phydev, PAGE5, PTP_RXCFG0, rxcfg0); 1209 ext_write(0, phydev, PAGE5, PTP_RXCFG0, rxcfg0);
1205 1210
@@ -1281,6 +1286,7 @@ static void dp83640_txtstamp(struct phy_device *phydev,
1281 } 1286 }
1282 /* fall through */ 1287 /* fall through */
1283 case HWTSTAMP_TX_ON: 1288 case HWTSTAMP_TX_ON:
1289 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
1284 skb_queue_tail(&dp83640->tx_queue, skb); 1290 skb_queue_tail(&dp83640->tx_queue, skb);
1285 schedule_work(&dp83640->ts_work); 1291 schedule_work(&dp83640->ts_work);
1286 break; 1292 break;
@@ -1330,6 +1336,7 @@ static struct phy_driver dp83640_driver = {
1330 .flags = PHY_HAS_INTERRUPT, 1336 .flags = PHY_HAS_INTERRUPT,
1331 .probe = dp83640_probe, 1337 .probe = dp83640_probe,
1332 .remove = dp83640_remove, 1338 .remove = dp83640_remove,
1339 .config_init = dp83640_config_init,
1333 .config_aneg = genphy_config_aneg, 1340 .config_aneg = genphy_config_aneg,
1334 .read_status = genphy_read_status, 1341 .read_status = genphy_read_status,
1335 .ack_interrupt = dp83640_ack_interrupt, 1342 .ack_interrupt = dp83640_ack_interrupt,
diff --git a/drivers/net/phy/mdio-sun4i.c b/drivers/net/phy/mdio-sun4i.c
index bb88bc7d81fb..9367acc84fbb 100644
--- a/drivers/net/phy/mdio-sun4i.c
+++ b/drivers/net/phy/mdio-sun4i.c
@@ -170,6 +170,9 @@ static int sun4i_mdio_remove(struct platform_device *pdev)
170} 170}
171 171
172static const struct of_device_id sun4i_mdio_dt_ids[] = { 172static const struct of_device_id sun4i_mdio_dt_ids[] = {
173 { .compatible = "allwinner,sun4i-a10-mdio" },
174
175 /* Deprecated */
173 { .compatible = "allwinner,sun4i-mdio" }, 176 { .compatible = "allwinner,sun4i-mdio" },
174 { } 177 { }
175}; 178};
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index 4b03e63639b7..82514e72b3d8 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -719,7 +719,7 @@ int phy_resume(struct phy_device *phydev)
719static int genphy_config_advert(struct phy_device *phydev) 719static int genphy_config_advert(struct phy_device *phydev)
720{ 720{
721 u32 advertise; 721 u32 advertise;
722 int oldadv, adv; 722 int oldadv, adv, bmsr;
723 int err, changed = 0; 723 int err, changed = 0;
724 724
725 /* Only allow advertising what this PHY supports */ 725 /* Only allow advertising what this PHY supports */
@@ -744,26 +744,36 @@ static int genphy_config_advert(struct phy_device *phydev)
744 changed = 1; 744 changed = 1;
745 } 745 }
746 746
747 bmsr = phy_read(phydev, MII_BMSR);
748 if (bmsr < 0)
749 return bmsr;
750
751 /* Per 802.3-2008, Section 22.2.4.2.16 Extended status all
752 * 1000Mbits/sec capable PHYs shall have the BMSR_ESTATEN bit set to a
753 * logical 1.
754 */
755 if (!(bmsr & BMSR_ESTATEN))
756 return changed;
757
747 /* Configure gigabit if it's supported */ 758 /* Configure gigabit if it's supported */
759 adv = phy_read(phydev, MII_CTRL1000);
760 if (adv < 0)
761 return adv;
762
763 oldadv = adv;
764 adv &= ~(ADVERTISE_1000FULL | ADVERTISE_1000HALF);
765
748 if (phydev->supported & (SUPPORTED_1000baseT_Half | 766 if (phydev->supported & (SUPPORTED_1000baseT_Half |
749 SUPPORTED_1000baseT_Full)) { 767 SUPPORTED_1000baseT_Full)) {
750 adv = phy_read(phydev, MII_CTRL1000);
751 if (adv < 0)
752 return adv;
753
754 oldadv = adv;
755 adv &= ~(ADVERTISE_1000FULL | ADVERTISE_1000HALF);
756 adv |= ethtool_adv_to_mii_ctrl1000_t(advertise); 768 adv |= ethtool_adv_to_mii_ctrl1000_t(advertise);
757 769 if (adv != oldadv)
758 if (adv != oldadv) {
759 err = phy_write(phydev, MII_CTRL1000, adv);
760
761 if (err < 0)
762 return err;
763 changed = 1; 770 changed = 1;
764 }
765 } 771 }
766 772
773 err = phy_write(phydev, MII_CTRL1000, adv);
774 if (err < 0)
775 return err;
776
767 return changed; 777 return changed;
768} 778}
769 779
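
The phy_device.c hunk changes when MII_CTRL1000 is touched: the gigabit advertisement register is now read and written back whenever the PHY reports extended status (BMSR_ESTATEN), with the 1000BASE-T bits cleared first and re-added only if the PHY actually supports them. Condensed from the hunk, the new flow is roughly:

	bmsr = phy_read(phydev, MII_BMSR);
	if (bmsr < 0)
		return bmsr;
	if (!(bmsr & BMSR_ESTATEN))		/* no extended status: MII_CTRL1000 not present */
		return changed;

	adv = phy_read(phydev, MII_CTRL1000);
	if (adv < 0)
		return adv;
	oldadv = adv;
	adv &= ~(ADVERTISE_1000FULL | ADVERTISE_1000HALF);	/* clear stale gigabit bits */
	if (phydev->supported & (SUPPORTED_1000baseT_Half |
				 SUPPORTED_1000baseT_Full)) {
		adv |= ethtool_adv_to_mii_ctrl1000_t(advertise);
		if (adv != oldadv)
			changed = 1;
	}
	err = phy_write(phydev, MII_CTRL1000, adv);	/* always written back now */
	if (err < 0)
		return err;
	return changed;
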
diff --git a/drivers/net/usb/Kconfig b/drivers/net/usb/Kconfig
index 6b638a066c1d..409499fdb157 100644
--- a/drivers/net/usb/Kconfig
+++ b/drivers/net/usb/Kconfig
@@ -292,6 +292,22 @@ config USB_NET_SR9700
292 This option adds support for CoreChip-sz SR9700 based USB 1.1 292 This option adds support for CoreChip-sz SR9700 based USB 1.1
293 10/100 Ethernet adapters. 293 10/100 Ethernet adapters.
294 294
295config USB_NET_SR9800
296 tristate "CoreChip-sz SR9800 based USB 2.0 10/100 ethernet devices"
297 depends on USB_USBNET
298 select CRC32
299 default y
300 ---help---
301 Say Y if you want to use one of the following 100Mbps USB Ethernet
302 devices based on the CoreChip-sz SR9800 chip.
303
304 This driver makes the adapter appear as a normal Ethernet interface,
305 typically on eth0, if it is the only ethernet device, or perhaps on
306 eth1, if you have a PCI or ISA ethernet card installed.
307
308 To compile this driver as a module, choose M here: the
309 module will be called sr9800.
310
295config USB_NET_SMSC75XX 311config USB_NET_SMSC75XX
296 tristate "SMSC LAN75XX based USB 2.0 gigabit ethernet devices" 312 tristate "SMSC LAN75XX based USB 2.0 gigabit ethernet devices"
297 depends on USB_USBNET 313 depends on USB_USBNET
diff --git a/drivers/net/usb/Makefile b/drivers/net/usb/Makefile
index b17b5e88bbaf..433f0a00c683 100644
--- a/drivers/net/usb/Makefile
+++ b/drivers/net/usb/Makefile
@@ -15,6 +15,7 @@ obj-$(CONFIG_USB_NET_CDCETHER) += cdc_ether.o r815x.o
15obj-$(CONFIG_USB_NET_CDC_EEM) += cdc_eem.o 15obj-$(CONFIG_USB_NET_CDC_EEM) += cdc_eem.o
16obj-$(CONFIG_USB_NET_DM9601) += dm9601.o 16obj-$(CONFIG_USB_NET_DM9601) += dm9601.o
17obj-$(CONFIG_USB_NET_SR9700) += sr9700.o 17obj-$(CONFIG_USB_NET_SR9700) += sr9700.o
18obj-$(CONFIG_USB_NET_SR9800) += sr9800.o
18obj-$(CONFIG_USB_NET_SMSC75XX) += smsc75xx.o 19obj-$(CONFIG_USB_NET_SMSC75XX) += smsc75xx.o
19obj-$(CONFIG_USB_NET_SMSC95XX) += smsc95xx.o 20obj-$(CONFIG_USB_NET_SMSC95XX) += smsc95xx.o
20obj-$(CONFIG_USB_NET_GL620A) += gl620a.o 21obj-$(CONFIG_USB_NET_GL620A) += gl620a.o
diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
index 1a482344b3f5..660bd5ea9fc0 100644
--- a/drivers/net/usb/hso.c
+++ b/drivers/net/usb/hso.c
@@ -1201,16 +1201,18 @@ static void hso_std_serial_read_bulk_callback(struct urb *urb)
1201 struct hso_serial *serial = urb->context; 1201 struct hso_serial *serial = urb->context;
1202 int status = urb->status; 1202 int status = urb->status;
1203 1203
1204 D4("\n--- Got serial_read_bulk callback %02x ---", status);
1205
1204 /* sanity check */ 1206 /* sanity check */
1205 if (!serial) { 1207 if (!serial) {
1206 D1("serial == NULL"); 1208 D1("serial == NULL");
1207 return; 1209 return;
1208 } else if (status) { 1210 }
1211 if (status) {
1209 handle_usb_error(status, __func__, serial->parent); 1212 handle_usb_error(status, __func__, serial->parent);
1210 return; 1213 return;
1211 } 1214 }
1212 1215
1213 D4("\n--- Got serial_read_bulk callback %02x ---", status);
1214 D1("Actual length = %d\n", urb->actual_length); 1216 D1("Actual length = %d\n", urb->actual_length);
1215 DUMP1(urb->transfer_buffer, urb->actual_length); 1217 DUMP1(urb->transfer_buffer, urb->actual_length);
1216 1218
@@ -1218,25 +1220,13 @@ static void hso_std_serial_read_bulk_callback(struct urb *urb)
1218 if (serial->port.count == 0) 1220 if (serial->port.count == 0)
1219 return; 1221 return;
1220 1222
1221 if (status == 0) { 1223 if (serial->parent->port_spec & HSO_INFO_CRC_BUG)
1222 if (serial->parent->port_spec & HSO_INFO_CRC_BUG) 1224 fix_crc_bug(urb, serial->in_endp->wMaxPacketSize);
1223 fix_crc_bug(urb, serial->in_endp->wMaxPacketSize); 1225 /* Valid data, handle RX data */
1224 /* Valid data, handle RX data */ 1226 spin_lock(&serial->serial_lock);
1225 spin_lock(&serial->serial_lock); 1227 serial->rx_urb_filled[hso_urb_to_index(serial, urb)] = 1;
1226 serial->rx_urb_filled[hso_urb_to_index(serial, urb)] = 1; 1228 put_rxbuf_data_and_resubmit_bulk_urb(serial);
1227 put_rxbuf_data_and_resubmit_bulk_urb(serial); 1229 spin_unlock(&serial->serial_lock);
1228 spin_unlock(&serial->serial_lock);
1229 } else if (status == -ENOENT || status == -ECONNRESET) {
1230 /* Unlinked - check for throttled port. */
1231 D2("Port %d, successfully unlinked urb", serial->minor);
1232 spin_lock(&serial->serial_lock);
1233 serial->rx_urb_filled[hso_urb_to_index(serial, urb)] = 0;
1234 hso_resubmit_rx_bulk_urb(serial, urb);
1235 spin_unlock(&serial->serial_lock);
1236 } else {
1237 D2("Port %d, status = %d for read urb", serial->minor, status);
1238 return;
1239 }
1240} 1230}
1241 1231
1242/* 1232/*
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index 23bdd5b9274d..ff5c87128ffe 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -712,6 +712,7 @@ static const struct usb_device_id products[] = {
712 {QMI_FIXED_INTF(0x19d2, 0x1255, 3)}, 712 {QMI_FIXED_INTF(0x19d2, 0x1255, 3)},
713 {QMI_FIXED_INTF(0x19d2, 0x1255, 4)}, 713 {QMI_FIXED_INTF(0x19d2, 0x1255, 4)},
714 {QMI_FIXED_INTF(0x19d2, 0x1256, 4)}, 714 {QMI_FIXED_INTF(0x19d2, 0x1256, 4)},
715 {QMI_FIXED_INTF(0x19d2, 0x1270, 5)}, /* ZTE MF667 */
715 {QMI_FIXED_INTF(0x19d2, 0x1401, 2)}, 716 {QMI_FIXED_INTF(0x19d2, 0x1401, 2)},
716 {QMI_FIXED_INTF(0x19d2, 0x1402, 2)}, /* ZTE MF60 */ 717 {QMI_FIXED_INTF(0x19d2, 0x1402, 2)}, /* ZTE MF60 */
717 {QMI_FIXED_INTF(0x19d2, 0x1424, 2)}, 718 {QMI_FIXED_INTF(0x19d2, 0x1424, 2)},
@@ -723,6 +724,7 @@ static const struct usb_device_id products[] = {
723 {QMI_FIXED_INTF(0x1199, 0x68a2, 8)}, /* Sierra Wireless MC7710 in QMI mode */ 724 {QMI_FIXED_INTF(0x1199, 0x68a2, 8)}, /* Sierra Wireless MC7710 in QMI mode */
724 {QMI_FIXED_INTF(0x1199, 0x68a2, 19)}, /* Sierra Wireless MC7710 in QMI mode */ 725 {QMI_FIXED_INTF(0x1199, 0x68a2, 19)}, /* Sierra Wireless MC7710 in QMI mode */
725 {QMI_FIXED_INTF(0x1199, 0x901c, 8)}, /* Sierra Wireless EM7700 */ 726 {QMI_FIXED_INTF(0x1199, 0x901c, 8)}, /* Sierra Wireless EM7700 */
727 {QMI_FIXED_INTF(0x1199, 0x9051, 8)}, /* Netgear AirCard 340U */
726 {QMI_FIXED_INTF(0x1bbb, 0x011e, 4)}, /* Telekom Speedstick LTE II (Alcatel One Touch L100V LTE) */ 728 {QMI_FIXED_INTF(0x1bbb, 0x011e, 4)}, /* Telekom Speedstick LTE II (Alcatel One Touch L100V LTE) */
727 {QMI_FIXED_INTF(0x2357, 0x0201, 4)}, /* TP-LINK HSUPA Modem MA180 */ 729 {QMI_FIXED_INTF(0x2357, 0x0201, 4)}, /* TP-LINK HSUPA Modem MA180 */
728 {QMI_FIXED_INTF(0x2357, 0x9000, 4)}, /* TP-LINK MA260 */ 730 {QMI_FIXED_INTF(0x2357, 0x9000, 4)}, /* TP-LINK MA260 */
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index e8fac732c6f1..d89dbe395ad2 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -2273,22 +2273,21 @@ static int rtl8152_open(struct net_device *netdev)
2273 struct r8152 *tp = netdev_priv(netdev); 2273 struct r8152 *tp = netdev_priv(netdev);
2274 int res = 0; 2274 int res = 0;
2275 2275
2276 rtl8152_set_speed(tp, AUTONEG_ENABLE,
2277 tp->mii.supports_gmii ? SPEED_1000 : SPEED_100,
2278 DUPLEX_FULL);
2279 tp->speed = 0;
2280 netif_carrier_off(netdev);
2281 netif_start_queue(netdev);
2282 set_bit(WORK_ENABLE, &tp->flags);
2276 res = usb_submit_urb(tp->intr_urb, GFP_KERNEL); 2283 res = usb_submit_urb(tp->intr_urb, GFP_KERNEL);
2277 if (res) { 2284 if (res) {
2278 if (res == -ENODEV) 2285 if (res == -ENODEV)
2279 netif_device_detach(tp->netdev); 2286 netif_device_detach(tp->netdev);
2280 netif_warn(tp, ifup, netdev, "intr_urb submit failed: %d\n", 2287 netif_warn(tp, ifup, netdev, "intr_urb submit failed: %d\n",
2281 res); 2288 res);
2282 return res;
2283 } 2289 }
2284 2290
2285 rtl8152_set_speed(tp, AUTONEG_ENABLE,
2286 tp->mii.supports_gmii ? SPEED_1000 : SPEED_100,
2287 DUPLEX_FULL);
2288 tp->speed = 0;
2289 netif_carrier_off(netdev);
2290 netif_start_queue(netdev);
2291 set_bit(WORK_ENABLE, &tp->flags);
2292 2291
2293 return res; 2292 return res;
2294} 2293}
@@ -2298,8 +2297,8 @@ static int rtl8152_close(struct net_device *netdev)
2298 struct r8152 *tp = netdev_priv(netdev); 2297 struct r8152 *tp = netdev_priv(netdev);
2299 int res = 0; 2298 int res = 0;
2300 2299
2301 usb_kill_urb(tp->intr_urb);
2302 clear_bit(WORK_ENABLE, &tp->flags); 2300 clear_bit(WORK_ENABLE, &tp->flags);
2301 usb_kill_urb(tp->intr_urb);
2303 cancel_delayed_work_sync(&tp->schedule); 2302 cancel_delayed_work_sync(&tp->schedule);
2304 netif_stop_queue(netdev); 2303 netif_stop_queue(netdev);
2305 tasklet_disable(&tp->tl); 2304 tasklet_disable(&tp->tl);
diff --git a/drivers/net/usb/sr9800.c b/drivers/net/usb/sr9800.c
new file mode 100644
index 000000000000..4175eb9fdeca
--- /dev/null
+++ b/drivers/net/usb/sr9800.c
@@ -0,0 +1,870 @@
1/* CoreChip-sz SR9800 one chip USB 2.0 Ethernet Devices
2 *
3 * Author : Liu Junliang <liujunliang_ljl@163.com>
4 *
5 * Based on asix_common.c, asix_devices.c
6 *
7 * This file is licensed under the terms of the GNU General Public License
8 * version 2. This program is licensed "as is" without any warranty of any
9 * kind, whether express or implied.*
10 */
11
12#include <linux/module.h>
13#include <linux/kmod.h>
14#include <linux/init.h>
15#include <linux/netdevice.h>
16#include <linux/etherdevice.h>
17#include <linux/ethtool.h>
18#include <linux/workqueue.h>
19#include <linux/mii.h>
20#include <linux/usb.h>
21#include <linux/crc32.h>
22#include <linux/usb/usbnet.h>
23#include <linux/slab.h>
24#include <linux/if_vlan.h>
25
26#include "sr9800.h"
27
28static int sr_read_cmd(struct usbnet *dev, u8 cmd, u16 value, u16 index,
29 u16 size, void *data)
30{
31 int err;
32
33 err = usbnet_read_cmd(dev, cmd, SR_REQ_RD_REG, value, index,
34 data, size);
35 if ((err != size) && (err >= 0))
36 err = -EINVAL;
37
38 return err;
39}
40
41static int sr_write_cmd(struct usbnet *dev, u8 cmd, u16 value, u16 index,
42 u16 size, void *data)
43{
44 int err;
45
46 err = usbnet_write_cmd(dev, cmd, SR_REQ_WR_REG, value, index,
47 data, size);
48 if ((err != size) && (err >= 0))
49 err = -EINVAL;
50
51 return err;
52}
53
54static void
55sr_write_cmd_async(struct usbnet *dev, u8 cmd, u16 value, u16 index,
56 u16 size, void *data)
57{
58 usbnet_write_cmd_async(dev, cmd, SR_REQ_WR_REG, value, index, data,
59 size);
60}
61
62static int sr_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
63{
64 int offset = 0;
65
66 while (offset + sizeof(u32) < skb->len) {
67 struct sk_buff *sr_skb;
68 u16 size;
69 u32 header = get_unaligned_le32(skb->data + offset);
70
71 offset += sizeof(u32);
72 /* get the packet length */
73 size = (u16) (header & 0x7ff);
74 if (size != ((~header >> 16) & 0x07ff)) {
75 netdev_err(dev->net, "%s : Bad Header Length\n",
76 __func__);
77 return 0;
78 }
79
80 if ((size > dev->net->mtu + ETH_HLEN + VLAN_HLEN) ||
81 (size + offset > skb->len)) {
82 netdev_err(dev->net, "%s : Bad RX Length %d\n",
83 __func__, size);
84 return 0;
85 }
86 sr_skb = netdev_alloc_skb_ip_align(dev->net, size);
87 if (!sr_skb)
88 return 0;
89
90 skb_put(sr_skb, size);
91 memcpy(sr_skb->data, skb->data + offset, size);
92 usbnet_skb_return(dev, sr_skb);
93
94 offset += (size + 1) & 0xfffe;
95 }
96
97 if (skb->len != offset) {
98 netdev_err(dev->net, "%s : Bad SKB Length %d\n", __func__,
99 skb->len);
100 return 0;
101 }
102
103 return 1;
104}
105
106static struct sk_buff *sr_tx_fixup(struct usbnet *dev, struct sk_buff *skb,
107 gfp_t flags)
108{
109 int headroom = skb_headroom(skb);
110 int tailroom = skb_tailroom(skb);
111 u32 padbytes = 0xffff0000;
112 u32 packet_len;
113 int padlen;
114
115 padlen = ((skb->len + 4) % (dev->maxpacket - 1)) ? 0 : 4;
116
117 if ((!skb_cloned(skb)) && ((headroom + tailroom) >= (4 + padlen))) {
118 if ((headroom < 4) || (tailroom < padlen)) {
119 skb->data = memmove(skb->head + 4, skb->data,
120 skb->len);
121 skb_set_tail_pointer(skb, skb->len);
122 }
123 } else {
124 struct sk_buff *skb2;
125 skb2 = skb_copy_expand(skb, 4, padlen, flags);
126 dev_kfree_skb_any(skb);
127 skb = skb2;
128 if (!skb)
129 return NULL;
130 }
131
132 skb_push(skb, 4);
133 packet_len = (((skb->len - 4) ^ 0x0000ffff) << 16) + (skb->len - 4);
134 cpu_to_le32s(&packet_len);
135 skb_copy_to_linear_data(skb, &packet_len, sizeof(packet_len));
136
137 if (padlen) {
138 cpu_to_le32s(&padbytes);
139 memcpy(skb_tail_pointer(skb), &padbytes, sizeof(padbytes));
140 skb_put(skb, sizeof(padbytes));
141 }
142
143 return skb;
144}
145
146static void sr_status(struct usbnet *dev, struct urb *urb)
147{
148 struct sr9800_int_data *event;
149 int link;
150
151 if (urb->actual_length < 8)
152 return;
153
154 event = urb->transfer_buffer;
155 link = event->link & 0x01;
156 if (netif_carrier_ok(dev->net) != link) {
157 usbnet_link_change(dev, link, 1);
158 netdev_dbg(dev->net, "Link Status is: %d\n", link);
159 }
160
161 return;
162}
163
164static inline int sr_set_sw_mii(struct usbnet *dev)
165{
166 int ret;
167
168 ret = sr_write_cmd(dev, SR_CMD_SET_SW_MII, 0x0000, 0, 0, NULL);
169 if (ret < 0)
170 netdev_err(dev->net, "Failed to enable software MII access\n");
171 return ret;
172}
173
174static inline int sr_set_hw_mii(struct usbnet *dev)
175{
176 int ret;
177
178 ret = sr_write_cmd(dev, SR_CMD_SET_HW_MII, 0x0000, 0, 0, NULL);
179 if (ret < 0)
180 netdev_err(dev->net, "Failed to enable hardware MII access\n");
181 return ret;
182}
183
184static inline int sr_get_phy_addr(struct usbnet *dev)
185{
186 u8 buf[2];
187 int ret;
188
189 ret = sr_read_cmd(dev, SR_CMD_READ_PHY_ID, 0, 0, 2, buf);
190 if (ret < 0) {
191 netdev_err(dev->net, "%s : Error reading PHYID register:%02x\n",
192 __func__, ret);
193 goto out;
194 }
195 netdev_dbg(dev->net, "%s : returning 0x%04x\n", __func__,
196 *((__le16 *)buf));
197
198 ret = buf[1];
199
200out:
201 return ret;
202}
203
204static int sr_sw_reset(struct usbnet *dev, u8 flags)
205{
206 int ret;
207
208 ret = sr_write_cmd(dev, SR_CMD_SW_RESET, flags, 0, 0, NULL);
209 if (ret < 0)
210 netdev_err(dev->net, "Failed to send software reset:%02x\n",
211 ret);
212
213 return ret;
214}
215
216static u16 sr_read_rx_ctl(struct usbnet *dev)
217{
218 __le16 v;
219 int ret;
220
221 ret = sr_read_cmd(dev, SR_CMD_READ_RX_CTL, 0, 0, 2, &v);
222 if (ret < 0) {
223 netdev_err(dev->net, "Error reading RX_CTL register:%02x\n",
224 ret);
225 goto out;
226 }
227
228 ret = le16_to_cpu(v);
229out:
230 return ret;
231}
232
233static int sr_write_rx_ctl(struct usbnet *dev, u16 mode)
234{
235 int ret;
236
237 netdev_dbg(dev->net, "%s : mode = 0x%04x\n", __func__, mode);
238 ret = sr_write_cmd(dev, SR_CMD_WRITE_RX_CTL, mode, 0, 0, NULL);
239 if (ret < 0)
240 netdev_err(dev->net,
241 "Failed to write RX_CTL mode to 0x%04x:%02x\n",
242 mode, ret);
243
244 return ret;
245}
246
247static u16 sr_read_medium_status(struct usbnet *dev)
248{
249 __le16 v;
250 int ret;
251
252 ret = sr_read_cmd(dev, SR_CMD_READ_MEDIUM_STATUS, 0, 0, 2, &v);
253 if (ret < 0) {
254 netdev_err(dev->net,
255 "Error reading Medium Status register:%02x\n", ret);
256 return ret; /* TODO: callers not checking for error ret */
257 }
258
259 return le16_to_cpu(v);
260}
261
262static int sr_write_medium_mode(struct usbnet *dev, u16 mode)
263{
264 int ret;
265
266 netdev_dbg(dev->net, "%s : mode = 0x%04x\n", __func__, mode);
267 ret = sr_write_cmd(dev, SR_CMD_WRITE_MEDIUM_MODE, mode, 0, 0, NULL);
268 if (ret < 0)
269 netdev_err(dev->net,
270 "Failed to write Medium Mode mode to 0x%04x:%02x\n",
271 mode, ret);
272 return ret;
273}
274
275static int sr_write_gpio(struct usbnet *dev, u16 value, int sleep)
276{
277 int ret;
278
279 netdev_dbg(dev->net, "%s : value = 0x%04x\n", __func__, value);
280 ret = sr_write_cmd(dev, SR_CMD_WRITE_GPIOS, value, 0, 0, NULL);
281 if (ret < 0)
282 netdev_err(dev->net, "Failed to write GPIO value 0x%04x:%02x\n",
283 value, ret);
284 if (sleep)
285 msleep(sleep);
286
287 return ret;
288}
289
290/* SR9800 has a 16-bit RX_CTL value */
291static void sr_set_multicast(struct net_device *net)
292{
293 struct usbnet *dev = netdev_priv(net);
294 struct sr_data *data = (struct sr_data *)&dev->data;
295 u16 rx_ctl = SR_DEFAULT_RX_CTL;
296
297 if (net->flags & IFF_PROMISC) {
298 rx_ctl |= SR_RX_CTL_PRO;
299 } else if (net->flags & IFF_ALLMULTI ||
300 netdev_mc_count(net) > SR_MAX_MCAST) {
301 rx_ctl |= SR_RX_CTL_AMALL;
302 } else if (netdev_mc_empty(net)) {
303 /* just broadcast and directed */
304 } else {
305 /* We use the 20 byte dev->data
306 * for our 8 byte filter buffer
307 * to avoid allocating memory that
308 * is tricky to free later
309 */
310 struct netdev_hw_addr *ha;
311 u32 crc_bits;
312
313 memset(data->multi_filter, 0, SR_MCAST_FILTER_SIZE);
314
315 /* Build the multicast hash filter. */
316 netdev_for_each_mc_addr(ha, net) {
317 crc_bits = ether_crc(ETH_ALEN, ha->addr) >> 26;
318 data->multi_filter[crc_bits >> 3] |=
319 1 << (crc_bits & 7);
320 }
321
322 sr_write_cmd_async(dev, SR_CMD_WRITE_MULTI_FILTER, 0, 0,
323 SR_MCAST_FILTER_SIZE, data->multi_filter);
324
325 rx_ctl |= SR_RX_CTL_AM;
326 }
327
328 sr_write_cmd_async(dev, SR_CMD_WRITE_RX_CTL, rx_ctl, 0, 0, NULL);
329}
330
331static int sr_mdio_read(struct net_device *net, int phy_id, int loc)
332{
333 struct usbnet *dev = netdev_priv(net);
334 __le16 res;
335
336 mutex_lock(&dev->phy_mutex);
337 sr_set_sw_mii(dev);
338 sr_read_cmd(dev, SR_CMD_READ_MII_REG, phy_id, (__u16)loc, 2, &res);
339 sr_set_hw_mii(dev);
340 mutex_unlock(&dev->phy_mutex);
341
342 netdev_dbg(dev->net,
343 "%s : phy_id=0x%02x, loc=0x%02x, returns=0x%04x\n", __func__,
344 phy_id, loc, le16_to_cpu(res));
345
346 return le16_to_cpu(res);
347}
348
349static void
350sr_mdio_write(struct net_device *net, int phy_id, int loc, int val)
351{
352 struct usbnet *dev = netdev_priv(net);
353 __le16 res = cpu_to_le16(val);
354
355 netdev_dbg(dev->net,
356 "%s : phy_id=0x%02x, loc=0x%02x, val=0x%04x\n", __func__,
357 phy_id, loc, val);
358 mutex_lock(&dev->phy_mutex);
359 sr_set_sw_mii(dev);
360 sr_write_cmd(dev, SR_CMD_WRITE_MII_REG, phy_id, (__u16)loc, 2, &res);
361 sr_set_hw_mii(dev);
362 mutex_unlock(&dev->phy_mutex);
363}
364
365/* Get the PHY Identifier from the PHYSID1 & PHYSID2 MII registers */
366static u32 sr_get_phyid(struct usbnet *dev)
367{
368 int phy_reg;
369 u32 phy_id;
370 int i;
371
372 /* Poll for the rare case the FW or phy isn't ready yet. */
373 for (i = 0; i < 100; i++) {
374 phy_reg = sr_mdio_read(dev->net, dev->mii.phy_id, MII_PHYSID1);
375 if (phy_reg != 0 && phy_reg != 0xFFFF)
376 break;
377 mdelay(1);
378 }
379
380 if (phy_reg <= 0 || phy_reg == 0xFFFF)
381 return 0;
382
383 phy_id = (phy_reg & 0xffff) << 16;
384
385 phy_reg = sr_mdio_read(dev->net, dev->mii.phy_id, MII_PHYSID2);
386 if (phy_reg < 0)
387 return 0;
388
389 phy_id |= (phy_reg & 0xffff);
390
391 return phy_id;
392}
393
394static void
395sr_get_wol(struct net_device *net, struct ethtool_wolinfo *wolinfo)
396{
397 struct usbnet *dev = netdev_priv(net);
398 u8 opt;
399
400 if (sr_read_cmd(dev, SR_CMD_READ_MONITOR_MODE, 0, 0, 1, &opt) < 0) {
401 wolinfo->supported = 0;
402 wolinfo->wolopts = 0;
403 return;
404 }
405 wolinfo->supported = WAKE_PHY | WAKE_MAGIC;
406 wolinfo->wolopts = 0;
407 if (opt & SR_MONITOR_LINK)
408 wolinfo->wolopts |= WAKE_PHY;
409 if (opt & SR_MONITOR_MAGIC)
410 wolinfo->wolopts |= WAKE_MAGIC;
411}
412
413static int
414sr_set_wol(struct net_device *net, struct ethtool_wolinfo *wolinfo)
415{
416 struct usbnet *dev = netdev_priv(net);
417 u8 opt = 0;
418
419 if (wolinfo->wolopts & WAKE_PHY)
420 opt |= SR_MONITOR_LINK;
421 if (wolinfo->wolopts & WAKE_MAGIC)
422 opt |= SR_MONITOR_MAGIC;
423
424 if (sr_write_cmd(dev, SR_CMD_WRITE_MONITOR_MODE,
425 opt, 0, 0, NULL) < 0)
426 return -EINVAL;
427
428 return 0;
429}
430
431static int sr_get_eeprom_len(struct net_device *net)
432{
433 struct usbnet *dev = netdev_priv(net);
434 struct sr_data *data = (struct sr_data *)&dev->data;
435
436 return data->eeprom_len;
437}
438
439static int sr_get_eeprom(struct net_device *net,
440 struct ethtool_eeprom *eeprom, u8 *data)
441{
442 struct usbnet *dev = netdev_priv(net);
443 __le16 *ebuf = (__le16 *)data;
444 int ret;
445 int i;
446
447 /* Crude hack to ensure that we don't overwrite memory
448 * if an odd length is supplied
449 */
450 if (eeprom->len % 2)
451 return -EINVAL;
452
453 eeprom->magic = SR_EEPROM_MAGIC;
454
455 /* sr9800 returns 2 bytes from eeprom on read */
456 for (i = 0; i < eeprom->len / 2; i++) {
457 ret = sr_read_cmd(dev, SR_CMD_READ_EEPROM, eeprom->offset + i,
458 0, 2, &ebuf[i]);
459 if (ret < 0)
460 return -EINVAL;
461 }
462 return 0;
463}
464
465static void sr_get_drvinfo(struct net_device *net,
466 struct ethtool_drvinfo *info)
467{
468 struct usbnet *dev = netdev_priv(net);
469 struct sr_data *data = (struct sr_data *)&dev->data;
470
471 /* Inherit standard device info */
472 usbnet_get_drvinfo(net, info);
473 strncpy(info->driver, DRIVER_NAME, sizeof(info->driver));
474 strncpy(info->version, DRIVER_VERSION, sizeof(info->version));
475 info->eedump_len = data->eeprom_len;
476}
477
478static u32 sr_get_link(struct net_device *net)
479{
480 struct usbnet *dev = netdev_priv(net);
481
482 return mii_link_ok(&dev->mii);
483}
484
485static int sr_ioctl(struct net_device *net, struct ifreq *rq, int cmd)
486{
487 struct usbnet *dev = netdev_priv(net);
488
489 return generic_mii_ioctl(&dev->mii, if_mii(rq), cmd, NULL);
490}
491
492static int sr_set_mac_address(struct net_device *net, void *p)
493{
494 struct usbnet *dev = netdev_priv(net);
495 struct sr_data *data = (struct sr_data *)&dev->data;
496 struct sockaddr *addr = p;
497
498 if (netif_running(net))
499 return -EBUSY;
500 if (!is_valid_ether_addr(addr->sa_data))
501 return -EADDRNOTAVAIL;
502
503 memcpy(net->dev_addr, addr->sa_data, ETH_ALEN);
504
505 /* We use the 20 byte dev->data
506 * for our 6 byte mac buffer
507 * to avoid allocating memory that
508 * is tricky to free later
509 */
510 memcpy(data->mac_addr, addr->sa_data, ETH_ALEN);
511 sr_write_cmd_async(dev, SR_CMD_WRITE_NODE_ID, 0, 0, ETH_ALEN,
512 data->mac_addr);
513
514 return 0;
515}
516
517static const struct ethtool_ops sr9800_ethtool_ops = {
518 .get_drvinfo = sr_get_drvinfo,
519 .get_link = sr_get_link,
520 .get_msglevel = usbnet_get_msglevel,
521 .set_msglevel = usbnet_set_msglevel,
522 .get_wol = sr_get_wol,
523 .set_wol = sr_set_wol,
524 .get_eeprom_len = sr_get_eeprom_len,
525 .get_eeprom = sr_get_eeprom,
526 .get_settings = usbnet_get_settings,
527 .set_settings = usbnet_set_settings,
528 .nway_reset = usbnet_nway_reset,
529};
530
531static int sr9800_link_reset(struct usbnet *dev)
532{
533 struct ethtool_cmd ecmd = { .cmd = ETHTOOL_GSET };
534 u16 mode;
535
536 mii_check_media(&dev->mii, 1, 1);
537 mii_ethtool_gset(&dev->mii, &ecmd);
538 mode = SR9800_MEDIUM_DEFAULT;
539
540 if (ethtool_cmd_speed(&ecmd) != SPEED_100)
541 mode &= ~SR_MEDIUM_PS;
542
543 if (ecmd.duplex != DUPLEX_FULL)
544 mode &= ~SR_MEDIUM_FD;
545
546 netdev_dbg(dev->net, "%s : speed: %u duplex: %d mode: 0x%04x\n",
547 __func__, ethtool_cmd_speed(&ecmd), ecmd.duplex, mode);
548
549 sr_write_medium_mode(dev, mode);
550
551 return 0;
552}
553
554
555static int sr9800_set_default_mode(struct usbnet *dev)
556{
557 u16 rx_ctl;
558 int ret;
559
560 sr_mdio_write(dev->net, dev->mii.phy_id, MII_BMCR, BMCR_RESET);
561 sr_mdio_write(dev->net, dev->mii.phy_id, MII_ADVERTISE,
562 ADVERTISE_ALL | ADVERTISE_CSMA);
563 mii_nway_restart(&dev->mii);
564
565 ret = sr_write_medium_mode(dev, SR9800_MEDIUM_DEFAULT);
566 if (ret < 0)
567 goto out;
568
569 ret = sr_write_cmd(dev, SR_CMD_WRITE_IPG012,
570 SR9800_IPG0_DEFAULT | SR9800_IPG1_DEFAULT,
571 SR9800_IPG2_DEFAULT, 0, NULL);
572 if (ret < 0) {
573 netdev_dbg(dev->net, "Write IPG,IPG1,IPG2 failed: %d\n", ret);
574 goto out;
575 }
576
577 /* Set RX_CTL to default values with 2k buffer, and enable cactus */
578 ret = sr_write_rx_ctl(dev, SR_DEFAULT_RX_CTL);
579 if (ret < 0)
580 goto out;
581
582 rx_ctl = sr_read_rx_ctl(dev);
583 netdev_dbg(dev->net, "RX_CTL is 0x%04x after all initializations\n",
584 rx_ctl);
585
586 rx_ctl = sr_read_medium_status(dev);
587 netdev_dbg(dev->net, "Medium Status:0x%04x after all initializations\n",
588 rx_ctl);
589
590 return 0;
591out:
592 return ret;
593}
594
595static int sr9800_reset(struct usbnet *dev)
596{
597 struct sr_data *data = (struct sr_data *)&dev->data;
598 int ret, embd_phy;
599 u16 rx_ctl;
600
601 ret = sr_write_gpio(dev,
602 SR_GPIO_RSE | SR_GPIO_GPO_2 | SR_GPIO_GPO2EN, 5);
603 if (ret < 0)
604 goto out;
605
606 embd_phy = ((sr_get_phy_addr(dev) & 0x1f) == 0x10 ? 1 : 0);
607
608 ret = sr_write_cmd(dev, SR_CMD_SW_PHY_SELECT, embd_phy, 0, 0, NULL);
609 if (ret < 0) {
610 netdev_dbg(dev->net, "Select PHY #1 failed: %d\n", ret);
611 goto out;
612 }
613
614 ret = sr_sw_reset(dev, SR_SWRESET_IPPD | SR_SWRESET_PRL);
615 if (ret < 0)
616 goto out;
617
618 msleep(150);
619
620 ret = sr_sw_reset(dev, SR_SWRESET_CLEAR);
621 if (ret < 0)
622 goto out;
623
624 msleep(150);
625
626 if (embd_phy) {
627 ret = sr_sw_reset(dev, SR_SWRESET_IPRL);
628 if (ret < 0)
629 goto out;
630 } else {
631 ret = sr_sw_reset(dev, SR_SWRESET_PRTE);
632 if (ret < 0)
633 goto out;
634 }
635
636 msleep(150);
637 rx_ctl = sr_read_rx_ctl(dev);
638 netdev_dbg(dev->net, "RX_CTL is 0x%04x after software reset\n", rx_ctl);
639 ret = sr_write_rx_ctl(dev, 0x0000);
640 if (ret < 0)
641 goto out;
642
643 rx_ctl = sr_read_rx_ctl(dev);
644 netdev_dbg(dev->net, "RX_CTL is 0x%04x setting to 0x0000\n", rx_ctl);
645
646 ret = sr_sw_reset(dev, SR_SWRESET_PRL);
647 if (ret < 0)
648 goto out;
649
650 msleep(150);
651
652 ret = sr_sw_reset(dev, SR_SWRESET_IPRL | SR_SWRESET_PRL);
653 if (ret < 0)
654 goto out;
655
656 msleep(150);
657
658 ret = sr9800_set_default_mode(dev);
659 if (ret < 0)
660 goto out;
661
662 /* Rewrite MAC address */
663 memcpy(data->mac_addr, dev->net->dev_addr, ETH_ALEN);
664 ret = sr_write_cmd(dev, SR_CMD_WRITE_NODE_ID, 0, 0, ETH_ALEN,
665 data->mac_addr);
666 if (ret < 0)
667 goto out;
668
669 return 0;
670
671out:
672 return ret;
673}
674
675static const struct net_device_ops sr9800_netdev_ops = {
676 .ndo_open = usbnet_open,
677 .ndo_stop = usbnet_stop,
678 .ndo_start_xmit = usbnet_start_xmit,
679 .ndo_tx_timeout = usbnet_tx_timeout,
680 .ndo_change_mtu = usbnet_change_mtu,
681 .ndo_set_mac_address = sr_set_mac_address,
682 .ndo_validate_addr = eth_validate_addr,
683 .ndo_do_ioctl = sr_ioctl,
684 .ndo_set_rx_mode = sr_set_multicast,
685};
686
687static int sr9800_phy_powerup(struct usbnet *dev)
688{
689 int ret;
690
691 /* set the embedded Ethernet PHY in power-down state */
692 ret = sr_sw_reset(dev, SR_SWRESET_IPPD | SR_SWRESET_IPRL);
693 if (ret < 0) {
694 netdev_err(dev->net, "Failed to power down PHY : %d\n", ret);
695 return ret;
696 }
697 msleep(20);
698
699 /* set the embedded Ethernet PHY in power-up state */
700 ret = sr_sw_reset(dev, SR_SWRESET_IPRL);
701 if (ret < 0) {
702 netdev_err(dev->net, "Failed to reset PHY: %d\n", ret);
703 return ret;
704 }
705 msleep(600);
706
707	/* clear the reset and power-down bits of the embedded Ethernet PHY */
708 ret = sr_sw_reset(dev, SR_SWRESET_CLEAR);
709 if (ret < 0) {
710 netdev_err(dev->net, "Failed to power up PHY: %d\n", ret);
711 return ret;
712 }
713 msleep(20);
714
715 /* set the embedded Ethernet PHY in power-up state */
716 ret = sr_sw_reset(dev, SR_SWRESET_IPRL);
717 if (ret < 0) {
718 netdev_err(dev->net, "Failed to reset PHY: %d\n", ret);
719 return ret;
720 }
721
722 return 0;
723}
724
725static int sr9800_bind(struct usbnet *dev, struct usb_interface *intf)
726{
727 struct sr_data *data = (struct sr_data *)&dev->data;
728 u16 led01_mux, led23_mux;
729 int ret, embd_phy;
730 u32 phyid;
731 u16 rx_ctl;
732
733 data->eeprom_len = SR9800_EEPROM_LEN;
734
735 usbnet_get_endpoints(dev, intf);
736
737 /* LED Setting Rule :
738 * AABB:CCDD
739 * AA : MFA0(LED0)
740 * BB : MFA1(LED1)
741 * CC : MFA2(LED2), Reserved for SR9800
742 * DD : MFA3(LED3), Reserved for SR9800
743 */
744 led01_mux = (SR_LED_MUX_LINK_ACTIVE << 8) | SR_LED_MUX_LINK;
745 led23_mux = (SR_LED_MUX_LINK_ACTIVE << 8) | SR_LED_MUX_TX_ACTIVE;
746 ret = sr_write_cmd(dev, SR_CMD_LED_MUX, led01_mux, led23_mux, 0, NULL);
747 if (ret < 0) {
748 netdev_err(dev->net, "set LINK LED failed : %d\n", ret);
749 goto out;
750 }
751
752 /* Get the MAC address */
753 ret = sr_read_cmd(dev, SR_CMD_READ_NODE_ID, 0, 0, ETH_ALEN,
754 dev->net->dev_addr);
755 if (ret < 0) {
756 netdev_dbg(dev->net, "Failed to read MAC address: %d\n", ret);
757 return ret;
758 }
759 netdev_dbg(dev->net, "mac addr : %pM\n", dev->net->dev_addr);
760
761 /* Initialize MII structure */
762 dev->mii.dev = dev->net;
763 dev->mii.mdio_read = sr_mdio_read;
764 dev->mii.mdio_write = sr_mdio_write;
765 dev->mii.phy_id_mask = 0x1f;
766 dev->mii.reg_num_mask = 0x1f;
767 dev->mii.phy_id = sr_get_phy_addr(dev);
768
769 dev->net->netdev_ops = &sr9800_netdev_ops;
770 dev->net->ethtool_ops = &sr9800_ethtool_ops;
771
772 embd_phy = ((dev->mii.phy_id & 0x1f) == 0x10 ? 1 : 0);
773 /* Reset the PHY to normal operation mode */
774 ret = sr_write_cmd(dev, SR_CMD_SW_PHY_SELECT, embd_phy, 0, 0, NULL);
775 if (ret < 0) {
776 netdev_dbg(dev->net, "Select PHY #1 failed: %d\n", ret);
777 return ret;
778 }
779
780 /* Init PHY routine */
781 ret = sr9800_phy_powerup(dev);
782 if (ret < 0)
783 goto out;
784
785 rx_ctl = sr_read_rx_ctl(dev);
786 netdev_dbg(dev->net, "RX_CTL is 0x%04x after software reset\n", rx_ctl);
787 ret = sr_write_rx_ctl(dev, 0x0000);
788 if (ret < 0)
789 goto out;
790
791 rx_ctl = sr_read_rx_ctl(dev);
792 netdev_dbg(dev->net, "RX_CTL is 0x%04x setting to 0x0000\n", rx_ctl);
793
794 /* Read PHYID register *AFTER* the PHY was reset properly */
795 phyid = sr_get_phyid(dev);
796 netdev_dbg(dev->net, "PHYID=0x%08x\n", phyid);
797
798 /* medium mode setting */
799 ret = sr9800_set_default_mode(dev);
800 if (ret < 0)
801 goto out;
802
803 if (dev->udev->speed == USB_SPEED_HIGH) {
804 ret = sr_write_cmd(dev, SR_CMD_BULKIN_SIZE,
805 SR9800_BULKIN_SIZE[SR9800_MAX_BULKIN_4K].byte_cnt,
806 SR9800_BULKIN_SIZE[SR9800_MAX_BULKIN_4K].threshold,
807 0, NULL);
808 if (ret < 0) {
809 netdev_err(dev->net, "Reset RX_CTL failed: %d\n", ret);
810 goto out;
811 }
812 dev->rx_urb_size =
813 SR9800_BULKIN_SIZE[SR9800_MAX_BULKIN_4K].size;
814 } else {
815 ret = sr_write_cmd(dev, SR_CMD_BULKIN_SIZE,
816 SR9800_BULKIN_SIZE[SR9800_MAX_BULKIN_2K].byte_cnt,
817 SR9800_BULKIN_SIZE[SR9800_MAX_BULKIN_2K].threshold,
818 0, NULL);
819 if (ret < 0) {
820 netdev_err(dev->net, "Reset RX_CTL failed: %d\n", ret);
821 goto out;
822 }
823 dev->rx_urb_size =
824 SR9800_BULKIN_SIZE[SR9800_MAX_BULKIN_2K].size;
825 }
826 netdev_dbg(dev->net, "%s : setting rx_urb_size with : %ld\n", __func__,
827 dev->rx_urb_size);
828 return 0;
829
830out:
831 return ret;
832}
833
834static const struct driver_info sr9800_driver_info = {
835 .description = "CoreChip SR9800 USB 2.0 Ethernet",
836 .bind = sr9800_bind,
837 .status = sr_status,
838 .link_reset = sr9800_link_reset,
839 .reset = sr9800_reset,
840 .flags = DRIVER_FLAG,
841 .rx_fixup = sr_rx_fixup,
842 .tx_fixup = sr_tx_fixup,
843};
844
845static const struct usb_device_id products[] = {
846 {
847 USB_DEVICE(0x0fe6, 0x9800), /* SR9800 Device */
848 .driver_info = (unsigned long) &sr9800_driver_info,
849 },
850 {}, /* END */
851};
852
853MODULE_DEVICE_TABLE(usb, products);
854
855static struct usb_driver sr_driver = {
856 .name = DRIVER_NAME,
857 .id_table = products,
858 .probe = usbnet_probe,
859 .suspend = usbnet_suspend,
860 .resume = usbnet_resume,
861 .disconnect = usbnet_disconnect,
862 .supports_autosuspend = 1,
863};
864
865module_usb_driver(sr_driver);
866
867MODULE_AUTHOR("Liu Junliang <liujunliang_ljl@163.com>");
868MODULE_VERSION(DRIVER_VERSION);
869MODULE_DESCRIPTION("SR9800 USB 2.0 USB2NET Dev : http://www.corechip-sz.com");
870MODULE_LICENSE("GPL");
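
A note on the framing that sr_tx_fixup() and sr_rx_fixup() above implement: each Ethernet frame in a bulk transfer is prefixed by a 32-bit little-endian header whose low 16 bits carry the 11-bit packet length and whose high 16 bits carry the bitwise complement of that length, which is what the size != ((~header >> 16) & 0x07ff) check validates. The following stand-alone sketch (user-space C for illustration only, not part of the driver) shows how such a header is built and verified:

	#include <stdint.h>
	#include <stdio.h>

	/* Build the 32-bit prefix: low half = length, high half = ~length
	 * (only the low 11 bits of each half are meaningful on the wire).
	 */
	static uint32_t sr_build_header(uint16_t len)
	{
		return ((uint32_t)(len ^ 0xffff) << 16) | len;
	}

	/* Recover and sanity-check the length, mirroring sr_rx_fixup(). */
	static int sr_check_header(uint32_t header, uint16_t *len)
	{
		uint16_t size = header & 0x7ff;

		if (size != ((~header >> 16) & 0x07ff))
			return -1;	/* corrupted header */
		*len = size;
		return 0;
	}

	int main(void)
	{
		uint16_t len;
		uint32_t hdr = sr_build_header(60);

		if (!sr_check_header(hdr, &len))
			printf("frame length %u, header 0x%08x\n", len, hdr);
		return 0;
	}
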
diff --git a/drivers/net/usb/sr9800.h b/drivers/net/usb/sr9800.h
new file mode 100644
index 000000000000..18f670251275
--- /dev/null
+++ b/drivers/net/usb/sr9800.h
@@ -0,0 +1,202 @@
1/* CoreChip-sz SR9800 one chip USB 2.0 Ethernet Devices
2 *
3 * Author : Liu Junliang <liujunliang_ljl@163.com>
4 *
5 * This file is licensed under the terms of the GNU General Public License
6 * version 2. This program is licensed "as is" without any warranty of any
7 * kind, whether express or implied.
8 */
9
10#ifndef _SR9800_H
11#define _SR9800_H
12
13/* SR9800 spec. command table on Linux Platform */
14
15/* command : Software Station Management Control Reg */
16#define SR_CMD_SET_SW_MII 0x06
17/* command : PHY Read Reg */
18#define SR_CMD_READ_MII_REG 0x07
19/* command : PHY Write Reg */
20#define SR_CMD_WRITE_MII_REG 0x08
21/* command : Hardware Station Management Control Reg */
22#define SR_CMD_SET_HW_MII 0x0a
23/* command : SROM Read Reg */
24#define SR_CMD_READ_EEPROM 0x0b
25/* command : SROM Write Reg */
26#define SR_CMD_WRITE_EEPROM 0x0c
27/* command : SROM Write Enable Reg */
28#define SR_CMD_WRITE_ENABLE 0x0d
29/* command : SROM Write Disable Reg */
30#define SR_CMD_WRITE_DISABLE 0x0e
31/* command : RX Control Read Reg */
32#define SR_CMD_READ_RX_CTL 0x0f
33#define SR_RX_CTL_PRO (1 << 0)
34#define SR_RX_CTL_AMALL (1 << 1)
35#define SR_RX_CTL_SEP (1 << 2)
36#define SR_RX_CTL_AB (1 << 3)
37#define SR_RX_CTL_AM (1 << 4)
38#define SR_RX_CTL_AP (1 << 5)
39#define SR_RX_CTL_ARP (1 << 6)
40#define SR_RX_CTL_SO (1 << 7)
41#define SR_RX_CTL_RH1M (1 << 8)
42#define SR_RX_CTL_RH2M (1 << 9)
43#define SR_RX_CTL_RH3M (1 << 10)
44/* command : RX Control Write Reg */
45#define SR_CMD_WRITE_RX_CTL 0x10
46/* command : IPG0/IPG1/IPG2 Control Read Reg */
47#define SR_CMD_READ_IPG012 0x11
48/* command : IPG0/IPG1/IPG2 Control Write Reg */
49#define SR_CMD_WRITE_IPG012 0x12
50/* command : Node ID Read Reg */
51#define SR_CMD_READ_NODE_ID 0x13
52/* command : Node ID Write Reg */
53#define SR_CMD_WRITE_NODE_ID 0x14
54/* command : Multicast Filter Array Read Reg */
55#define SR_CMD_READ_MULTI_FILTER 0x15
56/* command : Multicast Filter Array Write Reg */
57#define SR_CMD_WRITE_MULTI_FILTER 0x16
58/* command : Eth/HomePNA PHY Address Reg */
59#define SR_CMD_READ_PHY_ID 0x19
60/* command : Medium Status Read Reg */
61#define SR_CMD_READ_MEDIUM_STATUS 0x1a
62#define SR_MONITOR_LINK (1 << 1)
63#define SR_MONITOR_MAGIC (1 << 2)
64#define SR_MONITOR_HSFS (1 << 4)
65/* command : Medium Status Write Reg */
66#define SR_CMD_WRITE_MEDIUM_MODE 0x1b
67#define SR_MEDIUM_GM (1 << 0)
68#define SR_MEDIUM_FD (1 << 1)
69#define SR_MEDIUM_AC (1 << 2)
70#define SR_MEDIUM_ENCK (1 << 3)
71#define SR_MEDIUM_RFC (1 << 4)
72#define SR_MEDIUM_TFC (1 << 5)
73#define SR_MEDIUM_JFE (1 << 6)
74#define SR_MEDIUM_PF (1 << 7)
75#define SR_MEDIUM_RE (1 << 8)
76#define SR_MEDIUM_PS (1 << 9)
77#define SR_MEDIUM_RSV (1 << 10)
78#define SR_MEDIUM_SBP (1 << 11)
79#define SR_MEDIUM_SM (1 << 12)
80/* command : Monitor Mode Status Read Reg */
81#define SR_CMD_READ_MONITOR_MODE 0x1c
82/* command : Monitor Mode Status Write Reg */
83#define SR_CMD_WRITE_MONITOR_MODE 0x1d
84/* command : GPIO Status Read Reg */
85#define SR_CMD_READ_GPIOS 0x1e
86#define SR_GPIO_GPO0EN (1 << 0) /* GPIO0 Output enable */
87#define SR_GPIO_GPO_0 (1 << 1) /* GPIO0 Output value */
88#define SR_GPIO_GPO1EN (1 << 2) /* GPIO1 Output enable */
89#define SR_GPIO_GPO_1 (1 << 3) /* GPIO1 Output value */
90#define SR_GPIO_GPO2EN (1 << 4) /* GPIO2 Output enable */
91#define SR_GPIO_GPO_2 (1 << 5) /* GPIO2 Output value */
92#define SR_GPIO_RESERVED (1 << 6) /* Reserved */
93#define SR_GPIO_RSE (1 << 7) /* Reload serial EEPROM */
94/* command : GPIO Status Write Reg */
95#define SR_CMD_WRITE_GPIOS 0x1f
96/* command : Eth PHY Power and Reset Control Reg */
97#define SR_CMD_SW_RESET 0x20
98#define SR_SWRESET_CLEAR 0x00
99#define SR_SWRESET_RR (1 << 0)
100#define SR_SWRESET_RT (1 << 1)
101#define SR_SWRESET_PRTE (1 << 2)
102#define SR_SWRESET_PRL (1 << 3)
103#define SR_SWRESET_BZ (1 << 4)
104#define SR_SWRESET_IPRL (1 << 5)
105#define SR_SWRESET_IPPD (1 << 6)
106/* command : Software Interface Selection Status Read Reg */
107#define SR_CMD_SW_PHY_STATUS 0x21
108/* command : Software Interface Selection Status Write Reg */
109#define SR_CMD_SW_PHY_SELECT 0x22
110/* command : BULK in Buffer Size Reg */
111#define SR_CMD_BULKIN_SIZE 0x2A
112/* command : LED_MUX Control Reg */
113#define SR_CMD_LED_MUX 0x70
114#define SR_LED_MUX_TX_ACTIVE (1 << 0)
115#define SR_LED_MUX_RX_ACTIVE (1 << 1)
116#define SR_LED_MUX_COLLISION (1 << 2)
117#define SR_LED_MUX_DUP_COL (1 << 3)
118#define SR_LED_MUX_DUP (1 << 4)
119#define SR_LED_MUX_SPEED (1 << 5)
120#define SR_LED_MUX_LINK_ACTIVE (1 << 6)
121#define SR_LED_MUX_LINK (1 << 7)
122
123/* Register Access Flags */
124#define SR_REQ_RD_REG (USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE)
125#define SR_REQ_WR_REG (USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE)
126
127/* Multicast Filter Array size & Max Number */
128#define SR_MCAST_FILTER_SIZE 8
129#define SR_MAX_MCAST 64
130
131/* IPG0/1/2 Default Value */
132#define SR9800_IPG0_DEFAULT 0x15
133#define SR9800_IPG1_DEFAULT 0x0c
134#define SR9800_IPG2_DEFAULT 0x12
135
136/* Medium Status Default Mode */
137#define SR9800_MEDIUM_DEFAULT \
138 (SR_MEDIUM_FD | SR_MEDIUM_RFC | \
139 SR_MEDIUM_TFC | SR_MEDIUM_PS | \
140 SR_MEDIUM_AC | SR_MEDIUM_RE)
141
142/* RX Control Default Setting */
143#define SR_DEFAULT_RX_CTL \
144 (SR_RX_CTL_SO | SR_RX_CTL_AB | SR_RX_CTL_RH1M)
145
146/* EEPROM Magic Number & EEPROM Size */
147#define SR_EEPROM_MAGIC 0xdeadbeef
148#define SR9800_EEPROM_LEN 0xff
149
150/* SR9800 Driver Version and Driver Name */
151#define DRIVER_VERSION "11-Nov-2013"
152#define DRIVER_NAME "CoreChips"
153#define DRIVER_FLAG \
154 (FLAG_ETHER | FLAG_FRAMING_AX | FLAG_LINK_INTR | FLAG_MULTI_PACKET)
155
156/* SR9800 BULKIN Buffer Size */
157#define SR9800_MAX_BULKIN_2K 0
158#define SR9800_MAX_BULKIN_4K 1
159#define SR9800_MAX_BULKIN_6K 2
160#define SR9800_MAX_BULKIN_8K 3
161#define SR9800_MAX_BULKIN_16K 4
162#define SR9800_MAX_BULKIN_20K 5
163#define SR9800_MAX_BULKIN_24K 6
164#define SR9800_MAX_BULKIN_32K 7
165
166struct {unsigned short size, byte_cnt, threshold; } SR9800_BULKIN_SIZE[] = {
167 /* 2k */
168 {2048, 0x8000, 0x8001},
169 /* 4k */
170 {4096, 0x8100, 0x8147},
171 /* 6k */
172 {6144, 0x8200, 0x81EB},
173 /* 8k */
174 {8192, 0x8300, 0x83D7},
175	/* 16k */
176 {16384, 0x8400, 0x851E},
177 /* 20k */
178 {20480, 0x8500, 0x8666},
179 /* 24k */
180 {24576, 0x8600, 0x87AE},
181 /* 32k */
182 {32768, 0x8700, 0x8A3D},
183};
184
185/* This structure cannot exceed sizeof(unsigned long [5]) AKA 20 bytes */
186struct sr_data {
187 u8 multi_filter[SR_MCAST_FILTER_SIZE];
188 u8 mac_addr[ETH_ALEN];
189 u8 phymode;
190 u8 ledmode;
191 u8 eeprom_len;
192};
193
194struct sr9800_int_data {
195 __le16 res1;
196 u8 link;
197 __le16 res2;
198 u8 status;
199 __le16 res3;
200} __packed;
201
202#endif /* _SR9800_H */
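
The sr_data structure above is never allocated on its own; as the comment notes, it is overlaid on the scratch space that struct usbnet provides (an unsigned long data[5] array, i.e. 20 bytes with 32-bit longs), which is why sr9800.c casts &dev->data throughout. A hedged sketch of how that size constraint could be turned into a build-time check rather than a comment (hypothetical addition, not in this patch, and it assumes the usbnet scratch field really is named data):

	/* Hypothetical compile-time guard, not part of this patch.  Placed
	 * inside any function in sr9800.c (e.g. at the top of sr9800_bind()),
	 * it makes the 20-byte limit described above a hard build error.
	 */
	BUILD_BUG_ON(sizeof(struct sr_data) >
		     sizeof(((struct usbnet *)NULL)->data));
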
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index 026a313c2d2d..b0f705c2378f 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -469,7 +469,6 @@ static inline struct hlist_head *vxlan_fdb_head(struct vxlan_dev *vxlan,
469/* Look up Ethernet address in forwarding table */ 469/* Look up Ethernet address in forwarding table */
470static struct vxlan_fdb *__vxlan_find_mac(struct vxlan_dev *vxlan, 470static struct vxlan_fdb *__vxlan_find_mac(struct vxlan_dev *vxlan,
471 const u8 *mac) 471 const u8 *mac)
472
473{ 472{
474 struct hlist_head *head = vxlan_fdb_head(vxlan, mac); 473 struct hlist_head *head = vxlan_fdb_head(vxlan, mac);
475 struct vxlan_fdb *f; 474 struct vxlan_fdb *f;
@@ -596,10 +595,8 @@ static struct sk_buff **vxlan_gro_receive(struct sk_buff **head, struct sk_buff
596 NAPI_GRO_CB(p)->same_flow = 0; 595 NAPI_GRO_CB(p)->same_flow = 0;
597 continue; 596 continue;
598 } 597 }
599 goto found;
600 } 598 }
601 599
602found:
603 type = eh->h_proto; 600 type = eh->h_proto;
604 601
605 rcu_read_lock(); 602 rcu_read_lock();
diff --git a/drivers/net/wan/dlci.c b/drivers/net/wan/dlci.c
index 0d1c7592efa0..19f7cb2cdef3 100644
--- a/drivers/net/wan/dlci.c
+++ b/drivers/net/wan/dlci.c
@@ -71,12 +71,9 @@ static int dlci_header(struct sk_buff *skb, struct net_device *dev,
71 const void *saddr, unsigned len) 71 const void *saddr, unsigned len)
72{ 72{
73 struct frhdr hdr; 73 struct frhdr hdr;
74 struct dlci_local *dlp;
75 unsigned int hlen; 74 unsigned int hlen;
76 char *dest; 75 char *dest;
77 76
78 dlp = netdev_priv(dev);
79
80 hdr.control = FRAD_I_UI; 77 hdr.control = FRAD_I_UI;
81 switch (type) 78 switch (type)
82 { 79 {
@@ -107,11 +104,9 @@ static int dlci_header(struct sk_buff *skb, struct net_device *dev,
107 104
108static void dlci_receive(struct sk_buff *skb, struct net_device *dev) 105static void dlci_receive(struct sk_buff *skb, struct net_device *dev)
109{ 106{
110 struct dlci_local *dlp;
111 struct frhdr *hdr; 107 struct frhdr *hdr;
112 int process, header; 108 int process, header;
113 109
114 dlp = netdev_priv(dev);
115 if (!pskb_may_pull(skb, sizeof(*hdr))) { 110 if (!pskb_may_pull(skb, sizeof(*hdr))) {
116 netdev_notice(dev, "invalid data no header\n"); 111 netdev_notice(dev, "invalid data no header\n");
117 dev->stats.rx_errors++; 112 dev->stats.rx_errors++;
diff --git a/drivers/net/wireless/ath/ar5523/ar5523.c b/drivers/net/wireless/ath/ar5523/ar5523.c
index 8aa20df55e50..507d9a9ee69a 100644
--- a/drivers/net/wireless/ath/ar5523/ar5523.c
+++ b/drivers/net/wireless/ath/ar5523/ar5523.c
@@ -1764,7 +1764,7 @@ static struct usb_device_id ar5523_id_table[] = {
1764 AR5523_DEVICE_UG(0x07d1, 0x3a07), /* D-Link / WUA-2340 rev A1 */ 1764 AR5523_DEVICE_UG(0x07d1, 0x3a07), /* D-Link / WUA-2340 rev A1 */
1765 AR5523_DEVICE_UG(0x1690, 0x0712), /* Gigaset / AR5523 */ 1765 AR5523_DEVICE_UG(0x1690, 0x0712), /* Gigaset / AR5523 */
1766 AR5523_DEVICE_UG(0x1690, 0x0710), /* Gigaset / SMCWUSBTG */ 1766 AR5523_DEVICE_UG(0x1690, 0x0710), /* Gigaset / SMCWUSBTG */
1767 AR5523_DEVICE_UG(0x129b, 0x160c), /* Gigaset / USB stick 108 1767 AR5523_DEVICE_UG(0x129b, 0x160b), /* Gigaset / USB stick 108
1768 (CyberTAN Technology) */ 1768 (CyberTAN Technology) */
1769 AR5523_DEVICE_UG(0x16ab, 0x7801), /* Globalsun / AR5523_1 */ 1769 AR5523_DEVICE_UG(0x16ab, 0x7801), /* Globalsun / AR5523_1 */
1770 AR5523_DEVICE_UX(0x16ab, 0x7811), /* Globalsun / AR5523_2 */ 1770 AR5523_DEVICE_UX(0x16ab, 0x7811), /* Globalsun / AR5523_2 */
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
index 25243cbc07f0..b8daff78b9d1 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
@@ -5065,6 +5065,10 @@ static u16 ar9003_hw_get_max_edge_power(struct ar9300_eeprom *eep,
5065 break; 5065 break;
5066 } 5066 }
5067 } 5067 }
5068
5069 if (is2GHz && !twiceMaxEdgePower)
5070 twiceMaxEdgePower = 60;
5071
5068 return twiceMaxEdgePower; 5072 return twiceMaxEdgePower;
5069} 5073}
5070 5074
diff --git a/drivers/net/wireless/ath/ath9k/htc.h b/drivers/net/wireless/ath/ath9k/htc.h
index 58da3468d1f0..99a203174f45 100644
--- a/drivers/net/wireless/ath/ath9k/htc.h
+++ b/drivers/net/wireless/ath/ath9k/htc.h
@@ -262,6 +262,8 @@ enum tid_aggr_state {
262struct ath9k_htc_sta { 262struct ath9k_htc_sta {
263 u8 index; 263 u8 index;
264 enum tid_aggr_state tid_state[ATH9K_HTC_MAX_TID]; 264 enum tid_aggr_state tid_state[ATH9K_HTC_MAX_TID];
265 struct work_struct rc_update_work;
266 struct ath9k_htc_priv *htc_priv;
265}; 267};
266 268
267#define ATH9K_HTC_RXBUF 256 269#define ATH9K_HTC_RXBUF 256
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_init.c b/drivers/net/wireless/ath/ath9k/htc_drv_init.c
index f4e1de20d99c..c57d6b859c04 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_init.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_init.c
@@ -34,6 +34,10 @@ static int ath9k_htc_btcoex_enable;
34module_param_named(btcoex_enable, ath9k_htc_btcoex_enable, int, 0444); 34module_param_named(btcoex_enable, ath9k_htc_btcoex_enable, int, 0444);
35MODULE_PARM_DESC(btcoex_enable, "Enable wifi-BT coexistence"); 35MODULE_PARM_DESC(btcoex_enable, "Enable wifi-BT coexistence");
36 36
37static int ath9k_ps_enable;
38module_param_named(ps_enable, ath9k_ps_enable, int, 0444);
39MODULE_PARM_DESC(ps_enable, "Enable WLAN PowerSave");
40
37#define CHAN2G(_freq, _idx) { \ 41#define CHAN2G(_freq, _idx) { \
38 .center_freq = (_freq), \ 42 .center_freq = (_freq), \
39 .hw_value = (_idx), \ 43 .hw_value = (_idx), \
@@ -725,12 +729,14 @@ static void ath9k_set_hw_capab(struct ath9k_htc_priv *priv,
725 IEEE80211_HW_SPECTRUM_MGMT | 729 IEEE80211_HW_SPECTRUM_MGMT |
726 IEEE80211_HW_HAS_RATE_CONTROL | 730 IEEE80211_HW_HAS_RATE_CONTROL |
727 IEEE80211_HW_RX_INCLUDES_FCS | 731 IEEE80211_HW_RX_INCLUDES_FCS |
728 IEEE80211_HW_SUPPORTS_PS |
729 IEEE80211_HW_PS_NULLFUNC_STACK | 732 IEEE80211_HW_PS_NULLFUNC_STACK |
730 IEEE80211_HW_REPORTS_TX_ACK_STATUS | 733 IEEE80211_HW_REPORTS_TX_ACK_STATUS |
731 IEEE80211_HW_MFP_CAPABLE | 734 IEEE80211_HW_MFP_CAPABLE |
732 IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING; 735 IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING;
733 736
737 if (ath9k_ps_enable)
738 hw->flags |= IEEE80211_HW_SUPPORTS_PS;
739
734 hw->wiphy->interface_modes = 740 hw->wiphy->interface_modes =
735 BIT(NL80211_IFTYPE_STATION) | 741 BIT(NL80211_IFTYPE_STATION) |
736 BIT(NL80211_IFTYPE_ADHOC) | 742 BIT(NL80211_IFTYPE_ADHOC) |
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_main.c b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
index 608d739d1378..c9254a61ca52 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_main.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
@@ -1270,18 +1270,50 @@ static void ath9k_htc_configure_filter(struct ieee80211_hw *hw,
1270 mutex_unlock(&priv->mutex); 1270 mutex_unlock(&priv->mutex);
1271} 1271}
1272 1272
1273static void ath9k_htc_sta_rc_update_work(struct work_struct *work)
1274{
1275 struct ath9k_htc_sta *ista =
1276 container_of(work, struct ath9k_htc_sta, rc_update_work);
1277 struct ieee80211_sta *sta =
1278 container_of((void *)ista, struct ieee80211_sta, drv_priv);
1279 struct ath9k_htc_priv *priv = ista->htc_priv;
1280 struct ath_common *common = ath9k_hw_common(priv->ah);
1281 struct ath9k_htc_target_rate trate;
1282
1283 mutex_lock(&priv->mutex);
1284 ath9k_htc_ps_wakeup(priv);
1285
1286 memset(&trate, 0, sizeof(struct ath9k_htc_target_rate));
1287 ath9k_htc_setup_rate(priv, sta, &trate);
1288 if (!ath9k_htc_send_rate_cmd(priv, &trate))
1289 ath_dbg(common, CONFIG,
1290 "Supported rates for sta: %pM updated, rate caps: 0x%X\n",
1291 sta->addr, be32_to_cpu(trate.capflags));
1292 else
1293 ath_dbg(common, CONFIG,
1294 "Unable to update supported rates for sta: %pM\n",
1295 sta->addr);
1296
1297 ath9k_htc_ps_restore(priv);
1298 mutex_unlock(&priv->mutex);
1299}
1300
1273static int ath9k_htc_sta_add(struct ieee80211_hw *hw, 1301static int ath9k_htc_sta_add(struct ieee80211_hw *hw,
1274 struct ieee80211_vif *vif, 1302 struct ieee80211_vif *vif,
1275 struct ieee80211_sta *sta) 1303 struct ieee80211_sta *sta)
1276{ 1304{
1277 struct ath9k_htc_priv *priv = hw->priv; 1305 struct ath9k_htc_priv *priv = hw->priv;
1306 struct ath9k_htc_sta *ista = (struct ath9k_htc_sta *) sta->drv_priv;
1278 int ret; 1307 int ret;
1279 1308
1280 mutex_lock(&priv->mutex); 1309 mutex_lock(&priv->mutex);
1281 ath9k_htc_ps_wakeup(priv); 1310 ath9k_htc_ps_wakeup(priv);
1282 ret = ath9k_htc_add_station(priv, vif, sta); 1311 ret = ath9k_htc_add_station(priv, vif, sta);
1283 if (!ret) 1312 if (!ret) {
1313 INIT_WORK(&ista->rc_update_work, ath9k_htc_sta_rc_update_work);
1314 ista->htc_priv = priv;
1284 ath9k_htc_init_rate(priv, sta); 1315 ath9k_htc_init_rate(priv, sta);
1316 }
1285 ath9k_htc_ps_restore(priv); 1317 ath9k_htc_ps_restore(priv);
1286 mutex_unlock(&priv->mutex); 1318 mutex_unlock(&priv->mutex);
1287 1319
@@ -1293,12 +1325,13 @@ static int ath9k_htc_sta_remove(struct ieee80211_hw *hw,
1293 struct ieee80211_sta *sta) 1325 struct ieee80211_sta *sta)
1294{ 1326{
1295 struct ath9k_htc_priv *priv = hw->priv; 1327 struct ath9k_htc_priv *priv = hw->priv;
1296 struct ath9k_htc_sta *ista; 1328 struct ath9k_htc_sta *ista = (struct ath9k_htc_sta *) sta->drv_priv;
1297 int ret; 1329 int ret;
1298 1330
1331 cancel_work_sync(&ista->rc_update_work);
1332
1299 mutex_lock(&priv->mutex); 1333 mutex_lock(&priv->mutex);
1300 ath9k_htc_ps_wakeup(priv); 1334 ath9k_htc_ps_wakeup(priv);
1301 ista = (struct ath9k_htc_sta *) sta->drv_priv;
1302 htc_sta_drain(priv->htc, ista->index); 1335 htc_sta_drain(priv->htc, ista->index);
1303 ret = ath9k_htc_remove_station(priv, vif, sta); 1336 ret = ath9k_htc_remove_station(priv, vif, sta);
1304 ath9k_htc_ps_restore(priv); 1337 ath9k_htc_ps_restore(priv);
@@ -1311,28 +1344,12 @@ static void ath9k_htc_sta_rc_update(struct ieee80211_hw *hw,
1311 struct ieee80211_vif *vif, 1344 struct ieee80211_vif *vif,
1312 struct ieee80211_sta *sta, u32 changed) 1345 struct ieee80211_sta *sta, u32 changed)
1313{ 1346{
1314 struct ath9k_htc_priv *priv = hw->priv; 1347 struct ath9k_htc_sta *ista = (struct ath9k_htc_sta *) sta->drv_priv;
1315 struct ath_common *common = ath9k_hw_common(priv->ah);
1316 struct ath9k_htc_target_rate trate;
1317
1318 mutex_lock(&priv->mutex);
1319 ath9k_htc_ps_wakeup(priv);
1320 1348
1321 if (changed & IEEE80211_RC_SUPP_RATES_CHANGED) { 1349 if (!(changed & IEEE80211_RC_SUPP_RATES_CHANGED))
1322 memset(&trate, 0, sizeof(struct ath9k_htc_target_rate)); 1350 return;
1323 ath9k_htc_setup_rate(priv, sta, &trate);
1324 if (!ath9k_htc_send_rate_cmd(priv, &trate))
1325 ath_dbg(common, CONFIG,
1326 "Supported rates for sta: %pM updated, rate caps: 0x%X\n",
1327 sta->addr, be32_to_cpu(trate.capflags));
1328 else
1329 ath_dbg(common, CONFIG,
1330 "Unable to update supported rates for sta: %pM\n",
1331 sta->addr);
1332 }
1333 1351
1334 ath9k_htc_ps_restore(priv); 1352 schedule_work(&ista->rc_update_work);
1335 mutex_unlock(&priv->mutex);
1336} 1353}
1337 1354
1338static int ath9k_htc_conf_tx(struct ieee80211_hw *hw, 1355static int ath9k_htc_conf_tx(struct ieee80211_hw *hw,
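
The htc_drv_main.c change above stops doing the firmware rate update inline in ath9k_htc_sta_rc_update() (which took priv->mutex and could sleep) and instead only schedules rc_update_work, presumably because mac80211 may invoke the sta_rc_update callback from atomic context where a mutex must not be taken. A generic sketch of that defer-to-workqueue pattern (illustrative names only, not the driver's code):

	#include <linux/workqueue.h>
	#include <linux/mutex.h>

	struct demo_priv {
		struct mutex lock;
	};

	struct demo_sta {
		struct work_struct update_work;
		struct demo_priv *priv;
	};

	/* runs in process context, so sleeping locks are fine here */
	static void demo_update_work(struct work_struct *work)
	{
		struct demo_sta *dsta = container_of(work, struct demo_sta,
						     update_work);

		mutex_lock(&dsta->priv->lock);
		/* ... push the new rate set to the firmware ... */
		mutex_unlock(&dsta->priv->lock);
	}

	/* callback that may run in atomic context: only queue the work */
	static void demo_rc_update(struct demo_sta *dsta)
	{
		schedule_work(&dsta->update_work);
	}

	/* at setup:    INIT_WORK(&dsta->update_work, demo_update_work);
	 * at teardown: cancel_work_sync(&dsta->update_work);
	 */
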
diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
index fbf43c05713f..11eab9f01fd8 100644
--- a/drivers/net/wireless/ath/ath9k/hw.c
+++ b/drivers/net/wireless/ath/ath9k/hw.c
@@ -1316,7 +1316,7 @@ static bool ath9k_hw_set_reset(struct ath_hw *ah, int type)
1316 if (AR_SREV_9300_20_OR_LATER(ah)) 1316 if (AR_SREV_9300_20_OR_LATER(ah))
1317 udelay(50); 1317 udelay(50);
1318 else if (AR_SREV_9100(ah)) 1318 else if (AR_SREV_9100(ah))
1319 udelay(10000); 1319 mdelay(10);
1320 else 1320 else
1321 udelay(100); 1321 udelay(100);
1322 1322
@@ -2051,9 +2051,8 @@ static bool ath9k_hw_set_power_awake(struct ath_hw *ah)
2051 2051
2052 REG_SET_BIT(ah, AR_RTC_FORCE_WAKE, 2052 REG_SET_BIT(ah, AR_RTC_FORCE_WAKE,
2053 AR_RTC_FORCE_WAKE_EN); 2053 AR_RTC_FORCE_WAKE_EN);
2054
2055 if (AR_SREV_9100(ah)) 2054 if (AR_SREV_9100(ah))
2056 udelay(10000); 2055 mdelay(10);
2057 else 2056 else
2058 udelay(50); 2057 udelay(50);
2059 2058
diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c
index c36de303c8f3..1fc2e5a26b52 100644
--- a/drivers/net/wireless/ath/ath9k/init.c
+++ b/drivers/net/wireless/ath/ath9k/init.c
@@ -57,6 +57,10 @@ static int ath9k_bt_ant_diversity;
57module_param_named(bt_ant_diversity, ath9k_bt_ant_diversity, int, 0444); 57module_param_named(bt_ant_diversity, ath9k_bt_ant_diversity, int, 0444);
58MODULE_PARM_DESC(bt_ant_diversity, "Enable WLAN/BT RX antenna diversity"); 58MODULE_PARM_DESC(bt_ant_diversity, "Enable WLAN/BT RX antenna diversity");
59 59
60static int ath9k_ps_enable;
61module_param_named(ps_enable, ath9k_ps_enable, int, 0444);
62MODULE_PARM_DESC(ps_enable, "Enable WLAN PowerSave");
63
60bool is_ath9k_unloaded; 64bool is_ath9k_unloaded;
61/* We use the hw_value as an index into our private channel structure */ 65/* We use the hw_value as an index into our private channel structure */
62 66
@@ -903,13 +907,15 @@ static void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw)
903 hw->flags = IEEE80211_HW_RX_INCLUDES_FCS | 907 hw->flags = IEEE80211_HW_RX_INCLUDES_FCS |
904 IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING | 908 IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING |
905 IEEE80211_HW_SIGNAL_DBM | 909 IEEE80211_HW_SIGNAL_DBM |
906 IEEE80211_HW_SUPPORTS_PS |
907 IEEE80211_HW_PS_NULLFUNC_STACK | 910 IEEE80211_HW_PS_NULLFUNC_STACK |
908 IEEE80211_HW_SPECTRUM_MGMT | 911 IEEE80211_HW_SPECTRUM_MGMT |
909 IEEE80211_HW_REPORTS_TX_ACK_STATUS | 912 IEEE80211_HW_REPORTS_TX_ACK_STATUS |
910 IEEE80211_HW_SUPPORTS_RC_TABLE | 913 IEEE80211_HW_SUPPORTS_RC_TABLE |
911 IEEE80211_HW_SUPPORTS_HT_CCK_RATES; 914 IEEE80211_HW_SUPPORTS_HT_CCK_RATES;
912 915
916 if (ath9k_ps_enable)
917 hw->flags |= IEEE80211_HW_SUPPORTS_PS;
918
913 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT) { 919 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT) {
914 hw->flags |= IEEE80211_HW_AMPDU_AGGREGATION; 920 hw->flags |= IEEE80211_HW_AMPDU_AGGREGATION;
915 921
diff --git a/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c b/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c
index f06f4cbe1317..725e954d8475 100644
--- a/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c
+++ b/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c
@@ -182,6 +182,11 @@ static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg,
182 182
183 for (ch_idx = 0; ch_idx < IWL_NUM_CHANNELS; ch_idx++) { 183 for (ch_idx = 0; ch_idx < IWL_NUM_CHANNELS; ch_idx++) {
184 ch_flags = __le16_to_cpup(nvm_ch_flags + ch_idx); 184 ch_flags = __le16_to_cpup(nvm_ch_flags + ch_idx);
185
186 if (ch_idx >= NUM_2GHZ_CHANNELS &&
187 !data->sku_cap_band_52GHz_enable)
188 ch_flags &= ~NVM_CHANNEL_VALID;
189
185 if (!(ch_flags & NVM_CHANNEL_VALID)) { 190 if (!(ch_flags & NVM_CHANNEL_VALID)) {
186 IWL_DEBUG_EEPROM(dev, 191 IWL_DEBUG_EEPROM(dev,
187 "Ch. %d Flags %x [%sGHz] - No traffic\n", 192 "Ch. %d Flags %x [%sGHz] - No traffic\n",
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h
index 73cbba7424f2..9426905de6b2 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h
@@ -504,6 +504,7 @@ struct iwl_scan_offload_profile {
504 * @match_notify: clients waiting for match found notification 504 * @match_notify: clients waiting for match found notification
505 * @pass_match: clients waiting for the results 505 * @pass_match: clients waiting for the results
506 * @active_clients: active clients bitmap - enum scan_framework_client 506 * @active_clients: active clients bitmap - enum scan_framework_client
507 * @any_beacon_notify: clients waiting for match notification without match
507 */ 508 */
508struct iwl_scan_offload_profile_cfg { 509struct iwl_scan_offload_profile_cfg {
509 struct iwl_scan_offload_profile profiles[IWL_SCAN_MAX_PROFILES]; 510 struct iwl_scan_offload_profile profiles[IWL_SCAN_MAX_PROFILES];
@@ -512,7 +513,8 @@ struct iwl_scan_offload_profile_cfg {
512 u8 match_notify; 513 u8 match_notify;
513 u8 pass_match; 514 u8 pass_match;
514 u8 active_clients; 515 u8 active_clients;
515 u8 reserved[3]; 516 u8 any_beacon_notify;
517 u8 reserved[2];
516} __packed; 518} __packed;
517 519
518/** 520/**
diff --git a/drivers/net/wireless/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/iwlwifi/mvm/mac80211.c
index c49b5073c251..6bf9766e5982 100644
--- a/drivers/net/wireless/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/iwlwifi/mvm/mac80211.c
@@ -246,7 +246,7 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
246 else 246 else
247 hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT; 247 hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
248 248
249 if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_SCHED_SCAN) { 249 if (0 && mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_SCHED_SCAN) {
250 hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_SCHED_SCAN; 250 hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_SCHED_SCAN;
251 hw->wiphy->max_sched_scan_ssids = PROBE_OPTION_MAX; 251 hw->wiphy->max_sched_scan_ssids = PROBE_OPTION_MAX;
252 hw->wiphy->max_match_sets = IWL_SCAN_MAX_PROFILES; 252 hw->wiphy->max_match_sets = IWL_SCAN_MAX_PROFILES;
diff --git a/drivers/net/wireless/iwlwifi/mvm/scan.c b/drivers/net/wireless/iwlwifi/mvm/scan.c
index 0e0007960612..742afc429c94 100644
--- a/drivers/net/wireless/iwlwifi/mvm/scan.c
+++ b/drivers/net/wireless/iwlwifi/mvm/scan.c
@@ -344,7 +344,8 @@ int iwl_mvm_scan_request(struct iwl_mvm *mvm,
344 344
345 iwl_mvm_scan_fill_ssids(cmd, req, basic_ssid ? 1 : 0); 345 iwl_mvm_scan_fill_ssids(cmd, req, basic_ssid ? 1 : 0);
346 346
347 cmd->tx_cmd.tx_flags = cpu_to_le32(TX_CMD_FLG_SEQ_CTL); 347 cmd->tx_cmd.tx_flags = cpu_to_le32(TX_CMD_FLG_SEQ_CTL |
348 TX_CMD_FLG_BT_DIS);
348 cmd->tx_cmd.sta_id = mvm->aux_sta.sta_id; 349 cmd->tx_cmd.sta_id = mvm->aux_sta.sta_id;
349 cmd->tx_cmd.life_time = cpu_to_le32(TX_CMD_LIFE_TIME_INFINITE); 350 cmd->tx_cmd.life_time = cpu_to_le32(TX_CMD_LIFE_TIME_INFINITE);
350 cmd->tx_cmd.rate_n_flags = 351 cmd->tx_cmd.rate_n_flags =
@@ -807,6 +808,8 @@ int iwl_mvm_config_sched_scan_profiles(struct iwl_mvm *mvm,
807 profile_cfg->active_clients = SCAN_CLIENT_SCHED_SCAN; 808 profile_cfg->active_clients = SCAN_CLIENT_SCHED_SCAN;
808 profile_cfg->pass_match = SCAN_CLIENT_SCHED_SCAN; 809 profile_cfg->pass_match = SCAN_CLIENT_SCHED_SCAN;
809 profile_cfg->match_notify = SCAN_CLIENT_SCHED_SCAN; 810 profile_cfg->match_notify = SCAN_CLIENT_SCHED_SCAN;
811 if (!req->n_match_sets || !req->match_sets[0].ssid.ssid_len)
812 profile_cfg->any_beacon_notify = SCAN_CLIENT_SCHED_SCAN;
810 813
811 for (i = 0; i < req->n_match_sets; i++) { 814 for (i = 0; i < req->n_match_sets; i++) {
812 profile = &profile_cfg->profiles[i]; 815 profile = &profile_cfg->profiles[i];
diff --git a/drivers/net/wireless/iwlwifi/mvm/sta.c b/drivers/net/wireless/iwlwifi/mvm/sta.c
index ec1812133235..3397f59cd4e4 100644
--- a/drivers/net/wireless/iwlwifi/mvm/sta.c
+++ b/drivers/net/wireless/iwlwifi/mvm/sta.c
@@ -652,7 +652,7 @@ int iwl_mvm_send_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
652{ 652{
653 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 653 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
654 static const u8 _baddr[] = {0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF}; 654 static const u8 _baddr[] = {0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF};
655 static const u8 *baddr = _baddr; 655 const u8 *baddr = _baddr;
656 656
657 lockdep_assert_held(&mvm->mutex); 657 lockdep_assert_held(&mvm->mutex);
658 658
diff --git a/drivers/net/wireless/iwlwifi/mvm/tx.c b/drivers/net/wireless/iwlwifi/mvm/tx.c
index 90378c217bc7..4df12fa9d336 100644
--- a/drivers/net/wireless/iwlwifi/mvm/tx.c
+++ b/drivers/net/wireless/iwlwifi/mvm/tx.c
@@ -659,8 +659,14 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
659 rcu_read_lock(); 659 rcu_read_lock();
660 660
661 sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]); 661 sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
662 /*
663 * sta can't be NULL otherwise it'd mean that the sta has been freed in
664 * the firmware while we still have packets for it in the Tx queues.
665 */
666 if (WARN_ON_ONCE(!sta))
667 goto out;
662 668
663 if (!IS_ERR_OR_NULL(sta)) { 669 if (!IS_ERR(sta)) {
664 mvmsta = iwl_mvm_sta_from_mac80211(sta); 670 mvmsta = iwl_mvm_sta_from_mac80211(sta);
665 671
666 if (tid != IWL_TID_NON_QOS) { 672 if (tid != IWL_TID_NON_QOS) {
@@ -675,7 +681,6 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
675 spin_unlock_bh(&mvmsta->lock); 681 spin_unlock_bh(&mvmsta->lock);
676 } 682 }
677 } else { 683 } else {
678 sta = NULL;
679 mvmsta = NULL; 684 mvmsta = NULL;
680 } 685 }
681 686
@@ -683,42 +688,38 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
683 * If the txq is not an AMPDU queue, there is no chance we freed 688 * If the txq is not an AMPDU queue, there is no chance we freed
684 * several skbs. Check that out... 689 * several skbs. Check that out...
685 */ 690 */
686 if (txq_id < mvm->first_agg_queue && !WARN_ON(skb_freed > 1) && 691 if (txq_id >= mvm->first_agg_queue)
687 atomic_sub_and_test(skb_freed, &mvm->pending_frames[sta_id])) { 692 goto out;
688 if (mvmsta) { 693
689 /* 694 /* We can't free more than one frame at once on a shared queue */
690 * If there are no pending frames for this STA, notify 695 WARN_ON(skb_freed > 1);
691 * mac80211 that this station can go to sleep in its 696
692 * STA table. 697 /* If we have still frames from this STA nothing to do here */
693 */ 698 if (!atomic_sub_and_test(skb_freed, &mvm->pending_frames[sta_id]))
694 if (mvmsta->vif->type == NL80211_IFTYPE_AP) 699 goto out;
695 ieee80211_sta_block_awake(mvm->hw, sta, false); 700
696 /* 701 if (mvmsta && mvmsta->vif->type == NL80211_IFTYPE_AP) {
697 * We might very well have taken mvmsta pointer while 702 /*
698 * the station was being removed. The remove flow might 703 * If there are no pending frames for this STA, notify
699 * have seen a pending_frame (because we didn't take 704 * mac80211 that this station can go to sleep in its
700 * the lock) even if now the queues are drained. So make 705 * STA table.
701 * really sure now that this the station is not being 706 * If mvmsta is not NULL, sta is valid.
702 * removed. If it is, run the drain worker to remove it. 707 */
703 */ 708 ieee80211_sta_block_awake(mvm->hw, sta, false);
704 spin_lock_bh(&mvmsta->lock); 709 }
705 sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]); 710
706 if (!sta || PTR_ERR(sta) == -EBUSY) { 711 if (PTR_ERR(sta) == -EBUSY || PTR_ERR(sta) == -ENOENT) {
707 /* 712 /*
708 * Station disappeared in the meantime: 713 * We are draining and this was the last packet - pre_rcu_remove
709 * so we are draining. 714 * has been called already. We might be after the
710 */ 715 * synchronize_net already.
711 set_bit(sta_id, mvm->sta_drained); 716 * Don't rely on iwl_mvm_rm_sta to see the empty Tx queues.
712 schedule_work(&mvm->sta_drained_wk); 717 */
713 } 718 set_bit(sta_id, mvm->sta_drained);
714 spin_unlock_bh(&mvmsta->lock); 719 schedule_work(&mvm->sta_drained_wk);
715 } else if (!mvmsta && PTR_ERR(sta) == -EBUSY) {
716 /* Tx response without STA, so we are draining */
717 set_bit(sta_id, mvm->sta_drained);
718 schedule_work(&mvm->sta_drained_wk);
719 }
720 } 720 }
721 721
722out:
722 rcu_read_unlock(); 723 rcu_read_unlock();
723} 724}
724 725
diff --git a/drivers/net/wireless/iwlwifi/mvm/utils.c b/drivers/net/wireless/iwlwifi/mvm/utils.c
index a4a5e25623c3..86989df69356 100644
--- a/drivers/net/wireless/iwlwifi/mvm/utils.c
+++ b/drivers/net/wireless/iwlwifi/mvm/utils.c
@@ -411,6 +411,8 @@ void iwl_mvm_dump_nic_error_log(struct iwl_mvm *mvm)
411 mvm->status, table.valid); 411 mvm->status, table.valid);
412 } 412 }
413 413
414 IWL_ERR(mvm, "Loaded firmware version: %s\n", mvm->fw->fw_version);
415
414 trace_iwlwifi_dev_ucode_error(trans->dev, table.error_id, table.tsf_low, 416 trace_iwlwifi_dev_ucode_error(trans->dev, table.error_id, table.tsf_low,
415 table.data1, table.data2, table.data3, 417 table.data1, table.data2, table.data3,
416 table.blink1, table.blink2, table.ilink1, 418 table.blink1, table.blink2, table.ilink1,
diff --git a/drivers/net/wireless/iwlwifi/pcie/drv.c b/drivers/net/wireless/iwlwifi/pcie/drv.c
index 3040924f5f3c..f47bcbe2945a 100644
--- a/drivers/net/wireless/iwlwifi/pcie/drv.c
+++ b/drivers/net/wireless/iwlwifi/pcie/drv.c
@@ -359,20 +359,25 @@ static DEFINE_PCI_DEVICE_TABLE(iwl_hw_card_ids) = {
359/* 7265 Series */ 359/* 7265 Series */
360 {IWL_PCI_DEVICE(0x095A, 0x5010, iwl7265_2ac_cfg)}, 360 {IWL_PCI_DEVICE(0x095A, 0x5010, iwl7265_2ac_cfg)},
361 {IWL_PCI_DEVICE(0x095A, 0x5110, iwl7265_2ac_cfg)}, 361 {IWL_PCI_DEVICE(0x095A, 0x5110, iwl7265_2ac_cfg)},
362 {IWL_PCI_DEVICE(0x095A, 0x5112, iwl7265_2ac_cfg)},
363 {IWL_PCI_DEVICE(0x095A, 0x5100, iwl7265_2ac_cfg)},
364 {IWL_PCI_DEVICE(0x095A, 0x510A, iwl7265_2ac_cfg)},
362 {IWL_PCI_DEVICE(0x095B, 0x5310, iwl7265_2ac_cfg)}, 365 {IWL_PCI_DEVICE(0x095B, 0x5310, iwl7265_2ac_cfg)},
363 {IWL_PCI_DEVICE(0x095B, 0x5302, iwl7265_2ac_cfg)}, 366 {IWL_PCI_DEVICE(0x095B, 0x5302, iwl7265_2ac_cfg)},
364 {IWL_PCI_DEVICE(0x095B, 0x5210, iwl7265_2ac_cfg)}, 367 {IWL_PCI_DEVICE(0x095B, 0x5210, iwl7265_2ac_cfg)},
365 {IWL_PCI_DEVICE(0x095A, 0x5012, iwl7265_2ac_cfg)}, 368 {IWL_PCI_DEVICE(0x095A, 0x5012, iwl7265_2ac_cfg)},
366 {IWL_PCI_DEVICE(0x095A, 0x500A, iwl7265_2ac_cfg)},
367 {IWL_PCI_DEVICE(0x095A, 0x5410, iwl7265_2ac_cfg)}, 369 {IWL_PCI_DEVICE(0x095A, 0x5410, iwl7265_2ac_cfg)},
368 {IWL_PCI_DEVICE(0x095A, 0x5400, iwl7265_2ac_cfg)}, 370 {IWL_PCI_DEVICE(0x095A, 0x5400, iwl7265_2ac_cfg)},
369 {IWL_PCI_DEVICE(0x095A, 0x1010, iwl7265_2ac_cfg)}, 371 {IWL_PCI_DEVICE(0x095A, 0x1010, iwl7265_2ac_cfg)},
370 {IWL_PCI_DEVICE(0x095A, 0x5000, iwl7265_2n_cfg)}, 372 {IWL_PCI_DEVICE(0x095A, 0x5000, iwl7265_2n_cfg)},
373 {IWL_PCI_DEVICE(0x095A, 0x500A, iwl7265_2n_cfg)},
371 {IWL_PCI_DEVICE(0x095B, 0x5200, iwl7265_2n_cfg)}, 374 {IWL_PCI_DEVICE(0x095B, 0x5200, iwl7265_2n_cfg)},
372 {IWL_PCI_DEVICE(0x095A, 0x5002, iwl7265_n_cfg)}, 375 {IWL_PCI_DEVICE(0x095A, 0x5002, iwl7265_n_cfg)},
373 {IWL_PCI_DEVICE(0x095B, 0x5202, iwl7265_n_cfg)}, 376 {IWL_PCI_DEVICE(0x095B, 0x5202, iwl7265_n_cfg)},
374 {IWL_PCI_DEVICE(0x095A, 0x9010, iwl7265_2ac_cfg)}, 377 {IWL_PCI_DEVICE(0x095A, 0x9010, iwl7265_2ac_cfg)},
378 {IWL_PCI_DEVICE(0x095A, 0x9012, iwl7265_2ac_cfg)},
375 {IWL_PCI_DEVICE(0x095A, 0x9110, iwl7265_2ac_cfg)}, 379 {IWL_PCI_DEVICE(0x095A, 0x9110, iwl7265_2ac_cfg)},
380 {IWL_PCI_DEVICE(0x095A, 0x9112, iwl7265_2ac_cfg)},
376 {IWL_PCI_DEVICE(0x095A, 0x9210, iwl7265_2ac_cfg)}, 381 {IWL_PCI_DEVICE(0x095A, 0x9210, iwl7265_2ac_cfg)},
377 {IWL_PCI_DEVICE(0x095A, 0x9510, iwl7265_2ac_cfg)}, 382 {IWL_PCI_DEVICE(0x095A, 0x9510, iwl7265_2ac_cfg)},
378 {IWL_PCI_DEVICE(0x095A, 0x9310, iwl7265_2ac_cfg)}, 383 {IWL_PCI_DEVICE(0x095A, 0x9310, iwl7265_2ac_cfg)},
diff --git a/drivers/net/wireless/rt2x00/rt2500pci.c b/drivers/net/wireless/rt2x00/rt2500pci.c
index abc5f56f29fe..2f1cd929c6f6 100644
--- a/drivers/net/wireless/rt2x00/rt2500pci.c
+++ b/drivers/net/wireless/rt2x00/rt2500pci.c
@@ -1877,6 +1877,11 @@ static int rt2500pci_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
1877 EEPROM_MAC_ADDR_0)); 1877 EEPROM_MAC_ADDR_0));
1878 1878
1879 /* 1879 /*
1880 * Disable powersaving as default.
1881 */
1882 rt2x00dev->hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
1883
1884 /*
1880 * Initialize hw_mode information. 1885 * Initialize hw_mode information.
1881 */ 1886 */
1882 spec->supported_bands = SUPPORT_BAND_2GHZ; 1887 spec->supported_bands = SUPPORT_BAND_2GHZ;
diff --git a/drivers/net/wireless/rt2x00/rt2500usb.c b/drivers/net/wireless/rt2x00/rt2500usb.c
index 9f16824cd1bc..d849d590de25 100644
--- a/drivers/net/wireless/rt2x00/rt2500usb.c
+++ b/drivers/net/wireless/rt2x00/rt2500usb.c
@@ -1706,6 +1706,11 @@ static int rt2500usb_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
1706 IEEE80211_HW_SUPPORTS_PS | 1706 IEEE80211_HW_SUPPORTS_PS |
1707 IEEE80211_HW_PS_NULLFUNC_STACK; 1707 IEEE80211_HW_PS_NULLFUNC_STACK;
1708 1708
1709 /*
1710 * Disable powersaving as default.
1711 */
1712 rt2x00dev->hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
1713
1709 SET_IEEE80211_DEV(rt2x00dev->hw, rt2x00dev->dev); 1714 SET_IEEE80211_DEV(rt2x00dev->hw, rt2x00dev->dev);
1710 SET_IEEE80211_PERM_ADDR(rt2x00dev->hw, 1715 SET_IEEE80211_PERM_ADDR(rt2x00dev->hw,
1711 rt2x00_eeprom_addr(rt2x00dev, 1716 rt2x00_eeprom_addr(rt2x00dev,
diff --git a/drivers/net/wireless/rt2x00/rt2800lib.c b/drivers/net/wireless/rt2x00/rt2800lib.c
index b8f5b06006c4..7f8b5d156c8c 100644
--- a/drivers/net/wireless/rt2x00/rt2800lib.c
+++ b/drivers/net/wireless/rt2x00/rt2800lib.c
@@ -7458,10 +7458,9 @@ static int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
7458 u32 reg; 7458 u32 reg;
7459 7459
7460 /* 7460 /*
7461 * Disable powersaving as default on PCI devices. 7461 * Disable powersaving as default.
7462 */ 7462 */
7463 if (rt2x00_is_pci(rt2x00dev) || rt2x00_is_soc(rt2x00dev)) 7463 rt2x00dev->hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
7464 rt2x00dev->hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
7465 7464
7466 /* 7465 /*
7467 * Initialize all hw fields. 7466 * Initialize all hw fields.
diff --git a/drivers/net/wireless/rtl818x/rtl8180/dev.c b/drivers/net/wireless/rtl818x/rtl8180/dev.c
index 8ec17aad0e52..3867d1470b36 100644
--- a/drivers/net/wireless/rtl818x/rtl8180/dev.c
+++ b/drivers/net/wireless/rtl818x/rtl8180/dev.c
@@ -107,6 +107,7 @@ static void rtl8180_handle_rx(struct ieee80211_hw *dev)
struct rtl8180_priv *priv = dev->priv;
unsigned int count = 32;
u8 signal, agc, sq;
+dma_addr_t mapping;

while (count--) {
struct rtl8180_rx_desc *entry = &priv->rx_ring[priv->rx_idx];
@@ -128,6 +129,17 @@ static void rtl8180_handle_rx(struct ieee80211_hw *dev)
if (unlikely(!new_skb))
goto done;

+mapping = pci_map_single(priv->pdev,
+                         skb_tail_pointer(new_skb),
+                         MAX_RX_SIZE, PCI_DMA_FROMDEVICE);
+
+if (pci_dma_mapping_error(priv->pdev, mapping)) {
+        kfree_skb(new_skb);
+        dev_err(&priv->pdev->dev, "RX DMA map error\n");
+
+        goto done;
+}
+
pci_unmap_single(priv->pdev,
*((dma_addr_t *)skb->cb),
MAX_RX_SIZE, PCI_DMA_FROMDEVICE);
@@ -158,9 +170,7 @@ static void rtl8180_handle_rx(struct ieee80211_hw *dev)

skb = new_skb;
priv->rx_buf[priv->rx_idx] = skb;
-*((dma_addr_t *) skb->cb) =
-        pci_map_single(priv->pdev, skb_tail_pointer(skb),
-                       MAX_RX_SIZE, PCI_DMA_FROMDEVICE);
+*((dma_addr_t *) skb->cb) = mapping;
}

done:
@@ -266,6 +276,13 @@ static void rtl8180_tx(struct ieee80211_hw *dev,
mapping = pci_map_single(priv->pdev, skb->data,
skb->len, PCI_DMA_TODEVICE);

+if (pci_dma_mapping_error(priv->pdev, mapping)) {
+        kfree_skb(skb);
+        dev_err(&priv->pdev->dev, "TX DMA mapping error\n");
+        return;
+
+}
+
tx_flags = RTL818X_TX_DESC_FLAG_OWN | RTL818X_TX_DESC_FLAG_FS |
RTL818X_TX_DESC_FLAG_LS |
(ieee80211_get_tx_rate(dev, info)->hw_value << 24) |
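
Both rtl8180 hunks above add the same defensive pattern: the address returned by pci_map_single() is checked with pci_dma_mapping_error() before it is used, and the buffer is dropped on failure. A minimal sketch of that pattern follows; the helper name and its callers are illustrative only, not the driver's actual code.

/* Sketch only: map a receive buffer and verify the mapping before use. */
#include <linux/pci.h>
#include <linux/skbuff.h>

static int example_map_rx_buffer(struct pci_dev *pdev, struct sk_buff *skb,
                                 unsigned int size, dma_addr_t *mapping)
{
        *mapping = pci_map_single(pdev, skb_tail_pointer(skb), size,
                                  PCI_DMA_FROMDEVICE);
        if (pci_dma_mapping_error(pdev, *mapping)) {
                /* Never hand a failed mapping to the hardware. */
                kfree_skb(skb);
                return -ENOMEM;
        }
        return 0;
}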
diff --git a/drivers/net/xen-netback/common.h b/drivers/net/xen-netback/common.h
index 4c76bcb9a879..ae413a2cbee7 100644
--- a/drivers/net/xen-netback/common.h
+++ b/drivers/net/xen-netback/common.h
@@ -143,11 +143,7 @@ struct xenvif {
char rx_irq_name[IFNAMSIZ+4]; /* DEVNAME-rx */
struct xen_netif_rx_back_ring rx;
struct sk_buff_head rx_queue;
-bool rx_queue_stopped;
-/* Set when the RX interrupt is triggered by the frontend.
- * The worker thread may need to wake the queue.
- */
-bool rx_event;
+RING_IDX rx_last_skb_slots;

/* This array is allocated seperately as it is large */
struct gnttab_copy *grant_copy_op;
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
index b9de31ea7fc4..7669d49a67e2 100644
--- a/drivers/net/xen-netback/interface.c
+++ b/drivers/net/xen-netback/interface.c
@@ -100,7 +100,6 @@ static irqreturn_t xenvif_rx_interrupt(int irq, void *dev_id)
{
struct xenvif *vif = dev_id;

-vif->rx_event = true;
xenvif_kick_thread(vif);

return IRQ_HANDLED;
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index 6b62c3eb8e18..e5284bca2d90 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -476,7 +476,6 @@ static void xenvif_rx_action(struct xenvif *vif)
unsigned long offset;
struct skb_cb_overlay *sco;
bool need_to_notify = false;
-bool ring_full = false;

struct netrx_pending_operations npo = {
.copy = vif->grant_copy_op,
@@ -486,7 +485,7 @@ static void xenvif_rx_action(struct xenvif *vif)
skb_queue_head_init(&rxq);

while ((skb = skb_dequeue(&vif->rx_queue)) != NULL) {
-int max_slots_needed;
+RING_IDX max_slots_needed;
int i;

/* We need a cheap worse case estimate for the number of
@@ -509,9 +508,10 @@ static void xenvif_rx_action(struct xenvif *vif)
if (!xenvif_rx_ring_slots_available(vif, max_slots_needed)) {
skb_queue_head(&vif->rx_queue, skb);
need_to_notify = true;
-ring_full = true;
+vif->rx_last_skb_slots = max_slots_needed;
break;
-}
+} else
+        vif->rx_last_skb_slots = 0;

sco = (struct skb_cb_overlay *)skb->cb;
sco->meta_slots_used = xenvif_gop_skb(skb, &npo);
@@ -522,8 +522,6 @@ static void xenvif_rx_action(struct xenvif *vif)

BUG_ON(npo.meta_prod > ARRAY_SIZE(vif->meta));

-vif->rx_queue_stopped = !npo.copy_prod && ring_full;
-
if (!npo.copy_prod)
goto done;

@@ -1473,8 +1471,8 @@ static struct xen_netif_rx_response *make_rx_response(struct xenvif *vif,

static inline int rx_work_todo(struct xenvif *vif)
{
-return (!skb_queue_empty(&vif->rx_queue) && !vif->rx_queue_stopped) ||
-        vif->rx_event;
+return !skb_queue_empty(&vif->rx_queue) &&
+        xenvif_rx_ring_slots_available(vif, vif->rx_last_skb_slots);
}

static inline int tx_work_todo(struct xenvif *vif)
@@ -1560,8 +1558,6 @@ int xenvif_kthread(void *data)
if (!skb_queue_empty(&vif->rx_queue))
xenvif_rx_action(vif);

-vif->rx_event = false;
-
if (skb_queue_empty(&vif->rx_queue) &&
netif_queue_stopped(vif->dev))
xenvif_start_queue(vif);
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index ff04d4f95baa..f9daa9e183f2 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -1832,7 +1832,6 @@ static void netback_changed(struct xenbus_device *dev,
case XenbusStateReconfiguring:
case XenbusStateReconfigured:
case XenbusStateUnknown:
-case XenbusStateClosed:
break;

case XenbusStateInitWait:
@@ -1847,6 +1846,10 @@ static void netback_changed(struct xenbus_device *dev,
netdev_notify_peers(netdev);
break;

+case XenbusStateClosed:
+        if (dev->state == XenbusStateClosed)
+                break;
+        /* Missed the backend's CLOSING state -- fallthrough */
case XenbusStateClosing:
xenbus_frontend_closed(dev);
break;
diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c
index cd929aed3613..e2a783fdb98f 100644
--- a/drivers/pci/hotplug/acpiphp_glue.c
+++ b/drivers/pci/hotplug/acpiphp_glue.c
@@ -210,10 +210,29 @@ static void post_dock_fixups(acpi_handle not_used, u32 event, void *data)
210 } 210 }
211} 211}
212 212
213static void dock_event(acpi_handle handle, u32 type, void *data)
214{
215 struct acpiphp_context *context;
216
217 mutex_lock(&acpiphp_context_lock);
218 context = acpiphp_get_context(handle);
219 if (!context || WARN_ON(context->handle != handle)
220 || context->func.parent->is_going_away) {
221 mutex_unlock(&acpiphp_context_lock);
222 return;
223 }
224 get_bridge(context->func.parent);
225 acpiphp_put_context(context);
226 mutex_unlock(&acpiphp_context_lock);
227
228 hotplug_event(handle, type, data);
229
230 put_bridge(context->func.parent);
231}
213 232
214static const struct acpi_dock_ops acpiphp_dock_ops = { 233static const struct acpi_dock_ops acpiphp_dock_ops = {
215 .fixup = post_dock_fixups, 234 .fixup = post_dock_fixups,
216 .handler = hotplug_event, 235 .handler = dock_event,
217}; 236};
218 237
219/* Check whether the PCI device is managed by native PCIe hotplug driver */ 238/* Check whether the PCI device is managed by native PCIe hotplug driver */
@@ -441,7 +460,9 @@ static void cleanup_bridge(struct acpiphp_bridge *bridge)
441 list_del(&bridge->list); 460 list_del(&bridge->list);
442 mutex_unlock(&bridge_mutex); 461 mutex_unlock(&bridge_mutex);
443 462
463 mutex_lock(&acpiphp_context_lock);
444 bridge->is_going_away = true; 464 bridge->is_going_away = true;
465 mutex_unlock(&acpiphp_context_lock);
445} 466}
446 467
447/** 468/**
@@ -742,7 +763,7 @@ static void trim_stale_devices(struct pci_dev *dev)
742 763
743 /* The device is a bridge. so check the bus below it. */ 764 /* The device is a bridge. so check the bus below it. */
744 pm_runtime_get_sync(&dev->dev); 765 pm_runtime_get_sync(&dev->dev);
745 list_for_each_entry_safe(child, tmp, &bus->devices, bus_list) 766 list_for_each_entry_safe_reverse(child, tmp, &bus->devices, bus_list)
746 trim_stale_devices(child); 767 trim_stale_devices(child);
747 768
748 pm_runtime_put(&dev->dev); 769 pm_runtime_put(&dev->dev);
@@ -773,8 +794,8 @@ static void acpiphp_check_bridge(struct acpiphp_bridge *bridge)
773 ; /* do nothing */ 794 ; /* do nothing */
774 } else if (get_slot_status(slot) == ACPI_STA_ALL) { 795 } else if (get_slot_status(slot) == ACPI_STA_ALL) {
775 /* remove stale devices if any */ 796 /* remove stale devices if any */
776 list_for_each_entry_safe(dev, tmp, &bus->devices, 797 list_for_each_entry_safe_reverse(dev, tmp,
777 bus_list) 798 &bus->devices, bus_list)
778 if (PCI_SLOT(dev->devfn) == slot->device) 799 if (PCI_SLOT(dev->devfn) == slot->device)
779 trim_stale_devices(dev); 800 trim_stale_devices(dev);
780 801
@@ -805,7 +826,7 @@ static void acpiphp_sanitize_bus(struct pci_bus *bus)
805 int i; 826 int i;
806 unsigned long type_mask = IORESOURCE_IO | IORESOURCE_MEM; 827 unsigned long type_mask = IORESOURCE_IO | IORESOURCE_MEM;
807 828
808 list_for_each_entry_safe(dev, tmp, &bus->devices, bus_list) { 829 list_for_each_entry_safe_reverse(dev, tmp, &bus->devices, bus_list) {
809 for (i=0; i<PCI_BRIDGE_RESOURCES; i++) { 830 for (i=0; i<PCI_BRIDGE_RESOURCES; i++) {
810 struct resource *res = &dev->resource[i]; 831 struct resource *res = &dev->resource[i];
811 if ((res->flags & type_mask) && !res->start && 832 if ((res->flags & type_mask) && !res->start &&
@@ -829,7 +850,11 @@ void acpiphp_check_host_bridge(acpi_handle handle)
829 850
830 bridge = acpiphp_handle_to_bridge(handle); 851 bridge = acpiphp_handle_to_bridge(handle);
831 if (bridge) { 852 if (bridge) {
853 pci_lock_rescan_remove();
854
832 acpiphp_check_bridge(bridge); 855 acpiphp_check_bridge(bridge);
856
857 pci_unlock_rescan_remove();
833 put_bridge(bridge); 858 put_bridge(bridge);
834 } 859 }
835} 860}
@@ -852,6 +877,7 @@ static void hotplug_event(acpi_handle handle, u32 type, void *data)
852 877
853 mutex_unlock(&acpiphp_context_lock); 878 mutex_unlock(&acpiphp_context_lock);
854 879
880 pci_lock_rescan_remove();
855 acpi_get_name(handle, ACPI_FULL_PATHNAME, &buffer); 881 acpi_get_name(handle, ACPI_FULL_PATHNAME, &buffer);
856 882
857 switch (type) { 883 switch (type) {
@@ -905,6 +931,7 @@ static void hotplug_event(acpi_handle handle, u32 type, void *data)
905 break; 931 break;
906 } 932 }
907 933
934 pci_unlock_rescan_remove();
908 if (bridge) 935 if (bridge)
909 put_bridge(bridge); 936 put_bridge(bridge);
910} 937}
@@ -915,11 +942,9 @@ static void hotplug_event_work(void *data, u32 type)
915 acpi_handle handle = context->handle; 942 acpi_handle handle = context->handle;
916 943
917 acpi_scan_lock_acquire(); 944 acpi_scan_lock_acquire();
918 pci_lock_rescan_remove();
919 945
920 hotplug_event(handle, type, context); 946 hotplug_event(handle, type, context);
921 947
922 pci_unlock_rescan_remove();
923 acpi_scan_lock_release(); 948 acpi_scan_lock_release();
924 acpi_evaluate_hotplug_ost(handle, type, ACPI_OST_SC_SUCCESS, NULL); 949 acpi_evaluate_hotplug_ost(handle, type, ACPI_OST_SC_SUCCESS, NULL);
925 put_bridge(context->func.parent); 950 put_bridge(context->func.parent);
@@ -937,6 +962,7 @@ static void handle_hotplug_event(acpi_handle handle, u32 type, void *data)
937{ 962{
938 struct acpiphp_context *context; 963 struct acpiphp_context *context;
939 u32 ost_code = ACPI_OST_SC_SUCCESS; 964 u32 ost_code = ACPI_OST_SC_SUCCESS;
965 acpi_status status;
940 966
941 switch (type) { 967 switch (type) {
942 case ACPI_NOTIFY_BUS_CHECK: 968 case ACPI_NOTIFY_BUS_CHECK:
@@ -972,13 +998,20 @@ static void handle_hotplug_event(acpi_handle handle, u32 type, void *data)
972 998
973 mutex_lock(&acpiphp_context_lock); 999 mutex_lock(&acpiphp_context_lock);
974 context = acpiphp_get_context(handle); 1000 context = acpiphp_get_context(handle);
975 if (context && !WARN_ON(context->handle != handle)) { 1001 if (!context || WARN_ON(context->handle != handle)
976 get_bridge(context->func.parent); 1002 || context->func.parent->is_going_away)
977 acpiphp_put_context(context); 1003 goto err_out;
978 acpi_hotplug_execute(hotplug_event_work, context, type); 1004
1005 get_bridge(context->func.parent);
1006 acpiphp_put_context(context);
1007 status = acpi_hotplug_execute(hotplug_event_work, context, type);
1008 if (ACPI_SUCCESS(status)) {
979 mutex_unlock(&acpiphp_context_lock); 1009 mutex_unlock(&acpiphp_context_lock);
980 return; 1010 return;
981 } 1011 }
1012 put_bridge(context->func.parent);
1013
1014 err_out:
982 mutex_unlock(&acpiphp_context_lock); 1015 mutex_unlock(&acpiphp_context_lock);
983 ost_code = ACPI_OST_SC_NON_SPECIFIC_FAILURE; 1016 ost_code = ACPI_OST_SC_NON_SPECIFIC_FAILURE;
984 1017
diff --git a/drivers/pinctrl/core.c b/drivers/pinctrl/core.c
index 5ee61a470016..c0fe6091566a 100644
--- a/drivers/pinctrl/core.c
+++ b/drivers/pinctrl/core.c
@@ -851,7 +851,9 @@ static struct pinctrl *create_pinctrl(struct device *dev)
kref_init(&p->users);

/* Add the pinctrl handle to the global list */
+mutex_lock(&pinctrl_list_mutex);
list_add_tail(&p->node, &pinctrl_list);
+mutex_unlock(&pinctrl_list_mutex);

return p;
}
@@ -1642,8 +1644,10 @@ static void pinctrl_init_device_debugfs(struct pinctrl_dev *pctldev)
device_root, pctldev, &pinctrl_groups_ops);
debugfs_create_file("gpio-ranges", S_IFREG | S_IRUGO,
device_root, pctldev, &pinctrl_gpioranges_ops);
-pinmux_init_device_debugfs(device_root, pctldev);
-pinconf_init_device_debugfs(device_root, pctldev);
+if (pctldev->desc->pmxops)
+        pinmux_init_device_debugfs(device_root, pctldev);
+if (pctldev->desc->confops)
+        pinconf_init_device_debugfs(device_root, pctldev);
}

static void pinctrl_remove_device_debugfs(struct pinctrl_dev *pctldev)
diff --git a/drivers/pinctrl/pinctrl-at91.c b/drivers/pinctrl/pinctrl-at91.c
index 38c6f8b9790e..d990e33d8aa7 100644
--- a/drivers/pinctrl/pinctrl-at91.c
+++ b/drivers/pinctrl/pinctrl-at91.c
@@ -1286,22 +1286,22 @@ static int alt_gpio_irq_type(struct irq_data *d, unsigned type)
1286 1286
1287 switch (type) { 1287 switch (type) {
1288 case IRQ_TYPE_EDGE_RISING: 1288 case IRQ_TYPE_EDGE_RISING:
1289 irq_set_handler(d->irq, handle_simple_irq); 1289 __irq_set_handler_locked(d->irq, handle_simple_irq);
1290 writel_relaxed(mask, pio + PIO_ESR); 1290 writel_relaxed(mask, pio + PIO_ESR);
1291 writel_relaxed(mask, pio + PIO_REHLSR); 1291 writel_relaxed(mask, pio + PIO_REHLSR);
1292 break; 1292 break;
1293 case IRQ_TYPE_EDGE_FALLING: 1293 case IRQ_TYPE_EDGE_FALLING:
1294 irq_set_handler(d->irq, handle_simple_irq); 1294 __irq_set_handler_locked(d->irq, handle_simple_irq);
1295 writel_relaxed(mask, pio + PIO_ESR); 1295 writel_relaxed(mask, pio + PIO_ESR);
1296 writel_relaxed(mask, pio + PIO_FELLSR); 1296 writel_relaxed(mask, pio + PIO_FELLSR);
1297 break; 1297 break;
1298 case IRQ_TYPE_LEVEL_LOW: 1298 case IRQ_TYPE_LEVEL_LOW:
1299 irq_set_handler(d->irq, handle_level_irq); 1299 __irq_set_handler_locked(d->irq, handle_level_irq);
1300 writel_relaxed(mask, pio + PIO_LSR); 1300 writel_relaxed(mask, pio + PIO_LSR);
1301 writel_relaxed(mask, pio + PIO_FELLSR); 1301 writel_relaxed(mask, pio + PIO_FELLSR);
1302 break; 1302 break;
1303 case IRQ_TYPE_LEVEL_HIGH: 1303 case IRQ_TYPE_LEVEL_HIGH:
1304 irq_set_handler(d->irq, handle_level_irq); 1304 __irq_set_handler_locked(d->irq, handle_level_irq);
1305 writel_relaxed(mask, pio + PIO_LSR); 1305 writel_relaxed(mask, pio + PIO_LSR);
1306 writel_relaxed(mask, pio + PIO_REHLSR); 1306 writel_relaxed(mask, pio + PIO_REHLSR);
1307 break; 1307 break;
@@ -1310,7 +1310,7 @@ static int alt_gpio_irq_type(struct irq_data *d, unsigned type)
1310 * disable additional interrupt modes: 1310 * disable additional interrupt modes:
1311 * fall back to default behavior 1311 * fall back to default behavior
1312 */ 1312 */
1313 irq_set_handler(d->irq, handle_simple_irq); 1313 __irq_set_handler_locked(d->irq, handle_simple_irq);
1314 writel_relaxed(mask, pio + PIO_AIMDR); 1314 writel_relaxed(mask, pio + PIO_AIMDR);
1315 return 0; 1315 return 0;
1316 case IRQ_TYPE_NONE: 1316 case IRQ_TYPE_NONE:
diff --git a/drivers/pinctrl/pinctrl-imx1-core.c b/drivers/pinctrl/pinctrl-imx1-core.c
index 17aecde1b51d..815384b377b5 100644
--- a/drivers/pinctrl/pinctrl-imx1-core.c
+++ b/drivers/pinctrl/pinctrl-imx1-core.c
@@ -45,7 +45,7 @@ struct imx1_pinctrl {
#define MX1_DDIR 0x00
#define MX1_OCR 0x04
#define MX1_ICONFA 0x0c
-#define MX1_ICONFB 0x10
+#define MX1_ICONFB 0x14
#define MX1_GIUS 0x20
#define MX1_GPR 0x38
#define MX1_PUEN 0x40
@@ -97,13 +97,13 @@ static void imx1_write_2bit(struct imx1_pinctrl *ipctl, unsigned int pin_id,
u32 old_val;
u32 new_val;

-dev_dbg(ipctl->dev, "write: register 0x%p offset %d value 0x%x\n",
-        reg, offset, value);
-
/* Use the next register if the pin's port pin number is >=16 */
if (pin_id % 32 >= 16)
reg += 0x04;

+dev_dbg(ipctl->dev, "write: register 0x%p offset %d value 0x%x\n",
+        reg, offset, value);
+
/* Get current state of pins */
old_val = readl(reg);
old_val &= mask;
@@ -139,7 +139,7 @@ static int imx1_read_2bit(struct imx1_pinctrl *ipctl, unsigned int pin_id,
u32 reg_offset)
{
void __iomem *reg = imx1_mem(ipctl, pin_id) + reg_offset;
-int offset = pin_id % 16;
+int offset = (pin_id % 16) * 2;

/* Use the next register if the pin's port pin number is >=16 */
if (pin_id % 32 >= 16)
diff --git a/drivers/pinctrl/pinctrl-tegra.c b/drivers/pinctrl/pinctrl-tegra.c
index a2e93a2b5ff4..e767355ab0ad 100644
--- a/drivers/pinctrl/pinctrl-tegra.c
+++ b/drivers/pinctrl/pinctrl-tegra.c
@@ -645,7 +645,7 @@ int tegra_pinctrl_probe(struct platform_device *pdev,
GFP_KERNEL);
if (!pmx->regs) {
dev_err(&pdev->dev, "Can't alloc regs pointer\n");
-return -ENODEV;
+return -ENOMEM;
}

for (i = 0; i < pmx->nbanks; i++) {
diff --git a/drivers/pinctrl/sirf/pinctrl-prima2.c b/drivers/pinctrl/sirf/pinctrl-prima2.c
index 37b42651d76a..dde0285544d6 100644
--- a/drivers/pinctrl/sirf/pinctrl-prima2.c
+++ b/drivers/pinctrl/sirf/pinctrl-prima2.c
@@ -413,7 +413,7 @@ static const struct sirfsoc_padmux ac97_padmux = {
413 .funcval = 0, 413 .funcval = 0,
414}; 414};
415 415
416static const unsigned ac97_pins[] = { 33, 34, 35, 36 }; 416static const unsigned ac97_pins[] = { 43, 44, 45, 46 };
417 417
418static const struct sirfsoc_muxmask spi1_muxmask[] = { 418static const struct sirfsoc_muxmask spi1_muxmask[] = {
419 { 419 {
diff --git a/drivers/pinctrl/vt8500/pinctrl-wmt.c b/drivers/pinctrl/vt8500/pinctrl-wmt.c
index b28d1af9c232..9802b67040cc 100644
--- a/drivers/pinctrl/vt8500/pinctrl-wmt.c
+++ b/drivers/pinctrl/vt8500/pinctrl-wmt.c
@@ -276,7 +276,20 @@ static int wmt_pctl_dt_node_to_map_pull(struct wmt_pinctrl_data *data,
if (!configs)
return -ENOMEM;

-configs[0] = pull;
+switch (pull) {
+case 0:
+        configs[0] = PIN_CONFIG_BIAS_DISABLE;
+        break;
+case 1:
+        configs[0] = PIN_CONFIG_BIAS_PULL_DOWN;
+        break;
+case 2:
+        configs[0] = PIN_CONFIG_BIAS_PULL_UP;
+        break;
+default:
+        configs[0] = PIN_CONFIG_BIAS_DISABLE;
+        dev_err(data->dev, "invalid pull state %d - disabling\n", pull);
+}

map->type = PIN_MAP_TYPE_CONFIGS_PIN;
map->data.configs.group_or_pin = data->groups[group];
diff --git a/drivers/regulator/ab3100.c b/drivers/regulator/ab3100.c
index 77b46d0b37a6..e10febe9ec34 100644
--- a/drivers/regulator/ab3100.c
+++ b/drivers/regulator/ab3100.c
@@ -498,7 +498,7 @@ static int ab3100_regulator_register(struct platform_device *pdev,
498 struct ab3100_platform_data *plfdata, 498 struct ab3100_platform_data *plfdata,
499 struct regulator_init_data *init_data, 499 struct regulator_init_data *init_data,
500 struct device_node *np, 500 struct device_node *np,
501 int id) 501 unsigned long id)
502{ 502{
503 struct regulator_desc *desc; 503 struct regulator_desc *desc;
504 struct ab3100_regulator *reg; 504 struct ab3100_regulator *reg;
@@ -646,7 +646,7 @@ ab3100_regulator_of_probe(struct platform_device *pdev, struct device_node *np)
646 err = ab3100_regulator_register( 646 err = ab3100_regulator_register(
647 pdev, NULL, ab3100_regulator_matches[i].init_data, 647 pdev, NULL, ab3100_regulator_matches[i].init_data,
648 ab3100_regulator_matches[i].of_node, 648 ab3100_regulator_matches[i].of_node,
649 (int) ab3100_regulator_matches[i].driver_data); 649 (unsigned long)ab3100_regulator_matches[i].driver_data);
650 if (err) { 650 if (err) {
651 ab3100_regulators_remove(pdev); 651 ab3100_regulators_remove(pdev);
652 return err; 652 return err;
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
index b38a6b669e8c..16a309e5c024 100644
--- a/drivers/regulator/core.c
+++ b/drivers/regulator/core.c
@@ -1272,6 +1272,8 @@ static struct regulator_dev *regulator_dev_lookup(struct device *dev,
1272 if (r->dev.parent && 1272 if (r->dev.parent &&
1273 node == r->dev.of_node) 1273 node == r->dev.of_node)
1274 return r; 1274 return r;
1275 *ret = -EPROBE_DEFER;
1276 return NULL;
1275 } else { 1277 } else {
1276 /* 1278 /*
1277 * If we couldn't even get the node then it's 1279 * If we couldn't even get the node then it's
@@ -1312,7 +1314,7 @@ static struct regulator *_regulator_get(struct device *dev, const char *id,
1312 struct regulator_dev *rdev; 1314 struct regulator_dev *rdev;
1313 struct regulator *regulator = ERR_PTR(-EPROBE_DEFER); 1315 struct regulator *regulator = ERR_PTR(-EPROBE_DEFER);
1314 const char *devname = NULL; 1316 const char *devname = NULL;
1315 int ret = -EPROBE_DEFER; 1317 int ret;
1316 1318
1317 if (id == NULL) { 1319 if (id == NULL) {
1318 pr_err("get() with no identifier\n"); 1320 pr_err("get() with no identifier\n");
@@ -1322,6 +1324,11 @@ static struct regulator *_regulator_get(struct device *dev, const char *id,
1322 if (dev) 1324 if (dev)
1323 devname = dev_name(dev); 1325 devname = dev_name(dev);
1324 1326
1327 if (have_full_constraints())
1328 ret = -ENODEV;
1329 else
1330 ret = -EPROBE_DEFER;
1331
1325 mutex_lock(&regulator_list_mutex); 1332 mutex_lock(&regulator_list_mutex);
1326 1333
1327 rdev = regulator_dev_lookup(dev, id, &ret); 1334 rdev = regulator_dev_lookup(dev, id, &ret);
diff --git a/drivers/regulator/s2mps11.c b/drivers/regulator/s2mps11.c
index d9e557990577..cd0b9e35a56d 100644
--- a/drivers/regulator/s2mps11.c
+++ b/drivers/regulator/s2mps11.c
@@ -441,6 +441,7 @@ common_reg:
for (i = 0; i < S2MPS11_REGULATOR_MAX; i++) {
if (!reg_np) {
config.init_data = pdata->regulators[i].initdata;
+config.of_node = pdata->regulators[i].reg_node;
} else {
config.init_data = rdata[i].init_data;
config.of_node = rdata[i].of_node;
diff --git a/drivers/s390/cio/cio.c b/drivers/s390/cio/cio.c
index 88e35d85d205..8ee88c4ebd83 100644
--- a/drivers/s390/cio/cio.c
+++ b/drivers/s390/cio/cio.c
@@ -342,8 +342,9 @@ static int cio_check_config(struct subchannel *sch, struct schib *schib)
342 */ 342 */
343int cio_commit_config(struct subchannel *sch) 343int cio_commit_config(struct subchannel *sch)
344{ 344{
345 struct schib schib;
346 int ccode, retry, ret = 0; 345 int ccode, retry, ret = 0;
346 struct schib schib;
347 struct irb irb;
347 348
348 if (stsch_err(sch->schid, &schib) || !css_sch_is_valid(&schib)) 349 if (stsch_err(sch->schid, &schib) || !css_sch_is_valid(&schib))
349 return -ENODEV; 350 return -ENODEV;
@@ -367,7 +368,10 @@ int cio_commit_config(struct subchannel *sch)
367 ret = -EAGAIN; 368 ret = -EAGAIN;
368 break; 369 break;
369 case 1: /* status pending */ 370 case 1: /* status pending */
370 return -EBUSY; 371 ret = -EBUSY;
372 if (tsch(sch->schid, &irb))
373 return ret;
374 break;
371 case 2: /* busy */ 375 case 2: /* busy */
372 udelay(100); /* allow for recovery */ 376 udelay(100); /* allow for recovery */
373 ret = -EBUSY; 377 ret = -EBUSY;
@@ -403,7 +407,6 @@ EXPORT_SYMBOL_GPL(cio_update_schib);
403 */ 407 */
404int cio_enable_subchannel(struct subchannel *sch, u32 intparm) 408int cio_enable_subchannel(struct subchannel *sch, u32 intparm)
405{ 409{
406 int retry;
407 int ret; 410 int ret;
408 411
409 CIO_TRACE_EVENT(2, "ensch"); 412 CIO_TRACE_EVENT(2, "ensch");
@@ -418,20 +421,14 @@ int cio_enable_subchannel(struct subchannel *sch, u32 intparm)
418 sch->config.isc = sch->isc; 421 sch->config.isc = sch->isc;
419 sch->config.intparm = intparm; 422 sch->config.intparm = intparm;
420 423
421 for (retry = 0; retry < 3; retry++) { 424 ret = cio_commit_config(sch);
425 if (ret == -EIO) {
426 /*
427 * Got a program check in msch. Try without
428 * the concurrent sense bit the next time.
429 */
430 sch->config.csense = 0;
422 ret = cio_commit_config(sch); 431 ret = cio_commit_config(sch);
423 if (ret == -EIO) {
424 /*
425 * Got a program check in msch. Try without
426 * the concurrent sense bit the next time.
427 */
428 sch->config.csense = 0;
429 } else if (ret == -EBUSY) {
430 struct irb irb;
431 if (tsch(sch->schid, &irb) != 0)
432 break;
433 } else
434 break;
435 } 432 }
436 CIO_HEX_EVENT(2, &ret, sizeof(ret)); 433 CIO_HEX_EVENT(2, &ret, sizeof(ret));
437 return ret; 434 return ret;
@@ -444,7 +441,6 @@ EXPORT_SYMBOL_GPL(cio_enable_subchannel);
444 */ 441 */
445int cio_disable_subchannel(struct subchannel *sch) 442int cio_disable_subchannel(struct subchannel *sch)
446{ 443{
447 int retry;
448 int ret; 444 int ret;
449 445
450 CIO_TRACE_EVENT(2, "dissch"); 446 CIO_TRACE_EVENT(2, "dissch");
@@ -456,16 +452,8 @@ int cio_disable_subchannel(struct subchannel *sch)
456 return -ENODEV; 452 return -ENODEV;
457 453
458 sch->config.ena = 0; 454 sch->config.ena = 0;
455 ret = cio_commit_config(sch);
459 456
460 for (retry = 0; retry < 3; retry++) {
461 ret = cio_commit_config(sch);
462 if (ret == -EBUSY) {
463 struct irb irb;
464 if (tsch(sch->schid, &irb) != 0)
465 break;
466 } else
467 break;
468 }
469 CIO_HEX_EVENT(2, &ret, sizeof(ret)); 457 CIO_HEX_EVENT(2, &ret, sizeof(ret));
470 return ret; 458 return ret;
471} 459}
diff --git a/drivers/s390/cio/qdio.h b/drivers/s390/cio/qdio.h
index 8acaae18bd11..a563e4c00590 100644
--- a/drivers/s390/cio/qdio.h
+++ b/drivers/s390/cio/qdio.h
@@ -359,14 +359,12 @@ static inline int multicast_outbound(struct qdio_q *q)
#define need_siga_sync_out_after_pci(q) \
(unlikely(q->irq_ptr->siga_flag.sync_out_after_pci))

#define for_each_input_queue(irq_ptr, q, i) \
-        for (i = 0, q = irq_ptr->input_qs[0]; \
-             i < irq_ptr->nr_input_qs; \
-             q = irq_ptr->input_qs[++i])
-#define for_each_output_queue(irq_ptr, q, i) \
-        for (i = 0, q = irq_ptr->output_qs[0]; \
-             i < irq_ptr->nr_output_qs; \
-             q = irq_ptr->output_qs[++i])
+        for (i = 0; i < irq_ptr->nr_input_qs && \
+                ({ q = irq_ptr->input_qs[i]; 1; }); i++)
+#define for_each_output_queue(irq_ptr, q, i) \
+        for (i = 0; i < irq_ptr->nr_output_qs && \
+                ({ q = irq_ptr->output_qs[i]; 1; }); i++)

#define prev_buf(bufnr) \
((bufnr + QDIO_MAX_BUFFERS_MASK) & QDIO_MAX_BUFFERS_MASK)
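
The rewritten qdio iteration macros above assign q only after i has been checked against the queue count, so the old form's trailing ++i can no longer read one slot past the last valid entry (or touch slot 0 of an empty array). A stand-alone, hedged sketch of the same bounds-checked pattern, compiled as ordinary userspace C with GCC statement expressions; all names below are illustrative, not qdio's:

#include <stdio.h>

struct queue { int id; };

struct irq_info {
        int nr_qs;
        struct queue *qs[4];
};

/* q is assigned only after i < nr_qs has been verified. */
#define for_each_queue(info, q, i)                       \
        for ((i) = 0; (i) < (info)->nr_qs &&             \
                ({ (q) = (info)->qs[i]; 1; }); (i)++)

int main(void)
{
        struct queue q0 = { 0 }, q1 = { 1 };
        struct irq_info info = { .nr_qs = 2, .qs = { &q0, &q1 } };
        struct queue *q;
        int i;

        for_each_queue(&info, q, i)
                printf("queue %d\n", q->id);
        return 0;
}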
diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c
index c883a085c059..77466c4faabb 100644
--- a/drivers/s390/cio/qdio_main.c
+++ b/drivers/s390/cio/qdio_main.c
@@ -996,7 +996,7 @@ static void qdio_int_handler_pci(struct qdio_irq *irq_ptr)
996 } 996 }
997 } 997 }
998 998
999 if (!pci_out_supported(q)) 999 if (!(irq_ptr->qib.ac & QIB_AC_OUTBOUND_PCI_SUPPORTED))
1000 return; 1000 return;
1001 1001
1002 for_each_output_queue(irq_ptr, q, i) { 1002 for_each_output_queue(irq_ptr, q, i) {
diff --git a/drivers/staging/media/go7007/go7007-loader.c b/drivers/staging/media/go7007/go7007-loader.c
index 10bb41c2fb6d..eecb1f2a5574 100644
--- a/drivers/staging/media/go7007/go7007-loader.c
+++ b/drivers/staging/media/go7007/go7007-loader.c
@@ -59,7 +59,7 @@ static int go7007_loader_probe(struct usb_interface *interface,
59 59
60 if (usbdev->descriptor.bNumConfigurations != 1) { 60 if (usbdev->descriptor.bNumConfigurations != 1) {
61 dev_err(&interface->dev, "can't handle multiple config\n"); 61 dev_err(&interface->dev, "can't handle multiple config\n");
62 return -ENODEV; 62 goto failed2;
63 } 63 }
64 64
65 vendor = le16_to_cpu(usbdev->descriptor.idVendor); 65 vendor = le16_to_cpu(usbdev->descriptor.idVendor);
@@ -108,6 +108,7 @@ static int go7007_loader_probe(struct usb_interface *interface,
108 return 0; 108 return 0;
109 109
110failed2: 110failed2:
111 usb_put_dev(usbdev);
111 dev_err(&interface->dev, "probe failed\n"); 112 dev_err(&interface->dev, "probe failed\n");
112 return -ENODEV; 113 return -ENODEV;
113} 114}
@@ -115,6 +116,7 @@ failed2:
115static void go7007_loader_disconnect(struct usb_interface *interface) 116static void go7007_loader_disconnect(struct usb_interface *interface)
116{ 117{
117 dev_info(&interface->dev, "disconnect\n"); 118 dev_info(&interface->dev, "disconnect\n");
119 usb_put_dev(interface_to_usbdev(interface));
118 usb_set_intfdata(interface, NULL); 120 usb_set_intfdata(interface, NULL);
119} 121}
120 122
diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig
index 4c4c566c52a3..79d25894343a 100644
--- a/drivers/watchdog/Kconfig
+++ b/drivers/watchdog/Kconfig
@@ -223,6 +223,7 @@ config SA1100_WATCHDOG

config DW_WATCHDOG
tristate "Synopsys DesignWare watchdog"
+depends on HAS_IOMEM
help
Say Y here if to include support for the Synopsys DesignWare
watchdog timer found in many chips.
diff --git a/drivers/xen/gntdev.c b/drivers/xen/gntdev.c
index 34a2704fbc88..073b4a19a8b0 100644
--- a/drivers/xen/gntdev.c
+++ b/drivers/xen/gntdev.c
@@ -284,10 +284,8 @@ static int map_grant_pages(struct grant_map *map)
284 } 284 }
285 285
286 pr_debug("map %d+%d\n", map->index, map->count); 286 pr_debug("map %d+%d\n", map->index, map->count);
287 err = gnttab_map_refs_userspace(map->map_ops, 287 err = gnttab_map_refs(map->map_ops, use_ptemod ? map->kmap_ops : NULL,
288 use_ptemod ? map->kmap_ops : NULL, 288 map->pages, map->count);
289 map->pages,
290 map->count);
291 if (err) 289 if (err)
292 return err; 290 return err;
293 291
@@ -317,10 +315,9 @@ static int __unmap_grant_pages(struct grant_map *map, int offset, int pages)
317 } 315 }
318 } 316 }
319 317
320 err = gnttab_unmap_refs_userspace(map->unmap_ops + offset, 318 err = gnttab_unmap_refs(map->unmap_ops + offset,
321 use_ptemod ? map->kmap_ops + offset : NULL, 319 use_ptemod ? map->kmap_ops + offset : NULL, map->pages + offset,
322 map->pages + offset, 320 pages);
323 pages);
324 if (err) 321 if (err)
325 return err; 322 return err;
326 323
diff --git a/drivers/xen/grant-table.c b/drivers/xen/grant-table.c
index 8ee13e2e45e2..b84e3ab839aa 100644
--- a/drivers/xen/grant-table.c
+++ b/drivers/xen/grant-table.c
@@ -928,17 +928,15 @@ void gnttab_batch_copy(struct gnttab_copy *batch, unsigned count)
928} 928}
929EXPORT_SYMBOL_GPL(gnttab_batch_copy); 929EXPORT_SYMBOL_GPL(gnttab_batch_copy);
930 930
931int __gnttab_map_refs(struct gnttab_map_grant_ref *map_ops, 931int gnttab_map_refs(struct gnttab_map_grant_ref *map_ops,
932 struct gnttab_map_grant_ref *kmap_ops, 932 struct gnttab_map_grant_ref *kmap_ops,
933 struct page **pages, unsigned int count, 933 struct page **pages, unsigned int count)
934 bool m2p_override)
935{ 934{
936 int i, ret; 935 int i, ret;
937 bool lazy = false; 936 bool lazy = false;
938 pte_t *pte; 937 pte_t *pte;
939 unsigned long mfn, pfn; 938 unsigned long mfn;
940 939
941 BUG_ON(kmap_ops && !m2p_override);
942 ret = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, map_ops, count); 940 ret = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, map_ops, count);
943 if (ret) 941 if (ret)
944 return ret; 942 return ret;
@@ -957,12 +955,10 @@ int __gnttab_map_refs(struct gnttab_map_grant_ref *map_ops,
957 set_phys_to_machine(map_ops[i].host_addr >> PAGE_SHIFT, 955 set_phys_to_machine(map_ops[i].host_addr >> PAGE_SHIFT,
958 map_ops[i].dev_bus_addr >> PAGE_SHIFT); 956 map_ops[i].dev_bus_addr >> PAGE_SHIFT);
959 } 957 }
960 return 0; 958 return ret;
961 } 959 }
962 960
963 if (m2p_override && 961 if (!in_interrupt() && paravirt_get_lazy_mode() == PARAVIRT_LAZY_NONE) {
964 !in_interrupt() &&
965 paravirt_get_lazy_mode() == PARAVIRT_LAZY_NONE) {
966 arch_enter_lazy_mmu_mode(); 962 arch_enter_lazy_mmu_mode();
967 lazy = true; 963 lazy = true;
968 } 964 }
@@ -979,20 +975,8 @@ int __gnttab_map_refs(struct gnttab_map_grant_ref *map_ops,
979 } else { 975 } else {
980 mfn = PFN_DOWN(map_ops[i].dev_bus_addr); 976 mfn = PFN_DOWN(map_ops[i].dev_bus_addr);
981 } 977 }
982 pfn = page_to_pfn(pages[i]); 978 ret = m2p_add_override(mfn, pages[i], kmap_ops ?
983 979 &kmap_ops[i] : NULL);
984 WARN_ON(PagePrivate(pages[i]));
985 SetPagePrivate(pages[i]);
986 set_page_private(pages[i], mfn);
987
988 pages[i]->index = pfn_to_mfn(pfn);
989 if (unlikely(!set_phys_to_machine(pfn, FOREIGN_FRAME(mfn)))) {
990 ret = -ENOMEM;
991 goto out;
992 }
993 if (m2p_override)
994 ret = m2p_add_override(mfn, pages[i], kmap_ops ?
995 &kmap_ops[i] : NULL);
996 if (ret) 980 if (ret)
997 goto out; 981 goto out;
998 } 982 }
@@ -1003,32 +987,15 @@ int __gnttab_map_refs(struct gnttab_map_grant_ref *map_ops,
1003 987
1004 return ret; 988 return ret;
1005} 989}
1006
1007int gnttab_map_refs(struct gnttab_map_grant_ref *map_ops,
1008 struct page **pages, unsigned int count)
1009{
1010 return __gnttab_map_refs(map_ops, NULL, pages, count, false);
1011}
1012EXPORT_SYMBOL_GPL(gnttab_map_refs); 990EXPORT_SYMBOL_GPL(gnttab_map_refs);
1013 991
1014int gnttab_map_refs_userspace(struct gnttab_map_grant_ref *map_ops, 992int gnttab_unmap_refs(struct gnttab_unmap_grant_ref *unmap_ops,
1015 struct gnttab_map_grant_ref *kmap_ops,
1016 struct page **pages, unsigned int count)
1017{
1018 return __gnttab_map_refs(map_ops, kmap_ops, pages, count, true);
1019}
1020EXPORT_SYMBOL_GPL(gnttab_map_refs_userspace);
1021
1022int __gnttab_unmap_refs(struct gnttab_unmap_grant_ref *unmap_ops,
1023 struct gnttab_map_grant_ref *kmap_ops, 993 struct gnttab_map_grant_ref *kmap_ops,
1024 struct page **pages, unsigned int count, 994 struct page **pages, unsigned int count)
1025 bool m2p_override)
1026{ 995{
1027 int i, ret; 996 int i, ret;
1028 bool lazy = false; 997 bool lazy = false;
1029 unsigned long pfn, mfn;
1030 998
1031 BUG_ON(kmap_ops && !m2p_override);
1032 ret = HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, unmap_ops, count); 999 ret = HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, unmap_ops, count);
1033 if (ret) 1000 if (ret)
1034 return ret; 1001 return ret;
@@ -1039,33 +1006,17 @@ int __gnttab_unmap_refs(struct gnttab_unmap_grant_ref *unmap_ops,
1039 set_phys_to_machine(unmap_ops[i].host_addr >> PAGE_SHIFT, 1006 set_phys_to_machine(unmap_ops[i].host_addr >> PAGE_SHIFT,
1040 INVALID_P2M_ENTRY); 1007 INVALID_P2M_ENTRY);
1041 } 1008 }
1042 return 0; 1009 return ret;
1043 } 1010 }
1044 1011
1045 if (m2p_override && 1012 if (!in_interrupt() && paravirt_get_lazy_mode() == PARAVIRT_LAZY_NONE) {
1046 !in_interrupt() &&
1047 paravirt_get_lazy_mode() == PARAVIRT_LAZY_NONE) {
1048 arch_enter_lazy_mmu_mode(); 1013 arch_enter_lazy_mmu_mode();
1049 lazy = true; 1014 lazy = true;
1050 } 1015 }
1051 1016
1052 for (i = 0; i < count; i++) { 1017 for (i = 0; i < count; i++) {
1053 pfn = page_to_pfn(pages[i]); 1018 ret = m2p_remove_override(pages[i], kmap_ops ?
1054 mfn = get_phys_to_machine(pfn); 1019 &kmap_ops[i] : NULL);
1055 if (mfn == INVALID_P2M_ENTRY || !(mfn & FOREIGN_FRAME_BIT)) {
1056 ret = -EINVAL;
1057 goto out;
1058 }
1059
1060 set_page_private(pages[i], INVALID_P2M_ENTRY);
1061 WARN_ON(!PagePrivate(pages[i]));
1062 ClearPagePrivate(pages[i]);
1063 set_phys_to_machine(pfn, pages[i]->index);
1064 if (m2p_override)
1065 ret = m2p_remove_override(pages[i],
1066 kmap_ops ?
1067 &kmap_ops[i] : NULL,
1068 mfn);
1069 if (ret) 1020 if (ret)
1070 goto out; 1021 goto out;
1071 } 1022 }
@@ -1076,22 +1027,8 @@ int __gnttab_unmap_refs(struct gnttab_unmap_grant_ref *unmap_ops,
1076 1027
1077 return ret; 1028 return ret;
1078} 1029}
1079
1080int gnttab_unmap_refs(struct gnttab_unmap_grant_ref *map_ops,
1081 struct page **pages, unsigned int count)
1082{
1083 return __gnttab_unmap_refs(map_ops, NULL, pages, count, false);
1084}
1085EXPORT_SYMBOL_GPL(gnttab_unmap_refs); 1030EXPORT_SYMBOL_GPL(gnttab_unmap_refs);
1086 1031
1087int gnttab_unmap_refs_userspace(struct gnttab_unmap_grant_ref *map_ops,
1088 struct gnttab_map_grant_ref *kmap_ops,
1089 struct page **pages, unsigned int count)
1090{
1091 return __gnttab_unmap_refs(map_ops, kmap_ops, pages, count, true);
1092}
1093EXPORT_SYMBOL_GPL(gnttab_unmap_refs_userspace);
1094
1095static unsigned nr_status_frames(unsigned nr_grant_frames) 1032static unsigned nr_status_frames(unsigned nr_grant_frames)
1096{ 1033{
1097 BUG_ON(grefs_per_grant_frame == 0); 1034 BUG_ON(grefs_per_grant_frame == 0);
diff --git a/fs/btrfs/check-integrity.c b/fs/btrfs/check-integrity.c
index 49a62b4dda3b..0e8388e72d8d 100644
--- a/fs/btrfs/check-integrity.c
+++ b/fs/btrfs/check-integrity.c
@@ -92,11 +92,11 @@
92#include <linux/slab.h> 92#include <linux/slab.h>
93#include <linux/buffer_head.h> 93#include <linux/buffer_head.h>
94#include <linux/mutex.h> 94#include <linux/mutex.h>
95#include <linux/crc32c.h>
96#include <linux/genhd.h> 95#include <linux/genhd.h>
97#include <linux/blkdev.h> 96#include <linux/blkdev.h>
98#include "ctree.h" 97#include "ctree.h"
99#include "disk-io.h" 98#include "disk-io.h"
99#include "hash.h"
100#include "transaction.h" 100#include "transaction.h"
101#include "extent_io.h" 101#include "extent_io.h"
102#include "volumes.h" 102#include "volumes.h"
@@ -1823,7 +1823,7 @@ static int btrfsic_test_for_metadata(struct btrfsic_state *state,
1823 size_t sublen = i ? PAGE_CACHE_SIZE : 1823 size_t sublen = i ? PAGE_CACHE_SIZE :
1824 (PAGE_CACHE_SIZE - BTRFS_CSUM_SIZE); 1824 (PAGE_CACHE_SIZE - BTRFS_CSUM_SIZE);
1825 1825
1826 crc = crc32c(crc, data, sublen); 1826 crc = btrfs_crc32c(crc, data, sublen);
1827 } 1827 }
1828 btrfs_csum_final(crc, csum); 1828 btrfs_csum_final(crc, csum);
1829 if (memcmp(csum, h->csum, state->csum_size)) 1829 if (memcmp(csum, h->csum, state->csum_size))
diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c
index e2600cdb6c25..b01fb6c527e3 100644
--- a/fs/btrfs/compression.c
+++ b/fs/btrfs/compression.c
@@ -1010,6 +1010,8 @@ int btrfs_decompress_buf2page(char *buf, unsigned long buf_start,
bytes = min(bytes, working_bytes);
kaddr = kmap_atomic(page_out);
memcpy(kaddr + *pg_offset, buf + buf_offset, bytes);
+if (*pg_index == (vcnt - 1) && *pg_offset == 0)
+        memset(kaddr + bytes, 0, PAGE_CACHE_SIZE - bytes);
kunmap_atomic(kaddr);
flush_dcache_page(page_out);

diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 0e69295d0031..5215f04260b2 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -26,7 +26,6 @@
26#include <linux/workqueue.h> 26#include <linux/workqueue.h>
27#include <linux/kthread.h> 27#include <linux/kthread.h>
28#include <linux/freezer.h> 28#include <linux/freezer.h>
29#include <linux/crc32c.h>
30#include <linux/slab.h> 29#include <linux/slab.h>
31#include <linux/migrate.h> 30#include <linux/migrate.h>
32#include <linux/ratelimit.h> 31#include <linux/ratelimit.h>
@@ -35,6 +34,7 @@
35#include <asm/unaligned.h> 34#include <asm/unaligned.h>
36#include "ctree.h" 35#include "ctree.h"
37#include "disk-io.h" 36#include "disk-io.h"
37#include "hash.h"
38#include "transaction.h" 38#include "transaction.h"
39#include "btrfs_inode.h" 39#include "btrfs_inode.h"
40#include "volumes.h" 40#include "volumes.h"
@@ -244,7 +244,7 @@ out:
244 244
245u32 btrfs_csum_data(char *data, u32 seed, size_t len) 245u32 btrfs_csum_data(char *data, u32 seed, size_t len)
246{ 246{
247 return crc32c(seed, data, len); 247 return btrfs_crc32c(seed, data, len);
248} 248}
249 249
250void btrfs_csum_final(u32 crc, char *result) 250void btrfs_csum_final(u32 crc, char *result)
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 9c9ecc93ae2c..32312e09f0f5 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -2385,6 +2385,7 @@ static noinline int __btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
spin_unlock(&delayed_refs->lock);
locked_ref = NULL;
cond_resched();
+count++;
continue;
}

diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 5c4ab9c18940..184e9cb39647 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -2629,7 +2629,7 @@ static int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent)
EXTENT_DEFRAG, 1, cached_state);
if (ret) {
u64 last_snapshot = btrfs_root_last_snapshot(&root->root_item);
-if (last_snapshot >= BTRFS_I(inode)->generation)
+if (0 && last_snapshot >= BTRFS_I(inode)->generation)
/* the inode is shared */
new = record_old_file_extents(inode, ordered_extent);

diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index b0134892dc70..383ab455bfa7 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -4525,7 +4525,7 @@ static int btrfs_ioctl_set_fslabel(struct file *file, void __user *arg)
spin_lock(&root->fs_info->super_lock);
strcpy(super_block->label, label);
spin_unlock(&root->fs_info->super_lock);
-ret = btrfs_end_transaction(trans, root);
+ret = btrfs_commit_transaction(trans, root);

out_unlock:
mnt_drop_write_file(file);
@@ -4668,7 +4668,7 @@ static int btrfs_ioctl_set_features(struct file *file, void __user *arg)
4668 if (ret) 4668 if (ret)
4669 return ret; 4669 return ret;
4670 4670
4671 trans = btrfs_start_transaction(root, 1); 4671 trans = btrfs_start_transaction(root, 0);
4672 if (IS_ERR(trans)) 4672 if (IS_ERR(trans))
4673 return PTR_ERR(trans); 4673 return PTR_ERR(trans);
4674 4674
@@ -4689,7 +4689,7 @@ static int btrfs_ioctl_set_features(struct file *file, void __user *arg)
4689 btrfs_set_super_incompat_flags(super_block, newflags); 4689 btrfs_set_super_incompat_flags(super_block, newflags);
4690 spin_unlock(&root->fs_info->super_lock); 4690 spin_unlock(&root->fs_info->super_lock);
4691 4691
4692 return btrfs_end_transaction(trans, root); 4692 return btrfs_commit_transaction(trans, root);
4693} 4693}
4694 4694
4695long btrfs_ioctl(struct file *file, unsigned int 4695long btrfs_ioctl(struct file *file, unsigned int
diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c
index 730dce395858..9c8d1a3fdc3a 100644
--- a/fs/btrfs/send.c
+++ b/fs/btrfs/send.c
@@ -24,12 +24,12 @@
24#include <linux/xattr.h> 24#include <linux/xattr.h>
25#include <linux/posix_acl_xattr.h> 25#include <linux/posix_acl_xattr.h>
26#include <linux/radix-tree.h> 26#include <linux/radix-tree.h>
27#include <linux/crc32c.h>
28#include <linux/vmalloc.h> 27#include <linux/vmalloc.h>
29#include <linux/string.h> 28#include <linux/string.h>
30 29
31#include "send.h" 30#include "send.h"
32#include "backref.h" 31#include "backref.h"
32#include "hash.h"
33#include "locking.h" 33#include "locking.h"
34#include "disk-io.h" 34#include "disk-io.h"
35#include "btrfs_inode.h" 35#include "btrfs_inode.h"
@@ -620,7 +620,7 @@ static int send_cmd(struct send_ctx *sctx)
620 hdr->len = cpu_to_le32(sctx->send_size - sizeof(*hdr)); 620 hdr->len = cpu_to_le32(sctx->send_size - sizeof(*hdr));
621 hdr->crc = 0; 621 hdr->crc = 0;
622 622
623 crc = crc32c(0, (unsigned char *)sctx->send_buf, sctx->send_size); 623 crc = btrfs_crc32c(0, (unsigned char *)sctx->send_buf, sctx->send_size);
624 hdr->crc = cpu_to_le32(crc); 624 hdr->crc = cpu_to_le32(crc);
625 625
626 ret = write_buf(sctx->send_filp, sctx->send_buf, sctx->send_size, 626 ret = write_buf(sctx->send_filp, sctx->send_buf, sctx->send_size,
@@ -2774,8 +2774,6 @@ static int add_waiting_dir_move(struct send_ctx *sctx, u64 ino)
2774 return 0; 2774 return 0;
2775} 2775}
2776 2776
2777#ifdef CONFIG_BTRFS_ASSERT
2778
2779static int del_waiting_dir_move(struct send_ctx *sctx, u64 ino) 2777static int del_waiting_dir_move(struct send_ctx *sctx, u64 ino)
2780{ 2778{
2781 struct rb_node *n = sctx->waiting_dir_moves.rb_node; 2779 struct rb_node *n = sctx->waiting_dir_moves.rb_node;
@@ -2796,8 +2794,6 @@ static int del_waiting_dir_move(struct send_ctx *sctx, u64 ino)
2796 return -ENOENT; 2794 return -ENOENT;
2797} 2795}
2798 2796
2799#endif
2800
2801static int add_pending_dir_move(struct send_ctx *sctx, u64 parent_ino) 2797static int add_pending_dir_move(struct send_ctx *sctx, u64 parent_ino)
2802{ 2798{
2803 struct rb_node **p = &sctx->pending_dir_moves.rb_node; 2799 struct rb_node **p = &sctx->pending_dir_moves.rb_node;
@@ -2902,7 +2898,9 @@ static int apply_dir_move(struct send_ctx *sctx, struct pending_dir_move *pm)
2902 } 2898 }
2903 2899
2904 sctx->send_progress = sctx->cur_ino + 1; 2900 sctx->send_progress = sctx->cur_ino + 1;
2905 ASSERT(del_waiting_dir_move(sctx, pm->ino) == 0); 2901 ret = del_waiting_dir_move(sctx, pm->ino);
2902 ASSERT(ret == 0);
2903
2906 ret = get_cur_path(sctx, pm->ino, pm->gen, to_path); 2904 ret = get_cur_path(sctx, pm->ino, pm->gen, to_path);
2907 if (ret < 0) 2905 if (ret < 0)
2908 goto out; 2906 goto out;
diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
index c02f63356895..97cc24198554 100644
--- a/fs/btrfs/super.c
+++ b/fs/btrfs/super.c
@@ -1996,7 +1996,7 @@ static void __exit exit_btrfs_fs(void)
btrfs_hash_exit();
}

-module_init(init_btrfs_fs)
+late_initcall(init_btrfs_fs);
module_exit(exit_btrfs_fs)

MODULE_LICENSE("GPL");
diff --git a/fs/buffer.c b/fs/buffer.c
index 651dba10b9c2..27265a8b43c1 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -654,14 +654,16 @@ EXPORT_SYMBOL(mark_buffer_dirty_inode);
static void __set_page_dirty(struct page *page,
struct address_space *mapping, int warn)
{
-spin_lock_irq(&mapping->tree_lock);
+unsigned long flags;
+
+spin_lock_irqsave(&mapping->tree_lock, flags);
if (page->mapping) { /* Race with truncate? */
WARN_ON_ONCE(warn && !PageUptodate(page));
account_page_dirtied(page, mapping);
radix_tree_tag_set(&mapping->page_tree,
page_index(page), PAGECACHE_TAG_DIRTY);
}
-spin_unlock_irq(&mapping->tree_lock);
+spin_unlock_irqrestore(&mapping->tree_lock, flags);
__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
}

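The __set_page_dirty() change above swaps spin_lock_irq()/spin_unlock_irq() for the irqsave/irqrestore pair because the unconditional variant re-enables interrupts on unlock even when the caller entered with them disabled. A minimal sketch of the difference, with an illustrative lock and function name rather than the buffer.c code:

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(example_lock);

/* Safe from any context: remembers and restores the caller's IRQ state. */
static void example_update_state(void)
{
        unsigned long flags;

        spin_lock_irqsave(&example_lock, flags);
        /* ... modify data that is also touched from interrupt context ... */
        spin_unlock_irqrestore(&example_lock, flags);
}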
diff --git a/fs/cifs/cifsacl.c b/fs/cifs/cifsacl.c
index 8f9b4f710d4a..c819b0bd491a 100644
--- a/fs/cifs/cifsacl.c
+++ b/fs/cifs/cifsacl.c
@@ -1043,15 +1043,30 @@ id_mode_to_cifs_acl(struct inode *inode, const char *path, __u64 nmode,
1043 __u32 secdesclen = 0; 1043 __u32 secdesclen = 0;
1044 struct cifs_ntsd *pntsd = NULL; /* acl obtained from server */ 1044 struct cifs_ntsd *pntsd = NULL; /* acl obtained from server */
1045 struct cifs_ntsd *pnntsd = NULL; /* modified acl to be sent to server */ 1045 struct cifs_ntsd *pnntsd = NULL; /* modified acl to be sent to server */
1046 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
1047 struct tcon_link *tlink = cifs_sb_tlink(cifs_sb);
1048 struct cifs_tcon *tcon;
1049
1050 if (IS_ERR(tlink))
1051 return PTR_ERR(tlink);
1052 tcon = tlink_tcon(tlink);
1046 1053
1047 cifs_dbg(NOISY, "set ACL from mode for %s\n", path); 1054 cifs_dbg(NOISY, "set ACL from mode for %s\n", path);
1048 1055
1049 /* Get the security descriptor */ 1056 /* Get the security descriptor */
1050 pntsd = get_cifs_acl(CIFS_SB(inode->i_sb), inode, path, &secdesclen); 1057
1058 if (tcon->ses->server->ops->get_acl == NULL) {
1059 cifs_put_tlink(tlink);
1060 return -EOPNOTSUPP;
1061 }
1062
1063 pntsd = tcon->ses->server->ops->get_acl(cifs_sb, inode, path,
1064 &secdesclen);
1051 if (IS_ERR(pntsd)) { 1065 if (IS_ERR(pntsd)) {
1052 rc = PTR_ERR(pntsd); 1066 rc = PTR_ERR(pntsd);
1053 cifs_dbg(VFS, "%s: error %d getting sec desc\n", __func__, rc); 1067 cifs_dbg(VFS, "%s: error %d getting sec desc\n", __func__, rc);
1054 goto out; 1068 cifs_put_tlink(tlink);
1069 return rc;
1055 } 1070 }
1056 1071
1057 /* 1072 /*
@@ -1064,6 +1079,7 @@ id_mode_to_cifs_acl(struct inode *inode, const char *path, __u64 nmode,
1064 pnntsd = kmalloc(secdesclen, GFP_KERNEL); 1079 pnntsd = kmalloc(secdesclen, GFP_KERNEL);
1065 if (!pnntsd) { 1080 if (!pnntsd) {
1066 kfree(pntsd); 1081 kfree(pntsd);
1082 cifs_put_tlink(tlink);
1067 return -ENOMEM; 1083 return -ENOMEM;
1068 } 1084 }
1069 1085
@@ -1072,14 +1088,18 @@ id_mode_to_cifs_acl(struct inode *inode, const char *path, __u64 nmode,
1072 1088
1073 cifs_dbg(NOISY, "build_sec_desc rc: %d\n", rc); 1089 cifs_dbg(NOISY, "build_sec_desc rc: %d\n", rc);
1074 1090
1091 if (tcon->ses->server->ops->set_acl == NULL)
1092 rc = -EOPNOTSUPP;
1093
1075 if (!rc) { 1094 if (!rc) {
1076 /* Set the security descriptor */ 1095 /* Set the security descriptor */
1077 rc = set_cifs_acl(pnntsd, secdesclen, inode, path, aclflag); 1096 rc = tcon->ses->server->ops->set_acl(pnntsd, secdesclen, inode,
1097 path, aclflag);
1078 cifs_dbg(NOISY, "set_cifs_acl rc: %d\n", rc); 1098 cifs_dbg(NOISY, "set_cifs_acl rc: %d\n", rc);
1079 } 1099 }
1100 cifs_put_tlink(tlink);
1080 1101
1081 kfree(pnntsd); 1102 kfree(pnntsd);
1082 kfree(pntsd); 1103 kfree(pntsd);
1083out:
1084 return rc; 1104 return rc;
1085} 1105}
diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
index a245d1809ed8..86dc28c7aa5c 100644
--- a/fs/cifs/cifsglob.h
+++ b/fs/cifs/cifsglob.h
@@ -323,7 +323,8 @@ struct smb_version_operations {
323 /* async read from the server */ 323 /* async read from the server */
324 int (*async_readv)(struct cifs_readdata *); 324 int (*async_readv)(struct cifs_readdata *);
325 /* async write to the server */ 325 /* async write to the server */
326 int (*async_writev)(struct cifs_writedata *); 326 int (*async_writev)(struct cifs_writedata *,
327 void (*release)(struct kref *));
327 /* sync read from the server */ 328 /* sync read from the server */
328 int (*sync_read)(const unsigned int, struct cifsFileInfo *, 329 int (*sync_read)(const unsigned int, struct cifsFileInfo *,
329 struct cifs_io_parms *, unsigned int *, char **, 330 struct cifs_io_parms *, unsigned int *, char **,
@@ -395,6 +396,10 @@ struct smb_version_operations {
395 int (*set_EA)(const unsigned int, struct cifs_tcon *, const char *, 396 int (*set_EA)(const unsigned int, struct cifs_tcon *, const char *,
396 const char *, const void *, const __u16, 397 const char *, const void *, const __u16,
397 const struct nls_table *, int); 398 const struct nls_table *, int);
399 struct cifs_ntsd * (*get_acl)(struct cifs_sb_info *, struct inode *,
400 const char *, u32 *);
401 int (*set_acl)(struct cifs_ntsd *, __u32, struct inode *, const char *,
402 int);
398}; 403};
399 404
400struct smb_version_values { 405struct smb_version_values {
@@ -1064,7 +1069,7 @@ struct cifs_writedata {
1064 unsigned int pagesz; 1069 unsigned int pagesz;
1065 unsigned int tailsz; 1070 unsigned int tailsz;
1066 unsigned int nr_pages; 1071 unsigned int nr_pages;
1067 struct page *pages[1]; 1072 struct page *pages[];
1068}; 1073};
1069 1074
1070/* 1075/*
diff --git a/fs/cifs/cifsproto.h b/fs/cifs/cifsproto.h
index 79e6e9a93a8c..d00e09dfc452 100644
--- a/fs/cifs/cifsproto.h
+++ b/fs/cifs/cifsproto.h
@@ -488,7 +488,8 @@ void cifs_readdata_release(struct kref *refcount);
488int cifs_async_readv(struct cifs_readdata *rdata); 488int cifs_async_readv(struct cifs_readdata *rdata);
489int cifs_readv_receive(struct TCP_Server_Info *server, struct mid_q_entry *mid); 489int cifs_readv_receive(struct TCP_Server_Info *server, struct mid_q_entry *mid);
490 490
491int cifs_async_writev(struct cifs_writedata *wdata); 491int cifs_async_writev(struct cifs_writedata *wdata,
492 void (*release)(struct kref *kref));
492void cifs_writev_complete(struct work_struct *work); 493void cifs_writev_complete(struct work_struct *work);
493struct cifs_writedata *cifs_writedata_alloc(unsigned int nr_pages, 494struct cifs_writedata *cifs_writedata_alloc(unsigned int nr_pages,
494 work_func_t complete); 495 work_func_t complete);
diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c
index 4d881c35eeca..f3264bd7a83d 100644
--- a/fs/cifs/cifssmb.c
+++ b/fs/cifs/cifssmb.c
@@ -1910,7 +1910,7 @@ cifs_writev_requeue(struct cifs_writedata *wdata)
1910 1910
1911 do { 1911 do {
1912 server = tlink_tcon(wdata->cfile->tlink)->ses->server; 1912 server = tlink_tcon(wdata->cfile->tlink)->ses->server;
1913 rc = server->ops->async_writev(wdata); 1913 rc = server->ops->async_writev(wdata, cifs_writedata_release);
1914 } while (rc == -EAGAIN); 1914 } while (rc == -EAGAIN);
1915 1915
1916 for (i = 0; i < wdata->nr_pages; i++) { 1916 for (i = 0; i < wdata->nr_pages; i++) {
@@ -1962,15 +1962,9 @@ cifs_writedata_alloc(unsigned int nr_pages, work_func_t complete)
1962{ 1962{
1963 struct cifs_writedata *wdata; 1963 struct cifs_writedata *wdata;
1964 1964
1965 /* this would overflow */
1966 if (nr_pages == 0) {
1967 cifs_dbg(VFS, "%s: called with nr_pages == 0!\n", __func__);
1968 return NULL;
1969 }
1970
1971 /* writedata + number of page pointers */ 1965 /* writedata + number of page pointers */
1972 wdata = kzalloc(sizeof(*wdata) + 1966 wdata = kzalloc(sizeof(*wdata) +
1973 sizeof(struct page *) * (nr_pages - 1), GFP_NOFS); 1967 sizeof(struct page *) * nr_pages, GFP_NOFS);
1974 if (wdata != NULL) { 1968 if (wdata != NULL) {
1975 kref_init(&wdata->refcount); 1969 kref_init(&wdata->refcount);
1976 INIT_LIST_HEAD(&wdata->list); 1970 INIT_LIST_HEAD(&wdata->list);
@@ -2031,7 +2025,8 @@ cifs_writev_callback(struct mid_q_entry *mid)
2031 2025
2032/* cifs_async_writev - send an async write, and set up mid to handle result */ 2026/* cifs_async_writev - send an async write, and set up mid to handle result */
2033int 2027int
2034cifs_async_writev(struct cifs_writedata *wdata) 2028cifs_async_writev(struct cifs_writedata *wdata,
2029 void (*release)(struct kref *kref))
2035{ 2030{
2036 int rc = -EACCES; 2031 int rc = -EACCES;
2037 WRITE_REQ *smb = NULL; 2032 WRITE_REQ *smb = NULL;
@@ -2105,7 +2100,7 @@ cifs_async_writev(struct cifs_writedata *wdata)
2105 if (rc == 0) 2100 if (rc == 0)
2106 cifs_stats_inc(&tcon->stats.cifs_stats.num_writes); 2101 cifs_stats_inc(&tcon->stats.cifs_stats.num_writes);
2107 else 2102 else
2108 kref_put(&wdata->refcount, cifs_writedata_release); 2103 kref_put(&wdata->refcount, release);
2109 2104
2110async_writev_out: 2105async_writev_out:
2111 cifs_small_buf_release(smb); 2106 cifs_small_buf_release(smb);
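With pages[] turned into a flexible array member, cifs_writedata_alloc() above sizes the trailing array as sizeof(struct page *) * nr_pages and the old nr_pages == 0 underflow guard becomes unnecessary. A userspace sketch of the same single-allocation pattern (calloc standing in for kzalloc; the struct is simplified and the names are illustrative):

#include <stdio.h>
#include <stdlib.h>

struct writedata {
	unsigned int nr_pages;
	void *pages[];			/* flexible array member, sized at allocation time */
};

static struct writedata *writedata_alloc(unsigned int nr_pages)
{
	/* header plus nr_pages pointers; nr_pages == 0 is simply a zero-length tail */
	struct writedata *wdata = calloc(1, sizeof(*wdata) +
					    sizeof(void *) * nr_pages);
	if (wdata)
		wdata->nr_pages = nr_pages;
	return wdata;
}

int main(void)
{
	struct writedata *wdata = writedata_alloc(4);
	if (!wdata)
		return 1;
	printf("room for %u page pointers\n", wdata->nr_pages);
	free(wdata);
	return 0;
}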
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index 853d6d1cc822..755584684f6c 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -2043,7 +2043,8 @@ retry:
2043 } 2043 }
2044 wdata->pid = wdata->cfile->pid; 2044 wdata->pid = wdata->cfile->pid;
2045 server = tlink_tcon(wdata->cfile->tlink)->ses->server; 2045 server = tlink_tcon(wdata->cfile->tlink)->ses->server;
2046 rc = server->ops->async_writev(wdata); 2046 rc = server->ops->async_writev(wdata,
2047 cifs_writedata_release);
2047 } while (wbc->sync_mode == WB_SYNC_ALL && rc == -EAGAIN); 2048 } while (wbc->sync_mode == WB_SYNC_ALL && rc == -EAGAIN);
2048 2049
2049 for (i = 0; i < nr_pages; ++i) 2050 for (i = 0; i < nr_pages; ++i)
@@ -2331,9 +2332,20 @@ size_t get_numpages(const size_t wsize, const size_t len, size_t *cur_len)
2331} 2332}
2332 2333
2333static void 2334static void
2334cifs_uncached_writev_complete(struct work_struct *work) 2335cifs_uncached_writedata_release(struct kref *refcount)
2335{ 2336{
2336 int i; 2337 int i;
2338 struct cifs_writedata *wdata = container_of(refcount,
2339 struct cifs_writedata, refcount);
2340
2341 for (i = 0; i < wdata->nr_pages; i++)
2342 put_page(wdata->pages[i]);
2343 cifs_writedata_release(refcount);
2344}
2345
2346static void
2347cifs_uncached_writev_complete(struct work_struct *work)
2348{
2337 struct cifs_writedata *wdata = container_of(work, 2349 struct cifs_writedata *wdata = container_of(work,
2338 struct cifs_writedata, work); 2350 struct cifs_writedata, work);
2339 struct inode *inode = wdata->cfile->dentry->d_inode; 2351 struct inode *inode = wdata->cfile->dentry->d_inode;
@@ -2347,12 +2359,7 @@ cifs_uncached_writev_complete(struct work_struct *work)
2347 2359
2348 complete(&wdata->done); 2360 complete(&wdata->done);
2349 2361
2350 if (wdata->result != -EAGAIN) { 2362 kref_put(&wdata->refcount, cifs_uncached_writedata_release);
2351 for (i = 0; i < wdata->nr_pages; i++)
2352 put_page(wdata->pages[i]);
2353 }
2354
2355 kref_put(&wdata->refcount, cifs_writedata_release);
2356} 2363}
2357 2364
2358/* attempt to send write to server, retry on any -EAGAIN errors */ 2365/* attempt to send write to server, retry on any -EAGAIN errors */
@@ -2370,7 +2377,8 @@ cifs_uncached_retry_writev(struct cifs_writedata *wdata)
2370 if (rc != 0) 2377 if (rc != 0)
2371 continue; 2378 continue;
2372 } 2379 }
2373 rc = server->ops->async_writev(wdata); 2380 rc = server->ops->async_writev(wdata,
2381 cifs_uncached_writedata_release);
2374 } while (rc == -EAGAIN); 2382 } while (rc == -EAGAIN);
2375 2383
2376 return rc; 2384 return rc;
@@ -2454,7 +2462,8 @@ cifs_iovec_write(struct file *file, const struct iovec *iov,
2454 wdata->tailsz = cur_len - ((nr_pages - 1) * PAGE_SIZE); 2462 wdata->tailsz = cur_len - ((nr_pages - 1) * PAGE_SIZE);
2455 rc = cifs_uncached_retry_writev(wdata); 2463 rc = cifs_uncached_retry_writev(wdata);
2456 if (rc) { 2464 if (rc) {
2457 kref_put(&wdata->refcount, cifs_writedata_release); 2465 kref_put(&wdata->refcount,
2466 cifs_uncached_writedata_release);
2458 break; 2467 break;
2459 } 2468 }
2460 2469
@@ -2496,7 +2505,7 @@ restart_loop:
2496 } 2505 }
2497 } 2506 }
2498 list_del_init(&wdata->list); 2507 list_del_init(&wdata->list);
2499 kref_put(&wdata->refcount, cifs_writedata_release); 2508 kref_put(&wdata->refcount, cifs_uncached_writedata_release);
2500 } 2509 }
2501 2510
2502 if (total_written > 0) 2511 if (total_written > 0)
@@ -2559,8 +2568,8 @@ cifs_writev(struct kiocb *iocb, const struct iovec *iov,
2559 if (rc > 0) { 2568 if (rc > 0) {
2560 ssize_t err; 2569 ssize_t err;
2561 2570
2562 err = generic_write_sync(file, pos, rc); 2571 err = generic_write_sync(file, iocb->ki_pos - rc, rc);
2563 if (err < 0 && rc > 0) 2572 if (err < 0)
2564 rc = err; 2573 rc = err;
2565 } 2574 }
2566 2575
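The file.c hunks route every uncached-write kref_put() through cifs_uncached_writedata_release(), which drops the page references taken for the write before chaining to the common release, and the same function is handed to async_writev() so the send-failure path puts the reference the same way. A small userspace sketch of a caller-supplied release callback (a plain counter stands in for struct kref; the names are invented):

#include <stdio.h>
#include <stdlib.h>

struct wdata {
	int refcount;
	int npages;
};

static void wdata_release(struct wdata *w)
{
	printf("freeing wdata\n");
	free(w);
}

static void wdata_uncached_release(struct wdata *w)
{
	/* caller-specific teardown first, then the common release */
	printf("dropping %d page refs\n", w->npages);
	wdata_release(w);
}

/* kref_put() analogue: the caller chooses which release runs when the count hits zero */
static void wdata_put(struct wdata *w, void (*release)(struct wdata *))
{
	if (--w->refcount == 0)
		release(w);
}

int main(void)
{
	struct wdata *w = calloc(1, sizeof(*w));

	if (!w)
		return 1;
	w->refcount = 1;
	w->npages = 4;
	wdata_put(w, wdata_uncached_release);
	return 0;
}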
diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
index 9cb9679d7357..be58b8fcdb3c 100644
--- a/fs/cifs/inode.c
+++ b/fs/cifs/inode.c
@@ -527,10 +527,15 @@ static int cifs_sfu_mode(struct cifs_fattr *fattr, const unsigned char *path,
527 return PTR_ERR(tlink); 527 return PTR_ERR(tlink);
528 tcon = tlink_tcon(tlink); 528 tcon = tlink_tcon(tlink);
529 529
530 rc = CIFSSMBQAllEAs(xid, tcon, path, "SETFILEBITS", 530 if (tcon->ses->server->ops->query_all_EAs == NULL) {
531 ea_value, 4 /* size of buf */, cifs_sb->local_nls, 531 cifs_put_tlink(tlink);
532 cifs_sb->mnt_cifs_flags & 532 return -EOPNOTSUPP;
533 CIFS_MOUNT_MAP_SPECIAL_CHR); 533 }
534
535 rc = tcon->ses->server->ops->query_all_EAs(xid, tcon, path,
536 "SETFILEBITS", ea_value, 4 /* size of buf */,
537 cifs_sb->local_nls,
538 cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR);
534 cifs_put_tlink(tlink); 539 cifs_put_tlink(tlink);
535 if (rc < 0) 540 if (rc < 0)
536 return (int)rc; 541 return (int)rc;
diff --git a/fs/cifs/smb1ops.c b/fs/cifs/smb1ops.c
index 9ac5bfc9cc56..bfd66d84831e 100644
--- a/fs/cifs/smb1ops.c
+++ b/fs/cifs/smb1ops.c
@@ -1067,6 +1067,14 @@ struct smb_version_operations smb1_operations = {
1067 .query_mf_symlink = cifs_query_mf_symlink, 1067 .query_mf_symlink = cifs_query_mf_symlink,
1068 .create_mf_symlink = cifs_create_mf_symlink, 1068 .create_mf_symlink = cifs_create_mf_symlink,
1069 .is_read_op = cifs_is_read_op, 1069 .is_read_op = cifs_is_read_op,
1070#ifdef CONFIG_CIFS_XATTR
1071 .query_all_EAs = CIFSSMBQAllEAs,
1072 .set_EA = CIFSSMBSetEA,
1073#endif /* CIFS_XATTR */
1074#ifdef CONFIG_CIFS_ACL
1075 .get_acl = get_cifs_acl,
1076 .set_acl = set_cifs_acl,
1077#endif /* CIFS_ACL */
1070}; 1078};
1071 1079
1072struct smb_version_values smb1_values = { 1080struct smb_version_values smb1_values = {
diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
index 2013234b73ad..a3f7a9c3cc69 100644
--- a/fs/cifs/smb2pdu.c
+++ b/fs/cifs/smb2pdu.c
@@ -1890,7 +1890,8 @@ smb2_writev_callback(struct mid_q_entry *mid)
1890 1890
1891/* smb2_async_writev - send an async write, and set up mid to handle result */ 1891/* smb2_async_writev - send an async write, and set up mid to handle result */
1892int 1892int
1893smb2_async_writev(struct cifs_writedata *wdata) 1893smb2_async_writev(struct cifs_writedata *wdata,
1894 void (*release)(struct kref *kref))
1894{ 1895{
1895 int rc = -EACCES; 1896 int rc = -EACCES;
1896 struct smb2_write_req *req = NULL; 1897 struct smb2_write_req *req = NULL;
@@ -1938,7 +1939,7 @@ smb2_async_writev(struct cifs_writedata *wdata)
1938 smb2_writev_callback, wdata, 0); 1939 smb2_writev_callback, wdata, 0);
1939 1940
1940 if (rc) { 1941 if (rc) {
1941 kref_put(&wdata->refcount, cifs_writedata_release); 1942 kref_put(&wdata->refcount, release);
1942 cifs_stats_fail_inc(tcon, SMB2_WRITE_HE); 1943 cifs_stats_fail_inc(tcon, SMB2_WRITE_HE);
1943 } 1944 }
1944 1945
diff --git a/fs/cifs/smb2proto.h b/fs/cifs/smb2proto.h
index 93adc64666f3..0ce48db20a65 100644
--- a/fs/cifs/smb2proto.h
+++ b/fs/cifs/smb2proto.h
@@ -123,7 +123,8 @@ extern int SMB2_get_srv_num(const unsigned int xid, struct cifs_tcon *tcon,
123extern int smb2_async_readv(struct cifs_readdata *rdata); 123extern int smb2_async_readv(struct cifs_readdata *rdata);
124extern int SMB2_read(const unsigned int xid, struct cifs_io_parms *io_parms, 124extern int SMB2_read(const unsigned int xid, struct cifs_io_parms *io_parms,
125 unsigned int *nbytes, char **buf, int *buf_type); 125 unsigned int *nbytes, char **buf, int *buf_type);
126extern int smb2_async_writev(struct cifs_writedata *wdata); 126extern int smb2_async_writev(struct cifs_writedata *wdata,
127 void (*release)(struct kref *kref));
127extern int SMB2_write(const unsigned int xid, struct cifs_io_parms *io_parms, 128extern int SMB2_write(const unsigned int xid, struct cifs_io_parms *io_parms,
128 unsigned int *nbytes, struct kvec *iov, int n_vec); 129 unsigned int *nbytes, struct kvec *iov, int n_vec);
129extern int SMB2_echo(struct TCP_Server_Info *server); 130extern int SMB2_echo(struct TCP_Server_Info *server);
diff --git a/fs/cifs/xattr.c b/fs/cifs/xattr.c
index 95c43bb20335..5ac836a86b18 100644
--- a/fs/cifs/xattr.c
+++ b/fs/cifs/xattr.c
@@ -176,8 +176,12 @@ int cifs_setxattr(struct dentry *direntry, const char *ea_name,
176 rc = -ENOMEM; 176 rc = -ENOMEM;
177 } else { 177 } else {
178 memcpy(pacl, ea_value, value_size); 178 memcpy(pacl, ea_value, value_size);
179 rc = set_cifs_acl(pacl, value_size, 179 if (pTcon->ses->server->ops->set_acl)
180 direntry->d_inode, full_path, CIFS_ACL_DACL); 180 rc = pTcon->ses->server->ops->set_acl(pacl,
181 value_size, direntry->d_inode,
182 full_path, CIFS_ACL_DACL);
183 else
184 rc = -EOPNOTSUPP;
181 if (rc == 0) /* force revalidate of the inode */ 185 if (rc == 0) /* force revalidate of the inode */
182 CIFS_I(direntry->d_inode)->time = 0; 186 CIFS_I(direntry->d_inode)->time = 0;
183 kfree(pacl); 187 kfree(pacl);
@@ -323,8 +327,11 @@ ssize_t cifs_getxattr(struct dentry *direntry, const char *ea_name,
323 u32 acllen; 327 u32 acllen;
324 struct cifs_ntsd *pacl; 328 struct cifs_ntsd *pacl;
325 329
326 pacl = get_cifs_acl(cifs_sb, direntry->d_inode, 330 if (pTcon->ses->server->ops->get_acl == NULL)
327 full_path, &acllen); 331 goto get_ea_exit; /* rc already EOPNOTSUPP */
332
333 pacl = pTcon->ses->server->ops->get_acl(cifs_sb,
334 direntry->d_inode, full_path, &acllen);
328 if (IS_ERR(pacl)) { 335 if (IS_ERR(pacl)) {
329 rc = PTR_ERR(pacl); 336 rc = PTR_ERR(pacl);
330 cifs_dbg(VFS, "%s: error %zd getting sec desc\n", 337 cifs_dbg(VFS, "%s: error %zd getting sec desc\n",
diff --git a/fs/exec.c b/fs/exec.c
index e1529b4c79b1..3d78fccdd723 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -748,11 +748,10 @@ EXPORT_SYMBOL(setup_arg_pages);
748 748
749#endif /* CONFIG_MMU */ 749#endif /* CONFIG_MMU */
750 750
751struct file *open_exec(const char *name) 751static struct file *do_open_exec(struct filename *name)
752{ 752{
753 struct file *file; 753 struct file *file;
754 int err; 754 int err;
755 struct filename tmp = { .name = name };
756 static const struct open_flags open_exec_flags = { 755 static const struct open_flags open_exec_flags = {
757 .open_flag = O_LARGEFILE | O_RDONLY | __FMODE_EXEC, 756 .open_flag = O_LARGEFILE | O_RDONLY | __FMODE_EXEC,
758 .acc_mode = MAY_EXEC | MAY_OPEN, 757 .acc_mode = MAY_EXEC | MAY_OPEN,
@@ -760,7 +759,7 @@ struct file *open_exec(const char *name)
760 .lookup_flags = LOOKUP_FOLLOW, 759 .lookup_flags = LOOKUP_FOLLOW,
761 }; 760 };
762 761
763 file = do_filp_open(AT_FDCWD, &tmp, &open_exec_flags); 762 file = do_filp_open(AT_FDCWD, name, &open_exec_flags);
764 if (IS_ERR(file)) 763 if (IS_ERR(file))
765 goto out; 764 goto out;
766 765
@@ -784,6 +783,12 @@ exit:
784 fput(file); 783 fput(file);
785 return ERR_PTR(err); 784 return ERR_PTR(err);
786} 785}
786
787struct file *open_exec(const char *name)
788{
789 struct filename tmp = { .name = name };
790 return do_open_exec(&tmp);
791}
787EXPORT_SYMBOL(open_exec); 792EXPORT_SYMBOL(open_exec);
788 793
789int kernel_read(struct file *file, loff_t offset, 794int kernel_read(struct file *file, loff_t offset,
@@ -1162,7 +1167,7 @@ int prepare_bprm_creds(struct linux_binprm *bprm)
1162 return -ENOMEM; 1167 return -ENOMEM;
1163} 1168}
1164 1169
1165void free_bprm(struct linux_binprm *bprm) 1170static void free_bprm(struct linux_binprm *bprm)
1166{ 1171{
1167 free_arg_pages(bprm); 1172 free_arg_pages(bprm);
1168 if (bprm->cred) { 1173 if (bprm->cred) {
@@ -1432,7 +1437,7 @@ static int exec_binprm(struct linux_binprm *bprm)
1432/* 1437/*
1433 * sys_execve() executes a new program. 1438 * sys_execve() executes a new program.
1434 */ 1439 */
1435static int do_execve_common(const char *filename, 1440static int do_execve_common(struct filename *filename,
1436 struct user_arg_ptr argv, 1441 struct user_arg_ptr argv,
1437 struct user_arg_ptr envp) 1442 struct user_arg_ptr envp)
1438{ 1443{
@@ -1441,6 +1446,9 @@ static int do_execve_common(const char *filename,
1441 struct files_struct *displaced; 1446 struct files_struct *displaced;
1442 int retval; 1447 int retval;
1443 1448
1449 if (IS_ERR(filename))
1450 return PTR_ERR(filename);
1451
1444 /* 1452 /*
1445 * We move the actual failure in case of RLIMIT_NPROC excess from 1453 * We move the actual failure in case of RLIMIT_NPROC excess from
1446 * set*uid() to execve() because too many poorly written programs 1454 * set*uid() to execve() because too many poorly written programs
@@ -1473,7 +1481,7 @@ static int do_execve_common(const char *filename,
1473 check_unsafe_exec(bprm); 1481 check_unsafe_exec(bprm);
1474 current->in_execve = 1; 1482 current->in_execve = 1;
1475 1483
1476 file = open_exec(filename); 1484 file = do_open_exec(filename);
1477 retval = PTR_ERR(file); 1485 retval = PTR_ERR(file);
1478 if (IS_ERR(file)) 1486 if (IS_ERR(file))
1479 goto out_unmark; 1487 goto out_unmark;
@@ -1481,8 +1489,7 @@ static int do_execve_common(const char *filename,
1481 sched_exec(); 1489 sched_exec();
1482 1490
1483 bprm->file = file; 1491 bprm->file = file;
1484 bprm->filename = filename; 1492 bprm->filename = bprm->interp = filename->name;
1485 bprm->interp = filename;
1486 1493
1487 retval = bprm_mm_init(bprm); 1494 retval = bprm_mm_init(bprm);
1488 if (retval) 1495 if (retval)
@@ -1523,6 +1530,7 @@ static int do_execve_common(const char *filename,
1523 acct_update_integrals(current); 1530 acct_update_integrals(current);
1524 task_numa_free(current); 1531 task_numa_free(current);
1525 free_bprm(bprm); 1532 free_bprm(bprm);
1533 putname(filename);
1526 if (displaced) 1534 if (displaced)
1527 put_files_struct(displaced); 1535 put_files_struct(displaced);
1528 return retval; 1536 return retval;
@@ -1544,10 +1552,11 @@ out_files:
1544 if (displaced) 1552 if (displaced)
1545 reset_files_struct(displaced); 1553 reset_files_struct(displaced);
1546out_ret: 1554out_ret:
1555 putname(filename);
1547 return retval; 1556 return retval;
1548} 1557}
1549 1558
1550int do_execve(const char *filename, 1559int do_execve(struct filename *filename,
1551 const char __user *const __user *__argv, 1560 const char __user *const __user *__argv,
1552 const char __user *const __user *__envp) 1561 const char __user *const __user *__envp)
1553{ 1562{
@@ -1557,7 +1566,7 @@ int do_execve(const char *filename,
1557} 1566}
1558 1567
1559#ifdef CONFIG_COMPAT 1568#ifdef CONFIG_COMPAT
1560static int compat_do_execve(const char *filename, 1569static int compat_do_execve(struct filename *filename,
1561 const compat_uptr_t __user *__argv, 1570 const compat_uptr_t __user *__argv,
1562 const compat_uptr_t __user *__envp) 1571 const compat_uptr_t __user *__envp)
1563{ 1572{
@@ -1607,25 +1616,13 @@ SYSCALL_DEFINE3(execve,
1607 const char __user *const __user *, argv, 1616 const char __user *const __user *, argv,
1608 const char __user *const __user *, envp) 1617 const char __user *const __user *, envp)
1609{ 1618{
1610 struct filename *path = getname(filename); 1619 return do_execve(getname(filename), argv, envp);
1611 int error = PTR_ERR(path);
1612 if (!IS_ERR(path)) {
1613 error = do_execve(path->name, argv, envp);
1614 putname(path);
1615 }
1616 return error;
1617} 1620}
1618#ifdef CONFIG_COMPAT 1621#ifdef CONFIG_COMPAT
1619asmlinkage long compat_sys_execve(const char __user * filename, 1622asmlinkage long compat_sys_execve(const char __user * filename,
1620 const compat_uptr_t __user * argv, 1623 const compat_uptr_t __user * argv,
1621 const compat_uptr_t __user * envp) 1624 const compat_uptr_t __user * envp)
1622{ 1625{
1623 struct filename *path = getname(filename); 1626 return compat_do_execve(getname(filename), argv, envp);
1624 int error = PTR_ERR(path);
1625 if (!IS_ERR(path)) {
1626 error = compat_do_execve(path->name, argv, envp);
1627 putname(path);
1628 }
1629 return error;
1630} 1627}
1631#endif 1628#endif
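After this change do_execve() takes the struct filename returned by getname() directly; do_execve_common() begins with an IS_ERR() check so a failed getname() propagates its error, and putname() runs on every exit path. A standalone sketch of that ERR_PTR-style convention (simplified userspace stand-ins, not the kernel macros):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdint.h>
#include <errno.h>

#define MAX_ERRNO 4095
static void *err_ptr(long err)      { return (void *)err; }
static int   is_err(const void *p)  { return (uintptr_t)p >= (uintptr_t)-MAX_ERRNO; }
static long  ptr_err(const void *p) { return (long)(intptr_t)p; }

static char *getname_demo(const char *user)
{
	if (strlen(user) > 16)
		return err_ptr(-ENAMETOOLONG);	/* error code encoded in the pointer */
	return strdup(user);
}

static int do_execve_demo(char *name)
{
	if (is_err(name))
		return ptr_err(name);		/* propagate the getname() failure */
	printf("exec'ing %s\n", name);
	free(name);				/* putname() analogue on the success path */
	return 0;
}

int main(void)
{
	printf("%d\n", do_execve_demo(getname_demo("/bin/true")));
	printf("%d\n", do_execve_demo(getname_demo("/a/much/longer/path/name")));
	return 0;
}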
diff --git a/fs/ext4/file.c b/fs/ext4/file.c
index 43e64f6022eb..1a5073959f32 100644
--- a/fs/ext4/file.c
+++ b/fs/ext4/file.c
@@ -152,7 +152,7 @@ ext4_file_dio_write(struct kiocb *iocb, const struct iovec *iov,
152 if (ret > 0) { 152 if (ret > 0) {
153 ssize_t err; 153 ssize_t err;
154 154
155 err = generic_write_sync(file, pos, ret); 155 err = generic_write_sync(file, iocb->ki_pos - ret, ret);
156 if (err < 0 && ret > 0) 156 if (err < 0 && ret > 0)
157 ret = err; 157 ret = err;
158 } 158 }
diff --git a/fs/file.c b/fs/file.c
index 771578b33fb6..db25c2bdfe46 100644
--- a/fs/file.c
+++ b/fs/file.c
@@ -34,7 +34,7 @@ static void *alloc_fdmem(size_t size)
34 * vmalloc() if the allocation size will be considered "large" by the VM. 34 * vmalloc() if the allocation size will be considered "large" by the VM.
35 */ 35 */
36 if (size <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER)) { 36 if (size <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER)) {
37 void *data = kmalloc(size, GFP_KERNEL|__GFP_NOWARN); 37 void *data = kmalloc(size, GFP_KERNEL|__GFP_NOWARN|__GFP_NORETRY);
38 if (data != NULL) 38 if (data != NULL)
39 return data; 39 return data;
40 } 40 }
diff --git a/fs/jfs/xattr.c b/fs/jfs/xattr.c
index 3bd5ee45f7b3..46325d5c34fc 100644
--- a/fs/jfs/xattr.c
+++ b/fs/jfs/xattr.c
@@ -854,9 +854,6 @@ int jfs_setxattr(struct dentry *dentry, const char *name, const void *value,
854 int rc; 854 int rc;
855 tid_t tid; 855 tid_t tid;
856 856
857 if ((rc = can_set_xattr(inode, name, value, value_len)))
858 return rc;
859
860 /* 857 /*
861 * If this is a request for a synthetic attribute in the system.* 858 * If this is a request for a synthetic attribute in the system.*
862 * namespace use the generic infrastructure to resolve a handler 859 * namespace use the generic infrastructure to resolve a handler
@@ -865,6 +862,9 @@ int jfs_setxattr(struct dentry *dentry, const char *name, const void *value,
865 if (!strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN)) 862 if (!strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN))
866 return generic_setxattr(dentry, name, value, value_len, flags); 863 return generic_setxattr(dentry, name, value, value_len, flags);
867 864
865 if ((rc = can_set_xattr(inode, name, value, value_len)))
866 return rc;
867
868 if (value == NULL) { /* empty EA, do not remove */ 868 if (value == NULL) { /* empty EA, do not remove */
869 value = ""; 869 value = "";
870 value_len = 0; 870 value_len = 0;
@@ -1034,9 +1034,6 @@ int jfs_removexattr(struct dentry *dentry, const char *name)
1034 int rc; 1034 int rc;
1035 tid_t tid; 1035 tid_t tid;
1036 1036
1037 if ((rc = can_set_xattr(inode, name, NULL, 0)))
1038 return rc;
1039
1040 /* 1037 /*
1041 * If this is a request for a synthetic attribute in the system.* 1038 * If this is a request for a synthetic attribute in the system.*
1042 * namespace use the generic infrastructure to resolve a handler 1039 * namespace use the generic infrastructure to resolve a handler
@@ -1045,6 +1042,9 @@ int jfs_removexattr(struct dentry *dentry, const char *name)
1045 if (!strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN)) 1042 if (!strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN))
1046 return generic_removexattr(dentry, name); 1043 return generic_removexattr(dentry, name);
1047 1044
1045 if ((rc = can_set_xattr(inode, name, NULL, 0)))
1046 return rc;
1047
1048 tid = txBegin(inode->i_sb, 0); 1048 tid = txBegin(inode->i_sb, 0);
1049 mutex_lock(&ji->commit_mutex); 1049 mutex_lock(&ji->commit_mutex);
1050 rc = __jfs_setxattr(tid, dentry->d_inode, name, NULL, 0, XATTR_REPLACE); 1050 rc = __jfs_setxattr(tid, dentry->d_inode, name, NULL, 0, XATTR_REPLACE);
@@ -1061,7 +1061,7 @@ int jfs_removexattr(struct dentry *dentry, const char *name)
1061 * attributes are handled directly. 1061 * attributes are handled directly.
1062 */ 1062 */
1063const struct xattr_handler *jfs_xattr_handlers[] = { 1063const struct xattr_handler *jfs_xattr_handlers[] = {
1064#ifdef JFS_POSIX_ACL 1064#ifdef CONFIG_JFS_POSIX_ACL
1065 &posix_acl_access_xattr_handler, 1065 &posix_acl_access_xattr_handler,
1066 &posix_acl_default_xattr_handler, 1066 &posix_acl_default_xattr_handler,
1067#endif 1067#endif
diff --git a/fs/kernfs/dir.c b/fs/kernfs/dir.c
index 5104cf5d25c5..bd6e18be6e1a 100644
--- a/fs/kernfs/dir.c
+++ b/fs/kernfs/dir.c
@@ -187,19 +187,23 @@ static void kernfs_deactivate(struct kernfs_node *kn)
187 187
188 kn->u.completion = (void *)&wait; 188 kn->u.completion = (void *)&wait;
189 189
190 rwsem_acquire(&kn->dep_map, 0, 0, _RET_IP_); 190 if (kn->flags & KERNFS_LOCKDEP)
191 rwsem_acquire(&kn->dep_map, 0, 0, _RET_IP_);
191 /* atomic_add_return() is a mb(), put_active() will always see 192 /* atomic_add_return() is a mb(), put_active() will always see
192 * the updated kn->u.completion. 193 * the updated kn->u.completion.
193 */ 194 */
194 v = atomic_add_return(KN_DEACTIVATED_BIAS, &kn->active); 195 v = atomic_add_return(KN_DEACTIVATED_BIAS, &kn->active);
195 196
196 if (v != KN_DEACTIVATED_BIAS) { 197 if (v != KN_DEACTIVATED_BIAS) {
197 lock_contended(&kn->dep_map, _RET_IP_); 198 if (kn->flags & KERNFS_LOCKDEP)
199 lock_contended(&kn->dep_map, _RET_IP_);
198 wait_for_completion(&wait); 200 wait_for_completion(&wait);
199 } 201 }
200 202
201 lock_acquired(&kn->dep_map, _RET_IP_); 203 if (kn->flags & KERNFS_LOCKDEP) {
202 rwsem_release(&kn->dep_map, 1, _RET_IP_); 204 lock_acquired(&kn->dep_map, _RET_IP_);
205 rwsem_release(&kn->dep_map, 1, _RET_IP_);
206 }
203} 207}
204 208
205/** 209/**
diff --git a/fs/namei.c b/fs/namei.c
index d580df2e6804..385f7817bfcc 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -196,6 +196,7 @@ recopy:
196 goto error; 196 goto error;
197 197
198 result->uptr = filename; 198 result->uptr = filename;
199 result->aname = NULL;
199 audit_getname(result); 200 audit_getname(result);
200 return result; 201 return result;
201 202
@@ -210,6 +211,35 @@ getname(const char __user * filename)
210 return getname_flags(filename, 0, NULL); 211 return getname_flags(filename, 0, NULL);
211} 212}
212 213
214/*
215 * The "getname_kernel()" interface doesn't do pathnames longer
216 * than EMBEDDED_NAME_MAX. Deal with it - you're a kernel user.
217 */
218struct filename *
219getname_kernel(const char * filename)
220{
221 struct filename *result;
222 char *kname;
223 int len;
224
225 len = strlen(filename);
226 if (len >= EMBEDDED_NAME_MAX)
227 return ERR_PTR(-ENAMETOOLONG);
228
229 result = __getname();
230 if (unlikely(!result))
231 return ERR_PTR(-ENOMEM);
232
233 kname = (char *)result + sizeof(*result);
234 result->name = kname;
235 result->uptr = NULL;
236 result->aname = NULL;
237 result->separate = false;
238
239 strlcpy(kname, filename, EMBEDDED_NAME_MAX);
240 return result;
241}
242
213#ifdef CONFIG_AUDITSYSCALL 243#ifdef CONFIG_AUDITSYSCALL
214void putname(struct filename *name) 244void putname(struct filename *name)
215{ 245{
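getname_kernel() above keeps the kernel pathname in the same allocation, immediately after the struct filename header, and refuses anything that would not fit in EMBEDDED_NAME_MAX. A userspace sketch of that single-allocation layout (the 32-byte limit and names here are illustrative, not the kernel values):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define NAME_MAX_DEMO 32		/* illustrative; EMBEDDED_NAME_MAX is larger */

struct filename_demo {
	const char *name;		/* points just past this header */
};

static struct filename_demo *getname_kernel_demo(const char *filename)
{
	size_t len = strlen(filename);
	struct filename_demo *result;
	char *kname;

	if (len >= NAME_MAX_DEMO)
		return NULL;		/* the kernel returns ERR_PTR(-ENAMETOOLONG) */

	result = malloc(sizeof(*result) + NAME_MAX_DEMO);
	if (!result)
		return NULL;

	kname = (char *)result + sizeof(*result);	/* name lives in the same buffer */
	memcpy(kname, filename, len + 1);
	result->name = kname;
	return result;
}

int main(void)
{
	struct filename_demo *f = getname_kernel_demo("/etc/hostname");

	if (!f)
		return 1;
	printf("stored: %s\n", f->name);
	free(f);			/* one free releases header and name together */
	return 0;
}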
diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
index be38b573495a..4a48fe4b84b6 100644
--- a/fs/nfs/dir.c
+++ b/fs/nfs/dir.c
@@ -1846,6 +1846,11 @@ int nfs_symlink(struct inode *dir, struct dentry *dentry, const char *symname)
1846 GFP_KERNEL)) { 1846 GFP_KERNEL)) {
1847 SetPageUptodate(page); 1847 SetPageUptodate(page);
1848 unlock_page(page); 1848 unlock_page(page);
1849 /*
1850 * add_to_page_cache_lru() grabs an extra page refcount.
1851 * Drop it here to avoid leaking this page later.
1852 */
1853 page_cache_release(page);
1849 } else 1854 } else
1850 __free_page(page); 1855 __free_page(page);
1851 1856
diff --git a/fs/nfs/nfs3acl.c b/fs/nfs/nfs3acl.c
index 9a5ca03fa539..871d6eda8dba 100644
--- a/fs/nfs/nfs3acl.c
+++ b/fs/nfs/nfs3acl.c
@@ -80,7 +80,7 @@ struct posix_acl *nfs3_get_acl(struct inode *inode, int type)
80 } 80 }
81 81
82 if (res.acl_access != NULL) { 82 if (res.acl_access != NULL) {
83 if (posix_acl_equiv_mode(res.acl_access, NULL) || 83 if ((posix_acl_equiv_mode(res.acl_access, NULL) == 0) ||
84 res.acl_access->a_count == 0) { 84 res.acl_access->a_count == 0) {
85 posix_acl_release(res.acl_access); 85 posix_acl_release(res.acl_access);
86 res.acl_access = NULL; 86 res.acl_access = NULL;
@@ -113,7 +113,7 @@ getout:
113 return ERR_PTR(status); 113 return ERR_PTR(status);
114} 114}
115 115
116int nfs3_proc_setacls(struct inode *inode, struct posix_acl *acl, 116static int __nfs3_proc_setacls(struct inode *inode, struct posix_acl *acl,
117 struct posix_acl *dfacl) 117 struct posix_acl *dfacl)
118{ 118{
119 struct nfs_server *server = NFS_SERVER(inode); 119 struct nfs_server *server = NFS_SERVER(inode);
@@ -198,6 +198,15 @@ out:
198 return status; 198 return status;
199} 199}
200 200
201int nfs3_proc_setacls(struct inode *inode, struct posix_acl *acl,
202 struct posix_acl *dfacl)
203{
204 int ret;
205 ret = __nfs3_proc_setacls(inode, acl, dfacl);
206 return (ret == -EOPNOTSUPP) ? 0 : ret;
207
208}
209
201int nfs3_set_acl(struct inode *inode, struct posix_acl *acl, int type) 210int nfs3_set_acl(struct inode *inode, struct posix_acl *acl, int type)
202{ 211{
203 struct posix_acl *alloc = NULL, *dfacl = NULL; 212 struct posix_acl *alloc = NULL, *dfacl = NULL;
@@ -225,7 +234,7 @@ int nfs3_set_acl(struct inode *inode, struct posix_acl *acl, int type)
225 if (IS_ERR(alloc)) 234 if (IS_ERR(alloc))
226 goto fail; 235 goto fail;
227 } 236 }
228 status = nfs3_proc_setacls(inode, acl, dfacl); 237 status = __nfs3_proc_setacls(inode, acl, dfacl);
229 posix_acl_release(alloc); 238 posix_acl_release(alloc);
230 return status; 239 return status;
231 240
@@ -233,25 +242,6 @@ fail:
233 return PTR_ERR(alloc); 242 return PTR_ERR(alloc);
234} 243}
235 244
236int nfs3_proc_set_default_acl(struct inode *dir, struct inode *inode,
237 umode_t mode)
238{
239 struct posix_acl *default_acl, *acl;
240 int error;
241
242 error = posix_acl_create(dir, &mode, &default_acl, &acl);
243 if (error)
244 return (error == -EOPNOTSUPP) ? 0 : error;
245
246 error = nfs3_proc_setacls(inode, acl, default_acl);
247
248 if (acl)
249 posix_acl_release(acl);
250 if (default_acl)
251 posix_acl_release(default_acl);
252 return error;
253}
254
255const struct xattr_handler *nfs3_xattr_handlers[] = { 245const struct xattr_handler *nfs3_xattr_handlers[] = {
256 &posix_acl_access_xattr_handler, 246 &posix_acl_access_xattr_handler,
257 &posix_acl_default_xattr_handler, 247 &posix_acl_default_xattr_handler,
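nfs3_proc_setacls() is now a thin wrapper that treats -EOPNOTSUPP from __nfs3_proc_setacls() as success, so callers that set ACLs opportunistically do not fail against servers without the ACL extension, while nfs3_set_acl() keeps the raw return. The wrapper idiom by itself (illustrative userspace sketch):

#include <stdio.h>
#include <errno.h>

static int __setacls_demo(int server_has_acls)
{
	return server_has_acls ? 0 : -EOPNOTSUPP;
}

/* best-effort variant: missing server support is not an error for these callers */
static int setacls_demo(int server_has_acls)
{
	int ret = __setacls_demo(server_has_acls);

	return (ret == -EOPNOTSUPP) ? 0 : ret;
}

int main(void)
{
	printf("with ACLs: %d, without: %d\n", setacls_demo(1), setacls_demo(0));
	return 0;
}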
diff --git a/fs/nfs/nfs4client.c b/fs/nfs/nfs4client.c
index dbb3e1f30c68..860ad26a5590 100644
--- a/fs/nfs/nfs4client.c
+++ b/fs/nfs/nfs4client.c
@@ -170,7 +170,7 @@ void nfs41_shutdown_client(struct nfs_client *clp)
170void nfs40_shutdown_client(struct nfs_client *clp) 170void nfs40_shutdown_client(struct nfs_client *clp)
171{ 171{
172 if (clp->cl_slot_tbl) { 172 if (clp->cl_slot_tbl) {
173 nfs4_release_slot_table(clp->cl_slot_tbl); 173 nfs4_shutdown_slot_table(clp->cl_slot_tbl);
174 kfree(clp->cl_slot_tbl); 174 kfree(clp->cl_slot_tbl);
175 } 175 }
176} 176}
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index 42da6af77587..2da6a698b8f7 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -1620,15 +1620,15 @@ static void nfs4_open_confirm_prepare(struct rpc_task *task, void *calldata)
1620{ 1620{
1621 struct nfs4_opendata *data = calldata; 1621 struct nfs4_opendata *data = calldata;
1622 1622
1623 nfs40_setup_sequence(data->o_arg.server, &data->o_arg.seq_args, 1623 nfs40_setup_sequence(data->o_arg.server, &data->c_arg.seq_args,
1624 &data->o_res.seq_res, task); 1624 &data->c_res.seq_res, task);
1625} 1625}
1626 1626
1627static void nfs4_open_confirm_done(struct rpc_task *task, void *calldata) 1627static void nfs4_open_confirm_done(struct rpc_task *task, void *calldata)
1628{ 1628{
1629 struct nfs4_opendata *data = calldata; 1629 struct nfs4_opendata *data = calldata;
1630 1630
1631 nfs40_sequence_done(task, &data->o_res.seq_res); 1631 nfs40_sequence_done(task, &data->c_res.seq_res);
1632 1632
1633 data->rpc_status = task->tk_status; 1633 data->rpc_status = task->tk_status;
1634 if (data->rpc_status == 0) { 1634 if (data->rpc_status == 0) {
@@ -1686,7 +1686,7 @@ static int _nfs4_proc_open_confirm(struct nfs4_opendata *data)
1686 }; 1686 };
1687 int status; 1687 int status;
1688 1688
1689 nfs4_init_sequence(&data->o_arg.seq_args, &data->o_res.seq_res, 1); 1689 nfs4_init_sequence(&data->c_arg.seq_args, &data->c_res.seq_res, 1);
1690 kref_get(&data->kref); 1690 kref_get(&data->kref);
1691 data->rpc_done = 0; 1691 data->rpc_done = 0;
1692 data->rpc_status = 0; 1692 data->rpc_status = 0;
diff --git a/fs/nfs/nfs4session.c b/fs/nfs/nfs4session.c
index cf883c7ae053..e799dc3c3b1d 100644
--- a/fs/nfs/nfs4session.c
+++ b/fs/nfs/nfs4session.c
@@ -231,14 +231,23 @@ out:
231 return ret; 231 return ret;
232} 232}
233 233
234/*
235 * nfs4_release_slot_table - release all slot table entries
236 */
237static void nfs4_release_slot_table(struct nfs4_slot_table *tbl)
238{
239 nfs4_shrink_slot_table(tbl, 0);
240}
241
234/** 242/**
235 * nfs4_release_slot_table - release resources attached to a slot table 243 * nfs4_shutdown_slot_table - release resources attached to a slot table
236 * @tbl: slot table to shut down 244 * @tbl: slot table to shut down
237 * 245 *
238 */ 246 */
239void nfs4_release_slot_table(struct nfs4_slot_table *tbl) 247void nfs4_shutdown_slot_table(struct nfs4_slot_table *tbl)
240{ 248{
241 nfs4_shrink_slot_table(tbl, 0); 249 nfs4_release_slot_table(tbl);
250 rpc_destroy_wait_queue(&tbl->slot_tbl_waitq);
242} 251}
243 252
244/** 253/**
@@ -422,7 +431,7 @@ void nfs41_update_target_slotid(struct nfs4_slot_table *tbl,
422 spin_unlock(&tbl->slot_tbl_lock); 431 spin_unlock(&tbl->slot_tbl_lock);
423} 432}
424 433
425static void nfs4_destroy_session_slot_tables(struct nfs4_session *session) 434static void nfs4_release_session_slot_tables(struct nfs4_session *session)
426{ 435{
427 nfs4_release_slot_table(&session->fc_slot_table); 436 nfs4_release_slot_table(&session->fc_slot_table);
428 nfs4_release_slot_table(&session->bc_slot_table); 437 nfs4_release_slot_table(&session->bc_slot_table);
@@ -450,7 +459,7 @@ int nfs4_setup_session_slot_tables(struct nfs4_session *ses)
450 if (status && tbl->slots == NULL) 459 if (status && tbl->slots == NULL)
451 /* Fore and back channel share a connection so get 460 /* Fore and back channel share a connection so get
452 * both slot tables or neither */ 461 * both slot tables or neither */
453 nfs4_destroy_session_slot_tables(ses); 462 nfs4_release_session_slot_tables(ses);
454 return status; 463 return status;
455} 464}
456 465
@@ -470,6 +479,12 @@ struct nfs4_session *nfs4_alloc_session(struct nfs_client *clp)
470 return session; 479 return session;
471} 480}
472 481
482static void nfs4_destroy_session_slot_tables(struct nfs4_session *session)
483{
484 nfs4_shutdown_slot_table(&session->fc_slot_table);
485 nfs4_shutdown_slot_table(&session->bc_slot_table);
486}
487
473void nfs4_destroy_session(struct nfs4_session *session) 488void nfs4_destroy_session(struct nfs4_session *session)
474{ 489{
475 struct rpc_xprt *xprt; 490 struct rpc_xprt *xprt;
diff --git a/fs/nfs/nfs4session.h b/fs/nfs/nfs4session.h
index 232306100651..b34ada9bc6a2 100644
--- a/fs/nfs/nfs4session.h
+++ b/fs/nfs/nfs4session.h
@@ -74,7 +74,7 @@ enum nfs4_session_state {
74 74
75extern int nfs4_setup_slot_table(struct nfs4_slot_table *tbl, 75extern int nfs4_setup_slot_table(struct nfs4_slot_table *tbl,
76 unsigned int max_reqs, const char *queue); 76 unsigned int max_reqs, const char *queue);
77extern void nfs4_release_slot_table(struct nfs4_slot_table *tbl); 77extern void nfs4_shutdown_slot_table(struct nfs4_slot_table *tbl);
78extern struct nfs4_slot *nfs4_alloc_slot(struct nfs4_slot_table *tbl); 78extern struct nfs4_slot *nfs4_alloc_slot(struct nfs4_slot_table *tbl);
79extern void nfs4_free_slot(struct nfs4_slot_table *tbl, struct nfs4_slot *slot); 79extern void nfs4_free_slot(struct nfs4_slot_table *tbl, struct nfs4_slot *slot);
80extern void nfs4_slot_tbl_drain_complete(struct nfs4_slot_table *tbl); 80extern void nfs4_slot_tbl_drain_complete(struct nfs4_slot_table *tbl);
diff --git a/fs/ntfs/file.c b/fs/ntfs/file.c
index ea4ba9daeb47..db9bd8a31725 100644
--- a/fs/ntfs/file.c
+++ b/fs/ntfs/file.c
@@ -2134,7 +2134,7 @@ static ssize_t ntfs_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
2134 ret = ntfs_file_aio_write_nolock(iocb, iov, nr_segs, &iocb->ki_pos); 2134 ret = ntfs_file_aio_write_nolock(iocb, iov, nr_segs, &iocb->ki_pos);
2135 mutex_unlock(&inode->i_mutex); 2135 mutex_unlock(&inode->i_mutex);
2136 if (ret > 0) { 2136 if (ret > 0) {
2137 int err = generic_write_sync(file, pos, ret); 2137 int err = generic_write_sync(file, iocb->ki_pos - ret, ret);
2138 if (err < 0) 2138 if (err < 0)
2139 ret = err; 2139 ret = err;
2140 } 2140 }
diff --git a/fs/ocfs2/alloc.c b/fs/ocfs2/alloc.c
index 8750ae1b8636..e2edff38be52 100644
--- a/fs/ocfs2/alloc.c
+++ b/fs/ocfs2/alloc.c
@@ -4742,6 +4742,7 @@ int ocfs2_add_clusters_in_btree(handle_t *handle,
4742 enum ocfs2_alloc_restarted *reason_ret) 4742 enum ocfs2_alloc_restarted *reason_ret)
4743{ 4743{
4744 int status = 0, err = 0; 4744 int status = 0, err = 0;
4745 int need_free = 0;
4745 int free_extents; 4746 int free_extents;
4746 enum ocfs2_alloc_restarted reason = RESTART_NONE; 4747 enum ocfs2_alloc_restarted reason = RESTART_NONE;
4747 u32 bit_off, num_bits; 4748 u32 bit_off, num_bits;
@@ -4796,7 +4797,8 @@ int ocfs2_add_clusters_in_btree(handle_t *handle,
4796 OCFS2_JOURNAL_ACCESS_WRITE); 4797 OCFS2_JOURNAL_ACCESS_WRITE);
4797 if (status < 0) { 4798 if (status < 0) {
4798 mlog_errno(status); 4799 mlog_errno(status);
4799 goto leave; 4800 need_free = 1;
4801 goto bail;
4800 } 4802 }
4801 4803
4802 block = ocfs2_clusters_to_blocks(osb->sb, bit_off); 4804 block = ocfs2_clusters_to_blocks(osb->sb, bit_off);
@@ -4807,7 +4809,8 @@ int ocfs2_add_clusters_in_btree(handle_t *handle,
4807 num_bits, flags, meta_ac); 4809 num_bits, flags, meta_ac);
4808 if (status < 0) { 4810 if (status < 0) {
4809 mlog_errno(status); 4811 mlog_errno(status);
4810 goto leave; 4812 need_free = 1;
4813 goto bail;
4811 } 4814 }
4812 4815
4813 ocfs2_journal_dirty(handle, et->et_root_bh); 4816 ocfs2_journal_dirty(handle, et->et_root_bh);
@@ -4821,6 +4824,19 @@ int ocfs2_add_clusters_in_btree(handle_t *handle,
4821 reason = RESTART_TRANS; 4824 reason = RESTART_TRANS;
4822 } 4825 }
4823 4826
4827bail:
4828 if (need_free) {
4829 if (data_ac->ac_which == OCFS2_AC_USE_LOCAL)
4830 ocfs2_free_local_alloc_bits(osb, handle, data_ac,
4831 bit_off, num_bits);
4832 else
4833 ocfs2_free_clusters(handle,
4834 data_ac->ac_inode,
4835 data_ac->ac_bh,
4836 ocfs2_clusters_to_blocks(osb->sb, bit_off),
4837 num_bits);
4838 }
4839
4824leave: 4840leave:
4825 if (reason_ret) 4841 if (reason_ret)
4826 *reason_ret = reason; 4842 *reason_ret = reason;
@@ -6805,6 +6821,8 @@ int ocfs2_convert_inline_data_to_extents(struct inode *inode,
6805 struct buffer_head *di_bh) 6821 struct buffer_head *di_bh)
6806{ 6822{
6807 int ret, i, has_data, num_pages = 0; 6823 int ret, i, has_data, num_pages = 0;
6824 int need_free = 0;
6825 u32 bit_off, num;
6808 handle_t *handle; 6826 handle_t *handle;
6809 u64 uninitialized_var(block); 6827 u64 uninitialized_var(block);
6810 struct ocfs2_inode_info *oi = OCFS2_I(inode); 6828 struct ocfs2_inode_info *oi = OCFS2_I(inode);
@@ -6850,7 +6868,6 @@ int ocfs2_convert_inline_data_to_extents(struct inode *inode,
6850 } 6868 }
6851 6869
6852 if (has_data) { 6870 if (has_data) {
6853 u32 bit_off, num;
6854 unsigned int page_end; 6871 unsigned int page_end;
6855 u64 phys; 6872 u64 phys;
6856 6873
@@ -6886,6 +6903,7 @@ int ocfs2_convert_inline_data_to_extents(struct inode *inode,
6886 ret = ocfs2_grab_eof_pages(inode, 0, end, pages, &num_pages); 6903 ret = ocfs2_grab_eof_pages(inode, 0, end, pages, &num_pages);
6887 if (ret) { 6904 if (ret) {
6888 mlog_errno(ret); 6905 mlog_errno(ret);
6906 need_free = 1;
6889 goto out_commit; 6907 goto out_commit;
6890 } 6908 }
6891 6909
@@ -6896,6 +6914,7 @@ int ocfs2_convert_inline_data_to_extents(struct inode *inode,
6896 ret = ocfs2_read_inline_data(inode, pages[0], di_bh); 6914 ret = ocfs2_read_inline_data(inode, pages[0], di_bh);
6897 if (ret) { 6915 if (ret) {
6898 mlog_errno(ret); 6916 mlog_errno(ret);
6917 need_free = 1;
6899 goto out_commit; 6918 goto out_commit;
6900 } 6919 }
6901 6920
@@ -6927,6 +6946,7 @@ int ocfs2_convert_inline_data_to_extents(struct inode *inode,
6927 ret = ocfs2_insert_extent(handle, &et, 0, block, 1, 0, NULL); 6946 ret = ocfs2_insert_extent(handle, &et, 0, block, 1, 0, NULL);
6928 if (ret) { 6947 if (ret) {
6929 mlog_errno(ret); 6948 mlog_errno(ret);
6949 need_free = 1;
6930 goto out_commit; 6950 goto out_commit;
6931 } 6951 }
6932 6952
@@ -6938,6 +6958,18 @@ out_commit:
6938 dquot_free_space_nodirty(inode, 6958 dquot_free_space_nodirty(inode,
6939 ocfs2_clusters_to_bytes(osb->sb, 1)); 6959 ocfs2_clusters_to_bytes(osb->sb, 1));
6940 6960
6961 if (need_free) {
6962 if (data_ac->ac_which == OCFS2_AC_USE_LOCAL)
6963 ocfs2_free_local_alloc_bits(osb, handle, data_ac,
6964 bit_off, num);
6965 else
6966 ocfs2_free_clusters(handle,
6967 data_ac->ac_inode,
6968 data_ac->ac_bh,
6969 ocfs2_clusters_to_blocks(osb->sb, bit_off),
6970 num);
6971 }
6972
6941 ocfs2_commit_trans(osb, handle); 6973 ocfs2_commit_trans(osb, handle);
6942 6974
6943out_unlock: 6975out_unlock:
@@ -7126,7 +7158,7 @@ int ocfs2_truncate_inline(struct inode *inode, struct buffer_head *di_bh,
7126 if (end > i_size_read(inode)) 7158 if (end > i_size_read(inode))
7127 end = i_size_read(inode); 7159 end = i_size_read(inode);
7128 7160
7129 BUG_ON(start >= end); 7161 BUG_ON(start > end);
7130 7162
7131 if (!(OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL) || 7163 if (!(OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL) ||
7132 !(le16_to_cpu(di->i_dyn_features) & OCFS2_INLINE_DATA_FL) || 7164 !(le16_to_cpu(di->i_dyn_features) & OCFS2_INLINE_DATA_FL) ||
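Both ocfs2/alloc.c hunks add a need_free flag so that when the journal access or extent insert fails after cluster bits were already claimed, the error path hands them back to either the local alloc bitmap or the global bitmap. The goto-plus-flag cleanup idiom in isolation (illustrative names, userspace sketch):

#include <stdio.h>
#include <errno.h>

static int claim_bits(void)	{ return 0; }		/* pretend the claim succeeds */
static int journal_access(void)	{ return -EIO; }	/* pretend this step fails */
static void free_bits(void)	{ printf("returning claimed bits\n"); }

static int add_clusters_demo(void)
{
	int status, need_free = 0;

	status = claim_bits();
	if (status < 0)
		goto bail;		/* nothing claimed yet, nothing to undo */

	status = journal_access();
	if (status < 0) {
		need_free = 1;		/* bits were claimed and must be given back */
		goto bail;
	}

bail:
	if (need_free)
		free_bits();
	return status;
}

int main(void)
{
	printf("status %d\n", add_clusters_demo());
	return 0;
}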
diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c
index d77d71ead8d1..8450262bcf2a 100644
--- a/fs/ocfs2/file.c
+++ b/fs/ocfs2/file.c
@@ -185,6 +185,9 @@ static int ocfs2_sync_file(struct file *file, loff_t start, loff_t end,
185 file->f_path.dentry->d_name.name, 185 file->f_path.dentry->d_name.name,
186 (unsigned long long)datasync); 186 (unsigned long long)datasync);
187 187
188 if (ocfs2_is_hard_readonly(osb) || ocfs2_is_soft_readonly(osb))
189 return -EROFS;
190
188 err = filemap_write_and_wait_range(inode->i_mapping, start, end); 191 err = filemap_write_and_wait_range(inode->i_mapping, start, end);
189 if (err) 192 if (err)
190 return err; 193 return err;
@@ -474,11 +477,6 @@ static int ocfs2_truncate_file(struct inode *inode,
474 goto bail; 477 goto bail;
475 } 478 }
476 479
477 /* lets handle the simple truncate cases before doing any more
478 * cluster locking. */
479 if (new_i_size == le64_to_cpu(fe->i_size))
480 goto bail;
481
482 down_write(&OCFS2_I(inode)->ip_alloc_sem); 480 down_write(&OCFS2_I(inode)->ip_alloc_sem);
483 481
484 ocfs2_resv_discard(&osb->osb_la_resmap, 482 ocfs2_resv_discard(&osb->osb_la_resmap,
@@ -718,7 +716,8 @@ leave:
718 * While a write will already be ordering the data, a truncate will not. 716 * While a write will already be ordering the data, a truncate will not.
719 * Thus, we need to explicitly order the zeroed pages. 717 * Thus, we need to explicitly order the zeroed pages.
720 */ 718 */
721static handle_t *ocfs2_zero_start_ordered_transaction(struct inode *inode) 719static handle_t *ocfs2_zero_start_ordered_transaction(struct inode *inode,
720 struct buffer_head *di_bh)
722{ 721{
723 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); 722 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
724 handle_t *handle = NULL; 723 handle_t *handle = NULL;
@@ -735,7 +734,14 @@ static handle_t *ocfs2_zero_start_ordered_transaction(struct inode *inode)
735 } 734 }
736 735
737 ret = ocfs2_jbd2_file_inode(handle, inode); 736 ret = ocfs2_jbd2_file_inode(handle, inode);
738 if (ret < 0) 737 if (ret < 0) {
738 mlog_errno(ret);
739 goto out;
740 }
741
742 ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), di_bh,
743 OCFS2_JOURNAL_ACCESS_WRITE);
744 if (ret)
739 mlog_errno(ret); 745 mlog_errno(ret);
740 746
741out: 747out:
@@ -751,7 +757,7 @@ out:
751 * to be too fragile to do exactly what we need without us having to 757 * to be too fragile to do exactly what we need without us having to
752 * worry about recursive locking in ->write_begin() and ->write_end(). */ 758 * worry about recursive locking in ->write_begin() and ->write_end(). */
753static int ocfs2_write_zero_page(struct inode *inode, u64 abs_from, 759static int ocfs2_write_zero_page(struct inode *inode, u64 abs_from,
754 u64 abs_to) 760 u64 abs_to, struct buffer_head *di_bh)
755{ 761{
756 struct address_space *mapping = inode->i_mapping; 762 struct address_space *mapping = inode->i_mapping;
757 struct page *page; 763 struct page *page;
@@ -759,6 +765,7 @@ static int ocfs2_write_zero_page(struct inode *inode, u64 abs_from,
759 handle_t *handle = NULL; 765 handle_t *handle = NULL;
760 int ret = 0; 766 int ret = 0;
761 unsigned zero_from, zero_to, block_start, block_end; 767 unsigned zero_from, zero_to, block_start, block_end;
768 struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
762 769
763 BUG_ON(abs_from >= abs_to); 770 BUG_ON(abs_from >= abs_to);
764 BUG_ON(abs_to > (((u64)index + 1) << PAGE_CACHE_SHIFT)); 771 BUG_ON(abs_to > (((u64)index + 1) << PAGE_CACHE_SHIFT));
@@ -801,7 +808,8 @@ static int ocfs2_write_zero_page(struct inode *inode, u64 abs_from,
801 } 808 }
802 809
803 if (!handle) { 810 if (!handle) {
804 handle = ocfs2_zero_start_ordered_transaction(inode); 811 handle = ocfs2_zero_start_ordered_transaction(inode,
812 di_bh);
805 if (IS_ERR(handle)) { 813 if (IS_ERR(handle)) {
806 ret = PTR_ERR(handle); 814 ret = PTR_ERR(handle);
807 handle = NULL; 815 handle = NULL;
@@ -818,8 +826,22 @@ static int ocfs2_write_zero_page(struct inode *inode, u64 abs_from,
818 ret = 0; 826 ret = 0;
819 } 827 }
820 828
821 if (handle) 829 if (handle) {
830 /*
831 * fs-writeback will release the dirty pages without page lock
832 * whose offset are over inode size, the release happens at
833 * block_write_full_page_endio().
834 */
835 i_size_write(inode, abs_to);
836 inode->i_blocks = ocfs2_inode_sector_count(inode);
837 di->i_size = cpu_to_le64((u64)i_size_read(inode));
838 inode->i_mtime = inode->i_ctime = CURRENT_TIME;
839 di->i_mtime = di->i_ctime = cpu_to_le64(inode->i_mtime.tv_sec);
840 di->i_ctime_nsec = cpu_to_le32(inode->i_mtime.tv_nsec);
841 di->i_mtime_nsec = di->i_ctime_nsec;
842 ocfs2_journal_dirty(handle, di_bh);
822 ocfs2_commit_trans(OCFS2_SB(inode->i_sb), handle); 843 ocfs2_commit_trans(OCFS2_SB(inode->i_sb), handle);
844 }
823 845
824out_unlock: 846out_unlock:
825 unlock_page(page); 847 unlock_page(page);
@@ -915,7 +937,7 @@ out:
915 * has made sure that the entire range needs zeroing. 937 * has made sure that the entire range needs zeroing.
916 */ 938 */
917static int ocfs2_zero_extend_range(struct inode *inode, u64 range_start, 939static int ocfs2_zero_extend_range(struct inode *inode, u64 range_start,
918 u64 range_end) 940 u64 range_end, struct buffer_head *di_bh)
919{ 941{
920 int rc = 0; 942 int rc = 0;
921 u64 next_pos; 943 u64 next_pos;
@@ -931,7 +953,7 @@ static int ocfs2_zero_extend_range(struct inode *inode, u64 range_start,
931 next_pos = (zero_pos & PAGE_CACHE_MASK) + PAGE_CACHE_SIZE; 953 next_pos = (zero_pos & PAGE_CACHE_MASK) + PAGE_CACHE_SIZE;
932 if (next_pos > range_end) 954 if (next_pos > range_end)
933 next_pos = range_end; 955 next_pos = range_end;
934 rc = ocfs2_write_zero_page(inode, zero_pos, next_pos); 956 rc = ocfs2_write_zero_page(inode, zero_pos, next_pos, di_bh);
935 if (rc < 0) { 957 if (rc < 0) {
936 mlog_errno(rc); 958 mlog_errno(rc);
937 break; 959 break;
@@ -977,7 +999,7 @@ int ocfs2_zero_extend(struct inode *inode, struct buffer_head *di_bh,
977 range_end = zero_to_size; 999 range_end = zero_to_size;
978 1000
979 ret = ocfs2_zero_extend_range(inode, range_start, 1001 ret = ocfs2_zero_extend_range(inode, range_start,
980 range_end); 1002 range_end, di_bh);
981 if (ret) { 1003 if (ret) {
982 mlog_errno(ret); 1004 mlog_errno(ret);
983 break; 1005 break;
@@ -1145,14 +1167,14 @@ int ocfs2_setattr(struct dentry *dentry, struct iattr *attr)
1145 goto bail_unlock_rw; 1167 goto bail_unlock_rw;
1146 } 1168 }
1147 1169
1148 if (size_change && attr->ia_size != i_size_read(inode)) { 1170 if (size_change) {
1149 status = inode_newsize_ok(inode, attr->ia_size); 1171 status = inode_newsize_ok(inode, attr->ia_size);
1150 if (status) 1172 if (status)
1151 goto bail_unlock; 1173 goto bail_unlock;
1152 1174
1153 inode_dio_wait(inode); 1175 inode_dio_wait(inode);
1154 1176
1155 if (i_size_read(inode) > attr->ia_size) { 1177 if (i_size_read(inode) >= attr->ia_size) {
1156 if (ocfs2_should_order_data(inode)) { 1178 if (ocfs2_should_order_data(inode)) {
1157 status = ocfs2_begin_ordered_truncate(inode, 1179 status = ocfs2_begin_ordered_truncate(inode,
1158 attr->ia_size); 1180 attr->ia_size);
diff --git a/fs/ocfs2/localalloc.c b/fs/ocfs2/localalloc.c
index cd5496b7a0a3..044013455621 100644
--- a/fs/ocfs2/localalloc.c
+++ b/fs/ocfs2/localalloc.c
@@ -781,6 +781,48 @@ bail:
781 return status; 781 return status;
782} 782}
783 783
784int ocfs2_free_local_alloc_bits(struct ocfs2_super *osb,
785 handle_t *handle,
786 struct ocfs2_alloc_context *ac,
787 u32 bit_off,
788 u32 num_bits)
789{
790 int status, start;
791 u32 clear_bits;
792 struct inode *local_alloc_inode;
793 void *bitmap;
794 struct ocfs2_dinode *alloc;
795 struct ocfs2_local_alloc *la;
796
797 BUG_ON(ac->ac_which != OCFS2_AC_USE_LOCAL);
798
799 local_alloc_inode = ac->ac_inode;
800 alloc = (struct ocfs2_dinode *) osb->local_alloc_bh->b_data;
801 la = OCFS2_LOCAL_ALLOC(alloc);
802
803 bitmap = la->la_bitmap;
804 start = bit_off - le32_to_cpu(la->la_bm_off);
805 clear_bits = num_bits;
806
807 status = ocfs2_journal_access_di(handle,
808 INODE_CACHE(local_alloc_inode),
809 osb->local_alloc_bh,
810 OCFS2_JOURNAL_ACCESS_WRITE);
811 if (status < 0) {
812 mlog_errno(status);
813 goto bail;
814 }
815
816 while (clear_bits--)
817 ocfs2_clear_bit(start++, bitmap);
818
819 le32_add_cpu(&alloc->id1.bitmap1.i_used, -num_bits);
820 ocfs2_journal_dirty(handle, osb->local_alloc_bh);
821
822bail:
823 return status;
824}
825
784static u32 ocfs2_local_alloc_count_bits(struct ocfs2_dinode *alloc) 826static u32 ocfs2_local_alloc_count_bits(struct ocfs2_dinode *alloc)
785{ 827{
786 u32 count; 828 u32 count;
diff --git a/fs/ocfs2/localalloc.h b/fs/ocfs2/localalloc.h
index 1be9b5864460..44a7d1fb2dec 100644
--- a/fs/ocfs2/localalloc.h
+++ b/fs/ocfs2/localalloc.h
@@ -55,6 +55,12 @@ int ocfs2_claim_local_alloc_bits(struct ocfs2_super *osb,
55 u32 *bit_off, 55 u32 *bit_off,
56 u32 *num_bits); 56 u32 *num_bits);
57 57
58int ocfs2_free_local_alloc_bits(struct ocfs2_super *osb,
59 handle_t *handle,
60 struct ocfs2_alloc_context *ac,
61 u32 bit_off,
62 u32 num_bits);
63
58void ocfs2_local_alloc_seen_free_bits(struct ocfs2_super *osb, 64void ocfs2_local_alloc_seen_free_bits(struct ocfs2_super *osb,
59 unsigned int num_clusters); 65 unsigned int num_clusters);
60void ocfs2_la_enable_worker(struct work_struct *work); 66void ocfs2_la_enable_worker(struct work_struct *work);
diff --git a/fs/ocfs2/namei.c b/fs/ocfs2/namei.c
index f4d609be9400..3683643f3f0e 100644
--- a/fs/ocfs2/namei.c
+++ b/fs/ocfs2/namei.c
@@ -664,6 +664,7 @@ static int ocfs2_link(struct dentry *old_dentry,
664 struct ocfs2_super *osb = OCFS2_SB(dir->i_sb); 664 struct ocfs2_super *osb = OCFS2_SB(dir->i_sb);
665 struct ocfs2_dir_lookup_result lookup = { NULL, }; 665 struct ocfs2_dir_lookup_result lookup = { NULL, };
666 sigset_t oldset; 666 sigset_t oldset;
667 u64 old_de_ino;
667 668
668 trace_ocfs2_link((unsigned long long)OCFS2_I(inode)->ip_blkno, 669 trace_ocfs2_link((unsigned long long)OCFS2_I(inode)->ip_blkno,
669 old_dentry->d_name.len, old_dentry->d_name.name, 670 old_dentry->d_name.len, old_dentry->d_name.name,
@@ -686,6 +687,22 @@ static int ocfs2_link(struct dentry *old_dentry,
686 goto out; 687 goto out;
687 } 688 }
688 689
690 err = ocfs2_lookup_ino_from_name(dir, old_dentry->d_name.name,
691 old_dentry->d_name.len, &old_de_ino);
692 if (err) {
693 err = -ENOENT;
694 goto out;
695 }
696
697 /*
698 * Check whether another node removed the source inode while we
699 * were in the vfs.
700 */
701 if (old_de_ino != OCFS2_I(inode)->ip_blkno) {
702 err = -ENOENT;
703 goto out;
704 }
705
689 err = ocfs2_check_dir_for_entry(dir, dentry->d_name.name, 706 err = ocfs2_check_dir_for_entry(dir, dentry->d_name.name,
690 dentry->d_name.len); 707 dentry->d_name.len);
691 if (err) 708 if (err)
diff --git a/fs/posix_acl.c b/fs/posix_acl.c
index 38bae5a0ea25..11c54fd51e16 100644
--- a/fs/posix_acl.c
+++ b/fs/posix_acl.c
@@ -521,8 +521,11 @@ posix_acl_chmod(struct inode *inode, umode_t mode)
521 return -EOPNOTSUPP; 521 return -EOPNOTSUPP;
522 522
523 acl = get_acl(inode, ACL_TYPE_ACCESS); 523 acl = get_acl(inode, ACL_TYPE_ACCESS);
524 if (IS_ERR_OR_NULL(acl)) 524 if (IS_ERR_OR_NULL(acl)) {
525 if (acl == ERR_PTR(-EOPNOTSUPP))
526 return 0;
525 return PTR_ERR(acl); 527 return PTR_ERR(acl);
528 }
526 529
527 ret = __posix_acl_chmod(&acl, GFP_KERNEL, mode); 530 ret = __posix_acl_chmod(&acl, GFP_KERNEL, mode);
528 if (ret) 531 if (ret)
@@ -544,14 +547,15 @@ posix_acl_create(struct inode *dir, umode_t *mode,
544 goto no_acl; 547 goto no_acl;
545 548
546 p = get_acl(dir, ACL_TYPE_DEFAULT); 549 p = get_acl(dir, ACL_TYPE_DEFAULT);
547 if (IS_ERR(p)) 550 if (IS_ERR(p)) {
551 if (p == ERR_PTR(-EOPNOTSUPP))
552 goto apply_umask;
548 return PTR_ERR(p); 553 return PTR_ERR(p);
549
550 if (!p) {
551 *mode &= ~current_umask();
552 goto no_acl;
553 } 554 }
554 555
556 if (!p)
557 goto apply_umask;
558
555 *acl = posix_acl_clone(p, GFP_NOFS); 559 *acl = posix_acl_clone(p, GFP_NOFS);
556 if (!*acl) 560 if (!*acl)
557 return -ENOMEM; 561 return -ENOMEM;
@@ -575,6 +579,8 @@ posix_acl_create(struct inode *dir, umode_t *mode,
575 } 579 }
576 return 0; 580 return 0;
577 581
582apply_umask:
583 *mode &= ~current_umask();
578no_acl: 584no_acl:
579 *default_acl = NULL; 585 *default_acl = NULL;
580 *acl = NULL; 586 *acl = NULL;
diff --git a/fs/proc/vmcore.c b/fs/proc/vmcore.c
index 2ca7ba047f04..88d4585b30f1 100644
--- a/fs/proc/vmcore.c
+++ b/fs/proc/vmcore.c
@@ -468,17 +468,24 @@ static int __init update_note_header_size_elf64(const Elf64_Ehdr *ehdr_ptr)
468 return rc; 468 return rc;
469 } 469 }
470 nhdr_ptr = notes_section; 470 nhdr_ptr = notes_section;
471 while (real_sz < max_sz) { 471 while (nhdr_ptr->n_namesz != 0) {
472 if (nhdr_ptr->n_namesz == 0)
473 break;
474 sz = sizeof(Elf64_Nhdr) + 472 sz = sizeof(Elf64_Nhdr) +
475 ((nhdr_ptr->n_namesz + 3) & ~3) + 473 ((nhdr_ptr->n_namesz + 3) & ~3) +
476 ((nhdr_ptr->n_descsz + 3) & ~3); 474 ((nhdr_ptr->n_descsz + 3) & ~3);
475 if ((real_sz + sz) > max_sz) {
476 pr_warn("Warning: Exceeded p_memsz, dropping PT_NOTE entry n_namesz=0x%x, n_descsz=0x%x\n",
477 nhdr_ptr->n_namesz, nhdr_ptr->n_descsz);
478 break;
479 }
477 real_sz += sz; 480 real_sz += sz;
478 nhdr_ptr = (Elf64_Nhdr*)((char*)nhdr_ptr + sz); 481 nhdr_ptr = (Elf64_Nhdr*)((char*)nhdr_ptr + sz);
479 } 482 }
480 kfree(notes_section); 483 kfree(notes_section);
481 phdr_ptr->p_memsz = real_sz; 484 phdr_ptr->p_memsz = real_sz;
485 if (real_sz == 0) {
486 pr_warn("Warning: Zero PT_NOTE entries found\n");
487 return -EINVAL;
488 }
482 } 489 }
483 490
484 return 0; 491 return 0;
@@ -648,17 +655,24 @@ static int __init update_note_header_size_elf32(const Elf32_Ehdr *ehdr_ptr)
648 return rc; 655 return rc;
649 } 656 }
650 nhdr_ptr = notes_section; 657 nhdr_ptr = notes_section;
651 while (real_sz < max_sz) { 658 while (nhdr_ptr->n_namesz != 0) {
652 if (nhdr_ptr->n_namesz == 0)
653 break;
654 sz = sizeof(Elf32_Nhdr) + 659 sz = sizeof(Elf32_Nhdr) +
655 ((nhdr_ptr->n_namesz + 3) & ~3) + 660 ((nhdr_ptr->n_namesz + 3) & ~3) +
656 ((nhdr_ptr->n_descsz + 3) & ~3); 661 ((nhdr_ptr->n_descsz + 3) & ~3);
662 if ((real_sz + sz) > max_sz) {
663 pr_warn("Warning: Exceeded p_memsz, dropping PT_NOTE entry n_namesz=0x%x, n_descsz=0x%x\n",
664 nhdr_ptr->n_namesz, nhdr_ptr->n_descsz);
665 break;
666 }
657 real_sz += sz; 667 real_sz += sz;
658 nhdr_ptr = (Elf32_Nhdr*)((char*)nhdr_ptr + sz); 668 nhdr_ptr = (Elf32_Nhdr*)((char*)nhdr_ptr + sz);
659 } 669 }
660 kfree(notes_section); 670 kfree(notes_section);
661 phdr_ptr->p_memsz = real_sz; 671 phdr_ptr->p_memsz = real_sz;
672 if (real_sz == 0) {
673 pr_warn("Warning: Zero PT_NOTE entries found\n");
674 return -EINVAL;
675 }
662 } 676 }
663 677
664 return 0; 678 return 0;
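The vmcore.c hunks change both ELF note walks so each header is bounds-checked against p_memsz before being counted, instead of trusting n_namesz/n_descsz from the dump, and a PT_NOTE segment with no valid entries now fails with -EINVAL. A minimal userspace walker over the same note layout (the buffer contents are made up for the example):

#include <stdio.h>
#include <string.h>
#include <elf.h>

#define ALIGN4(x) (((x) + 3) & ~3u)

/* walk concatenated ELF notes in buf, stopping at an empty name or the size limit */
static size_t count_note_bytes(const unsigned char *buf, size_t max_sz)
{
	size_t real_sz = 0;

	while (real_sz + sizeof(Elf64_Nhdr) <= max_sz) {
		Elf64_Nhdr nhdr;
		size_t sz;

		memcpy(&nhdr, buf + real_sz, sizeof(nhdr));	/* avoid unaligned access */
		if (nhdr.n_namesz == 0)		/* terminating empty note */
			break;
		sz = sizeof(nhdr) + ALIGN4(nhdr.n_namesz) + ALIGN4(nhdr.n_descsz);
		if (real_sz + sz > max_sz) {	/* corrupt header would overrun the segment */
			fprintf(stderr, "dropping oversized note\n");
			break;
		}
		real_sz += sz;
	}
	return real_sz;
}

int main(void)
{
	unsigned char buf[64] = {0};
	Elf64_Nhdr n = { .n_namesz = 5, .n_descsz = 4, .n_type = 1 };

	memcpy(buf, &n, sizeof(n));
	memcpy(buf + sizeof(n), "CORE", 5);
	printf("notes occupy %zu bytes\n", count_note_bytes(buf, sizeof(buf)));
	return 0;
}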
diff --git a/fs/sync.c b/fs/sync.c
index f15537452231..e8ba024a055b 100644
--- a/fs/sync.c
+++ b/fs/sync.c
@@ -222,23 +222,6 @@ SYSCALL_DEFINE1(fdatasync, unsigned int, fd)
222 return do_fsync(fd, 1); 222 return do_fsync(fd, 1);
223} 223}
224 224
225/**
226 * generic_write_sync - perform syncing after a write if file / inode is sync
227 * @file: file to which the write happened
228 * @pos: offset where the write started
229 * @count: length of the write
230 *
231 * This is just a simple wrapper about our general syncing function.
232 */
233int generic_write_sync(struct file *file, loff_t pos, loff_t count)
234{
235 if (!(file->f_flags & O_DSYNC) && !IS_SYNC(file->f_mapping->host))
236 return 0;
237 return vfs_fsync_range(file, pos, pos + count - 1,
238 (file->f_flags & __O_SYNC) ? 0 : 1);
239}
240EXPORT_SYMBOL(generic_write_sync);
241
242/* 225/*
243 * sys_sync_file_range() permits finely controlled syncing over a segment of 226 * sys_sync_file_range() permits finely controlled syncing over a segment of
244 * a file in the range offset .. (offset+nbytes-1) inclusive. If nbytes is 227 * a file in the range offset .. (offset+nbytes-1) inclusive. If nbytes is
diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c
index 2e7989e3a2d6..64b48eade91d 100644
--- a/fs/xfs/xfs_file.c
+++ b/fs/xfs/xfs_file.c
@@ -799,7 +799,7 @@ xfs_file_aio_write(
799 XFS_STATS_ADD(xs_write_bytes, ret); 799 XFS_STATS_ADD(xs_write_bytes, ret);
800 800
801 /* Handle various SYNC-type writes */ 801 /* Handle various SYNC-type writes */
802 err = generic_write_sync(file, pos, ret); 802 err = generic_write_sync(file, iocb->ki_pos - ret, ret);
803 if (err < 0) 803 if (err < 0)
804 ret = err; 804 ret = err;
805 } 805 }
diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h
index fd8bf3219ef7..b4a745d7d9a9 100644
--- a/include/linux/binfmts.h
+++ b/include/linux/binfmts.h
@@ -115,7 +115,6 @@ extern int copy_strings_kernel(int argc, const char *const *argv,
115extern int prepare_bprm_creds(struct linux_binprm *bprm); 115extern int prepare_bprm_creds(struct linux_binprm *bprm);
116extern void install_exec_creds(struct linux_binprm *bprm); 116extern void install_exec_creds(struct linux_binprm *bprm);
117extern void set_binfmt(struct linux_binfmt *new); 117extern void set_binfmt(struct linux_binfmt *new);
118extern void free_bprm(struct linux_binprm *);
119extern ssize_t read_code(struct file *, unsigned long, loff_t, size_t); 118extern ssize_t read_code(struct file *, unsigned long, loff_t, size_t);
120 119
121#endif /* _LINUX_BINFMTS_H */ 120#endif /* _LINUX_BINFMTS_H */
diff --git a/include/linux/can/skb.h b/include/linux/can/skb.h
index 2f0543f7510c..f9bbbb472663 100644
--- a/include/linux/can/skb.h
+++ b/include/linux/can/skb.h
@@ -11,7 +11,9 @@
11#define CAN_SKB_H 11#define CAN_SKB_H
12 12
13#include <linux/types.h> 13#include <linux/types.h>
14#include <linux/skbuff.h>
14#include <linux/can.h> 15#include <linux/can.h>
16#include <net/sock.h>
15 17
16/* 18/*
17 * The struct can_skb_priv is used to transport additional information along 19 * The struct can_skb_priv is used to transport additional information along
@@ -42,4 +44,40 @@ static inline void can_skb_reserve(struct sk_buff *skb)
42 skb_reserve(skb, sizeof(struct can_skb_priv)); 44 skb_reserve(skb, sizeof(struct can_skb_priv));
43} 45}
44 46
47static inline void can_skb_destructor(struct sk_buff *skb)
48{
49 sock_put(skb->sk);
50}
51
52static inline void can_skb_set_owner(struct sk_buff *skb, struct sock *sk)
53{
54 if (sk) {
55 sock_hold(sk);
56 skb->destructor = can_skb_destructor;
57 skb->sk = sk;
58 }
59}
60
61/*
62 * returns an unshared skb owned by the original sock to be echo'ed back
63 */
64static inline struct sk_buff *can_create_echo_skb(struct sk_buff *skb)
65{
66 if (skb_shared(skb)) {
67 struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC);
68
69 if (likely(nskb)) {
70 can_skb_set_owner(nskb, skb->sk);
71 consume_skb(skb);
72 return nskb;
73 } else {
74 kfree_skb(skb);
75 return NULL;
76 }
77 }
78
79 /* we can assume to have an unshared skb with proper owner */
80 return skb;
81}
82
45#endif /* CAN_SKB_H */ 83#endif /* CAN_SKB_H */
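The new helpers give CAN drivers one place to take a socket reference on an skb and to obtain an unshared copy before looping a frame back. A hedged sketch of how a driver transmit path might use them (the driver function is hypothetical and the hardware hand-off is abbreviated):

	#include <linux/can/skb.h>
	#include <linux/netdevice.h>

	static netdev_tx_t my_can_xmit(struct sk_buff *skb, struct net_device *dev)
	{
		/* May clone-and-consume a shared skb, or free it on allocation
		 * failure; the original pointer must not be used afterwards. */
		skb = can_create_echo_skb(skb);
		if (!skb)
			return NETDEV_TX_OK;	/* frame dropped */

		/* ... queue skb to the hardware and the local echo machinery ... */
		return NETDEV_TX_OK;
	}
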
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 09f553c59813..60829565e552 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -2079,6 +2079,7 @@ extern struct file * dentry_open(const struct path *, int, const struct cred *);
2079extern int filp_close(struct file *, fl_owner_t id); 2079extern int filp_close(struct file *, fl_owner_t id);
2080 2080
2081extern struct filename *getname(const char __user *); 2081extern struct filename *getname(const char __user *);
2082extern struct filename *getname_kernel(const char *);
2082 2083
2083enum { 2084enum {
2084 FILE_CREATED = 1, 2085 FILE_CREATED = 1,
@@ -2273,7 +2274,13 @@ extern int filemap_fdatawrite_range(struct address_space *mapping,
2273extern int vfs_fsync_range(struct file *file, loff_t start, loff_t end, 2274extern int vfs_fsync_range(struct file *file, loff_t start, loff_t end,
2274 int datasync); 2275 int datasync);
2275extern int vfs_fsync(struct file *file, int datasync); 2276extern int vfs_fsync(struct file *file, int datasync);
2276extern int generic_write_sync(struct file *file, loff_t pos, loff_t count); 2277static inline int generic_write_sync(struct file *file, loff_t pos, loff_t count)
2278{
2279 if (!(file->f_flags & O_DSYNC) && !IS_SYNC(file->f_mapping->host))
2280 return 0;
2281 return vfs_fsync_range(file, pos, pos + count - 1,
2282 (file->f_flags & __O_SYNC) ? 0 : 1);
2283}
2277extern void emergency_sync(void); 2284extern void emergency_sync(void);
2278extern void emergency_remount(void); 2285extern void emergency_remount(void);
2279#ifdef CONFIG_BLOCK 2286#ifdef CONFIG_BLOCK
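With generic_write_sync() now an inline here and callers passing the start of the write rather than the pre-write position (see the xfs_file.c hunk earlier in this diff), a write path derives the range from the already advanced iocb->ki_pos. A hedged caller sketch, kernel context assumed and the function name hypothetical:

	#include <linux/fs.h>

	/* Hypothetical tail of an ->aio_write implementation. */
	static ssize_t my_fs_finish_write(struct kiocb *iocb, ssize_t ret)
	{
		if (ret > 0) {
			/* ki_pos already points past the data just written */
			ssize_t err = generic_write_sync(iocb->ki_filp,
							 iocb->ki_pos - ret, ret);
			if (err < 0)
				ret = err;
		}
		return ret;
	}
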
diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h
index 3ccfcecf8999..b2fb167b2e6d 100644
--- a/include/linux/nfs_xdr.h
+++ b/include/linux/nfs_xdr.h
@@ -379,12 +379,14 @@ struct nfs_openres {
379 * Arguments to the open_confirm call. 379 * Arguments to the open_confirm call.
380 */ 380 */
381struct nfs_open_confirmargs { 381struct nfs_open_confirmargs {
382 struct nfs4_sequence_args seq_args;
382 const struct nfs_fh * fh; 383 const struct nfs_fh * fh;
383 nfs4_stateid * stateid; 384 nfs4_stateid * stateid;
384 struct nfs_seqid * seqid; 385 struct nfs_seqid * seqid;
385}; 386};
386 387
387struct nfs_open_confirmres { 388struct nfs_open_confirmres {
389 struct nfs4_sequence_res seq_res;
388 nfs4_stateid stateid; 390 nfs4_stateid stateid;
389 struct nfs_seqid * seqid; 391 struct nfs_seqid * seqid;
390}; 392};
diff --git a/include/linux/nvme.h b/include/linux/nvme.h
index 26ebcf41c213..69ae03f6eb15 100644
--- a/include/linux/nvme.h
+++ b/include/linux/nvme.h
@@ -80,13 +80,14 @@ struct nvme_dev {
80 struct dma_pool *prp_small_pool; 80 struct dma_pool *prp_small_pool;
81 int instance; 81 int instance;
82 int queue_count; 82 int queue_count;
83 int db_stride; 83 u32 db_stride;
84 u32 ctrl_config; 84 u32 ctrl_config;
85 struct msix_entry *entry; 85 struct msix_entry *entry;
86 struct nvme_bar __iomem *bar; 86 struct nvme_bar __iomem *bar;
87 struct list_head namespaces; 87 struct list_head namespaces;
88 struct kref kref; 88 struct kref kref;
89 struct miscdevice miscdev; 89 struct miscdevice miscdev;
90 struct work_struct reset_work;
90 char name[12]; 91 char name[12];
91 char serial[20]; 92 char serial[20];
92 char model[40]; 93 char model[40];
@@ -94,6 +95,8 @@ struct nvme_dev {
94 u32 max_hw_sectors; 95 u32 max_hw_sectors;
95 u32 stripe_size; 96 u32 stripe_size;
96 u16 oncs; 97 u16 oncs;
98 u16 abort_limit;
99 u8 initialized;
97}; 100};
98 101
99/* 102/*
@@ -165,6 +168,7 @@ int nvme_set_features(struct nvme_dev *dev, unsigned fid, unsigned dword11,
165struct sg_io_hdr; 168struct sg_io_hdr;
166 169
167int nvme_sg_io(struct nvme_ns *ns, struct sg_io_hdr __user *u_hdr); 170int nvme_sg_io(struct nvme_ns *ns, struct sg_io_hdr __user *u_hdr);
171int nvme_sg_io32(struct nvme_ns *ns, unsigned long arg);
168int nvme_sg_get_version_num(int __user *ip); 172int nvme_sg_get_version_num(int __user *ip);
169 173
170#endif /* _LINUX_NVME_H */ 174#endif /* _LINUX_NVME_H */
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index e464b4e987e8..d1fe1a761047 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -228,9 +228,9 @@ PAGEFLAG(OwnerPriv1, owner_priv_1) TESTCLEARFLAG(OwnerPriv1, owner_priv_1)
228TESTPAGEFLAG(Writeback, writeback) TESTSCFLAG(Writeback, writeback) 228TESTPAGEFLAG(Writeback, writeback) TESTSCFLAG(Writeback, writeback)
229PAGEFLAG(MappedToDisk, mappedtodisk) 229PAGEFLAG(MappedToDisk, mappedtodisk)
230 230
231/* PG_readahead is only used for file reads; PG_reclaim is only for writes */ 231/* PG_readahead is only used for reads; PG_reclaim is only for writes */
232PAGEFLAG(Reclaim, reclaim) TESTCLEARFLAG(Reclaim, reclaim) 232PAGEFLAG(Reclaim, reclaim) TESTCLEARFLAG(Reclaim, reclaim)
233PAGEFLAG(Readahead, reclaim) /* Reminder to do async read-ahead */ 233PAGEFLAG(Readahead, reclaim) TESTCLEARFLAG(Readahead, reclaim)
234 234
235#ifdef CONFIG_HIGHMEM 235#ifdef CONFIG_HIGHMEM
236/* 236/*
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 68a0e84463a0..a781dec1cd0b 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -128,6 +128,7 @@ struct bio_list;
128struct fs_struct; 128struct fs_struct;
129struct perf_event_context; 129struct perf_event_context;
130struct blk_plug; 130struct blk_plug;
131struct filename;
131 132
132/* 133/*
133 * List of flags we want to share for kernel threads, 134 * List of flags we want to share for kernel threads,
@@ -2311,7 +2312,7 @@ extern void do_group_exit(int);
2311extern int allow_signal(int); 2312extern int allow_signal(int);
2312extern int disallow_signal(int); 2313extern int disallow_signal(int);
2313 2314
2314extern int do_execve(const char *, 2315extern int do_execve(struct filename *,
2315 const char __user * const __user *, 2316 const char __user * const __user *,
2316 const char __user * const __user *); 2317 const char __user * const __user *);
2317extern long do_fork(unsigned long, unsigned long, unsigned long, int __user *, int __user *); 2318extern long do_fork(unsigned long, unsigned long, unsigned long, int __user *, int __user *);
diff --git a/include/linux/smp.h b/include/linux/smp.h
index 3834f43f9993..6ae004e437ea 100644
--- a/include/linux/smp.h
+++ b/include/linux/smp.h
@@ -188,6 +188,9 @@ static inline void kick_all_cpus_sync(void) { }
188 */ 188 */
189extern void arch_disable_smp_support(void); 189extern void arch_disable_smp_support(void);
190 190
191extern void arch_enable_nonboot_cpus_begin(void);
192extern void arch_enable_nonboot_cpus_end(void);
193
191void smp_setup_processor_id(void); 194void smp_setup_processor_id(void);
192 195
193#endif /* __LINUX_SMP_H */ 196#endif /* __LINUX_SMP_H */
diff --git a/include/linux/vm_event_item.h b/include/linux/vm_event_item.h
index c557c6d096de..3a712e2e7d76 100644
--- a/include/linux/vm_event_item.h
+++ b/include/linux/vm_event_item.h
@@ -71,12 +71,14 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
71 THP_ZERO_PAGE_ALLOC, 71 THP_ZERO_PAGE_ALLOC,
72 THP_ZERO_PAGE_ALLOC_FAILED, 72 THP_ZERO_PAGE_ALLOC_FAILED,
73#endif 73#endif
74#ifdef CONFIG_DEBUG_TLBFLUSH
74#ifdef CONFIG_SMP 75#ifdef CONFIG_SMP
75 NR_TLB_REMOTE_FLUSH, /* cpu tried to flush others' tlbs */ 76 NR_TLB_REMOTE_FLUSH, /* cpu tried to flush others' tlbs */
76 NR_TLB_REMOTE_FLUSH_RECEIVED,/* cpu received ipi for flush */ 77 NR_TLB_REMOTE_FLUSH_RECEIVED,/* cpu received ipi for flush */
77#endif 78#endif /* CONFIG_SMP */
78 NR_TLB_LOCAL_FLUSH_ALL, 79 NR_TLB_LOCAL_FLUSH_ALL,
79 NR_TLB_LOCAL_FLUSH_ONE, 80 NR_TLB_LOCAL_FLUSH_ONE,
81#endif /* CONFIG_DEBUG_TLBFLUSH */
80 NR_VM_EVENT_ITEMS 82 NR_VM_EVENT_ITEMS
81}; 83};
82 84
diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
index a67b38415768..67ce70c8279b 100644
--- a/include/linux/vmstat.h
+++ b/include/linux/vmstat.h
@@ -83,6 +83,14 @@ static inline void vm_events_fold_cpu(int cpu)
83#define count_vm_numa_events(x, y) do { (void)(y); } while (0) 83#define count_vm_numa_events(x, y) do { (void)(y); } while (0)
84#endif /* CONFIG_NUMA_BALANCING */ 84#endif /* CONFIG_NUMA_BALANCING */
85 85
86#ifdef CONFIG_DEBUG_TLBFLUSH
87#define count_vm_tlb_event(x) count_vm_event(x)
88#define count_vm_tlb_events(x, y) count_vm_events(x, y)
89#else
90#define count_vm_tlb_event(x) do {} while (0)
91#define count_vm_tlb_events(x, y) do { (void)(y); } while (0)
92#endif
93
86#define __count_zone_vm_events(item, zone, delta) \ 94#define __count_zone_vm_events(item, zone, delta) \
87 __count_vm_events(item##_NORMAL - ZONE_NORMAL + \ 95 __count_vm_events(item##_NORMAL - ZONE_NORMAL + \
88 zone_idx(zone), delta) 96 zone_idx(zone), delta)
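The count_vm_tlb_event() wrappers let TLB flush paths count unconditionally while the bookkeeping compiles away when CONFIG_DEBUG_TLBFLUSH is off. A hedged usage sketch (the flush function is hypothetical):

	#include <linux/vmstat.h>

	static void my_flush_tlb_all(void)
	{
		count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
		/* ... perform the actual flush ... */
	}
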
diff --git a/include/net/datalink.h b/include/net/datalink.h
index deb7ca75db48..93cb18f729b5 100644
--- a/include/net/datalink.h
+++ b/include/net/datalink.h
@@ -15,4 +15,6 @@ struct datalink_proto {
15 struct list_head node; 15 struct list_head node;
16}; 16};
17 17
18struct datalink_proto *make_EII_client(void);
19void destroy_EII_client(struct datalink_proto *dl);
18#endif 20#endif
diff --git a/include/net/dn.h b/include/net/dn.h
index ccc15588d108..913b73d239f5 100644
--- a/include/net/dn.h
+++ b/include/net/dn.h
@@ -200,6 +200,8 @@ static inline void dn_sk_ports_copy(struct flowidn *fld, struct dn_scp *scp)
200} 200}
201 201
202unsigned int dn_mss_from_pmtu(struct net_device *dev, int mtu); 202unsigned int dn_mss_from_pmtu(struct net_device *dev, int mtu);
203void dn_register_sysctl(void);
204void dn_unregister_sysctl(void);
203 205
204#define DN_MENUVER_ACC 0x01 206#define DN_MENUVER_ACC 0x01
205#define DN_MENUVER_USR 0x02 207#define DN_MENUVER_USR 0x02
diff --git a/include/net/dn_route.h b/include/net/dn_route.h
index b409ad6b8d7a..55df9939bca2 100644
--- a/include/net/dn_route.h
+++ b/include/net/dn_route.h
@@ -20,6 +20,8 @@ int dn_route_output_sock(struct dst_entry __rcu **pprt, struct flowidn *,
20 struct sock *sk, int flags); 20 struct sock *sk, int flags);
21int dn_cache_dump(struct sk_buff *skb, struct netlink_callback *cb); 21int dn_cache_dump(struct sk_buff *skb, struct netlink_callback *cb);
22void dn_rt_cache_flush(int delay); 22void dn_rt_cache_flush(int delay);
23int dn_route_rcv(struct sk_buff *skb, struct net_device *dev,
24 struct packet_type *pt, struct net_device *orig_dev);
23 25
24/* Masks for flags field */ 26/* Masks for flags field */
25#define DN_RT_F_PID 0x07 /* Mask for packet type */ 27#define DN_RT_F_PID 0x07 /* Mask for packet type */
diff --git a/include/net/ethoc.h b/include/net/ethoc.h
index 96f3789b27bc..2a2d6bb34eb8 100644
--- a/include/net/ethoc.h
+++ b/include/net/ethoc.h
@@ -16,6 +16,7 @@
16struct ethoc_platform_data { 16struct ethoc_platform_data {
17 u8 hwaddr[IFHWADDRLEN]; 17 u8 hwaddr[IFHWADDRLEN];
18 s8 phy_id; 18 s8 phy_id;
19 u32 eth_clkfreq;
19}; 20};
20 21
21#endif /* !LINUX_NET_ETHOC_H */ 22#endif /* !LINUX_NET_ETHOC_H */
diff --git a/include/net/ipx.h b/include/net/ipx.h
index 9e9e35465baf..0143180fecc9 100644
--- a/include/net/ipx.h
+++ b/include/net/ipx.h
@@ -140,6 +140,17 @@ static __inline__ void ipxitf_hold(struct ipx_interface *intrfc)
140} 140}
141 141
142void ipxitf_down(struct ipx_interface *intrfc); 142void ipxitf_down(struct ipx_interface *intrfc);
143struct ipx_interface *ipxitf_find_using_net(__be32 net);
144int ipxitf_send(struct ipx_interface *intrfc, struct sk_buff *skb, char *node);
145__be16 ipx_cksum(struct ipxhdr *packet, int length);
146int ipxrtr_add_route(__be32 network, struct ipx_interface *intrfc,
147 unsigned char *node);
148void ipxrtr_del_routes(struct ipx_interface *intrfc);
149int ipxrtr_route_packet(struct sock *sk, struct sockaddr_ipx *usipx,
150 struct iovec *iov, size_t len, int noblock);
151int ipxrtr_route_skb(struct sk_buff *skb);
152struct ipx_route *ipxrtr_lookup(__be32 net);
153int ipxrtr_ioctl(unsigned int cmd, void __user *arg);
143 154
144static __inline__ void ipxitf_put(struct ipx_interface *intrfc) 155static __inline__ void ipxitf_put(struct ipx_interface *intrfc)
145{ 156{
diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h
index da68c9a90ac5..991dcd94cbbf 100644
--- a/include/net/net_namespace.h
+++ b/include/net/net_namespace.h
@@ -162,6 +162,14 @@ extern struct list_head net_namespace_list;
162struct net *get_net_ns_by_pid(pid_t pid); 162struct net *get_net_ns_by_pid(pid_t pid);
163struct net *get_net_ns_by_fd(int pid); 163struct net *get_net_ns_by_fd(int pid);
164 164
165#ifdef CONFIG_SYSCTL
166void ipx_register_sysctl(void);
167void ipx_unregister_sysctl(void);
168#else
169#define ipx_register_sysctl()
170#define ipx_unregister_sysctl()
171#endif
172
165#ifdef CONFIG_NET_NS 173#ifdef CONFIG_NET_NS
166void __put_net(struct net *net); 174void __put_net(struct net *net);
167 175
diff --git a/include/net/netfilter/nf_conntrack.h b/include/net/netfilter/nf_conntrack.h
index 01ea6eed1bb1..b2ac6246b7e0 100644
--- a/include/net/netfilter/nf_conntrack.h
+++ b/include/net/netfilter/nf_conntrack.h
@@ -284,6 +284,8 @@ extern unsigned int nf_conntrack_max;
284extern unsigned int nf_conntrack_hash_rnd; 284extern unsigned int nf_conntrack_hash_rnd;
285void init_nf_conntrack_hash_rnd(void); 285void init_nf_conntrack_hash_rnd(void);
286 286
287void nf_conntrack_tmpl_insert(struct net *net, struct nf_conn *tmpl);
288
287#define NF_CT_STAT_INC(net, count) __this_cpu_inc((net)->ct.stat->count) 289#define NF_CT_STAT_INC(net, count) __this_cpu_inc((net)->ct.stat->count)
288#define NF_CT_STAT_INC_ATOMIC(net, count) this_cpu_inc((net)->ct.stat->count) 290#define NF_CT_STAT_INC_ATOMIC(net, count) this_cpu_inc((net)->ct.stat->count)
289 291
diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h
index 57c8ff7955df..e7e14ffe0f6a 100644
--- a/include/net/netfilter/nf_tables.h
+++ b/include/net/netfilter/nf_tables.h
@@ -252,6 +252,7 @@ void nf_tables_unbind_set(const struct nft_ctx *ctx, struct nft_set *set,
252 * @owner: module reference 252 * @owner: module reference
253 * @policy: netlink attribute policy 253 * @policy: netlink attribute policy
254 * @maxattr: highest netlink attribute number 254 * @maxattr: highest netlink attribute number
255 * @family: address family for AF-specific types
255 */ 256 */
256struct nft_expr_type { 257struct nft_expr_type {
257 const struct nft_expr_ops *(*select_ops)(const struct nft_ctx *, 258 const struct nft_expr_ops *(*select_ops)(const struct nft_ctx *,
@@ -262,6 +263,7 @@ struct nft_expr_type {
262 struct module *owner; 263 struct module *owner;
263 const struct nla_policy *policy; 264 const struct nla_policy *policy;
264 unsigned int maxattr; 265 unsigned int maxattr;
266 u8 family;
265}; 267};
266 268
267/** 269/**
@@ -320,7 +322,6 @@ static inline void *nft_expr_priv(const struct nft_expr *expr)
320 * struct nft_rule - nf_tables rule 322 * struct nft_rule - nf_tables rule
321 * 323 *
322 * @list: used internally 324 * @list: used internally
323 * @rcu_head: used internally for rcu
324 * @handle: rule handle 325 * @handle: rule handle
325 * @genmask: generation mask 326 * @genmask: generation mask
326 * @dlen: length of expression data 327 * @dlen: length of expression data
@@ -328,7 +329,6 @@ static inline void *nft_expr_priv(const struct nft_expr *expr)
328 */ 329 */
329struct nft_rule { 330struct nft_rule {
330 struct list_head list; 331 struct list_head list;
331 struct rcu_head rcu_head;
332 u64 handle:46, 332 u64 handle:46,
333 genmask:2, 333 genmask:2,
334 dlen:16; 334 dlen:16;
@@ -389,7 +389,6 @@ enum nft_chain_flags {
389 * 389 *
390 * @rules: list of rules in the chain 390 * @rules: list of rules in the chain
391 * @list: used internally 391 * @list: used internally
392 * @rcu_head: used internally
393 * @net: net namespace that this chain belongs to 392 * @net: net namespace that this chain belongs to
394 * @table: table that this chain belongs to 393 * @table: table that this chain belongs to
395 * @handle: chain handle 394 * @handle: chain handle
@@ -401,7 +400,6 @@ enum nft_chain_flags {
401struct nft_chain { 400struct nft_chain {
402 struct list_head rules; 401 struct list_head rules;
403 struct list_head list; 402 struct list_head list;
404 struct rcu_head rcu_head;
405 struct net *net; 403 struct net *net;
406 struct nft_table *table; 404 struct nft_table *table;
407 u64 handle; 405 u64 handle;
@@ -529,6 +527,9 @@ void nft_unregister_expr(struct nft_expr_type *);
529#define MODULE_ALIAS_NFT_CHAIN(family, name) \ 527#define MODULE_ALIAS_NFT_CHAIN(family, name) \
530 MODULE_ALIAS("nft-chain-" __stringify(family) "-" name) 528 MODULE_ALIAS("nft-chain-" __stringify(family) "-" name)
531 529
530#define MODULE_ALIAS_NFT_AF_EXPR(family, name) \
531 MODULE_ALIAS("nft-expr-" __stringify(family) "-" name)
532
532#define MODULE_ALIAS_NFT_EXPR(name) \ 533#define MODULE_ALIAS_NFT_EXPR(name) \
533 MODULE_ALIAS("nft-expr-" name) 534 MODULE_ALIAS("nft-expr-" name)
534 535
diff --git a/include/net/netfilter/nft_reject.h b/include/net/netfilter/nft_reject.h
new file mode 100644
index 000000000000..36b0da2d55bb
--- /dev/null
+++ b/include/net/netfilter/nft_reject.h
@@ -0,0 +1,25 @@
1#ifndef _NFT_REJECT_H_
2#define _NFT_REJECT_H_
3
4struct nft_reject {
5 enum nft_reject_types type:8;
6 u8 icmp_code;
7};
8
9extern const struct nla_policy nft_reject_policy[];
10
11int nft_reject_init(const struct nft_ctx *ctx,
12 const struct nft_expr *expr,
13 const struct nlattr * const tb[]);
14
15int nft_reject_dump(struct sk_buff *skb, const struct nft_expr *expr);
16
17void nft_reject_ipv4_eval(const struct nft_expr *expr,
18 struct nft_data data[NFT_REG_MAX + 1],
19 const struct nft_pktinfo *pkt);
20
21void nft_reject_ipv6_eval(const struct nft_expr *expr,
22 struct nft_data data[NFT_REG_MAX + 1],
23 const struct nft_pktinfo *pkt);
24
25#endif
diff --git a/include/uapi/linux/in6.h b/include/uapi/linux/in6.h
index 633b93cac1ed..e9a1d2d973b6 100644
--- a/include/uapi/linux/in6.h
+++ b/include/uapi/linux/in6.h
@@ -128,22 +128,13 @@ struct in6_flowlabel_req {
128 * IPV6 extension headers 128 * IPV6 extension headers
129 */ 129 */
130#if __UAPI_DEF_IPPROTO_V6 130#if __UAPI_DEF_IPPROTO_V6
131enum { 131#define IPPROTO_HOPOPTS 0 /* IPv6 hop-by-hop options */
132 IPPROTO_HOPOPTS = 0, /* IPv6 hop-by-hop options */ 132#define IPPROTO_ROUTING 43 /* IPv6 routing header */
133#define IPPROTO_HOPOPTS IPPROTO_HOPOPTS 133#define IPPROTO_FRAGMENT 44 /* IPv6 fragmentation header */
134 IPPROTO_ROUTING = 43, /* IPv6 routing header */ 134#define IPPROTO_ICMPV6 58 /* ICMPv6 */
135#define IPPROTO_ROUTING IPPROTO_ROUTING 135#define IPPROTO_NONE 59 /* IPv6 no next header */
136 IPPROTO_FRAGMENT = 44, /* IPv6 fragmentation header */ 136#define IPPROTO_DSTOPTS 60 /* IPv6 destination options */
137#define IPPROTO_FRAGMENT IPPROTO_FRAGMENT 137#define IPPROTO_MH 135 /* IPv6 mobility header */
138 IPPROTO_ICMPV6 = 58, /* ICMPv6 */
139#define IPPROTO_ICMPV6 IPPROTO_ICMPV6
140 IPPROTO_NONE = 59, /* IPv6 no next header */
141#define IPPROTO_NONE IPPROTO_NONE
142 IPPROTO_DSTOPTS = 60, /* IPv6 destination options */
143#define IPPROTO_DSTOPTS IPPROTO_DSTOPTS
144 IPPROTO_MH = 135, /* IPv6 mobility header */
145#define IPPROTO_MH IPPROTO_MH
146};
147#endif /* __UAPI_DEF_IPPROTO_V6 */ 138#endif /* __UAPI_DEF_IPPROTO_V6 */
148 139
149/* 140/*
diff --git a/include/uapi/linux/nvme.h b/include/uapi/linux/nvme.h
index 989c04e0c563..e5ab62201119 100644
--- a/include/uapi/linux/nvme.h
+++ b/include/uapi/linux/nvme.h
@@ -350,6 +350,16 @@ struct nvme_delete_queue {
350 __u32 rsvd11[5]; 350 __u32 rsvd11[5];
351}; 351};
352 352
353struct nvme_abort_cmd {
354 __u8 opcode;
355 __u8 flags;
356 __u16 command_id;
357 __u32 rsvd1[9];
358 __le16 sqid;
359 __u16 cid;
360 __u32 rsvd11[5];
361};
362
353struct nvme_download_firmware { 363struct nvme_download_firmware {
354 __u8 opcode; 364 __u8 opcode;
355 __u8 flags; 365 __u8 flags;
@@ -384,6 +394,7 @@ struct nvme_command {
384 struct nvme_download_firmware dlfw; 394 struct nvme_download_firmware dlfw;
385 struct nvme_format_cmd format; 395 struct nvme_format_cmd format;
386 struct nvme_dsm_cmd dsm; 396 struct nvme_dsm_cmd dsm;
397 struct nvme_abort_cmd abort;
387 }; 398 };
388}; 399};
389 400
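The new nvme_abort_cmd member is filled through the abort branch of the command union. A hedged sketch of building one on the driver side; the admin opcode constant nvme_admin_abort_cmd and the two IDs are assumptions taken from the driver, not defined in this header:

	#include <linux/kernel.h>
	#include <linux/nvme.h>
	#include <linux/string.h>

	/* Hypothetical helper; io_cmd_id/io_sqid identify the timed-out request. */
	static void my_build_abort(struct nvme_command *c, u16 io_cmd_id, u16 io_sqid)
	{
		memset(c, 0, sizeof(*c));
		c->abort.opcode = nvme_admin_abort_cmd;	/* assumed opcode name */
		c->abort.cid = io_cmd_id;		/* command to abort */
		c->abort.sqid = cpu_to_le16(io_sqid);	/* its submission queue */
	}
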
diff --git a/include/xen/grant_table.h b/include/xen/grant_table.h
index 7ad033dbc845..a5af2a26d94f 100644
--- a/include/xen/grant_table.h
+++ b/include/xen/grant_table.h
@@ -191,15 +191,11 @@ void gnttab_free_auto_xlat_frames(void);
191#define gnttab_map_vaddr(map) ((void *)(map.host_virt_addr)) 191#define gnttab_map_vaddr(map) ((void *)(map.host_virt_addr))
192 192
193int gnttab_map_refs(struct gnttab_map_grant_ref *map_ops, 193int gnttab_map_refs(struct gnttab_map_grant_ref *map_ops,
194 struct gnttab_map_grant_ref *kmap_ops,
194 struct page **pages, unsigned int count); 195 struct page **pages, unsigned int count);
195int gnttab_map_refs_userspace(struct gnttab_map_grant_ref *map_ops,
196 struct gnttab_map_grant_ref *kmap_ops,
197 struct page **pages, unsigned int count);
198int gnttab_unmap_refs(struct gnttab_unmap_grant_ref *unmap_ops, 196int gnttab_unmap_refs(struct gnttab_unmap_grant_ref *unmap_ops,
197 struct gnttab_map_grant_ref *kunmap_ops,
199 struct page **pages, unsigned int count); 198 struct page **pages, unsigned int count);
200int gnttab_unmap_refs_userspace(struct gnttab_unmap_grant_ref *unmap_ops,
201 struct gnttab_map_grant_ref *kunmap_ops,
202 struct page **pages, unsigned int count);
203 199
204/* Perform a batch of grant map/copy operations. Retry every batch slot 200/* Perform a batch of grant map/copy operations. Retry every batch slot
205 * for which the hypervisor returns GNTST_eagain. This is typically due 201 * for which the hypervisor returns GNTST_eagain. This is typically due
diff --git a/init/main.c b/init/main.c
index 2fd9cef70ee8..eb03090cdced 100644
--- a/init/main.c
+++ b/init/main.c
@@ -812,7 +812,7 @@ void __init load_default_modules(void)
812static int run_init_process(const char *init_filename) 812static int run_init_process(const char *init_filename)
813{ 813{
814 argv_init[0] = init_filename; 814 argv_init[0] = init_filename;
815 return do_execve(init_filename, 815 return do_execve(getname_kernel(init_filename),
816 (const char __user *const __user *)argv_init, 816 (const char __user *const __user *)argv_init,
817 (const char __user *const __user *)envp_init); 817 (const char __user *const __user *)envp_init);
818} 818}
diff --git a/kernel/auditsc.c b/kernel/auditsc.c
index 10176cd5956a..7aef2f4b6c64 100644
--- a/kernel/auditsc.c
+++ b/kernel/auditsc.c
@@ -1719,7 +1719,7 @@ void audit_putname(struct filename *name)
1719 struct audit_context *context = current->audit_context; 1719 struct audit_context *context = current->audit_context;
1720 1720
1721 BUG_ON(!context); 1721 BUG_ON(!context);
1722 if (!context->in_syscall) { 1722 if (!name->aname || !context->in_syscall) {
1723#if AUDIT_DEBUG == 2 1723#if AUDIT_DEBUG == 2
1724 printk(KERN_ERR "%s:%d(:%d): final_putname(%p)\n", 1724 printk(KERN_ERR "%s:%d(:%d): final_putname(%p)\n",
1725 __FILE__, __LINE__, context->serial, name); 1725 __FILE__, __LINE__, context->serial, name);
diff --git a/kernel/irq/Kconfig b/kernel/irq/Kconfig
index 4a1fef09f658..07cbdfea9ae2 100644
--- a/kernel/irq/Kconfig
+++ b/kernel/irq/Kconfig
@@ -40,6 +40,7 @@ config IRQ_EDGE_EOI_HANDLER
40# Generic configurable interrupt chip implementation 40# Generic configurable interrupt chip implementation
41config GENERIC_IRQ_CHIP 41config GENERIC_IRQ_CHIP
42 bool 42 bool
43 select IRQ_DOMAIN
43 44
44# Generic irq_domain hw <--> linux irq number translation 45# Generic irq_domain hw <--> linux irq number translation
45config IRQ_DOMAIN 46config IRQ_DOMAIN
diff --git a/kernel/kmod.c b/kernel/kmod.c
index b086006c59e7..6b375af4958d 100644
--- a/kernel/kmod.c
+++ b/kernel/kmod.c
@@ -239,7 +239,7 @@ static int ____call_usermodehelper(void *data)
239 239
240 commit_creds(new); 240 commit_creds(new);
241 241
242 retval = do_execve(sub_info->path, 242 retval = do_execve(getname_kernel(sub_info->path),
243 (const char __user *const __user *)sub_info->argv, 243 (const char __user *const __user *)sub_info->argv,
244 (const char __user *const __user *)sub_info->envp); 244 (const char __user *const __user *)sub_info->envp);
245 if (!retval) 245 if (!retval)
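Both in-kernel exec sites now wrap the path with getname_kernel(), since do_execve() takes a struct filename * (see the fs.h and sched.h hunks earlier in this diff). A hedged sketch of an in-kernel caller after this change (helper name hypothetical, kernel context assumed):

	#include <linux/fs.h>
	#include <linux/sched.h>

	static int run_helper(const char *path,
			      const char *const argv[], const char *const envp[])
	{
		return do_execve(getname_kernel(path),
				 (const char __user *const __user *)argv,
				 (const char __user *const __user *)envp);
	}
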
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index dbf94a7d25a8..a48abeac753f 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -119,7 +119,7 @@ menu "Compile-time checks and compiler options"
119 119
120config DEBUG_INFO 120config DEBUG_INFO
121 bool "Compile the kernel with debug info" 121 bool "Compile the kernel with debug info"
122 depends on DEBUG_KERNEL 122 depends on DEBUG_KERNEL && !COMPILE_TEST
123 help 123 help
124 If you say Y here the resulting kernel image will include 124 If you say Y here the resulting kernel image will include
125 debugging info resulting in a larger kernel image. 125 debugging info resulting in a larger kernel image.
diff --git a/lib/Makefile b/lib/Makefile
index 126b34f2eb16..48140e3ba73f 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -45,6 +45,7 @@ obj-$(CONFIG_HAS_IOMEM) += iomap_copy.o devres.o
45obj-$(CONFIG_CHECK_SIGNATURE) += check_signature.o 45obj-$(CONFIG_CHECK_SIGNATURE) += check_signature.o
46obj-$(CONFIG_DEBUG_LOCKING_API_SELFTESTS) += locking-selftest.o 46obj-$(CONFIG_DEBUG_LOCKING_API_SELFTESTS) += locking-selftest.o
47 47
48GCOV_PROFILE_hweight.o := n
48CFLAGS_hweight.o = $(subst $(quote),,$(CONFIG_ARCH_HWEIGHT_CFLAGS)) 49CFLAGS_hweight.o = $(subst $(quote),,$(CONFIG_ARCH_HWEIGHT_CFLAGS))
49obj-$(CONFIG_GENERIC_HWEIGHT) += hweight.o 50obj-$(CONFIG_GENERIC_HWEIGHT) += hweight.o
50 51
diff --git a/mm/filemap.c b/mm/filemap.c
index d56d3c145b9f..7a13f6ac5421 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -2553,8 +2553,8 @@ ssize_t generic_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
2553 if (ret > 0) { 2553 if (ret > 0) {
2554 ssize_t err; 2554 ssize_t err;
2555 2555
2556 err = generic_write_sync(file, pos, ret); 2556 err = generic_write_sync(file, iocb->ki_pos - ret, ret);
2557 if (err < 0 && ret > 0) 2557 if (err < 0)
2558 ret = err; 2558 ret = err;
2559 } 2559 }
2560 return ret; 2560 return ret;
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index 4f08a2d61487..2f2f34a4e77d 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -945,8 +945,10 @@ static int hwpoison_user_mappings(struct page *p, unsigned long pfn,
945 * to it. Similarly, page lock is shifted. 945 * to it. Similarly, page lock is shifted.
946 */ 946 */
947 if (hpage != p) { 947 if (hpage != p) {
948 put_page(hpage); 948 if (!(flags & MF_COUNT_INCREASED)) {
949 get_page(p); 949 put_page(hpage);
950 get_page(p);
951 }
950 lock_page(p); 952 lock_page(p);
951 unlock_page(hpage); 953 unlock_page(hpage);
952 *hpagep = p; 954 *hpagep = p;
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 2d30e2cfe804..7106cb1aca8e 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -2173,11 +2173,12 @@ int __set_page_dirty_nobuffers(struct page *page)
2173 if (!TestSetPageDirty(page)) { 2173 if (!TestSetPageDirty(page)) {
2174 struct address_space *mapping = page_mapping(page); 2174 struct address_space *mapping = page_mapping(page);
2175 struct address_space *mapping2; 2175 struct address_space *mapping2;
2176 unsigned long flags;
2176 2177
2177 if (!mapping) 2178 if (!mapping)
2178 return 1; 2179 return 1;
2179 2180
2180 spin_lock_irq(&mapping->tree_lock); 2181 spin_lock_irqsave(&mapping->tree_lock, flags);
2181 mapping2 = page_mapping(page); 2182 mapping2 = page_mapping(page);
2182 if (mapping2) { /* Race with truncate? */ 2183 if (mapping2) { /* Race with truncate? */
2183 BUG_ON(mapping2 != mapping); 2184 BUG_ON(mapping2 != mapping);
@@ -2186,7 +2187,7 @@ int __set_page_dirty_nobuffers(struct page *page)
2186 radix_tree_tag_set(&mapping->page_tree, 2187 radix_tree_tag_set(&mapping->page_tree,
2187 page_index(page), PAGECACHE_TAG_DIRTY); 2188 page_index(page), PAGECACHE_TAG_DIRTY);
2188 } 2189 }
2189 spin_unlock_irq(&mapping->tree_lock); 2190 spin_unlock_irqrestore(&mapping->tree_lock, flags);
2190 if (mapping->host) { 2191 if (mapping->host) {
2191 /* !PageAnon && !swapper_space */ 2192 /* !PageAnon && !swapper_space */
2192 __mark_inode_dirty(mapping->host, I_DIRTY_PAGES); 2193 __mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
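Mechanically, the change above swaps spin_lock_irq() for the _irqsave form so the function can also be entered with interrupts already disabled: the saved flags are restored instead of IRQs being unconditionally re-enabled. A hedged illustration of the same pattern on the mapping's tree lock (hypothetical function, kernel context assumed):

	#include <linux/pagemap.h>

	static void my_tag_dirty(struct address_space *mapping, pgoff_t index)
	{
		unsigned long flags;

		spin_lock_irqsave(&mapping->tree_lock, flags);
		radix_tree_tag_set(&mapping->page_tree, index, PAGECACHE_TAG_DIRTY);
		spin_unlock_irqrestore(&mapping->tree_lock, flags);
	}
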
diff --git a/mm/slub.c b/mm/slub.c
index 7e3e0458bce4..25f14ad8f817 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1004,21 +1004,19 @@ static inline void slab_free_hook(struct kmem_cache *s, void *x)
1004static void add_full(struct kmem_cache *s, 1004static void add_full(struct kmem_cache *s,
1005 struct kmem_cache_node *n, struct page *page) 1005 struct kmem_cache_node *n, struct page *page)
1006{ 1006{
1007 lockdep_assert_held(&n->list_lock);
1008
1009 if (!(s->flags & SLAB_STORE_USER)) 1007 if (!(s->flags & SLAB_STORE_USER))
1010 return; 1008 return;
1011 1009
1010 lockdep_assert_held(&n->list_lock);
1012 list_add(&page->lru, &n->full); 1011 list_add(&page->lru, &n->full);
1013} 1012}
1014 1013
1015static void remove_full(struct kmem_cache *s, struct kmem_cache_node *n, struct page *page) 1014static void remove_full(struct kmem_cache *s, struct kmem_cache_node *n, struct page *page)
1016{ 1015{
1017 lockdep_assert_held(&n->list_lock);
1018
1019 if (!(s->flags & SLAB_STORE_USER)) 1016 if (!(s->flags & SLAB_STORE_USER))
1020 return; 1017 return;
1021 1018
1019 lockdep_assert_held(&n->list_lock);
1022 list_del(&page->lru); 1020 list_del(&page->lru);
1023} 1021}
1024 1022
@@ -1520,11 +1518,9 @@ static void discard_slab(struct kmem_cache *s, struct page *page)
1520/* 1518/*
1521 * Management of partially allocated slabs. 1519 * Management of partially allocated slabs.
1522 */ 1520 */
1523static inline void add_partial(struct kmem_cache_node *n, 1521static inline void
1524 struct page *page, int tail) 1522__add_partial(struct kmem_cache_node *n, struct page *page, int tail)
1525{ 1523{
1526 lockdep_assert_held(&n->list_lock);
1527
1528 n->nr_partial++; 1524 n->nr_partial++;
1529 if (tail == DEACTIVATE_TO_TAIL) 1525 if (tail == DEACTIVATE_TO_TAIL)
1530 list_add_tail(&page->lru, &n->partial); 1526 list_add_tail(&page->lru, &n->partial);
@@ -1532,15 +1528,27 @@ static inline void add_partial(struct kmem_cache_node *n,
1532 list_add(&page->lru, &n->partial); 1528 list_add(&page->lru, &n->partial);
1533} 1529}
1534 1530
1535static inline void remove_partial(struct kmem_cache_node *n, 1531static inline void add_partial(struct kmem_cache_node *n,
1536 struct page *page) 1532 struct page *page, int tail)
1537{ 1533{
1538 lockdep_assert_held(&n->list_lock); 1534 lockdep_assert_held(&n->list_lock);
1535 __add_partial(n, page, tail);
1536}
1539 1537
1538static inline void
1539__remove_partial(struct kmem_cache_node *n, struct page *page)
1540{
1540 list_del(&page->lru); 1541 list_del(&page->lru);
1541 n->nr_partial--; 1542 n->nr_partial--;
1542} 1543}
1543 1544
1545static inline void remove_partial(struct kmem_cache_node *n,
1546 struct page *page)
1547{
1548 lockdep_assert_held(&n->list_lock);
1549 __remove_partial(n, page);
1550}
1551
1544/* 1552/*
1545 * Remove slab from the partial list, freeze it and 1553 * Remove slab from the partial list, freeze it and
1546 * return the pointer to the freelist. 1554 * return the pointer to the freelist.
@@ -2906,12 +2914,10 @@ static void early_kmem_cache_node_alloc(int node)
2906 inc_slabs_node(kmem_cache_node, node, page->objects); 2914 inc_slabs_node(kmem_cache_node, node, page->objects);
2907 2915
2908 /* 2916 /*
2909 * the lock is for lockdep's sake, not for any actual 2917 * No locks need to be taken here as it has just been
2910 * race protection 2918 * initialized and there is no concurrent access.
2911 */ 2919 */
2912 spin_lock(&n->list_lock); 2920 __add_partial(n, page, DEACTIVATE_TO_HEAD);
2913 add_partial(n, page, DEACTIVATE_TO_HEAD);
2914 spin_unlock(&n->list_lock);
2915} 2921}
2916 2922
2917static void free_kmem_cache_nodes(struct kmem_cache *s) 2923static void free_kmem_cache_nodes(struct kmem_cache *s)
@@ -3197,7 +3203,7 @@ static void free_partial(struct kmem_cache *s, struct kmem_cache_node *n)
3197 3203
3198 list_for_each_entry_safe(page, h, &n->partial, lru) { 3204 list_for_each_entry_safe(page, h, &n->partial, lru) {
3199 if (!page->inuse) { 3205 if (!page->inuse) {
3200 remove_partial(n, page); 3206 __remove_partial(n, page);
3201 discard_slab(s, page); 3207 discard_slab(s, page);
3202 } else { 3208 } else {
3203 list_slab_objects(s, page, 3209 list_slab_objects(s, page,
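The slub refactor splits each partial-list helper into a bare __ variant and a locked wrapper that only adds the lockdep assertion, so early_kmem_cache_node_alloc() and free_partial() can touch the list at points where nothing else can be running and the lock is unnecessary. A hedged sketch of the pattern with hypothetical names:

	static inline void __my_partial_add(struct kmem_cache_node *n, struct page *page)
	{
		n->nr_partial++;
		list_add(&page->lru, &n->partial);
	}

	static inline void my_partial_add(struct kmem_cache_node *n, struct page *page)
	{
		/* normal paths must hold the node's list lock */
		lockdep_assert_held(&n->list_lock);
		__my_partial_add(n, page);
	}
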
diff --git a/mm/swap_state.c b/mm/swap_state.c
index 98e85e9c2b2d..e76ace30d436 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -63,6 +63,8 @@ unsigned long total_swapcache_pages(void)
63 return ret; 63 return ret;
64} 64}
65 65
66static atomic_t swapin_readahead_hits = ATOMIC_INIT(4);
67
66void show_swap_cache_info(void) 68void show_swap_cache_info(void)
67{ 69{
68 printk("%lu pages in swap cache\n", total_swapcache_pages()); 70 printk("%lu pages in swap cache\n", total_swapcache_pages());
@@ -286,8 +288,11 @@ struct page * lookup_swap_cache(swp_entry_t entry)
286 288
287 page = find_get_page(swap_address_space(entry), entry.val); 289 page = find_get_page(swap_address_space(entry), entry.val);
288 290
289 if (page) 291 if (page) {
290 INC_CACHE_INFO(find_success); 292 INC_CACHE_INFO(find_success);
293 if (TestClearPageReadahead(page))
294 atomic_inc(&swapin_readahead_hits);
295 }
291 296
292 INC_CACHE_INFO(find_total); 297 INC_CACHE_INFO(find_total);
293 return page; 298 return page;
@@ -389,6 +394,50 @@ struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
389 return found_page; 394 return found_page;
390} 395}
391 396
397static unsigned long swapin_nr_pages(unsigned long offset)
398{
399 static unsigned long prev_offset;
400 unsigned int pages, max_pages, last_ra;
401 static atomic_t last_readahead_pages;
402
403 max_pages = 1 << ACCESS_ONCE(page_cluster);
404 if (max_pages <= 1)
405 return 1;
406
407 /*
408 * This heuristic has been found to work well on both sequential and
409 * random loads, swapping to hard disk or to SSD: please don't ask
410 * what the "+ 2" means, it just happens to work well, that's all.
411 */
412 pages = atomic_xchg(&swapin_readahead_hits, 0) + 2;
413 if (pages == 2) {
414 /*
415 * We can have no readahead hits to judge by: but must not get
416 * stuck here forever, so check for an adjacent offset instead
417 * (and don't even bother to check whether swap type is same).
418 */
419 if (offset != prev_offset + 1 && offset != prev_offset - 1)
420 pages = 1;
421 prev_offset = offset;
422 } else {
423 unsigned int roundup = 4;
424 while (roundup < pages)
425 roundup <<= 1;
426 pages = roundup;
427 }
428
429 if (pages > max_pages)
430 pages = max_pages;
431
432 /* Don't shrink readahead too fast */
433 last_ra = atomic_read(&last_readahead_pages) / 2;
434 if (pages < last_ra)
435 pages = last_ra;
436 atomic_set(&last_readahead_pages, pages);
437
438 return pages;
439}
440
392/** 441/**
393 * swapin_readahead - swap in pages in hope we need them soon 442 * swapin_readahead - swap in pages in hope we need them soon
394 * @entry: swap entry of this memory 443 * @entry: swap entry of this memory
@@ -412,11 +461,16 @@ struct page *swapin_readahead(swp_entry_t entry, gfp_t gfp_mask,
412 struct vm_area_struct *vma, unsigned long addr) 461 struct vm_area_struct *vma, unsigned long addr)
413{ 462{
414 struct page *page; 463 struct page *page;
415 unsigned long offset = swp_offset(entry); 464 unsigned long entry_offset = swp_offset(entry);
465 unsigned long offset = entry_offset;
416 unsigned long start_offset, end_offset; 466 unsigned long start_offset, end_offset;
417 unsigned long mask = (1UL << page_cluster) - 1; 467 unsigned long mask;
418 struct blk_plug plug; 468 struct blk_plug plug;
419 469
470 mask = swapin_nr_pages(offset) - 1;
471 if (!mask)
472 goto skip;
473
420 /* Read a page_cluster sized and aligned cluster around offset. */ 474 /* Read a page_cluster sized and aligned cluster around offset. */
421 start_offset = offset & ~mask; 475 start_offset = offset & ~mask;
422 end_offset = offset | mask; 476 end_offset = offset | mask;
@@ -430,10 +484,13 @@ struct page *swapin_readahead(swp_entry_t entry, gfp_t gfp_mask,
430 gfp_mask, vma, addr); 484 gfp_mask, vma, addr);
431 if (!page) 485 if (!page)
432 continue; 486 continue;
487 if (offset != entry_offset)
488 SetPageReadahead(page);
433 page_cache_release(page); 489 page_cache_release(page);
434 } 490 }
435 blk_finish_plug(&plug); 491 blk_finish_plug(&plug);
436 492
437 lru_add_drain(); /* Push any new pages onto the LRU now */ 493 lru_add_drain(); /* Push any new pages onto the LRU now */
494skip:
438 return read_swap_cache_async(entry, gfp_mask, vma, addr); 495 return read_swap_cache_async(entry, gfp_mask, vma, addr);
439} 496}
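The sizing heuristic in swapin_nr_pages() stands on its own: hits from the previous readahead window plus 2, rounded up to a power of two no smaller than 4, clamped to 1 << page_cluster, and not allowed to shrink by more than half per call. A stand-alone user-space sketch of that arithmetic (the adjacent-offset fallback for the no-hit case is omitted):

	#include <stdio.h>

	static unsigned int readahead_pages(unsigned int hits,
					    unsigned int page_cluster,
					    unsigned int *last_ra)
	{
		unsigned int max_pages = 1u << page_cluster;
		unsigned int pages = hits + 2;

		if (max_pages <= 1)
			return 1;

		if (pages > 2) {
			unsigned int roundup = 4;

			while (roundup < pages)
				roundup <<= 1;
			pages = roundup;
		}
		if (pages > max_pages)
			pages = max_pages;
		if (pages < *last_ra / 2)
			pages = *last_ra / 2;	/* don't shrink readahead too fast */
		*last_ra = pages;
		return pages;
	}

	int main(void)
	{
		unsigned int last_ra = 0;

		/* 6 hits, page_cluster = 3 (max 8): 6 + 2 = 8 -> 8 pages */
		printf("%u\n", readahead_pages(6, 3, &last_ra));
		/* no hits next time: 0 + 2 = 2, but the floor is 8 / 2 -> 4 pages */
		printf("%u\n", readahead_pages(0, 3, &last_ra));
		return 0;
	}
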
diff --git a/mm/swapfile.c b/mm/swapfile.c
index c6c13b050a58..4a7f7e6992b6 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -1923,7 +1923,6 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
1923 p->swap_map = NULL; 1923 p->swap_map = NULL;
1924 cluster_info = p->cluster_info; 1924 cluster_info = p->cluster_info;
1925 p->cluster_info = NULL; 1925 p->cluster_info = NULL;
1926 p->flags = 0;
1927 frontswap_map = frontswap_map_get(p); 1926 frontswap_map = frontswap_map_get(p);
1928 spin_unlock(&p->lock); 1927 spin_unlock(&p->lock);
1929 spin_unlock(&swap_lock); 1928 spin_unlock(&swap_lock);
@@ -1949,6 +1948,16 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
1949 mutex_unlock(&inode->i_mutex); 1948 mutex_unlock(&inode->i_mutex);
1950 } 1949 }
1951 filp_close(swap_file, NULL); 1950 filp_close(swap_file, NULL);
1951
1952 /*
1953 * Clear the SWP_USED flag after all resources are freed so that swapon
1954 * can reuse this swap_info in alloc_swap_info() safely. It is ok to
1955 * not hold p->lock after we cleared its SWP_WRITEOK.
1956 */
1957 spin_lock(&swap_lock);
1958 p->flags = 0;
1959 spin_unlock(&swap_lock);
1960
1952 err = 0; 1961 err = 0;
1953 atomic_inc(&proc_poll_event); 1962 atomic_inc(&proc_poll_event);
1954 wake_up_interruptible(&proc_poll_wait); 1963 wake_up_interruptible(&proc_poll_wait);
diff --git a/mm/vmstat.c b/mm/vmstat.c
index 72496140ac08..def5dd2fbe61 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -851,12 +851,14 @@ const char * const vmstat_text[] = {
851 "thp_zero_page_alloc", 851 "thp_zero_page_alloc",
852 "thp_zero_page_alloc_failed", 852 "thp_zero_page_alloc_failed",
853#endif 853#endif
854#ifdef CONFIG_DEBUG_TLBFLUSH
854#ifdef CONFIG_SMP 855#ifdef CONFIG_SMP
855 "nr_tlb_remote_flush", 856 "nr_tlb_remote_flush",
856 "nr_tlb_remote_flush_received", 857 "nr_tlb_remote_flush_received",
857#endif 858#endif /* CONFIG_SMP */
858 "nr_tlb_local_flush_all", 859 "nr_tlb_local_flush_all",
859 "nr_tlb_local_flush_one", 860 "nr_tlb_local_flush_one",
861#endif /* CONFIG_DEBUG_TLBFLUSH */
860 862
861#endif /* CONFIG_VM_EVENTS_COUNTERS */ 863#endif /* CONFIG_VM_EVENTS_COUNTERS */
862}; 864};
diff --git a/net/9p/client.c b/net/9p/client.c
index a5e4d2dcb03e..9186550d77a6 100644
--- a/net/9p/client.c
+++ b/net/9p/client.c
@@ -204,7 +204,7 @@ free_and_return:
204 return ret; 204 return ret;
205} 205}
206 206
207struct p9_fcall *p9_fcall_alloc(int alloc_msize) 207static struct p9_fcall *p9_fcall_alloc(int alloc_msize)
208{ 208{
209 struct p9_fcall *fc; 209 struct p9_fcall *fc;
210 fc = kmalloc(sizeof(struct p9_fcall) + alloc_msize, GFP_NOFS); 210 fc = kmalloc(sizeof(struct p9_fcall) + alloc_msize, GFP_NOFS);
diff --git a/net/9p/trans_virtio.c b/net/9p/trans_virtio.c
index cd1e1ede73a4..ac2666c1d011 100644
--- a/net/9p/trans_virtio.c
+++ b/net/9p/trans_virtio.c
@@ -340,7 +340,10 @@ static int p9_get_mapped_pages(struct virtio_chan *chan,
340 int count = nr_pages; 340 int count = nr_pages;
341 while (nr_pages) { 341 while (nr_pages) {
342 s = rest_of_page(data); 342 s = rest_of_page(data);
343 pages[index++] = kmap_to_page(data); 343 if (is_vmalloc_addr(data))
344 pages[index++] = vmalloc_to_page(data);
345 else
346 pages[index++] = kmap_to_page(data);
344 data += s; 347 data += s;
345 nr_pages--; 348 nr_pages--;
346 } 349 }
diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c
index e4401a531afb..63f0455c0bc3 100644
--- a/net/bridge/br_device.c
+++ b/net/bridge/br_device.c
@@ -187,8 +187,7 @@ static int br_set_mac_address(struct net_device *dev, void *p)
187 187
188 spin_lock_bh(&br->lock); 188 spin_lock_bh(&br->lock);
189 if (!ether_addr_equal(dev->dev_addr, addr->sa_data)) { 189 if (!ether_addr_equal(dev->dev_addr, addr->sa_data)) {
190 memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN); 190 /* Mac address will be changed in br_stp_change_bridge_id(). */
191 br_fdb_change_mac_address(br, addr->sa_data);
192 br_stp_change_bridge_id(br, addr->sa_data); 191 br_stp_change_bridge_id(br, addr->sa_data);
193 } 192 }
194 spin_unlock_bh(&br->lock); 193 spin_unlock_bh(&br->lock);
@@ -226,6 +225,33 @@ static void br_netpoll_cleanup(struct net_device *dev)
226 br_netpoll_disable(p); 225 br_netpoll_disable(p);
227} 226}
228 227
228static int __br_netpoll_enable(struct net_bridge_port *p, gfp_t gfp)
229{
230 struct netpoll *np;
231 int err;
232
233 np = kzalloc(sizeof(*p->np), gfp);
234 if (!np)
235 return -ENOMEM;
236
237 err = __netpoll_setup(np, p->dev, gfp);
238 if (err) {
239 kfree(np);
240 return err;
241 }
242
243 p->np = np;
244 return err;
245}
246
247int br_netpoll_enable(struct net_bridge_port *p, gfp_t gfp)
248{
249 if (!p->br->dev->npinfo)
250 return 0;
251
252 return __br_netpoll_enable(p, gfp);
253}
254
229static int br_netpoll_setup(struct net_device *dev, struct netpoll_info *ni, 255static int br_netpoll_setup(struct net_device *dev, struct netpoll_info *ni,
230 gfp_t gfp) 256 gfp_t gfp)
231{ 257{
@@ -236,7 +262,7 @@ static int br_netpoll_setup(struct net_device *dev, struct netpoll_info *ni,
236 list_for_each_entry(p, &br->port_list, list) { 262 list_for_each_entry(p, &br->port_list, list) {
237 if (!p->dev) 263 if (!p->dev)
238 continue; 264 continue;
239 err = br_netpoll_enable(p, gfp); 265 err = __br_netpoll_enable(p, gfp);
240 if (err) 266 if (err)
241 goto fail; 267 goto fail;
242 } 268 }
@@ -249,28 +275,6 @@ fail:
249 goto out; 275 goto out;
250} 276}
251 277
252int br_netpoll_enable(struct net_bridge_port *p, gfp_t gfp)
253{
254 struct netpoll *np;
255 int err;
256
257 if (!p->br->dev->npinfo)
258 return 0;
259
260 np = kzalloc(sizeof(*p->np), gfp);
261 if (!np)
262 return -ENOMEM;
263
264 err = __netpoll_setup(np, p->dev, gfp);
265 if (err) {
266 kfree(np);
267 return err;
268 }
269
270 p->np = np;
271 return err;
272}
273
274void br_netpoll_disable(struct net_bridge_port *p) 278void br_netpoll_disable(struct net_bridge_port *p)
275{ 279{
276 struct netpoll *np = p->np; 280 struct netpoll *np = p->np;
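Splitting out __br_netpoll_enable() lets br_netpoll_setup(), which runs when netpoll is attached to the bridge device itself, set up every existing port unconditionally, while br_netpoll_enable() keeps the npinfo check for ports added afterwards. A hedged sketch of the add-port side (caller name hypothetical):

	/* No-op when the bridge has no netpoll attached. */
	static int my_new_port_netpoll(struct net_bridge_port *p)
	{
		return br_netpoll_enable(p, GFP_KERNEL);
	}
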
diff --git a/net/bridge/br_fdb.c b/net/bridge/br_fdb.c
index c5f5a4a933f4..9203d5a1943f 100644
--- a/net/bridge/br_fdb.c
+++ b/net/bridge/br_fdb.c
@@ -27,6 +27,9 @@
27#include "br_private.h" 27#include "br_private.h"
28 28
29static struct kmem_cache *br_fdb_cache __read_mostly; 29static struct kmem_cache *br_fdb_cache __read_mostly;
30static struct net_bridge_fdb_entry *fdb_find(struct hlist_head *head,
31 const unsigned char *addr,
32 __u16 vid);
30static int fdb_insert(struct net_bridge *br, struct net_bridge_port *source, 33static int fdb_insert(struct net_bridge *br, struct net_bridge_port *source,
31 const unsigned char *addr, u16 vid); 34 const unsigned char *addr, u16 vid);
32static void fdb_notify(struct net_bridge *br, 35static void fdb_notify(struct net_bridge *br,
@@ -89,11 +92,57 @@ static void fdb_delete(struct net_bridge *br, struct net_bridge_fdb_entry *f)
89 call_rcu(&f->rcu, fdb_rcu_free); 92 call_rcu(&f->rcu, fdb_rcu_free);
90} 93}
91 94
95/* Delete a local entry if no other port had the same address. */
96static void fdb_delete_local(struct net_bridge *br,
97 const struct net_bridge_port *p,
98 struct net_bridge_fdb_entry *f)
99{
100 const unsigned char *addr = f->addr.addr;
101 u16 vid = f->vlan_id;
102 struct net_bridge_port *op;
103
104 /* Maybe another port has same hw addr? */
105 list_for_each_entry(op, &br->port_list, list) {
106 if (op != p && ether_addr_equal(op->dev->dev_addr, addr) &&
107 (!vid || nbp_vlan_find(op, vid))) {
108 f->dst = op;
109 f->added_by_user = 0;
110 return;
111 }
112 }
113
114 /* Maybe bridge device has same hw addr? */
115 if (p && ether_addr_equal(br->dev->dev_addr, addr) &&
116 (!vid || br_vlan_find(br, vid))) {
117 f->dst = NULL;
118 f->added_by_user = 0;
119 return;
120 }
121
122 fdb_delete(br, f);
123}
124
125void br_fdb_find_delete_local(struct net_bridge *br,
126 const struct net_bridge_port *p,
127 const unsigned char *addr, u16 vid)
128{
129 struct hlist_head *head = &br->hash[br_mac_hash(addr, vid)];
130 struct net_bridge_fdb_entry *f;
131
132 spin_lock_bh(&br->hash_lock);
133 f = fdb_find(head, addr, vid);
134 if (f && f->is_local && !f->added_by_user && f->dst == p)
135 fdb_delete_local(br, p, f);
136 spin_unlock_bh(&br->hash_lock);
137}
138
92void br_fdb_changeaddr(struct net_bridge_port *p, const unsigned char *newaddr) 139void br_fdb_changeaddr(struct net_bridge_port *p, const unsigned char *newaddr)
93{ 140{
94 struct net_bridge *br = p->br; 141 struct net_bridge *br = p->br;
95 bool no_vlan = (nbp_get_vlan_info(p) == NULL) ? true : false; 142 struct net_port_vlans *pv = nbp_get_vlan_info(p);
143 bool no_vlan = !pv;
96 int i; 144 int i;
145 u16 vid;
97 146
98 spin_lock_bh(&br->hash_lock); 147 spin_lock_bh(&br->hash_lock);
99 148
@@ -104,38 +153,34 @@ void br_fdb_changeaddr(struct net_bridge_port *p, const unsigned char *newaddr)
104 struct net_bridge_fdb_entry *f; 153 struct net_bridge_fdb_entry *f;
105 154
106 f = hlist_entry(h, struct net_bridge_fdb_entry, hlist); 155 f = hlist_entry(h, struct net_bridge_fdb_entry, hlist);
107 if (f->dst == p && f->is_local) { 156 if (f->dst == p && f->is_local && !f->added_by_user) {
108 /* maybe another port has same hw addr? */
109 struct net_bridge_port *op;
110 u16 vid = f->vlan_id;
111 list_for_each_entry(op, &br->port_list, list) {
112 if (op != p &&
113 ether_addr_equal(op->dev->dev_addr,
114 f->addr.addr) &&
115 nbp_vlan_find(op, vid)) {
116 f->dst = op;
117 goto insert;
118 }
119 }
120
121 /* delete old one */ 157 /* delete old one */
122 fdb_delete(br, f); 158 fdb_delete_local(br, p, f);
123insert:
124 /* insert new address, may fail if invalid
125 * address or dup.
126 */
127 fdb_insert(br, p, newaddr, vid);
128 159
129 /* if this port has no vlan information 160 /* if this port has no vlan information
130 * configured, we can safely be done at 161 * configured, we can safely be done at
131 * this point. 162 * this point.
132 */ 163 */
133 if (no_vlan) 164 if (no_vlan)
134 goto done; 165 goto insert;
135 } 166 }
136 } 167 }
137 } 168 }
138 169
170insert:
171 /* insert new address, may fail if invalid address or dup. */
172 fdb_insert(br, p, newaddr, 0);
173
174 if (no_vlan)
175 goto done;
176
177 /* Now add entries for every VLAN configured on the port.
178 * This function runs under RTNL so the bitmap will not change
179 * from under us.
180 */
181 for_each_set_bit(vid, pv->vlan_bitmap, VLAN_N_VID)
182 fdb_insert(br, p, newaddr, vid);
183
139done: 184done:
140 spin_unlock_bh(&br->hash_lock); 185 spin_unlock_bh(&br->hash_lock);
141} 186}
@@ -146,10 +191,12 @@ void br_fdb_change_mac_address(struct net_bridge *br, const u8 *newaddr)
146 struct net_port_vlans *pv; 191 struct net_port_vlans *pv;
147 u16 vid = 0; 192 u16 vid = 0;
148 193
194 spin_lock_bh(&br->hash_lock);
195
149 /* If old entry was unassociated with any port, then delete it. */ 196 /* If old entry was unassociated with any port, then delete it. */
150 f = __br_fdb_get(br, br->dev->dev_addr, 0); 197 f = __br_fdb_get(br, br->dev->dev_addr, 0);
151 if (f && f->is_local && !f->dst) 198 if (f && f->is_local && !f->dst)
152 fdb_delete(br, f); 199 fdb_delete_local(br, NULL, f);
153 200
154 fdb_insert(br, NULL, newaddr, 0); 201 fdb_insert(br, NULL, newaddr, 0);
155 202
@@ -159,14 +206,16 @@ void br_fdb_change_mac_address(struct net_bridge *br, const u8 *newaddr)
159 */ 206 */
160 pv = br_get_vlan_info(br); 207 pv = br_get_vlan_info(br);
161 if (!pv) 208 if (!pv)
162 return; 209 goto out;
163 210
164 for_each_set_bit_from(vid, pv->vlan_bitmap, VLAN_N_VID) { 211 for_each_set_bit_from(vid, pv->vlan_bitmap, VLAN_N_VID) {
165 f = __br_fdb_get(br, br->dev->dev_addr, vid); 212 f = __br_fdb_get(br, br->dev->dev_addr, vid);
166 if (f && f->is_local && !f->dst) 213 if (f && f->is_local && !f->dst)
167 fdb_delete(br, f); 214 fdb_delete_local(br, NULL, f);
168 fdb_insert(br, NULL, newaddr, vid); 215 fdb_insert(br, NULL, newaddr, vid);
169 } 216 }
217out:
218 spin_unlock_bh(&br->hash_lock);
170} 219}
171 220
172void br_fdb_cleanup(unsigned long _data) 221void br_fdb_cleanup(unsigned long _data)
@@ -235,25 +284,11 @@ void br_fdb_delete_by_port(struct net_bridge *br,
235 284
236 if (f->is_static && !do_all) 285 if (f->is_static && !do_all)
237 continue; 286 continue;
238 /*
239 * if multiple ports all have the same device address
240 * then when one port is deleted, assign
241 * the local entry to other port
242 */
243 if (f->is_local) {
244 struct net_bridge_port *op;
245 list_for_each_entry(op, &br->port_list, list) {
246 if (op != p &&
247 ether_addr_equal(op->dev->dev_addr,
248 f->addr.addr)) {
249 f->dst = op;
250 goto skip_delete;
251 }
252 }
253 }
254 287
255 fdb_delete(br, f); 288 if (f->is_local)
256 skip_delete: ; 289 fdb_delete_local(br, p, f);
290 else
291 fdb_delete(br, f);
257 } 292 }
258 } 293 }
259 spin_unlock_bh(&br->hash_lock); 294 spin_unlock_bh(&br->hash_lock);
@@ -397,6 +432,7 @@ static struct net_bridge_fdb_entry *fdb_create(struct hlist_head *head,
397 fdb->vlan_id = vid; 432 fdb->vlan_id = vid;
398 fdb->is_local = 0; 433 fdb->is_local = 0;
399 fdb->is_static = 0; 434 fdb->is_static = 0;
435 fdb->added_by_user = 0;
400 fdb->updated = fdb->used = jiffies; 436 fdb->updated = fdb->used = jiffies;
401 hlist_add_head_rcu(&fdb->hlist, head); 437 hlist_add_head_rcu(&fdb->hlist, head);
402 } 438 }
@@ -447,7 +483,7 @@ int br_fdb_insert(struct net_bridge *br, struct net_bridge_port *source,
447} 483}
448 484
449void br_fdb_update(struct net_bridge *br, struct net_bridge_port *source, 485void br_fdb_update(struct net_bridge *br, struct net_bridge_port *source,
450 const unsigned char *addr, u16 vid) 486 const unsigned char *addr, u16 vid, bool added_by_user)
451{ 487{
452 struct hlist_head *head = &br->hash[br_mac_hash(addr, vid)]; 488 struct hlist_head *head = &br->hash[br_mac_hash(addr, vid)];
453 struct net_bridge_fdb_entry *fdb; 489 struct net_bridge_fdb_entry *fdb;
@@ -473,13 +509,18 @@ void br_fdb_update(struct net_bridge *br, struct net_bridge_port *source,
473 /* fastpath: update of existing entry */ 509 /* fastpath: update of existing entry */
474 fdb->dst = source; 510 fdb->dst = source;
475 fdb->updated = jiffies; 511 fdb->updated = jiffies;
512 if (unlikely(added_by_user))
513 fdb->added_by_user = 1;
476 } 514 }
477 } else { 515 } else {
478 spin_lock(&br->hash_lock); 516 spin_lock(&br->hash_lock);
479 if (likely(!fdb_find(head, addr, vid))) { 517 if (likely(!fdb_find(head, addr, vid))) {
480 fdb = fdb_create(head, source, addr, vid); 518 fdb = fdb_create(head, source, addr, vid);
481 if (fdb) 519 if (fdb) {
520 if (unlikely(added_by_user))
521 fdb->added_by_user = 1;
482 fdb_notify(br, fdb, RTM_NEWNEIGH); 522 fdb_notify(br, fdb, RTM_NEWNEIGH);
523 }
483 } 524 }
484 /* else we lose race and someone else inserts 525 /* else we lose race and someone else inserts
485 * it first, don't bother updating 526 * it first, don't bother updating
@@ -647,6 +688,7 @@ static int fdb_add_entry(struct net_bridge_port *source, const __u8 *addr,
647 688
648 modified = true; 689 modified = true;
649 } 690 }
691 fdb->added_by_user = 1;
650 692
651 fdb->used = jiffies; 693 fdb->used = jiffies;
652 if (modified) { 694 if (modified) {
@@ -664,7 +706,7 @@ static int __br_fdb_add(struct ndmsg *ndm, struct net_bridge_port *p,
664 706
665 if (ndm->ndm_flags & NTF_USE) { 707 if (ndm->ndm_flags & NTF_USE) {
666 rcu_read_lock(); 708 rcu_read_lock();
667 br_fdb_update(p->br, p, addr, vid); 709 br_fdb_update(p->br, p, addr, vid, true);
668 rcu_read_unlock(); 710 rcu_read_unlock();
669 } else { 711 } else {
670 spin_lock_bh(&p->br->hash_lock); 712 spin_lock_bh(&p->br->hash_lock);
@@ -749,8 +791,7 @@ out:
749 return err; 791 return err;
750} 792}
751 793
752int fdb_delete_by_addr(struct net_bridge *br, const u8 *addr, 794static int fdb_delete_by_addr(struct net_bridge *br, const u8 *addr, u16 vlan)
753 u16 vlan)
754{ 795{
755 struct hlist_head *head = &br->hash[br_mac_hash(addr, vlan)]; 796 struct hlist_head *head = &br->hash[br_mac_hash(addr, vlan)];
756 struct net_bridge_fdb_entry *fdb; 797 struct net_bridge_fdb_entry *fdb;
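The added_by_user flag separates entries learned from traffic from those installed over netlink, so the change-address and delete paths above can leave user-added local entries alone. Learning callers pass false, as the br_input.c hunk below does; a hedged sketch of that call (wrapper name hypothetical, bridge-internal context assumed):

	static void learn_source(struct net_bridge *br, struct net_bridge_port *p,
				 const struct sk_buff *skb, u16 vid)
	{
		if (p->flags & BR_LEARNING)
			br_fdb_update(br, p, eth_hdr(skb)->h_source, vid, false);
	}
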
diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c
index cffe1d666ba1..54d207d3a31c 100644
--- a/net/bridge/br_if.c
+++ b/net/bridge/br_if.c
@@ -389,6 +389,9 @@ int br_add_if(struct net_bridge *br, struct net_device *dev)
389 if (br->dev->needed_headroom < dev->needed_headroom) 389 if (br->dev->needed_headroom < dev->needed_headroom)
390 br->dev->needed_headroom = dev->needed_headroom; 390 br->dev->needed_headroom = dev->needed_headroom;
391 391
392 if (br_fdb_insert(br, p, dev->dev_addr, 0))
393 netdev_err(dev, "failed insert local address bridge forwarding table\n");
394
392 spin_lock_bh(&br->lock); 395 spin_lock_bh(&br->lock);
393 changed_addr = br_stp_recalculate_bridge_id(br); 396 changed_addr = br_stp_recalculate_bridge_id(br);
394 397
@@ -404,9 +407,6 @@ int br_add_if(struct net_bridge *br, struct net_device *dev)
404 407
405 dev_set_mtu(br->dev, br_min_mtu(br)); 408 dev_set_mtu(br->dev, br_min_mtu(br));
406 409
407 if (br_fdb_insert(br, p, dev->dev_addr, 0))
408 netdev_err(dev, "failed insert local address bridge forwarding table\n");
409
410 kobject_uevent(&p->kobj, KOBJ_ADD); 410 kobject_uevent(&p->kobj, KOBJ_ADD);
411 411
412 return 0; 412 return 0;
diff --git a/net/bridge/br_input.c b/net/bridge/br_input.c
index bf8dc7d308d6..28d544627422 100644
--- a/net/bridge/br_input.c
+++ b/net/bridge/br_input.c
@@ -77,7 +77,7 @@ int br_handle_frame_finish(struct sk_buff *skb)
77 /* insert into forwarding database after filtering to avoid spoofing */ 77 /* insert into forwarding database after filtering to avoid spoofing */
78 br = p->br; 78 br = p->br;
79 if (p->flags & BR_LEARNING) 79 if (p->flags & BR_LEARNING)
80 br_fdb_update(br, p, eth_hdr(skb)->h_source, vid); 80 br_fdb_update(br, p, eth_hdr(skb)->h_source, vid, false);
81 81
82 if (!is_broadcast_ether_addr(dest) && is_multicast_ether_addr(dest) && 82 if (!is_broadcast_ether_addr(dest) && is_multicast_ether_addr(dest) &&
83 br_multicast_rcv(br, p, skb, vid)) 83 br_multicast_rcv(br, p, skb, vid))
@@ -148,7 +148,7 @@ static int br_handle_local_finish(struct sk_buff *skb)
148 148
149 br_vlan_get_tag(skb, &vid); 149 br_vlan_get_tag(skb, &vid);
150 if (p->flags & BR_LEARNING) 150 if (p->flags & BR_LEARNING)
151 br_fdb_update(p->br, p, eth_hdr(skb)->h_source, vid); 151 br_fdb_update(p->br, p, eth_hdr(skb)->h_source, vid, false);
152 return 0; /* process further */ 152 return 0; /* process further */
153} 153}
154 154
diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
index fcd12333c59b..3ba11bc99b65 100644
--- a/net/bridge/br_private.h
+++ b/net/bridge/br_private.h
@@ -104,6 +104,7 @@ struct net_bridge_fdb_entry
104 mac_addr addr; 104 mac_addr addr;
105 unsigned char is_local; 105 unsigned char is_local;
106 unsigned char is_static; 106 unsigned char is_static;
107 unsigned char added_by_user;
107 __u16 vlan_id; 108 __u16 vlan_id;
108}; 109};
109 110
@@ -370,6 +371,9 @@ static inline void br_netpoll_disable(struct net_bridge_port *p)
370int br_fdb_init(void); 371int br_fdb_init(void);
371void br_fdb_fini(void); 372void br_fdb_fini(void);
372void br_fdb_flush(struct net_bridge *br); 373void br_fdb_flush(struct net_bridge *br);
374void br_fdb_find_delete_local(struct net_bridge *br,
375 const struct net_bridge_port *p,
376 const unsigned char *addr, u16 vid);
373void br_fdb_changeaddr(struct net_bridge_port *p, const unsigned char *newaddr); 377void br_fdb_changeaddr(struct net_bridge_port *p, const unsigned char *newaddr);
374void br_fdb_change_mac_address(struct net_bridge *br, const u8 *newaddr); 378void br_fdb_change_mac_address(struct net_bridge *br, const u8 *newaddr);
375void br_fdb_cleanup(unsigned long arg); 379void br_fdb_cleanup(unsigned long arg);
@@ -383,8 +387,7 @@ int br_fdb_fillbuf(struct net_bridge *br, void *buf, unsigned long count,
383int br_fdb_insert(struct net_bridge *br, struct net_bridge_port *source, 387int br_fdb_insert(struct net_bridge *br, struct net_bridge_port *source,
384 const unsigned char *addr, u16 vid); 388 const unsigned char *addr, u16 vid);
385void br_fdb_update(struct net_bridge *br, struct net_bridge_port *source, 389void br_fdb_update(struct net_bridge *br, struct net_bridge_port *source,
386 const unsigned char *addr, u16 vid); 390 const unsigned char *addr, u16 vid, bool added_by_user);
387int fdb_delete_by_addr(struct net_bridge *br, const u8 *addr, u16 vid);
388 391
389int br_fdb_delete(struct ndmsg *ndm, struct nlattr *tb[], 392int br_fdb_delete(struct ndmsg *ndm, struct nlattr *tb[],
390 struct net_device *dev, const unsigned char *addr); 393 struct net_device *dev, const unsigned char *addr);
@@ -584,6 +587,7 @@ struct sk_buff *br_handle_vlan(struct net_bridge *br,
584int br_vlan_add(struct net_bridge *br, u16 vid, u16 flags); 587int br_vlan_add(struct net_bridge *br, u16 vid, u16 flags);
585int br_vlan_delete(struct net_bridge *br, u16 vid); 588int br_vlan_delete(struct net_bridge *br, u16 vid);
586void br_vlan_flush(struct net_bridge *br); 589void br_vlan_flush(struct net_bridge *br);
590bool br_vlan_find(struct net_bridge *br, u16 vid);
587int br_vlan_filter_toggle(struct net_bridge *br, unsigned long val); 591int br_vlan_filter_toggle(struct net_bridge *br, unsigned long val);
588int nbp_vlan_add(struct net_bridge_port *port, u16 vid, u16 flags); 592int nbp_vlan_add(struct net_bridge_port *port, u16 vid, u16 flags);
589int nbp_vlan_delete(struct net_bridge_port *port, u16 vid); 593int nbp_vlan_delete(struct net_bridge_port *port, u16 vid);
@@ -665,6 +669,11 @@ static inline void br_vlan_flush(struct net_bridge *br)
665{ 669{
666} 670}
667 671
672static inline bool br_vlan_find(struct net_bridge *br, u16 vid)
673{
674 return false;
675}
676
668static inline int nbp_vlan_add(struct net_bridge_port *port, u16 vid, u16 flags) 677static inline int nbp_vlan_add(struct net_bridge_port *port, u16 vid, u16 flags)
669{ 678{
670 return -EOPNOTSUPP; 679 return -EOPNOTSUPP;
diff --git a/net/bridge/br_stp_if.c b/net/bridge/br_stp_if.c
index 656a6f3e40de..189ba1e7d851 100644
--- a/net/bridge/br_stp_if.c
+++ b/net/bridge/br_stp_if.c
@@ -194,6 +194,8 @@ void br_stp_change_bridge_id(struct net_bridge *br, const unsigned char *addr)
194 194
195 wasroot = br_is_root_bridge(br); 195 wasroot = br_is_root_bridge(br);
196 196
197 br_fdb_change_mac_address(br, addr);
198
197 memcpy(oldaddr, br->bridge_id.addr, ETH_ALEN); 199 memcpy(oldaddr, br->bridge_id.addr, ETH_ALEN);
198 memcpy(br->bridge_id.addr, addr, ETH_ALEN); 200 memcpy(br->bridge_id.addr, addr, ETH_ALEN);
199 memcpy(br->dev->dev_addr, addr, ETH_ALEN); 201 memcpy(br->dev->dev_addr, addr, ETH_ALEN);
diff --git a/net/bridge/br_vlan.c b/net/bridge/br_vlan.c
index 4ca4d0a0151c..8249ca764c79 100644
--- a/net/bridge/br_vlan.c
+++ b/net/bridge/br_vlan.c
@@ -275,9 +275,7 @@ int br_vlan_delete(struct net_bridge *br, u16 vid)
275 if (!pv) 275 if (!pv)
276 return -EINVAL; 276 return -EINVAL;
277 277
278 spin_lock_bh(&br->hash_lock); 278 br_fdb_find_delete_local(br, NULL, br->dev->dev_addr, vid);
279 fdb_delete_by_addr(br, br->dev->dev_addr, vid);
280 spin_unlock_bh(&br->hash_lock);
281 279
282 __vlan_del(pv, vid); 280 __vlan_del(pv, vid);
283 return 0; 281 return 0;
@@ -295,6 +293,25 @@ void br_vlan_flush(struct net_bridge *br)
295 __vlan_flush(pv); 293 __vlan_flush(pv);
296} 294}
297 295
296bool br_vlan_find(struct net_bridge *br, u16 vid)
297{
298 struct net_port_vlans *pv;
299 bool found = false;
300
301 rcu_read_lock();
302 pv = rcu_dereference(br->vlan_info);
303
304 if (!pv)
305 goto out;
306
307 if (test_bit(vid, pv->vlan_bitmap))
308 found = true;
309
310out:
311 rcu_read_unlock();
312 return found;
313}
314
298int br_vlan_filter_toggle(struct net_bridge *br, unsigned long val) 315int br_vlan_filter_toggle(struct net_bridge *br, unsigned long val)
299{ 316{
300 if (!rtnl_trylock()) 317 if (!rtnl_trylock())
@@ -359,9 +376,7 @@ int nbp_vlan_delete(struct net_bridge_port *port, u16 vid)
359 if (!pv) 376 if (!pv)
360 return -EINVAL; 377 return -EINVAL;
361 378
362 spin_lock_bh(&port->br->hash_lock); 379 br_fdb_find_delete_local(port->br, port, port->dev->dev_addr, vid);
363 fdb_delete_by_addr(port->br, port->dev->dev_addr, vid);
364 spin_unlock_bh(&port->br->hash_lock);
365 380
366 return __vlan_del(pv, vid); 381 return __vlan_del(pv, vid);
367} 382}
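
[Editor's note] The br_vlan_find() added in this file is a read-side probe: under rcu_read_lock() it dereferences the bridge's vlan_info and tests the vid bit in the VLAN bitmap. A minimal userspace sketch of the same bitmap test/set arithmetic follows; names and sizes are illustrative, not taken from the patch.

#include <stdio.h>

#define VLAN_N_VID 4096
#define BITS_PER_LONG (8 * sizeof(unsigned long))

static unsigned long vlan_bitmap[VLAN_N_VID / BITS_PER_LONG];

static void vlan_set(unsigned int vid)
{
        vlan_bitmap[vid / BITS_PER_LONG] |= 1UL << (vid % BITS_PER_LONG);
}

static int vlan_find(unsigned int vid)
{
        return !!(vlan_bitmap[vid / BITS_PER_LONG] & (1UL << (vid % BITS_PER_LONG)));
}

int main(void)
{
        vlan_set(100);
        printf("vid 100: %d, vid 200: %d\n", vlan_find(100), vlan_find(200));
        return 0;
}
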
diff --git a/net/caif/caif_dev.c b/net/caif/caif_dev.c
index 4dca159435cf..edbca468fa73 100644
--- a/net/caif/caif_dev.c
+++ b/net/caif/caif_dev.c
@@ -22,6 +22,7 @@
22#include <net/pkt_sched.h> 22#include <net/pkt_sched.h>
23#include <net/caif/caif_device.h> 23#include <net/caif/caif_device.h>
24#include <net/caif/caif_layer.h> 24#include <net/caif/caif_layer.h>
25#include <net/caif/caif_dev.h>
25#include <net/caif/cfpkt.h> 26#include <net/caif/cfpkt.h>
26#include <net/caif/cfcnfg.h> 27#include <net/caif/cfcnfg.h>
27#include <net/caif/cfserl.h> 28#include <net/caif/cfserl.h>
diff --git a/net/caif/cfsrvl.c b/net/caif/cfsrvl.c
index 353f793d1b3b..a6e115463052 100644
--- a/net/caif/cfsrvl.c
+++ b/net/caif/cfsrvl.c
@@ -15,6 +15,7 @@
15#include <net/caif/caif_layer.h> 15#include <net/caif/caif_layer.h>
16#include <net/caif/cfsrvl.h> 16#include <net/caif/cfsrvl.h>
17#include <net/caif/cfpkt.h> 17#include <net/caif/cfpkt.h>
18#include <net/caif/caif_dev.h>
18 19
19#define SRVL_CTRL_PKT_SIZE 1 20#define SRVL_CTRL_PKT_SIZE 1
20#define SRVL_FLOW_OFF 0x81 21#define SRVL_FLOW_OFF 0x81
diff --git a/net/can/af_can.c b/net/can/af_can.c
index d249874a366d..a27f8aad9e99 100644
--- a/net/can/af_can.c
+++ b/net/can/af_can.c
@@ -57,6 +57,7 @@
57#include <linux/skbuff.h> 57#include <linux/skbuff.h>
58#include <linux/can.h> 58#include <linux/can.h>
59#include <linux/can/core.h> 59#include <linux/can/core.h>
60#include <linux/can/skb.h>
60#include <linux/ratelimit.h> 61#include <linux/ratelimit.h>
61#include <net/net_namespace.h> 62#include <net/net_namespace.h>
62#include <net/sock.h> 63#include <net/sock.h>
@@ -290,7 +291,7 @@ int can_send(struct sk_buff *skb, int loop)
290 return -ENOMEM; 291 return -ENOMEM;
291 } 292 }
292 293
293 newskb->sk = skb->sk; 294 can_skb_set_owner(newskb, skb->sk);
294 newskb->ip_summed = CHECKSUM_UNNECESSARY; 295 newskb->ip_summed = CHECKSUM_UNNECESSARY;
295 newskb->pkt_type = PACKET_BROADCAST; 296 newskb->pkt_type = PACKET_BROADCAST;
296 } 297 }
diff --git a/net/can/bcm.c b/net/can/bcm.c
index 3fc737b214c7..dcb75c0e66c1 100644
--- a/net/can/bcm.c
+++ b/net/can/bcm.c
@@ -268,7 +268,7 @@ static void bcm_can_tx(struct bcm_op *op)
268 268
269 /* send with loopback */ 269 /* send with loopback */
270 skb->dev = dev; 270 skb->dev = dev;
271 skb->sk = op->sk; 271 can_skb_set_owner(skb, op->sk);
272 can_send(skb, 1); 272 can_send(skb, 1);
273 273
274 /* update statistics */ 274 /* update statistics */
@@ -1223,7 +1223,7 @@ static int bcm_tx_send(struct msghdr *msg, int ifindex, struct sock *sk)
1223 1223
1224 can_skb_prv(skb)->ifindex = dev->ifindex; 1224 can_skb_prv(skb)->ifindex = dev->ifindex;
1225 skb->dev = dev; 1225 skb->dev = dev;
1226 skb->sk = sk; 1226 can_skb_set_owner(skb, sk);
1227 err = can_send(skb, 1); /* send with loopback */ 1227 err = can_send(skb, 1); /* send with loopback */
1228 dev_put(dev); 1228 dev_put(dev);
1229 1229
diff --git a/net/can/raw.c b/net/can/raw.c
index 07d72d852324..8be757cca2ec 100644
--- a/net/can/raw.c
+++ b/net/can/raw.c
@@ -715,6 +715,7 @@ static int raw_sendmsg(struct kiocb *iocb, struct socket *sock,
715 715
716 skb->dev = dev; 716 skb->dev = dev;
717 skb->sk = sk; 717 skb->sk = sk;
718 skb->priority = sk->sk_priority;
718 719
719 err = can_send(skb, ro->loopback); 720 err = can_send(skb, ro->loopback);
720 721
diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c
index 0e478a0f4204..30efc5c18622 100644
--- a/net/ceph/messenger.c
+++ b/net/ceph/messenger.c
@@ -840,9 +840,13 @@ static bool ceph_msg_data_bio_advance(struct ceph_msg_data_cursor *cursor,
840 840
841 if (!cursor->bvec_iter.bi_size) { 841 if (!cursor->bvec_iter.bi_size) {
842 bio = bio->bi_next; 842 bio = bio->bi_next;
843 cursor->bvec_iter = bio->bi_iter; 843 cursor->bio = bio;
844 if (bio)
845 cursor->bvec_iter = bio->bi_iter;
846 else
847 memset(&cursor->bvec_iter, 0,
848 sizeof(cursor->bvec_iter));
844 } 849 }
845 cursor->bio = bio;
846 850
847 if (!cursor->last_piece) { 851 if (!cursor->last_piece) {
848 BUG_ON(!cursor->resid); 852 BUG_ON(!cursor->resid);
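
[Editor's note] The messenger.c fix above advances cursor->bio first and only reads bio->bi_iter when a next bio actually exists, zeroing the iterator otherwise, rather than dereferencing a possibly NULL next pointer. A tiny sketch of that guarded advance; the list and field names are made up.

#include <stdio.h>

struct seg {
        struct seg *next;
        size_t len;
};

struct cursor {
        struct seg *cur;
        size_t remaining;       /* stands in for the bvec iterator */
};

static void cursor_advance(struct cursor *c)
{
        struct seg *next = c->cur->next;

        c->cur = next;
        if (next)
                c->remaining = next->len;       /* load only when it exists */
        else
                c->remaining = 0;               /* end of chain: clear state */
}

int main(void)
{
        struct seg b = { NULL, 256 }, a = { &b, 512 };
        struct cursor c = { &a, a.len };

        cursor_advance(&c);
        printf("remaining %zu\n", c.remaining);
        cursor_advance(&c);
        printf("remaining %zu\n", c.remaining);
        return 0;
}
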
diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c
index 010ff3bd58ad..0676f2b199d6 100644
--- a/net/ceph/osd_client.c
+++ b/net/ceph/osd_client.c
@@ -1427,6 +1427,40 @@ static void __send_queued(struct ceph_osd_client *osdc)
1427} 1427}
1428 1428
1429/* 1429/*
1430 * Caller should hold map_sem for read and request_mutex.
1431 */
1432static int __ceph_osdc_start_request(struct ceph_osd_client *osdc,
1433 struct ceph_osd_request *req,
1434 bool nofail)
1435{
1436 int rc;
1437
1438 __register_request(osdc, req);
1439 req->r_sent = 0;
1440 req->r_got_reply = 0;
1441 rc = __map_request(osdc, req, 0);
1442 if (rc < 0) {
1443 if (nofail) {
1444 dout("osdc_start_request failed map, "
1445 " will retry %lld\n", req->r_tid);
1446 rc = 0;
1447 } else {
1448 __unregister_request(osdc, req);
1449 }
1450 return rc;
1451 }
1452
1453 if (req->r_osd == NULL) {
1454 dout("send_request %p no up osds in pg\n", req);
1455 ceph_monc_request_next_osdmap(&osdc->client->monc);
1456 } else {
1457 __send_queued(osdc);
1458 }
1459
1460 return 0;
1461}
1462
1463/*
1430 * Timeout callback, called every N seconds when 1 or more osd 1464 * Timeout callback, called every N seconds when 1 or more osd
1431 * requests has been active for more than N seconds. When this 1465 * requests has been active for more than N seconds. When this
1432 * happens, we ping all OSDs with requests who have timed out to 1466 * happens, we ping all OSDs with requests who have timed out to
@@ -1653,6 +1687,7 @@ static void handle_reply(struct ceph_osd_client *osdc, struct ceph_msg *msg,
1653 osdmap_epoch = ceph_decode_32(&p); 1687 osdmap_epoch = ceph_decode_32(&p);
1654 1688
1655 /* lookup */ 1689 /* lookup */
1690 down_read(&osdc->map_sem);
1656 mutex_lock(&osdc->request_mutex); 1691 mutex_lock(&osdc->request_mutex);
1657 req = __lookup_request(osdc, tid); 1692 req = __lookup_request(osdc, tid);
1658 if (req == NULL) { 1693 if (req == NULL) {
@@ -1709,7 +1744,6 @@ static void handle_reply(struct ceph_osd_client *osdc, struct ceph_msg *msg,
1709 dout("redirect pool %lld\n", redir.oloc.pool); 1744 dout("redirect pool %lld\n", redir.oloc.pool);
1710 1745
1711 __unregister_request(osdc, req); 1746 __unregister_request(osdc, req);
1712 mutex_unlock(&osdc->request_mutex);
1713 1747
1714 req->r_target_oloc = redir.oloc; /* struct */ 1748 req->r_target_oloc = redir.oloc; /* struct */
1715 1749
@@ -1721,10 +1755,10 @@ static void handle_reply(struct ceph_osd_client *osdc, struct ceph_msg *msg,
1721 * successfully. In the future we might want to follow 1755 * successfully. In the future we might want to follow
1722 * original request's nofail setting here. 1756 * original request's nofail setting here.
1723 */ 1757 */
1724 err = ceph_osdc_start_request(osdc, req, true); 1758 err = __ceph_osdc_start_request(osdc, req, true);
1725 BUG_ON(err); 1759 BUG_ON(err);
1726 1760
1727 goto done; 1761 goto out_unlock;
1728 } 1762 }
1729 1763
1730 already_completed = req->r_got_reply; 1764 already_completed = req->r_got_reply;
@@ -1742,8 +1776,7 @@ static void handle_reply(struct ceph_osd_client *osdc, struct ceph_msg *msg,
1742 req->r_got_reply = 1; 1776 req->r_got_reply = 1;
1743 } else if ((flags & CEPH_OSD_FLAG_ONDISK) == 0) { 1777 } else if ((flags & CEPH_OSD_FLAG_ONDISK) == 0) {
1744 dout("handle_reply tid %llu dup ack\n", tid); 1778 dout("handle_reply tid %llu dup ack\n", tid);
1745 mutex_unlock(&osdc->request_mutex); 1779 goto out_unlock;
1746 goto done;
1747 } 1780 }
1748 1781
1749 dout("handle_reply tid %llu flags %d\n", tid, flags); 1782 dout("handle_reply tid %llu flags %d\n", tid, flags);
@@ -1758,6 +1791,7 @@ static void handle_reply(struct ceph_osd_client *osdc, struct ceph_msg *msg,
1758 __unregister_request(osdc, req); 1791 __unregister_request(osdc, req);
1759 1792
1760 mutex_unlock(&osdc->request_mutex); 1793 mutex_unlock(&osdc->request_mutex);
1794 up_read(&osdc->map_sem);
1761 1795
1762 if (!already_completed) { 1796 if (!already_completed) {
1763 if (req->r_unsafe_callback && 1797 if (req->r_unsafe_callback &&
@@ -1775,10 +1809,14 @@ static void handle_reply(struct ceph_osd_client *osdc, struct ceph_msg *msg,
1775 complete_request(req); 1809 complete_request(req);
1776 } 1810 }
1777 1811
1778done: 1812out:
1779 dout("req=%p req->r_linger=%d\n", req, req->r_linger); 1813 dout("req=%p req->r_linger=%d\n", req, req->r_linger);
1780 ceph_osdc_put_request(req); 1814 ceph_osdc_put_request(req);
1781 return; 1815 return;
1816out_unlock:
1817 mutex_unlock(&osdc->request_mutex);
1818 up_read(&osdc->map_sem);
1819 goto out;
1782 1820
1783bad_put: 1821bad_put:
1784 req->r_result = -EIO; 1822 req->r_result = -EIO;
@@ -1791,6 +1829,7 @@ bad_put:
1791 ceph_osdc_put_request(req); 1829 ceph_osdc_put_request(req);
1792bad_mutex: 1830bad_mutex:
1793 mutex_unlock(&osdc->request_mutex); 1831 mutex_unlock(&osdc->request_mutex);
1832 up_read(&osdc->map_sem);
1794bad: 1833bad:
1795 pr_err("corrupt osd_op_reply got %d %d\n", 1834 pr_err("corrupt osd_op_reply got %d %d\n",
1796 (int)msg->front.iov_len, le32_to_cpu(msg->hdr.front_len)); 1835 (int)msg->front.iov_len, le32_to_cpu(msg->hdr.front_len));
@@ -2351,34 +2390,16 @@ int ceph_osdc_start_request(struct ceph_osd_client *osdc,
2351 struct ceph_osd_request *req, 2390 struct ceph_osd_request *req,
2352 bool nofail) 2391 bool nofail)
2353{ 2392{
2354 int rc = 0; 2393 int rc;
2355 2394
2356 down_read(&osdc->map_sem); 2395 down_read(&osdc->map_sem);
2357 mutex_lock(&osdc->request_mutex); 2396 mutex_lock(&osdc->request_mutex);
2358 __register_request(osdc, req); 2397
2359 req->r_sent = 0; 2398 rc = __ceph_osdc_start_request(osdc, req, nofail);
2360 req->r_got_reply = 0; 2399
2361 rc = __map_request(osdc, req, 0);
2362 if (rc < 0) {
2363 if (nofail) {
2364 dout("osdc_start_request failed map, "
2365 " will retry %lld\n", req->r_tid);
2366 rc = 0;
2367 } else {
2368 __unregister_request(osdc, req);
2369 }
2370 goto out_unlock;
2371 }
2372 if (req->r_osd == NULL) {
2373 dout("send_request %p no up osds in pg\n", req);
2374 ceph_monc_request_next_osdmap(&osdc->client->monc);
2375 } else {
2376 __send_queued(osdc);
2377 }
2378 rc = 0;
2379out_unlock:
2380 mutex_unlock(&osdc->request_mutex); 2400 mutex_unlock(&osdc->request_mutex);
2381 up_read(&osdc->map_sem); 2401 up_read(&osdc->map_sem);
2402
2382 return rc; 2403 return rc;
2383} 2404}
2384EXPORT_SYMBOL(ceph_osdc_start_request); 2405EXPORT_SYMBOL(ceph_osdc_start_request);
@@ -2504,9 +2525,12 @@ int ceph_osdc_init(struct ceph_osd_client *osdc, struct ceph_client *client)
2504 err = -ENOMEM; 2525 err = -ENOMEM;
2505 osdc->notify_wq = create_singlethread_workqueue("ceph-watch-notify"); 2526 osdc->notify_wq = create_singlethread_workqueue("ceph-watch-notify");
2506 if (!osdc->notify_wq) 2527 if (!osdc->notify_wq)
2507 goto out_msgpool; 2528 goto out_msgpool_reply;
2529
2508 return 0; 2530 return 0;
2509 2531
2532out_msgpool_reply:
2533 ceph_msgpool_destroy(&osdc->msgpool_op_reply);
2510out_msgpool: 2534out_msgpool:
2511 ceph_msgpool_destroy(&osdc->msgpool_op); 2535 ceph_msgpool_destroy(&osdc->msgpool_op);
2512out_mempool: 2536out_mempool:
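
[Editor's note] The osd_client change factors the register/map/send steps into __ceph_osdc_start_request() so that handle_reply(), which already holds map_sem and request_mutex when it resubmits a redirected request, can call the helper directly instead of the public ceph_osdc_start_request() that takes both locks itself. A hedged userspace sketch of that locked-helper/public-wrapper split; all names here are illustrative.

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t request_mutex = PTHREAD_MUTEX_INITIALIZER;
static int queued_requests;

/* Caller must hold request_mutex. */
static int __start_request(int tid)
{
        queued_requests++;
        printf("queued request %d (total %d)\n", tid, queued_requests);
        return 0;
}

/* Public entry point: takes the lock, then uses the helper. */
static int start_request(int tid)
{
        int rc;

        pthread_mutex_lock(&request_mutex);
        rc = __start_request(tid);
        pthread_mutex_unlock(&request_mutex);
        return rc;
}

/* A path that already holds the lock (like handle_reply on redirect)
 * calls the __locked helper directly to avoid self-deadlock.
 */
static int resubmit_locked(int tid)
{
        return __start_request(tid);
}

int main(void)
{
        start_request(1);
        pthread_mutex_lock(&request_mutex);
        resubmit_locked(2);
        pthread_mutex_unlock(&request_mutex);
        return 0;
}
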
diff --git a/net/core/dev.c b/net/core/dev.c
index 3721db716350..4ad1b78c9c77 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -2803,7 +2803,7 @@ EXPORT_SYMBOL(dev_loopback_xmit);
2803 * the BH enable code must have IRQs enabled so that it will not deadlock. 2803 * the BH enable code must have IRQs enabled so that it will not deadlock.
2804 * --BLG 2804 * --BLG
2805 */ 2805 */
2806int __dev_queue_xmit(struct sk_buff *skb, void *accel_priv) 2806static int __dev_queue_xmit(struct sk_buff *skb, void *accel_priv)
2807{ 2807{
2808 struct net_device *dev = skb->dev; 2808 struct net_device *dev = skb->dev;
2809 struct netdev_queue *txq; 2809 struct netdev_queue *txq;
@@ -4637,7 +4637,7 @@ struct net_device *netdev_master_upper_dev_get_rcu(struct net_device *dev)
4637} 4637}
4638EXPORT_SYMBOL(netdev_master_upper_dev_get_rcu); 4638EXPORT_SYMBOL(netdev_master_upper_dev_get_rcu);
4639 4639
4640int netdev_adjacent_sysfs_add(struct net_device *dev, 4640static int netdev_adjacent_sysfs_add(struct net_device *dev,
4641 struct net_device *adj_dev, 4641 struct net_device *adj_dev,
4642 struct list_head *dev_list) 4642 struct list_head *dev_list)
4643{ 4643{
@@ -4647,7 +4647,7 @@ int netdev_adjacent_sysfs_add(struct net_device *dev,
4647 return sysfs_create_link(&(dev->dev.kobj), &(adj_dev->dev.kobj), 4647 return sysfs_create_link(&(dev->dev.kobj), &(adj_dev->dev.kobj),
4648 linkname); 4648 linkname);
4649} 4649}
4650void netdev_adjacent_sysfs_del(struct net_device *dev, 4650static void netdev_adjacent_sysfs_del(struct net_device *dev,
4651 char *name, 4651 char *name,
4652 struct list_head *dev_list) 4652 struct list_head *dev_list)
4653{ 4653{
diff --git a/net/core/fib_rules.c b/net/core/fib_rules.c
index f409e0bd35c0..185c341fafbd 100644
--- a/net/core/fib_rules.c
+++ b/net/core/fib_rules.c
@@ -745,6 +745,13 @@ static int fib_rules_event(struct notifier_block *this, unsigned long event,
745 attach_rules(&ops->rules_list, dev); 745 attach_rules(&ops->rules_list, dev);
746 break; 746 break;
747 747
748 case NETDEV_CHANGENAME:
749 list_for_each_entry(ops, &net->rules_ops, list) {
750 detach_rules(&ops->rules_list, dev);
751 attach_rules(&ops->rules_list, dev);
752 }
753 break;
754
748 case NETDEV_UNREGISTER: 755 case NETDEV_UNREGISTER:
749 list_for_each_entry(ops, &net->rules_ops, list) 756 list_for_each_entry(ops, &net->rules_ops, list)
750 detach_rules(&ops->rules_list, dev); 757 detach_rules(&ops->rules_list, dev);
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index c03f3dec4763..a664f7829a6d 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -948,6 +948,7 @@ int netpoll_parse_options(struct netpoll *np, char *opt)
948{ 948{
949 char *cur=opt, *delim; 949 char *cur=opt, *delim;
950 int ipv6; 950 int ipv6;
951 bool ipversion_set = false;
951 952
952 if (*cur != '@') { 953 if (*cur != '@') {
953 if ((delim = strchr(cur, '@')) == NULL) 954 if ((delim = strchr(cur, '@')) == NULL)
@@ -960,6 +961,7 @@ int netpoll_parse_options(struct netpoll *np, char *opt)
960 cur++; 961 cur++;
961 962
962 if (*cur != '/') { 963 if (*cur != '/') {
964 ipversion_set = true;
963 if ((delim = strchr(cur, '/')) == NULL) 965 if ((delim = strchr(cur, '/')) == NULL)
964 goto parse_failed; 966 goto parse_failed;
965 *delim = 0; 967 *delim = 0;
@@ -1002,7 +1004,7 @@ int netpoll_parse_options(struct netpoll *np, char *opt)
1002 ipv6 = netpoll_parse_ip_addr(cur, &np->remote_ip); 1004 ipv6 = netpoll_parse_ip_addr(cur, &np->remote_ip);
1003 if (ipv6 < 0) 1005 if (ipv6 < 0)
1004 goto parse_failed; 1006 goto parse_failed;
1005 else if (np->ipv6 != (bool)ipv6) 1007 else if (ipversion_set && np->ipv6 != (bool)ipv6)
1006 goto parse_failed; 1008 goto parse_failed;
1007 else 1009 else
1008 np->ipv6 = (bool)ipv6; 1010 np->ipv6 = (bool)ipv6;
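
[Editor's note] The netpoll fix only rejects a local/remote address-family mismatch when a local address was actually supplied (ipversion_set); previously a config with no local address was compared against the zero-initialized np->ipv6 default. A small sketch of the corrected check, with made-up names.

#include <stdio.h>

/* Returns 0 on success, -1 when an explicitly given local address
 * disagrees with the remote address family.
 */
static int check_families(int local_given, int local_is_v6, int remote_is_v6)
{
        if (local_given && local_is_v6 != remote_is_v6)
                return -1;      /* mismatch only matters if local was set */
        return 0;
}

int main(void)
{
        printf("%d\n", check_families(0, 0, 1));       /* ok: no local given */
        printf("%d\n", check_families(1, 0, 1));       /* error: v4 local, v6 remote */
        return 0;
}
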
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index 393b1bc9a618..048dc8d183aa 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -374,7 +374,7 @@ static size_t rtnl_link_get_slave_info_data_size(const struct net_device *dev)
374 if (!master_dev) 374 if (!master_dev)
375 return 0; 375 return 0;
376 ops = master_dev->rtnl_link_ops; 376 ops = master_dev->rtnl_link_ops;
377 if (!ops->get_slave_size) 377 if (!ops || !ops->get_slave_size)
378 return 0; 378 return 0;
379 /* IFLA_INFO_SLAVE_DATA + nested data */ 379 /* IFLA_INFO_SLAVE_DATA + nested data */
380 return nla_total_size(sizeof(struct nlattr)) + 380 return nla_total_size(sizeof(struct nlattr)) +
diff --git a/net/core/sock.c b/net/core/sock.c
index 0c127dcdf6a8..5b6a9431b017 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -1775,7 +1775,9 @@ struct sk_buff *sock_alloc_send_pskb(struct sock *sk, unsigned long header_len,
1775 while (order) { 1775 while (order) {
1776 if (npages >= 1 << order) { 1776 if (npages >= 1 << order) {
1777 page = alloc_pages(sk->sk_allocation | 1777 page = alloc_pages(sk->sk_allocation |
1778 __GFP_COMP | __GFP_NOWARN, 1778 __GFP_COMP |
1779 __GFP_NOWARN |
1780 __GFP_NORETRY,
1779 order); 1781 order);
1780 if (page) 1782 if (page)
1781 goto fill_page; 1783 goto fill_page;
@@ -1845,7 +1847,7 @@ bool skb_page_frag_refill(unsigned int sz, struct page_frag *pfrag, gfp_t prio)
1845 gfp_t gfp = prio; 1847 gfp_t gfp = prio;
1846 1848
1847 if (order) 1849 if (order)
1848 gfp |= __GFP_COMP | __GFP_NOWARN; 1850 gfp |= __GFP_COMP | __GFP_NOWARN | __GFP_NORETRY;
1849 pfrag->page = alloc_pages(gfp, order); 1851 pfrag->page = alloc_pages(gfp, order);
1850 if (likely(pfrag->page)) { 1852 if (likely(pfrag->page)) {
1851 pfrag->offset = 0; 1853 pfrag->offset = 0;
diff --git a/net/decnet/af_decnet.c b/net/decnet/af_decnet.c
index 2954dcbca832..4c04848953bd 100644
--- a/net/decnet/af_decnet.c
+++ b/net/decnet/af_decnet.c
@@ -2104,8 +2104,6 @@ static struct notifier_block dn_dev_notifier = {
2104 .notifier_call = dn_device_event, 2104 .notifier_call = dn_device_event,
2105}; 2105};
2106 2106
2107extern int dn_route_rcv(struct sk_buff *, struct net_device *, struct packet_type *, struct net_device *);
2108
2109static struct packet_type dn_dix_packet_type __read_mostly = { 2107static struct packet_type dn_dix_packet_type __read_mostly = {
2110 .type = cpu_to_be16(ETH_P_DNA_RT), 2108 .type = cpu_to_be16(ETH_P_DNA_RT),
2111 .func = dn_route_rcv, 2109 .func = dn_route_rcv,
@@ -2353,9 +2351,6 @@ static const struct proto_ops dn_proto_ops = {
2353 .sendpage = sock_no_sendpage, 2351 .sendpage = sock_no_sendpage,
2354}; 2352};
2355 2353
2356void dn_register_sysctl(void);
2357void dn_unregister_sysctl(void);
2358
2359MODULE_DESCRIPTION("The Linux DECnet Network Protocol"); 2354MODULE_DESCRIPTION("The Linux DECnet Network Protocol");
2360MODULE_AUTHOR("Linux DECnet Project Team"); 2355MODULE_AUTHOR("Linux DECnet Project Team");
2361MODULE_LICENSE("GPL"); 2356MODULE_LICENSE("GPL");
diff --git a/net/ieee802154/6lowpan.c b/net/ieee802154/6lowpan.c
index 48b25c0af4d0..8edfea5da572 100644
--- a/net/ieee802154/6lowpan.c
+++ b/net/ieee802154/6lowpan.c
@@ -106,7 +106,6 @@ static int lowpan_header_create(struct sk_buff *skb,
106 unsigned short type, const void *_daddr, 106 unsigned short type, const void *_daddr,
107 const void *_saddr, unsigned int len) 107 const void *_saddr, unsigned int len)
108{ 108{
109 struct ipv6hdr *hdr;
110 const u8 *saddr = _saddr; 109 const u8 *saddr = _saddr;
111 const u8 *daddr = _daddr; 110 const u8 *daddr = _daddr;
112 struct ieee802154_addr sa, da; 111 struct ieee802154_addr sa, da;
@@ -117,8 +116,6 @@ static int lowpan_header_create(struct sk_buff *skb,
117 if (type != ETH_P_IPV6) 116 if (type != ETH_P_IPV6)
118 return 0; 117 return 0;
119 118
120 hdr = ipv6_hdr(skb);
121
122 if (!saddr) 119 if (!saddr)
123 saddr = dev->dev_addr; 120 saddr = dev->dev_addr;
124 121
@@ -533,7 +530,27 @@ static struct header_ops lowpan_header_ops = {
533 .create = lowpan_header_create, 530 .create = lowpan_header_create,
534}; 531};
535 532
533static struct lock_class_key lowpan_tx_busylock;
534static struct lock_class_key lowpan_netdev_xmit_lock_key;
535
536static void lowpan_set_lockdep_class_one(struct net_device *dev,
537 struct netdev_queue *txq,
538 void *_unused)
539{
540 lockdep_set_class(&txq->_xmit_lock,
541 &lowpan_netdev_xmit_lock_key);
542}
543
544
545static int lowpan_dev_init(struct net_device *dev)
546{
547 netdev_for_each_tx_queue(dev, lowpan_set_lockdep_class_one, NULL);
548 dev->qdisc_tx_busylock = &lowpan_tx_busylock;
549 return 0;
550}
551
536static const struct net_device_ops lowpan_netdev_ops = { 552static const struct net_device_ops lowpan_netdev_ops = {
553 .ndo_init = lowpan_dev_init,
537 .ndo_start_xmit = lowpan_xmit, 554 .ndo_start_xmit = lowpan_xmit,
538 .ndo_set_mac_address = lowpan_set_address, 555 .ndo_set_mac_address = lowpan_set_address,
539}; 556};
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
index ac2dff3c2c1c..bdbf68bb2e2d 100644
--- a/net/ipv4/devinet.c
+++ b/net/ipv4/devinet.c
@@ -1443,7 +1443,8 @@ static size_t inet_nlmsg_size(void)
1443 + nla_total_size(4) /* IFA_LOCAL */ 1443 + nla_total_size(4) /* IFA_LOCAL */
1444 + nla_total_size(4) /* IFA_BROADCAST */ 1444 + nla_total_size(4) /* IFA_BROADCAST */
1445 + nla_total_size(IFNAMSIZ) /* IFA_LABEL */ 1445 + nla_total_size(IFNAMSIZ) /* IFA_LABEL */
1446 + nla_total_size(4); /* IFA_FLAGS */ 1446 + nla_total_size(4) /* IFA_FLAGS */
1447 + nla_total_size(sizeof(struct ifa_cacheinfo)); /* IFA_CACHEINFO */
1447} 1448}
1448 1449
1449static inline u32 cstamp_delta(unsigned long cstamp) 1450static inline u32 cstamp_delta(unsigned long cstamp)
diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c
index bd28f386bd02..50228be5c17b 100644
--- a/net/ipv4/ip_tunnel.c
+++ b/net/ipv4/ip_tunnel.c
@@ -101,28 +101,22 @@ static void tunnel_dst_reset_all(struct ip_tunnel *t)
101 __tunnel_dst_set(per_cpu_ptr(t->dst_cache, i), NULL); 101 __tunnel_dst_set(per_cpu_ptr(t->dst_cache, i), NULL);
102} 102}
103 103
104static struct dst_entry *tunnel_dst_get(struct ip_tunnel *t) 104static struct rtable *tunnel_rtable_get(struct ip_tunnel *t, u32 cookie)
105{ 105{
106 struct dst_entry *dst; 106 struct dst_entry *dst;
107 107
108 rcu_read_lock(); 108 rcu_read_lock();
109 dst = rcu_dereference(this_cpu_ptr(t->dst_cache)->dst); 109 dst = rcu_dereference(this_cpu_ptr(t->dst_cache)->dst);
110 if (dst) 110 if (dst) {
111 if (dst->obsolete && dst->ops->check(dst, cookie) == NULL) {
112 rcu_read_unlock();
113 tunnel_dst_reset(t);
114 return NULL;
115 }
111 dst_hold(dst); 116 dst_hold(dst);
112 rcu_read_unlock();
113 return dst;
114}
115
116static struct dst_entry *tunnel_dst_check(struct ip_tunnel *t, u32 cookie)
117{
118 struct dst_entry *dst = tunnel_dst_get(t);
119
120 if (dst && dst->obsolete && dst->ops->check(dst, cookie) == NULL) {
121 tunnel_dst_reset(t);
122 return NULL;
123 } 117 }
124 118 rcu_read_unlock();
125 return dst; 119 return (struct rtable *)dst;
126} 120}
127 121
128/* Often modified stats are per cpu, other are shared (netdev->stats) */ 122/* Often modified stats are per cpu, other are shared (netdev->stats) */
@@ -584,7 +578,7 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
584 struct flowi4 fl4; 578 struct flowi4 fl4;
585 u8 tos, ttl; 579 u8 tos, ttl;
586 __be16 df; 580 __be16 df;
587 struct rtable *rt = NULL; /* Route to the other host */ 581 struct rtable *rt; /* Route to the other host */
588 unsigned int max_headroom; /* The extra header space needed */ 582 unsigned int max_headroom; /* The extra header space needed */
589 __be32 dst; 583 __be32 dst;
590 int err; 584 int err;
@@ -657,8 +651,7 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
657 init_tunnel_flow(&fl4, protocol, dst, tnl_params->saddr, 651 init_tunnel_flow(&fl4, protocol, dst, tnl_params->saddr,
658 tunnel->parms.o_key, RT_TOS(tos), tunnel->parms.link); 652 tunnel->parms.o_key, RT_TOS(tos), tunnel->parms.link);
659 653
660 if (connected) 654 rt = connected ? tunnel_rtable_get(tunnel, 0) : NULL;
661 rt = (struct rtable *)tunnel_dst_check(tunnel, 0);
662 655
663 if (!rt) { 656 if (!rt) {
664 rt = ip_route_output_key(tunnel->net, &fl4); 657 rt = ip_route_output_key(tunnel->net, &fl4);
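
[Editor's note] tunnel_rtable_get() merges the old get/check pair: it returns the per-cpu cached dst only if the dst's check() callback still validates it, and otherwise resets the cache and returns NULL so the caller falls back to a fresh route lookup. A small sketch of that validate-or-invalidate cache pattern, with invented names.

#include <stdio.h>
#include <stddef.h>
#include <stdbool.h>

struct route {
        int id;
        bool (*still_valid)(const struct route *rt);
};

static struct route *cached_route;

/* Return the cached route only if it still validates; otherwise drop it. */
static struct route *cache_get(void)
{
        struct route *rt = cached_route;

        if (rt && !rt->still_valid(rt)) {
                cached_route = NULL;    /* stale: force a fresh lookup */
                return NULL;
        }
        return rt;
}

static bool always_stale(const struct route *rt) { (void)rt; return false; }

int main(void)
{
        struct route rt = { .id = 42, .still_valid = always_stale };

        cached_route = &rt;
        printf("first get:  %p\n", (void *)cache_get());    /* NULL, invalidated */
        printf("second get: %p\n", (void *)cache_get());    /* NULL, cache empty */
        return 0;
}
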
diff --git a/net/ipv4/netfilter/Kconfig b/net/ipv4/netfilter/Kconfig
index 81c6910cfa92..a26ce035e3fa 100644
--- a/net/ipv4/netfilter/Kconfig
+++ b/net/ipv4/netfilter/Kconfig
@@ -61,6 +61,11 @@ config NFT_CHAIN_NAT_IPV4
61 packet transformations such as the source, destination address and 61 packet transformations such as the source, destination address and
62 source and destination ports. 62 source and destination ports.
63 63
64config NFT_REJECT_IPV4
65 depends on NF_TABLES_IPV4
66 default NFT_REJECT
67 tristate
68
64config NF_TABLES_ARP 69config NF_TABLES_ARP
65 depends on NF_TABLES 70 depends on NF_TABLES
66 tristate "ARP nf_tables support" 71 tristate "ARP nf_tables support"
diff --git a/net/ipv4/netfilter/Makefile b/net/ipv4/netfilter/Makefile
index c16be9d58420..90b82405331e 100644
--- a/net/ipv4/netfilter/Makefile
+++ b/net/ipv4/netfilter/Makefile
@@ -30,6 +30,7 @@ obj-$(CONFIG_NF_NAT_PROTO_GRE) += nf_nat_proto_gre.o
30obj-$(CONFIG_NF_TABLES_IPV4) += nf_tables_ipv4.o 30obj-$(CONFIG_NF_TABLES_IPV4) += nf_tables_ipv4.o
31obj-$(CONFIG_NFT_CHAIN_ROUTE_IPV4) += nft_chain_route_ipv4.o 31obj-$(CONFIG_NFT_CHAIN_ROUTE_IPV4) += nft_chain_route_ipv4.o
32obj-$(CONFIG_NFT_CHAIN_NAT_IPV4) += nft_chain_nat_ipv4.o 32obj-$(CONFIG_NFT_CHAIN_NAT_IPV4) += nft_chain_nat_ipv4.o
33obj-$(CONFIG_NFT_REJECT_IPV4) += nft_reject_ipv4.o
33obj-$(CONFIG_NF_TABLES_ARP) += nf_tables_arp.o 34obj-$(CONFIG_NF_TABLES_ARP) += nf_tables_arp.o
34 35
35# generic IP tables 36# generic IP tables
diff --git a/net/ipv4/netfilter/nf_nat_h323.c b/net/ipv4/netfilter/nf_nat_h323.c
index 9eea059dd621..574f7ebba0b6 100644
--- a/net/ipv4/netfilter/nf_nat_h323.c
+++ b/net/ipv4/netfilter/nf_nat_h323.c
@@ -229,7 +229,10 @@ static int nat_rtp_rtcp(struct sk_buff *skb, struct nf_conn *ct,
229 ret = nf_ct_expect_related(rtcp_exp); 229 ret = nf_ct_expect_related(rtcp_exp);
230 if (ret == 0) 230 if (ret == 0)
231 break; 231 break;
232 else if (ret != -EBUSY) { 232 else if (ret == -EBUSY) {
233 nf_ct_unexpect_related(rtp_exp);
234 continue;
235 } else if (ret < 0) {
233 nf_ct_unexpect_related(rtp_exp); 236 nf_ct_unexpect_related(rtp_exp);
234 nated_port = 0; 237 nated_port = 0;
235 break; 238 break;
diff --git a/net/ipv4/netfilter/nft_reject_ipv4.c b/net/ipv4/netfilter/nft_reject_ipv4.c
new file mode 100644
index 000000000000..e79718a382f2
--- /dev/null
+++ b/net/ipv4/netfilter/nft_reject_ipv4.c
@@ -0,0 +1,75 @@
1/*
2 * Copyright (c) 2008-2009 Patrick McHardy <kaber@trash.net>
3 * Copyright (c) 2013 Eric Leblond <eric@regit.org>
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
8 *
9 * Development of this code funded by Astaro AG (http://www.astaro.com/)
10 */
11
12#include <linux/kernel.h>
13#include <linux/init.h>
14#include <linux/module.h>
15#include <linux/netlink.h>
16#include <linux/netfilter.h>
17#include <linux/netfilter/nf_tables.h>
18#include <net/netfilter/nf_tables.h>
19#include <net/icmp.h>
20#include <net/netfilter/ipv4/nf_reject.h>
21#include <net/netfilter/nft_reject.h>
22
23void nft_reject_ipv4_eval(const struct nft_expr *expr,
24 struct nft_data data[NFT_REG_MAX + 1],
25 const struct nft_pktinfo *pkt)
26{
27 struct nft_reject *priv = nft_expr_priv(expr);
28
29 switch (priv->type) {
30 case NFT_REJECT_ICMP_UNREACH:
31 nf_send_unreach(pkt->skb, priv->icmp_code);
32 break;
33 case NFT_REJECT_TCP_RST:
34 nf_send_reset(pkt->skb, pkt->ops->hooknum);
35 break;
36 }
37
38 data[NFT_REG_VERDICT].verdict = NF_DROP;
39}
40EXPORT_SYMBOL_GPL(nft_reject_ipv4_eval);
41
42static struct nft_expr_type nft_reject_ipv4_type;
43static const struct nft_expr_ops nft_reject_ipv4_ops = {
44 .type = &nft_reject_ipv4_type,
45 .size = NFT_EXPR_SIZE(sizeof(struct nft_reject)),
46 .eval = nft_reject_ipv4_eval,
47 .init = nft_reject_init,
48 .dump = nft_reject_dump,
49};
50
51static struct nft_expr_type nft_reject_ipv4_type __read_mostly = {
52 .family = NFPROTO_IPV4,
53 .name = "reject",
54 .ops = &nft_reject_ipv4_ops,
55 .policy = nft_reject_policy,
56 .maxattr = NFTA_REJECT_MAX,
57 .owner = THIS_MODULE,
58};
59
60static int __init nft_reject_ipv4_module_init(void)
61{
62 return nft_register_expr(&nft_reject_ipv4_type);
63}
64
65static void __exit nft_reject_ipv4_module_exit(void)
66{
67 nft_unregister_expr(&nft_reject_ipv4_type);
68}
69
70module_init(nft_reject_ipv4_module_init);
71module_exit(nft_reject_ipv4_module_exit);
72
73MODULE_LICENSE("GPL");
74MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
75MODULE_ALIAS_NFT_AF_EXPR(AF_INET, "reject");
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 4475b3bb494d..9f3a2db9109e 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -2229,7 +2229,7 @@ adjudge_to_death:
2229 /* This is a (useful) BSD violating of the RFC. There is a 2229 /* This is a (useful) BSD violating of the RFC. There is a
2230 * problem with TCP as specified in that the other end could 2230 * problem with TCP as specified in that the other end could
2231 * keep a socket open forever with no application left this end. 2231 * keep a socket open forever with no application left this end.
2232 * We use a 3 minute timeout (about the same as BSD) then kill 2232 * We use a 1 minute timeout (about the same as BSD) then kill
2233 * our end. If they send after that then tough - BUT: long enough 2233 * our end. If they send after that then tough - BUT: long enough
2234 * that we won't make the old 4*rto = almost no time - whoops 2234 * that we won't make the old 4*rto = almost no time - whoops
2235 * reset mistake. 2235 * reset mistake.
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 65cf90e063d5..227cba79fa6b 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -671,6 +671,7 @@ static void tcp_rtt_estimator(struct sock *sk, const __u32 mrtt)
671{ 671{
672 struct tcp_sock *tp = tcp_sk(sk); 672 struct tcp_sock *tp = tcp_sk(sk);
673 long m = mrtt; /* RTT */ 673 long m = mrtt; /* RTT */
674 u32 srtt = tp->srtt;
674 675
675 /* The following amusing code comes from Jacobson's 676 /* The following amusing code comes from Jacobson's
676 * article in SIGCOMM '88. Note that rtt and mdev 677 * article in SIGCOMM '88. Note that rtt and mdev
@@ -688,11 +689,9 @@ static void tcp_rtt_estimator(struct sock *sk, const __u32 mrtt)
688 * does not matter how to _calculate_ it. Seems, it was trap 689 * does not matter how to _calculate_ it. Seems, it was trap
689 * that VJ failed to avoid. 8) 690 * that VJ failed to avoid. 8)
690 */ 691 */
691 if (m == 0) 692 if (srtt != 0) {
692 m = 1; 693 m -= (srtt >> 3); /* m is now error in rtt est */
693 if (tp->srtt != 0) { 694 srtt += m; /* rtt = 7/8 rtt + 1/8 new */
694 m -= (tp->srtt >> 3); /* m is now error in rtt est */
695 tp->srtt += m; /* rtt = 7/8 rtt + 1/8 new */
696 if (m < 0) { 695 if (m < 0) {
697 m = -m; /* m is now abs(error) */ 696 m = -m; /* m is now abs(error) */
698 m -= (tp->mdev >> 2); /* similar update on mdev */ 697 m -= (tp->mdev >> 2); /* similar update on mdev */
@@ -723,11 +722,12 @@ static void tcp_rtt_estimator(struct sock *sk, const __u32 mrtt)
723 } 722 }
724 } else { 723 } else {
725 /* no previous measure. */ 724 /* no previous measure. */
726 tp->srtt = m << 3; /* take the measured time to be rtt */ 725 srtt = m << 3; /* take the measured time to be rtt */
727 tp->mdev = m << 1; /* make sure rto = 3*rtt */ 726 tp->mdev = m << 1; /* make sure rto = 3*rtt */
728 tp->mdev_max = tp->rttvar = max(tp->mdev, tcp_rto_min(sk)); 727 tp->mdev_max = tp->rttvar = max(tp->mdev, tcp_rto_min(sk));
729 tp->rtt_seq = tp->snd_nxt; 728 tp->rtt_seq = tp->snd_nxt;
730 } 729 }
730 tp->srtt = max(1U, srtt);
731} 731}
732 732
733/* Set the sk_pacing_rate to allow proper sizing of TSO packets. 733/* Set the sk_pacing_rate to allow proper sizing of TSO packets.
@@ -746,8 +746,10 @@ static void tcp_update_pacing_rate(struct sock *sk)
746 746
747 rate *= max(tp->snd_cwnd, tp->packets_out); 747 rate *= max(tp->snd_cwnd, tp->packets_out);
748 748
749 /* Correction for small srtt : minimum srtt being 8 (1 jiffy << 3), 749 /* Correction for small srtt and scheduling constraints.
750 * be conservative and assume srtt = 1 (125 us instead of 1.25 ms) 750 * For small rtt, consider noise is too high, and use
751 * the minimal value (srtt = 1 -> 125 us for HZ=1000)
752 *
751 * We probably need usec resolution in the future. 753 * We probably need usec resolution in the future.
752 * Note: This also takes care of possible srtt=0 case, 754 * Note: This also takes care of possible srtt=0 case,
753 * when tcp_rtt_estimator() was not yet called. 755 * when tcp_rtt_estimator() was not yet called.
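
[Editor's note] The tcp_rtt_estimator() rewrite above updates a local copy of srtt (kept scaled by 8, so srtt>>3 is the current estimate) and clamps the result to at least 1 only at the end, instead of bumping a zero sample to 1 before the EWMA. A standalone sketch of the 7/8-old + 1/8-new smoothing it implements; not the kernel code itself.

#include <stdio.h>

/* srtt is kept scaled by 8 (as in the kernel); m is the new RTT sample. */
static long rtt_update(long srtt, long m)
{
        if (srtt != 0) {
                m -= srtt >> 3;         /* error against current estimate */
                srtt += m;              /* srtt = 7/8 srtt + 1/8 m (scaled) */
        } else {
                srtt = m << 3;          /* first sample: take it as-is */
        }
        return srtt > 0 ? srtt : 1;     /* never let the scaled srtt hit 0 */
}

int main(void)
{
        long srtt = 0;
        long samples[] = { 100, 120, 80, 0 };

        for (int i = 0; i < 4; i++) {
                srtt = rtt_update(srtt, samples[i]);
                printf("sample %ld -> srtt/8 = %ld\n", samples[i], srtt >> 3);
        }
        return 0;
}
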
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 03d26b85eab8..3be16727f058 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -698,7 +698,8 @@ static void tcp_tsq_handler(struct sock *sk)
698 if ((1 << sk->sk_state) & 698 if ((1 << sk->sk_state) &
699 (TCPF_ESTABLISHED | TCPF_FIN_WAIT1 | TCPF_CLOSING | 699 (TCPF_ESTABLISHED | TCPF_FIN_WAIT1 | TCPF_CLOSING |
700 TCPF_CLOSE_WAIT | TCPF_LAST_ACK)) 700 TCPF_CLOSE_WAIT | TCPF_LAST_ACK))
701 tcp_write_xmit(sk, tcp_current_mss(sk), 0, 0, GFP_ATOMIC); 701 tcp_write_xmit(sk, tcp_current_mss(sk), tcp_sk(sk)->nonagle,
702 0, GFP_ATOMIC);
702} 703}
703/* 704/*
704 * One tasklet per cpu tries to send more skbs. 705 * One tasklet per cpu tries to send more skbs.
@@ -1904,7 +1905,15 @@ static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
1904 1905
1905 if (atomic_read(&sk->sk_wmem_alloc) > limit) { 1906 if (atomic_read(&sk->sk_wmem_alloc) > limit) {
1906 set_bit(TSQ_THROTTLED, &tp->tsq_flags); 1907 set_bit(TSQ_THROTTLED, &tp->tsq_flags);
1907 break; 1908 /* It is possible TX completion already happened
1909 * before we set TSQ_THROTTLED, so we must
1910 * test again the condition.
1911 * We abuse smp_mb__after_clear_bit() because
1912 * there is no smp_mb__after_set_bit() yet
1913 */
1914 smp_mb__after_clear_bit();
1915 if (atomic_read(&sk->sk_wmem_alloc) > limit)
1916 break;
1908 } 1917 }
1909 1918
1910 limit = mss_now; 1919 limit = mss_now;
@@ -1977,7 +1986,7 @@ bool tcp_schedule_loss_probe(struct sock *sk)
1977 /* Schedule a loss probe in 2*RTT for SACK capable connections 1986 /* Schedule a loss probe in 2*RTT for SACK capable connections
1978 * in Open state, that are either limited by cwnd or application. 1987 * in Open state, that are either limited by cwnd or application.
1979 */ 1988 */
1980 if (sysctl_tcp_early_retrans < 3 || !rtt || !tp->packets_out || 1989 if (sysctl_tcp_early_retrans < 3 || !tp->srtt || !tp->packets_out ||
1981 !tcp_is_sack(tp) || inet_csk(sk)->icsk_ca_state != TCP_CA_Open) 1990 !tcp_is_sack(tp) || inet_csk(sk)->icsk_ca_state != TCP_CA_Open)
1982 return false; 1991 return false;
1983 1992
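
[Editor's note] The tcp_write_xmit() hunk above closes a race: after setting TSQ_THROTTLED the code issues a full barrier and re-reads sk_wmem_alloc, because a TX completion may have drained the queue between the first check and the flag becoming visible. A hedged C11-atomics sketch of the same set-flag-then-recheck pattern; names are illustrative, not kernel APIs.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_long wmem_alloc;          /* bytes currently queued */
static atomic_bool throttled;

/* Returns true if the producer should stop and wait for completions. */
static bool should_throttle(long limit)
{
        if (atomic_load(&wmem_alloc) <= limit)
                return false;

        atomic_store(&throttled, true);
        /* Full barrier so the flag is visible before we look again;
         * a completion that ran in between would have lowered wmem.
         */
        atomic_thread_fence(memory_order_seq_cst);
        return atomic_load(&wmem_alloc) > limit;
}

int main(void)
{
        atomic_store(&wmem_alloc, 1500);
        printf("throttle at limit 1000: %d\n", should_throttle(1000));
        printf("throttle at limit 2000: %d\n", should_throttle(2000));
        return 0;
}
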
diff --git a/net/ipv4/udp_offload.c b/net/ipv4/udp_offload.c
index 25f5cee3a08a..88b4023ecfcf 100644
--- a/net/ipv4/udp_offload.c
+++ b/net/ipv4/udp_offload.c
@@ -17,6 +17,8 @@
17static DEFINE_SPINLOCK(udp_offload_lock); 17static DEFINE_SPINLOCK(udp_offload_lock);
18static struct udp_offload_priv __rcu *udp_offload_base __read_mostly; 18static struct udp_offload_priv __rcu *udp_offload_base __read_mostly;
19 19
20#define udp_deref_protected(X) rcu_dereference_protected(X, lockdep_is_held(&udp_offload_lock))
21
20struct udp_offload_priv { 22struct udp_offload_priv {
21 struct udp_offload *offload; 23 struct udp_offload *offload;
22 struct rcu_head rcu; 24 struct rcu_head rcu;
@@ -100,8 +102,7 @@ out:
100 102
101int udp_add_offload(struct udp_offload *uo) 103int udp_add_offload(struct udp_offload *uo)
102{ 104{
103 struct udp_offload_priv __rcu **head = &udp_offload_base; 105 struct udp_offload_priv *new_offload = kzalloc(sizeof(*new_offload), GFP_ATOMIC);
104 struct udp_offload_priv *new_offload = kzalloc(sizeof(*new_offload), GFP_KERNEL);
105 106
106 if (!new_offload) 107 if (!new_offload)
107 return -ENOMEM; 108 return -ENOMEM;
@@ -109,8 +110,8 @@ int udp_add_offload(struct udp_offload *uo)
109 new_offload->offload = uo; 110 new_offload->offload = uo;
110 111
111 spin_lock(&udp_offload_lock); 112 spin_lock(&udp_offload_lock);
112 rcu_assign_pointer(new_offload->next, rcu_dereference(*head)); 113 new_offload->next = udp_offload_base;
113 rcu_assign_pointer(*head, new_offload); 114 rcu_assign_pointer(udp_offload_base, new_offload);
114 spin_unlock(&udp_offload_lock); 115 spin_unlock(&udp_offload_lock);
115 116
116 return 0; 117 return 0;
@@ -130,12 +131,12 @@ void udp_del_offload(struct udp_offload *uo)
130 131
131 spin_lock(&udp_offload_lock); 132 spin_lock(&udp_offload_lock);
132 133
133 uo_priv = rcu_dereference(*head); 134 uo_priv = udp_deref_protected(*head);
134 for (; uo_priv != NULL; 135 for (; uo_priv != NULL;
135 uo_priv = rcu_dereference(*head)) { 136 uo_priv = udp_deref_protected(*head)) {
136
137 if (uo_priv->offload == uo) { 137 if (uo_priv->offload == uo) {
138 rcu_assign_pointer(*head, rcu_dereference(uo_priv->next)); 138 rcu_assign_pointer(*head,
139 udp_deref_protected(uo_priv->next));
139 goto unlock; 140 goto unlock;
140 } 141 }
141 head = &uo_priv->next; 142 head = &uo_priv->next;
diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c
index f81f59686f21..f2610e157660 100644
--- a/net/ipv6/icmp.c
+++ b/net/ipv6/icmp.c
@@ -414,7 +414,7 @@ static void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info)
414 addr_type = ipv6_addr_type(&hdr->daddr); 414 addr_type = ipv6_addr_type(&hdr->daddr);
415 415
416 if (ipv6_chk_addr(net, &hdr->daddr, skb->dev, 0) || 416 if (ipv6_chk_addr(net, &hdr->daddr, skb->dev, 0) ||
417 ipv6_anycast_destination(skb)) 417 ipv6_chk_acast_addr_src(net, skb->dev, &hdr->daddr))
418 saddr = &hdr->daddr; 418 saddr = &hdr->daddr;
419 419
420 /* 420 /*
diff --git a/net/ipv6/netfilter/Kconfig b/net/ipv6/netfilter/Kconfig
index 35750df744dc..4bff1f297e39 100644
--- a/net/ipv6/netfilter/Kconfig
+++ b/net/ipv6/netfilter/Kconfig
@@ -50,6 +50,11 @@ config NFT_CHAIN_NAT_IPV6
50 packet transformations such as the source, destination address and 50 packet transformations such as the source, destination address and
51 source and destination ports. 51 source and destination ports.
52 52
53config NFT_REJECT_IPV6
54 depends on NF_TABLES_IPV6
55 default NFT_REJECT
56 tristate
57
53config IP6_NF_IPTABLES 58config IP6_NF_IPTABLES
54 tristate "IP6 tables support (required for filtering)" 59 tristate "IP6 tables support (required for filtering)"
55 depends on INET && IPV6 60 depends on INET && IPV6
diff --git a/net/ipv6/netfilter/Makefile b/net/ipv6/netfilter/Makefile
index d1b4928f34f7..70d3dd66f2cd 100644
--- a/net/ipv6/netfilter/Makefile
+++ b/net/ipv6/netfilter/Makefile
@@ -27,6 +27,7 @@ obj-$(CONFIG_NF_DEFRAG_IPV6) += nf_defrag_ipv6.o
27obj-$(CONFIG_NF_TABLES_IPV6) += nf_tables_ipv6.o 27obj-$(CONFIG_NF_TABLES_IPV6) += nf_tables_ipv6.o
28obj-$(CONFIG_NFT_CHAIN_ROUTE_IPV6) += nft_chain_route_ipv6.o 28obj-$(CONFIG_NFT_CHAIN_ROUTE_IPV6) += nft_chain_route_ipv6.o
29obj-$(CONFIG_NFT_CHAIN_NAT_IPV6) += nft_chain_nat_ipv6.o 29obj-$(CONFIG_NFT_CHAIN_NAT_IPV6) += nft_chain_nat_ipv6.o
30obj-$(CONFIG_NFT_REJECT_IPV6) += nft_reject_ipv6.o
30 31
31# matches 32# matches
32obj-$(CONFIG_IP6_NF_MATCH_AH) += ip6t_ah.o 33obj-$(CONFIG_IP6_NF_MATCH_AH) += ip6t_ah.o
diff --git a/net/ipv6/netfilter/nft_reject_ipv6.c b/net/ipv6/netfilter/nft_reject_ipv6.c
new file mode 100644
index 000000000000..0bc19fa87821
--- /dev/null
+++ b/net/ipv6/netfilter/nft_reject_ipv6.c
@@ -0,0 +1,76 @@
1/*
2 * Copyright (c) 2008-2009 Patrick McHardy <kaber@trash.net>
3 * Copyright (c) 2013 Eric Leblond <eric@regit.org>
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
8 *
9 * Development of this code funded by Astaro AG (http://www.astaro.com/)
10 */
11
12#include <linux/kernel.h>
13#include <linux/init.h>
14#include <linux/module.h>
15#include <linux/netlink.h>
16#include <linux/netfilter.h>
17#include <linux/netfilter/nf_tables.h>
18#include <net/netfilter/nf_tables.h>
19#include <net/netfilter/nft_reject.h>
20#include <net/netfilter/ipv6/nf_reject.h>
21
22void nft_reject_ipv6_eval(const struct nft_expr *expr,
23 struct nft_data data[NFT_REG_MAX + 1],
24 const struct nft_pktinfo *pkt)
25{
26 struct nft_reject *priv = nft_expr_priv(expr);
27 struct net *net = dev_net((pkt->in != NULL) ? pkt->in : pkt->out);
28
29 switch (priv->type) {
30 case NFT_REJECT_ICMP_UNREACH:
31 nf_send_unreach6(net, pkt->skb, priv->icmp_code,
32 pkt->ops->hooknum);
33 break;
34 case NFT_REJECT_TCP_RST:
35 nf_send_reset6(net, pkt->skb, pkt->ops->hooknum);
36 break;
37 }
38
39 data[NFT_REG_VERDICT].verdict = NF_DROP;
40}
41EXPORT_SYMBOL_GPL(nft_reject_ipv6_eval);
42
43static struct nft_expr_type nft_reject_ipv6_type;
44static const struct nft_expr_ops nft_reject_ipv6_ops = {
45 .type = &nft_reject_ipv6_type,
46 .size = NFT_EXPR_SIZE(sizeof(struct nft_reject)),
47 .eval = nft_reject_ipv6_eval,
48 .init = nft_reject_init,
49 .dump = nft_reject_dump,
50};
51
52static struct nft_expr_type nft_reject_ipv6_type __read_mostly = {
53 .family = NFPROTO_IPV6,
54 .name = "reject",
55 .ops = &nft_reject_ipv6_ops,
56 .policy = nft_reject_policy,
57 .maxattr = NFTA_REJECT_MAX,
58 .owner = THIS_MODULE,
59};
60
61static int __init nft_reject_ipv6_module_init(void)
62{
63 return nft_register_expr(&nft_reject_ipv6_type);
64}
65
66static void __exit nft_reject_ipv6_module_exit(void)
67{
68 nft_unregister_expr(&nft_reject_ipv6_type);
69}
70
71module_init(nft_reject_ipv6_module_init);
72module_exit(nft_reject_ipv6_module_exit);
73
74MODULE_LICENSE("GPL");
75MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
76MODULE_ALIAS_NFT_AF_EXPR(AF_INET6, "reject");
diff --git a/net/ipx/af_ipx.c b/net/ipx/af_ipx.c
index 994e28bfb32e..00b2a6d1c009 100644
--- a/net/ipx/af_ipx.c
+++ b/net/ipx/af_ipx.c
@@ -52,18 +52,12 @@
52#include <net/p8022.h> 52#include <net/p8022.h>
53#include <net/psnap.h> 53#include <net/psnap.h>
54#include <net/sock.h> 54#include <net/sock.h>
55#include <net/datalink.h>
55#include <net/tcp_states.h> 56#include <net/tcp_states.h>
57#include <net/net_namespace.h>
56 58
57#include <asm/uaccess.h> 59#include <asm/uaccess.h>
58 60
59#ifdef CONFIG_SYSCTL
60extern void ipx_register_sysctl(void);
61extern void ipx_unregister_sysctl(void);
62#else
63#define ipx_register_sysctl()
64#define ipx_unregister_sysctl()
65#endif
66
67/* Configuration Variables */ 61/* Configuration Variables */
68static unsigned char ipxcfg_max_hops = 16; 62static unsigned char ipxcfg_max_hops = 16;
69static char ipxcfg_auto_select_primary; 63static char ipxcfg_auto_select_primary;
@@ -84,15 +78,6 @@ DEFINE_SPINLOCK(ipx_interfaces_lock);
84struct ipx_interface *ipx_primary_net; 78struct ipx_interface *ipx_primary_net;
85struct ipx_interface *ipx_internal_net; 79struct ipx_interface *ipx_internal_net;
86 80
87extern int ipxrtr_add_route(__be32 network, struct ipx_interface *intrfc,
88 unsigned char *node);
89extern void ipxrtr_del_routes(struct ipx_interface *intrfc);
90extern int ipxrtr_route_packet(struct sock *sk, struct sockaddr_ipx *usipx,
91 struct iovec *iov, size_t len, int noblock);
92extern int ipxrtr_route_skb(struct sk_buff *skb);
93extern struct ipx_route *ipxrtr_lookup(__be32 net);
94extern int ipxrtr_ioctl(unsigned int cmd, void __user *arg);
95
96struct ipx_interface *ipx_interfaces_head(void) 81struct ipx_interface *ipx_interfaces_head(void)
97{ 82{
98 struct ipx_interface *rc = NULL; 83 struct ipx_interface *rc = NULL;
@@ -1986,9 +1971,6 @@ static struct notifier_block ipx_dev_notifier = {
1986 .notifier_call = ipxitf_device_event, 1971 .notifier_call = ipxitf_device_event,
1987}; 1972};
1988 1973
1989extern struct datalink_proto *make_EII_client(void);
1990extern void destroy_EII_client(struct datalink_proto *);
1991
1992static const unsigned char ipx_8022_type = 0xE0; 1974static const unsigned char ipx_8022_type = 0xE0;
1993static const unsigned char ipx_snap_id[5] = { 0x0, 0x0, 0x0, 0x81, 0x37 }; 1975static const unsigned char ipx_snap_id[5] = { 0x0, 0x0, 0x0, 0x81, 0x37 };
1994static const char ipx_EII_err_msg[] __initconst = 1976static const char ipx_EII_err_msg[] __initconst =
diff --git a/net/ipx/ipx_route.c b/net/ipx/ipx_route.c
index 30f4519b092f..c1f03185c5e1 100644
--- a/net/ipx/ipx_route.c
+++ b/net/ipx/ipx_route.c
@@ -20,15 +20,11 @@ DEFINE_RWLOCK(ipx_routes_lock);
20 20
21extern struct ipx_interface *ipx_internal_net; 21extern struct ipx_interface *ipx_internal_net;
22 22
23extern __be16 ipx_cksum(struct ipxhdr *packet, int length);
24extern struct ipx_interface *ipxitf_find_using_net(__be32 net); 23extern struct ipx_interface *ipxitf_find_using_net(__be32 net);
25extern int ipxitf_demux_socket(struct ipx_interface *intrfc, 24extern int ipxitf_demux_socket(struct ipx_interface *intrfc,
26 struct sk_buff *skb, int copy); 25 struct sk_buff *skb, int copy);
27extern int ipxitf_demux_socket(struct ipx_interface *intrfc, 26extern int ipxitf_demux_socket(struct ipx_interface *intrfc,
28 struct sk_buff *skb, int copy); 27 struct sk_buff *skb, int copy);
29extern int ipxitf_send(struct ipx_interface *intrfc, struct sk_buff *skb,
30 char *node);
31extern struct ipx_interface *ipxitf_find_using_net(__be32 net);
32 28
33struct ipx_route *ipxrtr_lookup(__be32 net) 29struct ipx_route *ipxrtr_lookup(__be32 net)
34{ 30{
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index f9ae9b85d4c1..453e974287d1 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -1021,8 +1021,10 @@ static int ieee80211_start_ap(struct wiphy *wiphy, struct net_device *dev,
1021 IEEE80211_P2P_OPPPS_ENABLE_BIT; 1021 IEEE80211_P2P_OPPPS_ENABLE_BIT;
1022 1022
1023 err = ieee80211_assign_beacon(sdata, &params->beacon); 1023 err = ieee80211_assign_beacon(sdata, &params->beacon);
1024 if (err < 0) 1024 if (err < 0) {
1025 ieee80211_vif_release_channel(sdata);
1025 return err; 1026 return err;
1027 }
1026 changed |= err; 1028 changed |= err;
1027 1029
1028 err = drv_start_ap(sdata->local, sdata); 1030 err = drv_start_ap(sdata->local, sdata);
@@ -1032,6 +1034,7 @@ static int ieee80211_start_ap(struct wiphy *wiphy, struct net_device *dev,
1032 if (old) 1034 if (old)
1033 kfree_rcu(old, rcu_head); 1035 kfree_rcu(old, rcu_head);
1034 RCU_INIT_POINTER(sdata->u.ap.beacon, NULL); 1036 RCU_INIT_POINTER(sdata->u.ap.beacon, NULL);
1037 ieee80211_vif_release_channel(sdata);
1035 return err; 1038 return err;
1036 } 1039 }
1037 1040
@@ -1090,8 +1093,6 @@ static int ieee80211_stop_ap(struct wiphy *wiphy, struct net_device *dev)
1090 kfree(sdata->u.ap.next_beacon); 1093 kfree(sdata->u.ap.next_beacon);
1091 sdata->u.ap.next_beacon = NULL; 1094 sdata->u.ap.next_beacon = NULL;
1092 1095
1093 cancel_work_sync(&sdata->u.ap.request_smps_work);
1094
1095 /* turn off carrier for this interface and dependent VLANs */ 1096 /* turn off carrier for this interface and dependent VLANs */
1096 list_for_each_entry(vlan, &sdata->u.ap.vlans, u.vlan.list) 1097 list_for_each_entry(vlan, &sdata->u.ap.vlans, u.vlan.list)
1097 netif_carrier_off(vlan->dev); 1098 netif_carrier_off(vlan->dev);
@@ -1103,6 +1104,7 @@ static int ieee80211_stop_ap(struct wiphy *wiphy, struct net_device *dev)
1103 kfree_rcu(old_beacon, rcu_head); 1104 kfree_rcu(old_beacon, rcu_head);
1104 if (old_probe_resp) 1105 if (old_probe_resp)
1105 kfree_rcu(old_probe_resp, rcu_head); 1106 kfree_rcu(old_probe_resp, rcu_head);
1107 sdata->u.ap.driver_smps_mode = IEEE80211_SMPS_OFF;
1106 1108
1107 __sta_info_flush(sdata, true); 1109 __sta_info_flush(sdata, true);
1108 ieee80211_free_keys(sdata, true); 1110 ieee80211_free_keys(sdata, true);
@@ -2638,6 +2640,24 @@ static int ieee80211_start_roc_work(struct ieee80211_local *local,
2638 INIT_DELAYED_WORK(&roc->work, ieee80211_sw_roc_work); 2640 INIT_DELAYED_WORK(&roc->work, ieee80211_sw_roc_work);
2639 INIT_LIST_HEAD(&roc->dependents); 2641 INIT_LIST_HEAD(&roc->dependents);
2640 2642
2643 /*
2644 * cookie is either the roc cookie (for normal roc)
2645 * or the SKB (for mgmt TX)
2646 */
2647 if (!txskb) {
2648 /* local->mtx protects this */
2649 local->roc_cookie_counter++;
2650 roc->cookie = local->roc_cookie_counter;
2651 /* wow, you wrapped 64 bits ... more likely a bug */
2652 if (WARN_ON(roc->cookie == 0)) {
2653 roc->cookie = 1;
2654 local->roc_cookie_counter++;
2655 }
2656 *cookie = roc->cookie;
2657 } else {
2658 *cookie = (unsigned long)txskb;
2659 }
2660
2641 /* if there's one pending or we're scanning, queue this one */ 2661 /* if there's one pending or we're scanning, queue this one */
2642 if (!list_empty(&local->roc_list) || 2662 if (!list_empty(&local->roc_list) ||
2643 local->scanning || local->radar_detect_enabled) 2663 local->scanning || local->radar_detect_enabled)
@@ -2772,24 +2792,6 @@ static int ieee80211_start_roc_work(struct ieee80211_local *local,
2772 if (!queued) 2792 if (!queued)
2773 list_add_tail(&roc->list, &local->roc_list); 2793 list_add_tail(&roc->list, &local->roc_list);
2774 2794
2775 /*
2776 * cookie is either the roc cookie (for normal roc)
2777 * or the SKB (for mgmt TX)
2778 */
2779 if (!txskb) {
2780 /* local->mtx protects this */
2781 local->roc_cookie_counter++;
2782 roc->cookie = local->roc_cookie_counter;
2783 /* wow, you wrapped 64 bits ... more likely a bug */
2784 if (WARN_ON(roc->cookie == 0)) {
2785 roc->cookie = 1;
2786 local->roc_cookie_counter++;
2787 }
2788 *cookie = roc->cookie;
2789 } else {
2790 *cookie = (unsigned long)txskb;
2791 }
2792
2793 return 0; 2795 return 0;
2794} 2796}
2795 2797
diff --git a/net/mac80211/ht.c b/net/mac80211/ht.c
index fab7b91923e0..70dd013de836 100644
--- a/net/mac80211/ht.c
+++ b/net/mac80211/ht.c
@@ -466,7 +466,9 @@ void ieee80211_request_smps_ap_work(struct work_struct *work)
466 u.ap.request_smps_work); 466 u.ap.request_smps_work);
467 467
468 sdata_lock(sdata); 468 sdata_lock(sdata);
469 __ieee80211_request_smps_ap(sdata, sdata->u.ap.driver_smps_mode); 469 if (sdata_dereference(sdata->u.ap.beacon, sdata))
470 __ieee80211_request_smps_ap(sdata,
471 sdata->u.ap.driver_smps_mode);
470 sdata_unlock(sdata); 472 sdata_unlock(sdata);
471} 473}
472 474
diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c
index 771080ec7212..2796a198728f 100644
--- a/net/mac80211/ibss.c
+++ b/net/mac80211/ibss.c
@@ -695,12 +695,9 @@ static void ieee80211_ibss_disconnect(struct ieee80211_sub_if_data *sdata)
695 struct cfg80211_bss *cbss; 695 struct cfg80211_bss *cbss;
696 struct beacon_data *presp; 696 struct beacon_data *presp;
697 struct sta_info *sta; 697 struct sta_info *sta;
698 int active_ibss;
699 u16 capability; 698 u16 capability;
700 699
701 active_ibss = ieee80211_sta_active_ibss(sdata); 700 if (!is_zero_ether_addr(ifibss->bssid)) {
702
703 if (!active_ibss && !is_zero_ether_addr(ifibss->bssid)) {
704 capability = WLAN_CAPABILITY_IBSS; 701 capability = WLAN_CAPABILITY_IBSS;
705 702
706 if (ifibss->privacy) 703 if (ifibss->privacy)
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
index 3dfd20a453ab..d6d1f1df9119 100644
--- a/net/mac80211/iface.c
+++ b/net/mac80211/iface.c
@@ -418,20 +418,24 @@ int ieee80211_add_virtual_monitor(struct ieee80211_local *local)
418 return ret; 418 return ret;
419 } 419 }
420 420
421 mutex_lock(&local->iflist_mtx);
422 rcu_assign_pointer(local->monitor_sdata, sdata);
423 mutex_unlock(&local->iflist_mtx);
424
421 mutex_lock(&local->mtx); 425 mutex_lock(&local->mtx);
422 ret = ieee80211_vif_use_channel(sdata, &local->monitor_chandef, 426 ret = ieee80211_vif_use_channel(sdata, &local->monitor_chandef,
423 IEEE80211_CHANCTX_EXCLUSIVE); 427 IEEE80211_CHANCTX_EXCLUSIVE);
424 mutex_unlock(&local->mtx); 428 mutex_unlock(&local->mtx);
425 if (ret) { 429 if (ret) {
430 mutex_lock(&local->iflist_mtx);
431 rcu_assign_pointer(local->monitor_sdata, NULL);
432 mutex_unlock(&local->iflist_mtx);
433 synchronize_net();
426 drv_remove_interface(local, sdata); 434 drv_remove_interface(local, sdata);
427 kfree(sdata); 435 kfree(sdata);
428 return ret; 436 return ret;
429 } 437 }
430 438
431 mutex_lock(&local->iflist_mtx);
432 rcu_assign_pointer(local->monitor_sdata, sdata);
433 mutex_unlock(&local->iflist_mtx);
434
435 return 0; 439 return 0;
436} 440}
437 441
@@ -770,12 +774,19 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
770 774
771 ieee80211_roc_purge(local, sdata); 775 ieee80211_roc_purge(local, sdata);
772 776
773 if (sdata->vif.type == NL80211_IFTYPE_STATION) 777 switch (sdata->vif.type) {
778 case NL80211_IFTYPE_STATION:
774 ieee80211_mgd_stop(sdata); 779 ieee80211_mgd_stop(sdata);
775 780 break;
776 if (sdata->vif.type == NL80211_IFTYPE_ADHOC) 781 case NL80211_IFTYPE_ADHOC:
777 ieee80211_ibss_stop(sdata); 782 ieee80211_ibss_stop(sdata);
778 783 break;
784 case NL80211_IFTYPE_AP:
785 cancel_work_sync(&sdata->u.ap.request_smps_work);
786 break;
787 default:
788 break;
789 }
779 790
780 /* 791 /*
781 * Remove all stations associated with this interface. 792 * Remove all stations associated with this interface.
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index 27c990bf2320..97a02d3f7d87 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -878,7 +878,7 @@ static int ieee80211_fragment(struct ieee80211_tx_data *tx,
878 } 878 }
879 879
880 /* adjust first fragment's length */ 880 /* adjust first fragment's length */
881 skb->len = hdrlen + per_fragm; 881 skb_trim(skb, hdrlen + per_fragm);
882 return 0; 882 return 0;
883} 883}
884 884
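For context on the tx.c hunk above: skb_trim() shortens a linear buffer by updating both its length and its tail, whereas the old bare assignment to skb->len left the tail where it was. Below is a minimal userspace sketch of that invariant; the struct and helper are illustrative stand-ins, not the kernel's sk_buff.

#include <assert.h>
#include <stdio.h>

/* Toy buffer: "len" and "tail" must stay in sync, as with a linear skb. */
struct buf {
	unsigned char data[256];
	unsigned int len;	/* bytes considered valid */
	unsigned int tail;	/* offset one past the last valid byte */
};

/* Shrink the buffer to "len" bytes, updating both fields together. */
static void buf_trim(struct buf *b, unsigned int len)
{
	if (b->len > len) {
		b->len = len;
		b->tail = len;
	}
}

int main(void)
{
	struct buf b = { .len = 100, .tail = 100 };

	buf_trim(&b, 40);		/* analogous to skb_trim(skb, hdrlen + per_fragm) */
	assert(b.len == b.tail);	/* writing b.len = 40 directly would break this */
	printf("len=%u tail=%u\n", b.len, b.tail);
	return 0;
}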
diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
index c37467562fd0..e9410d17619d 100644
--- a/net/netfilter/Kconfig
+++ b/net/netfilter/Kconfig
@@ -513,7 +513,6 @@ config NFT_QUEUE
513 513
514config NFT_REJECT 514config NFT_REJECT
515 depends on NF_TABLES 515 depends on NF_TABLES
516 depends on NF_TABLES_IPV6 || !NF_TABLES_IPV6
517 default m if NETFILTER_ADVANCED=n 516 default m if NETFILTER_ADVANCED=n
518 tristate "Netfilter nf_tables reject support" 517 tristate "Netfilter nf_tables reject support"
519 help 518 help
@@ -521,6 +520,11 @@ config NFT_REJECT
521 explicitly deny and notify via TCP reset/ICMP informational errors 520 explicitly deny and notify via TCP reset/ICMP informational errors
522 unallowed traffic. 521 unallowed traffic.
523 522
523config NFT_REJECT_INET
524 depends on NF_TABLES_INET
525 default NFT_REJECT
526 tristate
527
524config NFT_COMPAT 528config NFT_COMPAT
525 depends on NF_TABLES 529 depends on NF_TABLES
526 depends on NETFILTER_XTABLES 530 depends on NETFILTER_XTABLES
diff --git a/net/netfilter/Makefile b/net/netfilter/Makefile
index ee9c4de5f8ed..bffdad774da7 100644
--- a/net/netfilter/Makefile
+++ b/net/netfilter/Makefile
@@ -79,6 +79,7 @@ obj-$(CONFIG_NFT_LIMIT) += nft_limit.o
79obj-$(CONFIG_NFT_NAT) += nft_nat.o 79obj-$(CONFIG_NFT_NAT) += nft_nat.o
80obj-$(CONFIG_NFT_QUEUE) += nft_queue.o 80obj-$(CONFIG_NFT_QUEUE) += nft_queue.o
81obj-$(CONFIG_NFT_REJECT) += nft_reject.o 81obj-$(CONFIG_NFT_REJECT) += nft_reject.o
82obj-$(CONFIG_NFT_REJECT_INET) += nft_reject_inet.o
82obj-$(CONFIG_NFT_RBTREE) += nft_rbtree.o 83obj-$(CONFIG_NFT_RBTREE) += nft_rbtree.o
83obj-$(CONFIG_NFT_HASH) += nft_hash.o 84obj-$(CONFIG_NFT_HASH) += nft_hash.o
84obj-$(CONFIG_NFT_COUNTER) += nft_counter.o 85obj-$(CONFIG_NFT_COUNTER) += nft_counter.o
diff --git a/net/netfilter/ipvs/ip_vs_conn.c b/net/netfilter/ipvs/ip_vs_conn.c
index 59a1a85bcb3e..a8eb0a89326a 100644
--- a/net/netfilter/ipvs/ip_vs_conn.c
+++ b/net/netfilter/ipvs/ip_vs_conn.c
@@ -871,11 +871,11 @@ ip_vs_conn_new(const struct ip_vs_conn_param *p,
871 cp->protocol = p->protocol; 871 cp->protocol = p->protocol;
872 ip_vs_addr_set(p->af, &cp->caddr, p->caddr); 872 ip_vs_addr_set(p->af, &cp->caddr, p->caddr);
873 cp->cport = p->cport; 873 cp->cport = p->cport;
874 ip_vs_addr_set(p->af, &cp->vaddr, p->vaddr); 874 /* proto should only be IPPROTO_IP if p->vaddr is a fwmark */
875 cp->vport = p->vport;
876 /* proto should only be IPPROTO_IP if d_addr is a fwmark */
877 ip_vs_addr_set(p->protocol == IPPROTO_IP ? AF_UNSPEC : p->af, 875 ip_vs_addr_set(p->protocol == IPPROTO_IP ? AF_UNSPEC : p->af,
878 &cp->daddr, daddr); 876 &cp->vaddr, p->vaddr);
877 cp->vport = p->vport;
878 ip_vs_addr_set(p->af, &cp->daddr, daddr);
879 cp->dport = dport; 879 cp->dport = dport;
880 cp->flags = flags; 880 cp->flags = flags;
881 cp->fwmark = fwmark; 881 cp->fwmark = fwmark;
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index 8824ed0ccc9c..356bef519fe5 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -312,6 +312,21 @@ static void death_by_timeout(unsigned long ul_conntrack)
312 nf_ct_delete((struct nf_conn *)ul_conntrack, 0, 0); 312 nf_ct_delete((struct nf_conn *)ul_conntrack, 0, 0);
313} 313}
314 314
315static inline bool
316nf_ct_key_equal(struct nf_conntrack_tuple_hash *h,
317 const struct nf_conntrack_tuple *tuple,
318 u16 zone)
319{
320 struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(h);
321
322 /* A conntrack can be recreated with the equal tuple,
323 * so we need to check that the conntrack is confirmed
324 */
325 return nf_ct_tuple_equal(tuple, &h->tuple) &&
326 nf_ct_zone(ct) == zone &&
327 nf_ct_is_confirmed(ct);
328}
329
315/* 330/*
316 * Warning : 331 * Warning :
317 * - Caller must take a reference on returned object 332 * - Caller must take a reference on returned object
@@ -333,8 +348,7 @@ ____nf_conntrack_find(struct net *net, u16 zone,
333 local_bh_disable(); 348 local_bh_disable();
334begin: 349begin:
335 hlist_nulls_for_each_entry_rcu(h, n, &net->ct.hash[bucket], hnnode) { 350 hlist_nulls_for_each_entry_rcu(h, n, &net->ct.hash[bucket], hnnode) {
336 if (nf_ct_tuple_equal(tuple, &h->tuple) && 351 if (nf_ct_key_equal(h, tuple, zone)) {
337 nf_ct_zone(nf_ct_tuplehash_to_ctrack(h)) == zone) {
338 NF_CT_STAT_INC(net, found); 352 NF_CT_STAT_INC(net, found);
339 local_bh_enable(); 353 local_bh_enable();
340 return h; 354 return h;
@@ -372,8 +386,7 @@ begin:
372 !atomic_inc_not_zero(&ct->ct_general.use))) 386 !atomic_inc_not_zero(&ct->ct_general.use)))
373 h = NULL; 387 h = NULL;
374 else { 388 else {
375 if (unlikely(!nf_ct_tuple_equal(tuple, &h->tuple) || 389 if (unlikely(!nf_ct_key_equal(h, tuple, zone))) {
376 nf_ct_zone(ct) != zone)) {
377 nf_ct_put(ct); 390 nf_ct_put(ct);
378 goto begin; 391 goto begin;
379 } 392 }
@@ -435,7 +448,9 @@ nf_conntrack_hash_check_insert(struct nf_conn *ct)
435 goto out; 448 goto out;
436 449
437 add_timer(&ct->timeout); 450 add_timer(&ct->timeout);
438 nf_conntrack_get(&ct->ct_general); 451 smp_wmb();
452 /* The caller holds a reference to this object */
453 atomic_set(&ct->ct_general.use, 2);
439 __nf_conntrack_hash_insert(ct, hash, repl_hash); 454 __nf_conntrack_hash_insert(ct, hash, repl_hash);
440 NF_CT_STAT_INC(net, insert); 455 NF_CT_STAT_INC(net, insert);
441 spin_unlock_bh(&nf_conntrack_lock); 456 spin_unlock_bh(&nf_conntrack_lock);
@@ -449,6 +464,21 @@ out:
449} 464}
450EXPORT_SYMBOL_GPL(nf_conntrack_hash_check_insert); 465EXPORT_SYMBOL_GPL(nf_conntrack_hash_check_insert);
451 466
467/* deletion from this larval template list happens via nf_ct_put() */
468void nf_conntrack_tmpl_insert(struct net *net, struct nf_conn *tmpl)
469{
470 __set_bit(IPS_TEMPLATE_BIT, &tmpl->status);
471 __set_bit(IPS_CONFIRMED_BIT, &tmpl->status);
472 nf_conntrack_get(&tmpl->ct_general);
473
474 spin_lock_bh(&nf_conntrack_lock);
475 /* Overload tuple linked list to put us in template list. */
476 hlist_nulls_add_head_rcu(&tmpl->tuplehash[IP_CT_DIR_ORIGINAL].hnnode,
477 &net->ct.tmpl);
478 spin_unlock_bh(&nf_conntrack_lock);
479}
480EXPORT_SYMBOL_GPL(nf_conntrack_tmpl_insert);
481
452/* Confirm a connection given skb; places it in hash table */ 482/* Confirm a connection given skb; places it in hash table */
453int 483int
454__nf_conntrack_confirm(struct sk_buff *skb) 484__nf_conntrack_confirm(struct sk_buff *skb)
@@ -720,11 +750,10 @@ __nf_conntrack_alloc(struct net *net, u16 zone,
720 nf_ct_zone->id = zone; 750 nf_ct_zone->id = zone;
721 } 751 }
722#endif 752#endif
723 /* 753 /* Because we use RCU lookups, we set ct_general.use to zero before
724 * changes to lookup keys must be done before setting refcnt to 1 754 * this is inserted in any list.
725 */ 755 */
726 smp_wmb(); 756 atomic_set(&ct->ct_general.use, 0);
727 atomic_set(&ct->ct_general.use, 1);
728 return ct; 757 return ct;
729 758
730#ifdef CONFIG_NF_CONNTRACK_ZONES 759#ifdef CONFIG_NF_CONNTRACK_ZONES
@@ -748,6 +777,11 @@ void nf_conntrack_free(struct nf_conn *ct)
748{ 777{
749 struct net *net = nf_ct_net(ct); 778 struct net *net = nf_ct_net(ct);
750 779
780 /* A freed object has refcnt == 0, that's
781 * the golden rule for SLAB_DESTROY_BY_RCU
782 */
783 NF_CT_ASSERT(atomic_read(&ct->ct_general.use) == 0);
784
751 nf_ct_ext_destroy(ct); 785 nf_ct_ext_destroy(ct);
752 nf_ct_ext_free(ct); 786 nf_ct_ext_free(ct);
753 kmem_cache_free(net->ct.nf_conntrack_cachep, ct); 787 kmem_cache_free(net->ct.nf_conntrack_cachep, ct);
@@ -843,6 +877,9 @@ init_conntrack(struct net *net, struct nf_conn *tmpl,
843 NF_CT_STAT_INC(net, new); 877 NF_CT_STAT_INC(net, new);
844 } 878 }
845 879
880 /* Now it is inserted into the unconfirmed list, bump refcount */
881 nf_conntrack_get(&ct->ct_general);
882
846 /* Overload tuple linked list to put us in unconfirmed list. */ 883 /* Overload tuple linked list to put us in unconfirmed list. */
847 hlist_nulls_add_head_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode, 884 hlist_nulls_add_head_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode,
848 &net->ct.unconfirmed); 885 &net->ct.unconfirmed);
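The nf_conntrack_core.c hunks above fold the tuple, zone and "confirmed" checks into a single nf_ct_key_equal() helper and keep the reference count at zero until the entry is actually published (the diff's "golden rule" for SLAB_DESTROY_BY_RCU). A simplified, userspace-only sketch of that lookup key and publish step follows; the struct layout and helper names are illustrative, not the kernel's.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct tuple { unsigned int src, dst; unsigned short proto; };

struct conn {
	struct tuple tuple;
	unsigned short zone;
	bool confirmed;
	atomic_int use;			/* stays 0 until the entry is published */
};

/* Mirror of the nf_ct_key_equal() idea: a recycled slot may already hold a
 * new, not-yet-confirmed connection with the same tuple, so "confirmed" is
 * part of the lookup key. */
static bool key_equal(const struct conn *c, const struct tuple *t,
		      unsigned short zone)
{
	return c->tuple.src == t->src && c->tuple.dst == t->dst &&
	       c->tuple.proto == t->proto &&
	       c->zone == zone && c->confirmed;
}

static void publish(struct conn *c)
{
	c->confirmed = true;
	/* one reference for the table, one for the caller */
	atomic_store(&c->use, 2);
}

int main(void)
{
	struct conn c = { .tuple = { 1, 2, 6 }, .zone = 0 };
	struct tuple t = { 1, 2, 6 };

	printf("before publish: match=%d\n", key_equal(&c, &t, 0));
	publish(&c);
	printf("after publish:  match=%d use=%d\n",
	       key_equal(&c, &t, 0), atomic_load(&c.use));
	return 0;
}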
diff --git a/net/netfilter/nf_synproxy_core.c b/net/netfilter/nf_synproxy_core.c
index 9858e3e51a3a..52e20c9a46a5 100644
--- a/net/netfilter/nf_synproxy_core.c
+++ b/net/netfilter/nf_synproxy_core.c
@@ -363,9 +363,8 @@ static int __net_init synproxy_net_init(struct net *net)
363 goto err2; 363 goto err2;
364 if (!nfct_synproxy_ext_add(ct)) 364 if (!nfct_synproxy_ext_add(ct))
365 goto err2; 365 goto err2;
366 __set_bit(IPS_TEMPLATE_BIT, &ct->status);
367 __set_bit(IPS_CONFIRMED_BIT, &ct->status);
368 366
367 nf_conntrack_tmpl_insert(net, ct);
369 snet->tmpl = ct; 368 snet->tmpl = ct;
370 369
371 snet->stats = alloc_percpu(struct synproxy_stats); 370 snet->stats = alloc_percpu(struct synproxy_stats);
@@ -390,7 +389,7 @@ static void __net_exit synproxy_net_exit(struct net *net)
390{ 389{
391 struct synproxy_net *snet = synproxy_pernet(net); 390 struct synproxy_net *snet = synproxy_pernet(net);
392 391
393 nf_conntrack_free(snet->tmpl); 392 nf_ct_put(snet->tmpl);
394 synproxy_proc_exit(net); 393 synproxy_proc_exit(net);
395 free_percpu(snet->stats); 394 free_percpu(snet->stats);
396} 395}
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
index 117bbaaddde6..adce01e8bb57 100644
--- a/net/netfilter/nf_tables_api.c
+++ b/net/netfilter/nf_tables_api.c
@@ -1008,10 +1008,8 @@ notify:
1008 return 0; 1008 return 0;
1009} 1009}
1010 1010
1011static void nf_tables_rcu_chain_destroy(struct rcu_head *head) 1011static void nf_tables_chain_destroy(struct nft_chain *chain)
1012{ 1012{
1013 struct nft_chain *chain = container_of(head, struct nft_chain, rcu_head);
1014
1015 BUG_ON(chain->use > 0); 1013 BUG_ON(chain->use > 0);
1016 1014
1017 if (chain->flags & NFT_BASE_CHAIN) { 1015 if (chain->flags & NFT_BASE_CHAIN) {
@@ -1045,7 +1043,7 @@ static int nf_tables_delchain(struct sock *nlsk, struct sk_buff *skb,
1045 if (IS_ERR(chain)) 1043 if (IS_ERR(chain))
1046 return PTR_ERR(chain); 1044 return PTR_ERR(chain);
1047 1045
1048 if (!list_empty(&chain->rules)) 1046 if (!list_empty(&chain->rules) || chain->use > 0)
1049 return -EBUSY; 1047 return -EBUSY;
1050 1048
1051 list_del(&chain->list); 1049 list_del(&chain->list);
@@ -1059,7 +1057,9 @@ static int nf_tables_delchain(struct sock *nlsk, struct sk_buff *skb,
1059 family); 1057 family);
1060 1058
1061 /* Make sure all rule references are gone before this is released */ 1059 /* Make sure all rule references are gone before this is released */
1062 call_rcu(&chain->rcu_head, nf_tables_rcu_chain_destroy); 1060 synchronize_rcu();
1061
1062 nf_tables_chain_destroy(chain);
1063 return 0; 1063 return 0;
1064} 1064}
1065 1065
@@ -1114,35 +1114,45 @@ void nft_unregister_expr(struct nft_expr_type *type)
1114} 1114}
1115EXPORT_SYMBOL_GPL(nft_unregister_expr); 1115EXPORT_SYMBOL_GPL(nft_unregister_expr);
1116 1116
1117static const struct nft_expr_type *__nft_expr_type_get(struct nlattr *nla) 1117static const struct nft_expr_type *__nft_expr_type_get(u8 family,
1118 struct nlattr *nla)
1118{ 1119{
1119 const struct nft_expr_type *type; 1120 const struct nft_expr_type *type;
1120 1121
1121 list_for_each_entry(type, &nf_tables_expressions, list) { 1122 list_for_each_entry(type, &nf_tables_expressions, list) {
1122 if (!nla_strcmp(nla, type->name)) 1123 if (!nla_strcmp(nla, type->name) &&
1124 (!type->family || type->family == family))
1123 return type; 1125 return type;
1124 } 1126 }
1125 return NULL; 1127 return NULL;
1126} 1128}
1127 1129
1128static const struct nft_expr_type *nft_expr_type_get(struct nlattr *nla) 1130static const struct nft_expr_type *nft_expr_type_get(u8 family,
1131 struct nlattr *nla)
1129{ 1132{
1130 const struct nft_expr_type *type; 1133 const struct nft_expr_type *type;
1131 1134
1132 if (nla == NULL) 1135 if (nla == NULL)
1133 return ERR_PTR(-EINVAL); 1136 return ERR_PTR(-EINVAL);
1134 1137
1135 type = __nft_expr_type_get(nla); 1138 type = __nft_expr_type_get(family, nla);
1136 if (type != NULL && try_module_get(type->owner)) 1139 if (type != NULL && try_module_get(type->owner))
1137 return type; 1140 return type;
1138 1141
1139#ifdef CONFIG_MODULES 1142#ifdef CONFIG_MODULES
1140 if (type == NULL) { 1143 if (type == NULL) {
1141 nfnl_unlock(NFNL_SUBSYS_NFTABLES); 1144 nfnl_unlock(NFNL_SUBSYS_NFTABLES);
1145 request_module("nft-expr-%u-%.*s", family,
1146 nla_len(nla), (char *)nla_data(nla));
1147 nfnl_lock(NFNL_SUBSYS_NFTABLES);
1148 if (__nft_expr_type_get(family, nla))
1149 return ERR_PTR(-EAGAIN);
1150
1151 nfnl_unlock(NFNL_SUBSYS_NFTABLES);
1142 request_module("nft-expr-%.*s", 1152 request_module("nft-expr-%.*s",
1143 nla_len(nla), (char *)nla_data(nla)); 1153 nla_len(nla), (char *)nla_data(nla));
1144 nfnl_lock(NFNL_SUBSYS_NFTABLES); 1154 nfnl_lock(NFNL_SUBSYS_NFTABLES);
1145 if (__nft_expr_type_get(nla)) 1155 if (__nft_expr_type_get(family, nla))
1146 return ERR_PTR(-EAGAIN); 1156 return ERR_PTR(-EAGAIN);
1147 } 1157 }
1148#endif 1158#endif
@@ -1193,7 +1203,7 @@ static int nf_tables_expr_parse(const struct nft_ctx *ctx,
1193 if (err < 0) 1203 if (err < 0)
1194 return err; 1204 return err;
1195 1205
1196 type = nft_expr_type_get(tb[NFTA_EXPR_NAME]); 1206 type = nft_expr_type_get(ctx->afi->family, tb[NFTA_EXPR_NAME]);
1197 if (IS_ERR(type)) 1207 if (IS_ERR(type))
1198 return PTR_ERR(type); 1208 return PTR_ERR(type);
1199 1209
@@ -1521,9 +1531,8 @@ err:
1521 return err; 1531 return err;
1522} 1532}
1523 1533
1524static void nf_tables_rcu_rule_destroy(struct rcu_head *head) 1534static void nf_tables_rule_destroy(struct nft_rule *rule)
1525{ 1535{
1526 struct nft_rule *rule = container_of(head, struct nft_rule, rcu_head);
1527 struct nft_expr *expr; 1536 struct nft_expr *expr;
1528 1537
1529 /* 1538 /*
@@ -1538,11 +1547,6 @@ static void nf_tables_rcu_rule_destroy(struct rcu_head *head)
1538 kfree(rule); 1547 kfree(rule);
1539} 1548}
1540 1549
1541static void nf_tables_rule_destroy(struct nft_rule *rule)
1542{
1543 call_rcu(&rule->rcu_head, nf_tables_rcu_rule_destroy);
1544}
1545
1546#define NFT_RULE_MAXEXPRS 128 1550#define NFT_RULE_MAXEXPRS 128
1547 1551
1548static struct nft_expr_info *info; 1552static struct nft_expr_info *info;
@@ -1809,9 +1813,6 @@ static int nf_tables_commit(struct sk_buff *skb)
1809 synchronize_rcu(); 1813 synchronize_rcu();
1810 1814
1811 list_for_each_entry_safe(rupd, tmp, &net->nft.commit_list, list) { 1815 list_for_each_entry_safe(rupd, tmp, &net->nft.commit_list, list) {
1812 /* Delete this rule from the dirty list */
1813 list_del(&rupd->list);
1814
1815 /* This rule was inactive in the past and just became active. 1816 /* This rule was inactive in the past and just became active.
1816 * Clear the next bit of the genmask since its meaning has 1817 * Clear the next bit of the genmask since its meaning has
1817 * changed, now it is the future. 1818 * changed, now it is the future.
@@ -1822,6 +1823,7 @@ static int nf_tables_commit(struct sk_buff *skb)
1822 rupd->chain, rupd->rule, 1823 rupd->chain, rupd->rule,
1823 NFT_MSG_NEWRULE, 0, 1824 NFT_MSG_NEWRULE, 0,
1824 rupd->family); 1825 rupd->family);
1826 list_del(&rupd->list);
1825 kfree(rupd); 1827 kfree(rupd);
1826 continue; 1828 continue;
1827 } 1829 }
@@ -1831,7 +1833,15 @@ static int nf_tables_commit(struct sk_buff *skb)
1831 nf_tables_rule_notify(skb, rupd->nlh, rupd->table, rupd->chain, 1833 nf_tables_rule_notify(skb, rupd->nlh, rupd->table, rupd->chain,
1832 rupd->rule, NFT_MSG_DELRULE, 0, 1834 rupd->rule, NFT_MSG_DELRULE, 0,
1833 rupd->family); 1835 rupd->family);
1836 }
1837
1838 /* Make sure we don't see any packet traversing old rules */
1839 synchronize_rcu();
1840
1841 /* Now we can safely release unused old rules */
1842 list_for_each_entry_safe(rupd, tmp, &net->nft.commit_list, list) {
1834 nf_tables_rule_destroy(rupd->rule); 1843 nf_tables_rule_destroy(rupd->rule);
1844 list_del(&rupd->list);
1835 kfree(rupd); 1845 kfree(rupd);
1836 } 1846 }
1837 1847
@@ -1844,20 +1854,26 @@ static int nf_tables_abort(struct sk_buff *skb)
1844 struct nft_rule_trans *rupd, *tmp; 1854 struct nft_rule_trans *rupd, *tmp;
1845 1855
1846 list_for_each_entry_safe(rupd, tmp, &net->nft.commit_list, list) { 1856 list_for_each_entry_safe(rupd, tmp, &net->nft.commit_list, list) {
1847 /* Delete all rules from the dirty list */
1848 list_del(&rupd->list);
1849
1850 if (!nft_rule_is_active_next(net, rupd->rule)) { 1857 if (!nft_rule_is_active_next(net, rupd->rule)) {
1851 nft_rule_clear(net, rupd->rule); 1858 nft_rule_clear(net, rupd->rule);
1859 list_del(&rupd->list);
1852 kfree(rupd); 1860 kfree(rupd);
1853 continue; 1861 continue;
1854 } 1862 }
1855 1863
1856 /* This rule is inactive, get rid of it */ 1864 /* This rule is inactive, get rid of it */
1857 list_del_rcu(&rupd->rule->list); 1865 list_del_rcu(&rupd->rule->list);
1866 }
1867
1868 /* Make sure we don't see any packet accessing aborted rules */
1869 synchronize_rcu();
1870
1871 list_for_each_entry_safe(rupd, tmp, &net->nft.commit_list, list) {
1858 nf_tables_rule_destroy(rupd->rule); 1872 nf_tables_rule_destroy(rupd->rule);
1873 list_del(&rupd->list);
1859 kfree(rupd); 1874 kfree(rupd);
1860 } 1875 }
1876
1861 return 0; 1877 return 0;
1862} 1878}
1863 1879
@@ -1943,6 +1959,9 @@ static int nft_ctx_init_from_setattr(struct nft_ctx *ctx,
1943 } 1959 }
1944 1960
1945 if (nla[NFTA_SET_TABLE] != NULL) { 1961 if (nla[NFTA_SET_TABLE] != NULL) {
1962 if (afi == NULL)
1963 return -EAFNOSUPPORT;
1964
1946 table = nf_tables_table_lookup(afi, nla[NFTA_SET_TABLE]); 1965 table = nf_tables_table_lookup(afi, nla[NFTA_SET_TABLE]);
1947 if (IS_ERR(table)) 1966 if (IS_ERR(table))
1948 return PTR_ERR(table); 1967 return PTR_ERR(table);
@@ -1989,13 +2008,13 @@ static int nf_tables_set_alloc_name(struct nft_ctx *ctx, struct nft_set *set,
1989 2008
1990 if (!sscanf(i->name, name, &tmp)) 2009 if (!sscanf(i->name, name, &tmp))
1991 continue; 2010 continue;
1992 if (tmp < 0 || tmp > BITS_PER_LONG * PAGE_SIZE) 2011 if (tmp < 0 || tmp >= BITS_PER_BYTE * PAGE_SIZE)
1993 continue; 2012 continue;
1994 2013
1995 set_bit(tmp, inuse); 2014 set_bit(tmp, inuse);
1996 } 2015 }
1997 2016
1998 n = find_first_zero_bit(inuse, BITS_PER_LONG * PAGE_SIZE); 2017 n = find_first_zero_bit(inuse, BITS_PER_BYTE * PAGE_SIZE);
1999 free_page((unsigned long)inuse); 2018 free_page((unsigned long)inuse);
2000 } 2019 }
2001 2020
@@ -2428,6 +2447,8 @@ static int nf_tables_delset(struct sock *nlsk, struct sk_buff *skb,
2428 struct nft_ctx ctx; 2447 struct nft_ctx ctx;
2429 int err; 2448 int err;
2430 2449
2450 if (nfmsg->nfgen_family == NFPROTO_UNSPEC)
2451 return -EAFNOSUPPORT;
2431 if (nla[NFTA_SET_TABLE] == NULL) 2452 if (nla[NFTA_SET_TABLE] == NULL)
2432 return -EINVAL; 2453 return -EINVAL;
2433 2454
@@ -2435,9 +2456,6 @@ static int nf_tables_delset(struct sock *nlsk, struct sk_buff *skb,
2435 if (err < 0) 2456 if (err < 0)
2436 return err; 2457 return err;
2437 2458
2438 if (nfmsg->nfgen_family == NFPROTO_UNSPEC)
2439 return -EAFNOSUPPORT;
2440
2441 set = nf_tables_set_lookup(ctx.table, nla[NFTA_SET_NAME]); 2459 set = nf_tables_set_lookup(ctx.table, nla[NFTA_SET_NAME]);
2442 if (IS_ERR(set)) 2460 if (IS_ERR(set))
2443 return PTR_ERR(set); 2461 return PTR_ERR(set);
@@ -2723,6 +2741,9 @@ static int nft_add_set_elem(const struct nft_ctx *ctx, struct nft_set *set,
2723 if (nla[NFTA_SET_ELEM_DATA] == NULL && 2741 if (nla[NFTA_SET_ELEM_DATA] == NULL &&
2724 !(elem.flags & NFT_SET_ELEM_INTERVAL_END)) 2742 !(elem.flags & NFT_SET_ELEM_INTERVAL_END))
2725 return -EINVAL; 2743 return -EINVAL;
2744 if (nla[NFTA_SET_ELEM_DATA] != NULL &&
2745 elem.flags & NFT_SET_ELEM_INTERVAL_END)
2746 return -EINVAL;
2726 } else { 2747 } else {
2727 if (nla[NFTA_SET_ELEM_DATA] != NULL) 2748 if (nla[NFTA_SET_ELEM_DATA] != NULL)
2728 return -EINVAL; 2749 return -EINVAL;
@@ -2977,6 +2998,9 @@ static int nf_tables_loop_check_setelem(const struct nft_ctx *ctx,
2977 const struct nft_set_iter *iter, 2998 const struct nft_set_iter *iter,
2978 const struct nft_set_elem *elem) 2999 const struct nft_set_elem *elem)
2979{ 3000{
3001 if (elem->flags & NFT_SET_ELEM_INTERVAL_END)
3002 return 0;
3003
2980 switch (elem->data.verdict) { 3004 switch (elem->data.verdict) {
2981 case NFT_JUMP: 3005 case NFT_JUMP:
2982 case NFT_GOTO: 3006 case NFT_GOTO:
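In the nf_tables_set_alloc_name() hunk above, the scratch bitmap is a single page, so only BITS_PER_BYTE * PAGE_SIZE suffixes fit, not BITS_PER_LONG * PAGE_SIZE. A small standalone sketch of the same "find the lowest free %d suffix" idea, with a deliberately tiny bitmap; the names and sizes here are illustrative only.

#include <stdio.h>
#include <string.h>

#define NBITS 64		/* stand-in for BITS_PER_BYTE * PAGE_SIZE */

static void set_bit_(unsigned int n, unsigned char *map)
{
	map[n / 8] |= 1u << (n % 8);
}

static int test_bit_(unsigned int n, const unsigned char *map)
{
	return map[n / 8] & (1u << (n % 8));
}

/* Mark suffixes already taken by existing names, then return the lowest
 * unused one; the kernel code does the same with sscanf() over "map%d". */
static int alloc_suffix(const int *taken, int ntaken)
{
	unsigned char map[NBITS / 8];
	int i;

	memset(map, 0, sizeof(map));
	for (i = 0; i < ntaken; i++)
		if (taken[i] >= 0 && taken[i] < NBITS)	/* out-of-range suffixes are skipped */
			set_bit_(taken[i], map);

	for (i = 0; i < NBITS; i++)
		if (!test_bit_(i, map))
			return i;
	return -1;
}

int main(void)
{
	int taken[] = { 0, 1, 3 };

	printf("next free suffix: %d\n", alloc_suffix(taken, 3));	/* prints 2 */
	return 0;
}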
diff --git a/net/netfilter/nf_tables_core.c b/net/netfilter/nf_tables_core.c
index 0d879fcb8763..90998a6ff8b9 100644
--- a/net/netfilter/nf_tables_core.c
+++ b/net/netfilter/nf_tables_core.c
@@ -103,9 +103,9 @@ static struct nf_loginfo trace_loginfo = {
103 }, 103 },
104}; 104};
105 105
106static inline void nft_trace_packet(const struct nft_pktinfo *pkt, 106static void nft_trace_packet(const struct nft_pktinfo *pkt,
107 const struct nft_chain *chain, 107 const struct nft_chain *chain,
108 int rulenum, enum nft_trace type) 108 int rulenum, enum nft_trace type)
109{ 109{
110 struct net *net = dev_net(pkt->in ? pkt->in : pkt->out); 110 struct net *net = dev_net(pkt->in ? pkt->in : pkt->out);
111 111
diff --git a/net/netfilter/nft_ct.c b/net/netfilter/nft_ct.c
index 917052e20602..46e275403838 100644
--- a/net/netfilter/nft_ct.c
+++ b/net/netfilter/nft_ct.c
@@ -226,6 +226,7 @@ static int nft_ct_init_validate_get(const struct nft_expr *expr,
226 if (tb[NFTA_CT_DIRECTION] != NULL) 226 if (tb[NFTA_CT_DIRECTION] != NULL)
227 return -EINVAL; 227 return -EINVAL;
228 break; 228 break;
229 case NFT_CT_L3PROTOCOL:
229 case NFT_CT_PROTOCOL: 230 case NFT_CT_PROTOCOL:
230 case NFT_CT_SRC: 231 case NFT_CT_SRC:
231 case NFT_CT_DST: 232 case NFT_CT_DST:
@@ -311,8 +312,19 @@ static int nft_ct_get_dump(struct sk_buff *skb, const struct nft_expr *expr)
311 goto nla_put_failure; 312 goto nla_put_failure;
312 if (nla_put_be32(skb, NFTA_CT_KEY, htonl(priv->key))) 313 if (nla_put_be32(skb, NFTA_CT_KEY, htonl(priv->key)))
313 goto nla_put_failure; 314 goto nla_put_failure;
314 if (nla_put_u8(skb, NFTA_CT_DIRECTION, priv->dir)) 315
315 goto nla_put_failure; 316 switch (priv->key) {
317 case NFT_CT_PROTOCOL:
318 case NFT_CT_SRC:
319 case NFT_CT_DST:
320 case NFT_CT_PROTO_SRC:
321 case NFT_CT_PROTO_DST:
322 if (nla_put_u8(skb, NFTA_CT_DIRECTION, priv->dir))
323 goto nla_put_failure;
324 default:
325 break;
326 }
327
316 return 0; 328 return 0;
317 329
318nla_put_failure: 330nla_put_failure:
diff --git a/net/netfilter/nft_log.c b/net/netfilter/nft_log.c
index 5af790123ad8..26c5154e05f3 100644
--- a/net/netfilter/nft_log.c
+++ b/net/netfilter/nft_log.c
@@ -23,7 +23,6 @@ static const char *nft_log_null_prefix = "";
23struct nft_log { 23struct nft_log {
24 struct nf_loginfo loginfo; 24 struct nf_loginfo loginfo;
25 char *prefix; 25 char *prefix;
26 int family;
27}; 26};
28 27
29static void nft_log_eval(const struct nft_expr *expr, 28static void nft_log_eval(const struct nft_expr *expr,
@@ -33,7 +32,7 @@ static void nft_log_eval(const struct nft_expr *expr,
33 const struct nft_log *priv = nft_expr_priv(expr); 32 const struct nft_log *priv = nft_expr_priv(expr);
34 struct net *net = dev_net(pkt->in ? pkt->in : pkt->out); 33 struct net *net = dev_net(pkt->in ? pkt->in : pkt->out);
35 34
36 nf_log_packet(net, priv->family, pkt->ops->hooknum, pkt->skb, pkt->in, 35 nf_log_packet(net, pkt->ops->pf, pkt->ops->hooknum, pkt->skb, pkt->in,
37 pkt->out, &priv->loginfo, "%s", priv->prefix); 36 pkt->out, &priv->loginfo, "%s", priv->prefix);
38} 37}
39 38
@@ -52,8 +51,6 @@ static int nft_log_init(const struct nft_ctx *ctx,
52 struct nf_loginfo *li = &priv->loginfo; 51 struct nf_loginfo *li = &priv->loginfo;
53 const struct nlattr *nla; 52 const struct nlattr *nla;
54 53
55 priv->family = ctx->afi->family;
56
57 nla = tb[NFTA_LOG_PREFIX]; 54 nla = tb[NFTA_LOG_PREFIX];
58 if (nla != NULL) { 55 if (nla != NULL) {
59 priv->prefix = kmalloc(nla_len(nla) + 1, GFP_KERNEL); 56 priv->prefix = kmalloc(nla_len(nla) + 1, GFP_KERNEL);
diff --git a/net/netfilter/nft_lookup.c b/net/netfilter/nft_lookup.c
index 8a6116b75b5a..bb4ef4cccb6e 100644
--- a/net/netfilter/nft_lookup.c
+++ b/net/netfilter/nft_lookup.c
@@ -16,6 +16,7 @@
16#include <linux/netfilter.h> 16#include <linux/netfilter.h>
17#include <linux/netfilter/nf_tables.h> 17#include <linux/netfilter/nf_tables.h>
18#include <net/netfilter/nf_tables.h> 18#include <net/netfilter/nf_tables.h>
19#include <net/netfilter/nf_tables_core.h>
19 20
20struct nft_lookup { 21struct nft_lookup {
21 struct nft_set *set; 22 struct nft_set *set;
diff --git a/net/netfilter/nft_queue.c b/net/netfilter/nft_queue.c
index cbea473d69e9..e8ae2f6bf232 100644
--- a/net/netfilter/nft_queue.c
+++ b/net/netfilter/nft_queue.c
@@ -25,7 +25,6 @@ struct nft_queue {
25 u16 queuenum; 25 u16 queuenum;
26 u16 queues_total; 26 u16 queues_total;
27 u16 flags; 27 u16 flags;
28 u8 family;
29}; 28};
30 29
31static void nft_queue_eval(const struct nft_expr *expr, 30static void nft_queue_eval(const struct nft_expr *expr,
@@ -43,7 +42,7 @@ static void nft_queue_eval(const struct nft_expr *expr,
43 queue = priv->queuenum + cpu % priv->queues_total; 42 queue = priv->queuenum + cpu % priv->queues_total;
44 } else { 43 } else {
45 queue = nfqueue_hash(pkt->skb, queue, 44 queue = nfqueue_hash(pkt->skb, queue,
46 priv->queues_total, priv->family, 45 priv->queues_total, pkt->ops->pf,
47 jhash_initval); 46 jhash_initval);
48 } 47 }
49 } 48 }
@@ -71,7 +70,6 @@ static int nft_queue_init(const struct nft_ctx *ctx,
71 return -EINVAL; 70 return -EINVAL;
72 71
73 init_hashrandom(&jhash_initval); 72 init_hashrandom(&jhash_initval);
74 priv->family = ctx->afi->family;
75 priv->queuenum = ntohs(nla_get_be16(tb[NFTA_QUEUE_NUM])); 73 priv->queuenum = ntohs(nla_get_be16(tb[NFTA_QUEUE_NUM]));
76 74
77 if (tb[NFTA_QUEUE_TOTAL] != NULL) 75 if (tb[NFTA_QUEUE_TOTAL] != NULL)
diff --git a/net/netfilter/nft_rbtree.c b/net/netfilter/nft_rbtree.c
index ca0c1b231bfe..e21d69d13506 100644
--- a/net/netfilter/nft_rbtree.c
+++ b/net/netfilter/nft_rbtree.c
@@ -69,8 +69,10 @@ static void nft_rbtree_elem_destroy(const struct nft_set *set,
69 struct nft_rbtree_elem *rbe) 69 struct nft_rbtree_elem *rbe)
70{ 70{
71 nft_data_uninit(&rbe->key, NFT_DATA_VALUE); 71 nft_data_uninit(&rbe->key, NFT_DATA_VALUE);
72 if (set->flags & NFT_SET_MAP) 72 if (set->flags & NFT_SET_MAP &&
73 !(rbe->flags & NFT_SET_ELEM_INTERVAL_END))
73 nft_data_uninit(rbe->data, set->dtype); 74 nft_data_uninit(rbe->data, set->dtype);
75
74 kfree(rbe); 76 kfree(rbe);
75} 77}
76 78
@@ -108,7 +110,8 @@ static int nft_rbtree_insert(const struct nft_set *set,
108 int err; 110 int err;
109 111
110 size = sizeof(*rbe); 112 size = sizeof(*rbe);
111 if (set->flags & NFT_SET_MAP) 113 if (set->flags & NFT_SET_MAP &&
114 !(elem->flags & NFT_SET_ELEM_INTERVAL_END))
112 size += sizeof(rbe->data[0]); 115 size += sizeof(rbe->data[0]);
113 116
114 rbe = kzalloc(size, GFP_KERNEL); 117 rbe = kzalloc(size, GFP_KERNEL);
@@ -117,7 +120,8 @@ static int nft_rbtree_insert(const struct nft_set *set,
117 120
118 rbe->flags = elem->flags; 121 rbe->flags = elem->flags;
119 nft_data_copy(&rbe->key, &elem->key); 122 nft_data_copy(&rbe->key, &elem->key);
120 if (set->flags & NFT_SET_MAP) 123 if (set->flags & NFT_SET_MAP &&
124 !(rbe->flags & NFT_SET_ELEM_INTERVAL_END))
121 nft_data_copy(rbe->data, &elem->data); 125 nft_data_copy(rbe->data, &elem->data);
122 126
123 err = __nft_rbtree_insert(set, rbe); 127 err = __nft_rbtree_insert(set, rbe);
@@ -153,7 +157,8 @@ static int nft_rbtree_get(const struct nft_set *set, struct nft_set_elem *elem)
153 parent = parent->rb_right; 157 parent = parent->rb_right;
154 else { 158 else {
155 elem->cookie = rbe; 159 elem->cookie = rbe;
156 if (set->flags & NFT_SET_MAP) 160 if (set->flags & NFT_SET_MAP &&
161 !(rbe->flags & NFT_SET_ELEM_INTERVAL_END))
157 nft_data_copy(&elem->data, rbe->data); 162 nft_data_copy(&elem->data, rbe->data);
158 elem->flags = rbe->flags; 163 elem->flags = rbe->flags;
159 return 0; 164 return 0;
@@ -177,7 +182,8 @@ static void nft_rbtree_walk(const struct nft_ctx *ctx,
177 182
178 rbe = rb_entry(node, struct nft_rbtree_elem, node); 183 rbe = rb_entry(node, struct nft_rbtree_elem, node);
179 nft_data_copy(&elem.key, &rbe->key); 184 nft_data_copy(&elem.key, &rbe->key);
180 if (set->flags & NFT_SET_MAP) 185 if (set->flags & NFT_SET_MAP &&
186 !(rbe->flags & NFT_SET_ELEM_INTERVAL_END))
181 nft_data_copy(&elem.data, rbe->data); 187 nft_data_copy(&elem.data, rbe->data);
182 elem.flags = rbe->flags; 188 elem.flags = rbe->flags;
183 189
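The nft_rbtree.c hunks above make every access to an element's data conditional on it not being an interval end, because interval-end markers are allocated without the data part. A minimal sketch of that allocation pattern; the flag value and struct are illustrative, not the nftables definitions.

#include <stdio.h>
#include <stdlib.h>

#define ELEM_INTERVAL_END 0x1

struct elem {
	unsigned int flags;
	unsigned int key;
	unsigned int data[];	/* present only when the element maps to data */
};

/* Interval-end markers only delimit a range, so no data slot is allocated
 * or copied for them - mirroring the size/copy checks in nft_rbtree. */
static struct elem *elem_new(unsigned int key, unsigned int flags,
			     const unsigned int *data)
{
	size_t size = sizeof(struct elem);
	struct elem *e;

	if (!(flags & ELEM_INTERVAL_END))
		size += sizeof(unsigned int);

	e = calloc(1, size);
	if (!e)
		return NULL;
	e->flags = flags;
	e->key = key;
	if (!(flags & ELEM_INTERVAL_END) && data)
		e->data[0] = *data;
	return e;
}

int main(void)
{
	unsigned int d = 42;
	struct elem *a = elem_new(10, 0, &d);
	struct elem *b = elem_new(20, ELEM_INTERVAL_END, NULL);

	printf("a: key=%u data=%u\n", a->key, a->data[0]);
	printf("b: key=%u (interval end, no data)\n", b->key);
	free(a);
	free(b);
	return 0;
}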
diff --git a/net/netfilter/nft_reject.c b/net/netfilter/nft_reject.c
index 5e204711d704..f3448c296446 100644
--- a/net/netfilter/nft_reject.c
+++ b/net/netfilter/nft_reject.c
@@ -16,65 +16,23 @@
16#include <linux/netfilter.h> 16#include <linux/netfilter.h>
17#include <linux/netfilter/nf_tables.h> 17#include <linux/netfilter/nf_tables.h>
18#include <net/netfilter/nf_tables.h> 18#include <net/netfilter/nf_tables.h>
19#include <net/icmp.h> 19#include <net/netfilter/nft_reject.h>
20#include <net/netfilter/ipv4/nf_reject.h>
21 20
22#if IS_ENABLED(CONFIG_NF_TABLES_IPV6) 21const struct nla_policy nft_reject_policy[NFTA_REJECT_MAX + 1] = {
23#include <net/netfilter/ipv6/nf_reject.h>
24#endif
25
26struct nft_reject {
27 enum nft_reject_types type:8;
28 u8 icmp_code;
29 u8 family;
30};
31
32static void nft_reject_eval(const struct nft_expr *expr,
33 struct nft_data data[NFT_REG_MAX + 1],
34 const struct nft_pktinfo *pkt)
35{
36 struct nft_reject *priv = nft_expr_priv(expr);
37#if IS_ENABLED(CONFIG_NF_TABLES_IPV6)
38 struct net *net = dev_net((pkt->in != NULL) ? pkt->in : pkt->out);
39#endif
40 switch (priv->type) {
41 case NFT_REJECT_ICMP_UNREACH:
42 if (priv->family == NFPROTO_IPV4)
43 nf_send_unreach(pkt->skb, priv->icmp_code);
44#if IS_ENABLED(CONFIG_NF_TABLES_IPV6)
45 else if (priv->family == NFPROTO_IPV6)
46 nf_send_unreach6(net, pkt->skb, priv->icmp_code,
47 pkt->ops->hooknum);
48#endif
49 break;
50 case NFT_REJECT_TCP_RST:
51 if (priv->family == NFPROTO_IPV4)
52 nf_send_reset(pkt->skb, pkt->ops->hooknum);
53#if IS_ENABLED(CONFIG_NF_TABLES_IPV6)
54 else if (priv->family == NFPROTO_IPV6)
55 nf_send_reset6(net, pkt->skb, pkt->ops->hooknum);
56#endif
57 break;
58 }
59
60 data[NFT_REG_VERDICT].verdict = NF_DROP;
61}
62
63static const struct nla_policy nft_reject_policy[NFTA_REJECT_MAX + 1] = {
64 [NFTA_REJECT_TYPE] = { .type = NLA_U32 }, 22 [NFTA_REJECT_TYPE] = { .type = NLA_U32 },
65 [NFTA_REJECT_ICMP_CODE] = { .type = NLA_U8 }, 23 [NFTA_REJECT_ICMP_CODE] = { .type = NLA_U8 },
66}; 24};
25EXPORT_SYMBOL_GPL(nft_reject_policy);
67 26
68static int nft_reject_init(const struct nft_ctx *ctx, 27int nft_reject_init(const struct nft_ctx *ctx,
69 const struct nft_expr *expr, 28 const struct nft_expr *expr,
70 const struct nlattr * const tb[]) 29 const struct nlattr * const tb[])
71{ 30{
72 struct nft_reject *priv = nft_expr_priv(expr); 31 struct nft_reject *priv = nft_expr_priv(expr);
73 32
74 if (tb[NFTA_REJECT_TYPE] == NULL) 33 if (tb[NFTA_REJECT_TYPE] == NULL)
75 return -EINVAL; 34 return -EINVAL;
76 35
77 priv->family = ctx->afi->family;
78 priv->type = ntohl(nla_get_be32(tb[NFTA_REJECT_TYPE])); 36 priv->type = ntohl(nla_get_be32(tb[NFTA_REJECT_TYPE]));
79 switch (priv->type) { 37 switch (priv->type) {
80 case NFT_REJECT_ICMP_UNREACH: 38 case NFT_REJECT_ICMP_UNREACH:
@@ -89,8 +47,9 @@ static int nft_reject_init(const struct nft_ctx *ctx,
89 47
90 return 0; 48 return 0;
91} 49}
50EXPORT_SYMBOL_GPL(nft_reject_init);
92 51
93static int nft_reject_dump(struct sk_buff *skb, const struct nft_expr *expr) 52int nft_reject_dump(struct sk_buff *skb, const struct nft_expr *expr)
94{ 53{
95 const struct nft_reject *priv = nft_expr_priv(expr); 54 const struct nft_reject *priv = nft_expr_priv(expr);
96 55
@@ -109,37 +68,7 @@ static int nft_reject_dump(struct sk_buff *skb, const struct nft_expr *expr)
109nla_put_failure: 68nla_put_failure:
110 return -1; 69 return -1;
111} 70}
112 71EXPORT_SYMBOL_GPL(nft_reject_dump);
113static struct nft_expr_type nft_reject_type;
114static const struct nft_expr_ops nft_reject_ops = {
115 .type = &nft_reject_type,
116 .size = NFT_EXPR_SIZE(sizeof(struct nft_reject)),
117 .eval = nft_reject_eval,
118 .init = nft_reject_init,
119 .dump = nft_reject_dump,
120};
121
122static struct nft_expr_type nft_reject_type __read_mostly = {
123 .name = "reject",
124 .ops = &nft_reject_ops,
125 .policy = nft_reject_policy,
126 .maxattr = NFTA_REJECT_MAX,
127 .owner = THIS_MODULE,
128};
129
130static int __init nft_reject_module_init(void)
131{
132 return nft_register_expr(&nft_reject_type);
133}
134
135static void __exit nft_reject_module_exit(void)
136{
137 nft_unregister_expr(&nft_reject_type);
138}
139
140module_init(nft_reject_module_init);
141module_exit(nft_reject_module_exit);
142 72
143MODULE_LICENSE("GPL"); 73MODULE_LICENSE("GPL");
144MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>"); 74MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
145MODULE_ALIAS_NFT_EXPR("reject");
diff --git a/net/netfilter/nft_reject_inet.c b/net/netfilter/nft_reject_inet.c
new file mode 100644
index 000000000000..8a310f239c93
--- /dev/null
+++ b/net/netfilter/nft_reject_inet.c
@@ -0,0 +1,63 @@
1/*
2 * Copyright (c) 2014 Patrick McHardy <kaber@trash.net>
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 */
8
9#include <linux/kernel.h>
10#include <linux/init.h>
11#include <linux/module.h>
12#include <linux/netlink.h>
13#include <linux/netfilter.h>
14#include <linux/netfilter/nf_tables.h>
15#include <net/netfilter/nf_tables.h>
16#include <net/netfilter/nft_reject.h>
17
18static void nft_reject_inet_eval(const struct nft_expr *expr,
19 struct nft_data data[NFT_REG_MAX + 1],
20 const struct nft_pktinfo *pkt)
21{
22 switch (pkt->ops->pf) {
23 case NFPROTO_IPV4:
24 nft_reject_ipv4_eval(expr, data, pkt);
25 case NFPROTO_IPV6:
26 nft_reject_ipv6_eval(expr, data, pkt);
27 }
28}
29
30static struct nft_expr_type nft_reject_inet_type;
31static const struct nft_expr_ops nft_reject_inet_ops = {
32 .type = &nft_reject_inet_type,
33 .size = NFT_EXPR_SIZE(sizeof(struct nft_reject)),
34 .eval = nft_reject_inet_eval,
35 .init = nft_reject_init,
36 .dump = nft_reject_dump,
37};
38
39static struct nft_expr_type nft_reject_inet_type __read_mostly = {
40 .family = NFPROTO_INET,
41 .name = "reject",
42 .ops = &nft_reject_inet_ops,
43 .policy = nft_reject_policy,
44 .maxattr = NFTA_REJECT_MAX,
45 .owner = THIS_MODULE,
46};
47
48static int __init nft_reject_inet_module_init(void)
49{
50 return nft_register_expr(&nft_reject_inet_type);
51}
52
53static void __exit nft_reject_inet_module_exit(void)
54{
55 nft_unregister_expr(&nft_reject_inet_type);
56}
57
58module_init(nft_reject_inet_module_init);
59module_exit(nft_reject_inet_module_exit);
60
61MODULE_LICENSE("GPL");
62MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
63MODULE_ALIAS_NFT_AF_EXPR(1, "reject");
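The new inet module registers with .family = NFPROTO_INET and declares MODULE_ALIAS_NFT_AF_EXPR(1, "reject"), which pairs with the family-aware request_module("nft-expr-%u-%.*s", ...) added to nf_tables_api.c above. The sketch below only shows the alias string that autoloading would presumably look for; the exact macro expansion is an assumption, and NFPROTO_INET == 1 matches the value used in the alias.

#include <stdio.h>

/* The nf_tables core first requests "nft-expr-<family>-<name>" and only then
 * falls back to the family-less "nft-expr-<name>", so a per-family module
 * such as nft_reject_inet advertises the family-qualified alias. */
int main(void)
{
	unsigned int family = 1;	/* NFPROTO_INET */
	const char *name = "reject";
	char alias[64];

	snprintf(alias, sizeof(alias), "nft-expr-%u-%s", family, name);
	printf("first modprobe alias tried: %s\n", alias);
	printf("fallback alias:             nft-expr-%s\n", name);
	return 0;
}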
diff --git a/net/netfilter/xt_CT.c b/net/netfilter/xt_CT.c
index 5929be622c5c..75747aecdebe 100644
--- a/net/netfilter/xt_CT.c
+++ b/net/netfilter/xt_CT.c
@@ -228,12 +228,7 @@ static int xt_ct_tg_check(const struct xt_tgchk_param *par,
228 goto err3; 228 goto err3;
229 } 229 }
230 230
231 __set_bit(IPS_TEMPLATE_BIT, &ct->status); 231 nf_conntrack_tmpl_insert(par->net, ct);
232 __set_bit(IPS_CONFIRMED_BIT, &ct->status);
233
234 /* Overload tuple linked list to put us in template list. */
235 hlist_nulls_add_head_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode,
236 &par->net->ct.tmpl);
237out: 232out:
238 info->ct = ct; 233 info->ct = ct;
239 return 0; 234 return 0;
diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c
index df4692826ead..e9a48baf8551 100644
--- a/net/openvswitch/datapath.c
+++ b/net/openvswitch/datapath.c
@@ -55,6 +55,7 @@
55 55
56#include "datapath.h" 56#include "datapath.h"
57#include "flow.h" 57#include "flow.h"
58#include "flow_table.h"
58#include "flow_netlink.h" 59#include "flow_netlink.h"
59#include "vport-internal_dev.h" 60#include "vport-internal_dev.h"
60#include "vport-netdev.h" 61#include "vport-netdev.h"
@@ -160,7 +161,6 @@ static void destroy_dp_rcu(struct rcu_head *rcu)
160{ 161{
161 struct datapath *dp = container_of(rcu, struct datapath, rcu); 162 struct datapath *dp = container_of(rcu, struct datapath, rcu);
162 163
163 ovs_flow_tbl_destroy(&dp->table);
164 free_percpu(dp->stats_percpu); 164 free_percpu(dp->stats_percpu);
165 release_net(ovs_dp_get_net(dp)); 165 release_net(ovs_dp_get_net(dp));
166 kfree(dp->ports); 166 kfree(dp->ports);
@@ -466,6 +466,14 @@ static int queue_userspace_packet(struct datapath *dp, struct sk_buff *skb,
466 466
467 skb_zerocopy(user_skb, skb, skb->len, hlen); 467 skb_zerocopy(user_skb, skb, skb->len, hlen);
468 468
469 /* Pad OVS_PACKET_ATTR_PACKET if linear copy was performed */
470 if (!(dp->user_features & OVS_DP_F_UNALIGNED)) {
471 size_t plen = NLA_ALIGN(user_skb->len) - user_skb->len;
472
473 if (plen > 0)
474 memset(skb_put(user_skb, plen), 0, plen);
475 }
476
469 ((struct nlmsghdr *) user_skb->data)->nlmsg_len = user_skb->len; 477 ((struct nlmsghdr *) user_skb->data)->nlmsg_len = user_skb->len;
470 478
471 err = genlmsg_unicast(ovs_dp_get_net(dp), user_skb, upcall_info->portid); 479 err = genlmsg_unicast(ovs_dp_get_net(dp), user_skb, upcall_info->portid);
@@ -852,11 +860,8 @@ static int ovs_flow_cmd_new_or_set(struct sk_buff *skb, struct genl_info *info)
852 goto err_unlock_ovs; 860 goto err_unlock_ovs;
853 861
854 /* The unmasked key has to be the same for flow updates. */ 862 /* The unmasked key has to be the same for flow updates. */
855 error = -EINVAL; 863 if (!ovs_flow_cmp_unmasked_key(flow, &match))
856 if (!ovs_flow_cmp_unmasked_key(flow, &match)) {
857 OVS_NLERR("Flow modification message rejected, unmasked key does not match.\n");
858 goto err_unlock_ovs; 864 goto err_unlock_ovs;
859 }
860 865
861 /* Update actions. */ 866 /* Update actions. */
862 old_acts = ovsl_dereference(flow->sf_acts); 867 old_acts = ovsl_dereference(flow->sf_acts);
@@ -1079,6 +1084,7 @@ static size_t ovs_dp_cmd_msg_size(void)
1079 msgsize += nla_total_size(IFNAMSIZ); 1084 msgsize += nla_total_size(IFNAMSIZ);
1080 msgsize += nla_total_size(sizeof(struct ovs_dp_stats)); 1085 msgsize += nla_total_size(sizeof(struct ovs_dp_stats));
1081 msgsize += nla_total_size(sizeof(struct ovs_dp_megaflow_stats)); 1086 msgsize += nla_total_size(sizeof(struct ovs_dp_megaflow_stats));
1087 msgsize += nla_total_size(sizeof(u32)); /* OVS_DP_ATTR_USER_FEATURES */
1082 1088
1083 return msgsize; 1089 return msgsize;
1084} 1090}
@@ -1279,7 +1285,7 @@ err_destroy_ports_array:
1279err_destroy_percpu: 1285err_destroy_percpu:
1280 free_percpu(dp->stats_percpu); 1286 free_percpu(dp->stats_percpu);
1281err_destroy_table: 1287err_destroy_table:
1282 ovs_flow_tbl_destroy(&dp->table); 1288 ovs_flow_tbl_destroy(&dp->table, false);
1283err_free_dp: 1289err_free_dp:
1284 release_net(ovs_dp_get_net(dp)); 1290 release_net(ovs_dp_get_net(dp));
1285 kfree(dp); 1291 kfree(dp);
@@ -1306,10 +1312,13 @@ static void __dp_destroy(struct datapath *dp)
1306 list_del_rcu(&dp->list_node); 1312 list_del_rcu(&dp->list_node);
1307 1313
1308 /* OVSP_LOCAL is datapath internal port. We need to make sure that 1314 /* OVSP_LOCAL is datapath internal port. We need to make sure that
1309 * all port in datapath are destroyed first before freeing datapath. 1315 * all ports in datapath are destroyed first before freeing datapath.
1310 */ 1316 */
1311 ovs_dp_detach_port(ovs_vport_ovsl(dp, OVSP_LOCAL)); 1317 ovs_dp_detach_port(ovs_vport_ovsl(dp, OVSP_LOCAL));
1312 1318
1319 /* RCU destroy the flow table */
1320 ovs_flow_tbl_destroy(&dp->table, true);
1321
1313 call_rcu(&dp->rcu, destroy_dp_rcu); 1322 call_rcu(&dp->rcu, destroy_dp_rcu);
1314} 1323}
1315 1324
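The queue_userspace_packet() hunk above pads OVS_PACKET_ATTR_PACKET up to the netlink alignment when a linear copy was made, so user space sees an attribute whose length matches the aligned payload. A standalone sketch of that padding computation; NLA_ALIGN is written out locally (4-byte alignment) rather than taken from the uapi headers.

#include <stdio.h>
#include <string.h>

/* Netlink attributes are 4-byte aligned; pad with zeroes up to the
 * alignment boundary, as the datapath change does for the packet copy. */
#define NLA_ALIGNTO	4
#define NLA_ALIGN(len)	(((len) + NLA_ALIGNTO - 1) & ~(NLA_ALIGNTO - 1))

int main(void)
{
	unsigned char msg[64];
	size_t len = 13;			/* unaligned payload length */
	size_t plen = NLA_ALIGN(len) - len;	/* bytes of padding needed */

	memset(msg, 0xab, len);
	if (plen > 0)
		memset(msg + len, 0, plen);	/* mirrors memset(skb_put(...), 0, plen) */

	printf("payload %zu bytes, padded to %zu\n", len, len + plen);
	return 0;
}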
diff --git a/net/openvswitch/flow_table.c b/net/openvswitch/flow_table.c
index c58a0fe3c889..3c268b3d71c3 100644
--- a/net/openvswitch/flow_table.c
+++ b/net/openvswitch/flow_table.c
@@ -153,29 +153,29 @@ static void rcu_free_flow_callback(struct rcu_head *rcu)
153 flow_free(flow); 153 flow_free(flow);
154} 154}
155 155
156static void flow_mask_del_ref(struct sw_flow_mask *mask, bool deferred)
157{
158 if (!mask)
159 return;
160
161 BUG_ON(!mask->ref_count);
162 mask->ref_count--;
163
164 if (!mask->ref_count) {
165 list_del_rcu(&mask->list);
166 if (deferred)
167 kfree_rcu(mask, rcu);
168 else
169 kfree(mask);
170 }
171}
172
173void ovs_flow_free(struct sw_flow *flow, bool deferred) 156void ovs_flow_free(struct sw_flow *flow, bool deferred)
174{ 157{
175 if (!flow) 158 if (!flow)
176 return; 159 return;
177 160
178 flow_mask_del_ref(flow->mask, deferred); 161 if (flow->mask) {
162 struct sw_flow_mask *mask = flow->mask;
163
164 /* ovs-lock is required to protect mask-refcount and
165 * mask list.
166 */
167 ASSERT_OVSL();
168 BUG_ON(!mask->ref_count);
169 mask->ref_count--;
170
171 if (!mask->ref_count) {
172 list_del_rcu(&mask->list);
173 if (deferred)
174 kfree_rcu(mask, rcu);
175 else
176 kfree(mask);
177 }
178 }
179 179
180 if (deferred) 180 if (deferred)
181 call_rcu(&flow->rcu, rcu_free_flow_callback); 181 call_rcu(&flow->rcu, rcu_free_flow_callback);
@@ -188,26 +188,9 @@ static void free_buckets(struct flex_array *buckets)
188 flex_array_free(buckets); 188 flex_array_free(buckets);
189} 189}
190 190
191
191static void __table_instance_destroy(struct table_instance *ti) 192static void __table_instance_destroy(struct table_instance *ti)
192{ 193{
193 int i;
194
195 if (ti->keep_flows)
196 goto skip_flows;
197
198 for (i = 0; i < ti->n_buckets; i++) {
199 struct sw_flow *flow;
200 struct hlist_head *head = flex_array_get(ti->buckets, i);
201 struct hlist_node *n;
202 int ver = ti->node_ver;
203
204 hlist_for_each_entry_safe(flow, n, head, hash_node[ver]) {
205 hlist_del(&flow->hash_node[ver]);
206 ovs_flow_free(flow, false);
207 }
208 }
209
210skip_flows:
211 free_buckets(ti->buckets); 194 free_buckets(ti->buckets);
212 kfree(ti); 195 kfree(ti);
213} 196}
@@ -258,20 +241,38 @@ static void flow_tbl_destroy_rcu_cb(struct rcu_head *rcu)
258 241
259static void table_instance_destroy(struct table_instance *ti, bool deferred) 242static void table_instance_destroy(struct table_instance *ti, bool deferred)
260{ 243{
244 int i;
245
261 if (!ti) 246 if (!ti)
262 return; 247 return;
263 248
249 if (ti->keep_flows)
250 goto skip_flows;
251
252 for (i = 0; i < ti->n_buckets; i++) {
253 struct sw_flow *flow;
254 struct hlist_head *head = flex_array_get(ti->buckets, i);
255 struct hlist_node *n;
256 int ver = ti->node_ver;
257
258 hlist_for_each_entry_safe(flow, n, head, hash_node[ver]) {
259 hlist_del_rcu(&flow->hash_node[ver]);
260 ovs_flow_free(flow, deferred);
261 }
262 }
263
264skip_flows:
264 if (deferred) 265 if (deferred)
265 call_rcu(&ti->rcu, flow_tbl_destroy_rcu_cb); 266 call_rcu(&ti->rcu, flow_tbl_destroy_rcu_cb);
266 else 267 else
267 __table_instance_destroy(ti); 268 __table_instance_destroy(ti);
268} 269}
269 270
270void ovs_flow_tbl_destroy(struct flow_table *table) 271void ovs_flow_tbl_destroy(struct flow_table *table, bool deferred)
271{ 272{
272 struct table_instance *ti = ovsl_dereference(table->ti); 273 struct table_instance *ti = ovsl_dereference(table->ti);
273 274
274 table_instance_destroy(ti, false); 275 table_instance_destroy(ti, deferred);
275} 276}
276 277
277struct sw_flow *ovs_flow_tbl_dump_next(struct table_instance *ti, 278struct sw_flow *ovs_flow_tbl_dump_next(struct table_instance *ti,
@@ -504,16 +505,11 @@ static struct sw_flow_mask *mask_alloc(void)
504 505
505 mask = kmalloc(sizeof(*mask), GFP_KERNEL); 506 mask = kmalloc(sizeof(*mask), GFP_KERNEL);
506 if (mask) 507 if (mask)
507 mask->ref_count = 0; 508 mask->ref_count = 1;
508 509
509 return mask; 510 return mask;
510} 511}
511 512
512static void mask_add_ref(struct sw_flow_mask *mask)
513{
514 mask->ref_count++;
515}
516
517static bool mask_equal(const struct sw_flow_mask *a, 513static bool mask_equal(const struct sw_flow_mask *a,
518 const struct sw_flow_mask *b) 514 const struct sw_flow_mask *b)
519{ 515{
@@ -554,9 +550,11 @@ static int flow_mask_insert(struct flow_table *tbl, struct sw_flow *flow,
554 mask->key = new->key; 550 mask->key = new->key;
555 mask->range = new->range; 551 mask->range = new->range;
556 list_add_rcu(&mask->list, &tbl->mask_list); 552 list_add_rcu(&mask->list, &tbl->mask_list);
553 } else {
554 BUG_ON(!mask->ref_count);
555 mask->ref_count++;
557 } 556 }
558 557
559 mask_add_ref(mask);
560 flow->mask = mask; 558 flow->mask = mask;
561 return 0; 559 return 0;
562} 560}
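After the flow_table.c changes above, a mask starts life with ref_count = 1, an equal existing mask is reused by bumping that count under the ovs lock, and the mask is unlinked and freed only when ovs_flow_free() drops the last reference. A much simplified single-threaded sketch of that reuse/release pattern; the types and names are illustrative, not the Open vSwitch ones.

#include <stdio.h>
#include <stdlib.h>

struct mask {
	struct mask *next;
	unsigned long bits;	/* stand-in for the real match mask */
	int ref_count;
};

static struct mask *mask_list;

/* Reuse an equal mask if one exists, otherwise add a new one with
 * ref_count = 1 - mirroring flow_mask_insert() after the change. */
static struct mask *mask_get(unsigned long bits)
{
	struct mask *m;

	for (m = mask_list; m; m = m->next) {
		if (m->bits == bits) {
			m->ref_count++;
			return m;
		}
	}
	m = calloc(1, sizeof(*m));
	if (!m)
		return NULL;
	m->bits = bits;
	m->ref_count = 1;
	m->next = mask_list;
	mask_list = m;
	return m;
}

/* Drop one reference; unlink and free on the last put, as ovs_flow_free()
 * now does inline while holding the ovs lock. */
static void mask_put(struct mask *m)
{
	struct mask **p;

	if (--m->ref_count)
		return;
	for (p = &mask_list; *p; p = &(*p)->next) {
		if (*p == m) {
			*p = m->next;
			break;
		}
	}
	free(m);
}

int main(void)
{
	struct mask *a = mask_get(0xff00);
	struct mask *b = mask_get(0xff00);	/* reuses a */

	printf("shared=%d ref=%d\n", a == b, a->ref_count);
	mask_put(b);
	mask_put(a);				/* last put unlinks and frees */
	printf("list empty=%d\n", mask_list == NULL);
	return 0;
}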
diff --git a/net/openvswitch/flow_table.h b/net/openvswitch/flow_table.h
index 1996e34c0fd8..baaeb101924d 100644
--- a/net/openvswitch/flow_table.h
+++ b/net/openvswitch/flow_table.h
@@ -60,7 +60,7 @@ void ovs_flow_free(struct sw_flow *, bool deferred);
60 60
61int ovs_flow_tbl_init(struct flow_table *); 61int ovs_flow_tbl_init(struct flow_table *);
62int ovs_flow_tbl_count(struct flow_table *table); 62int ovs_flow_tbl_count(struct flow_table *table);
63void ovs_flow_tbl_destroy(struct flow_table *table); 63void ovs_flow_tbl_destroy(struct flow_table *table, bool deferred);
64int ovs_flow_tbl_flush(struct flow_table *flow_table); 64int ovs_flow_tbl_flush(struct flow_table *flow_table);
65 65
66int ovs_flow_tbl_insert(struct flow_table *table, struct sw_flow *flow, 66int ovs_flow_tbl_insert(struct flow_table *table, struct sw_flow *flow,
diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
index 0f6259a6a932..2b1738ef9394 100644
--- a/net/sctp/ipv6.c
+++ b/net/sctp/ipv6.c
@@ -662,6 +662,8 @@ static struct sock *sctp_v6_create_accept_sk(struct sock *sk,
662 */ 662 */
663 sctp_v6_to_sk_daddr(&asoc->peer.primary_addr, newsk); 663 sctp_v6_to_sk_daddr(&asoc->peer.primary_addr, newsk);
664 664
665 newsk->sk_v6_rcv_saddr = sk->sk_v6_rcv_saddr;
666
665 sk_refcnt_debug_inc(newsk); 667 sk_refcnt_debug_inc(newsk);
666 668
667 if (newsk->sk_prot->init(newsk)) { 669 if (newsk->sk_prot->init(newsk)) {
diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c
index 80a6640f329b..06c6ff0cb911 100644
--- a/net/sunrpc/svc_xprt.c
+++ b/net/sunrpc/svc_xprt.c
@@ -571,7 +571,7 @@ static void svc_check_conn_limits(struct svc_serv *serv)
571 } 571 }
572} 572}
573 573
574int svc_alloc_arg(struct svc_rqst *rqstp) 574static int svc_alloc_arg(struct svc_rqst *rqstp)
575{ 575{
576 struct svc_serv *serv = rqstp->rq_server; 576 struct svc_serv *serv = rqstp->rq_server;
577 struct xdr_buf *arg; 577 struct xdr_buf *arg;
@@ -612,7 +612,7 @@ int svc_alloc_arg(struct svc_rqst *rqstp)
612 return 0; 612 return 0;
613} 613}
614 614
615struct svc_xprt *svc_get_next_xprt(struct svc_rqst *rqstp, long timeout) 615static struct svc_xprt *svc_get_next_xprt(struct svc_rqst *rqstp, long timeout)
616{ 616{
617 struct svc_xprt *xprt; 617 struct svc_xprt *xprt;
618 struct svc_pool *pool = rqstp->rq_pool; 618 struct svc_pool *pool = rqstp->rq_pool;
@@ -691,7 +691,7 @@ struct svc_xprt *svc_get_next_xprt(struct svc_rqst *rqstp, long timeout)
691 return xprt; 691 return xprt;
692} 692}
693 693
694void svc_add_new_temp_xprt(struct svc_serv *serv, struct svc_xprt *newxpt) 694static void svc_add_new_temp_xprt(struct svc_serv *serv, struct svc_xprt *newxpt)
695{ 695{
696 spin_lock_bh(&serv->sv_lock); 696 spin_lock_bh(&serv->sv_lock);
697 set_bit(XPT_TEMP, &newxpt->xpt_flags); 697 set_bit(XPT_TEMP, &newxpt->xpt_flags);
diff --git a/net/wireless/core.c b/net/wireless/core.c
index d89dee2259b5..010892b81a06 100644
--- a/net/wireless/core.c
+++ b/net/wireless/core.c
@@ -203,8 +203,11 @@ void cfg80211_stop_p2p_device(struct cfg80211_registered_device *rdev,
203 203
204 rdev->opencount--; 204 rdev->opencount--;
205 205
206 WARN_ON(rdev->scan_req && rdev->scan_req->wdev == wdev && 206 if (rdev->scan_req && rdev->scan_req->wdev == wdev) {
207 !rdev->scan_req->notified); 207 if (WARN_ON(!rdev->scan_req->notified))
208 rdev->scan_req->aborted = true;
209 ___cfg80211_scan_done(rdev, false);
210 }
208} 211}
209 212
210static int cfg80211_rfkill_set_block(void *data, bool blocked) 213static int cfg80211_rfkill_set_block(void *data, bool blocked)
@@ -440,9 +443,6 @@ int wiphy_register(struct wiphy *wiphy)
440 int i; 443 int i;
441 u16 ifmodes = wiphy->interface_modes; 444 u16 ifmodes = wiphy->interface_modes;
442 445
443 /* support for 5/10 MHz is broken due to nl80211 API mess - disable */
444 wiphy->flags &= ~WIPHY_FLAG_SUPPORTS_5_10_MHZ;
445
446 /* 446 /*
447 * There are major locking problems in nl80211/mac80211 for CSA, 447 * There are major locking problems in nl80211/mac80211 for CSA,
448 * disable for all drivers until this has been reworked. 448 * disable for all drivers until this has been reworked.
@@ -859,8 +859,11 @@ static int cfg80211_netdev_notifier_call(struct notifier_block *nb,
859 break; 859 break;
860 case NETDEV_DOWN: 860 case NETDEV_DOWN:
861 cfg80211_update_iface_num(rdev, wdev->iftype, -1); 861 cfg80211_update_iface_num(rdev, wdev->iftype, -1);
862 WARN_ON(rdev->scan_req && rdev->scan_req->wdev == wdev && 862 if (rdev->scan_req && rdev->scan_req->wdev == wdev) {
863 !rdev->scan_req->notified); 863 if (WARN_ON(!rdev->scan_req->notified))
864 rdev->scan_req->aborted = true;
865 ___cfg80211_scan_done(rdev, false);
866 }
864 867
865 if (WARN_ON(rdev->sched_scan_req && 868 if (WARN_ON(rdev->sched_scan_req &&
866 rdev->sched_scan_req->dev == wdev->netdev)) { 869 rdev->sched_scan_req->dev == wdev->netdev)) {
diff --git a/net/wireless/core.h b/net/wireless/core.h
index 37ec16d7bb1a..f1d193b557b6 100644
--- a/net/wireless/core.h
+++ b/net/wireless/core.h
@@ -62,6 +62,7 @@ struct cfg80211_registered_device {
62 struct rb_root bss_tree; 62 struct rb_root bss_tree;
63 u32 bss_generation; 63 u32 bss_generation;
64 struct cfg80211_scan_request *scan_req; /* protected by RTNL */ 64 struct cfg80211_scan_request *scan_req; /* protected by RTNL */
65 struct sk_buff *scan_msg;
65 struct cfg80211_sched_scan_request *sched_scan_req; 66 struct cfg80211_sched_scan_request *sched_scan_req;
66 unsigned long suspend_at; 67 unsigned long suspend_at;
67 struct work_struct scan_done_wk; 68 struct work_struct scan_done_wk;
@@ -361,7 +362,8 @@ int cfg80211_validate_key_settings(struct cfg80211_registered_device *rdev,
361 struct key_params *params, int key_idx, 362 struct key_params *params, int key_idx,
362 bool pairwise, const u8 *mac_addr); 363 bool pairwise, const u8 *mac_addr);
363void __cfg80211_scan_done(struct work_struct *wk); 364void __cfg80211_scan_done(struct work_struct *wk);
364void ___cfg80211_scan_done(struct cfg80211_registered_device *rdev); 365void ___cfg80211_scan_done(struct cfg80211_registered_device *rdev,
366 bool send_message);
365void __cfg80211_sched_scan_results(struct work_struct *wk); 367void __cfg80211_sched_scan_results(struct work_struct *wk);
366int __cfg80211_stop_sched_scan(struct cfg80211_registered_device *rdev, 368int __cfg80211_stop_sched_scan(struct cfg80211_registered_device *rdev,
367 bool driver_initiated); 369 bool driver_initiated);
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index 7a742594916e..4fe2e6e2bc76 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -1719,9 +1719,10 @@ static int nl80211_dump_wiphy(struct sk_buff *skb, struct netlink_callback *cb)
1719 * We can then retry with the larger buffer. 1719 * We can then retry with the larger buffer.
1720 */ 1720 */
1721 if ((ret == -ENOBUFS || ret == -EMSGSIZE) && 1721 if ((ret == -ENOBUFS || ret == -EMSGSIZE) &&
1722 !skb->len && 1722 !skb->len && !state->split &&
1723 cb->min_dump_alloc < 4096) { 1723 cb->min_dump_alloc < 4096) {
1724 cb->min_dump_alloc = 4096; 1724 cb->min_dump_alloc = 4096;
1725 state->split_start = 0;
1725 rtnl_unlock(); 1726 rtnl_unlock();
1726 return 1; 1727 return 1;
1727 } 1728 }
@@ -5244,7 +5245,7 @@ static int nl80211_trigger_scan(struct sk_buff *skb, struct genl_info *info)
5244 if (!rdev->ops->scan) 5245 if (!rdev->ops->scan)
5245 return -EOPNOTSUPP; 5246 return -EOPNOTSUPP;
5246 5247
5247 if (rdev->scan_req) { 5248 if (rdev->scan_req || rdev->scan_msg) {
5248 err = -EBUSY; 5249 err = -EBUSY;
5249 goto unlock; 5250 goto unlock;
5250 } 5251 }
@@ -10011,40 +10012,31 @@ void nl80211_send_scan_start(struct cfg80211_registered_device *rdev,
10011 NL80211_MCGRP_SCAN, GFP_KERNEL); 10012 NL80211_MCGRP_SCAN, GFP_KERNEL);
10012} 10013}
10013 10014
10014void nl80211_send_scan_done(struct cfg80211_registered_device *rdev, 10015struct sk_buff *nl80211_build_scan_msg(struct cfg80211_registered_device *rdev,
10015 struct wireless_dev *wdev) 10016 struct wireless_dev *wdev, bool aborted)
10016{ 10017{
10017 struct sk_buff *msg; 10018 struct sk_buff *msg;
10018 10019
10019 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); 10020 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
10020 if (!msg) 10021 if (!msg)
10021 return; 10022 return NULL;
10022 10023
10023 if (nl80211_send_scan_msg(msg, rdev, wdev, 0, 0, 0, 10024 if (nl80211_send_scan_msg(msg, rdev, wdev, 0, 0, 0,
10024 NL80211_CMD_NEW_SCAN_RESULTS) < 0) { 10025 aborted ? NL80211_CMD_SCAN_ABORTED :
10026 NL80211_CMD_NEW_SCAN_RESULTS) < 0) {
10025 nlmsg_free(msg); 10027 nlmsg_free(msg);
10026 return; 10028 return NULL;
10027 } 10029 }
10028 10030
10029 genlmsg_multicast_netns(&nl80211_fam, wiphy_net(&rdev->wiphy), msg, 0, 10031 return msg;
10030 NL80211_MCGRP_SCAN, GFP_KERNEL);
10031} 10032}
10032 10033
10033void nl80211_send_scan_aborted(struct cfg80211_registered_device *rdev, 10034void nl80211_send_scan_result(struct cfg80211_registered_device *rdev,
10034 struct wireless_dev *wdev) 10035 struct sk_buff *msg)
10035{ 10036{
10036 struct sk_buff *msg;
10037
10038 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
10039 if (!msg) 10037 if (!msg)
10040 return; 10038 return;
10041 10039
10042 if (nl80211_send_scan_msg(msg, rdev, wdev, 0, 0, 0,
10043 NL80211_CMD_SCAN_ABORTED) < 0) {
10044 nlmsg_free(msg);
10045 return;
10046 }
10047
10048 genlmsg_multicast_netns(&nl80211_fam, wiphy_net(&rdev->wiphy), msg, 0, 10040 genlmsg_multicast_netns(&nl80211_fam, wiphy_net(&rdev->wiphy), msg, 0,
10049 NL80211_MCGRP_SCAN, GFP_KERNEL); 10041 NL80211_MCGRP_SCAN, GFP_KERNEL);
10050} 10042}
diff --git a/net/wireless/nl80211.h b/net/wireless/nl80211.h
index b1b231324e10..75799746d845 100644
--- a/net/wireless/nl80211.h
+++ b/net/wireless/nl80211.h
@@ -8,10 +8,10 @@ void nl80211_exit(void);
8void nl80211_notify_dev_rename(struct cfg80211_registered_device *rdev); 8void nl80211_notify_dev_rename(struct cfg80211_registered_device *rdev);
9void nl80211_send_scan_start(struct cfg80211_registered_device *rdev, 9void nl80211_send_scan_start(struct cfg80211_registered_device *rdev,
10 struct wireless_dev *wdev); 10 struct wireless_dev *wdev);
11void nl80211_send_scan_done(struct cfg80211_registered_device *rdev, 11struct sk_buff *nl80211_build_scan_msg(struct cfg80211_registered_device *rdev,
12 struct wireless_dev *wdev); 12 struct wireless_dev *wdev, bool aborted);
13void nl80211_send_scan_aborted(struct cfg80211_registered_device *rdev, 13void nl80211_send_scan_result(struct cfg80211_registered_device *rdev,
14 struct wireless_dev *wdev); 14 struct sk_buff *msg);
15void nl80211_send_sched_scan(struct cfg80211_registered_device *rdev, 15void nl80211_send_sched_scan(struct cfg80211_registered_device *rdev,
16 struct net_device *netdev, u32 cmd); 16 struct net_device *netdev, u32 cmd);
17void nl80211_send_sched_scan_results(struct cfg80211_registered_device *rdev, 17void nl80211_send_sched_scan_results(struct cfg80211_registered_device *rdev,
diff --git a/net/wireless/scan.c b/net/wireless/scan.c
index b528e31da2cf..d1ed4aebbbb7 100644
--- a/net/wireless/scan.c
+++ b/net/wireless/scan.c
@@ -161,18 +161,25 @@ static void __cfg80211_bss_expire(struct cfg80211_registered_device *dev,
 	dev->bss_generation++;
 }
 
-void ___cfg80211_scan_done(struct cfg80211_registered_device *rdev)
+void ___cfg80211_scan_done(struct cfg80211_registered_device *rdev,
+			   bool send_message)
 {
 	struct cfg80211_scan_request *request;
 	struct wireless_dev *wdev;
+	struct sk_buff *msg;
 #ifdef CONFIG_CFG80211_WEXT
 	union iwreq_data wrqu;
 #endif
 
 	ASSERT_RTNL();
 
-	request = rdev->scan_req;
+	if (rdev->scan_msg) {
+		nl80211_send_scan_result(rdev, rdev->scan_msg);
+		rdev->scan_msg = NULL;
+		return;
+	}
 
+	request = rdev->scan_req;
 	if (!request)
 		return;
 
@@ -186,18 +193,16 @@ void ___cfg80211_scan_done(struct cfg80211_registered_device *rdev)
 	if (wdev->netdev)
 		cfg80211_sme_scan_done(wdev->netdev);
 
-	if (request->aborted) {
-		nl80211_send_scan_aborted(rdev, wdev);
-	} else {
-		if (request->flags & NL80211_SCAN_FLAG_FLUSH) {
-			/* flush entries from previous scans */
-			spin_lock_bh(&rdev->bss_lock);
-			__cfg80211_bss_expire(rdev, request->scan_start);
-			spin_unlock_bh(&rdev->bss_lock);
-		}
-		nl80211_send_scan_done(rdev, wdev);
+	if (!request->aborted &&
+	    request->flags & NL80211_SCAN_FLAG_FLUSH) {
+		/* flush entries from previous scans */
+		spin_lock_bh(&rdev->bss_lock);
+		__cfg80211_bss_expire(rdev, request->scan_start);
+		spin_unlock_bh(&rdev->bss_lock);
 	}
 
+	msg = nl80211_build_scan_msg(rdev, wdev, request->aborted);
+
 #ifdef CONFIG_CFG80211_WEXT
 	if (wdev->netdev && !request->aborted) {
 		memset(&wrqu, 0, sizeof(wrqu));
@@ -211,6 +216,11 @@ void ___cfg80211_scan_done(struct cfg80211_registered_device *rdev)
 
 	rdev->scan_req = NULL;
 	kfree(request);
+
+	if (!send_message)
+		rdev->scan_msg = msg;
+	else
+		nl80211_send_scan_result(rdev, msg);
 }
 
 void __cfg80211_scan_done(struct work_struct *wk)
@@ -221,7 +231,7 @@ void __cfg80211_scan_done(struct work_struct *wk)
 			    scan_done_wk);
 
 	rtnl_lock();
-	___cfg80211_scan_done(rdev);
+	___cfg80211_scan_done(rdev, true);
 	rtnl_unlock();
 }
 
@@ -1079,7 +1089,7 @@ int cfg80211_wext_siwscan(struct net_device *dev,
 	if (IS_ERR(rdev))
 		return PTR_ERR(rdev);
 
-	if (rdev->scan_req) {
+	if (rdev->scan_req || rdev->scan_msg) {
 		err = -EBUSY;
 		goto out;
 	}
@@ -1481,7 +1491,7 @@ int cfg80211_wext_giwscan(struct net_device *dev,
 	if (IS_ERR(rdev))
 		return PTR_ERR(rdev);
 
-	if (rdev->scan_req)
+	if (rdev->scan_req || rdev->scan_msg)
 		return -EAGAIN;
 
 	res = ieee80211_scan_results(rdev, info, extra, data->length);
diff --git a/net/wireless/sme.c b/net/wireless/sme.c
index a63509118508..f04d4c32e96e 100644
--- a/net/wireless/sme.c
+++ b/net/wireless/sme.c
@@ -67,7 +67,7 @@ static int cfg80211_conn_scan(struct wireless_dev *wdev)
 	ASSERT_RDEV_LOCK(rdev);
 	ASSERT_WDEV_LOCK(wdev);
 
-	if (rdev->scan_req)
+	if (rdev->scan_req || rdev->scan_msg)
 		return -EBUSY;
 
 	if (wdev->conn->params.channel)
diff --git a/scripts/checkpatch.pl b/scripts/checkpatch.pl
index 0ea2a1e24ade..464dcef79b35 100755
--- a/scripts/checkpatch.pl
+++ b/scripts/checkpatch.pl
@@ -471,7 +471,7 @@ sub seed_camelcase_includes {
 
 	$camelcase_seeded = 1;
 
-	if (-d ".git") {
+	if (-e ".git") {
 		my $git_last_include_commit = `git log --no-merges --pretty=format:"%h%n" -1 -- include`;
 		chomp $git_last_include_commit;
 		$camelcase_cache = ".checkpatch-camelcase.git.$git_last_include_commit";
@@ -499,7 +499,7 @@ sub seed_camelcase_includes {
 		return;
 	}
 
-	if (-d ".git") {
+	if (-e ".git") {
 		$files = `git ls-files "include/*.h"`;
 		@include_files = split('\n', $files);
 	}
diff --git a/scripts/get_maintainer.pl b/scripts/get_maintainer.pl
index 9c3986f4140c..41987885bd31 100755
--- a/scripts/get_maintainer.pl
+++ b/scripts/get_maintainer.pl
@@ -95,7 +95,7 @@ my %VCS_cmds;
 
 my %VCS_cmds_git = (
     "execute_cmd" => \&git_execute_cmd,
-    "available" => '(which("git") ne "") && (-d ".git")',
+    "available" => '(which("git") ne "") && (-e ".git")',
     "find_signers_cmd" =>
 	"git log --no-color --follow --since=\$email_git_since " .
 	    '--numstat --no-merges ' .
diff --git a/security/Kconfig b/security/Kconfig
index e9c6ac724fef..beb86b500adf 100644
--- a/security/Kconfig
+++ b/security/Kconfig
@@ -103,7 +103,7 @@ config INTEL_TXT
 config LSM_MMAP_MIN_ADDR
 	int "Low address space for LSM to protect from user allocation"
 	depends on SECURITY && SECURITY_SELINUX
-	default 32768 if ARM
+	default 32768 if ARM || (ARM64 && COMPAT)
 	default 65536
 	help
 	  This is the portion of low virtual memory which should be protected
diff --git a/security/selinux/nlmsgtab.c b/security/selinux/nlmsgtab.c
index 332ac8a80cf5..2df7b900e259 100644
--- a/security/selinux/nlmsgtab.c
+++ b/security/selinux/nlmsgtab.c
@@ -17,6 +17,7 @@
 #include <linux/inet_diag.h>
 #include <linux/xfrm.h>
 #include <linux/audit.h>
+#include <linux/sock_diag.h>
 
 #include "flask.h"
 #include "av_permissions.h"
@@ -78,6 +79,7 @@ static struct nlmsg_perm nlmsg_tcpdiag_perms[] =
 {
 	{ TCPDIAG_GETSOCK, NETLINK_TCPDIAG_SOCKET__NLMSG_READ },
 	{ DCCPDIAG_GETSOCK, NETLINK_TCPDIAG_SOCKET__NLMSG_READ },
+	{ SOCK_DIAG_BY_FAMILY, NETLINK_TCPDIAG_SOCKET__NLMSG_READ },
 };
 
 static struct nlmsg_perm nlmsg_xfrm_perms[] =
diff --git a/security/selinux/ss/services.c b/security/selinux/ss/services.c
index c93c21127f0c..5d0144ee8ed6 100644
--- a/security/selinux/ss/services.c
+++ b/security/selinux/ss/services.c
@@ -1232,6 +1232,10 @@ static int security_context_to_sid_core(const char *scontext, u32 scontext_len,
 	struct context context;
 	int rc = 0;
 
+	/* An empty security context is never valid. */
+	if (!scontext_len)
+		return -EINVAL;
+
 	if (!ss_initialized) {
 		int i;
 
diff --git a/sound/pci/hda/patch_analog.c b/sound/pci/hda/patch_analog.c
index 7a426ed491f2..df3652ad15ef 100644
--- a/sound/pci/hda/patch_analog.c
+++ b/sound/pci/hda/patch_analog.c
@@ -244,6 +244,19 @@ static void ad_fixup_inv_jack_detect(struct hda_codec *codec,
 	}
 }
 
+/* Toshiba Satellite L40 implements EAPD in a standard way unlike others */
+static void ad1986a_fixup_eapd(struct hda_codec *codec,
+			       const struct hda_fixup *fix, int action)
+{
+	struct ad198x_spec *spec = codec->spec;
+
+	if (action == HDA_FIXUP_ACT_PRE_PROBE) {
+		codec->inv_eapd = 0;
+		spec->gen.keep_eapd_on = 1;
+		spec->eapd_nid = 0x1b;
+	}
+}
+
 enum {
 	AD1986A_FIXUP_INV_JACK_DETECT,
 	AD1986A_FIXUP_ULTRA,
@@ -251,6 +264,7 @@ enum {
 	AD1986A_FIXUP_3STACK,
 	AD1986A_FIXUP_LAPTOP,
 	AD1986A_FIXUP_LAPTOP_IMIC,
+	AD1986A_FIXUP_EAPD,
 };
 
 static const struct hda_fixup ad1986a_fixups[] = {
@@ -311,6 +325,10 @@ static const struct hda_fixup ad1986a_fixups[] = {
 		.chained_before = 1,
 		.chain_id = AD1986A_FIXUP_LAPTOP,
 	},
+	[AD1986A_FIXUP_EAPD] = {
+		.type = HDA_FIXUP_FUNC,
+		.v.func = ad1986a_fixup_eapd,
+	},
 };
 
 static const struct snd_pci_quirk ad1986a_fixup_tbl[] = {
@@ -318,6 +336,7 @@ static const struct snd_pci_quirk ad1986a_fixup_tbl[] = {
 	SND_PCI_QUIRK_MASK(0x1043, 0xff00, 0x8100, "ASUS P5", AD1986A_FIXUP_3STACK),
 	SND_PCI_QUIRK_MASK(0x1043, 0xff00, 0x8200, "ASUS M2", AD1986A_FIXUP_3STACK),
 	SND_PCI_QUIRK(0x10de, 0xcb84, "ASUS A8N-VM", AD1986A_FIXUP_3STACK),
+	SND_PCI_QUIRK(0x1179, 0xff40, "Toshiba Satellite L40", AD1986A_FIXUP_EAPD),
 	SND_PCI_QUIRK(0x144d, 0xc01e, "FSC V2060", AD1986A_FIXUP_LAPTOP),
 	SND_PCI_QUIRK_MASK(0x144d, 0xff00, 0xc000, "Samsung", AD1986A_FIXUP_SAMSUNG),
 	SND_PCI_QUIRK(0x144d, 0xc027, "Samsung Q1", AD1986A_FIXUP_ULTRA),
@@ -472,6 +491,8 @@ static int ad1983_add_spdif_mux_ctl(struct hda_codec *codec)
 static int patch_ad1983(struct hda_codec *codec)
 {
 	struct ad198x_spec *spec;
+	static hda_nid_t conn_0c[] = { 0x08 };
+	static hda_nid_t conn_0d[] = { 0x09 };
 	int err;
 
 	err = alloc_ad_spec(codec);
@@ -479,8 +500,14 @@ static int patch_ad1983(struct hda_codec *codec)
 		return err;
 	spec = codec->spec;
 
+	spec->gen.mixer_nid = 0x0e;
 	spec->gen.beep_nid = 0x10;
 	set_beep_amp(spec, 0x10, 0, HDA_OUTPUT);
+
+	/* limit the loopback routes not to confuse the parser */
+	snd_hda_override_conn_list(codec, 0x0c, ARRAY_SIZE(conn_0c), conn_0c);
+	snd_hda_override_conn_list(codec, 0x0d, ARRAY_SIZE(conn_0d), conn_0d);
+
 	err = ad198x_parse_auto_config(codec, false);
 	if (err < 0)
 		goto error;
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index 56a8f1876603..d9693ca9546f 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -1821,6 +1821,7 @@ enum {
 	ALC889_FIXUP_IMAC91_VREF,
 	ALC889_FIXUP_MBA11_VREF,
 	ALC889_FIXUP_MBA21_VREF,
+	ALC889_FIXUP_MP11_VREF,
 	ALC882_FIXUP_INV_DMIC,
 	ALC882_FIXUP_NO_PRIMARY_HP,
 	ALC887_FIXUP_ASUS_BASS,
@@ -2190,6 +2191,12 @@ static const struct hda_fixup alc882_fixups[] = {
 		.chained = true,
 		.chain_id = ALC889_FIXUP_MBP_VREF,
 	},
+	[ALC889_FIXUP_MP11_VREF] = {
+		.type = HDA_FIXUP_FUNC,
+		.v.func = alc889_fixup_mba11_vref,
+		.chained = true,
+		.chain_id = ALC885_FIXUP_MACPRO_GPIO,
+	},
 	[ALC882_FIXUP_INV_DMIC] = {
 		.type = HDA_FIXUP_FUNC,
 		.v.func = alc_fixup_inv_dmic_0x12,
@@ -2253,7 +2260,7 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = {
 	SND_PCI_QUIRK(0x106b, 0x00a0, "MacBookPro 3,1", ALC889_FIXUP_MBP_VREF),
 	SND_PCI_QUIRK(0x106b, 0x00a1, "Macbook", ALC889_FIXUP_MBP_VREF),
 	SND_PCI_QUIRK(0x106b, 0x00a4, "MacbookPro 4,1", ALC889_FIXUP_MBP_VREF),
-	SND_PCI_QUIRK(0x106b, 0x0c00, "Mac Pro", ALC885_FIXUP_MACPRO_GPIO),
+	SND_PCI_QUIRK(0x106b, 0x0c00, "Mac Pro", ALC889_FIXUP_MP11_VREF),
 	SND_PCI_QUIRK(0x106b, 0x1000, "iMac 24", ALC885_FIXUP_MACPRO_GPIO),
 	SND_PCI_QUIRK(0x106b, 0x2800, "AppleTV", ALC885_FIXUP_MACPRO_GPIO),
 	SND_PCI_QUIRK(0x106b, 0x2c00, "MacbookPro rev3", ALC889_FIXUP_MBP_VREF),
@@ -4427,6 +4434,9 @@ static void alc269_fill_coef(struct hda_codec *codec)
 
 	if (spec->codec_variant != ALC269_TYPE_ALC269VB)
 		return;
+	/* ALC271X doesn't seem to support these COEFs (bko#52181) */
+	if (!strcmp(codec->chip_name, "ALC271X"))
+		return;
 
 	if ((alc_get_coef0(codec) & 0x00ff) < 0x015) {
 		alc_write_coef_idx(codec, 0xf, 0x960b);
diff --git a/sound/usb/Kconfig b/sound/usb/Kconfig
index de9408b83f75..e05a86b7c0da 100644
--- a/sound/usb/Kconfig
+++ b/sound/usb/Kconfig
@@ -14,6 +14,7 @@ config SND_USB_AUDIO
 	select SND_HWDEP
 	select SND_RAWMIDI
 	select SND_PCM
+	select BITREVERSE
 	help
 	  Say Y here to include support for USB audio and USB MIDI
 	  devices.
diff --git a/tools/perf/builtin-buildid-cache.c b/tools/perf/builtin-buildid-cache.c
index cfede86161d8..b22dbb16f877 100644
--- a/tools/perf/builtin-buildid-cache.c
+++ b/tools/perf/builtin-buildid-cache.c
@@ -63,11 +63,35 @@ static int build_id_cache__kcore_dir(char *dir, size_t sz)
 	return 0;
 }
 
+static bool same_kallsyms_reloc(const char *from_dir, char *to_dir)
+{
+	char from[PATH_MAX];
+	char to[PATH_MAX];
+	const char *name;
+	u64 addr1 = 0, addr2 = 0;
+	int i;
+
+	scnprintf(from, sizeof(from), "%s/kallsyms", from_dir);
+	scnprintf(to, sizeof(to), "%s/kallsyms", to_dir);
+
+	for (i = 0; (name = ref_reloc_sym_names[i]) != NULL; i++) {
+		addr1 = kallsyms__get_function_start(from, name);
+		if (addr1)
+			break;
+	}
+
+	if (name)
+		addr2 = kallsyms__get_function_start(to, name);
+
+	return addr1 == addr2;
+}
+
 static int build_id_cache__kcore_existing(const char *from_dir, char *to_dir,
 					  size_t to_dir_sz)
 {
 	char from[PATH_MAX];
 	char to[PATH_MAX];
+	char to_subdir[PATH_MAX];
 	struct dirent *dent;
 	int ret = -1;
 	DIR *d;
@@ -86,10 +110,11 @@ static int build_id_cache__kcore_existing(const char *from_dir, char *to_dir,
 			continue;
 		scnprintf(to, sizeof(to), "%s/%s/modules", to_dir,
 			  dent->d_name);
-		if (!compare_proc_modules(from, to)) {
-			scnprintf(to, sizeof(to), "%s/%s", to_dir,
-				  dent->d_name);
-			strlcpy(to_dir, to, to_dir_sz);
+		scnprintf(to_subdir, sizeof(to_subdir), "%s/%s",
+			  to_dir, dent->d_name);
+		if (!compare_proc_modules(from, to) &&
+		    same_kallsyms_reloc(from_dir, to_subdir)) {
+			strlcpy(to_dir, to_subdir, to_dir_sz);
 			ret = 0;
 			break;
 		}
diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c
index 3c394bf16fa8..af47531b82ec 100644
--- a/tools/perf/builtin-record.c
+++ b/tools/perf/builtin-record.c
@@ -287,10 +287,7 @@ static void perf_event__synthesize_guest_os(struct machine *machine, void *data)
 	 * have no _text sometimes.
 	 */
 	err = perf_event__synthesize_kernel_mmap(tool, process_synthesized_event,
-						 machine, "_text");
-	if (err < 0)
-		err = perf_event__synthesize_kernel_mmap(tool, process_synthesized_event,
-							 machine, "_stext");
+						 machine);
 	if (err < 0)
 		pr_err("Couldn't record guest kernel [%d]'s reference"
 		       " relocation symbol.\n", machine->pid);
@@ -457,10 +454,7 @@ static int __cmd_record(struct record *rec, int argc, const char **argv)
 	}
 
 	err = perf_event__synthesize_kernel_mmap(tool, process_synthesized_event,
-						 machine, "_text");
-	if (err < 0)
-		err = perf_event__synthesize_kernel_mmap(tool, process_synthesized_event,
-							 machine, "_stext");
+						 machine);
 	if (err < 0)
 		pr_err("Couldn't record kernel reference relocation symbol\n"
 		       "Symbol resolution may be skewed if relocation was used (e.g. kexec).\n"
diff --git a/tools/perf/design.txt b/tools/perf/design.txt
index 67e5d0cace85..63a0e6f04a01 100644
--- a/tools/perf/design.txt
+++ b/tools/perf/design.txt
@@ -454,7 +454,6 @@ So to start with, in order to add HAVE_PERF_EVENTS to your Kconfig, you
 will need at least this:
 	- asm/perf_event.h - a basic stub will suffice at first
 	- support for atomic64 types (and associated helper functions)
-	- set_perf_event_pending() implemented
 
 If your architecture does have hardware capabilities, you can override the
 weak stub hw_perf_event_init() to register hardware counters.
diff --git a/tools/perf/perf.h b/tools/perf/perf.h
index 7daa806d9050..e84fa26bc1be 100644
--- a/tools/perf/perf.h
+++ b/tools/perf/perf.h
@@ -100,8 +100,8 @@
 
 #ifdef __aarch64__
 #define mb()		asm volatile("dmb ish" ::: "memory")
-#define wmb()		asm volatile("dmb ishld" ::: "memory")
-#define rmb()		asm volatile("dmb ishst" ::: "memory")
+#define wmb()		asm volatile("dmb ishst" ::: "memory")
+#define rmb()		asm volatile("dmb ishld" ::: "memory")
 #define cpu_relax()	asm volatile("yield" ::: "memory")
 #endif
 
diff --git a/tools/perf/tests/vmlinux-kallsyms.c b/tools/perf/tests/vmlinux-kallsyms.c
index 2bd13edcbc17..3d9088003a5b 100644
--- a/tools/perf/tests/vmlinux-kallsyms.c
+++ b/tools/perf/tests/vmlinux-kallsyms.c
@@ -26,7 +26,6 @@ int test__vmlinux_matches_kallsyms(void)
 	struct map *kallsyms_map, *vmlinux_map;
 	struct machine kallsyms, vmlinux;
 	enum map_type type = MAP__FUNCTION;
-	struct ref_reloc_sym ref_reloc_sym = { .name = "_stext", };
 	u64 mem_start, mem_end;
 
 	/*
@@ -70,14 +69,6 @@ int test__vmlinux_matches_kallsyms(void)
 	 */
 	kallsyms_map = machine__kernel_map(&kallsyms, type);
 
-	sym = map__find_symbol_by_name(kallsyms_map, ref_reloc_sym.name, NULL);
-	if (sym == NULL) {
-		pr_debug("dso__find_symbol_by_name ");
-		goto out;
-	}
-
-	ref_reloc_sym.addr = UM(sym->start);
-
 	/*
 	 * Step 5:
 	 *
@@ -89,7 +80,6 @@ int test__vmlinux_matches_kallsyms(void)
 	}
 
 	vmlinux_map = machine__kernel_map(&vmlinux, type);
-	map__kmap(vmlinux_map)->ref_reloc_sym = &ref_reloc_sym;
 
 	/*
 	 * Step 6:
diff --git a/tools/perf/util/event.c b/tools/perf/util/event.c
index 1fc1c2f04772..b0f3ca850e9e 100644
--- a/tools/perf/util/event.c
+++ b/tools/perf/util/event.c
@@ -470,23 +470,32 @@ static int find_symbol_cb(void *arg, const char *name, char type,
 	return 1;
 }
 
+u64 kallsyms__get_function_start(const char *kallsyms_filename,
+				 const char *symbol_name)
+{
+	struct process_symbol_args args = { .name = symbol_name, };
+
+	if (kallsyms__parse(kallsyms_filename, &args, find_symbol_cb) <= 0)
+		return 0;
+
+	return args.start;
+}
+
 int perf_event__synthesize_kernel_mmap(struct perf_tool *tool,
 				       perf_event__handler_t process,
-				       struct machine *machine,
-				       const char *symbol_name)
+				       struct machine *machine)
 {
 	size_t size;
-	const char *filename, *mmap_name;
-	char path[PATH_MAX];
+	const char *mmap_name;
 	char name_buff[PATH_MAX];
 	struct map *map;
+	struct kmap *kmap;
 	int err;
 	/*
 	 * We should get this from /sys/kernel/sections/.text, but till that is
 	 * available use this, and after it is use this as a fallback for older
 	 * kernels.
 	 */
-	struct process_symbol_args args = { .name = symbol_name, };
 	union perf_event *event = zalloc((sizeof(event->mmap) +
 					  machine->id_hdr_size));
 	if (event == NULL) {
@@ -502,30 +511,19 @@ int perf_event__synthesize_kernel_mmap(struct perf_tool *tool,
 		 * see kernel/perf_event.c __perf_event_mmap
 		 */
 		event->header.misc = PERF_RECORD_MISC_KERNEL;
-		filename = "/proc/kallsyms";
 	} else {
 		event->header.misc = PERF_RECORD_MISC_GUEST_KERNEL;
-		if (machine__is_default_guest(machine))
-			filename = (char *) symbol_conf.default_guest_kallsyms;
-		else {
-			sprintf(path, "%s/proc/kallsyms", machine->root_dir);
-			filename = path;
-		}
-	}
-
-	if (kallsyms__parse(filename, &args, find_symbol_cb) <= 0) {
-		free(event);
-		return -ENOENT;
 	}
 
 	map = machine->vmlinux_maps[MAP__FUNCTION];
+	kmap = map__kmap(map);
 	size = snprintf(event->mmap.filename, sizeof(event->mmap.filename),
-			"%s%s", mmap_name, symbol_name) + 1;
+			"%s%s", mmap_name, kmap->ref_reloc_sym->name) + 1;
 	size = PERF_ALIGN(size, sizeof(u64));
 	event->mmap.header.type = PERF_RECORD_MMAP;
 	event->mmap.header.size = (sizeof(event->mmap) -
 			(sizeof(event->mmap.filename) - size) + machine->id_hdr_size);
-	event->mmap.pgoff = args.start;
+	event->mmap.pgoff = kmap->ref_reloc_sym->addr;
 	event->mmap.start = map->start;
 	event->mmap.len   = map->end - event->mmap.start;
 	event->mmap.pid   = machine->pid;
diff --git a/tools/perf/util/event.h b/tools/perf/util/event.h
index faf6e219be21..851fa06f4a42 100644
--- a/tools/perf/util/event.h
+++ b/tools/perf/util/event.h
@@ -214,8 +214,7 @@ int perf_event__synthesize_threads(struct perf_tool *tool,
 				      struct machine *machine, bool mmap_data);
 int perf_event__synthesize_kernel_mmap(struct perf_tool *tool,
 				       perf_event__handler_t process,
-				       struct machine *machine,
-				       const char *symbol_name);
+				       struct machine *machine);
 
 int perf_event__synthesize_modules(struct perf_tool *tool,
 				   perf_event__handler_t process,
@@ -279,4 +278,7 @@ size_t perf_event__fprintf_mmap2(union perf_event *event, FILE *fp);
 size_t perf_event__fprintf_task(union perf_event *event, FILE *fp);
 size_t perf_event__fprintf(union perf_event *event, FILE *fp);
 
+u64 kallsyms__get_function_start(const char *kallsyms_filename,
+				 const char *symbol_name);
+
 #endif /* __PERF_RECORD_H */
diff --git a/tools/perf/util/include/asm/hash.h b/tools/perf/util/include/asm/hash.h
new file mode 100644
index 000000000000..d82b170bb216
--- /dev/null
+++ b/tools/perf/util/include/asm/hash.h
@@ -0,0 +1,6 @@
+#ifndef __ASM_GENERIC_HASH_H
+#define __ASM_GENERIC_HASH_H
+
+/* Stub */
+
+#endif /* __ASM_GENERIC_HASH_H */
diff --git a/tools/perf/util/machine.c b/tools/perf/util/machine.c
index ded74590b92f..c872991e0f65 100644
--- a/tools/perf/util/machine.c
+++ b/tools/perf/util/machine.c
@@ -496,19 +496,22 @@ static int symbol__in_kernel(void *arg, const char *name,
 	return 1;
 }
 
+static void machine__get_kallsyms_filename(struct machine *machine, char *buf,
+					   size_t bufsz)
+{
+	if (machine__is_default_guest(machine))
+		scnprintf(buf, bufsz, "%s", symbol_conf.default_guest_kallsyms);
+	else
+		scnprintf(buf, bufsz, "%s/proc/kallsyms", machine->root_dir);
+}
+
 /* Figure out the start address of kernel map from /proc/kallsyms */
 static u64 machine__get_kernel_start_addr(struct machine *machine)
 {
-	const char *filename;
-	char path[PATH_MAX];
+	char filename[PATH_MAX];
 	struct process_args args;
 
-	if (machine__is_default_guest(machine))
-		filename = (char *)symbol_conf.default_guest_kallsyms;
-	else {
-		sprintf(path, "%s/proc/kallsyms", machine->root_dir);
-		filename = path;
-	}
+	machine__get_kallsyms_filename(machine, filename, PATH_MAX);
 
 	if (symbol__restricted_filename(filename, "/proc/kallsyms"))
 		return 0;
@@ -829,9 +832,25 @@ static int machine__create_modules(struct machine *machine)
 	return 0;
 }
 
+const char *ref_reloc_sym_names[] = {"_text", "_stext", NULL};
+
 int machine__create_kernel_maps(struct machine *machine)
 {
 	struct dso *kernel = machine__get_kernel(machine);
+	char filename[PATH_MAX];
+	const char *name;
+	u64 addr = 0;
+	int i;
+
+	machine__get_kallsyms_filename(machine, filename, PATH_MAX);
+
+	for (i = 0; (name = ref_reloc_sym_names[i]) != NULL; i++) {
+		addr = kallsyms__get_function_start(filename, name);
+		if (addr)
+			break;
+	}
+	if (!addr)
+		return -1;
 
 	if (kernel == NULL ||
 	    __machine__create_kernel_maps(machine, kernel) < 0)
@@ -850,6 +869,13 @@ int machine__create_kernel_maps(struct machine *machine)
 	 * Now that we have all the maps created, just set the ->end of them:
 	 */
 	map_groups__fixup_end(&machine->kmaps);
+
+	if (maps__set_kallsyms_ref_reloc_sym(machine->vmlinux_maps, name,
+					     addr)) {
+		machine__destroy_kernel_maps(machine);
+		return -1;
+	}
+
 	return 0;
 }
 
diff --git a/tools/perf/util/machine.h b/tools/perf/util/machine.h
index 477133015440..f77e91e483dc 100644
--- a/tools/perf/util/machine.h
+++ b/tools/perf/util/machine.h
@@ -18,6 +18,8 @@ union perf_event;
 #define	HOST_KERNEL_ID			(-1)
 #define	DEFAULT_GUEST_KERNEL_ID		(0)
 
+extern const char *ref_reloc_sym_names[];
+
 struct machine {
 	struct rb_node	  rb_node;
 	pid_t		  pid;
diff --git a/tools/perf/util/map.c b/tools/perf/util/map.c
index 3b97513f0e77..39cd2d0faff6 100644
--- a/tools/perf/util/map.c
+++ b/tools/perf/util/map.c
@@ -39,6 +39,7 @@ void map__init(struct map *map, enum map_type type,
 	map->start    = start;
 	map->end      = end;
 	map->pgoff    = pgoff;
+	map->reloc    = 0;
 	map->dso      = dso;
 	map->map_ip   = map__map_ip;
 	map->unmap_ip = map__unmap_ip;
@@ -288,7 +289,7 @@ u64 map__rip_2objdump(struct map *map, u64 rip)
 	if (map->dso->rel)
 		return rip - map->pgoff;
 
-	return map->unmap_ip(map, rip);
+	return map->unmap_ip(map, rip) - map->reloc;
 }
 
 /**
@@ -311,7 +312,7 @@ u64 map__objdump_2mem(struct map *map, u64 ip)
 	if (map->dso->rel)
 		return map->unmap_ip(map, ip + map->pgoff);
 
-	return ip;
+	return ip + map->reloc;
 }
 
 void map_groups__init(struct map_groups *mg)
diff --git a/tools/perf/util/map.h b/tools/perf/util/map.h
index 18068c6b71c1..257e513205ce 100644
--- a/tools/perf/util/map.h
+++ b/tools/perf/util/map.h
@@ -36,6 +36,7 @@ struct map {
 	bool			erange_warned;
 	u32			priv;
 	u64			pgoff;
+	u64			reloc;
 	u32			maj, min; /* only valid for MMAP2 record */
 	u64			ino;      /* only valid for MMAP2 record */
 	u64			ino_generation;/* only valid for MMAP2 record */
diff --git a/tools/perf/util/symbol-elf.c b/tools/perf/util/symbol-elf.c
index 759456728703..3e9f336740fa 100644
--- a/tools/perf/util/symbol-elf.c
+++ b/tools/perf/util/symbol-elf.c
@@ -751,6 +751,8 @@ int dso__load_sym(struct dso *dso, struct map *map,
 			if (strcmp(elf_name, kmap->ref_reloc_sym->name))
 				continue;
 			kmap->ref_reloc_sym->unrelocated_addr = sym.st_value;
+			map->reloc = kmap->ref_reloc_sym->addr -
+				     kmap->ref_reloc_sym->unrelocated_addr;
 			break;
 		}
 	}
@@ -922,6 +924,7 @@ int dso__load_sym(struct dso *dso, struct map *map,
 				  (u64)shdr.sh_offset);
 			sym.st_value -= shdr.sh_addr - shdr.sh_offset;
 		}
+new_symbol:
 		/*
 		 * We need to figure out if the object was created from C++ sources
 		 * DWARF DW_compile_unit has this, but we don't always have access
@@ -933,7 +936,6 @@ int dso__load_sym(struct dso *dso, struct map *map,
 			if (demangled != NULL)
 				elf_name = demangled;
 		}
-new_symbol:
 		f = symbol__new(sym.st_value, sym.st_size,
 				GELF_ST_BIND(sym.st_info), elf_name);
 		free(demangled);
diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c
index 39ce9adbaaf0..a9d758a3b371 100644
--- a/tools/perf/util/symbol.c
+++ b/tools/perf/util/symbol.c
@@ -627,7 +627,7 @@ static int dso__split_kallsyms_for_kcore(struct dso *dso, struct map *map,
  * kernel range is broken in several maps, named [kernel].N, as we don't have
  * the original ELF section names vmlinux have.
  */
-static int dso__split_kallsyms(struct dso *dso, struct map *map,
+static int dso__split_kallsyms(struct dso *dso, struct map *map, u64 delta,
 			       symbol_filter_t filter)
 {
 	struct map_groups *kmaps = map__kmap(map)->kmaps;
@@ -692,6 +692,12 @@ static int dso__split_kallsyms(struct dso *dso, struct map *map,
 			char dso_name[PATH_MAX];
 			struct dso *ndso;
 
+			if (delta) {
+				/* Kernel was relocated at boot time */
+				pos->start -= delta;
+				pos->end -= delta;
+			}
+
 			if (count == 0) {
 				curr_map = map;
 				goto filter_symbol;
@@ -721,6 +727,10 @@ static int dso__split_kallsyms(struct dso *dso, struct map *map,
 			curr_map->map_ip = curr_map->unmap_ip = identity__map_ip;
 			map_groups__insert(kmaps, curr_map);
 			++kernel_range;
+		} else if (delta) {
+			/* Kernel was relocated at boot time */
+			pos->start -= delta;
+			pos->end -= delta;
 		}
 filter_symbol:
 		if (filter && filter(curr_map, pos)) {
@@ -976,6 +986,23 @@ static int validate_kcore_modules(const char *kallsyms_filename,
 	return 0;
 }
 
+static int validate_kcore_addresses(const char *kallsyms_filename,
+				    struct map *map)
+{
+	struct kmap *kmap = map__kmap(map);
+
+	if (kmap->ref_reloc_sym && kmap->ref_reloc_sym->name) {
+		u64 start;
+
+		start = kallsyms__get_function_start(kallsyms_filename,
+						     kmap->ref_reloc_sym->name);
+		if (start != kmap->ref_reloc_sym->addr)
+			return -EINVAL;
+	}
+
+	return validate_kcore_modules(kallsyms_filename, map);
+}
+
 struct kcore_mapfn_data {
 	struct dso *dso;
 	enum map_type type;
@@ -1019,8 +1046,8 @@ static int dso__load_kcore(struct dso *dso, struct map *map,
 				      kallsyms_filename))
 		return -EINVAL;
 
-	/* All modules must be present at their original addresses */
-	if (validate_kcore_modules(kallsyms_filename, map))
+	/* Modules and kernel must be present at their original addresses */
+	if (validate_kcore_addresses(kallsyms_filename, map))
 		return -EINVAL;
 
 	md.dso = dso;
@@ -1113,15 +1140,41 @@ out_err:
 	return -EINVAL;
 }
 
+/*
+ * If the kernel is relocated at boot time, kallsyms won't match. Compute the
+ * delta based on the relocation reference symbol.
+ */
+static int kallsyms__delta(struct map *map, const char *filename, u64 *delta)
+{
+	struct kmap *kmap = map__kmap(map);
+	u64 addr;
+
+	if (!kmap->ref_reloc_sym || !kmap->ref_reloc_sym->name)
+		return 0;
+
+	addr = kallsyms__get_function_start(filename,
+					    kmap->ref_reloc_sym->name);
+	if (!addr)
+		return -1;
+
+	*delta = addr - kmap->ref_reloc_sym->addr;
+	return 0;
+}
+
 int dso__load_kallsyms(struct dso *dso, const char *filename,
 		       struct map *map, symbol_filter_t filter)
 {
+	u64 delta = 0;
+
 	if (symbol__restricted_filename(filename, "/proc/kallsyms"))
 		return -1;
 
 	if (dso__load_all_kallsyms(dso, filename, map) < 0)
 		return -1;
 
+	if (kallsyms__delta(map, filename, &delta))
+		return -1;
+
 	symbols__fixup_duplicate(&dso->symbols[map->type]);
 	symbols__fixup_end(&dso->symbols[map->type]);
 
@@ -1133,7 +1186,7 @@ int dso__load_kallsyms(struct dso *dso, const char *filename,
 	if (!dso__load_kcore(dso, map, filename))
 		return dso__split_kallsyms_for_kcore(dso, map, filter);
 	else
-		return dso__split_kallsyms(dso, map, filter);
+		return dso__split_kallsyms(dso, map, delta, filter);
 }
 
 static int dso__load_perf_map(struct dso *dso, struct map *map,
@@ -1424,7 +1477,7 @@ static int find_matching_kcore(struct map *map, char *dir, size_t dir_sz)
 			continue;
 		scnprintf(kallsyms_filename, sizeof(kallsyms_filename),
 			  "%s/%s/kallsyms", dir, dent->d_name);
-		if (!validate_kcore_modules(kallsyms_filename, map)) {
+		if (!validate_kcore_addresses(kallsyms_filename, map)) {
 			strlcpy(dir, kallsyms_filename, dir_sz);
 			ret = 0;
 			break;
@@ -1479,7 +1532,7 @@ static char *dso__find_kallsyms(struct dso *dso, struct map *map)
 	if (fd != -1) {
 		close(fd);
 		/* If module maps match go with /proc/kallsyms */
-		if (!validate_kcore_modules("/proc/kallsyms", map))
+		if (!validate_kcore_addresses("/proc/kallsyms", map))
 			goto proc_kallsyms;
 	}
 