author		David S. Miller <davem@davemloft.net>	2014-01-06 17:37:45 -0500
committer	David S. Miller <davem@davemloft.net>	2014-01-06 17:37:45 -0500
commit		56a4342dfe3145cd66f766adccb28fd9b571606d (patch)
tree		d1593764488ff8cbb0b83cb9ae35fd968bf81760
parent		805c1f4aedaba1bc8d839e7c27b128083dd5c2f0 (diff)
parent		fe0d692bbc645786bce1a98439e548ae619269f5 (diff)

Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

Conflicts:
	drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c
	net/ipv6/ip6_tunnel.c
	net/ipv6/ip6_vti.c

ipv6 tunnel statistic bug fixes conflicting with consolidation into
generic sw per-cpu net stats.

qlogic conflict between queue counting bug fix and the addition of
multiple MAC address support.

Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--  Documentation/block/null_blk.txt | 72
-rw-r--r--  Documentation/kernel-parameters.txt | 2
-rw-r--r--  Documentation/module-signing.txt | 240
-rw-r--r--  MAINTAINERS | 16
-rw-r--r--  Makefile | 24
-rw-r--r--  arch/arc/include/uapi/asm/unistd.h | 8
-rw-r--r--  arch/arm/boot/dts/r8a7790.dtsi | 28
-rw-r--r--  arch/arm/mach-omap2/board-ldp.c | 7
-rw-r--r--  arch/arm/mach-omap2/display.c | 38
-rw-r--r--  arch/arm/mach-omap2/omap_hwmod_2xxx_ipblock_data.c | 4
-rw-r--r--  arch/arm/mach-omap2/omap_hwmod_3xxx_data.c | 6
-rw-r--r--  arch/arm/mach-omap2/omap_hwmod_7xx_data.c | 2
-rw-r--r--  arch/arm/mach-pxa/include/mach/lubbock.h | 2
-rw-r--r--  arch/arm/mach-s3c64xx/mach-s3c64xx-dt.c | 11
-rw-r--r--  arch/arm/mach-shmobile/board-armadillo800eva.c | 7
-rw-r--r--  arch/arm/mach-shmobile/board-bockw.c | 2
-rw-r--r--  arch/arm/mach-shmobile/board-lager.c | 4
-rw-r--r--  arch/arm/xen/enlighten.c | 6
-rw-r--r--  arch/arm64/include/asm/xen/page-coherent.h | 4
-rw-r--r--  arch/arm64/kernel/ptrace.c | 38
-rw-r--r--  arch/powerpc/boot/dts/mpc5125twr.dts | 6
-rw-r--r--  arch/powerpc/include/asm/exception-64s.h | 2
-rw-r--r--  arch/powerpc/include/asm/kvm_book3s.h | 4
-rw-r--r--  arch/powerpc/include/asm/kvm_book3s_asm.h | 2
-rw-r--r--  arch/powerpc/include/asm/switch_to.h | 2
-rw-r--r--  arch/powerpc/include/asm/unaligned.h | 7
-rw-r--r--  arch/powerpc/kernel/asm-offsets.c | 1
-rw-r--r--  arch/powerpc/kernel/head_64.S | 2
-rw-r--r--  arch/powerpc/kernel/process.c | 32
-rw-r--r--  arch/powerpc/kvm/book3s_64_mmu_hv.c | 18
-rw-r--r--  arch/powerpc/kvm/book3s_hv.c | 24
-rw-r--r--  arch/powerpc/kvm/book3s_hv_rm_mmu.c | 9
-rw-r--r--  arch/powerpc/kvm/book3s_hv_rmhandlers.S | 23
-rw-r--r--  arch/powerpc/kvm/book3s_interrupts.S | 19
-rw-r--r--  arch/powerpc/kvm/book3s_pr.c | 22
-rw-r--r--  arch/powerpc/kvm/book3s_rmhandlers.S | 6
-rw-r--r--  arch/powerpc/kvm/booke.c | 12
-rw-r--r--  arch/powerpc/lib/copyuser_64.S | 53
-rw-r--r--  arch/powerpc/platforms/powernv/eeh-ioda.c | 20
-rw-r--r--  arch/powerpc/platforms/powernv/pci.h | 4
-rw-r--r--  arch/sh/lib/Makefile | 2
-rw-r--r--  arch/sparc/include/asm/pgtable_64.h | 4
-rw-r--r--  arch/x86/include/asm/pgtable.h | 11
-rw-r--r--  arch/x86/kernel/cpu/intel.c | 3
-rw-r--r--  arch/x86/mm/gup.c | 13
-rw-r--r--  block/blk-mq-sysfs.c | 13
-rw-r--r--  drivers/acpi/Kconfig | 1
-rw-r--r--  drivers/acpi/acpi_lpss.c | 1
-rw-r--r--  drivers/acpi/apei/Kconfig | 1
-rw-r--r--  drivers/acpi/apei/erst.c | 1
-rw-r--r--  drivers/ata/ahci.c | 18
-rw-r--r--  drivers/ata/ahci_imx.c | 3
-rw-r--r--  drivers/ata/libata-core.c | 19
-rw-r--r--  drivers/ata/libata-scsi.c | 21
-rw-r--r--  drivers/block/null_blk.c | 102
-rw-r--r--  drivers/block/skd_main.c | 4
-rw-r--r--  drivers/bluetooth/ath3k.c | 2
-rw-r--r--  drivers/bluetooth/btusb.c | 1
-rw-r--r--  drivers/cpufreq/cpufreq.c | 69
-rw-r--r--  drivers/dma/Kconfig | 7
-rw-r--r--  drivers/dma/at_hdmac_regs.h | 4
-rw-r--r--  drivers/dma/dmaengine.c | 4
-rw-r--r--  drivers/dma/dmatest.c | 8
-rw-r--r--  drivers/dma/fsldma.c | 31
-rw-r--r--  drivers/dma/mv_xor.c | 101
-rw-r--r--  drivers/dma/pl330.c | 5
-rw-r--r--  drivers/dma/ppc4xx/adma.c | 27
-rw-r--r--  drivers/dma/txx9dmac.c | 1
-rw-r--r--  drivers/firewire/sbp2.c | 1
-rw-r--r--  drivers/firmware/Makefile | 1
-rw-r--r--  drivers/firmware/efi/Kconfig | 6
-rw-r--r--  drivers/firmware/efi/Makefile | 2
-rw-r--r--  drivers/firmware/efi/efi-pstore.c | 1
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c | 34
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_execbuffer.c | 28
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c | 7
-rw-r--r--  drivers/gpu/drm/i915/intel_pm.c | 14
-rw-r--r--  drivers/gpu/drm/qxl/Kconfig | 1
-rw-r--r--  drivers/gpu/drm/qxl/qxl_display.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/dce6_afmt.c | 8
-rw-r--r--  drivers/gpu/drm/radeon/evergreen_hdmi.c | 4
-rw-r--r--  drivers/gpu/drm/radeon/ni.c | 20
-rw-r--r--  drivers/gpu/drm/radeon/rv770_dpm.c | 6
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo_util.c | 3
-rw-r--r--  drivers/idle/intel_idle.c | 3
-rw-r--r--  drivers/iio/adc/ad7887.c | 16
-rw-r--r--  drivers/iio/imu/adis16400_core.c | 7
-rw-r--r--  drivers/iio/light/cm36651.c | 2
-rw-r--r--  drivers/infiniband/core/iwcm.c | 11
-rw-r--r--  drivers/infiniband/core/uverbs.h | 10
-rw-r--r--  drivers/infiniband/core/uverbs_cmd.c | 17
-rw-r--r--  drivers/infiniband/core/uverbs_main.c | 27
-rw-r--r--  drivers/infiniband/hw/cxgb4/cm.c | 78
-rw-r--r--  drivers/infiniband/hw/cxgb4/mem.c | 2
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_netlink.c | 3
-rw-r--r--  drivers/infiniband/ulp/isert/ib_isert.c | 22
-rw-r--r--  drivers/irqchip/irq-renesas-intc-irqpin.c | 8
-rw-r--r--  drivers/isdn/hisax/hfc_pci.c | 4
-rw-r--r--  drivers/isdn/hisax/telespci.c | 4
-rw-r--r--  drivers/md/bcache/alloc.c | 2
-rw-r--r--  drivers/md/bcache/bcache.h | 12
-rw-r--r--  drivers/md/bcache/btree.c | 27
-rw-r--r--  drivers/md/bcache/movinggc.c | 21
-rw-r--r--  drivers/md/bcache/super.c | 2
-rw-r--r--  drivers/md/bcache/sysfs.c | 50
-rw-r--r--  drivers/md/bcache/util.c | 8
-rw-r--r--  drivers/md/bcache/util.h | 2
-rw-r--r--  drivers/md/bcache/writeback.c | 53
-rw-r--r--  drivers/net/bonding/bond_3ad.c | 45
-rw-r--r--  drivers/net/ethernet/arc/emac_main.c | 4
-rw-r--r--  drivers/net/ethernet/atheros/atl1c/atl1c_main.c | 8
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x.h | 7
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c | 94
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c | 6
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h | 1
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c | 15
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h | 7
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c | 254
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h | 1
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c | 28
-rw-r--r--  drivers/net/ethernet/broadcom/tg3.c | 2
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4.h | 21
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c | 59
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h | 9
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/l2t.c | 35
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/l2t.h | 3
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/sge.c | 2
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/t4_hw.c | 103
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/t4_regs.h | 73
-rw-r--r--  drivers/net/ethernet/emulex/benet/be.h | 3
-rw-r--r--  drivers/net/ethernet/emulex/benet/be_cmds.c | 33
-rw-r--r--  drivers/net/ethernet/emulex/benet/be_main.c | 29
-rw-r--r--  drivers/net/ethernet/freescale/fec_main.c | 4
-rw-r--r--  drivers/net/ethernet/intel/e1000e/80003es2lan.c | 7
-rw-r--r--  drivers/net/ethernet/intel/e1000e/netdev.c | 4
-rw-r--r--  drivers/net/ethernet/intel/e1000e/phy.c | 10
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c | 2
-rw-r--r--  drivers/net/ethernet/marvell/mvmdio.c | 6
-rw-r--r--  drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c | 4
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic.h | 5
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c | 7
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c | 4
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c | 12
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c | 4
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c | 7
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 20
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c | 4
-rw-r--r--  drivers/net/ethernet/ti/cpsw.c | 4
-rw-r--r--  drivers/net/hamradio/hdlcdrv.c | 2
-rw-r--r--  drivers/net/hamradio/yam.c | 1
-rw-r--r--  drivers/net/hyperv/netvsc_drv.c | 20
-rw-r--r--  drivers/net/macvlan.c | 16
-rw-r--r--  drivers/net/phy/phy.c | 6
-rw-r--r--  drivers/net/usb/Kconfig | 6
-rw-r--r--  drivers/net/usb/dm9601.c | 44
-rw-r--r--  drivers/net/usb/hso.c | 13
-rw-r--r--  drivers/net/usb/mcs7830.c | 19
-rw-r--r--  drivers/net/virtio_net.c | 11
-rw-r--r--  drivers/net/vxlan.c | 3
-rw-r--r--  drivers/net/wireless/ath/ath9k/ar9002_mac.c | 52
-rw-r--r--  drivers/net/wireless/ath/ath9k/htc_drv_main.c | 25
-rw-r--r--  drivers/net/wireless/ath/ath9k/main.c | 5
-rw-r--r--  drivers/net/wireless/rtlwifi/pci.c | 4
-rw-r--r--  drivers/net/xen-netback/common.h | 19
-rw-r--r--  drivers/net/xen-netback/interface.c | 11
-rw-r--r--  drivers/net/xen-netback/netback.c | 18
-rw-r--r--  drivers/of/Kconfig | 2
-rw-r--r--  drivers/of/address.c | 8
-rw-r--r--  drivers/of/fdt.c | 12
-rw-r--r--  drivers/of/irq.c | 5
-rw-r--r--  drivers/phy/Kconfig | 4
-rw-r--r--  drivers/phy/phy-core.c | 26
-rw-r--r--  drivers/pinctrl/pinctrl-baytrail.c | 1
-rw-r--r--  drivers/powercap/intel_rapl.c | 13
-rw-r--r--  drivers/scsi/qla2xxx/qla_target.c | 10
-rw-r--r--  drivers/staging/comedi/drivers.c | 2
-rw-r--r--  drivers/staging/comedi/drivers/8255_pci.c | 15
-rw-r--r--  drivers/staging/iio/magnetometer/hmc5843.c | 7
-rw-r--r--  drivers/staging/imx-drm/imx-drm-core.c | 39
-rw-r--r--  drivers/staging/imx-drm/imx-tve.c | 9
-rw-r--r--  drivers/staging/imx-drm/ipu-v3/ipu-common.c | 32
-rw-r--r--  drivers/target/iscsi/iscsi_target.c | 27
-rw-r--r--  drivers/target/iscsi/iscsi_target_configfs.c | 3
-rw-r--r--  drivers/target/iscsi/iscsi_target_login.c | 6
-rw-r--r--  drivers/target/target_core_device.c | 5
-rw-r--r--  drivers/target/target_core_file.c | 8
-rw-r--r--  drivers/target/target_core_file.h | 5
-rw-r--r--  drivers/target/target_core_tpg.c | 10
-rw-r--r--  drivers/tty/n_tty.c | 7
-rw-r--r--  drivers/tty/serial/8250/8250_dw.c | 8
-rw-r--r--  drivers/tty/serial/xilinx_uartps.c | 2
-rw-r--r--  drivers/tty/tty_ldsem.c | 16
-rw-r--r--  drivers/usb/chipidea/core.c | 4
-rw-r--r--  drivers/usb/chipidea/host.c | 3
-rw-r--r--  drivers/usb/chipidea/udc.c | 3
-rw-r--r--  drivers/usb/class/cdc-wdm.c | 8
-rw-r--r--  drivers/usb/dwc3/core.c | 8
-rw-r--r--  drivers/usb/host/ohci-at91.c | 26
-rw-r--r--  drivers/usb/host/xhci-pci.c | 7
-rw-r--r--  drivers/usb/phy/Kconfig | 4
-rw-r--r--  drivers/usb/phy/phy-tegra-usb.c | 2
-rw-r--r--  drivers/usb/phy/phy-twl6030-usb.c | 3
-rw-r--r--  drivers/usb/serial/option.c | 2
-rw-r--r--  drivers/usb/serial/zte_ev.c | 3
-rw-r--r--  drivers/virtio/virtio_balloon.c | 2
-rw-r--r--  drivers/xen/balloon.c | 63
-rw-r--r--  drivers/xen/grant-table.c | 3
-rw-r--r--  drivers/xen/privcmd.c | 9
-rw-r--r--  fs/aio.c | 113
-rw-r--r--  fs/ext2/super.c | 1
-rw-r--r--  fs/ext4/ext4.h | 10
-rw-r--r--  fs/ext4/ext4_jbd2.c | 9
-rw-r--r--  fs/ext4/extents.c | 45
-rw-r--r--  fs/ext4/inode.c | 12
-rw-r--r--  fs/ext4/mballoc.c | 17
-rw-r--r--  fs/ext4/super.c | 21
-rw-r--r--  fs/jbd2/journal.c | 18
-rw-r--r--  fs/jbd2/recovery.c | 2
-rw-r--r--  fs/jbd2/transaction.c | 16
-rw-r--r--  fs/pstore/platform.c | 7
-rw-r--r--  fs/sysfs/file.c | 8
-rw-r--r--  fs/xfs/xfs_bmap.c | 32
-rw-r--r--  fs/xfs/xfs_bmap_util.c | 14
-rw-r--r--  fs/xfs/xfs_buf.c | 37
-rw-r--r--  fs/xfs/xfs_buf.h | 11
-rw-r--r--  fs/xfs/xfs_buf_item.c | 21
-rw-r--r--  fs/xfs/xfs_dir2_node.c | 26
-rw-r--r--  fs/xfs/xfs_iops.c | 3
-rw-r--r--  fs/xfs/xfs_log_recover.c | 13
-rw-r--r--  fs/xfs/xfs_qm.c | 80
-rw-r--r--  fs/xfs/xfs_trans_buf.c | 13
-rw-r--r--  include/asm-generic/pgtable.h | 7
-rw-r--r--  include/linux/auxvec.h | 2
-rw-r--r--  include/linux/libata.h | 1
-rw-r--r--  include/linux/lockref.h | 2
-rw-r--r--  include/linux/migrate.h | 12
-rw-r--r--  include/linux/mm.h | 6
-rw-r--r--  include/linux/mm_types.h | 52
-rw-r--r--  include/linux/netdevice.h | 22
-rw-r--r--  include/linux/percpu-defs.h | 1
-rw-r--r--  include/linux/pstore.h | 3
-rw-r--r--  include/linux/reboot.h | 1
-rw-r--r--  include/linux/rtnetlink.h | 5
-rw-r--r--  include/linux/skbuff.h | 9
-rw-r--r--  include/net/llc_pdu.h | 2
-rw-r--r--  include/net/sctp/structs.h | 3
-rw-r--r--  include/rdma/ib_verbs.h | 2
-rw-r--r--  include/target/target_core_base.h | 5
-rw-r--r--  include/uapi/linux/perf_event.h | 1
-rw-r--r--  include/xen/interface/io/blkif.h | 10
-rw-r--r--  kernel/Makefile | 7
-rw-r--r--  kernel/bounds.c | 2
-rw-r--r--  kernel/cgroup.c | 50
-rw-r--r--  kernel/events/core.c | 21
-rw-r--r--  kernel/fork.c | 1
-rw-r--r--  kernel/freezer.c | 6
-rw-r--r--  kernel/kexec.c | 1
-rw-r--r--  kernel/power/console.c | 1
-rw-r--r--  kernel/reboot.c | 2
-rw-r--r--  kernel/sched/core.c | 5
-rw-r--r--  kernel/sched/fair.c | 7
-rw-r--r--  kernel/sched/rt.c | 14
-rw-r--r--  kernel/trace/ftrace.c | 2
-rw-r--r--  kernel/user.c | 6
-rw-r--r--  mm/Kconfig | 2
-rw-r--r--  mm/compaction.c | 4
-rw-r--r--  mm/huge_memory.c | 45
-rw-r--r--  mm/memory-failure.c | 14
-rw-r--r--  mm/memory.c | 2
-rw-r--r--  mm/mempolicy.c | 16
-rw-r--r--  mm/migrate.c | 82
-rw-r--r--  mm/mprotect.c | 13
-rw-r--r--  mm/page_alloc.c | 19
-rw-r--r--  mm/pgtable-generic.c | 8
-rw-r--r--  mm/rmap.c | 4
-rw-r--r--  net/8021q/vlan_dev.c | 19
-rw-r--r--  net/batman-adv/bat_iv_ogm.c | 36
-rw-r--r--  net/batman-adv/distributed-arp-table.c | 6
-rw-r--r--  net/batman-adv/fragmentation.c | 8
-rw-r--r--  net/batman-adv/icmp_socket.c | 6
-rw-r--r--  net/batman-adv/main.c | 16
-rw-r--r--  net/batman-adv/network-coding.c | 22
-rw-r--r--  net/batman-adv/packet.h | 124
-rw-r--r--  net/batman-adv/routing.c | 30
-rw-r--r--  net/batman-adv/send.c | 10
-rw-r--r--  net/batman-adv/soft-interface.c | 18
-rw-r--r--  net/batman-adv/translation-table.c | 6
-rw-r--r--  net/bluetooth/hci_sock.c | 26
-rw-r--r--  net/bridge/br_multicast.c | 4
-rw-r--r--  net/core/dev.c | 2
-rw-r--r--  net/core/neighbour.c | 2
-rw-r--r--  net/core/netpoll.c | 11
-rw-r--r--  net/dccp/probe.c | 19
-rw-r--r--  net/ieee802154/6lowpan.c | 2
-rw-r--r--  net/ipv4/gre_offload.c | 11
-rw-r--r--  net/ipv4/inet_diag.c | 16
-rw-r--r--  net/ipv4/ip_gre.c | 1
-rw-r--r--  net/ipv4/ip_output.c | 5
-rw-r--r--  net/ipv4/udp.c | 6
-rw-r--r--  net/ipv4/udp_offload.c | 37
-rw-r--r--  net/ipv6/addrconf.c | 17
-rw-r--r--  net/ipv6/ip6_output.c | 36
-rw-r--r--  net/ipv6/ip6_tunnel.c | 21
-rw-r--r--  net/ipv6/ip6_vti.c | 25
-rw-r--r--  net/ipv6/route.c | 4
-rw-r--r--  net/ipv6/sit.c | 8
-rw-r--r--  net/llc/af_llc.c | 5
-rw-r--r--  net/netfilter/ipvs/ip_vs_nfct.c | 6
-rw-r--r--  net/netfilter/nf_conntrack_seqadj.c | 5
-rw-r--r--  net/netfilter/nf_conntrack_timestamp.c | 1
-rw-r--r--  net/netfilter/nf_tables_api.c | 26
-rw-r--r--  net/netfilter/nfnetlink_log.c | 1
-rw-r--r--  net/netfilter/nft_exthdr.c | 2
-rw-r--r--  net/rds/ib.c | 3
-rw-r--r--  net/rose/af_rose.c | 16
-rw-r--r--  net/sched/act_csum.c | 10
-rw-r--r--  net/sched/act_gact.c | 7
-rw-r--r--  net/sched/act_ipt.c | 8
-rw-r--r--  net/sched/act_nat.c | 10
-rw-r--r--  net/sched/act_pedit.c | 8
-rw-r--r--  net/sched/act_police.c | 4
-rw-r--r--  net/sched/act_simple.c | 9
-rw-r--r--  net/sched/act_skbedit.c | 7
-rw-r--r--  net/sctp/outqueue.c | 32
-rw-r--r--  net/tipc/port.c | 45
-rw-r--r--  net/tipc/port.h | 6
-rw-r--r--  net/tipc/socket.c | 46
-rw-r--r--  net/wireless/radiotap.c | 4
-rw-r--r--  net/wireless/sme.c | 22
-rw-r--r--  scripts/link-vmlinux.sh | 4
-rw-r--r--  security/selinux/hooks.c | 8
-rw-r--r--  sound/core/pcm_lib.c | 2
-rw-r--r--  sound/pci/hda/hda_intel.c | 4
-rw-r--r--  sound/pci/hda/patch_realtek.c | 4
-rw-r--r--  sound/soc/atmel/atmel_ssc_dai.c | 30
-rw-r--r--  sound/soc/atmel/sam9x5_wm8731.c | 2
-rw-r--r--  sound/soc/codecs/wm5110.c | 2
-rw-r--r--  sound/soc/codecs/wm8904.c | 2
-rw-r--r--  sound/soc/codecs/wm8962.c | 13
-rw-r--r--  sound/soc/codecs/wm_adsp.c | 10
-rw-r--r--  sound/soc/fsl/imx-wm8962.c | 2
-rw-r--r--  sound/soc/kirkwood/kirkwood-i2s.c | 24
-rw-r--r--  sound/soc/soc-generic-dmaengine-pcm.c | 38
-rw-r--r--  sound/soc/soc-pcm.c | 5
-rw-r--r--  sound/soc/tegra/tegra20_i2s.c | 6
-rw-r--r--  sound/soc/tegra/tegra20_spdif.c | 10
-rw-r--r--  sound/soc/tegra/tegra30_i2s.c | 6
347 files changed, 3686 insertions, 1780 deletions
diff --git a/Documentation/block/null_blk.txt b/Documentation/block/null_blk.txt
new file mode 100644
index 000000000000..b2830b435895
--- /dev/null
+++ b/Documentation/block/null_blk.txt
@@ -0,0 +1,72 @@
+Null block device driver
+================================================================================
+
+I. Overview
+
+The null block device (/dev/nullb*) is used for benchmarking the various
+block-layer implementations. It emulates a block device of X gigabytes in size.
+The following instances are possible:
+
+  Single-queue block-layer
+    - Request-based.
+    - Single submission queue per device.
+    - Implements IO scheduling algorithms (CFQ, Deadline, noop).
+  Multi-queue block-layer
+    - Request-based.
+    - Configurable submission queues per device.
+  No block-layer (Known as bio-based)
+    - Bio-based. IO requests are submitted directly to the device driver.
+    - Directly accepts bio data structure and returns them.
+
+All of them have a completion queue for each core in the system.
+
+II. Module parameters applicable for all instances:
+
+queue_mode=[0-2]: Default: 2-Multi-queue
+  Selects which block-layer the module should instantiate with.
+
+  0: Bio-based.
+  1: Single-queue.
+  2: Multi-queue.
+
+home_node=[0--nr_nodes]: Default: NUMA_NO_NODE
+  Selects what CPU node the data structures are allocated from.
+
+gb=[Size in GB]: Default: 250GB
+  The size of the device reported to the system.
+
+bs=[Block size (in bytes)]: Default: 512 bytes
+  The block size reported to the system.
+
+nr_devices=[Number of devices]: Default: 2
+  Number of block devices instantiated. They are instantiated as /dev/nullb0,
+  etc.
+
+irq_mode=[0-2]: Default: 1-Soft-irq
+  The completion mode used for completing IOs to the block-layer.
+
+  0: None.
+  1: Soft-irq. Uses IPI to complete IOs across CPU nodes. Simulates the overhead
+     when IOs are issued from another CPU node than the home the device is
+     connected to.
+  2: Timer: Waits a specific period (completion_nsec) for each IO before
+     completion.
+
+completion_nsec=[ns]: Default: 10.000ns
+  Combined with irq_mode=2 (timer). The time each completion event must wait.
+
+submit_queues=[0..nr_cpus]:
+  The number of submission queues attached to the device driver. If unset, it
+  defaults to 1 on single-queue and bio-based instances. For multi-queue,
+  it is ignored when use_per_node_hctx module parameter is 1.
+
+hw_queue_depth=[0..qdepth]: Default: 64
+  The hardware queue depth of the device.
+
+III: Multi-queue specific parameters
+
+use_per_node_hctx=[0/1]: Default: 0
+  0: The number of submit queues are set to the value of the submit_queues
+     parameter.
+  1: The multi-queue block layer is instantiated with a hardware dispatch
+     queue for each CPU node in the system.
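
As a quick usage illustration of the parameters documented above (not part of
the patch itself; the values here are purely illustrative), a minimal sketch
of loading and exercising the driver might look like:

    # Load null_blk in multi-queue mode: four 4 GB devices with 4096-byte
    # blocks and timer-based completions of 10,000 ns each.
    modprobe null_blk queue_mode=2 nr_devices=4 gb=4 bs=4096 \
             irq_mode=2 completion_nsec=10000

    # The devices appear as /dev/nullb0 .. /dev/nullb3 and can be driven
    # directly, e.g. with dd, to benchmark the chosen block-layer path:
    dd if=/dev/zero of=/dev/nullb0 bs=4096 count=262144 oflag=direct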
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index 50680a59a2ff..b9e9bd854298 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -1529,6 +1529,8 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
 
 	* atapi_dmadir: Enable ATAPI DMADIR bridge support
 
+	* disable: Disable this device.
+
 	If there are multiple matching configurations changing
 	the same attribute, the last one is used.
 
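
For reference, the new keyword plugs into the existing libata.force syntax of
<port>[.<device>]:<val>; a hypothetical boot command line such as

    libata.force=1.00:disable

would keep device 0 on port 1 from being brought up (the port/device IDs here
are illustrative, not taken from the patch).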
diff --git a/Documentation/module-signing.txt b/Documentation/module-signing.txt
new file mode 100644
index 000000000000..2b40e04d3c49
--- /dev/null
+++ b/Documentation/module-signing.txt
@@ -0,0 +1,240 @@
1 ==============================
2 KERNEL MODULE SIGNING FACILITY
3 ==============================
4
5CONTENTS
6
7 - Overview.
8 - Configuring module signing.
9 - Generating signing keys.
10 - Public keys in the kernel.
11 - Manually signing modules.
12 - Signed modules and stripping.
13 - Loading signed modules.
14 - Non-valid signatures and unsigned modules.
15 - Administering/protecting the private key.
16
17
18========
19OVERVIEW
20========
21
22The kernel module signing facility cryptographically signs modules during
23installation and then checks the signature upon loading the module. This
24allows increased kernel security by disallowing the loading of unsigned modules
25or modules signed with an invalid key. Module signing increases security by
26making it harder to load a malicious module into the kernel. The module
27signature checking is done by the kernel so that it is not necessary to have
28trusted userspace bits.
29
30This facility uses X.509 ITU-T standard certificates to encode the public keys
31involved. The signatures are not themselves encoded in any industrial standard
32type. The facility currently only supports the RSA public key encryption
33standard (though it is pluggable and permits others to be used). The possible
34hash algorithms that can be used are SHA-1, SHA-224, SHA-256, SHA-384, and
35SHA-512 (the algorithm is selected by data in the signature).
36
37
38==========================
39CONFIGURING MODULE SIGNING
40==========================
41
42The module signing facility is enabled by going to the "Enable Loadable Module
43Support" section of the kernel configuration and turning on
44
45 CONFIG_MODULE_SIG "Module signature verification"
46
47This has a number of options available:
48
49 (1) "Require modules to be validly signed" (CONFIG_MODULE_SIG_FORCE)
50
51 This specifies how the kernel should deal with a module that has a
52 signature for which the key is not known or a module that is unsigned.
53
54 If this is off (ie. "permissive"), then modules for which the key is not
55 available and modules that are unsigned are permitted, but the kernel will
56 be marked as being tainted.
57
58 If this is on (ie. "restrictive"), only modules that have a valid
59 signature that can be verified by a public key in the kernel's possession
60 will be loaded. All other modules will generate an error.
61
62 Irrespective of the setting here, if the module has a signature block that
63 cannot be parsed, it will be rejected out of hand.
64
65
66 (2) "Automatically sign all modules" (CONFIG_MODULE_SIG_ALL)
67
68 If this is on then modules will be automatically signed during the
69 modules_install phase of a build. If this is off, then the modules must
70 be signed manually using:
71
72 scripts/sign-file
73
74
75 (3) "Which hash algorithm should modules be signed with?"
76
77 This presents a choice of which hash algorithm the installation phase will
78 sign the modules with:
79
80 CONFIG_SIG_SHA1 "Sign modules with SHA-1"
81 CONFIG_SIG_SHA224 "Sign modules with SHA-224"
82 CONFIG_SIG_SHA256 "Sign modules with SHA-256"
83 CONFIG_SIG_SHA384 "Sign modules with SHA-384"
84 CONFIG_SIG_SHA512 "Sign modules with SHA-512"
85
86 The algorithm selected here will also be built into the kernel (rather
87 than being a module) so that modules signed with that algorithm can have
88 their signatures checked without causing a dependency loop.
89
90
91=======================
92GENERATING SIGNING KEYS
93=======================
94
95Cryptographic keypairs are required to generate and check signatures. A
96private key is used to generate a signature and the corresponding public key is
97used to check it. The private key is only needed during the build, after which
98it can be deleted or stored securely. The public key gets built into the
99kernel so that it can be used to check the signatures as the modules are
100loaded.
101
102Under normal conditions, the kernel build will automatically generate a new
103keypair using openssl if one does not exist in the files:
104
105 signing_key.priv
106 signing_key.x509
107
108during the building of vmlinux (the public part of the key needs to be built
109into vmlinux) using parameters in the:
110
111 x509.genkey
112
113file (which is also generated if it does not already exist).
114
115It is strongly recommended that you provide your own x509.genkey file.
116
117Most notably, in the x509.genkey file, the req_distinguished_name section
118should be altered from the default:
119
120 [ req_distinguished_name ]
121 O = Magrathea
122 CN = Glacier signing key
123 emailAddress = slartibartfast@magrathea.h2g2
124
125The generated RSA key size can also be set with:
126
127 [ req ]
128 default_bits = 4096
129
130
131It is also possible to manually generate the key private/public files using the
132x509.genkey key generation configuration file in the root node of the Linux
133kernel sources tree and the openssl command. The following is an example to
134generate the public/private key files:
135
136 openssl req -new -nodes -utf8 -sha256 -days 36500 -batch -x509 \
137 -config x509.genkey -outform DER -out signing_key.x509 \
138 -keyout signing_key.priv
139
140
141=========================
142PUBLIC KEYS IN THE KERNEL
143=========================
144
145The kernel contains a ring of public keys that can be viewed by root. They're
146in a keyring called ".system_keyring" that can be seen by:
147
148 [root@deneb ~]# cat /proc/keys
149 ...
150 223c7853 I------ 1 perm 1f030000 0 0 keyring .system_keyring: 1
151 302d2d52 I------ 1 perm 1f010000 0 0 asymmetri Fedora kernel signing key: d69a84e6bce3d216b979e9505b3e3ef9a7118079: X509.RSA a7118079 []
152 ...
153
154Beyond the public key generated specifically for module signing, any file
155placed in the kernel source root directory or the kernel build root directory
156whose name is suffixed with ".x509" will be assumed to be an X.509 public key
157and will be added to the keyring.
158
159Further, the architecture code may take public keys from a hardware store and
160add those in also (e.g. from the UEFI key database).
161
162Finally, it is possible to add additional public keys by doing:
163
164 keyctl padd asymmetric "" [.system_keyring-ID] <[key-file]
165
166e.g.:
167
168 keyctl padd asymmetric "" 0x223c7853 <my_public_key.x509
169
170Note, however, that the kernel will only permit keys to be added to
171.system_keyring _if_ the new key's X.509 wrapper is validly signed by a key
172that is already resident in the .system_keyring at the time the key was added.
173
174
175=========================
176MANUALLY SIGNING MODULES
177=========================
178
179To manually sign a module, use the scripts/sign-file tool available in
180the Linux kernel source tree. The script requires 4 arguments:
181
182 1. The hash algorithm (e.g., sha256)
183 2. The private key filename
184 3. The public key filename
185 4. The kernel module to be signed
186
187The following is an example to sign a kernel module:
188
189 scripts/sign-file sha512 kernel-signkey.priv \
190 kernel-signkey.x509 module.ko
191
192The hash algorithm used does not have to match the one configured, but if it
193doesn't, you should make sure that hash algorithm is either built into the
194kernel or can be loaded without requiring itself.
195
196
197============================
198SIGNED MODULES AND STRIPPING
199============================
200
201A signed module has a digital signature simply appended at the end. The string
202"~Module signature appended~." at the end of the module's file confirms that a
203signature is present but it does not confirm that the signature is valid!
204
205Signed modules are BRITTLE as the signature is outside of the defined ELF
206container. Thus they MAY NOT be stripped once the signature is computed and
207attached. Note the entire module is the signed payload, including any and all
208debug information present at the time of signing.
209
210
211======================
212LOADING SIGNED MODULES
213======================
214
215Modules are loaded with insmod, modprobe, init_module() or finit_module(),
216exactly as for unsigned modules as no processing is done in userspace. The
217signature checking is all done within the kernel.
218
219
220=========================================
221NON-VALID SIGNATURES AND UNSIGNED MODULES
222=========================================
223
224If CONFIG_MODULE_SIG_FORCE is enabled or enforcemodulesig=1 is supplied on
225the kernel command line, the kernel will only load validly signed modules
226for which it has a public key. Otherwise, it will also load modules that are
227unsigned. Any module for which the kernel has a key, but which proves to have
228a signature mismatch will not be permitted to load.
229
230Any module that has an unparseable signature will be rejected.
231
232
233=========================================
234ADMINISTERING/PROTECTING THE PRIVATE KEY
235=========================================
236
237Since the private key is used to sign modules, viruses and malware could use
238the private key to sign modules and compromise the operating system. The
239private key must be either destroyed or moved to a secure location and not kept
240in the root node of the kernel source tree.
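
Tying the sections above together, a minimal end-to-end signing session might
look like the following sketch (key file names follow the document's own
examples; the module name foo.ko is a placeholder):

    # Generate a keypair from the x509.genkey configuration:
    openssl req -new -nodes -utf8 -sha256 -days 36500 -batch -x509 \
        -config x509.genkey -outform DER -out signing_key.x509 \
        -keyout signing_key.priv

    # Sign a module with it:
    scripts/sign-file sha256 signing_key.priv signing_key.x509 foo.ko

    # A signed module carries the marker string at its very end:
    tail -c 28 foo.ko    # expect "~Module signature appended~"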
diff --git a/MAINTAINERS b/MAINTAINERS
index 23bd3c2ee6ee..e11d4952bb26 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -783,7 +783,7 @@ F: arch/arm/boot/dts/sama*.dts
 F:	arch/arm/boot/dts/sama*.dtsi
 
 ARM/CALXEDA HIGHBANK ARCHITECTURE
-M:	Rob Herring <rob.herring@calxeda.com>
+M:	Rob Herring <robh@kernel.org>
 L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:	Maintained
 F:	arch/arm/mach-highbank/
@@ -1008,6 +1008,8 @@ M: Santosh Shilimkar <santosh.shilimkar@ti.com>
 L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:	Maintained
 F:	arch/arm/mach-keystone/
+F:	drivers/clk/keystone/
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/ssantosh/linux-keystone.git
 
 ARM/LOGICPD PXA270 MACHINE SUPPORT
 M:	Lennert Buytenhek <kernel@wantstofly.org>
@@ -3823,6 +3825,12 @@ T: git git://linuxtv.org/media_tree.git
 S:	Maintained
 F:	drivers/media/usb/gspca/
 
+GUID PARTITION TABLE (GPT)
+M:	Davidlohr Bueso <davidlohr@hp.com>
+L:	linux-efi@vger.kernel.org
+S:	Maintained
+F:	block/partitions/efi.*
+
 STK1160 USB VIDEO CAPTURE DRIVER
 M:	Ezequiel Garcia <elezegarcia@gmail.com>
 L:	linux-media@vger.kernel.org
@@ -6240,7 +6248,7 @@ F: drivers/i2c/busses/i2c-ocores.c
 
 OPEN FIRMWARE AND FLATTENED DEVICE TREE
 M:	Grant Likely <grant.likely@linaro.org>
-M:	Rob Herring <rob.herring@calxeda.com>
+M:	Rob Herring <robh+dt@kernel.org>
 L:	devicetree@vger.kernel.org
 W:	http://fdt.secretlab.ca
 T:	git git://git.secretlab.ca/git/linux-2.6.git
@@ -6252,7 +6260,7 @@ K: of_get_property
 K:	of_match_table
 
 OPEN FIRMWARE AND FLATTENED DEVICE TREE BINDINGS
-M:	Rob Herring <rob.herring@calxeda.com>
+M:	Rob Herring <robh+dt@kernel.org>
 M:	Pawel Moll <pawel.moll@arm.com>
 M:	Mark Rutland <mark.rutland@arm.com>
 M:	Ian Campbell <ijc+devicetree@hellion.org.uk>
@@ -9581,7 +9589,7 @@ F: drivers/xen/*swiotlb*
 
 XFS FILESYSTEM
 P:	Silicon Graphics Inc
-M:	Dave Chinner <dchinner@fromorbit.com>
+M:	Dave Chinner <david@fromorbit.com>
 M:	Ben Myers <bpm@sgi.com>
 M:	xfs@oss.sgi.com
 L:	xfs@oss.sgi.com
diff --git a/Makefile b/Makefile
index 858a147fd836..ab80be7a38bc 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 3
 PATCHLEVEL = 13
 SUBLEVEL = 0
-EXTRAVERSION = -rc4
+EXTRAVERSION = -rc6
 NAME = One Giant Leap for Frogkind
 
 # *DOCUMENTATION*
@@ -732,19 +732,15 @@ export mod_strip_cmd
 # Select initial ramdisk compression format, default is gzip(1).
 # This shall be used by the dracut(8) tool while creating an initramfs image.
 #
-INITRD_COMPRESS=gzip
-ifeq ($(CONFIG_RD_BZIP2), y)
-        INITRD_COMPRESS=bzip2
-else ifeq ($(CONFIG_RD_LZMA), y)
-        INITRD_COMPRESS=lzma
-else ifeq ($(CONFIG_RD_XZ), y)
-        INITRD_COMPRESS=xz
-else ifeq ($(CONFIG_RD_LZO), y)
-        INITRD_COMPRESS=lzo
-else ifeq ($(CONFIG_RD_LZ4), y)
-        INITRD_COMPRESS=lz4
-endif
-export INITRD_COMPRESS
+INITRD_COMPRESS-y                  := gzip
+INITRD_COMPRESS-$(CONFIG_RD_BZIP2) := bzip2
+INITRD_COMPRESS-$(CONFIG_RD_LZMA)  := lzma
+INITRD_COMPRESS-$(CONFIG_RD_XZ)    := xz
+INITRD_COMPRESS-$(CONFIG_RD_LZO)   := lzo
+INITRD_COMPRESS-$(CONFIG_RD_LZ4)   := lz4
+# do not export INITRD_COMPRESS, since we didn't actually
+# choose a sane default compression above.
+# export INITRD_COMPRESS := $(INITRD_COMPRESS-y)
 
 ifdef CONFIG_MODULE_SIG_ALL
 MODSECKEY = ./signing_key.priv
diff --git a/arch/arc/include/uapi/asm/unistd.h b/arch/arc/include/uapi/asm/unistd.h
index 68125dd766c6..39e58d1cdf90 100644
--- a/arch/arc/include/uapi/asm/unistd.h
+++ b/arch/arc/include/uapi/asm/unistd.h
@@ -8,7 +8,11 @@
 
 /******** no-legacy-syscalls-ABI *******/
 
-#ifndef _UAPI_ASM_ARC_UNISTD_H
+/*
+ * Non-typical guard macro to enable inclusion twice in ARCH sys.c
+ * That is how the Generic syscall wrapper generator works
+ */
+#if !defined(_UAPI_ASM_ARC_UNISTD_H) || defined(__SYSCALL)
 #define _UAPI_ASM_ARC_UNISTD_H
 
 #define __ARCH_WANT_SYS_EXECVE
@@ -36,4 +40,6 @@ __SYSCALL(__NR_arc_gettls, sys_arc_gettls)
 #define __NR_sysfs		(__NR_arch_specific_syscall + 3)
 __SYSCALL(__NR_sysfs, sys_sysfs)
 
+#undef __SYSCALL
+
 #endif
diff --git a/arch/arm/boot/dts/r8a7790.dtsi b/arch/arm/boot/dts/r8a7790.dtsi
index ee845fad939b..9987dd0e9c59 100644
--- a/arch/arm/boot/dts/r8a7790.dtsi
+++ b/arch/arm/boot/dts/r8a7790.dtsi
@@ -87,9 +87,9 @@
 		interrupts = <1 9 0xf04>;
 	};
 
-	gpio0: gpio@ffc40000 {
+	gpio0: gpio@e6050000 {
 		compatible = "renesas,gpio-r8a7790", "renesas,gpio-rcar";
-		reg = <0 0xffc40000 0 0x2c>;
+		reg = <0 0xe6050000 0 0x50>;
 		interrupt-parent = <&gic>;
 		interrupts = <0 4 0x4>;
 		#gpio-cells = <2>;
@@ -99,9 +99,9 @@
 		interrupt-controller;
 	};
 
-	gpio1: gpio@ffc41000 {
+	gpio1: gpio@e6051000 {
 		compatible = "renesas,gpio-r8a7790", "renesas,gpio-rcar";
-		reg = <0 0xffc41000 0 0x2c>;
+		reg = <0 0xe6051000 0 0x50>;
 		interrupt-parent = <&gic>;
 		interrupts = <0 5 0x4>;
 		#gpio-cells = <2>;
@@ -111,9 +111,9 @@
 		interrupt-controller;
 	};
 
-	gpio2: gpio@ffc42000 {
+	gpio2: gpio@e6052000 {
 		compatible = "renesas,gpio-r8a7790", "renesas,gpio-rcar";
-		reg = <0 0xffc42000 0 0x2c>;
+		reg = <0 0xe6052000 0 0x50>;
 		interrupt-parent = <&gic>;
 		interrupts = <0 6 0x4>;
 		#gpio-cells = <2>;
@@ -123,9 +123,9 @@
 		interrupt-controller;
 	};
 
-	gpio3: gpio@ffc43000 {
+	gpio3: gpio@e6053000 {
 		compatible = "renesas,gpio-r8a7790", "renesas,gpio-rcar";
-		reg = <0 0xffc43000 0 0x2c>;
+		reg = <0 0xe6053000 0 0x50>;
 		interrupt-parent = <&gic>;
 		interrupts = <0 7 0x4>;
 		#gpio-cells = <2>;
@@ -135,9 +135,9 @@
 		interrupt-controller;
 	};
 
-	gpio4: gpio@ffc44000 {
+	gpio4: gpio@e6054000 {
 		compatible = "renesas,gpio-r8a7790", "renesas,gpio-rcar";
-		reg = <0 0xffc44000 0 0x2c>;
+		reg = <0 0xe6054000 0 0x50>;
 		interrupt-parent = <&gic>;
 		interrupts = <0 8 0x4>;
 		#gpio-cells = <2>;
@@ -147,9 +147,9 @@
 		interrupt-controller;
 	};
 
-	gpio5: gpio@ffc45000 {
+	gpio5: gpio@e6055000 {
 		compatible = "renesas,gpio-r8a7790", "renesas,gpio-rcar";
-		reg = <0 0xffc45000 0 0x2c>;
+		reg = <0 0xe6055000 0 0x50>;
 		interrupt-parent = <&gic>;
 		interrupts = <0 9 0x4>;
 		#gpio-cells = <2>;
@@ -241,7 +241,7 @@
 
 	sdhi0: sdhi@ee100000 {
 		compatible = "renesas,sdhi-r8a7790";
-		reg = <0 0xee100000 0 0x100>;
+		reg = <0 0xee100000 0 0x200>;
 		interrupt-parent = <&gic>;
 		interrupts = <0 165 4>;
 		cap-sd-highspeed;
@@ -250,7 +250,7 @@
 
 	sdhi1: sdhi@ee120000 {
 		compatible = "renesas,sdhi-r8a7790";
-		reg = <0 0xee120000 0 0x100>;
+		reg = <0 0xee120000 0 0x200>;
 		interrupt-parent = <&gic>;
 		interrupts = <0 166 4>;
 		cap-sd-highspeed;
diff --git a/arch/arm/mach-omap2/board-ldp.c b/arch/arm/mach-omap2/board-ldp.c
index 4ec8d82b0492..44a59c3abfb0 100644
--- a/arch/arm/mach-omap2/board-ldp.c
+++ b/arch/arm/mach-omap2/board-ldp.c
@@ -242,12 +242,18 @@ static void __init ldp_display_init(void)
 
 static int ldp_twl_gpio_setup(struct device *dev, unsigned gpio, unsigned ngpio)
 {
+	int res;
+
 	/* LCD enable GPIO */
 	ldp_lcd_pdata.enable_gpio = gpio + 7;
 
 	/* Backlight enable GPIO */
 	ldp_lcd_pdata.backlight_gpio = gpio + 15;
 
+	res = platform_device_register(&ldp_lcd_device);
+	if (res)
+		pr_err("Unable to register LCD: %d\n", res);
+
 	return 0;
 }
 
@@ -346,7 +352,6 @@ static struct omap2_hsmmc_info mmc[] __initdata = {
 
 static struct platform_device *ldp_devices[] __initdata = {
 	&ldp_gpio_keys_device,
-	&ldp_lcd_device,
 };
 
 #ifdef CONFIG_OMAP_MUX
diff --git a/arch/arm/mach-omap2/display.c b/arch/arm/mach-omap2/display.c
index 58347bb874a0..4cf165502b35 100644
--- a/arch/arm/mach-omap2/display.c
+++ b/arch/arm/mach-omap2/display.c
@@ -101,13 +101,51 @@ static const struct omap_dss_hwmod_data omap4_dss_hwmod_data[] __initconst = {
 	{ "dss_hdmi", "omapdss_hdmi", -1 },
 };
 
+static int omap4_dsi_mux_pads(int dsi_id, unsigned lanes)
+{
+	u32 enable_mask, enable_shift;
+	u32 pipd_mask, pipd_shift;
+	u32 reg;
+
+	if (dsi_id == 0) {
+		enable_mask = OMAP4_DSI1_LANEENABLE_MASK;
+		enable_shift = OMAP4_DSI1_LANEENABLE_SHIFT;
+		pipd_mask = OMAP4_DSI1_PIPD_MASK;
+		pipd_shift = OMAP4_DSI1_PIPD_SHIFT;
+	} else if (dsi_id == 1) {
+		enable_mask = OMAP4_DSI2_LANEENABLE_MASK;
+		enable_shift = OMAP4_DSI2_LANEENABLE_SHIFT;
+		pipd_mask = OMAP4_DSI2_PIPD_MASK;
+		pipd_shift = OMAP4_DSI2_PIPD_SHIFT;
+	} else {
+		return -ENODEV;
+	}
+
+	reg = omap4_ctrl_pad_readl(OMAP4_CTRL_MODULE_PAD_CORE_CONTROL_DSIPHY);
+
+	reg &= ~enable_mask;
+	reg &= ~pipd_mask;
+
+	reg |= (lanes << enable_shift) & enable_mask;
+	reg |= (lanes << pipd_shift) & pipd_mask;
+
+	omap4_ctrl_pad_writel(reg, OMAP4_CTRL_MODULE_PAD_CORE_CONTROL_DSIPHY);
+
+	return 0;
+}
+
 static int omap_dsi_enable_pads(int dsi_id, unsigned lane_mask)
 {
+	if (cpu_is_omap44xx())
+		return omap4_dsi_mux_pads(dsi_id, lane_mask);
+
 	return 0;
 }
 
 static void omap_dsi_disable_pads(int dsi_id, unsigned lane_mask)
 {
+	if (cpu_is_omap44xx())
+		omap4_dsi_mux_pads(dsi_id, 0);
 }
 
 static int omap_dss_set_min_bus_tput(struct device *dev, unsigned long tput)
diff --git a/arch/arm/mach-omap2/omap_hwmod_2xxx_ipblock_data.c b/arch/arm/mach-omap2/omap_hwmod_2xxx_ipblock_data.c
index 56cebb05509e..d23c77fadb31 100644
--- a/arch/arm/mach-omap2/omap_hwmod_2xxx_ipblock_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_2xxx_ipblock_data.c
@@ -796,7 +796,7 @@ struct omap_hwmod omap2xxx_counter_32k_hwmod = {
 
 /* gpmc */
 static struct omap_hwmod_irq_info omap2xxx_gpmc_irqs[] = {
-	{ .irq = 20 },
+	{ .irq = 20 + OMAP_INTC_START, },
 	{ .irq = -1 }
 };
 
@@ -841,7 +841,7 @@ static struct omap_hwmod_class omap2_rng_hwmod_class = {
 };
 
 static struct omap_hwmod_irq_info omap2_rng_mpu_irqs[] = {
-	{ .irq = 52 },
+	{ .irq = 52 + OMAP_INTC_START, },
 	{ .irq = -1 }
 };
 
diff --git a/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c b/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
index d33742908f97..4c3b1e6df508 100644
--- a/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
@@ -2165,7 +2165,7 @@ static struct omap_hwmod_class omap3xxx_gpmc_hwmod_class = {
 };
 
 static struct omap_hwmod_irq_info omap3xxx_gpmc_irqs[] = {
-	{ .irq = 20 },
+	{ .irq = 20 + OMAP_INTC_START, },
 	{ .irq = -1 }
 };
 
@@ -2999,7 +2999,7 @@ static struct omap_mmu_dev_attr mmu_isp_dev_attr = {
 
 static struct omap_hwmod omap3xxx_mmu_isp_hwmod;
 static struct omap_hwmod_irq_info omap3xxx_mmu_isp_irqs[] = {
-	{ .irq = 24 },
+	{ .irq = 24 + OMAP_INTC_START, },
 	{ .irq = -1 }
 };
 
@@ -3041,7 +3041,7 @@ static struct omap_mmu_dev_attr mmu_iva_dev_attr = {
 
 static struct omap_hwmod omap3xxx_mmu_iva_hwmod;
 static struct omap_hwmod_irq_info omap3xxx_mmu_iva_irqs[] = {
-	{ .irq = 28 },
+	{ .irq = 28 + OMAP_INTC_START, },
 	{ .irq = -1 }
 };
 
diff --git a/arch/arm/mach-omap2/omap_hwmod_7xx_data.c b/arch/arm/mach-omap2/omap_hwmod_7xx_data.c
index db32d5380b11..18f333c440db 100644
--- a/arch/arm/mach-omap2/omap_hwmod_7xx_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_7xx_data.c
@@ -1637,7 +1637,7 @@ static struct omap_hwmod dra7xx_uart1_hwmod = {
 	.class		= &dra7xx_uart_hwmod_class,
 	.clkdm_name	= "l4per_clkdm",
 	.main_clk	= "uart1_gfclk_mux",
-	.flags		= HWMOD_SWSUP_SIDLE_ACT,
+	.flags		= HWMOD_SWSUP_SIDLE_ACT | DEBUG_OMAP2UART1_FLAGS,
 	.prcm = {
 		.omap4 = {
 			.clkctrl_offs = DRA7XX_CM_L4PER_UART1_CLKCTRL_OFFSET,
diff --git a/arch/arm/mach-pxa/include/mach/lubbock.h b/arch/arm/mach-pxa/include/mach/lubbock.h
index 2a086e8373eb..958cd6af9384 100644
--- a/arch/arm/mach-pxa/include/mach/lubbock.h
+++ b/arch/arm/mach-pxa/include/mach/lubbock.h
@@ -10,6 +10,8 @@
  * published by the Free Software Foundation.
  */
 
+#include <mach/irqs.h>
+
 #define LUBBOCK_ETH_PHYS	PXA_CS3_PHYS
 
 #define LUBBOCK_FPGA_PHYS	PXA_CS2_PHYS
diff --git a/arch/arm/mach-s3c64xx/mach-s3c64xx-dt.c b/arch/arm/mach-s3c64xx/mach-s3c64xx-dt.c
index 7eb9a10fc1af..2fddf38192df 100644
--- a/arch/arm/mach-s3c64xx/mach-s3c64xx-dt.c
+++ b/arch/arm/mach-s3c64xx/mach-s3c64xx-dt.c
@@ -8,8 +8,6 @@
  * published by the Free Software Foundation.
 */
 
-#include <linux/clk-provider.h>
-#include <linux/irqchip.h>
 #include <linux/of_platform.h>
 
 #include <asm/mach/arch.h>
@@ -48,15 +46,9 @@ static void __init s3c64xx_dt_map_io(void)
 		panic("SoC is not S3C64xx!");
 }
 
-static void __init s3c64xx_dt_init_irq(void)
-{
-	of_clk_init(NULL);
-	samsung_wdt_reset_of_init();
-	irqchip_init();
-};
-
 static void __init s3c64xx_dt_init_machine(void)
 {
+	samsung_wdt_reset_of_init();
 	of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
 }
 
@@ -79,7 +71,6 @@ DT_MACHINE_START(S3C6400_DT, "Samsung S3C64xx (Flattened Device Tree)")
 	/* Maintainer: Tomasz Figa <tomasz.figa@gmail.com> */
 	.dt_compat	= s3c64xx_dt_compat,
 	.map_io		= s3c64xx_dt_map_io,
-	.init_irq	= s3c64xx_dt_init_irq,
 	.init_machine	= s3c64xx_dt_init_machine,
 	.restart	= s3c64xx_dt_restart,
 MACHINE_END
diff --git a/arch/arm/mach-shmobile/board-armadillo800eva.c b/arch/arm/mach-shmobile/board-armadillo800eva.c
index 958e3cbf0ac2..c18689123023 100644
--- a/arch/arm/mach-shmobile/board-armadillo800eva.c
+++ b/arch/arm/mach-shmobile/board-armadillo800eva.c
@@ -614,6 +614,11 @@ static struct regulator_consumer_supply fixed3v3_power_consumers[] = {
 	REGULATOR_SUPPLY("vqmmc", "sh_mmcif"),
 };
 
+/* Fixed 3.3V regulator used by LCD backlight */
+static struct regulator_consumer_supply fixed5v0_power_consumers[] = {
+	REGULATOR_SUPPLY("power", "pwm-backlight.0"),
+};
+
 /* Fixed 3.3V regulator to be used by SDHI0 */
 static struct regulator_consumer_supply vcc_sdhi0_consumers[] = {
 	REGULATOR_SUPPLY("vmmc", "sh_mobile_sdhi.0"),
@@ -1196,6 +1201,8 @@ static void __init eva_init(void)
 
 	regulator_register_always_on(0, "fixed-3.3V", fixed3v3_power_consumers,
 				     ARRAY_SIZE(fixed3v3_power_consumers), 3300000);
+	regulator_register_always_on(3, "fixed-5.0V", fixed5v0_power_consumers,
+				     ARRAY_SIZE(fixed5v0_power_consumers), 5000000);
 
 	pinctrl_register_mappings(eva_pinctrl_map, ARRAY_SIZE(eva_pinctrl_map));
 	pwm_add_table(pwm_lookup, ARRAY_SIZE(pwm_lookup));
diff --git a/arch/arm/mach-shmobile/board-bockw.c b/arch/arm/mach-shmobile/board-bockw.c
index 38611526fe9a..3c4995aebd22 100644
--- a/arch/arm/mach-shmobile/board-bockw.c
+++ b/arch/arm/mach-shmobile/board-bockw.c
@@ -679,7 +679,7 @@ static void __init bockw_init(void)
 			.id		= i,
 			.data		= &rsnd_card_info[i],
 			.size_data	= sizeof(struct asoc_simple_card_info),
-			.dma_mask	= ~0,
+			.dma_mask	= DMA_BIT_MASK(32),
 		};
 
 		platform_device_register_full(&cardinfo);
diff --git a/arch/arm/mach-shmobile/board-lager.c b/arch/arm/mach-shmobile/board-lager.c
index a8d3ce646fb9..e0406fd37390 100644
--- a/arch/arm/mach-shmobile/board-lager.c
+++ b/arch/arm/mach-shmobile/board-lager.c
@@ -245,7 +245,9 @@ static void __init lager_init(void)
 {
 	lager_add_standard_devices();
 
-	phy_register_fixup_for_id("r8a7790-ether-ff:01", lager_ksz8041_fixup);
+	if (IS_ENABLED(CONFIG_PHYLIB))
+		phy_register_fixup_for_id("r8a7790-ether-ff:01",
+					  lager_ksz8041_fixup);
 }
 
 static const char * const lager_boards_compat_dt[] __initconst = {
diff --git a/arch/arm/xen/enlighten.c b/arch/arm/xen/enlighten.c
index 83e4f959ee47..85501238b425 100644
--- a/arch/arm/xen/enlighten.c
+++ b/arch/arm/xen/enlighten.c
@@ -96,7 +96,7 @@ static int remap_pte_fn(pte_t *ptep, pgtable_t token, unsigned long addr,
 	struct remap_data *info = data;
 	struct page *page = info->pages[info->index++];
 	unsigned long pfn = page_to_pfn(page);
-	pte_t pte = pfn_pte(pfn, info->prot);
+	pte_t pte = pte_mkspecial(pfn_pte(pfn, info->prot));
 
 	if (map_foreign_page(pfn, info->fgmfn, info->domid))
 		return -EFAULT;
@@ -224,10 +224,10 @@
 	}
 	if (of_address_to_resource(node, GRANT_TABLE_PHYSADDR, &res))
 		return 0;
-	xen_hvm_resume_frames = res.start >> PAGE_SHIFT;
+	xen_hvm_resume_frames = res.start;
 	xen_events_irq = irq_of_parse_and_map(node, 0);
 	pr_info("Xen %s support found, events_irq=%d gnttab_frame_pfn=%lx\n",
-		version, xen_events_irq, xen_hvm_resume_frames);
+		version, xen_events_irq, (xen_hvm_resume_frames >> PAGE_SHIFT));
 	xen_domain_type = XEN_HVM_DOMAIN;
 
 	xen_setup_features();
diff --git a/arch/arm64/include/asm/xen/page-coherent.h b/arch/arm64/include/asm/xen/page-coherent.h
index 2820f1a6eebe..dde3fc9c49f0 100644
--- a/arch/arm64/include/asm/xen/page-coherent.h
+++ b/arch/arm64/include/asm/xen/page-coherent.h
@@ -23,25 +23,21 @@ static inline void xen_dma_map_page(struct device *hwdev, struct page *page,
 		unsigned long offset, size_t size, enum dma_data_direction dir,
 		struct dma_attrs *attrs)
 {
-	__generic_dma_ops(hwdev)->map_page(hwdev, page, offset, size, dir, attrs);
 }
 
 static inline void xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle,
 		size_t size, enum dma_data_direction dir,
 		struct dma_attrs *attrs)
 {
-	__generic_dma_ops(hwdev)->unmap_page(hwdev, handle, size, dir, attrs);
 }
 
 static inline void xen_dma_sync_single_for_cpu(struct device *hwdev,
 		dma_addr_t handle, size_t size, enum dma_data_direction dir)
 {
-	__generic_dma_ops(hwdev)->sync_single_for_cpu(hwdev, handle, size, dir);
 }
 
 static inline void xen_dma_sync_single_for_device(struct device *hwdev,
 		dma_addr_t handle, size_t size, enum dma_data_direction dir)
 {
-	__generic_dma_ops(hwdev)->sync_single_for_device(hwdev, handle, size, dir);
 }
 #endif /* _ASM_ARM64_XEN_PAGE_COHERENT_H */
diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c
index 6777a2192b83..6a8928bba03c 100644
--- a/arch/arm64/kernel/ptrace.c
+++ b/arch/arm64/kernel/ptrace.c
@@ -214,31 +214,29 @@ static int ptrace_hbp_fill_attr_ctrl(unsigned int note_type,
 {
 	int err, len, type, disabled = !ctrl.enabled;
 
-	if (disabled) {
-		len = 0;
-		type = HW_BREAKPOINT_EMPTY;
-	} else {
-		err = arch_bp_generic_fields(ctrl, &len, &type);
-		if (err)
-			return err;
+	attr->disabled = disabled;
+	if (disabled)
+		return 0;
+
+	err = arch_bp_generic_fields(ctrl, &len, &type);
+	if (err)
+		return err;
 
-		switch (note_type) {
-		case NT_ARM_HW_BREAK:
-			if ((type & HW_BREAKPOINT_X) != type)
-				return -EINVAL;
-			break;
-		case NT_ARM_HW_WATCH:
-			if ((type & HW_BREAKPOINT_RW) != type)
-				return -EINVAL;
-			break;
-		default:
-			return -EINVAL;
-		}
-	}
+	switch (note_type) {
+	case NT_ARM_HW_BREAK:
+		if ((type & HW_BREAKPOINT_X) != type)
+			return -EINVAL;
+		break;
+	case NT_ARM_HW_WATCH:
+		if ((type & HW_BREAKPOINT_RW) != type)
+			return -EINVAL;
+		break;
+	default:
+		return -EINVAL;
+	}
 
 	attr->bp_len	= len;
 	attr->bp_type	= type;
-	attr->disabled	= disabled;
 
 	return 0;
 }
diff --git a/arch/powerpc/boot/dts/mpc5125twr.dts b/arch/powerpc/boot/dts/mpc5125twr.dts
index 4177b62240c2..a618dfc13e4c 100644
--- a/arch/powerpc/boot/dts/mpc5125twr.dts
+++ b/arch/powerpc/boot/dts/mpc5125twr.dts
@@ -58,7 +58,6 @@
 		compatible = "fsl,mpc5121-immr";
 		#address-cells = <1>;
 		#size-cells = <1>;
-		#interrupt-cells = <2>;
 		ranges = <0x0 0x80000000 0x400000>;
 		reg = <0x80000000 0x400000>;
 		bus-frequency = <66000000>;	// 66 MHz ips bus
@@ -189,6 +188,10 @@
 			reg = <0xA000 0x1000>;
 		};
 
+		// disable USB1 port
+		// TODO:
+		// correct pinmux config and fix USB3320 ulpi dependency
+		// before re-enabling it
 		usb@3000 {
 			compatible = "fsl,mpc5121-usb2-dr";
 			reg = <0x3000 0x400>;
@@ -197,6 +200,7 @@
 			interrupts = <43 0x8>;
 			dr_mode = "host";
 			phy_type = "ulpi";
+			status = "disabled";
 		};
 
 		// 5125 PSCs are not 52xx or 5121 PSC compatible
diff --git a/arch/powerpc/include/asm/exception-64s.h b/arch/powerpc/include/asm/exception-64s.h
index 894662a5d4d5..243ce69ad685 100644
--- a/arch/powerpc/include/asm/exception-64s.h
+++ b/arch/powerpc/include/asm/exception-64s.h
@@ -284,7 +284,7 @@ do_kvm_##n: \
 	subi	r1,r1,INT_FRAME_SIZE;	/* alloc frame on kernel stack	*/ \
 	beq-	1f;							   \
 	ld	r1,PACAKSAVE(r13);	/* kernel stack to use		*/ \
-1:	cmpdi	cr1,r1,0;		/* check if r1 is in userspace	*/ \
+1:	cmpdi	cr1,r1,-INT_FRAME_SIZE;	/* check if r1 is in userspace	*/ \
 	blt+	cr1,3f;			/* abort if it is		*/ \
 	li	r1,(n);			/* will be reloaded later	*/ \
 	sth	r1,PACA_TRAP_SAVE(r13);					   \
diff --git a/arch/powerpc/include/asm/kvm_book3s.h b/arch/powerpc/include/asm/kvm_book3s.h
index 4a594b76674d..bc23b1ba7980 100644
--- a/arch/powerpc/include/asm/kvm_book3s.h
+++ b/arch/powerpc/include/asm/kvm_book3s.h
@@ -192,6 +192,10 @@ extern void kvmppc_load_up_vsx(void);
 extern u32 kvmppc_alignment_dsisr(struct kvm_vcpu *vcpu, unsigned int inst);
 extern ulong kvmppc_alignment_dar(struct kvm_vcpu *vcpu, unsigned int inst);
 extern int kvmppc_h_pr(struct kvm_vcpu *vcpu, unsigned long cmd);
+extern void kvmppc_copy_to_svcpu(struct kvmppc_book3s_shadow_vcpu *svcpu,
+				 struct kvm_vcpu *vcpu);
+extern void kvmppc_copy_from_svcpu(struct kvm_vcpu *vcpu,
+				   struct kvmppc_book3s_shadow_vcpu *svcpu);
 
 static inline struct kvmppc_vcpu_book3s *to_book3s(struct kvm_vcpu *vcpu)
 {
diff --git a/arch/powerpc/include/asm/kvm_book3s_asm.h b/arch/powerpc/include/asm/kvm_book3s_asm.h
index 0bd9348a4db9..192917d2239c 100644
--- a/arch/powerpc/include/asm/kvm_book3s_asm.h
+++ b/arch/powerpc/include/asm/kvm_book3s_asm.h
@@ -79,6 +79,7 @@ struct kvmppc_host_state {
 	ulong vmhandler;
 	ulong scratch0;
 	ulong scratch1;
+	ulong scratch2;
 	u8 in_guest;
 	u8 restore_hid5;
 	u8 napping;
@@ -106,6 +107,7 @@ struct kvmppc_host_state {
 };
 
 struct kvmppc_book3s_shadow_vcpu {
+	bool in_use;
 	ulong gpr[14];
 	u32 cr;
 	u32 xer;
diff --git a/arch/powerpc/include/asm/switch_to.h b/arch/powerpc/include/asm/switch_to.h
index 9ee12610af02..aace90547614 100644
--- a/arch/powerpc/include/asm/switch_to.h
+++ b/arch/powerpc/include/asm/switch_to.h
@@ -35,7 +35,7 @@ extern void giveup_vsx(struct task_struct *);
 extern void enable_kernel_spe(void);
 extern void giveup_spe(struct task_struct *);
 extern void load_up_spe(struct task_struct *);
-extern void switch_booke_debug_regs(struct thread_struct *new_thread);
+extern void switch_booke_debug_regs(struct debug_reg *new_debug);
 
 #ifndef CONFIG_SMP
 extern void discard_lazy_cpu_state(void);
diff --git a/arch/powerpc/include/asm/unaligned.h b/arch/powerpc/include/asm/unaligned.h
index 5f1b1e3c2137..8296381ae432 100644
--- a/arch/powerpc/include/asm/unaligned.h
+++ b/arch/powerpc/include/asm/unaligned.h
@@ -4,13 +4,18 @@
 #ifdef __KERNEL__
 
 /*
- * The PowerPC can do unaligned accesses itself in big endian mode.
+ * The PowerPC can do unaligned accesses itself based on its endian mode.
  */
 #include <linux/unaligned/access_ok.h>
 #include <linux/unaligned/generic.h>
 
+#ifdef __LITTLE_ENDIAN__
+#define get_unaligned	__get_unaligned_le
+#define put_unaligned	__put_unaligned_le
+#else
 #define get_unaligned	__get_unaligned_be
 #define put_unaligned	__put_unaligned_be
+#endif
 
 #endif	/* __KERNEL__ */
 #endif	/* _ASM_POWERPC_UNALIGNED_H */
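
Aside: with the hunk above, get_unaligned()/put_unaligned() follow the compiler's endian mode instead of being hard-wired big-endian. A small userspace sketch of what the two accessor families do (helper names are illustrative, not the kernel's):

	#include <stdint.h>

	/* Read four bytes at any address and assemble them in an
	 * explicit byte order, independent of alignment. */
	static uint32_t get_unaligned_le32(const void *p)
	{
		const uint8_t *b = p;
		return b[0] | (b[1] << 8) | (b[2] << 16) | ((uint32_t)b[3] << 24);
	}

	static uint32_t get_unaligned_be32(const void *p)
	{
		const uint8_t *b = p;
		return ((uint32_t)b[0] << 24) | (b[1] << 16) | (b[2] << 8) | b[3];
	}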
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index 2ea5cc033ec8..d3de01066f7d 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -576,6 +576,7 @@ int main(void)
 	HSTATE_FIELD(HSTATE_VMHANDLER, vmhandler);
 	HSTATE_FIELD(HSTATE_SCRATCH0, scratch0);
 	HSTATE_FIELD(HSTATE_SCRATCH1, scratch1);
+	HSTATE_FIELD(HSTATE_SCRATCH2, scratch2);
 	HSTATE_FIELD(HSTATE_IN_GUEST, in_guest);
 	HSTATE_FIELD(HSTATE_RESTORE_HID5, restore_hid5);
 	HSTATE_FIELD(HSTATE_NAPPING, napping);
diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S
index 2ae41aba4053..4f0946de2d5c 100644
--- a/arch/powerpc/kernel/head_64.S
+++ b/arch/powerpc/kernel/head_64.S
@@ -80,6 +80,7 @@ END_FTR_SECTION(0, 1)
  * of the function that the cpu should jump to to continue
  * initialization.
  */
+	.balign 8
 	.globl	__secondary_hold_spinloop
 __secondary_hold_spinloop:
 	.llong	0x0
@@ -470,6 +471,7 @@ _STATIC(__after_prom_start)
 	mtctr	r8
 	bctr
 
+.balign 8
 p_end:	.llong	_end - _stext
 
 4:	/* Now copy the rest of the kernel up to _end */
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 3386d8ab7eb0..4a96556fd2d4 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -339,7 +339,7 @@ static void set_debug_reg_defaults(struct thread_struct *thread)
 #endif
 }
 
-static void prime_debug_regs(struct thread_struct *thread)
+static void prime_debug_regs(struct debug_reg *debug)
 {
 	/*
 	 * We could have inherited MSR_DE from userspace, since
@@ -348,22 +348,22 @@ static void prime_debug_regs(struct thread_struct *thread)
 	 */
 	mtmsr(mfmsr() & ~MSR_DE);
 
-	mtspr(SPRN_IAC1, thread->debug.iac1);
-	mtspr(SPRN_IAC2, thread->debug.iac2);
+	mtspr(SPRN_IAC1, debug->iac1);
+	mtspr(SPRN_IAC2, debug->iac2);
 #if CONFIG_PPC_ADV_DEBUG_IACS > 2
-	mtspr(SPRN_IAC3, thread->debug.iac3);
-	mtspr(SPRN_IAC4, thread->debug.iac4);
+	mtspr(SPRN_IAC3, debug->iac3);
+	mtspr(SPRN_IAC4, debug->iac4);
 #endif
-	mtspr(SPRN_DAC1, thread->debug.dac1);
-	mtspr(SPRN_DAC2, thread->debug.dac2);
+	mtspr(SPRN_DAC1, debug->dac1);
+	mtspr(SPRN_DAC2, debug->dac2);
 #if CONFIG_PPC_ADV_DEBUG_DVCS > 0
-	mtspr(SPRN_DVC1, thread->debug.dvc1);
-	mtspr(SPRN_DVC2, thread->debug.dvc2);
+	mtspr(SPRN_DVC1, debug->dvc1);
+	mtspr(SPRN_DVC2, debug->dvc2);
 #endif
-	mtspr(SPRN_DBCR0, thread->debug.dbcr0);
-	mtspr(SPRN_DBCR1, thread->debug.dbcr1);
+	mtspr(SPRN_DBCR0, debug->dbcr0);
+	mtspr(SPRN_DBCR1, debug->dbcr1);
 #ifdef CONFIG_BOOKE
-	mtspr(SPRN_DBCR2, thread->debug.dbcr2);
+	mtspr(SPRN_DBCR2, debug->dbcr2);
 #endif
 }
 /*
@@ -371,11 +371,11 @@ static void prime_debug_regs(struct thread_struct *thread)
  * debug registers, set the debug registers from the values
  * stored in the new thread.
  */
-void switch_booke_debug_regs(struct thread_struct *new_thread)
+void switch_booke_debug_regs(struct debug_reg *new_debug)
 {
 	if ((current->thread.debug.dbcr0 & DBCR0_IDM)
-		|| (new_thread->debug.dbcr0 & DBCR0_IDM))
-			prime_debug_regs(new_thread);
+		|| (new_debug->dbcr0 & DBCR0_IDM))
+			prime_debug_regs(new_debug);
 }
 EXPORT_SYMBOL_GPL(switch_booke_debug_regs);
 #else	/* !CONFIG_PPC_ADV_DEBUG_REGS */
@@ -683,7 +683,7 @@ struct task_struct *__switch_to(struct task_struct *prev,
 #endif /* CONFIG_SMP */
 
 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
-	switch_booke_debug_regs(&new->thread);
+	switch_booke_debug_regs(&new->thread.debug);
 #else
 /*
  * For PPC_BOOK3S_64, we use the hw-breakpoint interfaces that would
diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c
index f3ff587a8b7d..c5d148434c08 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_hv.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c
@@ -469,11 +469,14 @@ static int kvmppc_mmu_book3s_64_hv_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
 		slb_v = vcpu->kvm->arch.vrma_slb_v;
 	}
 
+	preempt_disable();
 	/* Find the HPTE in the hash table */
 	index = kvmppc_hv_find_lock_hpte(kvm, eaddr, slb_v,
 					 HPTE_V_VALID | HPTE_V_ABSENT);
-	if (index < 0)
+	if (index < 0) {
+		preempt_enable();
 		return -ENOENT;
+	}
 	hptep = (unsigned long *)(kvm->arch.hpt_virt + (index << 4));
 	v = hptep[0] & ~HPTE_V_HVLOCK;
 	gr = kvm->arch.revmap[index].guest_rpte;
@@ -481,6 +484,7 @@ static int kvmppc_mmu_book3s_64_hv_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
 	/* Unlock the HPTE */
 	asm volatile("lwsync" : : : "memory");
 	hptep[0] = v;
+	preempt_enable();
 
 	gpte->eaddr = eaddr;
 	gpte->vpage = ((v & HPTE_V_AVPN) << 4) | ((eaddr >> 12) & 0xfff);
@@ -665,6 +669,7 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
 		return -EFAULT;
 	} else {
 		page = pages[0];
+		pfn = page_to_pfn(page);
 		if (PageHuge(page)) {
 			page = compound_head(page);
 			pte_size <<= compound_order(page);
@@ -689,7 +694,6 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
 			}
 			rcu_read_unlock_sched();
 		}
-		pfn = page_to_pfn(page);
 	}
 
 	ret = -EFAULT;
@@ -707,8 +711,14 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
 		r = (r & ~(HPTE_R_W|HPTE_R_I|HPTE_R_G)) | HPTE_R_M;
 	}
 
-	/* Set the HPTE to point to pfn */
-	r = (r & ~(HPTE_R_PP0 - pte_size)) | (pfn << PAGE_SHIFT);
+	/*
+	 * Set the HPTE to point to pfn.
+	 * Since the pfn is at PAGE_SIZE granularity, make sure we
+	 * don't mask out lower-order bits if psize < PAGE_SIZE.
+	 */
+	if (psize < PAGE_SIZE)
+		psize = PAGE_SIZE;
+	r = (r & ~(HPTE_R_PP0 - psize)) | ((pfn << PAGE_SHIFT) & ~(psize - 1));
 	if (hpte_is_writable(r) && !write_ok)
 		r = hpte_make_readonly(r);
 	ret = RESUME_GUEST;
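
Aside: the xlate path earlier in this file now wraps the HPTE lookup in preempt_disable()/preempt_enable(), since the lookup returns with the HPTE lock bit held (see the comment added to kvmppc_hv_find_lock_hpte() further down). A sketch of the pattern with invented names:

	#include <linux/errno.h>
	#include <linux/preempt.h>

	struct table;					/* opaque; sketch only */
	extern long table_find_lock(struct table *t, unsigned long key);
	extern unsigned long table_read(struct table *t, long idx);
	extern void table_unlock(struct table *t, long idx);

	/* Look up and lock an entry that must not be held across preemption. */
	static int lookup(struct table *t, unsigned long key, unsigned long *val)
	{
		long idx;

		preempt_disable();
		idx = table_find_lock(t, key);	/* returns with the entry locked */
		if (idx < 0) {
			preempt_enable();
			return -ENOENT;
		}
		*val = table_read(t, idx);
		table_unlock(t, idx);
		preempt_enable();
		return 0;
	}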
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index 072287f1c3bc..b51d5db78068 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -131,8 +131,9 @@ static void kvmppc_fast_vcpu_kick_hv(struct kvm_vcpu *vcpu)
 static void kvmppc_core_vcpu_load_hv(struct kvm_vcpu *vcpu, int cpu)
 {
 	struct kvmppc_vcore *vc = vcpu->arch.vcore;
+	unsigned long flags;
 
-	spin_lock(&vcpu->arch.tbacct_lock);
+	spin_lock_irqsave(&vcpu->arch.tbacct_lock, flags);
 	if (vc->runner == vcpu && vc->vcore_state != VCORE_INACTIVE &&
 	    vc->preempt_tb != TB_NIL) {
 		vc->stolen_tb += mftb() - vc->preempt_tb;
@@ -143,19 +144,20 @@ static void kvmppc_core_vcpu_load_hv(struct kvm_vcpu *vcpu, int cpu)
 		vcpu->arch.busy_stolen += mftb() - vcpu->arch.busy_preempt;
 		vcpu->arch.busy_preempt = TB_NIL;
 	}
-	spin_unlock(&vcpu->arch.tbacct_lock);
+	spin_unlock_irqrestore(&vcpu->arch.tbacct_lock, flags);
 }
 
 static void kvmppc_core_vcpu_put_hv(struct kvm_vcpu *vcpu)
 {
 	struct kvmppc_vcore *vc = vcpu->arch.vcore;
+	unsigned long flags;
 
-	spin_lock(&vcpu->arch.tbacct_lock);
+	spin_lock_irqsave(&vcpu->arch.tbacct_lock, flags);
 	if (vc->runner == vcpu && vc->vcore_state != VCORE_INACTIVE)
 		vc->preempt_tb = mftb();
 	if (vcpu->arch.state == KVMPPC_VCPU_BUSY_IN_HOST)
 		vcpu->arch.busy_preempt = mftb();
-	spin_unlock(&vcpu->arch.tbacct_lock);
+	spin_unlock_irqrestore(&vcpu->arch.tbacct_lock, flags);
 }
 
 static void kvmppc_set_msr_hv(struct kvm_vcpu *vcpu, u64 msr)
@@ -486,11 +488,11 @@ static u64 vcore_stolen_time(struct kvmppc_vcore *vc, u64 now)
 	 */
 	if (vc->vcore_state != VCORE_INACTIVE &&
 	    vc->runner->arch.run_task != current) {
-		spin_lock(&vc->runner->arch.tbacct_lock);
+		spin_lock_irq(&vc->runner->arch.tbacct_lock);
 		p = vc->stolen_tb;
 		if (vc->preempt_tb != TB_NIL)
 			p += now - vc->preempt_tb;
-		spin_unlock(&vc->runner->arch.tbacct_lock);
+		spin_unlock_irq(&vc->runner->arch.tbacct_lock);
 	} else {
 		p = vc->stolen_tb;
 	}
@@ -512,10 +514,10 @@ static void kvmppc_create_dtl_entry(struct kvm_vcpu *vcpu,
 	core_stolen = vcore_stolen_time(vc, now);
 	stolen = core_stolen - vcpu->arch.stolen_logged;
 	vcpu->arch.stolen_logged = core_stolen;
-	spin_lock(&vcpu->arch.tbacct_lock);
+	spin_lock_irq(&vcpu->arch.tbacct_lock);
 	stolen += vcpu->arch.busy_stolen;
 	vcpu->arch.busy_stolen = 0;
-	spin_unlock(&vcpu->arch.tbacct_lock);
+	spin_unlock_irq(&vcpu->arch.tbacct_lock);
 	if (!dt || !vpa)
 		return;
 	memset(dt, 0, sizeof(struct dtl_entry));
@@ -589,7 +591,9 @@ int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu)
 		if (list_empty(&vcpu->kvm->arch.rtas_tokens))
 			return RESUME_HOST;
 
+		idx = srcu_read_lock(&vcpu->kvm->srcu);
 		rc = kvmppc_rtas_hcall(vcpu);
+		srcu_read_unlock(&vcpu->kvm->srcu, idx);
 
 		if (rc == -ENOENT)
 			return RESUME_HOST;
@@ -1115,13 +1119,13 @@ static void kvmppc_remove_runnable(struct kvmppc_vcore *vc,
 
 	if (vcpu->arch.state != KVMPPC_VCPU_RUNNABLE)
 		return;
-	spin_lock(&vcpu->arch.tbacct_lock);
+	spin_lock_irq(&vcpu->arch.tbacct_lock);
 	now = mftb();
 	vcpu->arch.busy_stolen += vcore_stolen_time(vc, now) -
 		vcpu->arch.stolen_logged;
 	vcpu->arch.busy_preempt = now;
 	vcpu->arch.state = KVMPPC_VCPU_BUSY_IN_HOST;
-	spin_unlock(&vcpu->arch.tbacct_lock);
+	spin_unlock_irq(&vcpu->arch.tbacct_lock);
 	--vc->n_runnable;
 	list_del(&vcpu->arch.run_list);
 }
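
Aside: the hunks above convert tbacct_lock to the _irqsave/_irq lock variants so the lock is always taken with interrupts disabled. A minimal sketch of that standard pattern, assuming a generic spinlock:

	#include <linux/spinlock.h>
	#include <linux/types.h>

	static DEFINE_SPINLOCK(acct_lock);	/* stands in for tbacct_lock */
	static u64 stolen;

	/* Caller's interrupt state unknown: save and restore it. */
	static void account_any_context(u64 delta)
	{
		unsigned long flags;

		spin_lock_irqsave(&acct_lock, flags);
		stolen += delta;
		spin_unlock_irqrestore(&acct_lock, flags);
	}

	/* Known process context with interrupts enabled: _irq is enough. */
	static void account_process_context(u64 delta)
	{
		spin_lock_irq(&acct_lock);
		stolen += delta;
		spin_unlock_irq(&acct_lock);
	}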
diff --git a/arch/powerpc/kvm/book3s_hv_rm_mmu.c b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
index 9c515440ad1a..8689e2e30857 100644
--- a/arch/powerpc/kvm/book3s_hv_rm_mmu.c
+++ b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
@@ -225,6 +225,7 @@ long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags,
 		is_io = pa & (HPTE_R_I | HPTE_R_W);
 		pte_size = PAGE_SIZE << (pa & KVMPPC_PAGE_ORDER_MASK);
 		pa &= PAGE_MASK;
+		pa |= gpa & ~PAGE_MASK;
 	} else {
 		/* Translate to host virtual address */
 		hva = __gfn_to_hva_memslot(memslot, gfn);
@@ -238,13 +239,13 @@ long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags,
 				ptel = hpte_make_readonly(ptel);
 			is_io = hpte_cache_bits(pte_val(pte));
 			pa = pte_pfn(pte) << PAGE_SHIFT;
+			pa |= hva & (pte_size - 1);
+			pa |= gpa & ~PAGE_MASK;
 		}
 	}
 
 	if (pte_size < psize)
 		return H_PARAMETER;
-	if (pa && pte_size > psize)
-		pa |= gpa & (pte_size - 1);
 
 	ptel &= ~(HPTE_R_PP0 - psize);
 	ptel |= pa;
@@ -749,6 +750,10 @@ static int slb_base_page_shift[4] = {
 	20,	/* 1M, unsupported */
 };
 
+/* When called from virtmode, this func should be protected by
+ * preempt_disable(), otherwise, the holding of HPTE_V_HVLOCK
+ * can trigger deadlock issue.
+ */
 long kvmppc_hv_find_lock_hpte(struct kvm *kvm, gva_t eaddr, unsigned long slb_v,
 			      unsigned long valid)
 {
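
Aside: both mmu hunks above fold the low-order guest-physical bits into pa as soon as the page frame is known, so a host page larger than the guest page no longer loses the in-page offset. A worked userspace example of the address composition (values invented):

	#include <stdint.h>
	#include <stdio.h>

	#define PAGE_SHIFT	12
	#define PAGE_SIZE	(1UL << PAGE_SHIFT)
	#define PAGE_MASK	(~(PAGE_SIZE - 1))

	int main(void)
	{
		uint64_t pfn = 0x12345;		/* host page frame (invented) */
		uint64_t gpa = 0xabcd0678;	/* guest physical addr (invented) */

		/* page base comes from the pfn, in-page offset from the gpa */
		uint64_t pa = (pfn << PAGE_SHIFT) | (gpa & ~PAGE_MASK);

		printf("pa = 0x%llx\n", (unsigned long long)pa);	/* 0x12345678 */
		return 0;
	}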
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
index bc8de75b1925..be4fa04a37c9 100644
--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
@@ -153,7 +153,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
 
 13:	b	machine_check_fwnmi
 
-
 /*
  * We come in here when wakened from nap mode on a secondary hw thread.
  * Relocation is off and most register values are lost.
@@ -224,6 +223,11 @@ kvm_start_guest:
 	/* Clear our vcpu pointer so we don't come back in early */
 	li	r0, 0
 	std	r0, HSTATE_KVM_VCPU(r13)
+	/*
+	 * Make sure we clear HSTATE_KVM_VCPU(r13) before incrementing
+	 * the nap_count, because once the increment to nap_count is
+	 * visible we could be given another vcpu.
+	 */
 	lwsync
 	/* Clear any pending IPI - we're an offline thread */
 	ld	r5, HSTATE_XICS_PHYS(r13)
@@ -241,7 +245,6 @@ kvm_start_guest:
 	/* increment the nap count and then go to nap mode */
 	ld	r4, HSTATE_KVM_VCORE(r13)
 	addi	r4, r4, VCORE_NAP_COUNT
-	lwsync				/* make previous updates visible */
 51:	lwarx	r3, 0, r4
 	addi	r3, r3, 1
 	stwcx.	r3, 0, r4
@@ -751,15 +754,14 @@ kvmppc_interrupt_hv:
 	 * guest CR, R12 saved in shadow VCPU SCRATCH1/0
 	 * guest R13 saved in SPRN_SCRATCH0
 	 */
-	/* abuse host_r2 as third scratch area; we get r2 from PACATOC(r13) */
-	std	r9, HSTATE_HOST_R2(r13)
+	std	r9, HSTATE_SCRATCH2(r13)
 
 	lbz	r9, HSTATE_IN_GUEST(r13)
 	cmpwi	r9, KVM_GUEST_MODE_HOST_HV
 	beq	kvmppc_bad_host_intr
 #ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
 	cmpwi	r9, KVM_GUEST_MODE_GUEST
-	ld	r9, HSTATE_HOST_R2(r13)
+	ld	r9, HSTATE_SCRATCH2(r13)
 	beq	kvmppc_interrupt_pr
 #endif
 	/* We're now back in the host but in guest MMU context */
@@ -779,7 +781,7 @@ kvmppc_interrupt_hv:
 	std	r6, VCPU_GPR(R6)(r9)
 	std	r7, VCPU_GPR(R7)(r9)
 	std	r8, VCPU_GPR(R8)(r9)
-	ld	r0, HSTATE_HOST_R2(r13)
+	ld	r0, HSTATE_SCRATCH2(r13)
 	std	r0, VCPU_GPR(R9)(r9)
 	std	r10, VCPU_GPR(R10)(r9)
 	std	r11, VCPU_GPR(R11)(r9)
@@ -990,14 +992,13 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
 	 */
 	/* Increment the threads-exiting-guest count in the 0xff00
 	   bits of vcore->entry_exit_count */
-	lwsync
 	ld	r5,HSTATE_KVM_VCORE(r13)
 	addi	r6,r5,VCORE_ENTRY_EXIT
 41:	lwarx	r3,0,r6
 	addi	r0,r3,0x100
 	stwcx.	r0,0,r6
 	bne	41b
-	lwsync
+	isync		/* order stwcx. vs. reading napping_threads */
 
 	/*
 	 * At this point we have an interrupt that we have to pass
@@ -1030,6 +1031,8 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
 	sld	r0,r0,r4
 	andc.	r3,r3,r0		/* no sense IPI'ing ourselves */
 	beq	43f
+	/* Order entry/exit update vs. IPIs */
+	sync
 	mulli	r4,r4,PACA_SIZE		/* get paca for thread 0 */
 	subf	r6,r4,r13
 42:	andi.	r0,r3,1
@@ -1638,10 +1641,10 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_206)
 	bge	kvm_cede_exit
 	stwcx.	r4,0,r6
 	bne	31b
+	/* order napping_threads update vs testing entry_exit_count */
+	isync
 	li	r0,1
 	stb	r0,HSTATE_NAPPING(r13)
-	/* order napping_threads update vs testing entry_exit_count */
-	lwsync
 	mr	r4,r3
 	lwz	r7,VCORE_ENTRY_EXIT(r5)
 	cmpwi	r7,0x100
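
Aside: the barrier changes above replace lwsync (which in these spots only orders store-store) where a store had to be ordered against a later load. A C-level analogue of the requirement, offered only as a sketch — the real code does this with isync/sync in assembly:

	#include <linux/atomic.h>
	#include <linux/compiler.h>

	static atomic_t entry_exit_count;
	static unsigned long napping_threads;

	/* The counter update must be globally visible before the flag
	 * word is read, which takes a full barrier, not a store-store
	 * ordering primitive. */
	static unsigned long bump_then_read(void)
	{
		atomic_add(0x100, &entry_exit_count);
		smp_mb();	/* order the update against the read below */
		return READ_ONCE(napping_threads);
	}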
diff --git a/arch/powerpc/kvm/book3s_interrupts.S b/arch/powerpc/kvm/book3s_interrupts.S
index f4dd041c14ea..f779450cb07c 100644
--- a/arch/powerpc/kvm/book3s_interrupts.S
+++ b/arch/powerpc/kvm/book3s_interrupts.S
@@ -129,29 +129,32 @@ kvm_start_lightweight:
 	 * R12      = exit handler id
 	 * R13      = PACA
 	 * SVCPU.*  = guest *
+	 * MSR.EE   = 1
 	 *
 	 */
 
+	PPC_LL	r3, GPR4(r1)		/* vcpu pointer */
+
+	/*
+	 * kvmppc_copy_from_svcpu can clobber volatile registers, save
+	 * the exit handler id to the vcpu and restore it from there later.
+	 */
+	stw	r12, VCPU_TRAP(r3)
+
 	/* Transfer reg values from shadow vcpu back to vcpu struct */
 	/* On 64-bit, interrupts are still off at this point */
-	PPC_LL	r3, GPR4(r1)		/* vcpu pointer */
+
 	GET_SHADOW_VCPU(r4)
 	bl	FUNC(kvmppc_copy_from_svcpu)
 	nop
 
 #ifdef CONFIG_PPC_BOOK3S_64
-	/* Re-enable interrupts */
-	ld	r3, HSTATE_HOST_MSR(r13)
-	ori	r3, r3, MSR_EE
-	MTMSR_EERI(r3)
-
 	/*
 	 * Reload kernel SPRG3 value.
 	 * No need to save guest value as usermode can't modify SPRG3.
 	 */
 	ld	r3, PACA_SPRG3(r13)
 	mtspr	SPRN_SPRG3, r3
-
 #endif /* CONFIG_PPC_BOOK3S_64 */
 
 	/* R7 = vcpu */
@@ -177,7 +180,7 @@ kvm_start_lightweight:
 	PPC_STL	r31, VCPU_GPR(R31)(r7)
 
 	/* Pass the exit number as 3rd argument to kvmppc_handle_exit */
-	mr	r5, r12
+	lwz	r5, VCPU_TRAP(r7)
 
 	/* Restore r3 (kvm_run) and r4 (vcpu) */
 	REST_2GPRS(3, r1)
diff --git a/arch/powerpc/kvm/book3s_pr.c b/arch/powerpc/kvm/book3s_pr.c
index fe14ca3dd171..5b9e9063cfaf 100644
--- a/arch/powerpc/kvm/book3s_pr.c
+++ b/arch/powerpc/kvm/book3s_pr.c
@@ -66,6 +66,7 @@ static void kvmppc_core_vcpu_load_pr(struct kvm_vcpu *vcpu, int cpu)
 	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
 	memcpy(svcpu->slb, to_book3s(vcpu)->slb_shadow, sizeof(svcpu->slb));
 	svcpu->slb_max = to_book3s(vcpu)->slb_shadow_max;
+	svcpu->in_use = 0;
 	svcpu_put(svcpu);
 #endif
 	vcpu->cpu = smp_processor_id();
@@ -78,6 +79,9 @@ static void kvmppc_core_vcpu_put_pr(struct kvm_vcpu *vcpu)
 {
 #ifdef CONFIG_PPC_BOOK3S_64
 	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
+	if (svcpu->in_use) {
+		kvmppc_copy_from_svcpu(vcpu, svcpu);
+	}
 	memcpy(to_book3s(vcpu)->slb_shadow, svcpu->slb, sizeof(svcpu->slb));
 	to_book3s(vcpu)->slb_shadow_max = svcpu->slb_max;
 	svcpu_put(svcpu);
@@ -110,12 +114,26 @@ void kvmppc_copy_to_svcpu(struct kvmppc_book3s_shadow_vcpu *svcpu,
 	svcpu->ctr = vcpu->arch.ctr;
 	svcpu->lr  = vcpu->arch.lr;
 	svcpu->pc  = vcpu->arch.pc;
+	svcpu->in_use = true;
 }
 
 /* Copy data touched by real-mode code from shadow vcpu back to vcpu */
 void kvmppc_copy_from_svcpu(struct kvm_vcpu *vcpu,
 			    struct kvmppc_book3s_shadow_vcpu *svcpu)
 {
+	/*
+	 * vcpu_put would just call us again because in_use hasn't
+	 * been updated yet.
+	 */
+	preempt_disable();
+
+	/*
+	 * Maybe we were already preempted and synced the svcpu from
+	 * our preempt notifiers. Don't bother touching this svcpu then.
+	 */
+	if (!svcpu->in_use)
+		goto out;
+
 	vcpu->arch.gpr[0] = svcpu->gpr[0];
 	vcpu->arch.gpr[1] = svcpu->gpr[1];
 	vcpu->arch.gpr[2] = svcpu->gpr[2];
@@ -139,6 +157,10 @@ void kvmppc_copy_from_svcpu(struct kvm_vcpu *vcpu,
 	vcpu->arch.fault_dar   = svcpu->fault_dar;
 	vcpu->arch.fault_dsisr = svcpu->fault_dsisr;
 	vcpu->arch.last_inst   = svcpu->last_inst;
+	svcpu->in_use = false;
+
+out:
+	preempt_enable();
 }
 
 static int kvmppc_core_check_requests_pr(struct kvm_vcpu *vcpu)
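
Aside: the in_use flag added above makes the svcpu copy-back idempotent, and disabling preemption keeps a preempt notifier from running the same sync concurrently. A sketch of the idea with invented types:

	#include <linux/preempt.h>
	#include <linux/types.h>

	struct shadow_state { bool in_use; unsigned long gpr0; };
	struct real_state   { unsigned long gpr0; };

	/* No-op when the shadow state was already synced elsewhere. */
	static void sync_from_shadow(struct real_state *r, struct shadow_state *s)
	{
		preempt_disable();
		if (!s->in_use)
			goto out;	/* already synced */

		r->gpr0 = s->gpr0;
		s->in_use = false;
	out:
		preempt_enable();
	}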
diff --git a/arch/powerpc/kvm/book3s_rmhandlers.S b/arch/powerpc/kvm/book3s_rmhandlers.S
index a38c4c9edab8..c3c5231adade 100644
--- a/arch/powerpc/kvm/book3s_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_rmhandlers.S
@@ -153,15 +153,11 @@ _GLOBAL(kvmppc_entry_trampoline)
 
 	li	r6, MSR_IR | MSR_DR
 	andc	r6, r5, r6	/* Clear DR and IR in MSR value */
-#ifdef CONFIG_PPC_BOOK3S_32
 	/*
 	 * Set EE in HOST_MSR so that it's enabled when we get into our
-	 * C exit handler function. On 64-bit we delay enabling
-	 * interrupts until we have finished transferring stuff
-	 * to or from the PACA.
+	 * C exit handler function.
 	 */
 	ori	r5, r5, MSR_EE
-#endif
 	mtsrr0	r7
 	mtsrr1	r6
 	RFI
diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c
index 53e65a210b9a..0591e05db74b 100644
--- a/arch/powerpc/kvm/booke.c
+++ b/arch/powerpc/kvm/booke.c
@@ -681,7 +681,7 @@ int kvmppc_core_check_requests(struct kvm_vcpu *vcpu)
 int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 {
 	int ret, s;
-	struct thread_struct thread;
+	struct debug_reg debug;
 #ifdef CONFIG_PPC_FPU
 	struct thread_fp_state fp;
 	int fpexc_mode;
@@ -723,9 +723,9 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 #endif
 
 	/* Switch to guest debug context */
-	thread.debug = vcpu->arch.shadow_dbg_reg;
-	switch_booke_debug_regs(&thread);
-	thread.debug = current->thread.debug;
+	debug = vcpu->arch.shadow_dbg_reg;
+	switch_booke_debug_regs(&debug);
+	debug = current->thread.debug;
 	current->thread.debug = vcpu->arch.shadow_dbg_reg;
 
 	kvmppc_fix_ee_before_entry();
@@ -736,8 +736,8 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 	   We also get here with interrupts enabled. */
 
 	/* Switch back to user space debug context */
-	switch_booke_debug_regs(&thread);
-	current->thread.debug = thread.debug;
+	switch_booke_debug_regs(&debug);
+	current->thread.debug = debug;
 
 #ifdef CONFIG_PPC_FPU
 	kvmppc_save_guest_fp(vcpu);
diff --git a/arch/powerpc/lib/copyuser_64.S b/arch/powerpc/lib/copyuser_64.S
index d73a59014900..596a285c0755 100644
--- a/arch/powerpc/lib/copyuser_64.S
+++ b/arch/powerpc/lib/copyuser_64.S
@@ -9,6 +9,14 @@
 #include <asm/processor.h>
 #include <asm/ppc_asm.h>
 
+#ifdef __BIG_ENDIAN__
+#define sLd sld		/* Shift towards low-numbered address. */
+#define sHd srd		/* Shift towards high-numbered address. */
+#else
+#define sLd srd		/* Shift towards low-numbered address. */
+#define sHd sld		/* Shift towards high-numbered address. */
+#endif
+
 	.align	7
 _GLOBAL(__copy_tofrom_user)
 BEGIN_FTR_SECTION
@@ -118,10 +126,10 @@ END_FTR_SECTION_IFCLR(CPU_FTR_UNALIGNED_LD_STD)
 
 24:	ld	r9,0(r4)	/* 3+2n loads, 2+2n stores */
 25:	ld	r0,8(r4)
-	sld	r6,r9,r10
+	sLd	r6,r9,r10
 26:	ldu	r9,16(r4)
-	srd	r7,r0,r11
-	sld	r8,r0,r10
+	sHd	r7,r0,r11
+	sLd	r8,r0,r10
 	or	r7,r7,r6
 	blt	cr6,79f
 27:	ld	r0,8(r4)
@@ -129,35 +137,35 @@ END_FTR_SECTION_IFCLR(CPU_FTR_UNALIGNED_LD_STD)
 
 28:	ld	r0,0(r4)	/* 4+2n loads, 3+2n stores */
 29:	ldu	r9,8(r4)
-	sld	r8,r0,r10
+	sLd	r8,r0,r10
 	addi	r3,r3,-8
 	blt	cr6,5f
 30:	ld	r0,8(r4)
-	srd	r12,r9,r11
-	sld	r6,r9,r10
+	sHd	r12,r9,r11
+	sLd	r6,r9,r10
 31:	ldu	r9,16(r4)
 	or	r12,r8,r12
-	srd	r7,r0,r11
-	sld	r8,r0,r10
+	sHd	r7,r0,r11
+	sLd	r8,r0,r10
 	addi	r3,r3,16
 	beq	cr6,78f
 
 1:	or	r7,r7,r6
 32:	ld	r0,8(r4)
 76:	std	r12,8(r3)
-2:	srd	r12,r9,r11
-	sld	r6,r9,r10
+2:	sHd	r12,r9,r11
+	sLd	r6,r9,r10
 33:	ldu	r9,16(r4)
 	or	r12,r8,r12
 77:	stdu	r7,16(r3)
-	srd	r7,r0,r11
-	sld	r8,r0,r10
+	sHd	r7,r0,r11
+	sLd	r8,r0,r10
 	bdnz	1b
 
 78:	std	r12,8(r3)
 	or	r7,r7,r6
 79:	std	r7,16(r3)
-5:	srd	r12,r9,r11
+5:	sHd	r12,r9,r11
 	or	r12,r8,r12
 80:	std	r12,24(r3)
 	bne	6f
@@ -165,23 +173,38 @@ END_FTR_SECTION_IFCLR(CPU_FTR_UNALIGNED_LD_STD)
 	blr
 6:	cmpwi	cr1,r5,8
 	addi	r3,r3,32
-	sld	r9,r9,r10
+	sLd	r9,r9,r10
 	ble	cr1,7f
 34:	ld	r0,8(r4)
-	srd	r7,r0,r11
+	sHd	r7,r0,r11
 	or	r9,r7,r9
 7:
 	bf	cr7*4+1,1f
+#ifdef __BIG_ENDIAN__
 	rotldi	r9,r9,32
+#endif
 94:	stw	r9,0(r3)
+#ifdef __LITTLE_ENDIAN__
+	rotrdi	r9,r9,32
+#endif
 	addi	r3,r3,4
 1:	bf	cr7*4+2,2f
+#ifdef __BIG_ENDIAN__
 	rotldi	r9,r9,16
+#endif
 95:	sth	r9,0(r3)
+#ifdef __LITTLE_ENDIAN__
+	rotrdi	r9,r9,16
+#endif
 	addi	r3,r3,2
 2:	bf	cr7*4+3,3f
+#ifdef __BIG_ENDIAN__
 	rotldi	r9,r9,8
+#endif
 96:	stb	r9,0(r3)
+#ifdef __LITTLE_ENDIAN__
+	rotrdi	r9,r9,8
+#endif
 3:	li	r3,0
 	blr
 
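
Aside: sLd/sHd exist because an unaligned copy built from aligned 64-bit loads assembles each destination word from two adjacent source words, and the shift that moves bytes toward lower addresses is sld on big-endian but srd on little-endian. A byte-order-explicit C model of that merging step, valid for source offsets 1..7:

	#include <stdint.h>

	/* Build one aligned destination word from two adjacent aligned
	 * source words when the source pointer is "off" bytes past
	 * alignment (0 < off < 8; a shift by 64 would be undefined). */
	static uint64_t merge_le(uint64_t w0, uint64_t w1, unsigned int off)
	{
		return (w0 >> (8 * off)) | (w1 << (64 - 8 * off));
	}

	static uint64_t merge_be(uint64_t w0, uint64_t w1, unsigned int off)
	{
		return (w0 << (8 * off)) | (w1 >> (64 - 8 * off));
	}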
diff --git a/arch/powerpc/platforms/powernv/eeh-ioda.c b/arch/powerpc/platforms/powernv/eeh-ioda.c
index 02245cee7818..d7ddcee7feb8 100644
--- a/arch/powerpc/platforms/powernv/eeh-ioda.c
+++ b/arch/powerpc/platforms/powernv/eeh-ioda.c
@@ -36,7 +36,6 @@
 #include "powernv.h"
 #include "pci.h"
 
-static char *hub_diag = NULL;
 static int ioda_eeh_nb_init = 0;
 
 static int ioda_eeh_event(struct notifier_block *nb,
@@ -140,15 +139,6 @@ static int ioda_eeh_post_init(struct pci_controller *hose)
 		ioda_eeh_nb_init = 1;
 	}
 
-	/* We needn't HUB diag-data on PHB3 */
-	if (phb->type == PNV_PHB_IODA1 && !hub_diag) {
-		hub_diag = (char *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
-		if (!hub_diag) {
-			pr_err("%s: Out of memory !\n", __func__);
-			return -ENOMEM;
-		}
-	}
-
 #ifdef CONFIG_DEBUG_FS
 	if (phb->dbgfs) {
 		debugfs_create_file("err_injct_outbound", 0600,
@@ -633,11 +623,10 @@ static void ioda_eeh_hub_diag_common(struct OpalIoP7IOCErrorData *data)
 static void ioda_eeh_hub_diag(struct pci_controller *hose)
 {
 	struct pnv_phb *phb = hose->private_data;
-	struct OpalIoP7IOCErrorData *data;
+	struct OpalIoP7IOCErrorData *data = &phb->diag.hub_diag;
 	long rc;
 
-	data = (struct OpalIoP7IOCErrorData *)ioda_eeh_hub_diag;
-	rc = opal_pci_get_hub_diag_data(phb->hub_id, data, PAGE_SIZE);
+	rc = opal_pci_get_hub_diag_data(phb->hub_id, data, sizeof(*data));
 	if (rc != OPAL_SUCCESS) {
 		pr_warning("%s: Failed to get HUB#%llx diag-data (%ld)\n",
 			   __func__, phb->hub_id, rc);
@@ -820,14 +809,15 @@ static void ioda_eeh_phb_diag(struct pci_controller *hose)
 	struct OpalIoPhbErrorCommon *common;
 	long rc;
 
-	common = (struct OpalIoPhbErrorCommon *)phb->diag.blob;
-	rc = opal_pci_get_phb_diag_data2(phb->opal_id, common, PAGE_SIZE);
+	rc = opal_pci_get_phb_diag_data2(phb->opal_id, phb->diag.blob,
+					 PNV_PCI_DIAG_BUF_SIZE);
 	if (rc != OPAL_SUCCESS) {
 		pr_warning("%s: Failed to get diag-data for PHB#%x (%ld)\n",
 			   __func__, hose->global_number, rc);
 		return;
 	}
 
+	common = (struct OpalIoPhbErrorCommon *)phb->diag.blob;
 	switch (common->ioType) {
 	case OPAL_PHB_ERROR_DATA_TYPE_P7IOC:
 		ioda_eeh_p7ioc_phb_diag(hose, common);
diff --git a/arch/powerpc/platforms/powernv/pci.h b/arch/powerpc/platforms/powernv/pci.h
index 911c24ef033e..1ed8d5f40f5a 100644
--- a/arch/powerpc/platforms/powernv/pci.h
+++ b/arch/powerpc/platforms/powernv/pci.h
@@ -172,11 +172,13 @@ struct pnv_phb {
 	} ioda;
 	};
 
-	/* PHB status structure */
+	/* PHB and hub status structure */
 	union {
 		unsigned char		blob[PNV_PCI_DIAG_BUF_SIZE];
 		struct OpalIoP7IOCPhbErrorData	p7ioc;
+		struct OpalIoP7IOCErrorData	hub_diag;
 	} diag;
+
 };
 
 extern struct pci_ops pnv_pci_ops;
diff --git a/arch/sh/lib/Makefile b/arch/sh/lib/Makefile
index 7b95f29e3174..3baff31e58cf 100644
--- a/arch/sh/lib/Makefile
+++ b/arch/sh/lib/Makefile
@@ -6,7 +6,7 @@ lib-y = delay.o memmove.o memchr.o \
 	 checksum.o strlen.o div64.o div64-generic.o
 
 # Extracted from libgcc
-lib-y += movmem.o ashldi3.o ashrdi3.o lshrdi3.o \
+obj-y += movmem.o ashldi3.o ashrdi3.o lshrdi3.o \
 	 ashlsi3.o ashrsi3.o ashiftrt.o lshrsi3.o \
 	 udiv_qrnnd.o
 
diff --git a/arch/sparc/include/asm/pgtable_64.h b/arch/sparc/include/asm/pgtable_64.h
index 8358dc144959..0f9e94537eee 100644
--- a/arch/sparc/include/asm/pgtable_64.h
+++ b/arch/sparc/include/asm/pgtable_64.h
@@ -619,7 +619,7 @@ static inline unsigned long pte_present(pte_t pte)
 }
 
 #define pte_accessible pte_accessible
-static inline unsigned long pte_accessible(pte_t a)
+static inline unsigned long pte_accessible(struct mm_struct *mm, pte_t a)
 {
 	return pte_val(a) & _PAGE_VALID;
 }
@@ -847,7 +847,7 @@ static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr,
 	 * SUN4V NOTE: _PAGE_VALID is the same value in both the SUN4U
 	 * and SUN4V pte layout, so this inline test is fine.
 	 */
-	if (likely(mm != &init_mm) && pte_accessible(orig))
+	if (likely(mm != &init_mm) && pte_accessible(mm, orig))
 		tlb_batch_add(mm, addr, ptep, orig, fullmm);
 }
 
diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
index 3d1999458709..bbc8b12fa443 100644
--- a/arch/x86/include/asm/pgtable.h
+++ b/arch/x86/include/asm/pgtable.h
@@ -452,9 +452,16 @@ static inline int pte_present(pte_t a)
 }
 
 #define pte_accessible pte_accessible
-static inline int pte_accessible(pte_t a)
+static inline bool pte_accessible(struct mm_struct *mm, pte_t a)
 {
-	return pte_flags(a) & _PAGE_PRESENT;
+	if (pte_flags(a) & _PAGE_PRESENT)
+		return true;
+
+	if ((pte_flags(a) & (_PAGE_PROTNONE | _PAGE_NUMA)) &&
+			mm_tlb_flush_pending(mm))
+		return true;
+
+	return false;
 }
 
 static inline int pte_hidden(pte_t pte)
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
index dc1ec0dff939..ea04b342c026 100644
--- a/arch/x86/kernel/cpu/intel.c
+++ b/arch/x86/kernel/cpu/intel.c
@@ -387,7 +387,8 @@ static void init_intel(struct cpuinfo_x86 *c)
 		set_cpu_cap(c, X86_FEATURE_PEBS);
 	}
 
-	if (c->x86 == 6 && c->x86_model == 29 && cpu_has_clflush)
+	if (c->x86 == 6 && cpu_has_clflush &&
+	    (c->x86_model == 29 || c->x86_model == 46 || c->x86_model == 47))
 		set_cpu_cap(c, X86_FEATURE_CLFLUSH_MONITOR);
 
 #ifdef CONFIG_X86_64
diff --git a/arch/x86/mm/gup.c b/arch/x86/mm/gup.c
index dd74e46828c0..0596e8e0cc19 100644
--- a/arch/x86/mm/gup.c
+++ b/arch/x86/mm/gup.c
@@ -83,6 +83,12 @@ static noinline int gup_pte_range(pmd_t pmd, unsigned long addr,
 		pte_t pte = gup_get_pte(ptep);
 		struct page *page;
 
+		/* Similar to the PMD case, NUMA hinting must take slow path */
+		if (pte_numa(pte)) {
+			pte_unmap(ptep);
+			return 0;
+		}
+
 		if ((pte_flags(pte) & (mask | _PAGE_SPECIAL)) != mask) {
 			pte_unmap(ptep);
 			return 0;
@@ -167,6 +173,13 @@ static int gup_pmd_range(pud_t pud, unsigned long addr, unsigned long end,
 		if (pmd_none(pmd) || pmd_trans_splitting(pmd))
 			return 0;
 		if (unlikely(pmd_large(pmd))) {
+			/*
+			 * NUMA hinting faults need to be handled in the GUP
+			 * slowpath for accounting purposes and so that they
+			 * can be serialised against THP migration.
+			 */
+			if (pmd_numa(pmd))
+				return 0;
 			if (!gup_huge_pmd(pmd, addr, next, write, pages, nr))
 				return 0;
 		} else {
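
Aside: returning 0 from the fast-path helpers above is what pushes __get_user_pages_fast() back onto the slow path, where NUMA-hinting faults can be accounted and serialised against THP migration. A sketch of that fast/slow split with invented helpers:

	struct page;

	extern int entry_is_special(unsigned long addr);	/* e.g. NUMA pte */
	extern struct page *fast_translate(unsigned long addr);
	extern int slow_lookup(unsigned long addr, struct page **page);

	/* The lockless fast path refuses anything special by returning 0,
	 * which makes the caller retry on the slow path. */
	static int fast_lookup(unsigned long addr, struct page **page)
	{
		if (entry_is_special(addr))
			return 0;
		*page = fast_translate(addr);
		return 1;
	}

	int lookup(unsigned long addr, struct page **page)
	{
		if (fast_lookup(addr, page))
			return 1;
		return slow_lookup(addr, page);
	}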
diff --git a/block/blk-mq-sysfs.c b/block/blk-mq-sysfs.c
index ba6cf8e9aa0a..b91ce75bd35d 100644
--- a/block/blk-mq-sysfs.c
+++ b/block/blk-mq-sysfs.c
@@ -335,9 +335,22 @@ static struct kobj_type blk_mq_hw_ktype = {
 void blk_mq_unregister_disk(struct gendisk *disk)
 {
 	struct request_queue *q = disk->queue;
+	struct blk_mq_hw_ctx *hctx;
+	struct blk_mq_ctx *ctx;
+	int i, j;
+
+	queue_for_each_hw_ctx(q, hctx, i) {
+		hctx_for_each_ctx(hctx, ctx, j) {
+			kobject_del(&ctx->kobj);
+			kobject_put(&ctx->kobj);
+		}
+		kobject_del(&hctx->kobj);
+		kobject_put(&hctx->kobj);
+	}
 
 	kobject_uevent(&q->mq_kobj, KOBJ_REMOVE);
 	kobject_del(&q->mq_kobj);
+	kobject_put(&q->mq_kobj);
 
 	kobject_put(&disk_to_dev(disk)->kobj);
 }
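
Aside: the teardown added above pairs kobject_del() with kobject_put() for every context, and adds the previously missing put on q->mq_kobj. The general rule, sketched:

	#include <linux/kobject.h>

	/* Unregistering an embedded kobject is two steps: remove it from
	 * sysfs, then drop the reference taken when it was initialised;
	 * the put may invoke the ktype's release function. */
	static void teardown(struct kobject *kobj)
	{
		kobject_del(kobj);
		kobject_put(kobj);
	}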
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig
index 5d9248526d78..4770de5707b9 100644
--- a/drivers/acpi/Kconfig
+++ b/drivers/acpi/Kconfig
@@ -348,7 +348,6 @@ source "drivers/acpi/apei/Kconfig"
 config ACPI_EXTLOG
 	tristate "Extended Error Log support"
 	depends on X86_MCE && X86_LOCAL_APIC
-	select EFI
 	select UEFI_CPER
 	default n
 	help
diff --git a/drivers/acpi/acpi_lpss.c b/drivers/acpi/acpi_lpss.c
index 6745fe137b9e..e60390597372 100644
--- a/drivers/acpi/acpi_lpss.c
+++ b/drivers/acpi/acpi_lpss.c
@@ -162,6 +162,7 @@ static const struct acpi_device_id acpi_lpss_device_ids[] = {
162 { "80860F14", (unsigned long)&byt_sdio_dev_desc }, 162 { "80860F14", (unsigned long)&byt_sdio_dev_desc },
163 { "80860F41", (unsigned long)&byt_i2c_dev_desc }, 163 { "80860F41", (unsigned long)&byt_i2c_dev_desc },
164 { "INT33B2", }, 164 { "INT33B2", },
165 { "INT33FC", },
165 166
166 { "INT3430", (unsigned long)&lpt_dev_desc }, 167 { "INT3430", (unsigned long)&lpt_dev_desc },
167 { "INT3431", (unsigned long)&lpt_dev_desc }, 168 { "INT3431", (unsigned long)&lpt_dev_desc },
diff --git a/drivers/acpi/apei/Kconfig b/drivers/acpi/apei/Kconfig
index 786294bb682c..3650b2183227 100644
--- a/drivers/acpi/apei/Kconfig
+++ b/drivers/acpi/apei/Kconfig
@@ -2,7 +2,6 @@ config ACPI_APEI
 	bool "ACPI Platform Error Interface (APEI)"
 	select MISC_FILESYSTEMS
 	select PSTORE
-	select EFI
 	select UEFI_CPER
 	depends on X86
 	help
diff --git a/drivers/acpi/apei/erst.c b/drivers/acpi/apei/erst.c
index 26311f23c824..cb1d557fc22c 100644
--- a/drivers/acpi/apei/erst.c
+++ b/drivers/acpi/apei/erst.c
@@ -942,6 +942,7 @@ static int erst_clearer(enum pstore_type_id type, u64 id, int count,
 static struct pstore_info erst_info = {
 	.owner		= THIS_MODULE,
 	.name		= "erst",
+	.flags		= PSTORE_FLAGS_FRAGILE,
 	.open		= erst_open_pstore,
 	.close		= erst_close_pstore,
 	.read		= erst_reader,
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index 14f1e9506338..c0ed4f273cf2 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -1238,15 +1238,6 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 	if (rc)
 		return rc;
 
-	/* AHCI controllers often implement SFF compatible interface.
-	 * Grab all PCI BARs just in case.
-	 */
-	rc = pcim_iomap_regions_request_all(pdev, 1 << ahci_pci_bar, DRV_NAME);
-	if (rc == -EBUSY)
-		pcim_pin_device(pdev);
-	if (rc)
-		return rc;
-
 	if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
 	    (pdev->device == 0x2652 || pdev->device == 0x2653)) {
 		u8 map;
@@ -1263,6 +1254,15 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 		}
 	}
 
+	/* AHCI controllers often implement SFF compatible interface.
+	 * Grab all PCI BARs just in case.
+	 */
+	rc = pcim_iomap_regions_request_all(pdev, 1 << ahci_pci_bar, DRV_NAME);
+	if (rc == -EBUSY)
+		pcim_pin_device(pdev);
+	if (rc)
+		return rc;
+
 	hpriv = devm_kzalloc(dev, sizeof(*hpriv), GFP_KERNEL);
 	if (!hpriv)
 		return -ENOMEM;
diff --git a/drivers/ata/ahci_imx.c b/drivers/ata/ahci_imx.c
index ae2d73fe321e..3e23e9941dad 100644
--- a/drivers/ata/ahci_imx.c
+++ b/drivers/ata/ahci_imx.c
@@ -113,7 +113,7 @@ static int imx6q_sata_init(struct device *dev, void __iomem *mmio)
 	/*
 	 * set PHY Paremeters, two steps to configure the GPR13,
 	 * one write for rest of parameters, mask of first write
-	 * is 0x07fffffd, and the other one write for setting
+	 * is 0x07ffffff, and the other one write for setting
 	 * the mpll_clk_en.
 	 */
 	regmap_update_bits(imxpriv->gpr, 0x34, IMX6Q_GPR13_SATA_RX_EQ_VAL_MASK
@@ -124,6 +124,7 @@ static int imx6q_sata_init(struct device *dev, void __iomem *mmio)
 			| IMX6Q_GPR13_SATA_TX_ATTEN_MASK
 			| IMX6Q_GPR13_SATA_TX_BOOST_MASK
 			| IMX6Q_GPR13_SATA_TX_LVL_MASK
+			| IMX6Q_GPR13_SATA_MPLL_CLK_EN
 			| IMX6Q_GPR13_SATA_TX_EDGE_RATE
 			, IMX6Q_GPR13_SATA_RX_EQ_VAL_3_0_DB
 			| IMX6Q_GPR13_SATA_RX_LOS_LVL_SATA2M
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 75b93678bbcd..1393a5890ed5 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -2149,9 +2149,16 @@ static int ata_dev_config_ncq(struct ata_device *dev,
2149 "failed to get NCQ Send/Recv Log Emask 0x%x\n", 2149 "failed to get NCQ Send/Recv Log Emask 0x%x\n",
2150 err_mask); 2150 err_mask);
2151 } else { 2151 } else {
2152 u8 *cmds = dev->ncq_send_recv_cmds;
2153
2152 dev->flags |= ATA_DFLAG_NCQ_SEND_RECV; 2154 dev->flags |= ATA_DFLAG_NCQ_SEND_RECV;
2153 memcpy(dev->ncq_send_recv_cmds, ap->sector_buf, 2155 memcpy(cmds, ap->sector_buf, ATA_LOG_NCQ_SEND_RECV_SIZE);
2154 ATA_LOG_NCQ_SEND_RECV_SIZE); 2156
2157 if (dev->horkage & ATA_HORKAGE_NO_NCQ_TRIM) {
2158 ata_dev_dbg(dev, "disabling queued TRIM support\n");
2159 cmds[ATA_LOG_NCQ_SEND_RECV_DSM_OFFSET] &=
2160 ~ATA_LOG_NCQ_SEND_RECV_DSM_TRIM;
2161 }
2155 } 2162 }
2156 } 2163 }
2157 2164
@@ -4156,6 +4163,9 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
4156 { "ST3320[68]13AS", "SD1[5-9]", ATA_HORKAGE_NONCQ | 4163 { "ST3320[68]13AS", "SD1[5-9]", ATA_HORKAGE_NONCQ |
4157 ATA_HORKAGE_FIRMWARE_WARN }, 4164 ATA_HORKAGE_FIRMWARE_WARN },
4158 4165
4166 /* Seagate Momentus SpinPoint M8 seem to have FPMDA_AA issues */
4167 { "ST1000LM024 HN-M101MBB", "2AR10001", ATA_HORKAGE_BROKEN_FPDMA_AA },
4168
4159 /* Blacklist entries taken from Silicon Image 3124/3132 4169 /* Blacklist entries taken from Silicon Image 3124/3132
4160 Windows driver .inf file - also several Linux problem reports */ 4170 Windows driver .inf file - also several Linux problem reports */
4161 { "HTS541060G9SA00", "MB3OC60D", ATA_HORKAGE_NONCQ, }, 4171 { "HTS541060G9SA00", "MB3OC60D", ATA_HORKAGE_NONCQ, },
@@ -4202,6 +4212,10 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
4202 { "PIONEER DVD-RW DVR-212D", NULL, ATA_HORKAGE_NOSETXFER }, 4212 { "PIONEER DVD-RW DVR-212D", NULL, ATA_HORKAGE_NOSETXFER },
4203 { "PIONEER DVD-RW DVR-216D", NULL, ATA_HORKAGE_NOSETXFER }, 4213 { "PIONEER DVD-RW DVR-216D", NULL, ATA_HORKAGE_NOSETXFER },
4204 4214
4215 /* devices that don't properly handle queued TRIM commands */
4216 { "Micron_M500*", NULL, ATA_HORKAGE_NO_NCQ_TRIM, },
4217 { "Crucial_CT???M500SSD1", NULL, ATA_HORKAGE_NO_NCQ_TRIM, },
4218
4205 /* End Marker */ 4219 /* End Marker */
4206 { } 4220 { }
4207}; 4221};
@@ -6519,6 +6533,7 @@ static int __init ata_parse_force_one(char **cur,
6519 { "norst", .lflags = ATA_LFLAG_NO_HRST | ATA_LFLAG_NO_SRST }, 6533 { "norst", .lflags = ATA_LFLAG_NO_HRST | ATA_LFLAG_NO_SRST },
6520 { "rstonce", .lflags = ATA_LFLAG_RST_ONCE }, 6534 { "rstonce", .lflags = ATA_LFLAG_RST_ONCE },
6521 { "atapi_dmadir", .horkage_on = ATA_HORKAGE_ATAPI_DMADIR }, 6535 { "atapi_dmadir", .horkage_on = ATA_HORKAGE_ATAPI_DMADIR },
6536 { "disable", .horkage_on = ATA_HORKAGE_DISABLE },
6522 }; 6537 };
6523 char *start = *cur, *p = *cur; 6538 char *start = *cur, *p = *cur;
6524 char *id, *val, *endp; 6539 char *id, *val, *endp;
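
Aside: ATA_HORKAGE_NO_NCQ_TRIM is enforced by clearing the DSM TRIM bit in the device's cached copy of the NCQ Send/Recv log page (first hunk above), so upper layers simply never see the capability. A tiny sketch of masking a capability bit in a cached page — the offsets and names here are invented:

	#include <stdint.h>

	#define CAP_DSM_OFFSET	0x04		/* invented byte offset */
	#define CAP_DSM_TRIM	(1 << 0)	/* invented capability bit */

	/* Hide a capability by clearing its bit in the cached capability
	 * page before any other layer reads it. */
	static void mask_queued_trim(uint8_t *page)
	{
		page[CAP_DSM_OFFSET] &= (uint8_t)~CAP_DSM_TRIM;
	}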
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index ab58556d347c..377eb889f555 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -3872,6 +3872,27 @@ void ata_scsi_hotplug(struct work_struct *work)
 		return;
 	}
 
+	/*
+	 * XXX - UGLY HACK
+	 *
+	 * The block layer suspend/resume path is fundamentally broken due
+	 * to freezable kthreads and workqueue and may deadlock if a block
+	 * device gets removed while resume is in progress.  I don't know
+	 * what the solution is short of removing freezable kthreads and
+	 * workqueues altogether.
+	 *
+	 * The following is an ugly hack to avoid kicking off device
+	 * removal while freezer is active.  This is a joke but does avoid
+	 * this particular deadlock scenario.
+	 *
+	 * https://bugzilla.kernel.org/show_bug.cgi?id=62801
+	 * http://marc.info/?l=linux-kernel&m=138695698516487
+	 */
+#ifdef CONFIG_FREEZER
+	while (pm_freezing)
+		msleep(10);
+#endif
+
 	DPRINTK("ENTER\n");
 	mutex_lock(&ap->scsi_scan_mutex);
 
diff --git a/drivers/block/null_blk.c b/drivers/block/null_blk.c
index f370fc13aea5..a2e69d26266d 100644
--- a/drivers/block/null_blk.c
+++ b/drivers/block/null_blk.c
@@ -1,4 +1,5 @@
 #include <linux/module.h>
+
 #include <linux/moduleparam.h>
 #include <linux/sched.h>
 #include <linux/fs.h>
@@ -65,7 +66,7 @@ enum {
 	NULL_Q_MQ	= 2,
 };
 
-static int submit_queues = 1;
+static int submit_queues;
 module_param(submit_queues, int, S_IRUGO);
 MODULE_PARM_DESC(submit_queues, "Number of submission queues");
 
@@ -101,9 +102,9 @@ static int hw_queue_depth = 64;
 module_param(hw_queue_depth, int, S_IRUGO);
 MODULE_PARM_DESC(hw_queue_depth, "Queue depth for each hardware queue. Default: 64");
 
-static bool use_per_node_hctx = true;
+static bool use_per_node_hctx = false;
 module_param(use_per_node_hctx, bool, S_IRUGO);
-MODULE_PARM_DESC(use_per_node_hctx, "Use per-node allocation for hardware context queues. Default: true");
+MODULE_PARM_DESC(use_per_node_hctx, "Use per-node allocation for hardware context queues. Default: false");
 
 static void put_tag(struct nullb_queue *nq, unsigned int tag)
 {
@@ -346,8 +347,37 @@ static int null_queue_rq(struct blk_mq_hw_ctx *hctx, struct request *rq)
 
 static struct blk_mq_hw_ctx *null_alloc_hctx(struct blk_mq_reg *reg, unsigned int hctx_index)
 {
-	return kzalloc_node(sizeof(struct blk_mq_hw_ctx), GFP_KERNEL,
-				hctx_index);
+	int b_size = DIV_ROUND_UP(reg->nr_hw_queues, nr_online_nodes);
+	int tip = (reg->nr_hw_queues % nr_online_nodes);
+	int node = 0, i, n;
+
+	/*
+	 * Split submit queues evenly wrt to the number of nodes. If uneven,
+	 * fill the first buckets with one extra, until the rest is filled with
+	 * no extra.
+	 */
+	for (i = 0, n = 1; i < hctx_index; i++, n++) {
+		if (n % b_size == 0) {
+			n = 0;
+			node++;
+
+			tip--;
+			if (!tip)
+				b_size = reg->nr_hw_queues / nr_online_nodes;
+		}
+	}
+
+	/*
+	 * A node might not be online, therefore map the relative node id to the
+	 * real node id.
+	 */
+	for_each_online_node(n) {
+		if (!node)
+			break;
+		node--;
+	}
+
+	return kzalloc_node(sizeof(struct blk_mq_hw_ctx), GFP_KERNEL, n);
 }
 
@@ -355,16 +385,24 @@ static void null_free_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_index)
 	kfree(hctx);
 }
 
+static void null_init_queue(struct nullb *nullb, struct nullb_queue *nq)
+{
+	BUG_ON(!nullb);
+	BUG_ON(!nq);
+
+	init_waitqueue_head(&nq->wait);
+	nq->queue_depth = nullb->queue_depth;
+}
+
 static int null_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
 			  unsigned int index)
 {
 	struct nullb *nullb = data;
 	struct nullb_queue *nq = &nullb->queues[index];
 
-	init_waitqueue_head(&nq->wait);
-	nq->queue_depth = nullb->queue_depth;
-	nullb->nr_queues++;
 	hctx->driver_data = nq;
+	null_init_queue(nullb, nq);
+	nullb->nr_queues++;
 
 	return 0;
 }
@@ -417,13 +455,13 @@ static int setup_commands(struct nullb_queue *nq)
 
 	nq->cmds = kzalloc(nq->queue_depth * sizeof(*cmd), GFP_KERNEL);
 	if (!nq->cmds)
-		return 1;
+		return -ENOMEM;
 
 	tag_size = ALIGN(nq->queue_depth, BITS_PER_LONG) / BITS_PER_LONG;
 	nq->tag_map = kzalloc(tag_size * sizeof(unsigned long), GFP_KERNEL);
 	if (!nq->tag_map) {
 		kfree(nq->cmds);
-		return 1;
+		return -ENOMEM;
 	}
 
 	for (i = 0; i < nq->queue_depth; i++) {
@@ -454,33 +492,37 @@ static void cleanup_queues(struct nullb *nullb)
 
 static int setup_queues(struct nullb *nullb)
 {
-	struct nullb_queue *nq;
-	int i;
-
-	nullb->queues = kzalloc(submit_queues * sizeof(*nq), GFP_KERNEL);
+	nullb->queues = kzalloc(submit_queues * sizeof(struct nullb_queue),
+								GFP_KERNEL);
 	if (!nullb->queues)
-		return 1;
+		return -ENOMEM;
 
 	nullb->nr_queues = 0;
 	nullb->queue_depth = hw_queue_depth;
 
-	if (queue_mode == NULL_Q_MQ)
-		return 0;
+	return 0;
+}
+
+static int init_driver_queues(struct nullb *nullb)
+{
+	struct nullb_queue *nq;
+	int i, ret = 0;
 
 	for (i = 0; i < submit_queues; i++) {
 		nq = &nullb->queues[i];
-		init_waitqueue_head(&nq->wait);
-		nq->queue_depth = hw_queue_depth;
-		if (setup_commands(nq))
-			break;
+
+		null_init_queue(nullb, nq);
+
+		ret = setup_commands(nq);
+		if (ret)
+			goto err_queue;
 		nullb->nr_queues++;
 	}
 
-	if (i == submit_queues)
-		return 0;
-
+	return 0;
+err_queue:
 	cleanup_queues(nullb);
-	return 1;
+	return ret;
 }
 
 static int null_add_dev(void)
@@ -518,11 +560,13 @@ static int null_add_dev(void)
 	} else if (queue_mode == NULL_Q_BIO) {
 		nullb->q = blk_alloc_queue_node(GFP_KERNEL, home_node);
 		blk_queue_make_request(nullb->q, null_queue_bio);
+		init_driver_queues(nullb);
 	} else {
 		nullb->q = blk_init_queue_node(null_request_fn, &nullb->lock, home_node);
 		blk_queue_prep_rq(nullb->q, null_rq_prep_fn);
 		if (nullb->q)
 			blk_queue_softirq_done(nullb->q, null_softirq_done_fn);
+		init_driver_queues(nullb);
 	}
 
 	if (!nullb->q)
@@ -579,7 +623,13 @@ static int __init null_init(void)
 	}
 #endif
 
-	if (submit_queues > nr_cpu_ids)
+	if (queue_mode == NULL_Q_MQ && use_per_node_hctx) {
+		if (submit_queues < nr_online_nodes) {
+			pr_warn("null_blk: submit_queues param is set to %u.",
+							nr_online_nodes);
+			submit_queues = nr_online_nodes;
+		}
+	} else if (submit_queues > nr_cpu_ids)
 		submit_queues = nr_cpu_ids;
 	else if (!submit_queues)
 		submit_queues = 1;
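
The per-node bucket split added to null_alloc_hctx() above is easier to check in isolation. A minimal user-space sketch of the same arithmetic (hctx_to_node() and all other names here are illustrative, not part of the patch):

#include <stdio.h>

/* Mirror of the loop above: spread nr_queues over nr_nodes, giving the
 * first (nr_queues % nr_nodes) nodes one extra queue each. */
static int hctx_to_node(int hctx_index, int nr_queues, int nr_nodes)
{
	int b_size = (nr_queues + nr_nodes - 1) / nr_nodes; /* DIV_ROUND_UP */
	int tip = nr_queues % nr_nodes;
	int node = 0, i, n;

	for (i = 0, n = 1; i < hctx_index; i++, n++) {
		if (n % b_size == 0) {
			n = 0;
			node++;
			tip--;
			if (!tip)
				b_size = nr_queues / nr_nodes;
		}
	}
	return node;
}

int main(void)
{
	int i;

	for (i = 0; i < 8; i++)	/* 8 queues over 3 nodes -> buckets of 3, 3, 2 */
		printf("hctx %d -> node %d\n", i, hctx_to_node(i, 8, 3));
	return 0;
}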
diff --git a/drivers/block/skd_main.c b/drivers/block/skd_main.c
index 9199c93be926..eb6e1e0e8db2 100644
--- a/drivers/block/skd_main.c
+++ b/drivers/block/skd_main.c
@@ -5269,7 +5269,7 @@ const char *skd_skdev_state_to_str(enum skd_drvr_state state)
 	}
 }
 
-const char *skd_skmsg_state_to_str(enum skd_fit_msg_state state)
+static const char *skd_skmsg_state_to_str(enum skd_fit_msg_state state)
 {
 	switch (state) {
 	case SKD_MSG_STATE_IDLE:
@@ -5281,7 +5281,7 @@ const char *skd_skmsg_state_to_str(enum skd_fit_msg_state state)
 	}
 }
 
-const char *skd_skreq_state_to_str(enum skd_req_state state)
+static const char *skd_skreq_state_to_str(enum skd_req_state state)
 {
 	switch (state) {
 	case SKD_REQ_STATE_IDLE:
diff --git a/drivers/bluetooth/ath3k.c b/drivers/bluetooth/ath3k.c
index d3fdc32b579d..106d1d8e16ad 100644
--- a/drivers/bluetooth/ath3k.c
+++ b/drivers/bluetooth/ath3k.c
@@ -88,6 +88,7 @@ static const struct usb_device_id ath3k_table[] = {
 	{ USB_DEVICE(0x0CF3, 0xE004) },
 	{ USB_DEVICE(0x0CF3, 0xE005) },
 	{ USB_DEVICE(0x0930, 0x0219) },
+	{ USB_DEVICE(0x0930, 0x0220) },
 	{ USB_DEVICE(0x0489, 0xe057) },
 	{ USB_DEVICE(0x13d3, 0x3393) },
 	{ USB_DEVICE(0x0489, 0xe04e) },
@@ -132,6 +133,7 @@ static const struct usb_device_id ath3k_blist_tbl[] = {
 	{ USB_DEVICE(0x0cf3, 0xe004), .driver_info = BTUSB_ATH3012 },
 	{ USB_DEVICE(0x0cf3, 0xe005), .driver_info = BTUSB_ATH3012 },
 	{ USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 },
+	{ USB_DEVICE(0x0930, 0x0220), .driver_info = BTUSB_ATH3012 },
 	{ USB_DEVICE(0x0489, 0xe057), .driver_info = BTUSB_ATH3012 },
 	{ USB_DEVICE(0x13d3, 0x3393), .driver_info = BTUSB_ATH3012 },
 	{ USB_DEVICE(0x0489, 0xe04e), .driver_info = BTUSB_ATH3012 },
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index bfbcc5a772a6..9f7e539de510 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -155,6 +155,7 @@ static const struct usb_device_id blacklist_table[] = {
 	{ USB_DEVICE(0x0cf3, 0xe004), .driver_info = BTUSB_ATH3012 },
 	{ USB_DEVICE(0x0cf3, 0xe005), .driver_info = BTUSB_ATH3012 },
 	{ USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 },
+	{ USB_DEVICE(0x0930, 0x0220), .driver_info = BTUSB_ATH3012 },
 	{ USB_DEVICE(0x0489, 0xe057), .driver_info = BTUSB_ATH3012 },
 	{ USB_DEVICE(0x13d3, 0x3393), .driver_info = BTUSB_ATH3012 },
 	{ USB_DEVICE(0x0489, 0xe04e), .driver_info = BTUSB_ATH3012 },
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 02d534da22dd..16d7b4ac94be 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -828,6 +828,12 @@ static void cpufreq_init_policy(struct cpufreq_policy *policy)
 	int ret = 0;
 
 	memcpy(&new_policy, policy, sizeof(*policy));
+
+	/* Use the default policy if its valid. */
+	if (cpufreq_driver->setpolicy)
+		cpufreq_parse_governor(policy->governor->name,
+					&new_policy.policy, NULL);
+
 	/* assure that the starting sequence is run in cpufreq_set_policy */
 	policy->governor = NULL;
 
@@ -845,8 +851,7 @@ static void cpufreq_init_policy(struct cpufreq_policy *policy)
 
 #ifdef CONFIG_HOTPLUG_CPU
 static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy,
-				  unsigned int cpu, struct device *dev,
-				  bool frozen)
+				  unsigned int cpu, struct device *dev)
 {
 	int ret = 0;
 	unsigned long flags;
@@ -877,11 +882,7 @@ static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy,
 		}
 	}
 
-	/* Don't touch sysfs links during light-weight init */
-	if (!frozen)
-		ret = sysfs_create_link(&dev->kobj, &policy->kobj, "cpufreq");
-
-	return ret;
+	return sysfs_create_link(&dev->kobj, &policy->kobj, "cpufreq");
 }
 #endif
 
@@ -926,6 +927,27 @@ err_free_policy:
 	return NULL;
 }
 
+static void cpufreq_policy_put_kobj(struct cpufreq_policy *policy)
+{
+	struct kobject *kobj;
+	struct completion *cmp;
+
+	down_read(&policy->rwsem);
+	kobj = &policy->kobj;
+	cmp = &policy->kobj_unregister;
+	up_read(&policy->rwsem);
+	kobject_put(kobj);
+
+	/*
+	 * We need to make sure that the underlying kobj is
+	 * actually not referenced anymore by anybody before we
+	 * proceed with unloading.
+	 */
+	pr_debug("waiting for dropping of refcount\n");
+	wait_for_completion(cmp);
+	pr_debug("wait complete\n");
+}
+
 static void cpufreq_policy_free(struct cpufreq_policy *policy)
 {
 	free_cpumask_var(policy->related_cpus);
@@ -986,7 +1008,7 @@ static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif,
 	list_for_each_entry(tpolicy, &cpufreq_policy_list, policy_list) {
 		if (cpumask_test_cpu(cpu, tpolicy->related_cpus)) {
 			read_unlock_irqrestore(&cpufreq_driver_lock, flags);
-			ret = cpufreq_add_policy_cpu(tpolicy, cpu, dev, frozen);
+			ret = cpufreq_add_policy_cpu(tpolicy, cpu, dev);
 			up_read(&cpufreq_rwsem);
 			return ret;
 		}
@@ -1096,7 +1118,10 @@ err_get_freq:
 	if (cpufreq_driver->exit)
 		cpufreq_driver->exit(policy);
 err_set_policy_cpu:
+	if (frozen)
+		cpufreq_policy_put_kobj(policy);
 	cpufreq_policy_free(policy);
+
 nomem_out:
 	up_read(&cpufreq_rwsem);
 
@@ -1118,7 +1143,7 @@ static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
 }
 
 static int cpufreq_nominate_new_policy_cpu(struct cpufreq_policy *policy,
-					   unsigned int old_cpu, bool frozen)
+					   unsigned int old_cpu)
 {
 	struct device *cpu_dev;
 	int ret;
@@ -1126,10 +1151,6 @@ static int cpufreq_nominate_new_policy_cpu(struct cpufreq_policy *policy,
 	/* first sibling now owns the new sysfs dir */
 	cpu_dev = get_cpu_device(cpumask_any_but(policy->cpus, old_cpu));
 
-	/* Don't touch sysfs files during light-weight tear-down */
-	if (frozen)
-		return cpu_dev->id;
-
 	sysfs_remove_link(&cpu_dev->kobj, "cpufreq");
 	ret = kobject_move(&policy->kobj, &cpu_dev->kobj);
 	if (ret) {
@@ -1196,7 +1217,7 @@ static int __cpufreq_remove_dev_prepare(struct device *dev,
 		if (!frozen)
 			sysfs_remove_link(&dev->kobj, "cpufreq");
 	} else if (cpus > 1) {
-		new_cpu = cpufreq_nominate_new_policy_cpu(policy, cpu, frozen);
+		new_cpu = cpufreq_nominate_new_policy_cpu(policy, cpu);
 		if (new_cpu >= 0) {
 			update_policy_cpu(policy, new_cpu);
 
@@ -1218,8 +1239,6 @@ static int __cpufreq_remove_dev_finish(struct device *dev,
 	int ret;
 	unsigned long flags;
 	struct cpufreq_policy *policy;
-	struct kobject *kobj;
-	struct completion *cmp;
 
 	read_lock_irqsave(&cpufreq_driver_lock, flags);
 	policy = per_cpu(cpufreq_cpu_data, cpu);
@@ -1249,22 +1268,8 @@ static int __cpufreq_remove_dev_finish(struct device *dev,
 		}
 	}
 
-	if (!frozen) {
-		down_read(&policy->rwsem);
-		kobj = &policy->kobj;
-		cmp = &policy->kobj_unregister;
-		up_read(&policy->rwsem);
-		kobject_put(kobj);
-
-		/*
-		 * We need to make sure that the underlying kobj is
-		 * actually not referenced anymore by anybody before we
-		 * proceed with unloading.
-		 */
-		pr_debug("waiting for dropping of refcount\n");
-		wait_for_completion(cmp);
-		pr_debug("wait complete\n");
-	}
+	if (!frozen)
+		cpufreq_policy_put_kobj(policy);
 
 	/*
 	 * Perform the ->exit() even during light-weight tear-down,
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index 446687cc2334..c823daaf9043 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -62,6 +62,7 @@ config INTEL_IOATDMA
 	tristate "Intel I/OAT DMA support"
 	depends on PCI && X86
 	select DMA_ENGINE
+	select DMA_ENGINE_RAID
 	select DCA
 	help
 	  Enable support for the Intel(R) I/OAT DMA engine present
@@ -112,6 +113,7 @@ config MV_XOR
 	bool "Marvell XOR engine support"
 	depends on PLAT_ORION
 	select DMA_ENGINE
+	select DMA_ENGINE_RAID
 	select ASYNC_TX_ENABLE_CHANNEL_SWITCH
 	---help---
 	  Enable support for the Marvell XOR engine.
@@ -187,6 +189,7 @@ config AMCC_PPC440SPE_ADMA
 	tristate "AMCC PPC440SPe ADMA support"
 	depends on 440SPe || 440SP
 	select DMA_ENGINE
+	select DMA_ENGINE_RAID
 	select ARCH_HAS_ASYNC_TX_FIND_CHANNEL
 	select ASYNC_TX_ENABLE_CHANNEL_SWITCH
 	help
@@ -352,6 +355,7 @@ config NET_DMA
 	bool "Network: TCP receive copy offload"
 	depends on DMA_ENGINE && NET
 	default (INTEL_IOATDMA || FSL_DMA)
+	depends on BROKEN
 	help
 	  This enables the use of DMA engines in the network stack to
 	  offload receive copy-to-user operations, freeing CPU cycles.
@@ -377,4 +381,7 @@ config DMATEST
 	  Simple DMA test client. Say N unless you're debugging a
 	  DMA Device driver.
 
+config DMA_ENGINE_RAID
+	bool
+
 endif
diff --git a/drivers/dma/at_hdmac_regs.h b/drivers/dma/at_hdmac_regs.h
index f31d647acdfa..2787aba60c6b 100644
--- a/drivers/dma/at_hdmac_regs.h
+++ b/drivers/dma/at_hdmac_regs.h
@@ -347,10 +347,6 @@ static struct device *chan2dev(struct dma_chan *chan)
 {
 	return &chan->dev->device;
 }
-static struct device *chan2parent(struct dma_chan *chan)
-{
-	return chan->dev->device.parent;
-}
 
 #if defined(VERBOSE_DEBUG)
 static void vdbg_dump_regs(struct at_dma_chan *atchan)
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index ea806bdc12ef..ef63b9058f3c 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -912,7 +912,7 @@ struct dmaengine_unmap_pool {
 #define __UNMAP_POOL(x) { .size = x, .name = "dmaengine-unmap-" __stringify(x) }
 static struct dmaengine_unmap_pool unmap_pool[] = {
 	__UNMAP_POOL(2),
-	#if IS_ENABLED(CONFIG_ASYNC_TX_DMA)
+	#if IS_ENABLED(CONFIG_DMA_ENGINE_RAID)
 	__UNMAP_POOL(16),
 	__UNMAP_POOL(128),
 	__UNMAP_POOL(256),
@@ -1054,7 +1054,7 @@ dma_async_memcpy_pg_to_pg(struct dma_chan *chan, struct page *dest_pg,
 	dma_cookie_t cookie;
 	unsigned long flags;
 
-	unmap = dmaengine_get_unmap_data(dev->dev, 2, GFP_NOIO);
+	unmap = dmaengine_get_unmap_data(dev->dev, 2, GFP_NOWAIT);
 	if (!unmap)
 		return -ENOMEM;
 
diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c
index 20f9a3aaf926..9dfcaf5c1288 100644
--- a/drivers/dma/dmatest.c
+++ b/drivers/dma/dmatest.c
@@ -539,9 +539,9 @@ static int dmatest_func(void *data)
 
 	um->len = params->buf_size;
 	for (i = 0; i < src_cnt; i++) {
-		unsigned long buf = (unsigned long) thread->srcs[i];
+		void *buf = thread->srcs[i];
 		struct page *pg = virt_to_page(buf);
-		unsigned pg_off = buf & ~PAGE_MASK;
+		unsigned pg_off = (unsigned long) buf & ~PAGE_MASK;
 
 		um->addr[i] = dma_map_page(dev->dev, pg, pg_off,
 					   um->len, DMA_TO_DEVICE);
@@ -559,9 +559,9 @@ static int dmatest_func(void *data)
 	/* map with DMA_BIDIRECTIONAL to force writeback/invalidate */
 	dsts = &um->addr[src_cnt];
 	for (i = 0; i < dst_cnt; i++) {
-		unsigned long buf = (unsigned long) thread->dsts[i];
+		void *buf = thread->dsts[i];
 		struct page *pg = virt_to_page(buf);
-		unsigned pg_off = buf & ~PAGE_MASK;
+		unsigned pg_off = (unsigned long) buf & ~PAGE_MASK;
 
 		dsts[i] = dma_map_page(dev->dev, pg, pg_off, um->len,
 				       DMA_BIDIRECTIONAL);
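
The dmatest change above rests on how an address splits into page and offset: the offset within a page is just the low bits of the address, and PAGE_MASK clears exactly those bits. A standalone sketch of that arithmetic (the address constant is illustrative):

#include <stdio.h>

#define PAGE_SIZE 4096UL
#define PAGE_MASK (~(PAGE_SIZE - 1))

int main(void)
{
	unsigned long addr = 0x12345678UL;
	unsigned long pg_off = addr & ~PAGE_MASK;	/* keep the low 12 bits */

	/* prints "page base 0x12345000, offset 0x678" */
	printf("page base %#lx, offset %#lx\n", addr & PAGE_MASK, pg_off);
	return 0;
}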
diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c
index 7086a16a55f2..f157c6f76b32 100644
--- a/drivers/dma/fsldma.c
+++ b/drivers/dma/fsldma.c
@@ -86,11 +86,6 @@ static void set_desc_cnt(struct fsldma_chan *chan,
 	hw->count = CPU_TO_DMA(chan, count, 32);
 }
 
-static u32 get_desc_cnt(struct fsldma_chan *chan, struct fsl_desc_sw *desc)
-{
-	return DMA_TO_CPU(chan, desc->hw.count, 32);
-}
-
 static void set_desc_src(struct fsldma_chan *chan,
 			 struct fsl_dma_ld_hw *hw, dma_addr_t src)
 {
@@ -101,16 +96,6 @@ static void set_desc_src(struct fsldma_chan *chan,
 	hw->src_addr = CPU_TO_DMA(chan, snoop_bits | src, 64);
 }
 
-static dma_addr_t get_desc_src(struct fsldma_chan *chan,
-			       struct fsl_desc_sw *desc)
-{
-	u64 snoop_bits;
-
-	snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX)
-		? ((u64)FSL_DMA_SATR_SREADTYPE_SNOOP_READ << 32) : 0;
-	return DMA_TO_CPU(chan, desc->hw.src_addr, 64) & ~snoop_bits;
-}
-
 static void set_desc_dst(struct fsldma_chan *chan,
 			 struct fsl_dma_ld_hw *hw, dma_addr_t dst)
 {
@@ -121,16 +106,6 @@ static void set_desc_dst(struct fsldma_chan *chan,
 	hw->dst_addr = CPU_TO_DMA(chan, snoop_bits | dst, 64);
 }
 
-static dma_addr_t get_desc_dst(struct fsldma_chan *chan,
-			       struct fsl_desc_sw *desc)
-{
-	u64 snoop_bits;
-
-	snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX)
-		? ((u64)FSL_DMA_DATR_DWRITETYPE_SNOOP_WRITE << 32) : 0;
-	return DMA_TO_CPU(chan, desc->hw.dst_addr, 64) & ~snoop_bits;
-}
-
 static void set_desc_next(struct fsldma_chan *chan,
 			  struct fsl_dma_ld_hw *hw, dma_addr_t next)
 {
@@ -408,7 +383,7 @@ static dma_cookie_t fsl_dma_tx_submit(struct dma_async_tx_descriptor *tx)
 	struct fsl_desc_sw *desc = tx_to_fsl_desc(tx);
 	struct fsl_desc_sw *child;
 	unsigned long flags;
-	dma_cookie_t cookie;
+	dma_cookie_t cookie = -EINVAL;
 
 	spin_lock_irqsave(&chan->desc_lock, flags);
 
@@ -854,10 +829,6 @@ static void fsldma_cleanup_descriptor(struct fsldma_chan *chan,
 				      struct fsl_desc_sw *desc)
 {
 	struct dma_async_tx_descriptor *txd = &desc->async_tx;
-	struct device *dev = chan->common.device->dev;
-	dma_addr_t src = get_desc_src(chan, desc);
-	dma_addr_t dst = get_desc_dst(chan, desc);
-	u32 len = get_desc_cnt(chan, desc);
 
 	/* Run the link descriptor callback function */
 	if (txd->callback) {
diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c
index 7807f0ef4e20..53fb0c8365b0 100644
--- a/drivers/dma/mv_xor.c
+++ b/drivers/dma/mv_xor.c
@@ -54,12 +54,6 @@ static void mv_desc_init(struct mv_xor_desc_slot *desc, unsigned long flags)
 	hw_desc->desc_command = (1 << 31);
 }
 
-static u32 mv_desc_get_dest_addr(struct mv_xor_desc_slot *desc)
-{
-	struct mv_xor_desc *hw_desc = desc->hw_desc;
-	return hw_desc->phy_dest_addr;
-}
-
 static void mv_desc_set_byte_count(struct mv_xor_desc_slot *desc,
 				   u32 byte_count)
 {
@@ -787,7 +781,6 @@ static void mv_xor_issue_pending(struct dma_chan *chan)
 /*
  * Perform a transaction to verify the HW works.
  */
-#define MV_XOR_TEST_SIZE 2000
 
 static int mv_xor_memcpy_self_test(struct mv_xor_chan *mv_chan)
 {
@@ -797,20 +790,21 @@ static int mv_xor_memcpy_self_test(struct mv_xor_chan *mv_chan)
 	struct dma_chan *dma_chan;
 	dma_cookie_t cookie;
 	struct dma_async_tx_descriptor *tx;
+	struct dmaengine_unmap_data *unmap;
 	int err = 0;
 
-	src = kmalloc(sizeof(u8) * MV_XOR_TEST_SIZE, GFP_KERNEL);
+	src = kmalloc(sizeof(u8) * PAGE_SIZE, GFP_KERNEL);
 	if (!src)
 		return -ENOMEM;
 
-	dest = kzalloc(sizeof(u8) * MV_XOR_TEST_SIZE, GFP_KERNEL);
+	dest = kzalloc(sizeof(u8) * PAGE_SIZE, GFP_KERNEL);
 	if (!dest) {
 		kfree(src);
 		return -ENOMEM;
 	}
 
 	/* Fill in src buffer */
-	for (i = 0; i < MV_XOR_TEST_SIZE; i++)
+	for (i = 0; i < PAGE_SIZE; i++)
 		((u8 *) src)[i] = (u8)i;
 
 	dma_chan = &mv_chan->dmachan;
@@ -819,14 +813,26 @@ static int mv_xor_memcpy_self_test(struct mv_xor_chan *mv_chan)
 		goto out;
 	}
 
-	dest_dma = dma_map_single(dma_chan->device->dev, dest,
-				  MV_XOR_TEST_SIZE, DMA_FROM_DEVICE);
+	unmap = dmaengine_get_unmap_data(dma_chan->device->dev, 2, GFP_KERNEL);
+	if (!unmap) {
+		err = -ENOMEM;
+		goto free_resources;
+	}
+
+	src_dma = dma_map_page(dma_chan->device->dev, virt_to_page(src), 0,
+				 PAGE_SIZE, DMA_TO_DEVICE);
+	unmap->to_cnt = 1;
+	unmap->addr[0] = src_dma;
 
-	src_dma = dma_map_single(dma_chan->device->dev, src,
-				 MV_XOR_TEST_SIZE, DMA_TO_DEVICE);
+	dest_dma = dma_map_page(dma_chan->device->dev, virt_to_page(dest), 0,
+				  PAGE_SIZE, DMA_FROM_DEVICE);
+	unmap->from_cnt = 1;
+	unmap->addr[1] = dest_dma;
+
+	unmap->len = PAGE_SIZE;
 
 	tx = mv_xor_prep_dma_memcpy(dma_chan, dest_dma, src_dma,
-				    MV_XOR_TEST_SIZE, 0);
+				    PAGE_SIZE, 0);
 	cookie = mv_xor_tx_submit(tx);
 	mv_xor_issue_pending(dma_chan);
 	async_tx_ack(tx);
@@ -841,8 +847,8 @@ static int mv_xor_memcpy_self_test(struct mv_xor_chan *mv_chan)
 	}
 
 	dma_sync_single_for_cpu(dma_chan->device->dev, dest_dma,
-				MV_XOR_TEST_SIZE, DMA_FROM_DEVICE);
-	if (memcmp(src, dest, MV_XOR_TEST_SIZE)) {
+				PAGE_SIZE, DMA_FROM_DEVICE);
+	if (memcmp(src, dest, PAGE_SIZE)) {
 		dev_err(dma_chan->device->dev,
 			"Self-test copy failed compare, disabling\n");
 		err = -ENODEV;
@@ -850,6 +856,7 @@ static int mv_xor_memcpy_self_test(struct mv_xor_chan *mv_chan)
 	}
 
 free_resources:
+	dmaengine_unmap_put(unmap);
 	mv_xor_free_chan_resources(dma_chan);
 out:
 	kfree(src);
@@ -867,13 +874,15 @@ mv_xor_xor_self_test(struct mv_xor_chan *mv_chan)
 	dma_addr_t dma_srcs[MV_XOR_NUM_SRC_TEST];
 	dma_addr_t dest_dma;
 	struct dma_async_tx_descriptor *tx;
+	struct dmaengine_unmap_data *unmap;
 	struct dma_chan *dma_chan;
 	dma_cookie_t cookie;
 	u8 cmp_byte = 0;
 	u32 cmp_word;
 	int err = 0;
+	int src_count = MV_XOR_NUM_SRC_TEST;
 
-	for (src_idx = 0; src_idx < MV_XOR_NUM_SRC_TEST; src_idx++) {
+	for (src_idx = 0; src_idx < src_count; src_idx++) {
 		xor_srcs[src_idx] = alloc_page(GFP_KERNEL);
 		if (!xor_srcs[src_idx]) {
 			while (src_idx--)
@@ -890,13 +899,13 @@ mv_xor_xor_self_test(struct mv_xor_chan *mv_chan)
 	}
 
 	/* Fill in src buffers */
-	for (src_idx = 0; src_idx < MV_XOR_NUM_SRC_TEST; src_idx++) {
+	for (src_idx = 0; src_idx < src_count; src_idx++) {
 		u8 *ptr = page_address(xor_srcs[src_idx]);
 		for (i = 0; i < PAGE_SIZE; i++)
 			ptr[i] = (1 << src_idx);
 	}
 
-	for (src_idx = 0; src_idx < MV_XOR_NUM_SRC_TEST; src_idx++)
+	for (src_idx = 0; src_idx < src_count; src_idx++)
 		cmp_byte ^= (u8) (1 << src_idx);
 
 	cmp_word = (cmp_byte << 24) | (cmp_byte << 16) |
@@ -910,16 +919,29 @@ mv_xor_xor_self_test(struct mv_xor_chan *mv_chan)
 		goto out;
 	}
 
+	unmap = dmaengine_get_unmap_data(dma_chan->device->dev, src_count + 1,
+					 GFP_KERNEL);
+	if (!unmap) {
+		err = -ENOMEM;
+		goto free_resources;
+	}
+
 	/* test xor */
-	dest_dma = dma_map_page(dma_chan->device->dev, dest, 0, PAGE_SIZE,
-				DMA_FROM_DEVICE);
+	for (i = 0; i < src_count; i++) {
+		unmap->addr[i] = dma_map_page(dma_chan->device->dev, xor_srcs[i],
+					      0, PAGE_SIZE, DMA_TO_DEVICE);
+		dma_srcs[i] = unmap->addr[i];
+		unmap->to_cnt++;
+	}
 
-	for (i = 0; i < MV_XOR_NUM_SRC_TEST; i++)
-		dma_srcs[i] = dma_map_page(dma_chan->device->dev, xor_srcs[i],
-					   0, PAGE_SIZE, DMA_TO_DEVICE);
+	unmap->addr[src_count] = dma_map_page(dma_chan->device->dev, dest, 0, PAGE_SIZE,
+					      DMA_FROM_DEVICE);
+	dest_dma = unmap->addr[src_count];
+	unmap->from_cnt = 1;
+	unmap->len = PAGE_SIZE;
 
 	tx = mv_xor_prep_dma_xor(dma_chan, dest_dma, dma_srcs,
-				 MV_XOR_NUM_SRC_TEST, PAGE_SIZE, 0);
+				 src_count, PAGE_SIZE, 0);
 
 	cookie = mv_xor_tx_submit(tx);
 	mv_xor_issue_pending(dma_chan);
@@ -948,9 +970,10 @@ mv_xor_xor_self_test(struct mv_xor_chan *mv_chan)
 	}
 
 free_resources:
+	dmaengine_unmap_put(unmap);
 	mv_xor_free_chan_resources(dma_chan);
 out:
-	src_idx = MV_XOR_NUM_SRC_TEST;
+	src_idx = src_count;
 	while (src_idx--)
 		__free_page(xor_srcs[src_idx]);
 	__free_page(dest);
@@ -1176,6 +1199,7 @@ static int mv_xor_probe(struct platform_device *pdev)
 	int i = 0;
 
 	for_each_child_of_node(pdev->dev.of_node, np) {
+		struct mv_xor_chan *chan;
 		dma_cap_mask_t cap_mask;
 		int irq;
 
@@ -1193,21 +1217,21 @@ static int mv_xor_probe(struct platform_device *pdev)
 				goto err_channel_add;
 			}
 
-			xordev->channels[i] =
-				mv_xor_channel_add(xordev, pdev, i,
-						   cap_mask, irq);
-			if (IS_ERR(xordev->channels[i])) {
-				ret = PTR_ERR(xordev->channels[i]);
-				xordev->channels[i] = NULL;
+			chan = mv_xor_channel_add(xordev, pdev, i,
+						  cap_mask, irq);
+			if (IS_ERR(chan)) {
+				ret = PTR_ERR(chan);
 				irq_dispose_mapping(irq);
 				goto err_channel_add;
 			}
 
+			xordev->channels[i] = chan;
 			i++;
 		}
 	} else if (pdata && pdata->channels) {
 		for (i = 0; i < MV_XOR_MAX_CHANNELS; i++) {
 			struct mv_xor_channel_data *cd;
+			struct mv_xor_chan *chan;
 			int irq;
 
 			cd = &pdata->channels[i];
@@ -1222,13 +1246,14 @@ static int mv_xor_probe(struct platform_device *pdev)
 				goto err_channel_add;
 			}
 
-			xordev->channels[i] =
-				mv_xor_channel_add(xordev, pdev, i,
-						   cd->cap_mask, irq);
-			if (IS_ERR(xordev->channels[i])) {
-				ret = PTR_ERR(xordev->channels[i]);
+			chan = mv_xor_channel_add(xordev, pdev, i,
+						  cd->cap_mask, irq);
+			if (IS_ERR(chan)) {
+				ret = PTR_ERR(chan);
 				goto err_channel_add;
 			}
+
+			xordev->channels[i] = chan;
 		}
 	}
 
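
Both mv_xor self-tests above now follow the dmaengine unmap-data pattern: take one unmap descriptor sized for all buffers, count mapped pages in to_cnt/from_cnt, and release everything with a single put. A condensed sketch of that flow (error handling elided; dev, src_pages, dest_page and nr_srcs are illustrative names, while dmaengine_get_unmap_data(), dmaengine_unmap_put() and dma_map_page() are the real API):

	struct dmaengine_unmap_data *unmap;
	int i;

	/* one slot per source plus one for the destination */
	unmap = dmaengine_get_unmap_data(dev, nr_srcs + 1, GFP_KERNEL);

	for (i = 0; i < nr_srcs; i++) {
		unmap->addr[i] = dma_map_page(dev, src_pages[i], 0,
					      PAGE_SIZE, DMA_TO_DEVICE);
		unmap->to_cnt++;
	}
	unmap->addr[nr_srcs] = dma_map_page(dev, dest_page, 0,
					    PAGE_SIZE, DMA_FROM_DEVICE);
	unmap->from_cnt = 1;
	unmap->len = PAGE_SIZE;

	/* ... build, submit and wait for the descriptor ... */

	dmaengine_unmap_put(unmap);	/* unmaps all pages at the last put */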
diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c
index cdf0483b8f2d..536632f6479c 100644
--- a/drivers/dma/pl330.c
+++ b/drivers/dma/pl330.c
@@ -2492,12 +2492,9 @@ static dma_cookie_t pl330_tx_submit(struct dma_async_tx_descriptor *tx)
 
 static inline void _init_desc(struct dma_pl330_desc *desc)
 {
-	desc->pchan = NULL;
 	desc->req.x = &desc->px;
 	desc->req.token = desc;
 	desc->rqcfg.swap = SWAP_NO;
-	desc->rqcfg.privileged = 0;
-	desc->rqcfg.insnaccess = 0;
 	desc->rqcfg.scctl = SCCTRL0;
 	desc->rqcfg.dcctl = DCCTRL0;
 	desc->req.cfg = &desc->rqcfg;
@@ -2517,7 +2514,7 @@ static int add_desc(struct dma_pl330_dmac *pdmac, gfp_t flg, int count)
 	if (!pdmac)
 		return 0;
 
-	desc = kmalloc(count * sizeof(*desc), flg);
+	desc = kcalloc(count, sizeof(*desc), flg);
 	if (!desc)
 		return 0;
 
diff --git a/drivers/dma/ppc4xx/adma.c b/drivers/dma/ppc4xx/adma.c
index 8da48c6b2a38..8bba298535b0 100644
--- a/drivers/dma/ppc4xx/adma.c
+++ b/drivers/dma/ppc4xx/adma.c
@@ -533,29 +533,6 @@ static void ppc440spe_desc_init_memcpy(struct ppc440spe_adma_desc_slot *desc,
 }
 
 /**
- * ppc440spe_desc_init_memset - initialize the descriptor for MEMSET operation
- */
-static void ppc440spe_desc_init_memset(struct ppc440spe_adma_desc_slot *desc,
-					int value, unsigned long flags)
-{
-	struct dma_cdb *hw_desc = desc->hw_desc;
-
-	memset(desc->hw_desc, 0, sizeof(struct dma_cdb));
-	desc->hw_next = NULL;
-	desc->src_cnt = 1;
-	desc->dst_cnt = 1;
-
-	if (flags & DMA_PREP_INTERRUPT)
-		set_bit(PPC440SPE_DESC_INT, &desc->flags);
-	else
-		clear_bit(PPC440SPE_DESC_INT, &desc->flags);
-
-	hw_desc->sg1u = hw_desc->sg1l = cpu_to_le32((u32)value);
-	hw_desc->sg3u = hw_desc->sg3l = cpu_to_le32((u32)value);
-	hw_desc->opc = DMA_CDB_OPC_DFILL128;
-}
-
-/**
  * ppc440spe_desc_set_src_addr - set source address into the descriptor
  */
 static void ppc440spe_desc_set_src_addr(struct ppc440spe_adma_desc_slot *desc,
@@ -1504,8 +1481,6 @@ static dma_cookie_t ppc440spe_adma_run_tx_complete_actions(
 		struct ppc440spe_adma_chan *chan,
 		dma_cookie_t cookie)
 {
-	int i;
-
 	BUG_ON(desc->async_tx.cookie < 0);
 	if (desc->async_tx.cookie > 0) {
 		cookie = desc->async_tx.cookie;
@@ -3898,7 +3873,7 @@ static void ppc440spe_adma_init_capabilities(struct ppc440spe_adma_device *adev)
 			ppc440spe_adma_prep_dma_interrupt;
 	}
 	pr_info("%s: AMCC(R) PPC440SP(E) ADMA Engine: "
-		"( %s%s%s%s%s%s%s)\n",
+		"( %s%s%s%s%s%s)\n",
 		dev_name(adev->dev),
 		dma_has_cap(DMA_PQ, adev->common.cap_mask) ? "pq " : "",
 		dma_has_cap(DMA_PQ_VAL, adev->common.cap_mask) ? "pq_val " : "",
diff --git a/drivers/dma/txx9dmac.c b/drivers/dma/txx9dmac.c
index bae6c29f5502..17686caf64d5 100644
--- a/drivers/dma/txx9dmac.c
+++ b/drivers/dma/txx9dmac.c
@@ -406,7 +406,6 @@ txx9dmac_descriptor_complete(struct txx9dmac_chan *dc,
 	dma_async_tx_callback callback;
 	void *param;
 	struct dma_async_tx_descriptor *txd = &desc->txd;
-	struct txx9dmac_slave *ds = dc->chan.private;
 
 	dev_vdbg(chan2dev(&dc->chan), "descriptor %u %p complete\n",
 		 txd->cookie, desc);
diff --git a/drivers/firewire/sbp2.c b/drivers/firewire/sbp2.c
index b0bb056458a3..281029daf98c 100644
--- a/drivers/firewire/sbp2.c
+++ b/drivers/firewire/sbp2.c
@@ -1623,7 +1623,6 @@ static struct scsi_host_template scsi_driver_template = {
 	.cmd_per_lun		= 1,
 	.can_queue		= 1,
 	.sdev_attrs		= sbp2_scsi_sysfs_attrs,
-	.no_write_same		= 1,
 };
 
 MODULE_AUTHOR("Kristian Hoegsberg <krh@bitplanet.net>");
diff --git a/drivers/firmware/Makefile b/drivers/firmware/Makefile
index 299fad6b5867..5373dc5b6011 100644
--- a/drivers/firmware/Makefile
+++ b/drivers/firmware/Makefile
@@ -14,3 +14,4 @@ obj-$(CONFIG_FIRMWARE_MEMMAP) += memmap.o
 
 obj-$(CONFIG_GOOGLE_FIRMWARE)	+= google/
 obj-$(CONFIG_EFI)		+= efi/
+obj-$(CONFIG_UEFI_CPER)		+= efi/
diff --git a/drivers/firmware/efi/Kconfig b/drivers/firmware/efi/Kconfig
index 3150aa4874e8..6aecbc86ec94 100644
--- a/drivers/firmware/efi/Kconfig
+++ b/drivers/firmware/efi/Kconfig
@@ -36,7 +36,7 @@ config EFI_VARS_PSTORE_DEFAULT_DISABLE
 	  backend for pstore by default. This setting can be overridden
 	  using the efivars module's pstore_disable parameter.
 
-config UEFI_CPER
-	def_bool n
-
 endmenu
+
+config UEFI_CPER
+	bool
diff --git a/drivers/firmware/efi/Makefile b/drivers/firmware/efi/Makefile
index 9ba156d3c775..6c2a41ec21ba 100644
--- a/drivers/firmware/efi/Makefile
+++ b/drivers/firmware/efi/Makefile
@@ -1,7 +1,7 @@
 #
 # Makefile for linux kernel
 #
-obj-y					+= efi.o vars.o
+obj-$(CONFIG_EFI)			+= efi.o vars.o
 obj-$(CONFIG_EFI_VARS)			+= efivars.o
 obj-$(CONFIG_EFI_VARS_PSTORE)		+= efi-pstore.o
 obj-$(CONFIG_UEFI_CPER)			+= cper.o
diff --git a/drivers/firmware/efi/efi-pstore.c b/drivers/firmware/efi/efi-pstore.c
index 743fd426f21b..4b9dc836dcf9 100644
--- a/drivers/firmware/efi/efi-pstore.c
+++ b/drivers/firmware/efi/efi-pstore.c
@@ -356,6 +356,7 @@ static int efi_pstore_erase(enum pstore_type_id type, u64 id, int count,
 static struct pstore_info efi_pstore_info = {
 	.owner		= THIS_MODULE,
 	.name		= "efi",
+	.flags		= PSTORE_FLAGS_FRAGILE,
 	.open		= efi_pstore_open,
 	.close		= efi_pstore_close,
 	.read		= efi_pstore_read,
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 621c7c67a643..76d3d1ab73c6 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2343,15 +2343,24 @@ static void i915_gem_free_request(struct drm_i915_gem_request *request)
 	kfree(request);
 }
 
-static void i915_gem_reset_ring_lists(struct drm_i915_private *dev_priv,
+static void i915_gem_reset_ring_status(struct drm_i915_private *dev_priv,
 				       struct intel_ring_buffer *ring)
 {
-	u32 completed_seqno;
-	u32 acthd;
+	u32 completed_seqno = ring->get_seqno(ring, false);
+	u32 acthd = intel_ring_get_active_head(ring);
+	struct drm_i915_gem_request *request;
+
+	list_for_each_entry(request, &ring->request_list, list) {
+		if (i915_seqno_passed(completed_seqno, request->seqno))
+			continue;
 
-	acthd = intel_ring_get_active_head(ring);
-	completed_seqno = ring->get_seqno(ring, false);
+		i915_set_reset_status(ring, request, acthd);
+	}
+}
 
+static void i915_gem_reset_ring_cleanup(struct drm_i915_private *dev_priv,
+					struct intel_ring_buffer *ring)
+{
 	while (!list_empty(&ring->request_list)) {
 		struct drm_i915_gem_request *request;
 
@@ -2359,9 +2368,6 @@ static void i915_gem_reset_ring_lists(struct drm_i915_private *dev_priv,
 					   struct drm_i915_gem_request,
 					   list);
 
-		if (request->seqno > completed_seqno)
-			i915_set_reset_status(ring, request, acthd);
-
 		i915_gem_free_request(request);
 	}
 
@@ -2403,8 +2409,16 @@ void i915_gem_reset(struct drm_device *dev)
 	struct intel_ring_buffer *ring;
 	int i;
 
+	/*
+	 * Before we free the objects from the requests, we need to inspect
+	 * them for finding the guilty party. As the requests only borrow
+	 * their reference to the objects, the inspection must be done first.
+	 */
+	for_each_ring(ring, dev_priv, i)
+		i915_gem_reset_ring_status(dev_priv, ring);
+
 	for_each_ring(ring, dev_priv, i)
-		i915_gem_reset_ring_lists(dev_priv, ring);
+		i915_gem_reset_ring_cleanup(dev_priv, ring);
 
 	i915_gem_cleanup_ringbuffer(dev);
 
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index b7e787fb4649..a3ba9a8cd687 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -93,7 +93,7 @@ eb_lookup_vmas(struct eb_vmas *eb,
 {
 	struct drm_i915_gem_object *obj;
 	struct list_head objects;
-	int i, ret = 0;
+	int i, ret;
 
 	INIT_LIST_HEAD(&objects);
 	spin_lock(&file->table_lock);
@@ -106,7 +106,7 @@ eb_lookup_vmas(struct eb_vmas *eb,
 			DRM_DEBUG("Invalid object handle %d at index %d\n",
 				   exec[i].handle, i);
 			ret = -ENOENT;
-			goto out;
+			goto err;
 		}
 
 		if (!list_empty(&obj->obj_exec_link)) {
@@ -114,7 +114,7 @@ eb_lookup_vmas(struct eb_vmas *eb,
 			DRM_DEBUG("Object %p [handle %d, index %d] appears more than once in object list\n",
 				   obj, exec[i].handle, i);
 			ret = -EINVAL;
-			goto out;
+			goto err;
 		}
 
 		drm_gem_object_reference(&obj->base);
@@ -123,9 +123,13 @@ eb_lookup_vmas(struct eb_vmas *eb,
 	spin_unlock(&file->table_lock);
 
 	i = 0;
-	list_for_each_entry(obj, &objects, obj_exec_link) {
+	while (!list_empty(&objects)) {
 		struct i915_vma *vma;
 
+		obj = list_first_entry(&objects,
+				       struct drm_i915_gem_object,
+				       obj_exec_link);
+
 		/*
 		 * NOTE: We can leak any vmas created here when something fails
 		 * later on. But that's no issue since vma_unbind can deal with
@@ -138,10 +142,12 @@ eb_lookup_vmas(struct eb_vmas *eb,
 		if (IS_ERR(vma)) {
 			DRM_DEBUG("Failed to lookup VMA\n");
 			ret = PTR_ERR(vma);
-			goto out;
+			goto err;
 		}
 
+		/* Transfer ownership from the objects list to the vmas list. */
 		list_add_tail(&vma->exec_list, &eb->vmas);
+		list_del_init(&obj->obj_exec_link);
 
 		vma->exec_entry = &exec[i];
 		if (eb->and < 0) {
@@ -155,16 +161,22 @@ eb_lookup_vmas(struct eb_vmas *eb,
 		++i;
 	}
 
+	return 0;
+
 
-out:
+err:
 	while (!list_empty(&objects)) {
 		obj = list_first_entry(&objects,
 				       struct drm_i915_gem_object,
 				       obj_exec_link);
 		list_del_init(&obj->obj_exec_link);
-		if (ret)
-			drm_gem_object_unreference(&obj->base);
+		drm_gem_object_unreference(&obj->base);
 	}
+	/*
+	 * Objects already transfered to the vmas list will be unreferenced by
+	 * eb_destroy.
+	 */
+
 	return ret;
 }
 
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 8b8bde7dce53..54e82a80cf50 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -6303,7 +6303,7 @@ static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv)
 	uint32_t val;
 
 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head)
-		WARN(crtc->base.enabled, "CRTC for pipe %c enabled\n",
+		WARN(crtc->active, "CRTC for pipe %c enabled\n",
 		     pipe_name(crtc->pipe));
 
 	WARN(I915_READ(HSW_PWR_WELL_DRIVER), "Power well on\n");
@@ -11126,14 +11126,15 @@ void intel_connector_attach_encoder(struct intel_connector *connector,
 int intel_modeset_vga_set_state(struct drm_device *dev, bool state)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
+	unsigned reg = INTEL_INFO(dev)->gen >= 6 ? SNB_GMCH_CTRL : INTEL_GMCH_CTRL;
 	u16 gmch_ctrl;
 
-	pci_read_config_word(dev_priv->bridge_dev, INTEL_GMCH_CTRL, &gmch_ctrl);
+	pci_read_config_word(dev_priv->bridge_dev, reg, &gmch_ctrl);
 	if (state)
 		gmch_ctrl &= ~INTEL_GMCH_VGA_DISABLE;
 	else
 		gmch_ctrl |= INTEL_GMCH_VGA_DISABLE;
-	pci_write_config_word(dev_priv->bridge_dev, INTEL_GMCH_CTRL, gmch_ctrl);
+	pci_write_config_word(dev_priv->bridge_dev, reg, gmch_ctrl);
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 3657ab43c8fd..26c29c173221 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -5688,6 +5688,8 @@ static void __intel_set_power_well(struct drm_device *dev, bool enable)
 	unsigned long irqflags;
 	uint32_t tmp;
 
+	WARN_ON(dev_priv->pc8.enabled);
+
 	tmp = I915_READ(HSW_PWR_WELL_DRIVER);
 	is_enabled = tmp & HSW_PWR_WELL_STATE_ENABLED;
 	enable_requested = tmp & HSW_PWR_WELL_ENABLE_REQUEST;
@@ -5747,16 +5749,24 @@ static void __intel_set_power_well(struct drm_device *dev, bool enable)
 static void __intel_power_well_get(struct drm_device *dev,
 				   struct i915_power_well *power_well)
 {
-	if (!power_well->count++)
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	if (!power_well->count++) {
+		hsw_disable_package_c8(dev_priv);
 		__intel_set_power_well(dev, true);
+	}
 }
 
 static void __intel_power_well_put(struct drm_device *dev,
 				   struct i915_power_well *power_well)
 {
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
 	WARN_ON(!power_well->count);
-	if (!--power_well->count && i915_disable_power_well)
+	if (!--power_well->count && i915_disable_power_well) {
 		__intel_set_power_well(dev, false);
+		hsw_enable_package_c8(dev_priv);
+	}
 }
 
 void intel_display_power_get(struct drm_device *dev,
diff --git a/drivers/gpu/drm/qxl/Kconfig b/drivers/gpu/drm/qxl/Kconfig
index 037d324bf58f..66ac0ff95f5a 100644
--- a/drivers/gpu/drm/qxl/Kconfig
+++ b/drivers/gpu/drm/qxl/Kconfig
@@ -8,5 +8,6 @@ config DRM_QXL
 	select DRM_KMS_HELPER
 	select DRM_KMS_FB_HELPER
 	select DRM_TTM
+	select CRC32
 	help
 	  QXL virtual GPU for Spice virtualization desktop integration. Do not enable this driver unless your distro ships a corresponding X.org QXL driver that can handle kernel modesetting.
diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c
index 5e827c29d194..d70aafb83307 100644
--- a/drivers/gpu/drm/qxl/qxl_display.c
+++ b/drivers/gpu/drm/qxl/qxl_display.c
@@ -24,7 +24,7 @@
  */
 
 
-#include "linux/crc32.h"
+#include <linux/crc32.h>
 
 #include "qxl_drv.h"
 #include "qxl_object.h"
diff --git a/drivers/gpu/drm/radeon/dce6_afmt.c b/drivers/gpu/drm/radeon/dce6_afmt.c
index de86493cbc44..713a5d359901 100644
--- a/drivers/gpu/drm/radeon/dce6_afmt.c
+++ b/drivers/gpu/drm/radeon/dce6_afmt.c
@@ -174,7 +174,7 @@ void dce6_afmt_write_speaker_allocation(struct drm_encoder *encoder)
 	}
 
 	sad_count = drm_edid_to_speaker_allocation(radeon_connector->edid, &sadb);
-	if (sad_count < 0) {
+	if (sad_count <= 0) {
 		DRM_ERROR("Couldn't read Speaker Allocation Data Block: %d\n", sad_count);
 		return;
 	}
@@ -235,7 +235,7 @@ void dce6_afmt_write_sad_regs(struct drm_encoder *encoder)
 	}
 
 	sad_count = drm_edid_to_sad(radeon_connector->edid, &sads);
-	if (sad_count < 0) {
+	if (sad_count <= 0) {
 		DRM_ERROR("Couldn't read SADs: %d\n", sad_count);
 		return;
 	}
@@ -308,7 +308,9 @@ int dce6_audio_init(struct radeon_device *rdev)
 	rdev->audio.enabled = true;
 
 	if (ASIC_IS_DCE8(rdev))
-		rdev->audio.num_pins = 7;
+		rdev->audio.num_pins = 6;
+	else if (ASIC_IS_DCE61(rdev))
+		rdev->audio.num_pins = 4;
 	else
 		rdev->audio.num_pins = 6;
 
diff --git a/drivers/gpu/drm/radeon/evergreen_hdmi.c b/drivers/gpu/drm/radeon/evergreen_hdmi.c
index aa695c4feb3d..0c6d5cef4cf1 100644
--- a/drivers/gpu/drm/radeon/evergreen_hdmi.c
+++ b/drivers/gpu/drm/radeon/evergreen_hdmi.c
@@ -118,7 +118,7 @@ static void dce4_afmt_write_speaker_allocation(struct drm_encoder *encoder)
 	}
 
 	sad_count = drm_edid_to_speaker_allocation(radeon_connector->edid, &sadb);
-	if (sad_count < 0) {
+	if (sad_count <= 0) {
 		DRM_ERROR("Couldn't read Speaker Allocation Data Block: %d\n", sad_count);
 		return;
 	}
@@ -173,7 +173,7 @@ static void evergreen_hdmi_write_sad_regs(struct drm_encoder *encoder)
 	}
 
 	sad_count = drm_edid_to_sad(radeon_connector->edid, &sads);
-	if (sad_count < 0) {
+	if (sad_count <= 0) {
 		DRM_ERROR("Couldn't read SADs: %d\n", sad_count);
 		return;
 	}
diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c
index 11aab2ab54ce..f59a9e9fccf8 100644
--- a/drivers/gpu/drm/radeon/ni.c
+++ b/drivers/gpu/drm/radeon/ni.c
@@ -895,6 +895,10 @@ static void cayman_gpu_init(struct radeon_device *rdev)
 		   (rdev->pdev->device == 0x999C)) {
 		rdev->config.cayman.max_simds_per_se = 6;
 		rdev->config.cayman.max_backends_per_se = 2;
+		rdev->config.cayman.max_hw_contexts = 8;
+		rdev->config.cayman.sx_max_export_size = 256;
+		rdev->config.cayman.sx_max_export_pos_size = 64;
+		rdev->config.cayman.sx_max_export_smx_size = 192;
 	} else if ((rdev->pdev->device == 0x9903) ||
 		   (rdev->pdev->device == 0x9904) ||
 		   (rdev->pdev->device == 0x990A) ||
@@ -905,6 +909,10 @@ static void cayman_gpu_init(struct radeon_device *rdev)
 		   (rdev->pdev->device == 0x999D)) {
 		rdev->config.cayman.max_simds_per_se = 4;
 		rdev->config.cayman.max_backends_per_se = 2;
+		rdev->config.cayman.max_hw_contexts = 8;
+		rdev->config.cayman.sx_max_export_size = 256;
+		rdev->config.cayman.sx_max_export_pos_size = 64;
+		rdev->config.cayman.sx_max_export_smx_size = 192;
 	} else if ((rdev->pdev->device == 0x9919) ||
 		   (rdev->pdev->device == 0x9990) ||
 		   (rdev->pdev->device == 0x9991) ||
@@ -915,9 +923,17 @@ static void cayman_gpu_init(struct radeon_device *rdev)
 	    (rdev->pdev->device == 0x99A0)) {
 		rdev->config.cayman.max_simds_per_se = 3;
 		rdev->config.cayman.max_backends_per_se = 1;
+		rdev->config.cayman.max_hw_contexts = 4;
+		rdev->config.cayman.sx_max_export_size = 128;
+		rdev->config.cayman.sx_max_export_pos_size = 32;
+		rdev->config.cayman.sx_max_export_smx_size = 96;
 	} else {
 		rdev->config.cayman.max_simds_per_se = 2;
 		rdev->config.cayman.max_backends_per_se = 1;
+		rdev->config.cayman.max_hw_contexts = 4;
+		rdev->config.cayman.sx_max_export_size = 128;
+		rdev->config.cayman.sx_max_export_pos_size = 32;
+		rdev->config.cayman.sx_max_export_smx_size = 96;
 	}
 	rdev->config.cayman.max_texture_channel_caches = 2;
 	rdev->config.cayman.max_gprs = 256;
@@ -925,10 +941,6 @@ static void cayman_gpu_init(struct radeon_device *rdev)
 	rdev->config.cayman.max_gs_threads = 32;
 	rdev->config.cayman.max_stack_entries = 512;
 	rdev->config.cayman.sx_num_of_sets = 8;
-	rdev->config.cayman.sx_max_export_size = 256;
-	rdev->config.cayman.sx_max_export_pos_size = 64;
-	rdev->config.cayman.sx_max_export_smx_size = 192;
-	rdev->config.cayman.max_hw_contexts = 8;
 	rdev->config.cayman.sq_num_cf_insts = 2;
 
 	rdev->config.cayman.sc_prim_fifo_size = 0x40;
diff --git a/drivers/gpu/drm/radeon/rv770_dpm.c b/drivers/gpu/drm/radeon/rv770_dpm.c
index 913b025ae9b3..374499db20c7 100644
--- a/drivers/gpu/drm/radeon/rv770_dpm.c
+++ b/drivers/gpu/drm/radeon/rv770_dpm.c
@@ -2328,6 +2328,12 @@ void rv770_get_engine_memory_ss(struct radeon_device *rdev)
 	pi->mclk_ss = radeon_atombios_get_asic_ss_info(rdev, &ss,
 						       ASIC_INTERNAL_MEMORY_SS, 0);
 
+	/* disable ss, causes hangs on some cayman boards */
+	if (rdev->family == CHIP_CAYMAN) {
+		pi->sclk_ss = false;
+		pi->mclk_ss = false;
+	}
+
 	if (pi->sclk_ss || pi->mclk_ss)
 		pi->dynamic_ss = true;
 	else
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index 15b86a94949d..406152152315 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -353,7 +353,8 @@ int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
 	 * Don't move nonexistent data. Clear destination instead.
 	 */
 	if (old_iomap == NULL &&
-	    (ttm == NULL || ttm->state == tt_unpopulated)) {
+	    (ttm == NULL || (ttm->state == tt_unpopulated &&
+			     !(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)))) {
 		memset_io(new_iomap, 0, new_mem->num_pages*PAGE_SIZE);
 		goto out2;
 	}
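The old predicate treated every unpopulated TTM as "no data to move" and cleared the destination. A TTM whose pages were swapped out is also unpopulated, yet its contents live on swap and must be copied back rather than zeroed. The fixed condition, restated as a standalone boolean (a sketch, names from the hunk):

    bool clear_instead_of_copy =
        old_iomap == NULL &&
        (ttm == NULL || (ttm->state == tt_unpopulated &&
                         !(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)));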
diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c
index 92d1206482a6..f80b700f821c 100644
--- a/drivers/idle/intel_idle.c
+++ b/drivers/idle/intel_idle.c
@@ -377,6 +377,9 @@ static int intel_idle(struct cpuidle_device *dev,
 
 	if (!current_set_polling_and_test()) {
 
+		if (this_cpu_has(X86_FEATURE_CLFLUSH_MONITOR))
+			clflush((void *)&current_thread_info()->flags);
+
 		__monitor((void *)&current_thread_info()->flags, 0, 0);
 		smp_mb();
 		if (!need_resched())
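The inserted clflush targets CPUs flagged with X86_FEATURE_CLFLUSH_MONITOR, on which MONITOR may miss a write to the armed cacheline unless that line is flushed first. The resulting idle sequence (a sketch; the mwait hint registers are the ones intel_idle() already computed, not shown in this hunk):

    if (this_cpu_has(X86_FEATURE_CLFLUSH_MONITOR))
        clflush((void *)&current_thread_info()->flags);  /* evict the line */
    __monitor((void *)&current_thread_info()->flags, 0, 0);
    smp_mb();
    if (!need_resched())
        __mwait(eax, ecx);                               /* enter C-state */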
diff --git a/drivers/iio/adc/ad7887.c b/drivers/iio/adc/ad7887.c
index acb7f90359a3..749a6cadab8b 100644
--- a/drivers/iio/adc/ad7887.c
+++ b/drivers/iio/adc/ad7887.c
@@ -200,7 +200,13 @@ static const struct ad7887_chip_info ad7887_chip_info_tbl[] = {
 		.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),
 		.address = 1,
 		.scan_index = 1,
-		.scan_type = IIO_ST('u', 12, 16, 0),
+		.scan_type = {
+			.sign = 'u',
+			.realbits = 12,
+			.storagebits = 16,
+			.shift = 0,
+			.endianness = IIO_BE,
+		},
 	},
 	.channel[1] = {
 		.type = IIO_VOLTAGE,
@@ -210,7 +216,13 @@ static const struct ad7887_chip_info ad7887_chip_info_tbl[] = {
 		.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),
 		.address = 0,
 		.scan_index = 0,
-		.scan_type = IIO_ST('u', 12, 16, 0),
+		.scan_type = {
+			.sign = 'u',
+			.realbits = 12,
+			.storagebits = 16,
+			.shift = 0,
+			.endianness = IIO_BE,
+		},
 	},
 	.channel[2] = IIO_CHAN_SOFT_TIMESTAMP(2),
 	.int_vref_mv = 2500,
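The retired IIO_ST() initializer packed sign/realbits/storagebits/shift positionally and never touched the endianness member, which therefore defaulted to 0. Roughly, it expanded to something like this (an approximation of the old helper, not a quote):

    #define IIO_ST(si, rb, sb, sh) \
        { .sign = (si), .realbits = (rb), .storagebits = (sb), .shift = (sh) }

Writing the struct out with designated initializers is what lets these drivers state .endianness = IIO_BE explicitly for their big-endian samples.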
diff --git a/drivers/iio/imu/adis16400_core.c b/drivers/iio/imu/adis16400_core.c
index 3fb7757a1028..368660dfe135 100644
--- a/drivers/iio/imu/adis16400_core.c
+++ b/drivers/iio/imu/adis16400_core.c
@@ -651,7 +651,12 @@ static const struct iio_chan_spec adis16448_channels[] = {
 		.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),
 		.address = ADIS16448_BARO_OUT,
 		.scan_index = ADIS16400_SCAN_BARO,
-		.scan_type = IIO_ST('s', 16, 16, 0),
+		.scan_type = {
+			.sign = 's',
+			.realbits = 16,
+			.storagebits = 16,
+			.endianness = IIO_BE,
+		},
 	},
 	ADIS16400_TEMP_CHAN(ADIS16448_TEMP_OUT, 12),
 	IIO_CHAN_SOFT_TIMESTAMP(11)
diff --git a/drivers/iio/light/cm36651.c b/drivers/iio/light/cm36651.c
index 21df57130018..0922e39b0ea9 100644
--- a/drivers/iio/light/cm36651.c
+++ b/drivers/iio/light/cm36651.c
@@ -387,7 +387,7 @@ static int cm36651_read_int_time(struct cm36651_data *cm36651,
 		return -EINVAL;
 	}
 
-	return IIO_VAL_INT_PLUS_MICRO;
+	return IIO_VAL_INT;
 }
 
 static int cm36651_write_int_time(struct cm36651_data *cm36651,
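The return code tells the IIO core how to combine the output slots: IIO_VAL_INT publishes *val alone, while IIO_VAL_INT_PLUS_MICRO publishes *val plus a micro-unit fraction in *val2. The one-line fix makes the code match what the function actually fills in (the integer slot only, judging from this hunk):

    /* IIO_VAL_INT            -> reported value = *val                 */
    /* IIO_VAL_INT_PLUS_MICRO -> reported value = *val + *val2 * 1e-6  */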
diff --git a/drivers/infiniband/core/iwcm.c b/drivers/infiniband/core/iwcm.c
index c47c2034ca71..0717940ec3b5 100644
--- a/drivers/infiniband/core/iwcm.c
+++ b/drivers/infiniband/core/iwcm.c
@@ -181,9 +181,16 @@ static void add_ref(struct iw_cm_id *cm_id)
 static void rem_ref(struct iw_cm_id *cm_id)
 {
 	struct iwcm_id_private *cm_id_priv;
+	int cb_destroy;
+
 	cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
-	if (iwcm_deref_id(cm_id_priv) &&
-	    test_bit(IWCM_F_CALLBACK_DESTROY, &cm_id_priv->flags)) {
+
+	/*
+	 * Test bit before deref in case the cm_id gets freed on another
+	 * thread.
+	 */
+	cb_destroy = test_bit(IWCM_F_CALLBACK_DESTROY, &cm_id_priv->flags);
+	if (iwcm_deref_id(cm_id_priv) && cb_destroy) {
 		BUG_ON(!list_empty(&cm_id_priv->work_list));
 		free_cm_id(cm_id_priv);
 	}
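Sampling the flag before dropping the reference closes a use-after-free window: once iwcm_deref_id() releases the last reference, another thread may free cm_id_priv, so its flags must not be touched afterwards. The race being closed, sketched:

    /* CPU0: rem_ref()                     CPU1: event/destroy path  */
    /* iwcm_deref_id() -> refcount hits 0                            */
    /*                                     free_cm_id(cm_id_priv)    */
    /* test_bit(..., &cm_id_priv->flags)   <-- use after free        */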
diff --git a/drivers/infiniband/core/uverbs.h b/drivers/infiniband/core/uverbs.h
index bdc842e9faef..a283274a5a09 100644
--- a/drivers/infiniband/core/uverbs.h
+++ b/drivers/infiniband/core/uverbs.h
@@ -49,12 +49,20 @@
 
 #define INIT_UDATA(udata, ibuf, obuf, ilen, olen)			\
 	do {								\
-		(udata)->inbuf  = (void __user *) (ibuf);		\
+		(udata)->inbuf  = (const void __user *) (ibuf);		\
 		(udata)->outbuf = (void __user *) (obuf);		\
 		(udata)->inlen  = (ilen);				\
 		(udata)->outlen = (olen);				\
 	} while (0)
 
+#define INIT_UDATA_BUF_OR_NULL(udata, ibuf, obuf, ilen, olen)			\
+	do {									\
+		(udata)->inbuf  = (ilen) ? (const void __user *) (ibuf) : NULL; \
+		(udata)->outbuf = (olen) ? (void __user *) (obuf) : NULL;	\
+		(udata)->inlen  = (ilen);					\
+		(udata)->outlen = (olen);					\
+	} while (0)
+
 /*
  * Our lifetime rules for these structs are the following:
  *
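The new variant differs from INIT_UDATA() only in forcing a NULL buffer pointer when the corresponding length is zero, so downstream copy helpers can cheaply distinguish "no payload" from a bogus user address. Usage sketch matching the uverbs_main.c hunk further down:

    INIT_UDATA_BUF_OR_NULL(&ucore, buf, (unsigned long) ex_hdr.response,
                           hdr.in_words * 8, hdr.out_words * 8);
    /* hdr.out_words == 0  =>  ucore.outbuf == NULL, ucore.outlen == 0 */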
diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
index 65f6e7dc380c..f1cc83855af6 100644
--- a/drivers/infiniband/core/uverbs_cmd.c
+++ b/drivers/infiniband/core/uverbs_cmd.c
@@ -2593,6 +2593,9 @@ out_put:
 static int kern_spec_to_ib_spec(struct ib_uverbs_flow_spec *kern_spec,
 				union ib_flow_spec *ib_spec)
 {
+	if (kern_spec->reserved)
+		return -EINVAL;
+
 	ib_spec->type = kern_spec->type;
 
 	switch (ib_spec->type) {
@@ -2646,6 +2649,9 @@ int ib_uverbs_ex_create_flow(struct ib_uverbs_file *file,
 	void *ib_spec;
 	int i;
 
+	if (ucore->inlen < sizeof(cmd))
+		return -EINVAL;
+
 	if (ucore->outlen < sizeof(resp))
 		return -ENOSPC;
 
@@ -2671,6 +2677,10 @@ int ib_uverbs_ex_create_flow(struct ib_uverbs_file *file,
 	    (cmd.flow_attr.num_of_specs * sizeof(struct ib_uverbs_flow_spec)))
 		return -EINVAL;
 
+	if (cmd.flow_attr.reserved[0] ||
+	    cmd.flow_attr.reserved[1])
+		return -EINVAL;
+
 	if (cmd.flow_attr.num_of_specs) {
 		kern_flow_attr = kmalloc(sizeof(*kern_flow_attr) + cmd.flow_attr.size,
 					 GFP_KERNEL);
@@ -2731,6 +2741,7 @@ int ib_uverbs_ex_create_flow(struct ib_uverbs_file *file,
 	if (cmd.flow_attr.size || (i != flow_attr->num_of_specs)) {
 		pr_warn("create flow failed, flow %d: %d bytes left from uverb cmd\n",
 			i, cmd.flow_attr.size);
+		err = -EINVAL;
 		goto err_free;
 	}
 	flow_id = ib_create_flow(qp, flow_attr, IB_FLOW_DOMAIN_USER);
@@ -2791,10 +2802,16 @@ int ib_uverbs_ex_destroy_flow(struct ib_uverbs_file *file,
 	struct ib_uobject *uobj;
 	int ret;
 
+	if (ucore->inlen < sizeof(cmd))
+		return -EINVAL;
+
 	ret = ib_copy_from_udata(&cmd, ucore, sizeof(cmd));
 	if (ret)
 		return ret;
 
+	if (cmd.comp_mask)
+		return -EINVAL;
+
 	uobj = idr_write_uobj(&ib_uverbs_rule_idr, cmd.flow_handle,
 			      file->ucontext);
 	if (!uobj)
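These additions all follow one ABI rule: fields the extended-verbs interface defines as reserved (or a comp_mask the kernel does not yet understand) must reach the kernel as zero, otherwise the command is rejected before any work is done. The recurring shape (sketch):

    if (ucore->inlen < sizeof(cmd))         /* header too short */
        return -EINVAL;
    if (cmd.comp_mask)                      /* unknown extensions */
        return -EINVAL;
    if (cmd.flow_attr.reserved[0] || cmd.flow_attr.reserved[1])
        return -EINVAL;                     /* reserved must be zero */

Rejecting nonzero reserved bits now is what lets those bits acquire meaning later without old kernels silently misinterpreting them.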
diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c
index 34386943ebcf..08219fb3338b 100644
--- a/drivers/infiniband/core/uverbs_main.c
+++ b/drivers/infiniband/core/uverbs_main.c
@@ -668,25 +668,30 @@ static ssize_t ib_uverbs_write(struct file *filp, const char __user *buf,
 	if ((hdr.in_words + ex_hdr.provider_in_words) * 8 != count)
 		return -EINVAL;
 
+	if (ex_hdr.cmd_hdr_reserved)
+		return -EINVAL;
+
 	if (ex_hdr.response) {
 		if (!hdr.out_words && !ex_hdr.provider_out_words)
 			return -EINVAL;
+
+		if (!access_ok(VERIFY_WRITE,
+			       (void __user *) (unsigned long) ex_hdr.response,
+			       (hdr.out_words + ex_hdr.provider_out_words) * 8))
+			return -EFAULT;
 	} else {
 		if (hdr.out_words || ex_hdr.provider_out_words)
 			return -EINVAL;
 	}
 
-	INIT_UDATA(&ucore,
-		   (hdr.in_words) ? buf : 0,
-		   (unsigned long)ex_hdr.response,
-		   hdr.in_words * 8,
-		   hdr.out_words * 8);
-
-	INIT_UDATA(&uhw,
-		   (ex_hdr.provider_in_words) ? buf + ucore.inlen : 0,
-		   (ex_hdr.provider_out_words) ? (unsigned long)ex_hdr.response + ucore.outlen : 0,
-		   ex_hdr.provider_in_words * 8,
-		   ex_hdr.provider_out_words * 8);
+	INIT_UDATA_BUF_OR_NULL(&ucore, buf, (unsigned long) ex_hdr.response,
+			       hdr.in_words * 8, hdr.out_words * 8);
+
+	INIT_UDATA_BUF_OR_NULL(&uhw,
+			       buf + ucore.inlen,
+			       (unsigned long) ex_hdr.response + ucore.outlen,
+			       ex_hdr.provider_in_words * 8,
+			       ex_hdr.provider_out_words * 8);
 
 	err = uverbs_ex_cmd_table[command](file,
 					   &ucore,
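The response pointer is now validated once, up front, for the full core plus provider payload, instead of trusting each handler to check its own slice:

    if (!access_ok(VERIFY_WRITE,
                   (void __user *) (unsigned long) ex_hdr.response,
                   (hdr.out_words + ex_hdr.provider_out_words) * 8))
        return -EFAULT;

Together with INIT_UDATA_BUF_OR_NULL() this guarantees a handler sees either a NULL buffer or one that is at least nominally writable.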
diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
index 12fef76c791c..45126879ad28 100644
--- a/drivers/infiniband/hw/cxgb4/cm.c
+++ b/drivers/infiniband/hw/cxgb4/cm.c
@@ -524,50 +524,6 @@ static int send_abort(struct c4iw_ep *ep, struct sk_buff *skb, gfp_t gfp)
 	return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
 }
 
-#define VLAN_NONE 0xfff
-#define FILTER_SEL_VLAN_NONE 0xffff
-#define FILTER_SEL_WIDTH_P_FC (3+1) /* port uses 3 bits, FCoE one bit */
-#define FILTER_SEL_WIDTH_VIN_P_FC \
-	(6 + 7 + FILTER_SEL_WIDTH_P_FC) /* 6 bits are unused, VF uses 7 bits*/
-#define FILTER_SEL_WIDTH_TAG_P_FC \
-	(3 + FILTER_SEL_WIDTH_VIN_P_FC) /* PF uses 3 bits */
-#define FILTER_SEL_WIDTH_VLD_TAG_P_FC (1 + FILTER_SEL_WIDTH_TAG_P_FC)
-
-static unsigned int select_ntuple(struct c4iw_dev *dev, struct dst_entry *dst,
-				  struct l2t_entry *l2t)
-{
-	unsigned int ntuple = 0;
-	u32 viid;
-
-	switch (dev->rdev.lldi.filt_mode) {
-
-	/* default filter mode */
-	case HW_TPL_FR_MT_PR_IV_P_FC:
-		if (l2t->vlan == VLAN_NONE)
-			ntuple |= FILTER_SEL_VLAN_NONE << FILTER_SEL_WIDTH_P_FC;
-		else {
-			ntuple |= l2t->vlan << FILTER_SEL_WIDTH_P_FC;
-			ntuple |= 1 << FILTER_SEL_WIDTH_TAG_P_FC;
-		}
-		ntuple |= l2t->lport << S_PORT | IPPROTO_TCP <<
-			  FILTER_SEL_WIDTH_VLD_TAG_P_FC;
-		break;
-	case HW_TPL_FR_MT_PR_OV_P_FC: {
-		viid = cxgb4_port_viid(l2t->neigh->dev);
-
-		ntuple |= FW_VIID_VIN_GET(viid) << FILTER_SEL_WIDTH_P_FC;
-		ntuple |= FW_VIID_PFN_GET(viid) << FILTER_SEL_WIDTH_VIN_P_FC;
-		ntuple |= FW_VIID_VIVLD_GET(viid) << FILTER_SEL_WIDTH_TAG_P_FC;
-		ntuple |= l2t->lport << S_PORT | IPPROTO_TCP <<
-			  FILTER_SEL_WIDTH_VLD_TAG_P_FC;
-		break;
-	}
-	default:
-		break;
-	}
-	return ntuple;
-}
-
 static int send_connect(struct c4iw_ep *ep)
 {
 	struct cpl_act_open_req *req;
@@ -641,8 +597,9 @@ static int send_connect(struct c4iw_ep *ep)
 		req->local_ip = la->sin_addr.s_addr;
 		req->peer_ip = ra->sin_addr.s_addr;
 		req->opt0 = cpu_to_be64(opt0);
-		req->params = cpu_to_be32(select_ntuple(ep->com.dev,
-					ep->dst, ep->l2t));
+		req->params = cpu_to_be32(cxgb4_select_ntuple(
+					ep->com.dev->rdev.lldi.ports[0],
+					ep->l2t));
 		req->opt2 = cpu_to_be32(opt2);
 	} else {
 		req6 = (struct cpl_act_open_req6 *)skb_put(skb, wrlen);
@@ -662,9 +619,9 @@ static int send_connect(struct c4iw_ep *ep)
 		req6->peer_ip_lo = *((__be64 *)
 					(ra6->sin6_addr.s6_addr + 8));
 		req6->opt0 = cpu_to_be64(opt0);
-		req6->params = cpu_to_be32(
-				select_ntuple(ep->com.dev, ep->dst,
-					      ep->l2t));
+		req6->params = cpu_to_be32(cxgb4_select_ntuple(
+					ep->com.dev->rdev.lldi.ports[0],
+					ep->l2t));
 		req6->opt2 = cpu_to_be32(opt2);
 	}
 } else {
@@ -681,8 +638,9 @@ static int send_connect(struct c4iw_ep *ep)
 		t5_req->peer_ip = ra->sin_addr.s_addr;
 		t5_req->opt0 = cpu_to_be64(opt0);
 		t5_req->params = cpu_to_be64(V_FILTER_TUPLE(
-					select_ntuple(ep->com.dev,
-						      ep->dst, ep->l2t)));
+					cxgb4_select_ntuple(
+					ep->com.dev->rdev.lldi.ports[0],
+					ep->l2t)));
 		t5_req->opt2 = cpu_to_be32(opt2);
 	} else {
 		t5_req6 = (struct cpl_t5_act_open_req6 *)
@@ -703,7 +661,9 @@ static int send_connect(struct c4iw_ep *ep)
 					(ra6->sin6_addr.s6_addr + 8));
 		t5_req6->opt0 = cpu_to_be64(opt0);
 		t5_req6->params = (__force __be64)cpu_to_be32(
-					select_ntuple(ep->com.dev, ep->dst, ep->l2t));
+					cxgb4_select_ntuple(
+					ep->com.dev->rdev.lldi.ports[0],
+					ep->l2t));
 		t5_req6->opt2 = cpu_to_be32(opt2);
 	}
 }
@@ -1630,7 +1590,8 @@ static void send_fw_act_open_req(struct c4iw_ep *ep, unsigned int atid)
 	memset(req, 0, sizeof(*req));
 	req->op_compl = htonl(V_WR_OP(FW_OFLD_CONNECTION_WR));
 	req->len16_pkd = htonl(FW_WR_LEN16(DIV_ROUND_UP(sizeof(*req), 16)));
-	req->le.filter = cpu_to_be32(select_ntuple(ep->com.dev, ep->dst,
+	req->le.filter = cpu_to_be32(cxgb4_select_ntuple(
+				     ep->com.dev->rdev.lldi.ports[0],
 				     ep->l2t));
 	sin = (struct sockaddr_in *)&ep->com.local_addr;
 	req->le.lport = sin->sin_port;
@@ -2938,7 +2899,8 @@ int c4iw_create_listen(struct iw_cm_id *cm_id, int backlog)
 	/*
 	 * Allocate a server TID.
 	 */
-	if (dev->rdev.lldi.enable_fw_ofld_conn)
+	if (dev->rdev.lldi.enable_fw_ofld_conn &&
+	    ep->com.local_addr.ss_family == AF_INET)
 		ep->stid = cxgb4_alloc_sftid(dev->rdev.lldi.tids,
 					     cm_id->local_addr.ss_family, ep);
 	else
@@ -3323,9 +3285,7 @@ static int rx_pkt(struct c4iw_dev *dev, struct sk_buff *skb)
 	/*
 	 * Calculate the server tid from filter hit index from cpl_rx_pkt.
 	 */
-	stid = (__force int) cpu_to_be32((__force u32) rss->hash_val)
-					- dev->rdev.lldi.tids->sftid_base
-					+ dev->rdev.lldi.tids->nstids;
+	stid = (__force int) cpu_to_be32((__force u32) rss->hash_val);
 
 	lep = (struct c4iw_ep *)lookup_stid(dev->rdev.lldi.tids, stid);
 	if (!lep) {
@@ -3397,7 +3357,9 @@ static int rx_pkt(struct c4iw_dev *dev, struct sk_buff *skb)
 	window = (__force u16) htons((__force u16)tcph->window);
 
 	/* Calculate filter portion for LE region. */
-	filter = (__force unsigned int) cpu_to_be32(select_ntuple(dev, dst, e));
+	filter = (__force unsigned int) cpu_to_be32(cxgb4_select_ntuple(
+						    dev->rdev.lldi.ports[0],
+						    e));
 
 	/*
 	 * Synthesize the cpl_pass_accept_req. We have everything except the
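With the private select_ntuple() deleted, every call site goes through the cxgb4_select_ntuple() helper exported by the cxgb4 LLD, so knowledge of the adapter's filter mode lives in one place. The shared call shape, as used in the hunks above (the first argument is the LLD's port net_device):

    req->params = cpu_to_be32(cxgb4_select_ntuple(
                              ep->com.dev->rdev.lldi.ports[0],
                              ep->l2t));

The old dst_entry argument disappears because the L2T entry already carries everything the tuple needs.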
diff --git a/drivers/infiniband/hw/cxgb4/mem.c b/drivers/infiniband/hw/cxgb4/mem.c
index 4cb8eb24497c..84e45006451c 100644
--- a/drivers/infiniband/hw/cxgb4/mem.c
+++ b/drivers/infiniband/hw/cxgb4/mem.c
@@ -173,7 +173,7 @@ static int _c4iw_write_mem_inline(struct c4iw_rdev *rdev, u32 addr, u32 len,
 	return ret;
 }
 
-int _c4iw_write_mem_dma(struct c4iw_rdev *rdev, u32 addr, u32 len, void *data)
+static int _c4iw_write_mem_dma(struct c4iw_rdev *rdev, u32 addr, u32 len, void *data)
 {
 	u32 remain = len;
 	u32 dmalen;
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_netlink.c b/drivers/infiniband/ulp/ipoib/ipoib_netlink.c
index c29b5c838833..cdc7df4fdb8a 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_netlink.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_netlink.c
@@ -31,6 +31,7 @@
  */
 
 #include <linux/netdevice.h>
+#include <linux/if_arp.h>	/* For ARPHRD_xxx */
 #include <linux/module.h>
 #include <net/rtnetlink.h>
 #include "ipoib.h"
@@ -103,7 +104,7 @@ static int ipoib_new_child_link(struct net *src_net, struct net_device *dev,
 		return -EINVAL;
 
 	pdev = __dev_get_by_index(src_net, nla_get_u32(tb[IFLA_LINK]));
-	if (!pdev)
+	if (!pdev || pdev->type != ARPHRD_INFINIBAND)
 		return -ENODEV;
 
 	ppriv = netdev_priv(pdev);
diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
index 6be57c38638d..9804fca6bf06 100644
--- a/drivers/infiniband/ulp/isert/ib_isert.c
+++ b/drivers/infiniband/ulp/isert/ib_isert.c
@@ -207,7 +207,9 @@ isert_free_rx_descriptors(struct isert_conn *isert_conn)
 	isert_conn->conn_rx_descs = NULL;
 }
 
+static void isert_cq_tx_work(struct work_struct *);
 static void isert_cq_tx_callback(struct ib_cq *, void *);
+static void isert_cq_rx_work(struct work_struct *);
 static void isert_cq_rx_callback(struct ib_cq *, void *);
 
 static int
@@ -259,26 +261,36 @@ isert_create_device_ib_res(struct isert_device *device)
 		cq_desc[i].device = device;
 		cq_desc[i].cq_index = i;
 
+		INIT_WORK(&cq_desc[i].cq_rx_work, isert_cq_rx_work);
 		device->dev_rx_cq[i] = ib_create_cq(device->ib_device,
 						isert_cq_rx_callback,
 						isert_cq_event_callback,
 						(void *)&cq_desc[i],
 						ISER_MAX_RX_CQ_LEN, i);
-		if (IS_ERR(device->dev_rx_cq[i]))
+		if (IS_ERR(device->dev_rx_cq[i])) {
+			ret = PTR_ERR(device->dev_rx_cq[i]);
+			device->dev_rx_cq[i] = NULL;
 			goto out_cq;
+		}
 
+		INIT_WORK(&cq_desc[i].cq_tx_work, isert_cq_tx_work);
 		device->dev_tx_cq[i] = ib_create_cq(device->ib_device,
 						isert_cq_tx_callback,
 						isert_cq_event_callback,
 						(void *)&cq_desc[i],
 						ISER_MAX_TX_CQ_LEN, i);
-		if (IS_ERR(device->dev_tx_cq[i]))
+		if (IS_ERR(device->dev_tx_cq[i])) {
+			ret = PTR_ERR(device->dev_tx_cq[i]);
+			device->dev_tx_cq[i] = NULL;
 			goto out_cq;
+		}
 
-		if (ib_req_notify_cq(device->dev_rx_cq[i], IB_CQ_NEXT_COMP))
+		ret = ib_req_notify_cq(device->dev_rx_cq[i], IB_CQ_NEXT_COMP);
+		if (ret)
 			goto out_cq;
 
-		if (ib_req_notify_cq(device->dev_tx_cq[i], IB_CQ_NEXT_COMP))
+		ret = ib_req_notify_cq(device->dev_tx_cq[i], IB_CQ_NEXT_COMP);
+		if (ret)
 			goto out_cq;
 	}
 
@@ -1724,7 +1736,6 @@ isert_cq_tx_callback(struct ib_cq *cq, void *context)
 {
 	struct isert_cq_desc *cq_desc = (struct isert_cq_desc *)context;
 
-	INIT_WORK(&cq_desc->cq_tx_work, isert_cq_tx_work);
 	queue_work(isert_comp_wq, &cq_desc->cq_tx_work);
 }
 
@@ -1768,7 +1779,6 @@ isert_cq_rx_callback(struct ib_cq *cq, void *context)
 {
 	struct isert_cq_desc *cq_desc = (struct isert_cq_desc *)context;
 
-	INIT_WORK(&cq_desc->cq_rx_work, isert_cq_rx_work);
 	queue_work(isert_rx_wq, &cq_desc->cq_rx_work);
 }
 
diff --git a/drivers/irqchip/irq-renesas-intc-irqpin.c b/drivers/irqchip/irq-renesas-intc-irqpin.c
index 82cec63a9011..3ee78f02e5d7 100644
--- a/drivers/irqchip/irq-renesas-intc-irqpin.c
+++ b/drivers/irqchip/irq-renesas-intc-irqpin.c
@@ -149,8 +149,9 @@ static void intc_irqpin_read_modify_write(struct intc_irqpin_priv *p,
 static void intc_irqpin_mask_unmask_prio(struct intc_irqpin_priv *p,
 					 int irq, int do_mask)
 {
-	int bitfield_width = 4; /* PRIO assumed to have fixed bitfield width */
-	int shift = (7 - irq) * bitfield_width; /* PRIO assumed to be 32-bit */
+	/* The PRIO register is assumed to be 32-bit with fixed 4-bit fields. */
+	int bitfield_width = 4;
+	int shift = 32 - (irq + 1) * bitfield_width;
 
 	intc_irqpin_read_modify_write(p, INTC_IRQPIN_REG_PRIO,
 				      shift, bitfield_width,
@@ -159,8 +160,9 @@ static void intc_irqpin_mask_unmask_prio(struct intc_irqpin_priv *p,
 
 static int intc_irqpin_set_sense(struct intc_irqpin_priv *p, int irq, int value)
 {
+	/* The SENSE register is assumed to be 32-bit. */
 	int bitfield_width = p->config.sense_bitfield_width;
-	int shift = (7 - irq) * bitfield_width; /* SENSE assumed to be 32-bit */
+	int shift = 32 - (irq + 1) * bitfield_width;
 
 	dev_dbg(&p->pdev->dev, "sense irq = %d, mode = %d\n", irq, value);
 
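Both shift computations now derive the field position from the register width instead of hard-coding eight 4-bit slots. For w-bit fields packed from the most significant end of a 32-bit register, field n starts at bit 32 - (n + 1) * w; a quick check of the two forms:

    /* w == 4:  32 - (irq + 1) * 4  ==  (7 - irq) * 4   (identical)      */
    /* w == 2:  32 - (irq + 1) * 2  ==  (15 - irq) * 2; the old form     */
    /*          (7 - irq) * 2 pointed at the wrong bits                  */
    int shift = 32 - (irq + 1) * bitfield_width;

so SENSE registers whose sense_bitfield_width is not 4 are now addressed correctly.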
diff --git a/drivers/isdn/hisax/hfc_pci.c b/drivers/isdn/hisax/hfc_pci.c
index 497bd026c237..4a4825528188 100644
--- a/drivers/isdn/hisax/hfc_pci.c
+++ b/drivers/isdn/hisax/hfc_pci.c
@@ -1643,10 +1643,6 @@ setup_hfcpci(struct IsdnCard *card)
 	int i;
 	struct pci_dev *tmp_hfcpci = NULL;
 
-#ifdef __BIG_ENDIAN
-#error "not running on big endian machines now"
-#endif
-
 	strcpy(tmp, hfcpci_revision);
 	printk(KERN_INFO "HiSax: HFC-PCI driver Rev. %s\n", HiSax_getrev(tmp));
 
diff --git a/drivers/isdn/hisax/telespci.c b/drivers/isdn/hisax/telespci.c
index f6ab63aa6995..33eeb4602c7e 100644
--- a/drivers/isdn/hisax/telespci.c
+++ b/drivers/isdn/hisax/telespci.c
@@ -290,10 +290,6 @@ int setup_telespci(struct IsdnCard *card)
 	struct IsdnCardState *cs = card->cs;
 	char tmp[64];
 
-#ifdef __BIG_ENDIAN
-#error "not running on big endian machines now"
-#endif
-
 	strcpy(tmp, telespci_revision);
 	printk(KERN_INFO "HiSax: Teles/PCI driver Rev. %s\n", HiSax_getrev(tmp));
 	if (cs->typ != ISDN_CTYPE_TELESPCI)
diff --git a/drivers/md/bcache/alloc.c b/drivers/md/bcache/alloc.c
index 2b46bf1d7e40..4c9852d92b0a 100644
--- a/drivers/md/bcache/alloc.c
+++ b/drivers/md/bcache/alloc.c
@@ -421,9 +421,11 @@ out:
 
 	if (watermark <= WATERMARK_METADATA) {
 		SET_GC_MARK(b, GC_MARK_METADATA);
+		SET_GC_MOVE(b, 0);
 		b->prio = BTREE_PRIO;
 	} else {
 		SET_GC_MARK(b, GC_MARK_RECLAIMABLE);
+		SET_GC_MOVE(b, 0);
 		b->prio = INITIAL_PRIO;
 	}
 
diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h
index 4beb55a0ff30..754f43177483 100644
--- a/drivers/md/bcache/bcache.h
+++ b/drivers/md/bcache/bcache.h
@@ -197,7 +197,7 @@ struct bucket {
 	uint8_t		disk_gen;
 	uint8_t		last_gc; /* Most out of date gen in the btree */
 	uint8_t		gc_gen;
-	uint16_t	gc_mark;
+	uint16_t	gc_mark; /* Bitfield used by GC. See below for field */
 };
 
 /*
@@ -209,7 +209,8 @@ BITMASK(GC_MARK, struct bucket, gc_mark, 0, 2);
 #define GC_MARK_RECLAIMABLE	0
 #define GC_MARK_DIRTY		1
 #define GC_MARK_METADATA	2
-BITMASK(GC_SECTORS_USED, struct bucket, gc_mark, 2, 14);
+BITMASK(GC_SECTORS_USED, struct bucket, gc_mark, 2, 13);
+BITMASK(GC_MOVE, struct bucket, gc_mark, 15, 1);
 
 #include "journal.h"
 #include "stats.h"
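After this pair of changes the 16-bit gc_mark word is laid out as follows (derived from the BITMASK() lines above):

    /* bits  0..1   GC_MARK          reclaimable / dirty / metadata   */
    /* bits  2..14  GC_SECTORS_USED  13 bits, shrunk from 14          */
    /* bit   15     GC_MOVE          bucket selected for moving GC    */

The one bit shaved off GC_SECTORS_USED funds the new per-bucket GC_MOVE flag that movinggc.c now tests instead of a per-cache threshold.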
@@ -372,14 +373,14 @@ struct cached_dev {
 	unsigned char		writeback_percent;
 	unsigned		writeback_delay;
 
-	int			writeback_rate_change;
-	int64_t			writeback_rate_derivative;
 	uint64_t		writeback_rate_target;
+	int64_t			writeback_rate_proportional;
+	int64_t			writeback_rate_derivative;
+	int64_t			writeback_rate_change;
 
 	unsigned		writeback_rate_update_seconds;
 	unsigned		writeback_rate_d_term;
 	unsigned		writeback_rate_p_term_inverse;
-	unsigned		writeback_rate_d_smooth;
 };
 
 enum alloc_watermarks {
@@ -445,7 +446,6 @@ struct cache {
 	 * call prio_write() to keep gens from wrapping.
 	 */
 	uint8_t		need_save_prio;
-	unsigned	gc_move_threshold;
 
 	/*
 	 * If nonzero, we know we aren't going to find any buckets to invalidate
diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
index 5e2765aadce1..31bb53fcc67a 100644
--- a/drivers/md/bcache/btree.c
+++ b/drivers/md/bcache/btree.c
@@ -1561,6 +1561,28 @@ size_t bch_btree_gc_finish(struct cache_set *c)
 		SET_GC_MARK(PTR_BUCKET(c, &c->uuid_bucket, i),
 			    GC_MARK_METADATA);
 
+	/* don't reclaim buckets to which writeback keys point */
+	rcu_read_lock();
+	for (i = 0; i < c->nr_uuids; i++) {
+		struct bcache_device *d = c->devices[i];
+		struct cached_dev *dc;
+		struct keybuf_key *w, *n;
+		unsigned j;
+
+		if (!d || UUID_FLASH_ONLY(&c->uuids[i]))
+			continue;
+		dc = container_of(d, struct cached_dev, disk);
+
+		spin_lock(&dc->writeback_keys.lock);
+		rbtree_postorder_for_each_entry_safe(w, n,
+					&dc->writeback_keys.keys, node)
+			for (j = 0; j < KEY_PTRS(&w->key); j++)
+				SET_GC_MARK(PTR_BUCKET(c, &w->key, j),
+					    GC_MARK_DIRTY);
+		spin_unlock(&dc->writeback_keys.lock);
+	}
+	rcu_read_unlock();
+
 	for_each_cache(ca, c, i) {
 		uint64_t *i;
 
@@ -1817,7 +1839,8 @@ static bool fix_overlapping_extents(struct btree *b, struct bkey *insert,
 	if (KEY_START(k) > KEY_START(insert) + sectors_found)
 		goto check_failed;
 
-	if (KEY_PTRS(replace_key) != KEY_PTRS(k))
+	if (KEY_PTRS(k) != KEY_PTRS(replace_key) ||
+	    KEY_DIRTY(k) != KEY_DIRTY(replace_key))
 		goto check_failed;
 
 	/* skip past gen */
@@ -2217,7 +2240,7 @@ struct btree_insert_op {
 	struct bkey	*replace_key;
 };
 
-int btree_insert_fn(struct btree_op *b_op, struct btree *b)
+static int btree_insert_fn(struct btree_op *b_op, struct btree *b)
 {
 	struct btree_insert_op *op = container_of(b_op,
 					struct btree_insert_op, op);
diff --git a/drivers/md/bcache/movinggc.c b/drivers/md/bcache/movinggc.c
index 7c1275e66025..f2f0998c4a91 100644
--- a/drivers/md/bcache/movinggc.c
+++ b/drivers/md/bcache/movinggc.c
@@ -25,10 +25,9 @@ static bool moving_pred(struct keybuf *buf, struct bkey *k)
 	unsigned i;
 
 	for (i = 0; i < KEY_PTRS(k); i++) {
-		struct cache *ca = PTR_CACHE(c, k, i);
 		struct bucket *g = PTR_BUCKET(c, k, i);
 
-		if (GC_SECTORS_USED(g) < ca->gc_move_threshold)
+		if (GC_MOVE(g))
 			return true;
 	}
 
@@ -65,11 +64,16 @@ static void write_moving_finish(struct closure *cl)
 
 static void read_moving_endio(struct bio *bio, int error)
 {
+	struct bbio *b = container_of(bio, struct bbio, bio);
 	struct moving_io *io = container_of(bio->bi_private,
 					    struct moving_io, cl);
 
 	if (error)
 		io->op.error = error;
+	else if (!KEY_DIRTY(&b->key) &&
+		 ptr_stale(io->op.c, &b->key, 0)) {
+		io->op.error = -EINTR;
+	}
 
 	bch_bbio_endio(io->op.c, bio, error, "reading data to move");
 }
@@ -141,6 +145,11 @@ static void read_moving(struct cache_set *c)
 		if (!w)
 			break;
 
+		if (ptr_stale(c, &w->key, 0)) {
+			bch_keybuf_del(&c->moving_gc_keys, w);
+			continue;
+		}
+
 		io = kzalloc(sizeof(struct moving_io) + sizeof(struct bio_vec)
 			     * DIV_ROUND_UP(KEY_SIZE(&w->key), PAGE_SECTORS),
 			     GFP_KERNEL);
@@ -184,7 +193,8 @@ static bool bucket_cmp(struct bucket *l, struct bucket *r)
 
 static unsigned bucket_heap_top(struct cache *ca)
 {
-	return GC_SECTORS_USED(heap_peek(&ca->heap));
+	struct bucket *b;
+	return (b = heap_peek(&ca->heap)) ? GC_SECTORS_USED(b) : 0;
 }
 
 void bch_moving_gc(struct cache_set *c)
@@ -226,9 +236,8 @@ void bch_moving_gc(struct cache_set *c)
 		sectors_to_move -= GC_SECTORS_USED(b);
 	}
 
-	ca->gc_move_threshold = bucket_heap_top(ca);
-
-	pr_debug("threshold %u", ca->gc_move_threshold);
+	while (heap_pop(&ca->heap, b, bucket_cmp))
+		SET_GC_MOVE(b, 1);
 	}
 
 	mutex_unlock(&c->bucket_lock);
diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
index dec15cd2d797..c57bfa071a57 100644
--- a/drivers/md/bcache/super.c
+++ b/drivers/md/bcache/super.c
@@ -1676,7 +1676,7 @@ err:
 static bool can_attach_cache(struct cache *ca, struct cache_set *c)
 {
 	return ca->sb.block_size	== c->sb.block_size &&
-		ca->sb.bucket_size	== c->sb.block_size &&
+		ca->sb.bucket_size	== c->sb.bucket_size &&
 		ca->sb.nr_in_set	== c->sb.nr_in_set;
 }
 
diff --git a/drivers/md/bcache/sysfs.c b/drivers/md/bcache/sysfs.c
index 80d4c2bee18a..a1f85612f0b3 100644
--- a/drivers/md/bcache/sysfs.c
+++ b/drivers/md/bcache/sysfs.c
@@ -83,7 +83,6 @@ rw_attribute(writeback_rate);
 rw_attribute(writeback_rate_update_seconds);
 rw_attribute(writeback_rate_d_term);
 rw_attribute(writeback_rate_p_term_inverse);
-rw_attribute(writeback_rate_d_smooth);
 read_attribute(writeback_rate_debug);
 
 read_attribute(stripe_size);
@@ -129,31 +128,41 @@ SHOW(__bch_cached_dev)
 	var_printf(writeback_running,	"%i");
 	var_print(writeback_delay);
 	var_print(writeback_percent);
-	sysfs_print(writeback_rate,	dc->writeback_rate.rate);
+	sysfs_hprint(writeback_rate,	dc->writeback_rate.rate << 9);
 
 	var_print(writeback_rate_update_seconds);
 	var_print(writeback_rate_d_term);
 	var_print(writeback_rate_p_term_inverse);
-	var_print(writeback_rate_d_smooth);
 
 	if (attr == &sysfs_writeback_rate_debug) {
+		char rate[20];
 		char dirty[20];
-		char derivative[20];
 		char target[20];
-		bch_hprint(dirty,
-			   bcache_dev_sectors_dirty(&dc->disk) << 9);
-		bch_hprint(derivative,	dc->writeback_rate_derivative << 9);
+		char proportional[20];
+		char derivative[20];
+		char change[20];
+		s64 next_io;
+
+		bch_hprint(rate,	dc->writeback_rate.rate << 9);
+		bch_hprint(dirty,	bcache_dev_sectors_dirty(&dc->disk) << 9);
 		bch_hprint(target,	dc->writeback_rate_target << 9);
+		bch_hprint(proportional,dc->writeback_rate_proportional << 9);
+		bch_hprint(derivative,	dc->writeback_rate_derivative << 9);
+		bch_hprint(change,	dc->writeback_rate_change << 9);
+
+		next_io = div64_s64(dc->writeback_rate.next - local_clock(),
+				    NSEC_PER_MSEC);
 
 		return sprintf(buf,
-			       "rate:\t\t%u\n"
-			       "change:\t\t%i\n"
+			       "rate:\t\t%s/sec\n"
 			       "dirty:\t\t%s\n"
+			       "target:\t\t%s\n"
+			       "proportional:\t%s\n"
 			       "derivative:\t%s\n"
-			       "target:\t\t%s\n",
-			       dc->writeback_rate.rate,
-			       dc->writeback_rate_change,
-			       dirty, derivative, target);
+			       "change:\t\t%s/sec\n"
+			       "next io:\t%llims\n",
+			       rate, dirty, target, proportional,
+			       derivative, change, next_io);
 	}
 
 	sysfs_hprint(dirty_data,
@@ -189,6 +198,7 @@ STORE(__cached_dev)
 	struct kobj_uevent_env *env;
 
 #define d_strtoul(var)		sysfs_strtoul(var, dc->var)
+#define d_strtoul_nonzero(var)	sysfs_strtoul_clamp(var, dc->var, 1, INT_MAX)
 #define d_strtoi_h(var)		sysfs_hatoi(var, dc->var)
 
 	sysfs_strtoul(data_csum,	dc->disk.data_csum);
@@ -197,16 +207,15 @@ STORE(__cached_dev)
 	d_strtoul(writeback_metadata);
 	d_strtoul(writeback_running);
 	d_strtoul(writeback_delay);
-	sysfs_strtoul_clamp(writeback_rate,
-			    dc->writeback_rate.rate, 1, 1000000);
+
 	sysfs_strtoul_clamp(writeback_percent, dc->writeback_percent, 0, 40);
 
-	d_strtoul(writeback_rate_update_seconds);
+	sysfs_strtoul_clamp(writeback_rate,
+			    dc->writeback_rate.rate, 1, INT_MAX);
+
+	d_strtoul_nonzero(writeback_rate_update_seconds);
 	d_strtoul(writeback_rate_d_term);
-	d_strtoul(writeback_rate_p_term_inverse);
-	sysfs_strtoul_clamp(writeback_rate_p_term_inverse,
-			    dc->writeback_rate_p_term_inverse, 1, INT_MAX);
-	d_strtoul(writeback_rate_d_smooth);
+	d_strtoul_nonzero(writeback_rate_p_term_inverse);
 
 	d_strtoi_h(sequential_cutoff);
 	d_strtoi_h(readahead);
@@ -313,7 +322,6 @@ static struct attribute *bch_cached_dev_files[] = {
 	&sysfs_writeback_rate_update_seconds,
 	&sysfs_writeback_rate_d_term,
 	&sysfs_writeback_rate_p_term_inverse,
-	&sysfs_writeback_rate_d_smooth,
 	&sysfs_writeback_rate_debug,
 	&sysfs_dirty_data,
 	&sysfs_stripe_size,
diff --git a/drivers/md/bcache/util.c b/drivers/md/bcache/util.c
index 462214eeacbe..bb37618e7664 100644
--- a/drivers/md/bcache/util.c
+++ b/drivers/md/bcache/util.c
@@ -209,7 +209,13 @@ uint64_t bch_next_delay(struct bch_ratelimit *d, uint64_t done)
 {
 	uint64_t now = local_clock();
 
-	d->next += div_u64(done, d->rate);
+	d->next += div_u64(done * NSEC_PER_SEC, d->rate);
+
+	if (time_before64(now + NSEC_PER_SEC, d->next))
+		d->next = now + NSEC_PER_SEC;
+
+	if (time_after64(now - NSEC_PER_SEC * 2, d->next))
+		d->next = now - NSEC_PER_SEC * 2;
 
 	return time_after64(d->next, now)
 	       ? div_u64(d->next - now, NSEC_PER_SEC / HZ)
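bch_next_delay() now treats d->rate as units per second: each completed chunk pushes the deadline forward by done / rate seconds, and the deadline is pinned near the present so neither a stall nor an idle stretch can accumulate unbounded credit. A simplified restatement (ignoring the 64-bit wraparound that time_before64()/time_after64() handle in the real code):

    d->next += div_u64(done * NSEC_PER_SEC, d->rate);
    if (d->next > now + NSEC_PER_SEC)          /* at most 1 s of credit  */
        d->next = now + NSEC_PER_SEC;
    if (d->next < now - 2 * NSEC_PER_SEC)      /* at most 2 s of backlog */
        d->next = now - 2 * NSEC_PER_SEC;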
diff --git a/drivers/md/bcache/util.h b/drivers/md/bcache/util.h
index 362c4b3f8b4a..1030c6020e98 100644
--- a/drivers/md/bcache/util.h
+++ b/drivers/md/bcache/util.h
@@ -110,7 +110,7 @@ do { \
 	_r;								\
 })
 
-#define heap_peek(h)	((h)->size ? (h)->data[0] : NULL)
+#define heap_peek(h)	((h)->used ? (h)->data[0] : NULL)
 
 #define heap_full(h)	((h)->used == (h)->size)
 
diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c
index 99053b1251be..6c44fe059c27 100644
--- a/drivers/md/bcache/writeback.c
+++ b/drivers/md/bcache/writeback.c
@@ -30,38 +30,40 @@ static void __update_writeback_rate(struct cached_dev *dc)
 
 	/* PD controller */
 
-	int change = 0;
-	int64_t error;
 	int64_t dirty = bcache_dev_sectors_dirty(&dc->disk);
 	int64_t derivative = dirty - dc->disk.sectors_dirty_last;
+	int64_t proportional = dirty - target;
+	int64_t change;
 
 	dc->disk.sectors_dirty_last = dirty;
 
-	derivative *= dc->writeback_rate_d_term;
-	derivative = clamp(derivative, -dirty, dirty);
+	/* Scale to sectors per second */
 
-	derivative = ewma_add(dc->disk.sectors_dirty_derivative, derivative,
-			      dc->writeback_rate_d_smooth, 0);
+	proportional *= dc->writeback_rate_update_seconds;
+	proportional = div_s64(proportional, dc->writeback_rate_p_term_inverse);
 
-	/* Avoid divide by zero */
-	if (!target)
-		goto out;
+	derivative = div_s64(derivative, dc->writeback_rate_update_seconds);
 
-	error = div64_s64((dirty + derivative - target) << 8, target);
+	derivative = ewma_add(dc->disk.sectors_dirty_derivative, derivative,
+			      (dc->writeback_rate_d_term /
+			       dc->writeback_rate_update_seconds) ?: 1, 0);
+
+	derivative *= dc->writeback_rate_d_term;
+	derivative = div_s64(derivative, dc->writeback_rate_p_term_inverse);
 
-	change = div_s64((dc->writeback_rate.rate * error) >> 8,
-			 dc->writeback_rate_p_term_inverse);
+	change = proportional + derivative;
 
 	/* Don't increase writeback rate if the device isn't keeping up */
 	if (change > 0 &&
 	    time_after64(local_clock(),
-			 dc->writeback_rate.next + 10 * NSEC_PER_MSEC))
+			 dc->writeback_rate.next + NSEC_PER_MSEC))
 		change = 0;
 
 	dc->writeback_rate.rate =
-		clamp_t(int64_t, dc->writeback_rate.rate + change,
+		clamp_t(int64_t, (int64_t) dc->writeback_rate.rate + change,
 			1, NSEC_PER_MSEC);
-out:
+
+	dc->writeback_rate_proportional = proportional;
 	dc->writeback_rate_derivative = derivative;
 	dc->writeback_rate_change = change;
 	dc->writeback_rate_target = target;
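Restated, the rewritten controller computes, in sectors per second:

    proportional = (dirty - target) * update_seconds / p_term_inverse
    derivative   = ewma(d(dirty)/dt) * d_term / p_term_inverse
    rate        += proportional + derivative    /* clamped to [1, 10^6] */

With the new defaults (update every 5 s, p_term_inverse = 6000) a standing error of 6000 sectors adjusts the rate by 5 sectors/s per update, and the old '/* Avoid divide by zero */' escape disappears because nothing divides by target any more.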
@@ -87,15 +89,11 @@ static void update_writeback_rate(struct work_struct *work)
 
 static unsigned writeback_delay(struct cached_dev *dc, unsigned sectors)
 {
-	uint64_t ret;
-
 	if (test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags) ||
 	    !dc->writeback_percent)
 		return 0;
 
-	ret = bch_next_delay(&dc->writeback_rate, sectors * 10000000ULL);
-
-	return min_t(uint64_t, ret, HZ);
+	return bch_next_delay(&dc->writeback_rate, sectors);
 }
 
 struct dirty_io {
@@ -241,7 +239,7 @@ static void read_dirty(struct cached_dev *dc)
 		if (KEY_START(&w->key) != dc->last_read ||
 		    jiffies_to_msecs(delay) > 50)
 			while (!kthread_should_stop() && delay)
-				delay = schedule_timeout_interruptible(delay);
+				delay = schedule_timeout_uninterruptible(delay);
 
 		dc->last_read	= KEY_OFFSET(&w->key);
 
@@ -438,7 +436,7 @@ static int bch_writeback_thread(void *arg)
 			while (delay &&
 			       !kthread_should_stop() &&
 			       !test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags))
-				delay = schedule_timeout_interruptible(delay);
+				delay = schedule_timeout_uninterruptible(delay);
 		}
 	}
 
@@ -476,6 +474,8 @@ void bch_sectors_dirty_init(struct cached_dev *dc)
 
 	bch_btree_map_keys(&op.op, dc->disk.c, &KEY(op.inode, 0, 0),
 			   sectors_dirty_init_fn, 0);
+
+	dc->disk.sectors_dirty_last = bcache_dev_sectors_dirty(&dc->disk);
 }
 
 int bch_cached_dev_writeback_init(struct cached_dev *dc)
481int bch_cached_dev_writeback_init(struct cached_dev *dc) 481int bch_cached_dev_writeback_init(struct cached_dev *dc)
@@ -490,18 +490,15 @@ int bch_cached_dev_writeback_init(struct cached_dev *dc)
490 dc->writeback_delay = 30; 490 dc->writeback_delay = 30;
491 dc->writeback_rate.rate = 1024; 491 dc->writeback_rate.rate = 1024;
492 492
493 dc->writeback_rate_update_seconds = 30; 493 dc->writeback_rate_update_seconds = 5;
494 dc->writeback_rate_d_term = 16; 494 dc->writeback_rate_d_term = 30;
495 dc->writeback_rate_p_term_inverse = 64; 495 dc->writeback_rate_p_term_inverse = 6000;
496 dc->writeback_rate_d_smooth = 8;
497 496
498 dc->writeback_thread = kthread_create(bch_writeback_thread, dc, 497 dc->writeback_thread = kthread_create(bch_writeback_thread, dc,
499 "bcache_writeback"); 498 "bcache_writeback");
500 if (IS_ERR(dc->writeback_thread)) 499 if (IS_ERR(dc->writeback_thread))
501 return PTR_ERR(dc->writeback_thread); 500 return PTR_ERR(dc->writeback_thread);
502 501
503 set_task_state(dc->writeback_thread, TASK_INTERRUPTIBLE);
504
505 INIT_DELAYED_WORK(&dc->writeback_rate_update, update_writeback_rate); 502 INIT_DELAYED_WORK(&dc->writeback_rate_update, update_writeback_rate);
506 schedule_delayed_work(&dc->writeback_rate_update, 503 schedule_delayed_work(&dc->writeback_rate_update,
507 dc->writeback_rate_update_seconds * HZ); 504 dc->writeback_rate_update_seconds * HZ);
diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c
index 81559b2dedad..539e24a1c86c 100644
--- a/drivers/net/bonding/bond_3ad.c
+++ b/drivers/net/bonding/bond_3ad.c
@@ -2209,20 +2209,25 @@ void bond_3ad_adapter_speed_changed(struct slave *slave)
 
 	port = &(SLAVE_AD_INFO(slave).port);
 
-	// if slave is null, the whole port is not initialized
+	/* if slave is null, the whole port is not initialized */
 	if (!port->slave) {
 		pr_warning("Warning: %s: speed changed for uninitialized port on %s\n",
 			   slave->bond->dev->name, slave->dev->name);
 		return;
 	}
 
+	__get_state_machine_lock(port);
+
 	port->actor_admin_port_key &= ~AD_SPEED_KEY_BITS;
 	port->actor_oper_port_key = port->actor_admin_port_key |=
 		(__get_link_speed(port) << 1);
 	pr_debug("Port %d changed speed\n", port->actor_port_number);
-	// there is no need to reselect a new aggregator, just signal the
-	// state machines to reinitialize
+	/* there is no need to reselect a new aggregator, just signal the
+	 * state machines to reinitialize
+	 */
 	port->sm_vars |= AD_PORT_BEGIN;
+
+	__release_state_machine_lock(port);
 }
 
 /**
@@ -2237,20 +2242,25 @@ void bond_3ad_adapter_duplex_changed(struct slave *slave)
 
 	port = &(SLAVE_AD_INFO(slave).port);
 
-	// if slave is null, the whole port is not initialized
+	/* if slave is null, the whole port is not initialized */
 	if (!port->slave) {
 		pr_warning("%s: Warning: duplex changed for uninitialized port on %s\n",
 			   slave->bond->dev->name, slave->dev->name);
 		return;
 	}
 
+	__get_state_machine_lock(port);
+
 	port->actor_admin_port_key &= ~AD_DUPLEX_KEY_BITS;
 	port->actor_oper_port_key = port->actor_admin_port_key |=
 		__get_duplex(port);
 	pr_debug("Port %d changed duplex\n", port->actor_port_number);
-	// there is no need to reselect a new aggregator, just signal the
-	// state machines to reinitialize
+	/* there is no need to reselect a new aggregator, just signal the
+	 * state machines to reinitialize
+	 */
 	port->sm_vars |= AD_PORT_BEGIN;
+
+	__release_state_machine_lock(port);
 }
 
 /**
@@ -2266,15 +2276,21 @@ void bond_3ad_handle_link_change(struct slave *slave, char link)
 
 	port = &(SLAVE_AD_INFO(slave).port);
 
-	// if slave is null, the whole port is not initialized
+	/* if slave is null, the whole port is not initialized */
 	if (!port->slave) {
 		pr_warning("Warning: %s: link status changed for uninitialized port on %s\n",
 			   slave->bond->dev->name, slave->dev->name);
 		return;
 	}
 
-	// on link down we are zeroing duplex and speed since some of the adaptors(ce1000.lan) report full duplex/speed instead of N/A(duplex) / 0(speed)
-	// on link up we are forcing recheck on the duplex and speed since some of he adaptors(ce1000.lan) report
+	__get_state_machine_lock(port);
+	/* on link down we are zeroing duplex and speed since
+	 * some of the adaptors(ce1000.lan) report full duplex/speed
+	 * instead of N/A(duplex) / 0(speed).
+	 *
+	 * on link up we are forcing recheck on the duplex and speed since
+	 * some of the adaptors(ce1000.lan) report.
+	 */
 	if (link == BOND_LINK_UP) {
 		port->is_enabled = true;
 		port->actor_admin_port_key &= ~AD_DUPLEX_KEY_BITS;
@@ -2290,10 +2306,15 @@ void bond_3ad_handle_link_change(struct slave *slave, char link)
 		port->actor_oper_port_key = (port->actor_admin_port_key &=
 					     ~AD_SPEED_KEY_BITS);
 	}
-	//BOND_PRINT_DBG(("Port %d changed link status to %s", port->actor_port_number, ((link == BOND_LINK_UP)?"UP":"DOWN")));
-	// there is no need to reselect a new aggregator, just signal the
-	// state machines to reinitialize
+	pr_debug("Port %d changed link status to %s",
+		 port->actor_port_number,
+		 (link == BOND_LINK_UP) ? "UP" : "DOWN");
+	/* there is no need to reselect a new aggregator, just signal the
+	 * state machines to reinitialize
+	 */
 	port->sm_vars |= AD_PORT_BEGIN;
+
+	__release_state_machine_lock(port);
 }
 
 /*
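All three notification handlers now share one shape, so the periodic state machine can no longer observe a half-updated port key:

    __get_state_machine_lock(port);
    /* ... update actor_admin_port_key / actor_oper_port_key ... */
    port->sm_vars |= AD_PORT_BEGIN;
    __release_state_machine_lock(port);

The commented-out BOND_PRINT_DBG remnant also becomes a live pr_debug() carrying the same information.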
diff --git a/drivers/net/ethernet/arc/emac_main.c b/drivers/net/ethernet/arc/emac_main.c
index eedf2a5fc2be..eeecc29cf5b7 100644
--- a/drivers/net/ethernet/arc/emac_main.c
+++ b/drivers/net/ethernet/arc/emac_main.c
@@ -555,6 +555,8 @@ static int arc_emac_tx(struct sk_buff *skb, struct net_device *ndev)
 	/* Make sure pointer to data buffer is set */
 	wmb();
 
+	skb_tx_timestamp(skb);
+
 	*info = cpu_to_le32(FOR_EMAC | FIRST_OR_LAST_MASK | len);
 
 	/* Increment index to point to the next BD */
@@ -569,8 +571,6 @@ static int arc_emac_tx(struct sk_buff *skb, struct net_device *ndev)
569 571
570 arc_reg_set(priv, R_STATUS, TXPL_MASK); 572 arc_reg_set(priv, R_STATUS, TXPL_MASK);
571 573
572 skb_tx_timestamp(skb);
573
574 return NETDEV_TX_OK; 574 return NETDEV_TX_OK;
575} 575}
576 576
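The reordering above is not cosmetic: writing FOR_EMAC into *info hands the buffer descriptor to the MAC, which may transmit and complete the packet immediately, so skb_tx_timestamp() has to run before that store (the wmb() just above it keeps the data-pointer write visible to the hardware first). A minimal user-space sketch of the required ordering, with illustrative stand-in functions rather than the driver's real helpers:

#include <stdio.h>

/* Stand-in steps for the TX descriptor handoff; names are illustrative. */
static void fill_descriptor(void)  { printf("1. write data pointer\n"); }
static void write_barrier(void)    { printf("2. wmb() orders the writes\n"); }
static void take_timestamp(void)   { printf("3. skb_tx_timestamp()\n"); }
static void hand_to_hardware(void) { printf("4. set FOR_EMAC: HW owns it\n"); }

int main(void)
{
        /* After step 4 the MAC may send and complete the packet at any
         * moment, so the timestamp must already have been taken.
         */
        fill_descriptor();
        write_barrier();
        take_timestamp();
        hand_to_hardware();
        return 0;
}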
diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
index a36a760ada28..29801750f239 100644
--- a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
+++ b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
@@ -145,9 +145,11 @@ static void atl1c_reset_pcie(struct atl1c_hw *hw, u32 flag)
145 * Mask some pcie error bits 145 * Mask some pcie error bits
146 */ 146 */
147 pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ERR); 147 pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ERR);
148 pci_read_config_dword(pdev, pos + PCI_ERR_UNCOR_SEVER, &data); 148 if (pos) {
149 data &= ~(PCI_ERR_UNC_DLP | PCI_ERR_UNC_FCP); 149 pci_read_config_dword(pdev, pos + PCI_ERR_UNCOR_SEVER, &data);
150 pci_write_config_dword(pdev, pos + PCI_ERR_UNCOR_SEVER, data); 150 data &= ~(PCI_ERR_UNC_DLP | PCI_ERR_UNC_FCP);
151 pci_write_config_dword(pdev, pos + PCI_ERR_UNCOR_SEVER, data);
152 }
151 /* clear error status */ 153 /* clear error status */
152 pcie_capability_write_word(pdev, PCI_EXP_DEVSTA, 154 pcie_capability_write_word(pdev, PCI_EXP_DEVSTA,
153 PCI_EXP_DEVSTA_NFED | 155 PCI_EXP_DEVSTA_NFED |
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
index dad67905f4e2..eb105abcf0e7 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
@@ -1250,7 +1250,10 @@ struct bnx2x_slowpath {
1250 * Therefore, if they would have been defined in the same union, 1250 * Therefore, if they would have been defined in the same union,
1251 * data can get corrupted. 1251 * data can get corrupted.
1252 */ 1252 */
1253 struct afex_vif_list_ramrod_data func_afex_rdata; 1253 union {
1254 struct afex_vif_list_ramrod_data viflist_data;
1255 struct function_update_data func_update;
1256 } func_afex_rdata;
1254 1257
1255 /* used by dmae command executer */ 1258 /* used by dmae command executer */
1256 struct dmae_command dmae[MAX_DMAE_C]; 1259 struct dmae_command dmae[MAX_DMAE_C];
@@ -2501,4 +2504,6 @@ void bnx2x_set_local_cmng(struct bnx2x *bp);
2501#define MCPR_SCRATCH_BASE(bp) \ 2504#define MCPR_SCRATCH_BASE(bp) \
2502 (CHIP_IS_E1x(bp) ? MCP_REG_MCPR_SCRATCH : MCP_A_REG_MCPR_SCRATCH) 2505 (CHIP_IS_E1x(bp) ? MCP_REG_MCPR_SCRATCH : MCP_A_REG_MCPR_SCRATCH)
2503 2506
2507#define E1H_MAX_MF_SB_COUNT (HC_SB_MAX_SB_E1X/(E1HVN_MAX * PORT_MAX))
2508
2504#endif /* bnx2x.h */ 2509#endif /* bnx2x.h */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
index 20dcc02431ca..11fc79585491 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
@@ -3865,6 +3865,19 @@ static void bnx2x_warpcore_enable_AN_KR(struct bnx2x_phy *phy,
3865 3865
3866 bnx2x_warpcore_enable_AN_KR2(phy, params, vars); 3866 bnx2x_warpcore_enable_AN_KR2(phy, params, vars);
3867 } else { 3867 } else {
3868 /* Enable Auto-Detect to support 1G over CL37 as well */
3869 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
3870 MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X1, 0x10);
3871
3872 /* Force cl48 sync_status LOW to avoid getting stuck in CL73
3873 * parallel-detect loop when CL73 and CL37 are enabled.
3874 */
3875 CL22_WR_OVER_CL45(bp, phy, MDIO_REG_BANK_AER_BLOCK,
3876 MDIO_AER_BLOCK_AER_REG, 0);
3877 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
3878 MDIO_WC_REG_RXB_ANA_RX_CONTROL_PCI, 0x0800);
3879 bnx2x_set_aer_mmd(params, phy);
3880
3868 bnx2x_disable_kr2(params, vars, phy); 3881 bnx2x_disable_kr2(params, vars, phy);
3869 } 3882 }
3870 3883
@@ -8120,17 +8133,20 @@ static int bnx2x_get_edc_mode(struct bnx2x_phy *phy,
8120 *edc_mode = EDC_MODE_ACTIVE_DAC; 8133 *edc_mode = EDC_MODE_ACTIVE_DAC;
8121 else 8134 else
8122 check_limiting_mode = 1; 8135 check_limiting_mode = 1;
8123 } else if (copper_module_type & 8136 } else {
8124 SFP_EEPROM_FC_TX_TECH_BITMASK_COPPER_PASSIVE) { 8137 *edc_mode = EDC_MODE_PASSIVE_DAC;
8138 /* Even in case PASSIVE_DAC indication is not set,
8139 * treat it as a passive DAC cable, since some cables
8140 * don't have this indication.
8141 */
8142 if (copper_module_type &
8143 SFP_EEPROM_FC_TX_TECH_BITMASK_COPPER_PASSIVE) {
8125 DP(NETIF_MSG_LINK, 8144 DP(NETIF_MSG_LINK,
8126 "Passive Copper cable detected\n"); 8145 "Passive Copper cable detected\n");
8127 *edc_mode = 8146 } else {
8128 EDC_MODE_PASSIVE_DAC; 8147 DP(NETIF_MSG_LINK,
8129 } else { 8148 "Unknown copper-cable-type\n");
8130 DP(NETIF_MSG_LINK, 8149 }
8131 "Unknown copper-cable-type 0x%x !!!\n",
8132 copper_module_type);
8133 return -EINVAL;
8134 } 8150 }
8135 break; 8151 break;
8136 } 8152 }
@@ -10825,9 +10841,9 @@ static int bnx2x_54618se_config_init(struct bnx2x_phy *phy,
10825 (1<<11)); 10841 (1<<11));
10826 10842
10827 if (((phy->req_line_speed == SPEED_AUTO_NEG) && 10843 if (((phy->req_line_speed == SPEED_AUTO_NEG) &&
10828 (phy->speed_cap_mask & 10844 (phy->speed_cap_mask &
10829 PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)) || 10845 PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)) ||
10830 (phy->req_line_speed == SPEED_1000)) { 10846 (phy->req_line_speed == SPEED_1000)) {
10831 an_1000_val |= (1<<8); 10847 an_1000_val |= (1<<8);
10832 autoneg_val |= (1<<9 | 1<<12); 10848 autoneg_val |= (1<<9 | 1<<12);
10833 if (phy->req_duplex == DUPLEX_FULL) 10849 if (phy->req_duplex == DUPLEX_FULL)
@@ -10843,30 +10859,32 @@ static int bnx2x_54618se_config_init(struct bnx2x_phy *phy,
10843 0x09, 10859 0x09,
10844 &an_1000_val); 10860 &an_1000_val);
10845 10861
10846 /* Set 100 speed advertisement */ 10862 /* Advertise 10/100 link speed */
10847 if (((phy->req_line_speed == SPEED_AUTO_NEG) && 10863 if (phy->req_line_speed == SPEED_AUTO_NEG) {
10848 (phy->speed_cap_mask & 10864 if (phy->speed_cap_mask &
10849 (PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL | 10865 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF) {
10850 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF)))) { 10866 an_10_100_val |= (1<<5);
10851 an_10_100_val |= (1<<7); 10867 autoneg_val |= (1<<9 | 1<<12);
10852 /* Enable autoneg and restart autoneg for legacy speeds */ 10868 DP(NETIF_MSG_LINK, "Advertising 10M-HD\n");
10853 autoneg_val |= (1<<9 | 1<<12); 10869 }
10854 10870 if (phy->speed_cap_mask &
10855 if (phy->req_duplex == DUPLEX_FULL) 10871 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL) {
10856 an_10_100_val |= (1<<8);
10857 DP(NETIF_MSG_LINK, "Advertising 100M\n");
10858 }
10859
10860 /* Set 10 speed advertisement */
10861 if (((phy->req_line_speed == SPEED_AUTO_NEG) &&
10862 (phy->speed_cap_mask &
10863 (PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL |
10864 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF)))) {
10865 an_10_100_val |= (1<<5);
10866 autoneg_val |= (1<<9 | 1<<12);
10867 if (phy->req_duplex == DUPLEX_FULL)
10868 an_10_100_val |= (1<<6); 10872 an_10_100_val |= (1<<6);
10869 DP(NETIF_MSG_LINK, "Advertising 10M\n"); 10873 autoneg_val |= (1<<9 | 1<<12);
10874 DP(NETIF_MSG_LINK, "Advertising 10M-FD\n");
10875 }
10876 if (phy->speed_cap_mask &
10877 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF) {
10878 an_10_100_val |= (1<<7);
10879 autoneg_val |= (1<<9 | 1<<12);
10880 DP(NETIF_MSG_LINK, "Advertising 100M-HD\n");
10881 }
10882 if (phy->speed_cap_mask &
10883 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL) {
10884 an_10_100_val |= (1<<8);
10885 autoneg_val |= (1<<9 | 1<<12);
10886 DP(NETIF_MSG_LINK, "Advertising 100M-FD\n");
10887 }
10870 } 10888 }
10871 10889
10872 /* Only 10/100 are allowed to work in FORCE mode */ 10890 /* Only 10/100 are allowed to work in FORCE mode */
@@ -13342,6 +13360,10 @@ static u8 bnx2x_analyze_link_error(struct link_params *params,
13342 DP(NETIF_MSG_LINK, "Link changed:[%x %x]->%x\n", vars->link_up, 13360 DP(NETIF_MSG_LINK, "Link changed:[%x %x]->%x\n", vars->link_up,
13343 old_status, status); 13361 old_status, status);
13344 13362
13363 /* Do not touch the link in case physical link down */
13364 if ((vars->phy_flags & PHY_PHYSICAL_LINK_FLAG) == 0)
13365 return 1;
13366
13345 /* a. Update shmem->link_status accordingly 13367 /* a. Update shmem->link_status accordingly
13346 * b. Update link_vars->link_up 13368 * b. Update link_vars->link_up
13347 */ 13369 */
@@ -13550,7 +13572,7 @@ static void bnx2x_check_kr2_wa(struct link_params *params,
13550 */ 13572 */
13551 not_kr2_device = (((base_page & 0x8000) == 0) || 13573 not_kr2_device = (((base_page & 0x8000) == 0) ||
13552 (((base_page & 0x8000) && 13574 (((base_page & 0x8000) &&
13553 ((next_page & 0xe0) == 0x2)))); 13575 ((next_page & 0xe0) == 0x20))));
13554 13576
13555 /* In case KR2 is already disabled, check if we need to re-enable it */ 13577 /* In case KR2 is already disabled, check if we need to re-enable it */
13556 if (!(vars->link_attr_sync & LINK_ATTR_SYNC_KR2_ENABLE)) { 13578 if (!(vars->link_attr_sync & LINK_ATTR_SYNC_KR2_ENABLE)) {
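The one-character fix above matters more than it looks: masking next_page with 0xe0 keeps only bits 7:5, so the result is always a multiple of 0x20 and the old comparison against 0x2 could never be true, meaning the "not a KR2 device" test silently never matched. A minimal user-space sketch with an arbitrary example value:

#include <stdio.h>

int main(void)
{
        unsigned int next_page = 0x25;          /* example: bits 5, 2, 0 set */
        unsigned int masked = next_page & 0xe0; /* keeps bits 7:5 -> 0x20 */

        printf("masked = 0x%x\n", masked);
        printf("== 0x2  -> %d (old test, never true)\n", masked == 0x2);
        printf("== 0x20 -> %d (corrected test)\n", masked == 0x20);
        return 0;
}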
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index d3748bf3ac7b..18498fed520b 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -11517,9 +11517,9 @@ static int bnx2x_get_hwinfo(struct bnx2x *bp)
11517 } 11517 }
11518 } 11518 }
11519 11519
11520 /* adjust igu_sb_cnt to MF for E1x */ 11520 /* adjust igu_sb_cnt to MF for E1H */
11521 if (CHIP_IS_E1x(bp) && IS_MF(bp)) 11521 if (CHIP_IS_E1H(bp) && IS_MF(bp))
11522 bp->igu_sb_cnt /= E1HVN_MAX; 11522 bp->igu_sb_cnt = min_t(u8, bp->igu_sb_cnt, E1H_MAX_MF_SB_COUNT);
11523 11523
11524 /* port info */ 11524 /* port info */
11525 bnx2x_get_port_hwinfo(bp); 11525 bnx2x_get_port_hwinfo(bp);
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
index 08f8047188e9..2beb5430b876 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
@@ -7180,6 +7180,7 @@ The other bits are reserved and should be zero */
7180#define MDIO_WC_REG_RX1_PCI_CTRL 0x80ca 7180#define MDIO_WC_REG_RX1_PCI_CTRL 0x80ca
7181#define MDIO_WC_REG_RX2_PCI_CTRL 0x80da 7181#define MDIO_WC_REG_RX2_PCI_CTRL 0x80da
7182#define MDIO_WC_REG_RX3_PCI_CTRL 0x80ea 7182#define MDIO_WC_REG_RX3_PCI_CTRL 0x80ea
7183#define MDIO_WC_REG_RXB_ANA_RX_CONTROL_PCI 0x80fa
7183#define MDIO_WC_REG_XGXSBLK2_UNICORE_MODE_10G 0x8104 7184#define MDIO_WC_REG_XGXSBLK2_UNICORE_MODE_10G 0x8104
7184#define MDIO_WC_REG_XGXS_STATUS3 0x8129 7185#define MDIO_WC_REG_XGXS_STATUS3 0x8129
7185#define MDIO_WC_REG_PAR_DET_10G_STATUS 0x8130 7186#define MDIO_WC_REG_PAR_DET_10G_STATUS 0x8130
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
index babf7b954ae6..98cccd487fc2 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
@@ -2036,6 +2036,7 @@ static int bnx2x_vlan_mac_del_all(struct bnx2x *bp,
2036 struct bnx2x_vlan_mac_ramrod_params p; 2036 struct bnx2x_vlan_mac_ramrod_params p;
2037 struct bnx2x_exe_queue_obj *exeq = &o->exe_queue; 2037 struct bnx2x_exe_queue_obj *exeq = &o->exe_queue;
2038 struct bnx2x_exeq_elem *exeq_pos, *exeq_pos_n; 2038 struct bnx2x_exeq_elem *exeq_pos, *exeq_pos_n;
2039 unsigned long flags;
2039 int read_lock; 2040 int read_lock;
2040 int rc = 0; 2041 int rc = 0;
2041 2042
@@ -2044,8 +2045,9 @@ static int bnx2x_vlan_mac_del_all(struct bnx2x *bp,
2044 spin_lock_bh(&exeq->lock); 2045 spin_lock_bh(&exeq->lock);
2045 2046
2046 list_for_each_entry_safe(exeq_pos, exeq_pos_n, &exeq->exe_queue, link) { 2047 list_for_each_entry_safe(exeq_pos, exeq_pos_n, &exeq->exe_queue, link) {
2047 if (exeq_pos->cmd_data.vlan_mac.vlan_mac_flags == 2048 flags = exeq_pos->cmd_data.vlan_mac.vlan_mac_flags;
2048 *vlan_mac_flags) { 2049 if (BNX2X_VLAN_MAC_CMP_FLAGS(flags) ==
2050 BNX2X_VLAN_MAC_CMP_FLAGS(*vlan_mac_flags)) {
2049 rc = exeq->remove(bp, exeq->owner, exeq_pos); 2051 rc = exeq->remove(bp, exeq->owner, exeq_pos);
2050 if (rc) { 2052 if (rc) {
2051 BNX2X_ERR("Failed to remove command\n"); 2053 BNX2X_ERR("Failed to remove command\n");
@@ -2078,7 +2080,9 @@ static int bnx2x_vlan_mac_del_all(struct bnx2x *bp,
2078 return read_lock; 2080 return read_lock;
2079 2081
2080 list_for_each_entry(pos, &o->head, link) { 2082 list_for_each_entry(pos, &o->head, link) {
2081 if (pos->vlan_mac_flags == *vlan_mac_flags) { 2083 flags = pos->vlan_mac_flags;
2084 if (BNX2X_VLAN_MAC_CMP_FLAGS(flags) ==
2085 BNX2X_VLAN_MAC_CMP_FLAGS(*vlan_mac_flags)) {
2082 p.user_req.vlan_mac_flags = pos->vlan_mac_flags; 2086 p.user_req.vlan_mac_flags = pos->vlan_mac_flags;
2083 memcpy(&p.user_req.u, &pos->u, sizeof(pos->u)); 2087 memcpy(&p.user_req.u, &pos->u, sizeof(pos->u));
2084 rc = bnx2x_config_vlan_mac(bp, &p); 2088 rc = bnx2x_config_vlan_mac(bp, &p);
@@ -4380,8 +4384,11 @@ int bnx2x_config_rss(struct bnx2x *bp,
4380 struct bnx2x_raw_obj *r = &o->raw; 4384 struct bnx2x_raw_obj *r = &o->raw;
4381 4385
4382 /* Do nothing if only driver cleanup was requested */ 4386 /* Do nothing if only driver cleanup was requested */
4383 if (test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) 4387 if (test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) {
4388 DP(BNX2X_MSG_SP, "Not configuring RSS ramrod_flags=%lx\n",
4389 p->ramrod_flags);
4384 return 0; 4390 return 0;
4391 }
4385 4392
4386 r->set_pending(r); 4393 r->set_pending(r);
4387 4394
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
index 658f4e33abf9..6a53c15c85a3 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
@@ -266,6 +266,13 @@ enum {
266 BNX2X_DONT_CONSUME_CAM_CREDIT, 266 BNX2X_DONT_CONSUME_CAM_CREDIT,
267 BNX2X_DONT_CONSUME_CAM_CREDIT_DEST, 267 BNX2X_DONT_CONSUME_CAM_CREDIT_DEST,
268}; 268};
269/* When looking for matching filters, some flags are not interesting */
270#define BNX2X_VLAN_MAC_CMP_MASK (1 << BNX2X_UC_LIST_MAC | \
271 1 << BNX2X_ETH_MAC | \
272 1 << BNX2X_ISCSI_ETH_MAC | \
273 1 << BNX2X_NETQ_ETH_MAC)
274#define BNX2X_VLAN_MAC_CMP_FLAGS(flags) \
275 ((flags) & BNX2X_VLAN_MAC_CMP_MASK)
269 276
270struct bnx2x_vlan_mac_ramrod_params { 277struct bnx2x_vlan_mac_ramrod_params {
271 /* Object to run the command from */ 278 /* Object to run the command from */
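The new BNX2X_VLAN_MAC_CMP_FLAGS() comparisons in bnx2x_sp.c exist because two flag words that differ only in bits outside BNX2X_VLAN_MAC_CMP_MASK (the CAM-credit bits, for instance) still describe the same class of filter and must be treated as a match when deleting all entries. A small sketch with stand-in bit positions; the real values come from the enum above:

#include <stdio.h>

/* Stand-in bit positions mirroring the enum in bnx2x_sp.h. */
enum { UC_LIST_MAC, ETH_MAC, ISCSI_ETH_MAC, NETQ_ETH_MAC,
       DONT_CONSUME_CAM_CREDIT };

#define CMP_MASK ((1 << UC_LIST_MAC) | (1 << ETH_MAC) | \
                  (1 << ISCSI_ETH_MAC) | (1 << NETQ_ETH_MAC))
#define CMP_FLAGS(f) ((f) & CMP_MASK)

int main(void)
{
        unsigned long queued = (1 << ETH_MAC) | (1 << DONT_CONSUME_CAM_CREDIT);
        unsigned long wanted = (1 << ETH_MAC);

        printf("raw compare:    %d\n", queued == wanted);  /* 0: misses   */
        printf("masked compare: %d\n",
               CMP_FLAGS(queued) == CMP_FLAGS(wanted));    /* 1: matches  */
        return 0;
}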
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
index 6fe52d301dfe..31ab924600c1 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
@@ -1213,6 +1213,11 @@ static void bnx2x_vfop_rxmode(struct bnx2x *bp, struct bnx2x_virtf *vf)
1213 /* next state */ 1213 /* next state */
1214 vfop->state = BNX2X_VFOP_RXMODE_DONE; 1214 vfop->state = BNX2X_VFOP_RXMODE_DONE;
1215 1215
1216 /* record the accept flags in vfdb so hypervisor can modify them
1217 * if necessary
1218 */
1219 bnx2x_vfq(vf, ramrod->cl_id - vf->igu_base_id, accept_flags) =
1220 ramrod->rx_accept_flags;
1216 vfop->rc = bnx2x_config_rx_mode(bp, ramrod); 1221 vfop->rc = bnx2x_config_rx_mode(bp, ramrod);
1217 bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE); 1222 bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);
1218op_err: 1223op_err:
@@ -1228,39 +1233,43 @@ op_pending:
1228 return; 1233 return;
1229} 1234}
1230 1235
1236static void bnx2x_vf_prep_rx_mode(struct bnx2x *bp, u8 qid,
1237 struct bnx2x_rx_mode_ramrod_params *ramrod,
1238 struct bnx2x_virtf *vf,
1239 unsigned long accept_flags)
1240{
1241 struct bnx2x_vf_queue *vfq = vfq_get(vf, qid);
1242
1243 memset(ramrod, 0, sizeof(*ramrod));
1244 ramrod->cid = vfq->cid;
1245 ramrod->cl_id = vfq_cl_id(vf, vfq);
1246 ramrod->rx_mode_obj = &bp->rx_mode_obj;
1247 ramrod->func_id = FW_VF_HANDLE(vf->abs_vfid);
1248 ramrod->rx_accept_flags = accept_flags;
1249 ramrod->tx_accept_flags = accept_flags;
1250 ramrod->pstate = &vf->filter_state;
1251 ramrod->state = BNX2X_FILTER_RX_MODE_PENDING;
1252
1253 set_bit(BNX2X_FILTER_RX_MODE_PENDING, &vf->filter_state);
1254 set_bit(RAMROD_RX, &ramrod->ramrod_flags);
1255 set_bit(RAMROD_TX, &ramrod->ramrod_flags);
1256
1257 ramrod->rdata = bnx2x_vf_sp(bp, vf, rx_mode_rdata.e2);
1258 ramrod->rdata_mapping = bnx2x_vf_sp_map(bp, vf, rx_mode_rdata.e2);
1259}
1260
1231int bnx2x_vfop_rxmode_cmd(struct bnx2x *bp, 1261int bnx2x_vfop_rxmode_cmd(struct bnx2x *bp,
1232 struct bnx2x_virtf *vf, 1262 struct bnx2x_virtf *vf,
1233 struct bnx2x_vfop_cmd *cmd, 1263 struct bnx2x_vfop_cmd *cmd,
1234 int qid, unsigned long accept_flags) 1264 int qid, unsigned long accept_flags)
1235{ 1265{
1236 struct bnx2x_vf_queue *vfq = vfq_get(vf, qid);
1237 struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf); 1266 struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
1238 1267
1239 if (vfop) { 1268 if (vfop) {
1240 struct bnx2x_rx_mode_ramrod_params *ramrod = 1269 struct bnx2x_rx_mode_ramrod_params *ramrod =
1241 &vf->op_params.rx_mode; 1270 &vf->op_params.rx_mode;
1242 1271
1243 memset(ramrod, 0, sizeof(*ramrod)); 1272 bnx2x_vf_prep_rx_mode(bp, qid, ramrod, vf, accept_flags);
1244
1245 /* Prepare ramrod parameters */
1246 ramrod->cid = vfq->cid;
1247 ramrod->cl_id = vfq_cl_id(vf, vfq);
1248 ramrod->rx_mode_obj = &bp->rx_mode_obj;
1249 ramrod->func_id = FW_VF_HANDLE(vf->abs_vfid);
1250
1251 ramrod->rx_accept_flags = accept_flags;
1252 ramrod->tx_accept_flags = accept_flags;
1253 ramrod->pstate = &vf->filter_state;
1254 ramrod->state = BNX2X_FILTER_RX_MODE_PENDING;
1255
1256 set_bit(BNX2X_FILTER_RX_MODE_PENDING, &vf->filter_state);
1257 set_bit(RAMROD_RX, &ramrod->ramrod_flags);
1258 set_bit(RAMROD_TX, &ramrod->ramrod_flags);
1259
1260 ramrod->rdata =
1261 bnx2x_vf_sp(bp, vf, rx_mode_rdata.e2);
1262 ramrod->rdata_mapping =
1263 bnx2x_vf_sp_map(bp, vf, rx_mode_rdata.e2);
1264 1273
1265 bnx2x_vfop_opset(BNX2X_VFOP_RXMODE_CONFIG, 1274 bnx2x_vfop_opset(BNX2X_VFOP_RXMODE_CONFIG,
1266 bnx2x_vfop_rxmode, cmd->done); 1275 bnx2x_vfop_rxmode, cmd->done);
@@ -3213,13 +3222,16 @@ int bnx2x_enable_sriov(struct bnx2x *bp)
3213 bnx2x_iov_static_resc(bp, vf); 3222 bnx2x_iov_static_resc(bp, vf);
3214 } 3223 }
3215 3224
3216 /* prepare msix vectors in VF configuration space */ 3225 /* prepare msix vectors in VF configuration space - the value in the
3226 * PCI configuration space should be the index of the last entry,
3227 * namely one less than the actual size of the table
3228 */
3217 for (vf_idx = first_vf; vf_idx < first_vf + req_vfs; vf_idx++) { 3229 for (vf_idx = first_vf; vf_idx < first_vf + req_vfs; vf_idx++) {
3218 bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf_idx)); 3230 bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf_idx));
3219 REG_WR(bp, PCICFG_OFFSET + GRC_CONFIG_REG_VF_MSIX_CONTROL, 3231 REG_WR(bp, PCICFG_OFFSET + GRC_CONFIG_REG_VF_MSIX_CONTROL,
3220 num_vf_queues); 3232 num_vf_queues - 1);
3221 DP(BNX2X_MSG_IOV, "set msix vec num in VF %d cfg space to %d\n", 3233 DP(BNX2X_MSG_IOV, "set msix vec num in VF %d cfg space to %d\n",
3222 vf_idx, num_vf_queues); 3234 vf_idx, num_vf_queues - 1);
3223 } 3235 }
3224 bnx2x_pretend_func(bp, BP_ABS_FUNC(bp)); 3236 bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
3225 3237
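Writing num_vf_queues - 1 follows the PCI MSI-X convention the new comment spells out: the Table Size field of the MSI-X Message Control register holds the index of the last table entry, one less than the number of vectors. A tiny sketch of that encode/decode convention (illustrative helpers, not driver code):

#include <stdio.h>

/* MSI-X Table Size is encoded as N-1 ("index of the last entry"). */
static unsigned int msix_encode(unsigned int nvec)  { return nvec - 1; }
static unsigned int msix_decode(unsigned int field) { return field + 1; }

int main(void)
{
        unsigned int num_vf_queues = 4;  /* example vector count */
        unsigned int field = msix_encode(num_vf_queues);

        printf("value written to config space: %u\n", field);  /* 3 */
        printf("vectors a reader infers: %u\n", msix_decode(field));
        return 0;
}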
@@ -3447,10 +3459,18 @@ out:
3447 3459
3448int bnx2x_set_vf_vlan(struct net_device *dev, int vfidx, u16 vlan, u8 qos) 3460int bnx2x_set_vf_vlan(struct net_device *dev, int vfidx, u16 vlan, u8 qos)
3449{ 3461{
3462 struct bnx2x_queue_state_params q_params = {NULL};
3463 struct bnx2x_vlan_mac_ramrod_params ramrod_param;
3464 struct bnx2x_queue_update_params *update_params;
3465 struct pf_vf_bulletin_content *bulletin = NULL;
3466 struct bnx2x_rx_mode_ramrod_params rx_ramrod;
3450 struct bnx2x *bp = netdev_priv(dev); 3467 struct bnx2x *bp = netdev_priv(dev);
3451 int rc, q_logical_state; 3468 struct bnx2x_vlan_mac_obj *vlan_obj;
3469 unsigned long vlan_mac_flags = 0;
3470 unsigned long ramrod_flags = 0;
3452 struct bnx2x_virtf *vf = NULL; 3471 struct bnx2x_virtf *vf = NULL;
3453 struct pf_vf_bulletin_content *bulletin = NULL; 3472 unsigned long accept_flags;
3473 int rc;
3454 3474
3455 /* sanity and init */ 3475 /* sanity and init */
3456 rc = bnx2x_vf_ndo_prep(bp, vfidx, &vf, &bulletin); 3476 rc = bnx2x_vf_ndo_prep(bp, vfidx, &vf, &bulletin);
@@ -3468,104 +3488,118 @@ int bnx2x_set_vf_vlan(struct net_device *dev, int vfidx, u16 vlan, u8 qos)
3468 /* update PF's copy of the VF's bulletin. No point in posting the vlan 3488 /* update PF's copy of the VF's bulletin. No point in posting the vlan
3469 * to the VF since it doesn't have anything to do with it. But it is useful 3489 * to the VF since it doesn't have anything to do with it. But it is useful
3470 * to store it here in case the VF is not up yet and we can only 3490 * to store it here in case the VF is not up yet and we can only
3471 * configure the vlan later when it does. 3491 * configure the vlan later when it does. Treat vlan id 0 as remove the
3492 * Host tag.
3472 */ 3493 */
3473 bulletin->valid_bitmap |= 1 << VLAN_VALID; 3494 if (vlan > 0)
3495 bulletin->valid_bitmap |= 1 << VLAN_VALID;
3496 else
3497 bulletin->valid_bitmap &= ~(1 << VLAN_VALID);
3474 bulletin->vlan = vlan; 3498 bulletin->vlan = vlan;
3475 3499
3476 /* is vf initialized and queue set up? */ 3500 /* is vf initialized and queue set up? */
3477 q_logical_state = 3501 if (vf->state != VF_ENABLED ||
3478 bnx2x_get_q_logical_state(bp, &bnx2x_leading_vfq(vf, sp_obj)); 3502 bnx2x_get_q_logical_state(bp, &bnx2x_leading_vfq(vf, sp_obj)) !=
3479 if (vf->state == VF_ENABLED && 3503 BNX2X_Q_LOGICAL_STATE_ACTIVE)
3480 q_logical_state == BNX2X_Q_LOGICAL_STATE_ACTIVE) { 3504 return rc;
3481 /* configure the vlan in device on this vf's queue */
3482 unsigned long ramrod_flags = 0;
3483 unsigned long vlan_mac_flags = 0;
3484 struct bnx2x_vlan_mac_obj *vlan_obj =
3485 &bnx2x_leading_vfq(vf, vlan_obj);
3486 struct bnx2x_vlan_mac_ramrod_params ramrod_param;
3487 struct bnx2x_queue_state_params q_params = {NULL};
3488 struct bnx2x_queue_update_params *update_params;
3489 3505
3490 rc = validate_vlan_mac(bp, &bnx2x_leading_vfq(vf, mac_obj)); 3506 /* configure the vlan in device on this vf's queue */
3491 if (rc) 3507 vlan_obj = &bnx2x_leading_vfq(vf, vlan_obj);
3492 return rc; 3508 rc = validate_vlan_mac(bp, &bnx2x_leading_vfq(vf, mac_obj));
3493 memset(&ramrod_param, 0, sizeof(ramrod_param)); 3509 if (rc)
3510 return rc;
3494 3511
3495 /* must lock vfpf channel to protect against vf flows */ 3512 /* must lock vfpf channel to protect against vf flows */
3496 bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_VLAN); 3513 bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_VLAN);
3497 3514
3498 /* remove existing vlans */ 3515 /* remove existing vlans */
3499 __set_bit(RAMROD_COMP_WAIT, &ramrod_flags); 3516 __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
3500 rc = vlan_obj->delete_all(bp, vlan_obj, &vlan_mac_flags, 3517 rc = vlan_obj->delete_all(bp, vlan_obj, &vlan_mac_flags,
3501 &ramrod_flags); 3518 &ramrod_flags);
3502 if (rc) { 3519 if (rc) {
3503 BNX2X_ERR("failed to delete vlans\n"); 3520 BNX2X_ERR("failed to delete vlans\n");
3504 rc = -EINVAL; 3521 rc = -EINVAL;
3505 goto out; 3522 goto out;
3506 } 3523 }
3524
3525 /* need to remove/add the VF's accept_any_vlan bit */
3526 accept_flags = bnx2x_leading_vfq(vf, accept_flags);
3527 if (vlan)
3528 clear_bit(BNX2X_ACCEPT_ANY_VLAN, &accept_flags);
3529 else
3530 set_bit(BNX2X_ACCEPT_ANY_VLAN, &accept_flags);
3531
3532 bnx2x_vf_prep_rx_mode(bp, LEADING_IDX, &rx_ramrod, vf,
3533 accept_flags);
3534 bnx2x_leading_vfq(vf, accept_flags) = accept_flags;
3535 bnx2x_config_rx_mode(bp, &rx_ramrod);
3536
3537 /* configure the new vlan to device */
3538 memset(&ramrod_param, 0, sizeof(ramrod_param));
3539 __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
3540 ramrod_param.vlan_mac_obj = vlan_obj;
3541 ramrod_param.ramrod_flags = ramrod_flags;
3542 set_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
3543 &ramrod_param.user_req.vlan_mac_flags);
3544 ramrod_param.user_req.u.vlan.vlan = vlan;
3545 ramrod_param.user_req.cmd = BNX2X_VLAN_MAC_ADD;
3546 rc = bnx2x_config_vlan_mac(bp, &ramrod_param);
3547 if (rc) {
3548 BNX2X_ERR("failed to configure vlan\n");
3549 rc = -EINVAL;
3550 goto out;
3551 }
3507 3552
3508 /* send queue update ramrod to configure default vlan and silent 3553 /* send queue update ramrod to configure default vlan and silent
3509 * vlan removal 3554 * vlan removal
3555 */
3556 __set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
3557 q_params.cmd = BNX2X_Q_CMD_UPDATE;
3558 q_params.q_obj = &bnx2x_leading_vfq(vf, sp_obj);
3559 update_params = &q_params.params.update;
3560 __set_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN_CHNG,
3561 &update_params->update_flags);
3562 __set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM_CHNG,
3563 &update_params->update_flags);
3564 if (vlan == 0) {
3565 /* if vlan is 0 then we want to leave the VF traffic
3566 * untagged, and leave the incoming traffic untouched
3567 * (i.e. do not remove any vlan tags).
3510 */ 3568 */
3511 __set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags); 3569 __clear_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN,
3512 q_params.cmd = BNX2X_Q_CMD_UPDATE; 3570 &update_params->update_flags);
3513 q_params.q_obj = &bnx2x_leading_vfq(vf, sp_obj); 3571 __clear_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM,
3514 update_params = &q_params.params.update; 3572 &update_params->update_flags);
3515 __set_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN_CHNG, 3573 } else {
3574 /* configure default vlan to vf queue and set silent
3575 * vlan removal (the vf remains unaware of this vlan).
3576 */
3577 __set_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN,
3516 &update_params->update_flags); 3578 &update_params->update_flags);
3517 __set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM_CHNG, 3579 __set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM,
3518 &update_params->update_flags); 3580 &update_params->update_flags);
3581 update_params->def_vlan = vlan;
3582 update_params->silent_removal_value =
3583 vlan & VLAN_VID_MASK;
3584 update_params->silent_removal_mask = VLAN_VID_MASK;
3585 }
3519 3586
3520 if (vlan == 0) { 3587 /* Update the Queue state */
3521 /* if vlan is 0 then we want to leave the VF traffic 3588 rc = bnx2x_queue_state_change(bp, &q_params);
3522 * untagged, and leave the incoming traffic untouched 3589 if (rc) {
3523 * (i.e. do not remove any vlan tags). 3590 BNX2X_ERR("Failed to configure default VLAN\n");
3524 */ 3591 goto out;
3525 __clear_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN, 3592 }
3526 &update_params->update_flags);
3527 __clear_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM,
3528 &update_params->update_flags);
3529 } else {
3530 /* configure the new vlan to device */
3531 __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
3532 ramrod_param.vlan_mac_obj = vlan_obj;
3533 ramrod_param.ramrod_flags = ramrod_flags;
3534 ramrod_param.user_req.u.vlan.vlan = vlan;
3535 ramrod_param.user_req.cmd = BNX2X_VLAN_MAC_ADD;
3536 rc = bnx2x_config_vlan_mac(bp, &ramrod_param);
3537 if (rc) {
3538 BNX2X_ERR("failed to configure vlan\n");
3539 rc = -EINVAL;
3540 goto out;
3541 }
3542
3543 /* configure default vlan to vf queue and set silent
3544 * vlan removal (the vf remains unaware of this vlan).
3545 */
3546 update_params = &q_params.params.update;
3547 __set_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN,
3548 &update_params->update_flags);
3549 __set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM,
3550 &update_params->update_flags);
3551 update_params->def_vlan = vlan;
3552 }
3553 3593
3554 /* Update the Queue state */
3555 rc = bnx2x_queue_state_change(bp, &q_params);
3556 if (rc) {
3557 BNX2X_ERR("Failed to configure default VLAN\n");
3558 goto out;
3559 }
3560 3594
3561 /* clear the flag indicating that this VF needs its vlan 3595 /* clear the flag indicating that this VF needs its vlan
3562 * (will only be set if the HV configured the Vlan before vf was 3596 * (will only be set if the HV configured the Vlan before vf was
3563 * up and we were called because the VF came up later 3597 * up and we were called because the VF came up later
3564 */ 3598 */
3565out: 3599out:
3566 vf->cfg_flags &= ~VF_CFG_VLAN; 3600 vf->cfg_flags &= ~VF_CFG_VLAN;
3567 bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_VLAN); 3601 bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_VLAN);
3568 } 3602
3569 return rc; 3603 return rc;
3570} 3604}
3571 3605
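The rewritten bnx2x_set_vf_vlan() pivots on a single rule: vlan id 0 means "remove the host tag", so the VF keeps accepting any VLAN and no default tag or silent removal is installed, while a non-zero id installs a default VLAN that is silently stripped on receive, invisible to the VF. A condensed sketch of just that branch logic, with booleans standing in for the real ramrod and queue-update flags:

#include <stdbool.h>
#include <stdio.h>

/* Condensed decision logic; the driver sets bits in ramrod structures. */
static void configure_vf_vlan(unsigned short vlan)
{
        bool accept_any_vlan = (vlan == 0); /* no host tag: pass all VLANs  */
        bool default_vlan_en = (vlan != 0); /* tag egress with host VLAN    */
        bool silent_vlan_rem = (vlan != 0); /* strip on ingress, VF unaware */

        printf("vlan=%u: accept_any=%d default_en=%d silent_rem=%d\n",
               vlan, accept_any_vlan, default_vlan_en, silent_vlan_rem);
}

int main(void)
{
        configure_vf_vlan(0);    /* leave VF traffic untagged/untouched */
        configure_vf_vlan(100);  /* transparent host tag 100 */
        return 0;
}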
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
index a5c84a7d454c..d72ab7e24de0 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
@@ -74,6 +74,7 @@ struct bnx2x_vf_queue {
74 /* VLANs object */ 74 /* VLANs object */
75 struct bnx2x_vlan_mac_obj vlan_obj; 75 struct bnx2x_vlan_mac_obj vlan_obj;
76 atomic_t vlan_count; /* 0 means vlan-0 is set ~ untagged */ 76 atomic_t vlan_count; /* 0 means vlan-0 is set ~ untagged */
77 unsigned long accept_flags; /* last accept flags configured */
77 78
78 /* Queue Slow-path State object */ 79 /* Queue Slow-path State object */
79 struct bnx2x_queue_sp_obj sp_obj; 80 struct bnx2x_queue_sp_obj sp_obj;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
index e5f7985a372c..1b1ad31b4553 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
@@ -208,7 +208,7 @@ static int bnx2x_get_vf_id(struct bnx2x *bp, u32 *vf_id)
208 return -EINVAL; 208 return -EINVAL;
209 } 209 }
210 210
211 BNX2X_ERR("valid ME register value: 0x%08x\n", me_reg); 211 DP(BNX2X_MSG_IOV, "valid ME register value: 0x%08x\n", me_reg);
212 212
213 *vf_id = (me_reg & ME_REG_VF_NUM_MASK) >> ME_REG_VF_NUM_SHIFT; 213 *vf_id = (me_reg & ME_REG_VF_NUM_MASK) >> ME_REG_VF_NUM_SHIFT;
214 214
@@ -1610,6 +1610,8 @@ static void bnx2x_vfop_mbx_qfilters(struct bnx2x *bp, struct bnx2x_virtf *vf)
1610 1610
1611 if (msg->flags & VFPF_SET_Q_FILTERS_RX_MASK_CHANGED) { 1611 if (msg->flags & VFPF_SET_Q_FILTERS_RX_MASK_CHANGED) {
1612 unsigned long accept = 0; 1612 unsigned long accept = 0;
1613 struct pf_vf_bulletin_content *bulletin =
1614 BP_VF_BULLETIN(bp, vf->index);
1613 1615
1614 /* convert VF-PF if mask to bnx2x accept flags */ 1616 /* convert VF-PF if mask to bnx2x accept flags */
1615 if (msg->rx_mask & VFPF_RX_MASK_ACCEPT_MATCHED_UNICAST) 1617 if (msg->rx_mask & VFPF_RX_MASK_ACCEPT_MATCHED_UNICAST)
@@ -1629,9 +1631,11 @@ static void bnx2x_vfop_mbx_qfilters(struct bnx2x *bp, struct bnx2x_virtf *vf)
1629 __set_bit(BNX2X_ACCEPT_BROADCAST, &accept); 1631 __set_bit(BNX2X_ACCEPT_BROADCAST, &accept);
1630 1632
1631 /* A packet arriving at the vf's mac should be accepted 1633 /* A packet arriving at the vf's mac should be accepted
1632 * with any vlan 1634 * with any vlan, unless a vlan has already been
1635 * configured.
1633 */ 1636 */
1634 __set_bit(BNX2X_ACCEPT_ANY_VLAN, &accept); 1637 if (!(bulletin->valid_bitmap & (1 << VLAN_VALID)))
1638 __set_bit(BNX2X_ACCEPT_ANY_VLAN, &accept);
1635 1639
1636 /* set rx-mode */ 1640 /* set rx-mode */
1637 rc = bnx2x_vfop_rxmode_cmd(bp, vf, &cmd, 1641 rc = bnx2x_vfop_rxmode_cmd(bp, vf, &cmd,
@@ -1722,6 +1726,21 @@ static void bnx2x_vf_mbx_set_q_filters(struct bnx2x *bp,
1722 goto response; 1726 goto response;
1723 } 1727 }
1724 } 1728 }
1729 /* if vlan was set by hypervisor we don't allow guest to config vlan */
1730 if (bulletin->valid_bitmap & 1 << VLAN_VALID) {
1731 int i;
1732
1733 /* search for vlan filters */
1734 for (i = 0; i < filters->n_mac_vlan_filters; i++) {
1735 if (filters->filters[i].flags &
1736 VFPF_Q_FILTER_VLAN_TAG_VALID) {
1737 BNX2X_ERR("VF[%d] attempted to configure vlan but one was already set by Hypervisor. Aborting request\n",
1738 vf->abs_vfid);
1739 vf->op_rc = -EPERM;
1740 goto response;
1741 }
1742 }
1743 }
1725 1744
1726 /* verify vf_qid */ 1745 /* verify vf_qid */
1727 if (filters->vf_qid > vf_rxq_count(vf)) 1746 if (filters->vf_qid > vf_rxq_count(vf))
@@ -1817,6 +1836,9 @@ static void bnx2x_vf_mbx_update_rss(struct bnx2x *bp, struct bnx2x_virtf *vf,
1817 vf_op_params->rss_result_mask = rss_tlv->rss_result_mask; 1836 vf_op_params->rss_result_mask = rss_tlv->rss_result_mask;
1818 1837
1819 /* flags handled individually for backward/forward compatibility */ 1838 /* flags handled individually for backward/forward compatibility */
1839 vf_op_params->rss_flags = 0;
1840 vf_op_params->ramrod_flags = 0;
1841
1820 if (rss_tlv->rss_flags & VFPF_RSS_MODE_DISABLED) 1842 if (rss_tlv->rss_flags & VFPF_RSS_MODE_DISABLED)
1821 __set_bit(BNX2X_RSS_MODE_DISABLED, &vf_op_params->rss_flags); 1843 __set_bit(BNX2X_RSS_MODE_DISABLED, &vf_op_params->rss_flags);
1822 if (rss_tlv->rss_flags & VFPF_RSS_MODE_REGULAR) 1844 if (rss_tlv->rss_flags & VFPF_RSS_MODE_REGULAR)
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index d88ef551dfcd..c37e9f27ff6d 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -7638,7 +7638,7 @@ static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
7638{ 7638{
7639 u32 base = (u32) mapping & 0xffffffff; 7639 u32 base = (u32) mapping & 0xffffffff;
7640 7640
7641 return (base > 0xffffdcc0) && (base + len + 8 < base); 7641 return base + len + 8 < base;
7642} 7642}
7643 7643
7644/* Test for TSO DMA buffers that cross into regions which are within MSS bytes 7644/* Test for TSO DMA buffers that cross into regions which are within MSS bytes
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
index 17fe50b91523..b97e35c33d17 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
@@ -228,6 +228,25 @@ struct tp_params {
228 228
229 uint32_t dack_re; /* DACK timer resolution */ 229 uint32_t dack_re; /* DACK timer resolution */
230 unsigned short tx_modq[NCHAN]; /* channel to modulation queue map */ 230 unsigned short tx_modq[NCHAN]; /* channel to modulation queue map */
231
232 u32 vlan_pri_map; /* cached TP_VLAN_PRI_MAP */
233 u32 ingress_config; /* cached TP_INGRESS_CONFIG */
234
235 /* TP_VLAN_PRI_MAP Compressed Filter Tuple field offsets. This is a
236 * subset of the set of fields which may be present in the Compressed
237 * Filter Tuple portion of filters and TCP TCB connections. The
238 * fields which are present are controlled by the TP_VLAN_PRI_MAP.
239 * Since a variable number of fields may or may not be present, their
240 * shifted field positions within the Compressed Filter Tuple may
241 * vary, or not even be present if the field isn't selected in
242 * TP_VLAN_PRI_MAP. Since some of these fields are needed in various
243 * places we store their offsets here, or a -1 if the field isn't
244 * present.
245 */
246 int vlan_shift;
247 int vnic_shift;
248 int port_shift;
249 int protocol_shift;
231}; 250};
232 251
233struct vpd_params { 252struct vpd_params {
@@ -925,6 +944,8 @@ int t4_prep_fw(struct adapter *adap, struct fw_info *fw_info,
925 const u8 *fw_data, unsigned int fw_size, 944 const u8 *fw_data, unsigned int fw_size,
926 struct fw_hdr *card_fw, enum dev_state state, int *reset); 945 struct fw_hdr *card_fw, enum dev_state state, int *reset);
927int t4_prep_adapter(struct adapter *adapter); 946int t4_prep_adapter(struct adapter *adapter);
947int t4_init_tp_params(struct adapter *adap);
948int t4_filter_field_shift(const struct adapter *adap, int filter_sel);
928int t4_port_init(struct adapter *adap, int mbox, int pf, int vf); 949int t4_port_init(struct adapter *adap, int mbox, int pf, int vf);
929void t4_fatal_err(struct adapter *adapter); 950void t4_fatal_err(struct adapter *adapter);
930int t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid, 951int t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid,
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index d6b12e035a7d..fff02ed1295e 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -2986,7 +2986,14 @@ int cxgb4_alloc_stid(struct tid_info *t, int family, void *data)
2986 if (stid >= 0) { 2986 if (stid >= 0) {
2987 t->stid_tab[stid].data = data; 2987 t->stid_tab[stid].data = data;
2988 stid += t->stid_base; 2988 stid += t->stid_base;
2989 t->stids_in_use++; 2989 /* IPv6 requires max of 520 bits or 16 cells in TCAM
2990 * This is equivalent to 4 TIDs. With CLIP enabled it
2991 * needs 2 TIDs.
2992 */
2993 if (family == PF_INET)
2994 t->stids_in_use++;
2995 else
2996 t->stids_in_use += 4;
2990 } 2997 }
2991 spin_unlock_bh(&t->stid_lock); 2998 spin_unlock_bh(&t->stid_lock);
2992 return stid; 2999 return stid;
@@ -3012,7 +3019,8 @@ int cxgb4_alloc_sftid(struct tid_info *t, int family, void *data)
3012 } 3019 }
3013 if (stid >= 0) { 3020 if (stid >= 0) {
3014 t->stid_tab[stid].data = data; 3021 t->stid_tab[stid].data = data;
3015 stid += t->stid_base; 3022 stid -= t->nstids;
3023 stid += t->sftid_base;
3016 t->stids_in_use++; 3024 t->stids_in_use++;
3017 } 3025 }
3018 spin_unlock_bh(&t->stid_lock); 3026 spin_unlock_bh(&t->stid_lock);
@@ -3024,14 +3032,24 @@ EXPORT_SYMBOL(cxgb4_alloc_sftid);
3024 */ 3032 */
3025void cxgb4_free_stid(struct tid_info *t, unsigned int stid, int family) 3033void cxgb4_free_stid(struct tid_info *t, unsigned int stid, int family)
3026{ 3034{
3027 stid -= t->stid_base; 3035 /* Is it a server filter TID? */
3036 if (t->nsftids && (stid >= t->sftid_base)) {
3037 stid -= t->sftid_base;
3038 stid += t->nstids;
3039 } else {
3040 stid -= t->stid_base;
3041 }
3042
3028 spin_lock_bh(&t->stid_lock); 3043 spin_lock_bh(&t->stid_lock);
3029 if (family == PF_INET) 3044 if (family == PF_INET)
3030 __clear_bit(stid, t->stid_bmap); 3045 __clear_bit(stid, t->stid_bmap);
3031 else 3046 else
3032 bitmap_release_region(t->stid_bmap, stid, 2); 3047 bitmap_release_region(t->stid_bmap, stid, 2);
3033 t->stid_tab[stid].data = NULL; 3048 t->stid_tab[stid].data = NULL;
3034 t->stids_in_use--; 3049 if (family == PF_INET)
3050 t->stids_in_use--;
3051 else
3052 t->stids_in_use -= 4;
3035 spin_unlock_bh(&t->stid_lock); 3053 spin_unlock_bh(&t->stid_lock);
3036} 3054}
3037EXPORT_SYMBOL(cxgb4_free_stid); 3055EXPORT_SYMBOL(cxgb4_free_stid);
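The +/-4 accounting mirrors how the stid bitmap is carved up: an IPv4 server consumes a single bit, while an IPv6 server (520 TCAM bits, per the comment above) is handled as an order-2 region, and bitmap_release_region(..., 2) frees 2^2 = 4 aligned bits. A sketch of the order arithmetic, user-space and illustrative only:

#include <stdio.h>

/* bitmap_{allocate,release}_region() operate on 2^order aligned bits:
 * order 0 -> 1 TID (IPv4 server), order 2 -> 4 TIDs (IPv6 server).
 */
static int region_tids(int order) { return 1 << order; }

int main(void)
{
        int stids_in_use = 0;

        stids_in_use += region_tids(0);  /* IPv4 server: +1 */
        stids_in_use += region_tids(2);  /* IPv6 server: +4 */
        printf("in use: %d\n", stids_in_use);            /* 5 */

        stids_in_use -= region_tids(2);  /* free must mirror the alloc */
        printf("after IPv6 free: %d\n", stids_in_use);   /* 1 */
        return 0;
}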
@@ -3134,6 +3152,7 @@ static int tid_init(struct tid_info *t)
3134 size_t size; 3152 size_t size;
3135 unsigned int stid_bmap_size; 3153 unsigned int stid_bmap_size;
3136 unsigned int natids = t->natids; 3154 unsigned int natids = t->natids;
3155 struct adapter *adap = container_of(t, struct adapter, tids);
3137 3156
3138 stid_bmap_size = BITS_TO_LONGS(t->nstids + t->nsftids); 3157 stid_bmap_size = BITS_TO_LONGS(t->nstids + t->nsftids);
3139 size = t->ntids * sizeof(*t->tid_tab) + 3158 size = t->ntids * sizeof(*t->tid_tab) +
@@ -3167,6 +3186,11 @@ static int tid_init(struct tid_info *t)
3167 t->afree = t->atid_tab; 3186 t->afree = t->atid_tab;
3168 } 3187 }
3169 bitmap_zero(t->stid_bmap, t->nstids + t->nsftids); 3188 bitmap_zero(t->stid_bmap, t->nstids + t->nsftids);
3189 /* Reserve stid 0 for T4/T5 adapters */
3190 if (!t->stid_base &&
3191 (is_t4(adap->params.chip) || is_t5(adap->params.chip)))
3192 __set_bit(0, t->stid_bmap);
3193
3170 return 0; 3194 return 0;
3171} 3195}
3172 3196
@@ -3731,7 +3755,7 @@ static void uld_attach(struct adapter *adap, unsigned int uld)
3731 lli.ucq_density = 1 << QUEUESPERPAGEPF0_GET( 3755 lli.ucq_density = 1 << QUEUESPERPAGEPF0_GET(
3732 t4_read_reg(adap, SGE_INGRESS_QUEUES_PER_PAGE_PF) >> 3756 t4_read_reg(adap, SGE_INGRESS_QUEUES_PER_PAGE_PF) >>
3733 (adap->fn * 4)); 3757 (adap->fn * 4));
3734 lli.filt_mode = adap->filter_mode; 3758 lli.filt_mode = adap->params.tp.vlan_pri_map;
3735 /* MODQ_REQ_MAP sets queues 0-3 to chan 0-3 */ 3759 /* MODQ_REQ_MAP sets queues 0-3 to chan 0-3 */
3736 for (i = 0; i < NCHAN; i++) 3760 for (i = 0; i < NCHAN; i++)
3737 lli.tx_modq[i] = i; 3761 lli.tx_modq[i] = i;
@@ -4179,7 +4203,7 @@ int cxgb4_create_server_filter(const struct net_device *dev, unsigned int stid,
4179 adap = netdev2adap(dev); 4203 adap = netdev2adap(dev);
4180 4204
4181 /* Adjust stid to correct filter index */ 4205 /* Adjust stid to correct filter index */
4182 stid -= adap->tids.nstids; 4206 stid -= adap->tids.sftid_base;
4183 stid += adap->tids.nftids; 4207 stid += adap->tids.nftids;
4184 4208
4185 /* Check to make sure the filter requested is writable ... 4209 /* Check to make sure the filter requested is writable ...
@@ -4205,12 +4229,17 @@ int cxgb4_create_server_filter(const struct net_device *dev, unsigned int stid,
4205 f->fs.val.lip[i] = val[i]; 4229 f->fs.val.lip[i] = val[i];
4206 f->fs.mask.lip[i] = ~0; 4230 f->fs.mask.lip[i] = ~0;
4207 } 4231 }
4208 if (adap->filter_mode & F_PORT) { 4232 if (adap->params.tp.vlan_pri_map & F_PORT) {
4209 f->fs.val.iport = port; 4233 f->fs.val.iport = port;
4210 f->fs.mask.iport = mask; 4234 f->fs.mask.iport = mask;
4211 } 4235 }
4212 } 4236 }
4213 4237
4238 if (adap->params.tp.vlan_pri_map & F_PROTOCOL) {
4239 f->fs.val.proto = IPPROTO_TCP;
4240 f->fs.mask.proto = ~0;
4241 }
4242
4214 f->fs.dirsteer = 1; 4243 f->fs.dirsteer = 1;
4215 f->fs.iq = queue; 4244 f->fs.iq = queue;
4216 /* Mark filter as locked */ 4245 /* Mark filter as locked */
@@ -4237,7 +4266,7 @@ int cxgb4_remove_server_filter(const struct net_device *dev, unsigned int stid,
4237 adap = netdev2adap(dev); 4266 adap = netdev2adap(dev);
4238 4267
4239 /* Adjust stid to correct filter index */ 4268 /* Adjust stid to correct filter index */
4240 stid -= adap->tids.nstids; 4269 stid -= adap->tids.sftid_base;
4241 stid += adap->tids.nftids; 4270 stid += adap->tids.nftids;
4242 4271
4243 f = &adap->tids.ftid_tab[stid]; 4272 f = &adap->tids.ftid_tab[stid];
@@ -5092,7 +5121,7 @@ static int adap_init0(struct adapter *adap)
5092 enum dev_state state; 5121 enum dev_state state;
5093 u32 params[7], val[7]; 5122 u32 params[7], val[7];
5094 struct fw_caps_config_cmd caps_cmd; 5123 struct fw_caps_config_cmd caps_cmd;
5095 int reset = 1, j; 5124 int reset = 1;
5096 5125
5097 /* 5126 /*
5098 * Contact FW, advertising Master capability (and potentially forcing 5127 * Contact FW, advertising Master capability (and potentially forcing
@@ -5434,21 +5463,11 @@ static int adap_init0(struct adapter *adap)
5434 /* 5463 /*
5435 * These are finalized by FW initialization, load their values now. 5464 * These are finalized by FW initialization, load their values now.
5436 */ 5465 */
5437 v = t4_read_reg(adap, TP_TIMER_RESOLUTION);
5438 adap->params.tp.tre = TIMERRESOLUTION_GET(v);
5439 adap->params.tp.dack_re = DELAYEDACKRESOLUTION_GET(v);
5440 t4_read_mtu_tbl(adap, adap->params.mtus, NULL); 5466 t4_read_mtu_tbl(adap, adap->params.mtus, NULL);
5441 t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd, 5467 t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd,
5442 adap->params.b_wnd); 5468 adap->params.b_wnd);
5443 5469
5444 /* MODQ_REQ_MAP defaults to setting queues 0-3 to chan 0-3 */ 5470 t4_init_tp_params(adap);
5445 for (j = 0; j < NCHAN; j++)
5446 adap->params.tp.tx_modq[j] = j;
5447
5448 t4_read_indirect(adap, TP_PIO_ADDR, TP_PIO_DATA,
5449 &adap->filter_mode, 1,
5450 TP_VLAN_PRI_MAP);
5451
5452 adap->flags |= FW_OK; 5471 adap->flags |= FW_OK;
5453 return 0; 5472 return 0;
5454 5473
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
index 6f21f2451c30..4dd0a82533e4 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
@@ -131,7 +131,14 @@ static inline void *lookup_atid(const struct tid_info *t, unsigned int atid)
131 131
132static inline void *lookup_stid(const struct tid_info *t, unsigned int stid) 132static inline void *lookup_stid(const struct tid_info *t, unsigned int stid)
133{ 133{
134 stid -= t->stid_base; 134 /* Is it a server filter TID? */
135 if (t->nsftids && (stid >= t->sftid_base)) {
136 stid -= t->sftid_base;
137 stid += t->nstids;
138 } else {
139 stid -= t->stid_base;
140 }
141
135 return stid < (t->nstids + t->nsftids) ? t->stid_tab[stid].data : NULL; 142 return stid < (t->nstids + t->nsftids) ? t->stid_tab[stid].data : NULL;
136} 143}
137 144
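cxgb4_free_stid() and lookup_stid() now share the same normalization: a regular stid maps through stid_base into [0, nstids), while a server-filter TID maps through sftid_base into the tail [nstids, nstids + nsftids) of the same table. A quick sketch of the arithmetic with made-up base values:

#include <stdio.h>

/* Map an external TID to an index into stid_tab[]. Base values in
 * main() are made up for illustration.
 */
struct tid_info {
        unsigned int stid_base, nstids;
        unsigned int sftid_base, nsftids;
};

static unsigned int stid_index(const struct tid_info *t, unsigned int stid)
{
        if (t->nsftids && stid >= t->sftid_base)
                return stid - t->sftid_base + t->nstids; /* server filter */
        return stid - t->stid_base;                      /* regular stid  */
}

int main(void)
{
        struct tid_info t = { .stid_base = 128, .nstids = 256,
                              .sftid_base = 1024, .nsftids = 64 };

        printf("%u\n", stid_index(&t, 130));   /* 2: regular slot */
        printf("%u\n", stid_index(&t, 1025));  /* 257: sftid slot */
        return 0;
}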
diff --git a/drivers/net/ethernet/chelsio/cxgb4/l2t.c b/drivers/net/ethernet/chelsio/cxgb4/l2t.c
index 29878098101e..cb05be905def 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/l2t.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/l2t.c
@@ -45,6 +45,7 @@
45#include "l2t.h" 45#include "l2t.h"
46#include "t4_msg.h" 46#include "t4_msg.h"
47#include "t4fw_api.h" 47#include "t4fw_api.h"
48#include "t4_regs.h"
48 49
49#define VLAN_NONE 0xfff 50#define VLAN_NONE 0xfff
50 51
@@ -411,6 +412,40 @@ done:
411} 412}
412EXPORT_SYMBOL(cxgb4_l2t_get); 413EXPORT_SYMBOL(cxgb4_l2t_get);
413 414
415u64 cxgb4_select_ntuple(struct net_device *dev,
416 const struct l2t_entry *l2t)
417{
418 struct adapter *adap = netdev2adap(dev);
419 struct tp_params *tp = &adap->params.tp;
420 u64 ntuple = 0;
421
422 /* Initialize each of the fields which we care about which are present
423 * in the Compressed Filter Tuple.
424 */
425 if (tp->vlan_shift >= 0 && l2t->vlan != VLAN_NONE)
426 ntuple |= (F_FT_VLAN_VLD | l2t->vlan) << tp->vlan_shift;
427
428 if (tp->port_shift >= 0)
429 ntuple |= (u64)l2t->lport << tp->port_shift;
430
431 if (tp->protocol_shift >= 0)
432 ntuple |= (u64)IPPROTO_TCP << tp->protocol_shift;
433
434 if (tp->vnic_shift >= 0) {
435 u32 viid = cxgb4_port_viid(dev);
436 u32 vf = FW_VIID_VIN_GET(viid);
437 u32 pf = FW_VIID_PFN_GET(viid);
438 u32 vld = FW_VIID_VIVLD_GET(viid);
439
440 ntuple |= (u64)(V_FT_VNID_ID_VF(vf) |
441 V_FT_VNID_ID_PF(pf) |
442 V_FT_VNID_ID_VLD(vld)) << tp->vnic_shift;
443 }
444
445 return ntuple;
446}
447EXPORT_SYMBOL(cxgb4_select_ntuple);
448
414/* 449/*
415 * Called when address resolution fails for an L2T entry to handle packets 450 * Called when address resolution fails for an L2T entry to handle packets
416 * on the arpq head. If a packet specifies a failure handler it is invoked, 451 * on the arpq head. If a packet specifies a failure handler it is invoked,
diff --git a/drivers/net/ethernet/chelsio/cxgb4/l2t.h b/drivers/net/ethernet/chelsio/cxgb4/l2t.h
index 108c0f1fce1c..85eb5c71358d 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/l2t.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/l2t.h
@@ -98,7 +98,8 @@ int cxgb4_l2t_send(struct net_device *dev, struct sk_buff *skb,
98struct l2t_entry *cxgb4_l2t_get(struct l2t_data *d, struct neighbour *neigh, 98struct l2t_entry *cxgb4_l2t_get(struct l2t_data *d, struct neighbour *neigh,
99 const struct net_device *physdev, 99 const struct net_device *physdev,
100 unsigned int priority); 100 unsigned int priority);
101 101u64 cxgb4_select_ntuple(struct net_device *dev,
102 const struct l2t_entry *l2t);
102void t4_l2t_update(struct adapter *adap, struct neighbour *neigh); 103void t4_l2t_update(struct adapter *adap, struct neighbour *neigh);
103struct l2t_entry *t4_l2t_alloc_switching(struct l2t_data *d); 104struct l2t_entry *t4_l2t_alloc_switching(struct l2t_data *d);
104int t4_l2t_set_switching(struct adapter *adap, struct l2t_entry *e, u16 vlan, 105int t4_l2t_set_switching(struct adapter *adap, struct l2t_entry *e, u16 vlan,
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c
index 42745438c1e0..47ffa64fcf19 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c
@@ -2583,7 +2583,7 @@ static int t4_sge_init_soft(struct adapter *adap)
2583 #undef READ_FL_BUF 2583 #undef READ_FL_BUF
2584 2584
2585 if (fl_small_pg != PAGE_SIZE || 2585 if (fl_small_pg != PAGE_SIZE ||
2586 (fl_large_pg != 0 && (fl_large_pg <= fl_small_pg || 2586 (fl_large_pg != 0 && (fl_large_pg < fl_small_pg ||
2587 (fl_large_pg & (fl_large_pg-1)) != 0))) { 2587 (fl_large_pg & (fl_large_pg-1)) != 0))) {
2588 dev_err(adap->pdev_dev, "bad SGE FL page buffer sizes [%d, %d]\n", 2588 dev_err(adap->pdev_dev, "bad SGE FL page buffer sizes [%d, %d]\n",
2589 fl_small_pg, fl_large_pg); 2589 fl_small_pg, fl_large_pg);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
index 9903a66b7bad..a3964753935c 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
@@ -3682,6 +3682,109 @@ int t4_prep_adapter(struct adapter *adapter)
3682 return 0; 3682 return 0;
3683} 3683}
3684 3684
3685/**
3686 * t4_init_tp_params - initialize adap->params.tp
3687 * @adap: the adapter
3688 *
3689 * Initialize various fields of the adapter's TP Parameters structure.
3690 */
3691int t4_init_tp_params(struct adapter *adap)
3692{
3693 int chan;
3694 u32 v;
3695
3696 v = t4_read_reg(adap, TP_TIMER_RESOLUTION);
3697 adap->params.tp.tre = TIMERRESOLUTION_GET(v);
3698 adap->params.tp.dack_re = DELAYEDACKRESOLUTION_GET(v);
3699
3700 /* MODQ_REQ_MAP defaults to setting queues 0-3 to chan 0-3 */
3701 for (chan = 0; chan < NCHAN; chan++)
3702 adap->params.tp.tx_modq[chan] = chan;
3703
3704 /* Cache the adapter's Compressed Filter Mode and global Ingress
3705 * Configuration.
3706 */
3707 t4_read_indirect(adap, TP_PIO_ADDR, TP_PIO_DATA,
3708 &adap->params.tp.vlan_pri_map, 1,
3709 TP_VLAN_PRI_MAP);
3710 t4_read_indirect(adap, TP_PIO_ADDR, TP_PIO_DATA,
3711 &adap->params.tp.ingress_config, 1,
3712 TP_INGRESS_CONFIG);
3713
3714 /* Now that we have TP_VLAN_PRI_MAP cached, we can calculate the field
3715 * shift positions of several elements of the Compressed Filter Tuple
3716 * for this adapter which we need frequently ...
3717 */
3718 adap->params.tp.vlan_shift = t4_filter_field_shift(adap, F_VLAN);
3719 adap->params.tp.vnic_shift = t4_filter_field_shift(adap, F_VNIC_ID);
3720 adap->params.tp.port_shift = t4_filter_field_shift(adap, F_PORT);
3721 adap->params.tp.protocol_shift = t4_filter_field_shift(adap,
3722 F_PROTOCOL);
3723
3724 /* If TP_INGRESS_CONFIG.VNID == 0, then TP_VLAN_PRI_MAP.VNIC_ID
3725 * represents the presence of an Outer VLAN instead of a VNIC ID.
3726 */
3727 if ((adap->params.tp.ingress_config & F_VNIC) == 0)
3728 adap->params.tp.vnic_shift = -1;
3729
3730 return 0;
3731}
3732
3733/**
3734 * t4_filter_field_shift - calculate filter field shift
3735 * @adap: the adapter
3736 * @filter_sel: the desired field (from TP_VLAN_PRI_MAP bits)
3737 *
3738 * Return the shift position of a filter field within the Compressed
3739 * Filter Tuple. The filter field is specified via its selection bit
3740 * within TP_VLAN_PRI_MAP (filter mode). E.g. F_VLAN.
3741 */
3742int t4_filter_field_shift(const struct adapter *adap, int filter_sel)
3743{
3744 unsigned int filter_mode = adap->params.tp.vlan_pri_map;
3745 unsigned int sel;
3746 int field_shift;
3747
3748 if ((filter_mode & filter_sel) == 0)
3749 return -1;
3750
3751 for (sel = 1, field_shift = 0; sel < filter_sel; sel <<= 1) {
3752 switch (filter_mode & sel) {
3753 case F_FCOE:
3754 field_shift += W_FT_FCOE;
3755 break;
3756 case F_PORT:
3757 field_shift += W_FT_PORT;
3758 break;
3759 case F_VNIC_ID:
3760 field_shift += W_FT_VNIC_ID;
3761 break;
3762 case F_VLAN:
3763 field_shift += W_FT_VLAN;
3764 break;
3765 case F_TOS:
3766 field_shift += W_FT_TOS;
3767 break;
3768 case F_PROTOCOL:
3769 field_shift += W_FT_PROTOCOL;
3770 break;
3771 case F_ETHERTYPE:
3772 field_shift += W_FT_ETHERTYPE;
3773 break;
3774 case F_MACMATCH:
3775 field_shift += W_FT_MACMATCH;
3776 break;
3777 case F_MPSHITTYPE:
3778 field_shift += W_FT_MPSHITTYPE;
3779 break;
3780 case F_FRAGMENTATION:
3781 field_shift += W_FT_FRAGMENTATION;
3782 break;
3783 }
3784 }
3785 return field_shift;
3786}
3787
3685int t4_port_init(struct adapter *adap, int mbox, int pf, int vf) 3788int t4_port_init(struct adapter *adap, int mbox, int pf, int vf)
3686{ 3789{
3687 u8 addr[6]; 3790 u8 addr[6];
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
index 0a8205d69d2c..4082522d8140 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
@@ -1171,10 +1171,50 @@
1171 1171
1172#define A_TP_TX_SCHED_PCMD 0x25 1172#define A_TP_TX_SCHED_PCMD 0x25
1173 1173
1174#define S_VNIC 11
1175#define V_VNIC(x) ((x) << S_VNIC)
1176#define F_VNIC V_VNIC(1U)
1177
1178#define S_FRAGMENTATION 9
1179#define V_FRAGMENTATION(x) ((x) << S_FRAGMENTATION)
1180#define F_FRAGMENTATION V_FRAGMENTATION(1U)
1181
1182#define S_MPSHITTYPE 8
1183#define V_MPSHITTYPE(x) ((x) << S_MPSHITTYPE)
1184#define F_MPSHITTYPE V_MPSHITTYPE(1U)
1185
1186#define S_MACMATCH 7
1187#define V_MACMATCH(x) ((x) << S_MACMATCH)
1188#define F_MACMATCH V_MACMATCH(1U)
1189
1190#define S_ETHERTYPE 6
1191#define V_ETHERTYPE(x) ((x) << S_ETHERTYPE)
1192#define F_ETHERTYPE V_ETHERTYPE(1U)
1193
1194#define S_PROTOCOL 5
1195#define V_PROTOCOL(x) ((x) << S_PROTOCOL)
1196#define F_PROTOCOL V_PROTOCOL(1U)
1197
1198#define S_TOS 4
1199#define V_TOS(x) ((x) << S_TOS)
1200#define F_TOS V_TOS(1U)
1201
1202#define S_VLAN 3
1203#define V_VLAN(x) ((x) << S_VLAN)
1204#define F_VLAN V_VLAN(1U)
1205
1206#define S_VNIC_ID 2
1207#define V_VNIC_ID(x) ((x) << S_VNIC_ID)
1208#define F_VNIC_ID V_VNIC_ID(1U)
1209
1174#define S_PORT 1 1210#define S_PORT 1
1175#define V_PORT(x) ((x) << S_PORT) 1211#define V_PORT(x) ((x) << S_PORT)
1176#define F_PORT V_PORT(1U) 1212#define F_PORT V_PORT(1U)
1177 1213
1214#define S_FCOE 0
1215#define V_FCOE(x) ((x) << S_FCOE)
1216#define F_FCOE V_FCOE(1U)
1217
1178#define NUM_MPS_CLS_SRAM_L_INSTANCES 336 1218#define NUM_MPS_CLS_SRAM_L_INSTANCES 336
1179#define NUM_MPS_T5_CLS_SRAM_L_INSTANCES 512 1219#define NUM_MPS_T5_CLS_SRAM_L_INSTANCES 512
1180 1220
@@ -1213,4 +1253,37 @@
1213#define V_CHIPID(x) ((x) << S_CHIPID) 1253#define V_CHIPID(x) ((x) << S_CHIPID)
1214#define G_CHIPID(x) (((x) >> S_CHIPID) & M_CHIPID) 1254#define G_CHIPID(x) (((x) >> S_CHIPID) & M_CHIPID)
1215 1255
1256/* TP_VLAN_PRI_MAP controls which subset of fields will be present in the
1257 * Compressed Filter Tuple for LE filters. Each bit set in TP_VLAN_PRI_MAP
1258 * selects for a particular field being present. These fields, when present
1259 * in the Compressed Filter Tuple, have the following widths in bits.
1260 */
1261#define W_FT_FCOE 1
1262#define W_FT_PORT 3
1263#define W_FT_VNIC_ID 17
1264#define W_FT_VLAN 17
1265#define W_FT_TOS 8
1266#define W_FT_PROTOCOL 8
1267#define W_FT_ETHERTYPE 16
1268#define W_FT_MACMATCH 9
1269#define W_FT_MPSHITTYPE 3
1270#define W_FT_FRAGMENTATION 1
1271
1272/* Some of the Compressed Filter Tuple fields have internal structure. These
1273 * bit shifts/masks describe those structures. All shifts are relative to the
1274 * base position of the fields within the Compressed Filter Tuple
1275 */
1276#define S_FT_VLAN_VLD 16
1277#define V_FT_VLAN_VLD(x) ((x) << S_FT_VLAN_VLD)
1278#define F_FT_VLAN_VLD V_FT_VLAN_VLD(1U)
1279
1280#define S_FT_VNID_ID_VF 0
1281#define V_FT_VNID_ID_VF(x) ((x) << S_FT_VNID_ID_VF)
1282
1283#define S_FT_VNID_ID_PF 7
1284#define V_FT_VNID_ID_PF(x) ((x) << S_FT_VNID_ID_PF)
1285
1286#define S_FT_VNID_ID_VLD 16
1287#define V_FT_VNID_ID_VLD(x) ((x) << S_FT_VNID_ID_VLD)
1288
1216#endif /* __T4_REGS_H */ 1289#endif /* __T4_REGS_H */
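Tying t4_filter_field_shift() to the W_FT_* widths above: fields selected in TP_VLAN_PRI_MAP are packed into the Compressed Filter Tuple lowest bit first, so a field's shift is the sum of the widths of every selected field below it. A standalone replica of that walk for a hypothetical filter mode selecting PORT, VLAN and PROTOCOL:

#include <stdio.h>

/* Bit positions in TP_VLAN_PRI_MAP and the W_FT_* widths from t4_regs.h. */
enum { FCOE, PORT, VNIC_ID, VLAN, TOS, PROTOCOL,
       ETHERTYPE, MACMATCH, MPSHITTYPE, FRAGMENTATION, NFIELDS };
static const int width[NFIELDS] = { 1, 3, 17, 17, 8, 8, 16, 9, 3, 1 };

/* Same walk as t4_filter_field_shift(): sum the widths of selected
 * fields at lower bit positions; -1 if the field is not selected.
 */
static int field_shift(unsigned int mode, int field)
{
        int bit, shift = 0;

        if (!(mode & (1u << field)))
                return -1;
        for (bit = 0; bit < field; bit++)
                if (mode & (1u << bit))
                        shift += width[bit];
        return shift;
}

int main(void)
{
        unsigned int mode = (1u << PORT) | (1u << VLAN) | (1u << PROTOCOL);

        printf("PORT:     %d\n", field_shift(mode, PORT));     /*  0 */
        printf("VLAN:     %d\n", field_shift(mode, VLAN));     /*  3 */
        printf("PROTOCOL: %d\n", field_shift(mode, PROTOCOL)); /* 20 */
        printf("TOS:      %d\n", field_shift(mode, TOS));      /* -1 */
        return 0;
}

With that mode, PROTOCOL lands at bit 20 (W_FT_PORT + W_FT_VLAN = 3 + 17), which is exactly the shift cxgb4_select_ntuple() would use when building the ntuple.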
diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h
index 5878df619b53..4ccaf9af6fc9 100644
--- a/drivers/net/ethernet/emulex/benet/be.h
+++ b/drivers/net/ethernet/emulex/benet/be.h
@@ -104,6 +104,7 @@ static inline char *nic_name(struct pci_dev *pdev)
104#define BE3_MAX_RSS_QS 16 104#define BE3_MAX_RSS_QS 16
105#define BE3_MAX_TX_QS 16 105#define BE3_MAX_TX_QS 16
106#define BE3_MAX_EVT_QS 16 106#define BE3_MAX_EVT_QS 16
107#define BE3_SRIOV_MAX_EVT_QS 8
107 108
108#define MAX_RX_QS 32 109#define MAX_RX_QS 32
109#define MAX_EVT_QS 32 110#define MAX_EVT_QS 32
@@ -480,7 +481,7 @@ struct be_adapter {
480 struct list_head entry; 481 struct list_head entry;
481 482
482 u32 flash_status; 483 u32 flash_status;
483 struct completion flash_compl; 484 struct completion et_cmd_compl;
484 485
485 struct be_resources res; /* resources available for the func */ 486 struct be_resources res; /* resources available for the func */
486 u16 num_vfs; /* Number of VFs provisioned by PF */ 487 u16 num_vfs; /* Number of VFs provisioned by PF */
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c
index e0e8bc1ef14c..94c35c8d799d 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.c
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.c
@@ -141,11 +141,17 @@ static int be_mcc_compl_process(struct be_adapter *adapter,
141 subsystem = resp_hdr->subsystem; 141 subsystem = resp_hdr->subsystem;
142 } 142 }
143 143
144 if (opcode == OPCODE_LOWLEVEL_LOOPBACK_TEST &&
145 subsystem == CMD_SUBSYSTEM_LOWLEVEL) {
146 complete(&adapter->et_cmd_compl);
147 return 0;
148 }
149
144 if (((opcode == OPCODE_COMMON_WRITE_FLASHROM) || 150 if (((opcode == OPCODE_COMMON_WRITE_FLASHROM) ||
145 (opcode == OPCODE_COMMON_WRITE_OBJECT)) && 151 (opcode == OPCODE_COMMON_WRITE_OBJECT)) &&
146 (subsystem == CMD_SUBSYSTEM_COMMON)) { 152 (subsystem == CMD_SUBSYSTEM_COMMON)) {
147 adapter->flash_status = compl_status; 153 adapter->flash_status = compl_status;
148 complete(&adapter->flash_compl); 154 complete(&adapter->et_cmd_compl);
149 } 155 }
150 156
151 if (compl_status == MCC_STATUS_SUCCESS) { 157 if (compl_status == MCC_STATUS_SUCCESS) {
@@ -2017,6 +2023,9 @@ int be_cmd_rss_config(struct be_adapter *adapter, u8 *rsstable,
2017 0x3ea83c02, 0x4a110304}; 2023 0x3ea83c02, 0x4a110304};
2018 int status; 2024 int status;
2019 2025
2026 if (!(be_if_cap_flags(adapter) & BE_IF_FLAGS_RSS))
2027 return 0;
2028
2020 if (mutex_lock_interruptible(&adapter->mbox_lock)) 2029 if (mutex_lock_interruptible(&adapter->mbox_lock))
2021 return -1; 2030 return -1;
2022 2031
@@ -2160,7 +2169,7 @@ int lancer_cmd_write_object(struct be_adapter *adapter, struct be_dma_mem *cmd,
2160 be_mcc_notify(adapter); 2169 be_mcc_notify(adapter);
2161 spin_unlock_bh(&adapter->mcc_lock); 2170 spin_unlock_bh(&adapter->mcc_lock);
2162 2171
2163 if (!wait_for_completion_timeout(&adapter->flash_compl, 2172 if (!wait_for_completion_timeout(&adapter->et_cmd_compl,
2164 msecs_to_jiffies(60000))) 2173 msecs_to_jiffies(60000)))
2165 status = -1; 2174 status = -1;
2166 else 2175 else
@@ -2255,8 +2264,8 @@ int be_cmd_write_flashrom(struct be_adapter *adapter, struct be_dma_mem *cmd,
2255 be_mcc_notify(adapter); 2264 be_mcc_notify(adapter);
2256 spin_unlock_bh(&adapter->mcc_lock); 2265 spin_unlock_bh(&adapter->mcc_lock);
2257 2266
2258 if (!wait_for_completion_timeout(&adapter->flash_compl, 2267 if (!wait_for_completion_timeout(&adapter->et_cmd_compl,
2259 msecs_to_jiffies(40000))) 2268 msecs_to_jiffies(40000)))
2260 status = -1; 2269 status = -1;
2261 else 2270 else
2262 status = adapter->flash_status; 2271 status = adapter->flash_status;
@@ -2367,6 +2376,7 @@ int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num,
2367{ 2376{
2368 struct be_mcc_wrb *wrb; 2377 struct be_mcc_wrb *wrb;
2369 struct be_cmd_req_loopback_test *req; 2378 struct be_cmd_req_loopback_test *req;
2379 struct be_cmd_resp_loopback_test *resp;
2370 int status; 2380 int status;
2371 2381
2372 spin_lock_bh(&adapter->mcc_lock); 2382 spin_lock_bh(&adapter->mcc_lock);
@@ -2381,8 +2391,8 @@ int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num,
2381 2391
2382 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL, 2392 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
2383 OPCODE_LOWLEVEL_LOOPBACK_TEST, sizeof(*req), wrb, NULL); 2393 OPCODE_LOWLEVEL_LOOPBACK_TEST, sizeof(*req), wrb, NULL);
2384 req->hdr.timeout = cpu_to_le32(4);
2385 2394
2395 req->hdr.timeout = cpu_to_le32(15);
2386 req->pattern = cpu_to_le64(pattern); 2396 req->pattern = cpu_to_le64(pattern);
2387 req->src_port = cpu_to_le32(port_num); 2397 req->src_port = cpu_to_le32(port_num);
2388 req->dest_port = cpu_to_le32(port_num); 2398 req->dest_port = cpu_to_le32(port_num);
@@ -2390,12 +2400,15 @@ int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num,
2390 req->num_pkts = cpu_to_le32(num_pkts); 2400 req->num_pkts = cpu_to_le32(num_pkts);
2391 req->loopback_type = cpu_to_le32(loopback_type); 2401 req->loopback_type = cpu_to_le32(loopback_type);
2392 2402
2393 status = be_mcc_notify_wait(adapter); 2403 be_mcc_notify(adapter);
2394 if (!status) { 2404
2395 struct be_cmd_resp_loopback_test *resp = embedded_payload(wrb); 2405 spin_unlock_bh(&adapter->mcc_lock);
2396 status = le32_to_cpu(resp->status);
2397 }
2398 2406
2407 wait_for_completion(&adapter->et_cmd_compl);
2408 resp = embedded_payload(wrb);
2409 status = le32_to_cpu(resp->status);
2410
2411 return status;
2399err: 2412err:
2400 spin_unlock_bh(&adapter->mcc_lock); 2413 spin_unlock_bh(&adapter->mcc_lock);
2401 return status; 2414 return status;
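Read together, the hunks above turn the loopback test into a fire-and-wait command: be_cmd_loopback_test() posts the work request, drops mcc_lock, and sleeps on et_cmd_compl; the completion handler recognises the loopback-test response and wakes it. A condensed sketch of that split, with the driver's error handling elided:

	/* issuer (be_cmd_loopback_test) */
	be_mcc_notify(adapter);			/* post, don't wait  */
	spin_unlock_bh(&adapter->mcc_lock);	/* test can run long */
	wait_for_completion(&adapter->et_cmd_compl);
	resp = embedded_payload(wrb);

	/* completion side (be_mcc_compl_process, MCC event context) */
	if (opcode == OPCODE_LOWLEVEL_LOOPBACK_TEST &&
	    subsystem == CMD_SUBSYSTEM_LOWLEVEL) {
		complete(&adapter->et_cmd_compl);
		return 0;
	}

The same completion now also serves the flash commands, which is why flash_compl is renamed rather than a second completion being added.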
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index b5c238aa6861..3acf137b5784 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -2744,13 +2744,16 @@ static int be_rx_qs_create(struct be_adapter *adapter)
2744 if (!BEx_chip(adapter)) 2744 if (!BEx_chip(adapter))
2745 adapter->rss_flags |= RSS_ENABLE_UDP_IPV4 | 2745 adapter->rss_flags |= RSS_ENABLE_UDP_IPV4 |
2746 RSS_ENABLE_UDP_IPV6; 2746 RSS_ENABLE_UDP_IPV6;
2747 } else {
2748 /* Disable RSS, if only default RX Q is created */
2749 adapter->rss_flags = RSS_ENABLE_NONE;
2750 }
2747 2751
2748 rc = be_cmd_rss_config(adapter, rsstable, adapter->rss_flags, 2752 rc = be_cmd_rss_config(adapter, rsstable, adapter->rss_flags,
2749 128); 2753 128);
2750 if (rc) { 2754 if (rc) {
2751 adapter->rss_flags = 0; 2755 adapter->rss_flags = RSS_ENABLE_NONE;
2752 return rc; 2756 return rc;
2753 }
2754 } 2757 }
2755 2758
2756 /* First time posting */ 2759 /* First time posting */
@@ -3124,11 +3127,11 @@ static void BEx_get_resources(struct be_adapter *adapter,
3124{ 3127{
3125 struct pci_dev *pdev = adapter->pdev; 3128 struct pci_dev *pdev = adapter->pdev;
3126 bool use_sriov = false; 3129 bool use_sriov = false;
3130 int max_vfs;
3127 3131
3128 if (BE3_chip(adapter) && sriov_want(adapter)) { 3132 max_vfs = pci_sriov_get_totalvfs(pdev);
3129 int max_vfs;
3130 3133
3131 max_vfs = pci_sriov_get_totalvfs(pdev); 3134 if (BE3_chip(adapter) && sriov_want(adapter)) {
3132 res->max_vfs = max_vfs > 0 ? min(MAX_VFS, max_vfs) : 0; 3135 res->max_vfs = max_vfs > 0 ? min(MAX_VFS, max_vfs) : 0;
3133 use_sriov = res->max_vfs; 3136 use_sriov = res->max_vfs;
3134 } 3137 }
@@ -3159,7 +3162,11 @@ static void BEx_get_resources(struct be_adapter *adapter,
3159 BE3_MAX_RSS_QS : BE2_MAX_RSS_QS; 3162 BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
3160 res->max_rx_qs = res->max_rss_qs + 1; 3163 res->max_rx_qs = res->max_rss_qs + 1;
3161 3164
3162 res->max_evt_qs = be_physfn(adapter) ? BE3_MAX_EVT_QS : 1; 3165 if (be_physfn(adapter))
3166 res->max_evt_qs = (max_vfs > 0) ?
3167 BE3_SRIOV_MAX_EVT_QS : BE3_MAX_EVT_QS;
3168 else
3169 res->max_evt_qs = 1;
3163 3170
3164 res->if_cap_flags = BE_IF_CAP_FLAGS_WANT; 3171 res->if_cap_flags = BE_IF_CAP_FLAGS_WANT;
3165 if (!(adapter->function_caps & BE_FUNCTION_CAPS_RSS)) 3172 if (!(adapter->function_caps & BE_FUNCTION_CAPS_RSS))
@@ -4205,7 +4212,7 @@ static int be_ctrl_init(struct be_adapter *adapter)
4205 spin_lock_init(&adapter->mcc_lock); 4212 spin_lock_init(&adapter->mcc_lock);
4206 spin_lock_init(&adapter->mcc_cq_lock); 4213 spin_lock_init(&adapter->mcc_cq_lock);
4207 4214
4208 init_completion(&adapter->flash_compl); 4215 init_completion(&adapter->et_cmd_compl);
4209 pci_save_state(adapter->pdev); 4216 pci_save_state(adapter->pdev);
4210 return 0; 4217 return 0;
4211 4218
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index 05cd81aa9813..6530177d53e7 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -428,6 +428,8 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
428 /* If this was the last BD in the ring, start at the beginning again. */ 428 /* If this was the last BD in the ring, start at the beginning again. */
429 bdp = fec_enet_get_nextdesc(bdp, fep); 429 bdp = fec_enet_get_nextdesc(bdp, fep);
430 430
431 skb_tx_timestamp(skb);
432
431 fep->cur_tx = bdp; 433 fep->cur_tx = bdp;
432 434
433 if (fep->cur_tx == fep->dirty_tx) 435 if (fep->cur_tx == fep->dirty_tx)
@@ -436,8 +438,6 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
436 /* Trigger transmission start */ 438 /* Trigger transmission start */
437 writel(0, fep->hwp + FEC_X_DES_ACTIVE); 439 writel(0, fep->hwp + FEC_X_DES_ACTIVE);
438 440
439 skb_tx_timestamp(skb);
440
441 return NETDEV_TX_OK; 441 return NETDEV_TX_OK;
442} 442}
443 443
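Moving skb_tx_timestamp() earlier is an ordering fix: once the write to FEC_X_DES_ACTIVE kicks the DMA engine, the TX-completion path on another CPU may free the skb, so the timestamp must be taken while the driver still owns it. Condensed, the safe sequence is:

	skb_tx_timestamp(skb);			/* 1: skb still ours     */
	fep->cur_tx = bdp;			/* 2: publish ring state */
	writel(0, fep->hwp + FEC_X_DES_ACTIVE);	/* 3: start DMA; the skb
						 *    may be freed at any
						 *    moment after this  */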
diff --git a/drivers/net/ethernet/intel/e1000e/80003es2lan.c b/drivers/net/ethernet/intel/e1000e/80003es2lan.c
index 895450e9bb3c..ff2d806eaef7 100644
--- a/drivers/net/ethernet/intel/e1000e/80003es2lan.c
+++ b/drivers/net/ethernet/intel/e1000e/80003es2lan.c
@@ -718,8 +718,11 @@ static s32 e1000_reset_hw_80003es2lan(struct e1000_hw *hw)
718 e1000_release_phy_80003es2lan(hw); 718 e1000_release_phy_80003es2lan(hw);
719 719
720 /* Disable IBIST slave mode (far-end loopback) */ 720 /* Disable IBIST slave mode (far-end loopback) */
721 e1000_read_kmrn_reg_80003es2lan(hw, E1000_KMRNCTRLSTA_INBAND_PARAM, 721 ret_val =
722 &kum_reg_data); 722 e1000_read_kmrn_reg_80003es2lan(hw, E1000_KMRNCTRLSTA_INBAND_PARAM,
723 &kum_reg_data);
724 if (ret_val)
725 return ret_val;
723 kum_reg_data |= E1000_KMRNCTRLSTA_IBIST_DISABLE; 726 kum_reg_data |= E1000_KMRNCTRLSTA_IBIST_DISABLE;
724 e1000_write_kmrn_reg_80003es2lan(hw, E1000_KMRNCTRLSTA_INBAND_PARAM, 727 e1000_write_kmrn_reg_80003es2lan(hw, E1000_KMRNCTRLSTA_INBAND_PARAM,
725 kum_reg_data); 728 kum_reg_data);
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index 051d1583e211..d6570b2d5a6b 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -6184,7 +6184,7 @@ static int __e1000_resume(struct pci_dev *pdev)
6184 return 0; 6184 return 0;
6185} 6185}
6186 6186
6187#ifdef CONFIG_PM_SLEEP 6187#ifdef CONFIG_PM
6188static int e1000_suspend(struct device *dev) 6188static int e1000_suspend(struct device *dev)
6189{ 6189{
6190 struct pci_dev *pdev = to_pci_dev(dev); 6190 struct pci_dev *pdev = to_pci_dev(dev);
@@ -6203,7 +6203,7 @@ static int e1000_resume(struct device *dev)
6203 6203
6204 return __e1000_resume(pdev); 6204 return __e1000_resume(pdev);
6205} 6205}
6206#endif /* CONFIG_PM_SLEEP */ 6206#endif /* CONFIG_PM */
6207 6207
6208#ifdef CONFIG_PM_RUNTIME 6208#ifdef CONFIG_PM_RUNTIME
6209static int e1000_runtime_suspend(struct device *dev) 6209static int e1000_runtime_suspend(struct device *dev)
diff --git a/drivers/net/ethernet/intel/e1000e/phy.c b/drivers/net/ethernet/intel/e1000e/phy.c
index da2be59505c0..20e71f4ca426 100644
--- a/drivers/net/ethernet/intel/e1000e/phy.c
+++ b/drivers/net/ethernet/intel/e1000e/phy.c
@@ -1757,19 +1757,23 @@ s32 e1000e_phy_has_link_generic(struct e1000_hw *hw, u32 iterations,
1757 * it across the board. 1757 * it across the board.
1758 */ 1758 */
1759 ret_val = e1e_rphy(hw, MII_BMSR, &phy_status); 1759 ret_val = e1e_rphy(hw, MII_BMSR, &phy_status);
1760 if (ret_val) 1760 if (ret_val) {
1761 /* If the first read fails, another entity may have 1761 /* If the first read fails, another entity may have
1762 * ownership of the resources, wait and try again to 1762 * ownership of the resources, wait and try again to
1763 * see if they have relinquished the resources yet. 1763 * see if they have relinquished the resources yet.
1764 */ 1764 */
1765 udelay(usec_interval); 1765 if (usec_interval >= 1000)
1766 msleep(usec_interval / 1000);
1767 else
1768 udelay(usec_interval);
1769 }
1766 ret_val = e1e_rphy(hw, MII_BMSR, &phy_status); 1770 ret_val = e1e_rphy(hw, MII_BMSR, &phy_status);
1767 if (ret_val) 1771 if (ret_val)
1768 break; 1772 break;
1769 if (phy_status & BMSR_LSTATUS) 1773 if (phy_status & BMSR_LSTATUS)
1770 break; 1774 break;
1771 if (usec_interval >= 1000) 1775 if (usec_interval >= 1000)
1772 mdelay(usec_interval / 1000); 1776 msleep(usec_interval / 1000);
1773 else 1777 else
1774 udelay(usec_interval); 1778 udelay(usec_interval);
1775 } 1779 }
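Both hunks apply the same rule to the poll interval: busy-wait only below one millisecond, sleep otherwise. The old code used mdelay(), which busy-waits for whole milliseconds, and the retry path after a failed read now uses the same logic instead of udelay() for arbitrarily long intervals. As a hypothetical helper, the pattern is roughly:

	static void phy_wait_interval(u32 usec_interval)
	{
		if (usec_interval >= 1000)
			msleep(usec_interval / 1000);	/* sleeps; may overshoot */
		else
			udelay(usec_interval);		/* short busy-wait */
	}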
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
index 9ce07f3ef62d..359f6e60320d 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
@@ -291,7 +291,9 @@ static int ixgbe_pci_sriov_disable(struct pci_dev *dev)
291{ 291{
292 struct ixgbe_adapter *adapter = pci_get_drvdata(dev); 292 struct ixgbe_adapter *adapter = pci_get_drvdata(dev);
293 int err; 293 int err;
294#ifdef CONFIG_PCI_IOV
294 u32 current_flags = adapter->flags; 295 u32 current_flags = adapter->flags;
296#endif
295 297
296 err = ixgbe_disable_sriov(adapter); 298 err = ixgbe_disable_sriov(adapter);
297 299
diff --git a/drivers/net/ethernet/marvell/mvmdio.c b/drivers/net/ethernet/marvell/mvmdio.c
index 7354960b583b..c4eeb69a5bee 100644
--- a/drivers/net/ethernet/marvell/mvmdio.c
+++ b/drivers/net/ethernet/marvell/mvmdio.c
@@ -92,6 +92,12 @@ static int orion_mdio_wait_ready(struct mii_bus *bus)
92 if (time_is_before_jiffies(end)) 92 if (time_is_before_jiffies(end))
93 ++timedout; 93 ++timedout;
94 } else { 94 } else {
95 /* wait_event_timeout does not guarantee a delay of at
 96 * least one whole jiffy, so timeout must be no less
97 * than two.
98 */
99 if (timeout < 2)
100 timeout = 2;
95 wait_event_timeout(dev->smi_busy_wait, 101 wait_event_timeout(dev->smi_busy_wait,
96 orion_mdio_smi_is_done(dev), 102 orion_mdio_smi_is_done(dev),
97 timeout); 103 timeout);
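The clamp exists because wait_event_timeout() counts in whole jiffies: a timeout of 1 can expire almost immediately if the current jiffy is about to tick over, so only a value of 2 or more guarantees at least one full jiffy of real waiting. Illustrative arithmetic, assuming HZ=100 and a hypothetical poll_us interval:

	/* HZ = 100 -> 1 jiffy = 10 ms.  A 1-jiffy timeout may elapse
	 * after ~0 ms of wall time; 2 jiffies guarantees >= 10 ms.
	 */
	unsigned long timeout = usecs_to_jiffies(poll_us);	/* may be 0 or 1 */

	if (timeout < 2)
		timeout = 2;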
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c
index 3010abb55fbd..32058614151a 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c
@@ -1602,13 +1602,13 @@ netxen_process_lro(struct netxen_adapter *adapter,
1602 u32 seq_number; 1602 u32 seq_number;
1603 u8 vhdr_len = 0; 1603 u8 vhdr_len = 0;
1604 1604
1605 if (unlikely(ring > adapter->max_rds_rings)) 1605 if (unlikely(ring >= adapter->max_rds_rings))
1606 return NULL; 1606 return NULL;
1607 1607
1608 rds_ring = &recv_ctx->rds_rings[ring]; 1608 rds_ring = &recv_ctx->rds_rings[ring];
1609 1609
1610 index = netxen_get_lro_sts_refhandle(sts_data0); 1610 index = netxen_get_lro_sts_refhandle(sts_data0);
1611 if (unlikely(index > rds_ring->num_desc)) 1611 if (unlikely(index >= rds_ring->num_desc))
1612 return NULL; 1612 return NULL;
1613 1613
1614 buffer = &rds_ring->rx_buf_arr[index]; 1614 buffer = &rds_ring->rx_buf_arr[index];
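Both netxen checks are the same off-by-one: an array with num_desc entries has valid indexes 0..num_desc-1, so index == num_desc must be rejected as well, which `>` failed to do. The generic form of the guard:

	/* an array of n entries: accept idx only if idx < n */
	static inline bool index_ok(u32 idx, u32 n_entries)
	{
		return idx < n_entries;	/* i.e. reject idx >= n_entries */
	}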
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
index 4afdef0cc175..35d48766d842 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
@@ -493,6 +493,7 @@ struct qlcnic_hardware_context {
493 struct qlcnic_mailbox *mailbox; 493 struct qlcnic_mailbox *mailbox;
494 u8 extend_lb_time; 494 u8 extend_lb_time;
495 u8 phys_port_id[ETH_ALEN]; 495 u8 phys_port_id[ETH_ALEN];
496 u8 lb_mode;
496}; 497};
497 498
498struct qlcnic_adapter_stats { 499struct qlcnic_adapter_stats {
@@ -584,6 +585,8 @@ struct qlcnic_host_tx_ring {
584 dma_addr_t phys_addr; 585 dma_addr_t phys_addr;
585 dma_addr_t hw_cons_phys_addr; 586 dma_addr_t hw_cons_phys_addr;
586 struct netdev_queue *txq; 587 struct netdev_queue *txq;
588 /* Lock to protect Tx descriptors cleanup */
589 spinlock_t tx_clean_lock;
587} ____cacheline_internodealigned_in_smp; 590} ____cacheline_internodealigned_in_smp;
588 591
589/* 592/*
@@ -815,6 +818,7 @@ struct qlcnic_mac_vlan_list {
815 818
816#define QLCNIC_ILB_MODE 0x1 819#define QLCNIC_ILB_MODE 0x1
817#define QLCNIC_ELB_MODE 0x2 820#define QLCNIC_ELB_MODE 0x2
821#define QLCNIC_LB_MODE_MASK 0x3
818 822
819#define QLCNIC_LINKEVENT 0x1 823#define QLCNIC_LINKEVENT 0x1
820#define QLCNIC_LB_RESPONSE 0x2 824#define QLCNIC_LB_RESPONSE 0x2
@@ -1100,7 +1104,6 @@ struct qlcnic_adapter {
1100 struct qlcnic_filter_hash rx_fhash; 1104 struct qlcnic_filter_hash rx_fhash;
1101 struct list_head vf_mc_list; 1105 struct list_head vf_mc_list;
1102 1106
1103 spinlock_t tx_clean_lock;
1104 spinlock_t mac_learn_lock; 1107 spinlock_t mac_learn_lock;
1105 /* spinlock for catching rcv filters for eswitch traffic */ 1108 /* spinlock for catching rcv filters for eswitch traffic */
1106 spinlock_t rx_mac_learn_lock; 1109 spinlock_t rx_mac_learn_lock;
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
index b3fd1605773e..03eb2ad9611a 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
@@ -1685,12 +1685,6 @@ int qlcnic_83xx_loopback_test(struct net_device *netdev, u8 mode)
1685 } 1685 }
1686 } while ((adapter->ahw->linkup && ahw->has_link_events) != 1); 1686 } while ((adapter->ahw->linkup && ahw->has_link_events) != 1);
1687 1687
1688 /* Make sure carrier is off and queue is stopped during loopback */
1689 if (netif_running(netdev)) {
1690 netif_carrier_off(netdev);
1691 netif_tx_stop_all_queues(netdev);
1692 }
1693
1694 ret = qlcnic_do_lb_test(adapter, mode); 1688 ret = qlcnic_do_lb_test(adapter, mode);
1695 1689
1696 qlcnic_83xx_clear_lb_mode(adapter, mode); 1690 qlcnic_83xx_clear_lb_mode(adapter, mode);
@@ -2122,6 +2116,7 @@ static void qlcnic_83xx_handle_link_aen(struct qlcnic_adapter *adapter,
2122 ahw->link_autoneg = MSB(MSW(data[3])); 2116 ahw->link_autoneg = MSB(MSW(data[3]));
2123 ahw->module_type = MSB(LSW(data[3])); 2117 ahw->module_type = MSB(LSW(data[3]));
2124 ahw->has_link_events = 1; 2118 ahw->has_link_events = 1;
2119 ahw->lb_mode = data[4] & QLCNIC_LB_MODE_MASK;
2125 qlcnic_advert_link_change(adapter, link_status); 2120 qlcnic_advert_link_change(adapter, link_status);
2126} 2121}
2127 2122
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c
index e9c21e5d0ca9..c4262c23ed7c 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c
@@ -134,6 +134,8 @@ void qlcnic_release_tx_buffers(struct qlcnic_adapter *adapter,
134 struct qlcnic_skb_frag *buffrag; 134 struct qlcnic_skb_frag *buffrag;
135 int i, j; 135 int i, j;
136 136
137 spin_lock(&tx_ring->tx_clean_lock);
138
137 cmd_buf = tx_ring->cmd_buf_arr; 139 cmd_buf = tx_ring->cmd_buf_arr;
138 for (i = 0; i < tx_ring->num_desc; i++) { 140 for (i = 0; i < tx_ring->num_desc; i++) {
139 buffrag = cmd_buf->frag_array; 141 buffrag = cmd_buf->frag_array;
@@ -157,6 +159,8 @@ void qlcnic_release_tx_buffers(struct qlcnic_adapter *adapter,
157 } 159 }
158 cmd_buf++; 160 cmd_buf++;
159 } 161 }
162
163 spin_unlock(&tx_ring->tx_clean_lock);
160} 164}
161 165
162void qlcnic_free_sw_resources(struct qlcnic_adapter *adapter) 166void qlcnic_free_sw_resources(struct qlcnic_adapter *adapter)
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
index a215e0f69335..6373f6022486 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
@@ -689,6 +689,10 @@ void qlcnic_advert_link_change(struct qlcnic_adapter *adapter, int linkup)
689 adapter->ahw->linkup = 0; 689 adapter->ahw->linkup = 0;
690 netif_carrier_off(netdev); 690 netif_carrier_off(netdev);
691 } else if (!adapter->ahw->linkup && linkup) { 691 } else if (!adapter->ahw->linkup && linkup) {
692 /* Do not advertise Link up if the port is in loopback mode */
693 if (qlcnic_83xx_check(adapter) && adapter->ahw->lb_mode)
694 return;
695
692 netdev_info(netdev, "NIC Link is up\n"); 696 netdev_info(netdev, "NIC Link is up\n");
693 adapter->ahw->linkup = 1; 697 adapter->ahw->linkup = 1;
694 netif_carrier_on(netdev); 698 netif_carrier_on(netdev);
@@ -778,7 +782,7 @@ static int qlcnic_process_cmd_ring(struct qlcnic_adapter *adapter,
778 struct net_device *netdev = adapter->netdev; 782 struct net_device *netdev = adapter->netdev;
779 struct qlcnic_skb_frag *frag; 783 struct qlcnic_skb_frag *frag;
780 784
781 if (!spin_trylock(&adapter->tx_clean_lock)) 785 if (!spin_trylock(&tx_ring->tx_clean_lock))
782 return 1; 786 return 1;
783 787
784 sw_consumer = tx_ring->sw_consumer; 788 sw_consumer = tx_ring->sw_consumer;
@@ -807,8 +811,9 @@ static int qlcnic_process_cmd_ring(struct qlcnic_adapter *adapter,
807 break; 811 break;
808 } 812 }
809 813
814 tx_ring->sw_consumer = sw_consumer;
815
810 if (count && netif_running(netdev)) { 816 if (count && netif_running(netdev)) {
811 tx_ring->sw_consumer = sw_consumer;
812 smp_mb(); 817 smp_mb();
813 if (netif_tx_queue_stopped(tx_ring->txq) && 818 if (netif_tx_queue_stopped(tx_ring->txq) &&
814 netif_carrier_ok(netdev)) { 819 netif_carrier_ok(netdev)) {
@@ -834,7 +839,8 @@ static int qlcnic_process_cmd_ring(struct qlcnic_adapter *adapter,
834 */ 839 */
835 hw_consumer = le32_to_cpu(*(tx_ring->hw_consumer)); 840 hw_consumer = le32_to_cpu(*(tx_ring->hw_consumer));
836 done = (sw_consumer == hw_consumer); 841 done = (sw_consumer == hw_consumer);
837 spin_unlock(&adapter->tx_clean_lock); 842
843 spin_unlock(&tx_ring->tx_clean_lock);
838 844
839 return done; 845 return done;
840} 846}
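The qlcnic hunks above and below this file all serve one locking change: tx_clean_lock moves from the adapter into each TX ring, so cleaning one ring no longer serializes the others, and the lock's only job is to keep buffer teardown (qlcnic_release_tx_buffers()) and the NAPI reclaimer (qlcnic_process_cmd_ring()) off the same ring at once. The per-ring discipline, sketched:

	spin_lock_init(&tx_ring->tx_clean_lock);	/* at ring allocation */

	/* NAPI reclaim: back off if this ring is already being cleaned */
	if (!spin_trylock(&tx_ring->tx_clean_lock))
		return 1;
	/* ... reclaim completed descriptors on this ring only ... */
	spin_unlock(&tx_ring->tx_clean_lock);

	/* teardown: exclude the reclaimer, then free this ring's buffers */
	spin_lock(&tx_ring->tx_clean_lock);
	/* ... unmap and free cmd_buf_arr entries ... */
	spin_unlock(&tx_ring->tx_clean_lock);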
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
index d131ec1321e8..eeec83a0e664 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
@@ -1757,7 +1757,6 @@ void __qlcnic_down(struct qlcnic_adapter *adapter, struct net_device *netdev)
1757 if (qlcnic_sriov_vf_check(adapter)) 1757 if (qlcnic_sriov_vf_check(adapter))
1758 qlcnic_sriov_cleanup_async_list(&adapter->ahw->sriov->bc); 1758 qlcnic_sriov_cleanup_async_list(&adapter->ahw->sriov->bc);
1759 smp_mb(); 1759 smp_mb();
1760 spin_lock(&adapter->tx_clean_lock);
1761 netif_carrier_off(netdev); 1760 netif_carrier_off(netdev);
1762 adapter->ahw->linkup = 0; 1761 adapter->ahw->linkup = 0;
1763 netif_tx_disable(netdev); 1762 netif_tx_disable(netdev);
@@ -1778,7 +1777,6 @@ void __qlcnic_down(struct qlcnic_adapter *adapter, struct net_device *netdev)
1778 1777
1779 for (ring = 0; ring < adapter->drv_tx_rings; ring++) 1778 for (ring = 0; ring < adapter->drv_tx_rings; ring++)
1780 qlcnic_release_tx_buffers(adapter, &adapter->tx_ring[ring]); 1779 qlcnic_release_tx_buffers(adapter, &adapter->tx_ring[ring]);
1781 spin_unlock(&adapter->tx_clean_lock);
1782} 1780}
1783 1781
1784/* Usage: During suspend and firmware recovery module */ 1782/* Usage: During suspend and firmware recovery module */
@@ -2173,6 +2171,7 @@ int qlcnic_alloc_tx_rings(struct qlcnic_adapter *adapter,
2173 } 2171 }
2174 memset(cmd_buf_arr, 0, TX_BUFF_RINGSIZE(tx_ring)); 2172 memset(cmd_buf_arr, 0, TX_BUFF_RINGSIZE(tx_ring));
2175 tx_ring->cmd_buf_arr = cmd_buf_arr; 2173 tx_ring->cmd_buf_arr = cmd_buf_arr;
2174 spin_lock_init(&tx_ring->tx_clean_lock);
2176 } 2175 }
2177 2176
2178 if (qlcnic_83xx_check(adapter) || 2177 if (qlcnic_83xx_check(adapter) ||
@@ -2300,7 +2299,6 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
2300 rwlock_init(&adapter->ahw->crb_lock); 2299 rwlock_init(&adapter->ahw->crb_lock);
2301 mutex_init(&adapter->ahw->mem_lock); 2300 mutex_init(&adapter->ahw->mem_lock);
2302 2301
2303 spin_lock_init(&adapter->tx_clean_lock);
2304 INIT_LIST_HEAD(&adapter->mac_list); 2302 INIT_LIST_HEAD(&adapter->mac_list);
2305 2303
2306 qlcnic_register_dcb(adapter); 2304 qlcnic_register_dcb(adapter);
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c
index 98b621fb1227..d14d9a139eef 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c
@@ -81,9 +81,12 @@ static int qlcnic_sriov_pf_cal_res_limit(struct qlcnic_adapter *adapter,
81 if (qlcnic_83xx_pf_check(adapter)) 81 if (qlcnic_83xx_pf_check(adapter))
82 num_macs = 1; 82 num_macs = 1;
83 83
84 info->max_rx_mcast_mac_filters = res->num_rx_mcast_mac_filters;
85
84 if (adapter->ahw->pci_func == func) { 86 if (adapter->ahw->pci_func == func) {
85 info->min_tx_bw = 0; 87 info->min_tx_bw = 0;
86 info->max_tx_bw = MAX_BW; 88 info->max_tx_bw = MAX_BW;
89
87 temp = res->num_rx_ucast_mac_filters - num_macs * num_vfs; 90 temp = res->num_rx_ucast_mac_filters - num_macs * num_vfs;
88 info->max_rx_ucast_mac_filters = temp; 91 info->max_rx_ucast_mac_filters = temp;
89 temp = res->num_tx_mac_filters - num_macs * num_vfs; 92 temp = res->num_tx_mac_filters - num_macs * num_vfs;
@@ -92,6 +95,7 @@ static int qlcnic_sriov_pf_cal_res_limit(struct qlcnic_adapter *adapter,
92 temp = res->num_rx_mcast_mac_filters - temp; 95 temp = res->num_rx_mcast_mac_filters - temp;
93 info->max_rx_mcast_mac_filters = temp; 96 info->max_rx_mcast_mac_filters = temp;
94 97
98 info->max_tx_ques = res->num_tx_queues - sriov->num_vfs;
95 } else { 99 } else {
96 id = qlcnic_sriov_func_to_index(adapter, func); 100 id = qlcnic_sriov_func_to_index(adapter, func);
97 if (id < 0) 101 if (id < 0)
@@ -99,10 +103,13 @@ static int qlcnic_sriov_pf_cal_res_limit(struct qlcnic_adapter *adapter,
99 vp = sriov->vf_info[id].vp; 103 vp = sriov->vf_info[id].vp;
100 info->min_tx_bw = vp->min_tx_bw; 104 info->min_tx_bw = vp->min_tx_bw;
101 info->max_tx_bw = vp->max_tx_bw; 105 info->max_tx_bw = vp->max_tx_bw;
106
102 info->max_rx_ucast_mac_filters = num_macs; 107 info->max_rx_ucast_mac_filters = num_macs;
103 info->max_tx_mac_filters = num_macs; 108 info->max_tx_mac_filters = num_macs;
104 temp = num_macs * QLCNIC_SRIOV_VF_MAX_MAC; 109 temp = num_macs * QLCNIC_SRIOV_VF_MAX_MAC;
105 info->max_rx_mcast_mac_filters = temp; 110 info->max_rx_mcast_mac_filters = temp;
111
112 info->max_tx_ques = QLCNIC_SINGLE_RING;
106 } 113 }
107 114
108 info->max_rx_ip_addr = res->num_destip / max; 115 info->max_rx_ip_addr = res->num_destip / max;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 216141028125..b8e3a4ce24b0 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -622,17 +622,15 @@ static int stmmac_init_ptp(struct stmmac_priv *priv)
622 if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp)) 622 if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
623 return -EOPNOTSUPP; 623 return -EOPNOTSUPP;
624 624
625 if (netif_msg_hw(priv)) { 625 priv->adv_ts = 0;
626 if (priv->dma_cap.time_stamp) { 626 if (priv->dma_cap.atime_stamp && priv->extend_desc)
627 pr_debug("IEEE 1588-2002 Time Stamp supported\n"); 627 priv->adv_ts = 1;
628 priv->adv_ts = 0; 628
629 } 629 if (netif_msg_hw(priv) && priv->dma_cap.time_stamp)
630 if (priv->dma_cap.atime_stamp && priv->extend_desc) { 630 pr_debug("IEEE 1588-2002 Time Stamp supported\n");
631 pr_debug 631
632 ("IEEE 1588-2008 Advanced Time Stamp supported\n"); 632 if (netif_msg_hw(priv) && priv->adv_ts)
633 priv->adv_ts = 1; 633 pr_debug("IEEE 1588-2008 Advanced Time Stamp supported\n");
634 }
635 }
636 634
637 priv->hw->ptp = &stmmac_ptp; 635 priv->hw->ptp = &stmmac_ptp;
638 priv->hwts_tx_en = 0; 636 priv->hwts_tx_en = 0;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
index b8b0eeed0f92..7680581ebe12 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
@@ -56,7 +56,7 @@ static int stmmac_adjust_freq(struct ptp_clock_info *ptp, s32 ppb)
56 56
57 priv->hw->ptp->config_addend(priv->ioaddr, addend); 57 priv->hw->ptp->config_addend(priv->ioaddr, addend);
58 58
59 spin_unlock_irqrestore(&priv->lock, flags); 59 spin_unlock_irqrestore(&priv->ptp_lock, flags);
60 60
61 return 0; 61 return 0;
62} 62}
@@ -91,7 +91,7 @@ static int stmmac_adjust_time(struct ptp_clock_info *ptp, s64 delta)
91 91
92 priv->hw->ptp->adjust_systime(priv->ioaddr, sec, nsec, neg_adj); 92 priv->hw->ptp->adjust_systime(priv->ioaddr, sec, nsec, neg_adj);
93 93
94 spin_unlock_irqrestore(&priv->lock, flags); 94 spin_unlock_irqrestore(&priv->ptp_lock, flags);
95 95
96 return 0; 96 return 0;
97} 97}
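Both PTP callbacks acquire priv->ptp_lock (just outside these hunks) but were releasing priv->lock, so every call left ptp_lock held forever while corrupting the state of a lock that was never taken. The invariant restored here is simply lock/unlock pairing on the same lock with the same flags cookie:

	unsigned long flags;

	spin_lock_irqsave(&priv->ptp_lock, flags);
	/* ... program the addend / adjust the system time ... */
	spin_unlock_irqrestore(&priv->ptp_lock, flags);	/* same lock */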
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index 243fffbe18e8..e8bb77d25d98 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -740,6 +740,8 @@ static void _cpsw_adjust_link(struct cpsw_slave *slave,
740 /* set speed_in input in case RMII mode is used in 100Mbps */ 740 /* set speed_in input in case RMII mode is used in 100Mbps */
741 if (phy->speed == 100) 741 if (phy->speed == 100)
742 mac_control |= BIT(15); 742 mac_control |= BIT(15);
743 else if (phy->speed == 10)
744 mac_control |= BIT(18); /* In Band mode */
743 745
744 *link = true; 746 *link = true;
745 } else { 747 } else {
@@ -2126,7 +2128,7 @@ static int cpsw_probe(struct platform_device *pdev)
2126 while ((res = platform_get_resource(priv->pdev, IORESOURCE_IRQ, k))) { 2128 while ((res = platform_get_resource(priv->pdev, IORESOURCE_IRQ, k))) {
2127 for (i = res->start; i <= res->end; i++) { 2129 for (i = res->start; i <= res->end; i++) {
2128 if (devm_request_irq(&pdev->dev, i, cpsw_interrupt, 0, 2130 if (devm_request_irq(&pdev->dev, i, cpsw_interrupt, 0,
2129 dev_name(priv->dev), priv)) { 2131 dev_name(&pdev->dev), priv)) {
2130 dev_err(priv->dev, "error attaching irq\n"); 2132 dev_err(priv->dev, "error attaching irq\n");
2131 goto clean_ale_ret; 2133 goto clean_ale_ret;
2132 } 2134 }
diff --git a/drivers/net/hamradio/hdlcdrv.c b/drivers/net/hamradio/hdlcdrv.c
index 3169252613fa..5d78c1d08abd 100644
--- a/drivers/net/hamradio/hdlcdrv.c
+++ b/drivers/net/hamradio/hdlcdrv.c
@@ -571,6 +571,8 @@ static int hdlcdrv_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
571 case HDLCDRVCTL_CALIBRATE: 571 case HDLCDRVCTL_CALIBRATE:
572 if(!capable(CAP_SYS_RAWIO)) 572 if(!capable(CAP_SYS_RAWIO))
573 return -EPERM; 573 return -EPERM;
574 if (bi.data.calibrate > INT_MAX / s->par.bitrate)
575 return -EINVAL;
574 s->hdlctx.calibrate = bi.data.calibrate * s->par.bitrate / 16; 576 s->hdlctx.calibrate = bi.data.calibrate * s->par.bitrate / 16;
575 return 0; 577 return 0;
576 578
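The new test guards the multiplication on the following line: calibrate and bitrate are multiplied as int, so a large user-supplied calibrate would wrap before the division by 16. Checking the operand against INT_MAX / bitrate first is the standard overflow-safe pattern; a standalone model:

	#include <limits.h>

	/* reject values whose product cannot fit in an int */
	static int scaled_calibrate(int calibrate, int bitrate)
	{
		if (bitrate <= 0 || calibrate < 0 ||
		    calibrate > INT_MAX / bitrate)
			return -1;			/* would overflow */

		return calibrate * bitrate / 16;	/* provably safe  */
	}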
diff --git a/drivers/net/hamradio/yam.c b/drivers/net/hamradio/yam.c
index 1971411574db..61dd2447e1bb 100644
--- a/drivers/net/hamradio/yam.c
+++ b/drivers/net/hamradio/yam.c
@@ -1057,6 +1057,7 @@ static int yam_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
1057 break; 1057 break;
1058 1058
1059 case SIOCYAMGCFG: 1059 case SIOCYAMGCFG:
1060 memset(&yi, 0, sizeof(yi));
1060 yi.cfg.mask = 0xffffffff; 1061 yi.cfg.mask = 0xffffffff;
1061 yi.cfg.iobase = yp->iobase; 1062 yi.cfg.iobase = yp->iobase;
1062 yi.cfg.irq = yp->irq; 1063 yi.cfg.irq = yp->irq;
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index f80bd0c90f1e..7756118c2f0a 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -260,9 +260,7 @@ int netvsc_recv_callback(struct hv_device *device_obj,
260 struct sk_buff *skb; 260 struct sk_buff *skb;
261 261
262 net = ((struct netvsc_device *)hv_get_drvdata(device_obj))->ndev; 262 net = ((struct netvsc_device *)hv_get_drvdata(device_obj))->ndev;
263 if (!net) { 263 if (!net || net->reg_state != NETREG_REGISTERED) {
264 netdev_err(net, "got receive callback but net device"
265 " not initialized yet\n");
266 packet->status = NVSP_STAT_FAIL; 264 packet->status = NVSP_STAT_FAIL;
267 return 0; 265 return 0;
268 } 266 }
@@ -434,19 +432,11 @@ static int netvsc_probe(struct hv_device *dev,
434 SET_ETHTOOL_OPS(net, &ethtool_ops); 432 SET_ETHTOOL_OPS(net, &ethtool_ops);
435 SET_NETDEV_DEV(net, &dev->device); 433 SET_NETDEV_DEV(net, &dev->device);
436 434
437 ret = register_netdev(net);
438 if (ret != 0) {
439 pr_err("Unable to register netdev.\n");
440 free_netdev(net);
441 goto out;
442 }
443
444 /* Notify the netvsc driver of the new device */ 435 /* Notify the netvsc driver of the new device */
445 device_info.ring_size = ring_size; 436 device_info.ring_size = ring_size;
446 ret = rndis_filter_device_add(dev, &device_info); 437 ret = rndis_filter_device_add(dev, &device_info);
447 if (ret != 0) { 438 if (ret != 0) {
448 netdev_err(net, "unable to add netvsc device (ret %d)\n", ret); 439 netdev_err(net, "unable to add netvsc device (ret %d)\n", ret);
449 unregister_netdev(net);
450 free_netdev(net); 440 free_netdev(net);
451 hv_set_drvdata(dev, NULL); 441 hv_set_drvdata(dev, NULL);
452 return ret; 442 return ret;
@@ -455,7 +445,13 @@ static int netvsc_probe(struct hv_device *dev,
455 445
456 netif_carrier_on(net); 446 netif_carrier_on(net);
457 447
458out: 448 ret = register_netdev(net);
449 if (ret != 0) {
450 pr_err("Unable to register netdev.\n");
451 rndis_filter_device_remove(dev);
452 free_netdev(net);
453 }
454
459 return ret; 455 return ret;
460} 456}
461 457
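The netvsc reordering follows the usual probe rule: make the device fully operational (rndis_filter_device_add()) before exposing it to userspace (register_netdev()), and unwind in reverse on failure; previously the netdev could be opened, and receive callbacks, before the RNDIS backend existed. Distilled from the hunks above, details elided:

	ret = rndis_filter_device_add(dev, &device_info); /* 1: backend up  */
	if (ret) {
		free_netdev(net);
		return ret;
	}

	netif_carrier_on(net);

	ret = register_netdev(net);			  /* 2: expose last */
	if (ret) {
		rndis_filter_device_remove(dev);	  /* undo step 1    */
		free_netdev(net);
	}
	return ret;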
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index 94198366de7f..09ababe54a5b 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -689,8 +689,19 @@ static netdev_features_t macvlan_fix_features(struct net_device *dev,
689 netdev_features_t features) 689 netdev_features_t features)
690{ 690{
691 struct macvlan_dev *vlan = netdev_priv(dev); 691 struct macvlan_dev *vlan = netdev_priv(dev);
692 netdev_features_t mask;
692 693
693 return features & (vlan->set_features | ~MACVLAN_FEATURES); 694 features |= NETIF_F_ALL_FOR_ALL;
695 features &= (vlan->set_features | ~MACVLAN_FEATURES);
696 mask = features;
697
698 features = netdev_increment_features(vlan->lowerdev->features,
699 features,
700 mask);
701 if (!vlan->fwd_priv)
702 features |= NETIF_F_LLTX;
703
704 return features;
694} 705}
695 706
696static const struct ethtool_ops macvlan_ethtool_ops = { 707static const struct ethtool_ops macvlan_ethtool_ops = {
@@ -1009,9 +1020,8 @@ static int macvlan_device_event(struct notifier_block *unused,
1009 break; 1020 break;
1010 case NETDEV_FEAT_CHANGE: 1021 case NETDEV_FEAT_CHANGE:
1011 list_for_each_entry(vlan, &port->vlans, list) { 1022 list_for_each_entry(vlan, &port->vlans, list) {
1012 vlan->dev->features = dev->features & MACVLAN_FEATURES;
1013 vlan->dev->gso_max_size = dev->gso_max_size; 1023 vlan->dev->gso_max_size = dev->gso_max_size;
1014 netdev_features_change(vlan->dev); 1024 netdev_update_features(vlan->dev);
1015 } 1025 }
1016 break; 1026 break;
1017 case NETDEV_UNREGISTER: 1027 case NETDEV_UNREGISTER:
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index 19da5ab615bd..76e8936ab9e4 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -533,10 +533,8 @@ phy_err:
533int phy_start_interrupts(struct phy_device *phydev) 533int phy_start_interrupts(struct phy_device *phydev)
534{ 534{
535 atomic_set(&phydev->irq_disable, 0); 535 atomic_set(&phydev->irq_disable, 0);
536 if (request_irq(phydev->irq, phy_interrupt, 536 if (request_irq(phydev->irq, phy_interrupt, 0, "phy_interrupt",
537 IRQF_SHARED, 537 phydev) < 0) {
538 "phy_interrupt",
539 phydev) < 0) {
540 pr_warn("%s: Can't get IRQ %d (PHY)\n", 538 pr_warn("%s: Can't get IRQ %d (PHY)\n",
541 phydev->bus->name, phydev->irq); 539 phydev->bus->name, phydev->irq);
542 phydev->irq = PHY_POLL; 540 phydev->irq = PHY_POLL;
diff --git a/drivers/net/usb/Kconfig b/drivers/net/usb/Kconfig
index 85e4a01670f0..47b0f732b0b1 100644
--- a/drivers/net/usb/Kconfig
+++ b/drivers/net/usb/Kconfig
@@ -276,12 +276,12 @@ config USB_NET_CDC_MBIM
276 module will be called cdc_mbim. 276 module will be called cdc_mbim.
277 277
278config USB_NET_DM9601 278config USB_NET_DM9601
279 tristate "Davicom DM9601 based USB 1.1 10/100 ethernet devices" 279 tristate "Davicom DM96xx based USB 10/100 ethernet devices"
280 depends on USB_USBNET 280 depends on USB_USBNET
281 select CRC32 281 select CRC32
282 help 282 help
283 This option adds support for Davicom DM9601 based USB 1.1 283 This option adds support for Davicom DM9601/DM9620/DM9621A
284 10/100 Ethernet adapters. 284 based USB 10/100 Ethernet adapters.
285 285
286config USB_NET_SR9700 286config USB_NET_SR9700
287 tristate "CoreChip-sz SR9700 based USB 1.1 10/100 ethernet devices" 287 tristate "CoreChip-sz SR9700 based USB 1.1 10/100 ethernet devices"
diff --git a/drivers/net/usb/dm9601.c b/drivers/net/usb/dm9601.c
index c6867f926cff..14aa48fa8d7e 100644
--- a/drivers/net/usb/dm9601.c
+++ b/drivers/net/usb/dm9601.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Davicom DM9601 USB 1.1 10/100Mbps ethernet devices 2 * Davicom DM96xx USB 10/100Mbps ethernet devices
3 * 3 *
4 * Peter Korsgaard <jacmet@sunsite.dk> 4 * Peter Korsgaard <jacmet@sunsite.dk>
5 * 5 *
@@ -364,7 +364,12 @@ static int dm9601_bind(struct usbnet *dev, struct usb_interface *intf)
364 dev->net->ethtool_ops = &dm9601_ethtool_ops; 364 dev->net->ethtool_ops = &dm9601_ethtool_ops;
365 dev->net->hard_header_len += DM_TX_OVERHEAD; 365 dev->net->hard_header_len += DM_TX_OVERHEAD;
366 dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len; 366 dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len;
367 dev->rx_urb_size = dev->net->mtu + ETH_HLEN + DM_RX_OVERHEAD; 367
368 /* dm9620/21a require room for 4 byte padding, even in dm9601
369 * mode, so we need +1 to be able to receive full size
370 * ethernet frames.
371 */
372 dev->rx_urb_size = dev->net->mtu + ETH_HLEN + DM_RX_OVERHEAD + 1;
368 373
369 dev->mii.dev = dev->net; 374 dev->mii.dev = dev->net;
370 dev->mii.mdio_read = dm9601_mdio_read; 375 dev->mii.mdio_read = dm9601_mdio_read;
@@ -468,7 +473,7 @@ static int dm9601_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
468static struct sk_buff *dm9601_tx_fixup(struct usbnet *dev, struct sk_buff *skb, 473static struct sk_buff *dm9601_tx_fixup(struct usbnet *dev, struct sk_buff *skb,
469 gfp_t flags) 474 gfp_t flags)
470{ 475{
471 int len; 476 int len, pad;
472 477
473 /* format: 478 /* format:
474 b1: packet length low 479 b1: packet length low
@@ -476,12 +481,23 @@ static struct sk_buff *dm9601_tx_fixup(struct usbnet *dev, struct sk_buff *skb,
476 b3..n: packet data 481 b3..n: packet data
477 */ 482 */
478 483
479 len = skb->len; 484 len = skb->len + DM_TX_OVERHEAD;
485
486 /* workaround for dm962x errata with tx fifo getting out of
487 * sync if a USB bulk transfer retry happens right after a
488 * packet with odd / maxpacket length by adding up to 3 bytes
489 * padding.
490 */
491 while ((len & 1) || !(len % dev->maxpacket))
492 len++;
480 493
481 if (skb_headroom(skb) < DM_TX_OVERHEAD) { 494 len -= DM_TX_OVERHEAD; /* hw header doesn't count as part of length */
495 pad = len - skb->len;
496
497 if (skb_headroom(skb) < DM_TX_OVERHEAD || skb_tailroom(skb) < pad) {
482 struct sk_buff *skb2; 498 struct sk_buff *skb2;
483 499
484 skb2 = skb_copy_expand(skb, DM_TX_OVERHEAD, 0, flags); 500 skb2 = skb_copy_expand(skb, DM_TX_OVERHEAD, pad, flags);
485 dev_kfree_skb_any(skb); 501 dev_kfree_skb_any(skb);
486 skb = skb2; 502 skb = skb2;
487 if (!skb) 503 if (!skb)
@@ -490,10 +506,10 @@ static struct sk_buff *dm9601_tx_fixup(struct usbnet *dev, struct sk_buff *skb,
490 506
491 __skb_push(skb, DM_TX_OVERHEAD); 507 __skb_push(skb, DM_TX_OVERHEAD);
492 508
493 /* usbnet adds padding if length is a multiple of packet size 509 if (pad) {
494 if so, adjust length value in header */ 510 memset(skb->data + skb->len, 0, pad);
495 if ((skb->len % dev->maxpacket) == 0) 511 __skb_put(skb, pad);
496 len++; 512 }
497 513
498 skb->data[0] = len; 514 skb->data[0] = len;
499 skb->data[1] = len >> 8; 515 skb->data[1] = len >> 8;
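The padding loop encodes the dm962x errata rule: the total bulk transfer (2-byte hardware header + payload + pad) must be even and must not land exactly on a multiple of the endpoint's maxpacket size. A standalone model of that arithmetic with one worked case:

	#include <stdio.h>

	/* returns the number of pad bytes the rule requires */
	static int tx_pad(int payload, int maxpacket)
	{
		int len = payload + 2;			/* DM_TX_OVERHEAD */

		while ((len & 1) || !(len % maxpacket))
			len++;
		return len - 2 - payload;
	}

	int main(void)
	{
		/* 62-byte frame, maxpacket 64: 64 hits a packet boundary,
		 * 65 is odd, 66 is fine, so 2 pad bytes are appended
		 */
		printf("pad = %d\n", tx_pad(62, 64));	/* pad = 2 */
		return 0;
	}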
@@ -543,7 +559,7 @@ static int dm9601_link_reset(struct usbnet *dev)
543} 559}
544 560
545static const struct driver_info dm9601_info = { 561static const struct driver_info dm9601_info = {
546 .description = "Davicom DM9601 USB Ethernet", 562 .description = "Davicom DM96xx USB 10/100 Ethernet",
547 .flags = FLAG_ETHER | FLAG_LINK_INTR, 563 .flags = FLAG_ETHER | FLAG_LINK_INTR,
548 .bind = dm9601_bind, 564 .bind = dm9601_bind,
549 .rx_fixup = dm9601_rx_fixup, 565 .rx_fixup = dm9601_rx_fixup,
@@ -594,6 +610,10 @@ static const struct usb_device_id products[] = {
594 USB_DEVICE(0x0a46, 0x9620), /* DM9620 USB to Fast Ethernet Adapter */ 610 USB_DEVICE(0x0a46, 0x9620), /* DM9620 USB to Fast Ethernet Adapter */
595 .driver_info = (unsigned long)&dm9601_info, 611 .driver_info = (unsigned long)&dm9601_info,
596 }, 612 },
613 {
614 USB_DEVICE(0x0a46, 0x9621), /* DM9621A USB to Fast Ethernet Adapter */
615 .driver_info = (unsigned long)&dm9601_info,
616 },
597 {}, // END 617 {}, // END
598}; 618};
599 619
@@ -612,5 +632,5 @@ static struct usb_driver dm9601_driver = {
612module_usb_driver(dm9601_driver); 632module_usb_driver(dm9601_driver);
613 633
614MODULE_AUTHOR("Peter Korsgaard <jacmet@sunsite.dk>"); 634MODULE_AUTHOR("Peter Korsgaard <jacmet@sunsite.dk>");
615MODULE_DESCRIPTION("Davicom DM9601 USB 1.1 ethernet devices"); 635MODULE_DESCRIPTION("Davicom DM96xx USB 10/100 ethernet devices");
616MODULE_LICENSE("GPL"); 636MODULE_LICENSE("GPL");
diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
index 86292e6aaf49..1a482344b3f5 100644
--- a/drivers/net/usb/hso.c
+++ b/drivers/net/usb/hso.c
@@ -185,7 +185,6 @@ enum rx_ctrl_state{
185#define BM_REQUEST_TYPE (0xa1) 185#define BM_REQUEST_TYPE (0xa1)
186#define B_NOTIFICATION (0x20) 186#define B_NOTIFICATION (0x20)
187#define W_VALUE (0x0) 187#define W_VALUE (0x0)
188#define W_INDEX (0x2)
189#define W_LENGTH (0x2) 188#define W_LENGTH (0x2)
190 189
191#define B_OVERRUN (0x1<<6) 190#define B_OVERRUN (0x1<<6)
@@ -1487,6 +1486,7 @@ static void tiocmget_intr_callback(struct urb *urb)
1487 struct uart_icount *icount; 1486 struct uart_icount *icount;
1488 struct hso_serial_state_notification *serial_state_notification; 1487 struct hso_serial_state_notification *serial_state_notification;
1489 struct usb_device *usb; 1488 struct usb_device *usb;
1489 int if_num;
1490 1490
1491 /* Sanity checks */ 1491 /* Sanity checks */
1492 if (!serial) 1492 if (!serial)
@@ -1495,15 +1495,24 @@ static void tiocmget_intr_callback(struct urb *urb)
1495 handle_usb_error(status, __func__, serial->parent); 1495 handle_usb_error(status, __func__, serial->parent);
1496 return; 1496 return;
1497 } 1497 }
1498
1499 /* tiocmget is only supported on HSO_PORT_MODEM */
1498 tiocmget = serial->tiocmget; 1500 tiocmget = serial->tiocmget;
1499 if (!tiocmget) 1501 if (!tiocmget)
1500 return; 1502 return;
1503 BUG_ON((serial->parent->port_spec & HSO_PORT_MASK) != HSO_PORT_MODEM);
1504
1501 usb = serial->parent->usb; 1505 usb = serial->parent->usb;
1506 if_num = serial->parent->interface->altsetting->desc.bInterfaceNumber;
1507
1508 /* wIndex should be the USB interface number of the port to which the
1509 * notification applies, which should always be the Modem port.
1510 */
1502 serial_state_notification = &tiocmget->serial_state_notification; 1511 serial_state_notification = &tiocmget->serial_state_notification;
1503 if (serial_state_notification->bmRequestType != BM_REQUEST_TYPE || 1512 if (serial_state_notification->bmRequestType != BM_REQUEST_TYPE ||
1504 serial_state_notification->bNotification != B_NOTIFICATION || 1513 serial_state_notification->bNotification != B_NOTIFICATION ||
1505 le16_to_cpu(serial_state_notification->wValue) != W_VALUE || 1514 le16_to_cpu(serial_state_notification->wValue) != W_VALUE ||
1506 le16_to_cpu(serial_state_notification->wIndex) != W_INDEX || 1515 le16_to_cpu(serial_state_notification->wIndex) != if_num ||
1507 le16_to_cpu(serial_state_notification->wLength) != W_LENGTH) { 1516 le16_to_cpu(serial_state_notification->wLength) != W_LENGTH) {
1508 dev_warn(&usb->dev, 1517 dev_warn(&usb->dev,
1509 "hso received invalid serial state notification\n"); 1518 "hso received invalid serial state notification\n");
diff --git a/drivers/net/usb/mcs7830.c b/drivers/net/usb/mcs7830.c
index aea68bc33583..36ff0019aa32 100644
--- a/drivers/net/usb/mcs7830.c
+++ b/drivers/net/usb/mcs7830.c
@@ -116,7 +116,6 @@ enum {
116struct mcs7830_data { 116struct mcs7830_data {
117 u8 multi_filter[8]; 117 u8 multi_filter[8];
118 u8 config; 118 u8 config;
119 u8 link_counter;
120}; 119};
121 120
122static const char driver_name[] = "MOSCHIP usb-ethernet driver"; 121static const char driver_name[] = "MOSCHIP usb-ethernet driver";
@@ -560,26 +559,16 @@ static void mcs7830_status(struct usbnet *dev, struct urb *urb)
560{ 559{
561 u8 *buf = urb->transfer_buffer; 560 u8 *buf = urb->transfer_buffer;
562 bool link, link_changed; 561 bool link, link_changed;
563 struct mcs7830_data *data = mcs7830_get_data(dev);
564 562
565 if (urb->actual_length < 16) 563 if (urb->actual_length < 16)
566 return; 564 return;
567 565
568 link = !(buf[1] & 0x20); 566 link = !(buf[1] == 0x20);
569 link_changed = netif_carrier_ok(dev->net) != link; 567 link_changed = netif_carrier_ok(dev->net) != link;
570 if (link_changed) { 568 if (link_changed) {
571 data->link_counter++; 569 usbnet_link_change(dev, link, 0);
572 /* 570 netdev_dbg(dev->net, "Link Status is: %d\n", link);
573 track link state 20 times to guard against erroneous 571 }
574 link state changes reported sometimes by the chip
575 */
576 if (data->link_counter > 20) {
577 data->link_counter = 0;
578 usbnet_link_change(dev, link, 0);
579 netdev_dbg(dev->net, "Link Status is: %d\n", link);
580 }
581 } else
582 data->link_counter = 0;
583} 572}
584 573
585static const struct driver_info moschip_info = { 574static const struct driver_info moschip_info = {
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index c51a98867a40..7b172408cff0 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -1788,16 +1788,17 @@ static int virtnet_restore(struct virtio_device *vdev)
1788 if (err) 1788 if (err)
1789 return err; 1789 return err;
1790 1790
1791 if (netif_running(vi->dev)) 1791 if (netif_running(vi->dev)) {
1792 for (i = 0; i < vi->curr_queue_pairs; i++)
1793 if (!try_fill_recv(&vi->rq[i], GFP_KERNEL))
1794 schedule_delayed_work(&vi->refill, 0);
1795
1792 for (i = 0; i < vi->max_queue_pairs; i++) 1796 for (i = 0; i < vi->max_queue_pairs; i++)
1793 virtnet_napi_enable(&vi->rq[i]); 1797 virtnet_napi_enable(&vi->rq[i]);
1798 }
1794 1799
1795 netif_device_attach(vi->dev); 1800 netif_device_attach(vi->dev);
1796 1801
1797 for (i = 0; i < vi->curr_queue_pairs; i++)
1798 if (!try_fill_recv(&vi->rq[i], GFP_KERNEL))
1799 schedule_delayed_work(&vi->refill, 0);
1800
1801 mutex_lock(&vi->config_lock); 1802 mutex_lock(&vi->config_lock);
1802 vi->config_enable = true; 1803 vi->config_enable = true;
1803 mutex_unlock(&vi->config_lock); 1804 mutex_unlock(&vi->config_lock);
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index ab2e92eec949..481f85d604a4 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -2466,7 +2466,8 @@ static int vxlan_newlink(struct net *net, struct net_device *dev,
2466 /* update header length based on lower device */ 2466 /* update header length based on lower device */
2467 dev->hard_header_len = lowerdev->hard_header_len + 2467 dev->hard_header_len = lowerdev->hard_header_len +
2468 (use_ipv6 ? VXLAN6_HEADROOM : VXLAN_HEADROOM); 2468 (use_ipv6 ? VXLAN6_HEADROOM : VXLAN_HEADROOM);
2469 } 2469 } else if (use_ipv6)
2470 vxlan->flags |= VXLAN_F_IPV6;
2470 2471
2471 if (data[IFLA_VXLAN_TOS]) 2472 if (data[IFLA_VXLAN_TOS])
2472 vxlan->tos = nla_get_u8(data[IFLA_VXLAN_TOS]); 2473 vxlan->tos = nla_get_u8(data[IFLA_VXLAN_TOS]);
diff --git a/drivers/net/wireless/ath/ath9k/ar9002_mac.c b/drivers/net/wireless/ath/ath9k/ar9002_mac.c
index 857ede3a999c..741b38ddcb37 100644
--- a/drivers/net/wireless/ath/ath9k/ar9002_mac.c
+++ b/drivers/net/wireless/ath/ath9k/ar9002_mac.c
@@ -77,9 +77,16 @@ static bool ar9002_hw_get_isr(struct ath_hw *ah, enum ath9k_int *masked,
77 mask2 |= ATH9K_INT_CST; 77 mask2 |= ATH9K_INT_CST;
78 if (isr2 & AR_ISR_S2_TSFOOR) 78 if (isr2 & AR_ISR_S2_TSFOOR)
79 mask2 |= ATH9K_INT_TSFOOR; 79 mask2 |= ATH9K_INT_TSFOOR;
80
81 if (!(pCap->hw_caps & ATH9K_HW_CAP_RAC_SUPPORTED)) {
82 REG_WRITE(ah, AR_ISR_S2, isr2);
83 isr &= ~AR_ISR_BCNMISC;
84 }
80 } 85 }
81 86
82 isr = REG_READ(ah, AR_ISR_RAC); 87 if (pCap->hw_caps & ATH9K_HW_CAP_RAC_SUPPORTED)
88 isr = REG_READ(ah, AR_ISR_RAC);
89
83 if (isr == 0xffffffff) { 90 if (isr == 0xffffffff) {
84 *masked = 0; 91 *masked = 0;
85 return false; 92 return false;
@@ -98,11 +105,23 @@ static bool ar9002_hw_get_isr(struct ath_hw *ah, enum ath9k_int *masked,
98 105
99 *masked |= ATH9K_INT_TX; 106 *masked |= ATH9K_INT_TX;
100 107
101 s0_s = REG_READ(ah, AR_ISR_S0_S); 108 if (pCap->hw_caps & ATH9K_HW_CAP_RAC_SUPPORTED) {
109 s0_s = REG_READ(ah, AR_ISR_S0_S);
110 s1_s = REG_READ(ah, AR_ISR_S1_S);
111 } else {
112 s0_s = REG_READ(ah, AR_ISR_S0);
113 REG_WRITE(ah, AR_ISR_S0, s0_s);
114 s1_s = REG_READ(ah, AR_ISR_S1);
115 REG_WRITE(ah, AR_ISR_S1, s1_s);
116
117 isr &= ~(AR_ISR_TXOK |
118 AR_ISR_TXDESC |
119 AR_ISR_TXERR |
120 AR_ISR_TXEOL);
121 }
122
102 ah->intr_txqs |= MS(s0_s, AR_ISR_S0_QCU_TXOK); 123 ah->intr_txqs |= MS(s0_s, AR_ISR_S0_QCU_TXOK);
103 ah->intr_txqs |= MS(s0_s, AR_ISR_S0_QCU_TXDESC); 124 ah->intr_txqs |= MS(s0_s, AR_ISR_S0_QCU_TXDESC);
104
105 s1_s = REG_READ(ah, AR_ISR_S1_S);
106 ah->intr_txqs |= MS(s1_s, AR_ISR_S1_QCU_TXERR); 125 ah->intr_txqs |= MS(s1_s, AR_ISR_S1_QCU_TXERR);
107 ah->intr_txqs |= MS(s1_s, AR_ISR_S1_QCU_TXEOL); 126 ah->intr_txqs |= MS(s1_s, AR_ISR_S1_QCU_TXEOL);
108 } 127 }
@@ -115,13 +134,15 @@ static bool ar9002_hw_get_isr(struct ath_hw *ah, enum ath9k_int *masked,
115 *masked |= mask2; 134 *masked |= mask2;
116 } 135 }
117 136
118 if (AR_SREV_9100(ah)) 137 if (!AR_SREV_9100(ah) && (isr & AR_ISR_GENTMR)) {
119 return true;
120
121 if (isr & AR_ISR_GENTMR) {
122 u32 s5_s; 138 u32 s5_s;
123 139
124 s5_s = REG_READ(ah, AR_ISR_S5_S); 140 if (pCap->hw_caps & ATH9K_HW_CAP_RAC_SUPPORTED) {
141 s5_s = REG_READ(ah, AR_ISR_S5_S);
142 } else {
143 s5_s = REG_READ(ah, AR_ISR_S5);
144 }
145
125 ah->intr_gen_timer_trigger = 146 ah->intr_gen_timer_trigger =
126 MS(s5_s, AR_ISR_S5_GENTIMER_TRIG); 147 MS(s5_s, AR_ISR_S5_GENTIMER_TRIG);
127 148
@@ -134,8 +155,21 @@ static bool ar9002_hw_get_isr(struct ath_hw *ah, enum ath9k_int *masked,
134 if ((s5_s & AR_ISR_S5_TIM_TIMER) && 155 if ((s5_s & AR_ISR_S5_TIM_TIMER) &&
135 !(pCap->hw_caps & ATH9K_HW_CAP_AUTOSLEEP)) 156 !(pCap->hw_caps & ATH9K_HW_CAP_AUTOSLEEP))
136 *masked |= ATH9K_INT_TIM_TIMER; 157 *masked |= ATH9K_INT_TIM_TIMER;
158
159 if (!(pCap->hw_caps & ATH9K_HW_CAP_RAC_SUPPORTED)) {
160 REG_WRITE(ah, AR_ISR_S5, s5_s);
161 isr &= ~AR_ISR_GENTMR;
162 }
137 } 163 }
138 164
165 if (!(pCap->hw_caps & ATH9K_HW_CAP_RAC_SUPPORTED)) {
166 REG_WRITE(ah, AR_ISR, isr);
167 REG_READ(ah, AR_ISR);
168 }
169
170 if (AR_SREV_9100(ah))
171 return true;
172
139 if (sync_cause) { 173 if (sync_cause) {
140 if (sync_cause_p) 174 if (sync_cause_p)
141 *sync_cause_p = sync_cause; 175 *sync_cause_p = sync_cause;
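The thread running through this whole hunk is the two interrupt-acknowledge disciplines: with ATH9K_HW_CAP_RAC_SUPPORTED the AR_ISR_RAC / AR_ISR_Sx_S shadow registers clear themselves on read, while chips without RAC must read the primary registers and have the handled bits explicitly written back (and the write posted), or the interrupt fires again. Reduced to its core:

	if (pCap->hw_caps & ATH9K_HW_CAP_RAC_SUPPORTED) {
		isr = REG_READ(ah, AR_ISR_RAC);	/* read-and-clear shadow */
	} else {
		isr = REG_READ(ah, AR_ISR);	/* sticky until acked    */
		/* ... decode the bits ... */
		REG_WRITE(ah, AR_ISR, isr);	/* ack what was consumed */
		(void)REG_READ(ah, AR_ISR);	/* post the write        */
	}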
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_main.c b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
index 9a2657fdd9cc..608d739d1378 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_main.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
@@ -127,21 +127,26 @@ static void ath9k_htc_bssid_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
127 struct ath9k_vif_iter_data *iter_data = data; 127 struct ath9k_vif_iter_data *iter_data = data;
128 int i; 128 int i;
129 129
130 for (i = 0; i < ETH_ALEN; i++) 130 if (iter_data->hw_macaddr != NULL) {
131 iter_data->mask[i] &= ~(iter_data->hw_macaddr[i] ^ mac[i]); 131 for (i = 0; i < ETH_ALEN; i++)
132 iter_data->mask[i] &= ~(iter_data->hw_macaddr[i] ^ mac[i]);
133 } else {
134 iter_data->hw_macaddr = mac;
135 }
132} 136}
133 137
134static void ath9k_htc_set_bssid_mask(struct ath9k_htc_priv *priv, 138static void ath9k_htc_set_mac_bssid_mask(struct ath9k_htc_priv *priv,
135 struct ieee80211_vif *vif) 139 struct ieee80211_vif *vif)
136{ 140{
137 struct ath_common *common = ath9k_hw_common(priv->ah); 141 struct ath_common *common = ath9k_hw_common(priv->ah);
138 struct ath9k_vif_iter_data iter_data; 142 struct ath9k_vif_iter_data iter_data;
139 143
140 /* 144 /*
141 * Use the hardware MAC address as reference, the hardware uses it 145 * Pick the MAC address of the first interface as the new hardware
142 * together with the BSSID mask when matching addresses. 146 * MAC address. The hardware will use it together with the BSSID mask
147 * when matching addresses.
143 */ 148 */
144 iter_data.hw_macaddr = common->macaddr; 149 iter_data.hw_macaddr = NULL;
145 memset(&iter_data.mask, 0xff, ETH_ALEN); 150 memset(&iter_data.mask, 0xff, ETH_ALEN);
146 151
147 if (vif) 152 if (vif)
@@ -153,6 +158,10 @@ static void ath9k_htc_set_bssid_mask(struct ath9k_htc_priv *priv,
153 ath9k_htc_bssid_iter, &iter_data); 158 ath9k_htc_bssid_iter, &iter_data);
154 159
155 memcpy(common->bssidmask, iter_data.mask, ETH_ALEN); 160 memcpy(common->bssidmask, iter_data.mask, ETH_ALEN);
161
162 if (iter_data.hw_macaddr)
163 memcpy(common->macaddr, iter_data.hw_macaddr, ETH_ALEN);
164
156 ath_hw_setbssidmask(common); 165 ath_hw_setbssidmask(common);
157} 166}
158 167
@@ -1063,7 +1072,7 @@ static int ath9k_htc_add_interface(struct ieee80211_hw *hw,
1063 goto out; 1072 goto out;
1064 } 1073 }
1065 1074
1066 ath9k_htc_set_bssid_mask(priv, vif); 1075 ath9k_htc_set_mac_bssid_mask(priv, vif);
1067 1076
1068 priv->vif_slot |= (1 << avp->index); 1077 priv->vif_slot |= (1 << avp->index);
1069 priv->nvifs++; 1078 priv->nvifs++;
@@ -1128,7 +1137,7 @@ static void ath9k_htc_remove_interface(struct ieee80211_hw *hw,
1128 1137
1129 ath9k_htc_set_opmode(priv); 1138 ath9k_htc_set_opmode(priv);
1130 1139
1131 ath9k_htc_set_bssid_mask(priv, vif); 1140 ath9k_htc_set_mac_bssid_mask(priv, vif);
1132 1141
1133 /* 1142 /*
1134 * Stop ANI only if there are no associated station interfaces. 1143 * Stop ANI only if there are no associated station interfaces.
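The reworked iterator (and its twin in main.c below) builds the address/mask pair the MAC hardware filters on: the first interface's address becomes the reference, and every bit on which any other interface differs is cleared from the mask, so a frame is accepted iff (addr & mask) == (macaddr & mask). A standalone model of the computation:

	#include <stdint.h>
	#include <string.h>

	#define ETH_ALEN 6

	/* keep in `mask` only the bits every interface address agrees on */
	static void build_bssid_mask(const uint8_t addrs[][ETH_ALEN], int n,
				     uint8_t ref[ETH_ALEN],
				     uint8_t mask[ETH_ALEN])
	{
		int i, b;

		memset(mask, 0xff, ETH_ALEN);
		memcpy(ref, addrs[0], ETH_ALEN);   /* first vif = reference */
		for (i = 1; i < n; i++)
			for (b = 0; b < ETH_ALEN; b++)
				mask[b] &= ~(ref[b] ^ addrs[i][b]);
	}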
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
index 173a889f9dbb..21b764ba6400 100644
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -994,8 +994,9 @@ void ath9k_calculate_iter_data(struct ieee80211_hw *hw,
994 struct ath_common *common = ath9k_hw_common(ah); 994 struct ath_common *common = ath9k_hw_common(ah);
995 995
996 /* 996 /*
997 * Use the hardware MAC address as reference, the hardware uses it 997 * Pick the MAC address of the first interface as the new hardware
998 * together with the BSSID mask when matching addresses. 998 * MAC address. The hardware will use it together with the BSSID mask
999 * when matching addresses.
999 */ 1000 */
1000 memset(iter_data, 0, sizeof(*iter_data)); 1001 memset(iter_data, 0, sizeof(*iter_data));
1001 memset(&iter_data->mask, 0xff, ETH_ALEN); 1002 memset(&iter_data->mask, 0xff, ETH_ALEN);
diff --git a/drivers/net/wireless/rtlwifi/pci.c b/drivers/net/wireless/rtlwifi/pci.c
index 8707d1a94995..d7aa165fe677 100644
--- a/drivers/net/wireless/rtlwifi/pci.c
+++ b/drivers/net/wireless/rtlwifi/pci.c
@@ -738,6 +738,8 @@ static void _rtl_pci_rx_interrupt(struct ieee80211_hw *hw)
738 }; 738 };
739 int index = rtlpci->rx_ring[rx_queue_idx].idx; 739 int index = rtlpci->rx_ring[rx_queue_idx].idx;
740 740
741 if (rtlpci->driver_is_goingto_unload)
742 return;
741 /*RX NORMAL PKT */ 743 /*RX NORMAL PKT */
742 while (count--) { 744 while (count--) {
743 /*rx descriptor */ 745 /*rx descriptor */
@@ -1634,6 +1636,7 @@ static void rtl_pci_stop(struct ieee80211_hw *hw)
1634 */ 1636 */
1635 set_hal_stop(rtlhal); 1637 set_hal_stop(rtlhal);
1636 1638
1639 rtlpci->driver_is_goingto_unload = true;
1637 rtlpriv->cfg->ops->disable_interrupt(hw); 1640 rtlpriv->cfg->ops->disable_interrupt(hw);
1638 cancel_work_sync(&rtlpriv->works.lps_change_work); 1641 cancel_work_sync(&rtlpriv->works.lps_change_work);
1639 1642
@@ -1651,7 +1654,6 @@ static void rtl_pci_stop(struct ieee80211_hw *hw)
1651 ppsc->rfchange_inprogress = true; 1654 ppsc->rfchange_inprogress = true;
1652 spin_unlock_irqrestore(&rtlpriv->locks.rf_ps_lock, flags); 1655 spin_unlock_irqrestore(&rtlpriv->locks.rf_ps_lock, flags);
1653 1656
1654 rtlpci->driver_is_goingto_unload = true;
1655 rtlpriv->cfg->ops->hw_disable(hw); 1657 rtlpriv->cfg->ops->hw_disable(hw);
1656 /* some things are not needed if firmware not available */ 1658 /* some things are not needed if firmware not available */
1657 if (!rtlpriv->max_fw_size) 1659 if (!rtlpriv->max_fw_size)
diff --git a/drivers/net/xen-netback/common.h b/drivers/net/xen-netback/common.h
index ba30a6d9fefa..c955fc39d69a 100644
--- a/drivers/net/xen-netback/common.h
+++ b/drivers/net/xen-netback/common.h
@@ -101,6 +101,13 @@ struct xenvif_rx_meta {
101 101
102#define MAX_PENDING_REQS 256 102#define MAX_PENDING_REQS 256
103 103
104/* It's possible for an skb to have a maximal number of frags
105 * but still be less than MAX_BUFFER_OFFSET in size. Thus the
106 * worst-case number of copy operations is MAX_SKB_FRAGS per
107 * ring slot.
108 */
109#define MAX_GRANT_COPY_OPS (MAX_SKB_FRAGS * XEN_NETIF_RX_RING_SIZE)
110
104struct xenvif { 111struct xenvif {
105 /* Unique identifier for this interface. */ 112 /* Unique identifier for this interface. */
106 domid_t domid; 113 domid_t domid;
@@ -141,13 +148,13 @@ struct xenvif {
141 */ 148 */
142 bool rx_event; 149 bool rx_event;
143 150
144 /* Given MAX_BUFFER_OFFSET of 4096 the worst case is that each 151 /* This array is allocated separately as it is large */
145 * head/fragment page uses 2 copy operations because it 152 struct gnttab_copy *grant_copy_op;
146 * straddles two buffers in the frontend.
147 */
148 struct gnttab_copy grant_copy_op[2*XEN_NETIF_RX_RING_SIZE];
149 struct xenvif_rx_meta meta[2*XEN_NETIF_RX_RING_SIZE];
150 153
154 /* We create one meta structure per ring request we consume, so
155 * the maximum number is the same as the ring size.
156 */
157 struct xenvif_rx_meta meta[XEN_NETIF_RX_RING_SIZE];
151 158
152 u8 fe_dev_addr[6]; 159 u8 fe_dev_addr[6];
153 160
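(The replacement sizing trades the old per-slot factor of 2, from head
pages straddling two frontend buffers, for a per-slot factor of
MAX_SKB_FRAGS. As a worked example, assuming the usual MAX_SKB_FRAGS of 17
and a 256-entry RX ring, both of which are configuration-dependent:)

	/* worst case: every ring slot holds an skb with the maximum
	 * number of frags, one grant copy operation per frag */
	#define MAX_GRANT_COPY_OPS (MAX_SKB_FRAGS * XEN_NETIF_RX_RING_SIZE)
	/* 17 * 256 = 4352 struct gnttab_copy entries -- far too large to
	 * keep embedded in struct xenvif, hence the separate allocation
	 * made in xenvif_alloc() below */
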
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
index 1dcb9606e6e0..b9de31ea7fc4 100644
--- a/drivers/net/xen-netback/interface.c
+++ b/drivers/net/xen-netback/interface.c
@@ -34,6 +34,7 @@
 #include <linux/ethtool.h>
 #include <linux/rtnetlink.h>
 #include <linux/if_vlan.h>
+#include <linux/vmalloc.h>

 #include <xen/events.h>
 #include <asm/xen/hypercall.h>
@@ -307,6 +308,15 @@ struct xenvif *xenvif_alloc(struct device *parent, domid_t domid,
 	SET_NETDEV_DEV(dev, parent);

 	vif = netdev_priv(dev);
+
+	vif->grant_copy_op = vmalloc(sizeof(struct gnttab_copy) *
+				     MAX_GRANT_COPY_OPS);
+	if (vif->grant_copy_op == NULL) {
+		pr_warn("Could not allocate grant copy space for %s\n", name);
+		free_netdev(dev);
+		return ERR_PTR(-ENOMEM);
+	}
+
 	vif->domid  = domid;
 	vif->handle = handle;
 	vif->can_sg = 1;
@@ -488,6 +498,7 @@ void xenvif_free(struct xenvif *vif)
 
 	unregister_netdev(vif->dev);

+	vfree(vif->grant_copy_op);
 	free_netdev(vif->dev);

 	module_put(THIS_MODULE);
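(vmalloc() is the natural choice here: the array only needs to be
virtually contiguous, and a physically contiguous kmalloc() of this size
can fail under memory fragmentation. A sketch of the resulting lifetime
pairing, with names as in the patch:)

	/* xenvif_alloc(): allocate up front, fail the whole vif on ENOMEM */
	vif->grant_copy_op = vmalloc(sizeof(struct gnttab_copy) *
				     MAX_GRANT_COPY_OPS);
	if (!vif->grant_copy_op)
		return ERR_PTR(-ENOMEM);	/* after free_netdev(dev) */

	/* xenvif_free(): release after unregister_netdev() */
	vfree(vif->grant_copy_op);
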
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index 611aebee4583..4f81ac0e2f0a 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -524,7 +524,7 @@ static void xenvif_rx_action(struct xenvif *vif)
 	if (!npo.copy_prod)
 		goto done;

-	BUG_ON(npo.copy_prod > ARRAY_SIZE(vif->grant_copy_op));
+	BUG_ON(npo.copy_prod > MAX_GRANT_COPY_OPS);
 	gnttab_batch_copy(vif->grant_copy_op, npo.copy_prod);

 	while ((skb = __skb_dequeue(&rxq)) != NULL) {
@@ -1108,8 +1108,10 @@ static int checksum_setup_ip(struct xenvif *vif, struct sk_buff *skb,
 		goto out;

 	if (!skb_partial_csum_set(skb, off,
-				  offsetof(struct tcphdr, check)))
+				  offsetof(struct tcphdr, check))) {
+		err = -EPROTO;
 		goto out;
+	}

 	if (recalculate_partial_csum)
 		tcp_hdr(skb)->check =
@@ -1126,8 +1128,10 @@ static int checksum_setup_ip(struct xenvif *vif, struct sk_buff *skb,
 		goto out;

 	if (!skb_partial_csum_set(skb, off,
-				  offsetof(struct udphdr, check)))
+				  offsetof(struct udphdr, check))) {
+		err = -EPROTO;
 		goto out;
+	}

 	if (recalculate_partial_csum)
 		udp_hdr(skb)->check =
@@ -1249,8 +1253,10 @@ static int checksum_setup_ipv6(struct xenvif *vif, struct sk_buff *skb,
 		goto out;

 	if (!skb_partial_csum_set(skb, off,
-				  offsetof(struct tcphdr, check)))
+				  offsetof(struct tcphdr, check))) {
+		err = -EPROTO;
 		goto out;
+	}

 	if (recalculate_partial_csum)
 		tcp_hdr(skb)->check =
@@ -1267,8 +1273,10 @@ static int checksum_setup_ipv6(struct xenvif *vif, struct sk_buff *skb,
 		goto out;

 	if (!skb_partial_csum_set(skb, off,
-				  offsetof(struct udphdr, check)))
+				  offsetof(struct udphdr, check))) {
+		err = -EPROTO;
 		goto out;
+	}

 	if (recalculate_partial_csum)
 		udp_hdr(skb)->check =
diff --git a/drivers/of/Kconfig b/drivers/of/Kconfig
index de6f8990246f..c6973f101a3e 100644
--- a/drivers/of/Kconfig
+++ b/drivers/of/Kconfig
@@ -20,7 +20,7 @@ config OF_SELFTEST
 	depends on OF_IRQ
 	help
 	  This option builds in test cases for the device tree infrastructure
-	  that are executed one at boot time, and the results dumped to the
+	  that are executed once at boot time, and the results dumped to the
 	  console.

 	  If unsure, say N here, but this option is safe to enable.
diff --git a/drivers/of/address.c b/drivers/of/address.c
index 4b9317bdb81c..d3dd41c840f1 100644
--- a/drivers/of/address.c
+++ b/drivers/of/address.c
@@ -69,14 +69,6 @@ static u64 of_bus_default_map(__be32 *addr, const __be32 *range,
 		 (unsigned long long)cp, (unsigned long long)s,
 		 (unsigned long long)da);

-	/*
-	 * If the number of address cells is larger than 2 we assume the
-	 * mapping doesn't specify a physical address. Rather, the address
-	 * specifies an identifier that must match exactly.
-	 */
-	if (na > 2 && memcmp(range, addr, na * 4) != 0)
-		return OF_BAD_ADDR;
-
 	if (da < cp || da >= (cp + s))
 		return OF_BAD_ADDR;
 	return da - cp;
diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c
index 2fa024b97c43..758b4f8b30b7 100644
--- a/drivers/of/fdt.c
+++ b/drivers/of/fdt.c
@@ -922,8 +922,16 @@ void __init unflatten_device_tree(void)
  */
 void __init unflatten_and_copy_device_tree(void)
 {
-	int size = __be32_to_cpu(initial_boot_params->totalsize);
-	void *dt = early_init_dt_alloc_memory_arch(size,
+	int size;
+	void *dt;
+
+	if (!initial_boot_params) {
+		pr_warn("No valid device tree found, continuing without\n");
+		return;
+	}
+
+	size = __be32_to_cpu(initial_boot_params->totalsize);
+	dt = early_init_dt_alloc_memory_arch(size,
 			__alignof__(struct boot_param_header));

 	if (dt) {
diff --git a/drivers/of/irq.c b/drivers/of/irq.c
index 786b0b47fae4..27212402c532 100644
--- a/drivers/of/irq.c
+++ b/drivers/of/irq.c
@@ -165,7 +165,6 @@ int of_irq_parse_raw(const __be32 *addr, struct of_phandle_args *out_irq)
 		if (of_get_property(ipar, "interrupt-controller", NULL) !=
 				NULL) {
 			pr_debug(" -> got it !\n");
-			of_node_put(old);
 			return 0;
 		}

@@ -250,8 +249,7 @@ int of_irq_parse_raw(const __be32 *addr, struct of_phandle_args *out_irq)
 		 * Successfully parsed an interrupt-map translation; copy new
 		 * interrupt specifier into the out_irq structure
 		 */
-		of_node_put(out_irq->np);
-		out_irq->np = of_node_get(newpar);
+		out_irq->np = newpar;

 		match_array = imap - newaddrsize - newintsize;
 		for (i = 0; i < newintsize; i++)
@@ -268,7 +266,6 @@ int of_irq_parse_raw(const __be32 *addr, struct of_phandle_args *out_irq)
 	}
  fail:
 	of_node_put(ipar);
-	of_node_put(out_irq->np);
 	of_node_put(newpar);

 	return -EINVAL;
diff --git a/drivers/phy/Kconfig b/drivers/phy/Kconfig
index a344f3d52361..330ef2d06567 100644
--- a/drivers/phy/Kconfig
+++ b/drivers/phy/Kconfig
@@ -24,8 +24,8 @@ config PHY_EXYNOS_MIPI_VIDEO
 config OMAP_USB2
 	tristate "OMAP USB2 PHY Driver"
 	depends on ARCH_OMAP2PLUS
+	depends on USB_PHY
 	select GENERIC_PHY
-	select USB_PHY
 	select OMAP_CONTROL_USB
 	help
 	  Enable this to support the transceiver that is part of SOC. This
@@ -36,8 +36,8 @@ config OMAP_USB2
 config TWL4030_USB
 	tristate "TWL4030 USB Transceiver Driver"
 	depends on TWL4030_CORE && REGULATOR_TWL4030 && USB_MUSB_OMAP2PLUS
+	depends on USB_PHY
 	select GENERIC_PHY
-	select USB_PHY
 	help
 	  Enable this to support the USB OTG transceiver on TWL4030
 	  family chips (including the TWL5030 and TPS659x0 devices).
diff --git a/drivers/phy/phy-core.c b/drivers/phy/phy-core.c
index 03cf8fb81554..58e0e9739028 100644
--- a/drivers/phy/phy-core.c
+++ b/drivers/phy/phy-core.c
@@ -437,23 +437,18 @@ struct phy *phy_create(struct device *dev, const struct phy_ops *ops,
 	int id;
 	struct phy *phy;

-	if (!dev) {
-		dev_WARN(dev, "no device provided for PHY\n");
-		ret = -EINVAL;
-		goto err0;
-	}
+	if (WARN_ON(!dev))
+		return ERR_PTR(-EINVAL);

 	phy = kzalloc(sizeof(*phy), GFP_KERNEL);
-	if (!phy) {
-		ret = -ENOMEM;
-		goto err0;
-	}
+	if (!phy)
+		return ERR_PTR(-ENOMEM);

 	id = ida_simple_get(&phy_ida, 0, 0, GFP_KERNEL);
 	if (id < 0) {
 		dev_err(dev, "unable to get id\n");
 		ret = id;
-		goto err0;
+		goto free_phy;
 	}

 	device_initialize(&phy->dev);
@@ -468,11 +463,11 @@ struct phy *phy_create(struct device *dev, const struct phy_ops *ops,
 
 	ret = dev_set_name(&phy->dev, "phy-%s.%d", dev_name(dev), id);
 	if (ret)
-		goto err1;
+		goto put_dev;

 	ret = device_add(&phy->dev);
 	if (ret)
-		goto err1;
+		goto put_dev;

 	if (pm_runtime_enabled(dev)) {
 		pm_runtime_enable(&phy->dev);
@@ -481,12 +476,11 @@ struct phy *phy_create(struct device *dev, const struct phy_ops *ops,
 
 	return phy;

-err1:
-	ida_remove(&phy_ida, phy->id);
+put_dev:
 	put_device(&phy->dev);
+	ida_remove(&phy_ida, phy->id);
+free_phy:
 	kfree(phy);
-
-err0:
 	return ERR_PTR(ret);
 }
 EXPORT_SYMBOL_GPL(phy_create);
diff --git a/drivers/pinctrl/pinctrl-baytrail.c b/drivers/pinctrl/pinctrl-baytrail.c
index 2832576d8b12..114f5ef4b73a 100644
--- a/drivers/pinctrl/pinctrl-baytrail.c
+++ b/drivers/pinctrl/pinctrl-baytrail.c
@@ -512,6 +512,7 @@ static const struct dev_pm_ops byt_gpio_pm_ops = {
 
 static const struct acpi_device_id byt_gpio_acpi_match[] = {
 	{ "INT33B2", 0 },
+	{ "INT33FC", 0 },
 	{ }
 };
 MODULE_DEVICE_TABLE(acpi, byt_gpio_acpi_match);
diff --git a/drivers/powercap/intel_rapl.c b/drivers/powercap/intel_rapl.c
index 2a786c504460..3c6768378a94 100644
--- a/drivers/powercap/intel_rapl.c
+++ b/drivers/powercap/intel_rapl.c
@@ -833,6 +833,11 @@ static int rapl_write_data_raw(struct rapl_domain *rd,
 	return 0;
 }

+static const struct x86_cpu_id energy_unit_quirk_ids[] = {
+	{ X86_VENDOR_INTEL, 6, 0x37},/* VLV */
+	{}
+};
+
 static int rapl_check_unit(struct rapl_package *rp, int cpu)
 {
 	u64 msr_val;
@@ -853,8 +858,11 @@ static int rapl_check_unit(struct rapl_package *rp, int cpu)
 	 * time unit: 1/time_unit_divisor Seconds
 	 */
 	value = (msr_val & ENERGY_UNIT_MASK) >> ENERGY_UNIT_OFFSET;
-	rp->energy_unit_divisor = 1 << value;
-
+	/* some CPUs have different way to calculate energy unit */
+	if (x86_match_cpu(energy_unit_quirk_ids))
+		rp->energy_unit_divisor = 1000000 / (1 << value);
+	else
+		rp->energy_unit_divisor = 1 << value;

 	value = (msr_val & POWER_UNIT_MASK) >> POWER_UNIT_OFFSET;
 	rp->power_unit_divisor = 1 << value;
@@ -941,6 +949,7 @@ static void package_power_limit_irq_restore(int package_id)
 static const struct x86_cpu_id rapl_ids[] = {
 	{ X86_VENDOR_INTEL, 6, 0x2a},/* SNB */
 	{ X86_VENDOR_INTEL, 6, 0x2d},/* SNB EP */
+	{ X86_VENDOR_INTEL, 6, 0x37},/* VLV */
 	{ X86_VENDOR_INTEL, 6, 0x3a},/* IVB */
 	{ X86_VENDOR_INTEL, 6, 0x45},/* HSW */
 	/* TODO: Add more CPU IDs after testing */
diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
index 596480022b0a..38a1257e76e1 100644
--- a/drivers/scsi/qla2xxx/qla_target.c
+++ b/drivers/scsi/qla2xxx/qla_target.c
@@ -471,7 +471,7 @@ static void qlt_schedule_sess_for_deletion(struct qla_tgt_sess *sess,
 		schedule_delayed_work(&tgt->sess_del_work, 0);
 	else
 		schedule_delayed_work(&tgt->sess_del_work,
-		    jiffies - sess->expires);
+		    sess->expires - jiffies);
 }

 /* ha->hardware_lock supposed to be held on entry */
@@ -550,13 +550,14 @@ static void qlt_del_sess_work_fn(struct delayed_work *work)
 	struct scsi_qla_host *vha = tgt->vha;
 	struct qla_hw_data *ha = vha->hw;
 	struct qla_tgt_sess *sess;
-	unsigned long flags;
+	unsigned long flags, elapsed;

 	spin_lock_irqsave(&ha->hardware_lock, flags);
 	while (!list_empty(&tgt->del_sess_list)) {
 		sess = list_entry(tgt->del_sess_list.next, typeof(*sess),
 		    del_list_entry);
-		if (time_after_eq(jiffies, sess->expires)) {
+		elapsed = jiffies;
+		if (time_after_eq(elapsed, sess->expires)) {
 			qlt_undelete_sess(sess);

 			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf004,
@@ -566,7 +567,7 @@ static void qlt_del_sess_work_fn(struct delayed_work *work)
 			ha->tgt.tgt_ops->put_sess(sess);
 		} else {
 			schedule_delayed_work(&tgt->sess_del_work,
-			    jiffies - sess->expires);
+			    sess->expires - elapsed);
 			break;
 		}
 	}
@@ -4290,6 +4291,7 @@ int qlt_lport_register(struct qla_tgt_func_tmpl *qla_tgt_ops, u64 wwpn,
 	if (rc != 0) {
 		ha->tgt.tgt_ops = NULL;
 		ha->tgt.target_lport_ptr = NULL;
+		scsi_host_put(host);
 	}
 	mutex_unlock(&qla_tgt_mutex);
 	return rc;
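(The bug being fixed is a reversed subtraction: schedule_delayed_work()
takes a relative timeout, so the remaining delay is the deadline minus
now, never the other way around. A minimal sketch of the corrected idiom,
assuming sess->expires was previously set to jiffies plus some timeout:)

	unsigned long now = jiffies;

	if (time_after_eq(now, sess->expires))
		/* deadline already passed: run the work immediately */
		schedule_delayed_work(&tgt->sess_del_work, 0);
	else
		/* jiffies remaining until the deadline */
		schedule_delayed_work(&tgt->sess_del_work,
				      sess->expires - now);
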
diff --git a/drivers/staging/comedi/drivers.c b/drivers/staging/comedi/drivers.c
index 8f02bf66e20b..4964d2a2fc7d 100644
--- a/drivers/staging/comedi/drivers.c
+++ b/drivers/staging/comedi/drivers.c
@@ -446,7 +446,7 @@ int comedi_load_firmware(struct comedi_device *dev,
 		release_firmware(fw);
 	}

-	return ret;
+	return ret < 0 ? ret : 0;
 }
 EXPORT_SYMBOL_GPL(comedi_load_firmware);

diff --git a/drivers/staging/comedi/drivers/8255_pci.c b/drivers/staging/comedi/drivers/8255_pci.c
index 432e3f9c3301..c55f234b29e6 100644
--- a/drivers/staging/comedi/drivers/8255_pci.c
+++ b/drivers/staging/comedi/drivers/8255_pci.c
@@ -63,7 +63,8 @@ enum pci_8255_boardid {
 	BOARD_ADLINK_PCI7296,
 	BOARD_CB_PCIDIO24,
 	BOARD_CB_PCIDIO24H,
-	BOARD_CB_PCIDIO48H,
+	BOARD_CB_PCIDIO48H_OLD,
+	BOARD_CB_PCIDIO48H_NEW,
 	BOARD_CB_PCIDIO96H,
 	BOARD_NI_PCIDIO96,
 	BOARD_NI_PCIDIO96B,
@@ -106,11 +107,16 @@ static const struct pci_8255_boardinfo pci_8255_boards[] = {
 		.dio_badr	= 2,
 		.n_8255		= 1,
 	},
-	[BOARD_CB_PCIDIO48H] = {
+	[BOARD_CB_PCIDIO48H_OLD] = {
 		.name		= "cb_pci-dio48h",
 		.dio_badr	= 1,
 		.n_8255		= 2,
 	},
+	[BOARD_CB_PCIDIO48H_NEW] = {
+		.name		= "cb_pci-dio48h",
+		.dio_badr	= 2,
+		.n_8255		= 2,
+	},
 	[BOARD_CB_PCIDIO96H] = {
 		.name		= "cb_pci-dio96h",
 		.dio_badr	= 2,
@@ -263,7 +269,10 @@ static DEFINE_PCI_DEVICE_TABLE(pci_8255_pci_table) = {
 	{ PCI_VDEVICE(ADLINK, 0x7296), BOARD_ADLINK_PCI7296 },
 	{ PCI_VDEVICE(CB, 0x0028), BOARD_CB_PCIDIO24 },
 	{ PCI_VDEVICE(CB, 0x0014), BOARD_CB_PCIDIO24H },
-	{ PCI_VDEVICE(CB, 0x000b), BOARD_CB_PCIDIO48H },
+	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_CB, 0x000b, 0x0000, 0x0000),
+	  .driver_data = BOARD_CB_PCIDIO48H_OLD },
+	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_CB, 0x000b, PCI_VENDOR_ID_CB, 0x000b),
+	  .driver_data = BOARD_CB_PCIDIO48H_NEW },
 	{ PCI_VDEVICE(CB, 0x0017), BOARD_CB_PCIDIO96H },
 	{ PCI_VDEVICE(NI, 0x0160), BOARD_NI_PCIDIO96 },
 	{ PCI_VDEVICE(NI, 0x1630), BOARD_NI_PCIDIO96B },
diff --git a/drivers/staging/iio/magnetometer/hmc5843.c b/drivers/staging/iio/magnetometer/hmc5843.c
index 99421f90d189..0485d7f39867 100644
--- a/drivers/staging/iio/magnetometer/hmc5843.c
+++ b/drivers/staging/iio/magnetometer/hmc5843.c
@@ -451,7 +451,12 @@ done:
 	.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE) |	\
 		BIT(IIO_CHAN_INFO_SAMP_FREQ),			\
 	.scan_index = idx,					\
-	.scan_type = IIO_ST('s', 16, 16, IIO_BE),		\
+	.scan_type = {						\
+		.sign = 's',					\
+		.realbits = 16,					\
+		.storagebits = 16,				\
+		.endianness = IIO_BE,				\
+	},							\
 	}

 static const struct iio_chan_spec hmc5843_channels[] = {
diff --git a/drivers/staging/imx-drm/imx-drm-core.c b/drivers/staging/imx-drm/imx-drm-core.c
index 6bd015ac9d68..96e4eee344ef 100644
--- a/drivers/staging/imx-drm/imx-drm-core.c
+++ b/drivers/staging/imx-drm/imx-drm-core.c
@@ -88,8 +88,9 @@ static int imx_drm_driver_unload(struct drm_device *drm)
 
 	imx_drm_device_put();

-	drm_mode_config_cleanup(imxdrm->drm);
+	drm_vblank_cleanup(imxdrm->drm);
 	drm_kms_helper_poll_fini(imxdrm->drm);
+	drm_mode_config_cleanup(imxdrm->drm);

 	return 0;
 }
@@ -199,8 +200,8 @@ static void imx_drm_driver_preclose(struct drm_device *drm,
 	if (!file->is_master)
 		return;

-	for (i = 0; i < 4; i++)
-		imx_drm_disable_vblank(drm , i);
+	for (i = 0; i < MAX_CRTC; i++)
+		imx_drm_disable_vblank(drm, i);
 }

 static const struct file_operations imx_drm_driver_fops = {
@@ -376,8 +377,6 @@ static int imx_drm_crtc_register(struct imx_drm_crtc *imx_drm_crtc)
 	struct imx_drm_device *imxdrm = __imx_drm_device();
 	int ret;

-	drm_crtc_init(imxdrm->drm, imx_drm_crtc->crtc,
-			imx_drm_crtc->imx_drm_helper_funcs.crtc_funcs);
 	ret = drm_mode_crtc_set_gamma_size(imx_drm_crtc->crtc, 256);
 	if (ret)
 		return ret;
@@ -385,6 +384,9 @@ static int imx_drm_crtc_register(struct imx_drm_crtc *imx_drm_crtc)
 	drm_crtc_helper_add(imx_drm_crtc->crtc,
 			imx_drm_crtc->imx_drm_helper_funcs.crtc_helper_funcs);

+	drm_crtc_init(imxdrm->drm, imx_drm_crtc->crtc,
+			imx_drm_crtc->imx_drm_helper_funcs.crtc_funcs);
+
 	drm_mode_group_reinit(imxdrm->drm);

 	return 0;
@@ -428,11 +430,11 @@ static int imx_drm_driver_load(struct drm_device *drm, unsigned long flags)
 	ret = drm_mode_group_init_legacy_group(imxdrm->drm,
 			&imxdrm->drm->primary->mode_group);
 	if (ret)
-		goto err_init;
+		goto err_kms;

 	ret = drm_vblank_init(imxdrm->drm, MAX_CRTC);
 	if (ret)
-		goto err_init;
+		goto err_kms;

 	/*
 	 * with vblank_disable_allowed = true, vblank interrupt will be disabled
@@ -441,12 +443,19 @@ static int imx_drm_driver_load(struct drm_device *drm, unsigned long flags)
 	 */
 	imxdrm->drm->vblank_disable_allowed = true;

-	if (!imx_drm_device_get())
+	if (!imx_drm_device_get()) {
 		ret = -EINVAL;
+		goto err_vblank;
+	}

-	ret = 0;
+	mutex_unlock(&imxdrm->mutex);
+	return 0;

-err_init:
+err_vblank:
+	drm_vblank_cleanup(drm);
+err_kms:
+	drm_kms_helper_poll_fini(drm);
+	drm_mode_config_cleanup(drm);
 	mutex_unlock(&imxdrm->mutex);

 	return ret;
@@ -492,6 +501,15 @@ int imx_drm_add_crtc(struct drm_crtc *crtc,
 
 	mutex_lock(&imxdrm->mutex);

+	/*
+	 * The vblank arrays are dimensioned by MAX_CRTC - we can't
+	 * pass IDs greater than this to those functions.
+	 */
+	if (imxdrm->pipes >= MAX_CRTC) {
+		ret = -EINVAL;
+		goto err_busy;
+	}
+
 	if (imxdrm->drm->open_count) {
 		ret = -EBUSY;
 		goto err_busy;
@@ -528,6 +546,7 @@ int imx_drm_add_crtc(struct drm_crtc *crtc,
 	return 0;

 err_register:
+	list_del(&imx_drm_crtc->list);
 	kfree(imx_drm_crtc);
 err_alloc:
 err_busy:
diff --git a/drivers/staging/imx-drm/imx-tve.c b/drivers/staging/imx-drm/imx-tve.c
index 680f4c8fa081..2c44fef8d58b 100644
--- a/drivers/staging/imx-drm/imx-tve.c
+++ b/drivers/staging/imx-drm/imx-tve.c
@@ -114,7 +114,6 @@ struct imx_tve {
 	struct drm_encoder encoder;
 	struct imx_drm_encoder *imx_drm_encoder;
 	struct device *dev;
-	spinlock_t enable_lock; /* serializes tve_enable/disable */
 	spinlock_t lock;	/* register lock */
 	bool enabled;
 	int mode;
@@ -146,10 +145,8 @@ __releases(&tve->lock)
 
 static void tve_enable(struct imx_tve *tve)
 {
-	unsigned long flags;
 	int ret;

-	spin_lock_irqsave(&tve->enable_lock, flags);
 	if (!tve->enabled) {
 		tve->enabled = true;
 		clk_prepare_enable(tve->clk);
@@ -169,23 +166,18 @@ static void tve_enable(struct imx_tve *tve)
 				 TVE_CD_SM_IEN |
 				 TVE_CD_LM_IEN |
 				 TVE_CD_MON_END_IEN);
-
-	spin_unlock_irqrestore(&tve->enable_lock, flags);
 }

 static void tve_disable(struct imx_tve *tve)
 {
-	unsigned long flags;
 	int ret;

-	spin_lock_irqsave(&tve->enable_lock, flags);
 	if (tve->enabled) {
 		tve->enabled = false;
 		ret = regmap_update_bits(tve->regmap, TVE_COM_CONF_REG,
 					 TVE_IPU_CLK_EN | TVE_EN, 0);
 		clk_disable_unprepare(tve->clk);
 	}
-	spin_unlock_irqrestore(&tve->enable_lock, flags);
 }

 static int tve_setup_tvout(struct imx_tve *tve)
@@ -601,7 +593,6 @@ static int imx_tve_probe(struct platform_device *pdev)
 
 	tve->dev = &pdev->dev;
 	spin_lock_init(&tve->lock);
-	spin_lock_init(&tve->enable_lock);

 	ddc_node = of_parse_phandle(np, "ddc", 0);
 	if (ddc_node) {
diff --git a/drivers/staging/imx-drm/ipu-v3/ipu-common.c b/drivers/staging/imx-drm/ipu-v3/ipu-common.c
index 7a22ce619ed2..97ca6924dbb3 100644
--- a/drivers/staging/imx-drm/ipu-v3/ipu-common.c
+++ b/drivers/staging/imx-drm/ipu-v3/ipu-common.c
@@ -996,35 +996,35 @@ static const struct ipu_platform_reg client_reg[] = {
 	},
 };

+static DEFINE_MUTEX(ipu_client_id_mutex);
 static int ipu_client_id;

-static int ipu_add_subdevice_pdata(struct device *dev,
-		const struct ipu_platform_reg *reg)
-{
-	struct platform_device *pdev;
-
-	pdev = platform_device_register_data(dev, reg->name, ipu_client_id++,
-			&reg->pdata, sizeof(struct ipu_platform_reg));
-
-	return PTR_ERR_OR_ZERO(pdev);
-}
-
 static int ipu_add_client_devices(struct ipu_soc *ipu)
 {
-	int ret;
-	int i;
+	struct device *dev = ipu->dev;
+	unsigned i;
+	int id, ret;
+
+	mutex_lock(&ipu_client_id_mutex);
+	id = ipu_client_id;
+	ipu_client_id += ARRAY_SIZE(client_reg);
+	mutex_unlock(&ipu_client_id_mutex);

 	for (i = 0; i < ARRAY_SIZE(client_reg); i++) {
 		const struct ipu_platform_reg *reg = &client_reg[i];
-		ret = ipu_add_subdevice_pdata(ipu->dev, reg);
-		if (ret)
+		struct platform_device *pdev;
+
+		pdev = platform_device_register_data(dev, reg->name,
+				id++, &reg->pdata, sizeof(reg->pdata));
+
+		if (IS_ERR(pdev))
 			goto err_register;
 	}

 	return 0;

 err_register:
-	platform_device_unregister_children(to_platform_device(ipu->dev));
+	platform_device_unregister_children(to_platform_device(dev));

 	return ret;
 }
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
index d70e9119e906..00867190413c 100644
--- a/drivers/target/iscsi/iscsi_target.c
+++ b/drivers/target/iscsi/iscsi_target.c
@@ -465,6 +465,7 @@ int iscsit_del_np(struct iscsi_np *np)
 		 */
 		send_sig(SIGINT, np->np_thread, 1);
 		kthread_stop(np->np_thread);
+		np->np_thread = NULL;
 	}

 	np->np_transport->iscsit_free_np(np);
@@ -823,24 +824,22 @@ int iscsit_setup_scsi_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
 	if (((hdr->flags & ISCSI_FLAG_CMD_READ) ||
 	     (hdr->flags & ISCSI_FLAG_CMD_WRITE)) && !hdr->data_length) {
 		/*
-		 * Vmware ESX v3.0 uses a modified Cisco Initiator (v3.4.2)
-		 * that adds support for RESERVE/RELEASE.  There is a bug
-		 * add with this new functionality that sets R/W bits when
-		 * neither CDB carries any READ or WRITE datapayloads.
+		 * From RFC-3720 Section 10.3.1:
+		 *
+		 * "Either or both of R and W MAY be 1 when either the
+		 * Expected Data Transfer Length and/or Bidirectional Read
+		 * Expected Data Transfer Length are 0"
+		 *
+		 * For this case, go ahead and clear the unnecessary bits
+		 * to avoid any confusion with ->data_direction.
 		 */
-		if ((hdr->cdb[0] == 0x16) || (hdr->cdb[0] == 0x17)) {
-			hdr->flags &= ~ISCSI_FLAG_CMD_READ;
-			hdr->flags &= ~ISCSI_FLAG_CMD_WRITE;
-			goto done;
-		}
+		hdr->flags &= ~ISCSI_FLAG_CMD_READ;
+		hdr->flags &= ~ISCSI_FLAG_CMD_WRITE;

-		pr_err("ISCSI_FLAG_CMD_READ or ISCSI_FLAG_CMD_WRITE"
+		pr_warn("ISCSI_FLAG_CMD_READ or ISCSI_FLAG_CMD_WRITE"
 			" set when Expected Data Transfer Length is 0 for"
-			" CDB: 0x%02x. Bad iSCSI Initiator.\n", hdr->cdb[0]);
-		return iscsit_add_reject_cmd(cmd,
-				ISCSI_REASON_BOOKMARK_INVALID, buf);
+			" CDB: 0x%02x, Fixing up flags\n", hdr->cdb[0]);
 	}
-done:

 	if (!(hdr->flags & ISCSI_FLAG_CMD_READ) &&
 	    !(hdr->flags & ISCSI_FLAG_CMD_WRITE) && (hdr->data_length != 0)) {
diff --git a/drivers/target/iscsi/iscsi_target_configfs.c b/drivers/target/iscsi/iscsi_target_configfs.c
index e3318edb233d..1c0088fe9e99 100644
--- a/drivers/target/iscsi/iscsi_target_configfs.c
+++ b/drivers/target/iscsi/iscsi_target_configfs.c
@@ -474,7 +474,8 @@ static ssize_t __iscsi_##prefix##_store_##name( \
 									\
 	if (!capable(CAP_SYS_ADMIN))					\
 		return -EPERM;						\
-									\
+	if (count >= sizeof(auth->name))				\
+		return -EINVAL;						\
 	snprintf(auth->name, sizeof(auth->name), "%s", page);		\
 	if (!strncmp("NULL", auth->name, 4))				\
 		auth->naf_flags &= ~flags;				\
diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c
index 4eb93b2b6473..e29279e6b577 100644
--- a/drivers/target/iscsi/iscsi_target_login.c
+++ b/drivers/target/iscsi/iscsi_target_login.c
@@ -1403,11 +1403,6 @@ old_sess_out:
 
 out:
 	stop = kthread_should_stop();
-	if (!stop && signal_pending(current)) {
-		spin_lock_bh(&np->np_thread_lock);
-		stop = (np->np_thread_state == ISCSI_NP_THREAD_SHUTDOWN);
-		spin_unlock_bh(&np->np_thread_lock);
-	}
 	/* Wait for another socket.. */
 	if (!stop)
 		return 1;
@@ -1415,7 +1410,6 @@ exit:
 	iscsi_stop_login_thread_timer(np);
 	spin_lock_bh(&np->np_thread_lock);
 	np->np_thread_state = ISCSI_NP_THREAD_EXIT;
-	np->np_thread = NULL;
 	spin_unlock_bh(&np->np_thread_lock);

 	return 0;
diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
index 207b340498a3..d06de84b069b 100644
--- a/drivers/target/target_core_device.c
+++ b/drivers/target/target_core_device.c
@@ -1106,6 +1106,11 @@ int se_dev_set_block_size(struct se_device *dev, u32 block_size)
 	dev->dev_attrib.block_size = block_size;
 	pr_debug("dev[%p]: SE Device block_size changed to %u\n",
 			dev, block_size);
+
+	if (dev->dev_attrib.max_bytes_per_io)
+		dev->dev_attrib.hw_max_sectors =
+			dev->dev_attrib.max_bytes_per_io / block_size;
+
 	return 0;
 }

diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c
index 0e34cda3271e..78241a53b555 100644
--- a/drivers/target/target_core_file.c
+++ b/drivers/target/target_core_file.c
@@ -66,9 +66,8 @@ static int fd_attach_hba(struct se_hba *hba, u32 host_id)
 	pr_debug("CORE_HBA[%d] - TCM FILEIO HBA Driver %s on Generic"
 		" Target Core Stack %s\n", hba->hba_id, FD_VERSION,
 		TARGET_CORE_MOD_VERSION);
-	pr_debug("CORE_HBA[%d] - Attached FILEIO HBA: %u to Generic"
-		" MaxSectors: %u\n",
-		hba->hba_id, fd_host->fd_host_id, FD_MAX_SECTORS);
+	pr_debug("CORE_HBA[%d] - Attached FILEIO HBA: %u to Generic\n",
+		hba->hba_id, fd_host->fd_host_id);

 	return 0;
 }
@@ -220,7 +219,8 @@ static int fd_configure_device(struct se_device *dev)
 	}

 	dev->dev_attrib.hw_block_size = fd_dev->fd_block_size;
-	dev->dev_attrib.hw_max_sectors = FD_MAX_SECTORS;
+	dev->dev_attrib.max_bytes_per_io = FD_MAX_BYTES;
+	dev->dev_attrib.hw_max_sectors = FD_MAX_BYTES / fd_dev->fd_block_size;
 	dev->dev_attrib.hw_queue_depth = FD_MAX_DEVICE_QUEUE_DEPTH;

 	if (fd_dev->fbd_flags & FDBD_HAS_BUFFERED_IO_WCE) {
diff --git a/drivers/target/target_core_file.h b/drivers/target/target_core_file.h
index 37ffc5bd2399..d7772c167685 100644
--- a/drivers/target/target_core_file.h
+++ b/drivers/target/target_core_file.h
@@ -7,7 +7,10 @@
 #define FD_DEVICE_QUEUE_DEPTH	32
 #define FD_MAX_DEVICE_QUEUE_DEPTH 128
 #define FD_BLOCKSIZE		512
-#define FD_MAX_SECTORS		2048
+/*
+ * Limited by the number of iovecs (2048) per vfs_[writev,readv] call
+ */
+#define FD_MAX_BYTES		8388608

 #define RRF_EMULATE_CDB		0x01
 #define RRF_GOT_LBA		0x02
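(The 8388608 figure is the iovec budget from the comment times one page
per iovec, assuming 4 KiB pages: 2048 * 4096 = 8388608 bytes. The
companion target_core_file.c change then derives the sector limit from
the configured block size, e.g.:)

	#define FD_MAX_BYTES	8388608		/* 2048 iovecs * 4096 bytes */

	/* 8388608 / 512  == 16384 sectors for 512-byte blocks,
	 * 8388608 / 4096 ==  2048 sectors for 4 KiB blocks */
	dev->dev_attrib.hw_max_sectors = FD_MAX_BYTES / fd_dev->fd_block_size;
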
diff --git a/drivers/target/target_core_tpg.c b/drivers/target/target_core_tpg.c
index f697f8baec54..2a573de19a9f 100644
--- a/drivers/target/target_core_tpg.c
+++ b/drivers/target/target_core_tpg.c
@@ -278,7 +278,6 @@ struct se_node_acl *core_tpg_check_initiator_node_acl(
 	snprintf(acl->initiatorname, TRANSPORT_IQN_LEN, "%s", initiatorname);
 	acl->se_tpg = tpg;
 	acl->acl_index = scsi_get_new_index(SCSI_AUTH_INTR_INDEX);
-	spin_lock_init(&acl->stats_lock);
 	acl->dynamic_node_acl = 1;

 	tpg->se_tpg_tfo->set_default_node_attributes(acl);
@@ -406,7 +405,6 @@ struct se_node_acl *core_tpg_add_initiator_node_acl(
 	snprintf(acl->initiatorname, TRANSPORT_IQN_LEN, "%s", initiatorname);
 	acl->se_tpg = tpg;
 	acl->acl_index = scsi_get_new_index(SCSI_AUTH_INTR_INDEX);
-	spin_lock_init(&acl->stats_lock);

 	tpg->se_tpg_tfo->set_default_node_attributes(acl);

@@ -658,15 +656,9 @@ static int core_tpg_setup_virtual_lun0(struct se_portal_group *se_tpg)
 	spin_lock_init(&lun->lun_sep_lock);
 	init_completion(&lun->lun_ref_comp);

-	ret = percpu_ref_init(&lun->lun_ref, core_tpg_lun_ref_release);
-	if (ret < 0)
-		return ret;
-
 	ret = core_tpg_post_addlun(se_tpg, lun, lun_access, dev);
-	if (ret < 0) {
-		percpu_ref_cancel_init(&lun->lun_ref);
+	if (ret < 0)
 		return ret;
-	}

 	return 0;
 }
diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
index 268b62768f2b..34aacaaae14a 100644
--- a/drivers/tty/n_tty.c
+++ b/drivers/tty/n_tty.c
@@ -93,6 +93,7 @@ struct n_tty_data {
 	size_t canon_head;
 	size_t echo_head;
 	size_t echo_commit;
+	size_t echo_mark;
 	DECLARE_BITMAP(char_map, 256);

 	/* private to n_tty_receive_overrun (single-threaded) */
@@ -336,6 +337,7 @@ static void reset_buffer_flags(struct n_tty_data *ldata)
 {
 	ldata->read_head = ldata->canon_head = ldata->read_tail = 0;
 	ldata->echo_head = ldata->echo_tail = ldata->echo_commit = 0;
+	ldata->echo_mark = 0;
 	ldata->line_start = 0;

 	ldata->erasing = 0;
@@ -787,6 +789,7 @@ static void commit_echoes(struct tty_struct *tty)
 	size_t head;

 	head = ldata->echo_head;
+	ldata->echo_mark = head;
 	old = ldata->echo_commit - ldata->echo_tail;

 	/* Process committed echoes if the accumulated # of bytes
@@ -811,10 +814,11 @@ static void process_echoes(struct tty_struct *tty)
 	size_t echoed;

 	if ((!L_ECHO(tty) && !L_ECHONL(tty)) ||
-	    ldata->echo_commit == ldata->echo_tail)
+	    ldata->echo_mark == ldata->echo_tail)
 		return;

 	mutex_lock(&ldata->output_lock);
+	ldata->echo_commit = ldata->echo_mark;
 	echoed = __process_echoes(tty);
 	mutex_unlock(&ldata->output_lock);

@@ -822,6 +826,7 @@ static void process_echoes(struct tty_struct *tty)
 		tty->ops->flush_chars(tty);
 }

+/* NB: echo_mark and echo_head should be equivalent here */
 static void flush_echoes(struct tty_struct *tty)
 {
 	struct n_tty_data *ldata = tty->disc_data;
diff --git a/drivers/tty/serial/8250/8250_dw.c b/drivers/tty/serial/8250/8250_dw.c
index 4658e3e0ec42..06525f10e364 100644
--- a/drivers/tty/serial/8250/8250_dw.c
+++ b/drivers/tty/serial/8250/8250_dw.c
@@ -96,7 +96,8 @@ static void dw8250_serial_out(struct uart_port *p, int offset, int value)
 	if (offset == UART_LCR) {
 		int tries = 1000;
 		while (tries--) {
-			if (value == p->serial_in(p, UART_LCR))
+			unsigned int lcr = p->serial_in(p, UART_LCR);
+			if ((value & ~UART_LCR_SPAR) == (lcr & ~UART_LCR_SPAR))
 				return;
 			dw8250_force_idle(p);
 			writeb(value, p->membase + (UART_LCR << p->regshift));
@@ -132,7 +133,8 @@ static void dw8250_serial_out32(struct uart_port *p, int offset, int value)
 	if (offset == UART_LCR) {
 		int tries = 1000;
 		while (tries--) {
-			if (value == p->serial_in(p, UART_LCR))
+			unsigned int lcr = p->serial_in(p, UART_LCR);
+			if ((value & ~UART_LCR_SPAR) == (lcr & ~UART_LCR_SPAR))
 				return;
 			dw8250_force_idle(p);
 			writel(value, p->membase + (UART_LCR << p->regshift));
@@ -455,6 +457,8 @@ MODULE_DEVICE_TABLE(of, dw8250_of_match);
 static const struct acpi_device_id dw8250_acpi_match[] = {
 	{ "INT33C4", 0 },
 	{ "INT33C5", 0 },
+	{ "INT3434", 0 },
+	{ "INT3435", 0 },
 	{ "80860F0A", 0 },
 	{ },
 };
diff --git a/drivers/tty/serial/xilinx_uartps.c b/drivers/tty/serial/xilinx_uartps.c
index e46e9f3f19b9..f619ad5b5eae 100644
--- a/drivers/tty/serial/xilinx_uartps.c
+++ b/drivers/tty/serial/xilinx_uartps.c
@@ -240,6 +240,7 @@ static irqreturn_t xuartps_isr(int irq, void *dev_id)
 				continue;
 			}

+#ifdef SUPPORT_SYSRQ
 			/*
 			 * uart_handle_sysrq_char() doesn't work if
 			 * spinlocked, for some reason
@@ -253,6 +254,7 @@ static irqreturn_t xuartps_isr(int irq, void *dev_id)
 				}
 				spin_lock(&port->lock);
 			}
+#endif

 			port->icount.rx++;

diff --git a/drivers/tty/tty_ldsem.c b/drivers/tty/tty_ldsem.c
index 22fad8ad5ac2..d8a55e87877f 100644
--- a/drivers/tty/tty_ldsem.c
+++ b/drivers/tty/tty_ldsem.c
@@ -86,11 +86,21 @@ static inline long ldsem_atomic_update(long delta, struct ld_semaphore *sem)
 	return atomic_long_add_return(delta, (atomic_long_t *)&sem->count);
 }

+/*
+ * ldsem_cmpxchg() updates @*old with the last-known sem->count value.
+ * Returns 1 if count was successfully changed; @*old will have @new value.
+ * Returns 0 if count was not changed; @*old will have most recent sem->count
+ */
 static inline int ldsem_cmpxchg(long *old, long new, struct ld_semaphore *sem)
 {
-	long tmp = *old;
-	*old = atomic_long_cmpxchg(&sem->count, *old, new);
-	return *old == tmp;
+	long tmp = atomic_long_cmpxchg(&sem->count, *old, new);
+	if (tmp == *old) {
+		*old = new;
+		return 1;
+	} else {
+		*old = tmp;
+		return 0;
+	}
 }

 /*
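(With the corrected helper, a failed exchange refreshes the caller's
cached count and a successful one publishes the new value, which is
exactly what a conventional compare-and-swap retry loop expects. A
minimal caller sketch; the decrement policy shown is illustrative, not
the actual ldsem read/write accounting:)

	long count = atomic_long_read(&sem->count);

	do {
		if (count <= 0)
			break;		/* contended: fall back to slow path */
		/* retry with the refreshed count on cmpxchg failure */
	} while (!ldsem_cmpxchg(&count, count - 1, sem));
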
diff --git a/drivers/usb/chipidea/core.c b/drivers/usb/chipidea/core.c
index 5d8981c5235e..6e73f8cd60e5 100644
--- a/drivers/usb/chipidea/core.c
+++ b/drivers/usb/chipidea/core.c
@@ -642,6 +642,10 @@ static int ci_hdrc_probe(struct platform_device *pdev)
 			: CI_ROLE_GADGET;
 	}

+	/* only update vbus status for peripheral */
+	if (ci->role == CI_ROLE_GADGET)
+		ci_handle_vbus_change(ci);
+
 	ret = ci_role_start(ci, ci->role);
 	if (ret) {
 		dev_err(dev, "can't start %s role\n", ci_role(ci)->name);
diff --git a/drivers/usb/chipidea/host.c b/drivers/usb/chipidea/host.c
index 59e6020ea753..526cd77563d8 100644
--- a/drivers/usb/chipidea/host.c
+++ b/drivers/usb/chipidea/host.c
@@ -88,7 +88,8 @@ static int host_start(struct ci_hdrc *ci)
 		return ret;

 disable_reg:
-	regulator_disable(ci->platdata->reg_vbus);
+	if (ci->platdata->reg_vbus)
+		regulator_disable(ci->platdata->reg_vbus);

 put_hcd:
 	usb_put_hcd(hcd);
diff --git a/drivers/usb/chipidea/udc.c b/drivers/usb/chipidea/udc.c
index b34c81969cba..69d20fbb38a2 100644
--- a/drivers/usb/chipidea/udc.c
+++ b/drivers/usb/chipidea/udc.c
@@ -1795,9 +1795,6 @@ static int udc_start(struct ci_hdrc *ci)
 	pm_runtime_no_callbacks(&ci->gadget.dev);
 	pm_runtime_enable(&ci->gadget.dev);

-	/* Update ci->vbus_active */
-	ci_handle_vbus_change(ci);
-
 	return retval;

 destroy_eps:
diff --git a/drivers/usb/class/cdc-wdm.c b/drivers/usb/class/cdc-wdm.c
index 4d387596f3f0..0b23a8639311 100644
--- a/drivers/usb/class/cdc-wdm.c
+++ b/drivers/usb/class/cdc-wdm.c
@@ -854,13 +854,11 @@ static int wdm_manage_power(struct usb_interface *intf, int on)
 {
 	/* need autopm_get/put here to ensure the usbcore sees the new value */
 	int rv = usb_autopm_get_interface(intf);
-	if (rv < 0)
-		goto err;

 	intf->needs_remote_wakeup = on;
-	usb_autopm_put_interface(intf);
-err:
-	return rv;
+	if (!rv)
+		usb_autopm_put_interface(intf);
+	return 0;
 }

 static int wdm_probe(struct usb_interface *intf, const struct usb_device_id *id)
diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
index 74f9cf02da07..a49217ae3533 100644
--- a/drivers/usb/dwc3/core.c
+++ b/drivers/usb/dwc3/core.c
@@ -455,9 +455,6 @@ static int dwc3_probe(struct platform_device *pdev)
 	if (IS_ERR(regs))
 		return PTR_ERR(regs);

-	usb_phy_set_suspend(dwc->usb2_phy, 0);
-	usb_phy_set_suspend(dwc->usb3_phy, 0);
-
 	spin_lock_init(&dwc->lock);
 	platform_set_drvdata(pdev, dwc);

@@ -488,6 +485,9 @@ static int dwc3_probe(struct platform_device *pdev)
 		goto err0;
 	}

+	usb_phy_set_suspend(dwc->usb2_phy, 0);
+	usb_phy_set_suspend(dwc->usb3_phy, 0);
+
 	ret = dwc3_event_buffers_setup(dwc);
 	if (ret) {
 		dev_err(dwc->dev, "failed to setup event buffers\n");
@@ -569,6 +569,8 @@ err2:
 	dwc3_event_buffers_cleanup(dwc);

 err1:
+	usb_phy_set_suspend(dwc->usb2_phy, 1);
+	usb_phy_set_suspend(dwc->usb3_phy, 1);
 	dwc3_core_exit(dwc);

 err0:
diff --git a/drivers/usb/host/ohci-at91.c b/drivers/usb/host/ohci-at91.c
index 418444ebb1b8..8c356af79409 100644
--- a/drivers/usb/host/ohci-at91.c
+++ b/drivers/usb/host/ohci-at91.c
@@ -136,23 +136,27 @@ static int usb_hcd_at91_probe(const struct hc_driver *driver,
 	struct ohci_hcd *ohci;
 	int retval;
 	struct usb_hcd *hcd = NULL;
-
-	if (pdev->num_resources != 2) {
-		pr_debug("hcd probe: invalid num_resources");
-		return -ENODEV;
+	struct device *dev = &pdev->dev;
+	struct resource *res;
+	int irq;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!res) {
+		dev_dbg(dev, "hcd probe: missing memory resource\n");
+		return -ENXIO;
 	}

-	if ((pdev->resource[0].flags != IORESOURCE_MEM)
-			|| (pdev->resource[1].flags != IORESOURCE_IRQ)) {
-		pr_debug("hcd probe: invalid resource type\n");
-		return -ENODEV;
+	irq = platform_get_irq(pdev, 0);
+	if (irq < 0) {
+		dev_dbg(dev, "hcd probe: missing irq resource\n");
+		return irq;
 	}

 	hcd = usb_create_hcd(driver, &pdev->dev, "at91");
 	if (!hcd)
 		return -ENOMEM;
-	hcd->rsrc_start = pdev->resource[0].start;
-	hcd->rsrc_len = resource_size(&pdev->resource[0]);
+	hcd->rsrc_start = res->start;
+	hcd->rsrc_len = resource_size(res);

 	if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len, hcd_name)) {
 		pr_debug("request_mem_region failed\n");
@@ -199,7 +203,7 @@ static int usb_hcd_at91_probe(const struct hc_driver *driver,
 	ohci->num_ports = board->ports;
 	at91_start_hc(pdev);

-	retval = usb_add_hcd(hcd, pdev->resource[1].start, IRQF_SHARED);
+	retval = usb_add_hcd(hcd, irq, IRQF_SHARED);
 	if (retval == 0)
 		return retval;

diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index b8dffd59eb25..73f5208714a4 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -128,7 +128,12 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
 		 * any other sleep) on Haswell machines with LPT and LPT-LP
 		 * with the new Intel BIOS
 		 */
-		xhci->quirks |= XHCI_SPURIOUS_WAKEUP;
+		/* Limit the quirk to only known vendors, as this triggers
+		 * yet another BIOS bug on some other machines
+		 * https://bugzilla.kernel.org/show_bug.cgi?id=66171
+		 */
+		if (pdev->subsystem_vendor == PCI_VENDOR_ID_HP)
+			xhci->quirks |= XHCI_SPURIOUS_WAKEUP;
 	}
 	if (pdev->vendor == PCI_VENDOR_ID_ETRON &&
 	    pdev->device == PCI_DEVICE_ID_ASROCK_P67) {
diff --git a/drivers/usb/phy/Kconfig b/drivers/usb/phy/Kconfig
index 08e2f39027ec..2b41c636a52a 100644
--- a/drivers/usb/phy/Kconfig
+++ b/drivers/usb/phy/Kconfig
@@ -19,8 +19,9 @@ config AB8500_USB
 	  in host mode, low speed.

 config FSL_USB2_OTG
-	bool "Freescale USB OTG Transceiver Driver"
+	tristate "Freescale USB OTG Transceiver Driver"
 	depends on USB_EHCI_FSL && USB_FSL_USB2 && PM_RUNTIME
+	depends on USB
 	select USB_OTG
 	select USB_PHY
 	help
@@ -29,6 +30,7 @@ config FSL_USB2_OTG
 config ISP1301_OMAP
 	tristate "Philips ISP1301 with OMAP OTG"
 	depends on I2C && ARCH_OMAP_OTG
+	depends on USB
 	select USB_PHY
 	help
 	  If you say yes here you get support for the Philips ISP1301
diff --git a/drivers/usb/phy/phy-tegra-usb.c b/drivers/usb/phy/phy-tegra-usb.c
index 82232acf1ab6..bbe4f8e6e8d7 100644
--- a/drivers/usb/phy/phy-tegra-usb.c
+++ b/drivers/usb/phy/phy-tegra-usb.c
@@ -876,7 +876,7 @@ static int utmi_phy_probe(struct tegra_usb_phy *tegra_phy,
 
 	tegra_phy->pad_regs = devm_ioremap(&pdev->dev, res->start,
 		resource_size(res));
-	if (!tegra_phy->regs) {
+	if (!tegra_phy->pad_regs) {
 		dev_err(&pdev->dev, "Failed to remap UTMI Pad regs\n");
 		return -ENOMEM;
 	}
diff --git a/drivers/usb/phy/phy-twl6030-usb.c b/drivers/usb/phy/phy-twl6030-usb.c
index 30e8a61552d4..bad57ce77ba5 100644
--- a/drivers/usb/phy/phy-twl6030-usb.c
+++ b/drivers/usb/phy/phy-twl6030-usb.c
@@ -127,7 +127,8 @@ static inline int twl6030_writeb(struct twl6030_usb *twl, u8 module,
 
 static inline u8 twl6030_readb(struct twl6030_usb *twl, u8 module, u8 address)
 {
-	u8 data, ret = 0;
+	u8 data;
+	int ret;

 	ret = twl_i2c_read_u8(module, &data, address);
 	if (ret >= 0)
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 496b7e39d5be..cc7a24154490 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -251,6 +251,7 @@ static void option_instat_callback(struct urb *urb);
 #define ZTE_PRODUCT_MF628			0x0015
 #define ZTE_PRODUCT_MF626			0x0031
 #define ZTE_PRODUCT_MC2718			0xffe8
+#define ZTE_PRODUCT_AC2726			0xfff1

 #define BENQ_VENDOR_ID				0x04a5
 #define BENQ_PRODUCT_H10			0x4068
@@ -1453,6 +1454,7 @@ static const struct usb_device_id option_ids[] = {
 	{ USB_VENDOR_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff, 0x02, 0x01) },
 	{ USB_VENDOR_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff, 0x02, 0x05) },
 	{ USB_VENDOR_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff, 0x86, 0x10) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_AC2726, 0xff, 0xff, 0xff) },

 	{ USB_DEVICE(BENQ_VENDOR_ID, BENQ_PRODUCT_H10) },
 	{ USB_DEVICE(DLINK_VENDOR_ID, DLINK_PRODUCT_DWM_652) },
diff --git a/drivers/usb/serial/zte_ev.c b/drivers/usb/serial/zte_ev.c
index fca4c752a4ed..eae2c873b39f 100644
--- a/drivers/usb/serial/zte_ev.c
+++ b/drivers/usb/serial/zte_ev.c
@@ -281,8 +281,7 @@ static const struct usb_device_id id_table[] = {
 	{ USB_DEVICE(0x19d2, 0xfffd) },
 	{ USB_DEVICE(0x19d2, 0xfffc) },
 	{ USB_DEVICE(0x19d2, 0xfffb) },
-	/* AC2726, AC8710_V3 */
-	{ USB_DEVICE_AND_INTERFACE_INFO(0x19d2, 0xfff1, 0xff, 0xff, 0xff) },
+	/* AC8710_V3 */
 	{ USB_DEVICE(0x19d2, 0xfff6) },
 	{ USB_DEVICE(0x19d2, 0xfff7) },
 	{ USB_DEVICE(0x19d2, 0xfff8) },
diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c
index c444654fc33f..5c4a95b516cf 100644
--- a/drivers/virtio/virtio_balloon.c
+++ b/drivers/virtio/virtio_balloon.c
@@ -285,7 +285,7 @@ static void update_balloon_size(struct virtio_balloon *vb)
 {
 	__le32 actual = cpu_to_le32(vb->num_pages);

-	virtio_cwrite(vb->vdev, struct virtio_balloon_config, num_pages,
+	virtio_cwrite(vb->vdev, struct virtio_balloon_config, actual,
 		      &actual);
 }

diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c
index 55ea73f7c70b..4c02e2b94103 100644
--- a/drivers/xen/balloon.c
+++ b/drivers/xen/balloon.c
@@ -350,17 +350,19 @@ static enum bp_state increase_reservation(unsigned long nr_pages)
 
 		pfn = page_to_pfn(page);
 
-		set_phys_to_machine(pfn, frame_list[i]);
-
 #ifdef CONFIG_XEN_HAVE_PVMMU
-		/* Link back into the page tables if not highmem. */
-		if (xen_pv_domain() && !PageHighMem(page)) {
-			int ret;
-			ret = HYPERVISOR_update_va_mapping(
-				(unsigned long)__va(pfn << PAGE_SHIFT),
-				mfn_pte(frame_list[i], PAGE_KERNEL),
-				0);
-			BUG_ON(ret);
+		if (!xen_feature(XENFEAT_auto_translated_physmap)) {
+			set_phys_to_machine(pfn, frame_list[i]);
+
+			/* Link back into the page tables if not highmem. */
+			if (!PageHighMem(page)) {
+				int ret;
+				ret = HYPERVISOR_update_va_mapping(
+					(unsigned long)__va(pfn << PAGE_SHIFT),
+					mfn_pte(frame_list[i], PAGE_KERNEL),
+					0);
+				BUG_ON(ret);
+			}
 		}
 #endif
 
@@ -378,7 +380,6 @@ static enum bp_state decrease_reservation(unsigned long nr_pages, gfp_t gfp)
 	enum bp_state state = BP_DONE;
 	unsigned long pfn, i;
 	struct page *page;
-	struct page *scratch_page;
 	int ret;
 	struct xen_memory_reservation reservation = {
 		.address_bits = 0,
@@ -411,27 +412,29 @@ static enum bp_state decrease_reservation(unsigned long nr_pages, gfp_t gfp)
 
 		scrub_page(page);
 
+#ifdef CONFIG_XEN_HAVE_PVMMU
 		/*
 		 * Ballooned out frames are effectively replaced with
 		 * a scratch frame. Ensure direct mappings and the
 		 * p2m are consistent.
 		 */
-		scratch_page = get_balloon_scratch_page();
-#ifdef CONFIG_XEN_HAVE_PVMMU
-		if (xen_pv_domain() && !PageHighMem(page)) {
-			ret = HYPERVISOR_update_va_mapping(
-				(unsigned long)__va(pfn << PAGE_SHIFT),
-				pfn_pte(page_to_pfn(scratch_page),
-					PAGE_KERNEL_RO), 0);
-			BUG_ON(ret);
-		}
-#endif
 		if (!xen_feature(XENFEAT_auto_translated_physmap)) {
 			unsigned long p;
+			struct page *scratch_page = get_balloon_scratch_page();
+
+			if (!PageHighMem(page)) {
+				ret = HYPERVISOR_update_va_mapping(
+					(unsigned long)__va(pfn << PAGE_SHIFT),
+					pfn_pte(page_to_pfn(scratch_page),
+						PAGE_KERNEL_RO), 0);
+				BUG_ON(ret);
+			}
 			p = page_to_pfn(scratch_page);
 			__set_phys_to_machine(pfn, pfn_to_mfn(p));
+
+			put_balloon_scratch_page();
 		}
-		put_balloon_scratch_page();
+#endif
 
 		balloon_append(pfn_to_page(pfn));
 	}
@@ -627,15 +630,17 @@ static int __init balloon_init(void)
 	if (!xen_domain())
 		return -ENODEV;
 
-	for_each_online_cpu(cpu)
-	{
-		per_cpu(balloon_scratch_page, cpu) = alloc_page(GFP_KERNEL);
-		if (per_cpu(balloon_scratch_page, cpu) == NULL) {
-			pr_warn("Failed to allocate balloon_scratch_page for cpu %d\n", cpu);
-			return -ENOMEM;
+	if (!xen_feature(XENFEAT_auto_translated_physmap)) {
+		for_each_online_cpu(cpu)
+		{
+			per_cpu(balloon_scratch_page, cpu) = alloc_page(GFP_KERNEL);
+			if (per_cpu(balloon_scratch_page, cpu) == NULL) {
+				pr_warn("Failed to allocate balloon_scratch_page for cpu %d\n", cpu);
+				return -ENOMEM;
+			}
 		}
+		register_cpu_notifier(&balloon_cpu_notifier);
 	}
-	register_cpu_notifier(&balloon_cpu_notifier);
 
 	pr_info("Initialising balloon driver\n");
 
diff --git a/drivers/xen/grant-table.c b/drivers/xen/grant-table.c
index 028387192b60..aa846a48f400 100644
--- a/drivers/xen/grant-table.c
+++ b/drivers/xen/grant-table.c
@@ -1176,7 +1176,8 @@ static int gnttab_setup(void)
 		gnttab_shared.addr = xen_remap(xen_hvm_resume_frames,
 					PAGE_SIZE * max_nr_gframes);
 		if (gnttab_shared.addr == NULL) {
-			pr_warn("Failed to ioremap gnttab share frames!\n");
+			pr_warn("Failed to ioremap gnttab share frames (addr=0x%08lx)!\n",
+					xen_hvm_resume_frames);
 			return -ENOMEM;
 		}
 	}
diff --git a/drivers/xen/privcmd.c b/drivers/xen/privcmd.c
index 8e74590fa1bb..569a13b9e856 100644
--- a/drivers/xen/privcmd.c
+++ b/drivers/xen/privcmd.c
@@ -533,12 +533,17 @@ static void privcmd_close(struct vm_area_struct *vma)
 {
 	struct page **pages = vma->vm_private_data;
 	int numpgs = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
+	int rc;
 
 	if (!xen_feature(XENFEAT_auto_translated_physmap) || !numpgs || !pages)
 		return;
 
-	xen_unmap_domain_mfn_range(vma, numpgs, pages);
-	free_xenballooned_pages(numpgs, pages);
+	rc = xen_unmap_domain_mfn_range(vma, numpgs, pages);
+	if (rc == 0)
+		free_xenballooned_pages(numpgs, pages);
+	else
+		pr_crit("unable to unmap MFN range: leaking %d pages. rc=%d\n",
+			numpgs, rc);
 	kfree(pages);
 }
 
diff --git a/fs/aio.c b/fs/aio.c
index 6efb7f6cb22e..062a5f6a1448 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -244,9 +244,14 @@ static void aio_free_ring(struct kioctx *ctx)
 	int i;
 
 	for (i = 0; i < ctx->nr_pages; i++) {
+		struct page *page;
 		pr_debug("pid(%d) [%d] page->count=%d\n", current->pid, i,
 				page_count(ctx->ring_pages[i]));
-		put_page(ctx->ring_pages[i]);
+		page = ctx->ring_pages[i];
+		if (!page)
+			continue;
+		ctx->ring_pages[i] = NULL;
+		put_page(page);
 	}
 
 	put_aio_ring_file(ctx);
@@ -280,18 +285,38 @@ static int aio_migratepage(struct address_space *mapping, struct page *new,
 	unsigned long flags;
 	int rc;
 
+	rc = 0;
+
+	/* Make sure the old page hasn't already been changed */
+	spin_lock(&mapping->private_lock);
+	ctx = mapping->private_data;
+	if (ctx) {
+		pgoff_t idx;
+		spin_lock_irqsave(&ctx->completion_lock, flags);
+		idx = old->index;
+		if (idx < (pgoff_t)ctx->nr_pages) {
+			if (ctx->ring_pages[idx] != old)
+				rc = -EAGAIN;
+		} else
+			rc = -EINVAL;
+		spin_unlock_irqrestore(&ctx->completion_lock, flags);
+	} else
+		rc = -EINVAL;
+	spin_unlock(&mapping->private_lock);
+
+	if (rc != 0)
+		return rc;
+
 	/* Writeback must be complete */
 	BUG_ON(PageWriteback(old));
-	put_page(old);
+	get_page(new);
 
-	rc = migrate_page_move_mapping(mapping, new, old, NULL, mode);
+	rc = migrate_page_move_mapping(mapping, new, old, NULL, mode, 1);
 	if (rc != MIGRATEPAGE_SUCCESS) {
-		get_page(old);
+		put_page(new);
 		return rc;
 	}
 
-	get_page(new);
-
 	/* We can potentially race against kioctx teardown here. Use the
 	 * address_space's private data lock to protect the mapping's
 	 * private_data.
@@ -303,13 +328,24 @@ static int aio_migratepage(struct address_space *mapping, struct page *new,
 		spin_lock_irqsave(&ctx->completion_lock, flags);
 		migrate_page_copy(new, old);
 		idx = old->index;
-		if (idx < (pgoff_t)ctx->nr_pages)
-			ctx->ring_pages[idx] = new;
+		if (idx < (pgoff_t)ctx->nr_pages) {
+			/* And only do the move if things haven't changed */
+			if (ctx->ring_pages[idx] == old)
+				ctx->ring_pages[idx] = new;
+			else
+				rc = -EAGAIN;
+		} else
+			rc = -EINVAL;
 		spin_unlock_irqrestore(&ctx->completion_lock, flags);
 	} else
 		rc = -EBUSY;
 	spin_unlock(&mapping->private_lock);
 
+	if (rc == MIGRATEPAGE_SUCCESS)
+		put_page(old);
+	else
+		put_page(new);
+
 	return rc;
 }
 #endif
@@ -326,7 +362,7 @@ static int aio_setup_ring(struct kioctx *ctx)
 	struct aio_ring *ring;
 	unsigned nr_events = ctx->max_reqs;
 	struct mm_struct *mm = current->mm;
-	unsigned long size, populate;
+	unsigned long size, unused;
 	int nr_pages;
 	int i;
 	struct file *file;
@@ -347,6 +383,20 @@ static int aio_setup_ring(struct kioctx *ctx)
 		return -EAGAIN;
 	}
 
+	ctx->aio_ring_file = file;
+	nr_events = (PAGE_SIZE * nr_pages - sizeof(struct aio_ring))
+			/ sizeof(struct io_event);
+
+	ctx->ring_pages = ctx->internal_pages;
+	if (nr_pages > AIO_RING_PAGES) {
+		ctx->ring_pages = kcalloc(nr_pages, sizeof(struct page *),
+					  GFP_KERNEL);
+		if (!ctx->ring_pages) {
+			put_aio_ring_file(ctx);
+			return -ENOMEM;
+		}
+	}
+
 	for (i = 0; i < nr_pages; i++) {
 		struct page *page;
 		page = find_or_create_page(file->f_inode->i_mapping,
@@ -358,19 +408,14 @@ static int aio_setup_ring(struct kioctx *ctx)
 		SetPageUptodate(page);
 		SetPageDirty(page);
 		unlock_page(page);
+
+		ctx->ring_pages[i] = page;
 	}
-	ctx->aio_ring_file = file;
-	nr_events = (PAGE_SIZE * nr_pages - sizeof(struct aio_ring))
-			/ sizeof(struct io_event);
+	ctx->nr_pages = i;
 
-	ctx->ring_pages = ctx->internal_pages;
-	if (nr_pages > AIO_RING_PAGES) {
-		ctx->ring_pages = kcalloc(nr_pages, sizeof(struct page *),
-					  GFP_KERNEL);
-		if (!ctx->ring_pages) {
-			put_aio_ring_file(ctx);
-			return -ENOMEM;
-		}
+	if (unlikely(i != nr_pages)) {
+		aio_free_ring(ctx);
+		return -EAGAIN;
 	}
 
 	ctx->mmap_size = nr_pages * PAGE_SIZE;
@@ -379,9 +424,9 @@ static int aio_setup_ring(struct kioctx *ctx)
 	down_write(&mm->mmap_sem);
 	ctx->mmap_base = do_mmap_pgoff(ctx->aio_ring_file, 0, ctx->mmap_size,
 				       PROT_READ | PROT_WRITE,
-				       MAP_SHARED | MAP_POPULATE, 0, &populate);
+				       MAP_SHARED, 0, &unused);
+	up_write(&mm->mmap_sem);
 	if (IS_ERR((void *)ctx->mmap_base)) {
-		up_write(&mm->mmap_sem);
 		ctx->mmap_size = 0;
 		aio_free_ring(ctx);
 		return -EAGAIN;
@@ -389,27 +434,6 @@ static int aio_setup_ring(struct kioctx *ctx)
 
 	pr_debug("mmap address: 0x%08lx\n", ctx->mmap_base);
 
-	/* We must do this while still holding mmap_sem for write, as we
-	 * need to be protected against userspace attempting to mremap()
-	 * or munmap() the ring buffer.
-	 */
-	ctx->nr_pages = get_user_pages(current, mm, ctx->mmap_base, nr_pages,
-				       1, 0, ctx->ring_pages, NULL);
-
-	/* Dropping the reference here is safe as the page cache will hold
-	 * onto the pages for us. It is also required so that page migration
-	 * can unmap the pages and get the right reference count.
-	 */
-	for (i = 0; i < ctx->nr_pages; i++)
-		put_page(ctx->ring_pages[i]);
-
-	up_write(&mm->mmap_sem);
-
-	if (unlikely(ctx->nr_pages != nr_pages)) {
-		aio_free_ring(ctx);
-		return -EAGAIN;
-	}
-
 	ctx->user_id = ctx->mmap_base;
 	ctx->nr_events = nr_events; /* trusted copy */
 
@@ -652,7 +676,8 @@ static struct kioctx *ioctx_alloc(unsigned nr_events)
 	aio_nr += ctx->max_reqs;
 	spin_unlock(&aio_nr_lock);
 
 	percpu_ref_get(&ctx->users);	/* io_setup() will drop this ref */
+	percpu_ref_get(&ctx->reqs);	/* free_ioctx_users() will drop this */
 
 	err = ioctx_add_table(ctx, mm);
 	if (err)
diff --git a/fs/ext2/super.c b/fs/ext2/super.c
index 288534920fe5..20d6697bd638 100644
--- a/fs/ext2/super.c
+++ b/fs/ext2/super.c
@@ -1493,6 +1493,7 @@ static ssize_t ext2_quota_write(struct super_block *sb, int type,
 		    sb->s_blocksize - offset : towrite;
 
 		tmp_bh.b_state = 0;
+		tmp_bh.b_size = sb->s_blocksize;
 		err = ext2_get_block(inode, blk, &tmp_bh, 1);
 		if (err < 0)
 			goto out;
diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
index e6185031c1cc..ece55565b9cd 100644
--- a/fs/ext4/ext4.h
+++ b/fs/ext4/ext4.h
@@ -268,6 +268,16 @@ struct ext4_io_submit {
 /* Translate # of blks to # of clusters */
 #define EXT4_NUM_B2C(sbi, blks)	(((blks) + (sbi)->s_cluster_ratio - 1) >> \
 				 (sbi)->s_cluster_bits)
+/* Mask out the low bits to get the starting block of the cluster */
+#define EXT4_PBLK_CMASK(s, pblk) ((pblk) &				\
+				  ~((ext4_fsblk_t) (s)->s_cluster_ratio - 1))
+#define EXT4_LBLK_CMASK(s, lblk) ((lblk) &				\
+				  ~((ext4_lblk_t) (s)->s_cluster_ratio - 1))
+/* Get the cluster offset */
+#define EXT4_PBLK_COFF(s, pblk) ((pblk) &				\
+				 ((ext4_fsblk_t) (s)->s_cluster_ratio - 1))
+#define EXT4_LBLK_COFF(s, lblk) ((lblk) &				\
+				 ((ext4_lblk_t) (s)->s_cluster_ratio - 1))
 
 /*
  * Structure of a blocks group descriptor
diff --git a/fs/ext4/ext4_jbd2.c b/fs/ext4/ext4_jbd2.c
index 17ac112ab101..3fe29de832c8 100644
--- a/fs/ext4/ext4_jbd2.c
+++ b/fs/ext4/ext4_jbd2.c
@@ -259,6 +259,15 @@ int __ext4_handle_dirty_metadata(const char *where, unsigned int line,
 	if (WARN_ON_ONCE(err)) {
 		ext4_journal_abort_handle(where, line, __func__, bh,
 					  handle, err);
+		ext4_error_inode(inode, where, line,
+				 bh->b_blocknr,
+				 "journal_dirty_metadata failed: "
+				 "handle type %u started at line %u, "
+				 "credits %u/%u, errcode %d",
+				 handle->h_type,
+				 handle->h_line_no,
+				 handle->h_requested_credits,
+				 handle->h_buffer_credits, err);
 	}
 } else {
 	if (inode)
diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
index 35f65cf4f318..4410cc3d6ee2 100644
--- a/fs/ext4/extents.c
+++ b/fs/ext4/extents.c
@@ -360,8 +360,10 @@ static int ext4_valid_extent(struct inode *inode, struct ext4_extent *ext)
 {
 	ext4_fsblk_t block = ext4_ext_pblock(ext);
 	int len = ext4_ext_get_actual_len(ext);
+	ext4_lblk_t lblock = le32_to_cpu(ext->ee_block);
+	ext4_lblk_t last = lblock + len - 1;
 
-	if (len == 0)
+	if (lblock > last)
 		return 0;
 	return ext4_data_block_valid(EXT4_SB(inode->i_sb), block, len);
 }
@@ -387,11 +389,26 @@ static int ext4_valid_extent_entries(struct inode *inode,
 	if (depth == 0) {
 		/* leaf entries */
 		struct ext4_extent *ext = EXT_FIRST_EXTENT(eh);
+		struct ext4_super_block *es = EXT4_SB(inode->i_sb)->s_es;
+		ext4_fsblk_t pblock = 0;
+		ext4_lblk_t lblock = 0;
+		ext4_lblk_t prev = 0;
+		int len = 0;
 		while (entries) {
 			if (!ext4_valid_extent(inode, ext))
 				return 0;
+
+			/* Check for overlapping extents */
+			lblock = le32_to_cpu(ext->ee_block);
+			len = ext4_ext_get_actual_len(ext);
+			if ((lblock <= prev) && prev) {
+				pblock = ext4_ext_pblock(ext);
+				es->s_last_error_block = cpu_to_le64(pblock);
+				return 0;
+			}
 			ext++;
 			entries--;
+			prev = lblock + len - 1;
 		}
 	} else {
 		struct ext4_extent_idx *ext_idx = EXT_FIRST_INDEX(eh);
@@ -1834,8 +1851,7 @@ static unsigned int ext4_ext_check_overlap(struct ext4_sb_info *sbi,
 	depth = ext_depth(inode);
 	if (!path[depth].p_ext)
 		goto out;
-	b2 = le32_to_cpu(path[depth].p_ext->ee_block);
-	b2 &= ~(sbi->s_cluster_ratio - 1);
+	b2 = EXT4_LBLK_CMASK(sbi, le32_to_cpu(path[depth].p_ext->ee_block));
 
 	/*
 	 * get the next allocated block if the extent in the path
@@ -1845,7 +1861,7 @@ static unsigned int ext4_ext_check_overlap(struct ext4_sb_info *sbi,
 		b2 = ext4_ext_next_allocated_block(path);
 		if (b2 == EXT_MAX_BLOCKS)
 			goto out;
-		b2 &= ~(sbi->s_cluster_ratio - 1);
+		b2 = EXT4_LBLK_CMASK(sbi, b2);
 	}
 
 	/* check for wrap through zero on extent logical start block*/
@@ -2504,7 +2520,7 @@ static int ext4_remove_blocks(handle_t *handle, struct inode *inode,
 	 * extent, we have to mark the cluster as used (store negative
 	 * cluster number in partial_cluster).
 	 */
-	unaligned = pblk & (sbi->s_cluster_ratio - 1);
+	unaligned = EXT4_PBLK_COFF(sbi, pblk);
 	if (unaligned && (ee_len == num) &&
 	    (*partial_cluster != -((long long)EXT4_B2C(sbi, pblk))))
 		*partial_cluster = EXT4_B2C(sbi, pblk);
@@ -2598,7 +2614,7 @@ ext4_ext_rm_leaf(handle_t *handle, struct inode *inode,
 			 * accidentally freeing it later on
 			 */
 			pblk = ext4_ext_pblock(ex);
-			if (pblk & (sbi->s_cluster_ratio - 1))
+			if (EXT4_PBLK_COFF(sbi, pblk))
 				*partial_cluster =
 					-((long long)EXT4_B2C(sbi, pblk));
 			ex--;
@@ -3753,7 +3769,7 @@ int ext4_find_delalloc_cluster(struct inode *inode, ext4_lblk_t lblk)
 {
 	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
 	ext4_lblk_t lblk_start, lblk_end;
-	lblk_start = lblk & (~(sbi->s_cluster_ratio - 1));
+	lblk_start = EXT4_LBLK_CMASK(sbi, lblk);
 	lblk_end = lblk_start + sbi->s_cluster_ratio - 1;
 
 	return ext4_find_delalloc_range(inode, lblk_start, lblk_end);
@@ -3812,9 +3828,9 @@ get_reserved_cluster_alloc(struct inode *inode, ext4_lblk_t lblk_start,
 	trace_ext4_get_reserved_cluster_alloc(inode, lblk_start, num_blks);
 
 	/* Check towards left side */
-	c_offset = lblk_start & (sbi->s_cluster_ratio - 1);
+	c_offset = EXT4_LBLK_COFF(sbi, lblk_start);
 	if (c_offset) {
-		lblk_from = lblk_start & (~(sbi->s_cluster_ratio - 1));
+		lblk_from = EXT4_LBLK_CMASK(sbi, lblk_start);
 		lblk_to = lblk_from + c_offset - 1;
 
 		if (ext4_find_delalloc_range(inode, lblk_from, lblk_to))
@@ -3822,7 +3838,7 @@ get_reserved_cluster_alloc(struct inode *inode, ext4_lblk_t lblk_start,
 	}
 
 	/* Now check towards right. */
-	c_offset = (lblk_start + num_blks) & (sbi->s_cluster_ratio - 1);
+	c_offset = EXT4_LBLK_COFF(sbi, lblk_start + num_blks);
 	if (allocated_clusters && c_offset) {
 		lblk_from = lblk_start + num_blks;
 		lblk_to = lblk_from + (sbi->s_cluster_ratio - c_offset) - 1;
@@ -4030,7 +4046,7 @@ static int get_implied_cluster_alloc(struct super_block *sb,
 			struct ext4_ext_path *path)
 {
 	struct ext4_sb_info *sbi = EXT4_SB(sb);
-	ext4_lblk_t c_offset = map->m_lblk & (sbi->s_cluster_ratio-1);
+	ext4_lblk_t c_offset = EXT4_LBLK_COFF(sbi, map->m_lblk);
 	ext4_lblk_t ex_cluster_start, ex_cluster_end;
 	ext4_lblk_t rr_cluster_start;
 	ext4_lblk_t ee_block = le32_to_cpu(ex->ee_block);
@@ -4048,8 +4064,7 @@ static int get_implied_cluster_alloc(struct super_block *sb,
 	    (rr_cluster_start == ex_cluster_start)) {
 		if (rr_cluster_start == ex_cluster_end)
 			ee_start += ee_len - 1;
-		map->m_pblk = (ee_start & ~(sbi->s_cluster_ratio - 1)) +
-			c_offset;
+		map->m_pblk = EXT4_PBLK_CMASK(sbi, ee_start) + c_offset;
 		map->m_len = min(map->m_len,
 				 (unsigned) sbi->s_cluster_ratio - c_offset);
 		/*
@@ -4203,7 +4218,7 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
 	 */
 	map->m_flags &= ~EXT4_MAP_FROM_CLUSTER;
 	newex.ee_block = cpu_to_le32(map->m_lblk);
-	cluster_offset = map->m_lblk & (sbi->s_cluster_ratio-1);
+	cluster_offset = EXT4_LBLK_CMASK(sbi, map->m_lblk);
 
 	/*
 	 * If we are doing bigalloc, check to see if the extent returned
@@ -4271,7 +4286,7 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
 	 * needed so that future calls to get_implied_cluster_alloc()
 	 * work correctly.
 	 */
-	offset = map->m_lblk & (sbi->s_cluster_ratio - 1);
+	offset = EXT4_LBLK_COFF(sbi, map->m_lblk);
 	ar.len = EXT4_NUM_B2C(sbi, offset+allocated);
 	ar.goal -= offset;
 	ar.logical -= offset;
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index 075763474118..61d49ff22c81 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -1206,7 +1206,6 @@ static int ext4_journalled_write_end(struct file *file,
  */
 static int ext4_da_reserve_metadata(struct inode *inode, ext4_lblk_t lblock)
 {
-	int retries = 0;
 	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
 	struct ext4_inode_info *ei = EXT4_I(inode);
 	unsigned int md_needed;
@@ -1218,7 +1217,6 @@ static int ext4_da_reserve_metadata(struct inode *inode, ext4_lblk_t lblock)
 	 * in order to allocate nrblocks
 	 * worse case is one extent per block
 	 */
-repeat:
 	spin_lock(&ei->i_block_reservation_lock);
 	/*
 	 * ext4_calc_metadata_amount() has side effects, which we have
@@ -1238,10 +1236,6 @@ repeat:
 		ei->i_da_metadata_calc_len = save_len;
 		ei->i_da_metadata_calc_last_lblock = save_last_lblock;
 		spin_unlock(&ei->i_block_reservation_lock);
-		if (ext4_should_retry_alloc(inode->i_sb, &retries)) {
-			cond_resched();
-			goto repeat;
-		}
 		return -ENOSPC;
 	}
 	ei->i_reserved_meta_blocks += md_needed;
@@ -1255,7 +1249,6 @@ repeat:
  */
 static int ext4_da_reserve_space(struct inode *inode, ext4_lblk_t lblock)
 {
-	int retries = 0;
 	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
 	struct ext4_inode_info *ei = EXT4_I(inode);
 	unsigned int md_needed;
@@ -1277,7 +1270,6 @@ static int ext4_da_reserve_space(struct inode *inode, ext4_lblk_t lblock)
 	 * in order to allocate nrblocks
 	 * worse case is one extent per block
 	 */
-repeat:
 	spin_lock(&ei->i_block_reservation_lock);
 	/*
 	 * ext4_calc_metadata_amount() has side effects, which we have
@@ -1297,10 +1289,6 @@ repeat:
 		ei->i_da_metadata_calc_len = save_len;
 		ei->i_da_metadata_calc_last_lblock = save_last_lblock;
 		spin_unlock(&ei->i_block_reservation_lock);
-		if (ext4_should_retry_alloc(inode->i_sb, &retries)) {
-			cond_resched();
-			goto repeat;
-		}
 		dquot_release_reservation_block(inode, EXT4_C2B(sbi, 1));
 		return -ENOSPC;
 	}
diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
index 4d113efa024c..04a5c7504be9 100644
--- a/fs/ext4/mballoc.c
+++ b/fs/ext4/mballoc.c
@@ -3442,6 +3442,9 @@ static void ext4_mb_pa_callback(struct rcu_head *head)
 {
 	struct ext4_prealloc_space *pa;
 	pa = container_of(head, struct ext4_prealloc_space, u.pa_rcu);
+
+	BUG_ON(atomic_read(&pa->pa_count));
+	BUG_ON(pa->pa_deleted == 0);
 	kmem_cache_free(ext4_pspace_cachep, pa);
 }
 
@@ -3455,11 +3458,13 @@ static void ext4_mb_put_pa(struct ext4_allocation_context *ac,
 	ext4_group_t grp;
 	ext4_fsblk_t grp_blk;
 
-	if (!atomic_dec_and_test(&pa->pa_count) || pa->pa_free != 0)
-		return;
-
 	/* in this short window concurrent discard can set pa_deleted */
 	spin_lock(&pa->pa_lock);
+	if (!atomic_dec_and_test(&pa->pa_count) || pa->pa_free != 0) {
+		spin_unlock(&pa->pa_lock);
+		return;
+	}
+
 	if (pa->pa_deleted == 1) {
 		spin_unlock(&pa->pa_lock);
 		return;
@@ -4121,7 +4126,7 @@ ext4_mb_initialize_context(struct ext4_allocation_context *ac,
 	ext4_get_group_no_and_offset(sb, goal, &group, &block);
 
 	/* set up allocation goals */
-	ac->ac_b_ex.fe_logical = ar->logical & ~(sbi->s_cluster_ratio - 1);
+	ac->ac_b_ex.fe_logical = EXT4_LBLK_CMASK(sbi, ar->logical);
 	ac->ac_status = AC_STATUS_CONTINUE;
 	ac->ac_sb = sb;
 	ac->ac_inode = ar->inode;
@@ -4663,7 +4668,7 @@ void ext4_free_blocks(handle_t *handle, struct inode *inode,
 	 * blocks at the beginning or the end unless we are explicitly
 	 * requested to avoid doing so.
 	 */
-	overflow = block & (sbi->s_cluster_ratio - 1);
+	overflow = EXT4_PBLK_COFF(sbi, block);
 	if (overflow) {
 		if (flags & EXT4_FREE_BLOCKS_NOFREE_FIRST_CLUSTER) {
 			overflow = sbi->s_cluster_ratio - overflow;
@@ -4677,7 +4682,7 @@ void ext4_free_blocks(handle_t *handle, struct inode *inode,
 			count += overflow;
 		}
 	}
-	overflow = count & (sbi->s_cluster_ratio - 1);
+	overflow = EXT4_LBLK_COFF(sbi, count);
 	if (overflow) {
 		if (flags & EXT4_FREE_BLOCKS_NOFREE_LAST_CLUSTER) {
 			if (count > overflow)
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index c977f4e4e63b..1f7784de05b6 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -792,7 +792,7 @@ static void ext4_put_super(struct super_block *sb)
 	}
 
 	ext4_es_unregister_shrinker(sbi);
-	del_timer(&sbi->s_err_report);
+	del_timer_sync(&sbi->s_err_report);
 	ext4_release_system_zone(sb);
 	ext4_mb_release(sb);
 	ext4_ext_release(sb);
@@ -3316,11 +3316,19 @@ int ext4_calculate_overhead(struct super_block *sb)
 }
 
 
-static ext4_fsblk_t ext4_calculate_resv_clusters(struct ext4_sb_info *sbi)
+static ext4_fsblk_t ext4_calculate_resv_clusters(struct super_block *sb)
 {
 	ext4_fsblk_t resv_clusters;
 
 	/*
+	 * There's no need to reserve anything when we aren't using extents.
+	 * The space estimates are exact, there are no unwritten extents,
+	 * hole punching doesn't need new metadata... This is needed especially
+	 * to keep ext2/3 backward compatibility.
+	 */
+	if (!EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_EXTENTS))
+		return 0;
+	/*
 	 * By default we reserve 2% or 4096 clusters, whichever is smaller.
 	 * This should cover the situations where we can not afford to run
 	 * out of space like for example punch hole, or converting
@@ -3328,7 +3336,8 @@ static ext4_fsblk_t ext4_calculate_resv_clusters(struct ext4_sb_info *sbi)
 	 * allocation would require 1, or 2 blocks, higher numbers are
 	 * very rare.
 	 */
-	resv_clusters = ext4_blocks_count(sbi->s_es) >> sbi->s_cluster_bits;
+	resv_clusters = ext4_blocks_count(EXT4_SB(sb)->s_es) >>
+			EXT4_SB(sb)->s_cluster_bits;
 
 	do_div(resv_clusters, 50);
 	resv_clusters = min_t(ext4_fsblk_t, resv_clusters, 4096);
@@ -4071,10 +4080,10 @@ no_journal:
 			 "available");
 	}
 
-	err = ext4_reserve_clusters(sbi, ext4_calculate_resv_clusters(sbi));
+	err = ext4_reserve_clusters(sbi, ext4_calculate_resv_clusters(sb));
 	if (err) {
 		ext4_msg(sb, KERN_ERR, "failed to reserve %llu clusters for "
-			 "reserved pool", ext4_calculate_resv_clusters(sbi));
+			 "reserved pool", ext4_calculate_resv_clusters(sb));
 		goto failed_mount4a;
 	}
 
@@ -4184,7 +4193,7 @@ failed_mount_wq:
 	}
 failed_mount3:
 	ext4_es_unregister_shrinker(sbi);
-	del_timer(&sbi->s_err_report);
+	del_timer_sync(&sbi->s_err_report);
 	if (sbi->s_flex_groups)
 		ext4_kvfree(sbi->s_flex_groups);
 	percpu_counter_destroy(&sbi->s_freeclusters_counter);
diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c
index 52032647dd4a..5fa344afb49a 100644
--- a/fs/jbd2/journal.c
+++ b/fs/jbd2/journal.c
@@ -702,7 +702,7 @@ int jbd2_log_wait_commit(journal_t *journal, tid_t tid)
 	read_lock(&journal->j_state_lock);
 #ifdef CONFIG_JBD2_DEBUG
 	if (!tid_geq(journal->j_commit_request, tid)) {
-		printk(KERN_EMERG
+		printk(KERN_ERR
 		       "%s: error: j_commit_request=%d, tid=%d\n",
 		       __func__, journal->j_commit_request, tid);
 	}
@@ -718,10 +718,8 @@ int jbd2_log_wait_commit(journal_t *journal, tid_t tid)
 	}
 	read_unlock(&journal->j_state_lock);
 
-	if (unlikely(is_journal_aborted(journal))) {
-		printk(KERN_EMERG "journal commit I/O error\n");
+	if (unlikely(is_journal_aborted(journal)))
 		err = -EIO;
-	}
 	return err;
 }
 
@@ -1527,13 +1525,13 @@ static int journal_get_superblock(journal_t *journal)
 	if (JBD2_HAS_COMPAT_FEATURE(journal, JBD2_FEATURE_COMPAT_CHECKSUM) &&
 	    JBD2_HAS_INCOMPAT_FEATURE(journal, JBD2_FEATURE_INCOMPAT_CSUM_V2)) {
 		/* Can't have checksum v1 and v2 on at the same time! */
-		printk(KERN_ERR "JBD: Can't enable checksumming v1 and v2 "
+		printk(KERN_ERR "JBD2: Can't enable checksumming v1 and v2 "
 		       "at the same time!\n");
 		goto out;
 	}
 
 	if (!jbd2_verify_csum_type(journal, sb)) {
-		printk(KERN_ERR "JBD: Unknown checksum type\n");
+		printk(KERN_ERR "JBD2: Unknown checksum type\n");
 		goto out;
 	}
 
@@ -1541,7 +1539,7 @@ static int journal_get_superblock(journal_t *journal)
 	if (JBD2_HAS_INCOMPAT_FEATURE(journal, JBD2_FEATURE_INCOMPAT_CSUM_V2)) {
 		journal->j_chksum_driver = crypto_alloc_shash("crc32c", 0, 0);
 		if (IS_ERR(journal->j_chksum_driver)) {
-			printk(KERN_ERR "JBD: Cannot load crc32c driver.\n");
+			printk(KERN_ERR "JBD2: Cannot load crc32c driver.\n");
 			err = PTR_ERR(journal->j_chksum_driver);
 			journal->j_chksum_driver = NULL;
 			goto out;
@@ -1550,7 +1548,7 @@ static int journal_get_superblock(journal_t *journal)
 
 	/* Check superblock checksum */
 	if (!jbd2_superblock_csum_verify(journal, sb)) {
-		printk(KERN_ERR "JBD: journal checksum error\n");
+		printk(KERN_ERR "JBD2: journal checksum error\n");
 		goto out;
 	}
 
@@ -1836,7 +1834,7 @@ int jbd2_journal_set_features (journal_t *journal, unsigned long compat,
 			journal->j_chksum_driver = crypto_alloc_shash("crc32c",
 								      0, 0);
 			if (IS_ERR(journal->j_chksum_driver)) {
-				printk(KERN_ERR "JBD: Cannot load crc32c "
+				printk(KERN_ERR "JBD2: Cannot load crc32c "
 				       "driver.\n");
 				journal->j_chksum_driver = NULL;
 				return 0;
@@ -2645,7 +2643,7 @@ static void __exit journal_exit(void)
 #ifdef CONFIG_JBD2_DEBUG
 	int n = atomic_read(&nr_journal_heads);
 	if (n)
-		printk(KERN_EMERG "JBD2: leaked %d journal_heads!\n", n);
+		printk(KERN_ERR "JBD2: leaked %d journal_heads!\n", n);
 #endif
 	jbd2_remove_jbd_stats_proc_entry();
 	jbd2_journal_destroy_caches();
diff --git a/fs/jbd2/recovery.c b/fs/jbd2/recovery.c
index 3929c50428b1..3b6bb19d60b1 100644
--- a/fs/jbd2/recovery.c
+++ b/fs/jbd2/recovery.c
@@ -594,7 +594,7 @@ static int do_one_pass(journal_t *journal,
 					   be32_to_cpu(tmp->h_sequence))) {
 					brelse(obh);
 					success = -EIO;
-					printk(KERN_ERR "JBD: Invalid "
+					printk(KERN_ERR "JBD2: Invalid "
 					       "checksum recovering "
 					       "block %llu in log\n",
 					       blocknr);
diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c
index 7aa9a32573bb..8360674c85bc 100644
--- a/fs/jbd2/transaction.c
+++ b/fs/jbd2/transaction.c
@@ -932,7 +932,7 @@ repeat:
 				jbd2_alloc(jh2bh(jh)->b_size,
 					   GFP_NOFS);
 			if (!frozen_buffer) {
-				printk(KERN_EMERG
+				printk(KERN_ERR
 				       "%s: OOM for frozen_buffer\n",
 				       __func__);
 				JBUFFER_TRACE(jh, "oom!");
@@ -1166,7 +1166,7 @@ repeat:
 	if (!jh->b_committed_data) {
 		committed_data = jbd2_alloc(jh2bh(jh)->b_size, GFP_NOFS);
 		if (!committed_data) {
-			printk(KERN_EMERG "%s: No memory for committed data\n",
+			printk(KERN_ERR "%s: No memory for committed data\n",
 			       __func__);
 			err = -ENOMEM;
 			goto out;
@@ -1290,7 +1290,10 @@ int jbd2_journal_dirty_metadata(handle_t *handle, struct buffer_head *bh)
 		 * once a transaction -bzzz
 		 */
 		jh->b_modified = 1;
-		J_ASSERT_JH(jh, handle->h_buffer_credits > 0);
+		if (handle->h_buffer_credits <= 0) {
+			ret = -ENOSPC;
+			goto out_unlock_bh;
+		}
 		handle->h_buffer_credits--;
 	}
 
@@ -1305,7 +1308,7 @@ int jbd2_journal_dirty_metadata(handle_t *handle, struct buffer_head *bh)
 		JBUFFER_TRACE(jh, "fastpath");
 		if (unlikely(jh->b_transaction !=
 			     journal->j_running_transaction)) {
-			printk(KERN_EMERG "JBD: %s: "
+			printk(KERN_ERR "JBD2: %s: "
 			       "jh->b_transaction (%llu, %p, %u) != "
 			       "journal->j_running_transaction (%p, %u)",
 			       journal->j_devname,
@@ -1332,7 +1335,7 @@ int jbd2_journal_dirty_metadata(handle_t *handle, struct buffer_head *bh)
 		JBUFFER_TRACE(jh, "already on other transaction");
 		if (unlikely(jh->b_transaction !=
 			     journal->j_committing_transaction)) {
-			printk(KERN_EMERG "JBD: %s: "
+			printk(KERN_ERR "JBD2: %s: "
 			       "jh->b_transaction (%llu, %p, %u) != "
 			       "journal->j_committing_transaction (%p, %u)",
 			       journal->j_devname,
@@ -1345,7 +1348,7 @@ int jbd2_journal_dirty_metadata(handle_t *handle, struct buffer_head *bh)
 			ret = -EINVAL;
 		}
 		if (unlikely(jh->b_next_transaction != transaction)) {
-			printk(KERN_EMERG "JBD: %s: "
+			printk(KERN_ERR "JBD2: %s: "
 			       "jh->b_next_transaction (%llu, %p, %u) != "
 			       "transaction (%p, %u)",
 			       journal->j_devname,
@@ -1373,7 +1376,6 @@ out_unlock_bh:
 	jbd2_journal_put_journal_head(jh);
 out:
 	JBUFFER_TRACE(jh, "exit");
-	WARN_ON(ret);	/* All errors are bugs, so dump the stack */
 	return ret;
 }
 
diff --git a/fs/pstore/platform.c b/fs/pstore/platform.c
index b8e93a40a5d3..78c3c2097787 100644
--- a/fs/pstore/platform.c
+++ b/fs/pstore/platform.c
@@ -443,8 +443,11 @@ int pstore_register(struct pstore_info *psi)
 	pstore_get_records(0);
 
 	kmsg_dump_register(&pstore_dumper);
-	pstore_register_console();
-	pstore_register_ftrace();
+
+	if ((psi->flags & PSTORE_FLAGS_FRAGILE) == 0) {
+		pstore_register_console();
+		pstore_register_ftrace();
+	}
 
 	if (pstore_update_ms >= 0) {
 		pstore_timer.expires = jiffies +
diff --git a/fs/sysfs/file.c b/fs/sysfs/file.c
index b94f93685093..35e7d08fe629 100644
--- a/fs/sysfs/file.c
+++ b/fs/sysfs/file.c
@@ -609,7 +609,7 @@ static int sysfs_open_file(struct inode *inode, struct file *file)
 	struct sysfs_dirent *attr_sd = file->f_path.dentry->d_fsdata;
 	struct kobject *kobj = attr_sd->s_parent->s_dir.kobj;
 	struct sysfs_open_file *of;
-	bool has_read, has_write, has_mmap;
+	bool has_read, has_write;
 	int error = -EACCES;
 
 	/* need attr_sd for attr and ops, its parent for kobj */
@@ -621,7 +621,6 @@ static int sysfs_open_file(struct inode *inode, struct file *file)
 
 		has_read = battr->read || battr->mmap;
 		has_write = battr->write || battr->mmap;
-		has_mmap = battr->mmap;
 	} else {
 		const struct sysfs_ops *ops = sysfs_file_ops(attr_sd);
 
@@ -633,7 +632,6 @@ static int sysfs_open_file(struct inode *inode, struct file *file)
 
 		has_read = ops->show;
 		has_write = ops->store;
-		has_mmap = false;
 	}
 
 	/* check perms and supported operations */
@@ -661,9 +659,9 @@ static int sysfs_open_file(struct inode *inode, struct file *file)
 	 * open file has a separate mutex, it's okay as long as those don't
 	 * happen on the same file. At this point, we can't easily give
 	 * each file a separate locking class. Let's differentiate on
-	 * whether the file has mmap or not for now.
+	 * whether the file is bin or not for now.
 	 */
-	if (has_mmap)
+	if (sysfs_is_bin(attr_sd))
 		mutex_init(&of->mutex);
 	else
 		mutex_init(&of->mutex);
diff --git a/fs/xfs/xfs_bmap.c b/fs/xfs/xfs_bmap.c
index 3ef11b22e750..3b2c14b6f0fb 100644
--- a/fs/xfs/xfs_bmap.c
+++ b/fs/xfs/xfs_bmap.c
@@ -1635,7 +1635,7 @@ xfs_bmap_last_extent(
  * blocks at the end of the file which do not start at the previous data block,
  * we will try to align the new blocks at stripe unit boundaries.
  *
- * Returns 0 in bma->aeof if the file (fork) is empty as any new write will be
+ * Returns 1 in bma->aeof if the file (fork) is empty as any new write will be
  * at, or past the EOF.
  */
 STATIC int
@@ -1650,9 +1650,14 @@ xfs_bmap_isaeof(
 	bma->aeof = 0;
 	error = xfs_bmap_last_extent(NULL, bma->ip, whichfork, &rec,
 				     &is_empty);
-	if (error || is_empty)
+	if (error)
 		return error;
 
+	if (is_empty) {
+		bma->aeof = 1;
+		return 0;
+	}
+
 	/*
 	 * Check if we are allocation or past the last extent, or at least into
 	 * the last delayed allocated extent.
@@ -3643,10 +3648,19 @@ xfs_bmap_btalloc(
 	int		isaligned;
 	int		tryagain;
 	int		error;
+	int		stripe_align;
 
 	ASSERT(ap->length);
 
 	mp = ap->ip->i_mount;
+
+	/* stripe alignment for allocation is determined by mount parameters */
+	stripe_align = 0;
+	if (mp->m_swidth && (mp->m_flags & XFS_MOUNT_SWALLOC))
+		stripe_align = mp->m_swidth;
+	else if (mp->m_dalign)
+		stripe_align = mp->m_dalign;
+
 	align = ap->userdata ? xfs_get_extsz_hint(ap->ip) : 0;
 	if (unlikely(align)) {
 		error = xfs_bmap_extsize_align(mp, &ap->got, &ap->prev,
@@ -3655,6 +3669,8 @@ xfs_bmap_btalloc(
 		ASSERT(!error);
 		ASSERT(ap->length);
 	}
+
+
 	nullfb = *ap->firstblock == NULLFSBLOCK;
 	fb_agno = nullfb ? NULLAGNUMBER : XFS_FSB_TO_AGNO(mp, *ap->firstblock);
 	if (nullfb) {
@@ -3730,7 +3746,7 @@ xfs_bmap_btalloc(
 	 */
 	if (!ap->flist->xbf_low && ap->aeof) {
 		if (!ap->offset) {
-			args.alignment = mp->m_dalign;
+			args.alignment = stripe_align;
 			atype = args.type;
 			isaligned = 1;
 			/*
@@ -3755,13 +3771,13 @@ xfs_bmap_btalloc(
 			 * of minlen+alignment+slop doesn't go up
 			 * between the calls.
 			 */
-			if (blen > mp->m_dalign && blen <= args.maxlen)
-				nextminlen = blen - mp->m_dalign;
+			if (blen > stripe_align && blen <= args.maxlen)
+				nextminlen = blen - stripe_align;
 			else
 				nextminlen = args.minlen;
-			if (nextminlen + mp->m_dalign > args.minlen + 1)
+			if (nextminlen + stripe_align > args.minlen + 1)
 				args.minalignslop =
-					nextminlen + mp->m_dalign -
+					nextminlen + stripe_align -
 					args.minlen - 1;
 			else
 				args.minalignslop = 0;
@@ -3783,7 +3799,7 @@ xfs_bmap_btalloc(
 		 */
 		args.type = atype;
 		args.fsbno = ap->blkno;
-		args.alignment = mp->m_dalign;
+		args.alignment = stripe_align;
 		args.minlen = nextminlen;
 		args.minalignslop = 0;
 		isaligned = 1;
diff --git a/fs/xfs/xfs_bmap_util.c b/fs/xfs/xfs_bmap_util.c
index 5887e41c0323..1394106ed22d 100644
--- a/fs/xfs/xfs_bmap_util.c
+++ b/fs/xfs/xfs_bmap_util.c
@@ -1187,7 +1187,12 @@ xfs_zero_remaining_bytes(
 		XFS_BUF_UNWRITE(bp);
 		XFS_BUF_READ(bp);
 		XFS_BUF_SET_ADDR(bp, xfs_fsb_to_db(ip, imap.br_startblock));
-		xfsbdstrat(mp, bp);
+
+		if (XFS_FORCED_SHUTDOWN(mp)) {
+			error = XFS_ERROR(EIO);
+			break;
+		}
+		xfs_buf_iorequest(bp);
 		error = xfs_buf_iowait(bp);
 		if (error) {
 			xfs_buf_ioerror_alert(bp,
@@ -1200,7 +1205,12 @@ xfs_zero_remaining_bytes(
 		XFS_BUF_UNDONE(bp);
 		XFS_BUF_UNREAD(bp);
 		XFS_BUF_WRITE(bp);
-		xfsbdstrat(mp, bp);
+
+		if (XFS_FORCED_SHUTDOWN(mp)) {
+			error = XFS_ERROR(EIO);
+			break;
+		}
+		xfs_buf_iorequest(bp);
 		error = xfs_buf_iowait(bp);
 		if (error) {
 			xfs_buf_ioerror_alert(bp,
diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c
index c7f0b77dcb00..afe7645e4b2b 100644
--- a/fs/xfs/xfs_buf.c
+++ b/fs/xfs/xfs_buf.c
@@ -698,7 +698,11 @@ xfs_buf_read_uncached(
 	bp->b_flags |= XBF_READ;
 	bp->b_ops = ops;
 
-	xfsbdstrat(target->bt_mount, bp);
+	if (XFS_FORCED_SHUTDOWN(target->bt_mount)) {
+		xfs_buf_relse(bp);
+		return NULL;
+	}
+	xfs_buf_iorequest(bp);
 	xfs_buf_iowait(bp);
 	return bp;
 }
@@ -1089,7 +1093,7 @@ xfs_bioerror(
  * This is meant for userdata errors; metadata bufs come with
  * iodone functions attached, so that we can track down errors.
  */
-STATIC int
+int
 xfs_bioerror_relse(
 	struct xfs_buf	*bp)
 {
@@ -1152,7 +1156,7 @@ xfs_bwrite(
 	ASSERT(xfs_buf_islocked(bp));
 
 	bp->b_flags |= XBF_WRITE;
-	bp->b_flags &= ~(XBF_ASYNC | XBF_READ | _XBF_DELWRI_Q);
+	bp->b_flags &= ~(XBF_ASYNC | XBF_READ | _XBF_DELWRI_Q | XBF_WRITE_FAIL);
 
 	xfs_bdstrat_cb(bp);
 
@@ -1164,25 +1168,6 @@ xfs_bwrite(
 	return error;
 }
 
-/*
- * Wrapper around bdstrat so that we can stop data from going to disk in case
- * we are shutting down the filesystem.  Typically user data goes thru this
- * path; one of the exceptions is the superblock.
- */
-void
-xfsbdstrat(
-	struct xfs_mount	*mp,
-	struct xfs_buf		*bp)
-{
-	if (XFS_FORCED_SHUTDOWN(mp)) {
-		trace_xfs_bdstrat_shut(bp, _RET_IP_);
-		xfs_bioerror_relse(bp);
-		return;
-	}
-
-	xfs_buf_iorequest(bp);
-}
-
 STATIC void
 _xfs_buf_ioend(
 	xfs_buf_t	*bp,
@@ -1516,6 +1501,12 @@ xfs_wait_buftarg(
 			struct xfs_buf *bp;
 			bp = list_first_entry(&dispose, struct xfs_buf, b_lru);
 			list_del_init(&bp->b_lru);
+			if (bp->b_flags & XBF_WRITE_FAIL) {
+				xfs_alert(btp->bt_mount,
+"Corruption Alert: Buffer at block 0x%llx had permanent write failures!\n"
+"Please run xfs_repair to determine the extent of the problem.",
+					(long long)bp->b_bn);
+			}
 			xfs_buf_rele(bp);
 		}
 		if (loop++ != 0)
@@ -1799,7 +1790,7 @@ __xfs_buf_delwri_submit(
 
 	blk_start_plug(&plug);
 	list_for_each_entry_safe(bp, n, io_list, b_list) {
-		bp->b_flags &= ~(_XBF_DELWRI_Q | XBF_ASYNC);
+		bp->b_flags &= ~(_XBF_DELWRI_Q | XBF_ASYNC | XBF_WRITE_FAIL);
 		bp->b_flags |= XBF_WRITE;
 
 		if (!wait) {
diff --git a/fs/xfs/xfs_buf.h b/fs/xfs/xfs_buf.h
index e65683361017..1cf21a4a9f22 100644
--- a/fs/xfs/xfs_buf.h
+++ b/fs/xfs/xfs_buf.h
@@ -45,6 +45,7 @@ typedef enum {
 #define XBF_ASYNC	 (1 << 4) /* initiator will not wait for completion */
 #define XBF_DONE	 (1 << 5) /* all pages in the buffer uptodate */
 #define XBF_STALE	 (1 << 6) /* buffer has been staled, do not find it */
+#define XBF_WRITE_FAIL	 (1 << 24)/* async writes have failed on this buffer */
 
 /* I/O hints for the BIO layer */
 #define XBF_SYNCIO	 (1 << 10)/* treat this buffer as synchronous I/O */
@@ -70,6 +71,7 @@ typedef unsigned int xfs_buf_flags_t;
 	{ XBF_ASYNC,		"ASYNC" }, \
 	{ XBF_DONE,		"DONE" }, \
 	{ XBF_STALE,		"STALE" }, \
+	{ XBF_WRITE_FAIL,	"WRITE_FAIL" }, \
 	{ XBF_SYNCIO,		"SYNCIO" }, \
 	{ XBF_FUA,		"FUA" }, \
 	{ XBF_FLUSH,		"FLUSH" }, \
@@ -80,6 +82,7 @@ typedef unsigned int xfs_buf_flags_t;
 	{ _XBF_DELWRI_Q,	"DELWRI_Q" }, \
 	{ _XBF_COMPOUND,	"COMPOUND" }
 
+
 /*
  * Internal state flags.
  */
@@ -269,9 +272,6 @@ extern void xfs_buf_unlock(xfs_buf_t *);
 
 /* Buffer Read and Write Routines */
 extern int xfs_bwrite(struct xfs_buf *bp);
-
-extern void xfsbdstrat(struct xfs_mount *, struct xfs_buf *);
-
 extern void xfs_buf_ioend(xfs_buf_t *,	int);
 extern void xfs_buf_ioerror(xfs_buf_t *, int);
 extern void xfs_buf_ioerror_alert(struct xfs_buf *, const char *func);
@@ -282,6 +282,8 @@ extern void xfs_buf_iomove(xfs_buf_t *, size_t, size_t, void *,
282#define xfs_buf_zero(bp, off, len) \ 282#define xfs_buf_zero(bp, off, len) \
283 xfs_buf_iomove((bp), (off), (len), NULL, XBRW_ZERO) 283 xfs_buf_iomove((bp), (off), (len), NULL, XBRW_ZERO)
284 284
285extern int xfs_bioerror_relse(struct xfs_buf *);
286
285static inline int xfs_buf_geterror(xfs_buf_t *bp) 287static inline int xfs_buf_geterror(xfs_buf_t *bp)
286{ 288{
287 return bp ? bp->b_error : ENOMEM; 289 return bp ? bp->b_error : ENOMEM;
@@ -301,7 +303,8 @@ extern void xfs_buf_terminate(void);
301 303
302#define XFS_BUF_ZEROFLAGS(bp) \ 304#define XFS_BUF_ZEROFLAGS(bp) \
303 ((bp)->b_flags &= ~(XBF_READ|XBF_WRITE|XBF_ASYNC| \ 305 ((bp)->b_flags &= ~(XBF_READ|XBF_WRITE|XBF_ASYNC| \
304 XBF_SYNCIO|XBF_FUA|XBF_FLUSH)) 306 XBF_SYNCIO|XBF_FUA|XBF_FLUSH| \
307 XBF_WRITE_FAIL))
305 308
306void xfs_buf_stale(struct xfs_buf *bp); 309void xfs_buf_stale(struct xfs_buf *bp);
307#define XFS_BUF_UNSTALE(bp) ((bp)->b_flags &= ~XBF_STALE) 310#define XFS_BUF_UNSTALE(bp) ((bp)->b_flags &= ~XBF_STALE)
diff --git a/fs/xfs/xfs_buf_item.c b/fs/xfs/xfs_buf_item.c
index a64f67ba25d3..2227b9b050bb 100644
--- a/fs/xfs/xfs_buf_item.c
+++ b/fs/xfs/xfs_buf_item.c
@@ -496,6 +496,14 @@ xfs_buf_item_unpin(
496 } 496 }
497} 497}
498 498
499/*
500 * Buffer IO error rate limiting. Limit it to no more than 10 messages per 30
 501 * seconds so as not to spam the logs on repeated detection of the same
 502 * buffer being bad.
503 */
504
505DEFINE_RATELIMIT_STATE(xfs_buf_write_fail_rl_state, 30 * HZ, 10);
506
499STATIC uint 507STATIC uint
500xfs_buf_item_push( 508xfs_buf_item_push(
501 struct xfs_log_item *lip, 509 struct xfs_log_item *lip,
@@ -524,6 +532,14 @@ xfs_buf_item_push(
524 532
525 trace_xfs_buf_item_push(bip); 533 trace_xfs_buf_item_push(bip);
526 534
535 /* has a previous flush failed due to IO errors? */
536 if ((bp->b_flags & XBF_WRITE_FAIL) &&
537 ___ratelimit(&xfs_buf_write_fail_rl_state, "XFS:")) {
538 xfs_warn(bp->b_target->bt_mount,
539"Detected failing async write on buffer block 0x%llx. Retrying async write.\n",
540 (long long)bp->b_bn);
541 }
542
527 if (!xfs_buf_delwri_queue(bp, buffer_list)) 543 if (!xfs_buf_delwri_queue(bp, buffer_list))
528 rval = XFS_ITEM_FLUSHING; 544 rval = XFS_ITEM_FLUSHING;
529 xfs_buf_unlock(bp); 545 xfs_buf_unlock(bp);
@@ -1096,8 +1112,9 @@ xfs_buf_iodone_callbacks(
1096 1112
1097 xfs_buf_ioerror(bp, 0); /* errno of 0 unsets the flag */ 1113 xfs_buf_ioerror(bp, 0); /* errno of 0 unsets the flag */
1098 1114
1099 if (!XFS_BUF_ISSTALE(bp)) { 1115 if (!(bp->b_flags & (XBF_STALE|XBF_WRITE_FAIL))) {
1100 bp->b_flags |= XBF_WRITE | XBF_ASYNC | XBF_DONE; 1116 bp->b_flags |= XBF_WRITE | XBF_ASYNC |
1117 XBF_DONE | XBF_WRITE_FAIL;
1101 xfs_buf_iorequest(bp); 1118 xfs_buf_iorequest(bp);
1102 } else { 1119 } else {
1103 xfs_buf_relse(bp); 1120 xfs_buf_relse(bp);
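
The DEFINE_RATELIMIT_STATE(xfs_buf_write_fail_rl_state, 30 * HZ, 10) added above caps the new warning at 10 messages per 30-second window. A minimal userspace sketch of the same fixed-window idea (the ratelimit type and helpers here are illustrative, not the kernel API):

    #include <stdbool.h>
    #include <stdio.h>
    #include <time.h>

    /* Allow at most `burst` events per `interval` seconds, mirroring the
     * semantics of DEFINE_RATELIMIT_STATE(state, 30 * HZ, 10). */
    struct ratelimit {
        time_t window_start;   /* start of the current interval */
        int    interval;       /* window length in seconds */
        int    burst;          /* events allowed per window */
        int    seen;           /* events counted in this window */
    };

    static bool ratelimit_ok(struct ratelimit *rl)
    {
        time_t now = time(NULL);

        if (now - rl->window_start >= rl->interval) {
            rl->window_start = now;   /* open a fresh window */
            rl->seen = 0;
        }
        return rl->seen++ < rl->burst;
    }

    int main(void)
    {
        struct ratelimit rl = { .window_start = 0, .interval = 30, .burst = 10 };

        for (int i = 0; i < 25; i++)
            if (ratelimit_ok(&rl))
                printf("warning %d: detected failing async write\n", i);
        /* Only the first 10 iterations print; the rest are suppressed
         * until the 30-second window rolls over. */
        return 0;
    }
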
diff --git a/fs/xfs/xfs_dir2_node.c b/fs/xfs/xfs_dir2_node.c
index 56369d4509d5..48c7d18f68c3 100644
--- a/fs/xfs/xfs_dir2_node.c
+++ b/fs/xfs/xfs_dir2_node.c
@@ -2067,12 +2067,12 @@ xfs_dir2_node_lookup(
2067 */ 2067 */
2068int /* error */ 2068int /* error */
2069xfs_dir2_node_removename( 2069xfs_dir2_node_removename(
2070 xfs_da_args_t *args) /* operation arguments */ 2070 struct xfs_da_args *args) /* operation arguments */
2071{ 2071{
2072 xfs_da_state_blk_t *blk; /* leaf block */ 2072 struct xfs_da_state_blk *blk; /* leaf block */
2073 int error; /* error return value */ 2073 int error; /* error return value */
2074 int rval; /* operation return value */ 2074 int rval; /* operation return value */
2075 xfs_da_state_t *state; /* btree cursor */ 2075 struct xfs_da_state *state; /* btree cursor */
2076 2076
2077 trace_xfs_dir2_node_removename(args); 2077 trace_xfs_dir2_node_removename(args);
2078 2078
@@ -2084,19 +2084,18 @@ xfs_dir2_node_removename(
2084 state->mp = args->dp->i_mount; 2084 state->mp = args->dp->i_mount;
2085 state->blocksize = state->mp->m_dirblksize; 2085 state->blocksize = state->mp->m_dirblksize;
2086 state->node_ents = state->mp->m_dir_node_ents; 2086 state->node_ents = state->mp->m_dir_node_ents;
2087 /* 2087
2088 * Look up the entry we're deleting, set up the cursor. 2088 /* Look up the entry we're deleting, set up the cursor. */
2089 */
2090 error = xfs_da3_node_lookup_int(state, &rval); 2089 error = xfs_da3_node_lookup_int(state, &rval);
2091 if (error) 2090 if (error)
2092 rval = error; 2091 goto out_free;
2093 /* 2092
2094 * Didn't find it, upper layer screwed up. 2093 /* Didn't find it, upper layer screwed up. */
2095 */
2096 if (rval != EEXIST) { 2094 if (rval != EEXIST) {
2097 xfs_da_state_free(state); 2095 error = rval;
2098 return rval; 2096 goto out_free;
2099 } 2097 }
2098
2100 blk = &state->path.blk[state->path.active - 1]; 2099 blk = &state->path.blk[state->path.active - 1];
2101 ASSERT(blk->magic == XFS_DIR2_LEAFN_MAGIC); 2100 ASSERT(blk->magic == XFS_DIR2_LEAFN_MAGIC);
2102 ASSERT(state->extravalid); 2101 ASSERT(state->extravalid);
@@ -2107,7 +2106,7 @@ xfs_dir2_node_removename(
2107 error = xfs_dir2_leafn_remove(args, blk->bp, blk->index, 2106 error = xfs_dir2_leafn_remove(args, blk->bp, blk->index,
2108 &state->extrablk, &rval); 2107 &state->extrablk, &rval);
2109 if (error) 2108 if (error)
2110 return error; 2109 goto out_free;
2111 /* 2110 /*
2112 * Fix the hash values up the btree. 2111 * Fix the hash values up the btree.
2113 */ 2112 */
@@ -2122,6 +2121,7 @@ xfs_dir2_node_removename(
2122 */ 2121 */
2123 if (!error) 2122 if (!error)
2124 error = xfs_dir2_node_to_leaf(state); 2123 error = xfs_dir2_node_to_leaf(state);
2124out_free:
2125 xfs_da_state_free(state); 2125 xfs_da_state_free(state);
2126 return error; 2126 return error;
2127} 2127}
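
The removename hunk above replaces scattered early returns with a single goto out_free label so the da_state is freed on every path. A standalone sketch of that single-exit cleanup pattern (types and names are made up):

    #include <stdlib.h>

    struct state { int *cursor; };

    /* Single-exit error handling: every failure path jumps to out_free,
     * so the allocation is released exactly once no matter where we bail. */
    static int do_remove(int key)
    {
        struct state *state = malloc(sizeof(*state));
        int error = 0;

        if (!state)
            return -1;

        state->cursor = malloc(sizeof(*state->cursor));
        if (!state->cursor) {
            error = -1;
            goto out_free;
        }

        if (key < 0) {              /* lookup failed: upper layer bug */
            error = -2;
            goto out_free;
        }

        *state->cursor = key;       /* the actual work */

    out_free:
        free(state->cursor);        /* free(NULL) is a no-op */
        free(state);
        return error;
    }

    int main(void)
    {
        return do_remove(42) || do_remove(-1) != -2;
    }
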
diff --git a/fs/xfs/xfs_iops.c b/fs/xfs/xfs_iops.c
index 27e0e544e963..104455b8046c 100644
--- a/fs/xfs/xfs_iops.c
+++ b/fs/xfs/xfs_iops.c
@@ -618,7 +618,8 @@ xfs_setattr_nonsize(
618 } 618 }
619 if (!gid_eq(igid, gid)) { 619 if (!gid_eq(igid, gid)) {
620 if (XFS_IS_QUOTA_RUNNING(mp) && XFS_IS_GQUOTA_ON(mp)) { 620 if (XFS_IS_QUOTA_RUNNING(mp) && XFS_IS_GQUOTA_ON(mp)) {
621 ASSERT(!XFS_IS_PQUOTA_ON(mp)); 621 ASSERT(xfs_sb_version_has_pquotino(&mp->m_sb) ||
622 !XFS_IS_PQUOTA_ON(mp));
622 ASSERT(mask & ATTR_GID); 623 ASSERT(mask & ATTR_GID);
623 ASSERT(gdqp); 624 ASSERT(gdqp);
624 olddquot2 = xfs_qm_vop_chown(tp, ip, 625 olddquot2 = xfs_qm_vop_chown(tp, ip,
diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c
index b6b669df40f3..eae16920655b 100644
--- a/fs/xfs/xfs_log_recover.c
+++ b/fs/xfs/xfs_log_recover.c
@@ -193,7 +193,10 @@ xlog_bread_noalign(
193 bp->b_io_length = nbblks; 193 bp->b_io_length = nbblks;
194 bp->b_error = 0; 194 bp->b_error = 0;
195 195
196 xfsbdstrat(log->l_mp, bp); 196 if (XFS_FORCED_SHUTDOWN(log->l_mp))
197 return XFS_ERROR(EIO);
198
199 xfs_buf_iorequest(bp);
197 error = xfs_buf_iowait(bp); 200 error = xfs_buf_iowait(bp);
198 if (error) 201 if (error)
199 xfs_buf_ioerror_alert(bp, __func__); 202 xfs_buf_ioerror_alert(bp, __func__);
@@ -4397,7 +4400,13 @@ xlog_do_recover(
4397 XFS_BUF_READ(bp); 4400 XFS_BUF_READ(bp);
4398 XFS_BUF_UNASYNC(bp); 4401 XFS_BUF_UNASYNC(bp);
4399 bp->b_ops = &xfs_sb_buf_ops; 4402 bp->b_ops = &xfs_sb_buf_ops;
4400 xfsbdstrat(log->l_mp, bp); 4403
4404 if (XFS_FORCED_SHUTDOWN(log->l_mp)) {
4405 xfs_buf_relse(bp);
4406 return XFS_ERROR(EIO);
4407 }
4408
4409 xfs_buf_iorequest(bp);
4401 error = xfs_buf_iowait(bp); 4410 error = xfs_buf_iowait(bp);
4402 if (error) { 4411 if (error) {
4403 xfs_buf_ioerror_alert(bp, __func__); 4412 xfs_buf_ioerror_alert(bp, __func__);
diff --git a/fs/xfs/xfs_qm.c b/fs/xfs/xfs_qm.c
index 14a4996cfec6..dd88f0e27bd8 100644
--- a/fs/xfs/xfs_qm.c
+++ b/fs/xfs/xfs_qm.c
@@ -134,8 +134,6 @@ xfs_qm_dqpurge(
134{ 134{
135 struct xfs_mount *mp = dqp->q_mount; 135 struct xfs_mount *mp = dqp->q_mount;
136 struct xfs_quotainfo *qi = mp->m_quotainfo; 136 struct xfs_quotainfo *qi = mp->m_quotainfo;
137 struct xfs_dquot *gdqp = NULL;
138 struct xfs_dquot *pdqp = NULL;
139 137
140 xfs_dqlock(dqp); 138 xfs_dqlock(dqp);
141 if ((dqp->dq_flags & XFS_DQ_FREEING) || dqp->q_nrefs != 0) { 139 if ((dqp->dq_flags & XFS_DQ_FREEING) || dqp->q_nrefs != 0) {
@@ -143,21 +141,6 @@ xfs_qm_dqpurge(
143 return EAGAIN; 141 return EAGAIN;
144 } 142 }
145 143
146 /*
147 * If this quota has a hint attached, prepare for releasing it now.
148 */
149 gdqp = dqp->q_gdquot;
150 if (gdqp) {
151 xfs_dqlock(gdqp);
152 dqp->q_gdquot = NULL;
153 }
154
155 pdqp = dqp->q_pdquot;
156 if (pdqp) {
157 xfs_dqlock(pdqp);
158 dqp->q_pdquot = NULL;
159 }
160
161 dqp->dq_flags |= XFS_DQ_FREEING; 144 dqp->dq_flags |= XFS_DQ_FREEING;
162 145
163 xfs_dqflock(dqp); 146 xfs_dqflock(dqp);
@@ -206,11 +189,47 @@ xfs_qm_dqpurge(
206 XFS_STATS_DEC(xs_qm_dquot_unused); 189 XFS_STATS_DEC(xs_qm_dquot_unused);
207 190
208 xfs_qm_dqdestroy(dqp); 191 xfs_qm_dqdestroy(dqp);
192 return 0;
193}
194
195/*
 196 * Release the group or project dquot pointers the user dquots may be
 197 * carrying around as a hint, then purge the user dquot cache if requested.
 198 */
199STATIC int
200xfs_qm_dqpurge_hints(
201 struct xfs_dquot *dqp,
202 void *data)
203{
204 struct xfs_dquot *gdqp = NULL;
205 struct xfs_dquot *pdqp = NULL;
206 uint flags = *((uint *)data);
207
208 xfs_dqlock(dqp);
209 if (dqp->dq_flags & XFS_DQ_FREEING) {
210 xfs_dqunlock(dqp);
211 return EAGAIN;
212 }
213
214 /* If this quota has a hint attached, prepare for releasing it now */
215 gdqp = dqp->q_gdquot;
216 if (gdqp)
217 dqp->q_gdquot = NULL;
218
219 pdqp = dqp->q_pdquot;
220 if (pdqp)
221 dqp->q_pdquot = NULL;
222
223 xfs_dqunlock(dqp);
209 224
210 if (gdqp) 225 if (gdqp)
211 xfs_qm_dqput(gdqp); 226 xfs_qm_dqrele(gdqp);
212 if (pdqp) 227 if (pdqp)
213 xfs_qm_dqput(pdqp); 228 xfs_qm_dqrele(pdqp);
229
230 if (flags & XFS_QMOPT_UQUOTA)
231 return xfs_qm_dqpurge(dqp, NULL);
232
214 return 0; 233 return 0;
215} 234}
216 235
@@ -222,8 +241,18 @@ xfs_qm_dqpurge_all(
222 struct xfs_mount *mp, 241 struct xfs_mount *mp,
223 uint flags) 242 uint flags)
224{ 243{
225 if (flags & XFS_QMOPT_UQUOTA) 244 /*
 226 xfs_qm_dquot_walk(mp, XFS_DQ_USER, xfs_qm_dqpurge, NULL); 245 * We have to release the group/project dquot hints held by the user
 246 * dquots first, if there are any; otherwise we would run into an
 247 * infinite loop while walking the radix tree to purge the other dquot
 248 * types, since their refcount stays nonzero while a user dquot still
 249 * refers to them as a hint.
 250 *
 251 * Calling xfs_qm_dqpurge_hints() will also end up going through the
 252 * general xfs_qm_dqpurge() for the user dquot cache, if requested.
 253 */
254 xfs_qm_dquot_walk(mp, XFS_DQ_USER, xfs_qm_dqpurge_hints, &flags);
255
227 if (flags & XFS_QMOPT_GQUOTA) 256 if (flags & XFS_QMOPT_GQUOTA)
228 xfs_qm_dquot_walk(mp, XFS_DQ_GROUP, xfs_qm_dqpurge, NULL); 257 xfs_qm_dquot_walk(mp, XFS_DQ_GROUP, xfs_qm_dqpurge, NULL);
229 if (flags & XFS_QMOPT_PQUOTA) 258 if (flags & XFS_QMOPT_PQUOTA)
@@ -2082,24 +2111,21 @@ xfs_qm_vop_create_dqattach(
2082 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL)); 2111 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
2083 ASSERT(XFS_IS_QUOTA_RUNNING(mp)); 2112 ASSERT(XFS_IS_QUOTA_RUNNING(mp));
2084 2113
2085 if (udqp) { 2114 if (udqp && XFS_IS_UQUOTA_ON(mp)) {
2086 ASSERT(ip->i_udquot == NULL); 2115 ASSERT(ip->i_udquot == NULL);
2087 ASSERT(XFS_IS_UQUOTA_ON(mp));
2088 ASSERT(ip->i_d.di_uid == be32_to_cpu(udqp->q_core.d_id)); 2116 ASSERT(ip->i_d.di_uid == be32_to_cpu(udqp->q_core.d_id));
2089 2117
2090 ip->i_udquot = xfs_qm_dqhold(udqp); 2118 ip->i_udquot = xfs_qm_dqhold(udqp);
2091 xfs_trans_mod_dquot(tp, udqp, XFS_TRANS_DQ_ICOUNT, 1); 2119 xfs_trans_mod_dquot(tp, udqp, XFS_TRANS_DQ_ICOUNT, 1);
2092 } 2120 }
2093 if (gdqp) { 2121 if (gdqp && XFS_IS_GQUOTA_ON(mp)) {
2094 ASSERT(ip->i_gdquot == NULL); 2122 ASSERT(ip->i_gdquot == NULL);
2095 ASSERT(XFS_IS_GQUOTA_ON(mp));
2096 ASSERT(ip->i_d.di_gid == be32_to_cpu(gdqp->q_core.d_id)); 2123 ASSERT(ip->i_d.di_gid == be32_to_cpu(gdqp->q_core.d_id));
2097 ip->i_gdquot = xfs_qm_dqhold(gdqp); 2124 ip->i_gdquot = xfs_qm_dqhold(gdqp);
2098 xfs_trans_mod_dquot(tp, gdqp, XFS_TRANS_DQ_ICOUNT, 1); 2125 xfs_trans_mod_dquot(tp, gdqp, XFS_TRANS_DQ_ICOUNT, 1);
2099 } 2126 }
2100 if (pdqp) { 2127 if (pdqp && XFS_IS_PQUOTA_ON(mp)) {
2101 ASSERT(ip->i_pdquot == NULL); 2128 ASSERT(ip->i_pdquot == NULL);
2102 ASSERT(XFS_IS_PQUOTA_ON(mp));
2103 ASSERT(xfs_get_projid(ip) == be32_to_cpu(pdqp->q_core.d_id)); 2129 ASSERT(xfs_get_projid(ip) == be32_to_cpu(pdqp->q_core.d_id));
2104 2130
2105 ip->i_pdquot = xfs_qm_dqhold(pdqp); 2131 ip->i_pdquot = xfs_qm_dqhold(pdqp);
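
The comment above describes the deadlock-by-refcount problem: group/project dquots pinned as hints can never reach a zero refcount, so a purge walk would retry forever. A hedged sketch of why dropping the back-reference first unblocks the purge (all names are stand-ins):

    #include <assert.h>
    #include <stddef.h>

    /* A "user" object pins a "group" object through a cached hint pointer,
     * so purging the group fails (refcount != 0) until the hint is
     * released first. */
    struct obj { int refs; };

    struct user { struct obj self; struct obj *group_hint; };

    static int purge(struct obj *o)
    {
        return o->refs == 0 ? 0 : -1;    /* EAGAIN-like failure */
    }

    static void drop_hint(struct user *u)
    {
        if (u->group_hint) {
            u->group_hint->refs--;       /* release the pinned reference */
            u->group_hint = NULL;
        }
    }

    int main(void)
    {
        struct obj group = { .refs = 1 };             /* pinned by the hint */
        struct user user = { .self = { 0 }, .group_hint = &group };

        assert(purge(&group) != 0);  /* would spin forever in a purge loop */
        drop_hint(&user);            /* what xfs_qm_dqpurge_hints() does first */
        assert(purge(&group) == 0);  /* now the group dquot can be freed */
        return 0;
    }
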
diff --git a/fs/xfs/xfs_trans_buf.c b/fs/xfs/xfs_trans_buf.c
index c035d11b7734..647b6f1d8923 100644
--- a/fs/xfs/xfs_trans_buf.c
+++ b/fs/xfs/xfs_trans_buf.c
@@ -314,7 +314,18 @@ xfs_trans_read_buf_map(
314 ASSERT(bp->b_iodone == NULL); 314 ASSERT(bp->b_iodone == NULL);
315 XFS_BUF_READ(bp); 315 XFS_BUF_READ(bp);
316 bp->b_ops = ops; 316 bp->b_ops = ops;
317 xfsbdstrat(tp->t_mountp, bp); 317
318 /*
319 * XXX(hch): clean up the error handling here to be less
320 * of a mess..
321 */
322 if (XFS_FORCED_SHUTDOWN(mp)) {
323 trace_xfs_bdstrat_shut(bp, _RET_IP_);
324 xfs_bioerror_relse(bp);
325 } else {
326 xfs_buf_iorequest(bp);
327 }
328
318 error = xfs_buf_iowait(bp); 329 error = xfs_buf_iowait(bp);
319 if (error) { 330 if (error) {
320 xfs_buf_ioerror_alert(bp, __func__); 331 xfs_buf_ioerror_alert(bp, __func__);
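
The open-coded replacement for xfsbdstrat() above checks for a forced shutdown before issuing I/O and fails the buffer immediately instead. A rough sketch of that short-circuit, with stand-in types:

    #include <stdbool.h>
    #include <stdio.h>

    struct buf { int error; bool done; };

    static bool forced_shutdown = false;

    static void bioerror_relse(struct buf *bp)
    {
        bp->error = 5;      /* EIO */
        bp->done = true;    /* complete the buffer without touching the disk */
    }

    static void iorequest(struct buf *bp)
    {
        bp->done = true;    /* pretend the block layer finished the I/O */
    }

    /* If the filesystem is already shut down, issuing I/O can never
     * complete sanely, so error the buffer out right away. */
    static void submit(struct buf *bp)
    {
        if (forced_shutdown)
            bioerror_relse(bp);
        else
            iorequest(bp);
    }

    int main(void)
    {
        struct buf bp = { 0 };

        forced_shutdown = true;
        submit(&bp);
        printf("error=%d done=%d\n", bp.error, bp.done);   /* error=5 done=1 */
        return 0;
    }
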
diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
index f330d28e4d0e..db0923458940 100644
--- a/include/asm-generic/pgtable.h
+++ b/include/asm-generic/pgtable.h
@@ -217,7 +217,7 @@ static inline int pmd_same(pmd_t pmd_a, pmd_t pmd_b)
217#endif 217#endif
218 218
219#ifndef pte_accessible 219#ifndef pte_accessible
220# define pte_accessible(pte) ((void)(pte),1) 220# define pte_accessible(mm, pte) ((void)(pte), 1)
221#endif 221#endif
222 222
223#ifndef flush_tlb_fix_spurious_fault 223#ifndef flush_tlb_fix_spurious_fault
@@ -599,11 +599,10 @@ static inline int pmd_none_or_trans_huge_or_clear_bad(pmd_t *pmd)
599#ifdef CONFIG_TRANSPARENT_HUGEPAGE 599#ifdef CONFIG_TRANSPARENT_HUGEPAGE
600 barrier(); 600 barrier();
601#endif 601#endif
602 if (pmd_none(pmdval)) 602 if (pmd_none(pmdval) || pmd_trans_huge(pmdval))
603 return 1; 603 return 1;
604 if (unlikely(pmd_bad(pmdval))) { 604 if (unlikely(pmd_bad(pmdval))) {
605 if (!pmd_trans_huge(pmdval)) 605 pmd_clear_bad(pmd);
606 pmd_clear_bad(pmd);
607 return 1; 606 return 1;
608 } 607 }
609 return 0; 608 return 0;
diff --git a/include/linux/auxvec.h b/include/linux/auxvec.h
index 669fef5c745a..3e0fbe441763 100644
--- a/include/linux/auxvec.h
+++ b/include/linux/auxvec.h
@@ -3,6 +3,6 @@
3 3
4#include <uapi/linux/auxvec.h> 4#include <uapi/linux/auxvec.h>
5 5
6#define AT_VECTOR_SIZE_BASE 19 /* NEW_AUX_ENT entries in auxiliary table */ 6#define AT_VECTOR_SIZE_BASE 20 /* NEW_AUX_ENT entries in auxiliary table */
7 /* number of "#define AT_.*" above, minus {AT_NULL, AT_IGNORE, AT_NOTELF} */ 7 /* number of "#define AT_.*" above, minus {AT_NULL, AT_IGNORE, AT_NOTELF} */
8#endif /* _LINUX_AUXVEC_H */ 8#endif /* _LINUX_AUXVEC_H */
diff --git a/include/linux/libata.h b/include/linux/libata.h
index 0e23c26485f4..9b503376738f 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -418,6 +418,7 @@ enum {
418 ATA_HORKAGE_DUMP_ID = (1 << 16), /* dump IDENTIFY data */ 418 ATA_HORKAGE_DUMP_ID = (1 << 16), /* dump IDENTIFY data */
419 ATA_HORKAGE_MAX_SEC_LBA48 = (1 << 17), /* Set max sects to 65535 */ 419 ATA_HORKAGE_MAX_SEC_LBA48 = (1 << 17), /* Set max sects to 65535 */
420 ATA_HORKAGE_ATAPI_DMADIR = (1 << 18), /* device requires dmadir */ 420 ATA_HORKAGE_ATAPI_DMADIR = (1 << 18), /* device requires dmadir */
421 ATA_HORKAGE_NO_NCQ_TRIM = (1 << 19), /* don't use queued TRIM */
421 422
422 /* DMA mask for user DMA control: User visible values; DO NOT 423 /* DMA mask for user DMA control: User visible values; DO NOT
423 renumber */ 424 renumber */
diff --git a/include/linux/lockref.h b/include/linux/lockref.h
index c8929c3832db..4bfde0e99ed5 100644
--- a/include/linux/lockref.h
+++ b/include/linux/lockref.h
@@ -19,7 +19,7 @@
19 19
20#define USE_CMPXCHG_LOCKREF \ 20#define USE_CMPXCHG_LOCKREF \
21 (IS_ENABLED(CONFIG_ARCH_USE_CMPXCHG_LOCKREF) && \ 21 (IS_ENABLED(CONFIG_ARCH_USE_CMPXCHG_LOCKREF) && \
22 IS_ENABLED(CONFIG_SMP) && !BLOATED_SPINLOCKS) 22 IS_ENABLED(CONFIG_SMP) && SPINLOCK_SIZE <= 4)
23 23
24struct lockref { 24struct lockref {
25 union { 25 union {
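
The new condition SPINLOCK_SIZE <= 4 expresses the real requirement: lock and reference count must sit side by side in one 8-byte word so a single compare-and-swap can update both. A C11 sketch of that packed layout (illustrative only, not the kernel's lockref implementation):

    #include <stdatomic.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Lock (low bit) and refcount (high 32 bits) share one 64-bit word,
     * which only works when the spinlock itself fits in 4 bytes -- the
     * SPINLOCK_SIZE <= 4 condition above. */
    static _Atomic uint64_t lockref = 0;

    #define COUNT(v)   ((uint32_t)((v) >> 32))
    #define LOCKED(v)  ((uint32_t)(v) & 1)
    #define PACK(c, l) (((uint64_t)(c) << 32) | (l))

    /* Bump the count with a single CAS, but only while the lock is free. */
    static int lockref_get_not_locked(void)
    {
        uint64_t old = atomic_load(&lockref);

        while (!LOCKED(old)) {
            uint64_t next = PACK(COUNT(old) + 1, 0);
            if (atomic_compare_exchange_weak(&lockref, &old, next))
                return 1;          /* fast path: spinlock never taken */
        }
        return 0;                  /* locked: fall back to the spinlock */
    }

    int main(void)
    {
        lockref_get_not_locked();
        lockref_get_not_locked();
        printf("count=%u locked=%u\n",
               COUNT(atomic_load(&lockref)),
               LOCKED(atomic_load(&lockref)));   /* count=2 locked=0 */
        return 0;
    }

When the lock word is held, the fast path bails out and the caller takes the spinlock the slow way, which is exactly why the whole scheme is gated on the lock being small enough to pack.
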
diff --git a/include/linux/migrate.h b/include/linux/migrate.h
index f5096b58b20d..f015c059e159 100644
--- a/include/linux/migrate.h
+++ b/include/linux/migrate.h
@@ -55,7 +55,8 @@ extern int migrate_huge_page_move_mapping(struct address_space *mapping,
55 struct page *newpage, struct page *page); 55 struct page *newpage, struct page *page);
56extern int migrate_page_move_mapping(struct address_space *mapping, 56extern int migrate_page_move_mapping(struct address_space *mapping,
57 struct page *newpage, struct page *page, 57 struct page *newpage, struct page *page,
58 struct buffer_head *head, enum migrate_mode mode); 58 struct buffer_head *head, enum migrate_mode mode,
59 int extra_count);
59#else 60#else
60 61
61static inline void putback_lru_pages(struct list_head *l) {} 62static inline void putback_lru_pages(struct list_head *l) {}
@@ -90,10 +91,19 @@ static inline int migrate_huge_page_move_mapping(struct address_space *mapping,
90#endif /* CONFIG_MIGRATION */ 91#endif /* CONFIG_MIGRATION */
91 92
92#ifdef CONFIG_NUMA_BALANCING 93#ifdef CONFIG_NUMA_BALANCING
94extern bool pmd_trans_migrating(pmd_t pmd);
95extern void wait_migrate_huge_page(struct anon_vma *anon_vma, pmd_t *pmd);
93extern int migrate_misplaced_page(struct page *page, 96extern int migrate_misplaced_page(struct page *page,
94 struct vm_area_struct *vma, int node); 97 struct vm_area_struct *vma, int node);
95extern bool migrate_ratelimited(int node); 98extern bool migrate_ratelimited(int node);
96#else 99#else
100static inline bool pmd_trans_migrating(pmd_t pmd)
101{
102 return false;
103}
104static inline void wait_migrate_huge_page(struct anon_vma *anon_vma, pmd_t *pmd)
105{
106}
97static inline int migrate_misplaced_page(struct page *page, 107static inline int migrate_misplaced_page(struct page *page,
98 struct vm_area_struct *vma, int node) 108 struct vm_area_struct *vma, int node)
99{ 109{
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 1cedd000cf29..35527173cf50 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1317,7 +1317,7 @@ static inline pmd_t *pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long a
1317#endif /* CONFIG_MMU && !__ARCH_HAS_4LEVEL_HACK */ 1317#endif /* CONFIG_MMU && !__ARCH_HAS_4LEVEL_HACK */
1318 1318
1319#if USE_SPLIT_PTE_PTLOCKS 1319#if USE_SPLIT_PTE_PTLOCKS
1320#if BLOATED_SPINLOCKS 1320#if ALLOC_SPLIT_PTLOCKS
1321extern bool ptlock_alloc(struct page *page); 1321extern bool ptlock_alloc(struct page *page);
1322extern void ptlock_free(struct page *page); 1322extern void ptlock_free(struct page *page);
1323 1323
@@ -1325,7 +1325,7 @@ static inline spinlock_t *ptlock_ptr(struct page *page)
1325{ 1325{
1326 return page->ptl; 1326 return page->ptl;
1327} 1327}
1328#else /* BLOATED_SPINLOCKS */ 1328#else /* ALLOC_SPLIT_PTLOCKS */
1329static inline bool ptlock_alloc(struct page *page) 1329static inline bool ptlock_alloc(struct page *page)
1330{ 1330{
1331 return true; 1331 return true;
@@ -1339,7 +1339,7 @@ static inline spinlock_t *ptlock_ptr(struct page *page)
1339{ 1339{
1340 return &page->ptl; 1340 return &page->ptl;
1341} 1341}
1342#endif /* BLOATED_SPINLOCKS */ 1342#endif /* ALLOC_SPLIT_PTLOCKS */
1343 1343
1344static inline spinlock_t *pte_lockptr(struct mm_struct *mm, pmd_t *pmd) 1344static inline spinlock_t *pte_lockptr(struct mm_struct *mm, pmd_t *pmd)
1345{ 1345{
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index bd299418a934..290901a8c1de 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -26,6 +26,7 @@ struct address_space;
26#define USE_SPLIT_PTE_PTLOCKS (NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS) 26#define USE_SPLIT_PTE_PTLOCKS (NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS)
27#define USE_SPLIT_PMD_PTLOCKS (USE_SPLIT_PTE_PTLOCKS && \ 27#define USE_SPLIT_PMD_PTLOCKS (USE_SPLIT_PTE_PTLOCKS && \
28 IS_ENABLED(CONFIG_ARCH_ENABLE_SPLIT_PMD_PTLOCK)) 28 IS_ENABLED(CONFIG_ARCH_ENABLE_SPLIT_PMD_PTLOCK))
29#define ALLOC_SPLIT_PTLOCKS (SPINLOCK_SIZE > BITS_PER_LONG/8)
29 30
30/* 31/*
31 * Each physical page in the system has a struct page associated with 32 * Each physical page in the system has a struct page associated with
@@ -155,7 +156,7 @@ struct page {
155 * system if PG_buddy is set. 156 * system if PG_buddy is set.
156 */ 157 */
157#if USE_SPLIT_PTE_PTLOCKS 158#if USE_SPLIT_PTE_PTLOCKS
158#if BLOATED_SPINLOCKS 159#if ALLOC_SPLIT_PTLOCKS
159 spinlock_t *ptl; 160 spinlock_t *ptl;
160#else 161#else
161 spinlock_t ptl; 162 spinlock_t ptl;
@@ -443,6 +444,14 @@ struct mm_struct {
443 /* numa_scan_seq prevents two threads setting pte_numa */ 444 /* numa_scan_seq prevents two threads setting pte_numa */
444 int numa_scan_seq; 445 int numa_scan_seq;
445#endif 446#endif
447#if defined(CONFIG_NUMA_BALANCING) || defined(CONFIG_COMPACTION)
448 /*
449 * An operation with batched TLB flushing is going on. Anything that
450 * can move process memory needs to flush the TLB when moving a
451 * PROT_NONE or PROT_NUMA mapped page.
452 */
453 bool tlb_flush_pending;
454#endif
446 struct uprobes_state uprobes_state; 455 struct uprobes_state uprobes_state;
447}; 456};
448 457
@@ -459,4 +468,45 @@ static inline cpumask_t *mm_cpumask(struct mm_struct *mm)
459 return mm->cpu_vm_mask_var; 468 return mm->cpu_vm_mask_var;
460} 469}
461 470
471#if defined(CONFIG_NUMA_BALANCING) || defined(CONFIG_COMPACTION)
472/*
473 * Memory barriers to keep this state in sync are graciously provided by
474 * the page table locks, outside of which no page table modifications happen.
475 * The barriers below prevent the compiler from re-ordering the instructions
476 * around the memory barriers that are already present in the code.
477 */
478static inline bool mm_tlb_flush_pending(struct mm_struct *mm)
479{
480 barrier();
481 return mm->tlb_flush_pending;
482}
483static inline void set_tlb_flush_pending(struct mm_struct *mm)
484{
485 mm->tlb_flush_pending = true;
486
487 /*
488 * Guarantee that the tlb_flush_pending store does not leak into the
489 * critical section updating the page tables
490 */
491 smp_mb__before_spinlock();
492}
493/* Clearing is done after a TLB flush, which also provides a barrier. */
494static inline void clear_tlb_flush_pending(struct mm_struct *mm)
495{
496 barrier();
497 mm->tlb_flush_pending = false;
498}
499#else
500static inline bool mm_tlb_flush_pending(struct mm_struct *mm)
501{
502 return false;
503}
504static inline void set_tlb_flush_pending(struct mm_struct *mm)
505{
506}
507static inline void clear_tlb_flush_pending(struct mm_struct *mm)
508{
509}
510#endif
511
462#endif /* _LINUX_MM_TYPES_H */ 512#endif /* _LINUX_MM_TYPES_H */
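
The tlb_flush_pending helpers above rely on barrier() and smp_mb__before_spinlock() for ordering. A hedged C11 approximation of the same protocol, using portable fences in place of the kernel primitives:

    #include <stdatomic.h>
    #include <stdbool.h>

    /* The flag is set before the page tables are changed and cleared only
     * after the TLB flush, so a racing observer either sees the flag or
     * sees the finished flush. */
    struct mm { atomic_bool tlb_flush_pending; };

    static void set_tlb_flush_pending(struct mm *mm)
    {
        atomic_store_explicit(&mm->tlb_flush_pending, true,
                              memory_order_relaxed);
        /* Don't let the store leak into the critical section that
         * rewrites the page tables. */
        atomic_thread_fence(memory_order_seq_cst);
    }

    static void clear_tlb_flush_pending(struct mm *mm)
    {
        /* The preceding TLB flush already ordered everything; only the
         * compiler needs restraining here, like barrier(). */
        atomic_signal_fence(memory_order_seq_cst);
        atomic_store_explicit(&mm->tlb_flush_pending, false,
                              memory_order_relaxed);
    }

    static bool mm_tlb_flush_pending(struct mm *mm)
    {
        atomic_signal_fence(memory_order_seq_cst);
        return atomic_load_explicit(&mm->tlb_flush_pending,
                                    memory_order_relaxed);
    }

    int main(void)
    {
        struct mm mm = { .tlb_flush_pending = false };

        set_tlb_flush_pending(&mm);
        /* ... change PTEs, flush TLB ... */
        clear_tlb_flush_pending(&mm);
        return mm_tlb_flush_pending(&mm);   /* 0: no flush pending */
    }
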
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 0c30af38be0d..d9c961aa6a7f 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -1921,6 +1921,15 @@ static inline int dev_parse_header(const struct sk_buff *skb,
1921 return dev->header_ops->parse(skb, haddr); 1921 return dev->header_ops->parse(skb, haddr);
1922} 1922}
1923 1923
1924static inline int dev_rebuild_header(struct sk_buff *skb)
1925{
1926 const struct net_device *dev = skb->dev;
1927
1928 if (!dev->header_ops || !dev->header_ops->rebuild)
1929 return 0;
1930 return dev->header_ops->rebuild(skb);
1931}
1932
1924typedef int gifconf_func_t(struct net_device * dev, char __user * bufptr, int len); 1933typedef int gifconf_func_t(struct net_device * dev, char __user * bufptr, int len);
1925int register_gifconf(unsigned int family, gifconf_func_t *gifconf); 1934int register_gifconf(unsigned int family, gifconf_func_t *gifconf);
1926static inline int unregister_gifconf(unsigned int family) 1935static inline int unregister_gifconf(unsigned int family)
@@ -3039,6 +3048,19 @@ static inline void netif_set_gso_max_size(struct net_device *dev,
3039 dev->gso_max_size = size; 3048 dev->gso_max_size = size;
3040} 3049}
3041 3050
3051static inline void skb_gso_error_unwind(struct sk_buff *skb, __be16 protocol,
3052 int pulled_hlen, u16 mac_offset,
3053 int mac_len)
3054{
3055 skb->protocol = protocol;
3056 skb->encapsulation = 1;
3057 skb_push(skb, pulled_hlen);
3058 skb_reset_transport_header(skb);
3059 skb->mac_header = mac_offset;
3060 skb->network_header = skb->mac_header + mac_len;
3061 skb->mac_len = mac_len;
3062}
3063
3042static inline bool netif_is_macvlan(struct net_device *dev) 3064static inline bool netif_is_macvlan(struct net_device *dev)
3043{ 3065{
3044 return dev->priv_flags & IFF_MACVLAN; 3066 return dev->priv_flags & IFF_MACVLAN;
diff --git a/include/linux/percpu-defs.h b/include/linux/percpu-defs.h
index 57e890abe1f0..a5fc7d01aad6 100644
--- a/include/linux/percpu-defs.h
+++ b/include/linux/percpu-defs.h
@@ -69,6 +69,7 @@
69 __PCPU_DUMMY_ATTRS char __pcpu_scope_##name; \ 69 __PCPU_DUMMY_ATTRS char __pcpu_scope_##name; \
70 extern __PCPU_DUMMY_ATTRS char __pcpu_unique_##name; \ 70 extern __PCPU_DUMMY_ATTRS char __pcpu_unique_##name; \
71 __PCPU_DUMMY_ATTRS char __pcpu_unique_##name; \ 71 __PCPU_DUMMY_ATTRS char __pcpu_unique_##name; \
72 extern __PCPU_ATTRS(sec) __typeof__(type) name; \
72 __PCPU_ATTRS(sec) PER_CPU_DEF_ATTRIBUTES __weak \ 73 __PCPU_ATTRS(sec) PER_CPU_DEF_ATTRIBUTES __weak \
73 __typeof__(type) name 74 __typeof__(type) name
74#else 75#else
diff --git a/include/linux/pstore.h b/include/linux/pstore.h
index abd437d0a8a7..ece0c6bbfcc5 100644
--- a/include/linux/pstore.h
+++ b/include/linux/pstore.h
@@ -51,6 +51,7 @@ struct pstore_info {
51 char *buf; 51 char *buf;
52 size_t bufsize; 52 size_t bufsize;
53 struct mutex read_mutex; /* serialize open/read/close */ 53 struct mutex read_mutex; /* serialize open/read/close */
54 int flags;
54 int (*open)(struct pstore_info *psi); 55 int (*open)(struct pstore_info *psi);
55 int (*close)(struct pstore_info *psi); 56 int (*close)(struct pstore_info *psi);
56 ssize_t (*read)(u64 *id, enum pstore_type_id *type, 57 ssize_t (*read)(u64 *id, enum pstore_type_id *type,
@@ -70,6 +71,8 @@ struct pstore_info {
70 void *data; 71 void *data;
71}; 72};
72 73
74#define PSTORE_FLAGS_FRAGILE 1
75
73#ifdef CONFIG_PSTORE 76#ifdef CONFIG_PSTORE
74extern int pstore_register(struct pstore_info *); 77extern int pstore_register(struct pstore_info *);
75extern bool pstore_cannot_block_path(enum kmsg_dump_reason reason); 78extern bool pstore_cannot_block_path(enum kmsg_dump_reason reason);
diff --git a/include/linux/reboot.h b/include/linux/reboot.h
index 8e00f9f6f963..9e7db9e73cc1 100644
--- a/include/linux/reboot.h
+++ b/include/linux/reboot.h
@@ -43,6 +43,7 @@ extern int unregister_reboot_notifier(struct notifier_block *);
43 * Architecture-specific implementations of sys_reboot commands. 43 * Architecture-specific implementations of sys_reboot commands.
44 */ 44 */
45 45
46extern void migrate_to_reboot_cpu(void);
46extern void machine_restart(char *cmd); 47extern void machine_restart(char *cmd);
47extern void machine_halt(void); 48extern void machine_halt(void);
48extern void machine_power_off(void); 49extern void machine_power_off(void);
diff --git a/include/linux/rtnetlink.h b/include/linux/rtnetlink.h
index 939428ad25ac..8e3e66ac0a52 100644
--- a/include/linux/rtnetlink.h
+++ b/include/linux/rtnetlink.h
@@ -24,6 +24,11 @@ extern int rtnl_trylock(void);
24extern int rtnl_is_locked(void); 24extern int rtnl_is_locked(void);
25#ifdef CONFIG_PROVE_LOCKING 25#ifdef CONFIG_PROVE_LOCKING
26extern int lockdep_rtnl_is_held(void); 26extern int lockdep_rtnl_is_held(void);
27#else
28static inline int lockdep_rtnl_is_held(void)
29{
30 return 1;
31}
27#endif /* #ifdef CONFIG_PROVE_LOCKING */ 32#endif /* #ifdef CONFIG_PROVE_LOCKING */
28 33
29/** 34/**
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index c5cd016f5120..88d4f2ebbec6 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -1715,6 +1715,11 @@ static inline void skb_set_mac_header(struct sk_buff *skb, const int offset)
1715 skb->mac_header += offset; 1715 skb->mac_header += offset;
1716} 1716}
1717 1717
1718static inline void skb_pop_mac_header(struct sk_buff *skb)
1719{
1720 skb->mac_header = skb->network_header;
1721}
1722
1718static inline void skb_probe_transport_header(struct sk_buff *skb, 1723static inline void skb_probe_transport_header(struct sk_buff *skb,
1719 const int offset_hint) 1724 const int offset_hint)
1720{ 1725{
@@ -2621,6 +2626,10 @@ static inline void sw_tx_timestamp(struct sk_buff *skb)
2621 * Ethernet MAC Drivers should call this function in their hard_xmit() 2626 * Ethernet MAC Drivers should call this function in their hard_xmit()
2622 * function immediately before giving the sk_buff to the MAC hardware. 2627 * function immediately before giving the sk_buff to the MAC hardware.
2623 * 2628 *
2629 * Specifically, one should make absolutely sure that this function is
2630 * called before TX completion of this packet can trigger. Otherwise
2631 * the packet could potentially already be freed.
2632 *
2624 * @skb: A socket buffer. 2633 * @skb: A socket buffer.
2625 */ 2634 */
2626static inline void skb_tx_timestamp(struct sk_buff *skb) 2635static inline void skb_tx_timestamp(struct sk_buff *skb)
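
The added kernel-doc text warns that TX completion may free the skb as soon as the hardware owns it, so skb_tx_timestamp() must run first. A tiny sketch of that ordering constraint, where free() stands in for TX completion:

    #include <stdio.h>
    #include <stdlib.h>

    struct skb { long tstamp; };

    static void tx_timestamp(struct skb *skb)
    {
        skb->tstamp = 12345;     /* record the send time */
    }

    /* Stand-in for the hardware: in a real driver the TX-completion IRQ
     * can fire the moment the descriptor is posted, and completion frees
     * the skb -- so nothing may touch it afterwards. */
    static void hw_queue_xmit(struct skb *skb)
    {
        free(skb);               /* "completion" runs immediately here */
    }

    static void ndo_start_xmit(struct skb *skb)
    {
        tx_timestamp(skb);       /* must happen first, as the comment says */
        hw_queue_xmit(skb);      /* after this, skb may already be freed */
    }

    int main(void)
    {
        struct skb *skb = malloc(sizeof(*skb));

        if (!skb)
            return 1;
        ndo_start_xmit(skb);     /* correct order: no use-after-free */
        return 0;
    }
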
diff --git a/include/net/llc_pdu.h b/include/net/llc_pdu.h
index 31e2de7d57c5..c0f0a13ed818 100644
--- a/include/net/llc_pdu.h
+++ b/include/net/llc_pdu.h
@@ -142,7 +142,7 @@
142#define LLC_S_PF_IS_1(pdu) ((pdu->ctrl_2 & LLC_S_PF_BIT_MASK) ? 1 : 0) 142#define LLC_S_PF_IS_1(pdu) ((pdu->ctrl_2 & LLC_S_PF_BIT_MASK) ? 1 : 0)
143 143
144#define PDU_SUPV_GET_Nr(pdu) ((pdu->ctrl_2 & 0xFE) >> 1) 144#define PDU_SUPV_GET_Nr(pdu) ((pdu->ctrl_2 & 0xFE) >> 1)
145#define PDU_GET_NEXT_Vr(sn) (++sn & ~LLC_2_SEQ_NBR_MODULO) 145#define PDU_GET_NEXT_Vr(sn) (((sn) + 1) & ~LLC_2_SEQ_NBR_MODULO)
146 146
147/* FRMR information field macros */ 147/* FRMR information field macros */
148 148
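
The PDU_GET_NEXT_Vr() fix replaces ++sn with ((sn) + 1): the macro no longer mutates its argument, and the parameter is parenthesised so arbitrary expressions work. A small demo of the difference:

    #include <stdio.h>

    #define MODULO 0x80

    /* Old form: mutates its argument, and would increment twice if the
     * argument ever appeared twice in the expansion. */
    #define NEXT_BAD(sn)  (++sn & ~MODULO)
    /* Fixed form, as in the patch: a pure expression over (sn). */
    #define NEXT_GOOD(sn) (((sn) + 1) & ~MODULO)

    int main(void)
    {
        int a = 5, b = 5;
        int next = NEXT_BAD(a);

        printf("bad:  next=%d, a afterwards=%d\n", next, a);         /* a became 6 */
        printf("good: next=%d, b afterwards=%d\n", NEXT_GOOD(b), b); /* b still 5 */

        /* NEXT_BAD(x + 1) would not even compile: ++(x + 1) is not an lvalue. */
        return 0;
    }
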
diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h
index 41c7013e2699..e9f732fda950 100644
--- a/include/net/sctp/structs.h
+++ b/include/net/sctp/structs.h
@@ -1045,9 +1045,6 @@ struct sctp_outq {
1045 1045
1046 /* Corked? */ 1046 /* Corked? */
1047 char cork; 1047 char cork;
1048
1049 /* Is this structure empty? */
1050 char empty;
1051}; 1048};
1052 1049
1053void sctp_outq_init(struct sctp_association *, struct sctp_outq *); 1050void sctp_outq_init(struct sctp_association *, struct sctp_outq *);
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
index 979874c627ee..61e1935c91b1 100644
--- a/include/rdma/ib_verbs.h
+++ b/include/rdma/ib_verbs.h
@@ -978,7 +978,7 @@ struct ib_uobject {
978}; 978};
979 979
980struct ib_udata { 980struct ib_udata {
981 void __user *inbuf; 981 const void __user *inbuf;
982 void __user *outbuf; 982 void __user *outbuf;
983 size_t inlen; 983 size_t inlen;
984 size_t outlen; 984 size_t outlen;
diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
index 45412a6afa69..321301c0a643 100644
--- a/include/target/target_core_base.h
+++ b/include/target/target_core_base.h
@@ -517,10 +517,6 @@ struct se_node_acl {
517 u32 acl_index; 517 u32 acl_index;
518#define MAX_ACL_TAG_SIZE 64 518#define MAX_ACL_TAG_SIZE 64
519 char acl_tag[MAX_ACL_TAG_SIZE]; 519 char acl_tag[MAX_ACL_TAG_SIZE];
520 u64 num_cmds;
521 u64 read_bytes;
522 u64 write_bytes;
523 spinlock_t stats_lock;
524 /* Used for PR SPEC_I_PT=1 and REGISTER_AND_MOVE */ 520 /* Used for PR SPEC_I_PT=1 and REGISTER_AND_MOVE */
525 atomic_t acl_pr_ref_count; 521 atomic_t acl_pr_ref_count;
526 struct se_dev_entry **device_list; 522 struct se_dev_entry **device_list;
@@ -624,6 +620,7 @@ struct se_dev_attrib {
624 u32 unmap_granularity; 620 u32 unmap_granularity;
625 u32 unmap_granularity_alignment; 621 u32 unmap_granularity_alignment;
626 u32 max_write_same_len; 622 u32 max_write_same_len;
623 u32 max_bytes_per_io;
627 struct se_device *da_dev; 624 struct se_device *da_dev;
628 struct config_group da_group; 625 struct config_group da_group;
629}; 626};
diff --git a/include/uapi/linux/perf_event.h b/include/uapi/linux/perf_event.h
index e1802d6153ae..959d454f76a1 100644
--- a/include/uapi/linux/perf_event.h
+++ b/include/uapi/linux/perf_event.h
@@ -679,6 +679,7 @@ enum perf_event_type {
679 * 679 *
680 * { u64 weight; } && PERF_SAMPLE_WEIGHT 680 * { u64 weight; } && PERF_SAMPLE_WEIGHT
681 * { u64 data_src; } && PERF_SAMPLE_DATA_SRC 681 * { u64 data_src; } && PERF_SAMPLE_DATA_SRC
682 * { u64 transaction; } && PERF_SAMPLE_TRANSACTION
682 * }; 683 * };
683 */ 684 */
684 PERF_RECORD_SAMPLE = 9, 685 PERF_RECORD_SAMPLE = 9,
diff --git a/include/xen/interface/io/blkif.h b/include/xen/interface/io/blkif.h
index 65e12099ef89..ae665ac59c36 100644
--- a/include/xen/interface/io/blkif.h
+++ b/include/xen/interface/io/blkif.h
@@ -146,7 +146,7 @@ struct blkif_request_segment_aligned {
146struct blkif_request_rw { 146struct blkif_request_rw {
147 uint8_t nr_segments; /* number of segments */ 147 uint8_t nr_segments; /* number of segments */
148 blkif_vdev_t handle; /* only for read/write requests */ 148 blkif_vdev_t handle; /* only for read/write requests */
149#ifdef CONFIG_X86_64 149#ifndef CONFIG_X86_32
150 uint32_t _pad1; /* offsetof(blkif_request,u.rw.id) == 8 */ 150 uint32_t _pad1; /* offsetof(blkif_request,u.rw.id) == 8 */
151#endif 151#endif
152 uint64_t id; /* private guest value, echoed in resp */ 152 uint64_t id; /* private guest value, echoed in resp */
@@ -163,7 +163,7 @@ struct blkif_request_discard {
163 uint8_t flag; /* BLKIF_DISCARD_SECURE or zero. */ 163 uint8_t flag; /* BLKIF_DISCARD_SECURE or zero. */
164#define BLKIF_DISCARD_SECURE (1<<0) /* ignored if discard-secure=0 */ 164#define BLKIF_DISCARD_SECURE (1<<0) /* ignored if discard-secure=0 */
165 blkif_vdev_t _pad1; /* only for read/write requests */ 165 blkif_vdev_t _pad1; /* only for read/write requests */
166#ifdef CONFIG_X86_64 166#ifndef CONFIG_X86_32
167 uint32_t _pad2; /* offsetof(blkif_req..,u.discard.id)==8*/ 167 uint32_t _pad2; /* offsetof(blkif_req..,u.discard.id)==8*/
168#endif 168#endif
169 uint64_t id; /* private guest value, echoed in resp */ 169 uint64_t id; /* private guest value, echoed in resp */
@@ -175,7 +175,7 @@ struct blkif_request_discard {
175struct blkif_request_other { 175struct blkif_request_other {
176 uint8_t _pad1; 176 uint8_t _pad1;
177 blkif_vdev_t _pad2; /* only for read/write requests */ 177 blkif_vdev_t _pad2; /* only for read/write requests */
178#ifdef CONFIG_X86_64 178#ifndef CONFIG_X86_32
179 uint32_t _pad3; /* offsetof(blkif_req..,u.other.id)==8*/ 179 uint32_t _pad3; /* offsetof(blkif_req..,u.other.id)==8*/
180#endif 180#endif
181 uint64_t id; /* private guest value, echoed in resp */ 181 uint64_t id; /* private guest value, echoed in resp */
@@ -184,7 +184,7 @@ struct blkif_request_other {
184struct blkif_request_indirect { 184struct blkif_request_indirect {
185 uint8_t indirect_op; 185 uint8_t indirect_op;
186 uint16_t nr_segments; 186 uint16_t nr_segments;
187#ifdef CONFIG_X86_64 187#ifndef CONFIG_X86_32
188 uint32_t _pad1; /* offsetof(blkif_...,u.indirect.id) == 8 */ 188 uint32_t _pad1; /* offsetof(blkif_...,u.indirect.id) == 8 */
189#endif 189#endif
190 uint64_t id; 190 uint64_t id;
@@ -192,7 +192,7 @@ struct blkif_request_indirect {
192 blkif_vdev_t handle; 192 blkif_vdev_t handle;
193 uint16_t _pad2; 193 uint16_t _pad2;
194 grant_ref_t indirect_grefs[BLKIF_MAX_INDIRECT_PAGES_PER_REQUEST]; 194 grant_ref_t indirect_grefs[BLKIF_MAX_INDIRECT_PAGES_PER_REQUEST];
195#ifdef CONFIG_X86_64 195#ifndef CONFIG_X86_32
196 uint32_t _pad3; /* make it 64 byte aligned */ 196 uint32_t _pad3; /* make it 64 byte aligned */
197#else 197#else
198 uint64_t _pad3; /* make it 64 byte aligned */ 198 uint64_t _pad3; /* make it 64 byte aligned */
diff --git a/kernel/Makefile b/kernel/Makefile
index bbaf7d59c1bb..bc010ee272b6 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -137,9 +137,10 @@ $(obj)/timeconst.h: $(obj)/hz.bc $(src)/timeconst.bc FORCE
137############################################################################### 137###############################################################################
138ifeq ($(CONFIG_SYSTEM_TRUSTED_KEYRING),y) 138ifeq ($(CONFIG_SYSTEM_TRUSTED_KEYRING),y)
139X509_CERTIFICATES-y := $(wildcard *.x509) $(wildcard $(srctree)/*.x509) 139X509_CERTIFICATES-y := $(wildcard *.x509) $(wildcard $(srctree)/*.x509)
140X509_CERTIFICATES-$(CONFIG_MODULE_SIG) += signing_key.x509 140X509_CERTIFICATES-$(CONFIG_MODULE_SIG) += $(objtree)/signing_key.x509
141X509_CERTIFICATES := $(sort $(foreach CERT,$(X509_CERTIFICATES-y), \ 141X509_CERTIFICATES-raw := $(sort $(foreach CERT,$(X509_CERTIFICATES-y), \
142 $(or $(realpath $(CERT)),$(CERT)))) 142 $(or $(realpath $(CERT)),$(CERT))))
143X509_CERTIFICATES := $(subst $(realpath $(objtree))/,,$(X509_CERTIFICATES-raw))
143 144
144ifeq ($(X509_CERTIFICATES),) 145ifeq ($(X509_CERTIFICATES),)
145$(warning *** No X.509 certificates found ***) 146$(warning *** No X.509 certificates found ***)
@@ -164,9 +165,9 @@ $(obj)/x509_certificate_list: $(X509_CERTIFICATES) $(obj)/.x509.list
164targets += $(obj)/.x509.list 165targets += $(obj)/.x509.list
165$(obj)/.x509.list: 166$(obj)/.x509.list:
166 @echo $(X509_CERTIFICATES) >$@ 167 @echo $(X509_CERTIFICATES) >$@
168endif
167 169
168clean-files := x509_certificate_list .x509.list 170clean-files := x509_certificate_list .x509.list
169endif
170 171
171ifeq ($(CONFIG_MODULE_SIG),y) 172ifeq ($(CONFIG_MODULE_SIG),y)
172############################################################################### 173###############################################################################
diff --git a/kernel/bounds.c b/kernel/bounds.c
index 5253204afdca..9fd4246b04b8 100644
--- a/kernel/bounds.c
+++ b/kernel/bounds.c
@@ -22,6 +22,6 @@ void foo(void)
22#ifdef CONFIG_SMP 22#ifdef CONFIG_SMP
23 DEFINE(NR_CPUS_BITS, ilog2(CONFIG_NR_CPUS)); 23 DEFINE(NR_CPUS_BITS, ilog2(CONFIG_NR_CPUS));
24#endif 24#endif
25 DEFINE(BLOATED_SPINLOCKS, sizeof(spinlock_t) > sizeof(int)); 25 DEFINE(SPINLOCK_SIZE, sizeof(spinlock_t));
26 /* End of constants */ 26 /* End of constants */
27} 27}
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index 8b729c278b64..bc1dcabe9217 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -890,6 +890,16 @@ static void cgroup_diput(struct dentry *dentry, struct inode *inode)
890 struct cgroup *cgrp = dentry->d_fsdata; 890 struct cgroup *cgrp = dentry->d_fsdata;
891 891
892 BUG_ON(!(cgroup_is_dead(cgrp))); 892 BUG_ON(!(cgroup_is_dead(cgrp)));
893
894 /*
895 * XXX: cgrp->id is only used to look up css's. As cgroup
896 * and css's lifetimes will be decoupled, it should be made
897 * per-subsystem and moved to css->id so that lookups are
898 * successful until the target css is released.
899 */
900 idr_remove(&cgrp->root->cgroup_idr, cgrp->id);
901 cgrp->id = -1;
902
893 call_rcu(&cgrp->rcu_head, cgroup_free_rcu); 903 call_rcu(&cgrp->rcu_head, cgroup_free_rcu);
894 } else { 904 } else {
895 struct cfent *cfe = __d_cfe(dentry); 905 struct cfent *cfe = __d_cfe(dentry);
@@ -4268,6 +4278,7 @@ static void css_release(struct percpu_ref *ref)
4268 struct cgroup_subsys_state *css = 4278 struct cgroup_subsys_state *css =
4269 container_of(ref, struct cgroup_subsys_state, refcnt); 4279 container_of(ref, struct cgroup_subsys_state, refcnt);
4270 4280
4281 rcu_assign_pointer(css->cgroup->subsys[css->ss->subsys_id], NULL);
4271 call_rcu(&css->rcu_head, css_free_rcu_fn); 4282 call_rcu(&css->rcu_head, css_free_rcu_fn);
4272} 4283}
4273 4284
@@ -4426,14 +4437,6 @@ static long cgroup_create(struct cgroup *parent, struct dentry *dentry,
4426 list_add_tail_rcu(&cgrp->sibling, &cgrp->parent->children); 4437 list_add_tail_rcu(&cgrp->sibling, &cgrp->parent->children);
4427 root->number_of_cgroups++; 4438 root->number_of_cgroups++;
4428 4439
4429 /* each css holds a ref to the cgroup's dentry and the parent css */
4430 for_each_root_subsys(root, ss) {
4431 struct cgroup_subsys_state *css = css_ar[ss->subsys_id];
4432
4433 dget(dentry);
4434 css_get(css->parent);
4435 }
4436
4437 /* hold a ref to the parent's dentry */ 4440 /* hold a ref to the parent's dentry */
4438 dget(parent->dentry); 4441 dget(parent->dentry);
4439 4442
@@ -4445,6 +4448,13 @@ static long cgroup_create(struct cgroup *parent, struct dentry *dentry,
4445 if (err) 4448 if (err)
4446 goto err_destroy; 4449 goto err_destroy;
4447 4450
4451 /* each css holds a ref to the cgroup's dentry and parent css */
4452 dget(dentry);
4453 css_get(css->parent);
4454
4455 /* mark it consumed for error path */
4456 css_ar[ss->subsys_id] = NULL;
4457
4448 if (ss->broken_hierarchy && !ss->warned_broken_hierarchy && 4458 if (ss->broken_hierarchy && !ss->warned_broken_hierarchy &&
4449 parent->parent) { 4459 parent->parent) {
4450 pr_warning("cgroup: %s (%d) created nested cgroup for controller \"%s\" which has incomplete hierarchy support. Nested cgroups may change behavior in the future.\n", 4460 pr_warning("cgroup: %s (%d) created nested cgroup for controller \"%s\" which has incomplete hierarchy support. Nested cgroups may change behavior in the future.\n",
@@ -4491,6 +4501,14 @@ err_free_cgrp:
4491 return err; 4501 return err;
4492 4502
4493err_destroy: 4503err_destroy:
4504 for_each_root_subsys(root, ss) {
4505 struct cgroup_subsys_state *css = css_ar[ss->subsys_id];
4506
4507 if (css) {
4508 percpu_ref_cancel_init(&css->refcnt);
4509 ss->css_free(css);
4510 }
4511 }
4494 cgroup_destroy_locked(cgrp); 4512 cgroup_destroy_locked(cgrp);
4495 mutex_unlock(&cgroup_mutex); 4513 mutex_unlock(&cgroup_mutex);
4496 mutex_unlock(&dentry->d_inode->i_mutex); 4514 mutex_unlock(&dentry->d_inode->i_mutex);
@@ -4652,8 +4670,12 @@ static int cgroup_destroy_locked(struct cgroup *cgrp)
4652 * will be invoked to perform the rest of destruction once the 4670 * will be invoked to perform the rest of destruction once the
4653 * percpu refs of all css's are confirmed to be killed. 4671 * percpu refs of all css's are confirmed to be killed.
4654 */ 4672 */
4655 for_each_root_subsys(cgrp->root, ss) 4673 for_each_root_subsys(cgrp->root, ss) {
4656 kill_css(cgroup_css(cgrp, ss)); 4674 struct cgroup_subsys_state *css = cgroup_css(cgrp, ss);
4675
4676 if (css)
4677 kill_css(css);
4678 }
4657 4679
4658 /* 4680 /*
4659 * Mark @cgrp dead. This prevents further task migration and child 4681 * Mark @cgrp dead. This prevents further task migration and child
@@ -4722,14 +4744,6 @@ static void cgroup_destroy_css_killed(struct cgroup *cgrp)
4722 /* delete this cgroup from parent->children */ 4744 /* delete this cgroup from parent->children */
4723 list_del_rcu(&cgrp->sibling); 4745 list_del_rcu(&cgrp->sibling);
4724 4746
4725 /*
4726 * We should remove the cgroup object from idr before its grace
4727 * period starts, so we won't be looking up a cgroup while the
4728 * cgroup is being freed.
4729 */
4730 idr_remove(&cgrp->root->cgroup_idr, cgrp->id);
4731 cgrp->id = -1;
4732
4733 dput(d); 4747 dput(d);
4734 4748
4735 set_bit(CGRP_RELEASABLE, &parent->flags); 4749 set_bit(CGRP_RELEASABLE, &parent->flags);
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 72348dc192c1..f5744010a8d2 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -1396,6 +1396,8 @@ event_sched_out(struct perf_event *event,
1396 if (event->state != PERF_EVENT_STATE_ACTIVE) 1396 if (event->state != PERF_EVENT_STATE_ACTIVE)
1397 return; 1397 return;
1398 1398
1399 perf_pmu_disable(event->pmu);
1400
1399 event->state = PERF_EVENT_STATE_INACTIVE; 1401 event->state = PERF_EVENT_STATE_INACTIVE;
1400 if (event->pending_disable) { 1402 if (event->pending_disable) {
1401 event->pending_disable = 0; 1403 event->pending_disable = 0;
@@ -1412,6 +1414,8 @@ event_sched_out(struct perf_event *event,
1412 ctx->nr_freq--; 1414 ctx->nr_freq--;
1413 if (event->attr.exclusive || !cpuctx->active_oncpu) 1415 if (event->attr.exclusive || !cpuctx->active_oncpu)
1414 cpuctx->exclusive = 0; 1416 cpuctx->exclusive = 0;
1417
1418 perf_pmu_enable(event->pmu);
1415} 1419}
1416 1420
1417static void 1421static void
@@ -1652,6 +1656,7 @@ event_sched_in(struct perf_event *event,
1652 struct perf_event_context *ctx) 1656 struct perf_event_context *ctx)
1653{ 1657{
1654 u64 tstamp = perf_event_time(event); 1658 u64 tstamp = perf_event_time(event);
1659 int ret = 0;
1655 1660
1656 if (event->state <= PERF_EVENT_STATE_OFF) 1661 if (event->state <= PERF_EVENT_STATE_OFF)
1657 return 0; 1662 return 0;
@@ -1674,10 +1679,13 @@ event_sched_in(struct perf_event *event,
1674 */ 1679 */
1675 smp_wmb(); 1680 smp_wmb();
1676 1681
1682 perf_pmu_disable(event->pmu);
1683
1677 if (event->pmu->add(event, PERF_EF_START)) { 1684 if (event->pmu->add(event, PERF_EF_START)) {
1678 event->state = PERF_EVENT_STATE_INACTIVE; 1685 event->state = PERF_EVENT_STATE_INACTIVE;
1679 event->oncpu = -1; 1686 event->oncpu = -1;
1680 return -EAGAIN; 1687 ret = -EAGAIN;
1688 goto out;
1681 } 1689 }
1682 1690
1683 event->tstamp_running += tstamp - event->tstamp_stopped; 1691 event->tstamp_running += tstamp - event->tstamp_stopped;
@@ -1693,7 +1701,10 @@ event_sched_in(struct perf_event *event,
1693 if (event->attr.exclusive) 1701 if (event->attr.exclusive)
1694 cpuctx->exclusive = 1; 1702 cpuctx->exclusive = 1;
1695 1703
1696 return 0; 1704out:
1705 perf_pmu_enable(event->pmu);
1706
1707 return ret;
1697} 1708}
1698 1709
1699static int 1710static int
@@ -2743,6 +2754,8 @@ static void perf_adjust_freq_unthr_context(struct perf_event_context *ctx,
2743 if (!event_filter_match(event)) 2754 if (!event_filter_match(event))
2744 continue; 2755 continue;
2745 2756
2757 perf_pmu_disable(event->pmu);
2758
2746 hwc = &event->hw; 2759 hwc = &event->hw;
2747 2760
2748 if (hwc->interrupts == MAX_INTERRUPTS) { 2761 if (hwc->interrupts == MAX_INTERRUPTS) {
@@ -2752,7 +2765,7 @@ static void perf_adjust_freq_unthr_context(struct perf_event_context *ctx,
2752 } 2765 }
2753 2766
2754 if (!event->attr.freq || !event->attr.sample_freq) 2767 if (!event->attr.freq || !event->attr.sample_freq)
2755 continue; 2768 goto next;
2756 2769
2757 /* 2770 /*
2758 * stop the event and update event->count 2771 * stop the event and update event->count
@@ -2774,6 +2787,8 @@ static void perf_adjust_freq_unthr_context(struct perf_event_context *ctx,
2774 perf_adjust_period(event, period, delta, false); 2787 perf_adjust_period(event, period, delta, false);
2775 2788
2776 event->pmu->start(event, delta > 0 ? PERF_EF_RELOAD : 0); 2789 event->pmu->start(event, delta > 0 ? PERF_EF_RELOAD : 0);
2790 next:
2791 perf_pmu_enable(event->pmu);
2777 } 2792 }
2778 2793
2779 perf_pmu_enable(ctx->pmu); 2794 perf_pmu_enable(ctx->pmu);
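
With perf_pmu_disable() now taken at the top of event_sched_in(), the failure path has to funnel through the out: label so the matching perf_pmu_enable() always runs. A sketch of that bracketing discipline, with stand-in names:

    #include <stdio.h>

    /* Once pmu_disable() has run, every exit path -- including the
     * -EAGAIN failure -- must pass through the matching pmu_enable(). */
    static int disable_depth;

    static void pmu_disable(void) { disable_depth++; }
    static void pmu_enable(void)  { disable_depth--; }

    static int pmu_add(int ok)    { return ok ? 0 : -1; }

    static int event_sched_in(int ok)
    {
        int ret = 0;

        pmu_disable();

        if (pmu_add(ok)) {
            ret = -1;            /* was: a bare return, leaking the disable */
            goto out;
        }
        /* ... account tstamps, bump active counts ... */

    out:
        pmu_enable();            /* always rebalances the disable above */
        return ret;
    }

    int main(void)
    {
        event_sched_in(1);
        event_sched_in(0);
        printf("disable_depth=%d\n", disable_depth);   /* 0: balanced */
        return 0;
    }
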
diff --git a/kernel/fork.c b/kernel/fork.c
index 728d5be9548c..5721f0e3f2da 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -537,6 +537,7 @@ static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p)
537 spin_lock_init(&mm->page_table_lock); 537 spin_lock_init(&mm->page_table_lock);
538 mm_init_aio(mm); 538 mm_init_aio(mm);
539 mm_init_owner(mm, p); 539 mm_init_owner(mm, p);
540 clear_tlb_flush_pending(mm);
540 541
541 if (likely(!mm_alloc_pgd(mm))) { 542 if (likely(!mm_alloc_pgd(mm))) {
542 mm->def_flags = 0; 543 mm->def_flags = 0;
diff --git a/kernel/freezer.c b/kernel/freezer.c
index b462fa197517..aa6a8aadb911 100644
--- a/kernel/freezer.c
+++ b/kernel/freezer.c
@@ -19,6 +19,12 @@ EXPORT_SYMBOL(system_freezing_cnt);
19bool pm_freezing; 19bool pm_freezing;
20bool pm_nosig_freezing; 20bool pm_nosig_freezing;
21 21
22/*
23 * Temporary export for the deadlock workaround in ata_scsi_hotplug().
24 * Remove once the hack becomes unnecessary.
25 */
26EXPORT_SYMBOL_GPL(pm_freezing);
27
22/* protects freezing and frozen transitions */ 28/* protects freezing and frozen transitions */
23static DEFINE_SPINLOCK(freezer_lock); 29static DEFINE_SPINLOCK(freezer_lock);
24 30
diff --git a/kernel/kexec.c b/kernel/kexec.c
index d0d8fca54065..9c970167e402 100644
--- a/kernel/kexec.c
+++ b/kernel/kexec.c
@@ -1680,6 +1680,7 @@ int kernel_kexec(void)
1680 { 1680 {
1681 kexec_in_progress = true; 1681 kexec_in_progress = true;
1682 kernel_restart_prepare(NULL); 1682 kernel_restart_prepare(NULL);
1683 migrate_to_reboot_cpu();
1683 printk(KERN_EMERG "Starting new kernel\n"); 1684 printk(KERN_EMERG "Starting new kernel\n");
1684 machine_shutdown(); 1685 machine_shutdown();
1685 } 1686 }
diff --git a/kernel/power/console.c b/kernel/power/console.c
index 463aa6736751..eacb8bd8cab4 100644
--- a/kernel/power/console.c
+++ b/kernel/power/console.c
@@ -81,6 +81,7 @@ void pm_vt_switch_unregister(struct device *dev)
81 list_for_each_entry(tmp, &pm_vt_switch_list, head) { 81 list_for_each_entry(tmp, &pm_vt_switch_list, head) {
82 if (tmp->dev == dev) { 82 if (tmp->dev == dev) {
83 list_del(&tmp->head); 83 list_del(&tmp->head);
84 kfree(tmp);
84 break; 85 break;
85 } 86 }
86 } 87 }
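
The one-line console.c fix adds the missing kfree(): list_del() only unlinks the node, it never frees it. A standalone illustration of the remove-and-free pairing:

    #include <stdlib.h>

    struct entry {
        int dev;
        struct entry *next;
    };

    /* Unlinking a node only rewires the list; the node itself must still
     * be freed -- the missing kfree() was exactly the leak fixed above. */
    static void unregister(struct entry **head, int dev)
    {
        for (struct entry **pp = head; *pp; pp = &(*pp)->next) {
            if ((*pp)->dev == dev) {
                struct entry *tmp = *pp;
                *pp = tmp->next;   /* list_del() equivalent */
                free(tmp);         /* the added kfree(tmp) */
                break;
            }
        }
    }

    int main(void)
    {
        struct entry *head = NULL;

        for (int i = 0; i < 3; i++) {
            struct entry *e = malloc(sizeof(*e));
            e->dev = i;
            e->next = head;
            head = e;
        }
        unregister(&head, 1);
        while (head) {             /* drain the rest */
            struct entry *tmp = head;
            head = head->next;
            free(tmp);
        }
        return 0;
    }
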
diff --git a/kernel/reboot.c b/kernel/reboot.c
index f813b3474646..662c83fc16b7 100644
--- a/kernel/reboot.c
+++ b/kernel/reboot.c
@@ -104,7 +104,7 @@ int unregister_reboot_notifier(struct notifier_block *nb)
104} 104}
105EXPORT_SYMBOL(unregister_reboot_notifier); 105EXPORT_SYMBOL(unregister_reboot_notifier);
106 106
107static void migrate_to_reboot_cpu(void) 107void migrate_to_reboot_cpu(void)
108{ 108{
109 /* The boot cpu is always logical cpu 0 */ 109 /* The boot cpu is always logical cpu 0 */
110 int cpu = reboot_cpu; 110 int cpu = reboot_cpu;
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 19af58f3a261..a88f4a485c5e 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -4902,6 +4902,7 @@ DEFINE_PER_CPU(struct sched_domain *, sd_asym);
4902static void update_top_cache_domain(int cpu) 4902static void update_top_cache_domain(int cpu)
4903{ 4903{
4904 struct sched_domain *sd; 4904 struct sched_domain *sd;
4905 struct sched_domain *busy_sd = NULL;
4905 int id = cpu; 4906 int id = cpu;
4906 int size = 1; 4907 int size = 1;
4907 4908
@@ -4909,9 +4910,9 @@ static void update_top_cache_domain(int cpu)
4909 if (sd) { 4910 if (sd) {
4910 id = cpumask_first(sched_domain_span(sd)); 4911 id = cpumask_first(sched_domain_span(sd));
4911 size = cpumask_weight(sched_domain_span(sd)); 4912 size = cpumask_weight(sched_domain_span(sd));
4912 sd = sd->parent; /* sd_busy */ 4913 busy_sd = sd->parent; /* sd_busy */
4913 } 4914 }
4914 rcu_assign_pointer(per_cpu(sd_busy, cpu), sd); 4915 rcu_assign_pointer(per_cpu(sd_busy, cpu), busy_sd);
4915 4916
4916 rcu_assign_pointer(per_cpu(sd_llc, cpu), sd); 4917 rcu_assign_pointer(per_cpu(sd_llc, cpu), sd);
4917 per_cpu(sd_llc_size, cpu) = size; 4918 per_cpu(sd_llc_size, cpu) = size;
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 9030da7bcb15..c7395d97e4cb 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -1738,6 +1738,13 @@ void task_numa_work(struct callback_head *work)
1738 (vma->vm_file && (vma->vm_flags & (VM_READ|VM_WRITE)) == (VM_READ))) 1738 (vma->vm_file && (vma->vm_flags & (VM_READ|VM_WRITE)) == (VM_READ)))
1739 continue; 1739 continue;
1740 1740
1741 /*
1742 * Skip inaccessible VMAs to avoid any confusion between
1743 * PROT_NONE and NUMA hinting ptes
1744 */
1745 if (!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)))
1746 continue;
1747
1741 do { 1748 do {
1742 start = max(start, vma->vm_start); 1749 start = max(start, vma->vm_start);
1743 end = ALIGN(start + (pages << PAGE_SHIFT), HPAGE_SIZE); 1750 end = ALIGN(start + (pages << PAGE_SHIFT), HPAGE_SIZE);
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index 7d57275fc396..1c4065575fa2 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -901,6 +901,13 @@ inc_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio)
901{ 901{
902 struct rq *rq = rq_of_rt_rq(rt_rq); 902 struct rq *rq = rq_of_rt_rq(rt_rq);
903 903
904#ifdef CONFIG_RT_GROUP_SCHED
905 /*
906 * Change rq's cpupri only if rt_rq is the top queue.
907 */
908 if (&rq->rt != rt_rq)
909 return;
910#endif
904 if (rq->online && prio < prev_prio) 911 if (rq->online && prio < prev_prio)
905 cpupri_set(&rq->rd->cpupri, rq->cpu, prio); 912 cpupri_set(&rq->rd->cpupri, rq->cpu, prio);
906} 913}
@@ -910,6 +917,13 @@ dec_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio)
910{ 917{
911 struct rq *rq = rq_of_rt_rq(rt_rq); 918 struct rq *rq = rq_of_rt_rq(rt_rq);
912 919
920#ifdef CONFIG_RT_GROUP_SCHED
921 /*
922 * Change rq's cpupri only if rt_rq is the top queue.
923 */
924 if (&rq->rt != rt_rq)
925 return;
926#endif
913 if (rq->online && rt_rq->highest_prio.curr != prev_prio) 927 if (rq->online && rt_rq->highest_prio.curr != prev_prio)
914 cpupri_set(&rq->rd->cpupri, rq->cpu, rt_rq->highest_prio.curr); 928 cpupri_set(&rq->rd->cpupri, rq->cpu, rt_rq->highest_prio.curr);
915} 929}
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 0e9f9eaade2f..72a0f81dc5a8 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -775,7 +775,7 @@ static int ftrace_profile_init(void)
775 int cpu; 775 int cpu;
776 int ret = 0; 776 int ret = 0;
777 777
778 for_each_online_cpu(cpu) { 778 for_each_possible_cpu(cpu) {
779 ret = ftrace_profile_init_cpu(cpu); 779 ret = ftrace_profile_init_cpu(cpu);
780 if (ret) 780 if (ret)
781 break; 781 break;
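
The ftrace fix switches profile buffer allocation from online to possible CPUs, so a CPU hotplugged after init still finds its buffer. A toy model of why allocating for every possible CPU matters (sizes and arrays are stand-ins for the kernel's cpu masks and per-cpu data):

    #include <stdlib.h>
    #include <string.h>

    #define NR_POSSIBLE_CPUS 8

    static void *profile_buf[NR_POSSIBLE_CPUS];
    static int online[NR_POSSIBLE_CPUS] = { 1, 1 };   /* cpus 0-1 online now */

    /* Allocate for every *possible* CPU, not just the ones online at init
     * time; otherwise a CPU hotplugged later has no buffer and the
     * profiler dereferences NULL -- the bug for_each_possible_cpu() fixes. */
    static int profile_init(void)
    {
        for (int cpu = 0; cpu < NR_POSSIBLE_CPUS; cpu++) {
            profile_buf[cpu] = calloc(1, 4096);
            if (!profile_buf[cpu])
                return -1;
        }
        return 0;
    }

    int main(void)
    {
        if (profile_init())
            return 1;

        online[5] = 1;                       /* CPU 5 hotplugged later */
        if (online[5])
            memset(profile_buf[5], 0xff, 16);/* safe: buffer already exists */

        for (int cpu = 0; cpu < NR_POSSIBLE_CPUS; cpu++)
            free(profile_buf[cpu]);
        return 0;
    }
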
diff --git a/kernel/user.c b/kernel/user.c
index a3a0dbfda329..c006131beb77 100644
--- a/kernel/user.c
+++ b/kernel/user.c
@@ -51,9 +51,9 @@ struct user_namespace init_user_ns = {
51 .owner = GLOBAL_ROOT_UID, 51 .owner = GLOBAL_ROOT_UID,
52 .group = GLOBAL_ROOT_GID, 52 .group = GLOBAL_ROOT_GID,
53 .proc_inum = PROC_USER_INIT_INO, 53 .proc_inum = PROC_USER_INIT_INO,
54#ifdef CONFIG_KEYS_KERBEROS_CACHE 54#ifdef CONFIG_PERSISTENT_KEYRINGS
55 .krb_cache_register_sem = 55 .persistent_keyring_register_sem =
56 __RWSEM_INITIALIZER(init_user_ns.krb_cache_register_sem), 56 __RWSEM_INITIALIZER(init_user_ns.persistent_keyring_register_sem),
57#endif 57#endif
58}; 58};
59EXPORT_SYMBOL_GPL(init_user_ns); 59EXPORT_SYMBOL_GPL(init_user_ns);
diff --git a/mm/Kconfig b/mm/Kconfig
index eb69f352401d..723bbe04a0b0 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -543,7 +543,7 @@ config ZSWAP
543 543
544config MEM_SOFT_DIRTY 544config MEM_SOFT_DIRTY
545 bool "Track memory changes" 545 bool "Track memory changes"
546 depends on CHECKPOINT_RESTORE && HAVE_ARCH_SOFT_DIRTY 546 depends on CHECKPOINT_RESTORE && HAVE_ARCH_SOFT_DIRTY && PROC_FS
547 select PROC_PAGE_MONITOR 547 select PROC_PAGE_MONITOR
548 help 548 help
549 This option enables memory changes tracking by introducing a 549 This option enables memory changes tracking by introducing a
diff --git a/mm/compaction.c b/mm/compaction.c
index 805165bcd3dd..f58bcd016f43 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -134,6 +134,10 @@ static void update_pageblock_skip(struct compact_control *cc,
134 bool migrate_scanner) 134 bool migrate_scanner)
135{ 135{
136 struct zone *zone = cc->zone; 136 struct zone *zone = cc->zone;
137
138 if (cc->ignore_skip_hint)
139 return;
140
137 if (!page) 141 if (!page)
138 return; 142 return;
139 143
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 33a5dc492810..7de1bf85f683 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -882,6 +882,10 @@ int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
882 ret = 0; 882 ret = 0;
883 goto out_unlock; 883 goto out_unlock;
884 } 884 }
885
886 /* mmap_sem prevents this happening but warn if that changes */
887 WARN_ON(pmd_trans_migrating(pmd));
888
885 if (unlikely(pmd_trans_splitting(pmd))) { 889 if (unlikely(pmd_trans_splitting(pmd))) {
886 /* split huge page running from under us */ 890 /* split huge page running from under us */
887 spin_unlock(src_ptl); 891 spin_unlock(src_ptl);
@@ -1243,6 +1247,10 @@ struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
1243 if ((flags & FOLL_DUMP) && is_huge_zero_pmd(*pmd)) 1247 if ((flags & FOLL_DUMP) && is_huge_zero_pmd(*pmd))
1244 return ERR_PTR(-EFAULT); 1248 return ERR_PTR(-EFAULT);
1245 1249
1250 /* Full NUMA hinting faults to serialise migration in fault paths */
1251 if ((flags & FOLL_NUMA) && pmd_numa(*pmd))
1252 goto out;
1253
1246 page = pmd_page(*pmd); 1254 page = pmd_page(*pmd);
1247 VM_BUG_ON(!PageHead(page)); 1255 VM_BUG_ON(!PageHead(page));
1248 if (flags & FOLL_TOUCH) { 1256 if (flags & FOLL_TOUCH) {
@@ -1295,6 +1303,17 @@ int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
1295 if (unlikely(!pmd_same(pmd, *pmdp))) 1303 if (unlikely(!pmd_same(pmd, *pmdp)))
1296 goto out_unlock; 1304 goto out_unlock;
1297 1305
1306 /*
1307 * If there are potential migrations, wait for completion and retry
1308 * without disrupting NUMA hinting information. Do not relock and
1309 * check_same as the page may no longer be mapped.
1310 */
1311 if (unlikely(pmd_trans_migrating(*pmdp))) {
1312 spin_unlock(ptl);
1313 wait_migrate_huge_page(vma->anon_vma, pmdp);
1314 goto out;
1315 }
1316
1298 page = pmd_page(pmd); 1317 page = pmd_page(pmd);
1299 BUG_ON(is_huge_zero_page(page)); 1318 BUG_ON(is_huge_zero_page(page));
1300 page_nid = page_to_nid(page); 1319 page_nid = page_to_nid(page);
@@ -1323,23 +1342,22 @@ int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
1323 /* If the page was locked, there are no parallel migrations */ 1342 /* If the page was locked, there are no parallel migrations */
1324 if (page_locked) 1343 if (page_locked)
1325 goto clear_pmdnuma; 1344 goto clear_pmdnuma;
1345 }
1326 1346
1327 /* 1347 /* Migration could have started since the pmd_trans_migrating check */
1328 * Otherwise wait for potential migrations and retry. We do 1348 if (!page_locked) {
1329 * relock and check_same as the page may no longer be mapped.
1330 * As the fault is being retried, do not account for it.
1331 */
1332 spin_unlock(ptl); 1349 spin_unlock(ptl);
1333 wait_on_page_locked(page); 1350 wait_on_page_locked(page);
1334 page_nid = -1; 1351 page_nid = -1;
1335 goto out; 1352 goto out;
1336 } 1353 }
1337 1354
1338 /* Page is misplaced, serialise migrations and parallel THP splits */ 1355 /*
1356 * Page is misplaced. Page lock serialises migrations. Acquire anon_vma
1357 * to serialise splits
1358 */
1339 get_page(page); 1359 get_page(page);
1340 spin_unlock(ptl); 1360 spin_unlock(ptl);
1341 if (!page_locked)
1342 lock_page(page);
1343 anon_vma = page_lock_anon_vma_read(page); 1361 anon_vma = page_lock_anon_vma_read(page);
1344 1362
1345 /* Confirm the PMD did not change while page_table_lock was released */ 1363 /* Confirm the PMD did not change while page_table_lock was released */
@@ -1351,6 +1369,13 @@ int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
1351 goto out_unlock; 1369 goto out_unlock;
1352 } 1370 }
1353 1371
1372 /* Bail if we fail to protect against THP splits for any reason */
1373 if (unlikely(!anon_vma)) {
1374 put_page(page);
1375 page_nid = -1;
1376 goto clear_pmdnuma;
1377 }
1378
1354 /* 1379 /*
1355 * Migrate the THP to the requested node, returns with page unlocked 1380 * Migrate the THP to the requested node, returns with page unlocked
1356 * and pmd_numa cleared. 1381 * and pmd_numa cleared.
@@ -1517,6 +1542,8 @@ int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
1517 ret = 1; 1542 ret = 1;
1518 if (!prot_numa) { 1543 if (!prot_numa) {
1519 entry = pmdp_get_and_clear(mm, addr, pmd); 1544 entry = pmdp_get_and_clear(mm, addr, pmd);
1545 if (pmd_numa(entry))
1546 entry = pmd_mknonnuma(entry);
1520 entry = pmd_modify(entry, newprot); 1547 entry = pmd_modify(entry, newprot);
1521 ret = HPAGE_PMD_NR; 1548 ret = HPAGE_PMD_NR;
1522 BUG_ON(pmd_write(entry)); 1549 BUG_ON(pmd_write(entry));
@@ -1531,7 +1558,7 @@ int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
1531 */ 1558 */
1532 if (!is_huge_zero_page(page) && 1559 if (!is_huge_zero_page(page) &&
1533 !pmd_numa(*pmd)) { 1560 !pmd_numa(*pmd)) {
1534 entry = pmdp_get_and_clear(mm, addr, pmd); 1561 entry = *pmd;
1535 entry = pmd_mknuma(entry); 1562 entry = pmd_mknuma(entry);
1536 ret = HPAGE_PMD_NR; 1563 ret = HPAGE_PMD_NR;
1537 } 1564 }
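The pmd_trans_migrating()/wait_migrate_huge_page() pair used above treats the THP's page lock as the migration marker: the migration side locks the page before rewriting the pmd, so the hinting fault only tests the lock and, if it is held, drops its own locks, sleeps until migration unlocks the page, and retries. A condensed sketch of both sides (the helper bodies appear in the mm/migrate.c hunk below):

/* migration-in-progress test: the migrating side holds the page lock */
static bool sketch_pmd_trans_migrating(pmd_t pmd)
{
        return PageLocked(pmd_page(pmd));
}

/* fault side: back off instead of relocking a pmd that may be gone */
if (unlikely(sketch_pmd_trans_migrating(*pmdp))) {
        spin_unlock(ptl);
        wait_on_page_locked(pmd_page(*pmdp));
        goto out;                       /* retry the fault from scratch */
}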
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index b7c171602ba1..db08af92c6fc 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -1505,10 +1505,16 @@ static int soft_offline_huge_page(struct page *page, int flags)
1505 if (ret > 0) 1505 if (ret > 0)
1506 ret = -EIO; 1506 ret = -EIO;
1507 } else { 1507 } else {
1508 set_page_hwpoison_huge_page(hpage); 1508 /* overcommit hugetlb page will be freed to buddy */
1509 dequeue_hwpoisoned_huge_page(hpage); 1509 if (PageHuge(page)) {
1510 atomic_long_add(1 << compound_order(hpage), 1510 set_page_hwpoison_huge_page(hpage);
1511 &num_poisoned_pages); 1511 dequeue_hwpoisoned_huge_page(hpage);
1512 atomic_long_add(1 << compound_order(hpage),
1513 &num_poisoned_pages);
1514 } else {
1515 SetPageHWPoison(page);
1516 atomic_long_inc(&num_poisoned_pages);
1517 }
1512 } 1518 }
1513 return ret; 1519 return ret;
1514} 1520}
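The PageHuge() recheck is needed because a surplus (overcommitted) hugetlb page may already have been released to the buddy allocator by the time migration succeeds. Condensed from the hunk, with comments:

if (PageHuge(page)) {
        /* still hugetlb: poison and account every base page */
        set_page_hwpoison_huge_page(hpage);
        dequeue_hwpoisoned_huge_page(hpage);
        atomic_long_add(1 << compound_order(hpage), &num_poisoned_pages);
} else {
        /* already freed to buddy: poison it as a single page */
        SetPageHWPoison(page);
        atomic_long_inc(&num_poisoned_pages);
}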
diff --git a/mm/memory.c b/mm/memory.c
index 5d9025f3b3e1..6768ce9e57d2 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -4271,7 +4271,7 @@ void copy_user_huge_page(struct page *dst, struct page *src,
4271} 4271}
4272#endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */ 4272#endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */
4273 4273
4274#if USE_SPLIT_PTE_PTLOCKS && BLOATED_SPINLOCKS 4274#if USE_SPLIT_PTE_PTLOCKS && ALLOC_SPLIT_PTLOCKS
4275bool ptlock_alloc(struct page *page) 4275bool ptlock_alloc(struct page *page)
4276{ 4276{
4277 spinlock_t *ptl; 4277 spinlock_t *ptl;
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index eca4a3129129..0cd2c4d4e270 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -1197,14 +1197,16 @@ static struct page *new_vma_page(struct page *page, unsigned long private, int *
1197 break; 1197 break;
1198 vma = vma->vm_next; 1198 vma = vma->vm_next;
1199 } 1199 }
1200
1201 if (PageHuge(page)) {
1202 if (vma)
1203 return alloc_huge_page_noerr(vma, address, 1);
1204 else
1205 return NULL;
1206 }
1200 /* 1207 /*
1201 * queue_pages_range() confirms that @page belongs to some vma, 1208 * if !vma, alloc_page_vma() will use task or system default policy
1202 * so vma shouldn't be NULL.
1203 */ 1209 */
1204 BUG_ON(!vma);
1205
1206 if (PageHuge(page))
1207 return alloc_huge_page_noerr(vma, address, 1);
1208 return alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address); 1210 return alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address);
1209} 1211}
1210#else 1212#else
@@ -1318,7 +1320,7 @@ static long do_mbind(unsigned long start, unsigned long len,
1318 if (nr_failed && (flags & MPOL_MF_STRICT)) 1320 if (nr_failed && (flags & MPOL_MF_STRICT))
1319 err = -EIO; 1321 err = -EIO;
1320 } else 1322 } else
1321 putback_lru_pages(&pagelist); 1323 putback_movable_pages(&pagelist);
1322 1324
1323 up_write(&mm->mmap_sem); 1325 up_write(&mm->mmap_sem);
1324 mpol_out: 1326 mpol_out:
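Two points are folded into the new_vma_page() change: a failed vma lookup is no longer a BUG, and the hugetlb case is handled before the generic one because alloc_huge_page_noerr() has no vma-less fallback while alloc_page_vma() does. Condensed:

if (PageHuge(page))
        return vma ? alloc_huge_page_noerr(vma, address, 1) : NULL;
/* with vma == NULL, alloc_page_vma() uses the task/system default policy */
return alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address);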
diff --git a/mm/migrate.c b/mm/migrate.c
index bb940045fe85..9194375b2307 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -36,6 +36,7 @@
36#include <linux/hugetlb_cgroup.h> 36#include <linux/hugetlb_cgroup.h>
37#include <linux/gfp.h> 37#include <linux/gfp.h>
38#include <linux/balloon_compaction.h> 38#include <linux/balloon_compaction.h>
39#include <linux/mmu_notifier.h>
39 40
40#include <asm/tlbflush.h> 41#include <asm/tlbflush.h>
41 42
@@ -316,14 +317,15 @@ static inline bool buffer_migrate_lock_buffers(struct buffer_head *head,
316 */ 317 */
317int migrate_page_move_mapping(struct address_space *mapping, 318int migrate_page_move_mapping(struct address_space *mapping,
318 struct page *newpage, struct page *page, 319 struct page *newpage, struct page *page,
319 struct buffer_head *head, enum migrate_mode mode) 320 struct buffer_head *head, enum migrate_mode mode,
321 int extra_count)
320{ 322{
321 int expected_count = 0; 323 int expected_count = 1 + extra_count;
322 void **pslot; 324 void **pslot;
323 325
324 if (!mapping) { 326 if (!mapping) {
325 /* Anonymous page without mapping */ 327 /* Anonymous page without mapping */
326 if (page_count(page) != 1) 328 if (page_count(page) != expected_count)
327 return -EAGAIN; 329 return -EAGAIN;
328 return MIGRATEPAGE_SUCCESS; 330 return MIGRATEPAGE_SUCCESS;
329 } 331 }
@@ -333,7 +335,7 @@ int migrate_page_move_mapping(struct address_space *mapping,
333 pslot = radix_tree_lookup_slot(&mapping->page_tree, 335 pslot = radix_tree_lookup_slot(&mapping->page_tree,
334 page_index(page)); 336 page_index(page));
335 337
336 expected_count = 2 + page_has_private(page); 338 expected_count += 1 + page_has_private(page);
337 if (page_count(page) != expected_count || 339 if (page_count(page) != expected_count ||
338 radix_tree_deref_slot_protected(pslot, &mapping->tree_lock) != page) { 340 radix_tree_deref_slot_protected(pslot, &mapping->tree_lock) != page) {
339 spin_unlock_irq(&mapping->tree_lock); 341 spin_unlock_irq(&mapping->tree_lock);
@@ -583,7 +585,7 @@ int migrate_page(struct address_space *mapping,
583 585
584 BUG_ON(PageWriteback(page)); /* Writeback must be complete */ 586 BUG_ON(PageWriteback(page)); /* Writeback must be complete */
585 587
586 rc = migrate_page_move_mapping(mapping, newpage, page, NULL, mode); 588 rc = migrate_page_move_mapping(mapping, newpage, page, NULL, mode, 0);
587 589
588 if (rc != MIGRATEPAGE_SUCCESS) 590 if (rc != MIGRATEPAGE_SUCCESS)
589 return rc; 591 return rc;
@@ -610,7 +612,7 @@ int buffer_migrate_page(struct address_space *mapping,
610 612
611 head = page_buffers(page); 613 head = page_buffers(page);
612 614
613 rc = migrate_page_move_mapping(mapping, newpage, page, head, mode); 615 rc = migrate_page_move_mapping(mapping, newpage, page, head, mode, 0);
614 616
615 if (rc != MIGRATEPAGE_SUCCESS) 617 if (rc != MIGRATEPAGE_SUCCESS)
616 return rc; 618 return rc;
@@ -1654,6 +1656,18 @@ int numamigrate_isolate_page(pg_data_t *pgdat, struct page *page)
1654 return 1; 1656 return 1;
1655} 1657}
1656 1658
1659bool pmd_trans_migrating(pmd_t pmd)
1660{
1661 struct page *page = pmd_page(pmd);
1662 return PageLocked(page);
1663}
1664
1665void wait_migrate_huge_page(struct anon_vma *anon_vma, pmd_t *pmd)
1666{
1667 struct page *page = pmd_page(*pmd);
1668 wait_on_page_locked(page);
1669}
1670
1657/* 1671/*
1658 * Attempt to migrate a misplaced page to the specified destination 1672 * Attempt to migrate a misplaced page to the specified destination
1659 * node. Caller is expected to have an elevated reference count on 1673 * node. Caller is expected to have an elevated reference count on
@@ -1716,12 +1730,14 @@ int migrate_misplaced_transhuge_page(struct mm_struct *mm,
1716 struct page *page, int node) 1730 struct page *page, int node)
1717{ 1731{
1718 spinlock_t *ptl; 1732 spinlock_t *ptl;
1719 unsigned long haddr = address & HPAGE_PMD_MASK;
1720 pg_data_t *pgdat = NODE_DATA(node); 1733 pg_data_t *pgdat = NODE_DATA(node);
1721 int isolated = 0; 1734 int isolated = 0;
1722 struct page *new_page = NULL; 1735 struct page *new_page = NULL;
1723 struct mem_cgroup *memcg = NULL; 1736 struct mem_cgroup *memcg = NULL;
1724 int page_lru = page_is_file_cache(page); 1737 int page_lru = page_is_file_cache(page);
1738 unsigned long mmun_start = address & HPAGE_PMD_MASK;
1739 unsigned long mmun_end = mmun_start + HPAGE_PMD_SIZE;
1740 pmd_t orig_entry;
1725 1741
1726 /* 1742 /*
1727 * Rate-limit the amount of data that is being migrated to a node. 1743 * Rate-limit the amount of data that is being migrated to a node.
@@ -1744,6 +1760,9 @@ int migrate_misplaced_transhuge_page(struct mm_struct *mm,
1744 goto out_fail; 1760 goto out_fail;
1745 } 1761 }
1746 1762
1763 if (mm_tlb_flush_pending(mm))
1764 flush_tlb_range(vma, mmun_start, mmun_end);
1765
1747 /* Prepare a page as a migration target */ 1766 /* Prepare a page as a migration target */
1748 __set_page_locked(new_page); 1767 __set_page_locked(new_page);
1749 SetPageSwapBacked(new_page); 1768 SetPageSwapBacked(new_page);
@@ -1755,9 +1774,12 @@ int migrate_misplaced_transhuge_page(struct mm_struct *mm,
1755 WARN_ON(PageLRU(new_page)); 1774 WARN_ON(PageLRU(new_page));
1756 1775
1757 /* Recheck the target PMD */ 1776 /* Recheck the target PMD */
1777 mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
1758 ptl = pmd_lock(mm, pmd); 1778 ptl = pmd_lock(mm, pmd);
1759 if (unlikely(!pmd_same(*pmd, entry))) { 1779 if (unlikely(!pmd_same(*pmd, entry) || page_count(page) != 2)) {
1780fail_putback:
1760 spin_unlock(ptl); 1781 spin_unlock(ptl);
1782 mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
1761 1783
1762 /* Reverse changes made by migrate_page_copy() */ 1784 /* Reverse changes made by migrate_page_copy() */
1763 if (TestClearPageActive(new_page)) 1785 if (TestClearPageActive(new_page))
@@ -1774,7 +1796,8 @@ int migrate_misplaced_transhuge_page(struct mm_struct *mm,
1774 putback_lru_page(page); 1796 putback_lru_page(page);
1775 mod_zone_page_state(page_zone(page), 1797 mod_zone_page_state(page_zone(page),
1776 NR_ISOLATED_ANON + page_lru, -HPAGE_PMD_NR); 1798 NR_ISOLATED_ANON + page_lru, -HPAGE_PMD_NR);
1777 goto out_fail; 1799
1800 goto out_unlock;
1778 } 1801 }
1779 1802
1780 /* 1803 /*
@@ -1786,16 +1809,35 @@ int migrate_misplaced_transhuge_page(struct mm_struct *mm,
1786 */ 1809 */
1787 mem_cgroup_prepare_migration(page, new_page, &memcg); 1810 mem_cgroup_prepare_migration(page, new_page, &memcg);
1788 1811
1812 orig_entry = *pmd;
1789 entry = mk_pmd(new_page, vma->vm_page_prot); 1813 entry = mk_pmd(new_page, vma->vm_page_prot);
1790 entry = pmd_mknonnuma(entry);
1791 entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
1792 entry = pmd_mkhuge(entry); 1814 entry = pmd_mkhuge(entry);
1815 entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
1793 1816
1794 pmdp_clear_flush(vma, haddr, pmd); 1817 /*
1795 set_pmd_at(mm, haddr, pmd, entry); 1818 * Clear the old entry under pagetable lock and establish the new PTE.
1796 page_add_new_anon_rmap(new_page, vma, haddr); 1819 * Any parallel GUP will either observe the old page blocking on the
1820 * page lock, block on the page table lock or observe the new page.
1821 * The SetPageUptodate on the new page and page_add_new_anon_rmap
1822 * guarantee the copy is visible before the pagetable update.
1823 */
1824 flush_cache_range(vma, mmun_start, mmun_end);
1825 page_add_new_anon_rmap(new_page, vma, mmun_start);
1826 pmdp_clear_flush(vma, mmun_start, pmd);
1827 set_pmd_at(mm, mmun_start, pmd, entry);
1828 flush_tlb_range(vma, mmun_start, mmun_end);
1797 update_mmu_cache_pmd(vma, address, &entry); 1829 update_mmu_cache_pmd(vma, address, &entry);
1830
1831 if (page_count(page) != 2) {
1832 set_pmd_at(mm, mmun_start, pmd, orig_entry);
1833 flush_tlb_range(vma, mmun_start, mmun_end);
1834 update_mmu_cache_pmd(vma, address, &entry);
1835 page_remove_rmap(new_page);
1836 goto fail_putback;
1837 }
1838
1798 page_remove_rmap(page); 1839 page_remove_rmap(page);
1840
1799 /* 1841 /*
1800 * Finish the charge transaction under the page table lock to 1842 * Finish the charge transaction under the page table lock to
1801 * prevent split_huge_page() from dividing up the charge 1843 * prevent split_huge_page() from dividing up the charge
@@ -1803,6 +1845,7 @@ int migrate_misplaced_transhuge_page(struct mm_struct *mm,
1803 */ 1845 */
1804 mem_cgroup_end_migration(memcg, page, new_page, true); 1846 mem_cgroup_end_migration(memcg, page, new_page, true);
1805 spin_unlock(ptl); 1847 spin_unlock(ptl);
1848 mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
1806 1849
1807 unlock_page(new_page); 1850 unlock_page(new_page);
1808 unlock_page(page); 1851 unlock_page(page);
@@ -1820,10 +1863,15 @@ int migrate_misplaced_transhuge_page(struct mm_struct *mm,
1820out_fail: 1863out_fail:
1821 count_vm_events(PGMIGRATE_FAIL, HPAGE_PMD_NR); 1864 count_vm_events(PGMIGRATE_FAIL, HPAGE_PMD_NR);
1822out_dropref: 1865out_dropref:
1823 entry = pmd_mknonnuma(entry); 1866 ptl = pmd_lock(mm, pmd);
1824 set_pmd_at(mm, haddr, pmd, entry); 1867 if (pmd_same(*pmd, entry)) {
1825 update_mmu_cache_pmd(vma, address, &entry); 1868 entry = pmd_mknonnuma(entry);
1869 set_pmd_at(mm, mmun_start, pmd, entry);
1870 update_mmu_cache_pmd(vma, address, &entry);
1871 }
1872 spin_unlock(ptl);
1826 1873
1874out_unlock:
1827 unlock_page(page); 1875 unlock_page(page);
1828 put_page(page); 1876 put_page(page);
1829 return 0; 1877 return 0;
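The new extra_count parameter of migrate_page_move_mapping() lets a caller declare references it legitimately holds, so the refcount identity check still catches unexpected pins. A sketch of the arithmetic for the unmapped anonymous case (mapped pages add one radix-tree reference plus page_has_private()):

static bool sketch_refs_ok(struct page *page, int extra_count)
{
        /* one isolation reference plus whatever the caller declared */
        int expected_count = 1 + extra_count;

        return page_count(page) == expected_count;
}

/* migrate_page() and buffer_migrate_page() pass extra_count = 0 and so
 * keep their previous expected counts. */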
diff --git a/mm/mprotect.c b/mm/mprotect.c
index 26667971c824..bb53a6591aea 100644
--- a/mm/mprotect.c
+++ b/mm/mprotect.c
@@ -52,17 +52,21 @@ static unsigned long change_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
52 pte_t ptent; 52 pte_t ptent;
53 bool updated = false; 53 bool updated = false;
54 54
55 ptent = ptep_modify_prot_start(mm, addr, pte);
56 if (!prot_numa) { 55 if (!prot_numa) {
56 ptent = ptep_modify_prot_start(mm, addr, pte);
57 if (pte_numa(ptent))
58 ptent = pte_mknonnuma(ptent);
57 ptent = pte_modify(ptent, newprot); 59 ptent = pte_modify(ptent, newprot);
58 updated = true; 60 updated = true;
59 } else { 61 } else {
60 struct page *page; 62 struct page *page;
61 63
64 ptent = *pte;
62 page = vm_normal_page(vma, addr, oldpte); 65 page = vm_normal_page(vma, addr, oldpte);
63 if (page) { 66 if (page) {
64 if (!pte_numa(oldpte)) { 67 if (!pte_numa(oldpte)) {
65 ptent = pte_mknuma(ptent); 68 ptent = pte_mknuma(ptent);
69 set_pte_at(mm, addr, pte, ptent);
66 updated = true; 70 updated = true;
67 } 71 }
68 } 72 }
@@ -79,7 +83,10 @@ static unsigned long change_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
79 83
80 if (updated) 84 if (updated)
81 pages++; 85 pages++;
82 ptep_modify_prot_commit(mm, addr, pte, ptent); 86
87 /* Only !prot_numa always clears the pte */
88 if (!prot_numa)
89 ptep_modify_prot_commit(mm, addr, pte, ptent);
83 } else if (IS_ENABLED(CONFIG_MIGRATION) && !pte_file(oldpte)) { 90 } else if (IS_ENABLED(CONFIG_MIGRATION) && !pte_file(oldpte)) {
84 swp_entry_t entry = pte_to_swp_entry(oldpte); 91 swp_entry_t entry = pte_to_swp_entry(oldpte);
85 92
@@ -181,6 +188,7 @@ static unsigned long change_protection_range(struct vm_area_struct *vma,
181 BUG_ON(addr >= end); 188 BUG_ON(addr >= end);
182 pgd = pgd_offset(mm, addr); 189 pgd = pgd_offset(mm, addr);
183 flush_cache_range(vma, addr, end); 190 flush_cache_range(vma, addr, end);
191 set_tlb_flush_pending(mm);
184 do { 192 do {
185 next = pgd_addr_end(addr, end); 193 next = pgd_addr_end(addr, end);
186 if (pgd_none_or_clear_bad(pgd)) 194 if (pgd_none_or_clear_bad(pgd))
@@ -192,6 +200,7 @@ static unsigned long change_protection_range(struct vm_area_struct *vma,
192 /* Only flush the TLB if we actually modified any entries: */ 200 /* Only flush the TLB if we actually modified any entries: */
193 if (pages) 201 if (pages)
194 flush_tlb_range(vma, start, end); 202 flush_tlb_range(vma, start, end);
203 clear_tlb_flush_pending(mm);
195 204
196 return pages; 205 return pages;
197} 206}
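set_tlb_flush_pending() and clear_tlb_flush_pending() bracket the whole protection change so that the THP migration path (the mm_tlb_flush_pending() test added in the mm/migrate.c hunk above) can see that a flush is still outstanding and flush again before treating the page as unmapped. The pairing, condensed, with sketch_change_all_ptes() as a hypothetical stand-in for the pte walk:

/* writer: mark the mm before clearing ptes, unmark after the flush */
set_tlb_flush_pending(mm);
pages = sketch_change_all_ptes(vma, start, end, newprot);
if (pages)
        flush_tlb_range(vma, start, end);
clear_tlb_flush_pending(mm);

/* reader (THP migration): flush defensively if a change is in flight */
if (mm_tlb_flush_pending(mm))
        flush_tlb_range(vma, mmun_start, mmun_end);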
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 580a5f075ed0..5248fe070aa4 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1816,7 +1816,7 @@ static void zlc_clear_zones_full(struct zonelist *zonelist)
1816 1816
1817static bool zone_local(struct zone *local_zone, struct zone *zone) 1817static bool zone_local(struct zone *local_zone, struct zone *zone)
1818{ 1818{
1819 return node_distance(local_zone->node, zone->node) == LOCAL_DISTANCE; 1819 return local_zone->node == zone->node;
1820} 1820}
1821 1821
1822static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone) 1822static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone)
@@ -1913,18 +1913,17 @@ zonelist_scan:
1913 * page was allocated in should have no effect on the 1913 * page was allocated in should have no effect on the
1914 * time the page has in memory before being reclaimed. 1914 * time the page has in memory before being reclaimed.
1915 * 1915 *
1916 * When zone_reclaim_mode is enabled, try to stay in 1916 * Try to stay in local zones in the fastpath. If
1917 * local zones in the fastpath. If that fails, the 1917 * that fails, the slowpath is entered, which will do
1918 * slowpath is entered, which will do another pass 1918 * another pass starting with the local zones, but
1919 * starting with the local zones, but ultimately fall 1919 * ultimately fall back to remote zones that do not
1920 * back to remote zones that do not partake in the 1920 * partake in the fairness round-robin cycle of this
1921 * fairness round-robin cycle of this zonelist. 1921 * zonelist.
1922 */ 1922 */
1923 if (alloc_flags & ALLOC_WMARK_LOW) { 1923 if (alloc_flags & ALLOC_WMARK_LOW) {
1924 if (zone_page_state(zone, NR_ALLOC_BATCH) <= 0) 1924 if (zone_page_state(zone, NR_ALLOC_BATCH) <= 0)
1925 continue; 1925 continue;
1926 if (zone_reclaim_mode && 1926 if (!zone_local(preferred_zone, zone))
1927 !zone_local(preferred_zone, zone))
1928 continue; 1927 continue;
1929 } 1928 }
1930 /* 1929 /*
@@ -2390,7 +2389,7 @@ static void prepare_slowpath(gfp_t gfp_mask, unsigned int order,
2390 * thrash fairness information for zones that are not 2389 * thrash fairness information for zones that are not
2391 * actually part of this zonelist's round-robin cycle. 2390 * actually part of this zonelist's round-robin cycle.
2392 */ 2391 */
2393 if (zone_reclaim_mode && !zone_local(preferred_zone, zone)) 2392 if (!zone_local(preferred_zone, zone))
2394 continue; 2393 continue;
2395 mod_zone_page_state(zone, NR_ALLOC_BATCH, 2394 mod_zone_page_state(zone, NR_ALLOC_BATCH,
2396 high_wmark_pages(zone) - 2395 high_wmark_pages(zone) -
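With the zone_reclaim_mode test dropped, the fair-zone batch policy applies unconditionally. The fastpath filter the comment above describes, condensed:

if (alloc_flags & ALLOC_WMARK_LOW) {
        if (zone_page_state(zone, NR_ALLOC_BATCH) <= 0)
                continue;       /* zone spent its fair share, rotate onwards */
        if (!zone_local(preferred_zone, zone))
                continue;       /* remote zones sit out the round-robin */
}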
diff --git a/mm/pgtable-generic.c b/mm/pgtable-generic.c
index cbb38545d9d6..a8b919925934 100644
--- a/mm/pgtable-generic.c
+++ b/mm/pgtable-generic.c
@@ -110,9 +110,10 @@ int pmdp_clear_flush_young(struct vm_area_struct *vma,
110pte_t ptep_clear_flush(struct vm_area_struct *vma, unsigned long address, 110pte_t ptep_clear_flush(struct vm_area_struct *vma, unsigned long address,
111 pte_t *ptep) 111 pte_t *ptep)
112{ 112{
113 struct mm_struct *mm = (vma)->vm_mm;
113 pte_t pte; 114 pte_t pte;
114 pte = ptep_get_and_clear((vma)->vm_mm, address, ptep); 115 pte = ptep_get_and_clear(mm, address, ptep);
115 if (pte_accessible(pte)) 116 if (pte_accessible(mm, pte))
116 flush_tlb_page(vma, address); 117 flush_tlb_page(vma, address);
117 return pte; 118 return pte;
118} 119}
@@ -191,6 +192,9 @@ pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp)
191void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address, 192void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
192 pmd_t *pmdp) 193 pmd_t *pmdp)
193{ 194{
195 pmd_t entry = *pmdp;
196 if (pmd_numa(entry))
197 entry = pmd_mknonnuma(entry);
194 set_pmd_at(vma->vm_mm, address, pmdp, pmd_mknotpresent(*pmdp)); 198 set_pmd_at(vma->vm_mm, address, pmdp, pmd_mknotpresent(entry));
195 flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE); 199 flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
196} 200}
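Reconstructed, the post-patch pmdp_invalidate() reads as below. Clearing the NUMA bit first matters because on x86 a NUMA pmd already reads as not-present, so invalidating a still-NUMA pmd could be confused with a NUMA hinting fault in progress:

void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
                     pmd_t *pmdp)
{
        pmd_t entry = *pmdp;

        /* drop the NUMA encoding before writing the invalidated pmd */
        if (pmd_numa(entry))
                entry = pmd_mknonnuma(entry);
        set_pmd_at(vma->vm_mm, address, pmdp, pmd_mknotpresent(entry));
        flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
}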
diff --git a/mm/rmap.c b/mm/rmap.c
index 55c8b8dc9ffb..068522d8502a 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -600,7 +600,11 @@ pte_t *__page_check_address(struct page *page, struct mm_struct *mm,
600 spinlock_t *ptl; 600 spinlock_t *ptl;
601 601
602 if (unlikely(PageHuge(page))) { 602 if (unlikely(PageHuge(page))) {
603 /* when pud is not present, pte will be NULL */
603 pte = huge_pte_offset(mm, address); 604 pte = huge_pte_offset(mm, address);
605 if (!pte)
606 return NULL;
607
604 ptl = huge_pte_lockptr(page_hstate(page), mm, pte); 608 ptl = huge_pte_lockptr(page_hstate(page), mm, pte);
605 goto check; 609 goto check;
606 } 610 }
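huge_pte_offset() walks the upper page-table levels and returns NULL when no level covers the address, so the result must be checked before a lock pointer is derived from it. The hugetlb branch, condensed:

pte = huge_pte_offset(mm, address);
if (!pte)               /* pud not present: page is not mapped here */
        return NULL;
ptl = huge_pte_lockptr(page_hstate(page), mm, pte);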
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
index 762896ebfcf5..47c908f1f626 100644
--- a/net/8021q/vlan_dev.c
+++ b/net/8021q/vlan_dev.c
@@ -530,6 +530,23 @@ static const struct header_ops vlan_header_ops = {
530 .parse = eth_header_parse, 530 .parse = eth_header_parse,
531}; 531};
532 532
533static int vlan_passthru_hard_header(struct sk_buff *skb, struct net_device *dev,
534 unsigned short type,
535 const void *daddr, const void *saddr,
536 unsigned int len)
537{
538 struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
539 struct net_device *real_dev = vlan->real_dev;
540
541 return dev_hard_header(skb, real_dev, type, daddr, saddr, len);
542}
543
544static const struct header_ops vlan_passthru_header_ops = {
545 .create = vlan_passthru_hard_header,
546 .rebuild = dev_rebuild_header,
547 .parse = eth_header_parse,
548};
549
533static struct device_type vlan_type = { 550static struct device_type vlan_type = {
534 .name = "vlan", 551 .name = "vlan",
535}; 552};
@@ -573,7 +590,7 @@ static int vlan_dev_init(struct net_device *dev)
573 590
574 dev->needed_headroom = real_dev->needed_headroom; 591 dev->needed_headroom = real_dev->needed_headroom;
575 if (real_dev->features & NETIF_F_HW_VLAN_CTAG_TX) { 592 if (real_dev->features & NETIF_F_HW_VLAN_CTAG_TX) {
576 dev->header_ops = real_dev->header_ops; 593 dev->header_ops = &vlan_passthru_header_ops;
577 dev->hard_header_len = real_dev->hard_header_len; 594 dev->hard_header_len = real_dev->hard_header_len;
578 } else { 595 } else {
579 dev->header_ops = &vlan_header_ops; 596 dev->header_ops = &vlan_header_ops;
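Hooking real_dev->header_ops directly onto the vlan device meant that a create() implementation written for the real device's type ran against the vlan netdev. The passthru ops keep a vlan-owned ops table and re-enter dev_hard_header() on the real device instead. A caller's view, condensed (ETH_P_IP and the NULL addresses are illustrative):

/* dev_hard_header() dispatches through dev->header_ops->create(); for a
 * vlan device with TX offload that is now vlan_passthru_hard_header(),
 * which rebinds the call to real_dev before building the header. */
static int sketch_build_header(struct sk_buff *skb, struct net_device *vlan_dev)
{
        return dev_hard_header(skb, vlan_dev, ETH_P_IP, NULL, NULL, skb->len);
}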
diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c
index a2b480a90872..b9c8a6eedf45 100644
--- a/net/batman-adv/bat_iv_ogm.c
+++ b/net/batman-adv/bat_iv_ogm.c
@@ -307,9 +307,9 @@ static int batadv_iv_ogm_iface_enable(struct batadv_hard_iface *hard_iface)
307 hard_iface->bat_iv.ogm_buff = ogm_buff; 307 hard_iface->bat_iv.ogm_buff = ogm_buff;
308 308
309 batadv_ogm_packet = (struct batadv_ogm_packet *)ogm_buff; 309 batadv_ogm_packet = (struct batadv_ogm_packet *)ogm_buff;
310 batadv_ogm_packet->header.packet_type = BATADV_IV_OGM; 310 batadv_ogm_packet->packet_type = BATADV_IV_OGM;
311 batadv_ogm_packet->header.version = BATADV_COMPAT_VERSION; 311 batadv_ogm_packet->version = BATADV_COMPAT_VERSION;
312 batadv_ogm_packet->header.ttl = 2; 312 batadv_ogm_packet->ttl = 2;
313 batadv_ogm_packet->flags = BATADV_NO_FLAGS; 313 batadv_ogm_packet->flags = BATADV_NO_FLAGS;
314 batadv_ogm_packet->reserved = 0; 314 batadv_ogm_packet->reserved = 0;
315 batadv_ogm_packet->tq = BATADV_TQ_MAX_VALUE; 315 batadv_ogm_packet->tq = BATADV_TQ_MAX_VALUE;
@@ -346,7 +346,7 @@ batadv_iv_ogm_primary_iface_set(struct batadv_hard_iface *hard_iface)
346 346
347 batadv_ogm_packet = (struct batadv_ogm_packet *)ogm_buff; 347 batadv_ogm_packet = (struct batadv_ogm_packet *)ogm_buff;
348 batadv_ogm_packet->flags = BATADV_PRIMARIES_FIRST_HOP; 348 batadv_ogm_packet->flags = BATADV_PRIMARIES_FIRST_HOP;
349 batadv_ogm_packet->header.ttl = BATADV_TTL; 349 batadv_ogm_packet->ttl = BATADV_TTL;
350} 350}
351 351
352/* when do we schedule our own ogm to be sent */ 352/* when do we schedule our own ogm to be sent */
@@ -435,7 +435,7 @@ static void batadv_iv_ogm_send_to_if(struct batadv_forw_packet *forw_packet,
435 fwd_str, (packet_num > 0 ? "aggregated " : ""), 435 fwd_str, (packet_num > 0 ? "aggregated " : ""),
436 batadv_ogm_packet->orig, 436 batadv_ogm_packet->orig,
437 ntohl(batadv_ogm_packet->seqno), 437 ntohl(batadv_ogm_packet->seqno),
438 batadv_ogm_packet->tq, batadv_ogm_packet->header.ttl, 438 batadv_ogm_packet->tq, batadv_ogm_packet->ttl,
439 (batadv_ogm_packet->flags & BATADV_DIRECTLINK ? 439 (batadv_ogm_packet->flags & BATADV_DIRECTLINK ?
440 "on" : "off"), 440 "on" : "off"),
441 hard_iface->net_dev->name, 441 hard_iface->net_dev->name,
@@ -491,7 +491,7 @@ static void batadv_iv_ogm_emit(struct batadv_forw_packet *forw_packet)
491 /* multihomed peer assumed 491 /* multihomed peer assumed
492 * non-primary OGMs are only broadcasted on their interface 492 * non-primary OGMs are only broadcasted on their interface
493 */ 493 */
494 if ((directlink && (batadv_ogm_packet->header.ttl == 1)) || 494 if ((directlink && (batadv_ogm_packet->ttl == 1)) ||
495 (forw_packet->own && (forw_packet->if_incoming != primary_if))) { 495 (forw_packet->own && (forw_packet->if_incoming != primary_if))) {
496 /* FIXME: what about aggregated packets ? */ 496 /* FIXME: what about aggregated packets ? */
497 batadv_dbg(BATADV_DBG_BATMAN, bat_priv, 497 batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
@@ -499,7 +499,7 @@ static void batadv_iv_ogm_emit(struct batadv_forw_packet *forw_packet)
499 (forw_packet->own ? "Sending own" : "Forwarding"), 499 (forw_packet->own ? "Sending own" : "Forwarding"),
500 batadv_ogm_packet->orig, 500 batadv_ogm_packet->orig,
501 ntohl(batadv_ogm_packet->seqno), 501 ntohl(batadv_ogm_packet->seqno),
502 batadv_ogm_packet->header.ttl, 502 batadv_ogm_packet->ttl,
503 forw_packet->if_incoming->net_dev->name, 503 forw_packet->if_incoming->net_dev->name,
504 forw_packet->if_incoming->net_dev->dev_addr); 504 forw_packet->if_incoming->net_dev->dev_addr);
505 505
@@ -572,7 +572,7 @@ batadv_iv_ogm_can_aggregate(const struct batadv_ogm_packet *new_bat_ogm_packet,
572 */ 572 */
573 if ((!directlink) && 573 if ((!directlink) &&
574 (!(batadv_ogm_packet->flags & BATADV_DIRECTLINK)) && 574 (!(batadv_ogm_packet->flags & BATADV_DIRECTLINK)) &&
575 (batadv_ogm_packet->header.ttl != 1) && 575 (batadv_ogm_packet->ttl != 1) &&
576 576
577 /* own packets originating non-primary 577 /* own packets originating non-primary
578 * interfaces leave only that interface 578 * interfaces leave only that interface
@@ -587,7 +587,7 @@ batadv_iv_ogm_can_aggregate(const struct batadv_ogm_packet *new_bat_ogm_packet,
587 * interface only - we still can aggregate 587 * interface only - we still can aggregate
588 */ 588 */
589 if ((directlink) && 589 if ((directlink) &&
590 (new_bat_ogm_packet->header.ttl == 1) && 590 (new_bat_ogm_packet->ttl == 1) &&
591 (forw_packet->if_incoming == if_incoming) && 591 (forw_packet->if_incoming == if_incoming) &&
592 592
593 /* packets from direct neighbors or 593 /* packets from direct neighbors or
@@ -778,7 +778,7 @@ static void batadv_iv_ogm_forward(struct batadv_orig_node *orig_node,
778 struct batadv_priv *bat_priv = netdev_priv(if_incoming->soft_iface); 778 struct batadv_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
779 uint16_t tvlv_len; 779 uint16_t tvlv_len;
780 780
781 if (batadv_ogm_packet->header.ttl <= 1) { 781 if (batadv_ogm_packet->ttl <= 1) {
782 batadv_dbg(BATADV_DBG_BATMAN, bat_priv, "ttl exceeded\n"); 782 batadv_dbg(BATADV_DBG_BATMAN, bat_priv, "ttl exceeded\n");
783 return; 783 return;
784 } 784 }
@@ -798,7 +798,7 @@ static void batadv_iv_ogm_forward(struct batadv_orig_node *orig_node,
798 798
799 tvlv_len = ntohs(batadv_ogm_packet->tvlv_len); 799 tvlv_len = ntohs(batadv_ogm_packet->tvlv_len);
800 800
801 batadv_ogm_packet->header.ttl--; 801 batadv_ogm_packet->ttl--;
802 memcpy(batadv_ogm_packet->prev_sender, ethhdr->h_source, ETH_ALEN); 802 memcpy(batadv_ogm_packet->prev_sender, ethhdr->h_source, ETH_ALEN);
803 803
804 /* apply hop penalty */ 804 /* apply hop penalty */
@@ -807,7 +807,7 @@ static void batadv_iv_ogm_forward(struct batadv_orig_node *orig_node,
807 807
808 batadv_dbg(BATADV_DBG_BATMAN, bat_priv, 808 batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
809 "Forwarding packet: tq: %i, ttl: %i\n", 809 "Forwarding packet: tq: %i, ttl: %i\n",
810 batadv_ogm_packet->tq, batadv_ogm_packet->header.ttl); 810 batadv_ogm_packet->tq, batadv_ogm_packet->ttl);
811 811
812 /* switch off primaries first hop flag when forwarding */ 812 /* switch off primaries first hop flag when forwarding */
813 batadv_ogm_packet->flags &= ~BATADV_PRIMARIES_FIRST_HOP; 813 batadv_ogm_packet->flags &= ~BATADV_PRIMARIES_FIRST_HOP;
@@ -972,8 +972,8 @@ batadv_iv_ogm_orig_update(struct batadv_priv *bat_priv,
972 spin_unlock_bh(&neigh_node->bat_iv.lq_update_lock); 972 spin_unlock_bh(&neigh_node->bat_iv.lq_update_lock);
973 973
974 if (dup_status == BATADV_NO_DUP) { 974 if (dup_status == BATADV_NO_DUP) {
975 orig_node->last_ttl = batadv_ogm_packet->header.ttl; 975 orig_node->last_ttl = batadv_ogm_packet->ttl;
976 neigh_node->last_ttl = batadv_ogm_packet->header.ttl; 976 neigh_node->last_ttl = batadv_ogm_packet->ttl;
977 } 977 }
978 978
979 batadv_bonding_candidate_add(bat_priv, orig_node, neigh_node); 979 batadv_bonding_candidate_add(bat_priv, orig_node, neigh_node);
@@ -1247,7 +1247,7 @@ static void batadv_iv_ogm_process(const struct ethhdr *ethhdr,
1247 * packet in an aggregation. Here we expect that the padding 1247 * packet in an aggregation. Here we expect that the padding
1248 * is always zero (or not 0x01) 1248 * is always zero (or not 0x01)
1249 */ 1249 */
1250 if (batadv_ogm_packet->header.packet_type != BATADV_IV_OGM) 1250 if (batadv_ogm_packet->packet_type != BATADV_IV_OGM)
1251 return; 1251 return;
1252 1252
1253 /* could be changed by schedule_own_packet() */ 1253 /* could be changed by schedule_own_packet() */
@@ -1267,8 +1267,8 @@ static void batadv_iv_ogm_process(const struct ethhdr *ethhdr,
1267 if_incoming->net_dev->dev_addr, batadv_ogm_packet->orig, 1267 if_incoming->net_dev->dev_addr, batadv_ogm_packet->orig,
1268 batadv_ogm_packet->prev_sender, 1268 batadv_ogm_packet->prev_sender,
1269 ntohl(batadv_ogm_packet->seqno), batadv_ogm_packet->tq, 1269 ntohl(batadv_ogm_packet->seqno), batadv_ogm_packet->tq,
1270 batadv_ogm_packet->header.ttl, 1270 batadv_ogm_packet->ttl,
1271 batadv_ogm_packet->header.version, has_directlink_flag); 1271 batadv_ogm_packet->version, has_directlink_flag);
1272 1272
1273 rcu_read_lock(); 1273 rcu_read_lock();
1274 list_for_each_entry_rcu(hard_iface, &batadv_hardif_list, list) { 1274 list_for_each_entry_rcu(hard_iface, &batadv_hardif_list, list) {
@@ -1433,7 +1433,7 @@ static void batadv_iv_ogm_process(const struct ethhdr *ethhdr,
1433 * seqno and similar ttl as the non-duplicate 1433 * seqno and similar ttl as the non-duplicate
1434 */ 1434 */
1435 sameseq = orig_node->last_real_seqno == ntohl(batadv_ogm_packet->seqno); 1435 sameseq = orig_node->last_real_seqno == ntohl(batadv_ogm_packet->seqno);
1436 similar_ttl = orig_node->last_ttl - 3 <= batadv_ogm_packet->header.ttl; 1436 similar_ttl = orig_node->last_ttl - 3 <= batadv_ogm_packet->ttl;
1437 if (is_bidirect && ((dup_status == BATADV_NO_DUP) || 1437 if (is_bidirect && ((dup_status == BATADV_NO_DUP) ||
1438 (sameseq && similar_ttl))) 1438 (sameseq && similar_ttl)))
1439 batadv_iv_ogm_orig_update(bat_priv, orig_node, ethhdr, 1439 batadv_iv_ogm_orig_update(bat_priv, orig_node, ethhdr,
diff --git a/net/batman-adv/distributed-arp-table.c b/net/batman-adv/distributed-arp-table.c
index 6c8c3934bd7b..b316a4cb6f14 100644
--- a/net/batman-adv/distributed-arp-table.c
+++ b/net/batman-adv/distributed-arp-table.c
@@ -349,7 +349,7 @@ static void batadv_dbg_arp(struct batadv_priv *bat_priv, struct sk_buff *skb,
349 349
350 unicast_4addr_packet = (struct batadv_unicast_4addr_packet *)skb->data; 350 unicast_4addr_packet = (struct batadv_unicast_4addr_packet *)skb->data;
351 351
352 switch (unicast_4addr_packet->u.header.packet_type) { 352 switch (unicast_4addr_packet->u.packet_type) {
353 case BATADV_UNICAST: 353 case BATADV_UNICAST:
354 batadv_dbg(BATADV_DBG_DAT, bat_priv, 354 batadv_dbg(BATADV_DBG_DAT, bat_priv,
355 "* encapsulated within a UNICAST packet\n"); 355 "* encapsulated within a UNICAST packet\n");
@@ -374,7 +374,7 @@ static void batadv_dbg_arp(struct batadv_priv *bat_priv, struct sk_buff *skb,
374 break; 374 break;
375 default: 375 default:
376 batadv_dbg(BATADV_DBG_DAT, bat_priv, "* type: Unknown (%u)!\n", 376 batadv_dbg(BATADV_DBG_DAT, bat_priv, "* type: Unknown (%u)!\n",
377 unicast_4addr_packet->u.header.packet_type); 377 unicast_4addr_packet->u.packet_type);
378 } 378 }
379 break; 379 break;
380 case BATADV_BCAST: 380 case BATADV_BCAST:
@@ -387,7 +387,7 @@ static void batadv_dbg_arp(struct batadv_priv *bat_priv, struct sk_buff *skb,
387 default: 387 default:
388 batadv_dbg(BATADV_DBG_DAT, bat_priv, 388 batadv_dbg(BATADV_DBG_DAT, bat_priv,
389 "* encapsulated within an unknown packet type (0x%x)\n", 389 "* encapsulated within an unknown packet type (0x%x)\n",
390 unicast_4addr_packet->u.header.packet_type); 390 unicast_4addr_packet->u.packet_type);
391 } 391 }
392} 392}
393 393
diff --git a/net/batman-adv/fragmentation.c b/net/batman-adv/fragmentation.c
index 271d321b3a04..6ddb6145ffb5 100644
--- a/net/batman-adv/fragmentation.c
+++ b/net/batman-adv/fragmentation.c
@@ -355,7 +355,7 @@ bool batadv_frag_skb_fwd(struct sk_buff *skb,
355 batadv_add_counter(bat_priv, BATADV_CNT_FRAG_FWD_BYTES, 355 batadv_add_counter(bat_priv, BATADV_CNT_FRAG_FWD_BYTES,
356 skb->len + ETH_HLEN); 356 skb->len + ETH_HLEN);
357 357
358 packet->header.ttl--; 358 packet->ttl--;
359 batadv_send_skb_packet(skb, neigh_node->if_incoming, 359 batadv_send_skb_packet(skb, neigh_node->if_incoming,
360 neigh_node->addr); 360 neigh_node->addr);
361 ret = true; 361 ret = true;
@@ -444,9 +444,9 @@ bool batadv_frag_send_packet(struct sk_buff *skb,
444 goto out_err; 444 goto out_err;
445 445
446 /* Create one header to be copied to all fragments */ 446 /* Create one header to be copied to all fragments */
447 frag_header.header.packet_type = BATADV_UNICAST_FRAG; 447 frag_header.packet_type = BATADV_UNICAST_FRAG;
448 frag_header.header.version = BATADV_COMPAT_VERSION; 448 frag_header.version = BATADV_COMPAT_VERSION;
449 frag_header.header.ttl = BATADV_TTL; 449 frag_header.ttl = BATADV_TTL;
450 frag_header.seqno = htons(atomic_inc_return(&bat_priv->frag_seqno)); 450 frag_header.seqno = htons(atomic_inc_return(&bat_priv->frag_seqno));
451 frag_header.reserved = 0; 451 frag_header.reserved = 0;
452 frag_header.no = 0; 452 frag_header.no = 0;
diff --git a/net/batman-adv/icmp_socket.c b/net/batman-adv/icmp_socket.c
index 29ae4efe3543..130cc3217e2b 100644
--- a/net/batman-adv/icmp_socket.c
+++ b/net/batman-adv/icmp_socket.c
@@ -194,7 +194,7 @@ static ssize_t batadv_socket_write(struct file *file, const char __user *buff,
194 goto free_skb; 194 goto free_skb;
195 } 195 }
196 196
197 if (icmp_header->header.packet_type != BATADV_ICMP) { 197 if (icmp_header->packet_type != BATADV_ICMP) {
198 batadv_dbg(BATADV_DBG_BATMAN, bat_priv, 198 batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
199 "Error - can't send packet from char device: got bogus packet type (expected: BAT_ICMP)\n"); 199 "Error - can't send packet from char device: got bogus packet type (expected: BAT_ICMP)\n");
200 len = -EINVAL; 200 len = -EINVAL;
@@ -243,9 +243,9 @@ static ssize_t batadv_socket_write(struct file *file, const char __user *buff,
243 243
244 icmp_header->uid = socket_client->index; 244 icmp_header->uid = socket_client->index;
245 245
246 if (icmp_header->header.version != BATADV_COMPAT_VERSION) { 246 if (icmp_header->version != BATADV_COMPAT_VERSION) {
247 icmp_header->msg_type = BATADV_PARAMETER_PROBLEM; 247 icmp_header->msg_type = BATADV_PARAMETER_PROBLEM;
248 icmp_header->header.version = BATADV_COMPAT_VERSION; 248 icmp_header->version = BATADV_COMPAT_VERSION;
249 batadv_socket_add_packet(socket_client, icmp_header, 249 batadv_socket_add_packet(socket_client, icmp_header,
250 packet_len); 250 packet_len);
251 goto free_skb; 251 goto free_skb;
diff --git a/net/batman-adv/main.c b/net/batman-adv/main.c
index c51a5e568f0a..1511f64a6cea 100644
--- a/net/batman-adv/main.c
+++ b/net/batman-adv/main.c
@@ -383,17 +383,17 @@ int batadv_batman_skb_recv(struct sk_buff *skb, struct net_device *dev,
383 383
384 batadv_ogm_packet = (struct batadv_ogm_packet *)skb->data; 384 batadv_ogm_packet = (struct batadv_ogm_packet *)skb->data;
385 385
386 if (batadv_ogm_packet->header.version != BATADV_COMPAT_VERSION) { 386 if (batadv_ogm_packet->version != BATADV_COMPAT_VERSION) {
387 batadv_dbg(BATADV_DBG_BATMAN, bat_priv, 387 batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
388 "Drop packet: incompatible batman version (%i)\n", 388 "Drop packet: incompatible batman version (%i)\n",
389 batadv_ogm_packet->header.version); 389 batadv_ogm_packet->version);
390 goto err_free; 390 goto err_free;
391 } 391 }
392 392
393 /* all receive handlers return whether they received or reused 393 /* all receive handlers return whether they received or reused
394 * the supplied skb. if not, we have to free the skb. 394 * the supplied skb. if not, we have to free the skb.
395 */ 395 */
396 idx = batadv_ogm_packet->header.packet_type; 396 idx = batadv_ogm_packet->packet_type;
397 ret = (*batadv_rx_handler[idx])(skb, hard_iface); 397 ret = (*batadv_rx_handler[idx])(skb, hard_iface);
398 398
399 if (ret == NET_RX_DROP) 399 if (ret == NET_RX_DROP)
@@ -426,8 +426,8 @@ static void batadv_recv_handler_init(void)
426 BUILD_BUG_ON(offsetof(struct batadv_unicast_packet, dest) != 4); 426 BUILD_BUG_ON(offsetof(struct batadv_unicast_packet, dest) != 4);
427 BUILD_BUG_ON(offsetof(struct batadv_unicast_tvlv_packet, dst) != 4); 427 BUILD_BUG_ON(offsetof(struct batadv_unicast_tvlv_packet, dst) != 4);
428 BUILD_BUG_ON(offsetof(struct batadv_frag_packet, dest) != 4); 428 BUILD_BUG_ON(offsetof(struct batadv_frag_packet, dest) != 4);
429 BUILD_BUG_ON(offsetof(struct batadv_icmp_packet, icmph.dst) != 4); 429 BUILD_BUG_ON(offsetof(struct batadv_icmp_packet, dst) != 4);
430 BUILD_BUG_ON(offsetof(struct batadv_icmp_packet_rr, icmph.dst) != 4); 430 BUILD_BUG_ON(offsetof(struct batadv_icmp_packet_rr, dst) != 4);
431 431
432 /* broadcast packet */ 432 /* broadcast packet */
433 batadv_rx_handler[BATADV_BCAST] = batadv_recv_bcast_packet; 433 batadv_rx_handler[BATADV_BCAST] = batadv_recv_bcast_packet;
@@ -1119,9 +1119,9 @@ void batadv_tvlv_unicast_send(struct batadv_priv *bat_priv, uint8_t *src,
1119 skb_reserve(skb, ETH_HLEN); 1119 skb_reserve(skb, ETH_HLEN);
1120 tvlv_buff = skb_put(skb, sizeof(*unicast_tvlv_packet) + tvlv_len); 1120 tvlv_buff = skb_put(skb, sizeof(*unicast_tvlv_packet) + tvlv_len);
1121 unicast_tvlv_packet = (struct batadv_unicast_tvlv_packet *)tvlv_buff; 1121 unicast_tvlv_packet = (struct batadv_unicast_tvlv_packet *)tvlv_buff;
1122 unicast_tvlv_packet->header.packet_type = BATADV_UNICAST_TVLV; 1122 unicast_tvlv_packet->packet_type = BATADV_UNICAST_TVLV;
1123 unicast_tvlv_packet->header.version = BATADV_COMPAT_VERSION; 1123 unicast_tvlv_packet->version = BATADV_COMPAT_VERSION;
1124 unicast_tvlv_packet->header.ttl = BATADV_TTL; 1124 unicast_tvlv_packet->ttl = BATADV_TTL;
1125 unicast_tvlv_packet->reserved = 0; 1125 unicast_tvlv_packet->reserved = 0;
1126 unicast_tvlv_packet->tvlv_len = htons(tvlv_len); 1126 unicast_tvlv_packet->tvlv_len = htons(tvlv_len);
1127 unicast_tvlv_packet->align = 0; 1127 unicast_tvlv_packet->align = 0;
diff --git a/net/batman-adv/network-coding.c b/net/batman-adv/network-coding.c
index 351e199bc0af..511d7e1eea38 100644
--- a/net/batman-adv/network-coding.c
+++ b/net/batman-adv/network-coding.c
@@ -722,7 +722,7 @@ static bool batadv_can_nc_with_orig(struct batadv_priv *bat_priv,
722{ 722{
723 if (orig_node->last_real_seqno != ntohl(ogm_packet->seqno)) 723 if (orig_node->last_real_seqno != ntohl(ogm_packet->seqno))
724 return false; 724 return false;
725 if (orig_node->last_ttl != ogm_packet->header.ttl + 1) 725 if (orig_node->last_ttl != ogm_packet->ttl + 1)
726 return false; 726 return false;
727 if (!batadv_compare_eth(ogm_packet->orig, ogm_packet->prev_sender)) 727 if (!batadv_compare_eth(ogm_packet->orig, ogm_packet->prev_sender))
728 return false; 728 return false;
@@ -1082,9 +1082,9 @@ static bool batadv_nc_code_packets(struct batadv_priv *bat_priv,
1082 coded_packet = (struct batadv_coded_packet *)skb_dest->data; 1082 coded_packet = (struct batadv_coded_packet *)skb_dest->data;
1083 skb_reset_mac_header(skb_dest); 1083 skb_reset_mac_header(skb_dest);
1084 1084
1085 coded_packet->header.packet_type = BATADV_CODED; 1085 coded_packet->packet_type = BATADV_CODED;
1086 coded_packet->header.version = BATADV_COMPAT_VERSION; 1086 coded_packet->version = BATADV_COMPAT_VERSION;
1087 coded_packet->header.ttl = packet1->header.ttl; 1087 coded_packet->ttl = packet1->ttl;
1088 1088
1089 /* Info about first unicast packet */ 1089 /* Info about first unicast packet */
1090 memcpy(coded_packet->first_source, first_source, ETH_ALEN); 1090 memcpy(coded_packet->first_source, first_source, ETH_ALEN);
@@ -1097,7 +1097,7 @@ static bool batadv_nc_code_packets(struct batadv_priv *bat_priv,
1097 memcpy(coded_packet->second_source, second_source, ETH_ALEN); 1097 memcpy(coded_packet->second_source, second_source, ETH_ALEN);
1098 memcpy(coded_packet->second_orig_dest, packet2->dest, ETH_ALEN); 1098 memcpy(coded_packet->second_orig_dest, packet2->dest, ETH_ALEN);
1099 coded_packet->second_crc = packet_id2; 1099 coded_packet->second_crc = packet_id2;
1100 coded_packet->second_ttl = packet2->header.ttl; 1100 coded_packet->second_ttl = packet2->ttl;
1101 coded_packet->second_ttvn = packet2->ttvn; 1101 coded_packet->second_ttvn = packet2->ttvn;
1102 coded_packet->coded_len = htons(coding_len); 1102 coded_packet->coded_len = htons(coding_len);
1103 1103
@@ -1452,7 +1452,7 @@ bool batadv_nc_skb_forward(struct sk_buff *skb,
1452 /* We only handle unicast packets */ 1452 /* We only handle unicast packets */
1453 payload = skb_network_header(skb); 1453 payload = skb_network_header(skb);
1454 packet = (struct batadv_unicast_packet *)payload; 1454 packet = (struct batadv_unicast_packet *)payload;
1455 if (packet->header.packet_type != BATADV_UNICAST) 1455 if (packet->packet_type != BATADV_UNICAST)
1456 goto out; 1456 goto out;
1457 1457
1458 /* Try to find a coding opportunity and send the skb if one is found */ 1458 /* Try to find a coding opportunity and send the skb if one is found */
@@ -1505,7 +1505,7 @@ void batadv_nc_skb_store_for_decoding(struct batadv_priv *bat_priv,
1505 /* Check for supported packet type */ 1505 /* Check for supported packet type */
1506 payload = skb_network_header(skb); 1506 payload = skb_network_header(skb);
1507 packet = (struct batadv_unicast_packet *)payload; 1507 packet = (struct batadv_unicast_packet *)payload;
1508 if (packet->header.packet_type != BATADV_UNICAST) 1508 if (packet->packet_type != BATADV_UNICAST)
1509 goto out; 1509 goto out;
1510 1510
1511 /* Find existing nc_path or create a new */ 1511 /* Find existing nc_path or create a new */
@@ -1623,7 +1623,7 @@ batadv_nc_skb_decode_packet(struct batadv_priv *bat_priv, struct sk_buff *skb,
1623 ttvn = coded_packet_tmp.second_ttvn; 1623 ttvn = coded_packet_tmp.second_ttvn;
1624 } else { 1624 } else {
1625 orig_dest = coded_packet_tmp.first_orig_dest; 1625 orig_dest = coded_packet_tmp.first_orig_dest;
1626 ttl = coded_packet_tmp.header.ttl; 1626 ttl = coded_packet_tmp.ttl;
1627 ttvn = coded_packet_tmp.first_ttvn; 1627 ttvn = coded_packet_tmp.first_ttvn;
1628 } 1628 }
1629 1629
@@ -1648,9 +1648,9 @@ batadv_nc_skb_decode_packet(struct batadv_priv *bat_priv, struct sk_buff *skb,
1648 1648
1649 /* Create decoded unicast packet */ 1649 /* Create decoded unicast packet */
1650 unicast_packet = (struct batadv_unicast_packet *)skb->data; 1650 unicast_packet = (struct batadv_unicast_packet *)skb->data;
1651 unicast_packet->header.packet_type = BATADV_UNICAST; 1651 unicast_packet->packet_type = BATADV_UNICAST;
1652 unicast_packet->header.version = BATADV_COMPAT_VERSION; 1652 unicast_packet->version = BATADV_COMPAT_VERSION;
1653 unicast_packet->header.ttl = ttl; 1653 unicast_packet->ttl = ttl;
1654 memcpy(unicast_packet->dest, orig_dest, ETH_ALEN); 1654 memcpy(unicast_packet->dest, orig_dest, ETH_ALEN);
1655 unicast_packet->ttvn = ttvn; 1655 unicast_packet->ttvn = ttvn;
1656 1656
diff --git a/net/batman-adv/packet.h b/net/batman-adv/packet.h
index 207459b62966..2dd8f2422550 100644
--- a/net/batman-adv/packet.h
+++ b/net/batman-adv/packet.h
@@ -155,6 +155,7 @@ enum batadv_tvlv_type {
155 BATADV_TVLV_ROAM = 0x05, 155 BATADV_TVLV_ROAM = 0x05,
156}; 156};
157 157
158#pragma pack(2)
158/* the destination hardware field in the ARP frame is used to 159/* the destination hardware field in the ARP frame is used to
159 * transport the claim type and the group id 160 * transport the claim type and the group id
160 */ 161 */
@@ -163,24 +164,20 @@ struct batadv_bla_claim_dst {
163 uint8_t type; /* bla_claimframe */ 164 uint8_t type; /* bla_claimframe */
164 __be16 group; /* group id */ 165 __be16 group; /* group id */
165}; 166};
166 167#pragma pack()
167struct batadv_header {
168 uint8_t packet_type;
169 uint8_t version; /* batman version field */
170 uint8_t ttl;
171 /* the parent struct has to add a byte after the header to make
172 * everything 4 bytes aligned again
173 */
174};
175 168
176/** 169/**
177 * struct batadv_ogm_packet - ogm (routing protocol) packet 170 * struct batadv_ogm_packet - ogm (routing protocol) packet
178 * @header: common batman packet header 171 * @packet_type: batman-adv packet type, part of the general header
172 * @version: batman-adv protocol version, part of the general header
173 * @ttl: time to live for this packet, part of the general header
179 * @flags: contains routing relevant flags - see enum batadv_iv_flags 174 * @flags: contains routing relevant flags - see enum batadv_iv_flags
180 * @tvlv_len: length of tvlv data following the ogm header 175 * @tvlv_len: length of tvlv data following the ogm header
181 */ 176 */
182struct batadv_ogm_packet { 177struct batadv_ogm_packet {
183 struct batadv_header header; 178 uint8_t packet_type;
179 uint8_t version;
180 uint8_t ttl;
184 uint8_t flags; 181 uint8_t flags;
185 __be32 seqno; 182 __be32 seqno;
186 uint8_t orig[ETH_ALEN]; 183 uint8_t orig[ETH_ALEN];
@@ -196,29 +193,51 @@ struct batadv_ogm_packet {
196#define BATADV_OGM_HLEN sizeof(struct batadv_ogm_packet) 193#define BATADV_OGM_HLEN sizeof(struct batadv_ogm_packet)
197 194
198/** 195/**
199 * batadv_icmp_header - common ICMP header 196 * batadv_icmp_header - common members among all the ICMP packets
200 * @header: common batman header 197 * @packet_type: batman-adv packet type, part of the general header
198 * @version: batman-adv protocol version, part of the general header
199 * @ttl: time to live for this packet, part of the general header
201 * @msg_type: ICMP packet type 200 * @msg_type: ICMP packet type
202 * @dst: address of the destination node 201 * @dst: address of the destination node
203 * @orig: address of the source node 202 * @orig: address of the source node
204 * @uid: local ICMP socket identifier 203 * @uid: local ICMP socket identifier
204 * @align: not used - useful for alignment purposes only
205 *
206 * This structure is used for ICMP packets parsing only and it is never sent
207 * over the wire. The alignment field at the end is there to ensure that
208 * members are padded the same way as they are in real packets.
205 */ 209 */
206struct batadv_icmp_header { 210struct batadv_icmp_header {
207 struct batadv_header header; 211 uint8_t packet_type;
212 uint8_t version;
213 uint8_t ttl;
208 uint8_t msg_type; /* see ICMP message types above */ 214 uint8_t msg_type; /* see ICMP message types above */
209 uint8_t dst[ETH_ALEN]; 215 uint8_t dst[ETH_ALEN];
210 uint8_t orig[ETH_ALEN]; 216 uint8_t orig[ETH_ALEN];
211 uint8_t uid; 217 uint8_t uid;
218 uint8_t align[3];
212}; 219};
213 220
214/** 221/**
215 * batadv_icmp_packet - ICMP packet 222 * batadv_icmp_packet - ICMP packet
216 * @icmph: common ICMP header 223 * @packet_type: batman-adv packet type, part of the general header
224 * @version: batman-adv protocol version, part of the general header
225 * @ttl: time to live for this packet, part of the general header
226 * @msg_type: ICMP packet type
227 * @dst: address of the destination node
228 * @orig: address of the source node
229 * @uid: local ICMP socket identifier
217 * @reserved: not used - useful for alignment 230 * @reserved: not used - useful for alignment
218 * @seqno: ICMP sequence number 231 * @seqno: ICMP sequence number
219 */ 232 */
220struct batadv_icmp_packet { 233struct batadv_icmp_packet {
221 struct batadv_icmp_header icmph; 234 uint8_t packet_type;
235 uint8_t version;
236 uint8_t ttl;
237 uint8_t msg_type; /* see ICMP message types above */
238 uint8_t dst[ETH_ALEN];
239 uint8_t orig[ETH_ALEN];
240 uint8_t uid;
222 uint8_t reserved; 241 uint8_t reserved;
223 __be16 seqno; 242 __be16 seqno;
224}; 243};
@@ -227,13 +246,25 @@ struct batadv_icmp_packet {
227 246
228/** 247/**
229 * batadv_icmp_packet_rr - ICMP RouteRecord packet 248 * batadv_icmp_packet_rr - ICMP RouteRecord packet
230 * @icmph: common ICMP header 249 * @packet_type: batman-adv packet type, part of the general header
250 * @version: batman-adv protocol version, part of the general header
251 * @ttl: time to live for this packet, part of the general header
252 * @msg_type: ICMP packet type
253 * @dst: address of the destination node
254 * @orig: address of the source node
255 * @uid: local ICMP socket identifier
231 * @rr_cur: number of entries in the rr array 256 * @rr_cur: number of entries in the rr array
232 * @seqno: ICMP sequence number 257 * @seqno: ICMP sequence number
233 * @rr: route record array 258 * @rr: route record array
234 */ 259 */
235struct batadv_icmp_packet_rr { 260struct batadv_icmp_packet_rr {
236 struct batadv_icmp_header icmph; 261 uint8_t packet_type;
262 uint8_t version;
263 uint8_t ttl;
264 uint8_t msg_type; /* see ICMP message types above */
265 uint8_t dst[ETH_ALEN];
266 uint8_t orig[ETH_ALEN];
267 uint8_t uid;
237 uint8_t rr_cur; 268 uint8_t rr_cur;
238 __be16 seqno; 269 __be16 seqno;
239 uint8_t rr[BATADV_RR_LEN][ETH_ALEN]; 270 uint8_t rr[BATADV_RR_LEN][ETH_ALEN];
@@ -253,8 +284,18 @@ struct batadv_icmp_packet_rr {
253 */ 284 */
254#pragma pack(2) 285#pragma pack(2)
255 286
287/**
288 * struct batadv_unicast_packet - unicast packet for network payload
289 * @packet_type: batman-adv packet type, part of the general header
290 * @version: batman-adv protocol version, part of the general header
291 * @ttl: time to live for this packet, part of the general header
292 * @ttvn: translation table version number
293 * @dest: originator destination of the unicast packet
294 */
256struct batadv_unicast_packet { 295struct batadv_unicast_packet {
257 struct batadv_header header; 296 uint8_t packet_type;
297 uint8_t version;
298 uint8_t ttl;
258 uint8_t ttvn; /* destination translation table version number */ 299 uint8_t ttvn; /* destination translation table version number */
259 uint8_t dest[ETH_ALEN]; 300 uint8_t dest[ETH_ALEN];
260 /* "4 bytes boundary + 2 bytes" long to make the payload after the 301 /* "4 bytes boundary + 2 bytes" long to make the payload after the
@@ -280,7 +321,9 @@ struct batadv_unicast_4addr_packet {
280 321
281/** 322/**
282 * struct batadv_frag_packet - fragmented packet 323 * struct batadv_frag_packet - fragmented packet
283 * @header: common batman packet header with type, compatversion, and ttl 324 * @packet_type: batman-adv packet type, part of the general header
325 * @version: batman-adv protocol version, part of the general header
326 * @ttl: time to live for this packet, part of the general header
284 * @dest: final destination used when routing fragments 327 * @dest: final destination used when routing fragments
285 * @orig: originator of the fragment used when merging the packet 328 * @orig: originator of the fragment used when merging the packet
286 * @no: fragment number within this sequence 329 * @no: fragment number within this sequence
@@ -289,7 +332,9 @@ struct batadv_unicast_4addr_packet {
289 * @total_size: size of the merged packet 332 * @total_size: size of the merged packet
290 */ 333 */
291struct batadv_frag_packet { 334struct batadv_frag_packet {
292 struct batadv_header header; 335 uint8_t packet_type;
336 uint8_t version; /* batman version field */
337 uint8_t ttl;
293#if defined(__BIG_ENDIAN_BITFIELD) 338#if defined(__BIG_ENDIAN_BITFIELD)
294 uint8_t no:4; 339 uint8_t no:4;
295 uint8_t reserved:4; 340 uint8_t reserved:4;
@@ -305,8 +350,19 @@ struct batadv_frag_packet {
305 __be16 total_size; 350 __be16 total_size;
306}; 351};
307 352
353/**
354 * struct batadv_bcast_packet - broadcast packet for network payload
355 * @packet_type: batman-adv packet type, part of the general header
356 * @version: batman-adv protocol version, part of the general header
357 * @ttl: time to live for this packet, part of the general header
358 * @reserved: reserved byte for alignment
359 * @seqno: sequence identification
360 * @orig: originator of the broadcast packet
361 */
308struct batadv_bcast_packet { 362struct batadv_bcast_packet {
309 struct batadv_header header; 363 uint8_t packet_type;
364 uint8_t version; /* batman version field */
365 uint8_t ttl;
310 uint8_t reserved; 366 uint8_t reserved;
311 __be32 seqno; 367 __be32 seqno;
312 uint8_t orig[ETH_ALEN]; 368 uint8_t orig[ETH_ALEN];
@@ -315,11 +371,11 @@ struct batadv_bcast_packet {
315 */ 371 */
316}; 372};
317 373
318#pragma pack()
319
320/** 374/**
321 * struct batadv_coded_packet - network coded packet 375 * struct batadv_coded_packet - network coded packet
322 * @header: common batman packet header and ttl of first included packet 376 * @packet_type: batman-adv packet type, part of the general header
377 * @version: batman-adv protocol version, part of the general header
378 * @ttl: time to live for this packet, part of the general header
323 * @reserved: Align following fields to 2-byte boundaries 379 * @reserved: Align following fields to 2-byte boundaries
324 * @first_source: original source of first included packet 380 * @first_source: original source of first included packet
325 * @first_orig_dest: original destination of first included packet 381 * @first_orig_dest: original destination of first included packet
@@ -334,7 +390,9 @@ struct batadv_bcast_packet {
334 * @coded_len: length of network coded part of the payload 390 * @coded_len: length of network coded part of the payload
335 */ 391 */
336struct batadv_coded_packet { 392struct batadv_coded_packet {
337 struct batadv_header header; 393 uint8_t packet_type;
394 uint8_t version; /* batman version field */
395 uint8_t ttl;
338 uint8_t first_ttvn; 396 uint8_t first_ttvn;
339 /* uint8_t first_dest[ETH_ALEN]; - saved in mac header destination */ 397 /* uint8_t first_dest[ETH_ALEN]; - saved in mac header destination */
340 uint8_t first_source[ETH_ALEN]; 398 uint8_t first_source[ETH_ALEN];
@@ -349,9 +407,13 @@ struct batadv_coded_packet {
349 __be16 coded_len; 407 __be16 coded_len;
350}; 408};
351 409
410#pragma pack()
411
352/** 412/**
353 * struct batadv_unicast_tvlv - generic unicast packet with tvlv payload 413 * struct batadv_unicast_tvlv - generic unicast packet with tvlv payload
354 * @header: common batman packet header 414 * @packet_type: batman-adv packet type, part of the general header
 415 * @version: batman-adv protocol version, part of the general header
 416 * @ttl: time to live for this packet, part of the general header
355 * @reserved: reserved field (for packet alignment) 417 * @reserved: reserved field (for packet alignment)
356 * @src: address of the source 418 * @src: address of the source
357 * @dst: address of the destination 419 * @dst: address of the destination
@@ -359,7 +421,9 @@ struct batadv_coded_packet {
359 * @align: 2 bytes to align the header to a 4 byte boundary 421 * @align: 2 bytes to align the header to a 4 byte boundary
360 */ 422 */
361struct batadv_unicast_tvlv_packet { 423struct batadv_unicast_tvlv_packet {
362 struct batadv_header header; 424 uint8_t packet_type;
425 uint8_t version; /* batman version field */
426 uint8_t ttl;
363 uint8_t reserved; 427 uint8_t reserved;
364 uint8_t dst[ETH_ALEN]; 428 uint8_t dst[ETH_ALEN];
365 uint8_t src[ETH_ALEN]; 429 uint8_t src[ETH_ALEN];
@@ -420,13 +484,13 @@ struct batadv_tvlv_tt_vlan_data {
420 * struct batadv_tvlv_tt_change - translation table diff data 484 * struct batadv_tvlv_tt_change - translation table diff data
421 * @flags: status indicators concerning the non-mesh client (see 485 * @flags: status indicators concerning the non-mesh client (see
422 * batadv_tt_client_flags) 486 * batadv_tt_client_flags)
423 * @reserved: reserved field 487 * @reserved: reserved field - useful for alignment purposes only
424 * @addr: mac address of non-mesh client that triggered this tt change 488 * @addr: mac address of non-mesh client that triggered this tt change
425 * @vid: VLAN identifier 489 * @vid: VLAN identifier
426 */ 490 */
427struct batadv_tvlv_tt_change { 491struct batadv_tvlv_tt_change {
428 uint8_t flags; 492 uint8_t flags;
429 uint8_t reserved; 493 uint8_t reserved[3];
430 uint8_t addr[ETH_ALEN]; 494 uint8_t addr[ETH_ALEN];
431 __be16 vid; 495 __be16 vid;
432}; 496};
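
The packet.h changes above remove the embedded struct batadv_header and inline its three fields (packet_type, version, ttl) at the start of every packet structure, so all batman-adv packets keep the same three-byte general header at fixed offsets. A minimal userspace sketch of that layout, with hypothetical stand-in names rather than the kernel structs:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in: every packet now starts with the same
 * three general-header bytes at fixed offsets. */
struct frag_packet_sketch {
        uint8_t packet_type;    /* offset 0 */
        uint8_t version;        /* offset 1 */
        uint8_t ttl;            /* offset 2 */
        /* type-specific fields follow */
};

/* The general header can be read without knowing the packet type. */
static void dump_general_header(const uint8_t *data)
{
        printf("type=%u version=%u ttl=%u\n", data[0], data[1], data[2]);
}

int main(void)
{
        uint8_t wire[4] = { 0x05, 15, 50, 0 };  /* made-up values */

        dump_general_header(wire);
        return 0;
}
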
diff --git a/net/batman-adv/routing.c b/net/batman-adv/routing.c
index d4114d775ad6..46278bfb8fdb 100644
--- a/net/batman-adv/routing.c
+++ b/net/batman-adv/routing.c
@@ -308,7 +308,7 @@ static int batadv_recv_my_icmp_packet(struct batadv_priv *bat_priv,
308 memcpy(icmph->dst, icmph->orig, ETH_ALEN); 308 memcpy(icmph->dst, icmph->orig, ETH_ALEN);
309 memcpy(icmph->orig, primary_if->net_dev->dev_addr, ETH_ALEN); 309 memcpy(icmph->orig, primary_if->net_dev->dev_addr, ETH_ALEN);
310 icmph->msg_type = BATADV_ECHO_REPLY; 310 icmph->msg_type = BATADV_ECHO_REPLY;
311 icmph->header.ttl = BATADV_TTL; 311 icmph->ttl = BATADV_TTL;
312 312
313 res = batadv_send_skb_to_orig(skb, orig_node, NULL); 313 res = batadv_send_skb_to_orig(skb, orig_node, NULL);
314 if (res != NET_XMIT_DROP) 314 if (res != NET_XMIT_DROP)
@@ -338,9 +338,9 @@ static int batadv_recv_icmp_ttl_exceeded(struct batadv_priv *bat_priv,
338 icmp_packet = (struct batadv_icmp_packet *)skb->data; 338 icmp_packet = (struct batadv_icmp_packet *)skb->data;
339 339
340 /* send TTL exceeded if packet is an echo request (traceroute) */ 340 /* send TTL exceeded if packet is an echo request (traceroute) */
341 if (icmp_packet->icmph.msg_type != BATADV_ECHO_REQUEST) { 341 if (icmp_packet->msg_type != BATADV_ECHO_REQUEST) {
342 pr_debug("Warning - can't forward icmp packet from %pM to %pM: ttl exceeded\n", 342 pr_debug("Warning - can't forward icmp packet from %pM to %pM: ttl exceeded\n",
343 icmp_packet->icmph.orig, icmp_packet->icmph.dst); 343 icmp_packet->orig, icmp_packet->dst);
344 goto out; 344 goto out;
345 } 345 }
346 346
@@ -349,7 +349,7 @@ static int batadv_recv_icmp_ttl_exceeded(struct batadv_priv *bat_priv,
349 goto out; 349 goto out;
350 350
351 /* get routing information */ 351 /* get routing information */
352 orig_node = batadv_orig_hash_find(bat_priv, icmp_packet->icmph.orig); 352 orig_node = batadv_orig_hash_find(bat_priv, icmp_packet->orig);
353 if (!orig_node) 353 if (!orig_node)
354 goto out; 354 goto out;
355 355
@@ -359,11 +359,11 @@ static int batadv_recv_icmp_ttl_exceeded(struct batadv_priv *bat_priv,
359 359
360 icmp_packet = (struct batadv_icmp_packet *)skb->data; 360 icmp_packet = (struct batadv_icmp_packet *)skb->data;
361 361
362 memcpy(icmp_packet->icmph.dst, icmp_packet->icmph.orig, ETH_ALEN); 362 memcpy(icmp_packet->dst, icmp_packet->orig, ETH_ALEN);
363 memcpy(icmp_packet->icmph.orig, primary_if->net_dev->dev_addr, 363 memcpy(icmp_packet->orig, primary_if->net_dev->dev_addr,
364 ETH_ALEN); 364 ETH_ALEN);
365 icmp_packet->icmph.msg_type = BATADV_TTL_EXCEEDED; 365 icmp_packet->msg_type = BATADV_TTL_EXCEEDED;
366 icmp_packet->icmph.header.ttl = BATADV_TTL; 366 icmp_packet->ttl = BATADV_TTL;
367 367
368 if (batadv_send_skb_to_orig(skb, orig_node, NULL) != NET_XMIT_DROP) 368 if (batadv_send_skb_to_orig(skb, orig_node, NULL) != NET_XMIT_DROP)
369 ret = NET_RX_SUCCESS; 369 ret = NET_RX_SUCCESS;
@@ -434,7 +434,7 @@ int batadv_recv_icmp_packet(struct sk_buff *skb,
434 return batadv_recv_my_icmp_packet(bat_priv, skb); 434 return batadv_recv_my_icmp_packet(bat_priv, skb);
435 435
436 /* TTL exceeded */ 436 /* TTL exceeded */
437 if (icmph->header.ttl < 2) 437 if (icmph->ttl < 2)
438 return batadv_recv_icmp_ttl_exceeded(bat_priv, skb); 438 return batadv_recv_icmp_ttl_exceeded(bat_priv, skb);
439 439
440 /* get routing information */ 440 /* get routing information */
@@ -449,7 +449,7 @@ int batadv_recv_icmp_packet(struct sk_buff *skb,
449 icmph = (struct batadv_icmp_header *)skb->data; 449 icmph = (struct batadv_icmp_header *)skb->data;
450 450
451 /* decrement ttl */ 451 /* decrement ttl */
452 icmph->header.ttl--; 452 icmph->ttl--;
453 453
454 /* route it */ 454 /* route it */
455 if (batadv_send_skb_to_orig(skb, orig_node, recv_if) != NET_XMIT_DROP) 455 if (batadv_send_skb_to_orig(skb, orig_node, recv_if) != NET_XMIT_DROP)
@@ -709,7 +709,7 @@ static int batadv_route_unicast_packet(struct sk_buff *skb,
709 unicast_packet = (struct batadv_unicast_packet *)skb->data; 709 unicast_packet = (struct batadv_unicast_packet *)skb->data;
710 710
711 /* TTL exceeded */ 711 /* TTL exceeded */
712 if (unicast_packet->header.ttl < 2) { 712 if (unicast_packet->ttl < 2) {
713 pr_debug("Warning - can't forward unicast packet from %pM to %pM: ttl exceeded\n", 713 pr_debug("Warning - can't forward unicast packet from %pM to %pM: ttl exceeded\n",
714 ethhdr->h_source, unicast_packet->dest); 714 ethhdr->h_source, unicast_packet->dest);
715 goto out; 715 goto out;
@@ -727,9 +727,9 @@ static int batadv_route_unicast_packet(struct sk_buff *skb,
727 727
728 /* decrement ttl */ 728 /* decrement ttl */
729 unicast_packet = (struct batadv_unicast_packet *)skb->data; 729 unicast_packet = (struct batadv_unicast_packet *)skb->data;
730 unicast_packet->header.ttl--; 730 unicast_packet->ttl--;
731 731
732 switch (unicast_packet->header.packet_type) { 732 switch (unicast_packet->packet_type) {
733 case BATADV_UNICAST_4ADDR: 733 case BATADV_UNICAST_4ADDR:
734 hdr_len = sizeof(struct batadv_unicast_4addr_packet); 734 hdr_len = sizeof(struct batadv_unicast_4addr_packet);
735 break; 735 break;
@@ -970,7 +970,7 @@ int batadv_recv_unicast_packet(struct sk_buff *skb,
970 unicast_packet = (struct batadv_unicast_packet *)skb->data; 970 unicast_packet = (struct batadv_unicast_packet *)skb->data;
971 unicast_4addr_packet = (struct batadv_unicast_4addr_packet *)skb->data; 971 unicast_4addr_packet = (struct batadv_unicast_4addr_packet *)skb->data;
972 972
973 is4addr = unicast_packet->header.packet_type == BATADV_UNICAST_4ADDR; 973 is4addr = unicast_packet->packet_type == BATADV_UNICAST_4ADDR;
974 /* the caller function should have already pulled 2 bytes */ 974 /* the caller function should have already pulled 2 bytes */
975 if (is4addr) 975 if (is4addr)
976 hdr_size = sizeof(*unicast_4addr_packet); 976 hdr_size = sizeof(*unicast_4addr_packet);
@@ -1160,7 +1160,7 @@ int batadv_recv_bcast_packet(struct sk_buff *skb,
1160 if (batadv_is_my_mac(bat_priv, bcast_packet->orig)) 1160 if (batadv_is_my_mac(bat_priv, bcast_packet->orig))
1161 goto out; 1161 goto out;
1162 1162
1163 if (bcast_packet->header.ttl < 2) 1163 if (bcast_packet->ttl < 2)
1164 goto out; 1164 goto out;
1165 1165
1166 orig_node = batadv_orig_hash_find(bat_priv, bcast_packet->orig); 1166 orig_node = batadv_orig_hash_find(bat_priv, bcast_packet->orig);
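
The routing.c hunks above all use the same forwarding gate, now spelled without the header indirection: drop when ttl < 2, otherwise decrement and forward. A minimal sketch of that gate (function name hypothetical):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Drop when fewer than two hops of life remain (the packet would
 * reach the next hop with ttl 0), otherwise consume one hop. */
static bool may_forward(uint8_t *ttl)
{
        if (*ttl < 2)
                return false;
        (*ttl)--;
        return true;
}

int main(void)
{
        uint8_t ttl = 2;

        printf("forward: %d, ttl now %u\n", may_forward(&ttl), ttl);
        printf("forward: %d, ttl now %u\n", may_forward(&ttl), ttl);
        return 0;
}
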
diff --git a/net/batman-adv/send.c b/net/batman-adv/send.c
index c83be5ebaa28..fba4dcfcfac2 100644
--- a/net/batman-adv/send.c
+++ b/net/batman-adv/send.c
@@ -161,11 +161,11 @@ batadv_send_skb_push_fill_unicast(struct sk_buff *skb, int hdr_size,
161 return false; 161 return false;
162 162
163 unicast_packet = (struct batadv_unicast_packet *)skb->data; 163 unicast_packet = (struct batadv_unicast_packet *)skb->data;
164 unicast_packet->header.version = BATADV_COMPAT_VERSION; 164 unicast_packet->version = BATADV_COMPAT_VERSION;
165 /* batman packet type: unicast */ 165 /* batman packet type: unicast */
166 unicast_packet->header.packet_type = BATADV_UNICAST; 166 unicast_packet->packet_type = BATADV_UNICAST;
167 /* set unicast ttl */ 167 /* set unicast ttl */
168 unicast_packet->header.ttl = BATADV_TTL; 168 unicast_packet->ttl = BATADV_TTL;
169 /* copy the destination for faster routing */ 169 /* copy the destination for faster routing */
170 memcpy(unicast_packet->dest, orig_node->orig, ETH_ALEN); 170 memcpy(unicast_packet->dest, orig_node->orig, ETH_ALEN);
171 /* set the destination tt version number */ 171 /* set the destination tt version number */
@@ -221,7 +221,7 @@ bool batadv_send_skb_prepare_unicast_4addr(struct batadv_priv *bat_priv,
221 goto out; 221 goto out;
222 222
223 uc_4addr_packet = (struct batadv_unicast_4addr_packet *)skb->data; 223 uc_4addr_packet = (struct batadv_unicast_4addr_packet *)skb->data;
224 uc_4addr_packet->u.header.packet_type = BATADV_UNICAST_4ADDR; 224 uc_4addr_packet->u.packet_type = BATADV_UNICAST_4ADDR;
225 memcpy(uc_4addr_packet->src, primary_if->net_dev->dev_addr, ETH_ALEN); 225 memcpy(uc_4addr_packet->src, primary_if->net_dev->dev_addr, ETH_ALEN);
226 uc_4addr_packet->subtype = packet_subtype; 226 uc_4addr_packet->subtype = packet_subtype;
227 uc_4addr_packet->reserved = 0; 227 uc_4addr_packet->reserved = 0;
@@ -436,7 +436,7 @@ int batadv_add_bcast_packet_to_list(struct batadv_priv *bat_priv,
436 436
437 /* as we have a copy now, it is safe to decrease the TTL */ 437 /* as we have a copy now, it is safe to decrease the TTL */
438 bcast_packet = (struct batadv_bcast_packet *)newskb->data; 438 bcast_packet = (struct batadv_bcast_packet *)newskb->data;
439 bcast_packet->header.ttl--; 439 bcast_packet->ttl--;
440 440
441 skb_reset_mac_header(newskb); 441 skb_reset_mac_header(newskb);
442 442
diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
index 36f050876f82..a8f99d1486c0 100644
--- a/net/batman-adv/soft-interface.c
+++ b/net/batman-adv/soft-interface.c
@@ -264,11 +264,11 @@ static int batadv_interface_tx(struct sk_buff *skb,
264 goto dropped; 264 goto dropped;
265 265
266 bcast_packet = (struct batadv_bcast_packet *)skb->data; 266 bcast_packet = (struct batadv_bcast_packet *)skb->data;
267 bcast_packet->header.version = BATADV_COMPAT_VERSION; 267 bcast_packet->version = BATADV_COMPAT_VERSION;
268 bcast_packet->header.ttl = BATADV_TTL; 268 bcast_packet->ttl = BATADV_TTL;
269 269
270 /* batman packet type: broadcast */ 270 /* batman packet type: broadcast */
271 bcast_packet->header.packet_type = BATADV_BCAST; 271 bcast_packet->packet_type = BATADV_BCAST;
272 bcast_packet->reserved = 0; 272 bcast_packet->reserved = 0;
273 273
274 /* hw address of first interface is the orig mac because only 274 /* hw address of first interface is the orig mac because only
@@ -328,7 +328,7 @@ void batadv_interface_rx(struct net_device *soft_iface,
328 struct sk_buff *skb, struct batadv_hard_iface *recv_if, 328 struct sk_buff *skb, struct batadv_hard_iface *recv_if,
329 int hdr_size, struct batadv_orig_node *orig_node) 329 int hdr_size, struct batadv_orig_node *orig_node)
330{ 330{
331 struct batadv_header *batadv_header = (struct batadv_header *)skb->data; 331 struct batadv_bcast_packet *batadv_bcast_packet;
332 struct batadv_priv *bat_priv = netdev_priv(soft_iface); 332 struct batadv_priv *bat_priv = netdev_priv(soft_iface);
333 __be16 ethertype = htons(ETH_P_BATMAN); 333 __be16 ethertype = htons(ETH_P_BATMAN);
334 struct vlan_ethhdr *vhdr; 334 struct vlan_ethhdr *vhdr;
@@ -336,7 +336,8 @@ void batadv_interface_rx(struct net_device *soft_iface,
336 unsigned short vid; 336 unsigned short vid;
337 bool is_bcast; 337 bool is_bcast;
338 338
339 is_bcast = (batadv_header->packet_type == BATADV_BCAST); 339 batadv_bcast_packet = (struct batadv_bcast_packet *)skb->data;
340 is_bcast = (batadv_bcast_packet->packet_type == BATADV_BCAST);
340 341
341 /* check if enough space is available for pulling, and pull */ 342 /* check if enough space is available for pulling, and pull */
342 if (!pskb_may_pull(skb, hdr_size)) 343 if (!pskb_may_pull(skb, hdr_size))
@@ -345,7 +346,12 @@ void batadv_interface_rx(struct net_device *soft_iface,
345 skb_pull_rcsum(skb, hdr_size); 346 skb_pull_rcsum(skb, hdr_size);
346 skb_reset_mac_header(skb); 347 skb_reset_mac_header(skb);
347 348
348 vid = batadv_get_vid(skb, hdr_size); 349 /* clean the netfilter state now that the batman-adv header has been
350 * removed
351 */
352 nf_reset(skb);
353
354 vid = batadv_get_vid(skb, 0);
349 ethhdr = eth_hdr(skb); 355 ethhdr = eth_hdr(skb);
350 356
351 switch (ntohs(ethhdr->h_proto)) { 357 switch (ntohs(ethhdr->h_proto)) {
diff --git a/net/batman-adv/translation-table.c b/net/batman-adv/translation-table.c
index 06506e6f9006..19bc42f8b8be 100644
--- a/net/batman-adv/translation-table.c
+++ b/net/batman-adv/translation-table.c
@@ -333,7 +333,8 @@ static void batadv_tt_local_event(struct batadv_priv *bat_priv,
333 return; 333 return;
334 334
335 tt_change_node->change.flags = flags; 335 tt_change_node->change.flags = flags;
336 tt_change_node->change.reserved = 0; 336 memset(tt_change_node->change.reserved, 0,
337 sizeof(tt_change_node->change.reserved));
337 memcpy(tt_change_node->change.addr, common->addr, ETH_ALEN); 338 memcpy(tt_change_node->change.addr, common->addr, ETH_ALEN);
338 tt_change_node->change.vid = htons(common->vid); 339 tt_change_node->change.vid = htons(common->vid);
339 340
@@ -2221,7 +2222,8 @@ static void batadv_tt_tvlv_generate(struct batadv_priv *bat_priv,
2221 ETH_ALEN); 2222 ETH_ALEN);
2222 tt_change->flags = tt_common_entry->flags; 2223 tt_change->flags = tt_common_entry->flags;
2223 tt_change->vid = htons(tt_common_entry->vid); 2224 tt_change->vid = htons(tt_common_entry->vid);
2224 tt_change->reserved = 0; 2225 memset(tt_change->reserved, 0,
2226 sizeof(tt_change->reserved));
2225 2227
2226 tt_num_entries++; 2228 tt_num_entries++;
2227 tt_change++; 2229 tt_change++;
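
With batadv_tvlv_tt_change's reserved field widened from one byte to reserved[3], a single-byte assignment no longer clears it; both senders above switch to memset over the whole array so no uninitialized padding reaches the wire. A minimal sketch of the pattern (struct name hypothetical):

#include <stdint.h>
#include <string.h>

struct tt_change_sketch {
        uint8_t flags;
        uint8_t reserved[3];    /* alignment padding, must be zeroed */
        uint8_t addr[6];
        uint16_t vid;
};

static void fill_change(struct tt_change_sketch *c,
                        const uint8_t *addr, uint16_t vid)
{
        c->flags = 0;
        /* sizeof keeps the memset correct if the array grows again */
        memset(c->reserved, 0, sizeof(c->reserved));
        memcpy(c->addr, addr, sizeof(c->addr));
        c->vid = vid;
}

int main(void)
{
        struct tt_change_sketch c;
        uint8_t mac[6] = { 0 };

        fill_change(&c, mac, 1);
        return 0;
}
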
diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c
index 6a6c8bb4fd72..7552f9e3089c 100644
--- a/net/bluetooth/hci_sock.c
+++ b/net/bluetooth/hci_sock.c
@@ -940,8 +940,22 @@ static int hci_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
940 bt_cb(skb)->pkt_type = *((unsigned char *) skb->data); 940 bt_cb(skb)->pkt_type = *((unsigned char *) skb->data);
941 skb_pull(skb, 1); 941 skb_pull(skb, 1);
942 942
943 if (hci_pi(sk)->channel == HCI_CHANNEL_RAW && 943 if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
944 bt_cb(skb)->pkt_type == HCI_COMMAND_PKT) { 944 /* No permission check is needed for user channel
945 * since that gets enforced when binding the socket.
946 *
947 * However check that the packet type is valid.
948 */
949 if (bt_cb(skb)->pkt_type != HCI_COMMAND_PKT &&
950 bt_cb(skb)->pkt_type != HCI_ACLDATA_PKT &&
951 bt_cb(skb)->pkt_type != HCI_SCODATA_PKT) {
952 err = -EINVAL;
953 goto drop;
954 }
955
956 skb_queue_tail(&hdev->raw_q, skb);
957 queue_work(hdev->workqueue, &hdev->tx_work);
958 } else if (bt_cb(skb)->pkt_type == HCI_COMMAND_PKT) {
945 u16 opcode = get_unaligned_le16(skb->data); 959 u16 opcode = get_unaligned_le16(skb->data);
946 u16 ogf = hci_opcode_ogf(opcode); 960 u16 ogf = hci_opcode_ogf(opcode);
947 u16 ocf = hci_opcode_ocf(opcode); 961 u16 ocf = hci_opcode_ocf(opcode);
@@ -972,14 +986,6 @@ static int hci_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
972 goto drop; 986 goto drop;
973 } 987 }
974 988
975 if (hci_pi(sk)->channel == HCI_CHANNEL_USER &&
976 bt_cb(skb)->pkt_type != HCI_COMMAND_PKT &&
977 bt_cb(skb)->pkt_type != HCI_ACLDATA_PKT &&
978 bt_cb(skb)->pkt_type != HCI_SCODATA_PKT) {
979 err = -EINVAL;
980 goto drop;
981 }
982
983 skb_queue_tail(&hdev->raw_q, skb); 989 skb_queue_tail(&hdev->raw_q, skb);
984 queue_work(hdev->workqueue, &hdev->tx_work); 990 queue_work(hdev->workqueue, &hdev->tx_work);
985 } 991 }
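
The hci_sock.c rework gives HCI_CHANNEL_USER its own branch: permissions were already enforced when the socket was bound, so sendmsg only has to reject packet types the channel cannot carry before queueing. A minimal sketch of that allow-list check, with made-up constants standing in for the HCI_*_PKT values:

#include <errno.h>
#include <stdio.h>

/* Made-up stand-ins for HCI_COMMAND_PKT etc. */
enum { PKT_COMMAND = 1, PKT_ACLDATA = 2, PKT_SCODATA = 3 };

/* User-channel writes: no permission check (done at bind time),
 * only validate that the packet type is one the channel carries. */
static int user_channel_check(int pkt_type)
{
        switch (pkt_type) {
        case PKT_COMMAND:
        case PKT_ACLDATA:
        case PKT_SCODATA:
                return 0;
        default:
                return -EINVAL;
        }
}

int main(void)
{
        printf("%d %d\n", user_channel_check(PKT_ACLDATA),
               user_channel_check(42));
        return 0;
}
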
diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
index 4c214b2b88ef..ef66365b7354 100644
--- a/net/bridge/br_multicast.c
+++ b/net/bridge/br_multicast.c
@@ -1998,7 +1998,7 @@ int br_multicast_set_hash_max(struct net_bridge *br, unsigned long val)
1998 u32 old; 1998 u32 old;
1999 struct net_bridge_mdb_htable *mdb; 1999 struct net_bridge_mdb_htable *mdb;
2000 2000
2001 spin_lock(&br->multicast_lock); 2001 spin_lock_bh(&br->multicast_lock);
2002 if (!netif_running(br->dev)) 2002 if (!netif_running(br->dev))
2003 goto unlock; 2003 goto unlock;
2004 2004
@@ -2030,7 +2030,7 @@ rollback:
2030 } 2030 }
2031 2031
2032unlock: 2032unlock:
2033 spin_unlock(&br->multicast_lock); 2033 spin_unlock_bh(&br->multicast_lock);
2034 2034
2035 return err; 2035 return err;
2036} 2036}
diff --git a/net/core/dev.c b/net/core/dev.c
index 153ee2f8c33e..e5e23d785454 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -4446,7 +4446,7 @@ struct net_device *netdev_all_upper_get_next_dev_rcu(struct net_device *dev,
4446{ 4446{
4447 struct netdev_adjacent *upper; 4447 struct netdev_adjacent *upper;
4448 4448
4449 WARN_ON_ONCE(!rcu_read_lock_held()); 4449 WARN_ON_ONCE(!rcu_read_lock_held() && !lockdep_rtnl_is_held());
4450 4450
4451 upper = list_entry_rcu((*iter)->next, struct netdev_adjacent, list); 4451 upper = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);
4452 4452
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index a666740051dc..ea97361f0e9b 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -1298,7 +1298,7 @@ int neigh_compat_output(struct neighbour *neigh, struct sk_buff *skb)
1298 1298
1299 if (dev_hard_header(skb, dev, ntohs(skb->protocol), NULL, NULL, 1299 if (dev_hard_header(skb, dev, ntohs(skb->protocol), NULL, NULL,
1300 skb->len) < 0 && 1300 skb->len) < 0 &&
1301 dev->header_ops->rebuild(skb)) 1301 dev_rebuild_header(skb))
1302 return 0; 1302 return 0;
1303 1303
1304 return dev_queue_xmit(skb); 1304 return dev_queue_xmit(skb);
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index 8f971990677c..303097874633 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -386,8 +386,14 @@ void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb,
386 !vlan_hw_offload_capable(netif_skb_features(skb), 386 !vlan_hw_offload_capable(netif_skb_features(skb),
387 skb->vlan_proto)) { 387 skb->vlan_proto)) {
388 skb = __vlan_put_tag(skb, skb->vlan_proto, vlan_tx_tag_get(skb)); 388 skb = __vlan_put_tag(skb, skb->vlan_proto, vlan_tx_tag_get(skb));
389 if (unlikely(!skb)) 389 if (unlikely(!skb)) {
390 break; 390 /* This is actually a packet drop, but we
391 * don't want the code at the end of this
392 * function to try and re-queue a NULL skb.
393 */
394 status = NETDEV_TX_OK;
395 goto unlock_txq;
396 }
391 skb->vlan_tci = 0; 397 skb->vlan_tci = 0;
392 } 398 }
393 399
@@ -395,6 +401,7 @@ void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb,
395 if (status == NETDEV_TX_OK) 401 if (status == NETDEV_TX_OK)
396 txq_trans_update(txq); 402 txq_trans_update(txq);
397 } 403 }
404 unlock_txq:
398 __netif_tx_unlock(txq); 405 __netif_tx_unlock(txq);
399 406
400 if (status == NETDEV_TX_OK) 407 if (status == NETDEV_TX_OK)
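
In the netpoll hunk, a failed __vlan_put_tag() frees the skb, so the old break would have let the tail of the function requeue a NULL pointer; the fix reports NETDEV_TX_OK (really a drop) and jumps straight to the unlock. A minimal sketch of that control flow (names hypothetical):

#include <stdio.h>

enum tx_status { TX_OK, TX_BUSY };

/* On helper failure the buffer is gone: report "ok" so the caller's
 * requeue path never sees a dangling pointer, and unlock once. */
static enum tx_status send_one(char **buf)
{
        enum tx_status status = TX_BUSY;

        if (*buf == NULL) {
                status = TX_OK;         /* actually a drop */
                goto unlock;
        }
        /* ... transmit *buf ... */
        status = TX_OK;
unlock:
        /* release the queue lock here, exactly once */
        return status;
}

int main(void)
{
        char *pkt = NULL;

        printf("status %d\n", send_one(&pkt));
        return 0;
}
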
diff --git a/net/dccp/probe.c b/net/dccp/probe.c
index 4c6bdf97a657..595ddf0459db 100644
--- a/net/dccp/probe.c
+++ b/net/dccp/probe.c
@@ -152,17 +152,6 @@ static const struct file_operations dccpprobe_fops = {
152 .llseek = noop_llseek, 152 .llseek = noop_llseek,
153}; 153};
154 154
155static __init int setup_jprobe(void)
156{
157 int ret = register_jprobe(&dccp_send_probe);
158
159 if (ret) {
160 request_module("dccp");
161 ret = register_jprobe(&dccp_send_probe);
162 }
163 return ret;
164}
165
166static __init int dccpprobe_init(void) 155static __init int dccpprobe_init(void)
167{ 156{
168 int ret = -ENOMEM; 157 int ret = -ENOMEM;
@@ -174,7 +163,13 @@ static __init int dccpprobe_init(void)
174 if (!proc_create(procname, S_IRUSR, init_net.proc_net, &dccpprobe_fops)) 163 if (!proc_create(procname, S_IRUSR, init_net.proc_net, &dccpprobe_fops))
175 goto err0; 164 goto err0;
176 165
177 ret = setup_jprobe(); 166 ret = register_jprobe(&dccp_send_probe);
167 if (ret) {
168 ret = request_module("dccp");
169 if (!ret)
170 ret = register_jprobe(&dccp_send_probe);
171 }
172
178 if (ret) 173 if (ret)
179 goto err1; 174 goto err1;
180 175
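
The dccp probe fix folds the jprobe setup back into dccpprobe_init() and, unlike the removed helper, checks request_module()'s return before retrying the registration. A minimal sketch of the retry-after-module-load shape, with stub functions standing in for register_jprobe()/request_module():

#include <stdio.h>

static int tries;

/* Stub for register_jprobe(): fails until the "module" is loaded. */
static int attach(void)
{
        return tries++ ? 0 : -1;
}

/* Stub for request_module(): report success. */
static int load_module(void)
{
        return 0;
}

int main(void)
{
        int ret = attach();

        if (ret) {
                ret = load_module();
                if (!ret)               /* retry only when the load worked */
                        ret = attach();
        }
        printf("ret=%d\n", ret);
        return ret;
}
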
diff --git a/net/ieee802154/6lowpan.c b/net/ieee802154/6lowpan.c
index 459e200c08a4..a2d2456a557a 100644
--- a/net/ieee802154/6lowpan.c
+++ b/net/ieee802154/6lowpan.c
@@ -547,7 +547,7 @@ static int lowpan_header_create(struct sk_buff *skb,
547 hc06_ptr += 3; 547 hc06_ptr += 3;
548 } else { 548 } else {
549 /* compress nothing */ 549 /* compress nothing */
550 memcpy(hc06_ptr, &hdr, 4); 550 memcpy(hc06_ptr, hdr, 4);
551 /* replace the top byte with new ECN | DSCP format */ 551 /* replace the top byte with new ECN | DSCP format */
552 *hc06_ptr = tmp; 552 *hc06_ptr = tmp;
553 hc06_ptr += 4; 553 hc06_ptr += 4;
diff --git a/net/ipv4/gre_offload.c b/net/ipv4/gre_offload.c
index e5d436188464..2cd02f32f99f 100644
--- a/net/ipv4/gre_offload.c
+++ b/net/ipv4/gre_offload.c
@@ -28,6 +28,7 @@ static struct sk_buff *gre_gso_segment(struct sk_buff *skb,
28 netdev_features_t enc_features; 28 netdev_features_t enc_features;
29 int ghl = GRE_HEADER_SECTION; 29 int ghl = GRE_HEADER_SECTION;
30 struct gre_base_hdr *greh; 30 struct gre_base_hdr *greh;
31 u16 mac_offset = skb->mac_header;
31 int mac_len = skb->mac_len; 32 int mac_len = skb->mac_len;
32 __be16 protocol = skb->protocol; 33 __be16 protocol = skb->protocol;
33 int tnl_hlen; 34 int tnl_hlen;
@@ -58,13 +59,13 @@ static struct sk_buff *gre_gso_segment(struct sk_buff *skb,
58 } else 59 } else
59 csum = false; 60 csum = false;
60 61
62 if (unlikely(!pskb_may_pull(skb, ghl)))
63 goto out;
64
61 /* setup inner skb. */ 65 /* setup inner skb. */
62 skb->protocol = greh->protocol; 66 skb->protocol = greh->protocol;
63 skb->encapsulation = 0; 67 skb->encapsulation = 0;
64 68
65 if (unlikely(!pskb_may_pull(skb, ghl)))
66 goto out;
67
68 __skb_pull(skb, ghl); 69 __skb_pull(skb, ghl);
69 skb_reset_mac_header(skb); 70 skb_reset_mac_header(skb);
70 skb_set_network_header(skb, skb_inner_network_offset(skb)); 71 skb_set_network_header(skb, skb_inner_network_offset(skb));
@@ -73,8 +74,10 @@ static struct sk_buff *gre_gso_segment(struct sk_buff *skb,
73 /* segment inner packet. */ 74 /* segment inner packet. */
74 enc_features = skb->dev->hw_enc_features & netif_skb_features(skb); 75 enc_features = skb->dev->hw_enc_features & netif_skb_features(skb);
75 segs = skb_mac_gso_segment(skb, enc_features); 76 segs = skb_mac_gso_segment(skb, enc_features);
76 if (!segs || IS_ERR(segs)) 77 if (!segs || IS_ERR(segs)) {
78 skb_gso_error_unwind(skb, protocol, ghl, mac_offset, mac_len);
77 goto out; 79 goto out;
80 }
78 81
79 skb = segs; 82 skb = segs;
80 tnl_hlen = skb_tnl_header_len(skb); 83 tnl_hlen = skb_tnl_header_len(skb);
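
gre_gso_segment() now snapshots skb->mac_header before rewriting the skb for inner segmentation; when skb_mac_gso_segment() fails, skb_gso_error_unwind() restores the outer view so the caller still sees a consistent packet. A minimal sketch of the save/unwind-on-error shape (types and helpers hypothetical):

#include <stdio.h>

struct pkt {
        int mac_header;
        int protocol;
};

static int segment_inner(struct pkt *p)
{
        p->mac_header += 8;     /* rewrite for the inner packet */
        return -1;              /* pretend segmentation failed */
}

/* Restore the fields mangled for inner processing so the caller
 * still sees a consistent outer packet on the error path. */
static void unwind(struct pkt *p, int mac_offset, int protocol)
{
        p->mac_header = mac_offset;
        p->protocol = protocol;
}

int main(void)
{
        struct pkt p = { .mac_header = 14, .protocol = 0x0800 };
        int mac_offset = p.mac_header;  /* snapshot before mangling */
        int protocol = p.protocol;

        if (segment_inner(&p) < 0)
                unwind(&p, mac_offset, protocol);
        printf("mac_header=%d\n", p.mac_header);
        return 0;
}
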
diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c
index 56a964a553d2..a0f52dac8940 100644
--- a/net/ipv4/inet_diag.c
+++ b/net/ipv4/inet_diag.c
@@ -106,6 +106,10 @@ int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk,
106 106
107 r->id.idiag_sport = inet->inet_sport; 107 r->id.idiag_sport = inet->inet_sport;
108 r->id.idiag_dport = inet->inet_dport; 108 r->id.idiag_dport = inet->inet_dport;
109
110 memset(&r->id.idiag_src, 0, sizeof(r->id.idiag_src));
111 memset(&r->id.idiag_dst, 0, sizeof(r->id.idiag_dst));
112
109 r->id.idiag_src[0] = inet->inet_rcv_saddr; 113 r->id.idiag_src[0] = inet->inet_rcv_saddr;
110 r->id.idiag_dst[0] = inet->inet_daddr; 114 r->id.idiag_dst[0] = inet->inet_daddr;
111 115
@@ -240,12 +244,19 @@ static int inet_twsk_diag_fill(struct inet_timewait_sock *tw,
240 244
241 r->idiag_family = tw->tw_family; 245 r->idiag_family = tw->tw_family;
242 r->idiag_retrans = 0; 246 r->idiag_retrans = 0;
247
243 r->id.idiag_if = tw->tw_bound_dev_if; 248 r->id.idiag_if = tw->tw_bound_dev_if;
244 sock_diag_save_cookie(tw, r->id.idiag_cookie); 249 sock_diag_save_cookie(tw, r->id.idiag_cookie);
250
245 r->id.idiag_sport = tw->tw_sport; 251 r->id.idiag_sport = tw->tw_sport;
246 r->id.idiag_dport = tw->tw_dport; 252 r->id.idiag_dport = tw->tw_dport;
253
254 memset(&r->id.idiag_src, 0, sizeof(r->id.idiag_src));
255 memset(&r->id.idiag_dst, 0, sizeof(r->id.idiag_dst));
256
247 r->id.idiag_src[0] = tw->tw_rcv_saddr; 257 r->id.idiag_src[0] = tw->tw_rcv_saddr;
248 r->id.idiag_dst[0] = tw->tw_daddr; 258 r->id.idiag_dst[0] = tw->tw_daddr;
259
249 r->idiag_state = tw->tw_substate; 260 r->idiag_state = tw->tw_substate;
250 r->idiag_timer = 3; 261 r->idiag_timer = 3;
251 r->idiag_expires = jiffies_to_msecs(tmo); 262 r->idiag_expires = jiffies_to_msecs(tmo);
@@ -726,8 +737,13 @@ static int inet_diag_fill_req(struct sk_buff *skb, struct sock *sk,
726 737
727 r->id.idiag_sport = inet->inet_sport; 738 r->id.idiag_sport = inet->inet_sport;
728 r->id.idiag_dport = ireq->ir_rmt_port; 739 r->id.idiag_dport = ireq->ir_rmt_port;
740
741 memset(&r->id.idiag_src, 0, sizeof(r->id.idiag_src));
742 memset(&r->id.idiag_dst, 0, sizeof(r->id.idiag_dst));
743
729 r->id.idiag_src[0] = ireq->ir_loc_addr; 744 r->id.idiag_src[0] = ireq->ir_loc_addr;
730 r->id.idiag_dst[0] = ireq->ir_rmt_addr; 745 r->id.idiag_dst[0] = ireq->ir_rmt_addr;
746
731 r->idiag_expires = jiffies_to_msecs(tmo); 747 r->idiag_expires = jiffies_to_msecs(tmo);
732 r->idiag_rqueue = 0; 748 r->idiag_rqueue = 0;
733 r->idiag_wqueue = 0; 749 r->idiag_wqueue = 0;
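
inet_diag exposes 16-byte idiag_src/idiag_dst fields, but an IPv4 socket writes only the first 4 bytes; without the added memsets, the remaining 12 bytes of each field would carry uninitialized kernel memory to userspace. A minimal sketch of the zero-then-fill pattern (field sizes mirror the diag layout, names hypothetical):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct diag_sketch {
        uint32_t src[4];        /* room for IPv6; IPv4 uses src[0] only */
        uint32_t dst[4];
};

static void fill_v4(struct diag_sketch *r, uint32_t saddr, uint32_t daddr)
{
        /* zero everything first so the unused 12 bytes never leak
         * whatever happened to be there before */
        memset(r->src, 0, sizeof(r->src));
        memset(r->dst, 0, sizeof(r->dst));
        r->src[0] = saddr;
        r->dst[0] = daddr;
}

int main(void)
{
        struct diag_sketch r;

        fill_v4(&r, 0x0100007f, 0x0100007f);    /* 127.0.0.1, LE */
        printf("src[1]=%u (guaranteed zero)\n", r.src[1]);
        return 0;
}
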
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index d7aea4c5b940..e560ef34cf4b 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -217,6 +217,7 @@ static int ipgre_rcv(struct sk_buff *skb, const struct tnl_ptk_info *tpi)
217 iph->saddr, iph->daddr, tpi->key); 217 iph->saddr, iph->daddr, tpi->key);
218 218
219 if (tunnel) { 219 if (tunnel) {
220 skb_pop_mac_header(skb);
220 ip_tunnel_rcv(tunnel, skb, tpi, log_ecn_error); 221 ip_tunnel_rcv(tunnel, skb, tpi, log_ecn_error);
221 return PACKET_RCVD; 222 return PACKET_RCVD;
222 } 223 }
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index 912402752f2f..df184616493f 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -828,7 +828,7 @@ static int __ip_append_data(struct sock *sk,
828 828
829 if (cork->length + length > maxnonfragsize - fragheaderlen) { 829 if (cork->length + length > maxnonfragsize - fragheaderlen) {
830 ip_local_error(sk, EMSGSIZE, fl4->daddr, inet->inet_dport, 830 ip_local_error(sk, EMSGSIZE, fl4->daddr, inet->inet_dport,
831 mtu-exthdrlen); 831 mtu - (opt ? opt->optlen : 0));
832 return -EMSGSIZE; 832 return -EMSGSIZE;
833 } 833 }
834 834
@@ -1151,7 +1151,8 @@ ssize_t ip_append_page(struct sock *sk, struct flowi4 *fl4, struct page *page,
1151 mtu : 0xFFFF; 1151 mtu : 0xFFFF;
1152 1152
1153 if (cork->length + size > maxnonfragsize - fragheaderlen) { 1153 if (cork->length + size > maxnonfragsize - fragheaderlen) {
1154 ip_local_error(sk, EMSGSIZE, fl4->daddr, inet->inet_dport, mtu); 1154 ip_local_error(sk, EMSGSIZE, fl4->daddr, inet->inet_dport,
1155 mtu - (opt ? opt->optlen : 0));
1155 return -EMSGSIZE; 1156 return -EMSGSIZE;
1156 } 1157 }
1157 1158
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index d5d24ecde6a5..80f649fbee63 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -2478,6 +2478,7 @@ struct sk_buff *skb_udp_tunnel_segment(struct sk_buff *skb,
2478 netdev_features_t features) 2478 netdev_features_t features)
2479{ 2479{
2480 struct sk_buff *segs = ERR_PTR(-EINVAL); 2480 struct sk_buff *segs = ERR_PTR(-EINVAL);
2481 u16 mac_offset = skb->mac_header;
2481 int mac_len = skb->mac_len; 2482 int mac_len = skb->mac_len;
2482 int tnl_hlen = skb_inner_mac_header(skb) - skb_transport_header(skb); 2483 int tnl_hlen = skb_inner_mac_header(skb) - skb_transport_header(skb);
2483 __be16 protocol = skb->protocol; 2484 __be16 protocol = skb->protocol;
@@ -2497,8 +2498,11 @@ struct sk_buff *skb_udp_tunnel_segment(struct sk_buff *skb,
2497 /* segment inner packet. */ 2498 /* segment inner packet. */
2498 enc_features = skb->dev->hw_enc_features & netif_skb_features(skb); 2499 enc_features = skb->dev->hw_enc_features & netif_skb_features(skb);
2499 segs = skb_mac_gso_segment(skb, enc_features); 2500 segs = skb_mac_gso_segment(skb, enc_features);
2500 if (!segs || IS_ERR(segs)) 2501 if (!segs || IS_ERR(segs)) {
2502 skb_gso_error_unwind(skb, protocol, tnl_hlen, mac_offset,
2503 mac_len);
2501 goto out; 2504 goto out;
2505 }
2502 2506
2503 outer_hlen = skb_tnl_header_len(skb); 2507 outer_hlen = skb_tnl_header_len(skb);
2504 skb = segs; 2508 skb = segs;
diff --git a/net/ipv4/udp_offload.c b/net/ipv4/udp_offload.c
index 83206de2bc76..79c62bdcd3c5 100644
--- a/net/ipv4/udp_offload.c
+++ b/net/ipv4/udp_offload.c
@@ -41,6 +41,14 @@ static struct sk_buff *udp4_ufo_fragment(struct sk_buff *skb,
41{ 41{
42 struct sk_buff *segs = ERR_PTR(-EINVAL); 42 struct sk_buff *segs = ERR_PTR(-EINVAL);
43 unsigned int mss; 43 unsigned int mss;
44 int offset;
45 __wsum csum;
46
47 if (skb->encapsulation &&
48 skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL) {
49 segs = skb_udp_tunnel_segment(skb, features);
50 goto out;
51 }
44 52
45 mss = skb_shinfo(skb)->gso_size; 53 mss = skb_shinfo(skb)->gso_size;
46 if (unlikely(skb->len <= mss)) 54 if (unlikely(skb->len <= mss))
@@ -63,27 +71,20 @@ static struct sk_buff *udp4_ufo_fragment(struct sk_buff *skb,
63 goto out; 71 goto out;
64 } 72 }
65 73
74 /* Do software UFO. Complete and fill in the UDP checksum as
75 * HW cannot do checksum of UDP packets sent as multiple
76 * IP fragments.
77 */
78 offset = skb_checksum_start_offset(skb);
79 csum = skb_checksum(skb, offset, skb->len - offset, 0);
80 offset += skb->csum_offset;
81 *(__sum16 *)(skb->data + offset) = csum_fold(csum);
82 skb->ip_summed = CHECKSUM_NONE;
83
66 /* Fragment the skb. IP headers of the fragments are updated in 84 /* Fragment the skb. IP headers of the fragments are updated in
67 * inet_gso_segment() 85 * inet_gso_segment()
68 */ 86 */
69 if (skb->encapsulation && skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL) 87 segs = skb_segment(skb, features);
70 segs = skb_udp_tunnel_segment(skb, features);
71 else {
72 int offset;
73 __wsum csum;
74
75 /* Do software UFO. Complete and fill in the UDP checksum as
76 * HW cannot do checksum of UDP packets sent as multiple
77 * IP fragments.
78 */
79 offset = skb_checksum_start_offset(skb);
80 csum = skb_checksum(skb, offset, skb->len - offset, 0);
81 offset += skb->csum_offset;
82 *(__sum16 *)(skb->data + offset) = csum_fold(csum);
83 skb->ip_summed = CHECKSUM_NONE;
84
85 segs = skb_segment(skb, features);
86 }
87out: 88out:
88 return segs; 89 return segs;
89} 90}
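
The udp_offload rework hoists the tunnel case out early and leaves the plain path doing software UFO: hardware cannot checksum a UDP datagram that becomes several IP fragments, so the full one's-complement sum is computed and folded into the header before skb_segment(). A minimal sketch of folding a 32-bit accumulator into the 16-bit checksum field, mirroring what csum_fold() does:

#include <stdint.h>
#include <stdio.h>

/* Fold a 32-bit one's-complement accumulator into 16 bits: add the
 * carries back in twice, then invert. */
static uint16_t fold(uint32_t sum)
{
        sum = (sum & 0xffff) + (sum >> 16);
        sum = (sum & 0xffff) + (sum >> 16);
        return (uint16_t)~sum;
}

int main(void)
{
        /* made-up accumulated sum over the UDP payload */
        uint32_t sum = 0x2a8f31;

        printf("checksum field: 0x%04x\n", fold(sum));
        return 0;
}
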
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 6c1634507ec2..31f75ea9cb60 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -1677,7 +1677,7 @@ void addrconf_leave_solict(struct inet6_dev *idev, const struct in6_addr *addr)
1677static void addrconf_join_anycast(struct inet6_ifaddr *ifp) 1677static void addrconf_join_anycast(struct inet6_ifaddr *ifp)
1678{ 1678{
1679 struct in6_addr addr; 1679 struct in6_addr addr;
1680 if (ifp->prefix_len == 127) /* RFC 6164 */ 1680 if (ifp->prefix_len >= 127) /* RFC 6164 */
1681 return; 1681 return;
1682 ipv6_addr_prefix(&addr, &ifp->addr, ifp->prefix_len); 1682 ipv6_addr_prefix(&addr, &ifp->addr, ifp->prefix_len);
1683 if (ipv6_addr_any(&addr)) 1683 if (ipv6_addr_any(&addr))
@@ -1688,7 +1688,7 @@ static void addrconf_join_anycast(struct inet6_ifaddr *ifp)
1688static void addrconf_leave_anycast(struct inet6_ifaddr *ifp) 1688static void addrconf_leave_anycast(struct inet6_ifaddr *ifp)
1689{ 1689{
1690 struct in6_addr addr; 1690 struct in6_addr addr;
1691 if (ifp->prefix_len == 127) /* RFC 6164 */ 1691 if (ifp->prefix_len >= 127) /* RFC 6164 */
1692 return; 1692 return;
1693 ipv6_addr_prefix(&addr, &ifp->addr, ifp->prefix_len); 1693 ipv6_addr_prefix(&addr, &ifp->addr, ifp->prefix_len);
1694 if (ipv6_addr_any(&addr)) 1694 if (ipv6_addr_any(&addr))
@@ -3476,7 +3476,12 @@ restart:
3476 &inet6_addr_lst[i], addr_lst) { 3476 &inet6_addr_lst[i], addr_lst) {
3477 unsigned long age; 3477 unsigned long age;
3478 3478
3479 if (ifp->flags & IFA_F_PERMANENT) 3479 /* When preferred_lft is set to a value that is neither
3480 * zero nor infinity while valid_lft is infinity,
3481 * IFA_F_PERMANENT has a non-infinite lifetime.
3482 */
3483 if ((ifp->flags & IFA_F_PERMANENT) &&
3484 (ifp->prefered_lft == INFINITY_LIFE_TIME))
3480 continue; 3485 continue;
3481 3486
3482 spin_lock(&ifp->lock); 3487 spin_lock(&ifp->lock);
@@ -3501,7 +3506,8 @@ restart:
3501 ifp->flags |= IFA_F_DEPRECATED; 3506 ifp->flags |= IFA_F_DEPRECATED;
3502 } 3507 }
3503 3508
3504 if (time_before(ifp->tstamp + ifp->valid_lft * HZ, next)) 3509 if ((ifp->valid_lft != INFINITY_LIFE_TIME) &&
3510 (time_before(ifp->tstamp + ifp->valid_lft * HZ, next)))
3505 next = ifp->tstamp + ifp->valid_lft * HZ; 3511 next = ifp->tstamp + ifp->valid_lft * HZ;
3506 3512
3507 spin_unlock(&ifp->lock); 3513 spin_unlock(&ifp->lock);
@@ -3801,7 +3807,8 @@ static int inet6_fill_ifaddr(struct sk_buff *skb, struct inet6_ifaddr *ifa,
3801 put_ifaddrmsg(nlh, ifa->prefix_len, ifa->flags, rt_scope(ifa->scope), 3807 put_ifaddrmsg(nlh, ifa->prefix_len, ifa->flags, rt_scope(ifa->scope),
3802 ifa->idev->dev->ifindex); 3808 ifa->idev->dev->ifindex);
3803 3809
3804 if (!(ifa->flags&IFA_F_PERMANENT)) { 3810 if (!((ifa->flags&IFA_F_PERMANENT) &&
3811 (ifa->prefered_lft == INFINITY_LIFE_TIME))) {
3805 preferred = ifa->prefered_lft; 3812 preferred = ifa->prefered_lft;
3806 valid = ifa->valid_lft; 3813 valid = ifa->valid_lft;
3807 if (preferred != INFINITY_LIFE_TIME) { 3814 if (preferred != INFINITY_LIFE_TIME) {
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index 788c01a53593..d1de9560c421 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -1188,11 +1188,35 @@ int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to,
1188 1188
1189 fragheaderlen = sizeof(struct ipv6hdr) + rt->rt6i_nfheader_len + 1189 fragheaderlen = sizeof(struct ipv6hdr) + rt->rt6i_nfheader_len +
1190 (opt ? opt->opt_nflen : 0); 1190 (opt ? opt->opt_nflen : 0);
1191 maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen - sizeof(struct frag_hdr); 1191 maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen -
1192 sizeof(struct frag_hdr);
1192 1193
1193 if (mtu <= sizeof(struct ipv6hdr) + IPV6_MAXPLEN) { 1194 if (mtu <= sizeof(struct ipv6hdr) + IPV6_MAXPLEN) {
1194 if (cork->length + length > sizeof(struct ipv6hdr) + IPV6_MAXPLEN - fragheaderlen) { 1195 unsigned int maxnonfragsize, headersize;
1195 ipv6_local_error(sk, EMSGSIZE, fl6, mtu-exthdrlen); 1196
1197 headersize = sizeof(struct ipv6hdr) +
1198 (opt ? opt->tot_len : 0) +
1199 (dst_allfrag(&rt->dst) ?
1200 sizeof(struct frag_hdr) : 0) +
1201 rt->rt6i_nfheader_len;
1202
1203 maxnonfragsize = (np->pmtudisc >= IPV6_PMTUDISC_DO) ?
1204 mtu : sizeof(struct ipv6hdr) + IPV6_MAXPLEN;
1205
1206 /* dontfrag active */
1207 if ((cork->length + length > mtu - headersize) && dontfrag &&
1208 (sk->sk_protocol == IPPROTO_UDP ||
1209 sk->sk_protocol == IPPROTO_RAW)) {
1210 ipv6_local_rxpmtu(sk, fl6, mtu - headersize +
1211 sizeof(struct ipv6hdr));
1212 goto emsgsize;
1213 }
1214
1215 if (cork->length + length > maxnonfragsize - headersize) {
1216emsgsize:
1217 ipv6_local_error(sk, EMSGSIZE, fl6,
1218 mtu - headersize +
1219 sizeof(struct ipv6hdr));
1196 return -EMSGSIZE; 1220 return -EMSGSIZE;
1197 } 1221 }
1198 } 1222 }
@@ -1217,12 +1241,6 @@ int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to,
1217 * --yoshfuji 1241 * --yoshfuji
1218 */ 1242 */
1219 1243
1220 if ((length > mtu) && dontfrag && (sk->sk_protocol == IPPROTO_UDP ||
1221 sk->sk_protocol == IPPROTO_RAW)) {
1222 ipv6_local_rxpmtu(sk, fl6, mtu-exthdrlen);
1223 return -EMSGSIZE;
1224 }
1225
1226 skb = skb_peek_tail(&sk->sk_write_queue); 1244 skb = skb_peek_tail(&sk->sk_write_queue);
1227 cork->length += length; 1245 cork->length += length;
1228 if (((length > mtu) || 1246 if (((length > mtu) ||
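
The ip6_output.c rewrite computes one headersize (base IPv6 header, extension headers, an optional fragment header when dst_allfrag(), plus netfilter header room) and checks both the dontfrag and EMSGSIZE limits against it, reporting mtu - headersize + sizeof(struct ipv6hdr) so userspace learns the usable datagram size. A worked sketch of that arithmetic, with made-up constants:

#include <stdio.h>

int main(void)
{
        int mtu = 1280;
        int ipv6hdr = 40, opt_tot_len = 8, frag_hdr = 8, nf_len = 0;
        int allfrag = 1;        /* made-up: destination wants frag headers */

        int headersize = ipv6hdr + opt_tot_len +
                         (allfrag ? frag_hdr : 0) + nf_len;

        /* the errno-report value: payload room plus the base header,
         * i.e. what the application may pass as one datagram */
        printf("headersize=%d report=%d\n",
               headersize, mtu - headersize + ipv6hdr);
        return 0;
}
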
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index 02894216a46d..1e5e2404f1af 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -101,17 +101,26 @@ struct ip6_tnl_net {
101 101
102static struct net_device_stats *ip6_get_stats(struct net_device *dev) 102static struct net_device_stats *ip6_get_stats(struct net_device *dev)
103{ 103{
104 struct pcpu_sw_netstats sum = { 0 }; 104 struct pcpu_sw_netstats tmp, sum = { 0 };
105 int i; 105 int i;
106 106
107 for_each_possible_cpu(i) { 107 for_each_possible_cpu(i) {
108 unsigned int start;
108 const struct pcpu_sw_netstats *tstats = 109 const struct pcpu_sw_netstats *tstats =
109 per_cpu_ptr(dev->tstats, i); 110 per_cpu_ptr(dev->tstats, i);
110 111
111 sum.rx_packets += tstats->rx_packets; 112 do {
112 sum.rx_bytes += tstats->rx_bytes; 113 start = u64_stats_fetch_begin_bh(&tstats->syncp);
113 sum.tx_packets += tstats->tx_packets; 114 tmp.rx_packets = tstats->rx_packets;
114 sum.tx_bytes += tstats->tx_bytes; 115 tmp.rx_bytes = tstats->rx_bytes;
116 tmp.tx_packets = tstats->tx_packets;
117 tmp.tx_bytes = tstats->tx_bytes;
118 } while (u64_stats_fetch_retry_bh(&tstats->syncp, start));
119
120 sum.rx_packets += tmp.rx_packets;
121 sum.rx_bytes += tmp.rx_bytes;
122 sum.tx_packets += tmp.tx_packets;
123 sum.tx_bytes += tmp.tx_bytes;
115 } 124 }
116 dev->stats.rx_packets = sum.rx_packets; 125 dev->stats.rx_packets = sum.rx_packets;
117 dev->stats.rx_bytes = sum.rx_bytes; 126 dev->stats.rx_bytes = sum.rx_bytes;
@@ -823,8 +832,10 @@ static int ip6_tnl_rcv(struct sk_buff *skb, __u16 protocol,
823 } 832 }
824 833
825 tstats = this_cpu_ptr(t->dev->tstats); 834 tstats = this_cpu_ptr(t->dev->tstats);
835 u64_stats_update_begin(&tstats->syncp);
826 tstats->rx_packets++; 836 tstats->rx_packets++;
827 tstats->rx_bytes += skb->len; 837 tstats->rx_bytes += skb->len;
838 u64_stats_update_end(&tstats->syncp);
828 839
829 netif_rx(skb); 840 netif_rx(skb);
830 841
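
ip6_get_stats() now reads each CPU's counters under a u64_stats sequence retry loop: snapshot into tmp, and if the sequence count changed mid-read (a writer was active), re-read, so 64-bit counters stay consistent even on 32-bit machines. A minimal userspace sketch of the reader side, using a C11 atomic as a stand-in for the kernel's syncp (single-threaded here, so the loop runs once):

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

struct stats {
        atomic_uint seq;        /* even = stable, odd = write in progress */
        uint64_t rx_packets;
        uint64_t rx_bytes;
};

/* Reader: retry until a full snapshot lands between two identical
 * even sequence values, like u64_stats_fetch_begin/retry. */
static void snapshot(struct stats *s, struct stats *out)
{
        unsigned int start;

        do {
                while ((start = atomic_load(&s->seq)) & 1)
                        ;       /* writer active, spin */
                out->rx_packets = s->rx_packets;
                out->rx_bytes = s->rx_bytes;
        } while (atomic_load(&s->seq) != start);
}

int main(void)
{
        struct stats s = { .rx_packets = 7, .rx_bytes = 4242 }, tmp;

        atomic_init(&s.seq, 0);
        snapshot(&s, &tmp);
        printf("%llu packets, %llu bytes\n",
               (unsigned long long)tmp.rx_packets,
               (unsigned long long)tmp.rx_bytes);
        return 0;
}
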
diff --git a/net/ipv6/ip6_vti.c b/net/ipv6/ip6_vti.c
index da1d9e4d62ca..b50acd5e75d2 100644
--- a/net/ipv6/ip6_vti.c
+++ b/net/ipv6/ip6_vti.c
@@ -74,27 +74,6 @@ struct vti6_net {
74 struct ip6_tnl __rcu **tnls[2]; 74 struct ip6_tnl __rcu **tnls[2];
75}; 75};
76 76
77static struct net_device_stats *vti6_get_stats(struct net_device *dev)
78{
79 struct pcpu_sw_netstats sum = { 0 };
80 int i;
81
82 for_each_possible_cpu(i) {
83 const struct pcpu_sw_netstats *tstats =
84 per_cpu_ptr(dev->tstats, i);
85
86 sum.rx_packets += tstats->rx_packets;
87 sum.rx_bytes += tstats->rx_bytes;
88 sum.tx_packets += tstats->tx_packets;
89 sum.tx_bytes += tstats->tx_bytes;
90 }
91 dev->stats.rx_packets = sum.rx_packets;
92 dev->stats.rx_bytes = sum.rx_bytes;
93 dev->stats.tx_packets = sum.tx_packets;
94 dev->stats.tx_bytes = sum.tx_bytes;
95 return &dev->stats;
96}
97
98#define for_each_vti6_tunnel_rcu(start) \ 77#define for_each_vti6_tunnel_rcu(start) \
99 for (t = rcu_dereference(start); t; t = rcu_dereference(t->next)) 78 for (t = rcu_dereference(start); t; t = rcu_dereference(t->next))
100 79
@@ -331,8 +310,10 @@ static int vti6_rcv(struct sk_buff *skb)
331 } 310 }
332 311
333 tstats = this_cpu_ptr(t->dev->tstats); 312 tstats = this_cpu_ptr(t->dev->tstats);
313 u64_stats_update_begin(&tstats->syncp);
334 tstats->rx_packets++; 314 tstats->rx_packets++;
335 tstats->rx_bytes += skb->len; 315 tstats->rx_bytes += skb->len;
316 u64_stats_update_end(&tstats->syncp);
336 317
337 skb->mark = 0; 318 skb->mark = 0;
338 secpath_reset(skb); 319 secpath_reset(skb);
@@ -716,7 +697,7 @@ static const struct net_device_ops vti6_netdev_ops = {
716 .ndo_start_xmit = vti6_tnl_xmit, 697 .ndo_start_xmit = vti6_tnl_xmit,
717 .ndo_do_ioctl = vti6_ioctl, 698 .ndo_do_ioctl = vti6_ioctl,
718 .ndo_change_mtu = vti6_change_mtu, 699 .ndo_change_mtu = vti6_change_mtu,
719 .ndo_get_stats = vti6_get_stats, 700 .ndo_get_stats64 = ip_tunnel_get_stats64,
720}; 701};
721 702
722/** 703/**
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 266f110cb6f7..11dac21e6586 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -1923,9 +1923,7 @@ static struct rt6_info *ip6_rt_copy(struct rt6_info *ort,
1923 else 1923 else
1924 rt->rt6i_gateway = *dest; 1924 rt->rt6i_gateway = *dest;
1925 rt->rt6i_flags = ort->rt6i_flags; 1925 rt->rt6i_flags = ort->rt6i_flags;
1926 if ((ort->rt6i_flags & (RTF_DEFAULT | RTF_ADDRCONF)) == 1926 rt6_set_from(rt, ort);
1927 (RTF_DEFAULT | RTF_ADDRCONF))
1928 rt6_set_from(rt, ort);
1929 rt->rt6i_metric = 0; 1927 rt->rt6i_metric = 0;
1930 1928
1931#ifdef CONFIG_IPV6_SUBTREES 1929#ifdef CONFIG_IPV6_SUBTREES
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
index 9937b2616713..3dfbcf1dcb1c 100644
--- a/net/ipv6/sit.c
+++ b/net/ipv6/sit.c
@@ -702,8 +702,10 @@ static int ipip6_rcv(struct sk_buff *skb)
702 } 702 }
703 703
704 tstats = this_cpu_ptr(tunnel->dev->tstats); 704 tstats = this_cpu_ptr(tunnel->dev->tstats);
705 u64_stats_update_begin(&tstats->syncp);
705 tstats->rx_packets++; 706 tstats->rx_packets++;
706 tstats->rx_bytes += skb->len; 707 tstats->rx_bytes += skb->len;
708 u64_stats_update_end(&tstats->syncp);
707 709
708 netif_rx(skb); 710 netif_rx(skb);
709 711
@@ -924,7 +926,7 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb,
924 if (tunnel->parms.iph.daddr && skb_dst(skb)) 926 if (tunnel->parms.iph.daddr && skb_dst(skb))
925 skb_dst(skb)->ops->update_pmtu(skb_dst(skb), NULL, skb, mtu); 927 skb_dst(skb)->ops->update_pmtu(skb_dst(skb), NULL, skb, mtu);
926 928
927 if (skb->len > mtu) { 929 if (skb->len > mtu && !skb_is_gso(skb)) {
928 icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu); 930 icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
929 ip_rt_put(rt); 931 ip_rt_put(rt);
930 goto tx_error; 932 goto tx_error;
@@ -966,8 +968,10 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb,
966 tos = INET_ECN_encapsulate(tos, ipv6_get_dsfield(iph6)); 968 tos = INET_ECN_encapsulate(tos, ipv6_get_dsfield(iph6));
967 969
968 skb = iptunnel_handle_offloads(skb, false, SKB_GSO_SIT); 970 skb = iptunnel_handle_offloads(skb, false, SKB_GSO_SIT);
969 if (IS_ERR(skb)) 971 if (IS_ERR(skb)) {
972 ip_rt_put(rt);
970 goto out; 973 goto out;
974 }
971 975
972 err = iptunnel_xmit(rt, skb, fl4.saddr, fl4.daddr, IPPROTO_IPV6, tos, 976 err = iptunnel_xmit(rt, skb, fl4.saddr, fl4.daddr, IPPROTO_IPV6, tos,
973 ttl, df, !net_eq(tunnel->net, dev_net(dev))); 977 ttl, df, !net_eq(tunnel->net, dev_net(dev)));
diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c
index 7b01b9f5846c..c71b699eb555 100644
--- a/net/llc/af_llc.c
+++ b/net/llc/af_llc.c
@@ -715,7 +715,7 @@ static int llc_ui_recvmsg(struct kiocb *iocb, struct socket *sock,
715 unsigned long cpu_flags; 715 unsigned long cpu_flags;
716 size_t copied = 0; 716 size_t copied = 0;
717 u32 peek_seq = 0; 717 u32 peek_seq = 0;
718 u32 *seq; 718 u32 *seq, skb_len;
719 unsigned long used; 719 unsigned long used;
720 int target; /* Read at least this many bytes */ 720 int target; /* Read at least this many bytes */
721 long timeo; 721 long timeo;
@@ -812,6 +812,7 @@ static int llc_ui_recvmsg(struct kiocb *iocb, struct socket *sock,
812 } 812 }
813 continue; 813 continue;
814 found_ok_skb: 814 found_ok_skb:
815 skb_len = skb->len;
815 /* Ok so how much can we use? */ 816 /* Ok so how much can we use? */
816 used = skb->len - offset; 817 used = skb->len - offset;
817 if (len < used) 818 if (len < used)
@@ -844,7 +845,7 @@ static int llc_ui_recvmsg(struct kiocb *iocb, struct socket *sock,
844 } 845 }
845 846
846 /* Partial read */ 847 /* Partial read */
847 if (used + offset < skb->len) 848 if (used + offset < skb_len)
848 continue; 849 continue;
849 } while (len > 0); 850 } while (len > 0);
850 851
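
The af_llc fix copies skb->len into a local before the skb can be consumed or freed further down the receive loop; the partial-read test at the bottom then compares against the saved value instead of dereferencing a possibly-freed skb. A minimal sketch of the save-before-release pattern (names hypothetical):

#include <stdio.h>
#include <stdlib.h>

struct buf {
        size_t len;
        /* payload ... */
};

int main(void)
{
        struct buf *b = malloc(sizeof(*b));
        size_t used = 3, offset = 0;

        if (!b)
                return 1;
        b->len = 10;

        size_t buf_len = b->len;        /* snapshot before the free below */
        /* ... consume data; the buffer may be handed off or freed ... */
        free(b);

        /* compare against the snapshot, never against b->len */
        if (used + offset < buf_len)
                printf("partial read, continue\n");
        return 0;
}
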
diff --git a/net/netfilter/ipvs/ip_vs_nfct.c b/net/netfilter/ipvs/ip_vs_nfct.c
index d5f41514f577..5882bbfd198c 100644
--- a/net/netfilter/ipvs/ip_vs_nfct.c
+++ b/net/netfilter/ipvs/ip_vs_nfct.c
@@ -62,6 +62,7 @@
62#include <net/ip_vs.h> 62#include <net/ip_vs.h>
63#include <net/netfilter/nf_conntrack_core.h> 63#include <net/netfilter/nf_conntrack_core.h>
64#include <net/netfilter/nf_conntrack_expect.h> 64#include <net/netfilter/nf_conntrack_expect.h>
65#include <net/netfilter/nf_conntrack_seqadj.h>
65#include <net/netfilter/nf_conntrack_helper.h> 66#include <net/netfilter/nf_conntrack_helper.h>
66#include <net/netfilter/nf_conntrack_zones.h> 67#include <net/netfilter/nf_conntrack_zones.h>
67 68
@@ -96,6 +97,11 @@ ip_vs_update_conntrack(struct sk_buff *skb, struct ip_vs_conn *cp, int outin)
96 if (CTINFO2DIR(ctinfo) != IP_CT_DIR_ORIGINAL) 97 if (CTINFO2DIR(ctinfo) != IP_CT_DIR_ORIGINAL)
97 return; 98 return;
98 99
100 /* Applications may adjust TCP seqs */
101 if (cp->app && nf_ct_protonum(ct) == IPPROTO_TCP &&
102 !nfct_seqadj(ct) && !nfct_seqadj_ext_add(ct))
103 return;
104
99 /* 105 /*
100 * The connection is not yet in the hashtable, so we update it. 106 * The connection is not yet in the hashtable, so we update it.
101 * CIP->VIP will remain the same, so leave the tuple in 107 * CIP->VIP will remain the same, so leave the tuple in
diff --git a/net/netfilter/nf_conntrack_seqadj.c b/net/netfilter/nf_conntrack_seqadj.c
index 17c1bcb182c6..b2d38da67822 100644
--- a/net/netfilter/nf_conntrack_seqadj.c
+++ b/net/netfilter/nf_conntrack_seqadj.c
@@ -36,6 +36,11 @@ int nf_ct_seqadj_set(struct nf_conn *ct, enum ip_conntrack_info ctinfo,
36 if (off == 0) 36 if (off == 0)
37 return 0; 37 return 0;
38 38
39 if (unlikely(!seqadj)) {
40 WARN(1, "Wrong seqadj usage, missing nfct_seqadj_ext_add()\n");
41 return 0;
42 }
43
39 set_bit(IPS_SEQ_ADJUST_BIT, &ct->status); 44 set_bit(IPS_SEQ_ADJUST_BIT, &ct->status);
40 45
41 spin_lock_bh(&ct->lock); 46 spin_lock_bh(&ct->lock);
diff --git a/net/netfilter/nf_conntrack_timestamp.c b/net/netfilter/nf_conntrack_timestamp.c
index 902fb0a6b38a..7a394df0deb7 100644
--- a/net/netfilter/nf_conntrack_timestamp.c
+++ b/net/netfilter/nf_conntrack_timestamp.c
@@ -97,7 +97,6 @@ int nf_conntrack_tstamp_pernet_init(struct net *net)
97void nf_conntrack_tstamp_pernet_fini(struct net *net) 97void nf_conntrack_tstamp_pernet_fini(struct net *net)
98{ 98{
99 nf_conntrack_tstamp_fini_sysctl(net); 99 nf_conntrack_tstamp_fini_sysctl(net);
100 nf_ct_extend_unregister(&tstamp_extend);
101} 100}
102 101
103int nf_conntrack_tstamp_init(void) 102int nf_conntrack_tstamp_init(void)
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
index 629b6da98318..1fcef1ec1dc1 100644
--- a/net/netfilter/nf_tables_api.c
+++ b/net/netfilter/nf_tables_api.c
@@ -313,6 +313,9 @@ static int nf_tables_table_enable(struct nft_table *table)
313 int err, i = 0; 313 int err, i = 0;
314 314
315 list_for_each_entry(chain, &table->chains, list) { 315 list_for_each_entry(chain, &table->chains, list) {
316 if (!(chain->flags & NFT_BASE_CHAIN))
317 continue;
318
316 err = nf_register_hook(&nft_base_chain(chain)->ops); 319 err = nf_register_hook(&nft_base_chain(chain)->ops);
317 if (err < 0) 320 if (err < 0)
318 goto err; 321 goto err;
@@ -322,6 +325,9 @@ static int nf_tables_table_enable(struct nft_table *table)
322 return 0; 325 return 0;
323err: 326err:
324 list_for_each_entry(chain, &table->chains, list) { 327 list_for_each_entry(chain, &table->chains, list) {
328 if (!(chain->flags & NFT_BASE_CHAIN))
329 continue;
330
325 if (i-- <= 0) 331 if (i-- <= 0)
326 break; 332 break;
327 333
@@ -334,8 +340,10 @@ static int nf_tables_table_disable(struct nft_table *table)
334{ 340{
335 struct nft_chain *chain; 341 struct nft_chain *chain;
336 342
337 list_for_each_entry(chain, &table->chains, list) 343 list_for_each_entry(chain, &table->chains, list) {
338 nf_unregister_hook(&nft_base_chain(chain)->ops); 344 if (chain->flags & NFT_BASE_CHAIN)
345 nf_unregister_hook(&nft_base_chain(chain)->ops);
346 }
339 347
340 return 0; 348 return 0;
341} 349}
@@ -2104,17 +2112,21 @@ static int nf_tables_dump_sets_family(struct nft_ctx *ctx, struct sk_buff *skb,
2104 struct netlink_callback *cb) 2112 struct netlink_callback *cb)
2105{ 2113{
2106 const struct nft_set *set; 2114 const struct nft_set *set;
2107 unsigned int idx = 0, s_idx = cb->args[0]; 2115 unsigned int idx, s_idx = cb->args[0];
2108 struct nft_table *table, *cur_table = (struct nft_table *)cb->args[2]; 2116 struct nft_table *table, *cur_table = (struct nft_table *)cb->args[2];
2109 2117
2110 if (cb->args[1]) 2118 if (cb->args[1])
2111 return skb->len; 2119 return skb->len;
2112 2120
2113 list_for_each_entry(table, &ctx->afi->tables, list) { 2121 list_for_each_entry(table, &ctx->afi->tables, list) {
2114 if (cur_table && cur_table != table) 2122 if (cur_table) {
2115 continue; 2123 if (cur_table != table)
2124 continue;
2116 2125
2126 cur_table = NULL;
2127 }
2117 ctx->table = table; 2128 ctx->table = table;
2129 idx = 0;
2118 list_for_each_entry(set, &ctx->table->sets, list) { 2130 list_for_each_entry(set, &ctx->table->sets, list) {
2119 if (idx < s_idx) 2131 if (idx < s_idx)
2120 goto cont; 2132 goto cont;
@@ -2443,7 +2455,9 @@ static int nf_tables_bind_check_setelem(const struct nft_ctx *ctx,
2443 enum nft_registers dreg; 2455 enum nft_registers dreg;
2444 2456
2445 dreg = nft_type_to_reg(set->dtype); 2457 dreg = nft_type_to_reg(set->dtype);
2446 return nft_validate_data_load(ctx, dreg, &elem->data, set->dtype); 2458 return nft_validate_data_load(ctx, dreg, &elem->data,
2459 set->dtype == NFT_DATA_VERDICT ?
2460 NFT_DATA_VERDICT : NFT_DATA_VALUE);
2447} 2461}
2448 2462
2449int nf_tables_bind_set(const struct nft_ctx *ctx, struct nft_set *set, 2463int nf_tables_bind_set(const struct nft_ctx *ctx, struct nft_set *set,
diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c
index 7d4254b0dc6b..d292c8d286eb 100644
--- a/net/netfilter/nfnetlink_log.c
+++ b/net/netfilter/nfnetlink_log.c
@@ -1050,6 +1050,7 @@ static void __net_exit nfnl_log_net_exit(struct net *net)
1050#ifdef CONFIG_PROC_FS 1050#ifdef CONFIG_PROC_FS
1051 remove_proc_entry("nfnetlink_log", net->nf.proc_netfilter); 1051 remove_proc_entry("nfnetlink_log", net->nf.proc_netfilter);
1052#endif 1052#endif
1053 nf_log_unset(net, &nfulnl_logger);
1053} 1054}
1054 1055
1055static struct pernet_operations nfnl_log_net_ops = { 1056static struct pernet_operations nfnl_log_net_ops = {
diff --git a/net/netfilter/nft_exthdr.c b/net/netfilter/nft_exthdr.c
index 8e0bb75e7c51..55c939f5371f 100644
--- a/net/netfilter/nft_exthdr.c
+++ b/net/netfilter/nft_exthdr.c
@@ -31,7 +31,7 @@ static void nft_exthdr_eval(const struct nft_expr *expr,
31{ 31{
32 struct nft_exthdr *priv = nft_expr_priv(expr); 32 struct nft_exthdr *priv = nft_expr_priv(expr);
33 struct nft_data *dest = &data[priv->dreg]; 33 struct nft_data *dest = &data[priv->dreg];
34 unsigned int offset; 34 unsigned int offset = 0;
35 int err; 35 int err;
36 36
37 err = ipv6_find_hdr(pkt->skb, &offset, priv->type, NULL, NULL); 37 err = ipv6_find_hdr(pkt->skb, &offset, priv->type, NULL, NULL);
diff --git a/net/rds/ib.c b/net/rds/ib.c
index b4c8b0022fee..ba2dffeff608 100644
--- a/net/rds/ib.c
+++ b/net/rds/ib.c
@@ -338,7 +338,8 @@ static int rds_ib_laddr_check(__be32 addr)
338 ret = rdma_bind_addr(cm_id, (struct sockaddr *)&sin); 338 ret = rdma_bind_addr(cm_id, (struct sockaddr *)&sin);
339 /* due to this, we will claim to support iWARP devices unless we 339 /* due to this, we will claim to support iWARP devices unless we
340 check node_type. */ 340 check node_type. */
341 if (ret || cm_id->device->node_type != RDMA_NODE_IB_CA) 341 if (ret || !cm_id->device ||
342 cm_id->device->node_type != RDMA_NODE_IB_CA)
342 ret = -EADDRNOTAVAIL; 343 ret = -EADDRNOTAVAIL;
343 344
344 rdsdebug("addr %pI4 ret %d node type %d\n", 345 rdsdebug("addr %pI4 ret %d node type %d\n",
diff --git a/net/rose/af_rose.c b/net/rose/af_rose.c
index 81f94b1ae1c7..d080eb4b0d29 100644
--- a/net/rose/af_rose.c
+++ b/net/rose/af_rose.c
@@ -1253,6 +1253,7 @@ static int rose_recvmsg(struct kiocb *iocb, struct socket *sock,
1253 1253
1254 if (msg->msg_name) { 1254 if (msg->msg_name) {
1255 struct sockaddr_rose *srose; 1255 struct sockaddr_rose *srose;
1256 struct full_sockaddr_rose *full_srose = msg->msg_name;
1256 1257
1257 memset(msg->msg_name, 0, sizeof(struct full_sockaddr_rose)); 1258 memset(msg->msg_name, 0, sizeof(struct full_sockaddr_rose));
1258 srose = msg->msg_name; 1259 srose = msg->msg_name;
@@ -1260,18 +1261,9 @@ static int rose_recvmsg(struct kiocb *iocb, struct socket *sock,
1260 srose->srose_addr = rose->dest_addr; 1261 srose->srose_addr = rose->dest_addr;
1261 srose->srose_call = rose->dest_call; 1262 srose->srose_call = rose->dest_call;
1262 srose->srose_ndigis = rose->dest_ndigis; 1263 srose->srose_ndigis = rose->dest_ndigis;
1263 if (msg->msg_namelen >= sizeof(struct full_sockaddr_rose)) { 1264 for (n = 0 ; n < rose->dest_ndigis ; n++)
1264 struct full_sockaddr_rose *full_srose = (struct full_sockaddr_rose *)msg->msg_name; 1265 full_srose->srose_digis[n] = rose->dest_digis[n];
1265 for (n = 0 ; n < rose->dest_ndigis ; n++) 1266 msg->msg_namelen = sizeof(struct full_sockaddr_rose);
1266 full_srose->srose_digis[n] = rose->dest_digis[n];
1267 msg->msg_namelen = sizeof(struct full_sockaddr_rose);
1268 } else {
1269 if (rose->dest_ndigis >= 1) {
1270 srose->srose_ndigis = 1;
1271 srose->srose_digi = rose->dest_digis[0];
1272 }
1273 msg->msg_namelen = sizeof(struct sockaddr_rose);
1274 }
1275 } 1267 }
1276 1268
1277 skb_free_datagram(sk, skb); 1269 skb_free_datagram(sk, skb);
diff --git a/net/sched/act_csum.c b/net/sched/act_csum.c
index 9cc6717c5f19..8b1d65772a8d 100644
--- a/net/sched/act_csum.c
+++ b/net/sched/act_csum.c
@@ -70,16 +70,16 @@ static int tcf_csum_init(struct net *n, struct nlattr *nla, struct nlattr *est,
70 &csum_idx_gen, &csum_hash_info); 70 &csum_idx_gen, &csum_hash_info);
71 if (IS_ERR(pc)) 71 if (IS_ERR(pc))
72 return PTR_ERR(pc); 72 return PTR_ERR(pc);
73 p = to_tcf_csum(pc);
74 ret = ACT_P_CREATED; 73 ret = ACT_P_CREATED;
75 } else { 74 } else {
76 p = to_tcf_csum(pc); 75 if (bind)/* don't override defaults */
77 if (!ovr) { 76 return 0;
78 tcf_hash_release(pc, bind, &csum_hash_info); 77 tcf_hash_release(pc, bind, &csum_hash_info);
78 if (!ovr)
79 return -EEXIST; 79 return -EEXIST;
80 }
81 } 80 }
82 81
82 p = to_tcf_csum(pc);
83 spin_lock_bh(&p->tcf_lock); 83 spin_lock_bh(&p->tcf_lock);
84 p->tcf_action = parm->action; 84 p->tcf_action = parm->action;
85 p->update_flags = parm->update_flags; 85 p->update_flags = parm->update_flags;
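
act_csum is the first of several actions below converted to the same existing-action ladder: on a bind, keep the existing defaults and return 0; otherwise drop the reference, and only proceed when the caller asked to override (ovr), else fail with -EEXIST. A minimal sketch of that decision order (stubbed types; the real code operates on tcf hash entries):

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

static void release(bool bind) { (void)bind; /* drop a reference */ }

/* Ladder used when the action already exists. */
static int existing_action(bool bind, bool ovr)
{
        if (bind)               /* don't override defaults */
                return 0;
        release(bind);
        if (!ovr)
                return -EEXIST; /* not replacing */
        return 1;               /* fall through: re-init parameters */
}

int main(void)
{
        printf("bind: %d\n", existing_action(true, false));
        printf("plain add: %d\n", existing_action(false, false));
        printf("replace: %d\n", existing_action(false, true));
        return 0;
}
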
diff --git a/net/sched/act_gact.c b/net/sched/act_gact.c
index dea927343bf4..af5641c290fa 100644
--- a/net/sched/act_gact.c
+++ b/net/sched/act_gact.c
@@ -95,10 +95,11 @@ static int tcf_gact_init(struct net *net, struct nlattr *nla,
95 return PTR_ERR(pc); 95 return PTR_ERR(pc);
96 ret = ACT_P_CREATED; 96 ret = ACT_P_CREATED;
97 } else { 97 } else {
98 if (!ovr) { 98 if (bind)/* don't override defaults */
99 tcf_hash_release(pc, bind, &gact_hash_info); 99 return 0;
100 tcf_hash_release(pc, bind, &gact_hash_info);
101 if (!ovr)
100 return -EEXIST; 102 return -EEXIST;
101 }
102 } 103 }
103 104
104 gact = to_gact(pc); 105 gact = to_gact(pc);
diff --git a/net/sched/act_ipt.c b/net/sched/act_ipt.c
index e13ecbbfe8c4..242636950ea5 100644
--- a/net/sched/act_ipt.c
+++ b/net/sched/act_ipt.c
@@ -134,10 +134,12 @@ static int tcf_ipt_init(struct net *net, struct nlattr *nla, struct nlattr *est,
134 return PTR_ERR(pc); 134 return PTR_ERR(pc);
135 ret = ACT_P_CREATED; 135 ret = ACT_P_CREATED;
136 } else { 136 } else {
137 if (!ovr) { 137 if (bind)/* don't override defaults */
138 tcf_ipt_release(to_ipt(pc), bind); 138 return 0;
139 tcf_ipt_release(to_ipt(pc), bind);
140
141 if (!ovr)
139 return -EEXIST; 142 return -EEXIST;
140 }
141 } 143 }
142 ipt = to_ipt(pc); 144 ipt = to_ipt(pc);
143 145
diff --git a/net/sched/act_nat.c b/net/sched/act_nat.c
index 921fea43fca2..584e65503edb 100644
--- a/net/sched/act_nat.c
+++ b/net/sched/act_nat.c
@@ -64,15 +64,15 @@ static int tcf_nat_init(struct net *net, struct nlattr *nla, struct nlattr *est,
 					 &nat_idx_gen, &nat_hash_info);
 		if (IS_ERR(pc))
 			return PTR_ERR(pc);
-		p = to_tcf_nat(pc);
 		ret = ACT_P_CREATED;
 	} else {
-		p = to_tcf_nat(pc);
-		if (!ovr) {
-			tcf_hash_release(pc, bind, &nat_hash_info);
+		if (bind)
+			return 0;
+		tcf_hash_release(pc, bind, &nat_hash_info);
+		if (!ovr)
 			return -EEXIST;
-		}
 	}
+	p = to_tcf_nat(pc);
 
 	spin_lock_bh(&p->tcf_lock);
 	p->old_addr = parm->old_addr;
diff --git a/net/sched/act_pedit.c b/net/sched/act_pedit.c
index e2520e90a10d..729189341933 100644
--- a/net/sched/act_pedit.c
+++ b/net/sched/act_pedit.c
@@ -78,10 +78,12 @@ static int tcf_pedit_init(struct net *net, struct nlattr *nla,
 		ret = ACT_P_CREATED;
 	} else {
 		p = to_pedit(pc);
-		if (!ovr) {
-			tcf_hash_release(pc, bind, &pedit_hash_info);
+		tcf_hash_release(pc, bind, &pedit_hash_info);
+		if (bind)
+			return 0;
+		if (!ovr)
 			return -EEXIST;
-		}
+
 		if (p->tcfp_nkeys && p->tcfp_nkeys != parm->nkeys) {
 			keys = kmalloc(ksize, GFP_KERNEL);
 			if (keys == NULL)
diff --git a/net/sched/act_police.c b/net/sched/act_police.c
index 819a9a4d1987..9295b86d5319 100644
--- a/net/sched/act_police.c
+++ b/net/sched/act_police.c
@@ -162,10 +162,12 @@ static int tcf_act_police_locate(struct net *net, struct nlattr *nla,
 		if (bind) {
 			police->tcf_bindcnt += 1;
 			police->tcf_refcnt += 1;
+			return 0;
 		}
 		if (ovr)
 			goto override;
-		return ret;
+		/* not replacing */
+		return -EEXIST;
 	}
 }
 
diff --git a/net/sched/act_simple.c b/net/sched/act_simple.c
index 81aebc162e5c..b44491e3ec17 100644
--- a/net/sched/act_simple.c
+++ b/net/sched/act_simple.c
@@ -135,10 +135,13 @@ static int tcf_simp_init(struct net *net, struct nlattr *nla,
 		ret = ACT_P_CREATED;
 	} else {
 		d = to_defact(pc);
-		if (!ovr) {
-			tcf_simp_release(d, bind);
+
+		if (bind)
+			return 0;
+		tcf_simp_release(d, bind);
+		if (!ovr)
 			return -EEXIST;
-		}
+
 		reset_policy(d, defdata, parm);
 	}
 
diff --git a/net/sched/act_skbedit.c b/net/sched/act_skbedit.c
index aa0a4c056f31..0fa1aad6e204 100644
--- a/net/sched/act_skbedit.c
+++ b/net/sched/act_skbedit.c
@@ -112,10 +112,11 @@ static int tcf_skbedit_init(struct net *net, struct nlattr *nla,
 		ret = ACT_P_CREATED;
 	} else {
 		d = to_skbedit(pc);
-		if (!ovr) {
-			tcf_hash_release(pc, bind, &skbedit_hash_info);
+		if (bind)
+			return 0;
+		tcf_hash_release(pc, bind, &skbedit_hash_info);
+		if (!ovr)
 			return -EEXIST;
-		}
 	}
 
 	spin_lock_bh(&d->tcf_lock);
diff --git a/net/sctp/outqueue.c b/net/sctp/outqueue.c
index 111516c3d34c..9c77947c0597 100644
--- a/net/sctp/outqueue.c
+++ b/net/sctp/outqueue.c
@@ -207,8 +207,6 @@ void sctp_outq_init(struct sctp_association *asoc, struct sctp_outq *q)
 	INIT_LIST_HEAD(&q->retransmit);
 	INIT_LIST_HEAD(&q->sacked);
 	INIT_LIST_HEAD(&q->abandoned);
-
-	q->empty = 1;
 }
 
 /* Free the outqueue structure and any related pending chunks.
@@ -331,7 +329,6 @@ int sctp_outq_tail(struct sctp_outq *q, struct sctp_chunk *chunk)
 				SCTP_INC_STATS(net, SCTP_MIB_OUTUNORDERCHUNKS);
 			else
 				SCTP_INC_STATS(net, SCTP_MIB_OUTORDERCHUNKS);
-			q->empty = 0;
 			break;
 		}
 	} else {
@@ -653,7 +650,6 @@ redo:
 			if (chunk->fast_retransmit == SCTP_NEED_FRTX)
 				chunk->fast_retransmit = SCTP_DONT_FRTX;
 
-			q->empty = 0;
 			q->asoc->stats.rtxchunks++;
 			break;
 		}
@@ -1064,8 +1060,6 @@ static int sctp_outq_flush(struct sctp_outq *q, int rtx_timeout)
 
 			sctp_transport_reset_timers(transport);
 
-			q->empty = 0;
-
 			/* Only let one DATA chunk get bundled with a
 			 * COOKIE-ECHO chunk.
 			 */
@@ -1274,29 +1268,17 @@ int sctp_outq_sack(struct sctp_outq *q, struct sctp_chunk *chunk)
 		 "advertised peer ack point:0x%x\n", __func__, asoc, ctsn,
 		 asoc->adv_peer_ack_point);
 
-	/* See if all chunks are acked.
-	 * Make sure the empty queue handler will get run later.
-	 */
-	q->empty = (list_empty(&q->out_chunk_list) &&
-		    list_empty(&q->retransmit));
-	if (!q->empty)
-		goto finish;
-
-	list_for_each_entry(transport, transport_list, transports) {
-		q->empty = q->empty && list_empty(&transport->transmitted);
-		if (!q->empty)
-			goto finish;
-	}
-
-	pr_debug("%s: sack queue is empty\n", __func__);
-finish:
-	return q->empty;
+	return sctp_outq_is_empty(q);
 }
 
-/* Is the outqueue empty? */
+/* Is the outqueue empty?
+ * The queue is empty when we have not pending data, no in-flight data
+ * and nothing pending retransmissions.
+ */
 int sctp_outq_is_empty(const struct sctp_outq *q)
 {
-	return q->empty;
+	return q->out_qlen == 0 && q->outstanding_bytes == 0 &&
+	       list_empty(&q->retransmit);
 }
 
 /********************************************************************
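
With the cached q->empty flag removed, emptiness is now computed on demand from three conditions: nothing queued, nothing in flight, and nothing awaiting retransmission. A self-contained sketch of that predicate, using a stripped-down stand-in for struct sctp_outq:

#include <stdbool.h>
#include <stdio.h>

/* Stripped-down stand-in for struct sctp_outq. */
struct outq {
	unsigned out_qlen;		/* bytes waiting to be sent */
	unsigned outstanding_bytes;	/* sent but not yet acked */
	bool     retransmit_empty;	/* list_empty(&q->retransmit) */
};

/* The queue is empty only when all three sources are drained. */
static bool outq_is_empty(const struct outq *q)
{
	return q->out_qlen == 0 && q->outstanding_bytes == 0 &&
	       q->retransmit_empty;
}

int main(void)
{
	struct outq q = { 0, 0, true };

	printf("empty=%d\n", outq_is_empty(&q));
	q.outstanding_bytes = 100;	/* in-flight data keeps it non-empty */
	printf("empty=%d\n", outq_is_empty(&q));
	return 0;
}
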
diff --git a/net/tipc/port.c b/net/tipc/port.c
index 5fd4c8cec08e..b742b2654525 100644
--- a/net/tipc/port.c
+++ b/net/tipc/port.c
@@ -251,18 +251,15 @@ struct tipc_port *tipc_createport(struct sock *sk,
 	return p_ptr;
 }
 
-int tipc_deleteport(u32 ref)
+int tipc_deleteport(struct tipc_port *p_ptr)
 {
-	struct tipc_port *p_ptr;
 	struct sk_buff *buf = NULL;
 
-	tipc_withdraw(ref, 0, NULL);
-	p_ptr = tipc_port_lock(ref);
-	if (!p_ptr)
-		return -EINVAL;
+	tipc_withdraw(p_ptr, 0, NULL);
 
-	tipc_ref_discard(ref);
-	tipc_port_unlock(p_ptr);
+	spin_lock_bh(p_ptr->lock);
+	tipc_ref_discard(p_ptr->ref);
+	spin_unlock_bh(p_ptr->lock);
 
 	k_cancel_timer(&p_ptr->timer);
 	if (p_ptr->connected) {
@@ -704,47 +701,36 @@ int tipc_set_portimportance(u32 ref, unsigned int imp)
 }
 
 
-int tipc_publish(u32 ref, unsigned int scope, struct tipc_name_seq const *seq)
+int tipc_publish(struct tipc_port *p_ptr, unsigned int scope,
+		 struct tipc_name_seq const *seq)
 {
-	struct tipc_port *p_ptr;
 	struct publication *publ;
 	u32 key;
-	int res = -EINVAL;
 
-	p_ptr = tipc_port_lock(ref);
-	if (!p_ptr)
+	if (p_ptr->connected)
 		return -EINVAL;
+	key = p_ptr->ref + p_ptr->pub_count + 1;
+	if (key == p_ptr->ref)
+		return -EADDRINUSE;
 
-	if (p_ptr->connected)
-		goto exit;
-	key = ref + p_ptr->pub_count + 1;
-	if (key == ref) {
-		res = -EADDRINUSE;
-		goto exit;
-	}
 	publ = tipc_nametbl_publish(seq->type, seq->lower, seq->upper,
 				    scope, p_ptr->ref, key);
 	if (publ) {
 		list_add(&publ->pport_list, &p_ptr->publications);
 		p_ptr->pub_count++;
 		p_ptr->published = 1;
-		res = 0;
+		return 0;
 	}
-exit:
-	tipc_port_unlock(p_ptr);
-	return res;
+	return -EINVAL;
 }
 
-int tipc_withdraw(u32 ref, unsigned int scope, struct tipc_name_seq const *seq)
+int tipc_withdraw(struct tipc_port *p_ptr, unsigned int scope,
+		  struct tipc_name_seq const *seq)
 {
-	struct tipc_port *p_ptr;
 	struct publication *publ;
 	struct publication *tpubl;
 	int res = -EINVAL;
 
-	p_ptr = tipc_port_lock(ref);
-	if (!p_ptr)
-		return -EINVAL;
 	if (!seq) {
 		list_for_each_entry_safe(publ, tpubl,
 					 &p_ptr->publications, pport_list) {
@@ -771,7 +757,6 @@ int tipc_withdraw(u32 ref, unsigned int scope, struct tipc_name_seq const *seq)
 	}
 	if (list_empty(&p_ptr->publications))
 		p_ptr->published = 0;
-	tipc_port_unlock(p_ptr);
 	return res;
 }
 
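
The tipc_deleteport(), tipc_publish() and tipc_withdraw() conversions above replace a 32-bit reference, which each call had to look up and lock, with the struct tipc_port pointer itself; only the reference-table removal still takes the port spinlock. A toy sketch of the difference between the two calling conventions, with a hypothetical lookup table:

#include <stddef.h>
#include <stdio.h>

struct port { unsigned ref; int published; };

/* Hypothetical reference table, as in the old ref-based API. */
static struct port *table[16];

static struct port *port_lookup(unsigned ref)	/* old: lookup per call */
{
	return ref < 16 ? table[ref] : NULL;
}

static int withdraw_by_ref(unsigned ref)	/* old: every call can fail */
{
	struct port *p = port_lookup(ref);

	if (!p)
		return -1;
	p->published = 0;
	return 0;
}

static int withdraw_by_ptr(struct port *p)	/* new: caller owns a pointer */
{
	p->published = 0;
	return 0;
}

int main(void)
{
	struct port p = { .ref = 3, .published = 1 };

	table[3] = &p;
	printf("%d %d\n", withdraw_by_ref(3), withdraw_by_ptr(&p));
	return 0;
}
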
diff --git a/net/tipc/port.h b/net/tipc/port.h
index 912253597343..34f12bd4074e 100644
--- a/net/tipc/port.h
+++ b/net/tipc/port.h
@@ -116,7 +116,7 @@ int tipc_reject_msg(struct sk_buff *buf, u32 err);
 
 void tipc_acknowledge(u32 port_ref, u32 ack);
 
-int tipc_deleteport(u32 portref);
+int tipc_deleteport(struct tipc_port *p_ptr);
 
 int tipc_portimportance(u32 portref, unsigned int *importance);
 int tipc_set_portimportance(u32 portref, unsigned int importance);
@@ -127,9 +127,9 @@ int tipc_set_portunreliable(u32 portref, unsigned int isunreliable);
 int tipc_portunreturnable(u32 portref, unsigned int *isunreturnable);
 int tipc_set_portunreturnable(u32 portref, unsigned int isunreturnable);
 
-int tipc_publish(u32 portref, unsigned int scope,
+int tipc_publish(struct tipc_port *p_ptr, unsigned int scope,
 		 struct tipc_name_seq const *name_seq);
-int tipc_withdraw(u32 portref, unsigned int scope,
+int tipc_withdraw(struct tipc_port *p_ptr, unsigned int scope,
 		  struct tipc_name_seq const *name_seq);
 
 int tipc_connect(u32 portref, struct tipc_portid const *port);
diff --git a/net/tipc/socket.c b/net/tipc/socket.c
index 5efdeef06f9d..c8341d1f995e 100644
--- a/net/tipc/socket.c
+++ b/net/tipc/socket.c
@@ -351,7 +351,7 @@ static int release(struct socket *sock)
 	 * Delete TIPC port; this ensures no more messages are queued
 	 * (also disconnects an active connection & sends a 'FIN-' to peer)
 	 */
-	res = tipc_deleteport(tport->ref);
+	res = tipc_deleteport(tport);
 
 	/* Discard any remaining (connection-based) messages in receive queue */
 	__skb_queue_purge(&sk->sk_receive_queue);
@@ -383,30 +383,46 @@ static int release(struct socket *sock)
  */
 static int bind(struct socket *sock, struct sockaddr *uaddr, int uaddr_len)
 {
+	struct sock *sk = sock->sk;
 	struct sockaddr_tipc *addr = (struct sockaddr_tipc *)uaddr;
-	u32 portref = tipc_sk_port(sock->sk)->ref;
+	struct tipc_port *tport = tipc_sk_port(sock->sk);
+	int res = -EINVAL;
 
-	if (unlikely(!uaddr_len))
-		return tipc_withdraw(portref, 0, NULL);
+	lock_sock(sk);
+	if (unlikely(!uaddr_len)) {
+		res = tipc_withdraw(tport, 0, NULL);
+		goto exit;
+	}
 
-	if (uaddr_len < sizeof(struct sockaddr_tipc))
-		return -EINVAL;
-	if (addr->family != AF_TIPC)
-		return -EAFNOSUPPORT;
+	if (uaddr_len < sizeof(struct sockaddr_tipc)) {
+		res = -EINVAL;
+		goto exit;
+	}
+	if (addr->family != AF_TIPC) {
+		res = -EAFNOSUPPORT;
+		goto exit;
+	}
 
 	if (addr->addrtype == TIPC_ADDR_NAME)
 		addr->addr.nameseq.upper = addr->addr.nameseq.lower;
-	else if (addr->addrtype != TIPC_ADDR_NAMESEQ)
-		return -EAFNOSUPPORT;
+	else if (addr->addrtype != TIPC_ADDR_NAMESEQ) {
+		res = -EAFNOSUPPORT;
+		goto exit;
+	}
 
 	if ((addr->addr.nameseq.type < TIPC_RESERVED_TYPES) &&
 	    (addr->addr.nameseq.type != TIPC_TOP_SRV) &&
-	    (addr->addr.nameseq.type != TIPC_CFG_SRV))
-		return -EACCES;
+	    (addr->addr.nameseq.type != TIPC_CFG_SRV)) {
+		res = -EACCES;
+		goto exit;
+	}
 
-	return (addr->scope > 0) ?
-		tipc_publish(portref, addr->scope, &addr->addr.nameseq) :
-		tipc_withdraw(portref, -addr->scope, &addr->addr.nameseq);
+	res = (addr->scope > 0) ?
+		tipc_publish(tport, addr->scope, &addr->addr.nameseq) :
+		tipc_withdraw(tport, -addr->scope, &addr->addr.nameseq);
+exit:
+	release_sock(sk);
+	return res;
 }
 
 /**
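
bind() now holds the socket lock for its whole body, so every early return is rewritten as a goto to a single exit label that drops the lock. A compact standalone sketch of that single-unlock shape, using a pthread mutex in place of lock_sock()/release_sock():

#include <errno.h>
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t sock_lock = PTHREAD_MUTEX_INITIALIZER;

/* All validation failures funnel through one unlock site. */
static int do_bind(size_t addr_len, int family)
{
	int res = 0;

	pthread_mutex_lock(&sock_lock);
	if (addr_len < 8) {
		res = -EINVAL;
		goto exit;
	}
	if (family != 30 /* stands in for AF_TIPC */) {
		res = -EAFNOSUPPORT;
		goto exit;
	}
	/* ... publish or withdraw the name here ... */
exit:
	pthread_mutex_unlock(&sock_lock);
	return res;
}

int main(void)
{
	printf("%d %d\n", do_bind(4, 30), do_bind(16, 30));
	return 0;
}
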
diff --git a/net/wireless/radiotap.c b/net/wireless/radiotap.c
index a271c27fac77..722da616438c 100644
--- a/net/wireless/radiotap.c
+++ b/net/wireless/radiotap.c
@@ -124,6 +124,10 @@ int ieee80211_radiotap_iterator_init(
 	/* find payload start allowing for extended bitmap(s) */
 
 	if (iterator->_bitmap_shifter & (1<<IEEE80211_RADIOTAP_EXT)) {
+		if ((unsigned long)iterator->_arg -
+		    (unsigned long)iterator->_rtheader + sizeof(uint32_t) >
+		    (unsigned long)iterator->_max_length)
+			return -EINVAL;
 		while (get_unaligned_le32(iterator->_arg) &
 		       (1 << IEEE80211_RADIOTAP_EXT)) {
 			iterator->_arg += sizeof(uint32_t);
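
The added check stops the extended-bitmap walk from reading past the end of the radiotap header: before the next 32-bit bitmap word is dereferenced, the iterator verifies that the word still fits within _max_length bytes of the header start. The same arithmetic in a standalone form, with illustrative names:

#include <stdint.h>
#include <stdio.h>

/* Return -1 if reading 4 bytes at 'arg' would run past the buffer. */
static int check_bitmap_word(const uint8_t *hdr, const uint8_t *arg,
			     size_t max_length)
{
	if ((size_t)(arg - hdr) + sizeof(uint32_t) > max_length)
		return -1;
	return 0;
}

int main(void)
{
	uint8_t buf[8] = { 0 };

	printf("in-bounds:%d\n", check_bitmap_word(buf, buf + 4, sizeof(buf)));
	printf("overflow:%d\n", check_bitmap_word(buf, buf + 6, sizeof(buf)));
	return 0;
}
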
diff --git a/net/wireless/sme.c b/net/wireless/sme.c
index 65f800890d70..d3c5bd7c6b51 100644
--- a/net/wireless/sme.c
+++ b/net/wireless/sme.c
@@ -632,6 +632,16 @@ void __cfg80211_connect_result(struct net_device *dev, const u8 *bssid,
 	}
 #endif
 
+	if (!bss && (status == WLAN_STATUS_SUCCESS)) {
+		WARN_ON_ONCE(!wiphy_to_dev(wdev->wiphy)->ops->connect);
+		bss = cfg80211_get_bss(wdev->wiphy, NULL, bssid,
+				       wdev->ssid, wdev->ssid_len,
+				       WLAN_CAPABILITY_ESS,
+				       WLAN_CAPABILITY_ESS);
+		if (bss)
+			cfg80211_hold_bss(bss_from_pub(bss));
+	}
+
 	if (wdev->current_bss) {
 		cfg80211_unhold_bss(wdev->current_bss);
 		cfg80211_put_bss(wdev->wiphy, &wdev->current_bss->pub);
@@ -649,16 +659,8 @@ void __cfg80211_connect_result(struct net_device *dev, const u8 *bssid,
 		return;
 	}
 
-	if (!bss) {
-		WARN_ON_ONCE(!wiphy_to_dev(wdev->wiphy)->ops->connect);
-		bss = cfg80211_get_bss(wdev->wiphy, NULL, bssid,
-				       wdev->ssid, wdev->ssid_len,
-				       WLAN_CAPABILITY_ESS,
-				       WLAN_CAPABILITY_ESS);
-		if (WARN_ON(!bss))
-			return;
-		cfg80211_hold_bss(bss_from_pub(bss));
-	}
+	if (WARN_ON(!bss))
+		return;
 
 	wdev->current_bss = bss_from_pub(bss);
 
diff --git a/scripts/link-vmlinux.sh b/scripts/link-vmlinux.sh
index 32b10f53d0b4..2dcb37736d84 100644
--- a/scripts/link-vmlinux.sh
+++ b/scripts/link-vmlinux.sh
@@ -82,7 +82,9 @@ kallsyms()
 		kallsymopt="${kallsymopt} --all-symbols"
 	fi
 
-	kallsymopt="${kallsymopt} --page-offset=$CONFIG_PAGE_OFFSET"
+	if [ -n "${CONFIG_ARM}" ] && [ -n "${CONFIG_PAGE_OFFSET}" ]; then
+		kallsymopt="${kallsymopt} --page-offset=$CONFIG_PAGE_OFFSET"
+	fi
 
 	local aflags="${KBUILD_AFLAGS} ${KBUILD_AFLAGS_KERNEL} \
 		      ${NOSTDINC_FLAGS} ${LINUXINCLUDE} ${KBUILD_CPPFLAGS}"
diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
index 419491d8e7d2..6625699f497c 100644
--- a/security/selinux/hooks.c
+++ b/security/selinux/hooks.c
@@ -4334,8 +4334,10 @@ static int selinux_socket_sock_rcv_skb(struct sock *sk, struct sk_buff *skb)
 		}
 		err = avc_has_perm(sk_sid, peer_sid, SECCLASS_PEER,
 				   PEER__RECV, &ad);
-		if (err)
+		if (err) {
 			selinux_netlbl_err(skb, err, 0);
+			return err;
+		}
 	}
 
 	if (secmark_active) {
@@ -5586,11 +5588,11 @@ static int selinux_setprocattr(struct task_struct *p,
 		/* Check for ptracing, and update the task SID if ok.
 		   Otherwise, leave SID unchanged and fail. */
 		ptsid = 0;
-		task_lock(p);
+		rcu_read_lock();
 		tracer = ptrace_parent(p);
 		if (tracer)
 			ptsid = task_sid(tracer);
-		task_unlock(p);
+		rcu_read_unlock();
 
 		if (tracer) {
 			error = avc_has_perm(ptsid, sid, SECCLASS_PROCESS,
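
Two independent fixes above: a failed peer-recv permission check now returns the error instead of falling through after logging it, and the ptrace_parent() walk is done under rcu_read_lock() rather than task_lock(). A tiny sketch of the first fix, the missing early return:

#include <errno.h>
#include <stdio.h>

/* Before: the error was logged but execution fell through.
 * After: log, then propagate the error to the caller. */
static int recv_check(int perm_err)
{
	if (perm_err) {
		fprintf(stderr, "netlbl error: %d\n", perm_err);
		return perm_err;	/* the added early return */
	}
	return 0;			/* further checks would follow here */
}

int main(void)
{
	printf("%d %d\n", recv_check(0), recv_check(-EACCES));
	return 0;
}
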
diff --git a/sound/core/pcm_lib.c b/sound/core/pcm_lib.c
index 6e03b465e44e..a2104671f51d 100644
--- a/sound/core/pcm_lib.c
+++ b/sound/core/pcm_lib.c
@@ -1937,6 +1937,8 @@ static int wait_for_avail(struct snd_pcm_substream *substream,
 		case SNDRV_PCM_STATE_DISCONNECTED:
 			err = -EBADFD;
 			goto _endloop;
+		case SNDRV_PCM_STATE_PAUSED:
+			continue;
 		}
 		if (!tout) {
 			snd_printd("%s write error (DMA or IRQ trouble?)\n",
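
The new case keeps wait_for_avail() from mistaking a user-paused stream for a DMA or IRQ stall: while the stream is PAUSED, the loop skips the timeout diagnostic and simply keeps waiting. A standalone sketch of that loop shape, with simplified stand-ins for the stream states and the wait step:

#include <stdio.h>

enum state { RUNNING, PAUSED, DISCONNECTED };

/* Returns 0 once data is available, -1 on disconnect or a real stall. */
static int wait_for_avail(enum state (*poll)(int *avail), int tout)
{
	int avail = 0;

	for (;;) {
		enum state s = poll(&avail);

		if (s == DISCONNECTED)
			return -1;
		if (s == PAUSED)
			continue;	/* paused: skip the stall timeout */
		if (avail)
			return 0;
		if (!tout--)
			return -1;	/* genuine DMA/IRQ-style stall */
	}
}

static enum state fake_poll(int *avail) { *avail = 1; return RUNNING; }

int main(void)
{
	printf("%d\n", wait_for_avail(fake_poll, 3));
	return 0;
}
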
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
index 27aa14007cbd..956871d8b3d2 100644
--- a/sound/pci/hda/hda_intel.c
+++ b/sound/pci/hda/hda_intel.c
@@ -3433,6 +3433,10 @@ static void check_probe_mask(struct azx *chip, int dev)
  * white/black-list for enable_msi
  */
 static struct snd_pci_quirk msi_black_list[] = {
+	SND_PCI_QUIRK(0x103c, 0x2191, "HP", 0), /* AMD Hudson */
+	SND_PCI_QUIRK(0x103c, 0x2192, "HP", 0), /* AMD Hudson */
+	SND_PCI_QUIRK(0x103c, 0x21f7, "HP", 0), /* AMD Hudson */
+	SND_PCI_QUIRK(0x103c, 0x21fa, "HP", 0), /* AMD Hudson */
 	SND_PCI_QUIRK(0x1043, 0x81f2, "ASUS", 0), /* Athlon64 X2 + nvidia */
 	SND_PCI_QUIRK(0x1043, 0x81f6, "ASUS", 0), /* nvidia */
 	SND_PCI_QUIRK(0x1043, 0x822d, "ASUS", 0), /* Athlon64 X2 + nvidia MCP55 */
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index 34de5dc2fe9b..c5646941539a 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -4247,12 +4247,16 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
 	SND_PCI_QUIRK(0x1028, 0x0606, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE),
 	SND_PCI_QUIRK(0x1028, 0x0608, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE),
 	SND_PCI_QUIRK(0x1028, 0x0609, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE),
+	SND_PCI_QUIRK(0x1028, 0x0610, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE),
 	SND_PCI_QUIRK(0x1028, 0x0613, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE),
 	SND_PCI_QUIRK(0x1028, 0x0614, "Dell Inspiron 3135", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE),
 	SND_PCI_QUIRK(0x1028, 0x0616, "Dell Vostro 5470", ALC290_FIXUP_MONO_SPEAKERS),
 	SND_PCI_QUIRK(0x1028, 0x061f, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE),
+	SND_PCI_QUIRK(0x1028, 0x0629, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE),
 	SND_PCI_QUIRK(0x1028, 0x0638, "Dell Inspiron 5439", ALC290_FIXUP_MONO_SPEAKERS),
+	SND_PCI_QUIRK(0x1028, 0x063e, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE),
 	SND_PCI_QUIRK(0x1028, 0x063f, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE),
+	SND_PCI_QUIRK(0x1028, 0x0640, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE),
 	SND_PCI_QUIRK(0x1028, 0x15cc, "Dell X5 Precision", ALC269_FIXUP_DELL2_MIC_NO_PRESENCE),
 	SND_PCI_QUIRK(0x1028, 0x15cd, "Dell X5 Precision", ALC269_FIXUP_DELL2_MIC_NO_PRESENCE),
 	SND_PCI_QUIRK(0x103c, 0x1586, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC2),
diff --git a/sound/soc/atmel/atmel_ssc_dai.c b/sound/soc/atmel/atmel_ssc_dai.c
index 8697cedccd21..1ead3c977a51 100644
--- a/sound/soc/atmel/atmel_ssc_dai.c
+++ b/sound/soc/atmel/atmel_ssc_dai.c
@@ -648,7 +648,7 @@ static int atmel_ssc_prepare(struct snd_pcm_substream *substream,
 
 	dma_params = ssc_p->dma_params[dir];
 
-	ssc_writel(ssc_p->ssc->regs, CR, dma_params->mask->ssc_enable);
+	ssc_writel(ssc_p->ssc->regs, CR, dma_params->mask->ssc_disable);
 	ssc_writel(ssc_p->ssc->regs, IDR, dma_params->mask->ssc_error);
 
 	pr_debug("%s enabled SSC_SR=0x%08x\n",
@@ -657,6 +657,33 @@ static int atmel_ssc_prepare(struct snd_pcm_substream *substream,
 	return 0;
 }
 
+static int atmel_ssc_trigger(struct snd_pcm_substream *substream,
+			     int cmd, struct snd_soc_dai *dai)
+{
+	struct atmel_ssc_info *ssc_p = &ssc_info[dai->id];
+	struct atmel_pcm_dma_params *dma_params;
+	int dir;
+
+	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+		dir = 0;
+	else
+		dir = 1;
+
+	dma_params = ssc_p->dma_params[dir];
+
+	switch (cmd) {
+	case SNDRV_PCM_TRIGGER_START:
+	case SNDRV_PCM_TRIGGER_RESUME:
+	case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
+		ssc_writel(ssc_p->ssc->regs, CR, dma_params->mask->ssc_enable);
+		break;
+	default:
+		ssc_writel(ssc_p->ssc->regs, CR, dma_params->mask->ssc_disable);
+		break;
+	}
+
+	return 0;
+}
 
 #ifdef CONFIG_PM
 static int atmel_ssc_suspend(struct snd_soc_dai *cpu_dai)
@@ -731,6 +758,7 @@ static const struct snd_soc_dai_ops atmel_ssc_dai_ops = {
 	.startup = atmel_ssc_startup,
 	.shutdown = atmel_ssc_shutdown,
 	.prepare = atmel_ssc_prepare,
+	.trigger = atmel_ssc_trigger,
 	.hw_params = atmel_ssc_hw_params,
 	.set_fmt = atmel_ssc_set_dai_fmt,
 	.set_clkdiv = atmel_ssc_set_dai_clkdiv,
diff --git a/sound/soc/atmel/sam9x5_wm8731.c b/sound/soc/atmel/sam9x5_wm8731.c
index 1b372283bd01..7d6a9055874b 100644
--- a/sound/soc/atmel/sam9x5_wm8731.c
+++ b/sound/soc/atmel/sam9x5_wm8731.c
@@ -109,7 +109,7 @@ static int sam9x5_wm8731_driver_probe(struct platform_device *pdev)
 	dai->stream_name = "WM8731 PCM";
 	dai->codec_dai_name = "wm8731-hifi";
 	dai->init = sam9x5_wm8731_init;
-	dai->dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF
+	dai->dai_fmt = SND_SOC_DAIFMT_DSP_A | SND_SOC_DAIFMT_NB_NF
 		| SND_SOC_DAIFMT_CBM_CFM;
 
 	ret = snd_soc_of_parse_card_name(card, "atmel,model");
diff --git a/sound/soc/codecs/wm5110.c b/sound/soc/codecs/wm5110.c
index 99b359e19d35..0ab2dc296474 100644
--- a/sound/soc/codecs/wm5110.c
+++ b/sound/soc/codecs/wm5110.c
@@ -1012,7 +1012,7 @@ static const struct snd_soc_dapm_route wm5110_dapm_routes[] = {
 	{ "AEC Loopback", "HPOUT3L", "OUT3L" },
 	{ "AEC Loopback", "HPOUT3R", "OUT3R" },
 	{ "HPOUT3L", NULL, "OUT3L" },
-	{ "HPOUT3R", NULL, "OUT3L" },
+	{ "HPOUT3R", NULL, "OUT3R" },
 
 	{ "AEC Loopback", "SPKOUTL", "OUT4L" },
 	{ "SPKOUTLN", NULL, "OUT4L" },
diff --git a/sound/soc/codecs/wm8904.c b/sound/soc/codecs/wm8904.c
index 3938fb1c203e..53bbfac6a83a 100644
--- a/sound/soc/codecs/wm8904.c
+++ b/sound/soc/codecs/wm8904.c
@@ -1444,7 +1444,7 @@ static int wm8904_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
 
 	switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
 	case SND_SOC_DAIFMT_DSP_B:
-		aif1 |= WM8904_AIF_LRCLK_INV;
+		aif1 |= 0x3 | WM8904_AIF_LRCLK_INV;
 	case SND_SOC_DAIFMT_DSP_A:
 		aif1 |= 0x3;
 		break;
diff --git a/sound/soc/codecs/wm8962.c b/sound/soc/codecs/wm8962.c
index 543c5c2631b6..0f17ed3e29f4 100644
--- a/sound/soc/codecs/wm8962.c
+++ b/sound/soc/codecs/wm8962.c
@@ -2439,7 +2439,20 @@ static void wm8962_configure_bclk(struct snd_soc_codec *codec)
 	snd_soc_update_bits(codec, WM8962_CLOCKING_4,
 			    WM8962_SYSCLK_RATE_MASK, clocking4);
 
+	/* DSPCLK_DIV can be only generated correctly after enabling SYSCLK.
+	 * So we here provisionally enable it and then disable it afterward
+	 * if current bias_level hasn't reached SND_SOC_BIAS_ON.
+	 */
+	if (codec->dapm.bias_level != SND_SOC_BIAS_ON)
+		snd_soc_update_bits(codec, WM8962_CLOCKING2,
+				    WM8962_SYSCLK_ENA_MASK, WM8962_SYSCLK_ENA);
+
 	dspclk = snd_soc_read(codec, WM8962_CLOCKING1);
+
+	if (codec->dapm.bias_level != SND_SOC_BIAS_ON)
+		snd_soc_update_bits(codec, WM8962_CLOCKING2,
+				    WM8962_SYSCLK_ENA_MASK, 0);
+
 	if (dspclk < 0) {
 		dev_err(codec->dev, "Failed to read DSPCLK: %d\n", dspclk);
 		return;
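
The comment in the hunk explains the trick: DSPCLK_DIV reads back correctly only while SYSCLK is enabled, so the driver provisionally enables SYSCLK around the read and drops it again if the bias level has not yet reached SND_SOC_BIAS_ON. The general enable, read, restore pattern sketched standalone, with the register faked by a variable:

#include <stdio.h>

static int sysclk_ena;			/* fake register bit */
static int fake_read(void) { return sysclk_ena ? 0x42 : 0; }

/* Read a value that is only valid while a clock is enabled,
 * restoring the clock's previous state afterwards. */
static int read_with_clock(int clock_needed)
{
	int val;

	if (clock_needed)
		sysclk_ena = 1;		/* provisional enable */
	val = fake_read();
	if (clock_needed)
		sysclk_ena = 0;		/* restore previous state */
	return val;
}

int main(void)
{
	printf("0x%x\n", read_with_clock(1));
	return 0;
}
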
diff --git a/sound/soc/codecs/wm_adsp.c b/sound/soc/codecs/wm_adsp.c
index 46ec0e9744d4..4fbcab63e61f 100644
--- a/sound/soc/codecs/wm_adsp.c
+++ b/sound/soc/codecs/wm_adsp.c
@@ -1474,13 +1474,17 @@ static int wm_adsp2_ena(struct wm_adsp *dsp)
 		return ret;
 
 	/* Wait for the RAM to start, should be near instantaneous */
-	count = 0;
-	do {
+	for (count = 0; count < 10; ++count) {
 		ret = regmap_read(dsp->regmap, dsp->base + ADSP2_STATUS1,
 				  &val);
 		if (ret != 0)
 			return ret;
-	} while (!(val & ADSP2_RAM_RDY) && ++count < 10);
+
+		if (val & ADSP2_RAM_RDY)
+			break;
+
+		msleep(1);
+	}
 
 	if (!(val & ADSP2_RAM_RDY)) {
 		adsp_err(dsp, "Failed to start DSP RAM\n");
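
Rewriting the do/while poll as a bounded for loop lets the driver sleep between attempts instead of spinning: at most ten status reads, breaking out as soon as the ready bit appears, with msleep(1) between tries. A userspace sketch of bounded polling, with usleep() standing in for msleep() and a fake device that becomes ready on the third read:

#include <stdio.h>
#include <unistd.h>

#define RAM_RDY 0x1

static int attempts;
static int read_status(unsigned *val)	/* fake device: ready on 3rd read */
{
	*val = (++attempts >= 3) ? RAM_RDY : 0;
	return 0;
}

/* Poll up to 10 times, sleeping between attempts, and fail cleanly. */
static int wait_ram_ready(void)
{
	unsigned val = 0;

	for (int count = 0; count < 10; ++count) {
		if (read_status(&val) != 0)
			return -1;
		if (val & RAM_RDY)
			return 0;
		usleep(1000);		/* msleep(1) in the kernel */
	}
	return -1;			/* timed out, RAM never came up */
}

int main(void)
{
	printf("%d (after %d reads)\n", wait_ram_ready(), attempts);
	return 0;
}
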
diff --git a/sound/soc/fsl/imx-wm8962.c b/sound/soc/fsl/imx-wm8962.c
index 61e48852b9e8..3fd76bc391de 100644
--- a/sound/soc/fsl/imx-wm8962.c
+++ b/sound/soc/fsl/imx-wm8962.c
@@ -130,8 +130,6 @@ static int imx_wm8962_set_bias_level(struct snd_soc_card *card,
 		break;
 	}
 
-	dapm->bias_level = level;
-
 	return 0;
 }
 
diff --git a/sound/soc/kirkwood/kirkwood-i2s.c b/sound/soc/kirkwood/kirkwood-i2s.c
index 0b18f654b413..3920a5e8125f 100644
--- a/sound/soc/kirkwood/kirkwood-i2s.c
+++ b/sound/soc/kirkwood/kirkwood-i2s.c
@@ -473,17 +473,17 @@ static struct snd_soc_dai_driver kirkwood_i2s_dai_extclk[2] = {
 	.playback = {
 		.channels_min = 1,
 		.channels_max = 2,
-		.rates = SNDRV_PCM_RATE_8000_192000 |
-			 SNDRV_PCM_RATE_CONTINUOUS |
-			 SNDRV_PCM_RATE_KNOT,
+		.rates = SNDRV_PCM_RATE_CONTINUOUS,
+		.rate_min = 5512,
+		.rate_max = 192000,
 		.formats = KIRKWOOD_I2S_FORMATS,
 	},
 	.capture = {
 		.channels_min = 1,
 		.channels_max = 2,
-		.rates = SNDRV_PCM_RATE_8000_192000 |
-			 SNDRV_PCM_RATE_CONTINUOUS |
-			 SNDRV_PCM_RATE_KNOT,
+		.rates = SNDRV_PCM_RATE_CONTINUOUS,
+		.rate_min = 5512,
+		.rate_max = 192000,
 		.formats = KIRKWOOD_I2S_FORMATS,
 	},
 	.ops = &kirkwood_i2s_dai_ops,
@@ -494,17 +494,17 @@ static struct snd_soc_dai_driver kirkwood_i2s_dai_extclk[2] = {
 	.playback = {
 		.channels_min = 1,
 		.channels_max = 2,
-		.rates = SNDRV_PCM_RATE_8000_192000 |
-			 SNDRV_PCM_RATE_CONTINUOUS |
-			 SNDRV_PCM_RATE_KNOT,
+		.rates = SNDRV_PCM_RATE_CONTINUOUS,
+		.rate_min = 5512,
+		.rate_max = 192000,
 		.formats = KIRKWOOD_SPDIF_FORMATS,
 	},
 	.capture = {
 		.channels_min = 1,
 		.channels_max = 2,
-		.rates = SNDRV_PCM_RATE_8000_192000 |
-			 SNDRV_PCM_RATE_CONTINUOUS |
-			 SNDRV_PCM_RATE_KNOT,
+		.rates = SNDRV_PCM_RATE_CONTINUOUS,
+		.rate_min = 5512,
+		.rate_max = 192000,
 		.formats = KIRKWOOD_SPDIF_FORMATS,
 	},
 	.ops = &kirkwood_i2s_dai_ops,
diff --git a/sound/soc/soc-generic-dmaengine-pcm.c b/sound/soc/soc-generic-dmaengine-pcm.c
index cbc9c96ce1f4..41949af3baae 100644
--- a/sound/soc/soc-generic-dmaengine-pcm.c
+++ b/sound/soc/soc-generic-dmaengine-pcm.c
@@ -305,6 +305,20 @@ static void dmaengine_pcm_request_chan_of(struct dmaengine_pcm *pcm,
 	}
 }
 
+static void dmaengine_pcm_release_chan(struct dmaengine_pcm *pcm)
+{
+	unsigned int i;
+
+	for (i = SNDRV_PCM_STREAM_PLAYBACK; i <= SNDRV_PCM_STREAM_CAPTURE;
+	     i++) {
+		if (!pcm->chan[i])
+			continue;
+		dma_release_channel(pcm->chan[i]);
+		if (pcm->flags & SND_DMAENGINE_PCM_FLAG_HALF_DUPLEX)
+			break;
+	}
+}
+
 /**
  * snd_dmaengine_pcm_register - Register a dmaengine based PCM device
  * @dev: The parent device for the PCM device
@@ -315,6 +329,7 @@ int snd_dmaengine_pcm_register(struct device *dev,
 	const struct snd_dmaengine_pcm_config *config, unsigned int flags)
 {
 	struct dmaengine_pcm *pcm;
+	int ret;
 
 	pcm = kzalloc(sizeof(*pcm), GFP_KERNEL);
 	if (!pcm)
@@ -326,11 +341,20 @@ int snd_dmaengine_pcm_register(struct device *dev,
 	dmaengine_pcm_request_chan_of(pcm, dev);
 
 	if (flags & SND_DMAENGINE_PCM_FLAG_NO_RESIDUE)
-		return snd_soc_add_platform(dev, &pcm->platform,
+		ret = snd_soc_add_platform(dev, &pcm->platform,
 				&dmaengine_no_residue_pcm_platform);
 	else
-		return snd_soc_add_platform(dev, &pcm->platform,
+		ret = snd_soc_add_platform(dev, &pcm->platform,
 				&dmaengine_pcm_platform);
+	if (ret)
+		goto err_free_dma;
+
+	return 0;
+
+err_free_dma:
+	dmaengine_pcm_release_chan(pcm);
+	kfree(pcm);
+	return ret;
 }
 EXPORT_SYMBOL_GPL(snd_dmaengine_pcm_register);
 
@@ -345,7 +369,6 @@ void snd_dmaengine_pcm_unregister(struct device *dev)
 {
 	struct snd_soc_platform *platform;
 	struct dmaengine_pcm *pcm;
-	unsigned int i;
 
 	platform = snd_soc_lookup_platform(dev);
 	if (!platform)
@@ -353,15 +376,8 @@ void snd_dmaengine_pcm_unregister(struct device *dev)
 
 	pcm = soc_platform_to_pcm(platform);
 
-	for (i = SNDRV_PCM_STREAM_PLAYBACK; i <= SNDRV_PCM_STREAM_CAPTURE; i++) {
-		if (pcm->chan[i]) {
-			dma_release_channel(pcm->chan[i]);
-			if (pcm->flags & SND_DMAENGINE_PCM_FLAG_HALF_DUPLEX)
-				break;
-		}
-	}
-
 	snd_soc_remove_platform(platform);
+	dmaengine_pcm_release_chan(pcm);
 	kfree(pcm);
 }
 EXPORT_SYMBOL_GPL(snd_dmaengine_pcm_unregister);
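
The channel-release loop is factored into dmaengine_pcm_release_chan(), shared by the new registration error path, which previously leaked the DMA channels and the pcm allocation when snd_soc_add_platform() failed, and by unregister. A sketch of the shared-cleanup-helper shape, with malloc/free standing in for channel request and release:

#include <stdio.h>
#include <stdlib.h>

struct pcm { void *chan[2]; };

/* One cleanup helper shared by the error path and normal teardown. */
static void release_channels(struct pcm *p)
{
	for (int i = 0; i < 2; i++) {
		free(p->chan[i]);
		p->chan[i] = NULL;
	}
}

static int register_pcm(struct pcm *p, int add_fails)
{
	p->chan[0] = malloc(16);
	p->chan[1] = malloc(16);
	if (add_fails) {		/* mirrors snd_soc_add_platform() failing */
		release_channels(p);	/* no leak on the error path */
		return -1;
	}
	return 0;
}

int main(void)
{
	struct pcm p = { { NULL, NULL } };

	printf("%d\n", register_pcm(&p, 1));
	printf("%d\n", register_pcm(&p, 0));
	release_channels(&p);		/* unregister uses the same helper */
	return 0;
}
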
diff --git a/sound/soc/soc-pcm.c b/sound/soc/soc-pcm.c
index 11a90cd027fa..891b9a9bcbf8 100644
--- a/sound/soc/soc-pcm.c
+++ b/sound/soc/soc-pcm.c
@@ -600,12 +600,13 @@ static int soc_pcm_hw_free(struct snd_pcm_substream *substream)
 	struct snd_soc_platform *platform = rtd->platform;
 	struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
 	struct snd_soc_dai *codec_dai = rtd->codec_dai;
-	struct snd_soc_codec *codec = rtd->codec;
+	bool playback = substream->stream == SNDRV_PCM_STREAM_PLAYBACK;
 
 	mutex_lock_nested(&rtd->pcm_mutex, rtd->pcm_subclass);
 
 	/* apply codec digital mute */
-	if (!codec->active)
+	if ((playback && codec_dai->playback_active == 1) ||
+	    (!playback && codec_dai->capture_active == 1))
 		snd_soc_dai_digital_mute(codec_dai, 1, substream->stream);
 
 	/* free any machine hw params */
diff --git a/sound/soc/tegra/tegra20_i2s.c b/sound/soc/tegra/tegra20_i2s.c
index 364bf6a907e1..8c819f811470 100644
--- a/sound/soc/tegra/tegra20_i2s.c
+++ b/sound/soc/tegra/tegra20_i2s.c
@@ -74,7 +74,7 @@ static int tegra20_i2s_set_fmt(struct snd_soc_dai *dai,
 			       unsigned int fmt)
 {
 	struct tegra20_i2s *i2s = snd_soc_dai_get_drvdata(dai);
-	unsigned int mask, val;
+	unsigned int mask = 0, val = 0;
 
 	switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
 	case SND_SOC_DAIFMT_NB_NF:
@@ -83,10 +83,10 @@ static int tegra20_i2s_set_fmt(struct snd_soc_dai *dai,
 		return -EINVAL;
 	}
 
-	mask = TEGRA20_I2S_CTRL_MASTER_ENABLE;
+	mask |= TEGRA20_I2S_CTRL_MASTER_ENABLE;
 	switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
 	case SND_SOC_DAIFMT_CBS_CFS:
-		val = TEGRA20_I2S_CTRL_MASTER_ENABLE;
+		val |= TEGRA20_I2S_CTRL_MASTER_ENABLE;
 		break;
 	case SND_SOC_DAIFMT_CBM_CFM:
 		break;
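
Here, and in the tegra20_spdif and tegra30_i2s hunks below, mask and val start at zero and each switch arm accumulates bits with |= instead of plain assignment; with =, a later arm would silently discard bits chosen by an earlier one, and a path that set nothing would leave the variables uninitialized. A compact illustration:

#include <stdio.h>

#define BIT_FMT    0x1
#define BIT_MASTER 0x2

int main(void)
{
	unsigned mask = 0, val = 0;	/* initialized: safe if no arm hits */

	mask |= BIT_FMT;		/* first configuration step */
	val  |= BIT_FMT;

	mask |= BIT_MASTER;		/* second step: |= preserves BIT_FMT; */
	val  |= BIT_MASTER;		/* plain = would have dropped it */

	printf("mask=0x%x val=0x%x\n", mask, val);
	return 0;
}
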
diff --git a/sound/soc/tegra/tegra20_spdif.c b/sound/soc/tegra/tegra20_spdif.c
index 08bc6931c7c7..8c7c1028e579 100644
--- a/sound/soc/tegra/tegra20_spdif.c
+++ b/sound/soc/tegra/tegra20_spdif.c
@@ -67,15 +67,15 @@ static int tegra20_spdif_hw_params(struct snd_pcm_substream *substream,
 {
 	struct device *dev = dai->dev;
 	struct tegra20_spdif *spdif = snd_soc_dai_get_drvdata(dai);
-	unsigned int mask, val;
+	unsigned int mask = 0, val = 0;
 	int ret, spdifclock;
 
-	mask = TEGRA20_SPDIF_CTRL_PACK |
+	mask |= TEGRA20_SPDIF_CTRL_PACK |
 	       TEGRA20_SPDIF_CTRL_BIT_MODE_MASK;
 	switch (params_format(params)) {
 	case SNDRV_PCM_FORMAT_S16_LE:
-		val = TEGRA20_SPDIF_CTRL_PACK |
+		val |= TEGRA20_SPDIF_CTRL_PACK |
 		      TEGRA20_SPDIF_CTRL_BIT_MODE_16BIT;
 		break;
 	default:
 		return -EINVAL;
diff --git a/sound/soc/tegra/tegra30_i2s.c b/sound/soc/tegra/tegra30_i2s.c
index 231a785b3921..02247fee1cf7 100644
--- a/sound/soc/tegra/tegra30_i2s.c
+++ b/sound/soc/tegra/tegra30_i2s.c
@@ -118,7 +118,7 @@ static int tegra30_i2s_set_fmt(struct snd_soc_dai *dai,
 			       unsigned int fmt)
 {
 	struct tegra30_i2s *i2s = snd_soc_dai_get_drvdata(dai);
-	unsigned int mask, val;
+	unsigned int mask = 0, val = 0;
 
 	switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
 	case SND_SOC_DAIFMT_NB_NF:
@@ -127,10 +127,10 @@ static int tegra30_i2s_set_fmt(struct snd_soc_dai *dai,
 		return -EINVAL;
 	}
 
-	mask = TEGRA30_I2S_CTRL_MASTER_ENABLE;
+	mask |= TEGRA30_I2S_CTRL_MASTER_ENABLE;
 	switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
 	case SND_SOC_DAIFMT_CBS_CFS:
-		val = TEGRA30_I2S_CTRL_MASTER_ENABLE;
+		val |= TEGRA30_I2S_CTRL_MASTER_ENABLE;
 		break;
 	case SND_SOC_DAIFMT_CBM_CFM:
 		break;