-rw-r--r--Documentation/kernel-parameters.txt57
-rw-r--r--MAINTAINERS14
-rw-r--r--Makefile2
-rw-r--r--arch/Kconfig6
-rw-r--r--arch/arc/lib/strchr-700.S10
-rw-r--r--arch/arm/boot/dts/at91sam9n12ek.dts4
-rw-r--r--arch/arm/boot/dts/at91sam9x5ek.dtsi5
-rw-r--r--arch/arm/boot/dts/tegra20-seaboard.dts2
-rw-r--r--arch/arm/boot/dts/tegra20-trimslice.dts2
-rw-r--r--arch/arm/boot/dts/tegra20-whistler.dts4
-rw-r--r--arch/arm/include/asm/smp_plat.h3
-rw-r--r--arch/arm/include/asm/spinlock.h51
-rw-r--r--arch/arm/include/asm/tlb.h7
-rw-r--r--arch/arm/kernel/entry-armv.S3
-rw-r--r--arch/arm/kernel/fiq.c9
-rw-r--r--arch/arm/kernel/machine_kexec.c21
-rw-r--r--arch/arm/kernel/perf_event.c10
-rw-r--r--arch/arm/kernel/process.c2
-rw-r--r--arch/arm/kernel/smp.c10
-rw-r--r--arch/arm/kvm/coproc.c26
-rw-r--r--arch/arm/kvm/coproc.h3
-rw-r--r--arch/arm/kvm/coproc_a15.c6
-rw-r--r--arch/arm/kvm/mmio.c3
-rw-r--r--arch/arm/kvm/mmu.c36
-rw-r--r--arch/arm/mach-at91/at91sam9x5.c2
-rw-r--r--arch/arm/mach-davinci/board-dm355-leopard.c1
-rw-r--r--arch/arm/mach-davinci/board-dm644x-evm.c1
-rw-r--r--arch/arm/mach-davinci/board-dm646x-evm.c1
-rw-r--r--arch/arm/mach-davinci/board-neuros-osd2.c1
-rw-r--r--arch/arm/mach-omap2/board-n8x0.c4
-rw-r--r--arch/arm/mach-omap2/board-rx51.c2
-rw-r--r--arch/arm/mach-omap2/usb-musb.c5
-rw-r--r--arch/arm/mm/Kconfig9
-rw-r--r--arch/arm/plat-samsung/init.c5
-rw-r--r--arch/arm/xen/enlighten.c1
-rw-r--r--arch/arm64/include/asm/kvm_asm.h17
-rw-r--r--arch/arm64/include/asm/kvm_host.h2
-rw-r--r--arch/arm64/include/asm/tlb.h7
-rw-r--r--arch/arm64/kernel/perf_event.c10
-rw-r--r--arch/arm64/kvm/hyp.S13
-rw-r--r--arch/arm64/kvm/sys_regs.c3
-rw-r--r--arch/hexagon/Kconfig1
-rw-r--r--arch/ia64/include/asm/tlb.h9
-rw-r--r--arch/m68k/emu/natfeat.c23
-rw-r--r--arch/m68k/include/asm/div64.h9
-rw-r--r--arch/microblaze/Kconfig2
-rw-r--r--arch/mips/math-emu/cp1emu.c26
-rw-r--r--arch/openrisc/Kconfig1
-rw-r--r--arch/s390/include/asm/tlb.h8
-rw-r--r--arch/score/Kconfig2
-rw-r--r--arch/sh/include/asm/tlb.h6
-rw-r--r--arch/um/include/asm/tlb.h6
-rw-r--r--arch/x86/include/asm/bootparam_utils.h4
-rw-r--r--arch/x86/include/asm/microcode_amd.h2
-rw-r--r--arch/x86/include/asm/pgtable-2level.h48
-rw-r--r--arch/x86/include/asm/pgtable-3level.h3
-rw-r--r--arch/x86/include/asm/pgtable.h30
-rw-r--r--arch/x86/include/asm/pgtable_types.h17
-rw-r--r--arch/x86/include/asm/spinlock.h4
-rw-r--r--arch/x86/kernel/cpu/amd.c20
-rw-r--r--arch/x86/kernel/cpu/perf_event_intel.c1
-rw-r--r--arch/x86/kernel/cpu/perf_event_intel_uncore.c4
-rw-r--r--arch/x86/kernel/microcode_amd.c27
-rw-r--r--arch/x86/kernel/microcode_amd_early.c27
-rw-r--r--arch/x86/kernel/sys_x86_64.c2
-rw-r--r--arch/x86/kernel/tboot.c10
-rw-r--r--arch/x86/mm/mmap.c6
-rw-r--r--arch/x86/pci/i386.c4
-rw-r--r--arch/x86/xen/setup.c22
-rw-r--r--arch/x86/xen/smp.c11
-rw-r--r--drivers/acpi/acpica/acglobal.h7
-rw-r--r--drivers/acpi/acpica/aclocal.h4
-rw-r--r--drivers/acpi/acpica/acnamesp.h4
-rw-r--r--drivers/acpi/acpica/actables.h7
-rw-r--r--drivers/acpi/acpica/acutils.h4
-rw-r--r--drivers/acpi/acpica/evgpeinit.c11
-rw-r--r--drivers/acpi/acpica/exdump.c6
-rw-r--r--drivers/acpi/acpica/exoparg1.c48
-rw-r--r--drivers/acpi/acpica/hwesleep.c9
-rw-r--r--drivers/acpi/acpica/hwtimer.c13
-rw-r--r--drivers/acpi/acpica/nspredef.c16
-rw-r--r--drivers/acpi/acpica/nswalk.c26
-rw-r--r--drivers/acpi/acpica/nsxfeval.c16
-rw-r--r--drivers/acpi/acpica/nsxfname.c11
-rw-r--r--drivers/acpi/acpica/tbfadt.c4
-rw-r--r--drivers/acpi/acpica/tbxfroot.c12
-rw-r--r--drivers/acpi/acpica/uteval.c8
-rw-r--r--drivers/acpi/acpica/utglobal.c1
-rw-r--r--drivers/acpi/acpica/utosi.c77
-rw-r--r--drivers/acpi/acpica/utstring.c5
-rw-r--r--drivers/acpi/acpica/utxface.c29
-rw-r--r--drivers/acpi/battery.c19
-rw-r--r--drivers/acpi/bus.c22
-rw-r--r--drivers/acpi/device_pm.c34
-rw-r--r--drivers/acpi/dock.c398
-rw-r--r--drivers/acpi/ec.c4
-rw-r--r--drivers/acpi/fan.c4
-rw-r--r--drivers/acpi/osl.c45
-rw-r--r--drivers/acpi/pci_slot.c14
-rw-r--r--drivers/acpi/power.c6
-rw-r--r--drivers/acpi/processor_perflib.c22
-rw-r--r--drivers/acpi/resource.c4
-rw-r--r--drivers/acpi/scan.c193
-rw-r--r--drivers/acpi/sleep.c46
-rw-r--r--drivers/acpi/thermal.c18
-rw-r--r--drivers/acpi/utils.c70
-rw-r--r--drivers/acpi/video.c67
-rw-r--r--drivers/acpi/video_detect.c19
-rw-r--r--drivers/ata/libata-acpi.c4
-rw-r--r--drivers/ata/libata-pmp.c12
-rw-r--r--drivers/ata/sata_fsl.c5
-rw-r--r--drivers/ata/sata_highbank.c4
-rw-r--r--drivers/block/aoe/aoecmd.c17
-rw-r--r--drivers/clk/samsung/clk-exynos4.c64
-rw-r--r--drivers/clk/zynq/clkc.c13
-rw-r--r--drivers/gpu/drm/gma500/psb_intel_sdvo.c3
-rw-r--r--drivers/gpu/drm/i915/i915_gem_dmabuf.c8
-rw-r--r--drivers/gpu/drm/i915/i915_reg.h2
-rw-r--r--drivers/gpu/drm/i915/intel_display.c86
-rw-r--r--drivers/gpu/drm/i915/intel_ringbuffer.c12
-rw-r--r--drivers/gpu/drm/nouveau/core/core/mm.c4
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/mc.h7
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/fb/ramnv49.c12
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/fb/ramnv4e.c4
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/ltcg/nvc0.c34
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/mc/base.c6
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/mc/nv04.c3
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/mc/nv44.c3
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/mc/nv50.c3
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/mc/nv98.c3
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/mc/nvc0.c3
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/crtc.c58
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/disp.h1
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_display.c3
-rw-r--r--drivers/gpu/drm/nouveau/nv40_pm.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon.h2
-rw-r--r--drivers/gpu/drm/radeon/radeon_uvd.c8
-rw-r--r--drivers/gpu/drm/radeon/rv770.c12
-rw-r--r--drivers/ide/ide-acpi.c5
-rw-r--r--drivers/iio/light/adjd_s311.c3
-rw-r--r--drivers/md/dm-cache-policy-mq.c16
-rw-r--r--drivers/net/bonding/bond_main.c8
-rw-r--r--drivers/net/can/usb/peak_usb/pcan_usb.c2
-rw-r--r--drivers/net/ethernet/arc/emac_main.c2
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x.h7
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c53
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.h3
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h5
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c88
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c58
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c66
-rw-r--r--drivers/net/ethernet/broadcom/tg3.c13
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/sge.c107
-rw-r--r--drivers/net/ethernet/emulex/benet/be_cmds.c3
-rw-r--r--drivers/net/ethernet/emulex/benet/be_cmds.h6
-rw-r--r--drivers/net/ethernet/emulex/benet/be_main.c2
-rw-r--r--drivers/net/ethernet/marvell/skge.c68
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/cmd.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eq.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fw.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/health.c29
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c58
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c5
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c3
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c6
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c6
-rw-r--r--drivers/net/ethernet/realtek/8139cp.c1
-rw-r--r--drivers/net/ethernet/realtek/r8169.c2
-rw-r--r--drivers/net/ethernet/sfc/filter.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/ring_mode.c13
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_main.c111
-rw-r--r--drivers/net/ethernet/via/via-velocity.c4
-rw-r--r--drivers/net/irda/via-ircc.c6
-rw-r--r--drivers/net/macvlan.c4
-rw-r--r--drivers/net/macvtap.c30
-rw-r--r--drivers/net/phy/realtek.c4
-rw-r--r--drivers/net/tun.c6
-rw-r--r--drivers/net/usb/hso.c15
-rw-r--r--drivers/net/vxlan.c4
-rw-r--r--drivers/net/wireless/cw1200/sta.c7
-rw-r--r--drivers/net/wireless/hostap/hostap_ioctl.c4
-rw-r--r--drivers/net/wireless/iwlegacy/4965-mac.c16
-rw-r--r--drivers/net/wireless/iwlegacy/common.c1
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/mac80211.c5
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-prph.h2
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/time-event.c33
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/rx.c8
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/trans.c10
-rw-r--r--drivers/net/wireless/zd1201.c4
-rw-r--r--drivers/of/fdt.c2
-rw-r--r--drivers/pci/hotplug/acpiphp.h55
-rw-r--r--drivers/pci/hotplug/acpiphp_core.c18
-rw-r--r--drivers/pci/hotplug/acpiphp_glue.c1035
-rw-r--r--drivers/pci/hotplug/acpiphp_ibm.c2
-rw-r--r--drivers/pci/pci-acpi.c18
-rw-r--r--drivers/pinctrl/pinctrl-sunxi.c66
-rw-r--r--drivers/pinctrl/pinctrl-sunxi.h2
-rw-r--r--drivers/platform/olpc/olpc-ec.c2
-rw-r--r--drivers/platform/x86/hp-wmi.c16
-rw-r--r--drivers/platform/x86/sony-laptop.c8
-rw-r--r--drivers/pnp/pnpacpi/core.c6
-rw-r--r--drivers/rtc/rtc-stmp3xxx.c35
-rw-r--r--drivers/s390/scsi/zfcp_erp.c29
-rw-r--r--drivers/s390/scsi/zfcp_qdio.c8
-rw-r--r--drivers/s390/scsi/zfcp_sysfs.c14
-rw-r--r--drivers/scsi/Kconfig1
-rw-r--r--drivers/staging/comedi/drivers.c2
-rw-r--r--drivers/usb/class/usbtmc.c8
-rw-r--r--drivers/usb/core/quirks.c6
-rw-r--r--drivers/usb/host/ehci-sched.c13
-rw-r--r--drivers/usb/host/ohci-pci.c5
-rw-r--r--drivers/usb/misc/adutux.c2
-rw-r--r--drivers/usb/phy/phy-fsl-usb.h2
-rw-r--r--drivers/usb/phy/phy-fsm-usb.c2
-rw-r--r--drivers/usb/serial/keyspan.c2
-rw-r--r--drivers/usb/serial/mos7720.c21
-rw-r--r--drivers/usb/serial/mos7840.c2
-rw-r--r--drivers/usb/serial/ti_usb_3410_5052.c9
-rw-r--r--drivers/usb/serial/usb_wwan.c20
-rw-r--r--drivers/usb/wusbcore/wa-xfer.c9
-rw-r--r--drivers/xen/acpi.c41
-rw-r--r--drivers/xen/events.c13
-rw-r--r--fs/bfs/inode.c2
-rw-r--r--fs/bio.c20
-rw-r--r--fs/cifs/cifsencrypt.c14
-rw-r--r--fs/cifs/cifsfs.c11
-rw-r--r--fs/cifs/cifsglob.h4
-rw-r--r--fs/cifs/cifsproto.h4
-rw-r--r--fs/cifs/connect.c7
-rw-r--r--fs/cifs/file.c1
-rw-r--r--fs/cifs/link.c84
-rw-r--r--fs/cifs/readdir.c8
-rw-r--r--fs/cifs/sess.c6
-rw-r--r--fs/cifs/smb1ops.c1
-rw-r--r--fs/cifs/smb2transport.c9
-rw-r--r--fs/dcache.c11
-rw-r--r--fs/efs/inode.c2
-rw-r--r--fs/exec.c4
-rw-r--r--fs/ext4/ext4.h1
-rw-r--r--fs/ext4/ext4_jbd2.c8
-rw-r--r--fs/ext4/file.c21
-rw-r--r--fs/ext4/inode.c43
-rw-r--r--fs/ext4/ioctl.c6
-rw-r--r--fs/ext4/super.c19
-rw-r--r--fs/gfs2/glock.c8
-rw-r--r--fs/gfs2/glops.c18
-rw-r--r--fs/gfs2/inode.c6
-rw-r--r--fs/gfs2/main.c2
-rw-r--r--fs/hugetlbfs/inode.c18
-rw-r--r--fs/namespace.c2
-rw-r--r--fs/nilfs2/segbuf.c5
-rw-r--r--fs/ocfs2/aops.c2
-rw-r--r--fs/ocfs2/dir.c4
-rw-r--r--fs/ocfs2/file.c6
-rw-r--r--fs/ocfs2/journal.h2
-rw-r--r--fs/ocfs2/move_extents.c2
-rw-r--r--fs/ocfs2/refcounttree.c53
-rw-r--r--fs/ocfs2/refcounttree.h6
-rw-r--r--fs/proc/fd.c2
-rw-r--r--fs/proc/generic.c2
-rw-r--r--fs/proc/root.c4
-rw-r--r--fs/proc/task_mmu.c31
-rw-r--r--include/acpi/acpi_bus.h15
-rw-r--r--include/acpi/acpi_drivers.h14
-rw-r--r--include/acpi/acpixf.h8
-rw-r--r--include/acpi/actypes.h21
-rw-r--r--include/asm-generic/pgtable.h30
-rw-r--r--include/asm-generic/tlb.h2
-rw-r--r--include/linux/acpi.h7
-rw-r--r--include/linux/dcache.h1
-rw-r--r--include/linux/inetdevice.h34
-rw-r--r--include/linux/ipv6.h1
-rw-r--r--include/linux/mlx5/device.h22
-rw-r--r--include/linux/mlx5/driver.h7
-rw-r--r--include/linux/mm_types.h1
-rw-r--r--include/linux/pci-acpi.h10
-rw-r--r--include/linux/sched.h6
-rw-r--r--include/linux/spinlock.h14
-rw-r--r--include/linux/swapops.h2
-rw-r--r--include/linux/syscalls.h5
-rw-r--r--include/linux/wait.h57
-rw-r--r--include/net/busy_poll.h7
-rw-r--r--include/net/ip6_route.h2
-rw-r--r--include/net/ip_tunnels.h14
-rw-r--r--include/net/sch_generic.h9
-rw-r--r--include/uapi/linux/ip.h34
-rw-r--r--include/uapi/linux/pkt_sched.h10
-rw-r--r--include/uapi/linux/snmp.h2
-rw-r--r--include/xen/acpi.h8
-rw-r--r--include/xen/interface/platform.h7
-rw-r--r--init/Kconfig2
-rw-r--r--kernel/cpuset.c20
-rw-r--r--kernel/fork.c6
-rw-r--r--kernel/mutex.c4
-rw-r--r--kernel/power/qos.c20
-rw-r--r--kernel/sched/core.c96
-rw-r--r--kernel/sched/cpupri.c4
-rw-r--r--kernel/sched/fair.c10
-rw-r--r--kernel/time/sched_clock.c2
-rw-r--r--kernel/time/tick-sched.c5
-rw-r--r--kernel/wait.c3
-rw-r--r--lib/lz4/lz4_compress.c4
-rw-r--r--lib/lz4/lz4_decompress.c6
-rw-r--r--lib/lz4/lz4hc_compress.c4
-rw-r--r--mm/fremap.c11
-rw-r--r--mm/hugetlb.c2
-rw-r--r--mm/memcontrol.c5
-rw-r--r--mm/memory.c49
-rw-r--r--mm/mmap.c4
-rw-r--r--mm/rmap.c14
-rw-r--r--mm/shmem.c8
-rw-r--r--mm/swapfile.c19
-rw-r--r--net/8021q/vlan_core.c7
-rw-r--r--net/batman-adv/bridge_loop_avoidance.c2
-rw-r--r--net/batman-adv/gateway_client.c13
-rw-r--r--net/batman-adv/gateway_client.h3
-rw-r--r--net/batman-adv/soft-interface.c9
-rw-r--r--net/batman-adv/unicast.c23
-rw-r--r--net/bridge/br_fdb.c10
-rw-r--r--net/bridge/br_multicast.c2
-rw-r--r--net/bridge/br_netlink.c4
-rw-r--r--net/bridge/br_sysfs_br.c2
-rw-r--r--net/bridge/br_vlan.c4
-rw-r--r--net/core/flow_dissector.c1
-rw-r--r--net/core/neighbour.c10
-rw-r--r--net/core/rtnetlink.c4
-rw-r--r--net/ipv4/esp4.c2
-rw-r--r--net/ipv4/fib_trie.c5
-rw-r--r--net/ipv4/ip_gre.c2
-rw-r--r--net/ipv4/ip_tunnel_core.c4
-rw-r--r--net/ipv4/proc.c2
-rw-r--r--net/ipv4/tcp.c7
-rw-r--r--net/ipv4/tcp_cubic.c12
-rw-r--r--net/ipv6/addrconf.c10
-rw-r--r--net/ipv6/esp6.c2
-rw-r--r--net/ipv6/ip6_fib.c16
-rw-r--r--net/ipv6/ndisc.c4
-rw-r--r--net/ipv6/reassembly.c5
-rw-r--r--net/ipv6/route.c21
-rw-r--r--net/mac80211/mlme.c54
-rw-r--r--net/netfilter/nf_conntrack_proto_tcp.c12
-rw-r--r--net/netfilter/nfnetlink_log.c6
-rw-r--r--net/netfilter/nfnetlink_queue_core.c5
-rw-r--r--net/netfilter/xt_TCPMSS.c28
-rw-r--r--net/netfilter/xt_TCPOPTSTRIP.c10
-rw-r--r--net/openvswitch/actions.c1
-rw-r--r--net/openvswitch/datapath.c3
-rw-r--r--net/openvswitch/flow.c2
-rw-r--r--net/packet/af_packet.c2
-rw-r--r--net/sched/sch_api.c41
-rw-r--r--net/sched/sch_generic.c8
-rw-r--r--net/sched/sch_htb.c13
-rw-r--r--net/sctp/associola.c4
-rw-r--r--net/sctp/transport.c4
-rw-r--r--net/tipc/bearer.c9
-rw-r--r--net/vmw_vsock/af_vsock.c2
-rw-r--r--net/wireless/core.c1
-rw-r--r--net/wireless/nl80211.c28
-rw-r--r--net/wireless/sme.c10
-rw-r--r--sound/pci/hda/hda_generic.c6
-rw-r--r--sound/pci/hda/patch_realtek.c11
-rw-r--r--sound/soc/codecs/cs42l52.c5
-rw-r--r--sound/soc/codecs/sgtl5000.c18
-rw-r--r--sound/soc/soc-dapm.c7
-rw-r--r--sound/soc/tegra/tegra30_i2s.c2
-rw-r--r--sound/usb/6fire/midi.c16
-rw-r--r--sound/usb/6fire/midi.h6
-rw-r--r--sound/usb/6fire/pcm.c41
-rw-r--r--sound/usb/6fire/pcm.h2
-rw-r--r--sound/usb/mixer.c1
-rw-r--r--sound/usb/quirks.c6
371 files changed, 3804 insertions, 2663 deletions
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index 15356aca938c..03c196d90da3 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -235,10 +235,61 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
 			Format: To spoof as Windows 98: ="Microsoft Windows"
 
 	acpi_osi=	[HW,ACPI] Modify list of supported OS interface strings
-			acpi_osi="string1"	# add string1 -- only one string
-			acpi_osi="!string2"	# remove built-in string2
+			acpi_osi="string1"	# add string1
+			acpi_osi="!string2"	# remove string2
+			acpi_osi=!*		# remove all strings
+			acpi_osi=!		# disable all built-in OS vendor
+						  strings
 			acpi_osi=		# disable all strings
 
+			'acpi_osi=!' can be used in combination with single or
+			multiple 'acpi_osi="string1"' to support specific OS
+			vendor string(s). Note that such command can only
+			affect the default state of the OS vendor strings, thus
+			it cannot affect the default state of the feature group
+			strings and the current state of the OS vendor strings,
+			specifying it multiple times through kernel command line
+			is meaningless. This command is useful when one do not
+			care about the state of the feature group strings which
+			should be controlled by the OSPM.
+			Examples:
+			  1. 'acpi_osi=! acpi_osi="Windows 2000"' is equivalent
+			     to 'acpi_osi="Windows 2000" acpi_osi=!', they all
+			     can make '_OSI("Windows 2000")' TRUE.
+
+			'acpi_osi=' cannot be used in combination with other
+			'acpi_osi=' command lines, the _OSI method will not
+			exist in the ACPI namespace. NOTE that such command can
+			only affect the _OSI support state, thus specifying it
+			multiple times through kernel command line is also
+			meaningless.
+			Examples:
+			  1. 'acpi_osi=' can make 'CondRefOf(_OSI, Local1)'
+			     FALSE.
+
+			'acpi_osi=!*' can be used in combination with single or
+			multiple 'acpi_osi="string1"' to support specific
+			string(s). Note that such command can affect the
+			current state of both the OS vendor strings and the
+			feature group strings, thus specifying it multiple times
+			through kernel command line is meaningful. But it may
+			still not able to affect the final state of a string if
+			there are quirks related to this string. This command
+			is useful when one want to control the state of the
+			feature group strings to debug BIOS issues related to
+			the OSPM features.
+			Examples:
+			  1. 'acpi_osi="Module Device" acpi_osi=!*' can make
+			     '_OSI("Module Device")' FALSE.
+			  2. 'acpi_osi=!* acpi_osi="Module Device"' can make
+			     '_OSI("Module Device")' TRUE.
+			  3. 'acpi_osi=! acpi_osi=!* acpi_osi="Windows 2000"' is
+			     equivalent to
+			     'acpi_osi=!* acpi_osi=! acpi_osi="Windows 2000"'
+			     and
+			     'acpi_osi=!* acpi_osi="Windows 2000" acpi_osi=!',
+			     they all will make '_OSI("Windows 2000")' TRUE.
+
 	acpi_pm_good	[X86]
 			Override the pmtimer bug detection: force the kernel
 			to assume that this machine's pmtimer latches its value
@@ -2953,7 +3004,7 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
 			improve throughput, but will also increase the
 			amount of memory reserved for use by the client.
 
-	swapaccount[=0|1]
+	swapaccount=[0|1]
 			[KNL] Enable accounting of swap in memory resource
 			controller if no parameter or 1 is given or disable
 			it if 0 is given (See Documentation/cgroups/memory.txt)
diff --git a/MAINTAINERS b/MAINTAINERS
index 7cacc88dc79c..8197fbd70a3e 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -5581,9 +5581,9 @@ S: Maintained
 F:	drivers/media/tuners/mxl5007t.*
 
 MYRICOM MYRI-10G 10GbE DRIVER (MYRI10GE)
-M:	Andrew Gallatin <gallatin@myri.com>
+M:	Hyong-Youb Kim <hykim@myri.com>
 L:	netdev@vger.kernel.org
-W:	http://www.myri.com/scs/download-Myri10GE.html
+W:	https://www.myricom.com/support/downloads/myri10ge.html
 S:	Supported
 F:	drivers/net/ethernet/myricom/myri10ge/
 
@@ -5884,7 +5884,7 @@ F: drivers/i2c/busses/i2c-omap.c
 F:	include/linux/i2c-omap.h
 
 OMAP DEVICE TREE SUPPORT
-M:	Benoît Cousson <b-cousson@ti.com>
+M:	Benoît Cousson <bcousson@baylibre.com>
 M:	Tony Lindgren <tony@atomide.com>
 L:	linux-omap@vger.kernel.org
 L:	devicetree@vger.kernel.org
@@ -5964,14 +5964,14 @@ S: Maintained
 F:	drivers/char/hw_random/omap-rng.c
 
 OMAP HWMOD SUPPORT
-M:	Benoît Cousson <b-cousson@ti.com>
+M:	Benoît Cousson <bcousson@baylibre.com>
 M:	Paul Walmsley <paul@pwsan.com>
 L:	linux-omap@vger.kernel.org
 S:	Maintained
 F:	arch/arm/mach-omap2/omap_hwmod.*
 
 OMAP HWMOD DATA FOR OMAP4-BASED DEVICES
-M:	Benoît Cousson <b-cousson@ti.com>
+M:	Benoît Cousson <bcousson@baylibre.com>
 L:	linux-omap@vger.kernel.org
 S:	Maintained
 F:	arch/arm/mach-omap2/omap_hwmod_44xx_data.c
@@ -7366,7 +7366,6 @@ F: drivers/net/ethernet/sfc/
 
 SGI GRU DRIVER
 M:	Dimitri Sivanich <sivanich@sgi.com>
-M:	Robin Holt <holt@sgi.com>
 S:	Maintained
 F:	drivers/misc/sgi-gru/
 
@@ -7386,7 +7385,8 @@ S: Maintained for 2.6.
 F:	Documentation/sgi-visws.txt
 
 SGI XP/XPC/XPNET DRIVER
-M:	Robin Holt <holt@sgi.com>
+M:	Cliff Whickman <cpw@sgi.com>
+M:	Robin Holt <robinmholt@gmail.com>
 S:	Maintained
 F:	drivers/misc/sgi-xp/
 
diff --git a/Makefile b/Makefile
index 6e488480bff3..369882e4fc77 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 3
 PATCHLEVEL = 11
 SUBLEVEL = 0
-EXTRAVERSION = -rc5
+EXTRAVERSION = -rc7
 NAME = Linux for Workgroups
 
 # *DOCUMENTATION*
diff --git a/arch/Kconfig b/arch/Kconfig
index 8d2ae24b9f4a..1feb169274fe 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -407,6 +407,12 @@ config CLONE_BACKWARDS2
 	help
 	  Architecture has the first two arguments of clone(2) swapped.
 
+config CLONE_BACKWARDS3
+	bool
+	help
+	  Architecture has tls passed as the 3rd argument of clone(2),
+	  not the 5th one.
+
 config ODD_RT_SIGACTION
 	bool
 	help
diff --git a/arch/arc/lib/strchr-700.S b/arch/arc/lib/strchr-700.S
index 99c10475d477..9c548c7cf001 100644
--- a/arch/arc/lib/strchr-700.S
+++ b/arch/arc/lib/strchr-700.S
@@ -39,9 +39,18 @@ ARC_ENTRY strchr
 	ld.a	r2,[r0,4]
 	sub	r12,r6,r7
 	bic	r12,r12,r6
+#ifdef __LITTLE_ENDIAN__
 	and	r7,r12,r4
 	breq	r7,0,.Loop ; For speed, we want this branch to be unaligned.
 	b	.Lfound_char ; Likewise this one.
+#else
+	and	r12,r12,r4
+	breq	r12,0,.Loop ; For speed, we want this branch to be unaligned.
+	lsr_s	r12,r12,7
+	bic	r2,r7,r6
+	b.d	.Lfound_char_b
+	and_s	r2,r2,r12
+#endif
 ; /* We require this code address to be unaligned for speed... */
 .Laligned:
 	ld_s	r2,[r0]
@@ -95,6 +104,7 @@ ARC_ENTRY strchr
 	lsr	r7,r7,7
 
 	bic	r2,r7,r6
+.Lfound_char_b:
 	norm	r2,r2
 	sub_s	r0,r0,4
 	asr_s	r2,r2,3
diff --git a/arch/arm/boot/dts/at91sam9n12ek.dts b/arch/arm/boot/dts/at91sam9n12ek.dts
index d59b70c6a6a0..3d77dbe406f4 100644
--- a/arch/arm/boot/dts/at91sam9n12ek.dts
+++ b/arch/arm/boot/dts/at91sam9n12ek.dts
@@ -14,11 +14,11 @@
 	compatible = "atmel,at91sam9n12ek", "atmel,at91sam9n12", "atmel,at91sam9";
 
 	chosen {
-		bootargs = "mem=128M console=ttyS0,115200 root=/dev/mtdblock1 rw rootfstype=jffs2";
+		bootargs = "console=ttyS0,115200 root=/dev/mtdblock1 rw rootfstype=jffs2";
 	};
 
 	memory {
-		reg = <0x20000000 0x10000000>;
+		reg = <0x20000000 0x8000000>;
 	};
 
 	clocks {
diff --git a/arch/arm/boot/dts/at91sam9x5ek.dtsi b/arch/arm/boot/dts/at91sam9x5ek.dtsi
index b753855b2058..49e3c45818c2 100644
--- a/arch/arm/boot/dts/at91sam9x5ek.dtsi
+++ b/arch/arm/boot/dts/at91sam9x5ek.dtsi
@@ -94,8 +94,9 @@
 
 			usb0: ohci@00600000 {
 				status = "okay";
-				num-ports = <2>;
-				atmel,vbus-gpio = <&pioD 19 GPIO_ACTIVE_LOW
+				num-ports = <3>;
+				atmel,vbus-gpio = <0 /* &pioD 18 GPIO_ACTIVE_LOW *//* Activate to have access to port A */
+						   &pioD 19 GPIO_ACTIVE_LOW
 						   &pioD 20 GPIO_ACTIVE_LOW
 						  >;
 			};
diff --git a/arch/arm/boot/dts/tegra20-seaboard.dts b/arch/arm/boot/dts/tegra20-seaboard.dts
index 365760b33a26..40e6fb280333 100644
--- a/arch/arm/boot/dts/tegra20-seaboard.dts
+++ b/arch/arm/boot/dts/tegra20-seaboard.dts
@@ -830,6 +830,8 @@
 			regulator-max-microvolt = <5000000>;
 			enable-active-high;
 			gpio = <&gpio 24 0>; /* PD0 */
+			regulator-always-on;
+			regulator-boot-on;
 		};
 	};
 
diff --git a/arch/arm/boot/dts/tegra20-trimslice.dts b/arch/arm/boot/dts/tegra20-trimslice.dts
index ed4b901b0227..37c93d3c4812 100644
--- a/arch/arm/boot/dts/tegra20-trimslice.dts
+++ b/arch/arm/boot/dts/tegra20-trimslice.dts
@@ -412,6 +412,8 @@
 			regulator-max-microvolt = <5000000>;
 			enable-active-high;
 			gpio = <&gpio 170 0>; /* PV2 */
+			regulator-always-on;
+			regulator-boot-on;
 		};
 	};
 
diff --git a/arch/arm/boot/dts/tegra20-whistler.dts b/arch/arm/boot/dts/tegra20-whistler.dts
index ab67c94db280..a3d0ebad78a1 100644
--- a/arch/arm/boot/dts/tegra20-whistler.dts
+++ b/arch/arm/boot/dts/tegra20-whistler.dts
@@ -588,6 +588,8 @@
 			regulator-max-microvolt = <5000000>;
 			enable-active-high;
 			gpio = <&tca6416 0 0>; /* GPIO_PMU0 */
+			regulator-always-on;
+			regulator-boot-on;
 		};
 
 		vbus3_reg: regulator@3 {
@@ -598,6 +600,8 @@
 			regulator-max-microvolt = <5000000>;
 			enable-active-high;
 			gpio = <&tca6416 1 0>; /* GPIO_PMU1 */
+			regulator-always-on;
+			regulator-boot-on;
 		};
 	};
 
diff --git a/arch/arm/include/asm/smp_plat.h b/arch/arm/include/asm/smp_plat.h
index 6462a721ebd4..a252c0bfacf5 100644
--- a/arch/arm/include/asm/smp_plat.h
+++ b/arch/arm/include/asm/smp_plat.h
@@ -88,4 +88,7 @@ static inline u32 mpidr_hash_size(void)
 {
 	return 1 << mpidr_hash.bits;
 }
+
+extern int platform_can_cpu_hotplug(void);
+
 #endif
diff --git a/arch/arm/include/asm/spinlock.h b/arch/arm/include/asm/spinlock.h
index f8b8965666e9..b07c09e5a0ac 100644
--- a/arch/arm/include/asm/spinlock.h
+++ b/arch/arm/include/asm/spinlock.h
@@ -107,7 +107,7 @@ static inline int arch_spin_trylock(arch_spinlock_t *lock)
 "	subs	%1, %0, %0, ror #16\n"
 "	addeq	%0, %0, %4\n"
 "	strexeq	%2, %0, [%3]"
-	: "=&r" (slock), "=&r" (contended), "=r" (res)
+	: "=&r" (slock), "=&r" (contended), "=&r" (res)
 	: "r" (&lock->slock), "I" (1 << TICKET_SHIFT)
 	: "cc");
 	} while (res);
@@ -168,17 +168,20 @@ static inline void arch_write_lock(arch_rwlock_t *rw)
 
 static inline int arch_write_trylock(arch_rwlock_t *rw)
 {
-	unsigned long tmp;
+	unsigned long contended, res;
 
-	__asm__ __volatile__(
-"	ldrex	%0, [%1]\n"
-"	teq	%0, #0\n"
-"	strexeq	%0, %2, [%1]"
-	: "=&r" (tmp)
-	: "r" (&rw->lock), "r" (0x80000000)
-	: "cc");
+	do {
+		__asm__ __volatile__(
+		"	ldrex	%0, [%2]\n"
+		"	mov	%1, #0\n"
+		"	teq	%0, #0\n"
+		"	strexeq	%1, %3, [%2]"
+		: "=&r" (contended), "=&r" (res)
+		: "r" (&rw->lock), "r" (0x80000000)
+		: "cc");
+	} while (res);
 
-	if (tmp == 0) {
+	if (!contended) {
 		smp_mb();
 		return 1;
 	} else {
@@ -254,18 +257,26 @@ static inline void arch_read_unlock(arch_rwlock_t *rw)
 
 static inline int arch_read_trylock(arch_rwlock_t *rw)
 {
-	unsigned long tmp, tmp2 = 1;
+	unsigned long contended, res;
 
-	__asm__ __volatile__(
-"	ldrex	%0, [%2]\n"
-"	adds	%0, %0, #1\n"
-"	strexpl	%1, %0, [%2]\n"
-	: "=&r" (tmp), "+r" (tmp2)
-	: "r" (&rw->lock)
-	: "cc");
+	do {
+		__asm__ __volatile__(
+		"	ldrex	%0, [%2]\n"
+		"	mov	%1, #0\n"
+		"	adds	%0, %0, #1\n"
+		"	strexpl	%1, %0, [%2]"
+		: "=&r" (contended), "=&r" (res)
+		: "r" (&rw->lock)
+		: "cc");
+	} while (res);
 
-	smp_mb();
-	return tmp2 == 0;
+	/* If the lock is negative, then it is already held for write. */
+	if (contended < 0x80000000) {
+		smp_mb();
+		return 1;
+	} else {
+		return 0;
+	}
 }
 
 /* read_can_lock - would read_trylock() succeed? */
diff --git a/arch/arm/include/asm/tlb.h b/arch/arm/include/asm/tlb.h
index 46e7cfb3e721..0baf7f0d9394 100644
--- a/arch/arm/include/asm/tlb.h
+++ b/arch/arm/include/asm/tlb.h
@@ -43,6 +43,7 @@ struct mmu_gather {
 	struct mm_struct	*mm;
 	unsigned int		fullmm;
 	struct vm_area_struct	*vma;
+	unsigned long		start, end;
 	unsigned long		range_start;
 	unsigned long		range_end;
 	unsigned int		nr;
@@ -107,10 +108,12 @@ static inline void tlb_flush_mmu(struct mmu_gather *tlb)
 }
 
 static inline void
-tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned int fullmm)
+tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end)
 {
 	tlb->mm = mm;
-	tlb->fullmm = fullmm;
+	tlb->fullmm = !(start | (end+1));
+	tlb->start = start;
+	tlb->end = end;
 	tlb->vma = NULL;
 	tlb->max = ARRAY_SIZE(tlb->local);
 	tlb->pages = tlb->local;
diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
index d40d0ef389db..9cbe70c8b0ef 100644
--- a/arch/arm/kernel/entry-armv.S
+++ b/arch/arm/kernel/entry-armv.S
@@ -357,7 +357,8 @@ ENDPROC(__pabt_svc)
 	.endm
 
 	.macro	kuser_cmpxchg_check
-#if !defined(CONFIG_CPU_32v6K) && !defined(CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG)
+#if !defined(CONFIG_CPU_32v6K) && defined(CONFIG_KUSER_HELPERS) && \
+    !defined(CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG)
 #ifndef CONFIG_MMU
 #warning "NPTL on non MMU needs fixing"
 #else
diff --git a/arch/arm/kernel/fiq.c b/arch/arm/kernel/fiq.c
index 25442f451148..918875d96d5d 100644
--- a/arch/arm/kernel/fiq.c
+++ b/arch/arm/kernel/fiq.c
@@ -84,17 +84,14 @@ int show_fiq_list(struct seq_file *p, int prec)
 
 void set_fiq_handler(void *start, unsigned int length)
 {
-#if defined(CONFIG_CPU_USE_DOMAINS)
-	void *base = (void *)0xffff0000;
-#else
 	void *base = vectors_page;
-#endif
 	unsigned offset = FIQ_OFFSET;
 
 	memcpy(base + offset, start, length);
+	if (!cache_is_vipt_nonaliasing())
+		flush_icache_range((unsigned long)base + offset, offset +
+				   length);
 	flush_icache_range(0xffff0000 + offset, 0xffff0000 + offset + length);
-	if (!vectors_high())
-		flush_icache_range(offset, offset + length);
 }
 
 int claim_fiq(struct fiq_handler *f)
diff --git a/arch/arm/kernel/machine_kexec.c b/arch/arm/kernel/machine_kexec.c
index 4fb074c446bf..57221e349a7c 100644
--- a/arch/arm/kernel/machine_kexec.c
+++ b/arch/arm/kernel/machine_kexec.c
@@ -15,6 +15,7 @@
 #include <asm/mmu_context.h>
 #include <asm/cacheflush.h>
 #include <asm/mach-types.h>
+#include <asm/smp_plat.h>
 #include <asm/system_misc.h>
 
 extern const unsigned char relocate_new_kernel[];
@@ -39,6 +40,14 @@ int machine_kexec_prepare(struct kimage *image)
 	int i, err;
 
 	/*
+	 * Validate that if the current HW supports SMP, then the SW supports
+	 * and implements CPU hotplug for the current HW. If not, we won't be
+	 * able to kexec reliably, so fail the prepare operation.
+	 */
+	if (num_possible_cpus() > 1 && !platform_can_cpu_hotplug())
+		return -EINVAL;
+
+	/*
 	 * No segment at default ATAGs address. try to locate
 	 * a dtb using magic.
 	 */
@@ -73,6 +82,7 @@ void machine_crash_nonpanic_core(void *unused)
 	crash_save_cpu(&regs, smp_processor_id());
 	flush_cache_all();
 
+	set_cpu_online(smp_processor_id(), false);
 	atomic_dec(&waiting_for_crash_ipi);
 	while (1)
 		cpu_relax();
@@ -134,10 +144,13 @@ void machine_kexec(struct kimage *image)
 	unsigned long reboot_code_buffer_phys;
 	void *reboot_code_buffer;
 
-	if (num_online_cpus() > 1) {
-		pr_err("kexec: error: multiple CPUs still online\n");
-		return;
-	}
+	/*
+	 * This can only happen if machine_shutdown() failed to disable some
+	 * CPU, and that can only happen if the checks in
+	 * machine_kexec_prepare() were not correct. If this fails, we can't
+	 * reliably kexec anyway, so BUG_ON is appropriate.
+	 */
+	BUG_ON(num_online_cpus() > 1);
 
 	page_list = image->head & PAGE_MASK;
 
diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c
index d9f5cd4e533f..e186ee1e63f6 100644
--- a/arch/arm/kernel/perf_event.c
+++ b/arch/arm/kernel/perf_event.c
@@ -53,7 +53,12 @@ armpmu_map_cache_event(const unsigned (*cache_map)
 static int
 armpmu_map_hw_event(const unsigned (*event_map)[PERF_COUNT_HW_MAX], u64 config)
 {
-	int mapping = (*event_map)[config];
+	int mapping;
+
+	if (config >= PERF_COUNT_HW_MAX)
+		return -EINVAL;
+
+	mapping = (*event_map)[config];
 	return mapping == HW_OP_UNSUPPORTED ? -ENOENT : mapping;
 }
 
@@ -253,6 +258,9 @@ validate_event(struct pmu_hw_events *hw_events,
 	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
 	struct pmu *leader_pmu = event->group_leader->pmu;
 
+	if (is_software_event(event))
+		return 1;
+
 	if (event->pmu != leader_pmu || event->state < PERF_EVENT_STATE_OFF)
 		return 1;
 
diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
index 536c85fe72a8..94f6b05f9e24 100644
--- a/arch/arm/kernel/process.c
+++ b/arch/arm/kernel/process.c
@@ -462,7 +462,7 @@ int in_gate_area_no_mm(unsigned long addr)
 {
 	return in_gate_area(NULL, addr);
 }
-#define is_gate_vma(vma)	((vma) = &gate_vma)
+#define is_gate_vma(vma)	((vma) == &gate_vma)
 #else
 #define is_gate_vma(vma)	0
 #endif
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index c2b4f8f0be9a..2dc19349eb19 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -145,6 +145,16 @@ int boot_secondary(unsigned int cpu, struct task_struct *idle)
 	return -ENOSYS;
 }
 
+int platform_can_cpu_hotplug(void)
+{
+#ifdef CONFIG_HOTPLUG_CPU
+	if (smp_ops.cpu_kill)
+		return 1;
+#endif
+
+	return 0;
+}
+
 #ifdef CONFIG_HOTPLUG_CPU
 static void percpu_timer_stop(void);
 
diff --git a/arch/arm/kvm/coproc.c b/arch/arm/kvm/coproc.c
index 4a5199070430..db9cf692d4dd 100644
--- a/arch/arm/kvm/coproc.c
+++ b/arch/arm/kvm/coproc.c
@@ -146,7 +146,11 @@ static bool pm_fake(struct kvm_vcpu *vcpu,
 #define access_pmintenclr pm_fake
 
 /* Architected CP15 registers.
- * Important: Must be sorted ascending by CRn, CRM, Op1, Op2
+ * CRn denotes the primary register number, but is copied to the CRm in the
+ * user space API for 64-bit register access in line with the terminology used
+ * in the ARM ARM.
+ * Important: Must be sorted ascending by CRn, CRM, Op1, Op2 and with 64-bit
+ * registers preceding 32-bit ones.
  */
 static const struct coproc_reg cp15_regs[] = {
 	/* CSSELR: swapped by interrupt.S. */
@@ -154,8 +158,8 @@ static const struct coproc_reg cp15_regs[] = {
 	  NULL, reset_unknown, c0_CSSELR },
 
 	/* TTBR0/TTBR1: swapped by interrupt.S. */
-	{ CRm( 2), Op1( 0), is64, NULL, reset_unknown64, c2_TTBR0 },
-	{ CRm( 2), Op1( 1), is64, NULL, reset_unknown64, c2_TTBR1 },
+	{ CRm64( 2), Op1( 0), is64, NULL, reset_unknown64, c2_TTBR0 },
+	{ CRm64( 2), Op1( 1), is64, NULL, reset_unknown64, c2_TTBR1 },
 
 	/* TTBCR: swapped by interrupt.S. */
 	{ CRn( 2), CRm( 0), Op1( 0), Op2( 2), is32,
@@ -182,7 +186,7 @@ static const struct coproc_reg cp15_regs[] = {
 	  NULL, reset_unknown, c6_IFAR },
 
 	/* PAR swapped by interrupt.S */
-	{ CRn( 7), Op1( 0), is64, NULL, reset_unknown64, c7_PAR },
+	{ CRm64( 7), Op1( 0), is64, NULL, reset_unknown64, c7_PAR },
 
 	/*
 	 * DC{C,I,CI}SW operations:
@@ -399,12 +403,13 @@ static bool index_to_params(u64 id, struct coproc_params *params)
 		       | KVM_REG_ARM_OPC1_MASK))
 			return false;
 		params->is_64bit = true;
-		params->CRm = ((id & KVM_REG_ARM_CRM_MASK)
+		/* CRm to CRn: see cp15_to_index for details */
+		params->CRn = ((id & KVM_REG_ARM_CRM_MASK)
 			       >> KVM_REG_ARM_CRM_SHIFT);
 		params->Op1 = ((id & KVM_REG_ARM_OPC1_MASK)
 			       >> KVM_REG_ARM_OPC1_SHIFT);
 		params->Op2 = 0;
-		params->CRn = 0;
+		params->CRm = 0;
 		return true;
 	default:
 		return false;
@@ -898,7 +903,14 @@ static u64 cp15_to_index(const struct coproc_reg *reg)
 	if (reg->is_64) {
 		val |= KVM_REG_SIZE_U64;
 		val |= (reg->Op1 << KVM_REG_ARM_OPC1_SHIFT);
-		val |= (reg->CRm << KVM_REG_ARM_CRM_SHIFT);
+		/*
+		 * CRn always denotes the primary coproc. reg. nr. for the
+		 * in-kernel representation, but the user space API uses the
+		 * CRm for the encoding, because it is modelled after the
+		 * MRRC/MCRR instructions: see the ARM ARM rev. c page
+		 * B3-1445
+		 */
+		val |= (reg->CRn << KVM_REG_ARM_CRM_SHIFT);
 	} else {
 		val |= KVM_REG_SIZE_U32;
 		val |= (reg->Op1 << KVM_REG_ARM_OPC1_SHIFT);
diff --git a/arch/arm/kvm/coproc.h b/arch/arm/kvm/coproc.h
index b7301d3e4799..0461d5c8d3de 100644
--- a/arch/arm/kvm/coproc.h
+++ b/arch/arm/kvm/coproc.h
@@ -135,6 +135,8 @@ static inline int cmp_reg(const struct coproc_reg *i1,
 		return -1;
 	if (i1->CRn != i2->CRn)
 		return i1->CRn - i2->CRn;
+	if (i1->is_64 != i2->is_64)
+		return i2->is_64 - i1->is_64;
 	if (i1->CRm != i2->CRm)
 		return i1->CRm - i2->CRm;
 	if (i1->Op1 != i2->Op1)
@@ -145,6 +147,7 @@ static inline int cmp_reg(const struct coproc_reg *i1,
 
 #define CRn(_x)		.CRn = _x
 #define CRm(_x) 	.CRm = _x
+#define CRm64(_x)	.CRn = _x, .CRm = 0
 #define Op1(_x) 	.Op1 = _x
 #define Op2(_x) 	.Op2 = _x
 #define is64		.is_64 = true
diff --git a/arch/arm/kvm/coproc_a15.c b/arch/arm/kvm/coproc_a15.c
index 685063a6d0cf..cf93472b9dd6 100644
--- a/arch/arm/kvm/coproc_a15.c
+++ b/arch/arm/kvm/coproc_a15.c
@@ -114,7 +114,11 @@ static bool access_l2ectlr(struct kvm_vcpu *vcpu,
 
 /*
  * A15-specific CP15 registers.
- * Important: Must be sorted ascending by CRn, CRM, Op1, Op2
+ * CRn denotes the primary register number, but is copied to the CRm in the
+ * user space API for 64-bit register access in line with the terminology used
+ * in the ARM ARM.
+ * Important: Must be sorted ascending by CRn, CRM, Op1, Op2 and with 64-bit
+ * registers preceding 32-bit ones.
  */
 static const struct coproc_reg a15_regs[] = {
 	/* MPIDR: we use VMPIDR for guest access. */
diff --git a/arch/arm/kvm/mmio.c b/arch/arm/kvm/mmio.c
index b8e06b7a2833..0c25d9487d53 100644
--- a/arch/arm/kvm/mmio.c
+++ b/arch/arm/kvm/mmio.c
@@ -63,7 +63,8 @@ int kvm_handle_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run)
 static int decode_hsr(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 		      struct kvm_exit_mmio *mmio)
 {
-	unsigned long rt, len;
+	unsigned long rt;
+	int len;
 	bool is_write, sign_extend;
 
 	if (kvm_vcpu_dabt_isextabt(vcpu)) {
diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c
index ca6bea4859b4..0988d9e04dd4 100644
--- a/arch/arm/kvm/mmu.c
+++ b/arch/arm/kvm/mmu.c
@@ -85,6 +85,12 @@ static void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc)
 	return p;
 }
 
+static bool page_empty(void *ptr)
+{
+	struct page *ptr_page = virt_to_page(ptr);
+	return page_count(ptr_page) == 1;
+}
+
 static void clear_pud_entry(struct kvm *kvm, pud_t *pud, phys_addr_t addr)
 {
 	pmd_t *pmd_table = pmd_offset(pud, 0);
@@ -103,12 +109,6 @@ static void clear_pmd_entry(struct kvm *kvm, pmd_t *pmd, phys_addr_t addr)
 	put_page(virt_to_page(pmd));
 }
 
-static bool pmd_empty(pmd_t *pmd)
-{
-	struct page *pmd_page = virt_to_page(pmd);
-	return page_count(pmd_page) == 1;
-}
-
 static void clear_pte_entry(struct kvm *kvm, pte_t *pte, phys_addr_t addr)
 {
 	if (pte_present(*pte)) {
@@ -118,12 +118,6 @@ static void clear_pte_entry(struct kvm *kvm, pte_t *pte, phys_addr_t addr)
 	}
 }
 
-static bool pte_empty(pte_t *pte)
-{
-	struct page *pte_page = virt_to_page(pte);
-	return page_count(pte_page) == 1;
-}
-
 static void unmap_range(struct kvm *kvm, pgd_t *pgdp,
 			unsigned long long start, u64 size)
 {
@@ -132,37 +126,37 @@ static void unmap_range(struct kvm *kvm, pgd_t *pgdp,
 	pmd_t *pmd;
 	pte_t *pte;
 	unsigned long long addr = start, end = start + size;
-	u64 range;
+	u64 next;
 
 	while (addr < end) {
 		pgd = pgdp + pgd_index(addr);
 		pud = pud_offset(pgd, addr);
 		if (pud_none(*pud)) {
-			addr += PUD_SIZE;
+			addr = pud_addr_end(addr, end);
 			continue;
 		}
 
 		pmd = pmd_offset(pud, addr);
 		if (pmd_none(*pmd)) {
-			addr += PMD_SIZE;
+			addr = pmd_addr_end(addr, end);
 			continue;
 		}
 
 		pte = pte_offset_kernel(pmd, addr);
 		clear_pte_entry(kvm, pte, addr);
-		range = PAGE_SIZE;
+		next = addr + PAGE_SIZE;
 
 		/* If we emptied the pte, walk back up the ladder */
-		if (pte_empty(pte)) {
+		if (page_empty(pte)) {
 			clear_pmd_entry(kvm, pmd, addr);
-			range = PMD_SIZE;
-			if (pmd_empty(pmd)) {
+			next = pmd_addr_end(addr, end);
+			if (page_empty(pmd) && !page_empty(pud)) {
 				clear_pud_entry(kvm, pud, addr);
-				range = PUD_SIZE;
+				next = pud_addr_end(addr, end);
 			}
 		}
 
-		addr += range;
+		addr = next;
 	}
 }
 
diff --git a/arch/arm/mach-at91/at91sam9x5.c b/arch/arm/mach-at91/at91sam9x5.c
index 2abee6626aac..916e5a142917 100644
--- a/arch/arm/mach-at91/at91sam9x5.c
+++ b/arch/arm/mach-at91/at91sam9x5.c
@@ -227,6 +227,8 @@ static struct clk_lookup periph_clocks_lookups[] = {
 	CLKDEV_CON_DEV_ID("usart", "f8020000.serial", &usart1_clk),
 	CLKDEV_CON_DEV_ID("usart", "f8024000.serial", &usart2_clk),
 	CLKDEV_CON_DEV_ID("usart", "f8028000.serial", &usart3_clk),
+	CLKDEV_CON_DEV_ID("usart", "f8040000.serial", &uart0_clk),
+	CLKDEV_CON_DEV_ID("usart", "f8044000.serial", &uart1_clk),
 	CLKDEV_CON_DEV_ID("t0_clk", "f8008000.timer", &tcb0_clk),
 	CLKDEV_CON_DEV_ID("t0_clk", "f800c000.timer", &tcb0_clk),
 	CLKDEV_CON_DEV_ID("mci_clk", "f0008000.mmc", &mmc0_clk),
diff --git a/arch/arm/mach-davinci/board-dm355-leopard.c b/arch/arm/mach-davinci/board-dm355-leopard.c
index dff4ddc5ef81..139e42da25f0 100644
--- a/arch/arm/mach-davinci/board-dm355-leopard.c
+++ b/arch/arm/mach-davinci/board-dm355-leopard.c
@@ -75,6 +75,7 @@ static struct davinci_nand_pdata davinci_nand_data = {
 	.parts			= davinci_nand_partitions,
 	.nr_parts		= ARRAY_SIZE(davinci_nand_partitions),
 	.ecc_mode		= NAND_ECC_HW_SYNDROME,
+	.ecc_bits		= 4,
 	.bbt_options		= NAND_BBT_USE_FLASH,
 };
 
diff --git a/arch/arm/mach-davinci/board-dm644x-evm.c b/arch/arm/mach-davinci/board-dm644x-evm.c
index a33686a6fbb2..fa4bfaf952d8 100644
--- a/arch/arm/mach-davinci/board-dm644x-evm.c
+++ b/arch/arm/mach-davinci/board-dm644x-evm.c
@@ -153,6 +153,7 @@ static struct davinci_nand_pdata davinci_evm_nandflash_data = {
 	.parts			= davinci_evm_nandflash_partition,
 	.nr_parts		= ARRAY_SIZE(davinci_evm_nandflash_partition),
 	.ecc_mode		= NAND_ECC_HW,
+	.ecc_bits		= 1,
 	.bbt_options		= NAND_BBT_USE_FLASH,
 	.timing			= &davinci_evm_nandflash_timing,
 };
diff --git a/arch/arm/mach-davinci/board-dm646x-evm.c b/arch/arm/mach-davinci/board-dm646x-evm.c
index fbb8e5ab1dc1..0c005e876cac 100644
--- a/arch/arm/mach-davinci/board-dm646x-evm.c
+++ b/arch/arm/mach-davinci/board-dm646x-evm.c
@@ -90,6 +90,7 @@ static struct davinci_nand_pdata davinci_nand_data = {
 	.parts			= davinci_nand_partitions,
 	.nr_parts		= ARRAY_SIZE(davinci_nand_partitions),
 	.ecc_mode		= NAND_ECC_HW,
+	.ecc_bits		= 1,
 	.options		= 0,
 };
 
diff --git a/arch/arm/mach-davinci/board-neuros-osd2.c b/arch/arm/mach-davinci/board-neuros-osd2.c
index 2bc112adf565..808233b60e3d 100644
--- a/arch/arm/mach-davinci/board-neuros-osd2.c
+++ b/arch/arm/mach-davinci/board-neuros-osd2.c
@@ -88,6 +88,7 @@ static struct davinci_nand_pdata davinci_ntosd2_nandflash_data = {
 	.parts			= davinci_ntosd2_nandflash_partition,
 	.nr_parts		= ARRAY_SIZE(davinci_ntosd2_nandflash_partition),
 	.ecc_mode		= NAND_ECC_HW,
+	.ecc_bits		= 1,
 	.bbt_options		= NAND_BBT_USE_FLASH,
 };
 
diff --git a/arch/arm/mach-omap2/board-n8x0.c b/arch/arm/mach-omap2/board-n8x0.c
index f6eeb87e4e95..827d15009a86 100644
--- a/arch/arm/mach-omap2/board-n8x0.c
+++ b/arch/arm/mach-omap2/board-n8x0.c
@@ -122,11 +122,7 @@ static struct musb_hdrc_config musb_config = {
 };
 
 static struct musb_hdrc_platform_data tusb_data = {
-#ifdef CONFIG_USB_GADGET_MUSB_HDRC
 	.mode		= MUSB_OTG,
-#else
-	.mode		= MUSB_HOST,
-#endif
 	.set_power	= tusb_set_power,
 	.min_power	= 25,	/* x2 = 50 mA drawn from VBUS as peripheral */
 	.power		= 100,	/* Max 100 mA VBUS for host mode */
diff --git a/arch/arm/mach-omap2/board-rx51.c b/arch/arm/mach-omap2/board-rx51.c
index d2ea68ea678a..7735105561d8 100644
--- a/arch/arm/mach-omap2/board-rx51.c
+++ b/arch/arm/mach-omap2/board-rx51.c
@@ -85,7 +85,7 @@ static struct omap_board_mux board_mux[] __initdata = {
 
 static struct omap_musb_board_data musb_board_data = {
 	.interface_type		= MUSB_INTERFACE_ULPI,
-	.mode			= MUSB_PERIPHERAL,
+	.mode			= MUSB_OTG,
 	.power			= 0,
 };
 
diff --git a/arch/arm/mach-omap2/usb-musb.c b/arch/arm/mach-omap2/usb-musb.c
index 8c4de2708cf2..bc897231bd10 100644
--- a/arch/arm/mach-omap2/usb-musb.c
+++ b/arch/arm/mach-omap2/usb-musb.c
@@ -38,11 +38,8 @@ static struct musb_hdrc_config musb_config = {
 };
 
 static struct musb_hdrc_platform_data musb_plat = {
-#ifdef CONFIG_USB_GADGET_MUSB_HDRC
 	.mode		= MUSB_OTG,
-#else
-	.mode		= MUSB_HOST,
-#endif
+
 	/* .clock is set dynamically */
 	.config		= &musb_config,
 
diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
index db5c2cab8fda..cd2c88e7a8f7 100644
--- a/arch/arm/mm/Kconfig
+++ b/arch/arm/mm/Kconfig
@@ -809,15 +809,18 @@ config KUSER_HELPERS
 	  the CPU type fitted to the system. This permits binaries to be
 	  run on ARMv4 through to ARMv7 without modification.
 
+	  See Documentation/arm/kernel_user_helpers.txt for details.
+
 	  However, the fixed address nature of these helpers can be used
 	  by ROP (return orientated programming) authors when creating
 	  exploits.
 
 	  If all of the binaries and libraries which run on your platform
 	  are built specifically for your platform, and make no use of
-	  these helpers, then you can turn this option off. However,
-	  when such an binary or library is run, it will receive a SIGILL
-	  signal, which will terminate the program.
+	  these helpers, then you can turn this option off to hinder
+	  such exploits. However, in that case, if a binary or library
+	  relying on those helpers is run, it will receive a SIGILL signal,
+	  which will terminate the program.
 
 	  Say N here only if you are absolutely certain that you do not
 	  need these helpers; otherwise, the safe option is to say Y.
diff --git a/arch/arm/plat-samsung/init.c b/arch/arm/plat-samsung/init.c
index 3e5c4619caa5..50a3ea0037db 100644
--- a/arch/arm/plat-samsung/init.c
+++ b/arch/arm/plat-samsung/init.c
@@ -55,12 +55,13 @@ void __init s3c_init_cpu(unsigned long idcode,
 
 	printk("CPU %s (id 0x%08lx)\n", cpu->name, idcode);
 
-	if (cpu->map_io == NULL || cpu->init == NULL) {
+	if (cpu->init == NULL) {
 		printk(KERN_ERR "CPU %s support not enabled\n", cpu->name);
 		panic("Unsupported Samsung CPU");
 	}
 
-	cpu->map_io();
+	if (cpu->map_io)
+		cpu->map_io();
 }
 
 /* s3c24xx_init_clocks
diff --git a/arch/arm/xen/enlighten.c b/arch/arm/xen/enlighten.c
index c9770ba5c7df..8a6295c86209 100644
--- a/arch/arm/xen/enlighten.c
+++ b/arch/arm/xen/enlighten.c
@@ -170,6 +170,7 @@ static void __init xen_percpu_init(void *unused)
 	per_cpu(xen_vcpu, cpu) = vcpup;
 
 	enable_percpu_irq(xen_events_irq, 0);
+	put_cpu();
 }
 
 static void xen_restart(enum reboot_mode reboot_mode, const char *cmd)
diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h
index c92de4163eba..b25763bc0ec4 100644
--- a/arch/arm64/include/asm/kvm_asm.h
+++ b/arch/arm64/include/asm/kvm_asm.h
@@ -42,14 +42,15 @@
 #define TPIDR_EL1	18	/* Thread ID, Privileged */
 #define AMAIR_EL1	19	/* Aux Memory Attribute Indirection Register */
 #define CNTKCTL_EL1	20	/* Timer Control Register (EL1) */
+#define PAR_EL1		21	/* Physical Address Register */
 /* 32bit specific registers. Keep them at the end of the range */
-#define	DACR32_EL2	21	/* Domain Access Control Register */
-#define	IFSR32_EL2	22	/* Instruction Fault Status Register */
-#define	FPEXC32_EL2	23	/* Floating-Point Exception Control Register */
-#define	DBGVCR32_EL2	24	/* Debug Vector Catch Register */
-#define	TEECR32_EL1	25	/* ThumbEE Configuration Register */
-#define	TEEHBR32_EL1	26	/* ThumbEE Handler Base Register */
-#define	NR_SYS_REGS	27
+#define	DACR32_EL2	22	/* Domain Access Control Register */
+#define	IFSR32_EL2	23	/* Instruction Fault Status Register */
+#define	FPEXC32_EL2	24	/* Floating-Point Exception Control Register */
+#define	DBGVCR32_EL2	25	/* Debug Vector Catch Register */
+#define	TEECR32_EL1	26	/* ThumbEE Configuration Register */
+#define	TEEHBR32_EL1	27	/* ThumbEE Handler Base Register */
+#define	NR_SYS_REGS	28
 
 /* 32bit mapping */
 #define c0_MPIDR	(MPIDR_EL1 * 2)	/* MultiProcessor ID Register */
@@ -69,6 +70,8 @@
69#define c5_AIFSR (AFSR1_EL1 * 2) /* Auxiliary Instr Fault Status R */ 70#define c5_AIFSR (AFSR1_EL1 * 2) /* Auxiliary Instr Fault Status R */
70#define c6_DFAR (FAR_EL1 * 2) /* Data Fault Address Register */ 71#define c6_DFAR (FAR_EL1 * 2) /* Data Fault Address Register */
71#define c6_IFAR (c6_DFAR + 1) /* Instruction Fault Address Register */ 72#define c6_IFAR (c6_DFAR + 1) /* Instruction Fault Address Register */
73#define c7_PAR (PAR_EL1 * 2) /* Physical Address Register */
74#define c7_PAR_high (c7_PAR + 1) /* PAR top 32 bits */
72#define c10_PRRR (MAIR_EL1 * 2) /* Primary Region Remap Register */ 75#define c10_PRRR (MAIR_EL1 * 2) /* Primary Region Remap Register */
73#define c10_NMRR (c10_PRRR + 1) /* Normal Memory Remap Register */ 76#define c10_NMRR (c10_PRRR + 1) /* Normal Memory Remap Register */
74#define c12_VBAR (VBAR_EL1 * 2) /* Vector Base Address Register */ 77#define c12_VBAR (VBAR_EL1 * 2) /* Vector Base Address Register */
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index 644d73956864..0859a4ddd1e7 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -129,7 +129,7 @@ struct kvm_vcpu_arch {
129 struct kvm_mmu_memory_cache mmu_page_cache; 129 struct kvm_mmu_memory_cache mmu_page_cache;
130 130
131 /* Target CPU and feature flags */ 131 /* Target CPU and feature flags */
132 u32 target; 132 int target;
133 DECLARE_BITMAP(features, KVM_VCPU_MAX_FEATURES); 133 DECLARE_BITMAP(features, KVM_VCPU_MAX_FEATURES);
134 134
135 /* Detect first run of a vcpu */ 135 /* Detect first run of a vcpu */
diff --git a/arch/arm64/include/asm/tlb.h b/arch/arm64/include/asm/tlb.h
index 46b3beb4b773..717031a762c2 100644
--- a/arch/arm64/include/asm/tlb.h
+++ b/arch/arm64/include/asm/tlb.h
@@ -35,6 +35,7 @@ struct mmu_gather {
35 struct mm_struct *mm; 35 struct mm_struct *mm;
36 unsigned int fullmm; 36 unsigned int fullmm;
37 struct vm_area_struct *vma; 37 struct vm_area_struct *vma;
38 unsigned long start, end;
38 unsigned long range_start; 39 unsigned long range_start;
39 unsigned long range_end; 40 unsigned long range_end;
40 unsigned int nr; 41 unsigned int nr;
@@ -97,10 +98,12 @@ static inline void tlb_flush_mmu(struct mmu_gather *tlb)
97} 98}
98 99
99static inline void 100static inline void
100tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned int fullmm) 101tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end)
101{ 102{
102 tlb->mm = mm; 103 tlb->mm = mm;
103 tlb->fullmm = fullmm; 104 tlb->fullmm = !(start | (end+1));
105 tlb->start = start;
106 tlb->end = end;
104 tlb->vma = NULL; 107 tlb->vma = NULL;
105 tlb->max = ARRAY_SIZE(tlb->local); 108 tlb->max = ARRAY_SIZE(tlb->local);
106 tlb->pages = tlb->local; 109 tlb->pages = tlb->local;
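Every tlb_gather_mmu() conversion in this series (arm64 here, ia64, s390, sh and um below) uses the same convention: a full address-space teardown is requested with start = 0 and end = -1, so the fullmm flag can be recomputed as !(start | (end + 1)). A small standalone check of that expression:

/* The fullmm encoding used by the converted tlb_gather_mmu() variants:
 * start == 0 and end == ~0UL ("whole address space") gives 1, any
 * bounded range gives 0.
 */
#include <assert.h>

static unsigned int fullmm_flag(unsigned long start, unsigned long end)
{
	return !(start | (end + 1));
}

int main(void)
{
	assert(fullmm_flag(0, ~0UL) == 1);		/* full-mm flush */
	assert(fullmm_flag(0x1000, 0x2000) == 0);	/* ranged flush */
	return 0;
}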
diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c
index 9ba33c40cdf8..12e6ccb88691 100644
--- a/arch/arm64/kernel/perf_event.c
+++ b/arch/arm64/kernel/perf_event.c
@@ -107,7 +107,12 @@ armpmu_map_cache_event(const unsigned (*cache_map)
107static int 107static int
108armpmu_map_event(const unsigned (*event_map)[PERF_COUNT_HW_MAX], u64 config) 108armpmu_map_event(const unsigned (*event_map)[PERF_COUNT_HW_MAX], u64 config)
109{ 109{
110 int mapping = (*event_map)[config]; 110 int mapping;
111
112 if (config >= PERF_COUNT_HW_MAX)
113 return -EINVAL;
114
115 mapping = (*event_map)[config];
111 return mapping == HW_OP_UNSUPPORTED ? -ENOENT : mapping; 116 return mapping == HW_OP_UNSUPPORTED ? -ENOENT : mapping;
112} 117}
113 118
@@ -317,6 +322,9 @@ validate_event(struct pmu_hw_events *hw_events,
317 struct hw_perf_event fake_event = event->hw; 322 struct hw_perf_event fake_event = event->hw;
318 struct pmu *leader_pmu = event->group_leader->pmu; 323 struct pmu *leader_pmu = event->group_leader->pmu;
319 324
325 if (is_software_event(event))
326 return 1;
327
320 if (event->pmu != leader_pmu || event->state <= PERF_EVENT_STATE_OFF) 328 if (event->pmu != leader_pmu || event->state <= PERF_EVENT_STATE_OFF)
321 return 1; 329 return 1;
322 330
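The first hunk range-checks the user-supplied config value before using it as an index into the event map, and the second lets pure software events pass group validation unconditionally. A minimal sketch of the check-then-index idiom, with an illustrative table:

/* Map a user-controlled event id through a fixed-size table only after
 * bounds-checking it; table contents are illustrative.
 */
#include <errno.h>
#include <stdint.h>

#define HW_OP_UNSUPPORTED	0xffffU
#define NUM_HW_EVENTS		8

static const unsigned int hw_event_map[NUM_HW_EVENTS] = {
	[0] = 0x11,
	[1] = HW_OP_UNSUPPORTED,
};

static int map_hw_event(uint64_t config)
{
	unsigned int mapping;

	if (config >= NUM_HW_EVENTS)	/* reject out-of-range ids first */
		return -EINVAL;

	mapping = hw_event_map[config];
	return mapping == HW_OP_UNSUPPORTED ? -ENOENT : mapping;
}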
diff --git a/arch/arm64/kvm/hyp.S b/arch/arm64/kvm/hyp.S
index ff985e3d8b72..1ac0bbbdddb2 100644
--- a/arch/arm64/kvm/hyp.S
+++ b/arch/arm64/kvm/hyp.S
@@ -214,6 +214,7 @@ __kvm_hyp_code_start:
214 mrs x21, tpidr_el1 214 mrs x21, tpidr_el1
215 mrs x22, amair_el1 215 mrs x22, amair_el1
216 mrs x23, cntkctl_el1 216 mrs x23, cntkctl_el1
217 mrs x24, par_el1
217 218
218 stp x4, x5, [x3] 219 stp x4, x5, [x3]
219 stp x6, x7, [x3, #16] 220 stp x6, x7, [x3, #16]
@@ -225,6 +226,7 @@ __kvm_hyp_code_start:
225 stp x18, x19, [x3, #112] 226 stp x18, x19, [x3, #112]
226 stp x20, x21, [x3, #128] 227 stp x20, x21, [x3, #128]
227 stp x22, x23, [x3, #144] 228 stp x22, x23, [x3, #144]
229 str x24, [x3, #160]
228.endm 230.endm
229 231
230.macro restore_sysregs 232.macro restore_sysregs
@@ -243,6 +245,7 @@ __kvm_hyp_code_start:
243 ldp x18, x19, [x3, #112] 245 ldp x18, x19, [x3, #112]
244 ldp x20, x21, [x3, #128] 246 ldp x20, x21, [x3, #128]
245 ldp x22, x23, [x3, #144] 247 ldp x22, x23, [x3, #144]
248 ldr x24, [x3, #160]
246 249
247 msr vmpidr_el2, x4 250 msr vmpidr_el2, x4
248 msr csselr_el1, x5 251 msr csselr_el1, x5
@@ -264,6 +267,7 @@ __kvm_hyp_code_start:
264 msr tpidr_el1, x21 267 msr tpidr_el1, x21
265 msr amair_el1, x22 268 msr amair_el1, x22
266 msr cntkctl_el1, x23 269 msr cntkctl_el1, x23
270 msr par_el1, x24
267.endm 271.endm
268 272
269.macro skip_32bit_state tmp, target 273.macro skip_32bit_state tmp, target
@@ -600,6 +604,8 @@ END(__kvm_vcpu_run)
600 604
601// void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa); 605// void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa);
602ENTRY(__kvm_tlb_flush_vmid_ipa) 606ENTRY(__kvm_tlb_flush_vmid_ipa)
607 dsb ishst
608
603 kern_hyp_va x0 609 kern_hyp_va x0
604 ldr x2, [x0, #KVM_VTTBR] 610 ldr x2, [x0, #KVM_VTTBR]
605 msr vttbr_el2, x2 611 msr vttbr_el2, x2
@@ -621,6 +627,7 @@ ENTRY(__kvm_tlb_flush_vmid_ipa)
621ENDPROC(__kvm_tlb_flush_vmid_ipa) 627ENDPROC(__kvm_tlb_flush_vmid_ipa)
622 628
623ENTRY(__kvm_flush_vm_context) 629ENTRY(__kvm_flush_vm_context)
630 dsb ishst
624 tlbi alle1is 631 tlbi alle1is
625 ic ialluis 632 ic ialluis
626 dsb sy 633 dsb sy
@@ -753,6 +760,10 @@ el1_trap:
753 */ 760 */
754 tbnz x1, #7, 1f // S1PTW is set 761 tbnz x1, #7, 1f // S1PTW is set
755 762
763 /* Preserve PAR_EL1 */
764 mrs x3, par_el1
765 push x3, xzr
766
756 /* 767 /*
757 * Permission fault, HPFAR_EL2 is invalid. 768 * Permission fault, HPFAR_EL2 is invalid.
758 * Resolve the IPA the hard way using the guest VA. 769 * Resolve the IPA the hard way using the guest VA.
@@ -766,6 +777,8 @@ el1_trap:
766 777
767 /* Read result */ 778 /* Read result */
768 mrs x3, par_el1 779 mrs x3, par_el1
780 pop x0, xzr // Restore PAR_EL1 from the stack
781 msr par_el1, x0
769 tbnz x3, #0, 3f // Bail out if we failed the translation 782 tbnz x3, #0, 3f // Bail out if we failed the translation
770 ubfx x3, x3, #12, #36 // Extract IPA 783 ubfx x3, x3, #12, #36 // Extract IPA
771 lsl x3, x3, #4 // and present it like HPFAR 784 lsl x3, x3, #4 // and present it like HPFAR
diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
index 94923609753b..02e9d09e1d80 100644
--- a/arch/arm64/kvm/sys_regs.c
+++ b/arch/arm64/kvm/sys_regs.c
@@ -211,6 +211,9 @@ static const struct sys_reg_desc sys_reg_descs[] = {
211 /* FAR_EL1 */ 211 /* FAR_EL1 */
212 { Op0(0b11), Op1(0b000), CRn(0b0110), CRm(0b0000), Op2(0b000), 212 { Op0(0b11), Op1(0b000), CRn(0b0110), CRm(0b0000), Op2(0b000),
213 NULL, reset_unknown, FAR_EL1 }, 213 NULL, reset_unknown, FAR_EL1 },
214 /* PAR_EL1 */
215 { Op0(0b11), Op1(0b000), CRn(0b0111), CRm(0b0100), Op2(0b000),
216 NULL, reset_unknown, PAR_EL1 },
214 217
215 /* PMINTENSET_EL1 */ 218 /* PMINTENSET_EL1 */
216 { Op0(0b11), Op1(0b000), CRn(0b1001), CRm(0b1110), Op2(0b001), 219 { Op0(0b11), Op1(0b000), CRn(0b1001), CRm(0b1110), Op2(0b001),
diff --git a/arch/hexagon/Kconfig b/arch/hexagon/Kconfig
index 33a97929d055..77d442ab28c8 100644
--- a/arch/hexagon/Kconfig
+++ b/arch/hexagon/Kconfig
@@ -158,6 +158,7 @@ source "kernel/Kconfig.hz"
158endmenu 158endmenu
159 159
160source "init/Kconfig" 160source "init/Kconfig"
161source "kernel/Kconfig.freezer"
161source "drivers/Kconfig" 162source "drivers/Kconfig"
162source "fs/Kconfig" 163source "fs/Kconfig"
163 164
diff --git a/arch/ia64/include/asm/tlb.h b/arch/ia64/include/asm/tlb.h
index ef3a9de01954..bc5efc7c3f3f 100644
--- a/arch/ia64/include/asm/tlb.h
+++ b/arch/ia64/include/asm/tlb.h
@@ -22,7 +22,7 @@
22 * unmapping a portion of the virtual address space, these hooks are called according to 22 * unmapping a portion of the virtual address space, these hooks are called according to
23 * the following template: 23 * the following template:
24 * 24 *
25 * tlb <- tlb_gather_mmu(mm, full_mm_flush); // start unmap for address space MM 25 * tlb <- tlb_gather_mmu(mm, start, end); // start unmap for address space MM
26 * { 26 * {
27 * for each vma that needs a shootdown do { 27 * for each vma that needs a shootdown do {
28 * tlb_start_vma(tlb, vma); 28 * tlb_start_vma(tlb, vma);
@@ -58,6 +58,7 @@ struct mmu_gather {
58 unsigned int max; 58 unsigned int max;
59 unsigned char fullmm; /* non-zero means full mm flush */ 59 unsigned char fullmm; /* non-zero means full mm flush */
60 unsigned char need_flush; /* really unmapped some PTEs? */ 60 unsigned char need_flush; /* really unmapped some PTEs? */
61 unsigned long start, end;
61 unsigned long start_addr; 62 unsigned long start_addr;
62 unsigned long end_addr; 63 unsigned long end_addr;
63 struct page **pages; 64 struct page **pages;
@@ -155,13 +156,15 @@ static inline void __tlb_alloc_page(struct mmu_gather *tlb)
155 156
156 157
157static inline void 158static inline void
158tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned int full_mm_flush) 159tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end)
159{ 160{
160 tlb->mm = mm; 161 tlb->mm = mm;
161 tlb->max = ARRAY_SIZE(tlb->local); 162 tlb->max = ARRAY_SIZE(tlb->local);
162 tlb->pages = tlb->local; 163 tlb->pages = tlb->local;
163 tlb->nr = 0; 164 tlb->nr = 0;
164 tlb->fullmm = full_mm_flush; 165 tlb->fullmm = !(start | (end+1));
166 tlb->start = start;
167 tlb->end = end;
165 tlb->start_addr = ~0UL; 168 tlb->start_addr = ~0UL;
166} 169}
167 170
diff --git a/arch/m68k/emu/natfeat.c b/arch/m68k/emu/natfeat.c
index 2291a7d69d49..fa277aecfb78 100644
--- a/arch/m68k/emu/natfeat.c
+++ b/arch/m68k/emu/natfeat.c
@@ -18,9 +18,11 @@
18#include <asm/machdep.h> 18#include <asm/machdep.h>
19#include <asm/natfeat.h> 19#include <asm/natfeat.h>
20 20
21extern long nf_get_id2(const char *feature_name);
22
21asm("\n" 23asm("\n"
22" .global nf_get_id,nf_call\n" 24" .global nf_get_id2,nf_call\n"
23"nf_get_id:\n" 25"nf_get_id2:\n"
24" .short 0x7300\n" 26" .short 0x7300\n"
25" rts\n" 27" rts\n"
26"nf_call:\n" 28"nf_call:\n"
@@ -29,12 +31,25 @@ asm("\n"
29"1: moveq.l #0,%d0\n" 31"1: moveq.l #0,%d0\n"
30" rts\n" 32" rts\n"
31" .section __ex_table,\"a\"\n" 33" .section __ex_table,\"a\"\n"
32" .long nf_get_id,1b\n" 34" .long nf_get_id2,1b\n"
33" .long nf_call,1b\n" 35" .long nf_call,1b\n"
34" .previous"); 36" .previous");
35EXPORT_SYMBOL_GPL(nf_get_id);
36EXPORT_SYMBOL_GPL(nf_call); 37EXPORT_SYMBOL_GPL(nf_call);
37 38
39long nf_get_id(const char *feature_name)
40{
41 /* feature_name may be in vmalloc()ed memory, so make a copy */
42 char name_copy[32];
43 size_t n;
44
45 n = strlcpy(name_copy, feature_name, sizeof(name_copy));
46 if (n >= sizeof(name_copy))
47 return 0;
48
49 return nf_get_id2(name_copy);
50}
51EXPORT_SYMBOL_GPL(nf_get_id);
52
38void nfprint(const char *fmt, ...) 53void nfprint(const char *fmt, ...)
39{ 54{
40 static char buf[256]; 55 static char buf[256];
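The new nf_get_id() wrapper copies the caller's feature name into a small on-stack buffer before issuing the NatFeat trap via nf_get_id2(), presumably because the string may live in vmalloc()ed memory that the emulator cannot safely read through; over-long names are rejected rather than silently truncated. A kernel-style sketch of the bounded-copy-with-truncation-check idiom, with the back-end call left hypothetical:

#include <linux/string.h>

static long feature_lookup_example(const char *feature_name)
{
	char name_copy[32];

	/* strlcpy() returns the length of the source string; a result
	 * >= sizeof(name_copy) means the name did not fit.
	 */
	if (strlcpy(name_copy, feature_name, sizeof(name_copy)) >= sizeof(name_copy))
		return 0;	/* treat an over-long name as "not found" */

	return trapping_lookup(name_copy);	/* hypothetical back end */
}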
diff --git a/arch/m68k/include/asm/div64.h b/arch/m68k/include/asm/div64.h
index 444ea8a09e9f..ef881cfbbca9 100644
--- a/arch/m68k/include/asm/div64.h
+++ b/arch/m68k/include/asm/div64.h
@@ -15,16 +15,17 @@
15 unsigned long long n64; \ 15 unsigned long long n64; \
16 } __n; \ 16 } __n; \
17 unsigned long __rem, __upper; \ 17 unsigned long __rem, __upper; \
18 unsigned long __base = (base); \
18 \ 19 \
19 __n.n64 = (n); \ 20 __n.n64 = (n); \
20 if ((__upper = __n.n32[0])) { \ 21 if ((__upper = __n.n32[0])) { \
21 asm ("divul.l %2,%1:%0" \ 22 asm ("divul.l %2,%1:%0" \
22 : "=d" (__n.n32[0]), "=d" (__upper) \ 23 : "=d" (__n.n32[0]), "=d" (__upper) \
23 : "d" (base), "0" (__n.n32[0])); \ 24 : "d" (__base), "0" (__n.n32[0])); \
24 } \ 25 } \
25 asm ("divu.l %2,%1:%0" \ 26 asm ("divu.l %2,%1:%0" \
26 : "=d" (__n.n32[1]), "=d" (__rem) \ 27 : "=d" (__n.n32[1]), "=d" (__rem) \
27 : "d" (base), "1" (__upper), "0" (__n.n32[1])); \ 28 : "d" (__base), "1" (__upper), "0" (__n.n32[1])); \
28 (n) = __n.n64; \ 29 (n) = __n.n64; \
29 __rem; \ 30 __rem; \
30}) 31})
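Copying base into a local unsigned long has two effects: the inline asm is guaranteed a 32-bit divisor, which is all the divul.l/divu.l instructions consume, and the macro argument is evaluated exactly once even if the caller passes an expression with side effects. A tiny illustration of the single-evaluation point, using hypothetical macros rather than the kernel's:

/* Hypothetical macros showing why arguments are copied to locals. */
#define TWICE_BAD(x)	((x) + (x))				/* evaluates x twice */
#define TWICE_GOOD(x)	({ typeof(x) __x = (x); __x + __x; })	/* evaluates x once  */

static int counter;
static int next(void) { return ++counter; }

/* TWICE_BAD(next())  calls next() twice, so the result mixes two values.
 * TWICE_GOOD(next()) calls next() once and doubles that single value.
 */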
diff --git a/arch/microblaze/Kconfig b/arch/microblaze/Kconfig
index d22a4ecffff4..4fab52294d98 100644
--- a/arch/microblaze/Kconfig
+++ b/arch/microblaze/Kconfig
@@ -28,7 +28,7 @@ config MICROBLAZE
28 select GENERIC_CLOCKEVENTS 28 select GENERIC_CLOCKEVENTS
29 select GENERIC_IDLE_POLL_SETUP 29 select GENERIC_IDLE_POLL_SETUP
30 select MODULES_USE_ELF_RELA 30 select MODULES_USE_ELF_RELA
31 select CLONE_BACKWARDS 31 select CLONE_BACKWARDS3
32 32
33config SWAP 33config SWAP
34 def_bool n 34 def_bool n
diff --git a/arch/mips/math-emu/cp1emu.c b/arch/mips/math-emu/cp1emu.c
index e773659ccf9f..46048d24328c 100644
--- a/arch/mips/math-emu/cp1emu.c
+++ b/arch/mips/math-emu/cp1emu.c
@@ -803,6 +803,32 @@ static int isBranchInstr(struct pt_regs *regs, struct mm_decoded_insn dec_insn,
803 dec_insn.next_pc_inc; 803 dec_insn.next_pc_inc;
804 return 1; 804 return 1;
805 break; 805 break;
806#ifdef CONFIG_CPU_CAVIUM_OCTEON
807 case lwc2_op: /* This is bbit0 on Octeon */
808 if ((regs->regs[insn.i_format.rs] & (1ull<<insn.i_format.rt)) == 0)
809 *contpc = regs->cp0_epc + 4 + (insn.i_format.simmediate << 2);
810 else
811 *contpc = regs->cp0_epc + 8;
812 return 1;
813 case ldc2_op: /* This is bbit032 on Octeon */
814 if ((regs->regs[insn.i_format.rs] & (1ull<<(insn.i_format.rt + 32))) == 0)
815 *contpc = regs->cp0_epc + 4 + (insn.i_format.simmediate << 2);
816 else
817 *contpc = regs->cp0_epc + 8;
818 return 1;
819 case swc2_op: /* This is bbit1 on Octeon */
820 if (regs->regs[insn.i_format.rs] & (1ull<<insn.i_format.rt))
821 *contpc = regs->cp0_epc + 4 + (insn.i_format.simmediate << 2);
822 else
823 *contpc = regs->cp0_epc + 8;
824 return 1;
825 case sdc2_op: /* This is bbit132 on Octeon */
826 if (regs->regs[insn.i_format.rs] & (1ull<<(insn.i_format.rt + 32)))
827 *contpc = regs->cp0_epc + 4 + (insn.i_format.simmediate << 2);
828 else
829 *contpc = regs->cp0_epc + 8;
830 return 1;
831#endif
806 case cop0_op: 832 case cop0_op:
807 case cop1_op: 833 case cop1_op:
808 case cop2_op: 834 case cop2_op:
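On Octeon, the bbit0/bbit032/bbit1/bbit132 branch instructions are encoded in the lwc2/ldc2/swc2/sdc2 opcode slots, so the FPU emulator's branch decoder has to recognise them and compute the continuation PC itself: a taken branch goes to epc + 4 + (simm << 2), a not-taken one to epc + 8, past the branch and its delay slot. A standalone sketch of that target calculation:

/* Illustrative branch-target computation matching the Octeon bbit cases
 * above: the signed 16-bit immediate is a word offset relative to the
 * instruction following the branch.
 */
#include <stdint.h>

static uint64_t bbit_target(uint64_t epc, uint64_t rs, unsigned int bit,
			    int16_t simm, int branch_if_set)
{
	int bit_set = (rs >> bit) & 1;

	if (bit_set == branch_if_set)
		return epc + 4 + ((int64_t)simm << 2);	/* branch taken */
	return epc + 8;		/* fall through past the delay slot */
}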
diff --git a/arch/openrisc/Kconfig b/arch/openrisc/Kconfig
index 99dbab1c59ac..d60bf98fa5cf 100644
--- a/arch/openrisc/Kconfig
+++ b/arch/openrisc/Kconfig
@@ -55,6 +55,7 @@ config GENERIC_CSUM
55 55
56source "init/Kconfig" 56source "init/Kconfig"
57 57
58source "kernel/Kconfig.freezer"
58 59
59menu "Processor type and features" 60menu "Processor type and features"
60 61
diff --git a/arch/s390/include/asm/tlb.h b/arch/s390/include/asm/tlb.h
index b75d7d686684..6d6d92b4ea11 100644
--- a/arch/s390/include/asm/tlb.h
+++ b/arch/s390/include/asm/tlb.h
@@ -32,6 +32,7 @@ struct mmu_gather {
32 struct mm_struct *mm; 32 struct mm_struct *mm;
33 struct mmu_table_batch *batch; 33 struct mmu_table_batch *batch;
34 unsigned int fullmm; 34 unsigned int fullmm;
35 unsigned long start, end;
35}; 36};
36 37
37struct mmu_table_batch { 38struct mmu_table_batch {
@@ -48,10 +49,13 @@ extern void tlb_remove_table(struct mmu_gather *tlb, void *table);
48 49
49static inline void tlb_gather_mmu(struct mmu_gather *tlb, 50static inline void tlb_gather_mmu(struct mmu_gather *tlb,
50 struct mm_struct *mm, 51 struct mm_struct *mm,
51 unsigned int full_mm_flush) 52 unsigned long start,
53 unsigned long end)
52{ 54{
53 tlb->mm = mm; 55 tlb->mm = mm;
54 tlb->fullmm = full_mm_flush; 56 tlb->start = start;
57 tlb->end = end;
58 tlb->fullmm = !(start | (end+1));
55 tlb->batch = NULL; 59 tlb->batch = NULL;
56 if (tlb->fullmm) 60 if (tlb->fullmm)
57 __tlb_flush_mm(mm); 61 __tlb_flush_mm(mm);
diff --git a/arch/score/Kconfig b/arch/score/Kconfig
index c8def8bc9020..5fc237581caf 100644
--- a/arch/score/Kconfig
+++ b/arch/score/Kconfig
@@ -87,6 +87,8 @@ config STACKTRACE_SUPPORT
87 87
88source "init/Kconfig" 88source "init/Kconfig"
89 89
90source "kernel/Kconfig.freezer"
91
90config MMU 92config MMU
91 def_bool y 93 def_bool y
92 94
diff --git a/arch/sh/include/asm/tlb.h b/arch/sh/include/asm/tlb.h
index e61d43d9f689..362192ed12fe 100644
--- a/arch/sh/include/asm/tlb.h
+++ b/arch/sh/include/asm/tlb.h
@@ -36,10 +36,12 @@ static inline void init_tlb_gather(struct mmu_gather *tlb)
36} 36}
37 37
38static inline void 38static inline void
39tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned int full_mm_flush) 39tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end)
40{ 40{
41 tlb->mm = mm; 41 tlb->mm = mm;
42 tlb->fullmm = full_mm_flush; 42 tlb->start = start;
43 tlb->end = end;
44 tlb->fullmm = !(start | (end+1));
43 45
44 init_tlb_gather(tlb); 46 init_tlb_gather(tlb);
45} 47}
diff --git a/arch/um/include/asm/tlb.h b/arch/um/include/asm/tlb.h
index 4febacd1a8a1..29b0301c18aa 100644
--- a/arch/um/include/asm/tlb.h
+++ b/arch/um/include/asm/tlb.h
@@ -45,10 +45,12 @@ static inline void init_tlb_gather(struct mmu_gather *tlb)
45} 45}
46 46
47static inline void 47static inline void
48tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned int full_mm_flush) 48tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end)
49{ 49{
50 tlb->mm = mm; 50 tlb->mm = mm;
51 tlb->fullmm = full_mm_flush; 51 tlb->start = start;
52 tlb->end = end;
53 tlb->fullmm = !(start | (end+1));
52 54
53 init_tlb_gather(tlb); 55 init_tlb_gather(tlb);
54} 56}
diff --git a/arch/x86/include/asm/bootparam_utils.h b/arch/x86/include/asm/bootparam_utils.h
index 653668d140f9..4a8cb8d7cbd5 100644
--- a/arch/x86/include/asm/bootparam_utils.h
+++ b/arch/x86/include/asm/bootparam_utils.h
@@ -35,9 +35,9 @@ static void sanitize_boot_params(struct boot_params *boot_params)
35 */ 35 */
36 if (boot_params->sentinel) { 36 if (boot_params->sentinel) {
37 /* fields in boot_params are left uninitialized, clear them */ 37 /* fields in boot_params are left uninitialized, clear them */
38 memset(&boot_params->olpc_ofw_header, 0, 38 memset(&boot_params->ext_ramdisk_image, 0,
39 (char *)&boot_params->efi_info - 39 (char *)&boot_params->efi_info -
40 (char *)&boot_params->olpc_ofw_header); 40 (char *)&boot_params->ext_ramdisk_image);
41 memset(&boot_params->kbd_status, 0, 41 memset(&boot_params->kbd_status, 0,
42 (char *)&boot_params->hdr - 42 (char *)&boot_params->hdr -
43 (char *)&boot_params->kbd_status); 43 (char *)&boot_params->kbd_status);
diff --git a/arch/x86/include/asm/microcode_amd.h b/arch/x86/include/asm/microcode_amd.h
index 50e5c58ced23..4c019179a57d 100644
--- a/arch/x86/include/asm/microcode_amd.h
+++ b/arch/x86/include/asm/microcode_amd.h
@@ -59,7 +59,7 @@ static inline u16 find_equiv_id(struct equiv_cpu_entry *equiv_cpu_table,
59 59
60extern int __apply_microcode_amd(struct microcode_amd *mc_amd); 60extern int __apply_microcode_amd(struct microcode_amd *mc_amd);
61extern int apply_microcode_amd(int cpu); 61extern int apply_microcode_amd(int cpu);
62extern enum ucode_state load_microcode_amd(int cpu, const u8 *data, size_t size); 62extern enum ucode_state load_microcode_amd(u8 family, const u8 *data, size_t size);
63 63
64#ifdef CONFIG_MICROCODE_AMD_EARLY 64#ifdef CONFIG_MICROCODE_AMD_EARLY
65#ifdef CONFIG_X86_32 65#ifdef CONFIG_X86_32
diff --git a/arch/x86/include/asm/pgtable-2level.h b/arch/x86/include/asm/pgtable-2level.h
index f2b489cf1602..3bf2dd0cf61f 100644
--- a/arch/x86/include/asm/pgtable-2level.h
+++ b/arch/x86/include/asm/pgtable-2level.h
@@ -55,9 +55,53 @@ static inline pmd_t native_pmdp_get_and_clear(pmd_t *xp)
55#define native_pmdp_get_and_clear(xp) native_local_pmdp_get_and_clear(xp) 55#define native_pmdp_get_and_clear(xp) native_local_pmdp_get_and_clear(xp)
56#endif 56#endif
57 57
58#ifdef CONFIG_MEM_SOFT_DIRTY
59
60/*
61 * Bits _PAGE_BIT_PRESENT, _PAGE_BIT_FILE, _PAGE_BIT_SOFT_DIRTY and
62 * _PAGE_BIT_PROTNONE are taken, split up the 28 bits of offset
63 * into this range.
64 */
65#define PTE_FILE_MAX_BITS 28
66#define PTE_FILE_SHIFT1 (_PAGE_BIT_PRESENT + 1)
67#define PTE_FILE_SHIFT2 (_PAGE_BIT_FILE + 1)
68#define PTE_FILE_SHIFT3 (_PAGE_BIT_PROTNONE + 1)
69#define PTE_FILE_SHIFT4 (_PAGE_BIT_SOFT_DIRTY + 1)
70#define PTE_FILE_BITS1 (PTE_FILE_SHIFT2 - PTE_FILE_SHIFT1 - 1)
71#define PTE_FILE_BITS2 (PTE_FILE_SHIFT3 - PTE_FILE_SHIFT2 - 1)
72#define PTE_FILE_BITS3 (PTE_FILE_SHIFT4 - PTE_FILE_SHIFT3 - 1)
73
74#define pte_to_pgoff(pte) \
75 ((((pte).pte_low >> (PTE_FILE_SHIFT1)) \
76 & ((1U << PTE_FILE_BITS1) - 1))) \
77 + ((((pte).pte_low >> (PTE_FILE_SHIFT2)) \
78 & ((1U << PTE_FILE_BITS2) - 1)) \
79 << (PTE_FILE_BITS1)) \
80 + ((((pte).pte_low >> (PTE_FILE_SHIFT3)) \
81 & ((1U << PTE_FILE_BITS3) - 1)) \
82 << (PTE_FILE_BITS1 + PTE_FILE_BITS2)) \
83 + ((((pte).pte_low >> (PTE_FILE_SHIFT4))) \
84 << (PTE_FILE_BITS1 + PTE_FILE_BITS2 + PTE_FILE_BITS3))
85
86#define pgoff_to_pte(off) \
87 ((pte_t) { .pte_low = \
88 ((((off)) & ((1U << PTE_FILE_BITS1) - 1)) << PTE_FILE_SHIFT1) \
89 + ((((off) >> PTE_FILE_BITS1) \
90 & ((1U << PTE_FILE_BITS2) - 1)) \
91 << PTE_FILE_SHIFT2) \
92 + ((((off) >> (PTE_FILE_BITS1 + PTE_FILE_BITS2)) \
93 & ((1U << PTE_FILE_BITS3) - 1)) \
94 << PTE_FILE_SHIFT3) \
95 + ((((off) >> \
96 (PTE_FILE_BITS1 + PTE_FILE_BITS2 + PTE_FILE_BITS3))) \
97 << PTE_FILE_SHIFT4) \
98 + _PAGE_FILE })
99
100#else /* CONFIG_MEM_SOFT_DIRTY */
101
58/* 102/*
59 * Bits _PAGE_BIT_PRESENT, _PAGE_BIT_FILE and _PAGE_BIT_PROTNONE are taken, 103 * Bits _PAGE_BIT_PRESENT, _PAGE_BIT_FILE and _PAGE_BIT_PROTNONE are taken,
60 * split up the 29 bits of offset into this range: 104 * split up the 29 bits of offset into this range.
61 */ 105 */
62#define PTE_FILE_MAX_BITS 29 106#define PTE_FILE_MAX_BITS 29
63#define PTE_FILE_SHIFT1 (_PAGE_BIT_PRESENT + 1) 107#define PTE_FILE_SHIFT1 (_PAGE_BIT_PRESENT + 1)
@@ -88,6 +132,8 @@ static inline pmd_t native_pmdp_get_and_clear(pmd_t *xp)
88 << PTE_FILE_SHIFT3) \ 132 << PTE_FILE_SHIFT3) \
89 + _PAGE_FILE }) 133 + _PAGE_FILE })
90 134
135#endif /* CONFIG_MEM_SOFT_DIRTY */
136
91/* Encode and de-code a swap entry */ 137/* Encode and de-code a swap entry */
92#if _PAGE_BIT_FILE < _PAGE_BIT_PROTNONE 138#if _PAGE_BIT_FILE < _PAGE_BIT_PROTNONE
93#define SWP_TYPE_BITS (_PAGE_BIT_FILE - _PAGE_BIT_PRESENT - 1) 139#define SWP_TYPE_BITS (_PAGE_BIT_FILE - _PAGE_BIT_PRESENT - 1)
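With soft-dirty enabled, four bit positions in a non-present 2-level PTE are spoken for (_PAGE_BIT_PRESENT, _PAGE_BIT_FILE, _PAGE_BIT_PROTNONE and _PAGE_BIT_SOFT_DIRTY), so the 28-bit file offset is scattered across the runs of free bits between them, and pte_to_pgoff()/pgoff_to_pte() must remain exact inverses. A toy version of the same scatter/gather with made-up reserved positions:

/* Toy scatter/gather of an offset around reserved bits 0, 3 and 7; the
 * kernel macros above do the same thing around the PTE flag bits.
 */
#include <assert.h>
#include <stdint.h>

static uint32_t pack(uint32_t off)
{
	return ((off & 0x3) << 1)		/* 2 bits -> positions 1..2 */
	     | (((off >> 2) & 0x7) << 4)	/* 3 bits -> positions 4..6 */
	     | ((off >> 5) << 8);		/* the rest above bit 7     */
}

static uint32_t unpack(uint32_t pte)
{
	return ((pte >> 1) & 0x3)
	     | (((pte >> 4) & 0x7) << 2)
	     | ((pte >> 8) << 5);
}

int main(void)
{
	for (uint32_t off = 0; off < (1u << 16); off++)
		assert(unpack(pack(off)) == off);	/* exact inverse */
	return 0;
}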
diff --git a/arch/x86/include/asm/pgtable-3level.h b/arch/x86/include/asm/pgtable-3level.h
index 4cc9f2b7cdc3..81bb91b49a88 100644
--- a/arch/x86/include/asm/pgtable-3level.h
+++ b/arch/x86/include/asm/pgtable-3level.h
@@ -179,6 +179,9 @@ static inline pmd_t native_pmdp_get_and_clear(pmd_t *pmdp)
179/* 179/*
180 * Bits 0, 6 and 7 are taken in the low part of the pte, 180 * Bits 0, 6 and 7 are taken in the low part of the pte,
181 * put the 32 bits of offset into the high part. 181 * put the 32 bits of offset into the high part.
182 *
183 * For soft-dirty tracking 11 bit is taken from
184 * the low part of pte as well.
182 */ 185 */
183#define pte_to_pgoff(pte) ((pte).pte_high) 186#define pte_to_pgoff(pte) ((pte).pte_high)
184#define pgoff_to_pte(off) \ 187#define pgoff_to_pte(off) \
diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
index 7dc305a46058..1c00631164c2 100644
--- a/arch/x86/include/asm/pgtable.h
+++ b/arch/x86/include/asm/pgtable.h
@@ -314,6 +314,36 @@ static inline pmd_t pmd_mksoft_dirty(pmd_t pmd)
314 return pmd_set_flags(pmd, _PAGE_SOFT_DIRTY); 314 return pmd_set_flags(pmd, _PAGE_SOFT_DIRTY);
315} 315}
316 316
317static inline pte_t pte_swp_mksoft_dirty(pte_t pte)
318{
319 return pte_set_flags(pte, _PAGE_SWP_SOFT_DIRTY);
320}
321
322static inline int pte_swp_soft_dirty(pte_t pte)
323{
324 return pte_flags(pte) & _PAGE_SWP_SOFT_DIRTY;
325}
326
327static inline pte_t pte_swp_clear_soft_dirty(pte_t pte)
328{
329 return pte_clear_flags(pte, _PAGE_SWP_SOFT_DIRTY);
330}
331
332static inline pte_t pte_file_clear_soft_dirty(pte_t pte)
333{
334 return pte_clear_flags(pte, _PAGE_SOFT_DIRTY);
335}
336
337static inline pte_t pte_file_mksoft_dirty(pte_t pte)
338{
339 return pte_set_flags(pte, _PAGE_SOFT_DIRTY);
340}
341
342static inline int pte_file_soft_dirty(pte_t pte)
343{
344 return pte_flags(pte) & _PAGE_SOFT_DIRTY;
345}
346
317/* 347/*
318 * Mask out unsupported bits in a present pgprot. Non-present pgprots 348 * Mask out unsupported bits in a present pgprot. Non-present pgprots
319 * can use those bits for other purposes, so leave them be. 349 * can use those bits for other purposes, so leave them be.
diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h
index c98ac63aae48..f4843e031131 100644
--- a/arch/x86/include/asm/pgtable_types.h
+++ b/arch/x86/include/asm/pgtable_types.h
@@ -61,12 +61,27 @@
61 * they do not conflict with each other. 61 * they do not conflict with each other.
62 */ 62 */
63 63
64#define _PAGE_BIT_SOFT_DIRTY _PAGE_BIT_HIDDEN
65
64#ifdef CONFIG_MEM_SOFT_DIRTY 66#ifdef CONFIG_MEM_SOFT_DIRTY
65#define _PAGE_SOFT_DIRTY (_AT(pteval_t, 1) << _PAGE_BIT_HIDDEN) 67#define _PAGE_SOFT_DIRTY (_AT(pteval_t, 1) << _PAGE_BIT_SOFT_DIRTY)
66#else 68#else
67#define _PAGE_SOFT_DIRTY (_AT(pteval_t, 0)) 69#define _PAGE_SOFT_DIRTY (_AT(pteval_t, 0))
68#endif 70#endif
69 71
72/*
73 * Tracking soft dirty bit when a page goes to a swap is tricky.
74 * We need a bit which can be stored in pte _and_ not conflict
75 * with swap entry format. On x86 bits 6 and 7 are *not* involved
76 * into swap entry computation, but bit 6 is used for nonlinear
77 * file mapping, so we borrow bit 7 for soft dirty tracking.
78 */
79#ifdef CONFIG_MEM_SOFT_DIRTY
80#define _PAGE_SWP_SOFT_DIRTY _PAGE_PSE
81#else
82#define _PAGE_SWP_SOFT_DIRTY (_AT(pteval_t, 0))
83#endif
84
70#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE) 85#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
71#define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_NX) 86#define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_NX)
72#else 87#else
diff --git a/arch/x86/include/asm/spinlock.h b/arch/x86/include/asm/spinlock.h
index 33692eaabab5..e3ddd7db723f 100644
--- a/arch/x86/include/asm/spinlock.h
+++ b/arch/x86/include/asm/spinlock.h
@@ -233,8 +233,4 @@ static inline void arch_write_unlock(arch_rwlock_t *rw)
233#define arch_read_relax(lock) cpu_relax() 233#define arch_read_relax(lock) cpu_relax()
234#define arch_write_relax(lock) cpu_relax() 234#define arch_write_relax(lock) cpu_relax()
235 235
236/* The {read|write|spin}_lock() on x86 are full memory barriers. */
237static inline void smp_mb__after_lock(void) { }
238#define ARCH_HAS_SMP_MB_AFTER_LOCK
239
240#endif /* _ASM_X86_SPINLOCK_H */ 236#endif /* _ASM_X86_SPINLOCK_H */
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index f654ecefea5b..08a089043ccf 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -512,7 +512,7 @@ static void early_init_amd(struct cpuinfo_x86 *c)
512 512
513static const int amd_erratum_383[]; 513static const int amd_erratum_383[];
514static const int amd_erratum_400[]; 514static const int amd_erratum_400[];
515static bool cpu_has_amd_erratum(const int *erratum); 515static bool cpu_has_amd_erratum(struct cpuinfo_x86 *cpu, const int *erratum);
516 516
517static void init_amd(struct cpuinfo_x86 *c) 517static void init_amd(struct cpuinfo_x86 *c)
518{ 518{
@@ -729,11 +729,11 @@ static void init_amd(struct cpuinfo_x86 *c)
729 value &= ~(1ULL << 24); 729 value &= ~(1ULL << 24);
730 wrmsrl_safe(MSR_AMD64_BU_CFG2, value); 730 wrmsrl_safe(MSR_AMD64_BU_CFG2, value);
731 731
732 if (cpu_has_amd_erratum(amd_erratum_383)) 732 if (cpu_has_amd_erratum(c, amd_erratum_383))
733 set_cpu_bug(c, X86_BUG_AMD_TLB_MMATCH); 733 set_cpu_bug(c, X86_BUG_AMD_TLB_MMATCH);
734 } 734 }
735 735
736 if (cpu_has_amd_erratum(amd_erratum_400)) 736 if (cpu_has_amd_erratum(c, amd_erratum_400))
737 set_cpu_bug(c, X86_BUG_AMD_APIC_C1E); 737 set_cpu_bug(c, X86_BUG_AMD_APIC_C1E);
738 738
739 rdmsr_safe(MSR_AMD64_PATCH_LEVEL, &c->microcode, &dummy); 739 rdmsr_safe(MSR_AMD64_PATCH_LEVEL, &c->microcode, &dummy);
@@ -878,23 +878,13 @@ static const int amd_erratum_400[] =
878static const int amd_erratum_383[] = 878static const int amd_erratum_383[] =
879 AMD_OSVW_ERRATUM(3, AMD_MODEL_RANGE(0x10, 0, 0, 0xff, 0xf)); 879 AMD_OSVW_ERRATUM(3, AMD_MODEL_RANGE(0x10, 0, 0, 0xff, 0xf));
880 880
881static bool cpu_has_amd_erratum(const int *erratum) 881
882static bool cpu_has_amd_erratum(struct cpuinfo_x86 *cpu, const int *erratum)
882{ 883{
883 struct cpuinfo_x86 *cpu = __this_cpu_ptr(&cpu_info);
884 int osvw_id = *erratum++; 884 int osvw_id = *erratum++;
885 u32 range; 885 u32 range;
886 u32 ms; 886 u32 ms;
887 887
888 /*
889 * If called early enough that current_cpu_data hasn't been initialized
890 * yet, fall back to boot_cpu_data.
891 */
892 if (cpu->x86 == 0)
893 cpu = &boot_cpu_data;
894
895 if (cpu->x86_vendor != X86_VENDOR_AMD)
896 return false;
897
898 if (osvw_id >= 0 && osvw_id < 65536 && 888 if (osvw_id >= 0 && osvw_id < 65536 &&
899 cpu_has(cpu, X86_FEATURE_OSVW)) { 889 cpu_has(cpu, X86_FEATURE_OSVW)) {
900 u64 osvw_len; 890 u64 osvw_len;
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
index fbc9210b45bc..a45d8d4ace10 100644
--- a/arch/x86/kernel/cpu/perf_event_intel.c
+++ b/arch/x86/kernel/cpu/perf_event_intel.c
@@ -2270,6 +2270,7 @@ __init int intel_pmu_init(void)
2270 case 70: 2270 case 70:
2271 case 71: 2271 case 71:
2272 case 63: 2272 case 63:
2273 case 69:
2273 x86_pmu.late_ack = true; 2274 x86_pmu.late_ack = true;
2274 memcpy(hw_cache_event_ids, snb_hw_cache_event_ids, sizeof(hw_cache_event_ids)); 2275 memcpy(hw_cache_event_ids, snb_hw_cache_event_ids, sizeof(hw_cache_event_ids));
2275 memcpy(hw_cache_extra_regs, snb_hw_cache_extra_regs, sizeof(hw_cache_extra_regs)); 2276 memcpy(hw_cache_extra_regs, snb_hw_cache_extra_regs, sizeof(hw_cache_extra_regs));
diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.c b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
index cad791dbde95..1fb6c72717bd 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_uncore.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
@@ -314,8 +314,8 @@ static struct uncore_event_desc snbep_uncore_imc_events[] = {
314static struct uncore_event_desc snbep_uncore_qpi_events[] = { 314static struct uncore_event_desc snbep_uncore_qpi_events[] = {
315 INTEL_UNCORE_EVENT_DESC(clockticks, "event=0x14"), 315 INTEL_UNCORE_EVENT_DESC(clockticks, "event=0x14"),
316 INTEL_UNCORE_EVENT_DESC(txl_flits_active, "event=0x00,umask=0x06"), 316 INTEL_UNCORE_EVENT_DESC(txl_flits_active, "event=0x00,umask=0x06"),
317 INTEL_UNCORE_EVENT_DESC(drs_data, "event=0x02,umask=0x08"), 317 INTEL_UNCORE_EVENT_DESC(drs_data, "event=0x102,umask=0x08"),
318 INTEL_UNCORE_EVENT_DESC(ncb_data, "event=0x03,umask=0x04"), 318 INTEL_UNCORE_EVENT_DESC(ncb_data, "event=0x103,umask=0x04"),
319 { /* end: all zeroes */ }, 319 { /* end: all zeroes */ },
320}; 320};
321 321
diff --git a/arch/x86/kernel/microcode_amd.c b/arch/x86/kernel/microcode_amd.c
index 7a0adb7ee433..7123b5df479d 100644
--- a/arch/x86/kernel/microcode_amd.c
+++ b/arch/x86/kernel/microcode_amd.c
@@ -145,10 +145,9 @@ static int collect_cpu_info_amd(int cpu, struct cpu_signature *csig)
145 return 0; 145 return 0;
146} 146}
147 147
148static unsigned int verify_patch_size(int cpu, u32 patch_size, 148static unsigned int verify_patch_size(u8 family, u32 patch_size,
149 unsigned int size) 149 unsigned int size)
150{ 150{
151 struct cpuinfo_x86 *c = &cpu_data(cpu);
152 u32 max_size; 151 u32 max_size;
153 152
154#define F1XH_MPB_MAX_SIZE 2048 153#define F1XH_MPB_MAX_SIZE 2048
@@ -156,7 +155,7 @@ static unsigned int verify_patch_size(int cpu, u32 patch_size,
156#define F15H_MPB_MAX_SIZE 4096 155#define F15H_MPB_MAX_SIZE 4096
157#define F16H_MPB_MAX_SIZE 3458 156#define F16H_MPB_MAX_SIZE 3458
158 157
159 switch (c->x86) { 158 switch (family) {
160 case 0x14: 159 case 0x14:
161 max_size = F14H_MPB_MAX_SIZE; 160 max_size = F14H_MPB_MAX_SIZE;
162 break; 161 break;
@@ -277,9 +276,8 @@ static void cleanup(void)
277 * driver cannot continue functioning normally. In such cases, we tear 276 * driver cannot continue functioning normally. In such cases, we tear
278 * down everything we've used up so far and exit. 277 * down everything we've used up so far and exit.
279 */ 278 */
280static int verify_and_add_patch(unsigned int cpu, u8 *fw, unsigned int leftover) 279static int verify_and_add_patch(u8 family, u8 *fw, unsigned int leftover)
281{ 280{
282 struct cpuinfo_x86 *c = &cpu_data(cpu);
283 struct microcode_header_amd *mc_hdr; 281 struct microcode_header_amd *mc_hdr;
284 struct ucode_patch *patch; 282 struct ucode_patch *patch;
285 unsigned int patch_size, crnt_size, ret; 283 unsigned int patch_size, crnt_size, ret;
@@ -299,7 +297,7 @@ static int verify_and_add_patch(unsigned int cpu, u8 *fw, unsigned int leftover)
299 297
300 /* check if patch is for the current family */ 298 /* check if patch is for the current family */
301 proc_fam = ((proc_fam >> 8) & 0xf) + ((proc_fam >> 20) & 0xff); 299 proc_fam = ((proc_fam >> 8) & 0xf) + ((proc_fam >> 20) & 0xff);
302 if (proc_fam != c->x86) 300 if (proc_fam != family)
303 return crnt_size; 301 return crnt_size;
304 302
305 if (mc_hdr->nb_dev_id || mc_hdr->sb_dev_id) { 303 if (mc_hdr->nb_dev_id || mc_hdr->sb_dev_id) {
@@ -308,7 +306,7 @@ static int verify_and_add_patch(unsigned int cpu, u8 *fw, unsigned int leftover)
308 return crnt_size; 306 return crnt_size;
309 } 307 }
310 308
311 ret = verify_patch_size(cpu, patch_size, leftover); 309 ret = verify_patch_size(family, patch_size, leftover);
312 if (!ret) { 310 if (!ret) {
313 pr_err("Patch-ID 0x%08x: size mismatch.\n", mc_hdr->patch_id); 311 pr_err("Patch-ID 0x%08x: size mismatch.\n", mc_hdr->patch_id);
314 return crnt_size; 312 return crnt_size;
@@ -339,7 +337,8 @@ static int verify_and_add_patch(unsigned int cpu, u8 *fw, unsigned int leftover)
339 return crnt_size; 337 return crnt_size;
340} 338}
341 339
342static enum ucode_state __load_microcode_amd(int cpu, const u8 *data, size_t size) 340static enum ucode_state __load_microcode_amd(u8 family, const u8 *data,
341 size_t size)
343{ 342{
344 enum ucode_state ret = UCODE_ERROR; 343 enum ucode_state ret = UCODE_ERROR;
345 unsigned int leftover; 344 unsigned int leftover;
@@ -362,7 +361,7 @@ static enum ucode_state __load_microcode_amd(int cpu, const u8 *data, size_t siz
362 } 361 }
363 362
364 while (leftover) { 363 while (leftover) {
365 crnt_size = verify_and_add_patch(cpu, fw, leftover); 364 crnt_size = verify_and_add_patch(family, fw, leftover);
366 if (crnt_size < 0) 365 if (crnt_size < 0)
367 return ret; 366 return ret;
368 367
@@ -373,22 +372,22 @@ static enum ucode_state __load_microcode_amd(int cpu, const u8 *data, size_t siz
373 return UCODE_OK; 372 return UCODE_OK;
374} 373}
375 374
376enum ucode_state load_microcode_amd(int cpu, const u8 *data, size_t size) 375enum ucode_state load_microcode_amd(u8 family, const u8 *data, size_t size)
377{ 376{
378 enum ucode_state ret; 377 enum ucode_state ret;
379 378
380 /* free old equiv table */ 379 /* free old equiv table */
381 free_equiv_cpu_table(); 380 free_equiv_cpu_table();
382 381
383 ret = __load_microcode_amd(cpu, data, size); 382 ret = __load_microcode_amd(family, data, size);
384 383
385 if (ret != UCODE_OK) 384 if (ret != UCODE_OK)
386 cleanup(); 385 cleanup();
387 386
388#if defined(CONFIG_MICROCODE_AMD_EARLY) && defined(CONFIG_X86_32) 387#if defined(CONFIG_MICROCODE_AMD_EARLY) && defined(CONFIG_X86_32)
389 /* save BSP's matching patch for early load */ 388 /* save BSP's matching patch for early load */
390 if (cpu_data(cpu).cpu_index == boot_cpu_data.cpu_index) { 389 if (cpu_data(smp_processor_id()).cpu_index == boot_cpu_data.cpu_index) {
391 struct ucode_patch *p = find_patch(cpu); 390 struct ucode_patch *p = find_patch(smp_processor_id());
392 if (p) { 391 if (p) {
393 memset(amd_bsp_mpb, 0, MPB_MAX_SIZE); 392 memset(amd_bsp_mpb, 0, MPB_MAX_SIZE);
394 memcpy(amd_bsp_mpb, p->data, min_t(u32, ksize(p->data), 393 memcpy(amd_bsp_mpb, p->data, min_t(u32, ksize(p->data),
@@ -441,7 +440,7 @@ static enum ucode_state request_microcode_amd(int cpu, struct device *device,
441 goto fw_release; 440 goto fw_release;
442 } 441 }
443 442
444 ret = load_microcode_amd(cpu, fw->data, fw->size); 443 ret = load_microcode_amd(c->x86, fw->data, fw->size);
445 444
446 fw_release: 445 fw_release:
447 release_firmware(fw); 446 release_firmware(fw);
diff --git a/arch/x86/kernel/microcode_amd_early.c b/arch/x86/kernel/microcode_amd_early.c
index 1d14ffee5749..6073104ccaa3 100644
--- a/arch/x86/kernel/microcode_amd_early.c
+++ b/arch/x86/kernel/microcode_amd_early.c
@@ -238,25 +238,17 @@ static void __init collect_cpu_sig_on_bsp(void *arg)
238 uci->cpu_sig.sig = cpuid_eax(0x00000001); 238 uci->cpu_sig.sig = cpuid_eax(0x00000001);
239} 239}
240#else 240#else
241static void collect_cpu_info_amd_early(struct cpuinfo_x86 *c, 241void load_ucode_amd_ap(void)
242 struct ucode_cpu_info *uci)
243{ 242{
243 unsigned int cpu = smp_processor_id();
244 struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
244 u32 rev, eax; 245 u32 rev, eax;
245 246
246 rdmsr(MSR_AMD64_PATCH_LEVEL, rev, eax); 247 rdmsr(MSR_AMD64_PATCH_LEVEL, rev, eax);
247 eax = cpuid_eax(0x00000001); 248 eax = cpuid_eax(0x00000001);
248 249
249 uci->cpu_sig.sig = eax;
250 uci->cpu_sig.rev = rev; 250 uci->cpu_sig.rev = rev;
251 c->microcode = rev; 251 uci->cpu_sig.sig = eax;
252 c->x86 = ((eax >> 8) & 0xf) + ((eax >> 20) & 0xff);
253}
254
255void load_ucode_amd_ap(void)
256{
257 unsigned int cpu = smp_processor_id();
258
259 collect_cpu_info_amd_early(&cpu_data(cpu), ucode_cpu_info + cpu);
260 252
261 if (cpu && !ucode_loaded) { 253 if (cpu && !ucode_loaded) {
262 void *ucode; 254 void *ucode;
@@ -265,8 +257,10 @@ void load_ucode_amd_ap(void)
265 return; 257 return;
266 258
267 ucode = (void *)(initrd_start + ucode_offset); 259 ucode = (void *)(initrd_start + ucode_offset);
268 if (load_microcode_amd(0, ucode, ucode_size) != UCODE_OK) 260 eax = ((eax >> 8) & 0xf) + ((eax >> 20) & 0xff);
261 if (load_microcode_amd(eax, ucode, ucode_size) != UCODE_OK)
269 return; 262 return;
263
270 ucode_loaded = true; 264 ucode_loaded = true;
271 } 265 }
272 266
@@ -278,6 +272,8 @@ int __init save_microcode_in_initrd_amd(void)
278{ 272{
279 enum ucode_state ret; 273 enum ucode_state ret;
280 void *ucode; 274 void *ucode;
275 u32 eax;
276
281#ifdef CONFIG_X86_32 277#ifdef CONFIG_X86_32
282 unsigned int bsp = boot_cpu_data.cpu_index; 278 unsigned int bsp = boot_cpu_data.cpu_index;
283 struct ucode_cpu_info *uci = ucode_cpu_info + bsp; 279 struct ucode_cpu_info *uci = ucode_cpu_info + bsp;
@@ -293,7 +289,10 @@ int __init save_microcode_in_initrd_amd(void)
293 return 0; 289 return 0;
294 290
295 ucode = (void *)(initrd_start + ucode_offset); 291 ucode = (void *)(initrd_start + ucode_offset);
296 ret = load_microcode_amd(0, ucode, ucode_size); 292 eax = cpuid_eax(0x00000001);
293 eax = ((eax >> 8) & 0xf) + ((eax >> 20) & 0xff);
294
295 ret = load_microcode_amd(eax, ucode, ucode_size);
297 if (ret != UCODE_OK) 296 if (ret != UCODE_OK)
298 return -EINVAL; 297 return -EINVAL;
299 298
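load_microcode_amd() now takes the CPU family directly, and the early loader computes it from CPUID leaf 1 rather than from a cpu_data entry that may not be populated yet: the displayed family is the base family field (EAX bits 11:8) plus the extended family field (EAX bits 27:20). A small sketch of that extraction:

/* Extract the x86 display family from CPUID leaf 1 EAX, matching the
 * computation used above.
 */
#include <stdint.h>

static uint8_t x86_display_family(uint32_t cpuid_1_eax)
{
	return ((cpuid_1_eax >> 8) & 0xf) + ((cpuid_1_eax >> 20) & 0xff);
}

/* Example: an AMD Family 15h part reports base family 0xf and extended
 * family 0x6, so the function returns 0xf + 0x6 = 0x15.
 */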
diff --git a/arch/x86/kernel/sys_x86_64.c b/arch/x86/kernel/sys_x86_64.c
index dbded5aedb81..30277e27431a 100644
--- a/arch/x86/kernel/sys_x86_64.c
+++ b/arch/x86/kernel/sys_x86_64.c
@@ -101,7 +101,7 @@ static void find_start_end(unsigned long flags, unsigned long *begin,
101 *begin = new_begin; 101 *begin = new_begin;
102 } 102 }
103 } else { 103 } else {
104 *begin = TASK_UNMAPPED_BASE; 104 *begin = current->mm->mmap_legacy_base;
105 *end = TASK_SIZE; 105 *end = TASK_SIZE;
106 } 106 }
107} 107}
diff --git a/arch/x86/kernel/tboot.c b/arch/x86/kernel/tboot.c
index addf7b58f4e8..91a4496db434 100644
--- a/arch/x86/kernel/tboot.c
+++ b/arch/x86/kernel/tboot.c
@@ -301,6 +301,15 @@ static int tboot_sleep(u8 sleep_state, u32 pm1a_control, u32 pm1b_control)
301 return 0; 301 return 0;
302} 302}
303 303
304static int tboot_extended_sleep(u8 sleep_state, u32 val_a, u32 val_b)
305{
306 if (!tboot_enabled())
307 return 0;
308
309 pr_warning("tboot is not able to suspend on platforms with reduced hardware sleep (ACPIv5)");
310 return -ENODEV;
311}
312
304static atomic_t ap_wfs_count; 313static atomic_t ap_wfs_count;
305 314
306static int tboot_wait_for_aps(int num_aps) 315static int tboot_wait_for_aps(int num_aps)
@@ -422,6 +431,7 @@ static __init int tboot_late_init(void)
422#endif 431#endif
423 432
424 acpi_os_set_prepare_sleep(&tboot_sleep); 433 acpi_os_set_prepare_sleep(&tboot_sleep);
434 acpi_os_set_prepare_extended_sleep(&tboot_extended_sleep);
425 return 0; 435 return 0;
426} 436}
427 437
diff --git a/arch/x86/mm/mmap.c b/arch/x86/mm/mmap.c
index 62c29a5bfe26..25e7e1372bb2 100644
--- a/arch/x86/mm/mmap.c
+++ b/arch/x86/mm/mmap.c
@@ -112,11 +112,13 @@ static unsigned long mmap_legacy_base(void)
112 */ 112 */
113void arch_pick_mmap_layout(struct mm_struct *mm) 113void arch_pick_mmap_layout(struct mm_struct *mm)
114{ 114{
115 mm->mmap_legacy_base = mmap_legacy_base();
116 mm->mmap_base = mmap_base();
117
115 if (mmap_is_legacy()) { 118 if (mmap_is_legacy()) {
116 mm->mmap_base = mmap_legacy_base(); 119 mm->mmap_base = mm->mmap_legacy_base;
117 mm->get_unmapped_area = arch_get_unmapped_area; 120 mm->get_unmapped_area = arch_get_unmapped_area;
118 } else { 121 } else {
119 mm->mmap_base = mmap_base();
120 mm->get_unmapped_area = arch_get_unmapped_area_topdown; 122 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
121 } 123 }
122} 124}
diff --git a/arch/x86/pci/i386.c b/arch/x86/pci/i386.c
index 94919e307f8e..db6b1ab43255 100644
--- a/arch/x86/pci/i386.c
+++ b/arch/x86/pci/i386.c
@@ -210,6 +210,8 @@ static void pcibios_allocate_bridge_resources(struct pci_dev *dev)
210 r = &dev->resource[idx]; 210 r = &dev->resource[idx];
211 if (!r->flags) 211 if (!r->flags)
212 continue; 212 continue;
213 if (r->parent) /* Already allocated */
214 continue;
213 if (!r->start || pci_claim_resource(dev, idx) < 0) { 215 if (!r->start || pci_claim_resource(dev, idx) < 0) {
214 /* 216 /*
215 * Something is wrong with the region. 217 * Something is wrong with the region.
@@ -318,6 +320,8 @@ static void pcibios_allocate_dev_rom_resource(struct pci_dev *dev)
318 r = &dev->resource[PCI_ROM_RESOURCE]; 320 r = &dev->resource[PCI_ROM_RESOURCE];
319 if (!r->flags || !r->start) 321 if (!r->flags || !r->start)
320 return; 322 return;
323 if (r->parent) /* Already allocated */
324 return;
321 325
322 if (pci_claim_resource(dev, PCI_ROM_RESOURCE) < 0) { 326 if (pci_claim_resource(dev, PCI_ROM_RESOURCE) < 0) {
323 r->end -= r->start; 327 r->end -= r->start;
diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c
index 056d11faef21..8f3eea6b80c5 100644
--- a/arch/x86/xen/setup.c
+++ b/arch/x86/xen/setup.c
@@ -313,6 +313,17 @@ static void xen_align_and_add_e820_region(u64 start, u64 size, int type)
313 e820_add_region(start, end - start, type); 313 e820_add_region(start, end - start, type);
314} 314}
315 315
316void xen_ignore_unusable(struct e820entry *list, size_t map_size)
317{
318 struct e820entry *entry;
319 unsigned int i;
320
321 for (i = 0, entry = list; i < map_size; i++, entry++) {
322 if (entry->type == E820_UNUSABLE)
323 entry->type = E820_RAM;
324 }
325}
326
316/** 327/**
317 * machine_specific_memory_setup - Hook for machine specific memory setup. 328 * machine_specific_memory_setup - Hook for machine specific memory setup.
318 **/ 329 **/
@@ -353,6 +364,17 @@ char * __init xen_memory_setup(void)
353 } 364 }
354 BUG_ON(rc); 365 BUG_ON(rc);
355 366
367 /*
368 * Xen won't allow a 1:1 mapping to be created to UNUSABLE
369 * regions, so if we're using the machine memory map leave the
370 * region as RAM as it is in the pseudo-physical map.
371 *
372 * UNUSABLE regions in domUs are not handled and will need
373 * a patch in the future.
374 */
375 if (xen_initial_domain())
376 xen_ignore_unusable(map, memmap.nr_entries);
377
356 /* Make sure the Xen-supplied memory map is well-ordered. */ 378 /* Make sure the Xen-supplied memory map is well-ordered. */
357 sanitize_e820_map(map, memmap.nr_entries, &memmap.nr_entries); 379 sanitize_e820_map(map, memmap.nr_entries, &memmap.nr_entries);
358 380
diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
index ca92754eb846..b81c88e51daa 100644
--- a/arch/x86/xen/smp.c
+++ b/arch/x86/xen/smp.c
@@ -694,8 +694,15 @@ static void __init xen_hvm_smp_prepare_cpus(unsigned int max_cpus)
694static int xen_hvm_cpu_up(unsigned int cpu, struct task_struct *tidle) 694static int xen_hvm_cpu_up(unsigned int cpu, struct task_struct *tidle)
695{ 695{
696 int rc; 696 int rc;
697 rc = native_cpu_up(cpu, tidle); 697 /*
698 WARN_ON (xen_smp_intr_init(cpu)); 698 * xen_smp_intr_init() needs to run before native_cpu_up()
699 * so that IPI vectors are set up on the booting CPU before
700 * it is marked online in native_cpu_up().
701 */
702 rc = xen_smp_intr_init(cpu);
703 WARN_ON(rc);
704 if (!rc)
705 rc = native_cpu_up(cpu, tidle);
699 return rc; 706 return rc;
700} 707}
701 708
diff --git a/drivers/acpi/acpica/acglobal.h b/drivers/acpi/acpica/acglobal.h
index b8d38117a20c..90e846f985fa 100644
--- a/drivers/acpi/acpica/acglobal.h
+++ b/drivers/acpi/acpica/acglobal.h
@@ -138,6 +138,12 @@ u8 ACPI_INIT_GLOBAL(acpi_gbl_disable_auto_repair, FALSE);
138 */ 138 */
139u8 ACPI_INIT_GLOBAL(acpi_gbl_disable_ssdt_table_load, FALSE); 139u8 ACPI_INIT_GLOBAL(acpi_gbl_disable_ssdt_table_load, FALSE);
140 140
141/*
142 * We keep track of the latest version of Windows that has been requested by
143 * the BIOS.
144 */
145u8 ACPI_INIT_GLOBAL(acpi_gbl_osi_data, 0);
146
141/* acpi_gbl_FADT is a local copy of the FADT, converted to a common format. */ 147/* acpi_gbl_FADT is a local copy of the FADT, converted to a common format. */
142 148
143struct acpi_table_fadt acpi_gbl_FADT; 149struct acpi_table_fadt acpi_gbl_FADT;
@@ -285,7 +291,6 @@ ACPI_EXTERN u8 acpi_gbl_debugger_configuration;
285ACPI_EXTERN u8 acpi_gbl_step_to_next_call; 291ACPI_EXTERN u8 acpi_gbl_step_to_next_call;
286ACPI_EXTERN u8 acpi_gbl_acpi_hardware_present; 292ACPI_EXTERN u8 acpi_gbl_acpi_hardware_present;
287ACPI_EXTERN u8 acpi_gbl_events_initialized; 293ACPI_EXTERN u8 acpi_gbl_events_initialized;
288ACPI_EXTERN u8 acpi_gbl_osi_data;
289ACPI_EXTERN struct acpi_interface_info *acpi_gbl_supported_interfaces; 294ACPI_EXTERN struct acpi_interface_info *acpi_gbl_supported_interfaces;
290ACPI_EXTERN struct acpi_address_range 295ACPI_EXTERN struct acpi_address_range
291 *acpi_gbl_address_range_list[ACPI_ADDRESS_RANGE_MAX]; 296 *acpi_gbl_address_range_list[ACPI_ADDRESS_RANGE_MAX];
diff --git a/drivers/acpi/acpica/aclocal.h b/drivers/acpi/acpica/aclocal.h
index d4a4901637cd..0ed00669cd21 100644
--- a/drivers/acpi/acpica/aclocal.h
+++ b/drivers/acpi/acpica/aclocal.h
@@ -942,6 +942,9 @@ struct acpi_interface_info {
942 942
943#define ACPI_OSI_INVALID 0x01 943#define ACPI_OSI_INVALID 0x01
944#define ACPI_OSI_DYNAMIC 0x02 944#define ACPI_OSI_DYNAMIC 0x02
945#define ACPI_OSI_FEATURE 0x04
946#define ACPI_OSI_DEFAULT_INVALID 0x08
947#define ACPI_OSI_OPTIONAL_FEATURE (ACPI_OSI_FEATURE | ACPI_OSI_DEFAULT_INVALID | ACPI_OSI_INVALID)
945 948
946struct acpi_port_info { 949struct acpi_port_info {
947 char *name; 950 char *name;
@@ -1030,6 +1033,7 @@ struct acpi_external_list {
1030 u8 type; 1033 u8 type;
1031 u8 flags; 1034 u8 flags;
1032 u8 resolved; 1035 u8 resolved;
1036 u8 emitted;
1033}; 1037};
1034 1038
1035/* Values for Flags field above */ 1039/* Values for Flags field above */
diff --git a/drivers/acpi/acpica/acnamesp.h b/drivers/acpi/acpica/acnamesp.h
index b83dc32a5ae0..40b04bd5579e 100644
--- a/drivers/acpi/acpica/acnamesp.h
+++ b/drivers/acpi/acpica/acnamesp.h
@@ -104,8 +104,8 @@ acpi_ns_walk_namespace(acpi_object_type type,
104 acpi_handle start_object, 104 acpi_handle start_object,
105 u32 max_depth, 105 u32 max_depth,
106 u32 flags, 106 u32 flags,
107 acpi_walk_callback pre_order_visit, 107 acpi_walk_callback descending_callback,
108 acpi_walk_callback post_order_visit, 108 acpi_walk_callback ascending_callback,
109 void *context, void **return_value); 109 void *context, void **return_value);
110 110
111struct acpi_namespace_node *acpi_ns_get_next_node(struct acpi_namespace_node 111struct acpi_namespace_node *acpi_ns_get_next_node(struct acpi_namespace_node
diff --git a/drivers/acpi/acpica/actables.h b/drivers/acpi/acpica/actables.h
index 7755e915a007..c54f42c64fe2 100644
--- a/drivers/acpi/acpica/actables.h
+++ b/drivers/acpi/acpica/actables.h
@@ -47,6 +47,13 @@
47acpi_status acpi_allocate_root_table(u32 initial_table_count); 47acpi_status acpi_allocate_root_table(u32 initial_table_count);
48 48
49/* 49/*
50 * tbxfroot - Root pointer utilities
51 */
52acpi_status acpi_tb_validate_rsdp(struct acpi_table_rsdp *rsdp);
53
54u8 *acpi_tb_scan_memory_for_rsdp(u8 *start_address, u32 length);
55
56/*
50 * tbfadt - FADT parse/convert/validate 57 * tbfadt - FADT parse/convert/validate
51 */ 58 */
52void acpi_tb_parse_fadt(u32 table_index); 59void acpi_tb_parse_fadt(u32 table_index);
diff --git a/drivers/acpi/acpica/acutils.h b/drivers/acpi/acpica/acutils.h
index 3c76edea6803..d5a62a6182bb 100644
--- a/drivers/acpi/acpica/acutils.h
+++ b/drivers/acpi/acpica/acutils.h
@@ -470,6 +470,8 @@ acpi_status acpi_ut_install_interface(acpi_string interface_name);
470 470
471acpi_status acpi_ut_remove_interface(acpi_string interface_name); 471acpi_status acpi_ut_remove_interface(acpi_string interface_name);
472 472
473acpi_status acpi_ut_update_interfaces(u8 action);
474
473struct acpi_interface_info *acpi_ut_get_interface(acpi_string interface_name); 475struct acpi_interface_info *acpi_ut_get_interface(acpi_string interface_name);
474 476
475acpi_status acpi_ut_osi_implementation(struct acpi_walk_state *walk_state); 477acpi_status acpi_ut_osi_implementation(struct acpi_walk_state *walk_state);
@@ -616,7 +618,7 @@ int acpi_ut_stricmp(char *string1, char *string2);
616 618
617acpi_status acpi_ut_strtoul64(char *string, u32 base, u64 *ret_integer); 619acpi_status acpi_ut_strtoul64(char *string, u32 base, u64 *ret_integer);
618 620
619void acpi_ut_print_string(char *string, u8 max_length); 621void acpi_ut_print_string(char *string, u16 max_length);
620 622
621void ut_convert_backslashes(char *pathname); 623void ut_convert_backslashes(char *pathname);
622 624
diff --git a/drivers/acpi/acpica/evgpeinit.c b/drivers/acpi/acpica/evgpeinit.c
index 9037f17c9608..7842700346a4 100644
--- a/drivers/acpi/acpica/evgpeinit.c
+++ b/drivers/acpi/acpica/evgpeinit.c
@@ -125,7 +125,6 @@ acpi_status acpi_ev_gpe_initialize(void)
125 /* GPE block 0 exists (has both length and address > 0) */ 125 /* GPE block 0 exists (has both length and address > 0) */
126 126
127 register_count0 = (u16)(acpi_gbl_FADT.gpe0_block_length / 2); 127 register_count0 = (u16)(acpi_gbl_FADT.gpe0_block_length / 2);
128
129 gpe_number_max = 128 gpe_number_max =
130 (register_count0 * ACPI_GPE_REGISTER_WIDTH) - 1; 129 (register_count0 * ACPI_GPE_REGISTER_WIDTH) - 1;
131 130
@@ -204,16 +203,6 @@ acpi_status acpi_ev_gpe_initialize(void)
204 goto cleanup; 203 goto cleanup;
205 } 204 }
206 205
207 /* Check for Max GPE number out-of-range */
208
209 if (gpe_number_max > ACPI_GPE_MAX) {
210 ACPI_ERROR((AE_INFO,
211 "Maximum GPE number from FADT is too large: 0x%X",
212 gpe_number_max));
213 status = AE_BAD_VALUE;
214 goto cleanup;
215 }
216
217 cleanup: 206 cleanup:
218 (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE); 207 (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
219 return_ACPI_STATUS(AE_OK); 208 return_ACPI_STATUS(AE_OK);
diff --git a/drivers/acpi/acpica/exdump.c b/drivers/acpi/acpica/exdump.c
index c740f24e3101..4d046faac48c 100644
--- a/drivers/acpi/acpica/exdump.c
+++ b/drivers/acpi/acpica/exdump.c
@@ -338,6 +338,7 @@ acpi_ex_dump_object(union acpi_operand_object *obj_desc,
338{ 338{
339 u8 *target; 339 u8 *target;
340 char *name; 340 char *name;
341 const char *reference_name;
341 u8 count; 342 u8 count;
342 343
343 if (!info) { 344 if (!info) {
@@ -426,10 +427,9 @@ acpi_ex_dump_object(union acpi_operand_object *obj_desc,
426 427
427 case ACPI_EXD_REFERENCE: 428 case ACPI_EXD_REFERENCE:
428 429
430 reference_name = acpi_ut_get_reference_name(obj_desc);
429 acpi_ex_out_string("Class Name", 431 acpi_ex_out_string("Class Name",
430 ACPI_CAST_PTR(char, 432 ACPI_CAST_PTR(char, reference_name));
431 acpi_ut_get_reference_name
432 (obj_desc)));
433 acpi_ex_dump_reference_obj(obj_desc); 433 acpi_ex_dump_reference_obj(obj_desc);
434 break; 434 break;
435 435
diff --git a/drivers/acpi/acpica/exoparg1.c b/drivers/acpi/acpica/exoparg1.c
index 814b4a3d656a..2cdd41d8ade6 100644
--- a/drivers/acpi/acpica/exoparg1.c
+++ b/drivers/acpi/acpica/exoparg1.c
@@ -962,10 +962,17 @@ acpi_status acpi_ex_opcode_1A_0T_1R(struct acpi_walk_state *walk_state)
962 */ 962 */
963 return_desc = 963 return_desc =
964 *(operand[0]->reference.where); 964 *(operand[0]->reference.where);
965 if (return_desc) { 965 if (!return_desc) {
966 acpi_ut_add_reference 966 /*
967 (return_desc); 967 * Element is NULL, do not allow the dereference.
968 * This provides compatibility with other ACPI
969 * implementations.
970 */
971 return_ACPI_STATUS
972 (AE_AML_UNINITIALIZED_ELEMENT);
968 } 973 }
974
975 acpi_ut_add_reference(return_desc);
969 break; 976 break;
970 977
971 default: 978 default:
@@ -990,11 +997,40 @@ acpi_status acpi_ex_opcode_1A_0T_1R(struct acpi_walk_state *walk_state)
990 acpi_namespace_node 997 acpi_namespace_node
991 *) 998 *)
992 return_desc); 999 return_desc);
993 } 1000 if (!return_desc) {
1001 break;
1002 }
994 1003
995 /* Add another reference to the object! */ 1004 /*
1005 * June 2013:
1006 * buffer_fields/field_units require additional resolution
1007 */
1008 switch (return_desc->common.type) {
1009 case ACPI_TYPE_BUFFER_FIELD:
1010 case ACPI_TYPE_LOCAL_REGION_FIELD:
1011 case ACPI_TYPE_LOCAL_BANK_FIELD:
1012 case ACPI_TYPE_LOCAL_INDEX_FIELD:
996 1013
997 acpi_ut_add_reference(return_desc); 1014 status =
1015 acpi_ex_read_data_from_field
1016 (walk_state, return_desc,
1017 &temp_desc);
1018 if (ACPI_FAILURE(status)) {
1019 goto cleanup;
1020 }
1021
1022 return_desc = temp_desc;
1023 break;
1024
1025 default:
1026
1027 /* Add another reference to the object */
1028
1029 acpi_ut_add_reference
1030 (return_desc);
1031 break;
1032 }
1033 }
998 break; 1034 break;
999 1035
1000 default: 1036 default:
diff --git a/drivers/acpi/acpica/hwesleep.c b/drivers/acpi/acpica/hwesleep.c
index 5e5f76230f5e..414076818d40 100644
--- a/drivers/acpi/acpica/hwesleep.c
+++ b/drivers/acpi/acpica/hwesleep.c
@@ -43,6 +43,7 @@
43 */ 43 */
44 44
45#include <acpi/acpi.h> 45#include <acpi/acpi.h>
46#include <linux/acpi.h>
46#include "accommon.h" 47#include "accommon.h"
47 48
48#define _COMPONENT ACPI_HARDWARE 49#define _COMPONENT ACPI_HARDWARE
@@ -128,6 +129,14 @@ acpi_status acpi_hw_extended_sleep(u8 sleep_state)
128 129
129 ACPI_FLUSH_CPU_CACHE(); 130 ACPI_FLUSH_CPU_CACHE();
130 131
132 status = acpi_os_prepare_extended_sleep(sleep_state,
133 acpi_gbl_sleep_type_a,
134 acpi_gbl_sleep_type_b);
135 if (ACPI_SKIP(status))
136 return_ACPI_STATUS(AE_OK);
137 if (ACPI_FAILURE(status))
138 return_ACPI_STATUS(status);
139
131 /* 140 /*
132 * Set the SLP_TYP and SLP_EN bits. 141 * Set the SLP_TYP and SLP_EN bits.
133 * 142 *
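The hwesleep.c change calls acpi_os_prepare_extended_sleep() before writing the sleep registers, so the OS layer (in practice a hypervisor notification path) can intercept extended-sleep entry; a skip result tells ACPICA not to write SLP_TYP/SLP_EN itself. A sketch of such a hook, assuming the acpi_os_set_prepare_extended_sleep() registration helper and the Linux OSL convention that a positive return is translated into the "skip" status:

    #include <linux/acpi.h>

    /* Assumed callback signature: int (u8 sleep_state, u32 val_a, u32 val_b). */
    static int demo_prepare_extended_sleep(u8 sleep_state, u32 val_a, u32 val_b)
    {
        if (sleep_state == ACPI_STATE_S3) {
            /* Hand S3 entry to a hypervisor/firmware path instead. */
            return 1;   /* ask ACPICA to skip the SLP_TYP/SLP_EN write */
        }
        return 0;       /* let ACPICA proceed normally */
    }

    static void demo_register_sleep_hook(void)
    {
        acpi_os_set_prepare_extended_sleep(demo_prepare_extended_sleep);
    }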
diff --git a/drivers/acpi/acpica/hwtimer.c b/drivers/acpi/acpica/hwtimer.c
index 0c1a8bbd05d6..2d7d22ebc782 100644
--- a/drivers/acpi/acpica/hwtimer.c
+++ b/drivers/acpi/acpica/hwtimer.c
@@ -100,8 +100,13 @@ acpi_status acpi_get_timer(u32 * ticks)
100 return_ACPI_STATUS(AE_BAD_PARAMETER); 100 return_ACPI_STATUS(AE_BAD_PARAMETER);
101 } 101 }
102 102
103 status = acpi_hw_read(ticks, &acpi_gbl_FADT.xpm_timer_block); 103 /* ACPI 5.0A: PM Timer is optional */
104
105 if (!acpi_gbl_FADT.xpm_timer_block.address) {
106 return_ACPI_STATUS(AE_SUPPORT);
107 }
104 108
109 status = acpi_hw_read(ticks, &acpi_gbl_FADT.xpm_timer_block);
105 return_ACPI_STATUS(status); 110 return_ACPI_STATUS(status);
106} 111}
107 112
@@ -148,6 +153,12 @@ acpi_get_timer_duration(u32 start_ticks, u32 end_ticks, u32 * time_elapsed)
148 return_ACPI_STATUS(AE_BAD_PARAMETER); 153 return_ACPI_STATUS(AE_BAD_PARAMETER);
149 } 154 }
150 155
156 /* ACPI 5.0A: PM Timer is optional */
157
158 if (!acpi_gbl_FADT.xpm_timer_block.address) {
159 return_ACPI_STATUS(AE_SUPPORT);
160 }
161
151 /* 162 /*
152 * Compute Tick Delta: 163 * Compute Tick Delta:
153 * Handle (max one) timer rollovers on 24-bit versus 32-bit timers. 164 * Handle (max one) timer rollovers on 24-bit versus 32-bit timers.
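With the PM timer optional as of ACPI 5.0A, acpi_get_timer() and acpi_get_timer_duration() now return AE_SUPPORT when the FADT has no PM timer block, so callers need a fallback clock. A minimal sketch:

    #include <linux/acpi.h>

    static u32 demo_read_pm_timer(void)
    {
        u32 ticks = 0;
        acpi_status status = acpi_get_timer(&ticks);

        if (status == AE_SUPPORT) {
            /* No PM timer on this platform (ACPI 5.0A); use another clocksource. */
            return 0;
        }
        if (ACPI_FAILURE(status))
            return 0;

        return ticks;
    }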
diff --git a/drivers/acpi/acpica/nspredef.c b/drivers/acpi/acpica/nspredef.c
index 24b71a01bf93..098e7666cbc9 100644
--- a/drivers/acpi/acpica/nspredef.c
+++ b/drivers/acpi/acpica/nspredef.c
@@ -151,6 +151,15 @@ acpi_ns_check_return_value(struct acpi_namespace_node *node,
151 } 151 }
152 152
153 /* 153 /*
154 *
155 * 4) If there is no return value and it is optional, just return
156 * AE_OK (_WAK).
157 */
158 if (!(*return_object_ptr)) {
159 goto exit;
160 }
161
162 /*
154 * For returned Package objects, check the type of all sub-objects. 163 * For returned Package objects, check the type of all sub-objects.
155 * Note: Package may have been newly created by call above. 164 * Note: Package may have been newly created by call above.
156 */ 165 */
@@ -268,7 +277,12 @@ acpi_ns_check_object_type(struct acpi_evaluate_info *info,
268 277
269 acpi_ut_get_expected_return_types(type_buffer, expected_btypes); 278 acpi_ut_get_expected_return_types(type_buffer, expected_btypes);
270 279
271 if (package_index == ACPI_NOT_PACKAGE_ELEMENT) { 280 if (!return_object) {
281 ACPI_WARN_PREDEFINED((AE_INFO, info->full_pathname,
282 info->node_flags,
283 "Expected return object of type %s",
284 type_buffer));
285 } else if (package_index == ACPI_NOT_PACKAGE_ELEMENT) {
272 ACPI_WARN_PREDEFINED((AE_INFO, info->full_pathname, 286 ACPI_WARN_PREDEFINED((AE_INFO, info->full_pathname,
273 info->node_flags, 287 info->node_flags,
274 "Return type mismatch - found %s, expected %s", 288 "Return type mismatch - found %s, expected %s",
diff --git a/drivers/acpi/acpica/nswalk.c b/drivers/acpi/acpica/nswalk.c
index e70911a9e441..e81f15ef659a 100644
--- a/drivers/acpi/acpica/nswalk.c
+++ b/drivers/acpi/acpica/nswalk.c
@@ -156,9 +156,9 @@ struct acpi_namespace_node *acpi_ns_get_next_node_typed(acpi_object_type type,
156 * max_depth - Depth to which search is to reach 156 * max_depth - Depth to which search is to reach
157 * flags - Whether to unlock the NS before invoking 157 * flags - Whether to unlock the NS before invoking
158 * the callback routine 158 * the callback routine
159 * pre_order_visit - Called during tree pre-order visit 159 * descending_callback - Called during tree descent
160 * when an object of "Type" is found 160 * when an object of "Type" is found
161 * post_order_visit - Called during tree post-order visit 161 * ascending_callback - Called during tree ascent
162 * when an object of "Type" is found 162 * when an object of "Type" is found
163 * context - Passed to user function(s) above 163 * context - Passed to user function(s) above
164 * return_value - from the user_function if terminated 164 * return_value - from the user_function if terminated
@@ -185,8 +185,8 @@ acpi_ns_walk_namespace(acpi_object_type type,
185 acpi_handle start_node, 185 acpi_handle start_node,
186 u32 max_depth, 186 u32 max_depth,
187 u32 flags, 187 u32 flags,
188 acpi_walk_callback pre_order_visit, 188 acpi_walk_callback descending_callback,
189 acpi_walk_callback post_order_visit, 189 acpi_walk_callback ascending_callback,
190 void *context, void **return_value) 190 void *context, void **return_value)
191{ 191{
192 acpi_status status; 192 acpi_status status;
@@ -255,22 +255,22 @@ acpi_ns_walk_namespace(acpi_object_type type,
255 } 255 }
256 256
257 /* 257 /*
258 * Invoke the user function, either pre-order or post-order 258 * Invoke the user function, either descending, ascending,
259 * or both. 259 * or both.
260 */ 260 */
261 if (!node_previously_visited) { 261 if (!node_previously_visited) {
262 if (pre_order_visit) { 262 if (descending_callback) {
263 status = 263 status =
264 pre_order_visit(child_node, level, 264 descending_callback(child_node,
265 context, 265 level, context,
266 return_value); 266 return_value);
267 } 267 }
268 } else { 268 } else {
269 if (post_order_visit) { 269 if (ascending_callback) {
270 status = 270 status =
271 post_order_visit(child_node, level, 271 ascending_callback(child_node,
272 context, 272 level, context,
273 return_value); 273 return_value);
274 } 274 }
275 } 275 }
276 276
diff --git a/drivers/acpi/acpica/nsxfeval.c b/drivers/acpi/acpica/nsxfeval.c
index f553cfdb71dd..b38b4b07f86e 100644
--- a/drivers/acpi/acpica/nsxfeval.c
+++ b/drivers/acpi/acpica/nsxfeval.c
@@ -533,9 +533,9 @@ static void acpi_ns_resolve_references(struct acpi_evaluate_info *info)
533 * PARAMETERS: type - acpi_object_type to search for 533 * PARAMETERS: type - acpi_object_type to search for
534 * start_object - Handle in namespace where search begins 534 * start_object - Handle in namespace where search begins
535 * max_depth - Depth to which search is to reach 535 * max_depth - Depth to which search is to reach
536 * pre_order_visit - Called during tree pre-order visit 536 * descending_callback - Called during tree descent
537 * when an object of "Type" is found 537 * when an object of "Type" is found
538 * post_order_visit - Called during tree post-order visit 538 * ascending_callback - Called during tree ascent
539 * when an object of "Type" is found 539 * when an object of "Type" is found
540 * context - Passed to user function(s) above 540 * context - Passed to user function(s) above
541 * return_value - Location where return value of 541 * return_value - Location where return value of
@@ -563,8 +563,8 @@ acpi_status
563acpi_walk_namespace(acpi_object_type type, 563acpi_walk_namespace(acpi_object_type type,
564 acpi_handle start_object, 564 acpi_handle start_object,
565 u32 max_depth, 565 u32 max_depth,
566 acpi_walk_callback pre_order_visit, 566 acpi_walk_callback descending_callback,
567 acpi_walk_callback post_order_visit, 567 acpi_walk_callback ascending_callback,
568 void *context, void **return_value) 568 void *context, void **return_value)
569{ 569{
570 acpi_status status; 570 acpi_status status;
@@ -574,7 +574,7 @@ acpi_walk_namespace(acpi_object_type type,
574 /* Parameter validation */ 574 /* Parameter validation */
575 575
576 if ((type > ACPI_TYPE_LOCAL_MAX) || 576 if ((type > ACPI_TYPE_LOCAL_MAX) ||
577 (!max_depth) || (!pre_order_visit && !post_order_visit)) { 577 (!max_depth) || (!descending_callback && !ascending_callback)) {
578 return_ACPI_STATUS(AE_BAD_PARAMETER); 578 return_ACPI_STATUS(AE_BAD_PARAMETER);
579 } 579 }
580 580
@@ -606,9 +606,9 @@ acpi_walk_namespace(acpi_object_type type,
606 } 606 }
607 607
608 status = acpi_ns_walk_namespace(type, start_object, max_depth, 608 status = acpi_ns_walk_namespace(type, start_object, max_depth,
609 ACPI_NS_WALK_UNLOCK, pre_order_visit, 609 ACPI_NS_WALK_UNLOCK,
610 post_order_visit, context, 610 descending_callback, ascending_callback,
611 return_value); 611 context, return_value);
612 612
613 (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE); 613 (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
614 614
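The pre_order/post_order to descending/ascending rename is purely cosmetic for external callers: acpi_walk_namespace() keeps the same parameter positions and callback type. A small sketch that counts Device objects during the descent pass (the ascending slot is left NULL):

    #include <linux/acpi.h>

    static acpi_status demo_count_device(acpi_handle handle, u32 nesting_level,
                                         void *context, void **return_value)
    {
        unsigned int *count = context;

        (*count)++;
        return AE_OK;           /* keep walking */
    }

    static unsigned int demo_count_devices(void)
    {
        unsigned int count = 0;

        acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
                            ACPI_UINT32_MAX, demo_count_device, NULL,
                            &count, NULL);
        return count;
    }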
diff --git a/drivers/acpi/acpica/nsxfname.c b/drivers/acpi/acpica/nsxfname.c
index f3a4d95899f7..83c164434580 100644
--- a/drivers/acpi/acpica/nsxfname.c
+++ b/drivers/acpi/acpica/nsxfname.c
@@ -158,6 +158,7 @@ acpi_get_name(acpi_handle handle, u32 name_type, struct acpi_buffer * buffer)
158{ 158{
159 acpi_status status; 159 acpi_status status;
160 struct acpi_namespace_node *node; 160 struct acpi_namespace_node *node;
161 char *node_name;
161 162
162 /* Parameter validation */ 163 /* Parameter validation */
163 164
@@ -202,7 +203,8 @@ acpi_get_name(acpi_handle handle, u32 name_type, struct acpi_buffer * buffer)
202 203
203 /* Just copy the ACPI name from the Node and zero terminate it */ 204 /* Just copy the ACPI name from the Node and zero terminate it */
204 205
205 ACPI_MOVE_NAME(buffer->pointer, acpi_ut_get_node_name(node)); 206 node_name = acpi_ut_get_node_name(node);
207 ACPI_MOVE_NAME(buffer->pointer, node_name);
206 ((char *)buffer->pointer)[ACPI_NAME_SIZE] = 0; 208 ((char *)buffer->pointer)[ACPI_NAME_SIZE] = 0;
207 status = AE_OK; 209 status = AE_OK;
208 210
@@ -379,9 +381,14 @@ acpi_get_object_info(acpi_handle handle,
379 * Get extra info for ACPI Device/Processor objects only: 381 * Get extra info for ACPI Device/Processor objects only:
380 * Run the _STA, _ADR and, sx_w, and _sx_d methods. 382 * Run the _STA, _ADR and, sx_w, and _sx_d methods.
381 * 383 *
382 * Note: none of these methods are required, so they may or may 384 * Notes: none of these methods are required, so they may or may
383 * not be present for this device. The Info->Valid bitfield is used 385 * not be present for this device. The Info->Valid bitfield is used
384 * to indicate which methods were found and run successfully. 386 * to indicate which methods were found and run successfully.
387 *
388 * For _STA, if the method does not exist, then (as per the ACPI
389 * specification), the returned current_status flags will indicate
390 * that the device is present/functional/enabled. Otherwise, the
391 * current_status flags reflect the value returned from _STA.
385 */ 392 */
386 393
387 /* Execute the Device._STA method */ 394 /* Execute the Device._STA method */
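The extended comment documents the acpi_get_object_info() behaviour relied on elsewhere: when _STA is absent, the returned current_status reports the device as present/functional/enabled. A hedged caller sketch, assuming the ACPI_VALID_STA and ACPI_STA_DEVICE_PRESENT flags from actypes.h:

    #include <linux/acpi.h>
    #include <linux/slab.h>

    static bool demo_device_is_present(acpi_handle handle)
    {
        struct acpi_device_info *info;
        bool present = false;

        if (ACPI_FAILURE(acpi_get_object_info(handle, &info)))
            return false;

        /* current_status is meaningful whether _STA exists or was defaulted. */
        if (info->valid & ACPI_VALID_STA)
            present = info->current_status & ACPI_STA_DEVICE_PRESENT;

        kfree(info);
        return present;
    }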
diff --git a/drivers/acpi/acpica/tbfadt.c b/drivers/acpi/acpica/tbfadt.c
index 33b00d22300a..9d99f2189693 100644
--- a/drivers/acpi/acpica/tbfadt.c
+++ b/drivers/acpi/acpica/tbfadt.c
@@ -117,7 +117,7 @@ static struct acpi_fadt_info fadt_info_table[] = {
117 ACPI_FADT_OFFSET(pm_timer_block), 117 ACPI_FADT_OFFSET(pm_timer_block),
118 ACPI_FADT_OFFSET(pm_timer_length), 118 ACPI_FADT_OFFSET(pm_timer_length),
119 ACPI_PM_TIMER_WIDTH, 119 ACPI_PM_TIMER_WIDTH,
120 ACPI_FADT_REQUIRED}, 120 ACPI_FADT_SEPARATE_LENGTH}, /* ACPI 5.0A: Timer is optional */
121 121
122 {"Gpe0Block", 122 {"Gpe0Block",
123 ACPI_FADT_OFFSET(xgpe0_block), 123 ACPI_FADT_OFFSET(xgpe0_block),
@@ -574,7 +574,7 @@ static void acpi_tb_validate_fadt(void)
574 574
575 if (fadt_info_table[i].type & ACPI_FADT_REQUIRED) { 575 if (fadt_info_table[i].type & ACPI_FADT_REQUIRED) {
576 /* 576 /*
577 * Field is required (Pm1a_event, Pm1a_control, pm_timer). 577 * Field is required (Pm1a_event, Pm1a_control).
578 * Both the address and length must be non-zero. 578 * Both the address and length must be non-zero.
579 */ 579 */
580 if (!address64->address || !length) { 580 if (!address64->address || !length) {
diff --git a/drivers/acpi/acpica/tbxfroot.c b/drivers/acpi/acpica/tbxfroot.c
index 7c2ecfb7c2c3..948c95e80d44 100644
--- a/drivers/acpi/acpica/tbxfroot.c
+++ b/drivers/acpi/acpica/tbxfroot.c
@@ -48,11 +48,6 @@
48#define _COMPONENT ACPI_TABLES 48#define _COMPONENT ACPI_TABLES
49ACPI_MODULE_NAME("tbxfroot") 49ACPI_MODULE_NAME("tbxfroot")
50 50
51/* Local prototypes */
52static u8 *acpi_tb_scan_memory_for_rsdp(u8 * start_address, u32 length);
53
54static acpi_status acpi_tb_validate_rsdp(struct acpi_table_rsdp *rsdp);
55
56/******************************************************************************* 51/*******************************************************************************
57 * 52 *
58 * FUNCTION: acpi_tb_validate_rsdp 53 * FUNCTION: acpi_tb_validate_rsdp
@@ -64,8 +59,7 @@ static acpi_status acpi_tb_validate_rsdp(struct acpi_table_rsdp *rsdp);
64 * DESCRIPTION: Validate the RSDP (ptr) 59 * DESCRIPTION: Validate the RSDP (ptr)
65 * 60 *
66 ******************************************************************************/ 61 ******************************************************************************/
67 62acpi_status acpi_tb_validate_rsdp(struct acpi_table_rsdp *rsdp)
68static acpi_status acpi_tb_validate_rsdp(struct acpi_table_rsdp *rsdp)
69{ 63{
70 64
71 /* 65 /*
@@ -74,7 +68,7 @@ static acpi_status acpi_tb_validate_rsdp(struct acpi_table_rsdp *rsdp)
74 * Note: Sometimes there exists more than one RSDP in memory; the valid 68 * Note: Sometimes there exists more than one RSDP in memory; the valid
75 * RSDP has a valid checksum, all others have an invalid checksum. 69 * RSDP has a valid checksum, all others have an invalid checksum.
76 */ 70 */
77 if (ACPI_STRNCMP((char *)rsdp, ACPI_SIG_RSDP, 71 if (ACPI_STRNCMP((char *)rsdp->signature, ACPI_SIG_RSDP,
78 sizeof(ACPI_SIG_RSDP) - 1) != 0) { 72 sizeof(ACPI_SIG_RSDP) - 1) != 0) {
79 73
80 /* Nope, BAD Signature */ 74 /* Nope, BAD Signature */
@@ -231,7 +225,7 @@ acpi_status acpi_find_root_pointer(acpi_size *table_address)
231 * DESCRIPTION: Search a block of memory for the RSDP signature 225 * DESCRIPTION: Search a block of memory for the RSDP signature
232 * 226 *
233 ******************************************************************************/ 227 ******************************************************************************/
234static u8 *acpi_tb_scan_memory_for_rsdp(u8 * start_address, u32 length) 228u8 *acpi_tb_scan_memory_for_rsdp(u8 *start_address, u32 length)
235{ 229{
236 acpi_status status; 230 acpi_status status;
237 u8 *mem_rover; 231 u8 *mem_rover;
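The RSDP check now compares against rsdp->signature rather than casting the whole structure pointer; since the signature is the first member of struct acpi_table_rsdp both forms compare the same bytes, but the new one is type-correct and clearer. The same test in isolation, as a sketch:

    #include <linux/acpi.h>
    #include <linux/string.h>

    /* Mirrors the updated check: compare the signature field, not the struct. */
    static bool demo_rsdp_signature_ok(struct acpi_table_rsdp *rsdp)
    {
        return strncmp(rsdp->signature, ACPI_SIG_RSDP,
                       sizeof(ACPI_SIG_RSDP) - 1) == 0;
    }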
diff --git a/drivers/acpi/acpica/uteval.c b/drivers/acpi/acpica/uteval.c
index ee83adb97b1e..4fd68971019b 100644
--- a/drivers/acpi/acpica/uteval.c
+++ b/drivers/acpi/acpica/uteval.c
@@ -239,7 +239,8 @@ acpi_ut_evaluate_numeric_object(char *object_name,
239 * RETURN: Status 239 * RETURN: Status
240 * 240 *
241 * DESCRIPTION: Executes _STA for selected device and stores results in 241 * DESCRIPTION: Executes _STA for selected device and stores results in
242 * *Flags. 242 * *Flags. If _STA does not exist, then the device is assumed
243 * to be present/functional/enabled (as per the ACPI spec).
243 * 244 *
244 * NOTE: Internal function, no parameter validation 245 * NOTE: Internal function, no parameter validation
245 * 246 *
@@ -257,6 +258,11 @@ acpi_ut_execute_STA(struct acpi_namespace_node *device_node, u32 * flags)
257 ACPI_BTYPE_INTEGER, &obj_desc); 258 ACPI_BTYPE_INTEGER, &obj_desc);
258 if (ACPI_FAILURE(status)) { 259 if (ACPI_FAILURE(status)) {
259 if (AE_NOT_FOUND == status) { 260 if (AE_NOT_FOUND == status) {
261 /*
262 * if _STA does not exist, then (as per the ACPI specification),
263 * the returned flags will indicate that the device is present,
264 * functional, and enabled.
265 */
260 ACPI_DEBUG_PRINT((ACPI_DB_EXEC, 266 ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
261 "_STA on %4.4s was not found, assuming device is present\n", 267 "_STA on %4.4s was not found, assuming device is present\n",
262 acpi_ut_get_node_name(device_node))); 268 acpi_ut_get_node_name(device_node)));
diff --git a/drivers/acpi/acpica/utglobal.c b/drivers/acpi/acpica/utglobal.c
index f736448a8606..d6f26bf8a062 100644
--- a/drivers/acpi/acpica/utglobal.c
+++ b/drivers/acpi/acpica/utglobal.c
@@ -336,7 +336,6 @@ acpi_status acpi_ut_init_globals(void)
336 acpi_gbl_trace_dbg_layer = 0; 336 acpi_gbl_trace_dbg_layer = 0;
337 acpi_gbl_debugger_configuration = DEBUGGER_THREADING; 337 acpi_gbl_debugger_configuration = DEBUGGER_THREADING;
338 acpi_gbl_db_output_flags = ACPI_DB_CONSOLE_OUTPUT; 338 acpi_gbl_db_output_flags = ACPI_DB_CONSOLE_OUTPUT;
339 acpi_gbl_osi_data = 0;
340 acpi_gbl_osi_mutex = NULL; 339 acpi_gbl_osi_mutex = NULL;
341 acpi_gbl_reg_methods_executed = FALSE; 340 acpi_gbl_reg_methods_executed = FALSE;
342 341
diff --git a/drivers/acpi/acpica/utosi.c b/drivers/acpi/acpica/utosi.c
index 7e807725c636..8856bd37bc76 100644
--- a/drivers/acpi/acpica/utosi.c
+++ b/drivers/acpi/acpica/utosi.c
@@ -77,21 +77,20 @@ static struct acpi_interface_info acpi_default_supported_interfaces[] = {
77 77
78 /* Feature Group Strings */ 78 /* Feature Group Strings */
79 79
80 {"Extended Address Space Descriptor", NULL, 0, 0} 80 {"Extended Address Space Descriptor", NULL, ACPI_OSI_FEATURE, 0},
81 81
82 /* 82 /*
83 * All "optional" feature group strings (features that are implemented 83 * All "optional" feature group strings (features that are implemented
84 * by the host) should be dynamically added by the host via 84 * by the host) should be dynamically modified to VALID by the host via
85 * acpi_install_interface and should not be manually added here. 85 * acpi_install_interface or acpi_update_interfaces. Such optional feature
86 * 86 * group strings are set as INVALID by default here.
87 * Examples of optional feature group strings:
88 *
89 * "Module Device"
90 * "Processor Device"
91 * "3.0 Thermal Model"
92 * "3.0 _SCP Extensions"
93 * "Processor Aggregator Device"
94 */ 87 */
88
89 {"Module Device", NULL, ACPI_OSI_OPTIONAL_FEATURE, 0},
90 {"Processor Device", NULL, ACPI_OSI_OPTIONAL_FEATURE, 0},
91 {"3.0 Thermal Model", NULL, ACPI_OSI_OPTIONAL_FEATURE, 0},
92 {"3.0 _SCP Extensions", NULL, ACPI_OSI_OPTIONAL_FEATURE, 0},
93 {"Processor Aggregator Device", NULL, ACPI_OSI_OPTIONAL_FEATURE, 0}
95}; 94};
96 95
97/******************************************************************************* 96/*******************************************************************************
@@ -158,11 +157,20 @@ acpi_status acpi_ut_interface_terminate(void)
158 while (next_interface) { 157 while (next_interface) {
159 acpi_gbl_supported_interfaces = next_interface->next; 158 acpi_gbl_supported_interfaces = next_interface->next;
160 159
161 /* Only interfaces added at runtime can be freed */
162
163 if (next_interface->flags & ACPI_OSI_DYNAMIC) { 160 if (next_interface->flags & ACPI_OSI_DYNAMIC) {
161
162 /* Only interfaces added at runtime can be freed */
163
164 ACPI_FREE(next_interface->name); 164 ACPI_FREE(next_interface->name);
165 ACPI_FREE(next_interface); 165 ACPI_FREE(next_interface);
166 } else {
167 /* Interface is in static list. Reset it to invalid or valid. */
168
169 if (next_interface->flags & ACPI_OSI_DEFAULT_INVALID) {
170 next_interface->flags |= ACPI_OSI_INVALID;
171 } else {
172 next_interface->flags &= ~ACPI_OSI_INVALID;
173 }
166 } 174 }
167 175
168 next_interface = acpi_gbl_supported_interfaces; 176 next_interface = acpi_gbl_supported_interfaces;
@@ -278,6 +286,49 @@ acpi_status acpi_ut_remove_interface(acpi_string interface_name)
278 286
279/******************************************************************************* 287/*******************************************************************************
280 * 288 *
289 * FUNCTION: acpi_ut_update_interfaces
290 *
291 * PARAMETERS: action - Actions to be performed during the
292 * update
293 *
294 * RETURN: Status
295 *
296 * DESCRIPTION: Update _OSI interface strings, disabling or enabling OS vendor
297 * strings or/and feature group strings.
298 * Caller MUST hold acpi_gbl_osi_mutex
299 *
300 ******************************************************************************/
301
302acpi_status acpi_ut_update_interfaces(u8 action)
303{
304 struct acpi_interface_info *next_interface;
305
306 next_interface = acpi_gbl_supported_interfaces;
307 while (next_interface) {
308 if (((next_interface->flags & ACPI_OSI_FEATURE) &&
309 (action & ACPI_FEATURE_STRINGS)) ||
310 (!(next_interface->flags & ACPI_OSI_FEATURE) &&
311 (action & ACPI_VENDOR_STRINGS))) {
312 if (action & ACPI_DISABLE_INTERFACES) {
313
314 /* Mark the interfaces as invalid */
315
316 next_interface->flags |= ACPI_OSI_INVALID;
317 } else {
318 /* Mark the interfaces as valid */
319
320 next_interface->flags &= ~ACPI_OSI_INVALID;
321 }
322 }
323
324 next_interface = next_interface->next;
325 }
326
327 return (AE_OK);
328}
329
330/*******************************************************************************
331 *
281 * FUNCTION: acpi_ut_get_interface 332 * FUNCTION: acpi_ut_get_interface
282 * 333 *
283 * PARAMETERS: interface_name - The interface to find 334 * PARAMETERS: interface_name - The interface to find
diff --git a/drivers/acpi/acpica/utstring.c b/drivers/acpi/acpica/utstring.c
index c53759b76a3f..cb1e9cc32d5f 100644
--- a/drivers/acpi/acpica/utstring.c
+++ b/drivers/acpi/acpica/utstring.c
@@ -333,7 +333,8 @@ acpi_status acpi_ut_strtoul64(char *string, u32 base, u64 *ret_integer)
333 * FUNCTION: acpi_ut_print_string 333 * FUNCTION: acpi_ut_print_string
334 * 334 *
335 * PARAMETERS: string - Null terminated ASCII string 335 * PARAMETERS: string - Null terminated ASCII string
336 * max_length - Maximum output length 336 * max_length - Maximum output length. Used to constrain the
337 * length of strings during debug output only.
337 * 338 *
338 * RETURN: None 339 * RETURN: None
339 * 340 *
@@ -342,7 +343,7 @@ acpi_status acpi_ut_strtoul64(char *string, u32 base, u64 *ret_integer)
342 * 343 *
343 ******************************************************************************/ 344 ******************************************************************************/
344 345
345void acpi_ut_print_string(char *string, u8 max_length) 346void acpi_ut_print_string(char *string, u16 max_length)
346{ 347{
347 u32 i; 348 u32 i;
348 349
diff --git a/drivers/acpi/acpica/utxface.c b/drivers/acpi/acpica/utxface.c
index 6505774f223e..03a211e6e26a 100644
--- a/drivers/acpi/acpica/utxface.c
+++ b/drivers/acpi/acpica/utxface.c
@@ -389,6 +389,34 @@ ACPI_EXPORT_SYMBOL(acpi_install_interface_handler)
389 389
390/***************************************************************************** 390/*****************************************************************************
391 * 391 *
392 * FUNCTION: acpi_update_interfaces
393 *
394 * PARAMETERS: action - Actions to be performed during the
395 * update
396 *
397 * RETURN: Status
398 *
399 * DESCRIPTION: Update _OSI interface strings, disabling or enabling OS vendor
400 * string or/and feature group strings.
401 *
402 ****************************************************************************/
403acpi_status acpi_update_interfaces(u8 action)
404{
405 acpi_status status;
406
407 status = acpi_os_acquire_mutex(acpi_gbl_osi_mutex, ACPI_WAIT_FOREVER);
408 if (ACPI_FAILURE(status)) {
409 return (status);
410 }
411
412 status = acpi_ut_update_interfaces(action);
413
414 acpi_os_release_mutex(acpi_gbl_osi_mutex);
415 return (status);
416}
417
418/*****************************************************************************
419 *
392 * FUNCTION: acpi_check_address_range 420 * FUNCTION: acpi_check_address_range
393 * 421 *
394 * PARAMETERS: space_id - Address space ID 422 * PARAMETERS: space_id - Address space ID
@@ -402,6 +430,7 @@ ACPI_EXPORT_SYMBOL(acpi_install_interface_handler)
402 * ASL operation region address ranges. 430 * ASL operation region address ranges.
403 * 431 *
404 ****************************************************************************/ 432 ****************************************************************************/
433
405u32 434u32
406acpi_check_address_range(acpi_adr_space_type space_id, 435acpi_check_address_range(acpi_adr_space_type space_id,
407 acpi_physical_address address, 436 acpi_physical_address address,
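acpi_update_interfaces() is the new public wrapper around acpi_ut_update_interfaces(); the action byte combines the ACPI_DISABLE_INTERFACES/ACPI_ENABLE_INTERFACES direction with the ACPI_VENDOR_STRINGS and/or ACPI_FEATURE_STRINGS selectors tested in the utosi.c hunk above. A minimal sketch that hides every vendor _OSI string and then re-exposes a single one (the particular string is only an example):

    #include <linux/acpi.h>

    static void demo_restrict_osi(void)
    {
        /* Mark all OS-vendor _OSI strings as unsupported... */
        acpi_update_interfaces(ACPI_DISABLE_INTERFACES | ACPI_VENDOR_STRINGS);

        /* ...then advertise only one of them, e.g. "Windows 2009". */
        acpi_install_interface("Windows 2009");
    }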
diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c
index 85332872da45..2c9958cd7a43 100644
--- a/drivers/acpi/battery.c
+++ b/drivers/acpi/battery.c
@@ -527,18 +527,14 @@ static int acpi_battery_get_state(struct acpi_battery *battery)
527static int acpi_battery_set_alarm(struct acpi_battery *battery) 527static int acpi_battery_set_alarm(struct acpi_battery *battery)
528{ 528{
529 acpi_status status = 0; 529 acpi_status status = 0;
530 union acpi_object arg0 = { .type = ACPI_TYPE_INTEGER };
531 struct acpi_object_list arg_list = { 1, &arg0 };
532 530
533 if (!acpi_battery_present(battery) || 531 if (!acpi_battery_present(battery) ||
534 !test_bit(ACPI_BATTERY_ALARM_PRESENT, &battery->flags)) 532 !test_bit(ACPI_BATTERY_ALARM_PRESENT, &battery->flags))
535 return -ENODEV; 533 return -ENODEV;
536 534
537 arg0.integer.value = battery->alarm;
538
539 mutex_lock(&battery->lock); 535 mutex_lock(&battery->lock);
540 status = acpi_evaluate_object(battery->device->handle, "_BTP", 536 status = acpi_execute_simple_method(battery->device->handle, "_BTP",
541 &arg_list, NULL); 537 battery->alarm);
542 mutex_unlock(&battery->lock); 538 mutex_unlock(&battery->lock);
543 539
544 if (ACPI_FAILURE(status)) 540 if (ACPI_FAILURE(status))
@@ -550,12 +546,8 @@ static int acpi_battery_set_alarm(struct acpi_battery *battery)
550 546
551static int acpi_battery_init_alarm(struct acpi_battery *battery) 547static int acpi_battery_init_alarm(struct acpi_battery *battery)
552{ 548{
553 acpi_status status = AE_OK;
554 acpi_handle handle = NULL;
555
556 /* See if alarms are supported, and if so, set default */ 549 /* See if alarms are supported, and if so, set default */
557 status = acpi_get_handle(battery->device->handle, "_BTP", &handle); 550 if (!acpi_has_method(battery->device->handle, "_BTP")) {
558 if (ACPI_FAILURE(status)) {
559 clear_bit(ACPI_BATTERY_ALARM_PRESENT, &battery->flags); 551 clear_bit(ACPI_BATTERY_ALARM_PRESENT, &battery->flags);
560 return 0; 552 return 0;
561 } 553 }
@@ -1066,7 +1058,7 @@ static int acpi_battery_add(struct acpi_device *device)
1066{ 1058{
1067 int result = 0; 1059 int result = 0;
1068 struct acpi_battery *battery = NULL; 1060 struct acpi_battery *battery = NULL;
1069 acpi_handle handle; 1061
1070 if (!device) 1062 if (!device)
1071 return -EINVAL; 1063 return -EINVAL;
1072 battery = kzalloc(sizeof(struct acpi_battery), GFP_KERNEL); 1064 battery = kzalloc(sizeof(struct acpi_battery), GFP_KERNEL);
@@ -1078,8 +1070,7 @@ static int acpi_battery_add(struct acpi_device *device)
1078 device->driver_data = battery; 1070 device->driver_data = battery;
1079 mutex_init(&battery->lock); 1071 mutex_init(&battery->lock);
1080 mutex_init(&battery->sysfs_lock); 1072 mutex_init(&battery->sysfs_lock);
1081 if (ACPI_SUCCESS(acpi_get_handle(battery->device->handle, 1073 if (acpi_has_method(battery->device->handle, "_BIX"))
1082 "_BIX", &handle)))
1083 set_bit(ACPI_BATTERY_XINFO_PRESENT, &battery->flags); 1074 set_bit(ACPI_BATTERY_XINFO_PRESENT, &battery->flags);
1084 result = acpi_battery_update(battery); 1075 result = acpi_battery_update(battery);
1085 if (result) 1076 if (result)
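The battery driver is converted to the new utility helpers: acpi_has_method() replaces the acpi_get_handle() existence probe and acpi_execute_simple_method() replaces hand-building a one-integer acpi_object_list. The same pattern in isolation; the handle is a placeholder and _BTP is used only because it appears in the hunk:

    #include <linux/acpi.h>
    #include <linux/errno.h>

    static int demo_set_trip_point(acpi_handle handle, u64 alarm)
    {
        acpi_status status;

        /* Probe for the control method without evaluating it. */
        if (!acpi_has_method(handle, "_BTP"))
            return -ENODEV;

        /* Evaluate it with a single integer argument. */
        status = acpi_execute_simple_method(handle, "_BTP", alarm);
        return ACPI_FAILURE(status) ? -EIO : 0;
    }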
diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
index 7df97d277545..b587ec8257b2 100644
--- a/drivers/acpi/bus.c
+++ b/drivers/acpi/bus.c
@@ -380,19 +380,6 @@ static void acpi_bus_check_scope(acpi_handle handle)
380 */ 380 */
381} 381}
382 382
383static BLOCKING_NOTIFIER_HEAD(acpi_bus_notify_list);
384int register_acpi_bus_notifier(struct notifier_block *nb)
385{
386 return blocking_notifier_chain_register(&acpi_bus_notify_list, nb);
387}
388EXPORT_SYMBOL_GPL(register_acpi_bus_notifier);
389
390void unregister_acpi_bus_notifier(struct notifier_block *nb)
391{
392 blocking_notifier_chain_unregister(&acpi_bus_notify_list, nb);
393}
394EXPORT_SYMBOL_GPL(unregister_acpi_bus_notifier);
395
396/** 383/**
397 * acpi_bus_notify 384 * acpi_bus_notify
398 * --------------- 385 * ---------------
@@ -406,9 +393,6 @@ static void acpi_bus_notify(acpi_handle handle, u32 type, void *data)
406 ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Notification %#02x to handle %p\n", 393 ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Notification %#02x to handle %p\n",
407 type, handle)); 394 type, handle));
408 395
409 blocking_notifier_call_chain(&acpi_bus_notify_list,
410 type, (void *)handle);
411
412 switch (type) { 396 switch (type) {
413 397
414 case ACPI_NOTIFY_BUS_CHECK: 398 case ACPI_NOTIFY_BUS_CHECK:
@@ -474,8 +458,6 @@ static void acpi_bus_notify(acpi_handle handle, u32 type, void *data)
474static int __init acpi_bus_init_irq(void) 458static int __init acpi_bus_init_irq(void)
475{ 459{
476 acpi_status status; 460 acpi_status status;
477 union acpi_object arg = { ACPI_TYPE_INTEGER };
478 struct acpi_object_list arg_list = { 1, &arg };
479 char *message = NULL; 461 char *message = NULL;
480 462
481 463
@@ -504,9 +486,7 @@ static int __init acpi_bus_init_irq(void)
504 486
505 printk(KERN_INFO PREFIX "Using %s for interrupt routing\n", message); 487 printk(KERN_INFO PREFIX "Using %s for interrupt routing\n", message);
506 488
507 arg.integer.value = acpi_irq_model; 489 status = acpi_execute_simple_method(NULL, "\\_PIC", acpi_irq_model);
508
509 status = acpi_evaluate_object(NULL, "\\_PIC", &arg_list, NULL);
510 if (ACPI_FAILURE(status) && (status != AE_NOT_FOUND)) { 490 if (ACPI_FAILURE(status) && (status != AE_NOT_FOUND)) {
511 ACPI_EXCEPTION((AE_INFO, status, "Evaluating _PIC")); 491 ACPI_EXCEPTION((AE_INFO, status, "Evaluating _PIC"));
512 return -ENODEV; 492 return -ENODEV;
diff --git a/drivers/acpi/device_pm.c b/drivers/acpi/device_pm.c
index 4ab807dc8518..59d3202f6b36 100644
--- a/drivers/acpi/device_pm.c
+++ b/drivers/acpi/device_pm.c
@@ -159,26 +159,29 @@ int acpi_device_set_power(struct acpi_device *device, int state)
159 int result = 0; 159 int result = 0;
160 bool cut_power = false; 160 bool cut_power = false;
161 161
162 if (!device || (state < ACPI_STATE_D0) || (state > ACPI_STATE_D3_COLD)) 162 if (!device || !device->flags.power_manageable
163 || (state < ACPI_STATE_D0) || (state > ACPI_STATE_D3_COLD))
163 return -EINVAL; 164 return -EINVAL;
164 165
165 /* Make sure this is a valid target state */ 166 /* Make sure this is a valid target state */
166 167
167 if (state == device->power.state) { 168 if (state == device->power.state) {
168 ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Device is already at %s\n", 169 ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Device [%s] already in %s\n",
170 device->pnp.bus_id,
169 acpi_power_state_string(state))); 171 acpi_power_state_string(state)));
170 return 0; 172 return 0;
171 } 173 }
172 174
173 if (!device->power.states[state].flags.valid) { 175 if (!device->power.states[state].flags.valid) {
174 printk(KERN_WARNING PREFIX "Device does not support %s\n", 176 dev_warn(&device->dev, "Power state %s not supported\n",
175 acpi_power_state_string(state)); 177 acpi_power_state_string(state));
176 return -ENODEV; 178 return -ENODEV;
177 } 179 }
178 if (device->parent && (state < device->parent->power.state)) { 180 if (device->parent && (state < device->parent->power.state)) {
179 printk(KERN_WARNING PREFIX 181 dev_warn(&device->dev,
180 "Cannot set device to a higher-powered" 182 "Cannot transition to power state %s for parent in %s\n",
181 " state than parent\n"); 183 acpi_power_state_string(state),
184 acpi_power_state_string(device->parent->power.state));
182 return -ENODEV; 185 return -ENODEV;
183 } 186 }
184 187
@@ -191,8 +194,8 @@ int acpi_device_set_power(struct acpi_device *device, int state)
191 194
192 if (state < device->power.state && state != ACPI_STATE_D0 195 if (state < device->power.state && state != ACPI_STATE_D0
193 && device->power.state >= ACPI_STATE_D3_HOT) { 196 && device->power.state >= ACPI_STATE_D3_HOT) {
194 printk(KERN_WARNING PREFIX 197 dev_warn(&device->dev,
195 "Cannot transition to non-D0 state from D3\n"); 198 "Cannot transition to non-D0 state from D3\n");
196 return -ENODEV; 199 return -ENODEV;
197 } 200 }
198 201
@@ -219,10 +222,8 @@ int acpi_device_set_power(struct acpi_device *device, int state)
219 222
220 end: 223 end:
221 if (result) { 224 if (result) {
222 printk(KERN_WARNING PREFIX 225 dev_warn(&device->dev, "Failed to change power state to %s\n",
223 "Device [%s] failed to transition to %s\n", 226 acpi_power_state_string(state));
224 device->pnp.bus_id,
225 acpi_power_state_string(state));
226 } else { 227 } else {
227 device->power.state = state; 228 device->power.state = state;
228 ACPI_DEBUG_PRINT((ACPI_DB_INFO, 229 ACPI_DEBUG_PRINT((ACPI_DB_INFO,
@@ -244,13 +245,6 @@ int acpi_bus_set_power(acpi_handle handle, int state)
244 if (result) 245 if (result)
245 return result; 246 return result;
246 247
247 if (!device->flags.power_manageable) {
248 ACPI_DEBUG_PRINT((ACPI_DB_INFO,
249 "Device [%s] is not power manageable\n",
250 dev_name(&device->dev)));
251 return -ENODEV;
252 }
253
254 return acpi_device_set_power(device, state); 248 return acpi_device_set_power(device, state);
255} 249}
256EXPORT_SYMBOL(acpi_bus_set_power); 250EXPORT_SYMBOL(acpi_bus_set_power);
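The power_manageable check moves from acpi_bus_set_power() into acpi_device_set_power(), so any caller of the latter now gets -EINVAL for devices without ACPI power management instead of relying on the bus wrapper to filter them, and the warnings become dev_warn() with the device name. A trivial sketch of the wrapper path after the change:

    #include <linux/acpi.h>

    static int demo_power_off(acpi_handle handle)
    {
        /*
         * acpi_bus_set_power() resolves the handle and calls
         * acpi_device_set_power(), which now also rejects devices
         * that are not power manageable (-EINVAL).
         */
        return acpi_bus_set_power(handle, ACPI_STATE_D3_COLD);
    }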
diff --git a/drivers/acpi/dock.c b/drivers/acpi/dock.c
index c90112ceb570..05ea4be01a83 100644
--- a/drivers/acpi/dock.c
+++ b/drivers/acpi/dock.c
@@ -51,8 +51,6 @@ MODULE_PARM_DESC(immediate_undock, "1 (default) will cause the driver to "
51 " the driver to wait for userspace to write the undock sysfs file " 51 " the driver to wait for userspace to write the undock sysfs file "
52 " before undocking"); 52 " before undocking");
53 53
54static struct atomic_notifier_head dock_notifier_list;
55
56static const struct acpi_device_id dock_device_ids[] = { 54static const struct acpi_device_id dock_device_ids[] = {
57 {"LNXDOCK", 0}, 55 {"LNXDOCK", 0},
58 {"", 0}, 56 {"", 0},
@@ -63,8 +61,6 @@ struct dock_station {
63 acpi_handle handle; 61 acpi_handle handle;
64 unsigned long last_dock_time; 62 unsigned long last_dock_time;
65 u32 flags; 63 u32 flags;
66 spinlock_t dd_lock;
67 struct mutex hp_lock;
68 struct list_head dependent_devices; 64 struct list_head dependent_devices;
69 65
70 struct list_head sibling; 66 struct list_head sibling;
@@ -91,6 +87,12 @@ struct dock_dependent_device {
91#define DOCK_EVENT 3 87#define DOCK_EVENT 3
92#define UNDOCK_EVENT 2 88#define UNDOCK_EVENT 2
93 89
90enum dock_callback_type {
91 DOCK_CALL_HANDLER,
92 DOCK_CALL_FIXUP,
93 DOCK_CALL_UEVENT,
94};
95
94/***************************************************************************** 96/*****************************************************************************
95 * Dock Dependent device functions * 97 * Dock Dependent device functions *
96 *****************************************************************************/ 98 *****************************************************************************/
@@ -101,7 +103,7 @@ struct dock_dependent_device {
101 * 103 *
102 * Add the dependent device to the dock's dependent device list. 104 * Add the dependent device to the dock's dependent device list.
103 */ 105 */
104static int 106static int __init
105add_dock_dependent_device(struct dock_station *ds, acpi_handle handle) 107add_dock_dependent_device(struct dock_station *ds, acpi_handle handle)
106{ 108{
107 struct dock_dependent_device *dd; 109 struct dock_dependent_device *dd;
@@ -112,14 +114,21 @@ add_dock_dependent_device(struct dock_station *ds, acpi_handle handle)
112 114
113 dd->handle = handle; 115 dd->handle = handle;
114 INIT_LIST_HEAD(&dd->list); 116 INIT_LIST_HEAD(&dd->list);
115
116 spin_lock(&ds->dd_lock);
117 list_add_tail(&dd->list, &ds->dependent_devices); 117 list_add_tail(&dd->list, &ds->dependent_devices);
118 spin_unlock(&ds->dd_lock);
119 118
120 return 0; 119 return 0;
121} 120}
122 121
122static void remove_dock_dependent_devices(struct dock_station *ds)
123{
124 struct dock_dependent_device *dd, *aux;
125
126 list_for_each_entry_safe(dd, aux, &ds->dependent_devices, list) {
127 list_del(&dd->list);
128 kfree(dd);
129 }
130}
131
123/** 132/**
124 * dock_init_hotplug - Initialize a hotplug device on a docking station. 133 * dock_init_hotplug - Initialize a hotplug device on a docking station.
125 * @dd: Dock-dependent device. 134 * @dd: Dock-dependent device.
@@ -135,19 +144,16 @@ static int dock_init_hotplug(struct dock_dependent_device *dd,
135 int ret = 0; 144 int ret = 0;
136 145
137 mutex_lock(&hotplug_lock); 146 mutex_lock(&hotplug_lock);
138 147 if (WARN_ON(dd->hp_context)) {
139 if (dd->hp_context) {
140 ret = -EEXIST; 148 ret = -EEXIST;
141 } else { 149 } else {
142 dd->hp_refcount = 1; 150 dd->hp_refcount = 1;
143 dd->hp_ops = ops; 151 dd->hp_ops = ops;
144 dd->hp_context = context; 152 dd->hp_context = context;
145 dd->hp_release = release; 153 dd->hp_release = release;
154 if (init)
155 init(context);
146 } 156 }
147
148 if (!WARN_ON(ret) && init)
149 init(context);
150
151 mutex_unlock(&hotplug_lock); 157 mutex_unlock(&hotplug_lock);
152 return ret; 158 return ret;
153} 159}
@@ -162,27 +168,22 @@ static int dock_init_hotplug(struct dock_dependent_device *dd,
162 */ 168 */
163static void dock_release_hotplug(struct dock_dependent_device *dd) 169static void dock_release_hotplug(struct dock_dependent_device *dd)
164{ 170{
165 void (*release)(void *) = NULL;
166 void *context = NULL;
167
168 mutex_lock(&hotplug_lock); 171 mutex_lock(&hotplug_lock);
169
170 if (dd->hp_context && !--dd->hp_refcount) { 172 if (dd->hp_context && !--dd->hp_refcount) {
173 void (*release)(void *) = dd->hp_release;
174 void *context = dd->hp_context;
175
171 dd->hp_ops = NULL; 176 dd->hp_ops = NULL;
172 context = dd->hp_context;
173 dd->hp_context = NULL; 177 dd->hp_context = NULL;
174 release = dd->hp_release;
175 dd->hp_release = NULL; 178 dd->hp_release = NULL;
179 if (release)
180 release(context);
176 } 181 }
177
178 if (release && context)
179 release(context);
180
181 mutex_unlock(&hotplug_lock); 182 mutex_unlock(&hotplug_lock);
182} 183}
183 184
184static void dock_hotplug_event(struct dock_dependent_device *dd, u32 event, 185static void dock_hotplug_event(struct dock_dependent_device *dd, u32 event,
185 bool uevent) 186 enum dock_callback_type cb_type)
186{ 187{
187 acpi_notify_handler cb = NULL; 188 acpi_notify_handler cb = NULL;
188 bool run = false; 189 bool run = false;
@@ -192,8 +193,18 @@ static void dock_hotplug_event(struct dock_dependent_device *dd, u32 event,
192 if (dd->hp_context) { 193 if (dd->hp_context) {
193 run = true; 194 run = true;
194 dd->hp_refcount++; 195 dd->hp_refcount++;
195 if (dd->hp_ops) 196 if (dd->hp_ops) {
196 cb = uevent ? dd->hp_ops->uevent : dd->hp_ops->handler; 197 switch (cb_type) {
198 case DOCK_CALL_FIXUP:
199 cb = dd->hp_ops->fixup;
200 break;
201 case DOCK_CALL_UEVENT:
202 cb = dd->hp_ops->uevent;
203 break;
204 default:
205 cb = dd->hp_ops->handler;
206 }
207 }
197 } 208 }
198 209
199 mutex_unlock(&hotplug_lock); 210 mutex_unlock(&hotplug_lock);
@@ -220,63 +231,17 @@ find_dock_dependent_device(struct dock_station *ds, acpi_handle handle)
220{ 231{
221 struct dock_dependent_device *dd; 232 struct dock_dependent_device *dd;
222 233
223 spin_lock(&ds->dd_lock); 234 list_for_each_entry(dd, &ds->dependent_devices, list)
224 list_for_each_entry(dd, &ds->dependent_devices, list) { 235 if (handle == dd->handle)
225 if (handle == dd->handle) {
226 spin_unlock(&ds->dd_lock);
227 return dd; 236 return dd;
228 } 237
229 }
230 spin_unlock(&ds->dd_lock);
231 return NULL; 238 return NULL;
232} 239}
233 240
234/***************************************************************************** 241/*****************************************************************************
235 * Dock functions * 242 * Dock functions *
236 *****************************************************************************/ 243 *****************************************************************************/
237/** 244static int __init is_battery(acpi_handle handle)
238 * is_dock - see if a device is a dock station
239 * @handle: acpi handle of the device
240 *
241 * If an acpi object has a _DCK method, then it is by definition a dock
242 * station, so return true.
243 */
244static int is_dock(acpi_handle handle)
245{
246 acpi_status status;
247 acpi_handle tmp;
248
249 status = acpi_get_handle(handle, "_DCK", &tmp);
250 if (ACPI_FAILURE(status))
251 return 0;
252 return 1;
253}
254
255static int is_ejectable(acpi_handle handle)
256{
257 acpi_status status;
258 acpi_handle tmp;
259
260 status = acpi_get_handle(handle, "_EJ0", &tmp);
261 if (ACPI_FAILURE(status))
262 return 0;
263 return 1;
264}
265
266static int is_ata(acpi_handle handle)
267{
268 acpi_handle tmp;
269
270 if ((ACPI_SUCCESS(acpi_get_handle(handle, "_GTF", &tmp))) ||
271 (ACPI_SUCCESS(acpi_get_handle(handle, "_GTM", &tmp))) ||
272 (ACPI_SUCCESS(acpi_get_handle(handle, "_STM", &tmp))) ||
273 (ACPI_SUCCESS(acpi_get_handle(handle, "_SDD", &tmp))))
274 return 1;
275
276 return 0;
277}
278
279static int is_battery(acpi_handle handle)
280{ 245{
281 struct acpi_device_info *info; 246 struct acpi_device_info *info;
282 int ret = 1; 247 int ret = 1;
@@ -292,17 +257,13 @@ static int is_battery(acpi_handle handle)
292 return ret; 257 return ret;
293} 258}
294 259
295static int is_ejectable_bay(acpi_handle handle) 260/* Check whether ACPI object is an ejectable battery or disk bay */
261static bool __init is_ejectable_bay(acpi_handle handle)
296{ 262{
297 acpi_handle phandle; 263 if (acpi_has_method(handle, "_EJ0") && is_battery(handle))
264 return true;
298 265
299 if (!is_ejectable(handle)) 266 return acpi_bay_match(handle);
300 return 0;
301 if (is_battery(handle) || is_ata(handle))
302 return 1;
303 if (!acpi_get_parent(handle, &phandle) && is_ata(phandle))
304 return 1;
305 return 0;
306} 267}
307 268
308/** 269/**
@@ -320,7 +281,7 @@ int is_dock_device(acpi_handle handle)
320 if (!dock_station_count) 281 if (!dock_station_count)
321 return 0; 282 return 0;
322 283
323 if (is_dock(handle)) 284 if (acpi_dock_match(handle))
324 return 1; 285 return 1;
325 286
326 list_for_each_entry(dock_station, &dock_stations, sibling) 287 list_for_each_entry(dock_station, &dock_stations, sibling)
@@ -359,10 +320,8 @@ static int dock_present(struct dock_station *ds)
359 * handle if one does not exist already. This should cause 320 * handle if one does not exist already. This should cause
360 * acpi to scan for drivers for the given devices, and call 321 * acpi to scan for drivers for the given devices, and call
361 * matching driver's add routine. 322 * matching driver's add routine.
362 *
363 * Returns a pointer to the acpi_device corresponding to the handle.
364 */ 323 */
365static struct acpi_device * dock_create_acpi_device(acpi_handle handle) 324static void dock_create_acpi_device(acpi_handle handle)
366{ 325{
367 struct acpi_device *device; 326 struct acpi_device *device;
368 int ret; 327 int ret;
@@ -375,10 +334,7 @@ static struct acpi_device * dock_create_acpi_device(acpi_handle handle)
375 ret = acpi_bus_scan(handle); 334 ret = acpi_bus_scan(handle);
376 if (ret) 335 if (ret)
377 pr_debug("error adding bus, %x\n", -ret); 336 pr_debug("error adding bus, %x\n", -ret);
378
379 acpi_bus_get_device(handle, &device);
380 } 337 }
381 return device;
382} 338}
383 339
384/** 340/**
@@ -397,9 +353,29 @@ static void dock_remove_acpi_device(acpi_handle handle)
397} 353}
398 354
399/** 355/**
400 * hotplug_dock_devices - insert or remove devices on the dock station 356 * hot_remove_dock_devices - Remove dock station devices.
357 * @ds: Dock station.
358 */
359static void hot_remove_dock_devices(struct dock_station *ds)
360{
361 struct dock_dependent_device *dd;
362
363 /*
364 * Walk the list in reverse order so that devices that have been added
365 * last are removed first (in case there are some indirect dependencies
366 * between them).
367 */
368 list_for_each_entry_reverse(dd, &ds->dependent_devices, list)
369 dock_hotplug_event(dd, ACPI_NOTIFY_EJECT_REQUEST, false);
370
371 list_for_each_entry_reverse(dd, &ds->dependent_devices, list)
372 dock_remove_acpi_device(dd->handle);
373}
374
375/**
376 * hotplug_dock_devices - Insert devices on a dock station.
401 * @ds: the dock station 377 * @ds: the dock station
402 * @event: either bus check or eject request 378 * @event: either bus check or device check request
403 * 379 *
404 * Some devices on the dock station need to have drivers called 380 * Some devices on the dock station need to have drivers called
405 * to perform hotplug operations after a dock event has occurred. 381 * to perform hotplug operations after a dock event has occurred.
@@ -410,27 +386,21 @@ static void hotplug_dock_devices(struct dock_station *ds, u32 event)
410{ 386{
411 struct dock_dependent_device *dd; 387 struct dock_dependent_device *dd;
412 388
413 mutex_lock(&ds->hp_lock); 389 /* Call driver specific post-dock fixups. */
390 list_for_each_entry(dd, &ds->dependent_devices, list)
391 dock_hotplug_event(dd, event, DOCK_CALL_FIXUP);
414 392
415 /* 393 /* Call driver specific hotplug functions. */
416 * First call driver specific hotplug functions
417 */
418 list_for_each_entry(dd, &ds->dependent_devices, list) 394 list_for_each_entry(dd, &ds->dependent_devices, list)
419 dock_hotplug_event(dd, event, false); 395 dock_hotplug_event(dd, event, DOCK_CALL_HANDLER);
420 396
421 /* 397 /*
422 * Now make sure that an acpi_device is created for each 398 * Now make sure that an acpi_device is created for each dependent
423 * dependent device, or removed if this is an eject request. 399 * device. That will cause scan handlers to be attached to device
424 * This will cause acpi_drivers to be stopped/started if they 400 * objects or acpi_drivers to be stopped/started if they are present.
425 * exist
426 */ 401 */
427 list_for_each_entry(dd, &ds->dependent_devices, list) { 402 list_for_each_entry(dd, &ds->dependent_devices, list)
428 if (event == ACPI_NOTIFY_EJECT_REQUEST) 403 dock_create_acpi_device(dd->handle);
429 dock_remove_acpi_device(dd->handle);
430 else
431 dock_create_acpi_device(dd->handle);
432 }
433 mutex_unlock(&ds->hp_lock);
434} 404}
435 405
436static void dock_event(struct dock_station *ds, u32 event, int num) 406static void dock_event(struct dock_station *ds, u32 event, int num)
@@ -453,44 +423,13 @@ static void dock_event(struct dock_station *ds, u32 event, int num)
453 kobject_uevent_env(&dev->kobj, KOBJ_CHANGE, envp); 423 kobject_uevent_env(&dev->kobj, KOBJ_CHANGE, envp);
454 424
455 list_for_each_entry(dd, &ds->dependent_devices, list) 425 list_for_each_entry(dd, &ds->dependent_devices, list)
456 dock_hotplug_event(dd, event, true); 426 dock_hotplug_event(dd, event, DOCK_CALL_UEVENT);
457 427
458 if (num != DOCK_EVENT) 428 if (num != DOCK_EVENT)
459 kobject_uevent_env(&dev->kobj, KOBJ_CHANGE, envp); 429 kobject_uevent_env(&dev->kobj, KOBJ_CHANGE, envp);
460} 430}
461 431
462/** 432/**
463 * eject_dock - respond to a dock eject request
464 * @ds: the dock station
465 *
466 * This is called after _DCK is called, to execute the dock station's
467 * _EJ0 method.
468 */
469static void eject_dock(struct dock_station *ds)
470{
471 struct acpi_object_list arg_list;
472 union acpi_object arg;
473 acpi_status status;
474 acpi_handle tmp;
475
476 /* all dock devices should have _EJ0, but check anyway */
477 status = acpi_get_handle(ds->handle, "_EJ0", &tmp);
478 if (ACPI_FAILURE(status)) {
479 pr_debug("No _EJ0 support for dock device\n");
480 return;
481 }
482
483 arg_list.count = 1;
484 arg_list.pointer = &arg;
485 arg.type = ACPI_TYPE_INTEGER;
486 arg.integer.value = 1;
487
488 status = acpi_evaluate_object(ds->handle, "_EJ0", &arg_list, NULL);
489 if (ACPI_FAILURE(status))
490 pr_debug("Failed to evaluate _EJ0!\n");
491}
492
493/**
494 * handle_dock - handle a dock event 433 * handle_dock - handle a dock event
495 * @ds: the dock station 434 * @ds: the dock station
496 * @dock: to dock, or undock - that is the question 435 * @dock: to dock, or undock - that is the question
@@ -550,27 +489,6 @@ static inline void complete_undock(struct dock_station *ds)
550 ds->flags &= ~(DOCK_UNDOCKING); 489 ds->flags &= ~(DOCK_UNDOCKING);
551} 490}
552 491
553static void dock_lock(struct dock_station *ds, int lock)
554{
555 struct acpi_object_list arg_list;
556 union acpi_object arg;
557 acpi_status status;
558
559 arg_list.count = 1;
560 arg_list.pointer = &arg;
561 arg.type = ACPI_TYPE_INTEGER;
562 arg.integer.value = !!lock;
563 status = acpi_evaluate_object(ds->handle, "_LCK", &arg_list, NULL);
564 if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) {
565 if (lock)
566 acpi_handle_warn(ds->handle,
567 "Locking device failed (0x%x)\n", status);
568 else
569 acpi_handle_warn(ds->handle,
570 "Unlocking device failed (0x%x)\n", status);
571 }
572}
573
574/** 492/**
575 * dock_in_progress - see if we are in the middle of handling a dock event 493 * dock_in_progress - see if we are in the middle of handling a dock event
576 * @ds: the dock station 494 * @ds: the dock station
@@ -588,37 +506,6 @@ static int dock_in_progress(struct dock_station *ds)
588} 506}
589 507
590/** 508/**
591 * register_dock_notifier - add yourself to the dock notifier list
592 * @nb: the callers notifier block
593 *
594 * If a driver wishes to be notified about dock events, they can
595 * use this function to put a notifier block on the dock notifier list.
596 * this notifier call chain will be called after a dock event, but
597 * before hotplugging any new devices.
598 */
599int register_dock_notifier(struct notifier_block *nb)
600{
601 if (!dock_station_count)
602 return -ENODEV;
603
604 return atomic_notifier_chain_register(&dock_notifier_list, nb);
605}
606EXPORT_SYMBOL_GPL(register_dock_notifier);
607
608/**
609 * unregister_dock_notifier - remove yourself from the dock notifier list
610 * @nb: the callers notifier block
611 */
612void unregister_dock_notifier(struct notifier_block *nb)
613{
614 if (!dock_station_count)
615 return;
616
617 atomic_notifier_chain_unregister(&dock_notifier_list, nb);
618}
619EXPORT_SYMBOL_GPL(unregister_dock_notifier);
620
621/**
622 * register_hotplug_dock_device - register a hotplug function 509 * register_hotplug_dock_device - register a hotplug function
623 * @handle: the handle of the device 510 * @handle: the handle of the device
624 * @ops: handlers to call after docking 511 * @ops: handlers to call after docking
@@ -703,10 +590,10 @@ static int handle_eject_request(struct dock_station *ds, u32 event)
703 */ 590 */
704 dock_event(ds, event, UNDOCK_EVENT); 591 dock_event(ds, event, UNDOCK_EVENT);
705 592
706 hotplug_dock_devices(ds, ACPI_NOTIFY_EJECT_REQUEST); 593 hot_remove_dock_devices(ds);
707 undock(ds); 594 undock(ds);
708 dock_lock(ds, 0); 595 acpi_evaluate_lck(ds->handle, 0);
709 eject_dock(ds); 596 acpi_evaluate_ej0(ds->handle);
710 if (dock_present(ds)) { 597 if (dock_present(ds)) {
711 acpi_handle_err(ds->handle, "Unable to undock!\n"); 598 acpi_handle_err(ds->handle, "Unable to undock!\n");
712 return -EBUSY; 599 return -EBUSY;
@@ -717,18 +604,17 @@ static int handle_eject_request(struct dock_station *ds, u32 event)
717 604
718/** 605/**
719 * dock_notify - act upon an acpi dock notification 606 * dock_notify - act upon an acpi dock notification
720 * @handle: the dock station handle 607 * @ds: dock station
721 * @event: the acpi event 608 * @event: the acpi event
722 * @data: our driver data struct
723 * 609 *
724 * If we are notified to dock, then check to see if the dock is 610 * If we are notified to dock, then check to see if the dock is
725 * present and then dock. Notify all drivers of the dock event, 611 * present and then dock. Notify all drivers of the dock event,
726 * and then hotplug and devices that may need hotplugging. 612 * and then hotplug and devices that may need hotplugging.
727 */ 613 */
728static void dock_notify(acpi_handle handle, u32 event, void *data) 614static void dock_notify(struct dock_station *ds, u32 event)
729{ 615{
730 struct dock_station *ds = data; 616 acpi_handle handle = ds->handle;
731 struct acpi_device *tmp; 617 struct acpi_device *ad;
732 int surprise_removal = 0; 618 int surprise_removal = 0;
733 619
734 /* 620 /*
@@ -751,8 +637,7 @@ static void dock_notify(acpi_handle handle, u32 event, void *data)
751 switch (event) { 637 switch (event) {
752 case ACPI_NOTIFY_BUS_CHECK: 638 case ACPI_NOTIFY_BUS_CHECK:
753 case ACPI_NOTIFY_DEVICE_CHECK: 639 case ACPI_NOTIFY_DEVICE_CHECK:
754 if (!dock_in_progress(ds) && acpi_bus_get_device(ds->handle, 640 if (!dock_in_progress(ds) && acpi_bus_get_device(handle, &ad)) {
755 &tmp)) {
756 begin_dock(ds); 641 begin_dock(ds);
757 dock(ds); 642 dock(ds);
758 if (!dock_present(ds)) { 643 if (!dock_present(ds)) {
@@ -760,12 +645,10 @@ static void dock_notify(acpi_handle handle, u32 event, void *data)
760 complete_dock(ds); 645 complete_dock(ds);
761 break; 646 break;
762 } 647 }
763 atomic_notifier_call_chain(&dock_notifier_list,
764 event, NULL);
765 hotplug_dock_devices(ds, event); 648 hotplug_dock_devices(ds, event);
766 complete_dock(ds); 649 complete_dock(ds);
767 dock_event(ds, event, DOCK_EVENT); 650 dock_event(ds, event, DOCK_EVENT);
768 dock_lock(ds, 1); 651 acpi_evaluate_lck(ds->handle, 1);
769 acpi_update_all_gpes(); 652 acpi_update_all_gpes();
770 break; 653 break;
771 } 654 }
@@ -789,9 +672,8 @@ static void dock_notify(acpi_handle handle, u32 event, void *data)
789} 672}
790 673
791struct dock_data { 674struct dock_data {
792 acpi_handle handle;
793 unsigned long event;
794 struct dock_station *ds; 675 struct dock_station *ds;
676 u32 event;
795}; 677};
796 678
797static void acpi_dock_deferred_cb(void *context) 679static void acpi_dock_deferred_cb(void *context)
@@ -799,52 +681,31 @@ static void acpi_dock_deferred_cb(void *context)
799 struct dock_data *data = context; 681 struct dock_data *data = context;
800 682
801 acpi_scan_lock_acquire(); 683 acpi_scan_lock_acquire();
802 dock_notify(data->handle, data->event, data->ds); 684 dock_notify(data->ds, data->event);
803 acpi_scan_lock_release(); 685 acpi_scan_lock_release();
804 kfree(data); 686 kfree(data);
805} 687}
806 688
807static int acpi_dock_notifier_call(struct notifier_block *this, 689static void dock_notify_handler(acpi_handle handle, u32 event, void *data)
808 unsigned long event, void *data)
809{ 690{
810 struct dock_station *dock_station; 691 struct dock_data *dd;
811 acpi_handle handle = data;
812 692
813 if (event != ACPI_NOTIFY_BUS_CHECK && event != ACPI_NOTIFY_DEVICE_CHECK 693 if (event != ACPI_NOTIFY_BUS_CHECK && event != ACPI_NOTIFY_DEVICE_CHECK
814 && event != ACPI_NOTIFY_EJECT_REQUEST) 694 && event != ACPI_NOTIFY_EJECT_REQUEST)
815 return 0; 695 return;
816
817 acpi_scan_lock_acquire();
818
819 list_for_each_entry(dock_station, &dock_stations, sibling) {
820 if (dock_station->handle == handle) {
821 struct dock_data *dd;
822 acpi_status status;
823
824 dd = kmalloc(sizeof(*dd), GFP_KERNEL);
825 if (!dd)
826 break;
827 696
828 dd->handle = handle; 697 dd = kmalloc(sizeof(*dd), GFP_KERNEL);
829 dd->event = event; 698 if (dd) {
830 dd->ds = dock_station; 699 acpi_status status;
831 status = acpi_os_hotplug_execute(acpi_dock_deferred_cb,
832 dd);
833 if (ACPI_FAILURE(status))
834 kfree(dd);
835 700
836 break; 701 dd->ds = data;
837 } 702 dd->event = event;
703 status = acpi_os_hotplug_execute(acpi_dock_deferred_cb, dd);
704 if (ACPI_FAILURE(status))
705 kfree(dd);
838 } 706 }
839
840 acpi_scan_lock_release();
841 return 0;
842} 707}
843 708
844static struct notifier_block dock_acpi_notifier = {
845 .notifier_call = acpi_dock_notifier_call,
846};
847
848/** 709/**
849 * find_dock_devices - find devices on the dock station 710 * find_dock_devices - find devices on the dock station
850 * @handle: the handle of the device we are examining 711 * @handle: the handle of the device we are examining
@@ -856,29 +717,16 @@ static struct notifier_block dock_acpi_notifier = {
856 * check to see if an object has an _EJD method. If it does, then it 717 * check to see if an object has an _EJD method. If it does, then it
857 * will see if it is dependent on the dock station. 718 * will see if it is dependent on the dock station.
858 */ 719 */
859static acpi_status 720static acpi_status __init find_dock_devices(acpi_handle handle, u32 lvl,
860find_dock_devices(acpi_handle handle, u32 lvl, void *context, void **rv) 721 void *context, void **rv)
861{ 722{
862 acpi_status status;
863 acpi_handle tmp, parent;
864 struct dock_station *ds = context; 723 struct dock_station *ds = context;
724 acpi_handle ejd = NULL;
865 725
866 status = acpi_bus_get_ejd(handle, &tmp); 726 acpi_bus_get_ejd(handle, &ejd);
867 if (ACPI_FAILURE(status)) { 727 if (ejd == ds->handle)
868 /* try the parent device as well */
869 status = acpi_get_parent(handle, &parent);
870 if (ACPI_FAILURE(status))
871 goto fdd_out;
872 /* see if parent is dependent on dock */
873 status = acpi_bus_get_ejd(parent, &tmp);
874 if (ACPI_FAILURE(status))
875 goto fdd_out;
876 }
877
878 if (tmp == ds->handle)
879 add_dock_dependent_device(ds, handle); 728 add_dock_dependent_device(ds, handle);
880 729
881fdd_out:
882 return AE_OK; 730 return AE_OK;
883} 731}
884 732
@@ -988,13 +836,13 @@ static struct attribute_group dock_attribute_group = {
988 */ 836 */
989static int __init dock_add(acpi_handle handle) 837static int __init dock_add(acpi_handle handle)
990{ 838{
991 int ret, id; 839 struct dock_station *dock_station, ds = { NULL, };
992 struct dock_station ds, *dock_station;
993 struct platform_device *dd; 840 struct platform_device *dd;
841 acpi_status status;
842 int ret;
994 843
995 id = dock_station_count; 844 dd = platform_device_register_data(NULL, "dock", dock_station_count,
996 memset(&ds, 0, sizeof(ds)); 845 &ds, sizeof(ds));
997 dd = platform_device_register_data(NULL, "dock", id, &ds, sizeof(ds));
998 if (IS_ERR(dd)) 846 if (IS_ERR(dd))
999 return PTR_ERR(dd); 847 return PTR_ERR(dd);
1000 848
@@ -1004,18 +852,15 @@ static int __init dock_add(acpi_handle handle)
1004 dock_station->dock_device = dd; 852 dock_station->dock_device = dd;
1005 dock_station->last_dock_time = jiffies - HZ; 853 dock_station->last_dock_time = jiffies - HZ;
1006 854
1007 mutex_init(&dock_station->hp_lock);
1008 spin_lock_init(&dock_station->dd_lock);
1009 INIT_LIST_HEAD(&dock_station->sibling); 855 INIT_LIST_HEAD(&dock_station->sibling);
1010 ATOMIC_INIT_NOTIFIER_HEAD(&dock_notifier_list);
1011 INIT_LIST_HEAD(&dock_station->dependent_devices); 856 INIT_LIST_HEAD(&dock_station->dependent_devices);
1012 857
1013 /* we want the dock device to send uevents */ 858 /* we want the dock device to send uevents */
1014 dev_set_uevent_suppress(&dd->dev, 0); 859 dev_set_uevent_suppress(&dd->dev, 0);
1015 860
1016 if (is_dock(handle)) 861 if (acpi_dock_match(handle))
1017 dock_station->flags |= DOCK_IS_DOCK; 862 dock_station->flags |= DOCK_IS_DOCK;
1018 if (is_ata(handle)) 863 if (acpi_ata_match(handle))
1019 dock_station->flags |= DOCK_IS_ATA; 864 dock_station->flags |= DOCK_IS_ATA;
1020 if (is_battery(handle)) 865 if (is_battery(handle))
1021 dock_station->flags |= DOCK_IS_BAT; 866 dock_station->flags |= DOCK_IS_BAT;
@@ -1034,11 +879,19 @@ static int __init dock_add(acpi_handle handle)
1034 if (ret) 879 if (ret)
1035 goto err_rmgroup; 880 goto err_rmgroup;
1036 881
882 status = acpi_install_notify_handler(handle, ACPI_SYSTEM_NOTIFY,
883 dock_notify_handler, dock_station);
884 if (ACPI_FAILURE(status)) {
885 ret = -ENODEV;
886 goto err_rmgroup;
887 }
888
1037 dock_station_count++; 889 dock_station_count++;
1038 list_add(&dock_station->sibling, &dock_stations); 890 list_add(&dock_station->sibling, &dock_stations);
1039 return 0; 891 return 0;
1040 892
1041err_rmgroup: 893err_rmgroup:
894 remove_dock_dependent_devices(dock_station);
1042 sysfs_remove_group(&dd->dev.kobj, &dock_attribute_group); 895 sysfs_remove_group(&dd->dev.kobj, &dock_attribute_group);
1043err_unregister: 896err_unregister:
1044 platform_device_unregister(dd); 897 platform_device_unregister(dd);
@@ -1058,7 +911,7 @@ err_unregister:
1058static acpi_status __init 911static acpi_status __init
1059find_dock_and_bay(acpi_handle handle, u32 lvl, void *context, void **rv) 912find_dock_and_bay(acpi_handle handle, u32 lvl, void *context, void **rv)
1060{ 913{
1061 if (is_dock(handle) || is_ejectable_bay(handle)) 914 if (acpi_dock_match(handle) || is_ejectable_bay(handle))
1062 dock_add(handle); 915 dock_add(handle);
1063 916
1064 return AE_OK; 917 return AE_OK;
@@ -1078,7 +931,6 @@ void __init acpi_dock_init(void)
1078 return; 931 return;
1079 } 932 }
1080 933
1081 register_acpi_bus_notifier(&dock_acpi_notifier);
1082 pr_info(PREFIX "%s: %d docks/bays found\n", 934 pr_info(PREFIX "%s: %d docks/bays found\n",
1083 ACPI_DOCK_DRIVER_DESCRIPTION, dock_station_count); 935 ACPI_DOCK_DRIVER_DESCRIPTION, dock_station_count);
1084} 936}
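
Note on the dock.c changes above: the driver no longer registers a global ACPI bus notifier; instead each dock station installs its own notify handler on its ACPI handle and defers the real work to process context via acpi_os_hotplug_execute(). A minimal sketch of that pattern follows; the my_deferred/my_notify/my_register names are placeholders, not code from this patch.

#include <linux/acpi.h>
#include <linux/errno.h>
#include <linux/printk.h>

/* Illustrative sketch only (hypothetical names), not part of the patch. */
static void my_deferred(void *context)
{
	/* Runs from the hotplug workqueue, in process context, so it is
	 * safe to take sleeping locks such as the ACPI scan lock here. */
}

static void my_notify(acpi_handle handle, u32 event, void *context)
{
	/* Keep the notify handler short; queue the heavy lifting. */
	if (ACPI_FAILURE(acpi_os_hotplug_execute(my_deferred, context)))
		pr_err("failed to defer dock event %u\n", event);
}

static int my_register(acpi_handle handle, void *station)
{
	acpi_status status;

	status = acpi_install_notify_handler(handle, ACPI_SYSTEM_NOTIFY,
					     my_notify, station);
	return ACPI_FAILURE(status) ? -ENODEV : 0;
}
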
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
index 34448038724b..8dd2d4dce7c3 100644
--- a/drivers/acpi/ec.c
+++ b/drivers/acpi/ec.c
@@ -1049,10 +1049,8 @@ int __init acpi_ec_ecdt_probe(void)
1049 * which needs it, has fake EC._INI method, so use it as flag. 1049 * which needs it, has fake EC._INI method, so use it as flag.
1050 * Keep boot_ec struct as it will be needed soon. 1050 * Keep boot_ec struct as it will be needed soon.
1051 */ 1051 */
1052 acpi_handle dummy;
1053 if (!dmi_name_in_vendors("ASUS") || 1052 if (!dmi_name_in_vendors("ASUS") ||
1054 ACPI_FAILURE(acpi_get_handle(boot_ec->handle, "_INI", 1053 !acpi_has_method(boot_ec->handle, "_INI"))
1055 &dummy)))
1056 return -ENODEV; 1054 return -ENODEV;
1057 } 1055 }
1058install: 1056install:
diff --git a/drivers/acpi/fan.c b/drivers/acpi/fan.c
index 5b02a0aa540c..41ade6570bc0 100644
--- a/drivers/acpi/fan.c
+++ b/drivers/acpi/fan.c
@@ -93,7 +93,7 @@ static int fan_get_cur_state(struct thermal_cooling_device *cdev, unsigned long
93 if (result) 93 if (result)
94 return result; 94 return result;
95 95
96 *state = (acpi_state == ACPI_STATE_D3 ? 0 : 96 *state = (acpi_state == ACPI_STATE_D3_COLD ? 0 :
97 (acpi_state == ACPI_STATE_D0 ? 1 : -1)); 97 (acpi_state == ACPI_STATE_D0 ? 1 : -1));
98 return 0; 98 return 0;
99} 99}
@@ -108,7 +108,7 @@ fan_set_cur_state(struct thermal_cooling_device *cdev, unsigned long state)
108 return -EINVAL; 108 return -EINVAL;
109 109
110 result = acpi_bus_set_power(device->handle, 110 result = acpi_bus_set_power(device->handle,
111 state ? ACPI_STATE_D0 : ACPI_STATE_D3); 111 state ? ACPI_STATE_D0 : ACPI_STATE_D3_COLD);
112 112
113 return result; 113 return result;
114} 114}
diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c
index 6bc08272f050..e5f416c7f66e 100644
--- a/drivers/acpi/osl.c
+++ b/drivers/acpi/osl.c
@@ -80,6 +80,8 @@ extern char line_buf[80];
80 80
81static int (*__acpi_os_prepare_sleep)(u8 sleep_state, u32 pm1a_ctrl, 81static int (*__acpi_os_prepare_sleep)(u8 sleep_state, u32 pm1a_ctrl,
82 u32 pm1b_ctrl); 82 u32 pm1b_ctrl);
83static int (*__acpi_os_prepare_extended_sleep)(u8 sleep_state, u32 val_a,
84 u32 val_b);
83 85
84static acpi_osd_handler acpi_irq_handler; 86static acpi_osd_handler acpi_irq_handler;
85static void *acpi_irq_context; 87static void *acpi_irq_context;
@@ -141,7 +143,8 @@ static struct osi_linux {
141 unsigned int enable:1; 143 unsigned int enable:1;
142 unsigned int dmi:1; 144 unsigned int dmi:1;
143 unsigned int cmdline:1; 145 unsigned int cmdline:1;
144} osi_linux = {0, 0, 0}; 146 unsigned int default_disabling:1;
147} osi_linux = {0, 0, 0, 0};
145 148
146static u32 acpi_osi_handler(acpi_string interface, u32 supported) 149static u32 acpi_osi_handler(acpi_string interface, u32 supported)
147{ 150{
@@ -1381,6 +1384,17 @@ void __init acpi_osi_setup(char *str)
1381 1384
1382 if (*str == '!') { 1385 if (*str == '!') {
1383 str++; 1386 str++;
1387 if (*str == '\0') {
1388 osi_linux.default_disabling = 1;
1389 return;
1390 } else if (*str == '*') {
1391 acpi_update_interfaces(ACPI_DISABLE_ALL_STRINGS);
1392 for (i = 0; i < OSI_STRING_ENTRIES_MAX; i++) {
1393 osi = &osi_setup_entries[i];
1394 osi->enable = false;
1395 }
1396 return;
1397 }
1384 enable = false; 1398 enable = false;
1385 } 1399 }
1386 1400
@@ -1446,6 +1460,13 @@ static void __init acpi_osi_setup_late(void)
1446 int i; 1460 int i;
1447 acpi_status status; 1461 acpi_status status;
1448 1462
1463 if (osi_linux.default_disabling) {
1464 status = acpi_update_interfaces(ACPI_DISABLE_ALL_VENDOR_STRINGS);
1465
1466 if (ACPI_SUCCESS(status))
1467 printk(KERN_INFO PREFIX "Disabled all _OSI OS vendors\n");
1468 }
1469
1449 for (i = 0; i < OSI_STRING_ENTRIES_MAX; i++) { 1470 for (i = 0; i < OSI_STRING_ENTRIES_MAX; i++) {
1450 osi = &osi_setup_entries[i]; 1471 osi = &osi_setup_entries[i];
1451 str = osi->string; 1472 str = osi->string;
@@ -1784,6 +1805,28 @@ void acpi_os_set_prepare_sleep(int (*func)(u8 sleep_state,
1784 __acpi_os_prepare_sleep = func; 1805 __acpi_os_prepare_sleep = func;
1785} 1806}
1786 1807
1808acpi_status acpi_os_prepare_extended_sleep(u8 sleep_state, u32 val_a,
1809 u32 val_b)
1810{
1811 int rc = 0;
1812 if (__acpi_os_prepare_extended_sleep)
1813 rc = __acpi_os_prepare_extended_sleep(sleep_state,
1814 val_a, val_b);
1815 if (rc < 0)
1816 return AE_ERROR;
1817 else if (rc > 0)
1818 return AE_CTRL_SKIP;
1819
1820 return AE_OK;
1821}
1822
1823void acpi_os_set_prepare_extended_sleep(int (*func)(u8 sleep_state,
1824 u32 val_a, u32 val_b))
1825{
1826 __acpi_os_prepare_extended_sleep = func;
1827}
1828
1829
1787void alloc_acpi_hp_work(acpi_handle handle, u32 type, void *context, 1830void alloc_acpi_hp_work(acpi_handle handle, u32 type, void *context,
1788 void (*func)(struct work_struct *work)) 1831 void (*func)(struct work_struct *work))
1789{ 1832{
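
Note on the osl.c changes above: "acpi_osi=!" on the kernel command line now records a default-disable request that acpi_osi_setup_late() applies by dropping all vendor _OSI strings (unless explicitly re-enabled), while "acpi_osi=!*" disables every _OSI string immediately. The second part of the hunk adds an extended-sleep prepare hook mirroring the existing one: a registered callback returning a negative value maps to AE_ERROR, a positive value to AE_CTRL_SKIP (ACPICA skips writing the sleep registers), and zero to AE_OK. A hypothetical user of that hook might look like the sketch below; my_prepare_extended_sleep() is illustrative only.

#include <linux/acpi.h>

/* Illustrative sketch (hypothetical callback), not part of the patch. */
static int my_prepare_extended_sleep(u8 sleep_state, u32 val_a, u32 val_b)
{
	if (sleep_state == ACPI_STATE_S3)
		return 1;	/* positive => AE_CTRL_SKIP, skip register writes */
	return 0;		/* zero => AE_OK, let ACPICA proceed */
}

static void my_register_sleep_hook(void)
{
	acpi_os_set_prepare_extended_sleep(my_prepare_extended_sleep);
}
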
diff --git a/drivers/acpi/pci_slot.c b/drivers/acpi/pci_slot.c
index 033d1179bdb5..d678a180ca2a 100644
--- a/drivers/acpi/pci_slot.c
+++ b/drivers/acpi/pci_slot.c
@@ -159,12 +159,16 @@ register_slot(acpi_handle handle, u32 lvl, void *context, void **rv)
159 return AE_OK; 159 return AE_OK;
160} 160}
161 161
162void acpi_pci_slot_enumerate(struct pci_bus *bus, acpi_handle handle) 162void acpi_pci_slot_enumerate(struct pci_bus *bus)
163{ 163{
164 mutex_lock(&slot_list_lock); 164 acpi_handle handle = ACPI_HANDLE(bus->bridge);
165 acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, 1, 165
166 register_slot, NULL, bus, NULL); 166 if (handle) {
167 mutex_unlock(&slot_list_lock); 167 mutex_lock(&slot_list_lock);
168 acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, 1,
169 register_slot, NULL, bus, NULL);
170 mutex_unlock(&slot_list_lock);
171 }
168} 172}
169 173
170void acpi_pci_slot_remove(struct pci_bus *bus) 174void acpi_pci_slot_remove(struct pci_bus *bus)
diff --git a/drivers/acpi/power.c b/drivers/acpi/power.c
index 5c28c894c0fc..0dbe5cdf3396 100644
--- a/drivers/acpi/power.c
+++ b/drivers/acpi/power.c
@@ -637,9 +637,7 @@ int acpi_device_sleep_wake(struct acpi_device *dev,
637 } 637 }
638 638
639 /* Execute _PSW */ 639 /* Execute _PSW */
640 arg_list.count = 1; 640 status = acpi_execute_simple_method(dev->handle, "_PSW", enable);
641 in_arg[0].integer.value = enable;
642 status = acpi_evaluate_object(dev->handle, "_PSW", &arg_list, NULL);
643 if (ACPI_FAILURE(status) && (status != AE_NOT_FOUND)) { 641 if (ACPI_FAILURE(status) && (status != AE_NOT_FOUND)) {
644 printk(KERN_ERR PREFIX "_PSW execution failed\n"); 642 printk(KERN_ERR PREFIX "_PSW execution failed\n");
645 dev->wakeup.flags.valid = 0; 643 dev->wakeup.flags.valid = 0;
@@ -786,7 +784,7 @@ int acpi_power_get_inferred_state(struct acpi_device *device, int *state)
786 } 784 }
787 } 785 }
788 786
789 *state = ACPI_STATE_D3; 787 *state = ACPI_STATE_D3_COLD;
790 return 0; 788 return 0;
791} 789}
792 790
diff --git a/drivers/acpi/processor_perflib.c b/drivers/acpi/processor_perflib.c
index 1e9732d809bf..51d7948611da 100644
--- a/drivers/acpi/processor_perflib.c
+++ b/drivers/acpi/processor_perflib.c
@@ -164,17 +164,12 @@ static void acpi_processor_ppc_ost(acpi_handle handle, int status)
164 {.type = ACPI_TYPE_INTEGER,}, 164 {.type = ACPI_TYPE_INTEGER,},
165 }; 165 };
166 struct acpi_object_list arg_list = {2, params}; 166 struct acpi_object_list arg_list = {2, params};
167 acpi_handle temp;
168 167
169 params[0].integer.value = ACPI_PROCESSOR_NOTIFY_PERFORMANCE; 168 if (acpi_has_method(handle, "_OST")) {
170 params[1].integer.value = status; 169 params[0].integer.value = ACPI_PROCESSOR_NOTIFY_PERFORMANCE;
171 170 params[1].integer.value = status;
172 /* when there is no _OST , skip it */ 171 acpi_evaluate_object(handle, "_OST", &arg_list, NULL);
173 if (ACPI_FAILURE(acpi_get_handle(handle, "_OST", &temp))) 172 }
174 return;
175
176 acpi_evaluate_object(handle, "_OST", &arg_list, NULL);
177 return;
178} 173}
179 174
180int acpi_processor_ppc_has_changed(struct acpi_processor *pr, int event_flag) 175int acpi_processor_ppc_has_changed(struct acpi_processor *pr, int event_flag)
@@ -468,14 +463,11 @@ static int acpi_processor_get_performance_states(struct acpi_processor *pr)
468int acpi_processor_get_performance_info(struct acpi_processor *pr) 463int acpi_processor_get_performance_info(struct acpi_processor *pr)
469{ 464{
470 int result = 0; 465 int result = 0;
471 acpi_status status = AE_OK;
472 acpi_handle handle = NULL;
473 466
474 if (!pr || !pr->performance || !pr->handle) 467 if (!pr || !pr->performance || !pr->handle)
475 return -EINVAL; 468 return -EINVAL;
476 469
477 status = acpi_get_handle(pr->handle, "_PCT", &handle); 470 if (!acpi_has_method(pr->handle, "_PCT")) {
478 if (ACPI_FAILURE(status)) {
479 ACPI_DEBUG_PRINT((ACPI_DB_INFO, 471 ACPI_DEBUG_PRINT((ACPI_DB_INFO,
480 "ACPI-based processor performance control unavailable\n")); 472 "ACPI-based processor performance control unavailable\n"));
481 return -ENODEV; 473 return -ENODEV;
@@ -501,7 +493,7 @@ int acpi_processor_get_performance_info(struct acpi_processor *pr)
501 */ 493 */
502 update_bios: 494 update_bios:
503#ifdef CONFIG_X86 495#ifdef CONFIG_X86
504 if (ACPI_SUCCESS(acpi_get_handle(pr->handle, "_PPC", &handle))){ 496 if (acpi_has_method(pr->handle, "_PPC")) {
505 if(boot_cpu_has(X86_FEATURE_EST)) 497 if(boot_cpu_has(X86_FEATURE_EST))
506 printk(KERN_WARNING FW_BUG "BIOS needs update for CPU " 498 printk(KERN_WARNING FW_BUG "BIOS needs update for CPU "
507 "frequency support\n"); 499 "frequency support\n");
diff --git a/drivers/acpi/resource.c b/drivers/acpi/resource.c
index 3322b47ab7ca..b7201fc6f1e1 100644
--- a/drivers/acpi/resource.c
+++ b/drivers/acpi/resource.c
@@ -505,14 +505,12 @@ int acpi_dev_get_resources(struct acpi_device *adev, struct list_head *list,
505 void *preproc_data) 505 void *preproc_data)
506{ 506{
507 struct res_proc_context c; 507 struct res_proc_context c;
508 acpi_handle not_used;
509 acpi_status status; 508 acpi_status status;
510 509
511 if (!adev || !adev->handle || !list_empty(list)) 510 if (!adev || !adev->handle || !list_empty(list))
512 return -EINVAL; 511 return -EINVAL;
513 512
514 status = acpi_get_handle(adev->handle, METHOD_NAME__CRS, &not_used); 513 if (!acpi_has_method(adev->handle, METHOD_NAME__CRS))
515 if (ACPI_FAILURE(status))
516 return 0; 514 return 0;
517 515
518 c.list = list; 516 c.list = list;
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index 7d9e285da452..e76365136ba3 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -193,9 +193,6 @@ static acpi_status acpi_bus_online_companions(acpi_handle handle, u32 lvl,
193static int acpi_scan_hot_remove(struct acpi_device *device) 193static int acpi_scan_hot_remove(struct acpi_device *device)
194{ 194{
195 acpi_handle handle = device->handle; 195 acpi_handle handle = device->handle;
196 acpi_handle not_used;
197 struct acpi_object_list arg_list;
198 union acpi_object arg;
199 struct device *errdev; 196 struct device *errdev;
200 acpi_status status; 197 acpi_status status;
201 unsigned long long sta; 198 unsigned long long sta;
@@ -258,32 +255,15 @@ static int acpi_scan_hot_remove(struct acpi_device *device)
258 put_device(&device->dev); 255 put_device(&device->dev);
259 device = NULL; 256 device = NULL;
260 257
261 if (ACPI_SUCCESS(acpi_get_handle(handle, "_LCK", &not_used))) { 258 acpi_evaluate_lck(handle, 0);
262 arg_list.count = 1;
263 arg_list.pointer = &arg;
264 arg.type = ACPI_TYPE_INTEGER;
265 arg.integer.value = 0;
266 acpi_evaluate_object(handle, "_LCK", &arg_list, NULL);
267 }
268
269 arg_list.count = 1;
270 arg_list.pointer = &arg;
271 arg.type = ACPI_TYPE_INTEGER;
272 arg.integer.value = 1;
273
274 /* 259 /*
275 * TBD: _EJD support. 260 * TBD: _EJD support.
276 */ 261 */
277 status = acpi_evaluate_object(handle, "_EJ0", &arg_list, NULL); 262 status = acpi_evaluate_ej0(handle);
278 if (ACPI_FAILURE(status)) { 263 if (status == AE_NOT_FOUND)
279 if (status == AE_NOT_FOUND) { 264 return -ENODEV;
280 return -ENODEV; 265 else if (ACPI_FAILURE(status))
281 } else { 266 return -EIO;
282 acpi_handle_warn(handle, "Eject failed (0x%x)\n",
283 status);
284 return -EIO;
285 }
286 }
287 267
288 /* 268 /*
289 * Verify if eject was indeed successful. If not, log an error 269 * Verify if eject was indeed successful. If not, log an error
@@ -654,7 +634,6 @@ static int acpi_device_setup_files(struct acpi_device *dev)
654{ 634{
655 struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL}; 635 struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL};
656 acpi_status status; 636 acpi_status status;
657 acpi_handle temp;
658 unsigned long long sun; 637 unsigned long long sun;
659 int result = 0; 638 int result = 0;
660 639
@@ -680,8 +659,7 @@ static int acpi_device_setup_files(struct acpi_device *dev)
680 /* 659 /*
681 * If device has _STR, 'description' file is created 660 * If device has _STR, 'description' file is created
682 */ 661 */
683 status = acpi_get_handle(dev->handle, "_STR", &temp); 662 if (acpi_has_method(dev->handle, "_STR")) {
684 if (ACPI_SUCCESS(status)) {
685 status = acpi_evaluate_object(dev->handle, "_STR", 663 status = acpi_evaluate_object(dev->handle, "_STR",
686 NULL, &buffer); 664 NULL, &buffer);
687 if (ACPI_FAILURE(status)) 665 if (ACPI_FAILURE(status))
@@ -711,8 +689,7 @@ static int acpi_device_setup_files(struct acpi_device *dev)
711 * If device has _EJ0, 'eject' file is created that is used to trigger 689 * If device has _EJ0, 'eject' file is created that is used to trigger
712 * hot-removal function from userland. 690 * hot-removal function from userland.
713 */ 691 */
714 status = acpi_get_handle(dev->handle, "_EJ0", &temp); 692 if (acpi_has_method(dev->handle, "_EJ0")) {
715 if (ACPI_SUCCESS(status)) {
716 result = device_create_file(&dev->dev, &dev_attr_eject); 693 result = device_create_file(&dev->dev, &dev_attr_eject);
717 if (result) 694 if (result)
718 return result; 695 return result;
@@ -734,9 +711,6 @@ end:
734 711
735static void acpi_device_remove_files(struct acpi_device *dev) 712static void acpi_device_remove_files(struct acpi_device *dev)
736{ 713{
737 acpi_status status;
738 acpi_handle temp;
739
740 if (dev->flags.power_manageable) { 714 if (dev->flags.power_manageable) {
741 device_remove_file(&dev->dev, &dev_attr_power_state); 715 device_remove_file(&dev->dev, &dev_attr_power_state);
742 if (dev->power.flags.power_resources) 716 if (dev->power.flags.power_resources)
@@ -747,20 +721,17 @@ static void acpi_device_remove_files(struct acpi_device *dev)
747 /* 721 /*
748 * If device has _STR, remove 'description' file 722 * If device has _STR, remove 'description' file
749 */ 723 */
750 status = acpi_get_handle(dev->handle, "_STR", &temp); 724 if (acpi_has_method(dev->handle, "_STR")) {
751 if (ACPI_SUCCESS(status)) {
752 kfree(dev->pnp.str_obj); 725 kfree(dev->pnp.str_obj);
753 device_remove_file(&dev->dev, &dev_attr_description); 726 device_remove_file(&dev->dev, &dev_attr_description);
754 } 727 }
755 /* 728 /*
756 * If device has _EJ0, remove 'eject' file. 729 * If device has _EJ0, remove 'eject' file.
757 */ 730 */
758 status = acpi_get_handle(dev->handle, "_EJ0", &temp); 731 if (acpi_has_method(dev->handle, "_EJ0"))
759 if (ACPI_SUCCESS(status))
760 device_remove_file(&dev->dev, &dev_attr_eject); 732 device_remove_file(&dev->dev, &dev_attr_eject);
761 733
762 status = acpi_get_handle(dev->handle, "_SUN", &temp); 734 if (acpi_has_method(dev->handle, "_SUN"))
763 if (ACPI_SUCCESS(status))
764 device_remove_file(&dev->dev, &dev_attr_sun); 735 device_remove_file(&dev->dev, &dev_attr_sun);
765 736
766 if (dev->pnp.unique_id) 737 if (dev->pnp.unique_id)
@@ -1350,13 +1321,10 @@ static void acpi_bus_set_run_wake_flags(struct acpi_device *device)
1350 1321
1351static void acpi_bus_get_wakeup_device_flags(struct acpi_device *device) 1322static void acpi_bus_get_wakeup_device_flags(struct acpi_device *device)
1352{ 1323{
1353 acpi_handle temp;
1354 acpi_status status = 0;
1355 int err; 1324 int err;
1356 1325
1357 /* Presence of _PRW indicates wake capable */ 1326 /* Presence of _PRW indicates wake capable */
1358 status = acpi_get_handle(device->handle, "_PRW", &temp); 1327 if (!acpi_has_method(device->handle, "_PRW"))
1359 if (ACPI_FAILURE(status))
1360 return; 1328 return;
1361 1329
1362 err = acpi_bus_extract_wakeup_device_power_package(device->handle, 1330 err = acpi_bus_extract_wakeup_device_power_package(device->handle,
@@ -1386,7 +1354,6 @@ static void acpi_bus_init_power_state(struct acpi_device *device, int state)
1386 struct acpi_device_power_state *ps = &device->power.states[state]; 1354 struct acpi_device_power_state *ps = &device->power.states[state];
1387 char pathname[5] = { '_', 'P', 'R', '0' + state, '\0' }; 1355 char pathname[5] = { '_', 'P', 'R', '0' + state, '\0' };
1388 struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; 1356 struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
1389 acpi_handle handle;
1390 acpi_status status; 1357 acpi_status status;
1391 1358
1392 INIT_LIST_HEAD(&ps->resources); 1359 INIT_LIST_HEAD(&ps->resources);
@@ -1409,8 +1376,7 @@ static void acpi_bus_init_power_state(struct acpi_device *device, int state)
1409 1376
1410 /* Evaluate "_PSx" to see if we can do explicit sets */ 1377 /* Evaluate "_PSx" to see if we can do explicit sets */
1411 pathname[2] = 'S'; 1378 pathname[2] = 'S';
1412 status = acpi_get_handle(device->handle, pathname, &handle); 1379 if (acpi_has_method(device->handle, pathname))
1413 if (ACPI_SUCCESS(status))
1414 ps->flags.explicit_set = 1; 1380 ps->flags.explicit_set = 1;
1415 1381
1416 /* 1382 /*
@@ -1429,28 +1395,21 @@ static void acpi_bus_init_power_state(struct acpi_device *device, int state)
1429 1395
1430static void acpi_bus_get_power_flags(struct acpi_device *device) 1396static void acpi_bus_get_power_flags(struct acpi_device *device)
1431{ 1397{
1432 acpi_status status;
1433 acpi_handle handle;
1434 u32 i; 1398 u32 i;
1435 1399
1436 /* Presence of _PS0|_PR0 indicates 'power manageable' */ 1400 /* Presence of _PS0|_PR0 indicates 'power manageable' */
1437 status = acpi_get_handle(device->handle, "_PS0", &handle); 1401 if (!acpi_has_method(device->handle, "_PS0") &&
1438 if (ACPI_FAILURE(status)) { 1402 !acpi_has_method(device->handle, "_PR0"))
1439 status = acpi_get_handle(device->handle, "_PR0", &handle); 1403 return;
1440 if (ACPI_FAILURE(status))
1441 return;
1442 }
1443 1404
1444 device->flags.power_manageable = 1; 1405 device->flags.power_manageable = 1;
1445 1406
1446 /* 1407 /*
1447 * Power Management Flags 1408 * Power Management Flags
1448 */ 1409 */
1449 status = acpi_get_handle(device->handle, "_PSC", &handle); 1410 if (acpi_has_method(device->handle, "_PSC"))
1450 if (ACPI_SUCCESS(status))
1451 device->power.flags.explicit_get = 1; 1411 device->power.flags.explicit_get = 1;
1452 status = acpi_get_handle(device->handle, "_IRC", &handle); 1412 if (acpi_has_method(device->handle, "_IRC"))
1453 if (ACPI_SUCCESS(status))
1454 device->power.flags.inrush_current = 1; 1413 device->power.flags.inrush_current = 1;
1455 1414
1456 /* 1415 /*
@@ -1464,8 +1423,8 @@ static void acpi_bus_get_power_flags(struct acpi_device *device)
1464 /* Set defaults for D0 and D3 states (always valid) */ 1423 /* Set defaults for D0 and D3 states (always valid) */
1465 device->power.states[ACPI_STATE_D0].flags.valid = 1; 1424 device->power.states[ACPI_STATE_D0].flags.valid = 1;
1466 device->power.states[ACPI_STATE_D0].power = 100; 1425 device->power.states[ACPI_STATE_D0].power = 100;
1467 device->power.states[ACPI_STATE_D3].flags.valid = 1; 1426 device->power.states[ACPI_STATE_D3_COLD].flags.valid = 1;
1468 device->power.states[ACPI_STATE_D3].power = 0; 1427 device->power.states[ACPI_STATE_D3_COLD].power = 0;
1469 1428
1470 /* Set D3cold's explicit_set flag if _PS3 exists. */ 1429 /* Set D3cold's explicit_set flag if _PS3 exists. */
1471 if (device->power.states[ACPI_STATE_D3_HOT].flags.explicit_set) 1430 if (device->power.states[ACPI_STATE_D3_HOT].flags.explicit_set)
@@ -1484,28 +1443,18 @@ static void acpi_bus_get_power_flags(struct acpi_device *device)
1484 1443
1485static void acpi_bus_get_flags(struct acpi_device *device) 1444static void acpi_bus_get_flags(struct acpi_device *device)
1486{ 1445{
1487 acpi_status status = AE_OK;
1488 acpi_handle temp = NULL;
1489
1490 /* Presence of _STA indicates 'dynamic_status' */ 1446 /* Presence of _STA indicates 'dynamic_status' */
1491 status = acpi_get_handle(device->handle, "_STA", &temp); 1447 if (acpi_has_method(device->handle, "_STA"))
1492 if (ACPI_SUCCESS(status))
1493 device->flags.dynamic_status = 1; 1448 device->flags.dynamic_status = 1;
1494 1449
1495 /* Presence of _RMV indicates 'removable' */ 1450 /* Presence of _RMV indicates 'removable' */
1496 status = acpi_get_handle(device->handle, "_RMV", &temp); 1451 if (acpi_has_method(device->handle, "_RMV"))
1497 if (ACPI_SUCCESS(status))
1498 device->flags.removable = 1; 1452 device->flags.removable = 1;
1499 1453
1500 /* Presence of _EJD|_EJ0 indicates 'ejectable' */ 1454 /* Presence of _EJD|_EJ0 indicates 'ejectable' */
1501 status = acpi_get_handle(device->handle, "_EJD", &temp); 1455 if (acpi_has_method(device->handle, "_EJD") ||
1502 if (ACPI_SUCCESS(status)) 1456 acpi_has_method(device->handle, "_EJ0"))
1503 device->flags.ejectable = 1; 1457 device->flags.ejectable = 1;
1504 else {
1505 status = acpi_get_handle(device->handle, "_EJ0", &temp);
1506 if (ACPI_SUCCESS(status))
1507 device->flags.ejectable = 1;
1508 }
1509} 1458}
1510 1459
1511static void acpi_device_get_busid(struct acpi_device *device) 1460static void acpi_device_get_busid(struct acpi_device *device)
@@ -1547,46 +1496,45 @@ static void acpi_device_get_busid(struct acpi_device *device)
1547} 1496}
1548 1497
1549/* 1498/*
1499 * acpi_ata_match - see if an acpi object is an ATA device
1500 *
1501 * If an acpi object has one of the ACPI ATA methods defined,
1502 * then we can safely call it an ATA device.
1503 */
1504bool acpi_ata_match(acpi_handle handle)
1505{
1506 return acpi_has_method(handle, "_GTF") ||
1507 acpi_has_method(handle, "_GTM") ||
1508 acpi_has_method(handle, "_STM") ||
1509 acpi_has_method(handle, "_SDD");
1510}
1511
1512/*
1550 * acpi_bay_match - see if an acpi object is an ejectable driver bay 1513 * acpi_bay_match - see if an acpi object is an ejectable driver bay
1551 * 1514 *
1552 * If an acpi object is ejectable and has one of the ACPI ATA methods defined, 1515 * If an acpi object is ejectable and has one of the ACPI ATA methods defined,
1553 * then we can safely call it an ejectable drive bay 1516 * then we can safely call it an ejectable drive bay
1554 */ 1517 */
1555static int acpi_bay_match(acpi_handle handle) 1518bool acpi_bay_match(acpi_handle handle)
1556{ 1519{
1557 acpi_status status;
1558 acpi_handle tmp;
1559 acpi_handle phandle; 1520 acpi_handle phandle;
1560 1521
1561 status = acpi_get_handle(handle, "_EJ0", &tmp); 1522 if (!acpi_has_method(handle, "_EJ0"))
1562 if (ACPI_FAILURE(status)) 1523 return false;
1563 return -ENODEV; 1524 if (acpi_ata_match(handle))
1564 1525 return true;
1565 if ((ACPI_SUCCESS(acpi_get_handle(handle, "_GTF", &tmp))) || 1526 if (ACPI_FAILURE(acpi_get_parent(handle, &phandle)))
1566 (ACPI_SUCCESS(acpi_get_handle(handle, "_GTM", &tmp))) || 1527 return false;
1567 (ACPI_SUCCESS(acpi_get_handle(handle, "_STM", &tmp))) ||
1568 (ACPI_SUCCESS(acpi_get_handle(handle, "_SDD", &tmp))))
1569 return 0;
1570
1571 if (acpi_get_parent(handle, &phandle))
1572 return -ENODEV;
1573 1528
1574 if ((ACPI_SUCCESS(acpi_get_handle(phandle, "_GTF", &tmp))) || 1529 return acpi_ata_match(phandle);
1575 (ACPI_SUCCESS(acpi_get_handle(phandle, "_GTM", &tmp))) ||
1576 (ACPI_SUCCESS(acpi_get_handle(phandle, "_STM", &tmp))) ||
1577 (ACPI_SUCCESS(acpi_get_handle(phandle, "_SDD", &tmp))))
1578 return 0;
1579
1580 return -ENODEV;
1581} 1530}
1582 1531
1583/* 1532/*
1584 * acpi_dock_match - see if an acpi object has a _DCK method 1533 * acpi_dock_match - see if an acpi object has a _DCK method
1585 */ 1534 */
1586static int acpi_dock_match(acpi_handle handle) 1535bool acpi_dock_match(acpi_handle handle)
1587{ 1536{
1588 acpi_handle tmp; 1537 return acpi_has_method(handle, "_DCK");
1589 return acpi_get_handle(handle, "_DCK", &tmp);
1590} 1538}
1591 1539
1592const char *acpi_device_hid(struct acpi_device *device) 1540const char *acpi_device_hid(struct acpi_device *device)
@@ -1624,34 +1572,26 @@ static void acpi_add_id(struct acpi_device_pnp *pnp, const char *dev_id)
1624 * lacks the SMBUS01 HID and the methods do not have the necessary "_" 1572 * lacks the SMBUS01 HID and the methods do not have the necessary "_"
1625 * prefix. Work around this. 1573 * prefix. Work around this.
1626 */ 1574 */
1627static int acpi_ibm_smbus_match(acpi_handle handle) 1575static bool acpi_ibm_smbus_match(acpi_handle handle)
1628{ 1576{
1629 acpi_handle h_dummy; 1577 char node_name[ACPI_PATH_SEGMENT_LENGTH];
1630 struct acpi_buffer path = {ACPI_ALLOCATE_BUFFER, NULL}; 1578 struct acpi_buffer path = { sizeof(node_name), node_name };
1631 int result;
1632 1579
1633 if (!dmi_name_in_vendors("IBM")) 1580 if (!dmi_name_in_vendors("IBM"))
1634 return -ENODEV; 1581 return false;
1635 1582
1636 /* Look for SMBS object */ 1583 /* Look for SMBS object */
1637 result = acpi_get_name(handle, ACPI_SINGLE_NAME, &path); 1584 if (ACPI_FAILURE(acpi_get_name(handle, ACPI_SINGLE_NAME, &path)) ||
1638 if (result) 1585 strcmp("SMBS", path.pointer))
1639 return result; 1586 return false;
1640
1641 if (strcmp("SMBS", path.pointer)) {
1642 result = -ENODEV;
1643 goto out;
1644 }
1645 1587
1646 /* Does it have the necessary (but misnamed) methods? */ 1588 /* Does it have the necessary (but misnamed) methods? */
1647 result = -ENODEV; 1589 if (acpi_has_method(handle, "SBI") &&
1648 if (ACPI_SUCCESS(acpi_get_handle(handle, "SBI", &h_dummy)) && 1590 acpi_has_method(handle, "SBR") &&
1649 ACPI_SUCCESS(acpi_get_handle(handle, "SBR", &h_dummy)) && 1591 acpi_has_method(handle, "SBW"))
1650 ACPI_SUCCESS(acpi_get_handle(handle, "SBW", &h_dummy))) 1592 return true;
1651 result = 0; 1593
1652out: 1594 return false;
1653 kfree(path.pointer);
1654 return result;
1655} 1595}
1656 1596
1657static void acpi_set_pnp_ids(acpi_handle handle, struct acpi_device_pnp *pnp, 1597static void acpi_set_pnp_ids(acpi_handle handle, struct acpi_device_pnp *pnp,
@@ -1699,11 +1639,11 @@ static void acpi_set_pnp_ids(acpi_handle handle, struct acpi_device_pnp *pnp,
1699 */ 1639 */
1700 if (acpi_is_video_device(handle)) 1640 if (acpi_is_video_device(handle))
1701 acpi_add_id(pnp, ACPI_VIDEO_HID); 1641 acpi_add_id(pnp, ACPI_VIDEO_HID);
1702 else if (ACPI_SUCCESS(acpi_bay_match(handle))) 1642 else if (acpi_bay_match(handle))
1703 acpi_add_id(pnp, ACPI_BAY_HID); 1643 acpi_add_id(pnp, ACPI_BAY_HID);
1704 else if (ACPI_SUCCESS(acpi_dock_match(handle))) 1644 else if (acpi_dock_match(handle))
1705 acpi_add_id(pnp, ACPI_DOCK_HID); 1645 acpi_add_id(pnp, ACPI_DOCK_HID);
1706 else if (!acpi_ibm_smbus_match(handle)) 1646 else if (acpi_ibm_smbus_match(handle))
1707 acpi_add_id(pnp, ACPI_SMBUS_IBM_HID); 1647 acpi_add_id(pnp, ACPI_SMBUS_IBM_HID);
1708 else if (list_empty(&pnp->ids) && handle == ACPI_ROOT_OBJECT) { 1648 else if (list_empty(&pnp->ids) && handle == ACPI_ROOT_OBJECT) {
1709 acpi_add_id(pnp, ACPI_BUS_HID); /* \_SB, LNXSYBUS */ 1649 acpi_add_id(pnp, ACPI_BUS_HID); /* \_SB, LNXSYBUS */
@@ -1914,7 +1854,6 @@ static acpi_status acpi_bus_check_add(acpi_handle handle, u32 lvl_not_used,
1914 struct acpi_device *device = NULL; 1854 struct acpi_device *device = NULL;
1915 int type; 1855 int type;
1916 unsigned long long sta; 1856 unsigned long long sta;
1917 acpi_status status;
1918 int result; 1857 int result;
1919 1858
1920 acpi_bus_get_device(handle, &device); 1859 acpi_bus_get_device(handle, &device);
@@ -1935,10 +1874,8 @@ static acpi_status acpi_bus_check_add(acpi_handle handle, u32 lvl_not_used,
1935 if (!(sta & ACPI_STA_DEVICE_PRESENT) && 1874 if (!(sta & ACPI_STA_DEVICE_PRESENT) &&
1936 !(sta & ACPI_STA_DEVICE_FUNCTIONING)) { 1875 !(sta & ACPI_STA_DEVICE_FUNCTIONING)) {
1937 struct acpi_device_wakeup wakeup; 1876 struct acpi_device_wakeup wakeup;
1938 acpi_handle temp;
1939 1877
1940 status = acpi_get_handle(handle, "_PRW", &temp); 1878 if (acpi_has_method(handle, "_PRW")) {
1941 if (ACPI_SUCCESS(status)) {
1942 acpi_bus_extract_wakeup_device_power_package(handle, 1879 acpi_bus_extract_wakeup_device_power_package(handle,
1943 &wakeup); 1880 &wakeup);
1944 acpi_power_resources_list_free(&wakeup.resources); 1881 acpi_power_resources_list_free(&wakeup.resources);
diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
index 72554fd31044..14df30580e15 100644
--- a/drivers/acpi/sleep.c
+++ b/drivers/acpi/sleep.c
@@ -31,12 +31,9 @@ static u8 sleep_states[ACPI_S_STATE_COUNT];
31 31
32static void acpi_sleep_tts_switch(u32 acpi_state) 32static void acpi_sleep_tts_switch(u32 acpi_state)
33{ 33{
34 union acpi_object in_arg = { ACPI_TYPE_INTEGER }; 34 acpi_status status;
35 struct acpi_object_list arg_list = { 1, &in_arg };
36 acpi_status status = AE_OK;
37 35
38 in_arg.integer.value = acpi_state; 36 status = acpi_execute_simple_method(NULL, "\\_TTS", acpi_state);
39 status = acpi_evaluate_object(NULL, "\\_TTS", &arg_list, NULL);
40 if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) { 37 if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) {
41 /* 38 /*
42 * OS can't evaluate the _TTS object correctly. Some warning 39 * OS can't evaluate the _TTS object correctly. Some warning
@@ -423,10 +420,21 @@ static void acpi_pm_finish(void)
423} 420}
424 421
425/** 422/**
426 * acpi_pm_end - Finish up suspend sequence. 423 * acpi_pm_start - Start system PM transition.
424 */
425static void acpi_pm_start(u32 acpi_state)
426{
427 acpi_target_sleep_state = acpi_state;
428 acpi_sleep_tts_switch(acpi_target_sleep_state);
429 acpi_scan_lock_acquire();
430}
431
432/**
433 * acpi_pm_end - Finish up system PM transition.
427 */ 434 */
428static void acpi_pm_end(void) 435static void acpi_pm_end(void)
429{ 436{
437 acpi_scan_lock_release();
430 /* 438 /*
431 * This is necessary in case acpi_pm_finish() is not called during a 439 * This is necessary in case acpi_pm_finish() is not called during a
432 * failing transition to a sleep state. 440 * failing transition to a sleep state.
@@ -454,21 +462,19 @@ static u32 acpi_suspend_states[] = {
454static int acpi_suspend_begin(suspend_state_t pm_state) 462static int acpi_suspend_begin(suspend_state_t pm_state)
455{ 463{
456 u32 acpi_state = acpi_suspend_states[pm_state]; 464 u32 acpi_state = acpi_suspend_states[pm_state];
457 int error = 0; 465 int error;
458 466
459 error = (nvs_nosave || nvs_nosave_s3) ? 0 : suspend_nvs_alloc(); 467 error = (nvs_nosave || nvs_nosave_s3) ? 0 : suspend_nvs_alloc();
460 if (error) 468 if (error)
461 return error; 469 return error;
462 470
463 if (sleep_states[acpi_state]) { 471 if (!sleep_states[acpi_state]) {
464 acpi_target_sleep_state = acpi_state; 472 pr_err("ACPI does not support sleep state S%u\n", acpi_state);
465 acpi_sleep_tts_switch(acpi_target_sleep_state); 473 return -ENOSYS;
466 } else {
467 printk(KERN_ERR "ACPI does not support this state: %d\n",
468 pm_state);
469 error = -ENOSYS;
470 } 474 }
471 return error; 475
476 acpi_pm_start(acpi_state);
477 return 0;
472} 478}
473 479
474/** 480/**
@@ -634,10 +640,8 @@ static int acpi_hibernation_begin(void)
634 int error; 640 int error;
635 641
636 error = nvs_nosave ? 0 : suspend_nvs_alloc(); 642 error = nvs_nosave ? 0 : suspend_nvs_alloc();
637 if (!error) { 643 if (!error)
638 acpi_target_sleep_state = ACPI_STATE_S4; 644 acpi_pm_start(ACPI_STATE_S4);
639 acpi_sleep_tts_switch(acpi_target_sleep_state);
640 }
641 645
642 return error; 646 return error;
643} 647}
@@ -716,8 +720,10 @@ static int acpi_hibernation_begin_old(void)
716 if (!error) { 720 if (!error) {
717 if (!nvs_nosave) 721 if (!nvs_nosave)
718 error = suspend_nvs_alloc(); 722 error = suspend_nvs_alloc();
719 if (!error) 723 if (!error) {
720 acpi_target_sleep_state = ACPI_STATE_S4; 724 acpi_target_sleep_state = ACPI_STATE_S4;
725 acpi_scan_lock_acquire();
726 }
721 } 727 }
722 return error; 728 return error;
723} 729}
diff --git a/drivers/acpi/thermal.c b/drivers/acpi/thermal.c
index ccf9527d7ed3..9063239e0b13 100644
--- a/drivers/acpi/thermal.c
+++ b/drivers/acpi/thermal.c
@@ -233,26 +233,16 @@ static int acpi_thermal_get_polling_frequency(struct acpi_thermal *tz)
233 233
234static int acpi_thermal_set_cooling_mode(struct acpi_thermal *tz, int mode) 234static int acpi_thermal_set_cooling_mode(struct acpi_thermal *tz, int mode)
235{ 235{
236 acpi_status status = AE_OK;
237 union acpi_object arg0 = { ACPI_TYPE_INTEGER };
238 struct acpi_object_list arg_list = { 1, &arg0 };
239 acpi_handle handle = NULL;
240
241
242 if (!tz) 236 if (!tz)
243 return -EINVAL; 237 return -EINVAL;
244 238
245 status = acpi_get_handle(tz->device->handle, "_SCP", &handle); 239 if (!acpi_has_method(tz->device->handle, "_SCP")) {
246 if (ACPI_FAILURE(status)) {
247 ACPI_DEBUG_PRINT((ACPI_DB_INFO, "_SCP not present\n")); 240 ACPI_DEBUG_PRINT((ACPI_DB_INFO, "_SCP not present\n"));
248 return -ENODEV; 241 return -ENODEV;
249 } 242 } else if (ACPI_FAILURE(acpi_execute_simple_method(tz->device->handle,
250 243 "_SCP", mode))) {
251 arg0.integer.value = mode;
252
253 status = acpi_evaluate_object(handle, NULL, &arg_list, NULL);
254 if (ACPI_FAILURE(status))
255 return -ENODEV; 244 return -ENODEV;
245 }
256 246
257 return 0; 247 return 0;
258} 248}
diff --git a/drivers/acpi/utils.c b/drivers/acpi/utils.c
index 744371304313..552248b0005b 100644
--- a/drivers/acpi/utils.c
+++ b/drivers/acpi/utils.c
@@ -495,3 +495,73 @@ acpi_handle_printk(const char *level, acpi_handle handle, const char *fmt, ...)
495 kfree(buffer.pointer); 495 kfree(buffer.pointer);
496} 496}
497EXPORT_SYMBOL(acpi_handle_printk); 497EXPORT_SYMBOL(acpi_handle_printk);
498
499/**
500 * acpi_has_method: Check whether @handle has a method named @name
501 * @handle: ACPI device handle
502 * @name: name of object or method
503 *
504 * Check whether @handle has a method named @name.
505 */
506bool acpi_has_method(acpi_handle handle, char *name)
507{
508 acpi_handle tmp;
509
510 return ACPI_SUCCESS(acpi_get_handle(handle, name, &tmp));
511}
512EXPORT_SYMBOL(acpi_has_method);
513
514acpi_status acpi_execute_simple_method(acpi_handle handle, char *method,
515 u64 arg)
516{
517 union acpi_object obj = { .type = ACPI_TYPE_INTEGER };
518 struct acpi_object_list arg_list = { .count = 1, .pointer = &obj, };
519
520 obj.integer.value = arg;
521
522 return acpi_evaluate_object(handle, method, &arg_list, NULL);
523}
524EXPORT_SYMBOL(acpi_execute_simple_method);
525
526/**
527 * acpi_evaluate_ej0: Evaluate _EJ0 method for hotplug operations
528 * @handle: ACPI device handle
529 *
530 * Evaluate device's _EJ0 method for hotplug operations.
531 */
532acpi_status acpi_evaluate_ej0(acpi_handle handle)
533{
534 acpi_status status;
535
536 status = acpi_execute_simple_method(handle, "_EJ0", 1);
537 if (status == AE_NOT_FOUND)
538 acpi_handle_warn(handle, "No _EJ0 support for device\n");
539 else if (ACPI_FAILURE(status))
540 acpi_handle_warn(handle, "Eject failed (0x%x)\n", status);
541
542 return status;
543}
544
545/**
546 * acpi_evaluate_lck: Evaluate _LCK method to lock/unlock device
547 * @handle: ACPI device handle
548 * @lock: lock device if non-zero, otherwise unlock device
549 *
550 * Evaluate device's _LCK method if present to lock/unlock device
551 */
552acpi_status acpi_evaluate_lck(acpi_handle handle, int lock)
553{
554 acpi_status status;
555
556 status = acpi_execute_simple_method(handle, "_LCK", !!lock);
557 if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) {
558 if (lock)
559 acpi_handle_warn(handle,
560 "Locking device failed (0x%x)\n", status);
561 else
562 acpi_handle_warn(handle,
563 "Unlocking device failed (0x%x)\n", status);
564 }
565
566 return status;
567}
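
Note on the utils.c additions above: acpi_has_method() and acpi_execute_simple_method() are the helpers that replace the open-coded acpi_get_handle()/acpi_evaluate_object() sequences removed throughout the rest of this patch, and acpi_evaluate_ej0()/acpi_evaluate_lck() wrap the same helper while adding the warnings previously open-coded in scan.c. Typical usage looks like the sketch below; my_set_wake() and the choice of _PSW are illustrative, not from the patch.

#include <linux/acpi.h>
#include <linux/errno.h>

/* Illustrative sketch (hypothetical helper), not part of the patch. */
static int my_set_wake(acpi_handle handle, bool enable)
{
	acpi_status status;

	if (!acpi_has_method(handle, "_PSW"))
		return 0;	/* no _PSW on this device, nothing to do */

	/* single-integer-argument evaluation, no arg list to build by hand */
	status = acpi_execute_simple_method(handle, "_PSW", enable);
	return ACPI_FAILURE(status) ? -EIO : 0;
}
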
diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c
index 9c4ebfcbe865..c86fc0c70ff6 100644
--- a/drivers/acpi/video.c
+++ b/drivers/acpi/video.c
@@ -355,14 +355,10 @@ static int
355acpi_video_device_lcd_set_level(struct acpi_video_device *device, int level) 355acpi_video_device_lcd_set_level(struct acpi_video_device *device, int level)
356{ 356{
357 int status; 357 int status;
358 union acpi_object arg0 = { ACPI_TYPE_INTEGER };
359 struct acpi_object_list args = { 1, &arg0 };
360 int state; 358 int state;
361 359
362 arg0.integer.value = level; 360 status = acpi_execute_simple_method(device->dev->handle,
363 361 "_BCM", level);
364 status = acpi_evaluate_object(device->dev->handle, "_BCM",
365 &args, NULL);
366 if (ACPI_FAILURE(status)) { 362 if (ACPI_FAILURE(status)) {
367 ACPI_ERROR((AE_INFO, "Evaluating _BCM failed")); 363 ACPI_ERROR((AE_INFO, "Evaluating _BCM failed"));
368 return -EIO; 364 return -EIO;
@@ -638,18 +634,15 @@ static int
638acpi_video_bus_DOS(struct acpi_video_bus *video, int bios_flag, int lcd_flag) 634acpi_video_bus_DOS(struct acpi_video_bus *video, int bios_flag, int lcd_flag)
639{ 635{
640 acpi_status status; 636 acpi_status status;
641 union acpi_object arg0 = { ACPI_TYPE_INTEGER };
642 struct acpi_object_list args = { 1, &arg0 };
643 637
644 if (!video->cap._DOS) 638 if (!video->cap._DOS)
645 return 0; 639 return 0;
646 640
647 if (bios_flag < 0 || bios_flag > 3 || lcd_flag < 0 || lcd_flag > 1) 641 if (bios_flag < 0 || bios_flag > 3 || lcd_flag < 0 || lcd_flag > 1)
648 return -EINVAL; 642 return -EINVAL;
649 arg0.integer.value = (lcd_flag << 2) | bios_flag; 643 video->dos_setting = (lcd_flag << 2) | bios_flag;
650 video->dos_setting = arg0.integer.value; 644 status = acpi_execute_simple_method(video->device->handle, "_DOS",
651 status = acpi_evaluate_object(video->device->handle, "_DOS", 645 (lcd_flag << 2) | bios_flag);
652 &args, NULL);
653 if (ACPI_FAILURE(status)) 646 if (ACPI_FAILURE(status))
654 return -EIO; 647 return -EIO;
655 648
@@ -885,31 +878,21 @@ out:
885 878
886static void acpi_video_device_find_cap(struct acpi_video_device *device) 879static void acpi_video_device_find_cap(struct acpi_video_device *device)
887{ 880{
888 acpi_handle h_dummy1; 881 if (acpi_has_method(device->dev->handle, "_ADR"))
889
890 if (ACPI_SUCCESS(acpi_get_handle(device->dev->handle, "_ADR", &h_dummy1))) {
891 device->cap._ADR = 1; 882 device->cap._ADR = 1;
892 } 883 if (acpi_has_method(device->dev->handle, "_BCL"))
893 if (ACPI_SUCCESS(acpi_get_handle(device->dev->handle, "_BCL", &h_dummy1))) {
894 device->cap._BCL = 1; 884 device->cap._BCL = 1;
895 } 885 if (acpi_has_method(device->dev->handle, "_BCM"))
896 if (ACPI_SUCCESS(acpi_get_handle(device->dev->handle, "_BCM", &h_dummy1))) {
897 device->cap._BCM = 1; 886 device->cap._BCM = 1;
898 } 887 if (acpi_has_method(device->dev->handle, "_BQC")) {
899 if (ACPI_SUCCESS(acpi_get_handle(device->dev->handle,"_BQC",&h_dummy1)))
900 device->cap._BQC = 1; 888 device->cap._BQC = 1;
901 else if (ACPI_SUCCESS(acpi_get_handle(device->dev->handle, "_BCQ", 889 } else if (acpi_has_method(device->dev->handle, "_BCQ")) {
902 &h_dummy1))) {
903 printk(KERN_WARNING FW_BUG "_BCQ is used instead of _BQC\n"); 890 printk(KERN_WARNING FW_BUG "_BCQ is used instead of _BQC\n");
904 device->cap._BCQ = 1; 891 device->cap._BCQ = 1;
905 } 892 }
906 893
907 if (ACPI_SUCCESS(acpi_get_handle(device->dev->handle, "_DDC", &h_dummy1))) { 894 if (acpi_has_method(device->dev->handle, "_DDC"))
908 device->cap._DDC = 1; 895 device->cap._DDC = 1;
909 }
910
911 if (acpi_video_init_brightness(device))
912 return;
913 896
914 if (acpi_video_backlight_support()) { 897 if (acpi_video_backlight_support()) {
915 struct backlight_properties props; 898 struct backlight_properties props;
@@ -920,6 +903,9 @@ static void acpi_video_device_find_cap(struct acpi_video_device *device)
920 static int count = 0; 903 static int count = 0;
921 char *name; 904 char *name;
922 905
906 result = acpi_video_init_brightness(device);
907 if (result)
908 return;
923 name = kasprintf(GFP_KERNEL, "acpi_video%d", count); 909 name = kasprintf(GFP_KERNEL, "acpi_video%d", count);
924 if (!name) 910 if (!name)
925 return; 911 return;
@@ -979,11 +965,6 @@ static void acpi_video_device_find_cap(struct acpi_video_device *device)
979 if (result) 965 if (result)
980 printk(KERN_ERR PREFIX "Create sysfs link\n"); 966 printk(KERN_ERR PREFIX "Create sysfs link\n");
981 967
982 } else {
983 /* Remove the brightness object. */
984 kfree(device->brightness->levels);
985 kfree(device->brightness);
986 device->brightness = NULL;
987 } 968 }
988} 969}
989 970
@@ -999,26 +980,18 @@ static void acpi_video_device_find_cap(struct acpi_video_device *device)
999 980
1000static void acpi_video_bus_find_cap(struct acpi_video_bus *video) 981static void acpi_video_bus_find_cap(struct acpi_video_bus *video)
1001{ 982{
1002 acpi_handle h_dummy1; 983 if (acpi_has_method(video->device->handle, "_DOS"))
1003
1004 if (ACPI_SUCCESS(acpi_get_handle(video->device->handle, "_DOS", &h_dummy1))) {
1005 video->cap._DOS = 1; 984 video->cap._DOS = 1;
1006 } 985 if (acpi_has_method(video->device->handle, "_DOD"))
1007 if (ACPI_SUCCESS(acpi_get_handle(video->device->handle, "_DOD", &h_dummy1))) {
1008 video->cap._DOD = 1; 986 video->cap._DOD = 1;
1009 } 987 if (acpi_has_method(video->device->handle, "_ROM"))
1010 if (ACPI_SUCCESS(acpi_get_handle(video->device->handle, "_ROM", &h_dummy1))) {
1011 video->cap._ROM = 1; 988 video->cap._ROM = 1;
1012 } 989 if (acpi_has_method(video->device->handle, "_GPD"))
1013 if (ACPI_SUCCESS(acpi_get_handle(video->device->handle, "_GPD", &h_dummy1))) {
1014 video->cap._GPD = 1; 990 video->cap._GPD = 1;
1015 } 991 if (acpi_has_method(video->device->handle, "_SPD"))
1016 if (ACPI_SUCCESS(acpi_get_handle(video->device->handle, "_SPD", &h_dummy1))) {
1017 video->cap._SPD = 1; 992 video->cap._SPD = 1;
1018 } 993 if (acpi_has_method(video->device->handle, "_VPO"))
1019 if (ACPI_SUCCESS(acpi_get_handle(video->device->handle, "_VPO", &h_dummy1))) {
1020 video->cap._VPO = 1; 994 video->cap._VPO = 1;
1021 }
1022} 995}
1023 996
1024/* 997/*
diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c
index c3397748ba46..940edbf2fe8f 100644
--- a/drivers/acpi/video_detect.c
+++ b/drivers/acpi/video_detect.c
@@ -53,14 +53,13 @@ acpi_backlight_cap_match(acpi_handle handle, u32 level, void *context,
53 void **retyurn_value) 53 void **retyurn_value)
54{ 54{
55 long *cap = context; 55 long *cap = context;
56 acpi_handle h_dummy;
57 56
58 if (ACPI_SUCCESS(acpi_get_handle(handle, "_BCM", &h_dummy)) && 57 if (acpi_has_method(handle, "_BCM") &&
59 ACPI_SUCCESS(acpi_get_handle(handle, "_BCL", &h_dummy))) { 58 acpi_has_method(handle, "_BCL")) {
60 ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found generic backlight " 59 ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found generic backlight "
61 "support\n")); 60 "support\n"));
62 *cap |= ACPI_VIDEO_BACKLIGHT; 61 *cap |= ACPI_VIDEO_BACKLIGHT;
63 if (ACPI_FAILURE(acpi_get_handle(handle, "_BQC", &h_dummy))) 62 if (!acpi_has_method(handle, "_BQC"))
64 printk(KERN_WARNING FW_BUG PREFIX "No _BQC method, " 63 printk(KERN_WARNING FW_BUG PREFIX "No _BQC method, "
65 "cannot determine initial brightness\n"); 64 "cannot determine initial brightness\n");
66 /* We have backlight support, no need to scan further */ 65 /* We have backlight support, no need to scan further */
@@ -79,22 +78,20 @@ acpi_backlight_cap_match(acpi_handle handle, u32 level, void *context,
79 */ 78 */
80long acpi_is_video_device(acpi_handle handle) 79long acpi_is_video_device(acpi_handle handle)
81{ 80{
82 acpi_handle h_dummy;
83 long video_caps = 0; 81 long video_caps = 0;
84 82
85 /* Is this device able to support video switching ? */ 83 /* Is this device able to support video switching ? */
86 if (ACPI_SUCCESS(acpi_get_handle(handle, "_DOD", &h_dummy)) || 84 if (acpi_has_method(handle, "_DOD") || acpi_has_method(handle, "_DOS"))
87 ACPI_SUCCESS(acpi_get_handle(handle, "_DOS", &h_dummy)))
88 video_caps |= ACPI_VIDEO_OUTPUT_SWITCHING; 85 video_caps |= ACPI_VIDEO_OUTPUT_SWITCHING;
89 86
90 /* Is this device able to retrieve a video ROM ? */ 87 /* Is this device able to retrieve a video ROM ? */
91 if (ACPI_SUCCESS(acpi_get_handle(handle, "_ROM", &h_dummy))) 88 if (acpi_has_method(handle, "_ROM"))
92 video_caps |= ACPI_VIDEO_ROM_AVAILABLE; 89 video_caps |= ACPI_VIDEO_ROM_AVAILABLE;
93 90
94 /* Is this device able to configure which video head to be POSTed ? */ 91 /* Is this device able to configure which video head to be POSTed ? */
95 if (ACPI_SUCCESS(acpi_get_handle(handle, "_VPO", &h_dummy)) && 92 if (acpi_has_method(handle, "_VPO") &&
96 ACPI_SUCCESS(acpi_get_handle(handle, "_GPD", &h_dummy)) && 93 acpi_has_method(handle, "_GPD") &&
97 ACPI_SUCCESS(acpi_get_handle(handle, "_SPD", &h_dummy))) 94 acpi_has_method(handle, "_SPD"))
98 video_caps |= ACPI_VIDEO_DEVICE_POSTING; 95 video_caps |= ACPI_VIDEO_DEVICE_POSTING;
99 96
100 /* Only check for backlight functionality if one of the above hit. */ 97 /* Only check for backlight functionality if one of the above hit. */
diff --git a/drivers/ata/libata-acpi.c b/drivers/ata/libata-acpi.c
index cf4e7020adac..da8170dfc90f 100644
--- a/drivers/ata/libata-acpi.c
+++ b/drivers/ata/libata-acpi.c
@@ -947,11 +947,11 @@ static void pata_acpi_set_state(struct ata_port *ap, pm_message_t state)
947 continue; 947 continue;
948 948
949 acpi_bus_set_power(dev_handle, state.event & PM_EVENT_RESUME ? 949 acpi_bus_set_power(dev_handle, state.event & PM_EVENT_RESUME ?
950 ACPI_STATE_D0 : ACPI_STATE_D3); 950 ACPI_STATE_D0 : ACPI_STATE_D3_COLD);
951 } 951 }
952 952
953 if (!(state.event & PM_EVENT_RESUME)) 953 if (!(state.event & PM_EVENT_RESUME))
954 acpi_bus_set_power(port_handle, ACPI_STATE_D3); 954 acpi_bus_set_power(port_handle, ACPI_STATE_D3_COLD);
955} 955}
956 956
957/** 957/**
diff --git a/drivers/ata/libata-pmp.c b/drivers/ata/libata-pmp.c
index 1c41722bb7e2..20fd337a5731 100644
--- a/drivers/ata/libata-pmp.c
+++ b/drivers/ata/libata-pmp.c
@@ -289,24 +289,24 @@ static int sata_pmp_configure(struct ata_device *dev, int print_info)
289 289
290 /* Disable sending Early R_OK. 290 /* Disable sending Early R_OK.
291 * With "cached read" HDD testing and multiple ports busy on a SATA 291 * With "cached read" HDD testing and multiple ports busy on a SATA
292 * host controller, 3726 PMP will very rarely drop a deferred 292 * host controller, 3x26 PMP will very rarely drop a deferred
293 * R_OK that was intended for the host. Symptom will be all 293 * R_OK that was intended for the host. Symptom will be all
294 * 5 drives under test will timeout, get reset, and recover. 294 * 5 drives under test will timeout, get reset, and recover.
295 */ 295 */
296 if (vendor == 0x1095 && devid == 0x3726) { 296 if (vendor == 0x1095 && (devid == 0x3726 || devid == 0x3826)) {
297 u32 reg; 297 u32 reg;
298 298
299 err_mask = sata_pmp_read(&ap->link, PMP_GSCR_SII_POL, &reg); 299 err_mask = sata_pmp_read(&ap->link, PMP_GSCR_SII_POL, &reg);
300 if (err_mask) { 300 if (err_mask) {
301 rc = -EIO; 301 rc = -EIO;
302 reason = "failed to read Sil3726 Private Register"; 302 reason = "failed to read Sil3x26 Private Register";
303 goto fail; 303 goto fail;
304 } 304 }
305 reg &= ~0x1; 305 reg &= ~0x1;
306 err_mask = sata_pmp_write(&ap->link, PMP_GSCR_SII_POL, reg); 306 err_mask = sata_pmp_write(&ap->link, PMP_GSCR_SII_POL, reg);
307 if (err_mask) { 307 if (err_mask) {
308 rc = -EIO; 308 rc = -EIO;
309 reason = "failed to write Sil3726 Private Register"; 309 reason = "failed to write Sil3x26 Private Register";
310 goto fail; 310 goto fail;
311 } 311 }
312 } 312 }
@@ -383,8 +383,8 @@ static void sata_pmp_quirks(struct ata_port *ap)
383 u16 devid = sata_pmp_gscr_devid(gscr); 383 u16 devid = sata_pmp_gscr_devid(gscr);
384 struct ata_link *link; 384 struct ata_link *link;
385 385
386 if (vendor == 0x1095 && devid == 0x3726) { 386 if (vendor == 0x1095 && (devid == 0x3726 || devid == 0x3826)) {
387 /* sil3726 quirks */ 387 /* sil3x26 quirks */
388 ata_for_each_link(link, ap, EDGE) { 388 ata_for_each_link(link, ap, EDGE) {
389 /* link reports offline after LPM */ 389 /* link reports offline after LPM */
390 link->flags |= ATA_LFLAG_NO_LPM; 390 link->flags |= ATA_LFLAG_NO_LPM;
diff --git a/drivers/ata/sata_fsl.c b/drivers/ata/sata_fsl.c
index 19720a0a4a65..851bd3f43ac6 100644
--- a/drivers/ata/sata_fsl.c
+++ b/drivers/ata/sata_fsl.c
@@ -293,6 +293,7 @@ static void fsl_sata_set_irq_coalescing(struct ata_host *host,
293{ 293{
294 struct sata_fsl_host_priv *host_priv = host->private_data; 294 struct sata_fsl_host_priv *host_priv = host->private_data;
295 void __iomem *hcr_base = host_priv->hcr_base; 295 void __iomem *hcr_base = host_priv->hcr_base;
296 unsigned long flags;
296 297
297 if (count > ICC_MAX_INT_COUNT_THRESHOLD) 298 if (count > ICC_MAX_INT_COUNT_THRESHOLD)
298 count = ICC_MAX_INT_COUNT_THRESHOLD; 299 count = ICC_MAX_INT_COUNT_THRESHOLD;
@@ -305,12 +306,12 @@ static void fsl_sata_set_irq_coalescing(struct ata_host *host,
305 (count > ICC_MIN_INT_COUNT_THRESHOLD)) 306 (count > ICC_MIN_INT_COUNT_THRESHOLD))
306 ticks = ICC_SAFE_INT_TICKS; 307 ticks = ICC_SAFE_INT_TICKS;
307 308
308 spin_lock(&host->lock); 309 spin_lock_irqsave(&host->lock, flags);
309 iowrite32((count << 24 | ticks), hcr_base + ICC); 310 iowrite32((count << 24 | ticks), hcr_base + ICC);
310 311
311 intr_coalescing_count = count; 312 intr_coalescing_count = count;
312 intr_coalescing_ticks = ticks; 313 intr_coalescing_ticks = ticks;
313 spin_unlock(&host->lock); 314 spin_unlock_irqrestore(&host->lock, flags);
314 315
315 DPRINTK("interrupt coalescing, count = 0x%x, ticks = %x\n", 316 DPRINTK("interrupt coalescing, count = 0x%x, ticks = %x\n",
316 intr_coalescing_count, intr_coalescing_ticks); 317 intr_coalescing_count, intr_coalescing_ticks);
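
Note on the sata_fsl.c change above: host->lock is also taken in the controller's interrupt path, so the coalescing setup must use spin_lock_irqsave() rather than plain spin_lock(); otherwise an interrupt arriving on the same CPU while the lock is held could deadlock. The general pattern, with hypothetical names, is sketched below.

#include <linux/spinlock.h>
#include <linux/interrupt.h>

/* Illustrative sketch (hypothetical names), not part of the patch. */
static DEFINE_SPINLOCK(my_lock);
static u32 my_shared_state;

static irqreturn_t my_irq_handler(int irq, void *dev_id)
{
	spin_lock(&my_lock);		/* interrupts already disabled here */
	my_shared_state++;
	spin_unlock(&my_lock);
	return IRQ_HANDLED;
}

static void my_update_from_process_context(u32 val)
{
	unsigned long flags;

	spin_lock_irqsave(&my_lock, flags);	/* keeps the local IRQ path out */
	my_shared_state = val;
	spin_unlock_irqrestore(&my_lock, flags);
}
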
diff --git a/drivers/ata/sata_highbank.c b/drivers/ata/sata_highbank.c
index d047d92a456f..e9a4f46d962e 100644
--- a/drivers/ata/sata_highbank.c
+++ b/drivers/ata/sata_highbank.c
@@ -86,11 +86,11 @@ struct ecx_plat_data {
86 86
87#define SGPIO_SIGNALS 3 87#define SGPIO_SIGNALS 3
88#define ECX_ACTIVITY_BITS 0x300000 88#define ECX_ACTIVITY_BITS 0x300000
89#define ECX_ACTIVITY_SHIFT 2 89#define ECX_ACTIVITY_SHIFT 0
90#define ECX_LOCATE_BITS 0x80000 90#define ECX_LOCATE_BITS 0x80000
91#define ECX_LOCATE_SHIFT 1 91#define ECX_LOCATE_SHIFT 1
92#define ECX_FAULT_BITS 0x400000 92#define ECX_FAULT_BITS 0x400000
93#define ECX_FAULT_SHIFT 0 93#define ECX_FAULT_SHIFT 2
94static inline int sgpio_bit_shift(struct ecx_plat_data *pdata, u32 port, 94static inline int sgpio_bit_shift(struct ecx_plat_data *pdata, u32 port,
95 u32 shift) 95 u32 shift)
96{ 96{
diff --git a/drivers/block/aoe/aoecmd.c b/drivers/block/aoe/aoecmd.c
index 99cb944a002d..4d45dba7fb8f 100644
--- a/drivers/block/aoe/aoecmd.c
+++ b/drivers/block/aoe/aoecmd.c
@@ -906,16 +906,10 @@ bio_pageinc(struct bio *bio)
906 int i; 906 int i;
907 907
908 bio_for_each_segment(bv, bio, i) { 908 bio_for_each_segment(bv, bio, i) {
909 page = bv->bv_page;
910 /* Non-zero page count for non-head members of 909 /* Non-zero page count for non-head members of
911 * compound pages is no longer allowed by the kernel, 910 * compound pages is no longer allowed by the kernel.
912 * but this has never been seen here.
913 */ 911 */
914 if (unlikely(PageCompound(page))) 912 page = compound_trans_head(bv->bv_page);
915 if (compound_trans_head(page) != page) {
916 pr_crit("page tail used for block I/O\n");
917 BUG();
918 }
919 atomic_inc(&page->_count); 913 atomic_inc(&page->_count);
920 } 914 }
921} 915}
@@ -924,10 +918,13 @@ static void
924bio_pagedec(struct bio *bio) 918bio_pagedec(struct bio *bio)
925{ 919{
926 struct bio_vec *bv; 920 struct bio_vec *bv;
921 struct page *page;
927 int i; 922 int i;
928 923
929 bio_for_each_segment(bv, bio, i) 924 bio_for_each_segment(bv, bio, i) {
930 atomic_dec(&bv->bv_page->_count); 925 page = compound_trans_head(bv->bv_page);
926 atomic_dec(&page->_count);
927 }
931} 928}
932 929
933static void 930static void
diff --git a/drivers/clk/samsung/clk-exynos4.c b/drivers/clk/samsung/clk-exynos4.c
index 1bdb882c845b..4e5739773c33 100644
--- a/drivers/clk/samsung/clk-exynos4.c
+++ b/drivers/clk/samsung/clk-exynos4.c
@@ -581,11 +581,15 @@ struct samsung_div_clock exynos4x12_div_clks[] __initdata = {
581 DIV(none, "div_spi1_isp", "mout_spi1_isp", E4X12_DIV_ISP, 16, 4), 581 DIV(none, "div_spi1_isp", "mout_spi1_isp", E4X12_DIV_ISP, 16, 4),
582 DIV(none, "div_spi1_isp_pre", "div_spi1_isp", E4X12_DIV_ISP, 20, 8), 582 DIV(none, "div_spi1_isp_pre", "div_spi1_isp", E4X12_DIV_ISP, 20, 8),
583 DIV(none, "div_uart_isp", "mout_uart_isp", E4X12_DIV_ISP, 28, 4), 583 DIV(none, "div_uart_isp", "mout_uart_isp", E4X12_DIV_ISP, 28, 4),
584 DIV(div_isp0, "div_isp0", "aclk200", E4X12_DIV_ISP0, 0, 3), 584 DIV_F(div_isp0, "div_isp0", "aclk200", E4X12_DIV_ISP0, 0, 3,
585 DIV(div_isp1, "div_isp1", "aclk200", E4X12_DIV_ISP0, 4, 3), 585 CLK_GET_RATE_NOCACHE, 0),
586 DIV_F(div_isp1, "div_isp1", "aclk200", E4X12_DIV_ISP0, 4, 3,
587 CLK_GET_RATE_NOCACHE, 0),
586 DIV(none, "div_mpwm", "div_isp1", E4X12_DIV_ISP1, 0, 3), 588 DIV(none, "div_mpwm", "div_isp1", E4X12_DIV_ISP1, 0, 3),
587 DIV(div_mcuisp0, "div_mcuisp0", "aclk400_mcuisp", E4X12_DIV_ISP1, 4, 3), 589 DIV_F(div_mcuisp0, "div_mcuisp0", "aclk400_mcuisp", E4X12_DIV_ISP1,
588 DIV(div_mcuisp1, "div_mcuisp1", "div_mcuisp0", E4X12_DIV_ISP1, 8, 3), 590 4, 3, CLK_GET_RATE_NOCACHE, 0),
591 DIV_F(div_mcuisp1, "div_mcuisp1", "div_mcuisp0", E4X12_DIV_ISP1,
592 8, 3, CLK_GET_RATE_NOCACHE, 0),
589 DIV(sclk_fimg2d, "sclk_fimg2d", "mout_g2d", DIV_DMC1, 0, 4), 593 DIV(sclk_fimg2d, "sclk_fimg2d", "mout_g2d", DIV_DMC1, 0, 4),
590}; 594};
591 595
@@ -863,57 +867,57 @@ struct samsung_gate_clock exynos4x12_gate_clks[] __initdata = {
863 GATE_DA(i2s0, "samsung-i2s.0", "i2s0", "aclk100", 867 GATE_DA(i2s0, "samsung-i2s.0", "i2s0", "aclk100",
864 E4X12_GATE_IP_MAUDIO, 3, 0, 0, "iis"), 868 E4X12_GATE_IP_MAUDIO, 3, 0, 0, "iis"),
865 GATE(fimc_isp, "isp", "aclk200", E4X12_GATE_ISP0, 0, 869 GATE(fimc_isp, "isp", "aclk200", E4X12_GATE_ISP0, 0,
866 CLK_IGNORE_UNUSED, 0), 870 CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
867 GATE(fimc_drc, "drc", "aclk200", E4X12_GATE_ISP0, 1, 871 GATE(fimc_drc, "drc", "aclk200", E4X12_GATE_ISP0, 1,
868 CLK_IGNORE_UNUSED, 0), 872 CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
869 GATE(fimc_fd, "fd", "aclk200", E4X12_GATE_ISP0, 2, 873 GATE(fimc_fd, "fd", "aclk200", E4X12_GATE_ISP0, 2,
870 CLK_IGNORE_UNUSED, 0), 874 CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
871 GATE(fimc_lite0, "lite0", "aclk200", E4X12_GATE_ISP0, 3, 875 GATE(fimc_lite0, "lite0", "aclk200", E4X12_GATE_ISP0, 3,
872 CLK_IGNORE_UNUSED, 0), 876 CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
873 GATE(fimc_lite1, "lite1", "aclk200", E4X12_GATE_ISP0, 4, 877 GATE(fimc_lite1, "lite1", "aclk200", E4X12_GATE_ISP0, 4,
874 CLK_IGNORE_UNUSED, 0), 878 CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
875 GATE(mcuisp, "mcuisp", "aclk200", E4X12_GATE_ISP0, 5, 879 GATE(mcuisp, "mcuisp", "aclk200", E4X12_GATE_ISP0, 5,
876 CLK_IGNORE_UNUSED, 0), 880 CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
877 GATE(gicisp, "gicisp", "aclk200", E4X12_GATE_ISP0, 7, 881 GATE(gicisp, "gicisp", "aclk200", E4X12_GATE_ISP0, 7,
878 CLK_IGNORE_UNUSED, 0), 882 CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
879 GATE(smmu_isp, "smmu_isp", "aclk200", E4X12_GATE_ISP0, 8, 883 GATE(smmu_isp, "smmu_isp", "aclk200", E4X12_GATE_ISP0, 8,
880 CLK_IGNORE_UNUSED, 0), 884 CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
881 GATE(smmu_drc, "smmu_drc", "aclk200", E4X12_GATE_ISP0, 9, 885 GATE(smmu_drc, "smmu_drc", "aclk200", E4X12_GATE_ISP0, 9,
882 CLK_IGNORE_UNUSED, 0), 886 CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
883 GATE(smmu_fd, "smmu_fd", "aclk200", E4X12_GATE_ISP0, 10, 887 GATE(smmu_fd, "smmu_fd", "aclk200", E4X12_GATE_ISP0, 10,
884 CLK_IGNORE_UNUSED, 0), 888 CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
885 GATE(smmu_lite0, "smmu_lite0", "aclk200", E4X12_GATE_ISP0, 11, 889 GATE(smmu_lite0, "smmu_lite0", "aclk200", E4X12_GATE_ISP0, 11,
886 CLK_IGNORE_UNUSED, 0), 890 CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
887 GATE(smmu_lite1, "smmu_lite1", "aclk200", E4X12_GATE_ISP0, 12, 891 GATE(smmu_lite1, "smmu_lite1", "aclk200", E4X12_GATE_ISP0, 12,
888 CLK_IGNORE_UNUSED, 0), 892 CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
889 GATE(ppmuispmx, "ppmuispmx", "aclk200", E4X12_GATE_ISP0, 20, 893 GATE(ppmuispmx, "ppmuispmx", "aclk200", E4X12_GATE_ISP0, 20,
890 CLK_IGNORE_UNUSED, 0), 894 CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
891 GATE(ppmuispx, "ppmuispx", "aclk200", E4X12_GATE_ISP0, 21, 895 GATE(ppmuispx, "ppmuispx", "aclk200", E4X12_GATE_ISP0, 21,
892 CLK_IGNORE_UNUSED, 0), 896 CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
893 GATE(mcuctl_isp, "mcuctl_isp", "aclk200", E4X12_GATE_ISP0, 23, 897 GATE(mcuctl_isp, "mcuctl_isp", "aclk200", E4X12_GATE_ISP0, 23,
894 CLK_IGNORE_UNUSED, 0), 898 CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
895 GATE(mpwm_isp, "mpwm_isp", "aclk200", E4X12_GATE_ISP0, 24, 899 GATE(mpwm_isp, "mpwm_isp", "aclk200", E4X12_GATE_ISP0, 24,
896 CLK_IGNORE_UNUSED, 0), 900 CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
897 GATE(i2c0_isp, "i2c0_isp", "aclk200", E4X12_GATE_ISP0, 25, 901 GATE(i2c0_isp, "i2c0_isp", "aclk200", E4X12_GATE_ISP0, 25,
898 CLK_IGNORE_UNUSED, 0), 902 CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
899 GATE(i2c1_isp, "i2c1_isp", "aclk200", E4X12_GATE_ISP0, 26, 903 GATE(i2c1_isp, "i2c1_isp", "aclk200", E4X12_GATE_ISP0, 26,
900 CLK_IGNORE_UNUSED, 0), 904 CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
901 GATE(mtcadc_isp, "mtcadc_isp", "aclk200", E4X12_GATE_ISP0, 27, 905 GATE(mtcadc_isp, "mtcadc_isp", "aclk200", E4X12_GATE_ISP0, 27,
902 CLK_IGNORE_UNUSED, 0), 906 CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
903 GATE(pwm_isp, "pwm_isp", "aclk200", E4X12_GATE_ISP0, 28, 907 GATE(pwm_isp, "pwm_isp", "aclk200", E4X12_GATE_ISP0, 28,
904 CLK_IGNORE_UNUSED, 0), 908 CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
905 GATE(wdt_isp, "wdt_isp", "aclk200", E4X12_GATE_ISP0, 30, 909 GATE(wdt_isp, "wdt_isp", "aclk200", E4X12_GATE_ISP0, 30,
906 CLK_IGNORE_UNUSED, 0), 910 CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
907 GATE(uart_isp, "uart_isp", "aclk200", E4X12_GATE_ISP0, 31, 911 GATE(uart_isp, "uart_isp", "aclk200", E4X12_GATE_ISP0, 31,
908 CLK_IGNORE_UNUSED, 0), 912 CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
909 GATE(asyncaxim, "asyncaxim", "aclk200", E4X12_GATE_ISP1, 0, 913 GATE(asyncaxim, "asyncaxim", "aclk200", E4X12_GATE_ISP1, 0,
910 CLK_IGNORE_UNUSED, 0), 914 CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
911 GATE(smmu_ispcx, "smmu_ispcx", "aclk200", E4X12_GATE_ISP1, 4, 915 GATE(smmu_ispcx, "smmu_ispcx", "aclk200", E4X12_GATE_ISP1, 4,
912 CLK_IGNORE_UNUSED, 0), 916 CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
913 GATE(spi0_isp, "spi0_isp", "aclk200", E4X12_GATE_ISP1, 12, 917 GATE(spi0_isp, "spi0_isp", "aclk200", E4X12_GATE_ISP1, 12,
914 CLK_IGNORE_UNUSED, 0), 918 CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
915 GATE(spi1_isp, "spi1_isp", "aclk200", E4X12_GATE_ISP1, 13, 919 GATE(spi1_isp, "spi1_isp", "aclk200", E4X12_GATE_ISP1, 13,
916 CLK_IGNORE_UNUSED, 0), 920 CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
917 GATE(g2d, "g2d", "aclk200", GATE_IP_DMC, 23, 0, 0), 921 GATE(g2d, "g2d", "aclk200", GATE_IP_DMC, 23, 0, 0),
918}; 922};
919 923
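The Exynos4x12 ISP dividers and gates above gain CLK_GET_RATE_NOCACHE, which makes the common clock framework recompute the rate from the hardware registers on every clk_get_rate() call instead of returning the cached value; that matters when something outside the clock framework (presumably the ISP firmware here) can reprogram the dividers. A consumer-side sketch under that assumption (device handle and clock name are placeholders, not from the patch):

    static unsigned long isp_current_rate(struct device *dev)
    {
            struct clk *isp = devm_clk_get(dev, "div_isp0");

            if (IS_ERR(isp))
                    return 0;
            /* with CLK_GET_RATE_NOCACHE this re-reads E4X12_DIV_ISP0 */
            return clk_get_rate(isp);
    }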
diff --git a/drivers/clk/zynq/clkc.c b/drivers/clk/zynq/clkc.c
index 5c205b60a82a..089d3e30e221 100644
--- a/drivers/clk/zynq/clkc.c
+++ b/drivers/clk/zynq/clkc.c
@@ -71,6 +71,7 @@ static DEFINE_SPINLOCK(armpll_lock);
71static DEFINE_SPINLOCK(ddrpll_lock); 71static DEFINE_SPINLOCK(ddrpll_lock);
72static DEFINE_SPINLOCK(iopll_lock); 72static DEFINE_SPINLOCK(iopll_lock);
73static DEFINE_SPINLOCK(armclk_lock); 73static DEFINE_SPINLOCK(armclk_lock);
74static DEFINE_SPINLOCK(swdtclk_lock);
74static DEFINE_SPINLOCK(ddrclk_lock); 75static DEFINE_SPINLOCK(ddrclk_lock);
75static DEFINE_SPINLOCK(dciclk_lock); 76static DEFINE_SPINLOCK(dciclk_lock);
76static DEFINE_SPINLOCK(gem0clk_lock); 77static DEFINE_SPINLOCK(gem0clk_lock);
@@ -293,7 +294,7 @@ static void __init zynq_clk_setup(struct device_node *np)
293 } 294 }
294 clks[swdt] = clk_register_mux(NULL, clk_output_name[swdt], 295 clks[swdt] = clk_register_mux(NULL, clk_output_name[swdt],
295 swdt_ext_clk_mux_parents, 2, CLK_SET_RATE_PARENT, 296 swdt_ext_clk_mux_parents, 2, CLK_SET_RATE_PARENT,
296 SLCR_SWDT_CLK_SEL, 0, 1, 0, &gem0clk_lock); 297 SLCR_SWDT_CLK_SEL, 0, 1, 0, &swdtclk_lock);
297 298
298 /* DDR clocks */ 299 /* DDR clocks */
299 clk = clk_register_divider(NULL, "ddr2x_div", "ddrpll", 0, 300 clk = clk_register_divider(NULL, "ddr2x_div", "ddrpll", 0,
@@ -364,8 +365,9 @@ static void __init zynq_clk_setup(struct device_node *np)
364 CLK_SET_RATE_PARENT, SLCR_GEM0_CLK_CTRL, 20, 6, 365 CLK_SET_RATE_PARENT, SLCR_GEM0_CLK_CTRL, 20, 6,
365 CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO, 366 CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
366 &gem0clk_lock); 367 &gem0clk_lock);
367 clk = clk_register_mux(NULL, "gem0_emio_mux", gem0_mux_parents, 2, 0, 368 clk = clk_register_mux(NULL, "gem0_emio_mux", gem0_mux_parents, 2,
368 SLCR_GEM0_CLK_CTRL, 6, 1, 0, &gem0clk_lock); 369 CLK_SET_RATE_PARENT, SLCR_GEM0_CLK_CTRL, 6, 1, 0,
370 &gem0clk_lock);
369 clks[gem0] = clk_register_gate(NULL, clk_output_name[gem0], 371 clks[gem0] = clk_register_gate(NULL, clk_output_name[gem0],
370 "gem0_emio_mux", CLK_SET_RATE_PARENT, 372 "gem0_emio_mux", CLK_SET_RATE_PARENT,
371 SLCR_GEM0_CLK_CTRL, 0, 0, &gem0clk_lock); 373 SLCR_GEM0_CLK_CTRL, 0, 0, &gem0clk_lock);
@@ -386,8 +388,9 @@ static void __init zynq_clk_setup(struct device_node *np)
386 CLK_SET_RATE_PARENT, SLCR_GEM1_CLK_CTRL, 20, 6, 388 CLK_SET_RATE_PARENT, SLCR_GEM1_CLK_CTRL, 20, 6,
387 CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO, 389 CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
388 &gem1clk_lock); 390 &gem1clk_lock);
389 clk = clk_register_mux(NULL, "gem1_emio_mux", gem1_mux_parents, 2, 0, 391 clk = clk_register_mux(NULL, "gem1_emio_mux", gem1_mux_parents, 2,
390 SLCR_GEM1_CLK_CTRL, 6, 1, 0, &gem1clk_lock); 392 CLK_SET_RATE_PARENT, SLCR_GEM1_CLK_CTRL, 6, 1, 0,
393 &gem1clk_lock);
391 clks[gem1] = clk_register_gate(NULL, clk_output_name[gem1], 394 clks[gem1] = clk_register_gate(NULL, clk_output_name[gem1],
392 "gem1_emio_mux", CLK_SET_RATE_PARENT, 395 "gem1_emio_mux", CLK_SET_RATE_PARENT,
393 SLCR_GEM1_CLK_CTRL, 0, 0, &gem1clk_lock); 396 SLCR_GEM1_CLK_CTRL, 0, 0, &gem1clk_lock);
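Two independent fixes sit in the Zynq clock controller hunks above: the SWDT mux gets its own swdtclk_lock instead of borrowing gem0clk_lock, and the GEM EMIO muxes are registered with CLK_SET_RATE_PARENT so a rate request on the leaf clock can propagate up through the mux. A hedged consumer-side sketch of what that flag enables (function name assumed, not from the patch):

    /* With CLK_SET_RATE_PARENT on the mux, a clk_set_rate() on the leaf
     * clock may be satisfied by re-rating the selected parent rather than
     * being rounded or rejected at the mux itself.
     */
    static unsigned long set_and_check_gem0(struct clk *gem0, unsigned long hz)
    {
            if (clk_set_rate(gem0, hz))
                    return 0;
            return clk_get_rate(gem0);
    }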
diff --git a/drivers/gpu/drm/gma500/psb_intel_sdvo.c b/drivers/gpu/drm/gma500/psb_intel_sdvo.c
index 19e36603b23b..3bc8414533c9 100644
--- a/drivers/gpu/drm/gma500/psb_intel_sdvo.c
+++ b/drivers/gpu/drm/gma500/psb_intel_sdvo.c
@@ -500,7 +500,8 @@ static bool psb_intel_sdvo_read_response(struct psb_intel_sdvo *psb_intel_sdvo,
500 &status)) 500 &status))
501 goto log_fail; 501 goto log_fail;
502 502
503 while (status == SDVO_CMD_STATUS_PENDING && retry--) { 503 while ((status == SDVO_CMD_STATUS_PENDING ||
504 status == SDVO_CMD_STATUS_TARGET_NOT_SPECIFIED) && retry--) {
504 udelay(15); 505 udelay(15);
505 if (!psb_intel_sdvo_read_byte(psb_intel_sdvo, 506 if (!psb_intel_sdvo_read_byte(psb_intel_sdvo,
506 SDVO_I2C_CMD_STATUS, 507 SDVO_I2C_CMD_STATUS,
diff --git a/drivers/gpu/drm/i915/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
index dc53a527126b..9e6578330801 100644
--- a/drivers/gpu/drm/i915/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
@@ -85,9 +85,17 @@ static void i915_gem_unmap_dma_buf(struct dma_buf_attachment *attachment,
85 struct sg_table *sg, 85 struct sg_table *sg,
86 enum dma_data_direction dir) 86 enum dma_data_direction dir)
87{ 87{
88 struct drm_i915_gem_object *obj = attachment->dmabuf->priv;
89
90 mutex_lock(&obj->base.dev->struct_mutex);
91
88 dma_unmap_sg(attachment->dev, sg->sgl, sg->nents, dir); 92 dma_unmap_sg(attachment->dev, sg->sgl, sg->nents, dir);
89 sg_free_table(sg); 93 sg_free_table(sg);
90 kfree(sg); 94 kfree(sg);
95
96 i915_gem_object_unpin_pages(obj);
97
98 mutex_unlock(&obj->base.dev->struct_mutex);
91} 99}
92 100
93static void i915_gem_dmabuf_release(struct dma_buf *dma_buf) 101static void i915_gem_dmabuf_release(struct dma_buf *dma_buf)
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 6f514297c483..53cddd985406 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -752,6 +752,8 @@
752 will not assert AGPBUSY# and will only 752 will not assert AGPBUSY# and will only
753 be delivered when out of C3. */ 753 be delivered when out of C3. */
754#define INSTPM_FORCE_ORDERING (1<<7) /* GEN6+ */ 754#define INSTPM_FORCE_ORDERING (1<<7) /* GEN6+ */
755#define INSTPM_TLB_INVALIDATE (1<<9)
756#define INSTPM_SYNC_FLUSH (1<<5)
755#define ACTHD 0x020c8 757#define ACTHD 0x020c8
756#define FW_BLC 0x020d8 758#define FW_BLC 0x020d8
757#define FW_BLC2 0x020dc 759#define FW_BLC2 0x020dc
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index e38b45786653..be79f477a38f 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -10042,6 +10042,8 @@ struct intel_display_error_state {
10042 10042
10043 u32 power_well_driver; 10043 u32 power_well_driver;
10044 10044
10045 int num_transcoders;
10046
10045 struct intel_cursor_error_state { 10047 struct intel_cursor_error_state {
10046 u32 control; 10048 u32 control;
10047 u32 position; 10049 u32 position;
@@ -10050,16 +10052,7 @@ struct intel_display_error_state {
10050 } cursor[I915_MAX_PIPES]; 10052 } cursor[I915_MAX_PIPES];
10051 10053
10052 struct intel_pipe_error_state { 10054 struct intel_pipe_error_state {
10053 enum transcoder cpu_transcoder;
10054 u32 conf;
10055 u32 source; 10055 u32 source;
10056
10057 u32 htotal;
10058 u32 hblank;
10059 u32 hsync;
10060 u32 vtotal;
10061 u32 vblank;
10062 u32 vsync;
10063 } pipe[I915_MAX_PIPES]; 10056 } pipe[I915_MAX_PIPES];
10064 10057
10065 struct intel_plane_error_state { 10058 struct intel_plane_error_state {
@@ -10071,6 +10064,19 @@ struct intel_display_error_state {
10071 u32 surface; 10064 u32 surface;
10072 u32 tile_offset; 10065 u32 tile_offset;
10073 } plane[I915_MAX_PIPES]; 10066 } plane[I915_MAX_PIPES];
10067
10068 struct intel_transcoder_error_state {
10069 enum transcoder cpu_transcoder;
10070
10071 u32 conf;
10072
10073 u32 htotal;
10074 u32 hblank;
10075 u32 hsync;
10076 u32 vtotal;
10077 u32 vblank;
10078 u32 vsync;
10079 } transcoder[4];
10074}; 10080};
10075 10081
10076struct intel_display_error_state * 10082struct intel_display_error_state *
@@ -10078,9 +10084,17 @@ intel_display_capture_error_state(struct drm_device *dev)
10078{ 10084{
10079 drm_i915_private_t *dev_priv = dev->dev_private; 10085 drm_i915_private_t *dev_priv = dev->dev_private;
10080 struct intel_display_error_state *error; 10086 struct intel_display_error_state *error;
10081 enum transcoder cpu_transcoder; 10087 int transcoders[] = {
10088 TRANSCODER_A,
10089 TRANSCODER_B,
10090 TRANSCODER_C,
10091 TRANSCODER_EDP,
10092 };
10082 int i; 10093 int i;
10083 10094
10095 if (INTEL_INFO(dev)->num_pipes == 0)
10096 return NULL;
10097
10084 error = kmalloc(sizeof(*error), GFP_ATOMIC); 10098 error = kmalloc(sizeof(*error), GFP_ATOMIC);
10085 if (error == NULL) 10099 if (error == NULL)
10086 return NULL; 10100 return NULL;
@@ -10089,9 +10103,6 @@ intel_display_capture_error_state(struct drm_device *dev)
10089 error->power_well_driver = I915_READ(HSW_PWR_WELL_DRIVER); 10103 error->power_well_driver = I915_READ(HSW_PWR_WELL_DRIVER);
10090 10104
10091 for_each_pipe(i) { 10105 for_each_pipe(i) {
10092 cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv, i);
10093 error->pipe[i].cpu_transcoder = cpu_transcoder;
10094
10095 if (INTEL_INFO(dev)->gen <= 6 || IS_VALLEYVIEW(dev)) { 10106 if (INTEL_INFO(dev)->gen <= 6 || IS_VALLEYVIEW(dev)) {
10096 error->cursor[i].control = I915_READ(CURCNTR(i)); 10107 error->cursor[i].control = I915_READ(CURCNTR(i));
10097 error->cursor[i].position = I915_READ(CURPOS(i)); 10108 error->cursor[i].position = I915_READ(CURPOS(i));
@@ -10115,14 +10126,25 @@ intel_display_capture_error_state(struct drm_device *dev)
10115 error->plane[i].tile_offset = I915_READ(DSPTILEOFF(i)); 10126 error->plane[i].tile_offset = I915_READ(DSPTILEOFF(i));
10116 } 10127 }
10117 10128
10118 error->pipe[i].conf = I915_READ(PIPECONF(cpu_transcoder));
10119 error->pipe[i].source = I915_READ(PIPESRC(i)); 10129 error->pipe[i].source = I915_READ(PIPESRC(i));
10120 error->pipe[i].htotal = I915_READ(HTOTAL(cpu_transcoder)); 10130 }
10121 error->pipe[i].hblank = I915_READ(HBLANK(cpu_transcoder)); 10131
10122 error->pipe[i].hsync = I915_READ(HSYNC(cpu_transcoder)); 10132 error->num_transcoders = INTEL_INFO(dev)->num_pipes;
10123 error->pipe[i].vtotal = I915_READ(VTOTAL(cpu_transcoder)); 10133 if (HAS_DDI(dev_priv->dev))
10124 error->pipe[i].vblank = I915_READ(VBLANK(cpu_transcoder)); 10134 error->num_transcoders++; /* Account for eDP. */
10125 error->pipe[i].vsync = I915_READ(VSYNC(cpu_transcoder)); 10135
10136 for (i = 0; i < error->num_transcoders; i++) {
10137 enum transcoder cpu_transcoder = transcoders[i];
10138
10139 error->transcoder[i].cpu_transcoder = cpu_transcoder;
10140
10141 error->transcoder[i].conf = I915_READ(PIPECONF(cpu_transcoder));
10142 error->transcoder[i].htotal = I915_READ(HTOTAL(cpu_transcoder));
10143 error->transcoder[i].hblank = I915_READ(HBLANK(cpu_transcoder));
10144 error->transcoder[i].hsync = I915_READ(HSYNC(cpu_transcoder));
10145 error->transcoder[i].vtotal = I915_READ(VTOTAL(cpu_transcoder));
10146 error->transcoder[i].vblank = I915_READ(VBLANK(cpu_transcoder));
10147 error->transcoder[i].vsync = I915_READ(VSYNC(cpu_transcoder));
10126 } 10148 }
10127 10149
10128 /* In the code above we read the registers without checking if the power 10150 /* In the code above we read the registers without checking if the power
@@ -10144,22 +10166,16 @@ intel_display_print_error_state(struct drm_i915_error_state_buf *m,
10144{ 10166{
10145 int i; 10167 int i;
10146 10168
10169 if (!error)
10170 return;
10171
10147 err_printf(m, "Num Pipes: %d\n", INTEL_INFO(dev)->num_pipes); 10172 err_printf(m, "Num Pipes: %d\n", INTEL_INFO(dev)->num_pipes);
10148 if (HAS_POWER_WELL(dev)) 10173 if (HAS_POWER_WELL(dev))
10149 err_printf(m, "PWR_WELL_CTL2: %08x\n", 10174 err_printf(m, "PWR_WELL_CTL2: %08x\n",
10150 error->power_well_driver); 10175 error->power_well_driver);
10151 for_each_pipe(i) { 10176 for_each_pipe(i) {
10152 err_printf(m, "Pipe [%d]:\n", i); 10177 err_printf(m, "Pipe [%d]:\n", i);
10153 err_printf(m, " CPU transcoder: %c\n",
10154 transcoder_name(error->pipe[i].cpu_transcoder));
10155 err_printf(m, " CONF: %08x\n", error->pipe[i].conf);
10156 err_printf(m, " SRC: %08x\n", error->pipe[i].source); 10178 err_printf(m, " SRC: %08x\n", error->pipe[i].source);
10157 err_printf(m, " HTOTAL: %08x\n", error->pipe[i].htotal);
10158 err_printf(m, " HBLANK: %08x\n", error->pipe[i].hblank);
10159 err_printf(m, " HSYNC: %08x\n", error->pipe[i].hsync);
10160 err_printf(m, " VTOTAL: %08x\n", error->pipe[i].vtotal);
10161 err_printf(m, " VBLANK: %08x\n", error->pipe[i].vblank);
10162 err_printf(m, " VSYNC: %08x\n", error->pipe[i].vsync);
10163 10179
10164 err_printf(m, "Plane [%d]:\n", i); 10180 err_printf(m, "Plane [%d]:\n", i);
10165 err_printf(m, " CNTR: %08x\n", error->plane[i].control); 10181 err_printf(m, " CNTR: %08x\n", error->plane[i].control);
@@ -10180,5 +10196,17 @@ intel_display_print_error_state(struct drm_i915_error_state_buf *m,
10180 err_printf(m, " POS: %08x\n", error->cursor[i].position); 10196 err_printf(m, " POS: %08x\n", error->cursor[i].position);
10181 err_printf(m, " BASE: %08x\n", error->cursor[i].base); 10197 err_printf(m, " BASE: %08x\n", error->cursor[i].base);
10182 } 10198 }
10199
10200 for (i = 0; i < error->num_transcoders; i++) {
10201 err_printf(m, " CPU transcoder: %c\n",
10202 transcoder_name(error->transcoder[i].cpu_transcoder));
10203 err_printf(m, " CONF: %08x\n", error->transcoder[i].conf);
10204 err_printf(m, " HTOTAL: %08x\n", error->transcoder[i].htotal);
10205 err_printf(m, " HBLANK: %08x\n", error->transcoder[i].hblank);
10206 err_printf(m, " HSYNC: %08x\n", error->transcoder[i].hsync);
10207 err_printf(m, " VTOTAL: %08x\n", error->transcoder[i].vtotal);
10208 err_printf(m, " VBLANK: %08x\n", error->transcoder[i].vblank);
10209 err_printf(m, " VSYNC: %08x\n", error->transcoder[i].vsync);
10210 }
10183} 10211}
10184#endif 10212#endif
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 664118d8c1d6..079ef0129e74 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -968,6 +968,18 @@ void intel_ring_setup_status_page(struct intel_ring_buffer *ring)
968 968
969 I915_WRITE(mmio, (u32)ring->status_page.gfx_addr); 969 I915_WRITE(mmio, (u32)ring->status_page.gfx_addr);
970 POSTING_READ(mmio); 970 POSTING_READ(mmio);
971
972 /* Flush the TLB for this page */
973 if (INTEL_INFO(dev)->gen >= 6) {
974 u32 reg = RING_INSTPM(ring->mmio_base);
975 I915_WRITE(reg,
976 _MASKED_BIT_ENABLE(INSTPM_TLB_INVALIDATE |
977 INSTPM_SYNC_FLUSH));
978 if (wait_for((I915_READ(reg) & INSTPM_SYNC_FLUSH) == 0,
979 1000))
980 DRM_ERROR("%s: wait for SyncFlush to complete for TLB invalidation timed out\n",
981 ring->name);
982 }
971} 983}
972 984
973static int 985static int
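intel_ring_setup_status_page() now forces a TLB invalidation through INSTPM after moving the status page and waits for the SYNC_FLUSH bit to clear. INSTPM is a masked register: the upper 16 bits select which of the lower 16 bits the write actually changes. A sketch of that idiom; the macro bodies follow common i915 usage and are an assumption here, not quoted from this patch:

    /* Masked-register write helpers: bit N is only updated if bit N+16
     * is also set in the value written to the register.
     */
    #define MASKED_BIT_ENABLE(b)    (((b) << 16) | (b))
    #define MASKED_BIT_DISABLE(b)   ((b) << 16)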
diff --git a/drivers/gpu/drm/nouveau/core/core/mm.c b/drivers/gpu/drm/nouveau/core/core/mm.c
index d8291724dbd4..7a4e0891c5f8 100644
--- a/drivers/gpu/drm/nouveau/core/core/mm.c
+++ b/drivers/gpu/drm/nouveau/core/core/mm.c
@@ -98,6 +98,8 @@ nouveau_mm_head(struct nouveau_mm *mm, u8 type, u32 size_max, u32 size_min,
98 u32 splitoff; 98 u32 splitoff;
99 u32 s, e; 99 u32 s, e;
100 100
101 BUG_ON(!type);
102
101 list_for_each_entry(this, &mm->free, fl_entry) { 103 list_for_each_entry(this, &mm->free, fl_entry) {
102 e = this->offset + this->length; 104 e = this->offset + this->length;
103 s = this->offset; 105 s = this->offset;
@@ -162,6 +164,8 @@ nouveau_mm_tail(struct nouveau_mm *mm, u8 type, u32 size_max, u32 size_min,
162 struct nouveau_mm_node *prev, *this, *next; 164 struct nouveau_mm_node *prev, *this, *next;
163 u32 mask = align - 1; 165 u32 mask = align - 1;
164 166
167 BUG_ON(!type);
168
165 list_for_each_entry_reverse(this, &mm->free, fl_entry) { 169 list_for_each_entry_reverse(this, &mm->free, fl_entry) {
166 u32 e = this->offset + this->length; 170 u32 e = this->offset + this->length;
167 u32 s = this->offset; 171 u32 s = this->offset;
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/mc.h b/drivers/gpu/drm/nouveau/core/include/subdev/mc.h
index d5502267c30f..9d2cd2006250 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/mc.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/mc.h
@@ -20,8 +20,8 @@ nouveau_mc(void *obj)
20 return (void *)nv_device(obj)->subdev[NVDEV_SUBDEV_MC]; 20 return (void *)nv_device(obj)->subdev[NVDEV_SUBDEV_MC];
21} 21}
22 22
23#define nouveau_mc_create(p,e,o,d) \ 23#define nouveau_mc_create(p,e,o,m,d) \
24 nouveau_mc_create_((p), (e), (o), sizeof(**d), (void **)d) 24 nouveau_mc_create_((p), (e), (o), (m), sizeof(**d), (void **)d)
25#define nouveau_mc_destroy(p) ({ \ 25#define nouveau_mc_destroy(p) ({ \
26 struct nouveau_mc *pmc = (p); _nouveau_mc_dtor(nv_object(pmc)); \ 26 struct nouveau_mc *pmc = (p); _nouveau_mc_dtor(nv_object(pmc)); \
27}) 27})
@@ -33,7 +33,8 @@ nouveau_mc(void *obj)
33}) 33})
34 34
35int nouveau_mc_create_(struct nouveau_object *, struct nouveau_object *, 35int nouveau_mc_create_(struct nouveau_object *, struct nouveau_object *,
36 struct nouveau_oclass *, int, void **); 36 struct nouveau_oclass *, const struct nouveau_mc_intr *,
37 int, void **);
37void _nouveau_mc_dtor(struct nouveau_object *); 38void _nouveau_mc_dtor(struct nouveau_object *);
38int _nouveau_mc_init(struct nouveau_object *); 39int _nouveau_mc_init(struct nouveau_object *);
39int _nouveau_mc_fini(struct nouveau_object *, bool); 40int _nouveau_mc_fini(struct nouveau_object *, bool);
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv49.c b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv49.c
index 19e3a9a63a02..ab7ef0ac9e34 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv49.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv49.c
@@ -40,15 +40,15 @@ nv49_ram_create(struct nouveau_object *parent, struct nouveau_object *engine,
40 return ret; 40 return ret;
41 41
42 switch (pfb914 & 0x00000003) { 42 switch (pfb914 & 0x00000003) {
43 case 0x00000000: pfb->ram->type = NV_MEM_TYPE_DDR1; break; 43 case 0x00000000: ram->type = NV_MEM_TYPE_DDR1; break;
44 case 0x00000001: pfb->ram->type = NV_MEM_TYPE_DDR2; break; 44 case 0x00000001: ram->type = NV_MEM_TYPE_DDR2; break;
45 case 0x00000002: pfb->ram->type = NV_MEM_TYPE_GDDR3; break; 45 case 0x00000002: ram->type = NV_MEM_TYPE_GDDR3; break;
46 case 0x00000003: break; 46 case 0x00000003: break;
47 } 47 }
48 48
49 pfb->ram->size = nv_rd32(pfb, 0x10020c) & 0xff000000; 49 ram->size = nv_rd32(pfb, 0x10020c) & 0xff000000;
50 pfb->ram->parts = (nv_rd32(pfb, 0x100200) & 0x00000003) + 1; 50 ram->parts = (nv_rd32(pfb, 0x100200) & 0x00000003) + 1;
51 pfb->ram->tags = nv_rd32(pfb, 0x100320); 51 ram->tags = nv_rd32(pfb, 0x100320);
52 return 0; 52 return 0;
53} 53}
54 54
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv4e.c b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv4e.c
index 7192aa6e5577..63a6aab86028 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv4e.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv4e.c
@@ -38,8 +38,8 @@ nv4e_ram_create(struct nouveau_object *parent, struct nouveau_object *engine,
38 if (ret) 38 if (ret)
39 return ret; 39 return ret;
40 40
41 pfb->ram->size = nv_rd32(pfb, 0x10020c) & 0xff000000; 41 ram->size = nv_rd32(pfb, 0x10020c) & 0xff000000;
42 pfb->ram->type = NV_MEM_TYPE_STOLEN; 42 ram->type = NV_MEM_TYPE_STOLEN;
43 return 0; 43 return 0;
44} 44}
45 45
diff --git a/drivers/gpu/drm/nouveau/core/subdev/ltcg/nvc0.c b/drivers/gpu/drm/nouveau/core/subdev/ltcg/nvc0.c
index bcca883018f4..cce65cc56514 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/ltcg/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/ltcg/nvc0.c
@@ -30,8 +30,9 @@ struct nvc0_ltcg_priv {
30 struct nouveau_ltcg base; 30 struct nouveau_ltcg base;
31 u32 part_nr; 31 u32 part_nr;
32 u32 subp_nr; 32 u32 subp_nr;
33 struct nouveau_mm tags;
34 u32 num_tags; 33 u32 num_tags;
34 u32 tag_base;
35 struct nouveau_mm tags;
35 struct nouveau_mm_node *tag_ram; 36 struct nouveau_mm_node *tag_ram;
36}; 37};
37 38
@@ -117,10 +118,6 @@ nvc0_ltcg_init_tag_ram(struct nouveau_fb *pfb, struct nvc0_ltcg_priv *priv)
117 u32 tag_size, tag_margin, tag_align; 118 u32 tag_size, tag_margin, tag_align;
118 int ret; 119 int ret;
119 120
120 nv_wr32(priv, 0x17e8d8, priv->part_nr);
121 if (nv_device(pfb)->card_type >= NV_E0)
122 nv_wr32(priv, 0x17e000, priv->part_nr);
123
124 /* tags for 1/4 of VRAM should be enough (8192/4 per GiB of VRAM) */ 121 /* tags for 1/4 of VRAM should be enough (8192/4 per GiB of VRAM) */
125 priv->num_tags = (pfb->ram->size >> 17) / 4; 122 priv->num_tags = (pfb->ram->size >> 17) / 4;
126 if (priv->num_tags > (1 << 17)) 123 if (priv->num_tags > (1 << 17))
@@ -142,7 +139,7 @@ nvc0_ltcg_init_tag_ram(struct nouveau_fb *pfb, struct nvc0_ltcg_priv *priv)
142 tag_size += tag_align; 139 tag_size += tag_align;
143 tag_size = (tag_size + 0xfff) >> 12; /* round up */ 140 tag_size = (tag_size + 0xfff) >> 12; /* round up */
144 141
145 ret = nouveau_mm_tail(&pfb->vram, 0, tag_size, tag_size, 1, 142 ret = nouveau_mm_tail(&pfb->vram, 1, tag_size, tag_size, 1,
146 &priv->tag_ram); 143 &priv->tag_ram);
147 if (ret) { 144 if (ret) {
148 priv->num_tags = 0; 145 priv->num_tags = 0;
@@ -152,7 +149,7 @@ nvc0_ltcg_init_tag_ram(struct nouveau_fb *pfb, struct nvc0_ltcg_priv *priv)
152 tag_base += tag_align - 1; 149 tag_base += tag_align - 1;
153 ret = do_div(tag_base, tag_align); 150 ret = do_div(tag_base, tag_align);
154 151
155 nv_wr32(priv, 0x17e8d4, tag_base); 152 priv->tag_base = tag_base;
156 } 153 }
157 ret = nouveau_mm_init(&priv->tags, 0, priv->num_tags, 1); 154 ret = nouveau_mm_init(&priv->tags, 0, priv->num_tags, 1);
158 155
@@ -182,8 +179,6 @@ nvc0_ltcg_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
182 } 179 }
183 priv->subp_nr = nv_rd32(priv, 0x17e8dc) >> 28; 180 priv->subp_nr = nv_rd32(priv, 0x17e8dc) >> 28;
184 181
185 nv_mask(priv, 0x17e820, 0x00100000, 0x00000000); /* INTR_EN &= ~0x10 */
186
187 ret = nvc0_ltcg_init_tag_ram(pfb, priv); 182 ret = nvc0_ltcg_init_tag_ram(pfb, priv);
188 if (ret) 183 if (ret)
189 return ret; 184 return ret;
@@ -209,13 +204,32 @@ nvc0_ltcg_dtor(struct nouveau_object *object)
209 nouveau_ltcg_destroy(ltcg); 204 nouveau_ltcg_destroy(ltcg);
210} 205}
211 206
207static int
208nvc0_ltcg_init(struct nouveau_object *object)
209{
210 struct nouveau_ltcg *ltcg = (struct nouveau_ltcg *)object;
211 struct nvc0_ltcg_priv *priv = (struct nvc0_ltcg_priv *)ltcg;
212 int ret;
213
214 ret = nouveau_ltcg_init(ltcg);
215 if (ret)
216 return ret;
217
218 nv_mask(priv, 0x17e820, 0x00100000, 0x00000000); /* INTR_EN &= ~0x10 */
219 nv_wr32(priv, 0x17e8d8, priv->part_nr);
220 if (nv_device(ltcg)->card_type >= NV_E0)
221 nv_wr32(priv, 0x17e000, priv->part_nr);
222 nv_wr32(priv, 0x17e8d4, priv->tag_base);
223 return 0;
224}
225
212struct nouveau_oclass 226struct nouveau_oclass
213nvc0_ltcg_oclass = { 227nvc0_ltcg_oclass = {
214 .handle = NV_SUBDEV(LTCG, 0xc0), 228 .handle = NV_SUBDEV(LTCG, 0xc0),
215 .ofuncs = &(struct nouveau_ofuncs) { 229 .ofuncs = &(struct nouveau_ofuncs) {
216 .ctor = nvc0_ltcg_ctor, 230 .ctor = nvc0_ltcg_ctor,
217 .dtor = nvc0_ltcg_dtor, 231 .dtor = nvc0_ltcg_dtor,
218 .init = _nouveau_ltcg_init, 232 .init = nvc0_ltcg_init,
219 .fini = _nouveau_ltcg_fini, 233 .fini = _nouveau_ltcg_fini,
220 }, 234 },
221}; 235};
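In the nvc0 LTCG change above, the partition-count and tag-base writes (0x17e8d8, 0x17e000, 0x17e8d4) and the INTR_EN mask move out of the constructor path into a new ->init hook, while the constructor only computes and stores tag_base; the tag allocation also switches to a non-zero mm type, matching the BUG_ON(!type) checks added to nouveau_mm_head()/nouveau_mm_tail(). Since ->init runs on every (re)initialisation and the constructor runs once, this presumably restores the tag configuration after events such as suspend/resume. A compressed sketch of the init-side split, mirroring the patch:

    /* Compute once in the constructor, program the hardware from ->init
     * so the values are re-applied whenever the subdev is re-initialised.
     */
    static int ltcg_init_sketch(struct nvc0_ltcg_priv *priv)
    {
            nv_mask(priv, 0x17e820, 0x00100000, 0x00000000); /* INTR_EN &= ~0x10 */
            nv_wr32(priv, 0x17e8d8, priv->part_nr);
            if (nv_device(priv)->card_type >= NV_E0)
                    nv_wr32(priv, 0x17e000, priv->part_nr);
            nv_wr32(priv, 0x17e8d4, priv->tag_base);
            return 0;
    }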
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/base.c b/drivers/gpu/drm/nouveau/core/subdev/mc/base.c
index 1c0330b8c9a4..ec9cd6f10f91 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/mc/base.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/mc/base.c
@@ -80,7 +80,9 @@ _nouveau_mc_dtor(struct nouveau_object *object)
80 80
81int 81int
82nouveau_mc_create_(struct nouveau_object *parent, struct nouveau_object *engine, 82nouveau_mc_create_(struct nouveau_object *parent, struct nouveau_object *engine,
83 struct nouveau_oclass *oclass, int length, void **pobject) 83 struct nouveau_oclass *oclass,
84 const struct nouveau_mc_intr *intr_map,
85 int length, void **pobject)
84{ 86{
85 struct nouveau_device *device = nv_device(parent); 87 struct nouveau_device *device = nv_device(parent);
86 struct nouveau_mc *pmc; 88 struct nouveau_mc *pmc;
@@ -92,6 +94,8 @@ nouveau_mc_create_(struct nouveau_object *parent, struct nouveau_object *engine,
92 if (ret) 94 if (ret)
93 return ret; 95 return ret;
94 96
97 pmc->intr_map = intr_map;
98
95 ret = request_irq(device->pdev->irq, nouveau_mc_intr, 99 ret = request_irq(device->pdev->irq, nouveau_mc_intr,
96 IRQF_SHARED, "nouveau", pmc); 100 IRQF_SHARED, "nouveau", pmc);
97 if (ret < 0) 101 if (ret < 0)
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/nv04.c b/drivers/gpu/drm/nouveau/core/subdev/mc/nv04.c
index 8c769715227b..64aa4edb0d9d 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/mc/nv04.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/mc/nv04.c
@@ -50,12 +50,11 @@ nv04_mc_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
50 struct nv04_mc_priv *priv; 50 struct nv04_mc_priv *priv;
51 int ret; 51 int ret;
52 52
53 ret = nouveau_mc_create(parent, engine, oclass, &priv); 53 ret = nouveau_mc_create(parent, engine, oclass, nv04_mc_intr, &priv);
54 *pobject = nv_object(priv); 54 *pobject = nv_object(priv);
55 if (ret) 55 if (ret)
56 return ret; 56 return ret;
57 57
58 priv->base.intr_map = nv04_mc_intr;
59 return 0; 58 return 0;
60} 59}
61 60
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/nv44.c b/drivers/gpu/drm/nouveau/core/subdev/mc/nv44.c
index 51919371810f..d9891782bf28 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/mc/nv44.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/mc/nv44.c
@@ -36,12 +36,11 @@ nv44_mc_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
36 struct nv44_mc_priv *priv; 36 struct nv44_mc_priv *priv;
37 int ret; 37 int ret;
38 38
39 ret = nouveau_mc_create(parent, engine, oclass, &priv); 39 ret = nouveau_mc_create(parent, engine, oclass, nv04_mc_intr, &priv);
40 *pobject = nv_object(priv); 40 *pobject = nv_object(priv);
41 if (ret) 41 if (ret)
42 return ret; 42 return ret;
43 43
44 priv->base.intr_map = nv04_mc_intr;
45 return 0; 44 return 0;
46} 45}
47 46
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/nv50.c b/drivers/gpu/drm/nouveau/core/subdev/mc/nv50.c
index f25fc5fc7dd1..2b1afe225db8 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/mc/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/mc/nv50.c
@@ -53,12 +53,11 @@ nv50_mc_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
53 struct nv50_mc_priv *priv; 53 struct nv50_mc_priv *priv;
54 int ret; 54 int ret;
55 55
56 ret = nouveau_mc_create(parent, engine, oclass, &priv); 56 ret = nouveau_mc_create(parent, engine, oclass, nv50_mc_intr, &priv);
57 *pobject = nv_object(priv); 57 *pobject = nv_object(priv);
58 if (ret) 58 if (ret)
59 return ret; 59 return ret;
60 60
61 priv->base.intr_map = nv50_mc_intr;
62 return 0; 61 return 0;
63} 62}
64 63
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/nv98.c b/drivers/gpu/drm/nouveau/core/subdev/mc/nv98.c
index e82fd21b5041..0d57b4d3e001 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/mc/nv98.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/mc/nv98.c
@@ -54,12 +54,11 @@ nv98_mc_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
54 struct nv98_mc_priv *priv; 54 struct nv98_mc_priv *priv;
55 int ret; 55 int ret;
56 56
57 ret = nouveau_mc_create(parent, engine, oclass, &priv); 57 ret = nouveau_mc_create(parent, engine, oclass, nv98_mc_intr, &priv);
58 *pobject = nv_object(priv); 58 *pobject = nv_object(priv);
59 if (ret) 59 if (ret)
60 return ret; 60 return ret;
61 61
62 priv->base.intr_map = nv98_mc_intr;
63 return 0; 62 return 0;
64} 63}
65 64
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/nvc0.c b/drivers/gpu/drm/nouveau/core/subdev/mc/nvc0.c
index c5da3babbc62..104175c5a2dd 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/mc/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/mc/nvc0.c
@@ -57,12 +57,11 @@ nvc0_mc_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
57 struct nvc0_mc_priv *priv; 57 struct nvc0_mc_priv *priv;
58 int ret; 58 int ret;
59 59
60 ret = nouveau_mc_create(parent, engine, oclass, &priv); 60 ret = nouveau_mc_create(parent, engine, oclass, nvc0_mc_intr, &priv);
61 *pobject = nv_object(priv); 61 *pobject = nv_object(priv);
62 if (ret) 62 if (ret)
63 return ret; 63 return ret;
64 64
65 priv->base.intr_map = nvc0_mc_intr;
66 return 0; 65 return 0;
67} 66}
68 67
diff --git a/drivers/gpu/drm/nouveau/dispnv04/crtc.c b/drivers/gpu/drm/nouveau/dispnv04/crtc.c
index 0782bd2f1e04..6a13ffb53bdb 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/crtc.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/crtc.c
@@ -606,6 +606,24 @@ nv_crtc_mode_set_regs(struct drm_crtc *crtc, struct drm_display_mode * mode)
606 regp->ramdac_a34 = 0x1; 606 regp->ramdac_a34 = 0x1;
607} 607}
608 608
609static int
610nv_crtc_swap_fbs(struct drm_crtc *crtc, struct drm_framebuffer *old_fb)
611{
612 struct nv04_display *disp = nv04_display(crtc->dev);
613 struct nouveau_framebuffer *nvfb = nouveau_framebuffer(crtc->fb);
614 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
615 int ret;
616
617 ret = nouveau_bo_pin(nvfb->nvbo, TTM_PL_FLAG_VRAM);
618 if (ret == 0) {
619 if (disp->image[nv_crtc->index])
620 nouveau_bo_unpin(disp->image[nv_crtc->index]);
621 nouveau_bo_ref(nvfb->nvbo, &disp->image[nv_crtc->index]);
622 }
623
624 return ret;
625}
626
609/** 627/**
610 * Sets up registers for the given mode/adjusted_mode pair. 628 * Sets up registers for the given mode/adjusted_mode pair.
611 * 629 *
@@ -622,10 +640,15 @@ nv_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode,
622 struct drm_device *dev = crtc->dev; 640 struct drm_device *dev = crtc->dev;
623 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); 641 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
624 struct nouveau_drm *drm = nouveau_drm(dev); 642 struct nouveau_drm *drm = nouveau_drm(dev);
643 int ret;
625 644
626 NV_DEBUG(drm, "CTRC mode on CRTC %d:\n", nv_crtc->index); 645 NV_DEBUG(drm, "CTRC mode on CRTC %d:\n", nv_crtc->index);
627 drm_mode_debug_printmodeline(adjusted_mode); 646 drm_mode_debug_printmodeline(adjusted_mode);
628 647
648 ret = nv_crtc_swap_fbs(crtc, old_fb);
649 if (ret)
650 return ret;
651
629 /* unlock must come after turning off FP_TG_CONTROL in output_prepare */ 652 /* unlock must come after turning off FP_TG_CONTROL in output_prepare */
630 nv_lock_vga_crtc_shadow(dev, nv_crtc->index, -1); 653 nv_lock_vga_crtc_shadow(dev, nv_crtc->index, -1);
631 654
@@ -722,6 +745,7 @@ static void nv_crtc_commit(struct drm_crtc *crtc)
722 745
723static void nv_crtc_destroy(struct drm_crtc *crtc) 746static void nv_crtc_destroy(struct drm_crtc *crtc)
724{ 747{
748 struct nv04_display *disp = nv04_display(crtc->dev);
725 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); 749 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
726 750
727 if (!nv_crtc) 751 if (!nv_crtc)
@@ -729,6 +753,10 @@ static void nv_crtc_destroy(struct drm_crtc *crtc)
729 753
730 drm_crtc_cleanup(crtc); 754 drm_crtc_cleanup(crtc);
731 755
756 if (disp->image[nv_crtc->index])
757 nouveau_bo_unpin(disp->image[nv_crtc->index]);
758 nouveau_bo_ref(NULL, &disp->image[nv_crtc->index]);
759
732 nouveau_bo_unmap(nv_crtc->cursor.nvbo); 760 nouveau_bo_unmap(nv_crtc->cursor.nvbo);
733 nouveau_bo_unpin(nv_crtc->cursor.nvbo); 761 nouveau_bo_unpin(nv_crtc->cursor.nvbo);
734 nouveau_bo_ref(NULL, &nv_crtc->cursor.nvbo); 762 nouveau_bo_ref(NULL, &nv_crtc->cursor.nvbo);
@@ -754,6 +782,16 @@ nv_crtc_gamma_load(struct drm_crtc *crtc)
754} 782}
755 783
756static void 784static void
785nv_crtc_disable(struct drm_crtc *crtc)
786{
787 struct nv04_display *disp = nv04_display(crtc->dev);
788 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
789 if (disp->image[nv_crtc->index])
790 nouveau_bo_unpin(disp->image[nv_crtc->index]);
791 nouveau_bo_ref(NULL, &disp->image[nv_crtc->index]);
792}
793
794static void
757nv_crtc_gamma_set(struct drm_crtc *crtc, u16 *r, u16 *g, u16 *b, uint32_t start, 795nv_crtc_gamma_set(struct drm_crtc *crtc, u16 *r, u16 *g, u16 *b, uint32_t start,
758 uint32_t size) 796 uint32_t size)
759{ 797{
@@ -791,7 +829,6 @@ nv04_crtc_do_mode_set_base(struct drm_crtc *crtc,
791 struct drm_framebuffer *drm_fb; 829 struct drm_framebuffer *drm_fb;
792 struct nouveau_framebuffer *fb; 830 struct nouveau_framebuffer *fb;
793 int arb_burst, arb_lwm; 831 int arb_burst, arb_lwm;
794 int ret;
795 832
796 NV_DEBUG(drm, "index %d\n", nv_crtc->index); 833 NV_DEBUG(drm, "index %d\n", nv_crtc->index);
797 834
@@ -801,10 +838,8 @@ nv04_crtc_do_mode_set_base(struct drm_crtc *crtc,
801 return 0; 838 return 0;
802 } 839 }
803 840
804
805 /* If atomic, we want to switch to the fb we were passed, so 841 /* If atomic, we want to switch to the fb we were passed, so
806 * now we update pointers to do that. (We don't pin; just 842 * now we update pointers to do that.
807 * assume we're already pinned and update the base address.)
808 */ 843 */
809 if (atomic) { 844 if (atomic) {
810 drm_fb = passed_fb; 845 drm_fb = passed_fb;
@@ -812,17 +847,6 @@ nv04_crtc_do_mode_set_base(struct drm_crtc *crtc,
812 } else { 847 } else {
813 drm_fb = crtc->fb; 848 drm_fb = crtc->fb;
814 fb = nouveau_framebuffer(crtc->fb); 849 fb = nouveau_framebuffer(crtc->fb);
815 /* If not atomic, we can go ahead and pin, and unpin the
816 * old fb we were passed.
817 */
818 ret = nouveau_bo_pin(fb->nvbo, TTM_PL_FLAG_VRAM);
819 if (ret)
820 return ret;
821
822 if (passed_fb) {
823 struct nouveau_framebuffer *ofb = nouveau_framebuffer(passed_fb);
824 nouveau_bo_unpin(ofb->nvbo);
825 }
826 } 850 }
827 851
828 nv_crtc->fb.offset = fb->nvbo->bo.offset; 852 nv_crtc->fb.offset = fb->nvbo->bo.offset;
@@ -877,6 +901,9 @@ static int
877nv04_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y, 901nv04_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
878 struct drm_framebuffer *old_fb) 902 struct drm_framebuffer *old_fb)
879{ 903{
904 int ret = nv_crtc_swap_fbs(crtc, old_fb);
905 if (ret)
906 return ret;
880 return nv04_crtc_do_mode_set_base(crtc, old_fb, x, y, false); 907 return nv04_crtc_do_mode_set_base(crtc, old_fb, x, y, false);
881} 908}
882 909
@@ -1027,6 +1054,7 @@ static const struct drm_crtc_helper_funcs nv04_crtc_helper_funcs = {
1027 .mode_set_base = nv04_crtc_mode_set_base, 1054 .mode_set_base = nv04_crtc_mode_set_base,
1028 .mode_set_base_atomic = nv04_crtc_mode_set_base_atomic, 1055 .mode_set_base_atomic = nv04_crtc_mode_set_base_atomic,
1029 .load_lut = nv_crtc_gamma_load, 1056 .load_lut = nv_crtc_gamma_load,
1057 .disable = nv_crtc_disable,
1030}; 1058};
1031 1059
1032int 1060int
diff --git a/drivers/gpu/drm/nouveau/dispnv04/disp.h b/drivers/gpu/drm/nouveau/dispnv04/disp.h
index a0a031dad13f..9928187f0a7d 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/disp.h
+++ b/drivers/gpu/drm/nouveau/dispnv04/disp.h
@@ -81,6 +81,7 @@ struct nv04_display {
81 uint32_t saved_vga_font[4][16384]; 81 uint32_t saved_vga_font[4][16384];
82 uint32_t dac_users[4]; 82 uint32_t dac_users[4];
83 struct nouveau_object *core; 83 struct nouveau_object *core;
84 struct nouveau_bo *image[2];
84}; 85};
85 86
86static inline struct nv04_display * 87static inline struct nv04_display *
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index 907d20ef6d4d..a03e75deacaf 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -577,6 +577,9 @@ nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
577 ret = nv50_display_flip_next(crtc, fb, chan, 0); 577 ret = nv50_display_flip_next(crtc, fb, chan, 0);
578 if (ret) 578 if (ret)
579 goto fail_unreserve; 579 goto fail_unreserve;
580 } else {
581 struct nv04_display *dispnv04 = nv04_display(dev);
582 nouveau_bo_ref(new_bo, &dispnv04->image[nouveau_crtc(crtc)->index]);
580 } 583 }
581 584
582 ret = nouveau_page_flip_emit(chan, old_bo, new_bo, s, &fence); 585 ret = nouveau_page_flip_emit(chan, old_bo, new_bo, s, &fence);
diff --git a/drivers/gpu/drm/nouveau/nv40_pm.c b/drivers/gpu/drm/nouveau/nv40_pm.c
index 3af5bcd0b203..625f80d53dc2 100644
--- a/drivers/gpu/drm/nouveau/nv40_pm.c
+++ b/drivers/gpu/drm/nouveau/nv40_pm.c
@@ -131,7 +131,7 @@ nv40_calc_pll(struct drm_device *dev, u32 reg, struct nvbios_pll *pll,
131 if (clk < pll->vco1.max_freq) 131 if (clk < pll->vco1.max_freq)
132 pll->vco2.max_freq = 0; 132 pll->vco2.max_freq = 0;
133 133
134 pclk->pll_calc(pclk, pll, clk, &coef); 134 ret = pclk->pll_calc(pclk, pll, clk, &coef);
135 if (ret == 0) 135 if (ret == 0)
136 return -ERANGE; 136 return -ERANGE;
137 137
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 274b8e1b889f..9f19259667df 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -2163,7 +2163,7 @@ void cik_mm_wdoorbell(struct radeon_device *rdev, u32 offset, u32 v);
2163 WREG32(reg, tmp_); \ 2163 WREG32(reg, tmp_); \
2164 } while (0) 2164 } while (0)
2165#define WREG32_AND(reg, and) WREG32_P(reg, 0, and) 2165#define WREG32_AND(reg, and) WREG32_P(reg, 0, and)
2166#define WREG32_OR(reg, or) WREG32_P(reg, or, ~or) 2166#define WREG32_OR(reg, or) WREG32_P(reg, or, ~(or))
2167#define WREG32_PLL_P(reg, val, mask) \ 2167#define WREG32_PLL_P(reg, val, mask) \
2168 do { \ 2168 do { \
2169 uint32_t tmp_ = RREG32_PLL(reg); \ 2169 uint32_t tmp_ = RREG32_PLL(reg); \
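The radeon.h change above only adds parentheses, but it is a real fix: WREG32_OR passes its argument both as the value and, complemented, as the keep-mask, and without parentheses the complement binds only to the first token of a compound argument. Illustrative macros (not driver code) showing the difference in expansion:

    /* With or = (A | B):
     *   old mask expands to (~A | B)   -- '~' applies to A only
     *   new mask expands to (~(A | B)) -- the intended complement
     */
    #define OLD_OR_MASK(or)  (~or)
    #define NEW_OR_MASK(or)  (~(or))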
diff --git a/drivers/gpu/drm/radeon/radeon_uvd.c b/drivers/gpu/drm/radeon/radeon_uvd.c
index f1c15754e73c..b79f4f5cdd62 100644
--- a/drivers/gpu/drm/radeon/radeon_uvd.c
+++ b/drivers/gpu/drm/radeon/radeon_uvd.c
@@ -356,6 +356,14 @@ static int radeon_uvd_cs_msg(struct radeon_cs_parser *p, struct radeon_bo *bo,
356 return -EINVAL; 356 return -EINVAL;
357 } 357 }
358 358
359 if (bo->tbo.sync_obj) {
360 r = radeon_fence_wait(bo->tbo.sync_obj, false);
361 if (r) {
362 DRM_ERROR("Failed waiting for UVD message (%d)!\n", r);
363 return r;
364 }
365 }
366
359 r = radeon_bo_kmap(bo, &ptr); 367 r = radeon_bo_kmap(bo, &ptr);
360 if (r) { 368 if (r) {
361 DRM_ERROR("Failed mapping the UVD message (%d)!\n", r); 369 DRM_ERROR("Failed mapping the UVD message (%d)!\n", r);
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c
index bcc68ec204ad..f5e92cfcc140 100644
--- a/drivers/gpu/drm/radeon/rv770.c
+++ b/drivers/gpu/drm/radeon/rv770.c
@@ -744,10 +744,10 @@ static void rv770_init_golden_registers(struct radeon_device *rdev)
744 (const u32)ARRAY_SIZE(r7xx_golden_dyn_gpr_registers)); 744 (const u32)ARRAY_SIZE(r7xx_golden_dyn_gpr_registers));
745 radeon_program_register_sequence(rdev, 745 radeon_program_register_sequence(rdev,
746 rv730_golden_registers, 746 rv730_golden_registers,
747 (const u32)ARRAY_SIZE(rv770_golden_registers)); 747 (const u32)ARRAY_SIZE(rv730_golden_registers));
748 radeon_program_register_sequence(rdev, 748 radeon_program_register_sequence(rdev,
749 rv730_mgcg_init, 749 rv730_mgcg_init,
750 (const u32)ARRAY_SIZE(rv770_mgcg_init)); 750 (const u32)ARRAY_SIZE(rv730_mgcg_init));
751 break; 751 break;
752 case CHIP_RV710: 752 case CHIP_RV710:
753 radeon_program_register_sequence(rdev, 753 radeon_program_register_sequence(rdev,
@@ -758,18 +758,18 @@ static void rv770_init_golden_registers(struct radeon_device *rdev)
758 (const u32)ARRAY_SIZE(r7xx_golden_dyn_gpr_registers)); 758 (const u32)ARRAY_SIZE(r7xx_golden_dyn_gpr_registers));
759 radeon_program_register_sequence(rdev, 759 radeon_program_register_sequence(rdev,
760 rv710_golden_registers, 760 rv710_golden_registers,
761 (const u32)ARRAY_SIZE(rv770_golden_registers)); 761 (const u32)ARRAY_SIZE(rv710_golden_registers));
762 radeon_program_register_sequence(rdev, 762 radeon_program_register_sequence(rdev,
763 rv710_mgcg_init, 763 rv710_mgcg_init,
764 (const u32)ARRAY_SIZE(rv770_mgcg_init)); 764 (const u32)ARRAY_SIZE(rv710_mgcg_init));
765 break; 765 break;
766 case CHIP_RV740: 766 case CHIP_RV740:
767 radeon_program_register_sequence(rdev, 767 radeon_program_register_sequence(rdev,
768 rv740_golden_registers, 768 rv740_golden_registers,
769 (const u32)ARRAY_SIZE(rv770_golden_registers)); 769 (const u32)ARRAY_SIZE(rv740_golden_registers));
770 radeon_program_register_sequence(rdev, 770 radeon_program_register_sequence(rdev,
771 rv740_mgcg_init, 771 rv740_mgcg_init,
772 (const u32)ARRAY_SIZE(rv770_mgcg_init)); 772 (const u32)ARRAY_SIZE(rv740_mgcg_init));
773 break; 773 break;
774 default: 774 default:
775 break; 775 break;
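The rv770 fix above corrects copy-pasted ARRAY_SIZE arguments: the RV730/RV710/RV740 tables were being programmed with the length of the RV770 tables, so a mismatched length could read past a shorter table or truncate a longer one. One way to make this class of bug impossible is to bind each table to its own size at a single point, e.g. with a small wrapper macro (a suggestion, not something the driver does):

    #define PROGRAM_SEQ(rdev, tbl) \
            radeon_program_register_sequence((rdev), (tbl), \
                                             (const u32)ARRAY_SIZE(tbl))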
diff --git a/drivers/ide/ide-acpi.c b/drivers/ide/ide-acpi.c
index f1a6796b165c..140c8ef50529 100644
--- a/drivers/ide/ide-acpi.c
+++ b/drivers/ide/ide-acpi.c
@@ -520,11 +520,12 @@ void ide_acpi_set_state(ide_hwif_t *hwif, int on)
520 ide_port_for_each_present_dev(i, drive, hwif) { 520 ide_port_for_each_present_dev(i, drive, hwif) {
521 if (drive->acpidata->obj_handle) 521 if (drive->acpidata->obj_handle)
522 acpi_bus_set_power(drive->acpidata->obj_handle, 522 acpi_bus_set_power(drive->acpidata->obj_handle,
523 on ? ACPI_STATE_D0 : ACPI_STATE_D3); 523 on ? ACPI_STATE_D0 : ACPI_STATE_D3_COLD);
524 } 524 }
525 525
526 if (!on) 526 if (!on)
527 acpi_bus_set_power(hwif->acpidata->obj_handle, ACPI_STATE_D3); 527 acpi_bus_set_power(hwif->acpidata->obj_handle,
528 ACPI_STATE_D3_COLD);
528} 529}
529 530
530/** 531/**
diff --git a/drivers/iio/light/adjd_s311.c b/drivers/iio/light/adjd_s311.c
index 5f4749e60b04..c1cd5698b8ae 100644
--- a/drivers/iio/light/adjd_s311.c
+++ b/drivers/iio/light/adjd_s311.c
@@ -232,7 +232,8 @@ static int adjd_s311_read_raw(struct iio_dev *indio_dev,
232 232
233 switch (mask) { 233 switch (mask) {
234 case IIO_CHAN_INFO_RAW: 234 case IIO_CHAN_INFO_RAW:
235 ret = adjd_s311_read_data(indio_dev, chan->address, val); 235 ret = adjd_s311_read_data(indio_dev,
236 ADJD_S311_DATA_REG(chan->address), val);
236 if (ret < 0) 237 if (ret < 0)
237 return ret; 238 return ret;
238 return IIO_VAL_INT; 239 return IIO_VAL_INT;
diff --git a/drivers/md/dm-cache-policy-mq.c b/drivers/md/dm-cache-policy-mq.c
index dc112a7137fe..4296155090b2 100644
--- a/drivers/md/dm-cache-policy-mq.c
+++ b/drivers/md/dm-cache-policy-mq.c
@@ -959,23 +959,21 @@ out:
959 return r; 959 return r;
960} 960}
961 961
962static void remove_mapping(struct mq_policy *mq, dm_oblock_t oblock) 962static void mq_remove_mapping(struct dm_cache_policy *p, dm_oblock_t oblock)
963{ 963{
964 struct entry *e = hash_lookup(mq, oblock); 964 struct mq_policy *mq = to_mq_policy(p);
965 struct entry *e;
966
967 mutex_lock(&mq->lock);
968
969 e = hash_lookup(mq, oblock);
965 970
966 BUG_ON(!e || !e->in_cache); 971 BUG_ON(!e || !e->in_cache);
967 972
968 del(mq, e); 973 del(mq, e);
969 e->in_cache = false; 974 e->in_cache = false;
970 push(mq, e); 975 push(mq, e);
971}
972 976
973static void mq_remove_mapping(struct dm_cache_policy *p, dm_oblock_t oblock)
974{
975 struct mq_policy *mq = to_mq_policy(p);
976
977 mutex_lock(&mq->lock);
978 remove_mapping(mq, oblock);
979 mutex_unlock(&mq->lock); 977 mutex_unlock(&mq->lock);
980} 978}
981 979
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 07f257d44a1e..e48cb339c0c6 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -3714,11 +3714,17 @@ static int bond_neigh_init(struct neighbour *n)
3714 * The bonding ndo_neigh_setup is called at init time beofre any 3714 * The bonding ndo_neigh_setup is called at init time beofre any
3715 * slave exists. So we must declare proxy setup function which will 3715 * slave exists. So we must declare proxy setup function which will
3716 * be used at run time to resolve the actual slave neigh param setup. 3716 * be used at run time to resolve the actual slave neigh param setup.
3717 *
3718 * It's also called by master devices (such as vlans) to setup their
3719 * underlying devices. In that case - do nothing, we're already set up from
3720 * our init.
3717 */ 3721 */
3718static int bond_neigh_setup(struct net_device *dev, 3722static int bond_neigh_setup(struct net_device *dev,
3719 struct neigh_parms *parms) 3723 struct neigh_parms *parms)
3720{ 3724{
3721 parms->neigh_setup = bond_neigh_init; 3725 /* modify only our neigh_parms */
3726 if (parms->dev == dev)
3727 parms->neigh_setup = bond_neigh_init;
3722 3728
3723 return 0; 3729 return 0;
3724} 3730}
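bond_neigh_setup() is not only called for the bond's own neigh_parms; stacked devices such as VLANs on top of the bond reach it through their master, so the added parms->dev == dev check keeps the proxy neigh_setup from being installed on the upper device's parameters. A simplified sketch of the guard as it would appear in any ndo_neigh_setup implementation:

    static int example_neigh_setup(struct net_device *dev,
                                   struct neigh_parms *parms)
    {
            /* only touch neigh_parms that actually belong to this device */
            if (parms->dev == dev)
                    parms->neigh_setup = bond_neigh_init;
            return 0;
    }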
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb.c b/drivers/net/can/usb/peak_usb/pcan_usb.c
index 25723d8ee201..925ab8ec9329 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb.c
+++ b/drivers/net/can/usb/peak_usb/pcan_usb.c
@@ -649,7 +649,7 @@ static int pcan_usb_decode_data(struct pcan_usb_msg_context *mc, u8 status_len)
649 if ((mc->ptr + rec_len) > mc->end) 649 if ((mc->ptr + rec_len) > mc->end)
650 goto decode_failed; 650 goto decode_failed;
651 651
652 memcpy(cf->data, mc->ptr, rec_len); 652 memcpy(cf->data, mc->ptr, cf->can_dlc);
653 mc->ptr += rec_len; 653 mc->ptr += rec_len;
654 } 654 }
655 655
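In the PCAN-USB decoder above, rec_len comes from the device-provided record header while cf->can_dlc has already been derived from the frame's DLC; copying can_dlc bytes instead of rec_len keeps the memcpy within the 8-byte CAN payload even if the device reports an oversized record. A reduced sketch of the bounded copy (struct layout simplified, can_dlc assumed already clamped to 8):

    struct can_frame_sketch {
            u8 can_dlc;
            u8 data[8];
    };

    static void copy_can_payload(struct can_frame_sketch *cf,
                                 const u8 *ptr, unsigned int rec_len)
    {
            memcpy(cf->data, ptr, cf->can_dlc); /* bounded by the DLC, not rec_len */
            (void)rec_len;  /* rec_len still advances the parser in the real code */
    }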
diff --git a/drivers/net/ethernet/arc/emac_main.c b/drivers/net/ethernet/arc/emac_main.c
index f1b121ee5525..55d79cb53a79 100644
--- a/drivers/net/ethernet/arc/emac_main.c
+++ b/drivers/net/ethernet/arc/emac_main.c
@@ -199,7 +199,7 @@ static int arc_emac_rx(struct net_device *ndev, int budget)
199 struct arc_emac_priv *priv = netdev_priv(ndev); 199 struct arc_emac_priv *priv = netdev_priv(ndev);
200 unsigned int work_done; 200 unsigned int work_done;
201 201
202 for (work_done = 0; work_done <= budget; work_done++) { 202 for (work_done = 0; work_done < budget; work_done++) {
203 unsigned int *last_rx_bd = &priv->last_rx_bd; 203 unsigned int *last_rx_bd = &priv->last_rx_bd;
204 struct net_device_stats *stats = &priv->stats; 204 struct net_device_stats *stats = &priv->stats;
205 struct buffer_state *rx_buff = &priv->rx_buff[*last_rx_bd]; 205 struct buffer_state *rx_buff = &priv->rx_buff[*last_rx_bd];
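The arc emac RX loop above was off by one: with work_done <= budget the poll handler could process budget + 1 packets, while the NAPI contract expects at most budget and expects completion only when fewer than budget packets were consumed. A generic poll skeleton under those rules (the per-packet helper is a placeholder, not part of the driver):

    static int example_napi_poll(struct napi_struct *napi, int budget)
    {
            int work_done;

            for (work_done = 0; work_done < budget; work_done++) {
                    if (!rx_one_packet(napi))   /* hypothetical per-packet helper */
                            break;
            }
            if (work_done < budget)
                    napi_complete(napi);
            return work_done;
    }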
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
index d80e34b8285f..00b88cbfde25 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
@@ -1333,6 +1333,8 @@ enum {
1333 BNX2X_SP_RTNL_VFPF_CHANNEL_DOWN, 1333 BNX2X_SP_RTNL_VFPF_CHANNEL_DOWN,
1334 BNX2X_SP_RTNL_VFPF_STORM_RX_MODE, 1334 BNX2X_SP_RTNL_VFPF_STORM_RX_MODE,
1335 BNX2X_SP_RTNL_HYPERVISOR_VLAN, 1335 BNX2X_SP_RTNL_HYPERVISOR_VLAN,
1336 BNX2X_SP_RTNL_TX_STOP,
1337 BNX2X_SP_RTNL_TX_RESUME,
1336}; 1338};
1337 1339
1338struct bnx2x_prev_path_list { 1340struct bnx2x_prev_path_list {
@@ -1502,6 +1504,7 @@ struct bnx2x {
1502#define BC_SUPPORTS_DCBX_MSG_NON_PMF (1 << 21) 1504#define BC_SUPPORTS_DCBX_MSG_NON_PMF (1 << 21)
1503#define IS_VF_FLAG (1 << 22) 1505#define IS_VF_FLAG (1 << 22)
1504#define INTERRUPTS_ENABLED_FLAG (1 << 23) 1506#define INTERRUPTS_ENABLED_FLAG (1 << 23)
1507#define BC_SUPPORTS_RMMOD_CMD (1 << 24)
1505 1508
1506#define BP_NOMCP(bp) ((bp)->flags & NO_MCP_FLAG) 1509#define BP_NOMCP(bp) ((bp)->flags & NO_MCP_FLAG)
1507 1510
@@ -1830,6 +1833,8 @@ struct bnx2x {
1830 1833
1831 int fp_array_size; 1834 int fp_array_size;
1832 u32 dump_preset_idx; 1835 u32 dump_preset_idx;
1836 bool stats_started;
1837 struct semaphore stats_sema;
1833}; 1838};
1834 1839
1835/* Tx queues may be less or equal to Rx queues */ 1840/* Tx queues may be less or equal to Rx queues */
@@ -2451,4 +2456,6 @@ enum bnx2x_pci_bus_speed {
2451 BNX2X_PCI_LINK_SPEED_5000 = 5000, 2456 BNX2X_PCI_LINK_SPEED_5000 = 5000,
2452 BNX2X_PCI_LINK_SPEED_8000 = 8000 2457 BNX2X_PCI_LINK_SPEED_8000 = 8000
2453}; 2458};
2459
2460void bnx2x_set_local_cmng(struct bnx2x *bp);
2454#endif /* bnx2x.h */ 2461#endif /* bnx2x.h */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c
index 0c94df47e0e8..fcf2761d8828 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c
@@ -30,10 +30,8 @@
30#include "bnx2x_dcb.h" 30#include "bnx2x_dcb.h"
31 31
32/* forward declarations of dcbx related functions */ 32/* forward declarations of dcbx related functions */
33static int bnx2x_dcbx_stop_hw_tx(struct bnx2x *bp);
34static void bnx2x_pfc_set_pfc(struct bnx2x *bp); 33static void bnx2x_pfc_set_pfc(struct bnx2x *bp);
35static void bnx2x_dcbx_update_ets_params(struct bnx2x *bp); 34static void bnx2x_dcbx_update_ets_params(struct bnx2x *bp);
36static int bnx2x_dcbx_resume_hw_tx(struct bnx2x *bp);
37static void bnx2x_dcbx_get_ets_pri_pg_tbl(struct bnx2x *bp, 35static void bnx2x_dcbx_get_ets_pri_pg_tbl(struct bnx2x *bp,
38 u32 *set_configuration_ets_pg, 36 u32 *set_configuration_ets_pg,
39 u32 *pri_pg_tbl); 37 u32 *pri_pg_tbl);
@@ -425,30 +423,52 @@ static void bnx2x_pfc_set_pfc(struct bnx2x *bp)
425 bnx2x_pfc_clear(bp); 423 bnx2x_pfc_clear(bp);
426} 424}
427 425
428static int bnx2x_dcbx_stop_hw_tx(struct bnx2x *bp) 426int bnx2x_dcbx_stop_hw_tx(struct bnx2x *bp)
429{ 427{
430 struct bnx2x_func_state_params func_params = {NULL}; 428 struct bnx2x_func_state_params func_params = {NULL};
429 int rc;
431 430
432 func_params.f_obj = &bp->func_obj; 431 func_params.f_obj = &bp->func_obj;
433 func_params.cmd = BNX2X_F_CMD_TX_STOP; 432 func_params.cmd = BNX2X_F_CMD_TX_STOP;
434 433
434 __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
435 __set_bit(RAMROD_RETRY, &func_params.ramrod_flags);
436
435 DP(BNX2X_MSG_DCB, "STOP TRAFFIC\n"); 437 DP(BNX2X_MSG_DCB, "STOP TRAFFIC\n");
436 return bnx2x_func_state_change(bp, &func_params); 438
439 rc = bnx2x_func_state_change(bp, &func_params);
440 if (rc) {
441 BNX2X_ERR("Unable to hold traffic for HW configuration\n");
442 bnx2x_panic();
443 }
444
445 return rc;
437} 446}
438 447
439static int bnx2x_dcbx_resume_hw_tx(struct bnx2x *bp) 448int bnx2x_dcbx_resume_hw_tx(struct bnx2x *bp)
440{ 449{
441 struct bnx2x_func_state_params func_params = {NULL}; 450 struct bnx2x_func_state_params func_params = {NULL};
442 struct bnx2x_func_tx_start_params *tx_params = 451 struct bnx2x_func_tx_start_params *tx_params =
443 &func_params.params.tx_start; 452 &func_params.params.tx_start;
453 int rc;
444 454
445 func_params.f_obj = &bp->func_obj; 455 func_params.f_obj = &bp->func_obj;
446 func_params.cmd = BNX2X_F_CMD_TX_START; 456 func_params.cmd = BNX2X_F_CMD_TX_START;
447 457
458 __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
459 __set_bit(RAMROD_RETRY, &func_params.ramrod_flags);
460
448 bnx2x_dcbx_fw_struct(bp, tx_params); 461 bnx2x_dcbx_fw_struct(bp, tx_params);
449 462
450 DP(BNX2X_MSG_DCB, "START TRAFFIC\n"); 463 DP(BNX2X_MSG_DCB, "START TRAFFIC\n");
451 return bnx2x_func_state_change(bp, &func_params); 464
465 rc = bnx2x_func_state_change(bp, &func_params);
466 if (rc) {
467 BNX2X_ERR("Unable to resume traffic after HW configuration\n");
468 bnx2x_panic();
469 }
470
471 return rc;
452} 472}
453 473
454static void bnx2x_dcbx_2cos_limit_update_ets_config(struct bnx2x *bp) 474static void bnx2x_dcbx_2cos_limit_update_ets_config(struct bnx2x *bp)
@@ -744,7 +764,9 @@ void bnx2x_dcbx_set_params(struct bnx2x *bp, u32 state)
744 if (IS_MF(bp)) 764 if (IS_MF(bp))
745 bnx2x_link_sync_notify(bp); 765 bnx2x_link_sync_notify(bp);
746 766
747 bnx2x_dcbx_stop_hw_tx(bp); 767 set_bit(BNX2X_SP_RTNL_TX_STOP, &bp->sp_rtnl_state);
768
769 schedule_delayed_work(&bp->sp_rtnl_task, 0);
748 770
749 return; 771 return;
750 } 772 }
@@ -753,7 +775,13 @@ void bnx2x_dcbx_set_params(struct bnx2x *bp, u32 state)
753 bnx2x_pfc_set_pfc(bp); 775 bnx2x_pfc_set_pfc(bp);
754 776
755 bnx2x_dcbx_update_ets_params(bp); 777 bnx2x_dcbx_update_ets_params(bp);
756 bnx2x_dcbx_resume_hw_tx(bp); 778
779 /* ets may affect cmng configuration: reinit it in hw */
780 bnx2x_set_local_cmng(bp);
781
782 set_bit(BNX2X_SP_RTNL_TX_RESUME, &bp->sp_rtnl_state);
783
784 schedule_delayed_work(&bp->sp_rtnl_task, 0);
757 785
758 return; 786 return;
759 case BNX2X_DCBX_STATE_TX_RELEASED: 787 case BNX2X_DCBX_STATE_TX_RELEASED:
@@ -2363,21 +2391,24 @@ static u8 bnx2x_dcbnl_get_featcfg(struct net_device *netdev, int featid,
2363 case DCB_FEATCFG_ATTR_PG: 2391 case DCB_FEATCFG_ATTR_PG:
2364 if (bp->dcbx_local_feat.ets.enabled) 2392 if (bp->dcbx_local_feat.ets.enabled)
2365 *flags |= DCB_FEATCFG_ENABLE; 2393 *flags |= DCB_FEATCFG_ENABLE;
2366 if (bp->dcbx_error & DCBX_LOCAL_ETS_ERROR) 2394 if (bp->dcbx_error & (DCBX_LOCAL_ETS_ERROR |
2395 DCBX_REMOTE_MIB_ERROR))
2367 *flags |= DCB_FEATCFG_ERROR; 2396 *flags |= DCB_FEATCFG_ERROR;
2368 break; 2397 break;
2369 case DCB_FEATCFG_ATTR_PFC: 2398 case DCB_FEATCFG_ATTR_PFC:
2370 if (bp->dcbx_local_feat.pfc.enabled) 2399 if (bp->dcbx_local_feat.pfc.enabled)
2371 *flags |= DCB_FEATCFG_ENABLE; 2400 *flags |= DCB_FEATCFG_ENABLE;
2372 if (bp->dcbx_error & (DCBX_LOCAL_PFC_ERROR | 2401 if (bp->dcbx_error & (DCBX_LOCAL_PFC_ERROR |
2373 DCBX_LOCAL_PFC_MISMATCH)) 2402 DCBX_LOCAL_PFC_MISMATCH |
2403 DCBX_REMOTE_MIB_ERROR))
2374 *flags |= DCB_FEATCFG_ERROR; 2404 *flags |= DCB_FEATCFG_ERROR;
2375 break; 2405 break;
2376 case DCB_FEATCFG_ATTR_APP: 2406 case DCB_FEATCFG_ATTR_APP:
2377 if (bp->dcbx_local_feat.app.enabled) 2407 if (bp->dcbx_local_feat.app.enabled)
2378 *flags |= DCB_FEATCFG_ENABLE; 2408 *flags |= DCB_FEATCFG_ENABLE;
2379 if (bp->dcbx_error & (DCBX_LOCAL_APP_ERROR | 2409 if (bp->dcbx_error & (DCBX_LOCAL_APP_ERROR |
2380 DCBX_LOCAL_APP_MISMATCH)) 2410 DCBX_LOCAL_APP_MISMATCH |
2411 DCBX_REMOTE_MIB_ERROR))
2381 *flags |= DCB_FEATCFG_ERROR; 2412 *flags |= DCB_FEATCFG_ERROR;
2382 break; 2413 break;
2383 default: 2414 default:
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.h
index 125bd1b6586f..804b8f64463e 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.h
@@ -199,4 +199,7 @@ extern const struct dcbnl_rtnl_ops bnx2x_dcbnl_ops;
199int bnx2x_dcbnl_update_applist(struct bnx2x *bp, bool delall); 199int bnx2x_dcbnl_update_applist(struct bnx2x *bp, bool delall);
200#endif /* BCM_DCBNL */ 200#endif /* BCM_DCBNL */
201 201
202int bnx2x_dcbx_stop_hw_tx(struct bnx2x *bp);
203int bnx2x_dcbx_resume_hw_tx(struct bnx2x *bp);
204
202#endif /* BNX2X_DCB_H */ 205#endif /* BNX2X_DCB_H */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
index 5018e52ae2ad..32767f6aa33f 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
@@ -1300,6 +1300,9 @@ struct drv_func_mb {
1300 1300
1301 #define DRV_MSG_CODE_EEE_RESULTS_ACK 0xda000000 1301 #define DRV_MSG_CODE_EEE_RESULTS_ACK 0xda000000
1302 1302
1303 #define DRV_MSG_CODE_RMMOD 0xdb000000
1304 #define REQ_BC_VER_4_RMMOD_CMD 0x0007080f
1305
1303 #define DRV_MSG_CODE_SET_MF_BW 0xe0000000 1306 #define DRV_MSG_CODE_SET_MF_BW 0xe0000000
1304 #define REQ_BC_VER_4_SET_MF_BW 0x00060202 1307 #define REQ_BC_VER_4_SET_MF_BW 0x00060202
1305 #define DRV_MSG_CODE_SET_MF_BW_ACK 0xe1000000 1308 #define DRV_MSG_CODE_SET_MF_BW_ACK 0xe1000000
@@ -1372,6 +1375,8 @@ struct drv_func_mb {
1372 1375
1373 #define FW_MSG_CODE_EEE_RESULS_ACK 0xda100000 1376 #define FW_MSG_CODE_EEE_RESULS_ACK 0xda100000
1374 1377
1378 #define FW_MSG_CODE_RMMOD_ACK 0xdb100000
1379
1375 #define FW_MSG_CODE_SET_MF_BW_SENT 0xe0000000 1380 #define FW_MSG_CODE_SET_MF_BW_SENT 0xe0000000
1376 #define FW_MSG_CODE_SET_MF_BW_DONE 0xe1000000 1381 #define FW_MSG_CODE_SET_MF_BW_DONE 0xe1000000
1377 1382
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index e06186c305d8..8bdc8b973007 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -2261,6 +2261,23 @@ static void bnx2x_set_requested_fc(struct bnx2x *bp)
2261 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH; 2261 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
2262} 2262}
2263 2263
2264static void bnx2x_init_dropless_fc(struct bnx2x *bp)
2265{
2266 u32 pause_enabled = 0;
2267
2268 if (!CHIP_IS_E1(bp) && bp->dropless_fc && bp->link_vars.link_up) {
2269 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
2270 pause_enabled = 1;
2271
2272 REG_WR(bp, BAR_USTRORM_INTMEM +
2273 USTORM_ETH_PAUSE_ENABLED_OFFSET(BP_PORT(bp)),
2274 pause_enabled);
2275 }
2276
2277 DP(NETIF_MSG_IFUP | NETIF_MSG_LINK, "dropless_fc is %s\n",
2278 pause_enabled ? "enabled" : "disabled");
2279}
2280
2264int bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode) 2281int bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
2265{ 2282{
2266 int rc, cfx_idx = bnx2x_get_link_cfg_idx(bp); 2283 int rc, cfx_idx = bnx2x_get_link_cfg_idx(bp);
@@ -2294,6 +2311,8 @@ int bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
2294 2311
2295 bnx2x_release_phy_lock(bp); 2312 bnx2x_release_phy_lock(bp);
2296 2313
2314 bnx2x_init_dropless_fc(bp);
2315
2297 bnx2x_calc_fc_adv(bp); 2316 bnx2x_calc_fc_adv(bp);
2298 2317
2299 if (bp->link_vars.link_up) { 2318 if (bp->link_vars.link_up) {
@@ -2315,6 +2334,8 @@ void bnx2x_link_set(struct bnx2x *bp)
2315 bnx2x_phy_init(&bp->link_params, &bp->link_vars); 2334 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
2316 bnx2x_release_phy_lock(bp); 2335 bnx2x_release_phy_lock(bp);
2317 2336
2337 bnx2x_init_dropless_fc(bp);
2338
2318 bnx2x_calc_fc_adv(bp); 2339 bnx2x_calc_fc_adv(bp);
2319 } else 2340 } else
2320 BNX2X_ERR("Bootcode is missing - can not set link\n"); 2341 BNX2X_ERR("Bootcode is missing - can not set link\n");
@@ -2476,7 +2497,7 @@ static void bnx2x_cmng_fns_init(struct bnx2x *bp, u8 read_cfg, u8 cmng_type)
2476 2497
2477 input.port_rate = bp->link_vars.line_speed; 2498 input.port_rate = bp->link_vars.line_speed;
2478 2499
2479 if (cmng_type == CMNG_FNS_MINMAX) { 2500 if (cmng_type == CMNG_FNS_MINMAX && input.port_rate) {
2480 int vn; 2501 int vn;
2481 2502
2482 /* read mf conf from shmem */ 2503 /* read mf conf from shmem */
@@ -2533,6 +2554,21 @@ static void storm_memset_cmng(struct bnx2x *bp,
2533 } 2554 }
2534} 2555}
2535 2556
2557/* init cmng mode in HW according to local configuration */
2558void bnx2x_set_local_cmng(struct bnx2x *bp)
2559{
2560 int cmng_fns = bnx2x_get_cmng_fns_mode(bp);
2561
2562 if (cmng_fns != CMNG_FNS_NONE) {
2563 bnx2x_cmng_fns_init(bp, false, cmng_fns);
2564 storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));
2565 } else {
2566 /* rate shaping and fairness are disabled */
2567 DP(NETIF_MSG_IFUP,
2568 "single function mode without fairness\n");
2569 }
2570}
2571
2536/* This function is called upon link interrupt */ 2572/* This function is called upon link interrupt */
2537static void bnx2x_link_attn(struct bnx2x *bp) 2573static void bnx2x_link_attn(struct bnx2x *bp)
2538{ 2574{
@@ -2541,20 +2577,9 @@ static void bnx2x_link_attn(struct bnx2x *bp)
2541 2577
2542 bnx2x_link_update(&bp->link_params, &bp->link_vars); 2578 bnx2x_link_update(&bp->link_params, &bp->link_vars);
2543 2579
2544 if (bp->link_vars.link_up) { 2580 bnx2x_init_dropless_fc(bp);
2545 2581
2546 /* dropless flow control */ 2582 if (bp->link_vars.link_up) {
2547 if (!CHIP_IS_E1(bp) && bp->dropless_fc) {
2548 int port = BP_PORT(bp);
2549 u32 pause_enabled = 0;
2550
2551 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
2552 pause_enabled = 1;
2553
2554 REG_WR(bp, BAR_USTRORM_INTMEM +
2555 USTORM_ETH_PAUSE_ENABLED_OFFSET(port),
2556 pause_enabled);
2557 }
2558 2583
2559 if (bp->link_vars.mac_type != MAC_TYPE_EMAC) { 2584 if (bp->link_vars.mac_type != MAC_TYPE_EMAC) {
2560 struct host_port_stats *pstats; 2585 struct host_port_stats *pstats;
@@ -2568,17 +2593,8 @@ static void bnx2x_link_attn(struct bnx2x *bp)
2568 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP); 2593 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2569 } 2594 }
2570 2595
2571 if (bp->link_vars.link_up && bp->link_vars.line_speed) { 2596 if (bp->link_vars.link_up && bp->link_vars.line_speed)
2572 int cmng_fns = bnx2x_get_cmng_fns_mode(bp); 2597 bnx2x_set_local_cmng(bp);
2573
2574 if (cmng_fns != CMNG_FNS_NONE) {
2575 bnx2x_cmng_fns_init(bp, false, cmng_fns);
2576 storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));
2577 } else
2578 /* rate shaping and fairness are disabled */
2579 DP(NETIF_MSG_IFUP,
2580 "single function mode without fairness\n");
2581 }
2582 2598
2583 __bnx2x_link_report(bp); 2599 __bnx2x_link_report(bp);
2584 2600
@@ -9639,6 +9655,12 @@ sp_rtnl_not_reset:
9639 &bp->sp_rtnl_state)) 9655 &bp->sp_rtnl_state))
9640 bnx2x_pf_set_vfs_vlan(bp); 9656 bnx2x_pf_set_vfs_vlan(bp);
9641 9657
9658 if (test_and_clear_bit(BNX2X_SP_RTNL_TX_STOP, &bp->sp_rtnl_state))
9659 bnx2x_dcbx_stop_hw_tx(bp);
9660
9661 if (test_and_clear_bit(BNX2X_SP_RTNL_TX_RESUME, &bp->sp_rtnl_state))
9662 bnx2x_dcbx_resume_hw_tx(bp);
9663
9642 /* work which needs rtnl lock not-taken (as it takes the lock itself and 9664 /* work which needs rtnl lock not-taken (as it takes the lock itself and
9643 * can be called from other contexts as well) 9665 * can be called from other contexts as well)
9644 */ 9666 */
@@ -10362,6 +10384,10 @@ static void bnx2x_get_common_hwinfo(struct bnx2x *bp)
10362 10384
10363 bp->flags |= (val >= REQ_BC_VER_4_DCBX_ADMIN_MSG_NON_PMF) ? 10385 bp->flags |= (val >= REQ_BC_VER_4_DCBX_ADMIN_MSG_NON_PMF) ?
10364 BC_SUPPORTS_DCBX_MSG_NON_PMF : 0; 10386 BC_SUPPORTS_DCBX_MSG_NON_PMF : 0;
10387
10388 bp->flags |= (val >= REQ_BC_VER_4_RMMOD_CMD) ?
10389 BC_SUPPORTS_RMMOD_CMD : 0;
10390
10365 boot_mode = SHMEM_RD(bp, 10391 boot_mode = SHMEM_RD(bp,
10366 dev_info.port_feature_config[BP_PORT(bp)].mba_config) & 10392 dev_info.port_feature_config[BP_PORT(bp)].mba_config) &
10367 PORT_FEATURE_MBA_BOOT_AGENT_TYPE_MASK; 10393 PORT_FEATURE_MBA_BOOT_AGENT_TYPE_MASK;
@@ -11137,6 +11163,9 @@ static bool bnx2x_get_dropless_info(struct bnx2x *bp)
11137 int tmp; 11163 int tmp;
11138 u32 cfg; 11164 u32 cfg;
11139 11165
11166 if (IS_VF(bp))
11167 return 0;
11168
11140 if (IS_MF(bp) && !CHIP_IS_E1x(bp)) { 11169 if (IS_MF(bp) && !CHIP_IS_E1x(bp)) {
11141 /* Take function: tmp = func */ 11170 /* Take function: tmp = func */
11142 tmp = BP_ABS_FUNC(bp); 11171 tmp = BP_ABS_FUNC(bp);
@@ -11524,6 +11553,7 @@ static int bnx2x_init_bp(struct bnx2x *bp)
11524 mutex_init(&bp->port.phy_mutex); 11553 mutex_init(&bp->port.phy_mutex);
11525 mutex_init(&bp->fw_mb_mutex); 11554 mutex_init(&bp->fw_mb_mutex);
11526 spin_lock_init(&bp->stats_lock); 11555 spin_lock_init(&bp->stats_lock);
11556 sema_init(&bp->stats_sema, 1);
11527 11557
11528 INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task); 11558 INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
11529 INIT_DELAYED_WORK(&bp->sp_rtnl_task, bnx2x_sp_rtnl_task); 11559 INIT_DELAYED_WORK(&bp->sp_rtnl_task, bnx2x_sp_rtnl_task);
@@ -12817,13 +12847,17 @@ static void __bnx2x_remove(struct pci_dev *pdev,
12817 bnx2x_dcbnl_update_applist(bp, true); 12847 bnx2x_dcbnl_update_applist(bp, true);
12818#endif 12848#endif
12819 12849
12850 if (IS_PF(bp) &&
12851 !BP_NOMCP(bp) &&
12852 (bp->flags & BC_SUPPORTS_RMMOD_CMD))
12853 bnx2x_fw_command(bp, DRV_MSG_CODE_RMMOD, 0);
12854
12820 /* Close the interface - either directly or implicitly */ 12855 /* Close the interface - either directly or implicitly */
12821 if (remove_netdev) { 12856 if (remove_netdev) {
12822 unregister_netdev(dev); 12857 unregister_netdev(dev);
12823 } else { 12858 } else {
12824 rtnl_lock(); 12859 rtnl_lock();
12825 if (netif_running(dev)) 12860 dev_close(dev);
12826 bnx2x_close(dev);
12827 rtnl_unlock(); 12861 rtnl_unlock();
12828 } 12862 }
12829 12863
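
Editorial note (not part of the patch): the bnx2x_main.c hunks above stop calling the DCBX stop/resume helpers directly from the attention path and instead set a flag and kick the existing sp_rtnl delayed work, which later runs them in process context under rtnl. A minimal, hypothetical sketch of that defer-to-work pattern follows; the my_* names and bit 0 are placeholders, not driver symbols.

#include <linux/workqueue.h>
#include <linux/bitops.h>
#include <linux/printk.h>

static unsigned long my_pending;	/* event bits, set from the fast path */
static struct delayed_work my_work;

static void my_work_fn(struct work_struct *work)
{
	/* runs in process context, may sleep and take locks */
	if (test_and_clear_bit(0, &my_pending))
		pr_info("handling deferred event 0\n");
}

/* called from a context that must not block */
static void my_raise_event(void)
{
	set_bit(0, &my_pending);
	schedule_delayed_work(&my_work, 0);
}

static void my_setup(void)
{
	INIT_DELAYED_WORK(&my_work, my_work_fn);
}
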
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
index 95861efb5051..ad83f4b48777 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
@@ -1747,11 +1747,8 @@ void bnx2x_iov_init_dq(struct bnx2x *bp)
1747 1747
1748void bnx2x_iov_init_dmae(struct bnx2x *bp) 1748void bnx2x_iov_init_dmae(struct bnx2x *bp)
1749{ 1749{
1750 DP(BNX2X_MSG_IOV, "SRIOV is %s\n", IS_SRIOV(bp) ? "ON" : "OFF"); 1750 if (pci_find_ext_capability(bp->pdev, PCI_EXT_CAP_ID_SRIOV))
1751 if (!IS_SRIOV(bp)) 1751 REG_WR(bp, DMAE_REG_BACKWARD_COMP_EN, 0);
1752 return;
1753
1754 REG_WR(bp, DMAE_REG_BACKWARD_COMP_EN, 0);
1755} 1752}
1756 1753
1757static int bnx2x_vf_bus(struct bnx2x *bp, int vfid) 1754static int bnx2x_vf_bus(struct bnx2x *bp, int vfid)
@@ -3084,8 +3081,9 @@ void bnx2x_disable_sriov(struct bnx2x *bp)
3084 pci_disable_sriov(bp->pdev); 3081 pci_disable_sriov(bp->pdev);
3085} 3082}
3086 3083
3087static int bnx2x_vf_ndo_sanity(struct bnx2x *bp, int vfidx, 3084static int bnx2x_vf_ndo_prep(struct bnx2x *bp, int vfidx,
3088 struct bnx2x_virtf *vf) 3085 struct bnx2x_virtf **vf,
3086 struct pf_vf_bulletin_content **bulletin)
3089{ 3087{
3090 if (bp->state != BNX2X_STATE_OPEN) { 3088 if (bp->state != BNX2X_STATE_OPEN) {
3091 BNX2X_ERR("vf ndo called though PF is down\n"); 3089 BNX2X_ERR("vf ndo called though PF is down\n");
@@ -3103,12 +3101,22 @@ static int bnx2x_vf_ndo_sanity(struct bnx2x *bp, int vfidx,
3103 return -EINVAL; 3101 return -EINVAL;
3104 } 3102 }
3105 3103
3106 if (!vf) { 3104 /* init members */
3105 *vf = BP_VF(bp, vfidx);
3106 *bulletin = BP_VF_BULLETIN(bp, vfidx);
3107
3108 if (!*vf) {
3107 BNX2X_ERR("vf ndo called but vf was null. vfidx was %d\n", 3109 BNX2X_ERR("vf ndo called but vf was null. vfidx was %d\n",
3108 vfidx); 3110 vfidx);
3109 return -EINVAL; 3111 return -EINVAL;
3110 } 3112 }
3111 3113
3114 if (!*bulletin) {
3115 BNX2X_ERR("vf ndo called but Bulletin Board struct is null. vfidx was %d\n",
3116 vfidx);
3117 return -EINVAL;
3118 }
3119
3112 return 0; 3120 return 0;
3113} 3121}
3114 3122
@@ -3116,17 +3124,19 @@ int bnx2x_get_vf_config(struct net_device *dev, int vfidx,
3116 struct ifla_vf_info *ivi) 3124 struct ifla_vf_info *ivi)
3117{ 3125{
3118 struct bnx2x *bp = netdev_priv(dev); 3126 struct bnx2x *bp = netdev_priv(dev);
3119 struct bnx2x_virtf *vf = BP_VF(bp, vfidx); 3127 struct bnx2x_virtf *vf = NULL;
3120 struct bnx2x_vlan_mac_obj *mac_obj = &bnx2x_vfq(vf, 0, mac_obj); 3128 struct pf_vf_bulletin_content *bulletin = NULL;
3121 struct bnx2x_vlan_mac_obj *vlan_obj = &bnx2x_vfq(vf, 0, vlan_obj); 3129 struct bnx2x_vlan_mac_obj *mac_obj;
3122 struct pf_vf_bulletin_content *bulletin = BP_VF_BULLETIN(bp, vfidx); 3130 struct bnx2x_vlan_mac_obj *vlan_obj;
3123 int rc; 3131 int rc;
3124 3132
3125 /* sanity */ 3133 /* sanity and init */
3126 rc = bnx2x_vf_ndo_sanity(bp, vfidx, vf); 3134 rc = bnx2x_vf_ndo_prep(bp, vfidx, &vf, &bulletin);
3127 if (rc) 3135 if (rc)
3128 return rc; 3136 return rc;
3129 if (!mac_obj || !vlan_obj || !bulletin) { 3137 mac_obj = &bnx2x_vfq(vf, 0, mac_obj);
3138 vlan_obj = &bnx2x_vfq(vf, 0, vlan_obj);
3139 if (!mac_obj || !vlan_obj) {
3130 BNX2X_ERR("VF partially initialized\n"); 3140 BNX2X_ERR("VF partially initialized\n");
3131 return -EINVAL; 3141 return -EINVAL;
3132 } 3142 }
@@ -3183,11 +3193,11 @@ int bnx2x_set_vf_mac(struct net_device *dev, int vfidx, u8 *mac)
3183{ 3193{
3184 struct bnx2x *bp = netdev_priv(dev); 3194 struct bnx2x *bp = netdev_priv(dev);
3185 int rc, q_logical_state; 3195 int rc, q_logical_state;
3186 struct bnx2x_virtf *vf = BP_VF(bp, vfidx); 3196 struct bnx2x_virtf *vf = NULL;
3187 struct pf_vf_bulletin_content *bulletin = BP_VF_BULLETIN(bp, vfidx); 3197 struct pf_vf_bulletin_content *bulletin = NULL;
3188 3198
3189 /* sanity */ 3199 /* sanity and init */
3190 rc = bnx2x_vf_ndo_sanity(bp, vfidx, vf); 3200 rc = bnx2x_vf_ndo_prep(bp, vfidx, &vf, &bulletin);
3191 if (rc) 3201 if (rc)
3192 return rc; 3202 return rc;
3193 if (!is_valid_ether_addr(mac)) { 3203 if (!is_valid_ether_addr(mac)) {
@@ -3249,11 +3259,11 @@ int bnx2x_set_vf_vlan(struct net_device *dev, int vfidx, u16 vlan, u8 qos)
3249{ 3259{
3250 struct bnx2x *bp = netdev_priv(dev); 3260 struct bnx2x *bp = netdev_priv(dev);
3251 int rc, q_logical_state; 3261 int rc, q_logical_state;
3252 struct bnx2x_virtf *vf = BP_VF(bp, vfidx); 3262 struct bnx2x_virtf *vf = NULL;
3253 struct pf_vf_bulletin_content *bulletin = BP_VF_BULLETIN(bp, vfidx); 3263 struct pf_vf_bulletin_content *bulletin = NULL;
3254 3264
3255 /* sanity */ 3265 /* sanity and init */
3256 rc = bnx2x_vf_ndo_sanity(bp, vfidx, vf); 3266 rc = bnx2x_vf_ndo_prep(bp, vfidx, &vf, &bulletin);
3257 if (rc) 3267 if (rc)
3258 return rc; 3268 return rc;
3259 3269
@@ -3463,7 +3473,7 @@ int bnx2x_vf_pci_alloc(struct bnx2x *bp)
3463alloc_mem_err: 3473alloc_mem_err:
3464 BNX2X_PCI_FREE(bp->vf2pf_mbox, bp->vf2pf_mbox_mapping, 3474 BNX2X_PCI_FREE(bp->vf2pf_mbox, bp->vf2pf_mbox_mapping,
3465 sizeof(struct bnx2x_vf_mbx_msg)); 3475 sizeof(struct bnx2x_vf_mbx_msg));
3466 BNX2X_PCI_FREE(bp->vf2pf_mbox, bp->vf2pf_mbox_mapping, 3476 BNX2X_PCI_FREE(bp->vf2pf_mbox, bp->pf2vf_bulletin_mapping,
3467 sizeof(union pf_vf_bulletin)); 3477 sizeof(union pf_vf_bulletin));
3468 return -ENOMEM; 3478 return -ENOMEM;
3469} 3479}
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
index 98366abd02bd..d63d1327b051 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
@@ -221,7 +221,8 @@ static int bnx2x_stats_comp(struct bnx2x *bp)
221 * Statistics service functions 221 * Statistics service functions
222 */ 222 */
223 223
224static void bnx2x_stats_pmf_update(struct bnx2x *bp) 224/* should be called under stats_sema */
225static void __bnx2x_stats_pmf_update(struct bnx2x *bp)
225{ 226{
226 struct dmae_command *dmae; 227 struct dmae_command *dmae;
227 u32 opcode; 228 u32 opcode;
@@ -518,7 +519,8 @@ static void bnx2x_func_stats_init(struct bnx2x *bp)
518 *stats_comp = 0; 519 *stats_comp = 0;
519} 520}
520 521
521static void bnx2x_stats_start(struct bnx2x *bp) 522/* should be called under stats_sema */
523static void __bnx2x_stats_start(struct bnx2x *bp)
522{ 524{
523 /* vfs travel through here as part of the statistics FSM, but no action 525 /* vfs travel through here as part of the statistics FSM, but no action
524 * is required 526 * is required
@@ -534,13 +536,34 @@ static void bnx2x_stats_start(struct bnx2x *bp)
534 536
535 bnx2x_hw_stats_post(bp); 537 bnx2x_hw_stats_post(bp);
536 bnx2x_storm_stats_post(bp); 538 bnx2x_storm_stats_post(bp);
539
540 bp->stats_started = true;
541}
542
543static void bnx2x_stats_start(struct bnx2x *bp)
544{
545 if (down_timeout(&bp->stats_sema, HZ/10))
546 BNX2X_ERR("Unable to acquire stats lock\n");
547 __bnx2x_stats_start(bp);
548 up(&bp->stats_sema);
537} 549}
538 550
539static void bnx2x_stats_pmf_start(struct bnx2x *bp) 551static void bnx2x_stats_pmf_start(struct bnx2x *bp)
540{ 552{
553 if (down_timeout(&bp->stats_sema, HZ/10))
554 BNX2X_ERR("Unable to acquire stats lock\n");
541 bnx2x_stats_comp(bp); 555 bnx2x_stats_comp(bp);
542 bnx2x_stats_pmf_update(bp); 556 __bnx2x_stats_pmf_update(bp);
543 bnx2x_stats_start(bp); 557 __bnx2x_stats_start(bp);
558 up(&bp->stats_sema);
559}
560
561static void bnx2x_stats_pmf_update(struct bnx2x *bp)
562{
563 if (down_timeout(&bp->stats_sema, HZ/10))
564 BNX2X_ERR("Unable to acquire stats lock\n");
565 __bnx2x_stats_pmf_update(bp);
566 up(&bp->stats_sema);
544} 567}
545 568
546static void bnx2x_stats_restart(struct bnx2x *bp) 569static void bnx2x_stats_restart(struct bnx2x *bp)
@@ -550,8 +573,11 @@ static void bnx2x_stats_restart(struct bnx2x *bp)
550 */ 573 */
551 if (IS_VF(bp)) 574 if (IS_VF(bp))
552 return; 575 return;
576 if (down_timeout(&bp->stats_sema, HZ/10))
577 BNX2X_ERR("Unable to acquire stats lock\n");
553 bnx2x_stats_comp(bp); 578 bnx2x_stats_comp(bp);
554 bnx2x_stats_start(bp); 579 __bnx2x_stats_start(bp);
580 up(&bp->stats_sema);
555} 581}
556 582
557static void bnx2x_bmac_stats_update(struct bnx2x *bp) 583static void bnx2x_bmac_stats_update(struct bnx2x *bp)
@@ -888,9 +914,7 @@ static int bnx2x_storm_stats_validate_counters(struct bnx2x *bp)
888 /* Make sure we use the value of the counter 914 /* Make sure we use the value of the counter
889 * used for sending the last stats ramrod. 915 * used for sending the last stats ramrod.
890 */ 916 */
891 spin_lock_bh(&bp->stats_lock);
892 cur_stats_counter = bp->stats_counter - 1; 917 cur_stats_counter = bp->stats_counter - 1;
893 spin_unlock_bh(&bp->stats_lock);
894 918
895 /* are storm stats valid? */ 919 /* are storm stats valid? */
896 if (le16_to_cpu(counters->xstats_counter) != cur_stats_counter) { 920 if (le16_to_cpu(counters->xstats_counter) != cur_stats_counter) {
@@ -1227,12 +1251,18 @@ static void bnx2x_stats_update(struct bnx2x *bp)
1227{ 1251{
1228 u32 *stats_comp = bnx2x_sp(bp, stats_comp); 1252 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
1229 1253
1230 if (bnx2x_edebug_stats_stopped(bp)) 1254 /* we run update from timer context, so give up
1255 * if somebody is in the middle of transition
1256 */
1257 if (down_trylock(&bp->stats_sema))
1231 return; 1258 return;
1232 1259
1260 if (bnx2x_edebug_stats_stopped(bp) || !bp->stats_started)
1261 goto out;
1262
1233 if (IS_PF(bp)) { 1263 if (IS_PF(bp)) {
1234 if (*stats_comp != DMAE_COMP_VAL) 1264 if (*stats_comp != DMAE_COMP_VAL)
1235 return; 1265 goto out;
1236 1266
1237 if (bp->port.pmf) 1267 if (bp->port.pmf)
1238 bnx2x_hw_stats_update(bp); 1268 bnx2x_hw_stats_update(bp);
@@ -1242,7 +1272,7 @@ static void bnx2x_stats_update(struct bnx2x *bp)
1242 BNX2X_ERR("storm stats were not updated for 3 times\n"); 1272 BNX2X_ERR("storm stats were not updated for 3 times\n");
1243 bnx2x_panic(); 1273 bnx2x_panic();
1244 } 1274 }
1245 return; 1275 goto out;
1246 } 1276 }
1247 } else { 1277 } else {
1248 /* vf doesn't collect HW statistics, and doesn't get completions 1278 /* vf doesn't collect HW statistics, and doesn't get completions
@@ -1256,7 +1286,7 @@ static void bnx2x_stats_update(struct bnx2x *bp)
1256 1286
1257 /* vf is done */ 1287 /* vf is done */
1258 if (IS_VF(bp)) 1288 if (IS_VF(bp))
1259 return; 1289 goto out;
1260 1290
1261 if (netif_msg_timer(bp)) { 1291 if (netif_msg_timer(bp)) {
1262 struct bnx2x_eth_stats *estats = &bp->eth_stats; 1292 struct bnx2x_eth_stats *estats = &bp->eth_stats;
@@ -1267,6 +1297,9 @@ static void bnx2x_stats_update(struct bnx2x *bp)
1267 1297
1268 bnx2x_hw_stats_post(bp); 1298 bnx2x_hw_stats_post(bp);
1269 bnx2x_storm_stats_post(bp); 1299 bnx2x_storm_stats_post(bp);
1300
1301out:
1302 up(&bp->stats_sema);
1270} 1303}
1271 1304
1272static void bnx2x_port_stats_stop(struct bnx2x *bp) 1305static void bnx2x_port_stats_stop(struct bnx2x *bp)
@@ -1332,6 +1365,11 @@ static void bnx2x_stats_stop(struct bnx2x *bp)
1332{ 1365{
1333 int update = 0; 1366 int update = 0;
1334 1367
1368 if (down_timeout(&bp->stats_sema, HZ/10))
1369 BNX2X_ERR("Unable to acquire stats lock\n");
1370
1371 bp->stats_started = false;
1372
1335 bnx2x_stats_comp(bp); 1373 bnx2x_stats_comp(bp);
1336 1374
1337 if (bp->port.pmf) 1375 if (bp->port.pmf)
@@ -1348,6 +1386,8 @@ static void bnx2x_stats_stop(struct bnx2x *bp)
1348 bnx2x_hw_stats_post(bp); 1386 bnx2x_hw_stats_post(bp);
1349 bnx2x_stats_comp(bp); 1387 bnx2x_stats_comp(bp);
1350 } 1388 }
1389
1390 up(&bp->stats_sema);
1351} 1391}
1352 1392
1353static void bnx2x_stats_do_nothing(struct bnx2x *bp) 1393static void bnx2x_stats_do_nothing(struct bnx2x *bp)
@@ -1376,15 +1416,17 @@ static const struct {
1376void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event) 1416void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
1377{ 1417{
1378 enum bnx2x_stats_state state; 1418 enum bnx2x_stats_state state;
1419 void (*action)(struct bnx2x *bp);
1379 if (unlikely(bp->panic)) 1420 if (unlikely(bp->panic))
1380 return; 1421 return;
1381 1422
1382 spin_lock_bh(&bp->stats_lock); 1423 spin_lock_bh(&bp->stats_lock);
1383 state = bp->stats_state; 1424 state = bp->stats_state;
1384 bp->stats_state = bnx2x_stats_stm[state][event].next_state; 1425 bp->stats_state = bnx2x_stats_stm[state][event].next_state;
1426 action = bnx2x_stats_stm[state][event].action;
1385 spin_unlock_bh(&bp->stats_lock); 1427 spin_unlock_bh(&bp->stats_lock);
1386 1428
1387 bnx2x_stats_stm[state][event].action(bp); 1429 action(bp);
1388 1430
1389 if ((event != STATS_EVENT_UPDATE) || netif_msg_timer(bp)) 1431 if ((event != STATS_EVENT_UPDATE) || netif_msg_timer(bp))
1390 DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n", 1432 DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
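
Editorial note (not part of the patch): the bnx2x_stats.c hunks serialize statistics state-machine transitions with a semaphore; sleeping paths take it with down_timeout(), while the timer-driven update uses down_trylock() and simply gives up if a transition is in progress. A rough sketch of that locking shape, under hypothetical my_* names (error handling simplified relative to the driver):

#include <linux/semaphore.h>
#include <linux/jiffies.h>	/* HZ */
#include <linux/printk.h>

static struct semaphore my_stats_sema;
static bool my_stats_started;

static void my_stats_init(void)
{
	sema_init(&my_stats_sema, 1);
}

/* transition paths (start/stop/pmf update) may sleep briefly */
static void my_stats_start(void)
{
	if (down_timeout(&my_stats_sema, HZ / 10)) {
		pr_err("unable to acquire stats lock\n");
		return;
	}
	my_stats_started = true;	/* ... post the real start ramrods ... */
	up(&my_stats_sema);
}

/* timer context: never block, skip the update if someone holds the lock */
static void my_stats_timer_update(void)
{
	if (down_trylock(&my_stats_sema))
		return;
	if (my_stats_started) {
		/* ... collect and publish counters ... */
	}
	up(&my_stats_sema);
}
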
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index ddebc7a5dda0..0da2214ef1b9 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -17796,8 +17796,10 @@ static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
17796 17796
17797done: 17797done:
17798 if (state == pci_channel_io_perm_failure) { 17798 if (state == pci_channel_io_perm_failure) {
17799 tg3_napi_enable(tp); 17799 if (netdev) {
17800 dev_close(netdev); 17800 tg3_napi_enable(tp);
17801 dev_close(netdev);
17802 }
17801 err = PCI_ERS_RESULT_DISCONNECT; 17803 err = PCI_ERS_RESULT_DISCONNECT;
17802 } else { 17804 } else {
17803 pci_disable_device(pdev); 17805 pci_disable_device(pdev);
@@ -17827,7 +17829,8 @@ static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
17827 rtnl_lock(); 17829 rtnl_lock();
17828 17830
17829 if (pci_enable_device(pdev)) { 17831 if (pci_enable_device(pdev)) {
17830 netdev_err(netdev, "Cannot re-enable PCI device after reset.\n"); 17832 dev_err(&pdev->dev,
17833 "Cannot re-enable PCI device after reset.\n");
17831 goto done; 17834 goto done;
17832 } 17835 }
17833 17836
@@ -17835,7 +17838,7 @@ static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
17835 pci_restore_state(pdev); 17838 pci_restore_state(pdev);
17836 pci_save_state(pdev); 17839 pci_save_state(pdev);
17837 17840
17838 if (!netif_running(netdev)) { 17841 if (!netdev || !netif_running(netdev)) {
17839 rc = PCI_ERS_RESULT_RECOVERED; 17842 rc = PCI_ERS_RESULT_RECOVERED;
17840 goto done; 17843 goto done;
17841 } 17844 }
@@ -17847,7 +17850,7 @@ static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
17847 rc = PCI_ERS_RESULT_RECOVERED; 17850 rc = PCI_ERS_RESULT_RECOVERED;
17848 17851
17849done: 17852done:
17850 if (rc != PCI_ERS_RESULT_RECOVERED && netif_running(netdev)) { 17853 if (rc != PCI_ERS_RESULT_RECOVERED && netdev && netif_running(netdev)) {
17851 tg3_napi_enable(tp); 17854 tg3_napi_enable(tp);
17852 dev_close(netdev); 17855 dev_close(netdev);
17853 } 17856 }
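
Editorial note (not part of the patch): the tg3 hunks make the PCI error callbacks tolerate a NULL netdev, e.g. when the error arrives before probe has finished. A hedged sketch of that guard with a hypothetical driver whose drvdata is the net_device:

#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>

static pci_ers_result_t my_io_error_detected(struct pci_dev *pdev,
					     pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET;

	rtnl_lock();
	if (state == pci_channel_io_perm_failure) {
		if (netdev)		/* may be NULL on early failures */
			dev_close(netdev);
		err = PCI_ERS_RESULT_DISCONNECT;
	} else {
		pci_disable_device(pdev);
	}
	rtnl_unlock();

	return err;
}
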
diff --git a/drivers/net/ethernet/chelsio/cxgb3/sge.c b/drivers/net/ethernet/chelsio/cxgb3/sge.c
index 687ec4a8bb48..9c89dc8fe105 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/sge.c
@@ -455,11 +455,6 @@ static int alloc_pg_chunk(struct adapter *adapter, struct sge_fl *q,
455 q->pg_chunk.offset = 0; 455 q->pg_chunk.offset = 0;
456 mapping = pci_map_page(adapter->pdev, q->pg_chunk.page, 456 mapping = pci_map_page(adapter->pdev, q->pg_chunk.page,
457 0, q->alloc_size, PCI_DMA_FROMDEVICE); 457 0, q->alloc_size, PCI_DMA_FROMDEVICE);
458 if (unlikely(pci_dma_mapping_error(adapter->pdev, mapping))) {
459 __free_pages(q->pg_chunk.page, order);
460 q->pg_chunk.page = NULL;
461 return -EIO;
462 }
463 q->pg_chunk.mapping = mapping; 458 q->pg_chunk.mapping = mapping;
464 } 459 }
465 sd->pg_chunk = q->pg_chunk; 460 sd->pg_chunk = q->pg_chunk;
@@ -954,75 +949,40 @@ static inline unsigned int calc_tx_descs(const struct sk_buff *skb)
954 return flits_to_desc(flits); 949 return flits_to_desc(flits);
955} 950}
956 951
957
958/* map_skb - map a packet main body and its page fragments
959 * @pdev: the PCI device
960 * @skb: the packet
961 * @addr: placeholder to save the mapped addresses
962 *
963 * map the main body of an sk_buff and its page fragments, if any.
964 */
965static int map_skb(struct pci_dev *pdev, const struct sk_buff *skb,
966 dma_addr_t *addr)
967{
968 const skb_frag_t *fp, *end;
969 const struct skb_shared_info *si;
970
971 *addr = pci_map_single(pdev, skb->data, skb_headlen(skb),
972 PCI_DMA_TODEVICE);
973 if (pci_dma_mapping_error(pdev, *addr))
974 goto out_err;
975
976 si = skb_shinfo(skb);
977 end = &si->frags[si->nr_frags];
978
979 for (fp = si->frags; fp < end; fp++) {
980 *++addr = skb_frag_dma_map(&pdev->dev, fp, 0, skb_frag_size(fp),
981 DMA_TO_DEVICE);
982 if (pci_dma_mapping_error(pdev, *addr))
983 goto unwind;
984 }
985 return 0;
986
987unwind:
988 while (fp-- > si->frags)
989 dma_unmap_page(&pdev->dev, *--addr, skb_frag_size(fp),
990 DMA_TO_DEVICE);
991
992 pci_unmap_single(pdev, addr[-1], skb_headlen(skb), PCI_DMA_TODEVICE);
993out_err:
994 return -ENOMEM;
995}
996
997/** 952/**
998 * write_sgl - populate a scatter/gather list for a packet 953 * make_sgl - populate a scatter/gather list for a packet
999 * @skb: the packet 954 * @skb: the packet
1000 * @sgp: the SGL to populate 955 * @sgp: the SGL to populate
1001 * @start: start address of skb main body data to include in the SGL 956 * @start: start address of skb main body data to include in the SGL
1002 * @len: length of skb main body data to include in the SGL 957 * @len: length of skb main body data to include in the SGL
1003 * @addr: the list of the mapped addresses 958 * @pdev: the PCI device
1004 * 959 *
1005 * Copies the scatter/gather list for the buffers that make up a packet 960 * Generates a scatter/gather list for the buffers that make up a packet
1006 * and returns the SGL size in 8-byte words. The caller must size the SGL 961 * and returns the SGL size in 8-byte words. The caller must size the SGL
1007 * appropriately. 962 * appropriately.
1008 */ 963 */
1009static inline unsigned int write_sgl(const struct sk_buff *skb, 964static inline unsigned int make_sgl(const struct sk_buff *skb,
1010 struct sg_ent *sgp, unsigned char *start, 965 struct sg_ent *sgp, unsigned char *start,
1011 unsigned int len, const dma_addr_t *addr) 966 unsigned int len, struct pci_dev *pdev)
1012{ 967{
1013 unsigned int i, j = 0, k = 0, nfrags; 968 dma_addr_t mapping;
969 unsigned int i, j = 0, nfrags;
1014 970
1015 if (len) { 971 if (len) {
972 mapping = pci_map_single(pdev, start, len, PCI_DMA_TODEVICE);
1016 sgp->len[0] = cpu_to_be32(len); 973 sgp->len[0] = cpu_to_be32(len);
1017 sgp->addr[j++] = cpu_to_be64(addr[k++]); 974 sgp->addr[0] = cpu_to_be64(mapping);
975 j = 1;
1018 } 976 }
1019 977
1020 nfrags = skb_shinfo(skb)->nr_frags; 978 nfrags = skb_shinfo(skb)->nr_frags;
1021 for (i = 0; i < nfrags; i++) { 979 for (i = 0; i < nfrags; i++) {
1022 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 980 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
1023 981
982 mapping = skb_frag_dma_map(&pdev->dev, frag, 0, skb_frag_size(frag),
983 DMA_TO_DEVICE);
1024 sgp->len[j] = cpu_to_be32(skb_frag_size(frag)); 984 sgp->len[j] = cpu_to_be32(skb_frag_size(frag));
1025 sgp->addr[j] = cpu_to_be64(addr[k++]); 985 sgp->addr[j] = cpu_to_be64(mapping);
1026 j ^= 1; 986 j ^= 1;
1027 if (j == 0) 987 if (j == 0)
1028 ++sgp; 988 ++sgp;
@@ -1178,7 +1138,7 @@ static void write_tx_pkt_wr(struct adapter *adap, struct sk_buff *skb,
1178 const struct port_info *pi, 1138 const struct port_info *pi,
1179 unsigned int pidx, unsigned int gen, 1139 unsigned int pidx, unsigned int gen,
1180 struct sge_txq *q, unsigned int ndesc, 1140 struct sge_txq *q, unsigned int ndesc,
1181 unsigned int compl, const dma_addr_t *addr) 1141 unsigned int compl)
1182{ 1142{
1183 unsigned int flits, sgl_flits, cntrl, tso_info; 1143 unsigned int flits, sgl_flits, cntrl, tso_info;
1184 struct sg_ent *sgp, sgl[MAX_SKB_FRAGS / 2 + 1]; 1144 struct sg_ent *sgp, sgl[MAX_SKB_FRAGS / 2 + 1];
@@ -1236,7 +1196,7 @@ static void write_tx_pkt_wr(struct adapter *adap, struct sk_buff *skb,
1236 } 1196 }
1237 1197
1238 sgp = ndesc == 1 ? (struct sg_ent *)&d->flit[flits] : sgl; 1198 sgp = ndesc == 1 ? (struct sg_ent *)&d->flit[flits] : sgl;
1239 sgl_flits = write_sgl(skb, sgp, skb->data, skb_headlen(skb), addr); 1199 sgl_flits = make_sgl(skb, sgp, skb->data, skb_headlen(skb), adap->pdev);
1240 1200
1241 write_wr_hdr_sgl(ndesc, skb, d, pidx, q, sgl, flits, sgl_flits, gen, 1201 write_wr_hdr_sgl(ndesc, skb, d, pidx, q, sgl, flits, sgl_flits, gen,
1242 htonl(V_WR_OP(FW_WROPCODE_TUNNEL_TX_PKT) | compl), 1202 htonl(V_WR_OP(FW_WROPCODE_TUNNEL_TX_PKT) | compl),
@@ -1267,7 +1227,6 @@ netdev_tx_t t3_eth_xmit(struct sk_buff *skb, struct net_device *dev)
1267 struct netdev_queue *txq; 1227 struct netdev_queue *txq;
1268 struct sge_qset *qs; 1228 struct sge_qset *qs;
1269 struct sge_txq *q; 1229 struct sge_txq *q;
1270 dma_addr_t addr[MAX_SKB_FRAGS + 1];
1271 1230
1272 /* 1231 /*
1273 * The chip min packet length is 9 octets but play safe and reject 1232 * The chip min packet length is 9 octets but play safe and reject
@@ -1296,11 +1255,6 @@ netdev_tx_t t3_eth_xmit(struct sk_buff *skb, struct net_device *dev)
1296 return NETDEV_TX_BUSY; 1255 return NETDEV_TX_BUSY;
1297 } 1256 }
1298 1257
1299 if (unlikely(map_skb(adap->pdev, skb, addr) < 0)) {
1300 dev_kfree_skb(skb);
1301 return NETDEV_TX_OK;
1302 }
1303
1304 q->in_use += ndesc; 1258 q->in_use += ndesc;
1305 if (unlikely(credits - ndesc < q->stop_thres)) { 1259 if (unlikely(credits - ndesc < q->stop_thres)) {
1306 t3_stop_tx_queue(txq, qs, q); 1260 t3_stop_tx_queue(txq, qs, q);
@@ -1358,7 +1312,7 @@ netdev_tx_t t3_eth_xmit(struct sk_buff *skb, struct net_device *dev)
1358 if (likely(!skb_shared(skb))) 1312 if (likely(!skb_shared(skb)))
1359 skb_orphan(skb); 1313 skb_orphan(skb);
1360 1314
1361 write_tx_pkt_wr(adap, skb, pi, pidx, gen, q, ndesc, compl, addr); 1315 write_tx_pkt_wr(adap, skb, pi, pidx, gen, q, ndesc, compl);
1362 check_ring_tx_db(adap, q); 1316 check_ring_tx_db(adap, q);
1363 return NETDEV_TX_OK; 1317 return NETDEV_TX_OK;
1364} 1318}
@@ -1623,8 +1577,7 @@ static void setup_deferred_unmapping(struct sk_buff *skb, struct pci_dev *pdev,
1623 */ 1577 */
1624static void write_ofld_wr(struct adapter *adap, struct sk_buff *skb, 1578static void write_ofld_wr(struct adapter *adap, struct sk_buff *skb,
1625 struct sge_txq *q, unsigned int pidx, 1579 struct sge_txq *q, unsigned int pidx,
1626 unsigned int gen, unsigned int ndesc, 1580 unsigned int gen, unsigned int ndesc)
1627 const dma_addr_t *addr)
1628{ 1581{
1629 unsigned int sgl_flits, flits; 1582 unsigned int sgl_flits, flits;
1630 struct work_request_hdr *from; 1583 struct work_request_hdr *from;
@@ -1645,9 +1598,9 @@ static void write_ofld_wr(struct adapter *adap, struct sk_buff *skb,
1645 1598
1646 flits = skb_transport_offset(skb) / 8; 1599 flits = skb_transport_offset(skb) / 8;
1647 sgp = ndesc == 1 ? (struct sg_ent *)&d->flit[flits] : sgl; 1600 sgp = ndesc == 1 ? (struct sg_ent *)&d->flit[flits] : sgl;
1648 sgl_flits = write_sgl(skb, sgp, skb_transport_header(skb), 1601 sgl_flits = make_sgl(skb, sgp, skb_transport_header(skb),
1649 skb_tail_pointer(skb) - 1602 skb->tail - skb->transport_header,
1650 skb_transport_header(skb), addr); 1603 adap->pdev);
1651 if (need_skb_unmap()) { 1604 if (need_skb_unmap()) {
1652 setup_deferred_unmapping(skb, adap->pdev, sgp, sgl_flits); 1605 setup_deferred_unmapping(skb, adap->pdev, sgp, sgl_flits);
1653 skb->destructor = deferred_unmap_destructor; 1606 skb->destructor = deferred_unmap_destructor;
@@ -1705,11 +1658,6 @@ again: reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK);
1705 goto again; 1658 goto again;
1706 } 1659 }
1707 1660
1708 if (map_skb(adap->pdev, skb, (dma_addr_t *)skb->head)) {
1709 spin_unlock(&q->lock);
1710 return NET_XMIT_SUCCESS;
1711 }
1712
1713 gen = q->gen; 1661 gen = q->gen;
1714 q->in_use += ndesc; 1662 q->in_use += ndesc;
1715 pidx = q->pidx; 1663 pidx = q->pidx;
@@ -1720,7 +1668,7 @@ again: reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK);
1720 } 1668 }
1721 spin_unlock(&q->lock); 1669 spin_unlock(&q->lock);
1722 1670
1723 write_ofld_wr(adap, skb, q, pidx, gen, ndesc, (dma_addr_t *)skb->head); 1671 write_ofld_wr(adap, skb, q, pidx, gen, ndesc);
1724 check_ring_tx_db(adap, q); 1672 check_ring_tx_db(adap, q);
1725 return NET_XMIT_SUCCESS; 1673 return NET_XMIT_SUCCESS;
1726} 1674}
@@ -1738,7 +1686,6 @@ static void restart_offloadq(unsigned long data)
1738 struct sge_txq *q = &qs->txq[TXQ_OFLD]; 1686 struct sge_txq *q = &qs->txq[TXQ_OFLD];
1739 const struct port_info *pi = netdev_priv(qs->netdev); 1687 const struct port_info *pi = netdev_priv(qs->netdev);
1740 struct adapter *adap = pi->adapter; 1688 struct adapter *adap = pi->adapter;
1741 unsigned int written = 0;
1742 1689
1743 spin_lock(&q->lock); 1690 spin_lock(&q->lock);
1744again: reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK); 1691again: reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK);
@@ -1758,14 +1705,10 @@ again: reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK);
1758 break; 1705 break;
1759 } 1706 }
1760 1707
1761 if (map_skb(adap->pdev, skb, (dma_addr_t *)skb->head))
1762 break;
1763
1764 gen = q->gen; 1708 gen = q->gen;
1765 q->in_use += ndesc; 1709 q->in_use += ndesc;
1766 pidx = q->pidx; 1710 pidx = q->pidx;
1767 q->pidx += ndesc; 1711 q->pidx += ndesc;
1768 written += ndesc;
1769 if (q->pidx >= q->size) { 1712 if (q->pidx >= q->size) {
1770 q->pidx -= q->size; 1713 q->pidx -= q->size;
1771 q->gen ^= 1; 1714 q->gen ^= 1;
@@ -1773,8 +1716,7 @@ again: reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK);
1773 __skb_unlink(skb, &q->sendq); 1716 __skb_unlink(skb, &q->sendq);
1774 spin_unlock(&q->lock); 1717 spin_unlock(&q->lock);
1775 1718
1776 write_ofld_wr(adap, skb, q, pidx, gen, ndesc, 1719 write_ofld_wr(adap, skb, q, pidx, gen, ndesc);
1777 (dma_addr_t *)skb->head);
1778 spin_lock(&q->lock); 1720 spin_lock(&q->lock);
1779 } 1721 }
1780 spin_unlock(&q->lock); 1722 spin_unlock(&q->lock);
@@ -1784,9 +1726,8 @@ again: reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK);
1784 set_bit(TXQ_LAST_PKT_DB, &q->flags); 1726 set_bit(TXQ_LAST_PKT_DB, &q->flags);
1785#endif 1727#endif
1786 wmb(); 1728 wmb();
1787 if (likely(written)) 1729 t3_write_reg(adap, A_SG_KDOORBELL,
1788 t3_write_reg(adap, A_SG_KDOORBELL, 1730 F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
1789 F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
1790} 1731}
1791 1732
1792/** 1733/**
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c
index 6e6e0a117ee2..8ec5d74ad44d 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.c
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.c
@@ -3048,6 +3048,9 @@ int be_cmd_get_func_config(struct be_adapter *adapter)
3048 3048
3049 adapter->max_event_queues = le16_to_cpu(desc->eq_count); 3049 adapter->max_event_queues = le16_to_cpu(desc->eq_count);
3050 adapter->if_cap_flags = le32_to_cpu(desc->cap_flags); 3050 adapter->if_cap_flags = le32_to_cpu(desc->cap_flags);
3051
3052 /* Clear flags that driver is not interested in */
3053 adapter->if_cap_flags &= BE_IF_CAP_FLAGS_WANT;
3051 } 3054 }
3052err: 3055err:
3053 mutex_unlock(&adapter->mbox_lock); 3056 mutex_unlock(&adapter->mbox_lock);
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.h b/drivers/net/ethernet/emulex/benet/be_cmds.h
index 5228d88c5a02..1b3b9e886412 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.h
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.h
@@ -563,6 +563,12 @@ enum be_if_flags {
563 BE_IF_FLAGS_MULTICAST = 0x1000 563 BE_IF_FLAGS_MULTICAST = 0x1000
564}; 564};
565 565
566#define BE_IF_CAP_FLAGS_WANT (BE_IF_FLAGS_RSS | BE_IF_FLAGS_PROMISCUOUS |\
567 BE_IF_FLAGS_BROADCAST | BE_IF_FLAGS_VLAN_PROMISCUOUS |\
568 BE_IF_FLAGS_VLAN | BE_IF_FLAGS_MCAST_PROMISCUOUS |\
569 BE_IF_FLAGS_PASS_L3L4_ERRORS | BE_IF_FLAGS_MULTICAST |\
570 BE_IF_FLAGS_UNTAGGED)
571
566/* An RX interface is an object with one or more MAC addresses and 572/* An RX interface is an object with one or more MAC addresses and
567 * filtering capabilities. */ 573 * filtering capabilities. */
568struct be_cmd_req_if_create { 574struct be_cmd_req_if_create {
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index 181edb522450..4559c35eea13 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -2563,8 +2563,8 @@ static int be_close(struct net_device *netdev)
2563 /* Wait for all pending tx completions to arrive so that 2563 /* Wait for all pending tx completions to arrive so that
2564 * all tx skbs are freed. 2564 * all tx skbs are freed.
2565 */ 2565 */
2566 be_tx_compl_clean(adapter);
2567 netif_tx_disable(netdev); 2566 netif_tx_disable(netdev);
2567 be_tx_compl_clean(adapter);
2568 2568
2569 be_rx_qs_destroy(adapter); 2569 be_rx_qs_destroy(adapter);
2570 2570
diff --git a/drivers/net/ethernet/marvell/skge.c b/drivers/net/ethernet/marvell/skge.c
index c896079728e1..ef94a591f9e5 100644
--- a/drivers/net/ethernet/marvell/skge.c
+++ b/drivers/net/ethernet/marvell/skge.c
@@ -931,17 +931,20 @@ static int skge_ring_alloc(struct skge_ring *ring, void *vaddr, u32 base)
931} 931}
932 932
933/* Allocate and setup a new buffer for receiving */ 933/* Allocate and setup a new buffer for receiving */
934static void skge_rx_setup(struct skge_port *skge, struct skge_element *e, 934static int skge_rx_setup(struct skge_port *skge, struct skge_element *e,
935 struct sk_buff *skb, unsigned int bufsize) 935 struct sk_buff *skb, unsigned int bufsize)
936{ 936{
937 struct skge_rx_desc *rd = e->desc; 937 struct skge_rx_desc *rd = e->desc;
938 u64 map; 938 dma_addr_t map;
939 939
940 map = pci_map_single(skge->hw->pdev, skb->data, bufsize, 940 map = pci_map_single(skge->hw->pdev, skb->data, bufsize,
941 PCI_DMA_FROMDEVICE); 941 PCI_DMA_FROMDEVICE);
942 942
943 rd->dma_lo = map; 943 if (pci_dma_mapping_error(skge->hw->pdev, map))
944 rd->dma_hi = map >> 32; 944 return -1;
945
946 rd->dma_lo = lower_32_bits(map);
947 rd->dma_hi = upper_32_bits(map);
945 e->skb = skb; 948 e->skb = skb;
946 rd->csum1_start = ETH_HLEN; 949 rd->csum1_start = ETH_HLEN;
947 rd->csum2_start = ETH_HLEN; 950 rd->csum2_start = ETH_HLEN;
@@ -953,6 +956,7 @@ static void skge_rx_setup(struct skge_port *skge, struct skge_element *e,
953 rd->control = BMU_OWN | BMU_STF | BMU_IRQ_EOF | BMU_TCP_CHECK | bufsize; 956 rd->control = BMU_OWN | BMU_STF | BMU_IRQ_EOF | BMU_TCP_CHECK | bufsize;
954 dma_unmap_addr_set(e, mapaddr, map); 957 dma_unmap_addr_set(e, mapaddr, map);
955 dma_unmap_len_set(e, maplen, bufsize); 958 dma_unmap_len_set(e, maplen, bufsize);
959 return 0;
956} 960}
957 961
958/* Resume receiving using existing skb, 962/* Resume receiving using existing skb,
@@ -1014,7 +1018,10 @@ static int skge_rx_fill(struct net_device *dev)
1014 return -ENOMEM; 1018 return -ENOMEM;
1015 1019
1016 skb_reserve(skb, NET_IP_ALIGN); 1020 skb_reserve(skb, NET_IP_ALIGN);
1017 skge_rx_setup(skge, e, skb, skge->rx_buf_size); 1021 if (skge_rx_setup(skge, e, skb, skge->rx_buf_size) < 0) {
1022 dev_kfree_skb(skb);
1023 return -EIO;
1024 }
1018 } while ((e = e->next) != ring->start); 1025 } while ((e = e->next) != ring->start);
1019 1026
1020 ring->to_clean = ring->start; 1027 ring->to_clean = ring->start;
@@ -2544,7 +2551,7 @@ static int skge_up(struct net_device *dev)
2544 2551
2545 BUG_ON(skge->dma & 7); 2552 BUG_ON(skge->dma & 7);
2546 2553
2547 if ((u64)skge->dma >> 32 != ((u64) skge->dma + skge->mem_size) >> 32) { 2554 if (upper_32_bits(skge->dma) != upper_32_bits(skge->dma + skge->mem_size)) {
2548 dev_err(&hw->pdev->dev, "pci_alloc_consistent region crosses 4G boundary\n"); 2555 dev_err(&hw->pdev->dev, "pci_alloc_consistent region crosses 4G boundary\n");
2549 err = -EINVAL; 2556 err = -EINVAL;
2550 goto free_pci_mem; 2557 goto free_pci_mem;
@@ -2729,7 +2736,7 @@ static netdev_tx_t skge_xmit_frame(struct sk_buff *skb,
2729 struct skge_tx_desc *td; 2736 struct skge_tx_desc *td;
2730 int i; 2737 int i;
2731 u32 control, len; 2738 u32 control, len;
2732 u64 map; 2739 dma_addr_t map;
2733 2740
2734 if (skb_padto(skb, ETH_ZLEN)) 2741 if (skb_padto(skb, ETH_ZLEN))
2735 return NETDEV_TX_OK; 2742 return NETDEV_TX_OK;
@@ -2743,11 +2750,14 @@ static netdev_tx_t skge_xmit_frame(struct sk_buff *skb,
2743 e->skb = skb; 2750 e->skb = skb;
2744 len = skb_headlen(skb); 2751 len = skb_headlen(skb);
2745 map = pci_map_single(hw->pdev, skb->data, len, PCI_DMA_TODEVICE); 2752 map = pci_map_single(hw->pdev, skb->data, len, PCI_DMA_TODEVICE);
2753 if (pci_dma_mapping_error(hw->pdev, map))
2754 goto mapping_error;
2755
2746 dma_unmap_addr_set(e, mapaddr, map); 2756 dma_unmap_addr_set(e, mapaddr, map);
2747 dma_unmap_len_set(e, maplen, len); 2757 dma_unmap_len_set(e, maplen, len);
2748 2758
2749 td->dma_lo = map; 2759 td->dma_lo = lower_32_bits(map);
2750 td->dma_hi = map >> 32; 2760 td->dma_hi = upper_32_bits(map);
2751 2761
2752 if (skb->ip_summed == CHECKSUM_PARTIAL) { 2762 if (skb->ip_summed == CHECKSUM_PARTIAL) {
2753 const int offset = skb_checksum_start_offset(skb); 2763 const int offset = skb_checksum_start_offset(skb);
@@ -2778,14 +2788,16 @@ static netdev_tx_t skge_xmit_frame(struct sk_buff *skb,
2778 2788
2779 map = skb_frag_dma_map(&hw->pdev->dev, frag, 0, 2789 map = skb_frag_dma_map(&hw->pdev->dev, frag, 0,
2780 skb_frag_size(frag), DMA_TO_DEVICE); 2790 skb_frag_size(frag), DMA_TO_DEVICE);
2791 if (dma_mapping_error(&hw->pdev->dev, map))
2792 goto mapping_unwind;
2781 2793
2782 e = e->next; 2794 e = e->next;
2783 e->skb = skb; 2795 e->skb = skb;
2784 tf = e->desc; 2796 tf = e->desc;
2785 BUG_ON(tf->control & BMU_OWN); 2797 BUG_ON(tf->control & BMU_OWN);
2786 2798
2787 tf->dma_lo = map; 2799 tf->dma_lo = lower_32_bits(map);
2788 tf->dma_hi = (u64) map >> 32; 2800 tf->dma_hi = upper_32_bits(map);
2789 dma_unmap_addr_set(e, mapaddr, map); 2801 dma_unmap_addr_set(e, mapaddr, map);
2790 dma_unmap_len_set(e, maplen, skb_frag_size(frag)); 2802 dma_unmap_len_set(e, maplen, skb_frag_size(frag));
2791 2803
@@ -2815,6 +2827,26 @@ static netdev_tx_t skge_xmit_frame(struct sk_buff *skb,
2815 } 2827 }
2816 2828
2817 return NETDEV_TX_OK; 2829 return NETDEV_TX_OK;
2830
2831mapping_unwind:
2832 e = skge->tx_ring.to_use;
2833 pci_unmap_single(hw->pdev,
2834 dma_unmap_addr(e, mapaddr),
2835 dma_unmap_len(e, maplen),
2836 PCI_DMA_TODEVICE);
2837 while (i-- > 0) {
2838 e = e->next;
2839 pci_unmap_page(hw->pdev,
2840 dma_unmap_addr(e, mapaddr),
2841 dma_unmap_len(e, maplen),
2842 PCI_DMA_TODEVICE);
2843 }
2844
2845mapping_error:
2846 if (net_ratelimit())
2847 dev_warn(&hw->pdev->dev, "%s: tx mapping error\n", dev->name);
2848 dev_kfree_skb(skb);
2849 return NETDEV_TX_OK;
2818} 2850}
2819 2851
2820 2852
@@ -3045,11 +3077,13 @@ static struct sk_buff *skge_rx_get(struct net_device *dev,
3045 3077
3046 pci_dma_sync_single_for_cpu(skge->hw->pdev, 3078 pci_dma_sync_single_for_cpu(skge->hw->pdev,
3047 dma_unmap_addr(e, mapaddr), 3079 dma_unmap_addr(e, mapaddr),
3048 len, PCI_DMA_FROMDEVICE); 3080 dma_unmap_len(e, maplen),
3081 PCI_DMA_FROMDEVICE);
3049 skb_copy_from_linear_data(e->skb, skb->data, len); 3082 skb_copy_from_linear_data(e->skb, skb->data, len);
3050 pci_dma_sync_single_for_device(skge->hw->pdev, 3083 pci_dma_sync_single_for_device(skge->hw->pdev,
3051 dma_unmap_addr(e, mapaddr), 3084 dma_unmap_addr(e, mapaddr),
3052 len, PCI_DMA_FROMDEVICE); 3085 dma_unmap_len(e, maplen),
3086 PCI_DMA_FROMDEVICE);
3053 skge_rx_reuse(e, skge->rx_buf_size); 3087 skge_rx_reuse(e, skge->rx_buf_size);
3054 } else { 3088 } else {
3055 struct sk_buff *nskb; 3089 struct sk_buff *nskb;
@@ -3058,13 +3092,17 @@ static struct sk_buff *skge_rx_get(struct net_device *dev,
3058 if (!nskb) 3092 if (!nskb)
3059 goto resubmit; 3093 goto resubmit;
3060 3094
3095 if (skge_rx_setup(skge, e, nskb, skge->rx_buf_size) < 0) {
3096 dev_kfree_skb(nskb);
3097 goto resubmit;
3098 }
3099
3061 pci_unmap_single(skge->hw->pdev, 3100 pci_unmap_single(skge->hw->pdev,
3062 dma_unmap_addr(e, mapaddr), 3101 dma_unmap_addr(e, mapaddr),
3063 dma_unmap_len(e, maplen), 3102 dma_unmap_len(e, maplen),
3064 PCI_DMA_FROMDEVICE); 3103 PCI_DMA_FROMDEVICE);
3065 skb = e->skb; 3104 skb = e->skb;
3066 prefetch(skb->data); 3105 prefetch(skb->data);
3067 skge_rx_setup(skge, e, nskb, skge->rx_buf_size);
3068 } 3106 }
3069 3107
3070 skb_put(skb, len); 3108 skb_put(skb, len);
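
Editorial note (not part of the patch): the skge hunks switch the mapped address to dma_addr_t, check pci_dma_mapping_error() before using the mapping, and program the 64-bit address with lower_32_bits()/upper_32_bits() instead of raw shifts. A small sketch of that pattern with a hypothetical split-address descriptor:

#include <linux/pci.h>
#include <linux/types.h>
#include <linux/kernel.h>	/* lower_32_bits/upper_32_bits */
#include <linux/errno.h>

struct my_rx_desc {		/* hypothetical descriptor layout */
	u32 dma_lo;
	u32 dma_hi;
};

static int my_map_rx_buffer(struct pci_dev *pdev, struct my_rx_desc *rd,
			    void *data, unsigned int len)
{
	dma_addr_t map = pci_map_single(pdev, data, len, PCI_DMA_FROMDEVICE);

	if (pci_dma_mapping_error(pdev, map))
		return -EIO;	/* caller frees the buffer and retries later */

	rd->dma_lo = lower_32_bits(map);
	rd->dma_hi = upper_32_bits(map);
	return 0;
}
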
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
index c571de85d0f9..5472cbd34028 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
@@ -46,7 +46,7 @@
46#include "mlx5_core.h" 46#include "mlx5_core.h"
47 47
48enum { 48enum {
49 CMD_IF_REV = 4, 49 CMD_IF_REV = 5,
50}; 50};
51 51
52enum { 52enum {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
index c02cbcfd0fb8..443cc4d7b024 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
@@ -268,7 +268,7 @@ static int mlx5_eq_int(struct mlx5_core_dev *dev, struct mlx5_eq *eq)
268 case MLX5_EVENT_TYPE_PAGE_REQUEST: 268 case MLX5_EVENT_TYPE_PAGE_REQUEST:
269 { 269 {
270 u16 func_id = be16_to_cpu(eqe->data.req_pages.func_id); 270 u16 func_id = be16_to_cpu(eqe->data.req_pages.func_id);
271 s16 npages = be16_to_cpu(eqe->data.req_pages.num_pages); 271 s32 npages = be32_to_cpu(eqe->data.req_pages.num_pages);
272 272
273 mlx5_core_dbg(dev, "page request for func 0x%x, napges %d\n", func_id, npages); 273 mlx5_core_dbg(dev, "page request for func 0x%x, napges %d\n", func_id, npages);
274 mlx5_core_req_pages_handler(dev, func_id, npages); 274 mlx5_core_req_pages_handler(dev, func_id, npages);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw.c b/drivers/net/ethernet/mellanox/mlx5/core/fw.c
index 72a5222447f5..f012658b6a92 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fw.c
@@ -113,7 +113,7 @@ int mlx5_cmd_query_hca_cap(struct mlx5_core_dev *dev,
113 caps->log_max_srq = out->hca_cap.log_max_srqs & 0x1f; 113 caps->log_max_srq = out->hca_cap.log_max_srqs & 0x1f;
114 caps->local_ca_ack_delay = out->hca_cap.local_ca_ack_delay & 0x1f; 114 caps->local_ca_ack_delay = out->hca_cap.local_ca_ack_delay & 0x1f;
115 caps->log_max_mcg = out->hca_cap.log_max_mcg; 115 caps->log_max_mcg = out->hca_cap.log_max_mcg;
116 caps->max_qp_mcg = be16_to_cpu(out->hca_cap.max_qp_mcg); 116 caps->max_qp_mcg = be32_to_cpu(out->hca_cap.max_qp_mcg) & 0xffffff;
117 caps->max_ra_res_qp = 1 << (out->hca_cap.log_max_ra_res_qp & 0x3f); 117 caps->max_ra_res_qp = 1 << (out->hca_cap.log_max_ra_res_qp & 0x3f);
118 caps->max_ra_req_qp = 1 << (out->hca_cap.log_max_ra_req_qp & 0x3f); 118 caps->max_ra_req_qp = 1 << (out->hca_cap.log_max_ra_req_qp & 0x3f);
119 caps->max_srq_wqes = 1 << out->hca_cap.log_max_srq_sz; 119 caps->max_srq_wqes = 1 << out->hca_cap.log_max_srq_sz;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/health.c b/drivers/net/ethernet/mellanox/mlx5/core/health.c
index 748f10a155c4..3e6670c4a7cd 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/health.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/health.c
@@ -55,33 +55,9 @@ enum {
55}; 55};
56 56
57static DEFINE_SPINLOCK(health_lock); 57static DEFINE_SPINLOCK(health_lock);
58
59static LIST_HEAD(health_list); 58static LIST_HEAD(health_list);
60static struct work_struct health_work; 59static struct work_struct health_work;
61 60
62static health_handler_t reg_handler;
63int mlx5_register_health_report_handler(health_handler_t handler)
64{
65 spin_lock_irq(&health_lock);
66 if (reg_handler) {
67 spin_unlock_irq(&health_lock);
68 return -EEXIST;
69 }
70 reg_handler = handler;
71 spin_unlock_irq(&health_lock);
72
73 return 0;
74}
75EXPORT_SYMBOL(mlx5_register_health_report_handler);
76
77void mlx5_unregister_health_report_handler(void)
78{
79 spin_lock_irq(&health_lock);
80 reg_handler = NULL;
81 spin_unlock_irq(&health_lock);
82}
83EXPORT_SYMBOL(mlx5_unregister_health_report_handler);
84
85static void health_care(struct work_struct *work) 61static void health_care(struct work_struct *work)
86{ 62{
87 struct mlx5_core_health *health, *n; 63 struct mlx5_core_health *health, *n;
@@ -98,11 +74,8 @@ static void health_care(struct work_struct *work)
98 priv = container_of(health, struct mlx5_priv, health); 74 priv = container_of(health, struct mlx5_priv, health);
99 dev = container_of(priv, struct mlx5_core_dev, priv); 75 dev = container_of(priv, struct mlx5_core_dev, priv);
100 mlx5_core_warn(dev, "handling bad device here\n"); 76 mlx5_core_warn(dev, "handling bad device here\n");
77 /* nothing yet */
101 spin_lock_irq(&health_lock); 78 spin_lock_irq(&health_lock);
102 if (reg_handler)
103 reg_handler(dev->pdev, health->health,
104 sizeof(health->health));
105
106 list_del_init(&health->list); 79 list_del_init(&health->list);
107 spin_unlock_irq(&health_lock); 80 spin_unlock_irq(&health_lock);
108 } 81 }
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
index 4a3e137931a3..3a2408d44820 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
@@ -43,10 +43,16 @@ enum {
43 MLX5_PAGES_TAKE = 2 43 MLX5_PAGES_TAKE = 2
44}; 44};
45 45
46enum {
47 MLX5_BOOT_PAGES = 1,
48 MLX5_INIT_PAGES = 2,
49 MLX5_POST_INIT_PAGES = 3
50};
51
46struct mlx5_pages_req { 52struct mlx5_pages_req {
47 struct mlx5_core_dev *dev; 53 struct mlx5_core_dev *dev;
48 u32 func_id; 54 u32 func_id;
49 s16 npages; 55 s32 npages;
50 struct work_struct work; 56 struct work_struct work;
51}; 57};
52 58
@@ -64,27 +70,23 @@ struct mlx5_query_pages_inbox {
64 70
65struct mlx5_query_pages_outbox { 71struct mlx5_query_pages_outbox {
66 struct mlx5_outbox_hdr hdr; 72 struct mlx5_outbox_hdr hdr;
67 __be16 num_boot_pages; 73 __be16 rsvd;
68 __be16 func_id; 74 __be16 func_id;
69 __be16 init_pages; 75 __be32 num_pages;
70 __be16 num_pages;
71}; 76};
72 77
73struct mlx5_manage_pages_inbox { 78struct mlx5_manage_pages_inbox {
74 struct mlx5_inbox_hdr hdr; 79 struct mlx5_inbox_hdr hdr;
75 __be16 rsvd0; 80 __be16 rsvd;
76 __be16 func_id; 81 __be16 func_id;
77 __be16 rsvd1; 82 __be32 num_entries;
78 __be16 num_entries;
79 u8 rsvd2[16];
80 __be64 pas[0]; 83 __be64 pas[0];
81}; 84};
82 85
83struct mlx5_manage_pages_outbox { 86struct mlx5_manage_pages_outbox {
84 struct mlx5_outbox_hdr hdr; 87 struct mlx5_outbox_hdr hdr;
85 u8 rsvd0[2]; 88 __be32 num_entries;
86 __be16 num_entries; 89 u8 rsvd[4];
87 u8 rsvd1[20];
88 __be64 pas[0]; 90 __be64 pas[0];
89}; 91};
90 92
@@ -146,7 +148,7 @@ static struct page *remove_page(struct mlx5_core_dev *dev, u64 addr)
146} 148}
147 149
148static int mlx5_cmd_query_pages(struct mlx5_core_dev *dev, u16 *func_id, 150static int mlx5_cmd_query_pages(struct mlx5_core_dev *dev, u16 *func_id,
149 s16 *pages, s16 *init_pages, u16 *boot_pages) 151 s32 *npages, int boot)
150{ 152{
151 struct mlx5_query_pages_inbox in; 153 struct mlx5_query_pages_inbox in;
152 struct mlx5_query_pages_outbox out; 154 struct mlx5_query_pages_outbox out;
@@ -155,6 +157,8 @@ static int mlx5_cmd_query_pages(struct mlx5_core_dev *dev, u16 *func_id,
155 memset(&in, 0, sizeof(in)); 157 memset(&in, 0, sizeof(in));
156 memset(&out, 0, sizeof(out)); 158 memset(&out, 0, sizeof(out));
157 in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_PAGES); 159 in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_PAGES);
160 in.hdr.opmod = boot ? cpu_to_be16(MLX5_BOOT_PAGES) : cpu_to_be16(MLX5_INIT_PAGES);
161
158 err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out)); 162 err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
159 if (err) 163 if (err)
160 return err; 164 return err;
@@ -162,15 +166,7 @@ static int mlx5_cmd_query_pages(struct mlx5_core_dev *dev, u16 *func_id,
162 if (out.hdr.status) 166 if (out.hdr.status)
163 return mlx5_cmd_status_to_err(&out.hdr); 167 return mlx5_cmd_status_to_err(&out.hdr);
164 168
165 if (pages) 169 *npages = be32_to_cpu(out.num_pages);
166 *pages = be16_to_cpu(out.num_pages);
167
168 if (init_pages)
169 *init_pages = be16_to_cpu(out.init_pages);
170
171 if (boot_pages)
172 *boot_pages = be16_to_cpu(out.num_boot_pages);
173
174 *func_id = be16_to_cpu(out.func_id); 170 *func_id = be16_to_cpu(out.func_id);
175 171
176 return err; 172 return err;
@@ -224,7 +220,7 @@ static int give_pages(struct mlx5_core_dev *dev, u16 func_id, int npages,
224 in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_MANAGE_PAGES); 220 in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_MANAGE_PAGES);
225 in->hdr.opmod = cpu_to_be16(MLX5_PAGES_GIVE); 221 in->hdr.opmod = cpu_to_be16(MLX5_PAGES_GIVE);
226 in->func_id = cpu_to_be16(func_id); 222 in->func_id = cpu_to_be16(func_id);
227 in->num_entries = cpu_to_be16(npages); 223 in->num_entries = cpu_to_be32(npages);
228 err = mlx5_cmd_exec(dev, in, inlen, &out, sizeof(out)); 224 err = mlx5_cmd_exec(dev, in, inlen, &out, sizeof(out));
229 mlx5_core_dbg(dev, "err %d\n", err); 225 mlx5_core_dbg(dev, "err %d\n", err);
230 if (err) { 226 if (err) {
@@ -292,7 +288,7 @@ static int reclaim_pages(struct mlx5_core_dev *dev, u32 func_id, int npages,
292 in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_MANAGE_PAGES); 288 in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_MANAGE_PAGES);
293 in.hdr.opmod = cpu_to_be16(MLX5_PAGES_TAKE); 289 in.hdr.opmod = cpu_to_be16(MLX5_PAGES_TAKE);
294 in.func_id = cpu_to_be16(func_id); 290 in.func_id = cpu_to_be16(func_id);
295 in.num_entries = cpu_to_be16(npages); 291 in.num_entries = cpu_to_be32(npages);
296 mlx5_core_dbg(dev, "npages %d, outlen %d\n", npages, outlen); 292 mlx5_core_dbg(dev, "npages %d, outlen %d\n", npages, outlen);
297 err = mlx5_cmd_exec(dev, &in, sizeof(in), out, outlen); 293 err = mlx5_cmd_exec(dev, &in, sizeof(in), out, outlen);
298 if (err) { 294 if (err) {
@@ -306,7 +302,7 @@ static int reclaim_pages(struct mlx5_core_dev *dev, u32 func_id, int npages,
306 goto out_free; 302 goto out_free;
307 } 303 }
308 304
309 num_claimed = be16_to_cpu(out->num_entries); 305 num_claimed = be32_to_cpu(out->num_entries);
310 if (nclaimed) 306 if (nclaimed)
311 *nclaimed = num_claimed; 307 *nclaimed = num_claimed;
312 308
@@ -345,7 +341,7 @@ static void pages_work_handler(struct work_struct *work)
345} 341}
346 342
347void mlx5_core_req_pages_handler(struct mlx5_core_dev *dev, u16 func_id, 343void mlx5_core_req_pages_handler(struct mlx5_core_dev *dev, u16 func_id,
348 s16 npages) 344 s32 npages)
349{ 345{
350 struct mlx5_pages_req *req; 346 struct mlx5_pages_req *req;
351 347
@@ -364,20 +360,18 @@ void mlx5_core_req_pages_handler(struct mlx5_core_dev *dev, u16 func_id,
364 360
365int mlx5_satisfy_startup_pages(struct mlx5_core_dev *dev, int boot) 361int mlx5_satisfy_startup_pages(struct mlx5_core_dev *dev, int boot)
366{ 362{
367 u16 uninitialized_var(boot_pages);
368 s16 uninitialized_var(init_pages);
369 u16 uninitialized_var(func_id); 363 u16 uninitialized_var(func_id);
364 s32 uninitialized_var(npages);
370 int err; 365 int err;
371 366
372 err = mlx5_cmd_query_pages(dev, &func_id, NULL, &init_pages, 367 err = mlx5_cmd_query_pages(dev, &func_id, &npages, boot);
373 &boot_pages);
374 if (err) 368 if (err)
375 return err; 369 return err;
376 370
371 mlx5_core_dbg(dev, "requested %d %s pages for func_id 0x%x\n",
372 npages, boot ? "boot" : "init", func_id);
377 373
378 mlx5_core_dbg(dev, "requested %d init pages and %d boot pages for func_id 0x%x\n", 374 return give_pages(dev, func_id, npages, 0);
379 init_pages, boot_pages, func_id);
380 return give_pages(dev, func_id, boot ? boot_pages : init_pages, 0);
381} 375}
382 376
383static int optimal_reclaimed_pages(void) 377static int optimal_reclaimed_pages(void)
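Note on the hunk above: the mlx5 page-allocator counters are widened from 16-bit to 32-bit fields (s32 npages, __be32 num_entries), and boot vs. init pages are now selected through the command opmod rather than separate outbox members. The following is a minimal, hypothetical userspace sketch of why the narrower type matters, not the driver's code: a signed 16-bit count misbehaves once firmware asks for more than 32767 pages (128 MiB of 4 KiB pages).

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Hypothetical request: 40000 x 4 KiB pages (~156 MiB). */
	uint32_t requested = 40000;

	int16_t npages16 = (int16_t)requested;	/* old-style s16 counter */
	int32_t npages32 = (int32_t)requested;	/* widened s32 counter   */

	/* On common two's-complement systems the 16-bit counter wraps to a
	 * negative value, so a bogus page count would reach the firmware. */
	printf("s16 view: %d pages\n", npages16);	/* typically -25536 */
	printf("s32 view: %d pages\n", npages32);	/* 40000            */
	return 0;
}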
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
index 92da9980a0a0..9d4bb7f83904 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
@@ -3266,6 +3266,11 @@ int qlcnic_83xx_interrupt_test(struct net_device *netdev)
3266 u8 val; 3266 u8 val;
3267 int ret, max_sds_rings = adapter->max_sds_rings; 3267 int ret, max_sds_rings = adapter->max_sds_rings;
3268 3268
3269 if (test_bit(__QLCNIC_RESETTING, &adapter->state)) {
3270 netdev_info(netdev, "Device is resetting\n");
3271 return -EBUSY;
3272 }
3273
3269 if (qlcnic_get_diag_lock(adapter)) { 3274 if (qlcnic_get_diag_lock(adapter)) {
3270 netdev_info(netdev, "Device in diagnostics mode\n"); 3275 netdev_info(netdev, "Device in diagnostics mode\n");
3271 return -EBUSY; 3276 return -EBUSY;
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
index 9f4b8d5f0865..345d987aede4 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
@@ -629,7 +629,8 @@ int qlcnic_83xx_idc_reattach_driver(struct qlcnic_adapter *adapter)
629 return -EIO; 629 return -EIO;
630 } 630 }
631 631
632 qlcnic_set_drv_version(adapter); 632 if (adapter->portnum == 0)
633 qlcnic_set_drv_version(adapter);
633 qlcnic_83xx_idc_attach_driver(adapter); 634 qlcnic_83xx_idc_attach_driver(adapter);
634 635
635 return 0; 636 return 0;
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
index ee013fcc3322..bc05d016c859 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
@@ -2165,7 +2165,8 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
2165 if (err) 2165 if (err)
2166 goto err_out_disable_mbx_intr; 2166 goto err_out_disable_mbx_intr;
2167 2167
2168 qlcnic_set_drv_version(adapter); 2168 if (adapter->portnum == 0)
2169 qlcnic_set_drv_version(adapter);
2169 2170
2170 pci_set_drvdata(pdev, adapter); 2171 pci_set_drvdata(pdev, adapter);
2171 2172
@@ -3085,7 +3086,8 @@ done:
3085 adapter->fw_fail_cnt = 0; 3086 adapter->fw_fail_cnt = 0;
3086 adapter->flags &= ~QLCNIC_FW_HANG; 3087 adapter->flags &= ~QLCNIC_FW_HANG;
3087 clear_bit(__QLCNIC_RESETTING, &adapter->state); 3088 clear_bit(__QLCNIC_RESETTING, &adapter->state);
3088 qlcnic_set_drv_version(adapter); 3089 if (adapter->portnum == 0)
3090 qlcnic_set_drv_version(adapter);
3089 3091
3090 if (!qlcnic_clr_drv_state(adapter)) 3092 if (!qlcnic_clr_drv_state(adapter))
3091 qlcnic_schedule_work(adapter, qlcnic_fw_poll_work, 3093 qlcnic_schedule_work(adapter, qlcnic_fw_poll_work,
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
index 10ed82b3baca..660c3f5b2237 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
@@ -170,9 +170,9 @@ static int qlcnic_82xx_store_beacon(struct qlcnic_adapter *adapter,
170 170
171 if (ahw->extra_capability[0] & QLCNIC_FW_CAPABILITY_2_BEACON) { 171 if (ahw->extra_capability[0] & QLCNIC_FW_CAPABILITY_2_BEACON) {
172 err = qlcnic_get_beacon_state(adapter, &h_beacon_state); 172 err = qlcnic_get_beacon_state(adapter, &h_beacon_state);
173 if (!err) { 173 if (err) {
174 dev_info(&adapter->pdev->dev, 174 netdev_err(adapter->netdev,
175 "Failed to get current beacon state\n"); 175 "Failed to get current beacon state\n");
176 } else { 176 } else {
177 if (h_beacon_state == QLCNIC_BEACON_DISABLE) 177 if (h_beacon_state == QLCNIC_BEACON_DISABLE)
178 ahw->beacon_state = 0; 178 ahw->beacon_state = 0;
diff --git a/drivers/net/ethernet/realtek/8139cp.c b/drivers/net/ethernet/realtek/8139cp.c
index 6f35f8404d68..d2e591955bdd 100644
--- a/drivers/net/ethernet/realtek/8139cp.c
+++ b/drivers/net/ethernet/realtek/8139cp.c
@@ -524,6 +524,7 @@ rx_status_loop:
524 PCI_DMA_FROMDEVICE); 524 PCI_DMA_FROMDEVICE);
525 if (dma_mapping_error(&cp->pdev->dev, new_mapping)) { 525 if (dma_mapping_error(&cp->pdev->dev, new_mapping)) {
526 dev->stats.rx_dropped++; 526 dev->stats.rx_dropped++;
527 kfree_skb(new_skb);
527 goto rx_next; 528 goto rx_next;
528 } 529 }
529 530
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index b5eb4195fc99..85e5c97191dd 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -7088,7 +7088,7 @@ rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
7088 7088
7089 RTL_W8(Cfg9346, Cfg9346_Unlock); 7089 RTL_W8(Cfg9346, Cfg9346_Unlock);
7090 RTL_W8(Config1, RTL_R8(Config1) | PMEnable); 7090 RTL_W8(Config1, RTL_R8(Config1) | PMEnable);
7091 RTL_W8(Config5, RTL_R8(Config5) & PMEStatus); 7091 RTL_W8(Config5, RTL_R8(Config5) & (BWF | MWF | UWF | LanWake | PMEStatus));
7092 if ((RTL_R8(Config3) & (LinkUp | MagicPacket)) != 0) 7092 if ((RTL_R8(Config3) & (LinkUp | MagicPacket)) != 0)
7093 tp->features |= RTL_FEATURE_WOL; 7093 tp->features |= RTL_FEATURE_WOL;
7094 if ((RTL_R8(Config5) & (UWF | BWF | MWF)) != 0) 7094 if ((RTL_R8(Config5) & (UWF | BWF | MWF)) != 0)
diff --git a/drivers/net/ethernet/sfc/filter.c b/drivers/net/ethernet/sfc/filter.c
index 2a469b27a506..30d744235d27 100644
--- a/drivers/net/ethernet/sfc/filter.c
+++ b/drivers/net/ethernet/sfc/filter.c
@@ -675,7 +675,7 @@ s32 efx_filter_insert_filter(struct efx_nic *efx, struct efx_filter_spec *spec,
675 BUILD_BUG_ON(EFX_FILTER_INDEX_UC_DEF != 0); 675 BUILD_BUG_ON(EFX_FILTER_INDEX_UC_DEF != 0);
676 BUILD_BUG_ON(EFX_FILTER_INDEX_MC_DEF != 676 BUILD_BUG_ON(EFX_FILTER_INDEX_MC_DEF !=
677 EFX_FILTER_MC_DEF - EFX_FILTER_UC_DEF); 677 EFX_FILTER_MC_DEF - EFX_FILTER_UC_DEF);
678 rep_index = spec->type - EFX_FILTER_INDEX_UC_DEF; 678 rep_index = spec->type - EFX_FILTER_UC_DEF;
679 ins_index = rep_index; 679 ins_index = rep_index;
680 680
681 spin_lock_bh(&state->lock); 681 spin_lock_bh(&state->lock);
diff --git a/drivers/net/ethernet/stmicro/stmmac/ring_mode.c b/drivers/net/ethernet/stmicro/stmmac/ring_mode.c
index c9d942a5c335..1ef9d8a555aa 100644
--- a/drivers/net/ethernet/stmicro/stmmac/ring_mode.c
+++ b/drivers/net/ethernet/stmicro/stmmac/ring_mode.c
@@ -33,10 +33,15 @@ static unsigned int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum)
33 struct stmmac_priv *priv = (struct stmmac_priv *)p; 33 struct stmmac_priv *priv = (struct stmmac_priv *)p;
34 unsigned int txsize = priv->dma_tx_size; 34 unsigned int txsize = priv->dma_tx_size;
35 unsigned int entry = priv->cur_tx % txsize; 35 unsigned int entry = priv->cur_tx % txsize;
36 struct dma_desc *desc = priv->dma_tx + entry; 36 struct dma_desc *desc;
37 unsigned int nopaged_len = skb_headlen(skb); 37 unsigned int nopaged_len = skb_headlen(skb);
38 unsigned int bmax, len; 38 unsigned int bmax, len;
39 39
40 if (priv->extend_desc)
41 desc = (struct dma_desc *)(priv->dma_etx + entry);
42 else
43 desc = priv->dma_tx + entry;
44
40 if (priv->plat->enh_desc) 45 if (priv->plat->enh_desc)
41 bmax = BUF_SIZE_8KiB; 46 bmax = BUF_SIZE_8KiB;
42 else 47 else
@@ -54,7 +59,11 @@ static unsigned int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum)
54 STMMAC_RING_MODE); 59 STMMAC_RING_MODE);
55 wmb(); 60 wmb();
56 entry = (++priv->cur_tx) % txsize; 61 entry = (++priv->cur_tx) % txsize;
57 desc = priv->dma_tx + entry; 62
63 if (priv->extend_desc)
64 desc = (struct dma_desc *)(priv->dma_etx + entry);
65 else
66 desc = priv->dma_tx + entry;
58 67
59 desc->des2 = dma_map_single(priv->device, skb->data + bmax, 68 desc->des2 = dma_map_single(priv->device, skb->data + bmax,
60 len, DMA_TO_DEVICE); 69 len, DMA_TO_DEVICE);
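Note on the ring_mode hunks: the descriptor base is now chosen at the point of use, extended descriptors (dma_etx) when priv->extend_desc is set, basic ones (dma_tx) otherwise, instead of always using dma_tx. Since that selection appears twice, a small helper is one way to express it; the sketch below is a hypothetical illustration with invented types, not the stmmac code.

#include <stddef.h>

struct dma_desc { unsigned int des2; };
struct dma_extended_desc { struct dma_desc basic; unsigned int ext[3]; };

struct priv {
	int extend_desc;			/* which descriptor layout is active */
	struct dma_desc *dma_tx;		/* basic ring */
	struct dma_extended_desc *dma_etx;	/* extended ring */
};

/* Return the basic-descriptor view of entry 'i', whichever ring is in use. */
static struct dma_desc *tx_desc(struct priv *p, unsigned int i)
{
	if (p->extend_desc)
		return &p->dma_etx[i].basic;
	return &p->dma_tx[i];
}

int main(void)
{
	struct dma_desc ring[4] = { { 0 } };
	struct priv p = { .extend_desc = 0, .dma_tx = ring, .dma_etx = NULL };

	tx_desc(&p, 2)->des2 = 0x1234;		/* works for either layout */
	return ring[2].des2 == 0x1234 ? 0 : 1;
}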
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index f2ccb36e8685..0a9bb9d30c3f 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -939,15 +939,20 @@ static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p,
939 939
940 skb = __netdev_alloc_skb(priv->dev, priv->dma_buf_sz + NET_IP_ALIGN, 940 skb = __netdev_alloc_skb(priv->dev, priv->dma_buf_sz + NET_IP_ALIGN,
941 GFP_KERNEL); 941 GFP_KERNEL);
942 if (unlikely(skb == NULL)) { 942 if (!skb) {
943 pr_err("%s: Rx init fails; skb is NULL\n", __func__); 943 pr_err("%s: Rx init fails; skb is NULL\n", __func__);
944 return 1; 944 return -ENOMEM;
945 } 945 }
946 skb_reserve(skb, NET_IP_ALIGN); 946 skb_reserve(skb, NET_IP_ALIGN);
947 priv->rx_skbuff[i] = skb; 947 priv->rx_skbuff[i] = skb;
948 priv->rx_skbuff_dma[i] = dma_map_single(priv->device, skb->data, 948 priv->rx_skbuff_dma[i] = dma_map_single(priv->device, skb->data,
949 priv->dma_buf_sz, 949 priv->dma_buf_sz,
950 DMA_FROM_DEVICE); 950 DMA_FROM_DEVICE);
951 if (dma_mapping_error(priv->device, priv->rx_skbuff_dma[i])) {
952 pr_err("%s: DMA mapping error\n", __func__);
953 dev_kfree_skb_any(skb);
954 return -EINVAL;
955 }
951 956
952 p->des2 = priv->rx_skbuff_dma[i]; 957 p->des2 = priv->rx_skbuff_dma[i];
953 958
@@ -958,6 +963,16 @@ static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p,
958 return 0; 963 return 0;
959} 964}
960 965
966static void stmmac_free_rx_buffers(struct stmmac_priv *priv, int i)
967{
968 if (priv->rx_skbuff[i]) {
969 dma_unmap_single(priv->device, priv->rx_skbuff_dma[i],
970 priv->dma_buf_sz, DMA_FROM_DEVICE);
971 dev_kfree_skb_any(priv->rx_skbuff[i]);
972 }
973 priv->rx_skbuff[i] = NULL;
974}
975
961/** 976/**
962 * init_dma_desc_rings - init the RX/TX descriptor rings 977 * init_dma_desc_rings - init the RX/TX descriptor rings
963 * @dev: net device structure 978 * @dev: net device structure
@@ -965,13 +980,14 @@ static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p,
965 * and allocates the socket buffers. It suppors the chained and ring 980 * and allocates the socket buffers. It suppors the chained and ring
966 * modes. 981 * modes.
967 */ 982 */
968static void init_dma_desc_rings(struct net_device *dev) 983static int init_dma_desc_rings(struct net_device *dev)
969{ 984{
970 int i; 985 int i;
971 struct stmmac_priv *priv = netdev_priv(dev); 986 struct stmmac_priv *priv = netdev_priv(dev);
972 unsigned int txsize = priv->dma_tx_size; 987 unsigned int txsize = priv->dma_tx_size;
973 unsigned int rxsize = priv->dma_rx_size; 988 unsigned int rxsize = priv->dma_rx_size;
974 unsigned int bfsize = 0; 989 unsigned int bfsize = 0;
990 int ret = -ENOMEM;
975 991
976 /* Set the max buffer size according to the DESC mode 992 /* Set the max buffer size according to the DESC mode
977 * and the MTU. Note that RING mode allows 16KiB bsize. 993 * and the MTU. Note that RING mode allows 16KiB bsize.
@@ -992,34 +1008,60 @@ static void init_dma_desc_rings(struct net_device *dev)
992 dma_extended_desc), 1008 dma_extended_desc),
993 &priv->dma_rx_phy, 1009 &priv->dma_rx_phy,
994 GFP_KERNEL); 1010 GFP_KERNEL);
1011 if (!priv->dma_erx)
1012 goto err_dma;
1013
995 priv->dma_etx = dma_alloc_coherent(priv->device, txsize * 1014 priv->dma_etx = dma_alloc_coherent(priv->device, txsize *
996 sizeof(struct 1015 sizeof(struct
997 dma_extended_desc), 1016 dma_extended_desc),
998 &priv->dma_tx_phy, 1017 &priv->dma_tx_phy,
999 GFP_KERNEL); 1018 GFP_KERNEL);
1000 if ((!priv->dma_erx) || (!priv->dma_etx)) 1019 if (!priv->dma_etx) {
1001 return; 1020 dma_free_coherent(priv->device, priv->dma_rx_size *
1021 sizeof(struct dma_extended_desc),
1022 priv->dma_erx, priv->dma_rx_phy);
1023 goto err_dma;
1024 }
1002 } else { 1025 } else {
1003 priv->dma_rx = dma_alloc_coherent(priv->device, rxsize * 1026 priv->dma_rx = dma_alloc_coherent(priv->device, rxsize *
1004 sizeof(struct dma_desc), 1027 sizeof(struct dma_desc),
1005 &priv->dma_rx_phy, 1028 &priv->dma_rx_phy,
1006 GFP_KERNEL); 1029 GFP_KERNEL);
1030 if (!priv->dma_rx)
1031 goto err_dma;
1032
1007 priv->dma_tx = dma_alloc_coherent(priv->device, txsize * 1033 priv->dma_tx = dma_alloc_coherent(priv->device, txsize *
1008 sizeof(struct dma_desc), 1034 sizeof(struct dma_desc),
1009 &priv->dma_tx_phy, 1035 &priv->dma_tx_phy,
1010 GFP_KERNEL); 1036 GFP_KERNEL);
1011 if ((!priv->dma_rx) || (!priv->dma_tx)) 1037 if (!priv->dma_tx) {
1012 return; 1038 dma_free_coherent(priv->device, priv->dma_rx_size *
1039 sizeof(struct dma_desc),
1040 priv->dma_rx, priv->dma_rx_phy);
1041 goto err_dma;
1042 }
1013 } 1043 }
1014 1044
1015 priv->rx_skbuff_dma = kmalloc_array(rxsize, sizeof(dma_addr_t), 1045 priv->rx_skbuff_dma = kmalloc_array(rxsize, sizeof(dma_addr_t),
1016 GFP_KERNEL); 1046 GFP_KERNEL);
1047 if (!priv->rx_skbuff_dma)
1048 goto err_rx_skbuff_dma;
1049
1017 priv->rx_skbuff = kmalloc_array(rxsize, sizeof(struct sk_buff *), 1050 priv->rx_skbuff = kmalloc_array(rxsize, sizeof(struct sk_buff *),
1018 GFP_KERNEL); 1051 GFP_KERNEL);
1052 if (!priv->rx_skbuff)
1053 goto err_rx_skbuff;
1054
1019 priv->tx_skbuff_dma = kmalloc_array(txsize, sizeof(dma_addr_t), 1055 priv->tx_skbuff_dma = kmalloc_array(txsize, sizeof(dma_addr_t),
1020 GFP_KERNEL); 1056 GFP_KERNEL);
1057 if (!priv->tx_skbuff_dma)
1058 goto err_tx_skbuff_dma;
1059
1021 priv->tx_skbuff = kmalloc_array(txsize, sizeof(struct sk_buff *), 1060 priv->tx_skbuff = kmalloc_array(txsize, sizeof(struct sk_buff *),
1022 GFP_KERNEL); 1061 GFP_KERNEL);
1062 if (!priv->tx_skbuff)
1063 goto err_tx_skbuff;
1064
1023 if (netif_msg_probe(priv)) { 1065 if (netif_msg_probe(priv)) {
1024 pr_debug("(%s) dma_rx_phy=0x%08x dma_tx_phy=0x%08x\n", __func__, 1066 pr_debug("(%s) dma_rx_phy=0x%08x dma_tx_phy=0x%08x\n", __func__,
1025 (u32) priv->dma_rx_phy, (u32) priv->dma_tx_phy); 1067 (u32) priv->dma_rx_phy, (u32) priv->dma_tx_phy);
@@ -1034,8 +1076,9 @@ static void init_dma_desc_rings(struct net_device *dev)
1034 else 1076 else
1035 p = priv->dma_rx + i; 1077 p = priv->dma_rx + i;
1036 1078
1037 if (stmmac_init_rx_buffers(priv, p, i)) 1079 ret = stmmac_init_rx_buffers(priv, p, i);
1038 break; 1080 if (ret)
1081 goto err_init_rx_buffers;
1039 1082
1040 if (netif_msg_probe(priv)) 1083 if (netif_msg_probe(priv))
1041 pr_debug("[%p]\t[%p]\t[%x]\n", priv->rx_skbuff[i], 1084 pr_debug("[%p]\t[%p]\t[%x]\n", priv->rx_skbuff[i],
@@ -1081,20 +1124,44 @@ static void init_dma_desc_rings(struct net_device *dev)
1081 1124
1082 if (netif_msg_hw(priv)) 1125 if (netif_msg_hw(priv))
1083 stmmac_display_rings(priv); 1126 stmmac_display_rings(priv);
1127
1128 return 0;
1129err_init_rx_buffers:
1130 while (--i >= 0)
1131 stmmac_free_rx_buffers(priv, i);
1132 kfree(priv->tx_skbuff);
1133err_tx_skbuff:
1134 kfree(priv->tx_skbuff_dma);
1135err_tx_skbuff_dma:
1136 kfree(priv->rx_skbuff);
1137err_rx_skbuff:
1138 kfree(priv->rx_skbuff_dma);
1139err_rx_skbuff_dma:
1140 if (priv->extend_desc) {
1141 dma_free_coherent(priv->device, priv->dma_tx_size *
1142 sizeof(struct dma_extended_desc),
1143 priv->dma_etx, priv->dma_tx_phy);
1144 dma_free_coherent(priv->device, priv->dma_rx_size *
1145 sizeof(struct dma_extended_desc),
1146 priv->dma_erx, priv->dma_rx_phy);
1147 } else {
1148 dma_free_coherent(priv->device,
1149 priv->dma_tx_size * sizeof(struct dma_desc),
1150 priv->dma_tx, priv->dma_tx_phy);
1151 dma_free_coherent(priv->device,
1152 priv->dma_rx_size * sizeof(struct dma_desc),
1153 priv->dma_rx, priv->dma_rx_phy);
1154 }
1155err_dma:
1156 return ret;
1084} 1157}
1085 1158
1086static void dma_free_rx_skbufs(struct stmmac_priv *priv) 1159static void dma_free_rx_skbufs(struct stmmac_priv *priv)
1087{ 1160{
1088 int i; 1161 int i;
1089 1162
1090 for (i = 0; i < priv->dma_rx_size; i++) { 1163 for (i = 0; i < priv->dma_rx_size; i++)
1091 if (priv->rx_skbuff[i]) { 1164 stmmac_free_rx_buffers(priv, i);
1092 dma_unmap_single(priv->device, priv->rx_skbuff_dma[i],
1093 priv->dma_buf_sz, DMA_FROM_DEVICE);
1094 dev_kfree_skb_any(priv->rx_skbuff[i]);
1095 }
1096 priv->rx_skbuff[i] = NULL;
1097 }
1098} 1165}
1099 1166
1100static void dma_free_tx_skbufs(struct stmmac_priv *priv) 1167static void dma_free_tx_skbufs(struct stmmac_priv *priv)
@@ -1560,12 +1627,17 @@ static int stmmac_open(struct net_device *dev)
1560 priv->dma_tx_size = STMMAC_ALIGN(dma_txsize); 1627 priv->dma_tx_size = STMMAC_ALIGN(dma_txsize);
1561 priv->dma_rx_size = STMMAC_ALIGN(dma_rxsize); 1628 priv->dma_rx_size = STMMAC_ALIGN(dma_rxsize);
1562 priv->dma_buf_sz = STMMAC_ALIGN(buf_sz); 1629 priv->dma_buf_sz = STMMAC_ALIGN(buf_sz);
1563 init_dma_desc_rings(dev); 1630
1631 ret = init_dma_desc_rings(dev);
1632 if (ret < 0) {
1633 pr_err("%s: DMA descriptors initialization failed\n", __func__);
1634 goto dma_desc_error;
1635 }
1564 1636
1565 /* DMA initialization and SW reset */ 1637 /* DMA initialization and SW reset */
1566 ret = stmmac_init_dma_engine(priv); 1638 ret = stmmac_init_dma_engine(priv);
1567 if (ret < 0) { 1639 if (ret < 0) {
1568 pr_err("%s: DMA initialization failed\n", __func__); 1640 pr_err("%s: DMA engine initialization failed\n", __func__);
1569 goto init_error; 1641 goto init_error;
1570 } 1642 }
1571 1643
@@ -1672,6 +1744,7 @@ wolirq_error:
1672 1744
1673init_error: 1745init_error:
1674 free_dma_desc_resources(priv); 1746 free_dma_desc_resources(priv);
1747dma_desc_error:
1675 if (priv->phydev) 1748 if (priv->phydev)
1676 phy_disconnect(priv->phydev); 1749 phy_disconnect(priv->phydev);
1677phy_error: 1750phy_error:
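Note on the stmmac_main.c rework above: init_dma_desc_rings() changes from a void function that silently gave up on allocation failure to one that returns an error and unwinds every prior allocation through a chain of goto labels, and stmmac_open() now checks its result. Below is a self-contained sketch of that unwind idiom using plain malloc(); the names are invented for illustration and it is not the stmmac code itself.

#include <stdlib.h>

struct rings {
	void *rx_desc;
	void *tx_desc;
	void *rx_skb;
	void *tx_skb;
};

/* Allocate four resources; on any failure, free what was already
 * allocated, in reverse order, and report an error. */
static int rings_init(struct rings *r, size_t n)
{
	r->rx_desc = malloc(n);
	if (!r->rx_desc)
		goto err_rx_desc;
	r->tx_desc = malloc(n);
	if (!r->tx_desc)
		goto err_tx_desc;
	r->rx_skb = malloc(n);
	if (!r->rx_skb)
		goto err_rx_skb;
	r->tx_skb = malloc(n);
	if (!r->tx_skb)
		goto err_tx_skb;
	return 0;

err_tx_skb:
	free(r->rx_skb);
err_rx_skb:
	free(r->tx_desc);
err_tx_desc:
	free(r->rx_desc);
err_rx_desc:
	return -1;			/* think -ENOMEM */
}

int main(void)
{
	struct rings r;

	if (rings_init(&r, 4096))
		return 1;
	free(r.tx_skb);
	free(r.rx_skb);
	free(r.tx_desc);
	free(r.rx_desc);
	return 0;
}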
diff --git a/drivers/net/ethernet/via/via-velocity.c b/drivers/net/ethernet/via/via-velocity.c
index 1d6dc41f755d..d01cacf8a7c2 100644
--- a/drivers/net/ethernet/via/via-velocity.c
+++ b/drivers/net/ethernet/via/via-velocity.c
@@ -2100,7 +2100,7 @@ static int velocity_receive_frame(struct velocity_info *vptr, int idx)
2100 2100
2101 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid); 2101 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
2102 } 2102 }
2103 netif_rx(skb); 2103 netif_receive_skb(skb);
2104 2104
2105 stats->rx_bytes += pkt_len; 2105 stats->rx_bytes += pkt_len;
2106 stats->rx_packets++; 2106 stats->rx_packets++;
@@ -2884,6 +2884,7 @@ out:
2884 return ret; 2884 return ret;
2885 2885
2886err_iounmap: 2886err_iounmap:
2887 netif_napi_del(&vptr->napi);
2887 iounmap(regs); 2888 iounmap(regs);
2888err_free_dev: 2889err_free_dev:
2889 free_netdev(netdev); 2890 free_netdev(netdev);
@@ -2904,6 +2905,7 @@ static int velocity_remove(struct device *dev)
2904 struct velocity_info *vptr = netdev_priv(netdev); 2905 struct velocity_info *vptr = netdev_priv(netdev);
2905 2906
2906 unregister_netdev(netdev); 2907 unregister_netdev(netdev);
2908 netif_napi_del(&vptr->napi);
2907 iounmap(vptr->mac_regs); 2909 iounmap(vptr->mac_regs);
2908 free_netdev(netdev); 2910 free_netdev(netdev);
2909 velocity_nics--; 2911 velocity_nics--;
diff --git a/drivers/net/irda/via-ircc.c b/drivers/net/irda/via-ircc.c
index 51f2bc376101..2dcc60fb37f1 100644
--- a/drivers/net/irda/via-ircc.c
+++ b/drivers/net/irda/via-ircc.c
@@ -210,8 +210,7 @@ static int via_init_one(struct pci_dev *pcidev, const struct pci_device_id *id)
210 pci_write_config_byte(pcidev,0x42,(bTmp | 0xf0)); 210 pci_write_config_byte(pcidev,0x42,(bTmp | 0xf0));
211 pci_write_config_byte(pcidev,0x5a,0xc0); 211 pci_write_config_byte(pcidev,0x5a,0xc0);
212 WriteLPCReg(0x28, 0x70 ); 212 WriteLPCReg(0x28, 0x70 );
213 if (via_ircc_open(pcidev, &info, 0x3076) == 0) 213 rc = via_ircc_open(pcidev, &info, 0x3076);
214 rc=0;
215 } else 214 } else
216 rc = -ENODEV; //IR not turn on 215 rc = -ENODEV; //IR not turn on
217 } else { //Not VT1211 216 } else { //Not VT1211
@@ -249,8 +248,7 @@ static int via_init_one(struct pci_dev *pcidev, const struct pci_device_id *id)
249 info.irq=FirIRQ; 248 info.irq=FirIRQ;
250 info.dma=FirDRQ1; 249 info.dma=FirDRQ1;
251 info.dma2=FirDRQ0; 250 info.dma2=FirDRQ0;
252 if (via_ircc_open(pcidev, &info, 0x3096) == 0) 251 rc = via_ircc_open(pcidev, &info, 0x3096);
253 rc=0;
254 } else 252 } else
255 rc = -ENODEV; //IR not turn on !!!!! 253 rc = -ENODEV; //IR not turn on !!!!!
256 }//Not VT1211 254 }//Not VT1211
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index d0f9c2fd1d4f..16b43bf544b7 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -739,6 +739,10 @@ static int macvlan_validate(struct nlattr *tb[], struct nlattr *data[])
739 return -EADDRNOTAVAIL; 739 return -EADDRNOTAVAIL;
740 } 740 }
741 741
742 if (data && data[IFLA_MACVLAN_FLAGS] &&
743 nla_get_u16(data[IFLA_MACVLAN_FLAGS]) & ~MACVLAN_FLAG_NOPROMISC)
744 return -EINVAL;
745
742 if (data && data[IFLA_MACVLAN_MODE]) { 746 if (data && data[IFLA_MACVLAN_MODE]) {
743 switch (nla_get_u32(data[IFLA_MACVLAN_MODE])) { 747 switch (nla_get_u32(data[IFLA_MACVLAN_MODE])) {
744 case MACVLAN_MODE_PRIVATE: 748 case MACVLAN_MODE_PRIVATE:
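Note on the macvlan_validate() change: any IFLA_MACVLAN_FLAGS value carrying bits other than MACVLAN_FLAG_NOPROMISC is now rejected with -EINVAL, so unknown flag bits cannot be accepted silently. A generic sketch of that "reject unknown bits" check follows; the macro values here are invented for the example and are not the macvlan definitions.

#include <stdint.h>
#include <stdio.h>

#define SUPPORTED_FLAG_A 0x1		/* e.g. a "no promisc" style flag */
#define SUPPORTED_FLAGS  (SUPPORTED_FLAG_A)

/* Return 0 if every set bit is understood, -1 (think -EINVAL) otherwise. */
static int validate_flags(uint16_t flags)
{
	if (flags & ~SUPPORTED_FLAGS)
		return -1;
	return 0;
}

int main(void)
{
	printf("0x0001 -> %d\n", validate_flags(0x0001));	/* accepted */
	printf("0x0003 -> %d\n", validate_flags(0x0003));	/* rejected */
	return 0;
}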
diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
index a98fb0ed6aef..ea53abb20988 100644
--- a/drivers/net/macvtap.c
+++ b/drivers/net/macvtap.c
@@ -68,6 +68,8 @@ static const struct proto_ops macvtap_socket_ops;
68#define TUN_OFFLOADS (NETIF_F_HW_CSUM | NETIF_F_TSO_ECN | NETIF_F_TSO | \ 68#define TUN_OFFLOADS (NETIF_F_HW_CSUM | NETIF_F_TSO_ECN | NETIF_F_TSO | \
69 NETIF_F_TSO6 | NETIF_F_UFO) 69 NETIF_F_TSO6 | NETIF_F_UFO)
70#define RX_OFFLOADS (NETIF_F_GRO | NETIF_F_LRO) 70#define RX_OFFLOADS (NETIF_F_GRO | NETIF_F_LRO)
71#define TAP_FEATURES (NETIF_F_GSO | NETIF_F_SG)
72
71/* 73/*
72 * RCU usage: 74 * RCU usage:
73 * The macvtap_queue and the macvlan_dev are loosely coupled, the 75 * The macvtap_queue and the macvlan_dev are loosely coupled, the
@@ -278,7 +280,8 @@ static int macvtap_forward(struct net_device *dev, struct sk_buff *skb)
278{ 280{
279 struct macvlan_dev *vlan = netdev_priv(dev); 281 struct macvlan_dev *vlan = netdev_priv(dev);
280 struct macvtap_queue *q = macvtap_get_queue(dev, skb); 282 struct macvtap_queue *q = macvtap_get_queue(dev, skb);
281 netdev_features_t features; 283 netdev_features_t features = TAP_FEATURES;
284
282 if (!q) 285 if (!q)
283 goto drop; 286 goto drop;
284 287
@@ -287,9 +290,11 @@ static int macvtap_forward(struct net_device *dev, struct sk_buff *skb)
287 290
288 skb->dev = dev; 291 skb->dev = dev;
289 /* Apply the forward feature mask so that we perform segmentation 292 /* Apply the forward feature mask so that we perform segmentation
290 * according to users wishes. 293 * according to users wishes. This only works if VNET_HDR is
294 * enabled.
291 */ 295 */
292 features = netif_skb_features(skb) & vlan->tap_features; 296 if (q->flags & IFF_VNET_HDR)
297 features |= vlan->tap_features;
293 if (netif_needs_gso(skb, features)) { 298 if (netif_needs_gso(skb, features)) {
294 struct sk_buff *segs = __skb_gso_segment(skb, features, false); 299 struct sk_buff *segs = __skb_gso_segment(skb, features, false);
295 300
@@ -818,10 +823,13 @@ static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m,
818 skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY; 823 skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY;
819 skb_shinfo(skb)->tx_flags |= SKBTX_SHARED_FRAG; 824 skb_shinfo(skb)->tx_flags |= SKBTX_SHARED_FRAG;
820 } 825 }
821 if (vlan) 826 if (vlan) {
827 local_bh_disable();
822 macvlan_start_xmit(skb, vlan->dev); 828 macvlan_start_xmit(skb, vlan->dev);
823 else 829 local_bh_enable();
830 } else {
824 kfree_skb(skb); 831 kfree_skb(skb);
832 }
825 rcu_read_unlock(); 833 rcu_read_unlock();
826 834
827 return total_len; 835 return total_len;
@@ -912,8 +920,11 @@ static ssize_t macvtap_put_user(struct macvtap_queue *q,
912done: 920done:
913 rcu_read_lock(); 921 rcu_read_lock();
914 vlan = rcu_dereference(q->vlan); 922 vlan = rcu_dereference(q->vlan);
915 if (vlan) 923 if (vlan) {
924 preempt_disable();
916 macvlan_count_rx(vlan, copied - vnet_hdr_len, ret == 0, 0); 925 macvlan_count_rx(vlan, copied - vnet_hdr_len, ret == 0, 0);
926 preempt_enable();
927 }
917 rcu_read_unlock(); 928 rcu_read_unlock();
918 929
919 return ret ? ret : copied; 930 return ret ? ret : copied;
@@ -1058,8 +1069,7 @@ static int set_offload(struct macvtap_queue *q, unsigned long arg)
1058 /* tap_features are the same as features on tun/tap and 1069 /* tap_features are the same as features on tun/tap and
1059 * reflect user expectations. 1070 * reflect user expectations.
1060 */ 1071 */
1061 vlan->tap_features = vlan->dev->features & 1072 vlan->tap_features = feature_mask;
1062 (feature_mask | ~TUN_OFFLOADS);
1063 vlan->set_features = features; 1073 vlan->set_features = features;
1064 netdev_update_features(vlan->dev); 1074 netdev_update_features(vlan->dev);
1065 1075
@@ -1155,10 +1165,6 @@ static long macvtap_ioctl(struct file *file, unsigned int cmd,
1155 TUN_F_TSO_ECN | TUN_F_UFO)) 1165 TUN_F_TSO_ECN | TUN_F_UFO))
1156 return -EINVAL; 1166 return -EINVAL;
1157 1167
1158 /* TODO: only accept frames with the features that
1159 got enabled for forwarded frames */
1160 if (!(q->flags & IFF_VNET_HDR))
1161 return -EINVAL;
1162 rtnl_lock(); 1168 rtnl_lock();
1163 ret = set_offload(q, arg); 1169 ret = set_offload(q, arg);
1164 rtnl_unlock(); 1170 rtnl_unlock();
diff --git a/drivers/net/phy/realtek.c b/drivers/net/phy/realtek.c
index 8e7af8354342..138de837977f 100644
--- a/drivers/net/phy/realtek.c
+++ b/drivers/net/phy/realtek.c
@@ -23,7 +23,7 @@
23#define RTL821x_INER_INIT 0x6400 23#define RTL821x_INER_INIT 0x6400
24#define RTL821x_INSR 0x13 24#define RTL821x_INSR 0x13
25 25
26#define RTL8211E_INER_LINK_STAT 0x10 26#define RTL8211E_INER_LINK_STATUS 0x400
27 27
28MODULE_DESCRIPTION("Realtek PHY driver"); 28MODULE_DESCRIPTION("Realtek PHY driver");
29MODULE_AUTHOR("Johnson Leung"); 29MODULE_AUTHOR("Johnson Leung");
@@ -57,7 +57,7 @@ static int rtl8211e_config_intr(struct phy_device *phydev)
57 57
58 if (phydev->interrupts == PHY_INTERRUPT_ENABLED) 58 if (phydev->interrupts == PHY_INTERRUPT_ENABLED)
59 err = phy_write(phydev, RTL821x_INER, 59 err = phy_write(phydev, RTL821x_INER,
60 RTL8211E_INER_LINK_STAT); 60 RTL8211E_INER_LINK_STATUS);
61 else 61 else
62 err = phy_write(phydev, RTL821x_INER, 0); 62 err = phy_write(phydev, RTL821x_INER, 0);
63 63
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index db690a372260..71af122edf2d 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -1074,8 +1074,9 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
1074 u32 rxhash; 1074 u32 rxhash;
1075 1075
1076 if (!(tun->flags & TUN_NO_PI)) { 1076 if (!(tun->flags & TUN_NO_PI)) {
1077 if ((len -= sizeof(pi)) > total_len) 1077 if (len < sizeof(pi))
1078 return -EINVAL; 1078 return -EINVAL;
1079 len -= sizeof(pi);
1079 1080
1080 if (memcpy_fromiovecend((void *)&pi, iv, 0, sizeof(pi))) 1081 if (memcpy_fromiovecend((void *)&pi, iv, 0, sizeof(pi)))
1081 return -EFAULT; 1082 return -EFAULT;
@@ -1083,8 +1084,9 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
1083 } 1084 }
1084 1085
1085 if (tun->flags & TUN_VNET_HDR) { 1086 if (tun->flags & TUN_VNET_HDR) {
1086 if ((len -= tun->vnet_hdr_sz) > total_len) 1087 if (len < tun->vnet_hdr_sz)
1087 return -EINVAL; 1088 return -EINVAL;
1089 len -= tun->vnet_hdr_sz;
1088 1090
1089 if (memcpy_fromiovecend((void *)&gso, iv, offset, sizeof(gso))) 1091 if (memcpy_fromiovecend((void *)&gso, iv, offset, sizeof(gso)))
1090 return -EFAULT; 1092 return -EFAULT;
diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
index cba1d46e672e..86292e6aaf49 100644
--- a/drivers/net/usb/hso.c
+++ b/drivers/net/usb/hso.c
@@ -2816,13 +2816,16 @@ exit:
2816static int hso_get_config_data(struct usb_interface *interface) 2816static int hso_get_config_data(struct usb_interface *interface)
2817{ 2817{
2818 struct usb_device *usbdev = interface_to_usbdev(interface); 2818 struct usb_device *usbdev = interface_to_usbdev(interface);
2819 u8 config_data[17]; 2819 u8 *config_data = kmalloc(17, GFP_KERNEL);
2820 u32 if_num = interface->altsetting->desc.bInterfaceNumber; 2820 u32 if_num = interface->altsetting->desc.bInterfaceNumber;
2821 s32 result; 2821 s32 result;
2822 2822
2823 if (!config_data)
2824 return -ENOMEM;
2823 if (usb_control_msg(usbdev, usb_rcvctrlpipe(usbdev, 0), 2825 if (usb_control_msg(usbdev, usb_rcvctrlpipe(usbdev, 0),
2824 0x86, 0xC0, 0, 0, config_data, 17, 2826 0x86, 0xC0, 0, 0, config_data, 17,
2825 USB_CTRL_SET_TIMEOUT) != 0x11) { 2827 USB_CTRL_SET_TIMEOUT) != 0x11) {
2828 kfree(config_data);
2826 return -EIO; 2829 return -EIO;
2827 } 2830 }
2828 2831
@@ -2873,6 +2876,7 @@ static int hso_get_config_data(struct usb_interface *interface)
2873 if (config_data[16] & 0x1) 2876 if (config_data[16] & 0x1)
2874 result |= HSO_INFO_CRC_BUG; 2877 result |= HSO_INFO_CRC_BUG;
2875 2878
2879 kfree(config_data);
2876 return result; 2880 return result;
2877} 2881}
2878 2882
@@ -2886,6 +2890,11 @@ static int hso_probe(struct usb_interface *interface,
2886 struct hso_shared_int *shared_int; 2890 struct hso_shared_int *shared_int;
2887 struct hso_device *tmp_dev = NULL; 2891 struct hso_device *tmp_dev = NULL;
2888 2892
2893 if (interface->cur_altsetting->desc.bInterfaceClass != 0xFF) {
2894 dev_err(&interface->dev, "Not our interface\n");
2895 return -ENODEV;
2896 }
2897
2889 if_num = interface->altsetting->desc.bInterfaceNumber; 2898 if_num = interface->altsetting->desc.bInterfaceNumber;
2890 2899
2891 /* Get the interface/port specification from either driver_info or from 2900 /* Get the interface/port specification from either driver_info or from
@@ -2895,10 +2904,6 @@ static int hso_probe(struct usb_interface *interface,
2895 else 2904 else
2896 port_spec = hso_get_config_data(interface); 2905 port_spec = hso_get_config_data(interface);
2897 2906
2898 if (interface->cur_altsetting->desc.bInterfaceClass != 0xFF) {
2899 dev_err(&interface->dev, "Not our interface\n");
2900 return -ENODEV;
2901 }
2902 /* Check if we need to switch to alt interfaces prior to port 2907 /* Check if we need to switch to alt interfaces prior to port
2903 * configuration */ 2908 * configuration */
2904 if (interface->num_altsetting > 1) 2909 if (interface->num_altsetting > 1)
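Note on hso_get_config_data(): config_data moves from the stack to kmalloc() (buffers handed to usb_control_msg() must be DMA-capable, which stack memory is not guaranteed to be), and with that the buffer has to be kfree()d on the early -EIO return as well as on the normal exit. A generic sketch of the "allocate, use, free on every exit path" shape, with plain malloc stand-ins rather than the hso code:

#include <stdlib.h>
#include <string.h>

/* Stand-in for a transfer that fills buf and returns bytes "received". */
static int fake_control_msg(unsigned char *buf, size_t len)
{
	memset(buf, 0xab, len);
	return (int)len;
}

static int get_config_data(void)
{
	unsigned char *config_data = malloc(17);
	int result;

	if (!config_data)
		return -1;			/* -ENOMEM */

	if (fake_control_msg(config_data, 17) != 17) {
		free(config_data);		/* error path must free too */
		return -2;			/* -EIO */
	}

	result = config_data[16] & 0x1;		/* derive something from it */
	free(config_data);			/* normal path frees as well */
	return result;
}

int main(void)
{
	return get_config_data() < 0 ? 1 : 0;
}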
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index f4c6db419ddb..767f7af3bd40 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -1386,7 +1386,7 @@ static int vxlan_open(struct net_device *dev)
1386 return -ENOTCONN; 1386 return -ENOTCONN;
1387 1387
1388 if (IN_MULTICAST(ntohl(vxlan->default_dst.remote_ip)) && 1388 if (IN_MULTICAST(ntohl(vxlan->default_dst.remote_ip)) &&
1389 ! vxlan_group_used(vn, vxlan->default_dst.remote_ip)) { 1389 vxlan_group_used(vn, vxlan->default_dst.remote_ip)) {
1390 vxlan_sock_hold(vs); 1390 vxlan_sock_hold(vs);
1391 dev_hold(dev); 1391 dev_hold(dev);
1392 queue_work(vxlan_wq, &vxlan->igmp_join); 1392 queue_work(vxlan_wq, &vxlan->igmp_join);
@@ -1793,8 +1793,6 @@ static void vxlan_dellink(struct net_device *dev, struct list_head *head)
1793 struct vxlan_net *vn = net_generic(dev_net(dev), vxlan_net_id); 1793 struct vxlan_net *vn = net_generic(dev_net(dev), vxlan_net_id);
1794 struct vxlan_dev *vxlan = netdev_priv(dev); 1794 struct vxlan_dev *vxlan = netdev_priv(dev);
1795 1795
1796 flush_workqueue(vxlan_wq);
1797
1798 spin_lock(&vn->sock_lock); 1796 spin_lock(&vn->sock_lock);
1799 hlist_del_rcu(&vxlan->hlist); 1797 hlist_del_rcu(&vxlan->hlist);
1800 spin_unlock(&vn->sock_lock); 1798 spin_unlock(&vn->sock_lock);
diff --git a/drivers/net/wireless/cw1200/sta.c b/drivers/net/wireless/cw1200/sta.c
index 7365674366f4..010b252be584 100644
--- a/drivers/net/wireless/cw1200/sta.c
+++ b/drivers/net/wireless/cw1200/sta.c
@@ -1406,11 +1406,8 @@ static void cw1200_do_unjoin(struct cw1200_common *priv)
1406 if (!priv->join_status) 1406 if (!priv->join_status)
1407 goto done; 1407 goto done;
1408 1408
1409 if (priv->join_status > CW1200_JOIN_STATUS_IBSS) { 1409 if (priv->join_status == CW1200_JOIN_STATUS_AP)
1410 wiphy_err(priv->hw->wiphy, "Unexpected: join status: %d\n", 1410 goto done;
1411 priv->join_status);
1412 BUG_ON(1);
1413 }
1414 1411
1415 cancel_work_sync(&priv->update_filtering_work); 1412 cancel_work_sync(&priv->update_filtering_work);
1416 cancel_work_sync(&priv->set_beacon_wakeup_period_work); 1413 cancel_work_sync(&priv->set_beacon_wakeup_period_work);
diff --git a/drivers/net/wireless/hostap/hostap_ioctl.c b/drivers/net/wireless/hostap/hostap_ioctl.c
index ac074731335a..e5090309824e 100644
--- a/drivers/net/wireless/hostap/hostap_ioctl.c
+++ b/drivers/net/wireless/hostap/hostap_ioctl.c
@@ -523,9 +523,9 @@ static int prism2_ioctl_giwaplist(struct net_device *dev,
523 523
524 data->length = prism2_ap_get_sta_qual(local, addr, qual, IW_MAX_AP, 1); 524 data->length = prism2_ap_get_sta_qual(local, addr, qual, IW_MAX_AP, 1);
525 525
526 memcpy(extra, &addr, sizeof(struct sockaddr) * data->length); 526 memcpy(extra, addr, sizeof(struct sockaddr) * data->length);
527 data->flags = 1; /* has quality information */ 527 data->flags = 1; /* has quality information */
528 memcpy(extra + sizeof(struct sockaddr) * data->length, &qual, 528 memcpy(extra + sizeof(struct sockaddr) * data->length, qual,
529 sizeof(struct iw_quality) * data->length); 529 sizeof(struct iw_quality) * data->length);
530 530
531 kfree(addr); 531 kfree(addr);
diff --git a/drivers/net/wireless/iwlegacy/4965-mac.c b/drivers/net/wireless/iwlegacy/4965-mac.c
index b9b2bb51e605..f2ed62e37340 100644
--- a/drivers/net/wireless/iwlegacy/4965-mac.c
+++ b/drivers/net/wireless/iwlegacy/4965-mac.c
@@ -4460,12 +4460,12 @@ il4965_irq_tasklet(struct il_priv *il)
4460 * is killed. Hence update the killswitch state here. The 4460 * is killed. Hence update the killswitch state here. The
4461 * rfkill handler will care about restarting if needed. 4461 * rfkill handler will care about restarting if needed.
4462 */ 4462 */
4463 if (!test_bit(S_ALIVE, &il->status)) { 4463 if (hw_rf_kill) {
4464 if (hw_rf_kill) 4464 set_bit(S_RFKILL, &il->status);
4465 set_bit(S_RFKILL, &il->status); 4465 } else {
4466 else 4466 clear_bit(S_RFKILL, &il->status);
4467 clear_bit(S_RFKILL, &il->status);
4468 wiphy_rfkill_set_hw_state(il->hw->wiphy, hw_rf_kill); 4467 wiphy_rfkill_set_hw_state(il->hw->wiphy, hw_rf_kill);
4468 il_force_reset(il, true);
4469 } 4469 }
4470 4470
4471 handled |= CSR_INT_BIT_RF_KILL; 4471 handled |= CSR_INT_BIT_RF_KILL;
@@ -5334,6 +5334,9 @@ il4965_alive_start(struct il_priv *il)
5334 5334
5335 il->active_rate = RATES_MASK; 5335 il->active_rate = RATES_MASK;
5336 5336
5337 il_power_update_mode(il, true);
5338 D_INFO("Updated power mode\n");
5339
5337 if (il_is_associated(il)) { 5340 if (il_is_associated(il)) {
5338 struct il_rxon_cmd *active_rxon = 5341 struct il_rxon_cmd *active_rxon =
5339 (struct il_rxon_cmd *)&il->active; 5342 (struct il_rxon_cmd *)&il->active;
@@ -5364,9 +5367,6 @@ il4965_alive_start(struct il_priv *il)
5364 D_INFO("ALIVE processing complete.\n"); 5367 D_INFO("ALIVE processing complete.\n");
5365 wake_up(&il->wait_command_queue); 5368 wake_up(&il->wait_command_queue);
5366 5369
5367 il_power_update_mode(il, true);
5368 D_INFO("Updated power mode\n");
5369
5370 return; 5370 return;
5371 5371
5372restart: 5372restart:
diff --git a/drivers/net/wireless/iwlegacy/common.c b/drivers/net/wireless/iwlegacy/common.c
index 3195aad440dd..b03e22ef5462 100644
--- a/drivers/net/wireless/iwlegacy/common.c
+++ b/drivers/net/wireless/iwlegacy/common.c
@@ -4660,6 +4660,7 @@ il_force_reset(struct il_priv *il, bool external)
4660 4660
4661 return 0; 4661 return 0;
4662} 4662}
4663EXPORT_SYMBOL(il_force_reset);
4663 4664
4664int 4665int
4665il_mac_change_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif, 4666il_mac_change_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
diff --git a/drivers/net/wireless/iwlwifi/dvm/mac80211.c b/drivers/net/wireless/iwlwifi/dvm/mac80211.c
index 822f1a00efbb..319387263e12 100644
--- a/drivers/net/wireless/iwlwifi/dvm/mac80211.c
+++ b/drivers/net/wireless/iwlwifi/dvm/mac80211.c
@@ -1068,7 +1068,10 @@ void iwl_chswitch_done(struct iwl_priv *priv, bool is_success)
1068 if (test_bit(STATUS_EXIT_PENDING, &priv->status)) 1068 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
1069 return; 1069 return;
1070 1070
1071 if (test_and_clear_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status)) 1071 if (!test_and_clear_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status))
1072 return;
1073
1074 if (ctx->vif)
1072 ieee80211_chswitch_done(ctx->vif, is_success); 1075 ieee80211_chswitch_done(ctx->vif, is_success);
1073} 1076}
1074 1077
diff --git a/drivers/net/wireless/iwlwifi/iwl-prph.h b/drivers/net/wireless/iwlwifi/iwl-prph.h
index a70c7b9d9bad..ff8cc75c189d 100644
--- a/drivers/net/wireless/iwlwifi/iwl-prph.h
+++ b/drivers/net/wireless/iwlwifi/iwl-prph.h
@@ -97,8 +97,6 @@
97 97
98#define APMG_PCIDEV_STT_VAL_L1_ACT_DIS (0x00000800) 98#define APMG_PCIDEV_STT_VAL_L1_ACT_DIS (0x00000800)
99 99
100#define APMG_RTC_INT_STT_RFKILL (0x10000000)
101
102/* Device system time */ 100/* Device system time */
103#define DEVICE_SYSTEM_TIME_REG 0xA0206C 101#define DEVICE_SYSTEM_TIME_REG 0xA0206C
104 102
diff --git a/drivers/net/wireless/iwlwifi/mvm/time-event.c b/drivers/net/wireless/iwlwifi/mvm/time-event.c
index ad9bbca99213..7fd6fbfbc1b3 100644
--- a/drivers/net/wireless/iwlwifi/mvm/time-event.c
+++ b/drivers/net/wireless/iwlwifi/mvm/time-event.c
@@ -138,6 +138,20 @@ static void iwl_mvm_roc_finished(struct iwl_mvm *mvm)
138 schedule_work(&mvm->roc_done_wk); 138 schedule_work(&mvm->roc_done_wk);
139} 139}
140 140
141static bool iwl_mvm_te_check_disconnect(struct iwl_mvm *mvm,
142 struct ieee80211_vif *vif,
143 const char *errmsg)
144{
145 if (vif->type != NL80211_IFTYPE_STATION)
146 return false;
147 if (vif->bss_conf.assoc && vif->bss_conf.dtim_period)
148 return false;
149 if (errmsg)
150 IWL_ERR(mvm, "%s\n", errmsg);
151 ieee80211_connection_loss(vif);
152 return true;
153}
154
141/* 155/*
142 * Handles a FW notification for an event that is known to the driver. 156 * Handles a FW notification for an event that is known to the driver.
143 * 157 *
@@ -163,8 +177,13 @@ static void iwl_mvm_te_handle_notif(struct iwl_mvm *mvm,
163 * P2P Device discoveribility, while there are other higher priority 177 * P2P Device discoveribility, while there are other higher priority
164 * events in the system). 178 * events in the system).
165 */ 179 */
166 WARN_ONCE(!le32_to_cpu(notif->status), 180 if (WARN_ONCE(!le32_to_cpu(notif->status),
167 "Failed to schedule time event\n"); 181 "Failed to schedule time event\n")) {
182 if (iwl_mvm_te_check_disconnect(mvm, te_data->vif, NULL)) {
183 iwl_mvm_te_clear_data(mvm, te_data);
184 return;
185 }
186 }
168 187
169 if (le32_to_cpu(notif->action) & TE_NOTIF_HOST_EVENT_END) { 188 if (le32_to_cpu(notif->action) & TE_NOTIF_HOST_EVENT_END) {
170 IWL_DEBUG_TE(mvm, 189 IWL_DEBUG_TE(mvm,
@@ -180,14 +199,8 @@ static void iwl_mvm_te_handle_notif(struct iwl_mvm *mvm,
180 * By now, we should have finished association 199 * By now, we should have finished association
181 * and know the dtim period. 200 * and know the dtim period.
182 */ 201 */
183 if (te_data->vif->type == NL80211_IFTYPE_STATION && 202 iwl_mvm_te_check_disconnect(mvm, te_data->vif,
184 (!te_data->vif->bss_conf.assoc || 203 "No assocation and the time event is over already...");
185 !te_data->vif->bss_conf.dtim_period)) {
186 IWL_ERR(mvm,
187 "No assocation and the time event is over already...\n");
188 ieee80211_connection_loss(te_data->vif);
189 }
190
191 iwl_mvm_te_clear_data(mvm, te_data); 204 iwl_mvm_te_clear_data(mvm, te_data);
192 } else if (le32_to_cpu(notif->action) & TE_NOTIF_HOST_EVENT_START) { 205 } else if (le32_to_cpu(notif->action) & TE_NOTIF_HOST_EVENT_START) {
193 te_data->running = true; 206 te_data->running = true;
diff --git a/drivers/net/wireless/iwlwifi/pcie/rx.c b/drivers/net/wireless/iwlwifi/pcie/rx.c
index f600e68a410a..fd848cd1583e 100644
--- a/drivers/net/wireless/iwlwifi/pcie/rx.c
+++ b/drivers/net/wireless/iwlwifi/pcie/rx.c
@@ -888,14 +888,6 @@ irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id)
888 888
889 iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill); 889 iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill);
890 if (hw_rfkill) { 890 if (hw_rfkill) {
891 /*
892 * Clear the interrupt in APMG if the NIC is going down.
893 * Note that when the NIC exits RFkill (else branch), we
894 * can't access prph and the NIC will be reset in
895 * start_hw anyway.
896 */
897 iwl_write_prph(trans, APMG_RTC_INT_STT_REG,
898 APMG_RTC_INT_STT_RFKILL);
899 set_bit(STATUS_RFKILL, &trans_pcie->status); 891 set_bit(STATUS_RFKILL, &trans_pcie->status);
900 if (test_and_clear_bit(STATUS_HCMD_ACTIVE, 892 if (test_and_clear_bit(STATUS_HCMD_ACTIVE,
901 &trans_pcie->status)) 893 &trans_pcie->status))
diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c
index 96cfcdd39079..390e2f058aff 100644
--- a/drivers/net/wireless/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/iwlwifi/pcie/trans.c
@@ -1502,16 +1502,16 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
1502 spin_lock_init(&trans_pcie->reg_lock); 1502 spin_lock_init(&trans_pcie->reg_lock);
1503 init_waitqueue_head(&trans_pcie->ucode_write_waitq); 1503 init_waitqueue_head(&trans_pcie->ucode_write_waitq);
1504 1504
1505 /* W/A - seems to solve weird behavior. We need to remove this if we
1506 * don't want to stay in L1 all the time. This wastes a lot of power */
1507 pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 |
1508 PCIE_LINK_STATE_CLKPM);
1509
1510 if (pci_enable_device(pdev)) { 1505 if (pci_enable_device(pdev)) {
1511 err = -ENODEV; 1506 err = -ENODEV;
1512 goto out_no_pci; 1507 goto out_no_pci;
1513 } 1508 }
1514 1509
1510 /* W/A - seems to solve weird behavior. We need to remove this if we
1511 * don't want to stay in L1 all the time. This wastes a lot of power */
1512 pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 |
1513 PCIE_LINK_STATE_CLKPM);
1514
1515 pci_set_master(pdev); 1515 pci_set_master(pdev);
1516 1516
1517 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(36)); 1517 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(36));
diff --git a/drivers/net/wireless/zd1201.c b/drivers/net/wireless/zd1201.c
index 4941f201d6c8..b8ba1f925e75 100644
--- a/drivers/net/wireless/zd1201.c
+++ b/drivers/net/wireless/zd1201.c
@@ -98,10 +98,12 @@ static int zd1201_fw_upload(struct usb_device *dev, int apfw)
98 goto exit; 98 goto exit;
99 99
100 err = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), 0x4, 100 err = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), 0x4,
101 USB_DIR_IN | 0x40, 0,0, &ret, sizeof(ret), ZD1201_FW_TIMEOUT); 101 USB_DIR_IN | 0x40, 0, 0, buf, sizeof(ret), ZD1201_FW_TIMEOUT);
102 if (err < 0) 102 if (err < 0)
103 goto exit; 103 goto exit;
104 104
105 memcpy(&ret, buf, sizeof(ret));
106
105 if (ret & 0x80) { 107 if (ret & 0x80) {
106 err = -EIO; 108 err = -EIO;
107 goto exit; 109 goto exit;
diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c
index 6bb7cf2de556..b10ba00cc3e6 100644
--- a/drivers/of/fdt.c
+++ b/drivers/of/fdt.c
@@ -392,6 +392,8 @@ static void __unflatten_device_tree(struct boot_param_header *blob,
392 mem = (unsigned long) 392 mem = (unsigned long)
393 dt_alloc(size + 4, __alignof__(struct device_node)); 393 dt_alloc(size + 4, __alignof__(struct device_node));
394 394
395 memset((void *)mem, 0, size);
396
395 ((__be32 *)mem)[size / 4] = cpu_to_be32(0xdeadbeef); 397 ((__be32 *)mem)[size / 4] = cpu_to_be32(0xdeadbeef);
396 398
397 pr_debug(" unflattening %lx...\n", mem); 399 pr_debug(" unflattening %lx...\n", mem);
diff --git a/drivers/pci/hotplug/acpiphp.h b/drivers/pci/hotplug/acpiphp.h
index 6fdd49c6f0b9..f4e028924667 100644
--- a/drivers/pci/hotplug/acpiphp.h
+++ b/drivers/pci/hotplug/acpiphp.h
@@ -49,6 +49,7 @@
49#define info(format, arg...) printk(KERN_INFO "%s: " format, MY_NAME , ## arg) 49#define info(format, arg...) printk(KERN_INFO "%s: " format, MY_NAME , ## arg)
50#define warn(format, arg...) printk(KERN_WARNING "%s: " format, MY_NAME , ## arg) 50#define warn(format, arg...) printk(KERN_WARNING "%s: " format, MY_NAME , ## arg)
51 51
52struct acpiphp_context;
52struct acpiphp_bridge; 53struct acpiphp_bridge;
53struct acpiphp_slot; 54struct acpiphp_slot;
54 55
@@ -59,6 +60,7 @@ struct slot {
59 struct hotplug_slot *hotplug_slot; 60 struct hotplug_slot *hotplug_slot;
60 struct acpiphp_slot *acpi_slot; 61 struct acpiphp_slot *acpi_slot;
61 struct hotplug_slot_info info; 62 struct hotplug_slot_info info;
63 unsigned int sun; /* ACPI _SUN (Slot User Number) value */
62}; 64};
63 65
64static inline const char *slot_name(struct slot *slot) 66static inline const char *slot_name(struct slot *slot)
@@ -75,15 +77,11 @@ struct acpiphp_bridge {
75 struct list_head list; 77 struct list_head list;
76 struct list_head slots; 78 struct list_head slots;
77 struct kref ref; 79 struct kref ref;
78 acpi_handle handle;
79 80
80 /* Ejectable PCI-to-PCI bridge (PCI bridge and PCI function) */ 81 struct acpiphp_context *context;
81 struct acpiphp_func *func;
82 82
83 int nr_slots; 83 int nr_slots;
84 84
85 u32 flags;
86
87 /* This bus (host bridge) or Secondary bus (PCI-to-PCI bridge) */ 85 /* This bus (host bridge) or Secondary bus (PCI-to-PCI bridge) */
88 struct pci_bus *pci_bus; 86 struct pci_bus *pci_bus;
89 87
@@ -99,15 +97,13 @@ struct acpiphp_bridge {
99 */ 97 */
100struct acpiphp_slot { 98struct acpiphp_slot {
101 struct list_head node; 99 struct list_head node;
102 struct acpiphp_bridge *bridge; /* parent */ 100 struct pci_bus *bus;
103 struct list_head funcs; /* one slot may have different 101 struct list_head funcs; /* one slot may have different
104 objects (i.e. for each function) */ 102 objects (i.e. for each function) */
105 struct slot *slot; 103 struct slot *slot;
106 struct mutex crit_sect; 104 struct mutex crit_sect;
107 105
108 u8 device; /* pci device# */ 106 u8 device; /* pci device# */
109
110 unsigned long long sun; /* ACPI _SUN (slot unique number) */
111 u32 flags; /* see below */ 107 u32 flags; /* see below */
112}; 108};
113 109
@@ -119,16 +115,32 @@ struct acpiphp_slot {
119 * typically 8 objects per slot (i.e. for each PCI function) 115 * typically 8 objects per slot (i.e. for each PCI function)
120 */ 116 */
121struct acpiphp_func { 117struct acpiphp_func {
122 struct acpiphp_slot *slot; /* parent */ 118 struct acpiphp_bridge *parent;
119 struct acpiphp_slot *slot;
123 120
124 struct list_head sibling; 121 struct list_head sibling;
125 struct notifier_block nb;
126 acpi_handle handle;
127 122
128 u8 function; /* pci function# */ 123 u8 function; /* pci function# */
129 u32 flags; /* see below */ 124 u32 flags; /* see below */
130}; 125};
131 126
127struct acpiphp_context {
128 acpi_handle handle;
129 struct acpiphp_func func;
130 struct acpiphp_bridge *bridge;
131 unsigned int refcount;
132};
133
134static inline struct acpiphp_context *func_to_context(struct acpiphp_func *func)
135{
136 return container_of(func, struct acpiphp_context, func);
137}
138
139static inline acpi_handle func_to_handle(struct acpiphp_func *func)
140{
141 return func_to_context(func)->handle;
142}
143
132/* 144/*
133 * struct acpiphp_attention_info - device specific attention registration 145 * struct acpiphp_attention_info - device specific attention registration
134 * 146 *
@@ -142,45 +154,32 @@ struct acpiphp_attention_info
142 struct module *owner; 154 struct module *owner;
143}; 155};
144 156
145/* PCI bus bridge HID */
146#define ACPI_PCI_HOST_HID "PNP0A03"
147
148/* ACPI _STA method value (ignore bit 4; battery present) */ 157/* ACPI _STA method value (ignore bit 4; battery present) */
149#define ACPI_STA_ALL (0x0000000f) 158#define ACPI_STA_ALL (0x0000000f)
150 159
151/* bridge flags */
152#define BRIDGE_HAS_EJ0 (0x00000001)
153
154/* slot flags */ 160/* slot flags */
155 161
156#define SLOT_POWEREDON (0x00000001) 162#define SLOT_ENABLED (0x00000001)
157#define SLOT_ENABLED (0x00000002)
158#define SLOT_MULTIFUNCTION (0x00000004)
159 163
160/* function flags */ 164/* function flags */
161 165
162#define FUNC_HAS_STA (0x00000001) 166#define FUNC_HAS_STA (0x00000001)
163#define FUNC_HAS_EJ0 (0x00000002) 167#define FUNC_HAS_EJ0 (0x00000002)
164#define FUNC_HAS_PS0 (0x00000010) 168#define FUNC_HAS_DCK (0x00000004)
165#define FUNC_HAS_PS1 (0x00000020)
166#define FUNC_HAS_PS2 (0x00000040)
167#define FUNC_HAS_PS3 (0x00000080)
168#define FUNC_HAS_DCK (0x00000100)
169 169
170/* function prototypes */ 170/* function prototypes */
171 171
172/* acpiphp_core.c */ 172/* acpiphp_core.c */
173int acpiphp_register_attention(struct acpiphp_attention_info*info); 173int acpiphp_register_attention(struct acpiphp_attention_info*info);
174int acpiphp_unregister_attention(struct acpiphp_attention_info *info); 174int acpiphp_unregister_attention(struct acpiphp_attention_info *info);
175int acpiphp_register_hotplug_slot(struct acpiphp_slot *slot); 175int acpiphp_register_hotplug_slot(struct acpiphp_slot *slot, unsigned int sun);
176void acpiphp_unregister_hotplug_slot(struct acpiphp_slot *slot); 176void acpiphp_unregister_hotplug_slot(struct acpiphp_slot *slot);
177 177
178/* acpiphp_glue.c */ 178/* acpiphp_glue.c */
179typedef int (*acpiphp_callback)(struct acpiphp_slot *slot, void *data); 179typedef int (*acpiphp_callback)(struct acpiphp_slot *slot, void *data);
180 180
181int acpiphp_enable_slot(struct acpiphp_slot *slot); 181int acpiphp_enable_slot(struct acpiphp_slot *slot);
182int acpiphp_disable_slot(struct acpiphp_slot *slot); 182int acpiphp_disable_and_eject_slot(struct acpiphp_slot *slot);
183int acpiphp_eject_slot(struct acpiphp_slot *slot);
184u8 acpiphp_get_power_status(struct acpiphp_slot *slot); 183u8 acpiphp_get_power_status(struct acpiphp_slot *slot);
185u8 acpiphp_get_attention_status(struct acpiphp_slot *slot); 184u8 acpiphp_get_attention_status(struct acpiphp_slot *slot);
186u8 acpiphp_get_latch_status(struct acpiphp_slot *slot); 185u8 acpiphp_get_latch_status(struct acpiphp_slot *slot);
diff --git a/drivers/pci/hotplug/acpiphp_core.c b/drivers/pci/hotplug/acpiphp_core.c
index ca8127950fcd..bf2203ef1308 100644
--- a/drivers/pci/hotplug/acpiphp_core.c
+++ b/drivers/pci/hotplug/acpiphp_core.c
@@ -155,15 +155,11 @@ static int enable_slot(struct hotplug_slot *hotplug_slot)
155static int disable_slot(struct hotplug_slot *hotplug_slot) 155static int disable_slot(struct hotplug_slot *hotplug_slot)
156{ 156{
157 struct slot *slot = hotplug_slot->private; 157 struct slot *slot = hotplug_slot->private;
158 int retval;
159 158
160 dbg("%s - physical_slot = %s\n", __func__, slot_name(slot)); 159 dbg("%s - physical_slot = %s\n", __func__, slot_name(slot));
161 160
162 /* disable the specified slot */ 161 /* disable the specified slot */
163 retval = acpiphp_disable_slot(slot->acpi_slot); 162 return acpiphp_disable_and_eject_slot(slot->acpi_slot);
164 if (!retval)
165 retval = acpiphp_eject_slot(slot->acpi_slot);
166 return retval;
167} 163}
168 164
169 165
@@ -290,7 +286,8 @@ static void release_slot(struct hotplug_slot *hotplug_slot)
290} 286}
291 287
292/* callback routine to initialize 'struct slot' for each slot */ 288/* callback routine to initialize 'struct slot' for each slot */
293int acpiphp_register_hotplug_slot(struct acpiphp_slot *acpiphp_slot) 289int acpiphp_register_hotplug_slot(struct acpiphp_slot *acpiphp_slot,
290 unsigned int sun)
294{ 291{
295 struct slot *slot; 292 struct slot *slot;
296 int retval = -ENOMEM; 293 int retval = -ENOMEM;
@@ -317,12 +314,11 @@ int acpiphp_register_hotplug_slot(struct acpiphp_slot *acpiphp_slot)
317 slot->hotplug_slot->info->adapter_status = acpiphp_get_adapter_status(slot->acpi_slot); 314 slot->hotplug_slot->info->adapter_status = acpiphp_get_adapter_status(slot->acpi_slot);
318 315
319 acpiphp_slot->slot = slot; 316 acpiphp_slot->slot = slot;
320 snprintf(name, SLOT_NAME_SIZE, "%llu", slot->acpi_slot->sun); 317 slot->sun = sun;
318 snprintf(name, SLOT_NAME_SIZE, "%u", sun);
321 319
322 retval = pci_hp_register(slot->hotplug_slot, 320 retval = pci_hp_register(slot->hotplug_slot, acpiphp_slot->bus,
323 acpiphp_slot->bridge->pci_bus, 321 acpiphp_slot->device, name);
324 acpiphp_slot->device,
325 name);
326 if (retval == -EBUSY) 322 if (retval == -EBUSY)
327 goto error_hpslot; 323 goto error_hpslot;
328 if (retval) { 324 if (retval) {
diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c
index 59df8575a48c..8054ddcdaed0 100644
--- a/drivers/pci/hotplug/acpiphp_glue.c
+++ b/drivers/pci/hotplug/acpiphp_glue.c
@@ -46,6 +46,7 @@
46#include <linux/pci.h> 46#include <linux/pci.h>
47#include <linux/pci_hotplug.h> 47#include <linux/pci_hotplug.h>
48#include <linux/pci-acpi.h> 48#include <linux/pci-acpi.h>
49#include <linux/pm_runtime.h>
49#include <linux/mutex.h> 50#include <linux/mutex.h>
50#include <linux/slab.h> 51#include <linux/slab.h>
51#include <linux/acpi.h> 52#include <linux/acpi.h>
@@ -55,28 +56,82 @@
55 56
56static LIST_HEAD(bridge_list); 57static LIST_HEAD(bridge_list);
57static DEFINE_MUTEX(bridge_mutex); 58static DEFINE_MUTEX(bridge_mutex);
59static DEFINE_MUTEX(acpiphp_context_lock);
58 60
59#define MY_NAME "acpiphp_glue" 61#define MY_NAME "acpiphp_glue"
60 62
61static void handle_hotplug_event_bridge (acpi_handle, u32, void *); 63static void handle_hotplug_event(acpi_handle handle, u32 type, void *data);
62static void acpiphp_sanitize_bus(struct pci_bus *bus); 64static void acpiphp_sanitize_bus(struct pci_bus *bus);
63static void acpiphp_set_hpp_values(struct pci_bus *bus); 65static void acpiphp_set_hpp_values(struct pci_bus *bus);
64static void hotplug_event_func(acpi_handle handle, u32 type, void *context); 66static void hotplug_event(acpi_handle handle, u32 type, void *data);
65static void handle_hotplug_event_func(acpi_handle handle, u32 type, void *context);
66static void free_bridge(struct kref *kref); 67static void free_bridge(struct kref *kref);
67 68
68/* callback routine to check for the existence of a pci dock device */ 69static void acpiphp_context_handler(acpi_handle handle, void *context)
69static acpi_status
70is_pci_dock_device(acpi_handle handle, u32 lvl, void *context, void **rv)
71{ 70{
72 int *count = (int *)context; 71 /* Intentionally empty. */
72}
73 73
74 if (is_dock_device(handle)) { 74/**
75 (*count)++; 75 * acpiphp_init_context - Create hotplug context and grab a reference to it.
76 return AE_CTRL_TERMINATE; 76 * @handle: ACPI object handle to create the context for.
77 } else { 77 *
78 return AE_OK; 78 * Call under acpiphp_context_lock.
79 */
80static struct acpiphp_context *acpiphp_init_context(acpi_handle handle)
81{
82 struct acpiphp_context *context;
83 acpi_status status;
84
85 context = kzalloc(sizeof(*context), GFP_KERNEL);
86 if (!context)
87 return NULL;
88
89 context->handle = handle;
90 context->refcount = 1;
91 status = acpi_attach_data(handle, acpiphp_context_handler, context);
92 if (ACPI_FAILURE(status)) {
93 kfree(context);
94 return NULL;
79 } 95 }
96 return context;
97}
98
99/**
100 * acpiphp_get_context - Get hotplug context and grab a reference to it.
101 * @handle: ACPI object handle to get the context for.
102 *
103 * Call under acpiphp_context_lock.
104 */
105static struct acpiphp_context *acpiphp_get_context(acpi_handle handle)
106{
107 struct acpiphp_context *context = NULL;
108 acpi_status status;
109 void *data;
110
111 status = acpi_get_data(handle, acpiphp_context_handler, &data);
112 if (ACPI_SUCCESS(status)) {
113 context = data;
114 context->refcount++;
115 }
116 return context;
117}
118
119/**
120 * acpiphp_put_context - Drop a reference to ACPI hotplug context.
121 * @handle: ACPI object handle to put the context for.
122 *
123 * The context object is removed if there are no more references to it.
124 *
125 * Call under acpiphp_context_lock.
126 */
127static void acpiphp_put_context(struct acpiphp_context *context)
128{
129 if (--context->refcount)
130 return;
131
132 WARN_ON(context->bridge);
133 acpi_detach_data(context->handle, acpiphp_context_handler);
134 kfree(context);
80} 135}
81 136
82static inline void get_bridge(struct acpiphp_bridge *bridge) 137static inline void get_bridge(struct acpiphp_bridge *bridge)
@@ -91,25 +146,36 @@ static inline void put_bridge(struct acpiphp_bridge *bridge)
91 146
92static void free_bridge(struct kref *kref) 147static void free_bridge(struct kref *kref)
93{ 148{
149 struct acpiphp_context *context;
94 struct acpiphp_bridge *bridge; 150 struct acpiphp_bridge *bridge;
95 struct acpiphp_slot *slot, *next; 151 struct acpiphp_slot *slot, *next;
96 struct acpiphp_func *func, *tmp; 152 struct acpiphp_func *func, *tmp;
97 153
154 mutex_lock(&acpiphp_context_lock);
155
98 bridge = container_of(kref, struct acpiphp_bridge, ref); 156 bridge = container_of(kref, struct acpiphp_bridge, ref);
99 157
100 list_for_each_entry_safe(slot, next, &bridge->slots, node) { 158 list_for_each_entry_safe(slot, next, &bridge->slots, node) {
101 list_for_each_entry_safe(func, tmp, &slot->funcs, sibling) { 159 list_for_each_entry_safe(func, tmp, &slot->funcs, sibling)
102 kfree(func); 160 acpiphp_put_context(func_to_context(func));
103 } 161
104 kfree(slot); 162 kfree(slot);
105 } 163 }
106 164
107 /* Release reference acquired by acpiphp_bridge_handle_to_function() */ 165 context = bridge->context;
108 if ((bridge->flags & BRIDGE_HAS_EJ0) && bridge->func) 166 /* Root bridges will not have hotplug context. */
109 put_bridge(bridge->func->slot->bridge); 167 if (context) {
168 /* Release the reference taken by acpiphp_enumerate_slots(). */
169 put_bridge(context->func.parent);
170 context->bridge = NULL;
171 acpiphp_put_context(context);
172 }
173
110 put_device(&bridge->pci_bus->dev); 174 put_device(&bridge->pci_bus->dev);
111 pci_dev_put(bridge->pci_dev); 175 pci_dev_put(bridge->pci_dev);
112 kfree(bridge); 176 kfree(bridge);
177
178 mutex_unlock(&acpiphp_context_lock);
113} 179}
114 180
115/* 181/*
@@ -119,15 +185,14 @@ static void free_bridge(struct kref *kref)
119 * TBD - figure out a way to only call fixups for 185 * TBD - figure out a way to only call fixups for
120 * systems that require them. 186 * systems that require them.
121 */ 187 */
122static int post_dock_fixups(struct notifier_block *nb, unsigned long val, 188static void post_dock_fixups(acpi_handle not_used, u32 event, void *data)
123 void *v)
124{ 189{
125 struct acpiphp_func *func = container_of(nb, struct acpiphp_func, nb); 190 struct acpiphp_context *context = data;
126 struct pci_bus *bus = func->slot->bridge->pci_bus; 191 struct pci_bus *bus = context->func.slot->bus;
127 u32 buses; 192 u32 buses;
128 193
129 if (!bus->self) 194 if (!bus->self)
130 return NOTIFY_OK; 195 return;
131 196
132 /* fixup bad _DCK function that rewrites 197 /* fixup bad _DCK function that rewrites
133 * secondary bridge on slot 198 * secondary bridge on slot
@@ -143,12 +208,12 @@ static int post_dock_fixups(struct notifier_block *nb, unsigned long val,
143 | ((unsigned int)(bus->busn_res.end) << 16); 208 | ((unsigned int)(bus->busn_res.end) << 16);
144 pci_write_config_dword(bus->self, PCI_PRIMARY_BUS, buses); 209 pci_write_config_dword(bus->self, PCI_PRIMARY_BUS, buses);
145 } 210 }
146 return NOTIFY_OK;
147} 211}
148 212
149 213
150static const struct acpi_dock_ops acpiphp_dock_ops = { 214static const struct acpi_dock_ops acpiphp_dock_ops = {
151 .handler = hotplug_event_func, 215 .fixup = post_dock_fixups,
216 .handler = hotplug_event,
152}; 217};
153 218
154/* Check whether the PCI device is managed by native PCIe hotplug driver */ 219/* Check whether the PCI device is managed by native PCIe hotplug driver */
@@ -182,129 +247,118 @@ static bool device_is_managed_by_native_pciehp(struct pci_dev *pdev)
182 247
183static void acpiphp_dock_init(void *data) 248static void acpiphp_dock_init(void *data)
184{ 249{
185 struct acpiphp_func *func = data; 250 struct acpiphp_context *context = data;
186 251
187 get_bridge(func->slot->bridge); 252 get_bridge(context->func.parent);
188} 253}
189 254
190static void acpiphp_dock_release(void *data) 255static void acpiphp_dock_release(void *data)
191{ 256{
192 struct acpiphp_func *func = data; 257 struct acpiphp_context *context = data;
193 258
194 put_bridge(func->slot->bridge); 259 put_bridge(context->func.parent);
195} 260}
196 261
197/* callback routine to register each ACPI PCI slot object */ 262/* callback routine to register each ACPI PCI slot object */
198static acpi_status 263static acpi_status register_slot(acpi_handle handle, u32 lvl, void *data,
199register_slot(acpi_handle handle, u32 lvl, void *context, void **rv) 264 void **rv)
200{ 265{
201 struct acpiphp_bridge *bridge = (struct acpiphp_bridge *)context; 266 struct acpiphp_bridge *bridge = data;
267 struct acpiphp_context *context;
202 struct acpiphp_slot *slot; 268 struct acpiphp_slot *slot;
203 struct acpiphp_func *newfunc; 269 struct acpiphp_func *newfunc;
204 acpi_handle tmp;
205 acpi_status status = AE_OK; 270 acpi_status status = AE_OK;
206 unsigned long long adr, sun; 271 unsigned long long adr;
207 int device, function, retval, found = 0; 272 int device, function;
208 struct pci_bus *pbus = bridge->pci_bus; 273 struct pci_bus *pbus = bridge->pci_bus;
209 struct pci_dev *pdev; 274 struct pci_dev *pdev = bridge->pci_dev;
210 u32 val; 275 u32 val;
211 276
212 if (!acpi_pci_check_ejectable(pbus, handle) && !is_dock_device(handle)) 277 if (pdev && device_is_managed_by_native_pciehp(pdev))
213 return AE_OK; 278 return AE_OK;
214 279
215 status = acpi_evaluate_integer(handle, "_ADR", NULL, &adr); 280 status = acpi_evaluate_integer(handle, "_ADR", NULL, &adr);
216 if (ACPI_FAILURE(status)) { 281 if (ACPI_FAILURE(status)) {
217 warn("can't evaluate _ADR (%#x)\n", status); 282 acpi_handle_warn(handle, "can't evaluate _ADR (%#x)\n", status);
218 return AE_OK; 283 return AE_OK;
219 } 284 }
220 285
221 device = (adr >> 16) & 0xffff; 286 device = (adr >> 16) & 0xffff;
222 function = adr & 0xffff; 287 function = adr & 0xffff;
223 288
224 pdev = bridge->pci_dev; 289 mutex_lock(&acpiphp_context_lock);
225 if (pdev && device_is_managed_by_native_pciehp(pdev)) 290 context = acpiphp_init_context(handle);
226 return AE_OK; 291 if (!context) {
227 292 mutex_unlock(&acpiphp_context_lock);
228 newfunc = kzalloc(sizeof(struct acpiphp_func), GFP_KERNEL); 293 acpi_handle_err(handle, "No hotplug context\n");
229 if (!newfunc) 294 return AE_NOT_EXIST;
230 return AE_NO_MEMORY; 295 }
231 296 newfunc = &context->func;
232 newfunc->handle = handle;
233 newfunc->function = function; 297 newfunc->function = function;
298 newfunc->parent = bridge;
299 mutex_unlock(&acpiphp_context_lock);
234 300
235 if (ACPI_SUCCESS(acpi_get_handle(handle, "_EJ0", &tmp))) 301 if (acpi_has_method(handle, "_EJ0"))
236 newfunc->flags = FUNC_HAS_EJ0; 302 newfunc->flags = FUNC_HAS_EJ0;
237 303
238 if (ACPI_SUCCESS(acpi_get_handle(handle, "_STA", &tmp))) 304 if (acpi_has_method(handle, "_STA"))
239 newfunc->flags |= FUNC_HAS_STA; 305 newfunc->flags |= FUNC_HAS_STA;
240 306
241 if (ACPI_SUCCESS(acpi_get_handle(handle, "_PS0", &tmp))) 307 if (acpi_has_method(handle, "_DCK"))
242 newfunc->flags |= FUNC_HAS_PS0;
243
244 if (ACPI_SUCCESS(acpi_get_handle(handle, "_PS3", &tmp)))
245 newfunc->flags |= FUNC_HAS_PS3;
246
247 if (ACPI_SUCCESS(acpi_get_handle(handle, "_DCK", &tmp)))
248 newfunc->flags |= FUNC_HAS_DCK; 308 newfunc->flags |= FUNC_HAS_DCK;
249 309
250 status = acpi_evaluate_integer(handle, "_SUN", NULL, &sun);
251 if (ACPI_FAILURE(status)) {
252 /*
253 * use the count of the number of slots we've found
254 * for the number of the slot
255 */
256 sun = bridge->nr_slots+1;
257 }
258
259 /* search for objects that share the same slot */ 310 /* search for objects that share the same slot */
260 list_for_each_entry(slot, &bridge->slots, node) 311 list_for_each_entry(slot, &bridge->slots, node)
261 if (slot->device == device) { 312 if (slot->device == device)
262 if (slot->sun != sun) 313 goto slot_found;
263 warn("sibling found, but _SUN doesn't match!\n");
264 found = 1;
265 break;
266 }
267 314
268 if (!found) { 315 slot = kzalloc(sizeof(struct acpiphp_slot), GFP_KERNEL);
269 slot = kzalloc(sizeof(struct acpiphp_slot), GFP_KERNEL); 316 if (!slot) {
270 if (!slot) { 317 status = AE_NO_MEMORY;
271 kfree(newfunc); 318 goto err;
272 return AE_NO_MEMORY; 319 }
273 }
274 320
275 slot->bridge = bridge; 321 slot->bus = bridge->pci_bus;
276 slot->device = device; 322 slot->device = device;
277 slot->sun = sun; 323 INIT_LIST_HEAD(&slot->funcs);
278 INIT_LIST_HEAD(&slot->funcs); 324 mutex_init(&slot->crit_sect);
279 mutex_init(&slot->crit_sect); 325
326 list_add_tail(&slot->node, &bridge->slots);
327
 328 /* Register slots for ejectable functions only. */
329 if (acpi_pci_check_ejectable(pbus, handle) || is_dock_device(handle)) {
330 unsigned long long sun;
331 int retval;
280 332
281 mutex_lock(&bridge_mutex);
282 list_add_tail(&slot->node, &bridge->slots);
283 mutex_unlock(&bridge_mutex);
284 bridge->nr_slots++; 333 bridge->nr_slots++;
334 status = acpi_evaluate_integer(handle, "_SUN", NULL, &sun);
335 if (ACPI_FAILURE(status))
336 sun = bridge->nr_slots;
285 337
286 dbg("found ACPI PCI Hotplug slot %llu at PCI %04x:%02x:%02x\n", 338 dbg("found ACPI PCI Hotplug slot %llu at PCI %04x:%02x:%02x\n",
287 slot->sun, pci_domain_nr(pbus), pbus->number, device); 339 sun, pci_domain_nr(pbus), pbus->number, device);
288 retval = acpiphp_register_hotplug_slot(slot); 340
341 retval = acpiphp_register_hotplug_slot(slot, sun);
289 if (retval) { 342 if (retval) {
343 slot->slot = NULL;
344 bridge->nr_slots--;
290 if (retval == -EBUSY) 345 if (retval == -EBUSY)
291 warn("Slot %llu already registered by another " 346 warn("Slot %llu already registered by another "
292 "hotplug driver\n", slot->sun); 347 "hotplug driver\n", sun);
293 else 348 else
294 warn("acpiphp_register_hotplug_slot failed " 349 warn("acpiphp_register_hotplug_slot failed "
295 "(err code = 0x%x)\n", retval); 350 "(err code = 0x%x)\n", retval);
296 goto err_exit;
297 } 351 }
352 /* Even if the slot registration fails, we can still use it. */
298 } 353 }
299 354
355 slot_found:
300 newfunc->slot = slot; 356 newfunc->slot = slot;
301 mutex_lock(&bridge_mutex);
302 list_add_tail(&newfunc->sibling, &slot->funcs); 357 list_add_tail(&newfunc->sibling, &slot->funcs);
303 mutex_unlock(&bridge_mutex);
304 358
305 if (pci_bus_read_dev_vendor_id(pbus, PCI_DEVFN(device, function), 359 if (pci_bus_read_dev_vendor_id(pbus, PCI_DEVFN(device, function),
306 &val, 60*1000)) 360 &val, 60*1000))
307 slot->flags |= (SLOT_ENABLED | SLOT_POWEREDON); 361 slot->flags |= SLOT_ENABLED;
308 362
309 if (is_dock_device(handle)) { 363 if (is_dock_device(handle)) {
310 /* we don't want to call this device's _EJ0 364 /* we don't want to call this device's _EJ0
@@ -313,136 +367,46 @@ register_slot(acpi_handle handle, u32 lvl, void *context, void **rv)
313 */ 367 */
314 newfunc->flags &= ~FUNC_HAS_EJ0; 368 newfunc->flags &= ~FUNC_HAS_EJ0;
315 if (register_hotplug_dock_device(handle, 369 if (register_hotplug_dock_device(handle,
316 &acpiphp_dock_ops, newfunc, 370 &acpiphp_dock_ops, context,
317 acpiphp_dock_init, acpiphp_dock_release)) 371 acpiphp_dock_init, acpiphp_dock_release))
318 dbg("failed to register dock device\n"); 372 dbg("failed to register dock device\n");
319
320 /* we need to be notified when dock events happen
321 * outside of the hotplug operation, since we may
322 * need to do fixups before we can hotplug.
323 */
324 newfunc->nb.notifier_call = post_dock_fixups;
325 if (register_dock_notifier(&newfunc->nb))
326 dbg("failed to register a dock notifier");
327 } 373 }
328 374
329 /* install notify handler */ 375 /* install notify handler */
330 if (!(newfunc->flags & FUNC_HAS_DCK)) { 376 if (!(newfunc->flags & FUNC_HAS_DCK)) {
331 status = acpi_install_notify_handler(handle, 377 status = acpi_install_notify_handler(handle, ACPI_SYSTEM_NOTIFY,
332 ACPI_SYSTEM_NOTIFY, 378 handle_hotplug_event,
333 handle_hotplug_event_func, 379 context);
334 newfunc);
335
336 if (ACPI_FAILURE(status)) 380 if (ACPI_FAILURE(status))
337 err("failed to register interrupt notify handler\n"); 381 acpi_handle_err(handle,
338 } else 382 "failed to install notify handler\n");
339 status = AE_OK;
340
341 return status;
342
343 err_exit:
344 bridge->nr_slots--;
345 mutex_lock(&bridge_mutex);
346 list_del(&slot->node);
347 mutex_unlock(&bridge_mutex);
348 kfree(slot);
349 kfree(newfunc);
350
351 return AE_OK;
352}
353
354
355/* see if it's worth looking at this bridge */
356static int detect_ejectable_slots(acpi_handle handle)
357{
358 int found = acpi_pci_detect_ejectable(handle);
359 if (!found) {
360 acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, (u32)1,
361 is_pci_dock_device, NULL, (void *)&found, NULL);
362 }
363 return found;
364}
365
366/* initialize miscellaneous stuff for both root and PCI-to-PCI bridge */
367static void init_bridge_misc(struct acpiphp_bridge *bridge)
368{
369 acpi_status status;
370
371 /* must be added to the list prior to calling register_slot */
372 mutex_lock(&bridge_mutex);
373 list_add(&bridge->list, &bridge_list);
374 mutex_unlock(&bridge_mutex);
375
376 /* register all slot objects under this bridge */
377 status = acpi_walk_namespace(ACPI_TYPE_DEVICE, bridge->handle, (u32)1,
378 register_slot, NULL, bridge, NULL);
379 if (ACPI_FAILURE(status)) {
380 mutex_lock(&bridge_mutex);
381 list_del(&bridge->list);
382 mutex_unlock(&bridge_mutex);
383 return;
384 } 383 }
385 384
386 /* install notify handler for P2P bridges */ 385 return AE_OK;
387 if (!pci_is_root_bus(bridge->pci_bus)) {
388 if ((bridge->flags & BRIDGE_HAS_EJ0) && bridge->func) {
389 status = acpi_remove_notify_handler(bridge->func->handle,
390 ACPI_SYSTEM_NOTIFY,
391 handle_hotplug_event_func);
392 if (ACPI_FAILURE(status))
393 err("failed to remove notify handler\n");
394 }
395 status = acpi_install_notify_handler(bridge->handle,
396 ACPI_SYSTEM_NOTIFY,
397 handle_hotplug_event_bridge,
398 bridge);
399
400 if (ACPI_FAILURE(status)) {
401 err("failed to register interrupt notify handler\n");
402 }
403 }
404}
405
406
407/* find acpiphp_func from acpiphp_bridge */
408static struct acpiphp_func *acpiphp_bridge_handle_to_function(acpi_handle handle)
409{
410 struct acpiphp_bridge *bridge;
411 struct acpiphp_slot *slot;
412 struct acpiphp_func *func = NULL;
413
414 mutex_lock(&bridge_mutex);
415 list_for_each_entry(bridge, &bridge_list, list) {
416 list_for_each_entry(slot, &bridge->slots, node) {
417 list_for_each_entry(func, &slot->funcs, sibling) {
418 if (func->handle == handle) {
419 get_bridge(func->slot->bridge);
420 mutex_unlock(&bridge_mutex);
421 return func;
422 }
423 }
424 }
425 }
426 mutex_unlock(&bridge_mutex);
427 386
428 return NULL; 387 err:
388 mutex_lock(&acpiphp_context_lock);
389 acpiphp_put_context(context);
390 mutex_unlock(&acpiphp_context_lock);
391 return status;
429} 392}
430 393
431
432static struct acpiphp_bridge *acpiphp_handle_to_bridge(acpi_handle handle) 394static struct acpiphp_bridge *acpiphp_handle_to_bridge(acpi_handle handle)
433{ 395{
434 struct acpiphp_bridge *bridge; 396 struct acpiphp_context *context;
435 397 struct acpiphp_bridge *bridge = NULL;
436 mutex_lock(&bridge_mutex); 398
437 list_for_each_entry(bridge, &bridge_list, list) 399 mutex_lock(&acpiphp_context_lock);
438 if (bridge->handle == handle) { 400 context = acpiphp_get_context(handle);
401 if (context) {
402 bridge = context->bridge;
403 if (bridge)
439 get_bridge(bridge); 404 get_bridge(bridge);
440 mutex_unlock(&bridge_mutex);
441 return bridge;
442 }
443 mutex_unlock(&bridge_mutex);
444 405
445 return NULL; 406 acpiphp_put_context(context);
407 }
408 mutex_unlock(&acpiphp_context_lock);
409 return bridge;
446} 410}
447 411
448static void cleanup_bridge(struct acpiphp_bridge *bridge) 412static void cleanup_bridge(struct acpiphp_bridge *bridge)
@@ -450,40 +414,24 @@ static void cleanup_bridge(struct acpiphp_bridge *bridge)
450 struct acpiphp_slot *slot; 414 struct acpiphp_slot *slot;
451 struct acpiphp_func *func; 415 struct acpiphp_func *func;
452 acpi_status status; 416 acpi_status status;
453 acpi_handle handle = bridge->handle;
454
455 if (!pci_is_root_bus(bridge->pci_bus)) {
456 status = acpi_remove_notify_handler(handle,
457 ACPI_SYSTEM_NOTIFY,
458 handle_hotplug_event_bridge);
459 if (ACPI_FAILURE(status))
460 err("failed to remove notify handler\n");
461 }
462
463 if ((bridge->flags & BRIDGE_HAS_EJ0) && bridge->func) {
464 status = acpi_install_notify_handler(bridge->func->handle,
465 ACPI_SYSTEM_NOTIFY,
466 handle_hotplug_event_func,
467 bridge->func);
468 if (ACPI_FAILURE(status))
469 err("failed to install interrupt notify handler\n");
470 }
471 417
472 list_for_each_entry(slot, &bridge->slots, node) { 418 list_for_each_entry(slot, &bridge->slots, node) {
473 list_for_each_entry(func, &slot->funcs, sibling) { 419 list_for_each_entry(func, &slot->funcs, sibling) {
474 if (is_dock_device(func->handle)) { 420 acpi_handle handle = func_to_handle(func);
475 unregister_hotplug_dock_device(func->handle); 421
476 unregister_dock_notifier(&func->nb); 422 if (is_dock_device(handle))
477 } 423 unregister_hotplug_dock_device(handle);
424
478 if (!(func->flags & FUNC_HAS_DCK)) { 425 if (!(func->flags & FUNC_HAS_DCK)) {
479 status = acpi_remove_notify_handler(func->handle, 426 status = acpi_remove_notify_handler(handle,
480 ACPI_SYSTEM_NOTIFY, 427 ACPI_SYSTEM_NOTIFY,
481 handle_hotplug_event_func); 428 handle_hotplug_event);
482 if (ACPI_FAILURE(status)) 429 if (ACPI_FAILURE(status))
483 err("failed to remove notify handler\n"); 430 err("failed to remove notify handler\n");
484 } 431 }
485 } 432 }
486 acpiphp_unregister_hotplug_slot(slot); 433 if (slot->slot)
434 acpiphp_unregister_hotplug_slot(slot);
487 } 435 }
488 436
489 mutex_lock(&bridge_mutex); 437 mutex_lock(&bridge_mutex);
@@ -491,71 +439,6 @@ static void cleanup_bridge(struct acpiphp_bridge *bridge)
491 mutex_unlock(&bridge_mutex); 439 mutex_unlock(&bridge_mutex);
492} 440}
493 441
494static int power_on_slot(struct acpiphp_slot *slot)
495{
496 acpi_status status;
497 struct acpiphp_func *func;
498 int retval = 0;
499
500 /* if already enabled, just skip */
501 if (slot->flags & SLOT_POWEREDON)
502 goto err_exit;
503
504 list_for_each_entry(func, &slot->funcs, sibling) {
505 if (func->flags & FUNC_HAS_PS0) {
506 dbg("%s: executing _PS0\n", __func__);
507 status = acpi_evaluate_object(func->handle, "_PS0", NULL, NULL);
508 if (ACPI_FAILURE(status)) {
509 warn("%s: _PS0 failed\n", __func__);
510 retval = -1;
511 goto err_exit;
512 } else
513 break;
514 }
515 }
516
517 /* TBD: evaluate _STA to check if the slot is enabled */
518
519 slot->flags |= SLOT_POWEREDON;
520
521 err_exit:
522 return retval;
523}
524
525
526static int power_off_slot(struct acpiphp_slot *slot)
527{
528 acpi_status status;
529 struct acpiphp_func *func;
530
531 int retval = 0;
532
533 /* if already disabled, just skip */
534 if ((slot->flags & SLOT_POWEREDON) == 0)
535 goto err_exit;
536
537 list_for_each_entry(func, &slot->funcs, sibling) {
538 if (func->flags & FUNC_HAS_PS3) {
539 status = acpi_evaluate_object(func->handle, "_PS3", NULL, NULL);
540 if (ACPI_FAILURE(status)) {
541 warn("%s: _PS3 failed\n", __func__);
542 retval = -1;
543 goto err_exit;
544 } else
545 break;
546 }
547 }
548
549 /* TBD: evaluate _STA to check if the slot is disabled */
550
551 slot->flags &= (~SLOT_POWEREDON);
552
553 err_exit:
554 return retval;
555}
556
557
558
559/** 442/**
560 * acpiphp_max_busnr - return the highest reserved bus number under the given bus. 443 * acpiphp_max_busnr - return the highest reserved bus number under the given bus.
561 * @bus: bus to start search with 444 * @bus: bus to start search with
@@ -583,52 +466,32 @@ static unsigned char acpiphp_max_busnr(struct pci_bus *bus)
583 return max; 466 return max;
584} 467}
585 468
586
587/** 469/**
588 * acpiphp_bus_add - add a new bus to acpi subsystem 470 * acpiphp_bus_trim - Trim device objects in an ACPI namespace subtree.
589 * @func: acpiphp_func of the bridge 471 * @handle: ACPI device object handle to start from.
590 */ 472 */
591static int acpiphp_bus_add(struct acpiphp_func *func) 473static void acpiphp_bus_trim(acpi_handle handle)
592{ 474{
593 struct acpi_device *device; 475 struct acpi_device *adev = NULL;
594 int ret_val;
595
596 if (!acpi_bus_get_device(func->handle, &device)) {
597 dbg("bus exists... trim\n");
598 /* this shouldn't be in here, so remove
599 * the bus then re-add it...
600 */
601 acpi_bus_trim(device);
602 }
603
604 ret_val = acpi_bus_scan(func->handle);
605 if (!ret_val)
606 ret_val = acpi_bus_get_device(func->handle, &device);
607
608 if (ret_val)
609 dbg("error adding bus, %x\n", -ret_val);
610 476
611 return ret_val; 477 acpi_bus_get_device(handle, &adev);
478 if (adev)
479 acpi_bus_trim(adev);
612} 480}
613 481
614
615/** 482/**
616 * acpiphp_bus_trim - trim a bus from acpi subsystem 483 * acpiphp_bus_add - Scan ACPI namespace subtree.
617 * @handle: handle to acpi namespace 484 * @handle: ACPI object handle to start the scan from.
618 */ 485 */
619static int acpiphp_bus_trim(acpi_handle handle) 486static void acpiphp_bus_add(acpi_handle handle)
620{ 487{
621 struct acpi_device *device; 488 struct acpi_device *adev = NULL;
622 int retval;
623
624 retval = acpi_bus_get_device(handle, &device);
625 if (retval) {
626 dbg("acpi_device not found\n");
627 return retval;
628 }
629 489
630 acpi_bus_trim(device); 490 acpiphp_bus_trim(handle);
631 return 0; 491 acpi_bus_scan(handle);
492 acpi_bus_get_device(handle, &adev);
493 if (adev)
494 acpi_device_set_power(adev, ACPI_STATE_D0);
632} 495}
633 496
634static void acpiphp_set_acpi_region(struct acpiphp_slot *slot) 497static void acpiphp_set_acpi_region(struct acpiphp_slot *slot)
@@ -645,7 +508,8 @@ static void acpiphp_set_acpi_region(struct acpiphp_slot *slot)
645 params[1].type = ACPI_TYPE_INTEGER; 508 params[1].type = ACPI_TYPE_INTEGER;
646 params[1].integer.value = 1; 509 params[1].integer.value = 1;
647 /* _REG is optional, we don't care about if there is failure */ 510 /* _REG is optional, we don't care about if there is failure */
648 acpi_evaluate_object(func->handle, "_REG", &arg_list, NULL); 511 acpi_evaluate_object(func_to_handle(func), "_REG", &arg_list,
512 NULL);
649 } 513 }
650} 514}
651 515
@@ -653,59 +517,44 @@ static void check_hotplug_bridge(struct acpiphp_slot *slot, struct pci_dev *dev)
653{ 517{
654 struct acpiphp_func *func; 518 struct acpiphp_func *func;
655 519
656 if (!dev->subordinate)
657 return;
658
659 /* quirk, or pcie could set it already */ 520 /* quirk, or pcie could set it already */
660 if (dev->is_hotplug_bridge) 521 if (dev->is_hotplug_bridge)
661 return; 522 return;
662 523
663 if (PCI_SLOT(dev->devfn) != slot->device)
664 return;
665
666 list_for_each_entry(func, &slot->funcs, sibling) { 524 list_for_each_entry(func, &slot->funcs, sibling) {
667 if (PCI_FUNC(dev->devfn) == func->function) { 525 if (PCI_FUNC(dev->devfn) == func->function) {
668 /* check if this bridge has ejectable slots */ 526 dev->is_hotplug_bridge = 1;
669 if ((detect_ejectable_slots(func->handle) > 0))
670 dev->is_hotplug_bridge = 1;
671 break; 527 break;
672 } 528 }
673 } 529 }
674} 530}
675 531
676/** 532/**
677 * enable_device - enable, configure a slot 533 * enable_slot - enable, configure a slot
678 * @slot: slot to be enabled 534 * @slot: slot to be enabled
679 * 535 *
680 * This function should be called per *physical slot*, 536 * This function should be called per *physical slot*,
681 * not per each slot object in ACPI namespace. 537 * not per each slot object in ACPI namespace.
682 */ 538 */
683static int __ref enable_device(struct acpiphp_slot *slot) 539static void __ref enable_slot(struct acpiphp_slot *slot)
684{ 540{
685 struct pci_dev *dev; 541 struct pci_dev *dev;
686 struct pci_bus *bus = slot->bridge->pci_bus; 542 struct pci_bus *bus = slot->bus;
687 struct acpiphp_func *func; 543 struct acpiphp_func *func;
688 int num, max, pass; 544 int max, pass;
689 LIST_HEAD(add_list); 545 LIST_HEAD(add_list);
690 546
691 if (slot->flags & SLOT_ENABLED)
692 goto err_exit;
693
694 list_for_each_entry(func, &slot->funcs, sibling) 547 list_for_each_entry(func, &slot->funcs, sibling)
695 acpiphp_bus_add(func); 548 acpiphp_bus_add(func_to_handle(func));
696 549
697 num = pci_scan_slot(bus, PCI_DEVFN(slot->device, 0)); 550 pci_scan_slot(bus, PCI_DEVFN(slot->device, 0));
698 if (num == 0) {
699 /* Maybe only part of funcs are added. */
700 dbg("No new device found\n");
701 goto err_exit;
702 }
703 551
704 max = acpiphp_max_busnr(bus); 552 max = acpiphp_max_busnr(bus);
705 for (pass = 0; pass < 2; pass++) { 553 for (pass = 0; pass < 2; pass++) {
706 list_for_each_entry(dev, &bus->devices, bus_list) { 554 list_for_each_entry(dev, &bus->devices, bus_list) {
707 if (PCI_SLOT(dev->devfn) != slot->device) 555 if (PCI_SLOT(dev->devfn) != slot->device)
708 continue; 556 continue;
557
709 if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE || 558 if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE ||
710 dev->hdr_type == PCI_HEADER_TYPE_CARDBUS) { 559 dev->hdr_type == PCI_HEADER_TYPE_CARDBUS) {
711 max = pci_scan_bridge(bus, dev, max, pass); 560 max = pci_scan_bridge(bus, dev, max, pass);
@@ -744,16 +593,12 @@ static int __ref enable_device(struct acpiphp_slot *slot)
744 continue; 593 continue;
745 } 594 }
746 } 595 }
747
748
749 err_exit:
750 return 0;
751} 596}
752 597
753/* return first device in slot, acquiring a reference on it */ 598/* return first device in slot, acquiring a reference on it */
754static struct pci_dev *dev_in_slot(struct acpiphp_slot *slot) 599static struct pci_dev *dev_in_slot(struct acpiphp_slot *slot)
755{ 600{
756 struct pci_bus *bus = slot->bridge->pci_bus; 601 struct pci_bus *bus = slot->bus;
757 struct pci_dev *dev; 602 struct pci_dev *dev;
758 struct pci_dev *ret = NULL; 603 struct pci_dev *ret = NULL;
759 604
@@ -769,16 +614,16 @@ static struct pci_dev *dev_in_slot(struct acpiphp_slot *slot)
769} 614}
770 615
771/** 616/**
772 * disable_device - disable a slot 617 * disable_slot - disable a slot
773 * @slot: ACPI PHP slot 618 * @slot: ACPI PHP slot
774 */ 619 */
775static int disable_device(struct acpiphp_slot *slot) 620static void disable_slot(struct acpiphp_slot *slot)
776{ 621{
777 struct acpiphp_func *func; 622 struct acpiphp_func *func;
778 struct pci_dev *pdev; 623 struct pci_dev *pdev;
779 624
780 /* 625 /*
781 * enable_device() enumerates all functions in this device via 626 * enable_slot() enumerates all functions in this device via
782 * pci_scan_slot(), whether they have associated ACPI hotplug 627 * pci_scan_slot(), whether they have associated ACPI hotplug
783 * methods (_EJ0, etc.) or not. Therefore, we remove all functions 628 * methods (_EJ0, etc.) or not. Therefore, we remove all functions
784 * here. 629 * here.
@@ -788,13 +633,10 @@ static int disable_device(struct acpiphp_slot *slot)
788 pci_dev_put(pdev); 633 pci_dev_put(pdev);
789 } 634 }
790 635
791 list_for_each_entry(func, &slot->funcs, sibling) { 636 list_for_each_entry(func, &slot->funcs, sibling)
792 acpiphp_bus_trim(func->handle); 637 acpiphp_bus_trim(func_to_handle(func));
793 }
794 638
795 slot->flags &= (~SLOT_ENABLED); 639 slot->flags &= (~SLOT_ENABLED);
796
797 return 0;
798} 640}
799 641
800 642
@@ -812,18 +654,21 @@ static int disable_device(struct acpiphp_slot *slot)
812 */ 654 */
813static unsigned int get_slot_status(struct acpiphp_slot *slot) 655static unsigned int get_slot_status(struct acpiphp_slot *slot)
814{ 656{
815 acpi_status status;
816 unsigned long long sta = 0; 657 unsigned long long sta = 0;
817 u32 dvid;
818 struct acpiphp_func *func; 658 struct acpiphp_func *func;
819 659
820 list_for_each_entry(func, &slot->funcs, sibling) { 660 list_for_each_entry(func, &slot->funcs, sibling) {
821 if (func->flags & FUNC_HAS_STA) { 661 if (func->flags & FUNC_HAS_STA) {
822 status = acpi_evaluate_integer(func->handle, "_STA", NULL, &sta); 662 acpi_status status;
663
664 status = acpi_evaluate_integer(func_to_handle(func),
665 "_STA", NULL, &sta);
823 if (ACPI_SUCCESS(status) && sta) 666 if (ACPI_SUCCESS(status) && sta)
824 break; 667 break;
825 } else { 668 } else {
826 pci_bus_read_config_dword(slot->bridge->pci_bus, 669 u32 dvid;
670
671 pci_bus_read_config_dword(slot->bus,
827 PCI_DEVFN(slot->device, 672 PCI_DEVFN(slot->device,
828 func->function), 673 func->function),
829 PCI_VENDOR_ID, &dvid); 674 PCI_VENDOR_ID, &dvid);
@@ -838,34 +683,42 @@ static unsigned int get_slot_status(struct acpiphp_slot *slot)
838} 683}
839 684
840/** 685/**
841 * acpiphp_eject_slot - physically eject the slot 686 * trim_stale_devices - remove PCI devices that are not responding.
842 * @slot: ACPI PHP slot 687 * @dev: PCI device to start walking the hierarchy from.
843 */ 688 */
844int acpiphp_eject_slot(struct acpiphp_slot *slot) 689static void trim_stale_devices(struct pci_dev *dev)
845{ 690{
846 acpi_status status; 691 acpi_handle handle = ACPI_HANDLE(&dev->dev);
847 struct acpiphp_func *func; 692 struct pci_bus *bus = dev->subordinate;
848 struct acpi_object_list arg_list; 693 bool alive = false;
849 union acpi_object arg;
850 694
851 list_for_each_entry(func, &slot->funcs, sibling) { 695 if (handle) {
852 /* We don't want to call _EJ0 on non-existing functions. */ 696 acpi_status status;
853 if ((func->flags & FUNC_HAS_EJ0)) { 697 unsigned long long sta;
854 /* _EJ0 method take one argument */ 698
855 arg_list.count = 1; 699 status = acpi_evaluate_integer(handle, "_STA", NULL, &sta);
856 arg_list.pointer = &arg; 700 alive = ACPI_SUCCESS(status) && sta == ACPI_STA_ALL;
857 arg.type = ACPI_TYPE_INTEGER; 701 }
858 arg.integer.value = 1; 702 if (!alive) {
859 703 u32 v;
860 status = acpi_evaluate_object(func->handle, "_EJ0", &arg_list, NULL); 704
861 if (ACPI_FAILURE(status)) { 705 /* Check if the device responds. */
862 warn("%s: _EJ0 failed\n", __func__); 706 alive = pci_bus_read_dev_vendor_id(dev->bus, dev->devfn, &v, 0);
863 return -1; 707 }
864 } else 708 if (!alive) {
865 break; 709 pci_stop_and_remove_bus_device(dev);
866 } 710 if (handle)
711 acpiphp_bus_trim(handle);
712 } else if (bus) {
713 struct pci_dev *child, *tmp;
714
 715 /* The device is a bridge, so check the bus below it. */
716 pm_runtime_get_sync(&dev->dev);
717 list_for_each_entry_safe(child, tmp, &bus->devices, bus_list)
718 trim_stale_devices(child);
719
720 pm_runtime_put(&dev->dev);
867 } 721 }
868 return 0;
869} 722}
870 723
871/** 724/**
@@ -875,43 +728,30 @@ int acpiphp_eject_slot(struct acpiphp_slot *slot)
875 * Iterate over all slots under this bridge and make sure that if a 728 * Iterate over all slots under this bridge and make sure that if a
876 * card is present they are enabled, and if not they are disabled. 729 * card is present they are enabled, and if not they are disabled.
877 */ 730 */
878static int acpiphp_check_bridge(struct acpiphp_bridge *bridge) 731static void acpiphp_check_bridge(struct acpiphp_bridge *bridge)
879{ 732{
880 struct acpiphp_slot *slot; 733 struct acpiphp_slot *slot;
881 int retval = 0;
882 int enabled, disabled;
883
884 enabled = disabled = 0;
885 734
886 list_for_each_entry(slot, &bridge->slots, node) { 735 list_for_each_entry(slot, &bridge->slots, node) {
887 unsigned int status = get_slot_status(slot); 736 struct pci_bus *bus = slot->bus;
888 if (slot->flags & SLOT_ENABLED) { 737 struct pci_dev *dev, *tmp;
889 if (status == ACPI_STA_ALL) 738
890 continue; 739 mutex_lock(&slot->crit_sect);
891 retval = acpiphp_disable_slot(slot); 740 /* wake up all functions */
892 if (retval) { 741 if (get_slot_status(slot) == ACPI_STA_ALL) {
893 err("Error occurred in disabling\n"); 742 /* remove stale devices if any */
894 goto err_exit; 743 list_for_each_entry_safe(dev, tmp, &bus->devices,
895 } else { 744 bus_list)
896 acpiphp_eject_slot(slot); 745 if (PCI_SLOT(dev->devfn) == slot->device)
897 } 746 trim_stale_devices(dev);
898 disabled++; 747
748 /* configure all functions */
749 enable_slot(slot);
899 } else { 750 } else {
900 if (status != ACPI_STA_ALL) 751 disable_slot(slot);
901 continue;
902 retval = acpiphp_enable_slot(slot);
903 if (retval) {
904 err("Error occurred in enabling\n");
905 goto err_exit;
906 }
907 enabled++;
908 } 752 }
753 mutex_unlock(&slot->crit_sect);
909 } 754 }
910
911 dbg("%s: %d enabled, %d disabled\n", __func__, enabled, disabled);
912
913 err_exit:
914 return retval;
915} 755}
916 756
917static void acpiphp_set_hpp_values(struct pci_bus *bus) 757static void acpiphp_set_hpp_values(struct pci_bus *bus)
@@ -950,25 +790,6 @@ static void acpiphp_sanitize_bus(struct pci_bus *bus)
950 * ACPI event handlers 790 * ACPI event handlers
951 */ 791 */
952 792
953static acpi_status
954check_sub_bridges(acpi_handle handle, u32 lvl, void *context, void **rv)
955{
956 struct acpiphp_bridge *bridge;
957 char objname[64];
958 struct acpi_buffer buffer = { .length = sizeof(objname),
959 .pointer = objname };
960
961 bridge = acpiphp_handle_to_bridge(handle);
962 if (bridge) {
963 acpi_get_name(handle, ACPI_FULL_PATHNAME, &buffer);
964 dbg("%s: re-enumerating slots under %s\n",
965 __func__, objname);
966 acpiphp_check_bridge(bridge);
967 put_bridge(bridge);
968 }
969 return AE_OK ;
970}
971
972void acpiphp_check_host_bridge(acpi_handle handle) 793void acpiphp_check_host_bridge(acpi_handle handle)
973{ 794{
974 struct acpiphp_bridge *bridge; 795 struct acpiphp_bridge *bridge;
@@ -978,27 +799,23 @@ void acpiphp_check_host_bridge(acpi_handle handle)
978 acpiphp_check_bridge(bridge); 799 acpiphp_check_bridge(bridge);
979 put_bridge(bridge); 800 put_bridge(bridge);
980 } 801 }
981
982 acpi_walk_namespace(ACPI_TYPE_DEVICE, handle,
983 ACPI_UINT32_MAX, check_sub_bridges, NULL, NULL, NULL);
984} 802}
985 803
986static void _handle_hotplug_event_bridge(struct work_struct *work) 804static void hotplug_event(acpi_handle handle, u32 type, void *data)
987{ 805{
806 struct acpiphp_context *context = data;
807 struct acpiphp_func *func = &context->func;
988 struct acpiphp_bridge *bridge; 808 struct acpiphp_bridge *bridge;
989 char objname[64]; 809 char objname[64];
990 struct acpi_buffer buffer = { .length = sizeof(objname), 810 struct acpi_buffer buffer = { .length = sizeof(objname),
991 .pointer = objname }; 811 .pointer = objname };
992 struct acpi_hp_work *hp_work;
993 acpi_handle handle;
994 u32 type;
995 812
996 hp_work = container_of(work, struct acpi_hp_work, work); 813 mutex_lock(&acpiphp_context_lock);
997 handle = hp_work->handle; 814 bridge = context->bridge;
998 type = hp_work->type; 815 if (bridge)
999 bridge = (struct acpiphp_bridge *)hp_work->context; 816 get_bridge(bridge);
1000 817
1001 acpi_scan_lock_acquire(); 818 mutex_unlock(&acpiphp_context_lock);
1002 819
1003 acpi_get_name(handle, ACPI_FULL_PATHNAME, &buffer); 820 acpi_get_name(handle, ACPI_FULL_PATHNAME, &buffer);
1004 821
@@ -1007,188 +824,129 @@ static void _handle_hotplug_event_bridge(struct work_struct *work)
1007 /* bus re-enumerate */ 824 /* bus re-enumerate */
1008 dbg("%s: Bus check notify on %s\n", __func__, objname); 825 dbg("%s: Bus check notify on %s\n", __func__, objname);
1009 dbg("%s: re-enumerating slots under %s\n", __func__, objname); 826 dbg("%s: re-enumerating slots under %s\n", __func__, objname);
1010 acpiphp_check_bridge(bridge); 827 if (bridge) {
1011 acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, 828 acpiphp_check_bridge(bridge);
1012 ACPI_UINT32_MAX, check_sub_bridges, NULL, NULL, NULL); 829 } else {
830 struct acpiphp_slot *slot = func->slot;
831
832 mutex_lock(&slot->crit_sect);
833 enable_slot(slot);
834 mutex_unlock(&slot->crit_sect);
835 }
1013 break; 836 break;
1014 837
1015 case ACPI_NOTIFY_DEVICE_CHECK: 838 case ACPI_NOTIFY_DEVICE_CHECK:
1016 /* device check */ 839 /* device check */
1017 dbg("%s: Device check notify on %s\n", __func__, objname); 840 dbg("%s: Device check notify on %s\n", __func__, objname);
1018 acpiphp_check_bridge(bridge); 841 if (bridge)
1019 break; 842 acpiphp_check_bridge(bridge);
843 else
844 acpiphp_check_bridge(func->parent);
1020 845
1021 case ACPI_NOTIFY_DEVICE_WAKE:
1022 /* wake event */
1023 dbg("%s: Device wake notify on %s\n", __func__, objname);
1024 break; 846 break;
1025 847
1026 case ACPI_NOTIFY_EJECT_REQUEST: 848 case ACPI_NOTIFY_EJECT_REQUEST:
1027 /* request device eject */ 849 /* request device eject */
1028 dbg("%s: Device eject notify on %s\n", __func__, objname); 850 dbg("%s: Device eject notify on %s\n", __func__, objname);
1029 if ((bridge->flags & BRIDGE_HAS_EJ0) && bridge->func) { 851 acpiphp_disable_and_eject_slot(func->slot);
1030 struct acpiphp_slot *slot;
1031 slot = bridge->func->slot;
1032 if (!acpiphp_disable_slot(slot))
1033 acpiphp_eject_slot(slot);
1034 }
1035 break; 852 break;
853 }
1036 854
1037 case ACPI_NOTIFY_FREQUENCY_MISMATCH: 855 if (bridge)
1038 printk(KERN_ERR "Device %s cannot be configured due" 856 put_bridge(bridge);
1039 " to a frequency mismatch\n", objname); 857}
1040 break;
1041 858
1042 case ACPI_NOTIFY_BUS_MODE_MISMATCH: 859static void hotplug_event_work(struct work_struct *work)
1043 printk(KERN_ERR "Device %s cannot be configured due" 860{
1044 " to a bus mode mismatch\n", objname); 861 struct acpiphp_context *context;
1045 break; 862 struct acpi_hp_work *hp_work;
1046 863
1047 case ACPI_NOTIFY_POWER_FAULT: 864 hp_work = container_of(work, struct acpi_hp_work, work);
1048 printk(KERN_ERR "Device %s has suffered a power fault\n", 865 context = hp_work->context;
1049 objname); 866 acpi_scan_lock_acquire();
1050 break;
1051 867
1052 default: 868 hotplug_event(hp_work->handle, hp_work->type, context);
1053 warn("notify_handler: unknown event type 0x%x for %s\n", type, objname);
1054 break;
1055 }
1056 869
1057 acpi_scan_lock_release(); 870 acpi_scan_lock_release();
1058 kfree(hp_work); /* allocated in handle_hotplug_event_bridge */ 871 kfree(hp_work); /* allocated in handle_hotplug_event() */
1059 put_bridge(bridge); 872 put_bridge(context->func.parent);
1060} 873}
1061 874
1062/** 875/**
1063 * handle_hotplug_event_bridge - handle ACPI event on bridges 876 * handle_hotplug_event - handle ACPI hotplug event
1064 * @handle: Notify()'ed acpi_handle 877 * @handle: Notify()'ed acpi_handle
1065 * @type: Notify code 878 * @type: Notify code
1066 * @context: pointer to acpiphp_bridge structure 879 * @data: pointer to acpiphp_context structure
1067 * 880 *
1068 * Handles ACPI event notification on {host,p2p} bridges. 881 * Handles ACPI event notification on slots.
1069 */ 882 */
1070static void handle_hotplug_event_bridge(acpi_handle handle, u32 type, 883static void handle_hotplug_event(acpi_handle handle, u32 type, void *data)
1071 void *context)
1072{ 884{
1073 struct acpiphp_bridge *bridge = context; 885 struct acpiphp_context *context;
1074
1075 /*
1076 * Currently the code adds all hotplug events to the kacpid_wq
1077 * queue when it should add hotplug events to the kacpi_hotplug_wq.
1078 * The proper way to fix this is to reorganize the code so that
1079 * drivers (dock, etc.) do not call acpi_os_execute(), etc.
1080 * For now just re-add this work to the kacpi_hotplug_wq so we
1081 * don't deadlock on hotplug actions.
1082 */
1083 get_bridge(bridge);
1084 alloc_acpi_hp_work(handle, type, context, _handle_hotplug_event_bridge);
1085}
1086
1087static void hotplug_event_func(acpi_handle handle, u32 type, void *context)
1088{
1089 struct acpiphp_func *func = context;
1090 char objname[64];
1091 struct acpi_buffer buffer = { .length = sizeof(objname),
1092 .pointer = objname };
1093
1094 acpi_get_name(handle, ACPI_FULL_PATHNAME, &buffer);
1095 886
1096 switch (type) { 887 switch (type) {
1097 case ACPI_NOTIFY_BUS_CHECK: 888 case ACPI_NOTIFY_BUS_CHECK:
1098 /* bus re-enumerate */
1099 dbg("%s: Bus check notify on %s\n", __func__, objname);
1100 acpiphp_enable_slot(func->slot);
1101 break;
1102
1103 case ACPI_NOTIFY_DEVICE_CHECK: 889 case ACPI_NOTIFY_DEVICE_CHECK:
1104 /* device check : re-enumerate from parent bus */
1105 dbg("%s: Device check notify on %s\n", __func__, objname);
1106 acpiphp_check_bridge(func->slot->bridge);
1107 break;
1108
1109 case ACPI_NOTIFY_DEVICE_WAKE:
1110 /* wake event */
1111 dbg("%s: Device wake notify on %s\n", __func__, objname);
1112 break;
1113
1114 case ACPI_NOTIFY_EJECT_REQUEST: 890 case ACPI_NOTIFY_EJECT_REQUEST:
1115 /* request device eject */
1116 dbg("%s: Device eject notify on %s\n", __func__, objname);
1117 if (!(acpiphp_disable_slot(func->slot)))
1118 acpiphp_eject_slot(func->slot);
1119 break; 891 break;
1120 892
1121 default: 893 case ACPI_NOTIFY_DEVICE_WAKE:
1122 warn("notify_handler: unknown event type 0x%x for %s\n", type, objname); 894 return;
1123 break;
1124 }
1125}
1126
1127static void _handle_hotplug_event_func(struct work_struct *work)
1128{
1129 struct acpi_hp_work *hp_work;
1130 struct acpiphp_func *func;
1131 895
1132 hp_work = container_of(work, struct acpi_hp_work, work); 896 case ACPI_NOTIFY_FREQUENCY_MISMATCH:
1133 func = hp_work->context; 897 acpi_handle_err(handle, "Device cannot be configured due "
1134 acpi_scan_lock_acquire(); 898 "to a frequency mismatch\n");
899 return;
1135 900
1136 hotplug_event_func(hp_work->handle, hp_work->type, func); 901 case ACPI_NOTIFY_BUS_MODE_MISMATCH:
902 acpi_handle_err(handle, "Device cannot be configured due "
903 "to a bus mode mismatch\n");
904 return;
1137 905
1138 acpi_scan_lock_release(); 906 case ACPI_NOTIFY_POWER_FAULT:
1139 kfree(hp_work); /* allocated in handle_hotplug_event_func */ 907 acpi_handle_err(handle, "Device has suffered a power fault\n");
1140 put_bridge(func->slot->bridge); 908 return;
1141}
1142 909
1143/** 910 default:
1144 * handle_hotplug_event_func - handle ACPI event on functions (i.e. slots) 911 acpi_handle_warn(handle, "Unsupported event type 0x%x\n", type);
1145 * @handle: Notify()'ed acpi_handle 912 return;
1146 * @type: Notify code 913 }
1147 * @context: pointer to acpiphp_func structure
1148 *
1149 * Handles ACPI event notification on slots.
1150 */
1151static void handle_hotplug_event_func(acpi_handle handle, u32 type,
1152 void *context)
1153{
1154 struct acpiphp_func *func = context;
1155 914
1156 /* 915 mutex_lock(&acpiphp_context_lock);
1157 * Currently the code adds all hotplug events to the kacpid_wq 916 context = acpiphp_get_context(handle);
1158 * queue when it should add hotplug events to the kacpi_hotplug_wq. 917 if (context) {
1159 * The proper way to fix this is to reorganize the code so that 918 get_bridge(context->func.parent);
1160 * drivers (dock, etc.) do not call acpi_os_execute(), etc. 919 acpiphp_put_context(context);
1161 * For now just re-add this work to the kacpi_hotplug_wq so we 920 alloc_acpi_hp_work(handle, type, context, hotplug_event_work);
1162 * don't deadlock on hotplug actions. 921 }
1163 */ 922 mutex_unlock(&acpiphp_context_lock);
1164 get_bridge(func->slot->bridge);
1165 alloc_acpi_hp_work(handle, type, context, _handle_hotplug_event_func);
1166} 923}
1167 924
1168/* 925/*
1169 * Create hotplug slots for the PCI bus. 926 * Create hotplug slots for the PCI bus.
1170 * It should always return 0 to avoid skipping following notifiers. 927 * It should always return 0 to avoid skipping following notifiers.
1171 */ 928 */
1172void acpiphp_enumerate_slots(struct pci_bus *bus, acpi_handle handle) 929void acpiphp_enumerate_slots(struct pci_bus *bus)
1173{ 930{
1174 acpi_handle dummy_handle;
1175 struct acpiphp_bridge *bridge; 931 struct acpiphp_bridge *bridge;
932 acpi_handle handle;
933 acpi_status status;
1176 934
1177 if (acpiphp_disabled) 935 if (acpiphp_disabled)
1178 return; 936 return;
1179 937
1180 if (detect_ejectable_slots(handle) <= 0) 938 handle = ACPI_HANDLE(bus->bridge);
939 if (!handle)
1181 return; 940 return;
1182 941
1183 bridge = kzalloc(sizeof(struct acpiphp_bridge), GFP_KERNEL); 942 bridge = kzalloc(sizeof(struct acpiphp_bridge), GFP_KERNEL);
1184 if (bridge == NULL) { 943 if (!bridge) {
1185 err("out of memory\n"); 944 acpi_handle_err(handle, "No memory for bridge object\n");
1186 return; 945 return;
1187 } 946 }
1188 947
1189 INIT_LIST_HEAD(&bridge->slots); 948 INIT_LIST_HEAD(&bridge->slots);
1190 kref_init(&bridge->ref); 949 kref_init(&bridge->ref);
1191 bridge->handle = handle;
1192 bridge->pci_dev = pci_dev_get(bus->self); 950 bridge->pci_dev = pci_dev_get(bus->self);
1193 bridge->pci_bus = bus; 951 bridge->pci_bus = bus;
1194 952
@@ -1199,31 +957,62 @@ void acpiphp_enumerate_slots(struct pci_bus *bus, acpi_handle handle)
1199 */ 957 */
1200 get_device(&bus->dev); 958 get_device(&bus->dev);
1201 959
1202 if (!pci_is_root_bus(bridge->pci_bus) && 960 if (!pci_is_root_bus(bridge->pci_bus)) {
1203 ACPI_SUCCESS(acpi_get_handle(bridge->handle, 961 struct acpiphp_context *context;
1204 "_EJ0", &dummy_handle))) { 962
1205 dbg("found ejectable p2p bridge\n"); 963 /*
1206 bridge->flags |= BRIDGE_HAS_EJ0; 964 * This bridge should have been registered as a hotplug function
1207 bridge->func = acpiphp_bridge_handle_to_function(handle); 965 * under its parent, so the context has to be there. If not, we
966 * are in deep goo.
967 */
968 mutex_lock(&acpiphp_context_lock);
969 context = acpiphp_get_context(handle);
970 if (WARN_ON(!context)) {
971 mutex_unlock(&acpiphp_context_lock);
972 put_device(&bus->dev);
973 kfree(bridge);
974 return;
975 }
976 bridge->context = context;
977 context->bridge = bridge;
978 /* Get a reference to the parent bridge. */
979 get_bridge(context->func.parent);
980 mutex_unlock(&acpiphp_context_lock);
1208 } 981 }
1209 982
1210 init_bridge_misc(bridge); 983 /* must be added to the list prior to calling register_slot */
984 mutex_lock(&bridge_mutex);
985 list_add(&bridge->list, &bridge_list);
986 mutex_unlock(&bridge_mutex);
987
988 /* register all slot objects under this bridge */
989 status = acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, 1,
990 register_slot, NULL, bridge, NULL);
991 if (ACPI_FAILURE(status)) {
992 acpi_handle_err(handle, "failed to register slots\n");
993 cleanup_bridge(bridge);
994 put_bridge(bridge);
995 }
1211} 996}
1212 997
1213/* Destroy hotplug slots associated with the PCI bus */ 998/* Destroy hotplug slots associated with the PCI bus */
1214void acpiphp_remove_slots(struct pci_bus *bus) 999void acpiphp_remove_slots(struct pci_bus *bus)
1215{ 1000{
1216 struct acpiphp_bridge *bridge, *tmp; 1001 struct acpiphp_bridge *bridge;
1217 1002
1218 if (acpiphp_disabled) 1003 if (acpiphp_disabled)
1219 return; 1004 return;
1220 1005
1221 list_for_each_entry_safe(bridge, tmp, &bridge_list, list) 1006 mutex_lock(&bridge_mutex);
1007 list_for_each_entry(bridge, &bridge_list, list)
1222 if (bridge->pci_bus == bus) { 1008 if (bridge->pci_bus == bus) {
1009 mutex_unlock(&bridge_mutex);
1223 cleanup_bridge(bridge); 1010 cleanup_bridge(bridge);
1224 put_bridge(bridge); 1011 put_bridge(bridge);
1225 break; 1012 return;
1226 } 1013 }
1014
1015 mutex_unlock(&bridge_mutex);
1227} 1016}
1228 1017
1229/** 1018/**
@@ -1232,51 +1021,39 @@ void acpiphp_remove_slots(struct pci_bus *bus)
1232 */ 1021 */
1233int acpiphp_enable_slot(struct acpiphp_slot *slot) 1022int acpiphp_enable_slot(struct acpiphp_slot *slot)
1234{ 1023{
1235 int retval;
1236
1237 mutex_lock(&slot->crit_sect); 1024 mutex_lock(&slot->crit_sect);
1025 /* configure all functions */
1026 if (!(slot->flags & SLOT_ENABLED))
1027 enable_slot(slot);
1238 1028
1239 /* wake up all functions */
1240 retval = power_on_slot(slot);
1241 if (retval)
1242 goto err_exit;
1243
1244 if (get_slot_status(slot) == ACPI_STA_ALL) {
1245 /* configure all functions */
1246 retval = enable_device(slot);
1247 if (retval)
1248 power_off_slot(slot);
1249 } else {
1250 dbg("%s: Slot status is not ACPI_STA_ALL\n", __func__);
1251 power_off_slot(slot);
1252 }
1253
1254 err_exit:
1255 mutex_unlock(&slot->crit_sect); 1029 mutex_unlock(&slot->crit_sect);
1256 return retval; 1030 return 0;
1257} 1031}
1258 1032
1259/** 1033/**
1260 * acpiphp_disable_slot - power off slot 1034 * acpiphp_disable_and_eject_slot - power off and eject slot
1261 * @slot: ACPI PHP slot 1035 * @slot: ACPI PHP slot
1262 */ 1036 */
1263int acpiphp_disable_slot(struct acpiphp_slot *slot) 1037int acpiphp_disable_and_eject_slot(struct acpiphp_slot *slot)
1264{ 1038{
1039 struct acpiphp_func *func;
1265 int retval = 0; 1040 int retval = 0;
1266 1041
1267 mutex_lock(&slot->crit_sect); 1042 mutex_lock(&slot->crit_sect);
1268 1043
1269 /* unconfigure all functions */ 1044 /* unconfigure all functions */
1270 retval = disable_device(slot); 1045 disable_slot(slot);
1271 if (retval) 1046
1272 goto err_exit; 1047 list_for_each_entry(func, &slot->funcs, sibling)
1048 if (func->flags & FUNC_HAS_EJ0) {
1049 acpi_handle handle = func_to_handle(func);
1273 1050
1274 /* power off all functions */ 1051 if (ACPI_FAILURE(acpi_evaluate_ej0(handle)))
1275 retval = power_off_slot(slot); 1052 acpi_handle_err(handle, "_EJ0 failed\n");
1276 if (retval) 1053
1277 goto err_exit; 1054 break;
1055 }
1278 1056
1279 err_exit:
1280 mutex_unlock(&slot->crit_sect); 1057 mutex_unlock(&slot->crit_sect);
1281 return retval; 1058 return retval;
1282} 1059}
@@ -1288,7 +1065,7 @@ int acpiphp_disable_slot(struct acpiphp_slot *slot)
1288 */ 1065 */
1289u8 acpiphp_get_power_status(struct acpiphp_slot *slot) 1066u8 acpiphp_get_power_status(struct acpiphp_slot *slot)
1290{ 1067{
1291 return (slot->flags & SLOT_POWEREDON); 1068 return (slot->flags & SLOT_ENABLED);
1292} 1069}
1293 1070
1294 1071
@@ -1298,11 +1075,7 @@ u8 acpiphp_get_power_status(struct acpiphp_slot *slot)
1298 */ 1075 */
1299u8 acpiphp_get_latch_status(struct acpiphp_slot *slot) 1076u8 acpiphp_get_latch_status(struct acpiphp_slot *slot)
1300{ 1077{
1301 unsigned int sta; 1078 return !(get_slot_status(slot) & ACPI_STA_DEVICE_UI);
1302
1303 sta = get_slot_status(slot);
1304
1305 return (sta & ACPI_STA_DEVICE_UI) ? 0 : 1;
1306} 1079}
1307 1080
1308 1081
@@ -1312,9 +1085,5 @@ u8 acpiphp_get_latch_status(struct acpiphp_slot *slot)
1312 */ 1085 */
1313u8 acpiphp_get_adapter_status(struct acpiphp_slot *slot) 1086u8 acpiphp_get_adapter_status(struct acpiphp_slot *slot)
1314{ 1087{
1315 unsigned int sta; 1088 return !!get_slot_status(slot);
1316
1317 sta = get_slot_status(slot);
1318
1319 return (sta == 0) ? 0 : 1;
1320} 1089}
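
The acpiphp_glue.c rework above drops the per-function notifier bookkeeping and instead attaches a reference-counted context to each ACPI handle (acpiphp_init_context / acpiphp_get_context / acpiphp_put_context, all under acpiphp_context_lock). The sketch below is a minimal userspace model of that get/put lifecycle, assuming a single handle; a plain global pointer stands in for acpi_attach_data()/acpi_get_data(), and a pthread mutex stands in for acpiphp_context_lock. It is illustrative only, not kernel code.

```c
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct hp_context {
	unsigned int refcount;
	/* per-device hotplug state would live here */
};

static pthread_mutex_t context_lock = PTHREAD_MUTEX_INITIALIZER;
static struct hp_context *the_context;	/* stand-in for acpi_attach_data()/acpi_get_data() */

/* Slot registration path: allocate the context and take the initial reference. */
static struct hp_context *context_init(void)
{
	struct hp_context *ctx = calloc(1, sizeof(*ctx));

	if (ctx) {
		ctx->refcount = 1;
		the_context = ctx;
	}
	return ctx;
}

/* Notify-handler path: look the context up and grab an extra reference. */
static struct hp_context *context_get(void)
{
	if (the_context)
		the_context->refcount++;
	return the_context;
}

/* Drop a reference; the context goes away with the last one. */
static void context_put(struct hp_context *ctx)
{
	if (--ctx->refcount)
		return;
	the_context = NULL;
	free(ctx);
}

int main(void)
{
	struct hp_context *ctx;

	pthread_mutex_lock(&context_lock);
	ctx = context_init();		/* register_slot() creates the context */
	if (!ctx)
		return 1;
	context_get();			/* notify handler queues a work item */
	pthread_mutex_unlock(&context_lock);

	pthread_mutex_lock(&context_lock);
	context_put(ctx);		/* work item completed */
	context_put(ctx);		/* teardown drops the last reference */
	printf("context freed: %s\n", the_context ? "no" : "yes");
	pthread_mutex_unlock(&context_lock);
	return 0;
}
```

Compiled with `cc -pthread`, this prints "context freed: yes" once the last reference is dropped, mirroring how free_bridge() and the hotplug work items balance the references taken in register_slot() and handle_hotplug_event().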
diff --git a/drivers/pci/hotplug/acpiphp_ibm.c b/drivers/pci/hotplug/acpiphp_ibm.c
index 5394fffdf167..2f5786c8522c 100644
--- a/drivers/pci/hotplug/acpiphp_ibm.c
+++ b/drivers/pci/hotplug/acpiphp_ibm.c
@@ -66,7 +66,7 @@ do { \
66#define IBM_HARDWARE_ID1 "IBM37D0" 66#define IBM_HARDWARE_ID1 "IBM37D0"
67#define IBM_HARDWARE_ID2 "IBM37D4" 67#define IBM_HARDWARE_ID2 "IBM37D4"
68 68
69#define hpslot_to_sun(A) (((struct slot *)((A)->private))->acpi_slot->sun) 69#define hpslot_to_sun(A) (((struct slot *)((A)->private))->sun)
70 70
71/* union apci_descriptor - allows access to the 71/* union apci_descriptor - allows access to the
72 * various device descriptors that are embedded in the 72 * various device descriptors that are embedded in the
diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c
index 01e264fb50e0..7c29ee4ed0ae 100644
--- a/drivers/pci/pci-acpi.c
+++ b/drivers/pci/pci-acpi.c
@@ -210,7 +210,7 @@ static int acpi_pci_set_power_state(struct pci_dev *dev, pci_power_t state)
210 } 210 }
211 211
212 if (!error) 212 if (!error)
213 dev_info(&dev->dev, "power state changed by ACPI to %s\n", 213 dev_dbg(&dev->dev, "power state changed by ACPI to %s\n",
214 acpi_power_state_string(state_conv[state])); 214 acpi_power_state_string(state_conv[state]));
215 215
216 return error; 216 return error;
@@ -290,24 +290,16 @@ static struct pci_platform_pm_ops acpi_pci_platform_pm = {
290 290
291void acpi_pci_add_bus(struct pci_bus *bus) 291void acpi_pci_add_bus(struct pci_bus *bus)
292{ 292{
293 acpi_handle handle = NULL; 293 if (acpi_pci_disabled || !bus->bridge)
294
295 if (bus->bridge)
296 handle = ACPI_HANDLE(bus->bridge);
297 if (acpi_pci_disabled || handle == NULL)
298 return; 294 return;
299 295
300 acpi_pci_slot_enumerate(bus, handle); 296 acpi_pci_slot_enumerate(bus);
301 acpiphp_enumerate_slots(bus, handle); 297 acpiphp_enumerate_slots(bus);
302} 298}
303 299
304void acpi_pci_remove_bus(struct pci_bus *bus) 300void acpi_pci_remove_bus(struct pci_bus *bus)
305{ 301{
306 /* 302 if (acpi_pci_disabled || !bus->bridge)
307 * bus->bridge->acpi_node.handle has already been reset to NULL
308 * when acpi_pci_remove_bus() is called, so don't check ACPI handle.
309 */
310 if (acpi_pci_disabled)
311 return; 303 return;
312 304
313 acpiphp_remove_slots(bus); 305 acpiphp_remove_slots(bus);
diff --git a/drivers/pinctrl/pinctrl-sunxi.c b/drivers/pinctrl/pinctrl-sunxi.c
index c47fd1e5450b..94716c779800 100644
--- a/drivers/pinctrl/pinctrl-sunxi.c
+++ b/drivers/pinctrl/pinctrl-sunxi.c
@@ -278,6 +278,7 @@ static int sunxi_pconf_group_set(struct pinctrl_dev *pctldev,
278{ 278{
279 struct sunxi_pinctrl *pctl = pinctrl_dev_get_drvdata(pctldev); 279 struct sunxi_pinctrl *pctl = pinctrl_dev_get_drvdata(pctldev);
280 struct sunxi_pinctrl_group *g = &pctl->groups[group]; 280 struct sunxi_pinctrl_group *g = &pctl->groups[group];
281 unsigned long flags;
281 u32 val, mask; 282 u32 val, mask;
282 u16 strength; 283 u16 strength;
283 u8 dlevel; 284 u8 dlevel;
@@ -295,22 +296,35 @@ static int sunxi_pconf_group_set(struct pinctrl_dev *pctldev,
295 * 3: 40mA 296 * 3: 40mA
296 */ 297 */
297 dlevel = strength / 10 - 1; 298 dlevel = strength / 10 - 1;
299
300 spin_lock_irqsave(&pctl->lock, flags);
301
298 val = readl(pctl->membase + sunxi_dlevel_reg(g->pin)); 302 val = readl(pctl->membase + sunxi_dlevel_reg(g->pin));
299 mask = DLEVEL_PINS_MASK << sunxi_dlevel_offset(g->pin); 303 mask = DLEVEL_PINS_MASK << sunxi_dlevel_offset(g->pin);
300 writel((val & ~mask) | dlevel << sunxi_dlevel_offset(g->pin), 304 writel((val & ~mask) | dlevel << sunxi_dlevel_offset(g->pin),
301 pctl->membase + sunxi_dlevel_reg(g->pin)); 305 pctl->membase + sunxi_dlevel_reg(g->pin));
306
307 spin_unlock_irqrestore(&pctl->lock, flags);
302 break; 308 break;
303 case PIN_CONFIG_BIAS_PULL_UP: 309 case PIN_CONFIG_BIAS_PULL_UP:
310 spin_lock_irqsave(&pctl->lock, flags);
311
304 val = readl(pctl->membase + sunxi_pull_reg(g->pin)); 312 val = readl(pctl->membase + sunxi_pull_reg(g->pin));
305 mask = PULL_PINS_MASK << sunxi_pull_offset(g->pin); 313 mask = PULL_PINS_MASK << sunxi_pull_offset(g->pin);
306 writel((val & ~mask) | 1 << sunxi_pull_offset(g->pin), 314 writel((val & ~mask) | 1 << sunxi_pull_offset(g->pin),
307 pctl->membase + sunxi_pull_reg(g->pin)); 315 pctl->membase + sunxi_pull_reg(g->pin));
316
317 spin_unlock_irqrestore(&pctl->lock, flags);
308 break; 318 break;
309 case PIN_CONFIG_BIAS_PULL_DOWN: 319 case PIN_CONFIG_BIAS_PULL_DOWN:
320 spin_lock_irqsave(&pctl->lock, flags);
321
310 val = readl(pctl->membase + sunxi_pull_reg(g->pin)); 322 val = readl(pctl->membase + sunxi_pull_reg(g->pin));
311 mask = PULL_PINS_MASK << sunxi_pull_offset(g->pin); 323 mask = PULL_PINS_MASK << sunxi_pull_offset(g->pin);
312 writel((val & ~mask) | 2 << sunxi_pull_offset(g->pin), 324 writel((val & ~mask) | 2 << sunxi_pull_offset(g->pin),
313 pctl->membase + sunxi_pull_reg(g->pin)); 325 pctl->membase + sunxi_pull_reg(g->pin));
326
327 spin_unlock_irqrestore(&pctl->lock, flags);
314 break; 328 break;
315 default: 329 default:
316 break; 330 break;
@@ -360,11 +374,17 @@ static void sunxi_pmx_set(struct pinctrl_dev *pctldev,
360 u8 config) 374 u8 config)
361{ 375{
362 struct sunxi_pinctrl *pctl = pinctrl_dev_get_drvdata(pctldev); 376 struct sunxi_pinctrl *pctl = pinctrl_dev_get_drvdata(pctldev);
377 unsigned long flags;
378 u32 val, mask;
379
380 spin_lock_irqsave(&pctl->lock, flags);
363 381
364 u32 val = readl(pctl->membase + sunxi_mux_reg(pin)); 382 val = readl(pctl->membase + sunxi_mux_reg(pin));
365 u32 mask = MUX_PINS_MASK << sunxi_mux_offset(pin); 383 mask = MUX_PINS_MASK << sunxi_mux_offset(pin);
366 writel((val & ~mask) | config << sunxi_mux_offset(pin), 384 writel((val & ~mask) | config << sunxi_mux_offset(pin),
367 pctl->membase + sunxi_mux_reg(pin)); 385 pctl->membase + sunxi_mux_reg(pin));
386
387 spin_unlock_irqrestore(&pctl->lock, flags);
368} 388}
369 389
370static int sunxi_pmx_enable(struct pinctrl_dev *pctldev, 390static int sunxi_pmx_enable(struct pinctrl_dev *pctldev,
@@ -464,8 +484,21 @@ static void sunxi_pinctrl_gpio_set(struct gpio_chip *chip,
464 struct sunxi_pinctrl *pctl = dev_get_drvdata(chip->dev); 484 struct sunxi_pinctrl *pctl = dev_get_drvdata(chip->dev);
465 u32 reg = sunxi_data_reg(offset); 485 u32 reg = sunxi_data_reg(offset);
466 u8 index = sunxi_data_offset(offset); 486 u8 index = sunxi_data_offset(offset);
487 unsigned long flags;
488 u32 regval;
489
490 spin_lock_irqsave(&pctl->lock, flags);
491
492 regval = readl(pctl->membase + reg);
467 493
468 writel((value & DATA_PINS_MASK) << index, pctl->membase + reg); 494 if (value)
495 regval |= BIT(index);
496 else
497 regval &= ~(BIT(index));
498
499 writel(regval, pctl->membase + reg);
500
501 spin_unlock_irqrestore(&pctl->lock, flags);
469} 502}
470 503
471static int sunxi_pinctrl_gpio_of_xlate(struct gpio_chip *gc, 504static int sunxi_pinctrl_gpio_of_xlate(struct gpio_chip *gc,
@@ -526,6 +559,8 @@ static int sunxi_pinctrl_irq_set_type(struct irq_data *d,
526 struct sunxi_pinctrl *pctl = irq_data_get_irq_chip_data(d); 559 struct sunxi_pinctrl *pctl = irq_data_get_irq_chip_data(d);
527 u32 reg = sunxi_irq_cfg_reg(d->hwirq); 560 u32 reg = sunxi_irq_cfg_reg(d->hwirq);
528 u8 index = sunxi_irq_cfg_offset(d->hwirq); 561 u8 index = sunxi_irq_cfg_offset(d->hwirq);
562 unsigned long flags;
563 u32 regval;
529 u8 mode; 564 u8 mode;
530 565
531 switch (type) { 566 switch (type) {
@@ -548,7 +583,13 @@ static int sunxi_pinctrl_irq_set_type(struct irq_data *d,
548 return -EINVAL; 583 return -EINVAL;
549 } 584 }
550 585
551 writel((mode & IRQ_CFG_IRQ_MASK) << index, pctl->membase + reg); 586 spin_lock_irqsave(&pctl->lock, flags);
587
588 regval = readl(pctl->membase + reg);
589 regval &= ~IRQ_CFG_IRQ_MASK;
590 writel(regval | (mode << index), pctl->membase + reg);
591
592 spin_unlock_irqrestore(&pctl->lock, flags);
552 593
553 return 0; 594 return 0;
554} 595}
@@ -560,14 +601,19 @@ static void sunxi_pinctrl_irq_mask_ack(struct irq_data *d)
560 u8 ctrl_idx = sunxi_irq_ctrl_offset(d->hwirq); 601 u8 ctrl_idx = sunxi_irq_ctrl_offset(d->hwirq);
561 u32 status_reg = sunxi_irq_status_reg(d->hwirq); 602 u32 status_reg = sunxi_irq_status_reg(d->hwirq);
562 u8 status_idx = sunxi_irq_status_offset(d->hwirq); 603 u8 status_idx = sunxi_irq_status_offset(d->hwirq);
604 unsigned long flags;
563 u32 val; 605 u32 val;
564 606
607 spin_lock_irqsave(&pctl->lock, flags);
608
565 /* Mask the IRQ */ 609 /* Mask the IRQ */
566 val = readl(pctl->membase + ctrl_reg); 610 val = readl(pctl->membase + ctrl_reg);
567 writel(val & ~(1 << ctrl_idx), pctl->membase + ctrl_reg); 611 writel(val & ~(1 << ctrl_idx), pctl->membase + ctrl_reg);
568 612
569 /* Clear the IRQ */ 613 /* Clear the IRQ */
570 writel(1 << status_idx, pctl->membase + status_reg); 614 writel(1 << status_idx, pctl->membase + status_reg);
615
616 spin_unlock_irqrestore(&pctl->lock, flags);
571} 617}
572 618
573static void sunxi_pinctrl_irq_mask(struct irq_data *d) 619static void sunxi_pinctrl_irq_mask(struct irq_data *d)
@@ -575,11 +621,16 @@ static void sunxi_pinctrl_irq_mask(struct irq_data *d)
575 struct sunxi_pinctrl *pctl = irq_data_get_irq_chip_data(d); 621 struct sunxi_pinctrl *pctl = irq_data_get_irq_chip_data(d);
576 u32 reg = sunxi_irq_ctrl_reg(d->hwirq); 622 u32 reg = sunxi_irq_ctrl_reg(d->hwirq);
577 u8 idx = sunxi_irq_ctrl_offset(d->hwirq); 623 u8 idx = sunxi_irq_ctrl_offset(d->hwirq);
624 unsigned long flags;
578 u32 val; 625 u32 val;
579 626
627 spin_lock_irqsave(&pctl->lock, flags);
628
580 /* Mask the IRQ */ 629 /* Mask the IRQ */
581 val = readl(pctl->membase + reg); 630 val = readl(pctl->membase + reg);
582 writel(val & ~(1 << idx), pctl->membase + reg); 631 writel(val & ~(1 << idx), pctl->membase + reg);
632
633 spin_unlock_irqrestore(&pctl->lock, flags);
583} 634}
584 635
585static void sunxi_pinctrl_irq_unmask(struct irq_data *d) 636static void sunxi_pinctrl_irq_unmask(struct irq_data *d)
@@ -588,6 +639,7 @@ static void sunxi_pinctrl_irq_unmask(struct irq_data *d)
588 struct sunxi_desc_function *func; 639 struct sunxi_desc_function *func;
589 u32 reg = sunxi_irq_ctrl_reg(d->hwirq); 640 u32 reg = sunxi_irq_ctrl_reg(d->hwirq);
590 u8 idx = sunxi_irq_ctrl_offset(d->hwirq); 641 u8 idx = sunxi_irq_ctrl_offset(d->hwirq);
642 unsigned long flags;
591 u32 val; 643 u32 val;
592 644
593 func = sunxi_pinctrl_desc_find_function_by_pin(pctl, 645 func = sunxi_pinctrl_desc_find_function_by_pin(pctl,
@@ -597,9 +649,13 @@ static void sunxi_pinctrl_irq_unmask(struct irq_data *d)
597 /* Change muxing to INT mode */ 649 /* Change muxing to INT mode */
598 sunxi_pmx_set(pctl->pctl_dev, pctl->irq_array[d->hwirq], func->muxval); 650 sunxi_pmx_set(pctl->pctl_dev, pctl->irq_array[d->hwirq], func->muxval);
599 651
652 spin_lock_irqsave(&pctl->lock, flags);
653
600 /* Unmask the IRQ */ 654 /* Unmask the IRQ */
601 val = readl(pctl->membase + reg); 655 val = readl(pctl->membase + reg);
602 writel(val | (1 << idx), pctl->membase + reg); 656 writel(val | (1 << idx), pctl->membase + reg);
657
658 spin_unlock_irqrestore(&pctl->lock, flags);
603} 659}
604 660
605static struct irq_chip sunxi_pinctrl_irq_chip = { 661static struct irq_chip sunxi_pinctrl_irq_chip = {
@@ -752,6 +808,8 @@ static int sunxi_pinctrl_probe(struct platform_device *pdev)
752 return -ENOMEM; 808 return -ENOMEM;
753 platform_set_drvdata(pdev, pctl); 809 platform_set_drvdata(pdev, pctl);
754 810
811 spin_lock_init(&pctl->lock);
812
755 pctl->membase = of_iomap(node, 0); 813 pctl->membase = of_iomap(node, 0);
756 if (!pctl->membase) 814 if (!pctl->membase)
757 return -ENOMEM; 815 return -ENOMEM;
diff --git a/drivers/pinctrl/pinctrl-sunxi.h b/drivers/pinctrl/pinctrl-sunxi.h
index d68047d8f699..01c494f8a14f 100644
--- a/drivers/pinctrl/pinctrl-sunxi.h
+++ b/drivers/pinctrl/pinctrl-sunxi.h
@@ -14,6 +14,7 @@
14#define __PINCTRL_SUNXI_H 14#define __PINCTRL_SUNXI_H
15 15
16#include <linux/kernel.h> 16#include <linux/kernel.h>
17#include <linux/spinlock.h>
17 18
18#define PA_BASE 0 19#define PA_BASE 0
19#define PB_BASE 32 20#define PB_BASE 32
@@ -407,6 +408,7 @@ struct sunxi_pinctrl {
407 unsigned ngroups; 408 unsigned ngroups;
408 int irq; 409 int irq;
409 int irq_array[SUNXI_IRQ_NUMBER]; 410 int irq_array[SUNXI_IRQ_NUMBER];
411 spinlock_t lock;
410 struct pinctrl_dev *pctl_dev; 412 struct pinctrl_dev *pctl_dev;
411}; 413};
412 414
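The pinctrl-sunxi hunks above wrap every register update — drive strength, pull bias, pin muxing, GPIO output, and the irq_chip configure/mask/unmask paths — in one driver-wide spinlock, since each of them is a read-modify-write on a shared register and the readl()/writel() pair is not atomic by itself; the GPIO set path additionally switches from overwriting the whole data register to toggling only BIT(index). The header gains the lock field and probe() initializes it. A minimal sketch of the resulting pattern; sunxi_rmw() and its signature are illustrative, not part of the patch:

	/* Locked read-modify-write, assuming <linux/io.h> and <linux/spinlock.h>;
	 * the spinlock also excludes the irq_chip callbacks that touch the same
	 * registers. */
	static void sunxi_rmw(struct sunxi_pinctrl *pctl, u32 reg, u32 mask, u32 bits)
	{
		unsigned long flags;
		u32 val;

		spin_lock_irqsave(&pctl->lock, flags);
		val = readl(pctl->membase + reg);
		writel((val & ~mask) | bits, pctl->membase + reg);
		spin_unlock_irqrestore(&pctl->lock, flags);
	}
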
diff --git a/drivers/platform/olpc/olpc-ec.c b/drivers/platform/olpc/olpc-ec.c
index 0f9f8596b300..f9119525f557 100644
--- a/drivers/platform/olpc/olpc-ec.c
+++ b/drivers/platform/olpc/olpc-ec.c
@@ -330,7 +330,7 @@ static int __init olpc_ec_init_module(void)
330 return platform_driver_register(&olpc_ec_plat_driver); 330 return platform_driver_register(&olpc_ec_plat_driver);
331} 331}
332 332
333module_init(olpc_ec_init_module); 333arch_initcall(olpc_ec_init_module);
334 334
335MODULE_AUTHOR("Andres Salomon <dilinger@queued.net>"); 335MODULE_AUTHOR("Andres Salomon <dilinger@queued.net>");
336MODULE_LICENSE("GPL"); 336MODULE_LICENSE("GPL");
diff --git a/drivers/platform/x86/hp-wmi.c b/drivers/platform/x86/hp-wmi.c
index 97bb05edcb5a..d6970f47ae72 100644
--- a/drivers/platform/x86/hp-wmi.c
+++ b/drivers/platform/x86/hp-wmi.c
@@ -53,7 +53,6 @@ MODULE_ALIAS("wmi:5FB7F034-2C63-45e9-BE91-3D44E2C707E4");
53#define HPWMI_ALS_QUERY 0x3 53#define HPWMI_ALS_QUERY 0x3
54#define HPWMI_HARDWARE_QUERY 0x4 54#define HPWMI_HARDWARE_QUERY 0x4
55#define HPWMI_WIRELESS_QUERY 0x5 55#define HPWMI_WIRELESS_QUERY 0x5
56#define HPWMI_BIOS_QUERY 0x9
57#define HPWMI_HOTKEY_QUERY 0xc 56#define HPWMI_HOTKEY_QUERY 0xc
58#define HPWMI_WIRELESS2_QUERY 0x1b 57#define HPWMI_WIRELESS2_QUERY 0x1b
59#define HPWMI_POSTCODEERROR_QUERY 0x2a 58#define HPWMI_POSTCODEERROR_QUERY 0x2a
@@ -293,19 +292,6 @@ static int hp_wmi_tablet_state(void)
293 return (state & 0x4) ? 1 : 0; 292 return (state & 0x4) ? 1 : 0;
294} 293}
295 294
296static int hp_wmi_enable_hotkeys(void)
297{
298 int ret;
299 int query = 0x6e;
300
301 ret = hp_wmi_perform_query(HPWMI_BIOS_QUERY, 1, &query, sizeof(query),
302 0);
303
304 if (ret)
305 return -EINVAL;
306 return 0;
307}
308
309static int hp_wmi_set_block(void *data, bool blocked) 295static int hp_wmi_set_block(void *data, bool blocked)
310{ 296{
311 enum hp_wmi_radio r = (enum hp_wmi_radio) data; 297 enum hp_wmi_radio r = (enum hp_wmi_radio) data;
@@ -1009,8 +995,6 @@ static int __init hp_wmi_init(void)
1009 err = hp_wmi_input_setup(); 995 err = hp_wmi_input_setup();
1010 if (err) 996 if (err)
1011 return err; 997 return err;
1012
1013 hp_wmi_enable_hotkeys();
1014 } 998 }
1015 999
1016 if (bios_capable) { 1000 if (bios_capable) {
diff --git a/drivers/platform/x86/sony-laptop.c b/drivers/platform/x86/sony-laptop.c
index 069821b1fc22..d3fd52036fd6 100644
--- a/drivers/platform/x86/sony-laptop.c
+++ b/drivers/platform/x86/sony-laptop.c
@@ -2437,7 +2437,10 @@ static ssize_t sony_nc_gfx_switch_status_show(struct device *dev,
2437 if (pos < 0) 2437 if (pos < 0)
2438 return pos; 2438 return pos;
2439 2439
2440 return snprintf(buffer, PAGE_SIZE, "%s\n", pos ? "speed" : "stamina"); 2440 return snprintf(buffer, PAGE_SIZE, "%s\n",
2441 pos == SPEED ? "speed" :
2442 pos == STAMINA ? "stamina" :
2443 pos == AUTO ? "auto" : "unknown");
2441} 2444}
2442 2445
2443static int sony_nc_gfx_switch_setup(struct platform_device *pd, 2446static int sony_nc_gfx_switch_setup(struct platform_device *pd,
@@ -4316,7 +4319,8 @@ static int sony_pic_add(struct acpi_device *device)
4316 goto err_free_resources; 4319 goto err_free_resources;
4317 } 4320 }
4318 4321
4319 if (sonypi_compat_init()) 4322 result = sonypi_compat_init();
4323 if (result)
4320 goto err_remove_input; 4324 goto err_remove_input;
4321 4325
4322 /* request io port */ 4326 /* request io port */
diff --git a/drivers/pnp/pnpacpi/core.c b/drivers/pnp/pnpacpi/core.c
index 55cd459a3908..34049b0b4c73 100644
--- a/drivers/pnp/pnpacpi/core.c
+++ b/drivers/pnp/pnpacpi/core.c
@@ -131,7 +131,7 @@ static int pnpacpi_disable_resources(struct pnp_dev *dev)
131 /* acpi_unregister_gsi(pnp_irq(dev, 0)); */ 131 /* acpi_unregister_gsi(pnp_irq(dev, 0)); */
132 ret = 0; 132 ret = 0;
133 if (acpi_bus_power_manageable(handle)) 133 if (acpi_bus_power_manageable(handle))
134 acpi_bus_set_power(handle, ACPI_STATE_D3); 134 acpi_bus_set_power(handle, ACPI_STATE_D3_COLD);
135 /* continue even if acpi_bus_set_power() fails */ 135 /* continue even if acpi_bus_set_power() fails */
136 if (ACPI_FAILURE(acpi_evaluate_object(handle, "_DIS", NULL, NULL))) 136 if (ACPI_FAILURE(acpi_evaluate_object(handle, "_DIS", NULL, NULL)))
137 ret = -ENODEV; 137 ret = -ENODEV;
@@ -174,10 +174,10 @@ static int pnpacpi_suspend(struct pnp_dev *dev, pm_message_t state)
174 174
175 if (acpi_bus_power_manageable(handle)) { 175 if (acpi_bus_power_manageable(handle)) {
176 int power_state = acpi_pm_device_sleep_state(&dev->dev, NULL, 176 int power_state = acpi_pm_device_sleep_state(&dev->dev, NULL,
177 ACPI_STATE_D3); 177 ACPI_STATE_D3_COLD);
178 if (power_state < 0) 178 if (power_state < 0)
179 power_state = (state.event == PM_EVENT_ON) ? 179 power_state = (state.event == PM_EVENT_ON) ?
180 ACPI_STATE_D0 : ACPI_STATE_D3; 180 ACPI_STATE_D0 : ACPI_STATE_D3_COLD;
181 181
182 /* 182 /*
183 * acpi_bus_set_power() often fails (keyboard port can't be 183 * acpi_bus_set_power() often fails (keyboard port can't be
diff --git a/drivers/rtc/rtc-stmp3xxx.c b/drivers/rtc/rtc-stmp3xxx.c
index 767fee2ab340..26019531db15 100644
--- a/drivers/rtc/rtc-stmp3xxx.c
+++ b/drivers/rtc/rtc-stmp3xxx.c
@@ -23,6 +23,7 @@
23#include <linux/init.h> 23#include <linux/init.h>
24#include <linux/platform_device.h> 24#include <linux/platform_device.h>
25#include <linux/interrupt.h> 25#include <linux/interrupt.h>
26#include <linux/delay.h>
26#include <linux/rtc.h> 27#include <linux/rtc.h>
27#include <linux/slab.h> 28#include <linux/slab.h>
28#include <linux/of_device.h> 29#include <linux/of_device.h>
@@ -119,24 +120,39 @@ static void stmp3xxx_wdt_register(struct platform_device *rtc_pdev)
119} 120}
120#endif /* CONFIG_STMP3XXX_RTC_WATCHDOG */ 121#endif /* CONFIG_STMP3XXX_RTC_WATCHDOG */
121 122
122static void stmp3xxx_wait_time(struct stmp3xxx_rtc_data *rtc_data) 123static int stmp3xxx_wait_time(struct stmp3xxx_rtc_data *rtc_data)
123{ 124{
125 int timeout = 5000; /* 3ms according to i.MX28 Ref Manual */
124 /* 126 /*
125 * The datasheet doesn't say which way round the 127 * The i.MX28 Applications Processor Reference Manual, Rev. 1, 2010
126 * NEW_REGS/STALE_REGS bitfields go. In fact it's 0x1=P0, 128 * states:
127 * 0x2=P1, .., 0x20=P5, 0x40=ALARM, 0x80=SECONDS 129 * | The order in which registers are updated is
130 * | Persistent 0, 1, 2, 3, 4, 5, Alarm, Seconds.
131 * | (This list is in bitfield order, from LSB to MSB, as they would
132 * | appear in the STALE_REGS and NEW_REGS bitfields of the HW_RTC_STAT
133 * | register. For example, the Seconds register corresponds to
134 * | STALE_REGS or NEW_REGS containing 0x80.)
128 */ 135 */
129 while (readl(rtc_data->io + STMP3XXX_RTC_STAT) & 136 do {
130 (0x80 << STMP3XXX_RTC_STAT_STALE_SHIFT)) 137 if (!(readl(rtc_data->io + STMP3XXX_RTC_STAT) &
131 cpu_relax(); 138 (0x80 << STMP3XXX_RTC_STAT_STALE_SHIFT)))
139 return 0;
140 udelay(1);
141 } while (--timeout > 0);
142 return (readl(rtc_data->io + STMP3XXX_RTC_STAT) &
143 (0x80 << STMP3XXX_RTC_STAT_STALE_SHIFT)) ? -ETIME : 0;
132} 144}
133 145
134/* Time read/write */ 146/* Time read/write */
135static int stmp3xxx_rtc_gettime(struct device *dev, struct rtc_time *rtc_tm) 147static int stmp3xxx_rtc_gettime(struct device *dev, struct rtc_time *rtc_tm)
136{ 148{
149 int ret;
137 struct stmp3xxx_rtc_data *rtc_data = dev_get_drvdata(dev); 150 struct stmp3xxx_rtc_data *rtc_data = dev_get_drvdata(dev);
138 151
139 stmp3xxx_wait_time(rtc_data); 152 ret = stmp3xxx_wait_time(rtc_data);
153 if (ret)
154 return ret;
155
140 rtc_time_to_tm(readl(rtc_data->io + STMP3XXX_RTC_SECONDS), rtc_tm); 156 rtc_time_to_tm(readl(rtc_data->io + STMP3XXX_RTC_SECONDS), rtc_tm);
141 return 0; 157 return 0;
142} 158}
@@ -146,8 +162,7 @@ static int stmp3xxx_rtc_set_mmss(struct device *dev, unsigned long t)
146 struct stmp3xxx_rtc_data *rtc_data = dev_get_drvdata(dev); 162 struct stmp3xxx_rtc_data *rtc_data = dev_get_drvdata(dev);
147 163
148 writel(t, rtc_data->io + STMP3XXX_RTC_SECONDS); 164 writel(t, rtc_data->io + STMP3XXX_RTC_SECONDS);
149 stmp3xxx_wait_time(rtc_data); 165 return stmp3xxx_wait_time(rtc_data);
150 return 0;
151} 166}
152 167
153/* interrupt(s) handler */ 168/* interrupt(s) handler */
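stmp3xxx_wait_time() used to spin indefinitely on the SECONDS bit of STALE_REGS; the rewrite bounds the wait at roughly 3 ms (5000 iterations of udelay(1), the settling time the comment cites from the i.MX28 reference manual), returns -ETIME if the shadow copy never becomes current, and the gettime/set_mmss callers now propagate that error instead of hanging. The same bounded-poll idea as a stand-alone helper; the name and arguments are illustrative only:

	/* Poll for a status bit to clear, assuming <linux/io.h>, <linux/delay.h>
	 * and <linux/errno.h>. */
	static int poll_bit_clear(void __iomem *reg, u32 bit, unsigned int timeout_us)
	{
		while (timeout_us--) {
			if (!(readl(reg) & bit))
				return 0;
			udelay(1);
		}
		/* one last read so a clear right after the final delay still wins */
		return (readl(reg) & bit) ? -ETIME : 0;
	}
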
diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c
index 1d4c8fe72752..c82fe65c4128 100644
--- a/drivers/s390/scsi/zfcp_erp.c
+++ b/drivers/s390/scsi/zfcp_erp.c
@@ -102,10 +102,13 @@ static void zfcp_erp_action_dismiss_port(struct zfcp_port *port)
102 102
103 if (atomic_read(&port->status) & ZFCP_STATUS_COMMON_ERP_INUSE) 103 if (atomic_read(&port->status) & ZFCP_STATUS_COMMON_ERP_INUSE)
104 zfcp_erp_action_dismiss(&port->erp_action); 104 zfcp_erp_action_dismiss(&port->erp_action);
105 else 105 else {
106 shost_for_each_device(sdev, port->adapter->scsi_host) 106 spin_lock(port->adapter->scsi_host->host_lock);
107 __shost_for_each_device(sdev, port->adapter->scsi_host)
107 if (sdev_to_zfcp(sdev)->port == port) 108 if (sdev_to_zfcp(sdev)->port == port)
108 zfcp_erp_action_dismiss_lun(sdev); 109 zfcp_erp_action_dismiss_lun(sdev);
110 spin_unlock(port->adapter->scsi_host->host_lock);
111 }
109} 112}
110 113
111static void zfcp_erp_action_dismiss_adapter(struct zfcp_adapter *adapter) 114static void zfcp_erp_action_dismiss_adapter(struct zfcp_adapter *adapter)
@@ -592,9 +595,11 @@ static void _zfcp_erp_lun_reopen_all(struct zfcp_port *port, int clear,
592{ 595{
593 struct scsi_device *sdev; 596 struct scsi_device *sdev;
594 597
595 shost_for_each_device(sdev, port->adapter->scsi_host) 598 spin_lock(port->adapter->scsi_host->host_lock);
599 __shost_for_each_device(sdev, port->adapter->scsi_host)
596 if (sdev_to_zfcp(sdev)->port == port) 600 if (sdev_to_zfcp(sdev)->port == port)
597 _zfcp_erp_lun_reopen(sdev, clear, id, 0); 601 _zfcp_erp_lun_reopen(sdev, clear, id, 0);
602 spin_unlock(port->adapter->scsi_host->host_lock);
598} 603}
599 604
600static void zfcp_erp_strategy_followup_failed(struct zfcp_erp_action *act) 605static void zfcp_erp_strategy_followup_failed(struct zfcp_erp_action *act)
@@ -1434,8 +1439,10 @@ void zfcp_erp_set_adapter_status(struct zfcp_adapter *adapter, u32 mask)
1434 atomic_set_mask(common_mask, &port->status); 1439 atomic_set_mask(common_mask, &port->status);
1435 read_unlock_irqrestore(&adapter->port_list_lock, flags); 1440 read_unlock_irqrestore(&adapter->port_list_lock, flags);
1436 1441
1437 shost_for_each_device(sdev, adapter->scsi_host) 1442 spin_lock_irqsave(adapter->scsi_host->host_lock, flags);
1443 __shost_for_each_device(sdev, adapter->scsi_host)
1438 atomic_set_mask(common_mask, &sdev_to_zfcp(sdev)->status); 1444 atomic_set_mask(common_mask, &sdev_to_zfcp(sdev)->status);
1445 spin_unlock_irqrestore(adapter->scsi_host->host_lock, flags);
1439} 1446}
1440 1447
1441/** 1448/**
@@ -1469,11 +1476,13 @@ void zfcp_erp_clear_adapter_status(struct zfcp_adapter *adapter, u32 mask)
1469 } 1476 }
1470 read_unlock_irqrestore(&adapter->port_list_lock, flags); 1477 read_unlock_irqrestore(&adapter->port_list_lock, flags);
1471 1478
1472 shost_for_each_device(sdev, adapter->scsi_host) { 1479 spin_lock_irqsave(adapter->scsi_host->host_lock, flags);
1480 __shost_for_each_device(sdev, adapter->scsi_host) {
1473 atomic_clear_mask(common_mask, &sdev_to_zfcp(sdev)->status); 1481 atomic_clear_mask(common_mask, &sdev_to_zfcp(sdev)->status);
1474 if (clear_counter) 1482 if (clear_counter)
1475 atomic_set(&sdev_to_zfcp(sdev)->erp_counter, 0); 1483 atomic_set(&sdev_to_zfcp(sdev)->erp_counter, 0);
1476 } 1484 }
1485 spin_unlock_irqrestore(adapter->scsi_host->host_lock, flags);
1477} 1486}
1478 1487
1479/** 1488/**
@@ -1487,16 +1496,19 @@ void zfcp_erp_set_port_status(struct zfcp_port *port, u32 mask)
1487{ 1496{
1488 struct scsi_device *sdev; 1497 struct scsi_device *sdev;
1489 u32 common_mask = mask & ZFCP_COMMON_FLAGS; 1498 u32 common_mask = mask & ZFCP_COMMON_FLAGS;
1499 unsigned long flags;
1490 1500
1491 atomic_set_mask(mask, &port->status); 1501 atomic_set_mask(mask, &port->status);
1492 1502
1493 if (!common_mask) 1503 if (!common_mask)
1494 return; 1504 return;
1495 1505
1496 shost_for_each_device(sdev, port->adapter->scsi_host) 1506 spin_lock_irqsave(port->adapter->scsi_host->host_lock, flags);
1507 __shost_for_each_device(sdev, port->adapter->scsi_host)
1497 if (sdev_to_zfcp(sdev)->port == port) 1508 if (sdev_to_zfcp(sdev)->port == port)
1498 atomic_set_mask(common_mask, 1509 atomic_set_mask(common_mask,
1499 &sdev_to_zfcp(sdev)->status); 1510 &sdev_to_zfcp(sdev)->status);
1511 spin_unlock_irqrestore(port->adapter->scsi_host->host_lock, flags);
1500} 1512}
1501 1513
1502/** 1514/**
@@ -1511,6 +1523,7 @@ void zfcp_erp_clear_port_status(struct zfcp_port *port, u32 mask)
1511 struct scsi_device *sdev; 1523 struct scsi_device *sdev;
1512 u32 common_mask = mask & ZFCP_COMMON_FLAGS; 1524 u32 common_mask = mask & ZFCP_COMMON_FLAGS;
1513 u32 clear_counter = mask & ZFCP_STATUS_COMMON_ERP_FAILED; 1525 u32 clear_counter = mask & ZFCP_STATUS_COMMON_ERP_FAILED;
1526 unsigned long flags;
1514 1527
1515 atomic_clear_mask(mask, &port->status); 1528 atomic_clear_mask(mask, &port->status);
1516 1529
@@ -1520,13 +1533,15 @@ void zfcp_erp_clear_port_status(struct zfcp_port *port, u32 mask)
1520 if (clear_counter) 1533 if (clear_counter)
1521 atomic_set(&port->erp_counter, 0); 1534 atomic_set(&port->erp_counter, 0);
1522 1535
1523 shost_for_each_device(sdev, port->adapter->scsi_host) 1536 spin_lock_irqsave(port->adapter->scsi_host->host_lock, flags);
1537 __shost_for_each_device(sdev, port->adapter->scsi_host)
1524 if (sdev_to_zfcp(sdev)->port == port) { 1538 if (sdev_to_zfcp(sdev)->port == port) {
1525 atomic_clear_mask(common_mask, 1539 atomic_clear_mask(common_mask,
1526 &sdev_to_zfcp(sdev)->status); 1540 &sdev_to_zfcp(sdev)->status);
1527 if (clear_counter) 1541 if (clear_counter)
1528 atomic_set(&sdev_to_zfcp(sdev)->erp_counter, 0); 1542 atomic_set(&sdev_to_zfcp(sdev)->erp_counter, 0);
1529 } 1543 }
1544 spin_unlock_irqrestore(port->adapter->scsi_host->host_lock, flags);
1530} 1545}
1531 1546
1532/** 1547/**
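Each shost_for_each_device() loop in these zfcp recovery paths becomes __shost_for_each_device() with the Scsi_Host's host_lock held by the caller. The plain iterator takes and releases host_lock around every step and pins each device with scsi_device_get()/scsi_device_put(), which may sleep and can race with concurrent LUN add/remove; the double-underscore variant just walks the device list under the lock the caller already holds, so the status-bit updates are atomic with respect to the list, at the price that the loop body must not sleep. Reduced to one case (body taken from the adapter-status hunk):

	spin_lock_irqsave(adapter->scsi_host->host_lock, flags);
	__shost_for_each_device(sdev, adapter->scsi_host)	/* plain list walk, no get/put */
		atomic_set_mask(common_mask, &sdev_to_zfcp(sdev)->status);
	spin_unlock_irqrestore(adapter->scsi_host->host_lock, flags);
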
diff --git a/drivers/s390/scsi/zfcp_qdio.c b/drivers/s390/scsi/zfcp_qdio.c
index 665e3cfaaf85..de0598eaacd2 100644
--- a/drivers/s390/scsi/zfcp_qdio.c
+++ b/drivers/s390/scsi/zfcp_qdio.c
@@ -224,11 +224,9 @@ int zfcp_qdio_sbals_from_sg(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req,
224 224
225static int zfcp_qdio_sbal_check(struct zfcp_qdio *qdio) 225static int zfcp_qdio_sbal_check(struct zfcp_qdio *qdio)
226{ 226{
227 spin_lock_irq(&qdio->req_q_lock);
228 if (atomic_read(&qdio->req_q_free) || 227 if (atomic_read(&qdio->req_q_free) ||
229 !(atomic_read(&qdio->adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP)) 228 !(atomic_read(&qdio->adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP))
230 return 1; 229 return 1;
231 spin_unlock_irq(&qdio->req_q_lock);
232 return 0; 230 return 0;
233} 231}
234 232
@@ -246,9 +244,8 @@ int zfcp_qdio_sbal_get(struct zfcp_qdio *qdio)
246{ 244{
247 long ret; 245 long ret;
248 246
249 spin_unlock_irq(&qdio->req_q_lock); 247 ret = wait_event_interruptible_lock_irq_timeout(qdio->req_q_wq,
250 ret = wait_event_interruptible_timeout(qdio->req_q_wq, 248 zfcp_qdio_sbal_check(qdio), qdio->req_q_lock, 5 * HZ);
251 zfcp_qdio_sbal_check(qdio), 5 * HZ);
252 249
253 if (!(atomic_read(&qdio->adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP)) 250 if (!(atomic_read(&qdio->adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP))
254 return -EIO; 251 return -EIO;
@@ -262,7 +259,6 @@ int zfcp_qdio_sbal_get(struct zfcp_qdio *qdio)
262 zfcp_erp_adapter_reopen(qdio->adapter, 0, "qdsbg_1"); 259 zfcp_erp_adapter_reopen(qdio->adapter, 0, "qdsbg_1");
263 } 260 }
264 261
265 spin_lock_irq(&qdio->req_q_lock);
266 return -EIO; 262 return -EIO;
267} 263}
268 264
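zfcp_qdio_sbal_get() previously dropped req_q_lock by hand around wait_event_interruptible_timeout() and re-took it afterwards, with zfcp_qdio_sbal_check() re-acquiring it on the success path. wait_event_interruptible_lock_irq_timeout(wq, condition, lock, timeout) folds that juggling in: the spinlock must be held on entry, is released (with interrupts re-enabled) while the task sleeps, and is held again both when the condition is re-evaluated and when the macro returns — which is why the explicit lock/unlock calls vanish from both functions. Usage in isolation, with illustrative field names:

	spin_lock_irq(&q->req_q_lock);
	ret = wait_event_interruptible_lock_irq_timeout(q->req_q_wq,
			atomic_read(&q->req_q_free) > 0,	/* evaluated with the lock held */
			q->req_q_lock, 5 * HZ);
	/* lock held here whether ret is 0 (timeout), >0 (condition met) or -ERESTARTSYS */
	spin_unlock_irq(&q->req_q_lock);
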
diff --git a/drivers/s390/scsi/zfcp_sysfs.c b/drivers/s390/scsi/zfcp_sysfs.c
index 3f01bbf0609f..890639274bcf 100644
--- a/drivers/s390/scsi/zfcp_sysfs.c
+++ b/drivers/s390/scsi/zfcp_sysfs.c
@@ -27,6 +27,16 @@ static ssize_t zfcp_sysfs_##_feat##_##_name##_show(struct device *dev, \
27static ZFCP_DEV_ATTR(_feat, _name, S_IRUGO, \ 27static ZFCP_DEV_ATTR(_feat, _name, S_IRUGO, \
28 zfcp_sysfs_##_feat##_##_name##_show, NULL); 28 zfcp_sysfs_##_feat##_##_name##_show, NULL);
29 29
30#define ZFCP_DEFINE_ATTR_CONST(_feat, _name, _format, _value) \
31static ssize_t zfcp_sysfs_##_feat##_##_name##_show(struct device *dev, \
32 struct device_attribute *at,\
33 char *buf) \
34{ \
35 return sprintf(buf, _format, _value); \
36} \
37static ZFCP_DEV_ATTR(_feat, _name, S_IRUGO, \
38 zfcp_sysfs_##_feat##_##_name##_show, NULL);
39
30#define ZFCP_DEFINE_A_ATTR(_name, _format, _value) \ 40#define ZFCP_DEFINE_A_ATTR(_name, _format, _value) \
31static ssize_t zfcp_sysfs_adapter_##_name##_show(struct device *dev, \ 41static ssize_t zfcp_sysfs_adapter_##_name##_show(struct device *dev, \
32 struct device_attribute *at,\ 42 struct device_attribute *at,\
@@ -75,6 +85,8 @@ ZFCP_DEFINE_ATTR(zfcp_unit, unit, in_recovery, "%d\n",
75ZFCP_DEFINE_ATTR(zfcp_unit, unit, access_denied, "%d\n", 85ZFCP_DEFINE_ATTR(zfcp_unit, unit, access_denied, "%d\n",
76 (zfcp_unit_sdev_status(unit) & 86 (zfcp_unit_sdev_status(unit) &
77 ZFCP_STATUS_COMMON_ACCESS_DENIED) != 0); 87 ZFCP_STATUS_COMMON_ACCESS_DENIED) != 0);
88ZFCP_DEFINE_ATTR_CONST(unit, access_shared, "%d\n", 0);
89ZFCP_DEFINE_ATTR_CONST(unit, access_readonly, "%d\n", 0);
78 90
79static ssize_t zfcp_sysfs_port_failed_show(struct device *dev, 91static ssize_t zfcp_sysfs_port_failed_show(struct device *dev,
80 struct device_attribute *attr, 92 struct device_attribute *attr,
@@ -347,6 +359,8 @@ static struct attribute *zfcp_unit_attrs[] = {
347 &dev_attr_unit_in_recovery.attr, 359 &dev_attr_unit_in_recovery.attr,
348 &dev_attr_unit_status.attr, 360 &dev_attr_unit_status.attr,
349 &dev_attr_unit_access_denied.attr, 361 &dev_attr_unit_access_denied.attr,
362 &dev_attr_unit_access_shared.attr,
363 &dev_attr_unit_access_readonly.attr,
350 NULL 364 NULL
351}; 365};
352static struct attribute_group zfcp_unit_attr_group = { 366static struct attribute_group zfcp_unit_attr_group = {
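ZFCP_DEFINE_ATTR_CONST mirrors ZFCP_DEFINE_ATTR but prints a fixed value instead of deriving one from the unit status, and is used to expose unit access_shared and access_readonly as constant 0 — presumably so existing tooling that reads those files keeps working. Hand-expanding one invocation shows what the macro generates:

	/* ZFCP_DEFINE_ATTR_CONST(unit, access_shared, "%d\n", 0) expands to roughly: */
	static ssize_t zfcp_sysfs_unit_access_shared_show(struct device *dev,
							  struct device_attribute *at,
							  char *buf)
	{
		return sprintf(buf, "%d\n", 0);
	}
	static ZFCP_DEV_ATTR(unit, access_shared, S_IRUGO,
			     zfcp_sysfs_unit_access_shared_show, NULL);
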
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index 48b2918e0d65..92ff027746f2 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -1353,7 +1353,6 @@ config SCSI_LPFC
1353 tristate "Emulex LightPulse Fibre Channel Support" 1353 tristate "Emulex LightPulse Fibre Channel Support"
1354 depends on PCI && SCSI 1354 depends on PCI && SCSI
1355 select SCSI_FC_ATTRS 1355 select SCSI_FC_ATTRS
1356 select GENERIC_CSUM
1357 select CRC_T10DIF 1356 select CRC_T10DIF
1358 help 1357 help
1359 This lpfc driver supports the Emulex LightPulse 1358 This lpfc driver supports the Emulex LightPulse
diff --git a/drivers/staging/comedi/drivers.c b/drivers/staging/comedi/drivers.c
index e25eba5713c1..b3b5125faa72 100644
--- a/drivers/staging/comedi/drivers.c
+++ b/drivers/staging/comedi/drivers.c
@@ -482,7 +482,7 @@ int comedi_device_attach(struct comedi_device *dev, struct comedi_devconfig *it)
482 ret = comedi_device_postconfig(dev); 482 ret = comedi_device_postconfig(dev);
483 if (ret < 0) { 483 if (ret < 0) {
484 comedi_device_detach(dev); 484 comedi_device_detach(dev);
485 module_put(dev->driver->module); 485 module_put(driv->module);
486 } 486 }
487 /* On success, the driver module count has been incremented. */ 487 /* On success, the driver module count has been incremented. */
488 return ret; 488 return ret;
diff --git a/drivers/usb/class/usbtmc.c b/drivers/usb/class/usbtmc.c
index 609dbc2f7151..83b4ef4dfcf8 100644
--- a/drivers/usb/class/usbtmc.c
+++ b/drivers/usb/class/usbtmc.c
@@ -1119,11 +1119,11 @@ static int usbtmc_probe(struct usb_interface *intf,
1119 /* Determine if it is a Rigol or not */ 1119 /* Determine if it is a Rigol or not */
1120 data->rigol_quirk = 0; 1120 data->rigol_quirk = 0;
1121 dev_dbg(&intf->dev, "Trying to find if device Vendor 0x%04X Product 0x%04X has the RIGOL quirk\n", 1121 dev_dbg(&intf->dev, "Trying to find if device Vendor 0x%04X Product 0x%04X has the RIGOL quirk\n",
1122 data->usb_dev->descriptor.idVendor, 1122 le16_to_cpu(data->usb_dev->descriptor.idVendor),
1123 data->usb_dev->descriptor.idProduct); 1123 le16_to_cpu(data->usb_dev->descriptor.idProduct));
1124 for(n = 0; usbtmc_id_quirk[n].idVendor > 0; n++) { 1124 for(n = 0; usbtmc_id_quirk[n].idVendor > 0; n++) {
1125 if ((usbtmc_id_quirk[n].idVendor == data->usb_dev->descriptor.idVendor) && 1125 if ((usbtmc_id_quirk[n].idVendor == le16_to_cpu(data->usb_dev->descriptor.idVendor)) &&
1126 (usbtmc_id_quirk[n].idProduct == data->usb_dev->descriptor.idProduct)) { 1126 (usbtmc_id_quirk[n].idProduct == le16_to_cpu(data->usb_dev->descriptor.idProduct))) {
1127 dev_dbg(&intf->dev, "Setting this device as having the RIGOL quirk\n"); 1127 dev_dbg(&intf->dev, "Setting this device as having the RIGOL quirk\n");
1128 data->rigol_quirk = 1; 1128 data->rigol_quirk = 1;
1129 break; 1129 break;
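idVendor and idProduct in struct usb_device_descriptor are stored little-endian (__le16) exactly as received from the device, so using them directly in comparisons or format strings is only correct on little-endian hosts; on big-endian machines the values come out byte-swapped. This usbtmc hunk — like the adutux, mos7840 and ti_usb_3410_5052 changes further down — wraps every such use in le16_to_cpu(). In general (quirk_vid/quirk_pid are placeholders):

	#include <linux/usb.h>

	u16 vid = le16_to_cpu(udev->descriptor.idVendor);	/* udev: struct usb_device * */
	u16 pid = le16_to_cpu(udev->descriptor.idProduct);

	if (vid == quirk_vid && pid == quirk_pid)		/* host-endian comparison */
		dev_dbg(&udev->dev, "vendor %04x product %04x matches quirk\n", vid, pid);
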
diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
index a63598895077..5b44cd47da5b 100644
--- a/drivers/usb/core/quirks.c
+++ b/drivers/usb/core/quirks.c
@@ -78,6 +78,12 @@ static const struct usb_device_id usb_quirk_list[] = {
78 { USB_DEVICE(0x04d8, 0x000c), .driver_info = 78 { USB_DEVICE(0x04d8, 0x000c), .driver_info =
79 USB_QUIRK_CONFIG_INTF_STRINGS }, 79 USB_QUIRK_CONFIG_INTF_STRINGS },
80 80
81 /* CarrolTouch 4000U */
82 { USB_DEVICE(0x04e7, 0x0009), .driver_info = USB_QUIRK_RESET_RESUME },
83
84 /* CarrolTouch 4500U */
85 { USB_DEVICE(0x04e7, 0x0030), .driver_info = USB_QUIRK_RESET_RESUME },
86
81 /* Samsung Android phone modem - ID conflict with SPH-I500 */ 87 /* Samsung Android phone modem - ID conflict with SPH-I500 */
82 { USB_DEVICE(0x04e8, 0x6601), .driver_info = 88 { USB_DEVICE(0x04e8, 0x6601), .driver_info =
83 USB_QUIRK_CONFIG_INTF_STRINGS }, 89 USB_QUIRK_CONFIG_INTF_STRINGS },
diff --git a/drivers/usb/host/ehci-sched.c b/drivers/usb/host/ehci-sched.c
index f80d0330d548..8e3c878f38cf 100644
--- a/drivers/usb/host/ehci-sched.c
+++ b/drivers/usb/host/ehci-sched.c
@@ -1391,21 +1391,20 @@ iso_stream_schedule (
1391 1391
1392 /* Behind the scheduling threshold? */ 1392 /* Behind the scheduling threshold? */
1393 if (unlikely(start < next)) { 1393 if (unlikely(start < next)) {
1394 unsigned now2 = (now - base) & (mod - 1);
1394 1395
1395 /* USB_ISO_ASAP: Round up to the first available slot */ 1396 /* USB_ISO_ASAP: Round up to the first available slot */
1396 if (urb->transfer_flags & URB_ISO_ASAP) 1397 if (urb->transfer_flags & URB_ISO_ASAP)
1397 start += (next - start + period - 1) & -period; 1398 start += (next - start + period - 1) & -period;
1398 1399
1399 /* 1400 /*
1400 * Not ASAP: Use the next slot in the stream. If 1401 * Not ASAP: Use the next slot in the stream,
1401 * the entire URB falls before the threshold, fail. 1402 * no matter what.
1402 */ 1403 */
1403 else if (start + span - period < next) { 1404 else if (start + span - period < now2) {
1404 ehci_dbg(ehci, "iso urb late %p (%u+%u < %u)\n", 1405 ehci_dbg(ehci, "iso underrun %p (%u+%u < %u)\n",
1405 urb, start + base, 1406 urb, start + base,
1406 span - period, next + base); 1407 span - period, now2 + base);
1407 status = -EXDEV;
1408 goto fail;
1409 } 1408 }
1410 } 1409 }
1411 1410
diff --git a/drivers/usb/host/ohci-pci.c b/drivers/usb/host/ohci-pci.c
index 08613e241894..0f1d193fef02 100644
--- a/drivers/usb/host/ohci-pci.c
+++ b/drivers/usb/host/ohci-pci.c
@@ -304,6 +304,11 @@ static int __init ohci_pci_init(void)
304 pr_info("%s: " DRIVER_DESC "\n", hcd_name); 304 pr_info("%s: " DRIVER_DESC "\n", hcd_name);
305 305
306 ohci_init_driver(&ohci_pci_hc_driver, &pci_overrides); 306 ohci_init_driver(&ohci_pci_hc_driver, &pci_overrides);
307
308 /* Entries for the PCI suspend/resume callbacks are special */
309 ohci_pci_hc_driver.pci_suspend = ohci_suspend;
310 ohci_pci_hc_driver.pci_resume = ohci_resume;
311
307 return pci_register_driver(&ohci_pci_driver); 312 return pci_register_driver(&ohci_pci_driver);
308} 313}
309module_init(ohci_pci_init); 314module_init(ohci_pci_init);
diff --git a/drivers/usb/misc/adutux.c b/drivers/usb/misc/adutux.c
index eb3c8c142fa9..eeb27208c0d1 100644
--- a/drivers/usb/misc/adutux.c
+++ b/drivers/usb/misc/adutux.c
@@ -830,7 +830,7 @@ static int adu_probe(struct usb_interface *interface,
830 830
831 /* let the user know what node this device is now attached to */ 831 /* let the user know what node this device is now attached to */
832 dev_info(&interface->dev, "ADU%d %s now attached to /dev/usb/adutux%d\n", 832 dev_info(&interface->dev, "ADU%d %s now attached to /dev/usb/adutux%d\n",
833 udev->descriptor.idProduct, dev->serial_number, 833 le16_to_cpu(udev->descriptor.idProduct), dev->serial_number,
834 (dev->minor - ADU_MINOR_BASE)); 834 (dev->minor - ADU_MINOR_BASE));
835exit: 835exit:
836 dbg(2, " %s : leave, return value %p (dev)", __func__, dev); 836 dbg(2, " %s : leave, return value %p (dev)", __func__, dev);
diff --git a/drivers/usb/phy/phy-fsl-usb.h b/drivers/usb/phy/phy-fsl-usb.h
index ca266280895d..e1859b8ef567 100644
--- a/drivers/usb/phy/phy-fsl-usb.h
+++ b/drivers/usb/phy/phy-fsl-usb.h
@@ -15,7 +15,7 @@
15 * 675 Mass Ave, Cambridge, MA 02139, USA. 15 * 675 Mass Ave, Cambridge, MA 02139, USA.
16 */ 16 */
17 17
18#include "otg_fsm.h" 18#include "phy-fsm-usb.h"
19#include <linux/usb/otg.h> 19#include <linux/usb/otg.h>
20#include <linux/ioctl.h> 20#include <linux/ioctl.h>
21 21
diff --git a/drivers/usb/phy/phy-fsm-usb.c b/drivers/usb/phy/phy-fsm-usb.c
index c520b3548e7c..7f4596606e18 100644
--- a/drivers/usb/phy/phy-fsm-usb.c
+++ b/drivers/usb/phy/phy-fsm-usb.c
@@ -29,7 +29,7 @@
29#include <linux/usb/gadget.h> 29#include <linux/usb/gadget.h>
30#include <linux/usb/otg.h> 30#include <linux/usb/otg.h>
31 31
32#include "phy-otg-fsm.h" 32#include "phy-fsm-usb.h"
33 33
34/* Change USB protocol when there is a protocol change */ 34/* Change USB protocol when there is a protocol change */
35static int otg_set_protocol(struct otg_fsm *fsm, int protocol) 35static int otg_set_protocol(struct otg_fsm *fsm, int protocol)
diff --git a/drivers/usb/serial/keyspan.c b/drivers/usb/serial/keyspan.c
index 5a979729f8ec..58c17fdc85eb 100644
--- a/drivers/usb/serial/keyspan.c
+++ b/drivers/usb/serial/keyspan.c
@@ -2303,7 +2303,7 @@ static int keyspan_startup(struct usb_serial *serial)
2303 if (d_details == NULL) { 2303 if (d_details == NULL) {
2304 dev_err(&serial->dev->dev, "%s - unknown product id %x\n", 2304 dev_err(&serial->dev->dev, "%s - unknown product id %x\n",
2305 __func__, le16_to_cpu(serial->dev->descriptor.idProduct)); 2305 __func__, le16_to_cpu(serial->dev->descriptor.idProduct));
2306 return 1; 2306 return -ENODEV;
2307 } 2307 }
2308 2308
2309 /* Setup private data for serial driver */ 2309 /* Setup private data for serial driver */
diff --git a/drivers/usb/serial/mos7720.c b/drivers/usb/serial/mos7720.c
index 51da424327b0..b01300164fc0 100644
--- a/drivers/usb/serial/mos7720.c
+++ b/drivers/usb/serial/mos7720.c
@@ -90,6 +90,7 @@ struct urbtracker {
90 struct list_head urblist_entry; 90 struct list_head urblist_entry;
91 struct kref ref_count; 91 struct kref ref_count;
92 struct urb *urb; 92 struct urb *urb;
93 struct usb_ctrlrequest *setup;
93}; 94};
94 95
95enum mos7715_pp_modes { 96enum mos7715_pp_modes {
@@ -271,6 +272,7 @@ static void destroy_urbtracker(struct kref *kref)
271 struct mos7715_parport *mos_parport = urbtrack->mos_parport; 272 struct mos7715_parport *mos_parport = urbtrack->mos_parport;
272 273
273 usb_free_urb(urbtrack->urb); 274 usb_free_urb(urbtrack->urb);
275 kfree(urbtrack->setup);
274 kfree(urbtrack); 276 kfree(urbtrack);
275 kref_put(&mos_parport->ref_count, destroy_mos_parport); 277 kref_put(&mos_parport->ref_count, destroy_mos_parport);
276} 278}
@@ -355,7 +357,6 @@ static int write_parport_reg_nonblock(struct mos7715_parport *mos_parport,
355 struct urbtracker *urbtrack; 357 struct urbtracker *urbtrack;
356 int ret_val; 358 int ret_val;
357 unsigned long flags; 359 unsigned long flags;
358 struct usb_ctrlrequest setup;
359 struct usb_serial *serial = mos_parport->serial; 360 struct usb_serial *serial = mos_parport->serial;
360 struct usb_device *usbdev = serial->dev; 361 struct usb_device *usbdev = serial->dev;
361 362
@@ -373,14 +374,20 @@ static int write_parport_reg_nonblock(struct mos7715_parport *mos_parport,
373 kfree(urbtrack); 374 kfree(urbtrack);
374 return -ENOMEM; 375 return -ENOMEM;
375 } 376 }
376 setup.bRequestType = (__u8)0x40; 377 urbtrack->setup = kmalloc(sizeof(*urbtrack->setup), GFP_KERNEL);
377 setup.bRequest = (__u8)0x0e; 378 if (!urbtrack->setup) {
378 setup.wValue = get_reg_value(reg, dummy); 379 usb_free_urb(urbtrack->urb);
379 setup.wIndex = get_reg_index(reg); 380 kfree(urbtrack);
380 setup.wLength = 0; 381 return -ENOMEM;
382 }
383 urbtrack->setup->bRequestType = (__u8)0x40;
384 urbtrack->setup->bRequest = (__u8)0x0e;
385 urbtrack->setup->wValue = get_reg_value(reg, dummy);
386 urbtrack->setup->wIndex = get_reg_index(reg);
387 urbtrack->setup->wLength = 0;
381 usb_fill_control_urb(urbtrack->urb, usbdev, 388 usb_fill_control_urb(urbtrack->urb, usbdev,
382 usb_sndctrlpipe(usbdev, 0), 389 usb_sndctrlpipe(usbdev, 0),
383 (unsigned char *)&setup, 390 (unsigned char *)urbtrack->setup,
384 NULL, 0, async_complete, urbtrack); 391 NULL, 0, async_complete, urbtrack);
385 kref_init(&urbtrack->ref_count); 392 kref_init(&urbtrack->ref_count);
386 INIT_LIST_HEAD(&urbtrack->urblist_entry); 393 INIT_LIST_HEAD(&urbtrack->urblist_entry);
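write_parport_reg_nonblock() submits its control URB asynchronously, so the setup packet passed to usb_fill_control_urb() has to stay valid (and be DMA-able) until the completion handler runs; the old struct usb_ctrlrequest on the caller's stack is gone long before that. The fix keeps the setup packet in the kmalloc'd urbtracker and frees it in destroy_urbtracker() together with the URB. The general shape, with placeholder names (value, index, tracker) and the request type spelled symbolically where the driver uses the raw 0x40/0x0e values:

	struct usb_ctrlrequest *setup;

	setup = kmalloc(sizeof(*setup), GFP_KERNEL);	/* must outlive this function */
	if (!setup)
		return -ENOMEM;

	setup->bRequestType = USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE; /* 0x40 */
	setup->bRequest = 0x0e;
	setup->wValue = cpu_to_le16(value);		/* wValue/wIndex are __le16 on the wire */
	setup->wIndex = cpu_to_le16(index);
	setup->wLength = 0;

	usb_fill_control_urb(urb, udev, usb_sndctrlpipe(udev, 0),
			     (unsigned char *)setup, NULL, 0, async_complete, tracker);
	/* kfree(setup) only in the teardown path, once the URB has completed */
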
diff --git a/drivers/usb/serial/mos7840.c b/drivers/usb/serial/mos7840.c
index d953d674f222..3bac4693c038 100644
--- a/drivers/usb/serial/mos7840.c
+++ b/drivers/usb/serial/mos7840.c
@@ -2193,7 +2193,7 @@ static int mos7810_check(struct usb_serial *serial)
2193static int mos7840_probe(struct usb_serial *serial, 2193static int mos7840_probe(struct usb_serial *serial,
2194 const struct usb_device_id *id) 2194 const struct usb_device_id *id)
2195{ 2195{
2196 u16 product = serial->dev->descriptor.idProduct; 2196 u16 product = le16_to_cpu(serial->dev->descriptor.idProduct);
2197 u8 *buf; 2197 u8 *buf;
2198 int device_type; 2198 int device_type;
2199 2199
diff --git a/drivers/usb/serial/ti_usb_3410_5052.c b/drivers/usb/serial/ti_usb_3410_5052.c
index 375b5a400b6f..5c9f9b1d7736 100644
--- a/drivers/usb/serial/ti_usb_3410_5052.c
+++ b/drivers/usb/serial/ti_usb_3410_5052.c
@@ -1536,14 +1536,15 @@ static int ti_download_firmware(struct ti_device *tdev)
1536 char buf[32]; 1536 char buf[32];
1537 1537
1538 /* try ID specific firmware first, then try generic firmware */ 1538 /* try ID specific firmware first, then try generic firmware */
1539 sprintf(buf, "ti_usb-v%04x-p%04x.fw", dev->descriptor.idVendor, 1539 sprintf(buf, "ti_usb-v%04x-p%04x.fw",
1540 dev->descriptor.idProduct); 1540 le16_to_cpu(dev->descriptor.idVendor),
1541 le16_to_cpu(dev->descriptor.idProduct));
1541 status = request_firmware(&fw_p, buf, &dev->dev); 1542 status = request_firmware(&fw_p, buf, &dev->dev);
1542 1543
1543 if (status != 0) { 1544 if (status != 0) {
1544 buf[0] = '\0'; 1545 buf[0] = '\0';
1545 if (dev->descriptor.idVendor == MTS_VENDOR_ID) { 1546 if (le16_to_cpu(dev->descriptor.idVendor) == MTS_VENDOR_ID) {
1546 switch (dev->descriptor.idProduct) { 1547 switch (le16_to_cpu(dev->descriptor.idProduct)) {
1547 case MTS_CDMA_PRODUCT_ID: 1548 case MTS_CDMA_PRODUCT_ID:
1548 strcpy(buf, "mts_cdma.fw"); 1549 strcpy(buf, "mts_cdma.fw");
1549 break; 1550 break;
diff --git a/drivers/usb/serial/usb_wwan.c b/drivers/usb/serial/usb_wwan.c
index 8257d30c4072..85365784040b 100644
--- a/drivers/usb/serial/usb_wwan.c
+++ b/drivers/usb/serial/usb_wwan.c
@@ -291,18 +291,18 @@ static void usb_wwan_indat_callback(struct urb *urb)
291 tty_flip_buffer_push(&port->port); 291 tty_flip_buffer_push(&port->port);
292 } else 292 } else
293 dev_dbg(dev, "%s: empty read urb received\n", __func__); 293 dev_dbg(dev, "%s: empty read urb received\n", __func__);
294 294 }
295 /* Resubmit urb so we continue receiving */ 295 /* Resubmit urb so we continue receiving */
296 err = usb_submit_urb(urb, GFP_ATOMIC); 296 err = usb_submit_urb(urb, GFP_ATOMIC);
297 if (err) { 297 if (err) {
298 if (err != -EPERM) { 298 if (err != -EPERM) {
299 dev_err(dev, "%s: resubmit read urb failed. (%d)\n", __func__, err); 299 dev_err(dev, "%s: resubmit read urb failed. (%d)\n",
300 /* busy also in error unless we are killed */ 300 __func__, err);
301 usb_mark_last_busy(port->serial->dev); 301 /* busy also in error unless we are killed */
302 }
303 } else {
304 usb_mark_last_busy(port->serial->dev); 302 usb_mark_last_busy(port->serial->dev);
305 } 303 }
304 } else {
305 usb_mark_last_busy(port->serial->dev);
306 } 306 }
307} 307}
308 308
diff --git a/drivers/usb/wusbcore/wa-xfer.c b/drivers/usb/wusbcore/wa-xfer.c
index 16968c899493..d3493ca0525d 100644
--- a/drivers/usb/wusbcore/wa-xfer.c
+++ b/drivers/usb/wusbcore/wa-xfer.c
@@ -1226,6 +1226,12 @@ int wa_urb_dequeue(struct wahc *wa, struct urb *urb)
1226 } 1226 }
1227 spin_lock_irqsave(&xfer->lock, flags); 1227 spin_lock_irqsave(&xfer->lock, flags);
1228 rpipe = xfer->ep->hcpriv; 1228 rpipe = xfer->ep->hcpriv;
1229 if (rpipe == NULL) {
1230 pr_debug("%s: xfer id 0x%08X has no RPIPE. %s",
1231 __func__, wa_xfer_id(xfer),
1232 "Probably already aborted.\n" );
1233 goto out_unlock;
1234 }
1229 /* Check the delayed list -> if there, release and complete */ 1235 /* Check the delayed list -> if there, release and complete */
1230 spin_lock_irqsave(&wa->xfer_list_lock, flags2); 1236 spin_lock_irqsave(&wa->xfer_list_lock, flags2);
1231 if (!list_empty(&xfer->list_node) && xfer->seg == NULL) 1237 if (!list_empty(&xfer->list_node) && xfer->seg == NULL)
@@ -1644,8 +1650,7 @@ static void wa_xfer_result_cb(struct urb *urb)
1644 break; 1650 break;
1645 } 1651 }
1646 usb_status = xfer_result->bTransferStatus & 0x3f; 1652 usb_status = xfer_result->bTransferStatus & 0x3f;
1647 if (usb_status == WA_XFER_STATUS_ABORTED 1653 if (usb_status == WA_XFER_STATUS_NOT_FOUND)
1648 || usb_status == WA_XFER_STATUS_NOT_FOUND)
1649 /* taken care of already */ 1654 /* taken care of already */
1650 break; 1655 break;
1651 xfer_id = xfer_result->dwTransferID; 1656 xfer_id = xfer_result->dwTransferID;
diff --git a/drivers/xen/acpi.c b/drivers/xen/acpi.c
index 119d42a2bf57..90307c0b630c 100644
--- a/drivers/xen/acpi.c
+++ b/drivers/xen/acpi.c
@@ -35,28 +35,43 @@
35#include <asm/xen/hypercall.h> 35#include <asm/xen/hypercall.h>
36#include <asm/xen/hypervisor.h> 36#include <asm/xen/hypervisor.h>
37 37
38int xen_acpi_notify_hypervisor_state(u8 sleep_state, 38static int xen_acpi_notify_hypervisor_state(u8 sleep_state,
39 u32 pm1a_cnt, u32 pm1b_cnt) 39 u32 val_a, u32 val_b,
40 bool extended)
40{ 41{
42 unsigned int bits = extended ? 8 : 16;
43
41 struct xen_platform_op op = { 44 struct xen_platform_op op = {
42 .cmd = XENPF_enter_acpi_sleep, 45 .cmd = XENPF_enter_acpi_sleep,
43 .interface_version = XENPF_INTERFACE_VERSION, 46 .interface_version = XENPF_INTERFACE_VERSION,
44 .u = { 47 .u.enter_acpi_sleep = {
45 .enter_acpi_sleep = { 48 .val_a = (u16)val_a,
46 .pm1a_cnt_val = (u16)pm1a_cnt, 49 .val_b = (u16)val_b,
47 .pm1b_cnt_val = (u16)pm1b_cnt, 50 .sleep_state = sleep_state,
48 .sleep_state = sleep_state, 51 .flags = extended ? XENPF_ACPI_SLEEP_EXTENDED : 0,
49 },
50 }, 52 },
51 }; 53 };
52 54
53 if ((pm1a_cnt & 0xffff0000) || (pm1b_cnt & 0xffff0000)) { 55 if (WARN((val_a & (~0 << bits)) || (val_b & (~0 << bits)),
54 WARN(1, "Using more than 16bits of PM1A/B 0x%x/0x%x!" 56 "Using more than %u bits of sleep control values %#x/%#x!"
55 "Email xen-devel@lists.xensource.com Thank you.\n", \ 57 "Email xen-devel@lists.xen.org - Thank you.\n", \
56 pm1a_cnt, pm1b_cnt); 58 bits, val_a, val_b))
57 return -1; 59 return -1;
58 }
59 60
60 HYPERVISOR_dom0_op(&op); 61 HYPERVISOR_dom0_op(&op);
61 return 1; 62 return 1;
62} 63}
64
65int xen_acpi_notify_hypervisor_sleep(u8 sleep_state,
66 u32 pm1a_cnt, u32 pm1b_cnt)
67{
68 return xen_acpi_notify_hypervisor_state(sleep_state, pm1a_cnt,
69 pm1b_cnt, false);
70}
71
72int xen_acpi_notify_hypervisor_extended_sleep(u8 sleep_state,
73 u32 val_a, u32 val_b)
74{
75 return xen_acpi_notify_hypervisor_state(sleep_state, val_a,
76 val_b, true);
77}
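The hypercall helper becomes static and grows an extended flag: legacy PM1A/PM1B control writes carry 16-bit values, while the ACPI 5.0 extended (hardware-reduced) sleep control path carries 8-bit values, so the sanity WARN checks whichever width applies and the flags field tells Xen which register set is meant. Two thin wrappers, xen_acpi_notify_hypervisor_sleep() and xen_acpi_notify_hypervisor_extended_sleep(), select the mode for their callers. The width check in isolation:

	unsigned int bits = extended ? 8 : 16;

	/* reject values that do not fit the register width for this mode */
	if (WARN((val_a & (~0u << bits)) || (val_b & (~0u << bits)),
		 "sleep control value exceeds %u bits\n", bits))
		return -1;
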
diff --git a/drivers/xen/events.c b/drivers/xen/events.c
index a58ac435a9a4..5e8be462aed5 100644
--- a/drivers/xen/events.c
+++ b/drivers/xen/events.c
@@ -348,7 +348,7 @@ static void init_evtchn_cpu_bindings(void)
348 348
349 for_each_possible_cpu(i) 349 for_each_possible_cpu(i)
350 memset(per_cpu(cpu_evtchn_mask, i), 350 memset(per_cpu(cpu_evtchn_mask, i),
351 (i == 0) ? ~0 : 0, sizeof(*per_cpu(cpu_evtchn_mask, i))); 351 (i == 0) ? ~0 : 0, NR_EVENT_CHANNELS/8);
352} 352}
353 353
354static inline void clear_evtchn(int port) 354static inline void clear_evtchn(int port)
@@ -1493,8 +1493,10 @@ void rebind_evtchn_irq(int evtchn, int irq)
1493/* Rebind an evtchn so that it gets delivered to a specific cpu */ 1493/* Rebind an evtchn so that it gets delivered to a specific cpu */
1494static int rebind_irq_to_cpu(unsigned irq, unsigned tcpu) 1494static int rebind_irq_to_cpu(unsigned irq, unsigned tcpu)
1495{ 1495{
1496 struct shared_info *s = HYPERVISOR_shared_info;
1496 struct evtchn_bind_vcpu bind_vcpu; 1497 struct evtchn_bind_vcpu bind_vcpu;
1497 int evtchn = evtchn_from_irq(irq); 1498 int evtchn = evtchn_from_irq(irq);
1499 int masked;
1498 1500
1499 if (!VALID_EVTCHN(evtchn)) 1501 if (!VALID_EVTCHN(evtchn))
1500 return -1; 1502 return -1;
@@ -1511,6 +1513,12 @@ static int rebind_irq_to_cpu(unsigned irq, unsigned tcpu)
1511 bind_vcpu.vcpu = tcpu; 1513 bind_vcpu.vcpu = tcpu;
1512 1514
1513 /* 1515 /*
1516 * Mask the event while changing the VCPU binding to prevent
1517 * it being delivered on an unexpected VCPU.
1518 */
1519 masked = sync_test_and_set_bit(evtchn, BM(s->evtchn_mask));
1520
1521 /*
1514 * If this fails, it usually just indicates that we're dealing with a 1522 * If this fails, it usually just indicates that we're dealing with a
1515 * virq or IPI channel, which don't actually need to be rebound. Ignore 1523 * virq or IPI channel, which don't actually need to be rebound. Ignore
1516 * it, but don't do the xenlinux-level rebind in that case. 1524 * it, but don't do the xenlinux-level rebind in that case.
@@ -1518,6 +1526,9 @@ static int rebind_irq_to_cpu(unsigned irq, unsigned tcpu)
1518 if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_vcpu, &bind_vcpu) >= 0) 1526 if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_vcpu, &bind_vcpu) >= 0)
1519 bind_evtchn_to_cpu(evtchn, tcpu); 1527 bind_evtchn_to_cpu(evtchn, tcpu);
1520 1528
1529 if (!masked)
1530 unmask_evtchn(evtchn);
1531
1521 return 0; 1532 return 0;
1522} 1533}
1523 1534
diff --git a/fs/bfs/inode.c b/fs/bfs/inode.c
index 5e376bb93419..8defc6b3f9a2 100644
--- a/fs/bfs/inode.c
+++ b/fs/bfs/inode.c
@@ -40,7 +40,7 @@ struct inode *bfs_iget(struct super_block *sb, unsigned long ino)
40 int block, off; 40 int block, off;
41 41
42 inode = iget_locked(sb, ino); 42 inode = iget_locked(sb, ino);
43 if (IS_ERR(inode)) 43 if (!inode)
44 return ERR_PTR(-ENOMEM); 44 return ERR_PTR(-ENOMEM);
45 if (!(inode->i_state & I_NEW)) 45 if (!(inode->i_state & I_NEW))
46 return inode; 46 return inode;
diff --git a/fs/bio.c b/fs/bio.c
index 94bbc04dba77..c5eae7251490 100644
--- a/fs/bio.c
+++ b/fs/bio.c
@@ -1045,12 +1045,22 @@ static int __bio_copy_iov(struct bio *bio, struct bio_vec *iovecs,
1045int bio_uncopy_user(struct bio *bio) 1045int bio_uncopy_user(struct bio *bio)
1046{ 1046{
1047 struct bio_map_data *bmd = bio->bi_private; 1047 struct bio_map_data *bmd = bio->bi_private;
1048 int ret = 0; 1048 struct bio_vec *bvec;
1049 int ret = 0, i;
1049 1050
1050 if (!bio_flagged(bio, BIO_NULL_MAPPED)) 1051 if (!bio_flagged(bio, BIO_NULL_MAPPED)) {
1051 ret = __bio_copy_iov(bio, bmd->iovecs, bmd->sgvecs, 1052 /*
1052 bmd->nr_sgvecs, bio_data_dir(bio) == READ, 1053 * if we're in a workqueue, the request is orphaned, so
1053 0, bmd->is_our_pages); 1054 * don't copy into a random user address space, just free.
1055 */
1056 if (current->mm)
1057 ret = __bio_copy_iov(bio, bmd->iovecs, bmd->sgvecs,
1058 bmd->nr_sgvecs, bio_data_dir(bio) == READ,
1059 0, bmd->is_our_pages);
1060 else if (bmd->is_our_pages)
1061 bio_for_each_segment_all(bvec, bio, i)
1062 __free_page(bvec->bv_page);
1063 }
1054 bio_free_map_data(bmd); 1064 bio_free_map_data(bmd);
1055 bio_put(bio); 1065 bio_put(bio);
1056 return ret; 1066 return ret;
diff --git a/fs/cifs/cifsencrypt.c b/fs/cifs/cifsencrypt.c
index 45e57cc38200..fc6f4f3a1a9d 100644
--- a/fs/cifs/cifsencrypt.c
+++ b/fs/cifs/cifsencrypt.c
@@ -43,17 +43,18 @@ cifs_crypto_shash_md5_allocate(struct TCP_Server_Info *server)
43 server->secmech.md5 = crypto_alloc_shash("md5", 0, 0); 43 server->secmech.md5 = crypto_alloc_shash("md5", 0, 0);
44 if (IS_ERR(server->secmech.md5)) { 44 if (IS_ERR(server->secmech.md5)) {
45 cifs_dbg(VFS, "could not allocate crypto md5\n"); 45 cifs_dbg(VFS, "could not allocate crypto md5\n");
46 return PTR_ERR(server->secmech.md5); 46 rc = PTR_ERR(server->secmech.md5);
47 server->secmech.md5 = NULL;
48 return rc;
47 } 49 }
48 50
49 size = sizeof(struct shash_desc) + 51 size = sizeof(struct shash_desc) +
50 crypto_shash_descsize(server->secmech.md5); 52 crypto_shash_descsize(server->secmech.md5);
51 server->secmech.sdescmd5 = kmalloc(size, GFP_KERNEL); 53 server->secmech.sdescmd5 = kmalloc(size, GFP_KERNEL);
52 if (!server->secmech.sdescmd5) { 54 if (!server->secmech.sdescmd5) {
53 rc = -ENOMEM;
54 crypto_free_shash(server->secmech.md5); 55 crypto_free_shash(server->secmech.md5);
55 server->secmech.md5 = NULL; 56 server->secmech.md5 = NULL;
56 return rc; 57 return -ENOMEM;
57 } 58 }
58 server->secmech.sdescmd5->shash.tfm = server->secmech.md5; 59 server->secmech.sdescmd5->shash.tfm = server->secmech.md5;
59 server->secmech.sdescmd5->shash.flags = 0x0; 60 server->secmech.sdescmd5->shash.flags = 0x0;
@@ -421,7 +422,7 @@ find_domain_name(struct cifs_ses *ses, const struct nls_table *nls_cp)
421 if (blobptr + attrsize > blobend) 422 if (blobptr + attrsize > blobend)
422 break; 423 break;
423 if (type == NTLMSSP_AV_NB_DOMAIN_NAME) { 424 if (type == NTLMSSP_AV_NB_DOMAIN_NAME) {
424 if (!attrsize) 425 if (!attrsize || attrsize >= CIFS_MAX_DOMAINNAME_LEN)
425 break; 426 break;
426 if (!ses->domainName) { 427 if (!ses->domainName) {
427 ses->domainName = 428 ses->domainName =
@@ -591,6 +592,7 @@ CalcNTLMv2_response(const struct cifs_ses *ses, char *ntlmv2_hash)
591 592
592static int crypto_hmacmd5_alloc(struct TCP_Server_Info *server) 593static int crypto_hmacmd5_alloc(struct TCP_Server_Info *server)
593{ 594{
595 int rc;
594 unsigned int size; 596 unsigned int size;
595 597
596 /* check if already allocated */ 598 /* check if already allocated */
@@ -600,7 +602,9 @@ static int crypto_hmacmd5_alloc(struct TCP_Server_Info *server)
600 server->secmech.hmacmd5 = crypto_alloc_shash("hmac(md5)", 0, 0); 602 server->secmech.hmacmd5 = crypto_alloc_shash("hmac(md5)", 0, 0);
601 if (IS_ERR(server->secmech.hmacmd5)) { 603 if (IS_ERR(server->secmech.hmacmd5)) {
602 cifs_dbg(VFS, "could not allocate crypto hmacmd5\n"); 604 cifs_dbg(VFS, "could not allocate crypto hmacmd5\n");
603 return PTR_ERR(server->secmech.hmacmd5); 605 rc = PTR_ERR(server->secmech.hmacmd5);
606 server->secmech.hmacmd5 = NULL;
607 return rc;
604 } 608 }
605 609
606 size = sizeof(struct shash_desc) + 610 size = sizeof(struct shash_desc) +
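Both shash allocation helpers tighten their error paths: when crypto_alloc_shash() fails, the ERR_PTR must not be left behind in server->secmech.md5 / .hmacmd5, because later callers test those pointers for "already allocated" and the free path would hand a garbage non-NULL pointer to crypto_free_shash(); so the code saves the errno, resets the field to NULL, and returns it. The same idiom in miniature, with placeholder names:

	ctx->tfm = crypto_alloc_shash("md5", 0, 0);
	if (IS_ERR(ctx->tfm)) {
		int rc = PTR_ERR(ctx->tfm);

		ctx->tfm = NULL;	/* never leave an ERR_PTR where a tfm is expected */
		return rc;
	}
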
diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
index 4bdd547dbf6f..85ea98d139fc 100644
--- a/fs/cifs/cifsfs.c
+++ b/fs/cifs/cifsfs.c
@@ -147,18 +147,17 @@ cifs_read_super(struct super_block *sb)
147 goto out_no_root; 147 goto out_no_root;
148 } 148 }
149 149
150 if (cifs_sb_master_tcon(cifs_sb)->nocase)
151 sb->s_d_op = &cifs_ci_dentry_ops;
152 else
153 sb->s_d_op = &cifs_dentry_ops;
154
150 sb->s_root = d_make_root(inode); 155 sb->s_root = d_make_root(inode);
151 if (!sb->s_root) { 156 if (!sb->s_root) {
152 rc = -ENOMEM; 157 rc = -ENOMEM;
153 goto out_no_root; 158 goto out_no_root;
154 } 159 }
155 160
156 /* do that *after* d_make_root() - we want NULL ->d_op for root here */
157 if (cifs_sb_master_tcon(cifs_sb)->nocase)
158 sb->s_d_op = &cifs_ci_dentry_ops;
159 else
160 sb->s_d_op = &cifs_dentry_ops;
161
162#ifdef CONFIG_CIFS_NFSD_EXPORT 161#ifdef CONFIG_CIFS_NFSD_EXPORT
163 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) { 162 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) {
164 cifs_dbg(FYI, "export ops supported\n"); 163 cifs_dbg(FYI, "export ops supported\n");
diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
index 1fdc37041057..52ca861ed35e 100644
--- a/fs/cifs/cifsglob.h
+++ b/fs/cifs/cifsglob.h
@@ -44,6 +44,7 @@
44#define MAX_TREE_SIZE (2 + MAX_SERVER_SIZE + 1 + MAX_SHARE_SIZE + 1) 44#define MAX_TREE_SIZE (2 + MAX_SERVER_SIZE + 1 + MAX_SHARE_SIZE + 1)
45#define MAX_SERVER_SIZE 15 45#define MAX_SERVER_SIZE 15
46#define MAX_SHARE_SIZE 80 46#define MAX_SHARE_SIZE 80
47#define CIFS_MAX_DOMAINNAME_LEN 256 /* max domain name length */
47#define MAX_USERNAME_SIZE 256 /* reasonable maximum for current servers */ 48#define MAX_USERNAME_SIZE 256 /* reasonable maximum for current servers */
48#define MAX_PASSWORD_SIZE 512 /* max for windows seems to be 256 wide chars */ 49#define MAX_PASSWORD_SIZE 512 /* max for windows seems to be 256 wide chars */
49 50
@@ -369,6 +370,9 @@ struct smb_version_operations {
369 void (*generate_signingkey)(struct TCP_Server_Info *server); 370 void (*generate_signingkey)(struct TCP_Server_Info *server);
370 int (*calc_signature)(struct smb_rqst *rqst, 371 int (*calc_signature)(struct smb_rqst *rqst,
371 struct TCP_Server_Info *server); 372 struct TCP_Server_Info *server);
373 int (*query_mf_symlink)(const unsigned char *path, char *pbuf,
374 unsigned int *pbytes_read, struct cifs_sb_info *cifs_sb,
375 unsigned int xid);
372}; 376};
373 377
374struct smb_version_values { 378struct smb_version_values {
diff --git a/fs/cifs/cifsproto.h b/fs/cifs/cifsproto.h
index f7e584d047e2..b29a012bed33 100644
--- a/fs/cifs/cifsproto.h
+++ b/fs/cifs/cifsproto.h
@@ -497,5 +497,7 @@ void cifs_writev_complete(struct work_struct *work);
497struct cifs_writedata *cifs_writedata_alloc(unsigned int nr_pages, 497struct cifs_writedata *cifs_writedata_alloc(unsigned int nr_pages,
498 work_func_t complete); 498 work_func_t complete);
499void cifs_writedata_release(struct kref *refcount); 499void cifs_writedata_release(struct kref *refcount);
500 500int open_query_close_cifs_symlink(const unsigned char *path, char *pbuf,
501 unsigned int *pbytes_read, struct cifs_sb_info *cifs_sb,
502 unsigned int xid);
501#endif /* _CIFSPROTO_H */ 503#endif /* _CIFSPROTO_H */
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index fa68813396b5..d67c550c4980 100644
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -1675,7 +1675,8 @@ cifs_parse_mount_options(const char *mountdata, const char *devname,
1675 if (string == NULL) 1675 if (string == NULL)
1676 goto out_nomem; 1676 goto out_nomem;
1677 1677
1678 if (strnlen(string, 256) == 256) { 1678 if (strnlen(string, CIFS_MAX_DOMAINNAME_LEN)
1679 == CIFS_MAX_DOMAINNAME_LEN) {
1679 printk(KERN_WARNING "CIFS: domain name too" 1680 printk(KERN_WARNING "CIFS: domain name too"
1680 " long\n"); 1681 " long\n");
1681 goto cifs_parse_mount_err; 1682 goto cifs_parse_mount_err;
@@ -2276,8 +2277,8 @@ cifs_put_smb_ses(struct cifs_ses *ses)
2276 2277
2277#ifdef CONFIG_KEYS 2278#ifdef CONFIG_KEYS
2278 2279
2279/* strlen("cifs:a:") + INET6_ADDRSTRLEN + 1 */ 2280/* strlen("cifs:a:") + CIFS_MAX_DOMAINNAME_LEN + 1 */
2280#define CIFSCREDS_DESC_SIZE (7 + INET6_ADDRSTRLEN + 1) 2281#define CIFSCREDS_DESC_SIZE (7 + CIFS_MAX_DOMAINNAME_LEN + 1)
2281 2282
2282/* Populate username and pw fields from keyring if possible */ 2283/* Populate username and pw fields from keyring if possible */
2283static int 2284static int
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index 1e57f36ea1b2..7e36ae34e947 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -647,6 +647,7 @@ cifs_reopen_file(struct cifsFileInfo *cfile, bool can_flush)
647 oflags, &oplock, &cfile->fid.netfid, xid); 647 oflags, &oplock, &cfile->fid.netfid, xid);
648 if (rc == 0) { 648 if (rc == 0) {
649 cifs_dbg(FYI, "posix reopen succeeded\n"); 649 cifs_dbg(FYI, "posix reopen succeeded\n");
650 oparms.reconnect = true;
650 goto reopen_success; 651 goto reopen_success;
651 } 652 }
652 /* 653 /*
diff --git a/fs/cifs/link.c b/fs/cifs/link.c
index b83c3f5646bd..562044f700e5 100644
--- a/fs/cifs/link.c
+++ b/fs/cifs/link.c
@@ -305,67 +305,89 @@ CIFSCouldBeMFSymlink(const struct cifs_fattr *fattr)
305} 305}
306 306
307int 307int
308CIFSCheckMFSymlink(struct cifs_fattr *fattr, 308open_query_close_cifs_symlink(const unsigned char *path, char *pbuf,
309 const unsigned char *path, 309 unsigned int *pbytes_read, struct cifs_sb_info *cifs_sb,
310 struct cifs_sb_info *cifs_sb, unsigned int xid) 310 unsigned int xid)
311{ 311{
312 int rc; 312 int rc;
313 int oplock = 0; 313 int oplock = 0;
314 __u16 netfid = 0; 314 __u16 netfid = 0;
315 struct tcon_link *tlink; 315 struct tcon_link *tlink;
316 struct cifs_tcon *pTcon; 316 struct cifs_tcon *ptcon;
317 struct cifs_io_parms io_parms; 317 struct cifs_io_parms io_parms;
318 u8 *buf;
319 char *pbuf;
320 unsigned int bytes_read = 0;
321 int buf_type = CIFS_NO_BUFFER; 318 int buf_type = CIFS_NO_BUFFER;
322 unsigned int link_len = 0;
323 FILE_ALL_INFO file_info; 319 FILE_ALL_INFO file_info;
324 320
325 if (!CIFSCouldBeMFSymlink(fattr))
326 /* it's not a symlink */
327 return 0;
328
329 tlink = cifs_sb_tlink(cifs_sb); 321 tlink = cifs_sb_tlink(cifs_sb);
330 if (IS_ERR(tlink)) 322 if (IS_ERR(tlink))
331 return PTR_ERR(tlink); 323 return PTR_ERR(tlink);
332 pTcon = tlink_tcon(tlink); 324 ptcon = tlink_tcon(tlink);
333 325
334 rc = CIFSSMBOpen(xid, pTcon, path, FILE_OPEN, GENERIC_READ, 326 rc = CIFSSMBOpen(xid, ptcon, path, FILE_OPEN, GENERIC_READ,
335 CREATE_NOT_DIR, &netfid, &oplock, &file_info, 327 CREATE_NOT_DIR, &netfid, &oplock, &file_info,
336 cifs_sb->local_nls, 328 cifs_sb->local_nls,
337 cifs_sb->mnt_cifs_flags & 329 cifs_sb->mnt_cifs_flags &
338 CIFS_MOUNT_MAP_SPECIAL_CHR); 330 CIFS_MOUNT_MAP_SPECIAL_CHR);
339 if (rc != 0) 331 if (rc != 0) {
340 goto out; 332 cifs_put_tlink(tlink);
333 return rc;
334 }
341 335
342 if (file_info.EndOfFile != cpu_to_le64(CIFS_MF_SYMLINK_FILE_SIZE)) { 336 if (file_info.EndOfFile != cpu_to_le64(CIFS_MF_SYMLINK_FILE_SIZE)) {
343 CIFSSMBClose(xid, pTcon, netfid); 337 CIFSSMBClose(xid, ptcon, netfid);
338 cifs_put_tlink(tlink);
344 /* it's not a symlink */ 339 /* it's not a symlink */
345 goto out; 340 return rc;
346 } 341 }
347 342
348 buf = kmalloc(CIFS_MF_SYMLINK_FILE_SIZE, GFP_KERNEL);
349 if (!buf) {
350 rc = -ENOMEM;
351 goto out;
352 }
353 pbuf = buf;
354 io_parms.netfid = netfid; 343 io_parms.netfid = netfid;
355 io_parms.pid = current->tgid; 344 io_parms.pid = current->tgid;
356 io_parms.tcon = pTcon; 345 io_parms.tcon = ptcon;
357 io_parms.offset = 0; 346 io_parms.offset = 0;
358 io_parms.length = CIFS_MF_SYMLINK_FILE_SIZE; 347 io_parms.length = CIFS_MF_SYMLINK_FILE_SIZE;
359 348
360 rc = CIFSSMBRead(xid, &io_parms, &bytes_read, &pbuf, &buf_type); 349 rc = CIFSSMBRead(xid, &io_parms, pbytes_read, &pbuf, &buf_type);
361 CIFSSMBClose(xid, pTcon, netfid); 350 CIFSSMBClose(xid, ptcon, netfid);
362 if (rc != 0) { 351 cifs_put_tlink(tlink);
363 kfree(buf); 352 return rc;
353}
354
355
356int
357CIFSCheckMFSymlink(struct cifs_fattr *fattr,
358 const unsigned char *path,
359 struct cifs_sb_info *cifs_sb, unsigned int xid)
360{
361 int rc = 0;
362 u8 *buf = NULL;
363 unsigned int link_len = 0;
364 unsigned int bytes_read = 0;
365 struct cifs_tcon *ptcon;
366
367 if (!CIFSCouldBeMFSymlink(fattr))
368 /* it's not a symlink */
369 return 0;
370
371 buf = kmalloc(CIFS_MF_SYMLINK_FILE_SIZE, GFP_KERNEL);
372 if (!buf) {
373 rc = -ENOMEM;
364 goto out; 374 goto out;
365 } 375 }
366 376
377 ptcon = tlink_tcon(cifs_sb_tlink(cifs_sb));
378 if ((ptcon->ses) && (ptcon->ses->server->ops->query_mf_symlink))
379 rc = ptcon->ses->server->ops->query_mf_symlink(path, buf,
380 &bytes_read, cifs_sb, xid);
381 else
382 goto out;
383
384 if (rc != 0)
385 goto out;
386
387 if (bytes_read == 0) /* not a symlink */
388 goto out;
389
367 rc = CIFSParseMFSymlink(buf, bytes_read, &link_len, NULL); 390 rc = CIFSParseMFSymlink(buf, bytes_read, &link_len, NULL);
368 kfree(buf);
369 if (rc == -EINVAL) { 391 if (rc == -EINVAL) {
370 /* it's not a symlink */ 392 /* it's not a symlink */
371 rc = 0; 393 rc = 0;
@@ -381,7 +403,7 @@ CIFSCheckMFSymlink(struct cifs_fattr *fattr,
381 fattr->cf_mode |= S_IFLNK | S_IRWXU | S_IRWXG | S_IRWXO; 403 fattr->cf_mode |= S_IFLNK | S_IRWXU | S_IRWXG | S_IRWXO;
382 fattr->cf_dtype = DT_LNK; 404 fattr->cf_dtype = DT_LNK;
383out: 405out:
384 cifs_put_tlink(tlink); 406 kfree(buf);
385 return rc; 407 return rc;
386} 408}
387 409
diff --git a/fs/cifs/readdir.c b/fs/cifs/readdir.c
index ab8778469394..69d2c826a23b 100644
--- a/fs/cifs/readdir.c
+++ b/fs/cifs/readdir.c
@@ -111,6 +111,14 @@ cifs_prime_dcache(struct dentry *parent, struct qstr *name,
111 return; 111 return;
112 } 112 }
113 113
114 /*
115 * If we know that the inode will need to be revalidated immediately,
116 * then don't create a new dentry for it. We'll end up doing an on
117 * the wire call either way and this spares us an invalidation.
118 */
119 if (fattr->cf_flags & CIFS_FATTR_NEED_REVAL)
120 return;
121
114 dentry = d_alloc(parent, name); 122 dentry = d_alloc(parent, name);
115 if (!dentry) 123 if (!dentry)
116 return; 124 return;
diff --git a/fs/cifs/sess.c b/fs/cifs/sess.c
index 79358e341fd2..08dd37bb23aa 100644
--- a/fs/cifs/sess.c
+++ b/fs/cifs/sess.c
@@ -197,7 +197,7 @@ static void unicode_domain_string(char **pbcc_area, struct cifs_ses *ses,
197 bytes_ret = 0; 197 bytes_ret = 0;
198 } else 198 } else
199 bytes_ret = cifs_strtoUTF16((__le16 *) bcc_ptr, ses->domainName, 199 bytes_ret = cifs_strtoUTF16((__le16 *) bcc_ptr, ses->domainName,
200 256, nls_cp); 200 CIFS_MAX_DOMAINNAME_LEN, nls_cp);
201 bcc_ptr += 2 * bytes_ret; 201 bcc_ptr += 2 * bytes_ret;
202 bcc_ptr += 2; /* account for null terminator */ 202 bcc_ptr += 2; /* account for null terminator */
203 203
@@ -255,8 +255,8 @@ static void ascii_ssetup_strings(char **pbcc_area, struct cifs_ses *ses,
255 255
256 /* copy domain */ 256 /* copy domain */
257 if (ses->domainName != NULL) { 257 if (ses->domainName != NULL) {
258 strncpy(bcc_ptr, ses->domainName, 256); 258 strncpy(bcc_ptr, ses->domainName, CIFS_MAX_DOMAINNAME_LEN);
259 bcc_ptr += strnlen(ses->domainName, 256); 259 bcc_ptr += strnlen(ses->domainName, CIFS_MAX_DOMAINNAME_LEN);
260 } /* else we will send a null domain name 260 } /* else we will send a null domain name
261 so the server will default to its own domain */ 261 so the server will default to its own domain */
262 *bcc_ptr = 0; 262 *bcc_ptr = 0;
diff --git a/fs/cifs/smb1ops.c b/fs/cifs/smb1ops.c
index 6457690731a2..60943978aec3 100644
--- a/fs/cifs/smb1ops.c
+++ b/fs/cifs/smb1ops.c
@@ -944,6 +944,7 @@ struct smb_version_operations smb1_operations = {
944 .mand_lock = cifs_mand_lock, 944 .mand_lock = cifs_mand_lock,
945 .mand_unlock_range = cifs_unlock_range, 945 .mand_unlock_range = cifs_unlock_range,
946 .push_mand_locks = cifs_push_mandatory_locks, 946 .push_mand_locks = cifs_push_mandatory_locks,
947 .query_mf_symlink = open_query_close_cifs_symlink,
947}; 948};
948 949
949struct smb_version_values smb1_values = { 950struct smb_version_values smb1_values = {
diff --git a/fs/cifs/smb2transport.c b/fs/cifs/smb2transport.c
index 301b191270b9..4f2300d020c7 100644
--- a/fs/cifs/smb2transport.c
+++ b/fs/cifs/smb2transport.c
@@ -42,6 +42,7 @@
42static int 42static int
43smb2_crypto_shash_allocate(struct TCP_Server_Info *server) 43smb2_crypto_shash_allocate(struct TCP_Server_Info *server)
44{ 44{
45 int rc;
45 unsigned int size; 46 unsigned int size;
46 47
47 if (server->secmech.sdeschmacsha256 != NULL) 48 if (server->secmech.sdeschmacsha256 != NULL)
@@ -50,7 +51,9 @@ smb2_crypto_shash_allocate(struct TCP_Server_Info *server)
50 server->secmech.hmacsha256 = crypto_alloc_shash("hmac(sha256)", 0, 0); 51 server->secmech.hmacsha256 = crypto_alloc_shash("hmac(sha256)", 0, 0);
51 if (IS_ERR(server->secmech.hmacsha256)) { 52 if (IS_ERR(server->secmech.hmacsha256)) {
52 cifs_dbg(VFS, "could not allocate crypto hmacsha256\n"); 53 cifs_dbg(VFS, "could not allocate crypto hmacsha256\n");
53 return PTR_ERR(server->secmech.hmacsha256); 54 rc = PTR_ERR(server->secmech.hmacsha256);
55 server->secmech.hmacsha256 = NULL;
56 return rc;
54 } 57 }
55 58
56 size = sizeof(struct shash_desc) + 59 size = sizeof(struct shash_desc) +
@@ -87,7 +90,9 @@ smb3_crypto_shash_allocate(struct TCP_Server_Info *server)
87 server->secmech.sdeschmacsha256 = NULL; 90 server->secmech.sdeschmacsha256 = NULL;
88 crypto_free_shash(server->secmech.hmacsha256); 91 crypto_free_shash(server->secmech.hmacsha256);
89 server->secmech.hmacsha256 = NULL; 92 server->secmech.hmacsha256 = NULL;
90 return PTR_ERR(server->secmech.cmacaes); 93 rc = PTR_ERR(server->secmech.cmacaes);
94 server->secmech.cmacaes = NULL;
95 return rc;
91 } 96 }
92 97
93 size = sizeof(struct shash_desc) + 98 size = sizeof(struct shash_desc) +
diff --git a/fs/dcache.c b/fs/dcache.c
index 87bdb5329c3c..83cfb834db03 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -2724,6 +2724,17 @@ char *dynamic_dname(struct dentry *dentry, char *buffer, int buflen,
2724 return memcpy(buffer, temp, sz); 2724 return memcpy(buffer, temp, sz);
2725} 2725}
2726 2726
2727char *simple_dname(struct dentry *dentry, char *buffer, int buflen)
2728{
2729 char *end = buffer + buflen;
2730 /* these dentries are never renamed, so d_lock is not needed */
2731 if (prepend(&end, &buflen, " (deleted)", 11) ||
2732 prepend_name(&end, &buflen, &dentry->d_name) ||
2733 prepend(&end, &buflen, "/", 1))
2734 end = ERR_PTR(-ENAMETOOLONG);
2735 return end;
2736}
2737
2727/* 2738/*
2728 * Write full pathname from the root of the filesystem into the buffer. 2739 * Write full pathname from the root of the filesystem into the buffer.
2729 */ 2740 */
diff --git a/fs/efs/inode.c b/fs/efs/inode.c
index f3913eb2c474..d15ccf20f1b3 100644
--- a/fs/efs/inode.c
+++ b/fs/efs/inode.c
@@ -57,7 +57,7 @@ struct inode *efs_iget(struct super_block *super, unsigned long ino)
57 struct inode *inode; 57 struct inode *inode;
58 58
59 inode = iget_locked(super, ino); 59 inode = iget_locked(super, ino);
60 if (IS_ERR(inode)) 60 if (!inode)
61 return ERR_PTR(-ENOMEM); 61 return ERR_PTR(-ENOMEM);
62 if (!(inode->i_state & I_NEW)) 62 if (!(inode->i_state & I_NEW))
63 return inode; 63 return inode;
diff --git a/fs/exec.c b/fs/exec.c
index 9c73def87642..fd774c7cb483 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -608,7 +608,7 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
608 return -ENOMEM; 608 return -ENOMEM;
609 609
610 lru_add_drain(); 610 lru_add_drain();
611 tlb_gather_mmu(&tlb, mm, 0); 611 tlb_gather_mmu(&tlb, mm, old_start, old_end);
612 if (new_end > old_start) { 612 if (new_end > old_start) {
613 /* 613 /*
614 * when the old and new regions overlap clear from new_end. 614 * when the old and new regions overlap clear from new_end.
@@ -625,7 +625,7 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
625 free_pgd_range(&tlb, old_start, old_end, new_end, 625 free_pgd_range(&tlb, old_start, old_end, new_end,
626 vma->vm_next ? vma->vm_next->vm_start : USER_PGTABLES_CEILING); 626 vma->vm_next ? vma->vm_next->vm_start : USER_PGTABLES_CEILING);
627 } 627 }
628 tlb_finish_mmu(&tlb, new_end, old_end); 628 tlb_finish_mmu(&tlb, old_start, old_end);
629 629
630 /* 630 /*
631 * Shrink the vma to just the new range. Always succeeds. 631 * Shrink the vma to just the new range. Always succeeds.
diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
index b577e45425b0..0ab26fbf3380 100644
--- a/fs/ext4/ext4.h
+++ b/fs/ext4/ext4.h
@@ -2086,6 +2086,7 @@ extern int ext4_sync_inode(handle_t *, struct inode *);
2086extern void ext4_dirty_inode(struct inode *, int); 2086extern void ext4_dirty_inode(struct inode *, int);
2087extern int ext4_change_inode_journal_flag(struct inode *, int); 2087extern int ext4_change_inode_journal_flag(struct inode *, int);
2088extern int ext4_get_inode_loc(struct inode *, struct ext4_iloc *); 2088extern int ext4_get_inode_loc(struct inode *, struct ext4_iloc *);
2089extern int ext4_inode_attach_jinode(struct inode *inode);
2089extern int ext4_can_truncate(struct inode *inode); 2090extern int ext4_can_truncate(struct inode *inode);
2090extern void ext4_truncate(struct inode *); 2091extern void ext4_truncate(struct inode *);
2091extern int ext4_punch_hole(struct inode *inode, loff_t offset, loff_t length); 2092extern int ext4_punch_hole(struct inode *inode, loff_t offset, loff_t length);
diff --git a/fs/ext4/ext4_jbd2.c b/fs/ext4/ext4_jbd2.c
index 72a3600aedbd..17ac112ab101 100644
--- a/fs/ext4/ext4_jbd2.c
+++ b/fs/ext4/ext4_jbd2.c
@@ -255,10 +255,10 @@ int __ext4_handle_dirty_metadata(const char *where, unsigned int line,
255 set_buffer_prio(bh); 255 set_buffer_prio(bh);
256 if (ext4_handle_valid(handle)) { 256 if (ext4_handle_valid(handle)) {
257 err = jbd2_journal_dirty_metadata(handle, bh); 257 err = jbd2_journal_dirty_metadata(handle, bh);
258 if (err) { 258 /* Errors can only happen if there is a bug */
259 /* Errors can only happen if there is a bug */ 259 if (WARN_ON_ONCE(err)) {
260 handle->h_err = err; 260 ext4_journal_abort_handle(where, line, __func__, bh,
261 __ext4_journal_stop(where, line, handle); 261 handle, err);
262 } 262 }
263 } else { 263 } else {
264 if (inode) 264 if (inode)
diff --git a/fs/ext4/file.c b/fs/ext4/file.c
index 6f4cc567c382..319c9d26279a 100644
--- a/fs/ext4/file.c
+++ b/fs/ext4/file.c
@@ -219,7 +219,6 @@ static int ext4_file_open(struct inode * inode, struct file * filp)
219{ 219{
220 struct super_block *sb = inode->i_sb; 220 struct super_block *sb = inode->i_sb;
221 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); 221 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
222 struct ext4_inode_info *ei = EXT4_I(inode);
223 struct vfsmount *mnt = filp->f_path.mnt; 222 struct vfsmount *mnt = filp->f_path.mnt;
224 struct path path; 223 struct path path;
225 char buf[64], *cp; 224 char buf[64], *cp;
@@ -259,22 +258,10 @@ static int ext4_file_open(struct inode * inode, struct file * filp)
259 * Set up the jbd2_inode if we are opening the inode for 258 * Set up the jbd2_inode if we are opening the inode for
260 * writing and the journal is present 259 * writing and the journal is present
261 */ 260 */
262 if (sbi->s_journal && !ei->jinode && (filp->f_mode & FMODE_WRITE)) { 261 if (filp->f_mode & FMODE_WRITE) {
263 struct jbd2_inode *jinode = jbd2_alloc_inode(GFP_KERNEL); 262 int ret = ext4_inode_attach_jinode(inode);
264 263 if (ret < 0)
265 spin_lock(&inode->i_lock); 264 return ret;
266 if (!ei->jinode) {
267 if (!jinode) {
268 spin_unlock(&inode->i_lock);
269 return -ENOMEM;
270 }
271 ei->jinode = jinode;
272 jbd2_journal_init_jbd_inode(ei->jinode, inode);
273 jinode = NULL;
274 }
275 spin_unlock(&inode->i_lock);
276 if (unlikely(jinode != NULL))
277 jbd2_free_inode(jinode);
278 } 265 }
279 return dquot_file_open(inode, filp); 266 return dquot_file_open(inode, filp);
280} 267}
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index dd32a2eacd0d..c2ca04e67a4f 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -3533,6 +3533,18 @@ int ext4_punch_hole(struct inode *inode, loff_t offset, loff_t length)
3533 offset; 3533 offset;
3534 } 3534 }
3535 3535
3536 if (offset & (sb->s_blocksize - 1) ||
3537 (offset + length) & (sb->s_blocksize - 1)) {
3538 /*
3539 * Attach jinode to inode for jbd2 if we do any zeroing of
3540 * partial block
3541 */
3542 ret = ext4_inode_attach_jinode(inode);
3543 if (ret < 0)
3544 goto out_mutex;
3545
3546 }
3547
3536 first_block_offset = round_up(offset, sb->s_blocksize); 3548 first_block_offset = round_up(offset, sb->s_blocksize);
3537 last_block_offset = round_down((offset + length), sb->s_blocksize) - 1; 3549 last_block_offset = round_down((offset + length), sb->s_blocksize) - 1;
3538 3550
@@ -3601,6 +3613,31 @@ out_mutex:
3601 return ret; 3613 return ret;
3602} 3614}
3603 3615
3616int ext4_inode_attach_jinode(struct inode *inode)
3617{
3618 struct ext4_inode_info *ei = EXT4_I(inode);
3619 struct jbd2_inode *jinode;
3620
3621 if (ei->jinode || !EXT4_SB(inode->i_sb)->s_journal)
3622 return 0;
3623
3624 jinode = jbd2_alloc_inode(GFP_KERNEL);
3625 spin_lock(&inode->i_lock);
3626 if (!ei->jinode) {
3627 if (!jinode) {
3628 spin_unlock(&inode->i_lock);
3629 return -ENOMEM;
3630 }
3631 ei->jinode = jinode;
3632 jbd2_journal_init_jbd_inode(ei->jinode, inode);
3633 jinode = NULL;
3634 }
3635 spin_unlock(&inode->i_lock);
3636 if (unlikely(jinode != NULL))
3637 jbd2_free_inode(jinode);
3638 return 0;
3639}
3640
3604/* 3641/*
3605 * ext4_truncate() 3642 * ext4_truncate()
3606 * 3643 *
@@ -3661,6 +3698,12 @@ void ext4_truncate(struct inode *inode)
3661 return; 3698 return;
3662 } 3699 }
3663 3700
3701 /* If we zero-out tail of the page, we have to create jinode for jbd2 */
3702 if (inode->i_size & (inode->i_sb->s_blocksize - 1)) {
3703 if (ext4_inode_attach_jinode(inode) < 0)
3704 return;
3705 }
3706
3664 if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) 3707 if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
3665 credits = ext4_writepage_trans_blocks(inode); 3708 credits = ext4_writepage_trans_blocks(inode);
3666 else 3709 else
diff --git a/fs/ext4/ioctl.c b/fs/ext4/ioctl.c
index 9491ac0590f7..c0427e2f6648 100644
--- a/fs/ext4/ioctl.c
+++ b/fs/ext4/ioctl.c
@@ -77,8 +77,10 @@ static void swap_inode_data(struct inode *inode1, struct inode *inode2)
77 memswap(ei1->i_data, ei2->i_data, sizeof(ei1->i_data)); 77 memswap(ei1->i_data, ei2->i_data, sizeof(ei1->i_data));
78 memswap(&ei1->i_flags, &ei2->i_flags, sizeof(ei1->i_flags)); 78 memswap(&ei1->i_flags, &ei2->i_flags, sizeof(ei1->i_flags));
79 memswap(&ei1->i_disksize, &ei2->i_disksize, sizeof(ei1->i_disksize)); 79 memswap(&ei1->i_disksize, &ei2->i_disksize, sizeof(ei1->i_disksize));
80 memswap(&ei1->i_es_tree, &ei2->i_es_tree, sizeof(ei1->i_es_tree)); 80 ext4_es_remove_extent(inode1, 0, EXT_MAX_BLOCKS);
81 memswap(&ei1->i_es_lru_nr, &ei2->i_es_lru_nr, sizeof(ei1->i_es_lru_nr)); 81 ext4_es_remove_extent(inode2, 0, EXT_MAX_BLOCKS);
82 ext4_es_lru_del(inode1);
83 ext4_es_lru_del(inode2);
82 84
83 isize = i_size_read(inode1); 85 isize = i_size_read(inode1);
84 i_size_write(inode1, i_size_read(inode2)); 86 i_size_write(inode1, i_size_read(inode2));
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index 36b141e420b7..b59373b625e9 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -1359,7 +1359,7 @@ static const struct mount_opts {
1359 {Opt_delalloc, EXT4_MOUNT_DELALLOC, 1359 {Opt_delalloc, EXT4_MOUNT_DELALLOC,
1360 MOPT_EXT4_ONLY | MOPT_SET | MOPT_EXPLICIT}, 1360 MOPT_EXT4_ONLY | MOPT_SET | MOPT_EXPLICIT},
1361 {Opt_nodelalloc, EXT4_MOUNT_DELALLOC, 1361 {Opt_nodelalloc, EXT4_MOUNT_DELALLOC,
1362 MOPT_EXT4_ONLY | MOPT_CLEAR | MOPT_EXPLICIT}, 1362 MOPT_EXT4_ONLY | MOPT_CLEAR},
1363 {Opt_journal_checksum, EXT4_MOUNT_JOURNAL_CHECKSUM, 1363 {Opt_journal_checksum, EXT4_MOUNT_JOURNAL_CHECKSUM,
1364 MOPT_EXT4_ONLY | MOPT_SET}, 1364 MOPT_EXT4_ONLY | MOPT_SET},
1365 {Opt_journal_async_commit, (EXT4_MOUNT_JOURNAL_ASYNC_COMMIT | 1365 {Opt_journal_async_commit, (EXT4_MOUNT_JOURNAL_ASYNC_COMMIT |
@@ -3483,7 +3483,7 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
3483 } 3483 }
3484 if (test_opt(sb, DIOREAD_NOLOCK)) { 3484 if (test_opt(sb, DIOREAD_NOLOCK)) {
3485 ext4_msg(sb, KERN_ERR, "can't mount with " 3485 ext4_msg(sb, KERN_ERR, "can't mount with "
3486 "both data=journal and delalloc"); 3486 "both data=journal and dioread_nolock");
3487 goto failed_mount; 3487 goto failed_mount;
3488 } 3488 }
3489 if (test_opt(sb, DELALLOC)) 3489 if (test_opt(sb, DELALLOC))
@@ -4727,6 +4727,21 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data)
4727 goto restore_opts; 4727 goto restore_opts;
4728 } 4728 }
4729 4729
4730 if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA) {
4731 if (test_opt2(sb, EXPLICIT_DELALLOC)) {
4732 ext4_msg(sb, KERN_ERR, "can't mount with "
4733 "both data=journal and delalloc");
4734 err = -EINVAL;
4735 goto restore_opts;
4736 }
4737 if (test_opt(sb, DIOREAD_NOLOCK)) {
4738 ext4_msg(sb, KERN_ERR, "can't mount with "
4739 "both data=journal and dioread_nolock");
4740 err = -EINVAL;
4741 goto restore_opts;
4742 }
4743 }
4744
4730 if (sbi->s_mount_flags & EXT4_MF_FS_ABORTED) 4745 if (sbi->s_mount_flags & EXT4_MF_FS_ABORTED)
4731 ext4_abort(sb, "Abort forced by user"); 4746 ext4_abort(sb, "Abort forced by user");
4732 4747
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index 9435384562a2..544a809819c3 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -1838,14 +1838,14 @@ int __init gfs2_glock_init(void)
1838 1838
1839 glock_workqueue = alloc_workqueue("glock_workqueue", WQ_MEM_RECLAIM | 1839 glock_workqueue = alloc_workqueue("glock_workqueue", WQ_MEM_RECLAIM |
1840 WQ_HIGHPRI | WQ_FREEZABLE, 0); 1840 WQ_HIGHPRI | WQ_FREEZABLE, 0);
1841 if (IS_ERR(glock_workqueue)) 1841 if (!glock_workqueue)
1842 return PTR_ERR(glock_workqueue); 1842 return -ENOMEM;
1843 gfs2_delete_workqueue = alloc_workqueue("delete_workqueue", 1843 gfs2_delete_workqueue = alloc_workqueue("delete_workqueue",
1844 WQ_MEM_RECLAIM | WQ_FREEZABLE, 1844 WQ_MEM_RECLAIM | WQ_FREEZABLE,
1845 0); 1845 0);
1846 if (IS_ERR(gfs2_delete_workqueue)) { 1846 if (!gfs2_delete_workqueue) {
1847 destroy_workqueue(glock_workqueue); 1847 destroy_workqueue(glock_workqueue);
1848 return PTR_ERR(gfs2_delete_workqueue); 1848 return -ENOMEM;
1849 } 1849 }
1850 1850
1851 register_shrinker(&glock_shrinker); 1851 register_shrinker(&glock_shrinker);
diff --git a/fs/gfs2/glops.c b/fs/gfs2/glops.c
index 5f2e5224c51c..e2e0a90396e7 100644
--- a/fs/gfs2/glops.c
+++ b/fs/gfs2/glops.c
@@ -47,7 +47,8 @@ static void gfs2_ail_error(struct gfs2_glock *gl, const struct buffer_head *bh)
47 * None of the buffers should be dirty, locked, or pinned. 47 * None of the buffers should be dirty, locked, or pinned.
48 */ 48 */
49 49
50static void __gfs2_ail_flush(struct gfs2_glock *gl, bool fsync) 50static void __gfs2_ail_flush(struct gfs2_glock *gl, bool fsync,
51 unsigned int nr_revokes)
51{ 52{
52 struct gfs2_sbd *sdp = gl->gl_sbd; 53 struct gfs2_sbd *sdp = gl->gl_sbd;
53 struct list_head *head = &gl->gl_ail_list; 54 struct list_head *head = &gl->gl_ail_list;
@@ -57,7 +58,9 @@ static void __gfs2_ail_flush(struct gfs2_glock *gl, bool fsync)
57 58
58 gfs2_log_lock(sdp); 59 gfs2_log_lock(sdp);
59 spin_lock(&sdp->sd_ail_lock); 60 spin_lock(&sdp->sd_ail_lock);
60 list_for_each_entry_safe(bd, tmp, head, bd_ail_gl_list) { 61 list_for_each_entry_safe_reverse(bd, tmp, head, bd_ail_gl_list) {
62 if (nr_revokes == 0)
63 break;
61 bh = bd->bd_bh; 64 bh = bd->bd_bh;
62 if (bh->b_state & b_state) { 65 if (bh->b_state & b_state) {
63 if (fsync) 66 if (fsync)
@@ -65,6 +68,7 @@ static void __gfs2_ail_flush(struct gfs2_glock *gl, bool fsync)
65 gfs2_ail_error(gl, bh); 68 gfs2_ail_error(gl, bh);
66 } 69 }
67 gfs2_trans_add_revoke(sdp, bd); 70 gfs2_trans_add_revoke(sdp, bd);
71 nr_revokes--;
68 } 72 }
69 GLOCK_BUG_ON(gl, !fsync && atomic_read(&gl->gl_ail_count)); 73 GLOCK_BUG_ON(gl, !fsync && atomic_read(&gl->gl_ail_count));
70 spin_unlock(&sdp->sd_ail_lock); 74 spin_unlock(&sdp->sd_ail_lock);
@@ -91,7 +95,7 @@ static void gfs2_ail_empty_gl(struct gfs2_glock *gl)
91 WARN_ON_ONCE(current->journal_info); 95 WARN_ON_ONCE(current->journal_info);
92 current->journal_info = &tr; 96 current->journal_info = &tr;
93 97
94 __gfs2_ail_flush(gl, 0); 98 __gfs2_ail_flush(gl, 0, tr.tr_revokes);
95 99
96 gfs2_trans_end(sdp); 100 gfs2_trans_end(sdp);
97 gfs2_log_flush(sdp, NULL); 101 gfs2_log_flush(sdp, NULL);
@@ -101,15 +105,19 @@ void gfs2_ail_flush(struct gfs2_glock *gl, bool fsync)
101{ 105{
102 struct gfs2_sbd *sdp = gl->gl_sbd; 106 struct gfs2_sbd *sdp = gl->gl_sbd;
103 unsigned int revokes = atomic_read(&gl->gl_ail_count); 107 unsigned int revokes = atomic_read(&gl->gl_ail_count);
108 unsigned int max_revokes = (sdp->sd_sb.sb_bsize - sizeof(struct gfs2_log_descriptor)) / sizeof(u64);
104 int ret; 109 int ret;
105 110
106 if (!revokes) 111 if (!revokes)
107 return; 112 return;
108 113
109 ret = gfs2_trans_begin(sdp, 0, revokes); 114 while (revokes > max_revokes)
115 max_revokes += (sdp->sd_sb.sb_bsize - sizeof(struct gfs2_meta_header)) / sizeof(u64);
116
117 ret = gfs2_trans_begin(sdp, 0, max_revokes);
110 if (ret) 118 if (ret)
111 return; 119 return;
112 __gfs2_ail_flush(gl, fsync); 120 __gfs2_ail_flush(gl, fsync, max_revokes);
113 gfs2_trans_end(sdp); 121 gfs2_trans_end(sdp);
114 gfs2_log_flush(sdp, NULL); 122 gfs2_log_flush(sdp, NULL);
115} 123}
diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c
index bbb2715171cd..64915eeae5a7 100644
--- a/fs/gfs2/inode.c
+++ b/fs/gfs2/inode.c
@@ -594,7 +594,7 @@ static int gfs2_create_inode(struct inode *dir, struct dentry *dentry,
594 } 594 }
595 gfs2_glock_dq_uninit(ghs); 595 gfs2_glock_dq_uninit(ghs);
596 if (IS_ERR(d)) 596 if (IS_ERR(d))
597 return PTR_RET(d); 597 return PTR_ERR(d);
598 return error; 598 return error;
599 } else if (error != -ENOENT) { 599 } else if (error != -ENOENT) {
600 goto fail_gunlock; 600 goto fail_gunlock;
@@ -1750,6 +1750,10 @@ static ssize_t gfs2_getxattr(struct dentry *dentry, const char *name,
1750 struct gfs2_holder gh; 1750 struct gfs2_holder gh;
1751 int ret; 1751 int ret;
1752 1752
1753 /* For selinux during lookup */
1754 if (gfs2_glock_is_locked_by_me(ip->i_gl))
1755 return generic_getxattr(dentry, name, data, size);
1756
1753 gfs2_holder_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY, &gh); 1757 gfs2_holder_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY, &gh);
1754 ret = gfs2_glock_nq(&gh); 1758 ret = gfs2_glock_nq(&gh);
1755 if (ret == 0) { 1759 if (ret == 0) {
diff --git a/fs/gfs2/main.c b/fs/gfs2/main.c
index e04d0e09ee7b..7b0f5043cf24 100644
--- a/fs/gfs2/main.c
+++ b/fs/gfs2/main.c
@@ -155,7 +155,7 @@ static int __init init_gfs2_fs(void)
155 goto fail_wq; 155 goto fail_wq;
156 156
157 gfs2_control_wq = alloc_workqueue("gfs2_control", 157 gfs2_control_wq = alloc_workqueue("gfs2_control",
158 WQ_NON_REENTRANT | WQ_UNBOUND | WQ_FREEZABLE, 0); 158 WQ_UNBOUND | WQ_FREEZABLE, 0);
159 if (!gfs2_control_wq) 159 if (!gfs2_control_wq)
160 goto fail_recovery; 160 goto fail_recovery;
161 161
diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
index a3f868ae3fd4..d19b30ababf1 100644
--- a/fs/hugetlbfs/inode.c
+++ b/fs/hugetlbfs/inode.c
@@ -463,6 +463,14 @@ static struct inode *hugetlbfs_get_root(struct super_block *sb,
463 return inode; 463 return inode;
464} 464}
465 465
466/*
467 * Hugetlbfs is not reclaimable; therefore its i_mmap_mutex will never
468 * be taken from reclaim -- unlike regular filesystems. This needs an
469 * annotation because huge_pmd_share() does an allocation under
470 * i_mmap_mutex.
471 */
472struct lock_class_key hugetlbfs_i_mmap_mutex_key;
473
466static struct inode *hugetlbfs_get_inode(struct super_block *sb, 474static struct inode *hugetlbfs_get_inode(struct super_block *sb,
467 struct inode *dir, 475 struct inode *dir,
468 umode_t mode, dev_t dev) 476 umode_t mode, dev_t dev)
@@ -474,6 +482,8 @@ static struct inode *hugetlbfs_get_inode(struct super_block *sb,
474 struct hugetlbfs_inode_info *info; 482 struct hugetlbfs_inode_info *info;
475 inode->i_ino = get_next_ino(); 483 inode->i_ino = get_next_ino();
476 inode_init_owner(inode, dir, mode); 484 inode_init_owner(inode, dir, mode);
485 lockdep_set_class(&inode->i_mapping->i_mmap_mutex,
486 &hugetlbfs_i_mmap_mutex_key);
477 inode->i_mapping->a_ops = &hugetlbfs_aops; 487 inode->i_mapping->a_ops = &hugetlbfs_aops;
478 inode->i_mapping->backing_dev_info =&hugetlbfs_backing_dev_info; 488 inode->i_mapping->backing_dev_info =&hugetlbfs_backing_dev_info;
479 inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME; 489 inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
@@ -916,14 +926,8 @@ static int get_hstate_idx(int page_size_log)
916 return h - hstates; 926 return h - hstates;
917} 927}
918 928
919static char *hugetlb_dname(struct dentry *dentry, char *buffer, int buflen)
920{
921 return dynamic_dname(dentry, buffer, buflen, "/%s (deleted)",
922 dentry->d_name.name);
923}
924
925static struct dentry_operations anon_ops = { 929static struct dentry_operations anon_ops = {
926 .d_dname = hugetlb_dname 930 .d_dname = simple_dname
927}; 931};
928 932
929/* 933/*
diff --git a/fs/namespace.c b/fs/namespace.c
index 7b1ca9ba0b0a..a45ba4f267fe 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -1429,7 +1429,7 @@ struct vfsmount *collect_mounts(struct path *path)
1429 CL_COPY_ALL | CL_PRIVATE); 1429 CL_COPY_ALL | CL_PRIVATE);
1430 namespace_unlock(); 1430 namespace_unlock();
1431 if (IS_ERR(tree)) 1431 if (IS_ERR(tree))
1432 return NULL; 1432 return ERR_CAST(tree);
1433 return &tree->mnt; 1433 return &tree->mnt;
1434} 1434}
1435 1435
diff --git a/fs/nilfs2/segbuf.c b/fs/nilfs2/segbuf.c
index dc9a913784ab..2d8be51f90dc 100644
--- a/fs/nilfs2/segbuf.c
+++ b/fs/nilfs2/segbuf.c
@@ -345,8 +345,7 @@ static void nilfs_end_bio_write(struct bio *bio, int err)
345 345
346 if (err == -EOPNOTSUPP) { 346 if (err == -EOPNOTSUPP) {
347 set_bit(BIO_EOPNOTSUPP, &bio->bi_flags); 347 set_bit(BIO_EOPNOTSUPP, &bio->bi_flags);
348 bio_put(bio); 348 /* to be detected by nilfs_segbuf_submit_bio() */
349 /* to be detected by submit_seg_bio() */
350 } 349 }
351 350
352 if (!uptodate) 351 if (!uptodate)
@@ -377,12 +376,12 @@ static int nilfs_segbuf_submit_bio(struct nilfs_segment_buffer *segbuf,
377 bio->bi_private = segbuf; 376 bio->bi_private = segbuf;
378 bio_get(bio); 377 bio_get(bio);
379 submit_bio(mode, bio); 378 submit_bio(mode, bio);
379 segbuf->sb_nbio++;
380 if (bio_flagged(bio, BIO_EOPNOTSUPP)) { 380 if (bio_flagged(bio, BIO_EOPNOTSUPP)) {
381 bio_put(bio); 381 bio_put(bio);
382 err = -EOPNOTSUPP; 382 err = -EOPNOTSUPP;
383 goto failed; 383 goto failed;
384 } 384 }
385 segbuf->sb_nbio++;
386 bio_put(bio); 385 bio_put(bio);
387 386
388 wi->bio = NULL; 387 wi->bio = NULL;
diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c
index 79736a28d84f..2abf97b2a592 100644
--- a/fs/ocfs2/aops.c
+++ b/fs/ocfs2/aops.c
@@ -1757,7 +1757,7 @@ try_again:
1757 goto out; 1757 goto out;
1758 } else if (ret == 1) { 1758 } else if (ret == 1) {
1759 clusters_need = wc->w_clen; 1759 clusters_need = wc->w_clen;
1760 ret = ocfs2_refcount_cow(inode, filp, di_bh, 1760 ret = ocfs2_refcount_cow(inode, di_bh,
1761 wc->w_cpos, wc->w_clen, UINT_MAX); 1761 wc->w_cpos, wc->w_clen, UINT_MAX);
1762 if (ret) { 1762 if (ret) {
1763 mlog_errno(ret); 1763 mlog_errno(ret);
diff --git a/fs/ocfs2/dir.c b/fs/ocfs2/dir.c
index eb760d8acd50..30544ce8e9f7 100644
--- a/fs/ocfs2/dir.c
+++ b/fs/ocfs2/dir.c
@@ -2153,11 +2153,9 @@ int ocfs2_empty_dir(struct inode *inode)
2153{ 2153{
2154 int ret; 2154 int ret;
2155 struct ocfs2_empty_dir_priv priv = { 2155 struct ocfs2_empty_dir_priv priv = {
2156 .ctx.actor = ocfs2_empty_dir_filldir 2156 .ctx.actor = ocfs2_empty_dir_filldir,
2157 }; 2157 };
2158 2158
2159 memset(&priv, 0, sizeof(priv));
2160
2161 if (ocfs2_dir_indexed(inode)) { 2159 if (ocfs2_dir_indexed(inode)) {
2162 ret = ocfs2_empty_dir_dx(inode, &priv); 2160 ret = ocfs2_empty_dir_dx(inode, &priv);
2163 if (ret) 2161 if (ret)
diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c
index 41000f223ca4..3261d71319ee 100644
--- a/fs/ocfs2/file.c
+++ b/fs/ocfs2/file.c
@@ -370,7 +370,7 @@ static int ocfs2_cow_file_pos(struct inode *inode,
370 if (!(ext_flags & OCFS2_EXT_REFCOUNTED)) 370 if (!(ext_flags & OCFS2_EXT_REFCOUNTED))
371 goto out; 371 goto out;
372 372
373 return ocfs2_refcount_cow(inode, NULL, fe_bh, cpos, 1, cpos+1); 373 return ocfs2_refcount_cow(inode, fe_bh, cpos, 1, cpos+1);
374 374
375out: 375out:
376 return status; 376 return status;
@@ -899,7 +899,7 @@ static int ocfs2_zero_extend_get_range(struct inode *inode,
899 zero_clusters = last_cpos - zero_cpos; 899 zero_clusters = last_cpos - zero_cpos;
900 900
901 if (needs_cow) { 901 if (needs_cow) {
902 rc = ocfs2_refcount_cow(inode, NULL, di_bh, zero_cpos, 902 rc = ocfs2_refcount_cow(inode, di_bh, zero_cpos,
903 zero_clusters, UINT_MAX); 903 zero_clusters, UINT_MAX);
904 if (rc) { 904 if (rc) {
905 mlog_errno(rc); 905 mlog_errno(rc);
@@ -2078,7 +2078,7 @@ static int ocfs2_prepare_inode_for_refcount(struct inode *inode,
2078 2078
2079 *meta_level = 1; 2079 *meta_level = 1;
2080 2080
2081 ret = ocfs2_refcount_cow(inode, file, di_bh, cpos, clusters, UINT_MAX); 2081 ret = ocfs2_refcount_cow(inode, di_bh, cpos, clusters, UINT_MAX);
2082 if (ret) 2082 if (ret)
2083 mlog_errno(ret); 2083 mlog_errno(ret);
2084out: 2084out:
diff --git a/fs/ocfs2/journal.h b/fs/ocfs2/journal.h
index 96f9ac237e86..0a992737dcaf 100644
--- a/fs/ocfs2/journal.h
+++ b/fs/ocfs2/journal.h
@@ -537,7 +537,7 @@ static inline int ocfs2_calc_extend_credits(struct super_block *sb,
537 extent_blocks = 1 + 1 + le16_to_cpu(root_el->l_tree_depth); 537 extent_blocks = 1 + 1 + le16_to_cpu(root_el->l_tree_depth);
538 538
539 return bitmap_blocks + sysfile_bitmap_blocks + extent_blocks + 539 return bitmap_blocks + sysfile_bitmap_blocks + extent_blocks +
540 ocfs2_quota_trans_credits(sb) + bits_wanted; 540 ocfs2_quota_trans_credits(sb);
541} 541}
542 542
543static inline int ocfs2_calc_symlink_credits(struct super_block *sb) 543static inline int ocfs2_calc_symlink_credits(struct super_block *sb)
diff --git a/fs/ocfs2/move_extents.c b/fs/ocfs2/move_extents.c
index f1fc172175b6..452068b45749 100644
--- a/fs/ocfs2/move_extents.c
+++ b/fs/ocfs2/move_extents.c
@@ -69,7 +69,7 @@ static int __ocfs2_move_extent(handle_t *handle,
69 u64 ino = ocfs2_metadata_cache_owner(context->et.et_ci); 69 u64 ino = ocfs2_metadata_cache_owner(context->et.et_ci);
70 u64 old_blkno = ocfs2_clusters_to_blocks(inode->i_sb, p_cpos); 70 u64 old_blkno = ocfs2_clusters_to_blocks(inode->i_sb, p_cpos);
71 71
72 ret = ocfs2_duplicate_clusters_by_page(handle, context->file, cpos, 72 ret = ocfs2_duplicate_clusters_by_page(handle, inode, cpos,
73 p_cpos, new_p_cpos, len); 73 p_cpos, new_p_cpos, len);
74 if (ret) { 74 if (ret) {
75 mlog_errno(ret); 75 mlog_errno(ret);
diff --git a/fs/ocfs2/refcounttree.c b/fs/ocfs2/refcounttree.c
index 9f6b96a09615..a70d604593b6 100644
--- a/fs/ocfs2/refcounttree.c
+++ b/fs/ocfs2/refcounttree.c
@@ -49,7 +49,6 @@
49 49
50struct ocfs2_cow_context { 50struct ocfs2_cow_context {
51 struct inode *inode; 51 struct inode *inode;
52 struct file *file;
53 u32 cow_start; 52 u32 cow_start;
54 u32 cow_len; 53 u32 cow_len;
55 struct ocfs2_extent_tree data_et; 54 struct ocfs2_extent_tree data_et;
@@ -66,7 +65,7 @@ struct ocfs2_cow_context {
66 u32 *num_clusters, 65 u32 *num_clusters,
67 unsigned int *extent_flags); 66 unsigned int *extent_flags);
68 int (*cow_duplicate_clusters)(handle_t *handle, 67 int (*cow_duplicate_clusters)(handle_t *handle,
69 struct file *file, 68 struct inode *inode,
70 u32 cpos, u32 old_cluster, 69 u32 cpos, u32 old_cluster,
71 u32 new_cluster, u32 new_len); 70 u32 new_cluster, u32 new_len);
72}; 71};
@@ -2922,14 +2921,12 @@ static int ocfs2_clear_cow_buffer(handle_t *handle, struct buffer_head *bh)
2922} 2921}
2923 2922
2924int ocfs2_duplicate_clusters_by_page(handle_t *handle, 2923int ocfs2_duplicate_clusters_by_page(handle_t *handle,
2925 struct file *file, 2924 struct inode *inode,
2926 u32 cpos, u32 old_cluster, 2925 u32 cpos, u32 old_cluster,
2927 u32 new_cluster, u32 new_len) 2926 u32 new_cluster, u32 new_len)
2928{ 2927{
2929 int ret = 0, partial; 2928 int ret = 0, partial;
2930 struct inode *inode = file_inode(file); 2929 struct super_block *sb = inode->i_sb;
2931 struct ocfs2_caching_info *ci = INODE_CACHE(inode);
2932 struct super_block *sb = ocfs2_metadata_cache_get_super(ci);
2933 u64 new_block = ocfs2_clusters_to_blocks(sb, new_cluster); 2930 u64 new_block = ocfs2_clusters_to_blocks(sb, new_cluster);
2934 struct page *page; 2931 struct page *page;
2935 pgoff_t page_index; 2932 pgoff_t page_index;
@@ -2978,13 +2975,6 @@ int ocfs2_duplicate_clusters_by_page(handle_t *handle,
2978 if (PAGE_CACHE_SIZE <= OCFS2_SB(sb)->s_clustersize) 2975 if (PAGE_CACHE_SIZE <= OCFS2_SB(sb)->s_clustersize)
2979 BUG_ON(PageDirty(page)); 2976 BUG_ON(PageDirty(page));
2980 2977
2981 if (PageReadahead(page)) {
2982 page_cache_async_readahead(mapping,
2983 &file->f_ra, file,
2984 page, page_index,
2985 readahead_pages);
2986 }
2987
2988 if (!PageUptodate(page)) { 2978 if (!PageUptodate(page)) {
2989 ret = block_read_full_page(page, ocfs2_get_block); 2979 ret = block_read_full_page(page, ocfs2_get_block);
2990 if (ret) { 2980 if (ret) {
@@ -3004,7 +2994,8 @@ int ocfs2_duplicate_clusters_by_page(handle_t *handle,
3004 } 2994 }
3005 } 2995 }
3006 2996
3007 ocfs2_map_and_dirty_page(inode, handle, from, to, 2997 ocfs2_map_and_dirty_page(inode,
2998 handle, from, to,
3008 page, 0, &new_block); 2999 page, 0, &new_block);
3009 mark_page_accessed(page); 3000 mark_page_accessed(page);
3010unlock: 3001unlock:
@@ -3020,12 +3011,11 @@ unlock:
3020} 3011}
3021 3012
3022int ocfs2_duplicate_clusters_by_jbd(handle_t *handle, 3013int ocfs2_duplicate_clusters_by_jbd(handle_t *handle,
3023 struct file *file, 3014 struct inode *inode,
3024 u32 cpos, u32 old_cluster, 3015 u32 cpos, u32 old_cluster,
3025 u32 new_cluster, u32 new_len) 3016 u32 new_cluster, u32 new_len)
3026{ 3017{
3027 int ret = 0; 3018 int ret = 0;
3028 struct inode *inode = file_inode(file);
3029 struct super_block *sb = inode->i_sb; 3019 struct super_block *sb = inode->i_sb;
3030 struct ocfs2_caching_info *ci = INODE_CACHE(inode); 3020 struct ocfs2_caching_info *ci = INODE_CACHE(inode);
3031 int i, blocks = ocfs2_clusters_to_blocks(sb, new_len); 3021 int i, blocks = ocfs2_clusters_to_blocks(sb, new_len);
@@ -3150,7 +3140,7 @@ static int ocfs2_replace_clusters(handle_t *handle,
3150 3140
3151 /*If the old clusters is unwritten, no need to duplicate. */ 3141 /*If the old clusters is unwritten, no need to duplicate. */
3152 if (!(ext_flags & OCFS2_EXT_UNWRITTEN)) { 3142 if (!(ext_flags & OCFS2_EXT_UNWRITTEN)) {
3153 ret = context->cow_duplicate_clusters(handle, context->file, 3143 ret = context->cow_duplicate_clusters(handle, context->inode,
3154 cpos, old, new, len); 3144 cpos, old, new, len);
3155 if (ret) { 3145 if (ret) {
3156 mlog_errno(ret); 3146 mlog_errno(ret);
@@ -3428,35 +3418,12 @@ static int ocfs2_replace_cow(struct ocfs2_cow_context *context)
3428 return ret; 3418 return ret;
3429} 3419}
3430 3420
3431static void ocfs2_readahead_for_cow(struct inode *inode,
3432 struct file *file,
3433 u32 start, u32 len)
3434{
3435 struct address_space *mapping;
3436 pgoff_t index;
3437 unsigned long num_pages;
3438 int cs_bits = OCFS2_SB(inode->i_sb)->s_clustersize_bits;
3439
3440 if (!file)
3441 return;
3442
3443 mapping = file->f_mapping;
3444 num_pages = (len << cs_bits) >> PAGE_CACHE_SHIFT;
3445 if (!num_pages)
3446 num_pages = 1;
3447
3448 index = ((loff_t)start << cs_bits) >> PAGE_CACHE_SHIFT;
3449 page_cache_sync_readahead(mapping, &file->f_ra, file,
3450 index, num_pages);
3451}
3452
3453/* 3421/*
3454 * Starting at cpos, try to CoW write_len clusters. Don't CoW 3422 * Starting at cpos, try to CoW write_len clusters. Don't CoW
3455 * past max_cpos. This will stop when it runs into a hole or an 3423 * past max_cpos. This will stop when it runs into a hole or an
3456 * unrefcounted extent. 3424 * unrefcounted extent.
3457 */ 3425 */
3458static int ocfs2_refcount_cow_hunk(struct inode *inode, 3426static int ocfs2_refcount_cow_hunk(struct inode *inode,
3459 struct file *file,
3460 struct buffer_head *di_bh, 3427 struct buffer_head *di_bh,
3461 u32 cpos, u32 write_len, u32 max_cpos) 3428 u32 cpos, u32 write_len, u32 max_cpos)
3462{ 3429{
@@ -3485,8 +3452,6 @@ static int ocfs2_refcount_cow_hunk(struct inode *inode,
3485 3452
3486 BUG_ON(cow_len == 0); 3453 BUG_ON(cow_len == 0);
3487 3454
3488 ocfs2_readahead_for_cow(inode, file, cow_start, cow_len);
3489
3490 context = kzalloc(sizeof(struct ocfs2_cow_context), GFP_NOFS); 3455 context = kzalloc(sizeof(struct ocfs2_cow_context), GFP_NOFS);
3491 if (!context) { 3456 if (!context) {
3492 ret = -ENOMEM; 3457 ret = -ENOMEM;
@@ -3508,7 +3473,6 @@ static int ocfs2_refcount_cow_hunk(struct inode *inode,
3508 context->ref_root_bh = ref_root_bh; 3473 context->ref_root_bh = ref_root_bh;
3509 context->cow_duplicate_clusters = ocfs2_duplicate_clusters_by_page; 3474 context->cow_duplicate_clusters = ocfs2_duplicate_clusters_by_page;
3510 context->get_clusters = ocfs2_di_get_clusters; 3475 context->get_clusters = ocfs2_di_get_clusters;
3511 context->file = file;
3512 3476
3513 ocfs2_init_dinode_extent_tree(&context->data_et, 3477 ocfs2_init_dinode_extent_tree(&context->data_et,
3514 INODE_CACHE(inode), di_bh); 3478 INODE_CACHE(inode), di_bh);
@@ -3537,7 +3501,6 @@ out:
3537 * clusters between cpos and cpos+write_len are safe to modify. 3501 * clusters between cpos and cpos+write_len are safe to modify.
3538 */ 3502 */
3539int ocfs2_refcount_cow(struct inode *inode, 3503int ocfs2_refcount_cow(struct inode *inode,
3540 struct file *file,
3541 struct buffer_head *di_bh, 3504 struct buffer_head *di_bh,
3542 u32 cpos, u32 write_len, u32 max_cpos) 3505 u32 cpos, u32 write_len, u32 max_cpos)
3543{ 3506{
@@ -3557,7 +3520,7 @@ int ocfs2_refcount_cow(struct inode *inode,
3557 num_clusters = write_len; 3520 num_clusters = write_len;
3558 3521
3559 if (ext_flags & OCFS2_EXT_REFCOUNTED) { 3522 if (ext_flags & OCFS2_EXT_REFCOUNTED) {
3560 ret = ocfs2_refcount_cow_hunk(inode, file, di_bh, cpos, 3523 ret = ocfs2_refcount_cow_hunk(inode, di_bh, cpos,
3561 num_clusters, max_cpos); 3524 num_clusters, max_cpos);
3562 if (ret) { 3525 if (ret) {
3563 mlog_errno(ret); 3526 mlog_errno(ret);
diff --git a/fs/ocfs2/refcounttree.h b/fs/ocfs2/refcounttree.h
index 7754608c83a4..6422bbcdb525 100644
--- a/fs/ocfs2/refcounttree.h
+++ b/fs/ocfs2/refcounttree.h
@@ -53,7 +53,7 @@ int ocfs2_prepare_refcount_change_for_del(struct inode *inode,
53 int *credits, 53 int *credits,
54 int *ref_blocks); 54 int *ref_blocks);
55int ocfs2_refcount_cow(struct inode *inode, 55int ocfs2_refcount_cow(struct inode *inode,
56 struct file *filep, struct buffer_head *di_bh, 56 struct buffer_head *di_bh,
57 u32 cpos, u32 write_len, u32 max_cpos); 57 u32 cpos, u32 write_len, u32 max_cpos);
58 58
59typedef int (ocfs2_post_refcount_func)(struct inode *inode, 59typedef int (ocfs2_post_refcount_func)(struct inode *inode,
@@ -85,11 +85,11 @@ int ocfs2_refcount_cow_xattr(struct inode *inode,
85 u32 cpos, u32 write_len, 85 u32 cpos, u32 write_len,
86 struct ocfs2_post_refcount *post); 86 struct ocfs2_post_refcount *post);
87int ocfs2_duplicate_clusters_by_page(handle_t *handle, 87int ocfs2_duplicate_clusters_by_page(handle_t *handle,
88 struct file *file, 88 struct inode *inode,
89 u32 cpos, u32 old_cluster, 89 u32 cpos, u32 old_cluster,
90 u32 new_cluster, u32 new_len); 90 u32 new_cluster, u32 new_len);
91int ocfs2_duplicate_clusters_by_jbd(handle_t *handle, 91int ocfs2_duplicate_clusters_by_jbd(handle_t *handle,
92 struct file *file, 92 struct inode *inode,
93 u32 cpos, u32 old_cluster, 93 u32 cpos, u32 old_cluster,
94 u32 new_cluster, u32 new_len); 94 u32 new_cluster, u32 new_len);
95int ocfs2_cow_sync_writeback(struct super_block *sb, 95int ocfs2_cow_sync_writeback(struct super_block *sb,
diff --git a/fs/proc/fd.c b/fs/proc/fd.c
index 75f2890abbd8..0ff80f9b930f 100644
--- a/fs/proc/fd.c
+++ b/fs/proc/fd.c
@@ -230,8 +230,6 @@ static int proc_readfd_common(struct file *file, struct dir_context *ctx,
230 230
231 if (!dir_emit_dots(file, ctx)) 231 if (!dir_emit_dots(file, ctx))
232 goto out; 232 goto out;
233 if (!dir_emit_dots(file, ctx))
234 goto out;
235 files = get_files_struct(p); 233 files = get_files_struct(p);
236 if (!files) 234 if (!files)
237 goto out; 235 goto out;
diff --git a/fs/proc/generic.c b/fs/proc/generic.c
index 94441a407337..737e15615b04 100644
--- a/fs/proc/generic.c
+++ b/fs/proc/generic.c
@@ -271,7 +271,7 @@ int proc_readdir_de(struct proc_dir_entry *de, struct file *file,
271 de = next; 271 de = next;
272 } while (de); 272 } while (de);
273 spin_unlock(&proc_subdir_lock); 273 spin_unlock(&proc_subdir_lock);
274 return 0; 274 return 1;
275} 275}
276 276
277int proc_readdir(struct file *file, struct dir_context *ctx) 277int proc_readdir(struct file *file, struct dir_context *ctx)
diff --git a/fs/proc/root.c b/fs/proc/root.c
index 229e366598da..e0a790da726d 100644
--- a/fs/proc/root.c
+++ b/fs/proc/root.c
@@ -205,7 +205,9 @@ static struct dentry *proc_root_lookup(struct inode * dir, struct dentry * dentr
205static int proc_root_readdir(struct file *file, struct dir_context *ctx) 205static int proc_root_readdir(struct file *file, struct dir_context *ctx)
206{ 206{
207 if (ctx->pos < FIRST_PROCESS_ENTRY) { 207 if (ctx->pos < FIRST_PROCESS_ENTRY) {
208 proc_readdir(file, ctx); 208 int error = proc_readdir(file, ctx);
209 if (unlikely(error <= 0))
210 return error;
209 ctx->pos = FIRST_PROCESS_ENTRY; 211 ctx->pos = FIRST_PROCESS_ENTRY;
210 } 212 }
211 213
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index dbf61f6174f0..107d026f5d6e 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -730,8 +730,16 @@ static inline void clear_soft_dirty(struct vm_area_struct *vma,
730 * of how soft-dirty works. 730 * of how soft-dirty works.
731 */ 731 */
732 pte_t ptent = *pte; 732 pte_t ptent = *pte;
733 ptent = pte_wrprotect(ptent); 733
734 ptent = pte_clear_flags(ptent, _PAGE_SOFT_DIRTY); 734 if (pte_present(ptent)) {
735 ptent = pte_wrprotect(ptent);
736 ptent = pte_clear_flags(ptent, _PAGE_SOFT_DIRTY);
737 } else if (is_swap_pte(ptent)) {
738 ptent = pte_swp_clear_soft_dirty(ptent);
739 } else if (pte_file(ptent)) {
740 ptent = pte_file_clear_soft_dirty(ptent);
741 }
742
735 set_pte_at(vma->vm_mm, addr, pte, ptent); 743 set_pte_at(vma->vm_mm, addr, pte, ptent);
736#endif 744#endif
737} 745}
@@ -752,14 +760,15 @@ static int clear_refs_pte_range(pmd_t *pmd, unsigned long addr,
752 pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); 760 pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
753 for (; addr != end; pte++, addr += PAGE_SIZE) { 761 for (; addr != end; pte++, addr += PAGE_SIZE) {
754 ptent = *pte; 762 ptent = *pte;
755 if (!pte_present(ptent))
756 continue;
757 763
758 if (cp->type == CLEAR_REFS_SOFT_DIRTY) { 764 if (cp->type == CLEAR_REFS_SOFT_DIRTY) {
759 clear_soft_dirty(vma, addr, pte); 765 clear_soft_dirty(vma, addr, pte);
760 continue; 766 continue;
761 } 767 }
762 768
769 if (!pte_present(ptent))
770 continue;
771
763 page = vm_normal_page(vma, addr, ptent); 772 page = vm_normal_page(vma, addr, ptent);
764 if (!page) 773 if (!page)
765 continue; 774 continue;
@@ -859,7 +868,7 @@ typedef struct {
859} pagemap_entry_t; 868} pagemap_entry_t;
860 869
861struct pagemapread { 870struct pagemapread {
862 int pos, len; 871 int pos, len; /* units: PM_ENTRY_BYTES, not bytes */
863 pagemap_entry_t *buffer; 872 pagemap_entry_t *buffer;
864 bool v2; 873 bool v2;
865}; 874};
@@ -867,7 +876,7 @@ struct pagemapread {
867#define PAGEMAP_WALK_SIZE (PMD_SIZE) 876#define PAGEMAP_WALK_SIZE (PMD_SIZE)
868#define PAGEMAP_WALK_MASK (PMD_MASK) 877#define PAGEMAP_WALK_MASK (PMD_MASK)
869 878
870#define PM_ENTRY_BYTES sizeof(u64) 879#define PM_ENTRY_BYTES sizeof(pagemap_entry_t)
871#define PM_STATUS_BITS 3 880#define PM_STATUS_BITS 3
872#define PM_STATUS_OFFSET (64 - PM_STATUS_BITS) 881#define PM_STATUS_OFFSET (64 - PM_STATUS_BITS)
873#define PM_STATUS_MASK (((1LL << PM_STATUS_BITS) - 1) << PM_STATUS_OFFSET) 882#define PM_STATUS_MASK (((1LL << PM_STATUS_BITS) - 1) << PM_STATUS_OFFSET)
@@ -930,8 +939,10 @@ static void pte_to_pagemap_entry(pagemap_entry_t *pme, struct pagemapread *pm,
930 flags = PM_PRESENT; 939 flags = PM_PRESENT;
931 page = vm_normal_page(vma, addr, pte); 940 page = vm_normal_page(vma, addr, pte);
932 } else if (is_swap_pte(pte)) { 941 } else if (is_swap_pte(pte)) {
933 swp_entry_t entry = pte_to_swp_entry(pte); 942 swp_entry_t entry;
934 943 if (pte_swp_soft_dirty(pte))
944 flags2 |= __PM_SOFT_DIRTY;
945 entry = pte_to_swp_entry(pte);
935 frame = swp_type(entry) | 946 frame = swp_type(entry) |
936 (swp_offset(entry) << MAX_SWAPFILES_SHIFT); 947 (swp_offset(entry) << MAX_SWAPFILES_SHIFT);
937 flags = PM_SWAP; 948 flags = PM_SWAP;
@@ -1116,8 +1127,8 @@ static ssize_t pagemap_read(struct file *file, char __user *buf,
1116 goto out_task; 1127 goto out_task;
1117 1128
1118 pm.v2 = soft_dirty_cleared; 1129 pm.v2 = soft_dirty_cleared;
1119 pm.len = PM_ENTRY_BYTES * (PAGEMAP_WALK_SIZE >> PAGE_SHIFT); 1130 pm.len = (PAGEMAP_WALK_SIZE >> PAGE_SHIFT);
1120 pm.buffer = kmalloc(pm.len, GFP_TEMPORARY); 1131 pm.buffer = kmalloc(pm.len * PM_ENTRY_BYTES, GFP_TEMPORARY);
1121 ret = -ENOMEM; 1132 ret = -ENOMEM;
1122 if (!pm.buffer) 1133 if (!pm.buffer)
1123 goto out_task; 1134 goto out_task;
diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h
index a87eeda25627..22650cd9e72a 100644
--- a/include/acpi/acpi_bus.h
+++ b/include/acpi/acpi_bus.h
@@ -56,6 +56,16 @@ acpi_evaluate_hotplug_ost(acpi_handle handle, u32 source_event,
56 56
57acpi_status 57acpi_status
58acpi_get_physical_device_location(acpi_handle handle, struct acpi_pld_info **pld); 58acpi_get_physical_device_location(acpi_handle handle, struct acpi_pld_info **pld);
59
60bool acpi_has_method(acpi_handle handle, char *name);
61acpi_status acpi_execute_simple_method(acpi_handle handle, char *method,
62 u64 arg);
63acpi_status acpi_evaluate_ej0(acpi_handle handle);
64acpi_status acpi_evaluate_lck(acpi_handle handle, int lock);
65bool acpi_ata_match(acpi_handle handle);
66bool acpi_bay_match(acpi_handle handle);
67bool acpi_dock_match(acpi_handle handle);
68
59#ifdef CONFIG_ACPI 69#ifdef CONFIG_ACPI
60 70
61#include <linux/proc_fs.h> 71#include <linux/proc_fs.h>
@@ -352,8 +362,6 @@ extern int acpi_notifier_call_chain(struct acpi_device *, u32, u32);
352extern int register_acpi_notifier(struct notifier_block *); 362extern int register_acpi_notifier(struct notifier_block *);
353extern int unregister_acpi_notifier(struct notifier_block *); 363extern int unregister_acpi_notifier(struct notifier_block *);
354 364
355extern int register_acpi_bus_notifier(struct notifier_block *nb);
356extern void unregister_acpi_bus_notifier(struct notifier_block *nb);
357/* 365/*
358 * External Functions 366 * External Functions
359 */ 367 */
@@ -468,7 +476,8 @@ static inline int acpi_pm_device_sleep_state(struct device *d, int *p, int m)
468 if (p) 476 if (p)
469 *p = ACPI_STATE_D0; 477 *p = ACPI_STATE_D0;
470 478
471 return (m >= ACPI_STATE_D0 && m <= ACPI_STATE_D3) ? m : ACPI_STATE_D0; 479 return (m >= ACPI_STATE_D0 && m <= ACPI_STATE_D3_COLD) ?
480 m : ACPI_STATE_D0;
472} 481}
473static inline void acpi_dev_pm_add_dependent(acpi_handle handle, 482static inline void acpi_dev_pm_add_dependent(acpi_handle handle,
474 struct device *depdev) {} 483 struct device *depdev) {}
diff --git a/include/acpi/acpi_drivers.h b/include/acpi/acpi_drivers.h
index b420939f5eb5..1cedfcb1bd88 100644
--- a/include/acpi/acpi_drivers.h
+++ b/include/acpi/acpi_drivers.h
@@ -113,14 +113,13 @@ void pci_acpi_crs_quirks(void);
113 Dock Station 113 Dock Station
114 -------------------------------------------------------------------------- */ 114 -------------------------------------------------------------------------- */
115struct acpi_dock_ops { 115struct acpi_dock_ops {
116 acpi_notify_handler fixup;
116 acpi_notify_handler handler; 117 acpi_notify_handler handler;
117 acpi_notify_handler uevent; 118 acpi_notify_handler uevent;
118}; 119};
119 120
120#if defined(CONFIG_ACPI_DOCK) || defined(CONFIG_ACPI_DOCK_MODULE) 121#ifdef CONFIG_ACPI_DOCK
121extern int is_dock_device(acpi_handle handle); 122extern int is_dock_device(acpi_handle handle);
122extern int register_dock_notifier(struct notifier_block *nb);
123extern void unregister_dock_notifier(struct notifier_block *nb);
124extern int register_hotplug_dock_device(acpi_handle handle, 123extern int register_hotplug_dock_device(acpi_handle handle,
125 const struct acpi_dock_ops *ops, 124 const struct acpi_dock_ops *ops,
126 void *context, 125 void *context,
@@ -132,13 +131,6 @@ static inline int is_dock_device(acpi_handle handle)
132{ 131{
133 return 0; 132 return 0;
134} 133}
135static inline int register_dock_notifier(struct notifier_block *nb)
136{
137 return -ENODEV;
138}
139static inline void unregister_dock_notifier(struct notifier_block *nb)
140{
141}
142static inline int register_hotplug_dock_device(acpi_handle handle, 134static inline int register_hotplug_dock_device(acpi_handle handle,
143 const struct acpi_dock_ops *ops, 135 const struct acpi_dock_ops *ops,
144 void *context, 136 void *context,
@@ -150,6 +142,6 @@ static inline int register_hotplug_dock_device(acpi_handle handle,
150static inline void unregister_hotplug_dock_device(acpi_handle handle) 142static inline void unregister_hotplug_dock_device(acpi_handle handle)
151{ 143{
152} 144}
153#endif 145#endif /* CONFIG_ACPI_DOCK */
154 146
155#endif /*__ACPI_DRIVERS_H__*/ 147#endif /*__ACPI_DRIVERS_H__*/
diff --git a/include/acpi/acpixf.h b/include/acpi/acpixf.h
index 22d497ee6ef9..85bfdbe17805 100644
--- a/include/acpi/acpixf.h
+++ b/include/acpi/acpixf.h
@@ -46,7 +46,7 @@
46 46
47/* Current ACPICA subsystem version in YYYYMMDD format */ 47/* Current ACPICA subsystem version in YYYYMMDD format */
48 48
49#define ACPI_CA_VERSION 0x20130517 49#define ACPI_CA_VERSION 0x20130725
50 50
51#include <acpi/acconfig.h> 51#include <acpi/acconfig.h>
52#include <acpi/actypes.h> 52#include <acpi/actypes.h>
@@ -147,6 +147,8 @@ acpi_status acpi_install_interface(acpi_string interface_name);
147 147
148acpi_status acpi_remove_interface(acpi_string interface_name); 148acpi_status acpi_remove_interface(acpi_string interface_name);
149 149
150acpi_status acpi_update_interfaces(u8 action);
151
150u32 152u32
151acpi_check_address_range(acpi_adr_space_type space_id, 153acpi_check_address_range(acpi_adr_space_type space_id,
152 acpi_physical_address address, 154 acpi_physical_address address,
@@ -210,8 +212,8 @@ acpi_status
210acpi_walk_namespace(acpi_object_type type, 212acpi_walk_namespace(acpi_object_type type,
211 acpi_handle start_object, 213 acpi_handle start_object,
212 u32 max_depth, 214 u32 max_depth,
213 acpi_walk_callback pre_order_visit, 215 acpi_walk_callback descending_callback,
214 acpi_walk_callback post_order_visit, 216 acpi_walk_callback ascending_callback,
215 void *context, void **return_value); 217 void *context, void **return_value);
216 218
217acpi_status 219acpi_status
diff --git a/include/acpi/actypes.h b/include/acpi/actypes.h
index 22b03c9286e9..b748aefce929 100644
--- a/include/acpi/actypes.h
+++ b/include/acpi/actypes.h
@@ -668,13 +668,6 @@ typedef u32 acpi_event_status;
668#define ACPI_EVENT_FLAG_SET (acpi_event_status) 0x04 668#define ACPI_EVENT_FLAG_SET (acpi_event_status) 0x04
669#define ACPI_EVENT_FLAG_HANDLE (acpi_event_status) 0x08 669#define ACPI_EVENT_FLAG_HANDLE (acpi_event_status) 0x08
670 670
671/*
672 * General Purpose Events (GPE)
673 */
674#define ACPI_GPE_INVALID 0xFF
675#define ACPI_GPE_MAX 0xFF
676#define ACPI_NUM_GPE 256
677
678/* Actions for acpi_set_gpe, acpi_gpe_wakeup, acpi_hw_low_set_gpe */ 671/* Actions for acpi_set_gpe, acpi_gpe_wakeup, acpi_hw_low_set_gpe */
679 672
680#define ACPI_GPE_ENABLE 0 673#define ACPI_GPE_ENABLE 0
@@ -1144,7 +1137,19 @@ struct acpi_memory_list {
1144#endif 1137#endif
1145}; 1138};
1146 1139
1147/* Definitions for _OSI support */ 1140/* Definitions of _OSI support */
1141
1142#define ACPI_VENDOR_STRINGS 0x01
1143#define ACPI_FEATURE_STRINGS 0x02
1144#define ACPI_ENABLE_INTERFACES 0x00
1145#define ACPI_DISABLE_INTERFACES 0x04
1146
1147#define ACPI_DISABLE_ALL_VENDOR_STRINGS (ACPI_DISABLE_INTERFACES | ACPI_VENDOR_STRINGS)
1148#define ACPI_DISABLE_ALL_FEATURE_STRINGS (ACPI_DISABLE_INTERFACES | ACPI_FEATURE_STRINGS)
1149#define ACPI_DISABLE_ALL_STRINGS (ACPI_DISABLE_INTERFACES | ACPI_VENDOR_STRINGS | ACPI_FEATURE_STRINGS)
1150#define ACPI_ENABLE_ALL_VENDOR_STRINGS (ACPI_ENABLE_INTERFACES | ACPI_VENDOR_STRINGS)
1151#define ACPI_ENABLE_ALL_FEATURE_STRINGS (ACPI_ENABLE_INTERFACES | ACPI_FEATURE_STRINGS)
1152#define ACPI_ENABLE_ALL_STRINGS (ACPI_ENABLE_INTERFACES | ACPI_VENDOR_STRINGS | ACPI_FEATURE_STRINGS)
1148 1153
1149#define ACPI_OSI_WIN_2000 0x01 1154#define ACPI_OSI_WIN_2000 0x01
1150#define ACPI_OSI_WIN_XP 0x02 1155#define ACPI_OSI_WIN_XP 0x02
diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
index 2f47ade1b567..0807ddf97b05 100644
--- a/include/asm-generic/pgtable.h
+++ b/include/asm-generic/pgtable.h
@@ -417,6 +417,36 @@ static inline pmd_t pmd_mksoft_dirty(pmd_t pmd)
417{ 417{
418 return pmd; 418 return pmd;
419} 419}
420
421static inline pte_t pte_swp_mksoft_dirty(pte_t pte)
422{
423 return pte;
424}
425
426static inline int pte_swp_soft_dirty(pte_t pte)
427{
428 return 0;
429}
430
431static inline pte_t pte_swp_clear_soft_dirty(pte_t pte)
432{
433 return pte;
434}
435
436static inline pte_t pte_file_clear_soft_dirty(pte_t pte)
437{
438 return pte;
439}
440
441static inline pte_t pte_file_mksoft_dirty(pte_t pte)
442{
443 return pte;
444}
445
446static inline int pte_file_soft_dirty(pte_t pte)
447{
448 return 0;
449}
420#endif 450#endif
421 451
422#ifndef __HAVE_PFNMAP_TRACKING 452#ifndef __HAVE_PFNMAP_TRACKING
diff --git a/include/asm-generic/tlb.h b/include/asm-generic/tlb.h
index 13821c339a41..5672d7ea1fa0 100644
--- a/include/asm-generic/tlb.h
+++ b/include/asm-generic/tlb.h
@@ -112,7 +112,7 @@ struct mmu_gather {
112 112
113#define HAVE_GENERIC_MMU_GATHER 113#define HAVE_GENERIC_MMU_GATHER
114 114
115void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, bool fullmm); 115void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end);
116void tlb_flush_mmu(struct mmu_gather *tlb); 116void tlb_flush_mmu(struct mmu_gather *tlb);
117void tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, 117void tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start,
118 unsigned long end); 118 unsigned long end);
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index 353ba256f368..a5db4aeefa36 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -481,6 +481,13 @@ void acpi_os_set_prepare_sleep(int (*func)(u8 sleep_state,
481 481
482acpi_status acpi_os_prepare_sleep(u8 sleep_state, 482acpi_status acpi_os_prepare_sleep(u8 sleep_state,
483 u32 pm1a_control, u32 pm1b_control); 483 u32 pm1a_control, u32 pm1b_control);
484
485void acpi_os_set_prepare_extended_sleep(int (*func)(u8 sleep_state,
486 u32 val_a, u32 val_b));
487
488acpi_status acpi_os_prepare_extended_sleep(u8 sleep_state,
489 u32 val_a, u32 val_b);
490
484#ifdef CONFIG_X86 491#ifdef CONFIG_X86
485void arch_reserve_mem_area(acpi_physical_address addr, size_t size); 492void arch_reserve_mem_area(acpi_physical_address addr, size_t size);
486#else 493#else
diff --git a/include/linux/dcache.h b/include/linux/dcache.h
index b90337c9d468..4a12532da8c4 100644
--- a/include/linux/dcache.h
+++ b/include/linux/dcache.h
@@ -336,6 +336,7 @@ extern int d_validate(struct dentry *, struct dentry *);
336 * helper function for dentry_operations.d_dname() members 336 * helper function for dentry_operations.d_dname() members
337 */ 337 */
338extern char *dynamic_dname(struct dentry *, char *, int, const char *, ...); 338extern char *dynamic_dname(struct dentry *, char *, int, const char *, ...);
339extern char *simple_dname(struct dentry *, char *, int);
339 340
340extern char *__d_path(const struct path *, const struct path *, char *, int); 341extern char *__d_path(const struct path *, const struct path *, char *, int);
341extern char *d_absolute_path(const struct path *, char *, int); 342extern char *d_absolute_path(const struct path *, char *, int);
diff --git a/include/linux/inetdevice.h b/include/linux/inetdevice.h
index b99cd23f3474..79640e015a86 100644
--- a/include/linux/inetdevice.h
+++ b/include/linux/inetdevice.h
@@ -5,45 +5,13 @@
5 5
6#include <linux/bitmap.h> 6#include <linux/bitmap.h>
7#include <linux/if.h> 7#include <linux/if.h>
8#include <linux/ip.h>
8#include <linux/netdevice.h> 9#include <linux/netdevice.h>
9#include <linux/rcupdate.h> 10#include <linux/rcupdate.h>
10#include <linux/timer.h> 11#include <linux/timer.h>
11#include <linux/sysctl.h> 12#include <linux/sysctl.h>
12#include <linux/rtnetlink.h> 13#include <linux/rtnetlink.h>
13 14
14enum
15{
16 IPV4_DEVCONF_FORWARDING=1,
17 IPV4_DEVCONF_MC_FORWARDING,
18 IPV4_DEVCONF_PROXY_ARP,
19 IPV4_DEVCONF_ACCEPT_REDIRECTS,
20 IPV4_DEVCONF_SECURE_REDIRECTS,
21 IPV4_DEVCONF_SEND_REDIRECTS,
22 IPV4_DEVCONF_SHARED_MEDIA,
23 IPV4_DEVCONF_RP_FILTER,
24 IPV4_DEVCONF_ACCEPT_SOURCE_ROUTE,
25 IPV4_DEVCONF_BOOTP_RELAY,
26 IPV4_DEVCONF_LOG_MARTIANS,
27 IPV4_DEVCONF_TAG,
28 IPV4_DEVCONF_ARPFILTER,
29 IPV4_DEVCONF_MEDIUM_ID,
30 IPV4_DEVCONF_NOXFRM,
31 IPV4_DEVCONF_NOPOLICY,
32 IPV4_DEVCONF_FORCE_IGMP_VERSION,
33 IPV4_DEVCONF_ARP_ANNOUNCE,
34 IPV4_DEVCONF_ARP_IGNORE,
35 IPV4_DEVCONF_PROMOTE_SECONDARIES,
36 IPV4_DEVCONF_ARP_ACCEPT,
37 IPV4_DEVCONF_ARP_NOTIFY,
38 IPV4_DEVCONF_ACCEPT_LOCAL,
39 IPV4_DEVCONF_SRC_VMARK,
40 IPV4_DEVCONF_PROXY_ARP_PVLAN,
41 IPV4_DEVCONF_ROUTE_LOCALNET,
42 __IPV4_DEVCONF_MAX
43};
44
45#define IPV4_DEVCONF_MAX (__IPV4_DEVCONF_MAX - 1)
46
47struct ipv4_devconf { 15struct ipv4_devconf {
48 void *sysctl; 16 void *sysctl;
49 int data[IPV4_DEVCONF_MAX]; 17 int data[IPV4_DEVCONF_MAX];
diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h
index 850e95bc766c..b8b7dc755752 100644
--- a/include/linux/ipv6.h
+++ b/include/linux/ipv6.h
@@ -101,6 +101,7 @@ struct inet6_skb_parm {
101#define IP6SKB_FORWARDED 2 101#define IP6SKB_FORWARDED 2
102#define IP6SKB_REROUTED 4 102#define IP6SKB_REROUTED 4
103#define IP6SKB_ROUTERALERT 8 103#define IP6SKB_ROUTERALERT 8
104#define IP6SKB_FRAGMENTED 16
104}; 105};
105 106
106#define IP6CB(skb) ((struct inet6_skb_parm*)((skb)->cb)) 107#define IP6CB(skb) ((struct inet6_skb_parm*)((skb)->cb))
diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h
index 737685e9e852..68029b30c3dc 100644
--- a/include/linux/mlx5/device.h
+++ b/include/linux/mlx5/device.h
@@ -309,21 +309,20 @@ struct mlx5_hca_cap {
309 __be16 max_desc_sz_rq; 309 __be16 max_desc_sz_rq;
310 u8 rsvd21[2]; 310 u8 rsvd21[2];
311 __be16 max_desc_sz_sq_dc; 311 __be16 max_desc_sz_sq_dc;
312 u8 rsvd22[4]; 312 __be32 max_qp_mcg;
313 __be16 max_qp_mcg; 313 u8 rsvd22[3];
314 u8 rsvd23;
315 u8 log_max_mcg; 314 u8 log_max_mcg;
316 u8 rsvd24; 315 u8 rsvd23;
317 u8 log_max_pd; 316 u8 log_max_pd;
318 u8 rsvd25; 317 u8 rsvd24;
319 u8 log_max_xrcd; 318 u8 log_max_xrcd;
320 u8 rsvd26[42]; 319 u8 rsvd25[42];
321 __be16 log_uar_page_sz; 320 __be16 log_uar_page_sz;
322 u8 rsvd27[28]; 321 u8 rsvd26[28];
323 u8 log_msx_atomic_size_qp; 322 u8 log_msx_atomic_size_qp;
324 u8 rsvd28[2]; 323 u8 rsvd27[2];
325 u8 log_msx_atomic_size_dc; 324 u8 log_msx_atomic_size_dc;
326 u8 rsvd29[76]; 325 u8 rsvd28[76];
327}; 326};
328 327
329 328
@@ -472,9 +471,8 @@ struct mlx5_eqe_cmd {
472struct mlx5_eqe_page_req { 471struct mlx5_eqe_page_req {
473 u8 rsvd0[2]; 472 u8 rsvd0[2];
474 __be16 func_id; 473 __be16 func_id;
475 u8 rsvd1[2]; 474 __be32 num_pages;
476 __be16 num_pages; 475 __be32 rsvd1[5];
477 __be32 rsvd2[5];
478}; 476};
479 477
480union ev_data { 478union ev_data {
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
index 2aa258b0ced1..8888381fc150 100644
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -358,7 +358,7 @@ struct mlx5_caps {
358 u32 reserved_lkey; 358 u32 reserved_lkey;
359 u8 local_ca_ack_delay; 359 u8 local_ca_ack_delay;
360 u8 log_max_mcg; 360 u8 log_max_mcg;
361 u16 max_qp_mcg; 361 u32 max_qp_mcg;
362 int min_page_sz; 362 int min_page_sz;
363}; 363};
364 364
@@ -691,7 +691,7 @@ void mlx5_pagealloc_cleanup(struct mlx5_core_dev *dev);
691int mlx5_pagealloc_start(struct mlx5_core_dev *dev); 691int mlx5_pagealloc_start(struct mlx5_core_dev *dev);
692void mlx5_pagealloc_stop(struct mlx5_core_dev *dev); 692void mlx5_pagealloc_stop(struct mlx5_core_dev *dev);
693void mlx5_core_req_pages_handler(struct mlx5_core_dev *dev, u16 func_id, 693void mlx5_core_req_pages_handler(struct mlx5_core_dev *dev, u16 func_id,
694 s16 npages); 694 s32 npages);
695int mlx5_satisfy_startup_pages(struct mlx5_core_dev *dev, int boot); 695int mlx5_satisfy_startup_pages(struct mlx5_core_dev *dev, int boot);
696int mlx5_reclaim_startup_pages(struct mlx5_core_dev *dev); 696int mlx5_reclaim_startup_pages(struct mlx5_core_dev *dev);
697void mlx5_register_debugfs(void); 697void mlx5_register_debugfs(void);
@@ -731,9 +731,6 @@ void mlx5_cq_debugfs_cleanup(struct mlx5_core_dev *dev);
731int mlx5_db_alloc(struct mlx5_core_dev *dev, struct mlx5_db *db); 731int mlx5_db_alloc(struct mlx5_core_dev *dev, struct mlx5_db *db);
732void mlx5_db_free(struct mlx5_core_dev *dev, struct mlx5_db *db); 732void mlx5_db_free(struct mlx5_core_dev *dev, struct mlx5_db *db);
733 733
734typedef void (*health_handler_t)(struct pci_dev *pdev, struct health_buffer __iomem *buf, int size);
735int mlx5_register_health_report_handler(health_handler_t handler);
736void mlx5_unregister_health_report_handler(void);
737const char *mlx5_command_str(int command); 734const char *mlx5_command_str(int command);
738int mlx5_cmdif_debugfs_init(struct mlx5_core_dev *dev); 735int mlx5_cmdif_debugfs_init(struct mlx5_core_dev *dev);
739void mlx5_cmdif_debugfs_cleanup(struct mlx5_core_dev *dev); 736void mlx5_cmdif_debugfs_cleanup(struct mlx5_core_dev *dev);
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index fb425aa16c01..faf4b7c1ad12 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -332,6 +332,7 @@ struct mm_struct {
332 unsigned long pgoff, unsigned long flags); 332 unsigned long pgoff, unsigned long flags);
333#endif 333#endif
334 unsigned long mmap_base; /* base of mmap area */ 334 unsigned long mmap_base; /* base of mmap area */
335 unsigned long mmap_legacy_base; /* base of mmap area in bottom-up allocations */
335 unsigned long task_size; /* size of task vm space */ 336 unsigned long task_size; /* size of task vm space */
336 unsigned long highest_vm_end; /* highest vma end address */ 337 unsigned long highest_vm_end; /* highest vma end address */
337 pgd_t * pgd; 338 pgd_t * pgd;
diff --git a/include/linux/pci-acpi.h b/include/linux/pci-acpi.h
index 170447977278..d006f0ca60f4 100644
--- a/include/linux/pci-acpi.h
+++ b/include/linux/pci-acpi.h
@@ -47,24 +47,22 @@ void acpi_pci_remove_bus(struct pci_bus *bus);
47 47
48#ifdef CONFIG_ACPI_PCI_SLOT 48#ifdef CONFIG_ACPI_PCI_SLOT
49void acpi_pci_slot_init(void); 49void acpi_pci_slot_init(void);
50void acpi_pci_slot_enumerate(struct pci_bus *bus, acpi_handle handle); 50void acpi_pci_slot_enumerate(struct pci_bus *bus);
51void acpi_pci_slot_remove(struct pci_bus *bus); 51void acpi_pci_slot_remove(struct pci_bus *bus);
52#else 52#else
53static inline void acpi_pci_slot_init(void) { } 53static inline void acpi_pci_slot_init(void) { }
54static inline void acpi_pci_slot_enumerate(struct pci_bus *bus, 54static inline void acpi_pci_slot_enumerate(struct pci_bus *bus) { }
55 acpi_handle handle) { }
56static inline void acpi_pci_slot_remove(struct pci_bus *bus) { } 55static inline void acpi_pci_slot_remove(struct pci_bus *bus) { }
57#endif 56#endif
58 57
59#ifdef CONFIG_HOTPLUG_PCI_ACPI 58#ifdef CONFIG_HOTPLUG_PCI_ACPI
60void acpiphp_init(void); 59void acpiphp_init(void);
61void acpiphp_enumerate_slots(struct pci_bus *bus, acpi_handle handle); 60void acpiphp_enumerate_slots(struct pci_bus *bus);
62void acpiphp_remove_slots(struct pci_bus *bus); 61void acpiphp_remove_slots(struct pci_bus *bus);
63void acpiphp_check_host_bridge(acpi_handle handle); 62void acpiphp_check_host_bridge(acpi_handle handle);
64#else 63#else
65static inline void acpiphp_init(void) { } 64static inline void acpiphp_init(void) { }
66static inline void acpiphp_enumerate_slots(struct pci_bus *bus, 65static inline void acpiphp_enumerate_slots(struct pci_bus *bus) { }
67 acpi_handle handle) { }
68static inline void acpiphp_remove_slots(struct pci_bus *bus) { } 66static inline void acpiphp_remove_slots(struct pci_bus *bus) { }
69static inline void acpiphp_check_host_bridge(acpi_handle handle) { } 67static inline void acpiphp_check_host_bridge(acpi_handle handle) { }
70#endif 68#endif
diff --git a/include/linux/sched.h b/include/linux/sched.h
index d722490da030..078066daffd4 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1532,6 +1532,8 @@ static inline pid_t task_pgrp_nr(struct task_struct *tsk)
1532 * Test if a process is not yet dead (at most zombie state) 1532 * Test if a process is not yet dead (at most zombie state)
1533 * If pid_alive fails, then pointers within the task structure 1533 * If pid_alive fails, then pointers within the task structure
1534 * can be stale and must not be dereferenced. 1534 * can be stale and must not be dereferenced.
1535 *
1536 * Return: 1 if the process is alive. 0 otherwise.
1535 */ 1537 */
1536static inline int pid_alive(struct task_struct *p) 1538static inline int pid_alive(struct task_struct *p)
1537{ 1539{
@@ -1543,6 +1545,8 @@ static inline int pid_alive(struct task_struct *p)
1543 * @tsk: Task structure to be checked. 1545 * @tsk: Task structure to be checked.
1544 * 1546 *
1545 * Check if a task structure is the first user space task the kernel created. 1547 * Check if a task structure is the first user space task the kernel created.
1548 *
1549 * Return: 1 if the task structure is init. 0 otherwise.
1546 */ 1550 */
1547static inline int is_global_init(struct task_struct *tsk) 1551static inline int is_global_init(struct task_struct *tsk)
1548{ 1552{
@@ -1894,6 +1898,8 @@ extern struct task_struct *idle_task(int cpu);
1894/** 1898/**
1895 * is_idle_task - is the specified task an idle task? 1899 * is_idle_task - is the specified task an idle task?
1896 * @p: the task in question. 1900 * @p: the task in question.
1901 *
1902 * Return: 1 if @p is an idle task. 0 otherwise.
1897 */ 1903 */
1898static inline bool is_idle_task(const struct task_struct *p) 1904static inline bool is_idle_task(const struct task_struct *p)
1899{ 1905{
diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h
index 7d537ced949a..75f34949d9ab 100644
--- a/include/linux/spinlock.h
+++ b/include/linux/spinlock.h
@@ -117,9 +117,17 @@ do { \
117#endif /*arch_spin_is_contended*/ 117#endif /*arch_spin_is_contended*/
118#endif 118#endif
119 119
120/* The lock does not imply full memory barrier. */ 120/*
121#ifndef ARCH_HAS_SMP_MB_AFTER_LOCK 121 * Despite its name it doesn't necessarily have to be a full barrier.
122static inline void smp_mb__after_lock(void) { smp_mb(); } 122 * It should only guarantee that a STORE before the critical section
123 * can not be reordered with a LOAD inside this section.
124 * spin_lock() is the one-way barrier, this LOAD can not escape out
125 * of the region. So the default implementation simply ensures that
126 * a STORE can not move into the critical section, smp_wmb() should
127 * serialize it with another STORE done by spin_lock().
128 */
129#ifndef smp_mb__before_spinlock
130#define smp_mb__before_spinlock() smp_wmb()
123#endif 131#endif
124 132
125/** 133/**
diff --git a/include/linux/swapops.h b/include/linux/swapops.h
index c5fd30d2a415..8d4fa82bfb91 100644
--- a/include/linux/swapops.h
+++ b/include/linux/swapops.h
@@ -67,6 +67,8 @@ static inline swp_entry_t pte_to_swp_entry(pte_t pte)
67 swp_entry_t arch_entry; 67 swp_entry_t arch_entry;
68 68
69 BUG_ON(pte_file(pte)); 69 BUG_ON(pte_file(pte));
70 if (pte_swp_soft_dirty(pte))
71 pte = pte_swp_clear_soft_dirty(pte);
70 arch_entry = __pte_to_swp_entry(pte); 72 arch_entry = __pte_to_swp_entry(pte);
71 return swp_entry(__swp_type(arch_entry), __swp_offset(arch_entry)); 73 return swp_entry(__swp_type(arch_entry), __swp_offset(arch_entry));
72} 74}
diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
index 4147d700a293..84662ecc7b51 100644
--- a/include/linux/syscalls.h
+++ b/include/linux/syscalls.h
@@ -802,9 +802,14 @@ asmlinkage long sys_vfork(void);
802asmlinkage long sys_clone(unsigned long, unsigned long, int __user *, int, 802asmlinkage long sys_clone(unsigned long, unsigned long, int __user *, int,
803 int __user *); 803 int __user *);
804#else 804#else
805#ifdef CONFIG_CLONE_BACKWARDS3
806asmlinkage long sys_clone(unsigned long, unsigned long, int, int __user *,
807 int __user *, int);
808#else
805asmlinkage long sys_clone(unsigned long, unsigned long, int __user *, 809asmlinkage long sys_clone(unsigned long, unsigned long, int __user *,
806 int __user *, int); 810 int __user *, int);
807#endif 811#endif
812#endif
808 813
809asmlinkage long sys_execve(const char __user *filename, 814asmlinkage long sys_execve(const char __user *filename,
810 const char __user *const __user *argv, 815 const char __user *const __user *argv,
diff --git a/include/linux/wait.h b/include/linux/wait.h
index f487a4750b7f..a67fc1635592 100644
--- a/include/linux/wait.h
+++ b/include/linux/wait.h
@@ -811,6 +811,63 @@ do { \
811 __ret; \ 811 __ret; \
812}) 812})
813 813
814#define __wait_event_interruptible_lock_irq_timeout(wq, condition, \
815 lock, ret) \
816do { \
817 DEFINE_WAIT(__wait); \
818 \
819 for (;;) { \
820 prepare_to_wait(&wq, &__wait, TASK_INTERRUPTIBLE); \
821 if (condition) \
822 break; \
823 if (signal_pending(current)) { \
824 ret = -ERESTARTSYS; \
825 break; \
826 } \
827 spin_unlock_irq(&lock); \
828 ret = schedule_timeout(ret); \
829 spin_lock_irq(&lock); \
830 if (!ret) \
831 break; \
832 } \
833 finish_wait(&wq, &__wait); \
834} while (0)
835
836/**
837 * wait_event_interruptible_lock_irq_timeout - sleep until a condition becomes true or a timeout elapses.
838 * The condition is checked under the lock. This is expected
839 * to be called with the lock taken.
840 * @wq: the waitqueue to wait on
841 * @condition: a C expression for the event to wait for
842 * @lock: a locked spinlock_t, which will be released before schedule()
843 * and reacquired afterwards.
844 * @timeout: timeout, in jiffies
845 *
846 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
847 * @condition evaluates to true or signal is received. The @condition is
848 * checked each time the waitqueue @wq is woken up.
849 *
850 * wake_up() has to be called after changing any variable that could
851 * change the result of the wait condition.
852 *
853 * This is supposed to be called while holding the lock. The lock is
854 * dropped before going to sleep and is reacquired afterwards.
855 *
856 * The function returns 0 if the @timeout elapsed, -ERESTARTSYS if it
857 * was interrupted by a signal, and the remaining jiffies if the
858 * @condition evaluated to true before the @timeout elapsed.
859 */
860#define wait_event_interruptible_lock_irq_timeout(wq, condition, lock, \
861 timeout) \
862({ \
863 int __ret = timeout; \
864 \
865 if (!(condition)) \
866 __wait_event_interruptible_lock_irq_timeout( \
867 wq, condition, lock, __ret); \
868 __ret; \
869})
870
814 871
815/* 872/*
816 * These are the old interfaces to sleep waiting for an event. 873 * These are the old interfaces to sleep waiting for an event.
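The new wait_event_interruptible_lock_irq_timeout() helper above is easiest to see in a caller. The following is a minimal, hypothetical driver fragment (struct my_dev and my_dev_wait_ready are made-up names, not part of this patch) sketching the intended calling convention: the spinlock is held on entry, the macro drops and retakes it around schedule_timeout(), and the return value distinguishes timeout, signal and success.

#include <linux/errno.h>
#include <linux/jiffies.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/wait.h>

struct my_dev {
        spinlock_t lock;
        wait_queue_head_t wq;
        bool ready;             /* written under lock; writer calls wake_up(&wq) */
};

static int my_dev_wait_ready(struct my_dev *dev)
{
        long ret;

        spin_lock_irq(&dev->lock);
        /* Lock must already be held; it is released around schedule_timeout(). */
        ret = wait_event_interruptible_lock_irq_timeout(dev->wq, dev->ready,
                                                        dev->lock, HZ);
        spin_unlock_irq(&dev->lock);

        if (ret == 0)
                return -ETIMEDOUT;      /* timeout elapsed, condition still false */
        if (ret < 0)
                return ret;             /* -ERESTARTSYS: interrupted by a signal */
        return 0;                       /* condition became true within the timeout */
}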
diff --git a/include/net/busy_poll.h b/include/net/busy_poll.h
index f18b91966d3d..8a358a2c97e6 100644
--- a/include/net/busy_poll.h
+++ b/include/net/busy_poll.h
@@ -122,7 +122,7 @@ static inline bool sk_busy_loop(struct sock *sk, int nonblock)
122 if (rc > 0) 122 if (rc > 0)
123 /* local bh are disabled so it is ok to use _BH */ 123 /* local bh are disabled so it is ok to use _BH */
124 NET_ADD_STATS_BH(sock_net(sk), 124 NET_ADD_STATS_BH(sock_net(sk),
125 LINUX_MIB_LOWLATENCYRXPACKETS, rc); 125 LINUX_MIB_BUSYPOLLRXPACKETS, rc);
126 126
127 } while (!nonblock && skb_queue_empty(&sk->sk_receive_queue) && 127 } while (!nonblock && skb_queue_empty(&sk->sk_receive_queue) &&
128 !need_resched() && !busy_loop_timeout(end_time)); 128 !need_resched() && !busy_loop_timeout(end_time));
@@ -162,11 +162,6 @@ static inline bool sk_can_busy_loop(struct sock *sk)
162 return false; 162 return false;
163} 163}
164 164
165static inline bool sk_busy_poll(struct sock *sk, int nonblock)
166{
167 return false;
168}
169
170static inline void skb_mark_napi_id(struct sk_buff *skb, 165static inline void skb_mark_napi_id(struct sk_buff *skb,
171 struct napi_struct *napi) 166 struct napi_struct *napi)
172{ 167{
diff --git a/include/net/ip6_route.h b/include/net/ip6_route.h
index 260f83f16bcf..f667248202b6 100644
--- a/include/net/ip6_route.h
+++ b/include/net/ip6_route.h
@@ -135,6 +135,8 @@ extern void ip6_update_pmtu(struct sk_buff *skb, struct net *net, __be32 mtu,
135extern void ip6_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, 135extern void ip6_sk_update_pmtu(struct sk_buff *skb, struct sock *sk,
136 __be32 mtu); 136 __be32 mtu);
137extern void ip6_redirect(struct sk_buff *skb, struct net *net, int oif, u32 mark); 137extern void ip6_redirect(struct sk_buff *skb, struct net *net, int oif, u32 mark);
138extern void ip6_redirect_no_header(struct sk_buff *skb, struct net *net, int oif,
139 u32 mark);
138extern void ip6_sk_redirect(struct sk_buff *skb, struct sock *sk); 140extern void ip6_sk_redirect(struct sk_buff *skb, struct sock *sk);
139 141
140struct netlink_callback; 142struct netlink_callback;
diff --git a/include/net/ip_tunnels.h b/include/net/ip_tunnels.h
index 781b3cf86a2f..a354db5b7662 100644
--- a/include/net/ip_tunnels.h
+++ b/include/net/ip_tunnels.h
@@ -145,20 +145,6 @@ static inline u8 ip_tunnel_ecn_encap(u8 tos, const struct iphdr *iph,
145 return INET_ECN_encapsulate(tos, inner); 145 return INET_ECN_encapsulate(tos, inner);
146} 146}
147 147
148static inline void tunnel_ip_select_ident(struct sk_buff *skb,
149 const struct iphdr *old_iph,
150 struct dst_entry *dst)
151{
152 struct iphdr *iph = ip_hdr(skb);
153
154 /* Use inner packet iph-id if possible. */
155 if (skb->protocol == htons(ETH_P_IP) && old_iph->id)
156 iph->id = old_iph->id;
157 else
158 __ip_select_ident(iph, dst,
159 (skb_shinfo(skb)->gso_segs ?: 1) - 1);
160}
161
162int iptunnel_pull_header(struct sk_buff *skb, int hdr_len, __be16 inner_proto); 148int iptunnel_pull_header(struct sk_buff *skb, int hdr_len, __be16 inner_proto);
163int iptunnel_xmit(struct net *net, struct rtable *rt, 149int iptunnel_xmit(struct net *net, struct rtable *rt,
164 struct sk_buff *skb, 150 struct sk_buff *skb,
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
index 6eab63363e59..e5ae0c50fa9c 100644
--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@ -683,13 +683,19 @@ struct psched_ratecfg {
683 u64 rate_bytes_ps; /* bytes per second */ 683 u64 rate_bytes_ps; /* bytes per second */
684 u32 mult; 684 u32 mult;
685 u16 overhead; 685 u16 overhead;
686 u8 linklayer;
686 u8 shift; 687 u8 shift;
687}; 688};
688 689
689static inline u64 psched_l2t_ns(const struct psched_ratecfg *r, 690static inline u64 psched_l2t_ns(const struct psched_ratecfg *r,
690 unsigned int len) 691 unsigned int len)
691{ 692{
692 return ((u64)(len + r->overhead) * r->mult) >> r->shift; 693 len += r->overhead;
694
695 if (unlikely(r->linklayer == TC_LINKLAYER_ATM))
696 return ((u64)(DIV_ROUND_UP(len,48)*53) * r->mult) >> r->shift;
697
698 return ((u64)len * r->mult) >> r->shift;
693} 699}
694 700
695extern void psched_ratecfg_precompute(struct psched_ratecfg *r, const struct tc_ratespec *conf); 701extern void psched_ratecfg_precompute(struct psched_ratecfg *r, const struct tc_ratespec *conf);
@@ -700,6 +706,7 @@ static inline void psched_ratecfg_getrate(struct tc_ratespec *res,
700 memset(res, 0, sizeof(*res)); 706 memset(res, 0, sizeof(*res));
701 res->rate = r->rate_bytes_ps; 707 res->rate = r->rate_bytes_ps;
702 res->overhead = r->overhead; 708 res->overhead = r->overhead;
709 res->linklayer = (r->linklayer & TC_LINKLAYER_MASK);
703} 710}
704 711
705#endif 712#endif
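The TC_LINKLAYER_ATM branch added to psched_l2t_ns() above accounts for ATM framing: every 48 bytes of payload occupy one 53-byte cell, so the effective wire length is DIV_ROUND_UP(len, 48) * 53 before the rate is applied. A standalone illustration of that arithmetic (ordinary userspace C, not kernel code):

#include <stdio.h>

/* Same cell accounting as the kernel helper: ceil(len / 48) cells of 53 bytes. */
static unsigned int atm_wire_len(unsigned int len)
{
        unsigned int cells = (len + 47) / 48;   /* DIV_ROUND_UP(len, 48) */

        return cells * 53;
}

int main(void)
{
        /* A 1500-byte packet needs 32 cells: 32 * 53 = 1696 bytes on the wire. */
        printf("%u -> %u\n", 1500u, atm_wire_len(1500));
        return 0;
}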
diff --git a/include/uapi/linux/ip.h b/include/uapi/linux/ip.h
index 6cf06bfd841b..2fee45bdec0a 100644
--- a/include/uapi/linux/ip.h
+++ b/include/uapi/linux/ip.h
@@ -133,4 +133,38 @@ struct ip_beet_phdr {
133 __u8 reserved; 133 __u8 reserved;
134}; 134};
135 135
136/* index values for the variables in ipv4_devconf */
137enum
138{
139 IPV4_DEVCONF_FORWARDING=1,
140 IPV4_DEVCONF_MC_FORWARDING,
141 IPV4_DEVCONF_PROXY_ARP,
142 IPV4_DEVCONF_ACCEPT_REDIRECTS,
143 IPV4_DEVCONF_SECURE_REDIRECTS,
144 IPV4_DEVCONF_SEND_REDIRECTS,
145 IPV4_DEVCONF_SHARED_MEDIA,
146 IPV4_DEVCONF_RP_FILTER,
147 IPV4_DEVCONF_ACCEPT_SOURCE_ROUTE,
148 IPV4_DEVCONF_BOOTP_RELAY,
149 IPV4_DEVCONF_LOG_MARTIANS,
150 IPV4_DEVCONF_TAG,
151 IPV4_DEVCONF_ARPFILTER,
152 IPV4_DEVCONF_MEDIUM_ID,
153 IPV4_DEVCONF_NOXFRM,
154 IPV4_DEVCONF_NOPOLICY,
155 IPV4_DEVCONF_FORCE_IGMP_VERSION,
156 IPV4_DEVCONF_ARP_ANNOUNCE,
157 IPV4_DEVCONF_ARP_IGNORE,
158 IPV4_DEVCONF_PROMOTE_SECONDARIES,
159 IPV4_DEVCONF_ARP_ACCEPT,
160 IPV4_DEVCONF_ARP_NOTIFY,
161 IPV4_DEVCONF_ACCEPT_LOCAL,
162 IPV4_DEVCONF_SRC_VMARK,
163 IPV4_DEVCONF_PROXY_ARP_PVLAN,
164 IPV4_DEVCONF_ROUTE_LOCALNET,
165 __IPV4_DEVCONF_MAX
166};
167
168#define IPV4_DEVCONF_MAX (__IPV4_DEVCONF_MAX - 1)
169
136#endif /* _UAPI_LINUX_IP_H */ 170#endif /* _UAPI_LINUX_IP_H */
diff --git a/include/uapi/linux/pkt_sched.h b/include/uapi/linux/pkt_sched.h
index dbd71b0c7d8c..09d62b9228ff 100644
--- a/include/uapi/linux/pkt_sched.h
+++ b/include/uapi/linux/pkt_sched.h
@@ -73,9 +73,17 @@ struct tc_estimator {
73#define TC_H_ROOT (0xFFFFFFFFU) 73#define TC_H_ROOT (0xFFFFFFFFU)
74#define TC_H_INGRESS (0xFFFFFFF1U) 74#define TC_H_INGRESS (0xFFFFFFF1U)
75 75
76/* Needs to correspond to iproute2 tc/tc_core.h "enum link_layer" */
77enum tc_link_layer {
78 TC_LINKLAYER_UNAWARE, /* Indicate unaware old iproute2 util */
79 TC_LINKLAYER_ETHERNET,
80 TC_LINKLAYER_ATM,
81};
82#define TC_LINKLAYER_MASK 0x0F /* limit use to lower 4 bits */
83
76struct tc_ratespec { 84struct tc_ratespec {
77 unsigned char cell_log; 85 unsigned char cell_log;
78 unsigned char __reserved; 86 __u8 linklayer; /* lower 4 bits */
79 unsigned short overhead; 87 unsigned short overhead;
80 short cell_align; 88 short cell_align;
81 unsigned short mpu; 89 unsigned short mpu;
diff --git a/include/uapi/linux/snmp.h b/include/uapi/linux/snmp.h
index af0a674cc677..a1356d3b54df 100644
--- a/include/uapi/linux/snmp.h
+++ b/include/uapi/linux/snmp.h
@@ -253,7 +253,7 @@ enum
253 LINUX_MIB_TCPFASTOPENLISTENOVERFLOW, /* TCPFastOpenListenOverflow */ 253 LINUX_MIB_TCPFASTOPENLISTENOVERFLOW, /* TCPFastOpenListenOverflow */
254 LINUX_MIB_TCPFASTOPENCOOKIEREQD, /* TCPFastOpenCookieReqd */ 254 LINUX_MIB_TCPFASTOPENCOOKIEREQD, /* TCPFastOpenCookieReqd */
255 LINUX_MIB_TCPSPURIOUS_RTX_HOSTQUEUES, /* TCPSpuriousRtxHostQueues */ 255 LINUX_MIB_TCPSPURIOUS_RTX_HOSTQUEUES, /* TCPSpuriousRtxHostQueues */
256 LINUX_MIB_LOWLATENCYRXPACKETS, /* LowLatencyRxPackets */ 256 LINUX_MIB_BUSYPOLLRXPACKETS, /* BusyPollRxPackets */
257 __LINUX_MIB_MAX 257 __LINUX_MIB_MAX
258}; 258};
259 259
diff --git a/include/xen/acpi.h b/include/xen/acpi.h
index 46aa3d1c1654..4ddd7dc4a61e 100644
--- a/include/xen/acpi.h
+++ b/include/xen/acpi.h
@@ -75,8 +75,10 @@ static inline int xen_acpi_get_pxm(acpi_handle h)
75 return -ENXIO; 75 return -ENXIO;
76} 76}
77 77
78int xen_acpi_notify_hypervisor_state(u8 sleep_state, 78int xen_acpi_notify_hypervisor_sleep(u8 sleep_state,
79 u32 pm1a_cnt, u32 pm1b_cnd); 79 u32 pm1a_cnt, u32 pm1b_cnd);
80int xen_acpi_notify_hypervisor_extended_sleep(u8 sleep_state,
81 u32 val_a, u32 val_b);
80 82
81static inline int xen_acpi_suspend_lowlevel(void) 83static inline int xen_acpi_suspend_lowlevel(void)
82{ 84{
@@ -93,7 +95,9 @@ static inline void xen_acpi_sleep_register(void)
93{ 95{
94 if (xen_initial_domain()) { 96 if (xen_initial_domain()) {
95 acpi_os_set_prepare_sleep( 97 acpi_os_set_prepare_sleep(
96 &xen_acpi_notify_hypervisor_state); 98 &xen_acpi_notify_hypervisor_sleep);
99 acpi_os_set_prepare_extended_sleep(
100 &xen_acpi_notify_hypervisor_extended_sleep);
97 101
98 acpi_suspend_lowlevel = xen_acpi_suspend_lowlevel; 102 acpi_suspend_lowlevel = xen_acpi_suspend_lowlevel;
99 } 103 }
diff --git a/include/xen/interface/platform.h b/include/xen/interface/platform.h
index c57d5f67f702..f1331e3e7271 100644
--- a/include/xen/interface/platform.h
+++ b/include/xen/interface/platform.h
@@ -152,10 +152,11 @@ DEFINE_GUEST_HANDLE_STRUCT(xenpf_firmware_info_t);
152#define XENPF_enter_acpi_sleep 51 152#define XENPF_enter_acpi_sleep 51
153struct xenpf_enter_acpi_sleep { 153struct xenpf_enter_acpi_sleep {
154 /* IN variables */ 154 /* IN variables */
155 uint16_t pm1a_cnt_val; /* PM1a control value. */ 155 uint16_t val_a; /* PM1a control / sleep type A. */
156 uint16_t pm1b_cnt_val; /* PM1b control value. */ 156 uint16_t val_b; /* PM1b control / sleep type B. */
157 uint32_t sleep_state; /* Which state to enter (Sn). */ 157 uint32_t sleep_state; /* Which state to enter (Sn). */
158 uint32_t flags; /* Must be zero. */ 158#define XENPF_ACPI_SLEEP_EXTENDED 0x00000001
159 uint32_t flags; /* XENPF_ACPI_SLEEP_*. */
159}; 160};
160DEFINE_GUEST_HANDLE_STRUCT(xenpf_enter_acpi_sleep_t); 161DEFINE_GUEST_HANDLE_STRUCT(xenpf_enter_acpi_sleep_t);
161 162
diff --git a/init/Kconfig b/init/Kconfig
index 247084be0590..fed81b576f29 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -955,7 +955,7 @@ config MEMCG_SWAP_ENABLED
955 Memory Resource Controller Swap Extension comes with its price in 955 Memory Resource Controller Swap Extension comes with its price in
956 a bigger memory consumption. General purpose distribution kernels 956 a bigger memory consumption. General purpose distribution kernels
957 which want to enable the feature but keep it disabled by default 957 which want to enable the feature but keep it disabled by default
958 and let the user enable it by swapaccount boot command line 958 and let the user enable it by swapaccount=1 boot command line
959 parameter should have this option unselected. 959 parameter should have this option unselected.
960 For those who want to have the feature enabled by default should 960 For those who want to have the feature enabled by default should
961 select this option (if, for some reason, they need to disable it 961 select this option (if, for some reason, they need to disable it
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index e5657788fedd..ea1966db34f2 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -475,13 +475,17 @@ static int validate_change(const struct cpuset *cur, const struct cpuset *trial)
475 475
476 /* 476 /*
477 * Cpusets with tasks - existing or newly being attached - can't 477 * Cpusets with tasks - existing or newly being attached - can't
478 * have empty cpus_allowed or mems_allowed. 478 * be changed to have empty cpus_allowed or mems_allowed.
479 */ 479 */
480 ret = -ENOSPC; 480 ret = -ENOSPC;
481 if ((cgroup_task_count(cur->css.cgroup) || cur->attach_in_progress) && 481 if ((cgroup_task_count(cur->css.cgroup) || cur->attach_in_progress)) {
482 (cpumask_empty(trial->cpus_allowed) && 482 if (!cpumask_empty(cur->cpus_allowed) &&
483 nodes_empty(trial->mems_allowed))) 483 cpumask_empty(trial->cpus_allowed))
484 goto out; 484 goto out;
485 if (!nodes_empty(cur->mems_allowed) &&
486 nodes_empty(trial->mems_allowed))
487 goto out;
488 }
485 489
486 ret = 0; 490 ret = 0;
487out: 491out:
@@ -1608,11 +1612,13 @@ static int cpuset_write_u64(struct cgroup *cgrp, struct cftype *cft, u64 val)
1608{ 1612{
1609 struct cpuset *cs = cgroup_cs(cgrp); 1613 struct cpuset *cs = cgroup_cs(cgrp);
1610 cpuset_filetype_t type = cft->private; 1614 cpuset_filetype_t type = cft->private;
1611 int retval = -ENODEV; 1615 int retval = 0;
1612 1616
1613 mutex_lock(&cpuset_mutex); 1617 mutex_lock(&cpuset_mutex);
1614 if (!is_cpuset_online(cs)) 1618 if (!is_cpuset_online(cs)) {
1619 retval = -ENODEV;
1615 goto out_unlock; 1620 goto out_unlock;
1621 }
1616 1622
1617 switch (type) { 1623 switch (type) {
1618 case FILE_CPU_EXCLUSIVE: 1624 case FILE_CPU_EXCLUSIVE:
diff --git a/kernel/fork.c b/kernel/fork.c
index 403d2bb8a968..e23bb19e2a3e 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -1679,6 +1679,12 @@ SYSCALL_DEFINE5(clone, unsigned long, newsp, unsigned long, clone_flags,
1679 int __user *, parent_tidptr, 1679 int __user *, parent_tidptr,
1680 int __user *, child_tidptr, 1680 int __user *, child_tidptr,
1681 int, tls_val) 1681 int, tls_val)
1682#elif defined(CONFIG_CLONE_BACKWARDS3)
1683SYSCALL_DEFINE6(clone, unsigned long, clone_flags, unsigned long, newsp,
1684 int, stack_size,
1685 int __user *, parent_tidptr,
1686 int __user *, child_tidptr,
1687 int, tls_val)
1682#else 1688#else
1683SYSCALL_DEFINE5(clone, unsigned long, clone_flags, unsigned long, newsp, 1689SYSCALL_DEFINE5(clone, unsigned long, clone_flags, unsigned long, newsp,
1684 int __user *, parent_tidptr, 1690 int __user *, parent_tidptr,
diff --git a/kernel/mutex.c b/kernel/mutex.c
index ff05f4bd86eb..a52ee7bb830d 100644
--- a/kernel/mutex.c
+++ b/kernel/mutex.c
@@ -686,7 +686,7 @@ __ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
686 might_sleep(); 686 might_sleep();
687 ret = __mutex_lock_common(&lock->base, TASK_UNINTERRUPTIBLE, 687 ret = __mutex_lock_common(&lock->base, TASK_UNINTERRUPTIBLE,
688 0, &ctx->dep_map, _RET_IP_, ctx); 688 0, &ctx->dep_map, _RET_IP_, ctx);
689 if (!ret && ctx->acquired > 0) 689 if (!ret && ctx->acquired > 1)
690 return ww_mutex_deadlock_injection(lock, ctx); 690 return ww_mutex_deadlock_injection(lock, ctx);
691 691
692 return ret; 692 return ret;
@@ -702,7 +702,7 @@ __ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
702 ret = __mutex_lock_common(&lock->base, TASK_INTERRUPTIBLE, 702 ret = __mutex_lock_common(&lock->base, TASK_INTERRUPTIBLE,
703 0, &ctx->dep_map, _RET_IP_, ctx); 703 0, &ctx->dep_map, _RET_IP_, ctx);
704 704
705 if (!ret && ctx->acquired > 0) 705 if (!ret && ctx->acquired > 1)
706 return ww_mutex_deadlock_injection(lock, ctx); 706 return ww_mutex_deadlock_injection(lock, ctx);
707 707
708 return ret; 708 return ret;
diff --git a/kernel/power/qos.c b/kernel/power/qos.c
index 06fe28589e9c..a394297f8b2f 100644
--- a/kernel/power/qos.c
+++ b/kernel/power/qos.c
@@ -296,6 +296,17 @@ int pm_qos_request_active(struct pm_qos_request *req)
296} 296}
297EXPORT_SYMBOL_GPL(pm_qos_request_active); 297EXPORT_SYMBOL_GPL(pm_qos_request_active);
298 298
299static void __pm_qos_update_request(struct pm_qos_request *req,
300 s32 new_value)
301{
302 trace_pm_qos_update_request(req->pm_qos_class, new_value);
303
304 if (new_value != req->node.prio)
305 pm_qos_update_target(
306 pm_qos_array[req->pm_qos_class]->constraints,
307 &req->node, PM_QOS_UPDATE_REQ, new_value);
308}
309
299/** 310/**
300 * pm_qos_work_fn - the timeout handler of pm_qos_update_request_timeout 311 * pm_qos_work_fn - the timeout handler of pm_qos_update_request_timeout
301 * @work: work struct for the delayed work (timeout) 312 * @work: work struct for the delayed work (timeout)
@@ -308,7 +319,7 @@ static void pm_qos_work_fn(struct work_struct *work)
308 struct pm_qos_request, 319 struct pm_qos_request,
309 work); 320 work);
310 321
311 pm_qos_update_request(req, PM_QOS_DEFAULT_VALUE); 322 __pm_qos_update_request(req, PM_QOS_DEFAULT_VALUE);
312} 323}
313 324
314/** 325/**
@@ -364,12 +375,7 @@ void pm_qos_update_request(struct pm_qos_request *req,
364 } 375 }
365 376
366 cancel_delayed_work_sync(&req->work); 377 cancel_delayed_work_sync(&req->work);
367 378 __pm_qos_update_request(req, new_value);
368 trace_pm_qos_update_request(req->pm_qos_class, new_value);
369 if (new_value != req->node.prio)
370 pm_qos_update_target(
371 pm_qos_array[req->pm_qos_class]->constraints,
372 &req->node, PM_QOS_UPDATE_REQ, new_value);
373} 379}
374EXPORT_SYMBOL_GPL(pm_qos_update_request); 380EXPORT_SYMBOL_GPL(pm_qos_update_request);
375 381
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index b7c32cb7bfeb..05c39f030314 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -933,6 +933,8 @@ static int effective_prio(struct task_struct *p)
933/** 933/**
934 * task_curr - is this task currently executing on a CPU? 934 * task_curr - is this task currently executing on a CPU?
935 * @p: the task in question. 935 * @p: the task in question.
936 *
937 * Return: 1 if the task is currently executing. 0 otherwise.
936 */ 938 */
937inline int task_curr(const struct task_struct *p) 939inline int task_curr(const struct task_struct *p)
938{ 940{
@@ -1482,7 +1484,7 @@ static void ttwu_queue(struct task_struct *p, int cpu)
1482 * the simpler "current->state = TASK_RUNNING" to mark yourself 1484 * the simpler "current->state = TASK_RUNNING" to mark yourself
1483 * runnable without the overhead of this. 1485 * runnable without the overhead of this.
1484 * 1486 *
1485 * Returns %true if @p was woken up, %false if it was already running 1487 * Return: %true if @p was woken up, %false if it was already running.
1486 * or @state didn't match @p's state. 1488 * or @state didn't match @p's state.
1487 */ 1489 */
1488static int 1490static int
@@ -1491,7 +1493,13 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
1491 unsigned long flags; 1493 unsigned long flags;
1492 int cpu, success = 0; 1494 int cpu, success = 0;
1493 1495
1494 smp_wmb(); 1496 /*
1497 * If we are going to wake up a thread waiting for CONDITION we
1498 * need to ensure that CONDITION=1 done by the caller can not be
1499 * reordered with p->state check below. This pairs with mb() in
1500 * set_current_state() the waiting thread does.
1501 */
1502 smp_mb__before_spinlock();
1495 raw_spin_lock_irqsave(&p->pi_lock, flags); 1503 raw_spin_lock_irqsave(&p->pi_lock, flags);
1496 if (!(p->state & state)) 1504 if (!(p->state & state))
1497 goto out; 1505 goto out;
@@ -1577,8 +1585,9 @@ out:
1577 * @p: The process to be woken up. 1585 * @p: The process to be woken up.
1578 * 1586 *
1579 * Attempt to wake up the nominated process and move it to the set of runnable 1587 * Attempt to wake up the nominated process and move it to the set of runnable
1580 * processes. Returns 1 if the process was woken up, 0 if it was already 1588 * processes.
1581 * running. 1589 *
1590 * Return: 1 if the process was woken up, 0 if it was already running.
1582 * 1591 *
1583 * It may be assumed that this function implies a write memory barrier before 1592 * It may be assumed that this function implies a write memory barrier before
1584 * changing the task state if and only if any tasks are woken up. 1593 * changing the task state if and only if any tasks are woken up.
@@ -2191,6 +2200,8 @@ void scheduler_tick(void)
2191 * This makes sure that uptime, CFS vruntime, load 2200 * This makes sure that uptime, CFS vruntime, load
2192 * balancing, etc... continue to move forward, even 2201 * balancing, etc... continue to move forward, even
2193 * with a very low granularity. 2202 * with a very low granularity.
2203 *
2204 * Return: Maximum deferment in nanoseconds.
2194 */ 2205 */
2195u64 scheduler_tick_max_deferment(void) 2206u64 scheduler_tick_max_deferment(void)
2196{ 2207{
@@ -2394,6 +2405,12 @@ need_resched:
2394 if (sched_feat(HRTICK)) 2405 if (sched_feat(HRTICK))
2395 hrtick_clear(rq); 2406 hrtick_clear(rq);
2396 2407
2408 /*
2409 * Make sure that signal_pending_state()->signal_pending() below
2410 * can't be reordered with __set_current_state(TASK_INTERRUPTIBLE)
2411 * done by the caller to avoid the race with signal_wake_up().
2412 */
2413 smp_mb__before_spinlock();
2397 raw_spin_lock_irq(&rq->lock); 2414 raw_spin_lock_irq(&rq->lock);
2398 2415
2399 switch_count = &prev->nivcsw; 2416 switch_count = &prev->nivcsw;
@@ -2796,8 +2813,8 @@ EXPORT_SYMBOL(wait_for_completion);
2796 * specified timeout to expire. The timeout is in jiffies. It is not 2813 * specified timeout to expire. The timeout is in jiffies. It is not
2797 * interruptible. 2814 * interruptible.
2798 * 2815 *
2799 * The return value is 0 if timed out, and positive (at least 1, or number of 2816 * Return: 0 if timed out, and positive (at least 1, or number of jiffies left
2800 * jiffies left till timeout) if completed. 2817 * till timeout) if completed.
2801 */ 2818 */
2802unsigned long __sched 2819unsigned long __sched
2803wait_for_completion_timeout(struct completion *x, unsigned long timeout) 2820wait_for_completion_timeout(struct completion *x, unsigned long timeout)
@@ -2829,8 +2846,8 @@ EXPORT_SYMBOL(wait_for_completion_io);
2829 * specified timeout to expire. The timeout is in jiffies. It is not 2846 * specified timeout to expire. The timeout is in jiffies. It is not
2830 * interruptible. The caller is accounted as waiting for IO. 2847 * interruptible. The caller is accounted as waiting for IO.
2831 * 2848 *
2832 * The return value is 0 if timed out, and positive (at least 1, or number of 2849 * Return: 0 if timed out, and positive (at least 1, or number of jiffies left
2833 * jiffies left till timeout) if completed. 2850 * till timeout) if completed.
2834 */ 2851 */
2835unsigned long __sched 2852unsigned long __sched
2836wait_for_completion_io_timeout(struct completion *x, unsigned long timeout) 2853wait_for_completion_io_timeout(struct completion *x, unsigned long timeout)
@@ -2846,7 +2863,7 @@ EXPORT_SYMBOL(wait_for_completion_io_timeout);
2846 * This waits for completion of a specific task to be signaled. It is 2863 * This waits for completion of a specific task to be signaled. It is
2847 * interruptible. 2864 * interruptible.
2848 * 2865 *
2849 * The return value is -ERESTARTSYS if interrupted, 0 if completed. 2866 * Return: -ERESTARTSYS if interrupted, 0 if completed.
2850 */ 2867 */
2851int __sched wait_for_completion_interruptible(struct completion *x) 2868int __sched wait_for_completion_interruptible(struct completion *x)
2852{ 2869{
@@ -2865,8 +2882,8 @@ EXPORT_SYMBOL(wait_for_completion_interruptible);
2865 * This waits for either a completion of a specific task to be signaled or for a 2882 * This waits for either a completion of a specific task to be signaled or for a
2866 * specified timeout to expire. It is interruptible. The timeout is in jiffies. 2883 * specified timeout to expire. It is interruptible. The timeout is in jiffies.
2867 * 2884 *
2868 * The return value is -ERESTARTSYS if interrupted, 0 if timed out, 2885 * Return: -ERESTARTSYS if interrupted, 0 if timed out, positive (at least 1,
2869 * positive (at least 1, or number of jiffies left till timeout) if completed. 2886 * or number of jiffies left till timeout) if completed.
2870 */ 2887 */
2871long __sched 2888long __sched
2872wait_for_completion_interruptible_timeout(struct completion *x, 2889wait_for_completion_interruptible_timeout(struct completion *x,
@@ -2883,7 +2900,7 @@ EXPORT_SYMBOL(wait_for_completion_interruptible_timeout);
2883 * This waits to be signaled for completion of a specific task. It can be 2900 * This waits to be signaled for completion of a specific task. It can be
2884 * interrupted by a kill signal. 2901 * interrupted by a kill signal.
2885 * 2902 *
2886 * The return value is -ERESTARTSYS if interrupted, 0 if completed. 2903 * Return: -ERESTARTSYS if interrupted, 0 if completed.
2887 */ 2904 */
2888int __sched wait_for_completion_killable(struct completion *x) 2905int __sched wait_for_completion_killable(struct completion *x)
2889{ 2906{
@@ -2903,8 +2920,8 @@ EXPORT_SYMBOL(wait_for_completion_killable);
2903 * signaled or for a specified timeout to expire. It can be 2920 * signaled or for a specified timeout to expire. It can be
2904 * interrupted by a kill signal. The timeout is in jiffies. 2921 * interrupted by a kill signal. The timeout is in jiffies.
2905 * 2922 *
2906 * The return value is -ERESTARTSYS if interrupted, 0 if timed out, 2923 * Return: -ERESTARTSYS if interrupted, 0 if timed out, positive (at least 1,
2907 * positive (at least 1, or number of jiffies left till timeout) if completed. 2924 * or number of jiffies left till timeout) if completed.
2908 */ 2925 */
2909long __sched 2926long __sched
2910wait_for_completion_killable_timeout(struct completion *x, 2927wait_for_completion_killable_timeout(struct completion *x,
@@ -2918,7 +2935,7 @@ EXPORT_SYMBOL(wait_for_completion_killable_timeout);
2918 * try_wait_for_completion - try to decrement a completion without blocking 2935 * try_wait_for_completion - try to decrement a completion without blocking
2919 * @x: completion structure 2936 * @x: completion structure
2920 * 2937 *
2921 * Returns: 0 if a decrement cannot be done without blocking 2938 * Return: 0 if a decrement cannot be done without blocking
2922 * 1 if a decrement succeeded. 2939 * 1 if a decrement succeeded.
2923 * 2940 *
2924 * If a completion is being used as a counting completion, 2941 * If a completion is being used as a counting completion,
@@ -2945,7 +2962,7 @@ EXPORT_SYMBOL(try_wait_for_completion);
2945 * completion_done - Test to see if a completion has any waiters 2962 * completion_done - Test to see if a completion has any waiters
2946 * @x: completion structure 2963 * @x: completion structure
2947 * 2964 *
2948 * Returns: 0 if there are waiters (wait_for_completion() in progress) 2965 * Return: 0 if there are waiters (wait_for_completion() in progress)
2949 * 1 if there are no waiters. 2966 * 1 if there are no waiters.
2950 * 2967 *
2951 */ 2968 */
@@ -3182,7 +3199,7 @@ SYSCALL_DEFINE1(nice, int, increment)
3182 * task_prio - return the priority value of a given task. 3199 * task_prio - return the priority value of a given task.
3183 * @p: the task in question. 3200 * @p: the task in question.
3184 * 3201 *
3185 * This is the priority value as seen by users in /proc. 3202 * Return: The priority value as seen by users in /proc.
3186 * RT tasks are offset by -200. Normal tasks are centered 3203 * RT tasks are offset by -200. Normal tasks are centered
3187 * around 0, value goes from -16 to +15. 3204 * around 0, value goes from -16 to +15.
3188 */ 3205 */
@@ -3194,6 +3211,8 @@ int task_prio(const struct task_struct *p)
3194/** 3211/**
3195 * task_nice - return the nice value of a given task. 3212 * task_nice - return the nice value of a given task.
3196 * @p: the task in question. 3213 * @p: the task in question.
3214 *
3215 * Return: The nice value [ -20 ... 0 ... 19 ].
3197 */ 3216 */
3198int task_nice(const struct task_struct *p) 3217int task_nice(const struct task_struct *p)
3199{ 3218{
@@ -3204,6 +3223,8 @@ EXPORT_SYMBOL(task_nice);
3204/** 3223/**
3205 * idle_cpu - is a given cpu idle currently? 3224 * idle_cpu - is a given cpu idle currently?
3206 * @cpu: the processor in question. 3225 * @cpu: the processor in question.
3226 *
3227 * Return: 1 if the CPU is currently idle. 0 otherwise.
3207 */ 3228 */
3208int idle_cpu(int cpu) 3229int idle_cpu(int cpu)
3209{ 3230{
@@ -3226,6 +3247,8 @@ int idle_cpu(int cpu)
3226/** 3247/**
3227 * idle_task - return the idle task for a given cpu. 3248 * idle_task - return the idle task for a given cpu.
3228 * @cpu: the processor in question. 3249 * @cpu: the processor in question.
3250 *
3251 * Return: The idle task for the cpu @cpu.
3229 */ 3252 */
3230struct task_struct *idle_task(int cpu) 3253struct task_struct *idle_task(int cpu)
3231{ 3254{
@@ -3235,6 +3258,8 @@ struct task_struct *idle_task(int cpu)
3235/** 3258/**
3236 * find_process_by_pid - find a process with a matching PID value. 3259 * find_process_by_pid - find a process with a matching PID value.
3237 * @pid: the pid in question. 3260 * @pid: the pid in question.
3261 *
3262 * Return: The task of @pid, if found. %NULL otherwise.
3238 */ 3263 */
3239static struct task_struct *find_process_by_pid(pid_t pid) 3264static struct task_struct *find_process_by_pid(pid_t pid)
3240{ 3265{
@@ -3432,6 +3457,8 @@ recheck:
3432 * @policy: new policy. 3457 * @policy: new policy.
3433 * @param: structure containing the new RT priority. 3458 * @param: structure containing the new RT priority.
3434 * 3459 *
3460 * Return: 0 on success. An error code otherwise.
3461 *
3435 * NOTE that the task may be already dead. 3462 * NOTE that the task may be already dead.
3436 */ 3463 */
3437int sched_setscheduler(struct task_struct *p, int policy, 3464int sched_setscheduler(struct task_struct *p, int policy,
@@ -3451,6 +3478,8 @@ EXPORT_SYMBOL_GPL(sched_setscheduler);
3451 * current context has permission. For example, this is needed in 3478 * current context has permission. For example, this is needed in
3452 * stop_machine(): we create temporary high priority worker threads, 3479 * stop_machine(): we create temporary high priority worker threads,
3453 * but our caller might not have that capability. 3480 * but our caller might not have that capability.
3481 *
3482 * Return: 0 on success. An error code otherwise.
3454 */ 3483 */
3455int sched_setscheduler_nocheck(struct task_struct *p, int policy, 3484int sched_setscheduler_nocheck(struct task_struct *p, int policy,
3456 const struct sched_param *param) 3485 const struct sched_param *param)
@@ -3485,6 +3514,8 @@ do_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param)
3485 * @pid: the pid in question. 3514 * @pid: the pid in question.
3486 * @policy: new policy. 3515 * @policy: new policy.
3487 * @param: structure containing the new RT priority. 3516 * @param: structure containing the new RT priority.
3517 *
3518 * Return: 0 on success. An error code otherwise.
3488 */ 3519 */
3489SYSCALL_DEFINE3(sched_setscheduler, pid_t, pid, int, policy, 3520SYSCALL_DEFINE3(sched_setscheduler, pid_t, pid, int, policy,
3490 struct sched_param __user *, param) 3521 struct sched_param __user *, param)
@@ -3500,6 +3531,8 @@ SYSCALL_DEFINE3(sched_setscheduler, pid_t, pid, int, policy,
3500 * sys_sched_setparam - set/change the RT priority of a thread 3531 * sys_sched_setparam - set/change the RT priority of a thread
3501 * @pid: the pid in question. 3532 * @pid: the pid in question.
3502 * @param: structure containing the new RT priority. 3533 * @param: structure containing the new RT priority.
3534 *
3535 * Return: 0 on success. An error code otherwise.
3503 */ 3536 */
3504SYSCALL_DEFINE2(sched_setparam, pid_t, pid, struct sched_param __user *, param) 3537SYSCALL_DEFINE2(sched_setparam, pid_t, pid, struct sched_param __user *, param)
3505{ 3538{
@@ -3509,6 +3542,9 @@ SYSCALL_DEFINE2(sched_setparam, pid_t, pid, struct sched_param __user *, param)
3509/** 3542/**
3510 * sys_sched_getscheduler - get the policy (scheduling class) of a thread 3543 * sys_sched_getscheduler - get the policy (scheduling class) of a thread
3511 * @pid: the pid in question. 3544 * @pid: the pid in question.
3545 *
3546 * Return: On success, the policy of the thread. Otherwise, a negative error
3547 * code.
3512 */ 3548 */
3513SYSCALL_DEFINE1(sched_getscheduler, pid_t, pid) 3549SYSCALL_DEFINE1(sched_getscheduler, pid_t, pid)
3514{ 3550{
@@ -3535,6 +3571,9 @@ SYSCALL_DEFINE1(sched_getscheduler, pid_t, pid)
3535 * sys_sched_getparam - get the RT priority of a thread 3571 * sys_sched_getparam - get the RT priority of a thread
3536 * @pid: the pid in question. 3572 * @pid: the pid in question.
3537 * @param: structure containing the RT priority. 3573 * @param: structure containing the RT priority.
3574 *
3575 * Return: On success, 0 and the RT priority is in @param. Otherwise, an error
3576 * code.
3538 */ 3577 */
3539SYSCALL_DEFINE2(sched_getparam, pid_t, pid, struct sched_param __user *, param) 3578SYSCALL_DEFINE2(sched_getparam, pid_t, pid, struct sched_param __user *, param)
3540{ 3579{
@@ -3659,6 +3698,8 @@ static int get_user_cpu_mask(unsigned long __user *user_mask_ptr, unsigned len,
3659 * @pid: pid of the process 3698 * @pid: pid of the process
3660 * @len: length in bytes of the bitmask pointed to by user_mask_ptr 3699 * @len: length in bytes of the bitmask pointed to by user_mask_ptr
3661 * @user_mask_ptr: user-space pointer to the new cpu mask 3700 * @user_mask_ptr: user-space pointer to the new cpu mask
3701 *
3702 * Return: 0 on success. An error code otherwise.
3662 */ 3703 */
3663SYSCALL_DEFINE3(sched_setaffinity, pid_t, pid, unsigned int, len, 3704SYSCALL_DEFINE3(sched_setaffinity, pid_t, pid, unsigned int, len,
3664 unsigned long __user *, user_mask_ptr) 3705 unsigned long __user *, user_mask_ptr)
@@ -3710,6 +3751,8 @@ out_unlock:
3710 * @pid: pid of the process 3751 * @pid: pid of the process
3711 * @len: length in bytes of the bitmask pointed to by user_mask_ptr 3752 * @len: length in bytes of the bitmask pointed to by user_mask_ptr
3712 * @user_mask_ptr: user-space pointer to hold the current cpu mask 3753 * @user_mask_ptr: user-space pointer to hold the current cpu mask
3754 *
3755 * Return: 0 on success. An error code otherwise.
3713 */ 3756 */
3714SYSCALL_DEFINE3(sched_getaffinity, pid_t, pid, unsigned int, len, 3757SYSCALL_DEFINE3(sched_getaffinity, pid_t, pid, unsigned int, len,
3715 unsigned long __user *, user_mask_ptr) 3758 unsigned long __user *, user_mask_ptr)
@@ -3744,6 +3787,8 @@ SYSCALL_DEFINE3(sched_getaffinity, pid_t, pid, unsigned int, len,
3744 * 3787 *
3745 * This function yields the current CPU to other tasks. If there are no 3788 * This function yields the current CPU to other tasks. If there are no
3746 * other threads running on this CPU then this function will return. 3789 * other threads running on this CPU then this function will return.
3790 *
3791 * Return: 0.
3747 */ 3792 */
3748SYSCALL_DEFINE0(sched_yield) 3793SYSCALL_DEFINE0(sched_yield)
3749{ 3794{
@@ -3869,7 +3914,7 @@ EXPORT_SYMBOL(yield);
3869 * It's the caller's job to ensure that the target task struct 3914 * It's the caller's job to ensure that the target task struct
3870 * can't go away on us before we can do any checks. 3915 * can't go away on us before we can do any checks.
3871 * 3916 *
3872 * Returns: 3917 * Return:
3873 * true (>0) if we indeed boosted the target task. 3918 * true (>0) if we indeed boosted the target task.
3874 * false (0) if we failed to boost the target. 3919 * false (0) if we failed to boost the target.
3875 * -ESRCH if there's no task to yield to. 3920 * -ESRCH if there's no task to yield to.
@@ -3972,8 +4017,9 @@ long __sched io_schedule_timeout(long timeout)
3972 * sys_sched_get_priority_max - return maximum RT priority. 4017 * sys_sched_get_priority_max - return maximum RT priority.
3973 * @policy: scheduling class. 4018 * @policy: scheduling class.
3974 * 4019 *
3975 * this syscall returns the maximum rt_priority that can be used 4020 * Return: On success, this syscall returns the maximum
3976 * by a given scheduling class. 4021 * rt_priority that can be used by a given scheduling class.
4022 * On failure, a negative error code is returned.
3977 */ 4023 */
3978SYSCALL_DEFINE1(sched_get_priority_max, int, policy) 4024SYSCALL_DEFINE1(sched_get_priority_max, int, policy)
3979{ 4025{
@@ -3997,8 +4043,9 @@ SYSCALL_DEFINE1(sched_get_priority_max, int, policy)
3997 * sys_sched_get_priority_min - return minimum RT priority. 4043 * sys_sched_get_priority_min - return minimum RT priority.
3998 * @policy: scheduling class. 4044 * @policy: scheduling class.
3999 * 4045 *
4000 * this syscall returns the minimum rt_priority that can be used 4046 * Return: On success, this syscall returns the minimum
4001 * by a given scheduling class. 4047 * rt_priority that can be used by a given scheduling class.
4048 * On failure, a negative error code is returned.
4002 */ 4049 */
4003SYSCALL_DEFINE1(sched_get_priority_min, int, policy) 4050SYSCALL_DEFINE1(sched_get_priority_min, int, policy)
4004{ 4051{
@@ -4024,6 +4071,9 @@ SYSCALL_DEFINE1(sched_get_priority_min, int, policy)
4024 * 4071 *
4025 * this syscall writes the default timeslice value of a given process 4072 * this syscall writes the default timeslice value of a given process
4026 * into the user-space timespec buffer. A value of '0' means infinity. 4073 * into the user-space timespec buffer. A value of '0' means infinity.
4074 *
4075 * Return: On success, 0 and the timeslice is in @interval. Otherwise,
4076 * an error code.
4027 */ 4077 */
4028SYSCALL_DEFINE2(sched_rr_get_interval, pid_t, pid, 4078SYSCALL_DEFINE2(sched_rr_get_interval, pid_t, pid,
4029 struct timespec __user *, interval) 4079 struct timespec __user *, interval)
@@ -6632,6 +6682,8 @@ void normalize_rt_tasks(void)
6632 * @cpu: the processor in question. 6682 * @cpu: the processor in question.
6633 * 6683 *
6634 * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED! 6684 * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED!
6685 *
6686 * Return: The current task for @cpu.
6635 */ 6687 */
6636struct task_struct *curr_task(int cpu) 6688struct task_struct *curr_task(int cpu)
6637{ 6689{
diff --git a/kernel/sched/cpupri.c b/kernel/sched/cpupri.c
index 1095e878a46f..8b836b376d91 100644
--- a/kernel/sched/cpupri.c
+++ b/kernel/sched/cpupri.c
@@ -62,7 +62,7 @@ static int convert_prio(int prio)
62 * any discrepancies created by racing against the uncertainty of the current 62 * any discrepancies created by racing against the uncertainty of the current
63 * priority configuration. 63 * priority configuration.
64 * 64 *
65 * Returns: (int)bool - CPUs were found 65 * Return: (int)bool - CPUs were found
66 */ 66 */
67int cpupri_find(struct cpupri *cp, struct task_struct *p, 67int cpupri_find(struct cpupri *cp, struct task_struct *p,
68 struct cpumask *lowest_mask) 68 struct cpumask *lowest_mask)
@@ -203,7 +203,7 @@ void cpupri_set(struct cpupri *cp, int cpu, int newpri)
203 * cpupri_init - initialize the cpupri structure 203 * cpupri_init - initialize the cpupri structure
204 * @cp: The cpupri context 204 * @cp: The cpupri context
205 * 205 *
206 * Returns: -ENOMEM if memory fails. 206 * Return: -ENOMEM on memory allocation failure.
207 */ 207 */
208int cpupri_init(struct cpupri *cp) 208int cpupri_init(struct cpupri *cp)
209{ 209{
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 9565645e3202..68f1609ca149 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -2032,6 +2032,7 @@ entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr, int queued)
2032 */ 2032 */
2033 update_entity_load_avg(curr, 1); 2033 update_entity_load_avg(curr, 1);
2034 update_cfs_rq_blocked_load(cfs_rq, 1); 2034 update_cfs_rq_blocked_load(cfs_rq, 1);
2035 update_cfs_shares(cfs_rq);
2035 2036
2036#ifdef CONFIG_SCHED_HRTICK 2037#ifdef CONFIG_SCHED_HRTICK
2037 /* 2038 /*
@@ -4280,6 +4281,8 @@ struct sg_lb_stats {
4280 * get_sd_load_idx - Obtain the load index for a given sched domain. 4281 * get_sd_load_idx - Obtain the load index for a given sched domain.
4281 * @sd: The sched_domain whose load_idx is to be obtained. 4282 * @sd: The sched_domain whose load_idx is to be obtained.
4282 * @idle: The Idle status of the CPU for whose sd load_icx is obtained. 4283 * @idle: The Idle status of the CPU for whose sd load_icx is obtained.
4284 *
4285 * Return: The load index.
4283 */ 4286 */
4284static inline int get_sd_load_idx(struct sched_domain *sd, 4287static inline int get_sd_load_idx(struct sched_domain *sd,
4285 enum cpu_idle_type idle) 4288 enum cpu_idle_type idle)
@@ -4574,6 +4577,9 @@ static inline void update_sg_lb_stats(struct lb_env *env,
4574 * 4577 *
4575 * Determine if @sg is a busier group than the previously selected 4578 * Determine if @sg is a busier group than the previously selected
4576 * busiest group. 4579 * busiest group.
4580 *
4581 * Return: %true if @sg is a busier group than the previously selected
4582 * busiest group. %false otherwise.
4577 */ 4583 */
4578static bool update_sd_pick_busiest(struct lb_env *env, 4584static bool update_sd_pick_busiest(struct lb_env *env,
4579 struct sd_lb_stats *sds, 4585 struct sd_lb_stats *sds,
@@ -4691,7 +4697,7 @@ static inline void update_sd_lb_stats(struct lb_env *env,
4691 * assuming lower CPU number will be equivalent to lower a SMT thread 4697 * assuming lower CPU number will be equivalent to lower a SMT thread
4692 * number. 4698 * number.
4693 * 4699 *
4694 * Returns 1 when packing is required and a task should be moved to 4700 * Return: 1 when packing is required and a task should be moved to
4695 * this CPU. The amount of the imbalance is returned in *imbalance. 4701 * this CPU. The amount of the imbalance is returned in *imbalance.
4696 * 4702 *
4697 * @env: The load balancing environment. 4703 * @env: The load balancing environment.
@@ -4869,7 +4875,7 @@ static inline void calculate_imbalance(struct lb_env *env, struct sd_lb_stats *s
4869 * @balance: Pointer to a variable indicating if this_cpu 4875 * @balance: Pointer to a variable indicating if this_cpu
4870 * is the appropriate cpu to perform load balancing at this_level. 4876 * is the appropriate cpu to perform load balancing at this_level.
4871 * 4877 *
4872 * Returns: - the busiest group if imbalance exists. 4878 * Return: - The busiest group if imbalance exists.
4873 * - If no imbalance and user has opted for power-savings balance, 4879 * - If no imbalance and user has opted for power-savings balance,
4874 * return the least loaded group whose CPUs can be 4880 * return the least loaded group whose CPUs can be
4875 * put to idle by rebalancing its tasks onto our group. 4881 * put to idle by rebalancing its tasks onto our group.
diff --git a/kernel/time/sched_clock.c b/kernel/time/sched_clock.c
index a326f27d7f09..0b479a6a22bb 100644
--- a/kernel/time/sched_clock.c
+++ b/kernel/time/sched_clock.c
@@ -121,7 +121,7 @@ void __init setup_sched_clock(u32 (*read)(void), int bits, unsigned long rate)
121 BUG_ON(bits > 32); 121 BUG_ON(bits > 32);
122 WARN_ON(!irqs_disabled()); 122 WARN_ON(!irqs_disabled());
123 read_sched_clock = read; 123 read_sched_clock = read;
124 sched_clock_mask = (1 << bits) - 1; 124 sched_clock_mask = (1ULL << bits) - 1;
125 cd.rate = rate; 125 cd.rate = rate;
126 126
127 /* calculate the mult/shift to convert counter ticks to ns. */ 127 /* calculate the mult/shift to convert counter ticks to ns. */
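The sched_clock mask change above is subtle: with a plain int constant, (1 << bits) - 1 is undefined once bits reaches 32, so a 32-bit clocksource would end up with a bogus mask. A stand-alone sketch of the corrected expression (illustration only, not kernel code):

#include <stdio.h>
#include <stdint.h>

/* Illustration only: building the clocksource mask from a 64-bit constant
 * so that bits == 32 still yields 0xffffffff.  With a 32-bit "1", shifting
 * by 32 is undefined behaviour and commonly produces 0 or 1. */
static uint64_t clock_mask(unsigned int bits)
{
	return (1ULL << bits) - 1;
}

int main(void)
{
	printf("bits=16: %#llx\n", (unsigned long long)clock_mask(16));
	printf("bits=32: %#llx\n", (unsigned long long)clock_mask(32));
	return 0;
}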
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index e77edc97e036..e8a1516cc0a3 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -182,7 +182,8 @@ static bool can_stop_full_tick(void)
182 * Don't allow the user to think they can get 182 * Don't allow the user to think they can get
183 * full NO_HZ with this machine. 183 * full NO_HZ with this machine.
184 */ 184 */
185 WARN_ONCE(1, "NO_HZ FULL will not work with unstable sched clock"); 185 WARN_ONCE(have_nohz_full_mask,
186 "NO_HZ FULL will not work with unstable sched clock");
186 return false; 187 return false;
187 } 188 }
188#endif 189#endif
@@ -343,8 +344,6 @@ static int tick_nohz_init_all(void)
343 344
344void __init tick_nohz_init(void) 345void __init tick_nohz_init(void)
345{ 346{
346 int cpu;
347
348 if (!have_nohz_full_mask) { 347 if (!have_nohz_full_mask) {
349 if (tick_nohz_init_all() < 0) 348 if (tick_nohz_init_all() < 0)
350 return; 349 return;
diff --git a/kernel/wait.c b/kernel/wait.c
index dec68bd4e9d8..d550920e040c 100644
--- a/kernel/wait.c
+++ b/kernel/wait.c
@@ -363,8 +363,7 @@ EXPORT_SYMBOL(out_of_line_wait_on_atomic_t);
363 363
364/** 364/**
365 * wake_up_atomic_t - Wake up a waiter on a atomic_t 365 * wake_up_atomic_t - Wake up a waiter on a atomic_t
366 * @word: The word being waited on, a kernel virtual address 366 * @p: The atomic_t being waited on, a kernel virtual address
367 * @bit: The bit of the word being waited on
368 * 367 *
369 * Wake up anyone waiting for the atomic_t to go to zero. 368 * Wake up anyone waiting for the atomic_t to go to zero.
370 * 369 *
diff --git a/lib/lz4/lz4_compress.c b/lib/lz4/lz4_compress.c
index fd94058bd7f9..28321d8f75ef 100644
--- a/lib/lz4/lz4_compress.c
+++ b/lib/lz4/lz4_compress.c
@@ -437,7 +437,7 @@ int lz4_compress(const unsigned char *src, size_t src_len,
437exit: 437exit:
438 return ret; 438 return ret;
439} 439}
440EXPORT_SYMBOL_GPL(lz4_compress); 440EXPORT_SYMBOL(lz4_compress);
441 441
442MODULE_LICENSE("GPL"); 442MODULE_LICENSE("Dual BSD/GPL");
443MODULE_DESCRIPTION("LZ4 compressor"); 443MODULE_DESCRIPTION("LZ4 compressor");
diff --git a/lib/lz4/lz4_decompress.c b/lib/lz4/lz4_decompress.c
index d3414eae73a1..411be80ddb46 100644
--- a/lib/lz4/lz4_decompress.c
+++ b/lib/lz4/lz4_decompress.c
@@ -299,7 +299,7 @@ exit_0:
299 return ret; 299 return ret;
300} 300}
301#ifndef STATIC 301#ifndef STATIC
302EXPORT_SYMBOL_GPL(lz4_decompress); 302EXPORT_SYMBOL(lz4_decompress);
303#endif 303#endif
304 304
305int lz4_decompress_unknownoutputsize(const char *src, size_t src_len, 305int lz4_decompress_unknownoutputsize(const char *src, size_t src_len,
@@ -319,8 +319,8 @@ exit_0:
319 return ret; 319 return ret;
320} 320}
321#ifndef STATIC 321#ifndef STATIC
322EXPORT_SYMBOL_GPL(lz4_decompress_unknownoutputsize); 322EXPORT_SYMBOL(lz4_decompress_unknownoutputsize);
323 323
324MODULE_LICENSE("GPL"); 324MODULE_LICENSE("Dual BSD/GPL");
325MODULE_DESCRIPTION("LZ4 Decompressor"); 325MODULE_DESCRIPTION("LZ4 Decompressor");
326#endif 326#endif
diff --git a/lib/lz4/lz4hc_compress.c b/lib/lz4/lz4hc_compress.c
index eb1a74f5e368..f344f76b6559 100644
--- a/lib/lz4/lz4hc_compress.c
+++ b/lib/lz4/lz4hc_compress.c
@@ -533,7 +533,7 @@ int lz4hc_compress(const unsigned char *src, size_t src_len,
533exit: 533exit:
534 return ret; 534 return ret;
535} 535}
536EXPORT_SYMBOL_GPL(lz4hc_compress); 536EXPORT_SYMBOL(lz4hc_compress);
537 537
538MODULE_LICENSE("GPL"); 538MODULE_LICENSE("Dual BSD/GPL");
539MODULE_DESCRIPTION("LZ4HC compressor"); 539MODULE_DESCRIPTION("LZ4HC compressor");
diff --git a/mm/fremap.c b/mm/fremap.c
index 87da3590c61e..5bff08147768 100644
--- a/mm/fremap.c
+++ b/mm/fremap.c
@@ -57,17 +57,22 @@ static int install_file_pte(struct mm_struct *mm, struct vm_area_struct *vma,
57 unsigned long addr, unsigned long pgoff, pgprot_t prot) 57 unsigned long addr, unsigned long pgoff, pgprot_t prot)
58{ 58{
59 int err = -ENOMEM; 59 int err = -ENOMEM;
60 pte_t *pte; 60 pte_t *pte, ptfile;
61 spinlock_t *ptl; 61 spinlock_t *ptl;
62 62
63 pte = get_locked_pte(mm, addr, &ptl); 63 pte = get_locked_pte(mm, addr, &ptl);
64 if (!pte) 64 if (!pte)
65 goto out; 65 goto out;
66 66
67 if (!pte_none(*pte)) 67 ptfile = pgoff_to_pte(pgoff);
68
69 if (!pte_none(*pte)) {
70 if (pte_present(*pte) && pte_soft_dirty(*pte))
71 pte_file_mksoft_dirty(ptfile);
68 zap_pte(mm, vma, addr, pte); 72 zap_pte(mm, vma, addr, pte);
73 }
69 74
70 set_pte_at(mm, addr, pte, pgoff_to_pte(pgoff)); 75 set_pte_at(mm, addr, pte, ptfile);
71 /* 76 /*
72 * We don't need to run update_mmu_cache() here because the "file pte" 77 * We don't need to run update_mmu_cache() here because the "file pte"
73 * being installed by install_file_pte() is not a real pte - it's a 78 * being installed by install_file_pte() is not a real pte - it's a
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 83aff0a4d093..b60f33080a28 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -2490,7 +2490,7 @@ void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
2490 2490
2491 mm = vma->vm_mm; 2491 mm = vma->vm_mm;
2492 2492
2493 tlb_gather_mmu(&tlb, mm, 0); 2493 tlb_gather_mmu(&tlb, mm, start, end);
2494 __unmap_hugepage_range(&tlb, vma, start, end, ref_page); 2494 __unmap_hugepage_range(&tlb, vma, start, end, ref_page);
2495 tlb_finish_mmu(&tlb, start, end); 2495 tlb_finish_mmu(&tlb, start, end);
2496} 2496}
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index c290a1cf3862..0878ff7c26a9 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -3195,11 +3195,11 @@ int memcg_register_cache(struct mem_cgroup *memcg, struct kmem_cache *s,
3195 if (!s->memcg_params) 3195 if (!s->memcg_params)
3196 return -ENOMEM; 3196 return -ENOMEM;
3197 3197
3198 INIT_WORK(&s->memcg_params->destroy,
3199 kmem_cache_destroy_work_func);
3200 if (memcg) { 3198 if (memcg) {
3201 s->memcg_params->memcg = memcg; 3199 s->memcg_params->memcg = memcg;
3202 s->memcg_params->root_cache = root_cache; 3200 s->memcg_params->root_cache = root_cache;
3201 INIT_WORK(&s->memcg_params->destroy,
3202 kmem_cache_destroy_work_func);
3203 } else 3203 } else
3204 s->memcg_params->is_root_cache = true; 3204 s->memcg_params->is_root_cache = true;
3205 3205
@@ -6969,7 +6969,6 @@ struct cgroup_subsys mem_cgroup_subsys = {
6969#ifdef CONFIG_MEMCG_SWAP 6969#ifdef CONFIG_MEMCG_SWAP
6970static int __init enable_swap_account(char *s) 6970static int __init enable_swap_account(char *s)
6971{ 6971{
6972 /* consider enabled if no parameter or 1 is given */
6973 if (!strcmp(s, "1")) 6972 if (!strcmp(s, "1"))
6974 really_do_swap_account = 1; 6973 really_do_swap_account = 1;
6975 else if (!strcmp(s, "0")) 6974 else if (!strcmp(s, "0"))
diff --git a/mm/memory.c b/mm/memory.c
index 1ce2e2a734fc..af84bc0ec17c 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -209,14 +209,15 @@ static int tlb_next_batch(struct mmu_gather *tlb)
209 * tear-down from @mm. The @fullmm argument is used when @mm is without 209 * tear-down from @mm. The @fullmm argument is used when @mm is without
210 * users and we're going to destroy the full address space (exit/execve). 210 * users and we're going to destroy the full address space (exit/execve).
211 */ 211 */
212void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, bool fullmm) 212void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end)
213{ 213{
214 tlb->mm = mm; 214 tlb->mm = mm;
215 215
216 tlb->fullmm = fullmm; 216 /* Is it from 0 to ~0? */
217 tlb->fullmm = !(start | (end+1));
217 tlb->need_flush_all = 0; 218 tlb->need_flush_all = 0;
218 tlb->start = -1UL; 219 tlb->start = start;
219 tlb->end = 0; 220 tlb->end = end;
220 tlb->need_flush = 0; 221 tlb->need_flush = 0;
221 tlb->local.next = NULL; 222 tlb->local.next = NULL;
222 tlb->local.nr = 0; 223 tlb->local.nr = 0;
@@ -256,8 +257,6 @@ void tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long e
256{ 257{
257 struct mmu_gather_batch *batch, *next; 258 struct mmu_gather_batch *batch, *next;
258 259
259 tlb->start = start;
260 tlb->end = end;
261 tlb_flush_mmu(tlb); 260 tlb_flush_mmu(tlb);
262 261
263 /* keep the page table cache within bounds */ 262 /* keep the page table cache within bounds */
@@ -1099,7 +1098,6 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb,
1099 spinlock_t *ptl; 1098 spinlock_t *ptl;
1100 pte_t *start_pte; 1099 pte_t *start_pte;
1101 pte_t *pte; 1100 pte_t *pte;
1102 unsigned long range_start = addr;
1103 1101
1104again: 1102again:
1105 init_rss_vec(rss); 1103 init_rss_vec(rss);
@@ -1141,9 +1139,12 @@ again:
1141 continue; 1139 continue;
1142 if (unlikely(details) && details->nonlinear_vma 1140 if (unlikely(details) && details->nonlinear_vma
1143 && linear_page_index(details->nonlinear_vma, 1141 && linear_page_index(details->nonlinear_vma,
1144 addr) != page->index) 1142 addr) != page->index) {
1145 set_pte_at(mm, addr, pte, 1143 pte_t ptfile = pgoff_to_pte(page->index);
1146 pgoff_to_pte(page->index)); 1144 if (pte_soft_dirty(ptent))
1145 pte_file_mksoft_dirty(ptfile);
1146 set_pte_at(mm, addr, pte, ptfile);
1147 }
1147 if (PageAnon(page)) 1148 if (PageAnon(page))
1148 rss[MM_ANONPAGES]--; 1149 rss[MM_ANONPAGES]--;
1149 else { 1150 else {
@@ -1202,17 +1203,25 @@ again:
1202 * and page-free while holding it. 1203 * and page-free while holding it.
1203 */ 1204 */
1204 if (force_flush) { 1205 if (force_flush) {
1206 unsigned long old_end;
1207
1205 force_flush = 0; 1208 force_flush = 0;
1206 1209
1207#ifdef HAVE_GENERIC_MMU_GATHER 1210 /*
1208 tlb->start = range_start; 1211 * Flush the TLB just for the previous segment,
1212 * then update the range to be the remaining
1213 * TLB range.
1214 */
1215 old_end = tlb->end;
1209 tlb->end = addr; 1216 tlb->end = addr;
1210#endif 1217
1211 tlb_flush_mmu(tlb); 1218 tlb_flush_mmu(tlb);
1212 if (addr != end) { 1219
1213 range_start = addr; 1220 tlb->start = addr;
1221 tlb->end = old_end;
1222
1223 if (addr != end)
1214 goto again; 1224 goto again;
1215 }
1216 } 1225 }
1217 1226
1218 return addr; 1227 return addr;
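The force_flush path now flushes only the sub-range that has already been walked and then resumes with tlb->start moved up to addr, keeping the original end. A user-space sketch of that resume pattern (illustration only; the batch size at which the flush triggers is made up):

#include <stdio.h>

/* Sketch (not kernel code) of the restart logic in zap_pte_range() above:
 * when the mmu_gather batch fills up mid-range, flush only what has been
 * covered so far, then continue from the resume point with the original
 * end restored. */
static void flush(unsigned long start, unsigned long end)
{
	printf("flush [%#lx, %#lx)\n", start, end);
}

int main(void)
{
	unsigned long start = 0x1000, end = 0x9000;
	unsigned long addr = start;

	while (addr < end) {
		/* pretend the batch filled after 0x3000 bytes (made-up value) */
		unsigned long stop = addr + 0x3000 < end ? addr + 0x3000 : end;

		flush(addr, stop);	/* like tlb_flush_mmu() on the walked part */
		addr = stop;		/* tlb->start = addr; keep the original end */
	}
	return 0;
}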
@@ -1397,7 +1406,7 @@ void zap_page_range(struct vm_area_struct *vma, unsigned long start,
1397 unsigned long end = start + size; 1406 unsigned long end = start + size;
1398 1407
1399 lru_add_drain(); 1408 lru_add_drain();
1400 tlb_gather_mmu(&tlb, mm, 0); 1409 tlb_gather_mmu(&tlb, mm, start, end);
1401 update_hiwater_rss(mm); 1410 update_hiwater_rss(mm);
1402 mmu_notifier_invalidate_range_start(mm, start, end); 1411 mmu_notifier_invalidate_range_start(mm, start, end);
1403 for ( ; vma && vma->vm_start < end; vma = vma->vm_next) 1412 for ( ; vma && vma->vm_start < end; vma = vma->vm_next)
@@ -1423,7 +1432,7 @@ static void zap_page_range_single(struct vm_area_struct *vma, unsigned long addr
1423 unsigned long end = address + size; 1432 unsigned long end = address + size;
1424 1433
1425 lru_add_drain(); 1434 lru_add_drain();
1426 tlb_gather_mmu(&tlb, mm, 0); 1435 tlb_gather_mmu(&tlb, mm, address, end);
1427 update_hiwater_rss(mm); 1436 update_hiwater_rss(mm);
1428 mmu_notifier_invalidate_range_start(mm, address, end); 1437 mmu_notifier_invalidate_range_start(mm, address, end);
1429 unmap_single_vma(&tlb, vma, address, end, details); 1438 unmap_single_vma(&tlb, vma, address, end, details);
@@ -3115,6 +3124,8 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
3115 exclusive = 1; 3124 exclusive = 1;
3116 } 3125 }
3117 flush_icache_page(vma, page); 3126 flush_icache_page(vma, page);
3127 if (pte_swp_soft_dirty(orig_pte))
3128 pte = pte_mksoft_dirty(pte);
3118 set_pte_at(mm, address, page_table, pte); 3129 set_pte_at(mm, address, page_table, pte);
3119 if (page == swapcache) 3130 if (page == swapcache)
3120 do_page_add_anon_rmap(page, vma, address, exclusive); 3131 do_page_add_anon_rmap(page, vma, address, exclusive);
@@ -3408,6 +3419,8 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
3408 entry = mk_pte(page, vma->vm_page_prot); 3419 entry = mk_pte(page, vma->vm_page_prot);
3409 if (flags & FAULT_FLAG_WRITE) 3420 if (flags & FAULT_FLAG_WRITE)
3410 entry = maybe_mkwrite(pte_mkdirty(entry), vma); 3421 entry = maybe_mkwrite(pte_mkdirty(entry), vma);
3422 else if (pte_file(orig_pte) && pte_file_soft_dirty(orig_pte))
3423 pte_mksoft_dirty(entry);
3411 if (anon) { 3424 if (anon) {
3412 inc_mm_counter_fast(mm, MM_ANONPAGES); 3425 inc_mm_counter_fast(mm, MM_ANONPAGES);
3413 page_add_new_anon_rmap(page, vma, address); 3426 page_add_new_anon_rmap(page, vma, address);
diff --git a/mm/mmap.c b/mm/mmap.c
index 1edbaa3136c3..f9c97d10b873 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -2336,7 +2336,7 @@ static void unmap_region(struct mm_struct *mm,
2336 struct mmu_gather tlb; 2336 struct mmu_gather tlb;
2337 2337
2338 lru_add_drain(); 2338 lru_add_drain();
2339 tlb_gather_mmu(&tlb, mm, 0); 2339 tlb_gather_mmu(&tlb, mm, start, end);
2340 update_hiwater_rss(mm); 2340 update_hiwater_rss(mm);
2341 unmap_vmas(&tlb, vma, start, end); 2341 unmap_vmas(&tlb, vma, start, end);
2342 free_pgtables(&tlb, vma, prev ? prev->vm_end : FIRST_USER_ADDRESS, 2342 free_pgtables(&tlb, vma, prev ? prev->vm_end : FIRST_USER_ADDRESS,
@@ -2709,7 +2709,7 @@ void exit_mmap(struct mm_struct *mm)
2709 2709
2710 lru_add_drain(); 2710 lru_add_drain();
2711 flush_cache_mm(mm); 2711 flush_cache_mm(mm);
2712 tlb_gather_mmu(&tlb, mm, 1); 2712 tlb_gather_mmu(&tlb, mm, 0, -1);
2713 /* update_hiwater_rss(mm) here? but nobody should be looking */ 2713 /* update_hiwater_rss(mm) here? but nobody should be looking */
2714 /* Use -1 here to ensure all VMAs in the mm are unmapped */ 2714 /* Use -1 here to ensure all VMAs in the mm are unmapped */
2715 unmap_vmas(&tlb, vma, 0, -1); 2715 unmap_vmas(&tlb, vma, 0, -1);
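tlb_gather_mmu() now derives the full-mm case from the range itself: exit_mmap() above passes (0, -1), i.e. the whole address space, and !(start | (end + 1)) is true only for that pair. A small user-space sketch of the test (illustration only, not kernel code):

#include <stdio.h>

/* Illustration of the full-mm test in tlb_gather_mmu(): the gather is a
 * full address-space teardown exactly when start == 0 and end == ~0UL,
 * so that (end + 1) wraps to 0 as well. */
static int is_fullmm(unsigned long start, unsigned long end)
{
	return !(start | (end + 1));
}

int main(void)
{
	printf("exit_mmap (0, -1):       %d\n", is_fullmm(0, ~0UL));
	printf("munmap (0x1000, 0x2000): %d\n", is_fullmm(0x1000, 0x2000));
	return 0;
}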
diff --git a/mm/rmap.c b/mm/rmap.c
index cd356df4f71a..b2e29acd7e3d 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1236,6 +1236,7 @@ int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
1236 swp_entry_to_pte(make_hwpoison_entry(page))); 1236 swp_entry_to_pte(make_hwpoison_entry(page)));
1237 } else if (PageAnon(page)) { 1237 } else if (PageAnon(page)) {
1238 swp_entry_t entry = { .val = page_private(page) }; 1238 swp_entry_t entry = { .val = page_private(page) };
1239 pte_t swp_pte;
1239 1240
1240 if (PageSwapCache(page)) { 1241 if (PageSwapCache(page)) {
1241 /* 1242 /*
@@ -1264,7 +1265,10 @@ int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
1264 BUG_ON(TTU_ACTION(flags) != TTU_MIGRATION); 1265 BUG_ON(TTU_ACTION(flags) != TTU_MIGRATION);
1265 entry = make_migration_entry(page, pte_write(pteval)); 1266 entry = make_migration_entry(page, pte_write(pteval));
1266 } 1267 }
1267 set_pte_at(mm, address, pte, swp_entry_to_pte(entry)); 1268 swp_pte = swp_entry_to_pte(entry);
1269 if (pte_soft_dirty(pteval))
1270 swp_pte = pte_swp_mksoft_dirty(swp_pte);
1271 set_pte_at(mm, address, pte, swp_pte);
1268 BUG_ON(pte_file(*pte)); 1272 BUG_ON(pte_file(*pte));
1269 } else if (IS_ENABLED(CONFIG_MIGRATION) && 1273 } else if (IS_ENABLED(CONFIG_MIGRATION) &&
1270 (TTU_ACTION(flags) == TTU_MIGRATION)) { 1274 (TTU_ACTION(flags) == TTU_MIGRATION)) {
@@ -1401,8 +1405,12 @@ static int try_to_unmap_cluster(unsigned long cursor, unsigned int *mapcount,
1401 pteval = ptep_clear_flush(vma, address, pte); 1405 pteval = ptep_clear_flush(vma, address, pte);
1402 1406
1403 /* If nonlinear, store the file page offset in the pte. */ 1407 /* If nonlinear, store the file page offset in the pte. */
1404 if (page->index != linear_page_index(vma, address)) 1408 if (page->index != linear_page_index(vma, address)) {
1405 set_pte_at(mm, address, pte, pgoff_to_pte(page->index)); 1409 pte_t ptfile = pgoff_to_pte(page->index);
1410 if (pte_soft_dirty(pteval))
1411 pte_file_mksoft_dirty(ptfile);
1412 set_pte_at(mm, address, pte, ptfile);
1413 }
1406 1414
1407 /* Move the dirty bit to the physical page now the pte is gone. */ 1415 /* Move the dirty bit to the physical page now the pte is gone. */
1408 if (pte_dirty(pteval)) 1416 if (pte_dirty(pteval))
diff --git a/mm/shmem.c b/mm/shmem.c
index 8335dbd3fc35..e43dc555069d 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -2909,14 +2909,8 @@ EXPORT_SYMBOL_GPL(shmem_truncate_range);
2909 2909
2910/* common code */ 2910/* common code */
2911 2911
2912static char *shmem_dname(struct dentry *dentry, char *buffer, int buflen)
2913{
2914 return dynamic_dname(dentry, buffer, buflen, "/%s (deleted)",
2915 dentry->d_name.name);
2916}
2917
2918static struct dentry_operations anon_ops = { 2912static struct dentry_operations anon_ops = {
2919 .d_dname = shmem_dname 2913 .d_dname = simple_dname
2920}; 2914};
2921 2915
2922/** 2916/**
diff --git a/mm/swapfile.c b/mm/swapfile.c
index 36af6eeaa67e..6cf2e60983b7 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -866,6 +866,21 @@ unsigned int count_swap_pages(int type, int free)
866} 866}
867#endif /* CONFIG_HIBERNATION */ 867#endif /* CONFIG_HIBERNATION */
868 868
869static inline int maybe_same_pte(pte_t pte, pte_t swp_pte)
870{
871#ifdef CONFIG_MEM_SOFT_DIRTY
872 /*
 873	 * When the pte keeps the soft dirty bit, the pte generated
 874	 * from the swap entry does not have it; still it's the same
 875	 * pte from a logical point of view.
876 */
877 pte_t swp_pte_dirty = pte_swp_mksoft_dirty(swp_pte);
878 return pte_same(pte, swp_pte) || pte_same(pte, swp_pte_dirty);
879#else
880 return pte_same(pte, swp_pte);
881#endif
882}
883
869/* 884/*
870 * No need to decide whether this PTE shares the swap entry with others, 885 * No need to decide whether this PTE shares the swap entry with others,
871 * just let do_wp_page work it out if a write is requested later - to 886 * just let do_wp_page work it out if a write is requested later - to
@@ -892,7 +907,7 @@ static int unuse_pte(struct vm_area_struct *vma, pmd_t *pmd,
892 } 907 }
893 908
894 pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); 909 pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
895 if (unlikely(!pte_same(*pte, swp_entry_to_pte(entry)))) { 910 if (unlikely(!maybe_same_pte(*pte, swp_entry_to_pte(entry)))) {
896 mem_cgroup_cancel_charge_swapin(memcg); 911 mem_cgroup_cancel_charge_swapin(memcg);
897 ret = 0; 912 ret = 0;
898 goto out; 913 goto out;
@@ -947,7 +962,7 @@ static int unuse_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
947 * swapoff spends a _lot_ of time in this loop! 962 * swapoff spends a _lot_ of time in this loop!
948 * Test inline before going to call unuse_pte. 963 * Test inline before going to call unuse_pte.
949 */ 964 */
950 if (unlikely(pte_same(*pte, swp_pte))) { 965 if (unlikely(maybe_same_pte(*pte, swp_pte))) {
951 pte_unmap(pte); 966 pte_unmap(pte);
952 ret = unuse_pte(vma, pmd, addr, entry, page); 967 ret = unuse_pte(vma, pmd, addr, entry, page);
953 if (ret) 968 if (ret)
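maybe_same_pte() exists because a swap pte installed while soft-dirty tracking was active carries the soft-dirty bit, whereas the pte rebuilt from the swp_entry_t does not, so a strict pte_same() would make swapoff skip the entry. A toy model of the comparison (my own simplified bit layout, not the kernel's):

#include <stdio.h>

/* Toy model: one bit stands in for the soft-dirty flag; everything else
 * stands in for the swap entry.  Both values name the same swap slot,
 * which is what maybe_same_pte() above accepts and pte_same() rejects. */
#define SOFT_DIRTY 0x01000000u

static int maybe_same(unsigned int pte, unsigned int swp_pte)
{
	return pte == swp_pte || pte == (swp_pte | SOFT_DIRTY);
}

int main(void)
{
	unsigned int in_pagetable = 0x1234u | SOFT_DIRTY;	/* as found in the page table */
	unsigned int from_entry = 0x1234u;			/* as rebuilt by swp_entry_to_pte() */

	printf("strict compare: %d\n", in_pagetable == from_entry);
	printf("maybe_same:     %d\n", maybe_same(in_pagetable, from_entry));
	return 0;
}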
diff --git a/net/8021q/vlan_core.c b/net/8021q/vlan_core.c
index 4a78c4de9f20..6ee48aac776f 100644
--- a/net/8021q/vlan_core.c
+++ b/net/8021q/vlan_core.c
@@ -91,7 +91,12 @@ EXPORT_SYMBOL(__vlan_find_dev_deep);
91 91
92struct net_device *vlan_dev_real_dev(const struct net_device *dev) 92struct net_device *vlan_dev_real_dev(const struct net_device *dev)
93{ 93{
94 return vlan_dev_priv(dev)->real_dev; 94 struct net_device *ret = vlan_dev_priv(dev)->real_dev;
95
96 while (is_vlan_dev(ret))
97 ret = vlan_dev_priv(ret)->real_dev;
98
99 return ret;
95} 100}
96EXPORT_SYMBOL(vlan_dev_real_dev); 101EXPORT_SYMBOL(vlan_dev_real_dev);
97 102
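With stacked VLANs (Q-in-Q) the device handed to vlan_dev_real_dev() can itself be a VLAN device, so the function now walks down until it reaches a non-VLAN device. A toy model of that walk (not the kernel API):

#include <stdio.h>

/* Toy model: lower is NULL for a physical device, otherwise it points at
 * the device this VLAN sits on, so the real device is found by walking
 * down the chain. */
struct dev {
	const char *name;
	struct dev *lower;
};

static struct dev *real_dev(struct dev *d)
{
	while (d->lower)
		d = d->lower;
	return d;
}

int main(void)
{
	struct dev eth0 = { "eth0", NULL };
	struct dev vlan10 = { "eth0.10", &eth0 };
	struct dev vlan20 = { "eth0.10.20", &vlan10 };

	printf("%s -> %s\n", vlan20.name, real_dev(&vlan20)->name);
	return 0;
}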
diff --git a/net/batman-adv/bridge_loop_avoidance.c b/net/batman-adv/bridge_loop_avoidance.c
index e14531f1ce1c..264de88db320 100644
--- a/net/batman-adv/bridge_loop_avoidance.c
+++ b/net/batman-adv/bridge_loop_avoidance.c
@@ -1529,6 +1529,8 @@ out:
1529 * in these cases, the skb is further handled by this function and 1529 * in these cases, the skb is further handled by this function and
1530 * returns 1, otherwise it returns 0 and the caller shall further 1530 * returns 1, otherwise it returns 0 and the caller shall further
1531 * process the skb. 1531 * process the skb.
1532 *
1533 * This call might reallocate skb data.
1532 */ 1534 */
1533int batadv_bla_tx(struct batadv_priv *bat_priv, struct sk_buff *skb, 1535int batadv_bla_tx(struct batadv_priv *bat_priv, struct sk_buff *skb,
1534 unsigned short vid) 1536 unsigned short vid)
diff --git a/net/batman-adv/gateway_client.c b/net/batman-adv/gateway_client.c
index f105219f4a4b..7614af31daff 100644
--- a/net/batman-adv/gateway_client.c
+++ b/net/batman-adv/gateway_client.c
@@ -508,6 +508,7 @@ out:
508 return 0; 508 return 0;
509} 509}
510 510
511/* this call might reallocate skb data */
511static bool batadv_is_type_dhcprequest(struct sk_buff *skb, int header_len) 512static bool batadv_is_type_dhcprequest(struct sk_buff *skb, int header_len)
512{ 513{
513 int ret = false; 514 int ret = false;
@@ -568,6 +569,7 @@ out:
568 return ret; 569 return ret;
569} 570}
570 571
572/* this call might reallocate skb data */
571bool batadv_gw_is_dhcp_target(struct sk_buff *skb, unsigned int *header_len) 573bool batadv_gw_is_dhcp_target(struct sk_buff *skb, unsigned int *header_len)
572{ 574{
573 struct ethhdr *ethhdr; 575 struct ethhdr *ethhdr;
@@ -619,6 +621,12 @@ bool batadv_gw_is_dhcp_target(struct sk_buff *skb, unsigned int *header_len)
619 621
620 if (!pskb_may_pull(skb, *header_len + sizeof(*udphdr))) 622 if (!pskb_may_pull(skb, *header_len + sizeof(*udphdr)))
621 return false; 623 return false;
624
625 /* skb->data might have been reallocated by pskb_may_pull() */
626 ethhdr = (struct ethhdr *)skb->data;
627 if (ntohs(ethhdr->h_proto) == ETH_P_8021Q)
628 ethhdr = (struct ethhdr *)(skb->data + VLAN_HLEN);
629
622 udphdr = (struct udphdr *)(skb->data + *header_len); 630 udphdr = (struct udphdr *)(skb->data + *header_len);
623 *header_len += sizeof(*udphdr); 631 *header_len += sizeof(*udphdr);
624 632
@@ -634,12 +642,14 @@ bool batadv_gw_is_dhcp_target(struct sk_buff *skb, unsigned int *header_len)
634 return true; 642 return true;
635} 643}
636 644
645/* this call might reallocate skb data */
637bool batadv_gw_out_of_range(struct batadv_priv *bat_priv, 646bool batadv_gw_out_of_range(struct batadv_priv *bat_priv,
638 struct sk_buff *skb, struct ethhdr *ethhdr) 647 struct sk_buff *skb)
639{ 648{
640 struct batadv_neigh_node *neigh_curr = NULL, *neigh_old = NULL; 649 struct batadv_neigh_node *neigh_curr = NULL, *neigh_old = NULL;
641 struct batadv_orig_node *orig_dst_node = NULL; 650 struct batadv_orig_node *orig_dst_node = NULL;
642 struct batadv_gw_node *curr_gw = NULL; 651 struct batadv_gw_node *curr_gw = NULL;
652 struct ethhdr *ethhdr;
643 bool ret, out_of_range = false; 653 bool ret, out_of_range = false;
644 unsigned int header_len = 0; 654 unsigned int header_len = 0;
645 uint8_t curr_tq_avg; 655 uint8_t curr_tq_avg;
@@ -648,6 +658,7 @@ bool batadv_gw_out_of_range(struct batadv_priv *bat_priv,
648 if (!ret) 658 if (!ret)
649 goto out; 659 goto out;
650 660
661 ethhdr = (struct ethhdr *)skb->data;
651 orig_dst_node = batadv_transtable_search(bat_priv, ethhdr->h_source, 662 orig_dst_node = batadv_transtable_search(bat_priv, ethhdr->h_source,
652 ethhdr->h_dest); 663 ethhdr->h_dest);
653 if (!orig_dst_node) 664 if (!orig_dst_node)
diff --git a/net/batman-adv/gateway_client.h b/net/batman-adv/gateway_client.h
index 039902dca4a6..1037d75da51f 100644
--- a/net/batman-adv/gateway_client.h
+++ b/net/batman-adv/gateway_client.h
@@ -34,7 +34,6 @@ void batadv_gw_node_delete(struct batadv_priv *bat_priv,
34void batadv_gw_node_purge(struct batadv_priv *bat_priv); 34void batadv_gw_node_purge(struct batadv_priv *bat_priv);
35int batadv_gw_client_seq_print_text(struct seq_file *seq, void *offset); 35int batadv_gw_client_seq_print_text(struct seq_file *seq, void *offset);
36bool batadv_gw_is_dhcp_target(struct sk_buff *skb, unsigned int *header_len); 36bool batadv_gw_is_dhcp_target(struct sk_buff *skb, unsigned int *header_len);
37bool batadv_gw_out_of_range(struct batadv_priv *bat_priv, 37bool batadv_gw_out_of_range(struct batadv_priv *bat_priv, struct sk_buff *skb);
38 struct sk_buff *skb, struct ethhdr *ethhdr);
39 38
40#endif /* _NET_BATMAN_ADV_GATEWAY_CLIENT_H_ */ 39#endif /* _NET_BATMAN_ADV_GATEWAY_CLIENT_H_ */
diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
index 700d0b49742d..0f04e1c302b4 100644
--- a/net/batman-adv/soft-interface.c
+++ b/net/batman-adv/soft-interface.c
@@ -180,6 +180,9 @@ static int batadv_interface_tx(struct sk_buff *skb,
180 if (batadv_bla_tx(bat_priv, skb, vid)) 180 if (batadv_bla_tx(bat_priv, skb, vid))
181 goto dropped; 181 goto dropped;
182 182
183 /* skb->data might have been reallocated by batadv_bla_tx() */
184 ethhdr = (struct ethhdr *)skb->data;
185
183 /* Register the client MAC in the transtable */ 186 /* Register the client MAC in the transtable */
184 if (!is_multicast_ether_addr(ethhdr->h_source)) 187 if (!is_multicast_ether_addr(ethhdr->h_source))
185 batadv_tt_local_add(soft_iface, ethhdr->h_source, skb->skb_iif); 188 batadv_tt_local_add(soft_iface, ethhdr->h_source, skb->skb_iif);
@@ -220,6 +223,10 @@ static int batadv_interface_tx(struct sk_buff *skb,
220 default: 223 default:
221 break; 224 break;
222 } 225 }
226
227 /* reminder: ethhdr might have become unusable from here on
228 * (batadv_gw_is_dhcp_target() might have reallocated skb data)
229 */
223 } 230 }
224 231
225 /* ethernet packet should be broadcasted */ 232 /* ethernet packet should be broadcasted */
@@ -266,7 +273,7 @@ static int batadv_interface_tx(struct sk_buff *skb,
266 /* unicast packet */ 273 /* unicast packet */
267 } else { 274 } else {
268 if (atomic_read(&bat_priv->gw_mode) != BATADV_GW_MODE_OFF) { 275 if (atomic_read(&bat_priv->gw_mode) != BATADV_GW_MODE_OFF) {
269 ret = batadv_gw_out_of_range(bat_priv, skb, ethhdr); 276 ret = batadv_gw_out_of_range(bat_priv, skb);
270 if (ret) 277 if (ret)
271 goto dropped; 278 goto dropped;
272 } 279 }
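The batman-adv hunks above all enforce one rule: pskb_may_pull(), and any helper that may call it, can reallocate skb->data, so header pointers taken before the call must be recomputed afterwards. A user-space analogy using realloc() (illustration only, not kernel code):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Analogy: pskb_may_pull() may move skb->data just as realloc() may move a
 * buffer, so a pointer computed before the call has to be re-derived from
 * the (possibly new) base afterwards. */
int main(void)
{
	char *data = malloc(32);
	if (!data)
		return 1;
	strcpy(data, "ethernet header bytes");

	char *ethhdr = data;			/* cached pointer into the buffer */

	char *grown = realloc(data, 64 * 1024);	/* the "pull" may move the data */
	if (!grown) {
		free(data);
		return 1;
	}
	data = grown;

	ethhdr = data;				/* refresh; the old pointer may dangle */
	printf("%s\n", ethhdr);

	free(data);
	return 0;
}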
diff --git a/net/batman-adv/unicast.c b/net/batman-adv/unicast.c
index dc8b5d4dd636..857e1b8349ee 100644
--- a/net/batman-adv/unicast.c
+++ b/net/batman-adv/unicast.c
@@ -326,7 +326,9 @@ static bool batadv_unicast_push_and_fill_skb(struct sk_buff *skb, int hdr_size,
326 * @skb: the skb containing the payload to encapsulate 326 * @skb: the skb containing the payload to encapsulate
327 * @orig_node: the destination node 327 * @orig_node: the destination node
328 * 328 *
329 * Returns false if the payload could not be encapsulated or true otherwise 329 * Returns false if the payload could not be encapsulated or true otherwise.
330 *
331 * This call might reallocate skb data.
330 */ 332 */
331static bool batadv_unicast_prepare_skb(struct sk_buff *skb, 333static bool batadv_unicast_prepare_skb(struct sk_buff *skb,
332 struct batadv_orig_node *orig_node) 334 struct batadv_orig_node *orig_node)
@@ -343,7 +345,9 @@ static bool batadv_unicast_prepare_skb(struct sk_buff *skb,
343 * @orig_node: the destination node 345 * @orig_node: the destination node
344 * @packet_subtype: the batman 4addr packet subtype to use 346 * @packet_subtype: the batman 4addr packet subtype to use
345 * 347 *
346 * Returns false if the payload could not be encapsulated or true otherwise 348 * Returns false if the payload could not be encapsulated or true otherwise.
349 *
350 * This call might reallocate skb data.
347 */ 351 */
348bool batadv_unicast_4addr_prepare_skb(struct batadv_priv *bat_priv, 352bool batadv_unicast_4addr_prepare_skb(struct batadv_priv *bat_priv,
349 struct sk_buff *skb, 353 struct sk_buff *skb,
@@ -401,7 +405,7 @@ int batadv_unicast_generic_send_skb(struct batadv_priv *bat_priv,
401 struct batadv_neigh_node *neigh_node; 405 struct batadv_neigh_node *neigh_node;
402 int data_len = skb->len; 406 int data_len = skb->len;
403 int ret = NET_RX_DROP; 407 int ret = NET_RX_DROP;
404 unsigned int dev_mtu; 408 unsigned int dev_mtu, header_len;
405 409
406 /* get routing information */ 410 /* get routing information */
407 if (is_multicast_ether_addr(ethhdr->h_dest)) { 411 if (is_multicast_ether_addr(ethhdr->h_dest)) {
@@ -428,11 +432,17 @@ find_router:
428 432
429 switch (packet_type) { 433 switch (packet_type) {
430 case BATADV_UNICAST: 434 case BATADV_UNICAST:
431 batadv_unicast_prepare_skb(skb, orig_node); 435 if (!batadv_unicast_prepare_skb(skb, orig_node))
436 goto out;
437
438 header_len = sizeof(struct batadv_unicast_packet);
432 break; 439 break;
433 case BATADV_UNICAST_4ADDR: 440 case BATADV_UNICAST_4ADDR:
434 batadv_unicast_4addr_prepare_skb(bat_priv, skb, orig_node, 441 if (!batadv_unicast_4addr_prepare_skb(bat_priv, skb, orig_node,
435 packet_subtype); 442 packet_subtype))
443 goto out;
444
445 header_len = sizeof(struct batadv_unicast_4addr_packet);
436 break; 446 break;
437 default: 447 default:
438 /* this function supports UNICAST and UNICAST_4ADDR only. It 448 /* this function supports UNICAST and UNICAST_4ADDR only. It
@@ -441,6 +451,7 @@ find_router:
441 goto out; 451 goto out;
442 } 452 }
443 453
454 ethhdr = (struct ethhdr *)(skb->data + header_len);
444 unicast_packet = (struct batadv_unicast_packet *)skb->data; 455 unicast_packet = (struct batadv_unicast_packet *)skb->data;
445 456
446 /* inform the destination node that we are still missing a correct route 457 /* inform the destination node that we are still missing a correct route
diff --git a/net/bridge/br_fdb.c b/net/bridge/br_fdb.c
index 60aca9109a50..ffd5874f2592 100644
--- a/net/bridge/br_fdb.c
+++ b/net/bridge/br_fdb.c
@@ -161,7 +161,7 @@ void br_fdb_change_mac_address(struct net_bridge *br, const u8 *newaddr)
161 if (!pv) 161 if (!pv)
162 return; 162 return;
163 163
164 for_each_set_bit_from(vid, pv->vlan_bitmap, BR_VLAN_BITMAP_LEN) { 164 for_each_set_bit_from(vid, pv->vlan_bitmap, VLAN_N_VID) {
165 f = __br_fdb_get(br, br->dev->dev_addr, vid); 165 f = __br_fdb_get(br, br->dev->dev_addr, vid);
166 if (f && f->is_local && !f->dst) 166 if (f && f->is_local && !f->dst)
167 fdb_delete(br, f); 167 fdb_delete(br, f);
@@ -730,7 +730,7 @@ int br_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
730 /* VID was specified, so use it. */ 730 /* VID was specified, so use it. */
731 err = __br_fdb_add(ndm, p, addr, nlh_flags, vid); 731 err = __br_fdb_add(ndm, p, addr, nlh_flags, vid);
732 } else { 732 } else {
733 if (!pv || bitmap_empty(pv->vlan_bitmap, BR_VLAN_BITMAP_LEN)) { 733 if (!pv || bitmap_empty(pv->vlan_bitmap, VLAN_N_VID)) {
734 err = __br_fdb_add(ndm, p, addr, nlh_flags, 0); 734 err = __br_fdb_add(ndm, p, addr, nlh_flags, 0);
735 goto out; 735 goto out;
736 } 736 }
@@ -739,7 +739,7 @@ int br_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
739 * specify a VLAN. To be nice, add/update entry for every 739 * specify a VLAN. To be nice, add/update entry for every
740 * vlan on this port. 740 * vlan on this port.
741 */ 741 */
742 for_each_set_bit(vid, pv->vlan_bitmap, BR_VLAN_BITMAP_LEN) { 742 for_each_set_bit(vid, pv->vlan_bitmap, VLAN_N_VID) {
743 err = __br_fdb_add(ndm, p, addr, nlh_flags, vid); 743 err = __br_fdb_add(ndm, p, addr, nlh_flags, vid);
744 if (err) 744 if (err)
745 goto out; 745 goto out;
@@ -817,7 +817,7 @@ int br_fdb_delete(struct ndmsg *ndm, struct nlattr *tb[],
817 817
818 err = __br_fdb_delete(p, addr, vid); 818 err = __br_fdb_delete(p, addr, vid);
819 } else { 819 } else {
820 if (!pv || bitmap_empty(pv->vlan_bitmap, BR_VLAN_BITMAP_LEN)) { 820 if (!pv || bitmap_empty(pv->vlan_bitmap, VLAN_N_VID)) {
821 err = __br_fdb_delete(p, addr, 0); 821 err = __br_fdb_delete(p, addr, 0);
822 goto out; 822 goto out;
823 } 823 }
@@ -827,7 +827,7 @@ int br_fdb_delete(struct ndmsg *ndm, struct nlattr *tb[],
827 * vlan on this port. 827 * vlan on this port.
828 */ 828 */
829 err = -ENOENT; 829 err = -ENOENT;
830 for_each_set_bit(vid, pv->vlan_bitmap, BR_VLAN_BITMAP_LEN) { 830 for_each_set_bit(vid, pv->vlan_bitmap, VLAN_N_VID) {
831 err &= __br_fdb_delete(p, addr, vid); 831 err &= __br_fdb_delete(p, addr, vid);
832 } 832 }
833 } 833 }
diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
index 61c5e819380e..08e576ada0b2 100644
--- a/net/bridge/br_multicast.c
+++ b/net/bridge/br_multicast.c
@@ -1195,7 +1195,7 @@ static int br_ip6_multicast_query(struct net_bridge *br,
1195 max_delay = msecs_to_jiffies(ntohs(mld->mld_maxdelay)); 1195 max_delay = msecs_to_jiffies(ntohs(mld->mld_maxdelay));
1196 if (max_delay) 1196 if (max_delay)
1197 group = &mld->mld_mca; 1197 group = &mld->mld_mca;
1198 } else if (skb->len >= sizeof(*mld2q)) { 1198 } else {
1199 if (!pskb_may_pull(skb, sizeof(*mld2q))) { 1199 if (!pskb_may_pull(skb, sizeof(*mld2q))) {
1200 err = -EINVAL; 1200 err = -EINVAL;
1201 goto out; 1201 goto out;
diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c
index 1fc30abd3a52..b9259efa636e 100644
--- a/net/bridge/br_netlink.c
+++ b/net/bridge/br_netlink.c
@@ -132,7 +132,7 @@ static int br_fill_ifinfo(struct sk_buff *skb,
132 else 132 else
133 pv = br_get_vlan_info(br); 133 pv = br_get_vlan_info(br);
134 134
135 if (!pv || bitmap_empty(pv->vlan_bitmap, BR_VLAN_BITMAP_LEN)) 135 if (!pv || bitmap_empty(pv->vlan_bitmap, VLAN_N_VID))
136 goto done; 136 goto done;
137 137
138 af = nla_nest_start(skb, IFLA_AF_SPEC); 138 af = nla_nest_start(skb, IFLA_AF_SPEC);
@@ -140,7 +140,7 @@ static int br_fill_ifinfo(struct sk_buff *skb,
140 goto nla_put_failure; 140 goto nla_put_failure;
141 141
142 pvid = br_get_pvid(pv); 142 pvid = br_get_pvid(pv);
143 for_each_set_bit(vid, pv->vlan_bitmap, BR_VLAN_BITMAP_LEN) { 143 for_each_set_bit(vid, pv->vlan_bitmap, VLAN_N_VID) {
144 vinfo.vid = vid; 144 vinfo.vid = vid;
145 vinfo.flags = 0; 145 vinfo.flags = 0;
146 if (vid == pvid) 146 if (vid == pvid)
diff --git a/net/bridge/br_sysfs_br.c b/net/bridge/br_sysfs_br.c
index 394bb96b6087..3b9637fb7939 100644
--- a/net/bridge/br_sysfs_br.c
+++ b/net/bridge/br_sysfs_br.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Sysfs attributes of bridge ports 2 * Sysfs attributes of bridge
3 * Linux ethernet bridge 3 * Linux ethernet bridge
4 * 4 *
5 * Authors: 5 * Authors:
diff --git a/net/bridge/br_vlan.c b/net/bridge/br_vlan.c
index bd58b45f5f90..9a9ffe7e4019 100644
--- a/net/bridge/br_vlan.c
+++ b/net/bridge/br_vlan.c
@@ -108,7 +108,7 @@ static int __vlan_del(struct net_port_vlans *v, u16 vid)
108 108
109 clear_bit(vid, v->vlan_bitmap); 109 clear_bit(vid, v->vlan_bitmap);
110 v->num_vlans--; 110 v->num_vlans--;
111 if (bitmap_empty(v->vlan_bitmap, BR_VLAN_BITMAP_LEN)) { 111 if (bitmap_empty(v->vlan_bitmap, VLAN_N_VID)) {
112 if (v->port_idx) 112 if (v->port_idx)
113 rcu_assign_pointer(v->parent.port->vlan_info, NULL); 113 rcu_assign_pointer(v->parent.port->vlan_info, NULL);
114 else 114 else
@@ -122,7 +122,7 @@ static void __vlan_flush(struct net_port_vlans *v)
122{ 122{
123 smp_wmb(); 123 smp_wmb();
124 v->pvid = 0; 124 v->pvid = 0;
125 bitmap_zero(v->vlan_bitmap, BR_VLAN_BITMAP_LEN); 125 bitmap_zero(v->vlan_bitmap, VLAN_N_VID);
126 if (v->port_idx) 126 if (v->port_idx)
127 rcu_assign_pointer(v->parent.port->vlan_info, NULL); 127 rcu_assign_pointer(v->parent.port->vlan_info, NULL);
128 else 128 else
diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c
index 00ee068efc1c..b84a1b155bc1 100644
--- a/net/core/flow_dissector.c
+++ b/net/core/flow_dissector.c
@@ -65,6 +65,7 @@ ipv6:
65 nhoff += sizeof(struct ipv6hdr); 65 nhoff += sizeof(struct ipv6hdr);
66 break; 66 break;
67 } 67 }
68 case __constant_htons(ETH_P_8021AD):
68 case __constant_htons(ETH_P_8021Q): { 69 case __constant_htons(ETH_P_8021Q): {
69 const struct vlan_hdr *vlan; 70 const struct vlan_hdr *vlan;
70 struct vlan_hdr _vlan; 71 struct vlan_hdr _vlan;
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index 9232c68941ab..60533db8b72d 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -1441,16 +1441,18 @@ struct neigh_parms *neigh_parms_alloc(struct net_device *dev,
1441 atomic_set(&p->refcnt, 1); 1441 atomic_set(&p->refcnt, 1);
1442 p->reachable_time = 1442 p->reachable_time =
1443 neigh_rand_reach_time(p->base_reachable_time); 1443 neigh_rand_reach_time(p->base_reachable_time);
1444 dev_hold(dev);
1445 p->dev = dev;
1446 write_pnet(&p->net, hold_net(net));
1447 p->sysctl_table = NULL;
1444 1448
1445 if (ops->ndo_neigh_setup && ops->ndo_neigh_setup(dev, p)) { 1449 if (ops->ndo_neigh_setup && ops->ndo_neigh_setup(dev, p)) {
1450 release_net(net);
1451 dev_put(dev);
1446 kfree(p); 1452 kfree(p);
1447 return NULL; 1453 return NULL;
1448 } 1454 }
1449 1455
1450 dev_hold(dev);
1451 p->dev = dev;
1452 write_pnet(&p->net, hold_net(net));
1453 p->sysctl_table = NULL;
1454 write_lock_bh(&tbl->lock); 1456 write_lock_bh(&tbl->lock);
1455 p->next = tbl->parms.next; 1457 p->next = tbl->parms.next;
1456 tbl->parms.next = p; 1458 tbl->parms.next = p;
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index 3de740834d1f..ca198c1d1d30 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -2156,7 +2156,7 @@ int ndo_dflt_fdb_del(struct ndmsg *ndm,
2156 /* If aging addresses are supported device will need to 2156 /* If aging addresses are supported device will need to
2157 * implement its own handler for this. 2157 * implement its own handler for this.
2158 */ 2158 */
2159 if (ndm->ndm_state & NUD_PERMANENT) { 2159 if (!(ndm->ndm_state & NUD_PERMANENT)) {
2160 pr_info("%s: FDB only supports static addresses\n", dev->name); 2160 pr_info("%s: FDB only supports static addresses\n", dev->name);
2161 return -EINVAL; 2161 return -EINVAL;
2162 } 2162 }
@@ -2384,7 +2384,7 @@ static int rtnl_bridge_getlink(struct sk_buff *skb, struct netlink_callback *cb)
2384 struct nlattr *extfilt; 2384 struct nlattr *extfilt;
2385 u32 filter_mask = 0; 2385 u32 filter_mask = 0;
2386 2386
2387 extfilt = nlmsg_find_attr(cb->nlh, sizeof(struct rtgenmsg), 2387 extfilt = nlmsg_find_attr(cb->nlh, sizeof(struct ifinfomsg),
2388 IFLA_EXT_MASK); 2388 IFLA_EXT_MASK);
2389 if (extfilt) 2389 if (extfilt)
2390 filter_mask = nla_get_u32(extfilt); 2390 filter_mask = nla_get_u32(extfilt);
diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c
index ab3d814bc80a..109ee89f123e 100644
--- a/net/ipv4/esp4.c
+++ b/net/ipv4/esp4.c
@@ -477,7 +477,7 @@ static u32 esp4_get_mtu(struct xfrm_state *x, int mtu)
477 } 477 }
478 478
479 return ((mtu - x->props.header_len - crypto_aead_authsize(esp->aead) - 479 return ((mtu - x->props.header_len - crypto_aead_authsize(esp->aead) -
480 net_adj) & ~(align - 1)) + (net_adj - 2); 480 net_adj) & ~(align - 1)) + net_adj - 2;
481} 481}
482 482
483static void esp4_err(struct sk_buff *skb, u32 info) 483static void esp4_err(struct sk_buff *skb, u32 info)
diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c
index 108a1e9c9eac..3df6d3edb2a1 100644
--- a/net/ipv4/fib_trie.c
+++ b/net/ipv4/fib_trie.c
@@ -71,7 +71,6 @@
71#include <linux/init.h> 71#include <linux/init.h>
72#include <linux/list.h> 72#include <linux/list.h>
73#include <linux/slab.h> 73#include <linux/slab.h>
74#include <linux/prefetch.h>
75#include <linux/export.h> 74#include <linux/export.h>
76#include <net/net_namespace.h> 75#include <net/net_namespace.h>
77#include <net/ip.h> 76#include <net/ip.h>
@@ -1761,10 +1760,8 @@ static struct leaf *leaf_walk_rcu(struct tnode *p, struct rt_trie_node *c)
1761 if (!c) 1760 if (!c)
1762 continue; 1761 continue;
1763 1762
1764 if (IS_LEAF(c)) { 1763 if (IS_LEAF(c))
1765 prefetch(rcu_dereference_rtnl(p->child[idx]));
1766 return (struct leaf *) c; 1764 return (struct leaf *) c;
1767 }
1768 1765
1769 /* Rescan start scanning in new node */ 1766 /* Rescan start scanning in new node */
1770 p = (struct tnode *) c; 1767 p = (struct tnode *) c;
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index 1f6eab66f7ce..8d6939eeb492 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -383,7 +383,7 @@ static int ipgre_header(struct sk_buff *skb, struct net_device *dev,
383 if (daddr) 383 if (daddr)
384 memcpy(&iph->daddr, daddr, 4); 384 memcpy(&iph->daddr, daddr, 4);
385 if (iph->daddr) 385 if (iph->daddr)
386 return t->hlen; 386 return t->hlen + sizeof(*iph);
387 387
388 return -(t->hlen + sizeof(*iph)); 388 return -(t->hlen + sizeof(*iph));
389} 389}
diff --git a/net/ipv4/ip_tunnel_core.c b/net/ipv4/ip_tunnel_core.c
index 7167b08977df..850525b34899 100644
--- a/net/ipv4/ip_tunnel_core.c
+++ b/net/ipv4/ip_tunnel_core.c
@@ -76,9 +76,7 @@ int iptunnel_xmit(struct net *net, struct rtable *rt,
76 iph->daddr = dst; 76 iph->daddr = dst;
77 iph->saddr = src; 77 iph->saddr = src;
78 iph->ttl = ttl; 78 iph->ttl = ttl;
79 tunnel_ip_select_ident(skb, 79 __ip_select_ident(iph, &rt->dst, (skb_shinfo(skb)->gso_segs ?: 1) - 1);
80 (const struct iphdr *)skb_inner_network_header(skb),
81 &rt->dst);
82 80
83 err = ip_local_out(skb); 81 err = ip_local_out(skb);
84 if (unlikely(net_xmit_eval(err))) 82 if (unlikely(net_xmit_eval(err)))
diff --git a/net/ipv4/proc.c b/net/ipv4/proc.c
index 6577a1149a47..463bd1273346 100644
--- a/net/ipv4/proc.c
+++ b/net/ipv4/proc.c
@@ -273,7 +273,7 @@ static const struct snmp_mib snmp4_net_list[] = {
273 SNMP_MIB_ITEM("TCPFastOpenListenOverflow", LINUX_MIB_TCPFASTOPENLISTENOVERFLOW), 273 SNMP_MIB_ITEM("TCPFastOpenListenOverflow", LINUX_MIB_TCPFASTOPENLISTENOVERFLOW),
274 SNMP_MIB_ITEM("TCPFastOpenCookieReqd", LINUX_MIB_TCPFASTOPENCOOKIEREQD), 274 SNMP_MIB_ITEM("TCPFastOpenCookieReqd", LINUX_MIB_TCPFASTOPENCOOKIEREQD),
275 SNMP_MIB_ITEM("TCPSpuriousRtxHostQueues", LINUX_MIB_TCPSPURIOUS_RTX_HOSTQUEUES), 275 SNMP_MIB_ITEM("TCPSpuriousRtxHostQueues", LINUX_MIB_TCPSPURIOUS_RTX_HOSTQUEUES),
276 SNMP_MIB_ITEM("LowLatencyRxPackets", LINUX_MIB_LOWLATENCYRXPACKETS), 276 SNMP_MIB_ITEM("BusyPollRxPackets", LINUX_MIB_BUSYPOLLRXPACKETS),
277 SNMP_MIB_SENTINEL 277 SNMP_MIB_SENTINEL
278}; 278};
279 279
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 5423223e93c2..b2f6c74861af 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -1121,6 +1121,13 @@ new_segment:
1121 goto wait_for_memory; 1121 goto wait_for_memory;
1122 1122
1123 /* 1123 /*
1124 * All packets are restored as if they have
1125 * already been sent.
1126 */
1127 if (tp->repair)
1128 TCP_SKB_CB(skb)->when = tcp_time_stamp;
1129
1130 /*
1124 * Check whether we can use HW checksum. 1131 * Check whether we can use HW checksum.
1125 */ 1132 */
1126 if (sk->sk_route_caps & NETIF_F_ALL_CSUM) 1133 if (sk->sk_route_caps & NETIF_F_ALL_CSUM)
diff --git a/net/ipv4/tcp_cubic.c b/net/ipv4/tcp_cubic.c
index a9077f441cb2..b6ae92a51f58 100644
--- a/net/ipv4/tcp_cubic.c
+++ b/net/ipv4/tcp_cubic.c
@@ -206,8 +206,8 @@ static u32 cubic_root(u64 a)
206 */ 206 */
207static inline void bictcp_update(struct bictcp *ca, u32 cwnd) 207static inline void bictcp_update(struct bictcp *ca, u32 cwnd)
208{ 208{
209 u64 offs; 209 u32 delta, bic_target, max_cnt;
210 u32 delta, t, bic_target, max_cnt; 210 u64 offs, t;
211 211
212 ca->ack_cnt++; /* count the number of ACKs */ 212 ca->ack_cnt++; /* count the number of ACKs */
213 213
@@ -250,9 +250,11 @@ static inline void bictcp_update(struct bictcp *ca, u32 cwnd)
250 * if the cwnd < 1 million packets !!! 250 * if the cwnd < 1 million packets !!!
251 */ 251 */
252 252
253 t = (s32)(tcp_time_stamp - ca->epoch_start);
254 t += msecs_to_jiffies(ca->delay_min >> 3);
253 /* change the unit from HZ to bictcp_HZ */ 255 /* change the unit from HZ to bictcp_HZ */
254 t = ((tcp_time_stamp + msecs_to_jiffies(ca->delay_min>>3) 256 t <<= BICTCP_HZ;
255 - ca->epoch_start) << BICTCP_HZ) / HZ; 257 do_div(t, HZ);
256 258
257 if (t < ca->bic_K) /* t - K */ 259 if (t < ca->bic_K) /* t - K */
258 offs = ca->bic_K - t; 260 offs = ca->bic_K - t;
@@ -414,7 +416,7 @@ static void bictcp_acked(struct sock *sk, u32 cnt, s32 rtt_us)
414 return; 416 return;
415 417
416 /* Discard delay samples right after fast recovery */ 418 /* Discard delay samples right after fast recovery */
417 if ((s32)(tcp_time_stamp - ca->epoch_start) < HZ) 419 if (ca->epoch_start && (s32)(tcp_time_stamp - ca->epoch_start) < HZ)
418 return; 420 return;
419 421
420 delay = (rtt_us << 3) / USEC_PER_MSEC; 422 delay = (rtt_us << 3) / USEC_PER_MSEC;
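The cubic change widens t to 64 bits because the old expression shifted a 32-bit jiffies delta left by BICTCP_HZ (10 in tcp_cubic.c) before dividing by HZ; assuming HZ=1000, that wraps once roughly 2^22 jiffies (about 70 minutes) have elapsed since epoch_start. A small demonstration of the wrap (illustration only, not kernel code):

#include <stdio.h>
#include <stdint.h>

/* Demonstration of the overflow fixed above, assuming HZ = 1000 and the
 * file's BICTCP_HZ = 10 (time converted to 1/1024-second units). */
#define BICTCP_HZ 10

int main(void)
{
	uint32_t hz = 1000;
	uint32_t t = 5u * 1000 * 1000;	/* ~5000 s since epoch_start, in jiffies */

	uint32_t narrow = (t << BICTCP_HZ) / hz;		/* wraps past 2^22 jiffies */
	uint64_t wide = ((uint64_t)t << BICTCP_HZ) / hz;	/* what the patch computes */

	printf("32-bit: %u\n", narrow);
	printf("64-bit: %llu\n", (unsigned long long)wide);
	return 0;
}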
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index da4241c8c7da..498ea99194af 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -1126,12 +1126,10 @@ retry:
1126 if (ifp->flags & IFA_F_OPTIMISTIC) 1126 if (ifp->flags & IFA_F_OPTIMISTIC)
1127 addr_flags |= IFA_F_OPTIMISTIC; 1127 addr_flags |= IFA_F_OPTIMISTIC;
1128 1128
1129 ift = !max_addresses || 1129 ift = ipv6_add_addr(idev, &addr, NULL, tmp_plen,
1130 ipv6_count_addresses(idev) < max_addresses ? 1130 ipv6_addr_scope(&addr), addr_flags,
1131 ipv6_add_addr(idev, &addr, NULL, tmp_plen, 1131 tmp_valid_lft, tmp_prefered_lft);
1132 ipv6_addr_scope(&addr), addr_flags, 1132 if (IS_ERR(ift)) {
1133 tmp_valid_lft, tmp_prefered_lft) : NULL;
1134 if (IS_ERR_OR_NULL(ift)) {
1135 in6_ifa_put(ifp); 1133 in6_ifa_put(ifp);
1136 in6_dev_put(idev); 1134 in6_dev_put(idev);
1137 pr_info("%s: retry temporary address regeneration\n", __func__); 1135 pr_info("%s: retry temporary address regeneration\n", __func__);
diff --git a/net/ipv6/esp6.c b/net/ipv6/esp6.c
index 40ffd72243a4..aeac0dc3635d 100644
--- a/net/ipv6/esp6.c
+++ b/net/ipv6/esp6.c
@@ -425,7 +425,7 @@ static u32 esp6_get_mtu(struct xfrm_state *x, int mtu)
425 net_adj = 0; 425 net_adj = 0;
426 426
427 return ((mtu - x->props.header_len - crypto_aead_authsize(esp->aead) - 427 return ((mtu - x->props.header_len - crypto_aead_authsize(esp->aead) -
428 net_adj) & ~(align - 1)) + (net_adj - 2); 428 net_adj) & ~(align - 1)) + net_adj - 2;
429} 429}
430 430
431static void esp6_err(struct sk_buff *skb, struct inet6_skb_parm *opt, 431static void esp6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
index bff3d821c7eb..c4ff5bbb45c4 100644
--- a/net/ipv6/ip6_fib.c
+++ b/net/ipv6/ip6_fib.c
@@ -993,14 +993,22 @@ static struct fib6_node * fib6_lookup_1(struct fib6_node *root,
993 993
994 if (ipv6_prefix_equal(&key->addr, args->addr, key->plen)) { 994 if (ipv6_prefix_equal(&key->addr, args->addr, key->plen)) {
995#ifdef CONFIG_IPV6_SUBTREES 995#ifdef CONFIG_IPV6_SUBTREES
996 if (fn->subtree) 996 if (fn->subtree) {
997 fn = fib6_lookup_1(fn->subtree, args + 1); 997 struct fib6_node *sfn;
998 sfn = fib6_lookup_1(fn->subtree,
999 args + 1);
1000 if (!sfn)
1001 goto backtrack;
1002 fn = sfn;
1003 }
998#endif 1004#endif
999 if (!fn || fn->fn_flags & RTN_RTINFO) 1005 if (fn->fn_flags & RTN_RTINFO)
1000 return fn; 1006 return fn;
1001 } 1007 }
1002 } 1008 }
1003 1009#ifdef CONFIG_IPV6_SUBTREES
1010backtrack:
1011#endif
1004 if (fn->fn_flags & RTN_ROOT) 1012 if (fn->fn_flags & RTN_ROOT)
1005 break; 1013 break;
1006 1014
diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c
index 79aa9652ed86..04d31c2fbef1 100644
--- a/net/ipv6/ndisc.c
+++ b/net/ipv6/ndisc.c
@@ -1369,8 +1369,10 @@ static void ndisc_redirect_rcv(struct sk_buff *skb)
1369 if (!ndisc_parse_options(msg->opt, ndoptlen, &ndopts)) 1369 if (!ndisc_parse_options(msg->opt, ndoptlen, &ndopts))
1370 return; 1370 return;
1371 1371
1372 if (!ndopts.nd_opts_rh) 1372 if (!ndopts.nd_opts_rh) {
1373 ip6_redirect_no_header(skb, dev_net(skb->dev), 0, 0);
1373 return; 1374 return;
1375 }
1374 1376
1375 hdr = (u8 *)ndopts.nd_opts_rh; 1377 hdr = (u8 *)ndopts.nd_opts_rh;
1376 hdr += 8; 1378 hdr += 8;
diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c
index 790d9f4b8b0b..1aeb473b2cc6 100644
--- a/net/ipv6/reassembly.c
+++ b/net/ipv6/reassembly.c
@@ -490,6 +490,7 @@ static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
490 ipv6_hdr(head)->payload_len = htons(payload_len); 490 ipv6_hdr(head)->payload_len = htons(payload_len);
491 ipv6_change_dsfield(ipv6_hdr(head), 0xff, ecn); 491 ipv6_change_dsfield(ipv6_hdr(head), 0xff, ecn);
492 IP6CB(head)->nhoff = nhoff; 492 IP6CB(head)->nhoff = nhoff;
493 IP6CB(head)->flags |= IP6SKB_FRAGMENTED;
493 494
494 /* Yes, and fold redundant checksum back. 8) */ 495 /* Yes, and fold redundant checksum back. 8) */
495 if (head->ip_summed == CHECKSUM_COMPLETE) 496 if (head->ip_summed == CHECKSUM_COMPLETE)
@@ -524,6 +525,9 @@ static int ipv6_frag_rcv(struct sk_buff *skb)
524 struct net *net = dev_net(skb_dst(skb)->dev); 525 struct net *net = dev_net(skb_dst(skb)->dev);
525 int evicted; 526 int evicted;
526 527
528 if (IP6CB(skb)->flags & IP6SKB_FRAGMENTED)
529 goto fail_hdr;
530
527 IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_REASMREQDS); 531 IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_REASMREQDS);
528 532
529 /* Jumbo payload inhibits frag. header */ 533 /* Jumbo payload inhibits frag. header */
@@ -544,6 +548,7 @@ static int ipv6_frag_rcv(struct sk_buff *skb)
544 ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_REASMOKS); 548 ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_REASMOKS);
545 549
546 IP6CB(skb)->nhoff = (u8 *)fhdr - skb_network_header(skb); 550 IP6CB(skb)->nhoff = (u8 *)fhdr - skb_network_header(skb);
551 IP6CB(skb)->flags |= IP6SKB_FRAGMENTED;
547 return 1; 552 return 1;
548 } 553 }
549 554
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index b70f8979003b..8d9a93ed9c59 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -1178,6 +1178,27 @@ void ip6_redirect(struct sk_buff *skb, struct net *net, int oif, u32 mark)
1178} 1178}
1179EXPORT_SYMBOL_GPL(ip6_redirect); 1179EXPORT_SYMBOL_GPL(ip6_redirect);
1180 1180
1181void ip6_redirect_no_header(struct sk_buff *skb, struct net *net, int oif,
1182 u32 mark)
1183{
1184 const struct ipv6hdr *iph = ipv6_hdr(skb);
1185 const struct rd_msg *msg = (struct rd_msg *)icmp6_hdr(skb);
1186 struct dst_entry *dst;
1187 struct flowi6 fl6;
1188
1189 memset(&fl6, 0, sizeof(fl6));
1190 fl6.flowi6_oif = oif;
1191 fl6.flowi6_mark = mark;
1192 fl6.flowi6_flags = 0;
1193 fl6.daddr = msg->dest;
1194 fl6.saddr = iph->daddr;
1195
1196 dst = ip6_route_output(net, NULL, &fl6);
1197 if (!dst->error)
1198 rt6_do_redirect(dst, NULL, skb);
1199 dst_release(dst);
1200}
1201
1181void ip6_sk_redirect(struct sk_buff *skb, struct sock *sk) 1202void ip6_sk_redirect(struct sk_buff *skb, struct sock *sk)
1182{ 1203{
1183 ip6_redirect(skb, sock_net(sk), sk->sk_bound_dev_if, sk->sk_mark); 1204 ip6_redirect(skb, sock_net(sk), sk->sk_bound_dev_if, sk->sk_mark);
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index ae31968d42d3..cc9e02d79b55 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -31,10 +31,12 @@
31#include "led.h" 31#include "led.h"
32 32
33#define IEEE80211_AUTH_TIMEOUT (HZ / 5) 33#define IEEE80211_AUTH_TIMEOUT (HZ / 5)
34#define IEEE80211_AUTH_TIMEOUT_LONG (HZ / 2)
34#define IEEE80211_AUTH_TIMEOUT_SHORT (HZ / 10) 35#define IEEE80211_AUTH_TIMEOUT_SHORT (HZ / 10)
35#define IEEE80211_AUTH_MAX_TRIES 3 36#define IEEE80211_AUTH_MAX_TRIES 3
36#define IEEE80211_AUTH_WAIT_ASSOC (HZ * 5) 37#define IEEE80211_AUTH_WAIT_ASSOC (HZ * 5)
37#define IEEE80211_ASSOC_TIMEOUT (HZ / 5) 38#define IEEE80211_ASSOC_TIMEOUT (HZ / 5)
39#define IEEE80211_ASSOC_TIMEOUT_LONG (HZ / 2)
38#define IEEE80211_ASSOC_TIMEOUT_SHORT (HZ / 10) 40#define IEEE80211_ASSOC_TIMEOUT_SHORT (HZ / 10)
39#define IEEE80211_ASSOC_MAX_TRIES 3 41#define IEEE80211_ASSOC_MAX_TRIES 3
40 42
@@ -209,8 +211,9 @@ ieee80211_determine_chantype(struct ieee80211_sub_if_data *sdata,
209 struct ieee80211_channel *channel, 211 struct ieee80211_channel *channel,
210 const struct ieee80211_ht_operation *ht_oper, 212 const struct ieee80211_ht_operation *ht_oper,
211 const struct ieee80211_vht_operation *vht_oper, 213 const struct ieee80211_vht_operation *vht_oper,
212 struct cfg80211_chan_def *chandef, bool verbose) 214 struct cfg80211_chan_def *chandef, bool tracking)
213{ 215{
216 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
214 struct cfg80211_chan_def vht_chandef; 217 struct cfg80211_chan_def vht_chandef;
215 u32 ht_cfreq, ret; 218 u32 ht_cfreq, ret;
216 219
@@ -229,7 +232,7 @@ ieee80211_determine_chantype(struct ieee80211_sub_if_data *sdata,
229 ht_cfreq = ieee80211_channel_to_frequency(ht_oper->primary_chan, 232 ht_cfreq = ieee80211_channel_to_frequency(ht_oper->primary_chan,
230 channel->band); 233 channel->band);
231 /* check that channel matches the right operating channel */ 234 /* check that channel matches the right operating channel */
232 if (channel->center_freq != ht_cfreq) { 235 if (!tracking && channel->center_freq != ht_cfreq) {
233 /* 236 /*
234 * It's possible that some APs are confused here; 237 * It's possible that some APs are confused here;
235 * Netgear WNDR3700 sometimes reports 4 higher than 238 * Netgear WNDR3700 sometimes reports 4 higher than
@@ -237,11 +240,10 @@ ieee80211_determine_chantype(struct ieee80211_sub_if_data *sdata,
237 * since we look at probe response/beacon data here 240 * since we look at probe response/beacon data here
238 * it should be OK. 241 * it should be OK.
239 */ 242 */
240 if (verbose) 243 sdata_info(sdata,
241 sdata_info(sdata, 244 "Wrong control channel: center-freq: %d ht-cfreq: %d ht->primary_chan: %d band: %d - Disabling HT\n",
242 "Wrong control channel: center-freq: %d ht-cfreq: %d ht->primary_chan: %d band: %d - Disabling HT\n", 245 channel->center_freq, ht_cfreq,
243 channel->center_freq, ht_cfreq, 246 ht_oper->primary_chan, channel->band);
244 ht_oper->primary_chan, channel->band);
245 ret = IEEE80211_STA_DISABLE_HT | IEEE80211_STA_DISABLE_VHT; 247 ret = IEEE80211_STA_DISABLE_HT | IEEE80211_STA_DISABLE_VHT;
246 goto out; 248 goto out;
247 } 249 }
@@ -295,7 +297,7 @@ ieee80211_determine_chantype(struct ieee80211_sub_if_data *sdata,
295 channel->band); 297 channel->band);
296 break; 298 break;
297 default: 299 default:
298 if (verbose) 300 if (!(ifmgd->flags & IEEE80211_STA_DISABLE_VHT))
299 sdata_info(sdata, 301 sdata_info(sdata,
300 "AP VHT operation IE has invalid channel width (%d), disable VHT\n", 302 "AP VHT operation IE has invalid channel width (%d), disable VHT\n",
301 vht_oper->chan_width); 303 vht_oper->chan_width);
@@ -304,7 +306,7 @@ ieee80211_determine_chantype(struct ieee80211_sub_if_data *sdata,
304 } 306 }
305 307
306 if (!cfg80211_chandef_valid(&vht_chandef)) { 308 if (!cfg80211_chandef_valid(&vht_chandef)) {
307 if (verbose) 309 if (!(ifmgd->flags & IEEE80211_STA_DISABLE_VHT))
308 sdata_info(sdata, 310 sdata_info(sdata,
309 "AP VHT information is invalid, disable VHT\n"); 311 "AP VHT information is invalid, disable VHT\n");
310 ret = IEEE80211_STA_DISABLE_VHT; 312 ret = IEEE80211_STA_DISABLE_VHT;
@@ -317,7 +319,7 @@ ieee80211_determine_chantype(struct ieee80211_sub_if_data *sdata,
317 } 319 }
318 320
319 if (!cfg80211_chandef_compatible(chandef, &vht_chandef)) { 321 if (!cfg80211_chandef_compatible(chandef, &vht_chandef)) {
320 if (verbose) 322 if (!(ifmgd->flags & IEEE80211_STA_DISABLE_VHT))
321 sdata_info(sdata, 323 sdata_info(sdata,
322 "AP VHT information doesn't match HT, disable VHT\n"); 324 "AP VHT information doesn't match HT, disable VHT\n");
323 ret = IEEE80211_STA_DISABLE_VHT; 325 ret = IEEE80211_STA_DISABLE_VHT;
@@ -333,18 +335,27 @@ out:
333 if (ret & IEEE80211_STA_DISABLE_VHT) 335 if (ret & IEEE80211_STA_DISABLE_VHT)
334 vht_chandef = *chandef; 336 vht_chandef = *chandef;
335 337
338 /*
339 * Ignore the DISABLED flag when we're already connected and only
340 * tracking the APs beacon for bandwidth changes - otherwise we
341 * might get disconnected here if we connect to an AP, update our
342 * regulatory information based on the AP's country IE and the
343 * information we have is wrong/outdated and disables the channel
344 * that we're actually using for the connection to the AP.
345 */
336 while (!cfg80211_chandef_usable(sdata->local->hw.wiphy, chandef, 346 while (!cfg80211_chandef_usable(sdata->local->hw.wiphy, chandef,
337 IEEE80211_CHAN_DISABLED)) { 347 tracking ? 0 :
348 IEEE80211_CHAN_DISABLED)) {
338 if (WARN_ON(chandef->width == NL80211_CHAN_WIDTH_20_NOHT)) { 349 if (WARN_ON(chandef->width == NL80211_CHAN_WIDTH_20_NOHT)) {
339 ret = IEEE80211_STA_DISABLE_HT | 350 ret = IEEE80211_STA_DISABLE_HT |
340 IEEE80211_STA_DISABLE_VHT; 351 IEEE80211_STA_DISABLE_VHT;
341 goto out; 352 break;
342 } 353 }
343 354
344 ret |= chandef_downgrade(chandef); 355 ret |= chandef_downgrade(chandef);
345 } 356 }
346 357
347 if (chandef->width != vht_chandef.width && verbose) 358 if (chandef->width != vht_chandef.width && !tracking)
348 sdata_info(sdata, 359 sdata_info(sdata,
349 "capabilities/regulatory prevented using AP HT/VHT configuration, downgraded\n"); 360 "capabilities/regulatory prevented using AP HT/VHT configuration, downgraded\n");
350 361
@@ -384,7 +395,7 @@ static int ieee80211_config_bw(struct ieee80211_sub_if_data *sdata,
384 395
385 /* calculate new channel (type) based on HT/VHT operation IEs */ 396 /* calculate new channel (type) based on HT/VHT operation IEs */
386 flags = ieee80211_determine_chantype(sdata, sband, chan, ht_oper, 397 flags = ieee80211_determine_chantype(sdata, sband, chan, ht_oper,
387 vht_oper, &chandef, false); 398 vht_oper, &chandef, true);
388 399
389 /* 400 /*
390 * Downgrade the new channel if we associated with restricted 401 * Downgrade the new channel if we associated with restricted
@@ -3394,10 +3405,13 @@ static int ieee80211_probe_auth(struct ieee80211_sub_if_data *sdata)
3394 3405
3395 if (tx_flags == 0) { 3406 if (tx_flags == 0) {
3396 auth_data->timeout = jiffies + IEEE80211_AUTH_TIMEOUT; 3407 auth_data->timeout = jiffies + IEEE80211_AUTH_TIMEOUT;
3397 ifmgd->auth_data->timeout_started = true; 3408 auth_data->timeout_started = true;
3398 run_again(sdata, auth_data->timeout); 3409 run_again(sdata, auth_data->timeout);
3399 } else { 3410 } else {
3400 auth_data->timeout_started = false; 3411 auth_data->timeout =
3412 round_jiffies_up(jiffies + IEEE80211_AUTH_TIMEOUT_LONG);
3413 auth_data->timeout_started = true;
3414 run_again(sdata, auth_data->timeout);
3401 } 3415 }
3402 3416
3403 return 0; 3417 return 0;
@@ -3434,7 +3448,11 @@ static int ieee80211_do_assoc(struct ieee80211_sub_if_data *sdata)
3434 assoc_data->timeout_started = true; 3448 assoc_data->timeout_started = true;
3435 run_again(sdata, assoc_data->timeout); 3449 run_again(sdata, assoc_data->timeout);
3436 } else { 3450 } else {
3437 assoc_data->timeout_started = false; 3451 assoc_data->timeout =
3452 round_jiffies_up(jiffies +
3453 IEEE80211_ASSOC_TIMEOUT_LONG);
3454 assoc_data->timeout_started = true;
3455 run_again(sdata, assoc_data->timeout);
3438 } 3456 }
3439 3457
3440 return 0; 3458 return 0;
@@ -3829,7 +3847,7 @@ static int ieee80211_prep_channel(struct ieee80211_sub_if_data *sdata,
3829 ifmgd->flags |= ieee80211_determine_chantype(sdata, sband, 3847 ifmgd->flags |= ieee80211_determine_chantype(sdata, sband,
3830 cbss->channel, 3848 cbss->channel,
3831 ht_oper, vht_oper, 3849 ht_oper, vht_oper,
3832 &chandef, true); 3850 &chandef, false);
3833 3851
3834 sdata->needed_rx_chains = min(ieee80211_ht_vht_rx_chains(sdata, cbss), 3852 sdata->needed_rx_chains = min(ieee80211_ht_vht_rx_chains(sdata, cbss),
3835 local->rx_chains); 3853 local->rx_chains);
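
The mac80211 hunks above replace the old verbose argument with a tracking flag and, while only tracking an existing association, stop treating IEEE80211_CHAN_DISABLED as a prohibited flag in the usability check. A minimal userspace sketch of that masking idea follows; the flag values and helper names are invented for illustration and are not the mac80211 API.

/* Sketch (not mac80211 code): when tracking an existing association the
 * DISABLED prohibition is masked out, so a stale regulatory update cannot
 * force a disconnect; for a fresh association it still applies. */
#include <stdbool.h>
#include <stdio.h>

#define CHAN_DISABLED 0x1   /* assumed flag value, illustration only */

static bool chandef_usable(unsigned chan_flags, unsigned prohibited)
{
    /* usable unless the channel carries a prohibited flag */
    return (chan_flags & prohibited) == 0;
}

int main(void)
{
    unsigned chan_flags = CHAN_DISABLED;   /* e.g. disabled by country IE */
    bool tracking = true;                  /* already associated */

    unsigned prohibited = tracking ? 0 : CHAN_DISABLED;

    printf("usable while tracking: %s\n",
           chandef_usable(chan_flags, prohibited) ? "yes" : "no");
    printf("usable for new assoc:  %s\n",
           chandef_usable(chan_flags, CHAN_DISABLED) ? "yes" : "no");
    return 0;
}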
diff --git a/net/netfilter/nf_conntrack_proto_tcp.c b/net/netfilter/nf_conntrack_proto_tcp.c
index 7dcc376eea5f..2f8010707d01 100644
--- a/net/netfilter/nf_conntrack_proto_tcp.c
+++ b/net/netfilter/nf_conntrack_proto_tcp.c
@@ -526,7 +526,7 @@ static bool tcp_in_window(const struct nf_conn *ct,
526 const struct nf_conntrack_tuple *tuple = &ct->tuplehash[dir].tuple; 526 const struct nf_conntrack_tuple *tuple = &ct->tuplehash[dir].tuple;
527 __u32 seq, ack, sack, end, win, swin; 527 __u32 seq, ack, sack, end, win, swin;
528 s16 receiver_offset; 528 s16 receiver_offset;
529 bool res; 529 bool res, in_recv_win;
530 530
531 /* 531 /*
532 * Get the required data from the packet. 532 * Get the required data from the packet.
@@ -649,14 +649,18 @@ static bool tcp_in_window(const struct nf_conn *ct,
649 receiver->td_end, receiver->td_maxend, receiver->td_maxwin, 649 receiver->td_end, receiver->td_maxend, receiver->td_maxwin,
650 receiver->td_scale); 650 receiver->td_scale);
651 651
652 /* Is the ending sequence in the receive window (if available)? */
653 in_recv_win = !receiver->td_maxwin ||
654 after(end, sender->td_end - receiver->td_maxwin - 1);
655
652 pr_debug("tcp_in_window: I=%i II=%i III=%i IV=%i\n", 656 pr_debug("tcp_in_window: I=%i II=%i III=%i IV=%i\n",
653 before(seq, sender->td_maxend + 1), 657 before(seq, sender->td_maxend + 1),
654 after(end, sender->td_end - receiver->td_maxwin - 1), 658 (in_recv_win ? 1 : 0),
655 before(sack, receiver->td_end + 1), 659 before(sack, receiver->td_end + 1),
656 after(sack, receiver->td_end - MAXACKWINDOW(sender) - 1)); 660 after(sack, receiver->td_end - MAXACKWINDOW(sender) - 1));
657 661
658 if (before(seq, sender->td_maxend + 1) && 662 if (before(seq, sender->td_maxend + 1) &&
659 after(end, sender->td_end - receiver->td_maxwin - 1) && 663 in_recv_win &&
660 before(sack, receiver->td_end + 1) && 664 before(sack, receiver->td_end + 1) &&
661 after(sack, receiver->td_end - MAXACKWINDOW(sender) - 1)) { 665 after(sack, receiver->td_end - MAXACKWINDOW(sender) - 1)) {
662 /* 666 /*
@@ -725,7 +729,7 @@ static bool tcp_in_window(const struct nf_conn *ct,
725 nf_log_packet(net, pf, 0, skb, NULL, NULL, NULL, 729 nf_log_packet(net, pf, 0, skb, NULL, NULL, NULL,
726 "nf_ct_tcp: %s ", 730 "nf_ct_tcp: %s ",
727 before(seq, sender->td_maxend + 1) ? 731 before(seq, sender->td_maxend + 1) ?
728 after(end, sender->td_end - receiver->td_maxwin - 1) ? 732 in_recv_win ?
729 before(sack, receiver->td_end + 1) ? 733 before(sack, receiver->td_end + 1) ?
730 after(sack, receiver->td_end - MAXACKWINDOW(sender) - 1) ? "BUG" 734 after(sack, receiver->td_end - MAXACKWINDOW(sender) - 1) ? "BUG"
731 : "ACK is under the lower bound (possible overly delayed ACK)" 735 : "ACK is under the lower bound (possible overly delayed ACK)"
diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c
index 962e9792e317..d92cc317bf8b 100644
--- a/net/netfilter/nfnetlink_log.c
+++ b/net/netfilter/nfnetlink_log.c
@@ -419,6 +419,7 @@ __build_packet_message(struct nfnl_log_net *log,
419 nfmsg->version = NFNETLINK_V0; 419 nfmsg->version = NFNETLINK_V0;
420 nfmsg->res_id = htons(inst->group_num); 420 nfmsg->res_id = htons(inst->group_num);
421 421
422 memset(&pmsg, 0, sizeof(pmsg));
422 pmsg.hw_protocol = skb->protocol; 423 pmsg.hw_protocol = skb->protocol;
423 pmsg.hook = hooknum; 424 pmsg.hook = hooknum;
424 425
@@ -498,7 +499,10 @@ __build_packet_message(struct nfnl_log_net *log,
498 if (indev && skb->dev && 499 if (indev && skb->dev &&
499 skb->mac_header != skb->network_header) { 500 skb->mac_header != skb->network_header) {
500 struct nfulnl_msg_packet_hw phw; 501 struct nfulnl_msg_packet_hw phw;
501 int len = dev_parse_header(skb, phw.hw_addr); 502 int len;
503
504 memset(&phw, 0, sizeof(phw));
505 len = dev_parse_header(skb, phw.hw_addr);
502 if (len > 0) { 506 if (len > 0) {
503 phw.hw_addrlen = htons(len); 507 phw.hw_addrlen = htons(len);
504 if (nla_put(inst->skb, NFULA_HWADDR, sizeof(phw), &phw)) 508 if (nla_put(inst->skb, NFULA_HWADDR, sizeof(phw), &phw))
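
Both netlink hunks (here and in nfnetlink_queue below) zero the on-stack structure before filling only some of its fields, so padding and unwritten members cannot carry old stack bytes into the message. A small userspace sketch of the same pattern; the struct layout and values are made up for illustration:

#include <stdio.h>
#include <string.h>

struct hw_addr_msg {
    unsigned short hw_addrlen;
    unsigned short pad;
    unsigned char  hw_addr[8];
};

int main(void)
{
    struct hw_addr_msg phw;

    memset(&phw, 0, sizeof(phw));          /* nothing uninitialised leaks */
    phw.hw_addrlen = 6;
    memcpy(phw.hw_addr, "\x00\x11\x22\x33\x44\x55", 6);

    printf("len=%u first=%02x trailing byte=%02x\n",
           phw.hw_addrlen, phw.hw_addr[0], phw.hw_addr[7]);
    return 0;
}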
diff --git a/net/netfilter/nfnetlink_queue_core.c b/net/netfilter/nfnetlink_queue_core.c
index 971ea145ab3e..8a703c3dd318 100644
--- a/net/netfilter/nfnetlink_queue_core.c
+++ b/net/netfilter/nfnetlink_queue_core.c
@@ -463,7 +463,10 @@ nfqnl_build_packet_message(struct nfqnl_instance *queue,
463 if (indev && entskb->dev && 463 if (indev && entskb->dev &&
464 entskb->mac_header != entskb->network_header) { 464 entskb->mac_header != entskb->network_header) {
465 struct nfqnl_msg_packet_hw phw; 465 struct nfqnl_msg_packet_hw phw;
466 int len = dev_parse_header(entskb, phw.hw_addr); 466 int len;
467
468 memset(&phw, 0, sizeof(phw));
469 len = dev_parse_header(entskb, phw.hw_addr);
467 if (len) { 470 if (len) {
468 phw.hw_addrlen = htons(len); 471 phw.hw_addrlen = htons(len);
469 if (nla_put(skb, NFQA_HWADDR, sizeof(phw), &phw)) 472 if (nla_put(skb, NFQA_HWADDR, sizeof(phw), &phw))
diff --git a/net/netfilter/xt_TCPMSS.c b/net/netfilter/xt_TCPMSS.c
index 7011c71646f0..6113cc7efffc 100644
--- a/net/netfilter/xt_TCPMSS.c
+++ b/net/netfilter/xt_TCPMSS.c
@@ -52,7 +52,8 @@ tcpmss_mangle_packet(struct sk_buff *skb,
52{ 52{
53 const struct xt_tcpmss_info *info = par->targinfo; 53 const struct xt_tcpmss_info *info = par->targinfo;
54 struct tcphdr *tcph; 54 struct tcphdr *tcph;
55 unsigned int tcplen, i; 55 int len, tcp_hdrlen;
56 unsigned int i;
56 __be16 oldval; 57 __be16 oldval;
57 u16 newmss; 58 u16 newmss;
58 u8 *opt; 59 u8 *opt;
@@ -64,11 +65,14 @@ tcpmss_mangle_packet(struct sk_buff *skb,
64 if (!skb_make_writable(skb, skb->len)) 65 if (!skb_make_writable(skb, skb->len))
65 return -1; 66 return -1;
66 67
67 tcplen = skb->len - tcphoff; 68 len = skb->len - tcphoff;
69 if (len < (int)sizeof(struct tcphdr))
70 return -1;
71
68 tcph = (struct tcphdr *)(skb_network_header(skb) + tcphoff); 72 tcph = (struct tcphdr *)(skb_network_header(skb) + tcphoff);
73 tcp_hdrlen = tcph->doff * 4;
69 74
70 /* Header cannot be larger than the packet */ 75 if (len < tcp_hdrlen)
71 if (tcplen < tcph->doff*4)
72 return -1; 76 return -1;
73 77
74 if (info->mss == XT_TCPMSS_CLAMP_PMTU) { 78 if (info->mss == XT_TCPMSS_CLAMP_PMTU) {
@@ -87,9 +91,8 @@ tcpmss_mangle_packet(struct sk_buff *skb,
87 newmss = info->mss; 91 newmss = info->mss;
88 92
89 opt = (u_int8_t *)tcph; 93 opt = (u_int8_t *)tcph;
90 for (i = sizeof(struct tcphdr); i < tcph->doff*4; i += optlen(opt, i)) { 94 for (i = sizeof(struct tcphdr); i <= tcp_hdrlen - TCPOLEN_MSS; i += optlen(opt, i)) {
91 if (opt[i] == TCPOPT_MSS && tcph->doff*4 - i >= TCPOLEN_MSS && 95 if (opt[i] == TCPOPT_MSS && opt[i+1] == TCPOLEN_MSS) {
92 opt[i+1] == TCPOLEN_MSS) {
93 u_int16_t oldmss; 96 u_int16_t oldmss;
94 97
95 oldmss = (opt[i+2] << 8) | opt[i+3]; 98 oldmss = (opt[i+2] << 8) | opt[i+3];
@@ -112,9 +115,10 @@ tcpmss_mangle_packet(struct sk_buff *skb,
112 } 115 }
113 116
114 /* There is data after the header so the option can't be added 117 /* There is data after the header so the option can't be added
115 without moving it, and doing so may make the SYN packet 118 * without moving it, and doing so may make the SYN packet
116 itself too large. Accept the packet unmodified instead. */ 119 * itself too large. Accept the packet unmodified instead.
117 if (tcplen > tcph->doff*4) 120 */
121 if (len > tcp_hdrlen)
118 return 0; 122 return 0;
119 123
120 /* 124 /*
@@ -143,10 +147,10 @@ tcpmss_mangle_packet(struct sk_buff *skb,
143 newmss = min(newmss, (u16)1220); 147 newmss = min(newmss, (u16)1220);
144 148
145 opt = (u_int8_t *)tcph + sizeof(struct tcphdr); 149 opt = (u_int8_t *)tcph + sizeof(struct tcphdr);
146 memmove(opt + TCPOLEN_MSS, opt, tcplen - sizeof(struct tcphdr)); 150 memmove(opt + TCPOLEN_MSS, opt, len - sizeof(struct tcphdr));
147 151
148 inet_proto_csum_replace2(&tcph->check, skb, 152 inet_proto_csum_replace2(&tcph->check, skb,
149 htons(tcplen), htons(tcplen + TCPOLEN_MSS), 1); 153 htons(len), htons(len + TCPOLEN_MSS), 1);
150 opt[0] = TCPOPT_MSS; 154 opt[0] = TCPOPT_MSS;
151 opt[1] = TCPOLEN_MSS; 155 opt[1] = TCPOLEN_MSS;
152 opt[2] = (newmss & 0xff00) >> 8; 156 opt[2] = (newmss & 0xff00) >> 8;
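
The rewritten option walk above bounds the loop so the four bytes of an MSS option can never be read past the TCP header. A userspace sketch of that bounded walk over a hand-built header; the option-length helper only approximates the kernel's optlen() and the header bytes are fabricated:

#include <stdint.h>
#include <stdio.h>

#define TCPOPT_MSS   2
#define TCPOLEN_MSS  4

static unsigned optlen(const uint8_t *opt, unsigned offset)
{
    /* kinds 0 (EOL) and 1 (NOP) are one byte, everything else has a length */
    if (opt[offset] <= 1)
        return 1;
    return opt[offset + 1] ? opt[offset + 1] : 1;
}

int main(void)
{
    /* 20 bytes of fixed header (ignored here) + 4 bytes of MSS option */
    uint8_t opt[24] = { 0 };
    unsigned tcp_hdrlen = sizeof(opt);
    unsigned i;

    opt[20] = TCPOPT_MSS;
    opt[21] = TCPOLEN_MSS;
    opt[22] = 0x05;                          /* MSS 0x05b4 = 1460 */
    opt[23] = 0xb4;

    /* equivalent to i <= tcp_hdrlen - TCPOLEN_MSS, without underflow risk */
    for (i = 20; i + TCPOLEN_MSS <= tcp_hdrlen; i += optlen(opt, i)) {
        if (opt[i] == TCPOPT_MSS && opt[i + 1] == TCPOLEN_MSS) {
            unsigned mss = (opt[i + 2] << 8) | opt[i + 3];
            printf("found MSS option: %u\n", mss);
        }
    }
    return 0;
}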
diff --git a/net/netfilter/xt_TCPOPTSTRIP.c b/net/netfilter/xt_TCPOPTSTRIP.c
index b68fa191710f..625fa1d636a0 100644
--- a/net/netfilter/xt_TCPOPTSTRIP.c
+++ b/net/netfilter/xt_TCPOPTSTRIP.c
@@ -38,7 +38,7 @@ tcpoptstrip_mangle_packet(struct sk_buff *skb,
38 struct tcphdr *tcph; 38 struct tcphdr *tcph;
39 u_int16_t n, o; 39 u_int16_t n, o;
40 u_int8_t *opt; 40 u_int8_t *opt;
41 int len; 41 int len, tcp_hdrlen;
42 42
43 /* This is a fragment, no TCP header is available */ 43 /* This is a fragment, no TCP header is available */
44 if (par->fragoff != 0) 44 if (par->fragoff != 0)
@@ -52,7 +52,9 @@ tcpoptstrip_mangle_packet(struct sk_buff *skb,
52 return NF_DROP; 52 return NF_DROP;
53 53
54 tcph = (struct tcphdr *)(skb_network_header(skb) + tcphoff); 54 tcph = (struct tcphdr *)(skb_network_header(skb) + tcphoff);
55 if (tcph->doff * 4 > len) 55 tcp_hdrlen = tcph->doff * 4;
56
57 if (len < tcp_hdrlen)
56 return NF_DROP; 58 return NF_DROP;
57 59
58 opt = (u_int8_t *)tcph; 60 opt = (u_int8_t *)tcph;
@@ -61,10 +63,10 @@ tcpoptstrip_mangle_packet(struct sk_buff *skb,
61 * Walk through all TCP options - if we find some option to remove, 63 * Walk through all TCP options - if we find some option to remove,
62 * set all octets to %TCPOPT_NOP and adjust checksum. 64 * set all octets to %TCPOPT_NOP and adjust checksum.
63 */ 65 */
64 for (i = sizeof(struct tcphdr); i < tcp_hdrlen(skb); i += optl) { 66 for (i = sizeof(struct tcphdr); i < tcp_hdrlen - 1; i += optl) {
65 optl = optlen(opt, i); 67 optl = optlen(opt, i);
66 68
67 if (i + optl > tcp_hdrlen(skb)) 69 if (i + optl > tcp_hdrlen)
68 break; 70 break;
69 71
70 if (!tcpoptstrip_test_bit(info->strip_bmap, opt[i])) 72 if (!tcpoptstrip_test_bit(info->strip_bmap, opt[i]))
diff --git a/net/openvswitch/actions.c b/net/openvswitch/actions.c
index 22c5f399f1cf..ab101f715447 100644
--- a/net/openvswitch/actions.c
+++ b/net/openvswitch/actions.c
@@ -535,6 +535,7 @@ int ovs_execute_actions(struct datapath *dp, struct sk_buff *skb)
535{ 535{
536 struct sw_flow_actions *acts = rcu_dereference(OVS_CB(skb)->flow->sf_acts); 536 struct sw_flow_actions *acts = rcu_dereference(OVS_CB(skb)->flow->sf_acts);
537 537
538 OVS_CB(skb)->tun_key = NULL;
538 return do_execute_actions(dp, skb, acts->actions, 539 return do_execute_actions(dp, skb, acts->actions,
539 acts->actions_len, false); 540 acts->actions_len, false);
540} 541}
diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c
index f7e3a0d84c40..f2ed7600084e 100644
--- a/net/openvswitch/datapath.c
+++ b/net/openvswitch/datapath.c
@@ -2076,9 +2076,6 @@ static int ovs_vport_cmd_set(struct sk_buff *skb, struct genl_info *info)
2076 ovs_notify(reply, info, &ovs_dp_vport_multicast_group); 2076 ovs_notify(reply, info, &ovs_dp_vport_multicast_group);
2077 return 0; 2077 return 0;
2078 2078
2079 rtnl_unlock();
2080 return 0;
2081
2082exit_free: 2079exit_free:
2083 kfree_skb(reply); 2080 kfree_skb(reply);
2084exit_unlock: 2081exit_unlock:
diff --git a/net/openvswitch/flow.c b/net/openvswitch/flow.c
index 5c519b121e1b..1aa84dc58777 100644
--- a/net/openvswitch/flow.c
+++ b/net/openvswitch/flow.c
@@ -240,7 +240,7 @@ static struct flex_array *alloc_buckets(unsigned int n_buckets)
240 struct flex_array *buckets; 240 struct flex_array *buckets;
241 int i, err; 241 int i, err;
242 242
243 buckets = flex_array_alloc(sizeof(struct hlist_head *), 243 buckets = flex_array_alloc(sizeof(struct hlist_head),
244 n_buckets, GFP_KERNEL); 244 n_buckets, GFP_KERNEL);
245 if (!buckets) 245 if (!buckets)
246 return NULL; 246 return NULL;
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 4b66c752eae5..75c8bbf598c8 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -3259,9 +3259,11 @@ static int packet_getsockopt(struct socket *sock, int level, int optname,
3259 3259
3260 if (po->tp_version == TPACKET_V3) { 3260 if (po->tp_version == TPACKET_V3) {
3261 lv = sizeof(struct tpacket_stats_v3); 3261 lv = sizeof(struct tpacket_stats_v3);
3262 st.stats3.tp_packets += st.stats3.tp_drops;
3262 data = &st.stats3; 3263 data = &st.stats3;
3263 } else { 3264 } else {
3264 lv = sizeof(struct tpacket_stats); 3265 lv = sizeof(struct tpacket_stats);
3266 st.stats1.tp_packets += st.stats1.tp_drops;
3265 data = &st.stats1; 3267 data = &st.stats1;
3266 } 3268 }
3267 3269
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index 281c1bded1f6..51b968d3febb 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -285,6 +285,45 @@ static struct Qdisc_ops *qdisc_lookup_ops(struct nlattr *kind)
285 return q; 285 return q;
286} 286}
287 287
 288/* The linklayer setting was not transferred from iproute2, in older
289 * versions, and the rate tables lookup systems have been dropped in
290 * the kernel. To keep backward compatible with older iproute2 tc
291 * utils, we detect the linklayer setting by detecting if the rate
292 * table were modified.
293 *
294 * For linklayer ATM table entries, the rate table will be aligned to
295 * 48 bytes, thus some table entries will contain the same value. The
296 * mpu (min packet unit) is also encoded into the old rate table, thus
297 * starting from the mpu, we find low and high table entries for
 298 * mapping this cell. If these entries contain the same value, then
299 * the rate tables have been modified for linklayer ATM.
300 *
301 * This is done by rounding mpu to the nearest 48 bytes cell/entry,
302 * and then roundup to the next cell, calc the table entry one below,
303 * and compare.
304 */
305static __u8 __detect_linklayer(struct tc_ratespec *r, __u32 *rtab)
306{
307 int low = roundup(r->mpu, 48);
308 int high = roundup(low+1, 48);
309 int cell_low = low >> r->cell_log;
310 int cell_high = (high >> r->cell_log) - 1;
311
312 /* rtab is too inaccurate at rates > 100Mbit/s */
313 if ((r->rate > (100000000/8)) || (rtab[0] == 0)) {
314 pr_debug("TC linklayer: Giving up ATM detection\n");
315 return TC_LINKLAYER_ETHERNET;
316 }
317
318 if ((cell_high > cell_low) && (cell_high < 256)
319 && (rtab[cell_low] == rtab[cell_high])) {
320 pr_debug("TC linklayer: Detected ATM, low(%d)=high(%d)=%u\n",
321 cell_low, cell_high, rtab[cell_high]);
322 return TC_LINKLAYER_ATM;
323 }
324 return TC_LINKLAYER_ETHERNET;
325}
326
288static struct qdisc_rate_table *qdisc_rtab_list; 327static struct qdisc_rate_table *qdisc_rtab_list;
289 328
290struct qdisc_rate_table *qdisc_get_rtab(struct tc_ratespec *r, struct nlattr *tab) 329struct qdisc_rate_table *qdisc_get_rtab(struct tc_ratespec *r, struct nlattr *tab)
@@ -308,6 +347,8 @@ struct qdisc_rate_table *qdisc_get_rtab(struct tc_ratespec *r, struct nlattr *ta
308 rtab->rate = *r; 347 rtab->rate = *r;
309 rtab->refcnt = 1; 348 rtab->refcnt = 1;
310 memcpy(rtab->data, nla_data(tab), 1024); 349 memcpy(rtab->data, nla_data(tab), 1024);
350 if (r->linklayer == TC_LINKLAYER_UNAWARE)
351 r->linklayer = __detect_linklayer(r, rtab->data);
311 rtab->next = qdisc_rtab_list; 352 rtab->next = qdisc_rtab_list;
312 qdisc_rtab_list = rtab; 353 qdisc_rtab_list = rtab;
313 } 354 }
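
A self-contained sketch of the heuristic described in the comment above: a rate table built for linklayer ATM rounds packet sizes up to 48-byte cells, so the two table entries bracketing the mpu come out identical. The table contents, cell_log value, and simplified guard conditions below are illustrative, not the kernel's; the real code also gives up above 100 Mbit/s and caps the index at 256.

#include <stdint.h>
#include <stdio.h>

#define TC_LINKLAYER_ETHERNET 1
#define TC_LINKLAYER_ATM      2   /* assumed enum values, illustration only */

static int roundup_to(int x, int to)
{
    return ((x + to - 1) / to) * to;
}

static int detect_linklayer(int mpu, int cell_log, const uint32_t *rtab)
{
    int low = roundup_to(mpu, 48);            /* nearest 48-byte cell */
    int high = roundup_to(low + 1, 48);       /* next cell up */
    int cell_low = low >> cell_log;
    int cell_high = (high >> cell_log) - 1;   /* entry one below that cell */

    if (cell_high > cell_low && rtab[cell_low] == rtab[cell_high])
        return TC_LINKLAYER_ATM;              /* table was ATM-aligned */
    return TC_LINKLAYER_ETHERNET;
}

int main(void)
{
    /* cell_log 3: each table cell covers 8 bytes of packet size */
    uint32_t atm_like[32], eth_like[32];
    int i;

    for (i = 0; i < 32; i++) {
        eth_like[i] = 100 * (i + 1);                            /* increasing */
        atm_like[i] = 100 * (roundup_to(8 * (i + 1), 48) / 48); /* 48B steps */
    }

    printf("eth table -> %d\n", detect_linklayer(0, 3, eth_like));
    printf("atm table -> %d\n", detect_linklayer(0, 3, atm_like));
    return 0;
}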
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 4626cef4b76e..48be3d5c0d92 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -25,6 +25,7 @@
25#include <linux/rcupdate.h> 25#include <linux/rcupdate.h>
26#include <linux/list.h> 26#include <linux/list.h>
27#include <linux/slab.h> 27#include <linux/slab.h>
28#include <linux/if_vlan.h>
28#include <net/sch_generic.h> 29#include <net/sch_generic.h>
29#include <net/pkt_sched.h> 30#include <net/pkt_sched.h>
30#include <net/dst.h> 31#include <net/dst.h>
@@ -207,15 +208,19 @@ void __qdisc_run(struct Qdisc *q)
207 208
208unsigned long dev_trans_start(struct net_device *dev) 209unsigned long dev_trans_start(struct net_device *dev)
209{ 210{
210 unsigned long val, res = dev->trans_start; 211 unsigned long val, res;
211 unsigned int i; 212 unsigned int i;
212 213
214 if (is_vlan_dev(dev))
215 dev = vlan_dev_real_dev(dev);
216 res = dev->trans_start;
213 for (i = 0; i < dev->num_tx_queues; i++) { 217 for (i = 0; i < dev->num_tx_queues; i++) {
214 val = netdev_get_tx_queue(dev, i)->trans_start; 218 val = netdev_get_tx_queue(dev, i)->trans_start;
215 if (val && time_after(val, res)) 219 if (val && time_after(val, res))
216 res = val; 220 res = val;
217 } 221 }
218 dev->trans_start = res; 222 dev->trans_start = res;
223
219 return res; 224 return res;
220} 225}
221EXPORT_SYMBOL(dev_trans_start); 226EXPORT_SYMBOL(dev_trans_start);
@@ -904,6 +909,7 @@ void psched_ratecfg_precompute(struct psched_ratecfg *r,
904 memset(r, 0, sizeof(*r)); 909 memset(r, 0, sizeof(*r));
905 r->overhead = conf->overhead; 910 r->overhead = conf->overhead;
906 r->rate_bytes_ps = conf->rate; 911 r->rate_bytes_ps = conf->rate;
912 r->linklayer = (conf->linklayer & TC_LINKLAYER_MASK);
907 r->mult = 1; 913 r->mult = 1;
908 /* 914 /*
909 * The deal here is to replace a divide by a reciprocal one 915 * The deal here is to replace a divide by a reciprocal one
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
index 45e751527dfc..c2178b15ca6e 100644
--- a/net/sched/sch_htb.c
+++ b/net/sched/sch_htb.c
@@ -1329,6 +1329,7 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
1329 struct htb_sched *q = qdisc_priv(sch); 1329 struct htb_sched *q = qdisc_priv(sch);
1330 struct htb_class *cl = (struct htb_class *)*arg, *parent; 1330 struct htb_class *cl = (struct htb_class *)*arg, *parent;
1331 struct nlattr *opt = tca[TCA_OPTIONS]; 1331 struct nlattr *opt = tca[TCA_OPTIONS];
1332 struct qdisc_rate_table *rtab = NULL, *ctab = NULL;
1332 struct nlattr *tb[TCA_HTB_MAX + 1]; 1333 struct nlattr *tb[TCA_HTB_MAX + 1];
1333 struct tc_htb_opt *hopt; 1334 struct tc_htb_opt *hopt;
1334 1335
@@ -1350,6 +1351,18 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
1350 if (!hopt->rate.rate || !hopt->ceil.rate) 1351 if (!hopt->rate.rate || !hopt->ceil.rate)
1351 goto failure; 1352 goto failure;
1352 1353
1354 /* Keeping backward compatible with rate_table based iproute2 tc */
1355 if (hopt->rate.linklayer == TC_LINKLAYER_UNAWARE) {
1356 rtab = qdisc_get_rtab(&hopt->rate, tb[TCA_HTB_RTAB]);
1357 if (rtab)
1358 qdisc_put_rtab(rtab);
1359 }
1360 if (hopt->ceil.linklayer == TC_LINKLAYER_UNAWARE) {
1361 ctab = qdisc_get_rtab(&hopt->ceil, tb[TCA_HTB_CTAB]);
1362 if (ctab)
1363 qdisc_put_rtab(ctab);
1364 }
1365
1353 if (!cl) { /* new class */ 1366 if (!cl) { /* new class */
1354 struct Qdisc *new_q; 1367 struct Qdisc *new_q;
1355 int prio; 1368 int prio;
diff --git a/net/sctp/associola.c b/net/sctp/associola.c
index bce5b79662a6..ab67efc64b24 100644
--- a/net/sctp/associola.c
+++ b/net/sctp/associola.c
@@ -846,12 +846,12 @@ void sctp_assoc_control_transport(struct sctp_association *asoc,
846 else 846 else
847 spc_state = SCTP_ADDR_AVAILABLE; 847 spc_state = SCTP_ADDR_AVAILABLE;
848 /* Don't inform ULP about transition from PF to 848 /* Don't inform ULP about transition from PF to
849 * active state and set cwnd to 1, see SCTP 849 * active state and set cwnd to 1 MTU, see SCTP
850 * Quick failover draft section 5.1, point 5 850 * Quick failover draft section 5.1, point 5
851 */ 851 */
852 if (transport->state == SCTP_PF) { 852 if (transport->state == SCTP_PF) {
853 ulp_notify = false; 853 ulp_notify = false;
854 transport->cwnd = 1; 854 transport->cwnd = asoc->pathmtu;
855 } 855 }
856 transport->state = SCTP_ACTIVE; 856 transport->state = SCTP_ACTIVE;
857 break; 857 break;
diff --git a/net/sctp/transport.c b/net/sctp/transport.c
index bdbbc3fd7c14..8fdd16046d66 100644
--- a/net/sctp/transport.c
+++ b/net/sctp/transport.c
@@ -181,12 +181,12 @@ static void sctp_transport_destroy(struct sctp_transport *transport)
181 return; 181 return;
182 } 182 }
183 183
184 call_rcu(&transport->rcu, sctp_transport_destroy_rcu);
185
186 sctp_packet_free(&transport->packet); 184 sctp_packet_free(&transport->packet);
187 185
188 if (transport->asoc) 186 if (transport->asoc)
189 sctp_association_put(transport->asoc); 187 sctp_association_put(transport->asoc);
188
189 call_rcu(&transport->rcu, sctp_transport_destroy_rcu);
190} 190}
191 191
192/* Start T3_rtx timer if it is not already running and update the heartbeat 192/* Start T3_rtx timer if it is not already running and update the heartbeat
diff --git a/net/tipc/bearer.c b/net/tipc/bearer.c
index cb29ef7ba2f0..609c30c80816 100644
--- a/net/tipc/bearer.c
+++ b/net/tipc/bearer.c
@@ -460,6 +460,7 @@ static void bearer_disable(struct tipc_bearer *b_ptr)
460{ 460{
461 struct tipc_link *l_ptr; 461 struct tipc_link *l_ptr;
462 struct tipc_link *temp_l_ptr; 462 struct tipc_link *temp_l_ptr;
463 struct tipc_link_req *temp_req;
463 464
464 pr_info("Disabling bearer <%s>\n", b_ptr->name); 465 pr_info("Disabling bearer <%s>\n", b_ptr->name);
465 spin_lock_bh(&b_ptr->lock); 466 spin_lock_bh(&b_ptr->lock);
@@ -468,9 +469,13 @@ static void bearer_disable(struct tipc_bearer *b_ptr)
468 list_for_each_entry_safe(l_ptr, temp_l_ptr, &b_ptr->links, link_list) { 469 list_for_each_entry_safe(l_ptr, temp_l_ptr, &b_ptr->links, link_list) {
469 tipc_link_delete(l_ptr); 470 tipc_link_delete(l_ptr);
470 } 471 }
471 if (b_ptr->link_req) 472 temp_req = b_ptr->link_req;
472 tipc_disc_delete(b_ptr->link_req); 473 b_ptr->link_req = NULL;
473 spin_unlock_bh(&b_ptr->lock); 474 spin_unlock_bh(&b_ptr->lock);
475
476 if (temp_req)
477 tipc_disc_delete(temp_req);
478
474 memset(b_ptr, 0, sizeof(struct tipc_bearer)); 479 memset(b_ptr, 0, sizeof(struct tipc_bearer));
475} 480}
476 481
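
The bearer_disable() change detaches link_req under the spinlock, clears the pointer, and only calls tipc_disc_delete() after dropping the lock. A userspace sketch of that detach-then-free pattern, with a pthread mutex standing in for the bearer lock and invented structure fields:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct bearer {
    pthread_mutex_t lock;
    char *link_req;              /* owned resource, released on disable */
};

static void bearer_disable(struct bearer *b)
{
    char *temp_req;

    pthread_mutex_lock(&b->lock);
    temp_req = b->link_req;      /* detach under the lock */
    b->link_req = NULL;          /* no other path can still see it */
    pthread_mutex_unlock(&b->lock);

    free(temp_req);              /* heavy teardown outside the lock */
}

int main(void)
{
    struct bearer b = { PTHREAD_MUTEX_INITIALIZER, strdup("disc request") };

    bearer_disable(&b);
    printf("link_req after disable: %p\n", (void *)b.link_req);
    return 0;
}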
diff --git a/net/vmw_vsock/af_vsock.c b/net/vmw_vsock/af_vsock.c
index 593071dabd1c..4d9334683f84 100644
--- a/net/vmw_vsock/af_vsock.c
+++ b/net/vmw_vsock/af_vsock.c
@@ -347,7 +347,7 @@ void vsock_for_each_connected_socket(void (*fn)(struct sock *sk))
347 for (i = 0; i < ARRAY_SIZE(vsock_connected_table); i++) { 347 for (i = 0; i < ARRAY_SIZE(vsock_connected_table); i++) {
348 struct vsock_sock *vsk; 348 struct vsock_sock *vsk;
349 list_for_each_entry(vsk, &vsock_connected_table[i], 349 list_for_each_entry(vsk, &vsock_connected_table[i],
350 connected_table); 350 connected_table)
351 fn(sk_vsock(vsk)); 351 fn(sk_vsock(vsk));
352 } 352 }
353 353
diff --git a/net/wireless/core.c b/net/wireless/core.c
index 4f9f216665e9..a8c29fa4f1b3 100644
--- a/net/wireless/core.c
+++ b/net/wireless/core.c
@@ -765,6 +765,7 @@ void cfg80211_leave(struct cfg80211_registered_device *rdev,
765 cfg80211_leave_mesh(rdev, dev); 765 cfg80211_leave_mesh(rdev, dev);
766 break; 766 break;
767 case NL80211_IFTYPE_AP: 767 case NL80211_IFTYPE_AP:
768 case NL80211_IFTYPE_P2P_GO:
768 cfg80211_stop_ap(rdev, dev); 769 cfg80211_stop_ap(rdev, dev);
769 break; 770 break;
770 default: 771 default:
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index 25d217d90807..5f6e982cdcf4 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -441,10 +441,12 @@ static int nl80211_prepare_wdev_dump(struct sk_buff *skb,
441 goto out_unlock; 441 goto out_unlock;
442 } 442 }
443 *rdev = wiphy_to_dev((*wdev)->wiphy); 443 *rdev = wiphy_to_dev((*wdev)->wiphy);
444 cb->args[0] = (*rdev)->wiphy_idx; 444 /* 0 is the first index - add 1 to parse only once */
445 cb->args[0] = (*rdev)->wiphy_idx + 1;
445 cb->args[1] = (*wdev)->identifier; 446 cb->args[1] = (*wdev)->identifier;
446 } else { 447 } else {
447 struct wiphy *wiphy = wiphy_idx_to_wiphy(cb->args[0]); 448 /* subtract the 1 again here */
449 struct wiphy *wiphy = wiphy_idx_to_wiphy(cb->args[0] - 1);
448 struct wireless_dev *tmp; 450 struct wireless_dev *tmp;
449 451
450 if (!wiphy) { 452 if (!wiphy) {
@@ -2620,8 +2622,8 @@ static int nl80211_get_key(struct sk_buff *skb, struct genl_info *info)
2620 2622
2621 hdr = nl80211hdr_put(msg, info->snd_portid, info->snd_seq, 0, 2623 hdr = nl80211hdr_put(msg, info->snd_portid, info->snd_seq, 0,
2622 NL80211_CMD_NEW_KEY); 2624 NL80211_CMD_NEW_KEY);
2623 if (IS_ERR(hdr)) 2625 if (!hdr)
2624 return PTR_ERR(hdr); 2626 return -ENOBUFS;
2625 2627
2626 cookie.msg = msg; 2628 cookie.msg = msg;
2627 cookie.idx = key_idx; 2629 cookie.idx = key_idx;
@@ -6505,6 +6507,9 @@ static int nl80211_testmode_dump(struct sk_buff *skb,
6505 NL80211_CMD_TESTMODE); 6507 NL80211_CMD_TESTMODE);
6506 struct nlattr *tmdata; 6508 struct nlattr *tmdata;
6507 6509
6510 if (!hdr)
6511 break;
6512
6508 if (nla_put_u32(skb, NL80211_ATTR_WIPHY, phy_idx)) { 6513 if (nla_put_u32(skb, NL80211_ATTR_WIPHY, phy_idx)) {
6509 genlmsg_cancel(skb, hdr); 6514 genlmsg_cancel(skb, hdr);
6510 break; 6515 break;
@@ -6949,9 +6954,8 @@ static int nl80211_remain_on_channel(struct sk_buff *skb,
6949 6954
6950 hdr = nl80211hdr_put(msg, info->snd_portid, info->snd_seq, 0, 6955 hdr = nl80211hdr_put(msg, info->snd_portid, info->snd_seq, 0,
6951 NL80211_CMD_REMAIN_ON_CHANNEL); 6956 NL80211_CMD_REMAIN_ON_CHANNEL);
6952 6957 if (!hdr) {
6953 if (IS_ERR(hdr)) { 6958 err = -ENOBUFS;
6954 err = PTR_ERR(hdr);
6955 goto free_msg; 6959 goto free_msg;
6956 } 6960 }
6957 6961
@@ -7249,9 +7253,8 @@ static int nl80211_tx_mgmt(struct sk_buff *skb, struct genl_info *info)
7249 7253
7250 hdr = nl80211hdr_put(msg, info->snd_portid, info->snd_seq, 0, 7254 hdr = nl80211hdr_put(msg, info->snd_portid, info->snd_seq, 0,
7251 NL80211_CMD_FRAME); 7255 NL80211_CMD_FRAME);
7252 7256 if (!hdr) {
7253 if (IS_ERR(hdr)) { 7257 err = -ENOBUFS;
7254 err = PTR_ERR(hdr);
7255 goto free_msg; 7258 goto free_msg;
7256 } 7259 }
7257 } 7260 }
@@ -8130,9 +8133,8 @@ static int nl80211_probe_client(struct sk_buff *skb,
8130 8133
8131 hdr = nl80211hdr_put(msg, info->snd_portid, info->snd_seq, 0, 8134 hdr = nl80211hdr_put(msg, info->snd_portid, info->snd_seq, 0,
8132 NL80211_CMD_PROBE_CLIENT); 8135 NL80211_CMD_PROBE_CLIENT);
8133 8136 if (!hdr) {
8134 if (IS_ERR(hdr)) { 8137 err = -ENOBUFS;
8135 err = PTR_ERR(hdr);
8136 goto free_msg; 8138 goto free_msg;
8137 } 8139 }
8138 8140
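
The nl80211 hunks replace IS_ERR()/PTR_ERR() checks with plain NULL checks because nl80211hdr_put() reports failure with NULL, which IS_ERR() does not recognise. A tiny userspace sketch of the two conventions; the is_err() helper imitates the kernel macro and the errno value is only an example:

#include <stdio.h>

#define MAX_ERRNO 4095
#define ENOBUFS   105

static int is_err(const void *ptr)
{
    /* true only for "error pointers" in the top MAX_ERRNO addresses */
    return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

static void *hdr_put_that_returns_null_on_failure(void)
{
    return NULL;                 /* e.g. no room left in the message */
}

int main(void)
{
    void *hdr = hdr_put_that_returns_null_on_failure();

    /* Wrong: IS_ERR(NULL) is false, so the failure is silently missed. */
    printf("is_err(hdr) = %d\n", is_err(hdr));

    /* Right: check for NULL and pick an errno explicitly. */
    if (!hdr)
        printf("hdr == NULL -> return -ENOBUFS (%d)\n", -ENOBUFS);
    return 0;
}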
diff --git a/net/wireless/sme.c b/net/wireless/sme.c
index 81c8a10d743c..20e86a95dc4e 100644
--- a/net/wireless/sme.c
+++ b/net/wireless/sme.c
@@ -976,21 +976,19 @@ int cfg80211_disconnect(struct cfg80211_registered_device *rdev,
976 struct net_device *dev, u16 reason, bool wextev) 976 struct net_device *dev, u16 reason, bool wextev)
977{ 977{
978 struct wireless_dev *wdev = dev->ieee80211_ptr; 978 struct wireless_dev *wdev = dev->ieee80211_ptr;
979 int err; 979 int err = 0;
980 980
981 ASSERT_WDEV_LOCK(wdev); 981 ASSERT_WDEV_LOCK(wdev);
982 982
983 kfree(wdev->connect_keys); 983 kfree(wdev->connect_keys);
984 wdev->connect_keys = NULL; 984 wdev->connect_keys = NULL;
985 985
986 if (wdev->conn) { 986 if (wdev->conn)
987 err = cfg80211_sme_disconnect(wdev, reason); 987 err = cfg80211_sme_disconnect(wdev, reason);
988 } else if (!rdev->ops->disconnect) { 988 else if (!rdev->ops->disconnect)
989 cfg80211_mlme_down(rdev, dev); 989 cfg80211_mlme_down(rdev, dev);
990 err = 0; 990 else if (wdev->current_bss)
991 } else {
992 err = rdev_disconnect(rdev, dev, reason); 991 err = rdev_disconnect(rdev, dev, reason);
993 }
994 992
995 return err; 993 return err;
996} 994}
diff --git a/sound/pci/hda/hda_generic.c b/sound/pci/hda/hda_generic.c
index 8e77cbbad871..e3c7ba8d7582 100644
--- a/sound/pci/hda/hda_generic.c
+++ b/sound/pci/hda/hda_generic.c
@@ -522,7 +522,7 @@ static bool same_amp_caps(struct hda_codec *codec, hda_nid_t nid1,
522} 522}
523 523
524#define nid_has_mute(codec, nid, dir) \ 524#define nid_has_mute(codec, nid, dir) \
525 check_amp_caps(codec, nid, dir, AC_AMPCAP_MUTE) 525 check_amp_caps(codec, nid, dir, (AC_AMPCAP_MUTE | AC_AMPCAP_MIN_MUTE))
526#define nid_has_volume(codec, nid, dir) \ 526#define nid_has_volume(codec, nid, dir) \
527 check_amp_caps(codec, nid, dir, AC_AMPCAP_NUM_STEPS) 527 check_amp_caps(codec, nid, dir, AC_AMPCAP_NUM_STEPS)
528 528
@@ -624,7 +624,7 @@ static int get_amp_val_to_activate(struct hda_codec *codec, hda_nid_t nid,
624 if (enable) 624 if (enable)
625 val = (caps & AC_AMPCAP_OFFSET) >> AC_AMPCAP_OFFSET_SHIFT; 625 val = (caps & AC_AMPCAP_OFFSET) >> AC_AMPCAP_OFFSET_SHIFT;
626 } 626 }
627 if (caps & AC_AMPCAP_MUTE) { 627 if (caps & (AC_AMPCAP_MUTE | AC_AMPCAP_MIN_MUTE)) {
628 if (!enable) 628 if (!enable)
629 val |= HDA_AMP_MUTE; 629 val |= HDA_AMP_MUTE;
630 } 630 }
@@ -648,7 +648,7 @@ static unsigned int get_amp_mask_to_modify(struct hda_codec *codec,
648{ 648{
649 unsigned int mask = 0xff; 649 unsigned int mask = 0xff;
650 650
651 if (caps & AC_AMPCAP_MUTE) { 651 if (caps & (AC_AMPCAP_MUTE | AC_AMPCAP_MIN_MUTE)) {
652 if (is_ctl_associated(codec, nid, dir, idx, NID_PATH_MUTE_CTL)) 652 if (is_ctl_associated(codec, nid, dir, idx, NID_PATH_MUTE_CTL))
653 mask &= ~0x80; 653 mask &= ~0x80;
654 } 654 }
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index 8bd226149868..f303cd898515 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -1031,6 +1031,7 @@ enum {
1031 ALC880_FIXUP_GPIO2, 1031 ALC880_FIXUP_GPIO2,
1032 ALC880_FIXUP_MEDION_RIM, 1032 ALC880_FIXUP_MEDION_RIM,
1033 ALC880_FIXUP_LG, 1033 ALC880_FIXUP_LG,
1034 ALC880_FIXUP_LG_LW25,
1034 ALC880_FIXUP_W810, 1035 ALC880_FIXUP_W810,
1035 ALC880_FIXUP_EAPD_COEF, 1036 ALC880_FIXUP_EAPD_COEF,
1036 ALC880_FIXUP_TCL_S700, 1037 ALC880_FIXUP_TCL_S700,
@@ -1089,6 +1090,14 @@ static const struct hda_fixup alc880_fixups[] = {
1089 { } 1090 { }
1090 } 1091 }
1091 }, 1092 },
1093 [ALC880_FIXUP_LG_LW25] = {
1094 .type = HDA_FIXUP_PINS,
1095 .v.pins = (const struct hda_pintbl[]) {
1096 { 0x1a, 0x0181344f }, /* line-in */
1097 { 0x1b, 0x0321403f }, /* headphone */
1098 { }
1099 }
1100 },
1092 [ALC880_FIXUP_W810] = { 1101 [ALC880_FIXUP_W810] = {
1093 .type = HDA_FIXUP_PINS, 1102 .type = HDA_FIXUP_PINS,
1094 .v.pins = (const struct hda_pintbl[]) { 1103 .v.pins = (const struct hda_pintbl[]) {
@@ -1341,6 +1350,7 @@ static const struct snd_pci_quirk alc880_fixup_tbl[] = {
1341 SND_PCI_QUIRK(0x1854, 0x003b, "LG", ALC880_FIXUP_LG), 1350 SND_PCI_QUIRK(0x1854, 0x003b, "LG", ALC880_FIXUP_LG),
1342 SND_PCI_QUIRK(0x1854, 0x005f, "LG P1 Express", ALC880_FIXUP_LG), 1351 SND_PCI_QUIRK(0x1854, 0x005f, "LG P1 Express", ALC880_FIXUP_LG),
1343 SND_PCI_QUIRK(0x1854, 0x0068, "LG w1", ALC880_FIXUP_LG), 1352 SND_PCI_QUIRK(0x1854, 0x0068, "LG w1", ALC880_FIXUP_LG),
1353 SND_PCI_QUIRK(0x1854, 0x0077, "LG LW25", ALC880_FIXUP_LG_LW25),
1344 SND_PCI_QUIRK(0x19db, 0x4188, "TCL S700", ALC880_FIXUP_TCL_S700), 1354 SND_PCI_QUIRK(0x19db, 0x4188, "TCL S700", ALC880_FIXUP_TCL_S700),
1345 1355
1346 /* Below is the copied entries from alc880_quirks.c. 1356 /* Below is the copied entries from alc880_quirks.c.
@@ -4329,6 +4339,7 @@ static const struct snd_pci_quirk alc662_fixup_tbl[] = {
4329 SND_PCI_QUIRK(0x1025, 0x0308, "Acer Aspire 8942G", ALC662_FIXUP_ASPIRE), 4339 SND_PCI_QUIRK(0x1025, 0x0308, "Acer Aspire 8942G", ALC662_FIXUP_ASPIRE),
4330 SND_PCI_QUIRK(0x1025, 0x031c, "Gateway NV79", ALC662_FIXUP_SKU_IGNORE), 4340 SND_PCI_QUIRK(0x1025, 0x031c, "Gateway NV79", ALC662_FIXUP_SKU_IGNORE),
4331 SND_PCI_QUIRK(0x1025, 0x0349, "eMachines eM250", ALC662_FIXUP_INV_DMIC), 4341 SND_PCI_QUIRK(0x1025, 0x0349, "eMachines eM250", ALC662_FIXUP_INV_DMIC),
4342 SND_PCI_QUIRK(0x1025, 0x034a, "Gateway LT27", ALC662_FIXUP_INV_DMIC),
4332 SND_PCI_QUIRK(0x1025, 0x038b, "Acer Aspire 8943G", ALC662_FIXUP_ASPIRE), 4343 SND_PCI_QUIRK(0x1025, 0x038b, "Acer Aspire 8943G", ALC662_FIXUP_ASPIRE),
4333 SND_PCI_QUIRK(0x1028, 0x05d8, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE), 4344 SND_PCI_QUIRK(0x1028, 0x05d8, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE),
4334 SND_PCI_QUIRK(0x1028, 0x05db, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE), 4345 SND_PCI_QUIRK(0x1028, 0x05db, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE),
diff --git a/sound/soc/codecs/cs42l52.c b/sound/soc/codecs/cs42l52.c
index 987f728718c5..be2ba1b6fe4a 100644
--- a/sound/soc/codecs/cs42l52.c
+++ b/sound/soc/codecs/cs42l52.c
@@ -195,6 +195,8 @@ static DECLARE_TLV_DB_SCALE(pga_tlv, -600, 50, 0);
195 195
196static DECLARE_TLV_DB_SCALE(mix_tlv, -50, 50, 0); 196static DECLARE_TLV_DB_SCALE(mix_tlv, -50, 50, 0);
197 197
198static DECLARE_TLV_DB_SCALE(beep_tlv, -56, 200, 0);
199
198static const unsigned int limiter_tlv[] = { 200static const unsigned int limiter_tlv[] = {
199 TLV_DB_RANGE_HEAD(2), 201 TLV_DB_RANGE_HEAD(2),
200 0, 2, TLV_DB_SCALE_ITEM(-3000, 600, 0), 202 0, 2, TLV_DB_SCALE_ITEM(-3000, 600, 0),
@@ -451,7 +453,8 @@ static const struct snd_kcontrol_new cs42l52_snd_controls[] = {
451 SOC_ENUM("Beep Pitch", beep_pitch_enum), 453 SOC_ENUM("Beep Pitch", beep_pitch_enum),
452 SOC_ENUM("Beep on Time", beep_ontime_enum), 454 SOC_ENUM("Beep on Time", beep_ontime_enum),
453 SOC_ENUM("Beep off Time", beep_offtime_enum), 455 SOC_ENUM("Beep off Time", beep_offtime_enum),
454 SOC_SINGLE_TLV("Beep Volume", CS42L52_BEEP_VOL, 0, 0x1f, 0x07, hl_tlv), 456 SOC_SINGLE_SX_TLV("Beep Volume", CS42L52_BEEP_VOL,
457 0, 0x07, 0x1f, beep_tlv),
455 SOC_SINGLE("Beep Mixer Switch", CS42L52_BEEP_TONE_CTL, 5, 1, 1), 458 SOC_SINGLE("Beep Mixer Switch", CS42L52_BEEP_TONE_CTL, 5, 1, 1),
456 SOC_ENUM("Beep Treble Corner Freq", beep_treble_enum), 459 SOC_ENUM("Beep Treble Corner Freq", beep_treble_enum),
457 SOC_ENUM("Beep Bass Corner Freq", beep_bass_enum), 460 SOC_ENUM("Beep Bass Corner Freq", beep_bass_enum),
diff --git a/sound/soc/codecs/sgtl5000.c b/sound/soc/codecs/sgtl5000.c
index 6c8a9e7bee25..760e8bfeacaa 100644
--- a/sound/soc/codecs/sgtl5000.c
+++ b/sound/soc/codecs/sgtl5000.c
@@ -153,6 +153,8 @@ static int mic_bias_event(struct snd_soc_dapm_widget *w,
153static int power_vag_event(struct snd_soc_dapm_widget *w, 153static int power_vag_event(struct snd_soc_dapm_widget *w,
154 struct snd_kcontrol *kcontrol, int event) 154 struct snd_kcontrol *kcontrol, int event)
155{ 155{
156 const u32 mask = SGTL5000_DAC_POWERUP | SGTL5000_ADC_POWERUP;
157
156 switch (event) { 158 switch (event) {
157 case SND_SOC_DAPM_POST_PMU: 159 case SND_SOC_DAPM_POST_PMU:
158 snd_soc_update_bits(w->codec, SGTL5000_CHIP_ANA_POWER, 160 snd_soc_update_bits(w->codec, SGTL5000_CHIP_ANA_POWER,
@@ -160,9 +162,17 @@ static int power_vag_event(struct snd_soc_dapm_widget *w,
160 break; 162 break;
161 163
162 case SND_SOC_DAPM_PRE_PMD: 164 case SND_SOC_DAPM_PRE_PMD:
163 snd_soc_update_bits(w->codec, SGTL5000_CHIP_ANA_POWER, 165 /*
164 SGTL5000_VAG_POWERUP, 0); 166 * Don't clear VAG_POWERUP, when both DAC and ADC are
165 msleep(400); 167 * operational to prevent inadvertently starving the
168 * other one of them.
169 */
170 if ((snd_soc_read(w->codec, SGTL5000_CHIP_ANA_POWER) &
171 mask) != mask) {
172 snd_soc_update_bits(w->codec, SGTL5000_CHIP_ANA_POWER,
173 SGTL5000_VAG_POWERUP, 0);
174 msleep(400);
175 }
166 break; 176 break;
167 default: 177 default:
168 break; 178 break;
@@ -388,7 +398,7 @@ static const struct snd_kcontrol_new sgtl5000_snd_controls[] = {
388 SOC_DOUBLE("Capture Volume", SGTL5000_CHIP_ANA_ADC_CTRL, 0, 4, 0xf, 0), 398 SOC_DOUBLE("Capture Volume", SGTL5000_CHIP_ANA_ADC_CTRL, 0, 4, 0xf, 0),
389 SOC_SINGLE_TLV("Capture Attenuate Switch (-6dB)", 399 SOC_SINGLE_TLV("Capture Attenuate Switch (-6dB)",
390 SGTL5000_CHIP_ANA_ADC_CTRL, 400 SGTL5000_CHIP_ANA_ADC_CTRL,
391 8, 2, 0, capture_6db_attenuate), 401 8, 1, 0, capture_6db_attenuate),
392 SOC_SINGLE("Capture ZC Switch", SGTL5000_CHIP_ANA_CTRL, 1, 1, 0), 402 SOC_SINGLE("Capture ZC Switch", SGTL5000_CHIP_ANA_CTRL, 1, 1, 0),
393 403
394 SOC_DOUBLE_TLV("Headphone Playback Volume", 404 SOC_DOUBLE_TLV("Headphone Playback Volume",
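
The sgtl5000 hunk only clears the shared VAG_POWERUP bit when DAC and ADC are not both powered, so shutting down one stream cannot starve the other. A small sketch of that guard over a fake register word; the bit values are assumptions, not the real chip layout:

#include <stdio.h>

#define DAC_POWERUP 0x0008
#define ADC_POWERUP 0x0002
#define VAG_POWERUP 0x0080   /* assumed bit positions, illustration only */

static unsigned ana_power = DAC_POWERUP | ADC_POWERUP | VAG_POWERUP;

static void pre_power_down(void)
{
    const unsigned mask = DAC_POWERUP | ADC_POWERUP;

    if ((ana_power & mask) != mask)      /* both still up? leave VAG alone */
        ana_power &= ~VAG_POWERUP;
}

int main(void)
{
    pre_power_down();                    /* DAC and ADC both up: no change */
    printf("VAG still up: %d\n", !!(ana_power & VAG_POWERUP));

    ana_power &= ~ADC_POWERUP;           /* ADC already powered down */
    pre_power_down();                    /* now VAG may be cleared */
    printf("VAG still up: %d\n", !!(ana_power & VAG_POWERUP));
    return 0;
}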
diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c
index bd16010441cc..4375c9f2b791 100644
--- a/sound/soc/soc-dapm.c
+++ b/sound/soc/soc-dapm.c
@@ -679,13 +679,14 @@ static int dapm_new_mux(struct snd_soc_dapm_widget *w)
679 return -EINVAL; 679 return -EINVAL;
680 } 680 }
681 681
682 path = list_first_entry(&w->sources, struct snd_soc_dapm_path, 682 if (list_empty(&w->sources)) {
683 list_sink);
684 if (!path) {
685 dev_err(dapm->dev, "ASoC: mux %s has no paths\n", w->name); 683 dev_err(dapm->dev, "ASoC: mux %s has no paths\n", w->name);
686 return -EINVAL; 684 return -EINVAL;
687 } 685 }
688 686
687 path = list_first_entry(&w->sources, struct snd_soc_dapm_path,
688 list_sink);
689
689 ret = dapm_create_or_share_mixmux_kcontrol(w, 0, path); 690 ret = dapm_create_or_share_mixmux_kcontrol(w, 0, path);
690 if (ret < 0) 691 if (ret < 0)
691 return ret; 692 return ret;
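
The soc-dapm fix replaces a NULL check on list_first_entry() (which can never yield NULL) with an up-front list_empty() test. A sketch of that pitfall with a plain doubly linked list standing in for <linux/list.h>; the structure names are illustrative:

#include <stddef.h>
#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

struct path { int id; struct list_head list_sink; };

#define container_of(ptr, type, member) \
    ((type *)((char *)(ptr) - offsetof(type, member)))
#define list_first_entry(head, type, member) \
    container_of((head)->next, type, member)

static int list_empty(const struct list_head *head)
{
    return head->next == head;
}

int main(void)
{
    struct list_head sources = { &sources, &sources };  /* empty list */

    if (list_empty(&sources)) {
        printf("mux has no paths\n");      /* the only safe answer here */
        return 1;
    }

    /* Never NULL, even for an empty list - hence the check above. */
    struct path *p = list_first_entry(&sources, struct path, list_sink);
    printf("first path id %d\n", p->id);
    return 0;
}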
diff --git a/sound/soc/tegra/tegra30_i2s.c b/sound/soc/tegra/tegra30_i2s.c
index d04146cad61f..47565fd04505 100644
--- a/sound/soc/tegra/tegra30_i2s.c
+++ b/sound/soc/tegra/tegra30_i2s.c
@@ -228,7 +228,7 @@ static int tegra30_i2s_hw_params(struct snd_pcm_substream *substream,
228 reg = TEGRA30_I2S_CIF_RX_CTRL; 228 reg = TEGRA30_I2S_CIF_RX_CTRL;
229 } else { 229 } else {
230 val |= TEGRA30_AUDIOCIF_CTRL_DIRECTION_TX; 230 val |= TEGRA30_AUDIOCIF_CTRL_DIRECTION_TX;
231 reg = TEGRA30_I2S_CIF_RX_CTRL; 231 reg = TEGRA30_I2S_CIF_TX_CTRL;
232 } 232 }
233 233
234 regmap_write(i2s->regmap, reg, val); 234 regmap_write(i2s->regmap, reg, val);
diff --git a/sound/usb/6fire/midi.c b/sound/usb/6fire/midi.c
index 26722423330d..f3dd7266c391 100644
--- a/sound/usb/6fire/midi.c
+++ b/sound/usb/6fire/midi.c
@@ -19,6 +19,10 @@
19#include "chip.h" 19#include "chip.h"
20#include "comm.h" 20#include "comm.h"
21 21
22enum {
23 MIDI_BUFSIZE = 64
24};
25
22static void usb6fire_midi_out_handler(struct urb *urb) 26static void usb6fire_midi_out_handler(struct urb *urb)
23{ 27{
24 struct midi_runtime *rt = urb->context; 28 struct midi_runtime *rt = urb->context;
@@ -156,6 +160,12 @@ int usb6fire_midi_init(struct sfire_chip *chip)
156 if (!rt) 160 if (!rt)
157 return -ENOMEM; 161 return -ENOMEM;
158 162
163 rt->out_buffer = kzalloc(MIDI_BUFSIZE, GFP_KERNEL);
164 if (!rt->out_buffer) {
165 kfree(rt);
166 return -ENOMEM;
167 }
168
159 rt->chip = chip; 169 rt->chip = chip;
160 rt->in_received = usb6fire_midi_in_received; 170 rt->in_received = usb6fire_midi_in_received;
161 rt->out_buffer[0] = 0x80; /* 'send midi' command */ 171 rt->out_buffer[0] = 0x80; /* 'send midi' command */
@@ -169,6 +179,7 @@ int usb6fire_midi_init(struct sfire_chip *chip)
169 179
170 ret = snd_rawmidi_new(chip->card, "6FireUSB", 0, 1, 1, &rt->instance); 180 ret = snd_rawmidi_new(chip->card, "6FireUSB", 0, 1, 1, &rt->instance);
171 if (ret < 0) { 181 if (ret < 0) {
182 kfree(rt->out_buffer);
172 kfree(rt); 183 kfree(rt);
173 snd_printk(KERN_ERR PREFIX "unable to create midi.\n"); 184 snd_printk(KERN_ERR PREFIX "unable to create midi.\n");
174 return ret; 185 return ret;
@@ -197,6 +208,9 @@ void usb6fire_midi_abort(struct sfire_chip *chip)
197 208
198void usb6fire_midi_destroy(struct sfire_chip *chip) 209void usb6fire_midi_destroy(struct sfire_chip *chip)
199{ 210{
200 kfree(chip->midi); 211 struct midi_runtime *rt = chip->midi;
212
213 kfree(rt->out_buffer);
214 kfree(rt);
201 chip->midi = NULL; 215 chip->midi = NULL;
202} 216}
diff --git a/sound/usb/6fire/midi.h b/sound/usb/6fire/midi.h
index c321006e5430..84851b9f5559 100644
--- a/sound/usb/6fire/midi.h
+++ b/sound/usb/6fire/midi.h
@@ -16,10 +16,6 @@
16 16
17#include "common.h" 17#include "common.h"
18 18
19enum {
20 MIDI_BUFSIZE = 64
21};
22
23struct midi_runtime { 19struct midi_runtime {
24 struct sfire_chip *chip; 20 struct sfire_chip *chip;
25 struct snd_rawmidi *instance; 21 struct snd_rawmidi *instance;
@@ -32,7 +28,7 @@ struct midi_runtime {
32 struct snd_rawmidi_substream *out; 28 struct snd_rawmidi_substream *out;
33 struct urb out_urb; 29 struct urb out_urb;
34 u8 out_serial; /* serial number of out packet */ 30 u8 out_serial; /* serial number of out packet */
35 u8 out_buffer[MIDI_BUFSIZE]; 31 u8 *out_buffer;
36 int buffer_offset; 32 int buffer_offset;
37 33
38 void (*in_received)(struct midi_runtime *rt, u8 *data, int length); 34 void (*in_received)(struct midi_runtime *rt, u8 *data, int length);
diff --git a/sound/usb/6fire/pcm.c b/sound/usb/6fire/pcm.c
index 3d2551cc10f2..b5eb97fdc842 100644
--- a/sound/usb/6fire/pcm.c
+++ b/sound/usb/6fire/pcm.c
@@ -582,6 +582,33 @@ static void usb6fire_pcm_init_urb(struct pcm_urb *urb,
582 urb->instance.number_of_packets = PCM_N_PACKETS_PER_URB; 582 urb->instance.number_of_packets = PCM_N_PACKETS_PER_URB;
583} 583}
584 584
585static int usb6fire_pcm_buffers_init(struct pcm_runtime *rt)
586{
587 int i;
588
589 for (i = 0; i < PCM_N_URBS; i++) {
590 rt->out_urbs[i].buffer = kzalloc(PCM_N_PACKETS_PER_URB
591 * PCM_MAX_PACKET_SIZE, GFP_KERNEL);
592 if (!rt->out_urbs[i].buffer)
593 return -ENOMEM;
594 rt->in_urbs[i].buffer = kzalloc(PCM_N_PACKETS_PER_URB
595 * PCM_MAX_PACKET_SIZE, GFP_KERNEL);
596 if (!rt->in_urbs[i].buffer)
597 return -ENOMEM;
598 }
599 return 0;
600}
601
602static void usb6fire_pcm_buffers_destroy(struct pcm_runtime *rt)
603{
604 int i;
605
606 for (i = 0; i < PCM_N_URBS; i++) {
607 kfree(rt->out_urbs[i].buffer);
608 kfree(rt->in_urbs[i].buffer);
609 }
610}
611
585int usb6fire_pcm_init(struct sfire_chip *chip) 612int usb6fire_pcm_init(struct sfire_chip *chip)
586{ 613{
587 int i; 614 int i;
@@ -593,6 +620,13 @@ int usb6fire_pcm_init(struct sfire_chip *chip)
593 if (!rt) 620 if (!rt)
594 return -ENOMEM; 621 return -ENOMEM;
595 622
623 ret = usb6fire_pcm_buffers_init(rt);
624 if (ret) {
625 usb6fire_pcm_buffers_destroy(rt);
626 kfree(rt);
627 return ret;
628 }
629
596 rt->chip = chip; 630 rt->chip = chip;
597 rt->stream_state = STREAM_DISABLED; 631 rt->stream_state = STREAM_DISABLED;
598 rt->rate = ARRAY_SIZE(rates); 632 rt->rate = ARRAY_SIZE(rates);
@@ -614,6 +648,7 @@ int usb6fire_pcm_init(struct sfire_chip *chip)
614 648
615 ret = snd_pcm_new(chip->card, "DMX6FireUSB", 0, 1, 1, &pcm); 649 ret = snd_pcm_new(chip->card, "DMX6FireUSB", 0, 1, 1, &pcm);
616 if (ret < 0) { 650 if (ret < 0) {
651 usb6fire_pcm_buffers_destroy(rt);
617 kfree(rt); 652 kfree(rt);
618 snd_printk(KERN_ERR PREFIX "cannot create pcm instance.\n"); 653 snd_printk(KERN_ERR PREFIX "cannot create pcm instance.\n");
619 return ret; 654 return ret;
@@ -625,6 +660,7 @@ int usb6fire_pcm_init(struct sfire_chip *chip)
625 snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, &pcm_ops); 660 snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, &pcm_ops);
626 661
627 if (ret) { 662 if (ret) {
663 usb6fire_pcm_buffers_destroy(rt);
628 kfree(rt); 664 kfree(rt);
629 snd_printk(KERN_ERR PREFIX 665 snd_printk(KERN_ERR PREFIX
630 "error preallocating pcm buffers.\n"); 666 "error preallocating pcm buffers.\n");
@@ -669,6 +705,9 @@ void usb6fire_pcm_abort(struct sfire_chip *chip)
669 705
670void usb6fire_pcm_destroy(struct sfire_chip *chip) 706void usb6fire_pcm_destroy(struct sfire_chip *chip)
671{ 707{
672 kfree(chip->pcm); 708 struct pcm_runtime *rt = chip->pcm;
709
710 usb6fire_pcm_buffers_destroy(rt);
711 kfree(rt);
673 chip->pcm = NULL; 712 chip->pcm = NULL;
674} 713}
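
The 6fire changes move the large per-URB transfer buffers out of the runtime structures into separate allocations, paired with a destroy helper that is safe to call after a partial init. A userspace sketch of the same allocate/teardown shape using calloc/free; the counts and sizes are illustrative:

#include <stdio.h>
#include <stdlib.h>

#define N_URBS        4
#define BUFFER_BYTES  (6 * 1024)

struct pcm_urb { unsigned char *buffer; };
struct pcm_runtime { struct pcm_urb out_urbs[N_URBS], in_urbs[N_URBS]; };

static void buffers_destroy(struct pcm_runtime *rt)
{
    for (int i = 0; i < N_URBS; i++) {
        free(rt->out_urbs[i].buffer);    /* free(NULL) is harmless */
        free(rt->in_urbs[i].buffer);
    }
}

static int buffers_init(struct pcm_runtime *rt)
{
    for (int i = 0; i < N_URBS; i++) {
        rt->out_urbs[i].buffer = calloc(1, BUFFER_BYTES);
        rt->in_urbs[i].buffer = calloc(1, BUFFER_BYTES);
        if (!rt->out_urbs[i].buffer || !rt->in_urbs[i].buffer)
            return -1;                   /* caller runs buffers_destroy() */
    }
    return 0;
}

int main(void)
{
    struct pcm_runtime rt = { 0 };

    if (buffers_init(&rt) < 0) {
        buffers_destroy(&rt);
        return 1;
    }
    printf("allocated %d in/out buffer pairs\n", N_URBS);
    buffers_destroy(&rt);
    return 0;
}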
diff --git a/sound/usb/6fire/pcm.h b/sound/usb/6fire/pcm.h
index 9b01133ee3fe..f5779d6182c6 100644
--- a/sound/usb/6fire/pcm.h
+++ b/sound/usb/6fire/pcm.h
@@ -32,7 +32,7 @@ struct pcm_urb {
32 struct urb instance; 32 struct urb instance;
33 struct usb_iso_packet_descriptor packets[PCM_N_PACKETS_PER_URB]; 33 struct usb_iso_packet_descriptor packets[PCM_N_PACKETS_PER_URB];
34 /* END DO NOT SEPARATE */ 34 /* END DO NOT SEPARATE */
35 u8 buffer[PCM_N_PACKETS_PER_URB * PCM_MAX_PACKET_SIZE]; 35 u8 *buffer;
36 36
37 struct pcm_urb *peer; 37 struct pcm_urb *peer;
38}; 38};
diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c
index d5438083fd6a..95558ef4a7a0 100644
--- a/sound/usb/mixer.c
+++ b/sound/usb/mixer.c
@@ -888,6 +888,7 @@ static void volume_control_quirks(struct usb_mixer_elem_info *cval,
888 case USB_ID(0x046d, 0x081b): /* HD Webcam c310 */ 888 case USB_ID(0x046d, 0x081b): /* HD Webcam c310 */
889 case USB_ID(0x046d, 0x081d): /* HD Webcam c510 */ 889 case USB_ID(0x046d, 0x081d): /* HD Webcam c510 */
890 case USB_ID(0x046d, 0x0825): /* HD Webcam c270 */ 890 case USB_ID(0x046d, 0x0825): /* HD Webcam c270 */
891 case USB_ID(0x046d, 0x0826): /* HD Webcam c525 */
891 case USB_ID(0x046d, 0x0991): 892 case USB_ID(0x046d, 0x0991):
892 /* Most audio usb devices lie about volume resolution. 893 /* Most audio usb devices lie about volume resolution.
893 * Most Logitech webcams have res = 384. 894 * Most Logitech webcams have res = 384.
diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
index 1bc45e71f1fe..0df9ede99dfd 100644
--- a/sound/usb/quirks.c
+++ b/sound/usb/quirks.c
@@ -319,19 +319,19 @@ static int create_auto_midi_quirk(struct snd_usb_audio *chip,
319 if (altsd->bNumEndpoints < 1) 319 if (altsd->bNumEndpoints < 1)
320 return -ENODEV; 320 return -ENODEV;
321 epd = get_endpoint(alts, 0); 321 epd = get_endpoint(alts, 0);
322 if (!usb_endpoint_xfer_bulk(epd) || 322 if (!usb_endpoint_xfer_bulk(epd) &&
323 !usb_endpoint_xfer_int(epd)) 323 !usb_endpoint_xfer_int(epd))
324 return -ENODEV; 324 return -ENODEV;
325 325
326 switch (USB_ID_VENDOR(chip->usb_id)) { 326 switch (USB_ID_VENDOR(chip->usb_id)) {
327 case 0x0499: /* Yamaha */ 327 case 0x0499: /* Yamaha */
328 err = create_yamaha_midi_quirk(chip, iface, driver, alts); 328 err = create_yamaha_midi_quirk(chip, iface, driver, alts);
329 if (err < 0 && err != -ENODEV) 329 if (err != -ENODEV)
330 return err; 330 return err;
331 break; 331 break;
332 case 0x0582: /* Roland */ 332 case 0x0582: /* Roland */
333 err = create_roland_midi_quirk(chip, iface, driver, alts); 333 err = create_roland_midi_quirk(chip, iface, driver, alts);
334 if (err < 0 && err != -ENODEV) 334 if (err != -ENODEV)
335 return err; 335 return err;
336 break; 336 break;
337 } 337 }